main.c revision 1.16 1 /* $NetBSD: main.c,v 1.16 2018/04/07 00:19:53 christos Exp $ */
2
3 #include <config.h>
4
5 #include <event2/util.h>
6 #include <event2/event.h>
7
8 #include "ntp_workimpl.h"
9 #ifdef WORK_THREAD
10 # include <event2/thread.h>
11 #endif
12
13 #include "main.h"
14 #include "ntp_libopts.h"
15 #include "kod_management.h"
16 #include "networking.h"
17 #include "utilities.h"
18 #include "log.h"
19 #include "libntp.h"
20
21
int shutting_down;			/* set once event_base_loopexit() requested */
int time_derived;			/* a server reply was successfully processed */
int time_adjusted;			/* the local clock was actually set */
int n_pending_dns = 0;			/* outstanding async DNS lookups */
int n_pending_ntp = 0;			/* outstanding NTP queries awaiting reply */
int ai_fam_pref = AF_UNSPEC;		/* -4/-6 address family preference */
int ntpver = 4;				/* NTP version to put in packets */
double steplimit = -1;			/* seconds; from --steplimit (ms) */
SOCKET sock4 = -1;			/* Socket for IPv4 */
SOCKET sock6 = -1;			/* Socket for IPv6 */
/*
** BCAST *must* listen on port 123 (by default), so we can only
** use the UCST sockets (above) if they too are using port 123
*/
SOCKET bsock4 = -1;			/* Broadcast Socket for IPv4 */
SOCKET bsock6 = -1;			/* Broadcast Socket for IPv6 */
struct event_base *base;		/* the single libevent event loop */
struct event *ev_sock4;			/* persistent read/timeout event, sock4 */
struct event *ev_sock6;			/* persistent read/timeout event, sock6 */
struct event *ev_worker_timeout;	/* intres idle timer (intres_timeout_req) */
struct event *ev_xmt_timer;		/* one-shot timer driving xmt_q */

/*
 * Per-lookup context handed to the async resolver and carried by every
 * sent_pkt derived from that lookup.  Allocated in handle_lookup() as a
 * single block holding the struct plus a copy of the name.
 */
struct dns_ctx {
	const char *	name;		/* server name as given on cmdline */
	int		flags;		/* CTX_* bits below */
#define CTX_BCST	0x0001
#define CTX_UCST	0x0002
#define CTX_xCST	0x0003		/* mask: either cast type */
#define CTX_CONC	0x0004		/* concurrent (-c) lookup */
#define CTX_unused	0xfffd
	int		key_id;		/* auth key ID, or -1 if no auth */
	struct timeval	timeout;	/* copy of response_tv at lookup time */
	struct key *	key;		/* resolved key, NULL if none */
};

/* One record per destination address we have (or will have) queried. */
typedef struct sent_pkt_tag sent_pkt;
struct sent_pkt_tag {
	sent_pkt *		link;	/* next in per-family list */
	struct dns_ctx *	dctx;	/* owning lookup context */
	sockaddr_u		addr;	/* destination address */
	time_t			stime;	/* send time (UNIX epoch), 0 if unsent */
	int			done;	/* reply seen or timed out */
	struct pkt		x_pkt;	/* copy of the packet we sent */
};

/* Entry in the time-sorted transmit queue xmt_q. */
typedef struct xmt_ctx_tag xmt_ctx;
struct xmt_ctx_tag {
	xmt_ctx *		link;	/* next in xmt_q */
	SOCKET			sock;	/* socket to send on */
	time_t			sched;	/* earliest send time (seconds) */
	sent_pkt *		spkt;	/* packet record to transmit */
};

struct timeval	gap;			/* minimum spacing between sends */
xmt_ctx *	xmt_q;			/* transmit queue, sorted by sched */
struct key *	keys = NULL;		/* keys loaded from --keyfile */
int		response_timeout;	/* seconds, from --timeout */
struct timeval	response_tv;		/* response_timeout as a timeval */
struct timeval	start_tv;		/* when the event loop started */
/* check the timeout at least once per second */
struct timeval	wakeup_tv = { 0, 888888 };

/* sent-packet lists, one per address family */
sent_pkt *	fam_listheads[2];
#define v4_pkts_list	(fam_listheads[0])
#define v6_pkts_list	(fam_listheads[1])

/* receive buffer, sized for the largest packet we may be handed */
static union {
	struct pkt pkt;
	char   buf[LEN_PKT_NOMAC + NTP_MAXEXTEN + MAX_MAC_LEN];
} rbuf;

#define r_pkt  rbuf.pkt

#ifdef HAVE_DROPROOT
int droproot;			/* intres imports these */
int root_dropped;
#endif
u_long current_time;		/* libntp/authkeys.c */

/* forward declarations */
void open_sockets(void);
void handle_lookup(const char *name, int flags);
void sntp_addremove_fd(int fd, int is_pipe, int remove_it);
void worker_timeout(evutil_socket_t, short, void *);
void worker_resp_cb(evutil_socket_t, short, void *);
void sntp_name_resolved(int, int, void *, const char *, const char *,
			const struct addrinfo *,
			const struct addrinfo *);
void queue_xmt(SOCKET sock, struct dns_ctx *dctx, sent_pkt *spkt,
	       u_int xmt_delay);
void xmt_timer_cb(evutil_socket_t, short, void *ptr);
void xmt(xmt_ctx *xctx);
int  check_kod(const struct addrinfo *ai);
void timeout_query(sent_pkt *);
void timeout_queries(void);
void sock_cb(evutil_socket_t, short, void *);
void check_exit_conditions(void);
void sntp_libevent_log_cb(int, const char *);
void set_li_vn_mode(struct pkt *spkt, char leap, char version, char mode);
int  set_time(double offset);
void dec_pending_ntp(const char *, sockaddr_u *);
int  libevent_version_ok(void);
int  gettimeofday_cached(struct event_base *b, struct timeval *tv);
125
/*
 * The actual main function.
 *
 * Invoked from the platform main() wrapper.  Sequence: set up logging,
 * verify the libevent runtime version, init libntp and auth, process
 * command-line options, create the event base, wire the intres async
 * resolver callbacks, open the UDP sockets, queue one DNS lookup per
 * requested server (-b broadcast, -c concurrent, plain args unicast),
 * then run the event loop to completion.
 *
 * Returns 0 on success; 1 if stepping/slewing was requested but the
 * clock was never adjusted; -1 on libevent setup failure.  May exit()
 * directly on usage or fatal init errors.
 */
int
sntp_main (
	int argc,
	char **argv,
	const char *sntpVersion
	)
{
	int			i;
	int			exitcode;
	int			optct;
	struct event_config *	evcfg;

	/* Initialize logging system - sets up progname */
	sntp_init_logging(argv[0]);

	if (!libevent_version_ok())
		exit(EX_SOFTWARE);

	init_lib();
	init_auth();

	optct = ntpOptionProcess(&sntpOptions, argc, argv);
	argc -= optct;		/* remaining args are server names */
	argv += optct;


	debug = OPT_VALUE_SET_DEBUG_LEVEL;

	TRACE(2, ("init_lib() done, %s%s\n",
		  (ipv4_works)
		      ? "ipv4_works "
		      : "",
		  (ipv6_works)
		      ? "ipv6_works "
		      : ""));
	ntpver = OPT_VALUE_NTPVERSION;
	/* --steplimit is given in milliseconds; store seconds */
	steplimit = OPT_VALUE_STEPLIMIT / 1e3;
	/* --gap is in milliseconds; clamp to [0, 999999] microseconds */
	gap.tv_usec = max(0, OPT_VALUE_GAP * 1000);
	gap.tv_usec = min(gap.tv_usec, 999999);

	if (HAVE_OPT(LOGFILE))
		open_logfile(OPT_ARG(LOGFILE));

	msyslog(LOG_INFO, "%s", sntpVersion);

	if (0 == argc && !HAVE_OPT(BROADCAST) && !HAVE_OPT(CONCURRENT)) {
		printf("%s: Must supply at least one of -b hostname, -c hostname, or hostname.\n",
		       progname);
		exit(EX_USAGE);
	}


	/*
	** Eventually, we probably want:
	** - separate bcst and ucst timeouts (why?)
	** - multiple --timeout values in the commandline
	*/

	response_timeout = OPT_VALUE_TIMEOUT;
	response_tv.tv_sec = response_timeout;
	response_tv.tv_usec = 0;

	/* IPv6 available? */
	if (isc_net_probeipv6() != ISC_R_SUCCESS) {
		ai_fam_pref = AF_INET;
		TRACE(1, ("No ipv6 support available, forcing ipv4\n"));
	} else {
		/* Check for options -4 and -6 */
		if (HAVE_OPT(IPV4))
			ai_fam_pref = AF_INET;
		else if (HAVE_OPT(IPV6))
			ai_fam_pref = AF_INET6;
	}

	/* TODO: Parse config file if declared */

	/*
	** Init the KOD system.
	** For embedded systems with no writable filesystem,
	** -K /dev/null can be used to disable KoD storage.
	*/
	kod_init_kod_db(OPT_ARG(KOD), FALSE);

	/* HMS: Check and see what happens if KEYFILE doesn't exist */
	auth_init(OPT_ARG(KEYFILE), &keys);

	/*
	** Considering employing a variable that prevents functions of doing
	** anything until everything is initialized properly
	**
	** HMS: What exactly does the above mean?
	*/
	event_set_log_callback(&sntp_libevent_log_cb);
	if (debug > 0)
		event_enable_debug_mode();
#ifdef WORK_THREAD
	evthread_use_pthreads();
	/* we use libevent from main thread only, locks should be academic */
	if (debug > 0)
		evthread_enable_lock_debuging();
#endif
	evcfg = event_config_new();
	if (NULL == evcfg) {
		printf("%s: event_config_new() failed!\n", progname);
		return -1;
	}
#ifndef HAVE_SOCKETPAIR
	/* without socketpair() intres uses pipes; backend must handle fds */
	event_config_require_features(evcfg, EV_FEATURE_FDS);
#endif
	/* all libevent calls are from main thread */
	/* event_config_set_flag(evcfg, EVENT_BASE_FLAG_NOLOCK); */
	base = event_base_new_with_config(evcfg);
	event_config_free(evcfg);
	if (NULL == base) {
		printf("%s: event_base_new() failed!\n", progname);
		return -1;
	}

	/* wire into intres resolver */
	worker_per_query = TRUE;
	addremove_io_fd = &sntp_addremove_fd;

	open_sockets();

	if (HAVE_OPT(BROADCAST)) {
		int		cn = STACKCT_OPT( BROADCAST );
		const char **	cp = STACKLST_OPT( BROADCAST );

		while (cn-- > 0) {
			handle_lookup(*cp, CTX_BCST);
			cp++;
		}
	}

	if (HAVE_OPT(CONCURRENT)) {
		int		cn = STACKCT_OPT( CONCURRENT );
		const char **	cp = STACKLST_OPT( CONCURRENT );

		while (cn-- > 0) {
			handle_lookup(*cp, CTX_UCST | CTX_CONC);
			cp++;
		}
	}

	for (i = 0; i < argc; ++i)
		handle_lookup(argv[i], CTX_UCST);

	/* remember loop start for the overall give-up check */
	gettimeofday_cached(base, &start_tv);
	event_base_dispatch(base);
	event_base_free(base);

	/* -S/-s requested but nothing ever set the clock -> failure */
	if (!time_adjusted &&
	    (ENABLED_OPT(STEP) || ENABLED_OPT(SLEW)))
		exitcode = 1;
	else
		exitcode = 0;

	return exitcode;
}
288
289
290 /*
291 ** open sockets and make them non-blocking
292 */
293 void
294 open_sockets(
295 void
296 )
297 {
298 sockaddr_u name;
299
300 if (-1 == sock4) {
301 sock4 = socket(PF_INET, SOCK_DGRAM, 0);
302 if (-1 == sock4) {
303 /* error getting a socket */
304 msyslog(LOG_ERR, "open_sockets: socket(PF_INET) failed: %m");
305 exit(1);
306 }
307 /* Make it non-blocking */
308 make_socket_nonblocking(sock4);
309
310 /* Let's try using a wildcard... */
311 ZERO(name);
312 AF(&name) = AF_INET;
313 SET_ADDR4N(&name, INADDR_ANY);
314 SET_PORT(&name, (HAVE_OPT(USERESERVEDPORT) ? 123 : 0));
315
316 if (-1 == bind(sock4, &name.sa,
317 SOCKLEN(&name))) {
318 msyslog(LOG_ERR, "open_sockets: bind(sock4) failed: %m");
319 exit(1);
320 }
321
322 /* Register an NTP callback for recv/timeout */
323 ev_sock4 = event_new(base, sock4,
324 EV_TIMEOUT | EV_READ | EV_PERSIST,
325 &sock_cb, NULL);
326 if (NULL == ev_sock4) {
327 msyslog(LOG_ERR,
328 "open_sockets: event_new(base, sock4) failed!");
329 } else {
330 event_add(ev_sock4, &wakeup_tv);
331 }
332 }
333
334 /* We may not always have IPv6... */
335 if (-1 == sock6 && ipv6_works) {
336 sock6 = socket(PF_INET6, SOCK_DGRAM, 0);
337 if (-1 == sock6 && ipv6_works) {
338 /* error getting a socket */
339 msyslog(LOG_ERR, "open_sockets: socket(PF_INET6) failed: %m");
340 exit(1);
341 }
342 /* Make it non-blocking */
343 make_socket_nonblocking(sock6);
344
345 /* Let's try using a wildcard... */
346 ZERO(name);
347 AF(&name) = AF_INET6;
348 SET_ADDR6N(&name, in6addr_any);
349 SET_PORT(&name, (HAVE_OPT(USERESERVEDPORT) ? 123 : 0));
350
351 if (-1 == bind(sock6, &name.sa,
352 SOCKLEN(&name))) {
353 msyslog(LOG_ERR, "open_sockets: bind(sock6) failed: %m");
354 exit(1);
355 }
356 /* Register an NTP callback for recv/timeout */
357 ev_sock6 = event_new(base, sock6,
358 EV_TIMEOUT | EV_READ | EV_PERSIST,
359 &sock_cb, NULL);
360 if (NULL == ev_sock6) {
361 msyslog(LOG_ERR,
362 "open_sockets: event_new(base, sock6) failed!");
363 } else {
364 event_add(ev_sock6, &wakeup_tv);
365 }
366 }
367
368 return;
369 }
370
371
/*
** handle_lookup
**
** Start an asynchronous DNS lookup for one server name.  A dns_ctx and
** a private copy of the name live in a single allocation that travels
** to sntp_name_resolved() as the callback context.  Bumps
** n_pending_dns before handing off.
*/
void
handle_lookup(
	const char *name,
	int flags
	)
{
	struct addrinfo		hints;	/* Local copy is OK */
	struct dns_ctx *	ctx;
	char *			namebuf;
	size_t			namebytes;

	TRACE(1, ("handle_lookup(%s,%#x)\n", name, flags));

	ZERO(hints);
	hints.ai_family = ai_fam_pref;
	hints.ai_flags = AI_CANONNAME | Z_AI_NUMERICSERV;
	/*
	** Without a socktype we would get at least two entries per
	** address -- one TCP, one UDP -- and we only want UDP.
	*/
	hints.ai_socktype = SOCK_DGRAM;
	hints.ai_protocol = IPPROTO_UDP;

	/* one allocation: the ctx followed immediately by the name copy */
	namebytes = strlen(name) + 1;
	ctx = emalloc_zero(sizeof(*ctx) + namebytes);
	namebuf = (char *)(ctx + 1);
	memcpy(namebuf, name, namebytes);
	ctx->name = namebuf;
	ctx->flags = flags;
	ctx->timeout = response_tv;
	ctx->key = NULL;

	/* The following should arguably be passed in... */
	if (!ENABLED_OPT(AUTHENTICATION)) {
		ctx->key_id = -1;
	} else {
		ctx->key_id = OPT_VALUE_AUTHENTICATION;
		get_key(ctx->key_id, &ctx->key);
		if (NULL == ctx->key) {
			fprintf(stderr, "%s: Authentication with keyID %d requested, but no matching keyID found in <%s>!\n",
				progname, ctx->key_id, OPT_ARG(KEYFILE));
			exit(1);
		}
	}

	++n_pending_dns;
	getaddrinfo_sometime(name, "123", &hints, 0,
			     &sntp_name_resolved, ctx);
}
427
428
/*
** DNS Callback:
** - For each IP:
** - - open a socket
** - - increment n_pending_ntp
** - - send a request if this is a Unicast callback
** - - queue wait for response
** - decrement n_pending_dns
**
** Invoked by getaddrinfo_sometime() with the dns_ctx allocated in
** handle_lookup() as `context`.  On resolver failure only reports the
** error; otherwise walks the address list, skipping KoD-listed and
** (when IPv6 is unusable) AF_INET6 addresses.  Per family a delay
** counter staggers transmissions (see queue_xmt).  Always decrements
** n_pending_dns and re-checks exit conditions, even on failure.
*/
void
sntp_name_resolved(
	int			rescode,	/* 0 or EAI_* error */
	int			gai_errno,	/* errno for EAI_SYSTEM */
	void *			context,	/* struct dns_ctx * */
	const char *		name,
	const char *		service,
	const struct addrinfo *	hints,
	const struct addrinfo *	addr		/* result list, if rescode==0 */
	)
{
	struct dns_ctx *	dctx;
	sent_pkt *		spkt;
	const struct addrinfo *	ai;
	SOCKET			sock;
	u_int			xmt_delay_v4;
	u_int			xmt_delay_v6;
	u_int			xmt_delay;
	size_t			octets;

	xmt_delay_v4 = 0;
	xmt_delay_v6 = 0;
	dctx = context;
	if (rescode) {
#ifdef EAI_SYSTEM
		if (EAI_SYSTEM == rescode) {
			/* real cause is in gai_errno; surface it via %m */
			errno = gai_errno;
			mfprintf(stderr, "%s lookup error %m\n",
				 dctx->name);
		} else
#endif
			fprintf(stderr, "%s lookup error %s\n",
				dctx->name, gai_strerror(rescode));
	} else {
		TRACE(3, ("%s [%s]\n", dctx->name,
			  (addr->ai_canonname != NULL)
			      ? addr->ai_canonname
			      : ""));

		for (ai = addr; ai != NULL; ai = ai->ai_next) {

			/* skip addresses with a Kiss-o'-Death on file */
			if (check_kod(ai))
				continue;

			switch (ai->ai_family) {

			case AF_INET:
				sock = sock4;
				xmt_delay = xmt_delay_v4;
				xmt_delay_v4++;
				break;

			case AF_INET6:
				if (!ipv6_works)
					continue;

				sock = sock6;
				xmt_delay = xmt_delay_v6;
				xmt_delay_v6++;
				break;

			default:
				msyslog(LOG_ERR, "sntp_name_resolved: unexpected ai_family: %d",
					ai->ai_family);
				exit(1);
				break;
			}

			/*
			** We're waiting for a response for either unicast
			** or broadcast, so...
			*/
			++n_pending_ntp;

			/* If this is for a unicast IP, queue a request */
			if (dctx->flags & CTX_UCST) {
				spkt = emalloc_zero(sizeof(*spkt));
				spkt->dctx = dctx;
				/* copy no more than our sockaddr_u holds */
				octets = min(ai->ai_addrlen, sizeof(spkt->addr));
				memcpy(&spkt->addr, ai->ai_addr, octets);
				queue_xmt(sock, dctx, spkt, xmt_delay);
			}
		}
	}
	/* n_pending_dns really should be >0 here... */
	--n_pending_dns;
	check_exit_conditions();
}
526
527
528 /*
529 ** queue_xmt
530 */
531 void
532 queue_xmt(
533 SOCKET sock,
534 struct dns_ctx * dctx,
535 sent_pkt * spkt,
536 u_int xmt_delay
537 )
538 {
539 sockaddr_u * dest;
540 sent_pkt ** pkt_listp;
541 sent_pkt * match;
542 xmt_ctx * xctx;
543 struct timeval start_cb;
544 struct timeval delay;
545
546 dest = &spkt->addr;
547 if (IS_IPV6(dest))
548 pkt_listp = &v6_pkts_list;
549 else
550 pkt_listp = &v4_pkts_list;
551
552 /* reject attempts to add address already listed */
553 for (match = *pkt_listp; match != NULL; match = match->link) {
554 if (ADDR_PORT_EQ(&spkt->addr, &match->addr)) {
555 if (strcasecmp(spkt->dctx->name,
556 match->dctx->name))
557 printf("%s %s duplicate address from %s ignored.\n",
558 sptoa(&match->addr),
559 match->dctx->name,
560 spkt->dctx->name);
561 else
562 printf("%s %s, duplicate address ignored.\n",
563 sptoa(&match->addr),
564 match->dctx->name);
565 dec_pending_ntp(spkt->dctx->name, &spkt->addr);
566 free(spkt);
567 return;
568 }
569 }
570
571 LINK_SLIST(*pkt_listp, spkt, link);
572
573 xctx = emalloc_zero(sizeof(*xctx));
574 xctx->sock = sock;
575 xctx->spkt = spkt;
576 gettimeofday_cached(base, &start_cb);
577 xctx->sched = start_cb.tv_sec + (2 * xmt_delay);
578
579 LINK_SORT_SLIST(xmt_q, xctx, (xctx->sched < L_S_S_CUR()->sched),
580 link, xmt_ctx);
581 if (xmt_q == xctx) {
582 /*
583 * The new entry is the first scheduled. The timer is
584 * either not active or is set for the second xmt
585 * context in xmt_q.
586 */
587 if (NULL == ev_xmt_timer)
588 ev_xmt_timer = event_new(base, INVALID_SOCKET,
589 EV_TIMEOUT,
590 &xmt_timer_cb, NULL);
591 if (NULL == ev_xmt_timer) {
592 msyslog(LOG_ERR,
593 "queue_xmt: event_new(base, -1, EV_TIMEOUT) failed!");
594 exit(1);
595 }
596 ZERO(delay);
597 if (xctx->sched > start_cb.tv_sec)
598 delay.tv_sec = xctx->sched - start_cb.tv_sec;
599 event_add(ev_xmt_timer, &delay);
600 TRACE(2, ("queue_xmt: xmt timer for %u usec\n",
601 (u_int)delay.tv_usec));
602 }
603 }
604
605
/*
** xmt_timer_cb
**
** One-shot timer callback driving the transmit queue.  If the head of
** xmt_q is due, unlink and send it.  Then re-arm: with `gap` when the
** new head is also already due (rate-limits back-to-back sends),
** otherwise with the whole seconds remaining until it is due.  Does
** nothing when the queue is empty or shutdown has begun.
*/
void
xmt_timer_cb(
	evutil_socket_t	fd,
	short		what,
	void *		ctx
	)
{
	struct timeval	start_cb;
	struct timeval	delay;
	xmt_ctx *	x;

	UNUSED_ARG(fd);
	UNUSED_ARG(ctx);
	DEBUG_INSIST(EV_TIMEOUT == what);

	if (NULL == xmt_q || shutting_down)
		return;
	gettimeofday_cached(base, &start_cb);
	if (xmt_q->sched <= start_cb.tv_sec) {
		/* head is due: send it and consume the queue entry */
		UNLINK_HEAD_SLIST(x, xmt_q, link);
		TRACE(2, ("xmt_timer_cb: at .%6.6u -> %s\n",
			  (u_int)start_cb.tv_usec, stoa(&x->spkt->addr)));
		xmt(x);
		free(x);
		if (NULL == xmt_q)
			return;
	}
	/* note: xmt_q may now point at the next entry */
	if (xmt_q->sched <= start_cb.tv_sec) {
		/* next entry also due: space it out by the --gap delay */
		event_add(ev_xmt_timer, &gap);
		TRACE(2, ("xmt_timer_cb: at .%6.6u gap %6.6u\n",
			  (u_int)start_cb.tv_usec,
			  (u_int)gap.tv_usec));
	} else {
		/* sleep until the next entry's scheduled second */
		delay.tv_sec = xmt_q->sched - start_cb.tv_sec;
		delay.tv_usec = 0;
		event_add(ev_xmt_timer, &delay);
		TRACE(2, ("xmt_timer_cb: at .%6.6u next %ld seconds\n",
			  (u_int)start_cb.tv_usec,
			  (long)delay.tv_sec));
	}
}
650
651
652 /*
653 ** xmt()
654 */
655 void
656 xmt(
657 xmt_ctx * xctx
658 )
659 {
660 SOCKET sock = xctx->sock;
661 struct dns_ctx *dctx = xctx->spkt->dctx;
662 sent_pkt * spkt = xctx->spkt;
663 sockaddr_u * dst = &spkt->addr;
664 struct timeval tv_xmt;
665 struct pkt x_pkt;
666 size_t pkt_len;
667 int sent;
668
669 if (0 != gettimeofday(&tv_xmt, NULL)) {
670 msyslog(LOG_ERR,
671 "xmt: gettimeofday() failed: %m");
672 exit(1);
673 }
674 tv_xmt.tv_sec += JAN_1970;
675
676 pkt_len = generate_pkt(&x_pkt, &tv_xmt, dctx->key_id,
677 dctx->key);
678
679 sent = sendpkt(sock, dst, &x_pkt, pkt_len);
680 if (sent) {
681 /* Save the packet we sent... */
682 memcpy(&spkt->x_pkt, &x_pkt, min(sizeof(spkt->x_pkt),
683 pkt_len));
684 spkt->stime = tv_xmt.tv_sec - JAN_1970;
685
686 TRACE(2, ("xmt: %lx.%6.6u %s %s\n", (u_long)tv_xmt.tv_sec,
687 (u_int)tv_xmt.tv_usec, dctx->name, stoa(dst)));
688 } else {
689 dec_pending_ntp(dctx->name, dst);
690 }
691
692 return;
693 }
694
695
/*
 * timeout_queries() -- give up on unrequited NTP queries
 *
 * Called from sock_cb() on the periodic wakeup.  Walks both per-family
 * sent-packet lists, times out individual queries older than
 * response_timeout, and -- independently -- aborts the whole run once
 * the event loop itself has been going longer than response_timeout.
 */
void
timeout_queries(void)
{
	struct timeval	start_cb;
	u_int		idx;
	sent_pkt *	head;
	sent_pkt *	spkt;
	sent_pkt *	spkt_next;
	long		age;
	int didsomething = 0;

	TRACE(3, ("timeout_queries: called to check %u items\n",
		  (unsigned)COUNTOF(fam_listheads)));

	gettimeofday_cached(base, &start_cb);
	for (idx = 0; idx < COUNTOF(fam_listheads); idx++) {
		head = fam_listheads[idx];
		for (spkt = head; spkt != NULL; spkt = spkt_next) {
			char xcst;

			didsomething = 1;
			/* classify for logging: Broadcast or Unicast */
			switch (spkt->dctx->flags & CTX_xCST) {
			case CTX_BCST:
				xcst = 'B';
				break;

			case CTX_UCST:
				xcst = 'U';
				break;

			default:
				INSIST(!"spkt->dctx->flags neither UCST nor BCST");
				break;
			}

			/* grab next before spkt may be marked done */
			spkt_next = spkt->link;
			/* unsent or already-resolved entries don't age */
			if (0 == spkt->stime || spkt->done)
				continue;
			age = start_cb.tv_sec - spkt->stime;
			TRACE(3, ("%s %s %cCST age %ld\n",
				  stoa(&spkt->addr),
				  spkt->dctx->name, xcst, age));
			if (age > response_timeout)
				timeout_query(spkt);
		}
	}
	// Do we care about didsomething?
	TRACE(3, ("timeout_queries: didsomething is %d, age is %ld\n",
		  didsomething, (long)(start_cb.tv_sec - start_tv.tv_sec)));
	/* overall give-up: measured from event-loop start, not per query */
	if (start_cb.tv_sec - start_tv.tv_sec > response_timeout) {
		TRACE(3, ("timeout_queries: bail!\n"));
		event_base_loopexit(base, NULL);
		shutting_down = TRUE;
	}
}
754
755
756 void dec_pending_ntp(
757 const char * name,
758 sockaddr_u * server
759 )
760 {
761 if (n_pending_ntp > 0) {
762 --n_pending_ntp;
763 check_exit_conditions();
764 } else {
765 INSIST(0 == n_pending_ntp);
766 TRACE(1, ("n_pending_ntp was zero before decrement for %s\n",
767 hostnameaddr(name, server)));
768 }
769 }
770
771
772 void timeout_query(
773 sent_pkt * spkt
774 )
775 {
776 sockaddr_u * server;
777 char xcst;
778
779
780 switch (spkt->dctx->flags & CTX_xCST) {
781 case CTX_BCST:
782 xcst = 'B';
783 break;
784
785 case CTX_UCST:
786 xcst = 'U';
787 break;
788
789 default:
790 INSIST(!"spkt->dctx->flags neither UCST nor BCST");
791 break;
792 }
793 spkt->done = TRUE;
794 server = &spkt->addr;
795 msyslog(LOG_INFO, "%s no %cCST response after %d seconds",
796 hostnameaddr(spkt->dctx->name, server), xcst,
797 response_timeout);
798 dec_pending_ntp(spkt->dctx->name, server);
799 return;
800 }
801
802
/*
** check_kod() -- return nonzero if a Kiss-o'-Death entry is already on
** file for this address (caller should skip it), zero otherwise.
*/
int
check_kod(
	const struct addrinfo *	ai
	)
{
	struct kod_entry *	reason;
	char *			hostname;
	int			listed;

	/* Is there a KoD on file for this address? */
	hostname = addrinfo_to_str(ai);
	TRACE(2, ("check_kod: checking <%s>\n", hostname));
	listed = (0 != search_entry(hostname, &reason));
	if (listed) {
		printf("prior KoD for %s, skipping.\n",
			hostname);
		free(reason);
	}
	free(hostname);

	return listed;
}
829
830
/*
** Socket readable/timeout Callback:
** Read in the packet
** Unicast:
** - close socket
** - decrement n_pending_ntp
** - If packet is good, set the time and "exit"
** Broadcast:
** - If packet is good, set the time and "exit"
**
** Registered EV_PERSIST on sock4/sock6 with wakeup_tv, so it also
** fires on timeout (~0.9s) to drive timeout_queries().  A received
** packet is matched against the per-family sent list by source
** address; unmatched sources are dropped.
*/
void
sock_cb(
	evutil_socket_t fd,
	short what,
	void *ptr
	)
{
	sockaddr_u	sender;
	sockaddr_u *	psau;
	sent_pkt **	p_pktlist;
	sent_pkt *	spkt;
	int		rpktl;
	int		rc;

	INSIST(sock4 == fd || sock6 == fd);

	TRACE(3, ("sock_cb: event on sock%s:%s%s%s%s\n",
		  (fd == sock6)
		      ? "6"
		      : "4",
		  (what & EV_TIMEOUT) ? " timeout" : "",
		  (what & EV_READ)    ? " read" : "",
		  (what & EV_WRITE)   ? " write" : "",
		  (what & EV_SIGNAL)  ? " signal" : ""));

	/* no data: this was the periodic wakeup -- run the reaper */
	if (!(EV_READ & what)) {
		if (EV_TIMEOUT & what)
			timeout_queries();

		return;
	}

	/* Read in the packet */
	rpktl = recvdata(fd, &sender, &rbuf, sizeof(rbuf));
	if (rpktl < 0) {
		msyslog(LOG_DEBUG, "recvfrom error %m");
		return;
	}

	if (sock6 == fd)
		p_pktlist = &v6_pkts_list;
	else
		p_pktlist = &v4_pkts_list;

	/* match the sender against an address we actually queried */
	for (spkt = *p_pktlist; spkt != NULL; spkt = spkt->link) {
		psau = &spkt->addr;
		if (SOCK_EQ(&sender, psau))
			break;
	}
	if (NULL == spkt) {
		msyslog(LOG_WARNING,
			"Packet from unexpected source %s dropped",
			sptoa(&sender));
		return;
	}

	TRACE(1, ("sock_cb: %s %s\n", spkt->dctx->name,
		  sptoa(&sender)));

	/* sanity/auth checks against the copy of what we sent */
	rpktl = process_pkt(&r_pkt, &sender, rpktl, MODE_SERVER,
			    &spkt->x_pkt, "sock_cb");

	TRACE(2, ("sock_cb: process_pkt returned %d\n", rpktl));

	/* If this is a Unicast packet, one down ... */
	if (!spkt->done && (CTX_UCST & spkt->dctx->flags)) {
		dec_pending_ntp(spkt->dctx->name, &spkt->addr);
		spkt->done = TRUE;
	}


	/* If the packet is good, set the time and we're all done */
	rc = handle_pkt(rpktl, &r_pkt, &spkt->addr, spkt->dctx->name);
	if (0 != rc)
		TRACE(1, ("sock_cb: handle_pkt() returned %d\n", rc));
	check_exit_conditions();
}
918
919
920 /*
921 * check_exit_conditions()
922 *
923 * If sntp has a reply, ask the event loop to stop after this round of
924 * callbacks, unless --wait was used.
925 */
926 void
927 check_exit_conditions(void)
928 {
929 if ((0 == n_pending_ntp && 0 == n_pending_dns) ||
930 (time_derived && !HAVE_OPT(WAIT))) {
931 event_base_loopexit(base, NULL);
932 shutting_down = TRUE;
933 } else {
934 TRACE(2, ("%d NTP and %d name queries pending\n",
935 n_pending_ntp, n_pending_dns));
936 }
937 }
938
939
/*
 * sntp_addremove_fd() is invoked by the intres blocking worker code
 * to read from a pipe, or to stop same.
 *
 * Looks up the blocking_child owning `fd` (by its resp_read_pipe).
 * With remove_it set, tears down and frees that child's read event;
 * otherwise creates a persistent EV_READ event dispatching to
 * worker_resp_cb().  Unknown fds are silently ignored.
 */
void sntp_addremove_fd(
	int	fd,
	int	is_pipe,
	int	remove_it
	)
{
	u_int		idx;
	blocking_child *c;
	struct event *	ev;

#ifdef HAVE_SOCKETPAIR
	if (is_pipe) {
		/* sntp only asks for EV_FEATURE_FDS without HAVE_SOCKETPAIR */
		msyslog(LOG_ERR, "fatal: pipes not supported on systems with socketpair()");
		exit(1);
	}
#endif

	/* find the blocking child this descriptor belongs to */
	c = NULL;
	for (idx = 0; idx < blocking_children_alloc; idx++) {
		c = blocking_children[idx];
		if (NULL == c)
			continue;
		if (fd == c->resp_read_pipe)
			break;
	}
	if (idx == blocking_children_alloc)
		return;		/* fd not ours */

	if (remove_it) {
		/* detach before freeing so the callback can't fire */
		ev = c->resp_read_ctx;
		c->resp_read_ctx = NULL;
		event_del(ev);
		event_free(ev);

		return;
	}

	ev = event_new(base, fd, EV_READ | EV_PERSIST,
		       &worker_resp_cb, c);
	if (NULL == ev) {
		msyslog(LOG_ERR,
			"sntp_addremove_fd: event_new(base, fd) failed!");
		return;
	}
	c->resp_read_ctx = ev;
	event_add(ev, NULL);
}
992
993
/*
 * kill_asyncio() -- called by a forked intres child to close the
 * parent's open descriptors.  Closes each of the four sockets that is
 * open and marks it invalid.
 *
 * BUGFIX: the bsock4/bsock6 branches formerly closed sock4/sock6
 * (again) and reset those variables, so the broadcast sockets were
 * never closed and the unicast sockets could be closed twice.
 */
#ifdef WORK_FORK
void
kill_asyncio(
	int	startfd
	)
{
	if (INVALID_SOCKET != sock4) {
		closesocket(sock4);
		sock4 = INVALID_SOCKET;
	}
	if (INVALID_SOCKET != sock6) {
		closesocket(sock6);
		sock6 = INVALID_SOCKET;
	}
	if (INVALID_SOCKET != bsock4) {
		closesocket(bsock4);
		bsock4 = INVALID_SOCKET;
	}
	if (INVALID_SOCKET != bsock6) {
		closesocket(bsock6);
		bsock6 = INVALID_SOCKET;
	}
}
#endif
1019
1020
/*
 * worker_resp_cb() is invoked when resp_read_pipe is readable.
 *
 * ctx is the blocking_child registered in sntp_addremove_fd(); the
 * work of consuming the response is delegated to
 * process_blocking_resp().
 */
void
worker_resp_cb(
	evutil_socket_t	fd,
	short		what,
	void *		ctx	/* blocking_child * */
	)
{
	blocking_child *	c;

	DEBUG_INSIST(EV_READ & what);
	c = ctx;
	DEBUG_INSIST(fd == c->resp_read_pipe);
	process_blocking_resp(c);
}
1038
1039
1040 /*
1041 * intres_timeout_req(s) is invoked in the parent to schedule an idle
1042 * timeout to fire in s seconds, if not reset earlier by a call to
1043 * intres_timeout_req(0), which clears any pending timeout. When the
1044 * timeout expires, worker_idle_timer_fired() is invoked (again, in the
1045 * parent).
1046 *
1047 * sntp and ntpd each provide implementations adapted to their timers.
1048 */
1049 void
1050 intres_timeout_req(
1051 u_int seconds /* 0 cancels */
1052 )
1053 {
1054 struct timeval tv_to;
1055
1056 if (NULL == ev_worker_timeout) {
1057 ev_worker_timeout = event_new(base, -1,
1058 EV_TIMEOUT | EV_PERSIST,
1059 &worker_timeout, NULL);
1060 DEBUG_INSIST(NULL != ev_worker_timeout);
1061 } else {
1062 event_del(ev_worker_timeout);
1063 }
1064 if (0 == seconds)
1065 return;
1066 tv_to.tv_sec = seconds;
1067 tv_to.tv_usec = 0;
1068 event_add(ev_worker_timeout, &tv_to);
1069 }
1070
1071
/*
 * worker_timeout() -- libevent callback for the idle timer armed by
 * intres_timeout_req(); forwards to the blocking-worker machinery.
 */
void
worker_timeout(
	evutil_socket_t	fd,
	short		what,
	void *		ctx
	)
{
	UNUSED_ARG(fd);
	UNUSED_ARG(ctx);

	DEBUG_REQUIRE(EV_TIMEOUT & what);
	worker_idle_timer_fired();
}
1085
1086
1087 void
1088 sntp_libevent_log_cb(
1089 int severity,
1090 const char * msg
1091 )
1092 {
1093 int level;
1094
1095 switch (severity) {
1096
1097 default:
1098 case _EVENT_LOG_DEBUG:
1099 level = LOG_DEBUG;
1100 break;
1101
1102 case _EVENT_LOG_MSG:
1103 level = LOG_NOTICE;
1104 break;
1105
1106 case _EVENT_LOG_WARN:
1107 level = LOG_WARNING;
1108 break;
1109
1110 case _EVENT_LOG_ERR:
1111 level = LOG_ERR;
1112 break;
1113 }
1114
1115 msyslog(level, "%s", msg);
1116 }
1117
1118
/*
 * generate_pkt() -- fill in a client NTP packet for transmission.
 *
 * Writes the transmit timestamp (tv_xmt, already on the NTP timescale),
 * stratum/poll fields, and the leap/version/mode byte into *x_pkt.
 * When pkt_key is non-NULL a MAC is appended: exten[0] carries the key
 * ID and make_mac() writes the digest after it.
 *
 * Returns the total packet length in octets (LEN_PKT_NOMAC, plus key ID
 * and MAC when authenticating).
 */
int
generate_pkt (
	struct pkt *x_pkt,
	const struct timeval *tv_xmt,
	int key_id,
	struct key *pkt_key
	)
{
	l_fp	xmt_fp;
	int	pkt_len;
	int	mac_size;

	pkt_len = LEN_PKT_NOMAC;
	ZERO(*x_pkt);
	TVTOTS(tv_xmt, &xmt_fp);
	HTONL_FP(&xmt_fp, &x_pkt->xmt);
	x_pkt->stratum = STRATUM_TO_PKT(STRATUM_UNSPEC);
	x_pkt->ppoll = 8;
	/* FIXME! Modus broadcast + adr. check -> bdr. pkt */
	set_li_vn_mode(x_pkt, LEAP_NOTINSYNC, ntpver, 3);	/* mode 3 = client */
	if (debug > 0) {
		printf("generate_pkt: key_id %d, key pointer %p\n", key_id, pkt_key);
	}
	if (pkt_key != NULL) {
		/* key ID precedes the MAC in the extension area */
		x_pkt->exten[0] = htonl(key_id);
		mac_size = make_mac(x_pkt, pkt_len, MAX_MDG_LEN,
				    pkt_key, (char *)&x_pkt->exten[1]);
		if (mac_size > 0)
			pkt_len += mac_size + KEY_MAC_LEN;
#ifdef DEBUG
		if (debug > 0) {
			printf("generate_pkt: mac_size is %d\n", mac_size);
		}
#endif

	}
	return pkt_len;
}
1157
1158
/*
 * handle_pkt() -- act on the result of process_pkt() for one reply.
 *
 * rpktl > 0 means a valid packet of that length; otherwise it is one of
 * the process_pkt() error codes switched on below.  For a valid packet:
 * compute offset/precision/synch distance, log a summary line, and --
 * unless SNTP_PRETEND_TIME is set or the clock was already adjusted --
 * step/slew the clock when -S/-s was given.
 *
 * Returns -1 for an unusable server, 0/EX_OK or set_time()'s result on
 * success, 1 for other non-fatal conditions.
 */
int
handle_pkt(
	int		rpktl,
	struct pkt *	rpkt,
	sockaddr_u *	host,
	const char *	hostname
	)
{
	char		disptxt[32];
	const char *	addrtxt;
	struct timeval	tv_dst;
	int		cnt;
	int		sw_case;
	int		digits;
	int		stratum;
	char *		ref;
	char *		ts_str;
	const char *	leaptxt;
	double		offset;
	double		precision;
	double		synch_distance;
	char *		p_SNTP_PRETEND_TIME;
	time_t		pretend_time;
#if SIZEOF_TIME_T == 8
	long long	ll;
#else
	long		l;
#endif

	ts_str = NULL;

	/* collapse every positive length onto the single "good" case */
	if (rpktl > 0)
		sw_case = 1;
	else
		sw_case = rpktl;

	switch (sw_case) {

	case SERVER_UNUSEABLE:
		return -1;
		break;

	case PACKET_UNUSEABLE:
		break;

	case SERVER_AUTH_FAIL:
		break;

	case KOD_DEMOBILIZE:
		/* Received a DENY or RESTR KOD packet */
		addrtxt = stoa(host);
		ref = (char *)&rpkt->refid;
		/* remember this server so future runs skip it */
		add_entry(addrtxt, ref);
		msyslog(LOG_WARNING, "KOD code %c%c%c%c from %s %s",
			ref[0], ref[1], ref[2], ref[3], addrtxt, hostname);
		break;

	case KOD_RATE:
		/*
		** Hmm...
		** We should probably call add_entry() with an
		** expiration timestamp of several seconds in the future,
		** and back-off even more if we get more RATE responses.
		*/
		break;

	case 1:
		TRACE(3, ("handle_pkt: %d bytes from %s %s\n",
			  rpktl, stoa(host), hostname));

		gettimeofday_cached(base, &tv_dst);

		/* test hook: fake the local receive time */
		p_SNTP_PRETEND_TIME = getenv("SNTP_PRETEND_TIME");
		if (p_SNTP_PRETEND_TIME) {
			pretend_time = 0;
#if SIZEOF_TIME_T == 4
			if (1 == sscanf(p_SNTP_PRETEND_TIME, "%ld", &l))
				pretend_time = (time_t)l;
#elif SIZEOF_TIME_T == 8
			if (1 == sscanf(p_SNTP_PRETEND_TIME, "%lld", &ll))
				pretend_time = (time_t)ll;
#else
# include "GRONK: unexpected value for SIZEOF_TIME_T"
#endif
			if (0 != pretend_time)
				tv_dst.tv_sec = pretend_time;
		}

		offset_calculation(rpkt, rpktl, &tv_dst, &offset,
				   &precision, &synch_distance);
		time_derived = TRUE;

		/* number of fractional digits implied by precision, max 6 */
		for (digits = 0; (precision *= 10.) < 1.; ++digits)
			/* empty */ ;
		if (digits > 6)
			digits = 6;

		ts_str = tv_to_str(&tv_dst);
		stratum = rpkt->stratum;
		if (0 == stratum)
			stratum = 16;	/* display unspecified as 16 */

		if (synch_distance > 0.) {
			cnt = snprintf(disptxt, sizeof(disptxt),
				       " +/- %f", synch_distance);
			if ((size_t)cnt >= sizeof(disptxt))
				snprintf(disptxt, sizeof(disptxt),
					 "ERROR %d >= %d", cnt,
					 (int)sizeof(disptxt));
		} else {
			disptxt[0] = '\0';
		}

		switch (PKT_LEAP(rpkt->li_vn_mode)) {
		case LEAP_NOWARNING:
			leaptxt = "no-leap";
			break;
		case LEAP_ADDSECOND:
			leaptxt = "add-leap";
			break;
		case LEAP_DELSECOND:
			leaptxt = "del-leap";
			break;
		case LEAP_NOTINSYNC:
			leaptxt = "unsync";
			break;
		default:
			leaptxt = "LEAP-ERROR";
			break;
		}

		msyslog(LOG_INFO, "%s %+.*f%s %s s%d %s%s", ts_str,
			digits, offset, disptxt,
			hostnameaddr(hostname, host), stratum,
			leaptxt,
			(time_adjusted)
			    ? " [excess]"
			    : "");
		free(ts_str);

		/* under SNTP_PRETEND_TIME never touch the real clock */
		if (p_SNTP_PRETEND_TIME)
			return 0;

		if (!time_adjusted &&
		    (ENABLED_OPT(STEP) || ENABLED_OPT(SLEW)))
			return set_time(offset);

		return EX_OK;
	}

	return 1;
}
1311
1312
/*
 * offset_calculation() - derive the local clock offset, the server's
 * precision, and an estimated synchronization distance from a server
 * reply packet and the local receive timestamp.
 *
 * rpkt		received packet (fields in network byte order)
 * rpktl	received packet length, used only for the debug dump
 * tv_dst	local destination (receive) time, UNIX epoch
 * offset	out: clock offset in seconds, ((t21 + t34) / 2)
 * precision	out: server precision converted from log2 seconds
 * synch_distance out: root synchronization distance estimate
 */
void
offset_calculation(
	struct pkt *rpkt,
	int rpktl,
	struct timeval *tv_dst,
	double *offset,
	double *precision,
	double *synch_distance
	)
{
	l_fp p_rec, p_xmt, p_ref, p_org, tmp, dst;
	u_fp p_rdly, p_rdsp;
	double t21, t34, delta;

	/* Convert timestamps from network to host byte order */
	p_rdly = NTOHS_FP(rpkt->rootdelay);
	p_rdsp = NTOHS_FP(rpkt->rootdisp);
	NTOHL_FP(&rpkt->reftime, &p_ref);
	NTOHL_FP(&rpkt->org, &p_org);
	NTOHL_FP(&rpkt->rec, &p_rec);
	NTOHL_FP(&rpkt->xmt, &p_xmt);

	/* precision field is log2 of seconds; convert to seconds */
	*precision = LOGTOD(rpkt->precision);

	TRACE(3, ("offset_calculation: LOGTOD(rpkt->precision): %f\n", *precision));

	/* Compute offset etc. */
	tmp = p_rec;
	L_SUB(&tmp, &p_org);		/* t21 = server receive - client originate */
	LFPTOD(&tmp, t21);
	TVTOTS(tv_dst, &dst);
	dst.l_ui += JAN_1970;		/* shift UNIX epoch to NTP epoch */
	tmp = p_xmt;
	L_SUB(&tmp, &dst);		/* t34 = server transmit - client destination */
	LFPTOD(&tmp, t34);
	*offset = (t21 + t34) / 2.;	/* standard NTP clock offset formula */
	delta = t21 - t34;		/* roundtrip delay */

	// synch_distance is:
	// (peer->delay + peer->rootdelay) / 2 + peer->disp
	// + peer->rootdisp + clock_phi * (current_time - peer->update)
	// + peer->jitter;
	//
	// and peer->delay = fabs(peer->offset - p_offset) * 2;
	// and peer->offset needs history, so we're left with
	// p_offset = (t21 + t34) / 2.;
	// peer->disp = 0; (we have no history to augment this)
	// clock_phi = 15e-6;
	// peer->jitter = LOGTOD(sys_precision); (we have no history to augment this)
	// and ntp_proto.c:set_sys_tick_precision() should get us sys_precision.
	//
	// so our answer seems to be:
	//
	// (fabs(t21 + t34) + peer->rootdelay) / 3.
	// + 0 (peer->disp)
	// + peer->rootdisp
	// + 15e-6 (clock_phi)
	// + LOGTOD(sys_precision)

	/* root delay is a nonnegative unsigned fixed-point field */
	INSIST( FPTOD(p_rdly) >= 0. );
#if 1
	*synch_distance = (fabs(t21 + t34) + FPTOD(p_rdly)) / 3.
		+ 0.
		+ FPTOD(p_rdsp)
		+ 15e-6
		+ 0.	/* LOGTOD(sys_precision) when we can get it */
		;
	INSIST( *synch_distance >= 0. );
#else
	*synch_distance = (FPTOD(p_rdly) + FPTOD(p_rdsp))/2.0;
#endif

#ifdef DEBUG
	/* at high debug levels dump the raw packet and timestamps */
	if (debug > 3) {
		printf("sntp rootdelay: %f\n", FPTOD(p_rdly));
		printf("sntp rootdisp: %f\n", FPTOD(p_rdsp));
		printf("sntp syncdist: %f\n", *synch_distance);

		pkt_output(rpkt, rpktl, stdout);

		printf("sntp offset_calculation: rpkt->reftime:\n");
		l_fp_output(&p_ref, stdout);
		printf("sntp offset_calculation: rpkt->org:\n");
		l_fp_output(&p_org, stdout);
		printf("sntp offset_calculation: rpkt->rec:\n");
		l_fp_output(&p_rec, stdout);
		printf("sntp offset_calculation: rpkt->xmt:\n");
		l_fp_output(&p_xmt, stdout);
	}
#endif

	TRACE(3, ("sntp offset_calculation:\trec - org t21: %.6f\n"
		  "\txmt - dst t34: %.6f\tdelta: %.6f\toffset: %.6f\n",
		  t21, t34, delta, *offset));

	return;
}
1410
1411
1412
1413 /* Compute the 8 bits for li_vn_mode */
1414 void
1415 set_li_vn_mode (
1416 struct pkt *spkt,
1417 char leap,
1418 char version,
1419 char mode
1420 )
1421 {
1422 if (leap > 3) {
1423 msyslog(LOG_DEBUG, "set_li_vn_mode: leap > 3, using max. 3");
1424 leap = 3;
1425 }
1426
1427 if ((unsigned char)version > 7) {
1428 msyslog(LOG_DEBUG, "set_li_vn_mode: version < 0 or > 7, using 4");
1429 version = 4;
1430 }
1431
1432 if (mode > 7) {
1433 msyslog(LOG_DEBUG, "set_li_vn_mode: mode > 7, using client mode 3");
1434 mode = 3;
1435 }
1436
1437 spkt->li_vn_mode = leap << 6;
1438 spkt->li_vn_mode |= version << 3;
1439 spkt->li_vn_mode |= mode;
1440 }
1441
1442
1443 /*
1444 ** set_time applies 'offset' to the local clock.
1445 */
1446 int
1447 set_time(
1448 double offset
1449 )
1450 {
1451 int rc;
1452
1453 if (time_adjusted)
1454 return EX_OK;
1455
1456 /*
1457 ** If we can step but we cannot slew, then step.
1458 ** If we can step or slew and and |offset| > steplimit, then step.
1459 */
1460 if (ENABLED_OPT(STEP) &&
1461 ( !ENABLED_OPT(SLEW)
1462 || (ENABLED_OPT(SLEW) && (fabs(offset) > steplimit))
1463 )) {
1464 rc = step_systime(offset);
1465
1466 /* If there was a problem, can we rely on errno? */
1467 if (1 == rc)
1468 time_adjusted = TRUE;
1469 return (time_adjusted)
1470 ? EX_OK
1471 : 1;
1472 /*
1473 ** In case of error, what should we use?
1474 ** EX_UNAVAILABLE?
1475 ** EX_OSERR?
1476 ** EX_NOPERM?
1477 */
1478 }
1479
1480 if (ENABLED_OPT(SLEW)) {
1481 rc = adj_systime(offset);
1482
1483 /* If there was a problem, can we rely on errno? */
1484 if (1 == rc)
1485 time_adjusted = TRUE;
1486 return (time_adjusted)
1487 ? EX_OK
1488 : 1;
1489 /*
1490 ** In case of error, what should we use?
1491 ** EX_UNAVAILABLE?
1492 ** EX_OSERR?
1493 ** EX_NOPERM?
1494 */
1495 }
1496
1497 return EX_SOFTWARE;
1498 }
1499
1500
1501 int
1502 libevent_version_ok(void)
1503 {
1504 ev_uint32_t v_compile_maj;
1505 ev_uint32_t v_run_maj;
1506
1507 v_compile_maj = LIBEVENT_VERSION_NUMBER & 0xffff0000;
1508 v_run_maj = event_get_version_number() & 0xffff0000;
1509 if (v_compile_maj != v_run_maj) {
1510 fprintf(stderr,
1511 "Incompatible libevent versions: have %s, built with %s\n",
1512 event_get_version(),
1513 LIBEVENT_VERSION);
1514 return 0;
1515 }
1516 return 1;
1517 }
1518
1519 /*
1520 * gettimeofday_cached()
1521 *
1522 * Clones the event_base_gettimeofday_cached() interface but ensures the
1523 * times are always on the gettimeofday() 1970 scale. Older libevent 2
1524 * sometimes used gettimeofday(), sometimes the since-system-start
1525 * clock_gettime(CLOCK_MONOTONIC), depending on the platform.
1526 *
1527 * It is not cleanly possible to tell which timescale older libevent is
1528 * using.
1529 *
1530 * The strategy involves 1 hour thresholds chosen to be far longer than
1531 * the duration of a round of libevent callbacks, which share a cached
1532 * start-of-round time. First compare the last cached time with the
1533 * current gettimeofday() time. If they are within one hour, libevent
1534 * is using the proper timescale so leave the offset 0. Otherwise,
1535 * compare libevent's cached time and the current time on the monotonic
1536 * scale. If they are within an hour, libevent is using the monotonic
1537 * scale so calculate the offset to add to such times to bring them to
1538 * gettimeofday()'s scale.
1539 */
int
gettimeofday_cached(
	struct event_base * b,
	struct timeval * caller_tv
	)
{
#if defined(_EVENT_HAVE_CLOCK_GETTIME) && defined(CLOCK_MONOTONIC)
	/* event_base the cached values below are valid for */
	static struct event_base * cached_b;
	/* raw cached time last obtained from libevent */
	static struct timeval cached;
	/* cached time with 'offset' applied; what callers receive */
	static struct timeval adj_cached;
	/* correction from libevent's scale to gettimeofday() scale */
	static struct timeval offset;
	/* nonzero once 'offset' has been determined (one-shot probe) */
	static int offset_ready;
	struct timeval latest;
	struct timeval systemt;
	struct timespec ts;
	struct timeval mono;
	struct timeval diff;
	int cgt_rc;
	int gtod_rc;

	event_base_gettimeofday_cached(b, &latest);
	/* Fast path: same base and libevent's cached time unchanged. */
	if (b == cached_b &&
	    !memcmp(&latest, &cached, sizeof(latest))) {
		*caller_tv = adj_cached;
		return 0;
	}
	cached = latest;
	cached_b = b;
	if (!offset_ready) {
		/* sample both clocks; tolerate missing CLOCK_MONOTONIC */
		cgt_rc = clock_gettime(CLOCK_MONOTONIC, &ts);
		gtod_rc = gettimeofday(&systemt, NULL);
		if (0 != gtod_rc) {
			msyslog(LOG_ERR,
				"%s: gettimeofday() error %m",
				progname);
			exit(1);
		}
		/* within an hour of gettimeofday() => same timescale */
		diff = sub_tval(systemt, latest);
		if (debug > 1)
			printf("system minus cached %+ld.%06ld\n",
			       (long)diff.tv_sec, (long)diff.tv_usec);
		if (0 != cgt_rc || labs((long)diff.tv_sec) < 3600) {
			/*
			 * Either use_monotonic == 0, or this libevent
			 * has been repaired. Leave offset at zero.
			 */
		} else {
			mono.tv_sec = ts.tv_sec;
			mono.tv_usec = ts.tv_nsec / 1000;
			/* within an hour of monotonic => monotonic scale */
			diff = sub_tval(latest, mono);
			if (debug > 1)
				printf("cached minus monotonic %+ld.%06ld\n",
				       (long)diff.tv_sec, (long)diff.tv_usec);
			if (labs((long)diff.tv_sec) < 3600) {
				/* older libevent2 using monotonic */
				offset = sub_tval(systemt, mono);
				TRACE(1, ("%s: Offsetting libevent CLOCK_MONOTONIC times by %+ld.%06ld\n",
					  "gettimeofday_cached",
					  (long)offset.tv_sec,
					  (long)offset.tv_usec));
			}
		}
		offset_ready = TRUE;
	}
	/* apply the (possibly zero) correction and remember the result */
	adj_cached = add_tval(cached, offset);
	*caller_tv = adj_cached;

	return 0;
#else
	/* platform lacks clock_gettime(CLOCK_MONOTONIC); pass through */
	return event_base_gettimeofday_cached(b, caller_tv);
#endif
}
1612
1613