main.c revision 1.19 1 /* $NetBSD: main.c,v 1.19 2024/08/18 20:47:20 christos Exp $ */
2
3 #include <config.h>
4
5 #include <event2/util.h>
6 #include <event2/event.h>
7
8 #include "ntp_workimpl.h"
9 #ifdef WORK_THREAD
10 # include <event2/thread.h>
11 #endif
12
13 #ifdef HAVE_SYSEXITS_H
14 # include <sysexits.h>
15 #endif
16
17 #include "main.h"
18 #include "ntp_libopts.h"
19 #include "kod_management.h"
20 #include "networking.h"
21 #include "utilities.h"
22 #include "log.h"
23 #include "libntp.h"
24
extern const char *progname;	/* set up by sntp_init_logging() */

int shutting_down;	/* set once event_base_loopexit() has been requested */
int time_derived;	/* a usable offset has been computed from a reply */
int time_adjusted;	/* the clock was actually stepped/slewed */
int n_pending_dns = 0;	/* outstanding getaddrinfo_sometime() lookups */
int n_pending_ntp = 0;	/* outstanding NTP responses still awaited */
int ai_fam_pref = AF_UNSPEC;	/* address family forced by -4/-6 */
int ntpver = 4;		/* NTP version to send, from --ntpversion */
double steplimit = -1;	/* seconds; from --steplimit (ms on cmdline) */
SOCKET sock4 = -1;		/* Socket for IPv4 */
SOCKET sock6 = -1;		/* Socket for IPv6 */
/*
** BCAST *must* listen on port 123 (by default), so we can only
** use the UCST sockets (above) if they too are using port 123
*/
SOCKET bsock4 = -1;		/* Broadcast Socket for IPv4 */
SOCKET bsock6 = -1;		/* Broadcast Socket for IPv6 */
struct event_base *base;	/* the single libevent event loop */
struct event *ev_sock4;		/* read/timeout event on sock4 */
struct event *ev_sock6;		/* read/timeout event on sock6 */
struct event *ev_worker_timeout;	/* intres idle timer */
struct event *ev_xmt_timer;	/* fires when the head of xmt_q is due */
48
/*
 * Per-lookup DNS context: one per hostname handed to handle_lookup().
 * The hostname string is co-allocated immediately after this struct
 * (see handle_lookup()).
 */
struct dns_ctx {
	const char *	name;		/* server name as given on cmdline */
	int		flags;		/* CTX_* bits below */
#define CTX_BCST	0x0001		/* listen for broadcast from host */
#define CTX_UCST	0x0002		/* unicast query to host */
#define CTX_xCST	0x0003		/* mask: either cast mode */
#define CTX_CONC	0x0004		/* concurrent (-c) lookup */
#define CTX_unused	0xfffd
	int		key_id;		/* auth key ID, -1 if unauthenticated */
	struct timeval	timeout;	/* response timeout for this lookup */
	struct key *	key;		/* key material, NULL if none */
};
61
/* Bookkeeping for one NTP query sent (or to be sent) to one address. */
typedef struct sent_pkt_tag sent_pkt;
struct sent_pkt_tag {
	sent_pkt *		link;	/* next in per-family list */
	struct dns_ctx *	dctx;	/* owning DNS lookup context */
	sockaddr_u		addr;	/* server address queried */
	time_t			stime;	/* transmit time, 0 if not yet sent */
	int			done;	/* response received or timed out */
	struct pkt		x_pkt;	/* copy of the transmitted packet */
};
71
/* Scheduled-transmit queue entry; xmt_q is kept sorted by ->sched. */
typedef struct xmt_ctx_tag xmt_ctx;
struct xmt_ctx_tag {
	xmt_ctx *		link;	/* next in xmt_q */
	SOCKET			sock;	/* socket to send on (sock4/sock6) */
	time_t			sched;	/* earliest transmit time (secs) */
	sent_pkt *		spkt;	/* associated sent-packet record */
};
79
struct timeval	gap;		/* spacing between queued transmits (-g) */
xmt_ctx *	xmt_q;		/* transmit queue, sorted by sched time */
struct key *	keys = NULL;	/* keys loaded from the -k KEYFILE */
int		response_timeout;	/* seconds, from --timeout */
struct timeval	response_tv;		/* same value as a timeval */
struct timeval	start_tv;		/* when the event loop started */
/* check the timeout at least once per second */
struct timeval	wakeup_tv = { 0, 888888 };

/* per-family lists of packets awaiting responses */
sent_pkt *	fam_listheads[2];
#define v4_pkts_list	(fam_listheads[0])
#define v6_pkts_list	(fam_listheads[1])

/* receive buffer: big enough for extension fields plus a MAC */
static union {
	struct pkt pkt;
	char   buf[LEN_PKT_NOMAC + NTP_MAXEXTEN + MAX_MAC_LEN];
} rbuf;

#define r_pkt  rbuf.pkt

#ifdef HAVE_DROPROOT
int droproot;			/* intres imports these */
int root_dropped;
#endif
u_long current_time;		/* libntp/authkeys.c */
105
/* forward declarations for this file */
void open_sockets(void);
void handle_lookup(const char *name, int flags);
void sntp_addremove_fd(int fd, int is_pipe, int remove_it);
void worker_timeout(evutil_socket_t, short, void *);
void worker_resp_cb(evutil_socket_t, short, void *);
void sntp_name_resolved(int, int, void *, const char *, const char *,
			const struct addrinfo *,
			const struct addrinfo *);
void queue_xmt(SOCKET sock, struct dns_ctx *dctx, sent_pkt *spkt,
	       u_int xmt_delay);
void xmt_timer_cb(evutil_socket_t, short, void *ptr);
void xmt(xmt_ctx *xctx);
int  check_kod(const struct addrinfo *ai);
void timeout_query(sent_pkt *);
void timeout_queries(void);
void sock_cb(evutil_socket_t, short, void *);
void check_exit_conditions(void);
void sntp_libevent_log_cb(int, const char *);
void set_li_vn_mode(struct pkt *spkt, char leap, char version, char mode);
int  set_time(double offset);
void dec_pending_ntp(const char *, sockaddr_u *);
int  libevent_version_ok(void);
int  gettimeofday_cached(struct event_base *b, struct timeval *tv);
129
130
131 /*
132 * The actual main function.
133 */
/*
 * The actual main function: parse options, set up the libevent loop,
 * start DNS lookups for every requested server, run the loop until a
 * time is derived or everything times out, and report success/failure.
 * Returns 0 on success; 1 if -S/-s was requested but no adjustment
 * happened; -1 on libevent setup failure.
 */
int
sntp_main (
	int argc,
	char **argv,
	const char *sntpVersion
	)
{
	int			i;
	int			exitcode;
	int			optct;
	struct event_config *	evcfg;

	/* Initialize logging system - sets up progname */
	sntp_init_logging(argv[0]);

	if (!libevent_version_ok())
		exit(EX_SOFTWARE);

	init_lib();
	init_auth();

	optct = ntpOptionProcess(&sntpOptions, argc, argv);
	argc -= optct;		/* step past the consumed options... */
	argv += optct;		/* ...leaving only hostname operands */


	debug = OPT_VALUE_SET_DEBUG_LEVEL;

	TRACE(2, ("init_lib() done, %s%s\n",
		  (ipv4_works)
		      ? "ipv4_works "
		      : "",
		  (ipv6_works)
		      ? "ipv6_works "
		      : ""));
	ntpver = OPT_VALUE_NTPVERSION;
	steplimit = OPT_VALUE_STEPLIMIT / 1e3;	/* cmdline ms -> seconds */
	/* clamp the inter-transmit gap into [0, 999999] microseconds */
	gap.tv_usec = max(0, OPT_VALUE_GAP * 1000);
	gap.tv_usec = min(gap.tv_usec, 999999);

	if (HAVE_OPT(LOGFILE))
		open_logfile(OPT_ARG(LOGFILE));

	msyslog(LOG_INFO, "%s", sntpVersion);

	/* must have at least one server to talk to */
	if (0 == argc && !HAVE_OPT(BROADCAST) && !HAVE_OPT(CONCURRENT)) {
		printf("%s: Must supply at least one of -b hostname, -c hostname, or hostname.\n",
		       progname);
		exit(EX_USAGE);
	}


	/*
	** Eventually, we probably want:
	** - separate bcst and ucst timeouts (why?)
	** - multiple --timeout values in the commandline
	*/

	response_timeout = OPT_VALUE_TIMEOUT;
	response_tv.tv_sec = response_timeout;
	response_tv.tv_usec = 0;

	/* IPv6 available? */
	if (isc_net_probeipv6() != ISC_R_SUCCESS) {
		ai_fam_pref = AF_INET;
		TRACE(1, ("No ipv6 support available, forcing ipv4\n"));
	} else {
		/* Check for options -4 and -6 */
		if (HAVE_OPT(IPV4))
			ai_fam_pref = AF_INET;
		else if (HAVE_OPT(IPV6))
			ai_fam_pref = AF_INET6;
	}

	/* TODO: Parse config file if declared */

	/*
	** Init the KOD system.
	** For embedded systems with no writable filesystem,
	** -K /dev/null can be used to disable KoD storage.
	*/
	kod_init_kod_db(OPT_ARG(KOD), FALSE);

	/* HMS: Check and see what happens if KEYFILE doesn't exist */
	auth_init(OPT_ARG(KEYFILE), &keys);

	/*
	** Considering employing a variable that prevents functions of doing
	** anything until everything is initialized properly
	**
	** HMS: What exactly does the above mean?
	*/
	event_set_log_callback(&sntp_libevent_log_cb);
	if (debug > 0)
		event_enable_debug_mode();
#ifdef WORK_THREAD
	evthread_use_pthreads();
	/* we use libevent from main thread only, locks should be academic */
	if (debug > 0)
		evthread_enable_lock_debuging();
#endif
	evcfg = event_config_new();
	if (NULL == evcfg) {
		printf("%s: event_config_new() failed!\n", progname);
		return -1;
	}
#ifndef HAVE_SOCKETPAIR
	/* intres worker pipes need an FD-capable libevent backend */
	event_config_require_features(evcfg, EV_FEATURE_FDS);
#endif
	/* all libevent calls are from main thread */
	/* event_config_set_flag(evcfg, EVENT_BASE_FLAG_NOLOCK); */
	base = event_base_new_with_config(evcfg);
	event_config_free(evcfg);
	if (NULL == base) {
		printf("%s: event_base_new() failed!\n", progname);
		return -1;
	}

	/* wire into intres resolver */
	worker_per_query = TRUE;
	addremove_io_fd = &sntp_addremove_fd;

	open_sockets();

	if (HAVE_OPT(BROADCAST)) {
		int		cn = STACKCT_OPT(  BROADCAST );
		const char **	cp = STACKLST_OPT( BROADCAST );

		while (cn-- > 0) {
			handle_lookup(*cp, CTX_BCST);
			cp++;
		}
	}

	if (HAVE_OPT(CONCURRENT)) {
		int		cn = STACKCT_OPT( CONCURRENT );
		const char **	cp = STACKLST_OPT( CONCURRENT );

		while (cn-- > 0) {
			handle_lookup(*cp, CTX_UCST | CTX_CONC);
			cp++;
		}
	}

	/* remaining operands are plain unicast servers */
	for (i = 0; i < argc; ++i)
		handle_lookup(argv[i], CTX_UCST);

	gettimeofday_cached(base, &start_tv);
	event_base_dispatch(base);	/* runs until loopexit is requested */
	event_base_free(base);

	/* nonzero exit if we were asked to set the time but never did */
	if (!time_adjusted &&
	    (ENABLED_OPT(STEP) || ENABLED_OPT(SLEW)))
		exitcode = 1;
	else
		exitcode = 0;

	return exitcode;
}
293
294
295 /*
296 ** open sockets and make them non-blocking
297 */
298 void
299 open_sockets(
300 void
301 )
302 {
303 sockaddr_u name;
304
305 if (-1 == sock4) {
306 sock4 = socket(PF_INET, SOCK_DGRAM, 0);
307 if (-1 == sock4) {
308 /* error getting a socket */
309 msyslog(LOG_ERR, "open_sockets: socket(PF_INET) failed: %m");
310 exit(1);
311 }
312 /* Make it non-blocking */
313 make_socket_nonblocking(sock4);
314
315 /* Let's try using a wildcard... */
316 ZERO(name);
317 AF(&name) = AF_INET;
318 SET_ADDR4N(&name, INADDR_ANY);
319 SET_PORT(&name, (HAVE_OPT(USERESERVEDPORT) ? 123 : 0));
320
321 if (-1 == bind(sock4, &name.sa,
322 SOCKLEN(&name))) {
323 msyslog(LOG_ERR, "open_sockets: bind(sock4) failed: %m");
324 exit(1);
325 }
326
327 /* Register an NTP callback for recv/timeout */
328 ev_sock4 = event_new(base, sock4,
329 EV_TIMEOUT | EV_READ | EV_PERSIST,
330 &sock_cb, NULL);
331 if (NULL == ev_sock4) {
332 msyslog(LOG_ERR,
333 "open_sockets: event_new(base, sock4) failed!");
334 } else {
335 event_add(ev_sock4, &wakeup_tv);
336 }
337 }
338
339 /* We may not always have IPv6... */
340 if (-1 == sock6 && ipv6_works) {
341 sock6 = socket(PF_INET6, SOCK_DGRAM, 0);
342 if (-1 == sock6 && ipv6_works) {
343 /* error getting a socket */
344 msyslog(LOG_ERR, "open_sockets: socket(PF_INET6) failed: %m");
345 exit(1);
346 }
347 /* Make it non-blocking */
348 make_socket_nonblocking(sock6);
349
350 /* Let's try using a wildcard... */
351 ZERO(name);
352 AF(&name) = AF_INET6;
353 SET_ADDR6N(&name, in6addr_any);
354 SET_PORT(&name, (HAVE_OPT(USERESERVEDPORT) ? 123 : 0));
355
356 if (-1 == bind(sock6, &name.sa,
357 SOCKLEN(&name))) {
358 msyslog(LOG_ERR, "open_sockets: bind(sock6) failed: %m");
359 exit(1);
360 }
361 /* Register an NTP callback for recv/timeout */
362 ev_sock6 = event_new(base, sock6,
363 EV_TIMEOUT | EV_READ | EV_PERSIST,
364 &sock_cb, NULL);
365 if (NULL == ev_sock6) {
366 msyslog(LOG_ERR,
367 "open_sockets: event_new(base, sock6) failed!");
368 } else {
369 event_add(ev_sock6, &wakeup_tv);
370 }
371 }
372
373 return;
374 }
375
376
377 /*
378 ** handle_lookup
379 */
/*
** handle_lookup() -- kick off an asynchronous DNS lookup for one
** server name.  Allocates a dns_ctx (with the name co-allocated right
** behind it), records the cast mode and authentication settings, bumps
** n_pending_dns, and hands off to the intres resolver, which will call
** sntp_name_resolved() when done.
*/
void
handle_lookup(
	const char *	name,
	int		flags
	)
{
	struct addrinfo		hints;	/* local copy is OK */
	struct dns_ctx *	ctx;
	size_t			name_sz;
	char *			copy;

	TRACE(1, ("handle_lookup(%s,%#x)\n", name, flags));

	ZERO(hints);
	hints.ai_family = ai_fam_pref;
	hints.ai_flags = AI_CANONNAME | Z_AI_NUMERICSERV;
	/*
	** Unless we specify a socktype, we'll get at least two
	** entries for each address: one for TCP and one for
	** UDP. That's not what we want.
	*/
	hints.ai_socktype = SOCK_DGRAM;
	hints.ai_protocol = IPPROTO_UDP;

	/* one allocation carries both the ctx and a copy of the name */
	name_sz = strlen(name) + 1;
	ctx = emalloc_zero(sizeof(*ctx) + name_sz);
	copy = (char *)(ctx + 1);
	memcpy(copy, name, name_sz);
	ctx->name = copy;
	ctx->flags = flags;
	ctx->timeout = response_tv;
	ctx->key = NULL;

	/* The following should arguably be passed in... */
	if (!ENABLED_OPT(AUTHENTICATION)) {
		ctx->key_id = -1;
	} else {
		ctx->key_id = OPT_VALUE_AUTHENTICATION;
		get_key(ctx->key_id, &ctx->key);
		if (NULL == ctx->key) {
			fprintf(stderr, "%s: Authentication with keyID %d requested, but no matching keyID found in <%s>!\n",
				progname, ctx->key_id, OPT_ARG(KEYFILE));
			exit(1);
		}
	}

	++n_pending_dns;
	getaddrinfo_sometime(name, "123", &hints, 0,
			     &sntp_name_resolved, ctx);
}
432
433
434 /*
435 ** DNS Callback:
436 ** - For each IP:
437 ** - - open a socket
438 ** - - increment n_pending_ntp
439 ** - - send a request if this is a Unicast callback
440 ** - - queue wait for response
441 ** - decrement n_pending_dns
442 */
/*
** DNS Callback:
** - For each IP:
** - - open a socket
** - - increment n_pending_ntp
** - - send a request if this is a Unicast callback
** - - queue wait for response
** - decrement n_pending_dns
**
** xmt_delay staggers multiple addresses of the same family so we do
** not burst queries at a multi-homed server.
*/
void
sntp_name_resolved(
	int			rescode,
	int			gai_errno,
	void *			context,
	const char *		name,
	const char *		service,
	const struct addrinfo *	hints,
	const struct addrinfo *	addr
	)
{
	struct dns_ctx *	dctx;
	sent_pkt *		spkt;
	const struct addrinfo *	ai;
	SOCKET			sock;
	u_int			xmt_delay_v4;
	u_int			xmt_delay_v6;
	u_int			xmt_delay;
	size_t			octets;

	xmt_delay_v4 = 0;
	xmt_delay_v6 = 0;
	dctx = context;
	if (rescode) {
#ifdef EAI_SYSTEM
		/* EAI_SYSTEM means the real error is in gai_errno/errno */
		if (EAI_SYSTEM == rescode) {
			errno = gai_errno;
			mfprintf(stderr, "%s lookup error %m\n",
				 dctx->name);
		} else
#endif
			fprintf(stderr, "%s lookup error %s\n",
				dctx->name, gai_strerror(rescode));
	} else {
		TRACE(3, ("%s [%s]\n", dctx->name,
			  (addr->ai_canonname != NULL)
			      ? addr->ai_canonname
			      : ""));

		for (ai = addr; ai != NULL; ai = ai->ai_next) {

			/* skip servers with a Kiss-o'-Death on file */
			if (check_kod(ai))
				continue;

			switch (ai->ai_family) {

			case AF_INET:
				sock = sock4;
				xmt_delay = xmt_delay_v4;
				xmt_delay_v4++;
				break;

			case AF_INET6:
				/* silently skip AAAA results if v6 is dead */
				if (!ipv6_works)
					continue;

				sock = sock6;
				xmt_delay = xmt_delay_v6;
				xmt_delay_v6++;
				break;

			default:
				msyslog(LOG_ERR, "sntp_name_resolved: unexpected ai_family: %d",
					ai->ai_family);
				exit(1);
				break;
			}

			/*
			** We're waiting for a response for either unicast
			** or broadcast, so...
			*/
			++n_pending_ntp;

			/* If this is for a unicast IP, queue a request */
			if (dctx->flags & CTX_UCST) {
				spkt = emalloc_zero(sizeof(*spkt));
				spkt->dctx = dctx;
				/* copy no more than our sockaddr_u holds */
				octets = min(ai->ai_addrlen, sizeof(spkt->addr));
				memcpy(&spkt->addr, ai->ai_addr, octets);
				queue_xmt(sock, dctx, spkt, xmt_delay);
			}
		}
	}
	/* n_pending_dns really should be >0 here... */
	--n_pending_dns;
	check_exit_conditions();
}
531
532
533 /*
534 ** queue_xmt
535 */
/*
** queue_xmt() -- schedule a unicast query for transmission.
** Rejects addresses already queued (freeing spkt), links spkt into the
** per-family pending list, and inserts a transmit context into xmt_q
** sorted by scheduled time (now + 2*xmt_delay seconds).  If the new
** entry becomes the queue head, (re)arms ev_xmt_timer accordingly.
*/
void
queue_xmt(
	SOCKET			sock,
	struct dns_ctx *	dctx,
	sent_pkt *		spkt,
	u_int			xmt_delay
	)
{
	sockaddr_u *	dest;
	sent_pkt **	pkt_listp;
	sent_pkt *	match;
	xmt_ctx *	xctx;
	struct timeval	start_cb;
	struct timeval	delay;

	dest = &spkt->addr;
	if (IS_IPV6(dest))
		pkt_listp = &v6_pkts_list;
	else
		pkt_listp = &v4_pkts_list;

	/* reject attempts to add address already listed */
	for (match = *pkt_listp; match != NULL; match = match->link) {
		if (ADDR_PORT_EQ(&spkt->addr, &match->addr)) {
			/* only mention the other name if it differs */
			if (strcasecmp(spkt->dctx->name,
				       match->dctx->name))
				printf("%s %s duplicate address from %s ignored.\n",
				       sptoa(&match->addr),
				       match->dctx->name,
				       spkt->dctx->name);
			else
				printf("%s %s, duplicate address ignored.\n",
				       sptoa(&match->addr),
				       match->dctx->name);
			/* undo the n_pending_ntp bump for this address */
			dec_pending_ntp(spkt->dctx->name, &spkt->addr);
			free(spkt);
			return;
		}
	}

	LINK_SLIST(*pkt_listp, spkt, link);

	xctx = emalloc_zero(sizeof(*xctx));
	xctx->sock = sock;
	xctx->spkt = spkt;
	gettimeofday_cached(base, &start_cb);
	/* stagger same-family queries two seconds apart */
	xctx->sched = start_cb.tv_sec + (2 * xmt_delay);

	/* insert in sched order; L_S_S_CUR() is the current list entry */
	LINK_SORT_SLIST(xmt_q, xctx, (xctx->sched < L_S_S_CUR()->sched),
			link, xmt_ctx);
	if (xmt_q == xctx) {
		/*
		 * The new entry is the first scheduled.  The timer is
		 * either not active or is set for the second xmt
		 * context in xmt_q.
		 */
		if (NULL == ev_xmt_timer)
			ev_xmt_timer = event_new(base, INVALID_SOCKET,
						 EV_TIMEOUT,
						 &xmt_timer_cb, NULL);
		if (NULL == ev_xmt_timer) {
			msyslog(LOG_ERR,
				"queue_xmt: event_new(base, -1, EV_TIMEOUT) failed!");
			exit(1);
		}
		ZERO(delay);
		if (xctx->sched > start_cb.tv_sec)
			delay.tv_sec = xctx->sched - start_cb.tv_sec;
		event_add(ev_xmt_timer, &delay);
		TRACE(2, ("queue_xmt: xmt timer for %u usec\n",
			  (u_int)delay.tv_usec));
	}
}
609
610
611 /*
612 ** xmt_timer_cb
613 */
/*
** xmt_timer_cb() -- transmit-queue timer callback.
** Sends the head of xmt_q if it is due, then re-arms the timer:
** with 'gap' if the NEW head is also already due (pacing back-to-back
** sends), otherwise for the remaining seconds until the head's
** scheduled time.  The two identical-looking due checks are
** intentional: the first drains the old head, the second inspects the
** head that remains afterward.
*/
void
xmt_timer_cb(
	evutil_socket_t	fd,
	short		what,
	void *		ctx
	)
{
	struct timeval	start_cb;
	struct timeval	delay;
	xmt_ctx *	x;

	UNUSED_ARG(fd);
	UNUSED_ARG(ctx);
	DEBUG_INSIST(EV_TIMEOUT == what);

	if (NULL == xmt_q || shutting_down)
		return;
	gettimeofday_cached(base, &start_cb);
	if (xmt_q->sched <= start_cb.tv_sec) {
		UNLINK_HEAD_SLIST(x, xmt_q, link);
		TRACE(2, ("xmt_timer_cb: at .%6.6u -> %s\n",
			  (u_int)start_cb.tv_usec, stoa(&x->spkt->addr)));
		xmt(x);
		free(x);
		if (NULL == xmt_q)
			return;
	}
	if (xmt_q->sched <= start_cb.tv_sec) {
		/* next entry also due: pace it by 'gap' microseconds */
		event_add(ev_xmt_timer, &gap);
		TRACE(2, ("xmt_timer_cb: at .%6.6u gap %6.6u\n",
			  (u_int)start_cb.tv_usec,
			  (u_int)gap.tv_usec));
	} else {
		/* sleep until the next entry's scheduled second */
		delay.tv_sec = xmt_q->sched - start_cb.tv_sec;
		delay.tv_usec = 0;
		event_add(ev_xmt_timer, &delay);
		TRACE(2, ("xmt_timer_cb: at .%6.6u next %ld seconds\n",
			  (u_int)start_cb.tv_usec,
			  (long)delay.tv_sec));
	}
}
655
656
657 /*
658 ** xmt()
659 */
/*
** xmt() -- build and send one NTP query packet described by xctx.
** On success the transmitted packet and send time are stashed in the
** sent_pkt so the reply can be validated; on sendpkt() failure the
** pending-response count is dropped since no reply will come.
*/
void
xmt(
	xmt_ctx *	xctx
	)
{
	SOCKET		sock = xctx->sock;
	struct dns_ctx *dctx = xctx->spkt->dctx;
	sent_pkt *	spkt = xctx->spkt;
	sockaddr_u *	dst = &spkt->addr;
	struct timeval	tv_xmt;
	struct pkt	x_pkt;
	size_t		pkt_len;
	int		sent;

	if (0 != gettimeofday(&tv_xmt, NULL)) {
		msyslog(LOG_ERR,
			"xmt: gettimeofday() failed: %m");
		exit(1);
	}
	/* convert Unix epoch seconds to NTP era */
	tv_xmt.tv_sec += JAN_1970;

	pkt_len = generate_pkt(&x_pkt, &tv_xmt, dctx->key_id,
			       dctx->key);

	sent = sendpkt(sock, dst, &x_pkt, pkt_len);
	if (sent) {
		/* Save the packet we sent... */
		memcpy(&spkt->x_pkt, &x_pkt, min(sizeof(spkt->x_pkt),
		       pkt_len));
		/* stime is kept in Unix epoch for timeout math */
		spkt->stime = tv_xmt.tv_sec - JAN_1970;

		TRACE(2, ("xmt: %lx.%6.6u %s %s\n", (u_long)tv_xmt.tv_sec,
			  (u_int)tv_xmt.tv_usec, dctx->name, stoa(dst)));
	} else {
		/* no reply will ever arrive for this address */
		dec_pending_ntp(dctx->name, dst);
	}

	return;
}
699
700
701 /*
702 * timeout_queries() -- give up on unrequited NTP queries
703 */
/*
 * timeout_queries() -- give up on unrequited NTP queries
 *
 * Walks both per-family pending lists, times out any sent-but-unanswered
 * query older than response_timeout, and, once the whole run has lasted
 * longer than response_timeout, asks the event loop to exit.
 */
void
timeout_queries(void)
{
	struct timeval	start_cb;
	u_int		idx;
	sent_pkt *	head;
	sent_pkt *	spkt;
	sent_pkt *	spkt_next;
	long		age;
	int didsomething = 0;

	TRACE(3, ("timeout_queries: called to check %u items\n",
		  (unsigned)COUNTOF(fam_listheads)));

	gettimeofday_cached(base, &start_cb);
	for (idx = 0; idx < COUNTOF(fam_listheads); idx++) {
		head = fam_listheads[idx];
		for (spkt = head; spkt != NULL; spkt = spkt_next) {
			char xcst;

			didsomething = 1;
			/* classify for the trace message below */
			switch (spkt->dctx->flags & CTX_xCST) {
			case CTX_BCST:
				xcst = 'B';
				break;

			case CTX_UCST:
				xcst = 'U';
				break;

			default:
				INSIST(!"spkt->dctx->flags neither UCST nor BCST");
				break;
			}

			/* grab the link now; timeout_query may retire spkt */
			spkt_next = spkt->link;
			if (0 == spkt->stime || spkt->done)
				continue;
			age = start_cb.tv_sec - spkt->stime;
			TRACE(3, ("%s %s %cCST age %ld\n",
				  stoa(&spkt->addr),
				  spkt->dctx->name, xcst, age));
			if (age > response_timeout)
				timeout_query(spkt);
		}
	}
	// Do we care about didsomething?
	TRACE(3, ("timeout_queries: didsomething is %d, age is %ld\n",
		  didsomething, (long) (start_cb.tv_sec - start_tv.tv_sec)));
	/* overall deadline: stop the loop once the run itself is too old */
	if (start_cb.tv_sec - start_tv.tv_sec > response_timeout) {
		TRACE(3, ("timeout_queries: bail!\n"));
		event_base_loopexit(base, NULL);
		shutting_down = TRUE;
	}
}
759
760
761 void dec_pending_ntp(
762 const char * name,
763 sockaddr_u * server
764 )
765 {
766 if (n_pending_ntp > 0) {
767 --n_pending_ntp;
768 check_exit_conditions();
769 } else {
770 INSIST(0 == n_pending_ntp);
771 TRACE(1, ("n_pending_ntp was zero before decrement for %s\n",
772 hostnameaddr(name, server)));
773 }
774 }
775
776
777 void timeout_query(
778 sent_pkt * spkt
779 )
780 {
781 sockaddr_u * server;
782 char xcst;
783
784
785 switch (spkt->dctx->flags & CTX_xCST) {
786 case CTX_BCST:
787 xcst = 'B';
788 break;
789
790 case CTX_UCST:
791 xcst = 'U';
792 break;
793
794 default:
795 INSIST(!"spkt->dctx->flags neither UCST nor BCST");
796 break;
797 }
798 spkt->done = TRUE;
799 server = &spkt->addr;
800 msyslog(LOG_INFO, "%s no %cCST response after %d seconds",
801 hostnameaddr(spkt->dctx->name, server), xcst,
802 response_timeout);
803 dec_pending_ntp(spkt->dctx->name, server);
804 return;
805 }
806
807
808 /*
809 ** check_kod
810 */
/*
** check_kod() -- return nonzero if a Kiss-o'-Death entry is on file
** for this address, in which case the server must be skipped.
*/
int
check_kod(
	const struct addrinfo *	ai
	)
{
	char *hostname;
	struct kod_entry *reason;

	/* Is there a KoD on file for this address? */
	hostname = addrinfo_to_str(ai);	/* heap-allocated; freed below */
	TRACE(2, ("check_kod: checking <%s>\n", hostname));
	if (search_entry(hostname, &reason)) {
		printf("prior KoD for %s, skipping.\n",
			hostname);
		free(reason);
		free(hostname);

		return 1;
	}
	free(hostname);

	return 0;
}
834
835
836 /*
837 ** Socket readable/timeout Callback:
838 ** Read in the packet
839 ** Unicast:
840 ** - close socket
841 ** - decrement n_pending_ntp
842 ** - If packet is good, set the time and "exit"
843 ** Broadcast:
844 ** - If packet is good, set the time and "exit"
845 */
/*
** Socket readable/timeout Callback:
** Read in the packet
** Unicast:
** - close socket
** - decrement n_pending_ntp
** - If packet is good, set the time and "exit"
** Broadcast:
** - If packet is good, set the time and "exit"
**
** Also serves as the once-per-wakeup_tv timeout tick that drives
** timeout_queries().
*/
void
sock_cb(
	evutil_socket_t fd,
	short what,
	void *ptr
	)
{
	sockaddr_u	sender;
	sockaddr_u *	psau;
	sent_pkt **	p_pktlist;
	sent_pkt *	spkt;
	int		rpktl;
	int		rc;

	INSIST(sock4 == fd || sock6 == fd);

	TRACE(3, ("sock_cb: event on sock%s:%s%s%s%s\n",
		  (fd == sock6)
		      ? "6"
		      : "4",
		  (what & EV_TIMEOUT) ? " timeout" : "",
		  (what & EV_READ)    ? " read" : "",
		  (what & EV_WRITE)   ? " write" : "",
		  (what & EV_SIGNAL)  ? " signal" : ""));

	/* pure timeout tick: just run the query-timeout scan */
	if (!(EV_READ & what)) {
		if (EV_TIMEOUT & what)
			timeout_queries();

		return;
	}

	/* Read in the packet */
	rpktl = recvdata(fd, &sender, &rbuf, sizeof(rbuf));
	if (rpktl < 0) {
		msyslog(LOG_DEBUG, "recvfrom error %m");
		return;
	}

	if (sock6 == fd)
		p_pktlist = &v6_pkts_list;
	else
		p_pktlist = &v4_pkts_list;

	/* match the sender against a query we actually sent */
	for (spkt = *p_pktlist; spkt != NULL; spkt = spkt->link) {
		psau = &spkt->addr;
		if (SOCK_EQ(&sender, psau))
			break;
	}
	if (NULL == spkt) {
		msyslog(LOG_WARNING,
			"Packet from unexpected source %s dropped",
			sptoa(&sender));
		return;
	}

	TRACE(1, ("sock_cb: %s %s\n", spkt->dctx->name,
		  sptoa(&sender)));

	/* sanity/authentication checks against the saved request */
	rpktl = process_pkt(&r_pkt, &sender, rpktl, MODE_SERVER,
			    &spkt->x_pkt, "sock_cb");

	TRACE(2, ("sock_cb: process_pkt returned %d\n", rpktl));

	/* If this is a Unicast packet, one down ... */
	if (!spkt->done && (CTX_UCST & spkt->dctx->flags)) {
		dec_pending_ntp(spkt->dctx->name, &spkt->addr);
		spkt->done = TRUE;
	}


	/* If the packet is good, set the time and we're all done */
	rc = handle_pkt(rpktl, &r_pkt, &spkt->addr, spkt->dctx->name);
	if (0 != rc)
		TRACE(1, ("sock_cb: handle_pkt() returned %d\n", rc));
	check_exit_conditions();
}
923
924
925 /*
926 * check_exit_conditions()
927 *
928 * If sntp has a reply, ask the event loop to stop after this round of
929 * callbacks, unless --wait was used.
930 */
931 void
932 check_exit_conditions(void)
933 {
934 if ((0 == n_pending_ntp && 0 == n_pending_dns) ||
935 (time_derived && !HAVE_OPT(WAIT))) {
936 event_base_loopexit(base, NULL);
937 shutting_down = TRUE;
938 } else {
939 TRACE(2, ("%d NTP and %d name queries pending\n",
940 n_pending_ntp, n_pending_dns));
941 }
942 }
943
944
945 /*
946 * sntp_addremove_fd() is invoked by the intres blocking worker code
947 * to read from a pipe, or to stop same.
948 */
/*
 * sntp_addremove_fd() is invoked by the intres blocking worker code
 * to read from a pipe, or to stop same.
 *
 * Locates the blocking_child owning fd; silently ignores unknown fds.
 * remove_it tears down and frees the existing read event, otherwise a
 * persistent EV_READ event dispatching worker_resp_cb() is installed.
 */
void sntp_addremove_fd(
	int	fd,
	int	is_pipe,
	int	remove_it
	)
{
	u_int		idx;
	blocking_child *c;
	struct event *	ev;

#ifdef HAVE_SOCKETPAIR
	if (is_pipe) {
		/* sntp only asks for EV_FEATURE_FDS without HAVE_SOCKETPAIR */
		msyslog(LOG_ERR, "fatal: pipes not supported on systems with socketpair()");
		exit(1);
	}
#endif

	/* find the blocking child whose response pipe is fd */
	c = NULL;
	for (idx = 0; idx < blocking_children_alloc; idx++) {
		c = blocking_children[idx];
		if (NULL == c)
			continue;
		if (fd == c->resp_read_pipe)
			break;
	}
	if (idx == blocking_children_alloc)
		return;		/* fd not ours; nothing to do */

	if (remove_it) {
		ev = c->resp_read_ctx;
		c->resp_read_ctx = NULL;
		event_del(ev);
		event_free(ev);

		return;
	}

	ev = event_new(base, fd, EV_READ | EV_PERSIST,
		       &worker_resp_cb, c);
	if (NULL == ev) {
		msyslog(LOG_ERR,
			"sntp_addremove_fd: event_new(base, fd) failed!");
		return;
	}
	c->resp_read_ctx = ev;
	event_add(ev, NULL);
}
997
998
999 /* called by forked intres child to close open descriptors */
/* called by forked intres child to close open descriptors */
#ifdef WORK_FORK
void
kill_asyncio(
	int	startfd
	)
{
	SOCKET *	socks[4];
	u_int		i;

	socks[0] = &sock4;
	socks[1] = &sock6;
	socks[2] = &bsock4;
	socks[3] = &bsock6;

	/* close and invalidate every socket that is currently open */
	for (i = 0; i < COUNTOF(socks); i++) {
		if (INVALID_SOCKET == *socks[i])
			continue;
		closesocket(*socks[i]);
		*socks[i] = INVALID_SOCKET;
	}
}
#endif
1024
1025
1026 /*
1027 * worker_resp_cb() is invoked when resp_read_pipe is readable.
1028 */
1029 void
1030 worker_resp_cb(
1031 evutil_socket_t fd,
1032 short what,
1033 void * ctx /* blocking_child * */
1034 )
1035 {
1036 blocking_child * c;
1037
1038 REQUIRE(EV_READ & what);
1039 c = ctx;
1040 INSIST(fd == c->resp_read_pipe);
1041 process_blocking_resp(c);
1042 }
1043
1044
1045 /*
1046 * intres_timeout_req(s) is invoked in the parent to schedule an idle
1047 * timeout to fire in s seconds, if not reset earlier by a call to
1048 * intres_timeout_req(0), which clears any pending timeout. When the
1049 * timeout expires, worker_idle_timer_fired() is invoked (again, in the
1050 * parent).
1051 *
1052 * sntp and ntpd each provide implementations adapted to their timers.
1053 */
1054 void
1055 intres_timeout_req(
1056 u_int seconds /* 0 cancels */
1057 )
1058 {
1059 struct timeval tv_to;
1060
1061 if (NULL == ev_worker_timeout) {
1062 ev_worker_timeout = event_new(base, -1,
1063 EV_TIMEOUT | EV_PERSIST,
1064 &worker_timeout, NULL);
1065 INSIST(NULL != ev_worker_timeout);
1066 } else {
1067 event_del(ev_worker_timeout);
1068 }
1069 if (0 == seconds)
1070 return;
1071 tv_to.tv_sec = seconds;
1072 tv_to.tv_usec = 0;
1073 event_add(ev_worker_timeout, &tv_to);
1074 }
1075
1076
1077 void
1078 worker_timeout(
1079 evutil_socket_t fd,
1080 short what,
1081 void * ctx
1082 )
1083 {
1084 UNUSED_ARG(fd);
1085 UNUSED_ARG(ctx);
1086
1087 REQUIRE(EV_TIMEOUT & what);
1088 worker_idle_timer_fired();
1089 }
1090
1091
1092 void
1093 sntp_libevent_log_cb(
1094 int severity,
1095 const char * msg
1096 )
1097 {
1098 int level;
1099
1100 switch (severity) {
1101
1102 default:
1103 case _EVENT_LOG_DEBUG:
1104 level = LOG_DEBUG;
1105 break;
1106
1107 case _EVENT_LOG_MSG:
1108 level = LOG_NOTICE;
1109 break;
1110
1111 case _EVENT_LOG_WARN:
1112 level = LOG_WARNING;
1113 break;
1114
1115 case _EVENT_LOG_ERR:
1116 level = LOG_ERR;
1117 break;
1118 }
1119
1120 msyslog(level, "%s", msg);
1121 }
1122
1123
/*
** generate_pkt() -- fill in an NTP client request packet with the
** given transmit timestamp (already in NTP era) and, when pkt_key is
** non-NULL, append a keyID + MAC.  Returns the total packet length in
** bytes to send on the wire.
*/
int
generate_pkt (
	struct pkt *x_pkt,
	const struct timeval *tv_xmt,
	int key_id,
	struct key *pkt_key
	)
{
	l_fp	xmt_fp;
	int	pkt_len;
	int	mac_size;

	pkt_len = LEN_PKT_NOMAC;
	ZERO(*x_pkt);
	TVTOTS(tv_xmt, &xmt_fp);
	HTONL_FP(&xmt_fp, &x_pkt->xmt);
	x_pkt->stratum = STRATUM_TO_PKT(STRATUM_UNSPEC);
	x_pkt->ppoll = 8;
	/* FIXME! Modus broadcast + adr. check -> bdr. pkt */
	set_li_vn_mode(x_pkt, LEAP_NOTINSYNC, ntpver, 3);	/* mode 3 = client */
	if (debug > 0) {
		printf("generate_pkt: key_id %d, key pointer %p\n", key_id, pkt_key);
	}
	if (pkt_key != NULL) {
		/* keyID goes first, MAC digest follows it */
		x_pkt->exten[0] = htonl(key_id);
		mac_size = make_mac(x_pkt, pkt_len, pkt_key,
				    (char *)&x_pkt->exten[1], MAX_MDG_LEN);
		if (mac_size > 0)
			pkt_len += mac_size + KEY_MAC_LEN;
#ifdef DEBUG
		if (debug > 0) {
			printf("generate_pkt: mac_size is %d\n", mac_size);
		}
#endif

	}
	return pkt_len;
}
1162
1163
/*
** handle_pkt() -- act on the result of process_pkt().
** rpktl > 0 means a usable reply of that many bytes; otherwise it is
** one of the SERVER_*/KOD_* error codes.  For a usable reply, compute
** the offset, log it, and (with -S/-s) set the clock.  Returns -1 for
** an unusable server, 0/EX_OK/set_time() result on success, 1 for the
** other error codes.  SNTP_PRETEND_TIME overrides the local receive
** time for reproducible testing.
*/
int
handle_pkt(
	int		rpktl,
	struct pkt *	rpkt,
	sockaddr_u *	host,
	const char *	hostname
	)
{
	char		disptxt[32];
	const char *	addrtxt;
	struct timeval	tv_dst;
	int		cnt;
	int		sw_case;
	int		digits;
	int		stratum;
	char *		ref;
	char *		ts_str;
	const char *	leaptxt;
	double		offset;
	double		precision;
	double		synch_distance;
	char *		p_SNTP_PRETEND_TIME;
	time_t		pretend_time;
#if SIZEOF_TIME_T == 8
	long long	ll;
#else
	long		l;
#endif

	ts_str = NULL;

	/* collapse all positive lengths onto one switch arm */
	if (rpktl > 0)
		sw_case = 1;
	else
		sw_case = rpktl;

	switch (sw_case) {

	case SERVER_UNUSEABLE:
		return -1;
		break;

	case PACKET_UNUSEABLE:
		break;

	case SERVER_AUTH_FAIL:
		break;

	case KOD_DEMOBILIZE:
		/* Received a DENY or RESTR KOD packet */
		addrtxt = stoa(host);
		ref = (char *)&rpkt->refid;
		add_entry(addrtxt, ref);	/* remember the KoD */
		msyslog(LOG_WARNING, "KOD code %c%c%c%c from %s %s",
			ref[0], ref[1], ref[2], ref[3], addrtxt, hostname);
		break;

	case KOD_RATE:
		/*
		** Hmm...
		** We should probably call add_entry() with an
		** expiration timestamp of several seconds in the future,
		** and back-off even more if we get more RATE responses.
		*/
		break;

	case 1:
		TRACE(3, ("handle_pkt: %d bytes from %s %s\n",
			  rpktl, stoa(host), hostname));

		gettimeofday_cached(base, &tv_dst);

		/* test hook: substitute a fixed "now" if requested */
		p_SNTP_PRETEND_TIME = getenv("SNTP_PRETEND_TIME");
		if (p_SNTP_PRETEND_TIME) {
			pretend_time = 0;
#if SIZEOF_TIME_T == 4
			if (1 == sscanf(p_SNTP_PRETEND_TIME, "%ld", &l))
				pretend_time = (time_t)l;
#elif SIZEOF_TIME_T == 8
			if (1 == sscanf(p_SNTP_PRETEND_TIME, "%lld", &ll))
				pretend_time = (time_t)ll;
#else
# include "GRONK: unexpected value for SIZEOF_TIME_T"
#endif
			if (0 != pretend_time)
				tv_dst.tv_sec = pretend_time;
		}

		offset_calculation(rpkt, rpktl, &tv_dst, &offset,
				   &precision, &synch_distance);
		time_derived = TRUE;

		/* count decimal digits needed to show 'precision' */
		for (digits = 0; (precision *= 10.) < 1.; ++digits)
			/* empty */ ;
		if (digits > 6)
			digits = 6;

		ts_str = tv_to_str(&tv_dst);	/* heap string; freed below */
		stratum = rpkt->stratum;
		if (0 == stratum)
			stratum = 16;	/* 0 means unspecified/invalid */

		if (synch_distance > 0.) {
			cnt = snprintf(disptxt, sizeof(disptxt),
				       " +/- %f", synch_distance);
			if ((size_t)cnt >= sizeof(disptxt))
				snprintf(disptxt, sizeof(disptxt),
					 "ERROR %d >= %d", cnt,
					 (int)sizeof(disptxt));
		} else {
			disptxt[0] = '\0';
		}

		switch (PKT_LEAP(rpkt->li_vn_mode)) {
		case LEAP_NOWARNING:
			leaptxt = "no-leap";
			break;
		case LEAP_ADDSECOND:
			leaptxt = "add-leap";
			break;
		case LEAP_DELSECOND:
			leaptxt = "del-leap";
			break;
		case LEAP_NOTINSYNC:
			leaptxt = "unsync";
			break;
		default:
			leaptxt = "LEAP-ERROR";
			break;
		}

		msyslog(LOG_INFO, "%s %+.*f%s %s s%d %s%s", ts_str,
			digits, offset, disptxt,
			hostnameaddr(hostname, host), stratum,
			leaptxt,
			(time_adjusted)
			    ? " [excess]"
			    : "");
		free(ts_str);

		/* pretend-time runs never touch the real clock */
		if (p_SNTP_PRETEND_TIME)
			return 0;

		if (!time_adjusted &&
		    (ENABLED_OPT(STEP) || ENABLED_OPT(SLEW)))
			return set_time(offset);

		return EX_OK;
	}

	return 1;
}
1316
1317
/*
** offset_calculation - derive the clock offset, server precision, and
** root synchronization distance from a server reply.
**
** rpkt		server reply packet, timestamps still in network order
** rpktl	reply length; only forwarded to pkt_output() when debugging
** tv_dst	client receive time T4 (gettimeofday 1970 scale)
** offset	out: ((T2 - T1) + (T3 - T4)) / 2
** precision	out: LOGTOD(rpkt->precision)
** synch_distance	out: root synchronization distance estimate
*/
void
offset_calculation(
	struct pkt *rpkt,
	int rpktl,
	struct timeval *tv_dst,
	double *offset,
	double *precision,
	double *synch_distance
	)
{
	l_fp p_rec, p_xmt, p_ref, p_org, tmp, dst;
	u_fp p_rdly, p_rdsp;
	double t21, t34, delta;

	/* Convert timestamps from network to host byte order */
	p_rdly = NTOHS_FP(rpkt->rootdelay);
	p_rdsp = NTOHS_FP(rpkt->rootdisp);
	NTOHL_FP(&rpkt->reftime, &p_ref);
	NTOHL_FP(&rpkt->org, &p_org);
	NTOHL_FP(&rpkt->rec, &p_rec);
	NTOHL_FP(&rpkt->xmt, &p_xmt);

	*precision = LOGTOD(rpkt->precision);

	TRACE(3, ("offset_calculation: LOGTOD(rpkt->precision): %f\n", *precision));

	/* Compute offset etc. */
	/* t21 = T2 - T1: server receive minus client originate */
	tmp = p_rec;
	L_SUB(&tmp, &p_org);
	LFPTOD(&tmp, t21);
	/* Shift the 1970-based destination time onto the NTP 1900 era. */
	TVTOTS(tv_dst, &dst);
	dst.l_ui += JAN_1970;
	/* t34 = T3 - T4: server transmit minus client receive */
	tmp = p_xmt;
	L_SUB(&tmp, &dst);
	LFPTOD(&tmp, t34);
	*offset = (t21 + t34) / 2.;
	/* delta (round-trip related difference) is only traced, not returned */
	delta = t21 - t34;

	// synch_distance is:
	// (peer->delay + peer->rootdelay) / 2 + peer->disp
	// + peer->rootdisp + clock_phi * (current_time - peer->update)
	// + peer->jitter;
	//
	// and peer->delay = fabs(peer->offset - p_offset) * 2;
	// and peer->offset needs history, so we're left with
	// p_offset = (t21 + t34) / 2.;
	// peer->disp = 0; (we have no history to augment this)
	// clock_phi = 15e-6;
	// peer->jitter = LOGTOD(sys_precision); (we have no history to augment this)
	// and ntp_proto.c:set_sys_tick_precision() should get us sys_precision.
	//
	// so our answer seems to be:
	//
	// (fabs(t21 + t34) + peer->rootdelay) / 3.
	// + 0 (peer->disp)
	// + peer->rootdisp
	// + 15e-6 (clock_phi)
	// + LOGTOD(sys_precision)

	/* Root delay cannot be negative in a sane reply. */
	INSIST( FPTOD(p_rdly) >= 0. );
#if 1
	*synch_distance = (fabs(t21 + t34) + FPTOD(p_rdly)) / 3.
		+ 0.
		+ FPTOD(p_rdsp)
		+ 15e-6
		+ 0.	/* LOGTOD(sys_precision) when we can get it */
		;
	INSIST( *synch_distance >= 0. );
#else
	*synch_distance = (FPTOD(p_rdly) + FPTOD(p_rdsp))/2.0;
#endif

#ifdef DEBUG
	if (debug > 3) {
		printf("sntp rootdelay: %f\n", FPTOD(p_rdly));
		printf("sntp rootdisp: %f\n", FPTOD(p_rdsp));
		printf("sntp syncdist: %f\n", *synch_distance);

		pkt_output(rpkt, rpktl, stdout);

		printf("sntp offset_calculation: rpkt->reftime:\n");
		l_fp_output(&p_ref, stdout);
		printf("sntp offset_calculation: rpkt->org:\n");
		l_fp_output(&p_org, stdout);
		printf("sntp offset_calculation: rpkt->rec:\n");
		l_fp_output(&p_rec, stdout);
		printf("sntp offset_calculation: rpkt->xmt:\n");
		l_fp_output(&p_xmt, stdout);
	}
#endif

	TRACE(3, ("sntp offset_calculation:\trec - org t21: %.6f\n"
		  "\txmt - dst t34: %.6f\tdelta: %.6f\toffset: %.6f\n",
		  t21, t34, delta, *offset));

	return;
}
1415
1416
1417
1418 /* Compute the 8 bits for li_vn_mode */
1419 void
1420 set_li_vn_mode (
1421 struct pkt *spkt,
1422 char leap,
1423 char version,
1424 char mode
1425 )
1426 {
1427 if (leap > 3) {
1428 msyslog(LOG_DEBUG, "set_li_vn_mode: leap > 3, using max. 3");
1429 leap = 3;
1430 }
1431
1432 if ((unsigned char)version > 7) {
1433 msyslog(LOG_DEBUG, "set_li_vn_mode: version < 0 or > 7, using 4");
1434 version = 4;
1435 }
1436
1437 if (mode > 7) {
1438 msyslog(LOG_DEBUG, "set_li_vn_mode: mode > 7, using client mode 3");
1439 mode = 3;
1440 }
1441
1442 spkt->li_vn_mode = leap << 6;
1443 spkt->li_vn_mode |= version << 3;
1444 spkt->li_vn_mode |= mode;
1445 }
1446
1447
1448 /*
1449 ** set_time applies 'offset' to the local clock.
1450 */
1451 int
1452 set_time(
1453 double offset
1454 )
1455 {
1456 int rc;
1457
1458 if (time_adjusted)
1459 return EX_OK;
1460
1461 /*
1462 ** If we can step but we cannot slew, then step.
1463 ** If we can step or slew and and |offset| > steplimit, then step.
1464 */
1465 if (ENABLED_OPT(STEP) &&
1466 ( !ENABLED_OPT(SLEW)
1467 || (ENABLED_OPT(SLEW) && (fabs(offset) > steplimit))
1468 )) {
1469 rc = step_systime(offset);
1470
1471 /* If there was a problem, can we rely on errno? */
1472 if (1 == rc)
1473 time_adjusted = TRUE;
1474 return (time_adjusted)
1475 ? EX_OK
1476 : 1;
1477 /*
1478 ** In case of error, what should we use?
1479 ** EX_UNAVAILABLE?
1480 ** EX_OSERR?
1481 ** EX_NOPERM?
1482 */
1483 }
1484
1485 if (ENABLED_OPT(SLEW)) {
1486 rc = adj_systime(offset);
1487
1488 /* If there was a problem, can we rely on errno? */
1489 if (1 == rc)
1490 time_adjusted = TRUE;
1491 return (time_adjusted)
1492 ? EX_OK
1493 : 1;
1494 /*
1495 ** In case of error, what should we use?
1496 ** EX_UNAVAILABLE?
1497 ** EX_OSERR?
1498 ** EX_NOPERM?
1499 */
1500 }
1501
1502 return EX_SOFTWARE;
1503 }
1504
1505
1506 int
1507 libevent_version_ok(void)
1508 {
1509 ev_uint32_t v_compile_maj;
1510 ev_uint32_t v_run_maj;
1511
1512 v_compile_maj = LIBEVENT_VERSION_NUMBER & 0xffff0000;
1513 v_run_maj = event_get_version_number() & 0xffff0000;
1514 if (v_compile_maj != v_run_maj) {
1515 fprintf(stderr,
1516 "Incompatible libevent versions: have %s, built with %s\n",
1517 event_get_version(),
1518 LIBEVENT_VERSION);
1519 return 0;
1520 }
1521 return 1;
1522 }
1523
1524 /*
1525 * gettimeofday_cached()
1526 *
1527 * Clones the event_base_gettimeofday_cached() interface but ensures the
1528 * times are always on the gettimeofday() 1970 scale. Older libevent 2
1529 * sometimes used gettimeofday(), sometimes the since-system-start
1530 * clock_gettime(CLOCK_MONOTONIC), depending on the platform.
1531 *
1532 * It is not cleanly possible to tell which timescale older libevent is
1533 * using.
1534 *
1535 * The strategy involves 1 hour thresholds chosen to be far longer than
1536 * the duration of a round of libevent callbacks, which share a cached
1537 * start-of-round time. First compare the last cached time with the
1538 * current gettimeofday() time. If they are within one hour, libevent
1539 * is using the proper timescale so leave the offset 0. Otherwise,
1540 * compare libevent's cached time and the current time on the monotonic
1541 * scale. If they are within an hour, libevent is using the monotonic
1542 * scale so calculate the offset to add to such times to bring them to
1543 * gettimeofday()'s scale.
1544 */
int
gettimeofday_cached(
	struct event_base *	b,
	struct timeval *	caller_tv
	)
{
#if defined(_EVENT_HAVE_CLOCK_GETTIME) && defined(CLOCK_MONOTONIC)
	/* cache key: the base and its last cached time, as returned by
	   event_base_gettimeofday_cached() */
	static struct event_base *	cached_b;
	static struct timeval		cached;
	/* cached + offset, handed back on cache hits */
	static struct timeval		adj_cached;
	/* correction added to libevent times; stays zeroed when libevent
	   is already on the gettimeofday() scale */
	static struct timeval		offset;
	static int			offset_ready;
	struct timeval	latest;
	struct timeval	systemt;
	struct timespec	ts;
	struct timeval	mono;
	struct timeval	diff;
	int		cgt_rc;
	int		gtod_rc;

	event_base_gettimeofday_cached(b, &latest);
	/* Same base, same cached round time: reuse the adjusted value. */
	if (b == cached_b &&
	    !memcmp(&latest, &cached, sizeof(latest))) {
		*caller_tv = adj_cached;
		return 0;
	}
	cached = latest;
	cached_b = b;
	/* Determine the timescale offset once, on the first fresh time. */
	if (!offset_ready) {
		cgt_rc = clock_gettime(CLOCK_MONOTONIC, &ts);
		gtod_rc = gettimeofday(&systemt, NULL);
		if (0 != gtod_rc) {
			msyslog(LOG_ERR,
				"%s: gettimeofday() error %m",
				progname);
			exit(1);
		}
		diff = sub_tval(systemt, latest);
		if (debug > 1)
			printf("system minus cached %+ld.%06ld\n",
			       (long)diff.tv_sec, (long)diff.tv_usec);
		/* Within an hour of gettimeofday(), or no monotonic clock
		   available: libevent is on the 1970 scale already. */
		if (0 != cgt_rc || labs((long)diff.tv_sec) < 3600) {
			/*
			 * Either use_monotonic == 0, or this libevent
			 * has been repaired.  Leave offset at zero.
			 */
		} else {
			mono.tv_sec = ts.tv_sec;
			mono.tv_usec = ts.tv_nsec / 1000;
			diff = sub_tval(latest, mono);
			if (debug > 1)
				printf("cached minus monotonic %+ld.%06ld\n",
				       (long)diff.tv_sec, (long)diff.tv_usec);
			if (labs((long)diff.tv_sec) < 3600) {
				/* older libevent2 using monotonic */
				offset = sub_tval(systemt, mono);
				TRACE(1, ("%s: Offsetting libevent CLOCK_MONOTONIC times by %+ld.%06ld\n",
					  "gettimeofday_cached",
					  (long)offset.tv_sec,
					  (long)offset.tv_usec));
			}
		}
		offset_ready = TRUE;
	}
	adj_cached = add_tval(cached, offset);
	*caller_tv = adj_cached;

	return 0;
#else
	/* No monotonic clock on this platform: libevent's cached time is
	   already on the gettimeofday() scale. */
	return event_base_gettimeofday_cached(b, caller_tv);
#endif
}
1617
/*
** Stub satisfying the reference from libntp/work_fork.c.  sntp does
** not drop privileges, so success is reported unconditionally.
*/
extern int set_user_group_ids(void);
int
set_user_group_ids(void)
{
	return 1;
}
1624