main.c revision 1.20 1 /* $NetBSD: main.c,v 1.20 2026/02/08 14:53:15 christos Exp $ */
2
3 #include <config.h>
4
5 #include <event2/util.h>
6 #include <event2/event.h>
7
8 #include "ntp_workimpl.h"
9 #ifdef WORK_THREAD
10 # include <event2/thread.h>
11 #endif
12
13 #ifdef HAVE_SYSEXITS_H
14 # include <sysexits.h>
15 #endif
16
17 #include "main.h"
18 #include "ntp_libopts.h"
19 #include "kod_management.h"
20 #include "networking.h"
21 #include "utilities.h"
22 #include "log.h"
23 #include "libntp.h"
24
extern const char *progname;

int shutting_down;	/* set once event_base_loopexit() has been requested */
int time_derived;	/* set after at least one response produced an offset */
int time_adjusted;	/* set once the clock has actually been set/slewed */
int n_pending_dns = 0;	/* outstanding async DNS lookups */
int n_pending_ntp = 0;	/* outstanding NTP queries awaiting a response */
int ai_fam_pref = AF_UNSPEC;	/* address family preference (-4/-6) */
int ntpver = 4;		/* NTP version to put in outgoing packets */
double steplimit = -1;	/* from --steplimit; stored in seconds */
SOCKET sock4 = -1;		/* Socket for IPv4 */
SOCKET sock6 = -1;		/* Socket for IPv6 */
/*
** BCAST *must* listen on port 123 (by default), so we can only
** use the UCST sockets (above) if they too are using port 123
*/
SOCKET bsock4 = -1;		/* Broadcast Socket for IPv4 */
SOCKET bsock6 = -1;		/* Broadcast Socket for IPv6 */
struct event_base *base;		/* the single libevent event loop */
struct event *ev_sock4;		/* read/timeout event on sock4 */
struct event *ev_sock6;		/* read/timeout event on sock6 */
struct event *ev_worker_timeout;	/* intres idle-timeout event */
struct event *ev_xmt_timer;	/* one-shot transmit scheduling timer */
48
/*
** Per-hostname lookup context, passed through the async resolver to
** sntp_name_resolved() and referenced by every packet sent to any of
** the host's addresses.  Allocated in handle_lookup() with the name
** string stored immediately after the struct in the same allocation.
*/
struct dns_ctx {
	const char *	name;	/* hostname as given on the command line */
	int		flags;	/* CTX_* bits below */
#define CTX_BCST	0x0001	/* broadcast: listen for server bcasts */
#define CTX_UCST	0x0002	/* unicast: actively query the server */
#define CTX_xCST	0x0003	/* mask covering BCST|UCST */
#define CTX_CONC	0x0004	/* concurrent (-c) lookup */
#define CTX_unused	0xfffd
	int		key_id;	/* authentication key ID, or -1 for none */
	struct timeval	timeout;	/* response timeout for this query */
	struct key *	key;	/* key matching key_id, NULL if unauthenticated */
};
61
/*
** One NTP request we have sent (or will send) to a single server
** address.  Kept on the per-family lists v4_pkts_list/v6_pkts_list so
** incoming packets can be matched back to their request.
*/
typedef struct sent_pkt_tag sent_pkt;
struct sent_pkt_tag {
	sent_pkt *		link;	/* next in the per-family list */
	struct dns_ctx *	dctx;	/* lookup this address came from */
	sockaddr_u		addr;	/* server address queried */
	time_t			stime;	/* transmit time, 0 if not yet sent */
	int			done;	/* response received or timed out */
	struct pkt		x_pkt;	/* copy of the packet we transmitted */
};
71
/*
** Pending-transmit queue entry: schedules when a given sent_pkt should
** actually be transmitted on which socket.  Queue is kept sorted by
** sched time (see queue_xmt()).
*/
typedef struct xmt_ctx_tag xmt_ctx;
struct xmt_ctx_tag {
	xmt_ctx *		link;	/* next in xmt_q, sorted by sched */
	SOCKET			sock;	/* socket to transmit on */
	time_t			sched;	/* earliest transmit time (seconds) */
	sent_pkt *		spkt;	/* the request to transmit */
};
79
struct timeval	gap;	/* spacing between transmits (usec, from --gap) */
xmt_ctx *	xmt_q;	/* pending transmits, sorted by sched time */
struct key *	keys = NULL;	/* key list loaded by auth_init() */
int		response_timeout;	/* seconds to wait for a response */
struct timeval	response_tv;	/* response_timeout as a timeval */
struct timeval	start_tv;	/* when the event loop was started */
/* check the timeout at least once per second */
struct timeval	wakeup_tv = { 0, 888888 };

/* per-address-family lists of sent packets: [0] IPv4, [1] IPv6 */
sent_pkt *	fam_listheads[2];
#define v4_pkts_list	(fam_listheads[0])
#define v6_pkts_list	(fam_listheads[1])

/* receive buffer, overlaid with a struct pkt view of the same bytes */
static union {
	struct pkt pkt;
	char   buf[LEN_PKT_NOMAC + NTP_MAXEXTEN + MAX_MAC_LEN];
} rbuf;

#define r_pkt  rbuf.pkt

#ifdef HAVE_DROPROOT
int droproot;			/* intres imports these */
int root_dropped;
#endif
u_long current_time;		/* libntp/authkeys.c */
105
/* forward declarations for this file */
void open_sockets(void);
void handle_lookup(const char *name, int flags);
void sntp_addremove_fd(int fd, int is_pipe, int remove_it);
void worker_timeout(evutil_socket_t, short, void *);
void worker_resp_cb(evutil_socket_t, short, void *);
void sntp_name_resolved(int, int, void *, const char *, const char *,
			const struct addrinfo *,
			const struct addrinfo *);
void queue_xmt(SOCKET sock, struct dns_ctx *dctx, sent_pkt *spkt,
	       u_int xmt_delay);
void xmt_timer_cb(evutil_socket_t, short, void *ptr);
void xmt(xmt_ctx *xctx);
int  check_kod(const struct addrinfo *ai);
void timeout_query(sent_pkt *);
void timeout_queries(void);
void sock_cb(evutil_socket_t, short, void *);
void check_exit_conditions(void);
void sntp_libevent_log_cb(int, const char *);
void set_li_vn_mode(struct pkt *spkt, char leap, char version, char mode);
int  set_time(double offset);
void dec_pending_ntp(const char *, sockaddr_u *);
int  libevent_version_ok(void);
int  gettimeofday_cached(struct event_base *b, struct timeval *tv);
129
130
/*
 * sntp_main() -- the actual main function: parse the command line, set
 * up libevent, open the UDP sockets, start a DNS lookup for every
 * requested server (-b, -c, and bare arguments), then run the event
 * loop until a time is derived or everything times out.
 *
 * Returns 0 on success, 1 if -S/-s was given but the clock was not
 * adjusted, -1 on libevent setup failure; may also exit() directly on
 * usage or version errors.
 */
int
sntp_main (
	int argc,
	char **argv,
	const char *sntpVersion
	)
{
	int			i;
	int			exitcode;
	int			optct;
	struct event_config *	evcfg;

	/* Initialize logging system - sets up progname */
	sntp_init_logging(argv[0]);

	if (!libevent_version_ok())
		exit(EX_SOFTWARE);

	init_lib();
	init_auth();

	optct = ntpOptionProcess(&sntpOptions, argc, argv);
	argc -= optct;	/* argc/argv now cover only server-name operands */
	argv += optct;


	debug = OPT_VALUE_SET_DEBUG_LEVEL;

	TRACE(2, ("init_lib() done, %s%s\n",
		  (ipv4_works)
		      ? "ipv4_works "
		      : "",
		  (ipv6_works)
		      ? "ipv6_works "
		      : ""));
	ntpver = OPT_VALUE_NTPVERSION;
	steplimit = OPT_VALUE_STEPLIMIT / 1e3;	/* option is ms; keep seconds */
	gap.tv_usec = max(0, OPT_VALUE_GAP * 1000);	/* ms -> usec */
	gap.tv_usec = min(gap.tv_usec, 999999);	/* clamp below one second */

	if (HAVE_OPT(LOGFILE))
		open_logfile(OPT_ARG(LOGFILE));

	msyslog(LOG_INFO, "%s", sntpVersion);

	/* at least one server must be named somewhere */
	if (0 == argc && !HAVE_OPT(BROADCAST) && !HAVE_OPT(CONCURRENT)) {
		printf("%s: Must supply at least one of -b hostname, -c hostname, or hostname.\n",
		       progname);
		exit(EX_USAGE);
	}


	/*
	** Eventually, we probably want:
	** - separate bcst and ucst timeouts (why?)
	** - multiple --timeout values in the commandline
	*/

	response_timeout = OPT_VALUE_TIMEOUT;
	response_tv.tv_sec = response_timeout;
	response_tv.tv_usec = 0;

	/* IPv6 available? */
	if (isc_net_probeipv6() != ISC_R_SUCCESS) {
		ai_fam_pref = AF_INET;
		TRACE(1, ("No ipv6 support available, forcing ipv4\n"));
	} else {
		/* Check for options -4 and -6 */
		if (HAVE_OPT(IPV4))
			ai_fam_pref = AF_INET;
		else if (HAVE_OPT(IPV6))
			ai_fam_pref = AF_INET6;
	}

	/* TODO: Parse config file if declared */

	/*
	** Init the KOD system.
	** For embedded systems with no writable filesystem,
	** -K /dev/null can be used to disable KoD storage.
	*/
	kod_init_kod_db(OPT_ARG(KOD), FALSE);

	/* HMS: Check and see what happens if KEYFILE doesn't exist */
	auth_init(OPT_ARG(KEYFILE), &keys);

	/*
	** Considering employing a variable that prevents functions of doing
	** anything until everything is initialized properly
	**
	** HMS: What exactly does the above mean?
	*/
	event_set_log_callback(&sntp_libevent_log_cb);
	if (debug > 0)
		event_enable_debug_mode();
#ifdef WORK_THREAD
	evthread_use_pthreads();
	/* we use libevent from main thread only, locks should be academic */
	if (debug > 0)
		evthread_enable_lock_debuging();
#endif
	evcfg = event_config_new();
	if (NULL == evcfg) {
		printf("%s: event_config_new() failed!\n", progname);
		return -1;
	}
#ifndef HAVE_SOCKETPAIR
	/* intres worker pipes need a backend that accepts plain fds */
	event_config_require_features(evcfg, EV_FEATURE_FDS);
#endif
	/* all libevent calls are from main thread */
	/* event_config_set_flag(evcfg, EVENT_BASE_FLAG_NOLOCK); */
	base = event_base_new_with_config(evcfg);
	event_config_free(evcfg);
	if (NULL == base) {
		printf("%s: event_base_new() failed!\n", progname);
		return -1;
	}

	/* wire into intres resolver */
	worker_per_query = TRUE;
	addremove_io_fd = &sntp_addremove_fd;

	open_sockets();

	if (HAVE_OPT(BROADCAST)) {
		int		cn = STACKCT_OPT(  BROADCAST );
		const char **	cp = STACKLST_OPT( BROADCAST );

		while (cn-- > 0) {
			handle_lookup(*cp, CTX_BCST);
			cp++;
		}
	}

	if (HAVE_OPT(CONCURRENT)) {
		int		cn = STACKCT_OPT( CONCURRENT );
		const char **	cp = STACKLST_OPT( CONCURRENT );

		while (cn-- > 0) {
			handle_lookup(*cp, CTX_UCST | CTX_CONC);
			cp++;
		}
	}

	for (i = 0; i < argc; ++i)
		handle_lookup(argv[i], CTX_UCST);

	gettimeofday_cached(base, &start_tv);
	event_base_dispatch(base);	/* run until loopexit is requested */
	event_base_free(base);

	/* -S/-s requested but no adjustment happened -> nonzero exit */
	if (!time_adjusted &&
	    (ENABLED_OPT(STEP) || ENABLED_OPT(SLEW)))
		exitcode = 1;
	else
		exitcode = 0;

	return exitcode;
}
293
294
/*
** open sockets and make them non-blocking
**
** Creates the IPv4 (and, when available, IPv6) UDP sockets, binds
** them to the wildcard address -- port 123 if --usereservedport was
** given, otherwise an ephemeral port -- and registers a persistent
** read/timeout libevent callback (sock_cb) on each.  Exits the
** process on any socket/bind failure.  Idempotent: sockets already
** open (-1 test) are left alone.
*/
void
open_sockets(
	void
	)
{
	sockaddr_u	name;

	if (-1 == sock4) {
		sock4 = socket(PF_INET, SOCK_DGRAM, 0);
		if (-1 == sock4) {
			/* error getting a socket */
			msyslog(LOG_ERR, "open_sockets: socket(PF_INET) failed: %m");
			exit(1);
		}
		/* Make it non-blocking */
		make_socket_nonblocking(sock4);

		/* Let's try using a wildcard... */
		ZERO(name);
		AF(&name) = AF_INET;
		SET_ADDR4N(&name, INADDR_ANY);
		SET_PORT(&name, (HAVE_OPT(USERESERVEDPORT) ? 123 : 0));

		if (-1 == bind(sock4, &name.sa,
			       SOCKLEN(&name))) {
			msyslog(LOG_ERR, "open_sockets: bind(sock4) failed: %m");
			exit(1);
		}

		/* Register an NTP callback for recv/timeout */
		ev_sock4 = event_new(base, sock4,
				     EV_TIMEOUT | EV_READ | EV_PERSIST,
				     &sock_cb, NULL);
		if (NULL == ev_sock4) {
			msyslog(LOG_ERR,
				"open_sockets: event_new(base, sock4) failed!");
		} else {
			event_add(ev_sock4, &wakeup_tv);
		}
	}

	/* We may not always have IPv6... */
	if (-1 == sock6 && ipv6_works) {
		sock6 = socket(PF_INET6, SOCK_DGRAM, 0);
		/*
		 * NOTE(review): the "&& ipv6_works" below is redundant --
		 * it is always true inside this branch -- so this behaves
		 * as a plain socket() failure check.
		 */
		if (-1 == sock6 && ipv6_works) {
			/* error getting a socket */
			msyslog(LOG_ERR, "open_sockets: socket(PF_INET6) failed: %m");
			exit(1);
		}
		/* Make it non-blocking */
		make_socket_nonblocking(sock6);

		/* Let's try using a wildcard... */
		ZERO(name);
		AF(&name) = AF_INET6;
		SET_ADDR6N(&name, in6addr_any);
		SET_PORT(&name, (HAVE_OPT(USERESERVEDPORT) ? 123 : 0));

		if (-1 == bind(sock6, &name.sa,
			       SOCKLEN(&name))) {
			msyslog(LOG_ERR, "open_sockets: bind(sock6) failed: %m");
			exit(1);
		}
		/* Register an NTP callback for recv/timeout */
		ev_sock6 = event_new(base, sock6,
				     EV_TIMEOUT | EV_READ | EV_PERSIST,
				     &sock_cb, NULL);
		if (NULL == ev_sock6) {
			msyslog(LOG_ERR,
				"open_sockets: event_new(base, sock6) failed!");
		} else {
			event_add(ev_sock6, &wakeup_tv);
		}
	}

	return;
}
375
376
/*
** handle_lookup
**
** Starts an asynchronous DNS lookup for one server name.  A dns_ctx
** and a copy of the name are carved out of a single allocation; the
** ctx travels with the lookup and is handed back to
** sntp_name_resolved().  Bumps n_pending_dns.  Exits the process if
** authentication was requested but the key ID is not in the keyfile.
*/
void
handle_lookup(
	const char *name,
	int flags
	)
{
	struct addrinfo	hints;	/* Local copy is OK */
	struct dns_ctx *ctx;
	char *		name_copy;
	size_t		name_sz;
	size_t		octets;

	TRACE(1, ("handle_lookup(%s,%#x)\n", name, flags));

	ZERO(hints);
	hints.ai_family = ai_fam_pref;
	hints.ai_flags = AI_CANONNAME | Z_AI_NUMERICSERV;
	/*
	** Unless we specify a socktype, we'll get at least two
	** entries for each address: one for TCP and one for
	** UDP. That's not what we want.
	*/
	hints.ai_socktype = SOCK_DGRAM;
	hints.ai_protocol = IPPROTO_UDP;

	name_sz = 1 + strlen(name);
	octets = sizeof(*ctx) + name_sz;	// Space for a ctx and the name
	ctx = emalloc_zero(octets);		// ctx at ctx[0]
	name_copy = (char *)(ctx + 1);		// Put the name at ctx[1]
	memcpy(name_copy, name, name_sz);	// copy the name to ctx[1]
	ctx->name = name_copy;			// point to it...
	ctx->flags = flags;
	ctx->timeout = response_tv;
	ctx->key = NULL;

	/* The following should arguably be passed in... */
	if (ENABLED_OPT(AUTHENTICATION)) {
		ctx->key_id = OPT_VALUE_AUTHENTICATION;
		get_key(ctx->key_id, &ctx->key);
		if (NULL == ctx->key) {
			fprintf(stderr, "%s: Authentication with keyID %d requested, but no matching keyID found in <%s>!\n",
				progname, ctx->key_id, OPT_ARG(KEYFILE));
			exit(1);
		}
	} else {
		ctx->key_id = -1;
	}

	++n_pending_dns;
	getaddrinfo_sometime(name, "123", &hints, 0,
			     &sntp_name_resolved, ctx);
}
432
433
/*
** DNS Callback:
** - For each IP:
** - - open a socket
** - - increment n_pending_ntp
** - - send a request if this is a Unicast callback
** - - queue wait for response
** - decrement n_pending_dns
**
** Invoked by getaddrinfo_sometime() when the async lookup started in
** handle_lookup() completes.  'context' is the dns_ctx allocated
** there.  Addresses with a KoD on file are skipped; for unicast
** contexts a sent_pkt is allocated and queued for transmission.
*/
void
sntp_name_resolved(
	int			rescode,
	int			gai_errno,
	void *			context,
	const char *		name,
	const char *		service,
	const struct addrinfo *	hints,
	const struct addrinfo *	addr
	)
{
	struct dns_ctx *	dctx;
	sent_pkt *		spkt;
	const struct addrinfo *	ai;
	SOCKET			sock;
	u_int			xmt_delay_v4;	/* per-family transmit slot counters */
	u_int			xmt_delay_v6;
	u_int			xmt_delay;
	size_t			octets;

	xmt_delay_v4 = 0;
	xmt_delay_v6 = 0;
	dctx = context;
	if (rescode) {
#ifdef EAI_SYSTEM
		if (EAI_SYSTEM == rescode) {
			/* system error: report via errno/%m */
			errno = gai_errno;
			mfprintf(stderr, "%s lookup error %m\n",
				 dctx->name);
		} else
#endif
			fprintf(stderr, "%s lookup error %s\n",
				dctx->name, gai_strerror(rescode));
	} else {
		TRACE(3, ("%s [%s]\n", dctx->name,
			  (addr->ai_canonname != NULL)
			      ? addr->ai_canonname
			      : ""));

		for (ai = addr; ai != NULL; ai = ai->ai_next) {

			if (check_kod(ai))
				continue;

			switch (ai->ai_family) {

			case AF_INET:
				sock = sock4;
				xmt_delay = xmt_delay_v4;
				xmt_delay_v4++;
				break;

			case AF_INET6:
				if (!ipv6_works)
					continue;

				sock = sock6;
				xmt_delay = xmt_delay_v6;
				xmt_delay_v6++;
				break;

			default:
				msyslog(LOG_ERR, "sntp_name_resolved: unexpected ai_family: %d",
					ai->ai_family);
				exit(1);
				break;
			}

			/*
			** We're waiting for a response for either unicast
			** or broadcast, so...
			*/
			++n_pending_ntp;

			/* If this is for a unicast IP, queue a request */
			if (dctx->flags & CTX_UCST) {
				spkt = emalloc_zero(sizeof(*spkt));
				spkt->dctx = dctx;
				octets = min(ai->ai_addrlen, sizeof(spkt->addr));
				memcpy(&spkt->addr, ai->ai_addr, octets);
				queue_xmt(sock, dctx, spkt, xmt_delay);
			}
		}
	}
	/* n_pending_dns really should be >0 here... */
	--n_pending_dns;
	check_exit_conditions();
}
531
532
533 /*
534 ** queue_xmt
535 */
536 void
537 queue_xmt(
538 SOCKET sock,
539 struct dns_ctx * dctx,
540 sent_pkt * spkt,
541 u_int xmt_delay
542 )
543 {
544 sockaddr_u * dest;
545 sent_pkt ** pkt_listp;
546 sent_pkt * match;
547 xmt_ctx * xctx;
548 struct timeval start_cb;
549 struct timeval delay;
550
551 dest = &spkt->addr;
552 if (IS_IPV6(dest))
553 pkt_listp = &v6_pkts_list;
554 else
555 pkt_listp = &v4_pkts_list;
556
557 /* reject attempts to add address already listed */
558 for (match = *pkt_listp; match != NULL; match = match->link) {
559 if (ADDR_PORT_EQ(&spkt->addr, &match->addr)) {
560 if (strcasecmp(spkt->dctx->name,
561 match->dctx->name))
562 printf("%s %s duplicate address from %s ignored.\n",
563 sptoa(&match->addr),
564 match->dctx->name,
565 spkt->dctx->name);
566 else
567 printf("%s %s, duplicate address ignored.\n",
568 sptoa(&match->addr),
569 match->dctx->name);
570 dec_pending_ntp(spkt->dctx->name, &spkt->addr);
571 free(spkt);
572 return;
573 }
574 }
575
576 LINK_SLIST(*pkt_listp, spkt, link);
577
578 xctx = emalloc_zero(sizeof(*xctx));
579 xctx->sock = sock;
580 xctx->spkt = spkt;
581 gettimeofday_cached(base, &start_cb);
582 xctx->sched = start_cb.tv_sec + (2 * xmt_delay);
583
584 if (xmt_q) {
585 LINK_SORT_SLIST(xmt_q, xctx, (xctx->sched < L_S_S_CUR()->sched),
586 link, xmt_ctx);
587 }
588 if (xmt_q == xctx) {
589 /*
590 * The new entry is the first scheduled. The timer is
591 * either not active or is set for the second xmt
592 * context in xmt_q.
593 */
594 if (NULL == ev_xmt_timer)
595 ev_xmt_timer = event_new(base, INVALID_SOCKET,
596 EV_TIMEOUT,
597 &xmt_timer_cb, NULL);
598 if (NULL == ev_xmt_timer) {
599 msyslog(LOG_ERR,
600 "queue_xmt: event_new(base, -1, EV_TIMEOUT) failed!");
601 exit(1);
602 }
603 ZERO(delay);
604 if (xctx->sched > start_cb.tv_sec)
605 delay.tv_sec = xctx->sched - start_cb.tv_sec;
606 event_add(ev_xmt_timer, &delay);
607 TRACE(2, ("queue_xmt: xmt timer for %u usec\n",
608 (u_int)delay.tv_usec));
609 }
610 }
611
612
/*
** xmt_timer_cb
**
** Transmit-timer callback: if the head of xmt_q is due, transmit it
** and free its xmt_ctx, then re-arm the timer for the new head --
** using the configured gap when the next entry is also already due,
** or the remaining whole seconds otherwise.  Does nothing while
** shutting down or when the queue is empty.
*/
void
xmt_timer_cb(
	evutil_socket_t	fd,
	short		what,
	void *		ctx
	)
{
	struct timeval	start_cb;
	struct timeval	delay;
	xmt_ctx *	x;

	UNUSED_ARG(fd);
	UNUSED_ARG(ctx);
	DEBUG_INSIST(EV_TIMEOUT == what);

	if (NULL == xmt_q || shutting_down)
		return;
	gettimeofday_cached(base, &start_cb);
	if (xmt_q->sched <= start_cb.tv_sec) {
		UNLINK_HEAD_SLIST(x, xmt_q, link);
		TRACE(2, ("xmt_timer_cb: at .%6.6u -> %s\n",
			  (u_int)start_cb.tv_usec, stoa(&x->spkt->addr)));
		xmt(x);
		free(x);
		if (NULL == xmt_q)
			return;
	}
	/* second check: is the (new) head also already due? */
	if (xmt_q->sched <= start_cb.tv_sec) {
		/* pace back-to-back transmits by the configured gap */
		event_add(ev_xmt_timer, &gap);
		TRACE(2, ("xmt_timer_cb: at .%6.6u gap %6.6u\n",
			  (u_int)start_cb.tv_usec,
			  (u_int)gap.tv_usec));
	} else {
		delay.tv_sec = xmt_q->sched - start_cb.tv_sec;
		delay.tv_usec = 0;
		event_add(ev_xmt_timer, &delay);
		TRACE(2, ("xmt_timer_cb: at .%6.6u next %ld seconds\n",
			  (u_int)start_cb.tv_usec,
			  (long)delay.tv_sec));
	}
}
657
658
/*
** xmt()
**
** Actually transmit one queued request: build the packet with the
** current time (as an NTP-era timestamp) and the context's key, send
** it, and on success record the packet and its send time in the
** sent_pkt so the reply can be validated.  On send failure the
** pending-NTP count is decremented instead.  Exits the process if
** gettimeofday() fails.
*/
void
xmt(
	xmt_ctx *	xctx
	)
{
	SOCKET		sock = xctx->sock;
	struct dns_ctx *dctx = xctx->spkt->dctx;
	sent_pkt *	spkt = xctx->spkt;
	sockaddr_u *	dst = &spkt->addr;
	struct timeval	tv_xmt;
	struct pkt	x_pkt;
	size_t		pkt_len;
	int		sent;

	if (0 != gettimeofday(&tv_xmt, NULL)) {
		msyslog(LOG_ERR,
			"xmt: gettimeofday() failed: %m");
		exit(1);
	}
	tv_xmt.tv_sec += JAN_1970;	/* Unix epoch -> NTP era */

	pkt_len = generate_pkt(&x_pkt, &tv_xmt, dctx->key_id,
			       dctx->key);

	sent = sendpkt(sock, dst, &x_pkt, pkt_len);
	if (sent) {
		/* Save the packet we sent... */
		memcpy(&spkt->x_pkt, &x_pkt, min(sizeof(spkt->x_pkt),
		       pkt_len));
		/* stime kept in Unix time for age calculations */
		spkt->stime = tv_xmt.tv_sec - JAN_1970;

		TRACE(2, ("xmt: %lx.%6.6u %s %s\n", (u_long)tv_xmt.tv_sec,
			  (u_int)tv_xmt.tv_usec, dctx->name, stoa(dst)));
	} else {
		dec_pending_ntp(dctx->name, dst);
	}

	return;
}
701
702
/*
 * timeout_queries() -- give up on unrequited NTP queries
 *
 * Walks both per-family sent-packet lists and times out any
 * outstanding (sent, not done) query older than response_timeout.
 * If the whole run has exceeded response_timeout since start_tv,
 * asks the event loop to exit.
 */
void
timeout_queries(void)
{
	struct timeval	start_cb;
	u_int		idx;
	sent_pkt *	head;
	sent_pkt *	spkt;
	sent_pkt *	spkt_next;
	long		age;
	int didsomething = 0;

	TRACE(3, ("timeout_queries: called to check %u items\n",
		  (unsigned)COUNTOF(fam_listheads)));

	gettimeofday_cached(base, &start_cb);
	for (idx = 0; idx < COUNTOF(fam_listheads); idx++) {
		head = fam_listheads[idx];
		for (spkt = head; spkt != NULL; spkt = spkt_next) {
			char xcst;	/* 'B'/'U' for trace output */

			didsomething = 1;
			switch (spkt->dctx->flags & CTX_xCST) {
			case CTX_BCST:
				xcst = 'B';
				break;

			case CTX_UCST:
				xcst = 'U';
				break;

			default:
				INSIST(!"spkt->dctx->flags neither UCST nor BCST");
				break;
			}

			/* grab the link before spkt can be timed out */
			spkt_next = spkt->link;
			if (0 == spkt->stime || spkt->done)
				continue;
			age = start_cb.tv_sec - spkt->stime;
			TRACE(3, ("%s %s %cCST age %ld\n",
				  stoa(&spkt->addr),
				  spkt->dctx->name, xcst, age));
			if (age > response_timeout)
				timeout_query(spkt);
		}
	}
	// Do we care about didsomething?
	TRACE(3, ("timeout_queries: didsomething is %d, age is %ld\n",
		  didsomething, (long) (start_cb.tv_sec - start_tv.tv_sec)));
	if (start_cb.tv_sec - start_tv.tv_sec > response_timeout) {
		TRACE(3, ("timeout_queries: bail!\n"));
		event_base_loopexit(base, NULL);
		shutting_down = TRUE;
	}
}
761
762
763 void dec_pending_ntp(
764 const char * name,
765 sockaddr_u * server
766 )
767 {
768 if (n_pending_ntp > 0) {
769 --n_pending_ntp;
770 check_exit_conditions();
771 } else {
772 INSIST(0 == n_pending_ntp);
773 TRACE(1, ("n_pending_ntp was zero before decrement for %s\n",
774 hostnameaddr(name, server)));
775 }
776 }
777
778
779 void timeout_query(
780 sent_pkt * spkt
781 )
782 {
783 sockaddr_u * server;
784 char xcst;
785
786
787 switch (spkt->dctx->flags & CTX_xCST) {
788 case CTX_BCST:
789 xcst = 'B';
790 break;
791
792 case CTX_UCST:
793 xcst = 'U';
794 break;
795
796 default:
797 INSIST(!"spkt->dctx->flags neither UCST nor BCST");
798 break;
799 }
800 spkt->done = TRUE;
801 server = &spkt->addr;
802 msyslog(LOG_INFO, "%s no %cCST response after %d seconds",
803 hostnameaddr(spkt->dctx->name, server), xcst,
804 response_timeout);
805 dec_pending_ntp(spkt->dctx->name, server);
806 return;
807 }
808
809
/*
** check_kod
**
** Returns nonzero (and announces the skip) when a Kiss-o'-Death entry
** is already on file for the given address, zero otherwise.
*/
int
check_kod(
	const struct addrinfo *	ai
	)
{
	struct kod_entry *	reason;
	char *			hostname;
	int			listed;

	/* Is there a KoD on file for this address? */
	hostname = addrinfo_to_str(ai);
	TRACE(2, ("check_kod: checking <%s>\n", hostname));
	listed = (0 != search_entry(hostname, &reason));
	if (listed) {
		printf("prior KoD for %s, skipping.\n",
			hostname);
		free(reason);
	}
	free(hostname);

	return listed;
}
836
837
/*
** Socket readable/timeout Callback:
** Read in the packet
** Unicast:
** - close socket
** - decrement n_pending_ntp
** - If packet is good, set the time and "exit"
** Broadcast:
** - If packet is good, set the time and "exit"
**
** Registered (persistent) on sock4 and sock6 in open_sockets().  A
** pure timeout wakeup only runs the query-timeout sweep; a readable
** event reads one datagram into rbuf, matches the sender against the
** per-family sent-packet list, validates, and hands the packet to
** handle_pkt().
*/
void
sock_cb(
	evutil_socket_t fd,
	short what,
	void *ptr
	)
{
	sockaddr_u	sender;
	sockaddr_u *	psau;
	sent_pkt **	p_pktlist;
	sent_pkt *	spkt;
	int		rpktl;
	int		rc;

	INSIST(sock4 == fd || sock6 == fd);

	TRACE(3, ("sock_cb: event on sock%s:%s%s%s%s\n",
		  (fd == sock6)
		      ? "6"
		      : "4",
		  (what & EV_TIMEOUT) ? " timeout" : "",
		  (what & EV_READ)    ? " read" : "",
		  (what & EV_WRITE)   ? " write" : "",
		  (what & EV_SIGNAL)  ? " signal" : ""));

	/* periodic wakeup with nothing to read: just sweep timeouts */
	if (!(EV_READ & what)) {
		if (EV_TIMEOUT & what)
			timeout_queries();

		return;
	}

	/* Read in the packet */
	rpktl = recvdata(fd, &sender, &rbuf, sizeof(rbuf));
	if (rpktl < 0) {
		msyslog(LOG_DEBUG, "recvfrom error %m");
		return;
	}

	if (sock6 == fd)
		p_pktlist = &v6_pkts_list;
	else
		p_pktlist = &v4_pkts_list;

	/* match the sender against an address we actually queried */
	for (spkt = *p_pktlist; spkt != NULL; spkt = spkt->link) {
		psau = &spkt->addr;
		if (SOCK_EQ(&sender, psau))
			break;
	}
	if (NULL == spkt) {
		msyslog(LOG_WARNING,
			"Packet from unexpected source %s dropped",
			sptoa(&sender));
		return;
	}

	TRACE(1, ("sock_cb: %s %s\n", spkt->dctx->name,
		  sptoa(&sender)));

	/* validate against the request we recorded in spkt->x_pkt */
	rpktl = process_pkt(&r_pkt, &sender, rpktl, MODE_SERVER,
			    &spkt->x_pkt, "sock_cb");

	TRACE(2, ("sock_cb: process_pkt returned %d\n", rpktl));

	/* If this is a Unicast packet, one down ... */
	if (!spkt->done && (CTX_UCST & spkt->dctx->flags)) {
		dec_pending_ntp(spkt->dctx->name, &spkt->addr);
		spkt->done = TRUE;
	}


	/* If the packet is good, set the time and we're all done */
	rc = handle_pkt(rpktl, &r_pkt, &spkt->addr, spkt->dctx->name);
	if (0 != rc)
		TRACE(1, ("sock_cb: handle_pkt() returned %d\n", rc));
	check_exit_conditions();
}
925
926
927 /*
928 * check_exit_conditions()
929 *
930 * If sntp has a reply, ask the event loop to stop after this round of
931 * callbacks, unless --wait was used.
932 */
933 void
934 check_exit_conditions(void)
935 {
936 if ((0 == n_pending_ntp && 0 == n_pending_dns) ||
937 (time_derived && !HAVE_OPT(WAIT))) {
938 event_base_loopexit(base, NULL);
939 shutting_down = TRUE;
940 } else {
941 TRACE(2, ("%d NTP and %d name queries pending\n",
942 n_pending_ntp, n_pending_dns));
943 }
944 }
945
946
/*
 * sntp_addremove_fd() is invoked by the intres blocking worker code
 * to read from a pipe, or to stop same.
 *
 * Looks up the blocking_child owning 'fd' (silently ignoring unknown
 * fds), then either tears down its read event (remove_it) or creates
 * and adds a persistent EV_READ event dispatching to worker_resp_cb().
 */
void sntp_addremove_fd(
	int	fd,
	int	is_pipe,
	int	remove_it
	)
{
	u_int		idx;
	blocking_child *c;
	struct event *	ev;

#ifdef HAVE_SOCKETPAIR
	if (is_pipe) {
		/* sntp only asks for EV_FEATURE_FDS without HAVE_SOCKETPAIR */
		msyslog(LOG_ERR, "fatal: pipes not supported on systems with socketpair()");
		exit(1);
	}
#endif

	/* find the blocking child that owns this response pipe */
	c = NULL;
	for (idx = 0; idx < blocking_children_alloc; idx++) {
		c = blocking_children[idx];
		if (NULL == c)
			continue;
		if (fd == c->resp_read_pipe)
			break;
	}
	if (idx == blocking_children_alloc)
		return;

	if (remove_it) {
		ev = c->resp_read_ctx;
		c->resp_read_ctx = NULL;
		event_del(ev);
		event_free(ev);

		return;
	}

	make_socket_nonblocking(fd);
	ev = event_new(base, fd, EV_READ | EV_PERSIST,
		       &worker_resp_cb, c);
	if (NULL == ev) {
		msyslog(LOG_ERR,
			"sntp_addremove_fd: event_new(base, fd) failed!");
		return;
	}
	c->resp_read_ctx = ev;
	event_add(ev, NULL);
}
1000
1001
/* called by forked intres child to close open descriptors */
#ifdef WORK_FORK
/*
 * kill_asyncio() -- close all NTP sockets still open in the child and
 * mark them invalid.  'startfd' is part of the shared interface with
 * ntpd but is not used here.
 */
void
kill_asyncio(
	int	startfd
	)
{
	if (INVALID_SOCKET != sock4) {
		closesocket(sock4);
		sock4 = INVALID_SOCKET;
	}
	if (INVALID_SOCKET != sock6) {
		closesocket(sock6);
		sock6 = INVALID_SOCKET;
	}
	if (INVALID_SOCKET != bsock4) {
		closesocket(bsock4);
		bsock4 = INVALID_SOCKET;
	}
	if (INVALID_SOCKET != bsock6) {
		closesocket(bsock6);
		bsock6 = INVALID_SOCKET;
	}
}
#endif
1027
1028
1029 /*
1030 * worker_resp_cb() is invoked when resp_read_pipe is readable.
1031 */
1032 void
1033 worker_resp_cb(
1034 evutil_socket_t fd,
1035 short what,
1036 void * ctx /* blocking_child * */
1037 )
1038 {
1039 blocking_child * c;
1040
1041 REQUIRE(EV_READ & what);
1042 c = ctx;
1043 INSIST(fd == c->resp_read_pipe);
1044 process_blocking_resp(c);
1045 }
1046
1047
/*
 * intres_timeout_req(s) is invoked in the parent to schedule an idle
 * timeout to fire in s seconds, if not reset earlier by a call to
 * intres_timeout_req(0), which clears any pending timeout.  When the
 * timeout expires, worker_idle_timer_fired() is invoked (again, in the
 * parent).
 *
 * sntp and ntpd each provide implementations adapted to their timers.
 */
void
intres_timeout_req(
	u_int	seconds		/* 0 cancels */
	)
{
	struct timeval	tv_to;

	/* create the timer event lazily; otherwise cancel any pending */
	if (NULL == ev_worker_timeout) {
		ev_worker_timeout = event_new(base, -1,
					      EV_TIMEOUT | EV_PERSIST,
					      &worker_timeout, NULL);
		INSIST(NULL != ev_worker_timeout);
	} else {
		event_del(ev_worker_timeout);
	}
	if (0 == seconds)
		return;
	tv_to.tv_sec = seconds;
	tv_to.tv_usec = 0;
	event_add(ev_worker_timeout, &tv_to);
}
1078
1079
1080 void
1081 worker_timeout(
1082 evutil_socket_t fd,
1083 short what,
1084 void * ctx
1085 )
1086 {
1087 UNUSED_ARG(fd);
1088 UNUSED_ARG(ctx);
1089
1090 REQUIRE(EV_TIMEOUT & what);
1091 worker_idle_timer_fired();
1092 }
1093
1094
1095 void
1096 sntp_libevent_log_cb(
1097 int severity,
1098 const char * msg
1099 )
1100 {
1101 int level;
1102
1103 switch (severity) {
1104
1105 default:
1106 case _EVENT_LOG_DEBUG:
1107 level = LOG_DEBUG;
1108 break;
1109
1110 case _EVENT_LOG_MSG:
1111 level = LOG_NOTICE;
1112 break;
1113
1114 case _EVENT_LOG_WARN:
1115 level = LOG_WARNING;
1116 break;
1117
1118 case _EVENT_LOG_ERR:
1119 level = LOG_ERR;
1120 break;
1121 }
1122
1123 msyslog(level, "%s", msg);
1124 }
1125
1126
/*
 * generate_pkt() -- fill in an outgoing client-mode NTP packet with
 * the given transmit timestamp (already in the NTP era) and, when
 * pkt_key is non-NULL, append a key-ID word plus MAC computed over the
 * base packet.  Returns the total packet length in bytes.
 */
int
generate_pkt (
	struct pkt *x_pkt,
	const struct timeval *tv_xmt,
	int key_id,
	struct key *pkt_key
	)
{
	l_fp	xmt_fp;
	int	pkt_len;
	int	mac_size;

	pkt_len = LEN_PKT_NOMAC;
	ZERO(*x_pkt);
	TVTOTS(tv_xmt, &xmt_fp);
	HTONL_FP(&xmt_fp, &x_pkt->xmt);
	x_pkt->stratum = STRATUM_TO_PKT(STRATUM_UNSPEC);
	x_pkt->ppoll = 8;
	/* FIXME! Modus broadcast + adr. check -> bdr. pkt */
	set_li_vn_mode(x_pkt, LEAP_NOTINSYNC, ntpver, 3);
	/* NOTE(review): unlike the later debug print, this one is not
	 * inside #ifdef DEBUG -- confirm which guarding is intended. */
	if (debug > 0) {
		printf("generate_pkt: key_id %d, key pointer %p\n", key_id, pkt_key);
	}
	if (pkt_key != NULL) {
		/* key ID word precedes the MAC in the extension area */
		x_pkt->exten[0] = htonl(key_id);
		mac_size = make_mac(x_pkt, pkt_len, pkt_key,
				    (char *)&x_pkt->exten[1], MAX_MDG_LEN);
		if (mac_size > 0)
			pkt_len += mac_size + KEY_MAC_LEN;
#ifdef DEBUG
		if (debug > 0) {
			printf("generate_pkt: mac_size is %d\n", mac_size);
		}
#endif

	}
	return pkt_len;
}
1165
1166
/*
 * handle_pkt() -- act on the result of process_pkt() for one received
 * packet.  rpktl > 0 means a usable packet of that length; negative
 * values are the SERVER_*/KOD_* status codes, of which KOD_DEMOBILIZE
 * records a KoD entry.  For a usable packet the offset and synch
 * distance are computed and logged, and -- unless SNTP_PRETEND_TIME
 * overrides the destination timestamp -- the clock is stepped/slewed
 * if -S/-s was given and no adjustment has happened yet.
 *
 * Returns -1 for an unusable server, 0/EX_OK or set_time()'s result on
 * success, 1 otherwise.
 */
int
handle_pkt(
	int		rpktl,
	struct pkt *	rpkt,
	sockaddr_u *	host,
	const char *	hostname
	)
{
	char		disptxt[32];
	const char *	addrtxt;
	struct timeval	tv_dst;
	int		cnt;
	int		sw_case;
	int		digits;
	int		stratum;
	char *		ref;
	char *		ts_str;
	const char *	leaptxt;
	double		offset;
	double		precision;
	double		synch_distance;
	char *		p_SNTP_PRETEND_TIME;
	time_t		pretend_time;
#if SIZEOF_TIME_T == 8
	long long	ll;
#else
	long		l;
#endif

	ts_str = NULL;

	/* collapse all positive lengths onto the "usable packet" case */
	if (rpktl > 0)
		sw_case = 1;
	else
		sw_case = rpktl;

	switch (sw_case) {

	case SERVER_UNUSEABLE:
		return -1;
		break;

	case PACKET_UNUSEABLE:
		break;

	case SERVER_AUTH_FAIL:
		break;

	case KOD_DEMOBILIZE:
		/* Received a DENY or RESTR KOD packet */
		addrtxt = stoa(host);
		ref = (char *)&rpkt->refid;
		add_entry(addrtxt, ref);
		msyslog(LOG_WARNING, "KOD code %c%c%c%c from %s %s",
			ref[0], ref[1], ref[2], ref[3], addrtxt, hostname);
		break;

	case KOD_RATE:
		/*
		** Hmm...
		** We should probably call add_entry() with an
		** expiration timestamp of several seconds in the future,
		** and back-off even more if we get more RATE responses.
		*/
		break;

	case 1:
		TRACE(3, ("handle_pkt: %d bytes from %s %s\n",
			  rpktl, stoa(host), hostname));

		gettimeofday_cached(base, &tv_dst);

		/* test hook: force the destination timestamp */
		p_SNTP_PRETEND_TIME = getenv("SNTP_PRETEND_TIME");
		if (p_SNTP_PRETEND_TIME) {
			pretend_time = 0;
#if SIZEOF_TIME_T == 4
			if (1 == sscanf(p_SNTP_PRETEND_TIME, "%ld", &l))
				pretend_time = (time_t)l;
#elif SIZEOF_TIME_T == 8
			if (1 == sscanf(p_SNTP_PRETEND_TIME, "%lld", &ll))
				pretend_time = (time_t)ll;
#else
# include "GRONK: unexpected value for SIZEOF_TIME_T"
#endif
			if (0 != pretend_time)
				tv_dst.tv_sec = pretend_time;
		}

		offset_calculation(rpkt, rpktl, &tv_dst, &offset,
				   &precision, &synch_distance);
		time_derived = TRUE;

		/* number of significant digits implied by precision, max 6 */
		for (digits = 0; (precision *= 10.) < 1.; ++digits)
			/* empty */ ;
		if (digits > 6)
			digits = 6;

		ts_str = tv_to_str(&tv_dst);
		stratum = rpkt->stratum;
		if (0 == stratum)
			stratum = 16;	/* 0 means unspecified/unsynchronized */

		if (synch_distance > 0.) {
			cnt = snprintf(disptxt, sizeof(disptxt),
				       " +/- %f", synch_distance);
			if ((size_t)cnt >= sizeof(disptxt))
				snprintf(disptxt, sizeof(disptxt),
					 "ERROR %d >= %d", cnt,
					 (int)sizeof(disptxt));
		} else {
			disptxt[0] = '\0';
		}

		switch (PKT_LEAP(rpkt->li_vn_mode)) {
		    case LEAP_NOWARNING:
			leaptxt = "no-leap";
			break;
		    case LEAP_ADDSECOND:
			leaptxt = "add-leap";
			break;
		    case LEAP_DELSECOND:
			leaptxt = "del-leap";
			break;
		    case LEAP_NOTINSYNC:
			leaptxt = "unsync";
			break;
		    default:
			leaptxt = "LEAP-ERROR";
			break;
		}

		msyslog(LOG_INFO, "%s %+.*f%s %s s%d %s%s", ts_str,
			digits, offset, disptxt,
			hostnameaddr(hostname, host), stratum,
			leaptxt,
			(time_adjusted)
			    ? " [excess]"
			    : "");
		free(ts_str);

		/* under SNTP_PRETEND_TIME never touch the real clock */
		if (p_SNTP_PRETEND_TIME)
			return 0;

		if (!time_adjusted &&
		    (ENABLED_OPT(STEP) || ENABLED_OPT(SLEW)))
			return set_time(offset);

		return EX_OK;
	}

	return 1;
}
1319
1320
/*
 * offset_calculation - derive clock offset, precision, and root
 * synchronization distance from a server reply packet.
 *
 * rpkt		server reply, timestamps in network byte order
 * rpktl	packet length (passed through only to debug pkt_output())
 * tv_dst	local destination (receive) time, gettimeofday 1970 scale
 * offset	out: ((T2 - T1) + (T3 - T4)) / 2 in seconds
 * precision	out: server precision as seconds, LOGTOD(rpkt->precision)
 * synch_distance out: root synchronization distance estimate, seconds
 */
void
offset_calculation(
	struct pkt *rpkt,
	int rpktl,
	struct timeval *tv_dst,
	double *offset,
	double *precision,
	double *synch_distance
	)
{
	l_fp p_rec, p_xmt, p_ref, p_org, tmp, dst;
	u_fp p_rdly, p_rdsp;
	double t21, t34, delta;

	/* Convert timestamps from network to host byte order */
	p_rdly = NTOHS_FP(rpkt->rootdelay);
	p_rdsp = NTOHS_FP(rpkt->rootdisp);
	NTOHL_FP(&rpkt->reftime, &p_ref);
	NTOHL_FP(&rpkt->org, &p_org);
	NTOHL_FP(&rpkt->rec, &p_rec);
	NTOHL_FP(&rpkt->xmt, &p_xmt);

	*precision = LOGTOD(rpkt->precision);

	TRACE(3, ("offset_calculation: LOGTOD(rpkt->precision): %f\n", *precision));

	/* Compute offset etc. */
	/* t21 = T2 - T1: server receive time minus our originate time */
	tmp = p_rec;
	L_SUB(&tmp, &p_org);
	LFPTOD(&tmp, t21);
	/* shift our 1970-based destination time onto the NTP 1900 era */
	TVTOTS(tv_dst, &dst);
	dst.l_ui += JAN_1970;
	/* t34 = T3 - T4: server transmit time minus our destination time */
	tmp = p_xmt;
	L_SUB(&tmp, &dst);
	LFPTOD(&tmp, t34);
	/* offset is the average of the two one-way deltas;
	   delta (round-trip) is their difference */
	*offset = (t21 + t34) / 2.;
	delta = t21 - t34;

	// synch_distance is:
	// (peer->delay + peer->rootdelay) / 2 + peer->disp
	// + peer->rootdisp + clock_phi * (current_time - peer->update)
	// + peer->jitter;
	//
	// and peer->delay = fabs(peer->offset - p_offset) * 2;
	// and peer->offset needs history, so we're left with
	// p_offset = (t21 + t34) / 2.;
	// peer->disp = 0; (we have no history to augment this)
	// clock_phi = 15e-6;
	// peer->jitter = LOGTOD(sys_precision); (we have no history to augment this)
	// and ntp_proto.c:set_sys_tick_precision() should get us sys_precision.
	//
	// so our answer seems to be:
	//
	// (fabs(t21 + t34) + peer->rootdelay) / 3.
	// + 0 (peer->disp)
	// + peer->rootdisp
	// + 15e-6 (clock_phi)
	// + LOGTOD(sys_precision)

	/* root delay cannot be negative; u_fp is unsigned on the wire */
	INSIST( FPTOD(p_rdly) >= 0. );
#if 1
	*synch_distance = (fabs(t21 + t34) + FPTOD(p_rdly)) / 3.
		+ 0.
		+ FPTOD(p_rdsp)
		+ 15e-6
		+ 0.	/* LOGTOD(sys_precision) when we can get it */
		;
	INSIST( *synch_distance >= 0. );
#else
	/* simpler historical formula, kept for reference */
	*synch_distance = (FPTOD(p_rdly) + FPTOD(p_rdsp))/2.0;
#endif

#ifdef DEBUG
	if (debug > 3) {
		printf("sntp rootdelay: %f\n", FPTOD(p_rdly));
		printf("sntp rootdisp: %f\n", FPTOD(p_rdsp));
		printf("sntp syncdist: %f\n", *synch_distance);

		pkt_output(rpkt, rpktl, stdout);

		printf("sntp offset_calculation: rpkt->reftime:\n");
		l_fp_output(&p_ref, stdout);
		printf("sntp offset_calculation: rpkt->org:\n");
		l_fp_output(&p_org, stdout);
		printf("sntp offset_calculation: rpkt->rec:\n");
		l_fp_output(&p_rec, stdout);
		printf("sntp offset_calculation: rpkt->xmt:\n");
		l_fp_output(&p_xmt, stdout);
	}
#endif

	TRACE(3, ("sntp offset_calculation:\trec - org t21: %.6f\n"
		  "\txmt - dst t34: %.6f\tdelta: %.6f\toffset: %.6f\n",
		  t21, t34, delta, *offset));

	return;
}
1418
1419
1420
1421 /* Compute the 8 bits for li_vn_mode */
1422 void
1423 set_li_vn_mode (
1424 struct pkt *spkt,
1425 char leap,
1426 char version,
1427 char mode
1428 )
1429 {
1430 if (leap > 3) {
1431 msyslog(LOG_DEBUG, "set_li_vn_mode: leap > 3, using max. 3");
1432 leap = 3;
1433 }
1434
1435 if ((unsigned char)version > 7) {
1436 msyslog(LOG_DEBUG, "set_li_vn_mode: version < 0 or > 7, using 4");
1437 version = 4;
1438 }
1439
1440 if (mode > 7) {
1441 msyslog(LOG_DEBUG, "set_li_vn_mode: mode > 7, using client mode 3");
1442 mode = 3;
1443 }
1444
1445 spkt->li_vn_mode = leap << 6;
1446 spkt->li_vn_mode |= version << 3;
1447 spkt->li_vn_mode |= mode;
1448 }
1449
1450
1451 /*
1452 ** set_time applies 'offset' to the local clock.
1453 */
1454 int
1455 set_time(
1456 double offset
1457 )
1458 {
1459 int rc;
1460
1461 if (time_adjusted)
1462 return EX_OK;
1463
1464 /*
1465 ** If we can step but we cannot slew, then step.
1466 ** If we can step or slew and and |offset| > steplimit, then step.
1467 */
1468 if (ENABLED_OPT(STEP) &&
1469 ( !ENABLED_OPT(SLEW)
1470 || (ENABLED_OPT(SLEW) && (fabs(offset) > steplimit))
1471 )) {
1472 rc = step_systime(offset);
1473
1474 /* If there was a problem, can we rely on errno? */
1475 if (1 == rc)
1476 time_adjusted = TRUE;
1477 return (time_adjusted)
1478 ? EX_OK
1479 : 1;
1480 /*
1481 ** In case of error, what should we use?
1482 ** EX_UNAVAILABLE?
1483 ** EX_OSERR?
1484 ** EX_NOPERM?
1485 */
1486 }
1487
1488 if (ENABLED_OPT(SLEW)) {
1489 rc = adj_systime(offset);
1490
1491 /* If there was a problem, can we rely on errno? */
1492 if (1 == rc)
1493 time_adjusted = TRUE;
1494 return (time_adjusted)
1495 ? EX_OK
1496 : 1;
1497 /*
1498 ** In case of error, what should we use?
1499 ** EX_UNAVAILABLE?
1500 ** EX_OSERR?
1501 ** EX_NOPERM?
1502 */
1503 }
1504
1505 return EX_SOFTWARE;
1506 }
1507
1508
1509 int
1510 libevent_version_ok(void)
1511 {
1512 ev_uint32_t v_compile_maj;
1513 ev_uint32_t v_run_maj;
1514
1515 v_compile_maj = LIBEVENT_VERSION_NUMBER & 0xffff0000;
1516 v_run_maj = event_get_version_number() & 0xffff0000;
1517 if (v_compile_maj != v_run_maj) {
1518 fprintf(stderr,
1519 "Incompatible libevent versions: have %s, built with %s\n",
1520 event_get_version(),
1521 LIBEVENT_VERSION);
1522 return 0;
1523 }
1524 return 1;
1525 }
1526
1527 /*
1528 * gettimeofday_cached()
1529 *
1530 * Clones the event_base_gettimeofday_cached() interface but ensures the
1531 * times are always on the gettimeofday() 1970 scale. Older libevent 2
1532 * sometimes used gettimeofday(), sometimes the since-system-start
1533 * clock_gettime(CLOCK_MONOTONIC), depending on the platform.
1534 *
1535 * It is not cleanly possible to tell which timescale older libevent is
1536 * using.
1537 *
1538 * The strategy involves 1 hour thresholds chosen to be far longer than
1539 * the duration of a round of libevent callbacks, which share a cached
1540 * start-of-round time. First compare the last cached time with the
1541 * current gettimeofday() time. If they are within one hour, libevent
1542 * is using the proper timescale so leave the offset 0. Otherwise,
1543 * compare libevent's cached time and the current time on the monotonic
1544 * scale. If they are within an hour, libevent is using the monotonic
1545 * scale so calculate the offset to add to such times to bring them to
1546 * gettimeofday()'s scale.
1547 */
int
gettimeofday_cached(
	struct event_base * b,
	struct timeval * caller_tv
	)
{
#if defined(_EVENT_HAVE_CLOCK_GETTIME) && defined(CLOCK_MONOTONIC)
	/* last libevent cached time seen, and the base it came from */
	static struct event_base * cached_b;
	static struct timeval cached;
	/* 'cached' with 'offset' applied; what callers receive */
	static struct timeval adj_cached;
	/* correction added to libevent times; zero unless the old
	 * monotonic-scale libevent behavior was detected */
	static struct timeval offset;
	static int offset_ready;	/* calibration done once per run */
	struct timeval latest;
	struct timeval systemt;		/* gettimeofday() sample */
	struct timespec ts;
	struct timeval mono;		/* CLOCK_MONOTONIC as timeval */
	struct timeval diff;
	int cgt_rc;
	int gtod_rc;

	event_base_gettimeofday_cached(b, &latest);
	/* fast path: same base and unchanged cached time -> reuse the
	 * previously adjusted value */
	if (b == cached_b &&
	    !memcmp(&latest, &cached, sizeof(latest))) {
		*caller_tv = adj_cached;
		return 0;
	}
	cached = latest;
	cached_b = b;
	if (!offset_ready) {
		/* one-time calibration: decide which timescale this
		 * libevent is using (see block comment above) */
		cgt_rc = clock_gettime(CLOCK_MONOTONIC, &ts);
		gtod_rc = gettimeofday(&systemt, NULL);
		if (0 != gtod_rc) {
			msyslog(LOG_ERR,
				"%s: gettimeofday() error %m",
				progname);
			exit(1);
		}
		diff = sub_tval(systemt, latest);
		if (debug > 1)
			printf("system minus cached %+ld.%06ld\n",
			       (long)diff.tv_sec, (long)diff.tv_usec);
		if (0 != cgt_rc || labs((long)diff.tv_sec) < 3600) {
			/*
			 * Either use_monotonic == 0, or this libevent
			 * has been repaired.  Leave offset at zero.
			 */
		} else {
			mono.tv_sec = ts.tv_sec;
			mono.tv_usec = ts.tv_nsec / 1000;
			diff = sub_tval(latest, mono);
			if (debug > 1)
				printf("cached minus monotonic %+ld.%06ld\n",
				       (long)diff.tv_sec, (long)diff.tv_usec);
			if (labs((long)diff.tv_sec) < 3600) {
				/* older libevent2 using monotonic */
				offset = sub_tval(systemt, mono);
				TRACE(1, ("%s: Offsetting libevent CLOCK_MONOTONIC times by %+ld.%06ld\n",
					  "gettimeofday_cached",
					  (long)offset.tv_sec,
					  (long)offset.tv_usec));
			}
		}
		offset_ready = TRUE;
	}
	/* convert the (possibly monotonic) cached time to the 1970
	 * scale and remember it for the fast path above */
	adj_cached = add_tval(cached, offset);
	*caller_tv = adj_cached;

	return 0;
#else
	/* without clock_gettime/CLOCK_MONOTONIC the ambiguity cannot
	 * arise; defer directly to libevent */
	return event_base_gettimeofday_cached(b, caller_tv);
#endif
}
1620
/*
 * set_user_group_ids - dummy to satisfy the reference from
 * libntp/work_fork.c; sntp never drops privileges, so this
 * always reports success.
 */
extern int set_user_group_ids(void);
int
set_user_group_ids(void)
{
	return 1;
}
1627