if_wg.c revision 1.108 1 /* $NetBSD: if_wg.c,v 1.108 2024/07/28 14:49:31 riastradh Exp $ */
2
3 /*
 * Copyright (C) Ryota Ozaki <ozaki.ryota@gmail.com>
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. Neither the name of the project nor the names of its contributors
16 * may be used to endorse or promote products derived from this software
17 * without specific prior written permission.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
20 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
23 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 * SUCH DAMAGE.
30 */
31
32 /*
33 * This network interface aims to implement the WireGuard protocol.
34 * The implementation is based on the paper of WireGuard as of
35 * 2018-06-30 [1]. The paper is referred in the source code with label
36 * [W]. Also the specification of the Noise protocol framework as of
37 * 2018-07-11 [2] is referred with label [N].
38 *
39 * [1] https://www.wireguard.com/papers/wireguard.pdf
40 * [2] http://noiseprotocol.org/noise.pdf
41 */
42
43 #include <sys/cdefs.h>
44 __KERNEL_RCSID(0, "$NetBSD: if_wg.c,v 1.108 2024/07/28 14:49:31 riastradh Exp $");
45
46 #ifdef _KERNEL_OPT
47 #include "opt_altq_enabled.h"
48 #include "opt_inet.h"
49 #endif
50
51 #include <sys/param.h>
52 #include <sys/types.h>
53
54 #include <sys/atomic.h>
55 #include <sys/callout.h>
56 #include <sys/cprng.h>
57 #include <sys/cpu.h>
58 #include <sys/device.h>
59 #include <sys/domain.h>
60 #include <sys/errno.h>
61 #include <sys/intr.h>
62 #include <sys/ioctl.h>
63 #include <sys/kernel.h>
64 #include <sys/kmem.h>
65 #include <sys/mbuf.h>
66 #include <sys/module.h>
67 #include <sys/mutex.h>
68 #include <sys/once.h>
69 #include <sys/percpu.h>
70 #include <sys/pserialize.h>
71 #include <sys/psref.h>
72 #include <sys/queue.h>
73 #include <sys/rwlock.h>
74 #include <sys/socket.h>
75 #include <sys/socketvar.h>
76 #include <sys/sockio.h>
77 #include <sys/sysctl.h>
78 #include <sys/syslog.h>
79 #include <sys/systm.h>
80 #include <sys/thmap.h>
81 #include <sys/threadpool.h>
82 #include <sys/time.h>
83 #include <sys/timespec.h>
84 #include <sys/workqueue.h>
85
86 #include <lib/libkern/libkern.h>
87
88 #include <net/bpf.h>
89 #include <net/if.h>
90 #include <net/if_types.h>
91 #include <net/if_wg.h>
92 #include <net/pktqueue.h>
93 #include <net/route.h>
94
95 #include <netinet/in.h>
96 #include <netinet/in_pcb.h>
97 #include <netinet/in_var.h>
98 #include <netinet/ip.h>
99 #include <netinet/ip_var.h>
100 #include <netinet/udp.h>
101 #include <netinet/udp_var.h>
102
103 #ifdef INET6
104 #include <netinet/ip6.h>
105 #include <netinet6/in6_pcb.h>
106 #include <netinet6/in6_var.h>
107 #include <netinet6/ip6_var.h>
108 #include <netinet6/udp6_var.h>
109 #endif /* INET6 */
110
111 #include <prop/proplib.h>
112
113 #include <crypto/blake2/blake2s.h>
114 #include <crypto/sodium/crypto_aead_chacha20poly1305.h>
115 #include <crypto/sodium/crypto_aead_xchacha20poly1305.h>
116 #include <crypto/sodium/crypto_scalarmult.h>
117
118 #include "ioconf.h"
119
120 #ifdef WG_RUMPKERNEL
121 #include "wg_user.h"
122 #endif
123
/*
 * 32-bit truncation of the system uptime, used for compact session
 * timestamps; define a fallback for kernels lacking the macro.
 */
#ifndef time_uptime32
#define	time_uptime32	((uint32_t)time_uptime)
#endif
127
128 /*
129 * Data structures
130 * - struct wg_softc is an instance of wg interfaces
131 * - It has a list of peers (struct wg_peer)
132 * - It has a threadpool job that sends/receives handshake messages and
133 * runs event handlers
134 * - It has its own two routing tables: one is for IPv4 and the other IPv6
135 * - struct wg_peer is a representative of a peer
136 * - It has a struct work to handle handshakes and timer tasks
137 * - It has a pair of session instances (struct wg_session)
138 * - It has a pair of endpoint instances (struct wg_sockaddr)
139 * - Normally one endpoint is used and the second one is used only on
140 * a peer migration (a change of peer's IP address)
141 * - It has a list of IP addresses and sub networks called allowedips
142 * (struct wg_allowedip)
143 * - A packets sent over a session is allowed if its destination matches
144 * any IP addresses or sub networks of the list
145 * - struct wg_session represents a session of a secure tunnel with a peer
146 * - Two instances of sessions belong to a peer; a stable session and a
147 * unstable session
148 * - A handshake process of a session always starts with a unstable instance
149 * - Once a session is established, its instance becomes stable and the
150 * other becomes unstable instead
151 * - Data messages are always sent via a stable session
152 *
153 * Locking notes:
154 * - Each wg has a mutex(9) wg_lock, and a rwlock(9) wg_rwlock
155 * - Changes to the peer list are serialized by wg_lock
156 * - The peer list may be read with pserialize(9) and psref(9)
157 * - The rwlock (wg_rwlock) protects the routing tables (wg_rtable_ipv[46])
158 * => XXX replace by pserialize when routing table is psz-safe
159 * - Each peer (struct wg_peer, wgp) has a mutex wgp_lock, which can be taken
160 * only in thread context and serializes:
161 * - the stable and unstable session pointers
162 * - all unstable session state
163 * - Packet processing may be done in softint context:
164 * - The stable session can be read under pserialize(9) or psref(9)
165 * - The stable session is always ESTABLISHED
166 * - On a session swap, we must wait for all readers to release a
167 * reference to a stable session before changing wgs_state and
168 * session states
169 * - Lock order: wg_lock -> wgp_lock
170 */
171
172
/* Unconditional logging at the given syslog level, tagged with the caller. */
#define	WGLOG(level, fmt, args...)					      \
	log(level, "%s: " fmt, __func__, ##args)

/* Debug support is compiled in by default; gated at runtime by wg_debug. */
#define	WG_DEBUG

/* Debug options */
#ifdef WG_DEBUG
/* Output debug logs */
#ifndef WG_DEBUG_LOG
#define	WG_DEBUG_LOG
#endif
/* Output trace logs */
#ifndef WG_DEBUG_TRACE
#define	WG_DEBUG_TRACE
#endif
/* Output hash values, etc. */
#ifndef WG_DEBUG_DUMP
#define	WG_DEBUG_DUMP
#endif
/* debug packets */
#ifndef WG_DEBUG_PACKET
#define	WG_DEBUG_PACKET
#endif
/* Make some internal parameters configurable for testing and debugging */
#ifndef WG_DEBUG_PARAMS
#define	WG_DEBUG_PARAMS
#endif
#endif /* WG_DEBUG */

/* If any individual debug option was set by hand, ensure WG_DEBUG is too. */
#ifndef WG_DEBUG
# if defined(WG_DEBUG_LOG) || defined(WG_DEBUG_TRACE) || \
    defined(WG_DEBUG_DUMP) || defined(WG_DEBUG_PARAMS) || \
    defined(WG_DEBUG_PACKET)
#  define WG_DEBUG
# endif
#endif

#ifdef WG_DEBUG
/* Runtime bitmask selecting which debug categories below are active. */
int wg_debug;
#define	WG_DEBUG_FLAGS_LOG	1
#define	WG_DEBUG_FLAGS_TRACE	2
#define	WG_DEBUG_FLAGS_DUMP	4
#define	WG_DEBUG_FLAGS_PACKET	8
#endif


/* Trace the enclosing function and line when the TRACE flag is enabled. */
#ifdef WG_DEBUG_TRACE
#define	WG_TRACE(msg)	 do {						      \
	if (wg_debug & WG_DEBUG_FLAGS_TRACE)				      \
		log(LOG_DEBUG, "%s:%d: %s\n", __func__, __LINE__, (msg));     \
} while (0)
#else
#define	WG_TRACE(msg)	__nothing
#endif

/* Debug logging, gated on the LOG flag. */
#ifdef WG_DEBUG_LOG
#define	WG_DLOG(fmt, args...)	 do {					      \
	if (wg_debug & WG_DEBUG_FLAGS_LOG)				      \
		log(LOG_DEBUG, "%s: " fmt, __func__, ##args);		      \
} while (0)
#else
#define	WG_DLOG(fmt, args...)	__nothing
#endif

/* Rate-limited logging (ppsratecheck(9)) to avoid flooding the log. */
#define	WG_LOG_RATECHECK(wgprc, level, fmt, args...)	do {		      \
	if (ppsratecheck(&(wgprc)->wgprc_lasttime,			      \
		&(wgprc)->wgprc_curpps, 1)) {				      \
		log(level, fmt, ##args);				      \
	}								      \
} while (0)

#ifdef WG_DEBUG_PARAMS
/* Pretend to be under load so the cookie (DoS mitigation) path is taken. */
static bool wg_force_underload = false;
#endif
247
#ifdef WG_DEBUG_DUMP

/* Sentinel returned by gethexdump() on allocation failure; never freed. */
static char enomem[10] = "[enomem]";

#define	MAX_HDUMP_LEN	10000	/* large enough */

254
255 static char *
256 gethexdump(const void *vp, size_t n)
257 {
258 char *buf;
259 const uint8_t *p = vp;
260 size_t i, alloc;
261
262 alloc = n;
263 if (n > MAX_HDUMP_LEN)
264 alloc = MAX_HDUMP_LEN;
265 buf = kmem_alloc(3 * alloc + 5, KM_NOSLEEP);
266 if (buf == NULL)
267 return enomem;
268 for (i = 0; i < alloc; i++)
269 snprintf(buf + 3 * i, 3 + 1, " %02hhx", p[i]);
270 if (alloc != n)
271 snprintf(buf + 3 * i, 4 + 1, " ...");
272 return buf;
273 }
274
275 static void
276 puthexdump(char *buf, const void *p, size_t n)
277 {
278
279 if (buf == NULL || buf == enomem)
280 return;
281 if (n > MAX_HDUMP_LEN)
282 n = MAX_HDUMP_LEN;
283 kmem_free(buf, 3 * n + 5);
284 }
285
#ifdef WG_RUMPKERNEL
/*
 * wg_dump_buf(func, buf, size)
 *
 *	Log a hex dump of buf on behalf of func when the DUMP debug
 *	flag is enabled; otherwise do nothing.
 */
static void
wg_dump_buf(const char *func, const char *buf, const size_t size)
{
	char *dump;

	if ((wg_debug & WG_DEBUG_FLAGS_DUMP) == 0)
		return;

	dump = gethexdump(buf, size);
	log(LOG_DEBUG, "%s: %s\n", func, dump);
	puthexdump(dump, buf, size);
}
#endif
299
300 static void
301 wg_dump_hash(const uint8_t *func, const uint8_t *name, const uint8_t *hash,
302 const size_t size)
303 {
304 if ((wg_debug & WG_DEBUG_FLAGS_DUMP) == 0)
305 return;
306
307 char *hex = gethexdump(hash, size);
308
309 log(LOG_DEBUG, "%s: %s: %s\n", func, name, hex);
310 puthexdump(hex, hash, size);
311 }
312
/* Convenience wrappers that tag dumps with the calling function's name. */
#define	WG_DUMP_HASH(name, hash) \
	wg_dump_hash(__func__, name, hash, WG_HASH_LEN)
#define	WG_DUMP_HASH48(name, hash) \
	wg_dump_hash(__func__, name, hash, 48)
#define	WG_DUMP_BUF(buf, size) \
	wg_dump_buf(__func__, buf, size)
#else
#define	WG_DUMP_HASH(name, hash)	__nothing
#define	WG_DUMP_HASH48(name, hash)	__nothing
#define	WG_DUMP_BUF(buf, size)		__nothing
#endif /* WG_DEBUG_DUMP */
324
/* chosen somewhat arbitrarily -- fits in signed 16 bits NUL-terminated */
#define	WG_MAX_PROPLEN		32766

#define	WG_MTU			1420
#define	WG_ALLOWEDIPS		16

#define	CURVE25519_KEY_LEN	32
/*
 * TAI64N timestamp length.  Parenthesized so the macro remains a
 * single term in any arithmetic context (e.g. x / TAI64N_LEN).
 */
#define	TAI64N_LEN		(sizeof(uint32_t) * 3)
#define	POLY1305_AUTHTAG_LEN	16
#define	HMAC_BLOCK_LEN		64

/* [N] 4.1: "DHLEN must be 32 or greater."  WireGuard chooses 32. */
/* [N] 4.3: Hash functions */
#define	NOISE_DHLEN		32
/* [N] 4.3: "Must be 32 or 64."  WireGuard chooses 32. */
#define	NOISE_HASHLEN		32
#define	NOISE_BLOCKLEN		64
#define	NOISE_HKDF_OUTPUT_LEN	NOISE_HASHLEN
/* [N] 5.1: "k" */
#define	NOISE_CIPHER_KEY_LEN	32
/*
 * [N] 9.2: "psk"
 *      "... psk is a 32-byte secret value provided by the application."
 */
#define	NOISE_PRESHARED_KEY_LEN	32

#define	WG_STATIC_KEY_LEN	CURVE25519_KEY_LEN
#define	WG_TIMESTAMP_LEN	TAI64N_LEN

#define	WG_PRESHARED_KEY_LEN	NOISE_PRESHARED_KEY_LEN

#define	WG_COOKIE_LEN		16
#define	WG_MAC_LEN		16
#define	WG_COOKIESECRET_LEN	32

#define	WG_EPHEMERAL_KEY_LEN	CURVE25519_KEY_LEN
/* [N] 5.2: "ck: A chaining key of HASHLEN bytes" */
#define	WG_CHAINING_KEY_LEN	NOISE_HASHLEN
/* [N] 5.2: "h: A hash output of HASHLEN bytes" */
#define	WG_HASH_LEN		NOISE_HASHLEN
#define	WG_CIPHER_KEY_LEN	NOISE_CIPHER_KEY_LEN
#define	WG_DH_OUTPUT_LEN	NOISE_DHLEN
#define	WG_KDF_OUTPUT_LEN	NOISE_HKDF_OUTPUT_LEN
#define	WG_AUTHTAG_LEN		POLY1305_AUTHTAG_LEN
#define	WG_DATA_KEY_LEN		32
#define	WG_SALT_LEN		24
371
/*
 * The protocol messages
 */

/* Common header: every message begins with a 32-bit type code. */
struct wg_msg {
	uint32_t	wgm_type;
} __packed;

/* [W] 5.4.2 First Message: Initiator to Responder */
struct wg_msg_init {
	uint32_t	wgmi_type;	/* WG_MSG_TYPE_INIT */
	uint32_t	wgmi_sender;	/* initiator's session index */
	uint8_t		wgmi_ephemeral[WG_EPHEMERAL_KEY_LEN];
	uint8_t		wgmi_static[WG_STATIC_KEY_LEN + WG_AUTHTAG_LEN];
	uint8_t		wgmi_timestamp[WG_TIMESTAMP_LEN + WG_AUTHTAG_LEN];
	uint8_t		wgmi_mac1[WG_MAC_LEN];
	uint8_t		wgmi_mac2[WG_MAC_LEN];
} __packed;

/* [W] 5.4.3 Second Message: Responder to Initiator */
struct wg_msg_resp {
	uint32_t	wgmr_type;	/* WG_MSG_TYPE_RESP */
	uint32_t	wgmr_sender;	/* responder's session index */
	uint32_t	wgmr_receiver;	/* initiator's index, echoed back */
	uint8_t		wgmr_ephemeral[WG_EPHEMERAL_KEY_LEN];
	/* AEAD of the empty string: the authentication tag alone */
	uint8_t		wgmr_empty[0 + WG_AUTHTAG_LEN];
	uint8_t		wgmr_mac1[WG_MAC_LEN];
	uint8_t		wgmr_mac2[WG_MAC_LEN];
} __packed;

/* [W] 5.4.6 Subsequent Messages: Transport Data Messages */
struct wg_msg_data {
	uint32_t	wgmd_type;	/* WG_MSG_TYPE_DATA */
	uint32_t	wgmd_receiver;	/* receiver's session index */
	uint64_t	wgmd_counter;	/* nonce / replay counter */
	uint32_t	wgmd_packet[0];	/* encrypted payload follows */
} __packed;

/* [W] 5.4.7 Under Load: Cookie Reply Message */
struct wg_msg_cookie {
	uint32_t	wgmc_type;	/* WG_MSG_TYPE_COOKIE */
	uint32_t	wgmc_receiver;
	uint8_t		wgmc_salt[WG_SALT_LEN];
	uint8_t		wgmc_cookie[WG_COOKIE_LEN + WG_AUTHTAG_LEN];
} __packed;

/* Wire values of the type field. */
#define	WG_MSG_TYPE_INIT		1
#define	WG_MSG_TYPE_RESP		2
#define	WG_MSG_TYPE_COOKIE		3
#define	WG_MSG_TYPE_DATA		4
#define	WG_MSG_TYPE_MAX			WG_MSG_TYPE_DATA
422
/* Sliding windows */

#define	SLIWIN_BITS	2048u
#define	SLIWIN_TYPE	uint32_t
/*
 * Bits per window word.  Must be parenthesized: uses such as
 * "S / SLIWIN_BPW" and "S % SLIWIN_BPW" would otherwise associate as
 * (S/NBBY)*sizeof(SLIWIN_TYPE) and (S%NBBY)*sizeof(SLIWIN_TYPE),
 * corrupting the replay-window word/bit indexing in sliwin_update.
 */
#define	SLIWIN_BPW	(NBBY*sizeof(SLIWIN_TYPE))
#define	SLIWIN_WORDS	howmany(SLIWIN_BITS, SLIWIN_BPW)
#define	SLIWIN_NPKT	(SLIWIN_BITS - NBBY*sizeof(SLIWIN_TYPE))

/*
 * Replay-detection window: bitmap B of recently accepted sequence
 * numbers at or below T, the highest sequence number seen so far.
 */
struct sliwin {
	SLIWIN_TYPE	B[SLIWIN_WORDS];
	uint64_t	T;
};
435
436 static void
437 sliwin_reset(struct sliwin *W)
438 {
439
440 memset(W, 0, sizeof(*W));
441 }
442
/*
 * sliwin_check_fast(W, S)
 *
 *	Lockless pre-screen of sequence number S against window W.
 *	Returns EAUTH if S is certainly too old to accept; returns 0 if
 *	S may be acceptable, in which case the caller must confirm with
 *	sliwin_update under the recvwin lock.
 */
static int
sliwin_check_fast(const volatile struct sliwin *W, uint64_t S)
{

	/*
	 * If it's more than one window older than the highest sequence
	 * number we've seen, reject.
	 */
#ifdef __HAVE_ATOMIC64_LOADSTORE
	if (S + SLIWIN_NPKT < atomic_load_relaxed(&W->T))
		return EAUTH;
#endif

	/*
	 * Otherwise, we need to take the lock to decide, so don't
	 * reject just yet.  Caller must serialize a call to
	 * sliwin_update in this case.
	 */
	return 0;
}
463
/*
 * sliwin_update(W, S)
 *
 *	Validate and record sequence number S in window W.  Returns 0
 *	if S is new and within the window, EAUTH if it is a replay or
 *	too old.  Calls must be serialized by the caller.
 */
static int
sliwin_update(struct sliwin *W, uint64_t S)
{
	unsigned word, bit;

	/*
	 * If it's more than one window older than the highest sequence
	 * number we've seen, reject.
	 */
	if (S + SLIWIN_NPKT < W->T)
		return EAUTH;

	/*
	 * If it's higher than the highest sequence number we've seen,
	 * advance the window.
	 */
	if (S > W->T) {
		uint64_t i = W->T / SLIWIN_BPW;
		uint64_t j = S / SLIWIN_BPW;
		unsigned k;

		/* Clear the bitmap words newly exposed by the advance. */
		for (k = 0; k < MIN(j - i, SLIWIN_WORDS); k++)
			W->B[(i + k + 1) % SLIWIN_WORDS] = 0;
#ifdef __HAVE_ATOMIC64_LOADSTORE
		/* Publish T for the lockless sliwin_check_fast path. */
		atomic_store_relaxed(&W->T, S);
#else
		W->T = S;
#endif
	}

	/* Test and set the bit -- if already set, reject. */
	word = (S / SLIWIN_BPW) % SLIWIN_WORDS;
	bit = S % SLIWIN_BPW;
	if (W->B[word] & (1UL << bit))
		return EAUTH;
	W->B[word] |= 1U << bit;

	/* Accept! */
	return 0;
}
504
/*
 * One (possibly half-established) session with a peer.  Stable/unstable
 * roles and locking rules are described in "Locking notes" above.
 */
struct wg_session {
	struct wg_peer	*wgs_peer;	/* backpointer to the owning peer */
	struct psref_target
			wgs_psref;	/* for psref(9) readers */

	int		wgs_state;	/* session lifecycle state */
#define	WGS_STATE_UNKNOWN	0	/* unused slot */
#define	WGS_STATE_INIT_ACTIVE	1	/* we sent the init message */
#define	WGS_STATE_INIT_PASSIVE	2	/* we answered a peer's init */
#define	WGS_STATE_ESTABLISHED	3	/* keys derived; data may flow */
#define	WGS_STATE_DESTROYING	4	/* being torn down */

	/* Timestamps (time_uptime32); maintained by code not shown here. */
	volatile uint32_t
			wgs_time_established;
	volatile uint32_t
			wgs_time_last_data_sent;
	bool		wgs_is_initiator;	/* we started the handshake */

	uint32_t	wgs_local_index;	/* our session index */
	uint32_t	wgs_remote_index;	/* the peer's session index */
#ifdef __HAVE_ATOMIC64_LOADSTORE
	volatile uint64_t
			wgs_send_counter;	/* data nonce counter */
#else
	kmutex_t	wgs_send_counter_lock;	/* serializes the counter */
	uint64_t	wgs_send_counter;
#endif

	/* Receive-side replay window, guarded by its own lock. */
	struct {
		kmutex_t	lock;
		struct sliwin	window;
	}		*wgs_recvwin;

	uint8_t		wgs_handshake_hash[WG_HASH_LEN];   /* [N] 5.2 "h" */
	uint8_t		wgs_chaining_key[WG_CHAINING_KEY_LEN]; /* [N] 5.2 "ck" */
	uint8_t		wgs_ephemeral_key_pub[WG_EPHEMERAL_KEY_LEN];
	uint8_t		wgs_ephemeral_key_priv[WG_EPHEMERAL_KEY_LEN];
	uint8_t		wgs_ephemeral_key_peer[WG_EPHEMERAL_KEY_LEN];
	uint8_t		wgs_tkey_send[WG_DATA_KEY_LEN];	/* transport send key */
	uint8_t		wgs_tkey_recv[WG_DATA_KEY_LEN];	/* transport recv key */
};
546
/*
 * A peer endpoint address with a psref target, so readers can use it
 * while it is being replaced on endpoint migration.
 */
struct wg_sockaddr {
	union {
		struct sockaddr_storage _ss;
		struct sockaddr _sa;
		struct sockaddr_in _sin;
		struct sockaddr_in6 _sin6;
	};
	struct psref_target	wgsa_psref;
};

/* Accessors for the address union, by sockaddr flavor. */
#define	wgsatoss(wgsa)		(&(wgsa)->_ss)
#define	wgsatosa(wgsa)		(&(wgsa)->_sa)
#define	wgsatosin(wgsa)		(&(wgsa)->_sin)
#define	wgsatosin6(wgsa)	(&(wgsa)->_sin6)

#define	wgsa_family(wgsa)	(wgsatosa(wgsa)->sa_family)
563
struct wg_peer;

/*
 * One allowedips entry: an address/CIDR prefix whose traffic may be
 * carried over the session with wga_peer.  Linked into the per-family
 * radix tree through wga_nodes.
 */
struct wg_allowedip {
	struct radix_node	wga_nodes[2];	/* radix tree linkage */
	struct wg_sockaddr	_wga_sa_addr;
	struct wg_sockaddr	_wga_sa_mask;
#define	wga_sa_addr		_wga_sa_addr._sa
#define	wga_sa_mask		_wga_sa_mask._sa

	int			wga_family;	/* AF_INET or AF_INET6 */
	uint8_t			wga_cidr;	/* prefix length */
	union {
		struct in_addr _ip4;
		struct in6_addr _ip6;
	}			wga_addr;
#define	wga_addr4	wga_addr._ip4
#define	wga_addr6	wga_addr._ip6

	struct wg_peer		*wga_peer;	/* peer owning this prefix */
};

/* Big-endian TAI64N timestamp ([W] 5.4.2). */
typedef uint8_t wg_timestamp_t[WG_TIMESTAMP_LEN];

/* State for ppsratecheck(9)-based log rate limiting. */
struct wg_ppsratecheck {
	struct timeval		wgprc_lasttime;
	int			wgprc_curpps;
};
590
struct wg_softc;

/*
 * A configured peer.  Serialized by wgp_lock except for fields marked
 * volatile or read under pserialize/psref; see "Locking notes" above.
 */
struct wg_peer {
	struct wg_softc		*wgp_sc;	/* backpointer to interface */
	char			wgp_name[WG_PEER_NAME_MAXLEN + 1];
	struct pslist_entry	wgp_peerlist_entry; /* on wg's peer list */
	pserialize_t		wgp_psz;
	struct psref_target	wgp_psref;
	kmutex_t		*wgp_lock;	/* serializes session state */
	kmutex_t		*wgp_intr_lock;

	uint8_t	wgp_pubkey[WG_STATIC_KEY_LEN];	/* peer's static public key */
	struct wg_sockaddr	*wgp_endpoint;	/* current endpoint */
	struct wg_sockaddr	*wgp_endpoint0;	/* spare, for migration */
	volatile unsigned	wgp_endpoint_changing;
	bool			wgp_endpoint_available;

	/* The preshared key (optional) */
	uint8_t		wgp_psk[WG_PRESHARED_KEY_LEN];

	struct wg_session	*wgp_session_stable;	/* ESTABLISHED one */
	struct wg_session	*wgp_session_unstable;	/* handshake slot */

	/* first outgoing packet awaiting session initiation */
	struct mbuf		*volatile wgp_pending;

	/* timestamp in big-endian */
	wg_timestamp_t	wgp_timestamp_latest_init;

	struct timespec		wgp_last_handshake_time;

	callout_t		wgp_handshake_timeout_timer;
	callout_t		wgp_session_dtor_timer;

	time_t			wgp_handshake_start_time;

	volatile unsigned	wgp_force_rekey;

	int			wgp_n_allowedips;
	struct wg_allowedip	wgp_allowedips[WG_ALLOWEDIPS];

	/* Cookie/mac state for the DoS-mitigation mechanism ([W] 5.4.7). */
	time_t		wgp_latest_cookie_time;
	uint8_t		wgp_latest_cookie[WG_COOKIE_LEN];
	uint8_t		wgp_last_sent_mac1[WG_MAC_LEN];
	bool		wgp_last_sent_mac1_valid;
	uint8_t		wgp_last_sent_cookie[WG_COOKIE_LEN];
	bool		wgp_last_sent_cookie_valid;

	time_t		wgp_last_msg_received_time[WG_MSG_TYPE_MAX];

	time_t		wgp_last_cookiesecret_time;
	uint8_t		wgp_cookiesecret[WG_COOKIESECRET_LEN];

	struct wg_ppsratecheck	wgp_ppsratecheck;	/* log rate limit */

	/* Deferred work and the pending-task bitmask it consumes. */
	struct work		wgp_work;
	unsigned int		wgp_tasks;
#define	WGP_TASK_SEND_INIT_MESSAGE		__BIT(0)
#define	WGP_TASK_RETRY_HANDSHAKE		__BIT(1)
#define	WGP_TASK_ESTABLISH_SESSION		__BIT(2)
#define	WGP_TASK_ENDPOINT_CHANGED		__BIT(3)
#define	WGP_TASK_SEND_KEEPALIVE_MESSAGE		__BIT(4)
#define	WGP_TASK_DESTROY_PREV_SESSION		__BIT(5)
};
654
struct wg_ops;

/*
 * Per-interface (wg(4) instance) state.
 */
struct wg_softc {
	struct ifnet	wg_if;		/* generic interface state */
	LIST_ENTRY(wg_softc) wg_list;
	kmutex_t	*wg_lock;	/* serializes peer list changes */
	kmutex_t	*wg_intr_lock;
	krwlock_t	*wg_rwlock;	/* protects wg_rtable_ipv[46] */

	uint8_t		wg_privkey[WG_STATIC_KEY_LEN];	/* our static key */
	uint8_t		wg_pubkey[WG_STATIC_KEY_LEN];	/* its public half */

	int		wg_npeers;
	struct pslist_head	wg_peers;	/* pserialize-safe peer list */
	struct thmap	*wg_peers_bypubkey;
	struct thmap	*wg_peers_byname;
	struct thmap	*wg_sessions_byindex;
	uint16_t	wg_listen_port;

	struct threadpool	*wg_threadpool;

	struct threadpool_job	wg_job;
	int			wg_upcalls;	/* sockets needing service */
#define	WG_UPCALL_INET		__BIT(0)
#define	WG_UPCALL_INET6		__BIT(1)

#ifdef INET
	struct socket		*wg_so4;
	struct radix_node_head	*wg_rtable_ipv4;	/* allowedips, v4 */
#endif
#ifdef INET6
	struct socket		*wg_so6;
	struct radix_node_head	*wg_rtable_ipv6;	/* allowedips, v6 */
#endif

	struct wg_ppsratecheck	wg_ppsratecheck;	/* log rate limit */

	struct wg_ops	*wg_ops;	/* kernel vs rump-user transport */

#ifdef WG_RUMPKERNEL
	struct wg_user	*wg_user;
#endif
};
698
/* [W] 6.1 Preliminaries -- protocol timing/limit constants. */
#define	WG_REKEY_AFTER_MESSAGES		(1ULL << 60)
#define	WG_REJECT_AFTER_MESSAGES	(UINT64_MAX - (1 << 13))
#define	WG_REKEY_AFTER_TIME		120
#define	WG_REJECT_AFTER_TIME		180
#define	WG_REKEY_ATTEMPT_TIME		90
#define	WG_REKEY_TIMEOUT		5
#define	WG_KEEPALIVE_TIMEOUT		10

#define	WG_COOKIE_TIME			120
#define	WG_COOKIESECRET_TIME		(2 * 60)

/* Runtime copies of the parameters above (times in seconds). */
static uint64_t wg_rekey_after_messages = WG_REKEY_AFTER_MESSAGES;
static uint64_t wg_reject_after_messages = WG_REJECT_AFTER_MESSAGES;
static unsigned wg_rekey_after_time = WG_REKEY_AFTER_TIME;
static unsigned wg_reject_after_time = WG_REJECT_AFTER_TIME;
static unsigned wg_rekey_attempt_time = WG_REKEY_ATTEMPT_TIME;
static unsigned wg_rekey_timeout = WG_REKEY_TIMEOUT;
static unsigned wg_keepalive_timeout = WG_KEEPALIVE_TIMEOUT;
718
719 static struct mbuf *
720 wg_get_mbuf(size_t, size_t);
721
722 static void wg_send_data_msg(struct wg_peer *, struct wg_session *,
723 struct mbuf *);
724 static void wg_send_cookie_msg(struct wg_softc *, struct wg_peer *,
725 const uint32_t, const uint8_t [WG_MAC_LEN],
726 const struct sockaddr *);
727 static void wg_send_handshake_msg_resp(struct wg_softc *, struct wg_peer *,
728 struct wg_session *, const struct wg_msg_init *);
729 static void wg_send_keepalive_msg(struct wg_peer *, struct wg_session *);
730
731 static struct wg_peer *
732 wg_pick_peer_by_sa(struct wg_softc *, const struct sockaddr *,
733 struct psref *);
734 static struct wg_peer *
735 wg_lookup_peer_by_pubkey(struct wg_softc *,
736 const uint8_t [WG_STATIC_KEY_LEN], struct psref *);
737
738 static struct wg_session *
739 wg_lookup_session_by_index(struct wg_softc *,
740 const uint32_t, struct psref *);
741
742 static void wg_update_endpoint_if_necessary(struct wg_peer *,
743 const struct sockaddr *);
744
745 static void wg_schedule_session_dtor_timer(struct wg_peer *);
746
747 static bool wg_is_underload(struct wg_softc *, struct wg_peer *, int);
748 static void wg_calculate_keys(struct wg_session *, const bool);
749
750 static void wg_clear_states(struct wg_session *);
751
752 static void wg_get_peer(struct wg_peer *, struct psref *);
753 static void wg_put_peer(struct wg_peer *, struct psref *);
754
755 static int wg_send_so(struct wg_peer *, struct mbuf *);
756 static int wg_send_udp(struct wg_peer *, struct mbuf *);
757 static int wg_output(struct ifnet *, struct mbuf *,
758 const struct sockaddr *, const struct rtentry *);
759 static void wg_input(struct ifnet *, struct mbuf *, const int);
760 static int wg_ioctl(struct ifnet *, u_long, void *);
761 static int wg_bind_port(struct wg_softc *, const uint16_t);
762 static int wg_init(struct ifnet *);
763 #ifdef ALTQ
764 static void wg_start(struct ifnet *);
765 #endif
766 static void wg_stop(struct ifnet *, int);
767
768 static void wg_peer_work(struct work *, void *);
769 static void wg_job(struct threadpool_job *);
770 static void wgintr(void *);
771 static void wg_purge_pending_packets(struct wg_peer *);
772
773 static int wg_clone_create(struct if_clone *, int);
774 static int wg_clone_destroy(struct ifnet *);
775
/*
 * Transport operations, switchable so a rump kernel can do packet I/O
 * through a user-space helper instead of in-kernel UDP sockets.
 */
struct wg_ops {
	int (*send_hs_msg)(struct wg_peer *, struct mbuf *);
	int (*send_data_msg)(struct wg_peer *, struct mbuf *);
	void (*input)(struct ifnet *, struct mbuf *, const int);
	int (*bind_port)(struct wg_softc *, const uint16_t);
};

/* Normal in-kernel operation. */
struct wg_ops wg_ops_rumpkernel = {
	.send_hs_msg = wg_send_so,
	.send_data_msg = wg_send_udp,
	.input = wg_input,
	.bind_port = wg_bind_port,
};
789
#ifdef WG_RUMPKERNEL
static bool	wg_user_mode(struct wg_softc *);
static int	wg_ioctl_linkstr(struct wg_softc *, struct ifdrv *);

static int	wg_send_user(struct wg_peer *, struct mbuf *);
static void	wg_input_user(struct ifnet *, struct mbuf *, const int);
static int	wg_bind_port_user(struct wg_softc *, const uint16_t);

/* Rump user-mode operation: all packet I/O via the wg_user helper. */
struct wg_ops wg_ops_rumpuser = {
	.send_hs_msg = wg_send_user,
	.send_data_msg = wg_send_user,
	.input = wg_input_user,
	.bind_port = wg_bind_port_user,
};
#endif
805
/* Iterators/mutators for the pserialize-safe per-interface peer list. */
#define	WG_PEER_READER_FOREACH(wgp, wg)					      \
	PSLIST_READER_FOREACH((wgp), &(wg)->wg_peers, struct wg_peer,	      \
	    wgp_peerlist_entry)
#define	WG_PEER_WRITER_FOREACH(wgp, wg)					      \
	PSLIST_WRITER_FOREACH((wgp), &(wg)->wg_peers, struct wg_peer,	      \
	    wgp_peerlist_entry)
#define	WG_PEER_WRITER_INSERT_HEAD(wgp, wg)				      \
	PSLIST_WRITER_INSERT_HEAD(&(wg)->wg_peers, (wgp), wgp_peerlist_entry)
#define	WG_PEER_WRITER_REMOVE(wgp)					      \
	PSLIST_WRITER_REMOVE((wgp), wgp_peerlist_entry)

/* Radix-tree entry mapping an allowed-IP route to its peer. */
struct wg_route {
	struct radix_node	wgr_nodes[2];
	struct wg_peer		*wgr_peer;
};
821
822 static struct radix_node_head *
823 wg_rnh(struct wg_softc *wg, const int family)
824 {
825
826 switch (family) {
827 case AF_INET:
828 return wg->wg_rtable_ipv4;
829 #ifdef INET6
830 case AF_INET6:
831 return wg->wg_rtable_ipv6;
832 #endif
833 default:
834 return NULL;
835 }
836 }
837

/*
 * Global variables
 */

/* Number of wg interfaces in existence; consulted on module detach. */
static volatile unsigned wg_count __cacheline_aligned;

/* psref(9) class for the psref targets embedded in the structs above. */
struct psref_class *wg_psref_class __read_mostly;

static struct if_clone wg_cloner =
    IF_CLONE_INITIALIZER("wg", wg_clone_create, wg_clone_destroy);

/* Shared input packet queue and peer workqueue; see wginitqueues(). */
static struct pktqueue *wg_pktq __read_mostly;
static struct workqueue *wg_wq __read_mostly;
851
void wgattach(int);
/* ARGSUSED */
void
wgattach(int count)
{
	/*
	 * Nothing to do here, initialization is handled by the
	 * module initialization code in wginit() below.
	 */
}
862
/*
 * wginit()
 *
 *	Module initialization: create the psref class and register the
 *	cloner.  Queue creation is deferred to wginitqueues().
 */
static void
wginit(void)
{

	wg_psref_class = psref_class_create("wg", IPL_SOFTNET);

	if_clone_attach(&wg_cloner);
}
871
/*
 * XXX Kludge: This should just happen in wginit, but workqueue_create
 * cannot be run until after CPUs have been detected, and wginit runs
 * before configure.
 */
static int
wginitqueues(void)
{
	int error __diagused;

	/* Shared input packet queue, drained by wgintr(). */
	wg_pktq = pktq_create(IFQ_MAXLEN, wgintr, NULL);
	KASSERT(wg_pktq != NULL);

	/* Per-CPU workqueue running wg_peer_work() for deferred tasks. */
	error = workqueue_create(&wg_wq, "wgpeer", wg_peer_work, NULL,
	    PRI_NONE, IPL_SOFTNET, WQ_MPSAFE|WQ_PERCPU);
	KASSERTMSG(error == 0, "error=%d", error);

	return 0;
}
891
/*
 * wg_guarantee_initialized()
 *
 *	Lazily create the global queues, exactly once, on first use.
 */
static void
wg_guarantee_initialized(void)
{
	static ONCE_DECL(init);
	int error __diagused;

	error = RUN_ONCE(&init, wginitqueues);
	KASSERTMSG(error == 0, "error=%d", error);
}
901
902 static int
903 wg_count_inc(void)
904 {
905 unsigned o, n;
906
907 do {
908 o = atomic_load_relaxed(&wg_count);
909 if (o == UINT_MAX)
910 return ENFILE;
911 n = o + 1;
912 } while (atomic_cas_uint(&wg_count, o, n) != o);
913
914 return 0;
915 }
916
/*
 * wg_count_dec()
 *
 *	Drop the global interface count.  The KASSERT catches underflow
 *	(decrementing zero would wrap to UINT_MAX).
 */
static void
wg_count_dec(void)
{
	unsigned c __diagused;

	c = atomic_dec_uint_nv(&wg_count);
	KASSERT(c != UINT_MAX);
}
925
/*
 * wgdetach()
 *
 *	Module detach: refuse with EBUSY while any wg interface still
 *	exists; otherwise tear down the cloner, queues, and psref class.
 */
static int
wgdetach(void)
{

	/* Prevent new interface creation. */
	if_clone_detach(&wg_cloner);

	/* Check whether there are any existing interfaces. */
	if (atomic_load_relaxed(&wg_count)) {
		/* Back out -- reattach the cloner. */
		if_clone_attach(&wg_cloner);
		return EBUSY;
	}

	/* No interfaces left.  Nuke it. */
	/* The queues exist only if wg_guarantee_initialized() ever ran. */
	if (wg_wq)
		workqueue_destroy(wg_wq);
	if (wg_pktq)
		pktq_destroy(wg_pktq);
	psref_class_destroy(wg_psref_class);

	return 0;
}
949
950 static void
951 wg_init_key_and_hash(uint8_t ckey[WG_CHAINING_KEY_LEN],
952 uint8_t hash[WG_HASH_LEN])
953 {
954 /* [W] 5.4: CONSTRUCTION */
955 const char *signature = "Noise_IKpsk2_25519_ChaChaPoly_BLAKE2s";
956 /* [W] 5.4: IDENTIFIER */
957 const char *id = "WireGuard v1 zx2c4 Jason (at) zx2c4.com";
958 struct blake2s state;
959
960 blake2s(ckey, WG_CHAINING_KEY_LEN, NULL, 0,
961 signature, strlen(signature));
962
963 CTASSERT(WG_HASH_LEN == WG_CHAINING_KEY_LEN);
964 memcpy(hash, ckey, WG_CHAINING_KEY_LEN);
965
966 blake2s_init(&state, WG_HASH_LEN, NULL, 0);
967 blake2s_update(&state, ckey, WG_CHAINING_KEY_LEN);
968 blake2s_update(&state, id, strlen(id));
969 blake2s_final(&state, hash);
970
971 WG_DUMP_HASH("ckey", ckey);
972 WG_DUMP_HASH("hash", hash);
973 }
974
975 static void
976 wg_algo_hash(uint8_t hash[WG_HASH_LEN], const uint8_t input[],
977 const size_t inputsize)
978 {
979 struct blake2s state;
980
981 blake2s_init(&state, WG_HASH_LEN, NULL, 0);
982 blake2s_update(&state, hash, WG_HASH_LEN);
983 blake2s_update(&state, input, inputsize);
984 blake2s_final(&state, hash);
985 }
986
987 static void
988 wg_algo_mac(uint8_t out[], const size_t outsize,
989 const uint8_t key[], const size_t keylen,
990 const uint8_t input1[], const size_t input1len,
991 const uint8_t input2[], const size_t input2len)
992 {
993 struct blake2s state;
994
995 blake2s_init(&state, outsize, key, keylen);
996
997 blake2s_update(&state, input1, input1len);
998 if (input2 != NULL)
999 blake2s_update(&state, input2, input2len);
1000 blake2s_final(&state, out);
1001 }
1002
1003 static void
1004 wg_algo_mac_mac1(uint8_t out[], const size_t outsize,
1005 const uint8_t input1[], const size_t input1len,
1006 const uint8_t input2[], const size_t input2len)
1007 {
1008 struct blake2s state;
1009 /* [W] 5.4: LABEL-MAC1 */
1010 const char *label = "mac1----";
1011 uint8_t key[WG_HASH_LEN];
1012
1013 blake2s_init(&state, sizeof(key), NULL, 0);
1014 blake2s_update(&state, label, strlen(label));
1015 blake2s_update(&state, input1, input1len);
1016 blake2s_final(&state, key);
1017
1018 blake2s_init(&state, outsize, key, sizeof(key));
1019 if (input2 != NULL)
1020 blake2s_update(&state, input2, input2len);
1021 blake2s_final(&state, out);
1022 }
1023
1024 static void
1025 wg_algo_mac_cookie(uint8_t out[], const size_t outsize,
1026 const uint8_t input1[], const size_t input1len)
1027 {
1028 struct blake2s state;
1029 /* [W] 5.4: LABEL-COOKIE */
1030 const char *label = "cookie--";
1031
1032 blake2s_init(&state, outsize, NULL, 0);
1033 blake2s_update(&state, label, strlen(label));
1034 blake2s_update(&state, input1, input1len);
1035 blake2s_final(&state, out);
1036 }
1037
/*
 * wg_algo_generate_keypair(pubkey, privkey)
 *
 *	Generate a fresh Curve25519 ephemeral key pair: a random
 *	private key and its public key via scalar multiplication of
 *	the base point.
 */
static void
wg_algo_generate_keypair(uint8_t pubkey[WG_EPHEMERAL_KEY_LEN],
    uint8_t privkey[WG_EPHEMERAL_KEY_LEN])
{

	CTASSERT(WG_EPHEMERAL_KEY_LEN == crypto_scalarmult_curve25519_BYTES);

	cprng_strong(kern_cprng, privkey, WG_EPHEMERAL_KEY_LEN, 0);
	crypto_scalarmult_base(pubkey, privkey);
}
1048
/*
 * wg_algo_dh(out, privkey, pubkey)
 *
 *	out := Curve25519 shared secret of privkey and pubkey
 *	(the Noise DH function, [N] 4.1).
 */
static void
wg_algo_dh(uint8_t out[WG_DH_OUTPUT_LEN],
    const uint8_t privkey[WG_STATIC_KEY_LEN],
    const uint8_t pubkey[WG_STATIC_KEY_LEN])
{

	CTASSERT(WG_STATIC_KEY_LEN == crypto_scalarmult_curve25519_BYTES);

	int ret __diagused = crypto_scalarmult(out, privkey, pubkey);
	KASSERT(ret == 0);
}
1060
1061 static void
1062 wg_algo_hmac(uint8_t out[], const size_t outlen,
1063 const uint8_t key[], const size_t keylen,
1064 const uint8_t in[], const size_t inlen)
1065 {
1066 #define IPAD 0x36
1067 #define OPAD 0x5c
1068 uint8_t hmackey[HMAC_BLOCK_LEN] = {0};
1069 uint8_t ipad[HMAC_BLOCK_LEN];
1070 uint8_t opad[HMAC_BLOCK_LEN];
1071 size_t i;
1072 struct blake2s state;
1073
1074 KASSERT(outlen == WG_HASH_LEN);
1075 KASSERT(keylen <= HMAC_BLOCK_LEN);
1076
1077 memcpy(hmackey, key, keylen);
1078
1079 for (i = 0; i < sizeof(hmackey); i++) {
1080 ipad[i] = hmackey[i] ^ IPAD;
1081 opad[i] = hmackey[i] ^ OPAD;
1082 }
1083
1084 blake2s_init(&state, WG_HASH_LEN, NULL, 0);
1085 blake2s_update(&state, ipad, sizeof(ipad));
1086 blake2s_update(&state, in, inlen);
1087 blake2s_final(&state, out);
1088
1089 blake2s_init(&state, WG_HASH_LEN, NULL, 0);
1090 blake2s_update(&state, opad, sizeof(opad));
1091 blake2s_update(&state, out, WG_HASH_LEN);
1092 blake2s_final(&state, out);
1093 #undef IPAD
1094 #undef OPAD
1095 }
1096
/*
 * wg_algo_kdf(out1, out2, out3, ckey, input, inputlen)
 *
 *	HKDF-style key derivation ([N] 4.3) with HMAC-BLAKE2s:
 *	extract a PRK = HMAC(ckey, input), then expand it into up to
 *	three 32-byte outputs, out1 = HMAC(PRK, 0x1),
 *	out2 = HMAC(PRK, out1 || 0x2), out3 = HMAC(PRK, out2 || 0x3).
 *	Pass out2 = NULL (and/or out3 = NULL) to stop early.  out1
 *	may alias ckey: ckey is fully consumed before out1 is
 *	written.
 */
static void
wg_algo_kdf(uint8_t out1[WG_KDF_OUTPUT_LEN], uint8_t out2[WG_KDF_OUTPUT_LEN],
    uint8_t out3[WG_KDF_OUTPUT_LEN], const uint8_t ckey[WG_CHAINING_KEY_LEN],
    const uint8_t input[], const size_t inputlen)
{
	uint8_t tmp1[WG_KDF_OUTPUT_LEN], tmp2[WG_KDF_OUTPUT_LEN + 1];
	uint8_t one[1];

	/*
	 * [N] 4.3: "an input_key_material byte sequence with length
	 * either zero bytes, 32 bytes, or DHLEN bytes."
	 */
	KASSERT(inputlen == 0 || inputlen == 32 || inputlen == NOISE_DHLEN);

	WG_DUMP_HASH("ckey", ckey);
	if (input != NULL)
		WG_DUMP_HASH("input", input);
	/* Extract step: tmp1 is the PRK. */
	wg_algo_hmac(tmp1, sizeof(tmp1), ckey, WG_CHAINING_KEY_LEN,
	    input, inputlen);
	WG_DUMP_HASH("tmp1", tmp1);
	/* Expand step, first block: out1 = T(1) = HMAC(PRK, 0x1). */
	one[0] = 1;
	wg_algo_hmac(out1, WG_KDF_OUTPUT_LEN, tmp1, sizeof(tmp1),
	    one, sizeof(one));
	WG_DUMP_HASH("out1", out1);
	if (out2 == NULL)
		return;
	/* out2 = T(2) = HMAC(PRK, T(1) || 0x2). */
	memcpy(tmp2, out1, WG_KDF_OUTPUT_LEN);
	tmp2[WG_KDF_OUTPUT_LEN] = 2;
	wg_algo_hmac(out2, WG_KDF_OUTPUT_LEN, tmp1, sizeof(tmp1),
	    tmp2, sizeof(tmp2));
	WG_DUMP_HASH("out2", out2);
	if (out3 == NULL)
		return;
	/* out3 = T(3) = HMAC(PRK, T(2) || 0x3). */
	memcpy(tmp2, out2, WG_KDF_OUTPUT_LEN);
	tmp2[WG_KDF_OUTPUT_LEN] = 3;
	wg_algo_hmac(out3, WG_KDF_OUTPUT_LEN, tmp1, sizeof(tmp1),
	    tmp2, sizeof(tmp2));
	WG_DUMP_HASH("out3", out3);
}
1136
1137 static void __noinline
1138 wg_algo_dh_kdf(uint8_t ckey[WG_CHAINING_KEY_LEN],
1139 uint8_t cipher_key[WG_CIPHER_KEY_LEN],
1140 const uint8_t local_key[WG_STATIC_KEY_LEN],
1141 const uint8_t remote_key[WG_STATIC_KEY_LEN])
1142 {
1143 uint8_t dhout[WG_DH_OUTPUT_LEN];
1144
1145 wg_algo_dh(dhout, local_key, remote_key);
1146 wg_algo_kdf(ckey, cipher_key, NULL, ckey, dhout, sizeof(dhout));
1147
1148 WG_DUMP_HASH("dhout", dhout);
1149 WG_DUMP_HASH("ckey", ckey);
1150 if (cipher_key != NULL)
1151 WG_DUMP_HASH("cipher_key", cipher_key);
1152 }
1153
1154 static void
1155 wg_algo_aead_enc(uint8_t out[], size_t expected_outsize, const uint8_t key[],
1156 const uint64_t counter, const uint8_t plain[], const size_t plainsize,
1157 const uint8_t auth[], size_t authlen)
1158 {
1159 uint8_t nonce[(32 + 64) / 8] = {0};
1160 long long unsigned int outsize;
1161 int error __diagused;
1162
1163 le64enc(&nonce[4], counter);
1164
1165 error = crypto_aead_chacha20poly1305_ietf_encrypt(out, &outsize, plain,
1166 plainsize, auth, authlen, NULL, nonce, key);
1167 KASSERT(error == 0);
1168 KASSERT(outsize == expected_outsize);
1169 }
1170
1171 static int
1172 wg_algo_aead_dec(uint8_t out[], size_t expected_outsize, const uint8_t key[],
1173 const uint64_t counter, const uint8_t encrypted[],
1174 const size_t encryptedsize, const uint8_t auth[], size_t authlen)
1175 {
1176 uint8_t nonce[(32 + 64) / 8] = {0};
1177 long long unsigned int outsize;
1178 int error;
1179
1180 le64enc(&nonce[4], counter);
1181
1182 error = crypto_aead_chacha20poly1305_ietf_decrypt(out, &outsize, NULL,
1183 encrypted, encryptedsize, auth, authlen, nonce, key);
1184 if (error == 0)
1185 KASSERT(outsize == expected_outsize);
1186 return error;
1187 }
1188
1189 static void
1190 wg_algo_xaead_enc(uint8_t out[], const size_t expected_outsize,
1191 const uint8_t key[], const uint8_t plain[], const size_t plainsize,
1192 const uint8_t auth[], size_t authlen,
1193 const uint8_t nonce[WG_SALT_LEN])
1194 {
1195 long long unsigned int outsize;
1196 int error __diagused;
1197
1198 CTASSERT(WG_SALT_LEN == crypto_aead_xchacha20poly1305_ietf_NPUBBYTES);
1199 error = crypto_aead_xchacha20poly1305_ietf_encrypt(out, &outsize,
1200 plain, plainsize, auth, authlen, NULL, nonce, key);
1201 KASSERT(error == 0);
1202 KASSERT(outsize == expected_outsize);
1203 }
1204
1205 static int
1206 wg_algo_xaead_dec(uint8_t out[], const size_t expected_outsize,
1207 const uint8_t key[], const uint8_t encrypted[], const size_t encryptedsize,
1208 const uint8_t auth[], size_t authlen,
1209 const uint8_t nonce[WG_SALT_LEN])
1210 {
1211 long long unsigned int outsize;
1212 int error;
1213
1214 error = crypto_aead_xchacha20poly1305_ietf_decrypt(out, &outsize, NULL,
1215 encrypted, encryptedsize, auth, authlen, nonce, key);
1216 if (error == 0)
1217 KASSERT(outsize == expected_outsize);
1218 return error;
1219 }
1220
/*
 * wg_algo_tai64n(timestamp)
 *
 *	Fill the 12-byte handshake timestamp ([W] 5.4.2) in TAI64N
 *	layout: 8 bytes of big-endian seconds with the external
 *	TAI64 bias 0x4000000000000000, then 4 bytes of big-endian
 *	nanoseconds.  Uses UTC seconds as-is, so no leap-second
 *	correction is applied (see FIXME below).
 */
static void
wg_algo_tai64n(wg_timestamp_t timestamp)
{
	struct timespec ts;

	/* FIXME strict TAI64N (https://cr.yp.to/libtai/tai64.html) */
	getnanotime(&ts);
	/* TAI64 label in external TAI64 format */
	be32enc(timestamp, 0x40000000U + (uint32_t)(ts.tv_sec >> 32));
	/* second beginning from 1970 TAI */
	be32enc(timestamp + 4, (uint32_t)(ts.tv_sec & 0xffffffffU));
	/* nanosecond in big-endian format */
	be32enc(timestamp + 8, (uint32_t)ts.tv_nsec);
}
1235
1236 /*
1237 * wg_get_stable_session(wgp, psref)
1238 *
1239 * Get a passive reference to the current stable session, or
1240 * return NULL if there is no current stable session.
1241 *
1242 * The pointer is always there but the session is not necessarily
1243 * ESTABLISHED; if it is not ESTABLISHED, return NULL. However,
1244 * the session may transition from ESTABLISHED to DESTROYING while
1245 * holding the passive reference.
1246 */
static struct wg_session *
wg_get_stable_session(struct wg_peer *wgp, struct psref *psref)
{
	int s;
	struct wg_session *wgs;

	/*
	 * Read the stable-session pointer and its state inside a
	 * pserialize read section so that the session cannot be
	 * destroyed before we take a passive reference to it.
	 */
	s = pserialize_read_enter();
	wgs = atomic_load_consume(&wgp->wgp_session_stable);
	if (__predict_false(wgs->wgs_state != WGS_STATE_ESTABLISHED))
		wgs = NULL;	/* not usable for data traffic */
	else
		psref_acquire(psref, &wgs->wgs_psref, wg_psref_class);
	pserialize_read_exit(s);

	return wgs;
}
1263
/*
 * wg_put_session(wgs, psref)
 *
 *	Release a passive reference previously acquired with
 *	wg_get_stable_session (or wg_lookup_session_by_index).
 */
static void
wg_put_session(struct wg_session *wgs, struct psref *psref)
{

	psref_release(psref, &wgs->wgs_psref, wg_psref_class);
}
1270
/*
 * wg_destroy_session(wg, wgs)
 *
 *	Tear down a session: unpublish it from the by-index table,
 *	wait for all passive references to drain, erase its
 *	cryptographic state, and return it to WGS_STATE_UNKNOWN.
 *	Caller must hold wgs's peer's lock; may sleep
 *	(pserialize_perform/psref_target_destroy).
 */
static void
wg_destroy_session(struct wg_softc *wg, struct wg_session *wgs)
{
	struct wg_peer *wgp = wgs->wgs_peer;
	struct wg_session *wgs0 __diagused;
	void *garbage;

	KASSERT(mutex_owned(wgp->wgp_lock));
	KASSERT(wgs->wgs_state != WGS_STATE_UNKNOWN);

	/* Remove the session from the table. */
	wgs0 = thmap_del(wg->wg_sessions_byindex,
	    &wgs->wgs_local_index, sizeof(wgs->wgs_local_index));
	KASSERT(wgs0 == wgs);
	garbage = thmap_stage_gc(wg->wg_sessions_byindex);

	/* Wait for passive references to drain. */
	pserialize_perform(wgp->wgp_psz);
	psref_target_destroy(&wgs->wgs_psref, wg_psref_class);

	/*
	 * Free memory, zero state, and transition to UNKNOWN.  We have
	 * exclusive access to the session now, so there is no need for
	 * an atomic store.
	 */
	thmap_gc(wg->wg_sessions_byindex, garbage);
	WG_DLOG("session[L=%"PRIx32" R=%"PRIx32"] -> WGS_STATE_UNKNOWN\n",
	    wgs->wgs_local_index, wgs->wgs_remote_index);
	wgs->wgs_local_index = 0;
	wgs->wgs_remote_index = 0;
	wg_clear_states(wgs);
	wgs->wgs_state = WGS_STATE_UNKNOWN;
}
1304
1305 /*
1306 * wg_get_session_index(wg, wgs)
1307 *
1308 * Choose a session index for wgs->wgs_local_index, and store it
1309 * in wg's table of sessions by index.
1310 *
1311 * wgs must be the unstable session of its peer, and must be
1312 * transitioning out of the UNKNOWN state.
1313 */
1314 static void
1315 wg_get_session_index(struct wg_softc *wg, struct wg_session *wgs)
1316 {
1317 struct wg_peer *wgp __diagused = wgs->wgs_peer;
1318 struct wg_session *wgs0;
1319 uint32_t index;
1320
1321 KASSERT(mutex_owned(wgp->wgp_lock));
1322 KASSERT(wgs == wgp->wgp_session_unstable);
1323 KASSERTMSG(wgs->wgs_state == WGS_STATE_UNKNOWN, "state=%d",
1324 wgs->wgs_state);
1325
1326 do {
1327 /* Pick a uniform random index. */
1328 index = cprng_strong32();
1329
1330 /* Try to take it. */
1331 wgs->wgs_local_index = index;
1332 wgs0 = thmap_put(wg->wg_sessions_byindex,
1333 &wgs->wgs_local_index, sizeof wgs->wgs_local_index, wgs);
1334
1335 /* If someone else beat us, start over. */
1336 } while (__predict_false(wgs0 != wgs));
1337 }
1338
1339 /*
1340 * wg_put_session_index(wg, wgs)
1341 *
1342 * Remove wgs from the table of sessions by index, wait for any
1343 * passive references to drain, and transition the session to the
1344 * UNKNOWN state.
1345 *
1346 * wgs must be the unstable session of its peer, and must not be
1347 * UNKNOWN or ESTABLISHED.
1348 */
static void
wg_put_session_index(struct wg_softc *wg, struct wg_session *wgs)
{
	struct wg_peer *wgp __diagused = wgs->wgs_peer;

	KASSERT(mutex_owned(wgp->wgp_lock));
	KASSERT(wgs->wgs_state != WGS_STATE_UNKNOWN);
	KASSERT(wgs->wgs_state != WGS_STATE_ESTABLISHED);

	/* Unpublish, drain references, and reset to UNKNOWN... */
	wg_destroy_session(wg, wgs);
	/*
	 * ...then re-arm the psref target (destroyed inside
	 * wg_destroy_session) so the session can be reused.
	 */
	psref_target_init(&wgs->wgs_psref, wg_psref_class);
}
1361
1362 /*
1363 * Handshake patterns
1364 *
1365 * [W] 5: "These messages use the "IK" pattern from Noise"
1366 * [N] 7.5. Interactive handshake patterns (fundamental)
 * "The first character refers to the initiator's static key:"
 * "I = Static key for initiator Immediately transmitted to responder,
 * despite reduced or absent identity hiding"
 * "The second character refers to the responder's static key:"
 * "K = Static key for responder Known to initiator"
1372 * "IK:
1373 * <- s
1374 * ...
1375 * -> e, es, s, ss
1376 * <- e, ee, se"
1377 * [N] 9.4. Pattern modifiers
1378 * "IKpsk2:
1379 * <- s
1380 * ...
1381 * -> e, es, s, ss
1382 * <- e, ee, se, psk"
1383 */
/*
 * wg_fill_msg_init(wg, wgp, wgs, wgmi)
 *
 *	Compose the first handshake message (initiator to responder,
 *	[W] 5.4.2) into wgmi, then record the ephemeral key pair,
 *	handshake hash Hi, and chaining key Ci in wgs for use when
 *	the peer's response arrives.
 *
 *	Caller must hold wgp->wgp_lock; wgs must be the peer's
 *	unstable session in INIT_ACTIVE with its local index already
 *	assigned.
 */
static void
wg_fill_msg_init(struct wg_softc *wg, struct wg_peer *wgp,
    struct wg_session *wgs, struct wg_msg_init *wgmi)
{
	uint8_t ckey[WG_CHAINING_KEY_LEN]; /* [W] 5.4.2: Ci */
	uint8_t hash[WG_HASH_LEN]; /* [W] 5.4.2: Hi */
	uint8_t cipher_key[WG_CIPHER_KEY_LEN];
	uint8_t pubkey[WG_EPHEMERAL_KEY_LEN];
	uint8_t privkey[WG_EPHEMERAL_KEY_LEN];

	KASSERT(mutex_owned(wgp->wgp_lock));
	KASSERT(wgs == wgp->wgp_session_unstable);
	KASSERTMSG(wgs->wgs_state == WGS_STATE_INIT_ACTIVE, "state=%d",
	    wgs->wgs_state);

	wgmi->wgmi_type = htole32(WG_MSG_TYPE_INIT);
	wgmi->wgmi_sender = wgs->wgs_local_index;

	/* [W] 5.4.2: First Message: Initiator to Responder */

	/* Ci := HASH(CONSTRUCTION) */
	/* Hi := HASH(Ci || IDENTIFIER) */
	wg_init_key_and_hash(ckey, hash);
	/* Hi := HASH(Hi || Sr^pub) */
	wg_algo_hash(hash, wgp->wgp_pubkey, sizeof(wgp->wgp_pubkey));

	WG_DUMP_HASH("hash", hash);

	/* [N] 2.2: "e" */
	/* Ei^priv, Ei^pub := DH-GENERATE() */
	wg_algo_generate_keypair(pubkey, privkey);
	/* Ci := KDF1(Ci, Ei^pub) */
	wg_algo_kdf(ckey, NULL, NULL, ckey, pubkey, sizeof(pubkey));
	/* msg.ephemeral := Ei^pub */
	memcpy(wgmi->wgmi_ephemeral, pubkey, sizeof(wgmi->wgmi_ephemeral));
	/* Hi := HASH(Hi || msg.ephemeral) */
	wg_algo_hash(hash, pubkey, sizeof(pubkey));

	WG_DUMP_HASH("ckey", ckey);
	WG_DUMP_HASH("hash", hash);

	/* [N] 2.2: "es" */
	/* Ci, k := KDF2(Ci, DH(Ei^priv, Sr^pub)) */
	wg_algo_dh_kdf(ckey, cipher_key, privkey, wgp->wgp_pubkey);

	/* [N] 2.2: "s" */
	/* msg.static := AEAD(k, 0, Si^pub, Hi) */
	wg_algo_aead_enc(wgmi->wgmi_static, sizeof(wgmi->wgmi_static),
	    cipher_key, 0, wg->wg_pubkey, sizeof(wg->wg_pubkey),
	    hash, sizeof(hash));
	/* Hi := HASH(Hi || msg.static) */
	wg_algo_hash(hash, wgmi->wgmi_static, sizeof(wgmi->wgmi_static));

	WG_DUMP_HASH48("wgmi_static", wgmi->wgmi_static);

	/* [N] 2.2: "ss" */
	/* Ci, k := KDF2(Ci, DH(Si^priv, Sr^pub)) */
	wg_algo_dh_kdf(ckey, cipher_key, wg->wg_privkey, wgp->wgp_pubkey);

	/* msg.timestamp := AEAD(k, TIMESTAMP(), Hi) */
	wg_timestamp_t timestamp;
	wg_algo_tai64n(timestamp);
	wg_algo_aead_enc(wgmi->wgmi_timestamp, sizeof(wgmi->wgmi_timestamp),
	    cipher_key, 0, timestamp, sizeof(timestamp), hash, sizeof(hash));
	/* Hi := HASH(Hi || msg.timestamp) */
	wg_algo_hash(hash, wgmi->wgmi_timestamp, sizeof(wgmi->wgmi_timestamp));

	/* [W] 5.4.4 Cookie MACs */
	/* mac1 covers everything in the message up to the mac1 field. */
	wg_algo_mac_mac1(wgmi->wgmi_mac1, sizeof(wgmi->wgmi_mac1),
	    wgp->wgp_pubkey, sizeof(wgp->wgp_pubkey),
	    (const uint8_t *)wgmi, offsetof(struct wg_msg_init, wgmi_mac1));
	/* Need mac1 to decrypt a cookie from a cookie message */
	memcpy(wgp->wgp_last_sent_mac1, wgmi->wgmi_mac1,
	    sizeof(wgp->wgp_last_sent_mac1));
	wgp->wgp_last_sent_mac1_valid = true;

	/* mac2: zero unless we hold an unexpired cookie from the peer. */
	if (wgp->wgp_latest_cookie_time == 0 ||
	    (time_uptime - wgp->wgp_latest_cookie_time) >= WG_COOKIE_TIME)
		memset(wgmi->wgmi_mac2, 0, sizeof(wgmi->wgmi_mac2));
	else {
		wg_algo_mac(wgmi->wgmi_mac2, sizeof(wgmi->wgmi_mac2),
		    wgp->wgp_latest_cookie, WG_COOKIE_LEN,
		    (const uint8_t *)wgmi,
		    offsetof(struct wg_msg_init, wgmi_mac2),
		    NULL, 0);
	}

	/* Save handshake state for processing the peer's response. */
	memcpy(wgs->wgs_ephemeral_key_pub, pubkey, sizeof(pubkey));
	memcpy(wgs->wgs_ephemeral_key_priv, privkey, sizeof(privkey));
	memcpy(wgs->wgs_handshake_hash, hash, sizeof(hash));
	memcpy(wgs->wgs_chaining_key, ckey, sizeof(ckey));
	WG_DLOG("%s: sender=%x\n", __func__, wgs->wgs_local_index);
}
1477
/*
 * wg_handle_msg_init(wg, wgmi, src)
 *
 *	Process a received handshake initiation message ([W] 5.4.2):
 *	verify mac1 (and, under load, mac2 -- or answer with a cookie
 *	reply), authenticate and identify the sending peer, reject
 *	replayed timestamps, then set up the peer's unstable session
 *	as responder and send our handshake response.
 */
static void __noinline
wg_handle_msg_init(struct wg_softc *wg, const struct wg_msg_init *wgmi,
    const struct sockaddr *src)
{
	uint8_t ckey[WG_CHAINING_KEY_LEN]; /* [W] 5.4.2: Ci */
	uint8_t hash[WG_HASH_LEN]; /* [W] 5.4.2: Hi */
	uint8_t cipher_key[WG_CIPHER_KEY_LEN];
	uint8_t peer_pubkey[WG_STATIC_KEY_LEN];
	struct wg_peer *wgp;
	struct wg_session *wgs;
	int error, ret;
	struct psref psref_peer;
	uint8_t mac1[WG_MAC_LEN];

	WG_TRACE("init msg received");

	wg_algo_mac_mac1(mac1, sizeof(mac1),
	    wg->wg_pubkey, sizeof(wg->wg_pubkey),
	    (const uint8_t *)wgmi, offsetof(struct wg_msg_init, wgmi_mac1));

	/*
	 * [W] 5.3: Denial of Service Mitigation & Cookies
	 * "the responder, ..., must always reject messages with an invalid
	 * msg.mac1"
	 */
	if (!consttime_memequal(mac1, wgmi->wgmi_mac1, sizeof(mac1))) {
		WG_DLOG("mac1 is invalid\n");
		return;
	}

	/*
	 * [W] 5.4.2: First Message: Initiator to Responder
	 * "When the responder receives this message, it does the same
	 * operations so that its final state variables are identical,
	 * replacing the operands of the DH function to produce equivalent
	 * values."
	 * Note that the following comments of operations are just copies of
	 * the initiator's ones.
	 */

	/* Ci := HASH(CONSTRUCTION) */
	/* Hi := HASH(Ci || IDENTIFIER) */
	wg_init_key_and_hash(ckey, hash);
	/* Hi := HASH(Hi || Sr^pub) */
	wg_algo_hash(hash, wg->wg_pubkey, sizeof(wg->wg_pubkey));

	/* [N] 2.2: "e" */
	/* Ci := KDF1(Ci, Ei^pub) */
	wg_algo_kdf(ckey, NULL, NULL, ckey, wgmi->wgmi_ephemeral,
	    sizeof(wgmi->wgmi_ephemeral));
	/* Hi := HASH(Hi || msg.ephemeral) */
	wg_algo_hash(hash, wgmi->wgmi_ephemeral, sizeof(wgmi->wgmi_ephemeral));

	WG_DUMP_HASH("ckey", ckey);

	/* [N] 2.2: "es" */
	/* Ci, k := KDF2(Ci, DH(Ei^priv, Sr^pub)) */
	wg_algo_dh_kdf(ckey, cipher_key, wg->wg_privkey, wgmi->wgmi_ephemeral);

	WG_DUMP_HASH48("wgmi_static", wgmi->wgmi_static);

	/* [N] 2.2: "s" */
	/* msg.static := AEAD(k, 0, Si^pub, Hi) */
	/* Decrypting msg.static also authenticates the initiator. */
	error = wg_algo_aead_dec(peer_pubkey, WG_STATIC_KEY_LEN, cipher_key, 0,
	    wgmi->wgmi_static, sizeof(wgmi->wgmi_static), hash, sizeof(hash));
	if (error != 0) {
		WG_LOG_RATECHECK(&wg->wg_ppsratecheck, LOG_DEBUG,
		    "%s: wg_algo_aead_dec for secret key failed\n",
		    if_name(&wg->wg_if));
		return;
	}
	/* Hi := HASH(Hi || msg.static) */
	wg_algo_hash(hash, wgmi->wgmi_static, sizeof(wgmi->wgmi_static));

	wgp = wg_lookup_peer_by_pubkey(wg, peer_pubkey, &psref_peer);
	if (wgp == NULL) {
		WG_DLOG("peer not found\n");
		return;
	}

	/*
	 * Lock the peer to serialize access to cookie state.
	 *
	 * XXX Can we safely avoid holding the lock across DH?  Take it
	 * just to verify mac2 and then unlock/DH/lock?
	 */
	mutex_enter(wgp->wgp_lock);

	if (__predict_false(wg_is_underload(wg, wgp, WG_MSG_TYPE_INIT))) {
		WG_TRACE("under load");
		/*
		 * [W] 5.3: Denial of Service Mitigation & Cookies
		 * "the responder, ..., and when under load may reject messages
		 * with an invalid msg.mac2.  If the responder receives a
		 * message with a valid msg.mac1 yet with an invalid msg.mac2,
		 * and is under load, it may respond with a cookie reply
		 * message"
		 */
		uint8_t zero[WG_MAC_LEN] = {0};
		if (consttime_memequal(wgmi->wgmi_mac2, zero, sizeof(zero))) {
			WG_TRACE("sending a cookie message: no cookie included");
			wg_send_cookie_msg(wg, wgp, wgmi->wgmi_sender,
			    wgmi->wgmi_mac1, src);
			goto out;
		}
		if (!wgp->wgp_last_sent_cookie_valid) {
			WG_TRACE("sending a cookie message: no cookie sent ever");
			wg_send_cookie_msg(wg, wgp, wgmi->wgmi_sender,
			    wgmi->wgmi_mac1, src);
			goto out;
		}
		uint8_t mac2[WG_MAC_LEN];
		wg_algo_mac(mac2, sizeof(mac2), wgp->wgp_last_sent_cookie,
		    WG_COOKIE_LEN, (const uint8_t *)wgmi,
		    offsetof(struct wg_msg_init, wgmi_mac2), NULL, 0);
		if (!consttime_memequal(mac2, wgmi->wgmi_mac2, sizeof(mac2))) {
			WG_DLOG("mac2 is invalid\n");
			goto out;
		}
		WG_TRACE("under load, but continue to sending");
	}

	/* [N] 2.2: "ss" */
	/* Ci, k := KDF2(Ci, DH(Si^priv, Sr^pub)) */
	wg_algo_dh_kdf(ckey, cipher_key, wg->wg_privkey, wgp->wgp_pubkey);

	/* msg.timestamp := AEAD(k, TIMESTAMP(), Hi) */
	wg_timestamp_t timestamp;
	error = wg_algo_aead_dec(timestamp, sizeof(timestamp), cipher_key, 0,
	    wgmi->wgmi_timestamp, sizeof(wgmi->wgmi_timestamp),
	    hash, sizeof(hash));
	if (error != 0) {
		WG_LOG_RATECHECK(&wgp->wgp_ppsratecheck, LOG_DEBUG,
		    "%s: peer %s: wg_algo_aead_dec for timestamp failed\n",
		    if_name(&wg->wg_if), wgp->wgp_name);
		goto out;
	}
	/* Hi := HASH(Hi || msg.timestamp) */
	wg_algo_hash(hash, wgmi->wgmi_timestamp, sizeof(wgmi->wgmi_timestamp));

	/*
	 * [W] 5.1 "The responder keeps track of the greatest timestamp
	 *      received per peer and discards packets containing
	 *      timestamps less than or equal to it."
	 * (TAI64N compares correctly as a big-endian byte string.)
	 */
	ret = memcmp(timestamp, wgp->wgp_timestamp_latest_init,
	    sizeof(timestamp));
	if (ret <= 0) {
		WG_LOG_RATECHECK(&wgp->wgp_ppsratecheck, LOG_DEBUG,
		    "%s: peer %s: invalid init msg: timestamp is old\n",
		    if_name(&wg->wg_if), wgp->wgp_name);
		goto out;
	}
	memcpy(wgp->wgp_timestamp_latest_init, timestamp, sizeof(timestamp));

	/*
	 * Message is good -- we're committing to handle it now, unless
	 * we were already initiating a session.
	 */
	wgs = wgp->wgp_session_unstable;
	switch (wgs->wgs_state) {
	case WGS_STATE_UNKNOWN:		/* new session initiated by peer */
		break;
	case WGS_STATE_INIT_ACTIVE:	/* we're already initiating, drop */
		/* XXX Who wins if both sides send INIT? */
		WG_TRACE("Session already initializing, ignoring the message");
		goto out;
	case WGS_STATE_INIT_PASSIVE:	/* peer is retrying, start over */
		WG_TRACE("Session already initializing, destroying old states");
		/*
		 * XXX Avoid this -- just resend our response -- if the
		 * INIT message is identical to the previous one.
		 */
		wg_put_session_index(wg, wgs);
		KASSERTMSG(wgs->wgs_state == WGS_STATE_UNKNOWN, "state=%d",
		    wgs->wgs_state);
		break;
	case WGS_STATE_ESTABLISHED:	/* can't happen */
		panic("unstable session can't be established");
	case WGS_STATE_DESTROYING:	/* rekey initiated by peer */
		WG_TRACE("Session destroying, but force to clear");
		wg_put_session_index(wg, wgs);
		KASSERTMSG(wgs->wgs_state == WGS_STATE_UNKNOWN, "state=%d",
		    wgs->wgs_state);
		break;
	default:
		panic("invalid session state: %d", wgs->wgs_state);
	}

	/*
	 * Assign a fresh session index.
	 */
	KASSERTMSG(wgs->wgs_state == WGS_STATE_UNKNOWN, "state=%d",
	    wgs->wgs_state);
	wg_get_session_index(wg, wgs);

	memcpy(wgs->wgs_handshake_hash, hash, sizeof(hash));
	memcpy(wgs->wgs_chaining_key, ckey, sizeof(ckey));
	memcpy(wgs->wgs_ephemeral_key_peer, wgmi->wgmi_ephemeral,
	    sizeof(wgmi->wgmi_ephemeral));

	wg_update_endpoint_if_necessary(wgp, src);

	/*
	 * Count the time of the INIT message as the time of
	 * establishment -- this is used to decide when to erase keys,
	 * and we want to start counting as soon as we have generated
	 * keys.
	 *
	 * No need for atomic store because the session can't be used
	 * in the rx or tx paths yet -- not until we transition to
	 * INIT_PASSIVE.
	 */
	wgs->wgs_time_established = time_uptime32;
	wg_schedule_session_dtor_timer(wgp);

	/*
	 * Respond to the initiator with our ephemeral public key.
	 */
	wg_send_handshake_msg_resp(wg, wgp, wgs, wgmi);

	WG_DLOG("session[L=%"PRIx32" R=%"PRIx32"]:"
	    " calculate keys as responder\n",
	    wgs->wgs_local_index, wgs->wgs_remote_index);
	wg_calculate_keys(wgs, false);
	wg_clear_states(wgs);

	/*
	 * Session is ready to receive data now that we have received
	 * the peer initiator's ephemeral key pair, generated our
	 * responder's ephemeral key pair, and derived a session key.
	 *
	 * Transition from UNKNOWN to INIT_PASSIVE to publish it to the
	 * data rx path, wg_handle_msg_data, where the
	 * atomic_load_acquire matching this atomic_store_release
	 * happens.
	 *
	 * (Session is not, however, ready to send data until the peer
	 * has acknowledged our response by sending its first data
	 * packet.  So don't swap the sessions yet.)
	 */
	WG_DLOG("session[L=%"PRIx32" R=%"PRIx32"] -> WGS_STATE_INIT_PASSIVE\n",
	    wgs->wgs_local_index, wgs->wgs_remote_index);
	atomic_store_release(&wgs->wgs_state, WGS_STATE_INIT_PASSIVE);
	WG_TRACE("WGS_STATE_INIT_PASSIVE");

out:
	mutex_exit(wgp->wgp_lock);
	wg_put_peer(wgp, &psref_peer);
}
1728
/*
 * wg_get_so_by_af(wg, af)
 *
 *	Return the interface's UDP socket for address family af
 *	(AF_INET or AF_INET6, as compiled in).  Panics on any other
 *	family -- callers must only pass families of configured
 *	endpoints.
 */
static struct socket *
wg_get_so_by_af(struct wg_softc *wg, const int af)
{

	switch (af) {
#ifdef INET
	case AF_INET:
		return wg->wg_so4;
#endif
#ifdef INET6
	case AF_INET6:
		return wg->wg_so6;
#endif
	default:
		panic("wg: no such af: %d", af);
	}
}
1746
/*
 * wg_get_so_by_peer(wgp, wgsa)
 *
 *	Return the UDP socket to use for reaching this peer, chosen
 *	by the address family of the endpoint wgsa.
 */
static struct socket *
wg_get_so_by_peer(struct wg_peer *wgp, struct wg_sockaddr *wgsa)
{

	return wg_get_so_by_af(wgp->wgp_sc, wgsa_family(wgsa));
}
1753
/*
 * wg_get_endpoint_sa(wgp, psref)
 *
 *	Get the peer's current endpoint address with a passive
 *	reference held; release with wg_put_sa.  The pserialize read
 *	section prevents the endpoint from being freed between the
 *	load and the psref_acquire.
 */
static struct wg_sockaddr *
wg_get_endpoint_sa(struct wg_peer *wgp, struct psref *psref)
{
	struct wg_sockaddr *wgsa;
	int s;

	s = pserialize_read_enter();
	wgsa = atomic_load_consume(&wgp->wgp_endpoint);
	psref_acquire(psref, &wgsa->wgsa_psref, wg_psref_class);
	pserialize_read_exit(s);

	return wgsa;
}
1767
/*
 * wg_put_sa(wgp, wgsa, psref)
 *
 *	Release the passive reference to an endpoint address
 *	acquired with wg_get_endpoint_sa.
 */
static void
wg_put_sa(struct wg_peer *wgp, struct wg_sockaddr *wgsa, struct psref *psref)
{

	psref_release(psref, &wgsa->wgsa_psref, wg_psref_class);
}
1774
/*
 * wg_send_so(wgp, m)
 *
 *	Transmit mbuf m over UDP to the peer's current endpoint,
 *	on the socket matching the endpoint's address family.
 *	Consumes m (via sosend); returns 0 or an errno.
 */
static int
wg_send_so(struct wg_peer *wgp, struct mbuf *m)
{
	int error;
	struct socket *so;
	struct psref psref;
	struct wg_sockaddr *wgsa;

	/* Hold the endpoint address stable across the send. */
	wgsa = wg_get_endpoint_sa(wgp, &psref);
	so = wg_get_so_by_peer(wgp, wgsa);
	error = sosend(so, wgsatosa(wgsa), NULL, m, NULL, 0, curlwp);
	wg_put_sa(wgp, wgsa, &psref);

	return error;
}
1790
/*
 * wg_send_handshake_msg_init(wg, wgp)
 *
 *	Initiate a handshake with peer wgp: drive the unstable
 *	session from UNKNOWN (or DESTROYING) into INIT_ACTIVE,
 *	assign it a session index, compose and send the first
 *	handshake message, and arm the handshake-timeout callout for
 *	retry.  No-op if a handshake is already in flight.
 *
 *	Caller must hold wgp->wgp_lock.
 */
static void
wg_send_handshake_msg_init(struct wg_softc *wg, struct wg_peer *wgp)
{
	int error;
	struct mbuf *m;
	struct wg_msg_init *wgmi;
	struct wg_session *wgs;

	KASSERT(mutex_owned(wgp->wgp_lock));

	wgs = wgp->wgp_session_unstable;
	/* XXX pull dispatch out into wg_task_send_init_message */
	switch (wgs->wgs_state) {
	case WGS_STATE_UNKNOWN:		/* new session initiated by us */
		break;
	case WGS_STATE_INIT_ACTIVE:	/* we're already initiating, stop */
		WG_TRACE("Session already initializing, skip starting new one");
		return;
	case WGS_STATE_INIT_PASSIVE:	/* peer was trying -- XXX what now? */
		WG_TRACE("Session already initializing, waiting for peer");
		return;
	case WGS_STATE_ESTABLISHED:	/* can't happen */
		panic("unstable session can't be established");
	case WGS_STATE_DESTROYING:	/* rekey initiated by us too early */
		WG_TRACE("Session destroying");
		/* Reclaim the session so we can reuse it. */
		wg_put_session_index(wg, wgs);
		KASSERTMSG(wgs->wgs_state == WGS_STATE_UNKNOWN, "state=%d",
		    wgs->wgs_state);
		break;
	}

	/*
	 * Assign a fresh session index.
	 */
	KASSERTMSG(wgs->wgs_state == WGS_STATE_UNKNOWN, "state=%d",
	    wgs->wgs_state);
	wg_get_session_index(wg, wgs);

	/*
	 * We have initiated a session.  Transition to INIT_ACTIVE.
	 * This doesn't publish it for use in the data rx path,
	 * wg_handle_msg_data, or in the data tx path, wg_output -- we
	 * have to wait for the peer to respond with their ephemeral
	 * public key before we can derive a session key for tx/rx.
	 * Hence only atomic_store_relaxed.
	 */
	WG_DLOG("session[L=%"PRIx32" R=(unknown)] -> WGS_STATE_INIT_ACTIVE\n",
	    wgs->wgs_local_index);
	atomic_store_relaxed(&wgs->wgs_state, WGS_STATE_INIT_ACTIVE);

	/* Allocate an mbuf big enough for the init message. */
	m = m_gethdr(M_WAIT, MT_DATA);
	if (sizeof(*wgmi) > MHLEN) {
		m_clget(m, M_WAIT);
		CTASSERT(sizeof(*wgmi) <= MCLBYTES);
	}
	m->m_pkthdr.len = m->m_len = sizeof(*wgmi);
	wgmi = mtod(m, struct wg_msg_init *);
	wg_fill_msg_init(wg, wgp, wgs, wgmi);

	error = wg->wg_ops->send_hs_msg(wgp, m); /* consumes m */
	if (error) {
		/*
		 * Sending out an initiation packet failed; give up on
		 * this session and toss packet waiting for it if any.
		 *
		 * XXX Why don't we just let the periodic handshake
		 * retry logic work in this case?
		 */
		WG_DLOG("send_hs_msg failed, error=%d\n", error);
		wg_put_session_index(wg, wgs);
		m = atomic_swap_ptr(&wgp->wgp_pending, NULL);
		m_freem(m);
		return;
	}

	WG_TRACE("init msg sent");
	if (wgp->wgp_handshake_start_time == 0)
		wgp->wgp_handshake_start_time = time_uptime;
	callout_schedule(&wgp->wgp_handshake_timeout_timer,
	    MIN(wg_rekey_timeout, (unsigned)(INT_MAX / hz)) * hz);
}
1872
1873 static void
1874 wg_fill_msg_resp(struct wg_softc *wg, struct wg_peer *wgp,
1875 struct wg_session *wgs, struct wg_msg_resp *wgmr,
1876 const struct wg_msg_init *wgmi)
1877 {
1878 uint8_t ckey[WG_CHAINING_KEY_LEN]; /* [W] 5.4.3: Cr */
1879 uint8_t hash[WG_HASH_LEN]; /* [W] 5.4.3: Hr */
1880 uint8_t cipher_key[WG_KDF_OUTPUT_LEN];
1881 uint8_t pubkey[WG_EPHEMERAL_KEY_LEN];
1882 uint8_t privkey[WG_EPHEMERAL_KEY_LEN];
1883
1884 KASSERT(mutex_owned(wgp->wgp_lock));
1885 KASSERT(wgs == wgp->wgp_session_unstable);
1886 KASSERTMSG(wgs->wgs_state == WGS_STATE_UNKNOWN, "state=%d",
1887 wgs->wgs_state);
1888
1889 memcpy(hash, wgs->wgs_handshake_hash, sizeof(hash));
1890 memcpy(ckey, wgs->wgs_chaining_key, sizeof(ckey));
1891
1892 wgmr->wgmr_type = htole32(WG_MSG_TYPE_RESP);
1893 wgmr->wgmr_sender = wgs->wgs_local_index;
1894 wgmr->wgmr_receiver = wgmi->wgmi_sender;
1895
1896 /* [W] 5.4.3 Second Message: Responder to Initiator */
1897
1898 /* [N] 2.2: "e" */
1899 /* Er^priv, Er^pub := DH-GENERATE() */
1900 wg_algo_generate_keypair(pubkey, privkey);
1901 /* Cr := KDF1(Cr, Er^pub) */
1902 wg_algo_kdf(ckey, NULL, NULL, ckey, pubkey, sizeof(pubkey));
1903 /* msg.ephemeral := Er^pub */
1904 memcpy(wgmr->wgmr_ephemeral, pubkey, sizeof(wgmr->wgmr_ephemeral));
1905 /* Hr := HASH(Hr || msg.ephemeral) */
1906 wg_algo_hash(hash, pubkey, sizeof(pubkey));
1907
1908 WG_DUMP_HASH("ckey", ckey);
1909 WG_DUMP_HASH("hash", hash);
1910
1911 /* [N] 2.2: "ee" */
1912 /* Cr := KDF1(Cr, DH(Er^priv, Ei^pub)) */
1913 wg_algo_dh_kdf(ckey, NULL, privkey, wgs->wgs_ephemeral_key_peer);
1914
1915 /* [N] 2.2: "se" */
1916 /* Cr := KDF1(Cr, DH(Er^priv, Si^pub)) */
1917 wg_algo_dh_kdf(ckey, NULL, privkey, wgp->wgp_pubkey);
1918
1919 /* [N] 9.2: "psk" */
1920 {
1921 uint8_t kdfout[WG_KDF_OUTPUT_LEN];
1922 /* Cr, r, k := KDF3(Cr, Q) */
1923 wg_algo_kdf(ckey, kdfout, cipher_key, ckey, wgp->wgp_psk,
1924 sizeof(wgp->wgp_psk));
1925 /* Hr := HASH(Hr || r) */
1926 wg_algo_hash(hash, kdfout, sizeof(kdfout));
1927 }
1928
1929 /* msg.empty := AEAD(k, 0, e, Hr) */
1930 wg_algo_aead_enc(wgmr->wgmr_empty, sizeof(wgmr->wgmr_empty),
1931 cipher_key, 0, NULL, 0, hash, sizeof(hash));
1932 /* Hr := HASH(Hr || msg.empty) */
1933 wg_algo_hash(hash, wgmr->wgmr_empty, sizeof(wgmr->wgmr_empty));
1934
1935 WG_DUMP_HASH("wgmr_empty", wgmr->wgmr_empty);
1936
1937 /* [W] 5.4.4: Cookie MACs */
1938 /* msg.mac1 := MAC(HASH(LABEL-MAC1 || Sm'^pub), msg_a) */
1939 wg_algo_mac_mac1(wgmr->wgmr_mac1, sizeof(wgmi->wgmi_mac1),
1940 wgp->wgp_pubkey, sizeof(wgp->wgp_pubkey),
1941 (const uint8_t *)wgmr, offsetof(struct wg_msg_resp, wgmr_mac1));
1942 /* Need mac1 to decrypt a cookie from a cookie message */
1943 memcpy(wgp->wgp_last_sent_mac1, wgmr->wgmr_mac1,
1944 sizeof(wgp->wgp_last_sent_mac1));
1945 wgp->wgp_last_sent_mac1_valid = true;
1946
1947 if (wgp->wgp_latest_cookie_time == 0 ||
1948 (time_uptime - wgp->wgp_latest_cookie_time) >= WG_COOKIE_TIME)
1949 /* msg.mac2 := 0^16 */
1950 memset(wgmr->wgmr_mac2, 0, sizeof(wgmr->wgmr_mac2));
1951 else {
1952 /* msg.mac2 := MAC(Lm, msg_b) */
1953 wg_algo_mac(wgmr->wgmr_mac2, sizeof(wgmi->wgmi_mac2),
1954 wgp->wgp_latest_cookie, WG_COOKIE_LEN,
1955 (const uint8_t *)wgmr,
1956 offsetof(struct wg_msg_resp, wgmr_mac2),
1957 NULL, 0);
1958 }
1959
1960 memcpy(wgs->wgs_handshake_hash, hash, sizeof(hash));
1961 memcpy(wgs->wgs_chaining_key, ckey, sizeof(ckey));
1962 memcpy(wgs->wgs_ephemeral_key_pub, pubkey, sizeof(pubkey));
1963 memcpy(wgs->wgs_ephemeral_key_priv, privkey, sizeof(privkey));
1964 wgs->wgs_remote_index = wgmi->wgmi_sender;
1965 WG_DLOG("sender=%x\n", wgs->wgs_local_index);
1966 WG_DLOG("receiver=%x\n", wgs->wgs_remote_index);
1967 }
1968
/*
 * wg_swap_sessions(wgp)
 *
 *	Promote the freshly ESTABLISHED unstable session to be the
 *	peer's stable session, demoting the previous stable session
 *	(ESTABLISHED or UNKNOWN) to the unstable slot.  The release
 *	store publishes the new stable session to lockless readers
 *	(e.g. wg_get_stable_session).  Caller must hold
 *	wgp->wgp_lock.
 */
static void
wg_swap_sessions(struct wg_peer *wgp)
{
	struct wg_session *wgs, *wgs_prev;

	KASSERT(mutex_owned(wgp->wgp_lock));

	wgs = wgp->wgp_session_unstable;
	KASSERTMSG(wgs->wgs_state == WGS_STATE_ESTABLISHED, "state=%d",
	    wgs->wgs_state);

	wgs_prev = wgp->wgp_session_stable;
	KASSERTMSG((wgs_prev->wgs_state == WGS_STATE_ESTABLISHED ||
	    wgs_prev->wgs_state == WGS_STATE_UNKNOWN),
	    "state=%d", wgs_prev->wgs_state);
	atomic_store_release(&wgp->wgp_session_stable, wgs);
	wgp->wgp_session_unstable = wgs_prev;
}
1987
1988 static void __noinline
1989 wg_handle_msg_resp(struct wg_softc *wg, const struct wg_msg_resp *wgmr,
1990 const struct sockaddr *src)
1991 {
1992 uint8_t ckey[WG_CHAINING_KEY_LEN]; /* [W] 5.4.3: Cr */
1993 uint8_t hash[WG_HASH_LEN]; /* [W] 5.4.3: Kr */
1994 uint8_t cipher_key[WG_KDF_OUTPUT_LEN];
1995 struct wg_peer *wgp;
1996 struct wg_session *wgs;
1997 struct psref psref;
1998 int error;
1999 uint8_t mac1[WG_MAC_LEN];
2000 struct wg_session *wgs_prev;
2001 struct mbuf *m;
2002
2003 wg_algo_mac_mac1(mac1, sizeof(mac1),
2004 wg->wg_pubkey, sizeof(wg->wg_pubkey),
2005 (const uint8_t *)wgmr, offsetof(struct wg_msg_resp, wgmr_mac1));
2006
2007 /*
2008 * [W] 5.3: Denial of Service Mitigation & Cookies
2009 * "the responder, ..., must always reject messages with an invalid
2010 * msg.mac1"
2011 */
2012 if (!consttime_memequal(mac1, wgmr->wgmr_mac1, sizeof(mac1))) {
2013 WG_DLOG("mac1 is invalid\n");
2014 return;
2015 }
2016
2017 WG_TRACE("resp msg received");
2018 wgs = wg_lookup_session_by_index(wg, wgmr->wgmr_receiver, &psref);
2019 if (wgs == NULL) {
2020 WG_TRACE("No session found");
2021 return;
2022 }
2023
2024 wgp = wgs->wgs_peer;
2025
2026 mutex_enter(wgp->wgp_lock);
2027
2028 /* If we weren't waiting for a handshake response, drop it. */
2029 if (wgs->wgs_state != WGS_STATE_INIT_ACTIVE) {
2030 WG_TRACE("peer sent spurious handshake response, ignoring");
2031 goto out;
2032 }
2033
2034 if (__predict_false(wg_is_underload(wg, wgp, WG_MSG_TYPE_RESP))) {
2035 WG_TRACE("under load");
2036 /*
2037 * [W] 5.3: Denial of Service Mitigation & Cookies
2038 * "the responder, ..., and when under load may reject messages
2039 * with an invalid msg.mac2. If the responder receives a
2040 * message with a valid msg.mac1 yet with an invalid msg.mac2,
2041 * and is under load, it may respond with a cookie reply
2042 * message"
2043 */
2044 uint8_t zero[WG_MAC_LEN] = {0};
2045 if (consttime_memequal(wgmr->wgmr_mac2, zero, sizeof(zero))) {
2046 WG_TRACE("sending a cookie message: no cookie included");
2047 wg_send_cookie_msg(wg, wgp, wgmr->wgmr_sender,
2048 wgmr->wgmr_mac1, src);
2049 goto out;
2050 }
2051 if (!wgp->wgp_last_sent_cookie_valid) {
2052 WG_TRACE("sending a cookie message: no cookie sent ever");
2053 wg_send_cookie_msg(wg, wgp, wgmr->wgmr_sender,
2054 wgmr->wgmr_mac1, src);
2055 goto out;
2056 }
2057 uint8_t mac2[WG_MAC_LEN];
2058 wg_algo_mac(mac2, sizeof(mac2), wgp->wgp_last_sent_cookie,
2059 WG_COOKIE_LEN, (const uint8_t *)wgmr,
2060 offsetof(struct wg_msg_resp, wgmr_mac2), NULL, 0);
2061 if (!consttime_memequal(mac2, wgmr->wgmr_mac2, sizeof(mac2))) {
2062 WG_DLOG("mac2 is invalid\n");
2063 goto out;
2064 }
2065 WG_TRACE("under load, but continue to sending");
2066 }
2067
2068 memcpy(hash, wgs->wgs_handshake_hash, sizeof(hash));
2069 memcpy(ckey, wgs->wgs_chaining_key, sizeof(ckey));
2070
2071 /*
2072 * [W] 5.4.3 Second Message: Responder to Initiator
2073 * "When the initiator receives this message, it does the same
2074 * operations so that its final state variables are identical,
2075 * replacing the operands of the DH function to produce equivalent
2076 * values."
2077 * Note that the following comments of operations are just copies of
2078 * the initiator's ones.
2079 */
2080
2081 /* [N] 2.2: "e" */
2082 /* Cr := KDF1(Cr, Er^pub) */
2083 wg_algo_kdf(ckey, NULL, NULL, ckey, wgmr->wgmr_ephemeral,
2084 sizeof(wgmr->wgmr_ephemeral));
2085 /* Hr := HASH(Hr || msg.ephemeral) */
2086 wg_algo_hash(hash, wgmr->wgmr_ephemeral, sizeof(wgmr->wgmr_ephemeral));
2087
2088 WG_DUMP_HASH("ckey", ckey);
2089 WG_DUMP_HASH("hash", hash);
2090
2091 /* [N] 2.2: "ee" */
2092 /* Cr := KDF1(Cr, DH(Er^priv, Ei^pub)) */
2093 wg_algo_dh_kdf(ckey, NULL, wgs->wgs_ephemeral_key_priv,
2094 wgmr->wgmr_ephemeral);
2095
2096 /* [N] 2.2: "se" */
2097 /* Cr := KDF1(Cr, DH(Er^priv, Si^pub)) */
2098 wg_algo_dh_kdf(ckey, NULL, wg->wg_privkey, wgmr->wgmr_ephemeral);
2099
2100 /* [N] 9.2: "psk" */
2101 {
2102 uint8_t kdfout[WG_KDF_OUTPUT_LEN];
2103 /* Cr, r, k := KDF3(Cr, Q) */
2104 wg_algo_kdf(ckey, kdfout, cipher_key, ckey, wgp->wgp_psk,
2105 sizeof(wgp->wgp_psk));
2106 /* Hr := HASH(Hr || r) */
2107 wg_algo_hash(hash, kdfout, sizeof(kdfout));
2108 }
2109
2110 {
2111 uint8_t out[sizeof(wgmr->wgmr_empty)]; /* for safety */
2112 /* msg.empty := AEAD(k, 0, e, Hr) */
2113 error = wg_algo_aead_dec(out, 0, cipher_key, 0, wgmr->wgmr_empty,
2114 sizeof(wgmr->wgmr_empty), hash, sizeof(hash));
2115 WG_DUMP_HASH("wgmr_empty", wgmr->wgmr_empty);
2116 if (error != 0) {
2117 WG_LOG_RATECHECK(&wgp->wgp_ppsratecheck, LOG_DEBUG,
2118 "%s: peer %s: wg_algo_aead_dec for empty message failed\n",
2119 if_name(&wg->wg_if), wgp->wgp_name);
2120 goto out;
2121 }
2122 /* Hr := HASH(Hr || msg.empty) */
2123 wg_algo_hash(hash, wgmr->wgmr_empty, sizeof(wgmr->wgmr_empty));
2124 }
2125
2126 memcpy(wgs->wgs_handshake_hash, hash, sizeof(wgs->wgs_handshake_hash));
2127 memcpy(wgs->wgs_chaining_key, ckey, sizeof(wgs->wgs_chaining_key));
2128 wgs->wgs_remote_index = wgmr->wgmr_sender;
2129 WG_DLOG("receiver=%x\n", wgs->wgs_remote_index);
2130
2131 KASSERTMSG(wgs->wgs_state == WGS_STATE_INIT_ACTIVE, "state=%d",
2132 wgs->wgs_state);
2133 wgs->wgs_time_established = time_uptime32;
2134 wg_schedule_session_dtor_timer(wgp);
2135 wgs->wgs_time_last_data_sent = 0;
2136 wgs->wgs_is_initiator = true;
2137 WG_DLOG("session[L=%"PRIx32" R=%"PRIx32"]:"
2138 " calculate keys as initiator\n",
2139 wgs->wgs_local_index, wgs->wgs_remote_index);
2140 wg_calculate_keys(wgs, true);
2141 wg_clear_states(wgs);
2142
2143 /*
2144 * Session is ready to receive data now that we have received
2145 * the responder's response.
2146 *
2147 * Transition from INIT_ACTIVE to ESTABLISHED to publish it to
2148 * the data rx path, wg_handle_msg_data.
2149 */
2150 WG_DLOG("session[L=%"PRIx32" R=%"PRIx32" -> WGS_STATE_ESTABLISHED\n",
2151 wgs->wgs_local_index, wgs->wgs_remote_index);
2152 atomic_store_release(&wgs->wgs_state, WGS_STATE_ESTABLISHED);
2153 WG_TRACE("WGS_STATE_ESTABLISHED");
2154
2155 callout_halt(&wgp->wgp_handshake_timeout_timer, NULL);
2156
2157 /*
2158 * Session is ready to send data now that we have received the
2159 * responder's response.
2160 *
2161 * Swap the sessions to publish the new one as the stable
2162 * session for the data tx path, wg_output.
2163 */
2164 wg_swap_sessions(wgp);
2165 KASSERT(wgs == wgp->wgp_session_stable);
2166 wgs_prev = wgp->wgp_session_unstable;
2167 getnanotime(&wgp->wgp_last_handshake_time);
2168 wgp->wgp_handshake_start_time = 0;
2169 wgp->wgp_last_sent_mac1_valid = false;
2170 wgp->wgp_last_sent_cookie_valid = false;
2171
2172 wg_update_endpoint_if_necessary(wgp, src);
2173
2174 /*
2175 * If we had a data packet queued up, send it; otherwise send a
2176 * keepalive message -- either way we have to send something
2177 * immediately or else the responder will never answer.
2178 */
2179 if ((m = atomic_swap_ptr(&wgp->wgp_pending, NULL)) != NULL) {
2180 kpreempt_disable();
2181 const uint32_t h = curcpu()->ci_index; // pktq_rps_hash(m)
2182 M_SETCTX(m, wgp);
2183 if (__predict_false(!pktq_enqueue(wg_pktq, m, h))) {
2184 WGLOG(LOG_ERR, "%s: pktq full, dropping\n",
2185 if_name(&wg->wg_if));
2186 m_freem(m);
2187 }
2188 kpreempt_enable();
2189 } else {
2190 wg_send_keepalive_msg(wgp, wgs);
2191 }
2192
2193 if (wgs_prev->wgs_state == WGS_STATE_ESTABLISHED) {
2194 /*
2195 * Transition ESTABLISHED->DESTROYING. The session
2196 * will remain usable for the data rx path to process
2197 * packets still in flight to us, but we won't use it
2198 * for data tx.
2199 */
2200 WG_DLOG("session[L=%"PRIx32" R=%"PRIx32"]"
2201 " -> WGS_STATE_DESTROYING\n",
2202 wgs_prev->wgs_local_index, wgs_prev->wgs_remote_index);
2203 atomic_store_relaxed(&wgs_prev->wgs_state,
2204 WGS_STATE_DESTROYING);
2205 } else {
2206 KASSERTMSG(wgs_prev->wgs_state == WGS_STATE_UNKNOWN,
2207 "state=%d", wgs_prev->wgs_state);
2208 }
2209
2210 out:
2211 mutex_exit(wgp->wgp_lock);
2212 wg_put_session(wgs, &psref);
2213 }
2214
/*
 * wg_send_handshake_msg_resp(wg, wgp, wgs, wgmi)
 *
 *	Allocate an mbuf, build a handshake response to the received
 *	init message wgmi into it, and hand it to the transport.  The
 *	mbuf is consumed by send_hs_msg whether or not it succeeds.
 *
 *	Caller must hold wgp->wgp_lock; wgs must be the peer's unstable
 *	session in the UNKNOWN state (wg_fill_msg_resp advances it).
 */
static void
wg_send_handshake_msg_resp(struct wg_softc *wg, struct wg_peer *wgp,
    struct wg_session *wgs, const struct wg_msg_init *wgmi)
{
	int error;
	struct mbuf *m;
	struct wg_msg_resp *wgmr;

	KASSERT(mutex_owned(wgp->wgp_lock));
	KASSERT(wgs == wgp->wgp_session_unstable);
	KASSERTMSG(wgs->wgs_state == WGS_STATE_UNKNOWN, "state=%d",
	    wgs->wgs_state);

	m = m_gethdr(M_WAIT, MT_DATA);
	if (sizeof(*wgmr) > MHLEN) {
		/* Message won't fit in the header mbuf; add a cluster. */
		m_clget(m, M_WAIT);
		CTASSERT(sizeof(*wgmr) <= MCLBYTES);
	}
	m->m_pkthdr.len = m->m_len = sizeof(*wgmr);
	wgmr = mtod(m, struct wg_msg_resp *);
	wg_fill_msg_resp(wg, wgp, wgs, wgmr, wgmi);

	error = wg->wg_ops->send_hs_msg(wgp, m); /* consumes m */
	if (error) {
		WG_DLOG("send_hs_msg failed, error=%d\n", error);
		return;
	}

	WG_TRACE("resp msg sent");
}
2245
2246 static struct wg_peer *
2247 wg_lookup_peer_by_pubkey(struct wg_softc *wg,
2248 const uint8_t pubkey[WG_STATIC_KEY_LEN], struct psref *psref)
2249 {
2250 struct wg_peer *wgp;
2251
2252 int s = pserialize_read_enter();
2253 wgp = thmap_get(wg->wg_peers_bypubkey, pubkey, WG_STATIC_KEY_LEN);
2254 if (wgp != NULL)
2255 wg_get_peer(wgp, psref);
2256 pserialize_read_exit(s);
2257
2258 return wgp;
2259 }
2260
/*
 * wg_fill_msg_cookie(wg, wgp, wgmc, sender, mac1, src)
 *
 *	Build a cookie reply message ([W] 5.4.7) in *wgmc for the
 *	handshake message with the given sender index and mac1,
 *	received from source address src.  The cookie is a MAC of the
 *	source IP/port under a periodically rotated secret, encrypted
 *	with XChaCha20-Poly1305 keyed by a hash of our public key and
 *	authenticated against the peer's mac1.
 *
 *	Caller must hold wgp->wgp_lock.
 */
static void
wg_fill_msg_cookie(struct wg_softc *wg, struct wg_peer *wgp,
    struct wg_msg_cookie *wgmc, const uint32_t sender,
    const uint8_t mac1[WG_MAC_LEN], const struct sockaddr *src)
{
	uint8_t cookie[WG_COOKIE_LEN];
	uint8_t key[WG_HASH_LEN];
	uint8_t addr[sizeof(struct in6_addr)];
	size_t addrlen;
	uint16_t uh_sport; /* be */

	KASSERT(mutex_owned(wgp->wgp_lock));

	wgmc->wgmc_type = htole32(WG_MSG_TYPE_COOKIE);
	wgmc->wgmc_receiver = sender;
	cprng_fast(wgmc->wgmc_salt, sizeof(wgmc->wgmc_salt));

	/*
	 * [W] 5.4.7: Under Load: Cookie Reply Message
	 * "The secret variable, Rm, changes every two minutes to a
	 * random value"
	 */
	if ((time_uptime - wgp->wgp_last_cookiesecret_time) >
	    WG_COOKIESECRET_TIME) {
		cprng_strong(kern_cprng, wgp->wgp_cookiesecret,
		    sizeof(wgp->wgp_cookiesecret), 0);
		wgp->wgp_last_cookiesecret_time = time_uptime;
	}

	/* Extract the address and (network-order) port to MAC. */
	switch (src->sa_family) {
	case AF_INET: {
		const struct sockaddr_in *sin = satocsin(src);
		addrlen = sizeof(sin->sin_addr);
		memcpy(addr, &sin->sin_addr, addrlen);
		uh_sport = sin->sin_port;
		break;
	}
#ifdef INET6
	case AF_INET6: {
		const struct sockaddr_in6 *sin6 = satocsin6(src);
		addrlen = sizeof(sin6->sin6_addr);
		memcpy(addr, &sin6->sin6_addr, addrlen);
		uh_sport = sin6->sin6_port;
		break;
	}
#endif
	default:
		panic("invalid af=%d", src->sa_family);
	}

	/* cookie := MAC(Rm, addr || port); msg.cookie := XAEAD(...) */
	wg_algo_mac(cookie, sizeof(cookie),
	    wgp->wgp_cookiesecret, sizeof(wgp->wgp_cookiesecret),
	    addr, addrlen, (const uint8_t *)&uh_sport, sizeof(uh_sport));
	wg_algo_mac_cookie(key, sizeof(key), wg->wg_pubkey,
	    sizeof(wg->wg_pubkey));
	wg_algo_xaead_enc(wgmc->wgmc_cookie, sizeof(wgmc->wgmc_cookie), key,
	    cookie, sizeof(cookie), mac1, WG_MAC_LEN, wgmc->wgmc_salt);

	/* Need to store to calculate mac2 */
	memcpy(wgp->wgp_last_sent_cookie, cookie, sizeof(cookie));
	wgp->wgp_last_sent_cookie_valid = true;
}
2323
2324 static void
2325 wg_send_cookie_msg(struct wg_softc *wg, struct wg_peer *wgp,
2326 const uint32_t sender, const uint8_t mac1[WG_MAC_LEN],
2327 const struct sockaddr *src)
2328 {
2329 int error;
2330 struct mbuf *m;
2331 struct wg_msg_cookie *wgmc;
2332
2333 KASSERT(mutex_owned(wgp->wgp_lock));
2334
2335 m = m_gethdr(M_WAIT, MT_DATA);
2336 if (sizeof(*wgmc) > MHLEN) {
2337 m_clget(m, M_WAIT);
2338 CTASSERT(sizeof(*wgmc) <= MCLBYTES);
2339 }
2340 m->m_pkthdr.len = m->m_len = sizeof(*wgmc);
2341 wgmc = mtod(m, struct wg_msg_cookie *);
2342 wg_fill_msg_cookie(wg, wgp, wgmc, sender, mac1, src);
2343
2344 error = wg->wg_ops->send_hs_msg(wgp, m); /* consumes m */
2345 if (error) {
2346 WG_DLOG("send_hs_msg failed, error=%d\n", error);
2347 return;
2348 }
2349
2350 WG_TRACE("cookie msg sent");
2351 }
2352
2353 static bool
2354 wg_is_underload(struct wg_softc *wg, struct wg_peer *wgp, int msgtype)
2355 {
2356 #ifdef WG_DEBUG_PARAMS
2357 if (wg_force_underload)
2358 return true;
2359 #endif
2360
2361 /*
2362 * XXX we don't have a means of a load estimation. The purpose of
2363 * the mechanism is a DoS mitigation, so we consider frequent handshake
2364 * messages as (a kind of) load; if a message of the same type comes
2365 * to a peer within 1 second, we consider we are under load.
2366 */
2367 time_t last = wgp->wgp_last_msg_received_time[msgtype];
2368 wgp->wgp_last_msg_received_time[msgtype] = time_uptime;
2369 return (time_uptime - last) == 0;
2370 }
2371
2372 static void
2373 wg_calculate_keys(struct wg_session *wgs, const bool initiator)
2374 {
2375
2376 KASSERT(mutex_owned(wgs->wgs_peer->wgp_lock));
2377
2378 /*
2379 * [W] 5.4.5: Ti^send = Tr^recv, Ti^recv = Tr^send := KDF2(Ci = Cr, e)
2380 */
2381 if (initiator) {
2382 wg_algo_kdf(wgs->wgs_tkey_send, wgs->wgs_tkey_recv, NULL,
2383 wgs->wgs_chaining_key, NULL, 0);
2384 } else {
2385 wg_algo_kdf(wgs->wgs_tkey_recv, wgs->wgs_tkey_send, NULL,
2386 wgs->wgs_chaining_key, NULL, 0);
2387 }
2388 WG_DUMP_HASH("wgs_tkey_send", wgs->wgs_tkey_send);
2389 WG_DUMP_HASH("wgs_tkey_recv", wgs->wgs_tkey_recv);
2390 }
2391
/*
 * wg_session_get_send_counter(wgs)
 *
 *	Return the session's current 64-bit send counter.  On
 *	platforms with 64-bit atomic load/store this is lock-free;
 *	otherwise the counter is protected by wgs_send_counter_lock.
 */
static uint64_t
wg_session_get_send_counter(struct wg_session *wgs)
{
#ifdef __HAVE_ATOMIC64_LOADSTORE
	return atomic_load_relaxed(&wgs->wgs_send_counter);
#else
	uint64_t send_counter;

	mutex_enter(&wgs->wgs_send_counter_lock);
	send_counter = wgs->wgs_send_counter;
	mutex_exit(&wgs->wgs_send_counter_lock);

	return send_counter;
#endif
}
2407
/*
 * wg_session_inc_send_counter(wgs)
 *
 *	Atomically post-increment the session's send counter and
 *	return its previous value (the nonce to use for this packet).
 */
static uint64_t
wg_session_inc_send_counter(struct wg_session *wgs)
{
#ifdef __HAVE_ATOMIC64_LOADSTORE
	/* inc-and-fetch minus one yields the pre-increment value */
	return atomic_inc_64_nv(&wgs->wgs_send_counter) - 1;
#else
	uint64_t send_counter;

	mutex_enter(&wgs->wgs_send_counter_lock);
	send_counter = wgs->wgs_send_counter++;
	mutex_exit(&wgs->wgs_send_counter_lock);

	return send_counter;
#endif
}
2423
/*
 * wg_clear_states(wgs)
 *
 *	Reset the session's send counter and replay window and zeroize
 *	the handshake intermediates (hash, chaining key, ephemeral
 *	keys) that are no longer needed once the transport keys are
 *	derived.  explicit_memset is used so the compiler cannot
 *	optimize the secret-erasure away.
 *
 *	Caller must hold the peer's wgp_lock.
 */
static void
wg_clear_states(struct wg_session *wgs)
{

	KASSERT(mutex_owned(wgs->wgs_peer->wgp_lock));

	wgs->wgs_send_counter = 0;
	sliwin_reset(&wgs->wgs_recvwin->window);

#define wgs_clear(v)	explicit_memset(wgs->wgs_##v, 0, sizeof(wgs->wgs_##v))
	wgs_clear(handshake_hash);
	wgs_clear(chaining_key);
	wgs_clear(ephemeral_key_pub);
	wgs_clear(ephemeral_key_priv);
	wgs_clear(ephemeral_key_peer);
#undef wgs_clear
}
2441
/*
 * wg_lookup_session_by_index(wg, index, psref)
 *
 *	Look up the session with the given local index and acquire a
 *	passive reference on it.  Returns NULL if no session has that
 *	index.  Caller releases the reference with wg_put_session.
 */
static struct wg_session *
wg_lookup_session_by_index(struct wg_softc *wg, const uint32_t index,
    struct psref *psref)
{
	struct wg_session *wgs;

	int s = pserialize_read_enter();
	wgs = thmap_get(wg->wg_sessions_byindex, &index, sizeof index);
	if (wgs != NULL) {
		/* The map entry must agree with the session's own index. */
		uint32_t oindex __diagused =
		    atomic_load_relaxed(&wgs->wgs_local_index);
		KASSERTMSG(index == oindex,
		    "index=%"PRIx32" wgs->wgs_local_index=%"PRIx32,
		    index, oindex);
		psref_acquire(psref, &wgs->wgs_psref, wg_psref_class);
	}
	pserialize_read_exit(s);

	return wgs;
}
2462
/*
 * wg_send_keepalive_msg(wgp, wgs)
 *
 *	Send a keepalive to the peer over the given session: a data
 *	message with an empty (zero-length) inner packet.
 */
static void
wg_send_keepalive_msg(struct wg_peer *wgp, struct wg_session *wgs)
{
	struct mbuf *m;

	/*
	 * [W] 6.5 Passive Keepalive
	 * "A keepalive message is simply a transport data message with
	 * a zero-length encapsulated encrypted inner-packet."
	 */
	WG_TRACE("");
	m = m_gethdr(M_WAIT, MT_DATA);
	wg_send_data_msg(wgp, wgs, m);
}
2477
2478 static bool
2479 wg_need_to_send_init_message(struct wg_session *wgs)
2480 {
2481 /*
2482 * [W] 6.2 Transport Message Limits
2483 * "if a peer is the initiator of a current secure session,
2484 * WireGuard will send a handshake initiation message to begin
2485 * a new secure session ... if after receiving a transport data
2486 * message, the current secure session is (REJECT-AFTER-TIME
2487 * KEEPALIVE-TIMEOUT REKEY-TIMEOUT) seconds old and it has
2488 * not yet acted upon this event."
2489 */
2490 return wgs->wgs_is_initiator &&
2491 atomic_load_relaxed(&wgs->wgs_time_last_data_sent) == 0 &&
2492 ((time_uptime32 -
2493 atomic_load_relaxed(&wgs->wgs_time_established)) >=
2494 (wg_reject_after_time - wg_keepalive_timeout -
2495 wg_rekey_timeout));
2496 }
2497
/*
 * wg_schedule_peer_task(wgp, task)
 *
 *	Set the given task bit(s) for the peer and, if no tasks were
 *	already pending, enqueue the peer's work on the wg workqueue.
 *	The bit is OR'd after the enqueue decision so the worker,
 *	which clears wgp_tasks under the same lock, never misses it.
 */
static void
wg_schedule_peer_task(struct wg_peer *wgp, unsigned int task)
{

	mutex_enter(wgp->wgp_intr_lock);
	WG_DLOG("tasks=%d, task=%d\n", wgp->wgp_tasks, task);
	if (wgp->wgp_tasks == 0)
		/*
		 * XXX If the current CPU is already loaded -- e.g., if
		 * there's already a bunch of handshakes queued up --
		 * consider tossing this over to another CPU to
		 * distribute the load.
		 */
		workqueue_enqueue(wg_wq, &wgp->wgp_work, NULL);
	wgp->wgp_tasks |= task;
	mutex_exit(wgp->wgp_intr_lock);
}
2515
/*
 * wg_change_endpoint(wgp, new)
 *
 *	Record the peer's new endpoint address by filling the spare
 *	endpoint slot and publishing it with a release-store (readers
 *	use wg_get_endpoint_sa), then schedule a task to react to the
 *	change.
 */
static void
wg_change_endpoint(struct wg_peer *wgp, const struct sockaddr *new)
{
	struct wg_sockaddr *wgsa_prev;

	WG_TRACE("Changing endpoint");

	memcpy(wgp->wgp_endpoint0, new, new->sa_len);
	wgsa_prev = wgp->wgp_endpoint;
	/* Publish the filled slot; the old one becomes the spare. */
	atomic_store_release(&wgp->wgp_endpoint, wgp->wgp_endpoint0);
	wgp->wgp_endpoint0 = wgsa_prev;
	atomic_store_release(&wgp->wgp_endpoint_available, true);

	wg_schedule_peer_task(wgp, WGP_TASK_ENDPOINT_CHANGED);
}
2531
2532 static bool
2533 wg_validate_inner_packet(const char *packet, size_t decrypted_len, int *af)
2534 {
2535 uint16_t packet_len;
2536 const struct ip *ip;
2537
2538 if (__predict_false(decrypted_len < sizeof(*ip))) {
2539 WG_DLOG("decrypted_len=%zu < %zu\n", decrypted_len,
2540 sizeof(*ip));
2541 return false;
2542 }
2543
2544 ip = (const struct ip *)packet;
2545 if (ip->ip_v == 4)
2546 *af = AF_INET;
2547 else if (ip->ip_v == 6)
2548 *af = AF_INET6;
2549 else {
2550 WG_DLOG("ip_v=%d\n", ip->ip_v);
2551 return false;
2552 }
2553
2554 WG_DLOG("af=%d\n", *af);
2555
2556 switch (*af) {
2557 #ifdef INET
2558 case AF_INET:
2559 packet_len = ntohs(ip->ip_len);
2560 break;
2561 #endif
2562 #ifdef INET6
2563 case AF_INET6: {
2564 const struct ip6_hdr *ip6;
2565
2566 if (__predict_false(decrypted_len < sizeof(*ip6))) {
2567 WG_DLOG("decrypted_len=%zu < %zu\n", decrypted_len,
2568 sizeof(*ip6));
2569 return false;
2570 }
2571
2572 ip6 = (const struct ip6_hdr *)packet;
2573 packet_len = sizeof(*ip6) + ntohs(ip6->ip6_plen);
2574 break;
2575 }
2576 #endif
2577 default:
2578 return false;
2579 }
2580
2581 if (packet_len > decrypted_len) {
2582 WG_DLOG("packet_len %u > decrypted_len %zu\n", packet_len,
2583 decrypted_len);
2584 return false;
2585 }
2586
2587 return true;
2588 }
2589
/*
 * wg_validate_route(wg, wgp_expected, af, packet)
 *
 *	Cryptokey routing check for a received inner packet: resolve
 *	the packet's inner source address in the allowed-IPs table and
 *	return true only if it maps to the peer whose session decrypted
 *	it.
 *
 *	NOTE(review): callers only pass af values accepted by
 *	wg_validate_inner_packet, which rejects AF_INET6 when INET6 is
 *	not configured -- otherwise sa would be used uninitialized in
 *	that build; confirm if callers change.
 */
static bool
wg_validate_route(struct wg_softc *wg, struct wg_peer *wgp_expected,
    int af, char *packet)
{
	struct sockaddr_storage ss;
	struct sockaddr *sa;
	struct psref psref;
	struct wg_peer *wgp;
	bool ok;

	/*
	 * II CRYPTOKEY ROUTING
	 * "it will only accept it if its source IP resolves in the
	 * table to the public key used in the secure session for
	 * decrypting it."
	 */

	if (af == AF_INET) {
		const struct ip *ip = (const struct ip *)packet;
		struct sockaddr_in *sin = (struct sockaddr_in *)&ss;
		sockaddr_in_init(sin, &ip->ip_src, 0);
		sa = sintosa(sin);
#ifdef INET6
	} else {
		const struct ip6_hdr *ip6 = (const struct ip6_hdr *)packet;
		struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&ss;
		sockaddr_in6_init(sin6, &ip6->ip6_src, 0, 0, 0);
		sa = sin6tosa(sin6);
#endif
	}

	wgp = wg_pick_peer_by_sa(wg, sa, &psref);
	ok = (wgp == wgp_expected);
	if (wgp != NULL)
		wg_put_peer(wgp, &psref);

	return ok;
}
2628
/*
 * wg_session_dtor_timer(arg)
 *
 *	Callout handler for the periodic session destructor: re-arm
 *	the timer and punt the actual destruction of the previous
 *	session to the peer's task worker.
 */
static void
wg_session_dtor_timer(void *arg)
{
	struct wg_peer *wgp = arg;

	WG_TRACE("enter");

	wg_schedule_session_dtor_timer(wgp);
	wg_schedule_peer_task(wgp, WGP_TASK_DESTROY_PREV_SESSION);
}
2639
2640 static void
2641 wg_schedule_session_dtor_timer(struct wg_peer *wgp)
2642 {
2643
2644 /*
2645 * If the periodic session destructor is already pending to
2646 * handle the previous session, that's fine -- leave it in
2647 * place; it will be scheduled again.
2648 */
2649 if (callout_pending(&wgp->wgp_session_dtor_timer)) {
2650 WG_DLOG("session dtor already pending\n");
2651 return;
2652 }
2653
2654 WG_DLOG("scheduling session dtor in %u secs\n", wg_reject_after_time);
2655 callout_schedule(&wgp->wgp_session_dtor_timer,
2656 wg_reject_after_time*hz);
2657 }
2658
2659 static bool
2660 sockaddr_port_match(const struct sockaddr *sa1, const struct sockaddr *sa2)
2661 {
2662 if (sa1->sa_family != sa2->sa_family)
2663 return false;
2664
2665 switch (sa1->sa_family) {
2666 #ifdef INET
2667 case AF_INET:
2668 return satocsin(sa1)->sin_port == satocsin(sa2)->sin_port;
2669 #endif
2670 #ifdef INET6
2671 case AF_INET6:
2672 return satocsin6(sa1)->sin6_port == satocsin6(sa2)->sin6_port;
2673 #endif
2674 default:
2675 return false;
2676 }
2677 }
2678
/*
 * wg_update_endpoint_if_necessary(wgp, src)
 *
 *	After a packet has authenticated, compare its outer source
 *	address src against the peer's current endpoint and switch the
 *	endpoint to src if the address or port differs ([W] III).  The
 *	wgp_endpoint_changing swap ensures only one change is in
 *	flight at a time.
 */
static void
wg_update_endpoint_if_necessary(struct wg_peer *wgp,
    const struct sockaddr *src)
{
	struct wg_sockaddr *wgsa;
	struct psref psref;

	wgsa = wg_get_endpoint_sa(wgp, &psref);

#ifdef WG_DEBUG_LOG
	char oldaddr[128], newaddr[128];
	sockaddr_format(wgsatosa(wgsa), oldaddr, sizeof(oldaddr));
	sockaddr_format(src, newaddr, sizeof(newaddr));
	WG_DLOG("old=%s, new=%s\n", oldaddr, newaddr);
#endif

	/*
	 * III: "Since the packet has authenticated correctly, the source IP of
	 * the outer UDP/IP packet is used to update the endpoint for peer..."
	 */
	if (__predict_false(sockaddr_cmp(src, wgsatosa(wgsa)) != 0 ||
		!sockaddr_port_match(src, wgsatosa(wgsa)))) {
		/* XXX We can't change the endpoint twice in a short period */
		if (atomic_swap_uint(&wgp->wgp_endpoint_changing, 1) == 0) {
			wg_change_endpoint(wgp, src);
		}
	}

	wg_put_sa(wgp, wgsa, &psref);
}
2709
/*
 * wg_handle_msg_data(wg, m, src)
 *
 *	Handle an incoming transport data message: look up the session
 *	by receiver index, check session age and the replay window,
 *	decrypt and authenticate the payload, update the peer's
 *	endpoint, validate and inject the inner packet into the
 *	network stack, and drive the session state machine (establish
 *	an INIT_PASSIVE session, trigger rekey or keepalive as
 *	needed).  Consumes m on all paths.
 */
static void __noinline
wg_handle_msg_data(struct wg_softc *wg, struct mbuf *m,
    const struct sockaddr *src)
{
	struct wg_msg_data *wgmd;
	char *encrypted_buf = NULL, *decrypted_buf;
	size_t encrypted_len, decrypted_len;
	struct wg_session *wgs;
	struct wg_peer *wgp;
	int state;
	uint32_t age;
	size_t mlen;
	struct psref psref;
	int error, af;
	bool success, free_encrypted_buf = false, ok;
	struct mbuf *n;

	KASSERT(m->m_len >= sizeof(struct wg_msg_data));
	wgmd = mtod(m, struct wg_msg_data *);

	KASSERT(wgmd->wgmd_type == htole32(WG_MSG_TYPE_DATA));
	WG_TRACE("data");

	/* Find the putative session, or drop. */
	wgs = wg_lookup_session_by_index(wg, wgmd->wgmd_receiver, &psref);
	if (wgs == NULL) {
		WG_TRACE("No session found");
		m_freem(m);
		return;
	}

	/*
	 * We are only ready to handle data when in INIT_PASSIVE,
	 * ESTABLISHED, or DESTROYING.  All transitions out of that
	 * state dissociate the session index and drain psrefs.
	 *
	 * atomic_load_acquire matches atomic_store_release in either
	 * wg_handle_msg_init or wg_handle_msg_resp.  (The transition
	 * INIT_PASSIVE to ESTABLISHED in wg_task_establish_session
	 * doesn't make a difference for this rx path.)
	 */
	state = atomic_load_acquire(&wgs->wgs_state);
	switch (state) {
	case WGS_STATE_UNKNOWN:
	case WGS_STATE_INIT_ACTIVE:
		WG_TRACE("not yet ready for data");
		goto out;
	case WGS_STATE_INIT_PASSIVE:
	case WGS_STATE_ESTABLISHED:
	case WGS_STATE_DESTROYING:
		break;
	}

	/*
	 * Reject if the session is too old.
	 */
	age = time_uptime32 - atomic_load_relaxed(&wgs->wgs_time_established);
	if (__predict_false(age >= wg_reject_after_time)) {
		WG_DLOG("session %"PRIx32" too old, %"PRIu32" sec\n",
		    wgmd->wgmd_receiver, age);
		goto out;
	}

	/*
	 * Get the peer, for rate-limited logs (XXX MPSAFE, dtrace) and
	 * to update the endpoint if authentication succeeds.
	 */
	wgp = wgs->wgs_peer;

	/*
	 * Reject outrageously wrong sequence numbers before doing any
	 * crypto work or taking any locks.
	 */
	error = sliwin_check_fast(&wgs->wgs_recvwin->window,
	    le64toh(wgmd->wgmd_counter));
	if (error) {
		WG_LOG_RATECHECK(&wgp->wgp_ppsratecheck, LOG_DEBUG,
		    "%s: peer %s: out-of-window packet: %"PRIu64"\n",
		    if_name(&wg->wg_if), wgp->wgp_name,
		    le64toh(wgmd->wgmd_counter));
		goto out;
	}

	/* Ensure the payload and authenticator are contiguous. */
	mlen = m_length(m);
	encrypted_len = mlen - sizeof(*wgmd);
	if (encrypted_len < WG_AUTHTAG_LEN) {
		WG_DLOG("Short encrypted_len: %zu\n", encrypted_len);
		goto out;
	}
	success = m_ensure_contig(&m, sizeof(*wgmd) + encrypted_len);
	if (success) {
		encrypted_buf = mtod(m, char *) + sizeof(*wgmd);
	} else {
		/* Fall back to a flat copy of the ciphertext. */
		encrypted_buf = kmem_intr_alloc(encrypted_len, KM_NOSLEEP);
		if (encrypted_buf == NULL) {
			WG_DLOG("failed to allocate encrypted_buf\n");
			goto out;
		}
		m_copydata(m, sizeof(*wgmd), encrypted_len, encrypted_buf);
		free_encrypted_buf = true;
	}
	/* m_ensure_contig may change m regardless of its result */
	KASSERT(m->m_len >= sizeof(*wgmd));
	wgmd = mtod(m, struct wg_msg_data *);

#ifdef WG_DEBUG_PACKET
	if (wg_debug & WG_DEBUG_FLAGS_PACKET) {
		hexdump(printf, "incoming packet", encrypted_buf,
		    encrypted_len);
	}
#endif
	/*
	 * Get a buffer for the plaintext.  Add WG_AUTHTAG_LEN to avoid
	 * a zero-length buffer (XXX).  Drop if plaintext is longer
	 * than MCLBYTES (XXX).
	 */
	decrypted_len = encrypted_len - WG_AUTHTAG_LEN;
	if (decrypted_len > MCLBYTES) {
		/* FIXME handle larger data than MCLBYTES */
		WG_DLOG("couldn't handle larger data than MCLBYTES\n");
		goto out;
	}
	n = wg_get_mbuf(0, decrypted_len + WG_AUTHTAG_LEN);
	if (n == NULL) {
		WG_DLOG("wg_get_mbuf failed\n");
		goto out;
	}
	decrypted_buf = mtod(n, char *);

	/* Decrypt and verify the packet. */
	WG_DLOG("mlen=%zu, encrypted_len=%zu\n", mlen, encrypted_len);
	error = wg_algo_aead_dec(decrypted_buf,
	    encrypted_len - WG_AUTHTAG_LEN /* can be 0 */,
	    wgs->wgs_tkey_recv, le64toh(wgmd->wgmd_counter), encrypted_buf,
	    encrypted_len, NULL, 0);
	if (error != 0) {
		WG_LOG_RATECHECK(&wgp->wgp_ppsratecheck, LOG_DEBUG,
		    "%s: peer %s: failed to wg_algo_aead_dec\n",
		    if_name(&wg->wg_if), wgp->wgp_name);
		m_freem(n);
		goto out;
	}
	WG_DLOG("outsize=%u\n", (u_int)decrypted_len);

	/* Packet is genuine.  Reject it if a replay or just too old. */
	mutex_enter(&wgs->wgs_recvwin->lock);
	error = sliwin_update(&wgs->wgs_recvwin->window,
	    le64toh(wgmd->wgmd_counter));
	mutex_exit(&wgs->wgs_recvwin->lock);
	if (error) {
		WG_LOG_RATECHECK(&wgp->wgp_ppsratecheck, LOG_DEBUG,
		    "%s: peer %s: replay or out-of-window packet: %"PRIu64"\n",
		    if_name(&wg->wg_if), wgp->wgp_name,
		    le64toh(wgmd->wgmd_counter));
		m_freem(n);
		goto out;
	}

#ifdef WG_DEBUG_PACKET
	if (wg_debug & WG_DEBUG_FLAGS_PACKET) {
		hexdump(printf, "tkey_recv", wgs->wgs_tkey_recv,
		    sizeof(wgs->wgs_tkey_recv));
		hexdump(printf, "wgmd", wgmd, sizeof(*wgmd));
		hexdump(printf, "decrypted_buf", decrypted_buf,
		    decrypted_len);
	}
#endif
	/* We're done with m now; free it and chuck the pointers. */
	m_freem(m);
	m = NULL;
	wgmd = NULL;

	/*
	 * The packet is genuine.  Update the peer's endpoint if the
	 * source address changed.
	 *
	 * XXX How to prevent DoS by replaying genuine packets from the
	 * wrong source address?
	 */
	wg_update_endpoint_if_necessary(wgp, src);

	/*
	 * Validate the encapsulated packet header and get the address
	 * family, or drop.
	 */
	ok = wg_validate_inner_packet(decrypted_buf, decrypted_len, &af);
	if (!ok) {
		m_freem(n);
		goto update_state;
	}

	/* Submit it into our network stack if routable. */
	ok = wg_validate_route(wg, wgp, af, decrypted_buf);
	if (ok) {
		wg->wg_ops->input(&wg->wg_if, n, af);
	} else {
		char addrstr[INET6_ADDRSTRLEN];
		memset(addrstr, 0, sizeof(addrstr));
		if (af == AF_INET) {
			const struct ip *ip = (const struct ip *)decrypted_buf;
			IN_PRINT(addrstr, &ip->ip_src);
#ifdef INET6
		} else if (af == AF_INET6) {
			const struct ip6_hdr *ip6 =
			    (const struct ip6_hdr *)decrypted_buf;
			IN6_PRINT(addrstr, &ip6->ip6_src);
#endif
		}
		WG_LOG_RATECHECK(&wgp->wgp_ppsratecheck, LOG_DEBUG,
		    "%s: peer %s: invalid source address (%s)\n",
		    if_name(&wg->wg_if), wgp->wgp_name, addrstr);
		m_freem(n);
		/*
		 * The inner address is invalid however the session is valid
		 * so continue the session processing below.
		 */
	}
	n = NULL;

update_state:
	/* Update the state machine if necessary. */
	if (__predict_false(state == WGS_STATE_INIT_PASSIVE)) {
		/*
		 * We were waiting for the initiator to send their
		 * first data transport message, and that has happened.
		 * Schedule a task to establish this session.
		 */
		wg_schedule_peer_task(wgp, WGP_TASK_ESTABLISH_SESSION);
	} else {
		if (__predict_false(wg_need_to_send_init_message(wgs))) {
			wg_schedule_peer_task(wgp, WGP_TASK_SEND_INIT_MESSAGE);
		}
		/*
		 * [W] 6.5 Passive Keepalive
		 * "If a peer has received a validly-authenticated transport
		 * data message (section 5.4.6), but does not have any packets
		 * itself to send back for KEEPALIVE-TIMEOUT seconds, it sends
		 * a keepalive message."
		 */
		const uint32_t now = time_uptime32;
		const uint32_t time_last_data_sent =
		    atomic_load_relaxed(&wgs->wgs_time_last_data_sent);
		WG_DLOG("time_uptime32=%"PRIu32
		    " wgs_time_last_data_sent=%"PRIu32"\n",
		    now, time_last_data_sent);
		if ((now - time_last_data_sent) >= wg_keepalive_timeout) {
			WG_TRACE("Schedule sending keepalive message");
			/*
			 * We can't send a keepalive message here to avoid
			 * a deadlock;  we already hold the solock of a socket
			 * that is used to send the message.
			 */
			wg_schedule_peer_task(wgp,
			    WGP_TASK_SEND_KEEPALIVE_MESSAGE);
		}
	}
out:
	wg_put_session(wgs, &psref);
	m_freem(m);
	if (free_encrypted_buf)
		kmem_intr_free(encrypted_buf, encrypted_len);
}
2973
/*
 * wg_handle_msg_cookie(wg, wgmc)
 *
 *	Handle an incoming cookie reply message ([W] 5.4.7/6.6): look
 *	up the session by receiver index, decrypt the cookie using our
 *	last-sent mac1 as the authenticated data, and store it for use
 *	in mac2 of the next handshake retry.
 */
static void __noinline
wg_handle_msg_cookie(struct wg_softc *wg, const struct wg_msg_cookie *wgmc)
{
	struct wg_session *wgs;
	struct wg_peer *wgp;
	struct psref psref;
	int error;
	uint8_t key[WG_HASH_LEN];
	uint8_t cookie[WG_COOKIE_LEN];

	WG_TRACE("cookie msg received");

	/* Find the putative session. */
	wgs = wg_lookup_session_by_index(wg, wgmc->wgmc_receiver, &psref);
	if (wgs == NULL) {
		WG_TRACE("No session found");
		return;
	}

	/* Lock the peer so we can update the cookie state. */
	wgp = wgs->wgs_peer;
	mutex_enter(wgp->wgp_lock);

	if (!wgp->wgp_last_sent_mac1_valid) {
		WG_TRACE("No valid mac1 sent (or expired)");
		goto out;
	}

	/*
	 * wgp_last_sent_mac1_valid is only set to true when we are
	 * transitioning to INIT_ACTIVE or INIT_PASSIVE, and always
	 * cleared on transition out of them.
	 */
	KASSERTMSG((wgs->wgs_state == WGS_STATE_INIT_ACTIVE ||
		wgs->wgs_state == WGS_STATE_INIT_PASSIVE),
	    "state=%d", wgs->wgs_state);

	/* Decrypt the cookie and store it for later handshake retry. */
	wg_algo_mac_cookie(key, sizeof(key), wgp->wgp_pubkey,
	    sizeof(wgp->wgp_pubkey));
	error = wg_algo_xaead_dec(cookie, sizeof(cookie), key,
	    wgmc->wgmc_cookie, sizeof(wgmc->wgmc_cookie),
	    wgp->wgp_last_sent_mac1, sizeof(wgp->wgp_last_sent_mac1),
	    wgmc->wgmc_salt);
	if (error != 0) {
		WG_LOG_RATECHECK(&wgp->wgp_ppsratecheck, LOG_DEBUG,
		    "%s: peer %s: wg_algo_aead_dec for cookie failed: "
		    "error=%d\n", if_name(&wg->wg_if), wgp->wgp_name, error);
		goto out;
	}
	/*
	 * [W] 6.6: Interaction with Cookie Reply System
	 * "it should simply store the decrypted cookie value from the cookie
	 * reply message, and wait for the expiration of the REKEY-TIMEOUT
	 * timer for retrying a handshake initiation message."
	 */
	wgp->wgp_latest_cookie_time = time_uptime;
	memcpy(wgp->wgp_latest_cookie, cookie, sizeof(wgp->wgp_latest_cookie));
out:
	mutex_exit(wgp->wgp_lock);
	wg_put_session(wgs, &psref);
}
3036
3037 static struct mbuf *
3038 wg_validate_msg_header(struct wg_softc *wg, struct mbuf *m)
3039 {
3040 struct wg_msg wgm;
3041 size_t mbuflen;
3042 size_t msglen;
3043
3044 /*
3045 * Get the mbuf chain length. It is already guaranteed, by
3046 * wg_overudp_cb, to be large enough for a struct wg_msg.
3047 */
3048 mbuflen = m_length(m);
3049 KASSERT(mbuflen >= sizeof(struct wg_msg));
3050
3051 /*
3052 * Copy the message header (32-bit message type) out -- we'll
3053 * worry about contiguity and alignment later.
3054 */
3055 m_copydata(m, 0, sizeof(wgm), &wgm);
3056 switch (le32toh(wgm.wgm_type)) {
3057 case WG_MSG_TYPE_INIT:
3058 msglen = sizeof(struct wg_msg_init);
3059 break;
3060 case WG_MSG_TYPE_RESP:
3061 msglen = sizeof(struct wg_msg_resp);
3062 break;
3063 case WG_MSG_TYPE_COOKIE:
3064 msglen = sizeof(struct wg_msg_cookie);
3065 break;
3066 case WG_MSG_TYPE_DATA:
3067 msglen = sizeof(struct wg_msg_data);
3068 break;
3069 default:
3070 WG_LOG_RATECHECK(&wg->wg_ppsratecheck, LOG_DEBUG,
3071 "%s: Unexpected msg type: %u\n", if_name(&wg->wg_if),
3072 le32toh(wgm.wgm_type));
3073 goto error;
3074 }
3075
3076 /* Verify the mbuf chain is long enough for this type of message. */
3077 if (__predict_false(mbuflen < msglen)) {
3078 WG_DLOG("Invalid msg size: mbuflen=%zu type=%u\n", mbuflen,
3079 le32toh(wgm.wgm_type));
3080 goto error;
3081 }
3082
3083 /* Make the message header contiguous if necessary. */
3084 if (__predict_false(m->m_len < msglen)) {
3085 m = m_pullup(m, msglen);
3086 if (m == NULL)
3087 return NULL;
3088 }
3089
3090 return m;
3091
3092 error:
3093 m_freem(m);
3094 return NULL;
3095 }
3096
/*
 * wg_handle_packet(wg, m, src)
 *
 *	Validate and dispatch one WireGuard message received from src.
 *	Consumes m: it is freed here, except in the DATA case where
 *	wg_handle_msg_data takes over and frees it.
 */
static void
wg_handle_packet(struct wg_softc *wg, struct mbuf *m,
    const struct sockaddr *src)
{
	struct wg_msg *wgm;

	/* The worker binds itself to a CPU before draining sockets. */
	KASSERT(curlwp->l_pflag & LP_BOUND);

	/* Check type and length, and make the header contiguous. */
	m = wg_validate_msg_header(wg, m);
	if (__predict_false(m == NULL))
		return;

	KASSERT(m->m_len >= sizeof(struct wg_msg));
	wgm = mtod(m, struct wg_msg *);
	switch (le32toh(wgm->wgm_type)) {
	case WG_MSG_TYPE_INIT:
		wg_handle_msg_init(wg, (struct wg_msg_init *)wgm, src);
		break;
	case WG_MSG_TYPE_RESP:
		wg_handle_msg_resp(wg, (struct wg_msg_resp *)wgm, src);
		break;
	case WG_MSG_TYPE_COOKIE:
		wg_handle_msg_cookie(wg, (struct wg_msg_cookie *)wgm);
		break;
	case WG_MSG_TYPE_DATA:
		wg_handle_msg_data(wg, m, src);
		/* wg_handle_msg_data frees m for us */
		return;
	default:
		/* Unreachable: wg_validate_msg_header rejects other types. */
		panic("invalid message type: %d", le32toh(wgm->wgm_type));
	}

	m_freem(m);
}
3131
3132 static void
3133 wg_receive_packets(struct wg_softc *wg, const int af)
3134 {
3135
3136 for (;;) {
3137 int error, flags;
3138 struct socket *so;
3139 struct mbuf *m = NULL;
3140 struct uio dummy_uio;
3141 struct mbuf *paddr = NULL;
3142 struct sockaddr *src;
3143
3144 so = wg_get_so_by_af(wg, af);
3145 flags = MSG_DONTWAIT;
3146 dummy_uio.uio_resid = 1000000000;
3147
3148 error = so->so_receive(so, &paddr, &dummy_uio, &m, NULL,
3149 &flags);
3150 if (error || m == NULL) {
3151 //if (error == EWOULDBLOCK)
3152 return;
3153 }
3154
3155 KASSERT(paddr != NULL);
3156 KASSERT(paddr->m_len >= sizeof(struct sockaddr));
3157 src = mtod(paddr, struct sockaddr *);
3158
3159 wg_handle_packet(wg, m, src);
3160 }
3161 }
3162
/*
 * wg_get_peer(wgp, psref)
 *
 *	Acquire a passive reference to wgp; release with wg_put_peer.
 */
static void
wg_get_peer(struct wg_peer *wgp, struct psref *psref)
{

	psref_acquire(psref, &wgp->wgp_psref, wg_psref_class);
}
3169
/*
 * wg_put_peer(wgp, psref)
 *
 *	Release a passive reference acquired with wg_get_peer.
 */
static void
wg_put_peer(struct wg_peer *wgp, struct psref *psref)
{

	psref_release(psref, &wgp->wgp_psref, wg_psref_class);
}
3176
/*
 * wg_task_send_init_message(wg, wgp)
 *
 *	Peer task: initiate a handshake by sending a handshake
 *	initiation message, unless the stable session is established
 *	and no rekey has been forced.  Called from wg_peer_work with
 *	wgp_lock held.
 */
static void
wg_task_send_init_message(struct wg_softc *wg, struct wg_peer *wgp)
{
	struct wg_session *wgs;

	WG_TRACE("WGP_TASK_SEND_INIT_MESSAGE");

	KASSERT(mutex_owned(wgp->wgp_lock));

	if (!atomic_load_acquire(&wgp->wgp_endpoint_available)) {
		WGLOG(LOG_DEBUG, "%s: No endpoint available\n",
		    if_name(&wg->wg_if));
		/* XXX should do something? */
		return;
	}

	/*
	 * If we already have an established session, there's no need
	 * to initiate a new one -- unless the rekey-after-time or
	 * rekey-after-messages limits have passed.  The atomic swap
	 * also consumes any pending forced-rekey request.
	 */
	wgs = wgp->wgp_session_stable;
	if (wgs->wgs_state == WGS_STATE_ESTABLISHED &&
	    !atomic_swap_uint(&wgp->wgp_force_rekey, 0))
		return;

	/*
	 * Ensure we're initiating a new session.  If the unstable
	 * session is already INIT_ACTIVE or INIT_PASSIVE, this does
	 * nothing.
	 */
	wg_send_handshake_msg_init(wg, wgp);
}
3210
/*
 * wg_task_retry_handshake(wg, wgp)
 *
 *	Peer task: retransmit a handshake initiation that has not been
 *	answered, or give up if we have been trying longer than
 *	rekey-attempt-time.  Called from wg_peer_work with wgp_lock
 *	held.
 */
static void
wg_task_retry_handshake(struct wg_softc *wg, struct wg_peer *wgp)
{
	struct wg_session *wgs;

	WG_TRACE("WGP_TASK_RETRY_HANDSHAKE");

	KASSERT(mutex_owned(wgp->wgp_lock));
	KASSERT(wgp->wgp_handshake_start_time != 0);

	/* Only an unanswered, locally initiated handshake is retried. */
	wgs = wgp->wgp_session_unstable;
	if (wgs->wgs_state != WGS_STATE_INIT_ACTIVE)
		return;

	/*
	 * XXX no real need to assign a new index here, but we do need
	 * to transition to UNKNOWN temporarily
	 */
	wg_put_session_index(wg, wgs);

	/* [W] 6.4 Handshake Initiation Retransmission */
	if ((time_uptime - wgp->wgp_handshake_start_time) >
	    wg_rekey_attempt_time) {
		/* Give up handshaking */
		wgp->wgp_handshake_start_time = 0;
		WG_TRACE("give up");

		/*
		 * If a new data packet comes, handshaking will be retried
		 * and a new session would be established at that time,
		 * however we don't want to send pending packets then.
		 */
		wg_purge_pending_packets(wgp);
		return;
	}

	wg_task_send_init_message(wg, wgp);
}
3249
/*
 * wg_task_establish_session(wg, wgp)
 *
 *	Peer task (responder side): promote the INIT_PASSIVE unstable
 *	session to ESTABLISHED, swap it into place as the stable
 *	session, send any data packet queued while the handshake was in
 *	flight, and retire the previous stable session.  Called from
 *	wg_peer_work with wgp_lock held.
 */
static void
wg_task_establish_session(struct wg_softc *wg, struct wg_peer *wgp)
{
	struct wg_session *wgs, *wgs_prev;
	struct mbuf *m;

	KASSERT(mutex_owned(wgp->wgp_lock));

	wgs = wgp->wgp_session_unstable;
	if (wgs->wgs_state != WGS_STATE_INIT_PASSIVE)
		/* XXX Can this happen? */
		return;

	wgs->wgs_time_last_data_sent = 0;
	wgs->wgs_is_initiator = false;

	/*
	 * Session was already ready to receive data.  Transition from
	 * INIT_PASSIVE to ESTABLISHED just so we can swap the
	 * sessions.
	 *
	 * atomic_store_relaxed because this doesn't affect the data rx
	 * path, wg_handle_msg_data -- changing from INIT_PASSIVE to
	 * ESTABLISHED makes no difference to the data rx path, and the
	 * transition to INIT_PASSIVE with store-release already
	 * published the state needed by the data rx path.
	 */
	WG_DLOG("session[L=%"PRIx32" R=%"PRIx32"] -> WGS_STATE_ESTABLISHED\n",
	    wgs->wgs_local_index, wgs->wgs_remote_index);
	atomic_store_relaxed(&wgs->wgs_state, WGS_STATE_ESTABLISHED);
	WG_TRACE("WGS_STATE_ESTABLISHED");

	/*
	 * Session is ready to send data too now that we have received
	 * the peer initiator's first data packet.
	 *
	 * Swap the sessions to publish the new one as the stable
	 * session for the data tx path, wg_output.
	 */
	wg_swap_sessions(wgp);
	KASSERT(wgs == wgp->wgp_session_stable);
	wgs_prev = wgp->wgp_session_unstable;
	getnanotime(&wgp->wgp_last_handshake_time);
	wgp->wgp_handshake_start_time = 0;
	wgp->wgp_last_sent_mac1_valid = false;
	wgp->wgp_last_sent_cookie_valid = false;

	/* If we had a data packet queued up, send it. */
	if ((m = atomic_swap_ptr(&wgp->wgp_pending, NULL)) != NULL) {
		kpreempt_disable();
		const uint32_t h = curcpu()->ci_index; // pktq_rps_hash(m)
		M_SETCTX(m, wgp);
		if (__predict_false(!pktq_enqueue(wg_pktq, m, h))) {
			WGLOG(LOG_ERR, "%s: pktq full, dropping\n",
			    if_name(&wg->wg_if));
			m_freem(m);
		}
		kpreempt_enable();
	}

	if (wgs_prev->wgs_state == WGS_STATE_ESTABLISHED) {
		/*
		 * Transition ESTABLISHED->DESTROYING.  The session
		 * will remain usable for the data rx path to process
		 * packets still in flight to us, but we won't use it
		 * for data tx.
		 */
		WG_DLOG("session[L=%"PRIx32" R=%"PRIx32"]"
		    " -> WGS_STATE_DESTROYING\n",
		    wgs_prev->wgs_local_index, wgs_prev->wgs_remote_index);
		atomic_store_relaxed(&wgs_prev->wgs_state,
		    WGS_STATE_DESTROYING);
	} else {
		KASSERTMSG(wgs_prev->wgs_state == WGS_STATE_UNKNOWN,
		    "state=%d", wgs_prev->wgs_state);
		WG_DLOG("session[L=%"PRIx32" R=%"PRIx32"]"
		    " -> WGS_STATE_UNKNOWN\n",
		    wgs_prev->wgs_local_index, wgs_prev->wgs_remote_index);
		wgs_prev->wgs_local_index = 0; /* paranoia */
		wgs_prev->wgs_remote_index = 0; /* paranoia */
		wg_clear_states(wgs_prev); /* paranoia */
		wgs_prev->wgs_state = WGS_STATE_UNKNOWN;
	}
}
3334
/*
 * wg_task_endpoint_changed(wg, wgp)
 *
 *	Peer task: finish an endpoint change by waiting for readers of
 *	the superseded endpoint (wgp_endpoint0) to drain, then recycle
 *	its psref target for reuse on the next change.  wgp_lock is
 *	dropped around the psref target destroy/init because it may
 *	wait for outstanding references.
 */
static void
wg_task_endpoint_changed(struct wg_softc *wg, struct wg_peer *wgp)
{

	WG_TRACE("WGP_TASK_ENDPOINT_CHANGED");

	KASSERT(mutex_owned(wgp->wgp_lock));

	if (atomic_load_relaxed(&wgp->wgp_endpoint_changing)) {
		pserialize_perform(wgp->wgp_psz);
		mutex_exit(wgp->wgp_lock);
		psref_target_destroy(&wgp->wgp_endpoint0->wgsa_psref,
		    wg_psref_class);
		psref_target_init(&wgp->wgp_endpoint0->wgsa_psref,
		    wg_psref_class);
		mutex_enter(wgp->wgp_lock);
		atomic_store_release(&wgp->wgp_endpoint_changing, 0);
	}
}
3354
3355 static void
3356 wg_task_send_keepalive_message(struct wg_softc *wg, struct wg_peer *wgp)
3357 {
3358 struct wg_session *wgs;
3359
3360 WG_TRACE("WGP_TASK_SEND_KEEPALIVE_MESSAGE");
3361
3362 KASSERT(mutex_owned(wgp->wgp_lock));
3363
3364 wgs = wgp->wgp_session_stable;
3365 if (wgs->wgs_state != WGS_STATE_ESTABLISHED)
3366 return;
3367
3368 wg_send_keepalive_msg(wgp, wgs);
3369 }
3370
/*
 * wg_task_destroy_prev_session(wg, wgp)
 *
 *	Peer task: destroy sessions older than reject-after-time -- a
 *	DESTROYING unstable session and/or an ESTABLISHED stable
 *	session -- and, once no sessions remain, halt the session
 *	destructor timer.  Called from wg_peer_work with wgp_lock held.
 */
static void
wg_task_destroy_prev_session(struct wg_softc *wg, struct wg_peer *wgp)
{
	struct wg_session *wgs;
	uint32_t age;

	WG_TRACE("WGP_TASK_DESTROY_PREV_SESSION");

	KASSERT(mutex_owned(wgp->wgp_lock));

	/*
	 * If there's any previous unstable session, i.e., one that
	 * was ESTABLISHED and is now DESTROYING, older than
	 * reject-after-time, destroy it.  Upcoming sessions are still
	 * in INIT_ACTIVE or INIT_PASSIVE -- we don't touch those here.
	 *
	 * No atomic for access to wgs_time_established because it is
	 * only updated under wgp_lock.
	 */
	wgs = wgp->wgp_session_unstable;
	KASSERT(wgs->wgs_state != WGS_STATE_ESTABLISHED);
	if (wgs->wgs_state == WGS_STATE_DESTROYING &&
	    ((age = (time_uptime32 - wgs->wgs_time_established)) >=
		wg_reject_after_time)) {
		WG_DLOG("destroying past session %"PRIu32" sec old\n", age);
		wg_put_session_index(wg, wgs);
		KASSERTMSG(wgs->wgs_state == WGS_STATE_UNKNOWN, "state=%d",
		    wgs->wgs_state);
	}

	/*
	 * If there's any ESTABLISHED stable session older than
	 * reject-after-time, destroy it.  (The stable session can also
	 * be in UNKNOWN state -- nothing to do in that case)
	 */
	wgs = wgp->wgp_session_stable;
	KASSERT(wgs->wgs_state != WGS_STATE_INIT_ACTIVE);
	KASSERT(wgs->wgs_state != WGS_STATE_INIT_PASSIVE);
	KASSERT(wgs->wgs_state != WGS_STATE_DESTROYING);
	if (wgs->wgs_state == WGS_STATE_ESTABLISHED &&
	    ((age = (time_uptime32 - wgs->wgs_time_established)) >=
		wg_reject_after_time)) {
		WG_DLOG("destroying current session %"PRIu32" sec old\n", age);
		atomic_store_relaxed(&wgs->wgs_state, WGS_STATE_DESTROYING);
		wg_put_session_index(wg, wgs);
		KASSERTMSG(wgs->wgs_state == WGS_STATE_UNKNOWN, "state=%d",
		    wgs->wgs_state);
	}

	/*
	 * If there's no sessions left, no need to have the timer run
	 * until the next time around -- halt it.
	 *
	 * It is only ever scheduled with wgp_lock held or in the
	 * callout itself, and callout_halt prevents rescheduling
	 * itself, so this never races with rescheduling.
	 */
	if (wgp->wgp_session_unstable->wgs_state == WGS_STATE_UNKNOWN &&
	    wgp->wgp_session_stable->wgs_state == WGS_STATE_UNKNOWN)
		callout_halt(&wgp->wgp_session_dtor_timer, NULL);
}
3432
/*
 * wg_peer_work(wk, cookie)
 *
 *	Workqueue handler for per-peer tasks: atomically claim the
 *	pending task bits under the interrupt lock, run each claimed
 *	task under wgp_lock, and repeat until no new tasks were
 *	scheduled while we were working.
 */
static void
wg_peer_work(struct work *wk, void *cookie)
{
	struct wg_peer *wgp = container_of(wk, struct wg_peer, wgp_work);
	struct wg_softc *wg = wgp->wgp_sc;
	unsigned int tasks;

	mutex_enter(wgp->wgp_intr_lock);
	while ((tasks = wgp->wgp_tasks) != 0) {
		/* Claim the bits; new tasks may be posted while we run. */
		wgp->wgp_tasks = 0;
		mutex_exit(wgp->wgp_intr_lock);

		mutex_enter(wgp->wgp_lock);
		if (ISSET(tasks, WGP_TASK_SEND_INIT_MESSAGE))
			wg_task_send_init_message(wg, wgp);
		if (ISSET(tasks, WGP_TASK_RETRY_HANDSHAKE))
			wg_task_retry_handshake(wg, wgp);
		if (ISSET(tasks, WGP_TASK_ESTABLISH_SESSION))
			wg_task_establish_session(wg, wgp);
		if (ISSET(tasks, WGP_TASK_ENDPOINT_CHANGED))
			wg_task_endpoint_changed(wg, wgp);
		if (ISSET(tasks, WGP_TASK_SEND_KEEPALIVE_MESSAGE))
			wg_task_send_keepalive_message(wg, wgp);
		if (ISSET(tasks, WGP_TASK_DESTROY_PREV_SESSION))
			wg_task_destroy_prev_session(wg, wgp);
		mutex_exit(wgp->wgp_lock);

		mutex_enter(wgp->wgp_intr_lock);
	}
	mutex_exit(wgp->wgp_intr_lock);
}
3464
/*
 * wg_job(job)
 *
 *	Threadpool job that drains whichever sockets (v4/v6) signalled
 *	an upcall: claim the pending upcall bits under wg_intr_lock,
 *	receive with the lock dropped, and repeat until no new upcalls
 *	arrived meanwhile.  The LWP is bound to a CPU while receiving,
 *	as the packet handling path asserts (LP_BOUND).
 */
static void
wg_job(struct threadpool_job *job)
{
	struct wg_softc *wg = container_of(job, struct wg_softc, wg_job);
	int bound, upcalls;

	mutex_enter(wg->wg_intr_lock);
	while ((upcalls = wg->wg_upcalls) != 0) {
		wg->wg_upcalls = 0;
		mutex_exit(wg->wg_intr_lock);
		bound = curlwp_bind();
		if (ISSET(upcalls, WG_UPCALL_INET))
			wg_receive_packets(wg, AF_INET);
		if (ISSET(upcalls, WG_UPCALL_INET6))
			wg_receive_packets(wg, AF_INET6);
		curlwp_bindx(bound);
		mutex_enter(wg->wg_intr_lock);
	}
	threadpool_job_done(job);
	mutex_exit(wg->wg_intr_lock);
}
3486
/*
 * wg_bind_port(wg, port)
 *
 *	Bind the IPv4 (and, with INET6, IPv6) sockets to port and
 *	record it in wg_listen_port.  Returns 0 on success or an errno.
 *
 *	NOTE(review): if the v6 bind fails after the v4 bind succeeded,
 *	we return with the v4 socket bound to the new port but
 *	wg_listen_port unchanged -- confirm whether rollback is needed.
 */
static int
wg_bind_port(struct wg_softc *wg, const uint16_t port)
{
	int error;
	uint16_t old_port = wg->wg_listen_port;

	/* Nothing to do if already bound to this (non-ephemeral) port. */
	if (port != 0 && old_port == port)
		return 0;

	struct sockaddr_in _sin, *sin = &_sin;
	sin->sin_len = sizeof(*sin);
	sin->sin_family = AF_INET;
	sin->sin_addr.s_addr = INADDR_ANY;
	sin->sin_port = htons(port);

	error = sobind(wg->wg_so4, sintosa(sin), curlwp);
	if (error != 0)
		return error;

#ifdef INET6
	struct sockaddr_in6 _sin6, *sin6 = &_sin6;
	sin6->sin6_len = sizeof(*sin6);
	sin6->sin6_family = AF_INET6;
	sin6->sin6_addr = in6addr_any;
	sin6->sin6_port = htons(port);

	error = sobind(wg->wg_so6, sin6tosa(sin6), curlwp);
	if (error != 0)
		return error;
#endif

	wg->wg_listen_port = port;

	return 0;
}
3522
3523 static void
3524 wg_so_upcall(struct socket *so, void *cookie, int events, int waitflag)
3525 {
3526 struct wg_softc *wg = cookie;
3527 int reason;
3528
3529 reason = (so->so_proto->pr_domain->dom_family == AF_INET) ?
3530 WG_UPCALL_INET :
3531 WG_UPCALL_INET6;
3532
3533 mutex_enter(wg->wg_intr_lock);
3534 wg->wg_upcalls |= reason;
3535 threadpool_schedule_job(wg->wg_threadpool, &wg->wg_job);
3536 mutex_exit(wg->wg_intr_lock);
3537 }
3538
/*
 * wg_overudp_cb(mp, offset, so, src, arg)
 *
 *	Callback invoked from the UDP input path before the packet is
 *	queued on the socket.  Returns 1 if the packet was consumed
 *	here (DATA fast path; *mp set to NULL), 0 to let it pass
 *	through to so_receive (handshake messages, handled by the
 *	worker), or -1 if the packet was dropped and freed.
 */
static int
wg_overudp_cb(struct mbuf **mp, int offset, struct socket *so,
    struct sockaddr *src, void *arg)
{
	struct wg_softc *wg = arg;
	struct wg_msg wgm;
	struct mbuf *m = *mp;

	WG_TRACE("enter");

	/* Verify the mbuf chain is long enough to have a wg msg header. */
	KASSERT(offset <= m_length(m));
	if (__predict_false(m_length(m) - offset < sizeof(struct wg_msg))) {
		/* drop on the floor */
		m_freem(m);
		return -1;
	}

	/*
	 * Copy the message header (32-bit message type) out -- we'll
	 * worry about contiguity and alignment later.
	 */
	m_copydata(m, offset, sizeof(struct wg_msg), &wgm);
	WG_DLOG("type=%d\n", le32toh(wgm.wgm_type));

	/*
	 * Handle DATA packets promptly as they arrive, if they are in
	 * an active session.  Other packets may require expensive
	 * public-key crypto and are not as sensitive to latency, so
	 * defer them to the worker thread.
	 */
	switch (le32toh(wgm.wgm_type)) {
	case WG_MSG_TYPE_DATA:
		/* handle immediately */
		m_adj(m, offset);
		if (__predict_false(m->m_len < sizeof(struct wg_msg_data))) {
			/* m_pullup frees the chain on failure */
			m = m_pullup(m, sizeof(struct wg_msg_data));
			if (m == NULL)
				return -1;
		}
		/* wg_handle_msg_data consumes m */
		wg_handle_msg_data(wg, m, src);
		*mp = NULL;
		return 1;
	case WG_MSG_TYPE_INIT:
	case WG_MSG_TYPE_RESP:
	case WG_MSG_TYPE_COOKIE:
		/* pass through to so_receive in wg_receive_packets */
		return 0;
	default:
		/* drop on the floor */
		m_freem(m);
		return -1;
	}
}
3593
/*
 * wg_socreate(wg, af, sop)
 *
 *	Create a UDP socket for address family af, register the
 *	receive upcall (wg_so_upcall) and the over-UDP fast-path
 *	callback (wg_overudp_cb) under the socket lock, and return the
 *	socket in *sop.  Returns 0 on success or an errno.
 */
static int
wg_socreate(struct wg_softc *wg, int af, struct socket **sop)
{
	int error;
	struct socket *so;

	error = socreate(af, &so, SOCK_DGRAM, 0, curlwp, NULL);
	if (error != 0)
		return error;

	solock(so);
	so->so_upcallarg = wg;
	so->so_upcall = wg_so_upcall;
	so->so_rcv.sb_flags |= SB_UPCALL;
	inpcb_register_overudp_cb(sotoinpcb(so), wg_overudp_cb, wg);
	sounlock(so);

	*sop = so;

	return 0;
}
3615
3616 static bool
3617 wg_session_hit_limits(struct wg_session *wgs)
3618 {
3619 uint32_t time_established =
3620 atomic_load_relaxed(&wgs->wgs_time_established);
3621
3622 /*
3623 * [W] 6.2: Transport Message Limits
3624 * "After REJECT-AFTER-MESSAGES transport data messages or after the
3625 * current secure session is REJECT-AFTER-TIME seconds old, whichever
3626 * comes first, WireGuard will refuse to send or receive any more
3627 * transport data messages using the current secure session, ..."
3628 */
3629 KASSERT(time_established != 0 || time_uptime > UINT32_MAX);
3630 if ((time_uptime32 - time_established) > wg_reject_after_time) {
3631 WG_DLOG("The session hits REJECT_AFTER_TIME\n");
3632 return true;
3633 } else if (wg_session_get_send_counter(wgs) >
3634 wg_reject_after_messages) {
3635 WG_DLOG("The session hits REJECT_AFTER_MESSAGES\n");
3636 return true;
3637 }
3638
3639 return false;
3640 }
3641
/*
 * wgintr(cookie)
 *
 *	Software interrupt handler for the transmit packet queue:
 *	encrypt and send each queued packet over its peer's stable
 *	session, or drop it and schedule a handshake if there is no
 *	usable session.
 */
static void
wgintr(void *cookie)
{
	struct wg_peer *wgp;
	struct wg_session *wgs;
	struct mbuf *m;
	struct psref psref;

	while ((m = pktq_dequeue(wg_pktq)) != NULL) {
		/* The sender stashed the peer pointer in the mbuf. */
		wgp = M_GETCTX(m, struct wg_peer *);
		if ((wgs = wg_get_stable_session(wgp, &psref)) == NULL) {
			WG_TRACE("no stable session");
			wg_schedule_peer_task(wgp, WGP_TASK_SEND_INIT_MESSAGE);
			goto next0;
		}
		if (__predict_false(wg_session_hit_limits(wgs))) {
			WG_TRACE("stable session hit limits");
			wg_schedule_peer_task(wgp, WGP_TASK_SEND_INIT_MESSAGE);
			goto next1;
		}
		wg_send_data_msg(wgp, wgs, m);
		m = NULL;	/* consumed */
next1:		wg_put_session(wgs, &psref);
next0:		m_freem(m);
		/* XXX Yield to avoid userland starvation? */
	}
}
3669
/*
 * wg_purge_pending_packets(wgp)
 *
 *	Discard the data packet queued while awaiting a handshake, if
 *	any, and wait for packets already on the transmit packet queue
 *	to be processed by wgintr.
 */
static void
wg_purge_pending_packets(struct wg_peer *wgp)
{
	struct mbuf *m;

	m = atomic_swap_ptr(&wgp->wgp_pending, NULL);
	m_freem(m);
#ifdef ALTQ
	/* Kick the ALTQ send queue so it drains too. */
	wg_start(&wgp->wgp_sc->wg_if);
#endif
	pktq_barrier(wg_pktq);
}
3682
/*
 * wg_handshake_timeout_timer(arg)
 *
 *	Callout for handshake timeout: defer the retry decision to the
 *	peer work queue via WGP_TASK_RETRY_HANDSHAKE.
 */
static void
wg_handshake_timeout_timer(void *arg)
{
	struct wg_peer *wgp = arg;

	WG_TRACE("enter");

	wg_schedule_peer_task(wgp, WGP_TASK_RETRY_HANDSHAKE);
}
3692
3693 static struct wg_peer *
3694 wg_alloc_peer(struct wg_softc *wg)
3695 {
3696 struct wg_peer *wgp;
3697
3698 wgp = kmem_zalloc(sizeof(*wgp), KM_SLEEP);
3699
3700 wgp->wgp_sc = wg;
3701 callout_init(&wgp->wgp_handshake_timeout_timer, CALLOUT_MPSAFE);
3702 callout_setfunc(&wgp->wgp_handshake_timeout_timer,
3703 wg_handshake_timeout_timer, wgp);
3704 callout_init(&wgp->wgp_session_dtor_timer, CALLOUT_MPSAFE);
3705 callout_setfunc(&wgp->wgp_session_dtor_timer,
3706 wg_session_dtor_timer, wgp);
3707 PSLIST_ENTRY_INIT(wgp, wgp_peerlist_entry);
3708 wgp->wgp_endpoint_changing = false;
3709 wgp->wgp_endpoint_available = false;
3710 wgp->wgp_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NONE);
3711 wgp->wgp_intr_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_SOFTNET);
3712 wgp->wgp_psz = pserialize_create();
3713 psref_target_init(&wgp->wgp_psref, wg_psref_class);
3714
3715 wgp->wgp_endpoint = kmem_zalloc(sizeof(*wgp->wgp_endpoint), KM_SLEEP);
3716 wgp->wgp_endpoint0 = kmem_zalloc(sizeof(*wgp->wgp_endpoint0), KM_SLEEP);
3717 psref_target_init(&wgp->wgp_endpoint->wgsa_psref, wg_psref_class);
3718 psref_target_init(&wgp->wgp_endpoint0->wgsa_psref, wg_psref_class);
3719
3720 struct wg_session *wgs;
3721 wgp->wgp_session_stable =
3722 kmem_zalloc(sizeof(*wgp->wgp_session_stable), KM_SLEEP);
3723 wgp->wgp_session_unstable =
3724 kmem_zalloc(sizeof(*wgp->wgp_session_unstable), KM_SLEEP);
3725 wgs = wgp->wgp_session_stable;
3726 wgs->wgs_peer = wgp;
3727 wgs->wgs_state = WGS_STATE_UNKNOWN;
3728 psref_target_init(&wgs->wgs_psref, wg_psref_class);
3729 #ifndef __HAVE_ATOMIC64_LOADSTORE
3730 mutex_init(&wgs->wgs_send_counter_lock, MUTEX_DEFAULT, IPL_SOFTNET);
3731 #endif
3732 wgs->wgs_recvwin = kmem_zalloc(sizeof(*wgs->wgs_recvwin), KM_SLEEP);
3733 mutex_init(&wgs->wgs_recvwin->lock, MUTEX_DEFAULT, IPL_SOFTNET);
3734
3735 wgs = wgp->wgp_session_unstable;
3736 wgs->wgs_peer = wgp;
3737 wgs->wgs_state = WGS_STATE_UNKNOWN;
3738 psref_target_init(&wgs->wgs_psref, wg_psref_class);
3739 #ifndef __HAVE_ATOMIC64_LOADSTORE
3740 mutex_init(&wgs->wgs_send_counter_lock, MUTEX_DEFAULT, IPL_SOFTNET);
3741 #endif
3742 wgs->wgs_recvwin = kmem_zalloc(sizeof(*wgs->wgs_recvwin), KM_SLEEP);
3743 mutex_init(&wgs->wgs_recvwin->lock, MUTEX_DEFAULT, IPL_SOFTNET);
3744
3745 return wgp;
3746 }
3747
3748 static void
3749 wg_destroy_peer(struct wg_peer *wgp)
3750 {
3751 struct wg_session *wgs;
3752 struct wg_softc *wg = wgp->wgp_sc;
3753
3754 /* Prevent new packets from this peer on any source address. */
3755 rw_enter(wg->wg_rwlock, RW_WRITER);
3756 for (int i = 0; i < wgp->wgp_n_allowedips; i++) {
3757 struct wg_allowedip *wga = &wgp->wgp_allowedips[i];
3758 struct radix_node_head *rnh = wg_rnh(wg, wga->wga_family);
3759 struct radix_node *rn;
3760
3761 KASSERT(rnh != NULL);
3762 rn = rnh->rnh_deladdr(&wga->wga_sa_addr,
3763 &wga->wga_sa_mask, rnh);
3764 if (rn == NULL) {
3765 char addrstr[128];
3766 sockaddr_format(&wga->wga_sa_addr, addrstr,
3767 sizeof(addrstr));
3768 WGLOG(LOG_WARNING, "%s: Couldn't delete %s",
3769 if_name(&wg->wg_if), addrstr);
3770 }
3771 }
3772 rw_exit(wg->wg_rwlock);
3773
3774 /* Purge pending packets. */
3775 wg_purge_pending_packets(wgp);
3776
3777 /* Halt all packet processing and timeouts. */
3778 callout_halt(&wgp->wgp_handshake_timeout_timer, NULL);
3779 callout_halt(&wgp->wgp_session_dtor_timer, NULL);
3780
3781 /* Wait for any queued work to complete. */
3782 workqueue_wait(wg_wq, &wgp->wgp_work);
3783
3784 wgs = wgp->wgp_session_unstable;
3785 if (wgs->wgs_state != WGS_STATE_UNKNOWN) {
3786 mutex_enter(wgp->wgp_lock);
3787 wg_destroy_session(wg, wgs);
3788 mutex_exit(wgp->wgp_lock);
3789 }
3790 mutex_destroy(&wgs->wgs_recvwin->lock);
3791 kmem_free(wgs->wgs_recvwin, sizeof(*wgs->wgs_recvwin));
3792 #ifndef __HAVE_ATOMIC64_LOADSTORE
3793 mutex_destroy(&wgs->wgs_send_counter_lock);
3794 #endif
3795 kmem_free(wgs, sizeof(*wgs));
3796
3797 wgs = wgp->wgp_session_stable;
3798 if (wgs->wgs_state != WGS_STATE_UNKNOWN) {
3799 mutex_enter(wgp->wgp_lock);
3800 wg_destroy_session(wg, wgs);
3801 mutex_exit(wgp->wgp_lock);
3802 }
3803 mutex_destroy(&wgs->wgs_recvwin->lock);
3804 kmem_free(wgs->wgs_recvwin, sizeof(*wgs->wgs_recvwin));
3805 #ifndef __HAVE_ATOMIC64_LOADSTORE
3806 mutex_destroy(&wgs->wgs_send_counter_lock);
3807 #endif
3808 kmem_free(wgs, sizeof(*wgs));
3809
3810 psref_target_destroy(&wgp->wgp_endpoint->wgsa_psref, wg_psref_class);
3811 psref_target_destroy(&wgp->wgp_endpoint0->wgsa_psref, wg_psref_class);
3812 kmem_free(wgp->wgp_endpoint, sizeof(*wgp->wgp_endpoint));
3813 kmem_free(wgp->wgp_endpoint0, sizeof(*wgp->wgp_endpoint0));
3814
3815 pserialize_destroy(wgp->wgp_psz);
3816 mutex_obj_free(wgp->wgp_intr_lock);
3817 mutex_obj_free(wgp->wgp_lock);
3818
3819 kmem_free(wgp, sizeof(*wgp));
3820 }
3821
/*
 * wg_destroy_all_peers(wg)
 *
 *	Unlink and destroy every peer of wg.  Each pass unlinks one
 *	peer from the maps and list under wg_lock, then destroys it and
 *	collects the thmap garbage with the lock dropped
 *	(wg_destroy_peer may sleep), restarting the walk each time.
 */
static void
wg_destroy_all_peers(struct wg_softc *wg)
{
	struct wg_peer *wgp, *wgp0 __diagused;
	void *garbage_byname, *garbage_bypubkey;

restart:
	garbage_byname = garbage_bypubkey = NULL;
	mutex_enter(wg->wg_lock);
	WG_PEER_WRITER_FOREACH(wgp, wg) {
		/* Unnamed peers are only in the by-pubkey map. */
		if (wgp->wgp_name[0]) {
			wgp0 = thmap_del(wg->wg_peers_byname, wgp->wgp_name,
			    strlen(wgp->wgp_name));
			KASSERT(wgp0 == wgp);
			garbage_byname = thmap_stage_gc(wg->wg_peers_byname);
		}
		wgp0 = thmap_del(wg->wg_peers_bypubkey, wgp->wgp_pubkey,
		    sizeof(wgp->wgp_pubkey));
		KASSERT(wgp0 == wgp);
		garbage_bypubkey = thmap_stage_gc(wg->wg_peers_bypubkey);
		WG_PEER_WRITER_REMOVE(wgp);
		wg->wg_npeers--;
		/* Wait for passive-serialization readers of the list. */
		mutex_enter(wgp->wgp_lock);
		pserialize_perform(wgp->wgp_psz);
		mutex_exit(wgp->wgp_lock);
		PSLIST_ENTRY_DESTROY(wgp, wgp_peerlist_entry);
		break;
	}
	mutex_exit(wg->wg_lock);

	/* Loop terminates when the peer list is empty. */
	if (wgp == NULL)
		return;

	psref_target_destroy(&wgp->wgp_psref, wg_psref_class);

	wg_destroy_peer(wgp);
	thmap_gc(wg->wg_peers_byname, garbage_byname);
	thmap_gc(wg->wg_peers_bypubkey, garbage_bypubkey);

	goto restart;
}
3863
/*
 * wg_destroy_peer_name(wg, name)
 *
 *	Unlink and destroy the peer registered under name.  Brings the
 *	interface link state down when the last peer is removed.
 *	Returns 0 on success or ENOENT if no such peer exists.
 */
static int
wg_destroy_peer_name(struct wg_softc *wg, const char *name)
{
	struct wg_peer *wgp, *wgp0 __diagused;
	void *garbage_byname, *garbage_bypubkey;

	mutex_enter(wg->wg_lock);
	wgp = thmap_del(wg->wg_peers_byname, name, strlen(name));
	if (wgp != NULL) {
		wgp0 = thmap_del(wg->wg_peers_bypubkey, wgp->wgp_pubkey,
		    sizeof(wgp->wgp_pubkey));
		KASSERT(wgp0 == wgp);
		garbage_byname = thmap_stage_gc(wg->wg_peers_byname);
		garbage_bypubkey = thmap_stage_gc(wg->wg_peers_bypubkey);
		WG_PEER_WRITER_REMOVE(wgp);
		wg->wg_npeers--;
		if (wg->wg_npeers == 0)
			if_link_state_change(&wg->wg_if, LINK_STATE_DOWN);
		/* Wait for passive-serialization readers of the list. */
		mutex_enter(wgp->wgp_lock);
		pserialize_perform(wgp->wgp_psz);
		mutex_exit(wgp->wgp_lock);
		PSLIST_ENTRY_DESTROY(wgp, wgp_peerlist_entry);
	}
	mutex_exit(wg->wg_lock);

	if (wgp == NULL)
		return ENOENT;

	/* Wait for psref holders, then destroy with locks dropped. */
	psref_target_destroy(&wgp->wgp_psref, wg_psref_class);

	wg_destroy_peer(wgp);
	thmap_gc(wg->wg_peers_byname, garbage_byname);
	thmap_gc(wg->wg_peers_bypubkey, garbage_bypubkey);

	return 0;
}
3900
/*
 * wg_if_attach(wg)
 *
 *	Initialize and register wg's ifnet with the network stack and
 *	attach bpf.  The link starts out down until peers are added.
 *	Always returns 0.
 */
static int
wg_if_attach(struct wg_softc *wg)
{

	wg->wg_if.if_addrlen = 0;
	wg->wg_if.if_mtu = WG_MTU;
	wg->wg_if.if_flags = IFF_MULTICAST;
	wg->wg_if.if_extflags = IFEF_MPSAFE;
	wg->wg_if.if_ioctl = wg_ioctl;
	wg->wg_if.if_output = wg_output;
	wg->wg_if.if_init = wg_init;
#ifdef ALTQ
	wg->wg_if.if_start = wg_start;
#endif
	wg->wg_if.if_stop = wg_stop;
	wg->wg_if.if_type = IFT_OTHER;
	wg->wg_if.if_dlt = DLT_NULL;
	wg->wg_if.if_softc = wg;
#ifdef ALTQ
	IFQ_SET_READY(&wg->wg_if.if_snd);
#endif
	if_initialize(&wg->wg_if);

	wg->wg_if.if_link_state = LINK_STATE_DOWN;
	if_alloc_sadl(&wg->wg_if);
	if_register(&wg->wg_if);

	bpf_attach(&wg->wg_if, DLT_NULL, sizeof(uint32_t));

	return 0;
}
3932
3933 static void
3934 wg_if_detach(struct wg_softc *wg)
3935 {
3936 struct ifnet *ifp = &wg->wg_if;
3937
3938 bpf_detach(ifp);
3939 if_detach(ifp);
3940 }
3941
/*
 * wg_clone_create(ifc, unit)
 *
 *	Create a wg interface instance: allocate the softc, its maps,
 *	locks, worker job, threadpool, sockets, and routing tables, and
 *	attach the ifnet.  Returns 0 on success or an errno, unwinding
 *	everything allocated so far on failure.
 */
static int
wg_clone_create(struct if_clone *ifc, int unit)
{
	struct wg_softc *wg;
	int error;

	wg_guarantee_initialized();

	error = wg_count_inc();
	if (error)
		return error;

	wg = kmem_zalloc(sizeof(*wg), KM_SLEEP);

	if_initname(&wg->wg_if, ifc->ifc_name, unit);

	PSLIST_INIT(&wg->wg_peers);
	wg->wg_peers_bypubkey = thmap_create(0, NULL, THMAP_NOCOPY);
	wg->wg_peers_byname = thmap_create(0, NULL, THMAP_NOCOPY);
	wg->wg_sessions_byindex = thmap_create(0, NULL, THMAP_NOCOPY);
	wg->wg_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NONE);
	wg->wg_intr_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_SOFTNET);
	wg->wg_rwlock = rw_obj_alloc();
	threadpool_job_init(&wg->wg_job, wg_job, wg->wg_intr_lock,
	    "%s", if_name(&wg->wg_if));
	wg->wg_ops = &wg_ops_rumpkernel;

	error = threadpool_get(&wg->wg_threadpool, PRI_NONE);
	if (error)
		goto fail0;

#ifdef INET
	error = wg_socreate(wg, AF_INET, &wg->wg_so4);
	if (error)
		goto fail1;
	rn_inithead((void **)&wg->wg_rtable_ipv4,
	    offsetof(struct sockaddr_in, sin_addr) * NBBY);
#endif
#ifdef INET6
	error = wg_socreate(wg, AF_INET6, &wg->wg_so6);
	if (error)
		goto fail2;
	rn_inithead((void **)&wg->wg_rtable_ipv6,
	    offsetof(struct sockaddr_in6, sin6_addr) * NBBY);
#endif

	error = wg_if_attach(wg);
	if (error)
		goto fail3;

	return 0;

	/*
	 * Error unwind: each failN label undoes everything set up
	 * before step N.  fail4 is currently unreachable (no step can
	 * fail after if_attach) but kept for symmetry with
	 * wg_clone_destroy.
	 */
fail4: __unused
	wg_destroy_all_peers(wg);
	wg_if_detach(wg);
fail3:
#ifdef INET6
	solock(wg->wg_so6);
	wg->wg_so6->so_rcv.sb_flags &= ~SB_UPCALL;
	sounlock(wg->wg_so6);
#endif
#ifdef INET
	solock(wg->wg_so4);
	wg->wg_so4->so_rcv.sb_flags &= ~SB_UPCALL;
	sounlock(wg->wg_so4);
#endif
	mutex_enter(wg->wg_intr_lock);
	threadpool_cancel_job(wg->wg_threadpool, &wg->wg_job);
	mutex_exit(wg->wg_intr_lock);
#ifdef INET6
	if (wg->wg_rtable_ipv6 != NULL)
		free(wg->wg_rtable_ipv6, M_RTABLE);
	soclose(wg->wg_so6);
fail2:
#endif
#ifdef INET
	if (wg->wg_rtable_ipv4 != NULL)
		free(wg->wg_rtable_ipv4, M_RTABLE);
	soclose(wg->wg_so4);
fail1:
#endif
	threadpool_put(wg->wg_threadpool, PRI_NONE);
fail0:	threadpool_job_destroy(&wg->wg_job);
	rw_obj_free(wg->wg_rwlock);
	mutex_obj_free(wg->wg_intr_lock);
	mutex_obj_free(wg->wg_lock);
	thmap_destroy(wg->wg_sessions_byindex);
	thmap_destroy(wg->wg_peers_byname);
	thmap_destroy(wg->wg_peers_bypubkey);
	PSLIST_DESTROY(&wg->wg_peers);
	kmem_free(wg, sizeof(*wg));
	wg_count_dec();
	return error;
}
4036
/*
 * wg_clone_destroy(ifp)
 *
 *	Destroy a wg interface instance: tear everything down in the
 *	reverse order of wg_clone_create (mirroring its error unwind),
 *	disabling socket upcalls before closing the sockets.  Always
 *	returns 0.
 */
static int
wg_clone_destroy(struct ifnet *ifp)
{
	struct wg_softc *wg = container_of(ifp, struct wg_softc, wg_if);

#ifdef WG_RUMPKERNEL
	if (wg_user_mode(wg)) {
		rumpuser_wg_destroy(wg->wg_user);
		wg->wg_user = NULL;
	}
#endif

	wg_destroy_all_peers(wg);
	wg_if_detach(wg);
#ifdef INET6
	solock(wg->wg_so6);
	wg->wg_so6->so_rcv.sb_flags &= ~SB_UPCALL;
	sounlock(wg->wg_so6);
#endif
#ifdef INET
	solock(wg->wg_so4);
	wg->wg_so4->so_rcv.sb_flags &= ~SB_UPCALL;
	sounlock(wg->wg_so4);
#endif
	mutex_enter(wg->wg_intr_lock);
	threadpool_cancel_job(wg->wg_threadpool, &wg->wg_job);
	mutex_exit(wg->wg_intr_lock);
#ifdef INET6
	if (wg->wg_rtable_ipv6 != NULL)
		free(wg->wg_rtable_ipv6, M_RTABLE);
	soclose(wg->wg_so6);
#endif
#ifdef INET
	if (wg->wg_rtable_ipv4 != NULL)
		free(wg->wg_rtable_ipv4, M_RTABLE);
	soclose(wg->wg_so4);
#endif
	threadpool_put(wg->wg_threadpool, PRI_NONE);
	threadpool_job_destroy(&wg->wg_job);
	rw_obj_free(wg->wg_rwlock);
	mutex_obj_free(wg->wg_intr_lock);
	mutex_obj_free(wg->wg_lock);
	thmap_destroy(wg->wg_sessions_byindex);
	thmap_destroy(wg->wg_peers_byname);
	thmap_destroy(wg->wg_peers_bypubkey);
	PSLIST_DESTROY(&wg->wg_peers);
	kmem_free(wg, sizeof(*wg));
	wg_count_dec();

	return 0;
}
4088
4089 static struct wg_peer *
4090 wg_pick_peer_by_sa(struct wg_softc *wg, const struct sockaddr *sa,
4091 struct psref *psref)
4092 {
4093 struct radix_node_head *rnh;
4094 struct radix_node *rn;
4095 struct wg_peer *wgp = NULL;
4096 struct wg_allowedip *wga;
4097
4098 #ifdef WG_DEBUG_LOG
4099 char addrstr[128];
4100 sockaddr_format(sa, addrstr, sizeof(addrstr));
4101 WG_DLOG("sa=%s\n", addrstr);
4102 #endif
4103
4104 rw_enter(wg->wg_rwlock, RW_READER);
4105
4106 rnh = wg_rnh(wg, sa->sa_family);
4107 if (rnh == NULL)
4108 goto out;
4109
4110 rn = rnh->rnh_matchaddr(sa, rnh);
4111 if (rn == NULL || (rn->rn_flags & RNF_ROOT) != 0)
4112 goto out;
4113
4114 WG_TRACE("success");
4115
4116 wga = container_of(rn, struct wg_allowedip, wga_nodes[0]);
4117 wgp = wga->wga_peer;
4118 wg_get_peer(wgp, psref);
4119
4120 out:
4121 rw_exit(wg->wg_rwlock);
4122 return wgp;
4123 }
4124
4125 static void
4126 wg_fill_msg_data(struct wg_softc *wg, struct wg_peer *wgp,
4127 struct wg_session *wgs, struct wg_msg_data *wgmd)
4128 {
4129
4130 memset(wgmd, 0, sizeof(*wgmd));
4131 wgmd->wgmd_type = htole32(WG_MSG_TYPE_DATA);
4132 wgmd->wgmd_receiver = wgs->wgs_remote_index;
4133 /* [W] 5.4.6: msg.counter := Nm^send */
4134 /* [W] 5.4.6: Nm^send := Nm^send + 1 */
4135 wgmd->wgmd_counter = htole64(wg_session_inc_send_counter(wgs));
4136 WG_DLOG("counter=%"PRIu64"\n", le64toh(wgmd->wgmd_counter));
4137 }
4138
/*
 * if_output handler: classify and queue an outbound packet for peer
 * lookup, encryption, and transmission by the softint/pktq path.  If no
 * session is established yet, stash the first packet on the peer and
 * kick off a handshake instead.  Consumes m on all paths (freed at out0
 * when not handed off).
 */
static int
wg_output(struct ifnet *ifp, struct mbuf *m, const struct sockaddr *dst,
    const struct rtentry *rt)
{
	struct wg_softc *wg = ifp->if_softc;
	struct wg_peer *wgp = NULL;
	struct wg_session *wgs = NULL;
	struct psref wgp_psref, wgs_psref;
	int bound;
	int error;

	/* Bind to the CPU for the duration of the psref usage below. */
	bound = curlwp_bind();

	/* TODO make the nest limit configurable via sysctl */
	error = if_tunnel_check_nesting(ifp, m, 1);
	if (error) {
		WGLOG(LOG_ERR,
		    "%s: tunneling loop detected and packet dropped\n",
		    if_name(&wg->wg_if));
		goto out0;
	}

#ifdef ALTQ
	bool altq = atomic_load_relaxed(&ifp->if_snd.altq_flags)
	    & ALTQF_ENABLED;
	if (altq)
		IFQ_CLASSIFY(&ifp->if_snd, m, dst->sa_family);
#endif

	bpf_mtap_af(ifp, dst->sa_family, m, BPF_D_OUT);

	m->m_flags &= ~(M_BCAST|M_MCAST);

	/* Route the packet to a peer via its allowed-IPs. */
	wgp = wg_pick_peer_by_sa(wg, dst, &wgp_psref);
	if (wgp == NULL) {
		WG_TRACE("peer not found");
		error = EHOSTUNREACH;
		goto out0;
	}

	/* Clear checksum-offload flags. */
	m->m_pkthdr.csum_flags = 0;
	m->m_pkthdr.csum_data = 0;

	/* Check whether there's an established session. */
	wgs = wg_get_stable_session(wgp, &wgs_psref);
	if (wgs == NULL) {
		/*
		 * No established session.  If we're the first to try
		 * sending data, schedule a handshake and queue the
		 * packet for when the handshake is done; otherwise
		 * just drop the packet and let the ongoing handshake
		 * attempt continue.  We could queue more data packets
		 * but it's not clear that's worthwhile.
		 */
		if (atomic_cas_ptr(&wgp->wgp_pending, NULL, m) == NULL) {
			m = NULL;	/* consume */
			WG_TRACE("queued first packet; init handshake");
			wg_schedule_peer_task(wgp, WGP_TASK_SEND_INIT_MESSAGE);
		} else {
			WG_TRACE("first packet already queued, dropping");
		}
		goto out1;
	}

	/* There's an established session.  Toss it in the queue. */
#ifdef ALTQ
	if (altq) {
		mutex_enter(ifp->if_snd.ifq_lock);
		if (ALTQ_IS_ENABLED(&ifp->if_snd)) {
			/* Peer pointer travels in the mbuf context. */
			M_SETCTX(m, wgp);
			ALTQ_ENQUEUE(&ifp->if_snd, m, error);
			m = NULL;	/* consume */
		}
		mutex_exit(ifp->if_snd.ifq_lock);
		if (m == NULL) {
			wg_start(ifp);
			goto out2;
		}
	}
#endif
	kpreempt_disable();
	const uint32_t h = curcpu()->ci_index;	// pktq_rps_hash(m)
	M_SETCTX(m, wgp);
	if (__predict_false(!pktq_enqueue(wg_pktq, m, h))) {
		WGLOG(LOG_ERR, "%s: pktq full, dropping\n",
		    if_name(&wg->wg_if));
		error = ENOBUFS;
		goto out3;
	}
	m = NULL;	/* consumed */
	error = 0;
out3:	kpreempt_enable();

#ifdef ALTQ
out2:
#endif
	wg_put_session(wgs, &wgs_psref);
out1:	wg_put_peer(wgp, &wgp_psref);
out0:	m_freem(m);
	curlwp_bindx(bound);
	return error;
}
4242
/*
 * Send mbuf m to wgp's current endpoint over the UDP socket matching
 * the endpoint's address family.  Consumes m on all paths; returns 0
 * or an errno from the UDP output path.
 */
static int
wg_send_udp(struct wg_peer *wgp, struct mbuf *m)
{
	struct psref psref;
	struct wg_sockaddr *wgsa;
	int error;
	struct socket *so;

	/* Pin the endpoint address with a psref while transmitting. */
	wgsa = wg_get_endpoint_sa(wgp, &psref);
	so = wg_get_so_by_peer(wgp, wgsa);
	solock(so);
	if (wgsatosa(wgsa)->sa_family == AF_INET) {
		error = udp_send(so, m, wgsatosa(wgsa), NULL, curlwp);
	} else {
#ifdef INET6
		error = udp6_output(sotoinpcb(so), m, wgsatosin6(wgsa),
		    NULL, curlwp);
#else
		/* No IPv6 support compiled in; drop the packet. */
		m_freem(m);
		error = EPFNOSUPPORT;
#endif
	}
	sounlock(so);
	wg_put_sa(wgp, wgsa, &psref);

	return error;
}
4270
4271 /* Inspired by pppoe_get_mbuf */
4272 static struct mbuf *
4273 wg_get_mbuf(size_t leading_len, size_t len)
4274 {
4275 struct mbuf *m;
4276
4277 KASSERT(leading_len <= MCLBYTES);
4278 KASSERT(len <= MCLBYTES - leading_len);
4279
4280 m = m_gethdr(M_DONTWAIT, MT_DATA);
4281 if (m == NULL)
4282 return NULL;
4283 if (len + leading_len > MHLEN) {
4284 m_clget(m, M_DONTWAIT);
4285 if ((m->m_flags & M_EXT) == 0) {
4286 m_free(m);
4287 return NULL;
4288 }
4289 }
4290 m->m_data += leading_len;
4291 m->m_pkthdr.len = m->m_len = len;
4292
4293 return m;
4294 }
4295
/*
 * Encrypt the payload of m as a WireGuard data message for session wgs
 * and hand it to the ops layer for transmission.  Consumes m (freed at
 * out).  On success, updates interface statistics, records the
 * last-data-sent time for passive keepalives, and triggers
 * rekey-after-time / rekey-after-messages handshakes as needed.
 */
static void
wg_send_data_msg(struct wg_peer *wgp, struct wg_session *wgs, struct mbuf *m)
{
	struct wg_softc *wg = wgp->wgp_sc;
	int error;
	size_t inner_len, padded_len, encrypted_len;
	char *padded_buf = NULL;
	size_t mlen;
	struct wg_msg_data *wgmd;
	bool free_padded_buf = false;
	struct mbuf *n;
	/* Leave room for IP/UDP headers prepended by the send path. */
	size_t leading_len = max_hdr + sizeof(struct udphdr);

	mlen = m_length(m);
	inner_len = mlen;
	/* [W] 5.4.6: plaintext is zero-padded to a multiple of 16. */
	padded_len = roundup(mlen, 16);
	encrypted_len = padded_len + WG_AUTHTAG_LEN;
	WG_DLOG("inner=%zu, padded=%zu, encrypted_len=%zu\n",
	    inner_len, padded_len, encrypted_len);
	if (mlen != 0) {
		bool success;
		/* Use the mbuf data in place if it can be made contiguous;
		 * otherwise copy into a temporary buffer. */
		success = m_ensure_contig(&m, padded_len);
		if (success) {
			padded_buf = mtod(m, char *);
		} else {
			padded_buf = kmem_intr_alloc(padded_len, KM_NOSLEEP);
			if (padded_buf == NULL) {
				error = ENOBUFS;
				goto out;
			}
			free_padded_buf = true;
			m_copydata(m, 0, mlen, padded_buf);
		}
		memset(padded_buf + mlen, 0, padded_len - inner_len);
	}
	/* NOTE: when mlen == 0 (presumably a keepalive -- confirm against
	 * callers), padded_buf stays NULL and padded_len is 0. */

	n = wg_get_mbuf(leading_len, sizeof(*wgmd) + encrypted_len);
	if (n == NULL) {
		error = ENOBUFS;
		goto out;
	}
	KASSERT(n->m_len >= sizeof(*wgmd));
	wgmd = mtod(n, struct wg_msg_data *);
	wg_fill_msg_data(wg, wgp, wgs, wgmd);
#ifdef WG_DEBUG_PACKET
	if (wg_debug & WG_DEBUG_FLAGS_PACKET) {
		hexdump(printf, "padded_buf", padded_buf,
		    padded_len);
	}
#endif
	/* [W] 5.4.6: AEAD(Tm^send, Nm^send, P, e) */
	wg_algo_aead_enc((char *)wgmd + sizeof(*wgmd), encrypted_len,
	    wgs->wgs_tkey_send, le64toh(wgmd->wgmd_counter),
	    padded_buf, padded_len,
	    NULL, 0);
#ifdef WG_DEBUG_PACKET
	/* Debug-only self-check: decrypt what we just encrypted and
	 * compare against the plaintext. */
	if (wg_debug & WG_DEBUG_FLAGS_PACKET) {
		hexdump(printf, "tkey_send", wgs->wgs_tkey_send,
		    sizeof(wgs->wgs_tkey_send));
		hexdump(printf, "wgmd", wgmd, sizeof(*wgmd));
		hexdump(printf, "outgoing packet",
		    (char *)wgmd + sizeof(*wgmd), encrypted_len);
		size_t decrypted_len = encrypted_len - WG_AUTHTAG_LEN;
		char *decrypted_buf = kmem_intr_alloc((decrypted_len +
			WG_AUTHTAG_LEN/*XXX*/), KM_NOSLEEP);
		if (decrypted_buf != NULL) {
			error = wg_algo_aead_dec(
			    1 + decrypted_buf /* force misalignment */,
			    encrypted_len - WG_AUTHTAG_LEN /* XXX */,
			    wgs->wgs_tkey_send, le64toh(wgmd->wgmd_counter),
			    (char *)wgmd + sizeof(*wgmd), encrypted_len,
			    NULL, 0);
			if (error) {
				WG_DLOG("wg_algo_aead_dec failed: %d\n",
				    error);
			}
			if (!consttime_memequal(1 + decrypted_buf,
				(char *)wgmd + sizeof(*wgmd),
				decrypted_len)) {
				WG_DLOG("wg_algo_aead_dec returned garbage\n");
			}
			kmem_intr_free(decrypted_buf, (decrypted_len +
				WG_AUTHTAG_LEN/*XXX*/));
		}
	}
#endif

	error = wg->wg_ops->send_data_msg(wgp, n);	/* consumes n */
	if (error) {
		WG_DLOG("send_data_msg failed, error=%d\n", error);
		goto out;
	}

	/*
	 * Packet was sent out -- count it in the interface statistics.
	 */
	if_statadd(&wg->wg_if, if_obytes, mlen);
	if_statinc(&wg->wg_if, if_opackets);

	/*
	 * Record when we last sent data, for determining when we need
	 * to send a passive keepalive.
	 *
	 * Other logic assumes that wgs_time_last_data_sent is zero iff
	 * we have never sent data on this session.  Early at boot, if
	 * wg(4) starts operating within <1sec, or after 136 years of
	 * uptime, we may observe time_uptime32 = 0.  In that case,
	 * pretend we observed 1 instead.  That way, we correctly
	 * indicate we have sent data on this session; the only logic
	 * this might adversely affect is the keepalive timeout
	 * detection, which might spuriously send a keepalive during
	 * one second every 136 years.  All of this is very silly, of
	 * course, but the cost to guaranteeing wgs_time_last_data_sent
	 * is nonzero is negligible here.
	 */
	const uint32_t now = time_uptime32;
	atomic_store_relaxed(&wgs->wgs_time_last_data_sent, MAX(now, 1));

	/*
	 * Check rekey-after-time.
	 */
	if (wgs->wgs_is_initiator &&
	    ((time_uptime32 -
		    atomic_load_relaxed(&wgs->wgs_time_established)) >=
		wg_rekey_after_time)) {
		/*
		 * [W] 6.2 Transport Message Limits
		 * "if a peer is the initiator of a current secure
		 *  session, WireGuard will send a handshake initiation
		 *  message to begin a new secure session if, after
		 *  transmitting a transport data message, the current
		 *  secure session is REKEY-AFTER-TIME seconds old,"
		 */
		WG_TRACE("rekey after time");
		atomic_store_relaxed(&wgp->wgp_force_rekey, 1);
		wg_schedule_peer_task(wgp, WGP_TASK_SEND_INIT_MESSAGE);
	}

	/*
	 * Check rekey-after-messages.
	 */
	if (wg_session_get_send_counter(wgs) >= wg_rekey_after_messages) {
		/*
		 * [W] 6.2 Transport Message Limits
		 * "WireGuard will try to create a new session, by
		 *  sending a handshake initiation message (section
		 *  5.4.2), after it has sent REKEY-AFTER-MESSAGES
		 *  transport data messages..."
		 */
		WG_TRACE("rekey after messages");
		atomic_store_relaxed(&wgp->wgp_force_rekey, 1);
		wg_schedule_peer_task(wgp, WGP_TASK_SEND_INIT_MESSAGE);
	}

out:	m_freem(m);
	if (free_padded_buf)
		kmem_intr_free(padded_buf, padded_len);
}
4454
4455 static void
4456 wg_input(struct ifnet *ifp, struct mbuf *m, const int af)
4457 {
4458 pktqueue_t *pktq;
4459 size_t pktlen;
4460
4461 KASSERT(af == AF_INET || af == AF_INET6);
4462
4463 WG_TRACE("");
4464
4465 m_set_rcvif(m, ifp);
4466 pktlen = m->m_pkthdr.len;
4467
4468 bpf_mtap_af(ifp, af, m, BPF_D_IN);
4469
4470 switch (af) {
4471 case AF_INET:
4472 pktq = ip_pktq;
4473 break;
4474 #ifdef INET6
4475 case AF_INET6:
4476 pktq = ip6_pktq;
4477 break;
4478 #endif
4479 default:
4480 panic("invalid af=%d", af);
4481 }
4482
4483 kpreempt_disable();
4484 const u_int h = curcpu()->ci_index;
4485 if (__predict_true(pktq_enqueue(pktq, m, h))) {
4486 if_statadd(ifp, if_ibytes, pktlen);
4487 if_statinc(ifp, if_ipackets);
4488 } else {
4489 m_freem(m);
4490 }
4491 kpreempt_enable();
4492 }
4493
/*
 * Derive the public key corresponding to privkey via
 * crypto_scalarmult_base (X25519 base-point scalar multiplication).
 */
static void
wg_calc_pubkey(uint8_t pubkey[WG_STATIC_KEY_LEN],
    const uint8_t privkey[WG_STATIC_KEY_LEN])
{

	crypto_scalarmult_base(pubkey, privkey);
}
4501
4502 static int
4503 wg_rtable_add_route(struct wg_softc *wg, struct wg_allowedip *wga)
4504 {
4505 struct radix_node_head *rnh;
4506 struct radix_node *rn;
4507 int error = 0;
4508
4509 rw_enter(wg->wg_rwlock, RW_WRITER);
4510 rnh = wg_rnh(wg, wga->wga_family);
4511 KASSERT(rnh != NULL);
4512 rn = rnh->rnh_addaddr(&wga->wga_sa_addr, &wga->wga_sa_mask, rnh,
4513 wga->wga_nodes);
4514 rw_exit(wg->wg_rwlock);
4515
4516 if (rn == NULL)
4517 error = EEXIST;
4518
4519 return error;
4520 }
4521
4522 static int
4523 wg_handle_prop_peer(struct wg_softc *wg, prop_dictionary_t peer,
4524 struct wg_peer **wgpp)
4525 {
4526 int error = 0;
4527 const void *pubkey;
4528 size_t pubkey_len;
4529 const void *psk;
4530 size_t psk_len;
4531 const char *name = NULL;
4532
4533 if (prop_dictionary_get_string(peer, "name", &name)) {
4534 if (strlen(name) > WG_PEER_NAME_MAXLEN) {
4535 error = EINVAL;
4536 goto out;
4537 }
4538 }
4539
4540 if (!prop_dictionary_get_data(peer, "public_key",
4541 &pubkey, &pubkey_len)) {
4542 error = EINVAL;
4543 goto out;
4544 }
4545 #ifdef WG_DEBUG_DUMP
4546 if (wg_debug & WG_DEBUG_FLAGS_DUMP) {
4547 char *hex = gethexdump(pubkey, pubkey_len);
4548 log(LOG_DEBUG, "pubkey=%p, pubkey_len=%zu\n%s\n",
4549 pubkey, pubkey_len, hex);
4550 puthexdump(hex, pubkey, pubkey_len);
4551 }
4552 #endif
4553
4554 struct wg_peer *wgp = wg_alloc_peer(wg);
4555 memcpy(wgp->wgp_pubkey, pubkey, sizeof(wgp->wgp_pubkey));
4556 if (name != NULL)
4557 strncpy(wgp->wgp_name, name, sizeof(wgp->wgp_name));
4558
4559 if (prop_dictionary_get_data(peer, "preshared_key", &psk, &psk_len)) {
4560 if (psk_len != sizeof(wgp->wgp_psk)) {
4561 error = EINVAL;
4562 goto out;
4563 }
4564 memcpy(wgp->wgp_psk, psk, sizeof(wgp->wgp_psk));
4565 }
4566
4567 const void *addr;
4568 size_t addr_len;
4569 struct wg_sockaddr *wgsa = wgp->wgp_endpoint;
4570
4571 if (!prop_dictionary_get_data(peer, "endpoint", &addr, &addr_len))
4572 goto skip_endpoint;
4573 if (addr_len < sizeof(*wgsatosa(wgsa)) ||
4574 addr_len > sizeof(*wgsatoss(wgsa))) {
4575 error = EINVAL;
4576 goto out;
4577 }
4578 memcpy(wgsatoss(wgsa), addr, addr_len);
4579 switch (wgsa_family(wgsa)) {
4580 case AF_INET:
4581 #ifdef INET6
4582 case AF_INET6:
4583 #endif
4584 break;
4585 default:
4586 error = EPFNOSUPPORT;
4587 goto out;
4588 }
4589 if (addr_len != sockaddr_getsize_by_family(wgsa_family(wgsa))) {
4590 error = EINVAL;
4591 goto out;
4592 }
4593 {
4594 char addrstr[128];
4595 sockaddr_format(wgsatosa(wgsa), addrstr, sizeof(addrstr));
4596 WG_DLOG("addr=%s\n", addrstr);
4597 }
4598 wgp->wgp_endpoint_available = true;
4599
4600 prop_array_t allowedips;
4601 skip_endpoint:
4602 allowedips = prop_dictionary_get(peer, "allowedips");
4603 if (allowedips == NULL)
4604 goto skip;
4605
4606 prop_object_iterator_t _it = prop_array_iterator(allowedips);
4607 prop_dictionary_t prop_allowedip;
4608 int j = 0;
4609 while ((prop_allowedip = prop_object_iterator_next(_it)) != NULL) {
4610 struct wg_allowedip *wga = &wgp->wgp_allowedips[j];
4611
4612 if (!prop_dictionary_get_int(prop_allowedip, "family",
4613 &wga->wga_family))
4614 continue;
4615 if (!prop_dictionary_get_data(prop_allowedip, "ip",
4616 &addr, &addr_len))
4617 continue;
4618 if (!prop_dictionary_get_uint8(prop_allowedip, "cidr",
4619 &wga->wga_cidr))
4620 continue;
4621
4622 switch (wga->wga_family) {
4623 case AF_INET: {
4624 struct sockaddr_in sin;
4625 char addrstr[128];
4626 struct in_addr mask;
4627 struct sockaddr_in sin_mask;
4628
4629 if (addr_len != sizeof(struct in_addr))
4630 return EINVAL;
4631 memcpy(&wga->wga_addr4, addr, addr_len);
4632
4633 sockaddr_in_init(&sin, (const struct in_addr *)addr,
4634 0);
4635 sockaddr_copy(&wga->wga_sa_addr,
4636 sizeof(sin), sintosa(&sin));
4637
4638 sockaddr_format(sintosa(&sin),
4639 addrstr, sizeof(addrstr));
4640 WG_DLOG("addr=%s/%d\n", addrstr, wga->wga_cidr);
4641
4642 in_len2mask(&mask, wga->wga_cidr);
4643 sockaddr_in_init(&sin_mask, &mask, 0);
4644 sockaddr_copy(&wga->wga_sa_mask,
4645 sizeof(sin_mask), sintosa(&sin_mask));
4646
4647 break;
4648 }
4649 #ifdef INET6
4650 case AF_INET6: {
4651 struct sockaddr_in6 sin6;
4652 char addrstr[128];
4653 struct in6_addr mask;
4654 struct sockaddr_in6 sin6_mask;
4655
4656 if (addr_len != sizeof(struct in6_addr))
4657 return EINVAL;
4658 memcpy(&wga->wga_addr6, addr, addr_len);
4659
4660 sockaddr_in6_init(&sin6, (const struct in6_addr *)addr,
4661 0, 0, 0);
4662 sockaddr_copy(&wga->wga_sa_addr,
4663 sizeof(sin6), sin6tosa(&sin6));
4664
4665 sockaddr_format(sin6tosa(&sin6),
4666 addrstr, sizeof(addrstr));
4667 WG_DLOG("addr=%s/%d\n", addrstr, wga->wga_cidr);
4668
4669 in6_prefixlen2mask(&mask, wga->wga_cidr);
4670 sockaddr_in6_init(&sin6_mask, &mask, 0, 0, 0);
4671 sockaddr_copy(&wga->wga_sa_mask,
4672 sizeof(sin6_mask), sin6tosa(&sin6_mask));
4673
4674 break;
4675 }
4676 #endif
4677 default:
4678 error = EINVAL;
4679 goto out;
4680 }
4681 wga->wga_peer = wgp;
4682
4683 error = wg_rtable_add_route(wg, wga);
4684 if (error != 0)
4685 goto out;
4686
4687 j++;
4688 }
4689 wgp->wgp_n_allowedips = j;
4690 skip:
4691 *wgpp = wgp;
4692 out:
4693 return error;
4694 }
4695
4696 static int
4697 wg_alloc_prop_buf(char **_buf, struct ifdrv *ifd)
4698 {
4699 int error;
4700 char *buf;
4701
4702 WG_DLOG("buf=%p, len=%zu\n", ifd->ifd_data, ifd->ifd_len);
4703 if (ifd->ifd_len >= WG_MAX_PROPLEN)
4704 return E2BIG;
4705 buf = kmem_alloc(ifd->ifd_len + 1, KM_SLEEP);
4706 error = copyin(ifd->ifd_data, buf, ifd->ifd_len);
4707 if (error != 0)
4708 return error;
4709 buf[ifd->ifd_len] = '\0';
4710 #ifdef WG_DEBUG_DUMP
4711 if (wg_debug & WG_DEBUG_FLAGS_DUMP) {
4712 log(LOG_DEBUG, "%.*s\n", (int)MIN(INT_MAX, ifd->ifd_len),
4713 (const char *)buf);
4714 }
4715 #endif
4716 *_buf = buf;
4717 return 0;
4718 }
4719
4720 static int
4721 wg_ioctl_set_private_key(struct wg_softc *wg, struct ifdrv *ifd)
4722 {
4723 int error;
4724 prop_dictionary_t prop_dict;
4725 char *buf = NULL;
4726 const void *privkey;
4727 size_t privkey_len;
4728
4729 error = wg_alloc_prop_buf(&buf, ifd);
4730 if (error != 0)
4731 return error;
4732 error = EINVAL;
4733 prop_dict = prop_dictionary_internalize(buf);
4734 if (prop_dict == NULL)
4735 goto out;
4736 if (!prop_dictionary_get_data(prop_dict, "private_key",
4737 &privkey, &privkey_len))
4738 goto out;
4739 #ifdef WG_DEBUG_DUMP
4740 if (wg_debug & WG_DEBUG_FLAGS_DUMP) {
4741 char *hex = gethexdump(privkey, privkey_len);
4742 log(LOG_DEBUG, "privkey=%p, privkey_len=%zu\n%s\n",
4743 privkey, privkey_len, hex);
4744 puthexdump(hex, privkey, privkey_len);
4745 }
4746 #endif
4747 if (privkey_len != WG_STATIC_KEY_LEN)
4748 goto out;
4749 memcpy(wg->wg_privkey, privkey, WG_STATIC_KEY_LEN);
4750 wg_calc_pubkey(wg->wg_pubkey, wg->wg_privkey);
4751 error = 0;
4752
4753 out:
4754 kmem_free(buf, ifd->ifd_len + 1);
4755 return error;
4756 }
4757
4758 static int
4759 wg_ioctl_set_listen_port(struct wg_softc *wg, struct ifdrv *ifd)
4760 {
4761 int error;
4762 prop_dictionary_t prop_dict;
4763 char *buf = NULL;
4764 uint16_t port;
4765
4766 error = wg_alloc_prop_buf(&buf, ifd);
4767 if (error != 0)
4768 return error;
4769 error = EINVAL;
4770 prop_dict = prop_dictionary_internalize(buf);
4771 if (prop_dict == NULL)
4772 goto out;
4773 if (!prop_dictionary_get_uint16(prop_dict, "listen_port", &port))
4774 goto out;
4775
4776 error = wg->wg_ops->bind_port(wg, (uint16_t)port);
4777
4778 out:
4779 kmem_free(buf, ifd->ifd_len + 1);
4780 return error;
4781 }
4782
4783 static int
4784 wg_ioctl_add_peer(struct wg_softc *wg, struct ifdrv *ifd)
4785 {
4786 int error;
4787 prop_dictionary_t prop_dict;
4788 char *buf = NULL;
4789 struct wg_peer *wgp = NULL, *wgp0 __diagused;
4790
4791 error = wg_alloc_prop_buf(&buf, ifd);
4792 if (error != 0)
4793 return error;
4794 error = EINVAL;
4795 prop_dict = prop_dictionary_internalize(buf);
4796 if (prop_dict == NULL)
4797 goto out;
4798
4799 error = wg_handle_prop_peer(wg, prop_dict, &wgp);
4800 if (error != 0)
4801 goto out;
4802
4803 mutex_enter(wg->wg_lock);
4804 if (thmap_get(wg->wg_peers_bypubkey, wgp->wgp_pubkey,
4805 sizeof(wgp->wgp_pubkey)) != NULL ||
4806 (wgp->wgp_name[0] &&
4807 thmap_get(wg->wg_peers_byname, wgp->wgp_name,
4808 strlen(wgp->wgp_name)) != NULL)) {
4809 mutex_exit(wg->wg_lock);
4810 wg_destroy_peer(wgp);
4811 error = EEXIST;
4812 goto out;
4813 }
4814 wgp0 = thmap_put(wg->wg_peers_bypubkey, wgp->wgp_pubkey,
4815 sizeof(wgp->wgp_pubkey), wgp);
4816 KASSERT(wgp0 == wgp);
4817 if (wgp->wgp_name[0]) {
4818 wgp0 = thmap_put(wg->wg_peers_byname, wgp->wgp_name,
4819 strlen(wgp->wgp_name), wgp);
4820 KASSERT(wgp0 == wgp);
4821 }
4822 WG_PEER_WRITER_INSERT_HEAD(wgp, wg);
4823 wg->wg_npeers++;
4824 mutex_exit(wg->wg_lock);
4825
4826 if_link_state_change(&wg->wg_if, LINK_STATE_UP);
4827
4828 out:
4829 kmem_free(buf, ifd->ifd_len + 1);
4830 return error;
4831 }
4832
4833 static int
4834 wg_ioctl_delete_peer(struct wg_softc *wg, struct ifdrv *ifd)
4835 {
4836 int error;
4837 prop_dictionary_t prop_dict;
4838 char *buf = NULL;
4839 const char *name;
4840
4841 error = wg_alloc_prop_buf(&buf, ifd);
4842 if (error != 0)
4843 return error;
4844 error = EINVAL;
4845 prop_dict = prop_dictionary_internalize(buf);
4846 if (prop_dict == NULL)
4847 goto out;
4848
4849 if (!prop_dictionary_get_string(prop_dict, "name", &name))
4850 goto out;
4851 if (strlen(name) > WG_PEER_NAME_MAXLEN)
4852 goto out;
4853
4854 error = wg_destroy_peer_name(wg, name);
4855 out:
4856 kmem_free(buf, ifd->ifd_len + 1);
4857 return error;
4858 }
4859
4860 static bool
4861 wg_is_authorized(struct wg_softc *wg, u_long cmd)
4862 {
4863 int au = cmd == SIOCGDRVSPEC ?
4864 KAUTH_REQ_NETWORK_INTERFACE_WG_GETPRIV :
4865 KAUTH_REQ_NETWORK_INTERFACE_WG_SETPRIV;
4866 return kauth_authorize_network(kauth_cred_get(),
4867 KAUTH_NETWORK_INTERFACE_WG, au, &wg->wg_if,
4868 (void *)cmd, NULL) == 0;
4869 }
4870
/*
 * SIOCGDRVSPEC handler: externalize the interface configuration
 * (listen port, peers with their endpoints and allowed IPs, and --
 * for privileged callers only -- the private key and preshared keys)
 * into a proplib XML string and copy it out to ifd->ifd_data.
 */
static int
wg_ioctl_get(struct wg_softc *wg, struct ifdrv *ifd)
{
	int error = ENOMEM;
	prop_dictionary_t prop_dict;
	prop_array_t peers = NULL;
	char *buf;
	struct wg_peer *wgp;
	int s, i;

	prop_dict = prop_dictionary_create();
	if (prop_dict == NULL)
		goto error;

	/* Secrets are exported only to privileged callers. */
	if (wg_is_authorized(wg, SIOCGDRVSPEC)) {
		if (!prop_dictionary_set_data(prop_dict, "private_key",
			wg->wg_privkey, WG_STATIC_KEY_LEN))
			goto error;
	}

	if (wg->wg_listen_port != 0) {
		if (!prop_dictionary_set_uint16(prop_dict, "listen_port",
			wg->wg_listen_port))
			goto error;
	}

	if (wg->wg_npeers == 0)
		goto skip_peers;

	peers = prop_array_create();
	if (peers == NULL)
		goto error;

	/*
	 * Walk the peer list under pserialize; each peer is pinned
	 * with a psref while we drop the read section to allocate and
	 * fill its dictionary (which may sleep).
	 */
	s = pserialize_read_enter();
	i = 0;
	WG_PEER_READER_FOREACH(wgp, wg) {
		struct wg_sockaddr *wgsa;
		struct psref wgp_psref, wgsa_psref;
		prop_dictionary_t prop_peer;

		wg_get_peer(wgp, &wgp_psref);
		pserialize_read_exit(s);

		prop_peer = prop_dictionary_create();
		if (prop_peer == NULL)
			goto next;

		if (strlen(wgp->wgp_name) > 0) {
			if (!prop_dictionary_set_string(prop_peer, "name",
				wgp->wgp_name))
				goto next;
		}

		if (!prop_dictionary_set_data(prop_peer, "public_key",
			wgp->wgp_pubkey, sizeof(wgp->wgp_pubkey)))
			goto next;

		/* Export the PSK only if one is set (nonzero) and the
		 * caller is privileged. */
		uint8_t psk_zero[WG_PRESHARED_KEY_LEN] = {0};
		if (!consttime_memequal(wgp->wgp_psk, psk_zero,
			sizeof(wgp->wgp_psk))) {
			if (wg_is_authorized(wg, SIOCGDRVSPEC)) {
				if (!prop_dictionary_set_data(prop_peer,
					"preshared_key",
					wgp->wgp_psk, sizeof(wgp->wgp_psk)))
					goto next;
			}
		}

		wgsa = wg_get_endpoint_sa(wgp, &wgsa_psref);
		CTASSERT(AF_UNSPEC == 0);
		/* Only export the endpoint if one has been set. */
		if (wgsa_family(wgsa) != 0 /*AF_UNSPEC*/ &&
		    !prop_dictionary_set_data(prop_peer, "endpoint",
			wgsatoss(wgsa),
			sockaddr_getsize_by_family(wgsa_family(wgsa)))) {
			wg_put_sa(wgp, wgsa, &wgsa_psref);
			goto next;
		}
		wg_put_sa(wgp, wgsa, &wgsa_psref);

		const struct timespec *t = &wgp->wgp_last_handshake_time;

		if (!prop_dictionary_set_uint64(prop_peer,
			"last_handshake_time_sec", (uint64_t)t->tv_sec))
			goto next;
		if (!prop_dictionary_set_uint32(prop_peer,
			"last_handshake_time_nsec", (uint32_t)t->tv_nsec))
			goto next;

		if (wgp->wgp_n_allowedips == 0)
			goto skip_allowedips;

		prop_array_t allowedips = prop_array_create();
		if (allowedips == NULL)
			goto next;
		for (int j = 0; j < wgp->wgp_n_allowedips; j++) {
			struct wg_allowedip *wga = &wgp->wgp_allowedips[j];
			prop_dictionary_t prop_allowedip;

			prop_allowedip = prop_dictionary_create();
			if (prop_allowedip == NULL)
				break;

			if (!prop_dictionary_set_int(prop_allowedip, "family",
				wga->wga_family))
				goto _next;
			if (!prop_dictionary_set_uint8(prop_allowedip, "cidr",
				wga->wga_cidr))
				goto _next;

			switch (wga->wga_family) {
			case AF_INET:
				if (!prop_dictionary_set_data(prop_allowedip,
					"ip", &wga->wga_addr4,
					sizeof(wga->wga_addr4)))
					goto _next;
				break;
#ifdef INET6
			case AF_INET6:
				if (!prop_dictionary_set_data(prop_allowedip,
					"ip", &wga->wga_addr6,
					sizeof(wga->wga_addr6)))
					goto _next;
				break;
#endif
			default:
				break;
			}
			prop_array_set(allowedips, j, prop_allowedip);
		_next:
			prop_object_release(prop_allowedip);
		}
		prop_dictionary_set(prop_peer, "allowedips", allowedips);
		prop_object_release(allowedips);

	skip_allowedips:

		prop_array_set(peers, i, prop_peer);
	next:
		if (prop_peer)
			prop_object_release(prop_peer);
		i++;

		/* Re-enter the read section before releasing the peer. */
		s = pserialize_read_enter();
		wg_put_peer(wgp, &wgp_psref);
	}
	pserialize_read_exit(s);

	prop_dictionary_set(prop_dict, "peers", peers);
	prop_object_release(peers);
	peers = NULL;

skip_peers:
	buf = prop_dictionary_externalize(prop_dict);
	if (buf == NULL)
		goto error;
	if (ifd->ifd_len < (strlen(buf) + 1)) {
		error = EINVAL;
		goto error;
	}
	error = copyout(buf, ifd->ifd_data, strlen(buf) + 1);

	/* NOTE(review): buf comes from prop_dictionary_externalize;
	 * confirm free(buf, 0) is the matching release. */
	free(buf, 0);
error:
	if (peers != NULL)
		prop_object_release(peers);
	if (prop_dict != NULL)
		prop_object_release(prop_dict);

	return error;
}
5041
/*
 * if_ioctl handler: dispatch interface configuration requests.
 * wg(4)-specific configuration arrives via SIOCSDRVSPEC/SIOCGDRVSPEC
 * with a proplib payload; everything else falls through to
 * ifioctl_common(), and under WG_RUMPKERNEL address ioctls are also
 * mirrored to the backing host tun(4) device.
 */
static int
wg_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct wg_softc *wg = ifp->if_softc;
	struct ifreq *ifr = data;
	struct ifaddr *ifa = data;
	struct ifdrv *ifd = data;
	int error = 0;

	switch (cmd) {
	case SIOCINITIFADDR:
		/* Bring the interface up when a non-link address is set. */
		if (ifa->ifa_addr->sa_family != AF_LINK &&
		    (ifp->if_flags & (IFF_UP | IFF_RUNNING)) !=
		    (IFF_UP | IFF_RUNNING)) {
			ifp->if_flags |= IFF_UP;
			error = if_init(ifp);
		}
		return error;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		switch (ifr->ifr_addr.sa_family) {
		case AF_INET:	/* IP supports Multicast */
			break;
#ifdef INET6
		case AF_INET6:	/* IP6 supports Multicast */
			break;
#endif
		default:	/* Other protocols doesn't support Multicast */
			error = EAFNOSUPPORT;
			break;
		}
		return error;
	case SIOCSDRVSPEC:
		/* All wg(4) configuration changes require SETPRIV. */
		if (!wg_is_authorized(wg, cmd)) {
			return EPERM;
		}
		switch (ifd->ifd_cmd) {
		case WG_IOCTL_SET_PRIVATE_KEY:
			error = wg_ioctl_set_private_key(wg, ifd);
			break;
		case WG_IOCTL_SET_LISTEN_PORT:
			error = wg_ioctl_set_listen_port(wg, ifd);
			break;
		case WG_IOCTL_ADD_PEER:
			error = wg_ioctl_add_peer(wg, ifd);
			break;
		case WG_IOCTL_DELETE_PEER:
			error = wg_ioctl_delete_peer(wg, ifd);
			break;
		default:
			error = EINVAL;
			break;
		}
		return error;
	case SIOCGDRVSPEC:
		return wg_ioctl_get(wg, ifd);
	case SIOCSIFFLAGS:
		if ((error = ifioctl_common(ifp, cmd, data)) != 0)
			break;
		switch (ifp->if_flags & (IFF_UP|IFF_RUNNING)) {
		case IFF_RUNNING:
			/*
			 * If interface is marked down and it is running,
			 * then stop and disable it.
			 */
			if_stop(ifp, 1);
			break;
		case IFF_UP:
			/*
			 * If interface is marked up and it is stopped, then
			 * start it.
			 */
			error = if_init(ifp);
			break;
		default:
			break;
		}
		return error;
#ifdef WG_RUMPKERNEL
	case SIOCSLINKSTR:
		/* Attach a host tun(4) backend and switch to user-mode ops. */
		error = wg_ioctl_linkstr(wg, ifd);
		if (error)
			return error;
		wg->wg_ops = &wg_ops_rumpuser;
		return 0;
#endif
	default:
		break;
	}

	error = ifioctl_common(ifp, cmd, data);

#ifdef WG_RUMPKERNEL
	if (!wg_user_mode(wg))
		return error;

	/* Do the same to the corresponding tun device on the host */
	/*
	 * XXX Actually the command has not been handled yet.  It
	 * will be handled via pr_ioctl form doifioctl later.
	 */
	switch (cmd) {
	case SIOCAIFADDR:
	case SIOCDIFADDR: {
		struct in_aliasreq _ifra = *(const struct in_aliasreq *)data;
		struct in_aliasreq *ifra = &_ifra;
		KASSERT(error == ENOTTY);
		strncpy(ifra->ifra_name, rumpuser_wg_get_tunname(wg->wg_user),
		    IFNAMSIZ);
		error = rumpuser_wg_ioctl(wg->wg_user, cmd, ifra, AF_INET);
		if (error == 0)
			error = ENOTTY;
		break;
	}
#ifdef INET6
	case SIOCAIFADDR_IN6:
	case SIOCDIFADDR_IN6: {
		struct in6_aliasreq _ifra = *(const struct in6_aliasreq *)data;
		struct in6_aliasreq *ifra = &_ifra;
		KASSERT(error == ENOTTY);
		strncpy(ifra->ifra_name, rumpuser_wg_get_tunname(wg->wg_user),
		    IFNAMSIZ);
		error = rumpuser_wg_ioctl(wg->wg_user, cmd, ifra, AF_INET6);
		if (error == 0)
			error = ENOTTY;
		break;
	}
#endif
	}
#endif /* WG_RUMPKERNEL */

	return error;
}
5175
/*
 * if_init handler: mark the interface running.  Always succeeds.
 */
static int
wg_init(struct ifnet *ifp)
{

	ifp->if_flags |= IFF_RUNNING;

	/* TODO flush pending packets. */
	return 0;
}
5185
#ifdef ALTQ
/*
 * if_start handler (ALTQ only): drain the interface send queue into
 * the global wg packet queue for softint processing; packets that do
 * not fit are dropped.
 */
static void
wg_start(struct ifnet *ifp)
{
	struct mbuf *m;

	for (;;) {
		IFQ_DEQUEUE(&ifp->if_snd, m);
		if (m == NULL)
			return;

		kpreempt_disable();
		const uint32_t hash = curcpu()->ci_index; // pktq_rps_hash(m)
		if (!pktq_enqueue(wg_pktq, m, hash)) {
			WGLOG(LOG_ERR, "%s: pktq full, dropping\n",
			    if_name(ifp));
			m_freem(m);
		}
		kpreempt_enable();
	}
}
#endif
5208
/*
 * if_stop handler: mark the interface no longer running.
 */
static void
wg_stop(struct ifnet *ifp, int disable)
{

	KASSERT((ifp->if_flags & IFF_RUNNING) != 0);
	ifp->if_flags &= ~IFF_RUNNING;

	/* Need to do something? */
}
5218
#ifdef WG_DEBUG_PARAMS
/*
 * Create the net.wg sysctl subtree exposing the protocol timing knobs
 * and debug flags (debug builds only).
 *
 * Fix: correct typos in the user-visible descriptions
 * ("liftime" -> "lifetime", "detemine" -> "determine").
 */
SYSCTL_SETUP(sysctl_net_wg_setup, "sysctl net.wg setup")
{
	const struct sysctlnode *node = NULL;

	sysctl_createv(clog, 0, NULL, &node,
	    CTLFLAG_PERMANENT,
	    CTLTYPE_NODE, "wg",
	    SYSCTL_DESCR("wg(4)"),
	    NULL, 0, NULL, 0,
	    CTL_NET, CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, &node, NULL,
	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
	    CTLTYPE_QUAD, "rekey_after_messages",
	    SYSCTL_DESCR("session lifetime by messages"),
	    NULL, 0, &wg_rekey_after_messages, 0, CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, &node, NULL,
	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
	    CTLTYPE_INT, "rekey_after_time",
	    SYSCTL_DESCR("session lifetime"),
	    NULL, 0, &wg_rekey_after_time, 0, CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, &node, NULL,
	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
	    CTLTYPE_INT, "rekey_timeout",
	    SYSCTL_DESCR("session handshake retry time"),
	    NULL, 0, &wg_rekey_timeout, 0, CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, &node, NULL,
	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
	    CTLTYPE_INT, "rekey_attempt_time",
	    SYSCTL_DESCR("session handshake timeout"),
	    NULL, 0, &wg_rekey_attempt_time, 0, CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, &node, NULL,
	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
	    CTLTYPE_INT, "keepalive_timeout",
	    SYSCTL_DESCR("keepalive timeout"),
	    NULL, 0, &wg_keepalive_timeout, 0, CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, &node, NULL,
	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
	    CTLTYPE_BOOL, "force_underload",
	    SYSCTL_DESCR("force to determine under load"),
	    NULL, 0, &wg_force_underload, 0, CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, &node, NULL,
	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
	    CTLTYPE_INT, "debug",
	    SYSCTL_DESCR("set debug flags 1=log 2=trace 4=dump 8=packet"),
	    NULL, 0, &wg_debug, 0, CTL_CREATE, CTL_EOL);
}
#endif
5267
5268 #ifdef WG_RUMPKERNEL
5269 static bool
5270 wg_user_mode(struct wg_softc *wg)
5271 {
5272
5273 return wg->wg_user != NULL;
5274 }
5275
5276 static int
5277 wg_ioctl_linkstr(struct wg_softc *wg, struct ifdrv *ifd)
5278 {
5279 struct ifnet *ifp = &wg->wg_if;
5280 int error;
5281
5282 if (ifp->if_flags & IFF_UP)
5283 return EBUSY;
5284
5285 if (ifd->ifd_cmd == IFLINKSTR_UNSET) {
5286 /* XXX do nothing */
5287 return 0;
5288 } else if (ifd->ifd_cmd != 0) {
5289 return EINVAL;
5290 } else if (wg->wg_user != NULL) {
5291 return EBUSY;
5292 }
5293
5294 /* Assume \0 included */
5295 if (ifd->ifd_len > IFNAMSIZ) {
5296 return E2BIG;
5297 } else if (ifd->ifd_len < 1) {
5298 return EINVAL;
5299 }
5300
5301 char tun_name[IFNAMSIZ];
5302 error = copyinstr(ifd->ifd_data, tun_name, ifd->ifd_len, NULL);
5303 if (error != 0)
5304 return error;
5305
5306 if (strncmp(tun_name, "tun", 3) != 0)
5307 return EINVAL;
5308
5309 error = rumpuser_wg_create(tun_name, wg, &wg->wg_user);
5310
5311 return error;
5312 }
5313
5314 static int
5315 wg_send_user(struct wg_peer *wgp, struct mbuf *m)
5316 {
5317 int error;
5318 struct psref psref;
5319 struct wg_sockaddr *wgsa;
5320 struct wg_softc *wg = wgp->wgp_sc;
5321 struct iovec iov[1];
5322
5323 wgsa = wg_get_endpoint_sa(wgp, &psref);
5324
5325 iov[0].iov_base = mtod(m, void *);
5326 iov[0].iov_len = m->m_len;
5327
5328 /* Send messages to a peer via an ordinary socket. */
5329 error = rumpuser_wg_send_peer(wg->wg_user, wgsatosa(wgsa), iov, 1);
5330
5331 wg_put_sa(wgp, wgsa, &psref);
5332
5333 m_freem(m);
5334
5335 return error;
5336 }
5337
5338 static void
5339 wg_input_user(struct ifnet *ifp, struct mbuf *m, const int af)
5340 {
5341 struct wg_softc *wg = ifp->if_softc;
5342 struct iovec iov[2];
5343 struct sockaddr_storage ss;
5344
5345 KASSERT(af == AF_INET || af == AF_INET6);
5346
5347 WG_TRACE("");
5348
5349 if (af == AF_INET) {
5350 struct sockaddr_in *sin = (struct sockaddr_in *)&ss;
5351 struct ip *ip;
5352
5353 KASSERT(m->m_len >= sizeof(struct ip));
5354 ip = mtod(m, struct ip *);
5355 sockaddr_in_init(sin, &ip->ip_dst, 0);
5356 } else {
5357 struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&ss;
5358 struct ip6_hdr *ip6;
5359
5360 KASSERT(m->m_len >= sizeof(struct ip6_hdr));
5361 ip6 = mtod(m, struct ip6_hdr *);
5362 sockaddr_in6_init(sin6, &ip6->ip6_dst, 0, 0, 0);
5363 }
5364
5365 iov[0].iov_base = &ss;
5366 iov[0].iov_len = ss.ss_len;
5367 iov[1].iov_base = mtod(m, void *);
5368 iov[1].iov_len = m->m_len;
5369
5370 WG_DUMP_BUF(iov[1].iov_base, iov[1].iov_len);
5371
5372 /* Send decrypted packets to users via a tun. */
5373 rumpuser_wg_send_user(wg->wg_user, iov, 2);
5374
5375 m_freem(m);
5376 }
5377
5378 static int
5379 wg_bind_port_user(struct wg_softc *wg, const uint16_t port)
5380 {
5381 int error;
5382 uint16_t old_port = wg->wg_listen_port;
5383
5384 if (port != 0 && old_port == port)
5385 return 0;
5386
5387 error = rumpuser_wg_sock_bind(wg->wg_user, port);
5388 if (error)
5389 return error;
5390
5391 wg->wg_listen_port = port;
5392 return 0;
5393 }
5394
5395 /*
5396 * Receive user packets.
5397 */
5398 void
5399 rumpkern_wg_recv_user(struct wg_softc *wg, struct iovec *iov, size_t iovlen)
5400 {
5401 struct ifnet *ifp = &wg->wg_if;
5402 struct mbuf *m;
5403 const struct sockaddr *dst;
5404 int error;
5405
5406 WG_TRACE("");
5407
5408 dst = iov[0].iov_base;
5409
5410 m = m_gethdr(M_DONTWAIT, MT_DATA);
5411 if (m == NULL)
5412 return;
5413 m->m_len = m->m_pkthdr.len = 0;
5414 m_copyback(m, 0, iov[1].iov_len, iov[1].iov_base);
5415
5416 WG_DLOG("iov_len=%zu\n", iov[1].iov_len);
5417 WG_DUMP_BUF(iov[1].iov_base, iov[1].iov_len);
5418
5419 error = wg_output(ifp, m, dst, NULL); /* consumes m */
5420 if (error)
5421 WG_DLOG("wg_output failed, error=%d\n", error);
5422 }
5423
5424 /*
5425 * Receive packets from a peer.
5426 */
5427 void
5428 rumpkern_wg_recv_peer(struct wg_softc *wg, struct iovec *iov, size_t iovlen)
5429 {
5430 struct mbuf *m;
5431 const struct sockaddr *src;
5432 int bound;
5433
5434 WG_TRACE("");
5435
5436 src = iov[0].iov_base;
5437
5438 m = m_gethdr(M_DONTWAIT, MT_DATA);
5439 if (m == NULL)
5440 return;
5441 m->m_len = m->m_pkthdr.len = 0;
5442 m_copyback(m, 0, iov[1].iov_len, iov[1].iov_base);
5443
5444 WG_DLOG("iov_len=%zu\n", iov[1].iov_len);
5445 WG_DUMP_BUF(iov[1].iov_base, iov[1].iov_len);
5446
5447 bound = curlwp_bind();
5448 wg_handle_packet(wg, m, src);
5449 curlwp_bindx(bound);
5450 }
5451 #endif /* WG_RUMPKERNEL */
5452
5453 /*
5454 * Module infrastructure
5455 */
5456 #include "if_module.h"
5457
5458 IF_MODULE(MODULE_CLASS_DRIVER, wg, "sodium,blake2s")
5459