if_wg.c revision 1.120 1 /* $NetBSD: if_wg.c,v 1.120 2024/07/29 16:01:13 riastradh Exp $ */
2
3 /*
4 * Copyright (C) Ryota Ozaki <ozaki.ryota (at) gmail.com>
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. Neither the name of the project nor the names of its contributors
16 * may be used to endorse or promote products derived from this software
17 * without specific prior written permission.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
20 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
23 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 * SUCH DAMAGE.
30 */
31
32 /*
33 * This network interface aims to implement the WireGuard protocol.
 * The implementation is based on the paper of WireGuard as of
 * 2018-06-30 [1].  The paper is referred to in the source code with the
 * label [W].  Also the specification of the Noise protocol framework as
 * of 2018-07-11 [2] is referred to with the label [N].
38 *
39 * [1] https://www.wireguard.com/papers/wireguard.pdf
40 * [2] http://noiseprotocol.org/noise.pdf
41 */
42
43 #include <sys/cdefs.h>
44 __KERNEL_RCSID(0, "$NetBSD: if_wg.c,v 1.120 2024/07/29 16:01:13 riastradh Exp $");
45
46 #ifdef _KERNEL_OPT
47 #include "opt_altq_enabled.h"
48 #include "opt_inet.h"
49 #endif
50
51 #include <sys/param.h>
52 #include <sys/types.h>
53
54 #include <sys/atomic.h>
55 #include <sys/callout.h>
56 #include <sys/cprng.h>
57 #include <sys/cpu.h>
58 #include <sys/device.h>
59 #include <sys/domain.h>
60 #include <sys/errno.h>
61 #include <sys/intr.h>
62 #include <sys/ioctl.h>
63 #include <sys/kernel.h>
64 #include <sys/kmem.h>
65 #include <sys/mbuf.h>
66 #include <sys/module.h>
67 #include <sys/mutex.h>
68 #include <sys/once.h>
69 #include <sys/percpu.h>
70 #include <sys/pserialize.h>
71 #include <sys/psref.h>
72 #include <sys/queue.h>
73 #include <sys/rwlock.h>
74 #include <sys/socket.h>
75 #include <sys/socketvar.h>
76 #include <sys/sockio.h>
77 #include <sys/sysctl.h>
78 #include <sys/syslog.h>
79 #include <sys/systm.h>
80 #include <sys/thmap.h>
81 #include <sys/threadpool.h>
82 #include <sys/time.h>
83 #include <sys/timespec.h>
84 #include <sys/workqueue.h>
85
86 #include <lib/libkern/libkern.h>
87
88 #include <net/bpf.h>
89 #include <net/if.h>
90 #include <net/if_types.h>
91 #include <net/if_wg.h>
92 #include <net/pktqueue.h>
93 #include <net/route.h>
94
95 #ifdef INET
96 #include <netinet/in.h>
97 #include <netinet/in_pcb.h>
98 #include <netinet/in_var.h>
99 #include <netinet/ip.h>
100 #include <netinet/ip_var.h>
101 #include <netinet/udp.h>
102 #include <netinet/udp_var.h>
103 #endif /* INET */
104
105 #ifdef INET6
106 #include <netinet/ip6.h>
107 #include <netinet6/in6_pcb.h>
108 #include <netinet6/in6_var.h>
109 #include <netinet6/ip6_var.h>
110 #include <netinet6/udp6_var.h>
111 #endif /* INET6 */
112
113 #include <prop/proplib.h>
114
115 #include <crypto/blake2/blake2s.h>
116 #include <crypto/sodium/crypto_aead_chacha20poly1305.h>
117 #include <crypto/sodium/crypto_aead_xchacha20poly1305.h>
118 #include <crypto/sodium/crypto_scalarmult.h>
119
120 #include "ioconf.h"
121
122 #ifdef WG_RUMPKERNEL
123 #include "wg_user.h"
124 #endif
125
126 #ifndef time_uptime32
127 #define time_uptime32 ((uint32_t)time_uptime)
128 #endif
129
130 /*
131 * Data structures
132 * - struct wg_softc is an instance of wg interfaces
133 * - It has a list of peers (struct wg_peer)
134 * - It has a threadpool job that sends/receives handshake messages and
135 * runs event handlers
136 * - It has its own two routing tables: one is for IPv4 and the other IPv6
137 * - struct wg_peer is a representative of a peer
138 * - It has a struct work to handle handshakes and timer tasks
139 * - It has a pair of session instances (struct wg_session)
140 * - It has a pair of endpoint instances (struct wg_sockaddr)
141 * - Normally one endpoint is used and the second one is used only on
142 * a peer migration (a change of peer's IP address)
 * - It has a list of IP addresses and sub networks called allowedips
 *   (struct wg_allowedip)
 *   - A packet sent over a session is allowed if its destination matches
 *     any IP addresses or sub networks of the list
 * - struct wg_session represents a session of a secure tunnel with a peer
 *   - Two instances of sessions belong to a peer; a stable session and an
 *     unstable session
 *   - A handshake process of a session always starts with an unstable
 *     instance
 *   - Once a session is established, its instance becomes stable and the
 *     other becomes unstable instead
 *   - Data messages are always sent via a stable session
154 *
155 * Locking notes:
156 * - Each wg has a mutex(9) wg_lock, and a rwlock(9) wg_rwlock
157 * - Changes to the peer list are serialized by wg_lock
158 * - The peer list may be read with pserialize(9) and psref(9)
159 * - The rwlock (wg_rwlock) protects the routing tables (wg_rtable_ipv[46])
160 * => XXX replace by pserialize when routing table is psz-safe
161 * - Each peer (struct wg_peer, wgp) has a mutex wgp_lock, which can be taken
162 * only in thread context and serializes:
163 * - the stable and unstable session pointers
164 * - all unstable session state
165 * - Packet processing may be done in softint context:
166 * - The stable session can be read under pserialize(9) or psref(9)
167 * - The stable session is always ESTABLISHED
168 * - On a session swap, we must wait for all readers to release a
169 * reference to a stable session before changing wgs_state and
170 * session states
171 * - Lock order: wg_lock -> wgp_lock
172 */
173
174
175 #define WGLOG(level, fmt, args...) \
176 log(level, "%s: " fmt, __func__, ##args)
177
178 #define WG_DEBUG
179
180 /* Debug options */
181 #ifdef WG_DEBUG
182 /* Output debug logs */
183 #ifndef WG_DEBUG_LOG
184 #define WG_DEBUG_LOG
185 #endif
186 /* Output trace logs */
187 #ifndef WG_DEBUG_TRACE
188 #define WG_DEBUG_TRACE
189 #endif
190 /* Output hash values, etc. */
191 #ifndef WG_DEBUG_DUMP
192 #define WG_DEBUG_DUMP
193 #endif
194 /* Make some internal parameters configurable for testing and debugging */
195 #ifndef WG_DEBUG_PARAMS
196 #define WG_DEBUG_PARAMS
197 #endif
198 #endif /* WG_DEBUG */
199
200 #ifndef WG_DEBUG
201 # if defined(WG_DEBUG_LOG) || defined(WG_DEBUG_TRACE) || \
202 defined(WG_DEBUG_DUMP) || defined(WG_DEBUG_PARAMS)
203 # define WG_DEBUG
204 # endif
205 #endif
206
207 #ifdef WG_DEBUG
208 int wg_debug;
209 #define WG_DEBUG_FLAGS_LOG 1
210 #define WG_DEBUG_FLAGS_TRACE 2
211 #define WG_DEBUG_FLAGS_DUMP 4
212 #endif
213
214 #ifdef WG_DEBUG_TRACE
215 #define WG_TRACE(msg) do { \
216 if (wg_debug & WG_DEBUG_FLAGS_TRACE) \
217 log(LOG_DEBUG, "%s:%d: %s\n", __func__, __LINE__, (msg)); \
218 } while (0)
219 #else
220 #define WG_TRACE(msg) __nothing
221 #endif
222
223 #ifdef WG_DEBUG_LOG
224 #define WG_DLOG(fmt, args...) do { \
225 if (wg_debug & WG_DEBUG_FLAGS_LOG) \
226 log(LOG_DEBUG, "%s: " fmt, __func__, ##args); \
227 } while (0)
228 #else
229 #define WG_DLOG(fmt, args...) __nothing
230 #endif
231
232 #define WG_LOG_RATECHECK(wgprc, level, fmt, args...) do { \
233 if (ppsratecheck(&(wgprc)->wgprc_lasttime, \
234 &(wgprc)->wgprc_curpps, 1)) { \
235 log(level, fmt, ##args); \
236 } \
237 } while (0)
238
239 #ifdef WG_DEBUG_PARAMS
240 static bool wg_force_underload = false;
241 #endif
242
243 #ifdef WG_DEBUG_DUMP
244
245 static char enomem[10] = "[enomem]";
246
247 #define MAX_HDUMP_LEN 10000 /* large enough */
248
249 /*
250 * gethexdump(p, n)
251 *
252 * Allocate a string returning a hexdump of bytes p[0..n),
253 * truncated to MAX_HDUMP_LEN. Must be freed with puthexdump.
254 *
255 * We use this instead of libkern hexdump() because the result is
256 * logged with log(LOG_DEBUG, ...), which puts a priority tag on
257 * every message, so it can't be done incrementally.
258 */
259 static char *
260 gethexdump(const void *vp, size_t n)
261 {
262 char *buf;
263 const uint8_t *p = vp;
264 size_t i, alloc;
265
266 alloc = n;
267 if (n > MAX_HDUMP_LEN)
268 alloc = MAX_HDUMP_LEN;
269 buf = kmem_alloc(3*alloc + 5, KM_NOSLEEP);
270 if (buf == NULL)
271 return enomem;
272 for (i = 0; i < alloc; i++)
273 snprintf(buf + 3*i, 3 + 1, " %02hhx", p[i]);
274 if (alloc != n)
275 snprintf(buf + 3*i, 4 + 1, " ...");
276 return buf;
277 }
278
/*
 * puthexdump(buf, p, n)
 *
 *	Free a hexdump string obtained from gethexdump(p, n).  Safe to
 *	call with NULL or the shared "[enomem]" sentinel, in which case
 *	nothing is freed.
 */
static void
puthexdump(char *buf, const void *p, size_t n)
{

	if (buf == NULL || buf == enomem)
		return;
	/* Recompute the allocation size exactly as gethexdump did. */
	if (n > MAX_HDUMP_LEN)
		n = MAX_HDUMP_LEN;
	kmem_free(buf, 3*n + 5);
}
289
290 #ifdef WG_RUMPKERNEL
/*
 * wg_dump_buf(func, buf, size)
 *
 *	Log a hexdump of buf[0..size) at LOG_DEBUG, tagged with the
 *	calling function's name, when dump debugging is enabled.
 */
static void
wg_dump_buf(const char *func, const char *buf, const size_t size)
{
	if ((wg_debug & WG_DEBUG_FLAGS_DUMP) == 0)
		return;

	char *hex = gethexdump(buf, size);

	log(LOG_DEBUG, "%s: %s\n", func, hex);
	puthexdump(hex, buf, size);
}
302 #endif
303
/*
 * wg_dump_hash(func, name, hash, size)
 *
 *	Log a labelled hexdump of hash[0..size) at LOG_DEBUG when dump
 *	debugging is enabled.  Used via WG_DUMP_HASH* below.
 */
static void
wg_dump_hash(const uint8_t *func, const uint8_t *name, const uint8_t *hash,
    const size_t size)
{
	if ((wg_debug & WG_DEBUG_FLAGS_DUMP) == 0)
		return;

	char *hex = gethexdump(hash, size);

	log(LOG_DEBUG, "%s: %s: %s\n", func, name, hex);
	puthexdump(hex, hash, size);
}
316
317 #define WG_DUMP_HASH(name, hash) \
318 wg_dump_hash(__func__, name, hash, WG_HASH_LEN)
319 #define WG_DUMP_HASH48(name, hash) \
320 wg_dump_hash(__func__, name, hash, 48)
321 #define WG_DUMP_BUF(buf, size) \
322 wg_dump_buf(__func__, buf, size)
323 #else
324 #define WG_DUMP_HASH(name, hash) __nothing
325 #define WG_DUMP_HASH48(name, hash) __nothing
326 #define WG_DUMP_BUF(buf, size) __nothing
327 #endif /* WG_DEBUG_DUMP */
328
329 /* chosen somewhat arbitrarily -- fits in signed 16 bits NUL-terminated */
330 #define WG_MAX_PROPLEN 32766
331
332 #define WG_MTU 1420
333 #define WG_ALLOWEDIPS 16
334
335 #define CURVE25519_KEY_LEN 32
336 #define TAI64N_LEN (sizeof(uint32_t) * 3)
337 #define POLY1305_AUTHTAG_LEN 16
338 #define HMAC_BLOCK_LEN 64
339
340 /* [N] 4.1: "DHLEN must be 32 or greater." WireGuard chooses 32. */
341 /* [N] 4.3: Hash functions */
342 #define NOISE_DHLEN 32
343 /* [N] 4.3: "Must be 32 or 64." WireGuard chooses 32. */
344 #define NOISE_HASHLEN 32
345 #define NOISE_BLOCKLEN 64
346 #define NOISE_HKDF_OUTPUT_LEN NOISE_HASHLEN
347 /* [N] 5.1: "k" */
348 #define NOISE_CIPHER_KEY_LEN 32
349 /*
350 * [N] 9.2: "psk"
351 * "... psk is a 32-byte secret value provided by the application."
352 */
353 #define NOISE_PRESHARED_KEY_LEN 32
354
355 #define WG_STATIC_KEY_LEN CURVE25519_KEY_LEN
356 #define WG_TIMESTAMP_LEN TAI64N_LEN
357
358 #define WG_PRESHARED_KEY_LEN NOISE_PRESHARED_KEY_LEN
359
360 #define WG_COOKIE_LEN 16
361 #define WG_MAC_LEN 16
362 #define WG_COOKIESECRET_LEN 32
363
364 #define WG_EPHEMERAL_KEY_LEN CURVE25519_KEY_LEN
365 /* [N] 5.2: "ck: A chaining key of HASHLEN bytes" */
366 #define WG_CHAINING_KEY_LEN NOISE_HASHLEN
367 /* [N] 5.2: "h: A hash output of HASHLEN bytes" */
368 #define WG_HASH_LEN NOISE_HASHLEN
369 #define WG_CIPHER_KEY_LEN NOISE_CIPHER_KEY_LEN
370 #define WG_DH_OUTPUT_LEN NOISE_DHLEN
371 #define WG_KDF_OUTPUT_LEN NOISE_HKDF_OUTPUT_LEN
372 #define WG_AUTHTAG_LEN POLY1305_AUTHTAG_LEN
373 #define WG_DATA_KEY_LEN 32
374 #define WG_SALT_LEN 24
375
376 /*
377 * The protocol messages
378 */
/*
 * The protocol messages ([W] 5.4).  All multibyte fields are
 * little-endian on the wire.
 */

/* Common header: every message starts with its type tag. */
struct wg_msg {
	uint32_t	wgm_type;	/* WG_MSG_TYPE_* */
} __packed;

/* [W] 5.4.2 First Message: Initiator to Responder */
struct wg_msg_init {
	uint32_t	wgmi_type;	/* WG_MSG_TYPE_INIT */
	uint32_t	wgmi_sender;	/* initiator's session index */
	uint8_t		wgmi_ephemeral[WG_EPHEMERAL_KEY_LEN];
	/* static public key, AEAD-encrypted (ciphertext + tag) */
	uint8_t		wgmi_static[WG_STATIC_KEY_LEN + WG_AUTHTAG_LEN];
	/* TAI64N timestamp, AEAD-encrypted (ciphertext + tag) */
	uint8_t		wgmi_timestamp[WG_TIMESTAMP_LEN + WG_AUTHTAG_LEN];
	uint8_t		wgmi_mac1[WG_MAC_LEN];
	uint8_t		wgmi_mac2[WG_MAC_LEN];	/* zero unless a cookie is in effect */
} __packed;

/* [W] 5.4.3 Second Message: Responder to Initiator */
struct wg_msg_resp {
	uint32_t	wgmr_type;	/* WG_MSG_TYPE_RESP */
	uint32_t	wgmr_sender;	/* responder's session index */
	uint32_t	wgmr_receiver;	/* echo of wgmi_sender */
	uint8_t		wgmr_ephemeral[WG_EPHEMERAL_KEY_LEN];
	/* AEAD of the empty plaintext: authentication tag only */
	uint8_t		wgmr_empty[0 + WG_AUTHTAG_LEN];
	uint8_t		wgmr_mac1[WG_MAC_LEN];
	uint8_t		wgmr_mac2[WG_MAC_LEN];
} __packed;

/* [W] 5.4.6 Subsequent Messages: Transport Data Messages */
struct wg_msg_data {
	uint32_t	wgmd_type;	/* WG_MSG_TYPE_DATA */
	uint32_t	wgmd_receiver;	/* peer's session index */
	uint64_t	wgmd_counter;	/* AEAD nonce / replay counter */
	uint32_t	wgmd_packet[];	/* encrypted inner packet */
} __packed;

/* [W] 5.4.7 Under Load: Cookie Reply Message */
struct wg_msg_cookie {
	uint32_t	wgmc_type;	/* WG_MSG_TYPE_COOKIE */
	uint32_t	wgmc_receiver;
	uint8_t		wgmc_salt[WG_SALT_LEN];	/* nonce for the cookie AEAD */
	uint8_t		wgmc_cookie[WG_COOKIE_LEN + WG_AUTHTAG_LEN];
} __packed;

#define WG_MSG_TYPE_INIT		1
#define WG_MSG_TYPE_RESP		2
#define WG_MSG_TYPE_COOKIE		3
#define WG_MSG_TYPE_DATA		4
#define WG_MSG_TYPE_MAX			WG_MSG_TYPE_DATA
426
/* Sliding windows, for replay detection of data-message counters */

#define SLIWIN_BITS	2048u		/* total window size in bits */
#define SLIWIN_TYPE	uint32_t	/* bitmap word type */
#define SLIWIN_BPW	(NBBY*sizeof(SLIWIN_TYPE))	/* bits per word */
#define SLIWIN_WORDS	howmany(SLIWIN_BITS, SLIWIN_BPW)
/* usable window: one word's worth of bits is sacrificed for sliding */
#define SLIWIN_NPKT	(SLIWIN_BITS - NBBY*sizeof(SLIWIN_TYPE))

struct sliwin {
	SLIWIN_TYPE	B[SLIWIN_WORDS];	/* bitmap of seen sequence numbers */
	uint64_t	T;			/* highest sequence number seen */
};
439
440 static void
441 sliwin_reset(struct sliwin *W)
442 {
443
444 memset(W, 0, sizeof(*W));
445 }
446
/*
 * sliwin_check_fast(W, S)
 *
 *	Lock-free pre-check of sequence number S.  Returns EAUTH if S
 *	is definitely too old to accept; returns 0 if S might still be
 *	acceptable, in which case the caller must serialize a call to
 *	sliwin_update to decide for real.
 */
static int
sliwin_check_fast(const volatile struct sliwin *W, uint64_t S)
{

	/*
	 * If it's more than one window older than the highest sequence
	 * number we've seen, reject.
	 */
#ifdef __HAVE_ATOMIC64_LOADSTORE
	if (S + SLIWIN_NPKT < atomic_load_relaxed(&W->T))
		return EAUTH;
#endif

	/*
	 * Otherwise, we need to take the lock to decide, so don't
	 * reject just yet.  Caller must serialize a call to
	 * sliwin_update in this case.
	 */
	return 0;
}
467
468 static int
469 sliwin_update(struct sliwin *W, uint64_t S)
470 {
471 unsigned word, bit;
472
473 /*
474 * If it's more than one window older than the highest sequence
475 * number we've seen, reject.
476 */
477 if (S + SLIWIN_NPKT < W->T)
478 return EAUTH;
479
480 /*
481 * If it's higher than the highest sequence number we've seen,
482 * advance the window.
483 */
484 if (S > W->T) {
485 uint64_t i = W->T / SLIWIN_BPW;
486 uint64_t j = S / SLIWIN_BPW;
487 unsigned k;
488
489 for (k = 0; k < MIN(j - i, SLIWIN_WORDS); k++)
490 W->B[(i + k + 1) % SLIWIN_WORDS] = 0;
491 #ifdef __HAVE_ATOMIC64_LOADSTORE
492 atomic_store_relaxed(&W->T, S);
493 #else
494 W->T = S;
495 #endif
496 }
497
498 /* Test and set the bit -- if already set, reject. */
499 word = (S / SLIWIN_BPW) % SLIWIN_WORDS;
500 bit = S % SLIWIN_BPW;
501 if (W->B[word] & (1UL << bit))
502 return EAUTH;
503 W->B[word] |= 1U << bit;
504
505 /* Accept! */
506 return 0;
507 }
508
/*
 * struct wg_session
 *
 *	One secure-tunnel session with a peer.  Each peer owns two of
 *	these, a stable one and an unstable one; see the data-structure
 *	and locking notes at the top of the file.  State transitions
 *	are serialized by the owning peer's wgp_lock; packet processing
 *	reads the stable session under pserialize/psref.
 */
struct wg_session {
	struct wg_peer	*wgs_peer;	/* back-pointer to owning peer */
	struct psref_target
			wgs_psref;

	int		wgs_state;	/* serialized by wgp_lock */
#define WGS_STATE_UNKNOWN	0	/* unused slot */
#define WGS_STATE_INIT_ACTIVE	1	/* we sent the init, awaiting resp */
#define WGS_STATE_INIT_PASSIVE	2	/* we sent the resp, awaiting data */
#define WGS_STATE_ESTABLISHED	3	/* transport keys in use */
#define WGS_STATE_DESTROYING	4	/* dying, waiting for teardown */

	uint32_t	wgs_time_established;
	volatile uint32_t
			wgs_time_last_data_sent;
	volatile bool	wgs_force_rekey;
	bool		wgs_is_initiator;	/* we initiated this handshake */

	uint32_t	wgs_local_index;	/* our session index */
	uint32_t	wgs_remote_index;	/* peer's session index */
#ifdef __HAVE_ATOMIC64_LOADSTORE
	volatile uint64_t
			wgs_send_counter;	/* outgoing wgmd_counter */
#else
	kmutex_t	wgs_send_counter_lock;
	uint64_t	wgs_send_counter;
#endif

	/* Replay-protection window for received counters. */
	struct {
		kmutex_t	lock;
		struct sliwin	window;
	} *wgs_recvwin;

	/* Noise handshake state and derived transport keys. */
	uint8_t		wgs_handshake_hash[WG_HASH_LEN];
	uint8_t		wgs_chaining_key[WG_CHAINING_KEY_LEN];
	uint8_t		wgs_ephemeral_key_pub[WG_EPHEMERAL_KEY_LEN];
	uint8_t		wgs_ephemeral_key_priv[WG_EPHEMERAL_KEY_LEN];
	uint8_t		wgs_ephemeral_key_peer[WG_EPHEMERAL_KEY_LEN];
	uint8_t		wgs_tkey_send[WG_DATA_KEY_LEN];	/* transport send key */
	uint8_t		wgs_tkey_recv[WG_DATA_KEY_LEN];	/* transport recv key */
};
550
/*
 * struct wg_sockaddr
 *
 *	A peer endpoint address of any supported family, with a psref
 *	target so it can be read from packet-processing context.
 */
struct wg_sockaddr {
	union {
		struct sockaddr_storage _ss;
		struct sockaddr _sa;
		struct sockaddr_in _sin;
		struct sockaddr_in6 _sin6;
	};
	struct psref_target	wgsa_psref;
};

/* Accessors for the address union, by family. */
#define wgsatoss(wgsa)		(&(wgsa)->_ss)
#define wgsatosa(wgsa)		(&(wgsa)->_sa)
#define wgsatosin(wgsa)		(&(wgsa)->_sin)
#define wgsatosin6(wgsa)	(&(wgsa)->_sin6)

#define wgsa_family(wgsa)	(wgsatosa(wgsa)->sa_family)
567
struct wg_peer;

/*
 * struct wg_allowedip
 *
 *	One allowed-ips entry: an address/CIDR prefix that routes to a
 *	peer.  Linked into the per-family radix tree via wga_nodes.
 */
struct wg_allowedip {
	struct radix_node	wga_nodes[2];	/* radix tree linkage */
	struct wg_sockaddr	_wga_sa_addr;
	struct wg_sockaddr	_wga_sa_mask;
#define wga_sa_addr		_wga_sa_addr._sa
#define wga_sa_mask		_wga_sa_mask._sa

	int		wga_family;	/* AF_INET or AF_INET6 */
	uint8_t		wga_cidr;	/* prefix length */
	union {
		struct in_addr _ip4;
		struct in6_addr _ip6;
	} wga_addr;
#define wga_addr4	wga_addr._ip4
#define wga_addr6	wga_addr._ip6

	struct wg_peer	*wga_peer;	/* peer this prefix routes to */
};
587
/* A TAI64N timestamp in wire format ([W] 5.4.2). */
typedef uint8_t wg_timestamp_t[WG_TIMESTAMP_LEN];

/* State for ppsratecheck(9)-based log rate limiting. */
struct wg_ppsratecheck {
	struct timeval		wgprc_lasttime;
	int			wgprc_curpps;
};
594
struct wg_softc;

/*
 * struct wg_peer
 *
 *	One configured peer: its static public key, endpoint, allowed
 *	IPs, and the stable/unstable session pair.  Lives on the peer
 *	list of a struct wg_softc; looked up by public key or by name
 *	via the softc's thmaps.
 */
struct wg_peer {
	struct wg_softc		*wgp_sc;	/* back-pointer */
	char			wgp_name[WG_PEER_NAME_MAXLEN + 1];
	struct pslist_entry	wgp_peerlist_entry;
	pserialize_t		wgp_psz;
	struct psref_target	wgp_psref;
	kmutex_t		*wgp_lock;	/* serializes session state */
	kmutex_t		*wgp_intr_lock;

	uint8_t	wgp_pubkey[WG_STATIC_KEY_LEN];	/* peer's static public key */
	struct wg_sockaddr	*wgp_endpoint;	/* current endpoint */
	struct wg_sockaddr	*wgp_endpoint0;	/* spare, for endpoint migration */
	volatile unsigned	wgp_endpoint_changing;
	bool			wgp_endpoint_available;

	/* The preshared key (optional) */
	uint8_t	wgp_psk[WG_PRESHARED_KEY_LEN];

	/* Session pair; see the locking notes at the top of the file. */
	struct wg_session	*wgp_session_stable;
	struct wg_session	*wgp_session_unstable;

	/* first outgoing packet awaiting session initiation */
	struct mbuf		*volatile wgp_pending;

	/* timestamp in big-endian; rejects replayed init messages */
	wg_timestamp_t	wgp_timestamp_latest_init;

	struct timespec		wgp_last_handshake_time;

	callout_t		wgp_handshake_timeout_timer;
	callout_t		wgp_session_dtor_timer;

	time_t			wgp_handshake_start_time;

	int			wgp_n_allowedips;
	struct wg_allowedip	wgp_allowedips[WG_ALLOWEDIPS];

	/* Cookie/MAC state for the under-load cookie mechanism. */
	time_t			wgp_latest_cookie_time;
	uint8_t			wgp_latest_cookie[WG_COOKIE_LEN];
	uint8_t			wgp_last_sent_mac1[WG_MAC_LEN];
	bool			wgp_last_sent_mac1_valid;
	uint8_t			wgp_last_sent_cookie[WG_COOKIE_LEN];
	bool			wgp_last_sent_cookie_valid;

	time_t			wgp_last_msg_received_time[WG_MSG_TYPE_MAX];

	time_t			wgp_last_cookiesecret_time;
	uint8_t			wgp_cookiesecret[WG_COOKIESECRET_LEN];

	struct wg_ppsratecheck	wgp_ppsratecheck;	/* log rate limiting */

	/* Deferred work item and its pending-task bitmask. */
	struct work		wgp_work;
	unsigned int		wgp_tasks;
#define WGP_TASK_SEND_INIT_MESSAGE		__BIT(0)
#define WGP_TASK_RETRY_HANDSHAKE		__BIT(1)
#define WGP_TASK_ESTABLISH_SESSION		__BIT(2)
#define WGP_TASK_ENDPOINT_CHANGED		__BIT(3)
#define WGP_TASK_SEND_KEEPALIVE_MESSAGE		__BIT(4)
#define WGP_TASK_DESTROY_PREV_SESSION		__BIT(5)
};
656
struct wg_ops;

/*
 * struct wg_softc
 *
 *	One wg(4) interface instance.  See the data-structure and
 *	locking notes at the top of the file.
 */
struct wg_softc {
	struct ifnet	wg_if;
	LIST_ENTRY(wg_softc) wg_list;
	kmutex_t	*wg_lock;	/* serializes peer-list changes */
	kmutex_t	*wg_intr_lock;
	krwlock_t	*wg_rwlock;	/* protects wg_rtable_ipv[46] */

	/* Our static key pair. */
	uint8_t		wg_privkey[WG_STATIC_KEY_LEN];
	uint8_t		wg_pubkey[WG_STATIC_KEY_LEN];

	int		wg_npeers;
	struct pslist_head wg_peers;	/* pserialize-safe peer list */
	struct thmap	*wg_peers_bypubkey;
	struct thmap	*wg_peers_byname;
	struct thmap	*wg_sessions_byindex;	/* keyed by local index */
	uint16_t	wg_listen_port;

	struct threadpool *wg_threadpool;

	/* Job that drains socket upcalls; wg_upcalls says which. */
	struct threadpool_job	wg_job;
	int		wg_upcalls;
#define WG_UPCALL_INET	__BIT(0)
#define WG_UPCALL_INET6	__BIT(1)

#ifdef INET
	struct socket	*wg_so4;		/* IPv4 transport socket */
	struct radix_node_head *wg_rtable_ipv4;	/* allowed-ips, IPv4 */
#endif
#ifdef INET6
	struct socket	*wg_so6;		/* IPv6 transport socket */
	struct radix_node_head *wg_rtable_ipv6;	/* allowed-ips, IPv6 */
#endif

	struct wg_ppsratecheck	wg_ppsratecheck;	/* log rate limiting */

	struct wg_ops	*wg_ops;	/* transport ops (kernel or rump user) */

#ifdef WG_RUMPKERNEL
	struct wg_user	*wg_user;	/* user-space backend state */
#endif
};
700
/* [W] 6.1 Preliminaries: protocol timer and limit constants */
#define WG_REKEY_AFTER_MESSAGES		(1ULL << 60)
#define WG_REJECT_AFTER_MESSAGES	(UINT64_MAX - (1 << 13))
#define WG_REKEY_AFTER_TIME		120	/* seconds */
#define WG_REJECT_AFTER_TIME		180
#define WG_REKEY_ATTEMPT_TIME		90
#define WG_REKEY_TIMEOUT		5
#define WG_KEEPALIVE_TIMEOUT		10

#define WG_COOKIE_TIME			120
#define WG_COOKIESECRET_TIME		(2 * 60)

/*
 * Runtime copies of the protocol parameters, kept in variables rather
 * than used as macros directly -- presumably so debug builds
 * (WG_DEBUG_PARAMS) can adjust them; confirm against the sysctl setup.
 */
static uint64_t wg_rekey_after_messages = WG_REKEY_AFTER_MESSAGES;
static uint64_t wg_reject_after_messages = WG_REJECT_AFTER_MESSAGES;
static unsigned wg_rekey_after_time = WG_REKEY_AFTER_TIME;
static unsigned wg_reject_after_time = WG_REJECT_AFTER_TIME;
static unsigned wg_rekey_attempt_time = WG_REKEY_ATTEMPT_TIME;
static unsigned wg_rekey_timeout = WG_REKEY_TIMEOUT;
static unsigned wg_keepalive_timeout = WG_KEEPALIVE_TIMEOUT;
720
721 static struct mbuf *
722 wg_get_mbuf(size_t, size_t);
723
724 static void wg_send_data_msg(struct wg_peer *, struct wg_session *,
725 struct mbuf *);
726 static void wg_send_cookie_msg(struct wg_softc *, struct wg_peer *,
727 const uint32_t, const uint8_t[static WG_MAC_LEN],
728 const struct sockaddr *);
729 static void wg_send_handshake_msg_resp(struct wg_softc *, struct wg_peer *,
730 struct wg_session *, const struct wg_msg_init *);
731 static void wg_send_keepalive_msg(struct wg_peer *, struct wg_session *);
732
733 static struct wg_peer *
734 wg_pick_peer_by_sa(struct wg_softc *, const struct sockaddr *,
735 struct psref *);
736 static struct wg_peer *
737 wg_lookup_peer_by_pubkey(struct wg_softc *,
738 const uint8_t[static WG_STATIC_KEY_LEN], struct psref *);
739
740 static struct wg_session *
741 wg_lookup_session_by_index(struct wg_softc *,
742 const uint32_t, struct psref *);
743
744 static void wg_update_endpoint_if_necessary(struct wg_peer *,
745 const struct sockaddr *);
746
747 static void wg_schedule_session_dtor_timer(struct wg_peer *);
748
749 static bool wg_is_underload(struct wg_softc *, struct wg_peer *, int);
750 static void wg_calculate_keys(struct wg_session *, const bool);
751
752 static void wg_clear_states(struct wg_session *);
753
754 static void wg_get_peer(struct wg_peer *, struct psref *);
755 static void wg_put_peer(struct wg_peer *, struct psref *);
756
757 static int wg_send_so(struct wg_peer *, struct mbuf *);
758 static int wg_send_udp(struct wg_peer *, struct mbuf *);
759 static int wg_output(struct ifnet *, struct mbuf *,
760 const struct sockaddr *, const struct rtentry *);
761 static void wg_input(struct ifnet *, struct mbuf *, const int);
762 static int wg_ioctl(struct ifnet *, u_long, void *);
763 static int wg_bind_port(struct wg_softc *, const uint16_t);
764 static int wg_init(struct ifnet *);
765 #ifdef ALTQ
766 static void wg_start(struct ifnet *);
767 #endif
768 static void wg_stop(struct ifnet *, int);
769
770 static void wg_peer_work(struct work *, void *);
771 static void wg_job(struct threadpool_job *);
772 static void wgintr(void *);
773 static void wg_purge_pending_packets(struct wg_peer *);
774
775 static int wg_clone_create(struct if_clone *, int);
776 static int wg_clone_destroy(struct ifnet *);
777
/*
 * struct wg_ops
 *
 *	Dispatch table for transport I/O, so the rump user-mode build
 *	can substitute its own send/receive/bind routines.
 */
struct wg_ops {
	int	(*send_hs_msg)(struct wg_peer *, struct mbuf *);
	int	(*send_data_msg)(struct wg_peer *, struct mbuf *);
	void	(*input)(struct ifnet *, struct mbuf *, const int);
	int	(*bind_port)(struct wg_softc *, const uint16_t);
};

/* In-kernel implementation: I/O via kernel sockets. */
struct wg_ops wg_ops_rumpkernel = {
	.send_hs_msg = wg_send_so,
	.send_data_msg = wg_send_udp,
	.input = wg_input,
	.bind_port = wg_bind_port,
};

#ifdef WG_RUMPKERNEL
static bool wg_user_mode(struct wg_softc *);
static int wg_ioctl_linkstr(struct wg_softc *, struct ifdrv *);

static int wg_send_user(struct wg_peer *, struct mbuf *);
static void wg_input_user(struct ifnet *, struct mbuf *, const int);
static int wg_bind_port_user(struct wg_softc *, const uint16_t);

/* Rump user-mode implementation: I/O delegated to the host process. */
struct wg_ops wg_ops_rumpuser = {
	.send_hs_msg = wg_send_user,
	.send_data_msg = wg_send_user,
	.input = wg_input_user,
	.bind_port = wg_bind_port_user,
};
#endif
807
/* Peer-list iteration and maintenance, via pslist(9). */
#define WG_PEER_READER_FOREACH(wgp, wg)					\
	PSLIST_READER_FOREACH((wgp), &(wg)->wg_peers, struct wg_peer,	\
	    wgp_peerlist_entry)
#define WG_PEER_WRITER_FOREACH(wgp, wg)					\
	PSLIST_WRITER_FOREACH((wgp), &(wg)->wg_peers, struct wg_peer,	\
	    wgp_peerlist_entry)
#define WG_PEER_WRITER_INSERT_HEAD(wgp, wg)				\
	PSLIST_WRITER_INSERT_HEAD(&(wg)->wg_peers, (wgp), wgp_peerlist_entry)
#define WG_PEER_WRITER_REMOVE(wgp)					\
	PSLIST_WRITER_REMOVE((wgp), wgp_peerlist_entry)

/* Radix-tree leaf mapping a route to its peer. */
struct wg_route {
	struct radix_node	wgr_nodes[2];
	struct wg_peer		*wgr_peer;
};
823
824 static struct radix_node_head *
825 wg_rnh(struct wg_softc *wg, const int family)
826 {
827
828 switch (family) {
829 #ifdef INET
830 case AF_INET:
831 return wg->wg_rtable_ipv4;
832 #endif
833 #ifdef INET6
834 case AF_INET6:
835 return wg->wg_rtable_ipv6;
836 #endif
837 default:
838 return NULL;
839 }
840 }
841
842
/*
 * Global variables
 */

/* Number of live wg(4) interfaces; guards module detach. */
static volatile unsigned wg_count __cacheline_aligned;

struct psref_class *wg_psref_class __read_mostly;

static struct if_clone wg_cloner =
    IF_CLONE_INITIALIZER("wg", wg_clone_create, wg_clone_destroy);

/* Shared packet queue and workqueue, created once in wginitqueues. */
static struct pktqueue *wg_pktq __read_mostly;
static struct workqueue *wg_wq __read_mostly;
855
void wgattach(int);
/* ARGSUSED */
void
wgattach(int count)
{
	/*
	 * Nothing to do here; initialization is handled by the module
	 * initialization code in wginit() below.
	 */
}
866
/*
 * wginit()
 *
 *	Module initialization: create the psref class and attach the
 *	interface cloner.
 */
static void
wginit(void)
{

	wg_psref_class = psref_class_create("wg", IPL_SOFTNET);

	if_clone_attach(&wg_cloner);
}
875
876 /*
877 * XXX Kludge: This should just happen in wginit, but workqueue_create
878 * cannot be run until after CPUs have been detected, and wginit runs
879 * before configure.
880 */
/*
 * wginitqueues()
 *
 *	Create the shared packet queue and per-CPU workqueue.  Run via
 *	RUN_ONCE from wg_guarantee_initialized; always returns 0.
 */
static int
wginitqueues(void)
{
	int error __diagused;

	wg_pktq = pktq_create(IFQ_MAXLEN, wgintr, NULL);
	KASSERT(wg_pktq != NULL);

	error = workqueue_create(&wg_wq, "wgpeer", wg_peer_work, NULL,
	    PRI_NONE, IPL_SOFTNET, WQ_MPSAFE|WQ_PERCPU);
	KASSERTMSG(error == 0, "error=%d", error);

	return 0;
}
895
/*
 * wg_guarantee_initialized()
 *
 *	Lazily create the shared queues, exactly once, before first
 *	use.  See the XXX comment above wginitqueues for why this is
 *	not done in wginit.
 */
static void
wg_guarantee_initialized(void)
{
	static ONCE_DECL(init);
	int error __diagused;

	error = RUN_ONCE(&init, wginitqueues);
	KASSERTMSG(error == 0, "error=%d", error);
}
905
906 static int
907 wg_count_inc(void)
908 {
909 unsigned o, n;
910
911 do {
912 o = atomic_load_relaxed(&wg_count);
913 if (o == UINT_MAX)
914 return ENFILE;
915 n = o + 1;
916 } while (atomic_cas_uint(&wg_count, o, n) != o);
917
918 return 0;
919 }
920
/*
 * wg_count_dec()
 *
 *	Atomically count the destruction of a wg(4) interface.
 */
static void
wg_count_dec(void)
{
	unsigned c __diagused;

	membar_release();	/* match atomic_load_acquire in wgdetach */
	c = atomic_dec_uint_nv(&wg_count);
	KASSERT(c != UINT_MAX);	/* catch underflow (wrap from zero) */
}
930
/*
 * wgdetach()
 *
 *	Module detach: fail with EBUSY if any wg(4) interface still
 *	exists; otherwise tear down the cloner, the shared queues, and
 *	the psref class.
 */
static int
wgdetach(void)
{

	/* Prevent new interface creation. */
	if_clone_detach(&wg_cloner);

	/*
	 * Check whether there are any existing interfaces.  Matches
	 * membar_release and atomic_dec_uint_nv in wg_count_dec.
	 */
	if (atomic_load_acquire(&wg_count)) {
		/* Back out -- reattach the cloner. */
		if_clone_attach(&wg_cloner);
		return EBUSY;
	}

	/* No interfaces left.  Nuke it. */
	if (wg_wq)
		workqueue_destroy(wg_wq);
	if (wg_pktq)
		pktq_destroy(wg_pktq);
	psref_class_destroy(wg_psref_class);

	return 0;
}
957
958 static void
959 wg_init_key_and_hash(uint8_t ckey[static WG_CHAINING_KEY_LEN],
960 uint8_t hash[static WG_HASH_LEN])
961 {
962 /* [W] 5.4: CONSTRUCTION */
963 const char *signature = "Noise_IKpsk2_25519_ChaChaPoly_BLAKE2s";
964 /* [W] 5.4: IDENTIFIER */
965 const char *id = "WireGuard v1 zx2c4 Jason (at) zx2c4.com";
966 struct blake2s state;
967
968 blake2s(ckey, WG_CHAINING_KEY_LEN, NULL, 0,
969 signature, strlen(signature));
970
971 CTASSERT(WG_HASH_LEN == WG_CHAINING_KEY_LEN);
972 memcpy(hash, ckey, WG_CHAINING_KEY_LEN);
973
974 blake2s_init(&state, WG_HASH_LEN, NULL, 0);
975 blake2s_update(&state, ckey, WG_CHAINING_KEY_LEN);
976 blake2s_update(&state, id, strlen(id));
977 blake2s_final(&state, hash);
978
979 WG_DUMP_HASH("ckey", ckey);
980 WG_DUMP_HASH("hash", hash);
981 }
982
/*
 * wg_algo_hash(hash, input, inputsize)
 *
 *	Mix input into the handshake hash in place:
 *	hash = HASH(hash || input), per [N] 5.2 MixHash.
 */
static void
wg_algo_hash(uint8_t hash[static WG_HASH_LEN], const uint8_t input[],
    const size_t inputsize)
{
	struct blake2s state;

	blake2s_init(&state, WG_HASH_LEN, NULL, 0);
	blake2s_update(&state, hash, WG_HASH_LEN);
	blake2s_update(&state, input, inputsize);
	blake2s_final(&state, hash);
}
994
/*
 * wg_algo_mac(out, outsize, key, keylen, input1, input1len,
 *     input2, input2len)
 *
 *	out = keyed BLAKE2s of input1 || input2.  input2 may be NULL
 *	to MAC input1 alone.
 */
static void
wg_algo_mac(uint8_t out[], const size_t outsize,
    const uint8_t key[], const size_t keylen,
    const uint8_t input1[], const size_t input1len,
    const uint8_t input2[], const size_t input2len)
{
	struct blake2s state;

	blake2s_init(&state, outsize, key, keylen);

	blake2s_update(&state, input1, input1len);
	if (input2 != NULL)
		blake2s_update(&state, input2, input2len);
	blake2s_final(&state, out);
}
1010
/*
 * wg_algo_mac_mac1(out, outsize, input1, input1len, input2, input2len)
 *
 *	[W] 5.4.4 mac1: out = MAC(HASH(LABEL-MAC1 || input1), input2),
 *	with BLAKE2s as both the hash and the keyed MAC.
 */
static void
wg_algo_mac_mac1(uint8_t out[], const size_t outsize,
    const uint8_t input1[], const size_t input1len,
    const uint8_t input2[], const size_t input2len)
{
	struct blake2s state;
	/* [W] 5.4: LABEL-MAC1 */
	const char *label = "mac1----";
	uint8_t key[WG_HASH_LEN];

	/* key = HASH(LABEL-MAC1 || input1) */
	blake2s_init(&state, sizeof(key), NULL, 0);
	blake2s_update(&state, label, strlen(label));
	blake2s_update(&state, input1, input1len);
	blake2s_final(&state, key);

	/* out = keyed BLAKE2s of input2 under that key */
	blake2s_init(&state, outsize, key, sizeof(key));
	if (input2 != NULL)
		blake2s_update(&state, input2, input2len);
	blake2s_final(&state, out);
}
1031
/*
 * wg_algo_mac_cookie(out, outsize, input1, input1len)
 *
 *	[W] 5.4.7: out = HASH(LABEL-COOKIE || input1), an unkeyed
 *	BLAKE2s, used to derive the cookie-encryption key.
 */
static void
wg_algo_mac_cookie(uint8_t out[], const size_t outsize,
    const uint8_t input1[], const size_t input1len)
{
	struct blake2s state;
	/* [W] 5.4: LABEL-COOKIE */
	const char *label = "cookie--";

	blake2s_init(&state, outsize, NULL, 0);
	blake2s_update(&state, label, strlen(label));
	blake2s_update(&state, input1, input1len);
	blake2s_final(&state, out);
}
1045
/*
 * wg_algo_generate_keypair(pubkey, privkey)
 *
 *	Generate a Curve25519 key pair: fill privkey with strong random
 *	bytes and derive pubkey by scalar multiplication with the base
 *	point.
 */
static void
wg_algo_generate_keypair(uint8_t pubkey[static WG_EPHEMERAL_KEY_LEN],
    uint8_t privkey[static WG_EPHEMERAL_KEY_LEN])
{

	CTASSERT(WG_EPHEMERAL_KEY_LEN == crypto_scalarmult_curve25519_BYTES);

	cprng_strong(kern_cprng, privkey, WG_EPHEMERAL_KEY_LEN, 0);
	crypto_scalarmult_base(pubkey, privkey);
}
1056
/*
 * wg_algo_dh(out, privkey, pubkey)
 *
 *	out = Curve25519(privkey, pubkey), the DH shared secret
 *	([N] 4.1 DH).
 */
static void
wg_algo_dh(uint8_t out[static WG_DH_OUTPUT_LEN],
    const uint8_t privkey[static WG_STATIC_KEY_LEN],
    const uint8_t pubkey[static WG_STATIC_KEY_LEN])
{

	CTASSERT(WG_STATIC_KEY_LEN == crypto_scalarmult_curve25519_BYTES);

	int ret __diagused = crypto_scalarmult(out, privkey, pubkey);
	KASSERT(ret == 0);
}
1068
1069 static void
1070 wg_algo_hmac(uint8_t out[], const size_t outlen,
1071 const uint8_t key[], const size_t keylen,
1072 const uint8_t in[], const size_t inlen)
1073 {
1074 #define IPAD 0x36
1075 #define OPAD 0x5c
1076 uint8_t hmackey[HMAC_BLOCK_LEN] = {0};
1077 uint8_t ipad[HMAC_BLOCK_LEN];
1078 uint8_t opad[HMAC_BLOCK_LEN];
1079 size_t i;
1080 struct blake2s state;
1081
1082 KASSERT(outlen == WG_HASH_LEN);
1083 KASSERT(keylen <= HMAC_BLOCK_LEN);
1084
1085 memcpy(hmackey, key, keylen);
1086
1087 for (i = 0; i < sizeof(hmackey); i++) {
1088 ipad[i] = hmackey[i] ^ IPAD;
1089 opad[i] = hmackey[i] ^ OPAD;
1090 }
1091
1092 blake2s_init(&state, WG_HASH_LEN, NULL, 0);
1093 blake2s_update(&state, ipad, sizeof(ipad));
1094 blake2s_update(&state, in, inlen);
1095 blake2s_final(&state, out);
1096
1097 blake2s_init(&state, WG_HASH_LEN, NULL, 0);
1098 blake2s_update(&state, opad, sizeof(opad));
1099 blake2s_update(&state, out, WG_HASH_LEN);
1100 blake2s_final(&state, out);
1101 #undef IPAD
1102 #undef OPAD
1103 }
1104
/*
 * wg_algo_kdf(out1, out2, out3, ckey, input, inputlen)
 *
 *	[N] 4.3 HKDF-style KDF1/KDF2/KDF3 built on HMAC-BLAKE2s:
 *
 *		tmp1 := HMAC(ckey, input)
 *		out1 := HMAC(tmp1, 0x1)
 *		out2 := HMAC(tmp1, out1 || 0x2)
 *		out3 := HMAC(tmp1, out2 || 0x3)
 *
 *	out2 and out3 may be NULL to stop after the first or second
 *	output (KDF1/KDF2).
 */
static void
wg_algo_kdf(uint8_t out1[static WG_KDF_OUTPUT_LEN],
    uint8_t out2[WG_KDF_OUTPUT_LEN],
    uint8_t out3[WG_KDF_OUTPUT_LEN],
    const uint8_t ckey[static WG_CHAINING_KEY_LEN],
    const uint8_t input[], const size_t inputlen)
{
	/* tmp2 has one extra byte for the trailing counter octet */
	uint8_t tmp1[WG_KDF_OUTPUT_LEN], tmp2[WG_KDF_OUTPUT_LEN + 1];
	uint8_t one[1];

	/*
	 * [N] 4.3: "an input_key_material byte sequence with length
	 * either zero bytes, 32 bytes, or DHLEN bytes."
	 */
	KASSERT(inputlen == 0 || inputlen == 32 || inputlen == NOISE_DHLEN);

	WG_DUMP_HASH("ckey", ckey);
	if (input != NULL)
		WG_DUMP_HASH("input", input);
	/* tmp1 := HMAC(ckey, input) -- the HKDF "extract" step */
	wg_algo_hmac(tmp1, sizeof(tmp1), ckey, WG_CHAINING_KEY_LEN,
	    input, inputlen);
	WG_DUMP_HASH("tmp1", tmp1);
	/* out1 := HMAC(tmp1, 0x1) */
	one[0] = 1;
	wg_algo_hmac(out1, WG_KDF_OUTPUT_LEN, tmp1, sizeof(tmp1),
	    one, sizeof(one));
	WG_DUMP_HASH("out1", out1);
	if (out2 == NULL)
		return;
	/* out2 := HMAC(tmp1, out1 || 0x2) */
	memcpy(tmp2, out1, WG_KDF_OUTPUT_LEN);
	tmp2[WG_KDF_OUTPUT_LEN] = 2;
	wg_algo_hmac(out2, WG_KDF_OUTPUT_LEN, tmp1, sizeof(tmp1),
	    tmp2, sizeof(tmp2));
	WG_DUMP_HASH("out2", out2);
	if (out3 == NULL)
		return;
	/* out3 := HMAC(tmp1, out2 || 0x3) */
	memcpy(tmp2, out2, WG_KDF_OUTPUT_LEN);
	tmp2[WG_KDF_OUTPUT_LEN] = 3;
	wg_algo_hmac(out3, WG_KDF_OUTPUT_LEN, tmp1, sizeof(tmp1),
	    tmp2, sizeof(tmp2));
	WG_DUMP_HASH("out3", out3);
}
1146
1147 static void __noinline
1148 wg_algo_dh_kdf(uint8_t ckey[static WG_CHAINING_KEY_LEN],
1149 uint8_t cipher_key[WG_CIPHER_KEY_LEN],
1150 const uint8_t local_key[static WG_STATIC_KEY_LEN],
1151 const uint8_t remote_key[static WG_STATIC_KEY_LEN])
1152 {
1153 uint8_t dhout[WG_DH_OUTPUT_LEN];
1154
1155 wg_algo_dh(dhout, local_key, remote_key);
1156 wg_algo_kdf(ckey, cipher_key, NULL, ckey, dhout, sizeof(dhout));
1157
1158 WG_DUMP_HASH("dhout", dhout);
1159 WG_DUMP_HASH("ckey", ckey);
1160 if (cipher_key != NULL)
1161 WG_DUMP_HASH("cipher_key", cipher_key);
1162 }
1163
1164 static void
1165 wg_algo_aead_enc(uint8_t out[], size_t expected_outsize,
1166 const uint8_t key[static crypto_aead_chacha20poly1305_ietf_KEYBYTES],
1167 const uint64_t counter,
1168 const uint8_t plain[], const size_t plainsize,
1169 const uint8_t auth[], size_t authlen)
1170 {
1171 uint8_t nonce[(32 + 64) / 8] = {0};
1172 long long unsigned int outsize;
1173 int error __diagused;
1174
1175 le64enc(&nonce[4], counter);
1176
1177 error = crypto_aead_chacha20poly1305_ietf_encrypt(out, &outsize, plain,
1178 plainsize, auth, authlen, NULL, nonce, key);
1179 KASSERT(error == 0);
1180 KASSERT(outsize == expected_outsize);
1181 }
1182
1183 static int
1184 wg_algo_aead_dec(uint8_t out[], size_t expected_outsize,
1185 const uint8_t key[static crypto_aead_chacha20poly1305_ietf_KEYBYTES],
1186 const uint64_t counter,
1187 const uint8_t encrypted[], const size_t encryptedsize,
1188 const uint8_t auth[], size_t authlen)
1189 {
1190 uint8_t nonce[(32 + 64) / 8] = {0};
1191 long long unsigned int outsize;
1192 int error;
1193
1194 le64enc(&nonce[4], counter);
1195
1196 error = crypto_aead_chacha20poly1305_ietf_decrypt(out, &outsize, NULL,
1197 encrypted, encryptedsize, auth, authlen, nonce, key);
1198 if (error == 0)
1199 KASSERT(outsize == expected_outsize);
1200 return error;
1201 }
1202
1203 static void
1204 wg_algo_xaead_enc(uint8_t out[], const size_t expected_outsize,
1205 const uint8_t key[static crypto_aead_xchacha20poly1305_ietf_KEYBYTES],
1206 const uint8_t plain[], const size_t plainsize,
1207 const uint8_t auth[], size_t authlen,
1208 const uint8_t nonce[static WG_SALT_LEN])
1209 {
1210 long long unsigned int outsize;
1211 int error __diagused;
1212
1213 CTASSERT(WG_SALT_LEN == crypto_aead_xchacha20poly1305_ietf_NPUBBYTES);
1214 error = crypto_aead_xchacha20poly1305_ietf_encrypt(out, &outsize,
1215 plain, plainsize, auth, authlen, NULL, nonce, key);
1216 KASSERT(error == 0);
1217 KASSERT(outsize == expected_outsize);
1218 }
1219
1220 static int
1221 wg_algo_xaead_dec(uint8_t out[], const size_t expected_outsize,
1222 const uint8_t key[static crypto_aead_xchacha20poly1305_ietf_KEYBYTES],
1223 const uint8_t encrypted[], const size_t encryptedsize,
1224 const uint8_t auth[], size_t authlen,
1225 const uint8_t nonce[static WG_SALT_LEN])
1226 {
1227 long long unsigned int outsize;
1228 int error;
1229
1230 error = crypto_aead_xchacha20poly1305_ietf_decrypt(out, &outsize, NULL,
1231 encrypted, encryptedsize, auth, authlen, nonce, key);
1232 if (error == 0)
1233 KASSERT(outsize == expected_outsize);
1234 return error;
1235 }
1236
/*
 * wg_algo_tai64n(timestamp)
 *
 *	Fill timestamp with the current time in external TAI64N
 *	format: 8 bytes of big-endian TAI64 seconds (offset by the
 *	2^62 label) followed by 4 bytes of big-endian nanoseconds.
 *	[W] 5.4.2: TIMESTAMP()
 */
static void
wg_algo_tai64n(wg_timestamp_t timestamp)
{
	struct timespec ts;

	/* FIXME strict TAI64N (https://cr.yp.to/libtai/tai64.html) */
	getnanotime(&ts);
	/* TAI64 label in external TAI64 format */
	be32enc(timestamp, 0x40000000U + (uint32_t)(ts.tv_sec >> 32));
	/* second beginning from 1970 TAI */
	be32enc(timestamp + 4, (uint32_t)(ts.tv_sec & 0xffffffffU));
	/* nanosecond in big-endian format */
	be32enc(timestamp + 8, (uint32_t)ts.tv_nsec);
}
1251
1252 /*
1253 * wg_get_stable_session(wgp, psref)
1254 *
1255 * Get a passive reference to the current stable session, or
1256 * return NULL if there is no current stable session.
1257 *
1258 * The pointer is always there but the session is not necessarily
1259 * ESTABLISHED; if it is not ESTABLISHED, return NULL. However,
1260 * the session may transition from ESTABLISHED to DESTROYING while
1261 * holding the passive reference.
1262 */
1263 static struct wg_session *
1264 wg_get_stable_session(struct wg_peer *wgp, struct psref *psref)
1265 {
1266 int s;
1267 struct wg_session *wgs;
1268
1269 s = pserialize_read_enter();
1270 wgs = atomic_load_consume(&wgp->wgp_session_stable);
1271 if (__predict_false(wgs->wgs_state != WGS_STATE_ESTABLISHED))
1272 wgs = NULL;
1273 else
1274 psref_acquire(psref, &wgs->wgs_psref, wg_psref_class);
1275 pserialize_read_exit(s);
1276
1277 return wgs;
1278 }
1279
/*
 * wg_put_session(wgs, psref)
 *
 *	Release the passive reference to wgs acquired by
 *	wg_get_stable_session.
 */
static void
wg_put_session(struct wg_session *wgs, struct psref *psref)
{

	psref_release(psref, &wgs->wgs_psref, wg_psref_class);
}
1286
/*
 * wg_destroy_session(wg, wgs)
 *
 *	Remove wgs from the table of sessions by index, wait for any
 *	passive references to drain, erase its keys, and transition it
 *	to WGS_STATE_UNKNOWN.  Caller must hold wgp_lock, and wgs must
 *	not already be UNKNOWN.
 */
static void
wg_destroy_session(struct wg_softc *wg, struct wg_session *wgs)
{
	struct wg_peer *wgp = wgs->wgs_peer;
	struct wg_session *wgs0 __diagused;
	void *garbage;

	KASSERT(mutex_owned(wgp->wgp_lock));
	KASSERT(wgs->wgs_state != WGS_STATE_UNKNOWN);

	/* Remove the session from the table. */
	wgs0 = thmap_del(wg->wg_sessions_byindex,
	    &wgs->wgs_local_index, sizeof(wgs->wgs_local_index));
	KASSERT(wgs0 == wgs);
	garbage = thmap_stage_gc(wg->wg_sessions_byindex);

	/* Wait for passive references to drain. */
	pserialize_perform(wgp->wgp_psz);
	psref_target_destroy(&wgs->wgs_psref, wg_psref_class);

	/*
	 * Free memory, zero state, and transition to UNKNOWN.  We have
	 * exclusive access to the session now, so there is no need for
	 * an atomic store.
	 */
	thmap_gc(wg->wg_sessions_byindex, garbage);
	WG_DLOG("session[L=%"PRIx32" R=%"PRIx32"] -> WGS_STATE_UNKNOWN\n",
	    wgs->wgs_local_index, wgs->wgs_remote_index);
	wgs->wgs_local_index = 0;
	wgs->wgs_remote_index = 0;
	wg_clear_states(wgs);
	wgs->wgs_state = WGS_STATE_UNKNOWN;
	wgs->wgs_force_rekey = false;
}
1321
1322 /*
1323 * wg_get_session_index(wg, wgs)
1324 *
1325 * Choose a session index for wgs->wgs_local_index, and store it
1326 * in wg's table of sessions by index.
1327 *
1328 * wgs must be the unstable session of its peer, and must be
1329 * transitioning out of the UNKNOWN state.
1330 */
1331 static void
1332 wg_get_session_index(struct wg_softc *wg, struct wg_session *wgs)
1333 {
1334 struct wg_peer *wgp __diagused = wgs->wgs_peer;
1335 struct wg_session *wgs0;
1336 uint32_t index;
1337
1338 KASSERT(mutex_owned(wgp->wgp_lock));
1339 KASSERT(wgs == wgp->wgp_session_unstable);
1340 KASSERTMSG(wgs->wgs_state == WGS_STATE_UNKNOWN, "state=%d",
1341 wgs->wgs_state);
1342
1343 do {
1344 /* Pick a uniform random index. */
1345 index = cprng_strong32();
1346
1347 /* Try to take it. */
1348 wgs->wgs_local_index = index;
1349 wgs0 = thmap_put(wg->wg_sessions_byindex,
1350 &wgs->wgs_local_index, sizeof wgs->wgs_local_index, wgs);
1351
1352 /* If someone else beat us, start over. */
1353 } while (__predict_false(wgs0 != wgs));
1354 }
1355
1356 /*
1357 * wg_put_session_index(wg, wgs)
1358 *
1359 * Remove wgs from the table of sessions by index, wait for any
1360 * passive references to drain, and transition the session to the
1361 * UNKNOWN state.
1362 *
1363 * wgs must be the unstable session of its peer, and must not be
1364 * UNKNOWN or ESTABLISHED.
1365 */
static void
wg_put_session_index(struct wg_softc *wg, struct wg_session *wgs)
{
	struct wg_peer *wgp __diagused = wgs->wgs_peer;

	KASSERT(mutex_owned(wgp->wgp_lock));
	KASSERT(wgs->wgs_state != WGS_STATE_UNKNOWN);
	KASSERT(wgs->wgs_state != WGS_STATE_ESTABLISHED);

	/* Tear down the session and wait for readers to drain. */
	wg_destroy_session(wg, wgs);
	/* Re-arm the psref target so the session can be reused. */
	psref_target_init(&wgs->wgs_psref, wg_psref_class);
}
1378
1379 /*
1380 * Handshake patterns
1381 *
1382 * [W] 5: "These messages use the "IK" pattern from Noise"
1383 * [N] 7.5. Interactive handshake patterns (fundamental)
 * "The first character refers to the initiator's static key:"
 * "I = Static key for initiator Immediately transmitted to responder,
 * despite reduced or absent identity hiding"
 * "The second character refers to the responder's static key:"
1388 * "K = Static key for responder Known to initiator"
1389 * "IK:
1390 * <- s
1391 * ...
1392 * -> e, es, s, ss
1393 * <- e, ee, se"
1394 * [N] 9.4. Pattern modifiers
1395 * "IKpsk2:
1396 * <- s
1397 * ...
1398 * -> e, es, s, ss
1399 * <- e, ee, se, psk"
1400 */
/*
 * wg_fill_msg_init(wg, wgp, wgs, wgmi)
 *
 *	Compose the first handshake message (Initiator to Responder,
 *	[W] 5.4.2) for peer wgp into wgmi, and record the resulting
 *	handshake hash, chaining key, and ephemeral key pair in the
 *	unstable session wgs for use when the response arrives.
 *	Caller must hold wgp_lock, and wgs must be in the INIT_ACTIVE
 *	state with a session index already assigned.
 */
static void
wg_fill_msg_init(struct wg_softc *wg, struct wg_peer *wgp,
    struct wg_session *wgs, struct wg_msg_init *wgmi)
{
	uint8_t ckey[WG_CHAINING_KEY_LEN]; /* [W] 5.4.2: Ci */
	uint8_t hash[WG_HASH_LEN]; /* [W] 5.4.2: Hi */
	uint8_t cipher_key[WG_CIPHER_KEY_LEN];
	uint8_t pubkey[WG_EPHEMERAL_KEY_LEN];
	uint8_t privkey[WG_EPHEMERAL_KEY_LEN];

	KASSERT(mutex_owned(wgp->wgp_lock));
	KASSERT(wgs == wgp->wgp_session_unstable);
	KASSERTMSG(wgs->wgs_state == WGS_STATE_INIT_ACTIVE, "state=%d",
	    wgs->wgs_state);

	wgmi->wgmi_type = htole32(WG_MSG_TYPE_INIT);
	wgmi->wgmi_sender = wgs->wgs_local_index;

	/* [W] 5.4.2: First Message: Initiator to Responder */

	/* Ci := HASH(CONSTRUCTION) */
	/* Hi := HASH(Ci || IDENTIFIER) */
	wg_init_key_and_hash(ckey, hash);
	/* Hi := HASH(Hi || Sr^pub) */
	wg_algo_hash(hash, wgp->wgp_pubkey, sizeof(wgp->wgp_pubkey));

	WG_DUMP_HASH("hash", hash);

	/* [N] 2.2: "e" */
	/* Ei^priv, Ei^pub := DH-GENERATE() */
	wg_algo_generate_keypair(pubkey, privkey);
	/* Ci := KDF1(Ci, Ei^pub) */
	wg_algo_kdf(ckey, NULL, NULL, ckey, pubkey, sizeof(pubkey));
	/* msg.ephemeral := Ei^pub */
	memcpy(wgmi->wgmi_ephemeral, pubkey, sizeof(wgmi->wgmi_ephemeral));
	/* Hi := HASH(Hi || msg.ephemeral) */
	wg_algo_hash(hash, pubkey, sizeof(pubkey));

	WG_DUMP_HASH("ckey", ckey);
	WG_DUMP_HASH("hash", hash);

	/* [N] 2.2: "es" */
	/* Ci, k := KDF2(Ci, DH(Ei^priv, Sr^pub)) */
	wg_algo_dh_kdf(ckey, cipher_key, privkey, wgp->wgp_pubkey);

	/* [N] 2.2: "s" */
	/* msg.static := AEAD(k, 0, Si^pub, Hi) */
	wg_algo_aead_enc(wgmi->wgmi_static, sizeof(wgmi->wgmi_static),
	    cipher_key, 0, wg->wg_pubkey, sizeof(wg->wg_pubkey),
	    hash, sizeof(hash));
	/* Hi := HASH(Hi || msg.static) */
	wg_algo_hash(hash, wgmi->wgmi_static, sizeof(wgmi->wgmi_static));

	WG_DUMP_HASH48("wgmi_static", wgmi->wgmi_static);

	/* [N] 2.2: "ss" */
	/* Ci, k := KDF2(Ci, DH(Si^priv, Sr^pub)) */
	wg_algo_dh_kdf(ckey, cipher_key, wg->wg_privkey, wgp->wgp_pubkey);

	/* msg.timestamp := AEAD(k, TIMESTAMP(), Hi) */
	wg_timestamp_t timestamp;
	wg_algo_tai64n(timestamp);
	wg_algo_aead_enc(wgmi->wgmi_timestamp, sizeof(wgmi->wgmi_timestamp),
	    cipher_key, 0, timestamp, sizeof(timestamp), hash, sizeof(hash));
	/* Hi := HASH(Hi || msg.timestamp) */
	wg_algo_hash(hash, wgmi->wgmi_timestamp, sizeof(wgmi->wgmi_timestamp));

	/* [W] 5.4.4 Cookie MACs */
	wg_algo_mac_mac1(wgmi->wgmi_mac1, sizeof(wgmi->wgmi_mac1),
	    wgp->wgp_pubkey, sizeof(wgp->wgp_pubkey),
	    (const uint8_t *)wgmi, offsetof(struct wg_msg_init, wgmi_mac1));
	/* Need mac1 to decrypt a cookie from a cookie message */
	memcpy(wgp->wgp_last_sent_mac1, wgmi->wgmi_mac1,
	    sizeof(wgp->wgp_last_sent_mac1));
	wgp->wgp_last_sent_mac1_valid = true;

	/* mac2 is zero unless we hold a fresh cookie from the peer */
	if (wgp->wgp_latest_cookie_time == 0 ||
	    (time_uptime - wgp->wgp_latest_cookie_time) >= WG_COOKIE_TIME)
		memset(wgmi->wgmi_mac2, 0, sizeof(wgmi->wgmi_mac2));
	else {
		wg_algo_mac(wgmi->wgmi_mac2, sizeof(wgmi->wgmi_mac2),
		    wgp->wgp_latest_cookie, WG_COOKIE_LEN,
		    (const uint8_t *)wgmi,
		    offsetof(struct wg_msg_init, wgmi_mac2),
		    NULL, 0);
	}

	/* Save handshake state for processing the peer's response. */
	memcpy(wgs->wgs_ephemeral_key_pub, pubkey, sizeof(pubkey));
	memcpy(wgs->wgs_ephemeral_key_priv, privkey, sizeof(privkey));
	memcpy(wgs->wgs_handshake_hash, hash, sizeof(hash));
	memcpy(wgs->wgs_chaining_key, ckey, sizeof(ckey));
	WG_DLOG("%s: sender=%x\n", __func__, wgs->wgs_local_index);
}
1494
/*
 * wg_handle_msg_init(wg, wgmi, src)
 *
 *	Handle an incoming first handshake message ([W] 5.4.2) from
 *	src: verify mac1 (and, under load, mac2 -- possibly answering
 *	with a cookie message instead), decrypt and authenticate the
 *	initiator's static key and timestamp, and, if all checks pass,
 *	set up the peer's unstable session, publish it in the
 *	INIT_PASSIVE state, and send our handshake response.
 */
static void __noinline
wg_handle_msg_init(struct wg_softc *wg, const struct wg_msg_init *wgmi,
    const struct sockaddr *src)
{
	uint8_t ckey[WG_CHAINING_KEY_LEN]; /* [W] 5.4.2: Ci */
	uint8_t hash[WG_HASH_LEN]; /* [W] 5.4.2: Hi */
	uint8_t cipher_key[WG_CIPHER_KEY_LEN];
	uint8_t peer_pubkey[WG_STATIC_KEY_LEN];
	struct wg_peer *wgp;
	struct wg_session *wgs;
	int error, ret;
	struct psref psref_peer;
	uint8_t mac1[WG_MAC_LEN];

	WG_TRACE("init msg received");

	wg_algo_mac_mac1(mac1, sizeof(mac1),
	    wg->wg_pubkey, sizeof(wg->wg_pubkey),
	    (const uint8_t *)wgmi, offsetof(struct wg_msg_init, wgmi_mac1));

	/*
	 * [W] 5.3: Denial of Service Mitigation & Cookies
	 * "the responder, ..., must always reject messages with an invalid
	 * msg.mac1"
	 */
	if (!consttime_memequal(mac1, wgmi->wgmi_mac1, sizeof(mac1))) {
		WG_DLOG("mac1 is invalid\n");
		return;
	}

	/*
	 * [W] 5.4.2: First Message: Initiator to Responder
	 * "When the responder receives this message, it does the same
	 * operations so that its final state variables are identical,
	 * replacing the operands of the DH function to produce equivalent
	 * values."
	 * Note that the following comments of operations are just copies of
	 * the initiator's ones.
	 */

	/* Ci := HASH(CONSTRUCTION) */
	/* Hi := HASH(Ci || IDENTIFIER) */
	wg_init_key_and_hash(ckey, hash);
	/* Hi := HASH(Hi || Sr^pub) */
	wg_algo_hash(hash, wg->wg_pubkey, sizeof(wg->wg_pubkey));

	/* [N] 2.2: "e" */
	/* Ci := KDF1(Ci, Ei^pub) */
	wg_algo_kdf(ckey, NULL, NULL, ckey, wgmi->wgmi_ephemeral,
	    sizeof(wgmi->wgmi_ephemeral));
	/* Hi := HASH(Hi || msg.ephemeral) */
	wg_algo_hash(hash, wgmi->wgmi_ephemeral, sizeof(wgmi->wgmi_ephemeral));

	WG_DUMP_HASH("ckey", ckey);

	/* [N] 2.2: "es" */
	/* Ci, k := KDF2(Ci, DH(Ei^priv, Sr^pub)) */
	wg_algo_dh_kdf(ckey, cipher_key, wg->wg_privkey, wgmi->wgmi_ephemeral);

	WG_DUMP_HASH48("wgmi_static", wgmi->wgmi_static);

	/* [N] 2.2: "s" */
	/* msg.static := AEAD(k, 0, Si^pub, Hi) */
	error = wg_algo_aead_dec(peer_pubkey, WG_STATIC_KEY_LEN, cipher_key, 0,
	    wgmi->wgmi_static, sizeof(wgmi->wgmi_static), hash, sizeof(hash));
	if (error != 0) {
		WG_LOG_RATECHECK(&wg->wg_ppsratecheck, LOG_DEBUG,
		    "%s: wg_algo_aead_dec for secret key failed\n",
		    if_name(&wg->wg_if));
		return;
	}
	/* Hi := HASH(Hi || msg.static) */
	wg_algo_hash(hash, wgmi->wgmi_static, sizeof(wgmi->wgmi_static));

	/* The decrypted static key identifies the peer. */
	wgp = wg_lookup_peer_by_pubkey(wg, peer_pubkey, &psref_peer);
	if (wgp == NULL) {
		WG_DLOG("peer not found\n");
		return;
	}

	/*
	 * Lock the peer to serialize access to cookie state.
	 *
	 * XXX Can we safely avoid holding the lock across DH?  Take it
	 * just to verify mac2 and then unlock/DH/lock?
	 */
	mutex_enter(wgp->wgp_lock);

	if (__predict_false(wg_is_underload(wg, wgp, WG_MSG_TYPE_INIT))) {
		WG_TRACE("under load");
		/*
		 * [W] 5.3: Denial of Service Mitigation & Cookies
		 * "the responder, ..., and when under load may reject messages
		 * with an invalid msg.mac2.  If the responder receives a
		 * message with a valid msg.mac1 yet with an invalid msg.mac2,
		 * and is under load, it may respond with a cookie reply
		 * message"
		 */
		uint8_t zero[WG_MAC_LEN] = {0};
		if (consttime_memequal(wgmi->wgmi_mac2, zero, sizeof(zero))) {
			WG_TRACE("sending a cookie message: no cookie included");
			wg_send_cookie_msg(wg, wgp, wgmi->wgmi_sender,
			    wgmi->wgmi_mac1, src);
			goto out;
		}
		if (!wgp->wgp_last_sent_cookie_valid) {
			WG_TRACE("sending a cookie message: no cookie sent ever");
			wg_send_cookie_msg(wg, wgp, wgmi->wgmi_sender,
			    wgmi->wgmi_mac1, src);
			goto out;
		}
		uint8_t mac2[WG_MAC_LEN];
		wg_algo_mac(mac2, sizeof(mac2), wgp->wgp_last_sent_cookie,
		    WG_COOKIE_LEN, (const uint8_t *)wgmi,
		    offsetof(struct wg_msg_init, wgmi_mac2), NULL, 0);
		if (!consttime_memequal(mac2, wgmi->wgmi_mac2, sizeof(mac2))) {
			WG_DLOG("mac2 is invalid\n");
			goto out;
		}
		WG_TRACE("under load, but continue to sending");
	}

	/* [N] 2.2: "ss" */
	/* Ci, k := KDF2(Ci, DH(Si^priv, Sr^pub)) */
	wg_algo_dh_kdf(ckey, cipher_key, wg->wg_privkey, wgp->wgp_pubkey);

	/* msg.timestamp := AEAD(k, TIMESTAMP(), Hi) */
	wg_timestamp_t timestamp;
	error = wg_algo_aead_dec(timestamp, sizeof(timestamp), cipher_key, 0,
	    wgmi->wgmi_timestamp, sizeof(wgmi->wgmi_timestamp),
	    hash, sizeof(hash));
	if (error != 0) {
		WG_LOG_RATECHECK(&wgp->wgp_ppsratecheck, LOG_DEBUG,
		    "%s: peer %s: wg_algo_aead_dec for timestamp failed\n",
		    if_name(&wg->wg_if), wgp->wgp_name);
		goto out;
	}
	/* Hi := HASH(Hi || msg.timestamp) */
	wg_algo_hash(hash, wgmi->wgmi_timestamp, sizeof(wgmi->wgmi_timestamp));

	/*
	 * [W] 5.1 "The responder keeps track of the greatest timestamp
	 *      received per peer and discards packets containing
	 *      timestamps less than or equal to it."
	 */
	ret = memcmp(timestamp, wgp->wgp_timestamp_latest_init,
	    sizeof(timestamp));
	if (ret <= 0) {
		WG_LOG_RATECHECK(&wgp->wgp_ppsratecheck, LOG_DEBUG,
		    "%s: peer %s: invalid init msg: timestamp is old\n",
		    if_name(&wg->wg_if), wgp->wgp_name);
		goto out;
	}
	memcpy(wgp->wgp_timestamp_latest_init, timestamp, sizeof(timestamp));

	/*
	 * Message is good -- we're committing to handle it now, unless
	 * we were already initiating a session.
	 */
	wgs = wgp->wgp_session_unstable;
	switch (wgs->wgs_state) {
	case WGS_STATE_UNKNOWN:		/* new session initiated by peer */
		break;
	case WGS_STATE_INIT_ACTIVE:	/* we're already initiating, drop */
		/* XXX Who wins if both sides send INIT? */
		WG_TRACE("Session already initializing, ignoring the message");
		goto out;
	case WGS_STATE_INIT_PASSIVE:	/* peer is retrying, start over */
		WG_TRACE("Session already initializing, destroying old states");
		/*
		 * XXX Avoid this -- just resend our response -- if the
		 * INIT message is identical to the previous one.
		 */
		wg_put_session_index(wg, wgs);
		KASSERTMSG(wgs->wgs_state == WGS_STATE_UNKNOWN, "state=%d",
		    wgs->wgs_state);
		break;
	case WGS_STATE_ESTABLISHED:	/* can't happen */
		panic("unstable session can't be established");
	case WGS_STATE_DESTROYING:	/* rekey initiated by peer */
		WG_TRACE("Session destroying, but force to clear");
		wg_put_session_index(wg, wgs);
		KASSERTMSG(wgs->wgs_state == WGS_STATE_UNKNOWN, "state=%d",
		    wgs->wgs_state);
		break;
	default:
		panic("invalid session state: %d", wgs->wgs_state);
	}

	/*
	 * Assign a fresh session index.
	 */
	KASSERTMSG(wgs->wgs_state == WGS_STATE_UNKNOWN, "state=%d",
	    wgs->wgs_state);
	wg_get_session_index(wg, wgs);

	/* Record the handshake transcript for wg_fill_msg_resp. */
	memcpy(wgs->wgs_handshake_hash, hash, sizeof(hash));
	memcpy(wgs->wgs_chaining_key, ckey, sizeof(ckey));
	memcpy(wgs->wgs_ephemeral_key_peer, wgmi->wgmi_ephemeral,
	    sizeof(wgmi->wgmi_ephemeral));

	wg_update_endpoint_if_necessary(wgp, src);

	/*
	 * Even though we don't transition from INIT_PASSIVE to
	 * ESTABLISHED until we receive the first data packet from the
	 * initiator, we count the time of the INIT message as the time
	 * of establishment -- this is used to decide when to erase
	 * keys, and we want to start counting as soon as we have
	 * generated keys.
	 */
	wgs->wgs_time_established = time_uptime32;
	wg_schedule_session_dtor_timer(wgp);

	/*
	 * Respond to the initiator with our ephemeral public key.
	 */
	wg_send_handshake_msg_resp(wg, wgp, wgs, wgmi);

	WG_DLOG("session[L=%"PRIx32" R=%"PRIx32"]:"
	    " calculate keys as responder\n",
	    wgs->wgs_local_index, wgs->wgs_remote_index);
	wg_calculate_keys(wgs, false);
	wg_clear_states(wgs);

	/*
	 * Session is ready to receive data now that we have received
	 * the peer initiator's ephemeral key pair, generated our
	 * responder's ephemeral key pair, and derived a session key.
	 *
	 * Transition from UNKNOWN to INIT_PASSIVE to publish it to the
	 * data rx path, wg_handle_msg_data, where the
	 * atomic_load_acquire matching this atomic_store_release
	 * happens.
	 *
	 * (Session is not, however, ready to send data until the peer
	 * has acknowledged our response by sending its first data
	 * packet.  So don't swap the sessions yet.)
	 */
	WG_DLOG("session[L=%"PRIx32" R=%"PRIx32"] -> WGS_STATE_INIT_PASSIVE\n",
	    wgs->wgs_local_index, wgs->wgs_remote_index);
	atomic_store_release(&wgs->wgs_state, WGS_STATE_INIT_PASSIVE);
	WG_TRACE("WGS_STATE_INIT_PASSIVE");

out:
	mutex_exit(wgp->wgp_lock);
	wg_put_peer(wgp, &psref_peer);
}
1743
/*
 * wg_get_so_by_af(wg, af)
 *
 *	Return the interface's transport socket for address family af
 *	(AF_INET or AF_INET6); panic on any other family.
 */
static struct socket *
wg_get_so_by_af(struct wg_softc *wg, const int af)
{

	switch (af) {
#ifdef INET
	case AF_INET:
		return wg->wg_so4;
#endif
#ifdef INET6
	case AF_INET6:
		return wg->wg_so6;
#endif
	default:
		panic("wg: no such af: %d", af);
	}
}
1761
/*
 * wg_get_so_by_peer(wgp, wgsa)
 *
 *	Return the socket matching the address family of the peer
 *	endpoint wgsa.
 */
static struct socket *
wg_get_so_by_peer(struct wg_peer *wgp, struct wg_sockaddr *wgsa)
{

	return wg_get_so_by_af(wgp->wgp_sc, wgsa_family(wgsa));
}
1768
1769 static struct wg_sockaddr *
1770 wg_get_endpoint_sa(struct wg_peer *wgp, struct psref *psref)
1771 {
1772 struct wg_sockaddr *wgsa;
1773 int s;
1774
1775 s = pserialize_read_enter();
1776 wgsa = atomic_load_consume(&wgp->wgp_endpoint);
1777 psref_acquire(psref, &wgsa->wgsa_psref, wg_psref_class);
1778 pserialize_read_exit(s);
1779
1780 return wgsa;
1781 }
1782
/*
 * wg_put_sa(wgp, wgsa, psref)
 *
 *	Release the passive reference to wgsa acquired by
 *	wg_get_endpoint_sa.
 */
static void
wg_put_sa(struct wg_peer *wgp, struct wg_sockaddr *wgsa, struct psref *psref)
{

	psref_release(psref, &wgsa->wgsa_psref, wg_psref_class);
}
1789
1790 static int
1791 wg_send_so(struct wg_peer *wgp, struct mbuf *m)
1792 {
1793 int error;
1794 struct socket *so;
1795 struct psref psref;
1796 struct wg_sockaddr *wgsa;
1797
1798 wgsa = wg_get_endpoint_sa(wgp, &psref);
1799 so = wg_get_so_by_peer(wgp, wgsa);
1800 error = sosend(so, wgsatosa(wgsa), NULL, m, NULL, 0, curlwp);
1801 wg_put_sa(wgp, wgsa, &psref);
1802
1803 return error;
1804 }
1805
/*
 * wg_send_handshake_msg_init(wg, wgp)
 *
 *	Begin a new handshake as initiator: allocate a fresh session
 *	index, transition the unstable session to INIT_ACTIVE, compose
 *	and send the first handshake message, and schedule the
 *	handshake timeout.  Caller must hold wgp_lock.
 */
static void
wg_send_handshake_msg_init(struct wg_softc *wg, struct wg_peer *wgp)
{
	int error;
	struct mbuf *m;
	struct wg_msg_init *wgmi;
	struct wg_session *wgs;

	KASSERT(mutex_owned(wgp->wgp_lock));

	wgs = wgp->wgp_session_unstable;
	/* XXX pull dispatch out into wg_task_send_init_message */
	switch (wgs->wgs_state) {
	case WGS_STATE_UNKNOWN:		/* new session initiated by us */
		break;
	case WGS_STATE_INIT_ACTIVE:	/* we're already initiating, stop */
		WG_TRACE("Session already initializing, skip starting new one");
		return;
	case WGS_STATE_INIT_PASSIVE:	/* peer was trying -- XXX what now? */
		WG_TRACE("Session already initializing, waiting for peer");
		return;
	case WGS_STATE_ESTABLISHED:	/* can't happen */
		panic("unstable session can't be established");
	case WGS_STATE_DESTROYING:	/* rekey initiated by us too early */
		WG_TRACE("Session destroying");
		wg_put_session_index(wg, wgs);
		KASSERTMSG(wgs->wgs_state == WGS_STATE_UNKNOWN, "state=%d",
		    wgs->wgs_state);
		break;
	}

	/*
	 * Assign a fresh session index.
	 */
	KASSERTMSG(wgs->wgs_state == WGS_STATE_UNKNOWN, "state=%d",
	    wgs->wgs_state);
	wg_get_session_index(wg, wgs);

	/*
	 * We have initiated a session.  Transition to INIT_ACTIVE.
	 * This doesn't publish it for use in the data rx path,
	 * wg_handle_msg_data, or in the data tx path, wg_output -- we
	 * have to wait for the peer to respond with their ephemeral
	 * public key before we can derive a session key for tx/rx.
	 * Hence only atomic_store_relaxed.
	 */
	WG_DLOG("session[L=%"PRIx32" R=(unknown)] -> WGS_STATE_INIT_ACTIVE\n",
	    wgs->wgs_local_index);
	atomic_store_relaxed(&wgs->wgs_state, WGS_STATE_INIT_ACTIVE);

	/* Allocate an mbuf large enough for the init message. */
	m = m_gethdr(M_WAIT, MT_DATA);
	if (sizeof(*wgmi) > MHLEN) {
		m_clget(m, M_WAIT);
		CTASSERT(sizeof(*wgmi) <= MCLBYTES);
	}
	m->m_pkthdr.len = m->m_len = sizeof(*wgmi);
	wgmi = mtod(m, struct wg_msg_init *);
	wg_fill_msg_init(wg, wgp, wgs, wgmi);

	error = wg->wg_ops->send_hs_msg(wgp, m); /* consumes m */
	if (error) {
		/*
		 * Sending out an initiation packet failed; give up on
		 * this session and toss packet waiting for it if any.
		 *
		 * XXX Why don't we just let the periodic handshake
		 * retry logic work in this case?
		 */
		WG_DLOG("send_hs_msg failed, error=%d\n", error);
		wg_put_session_index(wg, wgs);
		m = atomic_swap_ptr(&wgp->wgp_pending, NULL);
		m_freem(m);
		return;
	}

	WG_TRACE("init msg sent");
	if (wgp->wgp_handshake_start_time == 0)
		wgp->wgp_handshake_start_time = time_uptime;
	callout_schedule(&wgp->wgp_handshake_timeout_timer,
	    MIN(wg_rekey_timeout, (unsigned)(INT_MAX / hz)) * hz);
}
1887
1888 static void
1889 wg_fill_msg_resp(struct wg_softc *wg, struct wg_peer *wgp,
1890 struct wg_session *wgs, struct wg_msg_resp *wgmr,
1891 const struct wg_msg_init *wgmi)
1892 {
1893 uint8_t ckey[WG_CHAINING_KEY_LEN]; /* [W] 5.4.3: Cr */
1894 uint8_t hash[WG_HASH_LEN]; /* [W] 5.4.3: Hr */
1895 uint8_t cipher_key[WG_KDF_OUTPUT_LEN];
1896 uint8_t pubkey[WG_EPHEMERAL_KEY_LEN];
1897 uint8_t privkey[WG_EPHEMERAL_KEY_LEN];
1898
1899 KASSERT(mutex_owned(wgp->wgp_lock));
1900 KASSERT(wgs == wgp->wgp_session_unstable);
1901 KASSERTMSG(wgs->wgs_state == WGS_STATE_UNKNOWN, "state=%d",
1902 wgs->wgs_state);
1903
1904 memcpy(hash, wgs->wgs_handshake_hash, sizeof(hash));
1905 memcpy(ckey, wgs->wgs_chaining_key, sizeof(ckey));
1906
1907 wgmr->wgmr_type = htole32(WG_MSG_TYPE_RESP);
1908 wgmr->wgmr_sender = wgs->wgs_local_index;
1909 wgmr->wgmr_receiver = wgmi->wgmi_sender;
1910
1911 /* [W] 5.4.3 Second Message: Responder to Initiator */
1912
1913 /* [N] 2.2: "e" */
1914 /* Er^priv, Er^pub := DH-GENERATE() */
1915 wg_algo_generate_keypair(pubkey, privkey);
1916 /* Cr := KDF1(Cr, Er^pub) */
1917 wg_algo_kdf(ckey, NULL, NULL, ckey, pubkey, sizeof(pubkey));
1918 /* msg.ephemeral := Er^pub */
1919 memcpy(wgmr->wgmr_ephemeral, pubkey, sizeof(wgmr->wgmr_ephemeral));
1920 /* Hr := HASH(Hr || msg.ephemeral) */
1921 wg_algo_hash(hash, pubkey, sizeof(pubkey));
1922
1923 WG_DUMP_HASH("ckey", ckey);
1924 WG_DUMP_HASH("hash", hash);
1925
1926 /* [N] 2.2: "ee" */
1927 /* Cr := KDF1(Cr, DH(Er^priv, Ei^pub)) */
1928 wg_algo_dh_kdf(ckey, NULL, privkey, wgs->wgs_ephemeral_key_peer);
1929
1930 /* [N] 2.2: "se" */
1931 /* Cr := KDF1(Cr, DH(Er^priv, Si^pub)) */
1932 wg_algo_dh_kdf(ckey, NULL, privkey, wgp->wgp_pubkey);
1933
1934 /* [N] 9.2: "psk" */
1935 {
1936 uint8_t kdfout[WG_KDF_OUTPUT_LEN];
1937 /* Cr, r, k := KDF3(Cr, Q) */
1938 wg_algo_kdf(ckey, kdfout, cipher_key, ckey, wgp->wgp_psk,
1939 sizeof(wgp->wgp_psk));
1940 /* Hr := HASH(Hr || r) */
1941 wg_algo_hash(hash, kdfout, sizeof(kdfout));
1942 }
1943
1944 /* msg.empty := AEAD(k, 0, e, Hr) */
1945 wg_algo_aead_enc(wgmr->wgmr_empty, sizeof(wgmr->wgmr_empty),
1946 cipher_key, 0, NULL, 0, hash, sizeof(hash));
1947 /* Hr := HASH(Hr || msg.empty) */
1948 wg_algo_hash(hash, wgmr->wgmr_empty, sizeof(wgmr->wgmr_empty));
1949
1950 WG_DUMP_HASH("wgmr_empty", wgmr->wgmr_empty);
1951
1952 /* [W] 5.4.4: Cookie MACs */
1953 /* msg.mac1 := MAC(HASH(LABEL-MAC1 || Sm'^pub), msg_a) */
1954 wg_algo_mac_mac1(wgmr->wgmr_mac1, sizeof(wgmi->wgmi_mac1),
1955 wgp->wgp_pubkey, sizeof(wgp->wgp_pubkey),
1956 (const uint8_t *)wgmr, offsetof(struct wg_msg_resp, wgmr_mac1));
1957 /* Need mac1 to decrypt a cookie from a cookie message */
1958 memcpy(wgp->wgp_last_sent_mac1, wgmr->wgmr_mac1,
1959 sizeof(wgp->wgp_last_sent_mac1));
1960 wgp->wgp_last_sent_mac1_valid = true;
1961
1962 if (wgp->wgp_latest_cookie_time == 0 ||
1963 (time_uptime - wgp->wgp_latest_cookie_time) >= WG_COOKIE_TIME)
1964 /* msg.mac2 := 0^16 */
1965 memset(wgmr->wgmr_mac2, 0, sizeof(wgmr->wgmr_mac2));
1966 else {
1967 /* msg.mac2 := MAC(Lm, msg_b) */
1968 wg_algo_mac(wgmr->wgmr_mac2, sizeof(wgmi->wgmi_mac2),
1969 wgp->wgp_latest_cookie, WG_COOKIE_LEN,
1970 (const uint8_t *)wgmr,
1971 offsetof(struct wg_msg_resp, wgmr_mac2),
1972 NULL, 0);
1973 }
1974
1975 memcpy(wgs->wgs_handshake_hash, hash, sizeof(hash));
1976 memcpy(wgs->wgs_chaining_key, ckey, sizeof(ckey));
1977 memcpy(wgs->wgs_ephemeral_key_pub, pubkey, sizeof(pubkey));
1978 memcpy(wgs->wgs_ephemeral_key_priv, privkey, sizeof(privkey));
1979 wgs->wgs_remote_index = wgmi->wgmi_sender;
1980 WG_DLOG("sender=%x\n", wgs->wgs_local_index);
1981 WG_DLOG("receiver=%x\n", wgs->wgs_remote_index);
1982 }
1983
1984 static void
1985 wg_swap_sessions(struct wg_peer *wgp)
1986 {
1987 struct wg_session *wgs, *wgs_prev;
1988
1989 KASSERT(mutex_owned(wgp->wgp_lock));
1990
1991 /*
1992 * Get the newly established session, to become the new
1993 * session. Caller must have transitioned from INIT_ACTIVE to
1994 * INIT_PASSIVE or to ESTABLISHED already. This will become
1995 * the stable session.
1996 */
1997 wgs = wgp->wgp_session_unstable;
1998 KASSERTMSG(wgs->wgs_state == WGS_STATE_ESTABLISHED, "state=%d",
1999 wgs->wgs_state);
2000
2001 /*
2002 * Get the stable session, which is either the previously
2003 * established session in the ESTABLISHED state, or has not
2004 * been established at all and is UNKNOWN. This will become
2005 * the unstable session.
2006 */
2007 wgs_prev = wgp->wgp_session_stable;
2008 KASSERTMSG((wgs_prev->wgs_state == WGS_STATE_ESTABLISHED ||
2009 wgs_prev->wgs_state == WGS_STATE_UNKNOWN),
2010 "state=%d", wgs_prev->wgs_state);
2011
2012 /*
2013 * Publish the newly established session for the tx path to use
2014 * and make the other one the unstable session to handle
2015 * stragglers in the rx path and later be used for the next
2016 * session's handshake.
2017 *
2018 * If wgs_prev was previously ESTABLISHED, caller must
2019 * transition it to DESTROYING and then pass through
2020 * wg_put_session_index before recycling it.
2021 *
2022 * XXX Factor that logic out into this routine.
2023 */
2024 atomic_store_release(&wgp->wgp_session_stable, wgs);
2025 wgp->wgp_session_unstable = wgs_prev;
2026 }
2027
/*
 * wg_handle_msg_resp(wg, wgmr, src)
 *
 *	Handle an incoming handshake response message as initiator:
 *	authenticate it by mac1 (and, under load, mac2), complete the
 *	initiator's side of the [W] 5.4.3 handshake to derive the
 *	transport keys, and publish the session for data tx/rx.
 */
static void __noinline
wg_handle_msg_resp(struct wg_softc *wg, const struct wg_msg_resp *wgmr,
    const struct sockaddr *src)
{
	uint8_t ckey[WG_CHAINING_KEY_LEN]; /* [W] 5.4.3: Cr */
	uint8_t hash[WG_HASH_LEN]; /* [W] 5.4.3: Hr */
	uint8_t cipher_key[WG_KDF_OUTPUT_LEN];
	struct wg_peer *wgp;
	struct wg_session *wgs;
	struct psref psref;
	int error;
	uint8_t mac1[WG_MAC_LEN];
	struct wg_session *wgs_prev;
	struct mbuf *m;

	/* Compute the expected mac1 over the head of the message. */
	wg_algo_mac_mac1(mac1, sizeof(mac1),
	    wg->wg_pubkey, sizeof(wg->wg_pubkey),
	    (const uint8_t *)wgmr, offsetof(struct wg_msg_resp, wgmr_mac1));

	/*
	 * [W] 5.3: Denial of Service Mitigation & Cookies
	 * "the responder, ..., must always reject messages with an invalid
	 * msg.mac1"
	 */
	if (!consttime_memequal(mac1, wgmr->wgmr_mac1, sizeof(mac1))) {
		WG_DLOG("mac1 is invalid\n");
		return;
	}

	WG_TRACE("resp msg received");
	wgs = wg_lookup_session_by_index(wg, wgmr->wgmr_receiver, &psref);
	if (wgs == NULL) {
		WG_TRACE("No session found");
		return;
	}

	wgp = wgs->wgs_peer;

	mutex_enter(wgp->wgp_lock);

	/* If we weren't waiting for a handshake response, drop it. */
	if (wgs->wgs_state != WGS_STATE_INIT_ACTIVE) {
		WG_TRACE("peer sent spurious handshake response, ignoring");
		goto out;
	}

	if (__predict_false(wg_is_underload(wg, wgp, WG_MSG_TYPE_RESP))) {
		WG_TRACE("under load");
		/*
		 * [W] 5.3: Denial of Service Mitigation & Cookies
		 * "the responder, ..., and when under load may reject messages
		 * with an invalid msg.mac2. If the responder receives a
		 * message with a valid msg.mac1 yet with an invalid msg.mac2,
		 * and is under load, it may respond with a cookie reply
		 * message"
		 */
		uint8_t zero[WG_MAC_LEN] = {0};
		if (consttime_memequal(wgmr->wgmr_mac2, zero, sizeof(zero))) {
			WG_TRACE("sending a cookie message: no cookie included");
			wg_send_cookie_msg(wg, wgp, wgmr->wgmr_sender,
			    wgmr->wgmr_mac1, src);
			goto out;
		}
		if (!wgp->wgp_last_sent_cookie_valid) {
			WG_TRACE("sending a cookie message: no cookie sent ever");
			wg_send_cookie_msg(wg, wgp, wgmr->wgmr_sender,
			    wgmr->wgmr_mac1, src);
			goto out;
		}
		uint8_t mac2[WG_MAC_LEN];
		wg_algo_mac(mac2, sizeof(mac2), wgp->wgp_last_sent_cookie,
		    WG_COOKIE_LEN, (const uint8_t *)wgmr,
		    offsetof(struct wg_msg_resp, wgmr_mac2), NULL, 0);
		if (!consttime_memequal(mac2, wgmr->wgmr_mac2, sizeof(mac2))) {
			WG_DLOG("mac2 is invalid\n");
			goto out;
		}
		WG_TRACE("under load, but continue to sending");
	}

	/* Resume Ci/Hi from the state saved when we sent our init msg. */
	memcpy(hash, wgs->wgs_handshake_hash, sizeof(hash));
	memcpy(ckey, wgs->wgs_chaining_key, sizeof(ckey));

	/*
	 * [W] 5.4.3 Second Message: Responder to Initiator
	 * "When the initiator receives this message, it does the same
	 * operations so that its final state variables are identical,
	 * replacing the operands of the DH function to produce equivalent
	 * values."
	 * Note that the following comments of operations are just copies of
	 * the initiator's ones.
	 */

	/* [N] 2.2: "e" */
	/* Cr := KDF1(Cr, Er^pub) */
	wg_algo_kdf(ckey, NULL, NULL, ckey, wgmr->wgmr_ephemeral,
	    sizeof(wgmr->wgmr_ephemeral));
	/* Hr := HASH(Hr || msg.ephemeral) */
	wg_algo_hash(hash, wgmr->wgmr_ephemeral, sizeof(wgmr->wgmr_ephemeral));

	WG_DUMP_HASH("ckey", ckey);
	WG_DUMP_HASH("hash", hash);

	/* [N] 2.2: "ee" */
	/* Cr := KDF1(Cr, DH(Er^priv, Ei^pub)) */
	wg_algo_dh_kdf(ckey, NULL, wgs->wgs_ephemeral_key_priv,
	    wgmr->wgmr_ephemeral);

	/* [N] 2.2: "se" */
	/* Cr := KDF1(Cr, DH(Er^priv, Si^pub)) */
	wg_algo_dh_kdf(ckey, NULL, wg->wg_privkey, wgmr->wgmr_ephemeral);

	/* [N] 9.2: "psk" */
	{
		uint8_t kdfout[WG_KDF_OUTPUT_LEN];
		/* Cr, r, k := KDF3(Cr, Q) */
		wg_algo_kdf(ckey, kdfout, cipher_key, ckey, wgp->wgp_psk,
		    sizeof(wgp->wgp_psk));
		/* Hr := HASH(Hr || r) */
		wg_algo_hash(hash, kdfout, sizeof(kdfout));
	}

	{
		uint8_t out[sizeof(wgmr->wgmr_empty)]; /* for safety */
		/* msg.empty := AEAD(k, 0, e, Hr) */
		error = wg_algo_aead_dec(out, 0, cipher_key, 0, wgmr->wgmr_empty,
		    sizeof(wgmr->wgmr_empty), hash, sizeof(hash));
		WG_DUMP_HASH("wgmr_empty", wgmr->wgmr_empty);
		if (error != 0) {
			WG_LOG_RATECHECK(&wgp->wgp_ppsratecheck, LOG_DEBUG,
			    "%s: peer %s: wg_algo_aead_dec for empty message failed\n",
			    if_name(&wg->wg_if), wgp->wgp_name);
			goto out;
		}
		/* Hr := HASH(Hr || msg.empty) */
		wg_algo_hash(hash, wgmr->wgmr_empty, sizeof(wgmr->wgmr_empty));
	}

	memcpy(wgs->wgs_handshake_hash, hash, sizeof(wgs->wgs_handshake_hash));
	memcpy(wgs->wgs_chaining_key, ckey, sizeof(wgs->wgs_chaining_key));
	wgs->wgs_remote_index = wgmr->wgmr_sender;
	WG_DLOG("receiver=%x\n", wgs->wgs_remote_index);

	KASSERTMSG(wgs->wgs_state == WGS_STATE_INIT_ACTIVE, "state=%d",
	    wgs->wgs_state);
	wgs->wgs_time_established = time_uptime32;
	wg_schedule_session_dtor_timer(wgp);
	wgs->wgs_time_last_data_sent = 0;
	wgs->wgs_is_initiator = true;
	WG_DLOG("session[L=%"PRIx32" R=%"PRIx32"]:"
	    " calculate keys as initiator\n",
	    wgs->wgs_local_index, wgs->wgs_remote_index);
	wg_calculate_keys(wgs, true);
	/* Handshake secrets are no longer needed; wipe them. */
	wg_clear_states(wgs);

	/*
	 * Session is ready to receive data now that we have received
	 * the responder's response.
	 *
	 * Transition from INIT_ACTIVE to ESTABLISHED to publish it to
	 * the data rx path, wg_handle_msg_data.
	 */
	WG_DLOG("session[L=%"PRIx32" R=%"PRIx32" -> WGS_STATE_ESTABLISHED\n",
	    wgs->wgs_local_index, wgs->wgs_remote_index);
	atomic_store_release(&wgs->wgs_state, WGS_STATE_ESTABLISHED);
	WG_TRACE("WGS_STATE_ESTABLISHED");

	/* Handshake is done; stop the handshake timeout callout. */
	callout_halt(&wgp->wgp_handshake_timeout_timer, NULL);

	/*
	 * Session is ready to send data now that we have received the
	 * responder's response.
	 *
	 * Swap the sessions to publish the new one as the stable
	 * session for the data tx path, wg_output.
	 */
	wg_swap_sessions(wgp);
	KASSERT(wgs == wgp->wgp_session_stable);
	wgs_prev = wgp->wgp_session_unstable;
	getnanotime(&wgp->wgp_last_handshake_time);
	wgp->wgp_handshake_start_time = 0;
	wgp->wgp_last_sent_mac1_valid = false;
	wgp->wgp_last_sent_cookie_valid = false;

	wg_update_endpoint_if_necessary(wgp, src);

	/*
	 * If we had a data packet queued up, send it; otherwise send a
	 * keepalive message -- either way we have to send something
	 * immediately or else the responder will never answer.
	 */
	if ((m = atomic_swap_ptr(&wgp->wgp_pending, NULL)) != NULL) {
		kpreempt_disable();
		const uint32_t h = curcpu()->ci_index; // pktq_rps_hash(m)
		M_SETCTX(m, wgp);
		if (__predict_false(!pktq_enqueue(wg_pktq, m, h))) {
			WGLOG(LOG_ERR, "%s: pktq full, dropping\n",
			    if_name(&wg->wg_if));
			m_freem(m);
		}
		kpreempt_enable();
	} else {
		wg_send_keepalive_msg(wgp, wgs);
	}

	if (wgs_prev->wgs_state == WGS_STATE_ESTABLISHED) {
		/*
		 * Transition ESTABLISHED->DESTROYING.  The session
		 * will remain usable for the data rx path to process
		 * packets still in flight to us, but we won't use it
		 * for data tx.
		 */
		WG_DLOG("session[L=%"PRIx32" R=%"PRIx32"]"
		    " -> WGS_STATE_DESTROYING\n",
		    wgs_prev->wgs_local_index, wgs_prev->wgs_remote_index);
		atomic_store_relaxed(&wgs_prev->wgs_state,
		    WGS_STATE_DESTROYING);
	} else {
		KASSERTMSG(wgs_prev->wgs_state == WGS_STATE_UNKNOWN,
		    "state=%d", wgs_prev->wgs_state);
	}

out:
	mutex_exit(wgp->wgp_lock);
	wg_put_session(wgs, &psref);
}
2254
2255 static void
2256 wg_send_handshake_msg_resp(struct wg_softc *wg, struct wg_peer *wgp,
2257 struct wg_session *wgs, const struct wg_msg_init *wgmi)
2258 {
2259 int error;
2260 struct mbuf *m;
2261 struct wg_msg_resp *wgmr;
2262
2263 KASSERT(mutex_owned(wgp->wgp_lock));
2264 KASSERT(wgs == wgp->wgp_session_unstable);
2265 KASSERTMSG(wgs->wgs_state == WGS_STATE_UNKNOWN, "state=%d",
2266 wgs->wgs_state);
2267
2268 m = m_gethdr(M_WAIT, MT_DATA);
2269 if (sizeof(*wgmr) > MHLEN) {
2270 m_clget(m, M_WAIT);
2271 CTASSERT(sizeof(*wgmr) <= MCLBYTES);
2272 }
2273 m->m_pkthdr.len = m->m_len = sizeof(*wgmr);
2274 wgmr = mtod(m, struct wg_msg_resp *);
2275 wg_fill_msg_resp(wg, wgp, wgs, wgmr, wgmi);
2276
2277 error = wg->wg_ops->send_hs_msg(wgp, m); /* consumes m */
2278 if (error) {
2279 WG_DLOG("send_hs_msg failed, error=%d\n", error);
2280 return;
2281 }
2282
2283 WG_TRACE("resp msg sent");
2284 }
2285
2286 static struct wg_peer *
2287 wg_lookup_peer_by_pubkey(struct wg_softc *wg,
2288 const uint8_t pubkey[static WG_STATIC_KEY_LEN], struct psref *psref)
2289 {
2290 struct wg_peer *wgp;
2291
2292 int s = pserialize_read_enter();
2293 wgp = thmap_get(wg->wg_peers_bypubkey, pubkey, WG_STATIC_KEY_LEN);
2294 if (wgp != NULL)
2295 wg_get_peer(wgp, psref);
2296 pserialize_read_exit(s);
2297
2298 return wgp;
2299 }
2300
/*
 * wg_fill_msg_cookie(wg, wgp, wgmc, sender, mac1, src)
 *
 *	Fill in a cookie reply message per [W] 5.4.7: derive a cookie
 *	from the peer's transport address and our rotating secret Rm,
 *	then encrypt it with a key derived from our public key, using
 *	the triggering message's mac1 as associated data and a random
 *	salt as nonce.  Stores the cookie for later mac2 computation.
 */
static void
wg_fill_msg_cookie(struct wg_softc *wg, struct wg_peer *wgp,
    struct wg_msg_cookie *wgmc, const uint32_t sender,
    const uint8_t mac1[static WG_MAC_LEN], const struct sockaddr *src)
{
	uint8_t cookie[WG_COOKIE_LEN];
	uint8_t key[WG_HASH_LEN];
	uint8_t addr[sizeof(struct in6_addr)]; /* big enough for v4 or v6 */
	size_t addrlen;
	uint16_t uh_sport; /* be */

	KASSERT(mutex_owned(wgp->wgp_lock));

	wgmc->wgmc_type = htole32(WG_MSG_TYPE_COOKIE);
	wgmc->wgmc_receiver = sender;
	cprng_fast(wgmc->wgmc_salt, sizeof(wgmc->wgmc_salt));

	/*
	 * [W] 5.4.7: Under Load: Cookie Reply Message
	 * "The secret variable, Rm, changes every two minutes to a
	 * random value"
	 */
	if ((time_uptime - wgp->wgp_last_cookiesecret_time) >
	    WG_COOKIESECRET_TIME) {
		cprng_strong(kern_cprng, wgp->wgp_cookiesecret,
		    sizeof(wgp->wgp_cookiesecret), 0);
		wgp->wgp_last_cookiesecret_time = time_uptime;
	}

	/* Extract the peer's source address and port (network order). */
	switch (src->sa_family) {
#ifdef INET
	case AF_INET: {
		const struct sockaddr_in *sin = satocsin(src);
		addrlen = sizeof(sin->sin_addr);
		memcpy(addr, &sin->sin_addr, addrlen);
		uh_sport = sin->sin_port;
		break;
	}
#endif
#ifdef INET6
	case AF_INET6: {
		const struct sockaddr_in6 *sin6 = satocsin6(src);
		addrlen = sizeof(sin6->sin6_addr);
		memcpy(addr, &sin6->sin6_addr, addrlen);
		uh_sport = sin6->sin6_port;
		break;
	}
#endif
	default:
		panic("invalid af=%d", src->sa_family);
	}

	/* Cookie := MAC(Rm, source address || source port) */
	wg_algo_mac(cookie, sizeof(cookie),
	    wgp->wgp_cookiesecret, sizeof(wgp->wgp_cookiesecret),
	    addr, addrlen, (const uint8_t *)&uh_sport, sizeof(uh_sport));
	/* Encryption key is derived from our own public key. */
	wg_algo_mac_cookie(key, sizeof(key), wg->wg_pubkey,
	    sizeof(wg->wg_pubkey));
	wg_algo_xaead_enc(wgmc->wgmc_cookie, sizeof(wgmc->wgmc_cookie), key,
	    cookie, sizeof(cookie), mac1, WG_MAC_LEN, wgmc->wgmc_salt);

	/* Need to store to calculate mac2 */
	memcpy(wgp->wgp_last_sent_cookie, cookie, sizeof(cookie));
	wgp->wgp_last_sent_cookie_valid = true;
}
2365
2366 static void
2367 wg_send_cookie_msg(struct wg_softc *wg, struct wg_peer *wgp,
2368 const uint32_t sender, const uint8_t mac1[static WG_MAC_LEN],
2369 const struct sockaddr *src)
2370 {
2371 int error;
2372 struct mbuf *m;
2373 struct wg_msg_cookie *wgmc;
2374
2375 KASSERT(mutex_owned(wgp->wgp_lock));
2376
2377 m = m_gethdr(M_WAIT, MT_DATA);
2378 if (sizeof(*wgmc) > MHLEN) {
2379 m_clget(m, M_WAIT);
2380 CTASSERT(sizeof(*wgmc) <= MCLBYTES);
2381 }
2382 m->m_pkthdr.len = m->m_len = sizeof(*wgmc);
2383 wgmc = mtod(m, struct wg_msg_cookie *);
2384 wg_fill_msg_cookie(wg, wgp, wgmc, sender, mac1, src);
2385
2386 error = wg->wg_ops->send_hs_msg(wgp, m); /* consumes m */
2387 if (error) {
2388 WG_DLOG("send_hs_msg failed, error=%d\n", error);
2389 return;
2390 }
2391
2392 WG_TRACE("cookie msg sent");
2393 }
2394
2395 static bool
2396 wg_is_underload(struct wg_softc *wg, struct wg_peer *wgp, int msgtype)
2397 {
2398 #ifdef WG_DEBUG_PARAMS
2399 if (wg_force_underload)
2400 return true;
2401 #endif
2402
2403 /*
2404 * XXX we don't have a means of a load estimation. The purpose of
2405 * the mechanism is a DoS mitigation, so we consider frequent handshake
2406 * messages as (a kind of) load; if a message of the same type comes
2407 * to a peer within 1 second, we consider we are under load.
2408 */
2409 time_t last = wgp->wgp_last_msg_received_time[msgtype];
2410 wgp->wgp_last_msg_received_time[msgtype] = time_uptime;
2411 return (time_uptime - last) == 0;
2412 }
2413
2414 static void
2415 wg_calculate_keys(struct wg_session *wgs, const bool initiator)
2416 {
2417
2418 KASSERT(mutex_owned(wgs->wgs_peer->wgp_lock));
2419
2420 /*
2421 * [W] 5.4.5: Ti^send = Tr^recv, Ti^recv = Tr^send := KDF2(Ci = Cr, e)
2422 */
2423 if (initiator) {
2424 wg_algo_kdf(wgs->wgs_tkey_send, wgs->wgs_tkey_recv, NULL,
2425 wgs->wgs_chaining_key, NULL, 0);
2426 } else {
2427 wg_algo_kdf(wgs->wgs_tkey_recv, wgs->wgs_tkey_send, NULL,
2428 wgs->wgs_chaining_key, NULL, 0);
2429 }
2430 WG_DUMP_HASH("wgs_tkey_send", wgs->wgs_tkey_send);
2431 WG_DUMP_HASH("wgs_tkey_recv", wgs->wgs_tkey_recv);
2432 }
2433
/*
 * wg_session_get_send_counter(wgs)
 *
 *	Return the session's 64-bit send counter.  On platforms without
 *	64-bit atomic load/store, take the counter lock instead to
 *	avoid a torn read of the two 32-bit halves.
 */
static uint64_t
wg_session_get_send_counter(struct wg_session *wgs)
{
#ifdef __HAVE_ATOMIC64_LOADSTORE
	return atomic_load_relaxed(&wgs->wgs_send_counter);
#else
	uint64_t send_counter;

	mutex_enter(&wgs->wgs_send_counter_lock);
	send_counter = wgs->wgs_send_counter;
	mutex_exit(&wgs->wgs_send_counter_lock);

	return send_counter;
#endif
}
2449
/*
 * wg_session_inc_send_counter(wgs)
 *
 *	Atomically increment the session's send counter and return its
 *	value prior to the increment (i.e. the nonce to use for the
 *	packet being sent).  Falls back to a mutex on platforms
 *	without 64-bit atomics.
 */
static uint64_t
wg_session_inc_send_counter(struct wg_session *wgs)
{
#ifdef __HAVE_ATOMIC64_LOADSTORE
	return atomic_inc_64_nv(&wgs->wgs_send_counter) - 1;
#else
	uint64_t send_counter;

	mutex_enter(&wgs->wgs_send_counter_lock);
	send_counter = wgs->wgs_send_counter++;
	mutex_exit(&wgs->wgs_send_counter_lock);

	return send_counter;
#endif
}
2465
/*
 * wg_clear_states(wgs)
 *
 *	Reset the session's send counter and anti-replay window, and
 *	erase the handshake state (chaining key, hash, ephemeral keys)
 *	once it is no longer needed.
 */
static void
wg_clear_states(struct wg_session *wgs)
{

	KASSERT(mutex_owned(wgs->wgs_peer->wgp_lock));

	wgs->wgs_send_counter = 0;
	sliwin_reset(&wgs->wgs_recvwin->window);

	/* explicit_memset so the compiler cannot elide wiping secrets. */
#define wgs_clear(v) explicit_memset(wgs->wgs_##v, 0, sizeof(wgs->wgs_##v))
	wgs_clear(handshake_hash);
	wgs_clear(chaining_key);
	wgs_clear(ephemeral_key_pub);
	wgs_clear(ephemeral_key_priv);
	wgs_clear(ephemeral_key_peer);
#undef wgs_clear
}
2483
2484 static struct wg_session *
2485 wg_lookup_session_by_index(struct wg_softc *wg, const uint32_t index,
2486 struct psref *psref)
2487 {
2488 struct wg_session *wgs;
2489
2490 int s = pserialize_read_enter();
2491 wgs = thmap_get(wg->wg_sessions_byindex, &index, sizeof index);
2492 if (wgs != NULL) {
2493 KASSERTMSG(index == wgs->wgs_local_index,
2494 "index=%"PRIx32" wgs->wgs_local_index=%"PRIx32,
2495 index, wgs->wgs_local_index);
2496 psref_acquire(psref, &wgs->wgs_psref, wg_psref_class);
2497 }
2498 pserialize_read_exit(s);
2499
2500 return wgs;
2501 }
2502
2503 static void
2504 wg_send_keepalive_msg(struct wg_peer *wgp, struct wg_session *wgs)
2505 {
2506 struct mbuf *m;
2507
2508 /*
2509 * [W] 6.5 Passive Keepalive
2510 * "A keepalive message is simply a transport data message with
2511 * a zero-length encapsulated encrypted inner-packet."
2512 */
2513 WG_TRACE("");
2514 m = m_gethdr(M_WAIT, MT_DATA);
2515 wg_send_data_msg(wgp, wgs, m);
2516 }
2517
/*
 * wg_need_to_send_init_message(wgs)
 *
 *	True if we are the initiator of the current session, have not
 *	sent any data on it yet, and the session is old enough that
 *	receipt of a transport data message should trigger a rekey.
 */
static bool
wg_need_to_send_init_message(struct wg_session *wgs)
{
	/*
	 * [W] 6.2 Transport Message Limits
	 * "if a peer is the initiator of a current secure session,
	 * WireGuard will send a handshake initiation message to begin
	 * a new secure session ... if after receiving a transport data
	 * message, the current secure session is (REJECT-AFTER-TIME -
	 * KEEPALIVE-TIMEOUT - REKEY-TIMEOUT) seconds old and it has
	 * not yet acted upon this event."
	 */
	return wgs->wgs_is_initiator &&
	    atomic_load_relaxed(&wgs->wgs_time_last_data_sent) == 0 &&
	    (time_uptime32 - wgs->wgs_time_established >=
		(wg_reject_after_time - wg_keepalive_timeout -
		    wg_rekey_timeout));
}
2536
2537 static void
2538 wg_schedule_peer_task(struct wg_peer *wgp, unsigned int task)
2539 {
2540
2541 mutex_enter(wgp->wgp_intr_lock);
2542 WG_DLOG("tasks=%d, task=%d\n", wgp->wgp_tasks, task);
2543 if (wgp->wgp_tasks == 0)
2544 /*
2545 * XXX If the current CPU is already loaded -- e.g., if
2546 * there's already a bunch of handshakes queued up --
2547 * consider tossing this over to another CPU to
2548 * distribute the load.
2549 */
2550 workqueue_enqueue(wg_wq, &wgp->wgp_work, NULL);
2551 wgp->wgp_tasks |= task;
2552 mutex_exit(wgp->wgp_intr_lock);
2553 }
2554
/*
 * wg_change_endpoint(wgp, new)
 *
 *	Replace the peer's current endpoint address with `new' by
 *	swapping the two preallocated wg_sockaddr buffers, and notify
 *	the peer's worker task.  The release store publishes the fully
 *	written buffer to lockless readers.
 */
static void
wg_change_endpoint(struct wg_peer *wgp, const struct sockaddr *new)
{
	struct wg_sockaddr *wgsa_prev;

	WG_TRACE("Changing endpoint");

	/* Fill the spare buffer first, then publish it with release. */
	memcpy(wgp->wgp_endpoint0, new, new->sa_len);
	wgsa_prev = wgp->wgp_endpoint;
	atomic_store_release(&wgp->wgp_endpoint, wgp->wgp_endpoint0);
	wgp->wgp_endpoint0 = wgsa_prev;
	atomic_store_release(&wgp->wgp_endpoint_available, true);

	wg_schedule_peer_task(wgp, WGP_TASK_ENDPOINT_CHANGED);
}
2570
2571 static bool
2572 wg_validate_inner_packet(const char *packet, size_t decrypted_len, int *af)
2573 {
2574 uint16_t packet_len;
2575 const struct ip *ip;
2576
2577 if (__predict_false(decrypted_len < sizeof(*ip))) {
2578 WG_DLOG("decrypted_len=%zu < %zu\n", decrypted_len,
2579 sizeof(*ip));
2580 return false;
2581 }
2582
2583 ip = (const struct ip *)packet;
2584 if (ip->ip_v == 4)
2585 *af = AF_INET;
2586 else if (ip->ip_v == 6)
2587 *af = AF_INET6;
2588 else {
2589 WG_DLOG("ip_v=%d\n", ip->ip_v);
2590 return false;
2591 }
2592
2593 WG_DLOG("af=%d\n", *af);
2594
2595 switch (*af) {
2596 #ifdef INET
2597 case AF_INET:
2598 packet_len = ntohs(ip->ip_len);
2599 break;
2600 #endif
2601 #ifdef INET6
2602 case AF_INET6: {
2603 const struct ip6_hdr *ip6;
2604
2605 if (__predict_false(decrypted_len < sizeof(*ip6))) {
2606 WG_DLOG("decrypted_len=%zu < %zu\n", decrypted_len,
2607 sizeof(*ip6));
2608 return false;
2609 }
2610
2611 ip6 = (const struct ip6_hdr *)packet;
2612 packet_len = sizeof(*ip6) + ntohs(ip6->ip6_plen);
2613 break;
2614 }
2615 #endif
2616 default:
2617 return false;
2618 }
2619
2620 if (packet_len > decrypted_len) {
2621 WG_DLOG("packet_len %u > decrypted_len %zu\n", packet_len,
2622 decrypted_len);
2623 return false;
2624 }
2625
2626 return true;
2627 }
2628
/*
 * wg_validate_route(wg, wgp_expected, af, packet)
 *
 *	Cryptokey routing check on the rx side: build a sockaddr from
 *	the inner packet's source IP and verify that it resolves (via
 *	the allowed-ips table) to the same peer whose session decrypted
 *	it.  Returns true if the source address is acceptable.
 */
static bool
wg_validate_route(struct wg_softc *wg, struct wg_peer *wgp_expected,
    int af, char *packet)
{
	struct sockaddr_storage ss;
	struct sockaddr *sa;
	struct psref psref;
	struct wg_peer *wgp;
	bool ok;

	/*
	 * II CRYPTOKEY ROUTING
	 * "it will only accept it if its source IP resolves in the
	 * table to the public key used in the secure session for
	 * decrypting it."
	 */

	/* Build a sockaddr from the inner packet's source address. */
	switch (af) {
#ifdef INET
	case AF_INET: {
		const struct ip *ip = (const struct ip *)packet;
		struct sockaddr_in *sin = (struct sockaddr_in *)&ss;
		sockaddr_in_init(sin, &ip->ip_src, 0);
		sa = sintosa(sin);
		break;
	}
#endif
#ifdef INET6
	case AF_INET6: {
		const struct ip6_hdr *ip6 = (const struct ip6_hdr *)packet;
		struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&ss;
		sockaddr_in6_init(sin6, &ip6->ip6_src, 0, 0, 0);
		sa = sin6tosa(sin6);
		break;
	}
#endif
	default:
		/* ss is unused if neither INET nor INET6 is configured. */
		__USE(ss);
		return false;
	}

	/* The routed-to peer must be the one that decrypted the packet. */
	wgp = wg_pick_peer_by_sa(wg, sa, &psref);
	ok = (wgp == wgp_expected);
	if (wgp != NULL)
		wg_put_peer(wgp, &psref);

	return ok;
}
2677
/*
 * wg_session_dtor_timer(arg)
 *
 *	Callout handler for the peer's session destructor timer:
 *	reschedules itself and queues a task for the peer's worker to
 *	destroy the previous session.
 */
static void
wg_session_dtor_timer(void *arg)
{
	struct wg_peer *wgp = arg;

	WG_TRACE("enter");

	wg_schedule_session_dtor_timer(wgp);
	wg_schedule_peer_task(wgp, WGP_TASK_DESTROY_PREV_SESSION);
}
2688
2689 static void
2690 wg_schedule_session_dtor_timer(struct wg_peer *wgp)
2691 {
2692
2693 /*
2694 * If the periodic session destructor is already pending to
2695 * handle the previous session, that's fine -- leave it in
2696 * place; it will be scheduled again.
2697 */
2698 if (callout_pending(&wgp->wgp_session_dtor_timer)) {
2699 WG_DLOG("session dtor already pending\n");
2700 return;
2701 }
2702
2703 WG_DLOG("scheduling session dtor in %u secs\n", wg_reject_after_time);
2704 callout_schedule(&wgp->wgp_session_dtor_timer,
2705 wg_reject_after_time*hz);
2706 }
2707
2708 static bool
2709 sockaddr_port_match(const struct sockaddr *sa1, const struct sockaddr *sa2)
2710 {
2711 if (sa1->sa_family != sa2->sa_family)
2712 return false;
2713
2714 switch (sa1->sa_family) {
2715 #ifdef INET
2716 case AF_INET:
2717 return satocsin(sa1)->sin_port == satocsin(sa2)->sin_port;
2718 #endif
2719 #ifdef INET6
2720 case AF_INET6:
2721 return satocsin6(sa1)->sin6_port == satocsin6(sa2)->sin6_port;
2722 #endif
2723 default:
2724 return false;
2725 }
2726 }
2727
/*
 * wg_update_endpoint_if_necessary(wgp, src)
 *
 *	Compare the source address of an authenticated incoming packet
 *	against the peer's current endpoint and, if either the address
 *	or the port differs, switch the endpoint to src.
 */
static void
wg_update_endpoint_if_necessary(struct wg_peer *wgp,
    const struct sockaddr *src)
{
	struct wg_sockaddr *wgsa;
	struct psref psref;

	wgsa = wg_get_endpoint_sa(wgp, &psref);

#ifdef WG_DEBUG_LOG
	char oldaddr[128], newaddr[128];
	sockaddr_format(wgsatosa(wgsa), oldaddr, sizeof(oldaddr));
	sockaddr_format(src, newaddr, sizeof(newaddr));
	WG_DLOG("old=%s, new=%s\n", oldaddr, newaddr);
#endif

	/*
	 * III: "Since the packet has authenticated correctly, the source IP of
	 * the outer UDP/IP packet is used to update the endpoint for peer..."
	 */
	if (__predict_false(sockaddr_cmp(src, wgsatosa(wgsa)) != 0 ||
		!sockaddr_port_match(src, wgsatosa(wgsa)))) {
		/* XXX We can't change the endpoint twice in a short period */
		/* The swap ensures only one thread performs the change. */
		if (atomic_swap_uint(&wgp->wgp_endpoint_changing, 1) == 0) {
			wg_change_endpoint(wgp, src);
		}
	}

	wg_put_sa(wgp, wgsa, &psref);
}
2758
/*
 * wg_handle_msg_data(wg, m, src)
 *
 *	Handle an incoming transport data message: look up the session
 *	by receiver index, decrypt and authenticate the payload, check
 *	it against the anti-replay window, update the peer's endpoint,
 *	hand the inner packet to the network stack if it passes
 *	cryptokey routing, and drive the session state machine.
 *
 *	Consumes (frees) m on all paths.
 */
static void __noinline
wg_handle_msg_data(struct wg_softc *wg, struct mbuf *m,
    const struct sockaddr *src)
{
	struct wg_msg_data *wgmd;
	char *encrypted_buf = NULL, *decrypted_buf;
	size_t encrypted_len, decrypted_len;
	struct wg_session *wgs;
	struct wg_peer *wgp;
	int state;
	uint32_t age;
	size_t mlen;
	struct psref psref;
	int error, af;
	bool success, free_encrypted_buf = false, ok;
	struct mbuf *n;

	KASSERT(m->m_len >= sizeof(struct wg_msg_data));
	wgmd = mtod(m, struct wg_msg_data *);

	KASSERT(wgmd->wgmd_type == htole32(WG_MSG_TYPE_DATA));
	WG_TRACE("data");

	/* Find the putative session, or drop. */
	wgs = wg_lookup_session_by_index(wg, wgmd->wgmd_receiver, &psref);
	if (wgs == NULL) {
		WG_TRACE("No session found");
		m_freem(m);
		return;
	}

	/*
	 * We are only ready to handle data when in INIT_PASSIVE,
	 * ESTABLISHED, or DESTROYING.  All transitions out of that
	 * state dissociate the session index and drain psrefs.
	 *
	 * atomic_load_acquire matches atomic_store_release in either
	 * wg_handle_msg_init or wg_handle_msg_resp.  (The transition
	 * INIT_PASSIVE to ESTABLISHED in wg_task_establish_session
	 * doesn't make a difference for this rx path.)
	 */
	state = atomic_load_acquire(&wgs->wgs_state);
	switch (state) {
	case WGS_STATE_UNKNOWN:
	case WGS_STATE_INIT_ACTIVE:
		WG_TRACE("not yet ready for data");
		goto out;
	case WGS_STATE_INIT_PASSIVE:
	case WGS_STATE_ESTABLISHED:
	case WGS_STATE_DESTROYING:
		break;
	}

	/*
	 * Reject if the session is too old.
	 */
	age = time_uptime32 - wgs->wgs_time_established;
	if (__predict_false(age >= wg_reject_after_time)) {
		WG_DLOG("session %"PRIx32" too old, %"PRIu32" sec\n",
		    wgmd->wgmd_receiver, age);
		goto out;
	}

	/*
	 * Get the peer, for rate-limited logs (XXX MPSAFE, dtrace) and
	 * to update the endpoint if authentication succeeds.
	 */
	wgp = wgs->wgs_peer;

	/*
	 * Reject outrageously wrong sequence numbers before doing any
	 * crypto work or taking any locks.
	 */
	error = sliwin_check_fast(&wgs->wgs_recvwin->window,
	    le64toh(wgmd->wgmd_counter));
	if (error) {
		WG_LOG_RATECHECK(&wgp->wgp_ppsratecheck, LOG_DEBUG,
		    "%s: peer %s: out-of-window packet: %"PRIu64"\n",
		    if_name(&wg->wg_if), wgp->wgp_name,
		    le64toh(wgmd->wgmd_counter));
		goto out;
	}

	/* Ensure the payload and authenticator are contiguous. */
	mlen = m_length(m);
	encrypted_len = mlen - sizeof(*wgmd);
	if (encrypted_len < WG_AUTHTAG_LEN) {
		WG_DLOG("Short encrypted_len: %zu\n", encrypted_len);
		goto out;
	}
	success = m_ensure_contig(&m, sizeof(*wgmd) + encrypted_len);
	if (success) {
		/* Ciphertext lives inside m; no separate buffer to free. */
		encrypted_buf = mtod(m, char *) + sizeof(*wgmd);
	} else {
		/* Could not make it contiguous; copy into a scratch buffer. */
		encrypted_buf = kmem_intr_alloc(encrypted_len, KM_NOSLEEP);
		if (encrypted_buf == NULL) {
			WG_DLOG("failed to allocate encrypted_buf\n");
			goto out;
		}
		m_copydata(m, sizeof(*wgmd), encrypted_len, encrypted_buf);
		free_encrypted_buf = true;
	}
	/* m_ensure_contig may change m regardless of its result */
	KASSERT(m->m_len >= sizeof(*wgmd));
	wgmd = mtod(m, struct wg_msg_data *);

	/*
	 * Get a buffer for the plaintext.  Add WG_AUTHTAG_LEN to avoid
	 * a zero-length buffer (XXX).  Drop if plaintext is longer
	 * than MCLBYTES (XXX).
	 */
	decrypted_len = encrypted_len - WG_AUTHTAG_LEN;
	if (decrypted_len > MCLBYTES) {
		/* FIXME handle larger data than MCLBYTES */
		WG_DLOG("couldn't handle larger data than MCLBYTES\n");
		goto out;
	}
	n = wg_get_mbuf(0, decrypted_len + WG_AUTHTAG_LEN);
	if (n == NULL) {
		WG_DLOG("wg_get_mbuf failed\n");
		goto out;
	}
	decrypted_buf = mtod(n, char *);

	/* Decrypt and verify the packet. */
	WG_DLOG("mlen=%zu, encrypted_len=%zu\n", mlen, encrypted_len);
	error = wg_algo_aead_dec(decrypted_buf,
	    encrypted_len - WG_AUTHTAG_LEN /* can be 0 */,
	    wgs->wgs_tkey_recv, le64toh(wgmd->wgmd_counter), encrypted_buf,
	    encrypted_len, NULL, 0);
	if (error != 0) {
		WG_LOG_RATECHECK(&wgp->wgp_ppsratecheck, LOG_DEBUG,
		    "%s: peer %s: failed to wg_algo_aead_dec\n",
		    if_name(&wg->wg_if), wgp->wgp_name);
		m_freem(n);
		goto out;
	}
	WG_DLOG("outsize=%u\n", (u_int)decrypted_len);

	/* Packet is genuine.  Reject it if a replay or just too old. */
	mutex_enter(&wgs->wgs_recvwin->lock);
	error = sliwin_update(&wgs->wgs_recvwin->window,
	    le64toh(wgmd->wgmd_counter));
	mutex_exit(&wgs->wgs_recvwin->lock);
	if (error) {
		WG_LOG_RATECHECK(&wgp->wgp_ppsratecheck, LOG_DEBUG,
		    "%s: peer %s: replay or out-of-window packet: %"PRIu64"\n",
		    if_name(&wg->wg_if), wgp->wgp_name,
		    le64toh(wgmd->wgmd_counter));
		m_freem(n);
		goto out;
	}

	/* We're done with m now; free it and chuck the pointers. */
	m_freem(m);
	m = NULL;
	wgmd = NULL;

	/*
	 * The packet is genuine.  Update the peer's endpoint if the
	 * source address changed.
	 *
	 * XXX How to prevent DoS by replaying genuine packets from the
	 * wrong source address?
	 */
	wg_update_endpoint_if_necessary(wgp, src);

	/*
	 * Validate the encapsulated packet header and get the address
	 * family, or drop.
	 */
	ok = wg_validate_inner_packet(decrypted_buf, decrypted_len, &af);
	if (!ok) {
		m_freem(n);
		goto update_state;
	}

	/* Submit it into our network stack if routable. */
	ok = wg_validate_route(wg, wgp, af, decrypted_buf);
	if (ok) {
		wg->wg_ops->input(&wg->wg_if, n, af);
	} else {
		char addrstr[INET6_ADDRSTRLEN];
		memset(addrstr, 0, sizeof(addrstr));
		switch (af) {
#ifdef INET
		case AF_INET: {
			const struct ip *ip = (const struct ip *)decrypted_buf;
			IN_PRINT(addrstr, &ip->ip_src);
			break;
		}
#endif
#ifdef INET6
		case AF_INET6: {
			const struct ip6_hdr *ip6 =
			    (const struct ip6_hdr *)decrypted_buf;
			IN6_PRINT(addrstr, &ip6->ip6_src);
			break;
		}
#endif
		default:
			panic("invalid af=%d", af);
		}
		WG_LOG_RATECHECK(&wgp->wgp_ppsratecheck, LOG_DEBUG,
		    "%s: peer %s: invalid source address (%s)\n",
		    if_name(&wg->wg_if), wgp->wgp_name, addrstr);
		m_freem(n);
		/*
		 * The inner address is invalid however the session is valid
		 * so continue the session processing below.
		 */
	}
	n = NULL;

update_state:
	/* Update the state machine if necessary. */
	if (__predict_false(state == WGS_STATE_INIT_PASSIVE)) {
		/*
		 * We were waiting for the initiator to send their
		 * first data transport message, and that has happened.
		 * Schedule a task to establish this session.
		 */
		wg_schedule_peer_task(wgp, WGP_TASK_ESTABLISH_SESSION);
	} else {
		if (__predict_false(wg_need_to_send_init_message(wgs))) {
			wg_schedule_peer_task(wgp, WGP_TASK_SEND_INIT_MESSAGE);
		}
		/*
		 * [W] 6.5 Passive Keepalive
		 * "If a peer has received a validly-authenticated transport
		 * data message (section 5.4.6), but does not have any packets
		 * itself to send back for KEEPALIVE-TIMEOUT seconds, it sends
		 * a keepalive message."
		 */
		const uint32_t now = time_uptime32;
		const uint32_t time_last_data_sent =
		    atomic_load_relaxed(&wgs->wgs_time_last_data_sent);
		WG_DLOG("time_uptime32=%"PRIu32
		    " wgs_time_last_data_sent=%"PRIu32"\n",
		    now, time_last_data_sent);
		if ((now - time_last_data_sent) >= wg_keepalive_timeout) {
			WG_TRACE("Schedule sending keepalive message");
			/*
			 * We can't send a keepalive message here to avoid
			 * a deadlock;  we already hold the solock of a socket
			 * that is used to send the message.
			 */
			wg_schedule_peer_task(wgp,
			    WGP_TASK_SEND_KEEPALIVE_MESSAGE);
		}
	}
out:
	wg_put_session(wgs, &psref);
	/* m is NULL here if it was already consumed; m_freem(NULL) is ok. */
	m_freem(m);
	if (free_encrypted_buf)
		kmem_intr_free(encrypted_buf, encrypted_len);
}
3016
/*
 * wg_handle_msg_cookie(wg, wgmc)
 *
 *	Handle an incoming cookie reply message: find the session it
 *	refers to, decrypt the cookie using the mac1 we last sent as
 *	associated data, and store the cookie for the next handshake
 *	retry ([W] 6.6).
 */
static void __noinline
wg_handle_msg_cookie(struct wg_softc *wg, const struct wg_msg_cookie *wgmc)
{
	struct wg_session *wgs;
	struct wg_peer *wgp;
	struct psref psref;
	int error;
	uint8_t key[WG_HASH_LEN];
	uint8_t cookie[WG_COOKIE_LEN];

	WG_TRACE("cookie msg received");

	/* Find the putative session. */
	wgs = wg_lookup_session_by_index(wg, wgmc->wgmc_receiver, &psref);
	if (wgs == NULL) {
		WG_TRACE("No session found");
		return;
	}

	/* Lock the peer so we can update the cookie state. */
	wgp = wgs->wgs_peer;
	mutex_enter(wgp->wgp_lock);

	if (!wgp->wgp_last_sent_mac1_valid) {
		WG_TRACE("No valid mac1 sent (or expired)");
		goto out;
	}

	/*
	 * wgp_last_sent_mac1_valid is only set to true when we are
	 * transitioning to INIT_ACTIVE or INIT_PASSIVE, and always
	 * cleared on transition out of them.
	 */
	KASSERTMSG((wgs->wgs_state == WGS_STATE_INIT_ACTIVE ||
		wgs->wgs_state == WGS_STATE_INIT_PASSIVE),
	    "state=%d", wgs->wgs_state);

	/* Decrypt the cookie and store it for later handshake retry. */
	wg_algo_mac_cookie(key, sizeof(key), wgp->wgp_pubkey,
	    sizeof(wgp->wgp_pubkey));
	error = wg_algo_xaead_dec(cookie, sizeof(cookie), key,
	    wgmc->wgmc_cookie, sizeof(wgmc->wgmc_cookie),
	    wgp->wgp_last_sent_mac1, sizeof(wgp->wgp_last_sent_mac1),
	    wgmc->wgmc_salt);
	if (error != 0) {
		WG_LOG_RATECHECK(&wgp->wgp_ppsratecheck, LOG_DEBUG,
		    "%s: peer %s: wg_algo_aead_dec for cookie failed: "
		    "error=%d\n", if_name(&wg->wg_if), wgp->wgp_name, error);
		goto out;
	}
	/*
	 * [W] 6.6: Interaction with Cookie Reply System
	 * "it should simply store the decrypted cookie value from the cookie
	 * reply message, and wait for the expiration of the REKEY-TIMEOUT
	 * timer for retrying a handshake initiation message."
	 */
	wgp->wgp_latest_cookie_time = time_uptime;
	memcpy(wgp->wgp_latest_cookie, cookie, sizeof(wgp->wgp_latest_cookie));
out:
	mutex_exit(wgp->wgp_lock);
	wg_put_session(wgs, &psref);
}
3079
3080 static struct mbuf *
3081 wg_validate_msg_header(struct wg_softc *wg, struct mbuf *m)
3082 {
3083 struct wg_msg wgm;
3084 size_t mbuflen;
3085 size_t msglen;
3086
3087 /*
3088 * Get the mbuf chain length. It is already guaranteed, by
3089 * wg_overudp_cb, to be large enough for a struct wg_msg.
3090 */
3091 mbuflen = m_length(m);
3092 KASSERT(mbuflen >= sizeof(struct wg_msg));
3093
3094 /*
3095 * Copy the message header (32-bit message type) out -- we'll
3096 * worry about contiguity and alignment later.
3097 */
3098 m_copydata(m, 0, sizeof(wgm), &wgm);
3099 switch (le32toh(wgm.wgm_type)) {
3100 case WG_MSG_TYPE_INIT:
3101 msglen = sizeof(struct wg_msg_init);
3102 break;
3103 case WG_MSG_TYPE_RESP:
3104 msglen = sizeof(struct wg_msg_resp);
3105 break;
3106 case WG_MSG_TYPE_COOKIE:
3107 msglen = sizeof(struct wg_msg_cookie);
3108 break;
3109 case WG_MSG_TYPE_DATA:
3110 msglen = sizeof(struct wg_msg_data);
3111 break;
3112 default:
3113 WG_LOG_RATECHECK(&wg->wg_ppsratecheck, LOG_DEBUG,
3114 "%s: Unexpected msg type: %u\n", if_name(&wg->wg_if),
3115 le32toh(wgm.wgm_type));
3116 goto error;
3117 }
3118
3119 /* Verify the mbuf chain is long enough for this type of message. */
3120 if (__predict_false(mbuflen < msglen)) {
3121 WG_DLOG("Invalid msg size: mbuflen=%zu type=%u\n", mbuflen,
3122 le32toh(wgm.wgm_type));
3123 goto error;
3124 }
3125
3126 /* Make the message header contiguous if necessary. */
3127 if (__predict_false(m->m_len < msglen)) {
3128 m = m_pullup(m, msglen);
3129 if (m == NULL)
3130 return NULL;
3131 }
3132
3133 return m;
3134
3135 error:
3136 m_freem(m);
3137 return NULL;
3138 }
3139
3140 static void
3141 wg_handle_packet(struct wg_softc *wg, struct mbuf *m,
3142 const struct sockaddr *src)
3143 {
3144 struct wg_msg *wgm;
3145
3146 KASSERT(curlwp->l_pflag & LP_BOUND);
3147
3148 m = wg_validate_msg_header(wg, m);
3149 if (__predict_false(m == NULL))
3150 return;
3151
3152 KASSERT(m->m_len >= sizeof(struct wg_msg));
3153 wgm = mtod(m, struct wg_msg *);
3154 switch (le32toh(wgm->wgm_type)) {
3155 case WG_MSG_TYPE_INIT:
3156 wg_handle_msg_init(wg, (struct wg_msg_init *)wgm, src);
3157 break;
3158 case WG_MSG_TYPE_RESP:
3159 wg_handle_msg_resp(wg, (struct wg_msg_resp *)wgm, src);
3160 break;
3161 case WG_MSG_TYPE_COOKIE:
3162 wg_handle_msg_cookie(wg, (struct wg_msg_cookie *)wgm);
3163 break;
3164 case WG_MSG_TYPE_DATA:
3165 wg_handle_msg_data(wg, m, src);
3166 /* wg_handle_msg_data frees m for us */
3167 return;
3168 default:
3169 panic("invalid message type: %d", le32toh(wgm->wgm_type));
3170 }
3171
3172 m_freem(m);
3173 }
3174
3175 static void
3176 wg_receive_packets(struct wg_softc *wg, const int af)
3177 {
3178
3179 for (;;) {
3180 int error, flags;
3181 struct socket *so;
3182 struct mbuf *m = NULL;
3183 struct uio dummy_uio;
3184 struct mbuf *paddr = NULL;
3185 struct sockaddr *src;
3186
3187 so = wg_get_so_by_af(wg, af);
3188 flags = MSG_DONTWAIT;
3189 dummy_uio.uio_resid = 1000000000;
3190
3191 error = so->so_receive(so, &paddr, &dummy_uio, &m, NULL,
3192 &flags);
3193 if (error || m == NULL) {
3194 //if (error == EWOULDBLOCK)
3195 return;
3196 }
3197
3198 KASSERT(paddr != NULL);
3199 KASSERT(paddr->m_len >= sizeof(struct sockaddr));
3200 src = mtod(paddr, struct sockaddr *);
3201
3202 wg_handle_packet(wg, m, src);
3203 }
3204 }
3205
/*
 * wg_get_peer(wgp, psref)
 *
 *	Acquire a passive reference to wgp, keeping it alive until the
 *	matching wg_put_peer.
 */
static void
wg_get_peer(struct wg_peer *wgp, struct psref *psref)
{

	psref_acquire(psref, &wgp->wgp_psref, wg_psref_class);
}
3212
/*
 * wg_put_peer(wgp, psref)
 *
 *	Release a passive reference to wgp obtained with wg_get_peer.
 */
static void
wg_put_peer(struct wg_peer *wgp, struct psref *psref)
{

	psref_release(psref, &wgp->wgp_psref, wg_psref_class);
}
3219
/*
 * wg_task_send_init_message(wg, wgp)
 *
 *	Initiate a handshake with the peer, unless an established
 *	session already exists and no rekey has been forced.  Called
 *	from the per-peer worker with wgp_lock held.
 */
static void
wg_task_send_init_message(struct wg_softc *wg, struct wg_peer *wgp)
{
	struct wg_session *wgs;

	WG_TRACE("WGP_TASK_SEND_INIT_MESSAGE");

	KASSERT(mutex_owned(wgp->wgp_lock));

	/* Without a known endpoint there is nowhere to send the message. */
	if (!atomic_load_acquire(&wgp->wgp_endpoint_available)) {
		WGLOG(LOG_DEBUG, "%s: No endpoint available\n",
		    if_name(&wg->wg_if));
		/* XXX should do something? */
		return;
	}

	/*
	 * If we already have an established session, there's no need
	 * to initiate a new one -- unless the rekey-after-time or
	 * rekey-after-messages limits have passed.
	 */
	wgs = wgp->wgp_session_stable;
	if (wgs->wgs_state == WGS_STATE_ESTABLISHED &&
	    !atomic_load_relaxed(&wgs->wgs_force_rekey))
		return;

	/*
	 * Ensure we're initiating a new session.  If the unstable
	 * session is already INIT_ACTIVE or INIT_PASSIVE, this does
	 * nothing.
	 */
	wg_send_handshake_msg_init(wg, wgp);
}
3253
/*
 * wg_task_retry_handshake(wg, wgp)
 *
 *	Retry a handshake whose response has not arrived, or give up
 *	after rekey-attempt-time ([W] 6.4).  Called from the per-peer
 *	worker with wgp_lock held.
 */
static void
wg_task_retry_handshake(struct wg_softc *wg, struct wg_peer *wgp)
{
	struct wg_session *wgs;

	WG_TRACE("WGP_TASK_RETRY_HANDSHAKE");

	KASSERT(mutex_owned(wgp->wgp_lock));
	KASSERT(wgp->wgp_handshake_start_time != 0);

	/* Only an outstanding active initiation can be retried. */
	wgs = wgp->wgp_session_unstable;
	if (wgs->wgs_state != WGS_STATE_INIT_ACTIVE)
		return;

	/*
	 * XXX no real need to assign a new index here, but we do need
	 * to transition to UNKNOWN temporarily
	 */
	wg_put_session_index(wg, wgs);

	/* [W] 6.4 Handshake Initiation Retransmission */
	if ((time_uptime - wgp->wgp_handshake_start_time) >
	    wg_rekey_attempt_time) {
		/* Give up handshaking */
		wgp->wgp_handshake_start_time = 0;
		WG_TRACE("give up");

		/*
		 * If a new data packet comes, handshaking will be retried
		 * and a new session would be established at that time,
		 * however we don't want to send pending packets then.
		 */
		wg_purge_pending_packets(wgp);
		return;
	}

	/* Still within the retry window -- send another initiation. */
	wg_task_send_init_message(wg, wgp);
}
3292
3293 static void
3294 wg_task_establish_session(struct wg_softc *wg, struct wg_peer *wgp)
3295 {
3296 struct wg_session *wgs, *wgs_prev;
3297 struct mbuf *m;
3298
3299 KASSERT(mutex_owned(wgp->wgp_lock));
3300
3301 wgs = wgp->wgp_session_unstable;
3302 if (wgs->wgs_state != WGS_STATE_INIT_PASSIVE)
3303 /* XXX Can this happen? */
3304 return;
3305
3306 wgs->wgs_time_last_data_sent = 0;
3307 wgs->wgs_is_initiator = false;
3308
3309 /*
3310 * Session was already ready to receive data. Transition from
3311 * INIT_PASSIVE to ESTABLISHED just so we can swap the
3312 * sessions.
3313 *
3314 * atomic_store_relaxed because this doesn't affect the data rx
3315 * path, wg_handle_msg_data -- changing from INIT_PASSIVE to
3316 * ESTABLISHED makes no difference to the data rx path, and the
3317 * transition to INIT_PASSIVE with store-release already
3318 * published the state needed by the data rx path.
3319 */
3320 WG_DLOG("session[L=%"PRIx32" R=%"PRIx32"] -> WGS_STATE_ESTABLISHED\n",
3321 wgs->wgs_local_index, wgs->wgs_remote_index);
3322 atomic_store_relaxed(&wgs->wgs_state, WGS_STATE_ESTABLISHED);
3323 WG_TRACE("WGS_STATE_ESTABLISHED");
3324
3325 /*
3326 * Session is ready to send data too now that we have received
3327 * the peer initiator's first data packet.
3328 *
3329 * Swap the sessions to publish the new one as the stable
3330 * session for the data tx path, wg_output.
3331 */
3332 wg_swap_sessions(wgp);
3333 KASSERT(wgs == wgp->wgp_session_stable);
3334 wgs_prev = wgp->wgp_session_unstable;
3335 getnanotime(&wgp->wgp_last_handshake_time);
3336 wgp->wgp_handshake_start_time = 0;
3337 wgp->wgp_last_sent_mac1_valid = false;
3338 wgp->wgp_last_sent_cookie_valid = false;
3339
3340 /* If we had a data packet queued up, send it. */
3341 if ((m = atomic_swap_ptr(&wgp->wgp_pending, NULL)) != NULL) {
3342 kpreempt_disable();
3343 const uint32_t h = curcpu()->ci_index; // pktq_rps_hash(m)
3344 M_SETCTX(m, wgp);
3345 if (__predict_false(!pktq_enqueue(wg_pktq, m, h))) {
3346 WGLOG(LOG_ERR, "%s: pktq full, dropping\n",
3347 if_name(&wg->wg_if));
3348 m_freem(m);
3349 }
3350 kpreempt_enable();
3351 }
3352
3353 if (wgs_prev->wgs_state == WGS_STATE_ESTABLISHED) {
3354 /*
3355 * Transition ESTABLISHED->DESTROYING. The session
3356 * will remain usable for the data rx path to process
3357 * packets still in flight to us, but we won't use it
3358 * for data tx.
3359 */
3360 WG_DLOG("session[L=%"PRIx32" R=%"PRIx32"]"
3361 " -> WGS_STATE_DESTROYING\n",
3362 wgs_prev->wgs_local_index, wgs_prev->wgs_remote_index);
3363 atomic_store_relaxed(&wgs_prev->wgs_state,
3364 WGS_STATE_DESTROYING);
3365 } else {
3366 KASSERTMSG(wgs_prev->wgs_state == WGS_STATE_UNKNOWN,
3367 "state=%d", wgs_prev->wgs_state);
3368 WG_DLOG("session[L=%"PRIx32" R=%"PRIx32"]"
3369 " -> WGS_STATE_UNKNOWN\n",
3370 wgs_prev->wgs_local_index, wgs_prev->wgs_remote_index);
3371 wgs_prev->wgs_local_index = 0; /* paranoia */
3372 wgs_prev->wgs_remote_index = 0; /* paranoia */
3373 wg_clear_states(wgs_prev); /* paranoia */
3374 wgs_prev->wgs_state = WGS_STATE_UNKNOWN;
3375 }
3376 }
3377
/*
 * wg_task_endpoint_changed(wg, wgp)
 *
 *	Finish a peer endpoint change: wait out readers of the old
 *	endpoint and reinitialize its psref target so it can be reused
 *	on the next change.  Called from the per-peer worker with
 *	wgp_lock held.
 */
static void
wg_task_endpoint_changed(struct wg_softc *wg, struct wg_peer *wgp)
{

	WG_TRACE("WGP_TASK_ENDPOINT_CHANGED");

	KASSERT(mutex_owned(wgp->wgp_lock));

	if (atomic_load_relaxed(&wgp->wgp_endpoint_changing)) {
		/* Wait for pserialize readers of the endpoint to drain. */
		pserialize_perform(wgp->wgp_psz);
		/*
		 * Drop wgp_lock around psref_target_destroy, which
		 * blocks until outstanding psrefs are released.
		 */
		mutex_exit(wgp->wgp_lock);
		psref_target_destroy(&wgp->wgp_endpoint0->wgsa_psref,
		    wg_psref_class);
		psref_target_init(&wgp->wgp_endpoint0->wgsa_psref,
		    wg_psref_class);
		mutex_enter(wgp->wgp_lock);
		atomic_store_release(&wgp->wgp_endpoint_changing, 0);
	}
}
3397
3398 static void
3399 wg_task_send_keepalive_message(struct wg_softc *wg, struct wg_peer *wgp)
3400 {
3401 struct wg_session *wgs;
3402
3403 WG_TRACE("WGP_TASK_SEND_KEEPALIVE_MESSAGE");
3404
3405 KASSERT(mutex_owned(wgp->wgp_lock));
3406
3407 wgs = wgp->wgp_session_stable;
3408 if (wgs->wgs_state != WGS_STATE_ESTABLISHED)
3409 return;
3410
3411 wg_send_keepalive_msg(wgp, wgs);
3412 }
3413
/*
 * wg_task_destroy_prev_session(wg, wgp)
 *
 *	Tear down sessions that have outlived reject-after-time, and
 *	stop the destructor callout when no session remains.  Called
 *	from the per-peer worker with wgp_lock held.
 */
static void
wg_task_destroy_prev_session(struct wg_softc *wg, struct wg_peer *wgp)
{
	struct wg_session *wgs;
	uint32_t age;

	WG_TRACE("WGP_TASK_DESTROY_PREV_SESSION");

	KASSERT(mutex_owned(wgp->wgp_lock));

	/*
	 * If there's any previous unstable session, i.e., one that
	 * was ESTABLISHED and is now DESTROYING, older than
	 * reject-after-time, destroy it.  Upcoming sessions are still
	 * in INIT_ACTIVE or INIT_PASSIVE -- we don't touch those here.
	 */
	wgs = wgp->wgp_session_unstable;
	KASSERT(wgs->wgs_state != WGS_STATE_ESTABLISHED);
	if (wgs->wgs_state == WGS_STATE_DESTROYING &&
	    ((age = (time_uptime32 - wgs->wgs_time_established)) >=
		wg_reject_after_time)) {
		WG_DLOG("destroying past session %"PRIu32" sec old\n", age);
		wg_put_session_index(wg, wgs);
		KASSERTMSG(wgs->wgs_state == WGS_STATE_UNKNOWN, "state=%d",
		    wgs->wgs_state);
	}

	/*
	 * If there's any ESTABLISHED stable session older than
	 * reject-after-time, destroy it.  (The stable session can also
	 * be in UNKNOWN state -- nothing to do in that case)
	 */
	wgs = wgp->wgp_session_stable;
	KASSERT(wgs->wgs_state != WGS_STATE_INIT_ACTIVE);
	KASSERT(wgs->wgs_state != WGS_STATE_INIT_PASSIVE);
	KASSERT(wgs->wgs_state != WGS_STATE_DESTROYING);
	if (wgs->wgs_state == WGS_STATE_ESTABLISHED &&
	    ((age = (time_uptime32 - wgs->wgs_time_established)) >=
		wg_reject_after_time)) {
		WG_DLOG("destroying current session %"PRIu32" sec old\n", age);
		atomic_store_relaxed(&wgs->wgs_state, WGS_STATE_DESTROYING);
		wg_put_session_index(wg, wgs);
		KASSERTMSG(wgs->wgs_state == WGS_STATE_UNKNOWN, "state=%d",
		    wgs->wgs_state);
	}

	/*
	 * If there's no sessions left, no need to have the timer run
	 * until the next time around -- halt it.
	 *
	 * It is only ever scheduled with wgp_lock held or in the
	 * callout itself, and callout_halt prevents rescheduling
	 * itself, so this never races with rescheduling.
	 */
	if (wgp->wgp_session_unstable->wgs_state == WGS_STATE_UNKNOWN &&
	    wgp->wgp_session_stable->wgs_state == WGS_STATE_UNKNOWN)
		callout_halt(&wgp->wgp_session_dtor_timer, NULL);
}
3472
/*
 * wg_peer_work(wk, cookie)
 *
 *	Per-peer workqueue handler: atomically claim the pending task
 *	bits under wgp_intr_lock, then run each requested task with
 *	wgp_lock held.  Loops in case more tasks were scheduled while
 *	we ran -- the wgp_tasks word is re-read under wgp_intr_lock on
 *	each pass.
 */
static void
wg_peer_work(struct work *wk, void *cookie)
{
	struct wg_peer *wgp = container_of(wk, struct wg_peer, wgp_work);
	struct wg_softc *wg = wgp->wgp_sc;
	unsigned int tasks;

	mutex_enter(wgp->wgp_intr_lock);
	while ((tasks = wgp->wgp_tasks) != 0) {
		/* Claim the batch; new tasks accumulate for the next pass. */
		wgp->wgp_tasks = 0;
		mutex_exit(wgp->wgp_intr_lock);

		/* Tasks run under wgp_lock, not wgp_intr_lock. */
		mutex_enter(wgp->wgp_lock);
		if (ISSET(tasks, WGP_TASK_SEND_INIT_MESSAGE))
			wg_task_send_init_message(wg, wgp);
		if (ISSET(tasks, WGP_TASK_RETRY_HANDSHAKE))
			wg_task_retry_handshake(wg, wgp);
		if (ISSET(tasks, WGP_TASK_ESTABLISH_SESSION))
			wg_task_establish_session(wg, wgp);
		if (ISSET(tasks, WGP_TASK_ENDPOINT_CHANGED))
			wg_task_endpoint_changed(wg, wgp);
		if (ISSET(tasks, WGP_TASK_SEND_KEEPALIVE_MESSAGE))
			wg_task_send_keepalive_message(wg, wgp);
		if (ISSET(tasks, WGP_TASK_DESTROY_PREV_SESSION))
			wg_task_destroy_prev_session(wg, wgp);
		mutex_exit(wgp->wgp_lock);

		mutex_enter(wgp->wgp_intr_lock);
	}
	mutex_exit(wgp->wgp_intr_lock);
}
3504
/*
 * wg_job(job)
 *
 *	Threadpool job that drains packets from whichever sockets'
 *	upcalls fired.  The upcall bits are claimed under wg_intr_lock
 *	and re-checked in a loop.  The lwp is bound to a CPU while
 *	receiving because wg_handle_packet asserts LP_BOUND.
 */
static void
wg_job(struct threadpool_job *job)
{
	struct wg_softc *wg = container_of(job, struct wg_softc, wg_job);
	int bound, upcalls;

	mutex_enter(wg->wg_intr_lock);
	while ((upcalls = wg->wg_upcalls) != 0) {
		/* Claim this batch of upcalls; drop the lock to do I/O. */
		wg->wg_upcalls = 0;
		mutex_exit(wg->wg_intr_lock);
		bound = curlwp_bind();
		if (ISSET(upcalls, WG_UPCALL_INET))
			wg_receive_packets(wg, AF_INET);
		if (ISSET(upcalls, WG_UPCALL_INET6))
			wg_receive_packets(wg, AF_INET6);
		curlwp_bindx(bound);
		mutex_enter(wg->wg_intr_lock);
	}
	threadpool_job_done(job);
	mutex_exit(wg->wg_intr_lock);
}
3526
/*
 * wg_bind_port(wg, port)
 *
 *	Bind the interface's IPv4 and IPv6 UDP sockets to the given
 *	listen port (host byte order).  Returns 0 on success or an
 *	error from sobind.
 *
 *	NOTE(review): if the IPv6 bind fails after the IPv4 bind has
 *	already succeeded, the two sockets end up bound inconsistently
 *	and wg_listen_port is left stale -- confirm whether callers
 *	tolerate or roll back this partial failure.
 */
static int
wg_bind_port(struct wg_softc *wg, const uint16_t port)
{
	int error = 0;
	uint16_t old_port = wg->wg_listen_port;

	/* Already bound to the requested (nonzero) port: nothing to do. */
	if (port != 0 && old_port == port)
		return 0;

#ifdef INET
	struct sockaddr_in _sin, *sin = &_sin;
	sin->sin_len = sizeof(*sin);
	sin->sin_family = AF_INET;
	sin->sin_addr.s_addr = INADDR_ANY;
	sin->sin_port = htons(port);

	error = sobind(wg->wg_so4, sintosa(sin), curlwp);
	if (error)
		return error;
#endif

#ifdef INET6
	struct sockaddr_in6 _sin6, *sin6 = &_sin6;
	sin6->sin6_len = sizeof(*sin6);
	sin6->sin6_family = AF_INET6;
	sin6->sin6_addr = in6addr_any;
	sin6->sin6_port = htons(port);

	error = sobind(wg->wg_so6, sin6tosa(sin6), curlwp);
	if (error)
		return error;
#endif

	wg->wg_listen_port = port;

	return error;
}
3564
3565 static void
3566 wg_so_upcall(struct socket *so, void *cookie, int events, int waitflag)
3567 {
3568 struct wg_softc *wg = cookie;
3569 int reason;
3570
3571 reason = (so->so_proto->pr_domain->dom_family == AF_INET) ?
3572 WG_UPCALL_INET :
3573 WG_UPCALL_INET6;
3574
3575 mutex_enter(wg->wg_intr_lock);
3576 wg->wg_upcalls |= reason;
3577 threadpool_schedule_job(wg->wg_threadpool, &wg->wg_job);
3578 mutex_exit(wg->wg_intr_lock);
3579 }
3580
/*
 * wg_overudp_cb(mp, offset, so, src, arg)
 *
 *	Called from the UDP input path before the packet is queued on
 *	the socket.  offset is the start of the WireGuard payload
 *	within *mp.  Returns 1 if the packet was consumed here (data
 *	messages, handled immediately), 0 to pass it through to
 *	so_receive (handshake messages), or -1 if it was dropped.
 *
 *	NOTE(review): on the m_pullup-failure and drop paths, *mp is
 *	left pointing at a freed chain; presumably a -1 return tells
 *	the caller not to touch *mp -- confirm against the
 *	inpcb_register_overudp_cb contract.
 */
static int
wg_overudp_cb(struct mbuf **mp, int offset, struct socket *so,
    struct sockaddr *src, void *arg)
{
	struct wg_softc *wg = arg;
	struct wg_msg wgm;
	struct mbuf *m = *mp;

	WG_TRACE("enter");

	/* Verify the mbuf chain is long enough to have a wg msg header. */
	KASSERT(offset <= m_length(m));
	if (__predict_false(m_length(m) - offset < sizeof(struct wg_msg))) {
		/* drop on the floor */
		m_freem(m);
		return -1;
	}

	/*
	 * Copy the message header (32-bit message type) out -- we'll
	 * worry about contiguity and alignment later.
	 */
	m_copydata(m, offset, sizeof(struct wg_msg), &wgm);
	WG_DLOG("type=%d\n", le32toh(wgm.wgm_type));

	/*
	 * Handle DATA packets promptly as they arrive, if they are in
	 * an active session.  Other packets may require expensive
	 * public-key crypto and are not as sensitive to latency, so
	 * defer them to the worker thread.
	 */
	switch (le32toh(wgm.wgm_type)) {
	case WG_MSG_TYPE_DATA:
		/* handle immediately */
		m_adj(m, offset);
		if (__predict_false(m->m_len < sizeof(struct wg_msg_data))) {
			m = m_pullup(m, sizeof(struct wg_msg_data));
			if (m == NULL)
				return -1;
		}
		wg_handle_msg_data(wg, m, src);
		*mp = NULL;
		return 1;
	case WG_MSG_TYPE_INIT:
	case WG_MSG_TYPE_RESP:
	case WG_MSG_TYPE_COOKIE:
		/* pass through to so_receive in wg_receive_packets */
		return 0;
	default:
		/* drop on the floor */
		m_freem(m);
		return -1;
	}
}
3635
3636 static int
3637 wg_socreate(struct wg_softc *wg, int af, struct socket **sop)
3638 {
3639 int error;
3640 struct socket *so;
3641
3642 error = socreate(af, &so, SOCK_DGRAM, 0, curlwp, NULL);
3643 if (error != 0)
3644 return error;
3645
3646 solock(so);
3647 so->so_upcallarg = wg;
3648 so->so_upcall = wg_so_upcall;
3649 so->so_rcv.sb_flags |= SB_UPCALL;
3650 inpcb_register_overudp_cb(sotoinpcb(so), wg_overudp_cb, wg);
3651 sounlock(so);
3652
3653 *sop = so;
3654
3655 return 0;
3656 }
3657
3658 static bool
3659 wg_session_hit_limits(struct wg_session *wgs)
3660 {
3661
3662 /*
3663 * [W] 6.2: Transport Message Limits
3664 * "After REJECT-AFTER-MESSAGES transport data messages or after the
3665 * current secure session is REJECT-AFTER-TIME seconds old, whichever
3666 * comes first, WireGuard will refuse to send or receive any more
3667 * transport data messages using the current secure session, ..."
3668 */
3669 KASSERT(wgs->wgs_time_established != 0 || time_uptime > UINT32_MAX);
3670 if (time_uptime32 - wgs->wgs_time_established > wg_reject_after_time) {
3671 WG_DLOG("The session hits REJECT_AFTER_TIME\n");
3672 return true;
3673 } else if (wg_session_get_send_counter(wgs) >
3674 wg_reject_after_messages) {
3675 WG_DLOG("The session hits REJECT_AFTER_MESSAGES\n");
3676 return true;
3677 }
3678
3679 return false;
3680 }
3681
/*
 * wgintr(cookie)
 *
 *	Software interrupt handler for the transmit packet queue:
 *	encrypt and send each queued packet on its peer's stable
 *	session, or drop it and schedule a handshake when no usable
 *	session exists.
 */
static void
wgintr(void *cookie)
{
	struct wg_peer *wgp;
	struct wg_session *wgs;
	struct mbuf *m;
	struct psref psref;

	while ((m = pktq_dequeue(wg_pktq)) != NULL) {
		/* The target peer was stashed in the mbuf's context. */
		wgp = M_GETCTX(m, struct wg_peer *);
		if ((wgs = wg_get_stable_session(wgp, &psref)) == NULL) {
			WG_TRACE("no stable session");
			wg_schedule_peer_task(wgp, WGP_TASK_SEND_INIT_MESSAGE);
			goto next0;
		}
		if (__predict_false(wg_session_hit_limits(wgs))) {
			WG_TRACE("stable session hit limits");
			wg_schedule_peer_task(wgp, WGP_TASK_SEND_INIT_MESSAGE);
			goto next1;
		}
		wg_send_data_msg(wgp, wgs, m);
		m = NULL;	/* consumed */
		/* Release the session; free m unless it was consumed. */
	next1:	wg_put_session(wgs, &psref);
	next0:	m_freem(m);
		/* XXX Yield to avoid userland starvation? */
	}
}
3709
/*
 * wg_purge_pending_packets(wgp)
 *
 *	Discard the peer's single pending packet, if any, and wait for
 *	any packets already in the transmit queue to be processed.
 */
static void
wg_purge_pending_packets(struct wg_peer *wgp)
{
	struct mbuf *m;

	/* Atomically detach and free the pending packet (may be NULL). */
	m = atomic_swap_ptr(&wgp->wgp_pending, NULL);
	m_freem(m);
#ifdef ALTQ
	/* Drain anything sitting on the ALTQ send queue. */
	wg_start(&wgp->wgp_sc->wg_if);
#endif
	/* Wait for in-flight packets to clear the transmit queue. */
	pktq_barrier(wg_pktq);
}
3722
/*
 * wg_handshake_timeout_timer(arg)
 *
 *	Callout fired when a handshake initiation has gone unanswered;
 *	defers the retry decision to the per-peer worker.
 */
static void
wg_handshake_timeout_timer(void *arg)
{
	struct wg_peer *wgp = arg;

	WG_TRACE("enter");

	wg_schedule_peer_task(wgp, WGP_TASK_RETRY_HANDSHAKE);
}
3732
3733 static struct wg_peer *
3734 wg_alloc_peer(struct wg_softc *wg)
3735 {
3736 struct wg_peer *wgp;
3737
3738 wgp = kmem_zalloc(sizeof(*wgp), KM_SLEEP);
3739
3740 wgp->wgp_sc = wg;
3741 callout_init(&wgp->wgp_handshake_timeout_timer, CALLOUT_MPSAFE);
3742 callout_setfunc(&wgp->wgp_handshake_timeout_timer,
3743 wg_handshake_timeout_timer, wgp);
3744 callout_init(&wgp->wgp_session_dtor_timer, CALLOUT_MPSAFE);
3745 callout_setfunc(&wgp->wgp_session_dtor_timer,
3746 wg_session_dtor_timer, wgp);
3747 PSLIST_ENTRY_INIT(wgp, wgp_peerlist_entry);
3748 wgp->wgp_endpoint_changing = false;
3749 wgp->wgp_endpoint_available = false;
3750 wgp->wgp_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NONE);
3751 wgp->wgp_intr_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_SOFTNET);
3752 wgp->wgp_psz = pserialize_create();
3753 psref_target_init(&wgp->wgp_psref, wg_psref_class);
3754
3755 wgp->wgp_endpoint = kmem_zalloc(sizeof(*wgp->wgp_endpoint), KM_SLEEP);
3756 wgp->wgp_endpoint0 = kmem_zalloc(sizeof(*wgp->wgp_endpoint0), KM_SLEEP);
3757 psref_target_init(&wgp->wgp_endpoint->wgsa_psref, wg_psref_class);
3758 psref_target_init(&wgp->wgp_endpoint0->wgsa_psref, wg_psref_class);
3759
3760 struct wg_session *wgs;
3761 wgp->wgp_session_stable =
3762 kmem_zalloc(sizeof(*wgp->wgp_session_stable), KM_SLEEP);
3763 wgp->wgp_session_unstable =
3764 kmem_zalloc(sizeof(*wgp->wgp_session_unstable), KM_SLEEP);
3765 wgs = wgp->wgp_session_stable;
3766 wgs->wgs_peer = wgp;
3767 wgs->wgs_state = WGS_STATE_UNKNOWN;
3768 psref_target_init(&wgs->wgs_psref, wg_psref_class);
3769 #ifndef __HAVE_ATOMIC64_LOADSTORE
3770 mutex_init(&wgs->wgs_send_counter_lock, MUTEX_DEFAULT, IPL_SOFTNET);
3771 #endif
3772 wgs->wgs_recvwin = kmem_zalloc(sizeof(*wgs->wgs_recvwin), KM_SLEEP);
3773 mutex_init(&wgs->wgs_recvwin->lock, MUTEX_DEFAULT, IPL_SOFTNET);
3774
3775 wgs = wgp->wgp_session_unstable;
3776 wgs->wgs_peer = wgp;
3777 wgs->wgs_state = WGS_STATE_UNKNOWN;
3778 psref_target_init(&wgs->wgs_psref, wg_psref_class);
3779 #ifndef __HAVE_ATOMIC64_LOADSTORE
3780 mutex_init(&wgs->wgs_send_counter_lock, MUTEX_DEFAULT, IPL_SOFTNET);
3781 #endif
3782 wgs->wgs_recvwin = kmem_zalloc(sizeof(*wgs->wgs_recvwin), KM_SLEEP);
3783 mutex_init(&wgs->wgs_recvwin->lock, MUTEX_DEFAULT, IPL_SOFTNET);
3784
3785 return wgp;
3786 }
3787
/*
 * wg_destroy_peer(wgp)
 *
 *	Free a peer that has already been unlinked from the softc's
 *	peer list and maps, and whose psref target has been destroyed.
 *	Order matters: first cut off new packets (allowed-IP routes),
 *	then stop timers and queued work, then tear down sessions,
 *	endpoints, and locks.
 */
static void
wg_destroy_peer(struct wg_peer *wgp)
{
	struct wg_session *wgs;
	struct wg_softc *wg = wgp->wgp_sc;

	/* Prevent new packets from this peer on any source address. */
	rw_enter(wg->wg_rwlock, RW_WRITER);
	for (int i = 0; i < wgp->wgp_n_allowedips; i++) {
		struct wg_allowedip *wga = &wgp->wgp_allowedips[i];
		struct radix_node_head *rnh = wg_rnh(wg, wga->wga_family);
		struct radix_node *rn;

		KASSERT(rnh != NULL);
		rn = rnh->rnh_deladdr(&wga->wga_sa_addr,
		    &wga->wga_sa_mask, rnh);
		if (rn == NULL) {
			char addrstr[128];
			sockaddr_format(&wga->wga_sa_addr, addrstr,
			    sizeof(addrstr));
			WGLOG(LOG_WARNING, "%s: Couldn't delete %s",
			    if_name(&wg->wg_if), addrstr);
		}
	}
	rw_exit(wg->wg_rwlock);

	/* Purge pending packets. */
	wg_purge_pending_packets(wgp);

	/* Halt all packet processing and timeouts. */
	callout_halt(&wgp->wgp_handshake_timeout_timer, NULL);
	callout_halt(&wgp->wgp_session_dtor_timer, NULL);

	/* Wait for any queued work to complete. */
	workqueue_wait(wg_wq, &wgp->wgp_work);

	/* Tear down the unstable session slot. */
	wgs = wgp->wgp_session_unstable;
	if (wgs->wgs_state != WGS_STATE_UNKNOWN) {
		mutex_enter(wgp->wgp_lock);
		wg_destroy_session(wg, wgs);
		mutex_exit(wgp->wgp_lock);
	}
	mutex_destroy(&wgs->wgs_recvwin->lock);
	kmem_free(wgs->wgs_recvwin, sizeof(*wgs->wgs_recvwin));
#ifndef __HAVE_ATOMIC64_LOADSTORE
	mutex_destroy(&wgs->wgs_send_counter_lock);
#endif
	kmem_free(wgs, sizeof(*wgs));

	/* Tear down the stable session slot, the same way. */
	wgs = wgp->wgp_session_stable;
	if (wgs->wgs_state != WGS_STATE_UNKNOWN) {
		mutex_enter(wgp->wgp_lock);
		wg_destroy_session(wg, wgs);
		mutex_exit(wgp->wgp_lock);
	}
	mutex_destroy(&wgs->wgs_recvwin->lock);
	kmem_free(wgs->wgs_recvwin, sizeof(*wgs->wgs_recvwin));
#ifndef __HAVE_ATOMIC64_LOADSTORE
	mutex_destroy(&wgs->wgs_send_counter_lock);
#endif
	kmem_free(wgs, sizeof(*wgs));

	/* Free both endpoint slots and their psref targets. */
	psref_target_destroy(&wgp->wgp_endpoint->wgsa_psref, wg_psref_class);
	psref_target_destroy(&wgp->wgp_endpoint0->wgsa_psref, wg_psref_class);
	kmem_free(wgp->wgp_endpoint, sizeof(*wgp->wgp_endpoint));
	kmem_free(wgp->wgp_endpoint0, sizeof(*wgp->wgp_endpoint0));

	pserialize_destroy(wgp->wgp_psz);
	mutex_obj_free(wgp->wgp_intr_lock);
	mutex_obj_free(wgp->wgp_lock);

	kmem_free(wgp, sizeof(*wgp));
}
3861
/*
 * wg_destroy_all_peers(wg)
 *
 *	Remove and destroy every peer, one per pass: unlink a peer from
 *	the list and the by-name/by-pubkey maps under wg_lock, then
 *	destroy it and collect the staged thmap garbage outside the
 *	lock, and restart the scan until the list is empty.
 */
static void
wg_destroy_all_peers(struct wg_softc *wg)
{
	struct wg_peer *wgp, *wgp0 __diagused;
	void *garbage_byname, *garbage_bypubkey;

restart:
	garbage_byname = garbage_bypubkey = NULL;
	mutex_enter(wg->wg_lock);
	WG_PEER_WRITER_FOREACH(wgp, wg) {
		/* A peer may be unnamed; only then is it absent by name. */
		if (wgp->wgp_name[0]) {
			wgp0 = thmap_del(wg->wg_peers_byname, wgp->wgp_name,
			    strlen(wgp->wgp_name));
			KASSERT(wgp0 == wgp);
			garbage_byname = thmap_stage_gc(wg->wg_peers_byname);
		}
		wgp0 = thmap_del(wg->wg_peers_bypubkey, wgp->wgp_pubkey,
		    sizeof(wgp->wgp_pubkey));
		KASSERT(wgp0 == wgp);
		garbage_bypubkey = thmap_stage_gc(wg->wg_peers_bypubkey);
		WG_PEER_WRITER_REMOVE(wgp);
		wg->wg_npeers--;
		/* Wait for lockless readers of the peer list to drain. */
		mutex_enter(wgp->wgp_lock);
		pserialize_perform(wgp->wgp_psz);
		mutex_exit(wgp->wgp_lock);
		PSLIST_ENTRY_DESTROY(wgp, wgp_peerlist_entry);
		/* Handle one peer per pass; restart the scan below. */
		break;
	}
	mutex_exit(wg->wg_lock);

	if (wgp == NULL)
		return;

	psref_target_destroy(&wgp->wgp_psref, wg_psref_class);

	wg_destroy_peer(wgp);
	thmap_gc(wg->wg_peers_byname, garbage_byname);
	thmap_gc(wg->wg_peers_bypubkey, garbage_bypubkey);

	goto restart;
}
3903
/*
 * wg_destroy_peer_name(wg, name)
 *
 *	Look up the peer by name, unlink it from the maps and the peer
 *	list under wg_lock, and destroy it.  Returns ENOENT if no peer
 *	has that name, 0 on success.
 */
static int
wg_destroy_peer_name(struct wg_softc *wg, const char *name)
{
	struct wg_peer *wgp, *wgp0 __diagused;
	void *garbage_byname, *garbage_bypubkey;

	mutex_enter(wg->wg_lock);
	wgp = thmap_del(wg->wg_peers_byname, name, strlen(name));
	if (wgp != NULL) {
		wgp0 = thmap_del(wg->wg_peers_bypubkey, wgp->wgp_pubkey,
		    sizeof(wgp->wgp_pubkey));
		KASSERT(wgp0 == wgp);
		garbage_byname = thmap_stage_gc(wg->wg_peers_byname);
		garbage_bypubkey = thmap_stage_gc(wg->wg_peers_bypubkey);
		WG_PEER_WRITER_REMOVE(wgp);
		wg->wg_npeers--;
		/* Last peer gone: the link goes down. */
		if (wg->wg_npeers == 0)
			if_link_state_change(&wg->wg_if, LINK_STATE_DOWN);
		/* Wait for lockless readers of the peer list to drain. */
		mutex_enter(wgp->wgp_lock);
		pserialize_perform(wgp->wgp_psz);
		mutex_exit(wgp->wgp_lock);
		PSLIST_ENTRY_DESTROY(wgp, wgp_peerlist_entry);
	}
	mutex_exit(wg->wg_lock);

	if (wgp == NULL)
		return ENOENT;

	psref_target_destroy(&wgp->wgp_psref, wg_psref_class);

	wg_destroy_peer(wgp);
	thmap_gc(wg->wg_peers_byname, garbage_byname);
	thmap_gc(wg->wg_peers_bypubkey, garbage_bypubkey);

	return 0;
}
3940
/*
 * wg_if_attach(wg)
 *
 *	Set up and register the network interface: fill in the ifnet
 *	fields, then if_initialize/if_register, and attach bpf.
 *	Always returns 0.
 */
static int
wg_if_attach(struct wg_softc *wg)
{

	wg->wg_if.if_addrlen = 0;
	wg->wg_if.if_mtu = WG_MTU;
	wg->wg_if.if_flags = IFF_MULTICAST;
	wg->wg_if.if_extflags = IFEF_MPSAFE;
	wg->wg_if.if_ioctl = wg_ioctl;
	wg->wg_if.if_output = wg_output;
	wg->wg_if.if_init = wg_init;
#ifdef ALTQ
	wg->wg_if.if_start = wg_start;
#endif
	wg->wg_if.if_stop = wg_stop;
	wg->wg_if.if_type = IFT_OTHER;
	wg->wg_if.if_dlt = DLT_NULL;
	wg->wg_if.if_softc = wg;
#ifdef ALTQ
	IFQ_SET_READY(&wg->wg_if.if_snd);
#endif
	if_initialize(&wg->wg_if);

	/* Link stays down until the first peer is added. */
	wg->wg_if.if_link_state = LINK_STATE_DOWN;
	if_alloc_sadl(&wg->wg_if);
	if_register(&wg->wg_if);

	bpf_attach(&wg->wg_if, DLT_NULL, sizeof(uint32_t));

	return 0;
}
3972
3973 static void
3974 wg_if_detach(struct wg_softc *wg)
3975 {
3976 struct ifnet *ifp = &wg->wg_if;
3977
3978 bpf_detach(ifp);
3979 if_detach(ifp);
3980 }
3981
/*
 * wg_clone_create(ifc, unit)
 *
 *	if_clone handler: allocate and initialize a new wgN interface
 *	-- peer maps, locks, threadpool job, per-family sockets and
 *	routing tables -- then attach the ifnet.  On failure, unwind in
 *	reverse order via the failN labels.
 */
static int
wg_clone_create(struct if_clone *ifc, int unit)
{
	struct wg_softc *wg;
	int error;

	wg_guarantee_initialized();

	/* Enforce the global interface count limit. */
	error = wg_count_inc();
	if (error)
		return error;

	wg = kmem_zalloc(sizeof(*wg), KM_SLEEP);

	if_initname(&wg->wg_if, ifc->ifc_name, unit);

	PSLIST_INIT(&wg->wg_peers);
	wg->wg_peers_bypubkey = thmap_create(0, NULL, THMAP_NOCOPY);
	wg->wg_peers_byname = thmap_create(0, NULL, THMAP_NOCOPY);
	wg->wg_sessions_byindex = thmap_create(0, NULL, THMAP_NOCOPY);
	wg->wg_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NONE);
	wg->wg_intr_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_SOFTNET);
	wg->wg_rwlock = rw_obj_alloc();
	threadpool_job_init(&wg->wg_job, wg_job, wg->wg_intr_lock,
	    "%s", if_name(&wg->wg_if));
	wg->wg_ops = &wg_ops_rumpkernel;

	error = threadpool_get(&wg->wg_threadpool, PRI_NONE);
	if (error)
		goto fail0;

#ifdef INET
	error = wg_socreate(wg, AF_INET, &wg->wg_so4);
	if (error)
		goto fail1;
	/* Allowed-IPs radix tree keyed on the IPv4 address bits. */
	rn_inithead((void **)&wg->wg_rtable_ipv4,
	    offsetof(struct sockaddr_in, sin_addr) * NBBY);
#endif
#ifdef INET6
	error = wg_socreate(wg, AF_INET6, &wg->wg_so6);
	if (error)
		goto fail2;
	/* Allowed-IPs radix tree keyed on the IPv6 address bits. */
	rn_inithead((void **)&wg->wg_rtable_ipv6,
	    offsetof(struct sockaddr_in6, sin6_addr) * NBBY);
#endif

	error = wg_if_attach(wg);
	if (error)
		goto fail3;

	return 0;

	/*
	 * fail4 is currently unreachable (nothing fails after
	 * wg_if_attach) but kept so later steps can unwind through it.
	 */
fail4: __unused
	wg_destroy_all_peers(wg);
	wg_if_detach(wg);
fail3:
#ifdef INET6
	/* Disable upcalls before closing the sockets. */
	solock(wg->wg_so6);
	wg->wg_so6->so_rcv.sb_flags &= ~SB_UPCALL;
	sounlock(wg->wg_so6);
#endif
#ifdef INET
	solock(wg->wg_so4);
	wg->wg_so4->so_rcv.sb_flags &= ~SB_UPCALL;
	sounlock(wg->wg_so4);
#endif
	mutex_enter(wg->wg_intr_lock);
	threadpool_cancel_job(wg->wg_threadpool, &wg->wg_job);
	mutex_exit(wg->wg_intr_lock);
#ifdef INET6
	if (wg->wg_rtable_ipv6 != NULL)
		free(wg->wg_rtable_ipv6, M_RTABLE);
	soclose(wg->wg_so6);
fail2:
#endif
#ifdef INET
	if (wg->wg_rtable_ipv4 != NULL)
		free(wg->wg_rtable_ipv4, M_RTABLE);
	soclose(wg->wg_so4);
fail1:
#endif
	threadpool_put(wg->wg_threadpool, PRI_NONE);
fail0:	threadpool_job_destroy(&wg->wg_job);
	rw_obj_free(wg->wg_rwlock);
	mutex_obj_free(wg->wg_intr_lock);
	mutex_obj_free(wg->wg_lock);
	thmap_destroy(wg->wg_sessions_byindex);
	thmap_destroy(wg->wg_peers_byname);
	thmap_destroy(wg->wg_peers_bypubkey);
	PSLIST_DESTROY(&wg->wg_peers);
	kmem_free(wg, sizeof(*wg));
	wg_count_dec();
	return error;
}
4076
/*
 * wg_clone_destroy(ifp)
 *
 *	if_clone destruction hook: tear down the wg(4) instance behind
 *	ifp and release all of its resources.  The sequence mirrors the
 *	error-unwind path of the creation routine, so keep the order in
 *	sync with it.  Always returns 0.
 */
static int
wg_clone_destroy(struct ifnet *ifp)
{
	struct wg_softc *wg = container_of(ifp, struct wg_softc, wg_if);

#ifdef WG_RUMPKERNEL
	/* Detach the host-side tun backend first, if in user mode. */
	if (wg_user_mode(wg)) {
		rumpuser_wg_destroy(wg->wg_user);
		wg->wg_user = NULL;
	}
#endif

	/* Destroy peers and detach the interface before sockets go away. */
	wg_destroy_all_peers(wg);
	wg_if_detach(wg);
	/*
	 * Clear SB_UPCALL so the UDP sockets stop calling back into us
	 * before we dismantle the state those upcalls rely on.
	 */
#ifdef INET6
	solock(wg->wg_so6);
	wg->wg_so6->so_rcv.sb_flags &= ~SB_UPCALL;
	sounlock(wg->wg_so6);
#endif
#ifdef INET
	solock(wg->wg_so4);
	wg->wg_so4->so_rcv.sb_flags &= ~SB_UPCALL;
	sounlock(wg->wg_so4);
#endif
	/*
	 * Cancel the receive-processing job; per threadpool(9) this
	 * waits for it if it is already running.
	 */
	mutex_enter(wg->wg_intr_lock);
	threadpool_cancel_job(wg->wg_threadpool, &wg->wg_job);
	mutex_exit(wg->wg_intr_lock);
	/* Free the allowed-ips radix tables and close the sockets. */
#ifdef INET6
	if (wg->wg_rtable_ipv6 != NULL)
		free(wg->wg_rtable_ipv6, M_RTABLE);
	soclose(wg->wg_so6);
#endif
#ifdef INET
	if (wg->wg_rtable_ipv4 != NULL)
		free(wg->wg_rtable_ipv4, M_RTABLE);
	soclose(wg->wg_so4);
#endif
	/* Release the remaining locks, maps, and the softc itself. */
	threadpool_put(wg->wg_threadpool, PRI_NONE);
	threadpool_job_destroy(&wg->wg_job);
	rw_obj_free(wg->wg_rwlock);
	mutex_obj_free(wg->wg_intr_lock);
	mutex_obj_free(wg->wg_lock);
	thmap_destroy(wg->wg_sessions_byindex);
	thmap_destroy(wg->wg_peers_byname);
	thmap_destroy(wg->wg_peers_bypubkey);
	PSLIST_DESTROY(&wg->wg_peers);
	kmem_free(wg, sizeof(*wg));
	wg_count_dec();

	return 0;
}
4128
/*
 * wg_pick_peer_by_sa(wg, sa, psref)
 *
 *	Look up the peer whose allowed-ips radix tree matches the
 *	destination address sa, i.e. the peer that traffic for sa
 *	should be tunnelled to.  Returns NULL if no entry matches.
 *	On success the peer is referenced; caller must release it
 *	with wg_put_peer(wgp, psref).
 */
static struct wg_peer *
wg_pick_peer_by_sa(struct wg_softc *wg, const struct sockaddr *sa,
    struct psref *psref)
{
	struct radix_node_head *rnh;
	struct radix_node *rn;
	struct wg_peer *wgp = NULL;
	struct wg_allowedip *wga;

#ifdef WG_DEBUG_LOG
	char addrstr[128];
	sockaddr_format(sa, addrstr, sizeof(addrstr));
	WG_DLOG("sa=%s\n", addrstr);
#endif

	/* Serialize against allowed-ips route additions/removals. */
	rw_enter(wg->wg_rwlock, RW_READER);

	rnh = wg_rnh(wg, sa->sa_family);
	if (rnh == NULL)	/* address family not configured */
		goto out;

	rn = rnh->rnh_matchaddr(sa, rnh);
	/* RNF_ROOT nodes are internal tree sentinels, not real routes. */
	if (rn == NULL || (rn->rn_flags & RNF_ROOT) != 0)
		goto out;

	WG_TRACE("success");

	/* Recover the allowed-ip entry embedding rn, then its peer. */
	wga = container_of(rn, struct wg_allowedip, wga_nodes[0]);
	wgp = wga->wga_peer;
	wg_get_peer(wgp, psref);

out:
	rw_exit(wg->wg_rwlock);
	return wgp;
}
4164
/*
 * wg_fill_msg_data(wg, wgp, wgs, wgmd)
 *
 *	Fill in the fixed header of a transport data message for
 *	session wgs ([W] 5.4.6).  The memset also zeroes any padding
 *	bytes in the struct, which go out on the wire, so do not
 *	replace it with field assignments alone.  Increments the
 *	session's send counter as a side effect.
 */
static void
wg_fill_msg_data(struct wg_softc *wg, struct wg_peer *wgp,
    struct wg_session *wgs, struct wg_msg_data *wgmd)
{

	memset(wgmd, 0, sizeof(*wgmd));
	wgmd->wgmd_type = htole32(WG_MSG_TYPE_DATA);
	/* Opaque index echoed back as received -- no byte-swap needed. */
	wgmd->wgmd_receiver = wgs->wgs_remote_index;
	/* [W] 5.4.6: msg.counter := Nm^send */
	/* [W] 5.4.6: Nm^send := Nm^send + 1 */
	wgmd->wgmd_counter = htole64(wg_session_inc_send_counter(wgs));
	WG_DLOG("counter=%"PRIu64"\n", le64toh(wgmd->wgmd_counter));
}
4178
/*
 * wg_output(ifp, m, dst, rt)
 *
 *	if_output routine: hand the packet m, destined for dst, to the
 *	encryption path for the peer whose allowed-ips match dst.  If
 *	no session is established yet, stash the first packet in the
 *	peer's single pending slot and kick off a handshake instead.
 *	Consumes m in all cases.  Returns 0 if the packet was queued,
 *	otherwise an errno.
 */
static int
wg_output(struct ifnet *ifp, struct mbuf *m, const struct sockaddr *dst,
    const struct rtentry *rt)
{
	struct wg_softc *wg = ifp->if_softc;
	struct wg_peer *wgp = NULL;
	struct wg_session *wgs = NULL;
	struct psref wgp_psref, wgs_psref;
	int bound;
	int error;

	/* Bind the LWP to the CPU as required for psref acquisition. */
	bound = curlwp_bind();

	/* TODO make the nest limit configurable via sysctl */
	error = if_tunnel_check_nesting(ifp, m, 1);
	if (error) {
		WGLOG(LOG_ERR,
		    "%s: tunneling loop detected and packet dropped\n",
		    if_name(&wg->wg_if));
		goto out0;
	}

#ifdef ALTQ
	bool altq = atomic_load_relaxed(&ifp->if_snd.altq_flags)
	    & ALTQF_ENABLED;
	if (altq)
		IFQ_CLASSIFY(&ifp->if_snd, m, dst->sa_family);
#endif

	bpf_mtap_af(ifp, dst->sa_family, m, BPF_D_OUT);

	m->m_flags &= ~(M_BCAST|M_MCAST);

	wgp = wg_pick_peer_by_sa(wg, dst, &wgp_psref);
	if (wgp == NULL) {
		WG_TRACE("peer not found");
		error = EHOSTUNREACH;
		goto out0;
	}

	/* Clear checksum-offload flags. */
	m->m_pkthdr.csum_flags = 0;
	m->m_pkthdr.csum_data = 0;

	/* Check whether there's an established session. */
	wgs = wg_get_stable_session(wgp, &wgs_psref);
	if (wgs == NULL) {
		/*
		 * No established session.  If we're the first to try
		 * sending data, schedule a handshake and queue the
		 * packet for when the handshake is done; otherwise
		 * just drop the packet and let the ongoing handshake
		 * attempt continue.  We could queue more data packets
		 * but it's not clear that's worthwhile.
		 */
		if ((m = atomic_swap_ptr(&wgp->wgp_pending, m)) == NULL) {
			WG_TRACE("queued first packet; init handshake");
			wg_schedule_peer_task(wgp, WGP_TASK_SEND_INIT_MESSAGE);
		} else {
			/* m is now the previously-queued packet; freed below. */
			WG_TRACE("first packet already queued, dropping");
		}
		goto out1;
	}

	/* There's an established session.  Toss it in the queue. */
#ifdef ALTQ
	if (altq) {
		mutex_enter(ifp->if_snd.ifq_lock);
		if (ALTQ_IS_ENABLED(&ifp->if_snd)) {
			M_SETCTX(m, wgp);
			ALTQ_ENQUEUE(&ifp->if_snd, m, error);
			m = NULL; /* consume */
		}
		mutex_exit(ifp->if_snd.ifq_lock);
		if (m == NULL) {
			wg_start(ifp);
			goto out2;
		}
		/* ALTQ got disabled in between; fall through to pktq. */
	}
#endif
	kpreempt_disable();
	const uint32_t h = curcpu()->ci_index;	// pktq_rps_hash(m)
	/* Stash the peer pointer in the mbuf for the dequeue side. */
	M_SETCTX(m, wgp);
	if (__predict_false(!pktq_enqueue(wg_pktq, m, h))) {
		WGLOG(LOG_ERR, "%s: pktq full, dropping\n",
		    if_name(&wg->wg_if));
		error = ENOBUFS;
		goto out3;
	}
	m = NULL;	/* consumed */
	error = 0;
out3:	kpreempt_enable();

#ifdef ALTQ
out2:
#endif
	wg_put_session(wgs, &wgs_psref);
out1:	wg_put_peer(wgp, &wgp_psref);
out0:	m_freem(m);	/* NULL-safe; m is NULL on success paths */
	curlwp_bindx(bound);
	return error;
}
4281
/*
 * wg_send_udp(wgp, m)
 *
 *	Transmit the UDP payload m to wgp's current endpoint, through
 *	the socket matching the endpoint's address family.  Consumes m
 *	in all cases (udp_send/udp6_output take ownership; the default
 *	branch frees it explicitly).  Returns 0 or an errno.
 */
static int
wg_send_udp(struct wg_peer *wgp, struct mbuf *m)
{
	struct psref psref;
	struct wg_sockaddr *wgsa;
	int error;
	struct socket *so;

	/* Hold a stable reference on the endpoint while we send. */
	wgsa = wg_get_endpoint_sa(wgp, &psref);
	so = wg_get_so_by_peer(wgp, wgsa);
	solock(so);
	switch (wgsatosa(wgsa)->sa_family) {
#ifdef INET
	case AF_INET:
		error = udp_send(so, m, wgsatosa(wgsa), NULL, curlwp);
		break;
#endif
#ifdef INET6
	case AF_INET6:
		error = udp6_output(sotoinpcb(so), m, wgsatosin6(wgsa),
		    NULL, curlwp);
		break;
#endif
	default:
		/* Endpoint family unsupported by this kernel config. */
		m_freem(m);
		error = EPFNOSUPPORT;
	}
	sounlock(so);
	wg_put_sa(wgp, wgsa, &psref);

	return error;
}
4314
4315 /* Inspired by pppoe_get_mbuf */
4316 static struct mbuf *
4317 wg_get_mbuf(size_t leading_len, size_t len)
4318 {
4319 struct mbuf *m;
4320
4321 KASSERT(leading_len <= MCLBYTES);
4322 KASSERT(len <= MCLBYTES - leading_len);
4323
4324 m = m_gethdr(M_DONTWAIT, MT_DATA);
4325 if (m == NULL)
4326 return NULL;
4327 if (len + leading_len > MHLEN) {
4328 m_clget(m, M_DONTWAIT);
4329 if ((m->m_flags & M_EXT) == 0) {
4330 m_free(m);
4331 return NULL;
4332 }
4333 }
4334 m->m_data += leading_len;
4335 m->m_pkthdr.len = m->m_len = len;
4336
4337 return m;
4338 }
4339
/*
 * wg_send_data_msg(wgp, wgs, m)
 *
 *	Encrypt the payload of m as a WireGuard transport data message
 *	([W] 5.4.6) on session wgs and hand it to the ops layer for
 *	transmission to peer wgp.  The plaintext is zero-padded to a
 *	multiple of 16 bytes before encryption.  Always consumes
 *	(frees) m.  Afterwards, schedules a new handshake if the
 *	session has reached REKEY-AFTER-TIME or REKEY-AFTER-MESSAGES
 *	([W] 6.2).
 */
static void
wg_send_data_msg(struct wg_peer *wgp, struct wg_session *wgs, struct mbuf *m)
{
	struct wg_softc *wg = wgp->wgp_sc;
	int error;
	size_t inner_len, padded_len, encrypted_len;
	char *padded_buf = NULL;
	size_t mlen;
	struct wg_msg_data *wgmd;
	bool free_padded_buf = false;
	struct mbuf *n;
	/* Headroom for the outer IP/UDP headers prepended later. */
	size_t leading_len = max_hdr + sizeof(struct udphdr);

	mlen = m_length(m);
	inner_len = mlen;
	/* [W] 5.4.6: pad the plaintext to a 16-byte boundary. */
	padded_len = roundup(mlen, 16);
	encrypted_len = padded_len + WG_AUTHTAG_LEN;
	WG_DLOG("inner=%zu, padded=%zu, encrypted_len=%zu\n",
	    inner_len, padded_len, encrypted_len);
	if (mlen != 0) {
		bool success;
		/*
		 * Try to get the plaintext contiguous in the mbuf; if
		 * that fails, fall back to a temporary flat buffer.
		 */
		success = m_ensure_contig(&m, padded_len);
		if (success) {
			padded_buf = mtod(m, char *);
		} else {
			/* May run in softint context, hence KM_NOSLEEP. */
			padded_buf = kmem_intr_alloc(padded_len, KM_NOSLEEP);
			if (padded_buf == NULL) {
				error = ENOBUFS;
				goto out;
			}
			free_padded_buf = true;
			m_copydata(m, 0, mlen, padded_buf);
		}
		/* Zero the pad bytes (inner_len == mlen). */
		memset(padded_buf + mlen, 0, padded_len - inner_len);
	}

	/* Allocate the outgoing packet: header + ciphertext + tag. */
	n = wg_get_mbuf(leading_len, sizeof(*wgmd) + encrypted_len);
	if (n == NULL) {
		error = ENOBUFS;
		goto out;
	}
	KASSERT(n->m_len >= sizeof(*wgmd));
	wgmd = mtod(n, struct wg_msg_data *);
	wg_fill_msg_data(wg, wgp, wgs, wgmd);

	/* [W] 5.4.6: AEAD(Tm^send, Nm^send, P, e) */
	wg_algo_aead_enc((char *)wgmd + sizeof(*wgmd), encrypted_len,
	    wgs->wgs_tkey_send, le64toh(wgmd->wgmd_counter),
	    padded_buf, padded_len,
	    NULL, 0);

	error = wg->wg_ops->send_data_msg(wgp, n);	/* consumes n */
	if (error) {
		WG_DLOG("send_data_msg failed, error=%d\n", error);
		goto out;
	}

	/*
	 * Packet was sent out -- count it in the interface statistics.
	 */
	if_statadd(&wg->wg_if, if_obytes, mlen);
	if_statinc(&wg->wg_if, if_opackets);

	/*
	 * Record when we last sent data, for determining when we need
	 * to send a passive keepalive.
	 *
	 * Other logic assumes that wgs_time_last_data_sent is zero iff
	 * we have never sent data on this session.  Early at boot, if
	 * wg(4) starts operating within <1sec, or after 136 years of
	 * uptime, we may observe time_uptime32 = 0.  In that case,
	 * pretend we observed 1 instead.  That way, we correctly
	 * indicate we have sent data on this session; the only logic
	 * this might adversely affect is the keepalive timeout
	 * detection, which might spuriously send a keepalive during
	 * one second every 136 years.  All of this is very silly, of
	 * course, but the cost to guaranteeing wgs_time_last_data_sent
	 * is nonzero is negligible here.
	 */
	const uint32_t now = time_uptime32;
	atomic_store_relaxed(&wgs->wgs_time_last_data_sent, MAX(now, 1));

	/*
	 * Check rekey-after-time.
	 */
	if (wgs->wgs_is_initiator &&
	    now - wgs->wgs_time_established >= wg_rekey_after_time) {
		/*
		 * [W] 6.2 Transport Message Limits
		 * "if a peer is the initiator of a current secure
		 *  session, WireGuard will send a handshake initiation
		 *  message to begin a new secure session if, after
		 *  transmitting a transport data message, the current
		 *  secure session is REKEY-AFTER-TIME seconds old,"
		 */
		WG_TRACE("rekey after time");
		atomic_store_relaxed(&wgs->wgs_force_rekey, true);
		wg_schedule_peer_task(wgp, WGP_TASK_SEND_INIT_MESSAGE);
	}

	/*
	 * Check rekey-after-messages.
	 */
	if (wg_session_get_send_counter(wgs) >= wg_rekey_after_messages) {
		/*
		 * [W] 6.2 Transport Message Limits
		 * "WireGuard will try to create a new session, by
		 *  sending a handshake initiation message (section
		 *  5.4.2), after it has sent REKEY-AFTER-MESSAGES
		 *  transport data messages..."
		 */
		WG_TRACE("rekey after messages");
		atomic_store_relaxed(&wgs->wgs_force_rekey, true);
		wg_schedule_peer_task(wgp, WGP_TASK_SEND_INIT_MESSAGE);
	}

out:	m_freem(m);
	if (free_padded_buf)
		kmem_intr_free(padded_buf, padded_len);
}
4460
/*
 * wg_input(ifp, m, af)
 *
 *	Deliver a decrypted inner packet m, of address family af, up
 *	into the IP stack via the per-protocol packet queue.  Consumes
 *	m.  Interface input statistics are only counted when the
 *	enqueue succeeds.
 */
static void
wg_input(struct ifnet *ifp, struct mbuf *m, const int af)
{
	pktqueue_t *pktq;
	size_t pktlen;

	KASSERT(af == AF_INET || af == AF_INET6);

	WG_TRACE("");

	m_set_rcvif(m, ifp);
	/* Remember the length now; the queue takes ownership of m. */
	pktlen = m->m_pkthdr.len;

	bpf_mtap_af(ifp, af, m, BPF_D_IN);

	switch (af) {
#ifdef INET
	case AF_INET:
		pktq = ip_pktq;
		break;
#endif
#ifdef INET6
	case AF_INET6:
		pktq = ip6_pktq;
		break;
#endif
	default:
		/* Unreachable given the KASSERT above. */
		panic("invalid af=%d", af);
	}

	kpreempt_disable();
	const u_int h = curcpu()->ci_index;
	if (__predict_true(pktq_enqueue(pktq, m, h))) {
		if_statadd(ifp, if_ibytes, pktlen);
		if_statinc(ifp, if_ipackets);
	} else {
		m_freem(m);
	}
	kpreempt_enable();
}
4501
/*
 * wg_calc_pubkey(pubkey, privkey)
 *
 *	Derive the WireGuard static public key from the static private
 *	key, by scalar multiplication of the curve base point
 *	(Curve25519, per [W]).
 */
static void
wg_calc_pubkey(uint8_t pubkey[static WG_STATIC_KEY_LEN],
    const uint8_t privkey[static WG_STATIC_KEY_LEN])
{

	crypto_scalarmult_base(pubkey, privkey);
}
4509
4510 static int
4511 wg_rtable_add_route(struct wg_softc *wg, struct wg_allowedip *wga)
4512 {
4513 struct radix_node_head *rnh;
4514 struct radix_node *rn;
4515 int error = 0;
4516
4517 rw_enter(wg->wg_rwlock, RW_WRITER);
4518 rnh = wg_rnh(wg, wga->wga_family);
4519 KASSERT(rnh != NULL);
4520 rn = rnh->rnh_addaddr(&wga->wga_sa_addr, &wga->wga_sa_mask, rnh,
4521 wga->wga_nodes);
4522 rw_exit(wg->wg_rwlock);
4523
4524 if (rn == NULL)
4525 error = EEXIST;
4526
4527 return error;
4528 }
4529
4530 static int
4531 wg_handle_prop_peer(struct wg_softc *wg, prop_dictionary_t peer,
4532 struct wg_peer **wgpp)
4533 {
4534 int error = 0;
4535 const void *pubkey;
4536 size_t pubkey_len;
4537 const void *psk;
4538 size_t psk_len;
4539 const char *name = NULL;
4540
4541 if (prop_dictionary_get_string(peer, "name", &name)) {
4542 if (strlen(name) > WG_PEER_NAME_MAXLEN) {
4543 error = EINVAL;
4544 goto out;
4545 }
4546 }
4547
4548 if (!prop_dictionary_get_data(peer, "public_key",
4549 &pubkey, &pubkey_len)) {
4550 error = EINVAL;
4551 goto out;
4552 }
4553 #ifdef WG_DEBUG_DUMP
4554 if (wg_debug & WG_DEBUG_FLAGS_DUMP) {
4555 char *hex = gethexdump(pubkey, pubkey_len);
4556 log(LOG_DEBUG, "pubkey=%p, pubkey_len=%zu\n%s\n",
4557 pubkey, pubkey_len, hex);
4558 puthexdump(hex, pubkey, pubkey_len);
4559 }
4560 #endif
4561
4562 struct wg_peer *wgp = wg_alloc_peer(wg);
4563 memcpy(wgp->wgp_pubkey, pubkey, sizeof(wgp->wgp_pubkey));
4564 if (name != NULL)
4565 strncpy(wgp->wgp_name, name, sizeof(wgp->wgp_name));
4566
4567 if (prop_dictionary_get_data(peer, "preshared_key", &psk, &psk_len)) {
4568 if (psk_len != sizeof(wgp->wgp_psk)) {
4569 error = EINVAL;
4570 goto out;
4571 }
4572 memcpy(wgp->wgp_psk, psk, sizeof(wgp->wgp_psk));
4573 }
4574
4575 const void *addr;
4576 size_t addr_len;
4577 struct wg_sockaddr *wgsa = wgp->wgp_endpoint;
4578
4579 if (!prop_dictionary_get_data(peer, "endpoint", &addr, &addr_len))
4580 goto skip_endpoint;
4581 if (addr_len < sizeof(*wgsatosa(wgsa)) ||
4582 addr_len > sizeof(*wgsatoss(wgsa))) {
4583 error = EINVAL;
4584 goto out;
4585 }
4586 memcpy(wgsatoss(wgsa), addr, addr_len);
4587 switch (wgsa_family(wgsa)) {
4588 #ifdef INET
4589 case AF_INET:
4590 break;
4591 #endif
4592 #ifdef INET6
4593 case AF_INET6:
4594 break;
4595 #endif
4596 default:
4597 error = EPFNOSUPPORT;
4598 goto out;
4599 }
4600 if (addr_len != sockaddr_getsize_by_family(wgsa_family(wgsa))) {
4601 error = EINVAL;
4602 goto out;
4603 }
4604 {
4605 char addrstr[128];
4606 sockaddr_format(wgsatosa(wgsa), addrstr, sizeof(addrstr));
4607 WG_DLOG("addr=%s\n", addrstr);
4608 }
4609 wgp->wgp_endpoint_available = true;
4610
4611 prop_array_t allowedips;
4612 skip_endpoint:
4613 allowedips = prop_dictionary_get(peer, "allowedips");
4614 if (allowedips == NULL)
4615 goto skip;
4616
4617 prop_object_iterator_t _it = prop_array_iterator(allowedips);
4618 prop_dictionary_t prop_allowedip;
4619 int j = 0;
4620 while ((prop_allowedip = prop_object_iterator_next(_it)) != NULL) {
4621 struct wg_allowedip *wga = &wgp->wgp_allowedips[j];
4622
4623 if (!prop_dictionary_get_int(prop_allowedip, "family",
4624 &wga->wga_family))
4625 continue;
4626 if (!prop_dictionary_get_data(prop_allowedip, "ip",
4627 &addr, &addr_len))
4628 continue;
4629 if (!prop_dictionary_get_uint8(prop_allowedip, "cidr",
4630 &wga->wga_cidr))
4631 continue;
4632
4633 switch (wga->wga_family) {
4634 #ifdef INET
4635 case AF_INET: {
4636 struct sockaddr_in sin;
4637 char addrstr[128];
4638 struct in_addr mask;
4639 struct sockaddr_in sin_mask;
4640
4641 if (addr_len != sizeof(struct in_addr))
4642 return EINVAL;
4643 memcpy(&wga->wga_addr4, addr, addr_len);
4644
4645 sockaddr_in_init(&sin, (const struct in_addr *)addr,
4646 0);
4647 sockaddr_copy(&wga->wga_sa_addr,
4648 sizeof(sin), sintosa(&sin));
4649
4650 sockaddr_format(sintosa(&sin),
4651 addrstr, sizeof(addrstr));
4652 WG_DLOG("addr=%s/%d\n", addrstr, wga->wga_cidr);
4653
4654 in_len2mask(&mask, wga->wga_cidr);
4655 sockaddr_in_init(&sin_mask, &mask, 0);
4656 sockaddr_copy(&wga->wga_sa_mask,
4657 sizeof(sin_mask), sintosa(&sin_mask));
4658
4659 break;
4660 }
4661 #endif
4662 #ifdef INET6
4663 case AF_INET6: {
4664 struct sockaddr_in6 sin6;
4665 char addrstr[128];
4666 struct in6_addr mask;
4667 struct sockaddr_in6 sin6_mask;
4668
4669 if (addr_len != sizeof(struct in6_addr))
4670 return EINVAL;
4671 memcpy(&wga->wga_addr6, addr, addr_len);
4672
4673 sockaddr_in6_init(&sin6, (const struct in6_addr *)addr,
4674 0, 0, 0);
4675 sockaddr_copy(&wga->wga_sa_addr,
4676 sizeof(sin6), sin6tosa(&sin6));
4677
4678 sockaddr_format(sin6tosa(&sin6),
4679 addrstr, sizeof(addrstr));
4680 WG_DLOG("addr=%s/%d\n", addrstr, wga->wga_cidr);
4681
4682 in6_prefixlen2mask(&mask, wga->wga_cidr);
4683 sockaddr_in6_init(&sin6_mask, &mask, 0, 0, 0);
4684 sockaddr_copy(&wga->wga_sa_mask,
4685 sizeof(sin6_mask), sin6tosa(&sin6_mask));
4686
4687 break;
4688 }
4689 #endif
4690 default:
4691 error = EINVAL;
4692 goto out;
4693 }
4694 wga->wga_peer = wgp;
4695
4696 error = wg_rtable_add_route(wg, wga);
4697 if (error != 0)
4698 goto out;
4699
4700 j++;
4701 }
4702 wgp->wgp_n_allowedips = j;
4703 skip:
4704 *wgpp = wgp;
4705 out:
4706 return error;
4707 }
4708
4709 static int
4710 wg_alloc_prop_buf(char **_buf, struct ifdrv *ifd)
4711 {
4712 int error;
4713 char *buf;
4714
4715 WG_DLOG("buf=%p, len=%zu\n", ifd->ifd_data, ifd->ifd_len);
4716 if (ifd->ifd_len >= WG_MAX_PROPLEN)
4717 return E2BIG;
4718 buf = kmem_alloc(ifd->ifd_len + 1, KM_SLEEP);
4719 error = copyin(ifd->ifd_data, buf, ifd->ifd_len);
4720 if (error != 0)
4721 return error;
4722 buf[ifd->ifd_len] = '\0';
4723 #ifdef WG_DEBUG_DUMP
4724 if (wg_debug & WG_DEBUG_FLAGS_DUMP) {
4725 log(LOG_DEBUG, "%.*s\n", (int)MIN(INT_MAX, ifd->ifd_len),
4726 (const char *)buf);
4727 }
4728 #endif
4729 *_buf = buf;
4730 return 0;
4731 }
4732
4733 static int
4734 wg_ioctl_set_private_key(struct wg_softc *wg, struct ifdrv *ifd)
4735 {
4736 int error;
4737 prop_dictionary_t prop_dict;
4738 char *buf = NULL;
4739 const void *privkey;
4740 size_t privkey_len;
4741
4742 error = wg_alloc_prop_buf(&buf, ifd);
4743 if (error != 0)
4744 return error;
4745 error = EINVAL;
4746 prop_dict = prop_dictionary_internalize(buf);
4747 if (prop_dict == NULL)
4748 goto out;
4749 if (!prop_dictionary_get_data(prop_dict, "private_key",
4750 &privkey, &privkey_len))
4751 goto out;
4752 #ifdef WG_DEBUG_DUMP
4753 if (wg_debug & WG_DEBUG_FLAGS_DUMP) {
4754 char *hex = gethexdump(privkey, privkey_len);
4755 log(LOG_DEBUG, "privkey=%p, privkey_len=%zu\n%s\n",
4756 privkey, privkey_len, hex);
4757 puthexdump(hex, privkey, privkey_len);
4758 }
4759 #endif
4760 if (privkey_len != WG_STATIC_KEY_LEN)
4761 goto out;
4762 memcpy(wg->wg_privkey, privkey, WG_STATIC_KEY_LEN);
4763 wg_calc_pubkey(wg->wg_pubkey, wg->wg_privkey);
4764 error = 0;
4765
4766 out:
4767 kmem_free(buf, ifd->ifd_len + 1);
4768 return error;
4769 }
4770
4771 static int
4772 wg_ioctl_set_listen_port(struct wg_softc *wg, struct ifdrv *ifd)
4773 {
4774 int error;
4775 prop_dictionary_t prop_dict;
4776 char *buf = NULL;
4777 uint16_t port;
4778
4779 error = wg_alloc_prop_buf(&buf, ifd);
4780 if (error != 0)
4781 return error;
4782 error = EINVAL;
4783 prop_dict = prop_dictionary_internalize(buf);
4784 if (prop_dict == NULL)
4785 goto out;
4786 if (!prop_dictionary_get_uint16(prop_dict, "listen_port", &port))
4787 goto out;
4788
4789 error = wg->wg_ops->bind_port(wg, (uint16_t)port);
4790
4791 out:
4792 kmem_free(buf, ifd->ifd_len + 1);
4793 return error;
4794 }
4795
4796 static int
4797 wg_ioctl_add_peer(struct wg_softc *wg, struct ifdrv *ifd)
4798 {
4799 int error;
4800 prop_dictionary_t prop_dict;
4801 char *buf = NULL;
4802 struct wg_peer *wgp = NULL, *wgp0 __diagused;
4803
4804 error = wg_alloc_prop_buf(&buf, ifd);
4805 if (error != 0)
4806 return error;
4807 error = EINVAL;
4808 prop_dict = prop_dictionary_internalize(buf);
4809 if (prop_dict == NULL)
4810 goto out;
4811
4812 error = wg_handle_prop_peer(wg, prop_dict, &wgp);
4813 if (error != 0)
4814 goto out;
4815
4816 mutex_enter(wg->wg_lock);
4817 if (thmap_get(wg->wg_peers_bypubkey, wgp->wgp_pubkey,
4818 sizeof(wgp->wgp_pubkey)) != NULL ||
4819 (wgp->wgp_name[0] &&
4820 thmap_get(wg->wg_peers_byname, wgp->wgp_name,
4821 strlen(wgp->wgp_name)) != NULL)) {
4822 mutex_exit(wg->wg_lock);
4823 wg_destroy_peer(wgp);
4824 error = EEXIST;
4825 goto out;
4826 }
4827 wgp0 = thmap_put(wg->wg_peers_bypubkey, wgp->wgp_pubkey,
4828 sizeof(wgp->wgp_pubkey), wgp);
4829 KASSERT(wgp0 == wgp);
4830 if (wgp->wgp_name[0]) {
4831 wgp0 = thmap_put(wg->wg_peers_byname, wgp->wgp_name,
4832 strlen(wgp->wgp_name), wgp);
4833 KASSERT(wgp0 == wgp);
4834 }
4835 WG_PEER_WRITER_INSERT_HEAD(wgp, wg);
4836 wg->wg_npeers++;
4837 mutex_exit(wg->wg_lock);
4838
4839 if_link_state_change(&wg->wg_if, LINK_STATE_UP);
4840
4841 out:
4842 kmem_free(buf, ifd->ifd_len + 1);
4843 return error;
4844 }
4845
4846 static int
4847 wg_ioctl_delete_peer(struct wg_softc *wg, struct ifdrv *ifd)
4848 {
4849 int error;
4850 prop_dictionary_t prop_dict;
4851 char *buf = NULL;
4852 const char *name;
4853
4854 error = wg_alloc_prop_buf(&buf, ifd);
4855 if (error != 0)
4856 return error;
4857 error = EINVAL;
4858 prop_dict = prop_dictionary_internalize(buf);
4859 if (prop_dict == NULL)
4860 goto out;
4861
4862 if (!prop_dictionary_get_string(prop_dict, "name", &name))
4863 goto out;
4864 if (strlen(name) > WG_PEER_NAME_MAXLEN)
4865 goto out;
4866
4867 error = wg_destroy_peer_name(wg, name);
4868 out:
4869 kmem_free(buf, ifd->ifd_len + 1);
4870 return error;
4871 }
4872
4873 static bool
4874 wg_is_authorized(struct wg_softc *wg, u_long cmd)
4875 {
4876 int au = cmd == SIOCGDRVSPEC ?
4877 KAUTH_REQ_NETWORK_INTERFACE_WG_GETPRIV :
4878 KAUTH_REQ_NETWORK_INTERFACE_WG_SETPRIV;
4879 return kauth_authorize_network(kauth_cred_get(),
4880 KAUTH_NETWORK_INTERFACE_WG, au, &wg->wg_if,
4881 (void *)cmd, NULL) == 0;
4882 }
4883
/*
 * wg_ioctl_get(wg, ifd)
 *
 *	SIOCGDRVSPEC handler: externalize the interface configuration
 *	-- private and preshared keys (privileged callers only),
 *	listen port, and every peer with its endpoint, last-handshake
 *	time, and allowed ips -- as a proplib plist, and copy it out
 *	to ifd->ifd_data.  Returns 0, EINVAL if the caller's buffer is
 *	too small, or ENOMEM on allocation failure.
 */
static int
wg_ioctl_get(struct wg_softc *wg, struct ifdrv *ifd)
{
	int error = ENOMEM;
	prop_dictionary_t prop_dict;
	prop_array_t peers = NULL;
	char *buf;
	struct wg_peer *wgp;
	int s, i;

	prop_dict = prop_dictionary_create();
	if (prop_dict == NULL)
		goto error;

	/* The private key is disclosed only to privileged callers. */
	if (wg_is_authorized(wg, SIOCGDRVSPEC)) {
		if (!prop_dictionary_set_data(prop_dict, "private_key",
			wg->wg_privkey, WG_STATIC_KEY_LEN))
			goto error;
	}

	if (wg->wg_listen_port != 0) {
		if (!prop_dictionary_set_uint16(prop_dict, "listen_port",
			wg->wg_listen_port))
			goto error;
	}

	if (wg->wg_npeers == 0)
		goto skip_peers;

	peers = prop_array_create();
	if (peers == NULL)
		goto error;

	s = pserialize_read_enter();
	i = 0;
	WG_PEER_READER_FOREACH(wgp, wg) {
		struct wg_sockaddr *wgsa;
		struct psref wgp_psref, wgsa_psref;
		prop_dictionary_t prop_peer;

		/*
		 * Hold the peer with a psref and drop the pserialize
		 * section, since the proplib calls below may sleep.
		 */
		wg_get_peer(wgp, &wgp_psref);
		pserialize_read_exit(s);

		prop_peer = prop_dictionary_create();
		if (prop_peer == NULL)
			goto next;

		if (strlen(wgp->wgp_name) > 0) {
			if (!prop_dictionary_set_string(prop_peer, "name",
				wgp->wgp_name))
				goto next;
		}

		if (!prop_dictionary_set_data(prop_peer, "public_key",
			wgp->wgp_pubkey, sizeof(wgp->wgp_pubkey)))
			goto next;

		/*
		 * Disclose the preshared key only if one is set (i.e.
		 * not all-zero; compared in constant time) and the
		 * caller is privileged.
		 */
		uint8_t psk_zero[WG_PRESHARED_KEY_LEN] = {0};
		if (!consttime_memequal(wgp->wgp_psk, psk_zero,
			sizeof(wgp->wgp_psk))) {
			if (wg_is_authorized(wg, SIOCGDRVSPEC)) {
				if (!prop_dictionary_set_data(prop_peer,
					"preshared_key",
					wgp->wgp_psk, sizeof(wgp->wgp_psk)))
					goto next;
			}
		}

		wgsa = wg_get_endpoint_sa(wgp, &wgsa_psref);
		CTASSERT(AF_UNSPEC == 0);
		/* Only report an endpoint once one is known. */
		if (wgsa_family(wgsa) != 0 /*AF_UNSPEC*/ &&
		    !prop_dictionary_set_data(prop_peer, "endpoint",
			wgsatoss(wgsa),
			sockaddr_getsize_by_family(wgsa_family(wgsa)))) {
			wg_put_sa(wgp, wgsa, &wgsa_psref);
			goto next;
		}
		wg_put_sa(wgp, wgsa, &wgsa_psref);

		const struct timespec *t = &wgp->wgp_last_handshake_time;

		if (!prop_dictionary_set_uint64(prop_peer,
			"last_handshake_time_sec", (uint64_t)t->tv_sec))
			goto next;
		if (!prop_dictionary_set_uint32(prop_peer,
			"last_handshake_time_nsec", (uint32_t)t->tv_nsec))
			goto next;

		if (wgp->wgp_n_allowedips == 0)
			goto skip_allowedips;

		prop_array_t allowedips = prop_array_create();
		if (allowedips == NULL)
			goto next;
		for (int j = 0; j < wgp->wgp_n_allowedips; j++) {
			struct wg_allowedip *wga = &wgp->wgp_allowedips[j];
			prop_dictionary_t prop_allowedip;

			prop_allowedip = prop_dictionary_create();
			if (prop_allowedip == NULL)
				break;

			if (!prop_dictionary_set_int(prop_allowedip, "family",
				wga->wga_family))
				goto _next;
			if (!prop_dictionary_set_uint8(prop_allowedip, "cidr",
				wga->wga_cidr))
				goto _next;

			switch (wga->wga_family) {
#ifdef INET
			case AF_INET:
				if (!prop_dictionary_set_data(prop_allowedip,
					"ip", &wga->wga_addr4,
					sizeof(wga->wga_addr4)))
					goto _next;
				break;
#endif
#ifdef INET6
			case AF_INET6:
				if (!prop_dictionary_set_data(prop_allowedip,
					"ip", &wga->wga_addr6,
					sizeof(wga->wga_addr6)))
					goto _next;
				break;
#endif
			default:
				panic("invalid af=%d", wga->wga_family);
			}
			prop_array_set(allowedips, j, prop_allowedip);
		_next:
			prop_object_release(prop_allowedip);
		}
		prop_dictionary_set(prop_peer, "allowedips", allowedips);
		prop_object_release(allowedips);

	skip_allowedips:

		prop_array_set(peers, i, prop_peer);
	next:
		if (prop_peer)
			prop_object_release(prop_peer);
		i++;

		/* Re-enter the pserialize section to continue iterating. */
		s = pserialize_read_enter();
		wg_put_peer(wgp, &wgp_psref);
	}
	pserialize_read_exit(s);

	prop_dictionary_set(prop_dict, "peers", peers);
	prop_object_release(peers);
	peers = NULL;

skip_peers:
	buf = prop_dictionary_externalize(prop_dict);
	if (buf == NULL)
		goto error;
	if (ifd->ifd_len < (strlen(buf) + 1)) {
		error = EINVAL;
		goto error;
	}
	error = copyout(buf, ifd->ifd_data, strlen(buf) + 1);

	/*
	 * NOTE(review): buf comes from prop_dictionary_externalize;
	 * confirm free(buf, 0) matches proplib's allocator, and note
	 * that buf is not freed on the EINVAL path above.
	 */
	free(buf, 0);
error:
	if (peers != NULL)
		prop_object_release(peers);
	if (prop_dict != NULL)
		prop_object_release(prop_dict);

	return error;
}
5056
/*
 * wg_ioctl(ifp, cmd, data)
 *
 *	if_ioctl routine: handle interface configuration.  wg(4)
 *	private commands travel via SIOCSDRVSPEC/SIOCGDRVSPEC with an
 *	ifd_cmd selector; set operations require SETPRIV, gets are
 *	filtered inside wg_ioctl_get.  Everything else falls through
 *	to ifioctl_common, and in a rump kernel with a host tun
 *	backend, address ioctls are additionally mirrored to the
 *	host's tun interface.
 */
static int
wg_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct wg_softc *wg = ifp->if_softc;
	struct ifreq *ifr = data;
	struct ifaddr *ifa = data;
	struct ifdrv *ifd = data;
	int error = 0;

	switch (cmd) {
	case SIOCINITIFADDR:
		/* Bring the interface up on first non-link address. */
		if (ifa->ifa_addr->sa_family != AF_LINK &&
		    (ifp->if_flags & (IFF_UP | IFF_RUNNING)) !=
		    (IFF_UP | IFF_RUNNING)) {
			ifp->if_flags |= IFF_UP;
			error = if_init(ifp);
		}
		return error;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		switch (ifr->ifr_addr.sa_family) {
#ifdef INET
		case AF_INET:	/* IP supports Multicast */
			break;
#endif
#ifdef INET6
		case AF_INET6:	/* IP6 supports Multicast */
			break;
#endif
		default:  /* Other protocols doesn't support Multicast */
			error = EAFNOSUPPORT;
			break;
		}
		return error;
	case SIOCSDRVSPEC:
		/* All wg(4) set operations are privileged. */
		if (!wg_is_authorized(wg, cmd)) {
			return EPERM;
		}
		switch (ifd->ifd_cmd) {
		case WG_IOCTL_SET_PRIVATE_KEY:
			error = wg_ioctl_set_private_key(wg, ifd);
			break;
		case WG_IOCTL_SET_LISTEN_PORT:
			error = wg_ioctl_set_listen_port(wg, ifd);
			break;
		case WG_IOCTL_ADD_PEER:
			error = wg_ioctl_add_peer(wg, ifd);
			break;
		case WG_IOCTL_DELETE_PEER:
			error = wg_ioctl_delete_peer(wg, ifd);
			break;
		default:
			error = EINVAL;
			break;
		}
		return error;
	case SIOCGDRVSPEC:
		return wg_ioctl_get(wg, ifd);
	case SIOCSIFFLAGS:
		if ((error = ifioctl_common(ifp, cmd, data)) != 0)
			break;
		switch (ifp->if_flags & (IFF_UP|IFF_RUNNING)) {
		case IFF_RUNNING:
			/*
			 * If interface is marked down and it is running,
			 * then stop and disable it.
			 */
			if_stop(ifp, 1);
			break;
		case IFF_UP:
			/*
			 * If interface is marked up and it is stopped, then
			 * start it.
			 */
			error = if_init(ifp);
			break;
		default:
			break;
		}
		return error;
#ifdef WG_RUMPKERNEL
	case SIOCSLINKSTR:
		/* Attach a rumpuser backend and switch the ops vector. */
		error = wg_ioctl_linkstr(wg, ifd);
		if (error)
			return error;
		wg->wg_ops = &wg_ops_rumpuser;
		return 0;
#endif
	default:
		break;
	}

	error = ifioctl_common(ifp, cmd, data);

#ifdef WG_RUMPKERNEL
	if (!wg_user_mode(wg))
		return error;

	/* Do the same to the corresponding tun device on the host */
	/*
	 * XXX Actually the command has not been handled yet.  It
	 * will be handled via pr_ioctl form doifioctl later.
	 */
	switch (cmd) {
#ifdef INET
	case SIOCAIFADDR:
	case SIOCDIFADDR: {
		/* Copy the request so we can retarget it at the host tun. */
		struct in_aliasreq _ifra = *(const struct in_aliasreq *)data;
		struct in_aliasreq *ifra = &_ifra;
		KASSERT(error == ENOTTY);
		strncpy(ifra->ifra_name, rumpuser_wg_get_tunname(wg->wg_user),
		    IFNAMSIZ);
		error = rumpuser_wg_ioctl(wg->wg_user, cmd, ifra, AF_INET);
		/* Preserve ENOTTY so doifioctl still runs pr_ioctl. */
		if (error == 0)
			error = ENOTTY;
		break;
	}
#endif
#ifdef INET6
	case SIOCAIFADDR_IN6:
	case SIOCDIFADDR_IN6: {
		struct in6_aliasreq _ifra = *(const struct in6_aliasreq *)data;
		struct in6_aliasreq *ifra = &_ifra;
		KASSERT(error == ENOTTY);
		strncpy(ifra->ifra_name, rumpuser_wg_get_tunname(wg->wg_user),
		    IFNAMSIZ);
		error = rumpuser_wg_ioctl(wg->wg_user, cmd, ifra, AF_INET6);
		if (error == 0)
			error = ENOTTY;
		break;
	}
#endif
	default:
		break;
	}
#endif /* WG_RUMPKERNEL */

	return error;
}
5196
/*
 * wg_init(ifp)
 *
 *	if_init routine: mark the interface running.  There is no
 *	hardware or queue state to set up.  Always returns 0.
 */
static int
wg_init(struct ifnet *ifp)
{

	ifp->if_flags |= IFF_RUNNING;

	/* TODO flush pending packets. */
	return 0;
}
5206
#ifdef ALTQ
/*
 * wg_start(ifp)
 *
 *	if_start routine, used only with ALTQ: drain ifp's send queue
 *	into the wg packet queue for encryption, dropping packets that
 *	do not fit.
 */
static void
wg_start(struct ifnet *ifp)
{
	struct mbuf *m;

	for (;;) {
		IFQ_DEQUEUE(&ifp->if_snd, m);
		if (m == NULL)
			return;

		kpreempt_disable();
		const uint32_t hash = curcpu()->ci_index; // pktq_rps_hash(m)
		if (!pktq_enqueue(wg_pktq, m, hash)) {
			WGLOG(LOG_ERR, "%s: pktq full, dropping\n",
			    if_name(ifp));
			m_freem(m);
		}
		kpreempt_enable();
	}
}
#endif
5229
/*
 * wg_stop(ifp, disable)
 *
 *	if_stop routine: mark the interface no longer running.  The
 *	disable argument is currently unused.
 */
static void
wg_stop(struct ifnet *ifp, int disable)
{

	KASSERT((ifp->if_flags & IFF_RUNNING) != 0);
	ifp->if_flags &= ~IFF_RUNNING;

	/* Need to do something? */
}
5239
#ifdef WG_DEBUG_PARAMS
/*
 * sysctl_net_wg_setup
 *
 *	Create the net.wg sysctl subtree exposing the protocol timing
 *	parameters and debug knobs for testing.  Fixes typos in the
 *	descriptions ("liftime" -> "lifetime", "detemine" ->
 *	"determine").
 */
SYSCTL_SETUP(sysctl_net_wg_setup, "sysctl net.wg setup")
{
	const struct sysctlnode *node = NULL;

	sysctl_createv(clog, 0, NULL, &node,
	    CTLFLAG_PERMANENT,
	    CTLTYPE_NODE, "wg",
	    SYSCTL_DESCR("wg(4)"),
	    NULL, 0, NULL, 0,
	    CTL_NET, CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, &node, NULL,
	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
	    CTLTYPE_QUAD, "rekey_after_messages",
	    SYSCTL_DESCR("session lifetime by messages"),
	    NULL, 0, &wg_rekey_after_messages, 0, CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, &node, NULL,
	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
	    CTLTYPE_INT, "rekey_after_time",
	    SYSCTL_DESCR("session lifetime"),
	    NULL, 0, &wg_rekey_after_time, 0, CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, &node, NULL,
	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
	    CTLTYPE_INT, "rekey_timeout",
	    SYSCTL_DESCR("session handshake retry time"),
	    NULL, 0, &wg_rekey_timeout, 0, CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, &node, NULL,
	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
	    CTLTYPE_INT, "rekey_attempt_time",
	    SYSCTL_DESCR("session handshake timeout"),
	    NULL, 0, &wg_rekey_attempt_time, 0, CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, &node, NULL,
	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
	    CTLTYPE_INT, "keepalive_timeout",
	    SYSCTL_DESCR("keepalive timeout"),
	    NULL, 0, &wg_keepalive_timeout, 0, CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, &node, NULL,
	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
	    CTLTYPE_BOOL, "force_underload",
	    SYSCTL_DESCR("force to determine under load"),
	    NULL, 0, &wg_force_underload, 0, CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, &node, NULL,
	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
	    CTLTYPE_INT, "debug",
	    SYSCTL_DESCR("set debug flags 1=log 2=trace 4=dump 8=packet"),
	    NULL, 0, &wg_debug, 0, CTL_CREATE, CTL_EOL);
}
#endif
5288
5289 #ifdef WG_RUMPKERNEL
5290 static bool
5291 wg_user_mode(struct wg_softc *wg)
5292 {
5293
5294 return wg->wg_user != NULL;
5295 }
5296
5297 static int
5298 wg_ioctl_linkstr(struct wg_softc *wg, struct ifdrv *ifd)
5299 {
5300 struct ifnet *ifp = &wg->wg_if;
5301 int error;
5302
5303 if (ifp->if_flags & IFF_UP)
5304 return EBUSY;
5305
5306 if (ifd->ifd_cmd == IFLINKSTR_UNSET) {
5307 /* XXX do nothing */
5308 return 0;
5309 } else if (ifd->ifd_cmd != 0) {
5310 return EINVAL;
5311 } else if (wg->wg_user != NULL) {
5312 return EBUSY;
5313 }
5314
5315 /* Assume \0 included */
5316 if (ifd->ifd_len > IFNAMSIZ) {
5317 return E2BIG;
5318 } else if (ifd->ifd_len < 1) {
5319 return EINVAL;
5320 }
5321
5322 char tun_name[IFNAMSIZ];
5323 error = copyinstr(ifd->ifd_data, tun_name, ifd->ifd_len, NULL);
5324 if (error != 0)
5325 return error;
5326
5327 if (strncmp(tun_name, "tun", 3) != 0)
5328 return EINVAL;
5329
5330 error = rumpuser_wg_create(tun_name, wg, &wg->wg_user);
5331
5332 return error;
5333 }
5334
5335 static int
5336 wg_send_user(struct wg_peer *wgp, struct mbuf *m)
5337 {
5338 int error;
5339 struct psref psref;
5340 struct wg_sockaddr *wgsa;
5341 struct wg_softc *wg = wgp->wgp_sc;
5342 struct iovec iov[1];
5343
5344 wgsa = wg_get_endpoint_sa(wgp, &psref);
5345
5346 iov[0].iov_base = mtod(m, void *);
5347 iov[0].iov_len = m->m_len;
5348
5349 /* Send messages to a peer via an ordinary socket. */
5350 error = rumpuser_wg_send_peer(wg->wg_user, wgsatosa(wgsa), iov, 1);
5351
5352 wg_put_sa(wgp, wgsa, &psref);
5353
5354 m_freem(m);
5355
5356 return error;
5357 }
5358
5359 static void
5360 wg_input_user(struct ifnet *ifp, struct mbuf *m, const int af)
5361 {
5362 struct wg_softc *wg = ifp->if_softc;
5363 struct iovec iov[2];
5364 struct sockaddr_storage ss;
5365
5366 KASSERT(af == AF_INET || af == AF_INET6);
5367
5368 WG_TRACE("");
5369
5370 switch (af) {
5371 #ifdef INET
5372 case AF_INET: {
5373 struct sockaddr_in *sin = (struct sockaddr_in *)&ss;
5374 struct ip *ip;
5375
5376 KASSERT(m->m_len >= sizeof(struct ip));
5377 ip = mtod(m, struct ip *);
5378 sockaddr_in_init(sin, &ip->ip_dst, 0);
5379 break;
5380 }
5381 #endif
5382 #ifdef INET6
5383 case AF_INET6: {
5384 struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&ss;
5385 struct ip6_hdr *ip6;
5386
5387 KASSERT(m->m_len >= sizeof(struct ip6_hdr));
5388 ip6 = mtod(m, struct ip6_hdr *);
5389 sockaddr_in6_init(sin6, &ip6->ip6_dst, 0, 0, 0);
5390 break;
5391 }
5392 #endif
5393 default:
5394 goto out;
5395 }
5396
5397 iov[0].iov_base = &ss;
5398 iov[0].iov_len = ss.ss_len;
5399 iov[1].iov_base = mtod(m, void *);
5400 iov[1].iov_len = m->m_len;
5401
5402 WG_DUMP_BUF(iov[1].iov_base, iov[1].iov_len);
5403
5404 /* Send decrypted packets to users via a tun. */
5405 rumpuser_wg_send_user(wg->wg_user, iov, 2);
5406
5407 out: m_freem(m);
5408 }
5409
5410 static int
5411 wg_bind_port_user(struct wg_softc *wg, const uint16_t port)
5412 {
5413 int error;
5414 uint16_t old_port = wg->wg_listen_port;
5415
5416 if (port != 0 && old_port == port)
5417 return 0;
5418
5419 error = rumpuser_wg_sock_bind(wg->wg_user, port);
5420 if (error)
5421 return error;
5422
5423 wg->wg_listen_port = port;
5424 return 0;
5425 }
5426
5427 /*
5428 * Receive user packets.
5429 */
5430 void
5431 rumpkern_wg_recv_user(struct wg_softc *wg, struct iovec *iov, size_t iovlen)
5432 {
5433 struct ifnet *ifp = &wg->wg_if;
5434 struct mbuf *m;
5435 const struct sockaddr *dst;
5436 int error;
5437
5438 WG_TRACE("");
5439
5440 dst = iov[0].iov_base;
5441
5442 m = m_gethdr(M_DONTWAIT, MT_DATA);
5443 if (m == NULL)
5444 return;
5445 m->m_len = m->m_pkthdr.len = 0;
5446 m_copyback(m, 0, iov[1].iov_len, iov[1].iov_base);
5447
5448 WG_DLOG("iov_len=%zu\n", iov[1].iov_len);
5449 WG_DUMP_BUF(iov[1].iov_base, iov[1].iov_len);
5450
5451 error = wg_output(ifp, m, dst, NULL); /* consumes m */
5452 if (error)
5453 WG_DLOG("wg_output failed, error=%d\n", error);
5454 }
5455
5456 /*
5457 * Receive packets from a peer.
5458 */
5459 void
5460 rumpkern_wg_recv_peer(struct wg_softc *wg, struct iovec *iov, size_t iovlen)
5461 {
5462 struct mbuf *m;
5463 const struct sockaddr *src;
5464 int bound;
5465
5466 WG_TRACE("");
5467
5468 src = iov[0].iov_base;
5469
5470 m = m_gethdr(M_DONTWAIT, MT_DATA);
5471 if (m == NULL)
5472 return;
5473 m->m_len = m->m_pkthdr.len = 0;
5474 m_copyback(m, 0, iov[1].iov_len, iov[1].iov_base);
5475
5476 WG_DLOG("iov_len=%zu\n", iov[1].iov_len);
5477 WG_DUMP_BUF(iov[1].iov_base, iov[1].iov_len);
5478
5479 bound = curlwp_bind();
5480 wg_handle_packet(wg, m, src);
5481 curlwp_bindx(bound);
5482 }
5483 #endif /* WG_RUMPKERNEL */
5484
5485 /*
5486 * Module infrastructure
5487 */
5488 #include "if_module.h"
5489
5490 IF_MODULE(MODULE_CLASS_DRIVER, wg, "sodium,blake2s")
5491