1 /*	$NetBSD: if_wg.c,v 1.102 2024/07/28 14:45:51 riastradh Exp $	*/
2
3 /*
4  * Copyright (C) Ryota Ozaki <ozaki.ryota@gmail.com>
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. Neither the name of the project nor the names of its contributors
16 * may be used to endorse or promote products derived from this software
17 * without specific prior written permission.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
20 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
23 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 * SUCH DAMAGE.
30 */
31
32 /*
33 * This network interface aims to implement the WireGuard protocol.
34  * The implementation is based on the WireGuard paper as of
35  * 2018-06-30 [1]. The paper is referred to in the source code with the
36  * label [W]. The specification of the Noise protocol framework as of
37  * 2018-07-11 [2] is referred to with the label [N].
38 *
39 * [1] https://www.wireguard.com/papers/wireguard.pdf
40 * [2] http://noiseprotocol.org/noise.pdf
41 */
42
43 #include <sys/cdefs.h>
44 __KERNEL_RCSID(0, "$NetBSD: if_wg.c,v 1.102 2024/07/28 14:45:51 riastradh Exp $");
45
46 #ifdef _KERNEL_OPT
47 #include "opt_altq_enabled.h"
48 #include "opt_inet.h"
49 #endif
50
51 #include <sys/param.h>
52 #include <sys/types.h>
53
54 #include <sys/atomic.h>
55 #include <sys/callout.h>
56 #include <sys/cprng.h>
57 #include <sys/cpu.h>
58 #include <sys/device.h>
59 #include <sys/domain.h>
60 #include <sys/errno.h>
61 #include <sys/intr.h>
62 #include <sys/ioctl.h>
63 #include <sys/kernel.h>
64 #include <sys/kmem.h>
65 #include <sys/mbuf.h>
66 #include <sys/module.h>
67 #include <sys/mutex.h>
68 #include <sys/once.h>
69 #include <sys/percpu.h>
70 #include <sys/pserialize.h>
71 #include <sys/psref.h>
72 #include <sys/queue.h>
73 #include <sys/rwlock.h>
74 #include <sys/socket.h>
75 #include <sys/socketvar.h>
76 #include <sys/sockio.h>
77 #include <sys/sysctl.h>
78 #include <sys/syslog.h>
79 #include <sys/systm.h>
80 #include <sys/thmap.h>
81 #include <sys/threadpool.h>
82 #include <sys/time.h>
83 #include <sys/timespec.h>
84 #include <sys/workqueue.h>
85
86 #include <lib/libkern/libkern.h>
87
88 #include <net/bpf.h>
89 #include <net/if.h>
90 #include <net/if_types.h>
91 #include <net/if_wg.h>
92 #include <net/pktqueue.h>
93 #include <net/route.h>
94
95 #include <netinet/in.h>
96 #include <netinet/in_pcb.h>
97 #include <netinet/in_var.h>
98 #include <netinet/ip.h>
99 #include <netinet/ip_var.h>
100 #include <netinet/udp.h>
101 #include <netinet/udp_var.h>
102
103 #ifdef INET6
104 #include <netinet/ip6.h>
105 #include <netinet6/in6_pcb.h>
106 #include <netinet6/in6_var.h>
107 #include <netinet6/ip6_var.h>
108 #include <netinet6/udp6_var.h>
109 #endif /* INET6 */
110
111 #include <prop/proplib.h>
112
113 #include <crypto/blake2/blake2s.h>
114 #include <crypto/sodium/crypto_aead_chacha20poly1305.h>
115 #include <crypto/sodium/crypto_aead_xchacha20poly1305.h>
116 #include <crypto/sodium/crypto_scalarmult.h>
117
118 #include "ioconf.h"
119
120 #ifdef WG_RUMPKERNEL
121 #include "wg_user.h"
122 #endif
123
124 /*
125 * Data structures
126  * - struct wg_softc is an instance of a wg interface
127  *   - It has a list of peers (struct wg_peer)
128  *   - It has a threadpool job that sends/receives handshake messages and
129  *     runs event handlers
130  *   - It has its own two routing tables: one for IPv4 and the other for IPv6
131  * - struct wg_peer represents a peer
132  *   - It has a struct work to handle handshakes and timer tasks
133  *   - It has a pair of session instances (struct wg_session)
134  *   - It has a pair of endpoint instances (struct wg_sockaddr)
135  *     - Normally one endpoint is used; the second one is used only during
136  *       a peer migration (a change of the peer's IP address)
137  *   - It has a list of IP addresses and subnetworks called allowedips
138  *     (struct wg_allowedip)
139  *     - A packet sent over a session is allowed if its destination matches
140  *       any IP address or subnetwork in the list
141  * - struct wg_session represents a session of a secure tunnel with a peer
142  *   - Two session instances belong to a peer: a stable session and an
143  *     unstable session
144  *   - A session's handshake always starts with the unstable instance
145  *   - Once a session is established, its instance becomes stable and the
146  *     other becomes unstable instead
147 * - Data messages are always sent via a stable session
148 *
149 * Locking notes:
150 * - Each wg has a mutex(9) wg_lock, and a rwlock(9) wg_rwlock
151 * - Changes to the peer list are serialized by wg_lock
152 * - The peer list may be read with pserialize(9) and psref(9)
153 * - The rwlock (wg_rwlock) protects the routing tables (wg_rtable_ipv[46])
154 * => XXX replace by pserialize when routing table is psz-safe
155 * - Each peer (struct wg_peer, wgp) has a mutex wgp_lock, which can be taken
156 * only in thread context and serializes:
157 * - the stable and unstable session pointers
158 * - all unstable session state
159 * - Packet processing may be done in softint context:
160 * - The stable session can be read under pserialize(9) or psref(9)
161 * - The stable session is always ESTABLISHED
162 * - On a session swap, we must wait for all readers to release a
163 * reference to a stable session before changing wgs_state and
164 * session states
165 * - Lock order: wg_lock -> wgp_lock
166 */
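/*
 * Example (a sketch only, not part of any code path): honouring the
 * lock order above, code that must visit every peer and update
 * per-peer state takes wg_lock first and wgp_lock second:
 *
 *	mutex_enter(wg->wg_lock);
 *	WG_PEER_WRITER_FOREACH(wgp, wg) {
 *		mutex_enter(wgp->wgp_lock);
 *		... update per-peer state ...
 *		mutex_exit(wgp->wgp_lock);
 *	}
 *	mutex_exit(wg->wg_lock);
 */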
167
168
169 #define WGLOG(level, fmt, args...) \
170 log(level, "%s: " fmt, __func__, ##args)
171
172 #define WG_DEBUG
173
174 /* Debug options */
175 #ifdef WG_DEBUG
176 /* Output debug logs */
177 #ifndef WG_DEBUG_LOG
178 #define WG_DEBUG_LOG
179 #endif
180 /* Output trace logs */
181 #ifndef WG_DEBUG_TRACE
182 #define WG_DEBUG_TRACE
183 #endif
184 /* Output hash values, etc. */
185 #ifndef WG_DEBUG_DUMP
186 #define WG_DEBUG_DUMP
187 #endif
188 /* debug packets */
189 #ifndef WG_DEBUG_PACKET
190 #define WG_DEBUG_PACKET
191 #endif
192 /* Make some internal parameters configurable for testing and debugging */
193 #ifndef WG_DEBUG_PARAMS
194 #define WG_DEBUG_PARAMS
195 #endif
196 #endif /* WG_DEBUG */
197
198 #ifndef WG_DEBUG
199 # if defined(WG_DEBUG_LOG) || defined(WG_DEBUG_TRACE) || \
200 defined(WG_DEBUG_DUMP) || defined(WG_DEBUG_PARAMS) || \
201 defined(WG_DEBUG_PACKET)
202 # define WG_DEBUG
203 # endif
204 #endif
205
206 #ifdef WG_DEBUG
207 int wg_debug;
208 #define WG_DEBUG_FLAGS_LOG 1
209 #define WG_DEBUG_FLAGS_TRACE 2
210 #define WG_DEBUG_FLAGS_DUMP 4
211 #define WG_DEBUG_FLAGS_PACKET 8
212 #endif
213
214
215 #ifdef WG_DEBUG_TRACE
216 #define WG_TRACE(msg) do { \
217 if (wg_debug & WG_DEBUG_FLAGS_TRACE) \
218 log(LOG_DEBUG, "%s:%d: %s\n", __func__, __LINE__, (msg)); \
219 } while (0)
220 #else
221 #define WG_TRACE(msg) __nothing
222 #endif
223
224 #ifdef WG_DEBUG_LOG
225 #define WG_DLOG(fmt, args...) do { \
226 if (wg_debug & WG_DEBUG_FLAGS_LOG) \
227 log(LOG_DEBUG, "%s: " fmt, __func__, ##args); \
228 } while (0)
229 #else
230 #define WG_DLOG(fmt, args...) __nothing
231 #endif
232
233 #define WG_LOG_RATECHECK(wgprc, level, fmt, args...) do { \
234 if (ppsratecheck(&(wgprc)->wgprc_lasttime, \
235 &(wgprc)->wgprc_curpps, 1)) { \
236 log(level, fmt, ##args); \
237 } \
238 } while (0)
239
240 #ifdef WG_DEBUG_PARAMS
241 static bool wg_force_underload = false;
242 #endif
243
244 #ifdef WG_DEBUG_DUMP
245
246 static char enomem[10] = "[enomem]";
247
248 #define MAX_HDUMP_LEN 10000 /* large enough */
249
250
251 static char *
252 gethexdump(const void *vp, size_t n)
253 {
254 char *buf;
255 const uint8_t *p = vp;
256 size_t i, alloc;
257
258 alloc = n;
259 if (n > MAX_HDUMP_LEN)
260 alloc = MAX_HDUMP_LEN;
261 buf = kmem_alloc(3 * alloc + 5, KM_NOSLEEP);
262 if (buf == NULL)
263 return enomem;
264 for (i = 0; i < alloc; i++)
265 snprintf(buf + 3 * i, 3 + 1, " %02hhx", p[i]);
266 if (alloc != n)
267 snprintf(buf + 3 * i, 4 + 1, " ...");
268 return buf;
269 }
270
271 static void
272 puthexdump(char *buf, const void *p, size_t n)
273 {
274
275 if (buf == NULL || buf == enomem)
276 return;
277 if (n > MAX_HDUMP_LEN)
278 n = MAX_HDUMP_LEN;
279 kmem_free(buf, 3 * n + 5);
280 }
281
282 #ifdef WG_RUMPKERNEL
283 static void
284 wg_dump_buf(const char *func, const char *buf, const size_t size)
285 {
286 if ((wg_debug & WG_DEBUG_FLAGS_DUMP) == 0)
287 return;
288
289 char *hex = gethexdump(buf, size);
290
291 log(LOG_DEBUG, "%s: %s\n", func, hex);
292 puthexdump(hex, buf, size);
293 }
294 #endif
295
296 static void
297 wg_dump_hash(const uint8_t *func, const uint8_t *name, const uint8_t *hash,
298 const size_t size)
299 {
300 if ((wg_debug & WG_DEBUG_FLAGS_DUMP) == 0)
301 return;
302
303 char *hex = gethexdump(hash, size);
304
305 log(LOG_DEBUG, "%s: %s: %s\n", func, name, hex);
306 puthexdump(hex, hash, size);
307 }
308
309 #define WG_DUMP_HASH(name, hash) \
310 wg_dump_hash(__func__, name, hash, WG_HASH_LEN)
311 #define WG_DUMP_HASH48(name, hash) \
312 wg_dump_hash(__func__, name, hash, 48)
313 #define WG_DUMP_BUF(buf, size) \
314 wg_dump_buf(__func__, buf, size)
315 #else
316 #define WG_DUMP_HASH(name, hash) __nothing
317 #define WG_DUMP_HASH48(name, hash) __nothing
318 #define WG_DUMP_BUF(buf, size) __nothing
319 #endif /* WG_DEBUG_DUMP */
320
321 /* chosen somewhat arbitrarily -- fits in signed 16 bits NUL-terminated */
322 #define WG_MAX_PROPLEN 32766
323
324 #define WG_MTU 1420
325 #define WG_ALLOWEDIPS 16
326
327 #define CURVE25519_KEY_LEN 32
328 #define TAI64N_LEN sizeof(uint32_t) * 3
329 #define POLY1305_AUTHTAG_LEN 16
330 #define HMAC_BLOCK_LEN 64
331
332 /* [N] 4.1: "DHLEN must be 32 or greater." WireGuard chooses 32. */
333 /* [N] 4.3: Hash functions */
334 #define NOISE_DHLEN 32
335 /* [N] 4.3: "Must be 32 or 64." WireGuard chooses 32. */
336 #define NOISE_HASHLEN 32
337 #define NOISE_BLOCKLEN 64
338 #define NOISE_HKDF_OUTPUT_LEN NOISE_HASHLEN
339 /* [N] 5.1: "k" */
340 #define NOISE_CIPHER_KEY_LEN 32
341 /*
342 * [N] 9.2: "psk"
343 * "... psk is a 32-byte secret value provided by the application."
344 */
345 #define NOISE_PRESHARED_KEY_LEN 32
346
347 #define WG_STATIC_KEY_LEN CURVE25519_KEY_LEN
348 #define WG_TIMESTAMP_LEN TAI64N_LEN
349
350 #define WG_PRESHARED_KEY_LEN NOISE_PRESHARED_KEY_LEN
351
352 #define WG_COOKIE_LEN 16
353 #define WG_MAC_LEN 16
354 #define WG_COOKIESECRET_LEN 32
355
356 #define WG_EPHEMERAL_KEY_LEN CURVE25519_KEY_LEN
357 /* [N] 5.2: "ck: A chaining key of HASHLEN bytes" */
358 #define WG_CHAINING_KEY_LEN NOISE_HASHLEN
359 /* [N] 5.2: "h: A hash output of HASHLEN bytes" */
360 #define WG_HASH_LEN NOISE_HASHLEN
361 #define WG_CIPHER_KEY_LEN NOISE_CIPHER_KEY_LEN
362 #define WG_DH_OUTPUT_LEN NOISE_DHLEN
363 #define WG_KDF_OUTPUT_LEN NOISE_HKDF_OUTPUT_LEN
364 #define WG_AUTHTAG_LEN POLY1305_AUTHTAG_LEN
365 #define WG_DATA_KEY_LEN 32
366 #define WG_SALT_LEN 24
367
368 /*
369 * The protocol messages
370 */
371 struct wg_msg {
372 uint32_t wgm_type;
373 } __packed;
374
375 /* [W] 5.4.2 First Message: Initiator to Responder */
376 struct wg_msg_init {
377 uint32_t wgmi_type;
378 uint32_t wgmi_sender;
379 uint8_t wgmi_ephemeral[WG_EPHEMERAL_KEY_LEN];
380 uint8_t wgmi_static[WG_STATIC_KEY_LEN + WG_AUTHTAG_LEN];
381 uint8_t wgmi_timestamp[WG_TIMESTAMP_LEN + WG_AUTHTAG_LEN];
382 uint8_t wgmi_mac1[WG_MAC_LEN];
383 uint8_t wgmi_mac2[WG_MAC_LEN];
384 } __packed;
385
386 /* [W] 5.4.3 Second Message: Responder to Initiator */
387 struct wg_msg_resp {
388 uint32_t wgmr_type;
389 uint32_t wgmr_sender;
390 uint32_t wgmr_receiver;
391 uint8_t wgmr_ephemeral[WG_EPHEMERAL_KEY_LEN];
392 uint8_t wgmr_empty[0 + WG_AUTHTAG_LEN];
393 uint8_t wgmr_mac1[WG_MAC_LEN];
394 uint8_t wgmr_mac2[WG_MAC_LEN];
395 } __packed;
396
397 /* [W] 5.4.6 Subsequent Messages: Transport Data Messages */
398 struct wg_msg_data {
399 uint32_t wgmd_type;
400 uint32_t wgmd_receiver;
401 uint64_t wgmd_counter;
402 uint32_t wgmd_packet[0];
403 } __packed;
404
405 /* [W] 5.4.7 Under Load: Cookie Reply Message */
406 struct wg_msg_cookie {
407 uint32_t wgmc_type;
408 uint32_t wgmc_receiver;
409 uint8_t wgmc_salt[WG_SALT_LEN];
410 uint8_t wgmc_cookie[WG_COOKIE_LEN + WG_AUTHTAG_LEN];
411 } __packed;
412
413 #define WG_MSG_TYPE_INIT 1
414 #define WG_MSG_TYPE_RESP 2
415 #define WG_MSG_TYPE_COOKIE 3
416 #define WG_MSG_TYPE_DATA 4
417 #define WG_MSG_TYPE_MAX WG_MSG_TYPE_DATA
418
419 /* Sliding windows */
420
421 #define SLIWIN_BITS 2048u
422 #define SLIWIN_TYPE uint32_t
423 #define SLIWIN_BPW NBBY*sizeof(SLIWIN_TYPE)
424 #define SLIWIN_WORDS howmany(SLIWIN_BITS, SLIWIN_BPW)
425 #define SLIWIN_NPKT (SLIWIN_BITS - NBBY*sizeof(SLIWIN_TYPE))
426
427 struct sliwin {
428 SLIWIN_TYPE B[SLIWIN_WORDS];
429 uint64_t T;
430 };
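/*
 * Added summary: T is the highest counter value accepted so far and B
 * is a bitmap remembering which of the SLIWIN_NPKT (2048 - 32 = 2016)
 * counters at or below T have already been seen ([W] 5.4.6).  For
 * example, with T = 5000, an incoming counter 4990 is accepted iff its
 * bit is still clear, counter 5003 advances the window, and counter
 * 2000 (more than SLIWIN_NPKT behind T) is rejected outright.
 */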
431
432 static void
433 sliwin_reset(struct sliwin *W)
434 {
435
436 memset(W, 0, sizeof(*W));
437 }
438
439 static int
440 sliwin_check_fast(const volatile struct sliwin *W, uint64_t S)
441 {
442
443 /*
444 * If it's more than one window older than the highest sequence
445 * number we've seen, reject.
446 */
447 #ifdef __HAVE_ATOMIC64_LOADSTORE
448 if (S + SLIWIN_NPKT < atomic_load_relaxed(&W->T))
449 return EAUTH;
450 #endif
451
452 /*
453 * Otherwise, we need to take the lock to decide, so don't
454 * reject just yet. Caller must serialize a call to
455 * sliwin_update in this case.
456 */
457 return 0;
458 }
459
460 static int
461 sliwin_update(struct sliwin *W, uint64_t S)
462 {
463 unsigned word, bit;
464
465 /*
466 * If it's more than one window older than the highest sequence
467 * number we've seen, reject.
468 */
469 if (S + SLIWIN_NPKT < W->T)
470 return EAUTH;
471
472 /*
473 * If it's higher than the highest sequence number we've seen,
474 * advance the window.
475 */
476 if (S > W->T) {
477 uint64_t i = W->T / SLIWIN_BPW;
478 uint64_t j = S / SLIWIN_BPW;
479 unsigned k;
480
481 for (k = 0; k < MIN(j - i, SLIWIN_WORDS); k++)
482 W->B[(i + k + 1) % SLIWIN_WORDS] = 0;
483 #ifdef __HAVE_ATOMIC64_LOADSTORE
484 atomic_store_relaxed(&W->T, S);
485 #else
486 W->T = S;
487 #endif
488 }
489
490 /* Test and set the bit -- if already set, reject. */
491 word = (S / SLIWIN_BPW) % SLIWIN_WORDS;
492 bit = S % SLIWIN_BPW;
493 if (W->B[word] & (1UL << bit))
494 return EAUTH;
495 W->B[word] |= 1U << bit;
496
497 /* Accept! */
498 return 0;
499 }
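/*
 * Expected calling pattern (a sketch inferred from the comments in
 * sliwin_check_fast and from the wgs_recvwin member of struct
 * wg_session; the variable names here are illustrative only):
 *
 *	if (sliwin_check_fast(&recvwin->window, counter))
 *		goto reject;		// certainly too old
 *	... authenticate the packet ...
 *	mutex_enter(&recvwin->lock);
 *	error = sliwin_update(&recvwin->window, counter);
 *	mutex_exit(&recvwin->lock);
 *	if (error)
 *		goto reject;		// replayed or too old
 */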
500
501 struct wg_session {
502 struct wg_peer *wgs_peer;
503 struct psref_target
504 wgs_psref;
505
506 int wgs_state;
507 #define WGS_STATE_UNKNOWN 0
508 #define WGS_STATE_INIT_ACTIVE 1
509 #define WGS_STATE_INIT_PASSIVE 2
510 #define WGS_STATE_ESTABLISHED 3
511 #define WGS_STATE_DESTROYING 4
512
513 time_t wgs_time_established;
514 time_t wgs_time_last_data_sent;
515 bool wgs_is_initiator;
516
517 uint32_t wgs_local_index;
518 uint32_t wgs_remote_index;
519 #ifdef __HAVE_ATOMIC64_LOADSTORE
520 volatile uint64_t
521 wgs_send_counter;
522 #else
523 kmutex_t wgs_send_counter_lock;
524 uint64_t wgs_send_counter;
525 #endif
526
527 struct {
528 kmutex_t lock;
529 struct sliwin window;
530 } *wgs_recvwin;
531
532 uint8_t wgs_handshake_hash[WG_HASH_LEN];
533 uint8_t wgs_chaining_key[WG_CHAINING_KEY_LEN];
534 uint8_t wgs_ephemeral_key_pub[WG_EPHEMERAL_KEY_LEN];
535 uint8_t wgs_ephemeral_key_priv[WG_EPHEMERAL_KEY_LEN];
536 uint8_t wgs_ephemeral_key_peer[WG_EPHEMERAL_KEY_LEN];
537 uint8_t wgs_tkey_send[WG_DATA_KEY_LEN];
538 uint8_t wgs_tkey_recv[WG_DATA_KEY_LEN];
539 };
540
541 struct wg_sockaddr {
542 union {
543 struct sockaddr_storage _ss;
544 struct sockaddr _sa;
545 struct sockaddr_in _sin;
546 struct sockaddr_in6 _sin6;
547 };
548 struct psref_target wgsa_psref;
549 };
550
551 #define wgsatoss(wgsa) (&(wgsa)->_ss)
552 #define wgsatosa(wgsa) (&(wgsa)->_sa)
553 #define wgsatosin(wgsa) (&(wgsa)->_sin)
554 #define wgsatosin6(wgsa) (&(wgsa)->_sin6)
555
556 #define wgsa_family(wgsa) (wgsatosa(wgsa)->sa_family)
557
558 struct wg_peer;
559 struct wg_allowedip {
560 struct radix_node wga_nodes[2];
561 struct wg_sockaddr _wga_sa_addr;
562 struct wg_sockaddr _wga_sa_mask;
563 #define wga_sa_addr _wga_sa_addr._sa
564 #define wga_sa_mask _wga_sa_mask._sa
565
566 int wga_family;
567 uint8_t wga_cidr;
568 union {
569 struct in_addr _ip4;
570 struct in6_addr _ip6;
571 } wga_addr;
572 #define wga_addr4 wga_addr._ip4
573 #define wga_addr6 wga_addr._ip6
574
575 struct wg_peer *wga_peer;
576 };
577
578 typedef uint8_t wg_timestamp_t[WG_TIMESTAMP_LEN];
579
580 struct wg_ppsratecheck {
581 struct timeval wgprc_lasttime;
582 int wgprc_curpps;
583 };
584
585 struct wg_softc;
586 struct wg_peer {
587 struct wg_softc *wgp_sc;
588 char wgp_name[WG_PEER_NAME_MAXLEN + 1];
589 struct pslist_entry wgp_peerlist_entry;
590 pserialize_t wgp_psz;
591 struct psref_target wgp_psref;
592 kmutex_t *wgp_lock;
593 kmutex_t *wgp_intr_lock;
594
595 uint8_t wgp_pubkey[WG_STATIC_KEY_LEN];
596 struct wg_sockaddr *wgp_endpoint;
597 struct wg_sockaddr *wgp_endpoint0;
598 volatile unsigned wgp_endpoint_changing;
599 bool wgp_endpoint_available;
600
601 /* The preshared key (optional) */
602 uint8_t wgp_psk[WG_PRESHARED_KEY_LEN];
603
604 struct wg_session *wgp_session_stable;
605 struct wg_session *wgp_session_unstable;
606
607 /* first outgoing packet awaiting session initiation */
608 struct mbuf *volatile wgp_pending;
609
610 /* timestamp in big-endian */
611 wg_timestamp_t wgp_timestamp_latest_init;
612
613 struct timespec wgp_last_handshake_time;
614
615 callout_t wgp_handshake_timeout_timer;
616 callout_t wgp_session_dtor_timer;
617
618 time_t wgp_handshake_start_time;
619
620 volatile unsigned wgp_force_rekey;
621
622 int wgp_n_allowedips;
623 struct wg_allowedip wgp_allowedips[WG_ALLOWEDIPS];
624
625 time_t wgp_latest_cookie_time;
626 uint8_t wgp_latest_cookie[WG_COOKIE_LEN];
627 uint8_t wgp_last_sent_mac1[WG_MAC_LEN];
628 bool wgp_last_sent_mac1_valid;
629 uint8_t wgp_last_sent_cookie[WG_COOKIE_LEN];
630 bool wgp_last_sent_cookie_valid;
631
632 time_t wgp_last_msg_received_time[WG_MSG_TYPE_MAX];
633
634 time_t wgp_last_cookiesecret_time;
635 uint8_t wgp_cookiesecret[WG_COOKIESECRET_LEN];
636
637 struct wg_ppsratecheck wgp_ppsratecheck;
638
639 struct work wgp_work;
640 unsigned int wgp_tasks;
641 #define WGP_TASK_SEND_INIT_MESSAGE __BIT(0)
642 #define WGP_TASK_RETRY_HANDSHAKE __BIT(1)
643 #define WGP_TASK_ESTABLISH_SESSION __BIT(2)
644 #define WGP_TASK_ENDPOINT_CHANGED __BIT(3)
645 #define WGP_TASK_SEND_KEEPALIVE_MESSAGE __BIT(4)
646 #define WGP_TASK_DESTROY_PREV_SESSION __BIT(5)
647 };
648
649 struct wg_ops;
650
651 struct wg_softc {
652 struct ifnet wg_if;
653 LIST_ENTRY(wg_softc) wg_list;
654 kmutex_t *wg_lock;
655 kmutex_t *wg_intr_lock;
656 krwlock_t *wg_rwlock;
657
658 uint8_t wg_privkey[WG_STATIC_KEY_LEN];
659 uint8_t wg_pubkey[WG_STATIC_KEY_LEN];
660
661 int wg_npeers;
662 struct pslist_head wg_peers;
663 struct thmap *wg_peers_bypubkey;
664 struct thmap *wg_peers_byname;
665 struct thmap *wg_sessions_byindex;
666 uint16_t wg_listen_port;
667
668 struct threadpool *wg_threadpool;
669
670 struct threadpool_job wg_job;
671 int wg_upcalls;
672 #define WG_UPCALL_INET __BIT(0)
673 #define WG_UPCALL_INET6 __BIT(1)
674
675 #ifdef INET
676 struct socket *wg_so4;
677 struct radix_node_head *wg_rtable_ipv4;
678 #endif
679 #ifdef INET6
680 struct socket *wg_so6;
681 struct radix_node_head *wg_rtable_ipv6;
682 #endif
683
684 struct wg_ppsratecheck wg_ppsratecheck;
685
686 struct wg_ops *wg_ops;
687
688 #ifdef WG_RUMPKERNEL
689 struct wg_user *wg_user;
690 #endif
691 };
692
693 /* [W] 6.1 Preliminaries */
694 #define WG_REKEY_AFTER_MESSAGES (1ULL << 60)
695 #define WG_REJECT_AFTER_MESSAGES (UINT64_MAX - (1 << 13))
696 #define WG_REKEY_AFTER_TIME 120
697 #define WG_REJECT_AFTER_TIME 180
698 #define WG_REKEY_ATTEMPT_TIME 90
699 #define WG_REKEY_TIMEOUT 5
700 #define WG_KEEPALIVE_TIMEOUT 10
701
702 #define WG_COOKIE_TIME 120
703 #define WG_COOKIESECRET_TIME (2 * 60)
704
705 static uint64_t wg_rekey_after_messages = WG_REKEY_AFTER_MESSAGES;
706 static uint64_t wg_reject_after_messages = WG_REJECT_AFTER_MESSAGES;
707 static unsigned wg_rekey_after_time = WG_REKEY_AFTER_TIME;
708 static unsigned wg_reject_after_time = WG_REJECT_AFTER_TIME;
709 static unsigned wg_rekey_attempt_time = WG_REKEY_ATTEMPT_TIME;
710 static unsigned wg_rekey_timeout = WG_REKEY_TIMEOUT;
711 static unsigned wg_keepalive_timeout = WG_KEEPALIVE_TIMEOUT;
712
713 static struct mbuf *
714 wg_get_mbuf(size_t, size_t);
715
716 static int wg_send_data_msg(struct wg_peer *, struct wg_session *,
717 struct mbuf *);
718 static int wg_send_cookie_msg(struct wg_softc *, struct wg_peer *,
719 const uint32_t, const uint8_t [WG_MAC_LEN],
720 const struct sockaddr *);
721 static int wg_send_handshake_msg_resp(struct wg_softc *, struct wg_peer *,
722 struct wg_session *, const struct wg_msg_init *);
723 static void wg_send_keepalive_msg(struct wg_peer *, struct wg_session *);
724
725 static struct wg_peer *
726 wg_pick_peer_by_sa(struct wg_softc *, const struct sockaddr *,
727 struct psref *);
728 static struct wg_peer *
729 wg_lookup_peer_by_pubkey(struct wg_softc *,
730 const uint8_t [WG_STATIC_KEY_LEN], struct psref *);
731
732 static struct wg_session *
733 wg_lookup_session_by_index(struct wg_softc *,
734 const uint32_t, struct psref *);
735
736 static void wg_update_endpoint_if_necessary(struct wg_peer *,
737 const struct sockaddr *);
738
739 static void wg_schedule_session_dtor_timer(struct wg_peer *);
740
741 static bool wg_is_underload(struct wg_softc *, struct wg_peer *, int);
742 static void wg_calculate_keys(struct wg_session *, const bool);
743
744 static void wg_clear_states(struct wg_session *);
745
746 static void wg_get_peer(struct wg_peer *, struct psref *);
747 static void wg_put_peer(struct wg_peer *, struct psref *);
748
749 static int wg_send_so(struct wg_peer *, struct mbuf *);
750 static int wg_send_udp(struct wg_peer *, struct mbuf *);
751 static int wg_output(struct ifnet *, struct mbuf *,
752 const struct sockaddr *, const struct rtentry *);
753 static void wg_input(struct ifnet *, struct mbuf *, const int);
754 static int wg_ioctl(struct ifnet *, u_long, void *);
755 static int wg_bind_port(struct wg_softc *, const uint16_t);
756 static int wg_init(struct ifnet *);
757 #ifdef ALTQ
758 static void wg_start(struct ifnet *);
759 #endif
760 static void wg_stop(struct ifnet *, int);
761
762 static void wg_peer_work(struct work *, void *);
763 static void wg_job(struct threadpool_job *);
764 static void wgintr(void *);
765 static void wg_purge_pending_packets(struct wg_peer *);
766
767 static int wg_clone_create(struct if_clone *, int);
768 static int wg_clone_destroy(struct ifnet *);
769
770 struct wg_ops {
771 int (*send_hs_msg)(struct wg_peer *, struct mbuf *);
772 int (*send_data_msg)(struct wg_peer *, struct mbuf *);
773 void (*input)(struct ifnet *, struct mbuf *, const int);
774 int (*bind_port)(struct wg_softc *, const uint16_t);
775 };
776
777 struct wg_ops wg_ops_rumpkernel = {
778 .send_hs_msg = wg_send_so,
779 .send_data_msg = wg_send_udp,
780 .input = wg_input,
781 .bind_port = wg_bind_port,
782 };
783
784 #ifdef WG_RUMPKERNEL
785 static bool wg_user_mode(struct wg_softc *);
786 static int wg_ioctl_linkstr(struct wg_softc *, struct ifdrv *);
787
788 static int wg_send_user(struct wg_peer *, struct mbuf *);
789 static void wg_input_user(struct ifnet *, struct mbuf *, const int);
790 static int wg_bind_port_user(struct wg_softc *, const uint16_t);
791
792 struct wg_ops wg_ops_rumpuser = {
793 .send_hs_msg = wg_send_user,
794 .send_data_msg = wg_send_user,
795 .input = wg_input_user,
796 .bind_port = wg_bind_port_user,
797 };
798 #endif
799
800 #define WG_PEER_READER_FOREACH(wgp, wg) \
801 PSLIST_READER_FOREACH((wgp), &(wg)->wg_peers, struct wg_peer, \
802 wgp_peerlist_entry)
803 #define WG_PEER_WRITER_FOREACH(wgp, wg) \
804 PSLIST_WRITER_FOREACH((wgp), &(wg)->wg_peers, struct wg_peer, \
805 wgp_peerlist_entry)
806 #define WG_PEER_WRITER_INSERT_HEAD(wgp, wg) \
807 PSLIST_WRITER_INSERT_HEAD(&(wg)->wg_peers, (wgp), wgp_peerlist_entry)
808 #define WG_PEER_WRITER_REMOVE(wgp) \
809 PSLIST_WRITER_REMOVE((wgp), wgp_peerlist_entry)
810
811 struct wg_route {
812 struct radix_node wgr_nodes[2];
813 struct wg_peer *wgr_peer;
814 };
815
816 static struct radix_node_head *
817 wg_rnh(struct wg_softc *wg, const int family)
818 {
819
820 switch (family) {
821 case AF_INET:
822 return wg->wg_rtable_ipv4;
823 #ifdef INET6
824 case AF_INET6:
825 return wg->wg_rtable_ipv6;
826 #endif
827 default:
828 return NULL;
829 }
830 }
831
832
833 /*
834 * Global variables
835 */
836 static volatile unsigned wg_count __cacheline_aligned;
837
838 struct psref_class *wg_psref_class __read_mostly;
839
840 static struct if_clone wg_cloner =
841 IF_CLONE_INITIALIZER("wg", wg_clone_create, wg_clone_destroy);
842
843 static struct pktqueue *wg_pktq __read_mostly;
844 static struct workqueue *wg_wq __read_mostly;
845
846 void wgattach(int);
847 /* ARGSUSED */
848 void
849 wgattach(int count)
850 {
851 /*
852 	 * Nothing to do here; initialization is handled by the
853 	 * module initialization code in wginit() below.
854 */
855 }
856
857 static void
858 wginit(void)
859 {
860
861 wg_psref_class = psref_class_create("wg", IPL_SOFTNET);
862
863 if_clone_attach(&wg_cloner);
864 }
865
866 /*
867 * XXX Kludge: This should just happen in wginit, but workqueue_create
868 * cannot be run until after CPUs have been detected, and wginit runs
869 * before configure.
870 */
871 static int
872 wginitqueues(void)
873 {
874 int error __diagused;
875
876 wg_pktq = pktq_create(IFQ_MAXLEN, wgintr, NULL);
877 KASSERT(wg_pktq != NULL);
878
879 error = workqueue_create(&wg_wq, "wgpeer", wg_peer_work, NULL,
880 PRI_NONE, IPL_SOFTNET, WQ_MPSAFE|WQ_PERCPU);
881 KASSERT(error == 0);
882
883 return 0;
884 }
885
886 static void
887 wg_guarantee_initialized(void)
888 {
889 static ONCE_DECL(init);
890 int error __diagused;
891
892 error = RUN_ONCE(&init, wginitqueues);
893 KASSERT(error == 0);
894 }
895
896 static int
897 wg_count_inc(void)
898 {
899 unsigned o, n;
900
901 do {
902 o = atomic_load_relaxed(&wg_count);
903 if (o == UINT_MAX)
904 return ENFILE;
905 n = o + 1;
906 } while (atomic_cas_uint(&wg_count, o, n) != o);
907
908 return 0;
909 }
910
911 static void
912 wg_count_dec(void)
913 {
914 unsigned c __diagused;
915
916 c = atomic_dec_uint_nv(&wg_count);
917 KASSERT(c != UINT_MAX);
918 }
919
920 static int
921 wgdetach(void)
922 {
923
924 /* Prevent new interface creation. */
925 if_clone_detach(&wg_cloner);
926
927 /* Check whether there are any existing interfaces. */
928 if (atomic_load_relaxed(&wg_count)) {
929 /* Back out -- reattach the cloner. */
930 if_clone_attach(&wg_cloner);
931 return EBUSY;
932 }
933
934 /* No interfaces left. Nuke it. */
935 if (wg_wq)
936 workqueue_destroy(wg_wq);
937 if (wg_pktq)
938 pktq_destroy(wg_pktq);
939 psref_class_destroy(wg_psref_class);
940
941 return 0;
942 }
943
944 static void
945 wg_init_key_and_hash(uint8_t ckey[WG_CHAINING_KEY_LEN],
946 uint8_t hash[WG_HASH_LEN])
947 {
948 /* [W] 5.4: CONSTRUCTION */
949 const char *signature = "Noise_IKpsk2_25519_ChaChaPoly_BLAKE2s";
950 /* [W] 5.4: IDENTIFIER */
951 	const char *id = "WireGuard v1 zx2c4 Jason@zx2c4.com";
952 struct blake2s state;
953
954 blake2s(ckey, WG_CHAINING_KEY_LEN, NULL, 0,
955 signature, strlen(signature));
956
957 CTASSERT(WG_HASH_LEN == WG_CHAINING_KEY_LEN);
958 memcpy(hash, ckey, WG_CHAINING_KEY_LEN);
959
960 blake2s_init(&state, WG_HASH_LEN, NULL, 0);
961 blake2s_update(&state, ckey, WG_CHAINING_KEY_LEN);
962 blake2s_update(&state, id, strlen(id));
963 blake2s_final(&state, hash);
964
965 WG_DUMP_HASH("ckey", ckey);
966 WG_DUMP_HASH("hash", hash);
967 }
968
969 static void
970 wg_algo_hash(uint8_t hash[WG_HASH_LEN], const uint8_t input[],
971 const size_t inputsize)
972 {
973 struct blake2s state;
974
975 blake2s_init(&state, WG_HASH_LEN, NULL, 0);
976 blake2s_update(&state, hash, WG_HASH_LEN);
977 blake2s_update(&state, input, inputsize);
978 blake2s_final(&state, hash);
979 }
980
981 static void
982 wg_algo_mac(uint8_t out[], const size_t outsize,
983 const uint8_t key[], const size_t keylen,
984 const uint8_t input1[], const size_t input1len,
985 const uint8_t input2[], const size_t input2len)
986 {
987 struct blake2s state;
988
989 blake2s_init(&state, outsize, key, keylen);
990
991 blake2s_update(&state, input1, input1len);
992 if (input2 != NULL)
993 blake2s_update(&state, input2, input2len);
994 blake2s_final(&state, out);
995 }
996
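/*
 * wg_algo_mac_mac1(out, outsize, input1, input1len, input2, input2len)
 *
 *	[W] 5.4.4: msg.mac1 := MAC(HASH(LABEL-MAC1 || Spub), msg_alpha).
 *	The MAC key is BLAKE2s(LABEL-MAC1 || input1), where input1 is
 *	the receiver's static public key, and the result is a keyed
 *	BLAKE2s over input2, the message bytes preceding the mac1 field.
 *	(Summary comment added for exposition.)
 */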
997 static void
998 wg_algo_mac_mac1(uint8_t out[], const size_t outsize,
999 const uint8_t input1[], const size_t input1len,
1000 const uint8_t input2[], const size_t input2len)
1001 {
1002 struct blake2s state;
1003 /* [W] 5.4: LABEL-MAC1 */
1004 const char *label = "mac1----";
1005 uint8_t key[WG_HASH_LEN];
1006
1007 blake2s_init(&state, sizeof(key), NULL, 0);
1008 blake2s_update(&state, label, strlen(label));
1009 blake2s_update(&state, input1, input1len);
1010 blake2s_final(&state, key);
1011
1012 blake2s_init(&state, outsize, key, sizeof(key));
1013 if (input2 != NULL)
1014 blake2s_update(&state, input2, input2len);
1015 blake2s_final(&state, out);
1016 }
1017
1018 static void
1019 wg_algo_mac_cookie(uint8_t out[], const size_t outsize,
1020 const uint8_t input1[], const size_t input1len)
1021 {
1022 struct blake2s state;
1023 /* [W] 5.4: LABEL-COOKIE */
1024 const char *label = "cookie--";
1025
1026 blake2s_init(&state, outsize, NULL, 0);
1027 blake2s_update(&state, label, strlen(label));
1028 blake2s_update(&state, input1, input1len);
1029 blake2s_final(&state, out);
1030 }
1031
1032 static void
1033 wg_algo_generate_keypair(uint8_t pubkey[WG_EPHEMERAL_KEY_LEN],
1034 uint8_t privkey[WG_EPHEMERAL_KEY_LEN])
1035 {
1036
1037 CTASSERT(WG_EPHEMERAL_KEY_LEN == crypto_scalarmult_curve25519_BYTES);
1038
1039 cprng_strong(kern_cprng, privkey, WG_EPHEMERAL_KEY_LEN, 0);
1040 crypto_scalarmult_base(pubkey, privkey);
1041 }
1042
1043 static void
1044 wg_algo_dh(uint8_t out[WG_DH_OUTPUT_LEN],
1045 const uint8_t privkey[WG_STATIC_KEY_LEN],
1046 const uint8_t pubkey[WG_STATIC_KEY_LEN])
1047 {
1048
1049 CTASSERT(WG_STATIC_KEY_LEN == crypto_scalarmult_curve25519_BYTES);
1050
1051 int ret __diagused = crypto_scalarmult(out, privkey, pubkey);
1052 KASSERT(ret == 0);
1053 }
1054
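/*
 * wg_algo_hmac(out, outlen, key, keylen, in, inlen)
 *
 *	HMAC (RFC 2104) instantiated with BLAKE2s, as required by the
 *	Noise HKDF ([N] 4.3):
 *
 *		out := H((K ^ opad) || H((K ^ ipad) || in))
 *
 *	where K is the key zero-padded to the 64-byte block size.
 *	(Summary comment added for exposition.)
 */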
1055 static void
1056 wg_algo_hmac(uint8_t out[], const size_t outlen,
1057 const uint8_t key[], const size_t keylen,
1058 const uint8_t in[], const size_t inlen)
1059 {
1060 #define IPAD 0x36
1061 #define OPAD 0x5c
1062 uint8_t hmackey[HMAC_BLOCK_LEN] = {0};
1063 uint8_t ipad[HMAC_BLOCK_LEN];
1064 uint8_t opad[HMAC_BLOCK_LEN];
1065 size_t i;
1066 struct blake2s state;
1067
1068 KASSERT(outlen == WG_HASH_LEN);
1069 KASSERT(keylen <= HMAC_BLOCK_LEN);
1070
1071 memcpy(hmackey, key, keylen);
1072
1073 for (i = 0; i < sizeof(hmackey); i++) {
1074 ipad[i] = hmackey[i] ^ IPAD;
1075 opad[i] = hmackey[i] ^ OPAD;
1076 }
1077
1078 blake2s_init(&state, WG_HASH_LEN, NULL, 0);
1079 blake2s_update(&state, ipad, sizeof(ipad));
1080 blake2s_update(&state, in, inlen);
1081 blake2s_final(&state, out);
1082
1083 blake2s_init(&state, WG_HASH_LEN, NULL, 0);
1084 blake2s_update(&state, opad, sizeof(opad));
1085 blake2s_update(&state, out, WG_HASH_LEN);
1086 blake2s_final(&state, out);
1087 #undef IPAD
1088 #undef OPAD
1089 }
1090
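/*
 * wg_algo_kdf(out1, out2, out3, ckey, input, inputlen)
 *
 *	HKDF ([N] 4.3) built on the HMAC above:
 *
 *		tmp1 := HMAC(ckey, input)
 *		out1 := HMAC(tmp1, 0x1)
 *		out2 := HMAC(tmp1, out1 || 0x2)	(only if out2 != NULL)
 *		out3 := HMAC(tmp1, out2 || 0x3)	(only if out3 != NULL)
 *
 *	Requesting one, two, or three outputs corresponds to the KDF1,
 *	KDF2, and KDF3 notation used in the [W] 5.4 message
 *	descriptions.  (Summary comment added for exposition.)
 */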
1091 static void
1092 wg_algo_kdf(uint8_t out1[WG_KDF_OUTPUT_LEN], uint8_t out2[WG_KDF_OUTPUT_LEN],
1093 uint8_t out3[WG_KDF_OUTPUT_LEN], const uint8_t ckey[WG_CHAINING_KEY_LEN],
1094 const uint8_t input[], const size_t inputlen)
1095 {
1096 uint8_t tmp1[WG_KDF_OUTPUT_LEN], tmp2[WG_KDF_OUTPUT_LEN + 1];
1097 uint8_t one[1];
1098
1099 /*
1100 * [N] 4.3: "an input_key_material byte sequence with length
1101 * either zero bytes, 32 bytes, or DHLEN bytes."
1102 */
1103 KASSERT(inputlen == 0 || inputlen == 32 || inputlen == NOISE_DHLEN);
1104
1105 WG_DUMP_HASH("ckey", ckey);
1106 if (input != NULL)
1107 WG_DUMP_HASH("input", input);
1108 wg_algo_hmac(tmp1, sizeof(tmp1), ckey, WG_CHAINING_KEY_LEN,
1109 input, inputlen);
1110 WG_DUMP_HASH("tmp1", tmp1);
1111 one[0] = 1;
1112 wg_algo_hmac(out1, WG_KDF_OUTPUT_LEN, tmp1, sizeof(tmp1),
1113 one, sizeof(one));
1114 WG_DUMP_HASH("out1", out1);
1115 if (out2 == NULL)
1116 return;
1117 memcpy(tmp2, out1, WG_KDF_OUTPUT_LEN);
1118 tmp2[WG_KDF_OUTPUT_LEN] = 2;
1119 wg_algo_hmac(out2, WG_KDF_OUTPUT_LEN, tmp1, sizeof(tmp1),
1120 tmp2, sizeof(tmp2));
1121 WG_DUMP_HASH("out2", out2);
1122 if (out3 == NULL)
1123 return;
1124 memcpy(tmp2, out2, WG_KDF_OUTPUT_LEN);
1125 tmp2[WG_KDF_OUTPUT_LEN] = 3;
1126 wg_algo_hmac(out3, WG_KDF_OUTPUT_LEN, tmp1, sizeof(tmp1),
1127 tmp2, sizeof(tmp2));
1128 WG_DUMP_HASH("out3", out3);
1129 }
1130
1131 static void __noinline
1132 wg_algo_dh_kdf(uint8_t ckey[WG_CHAINING_KEY_LEN],
1133 uint8_t cipher_key[WG_CIPHER_KEY_LEN],
1134 const uint8_t local_key[WG_STATIC_KEY_LEN],
1135 const uint8_t remote_key[WG_STATIC_KEY_LEN])
1136 {
1137 uint8_t dhout[WG_DH_OUTPUT_LEN];
1138
1139 wg_algo_dh(dhout, local_key, remote_key);
1140 wg_algo_kdf(ckey, cipher_key, NULL, ckey, dhout, sizeof(dhout));
1141
1142 WG_DUMP_HASH("dhout", dhout);
1143 WG_DUMP_HASH("ckey", ckey);
1144 if (cipher_key != NULL)
1145 WG_DUMP_HASH("cipher_key", cipher_key);
1146 }
1147
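/*
 * AEAD helpers.  wg_algo_aead_enc/dec use ChaCha20-Poly1305 (IETF
 * variant) with a 96-bit nonce built as 32 zero bits followed by the
 * 64-bit counter in little endian, per [W] 5.4.6.  wg_algo_xaead_enc
 * and wg_algo_xaead_dec below use XChaCha20-Poly1305 with an explicit
 * 24-byte random nonce, as used for the cookie field of the cookie
 * reply ([W] 5.4.7).  (Summary comment added for exposition.)
 */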
1148 static void
1149 wg_algo_aead_enc(uint8_t out[], size_t expected_outsize, const uint8_t key[],
1150 const uint64_t counter, const uint8_t plain[], const size_t plainsize,
1151 const uint8_t auth[], size_t authlen)
1152 {
1153 uint8_t nonce[(32 + 64) / 8] = {0};
1154 long long unsigned int outsize;
1155 int error __diagused;
1156
1157 le64enc(&nonce[4], counter);
1158
1159 error = crypto_aead_chacha20poly1305_ietf_encrypt(out, &outsize, plain,
1160 plainsize, auth, authlen, NULL, nonce, key);
1161 KASSERT(error == 0);
1162 KASSERT(outsize == expected_outsize);
1163 }
1164
1165 static int
1166 wg_algo_aead_dec(uint8_t out[], size_t expected_outsize, const uint8_t key[],
1167 const uint64_t counter, const uint8_t encrypted[],
1168 const size_t encryptedsize, const uint8_t auth[], size_t authlen)
1169 {
1170 uint8_t nonce[(32 + 64) / 8] = {0};
1171 long long unsigned int outsize;
1172 int error;
1173
1174 le64enc(&nonce[4], counter);
1175
1176 error = crypto_aead_chacha20poly1305_ietf_decrypt(out, &outsize, NULL,
1177 encrypted, encryptedsize, auth, authlen, nonce, key);
1178 if (error == 0)
1179 KASSERT(outsize == expected_outsize);
1180 return error;
1181 }
1182
1183 static void
1184 wg_algo_xaead_enc(uint8_t out[], const size_t expected_outsize,
1185 const uint8_t key[], const uint8_t plain[], const size_t plainsize,
1186 const uint8_t auth[], size_t authlen,
1187 const uint8_t nonce[WG_SALT_LEN])
1188 {
1189 long long unsigned int outsize;
1190 int error __diagused;
1191
1192 CTASSERT(WG_SALT_LEN == crypto_aead_xchacha20poly1305_ietf_NPUBBYTES);
1193 error = crypto_aead_xchacha20poly1305_ietf_encrypt(out, &outsize,
1194 plain, plainsize, auth, authlen, NULL, nonce, key);
1195 KASSERT(error == 0);
1196 KASSERT(outsize == expected_outsize);
1197 }
1198
1199 static int
1200 wg_algo_xaead_dec(uint8_t out[], const size_t expected_outsize,
1201 const uint8_t key[], const uint8_t encrypted[], const size_t encryptedsize,
1202 const uint8_t auth[], size_t authlen,
1203 const uint8_t nonce[WG_SALT_LEN])
1204 {
1205 long long unsigned int outsize;
1206 int error;
1207
1208 error = crypto_aead_xchacha20poly1305_ietf_decrypt(out, &outsize, NULL,
1209 encrypted, encryptedsize, auth, authlen, nonce, key);
1210 if (error == 0)
1211 KASSERT(outsize == expected_outsize);
1212 return error;
1213 }
1214
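/*
 * wg_algo_tai64n(timestamp)
 *
 *	Fill in the 12-byte TIMESTAMP() value of [W] 5.4.2 as a TAI64N
 *	label: a big-endian 64-bit second count offset by 2^62 (the
 *	external TAI64 format), followed by a big-endian 32-bit
 *	nanosecond count.  For example, tv_sec = 1, tv_nsec = 500 would
 *	encode as 40 00 00 00 00 00 00 01 00 00 01 f4.  (Summary
 *	comment added for exposition; see the FIXME below about strict
 *	TAI vs UTC seconds.)
 */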
1215 static void
1216 wg_algo_tai64n(wg_timestamp_t timestamp)
1217 {
1218 struct timespec ts;
1219
1220 /* FIXME strict TAI64N (https://cr.yp.to/libtai/tai64.html) */
1221 getnanotime(&ts);
1222 /* TAI64 label in external TAI64 format */
1223 be32enc(timestamp, 0x40000000U + (uint32_t)(ts.tv_sec >> 32));
1224 /* second beginning from 1970 TAI */
1225 be32enc(timestamp + 4, (uint32_t)(ts.tv_sec & 0xffffffffU));
1226 /* nanosecond in big-endian format */
1227 be32enc(timestamp + 8, (uint32_t)ts.tv_nsec);
1228 }
1229
1230 /*
1231 * wg_get_stable_session(wgp, psref)
1232 *
1233 * Get a passive reference to the current stable session, or
1234 * return NULL if there is no current stable session.
1235 *
1236 * The pointer is always there but the session is not necessarily
1237 * ESTABLISHED; if it is not ESTABLISHED, return NULL. However,
1238 * the session may transition from ESTABLISHED to DESTROYING while
1239 * holding the passive reference.
1240 */
1241 static struct wg_session *
1242 wg_get_stable_session(struct wg_peer *wgp, struct psref *psref)
1243 {
1244 int s;
1245 struct wg_session *wgs;
1246
1247 s = pserialize_read_enter();
1248 wgs = atomic_load_consume(&wgp->wgp_session_stable);
1249 if (__predict_false(wgs->wgs_state != WGS_STATE_ESTABLISHED))
1250 wgs = NULL;
1251 else
1252 psref_acquire(psref, &wgs->wgs_psref, wg_psref_class);
1253 pserialize_read_exit(s);
1254
1255 return wgs;
1256 }
1257
1258 static void
1259 wg_put_session(struct wg_session *wgs, struct psref *psref)
1260 {
1261
1262 psref_release(psref, &wgs->wgs_psref, wg_psref_class);
1263 }
1264
1265 static void
1266 wg_destroy_session(struct wg_softc *wg, struct wg_session *wgs)
1267 {
1268 struct wg_peer *wgp = wgs->wgs_peer;
1269 struct wg_session *wgs0 __diagused;
1270 void *garbage;
1271
1272 KASSERT(mutex_owned(wgp->wgp_lock));
1273 KASSERT(wgs->wgs_state != WGS_STATE_UNKNOWN);
1274
1275 /* Remove the session from the table. */
1276 wgs0 = thmap_del(wg->wg_sessions_byindex,
1277 &wgs->wgs_local_index, sizeof(wgs->wgs_local_index));
1278 KASSERT(wgs0 == wgs);
1279 garbage = thmap_stage_gc(wg->wg_sessions_byindex);
1280
1281 /* Wait for passive references to drain. */
1282 pserialize_perform(wgp->wgp_psz);
1283 psref_target_destroy(&wgs->wgs_psref, wg_psref_class);
1284
1285 /*
1286 * Free memory, zero state, and transition to UNKNOWN. We have
1287 * exclusive access to the session now, so there is no need for
1288 * an atomic store.
1289 */
1290 thmap_gc(wg->wg_sessions_byindex, garbage);
1291 WG_DLOG("session[L=%"PRIx32" R=%"PRIx32"] -> WGS_STATE_UNKNOWN\n",
1292 wgs->wgs_local_index, wgs->wgs_remote_index);
1293 wgs->wgs_local_index = 0;
1294 wgs->wgs_remote_index = 0;
1295 wg_clear_states(wgs);
1296 wgs->wgs_state = WGS_STATE_UNKNOWN;
1297 }
1298
1299 /*
1300 * wg_get_session_index(wg, wgs)
1301 *
1302 * Choose a session index for wgs->wgs_local_index, and store it
1303 * in wg's table of sessions by index.
1304 *
1305 * wgs must be the unstable session of its peer, and must be
1306 * transitioning out of the UNKNOWN state.
1307 */
1308 static void
1309 wg_get_session_index(struct wg_softc *wg, struct wg_session *wgs)
1310 {
1311 struct wg_peer *wgp __diagused = wgs->wgs_peer;
1312 struct wg_session *wgs0;
1313 uint32_t index;
1314
1315 KASSERT(mutex_owned(wgp->wgp_lock));
1316 KASSERT(wgs == wgp->wgp_session_unstable);
1317 KASSERTMSG(wgs->wgs_state == WGS_STATE_UNKNOWN, "state=%d",
1318 wgs->wgs_state);
1319
1320 do {
1321 /* Pick a uniform random index. */
1322 index = cprng_strong32();
1323
1324 /* Try to take it. */
1325 wgs->wgs_local_index = index;
1326 wgs0 = thmap_put(wg->wg_sessions_byindex,
1327 &wgs->wgs_local_index, sizeof wgs->wgs_local_index, wgs);
1328
1329 /* If someone else beat us, start over. */
1330 } while (__predict_false(wgs0 != wgs));
1331 }
1332
1333 /*
1334 * wg_put_session_index(wg, wgs)
1335 *
1336 * Remove wgs from the table of sessions by index, wait for any
1337 * passive references to drain, and transition the session to the
1338 * UNKNOWN state.
1339 *
1340 * wgs must be the unstable session of its peer, and must not be
1341 * UNKNOWN or ESTABLISHED.
1342 */
1343 static void
1344 wg_put_session_index(struct wg_softc *wg, struct wg_session *wgs)
1345 {
1346 struct wg_peer *wgp __diagused = wgs->wgs_peer;
1347
1348 KASSERT(mutex_owned(wgp->wgp_lock));
1349 KASSERT(wgs->wgs_state != WGS_STATE_UNKNOWN);
1350 KASSERT(wgs->wgs_state != WGS_STATE_ESTABLISHED);
1351
1352 wg_destroy_session(wg, wgs);
1353 psref_target_init(&wgs->wgs_psref, wg_psref_class);
1354 }
1355
1356 /*
1357 * Handshake patterns
1358 *
1359 * [W] 5: "These messages use the "IK" pattern from Noise"
1360 * [N] 7.5. Interactive handshake patterns (fundamental)
1361  * "The first character refers to the initiator's static key:"
1362  * "I = Static key for initiator Immediately transmitted to responder,
1363  * despite reduced or absent identity hiding"
1364  * "The second character refers to the responder's static key:"
1365 * "K = Static key for responder Known to initiator"
1366 * "IK:
1367 * <- s
1368 * ...
1369 * -> e, es, s, ss
1370 * <- e, ee, se"
1371 * [N] 9.4. Pattern modifiers
1372 * "IKpsk2:
1373 * <- s
1374 * ...
1375 * -> e, es, s, ss
1376 * <- e, ee, se, psk"
1377 */
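/*
 * wg_fill_msg_init(wg, wgp, wgs, wgmi)
 *
 *	Compose the first handshake message of [W] 5.4.2 (the
 *	"-> e, es, s, ss" line of IKpsk2 above) in *wgmi, addressed to
 *	the peer wgp: generate a fresh ephemeral key pair, encrypt our
 *	static public key and a TAI64N timestamp, compute mac1/mac2,
 *	and stash the resulting chaining key, handshake hash, and
 *	ephemeral keys in the unstable session wgs.
 *
 *	Caller must hold wgp->wgp_lock, and wgs must be in
 *	WGS_STATE_INIT_ACTIVE.
 */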
1378 static void
1379 wg_fill_msg_init(struct wg_softc *wg, struct wg_peer *wgp,
1380 struct wg_session *wgs, struct wg_msg_init *wgmi)
1381 {
1382 uint8_t ckey[WG_CHAINING_KEY_LEN]; /* [W] 5.4.2: Ci */
1383 uint8_t hash[WG_HASH_LEN]; /* [W] 5.4.2: Hi */
1384 uint8_t cipher_key[WG_CIPHER_KEY_LEN];
1385 uint8_t pubkey[WG_EPHEMERAL_KEY_LEN];
1386 uint8_t privkey[WG_EPHEMERAL_KEY_LEN];
1387
1388 KASSERT(mutex_owned(wgp->wgp_lock));
1389 KASSERT(wgs == wgp->wgp_session_unstable);
1390 KASSERTMSG(wgs->wgs_state == WGS_STATE_INIT_ACTIVE, "state=%d",
1391 wgs->wgs_state);
1392
1393 wgmi->wgmi_type = htole32(WG_MSG_TYPE_INIT);
1394 wgmi->wgmi_sender = wgs->wgs_local_index;
1395
1396 /* [W] 5.4.2: First Message: Initiator to Responder */
1397
1398 /* Ci := HASH(CONSTRUCTION) */
1399 /* Hi := HASH(Ci || IDENTIFIER) */
1400 wg_init_key_and_hash(ckey, hash);
1401 /* Hi := HASH(Hi || Sr^pub) */
1402 wg_algo_hash(hash, wgp->wgp_pubkey, sizeof(wgp->wgp_pubkey));
1403
1404 WG_DUMP_HASH("hash", hash);
1405
1406 /* [N] 2.2: "e" */
1407 /* Ei^priv, Ei^pub := DH-GENERATE() */
1408 wg_algo_generate_keypair(pubkey, privkey);
1409 /* Ci := KDF1(Ci, Ei^pub) */
1410 wg_algo_kdf(ckey, NULL, NULL, ckey, pubkey, sizeof(pubkey));
1411 /* msg.ephemeral := Ei^pub */
1412 memcpy(wgmi->wgmi_ephemeral, pubkey, sizeof(wgmi->wgmi_ephemeral));
1413 /* Hi := HASH(Hi || msg.ephemeral) */
1414 wg_algo_hash(hash, pubkey, sizeof(pubkey));
1415
1416 WG_DUMP_HASH("ckey", ckey);
1417 WG_DUMP_HASH("hash", hash);
1418
1419 /* [N] 2.2: "es" */
1420 /* Ci, k := KDF2(Ci, DH(Ei^priv, Sr^pub)) */
1421 wg_algo_dh_kdf(ckey, cipher_key, privkey, wgp->wgp_pubkey);
1422
1423 /* [N] 2.2: "s" */
1424 /* msg.static := AEAD(k, 0, Si^pub, Hi) */
1425 wg_algo_aead_enc(wgmi->wgmi_static, sizeof(wgmi->wgmi_static),
1426 cipher_key, 0, wg->wg_pubkey, sizeof(wg->wg_pubkey),
1427 hash, sizeof(hash));
1428 /* Hi := HASH(Hi || msg.static) */
1429 wg_algo_hash(hash, wgmi->wgmi_static, sizeof(wgmi->wgmi_static));
1430
1431 WG_DUMP_HASH48("wgmi_static", wgmi->wgmi_static);
1432
1433 /* [N] 2.2: "ss" */
1434 /* Ci, k := KDF2(Ci, DH(Si^priv, Sr^pub)) */
1435 wg_algo_dh_kdf(ckey, cipher_key, wg->wg_privkey, wgp->wgp_pubkey);
1436
1437 /* msg.timestamp := AEAD(k, TIMESTAMP(), Hi) */
1438 wg_timestamp_t timestamp;
1439 wg_algo_tai64n(timestamp);
1440 wg_algo_aead_enc(wgmi->wgmi_timestamp, sizeof(wgmi->wgmi_timestamp),
1441 cipher_key, 0, timestamp, sizeof(timestamp), hash, sizeof(hash));
1442 /* Hi := HASH(Hi || msg.timestamp) */
1443 wg_algo_hash(hash, wgmi->wgmi_timestamp, sizeof(wgmi->wgmi_timestamp));
1444
1445 /* [W] 5.4.4 Cookie MACs */
1446 wg_algo_mac_mac1(wgmi->wgmi_mac1, sizeof(wgmi->wgmi_mac1),
1447 wgp->wgp_pubkey, sizeof(wgp->wgp_pubkey),
1448 (const uint8_t *)wgmi, offsetof(struct wg_msg_init, wgmi_mac1));
1449 /* Need mac1 to decrypt a cookie from a cookie message */
1450 memcpy(wgp->wgp_last_sent_mac1, wgmi->wgmi_mac1,
1451 sizeof(wgp->wgp_last_sent_mac1));
1452 wgp->wgp_last_sent_mac1_valid = true;
1453
1454 if (wgp->wgp_latest_cookie_time == 0 ||
1455 (time_uptime - wgp->wgp_latest_cookie_time) >= WG_COOKIE_TIME)
1456 memset(wgmi->wgmi_mac2, 0, sizeof(wgmi->wgmi_mac2));
1457 else {
1458 wg_algo_mac(wgmi->wgmi_mac2, sizeof(wgmi->wgmi_mac2),
1459 wgp->wgp_latest_cookie, WG_COOKIE_LEN,
1460 (const uint8_t *)wgmi,
1461 offsetof(struct wg_msg_init, wgmi_mac2),
1462 NULL, 0);
1463 }
1464
1465 memcpy(wgs->wgs_ephemeral_key_pub, pubkey, sizeof(pubkey));
1466 memcpy(wgs->wgs_ephemeral_key_priv, privkey, sizeof(privkey));
1467 memcpy(wgs->wgs_handshake_hash, hash, sizeof(hash));
1468 memcpy(wgs->wgs_chaining_key, ckey, sizeof(ckey));
1469 WG_DLOG("%s: sender=%x\n", __func__, wgs->wgs_local_index);
1470 }
1471
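/*
 * wg_handle_msg_init(wg, wgmi, src)
 *
 *	Handle a received first handshake message ([W] 5.4.2) from the
 *	address src: always verify mac1; when under load, also demand a
 *	valid mac2 or answer with a cookie reply instead; decrypt the
 *	initiator's static key and timestamp to identify the peer and
 *	reject replays; then set up the peer's unstable session, move it
 *	to WGS_STATE_INIT_PASSIVE, and send our handshake response.
 */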
1472 static void __noinline
1473 wg_handle_msg_init(struct wg_softc *wg, const struct wg_msg_init *wgmi,
1474 const struct sockaddr *src)
1475 {
1476 uint8_t ckey[WG_CHAINING_KEY_LEN]; /* [W] 5.4.2: Ci */
1477 uint8_t hash[WG_HASH_LEN]; /* [W] 5.4.2: Hi */
1478 uint8_t cipher_key[WG_CIPHER_KEY_LEN];
1479 uint8_t peer_pubkey[WG_STATIC_KEY_LEN];
1480 struct wg_peer *wgp;
1481 struct wg_session *wgs;
1482 int error, ret;
1483 struct psref psref_peer;
1484 uint8_t mac1[WG_MAC_LEN];
1485
1486 WG_TRACE("init msg received");
1487
1488 wg_algo_mac_mac1(mac1, sizeof(mac1),
1489 wg->wg_pubkey, sizeof(wg->wg_pubkey),
1490 (const uint8_t *)wgmi, offsetof(struct wg_msg_init, wgmi_mac1));
1491
1492 /*
1493 * [W] 5.3: Denial of Service Mitigation & Cookies
1494 * "the responder, ..., must always reject messages with an invalid
1495 * msg.mac1"
1496 */
1497 if (!consttime_memequal(mac1, wgmi->wgmi_mac1, sizeof(mac1))) {
1498 WG_DLOG("mac1 is invalid\n");
1499 return;
1500 }
1501
1502 /*
1503 * [W] 5.4.2: First Message: Initiator to Responder
1504 * "When the responder receives this message, it does the same
1505 * operations so that its final state variables are identical,
1506 * replacing the operands of the DH function to produce equivalent
1507 * values."
1508 	 * Note that the comments on the following operations are just
1509 	 * copies of the initiator's.
1510 */
1511
1512 /* Ci := HASH(CONSTRUCTION) */
1513 /* Hi := HASH(Ci || IDENTIFIER) */
1514 wg_init_key_and_hash(ckey, hash);
1515 /* Hi := HASH(Hi || Sr^pub) */
1516 wg_algo_hash(hash, wg->wg_pubkey, sizeof(wg->wg_pubkey));
1517
1518 /* [N] 2.2: "e" */
1519 /* Ci := KDF1(Ci, Ei^pub) */
1520 wg_algo_kdf(ckey, NULL, NULL, ckey, wgmi->wgmi_ephemeral,
1521 sizeof(wgmi->wgmi_ephemeral));
1522 /* Hi := HASH(Hi || msg.ephemeral) */
1523 wg_algo_hash(hash, wgmi->wgmi_ephemeral, sizeof(wgmi->wgmi_ephemeral));
1524
1525 WG_DUMP_HASH("ckey", ckey);
1526
1527 /* [N] 2.2: "es" */
1528 /* Ci, k := KDF2(Ci, DH(Ei^priv, Sr^pub)) */
1529 wg_algo_dh_kdf(ckey, cipher_key, wg->wg_privkey, wgmi->wgmi_ephemeral);
1530
1531 WG_DUMP_HASH48("wgmi_static", wgmi->wgmi_static);
1532
1533 /* [N] 2.2: "s" */
1534 /* msg.static := AEAD(k, 0, Si^pub, Hi) */
1535 error = wg_algo_aead_dec(peer_pubkey, WG_STATIC_KEY_LEN, cipher_key, 0,
1536 wgmi->wgmi_static, sizeof(wgmi->wgmi_static), hash, sizeof(hash));
1537 if (error != 0) {
1538 WG_LOG_RATECHECK(&wg->wg_ppsratecheck, LOG_DEBUG,
1539 "%s: wg_algo_aead_dec for secret key failed\n",
1540 if_name(&wg->wg_if));
1541 return;
1542 }
1543 /* Hi := HASH(Hi || msg.static) */
1544 wg_algo_hash(hash, wgmi->wgmi_static, sizeof(wgmi->wgmi_static));
1545
1546 wgp = wg_lookup_peer_by_pubkey(wg, peer_pubkey, &psref_peer);
1547 if (wgp == NULL) {
1548 WG_DLOG("peer not found\n");
1549 return;
1550 }
1551
1552 /*
1553 * Lock the peer to serialize access to cookie state.
1554 *
1555 * XXX Can we safely avoid holding the lock across DH? Take it
1556 * just to verify mac2 and then unlock/DH/lock?
1557 */
1558 mutex_enter(wgp->wgp_lock);
1559
1560 if (__predict_false(wg_is_underload(wg, wgp, WG_MSG_TYPE_INIT))) {
1561 WG_TRACE("under load");
1562 /*
1563 * [W] 5.3: Denial of Service Mitigation & Cookies
1564 * "the responder, ..., and when under load may reject messages
1565 * with an invalid msg.mac2. If the responder receives a
1566 * message with a valid msg.mac1 yet with an invalid msg.mac2,
1567 * and is under load, it may respond with a cookie reply
1568 * message"
1569 */
1570 uint8_t zero[WG_MAC_LEN] = {0};
1571 if (consttime_memequal(wgmi->wgmi_mac2, zero, sizeof(zero))) {
1572 WG_TRACE("sending a cookie message: no cookie included");
1573 (void)wg_send_cookie_msg(wg, wgp, wgmi->wgmi_sender,
1574 wgmi->wgmi_mac1, src);
1575 goto out;
1576 }
1577 if (!wgp->wgp_last_sent_cookie_valid) {
1578 			WG_TRACE("sending a cookie message: no cookie ever sent");
1579 (void)wg_send_cookie_msg(wg, wgp, wgmi->wgmi_sender,
1580 wgmi->wgmi_mac1, src);
1581 goto out;
1582 }
1583 uint8_t mac2[WG_MAC_LEN];
1584 wg_algo_mac(mac2, sizeof(mac2), wgp->wgp_last_sent_cookie,
1585 WG_COOKIE_LEN, (const uint8_t *)wgmi,
1586 offsetof(struct wg_msg_init, wgmi_mac2), NULL, 0);
1587 if (!consttime_memequal(mac2, wgmi->wgmi_mac2, sizeof(mac2))) {
1588 WG_DLOG("mac2 is invalid\n");
1589 goto out;
1590 }
1591 		WG_TRACE("under load, but mac2 is valid; continuing");
1592 }
1593
1594 /* [N] 2.2: "ss" */
1595 /* Ci, k := KDF2(Ci, DH(Si^priv, Sr^pub)) */
1596 wg_algo_dh_kdf(ckey, cipher_key, wg->wg_privkey, wgp->wgp_pubkey);
1597
1598 /* msg.timestamp := AEAD(k, TIMESTAMP(), Hi) */
1599 wg_timestamp_t timestamp;
1600 error = wg_algo_aead_dec(timestamp, sizeof(timestamp), cipher_key, 0,
1601 wgmi->wgmi_timestamp, sizeof(wgmi->wgmi_timestamp),
1602 hash, sizeof(hash));
1603 if (error != 0) {
1604 WG_LOG_RATECHECK(&wgp->wgp_ppsratecheck, LOG_DEBUG,
1605 "%s: peer %s: wg_algo_aead_dec for timestamp failed\n",
1606 if_name(&wg->wg_if), wgp->wgp_name);
1607 goto out;
1608 }
1609 /* Hi := HASH(Hi || msg.timestamp) */
1610 wg_algo_hash(hash, wgmi->wgmi_timestamp, sizeof(wgmi->wgmi_timestamp));
1611
1612 /*
1613 * [W] 5.1 "The responder keeps track of the greatest timestamp
1614 * received per peer and discards packets containing
1615 * timestamps less than or equal to it."
1616 */
1617 ret = memcmp(timestamp, wgp->wgp_timestamp_latest_init,
1618 sizeof(timestamp));
1619 if (ret <= 0) {
1620 WG_LOG_RATECHECK(&wgp->wgp_ppsratecheck, LOG_DEBUG,
1621 "%s: peer %s: invalid init msg: timestamp is old\n",
1622 if_name(&wg->wg_if), wgp->wgp_name);
1623 goto out;
1624 }
1625 memcpy(wgp->wgp_timestamp_latest_init, timestamp, sizeof(timestamp));
1626
1627 /*
1628 * Message is good -- we're committing to handle it now, unless
1629 * we were already initiating a session.
1630 */
1631 wgs = wgp->wgp_session_unstable;
1632 switch (wgs->wgs_state) {
1633 case WGS_STATE_UNKNOWN: /* new session initiated by peer */
1634 break;
1635 case WGS_STATE_INIT_ACTIVE: /* we're already initiating, drop */
1636 /* XXX Who wins if both sides send INIT? */
1637 WG_TRACE("Session already initializing, ignoring the message");
1638 goto out;
1639 case WGS_STATE_INIT_PASSIVE: /* peer is retrying, start over */
1640 WG_TRACE("Session already initializing, destroying old states");
1641 /*
1642 * XXX Avoid this -- just resend our response -- if the
1643 * INIT message is identical to the previous one.
1644 */
1645 wg_put_session_index(wg, wgs);
1646 KASSERTMSG(wgs->wgs_state == WGS_STATE_UNKNOWN, "state=%d",
1647 wgs->wgs_state);
1648 break;
1649 case WGS_STATE_ESTABLISHED: /* can't happen */
1650 panic("unstable session can't be established");
1651 case WGS_STATE_DESTROYING: /* rekey initiated by peer */
1652 WG_TRACE("Session destroying, but force to clear");
1653 wg_put_session_index(wg, wgs);
1654 KASSERTMSG(wgs->wgs_state == WGS_STATE_UNKNOWN, "state=%d",
1655 wgs->wgs_state);
1656 break;
1657 default:
1658 panic("invalid session state: %d", wgs->wgs_state);
1659 }
1660
1661 /*
1662 * Assign a fresh session index.
1663 */
1664 KASSERTMSG(wgs->wgs_state == WGS_STATE_UNKNOWN, "state=%d",
1665 wgs->wgs_state);
1666 wg_get_session_index(wg, wgs);
1667
1668 memcpy(wgs->wgs_handshake_hash, hash, sizeof(hash));
1669 memcpy(wgs->wgs_chaining_key, ckey, sizeof(ckey));
1670 memcpy(wgs->wgs_ephemeral_key_peer, wgmi->wgmi_ephemeral,
1671 sizeof(wgmi->wgmi_ephemeral));
1672
1673 wg_update_endpoint_if_necessary(wgp, src);
1674
1675 /*
1676 * Count the time of the INIT message as the time of
1677 * establishment -- this is used to decide when to erase keys,
1678 * and we want to start counting as soon as we have generated
1679 * keys.
1680 */
1681 wgs->wgs_time_established = time_uptime;
1682 wg_schedule_session_dtor_timer(wgp);
1683
1684 /*
1685 * Respond to the initiator with our ephemeral public key.
1686 */
1687 (void)wg_send_handshake_msg_resp(wg, wgp, wgs, wgmi);
1688
1689 WG_DLOG("session[L=%"PRIx32" R=%"PRIx32"]:"
1690 " calculate keys as responder\n",
1691 wgs->wgs_local_index, wgs->wgs_remote_index);
1692 wg_calculate_keys(wgs, false);
1693 wg_clear_states(wgs);
1694
1695 /*
1696 * Session is ready to receive data now that we have received
1697 * the peer initiator's ephemeral key pair, generated our
1698 * responder's ephemeral key pair, and derived a session key.
1699 *
1700 * Transition from UNKNOWN to INIT_PASSIVE to publish it to the
1701 * data rx path, wg_handle_msg_data, where the
1702 * atomic_load_acquire matching this atomic_store_release
1703 * happens.
1704 *
1705 * (Session is not, however, ready to send data until the peer
1706 * has acknowledged our response by sending its first data
1707 * packet. So don't swap the sessions yet.)
1708 */
1709 WG_DLOG("session[L=%"PRIx32" R=%"PRIx32"] -> WGS_STATE_INIT_PASSIVE\n",
1710 wgs->wgs_local_index, wgs->wgs_remote_index);
1711 atomic_store_release(&wgs->wgs_state, WGS_STATE_INIT_PASSIVE);
1712 WG_TRACE("WGS_STATE_INIT_PASSIVE");
1713
1714 out:
1715 mutex_exit(wgp->wgp_lock);
1716 wg_put_peer(wgp, &psref_peer);
1717 }
1718
1719 static struct socket *
1720 wg_get_so_by_af(struct wg_softc *wg, const int af)
1721 {
1722
1723 switch (af) {
1724 #ifdef INET
1725 case AF_INET:
1726 return wg->wg_so4;
1727 #endif
1728 #ifdef INET6
1729 case AF_INET6:
1730 return wg->wg_so6;
1731 #endif
1732 default:
1733 panic("wg: no such af: %d", af);
1734 }
1735 }
1736
1737 static struct socket *
1738 wg_get_so_by_peer(struct wg_peer *wgp, struct wg_sockaddr *wgsa)
1739 {
1740
1741 return wg_get_so_by_af(wgp->wgp_sc, wgsa_family(wgsa));
1742 }
1743
1744 static struct wg_sockaddr *
1745 wg_get_endpoint_sa(struct wg_peer *wgp, struct psref *psref)
1746 {
1747 struct wg_sockaddr *wgsa;
1748 int s;
1749
1750 s = pserialize_read_enter();
1751 wgsa = atomic_load_consume(&wgp->wgp_endpoint);
1752 psref_acquire(psref, &wgsa->wgsa_psref, wg_psref_class);
1753 pserialize_read_exit(s);
1754
1755 return wgsa;
1756 }
1757
1758 static void
1759 wg_put_sa(struct wg_peer *wgp, struct wg_sockaddr *wgsa, struct psref *psref)
1760 {
1761
1762 psref_release(psref, &wgsa->wgsa_psref, wg_psref_class);
1763 }
1764
1765 static int
1766 wg_send_so(struct wg_peer *wgp, struct mbuf *m)
1767 {
1768 int error;
1769 struct socket *so;
1770 struct psref psref;
1771 struct wg_sockaddr *wgsa;
1772
1773 wgsa = wg_get_endpoint_sa(wgp, &psref);
1774 so = wg_get_so_by_peer(wgp, wgsa);
1775 error = sosend(so, wgsatosa(wgsa), NULL, m, NULL, 0, curlwp);
1776 wg_put_sa(wgp, wgsa, &psref);
1777
1778 return error;
1779 }
1780
1781 static int
1782 wg_send_handshake_msg_init(struct wg_softc *wg, struct wg_peer *wgp)
1783 {
1784 int error;
1785 struct mbuf *m;
1786 struct wg_msg_init *wgmi;
1787 struct wg_session *wgs;
1788
1789 KASSERT(mutex_owned(wgp->wgp_lock));
1790
1791 wgs = wgp->wgp_session_unstable;
1792 /* XXX pull dispatch out into wg_task_send_init_message */
1793 switch (wgs->wgs_state) {
1794 case WGS_STATE_UNKNOWN: /* new session initiated by us */
1795 break;
1796 case WGS_STATE_INIT_ACTIVE: /* we're already initiating, stop */
1797 WG_TRACE("Session already initializing, skip starting new one");
1798 return EBUSY;
1799 case WGS_STATE_INIT_PASSIVE: /* peer was trying -- XXX what now? */
1800 WG_TRACE("Session already initializing, waiting for peer");
1801 return EBUSY;
1802 case WGS_STATE_ESTABLISHED: /* can't happen */
1803 panic("unstable session can't be established");
1804 case WGS_STATE_DESTROYING: /* rekey initiated by us too early */
1805 WG_TRACE("Session destroying");
1806 wg_put_session_index(wg, wgs);
1807 KASSERTMSG(wgs->wgs_state == WGS_STATE_UNKNOWN, "state=%d",
1808 wgs->wgs_state);
1809 break;
1810 }
1811
1812 /*
1813 * Assign a fresh session index.
1814 */
1815 KASSERTMSG(wgs->wgs_state == WGS_STATE_UNKNOWN, "state=%d",
1816 wgs->wgs_state);
1817 wg_get_session_index(wg, wgs);
1818
1819 /*
1820 * We have initiated a session. Transition to INIT_ACTIVE.
1821 * This doesn't publish it for use in the data rx path,
1822 * wg_handle_msg_data, or in the data tx path, wg_output -- we
1823 * have to wait for the peer to respond with their ephemeral
1824 * public key before we can derive a session key for tx/rx.
1825 * Hence only atomic_store_relaxed.
1826 */
1827 WG_DLOG("session[L=%"PRIx32" R=(unknown)] -> WGS_STATE_INIT_ACTIVE\n",
1828 wgs->wgs_local_index);
1829 atomic_store_relaxed(&wgs->wgs_state, WGS_STATE_INIT_ACTIVE);
1830
1831 m = m_gethdr(M_WAIT, MT_DATA);
1832 if (sizeof(*wgmi) > MHLEN) {
1833 m_clget(m, M_WAIT);
1834 CTASSERT(sizeof(*wgmi) <= MCLBYTES);
1835 }
1836 m->m_pkthdr.len = m->m_len = sizeof(*wgmi);
1837 wgmi = mtod(m, struct wg_msg_init *);
1838 wg_fill_msg_init(wg, wgp, wgs, wgmi);
1839
1840 error = wg->wg_ops->send_hs_msg(wgp, m);
1841 if (error == 0) {
1842 WG_TRACE("init msg sent");
1843
1844 if (wgp->wgp_handshake_start_time == 0)
1845 wgp->wgp_handshake_start_time = time_uptime;
1846 callout_schedule(&wgp->wgp_handshake_timeout_timer,
1847 MIN(wg_rekey_timeout, (unsigned)(INT_MAX / hz)) * hz);
1848 } else {
1849 wg_put_session_index(wg, wgs);
1850 /* Initiation failed; toss packet waiting for it if any. */
1851 m = atomic_swap_ptr(&wgp->wgp_pending, NULL);
1852 m_freem(m);
1853 }
1854
1855 return error;
1856 }
1857
1858 static void
1859 wg_fill_msg_resp(struct wg_softc *wg, struct wg_peer *wgp,
1860 struct wg_session *wgs, struct wg_msg_resp *wgmr,
1861 const struct wg_msg_init *wgmi)
1862 {
1863 uint8_t ckey[WG_CHAINING_KEY_LEN]; /* [W] 5.4.3: Cr */
1864 uint8_t hash[WG_HASH_LEN]; /* [W] 5.4.3: Hr */
1865 uint8_t cipher_key[WG_KDF_OUTPUT_LEN];
1866 uint8_t pubkey[WG_EPHEMERAL_KEY_LEN];
1867 uint8_t privkey[WG_EPHEMERAL_KEY_LEN];
1868
1869 KASSERT(mutex_owned(wgp->wgp_lock));
1870 KASSERT(wgs == wgp->wgp_session_unstable);
1871 KASSERTMSG(wgs->wgs_state == WGS_STATE_UNKNOWN, "state=%d",
1872 wgs->wgs_state);
1873
1874 memcpy(hash, wgs->wgs_handshake_hash, sizeof(hash));
1875 memcpy(ckey, wgs->wgs_chaining_key, sizeof(ckey));
1876
1877 wgmr->wgmr_type = htole32(WG_MSG_TYPE_RESP);
1878 wgmr->wgmr_sender = wgs->wgs_local_index;
1879 wgmr->wgmr_receiver = wgmi->wgmi_sender;
1880
1881 /* [W] 5.4.3 Second Message: Responder to Initiator */
1882
1883 /* [N] 2.2: "e" */
1884 /* Er^priv, Er^pub := DH-GENERATE() */
1885 wg_algo_generate_keypair(pubkey, privkey);
1886 /* Cr := KDF1(Cr, Er^pub) */
1887 wg_algo_kdf(ckey, NULL, NULL, ckey, pubkey, sizeof(pubkey));
1888 /* msg.ephemeral := Er^pub */
1889 memcpy(wgmr->wgmr_ephemeral, pubkey, sizeof(wgmr->wgmr_ephemeral));
1890 /* Hr := HASH(Hr || msg.ephemeral) */
1891 wg_algo_hash(hash, pubkey, sizeof(pubkey));
1892
1893 WG_DUMP_HASH("ckey", ckey);
1894 WG_DUMP_HASH("hash", hash);
1895
1896 /* [N] 2.2: "ee" */
1897 /* Cr := KDF1(Cr, DH(Er^priv, Ei^pub)) */
1898 wg_algo_dh_kdf(ckey, NULL, privkey, wgs->wgs_ephemeral_key_peer);
1899
1900 /* [N] 2.2: "se" */
1901 /* Cr := KDF1(Cr, DH(Er^priv, Si^pub)) */
1902 wg_algo_dh_kdf(ckey, NULL, privkey, wgp->wgp_pubkey);
1903
1904 /* [N] 9.2: "psk" */
1905 {
1906 uint8_t kdfout[WG_KDF_OUTPUT_LEN];
1907 /* Cr, r, k := KDF3(Cr, Q) */
1908 wg_algo_kdf(ckey, kdfout, cipher_key, ckey, wgp->wgp_psk,
1909 sizeof(wgp->wgp_psk));
1910 /* Hr := HASH(Hr || r) */
1911 wg_algo_hash(hash, kdfout, sizeof(kdfout));
1912 }
1913
1914 /* msg.empty := AEAD(k, 0, e, Hr) */
1915 wg_algo_aead_enc(wgmr->wgmr_empty, sizeof(wgmr->wgmr_empty),
1916 cipher_key, 0, NULL, 0, hash, sizeof(hash));
1917 /* Hr := HASH(Hr || msg.empty) */
1918 wg_algo_hash(hash, wgmr->wgmr_empty, sizeof(wgmr->wgmr_empty));
1919
1920 WG_DUMP_HASH("wgmr_empty", wgmr->wgmr_empty);
1921
1922 /* [W] 5.4.4: Cookie MACs */
1923 /* msg.mac1 := MAC(HASH(LABEL-MAC1 || Sm'^pub), msg_a) */
1924 	wg_algo_mac_mac1(wgmr->wgmr_mac1, sizeof(wgmr->wgmr_mac1),
1925 wgp->wgp_pubkey, sizeof(wgp->wgp_pubkey),
1926 (const uint8_t *)wgmr, offsetof(struct wg_msg_resp, wgmr_mac1));
1927 /* Need mac1 to decrypt a cookie from a cookie message */
1928 memcpy(wgp->wgp_last_sent_mac1, wgmr->wgmr_mac1,
1929 sizeof(wgp->wgp_last_sent_mac1));
1930 wgp->wgp_last_sent_mac1_valid = true;
1931
1932 if (wgp->wgp_latest_cookie_time == 0 ||
1933 (time_uptime - wgp->wgp_latest_cookie_time) >= WG_COOKIE_TIME)
1934 /* msg.mac2 := 0^16 */
1935 memset(wgmr->wgmr_mac2, 0, sizeof(wgmr->wgmr_mac2));
1936 else {
1937 /* msg.mac2 := MAC(Lm, msg_b) */
1938 		wg_algo_mac(wgmr->wgmr_mac2, sizeof(wgmr->wgmr_mac2),
1939 wgp->wgp_latest_cookie, WG_COOKIE_LEN,
1940 (const uint8_t *)wgmr,
1941 offsetof(struct wg_msg_resp, wgmr_mac2),
1942 NULL, 0);
1943 }
1944
1945 memcpy(wgs->wgs_handshake_hash, hash, sizeof(hash));
1946 memcpy(wgs->wgs_chaining_key, ckey, sizeof(ckey));
1947 memcpy(wgs->wgs_ephemeral_key_pub, pubkey, sizeof(pubkey));
1948 memcpy(wgs->wgs_ephemeral_key_priv, privkey, sizeof(privkey));
1949 wgs->wgs_remote_index = wgmi->wgmi_sender;
1950 WG_DLOG("sender=%x\n", wgs->wgs_local_index);
1951 WG_DLOG("receiver=%x\n", wgs->wgs_remote_index);
1952 }
1953
1954 static void
1955 wg_swap_sessions(struct wg_peer *wgp)
1956 {
1957 struct wg_session *wgs, *wgs_prev;
1958
1959 KASSERT(mutex_owned(wgp->wgp_lock));
1960
1961 wgs = wgp->wgp_session_unstable;
1962 KASSERTMSG(wgs->wgs_state == WGS_STATE_ESTABLISHED, "state=%d",
1963 wgs->wgs_state);
1964
1965 wgs_prev = wgp->wgp_session_stable;
1966 KASSERTMSG((wgs_prev->wgs_state == WGS_STATE_ESTABLISHED ||
1967 wgs_prev->wgs_state == WGS_STATE_UNKNOWN),
1968 "state=%d", wgs_prev->wgs_state);
1969 atomic_store_release(&wgp->wgp_session_stable, wgs);
1970 wgp->wgp_session_unstable = wgs_prev;
1971 }
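
/*
 * Summary of the session state machine as implemented around this
 * swap (a reading aid; the transitions themselves are performed by
 * the callers noted in parentheses):
 *
 *	UNKNOWN      -> INIT_ACTIVE   (wg_send_handshake_msg_init)
 *	UNKNOWN      -> INIT_PASSIVE  (wg_handle_msg_init)
 *	INIT_ACTIVE  -> ESTABLISHED   (wg_handle_msg_resp)
 *	INIT_PASSIVE -> ESTABLISHED   (wg_task_establish_session)
 *	ESTABLISHED  -> DESTROYING    (when swapped out as wgs_prev)
 *	DESTROYING/INIT_* -> UNKNOWN  (wg_put_session_index)
 *
 * wg_swap_sessions only exchanges the stable/unstable pointers; it
 * does not change any wgs_state itself.
 */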
1972
1973 static void __noinline
1974 wg_handle_msg_resp(struct wg_softc *wg, const struct wg_msg_resp *wgmr,
1975 const struct sockaddr *src)
1976 {
1977 uint8_t ckey[WG_CHAINING_KEY_LEN]; /* [W] 5.4.3: Cr */
1978 	uint8_t hash[WG_HASH_LEN];	/* [W] 5.4.3: Hr */
1979 uint8_t cipher_key[WG_KDF_OUTPUT_LEN];
1980 struct wg_peer *wgp;
1981 struct wg_session *wgs;
1982 struct psref psref;
1983 int error;
1984 uint8_t mac1[WG_MAC_LEN];
1985 struct wg_session *wgs_prev;
1986 struct mbuf *m;
1987
1988 wg_algo_mac_mac1(mac1, sizeof(mac1),
1989 wg->wg_pubkey, sizeof(wg->wg_pubkey),
1990 (const uint8_t *)wgmr, offsetof(struct wg_msg_resp, wgmr_mac1));
1991
1992 /*
1993 * [W] 5.3: Denial of Service Mitigation & Cookies
1994 * "the responder, ..., must always reject messages with an invalid
1995 * msg.mac1"
1996 */
1997 if (!consttime_memequal(mac1, wgmr->wgmr_mac1, sizeof(mac1))) {
1998 WG_DLOG("mac1 is invalid\n");
1999 return;
2000 }
2001
2002 WG_TRACE("resp msg received");
2003 wgs = wg_lookup_session_by_index(wg, wgmr->wgmr_receiver, &psref);
2004 if (wgs == NULL) {
2005 WG_TRACE("No session found");
2006 return;
2007 }
2008
2009 wgp = wgs->wgs_peer;
2010
2011 mutex_enter(wgp->wgp_lock);
2012
2013 /* If we weren't waiting for a handshake response, drop it. */
2014 if (wgs->wgs_state != WGS_STATE_INIT_ACTIVE) {
2015 WG_TRACE("peer sent spurious handshake response, ignoring");
2016 goto out;
2017 }
2018
2019 if (__predict_false(wg_is_underload(wg, wgp, WG_MSG_TYPE_RESP))) {
2020 WG_TRACE("under load");
2021 /*
2022 * [W] 5.3: Denial of Service Mitigation & Cookies
2023 * "the responder, ..., and when under load may reject messages
2024 * with an invalid msg.mac2. If the responder receives a
2025 * message with a valid msg.mac1 yet with an invalid msg.mac2,
2026 * and is under load, it may respond with a cookie reply
2027 * message"
2028 */
2029 uint8_t zero[WG_MAC_LEN] = {0};
2030 if (consttime_memequal(wgmr->wgmr_mac2, zero, sizeof(zero))) {
2031 WG_TRACE("sending a cookie message: no cookie included");
2032 (void)wg_send_cookie_msg(wg, wgp, wgmr->wgmr_sender,
2033 wgmr->wgmr_mac1, src);
2034 goto out;
2035 }
2036 if (!wgp->wgp_last_sent_cookie_valid) {
2037 WG_TRACE("sending a cookie message: no cookie sent ever");
2038 (void)wg_send_cookie_msg(wg, wgp, wgmr->wgmr_sender,
2039 wgmr->wgmr_mac1, src);
2040 goto out;
2041 }
2042 uint8_t mac2[WG_MAC_LEN];
2043 wg_algo_mac(mac2, sizeof(mac2), wgp->wgp_last_sent_cookie,
2044 WG_COOKIE_LEN, (const uint8_t *)wgmr,
2045 offsetof(struct wg_msg_resp, wgmr_mac2), NULL, 0);
2046 if (!consttime_memequal(mac2, wgmr->wgmr_mac2, sizeof(mac2))) {
2047 WG_DLOG("mac2 is invalid\n");
2048 goto out;
2049 }
2050 		WG_TRACE("under load, but continuing to send");
2051 }
2052
2053 memcpy(hash, wgs->wgs_handshake_hash, sizeof(hash));
2054 memcpy(ckey, wgs->wgs_chaining_key, sizeof(ckey));
2055
2056 /*
2057 * [W] 5.4.3 Second Message: Responder to Initiator
2058 * "When the initiator receives this message, it does the same
2059 * operations so that its final state variables are identical,
2060 * replacing the operands of the DH function to produce equivalent
2061 * values."
2062 	 * Note that the comments on the following operations are just
2063 	 * copies of the ones in wg_fill_msg_resp above.
2064 */
2065
2066 /* [N] 2.2: "e" */
2067 /* Cr := KDF1(Cr, Er^pub) */
2068 wg_algo_kdf(ckey, NULL, NULL, ckey, wgmr->wgmr_ephemeral,
2069 sizeof(wgmr->wgmr_ephemeral));
2070 /* Hr := HASH(Hr || msg.ephemeral) */
2071 wg_algo_hash(hash, wgmr->wgmr_ephemeral, sizeof(wgmr->wgmr_ephemeral));
2072
2073 WG_DUMP_HASH("ckey", ckey);
2074 WG_DUMP_HASH("hash", hash);
2075
2076 /* [N] 2.2: "ee" */
2077 /* Cr := KDF1(Cr, DH(Er^priv, Ei^pub)) */
2078 wg_algo_dh_kdf(ckey, NULL, wgs->wgs_ephemeral_key_priv,
2079 wgmr->wgmr_ephemeral);
2080
2081 /* [N] 2.2: "se" */
2082 /* Cr := KDF1(Cr, DH(Er^priv, Si^pub)) */
2083 wg_algo_dh_kdf(ckey, NULL, wg->wg_privkey, wgmr->wgmr_ephemeral);
2084
2085 /* [N] 9.2: "psk" */
2086 {
2087 uint8_t kdfout[WG_KDF_OUTPUT_LEN];
2088 /* Cr, r, k := KDF3(Cr, Q) */
2089 wg_algo_kdf(ckey, kdfout, cipher_key, ckey, wgp->wgp_psk,
2090 sizeof(wgp->wgp_psk));
2091 /* Hr := HASH(Hr || r) */
2092 wg_algo_hash(hash, kdfout, sizeof(kdfout));
2093 }
2094
2095 {
2096 uint8_t out[sizeof(wgmr->wgmr_empty)]; /* for safety */
2097 /* msg.empty := AEAD(k, 0, e, Hr) */
2098 error = wg_algo_aead_dec(out, 0, cipher_key, 0, wgmr->wgmr_empty,
2099 sizeof(wgmr->wgmr_empty), hash, sizeof(hash));
2100 WG_DUMP_HASH("wgmr_empty", wgmr->wgmr_empty);
2101 if (error != 0) {
2102 WG_LOG_RATECHECK(&wgp->wgp_ppsratecheck, LOG_DEBUG,
2103 "%s: peer %s: wg_algo_aead_dec for empty message failed\n",
2104 if_name(&wg->wg_if), wgp->wgp_name);
2105 goto out;
2106 }
2107 /* Hr := HASH(Hr || msg.empty) */
2108 wg_algo_hash(hash, wgmr->wgmr_empty, sizeof(wgmr->wgmr_empty));
2109 }
2110
2111 memcpy(wgs->wgs_handshake_hash, hash, sizeof(wgs->wgs_handshake_hash));
2112 memcpy(wgs->wgs_chaining_key, ckey, sizeof(wgs->wgs_chaining_key));
2113 wgs->wgs_remote_index = wgmr->wgmr_sender;
2114 WG_DLOG("receiver=%x\n", wgs->wgs_remote_index);
2115
2116 KASSERTMSG(wgs->wgs_state == WGS_STATE_INIT_ACTIVE, "state=%d",
2117 wgs->wgs_state);
2118 wgs->wgs_time_established = time_uptime;
2119 wg_schedule_session_dtor_timer(wgp);
2120 wgs->wgs_time_last_data_sent = 0;
2121 wgs->wgs_is_initiator = true;
2122 WG_DLOG("session[L=%"PRIx32" R=%"PRIx32"]:"
2123 " calculate keys as initiator\n",
2124 wgs->wgs_local_index, wgs->wgs_remote_index);
2125 wg_calculate_keys(wgs, true);
2126 wg_clear_states(wgs);
2127
2128 /*
2129 * Session is ready to receive data now that we have received
2130 * the responder's response.
2131 *
2132 * Transition from INIT_ACTIVE to ESTABLISHED to publish it to
2133 * the data rx path, wg_handle_msg_data.
2134 */
2135 	WG_DLOG("session[L=%"PRIx32" R=%"PRIx32"] -> WGS_STATE_ESTABLISHED\n",
2136 wgs->wgs_local_index, wgs->wgs_remote_index);
2137 atomic_store_release(&wgs->wgs_state, WGS_STATE_ESTABLISHED);
2138 WG_TRACE("WGS_STATE_ESTABLISHED");
2139
2140 callout_halt(&wgp->wgp_handshake_timeout_timer, NULL);
2141
2142 /*
2143 * Session is ready to send data now that we have received the
2144 * responder's response.
2145 *
2146 * Swap the sessions to publish the new one as the stable
2147 * session for the data tx path, wg_output.
2148 */
2149 wg_swap_sessions(wgp);
2150 KASSERT(wgs == wgp->wgp_session_stable);
2151 wgs_prev = wgp->wgp_session_unstable;
2152 getnanotime(&wgp->wgp_last_handshake_time);
2153 wgp->wgp_handshake_start_time = 0;
2154 wgp->wgp_last_sent_mac1_valid = false;
2155 wgp->wgp_last_sent_cookie_valid = false;
2156
2157 wg_update_endpoint_if_necessary(wgp, src);
2158
2159 /*
2160 * If we had a data packet queued up, send it; otherwise send a
2161 * keepalive message -- either way we have to send something
2162 * immediately or else the responder will never answer.
2163 */
2164 if ((m = atomic_swap_ptr(&wgp->wgp_pending, NULL)) != NULL) {
2165 kpreempt_disable();
2166 const uint32_t h = curcpu()->ci_index; // pktq_rps_hash(m)
2167 M_SETCTX(m, wgp);
2168 if (__predict_false(!pktq_enqueue(wg_pktq, m, h))) {
2169 WGLOG(LOG_ERR, "%s: pktq full, dropping\n",
2170 if_name(&wg->wg_if));
2171 m_freem(m);
2172 }
2173 kpreempt_enable();
2174 } else {
2175 wg_send_keepalive_msg(wgp, wgs);
2176 }
2177
2178 if (wgs_prev->wgs_state == WGS_STATE_ESTABLISHED) {
2179 /*
2180 * Transition ESTABLISHED->DESTROYING. The session
2181 * will remain usable for the data rx path to process
2182 * packets still in flight to us, but we won't use it
2183 * for data tx.
2184 */
2185 WG_DLOG("session[L=%"PRIx32" R=%"PRIx32"]"
2186 " -> WGS_STATE_DESTROYING\n",
2187 wgs_prev->wgs_local_index, wgs_prev->wgs_remote_index);
2188 atomic_store_relaxed(&wgs_prev->wgs_state,
2189 WGS_STATE_DESTROYING);
2190 } else {
2191 KASSERTMSG(wgs_prev->wgs_state == WGS_STATE_UNKNOWN,
2192 "state=%d", wgs_prev->wgs_state);
2193 }
2194
2195 out:
2196 mutex_exit(wgp->wgp_lock);
2197 wg_put_session(wgs, &psref);
2198 }
2199
2200 static int
2201 wg_send_handshake_msg_resp(struct wg_softc *wg, struct wg_peer *wgp,
2202 struct wg_session *wgs, const struct wg_msg_init *wgmi)
2203 {
2204 int error;
2205 struct mbuf *m;
2206 struct wg_msg_resp *wgmr;
2207
2208 KASSERT(mutex_owned(wgp->wgp_lock));
2209 KASSERT(wgs == wgp->wgp_session_unstable);
2210 KASSERTMSG(wgs->wgs_state == WGS_STATE_UNKNOWN, "state=%d",
2211 wgs->wgs_state);
2212
2213 m = m_gethdr(M_WAIT, MT_DATA);
2214 if (sizeof(*wgmr) > MHLEN) {
2215 m_clget(m, M_WAIT);
2216 CTASSERT(sizeof(*wgmr) <= MCLBYTES);
2217 }
2218 m->m_pkthdr.len = m->m_len = sizeof(*wgmr);
2219 wgmr = mtod(m, struct wg_msg_resp *);
2220 wg_fill_msg_resp(wg, wgp, wgs, wgmr, wgmi);
2221
2222 error = wg->wg_ops->send_hs_msg(wgp, m);
2223 if (error == 0)
2224 WG_TRACE("resp msg sent");
2225 return error;
2226 }
2227
2228 static struct wg_peer *
2229 wg_lookup_peer_by_pubkey(struct wg_softc *wg,
2230 const uint8_t pubkey[WG_STATIC_KEY_LEN], struct psref *psref)
2231 {
2232 struct wg_peer *wgp;
2233
2234 int s = pserialize_read_enter();
2235 wgp = thmap_get(wg->wg_peers_bypubkey, pubkey, WG_STATIC_KEY_LEN);
2236 if (wgp != NULL)
2237 wg_get_peer(wgp, psref);
2238 pserialize_read_exit(s);
2239
2240 return wgp;
2241 }
2242
2243 static void
2244 wg_fill_msg_cookie(struct wg_softc *wg, struct wg_peer *wgp,
2245 struct wg_msg_cookie *wgmc, const uint32_t sender,
2246 const uint8_t mac1[WG_MAC_LEN], const struct sockaddr *src)
2247 {
2248 uint8_t cookie[WG_COOKIE_LEN];
2249 uint8_t key[WG_HASH_LEN];
2250 uint8_t addr[sizeof(struct in6_addr)];
2251 size_t addrlen;
2252 uint16_t uh_sport; /* be */
2253
2254 KASSERT(mutex_owned(wgp->wgp_lock));
2255
2256 wgmc->wgmc_type = htole32(WG_MSG_TYPE_COOKIE);
2257 wgmc->wgmc_receiver = sender;
2258 cprng_fast(wgmc->wgmc_salt, sizeof(wgmc->wgmc_salt));
2259
2260 /*
2261 * [W] 5.4.7: Under Load: Cookie Reply Message
2262 * "The secret variable, Rm, changes every two minutes to a
2263 * random value"
2264 */
2265 if ((time_uptime - wgp->wgp_last_cookiesecret_time) >
2266 WG_COOKIESECRET_TIME) {
2267 cprng_strong(kern_cprng, wgp->wgp_cookiesecret,
2268 sizeof(wgp->wgp_cookiesecret), 0);
2269 wgp->wgp_last_cookiesecret_time = time_uptime;
2270 }
2271
2272 switch (src->sa_family) {
2273 case AF_INET: {
2274 const struct sockaddr_in *sin = satocsin(src);
2275 addrlen = sizeof(sin->sin_addr);
2276 memcpy(addr, &sin->sin_addr, addrlen);
2277 uh_sport = sin->sin_port;
2278 break;
2279 }
2280 #ifdef INET6
2281 case AF_INET6: {
2282 const struct sockaddr_in6 *sin6 = satocsin6(src);
2283 addrlen = sizeof(sin6->sin6_addr);
2284 memcpy(addr, &sin6->sin6_addr, addrlen);
2285 uh_sport = sin6->sin6_port;
2286 break;
2287 }
2288 #endif
2289 default:
2290 panic("invalid af=%d", src->sa_family);
2291 }
2292
2293 wg_algo_mac(cookie, sizeof(cookie),
2294 wgp->wgp_cookiesecret, sizeof(wgp->wgp_cookiesecret),
2295 addr, addrlen, (const uint8_t *)&uh_sport, sizeof(uh_sport));
2296 wg_algo_mac_cookie(key, sizeof(key), wg->wg_pubkey,
2297 sizeof(wg->wg_pubkey));
2298 wg_algo_xaead_enc(wgmc->wgmc_cookie, sizeof(wgmc->wgmc_cookie), key,
2299 cookie, sizeof(cookie), mac1, WG_MAC_LEN, wgmc->wgmc_salt);
2300
2301 /* Need to store to calculate mac2 */
2302 memcpy(wgp->wgp_last_sent_cookie, cookie, sizeof(cookie));
2303 wgp->wgp_last_sent_cookie_valid = true;
2304 }
2305
2306 static int
2307 wg_send_cookie_msg(struct wg_softc *wg, struct wg_peer *wgp,
2308 const uint32_t sender, const uint8_t mac1[WG_MAC_LEN],
2309 const struct sockaddr *src)
2310 {
2311 int error;
2312 struct mbuf *m;
2313 struct wg_msg_cookie *wgmc;
2314
2315 KASSERT(mutex_owned(wgp->wgp_lock));
2316
2317 m = m_gethdr(M_WAIT, MT_DATA);
2318 if (sizeof(*wgmc) > MHLEN) {
2319 m_clget(m, M_WAIT);
2320 CTASSERT(sizeof(*wgmc) <= MCLBYTES);
2321 }
2322 m->m_pkthdr.len = m->m_len = sizeof(*wgmc);
2323 wgmc = mtod(m, struct wg_msg_cookie *);
2324 wg_fill_msg_cookie(wg, wgp, wgmc, sender, mac1, src);
2325
2326 error = wg->wg_ops->send_hs_msg(wgp, m);
2327 if (error == 0)
2328 WG_TRACE("cookie msg sent");
2329 return error;
2330 }
2331
2332 static bool
2333 wg_is_underload(struct wg_softc *wg, struct wg_peer *wgp, int msgtype)
2334 {
2335 #ifdef WG_DEBUG_PARAMS
2336 if (wg_force_underload)
2337 return true;
2338 #endif
2339
2340 /*
2341 	 * XXX we don't have a means of load estimation.  The purpose of
2342 	 * the mechanism is DoS mitigation, so we consider frequent handshake
2343 	 * messages as (a kind of) load; if a message of the same type arrives
2344 	 * from a peer within 1 second, we consider ourselves under load.
2345 */
2346 time_t last = wgp->wgp_last_msg_received_time[msgtype];
2347 wgp->wgp_last_msg_received_time[msgtype] = time_uptime;
2348 return (time_uptime - last) == 0;
2349 }
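
/*
 * Illustration of the heuristic above, with hypothetical timestamps
 * for one message type: messages at t=10s and t=10s (same second of
 * uptime) => under load; messages at t=10s and t=12s => not under
 * load.  The timestamp is updated on every call, so a steady stream
 * of such messages keeps the peer considered loaded.
 */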
2350
2351 static void
2352 wg_calculate_keys(struct wg_session *wgs, const bool initiator)
2353 {
2354
2355 KASSERT(mutex_owned(wgs->wgs_peer->wgp_lock));
2356
2357 /*
2358 * [W] 5.4.5: Ti^send = Tr^recv, Ti^recv = Tr^send := KDF2(Ci = Cr, e)
2359 */
2360 if (initiator) {
2361 wg_algo_kdf(wgs->wgs_tkey_send, wgs->wgs_tkey_recv, NULL,
2362 wgs->wgs_chaining_key, NULL, 0);
2363 } else {
2364 wg_algo_kdf(wgs->wgs_tkey_recv, wgs->wgs_tkey_send, NULL,
2365 wgs->wgs_chaining_key, NULL, 0);
2366 }
2367 WG_DUMP_HASH("wgs_tkey_send", wgs->wgs_tkey_send);
2368 WG_DUMP_HASH("wgs_tkey_recv", wgs->wgs_tkey_recv);
2369 }
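
/*
 * The initiator and responder differ only in the output order of the
 * KDF2 call above, so the transport keys pair up as in [W] 5.4.5:
 *
 *	initiator's wgs_tkey_send == responder's wgs_tkey_recv
 *	initiator's wgs_tkey_recv == responder's wgs_tkey_send
 */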
2370
2371 static uint64_t
2372 wg_session_get_send_counter(struct wg_session *wgs)
2373 {
2374 #ifdef __HAVE_ATOMIC64_LOADSTORE
2375 return atomic_load_relaxed(&wgs->wgs_send_counter);
2376 #else
2377 uint64_t send_counter;
2378
2379 mutex_enter(&wgs->wgs_send_counter_lock);
2380 send_counter = wgs->wgs_send_counter;
2381 mutex_exit(&wgs->wgs_send_counter_lock);
2382
2383 return send_counter;
2384 #endif
2385 }
2386
2387 static uint64_t
2388 wg_session_inc_send_counter(struct wg_session *wgs)
2389 {
2390 #ifdef __HAVE_ATOMIC64_LOADSTORE
2391 return atomic_inc_64_nv(&wgs->wgs_send_counter) - 1;
2392 #else
2393 uint64_t send_counter;
2394
2395 mutex_enter(&wgs->wgs_send_counter_lock);
2396 send_counter = wgs->wgs_send_counter++;
2397 mutex_exit(&wgs->wgs_send_counter_lock);
2398
2399 return send_counter;
2400 #endif
2401 }
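
/*
 * The pre-increment value returned above is used by the data tx path
 * (wg_send_data_msg) as the counter/nonce of the outgoing transport
 * data message; the rx path checks the peer's counter against the
 * sliding window in wg_handle_msg_data.  The atomic (or locked)
 * increment keeps concurrent senders from ever reusing a value.
 */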
2402
2403 static void
2404 wg_clear_states(struct wg_session *wgs)
2405 {
2406
2407 KASSERT(mutex_owned(wgs->wgs_peer->wgp_lock));
2408
2409 wgs->wgs_send_counter = 0;
2410 sliwin_reset(&wgs->wgs_recvwin->window);
2411
2412 #define wgs_clear(v) explicit_memset(wgs->wgs_##v, 0, sizeof(wgs->wgs_##v))
2413 wgs_clear(handshake_hash);
2414 wgs_clear(chaining_key);
2415 wgs_clear(ephemeral_key_pub);
2416 wgs_clear(ephemeral_key_priv);
2417 wgs_clear(ephemeral_key_peer);
2418 #undef wgs_clear
2419 }
2420
2421 static struct wg_session *
2422 wg_lookup_session_by_index(struct wg_softc *wg, const uint32_t index,
2423 struct psref *psref)
2424 {
2425 struct wg_session *wgs;
2426
2427 int s = pserialize_read_enter();
2428 wgs = thmap_get(wg->wg_sessions_byindex, &index, sizeof index);
2429 if (wgs != NULL) {
2430 uint32_t oindex __diagused =
2431 atomic_load_relaxed(&wgs->wgs_local_index);
2432 KASSERTMSG(index == oindex,
2433 "index=%"PRIx32" wgs->wgs_local_index=%"PRIx32,
2434 index, oindex);
2435 psref_acquire(psref, &wgs->wgs_psref, wg_psref_class);
2436 }
2437 pserialize_read_exit(s);
2438
2439 return wgs;
2440 }
2441
2442 static void
2443 wg_send_keepalive_msg(struct wg_peer *wgp, struct wg_session *wgs)
2444 {
2445 struct mbuf *m;
2446
2447 /*
2448 * [W] 6.5 Passive Keepalive
2449 * "A keepalive message is simply a transport data message with
2450 * a zero-length encapsulated encrypted inner-packet."
2451 */
2452 WG_TRACE("");
2453 m = m_gethdr(M_WAIT, MT_DATA);
2454 wg_send_data_msg(wgp, wgs, m);
2455 }
2456
2457 static bool
2458 wg_need_to_send_init_message(struct wg_session *wgs)
2459 {
2460 /*
2461 * [W] 6.2 Transport Message Limits
2462 * "if a peer is the initiator of a current secure session,
2463 * WireGuard will send a handshake initiation message to begin
2464 * a new secure session ... if after receiving a transport data
2465 	 * message, the current secure session is (REJECT-AFTER-TIME -
2466 	 * KEEPALIVE-TIMEOUT - REKEY-TIMEOUT) seconds old and it has
2467 * not yet acted upon this event."
2468 */
2469 return wgs->wgs_is_initiator && wgs->wgs_time_last_data_sent == 0 &&
2470 (time_uptime - wgs->wgs_time_established) >=
2471 (wg_reject_after_time - wg_keepalive_timeout - wg_rekey_timeout);
2472 }
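
/*
 * Worked example, assuming the paper's default timers (Rekey-Timeout
 * 5s, Keepalive-Timeout 10s, Reject-After-Time 180s): the threshold
 * above works out to 180 - 10 - 5 = 165 seconds after the session
 * was established.
 */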
2473
2474 static void
2475 wg_schedule_peer_task(struct wg_peer *wgp, unsigned int task)
2476 {
2477
2478 mutex_enter(wgp->wgp_intr_lock);
2479 WG_DLOG("tasks=%d, task=%d\n", wgp->wgp_tasks, task);
2480 if (wgp->wgp_tasks == 0)
2481 /*
2482 * XXX If the current CPU is already loaded -- e.g., if
2483 * there's already a bunch of handshakes queued up --
2484 * consider tossing this over to another CPU to
2485 * distribute the load.
2486 */
2487 workqueue_enqueue(wg_wq, &wgp->wgp_work, NULL);
2488 wgp->wgp_tasks |= task;
2489 mutex_exit(wgp->wgp_intr_lock);
2490 }
2491
2492 static void
2493 wg_change_endpoint(struct wg_peer *wgp, const struct sockaddr *new)
2494 {
2495 struct wg_sockaddr *wgsa_prev;
2496
2497 WG_TRACE("Changing endpoint");
2498
2499 memcpy(wgp->wgp_endpoint0, new, new->sa_len);
2500 wgsa_prev = wgp->wgp_endpoint;
2501 atomic_store_release(&wgp->wgp_endpoint, wgp->wgp_endpoint0);
2502 wgp->wgp_endpoint0 = wgsa_prev;
2503 atomic_store_release(&wgp->wgp_endpoint_available, true);
2504
2505 wg_schedule_peer_task(wgp, WGP_TASK_ENDPOINT_CHANGED);
2506 }
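
/*
 * wgp_endpoint and wgp_endpoint0 form a two-slot buffer: readers take
 * a psref on the currently published slot (wg_get_endpoint_sa) while
 * the writer above overwrites the spare slot and swaps it in.  The
 * displaced slot is only recycled after wg_task_endpoint_changed has
 * run pserialize_perform and reinitialized its psref target.
 */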
2507
2508 static bool
2509 wg_validate_inner_packet(const char *packet, size_t decrypted_len, int *af)
2510 {
2511 uint16_t packet_len;
2512 const struct ip *ip;
2513
2514 if (__predict_false(decrypted_len < sizeof(*ip))) {
2515 WG_DLOG("decrypted_len=%zu < %zu\n", decrypted_len,
2516 sizeof(*ip));
2517 return false;
2518 }
2519
2520 ip = (const struct ip *)packet;
2521 if (ip->ip_v == 4)
2522 *af = AF_INET;
2523 else if (ip->ip_v == 6)
2524 *af = AF_INET6;
2525 else {
2526 WG_DLOG("ip_v=%d\n", ip->ip_v);
2527 return false;
2528 }
2529
2530 WG_DLOG("af=%d\n", *af);
2531
2532 switch (*af) {
2533 #ifdef INET
2534 case AF_INET:
2535 packet_len = ntohs(ip->ip_len);
2536 break;
2537 #endif
2538 #ifdef INET6
2539 case AF_INET6: {
2540 const struct ip6_hdr *ip6;
2541
2542 if (__predict_false(decrypted_len < sizeof(*ip6))) {
2543 WG_DLOG("decrypted_len=%zu < %zu\n", decrypted_len,
2544 sizeof(*ip6));
2545 return false;
2546 }
2547
2548 ip6 = (const struct ip6_hdr *)packet;
2549 packet_len = sizeof(*ip6) + ntohs(ip6->ip6_plen);
2550 break;
2551 }
2552 #endif
2553 default:
2554 return false;
2555 }
2556
2557 if (packet_len > decrypted_len) {
2558 WG_DLOG("packet_len %u > decrypted_len %zu\n", packet_len,
2559 decrypted_len);
2560 return false;
2561 }
2562
2563 return true;
2564 }
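
/*
 * Note that packet_len may legitimately be smaller than decrypted_len:
 * the sender pads the inner packet to a multiple of 16 bytes per
 * [W] 5.4.6, so only packet_len > decrypted_len is treated as an
 * error above.
 */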
2565
2566 static bool
2567 wg_validate_route(struct wg_softc *wg, struct wg_peer *wgp_expected,
2568 int af, char *packet)
2569 {
2570 struct sockaddr_storage ss;
2571 struct sockaddr *sa;
2572 struct psref psref;
2573 struct wg_peer *wgp;
2574 bool ok;
2575
2576 /*
2577 * II CRYPTOKEY ROUTING
2578 * "it will only accept it if its source IP resolves in the
2579 * table to the public key used in the secure session for
2580 * decrypting it."
2581 */
2582
2583 if (af == AF_INET) {
2584 const struct ip *ip = (const struct ip *)packet;
2585 struct sockaddr_in *sin = (struct sockaddr_in *)&ss;
2586 sockaddr_in_init(sin, &ip->ip_src, 0);
2587 sa = sintosa(sin);
2588 #ifdef INET6
2589 } else {
2590 const struct ip6_hdr *ip6 = (const struct ip6_hdr *)packet;
2591 struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&ss;
2592 sockaddr_in6_init(sin6, &ip6->ip6_src, 0, 0, 0);
2593 sa = sin6tosa(sin6);
2594 #endif
2595 }
2596
2597 wgp = wg_pick_peer_by_sa(wg, sa, &psref);
2598 ok = (wgp == wgp_expected);
2599 if (wgp != NULL)
2600 wg_put_peer(wgp, &psref);
2601
2602 return ok;
2603 }
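
/*
 * Example of the check above (addresses purely illustrative): if peer
 * A's allowed IPs contain only 10.0.0.2/32, a packet decrypted on A's
 * session with inner source 10.0.0.2 passes, while inner source
 * 10.0.0.3 resolves to a different peer (or to none) and the caller
 * drops the packet.
 */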
2604
2605 static void
2606 wg_session_dtor_timer(void *arg)
2607 {
2608 struct wg_peer *wgp = arg;
2609
2610 WG_TRACE("enter");
2611
2612 wg_schedule_session_dtor_timer(wgp);
2613 wg_schedule_peer_task(wgp, WGP_TASK_DESTROY_PREV_SESSION);
2614 }
2615
2616 static void
2617 wg_schedule_session_dtor_timer(struct wg_peer *wgp)
2618 {
2619
2620 /*
2621 * If the periodic session destructor is already pending to
2622 * handle the previous session, that's fine -- leave it in
2623 * place; it will be scheduled again.
2624 */
2625 if (callout_pending(&wgp->wgp_session_dtor_timer)) {
2626 WG_DLOG("session dtor already pending\n");
2627 return;
2628 }
2629
2630 WG_DLOG("scheduling session dtor in %u secs\n", wg_reject_after_time);
2631 callout_schedule(&wgp->wgp_session_dtor_timer,
2632 wg_reject_after_time*hz);
2633 }
2634
2635 static bool
2636 sockaddr_port_match(const struct sockaddr *sa1, const struct sockaddr *sa2)
2637 {
2638 if (sa1->sa_family != sa2->sa_family)
2639 return false;
2640
2641 switch (sa1->sa_family) {
2642 #ifdef INET
2643 case AF_INET:
2644 return satocsin(sa1)->sin_port == satocsin(sa2)->sin_port;
2645 #endif
2646 #ifdef INET6
2647 case AF_INET6:
2648 return satocsin6(sa1)->sin6_port == satocsin6(sa2)->sin6_port;
2649 #endif
2650 default:
2651 return false;
2652 }
2653 }
2654
2655 static void
2656 wg_update_endpoint_if_necessary(struct wg_peer *wgp,
2657 const struct sockaddr *src)
2658 {
2659 struct wg_sockaddr *wgsa;
2660 struct psref psref;
2661
2662 wgsa = wg_get_endpoint_sa(wgp, &psref);
2663
2664 #ifdef WG_DEBUG_LOG
2665 char oldaddr[128], newaddr[128];
2666 sockaddr_format(wgsatosa(wgsa), oldaddr, sizeof(oldaddr));
2667 sockaddr_format(src, newaddr, sizeof(newaddr));
2668 WG_DLOG("old=%s, new=%s\n", oldaddr, newaddr);
2669 #endif
2670
2671 /*
2672 * III: "Since the packet has authenticated correctly, the source IP of
2673 * the outer UDP/IP packet is used to update the endpoint for peer..."
2674 */
2675 if (__predict_false(sockaddr_cmp(src, wgsatosa(wgsa)) != 0 ||
2676 !sockaddr_port_match(src, wgsatosa(wgsa)))) {
2677 /* XXX We can't change the endpoint twice in a short period */
2678 if (atomic_swap_uint(&wgp->wgp_endpoint_changing, 1) == 0) {
2679 wg_change_endpoint(wgp, src);
2680 }
2681 }
2682
2683 wg_put_sa(wgp, wgsa, &psref);
2684 }
2685
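/*
 * wg_handle_msg_data processes an incoming transport data message:
 * look up the session by receiver index, check its state and age, do
 * a cheap receive-window pre-check, decrypt and authenticate, commit
 * the counter to the window, validate the inner IP header, update the
 * peer's endpoint, apply cryptokey routing, and finally hand the
 * inner packet to the network stack.  State-machine bookkeeping
 * (establishing INIT_PASSIVE sessions, keepalives, rekeys) happens at
 * the end, under update_state.
 */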
2686 static void __noinline
2687 wg_handle_msg_data(struct wg_softc *wg, struct mbuf *m,
2688 const struct sockaddr *src)
2689 {
2690 struct wg_msg_data *wgmd;
2691 char *encrypted_buf = NULL, *decrypted_buf;
2692 size_t encrypted_len, decrypted_len;
2693 struct wg_session *wgs;
2694 struct wg_peer *wgp;
2695 int state;
2696 time_t age;
2697 size_t mlen;
2698 struct psref psref;
2699 int error, af;
2700 bool success, free_encrypted_buf = false, ok;
2701 struct mbuf *n;
2702
2703 KASSERT(m->m_len >= sizeof(struct wg_msg_data));
2704 wgmd = mtod(m, struct wg_msg_data *);
2705
2706 KASSERT(wgmd->wgmd_type == htole32(WG_MSG_TYPE_DATA));
2707 WG_TRACE("data");
2708
2709 /* Find the putative session, or drop. */
2710 wgs = wg_lookup_session_by_index(wg, wgmd->wgmd_receiver, &psref);
2711 if (wgs == NULL) {
2712 WG_TRACE("No session found");
2713 m_freem(m);
2714 return;
2715 }
2716
2717 /*
2718 * We are only ready to handle data when in INIT_PASSIVE,
2719 * ESTABLISHED, or DESTROYING. All transitions out of that
2720 * state dissociate the session index and drain psrefs.
2721 *
2722 * atomic_load_acquire matches atomic_store_release in either
2723 * wg_handle_msg_init or wg_handle_msg_resp. (The transition
2724 * INIT_PASSIVE to ESTABLISHED in wg_task_establish_session
2725 * doesn't make a difference for this rx path.)
2726 */
2727 state = atomic_load_acquire(&wgs->wgs_state);
2728 switch (state) {
2729 case WGS_STATE_UNKNOWN:
2730 case WGS_STATE_INIT_ACTIVE:
2731 WG_TRACE("not yet ready for data");
2732 goto out;
2733 case WGS_STATE_INIT_PASSIVE:
2734 case WGS_STATE_ESTABLISHED:
2735 case WGS_STATE_DESTROYING:
2736 break;
2737 }
2738
2739 /*
2740 * Reject if the session is too old.
2741 */
2742 age = time_uptime - wgs->wgs_time_established;
2743 if (__predict_false(age >= wg_reject_after_time)) {
2744 WG_DLOG("session %"PRIx32" too old, %"PRIuMAX" sec\n",
2745 wgmd->wgmd_receiver, (uintmax_t)age);
2746 goto out;
2747 }
2748
2749 /*
2750 * Get the peer, for rate-limited logs (XXX MPSAFE, dtrace) and
2751 * to update the endpoint if authentication succeeds.
2752 */
2753 wgp = wgs->wgs_peer;
2754
2755 /*
2756 * Reject outrageously wrong sequence numbers before doing any
2757 * crypto work or taking any locks.
2758 */
2759 error = sliwin_check_fast(&wgs->wgs_recvwin->window,
2760 le64toh(wgmd->wgmd_counter));
2761 if (error) {
2762 WG_LOG_RATECHECK(&wgp->wgp_ppsratecheck, LOG_DEBUG,
2763 "%s: peer %s: out-of-window packet: %"PRIu64"\n",
2764 if_name(&wg->wg_if), wgp->wgp_name,
2765 le64toh(wgmd->wgmd_counter));
2766 goto out;
2767 }
2768
2769 /* Ensure the payload and authenticator are contiguous. */
2770 mlen = m_length(m);
2771 encrypted_len = mlen - sizeof(*wgmd);
2772 if (encrypted_len < WG_AUTHTAG_LEN) {
2773 WG_DLOG("Short encrypted_len: %zu\n", encrypted_len);
2774 goto out;
2775 }
2776 success = m_ensure_contig(&m, sizeof(*wgmd) + encrypted_len);
2777 if (success) {
2778 encrypted_buf = mtod(m, char *) + sizeof(*wgmd);
2779 } else {
2780 encrypted_buf = kmem_intr_alloc(encrypted_len, KM_NOSLEEP);
2781 if (encrypted_buf == NULL) {
2782 WG_DLOG("failed to allocate encrypted_buf\n");
2783 goto out;
2784 }
2785 m_copydata(m, sizeof(*wgmd), encrypted_len, encrypted_buf);
2786 free_encrypted_buf = true;
2787 }
2788 /* m_ensure_contig may change m regardless of its result */
2789 KASSERT(m->m_len >= sizeof(*wgmd));
2790 wgmd = mtod(m, struct wg_msg_data *);
2791
2792 #ifdef WG_DEBUG_PACKET
2793 if (wg_debug & WG_DEBUG_FLAGS_PACKET) {
2794 hexdump(printf, "incoming packet", encrypted_buf,
2795 encrypted_len);
2796 }
2797 #endif
2798 /*
2799 * Get a buffer for the plaintext. Add WG_AUTHTAG_LEN to avoid
2800 * a zero-length buffer (XXX). Drop if plaintext is longer
2801 * than MCLBYTES (XXX).
2802 */
2803 decrypted_len = encrypted_len - WG_AUTHTAG_LEN;
2804 if (decrypted_len > MCLBYTES) {
2805 /* FIXME handle larger data than MCLBYTES */
2806 WG_DLOG("couldn't handle larger data than MCLBYTES\n");
2807 goto out;
2808 }
2809 n = wg_get_mbuf(0, decrypted_len + WG_AUTHTAG_LEN);
2810 if (n == NULL) {
2811 WG_DLOG("wg_get_mbuf failed\n");
2812 goto out;
2813 }
2814 decrypted_buf = mtod(n, char *);
2815
2816 /* Decrypt and verify the packet. */
2817 WG_DLOG("mlen=%zu, encrypted_len=%zu\n", mlen, encrypted_len);
2818 error = wg_algo_aead_dec(decrypted_buf,
2819 encrypted_len - WG_AUTHTAG_LEN /* can be 0 */,
2820 wgs->wgs_tkey_recv, le64toh(wgmd->wgmd_counter), encrypted_buf,
2821 encrypted_len, NULL, 0);
2822 if (error != 0) {
2823 WG_LOG_RATECHECK(&wgp->wgp_ppsratecheck, LOG_DEBUG,
2824 "%s: peer %s: failed to wg_algo_aead_dec\n",
2825 if_name(&wg->wg_if), wgp->wgp_name);
2826 m_freem(n);
2827 goto out;
2828 }
2829 WG_DLOG("outsize=%u\n", (u_int)decrypted_len);
2830
2831 /* Packet is genuine. Reject it if a replay or just too old. */
2832 mutex_enter(&wgs->wgs_recvwin->lock);
2833 error = sliwin_update(&wgs->wgs_recvwin->window,
2834 le64toh(wgmd->wgmd_counter));
2835 mutex_exit(&wgs->wgs_recvwin->lock);
2836 if (error) {
2837 WG_LOG_RATECHECK(&wgp->wgp_ppsratecheck, LOG_DEBUG,
2838 "%s: peer %s: replay or out-of-window packet: %"PRIu64"\n",
2839 if_name(&wg->wg_if), wgp->wgp_name,
2840 le64toh(wgmd->wgmd_counter));
2841 m_freem(n);
2842 goto out;
2843 }
2844
2845 #ifdef WG_DEBUG_PACKET
2846 if (wg_debug & WG_DEBUG_FLAGS_PACKET) {
2847 hexdump(printf, "tkey_recv", wgs->wgs_tkey_recv,
2848 sizeof(wgs->wgs_tkey_recv));
2849 hexdump(printf, "wgmd", wgmd, sizeof(*wgmd));
2850 hexdump(printf, "decrypted_buf", decrypted_buf,
2851 decrypted_len);
2852 }
2853 #endif
2854 /* We're done with m now; free it and chuck the pointers. */
2855 m_freem(m);
2856 m = NULL;
2857 wgmd = NULL;
2858
2859 /*
2860 * Validate the encapsulated packet header and get the address
2861 * family, or drop.
2862 */
2863 ok = wg_validate_inner_packet(decrypted_buf, decrypted_len, &af);
2864 if (!ok) {
2865 m_freem(n);
2866 goto update_state;
2867 }
2868
2869 /*
2870 * The packet is genuine. Update the peer's endpoint if the
2871 * source address changed.
2872 *
2873 * XXX How to prevent DoS by replaying genuine packets from the
2874 * wrong source address?
2875 */
2876 wg_update_endpoint_if_necessary(wgp, src);
2877
2878 /* Submit it into our network stack if routable. */
2879 ok = wg_validate_route(wg, wgp, af, decrypted_buf);
2880 if (ok) {
2881 wg->wg_ops->input(&wg->wg_if, n, af);
2882 } else {
2883 char addrstr[INET6_ADDRSTRLEN];
2884 memset(addrstr, 0, sizeof(addrstr));
2885 if (af == AF_INET) {
2886 const struct ip *ip = (const struct ip *)decrypted_buf;
2887 IN_PRINT(addrstr, &ip->ip_src);
2888 #ifdef INET6
2889 } else if (af == AF_INET6) {
2890 const struct ip6_hdr *ip6 =
2891 (const struct ip6_hdr *)decrypted_buf;
2892 IN6_PRINT(addrstr, &ip6->ip6_src);
2893 #endif
2894 }
2895 WG_LOG_RATECHECK(&wgp->wgp_ppsratecheck, LOG_DEBUG,
2896 "%s: peer %s: invalid source address (%s)\n",
2897 if_name(&wg->wg_if), wgp->wgp_name, addrstr);
2898 m_freem(n);
2899 /*
2900 * The inner address is invalid however the session is valid
2901 		 * The inner address is invalid; however, the session is
2902 		 * valid, so continue the session processing below.
2903 }
2904 n = NULL;
2905
2906 update_state:
2907 /* Update the state machine if necessary. */
2908 if (__predict_false(state == WGS_STATE_INIT_PASSIVE)) {
2909 /*
2910 * We were waiting for the initiator to send their
2911 * first data transport message, and that has happened.
2912 * Schedule a task to establish this session.
2913 */
2914 wg_schedule_peer_task(wgp, WGP_TASK_ESTABLISH_SESSION);
2915 } else {
2916 if (__predict_false(wg_need_to_send_init_message(wgs))) {
2917 wg_schedule_peer_task(wgp, WGP_TASK_SEND_INIT_MESSAGE);
2918 }
2919 /*
2920 * [W] 6.5 Passive Keepalive
2921 * "If a peer has received a validly-authenticated transport
2922 * data message (section 5.4.6), but does not have any packets
2923 * itself to send back for KEEPALIVE-TIMEOUT seconds, it sends
2924 * a keepalive message."
2925 */
2926 WG_DLOG("time_uptime=%ju wgs_time_last_data_sent=%ju\n",
2927 (uintmax_t)time_uptime,
2928 (uintmax_t)wgs->wgs_time_last_data_sent);
2929 if ((time_uptime - wgs->wgs_time_last_data_sent) >=
2930 wg_keepalive_timeout) {
2931 WG_TRACE("Schedule sending keepalive message");
2932 /*
2933 			 * We can't send a keepalive message here because it
2934 			 * would deadlock: we already hold the solock of the
2935 			 * socket that would be used to send the message.
2936 */
2937 wg_schedule_peer_task(wgp,
2938 WGP_TASK_SEND_KEEPALIVE_MESSAGE);
2939 }
2940 }
2941 out:
2942 wg_put_session(wgs, &psref);
2943 m_freem(m);
2944 if (free_encrypted_buf)
2945 kmem_intr_free(encrypted_buf, encrypted_len);
2946 }
2947
2948 static void __noinline
2949 wg_handle_msg_cookie(struct wg_softc *wg, const struct wg_msg_cookie *wgmc)
2950 {
2951 struct wg_session *wgs;
2952 struct wg_peer *wgp;
2953 struct psref psref;
2954 int error;
2955 uint8_t key[WG_HASH_LEN];
2956 uint8_t cookie[WG_COOKIE_LEN];
2957
2958 WG_TRACE("cookie msg received");
2959
2960 /* Find the putative session. */
2961 wgs = wg_lookup_session_by_index(wg, wgmc->wgmc_receiver, &psref);
2962 if (wgs == NULL) {
2963 WG_TRACE("No session found");
2964 return;
2965 }
2966
2967 /* Lock the peer so we can update the cookie state. */
2968 wgp = wgs->wgs_peer;
2969 mutex_enter(wgp->wgp_lock);
2970
2971 if (!wgp->wgp_last_sent_mac1_valid) {
2972 WG_TRACE("No valid mac1 sent (or expired)");
2973 goto out;
2974 }
2975
2976 /*
2977 * wgp_last_sent_mac1_valid is only set to true when we are
2978 * transitioning to INIT_ACTIVE or INIT_PASSIVE, and always
2979 * cleared on transition out of them.
2980 */
2981 KASSERTMSG((wgs->wgs_state == WGS_STATE_INIT_ACTIVE ||
2982 wgs->wgs_state == WGS_STATE_INIT_PASSIVE),
2983 "state=%d", wgs->wgs_state);
2984
2985 /* Decrypt the cookie and store it for later handshake retry. */
2986 wg_algo_mac_cookie(key, sizeof(key), wgp->wgp_pubkey,
2987 sizeof(wgp->wgp_pubkey));
2988 error = wg_algo_xaead_dec(cookie, sizeof(cookie), key,
2989 wgmc->wgmc_cookie, sizeof(wgmc->wgmc_cookie),
2990 wgp->wgp_last_sent_mac1, sizeof(wgp->wgp_last_sent_mac1),
2991 wgmc->wgmc_salt);
2992 if (error != 0) {
2993 WG_LOG_RATECHECK(&wgp->wgp_ppsratecheck, LOG_DEBUG,
2994 "%s: peer %s: wg_algo_aead_dec for cookie failed: "
2995 "error=%d\n", if_name(&wg->wg_if), wgp->wgp_name, error);
2996 goto out;
2997 }
2998 /*
2999 * [W] 6.6: Interaction with Cookie Reply System
3000 * "it should simply store the decrypted cookie value from the cookie
3001 * reply message, and wait for the expiration of the REKEY-TIMEOUT
3002 * timer for retrying a handshake initiation message."
3003 */
3004 wgp->wgp_latest_cookie_time = time_uptime;
3005 memcpy(wgp->wgp_latest_cookie, cookie, sizeof(wgp->wgp_latest_cookie));
3006 out:
3007 mutex_exit(wgp->wgp_lock);
3008 wg_put_session(wgs, &psref);
3009 }
3010
3011 static struct mbuf *
3012 wg_validate_msg_header(struct wg_softc *wg, struct mbuf *m)
3013 {
3014 struct wg_msg wgm;
3015 size_t mbuflen;
3016 size_t msglen;
3017
3018 /*
3019 * Get the mbuf chain length. It is already guaranteed, by
3020 * wg_overudp_cb, to be large enough for a struct wg_msg.
3021 */
3022 mbuflen = m_length(m);
3023 KASSERT(mbuflen >= sizeof(struct wg_msg));
3024
3025 /*
3026 * Copy the message header (32-bit message type) out -- we'll
3027 * worry about contiguity and alignment later.
3028 */
3029 m_copydata(m, 0, sizeof(wgm), &wgm);
3030 switch (le32toh(wgm.wgm_type)) {
3031 case WG_MSG_TYPE_INIT:
3032 msglen = sizeof(struct wg_msg_init);
3033 break;
3034 case WG_MSG_TYPE_RESP:
3035 msglen = sizeof(struct wg_msg_resp);
3036 break;
3037 case WG_MSG_TYPE_COOKIE:
3038 msglen = sizeof(struct wg_msg_cookie);
3039 break;
3040 case WG_MSG_TYPE_DATA:
3041 msglen = sizeof(struct wg_msg_data);
3042 break;
3043 default:
3044 WG_LOG_RATECHECK(&wg->wg_ppsratecheck, LOG_DEBUG,
3045 "%s: Unexpected msg type: %u\n", if_name(&wg->wg_if),
3046 le32toh(wgm.wgm_type));
3047 goto error;
3048 }
3049
3050 /* Verify the mbuf chain is long enough for this type of message. */
3051 if (__predict_false(mbuflen < msglen)) {
3052 WG_DLOG("Invalid msg size: mbuflen=%zu type=%u\n", mbuflen,
3053 le32toh(wgm.wgm_type));
3054 goto error;
3055 }
3056
3057 /* Make the message header contiguous if necessary. */
3058 if (__predict_false(m->m_len < msglen)) {
3059 m = m_pullup(m, msglen);
3060 if (m == NULL)
3061 return NULL;
3062 }
3063
3064 return m;
3065
3066 error:
3067 m_freem(m);
3068 return NULL;
3069 }
3070
3071 static void
3072 wg_handle_packet(struct wg_softc *wg, struct mbuf *m,
3073 const struct sockaddr *src)
3074 {
3075 struct wg_msg *wgm;
3076
3077 KASSERT(curlwp->l_pflag & LP_BOUND);
3078
3079 m = wg_validate_msg_header(wg, m);
3080 if (__predict_false(m == NULL))
3081 return;
3082
3083 KASSERT(m->m_len >= sizeof(struct wg_msg));
3084 wgm = mtod(m, struct wg_msg *);
3085 switch (le32toh(wgm->wgm_type)) {
3086 case WG_MSG_TYPE_INIT:
3087 wg_handle_msg_init(wg, (struct wg_msg_init *)wgm, src);
3088 break;
3089 case WG_MSG_TYPE_RESP:
3090 wg_handle_msg_resp(wg, (struct wg_msg_resp *)wgm, src);
3091 break;
3092 case WG_MSG_TYPE_COOKIE:
3093 wg_handle_msg_cookie(wg, (struct wg_msg_cookie *)wgm);
3094 break;
3095 case WG_MSG_TYPE_DATA:
3096 wg_handle_msg_data(wg, m, src);
3097 /* wg_handle_msg_data frees m for us */
3098 return;
3099 default:
3100 panic("invalid message type: %d", le32toh(wgm->wgm_type));
3101 }
3102
3103 m_freem(m);
3104 }
3105
3106 static void
3107 wg_receive_packets(struct wg_softc *wg, const int af)
3108 {
3109
3110 for (;;) {
3111 int error, flags;
3112 struct socket *so;
3113 struct mbuf *m = NULL;
3114 struct uio dummy_uio;
3115 struct mbuf *paddr = NULL;
3116 struct sockaddr *src;
3117
3118 so = wg_get_so_by_af(wg, af);
3119 flags = MSG_DONTWAIT;
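		/*
		 * so_receive bounds the read by the uio residual; pass
		 * an arbitrarily large value so each call returns one
		 * whole datagram as an mbuf chain in m.
		 */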
3120 dummy_uio.uio_resid = 1000000000;
3121
3122 error = so->so_receive(so, &paddr, &dummy_uio, &m, NULL,
3123 &flags);
3124 if (error || m == NULL) {
3125 //if (error == EWOULDBLOCK)
3126 return;
3127 }
3128
3129 KASSERT(paddr != NULL);
3130 KASSERT(paddr->m_len >= sizeof(struct sockaddr));
3131 src = mtod(paddr, struct sockaddr *);
3132
3133 wg_handle_packet(wg, m, src);
3134 }
3135 }
3136
3137 static void
3138 wg_get_peer(struct wg_peer *wgp, struct psref *psref)
3139 {
3140
3141 psref_acquire(psref, &wgp->wgp_psref, wg_psref_class);
3142 }
3143
3144 static void
3145 wg_put_peer(struct wg_peer *wgp, struct psref *psref)
3146 {
3147
3148 psref_release(psref, &wgp->wgp_psref, wg_psref_class);
3149 }
3150
3151 static void
3152 wg_task_send_init_message(struct wg_softc *wg, struct wg_peer *wgp)
3153 {
3154 struct wg_session *wgs;
3155
3156 WG_TRACE("WGP_TASK_SEND_INIT_MESSAGE");
3157
3158 KASSERT(mutex_owned(wgp->wgp_lock));
3159
3160 if (!atomic_load_acquire(&wgp->wgp_endpoint_available)) {
3161 WGLOG(LOG_DEBUG, "%s: No endpoint available\n",
3162 if_name(&wg->wg_if));
3163 /* XXX should do something? */
3164 return;
3165 }
3166
3167 /*
3168 * If we already have an established session, there's no need
3169 * to initiate a new one -- unless the rekey-after-time or
3170 	 * rekey-after-messages limits have been exceeded.
3171 */
3172 wgs = wgp->wgp_session_stable;
3173 if (wgs->wgs_state == WGS_STATE_ESTABLISHED &&
3174 !atomic_swap_uint(&wgp->wgp_force_rekey, 0))
3175 return;
3176
3177 /*
3178 * Ensure we're initiating a new session. If the unstable
3179 * session is already INIT_ACTIVE or INIT_PASSIVE, this does
3180 * nothing.
3181 */
3182 wg_send_handshake_msg_init(wg, wgp);
3183 }
3184
3185 static void
3186 wg_task_retry_handshake(struct wg_softc *wg, struct wg_peer *wgp)
3187 {
3188 struct wg_session *wgs;
3189
3190 WG_TRACE("WGP_TASK_RETRY_HANDSHAKE");
3191
3192 KASSERT(mutex_owned(wgp->wgp_lock));
3193 KASSERT(wgp->wgp_handshake_start_time != 0);
3194
3195 wgs = wgp->wgp_session_unstable;
3196 if (wgs->wgs_state != WGS_STATE_INIT_ACTIVE)
3197 return;
3198
3199 /*
3200 * XXX no real need to assign a new index here, but we do need
3201 * to transition to UNKNOWN temporarily
3202 */
3203 wg_put_session_index(wg, wgs);
3204
3205 /* [W] 6.4 Handshake Initiation Retransmission */
3206 if ((time_uptime - wgp->wgp_handshake_start_time) >
3207 wg_rekey_attempt_time) {
3208 /* Give up handshaking */
3209 wgp->wgp_handshake_start_time = 0;
3210 WG_TRACE("give up");
3211
3212 /*
3213 		 * If a new data packet arrives, handshaking will be retried
3214 		 * and a new session will be established at that time;
3215 		 * however, we don't want to send the pending packets then.
3216 */
3217 wg_purge_pending_packets(wgp);
3218 return;
3219 }
3220
3221 wg_task_send_init_message(wg, wgp);
3222 }
3223
3224 static void
3225 wg_task_establish_session(struct wg_softc *wg, struct wg_peer *wgp)
3226 {
3227 struct wg_session *wgs, *wgs_prev;
3228 struct mbuf *m;
3229
3230 KASSERT(mutex_owned(wgp->wgp_lock));
3231
3232 wgs = wgp->wgp_session_unstable;
3233 if (wgs->wgs_state != WGS_STATE_INIT_PASSIVE)
3234 /* XXX Can this happen? */
3235 return;
3236
3237 wgs->wgs_time_last_data_sent = 0;
3238 wgs->wgs_is_initiator = false;
3239
3240 /*
3241 * Session was already ready to receive data. Transition from
3242 * INIT_PASSIVE to ESTABLISHED just so we can swap the
3243 * sessions.
3244 *
3245 * atomic_store_relaxed because this doesn't affect the data rx
3246 * path, wg_handle_msg_data -- changing from INIT_PASSIVE to
3247 * ESTABLISHED makes no difference to the data rx path, and the
3248 * transition to INIT_PASSIVE with store-release already
3249 * published the state needed by the data rx path.
3250 */
3251 WG_DLOG("session[L=%"PRIx32" R=%"PRIx32"] -> WGS_STATE_ESTABLISHED\n",
3252 wgs->wgs_local_index, wgs->wgs_remote_index);
3253 atomic_store_relaxed(&wgs->wgs_state, WGS_STATE_ESTABLISHED);
3254 WG_TRACE("WGS_STATE_ESTABLISHED");
3255
3256 /*
3257 * Session is ready to send data too now that we have received
3258 * the peer initiator's first data packet.
3259 *
3260 * Swap the sessions to publish the new one as the stable
3261 * session for the data tx path, wg_output.
3262 */
3263 wg_swap_sessions(wgp);
3264 KASSERT(wgs == wgp->wgp_session_stable);
3265 wgs_prev = wgp->wgp_session_unstable;
3266 getnanotime(&wgp->wgp_last_handshake_time);
3267 wgp->wgp_handshake_start_time = 0;
3268 wgp->wgp_last_sent_mac1_valid = false;
3269 wgp->wgp_last_sent_cookie_valid = false;
3270
3271 /* If we had a data packet queued up, send it. */
3272 if ((m = atomic_swap_ptr(&wgp->wgp_pending, NULL)) != NULL) {
3273 kpreempt_disable();
3274 const uint32_t h = curcpu()->ci_index; // pktq_rps_hash(m)
3275 M_SETCTX(m, wgp);
3276 if (__predict_false(!pktq_enqueue(wg_pktq, m, h))) {
3277 WGLOG(LOG_ERR, "%s: pktq full, dropping\n",
3278 if_name(&wg->wg_if));
3279 m_freem(m);
3280 }
3281 kpreempt_enable();
3282 }
3283
3284 if (wgs_prev->wgs_state == WGS_STATE_ESTABLISHED) {
3285 /*
3286 * Transition ESTABLISHED->DESTROYING. The session
3287 * will remain usable for the data rx path to process
3288 * packets still in flight to us, but we won't use it
3289 * for data tx.
3290 */
3291 WG_DLOG("session[L=%"PRIx32" R=%"PRIx32"]"
3292 " -> WGS_STATE_DESTROYING\n",
3293 wgs_prev->wgs_local_index, wgs_prev->wgs_remote_index);
3294 atomic_store_relaxed(&wgs_prev->wgs_state,
3295 WGS_STATE_DESTROYING);
3296 } else {
3297 KASSERTMSG(wgs_prev->wgs_state == WGS_STATE_UNKNOWN,
3298 "state=%d", wgs_prev->wgs_state);
3299 WG_DLOG("session[L=%"PRIx32" R=%"PRIx32"]"
3300 " -> WGS_STATE_UNKNOWN\n",
3301 wgs_prev->wgs_local_index, wgs_prev->wgs_remote_index);
3302 wgs_prev->wgs_local_index = 0; /* paranoia */
3303 wgs_prev->wgs_remote_index = 0; /* paranoia */
3304 wg_clear_states(wgs_prev); /* paranoia */
3305 wgs_prev->wgs_state = WGS_STATE_UNKNOWN;
3306 }
3307 }
3308
3309 static void
3310 wg_task_endpoint_changed(struct wg_softc *wg, struct wg_peer *wgp)
3311 {
3312
3313 WG_TRACE("WGP_TASK_ENDPOINT_CHANGED");
3314
3315 KASSERT(mutex_owned(wgp->wgp_lock));
3316
3317 if (atomic_load_relaxed(&wgp->wgp_endpoint_changing)) {
3318 pserialize_perform(wgp->wgp_psz);
3319 mutex_exit(wgp->wgp_lock);
3320 psref_target_destroy(&wgp->wgp_endpoint0->wgsa_psref,
3321 wg_psref_class);
3322 psref_target_init(&wgp->wgp_endpoint0->wgsa_psref,
3323 wg_psref_class);
3324 mutex_enter(wgp->wgp_lock);
3325 atomic_store_release(&wgp->wgp_endpoint_changing, 0);
3326 }
3327 }
3328
3329 static void
3330 wg_task_send_keepalive_message(struct wg_softc *wg, struct wg_peer *wgp)
3331 {
3332 struct wg_session *wgs;
3333
3334 WG_TRACE("WGP_TASK_SEND_KEEPALIVE_MESSAGE");
3335
3336 KASSERT(mutex_owned(wgp->wgp_lock));
3337
3338 wgs = wgp->wgp_session_stable;
3339 if (wgs->wgs_state != WGS_STATE_ESTABLISHED)
3340 return;
3341
3342 wg_send_keepalive_msg(wgp, wgs);
3343 }
3344
3345 static void
3346 wg_task_destroy_prev_session(struct wg_softc *wg, struct wg_peer *wgp)
3347 {
3348 struct wg_session *wgs;
3349 time_t age;
3350
3351 WG_TRACE("WGP_TASK_DESTROY_PREV_SESSION");
3352
3353 KASSERT(mutex_owned(wgp->wgp_lock));
3354
3355 /*
3356 	 * If there's any previous unstable session, i.e., one that
3357 * was ESTABLISHED and is now DESTROYING, older than
3358 * reject-after-time, destroy it. Upcoming sessions are still
3359 * in INIT_ACTIVE or INIT_PASSIVE -- we don't touch those here.
3360 */
3361 wgs = wgp->wgp_session_unstable;
3362 KASSERT(wgs->wgs_state != WGS_STATE_ESTABLISHED);
3363 if (wgs->wgs_state == WGS_STATE_DESTROYING &&
3364 ((age = (time_uptime - wgs->wgs_time_established)) >=
3365 wg_reject_after_time)) {
3366 WG_DLOG("destroying past session %"PRIuMAX" sec old\n",
3367 (uintmax_t)age);
3368 wg_put_session_index(wg, wgs);
3369 KASSERTMSG(wgs->wgs_state == WGS_STATE_UNKNOWN, "state=%d",
3370 wgs->wgs_state);
3371 }
3372
3373 /*
3374 	 * If there's any ESTABLISHED stable session older than
3375 * reject-after-time, destroy it. (The stable session can also
3376 * be in UNKNOWN state -- nothing to do in that case)
3377 */
3378 wgs = wgp->wgp_session_stable;
3379 KASSERT(wgs->wgs_state != WGS_STATE_INIT_ACTIVE);
3380 KASSERT(wgs->wgs_state != WGS_STATE_INIT_PASSIVE);
3381 KASSERT(wgs->wgs_state != WGS_STATE_DESTROYING);
3382 if (wgs->wgs_state == WGS_STATE_ESTABLISHED &&
3383 ((age = (time_uptime - wgs->wgs_time_established)) >=
3384 wg_reject_after_time)) {
3385 WG_DLOG("destroying current session %"PRIuMAX" sec old\n",
3386 (uintmax_t)age);
3387 atomic_store_relaxed(&wgs->wgs_state, WGS_STATE_DESTROYING);
3388 wg_put_session_index(wg, wgs);
3389 KASSERTMSG(wgs->wgs_state == WGS_STATE_UNKNOWN, "state=%d",
3390 wgs->wgs_state);
3391 }
3392
3393 /*
3394 	 * If there are no sessions left, there's no need to have the
3395 	 * timer run until the next time around -- halt it.
3396 *
3397 * It is only ever scheduled with wgp_lock held or in the
3398 	 * itself, and callout_halt prevents rescheduling
3399 * itself, so this never races with rescheduling.
3400 */
3401 if (wgp->wgp_session_unstable->wgs_state == WGS_STATE_UNKNOWN &&
3402 wgp->wgp_session_stable->wgs_state == WGS_STATE_UNKNOWN)
3403 callout_halt(&wgp->wgp_session_dtor_timer, NULL);
3404 }
3405
3406 static void
3407 wg_peer_work(struct work *wk, void *cookie)
3408 {
3409 struct wg_peer *wgp = container_of(wk, struct wg_peer, wgp_work);
3410 struct wg_softc *wg = wgp->wgp_sc;
3411 unsigned int tasks;
3412
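	/*
	 * Drain the pending task bits under the interrupt lock, then
	 * run each task with wgp_lock held; tasks posted while we run
	 * are picked up by the next iteration of the loop.
	 */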
3413 mutex_enter(wgp->wgp_intr_lock);
3414 while ((tasks = wgp->wgp_tasks) != 0) {
3415 wgp->wgp_tasks = 0;
3416 mutex_exit(wgp->wgp_intr_lock);
3417
3418 mutex_enter(wgp->wgp_lock);
3419 if (ISSET(tasks, WGP_TASK_SEND_INIT_MESSAGE))
3420 wg_task_send_init_message(wg, wgp);
3421 if (ISSET(tasks, WGP_TASK_RETRY_HANDSHAKE))
3422 wg_task_retry_handshake(wg, wgp);
3423 if (ISSET(tasks, WGP_TASK_ESTABLISH_SESSION))
3424 wg_task_establish_session(wg, wgp);
3425 if (ISSET(tasks, WGP_TASK_ENDPOINT_CHANGED))
3426 wg_task_endpoint_changed(wg, wgp);
3427 if (ISSET(tasks, WGP_TASK_SEND_KEEPALIVE_MESSAGE))
3428 wg_task_send_keepalive_message(wg, wgp);
3429 if (ISSET(tasks, WGP_TASK_DESTROY_PREV_SESSION))
3430 wg_task_destroy_prev_session(wg, wgp);
3431 mutex_exit(wgp->wgp_lock);
3432
3433 mutex_enter(wgp->wgp_intr_lock);
3434 }
3435 mutex_exit(wgp->wgp_intr_lock);
3436 }
3437
3438 static void
3439 wg_job(struct threadpool_job *job)
3440 {
3441 struct wg_softc *wg = container_of(job, struct wg_softc, wg_job);
3442 int bound, upcalls;
3443
3444 mutex_enter(wg->wg_intr_lock);
3445 while ((upcalls = wg->wg_upcalls) != 0) {
3446 wg->wg_upcalls = 0;
3447 mutex_exit(wg->wg_intr_lock);
3448 bound = curlwp_bind();
3449 if (ISSET(upcalls, WG_UPCALL_INET))
3450 wg_receive_packets(wg, AF_INET);
3451 if (ISSET(upcalls, WG_UPCALL_INET6))
3452 wg_receive_packets(wg, AF_INET6);
3453 curlwp_bindx(bound);
3454 mutex_enter(wg->wg_intr_lock);
3455 }
3456 threadpool_job_done(job);
3457 mutex_exit(wg->wg_intr_lock);
3458 }
3459
3460 static int
3461 wg_bind_port(struct wg_softc *wg, const uint16_t port)
3462 {
3463 int error;
3464 uint16_t old_port = wg->wg_listen_port;
3465
3466 if (port != 0 && old_port == port)
3467 return 0;
3468
3469 struct sockaddr_in _sin, *sin = &_sin;
3470 sin->sin_len = sizeof(*sin);
3471 sin->sin_family = AF_INET;
3472 sin->sin_addr.s_addr = INADDR_ANY;
3473 sin->sin_port = htons(port);
3474
3475 error = sobind(wg->wg_so4, sintosa(sin), curlwp);
3476 if (error != 0)
3477 return error;
3478
3479 #ifdef INET6
3480 struct sockaddr_in6 _sin6, *sin6 = &_sin6;
3481 sin6->sin6_len = sizeof(*sin6);
3482 sin6->sin6_family = AF_INET6;
3483 sin6->sin6_addr = in6addr_any;
3484 sin6->sin6_port = htons(port);
3485
3486 error = sobind(wg->wg_so6, sin6tosa(sin6), curlwp);
3487 if (error != 0)
3488 return error;
3489 #endif
3490
3491 wg->wg_listen_port = port;
3492
3493 return 0;
3494 }
3495
3496 static void
3497 wg_so_upcall(struct socket *so, void *cookie, int events, int waitflag)
3498 {
3499 struct wg_softc *wg = cookie;
3500 int reason;
3501
3502 reason = (so->so_proto->pr_domain->dom_family == AF_INET) ?
3503 WG_UPCALL_INET :
3504 WG_UPCALL_INET6;
3505
3506 mutex_enter(wg->wg_intr_lock);
3507 wg->wg_upcalls |= reason;
3508 threadpool_schedule_job(wg->wg_threadpool, &wg->wg_job);
3509 mutex_exit(wg->wg_intr_lock);
3510 }
3511
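/*
 * wg_overudp_cb is registered on the UDP sockets (see wg_socreate) and
 * runs on the UDP input path.  Return conventions as used below:
 * 1 means the packet was consumed here (data messages are handled
 * inline), 0 means let it be delivered to the socket and picked up by
 * wg_receive_packets, and -1 means it was dropped and freed.
 */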
3512 static int
3513 wg_overudp_cb(struct mbuf **mp, int offset, struct socket *so,
3514 struct sockaddr *src, void *arg)
3515 {
3516 struct wg_softc *wg = arg;
3517 struct wg_msg wgm;
3518 struct mbuf *m = *mp;
3519
3520 WG_TRACE("enter");
3521
3522 /* Verify the mbuf chain is long enough to have a wg msg header. */
3523 KASSERT(offset <= m_length(m));
3524 if (__predict_false(m_length(m) - offset < sizeof(struct wg_msg))) {
3525 /* drop on the floor */
3526 m_freem(m);
3527 return -1;
3528 }
3529
3530 /*
3531 * Copy the message header (32-bit message type) out -- we'll
3532 * worry about contiguity and alignment later.
3533 */
3534 m_copydata(m, offset, sizeof(struct wg_msg), &wgm);
3535 WG_DLOG("type=%d\n", le32toh(wgm.wgm_type));
3536
3537 /*
3538 * Handle DATA packets promptly as they arrive, if they are in
3539 * an active session. Other packets may require expensive
3540 * public-key crypto and are not as sensitive to latency, so
3541 * defer them to the worker thread.
3542 */
3543 switch (le32toh(wgm.wgm_type)) {
3544 case WG_MSG_TYPE_DATA:
3545 /* handle immediately */
3546 m_adj(m, offset);
3547 if (__predict_false(m->m_len < sizeof(struct wg_msg_data))) {
3548 m = m_pullup(m, sizeof(struct wg_msg_data));
3549 if (m == NULL)
3550 return -1;
3551 }
3552 wg_handle_msg_data(wg, m, src);
3553 *mp = NULL;
3554 return 1;
3555 case WG_MSG_TYPE_INIT:
3556 case WG_MSG_TYPE_RESP:
3557 case WG_MSG_TYPE_COOKIE:
3558 /* pass through to so_receive in wg_receive_packets */
3559 return 0;
3560 default:
3561 /* drop on the floor */
3562 m_freem(m);
3563 return -1;
3564 }
3565 }
3566
3567 static int
3568 wg_socreate(struct wg_softc *wg, int af, struct socket **sop)
3569 {
3570 int error;
3571 struct socket *so;
3572
3573 error = socreate(af, &so, SOCK_DGRAM, 0, curlwp, NULL);
3574 if (error != 0)
3575 return error;
3576
3577 solock(so);
3578 so->so_upcallarg = wg;
3579 so->so_upcall = wg_so_upcall;
3580 so->so_rcv.sb_flags |= SB_UPCALL;
3581 inpcb_register_overudp_cb(sotoinpcb(so), wg_overudp_cb, wg);
3582 sounlock(so);
3583
3584 *sop = so;
3585
3586 return 0;
3587 }
3588
3589 static bool
3590 wg_session_hit_limits(struct wg_session *wgs)
3591 {
3592
3593 /*
3594 * [W] 6.2: Transport Message Limits
3595 * "After REJECT-AFTER-MESSAGES transport data messages or after the
3596 * current secure session is REJECT-AFTER-TIME seconds old, whichever
3597 * comes first, WireGuard will refuse to send any more transport data
3598 * messages using the current secure session, ..."
3599 */
3600 KASSERT(wgs->wgs_time_established != 0);
3601 if ((time_uptime - wgs->wgs_time_established) > wg_reject_after_time) {
3602 WG_DLOG("The session hits REJECT_AFTER_TIME\n");
3603 return true;
3604 } else if (wg_session_get_send_counter(wgs) >
3605 wg_reject_after_messages) {
3606 WG_DLOG("The session hits REJECT_AFTER_MESSAGES\n");
3607 return true;
3608 }
3609
3610 return false;
3611 }
3612
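/*
 * wgintr(cookie)
 *
 *	Software interrupt handler: drain the global packet queue.
 *	Each packet is sent over its peer's stable session; if there is
 *	no usable session, schedule a handshake and drop the packet.
 */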
3613 static void
3614 wgintr(void *cookie)
3615 {
3616 struct wg_peer *wgp;
3617 struct wg_session *wgs;
3618 struct mbuf *m;
3619 struct psref psref;
3620
3621 while ((m = pktq_dequeue(wg_pktq)) != NULL) {
3622 wgp = M_GETCTX(m, struct wg_peer *);
3623 if ((wgs = wg_get_stable_session(wgp, &psref)) == NULL) {
3624 WG_TRACE("no stable session");
3625 wg_schedule_peer_task(wgp, WGP_TASK_SEND_INIT_MESSAGE);
3626 goto next0;
3627 }
3628 if (__predict_false(wg_session_hit_limits(wgs))) {
3629 WG_TRACE("stable session hit limits");
3630 wg_schedule_peer_task(wgp, WGP_TASK_SEND_INIT_MESSAGE);
3631 goto next1;
3632 }
3633 wg_send_data_msg(wgp, wgs, m);
3634 m = NULL; /* consumed */
3635 next1: wg_put_session(wgs, &psref);
3636 next0: m_freem(m);
3637 /* XXX Yield to avoid userland starvation? */
3638 }
3639 }
3640
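/*
 * wg_purge_pending_packets(wgp)
 *
 *	Free the packet stashed while waiting for a handshake and wait
 *	for packets already on the global queue to drain.
 */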
3641 static void
3642 wg_purge_pending_packets(struct wg_peer *wgp)
3643 {
3644 struct mbuf *m;
3645
3646 m = atomic_swap_ptr(&wgp->wgp_pending, NULL);
3647 m_freem(m);
3648 pktq_barrier(wg_pktq);
3649 }
3650
3651 static void
3652 wg_handshake_timeout_timer(void *arg)
3653 {
3654 struct wg_peer *wgp = arg;
3655
3656 WG_TRACE("enter");
3657
3658 wg_schedule_peer_task(wgp, WGP_TASK_RETRY_HANDSHAKE);
3659 }
3660
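/*
 * wg_alloc_peer(wg)
 *
 *	Allocate and initialize a peer: timers, locks, psref targets,
 *	the two endpoint slots, and the stable/unstable session pair.
 */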
3661 static struct wg_peer *
3662 wg_alloc_peer(struct wg_softc *wg)
3663 {
3664 struct wg_peer *wgp;
3665
3666 wgp = kmem_zalloc(sizeof(*wgp), KM_SLEEP);
3667
3668 wgp->wgp_sc = wg;
3669 callout_init(&wgp->wgp_handshake_timeout_timer, CALLOUT_MPSAFE);
3670 callout_setfunc(&wgp->wgp_handshake_timeout_timer,
3671 wg_handshake_timeout_timer, wgp);
3672 callout_init(&wgp->wgp_session_dtor_timer, CALLOUT_MPSAFE);
3673 callout_setfunc(&wgp->wgp_session_dtor_timer,
3674 wg_session_dtor_timer, wgp);
3675 PSLIST_ENTRY_INIT(wgp, wgp_peerlist_entry);
3676 wgp->wgp_endpoint_changing = false;
3677 wgp->wgp_endpoint_available = false;
3678 wgp->wgp_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NONE);
3679 wgp->wgp_intr_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_SOFTNET);
3680 wgp->wgp_psz = pserialize_create();
3681 psref_target_init(&wgp->wgp_psref, wg_psref_class);
3682
3683 wgp->wgp_endpoint = kmem_zalloc(sizeof(*wgp->wgp_endpoint), KM_SLEEP);
3684 wgp->wgp_endpoint0 = kmem_zalloc(sizeof(*wgp->wgp_endpoint0), KM_SLEEP);
3685 psref_target_init(&wgp->wgp_endpoint->wgsa_psref, wg_psref_class);
3686 psref_target_init(&wgp->wgp_endpoint0->wgsa_psref, wg_psref_class);
3687
3688 struct wg_session *wgs;
3689 wgp->wgp_session_stable =
3690 kmem_zalloc(sizeof(*wgp->wgp_session_stable), KM_SLEEP);
3691 wgp->wgp_session_unstable =
3692 kmem_zalloc(sizeof(*wgp->wgp_session_unstable), KM_SLEEP);
3693 wgs = wgp->wgp_session_stable;
3694 wgs->wgs_peer = wgp;
3695 wgs->wgs_state = WGS_STATE_UNKNOWN;
3696 psref_target_init(&wgs->wgs_psref, wg_psref_class);
3697 #ifndef __HAVE_ATOMIC64_LOADSTORE
3698 mutex_init(&wgs->wgs_send_counter_lock, MUTEX_DEFAULT, IPL_SOFTNET);
3699 #endif
3700 wgs->wgs_recvwin = kmem_zalloc(sizeof(*wgs->wgs_recvwin), KM_SLEEP);
3701 mutex_init(&wgs->wgs_recvwin->lock, MUTEX_DEFAULT, IPL_SOFTNET);
3702
3703 wgs = wgp->wgp_session_unstable;
3704 wgs->wgs_peer = wgp;
3705 wgs->wgs_state = WGS_STATE_UNKNOWN;
3706 psref_target_init(&wgs->wgs_psref, wg_psref_class);
3707 #ifndef __HAVE_ATOMIC64_LOADSTORE
3708 mutex_init(&wgs->wgs_send_counter_lock, MUTEX_DEFAULT, IPL_SOFTNET);
3709 #endif
3710 wgs->wgs_recvwin = kmem_zalloc(sizeof(*wgs->wgs_recvwin), KM_SLEEP);
3711 mutex_init(&wgs->wgs_recvwin->lock, MUTEX_DEFAULT, IPL_SOFTNET);
3712
3713 return wgp;
3714 }
3715
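/*
 * wg_destroy_peer(wgp)
 *
 *	Tear down a peer that has already been unlinked from the
 *	interface: delete its allowed-IP routes, purge pending packets,
 *	halt its timers, wait for queued work, destroy both sessions,
 *	and free everything.
 */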
3716 static void
3717 wg_destroy_peer(struct wg_peer *wgp)
3718 {
3719 struct wg_session *wgs;
3720 struct wg_softc *wg = wgp->wgp_sc;
3721
3722 /* Prevent new packets from this peer on any source address. */
3723 rw_enter(wg->wg_rwlock, RW_WRITER);
3724 for (int i = 0; i < wgp->wgp_n_allowedips; i++) {
3725 struct wg_allowedip *wga = &wgp->wgp_allowedips[i];
3726 struct radix_node_head *rnh = wg_rnh(wg, wga->wga_family);
3727 struct radix_node *rn;
3728
3729 KASSERT(rnh != NULL);
3730 rn = rnh->rnh_deladdr(&wga->wga_sa_addr,
3731 &wga->wga_sa_mask, rnh);
3732 if (rn == NULL) {
3733 char addrstr[128];
3734 sockaddr_format(&wga->wga_sa_addr, addrstr,
3735 sizeof(addrstr));
3736 WGLOG(LOG_WARNING, "%s: Couldn't delete %s",
3737 if_name(&wg->wg_if), addrstr);
3738 }
3739 }
3740 rw_exit(wg->wg_rwlock);
3741
3742 /* Purge pending packets. */
3743 wg_purge_pending_packets(wgp);
3744
3745 /* Halt all packet processing and timeouts. */
3746 callout_halt(&wgp->wgp_handshake_timeout_timer, NULL);
3747 callout_halt(&wgp->wgp_session_dtor_timer, NULL);
3748
3749 /* Wait for any queued work to complete. */
3750 workqueue_wait(wg_wq, &wgp->wgp_work);
3751
3752 wgs = wgp->wgp_session_unstable;
3753 if (wgs->wgs_state != WGS_STATE_UNKNOWN) {
3754 mutex_enter(wgp->wgp_lock);
3755 wg_destroy_session(wg, wgs);
3756 mutex_exit(wgp->wgp_lock);
3757 }
3758 mutex_destroy(&wgs->wgs_recvwin->lock);
3759 kmem_free(wgs->wgs_recvwin, sizeof(*wgs->wgs_recvwin));
3760 #ifndef __HAVE_ATOMIC64_LOADSTORE
3761 mutex_destroy(&wgs->wgs_send_counter_lock);
3762 #endif
3763 kmem_free(wgs, sizeof(*wgs));
3764
3765 wgs = wgp->wgp_session_stable;
3766 if (wgs->wgs_state != WGS_STATE_UNKNOWN) {
3767 mutex_enter(wgp->wgp_lock);
3768 wg_destroy_session(wg, wgs);
3769 mutex_exit(wgp->wgp_lock);
3770 }
3771 mutex_destroy(&wgs->wgs_recvwin->lock);
3772 kmem_free(wgs->wgs_recvwin, sizeof(*wgs->wgs_recvwin));
3773 #ifndef __HAVE_ATOMIC64_LOADSTORE
3774 mutex_destroy(&wgs->wgs_send_counter_lock);
3775 #endif
3776 kmem_free(wgs, sizeof(*wgs));
3777
3778 psref_target_destroy(&wgp->wgp_endpoint->wgsa_psref, wg_psref_class);
3779 psref_target_destroy(&wgp->wgp_endpoint0->wgsa_psref, wg_psref_class);
3780 kmem_free(wgp->wgp_endpoint, sizeof(*wgp->wgp_endpoint));
3781 kmem_free(wgp->wgp_endpoint0, sizeof(*wgp->wgp_endpoint0));
3782
3783 pserialize_destroy(wgp->wgp_psz);
3784 mutex_obj_free(wgp->wgp_intr_lock);
3785 mutex_obj_free(wgp->wgp_lock);
3786
3787 kmem_free(wgp, sizeof(*wgp));
3788 }
3789
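/*
 * wg_destroy_all_peers(wg)
 *
 *	Unlink and destroy every peer.  Peers are removed one at a time
 *	under wg_lock and destroyed outside it (destruction may sleep),
 *	restarting the iteration after each one.
 */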
3790 static void
3791 wg_destroy_all_peers(struct wg_softc *wg)
3792 {
3793 struct wg_peer *wgp, *wgp0 __diagused;
3794 void *garbage_byname, *garbage_bypubkey;
3795
3796 restart:
3797 garbage_byname = garbage_bypubkey = NULL;
3798 mutex_enter(wg->wg_lock);
3799 WG_PEER_WRITER_FOREACH(wgp, wg) {
3800 if (wgp->wgp_name[0]) {
3801 wgp0 = thmap_del(wg->wg_peers_byname, wgp->wgp_name,
3802 strlen(wgp->wgp_name));
3803 KASSERT(wgp0 == wgp);
3804 garbage_byname = thmap_stage_gc(wg->wg_peers_byname);
3805 }
3806 wgp0 = thmap_del(wg->wg_peers_bypubkey, wgp->wgp_pubkey,
3807 sizeof(wgp->wgp_pubkey));
3808 KASSERT(wgp0 == wgp);
3809 garbage_bypubkey = thmap_stage_gc(wg->wg_peers_bypubkey);
3810 WG_PEER_WRITER_REMOVE(wgp);
3811 wg->wg_npeers--;
3812 mutex_enter(wgp->wgp_lock);
3813 pserialize_perform(wgp->wgp_psz);
3814 mutex_exit(wgp->wgp_lock);
3815 PSLIST_ENTRY_DESTROY(wgp, wgp_peerlist_entry);
3816 break;
3817 }
3818 mutex_exit(wg->wg_lock);
3819
3820 if (wgp == NULL)
3821 return;
3822
3823 psref_target_destroy(&wgp->wgp_psref, wg_psref_class);
3824
3825 wg_destroy_peer(wgp);
3826 thmap_gc(wg->wg_peers_byname, garbage_byname);
3827 thmap_gc(wg->wg_peers_bypubkey, garbage_bypubkey);
3828
3829 goto restart;
3830 }
3831
3832 static int
3833 wg_destroy_peer_name(struct wg_softc *wg, const char *name)
3834 {
3835 struct wg_peer *wgp, *wgp0 __diagused;
3836 void *garbage_byname, *garbage_bypubkey;
3837
3838 mutex_enter(wg->wg_lock);
3839 wgp = thmap_del(wg->wg_peers_byname, name, strlen(name));
3840 if (wgp != NULL) {
3841 wgp0 = thmap_del(wg->wg_peers_bypubkey, wgp->wgp_pubkey,
3842 sizeof(wgp->wgp_pubkey));
3843 KASSERT(wgp0 == wgp);
3844 garbage_byname = thmap_stage_gc(wg->wg_peers_byname);
3845 garbage_bypubkey = thmap_stage_gc(wg->wg_peers_bypubkey);
3846 WG_PEER_WRITER_REMOVE(wgp);
3847 wg->wg_npeers--;
3848 if (wg->wg_npeers == 0)
3849 if_link_state_change(&wg->wg_if, LINK_STATE_DOWN);
3850 mutex_enter(wgp->wgp_lock);
3851 pserialize_perform(wgp->wgp_psz);
3852 mutex_exit(wgp->wgp_lock);
3853 PSLIST_ENTRY_DESTROY(wgp, wgp_peerlist_entry);
3854 }
3855 mutex_exit(wg->wg_lock);
3856
3857 if (wgp == NULL)
3858 return ENOENT;
3859
3860 psref_target_destroy(&wgp->wgp_psref, wg_psref_class);
3861
3862 wg_destroy_peer(wgp);
3863 thmap_gc(wg->wg_peers_byname, garbage_byname);
3864 thmap_gc(wg->wg_peers_bypubkey, garbage_bypubkey);
3865
3866 return 0;
3867 }
3868
3869 static int
3870 wg_if_attach(struct wg_softc *wg)
3871 {
3872
3873 wg->wg_if.if_addrlen = 0;
3874 wg->wg_if.if_mtu = WG_MTU;
3875 wg->wg_if.if_flags = IFF_MULTICAST;
3876 wg->wg_if.if_extflags = IFEF_MPSAFE;
3877 wg->wg_if.if_ioctl = wg_ioctl;
3878 wg->wg_if.if_output = wg_output;
3879 wg->wg_if.if_init = wg_init;
3880 #ifdef ALTQ
3881 wg->wg_if.if_start = wg_start;
3882 #endif
3883 wg->wg_if.if_stop = wg_stop;
3884 wg->wg_if.if_type = IFT_OTHER;
3885 wg->wg_if.if_dlt = DLT_NULL;
3886 wg->wg_if.if_softc = wg;
3887 #ifdef ALTQ
3888 IFQ_SET_READY(&wg->wg_if.if_snd);
3889 #endif
3890 if_initialize(&wg->wg_if);
3891
3892 wg->wg_if.if_link_state = LINK_STATE_DOWN;
3893 if_alloc_sadl(&wg->wg_if);
3894 if_register(&wg->wg_if);
3895
3896 bpf_attach(&wg->wg_if, DLT_NULL, sizeof(uint32_t));
3897
3898 return 0;
3899 }
3900
3901 static void
3902 wg_if_detach(struct wg_softc *wg)
3903 {
3904 struct ifnet *ifp = &wg->wg_if;
3905
3906 bpf_detach(ifp);
3907 if_detach(ifp);
3908 }
3909
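/*
 * wg_clone_create(ifc, unit)
 *
 *	Create a wg(4) instance: allocate the softc, peer and session
 *	maps, locks, and threadpool job; create the IPv4/IPv6 sockets
 *	and allowed-IP routing tables; then attach the interface.  The
 *	error path unwinds in reverse order.
 */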
3910 static int
3911 wg_clone_create(struct if_clone *ifc, int unit)
3912 {
3913 struct wg_softc *wg;
3914 int error;
3915
3916 wg_guarantee_initialized();
3917
3918 error = wg_count_inc();
3919 if (error)
3920 return error;
3921
3922 wg = kmem_zalloc(sizeof(*wg), KM_SLEEP);
3923
3924 if_initname(&wg->wg_if, ifc->ifc_name, unit);
3925
3926 PSLIST_INIT(&wg->wg_peers);
3927 wg->wg_peers_bypubkey = thmap_create(0, NULL, THMAP_NOCOPY);
3928 wg->wg_peers_byname = thmap_create(0, NULL, THMAP_NOCOPY);
3929 wg->wg_sessions_byindex = thmap_create(0, NULL, THMAP_NOCOPY);
3930 wg->wg_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NONE);
3931 wg->wg_intr_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_SOFTNET);
3932 wg->wg_rwlock = rw_obj_alloc();
3933 threadpool_job_init(&wg->wg_job, wg_job, wg->wg_intr_lock,
3934 "%s", if_name(&wg->wg_if));
3935 wg->wg_ops = &wg_ops_rumpkernel;
3936
3937 error = threadpool_get(&wg->wg_threadpool, PRI_NONE);
3938 if (error)
3939 goto fail0;
3940
3941 #ifdef INET
3942 error = wg_socreate(wg, AF_INET, &wg->wg_so4);
3943 if (error)
3944 goto fail1;
3945 rn_inithead((void **)&wg->wg_rtable_ipv4,
3946 offsetof(struct sockaddr_in, sin_addr) * NBBY);
3947 #endif
3948 #ifdef INET6
3949 error = wg_socreate(wg, AF_INET6, &wg->wg_so6);
3950 if (error)
3951 goto fail2;
3952 rn_inithead((void **)&wg->wg_rtable_ipv6,
3953 offsetof(struct sockaddr_in6, sin6_addr) * NBBY);
3954 #endif
3955
3956 error = wg_if_attach(wg);
3957 if (error)
3958 goto fail3;
3959
3960 return 0;
3961
3962 fail4: __unused
3963 wg_if_detach(wg);
3964 fail3: wg_destroy_all_peers(wg);
3965 #ifdef INET6
3966 solock(wg->wg_so6);
3967 wg->wg_so6->so_rcv.sb_flags &= ~SB_UPCALL;
3968 sounlock(wg->wg_so6);
3969 #endif
3970 #ifdef INET
3971 solock(wg->wg_so4);
3972 wg->wg_so4->so_rcv.sb_flags &= ~SB_UPCALL;
3973 sounlock(wg->wg_so4);
3974 #endif
3975 mutex_enter(wg->wg_intr_lock);
3976 threadpool_cancel_job(wg->wg_threadpool, &wg->wg_job);
3977 mutex_exit(wg->wg_intr_lock);
3978 #ifdef INET6
3979 if (wg->wg_rtable_ipv6 != NULL)
3980 free(wg->wg_rtable_ipv6, M_RTABLE);
3981 soclose(wg->wg_so6);
3982 fail2:
3983 #endif
3984 #ifdef INET
3985 if (wg->wg_rtable_ipv4 != NULL)
3986 free(wg->wg_rtable_ipv4, M_RTABLE);
3987 soclose(wg->wg_so4);
3988 fail1:
3989 #endif
3990 threadpool_put(wg->wg_threadpool, PRI_NONE);
3991 fail0: threadpool_job_destroy(&wg->wg_job);
3992 rw_obj_free(wg->wg_rwlock);
3993 mutex_obj_free(wg->wg_intr_lock);
3994 mutex_obj_free(wg->wg_lock);
3995 thmap_destroy(wg->wg_sessions_byindex);
3996 thmap_destroy(wg->wg_peers_byname);
3997 thmap_destroy(wg->wg_peers_bypubkey);
3998 PSLIST_DESTROY(&wg->wg_peers);
3999 kmem_free(wg, sizeof(*wg));
4000 wg_count_dec();
4001 return error;
4002 }
4003
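/*
 * wg_clone_destroy(ifp)
 *
 *	Destroy a wg(4) instance: detach the interface, destroy all
 *	peers, disable the socket upcalls, cancel the threadpool job,
 *	and release everything created by wg_clone_create.
 */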
4004 static int
4005 wg_clone_destroy(struct ifnet *ifp)
4006 {
4007 struct wg_softc *wg = container_of(ifp, struct wg_softc, wg_if);
4008
4009 #ifdef WG_RUMPKERNEL
4010 if (wg_user_mode(wg)) {
4011 rumpuser_wg_destroy(wg->wg_user);
4012 wg->wg_user = NULL;
4013 }
4014 #endif
4015
4016 wg_if_detach(wg);
4017 wg_destroy_all_peers(wg);
4018 #ifdef INET6
4019 solock(wg->wg_so6);
4020 wg->wg_so6->so_rcv.sb_flags &= ~SB_UPCALL;
4021 sounlock(wg->wg_so6);
4022 #endif
4023 #ifdef INET
4024 solock(wg->wg_so4);
4025 wg->wg_so4->so_rcv.sb_flags &= ~SB_UPCALL;
4026 sounlock(wg->wg_so4);
4027 #endif
4028 mutex_enter(wg->wg_intr_lock);
4029 threadpool_cancel_job(wg->wg_threadpool, &wg->wg_job);
4030 mutex_exit(wg->wg_intr_lock);
4031 #ifdef INET6
4032 if (wg->wg_rtable_ipv6 != NULL)
4033 free(wg->wg_rtable_ipv6, M_RTABLE);
4034 soclose(wg->wg_so6);
4035 #endif
4036 #ifdef INET
4037 if (wg->wg_rtable_ipv4 != NULL)
4038 free(wg->wg_rtable_ipv4, M_RTABLE);
4039 soclose(wg->wg_so4);
4040 #endif
4041 threadpool_put(wg->wg_threadpool, PRI_NONE);
4042 threadpool_job_destroy(&wg->wg_job);
4043 rw_obj_free(wg->wg_rwlock);
4044 mutex_obj_free(wg->wg_intr_lock);
4045 mutex_obj_free(wg->wg_lock);
4046 thmap_destroy(wg->wg_sessions_byindex);
4047 thmap_destroy(wg->wg_peers_byname);
4048 thmap_destroy(wg->wg_peers_bypubkey);
4049 PSLIST_DESTROY(&wg->wg_peers);
4050 kmem_free(wg, sizeof(*wg));
4051 wg_count_dec();
4052
4053 return 0;
4054 }
4055
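/*
 * wg_pick_peer_by_sa(wg, sa, psref)
 *
 *	Look up the peer whose allowed-IPs radix tree matches the
 *	destination address sa.  Returns a psref-held peer, or NULL if
 *	there is no match.
 */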
4056 static struct wg_peer *
4057 wg_pick_peer_by_sa(struct wg_softc *wg, const struct sockaddr *sa,
4058 struct psref *psref)
4059 {
4060 struct radix_node_head *rnh;
4061 struct radix_node *rn;
4062 struct wg_peer *wgp = NULL;
4063 struct wg_allowedip *wga;
4064
4065 #ifdef WG_DEBUG_LOG
4066 char addrstr[128];
4067 sockaddr_format(sa, addrstr, sizeof(addrstr));
4068 WG_DLOG("sa=%s\n", addrstr);
4069 #endif
4070
4071 rw_enter(wg->wg_rwlock, RW_READER);
4072
4073 rnh = wg_rnh(wg, sa->sa_family);
4074 if (rnh == NULL)
4075 goto out;
4076
4077 rn = rnh->rnh_matchaddr(sa, rnh);
4078 if (rn == NULL || (rn->rn_flags & RNF_ROOT) != 0)
4079 goto out;
4080
4081 WG_TRACE("success");
4082
4083 wga = container_of(rn, struct wg_allowedip, wga_nodes[0]);
4084 wgp = wga->wga_peer;
4085 wg_get_peer(wgp, psref);
4086
4087 out:
4088 rw_exit(wg->wg_rwlock);
4089 return wgp;
4090 }
4091
4092 static void
4093 wg_fill_msg_data(struct wg_softc *wg, struct wg_peer *wgp,
4094 struct wg_session *wgs, struct wg_msg_data *wgmd)
4095 {
4096
4097 memset(wgmd, 0, sizeof(*wgmd));
4098 wgmd->wgmd_type = htole32(WG_MSG_TYPE_DATA);
4099 wgmd->wgmd_receiver = wgs->wgs_remote_index;
4100 /* [W] 5.4.6: msg.counter := Nm^send */
4101 /* [W] 5.4.6: Nm^send := Nm^send + 1 */
4102 wgmd->wgmd_counter = htole64(wg_session_inc_send_counter(wgs));
4103 WG_DLOG("counter=%"PRIu64"\n", le64toh(wgmd->wgmd_counter));
4104 }
4105
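/*
 * wg_output(ifp, m, dst, rt)
 *
 *	if_output routine: find the peer whose allowed IPs cover dst.
 *	If it has an established session, enqueue the packet for wgintr
 *	to encrypt and send; otherwise stash it as the peer's single
 *	pending packet and schedule a handshake.
 */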
4106 static int
4107 wg_output(struct ifnet *ifp, struct mbuf *m, const struct sockaddr *dst,
4108 const struct rtentry *rt)
4109 {
4110 struct wg_softc *wg = ifp->if_softc;
4111 struct wg_peer *wgp = NULL;
4112 struct wg_session *wgs = NULL;
4113 struct psref wgp_psref, wgs_psref;
4114 int bound;
4115 int error;
4116
4117 bound = curlwp_bind();
4118
4119 /* TODO make the nest limit configurable via sysctl */
4120 error = if_tunnel_check_nesting(ifp, m, 1);
4121 if (error) {
4122 WGLOG(LOG_ERR,
4123 "%s: tunneling loop detected and packet dropped\n",
4124 if_name(&wg->wg_if));
4125 goto out0;
4126 }
4127
4128 #ifdef ALTQ
4129 bool altq = atomic_load_relaxed(&ifp->if_snd.altq_flags)
4130 & ALTQF_ENABLED;
4131 if (altq)
4132 IFQ_CLASSIFY(&ifp->if_snd, m, dst->sa_family);
4133 #endif
4134
4135 bpf_mtap_af(ifp, dst->sa_family, m, BPF_D_OUT);
4136
4137 m->m_flags &= ~(M_BCAST|M_MCAST);
4138
4139 wgp = wg_pick_peer_by_sa(wg, dst, &wgp_psref);
4140 if (wgp == NULL) {
4141 WG_TRACE("peer not found");
4142 error = EHOSTUNREACH;
4143 goto out0;
4144 }
4145
4146 /* Clear checksum-offload flags. */
4147 m->m_pkthdr.csum_flags = 0;
4148 m->m_pkthdr.csum_data = 0;
4149
4150 /* Check whether there's an established session. */
4151 wgs = wg_get_stable_session(wgp, &wgs_psref);
4152 if (wgs == NULL) {
4153 /*
4154 * No established session. If we're the first to try
4155 * sending data, schedule a handshake and queue the
4156 * packet for when the handshake is done; otherwise
4157 * just drop the packet and let the ongoing handshake
4158 * attempt continue. We could queue more data packets
4159 * but it's not clear that's worthwhile.
4160 */
4161 if (atomic_cas_ptr(&wgp->wgp_pending, NULL, m) == NULL) {
4162 m = NULL; /* consume */
4163 WG_TRACE("queued first packet; init handshake");
4164 wg_schedule_peer_task(wgp, WGP_TASK_SEND_INIT_MESSAGE);
4165 } else {
4166 WG_TRACE("first packet already queued, dropping");
4167 }
4168 goto out1;
4169 }
4170
4171 /* There's an established session. Toss it in the queue. */
4172 #ifdef ALTQ
4173 if (altq) {
4174 mutex_enter(ifp->if_snd.ifq_lock);
4175 if (ALTQ_IS_ENABLED(&ifp->if_snd)) {
4176 M_SETCTX(m, wgp);
4177 ALTQ_ENQUEUE(&ifp->if_snd, m, error);
4178 m = NULL; /* consume */
4179 }
4180 mutex_exit(ifp->if_snd.ifq_lock);
4181 if (m == NULL) {
4182 wg_start(ifp);
4183 goto out2;
4184 }
4185 }
4186 #endif
4187 kpreempt_disable();
4188 const uint32_t h = curcpu()->ci_index; // pktq_rps_hash(m)
4189 M_SETCTX(m, wgp);
4190 if (__predict_false(!pktq_enqueue(wg_pktq, m, h))) {
4191 WGLOG(LOG_ERR, "%s: pktq full, dropping\n",
4192 if_name(&wg->wg_if));
4193 error = ENOBUFS;
4194 goto out3;
4195 }
4196 m = NULL; /* consumed */
4197 error = 0;
4198 out3: kpreempt_enable();
4199
4200 #ifdef ALTQ
4201 out2:
4202 #endif
4203 wg_put_session(wgs, &wgs_psref);
4204 out1: wg_put_peer(wgp, &wgp_psref);
4205 out0: m_freem(m);
4206 curlwp_bindx(bound);
4207 return error;
4208 }
4209
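/*
 * wg_send_udp(wgp, m)
 *
 *	Transmit a WireGuard message to the peer's current endpoint
 *	over the UDP socket of the matching address family.
 */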
4210 static int
4211 wg_send_udp(struct wg_peer *wgp, struct mbuf *m)
4212 {
4213 struct psref psref;
4214 struct wg_sockaddr *wgsa;
4215 int error;
4216 struct socket *so;
4217
4218 wgsa = wg_get_endpoint_sa(wgp, &psref);
4219 so = wg_get_so_by_peer(wgp, wgsa);
4220 solock(so);
4221 if (wgsatosa(wgsa)->sa_family == AF_INET) {
4222 error = udp_send(so, m, wgsatosa(wgsa), NULL, curlwp);
4223 } else {
4224 #ifdef INET6
4225 error = udp6_output(sotoinpcb(so), m, wgsatosin6(wgsa),
4226 NULL, curlwp);
4227 #else
4228 m_freem(m);
4229 error = EPFNOSUPPORT;
4230 #endif
4231 }
4232 sounlock(so);
4233 wg_put_sa(wgp, wgsa, &psref);
4234
4235 return error;
4236 }
4237
4238 /* Inspired by pppoe_get_mbuf */
4239 static struct mbuf *
4240 wg_get_mbuf(size_t leading_len, size_t len)
4241 {
4242 struct mbuf *m;
4243
4244 KASSERT(leading_len <= MCLBYTES);
4245 KASSERT(len <= MCLBYTES - leading_len);
4246
4247 m = m_gethdr(M_DONTWAIT, MT_DATA);
4248 if (m == NULL)
4249 return NULL;
4250 if (len + leading_len > MHLEN) {
4251 m_clget(m, M_DONTWAIT);
4252 if ((m->m_flags & M_EXT) == 0) {
4253 m_free(m);
4254 return NULL;
4255 }
4256 }
4257 m->m_data += leading_len;
4258 m->m_pkthdr.len = m->m_len = len;
4259
4260 return m;
4261 }
4262
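/*
 * wg_send_data_msg(wgp, wgs, m)
 *
 *	Encrypt the payload of m, padded to a 16-byte boundary, into a
 *	freshly allocated mbuf with a transport data header ([W] 5.4.6)
 *	and send it; update interface statistics and trigger a rekey if
 *	the session reaches REKEY-AFTER-TIME or REKEY-AFTER-MESSAGES.
 *	Consumes m.
 */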
4263 static int
4264 wg_send_data_msg(struct wg_peer *wgp, struct wg_session *wgs,
4265 struct mbuf *m)
4266 {
4267 struct wg_softc *wg = wgp->wgp_sc;
4268 int error;
4269 size_t inner_len, padded_len, encrypted_len;
4270 char *padded_buf = NULL;
4271 size_t mlen;
4272 struct wg_msg_data *wgmd;
4273 bool free_padded_buf = false;
4274 struct mbuf *n;
4275 size_t leading_len = max_hdr + sizeof(struct udphdr);
4276
4277 mlen = m_length(m);
4278 inner_len = mlen;
4279 padded_len = roundup(mlen, 16);
4280 encrypted_len = padded_len + WG_AUTHTAG_LEN;
4281 WG_DLOG("inner=%zu, padded=%zu, encrypted_len=%zu\n",
4282 inner_len, padded_len, encrypted_len);
4283 if (mlen != 0) {
4284 bool success;
4285 success = m_ensure_contig(&m, padded_len);
4286 if (success) {
4287 padded_buf = mtod(m, char *);
4288 } else {
4289 padded_buf = kmem_intr_alloc(padded_len, KM_NOSLEEP);
4290 if (padded_buf == NULL) {
4291 error = ENOBUFS;
4292 goto end;
4293 }
4294 free_padded_buf = true;
4295 m_copydata(m, 0, mlen, padded_buf);
4296 }
4297 memset(padded_buf + mlen, 0, padded_len - inner_len);
4298 }
4299
4300 n = wg_get_mbuf(leading_len, sizeof(*wgmd) + encrypted_len);
4301 if (n == NULL) {
4302 error = ENOBUFS;
4303 goto end;
4304 }
4305 KASSERT(n->m_len >= sizeof(*wgmd));
4306 wgmd = mtod(n, struct wg_msg_data *);
4307 wg_fill_msg_data(wg, wgp, wgs, wgmd);
4308 #ifdef WG_DEBUG_PACKET
4309 if (wg_debug & WG_DEBUG_FLAGS_PACKET) {
4310 hexdump(printf, "padded_buf", padded_buf,
4311 padded_len);
4312 }
4313 #endif
4314 /* [W] 5.4.6: AEAD(Tm^send, Nm^send, P, e) */
4315 wg_algo_aead_enc((char *)wgmd + sizeof(*wgmd), encrypted_len,
4316 wgs->wgs_tkey_send, le64toh(wgmd->wgmd_counter),
4317 padded_buf, padded_len,
4318 NULL, 0);
4319 #ifdef WG_DEBUG_PACKET
4320 if (wg_debug & WG_DEBUG_FLAGS_PACKET) {
4321 hexdump(printf, "tkey_send", wgs->wgs_tkey_send,
4322 sizeof(wgs->wgs_tkey_send));
4323 hexdump(printf, "wgmd", wgmd, sizeof(*wgmd));
4324 hexdump(printf, "outgoing packet",
4325 (char *)wgmd + sizeof(*wgmd), encrypted_len);
4326 size_t decrypted_len = encrypted_len - WG_AUTHTAG_LEN;
4327 char *decrypted_buf = kmem_intr_alloc((decrypted_len +
4328 WG_AUTHTAG_LEN/*XXX*/), KM_NOSLEEP);
4329 if (decrypted_buf != NULL) {
4330 error = wg_algo_aead_dec(
4331 1 + decrypted_buf /* force misalignment */,
4332 encrypted_len - WG_AUTHTAG_LEN /* XXX */,
4333 wgs->wgs_tkey_send, le64toh(wgmd->wgmd_counter),
4334 (char *)wgmd + sizeof(*wgmd), encrypted_len,
4335 NULL, 0);
4336 if (error) {
4337 WG_DLOG("wg_algo_aead_dec failed: %d\n",
4338 error);
4339 }
4340 if (!consttime_memequal(1 + decrypted_buf,
4341 (char *)wgmd + sizeof(*wgmd),
4342 decrypted_len)) {
4343 WG_DLOG("wg_algo_aead_dec returned garbage\n");
4344 }
4345 kmem_intr_free(decrypted_buf, (decrypted_len +
4346 WG_AUTHTAG_LEN/*XXX*/));
4347 }
4348 }
4349 #endif
4350
4351 error = wg->wg_ops->send_data_msg(wgp, n);
4352 if (error == 0) {
4353 struct ifnet *ifp = &wg->wg_if;
4354 if_statadd(ifp, if_obytes, mlen);
4355 if_statinc(ifp, if_opackets);
4356 if (wgs->wgs_is_initiator &&
4357 ((time_uptime - wgs->wgs_time_established) >=
4358 wg_rekey_after_time)) {
4359 /*
4360 * [W] 6.2 Transport Message Limits
4361 * "if a peer is the initiator of a current secure
4362 * session, WireGuard will send a handshake initiation
4363 * message to begin a new secure session if, after
4364 * transmitting a transport data message, the current
4365 * secure session is REKEY-AFTER-TIME seconds old,"
4366 */
4367 WG_TRACE("rekey after time");
4368 atomic_store_relaxed(&wgp->wgp_force_rekey, 1);
4369 wg_schedule_peer_task(wgp, WGP_TASK_SEND_INIT_MESSAGE);
4370 }
4371 wgs->wgs_time_last_data_sent = time_uptime;
4372 if (wg_session_get_send_counter(wgs) >=
4373 wg_rekey_after_messages) {
4374 /*
4375 * [W] 6.2 Transport Message Limits
4376 * "WireGuard will try to create a new session, by
4377 * sending a handshake initiation message (section
4378 * 5.4.2), after it has sent REKEY-AFTER-MESSAGES
4379 * transport data messages..."
4380 */
4381 WG_TRACE("rekey after messages");
4382 atomic_store_relaxed(&wgp->wgp_force_rekey, 1);
4383 wg_schedule_peer_task(wgp, WGP_TASK_SEND_INIT_MESSAGE);
4384 }
4385 }
4386 end:
4387 m_freem(m);
4388 if (free_padded_buf)
4389 kmem_intr_free(padded_buf, padded_len);
4390 return error;
4391 }
4392
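/*
 * wg_input(ifp, m, af)
 *
 *	Hand a decrypted inner packet to the network stack by
 *	enqueueing it on the IPv4 or IPv6 packet queue.
 */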
4393 static void
4394 wg_input(struct ifnet *ifp, struct mbuf *m, const int af)
4395 {
4396 pktqueue_t *pktq;
4397 size_t pktlen;
4398
4399 KASSERT(af == AF_INET || af == AF_INET6);
4400
4401 WG_TRACE("");
4402
4403 m_set_rcvif(m, ifp);
4404 pktlen = m->m_pkthdr.len;
4405
4406 bpf_mtap_af(ifp, af, m, BPF_D_IN);
4407
4408 switch (af) {
4409 case AF_INET:
4410 pktq = ip_pktq;
4411 break;
4412 #ifdef INET6
4413 case AF_INET6:
4414 pktq = ip6_pktq;
4415 break;
4416 #endif
4417 default:
4418 panic("invalid af=%d", af);
4419 }
4420
4421 kpreempt_disable();
4422 const u_int h = curcpu()->ci_index;
4423 if (__predict_true(pktq_enqueue(pktq, m, h))) {
4424 if_statadd(ifp, if_ibytes, pktlen);
4425 if_statinc(ifp, if_ipackets);
4426 } else {
4427 m_freem(m);
4428 }
4429 kpreempt_enable();
4430 }
4431
4432 static void
4433 wg_calc_pubkey(uint8_t pubkey[WG_STATIC_KEY_LEN],
4434 const uint8_t privkey[WG_STATIC_KEY_LEN])
4435 {
4436
4437 crypto_scalarmult_base(pubkey, privkey);
4438 }
4439
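/*
 * wg_rtable_add_route(wg, wga)
 *
 *	Insert an allowed-IP entry into the per-family radix tree;
 *	returns EEXIST if an equivalent entry is already present.
 */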
4440 static int
4441 wg_rtable_add_route(struct wg_softc *wg, struct wg_allowedip *wga)
4442 {
4443 struct radix_node_head *rnh;
4444 struct radix_node *rn;
4445 int error = 0;
4446
4447 rw_enter(wg->wg_rwlock, RW_WRITER);
4448 rnh = wg_rnh(wg, wga->wga_family);
4449 KASSERT(rnh != NULL);
4450 rn = rnh->rnh_addaddr(&wga->wga_sa_addr, &wga->wga_sa_mask, rnh,
4451 wga->wga_nodes);
4452 rw_exit(wg->wg_rwlock);
4453
4454 if (rn == NULL)
4455 error = EEXIST;
4456
4457 return error;
4458 }
4459
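/*
 * wg_handle_prop_peer(wg, peer, wgpp)
 *
 *	Construct a wg_peer from the proplib dictionary supplied via
 *	ioctl: name, public key, optional preshared key and endpoint,
 *	and the list of allowed IPs, which are also inserted into the
 *	routing tables.
 */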
4460 static int
4461 wg_handle_prop_peer(struct wg_softc *wg, prop_dictionary_t peer,
4462 struct wg_peer **wgpp)
4463 {
4464 int error = 0;
4465 const void *pubkey;
4466 size_t pubkey_len;
4467 const void *psk;
4468 size_t psk_len;
4469 const char *name = NULL;
4470
4471 if (prop_dictionary_get_string(peer, "name", &name)) {
4472 if (strlen(name) > WG_PEER_NAME_MAXLEN) {
4473 error = EINVAL;
4474 goto out;
4475 }
4476 }
4477
4478 if (!prop_dictionary_get_data(peer, "public_key",
4479 &pubkey, &pubkey_len)) {
4480 error = EINVAL;
4481 goto out;
4482 }
4483 #ifdef WG_DEBUG_DUMP
4484 if (wg_debug & WG_DEBUG_FLAGS_DUMP) {
4485 char *hex = gethexdump(pubkey, pubkey_len);
4486 log(LOG_DEBUG, "pubkey=%p, pubkey_len=%zu\n%s\n",
4487 pubkey, pubkey_len, hex);
4488 puthexdump(hex, pubkey, pubkey_len);
4489 }
4490 #endif
4491
4492 struct wg_peer *wgp = wg_alloc_peer(wg);
4493 memcpy(wgp->wgp_pubkey, pubkey, sizeof(wgp->wgp_pubkey));
4494 if (name != NULL)
4495 strncpy(wgp->wgp_name, name, sizeof(wgp->wgp_name));
4496
4497 if (prop_dictionary_get_data(peer, "preshared_key", &psk, &psk_len)) {
4498 if (psk_len != sizeof(wgp->wgp_psk)) {
4499 error = EINVAL;
4500 goto out;
4501 }
4502 memcpy(wgp->wgp_psk, psk, sizeof(wgp->wgp_psk));
4503 }
4504
4505 const void *addr;
4506 size_t addr_len;
4507 struct wg_sockaddr *wgsa = wgp->wgp_endpoint;
4508
4509 if (!prop_dictionary_get_data(peer, "endpoint", &addr, &addr_len))
4510 goto skip_endpoint;
4511 if (addr_len < sizeof(*wgsatosa(wgsa)) ||
4512 addr_len > sizeof(*wgsatoss(wgsa))) {
4513 error = EINVAL;
4514 goto out;
4515 }
4516 memcpy(wgsatoss(wgsa), addr, addr_len);
4517 switch (wgsa_family(wgsa)) {
4518 case AF_INET:
4519 #ifdef INET6
4520 case AF_INET6:
4521 #endif
4522 break;
4523 default:
4524 error = EPFNOSUPPORT;
4525 goto out;
4526 }
4527 if (addr_len != sockaddr_getsize_by_family(wgsa_family(wgsa))) {
4528 error = EINVAL;
4529 goto out;
4530 }
4531 {
4532 char addrstr[128];
4533 sockaddr_format(wgsatosa(wgsa), addrstr, sizeof(addrstr));
4534 WG_DLOG("addr=%s\n", addrstr);
4535 }
4536 wgp->wgp_endpoint_available = true;
4537
4538 prop_array_t allowedips;
4539 skip_endpoint:
4540 allowedips = prop_dictionary_get(peer, "allowedips");
4541 if (allowedips == NULL)
4542 goto skip;
4543
4544 prop_object_iterator_t _it = prop_array_iterator(allowedips);
4545 prop_dictionary_t prop_allowedip;
4546 int j = 0;
4547 while ((prop_allowedip = prop_object_iterator_next(_it)) != NULL) {
4548 struct wg_allowedip *wga = &wgp->wgp_allowedips[j];
4549
4550 if (!prop_dictionary_get_int(prop_allowedip, "family",
4551 &wga->wga_family))
4552 continue;
4553 if (!prop_dictionary_get_data(prop_allowedip, "ip",
4554 &addr, &addr_len))
4555 continue;
4556 if (!prop_dictionary_get_uint8(prop_allowedip, "cidr",
4557 &wga->wga_cidr))
4558 continue;
4559
4560 switch (wga->wga_family) {
4561 case AF_INET: {
4562 struct sockaddr_in sin;
4563 char addrstr[128];
4564 struct in_addr mask;
4565 struct sockaddr_in sin_mask;
4566
4567 if (addr_len != sizeof(struct in_addr))
4568 return EINVAL;
4569 memcpy(&wga->wga_addr4, addr, addr_len);
4570
4571 sockaddr_in_init(&sin, (const struct in_addr *)addr,
4572 0);
4573 sockaddr_copy(&wga->wga_sa_addr,
4574 sizeof(sin), sintosa(&sin));
4575
4576 sockaddr_format(sintosa(&sin),
4577 addrstr, sizeof(addrstr));
4578 WG_DLOG("addr=%s/%d\n", addrstr, wga->wga_cidr);
4579
4580 in_len2mask(&mask, wga->wga_cidr);
4581 sockaddr_in_init(&sin_mask, &mask, 0);
4582 sockaddr_copy(&wga->wga_sa_mask,
4583 sizeof(sin_mask), sintosa(&sin_mask));
4584
4585 break;
4586 }
4587 #ifdef INET6
4588 case AF_INET6: {
4589 struct sockaddr_in6 sin6;
4590 char addrstr[128];
4591 struct in6_addr mask;
4592 struct sockaddr_in6 sin6_mask;
4593
4594 if (addr_len != sizeof(struct in6_addr))
4595 return EINVAL;
4596 memcpy(&wga->wga_addr6, addr, addr_len);
4597
4598 sockaddr_in6_init(&sin6, (const struct in6_addr *)addr,
4599 0, 0, 0);
4600 sockaddr_copy(&wga->wga_sa_addr,
4601 sizeof(sin6), sin6tosa(&sin6));
4602
4603 sockaddr_format(sin6tosa(&sin6),
4604 addrstr, sizeof(addrstr));
4605 WG_DLOG("addr=%s/%d\n", addrstr, wga->wga_cidr);
4606
4607 in6_prefixlen2mask(&mask, wga->wga_cidr);
4608 sockaddr_in6_init(&sin6_mask, &mask, 0, 0, 0);
4609 sockaddr_copy(&wga->wga_sa_mask,
4610 sizeof(sin6_mask), sin6tosa(&sin6_mask));
4611
4612 break;
4613 }
4614 #endif
4615 default:
4616 error = EINVAL;
4617 goto out;
4618 }
4619 wga->wga_peer = wgp;
4620
4621 error = wg_rtable_add_route(wg, wga);
4622 if (error != 0)
4623 goto out;
4624
4625 j++;
4626 }
4627 wgp->wgp_n_allowedips = j;
4628 skip:
4629 *wgpp = wgp;
4630 out:
4631 return error;
4632 }
4633
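/*
 * wg_alloc_prop_buf(_buf, ifd)
 *
 *	Copy the externalized proplib data from userland into a freshly
 *	allocated, NUL-terminated kernel buffer of ifd_len + 1 bytes.
 */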
4634 static int
4635 wg_alloc_prop_buf(char **_buf, struct ifdrv *ifd)
4636 {
4637 int error;
4638 char *buf;
4639
4640 WG_DLOG("buf=%p, len=%zu\n", ifd->ifd_data, ifd->ifd_len);
4641 if (ifd->ifd_len >= WG_MAX_PROPLEN)
4642 return E2BIG;
4643 buf = kmem_alloc(ifd->ifd_len + 1, KM_SLEEP);
4644 error = copyin(ifd->ifd_data, buf, ifd->ifd_len);
4645 if (error != 0)
4646 return error;
4647 buf[ifd->ifd_len] = '\0';
4648 #ifdef WG_DEBUG_DUMP
4649 if (wg_debug & WG_DEBUG_FLAGS_DUMP) {
4650 log(LOG_DEBUG, "%.*s\n", (int)MIN(INT_MAX, ifd->ifd_len),
4651 (const char *)buf);
4652 }
4653 #endif
4654 *_buf = buf;
4655 return 0;
4656 }
4657
4658 static int
4659 wg_ioctl_set_private_key(struct wg_softc *wg, struct ifdrv *ifd)
4660 {
4661 int error;
4662 prop_dictionary_t prop_dict;
4663 char *buf = NULL;
4664 const void *privkey;
4665 size_t privkey_len;
4666
4667 error = wg_alloc_prop_buf(&buf, ifd);
4668 if (error != 0)
4669 return error;
4670 error = EINVAL;
4671 prop_dict = prop_dictionary_internalize(buf);
4672 if (prop_dict == NULL)
4673 goto out;
4674 if (!prop_dictionary_get_data(prop_dict, "private_key",
4675 &privkey, &privkey_len))
4676 goto out;
4677 #ifdef WG_DEBUG_DUMP
4678 if (wg_debug & WG_DEBUG_FLAGS_DUMP) {
4679 char *hex = gethexdump(privkey, privkey_len);
4680 log(LOG_DEBUG, "privkey=%p, privkey_len=%zu\n%s\n",
4681 privkey, privkey_len, hex);
4682 puthexdump(hex, privkey, privkey_len);
4683 }
4684 #endif
4685 if (privkey_len != WG_STATIC_KEY_LEN)
4686 goto out;
4687 memcpy(wg->wg_privkey, privkey, WG_STATIC_KEY_LEN);
4688 wg_calc_pubkey(wg->wg_pubkey, wg->wg_privkey);
4689 error = 0;
4690
4691 out:
4692 kmem_free(buf, ifd->ifd_len + 1);
4693 return error;
4694 }
4695
4696 static int
4697 wg_ioctl_set_listen_port(struct wg_softc *wg, struct ifdrv *ifd)
4698 {
4699 int error;
4700 prop_dictionary_t prop_dict;
4701 char *buf = NULL;
4702 uint16_t port;
4703
4704 error = wg_alloc_prop_buf(&buf, ifd);
4705 if (error != 0)
4706 return error;
4707 error = EINVAL;
4708 prop_dict = prop_dictionary_internalize(buf);
4709 if (prop_dict == NULL)
4710 goto out;
4711 if (!prop_dictionary_get_uint16(prop_dict, "listen_port", &port))
4712 goto out;
4713
4714 error = wg->wg_ops->bind_port(wg, (uint16_t)port);
4715
4716 out:
4717 kmem_free(buf, ifd->ifd_len + 1);
4718 return error;
4719 }
4720
4721 static int
4722 wg_ioctl_add_peer(struct wg_softc *wg, struct ifdrv *ifd)
4723 {
4724 int error;
4725 prop_dictionary_t prop_dict;
4726 char *buf = NULL;
4727 struct wg_peer *wgp = NULL, *wgp0 __diagused;
4728
4729 error = wg_alloc_prop_buf(&buf, ifd);
4730 if (error != 0)
4731 return error;
4732 error = EINVAL;
4733 prop_dict = prop_dictionary_internalize(buf);
4734 if (prop_dict == NULL)
4735 goto out;
4736
4737 error = wg_handle_prop_peer(wg, prop_dict, &wgp);
4738 if (error != 0)
4739 goto out;
4740
4741 mutex_enter(wg->wg_lock);
4742 if (thmap_get(wg->wg_peers_bypubkey, wgp->wgp_pubkey,
4743 sizeof(wgp->wgp_pubkey)) != NULL ||
4744 (wgp->wgp_name[0] &&
4745 thmap_get(wg->wg_peers_byname, wgp->wgp_name,
4746 strlen(wgp->wgp_name)) != NULL)) {
4747 mutex_exit(wg->wg_lock);
4748 wg_destroy_peer(wgp);
4749 error = EEXIST;
4750 goto out;
4751 }
4752 wgp0 = thmap_put(wg->wg_peers_bypubkey, wgp->wgp_pubkey,
4753 sizeof(wgp->wgp_pubkey), wgp);
4754 KASSERT(wgp0 == wgp);
4755 if (wgp->wgp_name[0]) {
4756 wgp0 = thmap_put(wg->wg_peers_byname, wgp->wgp_name,
4757 strlen(wgp->wgp_name), wgp);
4758 KASSERT(wgp0 == wgp);
4759 }
4760 WG_PEER_WRITER_INSERT_HEAD(wgp, wg);
4761 wg->wg_npeers++;
4762 mutex_exit(wg->wg_lock);
4763
4764 if_link_state_change(&wg->wg_if, LINK_STATE_UP);
4765
4766 out:
4767 kmem_free(buf, ifd->ifd_len + 1);
4768 return error;
4769 }
4770
4771 static int
4772 wg_ioctl_delete_peer(struct wg_softc *wg, struct ifdrv *ifd)
4773 {
4774 int error;
4775 prop_dictionary_t prop_dict;
4776 char *buf = NULL;
4777 const char *name;
4778
4779 error = wg_alloc_prop_buf(&buf, ifd);
4780 if (error != 0)
4781 return error;
4782 error = EINVAL;
4783 prop_dict = prop_dictionary_internalize(buf);
4784 if (prop_dict == NULL)
4785 goto out;
4786
4787 if (!prop_dictionary_get_string(prop_dict, "name", &name))
4788 goto out;
4789 if (strlen(name) > WG_PEER_NAME_MAXLEN)
4790 goto out;
4791
4792 error = wg_destroy_peer_name(wg, name);
4793 out:
4794 kmem_free(buf, ifd->ifd_len + 1);
4795 return error;
4796 }
4797
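/*
 * wg_is_authorized(wg, cmd)
 *
 *	True if the caller holds the kauth privilege required for cmd:
 *	read access for SIOCGDRVSPEC, write access otherwise.
 */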
4798 static bool
4799 wg_is_authorized(struct wg_softc *wg, u_long cmd)
4800 {
4801 int au = cmd == SIOCGDRVSPEC ?
4802 KAUTH_REQ_NETWORK_INTERFACE_WG_GETPRIV :
4803 KAUTH_REQ_NETWORK_INTERFACE_WG_SETPRIV;
4804 return kauth_authorize_network(kauth_cred_get(),
4805 KAUTH_NETWORK_INTERFACE_WG, au, &wg->wg_if,
4806 (void *)cmd, NULL) == 0;
4807 }
4808
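/*
 * wg_ioctl_get(wg, ifd)
 *
 *	Externalize the interface configuration into a proplib
 *	dictionary and copy it out.  The private key and preshared keys
 *	are included only if the caller is privileged.
 */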
4809 static int
4810 wg_ioctl_get(struct wg_softc *wg, struct ifdrv *ifd)
4811 {
4812 int error = ENOMEM;
4813 prop_dictionary_t prop_dict;
4814 prop_array_t peers = NULL;
4815 char *buf;
4816 struct wg_peer *wgp;
4817 int s, i;
4818
4819 prop_dict = prop_dictionary_create();
4820 if (prop_dict == NULL)
4821 goto error;
4822
4823 if (wg_is_authorized(wg, SIOCGDRVSPEC)) {
4824 if (!prop_dictionary_set_data(prop_dict, "private_key",
4825 wg->wg_privkey, WG_STATIC_KEY_LEN))
4826 goto error;
4827 }
4828
4829 if (wg->wg_listen_port != 0) {
4830 if (!prop_dictionary_set_uint16(prop_dict, "listen_port",
4831 wg->wg_listen_port))
4832 goto error;
4833 }
4834
4835 if (wg->wg_npeers == 0)
4836 goto skip_peers;
4837
4838 peers = prop_array_create();
4839 if (peers == NULL)
4840 goto error;
4841
4842 s = pserialize_read_enter();
4843 i = 0;
4844 WG_PEER_READER_FOREACH(wgp, wg) {
4845 struct wg_sockaddr *wgsa;
4846 struct psref wgp_psref, wgsa_psref;
4847 prop_dictionary_t prop_peer;
4848
4849 wg_get_peer(wgp, &wgp_psref);
4850 pserialize_read_exit(s);
4851
4852 prop_peer = prop_dictionary_create();
4853 if (prop_peer == NULL)
4854 goto next;
4855
4856 if (strlen(wgp->wgp_name) > 0) {
4857 if (!prop_dictionary_set_string(prop_peer, "name",
4858 wgp->wgp_name))
4859 goto next;
4860 }
4861
4862 if (!prop_dictionary_set_data(prop_peer, "public_key",
4863 wgp->wgp_pubkey, sizeof(wgp->wgp_pubkey)))
4864 goto next;
4865
4866 uint8_t psk_zero[WG_PRESHARED_KEY_LEN] = {0};
4867 if (!consttime_memequal(wgp->wgp_psk, psk_zero,
4868 sizeof(wgp->wgp_psk))) {
4869 if (wg_is_authorized(wg, SIOCGDRVSPEC)) {
4870 if (!prop_dictionary_set_data(prop_peer,
4871 "preshared_key",
4872 wgp->wgp_psk, sizeof(wgp->wgp_psk)))
4873 goto next;
4874 }
4875 }
4876
4877 wgsa = wg_get_endpoint_sa(wgp, &wgsa_psref);
4878 CTASSERT(AF_UNSPEC == 0);
4879 if (wgsa_family(wgsa) != 0 /*AF_UNSPEC*/ &&
4880 !prop_dictionary_set_data(prop_peer, "endpoint",
4881 wgsatoss(wgsa),
4882 sockaddr_getsize_by_family(wgsa_family(wgsa)))) {
4883 wg_put_sa(wgp, wgsa, &wgsa_psref);
4884 goto next;
4885 }
4886 wg_put_sa(wgp, wgsa, &wgsa_psref);
4887
4888 const struct timespec *t = &wgp->wgp_last_handshake_time;
4889
4890 if (!prop_dictionary_set_uint64(prop_peer,
4891 "last_handshake_time_sec", (uint64_t)t->tv_sec))
4892 goto next;
4893 if (!prop_dictionary_set_uint32(prop_peer,
4894 "last_handshake_time_nsec", (uint32_t)t->tv_nsec))
4895 goto next;
4896
4897 if (wgp->wgp_n_allowedips == 0)
4898 goto skip_allowedips;
4899
4900 prop_array_t allowedips = prop_array_create();
4901 if (allowedips == NULL)
4902 goto next;
4903 for (int j = 0; j < wgp->wgp_n_allowedips; j++) {
4904 struct wg_allowedip *wga = &wgp->wgp_allowedips[j];
4905 prop_dictionary_t prop_allowedip;
4906
4907 prop_allowedip = prop_dictionary_create();
4908 if (prop_allowedip == NULL)
4909 break;
4910
4911 if (!prop_dictionary_set_int(prop_allowedip, "family",
4912 wga->wga_family))
4913 goto _next;
4914 if (!prop_dictionary_set_uint8(prop_allowedip, "cidr",
4915 wga->wga_cidr))
4916 goto _next;
4917
4918 switch (wga->wga_family) {
4919 case AF_INET:
4920 if (!prop_dictionary_set_data(prop_allowedip,
4921 "ip", &wga->wga_addr4,
4922 sizeof(wga->wga_addr4)))
4923 goto _next;
4924 break;
4925 #ifdef INET6
4926 case AF_INET6:
4927 if (!prop_dictionary_set_data(prop_allowedip,
4928 "ip", &wga->wga_addr6,
4929 sizeof(wga->wga_addr6)))
4930 goto _next;
4931 break;
4932 #endif
4933 default:
4934 break;
4935 }
4936 prop_array_set(allowedips, j, prop_allowedip);
4937 _next:
4938 prop_object_release(prop_allowedip);
4939 }
4940 prop_dictionary_set(prop_peer, "allowedips", allowedips);
4941 prop_object_release(allowedips);
4942
4943 skip_allowedips:
4944
4945 prop_array_set(peers, i, prop_peer);
4946 next:
4947 if (prop_peer)
4948 prop_object_release(prop_peer);
4949 i++;
4950
4951 s = pserialize_read_enter();
4952 wg_put_peer(wgp, &wgp_psref);
4953 }
4954 pserialize_read_exit(s);
4955
4956 prop_dictionary_set(prop_dict, "peers", peers);
4957 prop_object_release(peers);
4958 peers = NULL;
4959
4960 skip_peers:
4961 buf = prop_dictionary_externalize(prop_dict);
4962 if (buf == NULL)
4963 goto error;
4964 if (ifd->ifd_len < (strlen(buf) + 1)) {
4965 error = EINVAL;
4966 goto error;
4967 }
4968 error = copyout(buf, ifd->ifd_data, strlen(buf) + 1);
4969
4970 free(buf, 0);
4971 error:
4972 if (peers != NULL)
4973 prop_object_release(peers);
4974 if (prop_dict != NULL)
4975 prop_object_release(prop_dict);
4976
4977 return error;
4978 }
4979
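/*
 * wg_ioctl(ifp, cmd, data)
 *
 *	Interface ioctl handler.  SIOC[SG]DRVSPEC carry proplib-encoded
 *	wg(4) configuration; other commands fall through to the common
 *	interface code, and in a rump kernel with a host tun(4) device
 *	attached, address ioctls are additionally forwarded to the host.
 */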
4980 static int
4981 wg_ioctl(struct ifnet *ifp, u_long cmd, void *data)
4982 {
4983 struct wg_softc *wg = ifp->if_softc;
4984 struct ifreq *ifr = data;
4985 struct ifaddr *ifa = data;
4986 struct ifdrv *ifd = data;
4987 int error = 0;
4988
4989 switch (cmd) {
4990 case SIOCINITIFADDR:
4991 if (ifa->ifa_addr->sa_family != AF_LINK &&
4992 (ifp->if_flags & (IFF_UP | IFF_RUNNING)) !=
4993 (IFF_UP | IFF_RUNNING)) {
4994 ifp->if_flags |= IFF_UP;
4995 error = if_init(ifp);
4996 }
4997 return error;
4998 case SIOCADDMULTI:
4999 case SIOCDELMULTI:
5000 switch (ifr->ifr_addr.sa_family) {
5001 case AF_INET: /* IP supports Multicast */
5002 break;
5003 #ifdef INET6
5004 case AF_INET6: /* IP6 supports Multicast */
5005 break;
5006 #endif
5007 		default: /* Other protocols don't support Multicast */
5008 error = EAFNOSUPPORT;
5009 break;
5010 }
5011 return error;
5012 case SIOCSDRVSPEC:
5013 if (!wg_is_authorized(wg, cmd)) {
5014 return EPERM;
5015 }
5016 switch (ifd->ifd_cmd) {
5017 case WG_IOCTL_SET_PRIVATE_KEY:
5018 error = wg_ioctl_set_private_key(wg, ifd);
5019 break;
5020 case WG_IOCTL_SET_LISTEN_PORT:
5021 error = wg_ioctl_set_listen_port(wg, ifd);
5022 break;
5023 case WG_IOCTL_ADD_PEER:
5024 error = wg_ioctl_add_peer(wg, ifd);
5025 break;
5026 case WG_IOCTL_DELETE_PEER:
5027 error = wg_ioctl_delete_peer(wg, ifd);
5028 break;
5029 default:
5030 error = EINVAL;
5031 break;
5032 }
5033 return error;
5034 case SIOCGDRVSPEC:
5035 return wg_ioctl_get(wg, ifd);
5036 case SIOCSIFFLAGS:
5037 if ((error = ifioctl_common(ifp, cmd, data)) != 0)
5038 break;
5039 switch (ifp->if_flags & (IFF_UP|IFF_RUNNING)) {
5040 case IFF_RUNNING:
5041 /*
5042 * If interface is marked down and it is running,
5043 * then stop and disable it.
5044 */
5045 if_stop(ifp, 1);
5046 break;
5047 case IFF_UP:
5048 /*
5049 * If interface is marked up and it is stopped, then
5050 * start it.
5051 */
5052 error = if_init(ifp);
5053 break;
5054 default:
5055 break;
5056 }
5057 return error;
5058 #ifdef WG_RUMPKERNEL
5059 case SIOCSLINKSTR:
5060 error = wg_ioctl_linkstr(wg, ifd);
5061 if (error == 0)
5062 wg->wg_ops = &wg_ops_rumpuser;
5063 return error;
5064 #endif
5065 default:
5066 break;
5067 }
5068
5069 error = ifioctl_common(ifp, cmd, data);
5070
5071 #ifdef WG_RUMPKERNEL
5072 if (!wg_user_mode(wg))
5073 return error;
5074
5075 /* Do the same to the corresponding tun device on the host */
5076 /*
5077 * XXX Actually the command has not been handled yet. It
5078 * will be handled via pr_ioctl form doifioctl later.
5079 	 * will be handled via pr_ioctl from doifioctl later.
5080 switch (cmd) {
5081 case SIOCAIFADDR:
5082 case SIOCDIFADDR: {
5083 struct in_aliasreq _ifra = *(const struct in_aliasreq *)data;
5084 struct in_aliasreq *ifra = &_ifra;
5085 KASSERT(error == ENOTTY);
5086 strncpy(ifra->ifra_name, rumpuser_wg_get_tunname(wg->wg_user),
5087 IFNAMSIZ);
5088 error = rumpuser_wg_ioctl(wg->wg_user, cmd, ifra, AF_INET);
5089 if (error == 0)
5090 error = ENOTTY;
5091 break;
5092 }
5093 #ifdef INET6
5094 case SIOCAIFADDR_IN6:
5095 case SIOCDIFADDR_IN6: {
5096 struct in6_aliasreq _ifra = *(const struct in6_aliasreq *)data;
5097 struct in6_aliasreq *ifra = &_ifra;
5098 KASSERT(error == ENOTTY);
5099 strncpy(ifra->ifra_name, rumpuser_wg_get_tunname(wg->wg_user),
5100 IFNAMSIZ);
5101 error = rumpuser_wg_ioctl(wg->wg_user, cmd, ifra, AF_INET6);
5102 if (error == 0)
5103 error = ENOTTY;
5104 break;
5105 }
5106 #endif
5107 }
5108 #endif /* WG_RUMPKERNEL */
5109
5110 return error;
5111 }
5112
5113 static int
5114 wg_init(struct ifnet *ifp)
5115 {
5116
5117 ifp->if_flags |= IFF_RUNNING;
5118
5119 /* TODO flush pending packets. */
5120 return 0;
5121 }
5122
5123 #ifdef ALTQ
5124 static void
5125 wg_start(struct ifnet *ifp)
5126 {
5127 struct mbuf *m;
5128
5129 for (;;) {
5130 IFQ_DEQUEUE(&ifp->if_snd, m);
5131 if (m == NULL)
5132 break;
5133
5134 kpreempt_disable();
5135 const uint32_t h = curcpu()->ci_index; // pktq_rps_hash(m)
5136 if (__predict_false(!pktq_enqueue(wg_pktq, m, h))) {
5137 WGLOG(LOG_ERR, "%s: pktq full, dropping\n",
5138 if_name(ifp));
5139 m_freem(m);
5140 }
5141 kpreempt_enable();
5142 }
5143 }
5144 #endif
5145
5146 static void
5147 wg_stop(struct ifnet *ifp, int disable)
5148 {
5149
5150 KASSERT((ifp->if_flags & IFF_RUNNING) != 0);
5151 ifp->if_flags &= ~IFF_RUNNING;
5152
5153 /* Need to do something? */
5154 }
5155
5156 #ifdef WG_DEBUG_PARAMS
5157 SYSCTL_SETUP(sysctl_net_wg_setup, "sysctl net.wg setup")
5158 {
5159 const struct sysctlnode *node = NULL;
5160
5161 sysctl_createv(clog, 0, NULL, &node,
5162 CTLFLAG_PERMANENT,
5163 CTLTYPE_NODE, "wg",
5164 SYSCTL_DESCR("wg(4)"),
5165 NULL, 0, NULL, 0,
5166 CTL_NET, CTL_CREATE, CTL_EOL);
5167 sysctl_createv(clog, 0, &node, NULL,
5168 CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
5169 CTLTYPE_QUAD, "rekey_after_messages",
5170 	    SYSCTL_DESCR("session lifetime in messages"),
5171 NULL, 0, &wg_rekey_after_messages, 0, CTL_CREATE, CTL_EOL);
5172 sysctl_createv(clog, 0, &node, NULL,
5173 CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
5174 CTLTYPE_INT, "rekey_after_time",
5175 	    SYSCTL_DESCR("session lifetime in seconds"),
5176 NULL, 0, &wg_rekey_after_time, 0, CTL_CREATE, CTL_EOL);
5177 sysctl_createv(clog, 0, &node, NULL,
5178 CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
5179 CTLTYPE_INT, "rekey_timeout",
5180 SYSCTL_DESCR("session handshake retry time"),
5181 NULL, 0, &wg_rekey_timeout, 0, CTL_CREATE, CTL_EOL);
5182 sysctl_createv(clog, 0, &node, NULL,
5183 CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
5184 CTLTYPE_INT, "rekey_attempt_time",
5185 SYSCTL_DESCR("session handshake timeout"),
5186 NULL, 0, &wg_rekey_attempt_time, 0, CTL_CREATE, CTL_EOL);
5187 sysctl_createv(clog, 0, &node, NULL,
5188 CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
5189 CTLTYPE_INT, "keepalive_timeout",
5190 SYSCTL_DESCR("keepalive timeout"),
5191 NULL, 0, &wg_keepalive_timeout, 0, CTL_CREATE, CTL_EOL);
5192 sysctl_createv(clog, 0, &node, NULL,
5193 CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
5194 CTLTYPE_BOOL, "force_underload",
5195 	    SYSCTL_DESCR("force the under-load condition"),
5196 NULL, 0, &wg_force_underload, 0, CTL_CREATE, CTL_EOL);
5197 sysctl_createv(clog, 0, &node, NULL,
5198 CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
5199 CTLTYPE_INT, "debug",
5200 SYSCTL_DESCR("set debug flags 1=log 2=trace 4=dump 8=packet"),
5201 NULL, 0, &wg_debug, 0, CTL_CREATE, CTL_EOL);
5202 }
5203 #endif
5204
5205 #ifdef WG_RUMPKERNEL
5206 static bool
5207 wg_user_mode(struct wg_softc *wg)
5208 {
5209
5210 return wg->wg_user != NULL;
5211 }
5212
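/*
 * wg_ioctl_linkstr(wg, ifd)
 *
 *	SIOCSLINKSTR handler for rump kernels: bind the interface to a
 *	host tun(4) device named by the link string.
 */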
5213 static int
5214 wg_ioctl_linkstr(struct wg_softc *wg, struct ifdrv *ifd)
5215 {
5216 struct ifnet *ifp = &wg->wg_if;
5217 int error;
5218
5219 if (ifp->if_flags & IFF_UP)
5220 return EBUSY;
5221
5222 if (ifd->ifd_cmd == IFLINKSTR_UNSET) {
5223 /* XXX do nothing */
5224 return 0;
5225 } else if (ifd->ifd_cmd != 0) {
5226 return EINVAL;
5227 } else if (wg->wg_user != NULL) {
5228 return EBUSY;
5229 }
5230
5231 	/* ifd_len is assumed to include the terminating NUL. */
5232 if (ifd->ifd_len > IFNAMSIZ) {
5233 return E2BIG;
5234 } else if (ifd->ifd_len < 1) {
5235 return EINVAL;
5236 }
5237
5238 char tun_name[IFNAMSIZ];
5239 error = copyinstr(ifd->ifd_data, tun_name, ifd->ifd_len, NULL);
5240 if (error != 0)
5241 return error;
5242
5243 if (strncmp(tun_name, "tun", 3) != 0)
5244 return EINVAL;
5245
5246 error = rumpuser_wg_create(tun_name, wg, &wg->wg_user);
5247
5248 return error;
5249 }
5250
5251 static int
5252 wg_send_user(struct wg_peer *wgp, struct mbuf *m)
5253 {
5254 int error;
5255 struct psref psref;
5256 struct wg_sockaddr *wgsa;
5257 struct wg_softc *wg = wgp->wgp_sc;
5258 struct iovec iov[1];
5259
5260 wgsa = wg_get_endpoint_sa(wgp, &psref);
5261
5262 iov[0].iov_base = mtod(m, void *);
5263 iov[0].iov_len = m->m_len;
5264
5265 /* Send messages to a peer via an ordinary socket. */
5266 error = rumpuser_wg_send_peer(wg->wg_user, wgsatosa(wgsa), iov, 1);
5267
5268 wg_put_sa(wgp, wgsa, &psref);
5269
5270 m_freem(m);
5271
5272 return error;
5273 }
5274
5275 static void
5276 wg_input_user(struct ifnet *ifp, struct mbuf *m, const int af)
5277 {
5278 struct wg_softc *wg = ifp->if_softc;
5279 struct iovec iov[2];
5280 struct sockaddr_storage ss;
5281
5282 KASSERT(af == AF_INET || af == AF_INET6);
5283
5284 WG_TRACE("");
5285
5286 if (af == AF_INET) {
5287 struct sockaddr_in *sin = (struct sockaddr_in *)&ss;
5288 struct ip *ip;
5289
5290 KASSERT(m->m_len >= sizeof(struct ip));
5291 ip = mtod(m, struct ip *);
5292 sockaddr_in_init(sin, &ip->ip_dst, 0);
5293 } else {
5294 struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&ss;
5295 struct ip6_hdr *ip6;
5296
5297 KASSERT(m->m_len >= sizeof(struct ip6_hdr));
5298 ip6 = mtod(m, struct ip6_hdr *);
5299 sockaddr_in6_init(sin6, &ip6->ip6_dst, 0, 0, 0);
5300 }
5301
5302 iov[0].iov_base = &ss;
5303 iov[0].iov_len = ss.ss_len;
5304 iov[1].iov_base = mtod(m, void *);
5305 iov[1].iov_len = m->m_len;
5306
5307 WG_DUMP_BUF(iov[1].iov_base, iov[1].iov_len);
5308
5309 /* Send decrypted packets to users via a tun. */
5310 rumpuser_wg_send_user(wg->wg_user, iov, 2);
5311
5312 m_freem(m);
5313 }
5314
5315 static int
5316 wg_bind_port_user(struct wg_softc *wg, const uint16_t port)
5317 {
5318 int error;
5319 uint16_t old_port = wg->wg_listen_port;
5320
5321 if (port != 0 && old_port == port)
5322 return 0;
5323
5324 error = rumpuser_wg_sock_bind(wg->wg_user, port);
5325 if (error == 0)
5326 wg->wg_listen_port = port;
5327 return error;
5328 }
5329
5330 /*
5331 * Receive user packets.
5332 */
5333 void
5334 rumpkern_wg_recv_user(struct wg_softc *wg, struct iovec *iov, size_t iovlen)
5335 {
5336 struct ifnet *ifp = &wg->wg_if;
5337 struct mbuf *m;
5338 const struct sockaddr *dst;
5339
5340 WG_TRACE("");
5341
5342 dst = iov[0].iov_base;
5343
5344 m = m_gethdr(M_DONTWAIT, MT_DATA);
5345 if (m == NULL)
5346 return;
5347 m->m_len = m->m_pkthdr.len = 0;
5348 m_copyback(m, 0, iov[1].iov_len, iov[1].iov_base);
5349
5350 WG_DLOG("iov_len=%zu\n", iov[1].iov_len);
5351 WG_DUMP_BUF(iov[1].iov_base, iov[1].iov_len);
5352
5353 (void)wg_output(ifp, m, dst, NULL);
5354 }
5355
5356 /*
5357 * Receive packets from a peer.
5358 */
5359 void
5360 rumpkern_wg_recv_peer(struct wg_softc *wg, struct iovec *iov, size_t iovlen)
5361 {
5362 struct mbuf *m;
5363 const struct sockaddr *src;
5364 int bound;
5365
5366 WG_TRACE("");
5367
5368 src = iov[0].iov_base;
5369
5370 m = m_gethdr(M_DONTWAIT, MT_DATA);
5371 if (m == NULL)
5372 return;
5373 m->m_len = m->m_pkthdr.len = 0;
5374 m_copyback(m, 0, iov[1].iov_len, iov[1].iov_base);
5375
5376 WG_DLOG("iov_len=%zu\n", iov[1].iov_len);
5377 WG_DUMP_BUF(iov[1].iov_base, iov[1].iov_len);
5378
5379 bound = curlwp_bind();
5380 wg_handle_packet(wg, m, src);
5381 curlwp_bindx(bound);
5382 }
5383 #endif /* WG_RUMPKERNEL */
5384
5385 /*
5386 * Module infrastructure
5387 */
5388 #include "if_module.h"
5389
5390 IF_MODULE(MODULE_CLASS_DRIVER, wg, "sodium,blake2s")
5391