/*	$NetBSD: if_wg.c,v 1.81 2024/07/24 20:54:43 christos Exp $	*/

/*
 * Copyright (C) Ryota Ozaki <ozaki.ryota@gmail.com>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the project nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * This network interface aims to implement the WireGuard protocol.
 * The implementation is based on the WireGuard paper as of 2018-06-30
 * [1], which is referred to in the source code with the label [W].  The
 * specification of the Noise protocol framework as of 2018-07-11 [2] is
 * referred to with the label [N].
 *
 * [1] https://www.wireguard.com/papers/wireguard.pdf
 * [2] http://noiseprotocol.org/noise.pdf
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_wg.c,v 1.81 2024/07/24 20:54:43 christos Exp $");

#ifdef _KERNEL_OPT
#include "opt_altq_enabled.h"
#include "opt_inet.h"
#endif

#include <sys/param.h>
#include <sys/types.h>

#include <sys/atomic.h>
#include <sys/callout.h>
#include <sys/cprng.h>
#include <sys/cpu.h>
#include <sys/device.h>
#include <sys/domain.h>
#include <sys/errno.h>
#include <sys/intr.h>
#include <sys/ioctl.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/once.h>
#include <sys/percpu.h>
#include <sys/pserialize.h>
#include <sys/psref.h>
#include <sys/queue.h>
#include <sys/rwlock.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/systm.h>
#include <sys/thmap.h>
#include <sys/threadpool.h>
#include <sys/time.h>
#include <sys/timespec.h>
#include <sys/workqueue.h>

#include <net/bpf.h>
#include <net/if.h>
#include <net/if_types.h>
#include <net/if_wg.h>
#include <net/pktqueue.h>
#include <net/route.h>

#include <netinet/in.h>
#include <netinet/in_pcb.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/ip_var.h>
#include <netinet/udp.h>
#include <netinet/udp_var.h>

#ifdef INET6
#include <netinet/ip6.h>
#include <netinet6/in6_pcb.h>
#include <netinet6/in6_var.h>
#include <netinet6/ip6_var.h>
#include <netinet6/udp6_var.h>
#endif /* INET6 */

#include <prop/proplib.h>

#include <crypto/blake2/blake2s.h>
#include <crypto/sodium/crypto_aead_chacha20poly1305.h>
#include <crypto/sodium/crypto_aead_xchacha20poly1305.h>
#include <crypto/sodium/crypto_scalarmult.h>

#include "ioconf.h"

#ifdef WG_RUMPKERNEL
#include "wg_user.h"
#endif

/*
 * Data structures
 * - struct wg_softc is an instance of a wg interface
 *   - It has a list of peers (struct wg_peer)
 *   - It has a threadpool job that sends/receives handshake messages and
 *     runs event handlers
 *   - It has its own two routing tables: one for IPv4 and the other for IPv6
 * - struct wg_peer represents a peer
 *   - It has a struct work to handle handshakes and timer tasks
 *   - It has a pair of session instances (struct wg_session)
 *   - It has a pair of endpoint instances (struct wg_sockaddr)
 *     - Normally one endpoint is used; the second one is used only during
 *       a peer migration (a change of the peer's IP address)
 *   - It has a list of IP addresses and subnetworks called allowedips
 *     (struct wg_allowedip)
 *     - A packet sent over a session is allowed if its destination matches
 *       any IP address or subnetwork in the list
 * - struct wg_session represents a session of a secure tunnel with a peer
 *   - Two session instances belong to a peer: a stable session and an
 *     unstable session
 *   - A handshake process of a session always starts with an unstable instance
 *   - Once a session is established, its instance becomes stable and the
 *     other becomes unstable instead
 *   - Data messages are always sent via a stable session
 *
 * Locking notes:
 * - Each wg has a mutex(9) wg_lock and a rwlock(9) wg_rwlock
 *   - Changes to the peer list are serialized by wg_lock
 *   - The peer list may be read with pserialize(9) and psref(9)
 *   - The rwlock (wg_rwlock) protects the routing tables (wg_rtable_ipv[46])
 *     => XXX replace by pserialize when routing table is psz-safe
 * - Each peer (struct wg_peer, wgp) has a mutex wgp_lock, which can be taken
 *   only in thread context and serializes:
 *   - the stable and unstable session pointers
 *   - all unstable session state
 * - Packet processing may be done in softint context:
 *   - The stable session can be read under pserialize(9) or psref(9)
 *     - The stable session is always ESTABLISHED
 *     - On a session swap, we must wait for all readers to release a
 *       reference to a stable session before changing wgs_state and
 *       session states
 * - Lock order: wg_lock -> wgp_lock
 */
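
/*
 * Hedged sketch (not an existing function in this file) of the lock order
 * stated above, for a control-path operation that walks the peer list and
 * mutates per-peer state; the names match the structures and macros
 * defined below.
 *
 *	mutex_enter(wg->wg_lock);		(serializes peer-list changes)
 *	WG_PEER_WRITER_FOREACH(wgp, wg) {
 *		mutex_enter(wgp->wgp_lock);	(wg_lock -> wgp_lock order)
 *		...inspect or modify wgp's sessions...
 *		mutex_exit(wgp->wgp_lock);
 *	}
 *	mutex_exit(wg->wg_lock);
 *
 * Softint-context readers instead take a passive reference to the stable
 * session under pserialize, as wg_get_stable_session() below does.
 */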

#define WGLOG(level, fmt, args...) \
	log(level, "%s: " fmt, __func__, ##args)

// #define WG_DEBUG

/* Debug options */
#ifdef WG_DEBUG
/* Output debug logs */
#ifndef WG_DEBUG_LOG
#define WG_DEBUG_LOG
#endif
/* Output trace logs */
#ifndef WG_DEBUG_TRACE
#define WG_DEBUG_TRACE
#endif
/* Output hash values, etc. */
#ifndef WG_DEBUG_DUMP
#define WG_DEBUG_DUMP
#endif
/* Make some internal parameters configurable for testing and debugging */
#ifndef WG_DEBUG_PARAMS
#define WG_DEBUG_PARAMS
#endif
int wg_debug;
#define WG_DEBUG_FLAGS_LOG	1
#define WG_DEBUG_FLAGS_TRACE	2
#define WG_DEBUG_FLAGS_DUMP	4
#endif


#ifdef WG_DEBUG_TRACE
#define WG_TRACE(msg)	do { \
	if (wg_debug & WG_DEBUG_FLAGS_TRACE) \
		log(LOG_DEBUG, "%s:%d: %s\n", __func__, __LINE__, (msg)); \
} while (0)
#else
#define WG_TRACE(msg)	__nothing
#endif

#ifdef WG_DEBUG_LOG
#define WG_DLOG(fmt, args...)	do { \
	if (wg_debug & WG_DEBUG_FLAGS_LOG) \
		log(LOG_DEBUG, "%s: " fmt, __func__, ##args); \
} while (0)
#else
#define WG_DLOG(fmt, args...)	__nothing
#endif

#define WG_LOG_RATECHECK(wgprc, level, fmt, args...)	do { \
	if (ppsratecheck(&(wgprc)->wgprc_lasttime, \
		&(wgprc)->wgprc_curpps, 1)) { \
		log(level, fmt, ##args); \
	} \
} while (0)

#ifdef WG_DEBUG_PARAMS
static bool wg_force_underload = false;
#endif

#ifdef WG_DEBUG_DUMP

static char *
gethexdump(const char *p, size_t n)
{
	char *buf;
	size_t i;

	if (n > SIZE_MAX/3 - 1)
		return NULL;
	buf = kmem_alloc(3*n + 1, KM_NOSLEEP);
	if (buf == NULL)
		return NULL;
	for (i = 0; i < n; i++)
		snprintf(buf + 3*i, 3 + 1, " %02hhx", p[i]);
	return buf;
}

static void
puthexdump(char *buf, const void *p, size_t n)
{

	if (buf == NULL)
		return;
	kmem_free(buf, 3*n + 1);
}

#ifdef WG_RUMPKERNEL
static void
wg_dump_buf(const char *func, const char *buf, const size_t size)
{
	if ((wg_debug & WG_DEBUG_FLAGS_DUMP) == 0)
		return;

	char *hex = gethexdump(buf, size);

	log(LOG_DEBUG, "%s: %s\n", func, hex ? hex : "(enomem)");
	puthexdump(hex, buf, size);
}
#endif

static void
wg_dump_hash(const uint8_t *func, const uint8_t *name, const uint8_t *hash,
    const size_t size)
{
	if ((wg_debug & WG_DEBUG_FLAGS_DUMP) == 0)
		return;

	char *hex = gethexdump(hash, size);

	log(LOG_DEBUG, "%s: %s: %s\n", func, name, hex ? hex : "(enomem)");
	puthexdump(hex, hash, size);
}

#define WG_DUMP_HASH(name, hash) \
	wg_dump_hash(__func__, name, hash, WG_HASH_LEN)
#define WG_DUMP_HASH48(name, hash) \
	wg_dump_hash(__func__, name, hash, 48)
#define WG_DUMP_BUF(buf, size) \
	wg_dump_buf(__func__, buf, size)
#else
#define WG_DUMP_HASH(name, hash)	__nothing
#define WG_DUMP_HASH48(name, hash)	__nothing
#define WG_DUMP_BUF(buf, size)		__nothing
#endif /* WG_DEBUG_DUMP */

/* chosen somewhat arbitrarily -- fits in signed 16 bits NUL-terminated */
#define WG_MAX_PROPLEN		32766

#define WG_MTU			1420
#define WG_ALLOWEDIPS		16

#define CURVE25519_KEY_LEN	32
#define TAI64N_LEN		(sizeof(uint32_t) * 3)
#define POLY1305_AUTHTAG_LEN	16
#define HMAC_BLOCK_LEN		64

/* [N] 4.1: "DHLEN must be 32 or greater." WireGuard chooses 32. */
/* [N] 4.3: Hash functions */
#define NOISE_DHLEN		32
/* [N] 4.3: "Must be 32 or 64." WireGuard chooses 32. */
#define NOISE_HASHLEN		32
#define NOISE_BLOCKLEN		64
#define NOISE_HKDF_OUTPUT_LEN	NOISE_HASHLEN
/* [N] 5.1: "k" */
#define NOISE_CIPHER_KEY_LEN	32
/*
 * [N] 9.2: "psk"
 * "... psk is a 32-byte secret value provided by the application."
 */
#define NOISE_PRESHARED_KEY_LEN	32

#define WG_STATIC_KEY_LEN	CURVE25519_KEY_LEN
#define WG_TIMESTAMP_LEN	TAI64N_LEN

#define WG_PRESHARED_KEY_LEN	NOISE_PRESHARED_KEY_LEN

#define WG_COOKIE_LEN		16
#define WG_MAC_LEN		16
#define WG_RANDVAL_LEN		24

#define WG_EPHEMERAL_KEY_LEN	CURVE25519_KEY_LEN
/* [N] 5.2: "ck: A chaining key of HASHLEN bytes" */
#define WG_CHAINING_KEY_LEN	NOISE_HASHLEN
/* [N] 5.2: "h: A hash output of HASHLEN bytes" */
#define WG_HASH_LEN		NOISE_HASHLEN
#define WG_CIPHER_KEY_LEN	NOISE_CIPHER_KEY_LEN
#define WG_DH_OUTPUT_LEN	NOISE_DHLEN
#define WG_KDF_OUTPUT_LEN	NOISE_HKDF_OUTPUT_LEN
#define WG_AUTHTAG_LEN		POLY1305_AUTHTAG_LEN
#define WG_DATA_KEY_LEN		32
#define WG_SALT_LEN		24

/*
 * The protocol messages
 */
struct wg_msg {
	uint32_t	wgm_type;
} __packed;

/* [W] 5.4.2 First Message: Initiator to Responder */
struct wg_msg_init {
	uint32_t	wgmi_type;
	uint32_t	wgmi_sender;
	uint8_t		wgmi_ephemeral[WG_EPHEMERAL_KEY_LEN];
	uint8_t		wgmi_static[WG_STATIC_KEY_LEN + WG_AUTHTAG_LEN];
	uint8_t		wgmi_timestamp[WG_TIMESTAMP_LEN + WG_AUTHTAG_LEN];
	uint8_t		wgmi_mac1[WG_MAC_LEN];
	uint8_t		wgmi_mac2[WG_MAC_LEN];
} __packed;

/* [W] 5.4.3 Second Message: Responder to Initiator */
struct wg_msg_resp {
	uint32_t	wgmr_type;
	uint32_t	wgmr_sender;
	uint32_t	wgmr_receiver;
	uint8_t		wgmr_ephemeral[WG_EPHEMERAL_KEY_LEN];
	uint8_t		wgmr_empty[0 + WG_AUTHTAG_LEN];
	uint8_t		wgmr_mac1[WG_MAC_LEN];
	uint8_t		wgmr_mac2[WG_MAC_LEN];
} __packed;

/* [W] 5.4.6 Subsequent Messages: Transport Data Messages */
struct wg_msg_data {
	uint32_t	wgmd_type;
	uint32_t	wgmd_receiver;
	uint64_t	wgmd_counter;
	uint32_t	wgmd_packet[0];
} __packed;

/* [W] 5.4.7 Under Load: Cookie Reply Message */
struct wg_msg_cookie {
	uint32_t	wgmc_type;
	uint32_t	wgmc_receiver;
	uint8_t		wgmc_salt[WG_SALT_LEN];
	uint8_t		wgmc_cookie[WG_COOKIE_LEN + WG_AUTHTAG_LEN];
} __packed;

#define WG_MSG_TYPE_INIT	1
#define WG_MSG_TYPE_RESP	2
#define WG_MSG_TYPE_COOKIE	3
#define WG_MSG_TYPE_DATA	4
#define WG_MSG_TYPE_MAX		WG_MSG_TYPE_DATA
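
/*
 * Illustrative sketch only (an assumption about how the structures above
 * are meant to be consumed, not a copy of this driver's actual receive
 * path): an incoming UDP payload is first viewed as a bare struct wg_msg
 * to pick a handler and then re-validated against the per-type size.
 *
 *	const struct wg_msg *wgm = mtod(m, const struct wg_msg *);
 *
 *	switch (le32toh(wgm->wgm_type)) {
 *	case WG_MSG_TYPE_INIT:
 *		if (m->m_len < sizeof(struct wg_msg_init))
 *			goto drop;
 *		...handle handshake initiation...
 *		break;
 *	case WG_MSG_TYPE_DATA:
 *		...decrypt and deliver transport data...
 *		break;
 *	default:
 *		goto drop;	(unknown type beyond WG_MSG_TYPE_MAX)
 *	}
 */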

/* Sliding windows */

#define SLIWIN_BITS	2048u
#define SLIWIN_TYPE	uint32_t
#define SLIWIN_BPW	(NBBY*sizeof(SLIWIN_TYPE))
#define SLIWIN_WORDS	howmany(SLIWIN_BITS, SLIWIN_BPW)
#define SLIWIN_NPKT	(SLIWIN_BITS - NBBY*sizeof(SLIWIN_TYPE))

struct sliwin {
	SLIWIN_TYPE	B[SLIWIN_WORDS];
	uint64_t	T;
};

static void
sliwin_reset(struct sliwin *W)
{

	memset(W, 0, sizeof(*W));
}

static int
sliwin_check_fast(const volatile struct sliwin *W, uint64_t S)
{

	/*
	 * If it's more than one window older than the highest sequence
	 * number we've seen, reject.
	 */
#ifdef __HAVE_ATOMIC64_LOADSTORE
	if (S + SLIWIN_NPKT < atomic_load_relaxed(&W->T))
		return EAUTH;
#endif

	/*
	 * Otherwise, we need to take the lock to decide, so don't
	 * reject just yet.  Caller must serialize a call to
	 * sliwin_update in this case.
	 */
	return 0;
}

static int
sliwin_update(struct sliwin *W, uint64_t S)
{
	unsigned word, bit;

	/*
	 * If it's more than one window older than the highest sequence
	 * number we've seen, reject.
	 */
	if (S + SLIWIN_NPKT < W->T)
		return EAUTH;

	/*
	 * If it's higher than the highest sequence number we've seen,
	 * advance the window.
	 */
	if (S > W->T) {
		uint64_t i = W->T / SLIWIN_BPW;
		uint64_t j = S / SLIWIN_BPW;
		unsigned k;

		for (k = 0; k < MIN(j - i, SLIWIN_WORDS); k++)
			W->B[(i + k + 1) % SLIWIN_WORDS] = 0;
#ifdef __HAVE_ATOMIC64_LOADSTORE
		atomic_store_relaxed(&W->T, S);
#else
		W->T = S;
#endif
	}

	/* Test and set the bit -- if already set, reject. */
	word = (S / SLIWIN_BPW) % SLIWIN_WORDS;
	bit = S % SLIWIN_BPW;
	if (W->B[word] & (1UL << bit))
		return EAUTH;
	W->B[word] |= 1U << bit;

	/* Accept! */
	return 0;
}
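
/*
 * Hedged sketch (an assumption about the intended caller pattern, not a
 * function defined here): a receiver first tries the lockless check, then
 * serializes the authoritative test-and-set under the per-session receive
 * window lock, matching the wgs_recvwin member of struct wg_session below.
 *
 *	if (sliwin_check_fast(&wgs->wgs_recvwin->window, seqno))
 *		goto reject;			(stale beyond the window)
 *	...authenticate the packet...
 *	mutex_enter(&wgs->wgs_recvwin->lock);
 *	error = sliwin_update(&wgs->wgs_recvwin->window, seqno);
 *	mutex_exit(&wgs->wgs_recvwin->lock);
 *	if (error)
 *		goto reject;			(replayed or too old)
 */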

struct wg_session {
	struct wg_peer	*wgs_peer;
	struct psref_target
			wgs_psref;

	int		wgs_state;
#define WGS_STATE_UNKNOWN	0
#define WGS_STATE_INIT_ACTIVE	1
#define WGS_STATE_INIT_PASSIVE	2
#define WGS_STATE_ESTABLISHED	3
#define WGS_STATE_DESTROYING	4

	time_t		wgs_time_established;
	time_t		wgs_time_last_data_sent;
	bool		wgs_is_initiator;

	uint32_t	wgs_local_index;
	uint32_t	wgs_remote_index;
#ifdef __HAVE_ATOMIC64_LOADSTORE
	volatile uint64_t
			wgs_send_counter;
#else
	kmutex_t	wgs_send_counter_lock;
	uint64_t	wgs_send_counter;
#endif

	struct {
		kmutex_t	lock;
		struct sliwin	window;
	}		*wgs_recvwin;

	uint8_t		wgs_handshake_hash[WG_HASH_LEN];
	uint8_t		wgs_chaining_key[WG_CHAINING_KEY_LEN];
	uint8_t		wgs_ephemeral_key_pub[WG_EPHEMERAL_KEY_LEN];
	uint8_t		wgs_ephemeral_key_priv[WG_EPHEMERAL_KEY_LEN];
	uint8_t		wgs_ephemeral_key_peer[WG_EPHEMERAL_KEY_LEN];
	uint8_t		wgs_tkey_send[WG_DATA_KEY_LEN];
	uint8_t		wgs_tkey_recv[WG_DATA_KEY_LEN];
};

struct wg_sockaddr {
	union {
		struct sockaddr_storage _ss;
		struct sockaddr _sa;
		struct sockaddr_in _sin;
		struct sockaddr_in6 _sin6;
	};
	struct psref_target	wgsa_psref;
};

#define wgsatoss(wgsa)		(&(wgsa)->_ss)
#define wgsatosa(wgsa)		(&(wgsa)->_sa)
#define wgsatosin(wgsa)		(&(wgsa)->_sin)
#define wgsatosin6(wgsa)	(&(wgsa)->_sin6)

#define wgsa_family(wgsa)	(wgsatosa(wgsa)->sa_family)

struct wg_peer;
struct wg_allowedip {
	struct radix_node	wga_nodes[2];
	struct wg_sockaddr	_wga_sa_addr;
	struct wg_sockaddr	_wga_sa_mask;
#define wga_sa_addr		_wga_sa_addr._sa
#define wga_sa_mask		_wga_sa_mask._sa

	int			wga_family;
	uint8_t			wga_cidr;
	union {
		struct in_addr _ip4;
		struct in6_addr _ip6;
	}			wga_addr;
#define wga_addr4		wga_addr._ip4
#define wga_addr6		wga_addr._ip6

	struct wg_peer		*wga_peer;
};

typedef uint8_t wg_timestamp_t[WG_TIMESTAMP_LEN];

struct wg_ppsratecheck {
	struct timeval		wgprc_lasttime;
	int			wgprc_curpps;
};

struct wg_softc;
struct wg_peer {
	struct wg_softc		*wgp_sc;
	char			wgp_name[WG_PEER_NAME_MAXLEN + 1];
	struct pslist_entry	wgp_peerlist_entry;
	pserialize_t		wgp_psz;
	struct psref_target	wgp_psref;
	kmutex_t		*wgp_lock;
	kmutex_t		*wgp_intr_lock;

	uint8_t			wgp_pubkey[WG_STATIC_KEY_LEN];
	struct wg_sockaddr	*wgp_endpoint;
	struct wg_sockaddr	*wgp_endpoint0;
	volatile unsigned	wgp_endpoint_changing;
	bool			wgp_endpoint_available;

	/* The preshared key (optional) */
	uint8_t			wgp_psk[WG_PRESHARED_KEY_LEN];

	struct wg_session	*wgp_session_stable;
	struct wg_session	*wgp_session_unstable;

	/* first outgoing packet awaiting session initiation */
	struct mbuf		*wgp_pending;

	/* timestamp in big-endian */
	wg_timestamp_t		wgp_timestamp_latest_init;

	struct timespec		wgp_last_handshake_time;

	callout_t		wgp_rekey_timer;
	callout_t		wgp_handshake_timeout_timer;
	callout_t		wgp_session_dtor_timer;

	time_t			wgp_handshake_start_time;

	int			wgp_n_allowedips;
	struct wg_allowedip	wgp_allowedips[WG_ALLOWEDIPS];

	time_t			wgp_latest_cookie_time;
	uint8_t			wgp_latest_cookie[WG_COOKIE_LEN];
	uint8_t			wgp_last_sent_mac1[WG_MAC_LEN];
	bool			wgp_last_sent_mac1_valid;
	uint8_t			wgp_last_sent_cookie[WG_COOKIE_LEN];
	bool			wgp_last_sent_cookie_valid;

	time_t			wgp_last_msg_received_time[WG_MSG_TYPE_MAX];

	time_t			wgp_last_genrandval_time;
	uint32_t		wgp_randval;

	struct wg_ppsratecheck	wgp_ppsratecheck;

	struct work		wgp_work;
	unsigned int		wgp_tasks;
#define WGP_TASK_SEND_INIT_MESSAGE		__BIT(0)
#define WGP_TASK_RETRY_HANDSHAKE		__BIT(1)
#define WGP_TASK_ESTABLISH_SESSION		__BIT(2)
#define WGP_TASK_ENDPOINT_CHANGED		__BIT(3)
#define WGP_TASK_SEND_KEEPALIVE_MESSAGE		__BIT(4)
#define WGP_TASK_DESTROY_PREV_SESSION		__BIT(5)
};

struct wg_ops;

struct wg_softc {
	struct ifnet	wg_if;
	LIST_ENTRY(wg_softc) wg_list;
	kmutex_t	*wg_lock;
	kmutex_t	*wg_intr_lock;
	krwlock_t	*wg_rwlock;

	uint8_t		wg_privkey[WG_STATIC_KEY_LEN];
	uint8_t		wg_pubkey[WG_STATIC_KEY_LEN];

	int		wg_npeers;
	struct pslist_head	wg_peers;
	struct thmap	*wg_peers_bypubkey;
	struct thmap	*wg_peers_byname;
	struct thmap	*wg_sessions_byindex;
	uint16_t	wg_listen_port;

	struct threadpool	*wg_threadpool;

	struct threadpool_job	wg_job;
	int			wg_upcalls;
#define WG_UPCALL_INET		__BIT(0)
#define WG_UPCALL_INET6		__BIT(1)

#ifdef INET
	struct socket		*wg_so4;
	struct radix_node_head	*wg_rtable_ipv4;
#endif
#ifdef INET6
	struct socket		*wg_so6;
	struct radix_node_head	*wg_rtable_ipv6;
#endif

	struct wg_ppsratecheck	wg_ppsratecheck;

	struct wg_ops	*wg_ops;

#ifdef WG_RUMPKERNEL
	struct wg_user	*wg_user;
#endif
};

/* [W] 6.1 Preliminaries */
#define WG_REKEY_AFTER_MESSAGES		(1ULL << 60)
#define WG_REJECT_AFTER_MESSAGES	(UINT64_MAX - (1 << 13))
#define WG_REKEY_AFTER_TIME		120
#define WG_REJECT_AFTER_TIME		180
#define WG_REKEY_ATTEMPT_TIME		90
#define WG_REKEY_TIMEOUT		5
#define WG_KEEPALIVE_TIMEOUT		10

#define WG_COOKIE_TIME			120
#define WG_RANDVAL_TIME			(2 * 60)

static uint64_t wg_rekey_after_messages = WG_REKEY_AFTER_MESSAGES;
static uint64_t wg_reject_after_messages = WG_REJECT_AFTER_MESSAGES;
static unsigned wg_rekey_after_time = WG_REKEY_AFTER_TIME;
static unsigned wg_reject_after_time = WG_REJECT_AFTER_TIME;
static unsigned wg_rekey_attempt_time = WG_REKEY_ATTEMPT_TIME;
static unsigned wg_rekey_timeout = WG_REKEY_TIMEOUT;
static unsigned wg_keepalive_timeout = WG_KEEPALIVE_TIMEOUT;

static struct mbuf *
		wg_get_mbuf(size_t, size_t);

static int	wg_send_data_msg(struct wg_peer *, struct wg_session *,
		    struct mbuf *);
static int	wg_send_cookie_msg(struct wg_softc *, struct wg_peer *,
		    const uint32_t, const uint8_t [WG_MAC_LEN],
		    const struct sockaddr *);
static int	wg_send_handshake_msg_resp(struct wg_softc *, struct wg_peer *,
		    struct wg_session *, const struct wg_msg_init *);
static void	wg_send_keepalive_msg(struct wg_peer *, struct wg_session *);

static struct wg_peer *
		wg_pick_peer_by_sa(struct wg_softc *, const struct sockaddr *,
		    struct psref *);
static struct wg_peer *
		wg_lookup_peer_by_pubkey(struct wg_softc *,
		    const uint8_t [WG_STATIC_KEY_LEN], struct psref *);

static struct wg_session *
		wg_lookup_session_by_index(struct wg_softc *,
		    const uint32_t, struct psref *);

static void	wg_update_endpoint_if_necessary(struct wg_peer *,
		    const struct sockaddr *);

static void	wg_schedule_rekey_timer(struct wg_peer *);
static void	wg_schedule_session_dtor_timer(struct wg_peer *);

static bool	wg_is_underload(struct wg_softc *, struct wg_peer *, int);
static void	wg_calculate_keys(struct wg_session *, const bool);

static void	wg_clear_states(struct wg_session *);

static void	wg_get_peer(struct wg_peer *, struct psref *);
static void	wg_put_peer(struct wg_peer *, struct psref *);

static int	wg_send_so(struct wg_peer *, struct mbuf *);
static int	wg_send_udp(struct wg_peer *, struct mbuf *);
static int	wg_output(struct ifnet *, struct mbuf *,
		    const struct sockaddr *, const struct rtentry *);
static void	wg_input(struct ifnet *, struct mbuf *, const int);
static int	wg_ioctl(struct ifnet *, u_long, void *);
static int	wg_bind_port(struct wg_softc *, const uint16_t);
static int	wg_init(struct ifnet *);
#ifdef ALTQ
static void	wg_start(struct ifnet *);
#endif
static void	wg_stop(struct ifnet *, int);

static void	wg_peer_work(struct work *, void *);
static void	wg_job(struct threadpool_job *);
static void	wgintr(void *);
static void	wg_purge_pending_packets(struct wg_peer *);

static int	wg_clone_create(struct if_clone *, int);
static int	wg_clone_destroy(struct ifnet *);

struct wg_ops {
	int (*send_hs_msg)(struct wg_peer *, struct mbuf *);
	int (*send_data_msg)(struct wg_peer *, struct mbuf *);
	void (*input)(struct ifnet *, struct mbuf *, const int);
	int (*bind_port)(struct wg_softc *, const uint16_t);
};

struct wg_ops wg_ops_rumpkernel = {
	.send_hs_msg	= wg_send_so,
	.send_data_msg	= wg_send_udp,
	.input		= wg_input,
	.bind_port	= wg_bind_port,
};

#ifdef WG_RUMPKERNEL
static bool	wg_user_mode(struct wg_softc *);
static int	wg_ioctl_linkstr(struct wg_softc *, struct ifdrv *);

static int	wg_send_user(struct wg_peer *, struct mbuf *);
static void	wg_input_user(struct ifnet *, struct mbuf *, const int);
static int	wg_bind_port_user(struct wg_softc *, const uint16_t);

struct wg_ops wg_ops_rumpuser = {
	.send_hs_msg	= wg_send_user,
	.send_data_msg	= wg_send_user,
	.input		= wg_input_user,
	.bind_port	= wg_bind_port_user,
};
#endif

#define WG_PEER_READER_FOREACH(wgp, wg) \
	PSLIST_READER_FOREACH((wgp), &(wg)->wg_peers, struct wg_peer, \
	    wgp_peerlist_entry)
#define WG_PEER_WRITER_FOREACH(wgp, wg) \
	PSLIST_WRITER_FOREACH((wgp), &(wg)->wg_peers, struct wg_peer, \
	    wgp_peerlist_entry)
#define WG_PEER_WRITER_INSERT_HEAD(wgp, wg) \
	PSLIST_WRITER_INSERT_HEAD(&(wg)->wg_peers, (wgp), wgp_peerlist_entry)
#define WG_PEER_WRITER_REMOVE(wgp) \
	PSLIST_WRITER_REMOVE((wgp), wgp_peerlist_entry)

struct wg_route {
	struct radix_node	wgr_nodes[2];
	struct wg_peer		*wgr_peer;
};

static struct radix_node_head *
wg_rnh(struct wg_softc *wg, const int family)
{

	switch (family) {
	case AF_INET:
		return wg->wg_rtable_ipv4;
#ifdef INET6
	case AF_INET6:
		return wg->wg_rtable_ipv6;
#endif
	default:
		return NULL;
	}
}


/*
 * Global variables
 */
static volatile unsigned wg_count __cacheline_aligned;

struct psref_class *wg_psref_class __read_mostly;

static struct if_clone wg_cloner =
    IF_CLONE_INITIALIZER("wg", wg_clone_create, wg_clone_destroy);

static struct pktqueue *wg_pktq __read_mostly;
static struct workqueue *wg_wq __read_mostly;

void wgattach(int);
/* ARGSUSED */
void
wgattach(int count)
{
	/*
	 * Nothing to do here; initialization is handled by the
	 * module initialization code in wginit() below.
	 */
}

static void
wginit(void)
{

	wg_psref_class = psref_class_create("wg", IPL_SOFTNET);

	if_clone_attach(&wg_cloner);
}

/*
 * XXX Kludge: This should just happen in wginit, but workqueue_create
 * cannot be run until after CPUs have been detected, and wginit runs
 * before configure.
 */
static int
wginitqueues(void)
{
	int error __diagused;

	wg_pktq = pktq_create(IFQ_MAXLEN, wgintr, NULL);
	KASSERT(wg_pktq != NULL);

	error = workqueue_create(&wg_wq, "wgpeer", wg_peer_work, NULL,
	    PRI_NONE, IPL_SOFTNET, WQ_MPSAFE|WQ_PERCPU);
	KASSERT(error == 0);

	return 0;
}

static void
wg_guarantee_initialized(void)
{
	static ONCE_DECL(init);
	int error __diagused;

	error = RUN_ONCE(&init, wginitqueues);
	KASSERT(error == 0);
}

static int
wg_count_inc(void)
{
	unsigned o, n;

	do {
		o = atomic_load_relaxed(&wg_count);
		if (o == UINT_MAX)
			return ENFILE;
		n = o + 1;
	} while (atomic_cas_uint(&wg_count, o, n) != o);

	return 0;
}

static void
wg_count_dec(void)
{
	unsigned c __diagused;

	c = atomic_dec_uint_nv(&wg_count);
	KASSERT(c != UINT_MAX);
}

static int
wgdetach(void)
{

	/* Prevent new interface creation. */
	if_clone_detach(&wg_cloner);

	/* Check whether there are any existing interfaces. */
	if (atomic_load_relaxed(&wg_count)) {
		/* Back out -- reattach the cloner. */
		if_clone_attach(&wg_cloner);
		return EBUSY;
	}

	/* No interfaces left.  Nuke it. */
	workqueue_destroy(wg_wq);
	pktq_destroy(wg_pktq);
	psref_class_destroy(wg_psref_class);

	return 0;
}

static void
wg_init_key_and_hash(uint8_t ckey[WG_CHAINING_KEY_LEN],
    uint8_t hash[WG_HASH_LEN])
{
	/* [W] 5.4: CONSTRUCTION */
	const char *signature = "Noise_IKpsk2_25519_ChaChaPoly_BLAKE2s";
	/* [W] 5.4: IDENTIFIER */
	const char *id = "WireGuard v1 zx2c4 Jason@zx2c4.com";
	struct blake2s state;

	blake2s(ckey, WG_CHAINING_KEY_LEN, NULL, 0,
	    signature, strlen(signature));

	CTASSERT(WG_HASH_LEN == WG_CHAINING_KEY_LEN);
	memcpy(hash, ckey, WG_CHAINING_KEY_LEN);

	blake2s_init(&state, WG_HASH_LEN, NULL, 0);
	blake2s_update(&state, ckey, WG_CHAINING_KEY_LEN);
	blake2s_update(&state, id, strlen(id));
	blake2s_final(&state, hash);

	WG_DUMP_HASH("ckey", ckey);
	WG_DUMP_HASH("hash", hash);
}

static void
wg_algo_hash(uint8_t hash[WG_HASH_LEN], const uint8_t input[],
    const size_t inputsize)
{
	struct blake2s state;

	blake2s_init(&state, WG_HASH_LEN, NULL, 0);
	blake2s_update(&state, hash, WG_HASH_LEN);
	blake2s_update(&state, input, inputsize);
	blake2s_final(&state, hash);
}

static void
wg_algo_mac(uint8_t out[], const size_t outsize,
    const uint8_t key[], const size_t keylen,
    const uint8_t input1[], const size_t input1len,
    const uint8_t input2[], const size_t input2len)
{
	struct blake2s state;

	blake2s_init(&state, outsize, key, keylen);

	blake2s_update(&state, input1, input1len);
	if (input2 != NULL)
		blake2s_update(&state, input2, input2len);
	blake2s_final(&state, out);
}

static void
wg_algo_mac_mac1(uint8_t out[], const size_t outsize,
    const uint8_t input1[], const size_t input1len,
    const uint8_t input2[], const size_t input2len)
{
	struct blake2s state;
	/* [W] 5.4: LABEL-MAC1 */
	const char *label = "mac1----";
	uint8_t key[WG_HASH_LEN];

	blake2s_init(&state, sizeof(key), NULL, 0);
	blake2s_update(&state, label, strlen(label));
	blake2s_update(&state, input1, input1len);
	blake2s_final(&state, key);

	blake2s_init(&state, outsize, key, sizeof(key));
	if (input2 != NULL)
		blake2s_update(&state, input2, input2len);
	blake2s_final(&state, out);
}

static void
wg_algo_mac_cookie(uint8_t out[], const size_t outsize,
    const uint8_t input1[], const size_t input1len)
{
	struct blake2s state;
	/* [W] 5.4: LABEL-COOKIE */
	const char *label = "cookie--";

	blake2s_init(&state, outsize, NULL, 0);
	blake2s_update(&state, label, strlen(label));
	blake2s_update(&state, input1, input1len);
	blake2s_final(&state, out);
}

static void
wg_algo_generate_keypair(uint8_t pubkey[WG_EPHEMERAL_KEY_LEN],
    uint8_t privkey[WG_EPHEMERAL_KEY_LEN])
{

	CTASSERT(WG_EPHEMERAL_KEY_LEN == crypto_scalarmult_curve25519_BYTES);

	cprng_strong(kern_cprng, privkey, WG_EPHEMERAL_KEY_LEN, 0);
	crypto_scalarmult_base(pubkey, privkey);
}

static void
wg_algo_dh(uint8_t out[WG_DH_OUTPUT_LEN],
    const uint8_t privkey[WG_STATIC_KEY_LEN],
    const uint8_t pubkey[WG_STATIC_KEY_LEN])
{

	CTASSERT(WG_STATIC_KEY_LEN == crypto_scalarmult_curve25519_BYTES);

	int ret __diagused = crypto_scalarmult(out, privkey, pubkey);
	KASSERT(ret == 0);
}

static void
wg_algo_hmac(uint8_t out[], const size_t outlen,
    const uint8_t key[], const size_t keylen,
    const uint8_t in[], const size_t inlen)
{
#define IPAD	0x36
#define OPAD	0x5c
	uint8_t hmackey[HMAC_BLOCK_LEN] = {0};
	uint8_t ipad[HMAC_BLOCK_LEN];
	uint8_t opad[HMAC_BLOCK_LEN];
	size_t i;
	struct blake2s state;

	KASSERT(outlen == WG_HASH_LEN);
	KASSERT(keylen <= HMAC_BLOCK_LEN);

	memcpy(hmackey, key, keylen);

	for (i = 0; i < sizeof(hmackey); i++) {
		ipad[i] = hmackey[i] ^ IPAD;
		opad[i] = hmackey[i] ^ OPAD;
	}

	blake2s_init(&state, WG_HASH_LEN, NULL, 0);
	blake2s_update(&state, ipad, sizeof(ipad));
	blake2s_update(&state, in, inlen);
	blake2s_final(&state, out);

	blake2s_init(&state, WG_HASH_LEN, NULL, 0);
	blake2s_update(&state, opad, sizeof(opad));
	blake2s_update(&state, out, WG_HASH_LEN);
	blake2s_final(&state, out);
#undef IPAD
#undef OPAD
}
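
/*
 * For reference, wg_algo_hmac() above is the standard HMAC construction
 * (RFC 2104) instantiated with BLAKE2s as the hash H:
 *
 *	HMAC(K, m) = H((K' ^ opad) || H((K' ^ ipad) || m))
 *
 * where K' is K zero-padded to the 64-byte block size, ipad is 0x36
 * repeated, and opad is 0x5c repeated, exactly as computed above.
 */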

static void
wg_algo_kdf(uint8_t out1[WG_KDF_OUTPUT_LEN], uint8_t out2[WG_KDF_OUTPUT_LEN],
    uint8_t out3[WG_KDF_OUTPUT_LEN], const uint8_t ckey[WG_CHAINING_KEY_LEN],
    const uint8_t input[], const size_t inputlen)
{
	uint8_t tmp1[WG_KDF_OUTPUT_LEN], tmp2[WG_KDF_OUTPUT_LEN + 1];
	uint8_t one[1];

	/*
	 * [N] 4.3: "an input_key_material byte sequence with length
	 * either zero bytes, 32 bytes, or DHLEN bytes."
	 */
	KASSERT(inputlen == 0 || inputlen == 32 || inputlen == NOISE_DHLEN);

	WG_DUMP_HASH("ckey", ckey);
	if (input != NULL)
		WG_DUMP_HASH("input", input);
	wg_algo_hmac(tmp1, sizeof(tmp1), ckey, WG_CHAINING_KEY_LEN,
	    input, inputlen);
	WG_DUMP_HASH("tmp1", tmp1);
	one[0] = 1;
	wg_algo_hmac(out1, WG_KDF_OUTPUT_LEN, tmp1, sizeof(tmp1),
	    one, sizeof(one));
	WG_DUMP_HASH("out1", out1);
	if (out2 == NULL)
		return;
	memcpy(tmp2, out1, WG_KDF_OUTPUT_LEN);
	tmp2[WG_KDF_OUTPUT_LEN] = 2;
	wg_algo_hmac(out2, WG_KDF_OUTPUT_LEN, tmp1, sizeof(tmp1),
	    tmp2, sizeof(tmp2));
	WG_DUMP_HASH("out2", out2);
	if (out3 == NULL)
		return;
	memcpy(tmp2, out2, WG_KDF_OUTPUT_LEN);
	tmp2[WG_KDF_OUTPUT_LEN] = 3;
	wg_algo_hmac(out3, WG_KDF_OUTPUT_LEN, tmp1, sizeof(tmp1),
	    tmp2, sizeof(tmp2));
	WG_DUMP_HASH("out3", out3);
}
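
/*
 * The KDFn notation of [W] maps onto wg_algo_kdf() by how many outputs are
 * requested; a hedged usage sketch, with C a chaining key and t/k derived
 * outputs:
 *
 *	KDF1(C, input):	wg_algo_kdf(C, NULL, NULL, C, input, len)
 *	KDF2(C, input):	wg_algo_kdf(C, k,    NULL, C, input, len)
 *	KDF3(C, input):	wg_algo_kdf(C, t,    k,    C, input, len)
 *
 * i.e. out1 becomes the new chaining key and out2/out3 the derived keys,
 * following the HKDF construction of [N] 4.3 with HMAC-BLAKE2s.
 */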

static void __noinline
wg_algo_dh_kdf(uint8_t ckey[WG_CHAINING_KEY_LEN],
    uint8_t cipher_key[WG_CIPHER_KEY_LEN],
    const uint8_t local_key[WG_STATIC_KEY_LEN],
    const uint8_t remote_key[WG_STATIC_KEY_LEN])
{
	uint8_t dhout[WG_DH_OUTPUT_LEN];

	wg_algo_dh(dhout, local_key, remote_key);
	wg_algo_kdf(ckey, cipher_key, NULL, ckey, dhout, sizeof(dhout));

	WG_DUMP_HASH("dhout", dhout);
	WG_DUMP_HASH("ckey", ckey);
	if (cipher_key != NULL)
		WG_DUMP_HASH("cipher_key", cipher_key);
}

static void
wg_algo_aead_enc(uint8_t out[], size_t expected_outsize, const uint8_t key[],
    const uint64_t counter, const uint8_t plain[], const size_t plainsize,
    const uint8_t auth[], size_t authlen)
{
	uint8_t nonce[(32 + 64) / 8] = {0};
	long long unsigned int outsize;
	int error __diagused;

	le64enc(&nonce[4], counter);

	error = crypto_aead_chacha20poly1305_ietf_encrypt(out, &outsize, plain,
	    plainsize, auth, authlen, NULL, nonce, key);
	KASSERT(error == 0);
	KASSERT(outsize == expected_outsize);
}

static int
wg_algo_aead_dec(uint8_t out[], size_t expected_outsize, const uint8_t key[],
    const uint64_t counter, const uint8_t encrypted[],
    const size_t encryptedsize, const uint8_t auth[], size_t authlen)
{
	uint8_t nonce[(32 + 64) / 8] = {0};
	long long unsigned int outsize;
	int error;

	le64enc(&nonce[4], counter);

	error = crypto_aead_chacha20poly1305_ietf_decrypt(out, &outsize, NULL,
	    encrypted, encryptedsize, auth, authlen, nonce, key);
	if (error == 0)
		KASSERT(outsize == expected_outsize);
	return error;
}
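
/*
 * Sketch of the 12-byte ChaCha20-Poly1305 nonce built by the two routines
 * above via le64enc(): per [W] 5.4.6 a zero 32-bit prefix is paired with
 * the 64-bit little-endian message counter.
 *
 *	byte 0..3	0x00 0x00 0x00 0x00
 *	byte 4..11	counter, little-endian
 */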

static void
wg_algo_xaead_enc(uint8_t out[], const size_t expected_outsize,
    const uint8_t key[], const uint8_t plain[], const size_t plainsize,
    const uint8_t auth[], size_t authlen,
    const uint8_t nonce[WG_SALT_LEN])
{
	long long unsigned int outsize;
	int error __diagused;

	CTASSERT(WG_SALT_LEN == crypto_aead_xchacha20poly1305_ietf_NPUBBYTES);
	error = crypto_aead_xchacha20poly1305_ietf_encrypt(out, &outsize,
	    plain, plainsize, auth, authlen, NULL, nonce, key);
	KASSERT(error == 0);
	KASSERT(outsize == expected_outsize);
}

static int
wg_algo_xaead_dec(uint8_t out[], const size_t expected_outsize,
    const uint8_t key[], const uint8_t encrypted[], const size_t encryptedsize,
    const uint8_t auth[], size_t authlen,
    const uint8_t nonce[WG_SALT_LEN])
{
	long long unsigned int outsize;
	int error;

	error = crypto_aead_xchacha20poly1305_ietf_decrypt(out, &outsize, NULL,
	    encrypted, encryptedsize, auth, authlen, nonce, key);
	if (error == 0)
		KASSERT(outsize == expected_outsize);
	return error;
}

static void
wg_algo_tai64n(wg_timestamp_t timestamp)
{
	struct timespec ts;

	/* FIXME strict TAI64N (https://cr.yp.to/libtai/tai64.html) */
	getnanotime(&ts);
	/* TAI64 label in external TAI64 format */
	be32enc(timestamp, 0x40000000U + (uint32_t)(ts.tv_sec >> 32));
	/* second beginning from 1970 TAI */
	be32enc(timestamp + 4, (uint32_t)(ts.tv_sec & 0xffffffffU));
	/* nanosecond in big-endian format */
	be32enc(timestamp + 8, (uint32_t)ts.tv_nsec);
}
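
/*
 * Sketch of the 12-byte timestamp produced above (external TAI64N layout,
 * modulo the missing TAI offset noted in the FIXME):
 *
 *	byte 0..3	0x40000000 + high 32 bits of the second count (BE)
 *	byte 4..7	low 32 bits of the second count (BE)
 *	byte 8..11	nanoseconds (BE)
 *
 * What matters protocol-wise is that the value increases per initiation
 * message, since the responder discards old timestamps ([W] 5.1).
 */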

/*
 * wg_get_stable_session(wgp, psref)
 *
 *	Get a passive reference to the current stable session, or
 *	return NULL if there is no current stable session.
 *
 *	The pointer is always there, but the session is not necessarily
 *	ESTABLISHED; if it is not ESTABLISHED, return NULL.  However,
 *	the session may transition from ESTABLISHED to DESTROYING while
 *	the caller holds the passive reference.
 */
static struct wg_session *
wg_get_stable_session(struct wg_peer *wgp, struct psref *psref)
{
	int s;
	struct wg_session *wgs;

	s = pserialize_read_enter();
	wgs = atomic_load_consume(&wgp->wgp_session_stable);
	if (__predict_false(wgs->wgs_state != WGS_STATE_ESTABLISHED))
		wgs = NULL;
	else
		psref_acquire(psref, &wgs->wgs_psref, wg_psref_class);
	pserialize_read_exit(s);

	return wgs;
}

static void
wg_put_session(struct wg_session *wgs, struct psref *psref)
{

	psref_release(psref, &wgs->wgs_psref, wg_psref_class);
}
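
/*
 * Hedged usage sketch for the pair above, mirroring what data-path callers
 * are expected to do: the psref pins the session across packet processing
 * even if a session swap happens concurrently.
 *
 *	struct psref psref;
 *	struct wg_session *wgs = wg_get_stable_session(wgp, &psref);
 *
 *	if (wgs == NULL)
 *		return ENOENT;	(no ESTABLISHED session yet)
 *	...encrypt/send or decrypt/receive using wgs...
 *	wg_put_session(wgs, &psref);
 */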

static void
wg_destroy_session(struct wg_softc *wg, struct wg_session *wgs)
{
	struct wg_peer *wgp = wgs->wgs_peer;
	struct wg_session *wgs0 __diagused;
	void *garbage;

	KASSERT(mutex_owned(wgp->wgp_lock));
	KASSERT(wgs->wgs_state != WGS_STATE_UNKNOWN);

	/* Remove the session from the table. */
	wgs0 = thmap_del(wg->wg_sessions_byindex,
	    &wgs->wgs_local_index, sizeof(wgs->wgs_local_index));
	KASSERT(wgs0 == wgs);
	garbage = thmap_stage_gc(wg->wg_sessions_byindex);

	/* Wait for passive references to drain. */
	pserialize_perform(wgp->wgp_psz);
	psref_target_destroy(&wgs->wgs_psref, wg_psref_class);

	/* Free memory, zero state, and transition to UNKNOWN. */
	thmap_gc(wg->wg_sessions_byindex, garbage);
	wg_clear_states(wgs);
	wgs->wgs_state = WGS_STATE_UNKNOWN;
}

/*
 * wg_get_session_index(wg, wgs)
 *
 *	Choose a session index for wgs->wgs_local_index, and store it
 *	in wg's table of sessions by index.
 *
 *	wgs must be the unstable session of its peer, and must be
 *	transitioning out of the UNKNOWN state.
 */
static void
wg_get_session_index(struct wg_softc *wg, struct wg_session *wgs)
{
	struct wg_peer *wgp __diagused = wgs->wgs_peer;
	struct wg_session *wgs0;
	uint32_t index;

	KASSERT(mutex_owned(wgp->wgp_lock));
	KASSERT(wgs == wgp->wgp_session_unstable);
	KASSERT(wgs->wgs_state == WGS_STATE_UNKNOWN);

	do {
		/* Pick a uniform random index. */
		index = cprng_strong32();

		/* Try to take it. */
		wgs->wgs_local_index = index;
		wgs0 = thmap_put(wg->wg_sessions_byindex,
		    &wgs->wgs_local_index, sizeof wgs->wgs_local_index, wgs);

		/* If someone else beat us, start over. */
	} while (__predict_false(wgs0 != wgs));
}

/*
 * wg_put_session_index(wg, wgs)
 *
 *	Remove wgs from the table of sessions by index, wait for any
 *	passive references to drain, and transition the session to the
 *	UNKNOWN state.
 *
 *	wgs must be the unstable session of its peer, and must not be
 *	UNKNOWN or ESTABLISHED.
 */
static void
wg_put_session_index(struct wg_softc *wg, struct wg_session *wgs)
{
	struct wg_peer *wgp __diagused = wgs->wgs_peer;

	KASSERT(mutex_owned(wgp->wgp_lock));
	KASSERT(wgs == wgp->wgp_session_unstable);
	KASSERT(wgs->wgs_state != WGS_STATE_UNKNOWN);
	KASSERT(wgs->wgs_state != WGS_STATE_ESTABLISHED);

	wg_destroy_session(wg, wgs);
	psref_target_init(&wgs->wgs_psref, wg_psref_class);
}

/*
 * Handshake patterns
 *
 * [W] 5: "These messages use the "IK" pattern from Noise"
 * [N] 7.5. Interactive handshake patterns (fundamental)
 *    "The first character refers to the initiator's static key:"
 *    "I = Static key for initiator Immediately transmitted to responder,
 *         despite reduced or absent identity hiding"
 *    "The second character refers to the responder's static key:"
 *    "K = Static key for responder Known to initiator"
 *    "IK:
 *       <- s
 *       ...
 *       -> e, es, s, ss
 *       <- e, ee, se"
 * [N] 9.4. Pattern modifiers
 *    "IKpsk2:
 *       <- s
 *       ...
 *       -> e, es, s, ss
 *       <- e, ee, se, psk"
 */
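
/*
 * In this file the IKpsk2 tokens map onto the handshake routines as follows
 * (see the [N] 2.2 and [N] 9.2 step comments inside each function):
 *
 *	-> e, es, s, ss		wg_fill_msg_init() / wg_handle_msg_init()
 *	<- e, ee, se, psk	wg_fill_msg_resp() / wg_handle_msg_resp()
 */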
static void
wg_fill_msg_init(struct wg_softc *wg, struct wg_peer *wgp,
    struct wg_session *wgs, struct wg_msg_init *wgmi)
{
	uint8_t ckey[WG_CHAINING_KEY_LEN]; /* [W] 5.4.2: Ci */
	uint8_t hash[WG_HASH_LEN]; /* [W] 5.4.2: Hi */
	uint8_t cipher_key[WG_CIPHER_KEY_LEN];
	uint8_t pubkey[WG_EPHEMERAL_KEY_LEN];
	uint8_t privkey[WG_EPHEMERAL_KEY_LEN];

	KASSERT(mutex_owned(wgp->wgp_lock));
	KASSERT(wgs == wgp->wgp_session_unstable);
	KASSERT(wgs->wgs_state == WGS_STATE_INIT_ACTIVE);

	wgmi->wgmi_type = htole32(WG_MSG_TYPE_INIT);
	wgmi->wgmi_sender = wgs->wgs_local_index;

	/* [W] 5.4.2: First Message: Initiator to Responder */

	/* Ci := HASH(CONSTRUCTION) */
	/* Hi := HASH(Ci || IDENTIFIER) */
	wg_init_key_and_hash(ckey, hash);
	/* Hi := HASH(Hi || Sr^pub) */
	wg_algo_hash(hash, wgp->wgp_pubkey, sizeof(wgp->wgp_pubkey));

	WG_DUMP_HASH("hash", hash);

	/* [N] 2.2: "e" */
	/* Ei^priv, Ei^pub := DH-GENERATE() */
	wg_algo_generate_keypair(pubkey, privkey);
	/* Ci := KDF1(Ci, Ei^pub) */
	wg_algo_kdf(ckey, NULL, NULL, ckey, pubkey, sizeof(pubkey));
	/* msg.ephemeral := Ei^pub */
	memcpy(wgmi->wgmi_ephemeral, pubkey, sizeof(wgmi->wgmi_ephemeral));
	/* Hi := HASH(Hi || msg.ephemeral) */
	wg_algo_hash(hash, pubkey, sizeof(pubkey));

	WG_DUMP_HASH("ckey", ckey);
	WG_DUMP_HASH("hash", hash);

	/* [N] 2.2: "es" */
	/* Ci, k := KDF2(Ci, DH(Ei^priv, Sr^pub)) */
	wg_algo_dh_kdf(ckey, cipher_key, privkey, wgp->wgp_pubkey);

	/* [N] 2.2: "s" */
	/* msg.static := AEAD(k, 0, Si^pub, Hi) */
	wg_algo_aead_enc(wgmi->wgmi_static, sizeof(wgmi->wgmi_static),
	    cipher_key, 0, wg->wg_pubkey, sizeof(wg->wg_pubkey),
	    hash, sizeof(hash));
	/* Hi := HASH(Hi || msg.static) */
	wg_algo_hash(hash, wgmi->wgmi_static, sizeof(wgmi->wgmi_static));

	WG_DUMP_HASH48("wgmi_static", wgmi->wgmi_static);

	/* [N] 2.2: "ss" */
	/* Ci, k := KDF2(Ci, DH(Si^priv, Sr^pub)) */
	wg_algo_dh_kdf(ckey, cipher_key, wg->wg_privkey, wgp->wgp_pubkey);

	/* msg.timestamp := AEAD(k, TIMESTAMP(), Hi) */
	wg_timestamp_t timestamp;
	wg_algo_tai64n(timestamp);
	wg_algo_aead_enc(wgmi->wgmi_timestamp, sizeof(wgmi->wgmi_timestamp),
	    cipher_key, 0, timestamp, sizeof(timestamp), hash, sizeof(hash));
	/* Hi := HASH(Hi || msg.timestamp) */
	wg_algo_hash(hash, wgmi->wgmi_timestamp, sizeof(wgmi->wgmi_timestamp));

	/* [W] 5.4.4 Cookie MACs */
	wg_algo_mac_mac1(wgmi->wgmi_mac1, sizeof(wgmi->wgmi_mac1),
	    wgp->wgp_pubkey, sizeof(wgp->wgp_pubkey),
	    (const uint8_t *)wgmi, offsetof(struct wg_msg_init, wgmi_mac1));
	/* Need mac1 to decrypt a cookie from a cookie message */
	memcpy(wgp->wgp_last_sent_mac1, wgmi->wgmi_mac1,
	    sizeof(wgp->wgp_last_sent_mac1));
	wgp->wgp_last_sent_mac1_valid = true;

	if (wgp->wgp_latest_cookie_time == 0 ||
	    (time_uptime - wgp->wgp_latest_cookie_time) >= WG_COOKIE_TIME)
		memset(wgmi->wgmi_mac2, 0, sizeof(wgmi->wgmi_mac2));
	else {
		wg_algo_mac(wgmi->wgmi_mac2, sizeof(wgmi->wgmi_mac2),
		    wgp->wgp_latest_cookie, WG_COOKIE_LEN,
		    (const uint8_t *)wgmi,
		    offsetof(struct wg_msg_init, wgmi_mac2),
		    NULL, 0);
	}

	memcpy(wgs->wgs_ephemeral_key_pub, pubkey, sizeof(pubkey));
	memcpy(wgs->wgs_ephemeral_key_priv, privkey, sizeof(privkey));
	memcpy(wgs->wgs_handshake_hash, hash, sizeof(hash));
	memcpy(wgs->wgs_chaining_key, ckey, sizeof(ckey));
	WG_DLOG("sender=%x\n", wgs->wgs_local_index);
}

static void __noinline
wg_handle_msg_init(struct wg_softc *wg, const struct wg_msg_init *wgmi,
    const struct sockaddr *src)
{
	uint8_t ckey[WG_CHAINING_KEY_LEN]; /* [W] 5.4.2: Ci */
	uint8_t hash[WG_HASH_LEN]; /* [W] 5.4.2: Hi */
	uint8_t cipher_key[WG_CIPHER_KEY_LEN];
	uint8_t peer_pubkey[WG_STATIC_KEY_LEN];
	struct wg_peer *wgp;
	struct wg_session *wgs;
	int error, ret;
	struct psref psref_peer;
	uint8_t mac1[WG_MAC_LEN];

	WG_TRACE("init msg received");

	wg_algo_mac_mac1(mac1, sizeof(mac1),
	    wg->wg_pubkey, sizeof(wg->wg_pubkey),
	    (const uint8_t *)wgmi, offsetof(struct wg_msg_init, wgmi_mac1));

	/*
	 * [W] 5.3: Denial of Service Mitigation & Cookies
	 * "the responder, ..., must always reject messages with an invalid
	 * msg.mac1"
	 */
	if (!consttime_memequal(mac1, wgmi->wgmi_mac1, sizeof(mac1))) {
		WG_DLOG("mac1 is invalid\n");
		return;
	}

	/*
	 * [W] 5.4.2: First Message: Initiator to Responder
	 * "When the responder receives this message, it does the same
	 * operations so that its final state variables are identical,
	 * replacing the operands of the DH function to produce equivalent
	 * values."
	 * Note that the following comments on the operations are simply
	 * copies of the initiator's.
	 */

	/* Ci := HASH(CONSTRUCTION) */
	/* Hi := HASH(Ci || IDENTIFIER) */
	wg_init_key_and_hash(ckey, hash);
	/* Hi := HASH(Hi || Sr^pub) */
	wg_algo_hash(hash, wg->wg_pubkey, sizeof(wg->wg_pubkey));

	/* [N] 2.2: "e" */
	/* Ci := KDF1(Ci, Ei^pub) */
	wg_algo_kdf(ckey, NULL, NULL, ckey, wgmi->wgmi_ephemeral,
	    sizeof(wgmi->wgmi_ephemeral));
	/* Hi := HASH(Hi || msg.ephemeral) */
	wg_algo_hash(hash, wgmi->wgmi_ephemeral, sizeof(wgmi->wgmi_ephemeral));

	WG_DUMP_HASH("ckey", ckey);

	/* [N] 2.2: "es" */
	/* Ci, k := KDF2(Ci, DH(Ei^priv, Sr^pub)) */
	wg_algo_dh_kdf(ckey, cipher_key, wg->wg_privkey, wgmi->wgmi_ephemeral);

	WG_DUMP_HASH48("wgmi_static", wgmi->wgmi_static);

	/* [N] 2.2: "s" */
	/* msg.static := AEAD(k, 0, Si^pub, Hi) */
	error = wg_algo_aead_dec(peer_pubkey, WG_STATIC_KEY_LEN, cipher_key, 0,
	    wgmi->wgmi_static, sizeof(wgmi->wgmi_static), hash, sizeof(hash));
	if (error != 0) {
		WG_LOG_RATECHECK(&wg->wg_ppsratecheck, LOG_DEBUG,
		    "%s: wg_algo_aead_dec for secret key failed\n",
		    if_name(&wg->wg_if));
		return;
	}
	/* Hi := HASH(Hi || msg.static) */
	wg_algo_hash(hash, wgmi->wgmi_static, sizeof(wgmi->wgmi_static));

	wgp = wg_lookup_peer_by_pubkey(wg, peer_pubkey, &psref_peer);
	if (wgp == NULL) {
		WG_DLOG("peer not found\n");
		return;
	}

	/*
	 * Lock the peer to serialize access to cookie state.
	 *
	 * XXX Can we safely avoid holding the lock across DH?  Take it
	 * just to verify mac2 and then unlock/DH/lock?
	 */
	mutex_enter(wgp->wgp_lock);

	if (__predict_false(wg_is_underload(wg, wgp, WG_MSG_TYPE_INIT))) {
		WG_TRACE("under load");
		/*
		 * [W] 5.3: Denial of Service Mitigation & Cookies
		 * "the responder, ..., and when under load may reject messages
		 * with an invalid msg.mac2.  If the responder receives a
		 * message with a valid msg.mac1 yet with an invalid msg.mac2,
		 * and is under load, it may respond with a cookie reply
		 * message"
		 */
		uint8_t zero[WG_MAC_LEN] = {0};
		if (consttime_memequal(wgmi->wgmi_mac2, zero, sizeof(zero))) {
			WG_TRACE("sending a cookie message: no cookie included");
			(void)wg_send_cookie_msg(wg, wgp, wgmi->wgmi_sender,
			    wgmi->wgmi_mac1, src);
			goto out;
		}
		if (!wgp->wgp_last_sent_cookie_valid) {
			WG_TRACE("sending a cookie message: no cookie sent ever");
			(void)wg_send_cookie_msg(wg, wgp, wgmi->wgmi_sender,
			    wgmi->wgmi_mac1, src);
			goto out;
		}
		uint8_t mac2[WG_MAC_LEN];
		wg_algo_mac(mac2, sizeof(mac2), wgp->wgp_last_sent_cookie,
		    WG_COOKIE_LEN, (const uint8_t *)wgmi,
		    offsetof(struct wg_msg_init, wgmi_mac2), NULL, 0);
		if (!consttime_memequal(mac2, wgmi->wgmi_mac2, sizeof(mac2))) {
			WG_DLOG("mac2 is invalid\n");
			goto out;
		}
		WG_TRACE("under load, but continuing to send");
	}

	/* [N] 2.2: "ss" */
	/* Ci, k := KDF2(Ci, DH(Si^priv, Sr^pub)) */
	wg_algo_dh_kdf(ckey, cipher_key, wg->wg_privkey, wgp->wgp_pubkey);

	/* msg.timestamp := AEAD(k, TIMESTAMP(), Hi) */
	wg_timestamp_t timestamp;
	error = wg_algo_aead_dec(timestamp, sizeof(timestamp), cipher_key, 0,
	    wgmi->wgmi_timestamp, sizeof(wgmi->wgmi_timestamp),
	    hash, sizeof(hash));
	if (error != 0) {
		WG_LOG_RATECHECK(&wgp->wgp_ppsratecheck, LOG_DEBUG,
		    "%s: peer %s: wg_algo_aead_dec for timestamp failed\n",
		    if_name(&wg->wg_if), wgp->wgp_name);
		goto out;
	}
	/* Hi := HASH(Hi || msg.timestamp) */
	wg_algo_hash(hash, wgmi->wgmi_timestamp, sizeof(wgmi->wgmi_timestamp));

	/*
	 * [W] 5.1 "The responder keeps track of the greatest timestamp
	 *	received per peer and discards packets containing
	 *	timestamps less than or equal to it."
	 */
	ret = memcmp(timestamp, wgp->wgp_timestamp_latest_init,
	    sizeof(timestamp));
	if (ret <= 0) {
		WG_LOG_RATECHECK(&wgp->wgp_ppsratecheck, LOG_DEBUG,
		    "%s: peer %s: invalid init msg: timestamp is old\n",
		    if_name(&wg->wg_if), wgp->wgp_name);
		goto out;
	}
	memcpy(wgp->wgp_timestamp_latest_init, timestamp, sizeof(timestamp));

	/*
	 * Message is good -- we're committing to handle it now, unless
	 * we were already initiating a session.
	 */
	wgs = wgp->wgp_session_unstable;
	switch (wgs->wgs_state) {
	case WGS_STATE_UNKNOWN:		/* new session initiated by peer */
		wg_get_session_index(wg, wgs);
		break;
	case WGS_STATE_INIT_ACTIVE:	/* we're already initiating, drop */
		WG_TRACE("Session already initializing, ignoring the message");
		goto out;
	case WGS_STATE_INIT_PASSIVE:	/* peer is retrying, start over */
		WG_TRACE("Session already initializing, destroying old states");
		wg_clear_states(wgs);
		/* keep session index */
		break;
	case WGS_STATE_ESTABLISHED:	/* can't happen */
		panic("unstable session can't be established");
		break;
	case WGS_STATE_DESTROYING:	/* rekey initiated by peer */
		WG_TRACE("Session destroying, but forcing clear");
		callout_stop(&wgp->wgp_session_dtor_timer);
		wg_clear_states(wgs);
		/* keep session index */
		break;
	default:
		panic("invalid session state: %d", wgs->wgs_state);
	}
	wgs->wgs_state = WGS_STATE_INIT_PASSIVE;

	memcpy(wgs->wgs_handshake_hash, hash, sizeof(hash));
	memcpy(wgs->wgs_chaining_key, ckey, sizeof(ckey));
	memcpy(wgs->wgs_ephemeral_key_peer, wgmi->wgmi_ephemeral,
	    sizeof(wgmi->wgmi_ephemeral));

	wg_update_endpoint_if_necessary(wgp, src);

	(void)wg_send_handshake_msg_resp(wg, wgp, wgs, wgmi);

	wg_calculate_keys(wgs, false);
	wg_clear_states(wgs);

out:
	mutex_exit(wgp->wgp_lock);
	wg_put_peer(wgp, &psref_peer);
}

static struct socket *
wg_get_so_by_af(struct wg_softc *wg, const int af)
{

	switch (af) {
#ifdef INET
	case AF_INET:
		return wg->wg_so4;
#endif
#ifdef INET6
	case AF_INET6:
		return wg->wg_so6;
#endif
	default:
		panic("wg: no such af: %d", af);
	}
}

static struct socket *
wg_get_so_by_peer(struct wg_peer *wgp, struct wg_sockaddr *wgsa)
{

	return wg_get_so_by_af(wgp->wgp_sc, wgsa_family(wgsa));
}

static struct wg_sockaddr *
wg_get_endpoint_sa(struct wg_peer *wgp, struct psref *psref)
{
	struct wg_sockaddr *wgsa;
	int s;

	s = pserialize_read_enter();
	wgsa = atomic_load_consume(&wgp->wgp_endpoint);
	psref_acquire(psref, &wgsa->wgsa_psref, wg_psref_class);
	pserialize_read_exit(s);

	return wgsa;
}

static void
wg_put_sa(struct wg_peer *wgp, struct wg_sockaddr *wgsa, struct psref *psref)
{

	psref_release(psref, &wgsa->wgsa_psref, wg_psref_class);
}

static int
wg_send_so(struct wg_peer *wgp, struct mbuf *m)
{
	int error;
	struct socket *so;
	struct psref psref;
	struct wg_sockaddr *wgsa;

	wgsa = wg_get_endpoint_sa(wgp, &psref);
	so = wg_get_so_by_peer(wgp, wgsa);
	error = sosend(so, wgsatosa(wgsa), NULL, m, NULL, 0, curlwp);
	wg_put_sa(wgp, wgsa, &psref);

	return error;
}

static int
wg_send_handshake_msg_init(struct wg_softc *wg, struct wg_peer *wgp)
{
	int error;
	struct mbuf *m;
	struct wg_msg_init *wgmi;
	struct wg_session *wgs;

	KASSERT(mutex_owned(wgp->wgp_lock));

	wgs = wgp->wgp_session_unstable;
	/* XXX pull dispatch out into wg_task_send_init_message */
	switch (wgs->wgs_state) {
	case WGS_STATE_UNKNOWN:		/* new session initiated by us */
		wg_get_session_index(wg, wgs);
		break;
	case WGS_STATE_INIT_ACTIVE:	/* we're already initiating, stop */
		WG_TRACE("Session already initializing, skip starting new one");
		return EBUSY;
	case WGS_STATE_INIT_PASSIVE:	/* peer was trying -- XXX what now? */
		WG_TRACE("Session already initializing, destroying old states");
		wg_clear_states(wgs);
		/* keep session index */
		break;
	case WGS_STATE_ESTABLISHED:	/* can't happen */
		panic("unstable session can't be established");
		break;
	case WGS_STATE_DESTROYING:	/* rekey initiated by us too early */
		WG_TRACE("Session destroying");
		/* XXX should wait? */
		return EBUSY;
	}
	wgs->wgs_state = WGS_STATE_INIT_ACTIVE;

	m = m_gethdr(M_WAIT, MT_DATA);
	if (sizeof(*wgmi) > MHLEN) {
		m_clget(m, M_WAIT);
		CTASSERT(sizeof(*wgmi) <= MCLBYTES);
	}
	m->m_pkthdr.len = m->m_len = sizeof(*wgmi);
	wgmi = mtod(m, struct wg_msg_init *);
	wg_fill_msg_init(wg, wgp, wgs, wgmi);

	error = wg->wg_ops->send_hs_msg(wgp, m);
	if (error == 0) {
		WG_TRACE("init msg sent");

		if (wgp->wgp_handshake_start_time == 0)
			wgp->wgp_handshake_start_time = time_uptime;
		callout_schedule(&wgp->wgp_handshake_timeout_timer,
		    MIN(wg_rekey_timeout, (unsigned)(INT_MAX / hz)) * hz);
	} else {
		wg_put_session_index(wg, wgs);
		/* Initiation failed; toss packet waiting for it if any. */
		m = atomic_swap_ptr(&wgp->wgp_pending, NULL);
		m_freem(m);
	}

	return error;
}

static void
wg_fill_msg_resp(struct wg_softc *wg, struct wg_peer *wgp,
    struct wg_session *wgs, struct wg_msg_resp *wgmr,
    const struct wg_msg_init *wgmi)
{
	uint8_t ckey[WG_CHAINING_KEY_LEN]; /* [W] 5.4.3: Cr */
1764 uint8_t hash[WG_HASH_LEN]; /* [W] 5.4.3: Hr */
1765 uint8_t cipher_key[WG_KDF_OUTPUT_LEN];
1766 uint8_t pubkey[WG_EPHEMERAL_KEY_LEN];
1767 uint8_t privkey[WG_EPHEMERAL_KEY_LEN];
1768
1769 KASSERT(mutex_owned(wgp->wgp_lock));
1770 KASSERT(wgs == wgp->wgp_session_unstable);
1771 KASSERT(wgs->wgs_state == WGS_STATE_INIT_PASSIVE);
1772
1773 memcpy(hash, wgs->wgs_handshake_hash, sizeof(hash));
1774 memcpy(ckey, wgs->wgs_chaining_key, sizeof(ckey));
1775
1776 wgmr->wgmr_type = htole32(WG_MSG_TYPE_RESP);
1777 wgmr->wgmr_sender = wgs->wgs_local_index;
1778 wgmr->wgmr_receiver = wgmi->wgmi_sender;
1779
1780 /* [W] 5.4.3 Second Message: Responder to Initiator */
1781
1782 /* [N] 2.2: "e" */
1783 /* Er^priv, Er^pub := DH-GENERATE() */
1784 wg_algo_generate_keypair(pubkey, privkey);
1785 /* Cr := KDF1(Cr, Er^pub) */
1786 wg_algo_kdf(ckey, NULL, NULL, ckey, pubkey, sizeof(pubkey));
1787 /* msg.ephemeral := Er^pub */
1788 memcpy(wgmr->wgmr_ephemeral, pubkey, sizeof(wgmr->wgmr_ephemeral));
1789 /* Hr := HASH(Hr || msg.ephemeral) */
1790 wg_algo_hash(hash, pubkey, sizeof(pubkey));
1791
1792 WG_DUMP_HASH("ckey", ckey);
1793 WG_DUMP_HASH("hash", hash);
1794
1795 /* [N] 2.2: "ee" */
1796 /* Cr := KDF1(Cr, DH(Er^priv, Ei^pub)) */
1797 wg_algo_dh_kdf(ckey, NULL, privkey, wgs->wgs_ephemeral_key_peer);
1798
1799 /* [N] 2.2: "se" */
1800 /* Cr := KDF1(Cr, DH(Er^priv, Si^pub)) */
1801 wg_algo_dh_kdf(ckey, NULL, privkey, wgp->wgp_pubkey);
1802
1803 /* [N] 9.2: "psk" */
1804 {
1805 uint8_t kdfout[WG_KDF_OUTPUT_LEN];
1806 /* Cr, r, k := KDF3(Cr, Q) */
1807 wg_algo_kdf(ckey, kdfout, cipher_key, ckey, wgp->wgp_psk,
1808 sizeof(wgp->wgp_psk));
1809 /* Hr := HASH(Hr || r) */
1810 wg_algo_hash(hash, kdfout, sizeof(kdfout));
1811 }
1812
1813 /* msg.empty := AEAD(k, 0, e, Hr) */
1814 wg_algo_aead_enc(wgmr->wgmr_empty, sizeof(wgmr->wgmr_empty),
1815 cipher_key, 0, NULL, 0, hash, sizeof(hash));
1816 /* Hr := HASH(Hr || msg.empty) */
1817 wg_algo_hash(hash, wgmr->wgmr_empty, sizeof(wgmr->wgmr_empty));
1818
1819 WG_DUMP_HASH("wgmr_empty", wgmr->wgmr_empty);
1820
1821 /* [W] 5.4.4: Cookie MACs */
1822 /* msg.mac1 := MAC(HASH(LABEL-MAC1 || Sm'^pub), msg_a) */
1823 	wg_algo_mac_mac1(wgmr->wgmr_mac1, sizeof(wgmr->wgmr_mac1),
1824 wgp->wgp_pubkey, sizeof(wgp->wgp_pubkey),
1825 (const uint8_t *)wgmr, offsetof(struct wg_msg_resp, wgmr_mac1));
1826 /* Need mac1 to decrypt a cookie from a cookie message */
1827 memcpy(wgp->wgp_last_sent_mac1, wgmr->wgmr_mac1,
1828 sizeof(wgp->wgp_last_sent_mac1));
1829 wgp->wgp_last_sent_mac1_valid = true;
1830
1831 	if (wgp->wgp_latest_cookie_time == 0 ||
1832 	    (time_uptime - wgp->wgp_latest_cookie_time) >= WG_COOKIE_TIME) {
1833 		/* msg.mac2 := 0^16 */
1834 		memset(wgmr->wgmr_mac2, 0, sizeof(wgmr->wgmr_mac2));
1835 	} else {
1836 /* msg.mac2 := MAC(Lm, msg_b) */
1837 		wg_algo_mac(wgmr->wgmr_mac2, sizeof(wgmr->wgmr_mac2),
1838 wgp->wgp_latest_cookie, WG_COOKIE_LEN,
1839 (const uint8_t *)wgmr,
1840 offsetof(struct wg_msg_resp, wgmr_mac2),
1841 NULL, 0);
1842 }
1843
1844 memcpy(wgs->wgs_handshake_hash, hash, sizeof(hash));
1845 memcpy(wgs->wgs_chaining_key, ckey, sizeof(ckey));
1846 memcpy(wgs->wgs_ephemeral_key_pub, pubkey, sizeof(pubkey));
1847 memcpy(wgs->wgs_ephemeral_key_priv, privkey, sizeof(privkey));
1848 wgs->wgs_remote_index = wgmi->wgmi_sender;
1849 WG_DLOG("sender=%x\n", wgs->wgs_local_index);
1850 WG_DLOG("receiver=%x\n", wgs->wgs_remote_index);
1851 }
1852
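/*
 * wg_swap_sessions(wgp)
 *
 *	Promote the unstable session, which must be ESTABLISHED, to the
 *	stable slot and demote the previous stable session to the
 *	unstable slot.  Caller must hold wgp->wgp_lock; the release
 *	store pairs with unlocked readers of wgp_session_stable.
 */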
1853 static void
1854 wg_swap_sessions(struct wg_peer *wgp)
1855 {
1856 struct wg_session *wgs, *wgs_prev;
1857
1858 KASSERT(mutex_owned(wgp->wgp_lock));
1859
1860 wgs = wgp->wgp_session_unstable;
1861 KASSERT(wgs->wgs_state == WGS_STATE_ESTABLISHED);
1862
1863 wgs_prev = wgp->wgp_session_stable;
1864 KASSERT(wgs_prev->wgs_state == WGS_STATE_ESTABLISHED ||
1865 wgs_prev->wgs_state == WGS_STATE_UNKNOWN);
1866 atomic_store_release(&wgp->wgp_session_stable, wgs);
1867 wgp->wgp_session_unstable = wgs_prev;
1868 }
1869
1870 static void __noinline
1871 wg_handle_msg_resp(struct wg_softc *wg, const struct wg_msg_resp *wgmr,
1872 const struct sockaddr *src)
1873 {
1874 uint8_t ckey[WG_CHAINING_KEY_LEN]; /* [W] 5.4.3: Cr */
1875 	uint8_t hash[WG_HASH_LEN]; /* [W] 5.4.3: Hr */
1876 uint8_t cipher_key[WG_KDF_OUTPUT_LEN];
1877 struct wg_peer *wgp;
1878 struct wg_session *wgs;
1879 struct psref psref;
1880 int error;
1881 uint8_t mac1[WG_MAC_LEN];
1882 struct wg_session *wgs_prev;
1883 struct mbuf *m;
1884
1885 wg_algo_mac_mac1(mac1, sizeof(mac1),
1886 wg->wg_pubkey, sizeof(wg->wg_pubkey),
1887 (const uint8_t *)wgmr, offsetof(struct wg_msg_resp, wgmr_mac1));
1888
1889 /*
1890 * [W] 5.3: Denial of Service Mitigation & Cookies
1891 * "the responder, ..., must always reject messages with an invalid
1892 * msg.mac1"
1893 */
1894 if (!consttime_memequal(mac1, wgmr->wgmr_mac1, sizeof(mac1))) {
1895 WG_DLOG("mac1 is invalid\n");
1896 return;
1897 }
1898
1899 WG_TRACE("resp msg received");
1900 wgs = wg_lookup_session_by_index(wg, wgmr->wgmr_receiver, &psref);
1901 if (wgs == NULL) {
1902 WG_TRACE("No session found");
1903 return;
1904 }
1905
1906 wgp = wgs->wgs_peer;
1907
1908 mutex_enter(wgp->wgp_lock);
1909
1910 /* If we weren't waiting for a handshake response, drop it. */
1911 if (wgs->wgs_state != WGS_STATE_INIT_ACTIVE) {
1912 WG_TRACE("peer sent spurious handshake response, ignoring");
1913 goto out;
1914 }
1915
1916 if (__predict_false(wg_is_underload(wg, wgp, WG_MSG_TYPE_RESP))) {
1917 WG_TRACE("under load");
1918 /*
1919 * [W] 5.3: Denial of Service Mitigation & Cookies
1920 * "the responder, ..., and when under load may reject messages
1921 * with an invalid msg.mac2. If the responder receives a
1922 * message with a valid msg.mac1 yet with an invalid msg.mac2,
1923 * and is under load, it may respond with a cookie reply
1924 * message"
1925 */
1926 uint8_t zero[WG_MAC_LEN] = {0};
1927 if (consttime_memequal(wgmr->wgmr_mac2, zero, sizeof(zero))) {
1928 WG_TRACE("sending a cookie message: no cookie included");
1929 (void)wg_send_cookie_msg(wg, wgp, wgmr->wgmr_sender,
1930 wgmr->wgmr_mac1, src);
1931 goto out;
1932 }
1933 if (!wgp->wgp_last_sent_cookie_valid) {
1934 			WG_TRACE("sending a cookie message: no cookie sent yet");
1935 (void)wg_send_cookie_msg(wg, wgp, wgmr->wgmr_sender,
1936 wgmr->wgmr_mac1, src);
1937 goto out;
1938 }
1939 uint8_t mac2[WG_MAC_LEN];
1940 wg_algo_mac(mac2, sizeof(mac2), wgp->wgp_last_sent_cookie,
1941 WG_COOKIE_LEN, (const uint8_t *)wgmr,
1942 offsetof(struct wg_msg_resp, wgmr_mac2), NULL, 0);
1943 if (!consttime_memequal(mac2, wgmr->wgmr_mac2, sizeof(mac2))) {
1944 WG_DLOG("mac2 is invalid\n");
1945 goto out;
1946 }
1947 		WG_TRACE("under load, but continuing to send");
1948 }
1949
1950 memcpy(hash, wgs->wgs_handshake_hash, sizeof(hash));
1951 memcpy(ckey, wgs->wgs_chaining_key, sizeof(ckey));
1952
1953 /*
1954 * [W] 5.4.3 Second Message: Responder to Initiator
1955 * "When the initiator receives this message, it does the same
1956 * operations so that its final state variables are identical,
1957 * replacing the operands of the DH function to produce equivalent
1958 * values."
1959 * Note that the following comments of operations are just copies of
1960 * the initiator's ones.
1961 */
1962
1963 /* [N] 2.2: "e" */
1964 /* Cr := KDF1(Cr, Er^pub) */
1965 wg_algo_kdf(ckey, NULL, NULL, ckey, wgmr->wgmr_ephemeral,
1966 sizeof(wgmr->wgmr_ephemeral));
1967 /* Hr := HASH(Hr || msg.ephemeral) */
1968 wg_algo_hash(hash, wgmr->wgmr_ephemeral, sizeof(wgmr->wgmr_ephemeral));
1969
1970 WG_DUMP_HASH("ckey", ckey);
1971 WG_DUMP_HASH("hash", hash);
1972
1973 /* [N] 2.2: "ee" */
1974 /* Cr := KDF1(Cr, DH(Er^priv, Ei^pub)) */
1975 wg_algo_dh_kdf(ckey, NULL, wgs->wgs_ephemeral_key_priv,
1976 wgmr->wgmr_ephemeral);
1977
1978 /* [N] 2.2: "se" */
1979 /* Cr := KDF1(Cr, DH(Er^priv, Si^pub)) */
1980 wg_algo_dh_kdf(ckey, NULL, wg->wg_privkey, wgmr->wgmr_ephemeral);
1981
1982 /* [N] 9.2: "psk" */
1983 {
1984 uint8_t kdfout[WG_KDF_OUTPUT_LEN];
1985 /* Cr, r, k := KDF3(Cr, Q) */
1986 wg_algo_kdf(ckey, kdfout, cipher_key, ckey, wgp->wgp_psk,
1987 sizeof(wgp->wgp_psk));
1988 /* Hr := HASH(Hr || r) */
1989 wg_algo_hash(hash, kdfout, sizeof(kdfout));
1990 }
1991
1992 {
1993 uint8_t out[sizeof(wgmr->wgmr_empty)]; /* for safety */
1994 /* msg.empty := AEAD(k, 0, e, Hr) */
1995 error = wg_algo_aead_dec(out, 0, cipher_key, 0, wgmr->wgmr_empty,
1996 sizeof(wgmr->wgmr_empty), hash, sizeof(hash));
1997 WG_DUMP_HASH("wgmr_empty", wgmr->wgmr_empty);
1998 if (error != 0) {
1999 WG_LOG_RATECHECK(&wgp->wgp_ppsratecheck, LOG_DEBUG,
2000 "%s: peer %s: wg_algo_aead_dec for empty message failed\n",
2001 if_name(&wg->wg_if), wgp->wgp_name);
2002 goto out;
2003 }
2004 /* Hr := HASH(Hr || msg.empty) */
2005 wg_algo_hash(hash, wgmr->wgmr_empty, sizeof(wgmr->wgmr_empty));
2006 }
2007
2008 memcpy(wgs->wgs_handshake_hash, hash, sizeof(wgs->wgs_handshake_hash));
2009 memcpy(wgs->wgs_chaining_key, ckey, sizeof(wgs->wgs_chaining_key));
2010 wgs->wgs_remote_index = wgmr->wgmr_sender;
2011 WG_DLOG("receiver=%x\n", wgs->wgs_remote_index);
2012
2013 KASSERT(wgs->wgs_state == WGS_STATE_INIT_ACTIVE);
2014 wgs->wgs_state = WGS_STATE_ESTABLISHED;
2015 wgs->wgs_time_established = time_uptime;
2016 wgs->wgs_time_last_data_sent = 0;
2017 wgs->wgs_is_initiator = true;
2018 wg_calculate_keys(wgs, true);
2019 wg_clear_states(wgs);
2020 WG_TRACE("WGS_STATE_ESTABLISHED");
2021
2022 callout_stop(&wgp->wgp_handshake_timeout_timer);
2023
2024 wg_swap_sessions(wgp);
2025 KASSERT(wgs == wgp->wgp_session_stable);
2026 wgs_prev = wgp->wgp_session_unstable;
2027 getnanotime(&wgp->wgp_last_handshake_time);
2028 wgp->wgp_handshake_start_time = 0;
2029 wgp->wgp_last_sent_mac1_valid = false;
2030 wgp->wgp_last_sent_cookie_valid = false;
2031
2032 wg_schedule_rekey_timer(wgp);
2033
2034 wg_update_endpoint_if_necessary(wgp, src);
2035
2036 /*
2037 * If we had a data packet queued up, send it; otherwise send a
2038 * keepalive message -- either way we have to send something
2039 * immediately or else the responder will never answer.
2040 */
2041 if ((m = atomic_swap_ptr(&wgp->wgp_pending, NULL)) != NULL) {
2042 kpreempt_disable();
2043 const uint32_t h = curcpu()->ci_index; // pktq_rps_hash(m)
2044 M_SETCTX(m, wgp);
2045 if (__predict_false(!pktq_enqueue(wg_pktq, m, h))) {
2046 WGLOG(LOG_ERR, "%s: pktq full, dropping\n",
2047 if_name(&wg->wg_if));
2048 m_freem(m);
2049 }
2050 kpreempt_enable();
2051 } else {
2052 wg_send_keepalive_msg(wgp, wgs);
2053 }
2054
2055 if (wgs_prev->wgs_state == WGS_STATE_ESTABLISHED) {
2056 /* Wait for wg_get_stable_session to drain. */
2057 pserialize_perform(wgp->wgp_psz);
2058
2059 /* Transition ESTABLISHED->DESTROYING. */
2060 wgs_prev->wgs_state = WGS_STATE_DESTROYING;
2061
2062 /* We can't destroy the old session immediately */
2063 wg_schedule_session_dtor_timer(wgp);
2064 } else {
2065 KASSERTMSG(wgs_prev->wgs_state == WGS_STATE_UNKNOWN,
2066 "state=%d", wgs_prev->wgs_state);
2067 }
2068
2069 out:
2070 mutex_exit(wgp->wgp_lock);
2071 wg_put_session(wgs, &psref);
2072 }
2073
2074 static int
2075 wg_send_handshake_msg_resp(struct wg_softc *wg, struct wg_peer *wgp,
2076 struct wg_session *wgs, const struct wg_msg_init *wgmi)
2077 {
2078 int error;
2079 struct mbuf *m;
2080 struct wg_msg_resp *wgmr;
2081
2082 KASSERT(mutex_owned(wgp->wgp_lock));
2083 KASSERT(wgs == wgp->wgp_session_unstable);
2084 KASSERT(wgs->wgs_state == WGS_STATE_INIT_PASSIVE);
2085
2086 m = m_gethdr(M_WAIT, MT_DATA);
2087 if (sizeof(*wgmr) > MHLEN) {
2088 m_clget(m, M_WAIT);
2089 CTASSERT(sizeof(*wgmr) <= MCLBYTES);
2090 }
2091 m->m_pkthdr.len = m->m_len = sizeof(*wgmr);
2092 wgmr = mtod(m, struct wg_msg_resp *);
2093 wg_fill_msg_resp(wg, wgp, wgs, wgmr, wgmi);
2094
2095 error = wg->wg_ops->send_hs_msg(wgp, m);
2096 if (error == 0)
2097 WG_TRACE("resp msg sent");
2098 return error;
2099 }
2100
2101 static struct wg_peer *
2102 wg_lookup_peer_by_pubkey(struct wg_softc *wg,
2103 const uint8_t pubkey[WG_STATIC_KEY_LEN], struct psref *psref)
2104 {
2105 struct wg_peer *wgp;
2106
2107 int s = pserialize_read_enter();
2108 wgp = thmap_get(wg->wg_peers_bypubkey, pubkey, WG_STATIC_KEY_LEN);
2109 if (wgp != NULL)
2110 wg_get_peer(wgp, psref);
2111 pserialize_read_exit(s);
2112
2113 return wgp;
2114 }
2115
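/*
 * wg_fill_msg_cookie(wg, wgp, wgmc, sender, mac1, src)
 *
 *	Construct a cookie reply message ([W] 5.4.7).  The cookie is a
 *	MAC of the sender's source address and port, keyed with a
 *	secret random value that is rotated every WG_RANDVAL_TIME
 *	seconds.  The cookie is then encrypted with wg_algo_xaead_enc
 *	under a key derived from our public key, using the received
 *	msg.mac1 as associated data and a fresh random salt as nonce.
 *	The plaintext cookie is also remembered so that a subsequent
 *	msg.mac2 from the peer can be verified.
 */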
2116 static void
2117 wg_fill_msg_cookie(struct wg_softc *wg, struct wg_peer *wgp,
2118 struct wg_msg_cookie *wgmc, const uint32_t sender,
2119 const uint8_t mac1[WG_MAC_LEN], const struct sockaddr *src)
2120 {
2121 uint8_t cookie[WG_COOKIE_LEN];
2122 uint8_t key[WG_HASH_LEN];
2123 uint8_t addr[sizeof(struct in6_addr)];
2124 size_t addrlen;
2125 uint16_t uh_sport; /* be */
2126
2127 KASSERT(mutex_owned(wgp->wgp_lock));
2128
2129 wgmc->wgmc_type = htole32(WG_MSG_TYPE_COOKIE);
2130 wgmc->wgmc_receiver = sender;
2131 cprng_fast(wgmc->wgmc_salt, sizeof(wgmc->wgmc_salt));
2132
2133 /*
2134 * [W] 5.4.7: Under Load: Cookie Reply Message
2135 * "The secret variable, Rm, changes every two minutes to a
2136 * random value"
2137 */
2138 if ((time_uptime - wgp->wgp_last_genrandval_time) > WG_RANDVAL_TIME) {
2139 wgp->wgp_randval = cprng_strong32();
2140 wgp->wgp_last_genrandval_time = time_uptime;
2141 }
2142
2143 switch (src->sa_family) {
2144 case AF_INET: {
2145 const struct sockaddr_in *sin = satocsin(src);
2146 addrlen = sizeof(sin->sin_addr);
2147 memcpy(addr, &sin->sin_addr, addrlen);
2148 uh_sport = sin->sin_port;
2149 break;
2150 }
2151 #ifdef INET6
2152 case AF_INET6: {
2153 const struct sockaddr_in6 *sin6 = satocsin6(src);
2154 addrlen = sizeof(sin6->sin6_addr);
2155 memcpy(addr, &sin6->sin6_addr, addrlen);
2156 uh_sport = sin6->sin6_port;
2157 break;
2158 }
2159 #endif
2160 default:
2161 panic("invalid af=%d", src->sa_family);
2162 }
2163
2164 wg_algo_mac(cookie, sizeof(cookie),
2165 (const uint8_t *)&wgp->wgp_randval, sizeof(wgp->wgp_randval),
2166 addr, addrlen, (const uint8_t *)&uh_sport, sizeof(uh_sport));
2167 wg_algo_mac_cookie(key, sizeof(key), wg->wg_pubkey,
2168 sizeof(wg->wg_pubkey));
2169 wg_algo_xaead_enc(wgmc->wgmc_cookie, sizeof(wgmc->wgmc_cookie), key,
2170 cookie, sizeof(cookie), mac1, WG_MAC_LEN, wgmc->wgmc_salt);
2171
2172 /* Need to store to calculate mac2 */
2173 memcpy(wgp->wgp_last_sent_cookie, cookie, sizeof(cookie));
2174 wgp->wgp_last_sent_cookie_valid = true;
2175 }
2176
2177 static int
2178 wg_send_cookie_msg(struct wg_softc *wg, struct wg_peer *wgp,
2179 const uint32_t sender, const uint8_t mac1[WG_MAC_LEN],
2180 const struct sockaddr *src)
2181 {
2182 int error;
2183 struct mbuf *m;
2184 struct wg_msg_cookie *wgmc;
2185
2186 KASSERT(mutex_owned(wgp->wgp_lock));
2187
2188 m = m_gethdr(M_WAIT, MT_DATA);
2189 if (sizeof(*wgmc) > MHLEN) {
2190 m_clget(m, M_WAIT);
2191 CTASSERT(sizeof(*wgmc) <= MCLBYTES);
2192 }
2193 m->m_pkthdr.len = m->m_len = sizeof(*wgmc);
2194 wgmc = mtod(m, struct wg_msg_cookie *);
2195 wg_fill_msg_cookie(wg, wgp, wgmc, sender, mac1, src);
2196
2197 error = wg->wg_ops->send_hs_msg(wgp, m);
2198 if (error == 0)
2199 WG_TRACE("cookie msg sent");
2200 return error;
2201 }
2202
2203 static bool
2204 wg_is_underload(struct wg_softc *wg, struct wg_peer *wgp, int msgtype)
2205 {
2206 #ifdef WG_DEBUG_PARAMS
2207 if (wg_force_underload)
2208 return true;
2209 #endif
2210
2211 /*
2212 	 * XXX We don't have a means of load estimation.  The purpose of
2213 	 * this mechanism is DoS mitigation, so we treat frequent handshake
2214 	 * messages as (a kind of) load; if a message of the same type
2215 	 * arrives from a peer within one second, we consider ourselves under load.
2216 */
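	/*
	 * For example, two handshake messages of the same type from the
	 * same peer within the same second of uptime make the second
	 * one count as "under load"; a second or more apart, they do not.
	 */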
2217 time_t last = wgp->wgp_last_msg_received_time[msgtype];
2218 wgp->wgp_last_msg_received_time[msgtype] = time_uptime;
2219 return (time_uptime - last) == 0;
2220 }
2221
2222 static void
2223 wg_calculate_keys(struct wg_session *wgs, const bool initiator)
2224 {
2225
2226 KASSERT(mutex_owned(wgs->wgs_peer->wgp_lock));
2227
2228 /*
2229 * [W] 5.4.5: Ti^send = Tr^recv, Ti^recv = Tr^send := KDF2(Ci = Cr, e)
2230 */
2231 if (initiator) {
2232 wg_algo_kdf(wgs->wgs_tkey_send, wgs->wgs_tkey_recv, NULL,
2233 wgs->wgs_chaining_key, NULL, 0);
2234 } else {
2235 wg_algo_kdf(wgs->wgs_tkey_recv, wgs->wgs_tkey_send, NULL,
2236 wgs->wgs_chaining_key, NULL, 0);
2237 }
2238 WG_DUMP_HASH("wgs_tkey_send", wgs->wgs_tkey_send);
2239 WG_DUMP_HASH("wgs_tkey_recv", wgs->wgs_tkey_recv);
2240 }
2241
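/*
 * Accessors for the per-session send counter (used, e.g., by
 * wg_session_hit_limits below).  Where 64-bit atomic loads and stores
 * are available the counter is maintained lock-free; otherwise
 * wgs_send_counter_lock serializes access.
 * wg_session_inc_send_counter() returns the pre-increment value.
 */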
2242 static uint64_t
2243 wg_session_get_send_counter(struct wg_session *wgs)
2244 {
2245 #ifdef __HAVE_ATOMIC64_LOADSTORE
2246 return atomic_load_relaxed(&wgs->wgs_send_counter);
2247 #else
2248 uint64_t send_counter;
2249
2250 mutex_enter(&wgs->wgs_send_counter_lock);
2251 send_counter = wgs->wgs_send_counter;
2252 mutex_exit(&wgs->wgs_send_counter_lock);
2253
2254 return send_counter;
2255 #endif
2256 }
2257
2258 static uint64_t
2259 wg_session_inc_send_counter(struct wg_session *wgs)
2260 {
2261 #ifdef __HAVE_ATOMIC64_LOADSTORE
2262 return atomic_inc_64_nv(&wgs->wgs_send_counter) - 1;
2263 #else
2264 uint64_t send_counter;
2265
2266 mutex_enter(&wgs->wgs_send_counter_lock);
2267 send_counter = wgs->wgs_send_counter++;
2268 mutex_exit(&wgs->wgs_send_counter_lock);
2269
2270 return send_counter;
2271 #endif
2272 }
2273
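/*
 * wg_clear_states(wgs)
 *
 *	Reset the session's transient state: zero the send counter,
 *	reset the replay window, and wipe the handshake hash, chaining
 *	key, and ephemeral keys with explicit_memset so the secrets do
 *	not linger in memory.  Caller must hold wgp->wgp_lock.
 */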
2274 static void
2275 wg_clear_states(struct wg_session *wgs)
2276 {
2277
2278 KASSERT(mutex_owned(wgs->wgs_peer->wgp_lock));
2279
2280 wgs->wgs_send_counter = 0;
2281 sliwin_reset(&wgs->wgs_recvwin->window);
2282
2283 #define wgs_clear(v) explicit_memset(wgs->wgs_##v, 0, sizeof(wgs->wgs_##v))
2284 wgs_clear(handshake_hash);
2285 wgs_clear(chaining_key);
2286 wgs_clear(ephemeral_key_pub);
2287 wgs_clear(ephemeral_key_priv);
2288 wgs_clear(ephemeral_key_peer);
2289 #undef wgs_clear
2290 }
2291
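/*
 * wg_lookup_session_by_index(wg, index, psref)
 *
 *	Look up a session by our local 32-bit index in a pserialize
 *	read section and acquire a psref on it so it stays valid after
 *	the read section ends.  Returns NULL if no session has that
 *	index.  The caller releases the reference with wg_put_session.
 */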
2292 static struct wg_session *
2293 wg_lookup_session_by_index(struct wg_softc *wg, const uint32_t index,
2294 struct psref *psref)
2295 {
2296 struct wg_session *wgs;
2297
2298 int s = pserialize_read_enter();
2299 wgs = thmap_get(wg->wg_sessions_byindex, &index, sizeof index);
2300 if (wgs != NULL) {
2301 KASSERT(atomic_load_relaxed(&wgs->wgs_state) !=
2302 WGS_STATE_UNKNOWN);
2303 psref_acquire(psref, &wgs->wgs_psref, wg_psref_class);
2304 }
2305 pserialize_read_exit(s);
2306
2307 return wgs;
2308 }
2309
2310 static void
2311 wg_schedule_rekey_timer(struct wg_peer *wgp)
2312 {
2313 int timeout = MIN(wg_rekey_after_time, (unsigned)(INT_MAX / hz));
2314
2315 callout_schedule(&wgp->wgp_rekey_timer, timeout * hz);
2316 }
2317
2318 static void
2319 wg_send_keepalive_msg(struct wg_peer *wgp, struct wg_session *wgs)
2320 {
2321 struct mbuf *m;
2322
2323 /*
2324 * [W] 6.5 Passive Keepalive
2325 * "A keepalive message is simply a transport data message with
2326 * a zero-length encapsulated encrypted inner-packet."
2327 */
2328 WG_TRACE("");
2329 m = m_gethdr(M_WAIT, MT_DATA);
2330 wg_send_data_msg(wgp, wgs, m);
2331 }
2332
2333 static bool
2334 wg_need_to_send_init_message(struct wg_session *wgs)
2335 {
2336 /*
2337 * [W] 6.2 Transport Message Limits
2338 * "if a peer is the initiator of a current secure session,
2339 * WireGuard will send a handshake initiation message to begin
2340 * a new secure session ... if after receiving a transport data
2341 	 * message, the current secure session is (REJECT-AFTER-TIME -
2342 	 * KEEPALIVE-TIMEOUT - REKEY-TIMEOUT) seconds old and it has
2343 * not yet acted upon this event."
2344 */
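	/*
	 * For example, with the paper's default timers
	 * (REJECT-AFTER-TIME = 180s, KEEPALIVE-TIMEOUT = 10s,
	 * REKEY-TIMEOUT = 5s) the threshold below is 165 seconds: an
	 * initiator that receives data on a session at least that old,
	 * without having sent any data on it yet, re-initiates.
	 */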
2345 return wgs->wgs_is_initiator && wgs->wgs_time_last_data_sent == 0 &&
2346 (time_uptime - wgs->wgs_time_established) >=
2347 (wg_reject_after_time - wg_keepalive_timeout - wg_rekey_timeout);
2348 }
2349
2350 static void
2351 wg_schedule_peer_task(struct wg_peer *wgp, unsigned int task)
2352 {
2353
2354 mutex_enter(wgp->wgp_intr_lock);
2355 WG_DLOG("tasks=%d, task=%d\n", wgp->wgp_tasks, task);
2356 if (wgp->wgp_tasks == 0)
2357 /*
2358 * XXX If the current CPU is already loaded -- e.g., if
2359 * there's already a bunch of handshakes queued up --
2360 * consider tossing this over to another CPU to
2361 * distribute the load.
2362 */
2363 workqueue_enqueue(wg_wq, &wgp->wgp_work, NULL);
2364 wgp->wgp_tasks |= task;
2365 mutex_exit(wgp->wgp_intr_lock);
2366 }
2367
2368 static void
2369 wg_change_endpoint(struct wg_peer *wgp, const struct sockaddr *new)
2370 {
2371 struct wg_sockaddr *wgsa_prev;
2372
2373 WG_TRACE("Changing endpoint");
2374
2375 memcpy(wgp->wgp_endpoint0, new, new->sa_len);
2376 wgsa_prev = wgp->wgp_endpoint;
2377 atomic_store_release(&wgp->wgp_endpoint, wgp->wgp_endpoint0);
2378 wgp->wgp_endpoint0 = wgsa_prev;
2379 atomic_store_release(&wgp->wgp_endpoint_available, true);
2380
2381 wg_schedule_peer_task(wgp, WGP_TASK_ENDPOINT_CHANGED);
2382 }
2383
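/*
 * wg_validate_inner_packet(packet, decrypted_len, af)
 *
 *	Sanity-check the decrypted inner packet: determine IPv4 vs.
 *	IPv6 from the IP version field, returned via *af, and verify
 *	that the total length declared in the IP header does not exceed
 *	the decrypted payload length.
 */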
2384 static bool
2385 wg_validate_inner_packet(const char *packet, size_t decrypted_len, int *af)
2386 {
2387 uint16_t packet_len;
2388 const struct ip *ip;
2389
2390 if (__predict_false(decrypted_len < sizeof(*ip))) {
2391 WG_DLOG("decrypted_len=%zu < %zu\n", decrypted_len,
2392 sizeof(*ip));
2393 return false;
2394 }
2395
2396 ip = (const struct ip *)packet;
2397 if (ip->ip_v == 4)
2398 *af = AF_INET;
2399 else if (ip->ip_v == 6)
2400 *af = AF_INET6;
2401 else {
2402 WG_DLOG("ip_v=%d\n", ip->ip_v);
2403 return false;
2404 }
2405
2406 WG_DLOG("af=%d\n", *af);
2407
2408 switch (*af) {
2409 #ifdef INET
2410 case AF_INET:
2411 packet_len = ntohs(ip->ip_len);
2412 break;
2413 #endif
2414 #ifdef INET6
2415 case AF_INET6: {
2416 const struct ip6_hdr *ip6;
2417
2418 if (__predict_false(decrypted_len < sizeof(*ip6))) {
2419 WG_DLOG("decrypted_len=%zu < %zu\n", decrypted_len,
2420 sizeof(*ip6));
2421 return false;
2422 }
2423
2424 ip6 = (const struct ip6_hdr *)packet;
2425 packet_len = sizeof(*ip6) + ntohs(ip6->ip6_plen);
2426 break;
2427 }
2428 #endif
2429 default:
2430 return false;
2431 }
2432
2433 if (packet_len > decrypted_len) {
2434 WG_DLOG("packet_len %u > decrypted_len %zu\n", packet_len,
2435 decrypted_len);
2436 return false;
2437 }
2438
2439 return true;
2440 }
2441
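/*
 * wg_validate_route(wg, wgp_expected, af, packet)
 *
 *	Cryptokey routing check for received packets ([W] II): resolve
 *	the inner packet's source address in the allowed-IPs table and
 *	accept the packet only if it maps back to the peer whose
 *	session decrypted it.
 */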
2442 static bool
2443 wg_validate_route(struct wg_softc *wg, struct wg_peer *wgp_expected,
2444 int af, char *packet)
2445 {
2446 struct sockaddr_storage ss;
2447 struct sockaddr *sa;
2448 struct psref psref;
2449 struct wg_peer *wgp;
2450 bool ok;
2451
2452 /*
2453 * II CRYPTOKEY ROUTING
2454 * "it will only accept it if its source IP resolves in the
2455 * table to the public key used in the secure session for
2456 * decrypting it."
2457 */
2458
2459 if (af == AF_INET) {
2460 const struct ip *ip = (const struct ip *)packet;
2461 struct sockaddr_in *sin = (struct sockaddr_in *)&ss;
2462 sockaddr_in_init(sin, &ip->ip_src, 0);
2463 sa = sintosa(sin);
2464 #ifdef INET6
2465 } else {
2466 const struct ip6_hdr *ip6 = (const struct ip6_hdr *)packet;
2467 struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&ss;
2468 sockaddr_in6_init(sin6, &ip6->ip6_src, 0, 0, 0);
2469 sa = sin6tosa(sin6);
2470 #endif
2471 }
2472
2473 wgp = wg_pick_peer_by_sa(wg, sa, &psref);
2474 ok = (wgp == wgp_expected);
2475 if (wgp != NULL)
2476 wg_put_peer(wgp, &psref);
2477
2478 return ok;
2479 }
2480
2481 static void
2482 wg_session_dtor_timer(void *arg)
2483 {
2484 struct wg_peer *wgp = arg;
2485
2486 WG_TRACE("enter");
2487
2488 wg_schedule_peer_task(wgp, WGP_TASK_DESTROY_PREV_SESSION);
2489 }
2490
2491 static void
2492 wg_schedule_session_dtor_timer(struct wg_peer *wgp)
2493 {
2494
2495 /* 1 second grace period */
2496 callout_schedule(&wgp->wgp_session_dtor_timer, hz);
2497 }
2498
2499 static bool
2500 sockaddr_port_match(const struct sockaddr *sa1, const struct sockaddr *sa2)
2501 {
2502 if (sa1->sa_family != sa2->sa_family)
2503 return false;
2504
2505 switch (sa1->sa_family) {
2506 #ifdef INET
2507 case AF_INET:
2508 return satocsin(sa1)->sin_port == satocsin(sa2)->sin_port;
2509 #endif
2510 #ifdef INET6
2511 case AF_INET6:
2512 return satocsin6(sa1)->sin6_port == satocsin6(sa2)->sin6_port;
2513 #endif
2514 default:
2515 return false;
2516 }
2517 }
2518
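/*
 * wg_update_endpoint_if_necessary(wgp, src)
 *
 *	Roaming support ([W] III): if the outer source address or port
 *	of an authenticated packet differs from the peer's current
 *	endpoint, switch the endpoint to the new address.
 *	wgp_endpoint_changing throttles this so another switch is not
 *	started until wg_task_endpoint_changed has cleaned up the
 *	previous one.
 */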
2519 static void
2520 wg_update_endpoint_if_necessary(struct wg_peer *wgp,
2521 const struct sockaddr *src)
2522 {
2523 struct wg_sockaddr *wgsa;
2524 struct psref psref;
2525
2526 wgsa = wg_get_endpoint_sa(wgp, &psref);
2527
2528 #ifdef WG_DEBUG_LOG
2529 char oldaddr[128], newaddr[128];
2530 sockaddr_format(wgsatosa(wgsa), oldaddr, sizeof(oldaddr));
2531 sockaddr_format(src, newaddr, sizeof(newaddr));
2532 WG_DLOG("old=%s, new=%s\n", oldaddr, newaddr);
2533 #endif
2534
2535 /*
2536 * III: "Since the packet has authenticated correctly, the source IP of
2537 * the outer UDP/IP packet is used to update the endpoint for peer..."
2538 */
2539 if (__predict_false(sockaddr_cmp(src, wgsatosa(wgsa)) != 0 ||
2540 !sockaddr_port_match(src, wgsatosa(wgsa)))) {
2541 /* XXX We can't change the endpoint twice in a short period */
2542 if (atomic_swap_uint(&wgp->wgp_endpoint_changing, 1) == 0) {
2543 wg_change_endpoint(wgp, src);
2544 }
2545 }
2546
2547 wg_put_sa(wgp, wgsa, &psref);
2548 }
2549
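/*
 * wg_handle_msg_data(wg, m, src)
 *
 *	Handle an incoming transport data message: look up the session
 *	by the receiver index, do a fast out-of-window check on the
 *	counter, make the ciphertext contiguous, decrypt and verify it,
 *	update the replay window, validate the inner packet and its
 *	cryptokey route, update the peer's endpoint, and hand the inner
 *	packet to the network stack.  Also drives the state machine: a
 *	first data message on an INIT_PASSIVE session schedules its
 *	establishment, and stale sessions schedule rekeys or keepalives.
 *	Consumes m.
 */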
2550 static void __noinline
2551 wg_handle_msg_data(struct wg_softc *wg, struct mbuf *m,
2552 const struct sockaddr *src)
2553 {
2554 struct wg_msg_data *wgmd;
2555 char *encrypted_buf = NULL, *decrypted_buf;
2556 size_t encrypted_len, decrypted_len;
2557 struct wg_session *wgs;
2558 struct wg_peer *wgp;
2559 int state;
2560 size_t mlen;
2561 struct psref psref;
2562 int error, af;
2563 bool success, free_encrypted_buf = false, ok;
2564 struct mbuf *n;
2565
2566 KASSERT(m->m_len >= sizeof(struct wg_msg_data));
2567 wgmd = mtod(m, struct wg_msg_data *);
2568
2569 KASSERT(wgmd->wgmd_type == htole32(WG_MSG_TYPE_DATA));
2570 WG_TRACE("data");
2571
2572 /* Find the putative session, or drop. */
2573 wgs = wg_lookup_session_by_index(wg, wgmd->wgmd_receiver, &psref);
2574 if (wgs == NULL) {
2575 WG_TRACE("No session found");
2576 m_freem(m);
2577 return;
2578 }
2579
2580 /*
2581 * We are only ready to handle data when in INIT_PASSIVE,
2582 * ESTABLISHED, or DESTROYING. All transitions out of that
2583 * state dissociate the session index and drain psrefs.
2584 */
2585 state = atomic_load_relaxed(&wgs->wgs_state);
2586 switch (state) {
2587 case WGS_STATE_UNKNOWN:
2588 panic("wg session %p in unknown state has session index %u",
2589 wgs, wgmd->wgmd_receiver);
2590 case WGS_STATE_INIT_ACTIVE:
2591 WG_TRACE("not yet ready for data");
2592 goto out;
2593 case WGS_STATE_INIT_PASSIVE:
2594 case WGS_STATE_ESTABLISHED:
2595 case WGS_STATE_DESTROYING:
2596 break;
2597 }
2598
2599 /*
2600 * Get the peer, for rate-limited logs (XXX MPSAFE, dtrace) and
2601 * to update the endpoint if authentication succeeds.
2602 */
2603 wgp = wgs->wgs_peer;
2604
2605 /*
2606 * Reject outrageously wrong sequence numbers before doing any
2607 * crypto work or taking any locks.
2608 */
2609 error = sliwin_check_fast(&wgs->wgs_recvwin->window,
2610 le64toh(wgmd->wgmd_counter));
2611 if (error) {
2612 WG_LOG_RATECHECK(&wgp->wgp_ppsratecheck, LOG_DEBUG,
2613 "%s: peer %s: out-of-window packet: %"PRIu64"\n",
2614 if_name(&wg->wg_if), wgp->wgp_name,
2615 le64toh(wgmd->wgmd_counter));
2616 goto out;
2617 }
2618
2619 /* Ensure the payload and authenticator are contiguous. */
2620 mlen = m_length(m);
2621 encrypted_len = mlen - sizeof(*wgmd);
2622 if (encrypted_len < WG_AUTHTAG_LEN) {
2623 WG_DLOG("Short encrypted_len: %lu\n", encrypted_len);
2624 goto out;
2625 }
2626 success = m_ensure_contig(&m, sizeof(*wgmd) + encrypted_len);
2627 if (success) {
2628 encrypted_buf = mtod(m, char *) + sizeof(*wgmd);
2629 } else {
2630 encrypted_buf = kmem_intr_alloc(encrypted_len, KM_NOSLEEP);
2631 if (encrypted_buf == NULL) {
2632 WG_DLOG("failed to allocate encrypted_buf\n");
2633 goto out;
2634 }
2635 m_copydata(m, sizeof(*wgmd), encrypted_len, encrypted_buf);
2636 free_encrypted_buf = true;
2637 }
2638 /* m_ensure_contig may change m regardless of its result */
2639 KASSERT(m->m_len >= sizeof(*wgmd));
2640 wgmd = mtod(m, struct wg_msg_data *);
2641
2642 /*
2643 * Get a buffer for the plaintext. Add WG_AUTHTAG_LEN to avoid
2644 * a zero-length buffer (XXX). Drop if plaintext is longer
2645 * than MCLBYTES (XXX).
2646 */
2647 decrypted_len = encrypted_len - WG_AUTHTAG_LEN;
2648 if (decrypted_len > MCLBYTES) {
2649 		/* FIXME handle data larger than MCLBYTES */
2650 		WG_DLOG("can't handle data larger than MCLBYTES\n");
2651 goto out;
2652 }
2653 n = wg_get_mbuf(0, decrypted_len + WG_AUTHTAG_LEN);
2654 if (n == NULL) {
2655 WG_DLOG("wg_get_mbuf failed\n");
2656 goto out;
2657 }
2658 decrypted_buf = mtod(n, char *);
2659
2660 /* Decrypt and verify the packet. */
2661 WG_DLOG("mlen=%lu, encrypted_len=%lu\n", mlen, encrypted_len);
2662 error = wg_algo_aead_dec(decrypted_buf,
2663 encrypted_len - WG_AUTHTAG_LEN /* can be 0 */,
2664 wgs->wgs_tkey_recv, le64toh(wgmd->wgmd_counter), encrypted_buf,
2665 encrypted_len, NULL, 0);
2666 if (error != 0) {
2667 WG_LOG_RATECHECK(&wgp->wgp_ppsratecheck, LOG_DEBUG,
2668 "%s: peer %s: failed to wg_algo_aead_dec\n",
2669 if_name(&wg->wg_if), wgp->wgp_name);
2670 m_freem(n);
2671 goto out;
2672 }
2673 WG_DLOG("outsize=%u\n", (u_int)decrypted_len);
2674
2675 /* Packet is genuine. Reject it if a replay or just too old. */
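	/*
	 * Note: sliwin_check_fast above was only an unlocked pre-filter
	 * run before the expensive AEAD work; this sliwin_update is the
	 * authoritative sliding-window update, performed under the lock
	 * and only after the packet has authenticated.
	 */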
2676 mutex_enter(&wgs->wgs_recvwin->lock);
2677 error = sliwin_update(&wgs->wgs_recvwin->window,
2678 le64toh(wgmd->wgmd_counter));
2679 mutex_exit(&wgs->wgs_recvwin->lock);
2680 if (error) {
2681 WG_LOG_RATECHECK(&wgp->wgp_ppsratecheck, LOG_DEBUG,
2682 "%s: peer %s: replay or out-of-window packet: %"PRIu64"\n",
2683 if_name(&wg->wg_if), wgp->wgp_name,
2684 le64toh(wgmd->wgmd_counter));
2685 m_freem(n);
2686 goto out;
2687 }
2688
2689 /* We're done with m now; free it and chuck the pointers. */
2690 m_freem(m);
2691 m = NULL;
2692 wgmd = NULL;
2693
2694 /*
2695 * Validate the encapsulated packet header and get the address
2696 * family, or drop.
2697 */
2698 ok = wg_validate_inner_packet(decrypted_buf, decrypted_len, &af);
2699 if (!ok) {
2700 m_freem(n);
2701 goto out;
2702 }
2703
2704 /*
2705 * The packet is genuine. Update the peer's endpoint if the
2706 * source address changed.
2707 *
2708 * XXX How to prevent DoS by replaying genuine packets from the
2709 * wrong source address?
2710 */
2711 wg_update_endpoint_if_necessary(wgp, src);
2712
2713 /* Submit it into our network stack if routable. */
2714 ok = wg_validate_route(wg, wgp, af, decrypted_buf);
2715 if (ok) {
2716 wg->wg_ops->input(&wg->wg_if, n, af);
2717 } else {
2718 char addrstr[INET6_ADDRSTRLEN];
2719 memset(addrstr, 0, sizeof(addrstr));
2720 if (af == AF_INET) {
2721 const struct ip *ip = (const struct ip *)decrypted_buf;
2722 IN_PRINT(addrstr, &ip->ip_src);
2723 #ifdef INET6
2724 } else if (af == AF_INET6) {
2725 const struct ip6_hdr *ip6 =
2726 (const struct ip6_hdr *)decrypted_buf;
2727 IN6_PRINT(addrstr, &ip6->ip6_src);
2728 #endif
2729 }
2730 WG_LOG_RATECHECK(&wgp->wgp_ppsratecheck, LOG_DEBUG,
2731 "%s: peer %s: invalid source address (%s)\n",
2732 if_name(&wg->wg_if), wgp->wgp_name, addrstr);
2733 m_freem(n);
2734 /*
2735 		 * The inner address is invalid; however, the session itself is
2736 		 * valid, so continue with the session processing below.
2737 */
2738 }
2739 n = NULL;
2740
2741 /* Update the state machine if necessary. */
2742 if (__predict_false(state == WGS_STATE_INIT_PASSIVE)) {
2743 /*
2744 * We were waiting for the initiator to send their
2745 * first data transport message, and that has happened.
2746 * Schedule a task to establish this session.
2747 */
2748 wg_schedule_peer_task(wgp, WGP_TASK_ESTABLISH_SESSION);
2749 } else {
2750 if (__predict_false(wg_need_to_send_init_message(wgs))) {
2751 wg_schedule_peer_task(wgp, WGP_TASK_SEND_INIT_MESSAGE);
2752 }
2753 /*
2754 * [W] 6.5 Passive Keepalive
2755 * "If a peer has received a validly-authenticated transport
2756 * data message (section 5.4.6), but does not have any packets
2757 * itself to send back for KEEPALIVE-TIMEOUT seconds, it sends
2758 * a keepalive message."
2759 */
2760 WG_DLOG("time_uptime=%ju wgs_time_last_data_sent=%ju\n",
2761 (uintmax_t)time_uptime,
2762 (uintmax_t)wgs->wgs_time_last_data_sent);
2763 if ((time_uptime - wgs->wgs_time_last_data_sent) >=
2764 wg_keepalive_timeout) {
2765 WG_TRACE("Schedule sending keepalive message");
2766 /*
2767 * We can't send a keepalive message here to avoid
2768 * a deadlock; we already hold the solock of a socket
2769 * that is used to send the message.
2770 */
2771 wg_schedule_peer_task(wgp,
2772 WGP_TASK_SEND_KEEPALIVE_MESSAGE);
2773 }
2774 }
2775 out:
2776 wg_put_session(wgs, &psref);
2777 m_freem(m);
2778 if (free_encrypted_buf)
2779 kmem_intr_free(encrypted_buf, encrypted_len);
2780 }
2781
2782 static void __noinline
2783 wg_handle_msg_cookie(struct wg_softc *wg, const struct wg_msg_cookie *wgmc)
2784 {
2785 struct wg_session *wgs;
2786 struct wg_peer *wgp;
2787 struct psref psref;
2788 int error;
2789 uint8_t key[WG_HASH_LEN];
2790 uint8_t cookie[WG_COOKIE_LEN];
2791
2792 WG_TRACE("cookie msg received");
2793
2794 /* Find the putative session. */
2795 wgs = wg_lookup_session_by_index(wg, wgmc->wgmc_receiver, &psref);
2796 if (wgs == NULL) {
2797 WG_TRACE("No session found");
2798 return;
2799 }
2800
2801 /* Lock the peer so we can update the cookie state. */
2802 wgp = wgs->wgs_peer;
2803 mutex_enter(wgp->wgp_lock);
2804
2805 if (!wgp->wgp_last_sent_mac1_valid) {
2806 WG_TRACE("No valid mac1 sent (or expired)");
2807 goto out;
2808 }
2809
2810 /* Decrypt the cookie and store it for later handshake retry. */
2811 wg_algo_mac_cookie(key, sizeof(key), wgp->wgp_pubkey,
2812 sizeof(wgp->wgp_pubkey));
2813 error = wg_algo_xaead_dec(cookie, sizeof(cookie), key,
2814 wgmc->wgmc_cookie, sizeof(wgmc->wgmc_cookie),
2815 wgp->wgp_last_sent_mac1, sizeof(wgp->wgp_last_sent_mac1),
2816 wgmc->wgmc_salt);
2817 if (error != 0) {
2818 WG_LOG_RATECHECK(&wgp->wgp_ppsratecheck, LOG_DEBUG,
2819 		    "%s: peer %s: wg_algo_xaead_dec for cookie failed: "
2820 "error=%d\n", if_name(&wg->wg_if), wgp->wgp_name, error);
2821 goto out;
2822 }
2823 /*
2824 * [W] 6.6: Interaction with Cookie Reply System
2825 * "it should simply store the decrypted cookie value from the cookie
2826 * reply message, and wait for the expiration of the REKEY-TIMEOUT
2827 * timer for retrying a handshake initiation message."
2828 */
2829 wgp->wgp_latest_cookie_time = time_uptime;
2830 memcpy(wgp->wgp_latest_cookie, cookie, sizeof(wgp->wgp_latest_cookie));
2831 out:
2832 mutex_exit(wgp->wgp_lock);
2833 wg_put_session(wgs, &psref);
2834 }
2835
2836 static struct mbuf *
2837 wg_validate_msg_header(struct wg_softc *wg, struct mbuf *m)
2838 {
2839 struct wg_msg wgm;
2840 size_t mbuflen;
2841 size_t msglen;
2842
2843 /*
2844 * Get the mbuf chain length. It is already guaranteed, by
2845 * wg_overudp_cb, to be large enough for a struct wg_msg.
2846 */
2847 mbuflen = m_length(m);
2848 KASSERT(mbuflen >= sizeof(struct wg_msg));
2849
2850 /*
2851 * Copy the message header (32-bit message type) out -- we'll
2852 * worry about contiguity and alignment later.
2853 */
2854 m_copydata(m, 0, sizeof(wgm), &wgm);
2855 switch (le32toh(wgm.wgm_type)) {
2856 case WG_MSG_TYPE_INIT:
2857 msglen = sizeof(struct wg_msg_init);
2858 break;
2859 case WG_MSG_TYPE_RESP:
2860 msglen = sizeof(struct wg_msg_resp);
2861 break;
2862 case WG_MSG_TYPE_COOKIE:
2863 msglen = sizeof(struct wg_msg_cookie);
2864 break;
2865 case WG_MSG_TYPE_DATA:
2866 msglen = sizeof(struct wg_msg_data);
2867 break;
2868 default:
2869 WG_LOG_RATECHECK(&wg->wg_ppsratecheck, LOG_DEBUG,
2870 "%s: Unexpected msg type: %u\n", if_name(&wg->wg_if),
2871 le32toh(wgm.wgm_type));
2872 goto error;
2873 }
2874
2875 /* Verify the mbuf chain is long enough for this type of message. */
2876 if (__predict_false(mbuflen < msglen)) {
2877 WG_DLOG("Invalid msg size: mbuflen=%lu type=%u\n", mbuflen,
2878 le32toh(wgm.wgm_type));
2879 goto error;
2880 }
2881
2882 /* Make the message header contiguous if necessary. */
2883 if (__predict_false(m->m_len < msglen)) {
2884 m = m_pullup(m, msglen);
2885 if (m == NULL)
2886 return NULL;
2887 }
2888
2889 return m;
2890
2891 error:
2892 m_freem(m);
2893 return NULL;
2894 }
2895
2896 static void
2897 wg_handle_packet(struct wg_softc *wg, struct mbuf *m,
2898 const struct sockaddr *src)
2899 {
2900 struct wg_msg *wgm;
2901
2902 KASSERT(curlwp->l_pflag & LP_BOUND);
2903
2904 m = wg_validate_msg_header(wg, m);
2905 if (__predict_false(m == NULL))
2906 return;
2907
2908 KASSERT(m->m_len >= sizeof(struct wg_msg));
2909 wgm = mtod(m, struct wg_msg *);
2910 switch (le32toh(wgm->wgm_type)) {
2911 case WG_MSG_TYPE_INIT:
2912 wg_handle_msg_init(wg, (struct wg_msg_init *)wgm, src);
2913 break;
2914 case WG_MSG_TYPE_RESP:
2915 wg_handle_msg_resp(wg, (struct wg_msg_resp *)wgm, src);
2916 break;
2917 case WG_MSG_TYPE_COOKIE:
2918 wg_handle_msg_cookie(wg, (struct wg_msg_cookie *)wgm);
2919 break;
2920 case WG_MSG_TYPE_DATA:
2921 wg_handle_msg_data(wg, m, src);
2922 /* wg_handle_msg_data frees m for us */
2923 return;
2924 default:
2925 panic("invalid message type: %d", le32toh(wgm->wgm_type));
2926 }
2927
2928 m_freem(m);
2929 }
2930
2931 static void
2932 wg_receive_packets(struct wg_softc *wg, const int af)
2933 {
2934
2935 for (;;) {
2936 int error, flags;
2937 struct socket *so;
2938 struct mbuf *m = NULL;
2939 struct uio dummy_uio;
2940 struct mbuf *paddr = NULL;
2941 struct sockaddr *src;
2942
2943 so = wg_get_so_by_af(wg, af);
2944 flags = MSG_DONTWAIT;
2945 dummy_uio.uio_resid = 1000000000;
2946
2947 error = so->so_receive(so, &paddr, &dummy_uio, &m, NULL,
2948 &flags);
2949 if (error || m == NULL) {
2950 //if (error == EWOULDBLOCK)
2951 return;
2952 }
2953
2954 KASSERT(paddr != NULL);
2955 KASSERT(paddr->m_len >= sizeof(struct sockaddr));
2956 src = mtod(paddr, struct sockaddr *);
2957
2958 wg_handle_packet(wg, m, src);
2959 }
2960 }
2961
2962 static void
2963 wg_get_peer(struct wg_peer *wgp, struct psref *psref)
2964 {
2965
2966 psref_acquire(psref, &wgp->wgp_psref, wg_psref_class);
2967 }
2968
2969 static void
2970 wg_put_peer(struct wg_peer *wgp, struct psref *psref)
2971 {
2972
2973 psref_release(psref, &wgp->wgp_psref, wg_psref_class);
2974 }
2975
2976 static void
2977 wg_task_send_init_message(struct wg_softc *wg, struct wg_peer *wgp)
2978 {
2979 struct wg_session *wgs;
2980
2981 WG_TRACE("WGP_TASK_SEND_INIT_MESSAGE");
2982
2983 KASSERT(mutex_owned(wgp->wgp_lock));
2984
2985 if (!atomic_load_acquire(&wgp->wgp_endpoint_available)) {
2986 WGLOG(LOG_DEBUG, "%s: No endpoint available\n",
2987 if_name(&wg->wg_if));
2988 /* XXX should do something? */
2989 return;
2990 }
2991
2992 wgs = wgp->wgp_session_stable;
2993 if (wgs->wgs_state == WGS_STATE_UNKNOWN) {
2994 /* XXX What if the unstable session is already INIT_ACTIVE? */
2995 wg_send_handshake_msg_init(wg, wgp);
2996 } else {
2997 /* rekey */
2998 wgs = wgp->wgp_session_unstable;
2999 if (wgs->wgs_state != WGS_STATE_INIT_ACTIVE)
3000 wg_send_handshake_msg_init(wg, wgp);
3001 }
3002 }
3003
3004 static void
3005 wg_task_retry_handshake(struct wg_softc *wg, struct wg_peer *wgp)
3006 {
3007 struct wg_session *wgs;
3008
3009 WG_TRACE("WGP_TASK_RETRY_HANDSHAKE");
3010
3011 KASSERT(mutex_owned(wgp->wgp_lock));
3012 KASSERT(wgp->wgp_handshake_start_time != 0);
3013
3014 wgs = wgp->wgp_session_unstable;
3015 if (wgs->wgs_state != WGS_STATE_INIT_ACTIVE)
3016 return;
3017
3018 /*
3019 * XXX no real need to assign a new index here, but we do need
3020 * to transition to UNKNOWN temporarily
3021 */
3022 wg_put_session_index(wg, wgs);
3023
3024 /* [W] 6.4 Handshake Initiation Retransmission */
3025 if ((time_uptime - wgp->wgp_handshake_start_time) >
3026 wg_rekey_attempt_time) {
3027 /* Give up handshaking */
3028 wgp->wgp_handshake_start_time = 0;
3029 WG_TRACE("give up");
3030
3031 /*
3032 		 * If a new data packet arrives, handshaking will be retried
3033 		 * and a new session will be established at that time;
3034 		 * however, we don't want the pending packets to be sent then.
3035 */
3036 wg_purge_pending_packets(wgp);
3037 return;
3038 }
3039
3040 wg_task_send_init_message(wg, wgp);
3041 }
3042
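/*
 * wg_task_establish_session(wg, wgp)
 *
 *	Scheduled from wg_handle_msg_data when the first authenticated
 *	data message arrives on an INIT_PASSIVE session: mark the
 *	session ESTABLISHED with us as responder, swap it into the
 *	stable slot, send any packet that was queued up waiting for the
 *	handshake, and retire the previous stable session.
 */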
3043 static void
3044 wg_task_establish_session(struct wg_softc *wg, struct wg_peer *wgp)
3045 {
3046 struct wg_session *wgs, *wgs_prev;
3047 struct mbuf *m;
3048
3049 KASSERT(mutex_owned(wgp->wgp_lock));
3050
3051 wgs = wgp->wgp_session_unstable;
3052 if (wgs->wgs_state != WGS_STATE_INIT_PASSIVE)
3053 /* XXX Can this happen? */
3054 return;
3055
3056 wgs->wgs_state = WGS_STATE_ESTABLISHED;
3057 wgs->wgs_time_established = time_uptime;
3058 wgs->wgs_time_last_data_sent = 0;
3059 wgs->wgs_is_initiator = false;
3060 WG_TRACE("WGS_STATE_ESTABLISHED");
3061
3062 wg_swap_sessions(wgp);
3063 KASSERT(wgs == wgp->wgp_session_stable);
3064 wgs_prev = wgp->wgp_session_unstable;
3065 getnanotime(&wgp->wgp_last_handshake_time);
3066 wgp->wgp_handshake_start_time = 0;
3067 wgp->wgp_last_sent_mac1_valid = false;
3068 wgp->wgp_last_sent_cookie_valid = false;
3069
3070 /* If we had a data packet queued up, send it. */
3071 if ((m = atomic_swap_ptr(&wgp->wgp_pending, NULL)) != NULL) {
3072 kpreempt_disable();
3073 const uint32_t h = curcpu()->ci_index; // pktq_rps_hash(m)
3074 M_SETCTX(m, wgp);
3075 if (__predict_false(!pktq_enqueue(wg_pktq, m, h))) {
3076 WGLOG(LOG_ERR, "%s: pktq full, dropping\n",
3077 if_name(&wg->wg_if));
3078 m_freem(m);
3079 }
3080 kpreempt_enable();
3081 }
3082
3083 if (wgs_prev->wgs_state == WGS_STATE_ESTABLISHED) {
3084 /* Wait for wg_get_stable_session to drain. */
3085 pserialize_perform(wgp->wgp_psz);
3086
3087 /* Transition ESTABLISHED->DESTROYING. */
3088 wgs_prev->wgs_state = WGS_STATE_DESTROYING;
3089
3090 /* We can't destroy the old session immediately */
3091 wg_schedule_session_dtor_timer(wgp);
3092 } else {
3093 KASSERTMSG(wgs_prev->wgs_state == WGS_STATE_UNKNOWN,
3094 "state=%d", wgs_prev->wgs_state);
3095 wg_clear_states(wgs_prev);
3096 wgs_prev->wgs_state = WGS_STATE_UNKNOWN;
3097 }
3098 }
3099
3100 static void
3101 wg_task_endpoint_changed(struct wg_softc *wg, struct wg_peer *wgp)
3102 {
3103
3104 WG_TRACE("WGP_TASK_ENDPOINT_CHANGED");
3105
3106 KASSERT(mutex_owned(wgp->wgp_lock));
3107
3108 if (atomic_load_relaxed(&wgp->wgp_endpoint_changing)) {
3109 pserialize_perform(wgp->wgp_psz);
3110 mutex_exit(wgp->wgp_lock);
3111 psref_target_destroy(&wgp->wgp_endpoint0->wgsa_psref,
3112 wg_psref_class);
3113 psref_target_init(&wgp->wgp_endpoint0->wgsa_psref,
3114 wg_psref_class);
3115 mutex_enter(wgp->wgp_lock);
3116 atomic_store_release(&wgp->wgp_endpoint_changing, 0);
3117 }
3118 }
3119
3120 static void
3121 wg_task_send_keepalive_message(struct wg_softc *wg, struct wg_peer *wgp)
3122 {
3123 struct wg_session *wgs;
3124
3125 WG_TRACE("WGP_TASK_SEND_KEEPALIVE_MESSAGE");
3126
3127 KASSERT(mutex_owned(wgp->wgp_lock));
3128
3129 wgs = wgp->wgp_session_stable;
3130 if (wgs->wgs_state != WGS_STATE_ESTABLISHED)
3131 return;
3132
3133 wg_send_keepalive_msg(wgp, wgs);
3134 }
3135
3136 static void
3137 wg_task_destroy_prev_session(struct wg_softc *wg, struct wg_peer *wgp)
3138 {
3139 struct wg_session *wgs;
3140
3141 WG_TRACE("WGP_TASK_DESTROY_PREV_SESSION");
3142
3143 KASSERT(mutex_owned(wgp->wgp_lock));
3144
3145 wgs = wgp->wgp_session_unstable;
3146 if (wgs->wgs_state == WGS_STATE_DESTROYING) {
3147 wg_put_session_index(wg, wgs);
3148 }
3149 }
3150
3151 static void
3152 wg_peer_work(struct work *wk, void *cookie)
3153 {
3154 struct wg_peer *wgp = container_of(wk, struct wg_peer, wgp_work);
3155 struct wg_softc *wg = wgp->wgp_sc;
3156 unsigned int tasks;
3157
3158 mutex_enter(wgp->wgp_intr_lock);
3159 while ((tasks = wgp->wgp_tasks) != 0) {
3160 wgp->wgp_tasks = 0;
3161 mutex_exit(wgp->wgp_intr_lock);
3162
3163 mutex_enter(wgp->wgp_lock);
3164 if (ISSET(tasks, WGP_TASK_SEND_INIT_MESSAGE))
3165 wg_task_send_init_message(wg, wgp);
3166 if (ISSET(tasks, WGP_TASK_RETRY_HANDSHAKE))
3167 wg_task_retry_handshake(wg, wgp);
3168 if (ISSET(tasks, WGP_TASK_ESTABLISH_SESSION))
3169 wg_task_establish_session(wg, wgp);
3170 if (ISSET(tasks, WGP_TASK_ENDPOINT_CHANGED))
3171 wg_task_endpoint_changed(wg, wgp);
3172 if (ISSET(tasks, WGP_TASK_SEND_KEEPALIVE_MESSAGE))
3173 wg_task_send_keepalive_message(wg, wgp);
3174 if (ISSET(tasks, WGP_TASK_DESTROY_PREV_SESSION))
3175 wg_task_destroy_prev_session(wg, wgp);
3176 mutex_exit(wgp->wgp_lock);
3177
3178 mutex_enter(wgp->wgp_intr_lock);
3179 }
3180 mutex_exit(wgp->wgp_intr_lock);
3181 }
3182
3183 static void
3184 wg_job(struct threadpool_job *job)
3185 {
3186 struct wg_softc *wg = container_of(job, struct wg_softc, wg_job);
3187 int bound, upcalls;
3188
3189 mutex_enter(wg->wg_intr_lock);
3190 while ((upcalls = wg->wg_upcalls) != 0) {
3191 wg->wg_upcalls = 0;
3192 mutex_exit(wg->wg_intr_lock);
3193 bound = curlwp_bind();
3194 if (ISSET(upcalls, WG_UPCALL_INET))
3195 wg_receive_packets(wg, AF_INET);
3196 if (ISSET(upcalls, WG_UPCALL_INET6))
3197 wg_receive_packets(wg, AF_INET6);
3198 curlwp_bindx(bound);
3199 mutex_enter(wg->wg_intr_lock);
3200 }
3201 threadpool_job_done(job);
3202 mutex_exit(wg->wg_intr_lock);
3203 }
3204
3205 static int
3206 wg_bind_port(struct wg_softc *wg, const uint16_t port)
3207 {
3208 int error;
3209 uint16_t old_port = wg->wg_listen_port;
3210
3211 if (port != 0 && old_port == port)
3212 return 0;
3213
3214 struct sockaddr_in _sin, *sin = &_sin;
3215 sin->sin_len = sizeof(*sin);
3216 sin->sin_family = AF_INET;
3217 sin->sin_addr.s_addr = INADDR_ANY;
3218 sin->sin_port = htons(port);
3219
3220 error = sobind(wg->wg_so4, sintosa(sin), curlwp);
3221 if (error != 0)
3222 return error;
3223
3224 #ifdef INET6
3225 struct sockaddr_in6 _sin6, *sin6 = &_sin6;
3226 sin6->sin6_len = sizeof(*sin6);
3227 sin6->sin6_family = AF_INET6;
3228 sin6->sin6_addr = in6addr_any;
3229 sin6->sin6_port = htons(port);
3230
3231 error = sobind(wg->wg_so6, sin6tosa(sin6), curlwp);
3232 if (error != 0)
3233 return error;
3234 #endif
3235
3236 wg->wg_listen_port = port;
3237
3238 return 0;
3239 }
3240
3241 static void
3242 wg_so_upcall(struct socket *so, void *cookie, int events, int waitflag)
3243 {
3244 struct wg_softc *wg = cookie;
3245 int reason;
3246
3247 reason = (so->so_proto->pr_domain->dom_family == AF_INET) ?
3248 WG_UPCALL_INET :
3249 WG_UPCALL_INET6;
3250
3251 mutex_enter(wg->wg_intr_lock);
3252 wg->wg_upcalls |= reason;
3253 threadpool_schedule_job(wg->wg_threadpool, &wg->wg_job);
3254 mutex_exit(wg->wg_intr_lock);
3255 }
3256
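/*
 * wg_overudp_cb(mp, offset, so, src, arg)
 *
 *	Called from the UDP input path for every datagram received on
 *	one of our sockets, before it is queued on the socket buffer.
 *	Data messages are handled immediately here to keep latency low;
 *	handshake messages are left for so_receive and processed later
 *	by wg_receive_packets from the worker thread, since they need
 *	expensive public-key operations.  Returns 1 after consuming the
 *	mbuf, 0 to pass it through to so_receive, and -1 after dropping
 *	it.
 */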
3257 static int
3258 wg_overudp_cb(struct mbuf **mp, int offset, struct socket *so,
3259 struct sockaddr *src, void *arg)
3260 {
3261 struct wg_softc *wg = arg;
3262 struct wg_msg wgm;
3263 struct mbuf *m = *mp;
3264
3265 WG_TRACE("enter");
3266
3267 /* Verify the mbuf chain is long enough to have a wg msg header. */
3268 KASSERT(offset <= m_length(m));
3269 if (__predict_false(m_length(m) - offset < sizeof(struct wg_msg))) {
3270 /* drop on the floor */
3271 m_freem(m);
3272 return -1;
3273 }
3274
3275 /*
3276 * Copy the message header (32-bit message type) out -- we'll
3277 * worry about contiguity and alignment later.
3278 */
3279 m_copydata(m, offset, sizeof(struct wg_msg), &wgm);
3280 WG_DLOG("type=%d\n", le32toh(wgm.wgm_type));
3281
3282 /*
3283 * Handle DATA packets promptly as they arrive. Other packets
3284 * may require expensive public-key crypto and are not as
3285 * sensitive to latency, so defer them to the worker thread.
3286 */
3287 switch (le32toh(wgm.wgm_type)) {
3288 case WG_MSG_TYPE_DATA:
3289 /* handle immediately */
3290 m_adj(m, offset);
3291 if (__predict_false(m->m_len < sizeof(struct wg_msg_data))) {
3292 m = m_pullup(m, sizeof(struct wg_msg_data));
3293 if (m == NULL)
3294 return -1;
3295 }
3296 wg_handle_msg_data(wg, m, src);
3297 *mp = NULL;
3298 return 1;
3299 case WG_MSG_TYPE_INIT:
3300 case WG_MSG_TYPE_RESP:
3301 case WG_MSG_TYPE_COOKIE:
3302 /* pass through to so_receive in wg_receive_packets */
3303 return 0;
3304 default:
3305 /* drop on the floor */
3306 m_freem(m);
3307 return -1;
3308 }
3309 }
3310
3311 static int
3312 wg_socreate(struct wg_softc *wg, int af, struct socket **sop)
3313 {
3314 int error;
3315 struct socket *so;
3316
3317 error = socreate(af, &so, SOCK_DGRAM, 0, curlwp, NULL);
3318 if (error != 0)
3319 return error;
3320
3321 solock(so);
3322 so->so_upcallarg = wg;
3323 so->so_upcall = wg_so_upcall;
3324 so->so_rcv.sb_flags |= SB_UPCALL;
3325 inpcb_register_overudp_cb(sotoinpcb(so), wg_overudp_cb, wg);
3326 sounlock(so);
3327
3328 *sop = so;
3329
3330 return 0;
3331 }
3332
3333 static bool
3334 wg_session_hit_limits(struct wg_session *wgs)
3335 {
3336
3337 /*
3338 * [W] 6.2: Transport Message Limits
3339 * "After REJECT-AFTER-MESSAGES transport data messages or after the
3340 * current secure session is REJECT-AFTER-TIME seconds old, whichever
3341 * comes first, WireGuard will refuse to send any more transport data
3342 * messages using the current secure session, ..."
3343 */
3344 KASSERT(wgs->wgs_time_established != 0);
3345 if ((time_uptime - wgs->wgs_time_established) > wg_reject_after_time) {
3346 WG_DLOG("The session hits REJECT_AFTER_TIME\n");
3347 return true;
3348 } else if (wg_session_get_send_counter(wgs) >
3349 wg_reject_after_messages) {
3350 WG_DLOG("The session hits REJECT_AFTER_MESSAGES\n");
3351 return true;
3352 }
3353
3354 return false;
3355 }
3356
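/*
 * wgintr(cookie)
 *
 *	Software interrupt handler for the transmit packet queue: for
 *	each queued mbuf, look up the peer's stable session; if there
 *	is none, or it has hit its transport limits, schedule a new
 *	handshake and drop the packet, otherwise send it as a data
 *	message on that session.
 */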
3357 static void
3358 wgintr(void *cookie)
3359 {
3360 struct wg_peer *wgp;
3361 struct wg_session *wgs;
3362 struct mbuf *m;
3363 struct psref psref;
3364
3365 while ((m = pktq_dequeue(wg_pktq)) != NULL) {
3366 wgp = M_GETCTX(m, struct wg_peer *);
3367 if ((wgs = wg_get_stable_session(wgp, &psref)) == NULL) {
3368 WG_TRACE("no stable session");
3369 wg_schedule_peer_task(wgp, WGP_TASK_SEND_INIT_MESSAGE);
3370 goto next0;
3371 }
3372 if (__predict_false(wg_session_hit_limits(wgs))) {
3373 WG_TRACE("stable session hit limits");
3374 wg_schedule_peer_task(wgp, WGP_TASK_SEND_INIT_MESSAGE);
3375 goto next1;
3376 }
3377 wg_send_data_msg(wgp, wgs, m);
3378 m = NULL; /* consumed */
3379 next1: wg_put_session(wgs, &psref);
3380 next0: m_freem(m);
3381 /* XXX Yield to avoid userland starvation? */
3382 }
3383 }
3384
3385 static void
3386 wg_rekey_timer(void *arg)
3387 {
3388 struct wg_peer *wgp = arg;
3389
3390 wg_schedule_peer_task(wgp, WGP_TASK_SEND_INIT_MESSAGE);
3391 }
3392
3393 static void
3394 wg_purge_pending_packets(struct wg_peer *wgp)
3395 {
3396 struct mbuf *m;
3397
3398 m = atomic_swap_ptr(&wgp->wgp_pending, NULL);
3399 m_freem(m);
3400 pktq_barrier(wg_pktq);
3401 }
3402
3403 static void
3404 wg_handshake_timeout_timer(void *arg)
3405 {
3406 struct wg_peer *wgp = arg;
3407
3408 WG_TRACE("enter");
3409
3410 wg_schedule_peer_task(wgp, WGP_TASK_RETRY_HANDSHAKE);
3411 }
3412
3413 static struct wg_peer *
3414 wg_alloc_peer(struct wg_softc *wg)
3415 {
3416 struct wg_peer *wgp;
3417
3418 wgp = kmem_zalloc(sizeof(*wgp), KM_SLEEP);
3419
3420 wgp->wgp_sc = wg;
3421 callout_init(&wgp->wgp_rekey_timer, CALLOUT_MPSAFE);
3422 callout_setfunc(&wgp->wgp_rekey_timer, wg_rekey_timer, wgp);
3423 callout_init(&wgp->wgp_handshake_timeout_timer, CALLOUT_MPSAFE);
3424 callout_setfunc(&wgp->wgp_handshake_timeout_timer,
3425 wg_handshake_timeout_timer, wgp);
3426 callout_init(&wgp->wgp_session_dtor_timer, CALLOUT_MPSAFE);
3427 callout_setfunc(&wgp->wgp_session_dtor_timer,
3428 wg_session_dtor_timer, wgp);
3429 PSLIST_ENTRY_INIT(wgp, wgp_peerlist_entry);
3430 wgp->wgp_endpoint_changing = false;
3431 wgp->wgp_endpoint_available = false;
3432 wgp->wgp_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NONE);
3433 wgp->wgp_intr_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_SOFTNET);
3434 wgp->wgp_psz = pserialize_create();
3435 psref_target_init(&wgp->wgp_psref, wg_psref_class);
3436
3437 wgp->wgp_endpoint = kmem_zalloc(sizeof(*wgp->wgp_endpoint), KM_SLEEP);
3438 wgp->wgp_endpoint0 = kmem_zalloc(sizeof(*wgp->wgp_endpoint0), KM_SLEEP);
3439 psref_target_init(&wgp->wgp_endpoint->wgsa_psref, wg_psref_class);
3440 psref_target_init(&wgp->wgp_endpoint0->wgsa_psref, wg_psref_class);
3441
3442 struct wg_session *wgs;
3443 wgp->wgp_session_stable =
3444 kmem_zalloc(sizeof(*wgp->wgp_session_stable), KM_SLEEP);
3445 wgp->wgp_session_unstable =
3446 kmem_zalloc(sizeof(*wgp->wgp_session_unstable), KM_SLEEP);
3447 wgs = wgp->wgp_session_stable;
3448 wgs->wgs_peer = wgp;
3449 wgs->wgs_state = WGS_STATE_UNKNOWN;
3450 psref_target_init(&wgs->wgs_psref, wg_psref_class);
3451 #ifndef __HAVE_ATOMIC64_LOADSTORE
3452 mutex_init(&wgs->wgs_send_counter_lock, MUTEX_DEFAULT, IPL_SOFTNET);
3453 #endif
3454 wgs->wgs_recvwin = kmem_zalloc(sizeof(*wgs->wgs_recvwin), KM_SLEEP);
3455 mutex_init(&wgs->wgs_recvwin->lock, MUTEX_DEFAULT, IPL_SOFTNET);
3456
3457 wgs = wgp->wgp_session_unstable;
3458 wgs->wgs_peer = wgp;
3459 wgs->wgs_state = WGS_STATE_UNKNOWN;
3460 psref_target_init(&wgs->wgs_psref, wg_psref_class);
3461 #ifndef __HAVE_ATOMIC64_LOADSTORE
3462 mutex_init(&wgs->wgs_send_counter_lock, MUTEX_DEFAULT, IPL_SOFTNET);
3463 #endif
3464 wgs->wgs_recvwin = kmem_zalloc(sizeof(*wgs->wgs_recvwin), KM_SLEEP);
3465 mutex_init(&wgs->wgs_recvwin->lock, MUTEX_DEFAULT, IPL_SOFTNET);
3466
3467 return wgp;
3468 }
3469
3470 static void
3471 wg_destroy_peer(struct wg_peer *wgp)
3472 {
3473 struct wg_session *wgs;
3474 struct wg_softc *wg = wgp->wgp_sc;
3475
3476 /* Prevent new packets from this peer on any source address. */
3477 rw_enter(wg->wg_rwlock, RW_WRITER);
3478 for (int i = 0; i < wgp->wgp_n_allowedips; i++) {
3479 struct wg_allowedip *wga = &wgp->wgp_allowedips[i];
3480 struct radix_node_head *rnh = wg_rnh(wg, wga->wga_family);
3481 struct radix_node *rn;
3482
3483 KASSERT(rnh != NULL);
3484 rn = rnh->rnh_deladdr(&wga->wga_sa_addr,
3485 &wga->wga_sa_mask, rnh);
3486 if (rn == NULL) {
3487 char addrstr[128];
3488 sockaddr_format(&wga->wga_sa_addr, addrstr,
3489 sizeof(addrstr));
3490 WGLOG(LOG_WARNING, "%s: Couldn't delete %s",
3491 if_name(&wg->wg_if), addrstr);
3492 }
3493 }
3494 rw_exit(wg->wg_rwlock);
3495
3496 /* Purge pending packets. */
3497 wg_purge_pending_packets(wgp);
3498
3499 /* Halt all packet processing and timeouts. */
3500 callout_halt(&wgp->wgp_rekey_timer, NULL);
3501 callout_halt(&wgp->wgp_handshake_timeout_timer, NULL);
3502 callout_halt(&wgp->wgp_session_dtor_timer, NULL);
3503
3504 /* Wait for any queued work to complete. */
3505 workqueue_wait(wg_wq, &wgp->wgp_work);
3506
3507 wgs = wgp->wgp_session_unstable;
3508 if (wgs->wgs_state != WGS_STATE_UNKNOWN) {
3509 mutex_enter(wgp->wgp_lock);
3510 wg_destroy_session(wg, wgs);
3511 mutex_exit(wgp->wgp_lock);
3512 }
3513 mutex_destroy(&wgs->wgs_recvwin->lock);
3514 kmem_free(wgs->wgs_recvwin, sizeof(*wgs->wgs_recvwin));
3515 #ifndef __HAVE_ATOMIC64_LOADSTORE
3516 mutex_destroy(&wgs->wgs_send_counter_lock);
3517 #endif
3518 kmem_free(wgs, sizeof(*wgs));
3519
3520 wgs = wgp->wgp_session_stable;
3521 if (wgs->wgs_state != WGS_STATE_UNKNOWN) {
3522 mutex_enter(wgp->wgp_lock);
3523 wg_destroy_session(wg, wgs);
3524 mutex_exit(wgp->wgp_lock);
3525 }
3526 mutex_destroy(&wgs->wgs_recvwin->lock);
3527 kmem_free(wgs->wgs_recvwin, sizeof(*wgs->wgs_recvwin));
3528 #ifndef __HAVE_ATOMIC64_LOADSTORE
3529 mutex_destroy(&wgs->wgs_send_counter_lock);
3530 #endif
3531 kmem_free(wgs, sizeof(*wgs));
3532
3533 psref_target_destroy(&wgp->wgp_endpoint->wgsa_psref, wg_psref_class);
3534 psref_target_destroy(&wgp->wgp_endpoint0->wgsa_psref, wg_psref_class);
3535 kmem_free(wgp->wgp_endpoint, sizeof(*wgp->wgp_endpoint));
3536 kmem_free(wgp->wgp_endpoint0, sizeof(*wgp->wgp_endpoint0));
3537
3538 pserialize_destroy(wgp->wgp_psz);
3539 mutex_obj_free(wgp->wgp_intr_lock);
3540 mutex_obj_free(wgp->wgp_lock);
3541
3542 kmem_free(wgp, sizeof(*wgp));
3543 }
3544
3545 static void
3546 wg_destroy_all_peers(struct wg_softc *wg)
3547 {
3548 struct wg_peer *wgp, *wgp0 __diagused;
3549 void *garbage_byname, *garbage_bypubkey;
3550
3551 restart:
3552 garbage_byname = garbage_bypubkey = NULL;
3553 mutex_enter(wg->wg_lock);
3554 WG_PEER_WRITER_FOREACH(wgp, wg) {
3555 if (wgp->wgp_name[0]) {
3556 wgp0 = thmap_del(wg->wg_peers_byname, wgp->wgp_name,
3557 strlen(wgp->wgp_name));
3558 KASSERT(wgp0 == wgp);
3559 garbage_byname = thmap_stage_gc(wg->wg_peers_byname);
3560 }
3561 wgp0 = thmap_del(wg->wg_peers_bypubkey, wgp->wgp_pubkey,
3562 sizeof(wgp->wgp_pubkey));
3563 KASSERT(wgp0 == wgp);
3564 garbage_bypubkey = thmap_stage_gc(wg->wg_peers_bypubkey);
3565 WG_PEER_WRITER_REMOVE(wgp);
3566 wg->wg_npeers--;
3567 mutex_enter(wgp->wgp_lock);
3568 pserialize_perform(wgp->wgp_psz);
3569 mutex_exit(wgp->wgp_lock);
3570 PSLIST_ENTRY_DESTROY(wgp, wgp_peerlist_entry);
3571 break;
3572 }
3573 mutex_exit(wg->wg_lock);
3574
3575 if (wgp == NULL)
3576 return;
3577
3578 psref_target_destroy(&wgp->wgp_psref, wg_psref_class);
3579
3580 wg_destroy_peer(wgp);
3581 thmap_gc(wg->wg_peers_byname, garbage_byname);
3582 thmap_gc(wg->wg_peers_bypubkey, garbage_bypubkey);
3583
3584 goto restart;
3585 }
3586
3587 static int
3588 wg_destroy_peer_name(struct wg_softc *wg, const char *name)
3589 {
3590 struct wg_peer *wgp, *wgp0 __diagused;
3591 void *garbage_byname, *garbage_bypubkey;
3592
3593 mutex_enter(wg->wg_lock);
3594 wgp = thmap_del(wg->wg_peers_byname, name, strlen(name));
3595 if (wgp != NULL) {
3596 wgp0 = thmap_del(wg->wg_peers_bypubkey, wgp->wgp_pubkey,
3597 sizeof(wgp->wgp_pubkey));
3598 KASSERT(wgp0 == wgp);
3599 garbage_byname = thmap_stage_gc(wg->wg_peers_byname);
3600 garbage_bypubkey = thmap_stage_gc(wg->wg_peers_bypubkey);
3601 WG_PEER_WRITER_REMOVE(wgp);
3602 wg->wg_npeers--;
3603 if (wg->wg_npeers == 0)
3604 if_link_state_change(&wg->wg_if, LINK_STATE_DOWN);
3605 mutex_enter(wgp->wgp_lock);
3606 pserialize_perform(wgp->wgp_psz);
3607 mutex_exit(wgp->wgp_lock);
3608 PSLIST_ENTRY_DESTROY(wgp, wgp_peerlist_entry);
3609 }
3610 mutex_exit(wg->wg_lock);
3611
3612 if (wgp == NULL)
3613 return ENOENT;
3614
3615 psref_target_destroy(&wgp->wgp_psref, wg_psref_class);
3616
3617 wg_destroy_peer(wgp);
3618 thmap_gc(wg->wg_peers_byname, garbage_byname);
3619 thmap_gc(wg->wg_peers_bypubkey, garbage_bypubkey);
3620
3621 return 0;
3622 }
3623
3624 static int
3625 wg_if_attach(struct wg_softc *wg)
3626 {
3627
3628 wg->wg_if.if_addrlen = 0;
3629 wg->wg_if.if_mtu = WG_MTU;
3630 wg->wg_if.if_flags = IFF_MULTICAST;
3631 wg->wg_if.if_extflags = IFEF_MPSAFE;
3632 wg->wg_if.if_ioctl = wg_ioctl;
3633 wg->wg_if.if_output = wg_output;
3634 wg->wg_if.if_init = wg_init;
3635 #ifdef ALTQ
3636 wg->wg_if.if_start = wg_start;
3637 #endif
3638 wg->wg_if.if_stop = wg_stop;
3639 wg->wg_if.if_type = IFT_OTHER;
3640 wg->wg_if.if_dlt = DLT_NULL;
3641 wg->wg_if.if_softc = wg;
3642 #ifdef ALTQ
3643 IFQ_SET_READY(&wg->wg_if.if_snd);
3644 #endif
3645 if_initialize(&wg->wg_if);
3646
3647 wg->wg_if.if_link_state = LINK_STATE_DOWN;
3648 if_alloc_sadl(&wg->wg_if);
3649 if_register(&wg->wg_if);
3650
3651 bpf_attach(&wg->wg_if, DLT_NULL, sizeof(uint32_t));
3652
3653 return 0;
3654 }
3655
3656 static void
3657 wg_if_detach(struct wg_softc *wg)
3658 {
3659 struct ifnet *ifp = &wg->wg_if;
3660
3661 bpf_detach(ifp);
3662 if_detach(ifp);
3663 }
3664
3665 static int
3666 wg_clone_create(struct if_clone *ifc, int unit)
3667 {
3668 struct wg_softc *wg;
3669 int error;
3670
3671 wg_guarantee_initialized();
3672
3673 error = wg_count_inc();
3674 if (error)
3675 return error;
3676
3677 wg = kmem_zalloc(sizeof(*wg), KM_SLEEP);
3678
3679 if_initname(&wg->wg_if, ifc->ifc_name, unit);
3680
3681 PSLIST_INIT(&wg->wg_peers);
3682 wg->wg_peers_bypubkey = thmap_create(0, NULL, THMAP_NOCOPY);
3683 wg->wg_peers_byname = thmap_create(0, NULL, THMAP_NOCOPY);
3684 wg->wg_sessions_byindex = thmap_create(0, NULL, THMAP_NOCOPY);
3685 wg->wg_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NONE);
3686 wg->wg_intr_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_SOFTNET);
3687 wg->wg_rwlock = rw_obj_alloc();
3688 threadpool_job_init(&wg->wg_job, wg_job, wg->wg_intr_lock,
3689 "%s", if_name(&wg->wg_if));
3690 wg->wg_ops = &wg_ops_rumpkernel;
3691
3692 error = threadpool_get(&wg->wg_threadpool, PRI_NONE);
3693 if (error)
3694 goto fail0;
3695
3696 #ifdef INET
3697 error = wg_socreate(wg, AF_INET, &wg->wg_so4);
3698 if (error)
3699 goto fail1;
3700 rn_inithead((void **)&wg->wg_rtable_ipv4,
3701 offsetof(struct sockaddr_in, sin_addr) * NBBY);
3702 #endif
3703 #ifdef INET6
3704 error = wg_socreate(wg, AF_INET6, &wg->wg_so6);
3705 if (error)
3706 goto fail2;
3707 rn_inithead((void **)&wg->wg_rtable_ipv6,
3708 offsetof(struct sockaddr_in6, sin6_addr) * NBBY);
3709 #endif
3710
3711 error = wg_if_attach(wg);
3712 if (error)
3713 goto fail3;
3714
3715 return 0;
3716
3717 fail4: __unused
3718 wg_if_detach(wg);
3719 fail3: wg_destroy_all_peers(wg);
3720 #ifdef INET6
3721 solock(wg->wg_so6);
3722 wg->wg_so6->so_rcv.sb_flags &= ~SB_UPCALL;
3723 sounlock(wg->wg_so6);
3724 #endif
3725 #ifdef INET
3726 solock(wg->wg_so4);
3727 wg->wg_so4->so_rcv.sb_flags &= ~SB_UPCALL;
3728 sounlock(wg->wg_so4);
3729 #endif
3730 mutex_enter(wg->wg_intr_lock);
3731 threadpool_cancel_job(wg->wg_threadpool, &wg->wg_job);
3732 mutex_exit(wg->wg_intr_lock);
3733 #ifdef INET6
3734 if (wg->wg_rtable_ipv6 != NULL)
3735 free(wg->wg_rtable_ipv6, M_RTABLE);
3736 soclose(wg->wg_so6);
3737 fail2:
3738 #endif
3739 #ifdef INET
3740 if (wg->wg_rtable_ipv4 != NULL)
3741 free(wg->wg_rtable_ipv4, M_RTABLE);
3742 soclose(wg->wg_so4);
3743 fail1:
3744 #endif
3745 threadpool_put(wg->wg_threadpool, PRI_NONE);
3746 fail0: threadpool_job_destroy(&wg->wg_job);
3747 rw_obj_free(wg->wg_rwlock);
3748 mutex_obj_free(wg->wg_intr_lock);
3749 mutex_obj_free(wg->wg_lock);
3750 thmap_destroy(wg->wg_sessions_byindex);
3751 thmap_destroy(wg->wg_peers_byname);
3752 thmap_destroy(wg->wg_peers_bypubkey);
3753 PSLIST_DESTROY(&wg->wg_peers);
3754 kmem_free(wg, sizeof(*wg));
3755 wg_count_dec();
3756 return error;
3757 }
3758
3759 static int
3760 wg_clone_destroy(struct ifnet *ifp)
3761 {
3762 struct wg_softc *wg = container_of(ifp, struct wg_softc, wg_if);
3763
3764 #ifdef WG_RUMPKERNEL
3765 if (wg_user_mode(wg)) {
3766 rumpuser_wg_destroy(wg->wg_user);
3767 wg->wg_user = NULL;
3768 }
3769 #endif
3770
3771 wg_if_detach(wg);
3772 wg_destroy_all_peers(wg);
3773 #ifdef INET6
3774 solock(wg->wg_so6);
3775 wg->wg_so6->so_rcv.sb_flags &= ~SB_UPCALL;
3776 sounlock(wg->wg_so6);
3777 #endif
3778 #ifdef INET
3779 solock(wg->wg_so4);
3780 wg->wg_so4->so_rcv.sb_flags &= ~SB_UPCALL;
3781 sounlock(wg->wg_so4);
3782 #endif
3783 mutex_enter(wg->wg_intr_lock);
3784 threadpool_cancel_job(wg->wg_threadpool, &wg->wg_job);
3785 mutex_exit(wg->wg_intr_lock);
3786 #ifdef INET6
3787 if (wg->wg_rtable_ipv6 != NULL)
3788 free(wg->wg_rtable_ipv6, M_RTABLE);
3789 soclose(wg->wg_so6);
3790 #endif
3791 #ifdef INET
3792 if (wg->wg_rtable_ipv4 != NULL)
3793 free(wg->wg_rtable_ipv4, M_RTABLE);
3794 soclose(wg->wg_so4);
3795 #endif
3796 threadpool_put(wg->wg_threadpool, PRI_NONE);
3797 threadpool_job_destroy(&wg->wg_job);
3798 rw_obj_free(wg->wg_rwlock);
3799 mutex_obj_free(wg->wg_intr_lock);
3800 mutex_obj_free(wg->wg_lock);
3801 thmap_destroy(wg->wg_sessions_byindex);
3802 thmap_destroy(wg->wg_peers_byname);
3803 thmap_destroy(wg->wg_peers_bypubkey);
3804 PSLIST_DESTROY(&wg->wg_peers);
3805 kmem_free(wg, sizeof(*wg));
3806 wg_count_dec();
3807
3808 return 0;
3809 }
3810
3811 static struct wg_peer *
3812 wg_pick_peer_by_sa(struct wg_softc *wg, const struct sockaddr *sa,
3813 struct psref *psref)
3814 {
3815 struct radix_node_head *rnh;
3816 struct radix_node *rn;
3817 struct wg_peer *wgp = NULL;
3818 struct wg_allowedip *wga;
3819
3820 #ifdef WG_DEBUG_LOG
3821 char addrstr[128];
3822 sockaddr_format(sa, addrstr, sizeof(addrstr));
3823 WG_DLOG("sa=%s\n", addrstr);
3824 #endif
3825
3826 rw_enter(wg->wg_rwlock, RW_READER);
3827
3828 rnh = wg_rnh(wg, sa->sa_family);
3829 if (rnh == NULL)
3830 goto out;
3831
3832 rn = rnh->rnh_matchaddr(sa, rnh);
3833 if (rn == NULL || (rn->rn_flags & RNF_ROOT) != 0)
3834 goto out;
3835
3836 WG_TRACE("success");
3837
3838 wga = container_of(rn, struct wg_allowedip, wga_nodes[0]);
3839 wgp = wga->wga_peer;
3840 wg_get_peer(wgp, psref);
3841
3842 out:
3843 rw_exit(wg->wg_rwlock);
3844 return wgp;
3845 }
3846
3847 static void
3848 wg_fill_msg_data(struct wg_softc *wg, struct wg_peer *wgp,
3849 struct wg_session *wgs, struct wg_msg_data *wgmd)
3850 {
3851
3852 memset(wgmd, 0, sizeof(*wgmd));
3853 wgmd->wgmd_type = htole32(WG_MSG_TYPE_DATA);
3854 wgmd->wgmd_receiver = wgs->wgs_remote_index;
3855 /* [W] 5.4.6: msg.counter := Nm^send */
3856 /* [W] 5.4.6: Nm^send := Nm^send + 1 */
3857 wgmd->wgmd_counter = htole64(wg_session_inc_send_counter(wgs));
3858 WG_DLOG("counter=%"PRIu64"\n", le64toh(wgmd->wgmd_counter));
3859 }
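/*
 * The transport data message filled in above is laid out on the wire
 * (cf. [W] 5.4.6) as:
 *
 *	uint32	type (WG_MSG_TYPE_DATA, little-endian)
 *	uint32	receiver index chosen by the peer
 *	uint64	send counter (little-endian, also used as the AEAD nonce)
 *	u8[]	AEAD ciphertext of the padded inner packet
 *
 * The ciphertext itself is appended by the caller, wg_send_data_msg().
 */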
3860
3861 static int
3862 wg_output(struct ifnet *ifp, struct mbuf *m, const struct sockaddr *dst,
3863 const struct rtentry *rt)
3864 {
3865 struct wg_softc *wg = ifp->if_softc;
3866 struct wg_peer *wgp = NULL;
3867 struct wg_session *wgs = NULL;
3868 struct psref wgp_psref, wgs_psref;
3869 int bound;
3870 int error;
3871
3872 bound = curlwp_bind();
3873
3874 /* TODO make the nest limit configurable via sysctl */
3875 error = if_tunnel_check_nesting(ifp, m, 1);
3876 if (error) {
3877 WGLOG(LOG_ERR,
3878 "%s: tunneling loop detected and packet dropped\n",
3879 if_name(&wg->wg_if));
3880 goto out0;
3881 }
3882
3883 #ifdef ALTQ
3884 bool altq = atomic_load_relaxed(&ifp->if_snd.altq_flags)
3885 & ALTQF_ENABLED;
3886 if (altq)
3887 IFQ_CLASSIFY(&ifp->if_snd, m, dst->sa_family);
3888 #endif
3889
3890 bpf_mtap_af(ifp, dst->sa_family, m, BPF_D_OUT);
3891
3892 m->m_flags &= ~(M_BCAST|M_MCAST);
3893
3894 wgp = wg_pick_peer_by_sa(wg, dst, &wgp_psref);
3895 if (wgp == NULL) {
3896 WG_TRACE("peer not found");
3897 error = EHOSTUNREACH;
3898 goto out0;
3899 }
3900
3901 /* Clear checksum-offload flags. */
3902 m->m_pkthdr.csum_flags = 0;
3903 m->m_pkthdr.csum_data = 0;
3904
3905 /* Check whether there's an established session. */
3906 wgs = wg_get_stable_session(wgp, &wgs_psref);
3907 if (wgs == NULL) {
3908 /*
3909 * No established session. If we're the first to try
3910 * sending data, schedule a handshake and queue the
3911 * packet for when the handshake is done; otherwise
3912 * just drop the packet and let the ongoing handshake
3913 * attempt continue. We could queue more data packets
3914 * but it's not clear that's worthwhile.
3915 */
3916 if (atomic_cas_ptr(&wgp->wgp_pending, NULL, m) == NULL) {
3917 m = NULL; /* consume */
3918 WG_TRACE("queued first packet; init handshake");
3919 wg_schedule_peer_task(wgp, WGP_TASK_SEND_INIT_MESSAGE);
3920 } else {
3921 WG_TRACE("first packet already queued, dropping");
3922 }
3923 goto out1;
3924 }
3925
3926 /* There's an established session. Toss it in the queue. */
3927 #ifdef ALTQ
3928 if (altq) {
3929 mutex_enter(ifp->if_snd.ifq_lock);
3930 if (ALTQ_IS_ENABLED(&ifp->if_snd)) {
3931 M_SETCTX(m, wgp);
3932 ALTQ_ENQUEUE(&ifp->if_snd, m, error);
3933 m = NULL; /* consume */
3934 }
3935 mutex_exit(ifp->if_snd.ifq_lock);
3936 if (m == NULL) {
3937 wg_start(ifp);
3938 goto out2;
3939 }
3940 }
3941 #endif
3942 kpreempt_disable();
3943 	const uint32_t h = curcpu()->ci_index;	/* pktq_rps_hash(m) */
3944 M_SETCTX(m, wgp);
3945 if (__predict_false(!pktq_enqueue(wg_pktq, m, h))) {
3946 WGLOG(LOG_ERR, "%s: pktq full, dropping\n",
3947 if_name(&wg->wg_if));
3948 error = ENOBUFS;
3949 goto out3;
3950 }
3951 m = NULL; /* consumed */
3952 error = 0;
3953 out3: kpreempt_enable();
3954
3955 #ifdef ALTQ
3956 out2:
3957 #endif
3958 wg_put_session(wgs, &wgs_psref);
3959 out1: wg_put_peer(wgp, &wgp_psref);
3960 out0: m_freem(m);
3961 curlwp_bindx(bound);
3962 return error;
3963 }
3964
3965 static int
3966 wg_send_udp(struct wg_peer *wgp, struct mbuf *m)
3967 {
3968 struct psref psref;
3969 struct wg_sockaddr *wgsa;
3970 int error;
3971 struct socket *so;
3972
3973 wgsa = wg_get_endpoint_sa(wgp, &psref);
3974 so = wg_get_so_by_peer(wgp, wgsa);
3975 solock(so);
3976 if (wgsatosa(wgsa)->sa_family == AF_INET) {
3977 error = udp_send(so, m, wgsatosa(wgsa), NULL, curlwp);
3978 } else {
3979 #ifdef INET6
3980 error = udp6_output(sotoinpcb(so), m, wgsatosin6(wgsa),
3981 NULL, curlwp);
3982 #else
3983 m_freem(m);
3984 error = EPFNOSUPPORT;
3985 #endif
3986 }
3987 sounlock(so);
3988 wg_put_sa(wgp, wgsa, &psref);
3989
3990 return error;
3991 }
3992
3993 /* Inspired by pppoe_get_mbuf */
3994 static struct mbuf *
3995 wg_get_mbuf(size_t leading_len, size_t len)
3996 {
3997 struct mbuf *m;
3998
3999 KASSERT(leading_len <= MCLBYTES);
4000 KASSERT(len <= MCLBYTES - leading_len);
4001
4002 m = m_gethdr(M_DONTWAIT, MT_DATA);
4003 if (m == NULL)
4004 return NULL;
4005 if (len + leading_len > MHLEN) {
4006 m_clget(m, M_DONTWAIT);
4007 if ((m->m_flags & M_EXT) == 0) {
4008 m_free(m);
4009 return NULL;
4010 }
4011 }
4012 m->m_data += leading_len;
4013 m->m_pkthdr.len = m->m_len = len;
4014
4015 return m;
4016 }
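/*
 * The leading_len bytes of headroom reserved here (max_hdr plus a UDP
 * header in wg_send_data_msg()) let the outer IP/UDP headers be
 * prepended later without allocating another mbuf.
 */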
4017
4018 static int
4019 wg_send_data_msg(struct wg_peer *wgp, struct wg_session *wgs,
4020 struct mbuf *m)
4021 {
4022 struct wg_softc *wg = wgp->wgp_sc;
4023 int error;
4024 size_t inner_len, padded_len, encrypted_len;
4025 char *padded_buf = NULL;
4026 size_t mlen;
4027 struct wg_msg_data *wgmd;
4028 bool free_padded_buf = false;
4029 struct mbuf *n;
4030 size_t leading_len = max_hdr + sizeof(struct udphdr);
4031
4032 mlen = m_length(m);
4033 inner_len = mlen;
4034 padded_len = roundup(mlen, 16);
4035 encrypted_len = padded_len + WG_AUTHTAG_LEN;
4036 	WG_DLOG("inner_len=%lu, padded_len=%lu, encrypted_len=%lu\n",
4037 	    inner_len, padded_len, encrypted_len);
4038 if (mlen != 0) {
4039 bool success;
4040 success = m_ensure_contig(&m, padded_len);
4041 if (success) {
4042 padded_buf = mtod(m, char *);
4043 } else {
4044 padded_buf = kmem_intr_alloc(padded_len, KM_NOSLEEP);
4045 if (padded_buf == NULL) {
4046 error = ENOBUFS;
4047 goto end;
4048 }
4049 free_padded_buf = true;
4050 m_copydata(m, 0, mlen, padded_buf);
4051 }
4052 memset(padded_buf + mlen, 0, padded_len - inner_len);
4053 }
4054
4055 n = wg_get_mbuf(leading_len, sizeof(*wgmd) + encrypted_len);
4056 if (n == NULL) {
4057 error = ENOBUFS;
4058 goto end;
4059 }
4060 KASSERT(n->m_len >= sizeof(*wgmd));
4061 wgmd = mtod(n, struct wg_msg_data *);
4062 wg_fill_msg_data(wg, wgp, wgs, wgmd);
4063 /* [W] 5.4.6: AEAD(Tm^send, Nm^send, P, e) */
4064 wg_algo_aead_enc((char *)wgmd + sizeof(*wgmd), encrypted_len,
4065 wgs->wgs_tkey_send, le64toh(wgmd->wgmd_counter),
4066 padded_buf, padded_len,
4067 NULL, 0);
4068
4069 error = wg->wg_ops->send_data_msg(wgp, n);
4070 if (error == 0) {
4071 struct ifnet *ifp = &wg->wg_if;
4072 if_statadd(ifp, if_obytes, mlen);
4073 if_statinc(ifp, if_opackets);
4074 if (wgs->wgs_is_initiator &&
4075 wgs->wgs_time_last_data_sent == 0) {
4076 /*
4077 * [W] 6.2 Transport Message Limits
4078 * "if a peer is the initiator of a current secure
4079 * session, WireGuard will send a handshake initiation
4080 * message to begin a new secure session if, after
4081 * transmitting a transport data message, the current
4082 * secure session is REKEY-AFTER-TIME seconds old,"
4083 */
4084 wg_schedule_rekey_timer(wgp);
4085 }
4086 wgs->wgs_time_last_data_sent = time_uptime;
4087 if (wg_session_get_send_counter(wgs) >=
4088 wg_rekey_after_messages) {
4089 /*
4090 * [W] 6.2 Transport Message Limits
4091 * "WireGuard will try to create a new session, by
4092 * sending a handshake initiation message (section
4093 * 5.4.2), after it has sent REKEY-AFTER-MESSAGES
4094 * transport data messages..."
4095 */
4096 wg_schedule_peer_task(wgp, WGP_TASK_SEND_INIT_MESSAGE);
4097 }
4098 }
4099 end:
4100 m_freem(m);
4101 if (free_padded_buf)
4102 kmem_intr_free(padded_buf, padded_len);
4103 return error;
4104 }
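/*
 * Size bookkeeping example (a sketch, assuming the usual WG_MTU of
 * 1420 and a 16-byte WG_AUTHTAG_LEN): a full-sized 1420-byte inner
 * packet is padded to roundup(1420, 16) = 1424 bytes, the AEAD tag
 * brings the ciphertext to 1440 bytes, and the 16-byte data-message
 * header yields a 1456-byte WireGuard message, i.e. a 1484-byte
 * IPv4/UDP datagram, which still fits a 1500-byte Ethernet MTU.
 */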
4105
4106 static void
4107 wg_input(struct ifnet *ifp, struct mbuf *m, const int af)
4108 {
4109 pktqueue_t *pktq;
4110 size_t pktlen;
4111
4112 KASSERT(af == AF_INET || af == AF_INET6);
4113
4114 WG_TRACE("");
4115
4116 m_set_rcvif(m, ifp);
4117 pktlen = m->m_pkthdr.len;
4118
4119 bpf_mtap_af(ifp, af, m, BPF_D_IN);
4120
4121 switch (af) {
4122 case AF_INET:
4123 pktq = ip_pktq;
4124 break;
4125 #ifdef INET6
4126 case AF_INET6:
4127 pktq = ip6_pktq;
4128 break;
4129 #endif
4130 default:
4131 panic("invalid af=%d", af);
4132 }
4133
4134 kpreempt_disable();
4135 const u_int h = curcpu()->ci_index;
4136 if (__predict_true(pktq_enqueue(pktq, m, h))) {
4137 if_statadd(ifp, if_ibytes, pktlen);
4138 if_statinc(ifp, if_ipackets);
4139 } else {
4140 m_freem(m);
4141 }
4142 kpreempt_enable();
4143 }
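/*
 * Decrypted inner packets are not handed to ip_input()/ip6_input()
 * directly; they are enqueued on the regular IP pktqueues (keyed here
 * by the current CPU index) and processed later by the IP layer's
 * softint.
 */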
4144
4145 static void
4146 wg_calc_pubkey(uint8_t pubkey[WG_STATIC_KEY_LEN],
4147 const uint8_t privkey[WG_STATIC_KEY_LEN])
4148 {
4149
4150 crypto_scalarmult_base(pubkey, privkey);
4151 }
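/*
 * Plain Curve25519 scalar multiplication by the base point
 * (crypto_scalarmult_base() from the sodium code this module depends
 * on), i.e. the static Diffie-Hellman public key derivation of [W] 5.4.
 */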
4152
4153 static int
4154 wg_rtable_add_route(struct wg_softc *wg, struct wg_allowedip *wga)
4155 {
4156 struct radix_node_head *rnh;
4157 struct radix_node *rn;
4158 int error = 0;
4159
4160 rw_enter(wg->wg_rwlock, RW_WRITER);
4161 rnh = wg_rnh(wg, wga->wga_family);
4162 KASSERT(rnh != NULL);
4163 rn = rnh->rnh_addaddr(&wga->wga_sa_addr, &wga->wga_sa_mask, rnh,
4164 wga->wga_nodes);
4165 rw_exit(wg->wg_rwlock);
4166
4167 if (rn == NULL)
4168 error = EEXIST;
4169
4170 return error;
4171 }
4172
4173 static int
4174 wg_handle_prop_peer(struct wg_softc *wg, prop_dictionary_t peer,
4175 struct wg_peer **wgpp)
4176 {
4177 int error = 0;
4178 const void *pubkey;
4179 size_t pubkey_len;
4180 const void *psk;
4181 size_t psk_len;
4182 const char *name = NULL;
4183
4184 if (prop_dictionary_get_string(peer, "name", &name)) {
4185 if (strlen(name) > WG_PEER_NAME_MAXLEN) {
4186 error = EINVAL;
4187 goto out;
4188 }
4189 }
4190
4191 	if (!prop_dictionary_get_data(peer, "public_key",
4192 	    &pubkey, &pubkey_len)) {
4193 		error = EINVAL;
4194 		goto out;
4195 	}
	/* Reject short keys before the fixed-size memcpy below. */
	if (pubkey_len != WG_STATIC_KEY_LEN) {
		error = EINVAL;
		goto out;
	}
4196 #ifdef WG_DEBUG_DUMP
4197 if (wg_debug & WG_DEBUG_FLAGS_DUMP) {
4198 char *hex = gethexdump(pubkey, pubkey_len);
4199 log(LOG_DEBUG, "pubkey=%p, pubkey_len=%lu\n%s\n",
4200 pubkey, pubkey_len, hex);
4201 puthexdump(hex, pubkey, pubkey_len);
4202 }
4203 #endif
4204
4205 struct wg_peer *wgp = wg_alloc_peer(wg);
4206 memcpy(wgp->wgp_pubkey, pubkey, sizeof(wgp->wgp_pubkey));
4207 if (name != NULL)
4208 strncpy(wgp->wgp_name, name, sizeof(wgp->wgp_name));
4209
4210 if (prop_dictionary_get_data(peer, "preshared_key", &psk, &psk_len)) {
4211 if (psk_len != sizeof(wgp->wgp_psk)) {
4212 error = EINVAL;
4213 goto out;
4214 }
4215 memcpy(wgp->wgp_psk, psk, sizeof(wgp->wgp_psk));
4216 }
4217
4218 const void *addr;
4219 size_t addr_len;
4220 struct wg_sockaddr *wgsa = wgp->wgp_endpoint;
4221
4222 if (!prop_dictionary_get_data(peer, "endpoint", &addr, &addr_len))
4223 goto skip_endpoint;
4224 if (addr_len < sizeof(*wgsatosa(wgsa)) ||
4225 addr_len > sizeof(*wgsatoss(wgsa))) {
4226 error = EINVAL;
4227 goto out;
4228 }
4229 memcpy(wgsatoss(wgsa), addr, addr_len);
4230 switch (wgsa_family(wgsa)) {
4231 case AF_INET:
4232 #ifdef INET6
4233 case AF_INET6:
4234 #endif
4235 break;
4236 default:
4237 error = EPFNOSUPPORT;
4238 goto out;
4239 }
4240 if (addr_len != sockaddr_getsize_by_family(wgsa_family(wgsa))) {
4241 error = EINVAL;
4242 goto out;
4243 }
4244 {
4245 char addrstr[128];
4246 sockaddr_format(wgsatosa(wgsa), addrstr, sizeof(addrstr));
4247 WG_DLOG("addr=%s\n", addrstr);
4248 }
4249 wgp->wgp_endpoint_available = true;
4250
4251 prop_array_t allowedips;
4252 skip_endpoint:
4253 allowedips = prop_dictionary_get(peer, "allowedips");
4254 if (allowedips == NULL)
4255 goto skip;
4256
4257 prop_object_iterator_t _it = prop_array_iterator(allowedips);
4258 prop_dictionary_t prop_allowedip;
4259 int j = 0;
4260 while ((prop_allowedip = prop_object_iterator_next(_it)) != NULL) {
4261 struct wg_allowedip *wga = &wgp->wgp_allowedips[j];
4262
4263 if (!prop_dictionary_get_int(prop_allowedip, "family",
4264 &wga->wga_family))
4265 continue;
4266 if (!prop_dictionary_get_data(prop_allowedip, "ip",
4267 &addr, &addr_len))
4268 continue;
4269 if (!prop_dictionary_get_uint8(prop_allowedip, "cidr",
4270 &wga->wga_cidr))
4271 continue;
4272
4273 switch (wga->wga_family) {
4274 case AF_INET: {
4275 struct sockaddr_in sin;
4276 char addrstr[128];
4277 struct in_addr mask;
4278 struct sockaddr_in sin_mask;
4279
4280 if (addr_len != sizeof(struct in_addr))
4281 return EINVAL;
4282 memcpy(&wga->wga_addr4, addr, addr_len);
4283
4284 sockaddr_in_init(&sin, (const struct in_addr *)addr,
4285 0);
4286 sockaddr_copy(&wga->wga_sa_addr,
4287 sizeof(sin), sintosa(&sin));
4288
4289 sockaddr_format(sintosa(&sin),
4290 addrstr, sizeof(addrstr));
4291 WG_DLOG("addr=%s/%d\n", addrstr, wga->wga_cidr);
4292
4293 in_len2mask(&mask, wga->wga_cidr);
4294 sockaddr_in_init(&sin_mask, &mask, 0);
4295 sockaddr_copy(&wga->wga_sa_mask,
4296 sizeof(sin_mask), sintosa(&sin_mask));
4297
4298 break;
4299 }
4300 #ifdef INET6
4301 case AF_INET6: {
4302 struct sockaddr_in6 sin6;
4303 char addrstr[128];
4304 struct in6_addr mask;
4305 struct sockaddr_in6 sin6_mask;
4306
4307 if (addr_len != sizeof(struct in6_addr))
4308 return EINVAL;
4309 memcpy(&wga->wga_addr6, addr, addr_len);
4310
4311 sockaddr_in6_init(&sin6, (const struct in6_addr *)addr,
4312 0, 0, 0);
4313 sockaddr_copy(&wga->wga_sa_addr,
4314 sizeof(sin6), sin6tosa(&sin6));
4315
4316 sockaddr_format(sin6tosa(&sin6),
4317 addrstr, sizeof(addrstr));
4318 WG_DLOG("addr=%s/%d\n", addrstr, wga->wga_cidr);
4319
4320 in6_prefixlen2mask(&mask, wga->wga_cidr);
4321 sockaddr_in6_init(&sin6_mask, &mask, 0, 0, 0);
4322 sockaddr_copy(&wga->wga_sa_mask,
4323 sizeof(sin6_mask), sin6tosa(&sin6_mask));
4324
4325 break;
4326 }
4327 #endif
4328 default:
4329 error = EINVAL;
4330 goto out;
4331 }
4332 wga->wga_peer = wgp;
4333
4334 error = wg_rtable_add_route(wg, wga);
4335 if (error != 0)
4336 goto out;
4337
4338 j++;
4339 }
4340 wgp->wgp_n_allowedips = j;
4341 skip:
4342 *wgpp = wgp;
4343 out:
4344 return error;
4345 }
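/*
 * The peer dictionary parsed above is expected to carry these keys:
 *
 *	"name"		string, at most WG_PEER_NAME_MAXLEN bytes (optional)
 *	"public_key"	data, WG_STATIC_KEY_LEN bytes
 *	"preshared_key"	data, sizeof(wgp_psk) bytes (optional)
 *	"endpoint"	data, a struct sockaddr_in or sockaddr_in6 (optional)
 *	"allowedips"	array of dictionaries with "family" (int),
 *			"ip" (data) and "cidr" (uint8) (optional)
 */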
4346
4347 static int
4348 wg_alloc_prop_buf(char **_buf, struct ifdrv *ifd)
4349 {
4350 int error;
4351 char *buf;
4352
4353 WG_DLOG("buf=%p, len=%lu\n", ifd->ifd_data, ifd->ifd_len);
4354 if (ifd->ifd_len >= WG_MAX_PROPLEN)
4355 return E2BIG;
4356 buf = kmem_alloc(ifd->ifd_len + 1, KM_SLEEP);
4357 error = copyin(ifd->ifd_data, buf, ifd->ifd_len);
4358 if (error != 0)
4359 return error;
4360 buf[ifd->ifd_len] = '\0';
4361 #ifdef WG_DEBUG_DUMP
4362 if (wg_debug & WG_DEBUG_FLAGS_DUMP) {
4363 log(LOG_DEBUG, "%.*s\n", (int)MIN(INT_MAX, ifd->ifd_len),
4364 (const char *)buf);
4365 }
4366 #endif
4367 *_buf = buf;
4368 return 0;
4369 }
4370
4371 static int
4372 wg_ioctl_set_private_key(struct wg_softc *wg, struct ifdrv *ifd)
4373 {
4374 int error;
4375 prop_dictionary_t prop_dict;
4376 char *buf = NULL;
4377 const void *privkey;
4378 size_t privkey_len;
4379
4380 error = wg_alloc_prop_buf(&buf, ifd);
4381 if (error != 0)
4382 return error;
4383 error = EINVAL;
4384 prop_dict = prop_dictionary_internalize(buf);
4385 if (prop_dict == NULL)
4386 goto out;
4387 if (!prop_dictionary_get_data(prop_dict, "private_key",
4388 &privkey, &privkey_len))
4389 goto out;
4390 #ifdef WG_DEBUG_DUMP
4391 if (wg_debug & WG_DEBUG_FLAGS_DUMP) {
4392 char *hex = gethexdump(privkey, privkey_len);
4393 log(LOG_DEBUG, "privkey=%p, privkey_len=%lu\n%s\n",
4394 privkey, privkey_len, hex);
4395 puthexdump(hex, privkey, privkey_len);
4396 }
4397 #endif
4398 if (privkey_len != WG_STATIC_KEY_LEN)
4399 goto out;
4400 memcpy(wg->wg_privkey, privkey, WG_STATIC_KEY_LEN);
4401 wg_calc_pubkey(wg->wg_pubkey, wg->wg_privkey);
4402 error = 0;
4403
4404 out:
4405 kmem_free(buf, ifd->ifd_len + 1);
4406 return error;
4407 }
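/*
 * Rough userland usage sketch for the SIOCSDRVSPEC subcommands handled
 * here (wgconfig(8) does essentially this); socket setup and error
 * handling are assumed:
 *
 *	prop_dictionary_t d = prop_dictionary_create();
 *	prop_dictionary_set_data(d, "private_key", key, WG_STATIC_KEY_LEN);
 *	char *xml = prop_dictionary_externalize(d);
 *	struct ifdrv ifd;
 *	strlcpy(ifd.ifd_name, "wg0", sizeof(ifd.ifd_name));
 *	ifd.ifd_cmd = WG_IOCTL_SET_PRIVATE_KEY;
 *	ifd.ifd_data = xml;
 *	ifd.ifd_len = strlen(xml);
 *	ioctl(sock, SIOCSDRVSPEC, &ifd);
 */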
4408
4409 static int
4410 wg_ioctl_set_listen_port(struct wg_softc *wg, struct ifdrv *ifd)
4411 {
4412 int error;
4413 prop_dictionary_t prop_dict;
4414 char *buf = NULL;
4415 uint16_t port;
4416
4417 error = wg_alloc_prop_buf(&buf, ifd);
4418 if (error != 0)
4419 return error;
4420 error = EINVAL;
4421 prop_dict = prop_dictionary_internalize(buf);
4422 if (prop_dict == NULL)
4423 goto out;
4424 if (!prop_dictionary_get_uint16(prop_dict, "listen_port", &port))
4425 goto out;
4426
4427 error = wg->wg_ops->bind_port(wg, (uint16_t)port);
4428
4429 out:
4430 kmem_free(buf, ifd->ifd_len + 1);
4431 return error;
4432 }
4433
4434 static int
4435 wg_ioctl_add_peer(struct wg_softc *wg, struct ifdrv *ifd)
4436 {
4437 int error;
4438 prop_dictionary_t prop_dict;
4439 char *buf = NULL;
4440 struct wg_peer *wgp = NULL, *wgp0 __diagused;
4441
4442 error = wg_alloc_prop_buf(&buf, ifd);
4443 if (error != 0)
4444 return error;
4445 error = EINVAL;
4446 prop_dict = prop_dictionary_internalize(buf);
4447 if (prop_dict == NULL)
4448 goto out;
4449
4450 error = wg_handle_prop_peer(wg, prop_dict, &wgp);
4451 if (error != 0)
4452 goto out;
4453
4454 mutex_enter(wg->wg_lock);
4455 if (thmap_get(wg->wg_peers_bypubkey, wgp->wgp_pubkey,
4456 sizeof(wgp->wgp_pubkey)) != NULL ||
4457 (wgp->wgp_name[0] &&
4458 thmap_get(wg->wg_peers_byname, wgp->wgp_name,
4459 strlen(wgp->wgp_name)) != NULL)) {
4460 mutex_exit(wg->wg_lock);
4461 wg_destroy_peer(wgp);
4462 error = EEXIST;
4463 goto out;
4464 }
4465 wgp0 = thmap_put(wg->wg_peers_bypubkey, wgp->wgp_pubkey,
4466 sizeof(wgp->wgp_pubkey), wgp);
4467 KASSERT(wgp0 == wgp);
4468 if (wgp->wgp_name[0]) {
4469 wgp0 = thmap_put(wg->wg_peers_byname, wgp->wgp_name,
4470 strlen(wgp->wgp_name), wgp);
4471 KASSERT(wgp0 == wgp);
4472 }
4473 WG_PEER_WRITER_INSERT_HEAD(wgp, wg);
4474 wg->wg_npeers++;
4475 mutex_exit(wg->wg_lock);
4476
4477 if_link_state_change(&wg->wg_if, LINK_STATE_UP);
4478
4479 out:
4480 kmem_free(buf, ifd->ifd_len + 1);
4481 return error;
4482 }
4483
4484 static int
4485 wg_ioctl_delete_peer(struct wg_softc *wg, struct ifdrv *ifd)
4486 {
4487 int error;
4488 prop_dictionary_t prop_dict;
4489 char *buf = NULL;
4490 const char *name;
4491
4492 error = wg_alloc_prop_buf(&buf, ifd);
4493 if (error != 0)
4494 return error;
4495 error = EINVAL;
4496 prop_dict = prop_dictionary_internalize(buf);
4497 if (prop_dict == NULL)
4498 goto out;
4499
4500 if (!prop_dictionary_get_string(prop_dict, "name", &name))
4501 goto out;
4502 if (strlen(name) > WG_PEER_NAME_MAXLEN)
4503 goto out;
4504
4505 error = wg_destroy_peer_name(wg, name);
4506 out:
4507 kmem_free(buf, ifd->ifd_len + 1);
4508 return error;
4509 }
4510
4511 static bool
4512 wg_is_authorized(struct wg_softc *wg, u_long cmd)
4513 {
4514 int au = cmd == SIOCGDRVSPEC ?
4515 KAUTH_REQ_NETWORK_INTERFACE_WG_GETPRIV :
4516 KAUTH_REQ_NETWORK_INTERFACE_WG_SETPRIV;
4517 return kauth_authorize_network(kauth_cred_get(),
4518 KAUTH_NETWORK_INTERFACE_WG, au, &wg->wg_if,
4519 (void *)cmd, NULL) == 0;
4520 }
4521
4522 static int
4523 wg_ioctl_get(struct wg_softc *wg, struct ifdrv *ifd)
4524 {
4525 int error = ENOMEM;
4526 prop_dictionary_t prop_dict;
4527 prop_array_t peers = NULL;
4528 char *buf;
4529 struct wg_peer *wgp;
4530 int s, i;
4531
4532 prop_dict = prop_dictionary_create();
4533 if (prop_dict == NULL)
4534 goto error;
4535
4536 if (wg_is_authorized(wg, SIOCGDRVSPEC)) {
4537 if (!prop_dictionary_set_data(prop_dict, "private_key",
4538 wg->wg_privkey, WG_STATIC_KEY_LEN))
4539 goto error;
4540 }
4541
4542 if (wg->wg_listen_port != 0) {
4543 if (!prop_dictionary_set_uint16(prop_dict, "listen_port",
4544 wg->wg_listen_port))
4545 goto error;
4546 }
4547
4548 if (wg->wg_npeers == 0)
4549 goto skip_peers;
4550
4551 peers = prop_array_create();
4552 if (peers == NULL)
4553 goto error;
4554
4555 s = pserialize_read_enter();
4556 i = 0;
4557 WG_PEER_READER_FOREACH(wgp, wg) {
4558 struct wg_sockaddr *wgsa;
4559 struct psref wgp_psref, wgsa_psref;
4560 prop_dictionary_t prop_peer;
4561
4562 wg_get_peer(wgp, &wgp_psref);
4563 pserialize_read_exit(s);
4564
4565 prop_peer = prop_dictionary_create();
4566 if (prop_peer == NULL)
4567 goto next;
4568
4569 if (strlen(wgp->wgp_name) > 0) {
4570 if (!prop_dictionary_set_string(prop_peer, "name",
4571 wgp->wgp_name))
4572 goto next;
4573 }
4574
4575 if (!prop_dictionary_set_data(prop_peer, "public_key",
4576 wgp->wgp_pubkey, sizeof(wgp->wgp_pubkey)))
4577 goto next;
4578
4579 uint8_t psk_zero[WG_PRESHARED_KEY_LEN] = {0};
4580 if (!consttime_memequal(wgp->wgp_psk, psk_zero,
4581 sizeof(wgp->wgp_psk))) {
4582 if (wg_is_authorized(wg, SIOCGDRVSPEC)) {
4583 if (!prop_dictionary_set_data(prop_peer,
4584 "preshared_key",
4585 wgp->wgp_psk, sizeof(wgp->wgp_psk)))
4586 goto next;
4587 }
4588 }
4589
4590 wgsa = wg_get_endpoint_sa(wgp, &wgsa_psref);
4591 CTASSERT(AF_UNSPEC == 0);
4592 if (wgsa_family(wgsa) != 0 /*AF_UNSPEC*/ &&
4593 !prop_dictionary_set_data(prop_peer, "endpoint",
4594 wgsatoss(wgsa),
4595 sockaddr_getsize_by_family(wgsa_family(wgsa)))) {
4596 wg_put_sa(wgp, wgsa, &wgsa_psref);
4597 goto next;
4598 }
4599 wg_put_sa(wgp, wgsa, &wgsa_psref);
4600
4601 const struct timespec *t = &wgp->wgp_last_handshake_time;
4602
4603 if (!prop_dictionary_set_uint64(prop_peer,
4604 "last_handshake_time_sec", (uint64_t)t->tv_sec))
4605 goto next;
4606 if (!prop_dictionary_set_uint32(prop_peer,
4607 "last_handshake_time_nsec", (uint32_t)t->tv_nsec))
4608 goto next;
4609
4610 if (wgp->wgp_n_allowedips == 0)
4611 goto skip_allowedips;
4612
4613 prop_array_t allowedips = prop_array_create();
4614 if (allowedips == NULL)
4615 goto next;
4616 for (int j = 0; j < wgp->wgp_n_allowedips; j++) {
4617 struct wg_allowedip *wga = &wgp->wgp_allowedips[j];
4618 prop_dictionary_t prop_allowedip;
4619
4620 prop_allowedip = prop_dictionary_create();
4621 if (prop_allowedip == NULL)
4622 break;
4623
4624 if (!prop_dictionary_set_int(prop_allowedip, "family",
4625 wga->wga_family))
4626 goto _next;
4627 if (!prop_dictionary_set_uint8(prop_allowedip, "cidr",
4628 wga->wga_cidr))
4629 goto _next;
4630
4631 switch (wga->wga_family) {
4632 case AF_INET:
4633 if (!prop_dictionary_set_data(prop_allowedip,
4634 "ip", &wga->wga_addr4,
4635 sizeof(wga->wga_addr4)))
4636 goto _next;
4637 break;
4638 #ifdef INET6
4639 case AF_INET6:
4640 if (!prop_dictionary_set_data(prop_allowedip,
4641 "ip", &wga->wga_addr6,
4642 sizeof(wga->wga_addr6)))
4643 goto _next;
4644 break;
4645 #endif
4646 default:
4647 break;
4648 }
4649 prop_array_set(allowedips, j, prop_allowedip);
4650 _next:
4651 prop_object_release(prop_allowedip);
4652 }
4653 prop_dictionary_set(prop_peer, "allowedips", allowedips);
4654 prop_object_release(allowedips);
4655
4656 skip_allowedips:
4657
4658 prop_array_set(peers, i, prop_peer);
4659 next:
4660 if (prop_peer)
4661 prop_object_release(prop_peer);
4662 i++;
4663
4664 s = pserialize_read_enter();
4665 wg_put_peer(wgp, &wgp_psref);
4666 }
4667 pserialize_read_exit(s);
4668
4669 prop_dictionary_set(prop_dict, "peers", peers);
4670 prop_object_release(peers);
4671 peers = NULL;
4672
4673 skip_peers:
4674 buf = prop_dictionary_externalize(prop_dict);
4675 if (buf == NULL)
4676 goto error;
4677 if (ifd->ifd_len < (strlen(buf) + 1)) {
4678 error = EINVAL;
4679 goto error;
4680 }
4681 error = copyout(buf, ifd->ifd_data, strlen(buf) + 1);
4682
4683 free(buf, 0);
4684 error:
4685 if (peers != NULL)
4686 prop_object_release(peers);
4687 if (prop_dict != NULL)
4688 prop_object_release(prop_dict);
4689
4690 return error;
4691 }
4692
4693 static int
4694 wg_ioctl(struct ifnet *ifp, u_long cmd, void *data)
4695 {
4696 struct wg_softc *wg = ifp->if_softc;
4697 struct ifreq *ifr = data;
4698 struct ifaddr *ifa = data;
4699 struct ifdrv *ifd = data;
4700 int error = 0;
4701
4702 switch (cmd) {
4703 case SIOCINITIFADDR:
4704 if (ifa->ifa_addr->sa_family != AF_LINK &&
4705 (ifp->if_flags & (IFF_UP | IFF_RUNNING)) !=
4706 (IFF_UP | IFF_RUNNING)) {
4707 ifp->if_flags |= IFF_UP;
4708 error = if_init(ifp);
4709 }
4710 return error;
4711 case SIOCADDMULTI:
4712 case SIOCDELMULTI:
4713 switch (ifr->ifr_addr.sa_family) {
4714 case AF_INET: /* IP supports Multicast */
4715 break;
4716 #ifdef INET6
4717 case AF_INET6: /* IP6 supports Multicast */
4718 break;
4719 #endif
4720 		default:	/* Other protocols don't support multicast */
4721 error = EAFNOSUPPORT;
4722 break;
4723 }
4724 return error;
4725 case SIOCSDRVSPEC:
4726 if (!wg_is_authorized(wg, cmd)) {
4727 return EPERM;
4728 }
4729 switch (ifd->ifd_cmd) {
4730 case WG_IOCTL_SET_PRIVATE_KEY:
4731 error = wg_ioctl_set_private_key(wg, ifd);
4732 break;
4733 case WG_IOCTL_SET_LISTEN_PORT:
4734 error = wg_ioctl_set_listen_port(wg, ifd);
4735 break;
4736 case WG_IOCTL_ADD_PEER:
4737 error = wg_ioctl_add_peer(wg, ifd);
4738 break;
4739 case WG_IOCTL_DELETE_PEER:
4740 error = wg_ioctl_delete_peer(wg, ifd);
4741 break;
4742 default:
4743 error = EINVAL;
4744 break;
4745 }
4746 return error;
4747 case SIOCGDRVSPEC:
4748 return wg_ioctl_get(wg, ifd);
4749 case SIOCSIFFLAGS:
4750 if ((error = ifioctl_common(ifp, cmd, data)) != 0)
4751 break;
4752 switch (ifp->if_flags & (IFF_UP|IFF_RUNNING)) {
4753 case IFF_RUNNING:
4754 /*
4755 			 * If the interface is marked down and it is
4756 			 * running, then stop and disable it.
4757 */
4758 if_stop(ifp, 1);
4759 break;
4760 case IFF_UP:
4761 /*
4762 			 * If the interface is marked up and it is stopped, then
4763 * start it.
4764 */
4765 error = if_init(ifp);
4766 break;
4767 default:
4768 break;
4769 }
4770 return error;
4771 #ifdef WG_RUMPKERNEL
4772 case SIOCSLINKSTR:
4773 error = wg_ioctl_linkstr(wg, ifd);
4774 if (error == 0)
4775 wg->wg_ops = &wg_ops_rumpuser;
4776 return error;
4777 #endif
4778 default:
4779 break;
4780 }
4781
4782 error = ifioctl_common(ifp, cmd, data);
4783
4784 #ifdef WG_RUMPKERNEL
4785 if (!wg_user_mode(wg))
4786 return error;
4787
4788 /* Do the same to the corresponding tun device on the host */
4789 /*
4790 * XXX Actually the command has not been handled yet. It
4791 	 * will be handled via pr_ioctl from doifioctl later.
4792 */
4793 switch (cmd) {
4794 case SIOCAIFADDR:
4795 case SIOCDIFADDR: {
4796 struct in_aliasreq _ifra = *(const struct in_aliasreq *)data;
4797 struct in_aliasreq *ifra = &_ifra;
4798 KASSERT(error == ENOTTY);
4799 strncpy(ifra->ifra_name, rumpuser_wg_get_tunname(wg->wg_user),
4800 IFNAMSIZ);
4801 error = rumpuser_wg_ioctl(wg->wg_user, cmd, ifra, AF_INET);
4802 if (error == 0)
4803 error = ENOTTY;
4804 break;
4805 }
4806 #ifdef INET6
4807 case SIOCAIFADDR_IN6:
4808 case SIOCDIFADDR_IN6: {
4809 struct in6_aliasreq _ifra = *(const struct in6_aliasreq *)data;
4810 struct in6_aliasreq *ifra = &_ifra;
4811 KASSERT(error == ENOTTY);
4812 strncpy(ifra->ifra_name, rumpuser_wg_get_tunname(wg->wg_user),
4813 IFNAMSIZ);
4814 error = rumpuser_wg_ioctl(wg->wg_user, cmd, ifra, AF_INET6);
4815 if (error == 0)
4816 error = ENOTTY;
4817 break;
4818 }
4819 #endif
4820 }
4821 #endif /* WG_RUMPKERNEL */
4822
4823 return error;
4824 }
4825
4826 static int
4827 wg_init(struct ifnet *ifp)
4828 {
4829
4830 ifp->if_flags |= IFF_RUNNING;
4831
4832 /* TODO flush pending packets. */
4833 return 0;
4834 }
4835
4836 #ifdef ALTQ
4837 static void
4838 wg_start(struct ifnet *ifp)
4839 {
4840 struct mbuf *m;
4841
4842 for (;;) {
4843 IFQ_DEQUEUE(&ifp->if_snd, m);
4844 if (m == NULL)
4845 break;
4846
4847 kpreempt_disable();
4848 		const uint32_t h = curcpu()->ci_index;	/* pktq_rps_hash(m) */
4849 if (__predict_false(!pktq_enqueue(wg_pktq, m, h))) {
4850 WGLOG(LOG_ERR, "%s: pktq full, dropping\n",
4851 if_name(ifp));
4852 m_freem(m);
4853 }
4854 kpreempt_enable();
4855 }
4856 }
4857 #endif
4858
4859 static void
4860 wg_stop(struct ifnet *ifp, int disable)
4861 {
4862
4863 KASSERT((ifp->if_flags & IFF_RUNNING) != 0);
4864 ifp->if_flags &= ~IFF_RUNNING;
4865
4866 /* Need to do something? */
4867 }
4868
4869 #ifdef WG_DEBUG_PARAMS
4870 SYSCTL_SETUP(sysctl_net_wg_setup, "sysctl net.wg setup")
4871 {
4872 const struct sysctlnode *node = NULL;
4873
4874 sysctl_createv(clog, 0, NULL, &node,
4875 CTLFLAG_PERMANENT,
4876 CTLTYPE_NODE, "wg",
4877 SYSCTL_DESCR("wg(4)"),
4878 NULL, 0, NULL, 0,
4879 CTL_NET, CTL_CREATE, CTL_EOL);
4880 sysctl_createv(clog, 0, &node, NULL,
4881 CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
4882 CTLTYPE_QUAD, "rekey_after_messages",
4883 	    SYSCTL_DESCR("session lifetime in messages"),
4884 NULL, 0, &wg_rekey_after_messages, 0, CTL_CREATE, CTL_EOL);
4885 sysctl_createv(clog, 0, &node, NULL,
4886 CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
4887 CTLTYPE_INT, "rekey_after_time",
4888 	    SYSCTL_DESCR("session lifetime"),
4889 NULL, 0, &wg_rekey_after_time, 0, CTL_CREATE, CTL_EOL);
4890 sysctl_createv(clog, 0, &node, NULL,
4891 CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
4892 CTLTYPE_INT, "rekey_timeout",
4893 SYSCTL_DESCR("session handshake retry time"),
4894 NULL, 0, &wg_rekey_timeout, 0, CTL_CREATE, CTL_EOL);
4895 sysctl_createv(clog, 0, &node, NULL,
4896 CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
4897 CTLTYPE_INT, "rekey_attempt_time",
4898 SYSCTL_DESCR("session handshake timeout"),
4899 NULL, 0, &wg_rekey_attempt_time, 0, CTL_CREATE, CTL_EOL);
4900 sysctl_createv(clog, 0, &node, NULL,
4901 CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
4902 CTLTYPE_INT, "keepalive_timeout",
4903 SYSCTL_DESCR("keepalive timeout"),
4904 NULL, 0, &wg_keepalive_timeout, 0, CTL_CREATE, CTL_EOL);
4905 sysctl_createv(clog, 0, &node, NULL,
4906 CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
4907 CTLTYPE_BOOL, "force_underload",
4908 	    SYSCTL_DESCR("force under-load detection"),
4909 NULL, 0, &wg_force_underload, 0, CTL_CREATE, CTL_EOL);
4910 sysctl_createv(clog, 0, &node, NULL,
4911 CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
4912 CTLTYPE_INT, "debug",
4913 SYSCTL_DESCR("set debug flags 1=debug 2=trace 4=dump"),
4914 NULL, 0, &wg_debug, 0, CTL_CREATE, CTL_EOL);
4915 }
4916 #endif
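/*
 * When the kernel is built with WG_DEBUG_PARAMS these knobs appear
 * under net.wg, e.g. (a usage sketch):
 *
 *	sysctl -w net.wg.debug=7	# 1=debug | 2=trace | 4=dump
 *	sysctl -w net.wg.rekey_timeout=10
 */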
4917
4918 #ifdef WG_RUMPKERNEL
4919 static bool
4920 wg_user_mode(struct wg_softc *wg)
4921 {
4922
4923 return wg->wg_user != NULL;
4924 }
4925
4926 static int
4927 wg_ioctl_linkstr(struct wg_softc *wg, struct ifdrv *ifd)
4928 {
4929 struct ifnet *ifp = &wg->wg_if;
4930 int error;
4931
4932 if (ifp->if_flags & IFF_UP)
4933 return EBUSY;
4934
4935 if (ifd->ifd_cmd == IFLINKSTR_UNSET) {
4936 /* XXX do nothing */
4937 return 0;
4938 } else if (ifd->ifd_cmd != 0) {
4939 return EINVAL;
4940 } else if (wg->wg_user != NULL) {
4941 return EBUSY;
4942 }
4943
4944 	/* ifd_len is assumed to include the terminating NUL */
4945 if (ifd->ifd_len > IFNAMSIZ) {
4946 return E2BIG;
4947 } else if (ifd->ifd_len < 1) {
4948 return EINVAL;
4949 }
4950
4951 char tun_name[IFNAMSIZ];
4952 error = copyinstr(ifd->ifd_data, tun_name, ifd->ifd_len, NULL);
4953 if (error != 0)
4954 return error;
4955
4956 if (strncmp(tun_name, "tun", 3) != 0)
4957 return EINVAL;
4958
4959 error = rumpuser_wg_create(tun_name, wg, &wg->wg_user);
4960
4961 return error;
4962 }
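/*
 * A rump client binds the interface to a host tun(4) device by setting
 * the link string to the tun name before bringing the interface up,
 * e.g. (a sketch) "ifconfig wg0 linkstr tun0"; on success wg_ops is
 * switched to wg_ops_rumpuser in wg_ioctl() above.
 */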
4963
4964 static int
4965 wg_send_user(struct wg_peer *wgp, struct mbuf *m)
4966 {
4967 int error;
4968 struct psref psref;
4969 struct wg_sockaddr *wgsa;
4970 struct wg_softc *wg = wgp->wgp_sc;
4971 struct iovec iov[1];
4972
4973 wgsa = wg_get_endpoint_sa(wgp, &psref);
4974
4975 iov[0].iov_base = mtod(m, void *);
4976 iov[0].iov_len = m->m_len;
4977
4978 /* Send messages to a peer via an ordinary socket. */
4979 error = rumpuser_wg_send_peer(wg->wg_user, wgsatosa(wgsa), iov, 1);
4980
4981 wg_put_sa(wgp, wgsa, &psref);
4982
4983 m_freem(m);
4984
4985 return error;
4986 }
4987
4988 static void
4989 wg_input_user(struct ifnet *ifp, struct mbuf *m, const int af)
4990 {
4991 struct wg_softc *wg = ifp->if_softc;
4992 struct iovec iov[2];
4993 struct sockaddr_storage ss;
4994
4995 KASSERT(af == AF_INET || af == AF_INET6);
4996
4997 WG_TRACE("");
4998
4999 if (af == AF_INET) {
5000 struct sockaddr_in *sin = (struct sockaddr_in *)&ss;
5001 struct ip *ip;
5002
5003 KASSERT(m->m_len >= sizeof(struct ip));
5004 ip = mtod(m, struct ip *);
5005 sockaddr_in_init(sin, &ip->ip_dst, 0);
5006 } else {
5007 struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&ss;
5008 struct ip6_hdr *ip6;
5009
5010 KASSERT(m->m_len >= sizeof(struct ip6_hdr));
5011 ip6 = mtod(m, struct ip6_hdr *);
5012 sockaddr_in6_init(sin6, &ip6->ip6_dst, 0, 0, 0);
5013 }
5014
5015 iov[0].iov_base = &ss;
5016 iov[0].iov_len = ss.ss_len;
5017 iov[1].iov_base = mtod(m, void *);
5018 iov[1].iov_len = m->m_len;
5019
5020 WG_DUMP_BUF(iov[1].iov_base, iov[1].iov_len);
5021
5022 /* Send decrypted packets to users via a tun. */
5023 rumpuser_wg_send_user(wg->wg_user, iov, 2);
5024
5025 m_freem(m);
5026 }
5027
5028 static int
5029 wg_bind_port_user(struct wg_softc *wg, const uint16_t port)
5030 {
5031 int error;
5032 uint16_t old_port = wg->wg_listen_port;
5033
5034 if (port != 0 && old_port == port)
5035 return 0;
5036
5037 error = rumpuser_wg_sock_bind(wg->wg_user, port);
5038 if (error == 0)
5039 wg->wg_listen_port = port;
5040 return error;
5041 }
5042
5043 /*
5044 * Receive user packets.
5045 */
5046 void
5047 rumpkern_wg_recv_user(struct wg_softc *wg, struct iovec *iov, size_t iovlen)
5048 {
5049 struct ifnet *ifp = &wg->wg_if;
5050 struct mbuf *m;
5051 const struct sockaddr *dst;
5052
5053 WG_TRACE("");
5054
5055 dst = iov[0].iov_base;
5056
5057 m = m_gethdr(M_DONTWAIT, MT_DATA);
5058 if (m == NULL)
5059 return;
5060 m->m_len = m->m_pkthdr.len = 0;
5061 m_copyback(m, 0, iov[1].iov_len, iov[1].iov_base);
5062
5063 WG_DLOG("iov_len=%lu\n", iov[1].iov_len);
5064 WG_DUMP_BUF(iov[1].iov_base, iov[1].iov_len);
5065
5066 (void)wg_output(ifp, m, dst, NULL);
5067 }
5068
5069 /*
5070 * Receive packets from a peer.
5071 */
5072 void
5073 rumpkern_wg_recv_peer(struct wg_softc *wg, struct iovec *iov, size_t iovlen)
5074 {
5075 struct mbuf *m;
5076 const struct sockaddr *src;
5077 int bound;
5078
5079 WG_TRACE("");
5080
5081 src = iov[0].iov_base;
5082
5083 m = m_gethdr(M_DONTWAIT, MT_DATA);
5084 if (m == NULL)
5085 return;
5086 m->m_len = m->m_pkthdr.len = 0;
5087 m_copyback(m, 0, iov[1].iov_len, iov[1].iov_base);
5088
5089 WG_DLOG("iov_len=%lu\n", iov[1].iov_len);
5090 WG_DUMP_BUF(iov[1].iov_base, iov[1].iov_len);
5091
5092 bound = curlwp_bind();
5093 wg_handle_packet(wg, m, src);
5094 curlwp_bindx(bound);
5095 }
5096 #endif /* WG_RUMPKERNEL */
5097
5098 /*
5099 * Module infrastructure
5100 */
5101 #include "if_module.h"
5102
5103 IF_MODULE(MODULE_CLASS_DRIVER, wg, "sodium,blake2s")
5104