1 /*	$NetBSD: if_wg.c,v 1.80 2024/07/24 20:29:43 christos Exp $	*/
2
3 /*
4  * Copyright (C) Ryota Ozaki <ozaki.ryota@gmail.com>
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. Neither the name of the project nor the names of its contributors
16 * may be used to endorse or promote products derived from this software
17 * without specific prior written permission.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
20 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
23 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 * SUCH DAMAGE.
30 */
31
32 /*
33 * This network interface aims to implement the WireGuard protocol.
34  * The implementation is based on the WireGuard paper as of
35  * 2018-06-30 [1], which is referred to in the source code with the
36  * label [W].  The specification of the Noise protocol framework as
37  * of 2018-07-11 [2] is referred to with the label [N].
38 *
39 * [1] https://www.wireguard.com/papers/wireguard.pdf
40 * [2] http://noiseprotocol.org/noise.pdf
41 */
42
43 #include <sys/cdefs.h>
44 __KERNEL_RCSID(0, "$NetBSD: if_wg.c,v 1.80 2024/07/24 20:29:43 christos Exp $");
45
46 #ifdef _KERNEL_OPT
47 #include "opt_altq_enabled.h"
48 #include "opt_inet.h"
49 #endif
50
51 #include <sys/param.h>
52 #include <sys/types.h>
53
54 #include <sys/atomic.h>
55 #include <sys/callout.h>
56 #include <sys/cprng.h>
57 #include <sys/cpu.h>
58 #include <sys/device.h>
59 #include <sys/domain.h>
60 #include <sys/errno.h>
61 #include <sys/intr.h>
62 #include <sys/ioctl.h>
63 #include <sys/kernel.h>
64 #include <sys/kmem.h>
65 #include <sys/mbuf.h>
66 #include <sys/module.h>
67 #include <sys/mutex.h>
68 #include <sys/once.h>
69 #include <sys/percpu.h>
70 #include <sys/pserialize.h>
71 #include <sys/psref.h>
72 #include <sys/queue.h>
73 #include <sys/rwlock.h>
74 #include <sys/socket.h>
75 #include <sys/socketvar.h>
76 #include <sys/sockio.h>
77 #include <sys/sysctl.h>
78 #include <sys/syslog.h>
79 #include <sys/systm.h>
80 #include <sys/thmap.h>
81 #include <sys/threadpool.h>
82 #include <sys/time.h>
83 #include <sys/timespec.h>
84 #include <sys/workqueue.h>
85
86 #include <net/bpf.h>
87 #include <net/if.h>
88 #include <net/if_types.h>
89 #include <net/if_wg.h>
90 #include <net/pktqueue.h>
91 #include <net/route.h>
92
93 #include <netinet/in.h>
94 #include <netinet/in_pcb.h>
95 #include <netinet/in_var.h>
96 #include <netinet/ip.h>
97 #include <netinet/ip_var.h>
98 #include <netinet/udp.h>
99 #include <netinet/udp_var.h>
100
101 #ifdef INET6
102 #include <netinet/ip6.h>
103 #include <netinet6/in6_pcb.h>
104 #include <netinet6/in6_var.h>
105 #include <netinet6/ip6_var.h>
106 #include <netinet6/udp6_var.h>
107 #endif /* INET6 */
108
109 #include <prop/proplib.h>
110
111 #include <crypto/blake2/blake2s.h>
112 #include <crypto/sodium/crypto_aead_chacha20poly1305.h>
113 #include <crypto/sodium/crypto_aead_xchacha20poly1305.h>
114 #include <crypto/sodium/crypto_scalarmult.h>
115
116 #include "ioconf.h"
117
118 #ifdef WG_RUMPKERNEL
119 #include "wg_user.h"
120 #endif
121
122 /*
123 * Data structures
124  * - struct wg_softc is an instance of a wg interface
125 * - It has a list of peers (struct wg_peer)
126 * - It has a threadpool job that sends/receives handshake messages and
127 * runs event handlers
128  *   - It has its own two routing tables: one for IPv4 and the other for IPv6
129  * - struct wg_peer represents a peer
130 * - It has a struct work to handle handshakes and timer tasks
131 * - It has a pair of session instances (struct wg_session)
132 * - It has a pair of endpoint instances (struct wg_sockaddr)
133  *     - Normally one endpoint is used; the second one is used only during
134  *       a peer migration (a change of the peer's IP address)
135  *   - It has a list of IP addresses and subnets called allowedips
136  *     (struct wg_allowedip)
137  *     - A packet sent over a session is allowed if its destination matches
138  *       an IP address or subnet in the list (see the sketch below)
139 * - struct wg_session represents a session of a secure tunnel with a peer
140  *   - Two session instances belong to a peer: a stable session and an
141  *     unstable session
142  *   - A handshake for a session always starts on the unstable instance
143 * - Once a session is established, its instance becomes stable and the
144 * other becomes unstable instead
145 * - Data messages are always sent via a stable session
146 *
147 * Locking notes:
148 * - Each wg has a mutex(9) wg_lock, and a rwlock(9) wg_rwlock
149 * - Changes to the peer list are serialized by wg_lock
150 * - The peer list may be read with pserialize(9) and psref(9)
151 * - The rwlock (wg_rwlock) protects the routing tables (wg_rtable_ipv[46])
152 * => XXX replace by pserialize when routing table is psz-safe
153 * - Each peer (struct wg_peer, wgp) has a mutex wgp_lock, which can be taken
154 * only in thread context and serializes:
155 * - the stable and unstable session pointers
156 * - all unstable session state
157 * - Packet processing may be done in softint context:
158 * - The stable session can be read under pserialize(9) or psref(9)
159 * - The stable session is always ESTABLISHED
160 * - On a session swap, we must wait for all readers to release a
161 * reference to a stable session before changing wgs_state and
162 * session states
163 * - Lock order: wg_lock -> wgp_lock
164 */
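
/*
 * Illustrative sketch (hypothetical helper, not part of the driver): the
 * allowedips check described above reduces to a prefix match against each
 * entry; the driver implements it with the radix trees rooted at
 * wg_rtable_ipv4/wg_rtable_ipv6 rather than with a helper like this.
 */
#if 0
/* Hypothetical example: match a destination against one IPv4 allowedip. */
static bool
wg_example_allowedip_match4(const struct in_addr *dst,
    const struct in_addr *allowed, uint8_t cidr)
{
	const uint32_t mask = (cidr == 0) ? 0 :
	    htonl(0xffffffffU << (32 - cidr));

	/* Allowed iff dst falls inside allowed/cidr. */
	return (dst->s_addr & mask) == (allowed->s_addr & mask);
}
#endif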
165
166
167 #define WGLOG(level, fmt, args...) \
168 log(level, "%s: " fmt, __func__, ##args)
169
170 // #define WG_DEBUG
171
172 /* Debug options */
173 #ifdef WG_DEBUG
174 /* Output debug logs */
175 #ifndef WG_DEBUG_LOG
176 #define WG_DEBUG_LOG
177 #endif
178 /* Output trace logs */
179 #ifndef WG_DEBUG_TRACE
180 #define WG_DEBUG_TRACE
181 #endif
182 /* Output hash values, etc. */
183 #ifndef WG_DEBUG_DUMP
184 #define WG_DEBUG_DUMP
185 #endif
186 /* Make some internal parameters configurable for testing and debugging */
187 #ifndef WG_DEBUG_PARAMS
188 #define WG_DEBUG_PARAMS
189 #endif
190 int wg_debug;
191 #define WG_DEBUG_FLAGS_LOG 1
192 #define WG_DEBUG_FLAGS_TRACE 2
193 #define WG_DEBUG_FLAGS_DUMP 4
194 #endif
195
196
197 #ifdef WG_DEBUG_TRACE
198 #define WG_TRACE(msg) do { \
199 if (wg_debug & WG_DEBUG_FLAGS_TRACE) \
200 log(LOG_DEBUG, "%s:%d: %s\n", __func__, __LINE__, (msg)); \
201 } while (0)
202 #else
203 #define WG_TRACE(msg) __nothing
204 #endif
205
206 #ifdef WG_DEBUG_LOG
207 #define WG_DLOG(fmt, args...) do { \
208 if (wg_debug & WG_DEBUG_FLAGS_LOG) \
209 log(LOG_DEBUG, "%s: " fmt, __func__, ##args); \
210 } while (0)
211 #else
212 #define WG_DLOG(fmt, args...) __nothing
213 #endif
214
215 #define WG_LOG_RATECHECK(wgprc, level, fmt, args...) do { \
216 if ((wg_debug & WG_DEBUG_FLAGS_LOG) && \
217 ppsratecheck(&(wgprc)->wgprc_lasttime, \
218 &(wgprc)->wgprc_curpps, 1)) { \
219 log(level, fmt, ##args); \
220 } \
221 } while (0)
222
223 #ifdef WG_DEBUG_PARAMS
224 static bool wg_force_underload = false;
225 #endif
226
227 #ifdef WG_DEBUG_DUMP
228
229 static char *
230 gethexdump(const char *p, size_t n)
231 {
232 char *buf;
233 size_t i;
234
235 if (n > SIZE_MAX/3 - 1)
236 return NULL;
237 buf = kmem_alloc(3*n + 1, KM_NOSLEEP);
238 if (buf == NULL)
239 return NULL;
240 for (i = 0; i < n; i++)
241 snprintf(buf + 3*i, 3 + 1, " %02hhx", p[i]);
242 return buf;
243 }
244
245 static void
246 puthexdump(char *buf, const void *p, size_t n)
247 {
248
249 if (buf == NULL)
250 return;
251 kmem_free(buf, 3*n + 1);
252 }
253
254 #ifdef WG_RUMPKERNEL
255 static void
256 wg_dump_buf(const char *func, const char *buf, const size_t size)
257 {
258 if ((wg_debug & WG_DEBUG_FLAGS_DUMP) == 0)
259 return;
260
261 char *hex = gethexdump(buf, size);
262
263 log(LOG_DEBUG, "%s: %s\n", func, hex ? hex : "(enomem)");
264 puthexdump(hex, buf, size);
265 }
266 #endif
267
268 static void
269 wg_dump_hash(const uint8_t *func, const uint8_t *name, const uint8_t *hash,
270 const size_t size)
271 {
272 if ((wg_debug & WG_DEBUG_FLAGS_DUMP) == 0)
273 return;
274
275 char *hex = gethexdump(hash, size);
276
277 log(LOG_DEBUG, "%s: %s: %s\n", func, name, hex ? hex : "(enomem)");
278 puthexdump(hex, hash, size);
279 }
280
281 #define WG_DUMP_HASH(name, hash) \
282 wg_dump_hash(__func__, name, hash, WG_HASH_LEN)
283 #define WG_DUMP_HASH48(name, hash) \
284 wg_dump_hash(__func__, name, hash, 48)
285 #define WG_DUMP_BUF(buf, size) \
286 wg_dump_buf(__func__, buf, size)
287 #else
288 #define WG_DUMP_HASH(name, hash) __nothing
289 #define WG_DUMP_HASH48(name, hash) __nothing
290 #define WG_DUMP_BUF(buf, size) __nothing
291 #endif /* WG_DEBUG_DUMP */
292
293 /* chosen somewhat arbitrarily -- fits in signed 16 bits NUL-terminated */
294 #define WG_MAX_PROPLEN 32766
295
296 #define WG_MTU 1420
297 #define WG_ALLOWEDIPS 16
298
299 #define CURVE25519_KEY_LEN 32
300 #define TAI64N_LEN sizeof(uint32_t) * 3
301 #define POLY1305_AUTHTAG_LEN 16
302 #define HMAC_BLOCK_LEN 64
303
304 /* [N] 4.1: "DHLEN must be 32 or greater." WireGuard chooses 32. */
305 /* [N] 4.3: Hash functions */
306 #define NOISE_DHLEN 32
307 /* [N] 4.3: "Must be 32 or 64." WireGuard chooses 32. */
308 #define NOISE_HASHLEN 32
309 #define NOISE_BLOCKLEN 64
310 #define NOISE_HKDF_OUTPUT_LEN NOISE_HASHLEN
311 /* [N] 5.1: "k" */
312 #define NOISE_CIPHER_KEY_LEN 32
313 /*
314 * [N] 9.2: "psk"
315 * "... psk is a 32-byte secret value provided by the application."
316 */
317 #define NOISE_PRESHARED_KEY_LEN 32
318
319 #define WG_STATIC_KEY_LEN CURVE25519_KEY_LEN
320 #define WG_TIMESTAMP_LEN TAI64N_LEN
321
322 #define WG_PRESHARED_KEY_LEN NOISE_PRESHARED_KEY_LEN
323
324 #define WG_COOKIE_LEN 16
325 #define WG_MAC_LEN 16
326 #define WG_RANDVAL_LEN 24
327
328 #define WG_EPHEMERAL_KEY_LEN CURVE25519_KEY_LEN
329 /* [N] 5.2: "ck: A chaining key of HASHLEN bytes" */
330 #define WG_CHAINING_KEY_LEN NOISE_HASHLEN
331 /* [N] 5.2: "h: A hash output of HASHLEN bytes" */
332 #define WG_HASH_LEN NOISE_HASHLEN
333 #define WG_CIPHER_KEY_LEN NOISE_CIPHER_KEY_LEN
334 #define WG_DH_OUTPUT_LEN NOISE_DHLEN
335 #define WG_KDF_OUTPUT_LEN NOISE_HKDF_OUTPUT_LEN
336 #define WG_AUTHTAG_LEN POLY1305_AUTHTAG_LEN
337 #define WG_DATA_KEY_LEN 32
338 #define WG_SALT_LEN 24
339
340 /*
341 * The protocol messages
342 */
343 struct wg_msg {
344 uint32_t wgm_type;
345 } __packed;
346
347 /* [W] 5.4.2 First Message: Initiator to Responder */
348 struct wg_msg_init {
349 uint32_t wgmi_type;
350 uint32_t wgmi_sender;
351 uint8_t wgmi_ephemeral[WG_EPHEMERAL_KEY_LEN];
352 uint8_t wgmi_static[WG_STATIC_KEY_LEN + WG_AUTHTAG_LEN];
353 uint8_t wgmi_timestamp[WG_TIMESTAMP_LEN + WG_AUTHTAG_LEN];
354 uint8_t wgmi_mac1[WG_MAC_LEN];
355 uint8_t wgmi_mac2[WG_MAC_LEN];
356 } __packed;
357
358 /* [W] 5.4.3 Second Message: Responder to Initiator */
359 struct wg_msg_resp {
360 uint32_t wgmr_type;
361 uint32_t wgmr_sender;
362 uint32_t wgmr_receiver;
363 uint8_t wgmr_ephemeral[WG_EPHEMERAL_KEY_LEN];
364 uint8_t wgmr_empty[0 + WG_AUTHTAG_LEN];
365 uint8_t wgmr_mac1[WG_MAC_LEN];
366 uint8_t wgmr_mac2[WG_MAC_LEN];
367 } __packed;
368
369 /* [W] 5.4.6 Subsequent Messages: Transport Data Messages */
370 struct wg_msg_data {
371 uint32_t wgmd_type;
372 uint32_t wgmd_receiver;
373 uint64_t wgmd_counter;
374 uint32_t wgmd_packet[0];
375 } __packed;
376
377 /* [W] 5.4.7 Under Load: Cookie Reply Message */
378 struct wg_msg_cookie {
379 uint32_t wgmc_type;
380 uint32_t wgmc_receiver;
381 uint8_t wgmc_salt[WG_SALT_LEN];
382 uint8_t wgmc_cookie[WG_COOKIE_LEN + WG_AUTHTAG_LEN];
383 } __packed;
384
385 #define WG_MSG_TYPE_INIT 1
386 #define WG_MSG_TYPE_RESP 2
387 #define WG_MSG_TYPE_COOKIE 3
388 #define WG_MSG_TYPE_DATA 4
389 #define WG_MSG_TYPE_MAX WG_MSG_TYPE_DATA
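
/*
 * For illustration, the packed layouts above add up to the fixed wire
 * sizes that follow from the field sizes in [W] 5.4: 148 bytes for the
 * initiation message, 92 for the response, 64 for the cookie reply, and
 * a 16-byte header for transport data messages.  A sketch of the
 * corresponding compile-time checks (not in the original source):
 */
#if 0
CTASSERT(sizeof(struct wg_msg_init) == 148);
CTASSERT(sizeof(struct wg_msg_resp) == 92);
CTASSERT(sizeof(struct wg_msg_cookie) == 64);
CTASSERT(sizeof(struct wg_msg_data) == 16);
#endif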
390
391 /* Sliding windows */
392
393 #define SLIWIN_BITS 2048u
394 #define SLIWIN_TYPE uint32_t
395 #define SLIWIN_BPW NBBY*sizeof(SLIWIN_TYPE)
396 #define SLIWIN_WORDS howmany(SLIWIN_BITS, SLIWIN_BPW)
397 #define SLIWIN_NPKT (SLIWIN_BITS - NBBY*sizeof(SLIWIN_TYPE))
398
399 struct sliwin {
400 SLIWIN_TYPE B[SLIWIN_WORDS];
401 uint64_t T;
402 };
403
404 static void
405 sliwin_reset(struct sliwin *W)
406 {
407
408 memset(W, 0, sizeof(*W));
409 }
410
411 static int
412 sliwin_check_fast(const volatile struct sliwin *W, uint64_t S)
413 {
414
415 /*
416 * If it's more than one window older than the highest sequence
417 * number we've seen, reject.
418 */
419 #ifdef __HAVE_ATOMIC64_LOADSTORE
420 if (S + SLIWIN_NPKT < atomic_load_relaxed(&W->T))
421 return EAUTH;
422 #endif
423
424 /*
425 * Otherwise, we need to take the lock to decide, so don't
426 * reject just yet. Caller must serialize a call to
427 * sliwin_update in this case.
428 */
429 return 0;
430 }
431
432 static int
433 sliwin_update(struct sliwin *W, uint64_t S)
434 {
435 unsigned word, bit;
436
437 /*
438 * If it's more than one window older than the highest sequence
439 * number we've seen, reject.
440 */
441 if (S + SLIWIN_NPKT < W->T)
442 return EAUTH;
443
444 /*
445 * If it's higher than the highest sequence number we've seen,
446 * advance the window.
447 */
448 if (S > W->T) {
449 uint64_t i = W->T / SLIWIN_BPW;
450 uint64_t j = S / SLIWIN_BPW;
451 unsigned k;
452
453 for (k = 0; k < MIN(j - i, SLIWIN_WORDS); k++)
454 W->B[(i + k + 1) % SLIWIN_WORDS] = 0;
455 #ifdef __HAVE_ATOMIC64_LOADSTORE
456 atomic_store_relaxed(&W->T, S);
457 #else
458 W->T = S;
459 #endif
460 }
461
462 /* Test and set the bit -- if already set, reject. */
463 word = (S / SLIWIN_BPW) % SLIWIN_WORDS;
464 bit = S % SLIWIN_BPW;
465 if (W->B[word] & (1UL << bit))
466 return EAUTH;
467 W->B[word] |= 1U << bit;
468
469 /* Accept! */
470 return 0;
471 }
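
/*
 * Usage sketch (hypothetical helper, for illustration only): how a
 * receiver is expected to drive the sliding window for an incoming
 * counter S.  sliwin_check_fast() is a cheap, lock-free pre-filter;
 * sliwin_update() must be serialized by the caller (the driver uses
 * wgs_recvwin->lock) and gives the authoritative accept/reject answer.
 */
#if 0
static int
wg_example_recv_counter(struct sliwin *W, kmutex_t *lock, uint64_t S)
{
	int error;

	if (sliwin_check_fast(W, S))	/* definitely too old */
		return EAUTH;

	mutex_enter(lock);
	error = sliwin_update(W, S);	/* rejects replays and stale S */
	mutex_exit(lock);

	return error;
}
#endif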
472
473 struct wg_session {
474 struct wg_peer *wgs_peer;
475 struct psref_target
476 wgs_psref;
477
478 int wgs_state;
479 #define WGS_STATE_UNKNOWN 0
480 #define WGS_STATE_INIT_ACTIVE 1
481 #define WGS_STATE_INIT_PASSIVE 2
482 #define WGS_STATE_ESTABLISHED 3
483 #define WGS_STATE_DESTROYING 4
484
485 time_t wgs_time_established;
486 time_t wgs_time_last_data_sent;
487 bool wgs_is_initiator;
488
489 uint32_t wgs_local_index;
490 uint32_t wgs_remote_index;
491 #ifdef __HAVE_ATOMIC64_LOADSTORE
492 volatile uint64_t
493 wgs_send_counter;
494 #else
495 kmutex_t wgs_send_counter_lock;
496 uint64_t wgs_send_counter;
497 #endif
498
499 struct {
500 kmutex_t lock;
501 struct sliwin window;
502 } *wgs_recvwin;
503
504 uint8_t wgs_handshake_hash[WG_HASH_LEN];
505 uint8_t wgs_chaining_key[WG_CHAINING_KEY_LEN];
506 uint8_t wgs_ephemeral_key_pub[WG_EPHEMERAL_KEY_LEN];
507 uint8_t wgs_ephemeral_key_priv[WG_EPHEMERAL_KEY_LEN];
508 uint8_t wgs_ephemeral_key_peer[WG_EPHEMERAL_KEY_LEN];
509 uint8_t wgs_tkey_send[WG_DATA_KEY_LEN];
510 uint8_t wgs_tkey_recv[WG_DATA_KEY_LEN];
511 };
512
513 struct wg_sockaddr {
514 union {
515 struct sockaddr_storage _ss;
516 struct sockaddr _sa;
517 struct sockaddr_in _sin;
518 struct sockaddr_in6 _sin6;
519 };
520 struct psref_target wgsa_psref;
521 };
522
523 #define wgsatoss(wgsa) (&(wgsa)->_ss)
524 #define wgsatosa(wgsa) (&(wgsa)->_sa)
525 #define wgsatosin(wgsa) (&(wgsa)->_sin)
526 #define wgsatosin6(wgsa) (&(wgsa)->_sin6)
527
528 #define wgsa_family(wgsa) (wgsatosa(wgsa)->sa_family)
529
530 struct wg_peer;
531 struct wg_allowedip {
532 struct radix_node wga_nodes[2];
533 struct wg_sockaddr _wga_sa_addr;
534 struct wg_sockaddr _wga_sa_mask;
535 #define wga_sa_addr _wga_sa_addr._sa
536 #define wga_sa_mask _wga_sa_mask._sa
537
538 int wga_family;
539 uint8_t wga_cidr;
540 union {
541 struct in_addr _ip4;
542 struct in6_addr _ip6;
543 } wga_addr;
544 #define wga_addr4 wga_addr._ip4
545 #define wga_addr6 wga_addr._ip6
546
547 struct wg_peer *wga_peer;
548 };
549
550 typedef uint8_t wg_timestamp_t[WG_TIMESTAMP_LEN];
551
552 struct wg_ppsratecheck {
553 struct timeval wgprc_lasttime;
554 int wgprc_curpps;
555 };
556
557 struct wg_softc;
558 struct wg_peer {
559 struct wg_softc *wgp_sc;
560 char wgp_name[WG_PEER_NAME_MAXLEN + 1];
561 struct pslist_entry wgp_peerlist_entry;
562 pserialize_t wgp_psz;
563 struct psref_target wgp_psref;
564 kmutex_t *wgp_lock;
565 kmutex_t *wgp_intr_lock;
566
567 uint8_t wgp_pubkey[WG_STATIC_KEY_LEN];
568 struct wg_sockaddr *wgp_endpoint;
569 struct wg_sockaddr *wgp_endpoint0;
570 volatile unsigned wgp_endpoint_changing;
571 bool wgp_endpoint_available;
572
573 /* The preshared key (optional) */
574 uint8_t wgp_psk[WG_PRESHARED_KEY_LEN];
575
576 struct wg_session *wgp_session_stable;
577 struct wg_session *wgp_session_unstable;
578
579 /* first outgoing packet awaiting session initiation */
580 struct mbuf *wgp_pending;
581
582 /* timestamp in big-endian */
583 wg_timestamp_t wgp_timestamp_latest_init;
584
585 struct timespec wgp_last_handshake_time;
586
587 callout_t wgp_rekey_timer;
588 callout_t wgp_handshake_timeout_timer;
589 callout_t wgp_session_dtor_timer;
590
591 time_t wgp_handshake_start_time;
592
593 int wgp_n_allowedips;
594 struct wg_allowedip wgp_allowedips[WG_ALLOWEDIPS];
595
596 time_t wgp_latest_cookie_time;
597 uint8_t wgp_latest_cookie[WG_COOKIE_LEN];
598 uint8_t wgp_last_sent_mac1[WG_MAC_LEN];
599 bool wgp_last_sent_mac1_valid;
600 uint8_t wgp_last_sent_cookie[WG_COOKIE_LEN];
601 bool wgp_last_sent_cookie_valid;
602
603 time_t wgp_last_msg_received_time[WG_MSG_TYPE_MAX];
604
605 time_t wgp_last_genrandval_time;
606 uint32_t wgp_randval;
607
608 struct wg_ppsratecheck wgp_ppsratecheck;
609
610 struct work wgp_work;
611 unsigned int wgp_tasks;
612 #define WGP_TASK_SEND_INIT_MESSAGE __BIT(0)
613 #define WGP_TASK_RETRY_HANDSHAKE __BIT(1)
614 #define WGP_TASK_ESTABLISH_SESSION __BIT(2)
615 #define WGP_TASK_ENDPOINT_CHANGED __BIT(3)
616 #define WGP_TASK_SEND_KEEPALIVE_MESSAGE __BIT(4)
617 #define WGP_TASK_DESTROY_PREV_SESSION __BIT(5)
618 };
619
620 struct wg_ops;
621
622 struct wg_softc {
623 struct ifnet wg_if;
624 LIST_ENTRY(wg_softc) wg_list;
625 kmutex_t *wg_lock;
626 kmutex_t *wg_intr_lock;
627 krwlock_t *wg_rwlock;
628
629 uint8_t wg_privkey[WG_STATIC_KEY_LEN];
630 uint8_t wg_pubkey[WG_STATIC_KEY_LEN];
631
632 int wg_npeers;
633 struct pslist_head wg_peers;
634 struct thmap *wg_peers_bypubkey;
635 struct thmap *wg_peers_byname;
636 struct thmap *wg_sessions_byindex;
637 uint16_t wg_listen_port;
638
639 struct threadpool *wg_threadpool;
640
641 struct threadpool_job wg_job;
642 int wg_upcalls;
643 #define WG_UPCALL_INET __BIT(0)
644 #define WG_UPCALL_INET6 __BIT(1)
645
646 #ifdef INET
647 struct socket *wg_so4;
648 struct radix_node_head *wg_rtable_ipv4;
649 #endif
650 #ifdef INET6
651 struct socket *wg_so6;
652 struct radix_node_head *wg_rtable_ipv6;
653 #endif
654
655 struct wg_ppsratecheck wg_ppsratecheck;
656
657 struct wg_ops *wg_ops;
658
659 #ifdef WG_RUMPKERNEL
660 struct wg_user *wg_user;
661 #endif
662 };
663
664 /* [W] 6.1 Preliminaries */
665 #define WG_REKEY_AFTER_MESSAGES (1ULL << 60)
666 #define WG_REJECT_AFTER_MESSAGES (UINT64_MAX - (1 << 13))
667 #define WG_REKEY_AFTER_TIME 120
668 #define WG_REJECT_AFTER_TIME 180
669 #define WG_REKEY_ATTEMPT_TIME 90
670 #define WG_REKEY_TIMEOUT 5
671 #define WG_KEEPALIVE_TIMEOUT 10
672
673 #define WG_COOKIE_TIME 120
674 #define WG_RANDVAL_TIME (2 * 60)
675
676 static uint64_t wg_rekey_after_messages = WG_REKEY_AFTER_MESSAGES;
677 static uint64_t wg_reject_after_messages = WG_REJECT_AFTER_MESSAGES;
678 static unsigned wg_rekey_after_time = WG_REKEY_AFTER_TIME;
679 static unsigned wg_reject_after_time = WG_REJECT_AFTER_TIME;
680 static unsigned wg_rekey_attempt_time = WG_REKEY_ATTEMPT_TIME;
681 static unsigned wg_rekey_timeout = WG_REKEY_TIMEOUT;
682 static unsigned wg_keepalive_timeout = WG_KEEPALIVE_TIMEOUT;
683
684 static struct mbuf *
685 wg_get_mbuf(size_t, size_t);
686
687 static int wg_send_data_msg(struct wg_peer *, struct wg_session *,
688 struct mbuf *);
689 static int wg_send_cookie_msg(struct wg_softc *, struct wg_peer *,
690 const uint32_t, const uint8_t [WG_MAC_LEN],
691 const struct sockaddr *);
692 static int wg_send_handshake_msg_resp(struct wg_softc *, struct wg_peer *,
693 struct wg_session *, const struct wg_msg_init *);
694 static void wg_send_keepalive_msg(struct wg_peer *, struct wg_session *);
695
696 static struct wg_peer *
697 wg_pick_peer_by_sa(struct wg_softc *, const struct sockaddr *,
698 struct psref *);
699 static struct wg_peer *
700 wg_lookup_peer_by_pubkey(struct wg_softc *,
701 const uint8_t [WG_STATIC_KEY_LEN], struct psref *);
702
703 static struct wg_session *
704 wg_lookup_session_by_index(struct wg_softc *,
705 const uint32_t, struct psref *);
706
707 static void wg_update_endpoint_if_necessary(struct wg_peer *,
708 const struct sockaddr *);
709
710 static void wg_schedule_rekey_timer(struct wg_peer *);
711 static void wg_schedule_session_dtor_timer(struct wg_peer *);
712
713 static bool wg_is_underload(struct wg_softc *, struct wg_peer *, int);
714 static void wg_calculate_keys(struct wg_session *, const bool);
715
716 static void wg_clear_states(struct wg_session *);
717
718 static void wg_get_peer(struct wg_peer *, struct psref *);
719 static void wg_put_peer(struct wg_peer *, struct psref *);
720
721 static int wg_send_so(struct wg_peer *, struct mbuf *);
722 static int wg_send_udp(struct wg_peer *, struct mbuf *);
723 static int wg_output(struct ifnet *, struct mbuf *,
724 const struct sockaddr *, const struct rtentry *);
725 static void wg_input(struct ifnet *, struct mbuf *, const int);
726 static int wg_ioctl(struct ifnet *, u_long, void *);
727 static int wg_bind_port(struct wg_softc *, const uint16_t);
728 static int wg_init(struct ifnet *);
729 #ifdef ALTQ
730 static void wg_start(struct ifnet *);
731 #endif
732 static void wg_stop(struct ifnet *, int);
733
734 static void wg_peer_work(struct work *, void *);
735 static void wg_job(struct threadpool_job *);
736 static void wgintr(void *);
737 static void wg_purge_pending_packets(struct wg_peer *);
738
739 static int wg_clone_create(struct if_clone *, int);
740 static int wg_clone_destroy(struct ifnet *);
741
742 struct wg_ops {
743 int (*send_hs_msg)(struct wg_peer *, struct mbuf *);
744 int (*send_data_msg)(struct wg_peer *, struct mbuf *);
745 void (*input)(struct ifnet *, struct mbuf *, const int);
746 int (*bind_port)(struct wg_softc *, const uint16_t);
747 };
748
749 struct wg_ops wg_ops_rumpkernel = {
750 .send_hs_msg = wg_send_so,
751 .send_data_msg = wg_send_udp,
752 .input = wg_input,
753 .bind_port = wg_bind_port,
754 };
755
756 #ifdef WG_RUMPKERNEL
757 static bool wg_user_mode(struct wg_softc *);
758 static int wg_ioctl_linkstr(struct wg_softc *, struct ifdrv *);
759
760 static int wg_send_user(struct wg_peer *, struct mbuf *);
761 static void wg_input_user(struct ifnet *, struct mbuf *, const int);
762 static int wg_bind_port_user(struct wg_softc *, const uint16_t);
763
764 struct wg_ops wg_ops_rumpuser = {
765 .send_hs_msg = wg_send_user,
766 .send_data_msg = wg_send_user,
767 .input = wg_input_user,
768 .bind_port = wg_bind_port_user,
769 };
770 #endif
771
772 #define WG_PEER_READER_FOREACH(wgp, wg) \
773 PSLIST_READER_FOREACH((wgp), &(wg)->wg_peers, struct wg_peer, \
774 wgp_peerlist_entry)
775 #define WG_PEER_WRITER_FOREACH(wgp, wg) \
776 PSLIST_WRITER_FOREACH((wgp), &(wg)->wg_peers, struct wg_peer, \
777 wgp_peerlist_entry)
778 #define WG_PEER_WRITER_INSERT_HEAD(wgp, wg) \
779 PSLIST_WRITER_INSERT_HEAD(&(wg)->wg_peers, (wgp), wgp_peerlist_entry)
780 #define WG_PEER_WRITER_REMOVE(wgp) \
781 PSLIST_WRITER_REMOVE((wgp), wgp_peerlist_entry)
782
783 struct wg_route {
784 struct radix_node wgr_nodes[2];
785 struct wg_peer *wgr_peer;
786 };
787
788 static struct radix_node_head *
789 wg_rnh(struct wg_softc *wg, const int family)
790 {
791
792 switch (family) {
793 case AF_INET:
794 return wg->wg_rtable_ipv4;
795 #ifdef INET6
796 case AF_INET6:
797 return wg->wg_rtable_ipv6;
798 #endif
799 default:
800 return NULL;
801 }
802 }
803
804
805 /*
806 * Global variables
807 */
808 static volatile unsigned wg_count __cacheline_aligned;
809
810 struct psref_class *wg_psref_class __read_mostly;
811
812 static struct if_clone wg_cloner =
813 IF_CLONE_INITIALIZER("wg", wg_clone_create, wg_clone_destroy);
814
815 static struct pktqueue *wg_pktq __read_mostly;
816 static struct workqueue *wg_wq __read_mostly;
817
818 void wgattach(int);
819 /* ARGSUSED */
820 void
821 wgattach(int count)
822 {
823 /*
824 * Nothing to do here, initialization is handled by the
825 	 * module initialization code in wginit() below.
826 */
827 }
828
829 static void
830 wginit(void)
831 {
832
833 wg_psref_class = psref_class_create("wg", IPL_SOFTNET);
834
835 if_clone_attach(&wg_cloner);
836 }
837
838 /*
839 * XXX Kludge: This should just happen in wginit, but workqueue_create
840 * cannot be run until after CPUs have been detected, and wginit runs
841 * before configure.
842 */
843 static int
844 wginitqueues(void)
845 {
846 int error __diagused;
847
848 wg_pktq = pktq_create(IFQ_MAXLEN, wgintr, NULL);
849 KASSERT(wg_pktq != NULL);
850
851 error = workqueue_create(&wg_wq, "wgpeer", wg_peer_work, NULL,
852 PRI_NONE, IPL_SOFTNET, WQ_MPSAFE|WQ_PERCPU);
853 KASSERT(error == 0);
854
855 return 0;
856 }
857
858 static void
859 wg_guarantee_initialized(void)
860 {
861 static ONCE_DECL(init);
862 int error __diagused;
863
864 error = RUN_ONCE(&init, wginitqueues);
865 KASSERT(error == 0);
866 }
867
868 static int
869 wg_count_inc(void)
870 {
871 unsigned o, n;
872
873 do {
874 o = atomic_load_relaxed(&wg_count);
875 if (o == UINT_MAX)
876 return ENFILE;
877 n = o + 1;
878 } while (atomic_cas_uint(&wg_count, o, n) != o);
879
880 return 0;
881 }
882
883 static void
884 wg_count_dec(void)
885 {
886 unsigned c __diagused;
887
888 c = atomic_dec_uint_nv(&wg_count);
889 KASSERT(c != UINT_MAX);
890 }
891
892 static int
893 wgdetach(void)
894 {
895
896 /* Prevent new interface creation. */
897 if_clone_detach(&wg_cloner);
898
899 /* Check whether there are any existing interfaces. */
900 if (atomic_load_relaxed(&wg_count)) {
901 /* Back out -- reattach the cloner. */
902 if_clone_attach(&wg_cloner);
903 return EBUSY;
904 }
905
906 /* No interfaces left. Nuke it. */
907 workqueue_destroy(wg_wq);
908 pktq_destroy(wg_pktq);
909 psref_class_destroy(wg_psref_class);
910
911 return 0;
912 }
913
914 static void
915 wg_init_key_and_hash(uint8_t ckey[WG_CHAINING_KEY_LEN],
916 uint8_t hash[WG_HASH_LEN])
917 {
918 /* [W] 5.4: CONSTRUCTION */
919 const char *signature = "Noise_IKpsk2_25519_ChaChaPoly_BLAKE2s";
920 /* [W] 5.4: IDENTIFIER */
921 	const char *id = "WireGuard v1 zx2c4 Jason@zx2c4.com";
922 struct blake2s state;
923
924 blake2s(ckey, WG_CHAINING_KEY_LEN, NULL, 0,
925 signature, strlen(signature));
926
927 CTASSERT(WG_HASH_LEN == WG_CHAINING_KEY_LEN);
928 memcpy(hash, ckey, WG_CHAINING_KEY_LEN);
929
930 blake2s_init(&state, WG_HASH_LEN, NULL, 0);
931 blake2s_update(&state, ckey, WG_CHAINING_KEY_LEN);
932 blake2s_update(&state, id, strlen(id));
933 blake2s_final(&state, hash);
934
935 WG_DUMP_HASH("ckey", ckey);
936 WG_DUMP_HASH("hash", hash);
937 }
938
939 static void
940 wg_algo_hash(uint8_t hash[WG_HASH_LEN], const uint8_t input[],
941 const size_t inputsize)
942 {
943 struct blake2s state;
944
945 blake2s_init(&state, WG_HASH_LEN, NULL, 0);
946 blake2s_update(&state, hash, WG_HASH_LEN);
947 blake2s_update(&state, input, inputsize);
948 blake2s_final(&state, hash);
949 }
950
951 static void
952 wg_algo_mac(uint8_t out[], const size_t outsize,
953 const uint8_t key[], const size_t keylen,
954 const uint8_t input1[], const size_t input1len,
955 const uint8_t input2[], const size_t input2len)
956 {
957 struct blake2s state;
958
959 blake2s_init(&state, outsize, key, keylen);
960
961 blake2s_update(&state, input1, input1len);
962 if (input2 != NULL)
963 blake2s_update(&state, input2, input2len);
964 blake2s_final(&state, out);
965 }
966
967 static void
968 wg_algo_mac_mac1(uint8_t out[], const size_t outsize,
969 const uint8_t input1[], const size_t input1len,
970 const uint8_t input2[], const size_t input2len)
971 {
972 struct blake2s state;
973 /* [W] 5.4: LABEL-MAC1 */
974 const char *label = "mac1----";
975 uint8_t key[WG_HASH_LEN];
976
977 blake2s_init(&state, sizeof(key), NULL, 0);
978 blake2s_update(&state, label, strlen(label));
979 blake2s_update(&state, input1, input1len);
980 blake2s_final(&state, key);
981
982 blake2s_init(&state, outsize, key, sizeof(key));
983 if (input2 != NULL)
984 blake2s_update(&state, input2, input2len);
985 blake2s_final(&state, out);
986 }
987
988 static void
989 wg_algo_mac_cookie(uint8_t out[], const size_t outsize,
990 const uint8_t input1[], const size_t input1len)
991 {
992 struct blake2s state;
993 /* [W] 5.4: LABEL-COOKIE */
994 const char *label = "cookie--";
995
996 blake2s_init(&state, outsize, NULL, 0);
997 blake2s_update(&state, label, strlen(label));
998 blake2s_update(&state, input1, input1len);
999 blake2s_final(&state, out);
1000 }
1001
1002 static void
1003 wg_algo_generate_keypair(uint8_t pubkey[WG_EPHEMERAL_KEY_LEN],
1004 uint8_t privkey[WG_EPHEMERAL_KEY_LEN])
1005 {
1006
1007 CTASSERT(WG_EPHEMERAL_KEY_LEN == crypto_scalarmult_curve25519_BYTES);
1008
1009 cprng_strong(kern_cprng, privkey, WG_EPHEMERAL_KEY_LEN, 0);
1010 crypto_scalarmult_base(pubkey, privkey);
1011 }
1012
1013 static void
1014 wg_algo_dh(uint8_t out[WG_DH_OUTPUT_LEN],
1015 const uint8_t privkey[WG_STATIC_KEY_LEN],
1016 const uint8_t pubkey[WG_STATIC_KEY_LEN])
1017 {
1018
1019 CTASSERT(WG_STATIC_KEY_LEN == crypto_scalarmult_curve25519_BYTES);
1020
1021 int ret __diagused = crypto_scalarmult(out, privkey, pubkey);
1022 KASSERT(ret == 0);
1023 }
1024
1025 static void
1026 wg_algo_hmac(uint8_t out[], const size_t outlen,
1027 const uint8_t key[], const size_t keylen,
1028 const uint8_t in[], const size_t inlen)
1029 {
1030 #define IPAD 0x36
1031 #define OPAD 0x5c
1032 uint8_t hmackey[HMAC_BLOCK_LEN] = {0};
1033 uint8_t ipad[HMAC_BLOCK_LEN];
1034 uint8_t opad[HMAC_BLOCK_LEN];
1035 size_t i;
1036 struct blake2s state;
1037
1038 KASSERT(outlen == WG_HASH_LEN);
1039 KASSERT(keylen <= HMAC_BLOCK_LEN);
1040
1041 memcpy(hmackey, key, keylen);
1042
1043 for (i = 0; i < sizeof(hmackey); i++) {
1044 ipad[i] = hmackey[i] ^ IPAD;
1045 opad[i] = hmackey[i] ^ OPAD;
1046 }
1047
1048 blake2s_init(&state, WG_HASH_LEN, NULL, 0);
1049 blake2s_update(&state, ipad, sizeof(ipad));
1050 blake2s_update(&state, in, inlen);
1051 blake2s_final(&state, out);
1052
1053 blake2s_init(&state, WG_HASH_LEN, NULL, 0);
1054 blake2s_update(&state, opad, sizeof(opad));
1055 blake2s_update(&state, out, WG_HASH_LEN);
1056 blake2s_final(&state, out);
1057 #undef IPAD
1058 #undef OPAD
1059 }
1060
1061 static void
1062 wg_algo_kdf(uint8_t out1[WG_KDF_OUTPUT_LEN], uint8_t out2[WG_KDF_OUTPUT_LEN],
1063 uint8_t out3[WG_KDF_OUTPUT_LEN], const uint8_t ckey[WG_CHAINING_KEY_LEN],
1064 const uint8_t input[], const size_t inputlen)
1065 {
1066 uint8_t tmp1[WG_KDF_OUTPUT_LEN], tmp2[WG_KDF_OUTPUT_LEN + 1];
1067 uint8_t one[1];
1068
1069 /*
1070 * [N] 4.3: "an input_key_material byte sequence with length
1071 * either zero bytes, 32 bytes, or DHLEN bytes."
1072 */
1073 KASSERT(inputlen == 0 || inputlen == 32 || inputlen == NOISE_DHLEN);
1074
1075 WG_DUMP_HASH("ckey", ckey);
1076 if (input != NULL)
1077 WG_DUMP_HASH("input", input);
1078 wg_algo_hmac(tmp1, sizeof(tmp1), ckey, WG_CHAINING_KEY_LEN,
1079 input, inputlen);
1080 WG_DUMP_HASH("tmp1", tmp1);
1081 one[0] = 1;
1082 wg_algo_hmac(out1, WG_KDF_OUTPUT_LEN, tmp1, sizeof(tmp1),
1083 one, sizeof(one));
1084 WG_DUMP_HASH("out1", out1);
1085 if (out2 == NULL)
1086 return;
1087 memcpy(tmp2, out1, WG_KDF_OUTPUT_LEN);
1088 tmp2[WG_KDF_OUTPUT_LEN] = 2;
1089 wg_algo_hmac(out2, WG_KDF_OUTPUT_LEN, tmp1, sizeof(tmp1),
1090 tmp2, sizeof(tmp2));
1091 WG_DUMP_HASH("out2", out2);
1092 if (out3 == NULL)
1093 return;
1094 memcpy(tmp2, out2, WG_KDF_OUTPUT_LEN);
1095 tmp2[WG_KDF_OUTPUT_LEN] = 3;
1096 wg_algo_hmac(out3, WG_KDF_OUTPUT_LEN, tmp1, sizeof(tmp1),
1097 tmp2, sizeof(tmp2));
1098 WG_DUMP_HASH("out3", out3);
1099 }
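
/*
 * Usage note: wg_algo_kdf() implements the HKDF-style KDF1/KDF2/KDF3 of
 * [N] 4.3 with HMAC-BLAKE2s; passing NULL for the unused outputs selects
 * the arity, as the callers below do:
 *
 *	KDF1(ck, in):	wg_algo_kdf(ck, NULL, NULL, ck, in, inlen)
 *	KDF2(ck, in):	wg_algo_kdf(ck, k,    NULL, ck, in, inlen)
 *	KDF3(ck, in):	wg_algo_kdf(ck, tau,  k,    ck, in, inlen)
 */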
1100
1101 static void __noinline
1102 wg_algo_dh_kdf(uint8_t ckey[WG_CHAINING_KEY_LEN],
1103 uint8_t cipher_key[WG_CIPHER_KEY_LEN],
1104 const uint8_t local_key[WG_STATIC_KEY_LEN],
1105 const uint8_t remote_key[WG_STATIC_KEY_LEN])
1106 {
1107 uint8_t dhout[WG_DH_OUTPUT_LEN];
1108
1109 wg_algo_dh(dhout, local_key, remote_key);
1110 wg_algo_kdf(ckey, cipher_key, NULL, ckey, dhout, sizeof(dhout));
1111
1112 WG_DUMP_HASH("dhout", dhout);
1113 WG_DUMP_HASH("ckey", ckey);
1114 if (cipher_key != NULL)
1115 WG_DUMP_HASH("cipher_key", cipher_key);
1116 }
1117
1118 static void
1119 wg_algo_aead_enc(uint8_t out[], size_t expected_outsize, const uint8_t key[],
1120 const uint64_t counter, const uint8_t plain[], const size_t plainsize,
1121 const uint8_t auth[], size_t authlen)
1122 {
1123 uint8_t nonce[(32 + 64) / 8] = {0};
1124 long long unsigned int outsize;
1125 int error __diagused;
1126
1127 le64enc(&nonce[4], counter);
1128
1129 error = crypto_aead_chacha20poly1305_ietf_encrypt(out, &outsize, plain,
1130 plainsize, auth, authlen, NULL, nonce, key);
1131 KASSERT(error == 0);
1132 KASSERT(outsize == expected_outsize);
1133 }
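
/*
 * Note for illustration: the 96-bit IETF ChaCha20-Poly1305 nonce above is
 * laid out as in [W] 5.4.6 -- 32 zero bits followed by the 64-bit
 * little-endian counter.  For example, counter 0x0102030405060708 yields
 *
 *	nonce[] = { 00 00 00 00  08 07 06 05  04 03 02 01 }
 */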
1134
1135 static int
1136 wg_algo_aead_dec(uint8_t out[], size_t expected_outsize, const uint8_t key[],
1137 const uint64_t counter, const uint8_t encrypted[],
1138 const size_t encryptedsize, const uint8_t auth[], size_t authlen)
1139 {
1140 uint8_t nonce[(32 + 64) / 8] = {0};
1141 long long unsigned int outsize;
1142 int error;
1143
1144 le64enc(&nonce[4], counter);
1145
1146 error = crypto_aead_chacha20poly1305_ietf_decrypt(out, &outsize, NULL,
1147 encrypted, encryptedsize, auth, authlen, nonce, key);
1148 if (error == 0)
1149 KASSERT(outsize == expected_outsize);
1150 return error;
1151 }
1152
1153 static void
1154 wg_algo_xaead_enc(uint8_t out[], const size_t expected_outsize,
1155 const uint8_t key[], const uint8_t plain[], const size_t plainsize,
1156 const uint8_t auth[], size_t authlen,
1157 const uint8_t nonce[WG_SALT_LEN])
1158 {
1159 long long unsigned int outsize;
1160 int error __diagused;
1161
1162 CTASSERT(WG_SALT_LEN == crypto_aead_xchacha20poly1305_ietf_NPUBBYTES);
1163 error = crypto_aead_xchacha20poly1305_ietf_encrypt(out, &outsize,
1164 plain, plainsize, auth, authlen, NULL, nonce, key);
1165 KASSERT(error == 0);
1166 KASSERT(outsize == expected_outsize);
1167 }
1168
1169 static int
1170 wg_algo_xaead_dec(uint8_t out[], const size_t expected_outsize,
1171 const uint8_t key[], const uint8_t encrypted[], const size_t encryptedsize,
1172 const uint8_t auth[], size_t authlen,
1173 const uint8_t nonce[WG_SALT_LEN])
1174 {
1175 long long unsigned int outsize;
1176 int error;
1177
1178 error = crypto_aead_xchacha20poly1305_ietf_decrypt(out, &outsize, NULL,
1179 encrypted, encryptedsize, auth, authlen, nonce, key);
1180 if (error == 0)
1181 KASSERT(outsize == expected_outsize);
1182 return error;
1183 }
1184
1185 static void
1186 wg_algo_tai64n(wg_timestamp_t timestamp)
1187 {
1188 struct timespec ts;
1189
1190 /* FIXME strict TAI64N (https://cr.yp.to/libtai/tai64.html) */
1191 getnanotime(&ts);
1192 /* TAI64 label in external TAI64 format */
1193 be32enc(timestamp, 0x40000000U + (uint32_t)(ts.tv_sec >> 32));
1194 /* second beginning from 1970 TAI */
1195 be32enc(timestamp + 4, (uint32_t)(ts.tv_sec & 0xffffffffU));
1196 /* nanosecond in big-endian format */
1197 be32enc(timestamp + 8, (uint32_t)ts.tv_nsec);
1198 }
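
/*
 * Worked example: for ts.tv_sec = 1234567890 (0x499602d2) and
 * ts.tv_nsec = 500 (0x1f4), the 12-byte timestamp above becomes, in
 * big-endian order,
 *
 *	40 00 00 00  49 96 02 d2  00 00 01 f4
 *
 * i.e. the upper word of the TAI64 second count (2^62 + tv_sec), its
 * lower word, then the nanoseconds.  (As the FIXME notes, tv_sec here is
 * UTC from getnanotime(), not true TAI.)
 */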
1199
1200 /*
1201 * wg_get_stable_session(wgp, psref)
1202 *
1203 * Get a passive reference to the current stable session, or
1204 * return NULL if there is no current stable session.
1205 *
1206 * The pointer is always there but the session is not necessarily
1207 * ESTABLISHED; if it is not ESTABLISHED, return NULL. However,
1208  * the session may transition from ESTABLISHED to DESTROYING while
1209  * the caller holds the passive reference.
1210 */
1211 static struct wg_session *
1212 wg_get_stable_session(struct wg_peer *wgp, struct psref *psref)
1213 {
1214 int s;
1215 struct wg_session *wgs;
1216
1217 s = pserialize_read_enter();
1218 wgs = atomic_load_consume(&wgp->wgp_session_stable);
1219 if (__predict_false(wgs->wgs_state != WGS_STATE_ESTABLISHED))
1220 wgs = NULL;
1221 else
1222 psref_acquire(psref, &wgs->wgs_psref, wg_psref_class);
1223 pserialize_read_exit(s);
1224
1225 return wgs;
1226 }
1227
1228 static void
1229 wg_put_session(struct wg_session *wgs, struct psref *psref)
1230 {
1231
1232 psref_release(psref, &wgs->wgs_psref, wg_psref_class);
1233 }
1234
1235 static void
1236 wg_destroy_session(struct wg_softc *wg, struct wg_session *wgs)
1237 {
1238 struct wg_peer *wgp = wgs->wgs_peer;
1239 struct wg_session *wgs0 __diagused;
1240 void *garbage;
1241
1242 KASSERT(mutex_owned(wgp->wgp_lock));
1243 KASSERT(wgs->wgs_state != WGS_STATE_UNKNOWN);
1244
1245 /* Remove the session from the table. */
1246 wgs0 = thmap_del(wg->wg_sessions_byindex,
1247 &wgs->wgs_local_index, sizeof(wgs->wgs_local_index));
1248 KASSERT(wgs0 == wgs);
1249 garbage = thmap_stage_gc(wg->wg_sessions_byindex);
1250
1251 /* Wait for passive references to drain. */
1252 pserialize_perform(wgp->wgp_psz);
1253 psref_target_destroy(&wgs->wgs_psref, wg_psref_class);
1254
1255 /* Free memory, zero state, and transition to UNKNOWN. */
1256 thmap_gc(wg->wg_sessions_byindex, garbage);
1257 wg_clear_states(wgs);
1258 wgs->wgs_state = WGS_STATE_UNKNOWN;
1259 }
1260
1261 /*
1262 * wg_get_session_index(wg, wgs)
1263 *
1264 * Choose a session index for wgs->wgs_local_index, and store it
1265 * in wg's table of sessions by index.
1266 *
1267 * wgs must be the unstable session of its peer, and must be
1268 * transitioning out of the UNKNOWN state.
1269 */
1270 static void
1271 wg_get_session_index(struct wg_softc *wg, struct wg_session *wgs)
1272 {
1273 struct wg_peer *wgp __diagused = wgs->wgs_peer;
1274 struct wg_session *wgs0;
1275 uint32_t index;
1276
1277 KASSERT(mutex_owned(wgp->wgp_lock));
1278 KASSERT(wgs == wgp->wgp_session_unstable);
1279 KASSERT(wgs->wgs_state == WGS_STATE_UNKNOWN);
1280
1281 do {
1282 /* Pick a uniform random index. */
1283 index = cprng_strong32();
1284
1285 /* Try to take it. */
1286 wgs->wgs_local_index = index;
1287 wgs0 = thmap_put(wg->wg_sessions_byindex,
1288 &wgs->wgs_local_index, sizeof wgs->wgs_local_index, wgs);
1289
1290 /* If someone else beat us, start over. */
1291 } while (__predict_false(wgs0 != wgs));
1292 }
1293
1294 /*
1295 * wg_put_session_index(wg, wgs)
1296 *
1297 * Remove wgs from the table of sessions by index, wait for any
1298 * passive references to drain, and transition the session to the
1299 * UNKNOWN state.
1300 *
1301 * wgs must be the unstable session of its peer, and must not be
1302 * UNKNOWN or ESTABLISHED.
1303 */
1304 static void
1305 wg_put_session_index(struct wg_softc *wg, struct wg_session *wgs)
1306 {
1307 struct wg_peer *wgp __diagused = wgs->wgs_peer;
1308
1309 KASSERT(mutex_owned(wgp->wgp_lock));
1310 KASSERT(wgs == wgp->wgp_session_unstable);
1311 KASSERT(wgs->wgs_state != WGS_STATE_UNKNOWN);
1312 KASSERT(wgs->wgs_state != WGS_STATE_ESTABLISHED);
1313
1314 wg_destroy_session(wg, wgs);
1315 psref_target_init(&wgs->wgs_psref, wg_psref_class);
1316 }
1317
1318 /*
1319 * Handshake patterns
1320 *
1321 * [W] 5: "These messages use the "IK" pattern from Noise"
1322 * [N] 7.5. Interactive handshake patterns (fundamental)
1323  *  "The first character refers to the initiator's static key:"
1324 * "I = Static key for initiator Immediately transmitted to responder,
1325 * despite reduced or absent identity hiding"
1326  *  "The second character refers to the responder's static key:"
1327 * "K = Static key for responder Known to initiator"
1328 * "IK:
1329 * <- s
1330 * ...
1331 * -> e, es, s, ss
1332 * <- e, ee, se"
1333 * [N] 9.4. Pattern modifiers
1334 * "IKpsk2:
1335 * <- s
1336 * ...
1337 * -> e, es, s, ss
1338 * <- e, ee, se, psk"
1339 */
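
/*
 * For reference, the IKpsk2 tokens map onto the operations below
 * (initiator side in wg_fill_msg_init(); responder side mirrored in
 * wg_handle_msg_init()/wg_fill_msg_resp()):
 *
 *	e	Ei^priv, Ei^pub := DH-GENERATE(); Ci := KDF1(Ci, Ei^pub)
 *	es	Ci, k := KDF2(Ci, DH(Ei^priv, Sr^pub))
 *	s	msg.static := AEAD(k, 0, Si^pub, Hi)
 *	ss	Ci, k := KDF2(Ci, DH(Si^priv, Sr^pub))
 *	ee	Cr := KDF1(Cr, DH(Er^priv, Ei^pub))
 *	se	Cr := KDF1(Cr, DH(Er^priv, Si^pub))
 *	psk	Cr, r, k := KDF3(Cr, Q); Hr := HASH(Hr || r)
 */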
1340 static void
1341 wg_fill_msg_init(struct wg_softc *wg, struct wg_peer *wgp,
1342 struct wg_session *wgs, struct wg_msg_init *wgmi)
1343 {
1344 uint8_t ckey[WG_CHAINING_KEY_LEN]; /* [W] 5.4.2: Ci */
1345 uint8_t hash[WG_HASH_LEN]; /* [W] 5.4.2: Hi */
1346 uint8_t cipher_key[WG_CIPHER_KEY_LEN];
1347 uint8_t pubkey[WG_EPHEMERAL_KEY_LEN];
1348 uint8_t privkey[WG_EPHEMERAL_KEY_LEN];
1349
1350 KASSERT(mutex_owned(wgp->wgp_lock));
1351 KASSERT(wgs == wgp->wgp_session_unstable);
1352 KASSERT(wgs->wgs_state == WGS_STATE_INIT_ACTIVE);
1353
1354 wgmi->wgmi_type = htole32(WG_MSG_TYPE_INIT);
1355 wgmi->wgmi_sender = wgs->wgs_local_index;
1356
1357 /* [W] 5.4.2: First Message: Initiator to Responder */
1358
1359 /* Ci := HASH(CONSTRUCTION) */
1360 /* Hi := HASH(Ci || IDENTIFIER) */
1361 wg_init_key_and_hash(ckey, hash);
1362 /* Hi := HASH(Hi || Sr^pub) */
1363 wg_algo_hash(hash, wgp->wgp_pubkey, sizeof(wgp->wgp_pubkey));
1364
1365 WG_DUMP_HASH("hash", hash);
1366
1367 /* [N] 2.2: "e" */
1368 /* Ei^priv, Ei^pub := DH-GENERATE() */
1369 wg_algo_generate_keypair(pubkey, privkey);
1370 /* Ci := KDF1(Ci, Ei^pub) */
1371 wg_algo_kdf(ckey, NULL, NULL, ckey, pubkey, sizeof(pubkey));
1372 /* msg.ephemeral := Ei^pub */
1373 memcpy(wgmi->wgmi_ephemeral, pubkey, sizeof(wgmi->wgmi_ephemeral));
1374 /* Hi := HASH(Hi || msg.ephemeral) */
1375 wg_algo_hash(hash, pubkey, sizeof(pubkey));
1376
1377 WG_DUMP_HASH("ckey", ckey);
1378 WG_DUMP_HASH("hash", hash);
1379
1380 /* [N] 2.2: "es" */
1381 /* Ci, k := KDF2(Ci, DH(Ei^priv, Sr^pub)) */
1382 wg_algo_dh_kdf(ckey, cipher_key, privkey, wgp->wgp_pubkey);
1383
1384 /* [N] 2.2: "s" */
1385 /* msg.static := AEAD(k, 0, Si^pub, Hi) */
1386 wg_algo_aead_enc(wgmi->wgmi_static, sizeof(wgmi->wgmi_static),
1387 cipher_key, 0, wg->wg_pubkey, sizeof(wg->wg_pubkey),
1388 hash, sizeof(hash));
1389 /* Hi := HASH(Hi || msg.static) */
1390 wg_algo_hash(hash, wgmi->wgmi_static, sizeof(wgmi->wgmi_static));
1391
1392 WG_DUMP_HASH48("wgmi_static", wgmi->wgmi_static);
1393
1394 /* [N] 2.2: "ss" */
1395 /* Ci, k := KDF2(Ci, DH(Si^priv, Sr^pub)) */
1396 wg_algo_dh_kdf(ckey, cipher_key, wg->wg_privkey, wgp->wgp_pubkey);
1397
1398 /* msg.timestamp := AEAD(k, TIMESTAMP(), Hi) */
1399 wg_timestamp_t timestamp;
1400 wg_algo_tai64n(timestamp);
1401 wg_algo_aead_enc(wgmi->wgmi_timestamp, sizeof(wgmi->wgmi_timestamp),
1402 cipher_key, 0, timestamp, sizeof(timestamp), hash, sizeof(hash));
1403 /* Hi := HASH(Hi || msg.timestamp) */
1404 wg_algo_hash(hash, wgmi->wgmi_timestamp, sizeof(wgmi->wgmi_timestamp));
1405
1406 /* [W] 5.4.4 Cookie MACs */
1407 wg_algo_mac_mac1(wgmi->wgmi_mac1, sizeof(wgmi->wgmi_mac1),
1408 wgp->wgp_pubkey, sizeof(wgp->wgp_pubkey),
1409 (const uint8_t *)wgmi, offsetof(struct wg_msg_init, wgmi_mac1));
1410 /* Need mac1 to decrypt a cookie from a cookie message */
1411 memcpy(wgp->wgp_last_sent_mac1, wgmi->wgmi_mac1,
1412 sizeof(wgp->wgp_last_sent_mac1));
1413 wgp->wgp_last_sent_mac1_valid = true;
1414
1415 if (wgp->wgp_latest_cookie_time == 0 ||
1416 (time_uptime - wgp->wgp_latest_cookie_time) >= WG_COOKIE_TIME)
1417 memset(wgmi->wgmi_mac2, 0, sizeof(wgmi->wgmi_mac2));
1418 else {
1419 wg_algo_mac(wgmi->wgmi_mac2, sizeof(wgmi->wgmi_mac2),
1420 wgp->wgp_latest_cookie, WG_COOKIE_LEN,
1421 (const uint8_t *)wgmi,
1422 offsetof(struct wg_msg_init, wgmi_mac2),
1423 NULL, 0);
1424 }
1425
1426 memcpy(wgs->wgs_ephemeral_key_pub, pubkey, sizeof(pubkey));
1427 memcpy(wgs->wgs_ephemeral_key_priv, privkey, sizeof(privkey));
1428 memcpy(wgs->wgs_handshake_hash, hash, sizeof(hash));
1429 memcpy(wgs->wgs_chaining_key, ckey, sizeof(ckey));
1430 WG_DLOG("%s: sender=%x\n", __func__, wgs->wgs_local_index);
1431 }
1432
1433 static void __noinline
1434 wg_handle_msg_init(struct wg_softc *wg, const struct wg_msg_init *wgmi,
1435 const struct sockaddr *src)
1436 {
1437 uint8_t ckey[WG_CHAINING_KEY_LEN]; /* [W] 5.4.2: Ci */
1438 uint8_t hash[WG_HASH_LEN]; /* [W] 5.4.2: Hi */
1439 uint8_t cipher_key[WG_CIPHER_KEY_LEN];
1440 uint8_t peer_pubkey[WG_STATIC_KEY_LEN];
1441 struct wg_peer *wgp;
1442 struct wg_session *wgs;
1443 int error, ret;
1444 struct psref psref_peer;
1445 uint8_t mac1[WG_MAC_LEN];
1446
1447 WG_TRACE("init msg received");
1448
1449 wg_algo_mac_mac1(mac1, sizeof(mac1),
1450 wg->wg_pubkey, sizeof(wg->wg_pubkey),
1451 (const uint8_t *)wgmi, offsetof(struct wg_msg_init, wgmi_mac1));
1452
1453 /*
1454 * [W] 5.3: Denial of Service Mitigation & Cookies
1455 * "the responder, ..., must always reject messages with an invalid
1456 * msg.mac1"
1457 */
1458 if (!consttime_memequal(mac1, wgmi->wgmi_mac1, sizeof(mac1))) {
1459 WG_DLOG("mac1 is invalid\n");
1460 return;
1461 }
1462
1463 /*
1464 * [W] 5.4.2: First Message: Initiator to Responder
1465 * "When the responder receives this message, it does the same
1466 * operations so that its final state variables are identical,
1467 * replacing the operands of the DH function to produce equivalent
1468 * values."
1469 	 * Note that the following comments on the operations are simply
1470 	 * copies of the initiator's.
1471 */
1472
1473 /* Ci := HASH(CONSTRUCTION) */
1474 /* Hi := HASH(Ci || IDENTIFIER) */
1475 wg_init_key_and_hash(ckey, hash);
1476 /* Hi := HASH(Hi || Sr^pub) */
1477 wg_algo_hash(hash, wg->wg_pubkey, sizeof(wg->wg_pubkey));
1478
1479 /* [N] 2.2: "e" */
1480 /* Ci := KDF1(Ci, Ei^pub) */
1481 wg_algo_kdf(ckey, NULL, NULL, ckey, wgmi->wgmi_ephemeral,
1482 sizeof(wgmi->wgmi_ephemeral));
1483 /* Hi := HASH(Hi || msg.ephemeral) */
1484 wg_algo_hash(hash, wgmi->wgmi_ephemeral, sizeof(wgmi->wgmi_ephemeral));
1485
1486 WG_DUMP_HASH("ckey", ckey);
1487
1488 /* [N] 2.2: "es" */
1489 /* Ci, k := KDF2(Ci, DH(Ei^priv, Sr^pub)) */
1490 wg_algo_dh_kdf(ckey, cipher_key, wg->wg_privkey, wgmi->wgmi_ephemeral);
1491
1492 WG_DUMP_HASH48("wgmi_static", wgmi->wgmi_static);
1493
1494 /* [N] 2.2: "s" */
1495 /* msg.static := AEAD(k, 0, Si^pub, Hi) */
1496 error = wg_algo_aead_dec(peer_pubkey, WG_STATIC_KEY_LEN, cipher_key, 0,
1497 wgmi->wgmi_static, sizeof(wgmi->wgmi_static), hash, sizeof(hash));
1498 if (error != 0) {
1499 WG_LOG_RATECHECK(&wg->wg_ppsratecheck, LOG_DEBUG,
1500 "%s: wg_algo_aead_dec for secret key failed\n",
1501 if_name(&wg->wg_if));
1502 return;
1503 }
1504 /* Hi := HASH(Hi || msg.static) */
1505 wg_algo_hash(hash, wgmi->wgmi_static, sizeof(wgmi->wgmi_static));
1506
1507 wgp = wg_lookup_peer_by_pubkey(wg, peer_pubkey, &psref_peer);
1508 if (wgp == NULL) {
1509 WG_DLOG("peer not found\n");
1510 return;
1511 }
1512
1513 /*
1514 * Lock the peer to serialize access to cookie state.
1515 *
1516 * XXX Can we safely avoid holding the lock across DH? Take it
1517 * just to verify mac2 and then unlock/DH/lock?
1518 */
1519 mutex_enter(wgp->wgp_lock);
1520
1521 if (__predict_false(wg_is_underload(wg, wgp, WG_MSG_TYPE_INIT))) {
1522 WG_TRACE("under load");
1523 /*
1524 * [W] 5.3: Denial of Service Mitigation & Cookies
1525 * "the responder, ..., and when under load may reject messages
1526 * with an invalid msg.mac2. If the responder receives a
1527 * message with a valid msg.mac1 yet with an invalid msg.mac2,
1528 * and is under load, it may respond with a cookie reply
1529 * message"
1530 */
1531 uint8_t zero[WG_MAC_LEN] = {0};
1532 if (consttime_memequal(wgmi->wgmi_mac2, zero, sizeof(zero))) {
1533 WG_TRACE("sending a cookie message: no cookie included");
1534 (void)wg_send_cookie_msg(wg, wgp, wgmi->wgmi_sender,
1535 wgmi->wgmi_mac1, src);
1536 goto out;
1537 }
1538 if (!wgp->wgp_last_sent_cookie_valid) {
1539 WG_TRACE("sending a cookie message: no cookie sent ever");
1540 (void)wg_send_cookie_msg(wg, wgp, wgmi->wgmi_sender,
1541 wgmi->wgmi_mac1, src);
1542 goto out;
1543 }
1544 uint8_t mac2[WG_MAC_LEN];
1545 wg_algo_mac(mac2, sizeof(mac2), wgp->wgp_last_sent_cookie,
1546 WG_COOKIE_LEN, (const uint8_t *)wgmi,
1547 offsetof(struct wg_msg_init, wgmi_mac2), NULL, 0);
1548 if (!consttime_memequal(mac2, wgmi->wgmi_mac2, sizeof(mac2))) {
1549 WG_DLOG("mac2 is invalid\n");
1550 goto out;
1551 }
1552 		WG_TRACE("under load, but continuing");
1553 }
1554
1555 /* [N] 2.2: "ss" */
1556 /* Ci, k := KDF2(Ci, DH(Si^priv, Sr^pub)) */
1557 wg_algo_dh_kdf(ckey, cipher_key, wg->wg_privkey, wgp->wgp_pubkey);
1558
1559 /* msg.timestamp := AEAD(k, TIMESTAMP(), Hi) */
1560 wg_timestamp_t timestamp;
1561 error = wg_algo_aead_dec(timestamp, sizeof(timestamp), cipher_key, 0,
1562 wgmi->wgmi_timestamp, sizeof(wgmi->wgmi_timestamp),
1563 hash, sizeof(hash));
1564 if (error != 0) {
1565 WG_LOG_RATECHECK(&wgp->wgp_ppsratecheck, LOG_DEBUG,
1566 "%s: peer %s: wg_algo_aead_dec for timestamp failed\n",
1567 if_name(&wg->wg_if), wgp->wgp_name);
1568 goto out;
1569 }
1570 /* Hi := HASH(Hi || msg.timestamp) */
1571 wg_algo_hash(hash, wgmi->wgmi_timestamp, sizeof(wgmi->wgmi_timestamp));
1572
1573 /*
1574 * [W] 5.1 "The responder keeps track of the greatest timestamp
1575 * received per peer and discards packets containing
1576 * timestamps less than or equal to it."
1577 */
1578 ret = memcmp(timestamp, wgp->wgp_timestamp_latest_init,
1579 sizeof(timestamp));
1580 if (ret <= 0) {
1581 WG_LOG_RATECHECK(&wgp->wgp_ppsratecheck, LOG_DEBUG,
1582 "%s: peer %s: invalid init msg: timestamp is old\n",
1583 if_name(&wg->wg_if), wgp->wgp_name);
1584 goto out;
1585 }
1586 memcpy(wgp->wgp_timestamp_latest_init, timestamp, sizeof(timestamp));
1587
1588 /*
1589 * Message is good -- we're committing to handle it now, unless
1590 * we were already initiating a session.
1591 */
1592 wgs = wgp->wgp_session_unstable;
1593 switch (wgs->wgs_state) {
1594 case WGS_STATE_UNKNOWN: /* new session initiated by peer */
1595 wg_get_session_index(wg, wgs);
1596 break;
1597 case WGS_STATE_INIT_ACTIVE: /* we're already initiating, drop */
1598 WG_TRACE("Session already initializing, ignoring the message");
1599 goto out;
1600 case WGS_STATE_INIT_PASSIVE: /* peer is retrying, start over */
1601 WG_TRACE("Session already initializing, destroying old states");
1602 wg_clear_states(wgs);
1603 /* keep session index */
1604 break;
1605 case WGS_STATE_ESTABLISHED: /* can't happen */
1606 panic("unstable session can't be established");
1607 break;
1608 case WGS_STATE_DESTROYING: /* rekey initiated by peer */
1609 WG_TRACE("Session destroying, but force to clear");
1610 callout_stop(&wgp->wgp_session_dtor_timer);
1611 wg_clear_states(wgs);
1612 /* keep session index */
1613 break;
1614 default:
1615 panic("invalid session state: %d", wgs->wgs_state);
1616 }
1617 wgs->wgs_state = WGS_STATE_INIT_PASSIVE;
1618
1619 memcpy(wgs->wgs_handshake_hash, hash, sizeof(hash));
1620 memcpy(wgs->wgs_chaining_key, ckey, sizeof(ckey));
1621 memcpy(wgs->wgs_ephemeral_key_peer, wgmi->wgmi_ephemeral,
1622 sizeof(wgmi->wgmi_ephemeral));
1623
1624 wg_update_endpoint_if_necessary(wgp, src);
1625
1626 (void)wg_send_handshake_msg_resp(wg, wgp, wgs, wgmi);
1627
1628 wg_calculate_keys(wgs, false);
1629 wg_clear_states(wgs);
1630
1631 out:
1632 mutex_exit(wgp->wgp_lock);
1633 wg_put_peer(wgp, &psref_peer);
1634 }
1635
1636 static struct socket *
1637 wg_get_so_by_af(struct wg_softc *wg, const int af)
1638 {
1639
1640 switch (af) {
1641 #ifdef INET
1642 case AF_INET:
1643 return wg->wg_so4;
1644 #endif
1645 #ifdef INET6
1646 case AF_INET6:
1647 return wg->wg_so6;
1648 #endif
1649 default:
1650 panic("wg: no such af: %d", af);
1651 }
1652 }
1653
1654 static struct socket *
1655 wg_get_so_by_peer(struct wg_peer *wgp, struct wg_sockaddr *wgsa)
1656 {
1657
1658 return wg_get_so_by_af(wgp->wgp_sc, wgsa_family(wgsa));
1659 }
1660
1661 static struct wg_sockaddr *
1662 wg_get_endpoint_sa(struct wg_peer *wgp, struct psref *psref)
1663 {
1664 struct wg_sockaddr *wgsa;
1665 int s;
1666
1667 s = pserialize_read_enter();
1668 wgsa = atomic_load_consume(&wgp->wgp_endpoint);
1669 psref_acquire(psref, &wgsa->wgsa_psref, wg_psref_class);
1670 pserialize_read_exit(s);
1671
1672 return wgsa;
1673 }
1674
1675 static void
1676 wg_put_sa(struct wg_peer *wgp, struct wg_sockaddr *wgsa, struct psref *psref)
1677 {
1678
1679 psref_release(psref, &wgsa->wgsa_psref, wg_psref_class);
1680 }
1681
1682 static int
1683 wg_send_so(struct wg_peer *wgp, struct mbuf *m)
1684 {
1685 int error;
1686 struct socket *so;
1687 struct psref psref;
1688 struct wg_sockaddr *wgsa;
1689
1690 wgsa = wg_get_endpoint_sa(wgp, &psref);
1691 so = wg_get_so_by_peer(wgp, wgsa);
1692 error = sosend(so, wgsatosa(wgsa), NULL, m, NULL, 0, curlwp);
1693 wg_put_sa(wgp, wgsa, &psref);
1694
1695 return error;
1696 }
1697
1698 static int
1699 wg_send_handshake_msg_init(struct wg_softc *wg, struct wg_peer *wgp)
1700 {
1701 int error;
1702 struct mbuf *m;
1703 struct wg_msg_init *wgmi;
1704 struct wg_session *wgs;
1705
1706 KASSERT(mutex_owned(wgp->wgp_lock));
1707
1708 wgs = wgp->wgp_session_unstable;
1709 /* XXX pull dispatch out into wg_task_send_init_message */
1710 switch (wgs->wgs_state) {
1711 case WGS_STATE_UNKNOWN: /* new session initiated by us */
1712 wg_get_session_index(wg, wgs);
1713 break;
1714 case WGS_STATE_INIT_ACTIVE: /* we're already initiating, stop */
1715 WG_TRACE("Session already initializing, skip starting new one");
1716 return EBUSY;
1717 case WGS_STATE_INIT_PASSIVE: /* peer was trying -- XXX what now? */
1718 WG_TRACE("Session already initializing, destroying old states");
1719 wg_clear_states(wgs);
1720 /* keep session index */
1721 break;
1722 case WGS_STATE_ESTABLISHED: /* can't happen */
1723 panic("unstable session can't be established");
1724 break;
1725 case WGS_STATE_DESTROYING: /* rekey initiated by us too early */
1726 WG_TRACE("Session destroying");
1727 /* XXX should wait? */
1728 return EBUSY;
1729 }
1730 wgs->wgs_state = WGS_STATE_INIT_ACTIVE;
1731
1732 m = m_gethdr(M_WAIT, MT_DATA);
1733 if (sizeof(*wgmi) > MHLEN) {
1734 m_clget(m, M_WAIT);
1735 CTASSERT(sizeof(*wgmi) <= MCLBYTES);
1736 }
1737 m->m_pkthdr.len = m->m_len = sizeof(*wgmi);
1738 wgmi = mtod(m, struct wg_msg_init *);
1739 wg_fill_msg_init(wg, wgp, wgs, wgmi);
1740
1741 error = wg->wg_ops->send_hs_msg(wgp, m);
1742 if (error == 0) {
1743 WG_TRACE("init msg sent");
1744
1745 if (wgp->wgp_handshake_start_time == 0)
1746 wgp->wgp_handshake_start_time = time_uptime;
1747 callout_schedule(&wgp->wgp_handshake_timeout_timer,
1748 MIN(wg_rekey_timeout, (unsigned)(INT_MAX / hz)) * hz);
1749 } else {
1750 wg_put_session_index(wg, wgs);
1751 /* Initiation failed; toss packet waiting for it if any. */
1752 m = atomic_swap_ptr(&wgp->wgp_pending, NULL);
1753 m_freem(m);
1754 }
1755
1756 return error;
1757 }
1758
1759 static void
1760 wg_fill_msg_resp(struct wg_softc *wg, struct wg_peer *wgp,
1761 struct wg_session *wgs, struct wg_msg_resp *wgmr,
1762 const struct wg_msg_init *wgmi)
1763 {
1764 uint8_t ckey[WG_CHAINING_KEY_LEN]; /* [W] 5.4.3: Cr */
1765 uint8_t hash[WG_HASH_LEN]; /* [W] 5.4.3: Hr */
1766 uint8_t cipher_key[WG_KDF_OUTPUT_LEN];
1767 uint8_t pubkey[WG_EPHEMERAL_KEY_LEN];
1768 uint8_t privkey[WG_EPHEMERAL_KEY_LEN];
1769
1770 KASSERT(mutex_owned(wgp->wgp_lock));
1771 KASSERT(wgs == wgp->wgp_session_unstable);
1772 KASSERT(wgs->wgs_state == WGS_STATE_INIT_PASSIVE);
1773
1774 memcpy(hash, wgs->wgs_handshake_hash, sizeof(hash));
1775 memcpy(ckey, wgs->wgs_chaining_key, sizeof(ckey));
1776
1777 wgmr->wgmr_type = htole32(WG_MSG_TYPE_RESP);
1778 wgmr->wgmr_sender = wgs->wgs_local_index;
1779 wgmr->wgmr_receiver = wgmi->wgmi_sender;
1780
1781 /* [W] 5.4.3 Second Message: Responder to Initiator */
1782
1783 /* [N] 2.2: "e" */
1784 /* Er^priv, Er^pub := DH-GENERATE() */
1785 wg_algo_generate_keypair(pubkey, privkey);
1786 /* Cr := KDF1(Cr, Er^pub) */
1787 wg_algo_kdf(ckey, NULL, NULL, ckey, pubkey, sizeof(pubkey));
1788 /* msg.ephemeral := Er^pub */
1789 memcpy(wgmr->wgmr_ephemeral, pubkey, sizeof(wgmr->wgmr_ephemeral));
1790 /* Hr := HASH(Hr || msg.ephemeral) */
1791 wg_algo_hash(hash, pubkey, sizeof(pubkey));
1792
1793 WG_DUMP_HASH("ckey", ckey);
1794 WG_DUMP_HASH("hash", hash);
1795
1796 /* [N] 2.2: "ee" */
1797 /* Cr := KDF1(Cr, DH(Er^priv, Ei^pub)) */
1798 wg_algo_dh_kdf(ckey, NULL, privkey, wgs->wgs_ephemeral_key_peer);
1799
1800 /* [N] 2.2: "se" */
1801 /* Cr := KDF1(Cr, DH(Er^priv, Si^pub)) */
1802 wg_algo_dh_kdf(ckey, NULL, privkey, wgp->wgp_pubkey);
1803
1804 /* [N] 9.2: "psk" */
1805 {
1806 uint8_t kdfout[WG_KDF_OUTPUT_LEN];
1807 /* Cr, r, k := KDF3(Cr, Q) */
1808 wg_algo_kdf(ckey, kdfout, cipher_key, ckey, wgp->wgp_psk,
1809 sizeof(wgp->wgp_psk));
1810 /* Hr := HASH(Hr || r) */
1811 wg_algo_hash(hash, kdfout, sizeof(kdfout));
1812 }
1813
1814 /* msg.empty := AEAD(k, 0, e, Hr) */
1815 wg_algo_aead_enc(wgmr->wgmr_empty, sizeof(wgmr->wgmr_empty),
1816 cipher_key, 0, NULL, 0, hash, sizeof(hash));
1817 /* Hr := HASH(Hr || msg.empty) */
1818 wg_algo_hash(hash, wgmr->wgmr_empty, sizeof(wgmr->wgmr_empty));
1819
1820 WG_DUMP_HASH("wgmr_empty", wgmr->wgmr_empty);
1821
1822 /* [W] 5.4.4: Cookie MACs */
1823 /* msg.mac1 := MAC(HASH(LABEL-MAC1 || Sm'^pub), msg_a) */
1824 wg_algo_mac_mac1(wgmr->wgmr_mac1, sizeof(wgmr->wgmr_mac1),
1825 wgp->wgp_pubkey, sizeof(wgp->wgp_pubkey),
1826 (const uint8_t *)wgmr, offsetof(struct wg_msg_resp, wgmr_mac1));
1827 /* Need mac1 to decrypt a cookie from a cookie message */
1828 memcpy(wgp->wgp_last_sent_mac1, wgmr->wgmr_mac1,
1829 sizeof(wgp->wgp_last_sent_mac1));
1830 wgp->wgp_last_sent_mac1_valid = true;
1831
1832 if (wgp->wgp_latest_cookie_time == 0 ||
1833 (time_uptime - wgp->wgp_latest_cookie_time) >= WG_COOKIE_TIME) {
1834 /* msg.mac2 := 0^16 */
1835 memset(wgmr->wgmr_mac2, 0, sizeof(wgmr->wgmr_mac2));
1836 } else {
1837 /* msg.mac2 := MAC(Lm, msg_b) */
1838 wg_algo_mac(wgmr->wgmr_mac2, sizeof(wgmr->wgmr_mac2),
1839 wgp->wgp_latest_cookie, WG_COOKIE_LEN,
1840 (const uint8_t *)wgmr,
1841 offsetof(struct wg_msg_resp, wgmr_mac2),
1842 NULL, 0);
1843 }
1844
1845 memcpy(wgs->wgs_handshake_hash, hash, sizeof(hash));
1846 memcpy(wgs->wgs_chaining_key, ckey, sizeof(ckey));
1847 memcpy(wgs->wgs_ephemeral_key_pub, pubkey, sizeof(pubkey));
1848 memcpy(wgs->wgs_ephemeral_key_priv, privkey, sizeof(privkey));
1849 wgs->wgs_remote_index = wgmi->wgmi_sender;
1850 WG_DLOG("sender=%x\n", wgs->wgs_local_index);
1851 WG_DLOG("receiver=%x\n", wgs->wgs_remote_index);
1852 }
1853
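/*
* wg_swap_sessions(wgp)
*
* Publish the newly established unstable session as the stable one
* and demote the previous stable session to unstable.  The stable
* pointer is published with atomic_store_release for lock-free
* readers.
*
* Caller must hold wgp->wgp_lock.
*/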
1854 static void
1855 wg_swap_sessions(struct wg_peer *wgp)
1856 {
1857 struct wg_session *wgs, *wgs_prev;
1858
1859 KASSERT(mutex_owned(wgp->wgp_lock));
1860
1861 wgs = wgp->wgp_session_unstable;
1862 KASSERT(wgs->wgs_state == WGS_STATE_ESTABLISHED);
1863
1864 wgs_prev = wgp->wgp_session_stable;
1865 KASSERT(wgs_prev->wgs_state == WGS_STATE_ESTABLISHED ||
1866 wgs_prev->wgs_state == WGS_STATE_UNKNOWN);
1867 atomic_store_release(&wgp->wgp_session_stable, wgs);
1868 wgp->wgp_session_unstable = wgs_prev;
1869 }
1870
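/*
* wg_handle_msg_resp(wg, wgmr, src)
*
* Handle a handshake response as the initiator: verify msg.mac1,
* look up the session by the receiver index, finish the Noise
* handshake, and transition the session from WGS_STATE_INIT_ACTIVE
* to WGS_STATE_ESTABLISHED.  Finally send either a pending data
* packet or a keepalive so the responder can confirm the session.
*/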
1871 static void __noinline
1872 wg_handle_msg_resp(struct wg_softc *wg, const struct wg_msg_resp *wgmr,
1873 const struct sockaddr *src)
1874 {
1875 uint8_t ckey[WG_CHAINING_KEY_LEN]; /* [W] 5.4.3: Cr */
1876 uint8_t hash[WG_HASH_LEN]; /* [W] 5.4.3: Hr */
1877 uint8_t cipher_key[WG_KDF_OUTPUT_LEN];
1878 struct wg_peer *wgp;
1879 struct wg_session *wgs;
1880 struct psref psref;
1881 int error;
1882 uint8_t mac1[WG_MAC_LEN];
1883 struct wg_session *wgs_prev;
1884 struct mbuf *m;
1885
1886 wg_algo_mac_mac1(mac1, sizeof(mac1),
1887 wg->wg_pubkey, sizeof(wg->wg_pubkey),
1888 (const uint8_t *)wgmr, offsetof(struct wg_msg_resp, wgmr_mac1));
1889
1890 /*
1891 * [W] 5.3: Denial of Service Mitigation & Cookies
1892 * "the responder, ..., must always reject messages with an invalid
1893 * msg.mac1"
1894 */
1895 if (!consttime_memequal(mac1, wgmr->wgmr_mac1, sizeof(mac1))) {
1896 WG_DLOG("mac1 is invalid\n");
1897 return;
1898 }
1899
1900 WG_TRACE("resp msg received");
1901 wgs = wg_lookup_session_by_index(wg, wgmr->wgmr_receiver, &psref);
1902 if (wgs == NULL) {
1903 WG_TRACE("No session found");
1904 return;
1905 }
1906
1907 wgp = wgs->wgs_peer;
1908
1909 mutex_enter(wgp->wgp_lock);
1910
1911 /* If we weren't waiting for a handshake response, drop it. */
1912 if (wgs->wgs_state != WGS_STATE_INIT_ACTIVE) {
1913 WG_TRACE("peer sent spurious handshake response, ignoring");
1914 goto out;
1915 }
1916
1917 if (__predict_false(wg_is_underload(wg, wgp, WG_MSG_TYPE_RESP))) {
1918 WG_TRACE("under load");
1919 /*
1920 * [W] 5.3: Denial of Service Mitigation & Cookies
1921 * "the responder, ..., and when under load may reject messages
1922 * with an invalid msg.mac2. If the responder receives a
1923 * message with a valid msg.mac1 yet with an invalid msg.mac2,
1924 * and is under load, it may respond with a cookie reply
1925 * message"
1926 */
1927 uint8_t zero[WG_MAC_LEN] = {0};
1928 if (consttime_memequal(wgmr->wgmr_mac2, zero, sizeof(zero))) {
1929 WG_TRACE("sending a cookie message: no cookie included");
1930 (void)wg_send_cookie_msg(wg, wgp, wgmr->wgmr_sender,
1931 wgmr->wgmr_mac1, src);
1932 goto out;
1933 }
1934 if (!wgp->wgp_last_sent_cookie_valid) {
1935 WG_TRACE("sending a cookie message: no cookie sent ever");
1936 (void)wg_send_cookie_msg(wg, wgp, wgmr->wgmr_sender,
1937 wgmr->wgmr_mac1, src);
1938 goto out;
1939 }
1940 uint8_t mac2[WG_MAC_LEN];
1941 wg_algo_mac(mac2, sizeof(mac2), wgp->wgp_last_sent_cookie,
1942 WG_COOKIE_LEN, (const uint8_t *)wgmr,
1943 offsetof(struct wg_msg_resp, wgmr_mac2), NULL, 0);
1944 if (!consttime_memequal(mac2, wgmr->wgmr_mac2, sizeof(mac2))) {
1945 WG_DLOG("mac2 is invalid\n");
1946 goto out;
1947 }
1948 WG_TRACE("under load, but continue to sending");
1949 }
1950
1951 memcpy(hash, wgs->wgs_handshake_hash, sizeof(hash));
1952 memcpy(ckey, wgs->wgs_chaining_key, sizeof(ckey));
1953
1954 /*
1955 * [W] 5.4.3 Second Message: Responder to Initiator
1956 * "When the initiator receives this message, it does the same
1957 * operations so that its final state variables are identical,
1958 * replacing the operands of the DH function to produce equivalent
1959 * values."
1960 * Note that the following comments of operations are just copies of
1961 * the initiator's ones.
1962 */
1963
1964 /* [N] 2.2: "e" */
1965 /* Cr := KDF1(Cr, Er^pub) */
1966 wg_algo_kdf(ckey, NULL, NULL, ckey, wgmr->wgmr_ephemeral,
1967 sizeof(wgmr->wgmr_ephemeral));
1968 /* Hr := HASH(Hr || msg.ephemeral) */
1969 wg_algo_hash(hash, wgmr->wgmr_ephemeral, sizeof(wgmr->wgmr_ephemeral));
1970
1971 WG_DUMP_HASH("ckey", ckey);
1972 WG_DUMP_HASH("hash", hash);
1973
1974 /* [N] 2.2: "ee" */
1975 /* Cr := KDF1(Cr, DH(Er^priv, Ei^pub)) */
1976 wg_algo_dh_kdf(ckey, NULL, wgs->wgs_ephemeral_key_priv,
1977 wgmr->wgmr_ephemeral);
1978
1979 /* [N] 2.2: "se" */
1980 /* Cr := KDF1(Cr, DH(Er^priv, Si^pub)) */
1981 wg_algo_dh_kdf(ckey, NULL, wg->wg_privkey, wgmr->wgmr_ephemeral);
1982
1983 /* [N] 9.2: "psk" */
1984 {
1985 uint8_t kdfout[WG_KDF_OUTPUT_LEN];
1986 /* Cr, r, k := KDF3(Cr, Q) */
1987 wg_algo_kdf(ckey, kdfout, cipher_key, ckey, wgp->wgp_psk,
1988 sizeof(wgp->wgp_psk));
1989 /* Hr := HASH(Hr || r) */
1990 wg_algo_hash(hash, kdfout, sizeof(kdfout));
1991 }
1992
1993 {
1994 uint8_t out[sizeof(wgmr->wgmr_empty)]; /* for safety */
1995 /* msg.empty := AEAD(k, 0, e, Hr) */
1996 error = wg_algo_aead_dec(out, 0, cipher_key, 0, wgmr->wgmr_empty,
1997 sizeof(wgmr->wgmr_empty), hash, sizeof(hash));
1998 WG_DUMP_HASH("wgmr_empty", wgmr->wgmr_empty);
1999 if (error != 0) {
2000 WG_LOG_RATECHECK(&wgp->wgp_ppsratecheck, LOG_DEBUG,
2001 "%s: peer %s: wg_algo_aead_dec for empty message failed\n",
2002 if_name(&wg->wg_if), wgp->wgp_name);
2003 goto out;
2004 }
2005 /* Hr := HASH(Hr || msg.empty) */
2006 wg_algo_hash(hash, wgmr->wgmr_empty, sizeof(wgmr->wgmr_empty));
2007 }
2008
2009 memcpy(wgs->wgs_handshake_hash, hash, sizeof(wgs->wgs_handshake_hash));
2010 memcpy(wgs->wgs_chaining_key, ckey, sizeof(wgs->wgs_chaining_key));
2011 wgs->wgs_remote_index = wgmr->wgmr_sender;
2012 WG_DLOG("receiver=%x\n", wgs->wgs_remote_index);
2013
2014 KASSERT(wgs->wgs_state == WGS_STATE_INIT_ACTIVE);
2015 wgs->wgs_state = WGS_STATE_ESTABLISHED;
2016 wgs->wgs_time_established = time_uptime;
2017 wgs->wgs_time_last_data_sent = 0;
2018 wgs->wgs_is_initiator = true;
2019 wg_calculate_keys(wgs, true);
2020 wg_clear_states(wgs);
2021 WG_TRACE("WGS_STATE_ESTABLISHED");
2022
2023 callout_stop(&wgp->wgp_handshake_timeout_timer);
2024
2025 wg_swap_sessions(wgp);
2026 KASSERT(wgs == wgp->wgp_session_stable);
2027 wgs_prev = wgp->wgp_session_unstable;
2028 getnanotime(&wgp->wgp_last_handshake_time);
2029 wgp->wgp_handshake_start_time = 0;
2030 wgp->wgp_last_sent_mac1_valid = false;
2031 wgp->wgp_last_sent_cookie_valid = false;
2032
2033 wg_schedule_rekey_timer(wgp);
2034
2035 wg_update_endpoint_if_necessary(wgp, src);
2036
2037 /*
2038 * If we had a data packet queued up, send it; otherwise send a
2039 * keepalive message -- either way we have to send something
2040 * immediately or else the responder will never answer.
2041 */
2042 if ((m = atomic_swap_ptr(&wgp->wgp_pending, NULL)) != NULL) {
2043 kpreempt_disable();
2044 const uint32_t h = curcpu()->ci_index; // pktq_rps_hash(m)
2045 M_SETCTX(m, wgp);
2046 if (__predict_false(!pktq_enqueue(wg_pktq, m, h))) {
2047 WGLOG(LOG_ERR, "%s: pktq full, dropping\n",
2048 if_name(&wg->wg_if));
2049 m_freem(m);
2050 }
2051 kpreempt_enable();
2052 } else {
2053 wg_send_keepalive_msg(wgp, wgs);
2054 }
2055
2056 if (wgs_prev->wgs_state == WGS_STATE_ESTABLISHED) {
2057 /* Wait for wg_get_stable_session to drain. */
2058 pserialize_perform(wgp->wgp_psz);
2059
2060 /* Transition ESTABLISHED->DESTROYING. */
2061 wgs_prev->wgs_state = WGS_STATE_DESTROYING;
2062
2063 /* We can't destroy the old session immediately */
2064 wg_schedule_session_dtor_timer(wgp);
2065 } else {
2066 KASSERTMSG(wgs_prev->wgs_state == WGS_STATE_UNKNOWN,
2067 "state=%d", wgs_prev->wgs_state);
2068 }
2069
2070 out:
2071 mutex_exit(wgp->wgp_lock);
2072 wg_put_session(wgs, &psref);
2073 }
2074
2075 static int
2076 wg_send_handshake_msg_resp(struct wg_softc *wg, struct wg_peer *wgp,
2077 struct wg_session *wgs, const struct wg_msg_init *wgmi)
2078 {
2079 int error;
2080 struct mbuf *m;
2081 struct wg_msg_resp *wgmr;
2082
2083 KASSERT(mutex_owned(wgp->wgp_lock));
2084 KASSERT(wgs == wgp->wgp_session_unstable);
2085 KASSERT(wgs->wgs_state == WGS_STATE_INIT_PASSIVE);
2086
2087 m = m_gethdr(M_WAIT, MT_DATA);
2088 if (sizeof(*wgmr) > MHLEN) {
2089 m_clget(m, M_WAIT);
2090 CTASSERT(sizeof(*wgmr) <= MCLBYTES);
2091 }
2092 m->m_pkthdr.len = m->m_len = sizeof(*wgmr);
2093 wgmr = mtod(m, struct wg_msg_resp *);
2094 wg_fill_msg_resp(wg, wgp, wgs, wgmr, wgmi);
2095
2096 error = wg->wg_ops->send_hs_msg(wgp, m);
2097 if (error == 0)
2098 WG_TRACE("resp msg sent");
2099 return error;
2100 }
2101
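/*
* wg_lookup_peer_by_pubkey(wg, pubkey, psref)
*
* Look up the peer with the given static public key in a pserialize
* read section and acquire a psref reference to it, or return NULL
* if there is no such peer.
*/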
2102 static struct wg_peer *
2103 wg_lookup_peer_by_pubkey(struct wg_softc *wg,
2104 const uint8_t pubkey[WG_STATIC_KEY_LEN], struct psref *psref)
2105 {
2106 struct wg_peer *wgp;
2107
2108 int s = pserialize_read_enter();
2109 wgp = thmap_get(wg->wg_peers_bypubkey, pubkey, WG_STATIC_KEY_LEN);
2110 if (wgp != NULL)
2111 wg_get_peer(wgp, psref);
2112 pserialize_read_exit(s);
2113
2114 return wgp;
2115 }
2116
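/*
* wg_fill_msg_cookie(wg, wgp, wgmc, sender, mac1, src)
*
* Build a cookie reply message per [W] 5.4.7: compute a cookie as a
* MAC of the sender's transport address keyed by the rotating random
* value Rm, then encrypt it with the XAEAD under a key derived from
* our public key, using the received msg.mac1 as additional data and
* a fresh random salt as the nonce.
*
* Caller must hold wgp->wgp_lock.
*/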
2117 static void
2118 wg_fill_msg_cookie(struct wg_softc *wg, struct wg_peer *wgp,
2119 struct wg_msg_cookie *wgmc, const uint32_t sender,
2120 const uint8_t mac1[WG_MAC_LEN], const struct sockaddr *src)
2121 {
2122 uint8_t cookie[WG_COOKIE_LEN];
2123 uint8_t key[WG_HASH_LEN];
2124 uint8_t addr[sizeof(struct in6_addr)];
2125 size_t addrlen;
2126 uint16_t uh_sport; /* be */
2127
2128 KASSERT(mutex_owned(wgp->wgp_lock));
2129
2130 wgmc->wgmc_type = htole32(WG_MSG_TYPE_COOKIE);
2131 wgmc->wgmc_receiver = sender;
2132 cprng_fast(wgmc->wgmc_salt, sizeof(wgmc->wgmc_salt));
2133
2134 /*
2135 * [W] 5.4.7: Under Load: Cookie Reply Message
2136 * "The secret variable, Rm, changes every two minutes to a
2137 * random value"
2138 */
2139 if ((time_uptime - wgp->wgp_last_genrandval_time) > WG_RANDVAL_TIME) {
2140 wgp->wgp_randval = cprng_strong32();
2141 wgp->wgp_last_genrandval_time = time_uptime;
2142 }
2143
2144 switch (src->sa_family) {
2145 case AF_INET: {
2146 const struct sockaddr_in *sin = satocsin(src);
2147 addrlen = sizeof(sin->sin_addr);
2148 memcpy(addr, &sin->sin_addr, addrlen);
2149 uh_sport = sin->sin_port;
2150 break;
2151 }
2152 #ifdef INET6
2153 case AF_INET6: {
2154 const struct sockaddr_in6 *sin6 = satocsin6(src);
2155 addrlen = sizeof(sin6->sin6_addr);
2156 memcpy(addr, &sin6->sin6_addr, addrlen);
2157 uh_sport = sin6->sin6_port;
2158 break;
2159 }
2160 #endif
2161 default:
2162 panic("invalid af=%d", src->sa_family);
2163 }
2164
2165 wg_algo_mac(cookie, sizeof(cookie),
2166 (const uint8_t *)&wgp->wgp_randval, sizeof(wgp->wgp_randval),
2167 addr, addrlen, (const uint8_t *)&uh_sport, sizeof(uh_sport));
2168 wg_algo_mac_cookie(key, sizeof(key), wg->wg_pubkey,
2169 sizeof(wg->wg_pubkey));
2170 wg_algo_xaead_enc(wgmc->wgmc_cookie, sizeof(wgmc->wgmc_cookie), key,
2171 cookie, sizeof(cookie), mac1, WG_MAC_LEN, wgmc->wgmc_salt);
2172
2173 /* Need to store the cookie to verify the mac2 of subsequent messages */
2174 memcpy(wgp->wgp_last_sent_cookie, cookie, sizeof(cookie));
2175 wgp->wgp_last_sent_cookie_valid = true;
2176 }
2177
2178 static int
2179 wg_send_cookie_msg(struct wg_softc *wg, struct wg_peer *wgp,
2180 const uint32_t sender, const uint8_t mac1[WG_MAC_LEN],
2181 const struct sockaddr *src)
2182 {
2183 int error;
2184 struct mbuf *m;
2185 struct wg_msg_cookie *wgmc;
2186
2187 KASSERT(mutex_owned(wgp->wgp_lock));
2188
2189 m = m_gethdr(M_WAIT, MT_DATA);
2190 if (sizeof(*wgmc) > MHLEN) {
2191 m_clget(m, M_WAIT);
2192 CTASSERT(sizeof(*wgmc) <= MCLBYTES);
2193 }
2194 m->m_pkthdr.len = m->m_len = sizeof(*wgmc);
2195 wgmc = mtod(m, struct wg_msg_cookie *);
2196 wg_fill_msg_cookie(wg, wgp, wgmc, sender, mac1, src);
2197
2198 error = wg->wg_ops->send_hs_msg(wgp, m);
2199 if (error == 0)
2200 WG_TRACE("cookie msg sent");
2201 return error;
2202 }
2203
2204 static bool
2205 wg_is_underload(struct wg_softc *wg, struct wg_peer *wgp, int msgtype)
2206 {
2207 #ifdef WG_DEBUG_PARAMS
2208 if (wg_force_underload)
2209 return true;
2210 #endif
2211
2212 /*
2213 * XXX we don't have a means of load estimation. The purpose of this
2214 * mechanism is DoS mitigation, so we treat frequent handshake messages
2215 * as (a kind of) load; if a message of the same type arrives from a
2216 * peer within one second, we consider ourselves under load.
2217 */
2218 time_t last = wgp->wgp_last_msg_received_time[msgtype];
2219 wgp->wgp_last_msg_received_time[msgtype] = time_uptime;
2220 return (time_uptime - last) == 0;
2221 }
2222
2223 static void
2224 wg_calculate_keys(struct wg_session *wgs, const bool initiator)
2225 {
2226
2227 KASSERT(mutex_owned(wgs->wgs_peer->wgp_lock));
2228
2229 /*
2230 * [W] 5.4.5: Ti^send = Tr^recv, Ti^recv = Tr^send := KDF2(Ci = Cr, e)
2231 */
2232 if (initiator) {
2233 wg_algo_kdf(wgs->wgs_tkey_send, wgs->wgs_tkey_recv, NULL,
2234 wgs->wgs_chaining_key, NULL, 0);
2235 } else {
2236 wg_algo_kdf(wgs->wgs_tkey_recv, wgs->wgs_tkey_send, NULL,
2237 wgs->wgs_chaining_key, NULL, 0);
2238 }
2239 WG_DUMP_HASH("wgs_tkey_send", wgs->wgs_tkey_send);
2240 WG_DUMP_HASH("wgs_tkey_recv", wgs->wgs_tkey_recv);
2241 }
2242
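/*
* The 64-bit send counter is accessed locklessly with 64-bit atomics
* where available (__HAVE_ATOMIC64_LOADSTORE); otherwise it is
* protected by wgs->wgs_send_counter_lock.
*/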
2243 static uint64_t
2244 wg_session_get_send_counter(struct wg_session *wgs)
2245 {
2246 #ifdef __HAVE_ATOMIC64_LOADSTORE
2247 return atomic_load_relaxed(&wgs->wgs_send_counter);
2248 #else
2249 uint64_t send_counter;
2250
2251 mutex_enter(&wgs->wgs_send_counter_lock);
2252 send_counter = wgs->wgs_send_counter;
2253 mutex_exit(&wgs->wgs_send_counter_lock);
2254
2255 return send_counter;
2256 #endif
2257 }
2258
2259 static uint64_t
2260 wg_session_inc_send_counter(struct wg_session *wgs)
2261 {
2262 #ifdef __HAVE_ATOMIC64_LOADSTORE
2263 return atomic_inc_64_nv(&wgs->wgs_send_counter) - 1;
2264 #else
2265 uint64_t send_counter;
2266
2267 mutex_enter(&wgs->wgs_send_counter_lock);
2268 send_counter = wgs->wgs_send_counter++;
2269 mutex_exit(&wgs->wgs_send_counter_lock);
2270
2271 return send_counter;
2272 #endif
2273 }
2274
2275 static void
2276 wg_clear_states(struct wg_session *wgs)
2277 {
2278
2279 KASSERT(mutex_owned(wgs->wgs_peer->wgp_lock));
2280
2281 wgs->wgs_send_counter = 0;
2282 sliwin_reset(&wgs->wgs_recvwin->window);
2283
2284 #define wgs_clear(v) explicit_memset(wgs->wgs_##v, 0, sizeof(wgs->wgs_##v))
2285 wgs_clear(handshake_hash);
2286 wgs_clear(chaining_key);
2287 wgs_clear(ephemeral_key_pub);
2288 wgs_clear(ephemeral_key_priv);
2289 wgs_clear(ephemeral_key_peer);
2290 #undef wgs_clear
2291 }
2292
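/*
* wg_lookup_session_by_index(wg, index, psref)
*
* Look up the session with the given local index in a pserialize
* read section and acquire a psref reference to it, or return NULL
* if the index is not allocated.
*/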
2293 static struct wg_session *
2294 wg_lookup_session_by_index(struct wg_softc *wg, const uint32_t index,
2295 struct psref *psref)
2296 {
2297 struct wg_session *wgs;
2298
2299 int s = pserialize_read_enter();
2300 wgs = thmap_get(wg->wg_sessions_byindex, &index, sizeof index);
2301 if (wgs != NULL) {
2302 KASSERT(atomic_load_relaxed(&wgs->wgs_state) !=
2303 WGS_STATE_UNKNOWN);
2304 psref_acquire(psref, &wgs->wgs_psref, wg_psref_class);
2305 }
2306 pserialize_read_exit(s);
2307
2308 return wgs;
2309 }
2310
2311 static void
2312 wg_schedule_rekey_timer(struct wg_peer *wgp)
2313 {
2314 int timeout = MIN(wg_rekey_after_time, (unsigned)(INT_MAX / hz));
2315
2316 callout_schedule(&wgp->wgp_rekey_timer, timeout * hz);
2317 }
2318
2319 static void
2320 wg_send_keepalive_msg(struct wg_peer *wgp, struct wg_session *wgs)
2321 {
2322 struct mbuf *m;
2323
2324 /*
2325 * [W] 6.5 Passive Keepalive
2326 * "A keepalive message is simply a transport data message with
2327 * a zero-length encapsulated encrypted inner-packet."
2328 */
2329 WG_TRACE("");
2330 m = m_gethdr(M_WAIT, MT_DATA);
2331 wg_send_data_msg(wgp, wgs, m);
2332 }
2333
2334 static bool
2335 wg_need_to_send_init_message(struct wg_session *wgs)
2336 {
2337 /*
2338 * [W] 6.2 Transport Message Limits
2339 * "if a peer is the initiator of a current secure session,
2340 * WireGuard will send a handshake initiation message to begin
2341 * a new secure session ... if after receiving a transport data
2342 * message, the current secure session is (REJECT-AFTER-TIME -
2343 * KEEPALIVE-TIMEOUT - REKEY-TIMEOUT) seconds old and it has
2344 * not yet acted upon this event."
2345 */
2346 return wgs->wgs_is_initiator && wgs->wgs_time_last_data_sent == 0 &&
2347 (time_uptime - wgs->wgs_time_established) >=
2348 (wg_reject_after_time - wg_keepalive_timeout - wg_rekey_timeout);
2349 }
2350
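/*
* wg_schedule_peer_task(wgp, task)
*
* Record the task bit for the peer and, if no tasks were pending
* yet, enqueue the peer's work on the workqueue; wg_peer_work picks
* up all accumulated task bits at once.
*/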
2351 static void
2352 wg_schedule_peer_task(struct wg_peer *wgp, unsigned int task)
2353 {
2354
2355 mutex_enter(wgp->wgp_intr_lock);
2356 WG_DLOG("tasks=%d, task=%d\n", wgp->wgp_tasks, task);
2357 if (wgp->wgp_tasks == 0)
2358 /*
2359 * XXX If the current CPU is already loaded -- e.g., if
2360 * there's already a bunch of handshakes queued up --
2361 * consider tossing this over to another CPU to
2362 * distribute the load.
2363 */
2364 workqueue_enqueue(wg_wq, &wgp->wgp_work, NULL);
2365 wgp->wgp_tasks |= task;
2366 mutex_exit(wgp->wgp_intr_lock);
2367 }
2368
2369 static void
2370 wg_change_endpoint(struct wg_peer *wgp, const struct sockaddr *new)
2371 {
2372 struct wg_sockaddr *wgsa_prev;
2373
2374 WG_TRACE("Changing endpoint");
2375
2376 memcpy(wgp->wgp_endpoint0, new, new->sa_len);
2377 wgsa_prev = wgp->wgp_endpoint;
2378 atomic_store_release(&wgp->wgp_endpoint, wgp->wgp_endpoint0);
2379 wgp->wgp_endpoint0 = wgsa_prev;
2380 atomic_store_release(&wgp->wgp_endpoint_available, true);
2381
2382 wg_schedule_peer_task(wgp, WGP_TASK_ENDPOINT_CHANGED);
2383 }
2384
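/*
* wg_validate_inner_packet(packet, decrypted_len, af)
*
* Sanity-check the decapsulated packet: determine the address family
* from the IP version field and verify that the total length claimed
* by the header does not exceed the decrypted length.
*/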
2385 static bool
2386 wg_validate_inner_packet(const char *packet, size_t decrypted_len, int *af)
2387 {
2388 uint16_t packet_len;
2389 const struct ip *ip;
2390
2391 if (__predict_false(decrypted_len < sizeof(struct ip)))
2392 return false;
2393
2394 ip = (const struct ip *)packet;
2395 if (ip->ip_v == 4)
2396 *af = AF_INET;
2397 else if (ip->ip_v == 6)
2398 *af = AF_INET6;
2399 else
2400 return false;
2401
2402 WG_DLOG("af=%d\n", *af);
2403
2404 switch (*af) {
2405 #ifdef INET
2406 case AF_INET:
2407 packet_len = ntohs(ip->ip_len);
2408 break;
2409 #endif
2410 #ifdef INET6
2411 case AF_INET6: {
2412 const struct ip6_hdr *ip6;
2413
2414 if (__predict_false(decrypted_len < sizeof(struct ip6_hdr)))
2415 return false;
2416
2417 ip6 = (const struct ip6_hdr *)packet;
2418 packet_len = sizeof(struct ip6_hdr) + ntohs(ip6->ip6_plen);
2419 break;
2420 }
2421 #endif
2422 default:
2423 return false;
2424 }
2425
2426 WG_DLOG("packet_len=%u\n", packet_len);
2427 if (packet_len > decrypted_len)
2428 return false;
2429
2430 return true;
2431 }
2432
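/*
* wg_validate_route(wg, wgp_expected, af, packet)
*
* Cryptokey routing check ([W] II): accept the decrypted packet only
* if its inner source address resolves, via wg_pick_peer_by_sa, to
* the peer of the session that decrypted it.
*/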
2433 static bool
2434 wg_validate_route(struct wg_softc *wg, struct wg_peer *wgp_expected,
2435 int af, char *packet)
2436 {
2437 struct sockaddr_storage ss;
2438 struct sockaddr *sa;
2439 struct psref psref;
2440 struct wg_peer *wgp;
2441 bool ok;
2442
2443 /*
2444 * II CRYPTOKEY ROUTING
2445 * "it will only accept it if its source IP resolves in the
2446 * table to the public key used in the secure session for
2447 * decrypting it."
2448 */
2449
2450 if (af == AF_INET) {
2451 const struct ip *ip = (const struct ip *)packet;
2452 struct sockaddr_in *sin = (struct sockaddr_in *)&ss;
2453 sockaddr_in_init(sin, &ip->ip_src, 0);
2454 sa = sintosa(sin);
2455 #ifdef INET6
2456 } else {
2457 const struct ip6_hdr *ip6 = (const struct ip6_hdr *)packet;
2458 struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&ss;
2459 sockaddr_in6_init(sin6, &ip6->ip6_src, 0, 0, 0);
2460 sa = sin6tosa(sin6);
2461 #endif
2462 }
2463
2464 wgp = wg_pick_peer_by_sa(wg, sa, &psref);
2465 ok = (wgp == wgp_expected);
2466 if (wgp != NULL)
2467 wg_put_peer(wgp, &psref);
2468
2469 return ok;
2470 }
2471
2472 static void
2473 wg_session_dtor_timer(void *arg)
2474 {
2475 struct wg_peer *wgp = arg;
2476
2477 WG_TRACE("enter");
2478
2479 wg_schedule_peer_task(wgp, WGP_TASK_DESTROY_PREV_SESSION);
2480 }
2481
2482 static void
2483 wg_schedule_session_dtor_timer(struct wg_peer *wgp)
2484 {
2485
2486 /* 1 second grace period */
2487 callout_schedule(&wgp->wgp_session_dtor_timer, hz);
2488 }
2489
2490 static bool
2491 sockaddr_port_match(const struct sockaddr *sa1, const struct sockaddr *sa2)
2492 {
2493 if (sa1->sa_family != sa2->sa_family)
2494 return false;
2495
2496 switch (sa1->sa_family) {
2497 #ifdef INET
2498 case AF_INET:
2499 return satocsin(sa1)->sin_port == satocsin(sa2)->sin_port;
2500 #endif
2501 #ifdef INET6
2502 case AF_INET6:
2503 return satocsin6(sa1)->sin6_port == satocsin6(sa2)->sin6_port;
2504 #endif
2505 default:
2506 return false;
2507 }
2508 }
2509
2510 static void
2511 wg_update_endpoint_if_necessary(struct wg_peer *wgp,
2512 const struct sockaddr *src)
2513 {
2514 struct wg_sockaddr *wgsa;
2515 struct psref psref;
2516
2517 wgsa = wg_get_endpoint_sa(wgp, &psref);
2518
2519 #ifdef WG_DEBUG_LOG
2520 char oldaddr[128], newaddr[128];
2521 sockaddr_format(wgsatosa(wgsa), oldaddr, sizeof(oldaddr));
2522 sockaddr_format(src, newaddr, sizeof(newaddr));
2523 WG_DLOG("old=%s, new=%s\n", oldaddr, newaddr);
2524 #endif
2525
2526 /*
2527 * III: "Since the packet has authenticated correctly, the source IP of
2528 * the outer UDP/IP packet is used to update the endpoint for peer..."
2529 */
2530 if (__predict_false(sockaddr_cmp(src, wgsatosa(wgsa)) != 0 ||
2531 !sockaddr_port_match(src, wgsatosa(wgsa)))) {
2532 /* XXX We can't change the endpoint twice in a short period */
2533 if (atomic_swap_uint(&wgp->wgp_endpoint_changing, 1) == 0) {
2534 wg_change_endpoint(wgp, src);
2535 }
2536 }
2537
2538 wg_put_sa(wgp, wgsa, &psref);
2539 }
2540
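/*
* wg_handle_msg_data(wg, m, src)
*
* Handle a transport data message: look up the session by the
* receiver index, check the replay window, decrypt and verify the
* payload, validate the inner packet and its route, hand it to the
* network stack, and update the session and keepalive state.
* Consumes m.
*/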
2541 static void __noinline
2542 wg_handle_msg_data(struct wg_softc *wg, struct mbuf *m,
2543 const struct sockaddr *src)
2544 {
2545 struct wg_msg_data *wgmd;
2546 char *encrypted_buf = NULL, *decrypted_buf;
2547 size_t encrypted_len, decrypted_len;
2548 struct wg_session *wgs;
2549 struct wg_peer *wgp;
2550 int state;
2551 size_t mlen;
2552 struct psref psref;
2553 int error, af;
2554 bool success, free_encrypted_buf = false, ok;
2555 struct mbuf *n;
2556
2557 KASSERT(m->m_len >= sizeof(struct wg_msg_data));
2558 wgmd = mtod(m, struct wg_msg_data *);
2559
2560 KASSERT(wgmd->wgmd_type == htole32(WG_MSG_TYPE_DATA));
2561 WG_TRACE("data");
2562
2563 /* Find the putative session, or drop. */
2564 wgs = wg_lookup_session_by_index(wg, wgmd->wgmd_receiver, &psref);
2565 if (wgs == NULL) {
2566 WG_TRACE("No session found");
2567 m_freem(m);
2568 return;
2569 }
2570
2571 /*
2572 * We are only ready to handle data when in INIT_PASSIVE,
2573 * ESTABLISHED, or DESTROYING. All transitions out of that
2574 * state dissociate the session index and drain psrefs.
2575 */
2576 state = atomic_load_relaxed(&wgs->wgs_state);
2577 switch (state) {
2578 case WGS_STATE_UNKNOWN:
2579 panic("wg session %p in unknown state has session index %u",
2580 wgs, wgmd->wgmd_receiver);
2581 case WGS_STATE_INIT_ACTIVE:
2582 WG_TRACE("not yet ready for data");
2583 goto out;
2584 case WGS_STATE_INIT_PASSIVE:
2585 case WGS_STATE_ESTABLISHED:
2586 case WGS_STATE_DESTROYING:
2587 break;
2588 }
2589
2590 /*
2591 * Get the peer, for rate-limited logs (XXX MPSAFE, dtrace) and
2592 * to update the endpoint if authentication succeeds.
2593 */
2594 wgp = wgs->wgs_peer;
2595
2596 /*
2597 * Reject outrageously wrong sequence numbers before doing any
2598 * crypto work or taking any locks.
2599 */
2600 error = sliwin_check_fast(&wgs->wgs_recvwin->window,
2601 le64toh(wgmd->wgmd_counter));
2602 if (error) {
2603 WG_LOG_RATECHECK(&wgp->wgp_ppsratecheck, LOG_DEBUG,
2604 "%s: peer %s: out-of-window packet: %"PRIu64"\n",
2605 if_name(&wg->wg_if), wgp->wgp_name,
2606 le64toh(wgmd->wgmd_counter));
2607 goto out;
2608 }
2609
2610 /* Ensure the payload and authenticator are contiguous. */
2611 mlen = m_length(m);
2612 encrypted_len = mlen - sizeof(*wgmd);
2613 if (encrypted_len < WG_AUTHTAG_LEN) {
2614 WG_DLOG("Short encrypted_len: %lu\n", encrypted_len);
2615 goto out;
2616 }
2617 success = m_ensure_contig(&m, sizeof(*wgmd) + encrypted_len);
2618 if (success) {
2619 encrypted_buf = mtod(m, char *) + sizeof(*wgmd);
2620 } else {
2621 encrypted_buf = kmem_intr_alloc(encrypted_len, KM_NOSLEEP);
2622 if (encrypted_buf == NULL) {
2623 WG_DLOG("failed to allocate encrypted_buf\n");
2624 goto out;
2625 }
2626 m_copydata(m, sizeof(*wgmd), encrypted_len, encrypted_buf);
2627 free_encrypted_buf = true;
2628 }
2629 /* m_ensure_contig may change m regardless of its result */
2630 KASSERT(m->m_len >= sizeof(*wgmd));
2631 wgmd = mtod(m, struct wg_msg_data *);
2632
2633 /*
2634 * Get a buffer for the plaintext. Add WG_AUTHTAG_LEN to avoid
2635 * a zero-length buffer (XXX). Drop if plaintext is longer
2636 * than MCLBYTES (XXX).
2637 */
2638 decrypted_len = encrypted_len - WG_AUTHTAG_LEN;
2639 if (decrypted_len > MCLBYTES) {
2640 /* FIXME handle larger data than MCLBYTES */
2641 WG_DLOG("couldn't handle larger data than MCLBYTES\n");
2642 goto out;
2643 }
2644 n = wg_get_mbuf(0, decrypted_len + WG_AUTHTAG_LEN);
2645 if (n == NULL) {
2646 WG_DLOG("wg_get_mbuf failed\n");
2647 goto out;
2648 }
2649 decrypted_buf = mtod(n, char *);
2650
2651 /* Decrypt and verify the packet. */
2652 WG_DLOG("mlen=%lu, encrypted_len=%lu\n", mlen, encrypted_len);
2653 error = wg_algo_aead_dec(decrypted_buf,
2654 encrypted_len - WG_AUTHTAG_LEN /* can be 0 */,
2655 wgs->wgs_tkey_recv, le64toh(wgmd->wgmd_counter), encrypted_buf,
2656 encrypted_len, NULL, 0);
2657 if (error != 0) {
2658 WG_LOG_RATECHECK(&wgp->wgp_ppsratecheck, LOG_DEBUG,
2659 "%s: peer %s: failed to wg_algo_aead_dec\n",
2660 if_name(&wg->wg_if), wgp->wgp_name);
2661 m_freem(n);
2662 goto out;
2663 }
2664 WG_DLOG("outsize=%u\n", (u_int)decrypted_len);
2665
2666 /* Packet is genuine. Reject it if it is a replay or just too old. */
2667 mutex_enter(&wgs->wgs_recvwin->lock);
2668 error = sliwin_update(&wgs->wgs_recvwin->window,
2669 le64toh(wgmd->wgmd_counter));
2670 mutex_exit(&wgs->wgs_recvwin->lock);
2671 if (error) {
2672 WG_LOG_RATECHECK(&wgp->wgp_ppsratecheck, LOG_DEBUG,
2673 "%s: peer %s: replay or out-of-window packet: %"PRIu64"\n",
2674 if_name(&wg->wg_if), wgp->wgp_name,
2675 le64toh(wgmd->wgmd_counter));
2676 m_freem(n);
2677 goto out;
2678 }
2679
2680 /* We're done with m now; free it and chuck the pointers. */
2681 m_freem(m);
2682 m = NULL;
2683 wgmd = NULL;
2684
2685 /*
2686 * Validate the encapsulated packet header and get the address
2687 * family, or drop.
2688 */
2689 ok = wg_validate_inner_packet(decrypted_buf, decrypted_len, &af);
2690 if (!ok) {
2691 m_freem(n);
2692 goto out;
2693 }
2694
2695 /*
2696 * The packet is genuine. Update the peer's endpoint if the
2697 * source address changed.
2698 *
2699 * XXX How to prevent DoS by replaying genuine packets from the
2700 * wrong source address?
2701 */
2702 wg_update_endpoint_if_necessary(wgp, src);
2703
2704 /* Submit it into our network stack if routable. */
2705 ok = wg_validate_route(wg, wgp, af, decrypted_buf);
2706 if (ok) {
2707 wg->wg_ops->input(&wg->wg_if, n, af);
2708 } else {
2709 char addrstr[INET6_ADDRSTRLEN];
2710 memset(addrstr, 0, sizeof(addrstr));
2711 if (af == AF_INET) {
2712 const struct ip *ip = (const struct ip *)decrypted_buf;
2713 IN_PRINT(addrstr, &ip->ip_src);
2714 #ifdef INET6
2715 } else if (af == AF_INET6) {
2716 const struct ip6_hdr *ip6 =
2717 (const struct ip6_hdr *)decrypted_buf;
2718 IN6_PRINT(addrstr, &ip6->ip6_src);
2719 #endif
2720 }
2721 WG_LOG_RATECHECK(&wgp->wgp_ppsratecheck, LOG_DEBUG,
2722 "%s: peer %s: invalid source address (%s)\n",
2723 if_name(&wg->wg_if), wgp->wgp_name, addrstr);
2724 m_freem(n);
2725 /*
2726 * The inner address is invalid; however, the session itself is
2727 * valid, so continue the session processing below.
2728 */
2729 }
2730 n = NULL;
2731
2732 /* Update the state machine if necessary. */
2733 if (__predict_false(state == WGS_STATE_INIT_PASSIVE)) {
2734 /*
2735 * We were waiting for the initiator to send their
2736 * first data transport message, and that has happened.
2737 * Schedule a task to establish this session.
2738 */
2739 wg_schedule_peer_task(wgp, WGP_TASK_ESTABLISH_SESSION);
2740 } else {
2741 if (__predict_false(wg_need_to_send_init_message(wgs))) {
2742 wg_schedule_peer_task(wgp, WGP_TASK_SEND_INIT_MESSAGE);
2743 }
2744 /*
2745 * [W] 6.5 Passive Keepalive
2746 * "If a peer has received a validly-authenticated transport
2747 * data message (section 5.4.6), but does not have any packets
2748 * itself to send back for KEEPALIVE-TIMEOUT seconds, it sends
2749 * a keepalive message."
2750 */
2751 WG_DLOG("time_uptime=%ju wgs_time_last_data_sent=%ju\n",
2752 (uintmax_t)time_uptime,
2753 (uintmax_t)wgs->wgs_time_last_data_sent);
2754 if ((time_uptime - wgs->wgs_time_last_data_sent) >=
2755 wg_keepalive_timeout) {
2756 WG_TRACE("Schedule sending keepalive message");
2757 /*
2758 * We can't send a keepalive message here because it would
2759 * deadlock: we already hold the solock of the socket that
2760 * is used to send the message.
2761 */
2762 wg_schedule_peer_task(wgp,
2763 WGP_TASK_SEND_KEEPALIVE_MESSAGE);
2764 }
2765 }
2766 out:
2767 wg_put_session(wgs, &psref);
2768 m_freem(m);
2769 if (free_encrypted_buf)
2770 kmem_intr_free(encrypted_buf, encrypted_len);
2771 }
2772
2773 static void __noinline
2774 wg_handle_msg_cookie(struct wg_softc *wg, const struct wg_msg_cookie *wgmc)
2775 {
2776 struct wg_session *wgs;
2777 struct wg_peer *wgp;
2778 struct psref psref;
2779 int error;
2780 uint8_t key[WG_HASH_LEN];
2781 uint8_t cookie[WG_COOKIE_LEN];
2782
2783 WG_TRACE("cookie msg received");
2784
2785 /* Find the putative session. */
2786 wgs = wg_lookup_session_by_index(wg, wgmc->wgmc_receiver, &psref);
2787 if (wgs == NULL) {
2788 WG_TRACE("No session found");
2789 return;
2790 }
2791
2792 /* Lock the peer so we can update the cookie state. */
2793 wgp = wgs->wgs_peer;
2794 mutex_enter(wgp->wgp_lock);
2795
2796 if (!wgp->wgp_last_sent_mac1_valid) {
2797 WG_TRACE("No valid mac1 sent (or expired)");
2798 goto out;
2799 }
2800
2801 /* Decrypt the cookie and store it for later handshake retry. */
2802 wg_algo_mac_cookie(key, sizeof(key), wgp->wgp_pubkey,
2803 sizeof(wgp->wgp_pubkey));
2804 error = wg_algo_xaead_dec(cookie, sizeof(cookie), key,
2805 wgmc->wgmc_cookie, sizeof(wgmc->wgmc_cookie),
2806 wgp->wgp_last_sent_mac1, sizeof(wgp->wgp_last_sent_mac1),
2807 wgmc->wgmc_salt);
2808 if (error != 0) {
2809 WG_LOG_RATECHECK(&wgp->wgp_ppsratecheck, LOG_DEBUG,
2810 "%s: peer %s: wg_algo_aead_dec for cookie failed: "
2811 "error=%d\n", if_name(&wg->wg_if), wgp->wgp_name, error);
2812 goto out;
2813 }
2814 /*
2815 * [W] 6.6: Interaction with Cookie Reply System
2816 * "it should simply store the decrypted cookie value from the cookie
2817 * reply message, and wait for the expiration of the REKEY-TIMEOUT
2818 * timer for retrying a handshake initiation message."
2819 */
2820 wgp->wgp_latest_cookie_time = time_uptime;
2821 memcpy(wgp->wgp_latest_cookie, cookie, sizeof(wgp->wgp_latest_cookie));
2822 out:
2823 mutex_exit(wgp->wgp_lock);
2824 wg_put_session(wgs, &psref);
2825 }
2826
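/*
* wg_validate_msg_header(wg, m)
*
* Check that the WireGuard message type is known and that the mbuf
* chain is long enough for a message of that type, and make the
* message header contiguous.  Returns NULL (and frees m) on failure.
*/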
2827 static struct mbuf *
2828 wg_validate_msg_header(struct wg_softc *wg, struct mbuf *m)
2829 {
2830 struct wg_msg wgm;
2831 size_t mbuflen;
2832 size_t msglen;
2833
2834 /*
2835 * Get the mbuf chain length. It is already guaranteed, by
2836 * wg_overudp_cb, to be large enough for a struct wg_msg.
2837 */
2838 mbuflen = m_length(m);
2839 KASSERT(mbuflen >= sizeof(struct wg_msg));
2840
2841 /*
2842 * Copy the message header (32-bit message type) out -- we'll
2843 * worry about contiguity and alignment later.
2844 */
2845 m_copydata(m, 0, sizeof(wgm), &wgm);
2846 switch (le32toh(wgm.wgm_type)) {
2847 case WG_MSG_TYPE_INIT:
2848 msglen = sizeof(struct wg_msg_init);
2849 break;
2850 case WG_MSG_TYPE_RESP:
2851 msglen = sizeof(struct wg_msg_resp);
2852 break;
2853 case WG_MSG_TYPE_COOKIE:
2854 msglen = sizeof(struct wg_msg_cookie);
2855 break;
2856 case WG_MSG_TYPE_DATA:
2857 msglen = sizeof(struct wg_msg_data);
2858 break;
2859 default:
2860 WG_LOG_RATECHECK(&wg->wg_ppsratecheck, LOG_DEBUG,
2861 "%s: Unexpected msg type: %u\n", if_name(&wg->wg_if),
2862 le32toh(wgm.wgm_type));
2863 goto error;
2864 }
2865
2866 /* Verify the mbuf chain is long enough for this type of message. */
2867 if (__predict_false(mbuflen < msglen)) {
2868 WG_DLOG("Invalid msg size: mbuflen=%lu type=%u\n", mbuflen,
2869 le32toh(wgm.wgm_type));
2870 goto error;
2871 }
2872
2873 /* Make the message header contiguous if necessary. */
2874 if (__predict_false(m->m_len < msglen)) {
2875 m = m_pullup(m, msglen);
2876 if (m == NULL)
2877 return NULL;
2878 }
2879
2880 return m;
2881
2882 error:
2883 m_freem(m);
2884 return NULL;
2885 }
2886
2887 static void
2888 wg_handle_packet(struct wg_softc *wg, struct mbuf *m,
2889 const struct sockaddr *src)
2890 {
2891 struct wg_msg *wgm;
2892
2893 KASSERT(curlwp->l_pflag & LP_BOUND);
2894
2895 m = wg_validate_msg_header(wg, m);
2896 if (__predict_false(m == NULL))
2897 return;
2898
2899 KASSERT(m->m_len >= sizeof(struct wg_msg));
2900 wgm = mtod(m, struct wg_msg *);
2901 switch (le32toh(wgm->wgm_type)) {
2902 case WG_MSG_TYPE_INIT:
2903 wg_handle_msg_init(wg, (struct wg_msg_init *)wgm, src);
2904 break;
2905 case WG_MSG_TYPE_RESP:
2906 wg_handle_msg_resp(wg, (struct wg_msg_resp *)wgm, src);
2907 break;
2908 case WG_MSG_TYPE_COOKIE:
2909 wg_handle_msg_cookie(wg, (struct wg_msg_cookie *)wgm);
2910 break;
2911 case WG_MSG_TYPE_DATA:
2912 wg_handle_msg_data(wg, m, src);
2913 /* wg_handle_msg_data frees m for us */
2914 return;
2915 default:
2916 panic("invalid message type: %d", le32toh(wgm->wgm_type));
2917 }
2918
2919 m_freem(m);
2920 }
2921
2922 static void
2923 wg_receive_packets(struct wg_softc *wg, const int af)
2924 {
2925
2926 for (;;) {
2927 int error, flags;
2928 struct socket *so;
2929 struct mbuf *m = NULL;
2930 struct uio dummy_uio;
2931 struct mbuf *paddr = NULL;
2932 struct sockaddr *src;
2933
2934 so = wg_get_so_by_af(wg, af);
2935 flags = MSG_DONTWAIT;
2936 dummy_uio.uio_resid = 1000000000;
2937
2938 error = so->so_receive(so, &paddr, &dummy_uio, &m, NULL,
2939 &flags);
2940 if (error || m == NULL) {
2941 //if (error == EWOULDBLOCK)
2942 return;
2943 }
2944
2945 KASSERT(paddr != NULL);
2946 KASSERT(paddr->m_len >= sizeof(struct sockaddr));
2947 src = mtod(paddr, struct sockaddr *);
2948
2949 wg_handle_packet(wg, m, src);
2950 }
2951 }
2952
2953 static void
2954 wg_get_peer(struct wg_peer *wgp, struct psref *psref)
2955 {
2956
2957 psref_acquire(psref, &wgp->wgp_psref, wg_psref_class);
2958 }
2959
2960 static void
2961 wg_put_peer(struct wg_peer *wgp, struct psref *psref)
2962 {
2963
2964 psref_release(psref, &wgp->wgp_psref, wg_psref_class);
2965 }
2966
2967 static void
2968 wg_task_send_init_message(struct wg_softc *wg, struct wg_peer *wgp)
2969 {
2970 struct wg_session *wgs;
2971
2972 WG_TRACE("WGP_TASK_SEND_INIT_MESSAGE");
2973
2974 KASSERT(mutex_owned(wgp->wgp_lock));
2975
2976 if (!atomic_load_acquire(&wgp->wgp_endpoint_available)) {
2977 WGLOG(LOG_DEBUG, "%s: No endpoint available\n",
2978 if_name(&wg->wg_if));
2979 /* XXX should do something? */
2980 return;
2981 }
2982
2983 wgs = wgp->wgp_session_stable;
2984 if (wgs->wgs_state == WGS_STATE_UNKNOWN) {
2985 /* XXX What if the unstable session is already INIT_ACTIVE? */
2986 wg_send_handshake_msg_init(wg, wgp);
2987 } else {
2988 /* rekey */
2989 wgs = wgp->wgp_session_unstable;
2990 if (wgs->wgs_state != WGS_STATE_INIT_ACTIVE)
2991 wg_send_handshake_msg_init(wg, wgp);
2992 }
2993 }
2994
2995 static void
2996 wg_task_retry_handshake(struct wg_softc *wg, struct wg_peer *wgp)
2997 {
2998 struct wg_session *wgs;
2999
3000 WG_TRACE("WGP_TASK_RETRY_HANDSHAKE");
3001
3002 KASSERT(mutex_owned(wgp->wgp_lock));
3003 KASSERT(wgp->wgp_handshake_start_time != 0);
3004
3005 wgs = wgp->wgp_session_unstable;
3006 if (wgs->wgs_state != WGS_STATE_INIT_ACTIVE)
3007 return;
3008
3009 /*
3010 * XXX no real need to assign a new index here, but we do need
3011 * to transition to UNKNOWN temporarily
3012 */
3013 wg_put_session_index(wg, wgs);
3014
3015 /* [W] 6.4 Handshake Initiation Retransmission */
3016 if ((time_uptime - wgp->wgp_handshake_start_time) >
3017 wg_rekey_attempt_time) {
3018 /* Give up handshaking */
3019 wgp->wgp_handshake_start_time = 0;
3020 WG_TRACE("give up");
3021
3022 /*
3023 * If a new data packet arrives, handshaking will be retried
3024 * and a new session will be established at that time;
3025 * however, we don't want to send the pending packets then.
3026 */
3027 wg_purge_pending_packets(wgp);
3028 return;
3029 }
3030
3031 wg_task_send_init_message(wg, wgp);
3032 }
3033
3034 static void
3035 wg_task_establish_session(struct wg_softc *wg, struct wg_peer *wgp)
3036 {
3037 struct wg_session *wgs, *wgs_prev;
3038 struct mbuf *m;
3039
3040 KASSERT(mutex_owned(wgp->wgp_lock));
3041
3042 wgs = wgp->wgp_session_unstable;
3043 if (wgs->wgs_state != WGS_STATE_INIT_PASSIVE)
3044 /* XXX Can this happen? */
3045 return;
3046
3047 wgs->wgs_state = WGS_STATE_ESTABLISHED;
3048 wgs->wgs_time_established = time_uptime;
3049 wgs->wgs_time_last_data_sent = 0;
3050 wgs->wgs_is_initiator = false;
3051 WG_TRACE("WGS_STATE_ESTABLISHED");
3052
3053 wg_swap_sessions(wgp);
3054 KASSERT(wgs == wgp->wgp_session_stable);
3055 wgs_prev = wgp->wgp_session_unstable;
3056 getnanotime(&wgp->wgp_last_handshake_time);
3057 wgp->wgp_handshake_start_time = 0;
3058 wgp->wgp_last_sent_mac1_valid = false;
3059 wgp->wgp_last_sent_cookie_valid = false;
3060
3061 /* If we had a data packet queued up, send it. */
3062 if ((m = atomic_swap_ptr(&wgp->wgp_pending, NULL)) != NULL) {
3063 kpreempt_disable();
3064 const uint32_t h = curcpu()->ci_index; // pktq_rps_hash(m)
3065 M_SETCTX(m, wgp);
3066 if (__predict_false(!pktq_enqueue(wg_pktq, m, h))) {
3067 WGLOG(LOG_ERR, "%s: pktq full, dropping\n",
3068 if_name(&wg->wg_if));
3069 m_freem(m);
3070 }
3071 kpreempt_enable();
3072 }
3073
3074 if (wgs_prev->wgs_state == WGS_STATE_ESTABLISHED) {
3075 /* Wait for wg_get_stable_session to drain. */
3076 pserialize_perform(wgp->wgp_psz);
3077
3078 /* Transition ESTABLISHED->DESTROYING. */
3079 wgs_prev->wgs_state = WGS_STATE_DESTROYING;
3080
3081 /* We can't destroy the old session immediately */
3082 wg_schedule_session_dtor_timer(wgp);
3083 } else {
3084 KASSERTMSG(wgs_prev->wgs_state == WGS_STATE_UNKNOWN,
3085 "state=%d", wgs_prev->wgs_state);
3086 wg_clear_states(wgs_prev);
3087 wgs_prev->wgs_state = WGS_STATE_UNKNOWN;
3088 }
3089 }
3090
3091 static void
3092 wg_task_endpoint_changed(struct wg_softc *wg, struct wg_peer *wgp)
3093 {
3094
3095 WG_TRACE("WGP_TASK_ENDPOINT_CHANGED");
3096
3097 KASSERT(mutex_owned(wgp->wgp_lock));
3098
3099 if (atomic_load_relaxed(&wgp->wgp_endpoint_changing)) {
3100 pserialize_perform(wgp->wgp_psz);
3101 mutex_exit(wgp->wgp_lock);
3102 psref_target_destroy(&wgp->wgp_endpoint0->wgsa_psref,
3103 wg_psref_class);
3104 psref_target_init(&wgp->wgp_endpoint0->wgsa_psref,
3105 wg_psref_class);
3106 mutex_enter(wgp->wgp_lock);
3107 atomic_store_release(&wgp->wgp_endpoint_changing, 0);
3108 }
3109 }
3110
3111 static void
3112 wg_task_send_keepalive_message(struct wg_softc *wg, struct wg_peer *wgp)
3113 {
3114 struct wg_session *wgs;
3115
3116 WG_TRACE("WGP_TASK_SEND_KEEPALIVE_MESSAGE");
3117
3118 KASSERT(mutex_owned(wgp->wgp_lock));
3119
3120 wgs = wgp->wgp_session_stable;
3121 if (wgs->wgs_state != WGS_STATE_ESTABLISHED)
3122 return;
3123
3124 wg_send_keepalive_msg(wgp, wgs);
3125 }
3126
3127 static void
3128 wg_task_destroy_prev_session(struct wg_softc *wg, struct wg_peer *wgp)
3129 {
3130 struct wg_session *wgs;
3131
3132 WG_TRACE("WGP_TASK_DESTROY_PREV_SESSION");
3133
3134 KASSERT(mutex_owned(wgp->wgp_lock));
3135
3136 wgs = wgp->wgp_session_unstable;
3137 if (wgs->wgs_state == WGS_STATE_DESTROYING) {
3138 wg_put_session_index(wg, wgs);
3139 }
3140 }
3141
3142 static void
3143 wg_peer_work(struct work *wk, void *cookie)
3144 {
3145 struct wg_peer *wgp = container_of(wk, struct wg_peer, wgp_work);
3146 struct wg_softc *wg = wgp->wgp_sc;
3147 unsigned int tasks;
3148
3149 mutex_enter(wgp->wgp_intr_lock);
3150 while ((tasks = wgp->wgp_tasks) != 0) {
3151 wgp->wgp_tasks = 0;
3152 mutex_exit(wgp->wgp_intr_lock);
3153
3154 mutex_enter(wgp->wgp_lock);
3155 if (ISSET(tasks, WGP_TASK_SEND_INIT_MESSAGE))
3156 wg_task_send_init_message(wg, wgp);
3157 if (ISSET(tasks, WGP_TASK_RETRY_HANDSHAKE))
3158 wg_task_retry_handshake(wg, wgp);
3159 if (ISSET(tasks, WGP_TASK_ESTABLISH_SESSION))
3160 wg_task_establish_session(wg, wgp);
3161 if (ISSET(tasks, WGP_TASK_ENDPOINT_CHANGED))
3162 wg_task_endpoint_changed(wg, wgp);
3163 if (ISSET(tasks, WGP_TASK_SEND_KEEPALIVE_MESSAGE))
3164 wg_task_send_keepalive_message(wg, wgp);
3165 if (ISSET(tasks, WGP_TASK_DESTROY_PREV_SESSION))
3166 wg_task_destroy_prev_session(wg, wgp);
3167 mutex_exit(wgp->wgp_lock);
3168
3169 mutex_enter(wgp->wgp_intr_lock);
3170 }
3171 mutex_exit(wgp->wgp_intr_lock);
3172 }
3173
3174 static void
3175 wg_job(struct threadpool_job *job)
3176 {
3177 struct wg_softc *wg = container_of(job, struct wg_softc, wg_job);
3178 int bound, upcalls;
3179
3180 mutex_enter(wg->wg_intr_lock);
3181 while ((upcalls = wg->wg_upcalls) != 0) {
3182 wg->wg_upcalls = 0;
3183 mutex_exit(wg->wg_intr_lock);
3184 bound = curlwp_bind();
3185 if (ISSET(upcalls, WG_UPCALL_INET))
3186 wg_receive_packets(wg, AF_INET);
3187 if (ISSET(upcalls, WG_UPCALL_INET6))
3188 wg_receive_packets(wg, AF_INET6);
3189 curlwp_bindx(bound);
3190 mutex_enter(wg->wg_intr_lock);
3191 }
3192 threadpool_job_done(job);
3193 mutex_exit(wg->wg_intr_lock);
3194 }
3195
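/*
* wg_bind_port(wg, port)
*
* Bind the interface's IPv4 (and, with INET6, IPv6) UDP sockets to
* the given listen port.  Does nothing if the sockets are already
* bound to the requested nonzero port.
*/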
3196 static int
3197 wg_bind_port(struct wg_softc *wg, const uint16_t port)
3198 {
3199 int error;
3200 uint16_t old_port = wg->wg_listen_port;
3201
3202 if (port != 0 && old_port == port)
3203 return 0;
3204
3205 struct sockaddr_in _sin, *sin = &_sin;
3206 sin->sin_len = sizeof(*sin);
3207 sin->sin_family = AF_INET;
3208 sin->sin_addr.s_addr = INADDR_ANY;
3209 sin->sin_port = htons(port);
3210
3211 error = sobind(wg->wg_so4, sintosa(sin), curlwp);
3212 if (error != 0)
3213 return error;
3214
3215 #ifdef INET6
3216 struct sockaddr_in6 _sin6, *sin6 = &_sin6;
3217 sin6->sin6_len = sizeof(*sin6);
3218 sin6->sin6_family = AF_INET6;
3219 sin6->sin6_addr = in6addr_any;
3220 sin6->sin6_port = htons(port);
3221
3222 error = sobind(wg->wg_so6, sin6tosa(sin6), curlwp);
3223 if (error != 0)
3224 return error;
3225 #endif
3226
3227 wg->wg_listen_port = port;
3228
3229 return 0;
3230 }
3231
3232 static void
3233 wg_so_upcall(struct socket *so, void *cookie, int events, int waitflag)
3234 {
3235 struct wg_softc *wg = cookie;
3236 int reason;
3237
3238 reason = (so->so_proto->pr_domain->dom_family == AF_INET) ?
3239 WG_UPCALL_INET :
3240 WG_UPCALL_INET6;
3241
3242 mutex_enter(wg->wg_intr_lock);
3243 wg->wg_upcalls |= reason;
3244 threadpool_schedule_job(wg->wg_threadpool, &wg->wg_job);
3245 mutex_exit(wg->wg_intr_lock);
3246 }
3247
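/*
* wg_overudp_cb(mp, offset, so, src, arg)
*
* Callback invoked from the UDP input path for packets arriving on
* our socket.  Data messages are handled immediately (return 1,
* packet consumed); handshake messages are left on the socket for
* the worker thread via so_receive (return 0); malformed packets are
* dropped (return -1).
*/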
3248 static int
3249 wg_overudp_cb(struct mbuf **mp, int offset, struct socket *so,
3250 struct sockaddr *src, void *arg)
3251 {
3252 struct wg_softc *wg = arg;
3253 struct wg_msg wgm;
3254 struct mbuf *m = *mp;
3255
3256 WG_TRACE("enter");
3257
3258 /* Verify the mbuf chain is long enough to have a wg msg header. */
3259 KASSERT(offset <= m_length(m));
3260 if (__predict_false(m_length(m) - offset < sizeof(struct wg_msg))) {
3261 /* drop on the floor */
3262 m_freem(m);
3263 return -1;
3264 }
3265
3266 /*
3267 * Copy the message header (32-bit message type) out -- we'll
3268 * worry about contiguity and alignment later.
3269 */
3270 m_copydata(m, offset, sizeof(struct wg_msg), &wgm);
3271 WG_DLOG("type=%d\n", le32toh(wgm.wgm_type));
3272
3273 /*
3274 * Handle DATA packets promptly as they arrive. Other packets
3275 * may require expensive public-key crypto and are not as
3276 * sensitive to latency, so defer them to the worker thread.
3277 */
3278 switch (le32toh(wgm.wgm_type)) {
3279 case WG_MSG_TYPE_DATA:
3280 /* handle immediately */
3281 m_adj(m, offset);
3282 if (__predict_false(m->m_len < sizeof(struct wg_msg_data))) {
3283 m = m_pullup(m, sizeof(struct wg_msg_data));
3284 if (m == NULL)
3285 return -1;
3286 }
3287 wg_handle_msg_data(wg, m, src);
3288 *mp = NULL;
3289 return 1;
3290 case WG_MSG_TYPE_INIT:
3291 case WG_MSG_TYPE_RESP:
3292 case WG_MSG_TYPE_COOKIE:
3293 /* pass through to so_receive in wg_receive_packets */
3294 return 0;
3295 default:
3296 /* drop on the floor */
3297 m_freem(m);
3298 return -1;
3299 }
3300 }
3301
3302 static int
3303 wg_socreate(struct wg_softc *wg, int af, struct socket **sop)
3304 {
3305 int error;
3306 struct socket *so;
3307
3308 error = socreate(af, &so, SOCK_DGRAM, 0, curlwp, NULL);
3309 if (error != 0)
3310 return error;
3311
3312 solock(so);
3313 so->so_upcallarg = wg;
3314 so->so_upcall = wg_so_upcall;
3315 so->so_rcv.sb_flags |= SB_UPCALL;
3316 inpcb_register_overudp_cb(sotoinpcb(so), wg_overudp_cb, wg);
3317 sounlock(so);
3318
3319 *sop = so;
3320
3321 return 0;
3322 }
3323
3324 static bool
3325 wg_session_hit_limits(struct wg_session *wgs)
3326 {
3327
3328 /*
3329 * [W] 6.2: Transport Message Limits
3330 * "After REJECT-AFTER-MESSAGES transport data messages or after the
3331 * current secure session is REJECT-AFTER-TIME seconds old, whichever
3332 * comes first, WireGuard will refuse to send any more transport data
3333 * messages using the current secure session, ..."
3334 */
3335 KASSERT(wgs->wgs_time_established != 0);
3336 if ((time_uptime - wgs->wgs_time_established) > wg_reject_after_time) {
3337 WG_DLOG("The session hits REJECT_AFTER_TIME\n");
3338 return true;
3339 } else if (wg_session_get_send_counter(wgs) >
3340 wg_reject_after_messages) {
3341 WG_DLOG("The session hits REJECT_AFTER_MESSAGES\n");
3342 return true;
3343 }
3344
3345 return false;
3346 }
3347
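/*
* wgintr(cookie)
*
* Software interrupt handler: dequeue outgoing packets from wg_pktq
* and send each one as a data message over its peer's stable
* session; if there is no usable session (none established, or the
* session hit its limits), schedule a handshake initiation and drop
* the packet.
*/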
3348 static void
3349 wgintr(void *cookie)
3350 {
3351 struct wg_peer *wgp;
3352 struct wg_session *wgs;
3353 struct mbuf *m;
3354 struct psref psref;
3355
3356 while ((m = pktq_dequeue(wg_pktq)) != NULL) {
3357 wgp = M_GETCTX(m, struct wg_peer *);
3358 if ((wgs = wg_get_stable_session(wgp, &psref)) == NULL) {
3359 WG_TRACE("no stable session");
3360 wg_schedule_peer_task(wgp, WGP_TASK_SEND_INIT_MESSAGE);
3361 goto next0;
3362 }
3363 if (__predict_false(wg_session_hit_limits(wgs))) {
3364 WG_TRACE("stable session hit limits");
3365 wg_schedule_peer_task(wgp, WGP_TASK_SEND_INIT_MESSAGE);
3366 goto next1;
3367 }
3368 wg_send_data_msg(wgp, wgs, m);
3369 m = NULL; /* consumed */
3370 next1: wg_put_session(wgs, &psref);
3371 next0: m_freem(m);
3372 /* XXX Yield to avoid userland starvation? */
3373 }
3374 }
3375
3376 static void
3377 wg_rekey_timer(void *arg)
3378 {
3379 struct wg_peer *wgp = arg;
3380
3381 wg_schedule_peer_task(wgp, WGP_TASK_SEND_INIT_MESSAGE);
3382 }
3383
3384 static void
3385 wg_purge_pending_packets(struct wg_peer *wgp)
3386 {
3387 struct mbuf *m;
3388
3389 m = atomic_swap_ptr(&wgp->wgp_pending, NULL);
3390 m_freem(m);
3391 pktq_barrier(wg_pktq);
3392 }
3393
3394 static void
3395 wg_handshake_timeout_timer(void *arg)
3396 {
3397 struct wg_peer *wgp = arg;
3398
3399 WG_TRACE("enter");
3400
3401 wg_schedule_peer_task(wgp, WGP_TASK_RETRY_HANDSHAKE);
3402 }
3403
3404 static struct wg_peer *
3405 wg_alloc_peer(struct wg_softc *wg)
3406 {
3407 struct wg_peer *wgp;
3408
3409 wgp = kmem_zalloc(sizeof(*wgp), KM_SLEEP);
3410
3411 wgp->wgp_sc = wg;
3412 callout_init(&wgp->wgp_rekey_timer, CALLOUT_MPSAFE);
3413 callout_setfunc(&wgp->wgp_rekey_timer, wg_rekey_timer, wgp);
3414 callout_init(&wgp->wgp_handshake_timeout_timer, CALLOUT_MPSAFE);
3415 callout_setfunc(&wgp->wgp_handshake_timeout_timer,
3416 wg_handshake_timeout_timer, wgp);
3417 callout_init(&wgp->wgp_session_dtor_timer, CALLOUT_MPSAFE);
3418 callout_setfunc(&wgp->wgp_session_dtor_timer,
3419 wg_session_dtor_timer, wgp);
3420 PSLIST_ENTRY_INIT(wgp, wgp_peerlist_entry);
3421 wgp->wgp_endpoint_changing = false;
3422 wgp->wgp_endpoint_available = false;
3423 wgp->wgp_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NONE);
3424 wgp->wgp_intr_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_SOFTNET);
3425 wgp->wgp_psz = pserialize_create();
3426 psref_target_init(&wgp->wgp_psref, wg_psref_class);
3427
3428 wgp->wgp_endpoint = kmem_zalloc(sizeof(*wgp->wgp_endpoint), KM_SLEEP);
3429 wgp->wgp_endpoint0 = kmem_zalloc(sizeof(*wgp->wgp_endpoint0), KM_SLEEP);
3430 psref_target_init(&wgp->wgp_endpoint->wgsa_psref, wg_psref_class);
3431 psref_target_init(&wgp->wgp_endpoint0->wgsa_psref, wg_psref_class);
3432
3433 struct wg_session *wgs;
3434 wgp->wgp_session_stable =
3435 kmem_zalloc(sizeof(*wgp->wgp_session_stable), KM_SLEEP);
3436 wgp->wgp_session_unstable =
3437 kmem_zalloc(sizeof(*wgp->wgp_session_unstable), KM_SLEEP);
3438 wgs = wgp->wgp_session_stable;
3439 wgs->wgs_peer = wgp;
3440 wgs->wgs_state = WGS_STATE_UNKNOWN;
3441 psref_target_init(&wgs->wgs_psref, wg_psref_class);
3442 #ifndef __HAVE_ATOMIC64_LOADSTORE
3443 mutex_init(&wgs->wgs_send_counter_lock, MUTEX_DEFAULT, IPL_SOFTNET);
3444 #endif
3445 wgs->wgs_recvwin = kmem_zalloc(sizeof(*wgs->wgs_recvwin), KM_SLEEP);
3446 mutex_init(&wgs->wgs_recvwin->lock, MUTEX_DEFAULT, IPL_SOFTNET);
3447
3448 wgs = wgp->wgp_session_unstable;
3449 wgs->wgs_peer = wgp;
3450 wgs->wgs_state = WGS_STATE_UNKNOWN;
3451 psref_target_init(&wgs->wgs_psref, wg_psref_class);
3452 #ifndef __HAVE_ATOMIC64_LOADSTORE
3453 mutex_init(&wgs->wgs_send_counter_lock, MUTEX_DEFAULT, IPL_SOFTNET);
3454 #endif
3455 wgs->wgs_recvwin = kmem_zalloc(sizeof(*wgs->wgs_recvwin), KM_SLEEP);
3456 mutex_init(&wgs->wgs_recvwin->lock, MUTEX_DEFAULT, IPL_SOFTNET);
3457
3458 return wgp;
3459 }
3460
3461 static void
3462 wg_destroy_peer(struct wg_peer *wgp)
3463 {
3464 struct wg_session *wgs;
3465 struct wg_softc *wg = wgp->wgp_sc;
3466
3467 /* Prevent new packets from this peer on any source address. */
3468 rw_enter(wg->wg_rwlock, RW_WRITER);
3469 for (int i = 0; i < wgp->wgp_n_allowedips; i++) {
3470 struct wg_allowedip *wga = &wgp->wgp_allowedips[i];
3471 struct radix_node_head *rnh = wg_rnh(wg, wga->wga_family);
3472 struct radix_node *rn;
3473
3474 KASSERT(rnh != NULL);
3475 rn = rnh->rnh_deladdr(&wga->wga_sa_addr,
3476 &wga->wga_sa_mask, rnh);
3477 if (rn == NULL) {
3478 char addrstr[128];
3479 sockaddr_format(&wga->wga_sa_addr, addrstr,
3480 sizeof(addrstr));
3481 WGLOG(LOG_WARNING, "%s: Couldn't delete %s\n",
3482 if_name(&wg->wg_if), addrstr);
3483 }
3484 }
3485 rw_exit(wg->wg_rwlock);
3486
3487 /* Purge pending packets. */
3488 wg_purge_pending_packets(wgp);
3489
3490 /* Halt all packet processing and timeouts. */
3491 callout_halt(&wgp->wgp_rekey_timer, NULL);
3492 callout_halt(&wgp->wgp_handshake_timeout_timer, NULL);
3493 callout_halt(&wgp->wgp_session_dtor_timer, NULL);
3494
3495 /* Wait for any queued work to complete. */
3496 workqueue_wait(wg_wq, &wgp->wgp_work);
3497
3498 wgs = wgp->wgp_session_unstable;
3499 if (wgs->wgs_state != WGS_STATE_UNKNOWN) {
3500 mutex_enter(wgp->wgp_lock);
3501 wg_destroy_session(wg, wgs);
3502 mutex_exit(wgp->wgp_lock);
3503 }
3504 mutex_destroy(&wgs->wgs_recvwin->lock);
3505 kmem_free(wgs->wgs_recvwin, sizeof(*wgs->wgs_recvwin));
3506 #ifndef __HAVE_ATOMIC64_LOADSTORE
3507 mutex_destroy(&wgs->wgs_send_counter_lock);
3508 #endif
3509 kmem_free(wgs, sizeof(*wgs));
3510
3511 wgs = wgp->wgp_session_stable;
3512 if (wgs->wgs_state != WGS_STATE_UNKNOWN) {
3513 mutex_enter(wgp->wgp_lock);
3514 wg_destroy_session(wg, wgs);
3515 mutex_exit(wgp->wgp_lock);
3516 }
3517 mutex_destroy(&wgs->wgs_recvwin->lock);
3518 kmem_free(wgs->wgs_recvwin, sizeof(*wgs->wgs_recvwin));
3519 #ifndef __HAVE_ATOMIC64_LOADSTORE
3520 mutex_destroy(&wgs->wgs_send_counter_lock);
3521 #endif
3522 kmem_free(wgs, sizeof(*wgs));
3523
3524 psref_target_destroy(&wgp->wgp_endpoint->wgsa_psref, wg_psref_class);
3525 psref_target_destroy(&wgp->wgp_endpoint0->wgsa_psref, wg_psref_class);
3526 kmem_free(wgp->wgp_endpoint, sizeof(*wgp->wgp_endpoint));
3527 kmem_free(wgp->wgp_endpoint0, sizeof(*wgp->wgp_endpoint0));
3528
3529 pserialize_destroy(wgp->wgp_psz);
3530 mutex_obj_free(wgp->wgp_intr_lock);
3531 mutex_obj_free(wgp->wgp_lock);
3532
3533 kmem_free(wgp, sizeof(*wgp));
3534 }
3535
3536 static void
3537 wg_destroy_all_peers(struct wg_softc *wg)
3538 {
3539 struct wg_peer *wgp, *wgp0 __diagused;
3540 void *garbage_byname, *garbage_bypubkey;
3541
3542 restart:
3543 garbage_byname = garbage_bypubkey = NULL;
3544 mutex_enter(wg->wg_lock);
3545 WG_PEER_WRITER_FOREACH(wgp, wg) {
3546 if (wgp->wgp_name[0]) {
3547 wgp0 = thmap_del(wg->wg_peers_byname, wgp->wgp_name,
3548 strlen(wgp->wgp_name));
3549 KASSERT(wgp0 == wgp);
3550 garbage_byname = thmap_stage_gc(wg->wg_peers_byname);
3551 }
3552 wgp0 = thmap_del(wg->wg_peers_bypubkey, wgp->wgp_pubkey,
3553 sizeof(wgp->wgp_pubkey));
3554 KASSERT(wgp0 == wgp);
3555 garbage_bypubkey = thmap_stage_gc(wg->wg_peers_bypubkey);
3556 WG_PEER_WRITER_REMOVE(wgp);
3557 wg->wg_npeers--;
3558 mutex_enter(wgp->wgp_lock);
3559 pserialize_perform(wgp->wgp_psz);
3560 mutex_exit(wgp->wgp_lock);
3561 PSLIST_ENTRY_DESTROY(wgp, wgp_peerlist_entry);
3562 break;
3563 }
3564 mutex_exit(wg->wg_lock);
3565
3566 if (wgp == NULL)
3567 return;
3568
3569 psref_target_destroy(&wgp->wgp_psref, wg_psref_class);
3570
3571 wg_destroy_peer(wgp);
3572 thmap_gc(wg->wg_peers_byname, garbage_byname);
3573 thmap_gc(wg->wg_peers_bypubkey, garbage_bypubkey);
3574
3575 goto restart;
3576 }
3577
3578 static int
3579 wg_destroy_peer_name(struct wg_softc *wg, const char *name)
3580 {
3581 struct wg_peer *wgp, *wgp0 __diagused;
3582 void *garbage_byname, *garbage_bypubkey;
3583
3584 mutex_enter(wg->wg_lock);
3585 wgp = thmap_del(wg->wg_peers_byname, name, strlen(name));
3586 if (wgp != NULL) {
3587 wgp0 = thmap_del(wg->wg_peers_bypubkey, wgp->wgp_pubkey,
3588 sizeof(wgp->wgp_pubkey));
3589 KASSERT(wgp0 == wgp);
3590 garbage_byname = thmap_stage_gc(wg->wg_peers_byname);
3591 garbage_bypubkey = thmap_stage_gc(wg->wg_peers_bypubkey);
3592 WG_PEER_WRITER_REMOVE(wgp);
3593 wg->wg_npeers--;
3594 if (wg->wg_npeers == 0)
3595 if_link_state_change(&wg->wg_if, LINK_STATE_DOWN);
3596 mutex_enter(wgp->wgp_lock);
3597 pserialize_perform(wgp->wgp_psz);
3598 mutex_exit(wgp->wgp_lock);
3599 PSLIST_ENTRY_DESTROY(wgp, wgp_peerlist_entry);
3600 }
3601 mutex_exit(wg->wg_lock);
3602
3603 if (wgp == NULL)
3604 return ENOENT;
3605
3606 psref_target_destroy(&wgp->wgp_psref, wg_psref_class);
3607
3608 wg_destroy_peer(wgp);
3609 thmap_gc(wg->wg_peers_byname, garbage_byname);
3610 thmap_gc(wg->wg_peers_bypubkey, garbage_bypubkey);
3611
3612 return 0;
3613 }
3614
3615 static int
3616 wg_if_attach(struct wg_softc *wg)
3617 {
3618
3619 wg->wg_if.if_addrlen = 0;
3620 wg->wg_if.if_mtu = WG_MTU;
3621 wg->wg_if.if_flags = IFF_MULTICAST;
3622 wg->wg_if.if_extflags = IFEF_MPSAFE;
3623 wg->wg_if.if_ioctl = wg_ioctl;
3624 wg->wg_if.if_output = wg_output;
3625 wg->wg_if.if_init = wg_init;
3626 #ifdef ALTQ
3627 wg->wg_if.if_start = wg_start;
3628 #endif
3629 wg->wg_if.if_stop = wg_stop;
3630 wg->wg_if.if_type = IFT_OTHER;
3631 wg->wg_if.if_dlt = DLT_NULL;
3632 wg->wg_if.if_softc = wg;
3633 #ifdef ALTQ
3634 IFQ_SET_READY(&wg->wg_if.if_snd);
3635 #endif
3636 if_initialize(&wg->wg_if);
3637
3638 wg->wg_if.if_link_state = LINK_STATE_DOWN;
3639 if_alloc_sadl(&wg->wg_if);
3640 if_register(&wg->wg_if);
3641
3642 bpf_attach(&wg->wg_if, DLT_NULL, sizeof(uint32_t));
3643
3644 return 0;
3645 }
3646
3647 static void
3648 wg_if_detach(struct wg_softc *wg)
3649 {
3650 struct ifnet *ifp = &wg->wg_if;
3651
3652 bpf_detach(ifp);
3653 if_detach(ifp);
3654 }
3655
3656 static int
3657 wg_clone_create(struct if_clone *ifc, int unit)
3658 {
3659 struct wg_softc *wg;
3660 int error;
3661
3662 wg_guarantee_initialized();
3663
3664 error = wg_count_inc();
3665 if (error)
3666 return error;
3667
3668 wg = kmem_zalloc(sizeof(*wg), KM_SLEEP);
3669
3670 if_initname(&wg->wg_if, ifc->ifc_name, unit);
3671
3672 PSLIST_INIT(&wg->wg_peers);
3673 wg->wg_peers_bypubkey = thmap_create(0, NULL, THMAP_NOCOPY);
3674 wg->wg_peers_byname = thmap_create(0, NULL, THMAP_NOCOPY);
3675 wg->wg_sessions_byindex = thmap_create(0, NULL, THMAP_NOCOPY);
3676 wg->wg_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NONE);
3677 wg->wg_intr_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_SOFTNET);
3678 wg->wg_rwlock = rw_obj_alloc();
3679 threadpool_job_init(&wg->wg_job, wg_job, wg->wg_intr_lock,
3680 "%s", if_name(&wg->wg_if));
3681 wg->wg_ops = &wg_ops_rumpkernel;
3682
3683 error = threadpool_get(&wg->wg_threadpool, PRI_NONE);
3684 if (error)
3685 goto fail0;
3686
3687 #ifdef INET
3688 error = wg_socreate(wg, AF_INET, &wg->wg_so4);
3689 if (error)
3690 goto fail1;
3691 rn_inithead((void **)&wg->wg_rtable_ipv4,
3692 offsetof(struct sockaddr_in, sin_addr) * NBBY);
3693 #endif
3694 #ifdef INET6
3695 error = wg_socreate(wg, AF_INET6, &wg->wg_so6);
3696 if (error)
3697 goto fail2;
3698 rn_inithead((void **)&wg->wg_rtable_ipv6,
3699 offsetof(struct sockaddr_in6, sin6_addr) * NBBY);
3700 #endif
3701
3702 error = wg_if_attach(wg);
3703 if (error)
3704 goto fail3;
3705
3706 return 0;
3707
3708 fail4: __unused
3709 wg_if_detach(wg);
3710 fail3: wg_destroy_all_peers(wg);
3711 #ifdef INET6
3712 solock(wg->wg_so6);
3713 wg->wg_so6->so_rcv.sb_flags &= ~SB_UPCALL;
3714 sounlock(wg->wg_so6);
3715 #endif
3716 #ifdef INET
3717 solock(wg->wg_so4);
3718 wg->wg_so4->so_rcv.sb_flags &= ~SB_UPCALL;
3719 sounlock(wg->wg_so4);
3720 #endif
3721 mutex_enter(wg->wg_intr_lock);
3722 threadpool_cancel_job(wg->wg_threadpool, &wg->wg_job);
3723 mutex_exit(wg->wg_intr_lock);
3724 #ifdef INET6
3725 if (wg->wg_rtable_ipv6 != NULL)
3726 free(wg->wg_rtable_ipv6, M_RTABLE);
3727 soclose(wg->wg_so6);
3728 fail2:
3729 #endif
3730 #ifdef INET
3731 if (wg->wg_rtable_ipv4 != NULL)
3732 free(wg->wg_rtable_ipv4, M_RTABLE);
3733 soclose(wg->wg_so4);
3734 fail1:
3735 #endif
3736 threadpool_put(wg->wg_threadpool, PRI_NONE);
3737 fail0: threadpool_job_destroy(&wg->wg_job);
3738 rw_obj_free(wg->wg_rwlock);
3739 mutex_obj_free(wg->wg_intr_lock);
3740 mutex_obj_free(wg->wg_lock);
3741 thmap_destroy(wg->wg_sessions_byindex);
3742 thmap_destroy(wg->wg_peers_byname);
3743 thmap_destroy(wg->wg_peers_bypubkey);
3744 PSLIST_DESTROY(&wg->wg_peers);
3745 kmem_free(wg, sizeof(*wg));
3746 wg_count_dec();
3747 return error;
3748 }
3749
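/*
 * wg_clone_destroy(ifp)
 *
 *	Destroy a wgN instance: detach the interface, destroy all
 *	peers, disable the socket upcalls, cancel the threadpool job,
 *	and release the sockets, radix trees, maps, and locks set up
 *	by wg_clone_create.
 */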
3750 static int
3751 wg_clone_destroy(struct ifnet *ifp)
3752 {
3753 struct wg_softc *wg = container_of(ifp, struct wg_softc, wg_if);
3754
3755 #ifdef WG_RUMPKERNEL
3756 if (wg_user_mode(wg)) {
3757 rumpuser_wg_destroy(wg->wg_user);
3758 wg->wg_user = NULL;
3759 }
3760 #endif
3761
3762 wg_if_detach(wg);
3763 wg_destroy_all_peers(wg);
3764 #ifdef INET6
3765 solock(wg->wg_so6);
3766 wg->wg_so6->so_rcv.sb_flags &= ~SB_UPCALL;
3767 sounlock(wg->wg_so6);
3768 #endif
3769 #ifdef INET
3770 solock(wg->wg_so4);
3771 wg->wg_so4->so_rcv.sb_flags &= ~SB_UPCALL;
3772 sounlock(wg->wg_so4);
3773 #endif
3774 mutex_enter(wg->wg_intr_lock);
3775 threadpool_cancel_job(wg->wg_threadpool, &wg->wg_job);
3776 mutex_exit(wg->wg_intr_lock);
3777 #ifdef INET6
3778 if (wg->wg_rtable_ipv6 != NULL)
3779 free(wg->wg_rtable_ipv6, M_RTABLE);
3780 soclose(wg->wg_so6);
3781 #endif
3782 #ifdef INET
3783 if (wg->wg_rtable_ipv4 != NULL)
3784 free(wg->wg_rtable_ipv4, M_RTABLE);
3785 soclose(wg->wg_so4);
3786 #endif
3787 threadpool_put(wg->wg_threadpool, PRI_NONE);
3788 threadpool_job_destroy(&wg->wg_job);
3789 rw_obj_free(wg->wg_rwlock);
3790 mutex_obj_free(wg->wg_intr_lock);
3791 mutex_obj_free(wg->wg_lock);
3792 thmap_destroy(wg->wg_sessions_byindex);
3793 thmap_destroy(wg->wg_peers_byname);
3794 thmap_destroy(wg->wg_peers_bypubkey);
3795 PSLIST_DESTROY(&wg->wg_peers);
3796 kmem_free(wg, sizeof(*wg));
3797 wg_count_dec();
3798
3799 return 0;
3800 }
3801
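/*
 * wg_pick_peer_by_sa(wg, sa, psref)
 *
 *	Return the peer whose allowed-ips radix tree matches the
 *	address sa, with a psref acquired, or NULL if no peer's
 *	allowed ips cover sa.
 */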
3802 static struct wg_peer *
3803 wg_pick_peer_by_sa(struct wg_softc *wg, const struct sockaddr *sa,
3804 struct psref *psref)
3805 {
3806 struct radix_node_head *rnh;
3807 struct radix_node *rn;
3808 struct wg_peer *wgp = NULL;
3809 struct wg_allowedip *wga;
3810
3811 #ifdef WG_DEBUG_LOG
3812 char addrstr[128];
3813 sockaddr_format(sa, addrstr, sizeof(addrstr));
3814 WG_DLOG("sa=%s\n", addrstr);
3815 #endif
3816
3817 rw_enter(wg->wg_rwlock, RW_READER);
3818
3819 rnh = wg_rnh(wg, sa->sa_family);
3820 if (rnh == NULL)
3821 goto out;
3822
3823 rn = rnh->rnh_matchaddr(sa, rnh);
3824 if (rn == NULL || (rn->rn_flags & RNF_ROOT) != 0)
3825 goto out;
3826
3827 WG_TRACE("success");
3828
3829 wga = container_of(rn, struct wg_allowedip, wga_nodes[0]);
3830 wgp = wga->wga_peer;
3831 wg_get_peer(wgp, psref);
3832
3833 out:
3834 rw_exit(wg->wg_rwlock);
3835 return wgp;
3836 }
3837
3838 static void
3839 wg_fill_msg_data(struct wg_softc *wg, struct wg_peer *wgp,
3840 struct wg_session *wgs, struct wg_msg_data *wgmd)
3841 {
3842
3843 memset(wgmd, 0, sizeof(*wgmd));
3844 wgmd->wgmd_type = htole32(WG_MSG_TYPE_DATA);
3845 wgmd->wgmd_receiver = wgs->wgs_remote_index;
3846 /* [W] 5.4.6: msg.counter := Nm^send */
3847 /* [W] 5.4.6: Nm^send := Nm^send + 1 */
3848 wgmd->wgmd_counter = htole64(wg_session_inc_send_counter(wgs));
3849 WG_DLOG("counter=%"PRIu64"\n", le64toh(wgmd->wgmd_counter));
3850 }
3851
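/*
 * wg_output(ifp, m, dst, rt)
 *
 *	if_output routine.  Pick the peer responsible for dst by its
 *	allowed ips.  If no session is established yet, stash at most
 *	one packet in wgp_pending and schedule a handshake; otherwise
 *	tag the mbuf with the peer (M_SETCTX) and enqueue it on
 *	wg_pktq (or the ALTQ send queue) for encryption and
 *	transmission off the output path.
 */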
3852 static int
3853 wg_output(struct ifnet *ifp, struct mbuf *m, const struct sockaddr *dst,
3854 const struct rtentry *rt)
3855 {
3856 struct wg_softc *wg = ifp->if_softc;
3857 struct wg_peer *wgp = NULL;
3858 struct wg_session *wgs = NULL;
3859 struct psref wgp_psref, wgs_psref;
3860 int bound;
3861 int error;
3862
3863 bound = curlwp_bind();
3864
3865 /* TODO make the nest limit configurable via sysctl */
3866 error = if_tunnel_check_nesting(ifp, m, 1);
3867 if (error) {
3868 WGLOG(LOG_ERR,
3869 "%s: tunneling loop detected and packet dropped\n",
3870 if_name(&wg->wg_if));
3871 goto out0;
3872 }
3873
3874 #ifdef ALTQ
3875 bool altq = atomic_load_relaxed(&ifp->if_snd.altq_flags)
3876 & ALTQF_ENABLED;
3877 if (altq)
3878 IFQ_CLASSIFY(&ifp->if_snd, m, dst->sa_family);
3879 #endif
3880
3881 bpf_mtap_af(ifp, dst->sa_family, m, BPF_D_OUT);
3882
3883 m->m_flags &= ~(M_BCAST|M_MCAST);
3884
3885 wgp = wg_pick_peer_by_sa(wg, dst, &wgp_psref);
3886 if (wgp == NULL) {
3887 WG_TRACE("peer not found");
3888 error = EHOSTUNREACH;
3889 goto out0;
3890 }
3891
3892 /* Clear checksum-offload flags. */
3893 m->m_pkthdr.csum_flags = 0;
3894 m->m_pkthdr.csum_data = 0;
3895
3896 /* Check whether there's an established session. */
3897 wgs = wg_get_stable_session(wgp, &wgs_psref);
3898 if (wgs == NULL) {
3899 /*
3900 * No established session. If we're the first to try
3901 * sending data, schedule a handshake and queue the
3902 * packet for when the handshake is done; otherwise
3903 * just drop the packet and let the ongoing handshake
3904 * attempt continue. We could queue more data packets
3905 * but it's not clear that's worthwhile.
3906 */
3907 if (atomic_cas_ptr(&wgp->wgp_pending, NULL, m) == NULL) {
3908 m = NULL; /* consume */
3909 WG_TRACE("queued first packet; init handshake");
3910 wg_schedule_peer_task(wgp, WGP_TASK_SEND_INIT_MESSAGE);
3911 } else {
3912 WG_TRACE("first packet already queued, dropping");
3913 }
3914 goto out1;
3915 }
3916
3917 /* There's an established session. Toss it in the queue. */
3918 #ifdef ALTQ
3919 if (altq) {
3920 mutex_enter(ifp->if_snd.ifq_lock);
3921 if (ALTQ_IS_ENABLED(&ifp->if_snd)) {
3922 M_SETCTX(m, wgp);
3923 ALTQ_ENQUEUE(&ifp->if_snd, m, error);
3924 m = NULL; /* consume */
3925 }
3926 mutex_exit(ifp->if_snd.ifq_lock);
3927 if (m == NULL) {
3928 wg_start(ifp);
3929 goto out2;
3930 }
3931 }
3932 #endif
3933 kpreempt_disable();
3934 const uint32_t h = curcpu()->ci_index; // pktq_rps_hash(m)
3935 M_SETCTX(m, wgp);
3936 if (__predict_false(!pktq_enqueue(wg_pktq, m, h))) {
3937 WGLOG(LOG_ERR, "%s: pktq full, dropping\n",
3938 if_name(&wg->wg_if));
3939 error = ENOBUFS;
3940 goto out3;
3941 }
3942 m = NULL; /* consumed */
3943 error = 0;
3944 out3: kpreempt_enable();
3945
3946 #ifdef ALTQ
3947 out2:
3948 #endif
3949 wg_put_session(wgs, &wgs_psref);
3950 out1: wg_put_peer(wgp, &wgp_psref);
3951 out0: m_freem(m);
3952 curlwp_bindx(bound);
3953 return error;
3954 }
3955
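/*
 * wg_send_udp(wgp, m)
 *
 *	Transmit the already-built WireGuard message in m to the
 *	peer's current endpoint over the matching IPv4 or IPv6 UDP
 *	socket.
 */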
3956 static int
3957 wg_send_udp(struct wg_peer *wgp, struct mbuf *m)
3958 {
3959 struct psref psref;
3960 struct wg_sockaddr *wgsa;
3961 int error;
3962 struct socket *so;
3963
3964 wgsa = wg_get_endpoint_sa(wgp, &psref);
3965 so = wg_get_so_by_peer(wgp, wgsa);
3966 solock(so);
3967 if (wgsatosa(wgsa)->sa_family == AF_INET) {
3968 error = udp_send(so, m, wgsatosa(wgsa), NULL, curlwp);
3969 } else {
3970 #ifdef INET6
3971 error = udp6_output(sotoinpcb(so), m, wgsatosin6(wgsa),
3972 NULL, curlwp);
3973 #else
3974 m_freem(m);
3975 error = EPFNOSUPPORT;
3976 #endif
3977 }
3978 sounlock(so);
3979 wg_put_sa(wgp, wgsa, &psref);
3980
3981 return error;
3982 }
3983
3984 /* Inspired by pppoe_get_mbuf */
3985 static struct mbuf *
3986 wg_get_mbuf(size_t leading_len, size_t len)
3987 {
3988 struct mbuf *m;
3989
3990 KASSERT(leading_len <= MCLBYTES);
3991 KASSERT(len <= MCLBYTES - leading_len);
3992
3993 m = m_gethdr(M_DONTWAIT, MT_DATA);
3994 if (m == NULL)
3995 return NULL;
3996 if (len + leading_len > MHLEN) {
3997 m_clget(m, M_DONTWAIT);
3998 if ((m->m_flags & M_EXT) == 0) {
3999 m_free(m);
4000 return NULL;
4001 }
4002 }
4003 m->m_data += leading_len;
4004 m->m_pkthdr.len = m->m_len = len;
4005
4006 return m;
4007 }
4008
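/*
 * wg_send_data_msg(wgp, wgs, m)
 *
 *	Encapsulate the packet m as a transport data message for
 *	session wgs: pad the plaintext to a multiple of 16 bytes and
 *	encrypt it with the session's send key and counter
 *	([W] 5.4.6), then hand the message to the interface's
 *	send_data_msg op.  On success, update the statistics and
 *	arrange for rekeying per [W] 6.2 when the session is old
 *	enough or has carried enough messages.
 */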
4009 static int
4010 wg_send_data_msg(struct wg_peer *wgp, struct wg_session *wgs,
4011 struct mbuf *m)
4012 {
4013 struct wg_softc *wg = wgp->wgp_sc;
4014 int error;
4015 size_t inner_len, padded_len, encrypted_len;
4016 char *padded_buf = NULL;
4017 size_t mlen;
4018 struct wg_msg_data *wgmd;
4019 bool free_padded_buf = false;
4020 struct mbuf *n;
4021 size_t leading_len = max_hdr + sizeof(struct udphdr);
4022
4023 mlen = m_length(m);
4024 inner_len = mlen;
4025 padded_len = roundup(mlen, 16);
4026 encrypted_len = padded_len + WG_AUTHTAG_LEN;
4027 WG_DLOG("inner=%lu, padded=%lu, encrypted_len=%lu\n",
4028 inner_len, padded_len, encrypted_len);
4029 if (mlen != 0) {
4030 bool success;
4031 success = m_ensure_contig(&m, padded_len);
4032 if (success) {
4033 padded_buf = mtod(m, char *);
4034 } else {
4035 padded_buf = kmem_intr_alloc(padded_len, KM_NOSLEEP);
4036 if (padded_buf == NULL) {
4037 error = ENOBUFS;
4038 goto end;
4039 }
4040 free_padded_buf = true;
4041 m_copydata(m, 0, mlen, padded_buf);
4042 }
4043 		memset(padded_buf + inner_len, 0, padded_len - inner_len);
4044 }
4045
4046 n = wg_get_mbuf(leading_len, sizeof(*wgmd) + encrypted_len);
4047 if (n == NULL) {
4048 error = ENOBUFS;
4049 goto end;
4050 }
4051 KASSERT(n->m_len >= sizeof(*wgmd));
4052 wgmd = mtod(n, struct wg_msg_data *);
4053 wg_fill_msg_data(wg, wgp, wgs, wgmd);
4054 /* [W] 5.4.6: AEAD(Tm^send, Nm^send, P, e) */
4055 wg_algo_aead_enc((char *)wgmd + sizeof(*wgmd), encrypted_len,
4056 wgs->wgs_tkey_send, le64toh(wgmd->wgmd_counter),
4057 padded_buf, padded_len,
4058 NULL, 0);
4059
4060 error = wg->wg_ops->send_data_msg(wgp, n);
4061 if (error == 0) {
4062 struct ifnet *ifp = &wg->wg_if;
4063 if_statadd(ifp, if_obytes, mlen);
4064 if_statinc(ifp, if_opackets);
4065 if (wgs->wgs_is_initiator &&
4066 wgs->wgs_time_last_data_sent == 0) {
4067 /*
4068 * [W] 6.2 Transport Message Limits
4069 * "if a peer is the initiator of a current secure
4070 * session, WireGuard will send a handshake initiation
4071 * message to begin a new secure session if, after
4072 * transmitting a transport data message, the current
4073 * secure session is REKEY-AFTER-TIME seconds old,"
4074 */
4075 wg_schedule_rekey_timer(wgp);
4076 }
4077 wgs->wgs_time_last_data_sent = time_uptime;
4078 if (wg_session_get_send_counter(wgs) >=
4079 wg_rekey_after_messages) {
4080 /*
4081 * [W] 6.2 Transport Message Limits
4082 * "WireGuard will try to create a new session, by
4083 * sending a handshake initiation message (section
4084 * 5.4.2), after it has sent REKEY-AFTER-MESSAGES
4085 * transport data messages..."
4086 */
4087 wg_schedule_peer_task(wgp, WGP_TASK_SEND_INIT_MESSAGE);
4088 }
4089 }
4090 end:
4091 m_freem(m);
4092 if (free_padded_buf)
4093 kmem_intr_free(padded_buf, padded_len);
4094 return error;
4095 }
4096
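/*
 * wg_input(ifp, m, af)
 *
 *	Hand a decrypted, decapsulated packet to the IPv4 or IPv6
 *	input path by enqueueing it on the corresponding pktqueue,
 *	counting it in the interface input statistics on success.
 */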
4097 static void
4098 wg_input(struct ifnet *ifp, struct mbuf *m, const int af)
4099 {
4100 pktqueue_t *pktq;
4101 size_t pktlen;
4102
4103 KASSERT(af == AF_INET || af == AF_INET6);
4104
4105 WG_TRACE("");
4106
4107 m_set_rcvif(m, ifp);
4108 pktlen = m->m_pkthdr.len;
4109
4110 bpf_mtap_af(ifp, af, m, BPF_D_IN);
4111
4112 switch (af) {
4113 case AF_INET:
4114 pktq = ip_pktq;
4115 break;
4116 #ifdef INET6
4117 case AF_INET6:
4118 pktq = ip6_pktq;
4119 break;
4120 #endif
4121 default:
4122 panic("invalid af=%d", af);
4123 }
4124
4125 kpreempt_disable();
4126 const u_int h = curcpu()->ci_index;
4127 if (__predict_true(pktq_enqueue(pktq, m, h))) {
4128 if_statadd(ifp, if_ibytes, pktlen);
4129 if_statinc(ifp, if_ipackets);
4130 } else {
4131 m_freem(m);
4132 }
4133 kpreempt_enable();
4134 }
4135
4136 static void
4137 wg_calc_pubkey(uint8_t pubkey[WG_STATIC_KEY_LEN],
4138 const uint8_t privkey[WG_STATIC_KEY_LEN])
4139 {
4140
4141 crypto_scalarmult_base(pubkey, privkey);
4142 }
4143
4144 static int
4145 wg_rtable_add_route(struct wg_softc *wg, struct wg_allowedip *wga)
4146 {
4147 struct radix_node_head *rnh;
4148 struct radix_node *rn;
4149 int error = 0;
4150
4151 rw_enter(wg->wg_rwlock, RW_WRITER);
4152 rnh = wg_rnh(wg, wga->wga_family);
4153 KASSERT(rnh != NULL);
4154 rn = rnh->rnh_addaddr(&wga->wga_sa_addr, &wga->wga_sa_mask, rnh,
4155 wga->wga_nodes);
4156 rw_exit(wg->wg_rwlock);
4157
4158 if (rn == NULL)
4159 error = EEXIST;
4160
4161 return error;
4162 }
4163
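/*
 * wg_handle_prop_peer(wg, peer, wgpp)
 *
 *	Create a peer from the proplib dictionary `peer': mandatory
 *	"public_key" data, optional "name", "preshared_key",
 *	"endpoint" sockaddr, and an "allowedips" array of
 *	{family, ip, cidr} entries, each of which is installed in the
 *	allowed-ips radix tree.  On success the new peer is returned
 *	in *wgpp.
 */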
4164 static int
4165 wg_handle_prop_peer(struct wg_softc *wg, prop_dictionary_t peer,
4166 struct wg_peer **wgpp)
4167 {
4168 int error = 0;
4169 const void *pubkey;
4170 size_t pubkey_len;
4171 const void *psk;
4172 size_t psk_len;
4173 const char *name = NULL;
4174
4175 if (prop_dictionary_get_string(peer, "name", &name)) {
4176 if (strlen(name) > WG_PEER_NAME_MAXLEN) {
4177 error = EINVAL;
4178 goto out;
4179 }
4180 }
4181
4182 if (!prop_dictionary_get_data(peer, "public_key",
4183 &pubkey, &pubkey_len)) {
4184 error = EINVAL;
4185 goto out;
4186 }
4187 #ifdef WG_DEBUG_DUMP
4188 if (wg_debug & WG_DEBUG_FLAGS_DUMP) {
4189 char *hex = gethexdump(pubkey, pubkey_len);
4190 log(LOG_DEBUG, "pubkey=%p, pubkey_len=%lu\n%s\n",
4191 pubkey, pubkey_len, hex);
4192 puthexdump(hex, pubkey, pubkey_len);
4193 }
4194 #endif
4195
4196 struct wg_peer *wgp = wg_alloc_peer(wg);
4197 memcpy(wgp->wgp_pubkey, pubkey, sizeof(wgp->wgp_pubkey));
4198 if (name != NULL)
4199 strncpy(wgp->wgp_name, name, sizeof(wgp->wgp_name));
4200
4201 if (prop_dictionary_get_data(peer, "preshared_key", &psk, &psk_len)) {
4202 if (psk_len != sizeof(wgp->wgp_psk)) {
4203 error = EINVAL;
4204 goto out;
4205 }
4206 memcpy(wgp->wgp_psk, psk, sizeof(wgp->wgp_psk));
4207 }
4208
4209 const void *addr;
4210 size_t addr_len;
4211 struct wg_sockaddr *wgsa = wgp->wgp_endpoint;
4212
4213 if (!prop_dictionary_get_data(peer, "endpoint", &addr, &addr_len))
4214 goto skip_endpoint;
4215 if (addr_len < sizeof(*wgsatosa(wgsa)) ||
4216 addr_len > sizeof(*wgsatoss(wgsa))) {
4217 error = EINVAL;
4218 goto out;
4219 }
4220 memcpy(wgsatoss(wgsa), addr, addr_len);
4221 switch (wgsa_family(wgsa)) {
4222 case AF_INET:
4223 #ifdef INET6
4224 case AF_INET6:
4225 #endif
4226 break;
4227 default:
4228 error = EPFNOSUPPORT;
4229 goto out;
4230 }
4231 if (addr_len != sockaddr_getsize_by_family(wgsa_family(wgsa))) {
4232 error = EINVAL;
4233 goto out;
4234 }
4235 {
4236 char addrstr[128];
4237 sockaddr_format(wgsatosa(wgsa), addrstr, sizeof(addrstr));
4238 WG_DLOG("addr=%s\n", addrstr);
4239 }
4240 wgp->wgp_endpoint_available = true;
4241
4242 prop_array_t allowedips;
4243 skip_endpoint:
4244 allowedips = prop_dictionary_get(peer, "allowedips");
4245 if (allowedips == NULL)
4246 goto skip;
4247
4248 prop_object_iterator_t _it = prop_array_iterator(allowedips);
4249 prop_dictionary_t prop_allowedip;
4250 int j = 0;
4251 while ((prop_allowedip = prop_object_iterator_next(_it)) != NULL) {
4252 struct wg_allowedip *wga = &wgp->wgp_allowedips[j];
4253
4254 if (!prop_dictionary_get_int(prop_allowedip, "family",
4255 &wga->wga_family))
4256 continue;
4257 if (!prop_dictionary_get_data(prop_allowedip, "ip",
4258 &addr, &addr_len))
4259 continue;
4260 if (!prop_dictionary_get_uint8(prop_allowedip, "cidr",
4261 &wga->wga_cidr))
4262 continue;
4263
4264 switch (wga->wga_family) {
4265 case AF_INET: {
4266 struct sockaddr_in sin;
4267 char addrstr[128];
4268 struct in_addr mask;
4269 struct sockaddr_in sin_mask;
4270
4271 if (addr_len != sizeof(struct in_addr))
4272 return EINVAL;
4273 memcpy(&wga->wga_addr4, addr, addr_len);
4274
4275 sockaddr_in_init(&sin, (const struct in_addr *)addr,
4276 0);
4277 sockaddr_copy(&wga->wga_sa_addr,
4278 sizeof(sin), sintosa(&sin));
4279
4280 sockaddr_format(sintosa(&sin),
4281 addrstr, sizeof(addrstr));
4282 WG_DLOG("addr=%s/%d\n", addrstr, wga->wga_cidr);
4283
4284 in_len2mask(&mask, wga->wga_cidr);
4285 sockaddr_in_init(&sin_mask, &mask, 0);
4286 sockaddr_copy(&wga->wga_sa_mask,
4287 sizeof(sin_mask), sintosa(&sin_mask));
4288
4289 break;
4290 }
4291 #ifdef INET6
4292 case AF_INET6: {
4293 struct sockaddr_in6 sin6;
4294 char addrstr[128];
4295 struct in6_addr mask;
4296 struct sockaddr_in6 sin6_mask;
4297
4298 if (addr_len != sizeof(struct in6_addr))
4299 return EINVAL;
4300 memcpy(&wga->wga_addr6, addr, addr_len);
4301
4302 sockaddr_in6_init(&sin6, (const struct in6_addr *)addr,
4303 0, 0, 0);
4304 sockaddr_copy(&wga->wga_sa_addr,
4305 sizeof(sin6), sin6tosa(&sin6));
4306
4307 sockaddr_format(sin6tosa(&sin6),
4308 addrstr, sizeof(addrstr));
4309 WG_DLOG("addr=%s/%d\n", addrstr, wga->wga_cidr);
4310
4311 in6_prefixlen2mask(&mask, wga->wga_cidr);
4312 sockaddr_in6_init(&sin6_mask, &mask, 0, 0, 0);
4313 sockaddr_copy(&wga->wga_sa_mask,
4314 sizeof(sin6_mask), sin6tosa(&sin6_mask));
4315
4316 break;
4317 }
4318 #endif
4319 default:
4320 error = EINVAL;
4321 goto out;
4322 }
4323 wga->wga_peer = wgp;
4324
4325 error = wg_rtable_add_route(wg, wga);
4326 if (error != 0)
4327 goto out;
4328
4329 j++;
4330 }
4331 wgp->wgp_n_allowedips = j;
4332 skip:
4333 *wgpp = wgp;
4334 out:
4335 return error;
4336 }
4337
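/*
 * wg_alloc_prop_buf(_buf, ifd)
 *
 *	Copy in the NUL-terminated proplib externalization passed
 *	with a drvspec ioctl, limited to WG_MAX_PROPLEN bytes.  The
 *	caller frees the buffer with kmem_free(buf, ifd->ifd_len + 1).
 */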
4338 static int
4339 wg_alloc_prop_buf(char **_buf, struct ifdrv *ifd)
4340 {
4341 int error;
4342 char *buf;
4343
4344 WG_DLOG("buf=%p, len=%lu\n", ifd->ifd_data, ifd->ifd_len);
4345 if (ifd->ifd_len >= WG_MAX_PROPLEN)
4346 return E2BIG;
4347 buf = kmem_alloc(ifd->ifd_len + 1, KM_SLEEP);
4348 error = copyin(ifd->ifd_data, buf, ifd->ifd_len);
4349 	if (error != 0) {
 		kmem_free(buf, ifd->ifd_len + 1);
 		return error;
 	}
4351 buf[ifd->ifd_len] = '\0';
4352 #ifdef WG_DEBUG_DUMP
4353 if (wg_debug & WG_DEBUG_FLAGS_DUMP) {
4354 log(LOG_DEBUG, "%.*s\n", (int)MIN(INT_MAX, ifd->ifd_len),
4355 (const char *)buf);
4356 }
4357 #endif
4358 *_buf = buf;
4359 return 0;
4360 }
4361
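/*
 * wg_ioctl_set_private_key(wg, ifd)
 *
 *	Handle WG_IOCTL_SET_PRIVATE_KEY: the payload is an
 *	externalized proplib dictionary whose "private_key" entry is
 *	the WG_STATIC_KEY_LEN-byte Curve25519 private key; the
 *	matching public key is derived and cached in the softc.
 */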
4362 static int
4363 wg_ioctl_set_private_key(struct wg_softc *wg, struct ifdrv *ifd)
4364 {
4365 int error;
4366 prop_dictionary_t prop_dict;
4367 char *buf = NULL;
4368 const void *privkey;
4369 size_t privkey_len;
4370
4371 error = wg_alloc_prop_buf(&buf, ifd);
4372 if (error != 0)
4373 return error;
4374 error = EINVAL;
4375 prop_dict = prop_dictionary_internalize(buf);
4376 if (prop_dict == NULL)
4377 goto out;
4378 if (!prop_dictionary_get_data(prop_dict, "private_key",
4379 &privkey, &privkey_len))
4380 goto out;
4381 #ifdef WG_DEBUG_DUMP
4382 if (wg_debug & WG_DEBUG_FLAGS_DUMP) {
4383 char *hex = gethexdump(privkey, privkey_len);
4384 log(LOG_DEBUG, "privkey=%p, privkey_len=%lu\n%s\n",
4385 privkey, privkey_len, hex);
4386 puthexdump(hex, privkey, privkey_len);
4387 }
4388 #endif
4389 if (privkey_len != WG_STATIC_KEY_LEN)
4390 goto out;
4391 memcpy(wg->wg_privkey, privkey, WG_STATIC_KEY_LEN);
4392 wg_calc_pubkey(wg->wg_pubkey, wg->wg_privkey);
4393 error = 0;
4394
4395 out:
4396 kmem_free(buf, ifd->ifd_len + 1);
4397 return error;
4398 }
4399
4400 static int
4401 wg_ioctl_set_listen_port(struct wg_softc *wg, struct ifdrv *ifd)
4402 {
4403 int error;
4404 prop_dictionary_t prop_dict;
4405 char *buf = NULL;
4406 uint16_t port;
4407
4408 error = wg_alloc_prop_buf(&buf, ifd);
4409 if (error != 0)
4410 return error;
4411 error = EINVAL;
4412 prop_dict = prop_dictionary_internalize(buf);
4413 if (prop_dict == NULL)
4414 goto out;
4415 if (!prop_dictionary_get_uint16(prop_dict, "listen_port", &port))
4416 goto out;
4417
4418 error = wg->wg_ops->bind_port(wg, (uint16_t)port);
4419
4420 out:
4421 kmem_free(buf, ifd->ifd_len + 1);
4422 return error;
4423 }
4424
4425 static int
4426 wg_ioctl_add_peer(struct wg_softc *wg, struct ifdrv *ifd)
4427 {
4428 int error;
4429 prop_dictionary_t prop_dict;
4430 char *buf = NULL;
4431 struct wg_peer *wgp = NULL, *wgp0 __diagused;
4432
4433 error = wg_alloc_prop_buf(&buf, ifd);
4434 if (error != 0)
4435 return error;
4436 error = EINVAL;
4437 prop_dict = prop_dictionary_internalize(buf);
4438 if (prop_dict == NULL)
4439 goto out;
4440
4441 error = wg_handle_prop_peer(wg, prop_dict, &wgp);
4442 if (error != 0)
4443 goto out;
4444
4445 mutex_enter(wg->wg_lock);
4446 if (thmap_get(wg->wg_peers_bypubkey, wgp->wgp_pubkey,
4447 sizeof(wgp->wgp_pubkey)) != NULL ||
4448 (wgp->wgp_name[0] &&
4449 thmap_get(wg->wg_peers_byname, wgp->wgp_name,
4450 strlen(wgp->wgp_name)) != NULL)) {
4451 mutex_exit(wg->wg_lock);
4452 wg_destroy_peer(wgp);
4453 error = EEXIST;
4454 goto out;
4455 }
4456 wgp0 = thmap_put(wg->wg_peers_bypubkey, wgp->wgp_pubkey,
4457 sizeof(wgp->wgp_pubkey), wgp);
4458 KASSERT(wgp0 == wgp);
4459 if (wgp->wgp_name[0]) {
4460 wgp0 = thmap_put(wg->wg_peers_byname, wgp->wgp_name,
4461 strlen(wgp->wgp_name), wgp);
4462 KASSERT(wgp0 == wgp);
4463 }
4464 WG_PEER_WRITER_INSERT_HEAD(wgp, wg);
4465 wg->wg_npeers++;
4466 mutex_exit(wg->wg_lock);
4467
4468 if_link_state_change(&wg->wg_if, LINK_STATE_UP);
4469
4470 out:
4471 kmem_free(buf, ifd->ifd_len + 1);
4472 return error;
4473 }
4474
4475 static int
4476 wg_ioctl_delete_peer(struct wg_softc *wg, struct ifdrv *ifd)
4477 {
4478 int error;
4479 prop_dictionary_t prop_dict;
4480 char *buf = NULL;
4481 const char *name;
4482
4483 error = wg_alloc_prop_buf(&buf, ifd);
4484 if (error != 0)
4485 return error;
4486 error = EINVAL;
4487 prop_dict = prop_dictionary_internalize(buf);
4488 if (prop_dict == NULL)
4489 goto out;
4490
4491 if (!prop_dictionary_get_string(prop_dict, "name", &name))
4492 goto out;
4493 if (strlen(name) > WG_PEER_NAME_MAXLEN)
4494 goto out;
4495
4496 error = wg_destroy_peer_name(wg, name);
4497 out:
4498 kmem_free(buf, ifd->ifd_len + 1);
4499 return error;
4500 }
4501
4502 static bool
4503 wg_is_authorized(struct wg_softc *wg, u_long cmd)
4504 {
4505 int au = cmd == SIOCGDRVSPEC ?
4506 KAUTH_REQ_NETWORK_INTERFACE_WG_GETPRIV :
4507 KAUTH_REQ_NETWORK_INTERFACE_WG_SETPRIV;
4508 return kauth_authorize_network(kauth_cred_get(),
4509 KAUTH_NETWORK_INTERFACE_WG, au, &wg->wg_if,
4510 (void *)cmd, NULL) == 0;
4511 }
4512
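/*
 * wg_ioctl_get(wg, ifd)
 *
 *	Handle SIOCGDRVSPEC: externalize the configuration (listen
 *	port; each peer's name, public key, endpoint, last handshake
 *	time, and allowed ips; and, for callers authorized to read
 *	them, the private and preshared keys) into a proplib
 *	dictionary and copy it out to the caller's buffer.
 */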
4513 static int
4514 wg_ioctl_get(struct wg_softc *wg, struct ifdrv *ifd)
4515 {
4516 int error = ENOMEM;
4517 prop_dictionary_t prop_dict;
4518 prop_array_t peers = NULL;
4519 char *buf;
4520 struct wg_peer *wgp;
4521 int s, i;
4522
4523 prop_dict = prop_dictionary_create();
4524 if (prop_dict == NULL)
4525 goto error;
4526
4527 if (wg_is_authorized(wg, SIOCGDRVSPEC)) {
4528 if (!prop_dictionary_set_data(prop_dict, "private_key",
4529 wg->wg_privkey, WG_STATIC_KEY_LEN))
4530 goto error;
4531 }
4532
4533 if (wg->wg_listen_port != 0) {
4534 if (!prop_dictionary_set_uint16(prop_dict, "listen_port",
4535 wg->wg_listen_port))
4536 goto error;
4537 }
4538
4539 if (wg->wg_npeers == 0)
4540 goto skip_peers;
4541
4542 peers = prop_array_create();
4543 if (peers == NULL)
4544 goto error;
4545
4546 s = pserialize_read_enter();
4547 i = 0;
4548 WG_PEER_READER_FOREACH(wgp, wg) {
4549 struct wg_sockaddr *wgsa;
4550 struct psref wgp_psref, wgsa_psref;
4551 prop_dictionary_t prop_peer;
4552
4553 wg_get_peer(wgp, &wgp_psref);
4554 pserialize_read_exit(s);
4555
4556 prop_peer = prop_dictionary_create();
4557 if (prop_peer == NULL)
4558 goto next;
4559
4560 if (strlen(wgp->wgp_name) > 0) {
4561 if (!prop_dictionary_set_string(prop_peer, "name",
4562 wgp->wgp_name))
4563 goto next;
4564 }
4565
4566 if (!prop_dictionary_set_data(prop_peer, "public_key",
4567 wgp->wgp_pubkey, sizeof(wgp->wgp_pubkey)))
4568 goto next;
4569
4570 uint8_t psk_zero[WG_PRESHARED_KEY_LEN] = {0};
4571 if (!consttime_memequal(wgp->wgp_psk, psk_zero,
4572 sizeof(wgp->wgp_psk))) {
4573 if (wg_is_authorized(wg, SIOCGDRVSPEC)) {
4574 if (!prop_dictionary_set_data(prop_peer,
4575 "preshared_key",
4576 wgp->wgp_psk, sizeof(wgp->wgp_psk)))
4577 goto next;
4578 }
4579 }
4580
4581 wgsa = wg_get_endpoint_sa(wgp, &wgsa_psref);
4582 CTASSERT(AF_UNSPEC == 0);
4583 if (wgsa_family(wgsa) != 0 /*AF_UNSPEC*/ &&
4584 !prop_dictionary_set_data(prop_peer, "endpoint",
4585 wgsatoss(wgsa),
4586 sockaddr_getsize_by_family(wgsa_family(wgsa)))) {
4587 wg_put_sa(wgp, wgsa, &wgsa_psref);
4588 goto next;
4589 }
4590 wg_put_sa(wgp, wgsa, &wgsa_psref);
4591
4592 const struct timespec *t = &wgp->wgp_last_handshake_time;
4593
4594 if (!prop_dictionary_set_uint64(prop_peer,
4595 "last_handshake_time_sec", (uint64_t)t->tv_sec))
4596 goto next;
4597 if (!prop_dictionary_set_uint32(prop_peer,
4598 "last_handshake_time_nsec", (uint32_t)t->tv_nsec))
4599 goto next;
4600
4601 if (wgp->wgp_n_allowedips == 0)
4602 goto skip_allowedips;
4603
4604 prop_array_t allowedips = prop_array_create();
4605 if (allowedips == NULL)
4606 goto next;
4607 for (int j = 0; j < wgp->wgp_n_allowedips; j++) {
4608 struct wg_allowedip *wga = &wgp->wgp_allowedips[j];
4609 prop_dictionary_t prop_allowedip;
4610
4611 prop_allowedip = prop_dictionary_create();
4612 if (prop_allowedip == NULL)
4613 break;
4614
4615 if (!prop_dictionary_set_int(prop_allowedip, "family",
4616 wga->wga_family))
4617 goto _next;
4618 if (!prop_dictionary_set_uint8(prop_allowedip, "cidr",
4619 wga->wga_cidr))
4620 goto _next;
4621
4622 switch (wga->wga_family) {
4623 case AF_INET:
4624 if (!prop_dictionary_set_data(prop_allowedip,
4625 "ip", &wga->wga_addr4,
4626 sizeof(wga->wga_addr4)))
4627 goto _next;
4628 break;
4629 #ifdef INET6
4630 case AF_INET6:
4631 if (!prop_dictionary_set_data(prop_allowedip,
4632 "ip", &wga->wga_addr6,
4633 sizeof(wga->wga_addr6)))
4634 goto _next;
4635 break;
4636 #endif
4637 default:
4638 break;
4639 }
4640 prop_array_set(allowedips, j, prop_allowedip);
4641 _next:
4642 prop_object_release(prop_allowedip);
4643 }
4644 prop_dictionary_set(prop_peer, "allowedips", allowedips);
4645 prop_object_release(allowedips);
4646
4647 skip_allowedips:
4648
4649 prop_array_set(peers, i, prop_peer);
4650 next:
4651 if (prop_peer)
4652 prop_object_release(prop_peer);
4653 i++;
4654
4655 s = pserialize_read_enter();
4656 wg_put_peer(wgp, &wgp_psref);
4657 }
4658 pserialize_read_exit(s);
4659
4660 prop_dictionary_set(prop_dict, "peers", peers);
4661 prop_object_release(peers);
4662 peers = NULL;
4663
4664 skip_peers:
4665 buf = prop_dictionary_externalize(prop_dict);
4666 if (buf == NULL)
4667 goto error;
4668 if (ifd->ifd_len < (strlen(buf) + 1)) {
4669 error = EINVAL;
4670 goto error;
4671 }
4672 error = copyout(buf, ifd->ifd_data, strlen(buf) + 1);
4673
4674 free(buf, 0);
4675 error:
4676 if (peers != NULL)
4677 prop_object_release(peers);
4678 if (prop_dict != NULL)
4679 prop_object_release(prop_dict);
4680
4681 return error;
4682 }
4683
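/*
 * wg_ioctl(ifp, cmd, data)
 *
 *	if_ioctl routine.  Configuration is carried over
 *	SIOCSDRVSPEC/SIOCGDRVSPEC with proplib-encoded payloads, and
 *	the set operations require kauth SETPRIV authorization.
 *	Everything else falls through to ifioctl_common(); in rump
 *	user mode, address ioctls are additionally mirrored to the
 *	corresponding host tun device.
 */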
4684 static int
4685 wg_ioctl(struct ifnet *ifp, u_long cmd, void *data)
4686 {
4687 struct wg_softc *wg = ifp->if_softc;
4688 struct ifreq *ifr = data;
4689 struct ifaddr *ifa = data;
4690 struct ifdrv *ifd = data;
4691 int error = 0;
4692
4693 switch (cmd) {
4694 case SIOCINITIFADDR:
4695 if (ifa->ifa_addr->sa_family != AF_LINK &&
4696 (ifp->if_flags & (IFF_UP | IFF_RUNNING)) !=
4697 (IFF_UP | IFF_RUNNING)) {
4698 ifp->if_flags |= IFF_UP;
4699 error = if_init(ifp);
4700 }
4701 return error;
4702 case SIOCADDMULTI:
4703 case SIOCDELMULTI:
4704 switch (ifr->ifr_addr.sa_family) {
4705 case AF_INET: /* IP supports Multicast */
4706 break;
4707 #ifdef INET6
4708 case AF_INET6: /* IP6 supports Multicast */
4709 break;
4710 #endif
4711 		default:	/* Other protocols don't support Multicast */
4712 error = EAFNOSUPPORT;
4713 break;
4714 }
4715 return error;
4716 case SIOCSDRVSPEC:
4717 if (!wg_is_authorized(wg, cmd)) {
4718 return EPERM;
4719 }
4720 switch (ifd->ifd_cmd) {
4721 case WG_IOCTL_SET_PRIVATE_KEY:
4722 error = wg_ioctl_set_private_key(wg, ifd);
4723 break;
4724 case WG_IOCTL_SET_LISTEN_PORT:
4725 error = wg_ioctl_set_listen_port(wg, ifd);
4726 break;
4727 case WG_IOCTL_ADD_PEER:
4728 error = wg_ioctl_add_peer(wg, ifd);
4729 break;
4730 case WG_IOCTL_DELETE_PEER:
4731 error = wg_ioctl_delete_peer(wg, ifd);
4732 break;
4733 default:
4734 error = EINVAL;
4735 break;
4736 }
4737 return error;
4738 case SIOCGDRVSPEC:
4739 return wg_ioctl_get(wg, ifd);
4740 case SIOCSIFFLAGS:
4741 if ((error = ifioctl_common(ifp, cmd, data)) != 0)
4742 break;
4743 switch (ifp->if_flags & (IFF_UP|IFF_RUNNING)) {
4744 case IFF_RUNNING:
4745 /*
4746 * If interface is marked down and it is running,
4747 * then stop and disable it.
4748 */
4749 if_stop(ifp, 1);
4750 break;
4751 case IFF_UP:
4752 /*
4753 * If interface is marked up and it is stopped, then
4754 * start it.
4755 */
4756 error = if_init(ifp);
4757 break;
4758 default:
4759 break;
4760 }
4761 return error;
4762 #ifdef WG_RUMPKERNEL
4763 case SIOCSLINKSTR:
4764 error = wg_ioctl_linkstr(wg, ifd);
4765 if (error == 0)
4766 wg->wg_ops = &wg_ops_rumpuser;
4767 return error;
4768 #endif
4769 default:
4770 break;
4771 }
4772
4773 error = ifioctl_common(ifp, cmd, data);
4774
4775 #ifdef WG_RUMPKERNEL
4776 if (!wg_user_mode(wg))
4777 return error;
4778
4779 /* Do the same to the corresponding tun device on the host */
4780 /*
4781 	 * XXX The command has not actually been handled yet.  It
4782 	 * will be handled via pr_ioctl from doifioctl later.
4783 */
4784 switch (cmd) {
4785 case SIOCAIFADDR:
4786 case SIOCDIFADDR: {
4787 struct in_aliasreq _ifra = *(const struct in_aliasreq *)data;
4788 struct in_aliasreq *ifra = &_ifra;
4789 KASSERT(error == ENOTTY);
4790 strncpy(ifra->ifra_name, rumpuser_wg_get_tunname(wg->wg_user),
4791 IFNAMSIZ);
4792 error = rumpuser_wg_ioctl(wg->wg_user, cmd, ifra, AF_INET);
4793 if (error == 0)
4794 error = ENOTTY;
4795 break;
4796 }
4797 #ifdef INET6
4798 case SIOCAIFADDR_IN6:
4799 case SIOCDIFADDR_IN6: {
4800 struct in6_aliasreq _ifra = *(const struct in6_aliasreq *)data;
4801 struct in6_aliasreq *ifra = &_ifra;
4802 KASSERT(error == ENOTTY);
4803 strncpy(ifra->ifra_name, rumpuser_wg_get_tunname(wg->wg_user),
4804 IFNAMSIZ);
4805 error = rumpuser_wg_ioctl(wg->wg_user, cmd, ifra, AF_INET6);
4806 if (error == 0)
4807 error = ENOTTY;
4808 break;
4809 }
4810 #endif
4811 }
4812 #endif /* WG_RUMPKERNEL */
4813
4814 return error;
4815 }
4816
4817 static int
4818 wg_init(struct ifnet *ifp)
4819 {
4820
4821 ifp->if_flags |= IFF_RUNNING;
4822
4823 /* TODO flush pending packets. */
4824 return 0;
4825 }
4826
4827 #ifdef ALTQ
4828 static void
4829 wg_start(struct ifnet *ifp)
4830 {
4831 struct mbuf *m;
4832
4833 for (;;) {
4834 IFQ_DEQUEUE(&ifp->if_snd, m);
4835 if (m == NULL)
4836 break;
4837
4838 kpreempt_disable();
4839 const uint32_t h = curcpu()->ci_index; // pktq_rps_hash(m)
4840 if (__predict_false(!pktq_enqueue(wg_pktq, m, h))) {
4841 WGLOG(LOG_ERR, "%s: pktq full, dropping\n",
4842 if_name(ifp));
4843 m_freem(m);
4844 }
4845 kpreempt_enable();
4846 }
4847 }
4848 #endif
4849
4850 static void
4851 wg_stop(struct ifnet *ifp, int disable)
4852 {
4853
4854 KASSERT((ifp->if_flags & IFF_RUNNING) != 0);
4855 ifp->if_flags &= ~IFF_RUNNING;
4856
4857 /* Need to do something? */
4858 }
4859
4860 #ifdef WG_DEBUG_PARAMS
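/*
 * Debug-only sysctl knobs, compiled in with WG_DEBUG_PARAMS.  They
 * appear under net.wg; for example (illustrative only), `sysctl -w
 * net.wg.debug=7' turns on debug, trace, and dump output at once.
 */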
4861 SYSCTL_SETUP(sysctl_net_wg_setup, "sysctl net.wg setup")
4862 {
4863 const struct sysctlnode *node = NULL;
4864
4865 sysctl_createv(clog, 0, NULL, &node,
4866 CTLFLAG_PERMANENT,
4867 CTLTYPE_NODE, "wg",
4868 SYSCTL_DESCR("wg(4)"),
4869 NULL, 0, NULL, 0,
4870 CTL_NET, CTL_CREATE, CTL_EOL);
4871 sysctl_createv(clog, 0, &node, NULL,
4872 CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
4873 CTLTYPE_QUAD, "rekey_after_messages",
4874 	    SYSCTL_DESCR("session lifetime in messages"),
4875 NULL, 0, &wg_rekey_after_messages, 0, CTL_CREATE, CTL_EOL);
4876 sysctl_createv(clog, 0, &node, NULL,
4877 CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
4878 CTLTYPE_INT, "rekey_after_time",
4879 	    SYSCTL_DESCR("session lifetime in seconds"),
4880 NULL, 0, &wg_rekey_after_time, 0, CTL_CREATE, CTL_EOL);
4881 sysctl_createv(clog, 0, &node, NULL,
4882 CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
4883 CTLTYPE_INT, "rekey_timeout",
4884 SYSCTL_DESCR("session handshake retry time"),
4885 NULL, 0, &wg_rekey_timeout, 0, CTL_CREATE, CTL_EOL);
4886 sysctl_createv(clog, 0, &node, NULL,
4887 CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
4888 CTLTYPE_INT, "rekey_attempt_time",
4889 SYSCTL_DESCR("session handshake timeout"),
4890 NULL, 0, &wg_rekey_attempt_time, 0, CTL_CREATE, CTL_EOL);
4891 sysctl_createv(clog, 0, &node, NULL,
4892 CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
4893 CTLTYPE_INT, "keepalive_timeout",
4894 SYSCTL_DESCR("keepalive timeout"),
4895 NULL, 0, &wg_keepalive_timeout, 0, CTL_CREATE, CTL_EOL);
4896 sysctl_createv(clog, 0, &node, NULL,
4897 CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
4898 CTLTYPE_BOOL, "force_underload",
4899 	    SYSCTL_DESCR("force detection of the under-load condition"),
4900 NULL, 0, &wg_force_underload, 0, CTL_CREATE, CTL_EOL);
4901 sysctl_createv(clog, 0, &node, NULL,
4902 CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
4903 CTLTYPE_INT, "debug",
4904 SYSCTL_DESCR("set debug flags 1=debug 2=trace 4=dump"),
4905 NULL, 0, &wg_debug, 0, CTL_CREATE, CTL_EOL);
4906 }
4907 #endif
4908
4909 #ifdef WG_RUMPKERNEL
4910 static bool
4911 wg_user_mode(struct wg_softc *wg)
4912 {
4913
4914 return wg->wg_user != NULL;
4915 }
4916
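/*
 * wg_ioctl_linkstr(wg, ifd)
 *
 *	Rump-only SIOCSLINKSTR handler: the link string names the host
 *	tun(4) device ("tunN") that will back this interface; on
 *	success the caller switches wg_ops to the rumpuser
 *	implementation.
 */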
4917 static int
4918 wg_ioctl_linkstr(struct wg_softc *wg, struct ifdrv *ifd)
4919 {
4920 struct ifnet *ifp = &wg->wg_if;
4921 int error;
4922
4923 if (ifp->if_flags & IFF_UP)
4924 return EBUSY;
4925
4926 if (ifd->ifd_cmd == IFLINKSTR_UNSET) {
4927 /* XXX do nothing */
4928 return 0;
4929 } else if (ifd->ifd_cmd != 0) {
4930 return EINVAL;
4931 } else if (wg->wg_user != NULL) {
4932 return EBUSY;
4933 }
4934
4935 	/* ifd_len is assumed to include the terminating '\0' */
4936 if (ifd->ifd_len > IFNAMSIZ) {
4937 return E2BIG;
4938 } else if (ifd->ifd_len < 1) {
4939 return EINVAL;
4940 }
4941
4942 char tun_name[IFNAMSIZ];
4943 error = copyinstr(ifd->ifd_data, tun_name, ifd->ifd_len, NULL);
4944 if (error != 0)
4945 return error;
4946
4947 if (strncmp(tun_name, "tun", 3) != 0)
4948 return EINVAL;
4949
4950 error = rumpuser_wg_create(tun_name, wg, &wg->wg_user);
4951
4952 return error;
4953 }
4954
4955 static int
4956 wg_send_user(struct wg_peer *wgp, struct mbuf *m)
4957 {
4958 int error;
4959 struct psref psref;
4960 struct wg_sockaddr *wgsa;
4961 struct wg_softc *wg = wgp->wgp_sc;
4962 struct iovec iov[1];
4963
4964 wgsa = wg_get_endpoint_sa(wgp, &psref);
4965
4966 iov[0].iov_base = mtod(m, void *);
4967 iov[0].iov_len = m->m_len;
4968
4969 /* Send messages to a peer via an ordinary socket. */
4970 error = rumpuser_wg_send_peer(wg->wg_user, wgsatosa(wgsa), iov, 1);
4971
4972 wg_put_sa(wgp, wgsa, &psref);
4973
4974 m_freem(m);
4975
4976 return error;
4977 }
4978
4979 static void
4980 wg_input_user(struct ifnet *ifp, struct mbuf *m, const int af)
4981 {
4982 struct wg_softc *wg = ifp->if_softc;
4983 struct iovec iov[2];
4984 struct sockaddr_storage ss;
4985
4986 KASSERT(af == AF_INET || af == AF_INET6);
4987
4988 WG_TRACE("");
4989
4990 if (af == AF_INET) {
4991 struct sockaddr_in *sin = (struct sockaddr_in *)&ss;
4992 struct ip *ip;
4993
4994 KASSERT(m->m_len >= sizeof(struct ip));
4995 ip = mtod(m, struct ip *);
4996 sockaddr_in_init(sin, &ip->ip_dst, 0);
4997 } else {
4998 struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&ss;
4999 struct ip6_hdr *ip6;
5000
5001 KASSERT(m->m_len >= sizeof(struct ip6_hdr));
5002 ip6 = mtod(m, struct ip6_hdr *);
5003 sockaddr_in6_init(sin6, &ip6->ip6_dst, 0, 0, 0);
5004 }
5005
5006 iov[0].iov_base = &ss;
5007 iov[0].iov_len = ss.ss_len;
5008 iov[1].iov_base = mtod(m, void *);
5009 iov[1].iov_len = m->m_len;
5010
5011 WG_DUMP_BUF(iov[1].iov_base, iov[1].iov_len);
5012
5013 /* Send decrypted packets to users via a tun. */
5014 rumpuser_wg_send_user(wg->wg_user, iov, 2);
5015
5016 m_freem(m);
5017 }
5018
5019 static int
5020 wg_bind_port_user(struct wg_softc *wg, const uint16_t port)
5021 {
5022 int error;
5023 uint16_t old_port = wg->wg_listen_port;
5024
5025 if (port != 0 && old_port == port)
5026 return 0;
5027
5028 error = rumpuser_wg_sock_bind(wg->wg_user, port);
5029 if (error == 0)
5030 wg->wg_listen_port = port;
5031 return error;
5032 }
5033
5034 /*
5035 * Receive user packets.
5036 */
5037 void
5038 rumpkern_wg_recv_user(struct wg_softc *wg, struct iovec *iov, size_t iovlen)
5039 {
5040 struct ifnet *ifp = &wg->wg_if;
5041 struct mbuf *m;
5042 const struct sockaddr *dst;
5043
5044 WG_TRACE("");
5045
5046 dst = iov[0].iov_base;
5047
5048 m = m_gethdr(M_DONTWAIT, MT_DATA);
5049 if (m == NULL)
5050 return;
5051 m->m_len = m->m_pkthdr.len = 0;
5052 m_copyback(m, 0, iov[1].iov_len, iov[1].iov_base);
5053
5054 WG_DLOG("iov_len=%lu\n", iov[1].iov_len);
5055 WG_DUMP_BUF(iov[1].iov_base, iov[1].iov_len);
5056
5057 (void)wg_output(ifp, m, dst, NULL);
5058 }
5059
5060 /*
5061 * Receive packets from a peer.
5062 */
5063 void
5064 rumpkern_wg_recv_peer(struct wg_softc *wg, struct iovec *iov, size_t iovlen)
5065 {
5066 struct mbuf *m;
5067 const struct sockaddr *src;
5068 int bound;
5069
5070 WG_TRACE("");
5071
5072 src = iov[0].iov_base;
5073
5074 m = m_gethdr(M_DONTWAIT, MT_DATA);
5075 if (m == NULL)
5076 return;
5077 m->m_len = m->m_pkthdr.len = 0;
5078 m_copyback(m, 0, iov[1].iov_len, iov[1].iov_base);
5079
5080 WG_DLOG("iov_len=%lu\n", iov[1].iov_len);
5081 WG_DUMP_BUF(iov[1].iov_base, iov[1].iov_len);
5082
5083 bound = curlwp_bind();
5084 wg_handle_packet(wg, m, src);
5085 curlwp_bindx(bound);
5086 }
5087 #endif /* WG_RUMPKERNEL */
5088
5089 /*
5090 * Module infrastructure
5091 */
5092 #include "if_module.h"
5093
5094 IF_MODULE(MODULE_CLASS_DRIVER, wg, "sodium,blake2s")
5095