1 /* $NetBSD: if_wg.c,v 1.56 2020/09/08 16:39:57 riastradh Exp $ */
2
3 /*
4 * Copyright (C) Ryota Ozaki <ozaki.ryota@gmail.com>
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. Neither the name of the project nor the names of its contributors
16 * may be used to endorse or promote products derived from this software
17 * without specific prior written permission.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
20 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
23 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 * SUCH DAMAGE.
30 */
31
32 /*
33 * This network interface aims to implement the WireGuard protocol.
34 * The implementation is based on the paper of WireGuard as of
35 * 2018-06-30 [1], referred to in the source code with the label [W].
36 * The specification of the Noise protocol framework as of 2018-07-11 [2]
37 * is likewise referred to with the label [N].
38 *
39 * [1] https://www.wireguard.com/papers/wireguard.pdf
40 * [2] http://noiseprotocol.org/noise.pdf
41 */
42
43 #include <sys/cdefs.h>
44 __KERNEL_RCSID(0, "$NetBSD: if_wg.c,v 1.56 2020/09/08 16:39:57 riastradh Exp $");
45
46 #ifdef _KERNEL_OPT
47 #include "opt_inet.h"
48 #endif
49
50 #include <sys/param.h>
51 #include <sys/types.h>
52
53 #include <sys/atomic.h>
54 #include <sys/callout.h>
55 #include <sys/cprng.h>
56 #include <sys/cpu.h>
57 #include <sys/device.h>
58 #include <sys/domain.h>
59 #include <sys/errno.h>
60 #include <sys/intr.h>
61 #include <sys/ioctl.h>
62 #include <sys/kernel.h>
63 #include <sys/kmem.h>
64 #include <sys/mbuf.h>
65 #include <sys/module.h>
66 #include <sys/mutex.h>
67 #include <sys/percpu.h>
68 #include <sys/pserialize.h>
69 #include <sys/psref.h>
70 #include <sys/queue.h>
71 #include <sys/rwlock.h>
72 #include <sys/socket.h>
73 #include <sys/socketvar.h>
74 #include <sys/sockio.h>
75 #include <sys/sysctl.h>
76 #include <sys/syslog.h>
77 #include <sys/systm.h>
78 #include <sys/thmap.h>
79 #include <sys/threadpool.h>
80 #include <sys/time.h>
81 #include <sys/timespec.h>
82 #include <sys/workqueue.h>
83
84 #include <net/bpf.h>
85 #include <net/if.h>
86 #include <net/if_types.h>
87 #include <net/if_wg.h>
88 #include <net/pktqueue.h>
89 #include <net/route.h>
90
91 #include <netinet/in.h>
92 #include <netinet/in_pcb.h>
93 #include <netinet/in_var.h>
94 #include <netinet/ip.h>
95 #include <netinet/ip_var.h>
96 #include <netinet/udp.h>
97 #include <netinet/udp_var.h>
98
99 #ifdef INET6
100 #include <netinet/ip6.h>
101 #include <netinet6/in6_pcb.h>
102 #include <netinet6/in6_var.h>
103 #include <netinet6/ip6_var.h>
104 #include <netinet6/udp6_var.h>
105 #endif /* INET6 */
106
107 #include <prop/proplib.h>
108
109 #include <crypto/blake2/blake2s.h>
110 #include <crypto/sodium/crypto_aead_chacha20poly1305.h>
111 #include <crypto/sodium/crypto_aead_xchacha20poly1305.h>
112 #include <crypto/sodium/crypto_scalarmult.h>
113
114 #include "ioconf.h"
115
116 #ifdef WG_RUMPKERNEL
117 #include "wg_user.h"
118 #endif
119
120 /*
121 * Data structures
122 * - struct wg_softc is an instance of a wg interface
123 * - It has a list of peers (struct wg_peer)
124 * - It has a threadpool job that sends/receives handshake messages and
125 * runs event handlers
126 * - It has its own two routing tables: one for IPv4 and the other for IPv6
127 * - struct wg_peer represents a peer
128 * - It has a struct work to handle handshakes and timer tasks
129 * - It has a pair of session instances (struct wg_session)
130 * - It has a pair of endpoint instances (struct wg_sockaddr)
131 * - Normally one endpoint is used; the second one is used only during
132 * a peer migration (a change of the peer's IP address)
133 * - It has a list of IP addresses and subnetworks called allowedips
134 * (struct wg_allowedip)
135 * - A packet sent over a session is allowed if its destination matches
136 * any IP address or subnetwork on the list
137 * - struct wg_session represents a session of a secure tunnel with a peer
138 * - Two session instances belong to a peer: a stable session and an
139 * unstable session
140 * - A session's handshake always starts with the unstable instance
141 * - Once a session is established, its instance becomes stable and the
142 * other becomes unstable instead
143 * - Data messages are always sent via a stable session
144 *
145 * Locking notes:
146 * - wg interfaces (struct wg_softc, wg) are listed in wg_softcs.list and
147 * protected by wg_softcs.lock
148 * - Each wg has a mutex(9) wg_lock, and a rwlock(9) wg_rwlock
149 * - Changes to the peer list are serialized by wg_lock
150 * - The peer list may be read with pserialize(9) and psref(9)
151 * - The rwlock (wg_rwlock) protects the routing tables (wg_rtable_ipv[46])
152 * => XXX replace by pserialize when routing table is psz-safe
153 * - Each peer (struct wg_peer, wgp) has a mutex wgp_lock, which can be taken
154 * only in thread context and serializes:
155 * - the stable and unstable session pointers
156 * - all unstable session state
157 * - Packet processing may be done in softint context:
158 * - The stable session can be read under pserialize(9) or psref(9)
159 * - The stable session is always ESTABLISHED
160 * - On a session swap, we must wait for all readers to release a
161 * reference to a stable session before changing wgs_state and
162 * session states
163 * - Lock order: wg_lock -> wgp_lock (see the illustrative sketch below)
164 */
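/*
 * Illustrative sketch (not part of the driver) of the lock order noted
 * above: a writer that needs both the interface and one of its peers
 * takes wg_lock first and wgp_lock second.  The function name
 * example_writer is hypothetical.
 *
 *	void
 *	example_writer(struct wg_softc *wg, struct wg_peer *wgp)
 *	{
 *		mutex_enter(wg->wg_lock);	// interface lock first
 *		mutex_enter(wgp->wgp_lock);	// then the peer lock
 *		// ... change the peer list or unstable session state ...
 *		mutex_exit(wgp->wgp_lock);
 *		mutex_exit(wg->wg_lock);
 *	}
 */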
165
166
167 #define WGLOG(level, fmt, args...) \
168 log(level, "%s: " fmt, __func__, ##args)
169
170 /* Debug options */
171 #ifdef WG_DEBUG
172 /* Output debug logs */
173 #ifndef WG_DEBUG_LOG
174 #define WG_DEBUG_LOG
175 #endif
176 /* Output trace logs */
177 #ifndef WG_DEBUG_TRACE
178 #define WG_DEBUG_TRACE
179 #endif
180 /* Output hash values, etc. */
181 #ifndef WG_DEBUG_DUMP
182 #define WG_DEBUG_DUMP
183 #endif
184 /* Make some internal parameters configurable for testing and debugging */
185 #ifndef WG_DEBUG_PARAMS
186 #define WG_DEBUG_PARAMS
187 #endif
188 #endif
189
190 #ifdef WG_DEBUG_TRACE
191 #define WG_TRACE(msg) \
192 log(LOG_DEBUG, "%s:%d: %s\n", __func__, __LINE__, (msg))
193 #else
194 #define WG_TRACE(msg) __nothing
195 #endif
196
197 #ifdef WG_DEBUG_LOG
198 #define WG_DLOG(fmt, args...) log(LOG_DEBUG, "%s: " fmt, __func__, ##args)
199 #else
200 #define WG_DLOG(fmt, args...) __nothing
201 #endif
202
203 #define WG_LOG_RATECHECK(wgprc, level, fmt, args...) do { \
204 if (ppsratecheck(&(wgprc)->wgprc_lasttime, \
205 &(wgprc)->wgprc_curpps, 1)) { \
206 log(level, fmt, ##args); \
207 } \
208 } while (0)
209
210 #ifdef WG_DEBUG_PARAMS
211 static bool wg_force_underload = false;
212 #endif
213
214 #ifdef WG_DEBUG_DUMP
215
216 static char *
217 gethexdump(const char *p, size_t n)
218 {
219 char *buf;
220 size_t i;
221
222 if (n > SIZE_MAX/3 - 1)
223 return NULL;
224 buf = kmem_alloc(3*n + 1, KM_NOSLEEP);
225 if (buf == NULL)
226 return NULL;
227 for (i = 0; i < n; i++)
228 snprintf(buf + 3*i, 3 + 1, " %02hhx", p[i]);
229 return buf;
230 }
231
232 static void
233 puthexdump(char *buf, const void *p, size_t n)
234 {
235
236 if (buf == NULL)
237 return;
238 kmem_free(buf, 3*n + 1);
239 }
240
241 #ifdef WG_RUMPKERNEL
242 static void
243 wg_dump_buf(const char *func, const char *buf, const size_t size)
244 {
245 char *hex = gethexdump(buf, size);
246
247 log(LOG_DEBUG, "%s: %s\n", func, hex ? hex : "(enomem)");
248 puthexdump(hex, buf, size);
249 }
250 #endif
251
252 static void
253 wg_dump_hash(const char *func, const char *name, const uint8_t *hash,
254 const size_t size)
255 {
256 char *hex = gethexdump(hash, size);
257
258 log(LOG_DEBUG, "%s: %s: %s\n", func, name, hex ? hex : "(enomem)");
259 puthexdump(hex, hash, size);
260 }
261
262 #define WG_DUMP_HASH(name, hash) \
263 wg_dump_hash(__func__, name, hash, WG_HASH_LEN)
264 #define WG_DUMP_HASH48(name, hash) \
265 wg_dump_hash(__func__, name, hash, 48)
266 #define WG_DUMP_BUF(buf, size) \
267 wg_dump_buf(__func__, buf, size)
268 #else
269 #define WG_DUMP_HASH(name, hash) __nothing
270 #define WG_DUMP_HASH48(name, hash) __nothing
271 #define WG_DUMP_BUF(buf, size) __nothing
272 #endif /* WG_DEBUG_DUMP */
273
274 #define WG_MTU 1420
275 #define WG_ALLOWEDIPS 16
276
277 #define CURVE25519_KEY_LEN 32
278 #define TAI64N_LEN (sizeof(uint32_t) * 3)
279 #define POLY1305_AUTHTAG_LEN 16
280 #define HMAC_BLOCK_LEN 64
281
282 /* [N] 4.1: "DHLEN must be 32 or greater." WireGuard chooses 32. */
283 /* [N] 4.3: Hash functions */
284 #define NOISE_DHLEN 32
285 /* [N] 4.3: "Must be 32 or 64." WireGuard chooses 32. */
286 #define NOISE_HASHLEN 32
287 #define NOISE_BLOCKLEN 64
288 #define NOISE_HKDF_OUTPUT_LEN NOISE_HASHLEN
289 /* [N] 5.1: "k" */
290 #define NOISE_CIPHER_KEY_LEN 32
291 /*
292 * [N] 9.2: "psk"
293 * "... psk is a 32-byte secret value provided by the application."
294 */
295 #define NOISE_PRESHARED_KEY_LEN 32
296
297 #define WG_STATIC_KEY_LEN CURVE25519_KEY_LEN
298 #define WG_TIMESTAMP_LEN TAI64N_LEN
299
300 #define WG_PRESHARED_KEY_LEN NOISE_PRESHARED_KEY_LEN
301
302 #define WG_COOKIE_LEN 16
303 #define WG_MAC_LEN 16
304 #define WG_RANDVAL_LEN 24
305
306 #define WG_EPHEMERAL_KEY_LEN CURVE25519_KEY_LEN
307 /* [N] 5.2: "ck: A chaining key of HASHLEN bytes" */
308 #define WG_CHAINING_KEY_LEN NOISE_HASHLEN
309 /* [N] 5.2: "h: A hash output of HASHLEN bytes" */
310 #define WG_HASH_LEN NOISE_HASHLEN
311 #define WG_CIPHER_KEY_LEN NOISE_CIPHER_KEY_LEN
312 #define WG_DH_OUTPUT_LEN NOISE_DHLEN
313 #define WG_KDF_OUTPUT_LEN NOISE_HKDF_OUTPUT_LEN
314 #define WG_AUTHTAG_LEN POLY1305_AUTHTAG_LEN
315 #define WG_DATA_KEY_LEN 32
316 #define WG_SALT_LEN 24
317
318 /*
319 * The protocol messages
320 */
321 struct wg_msg {
322 uint32_t wgm_type;
323 } __packed;
324
325 /* [W] 5.4.2 First Message: Initiator to Responder */
326 struct wg_msg_init {
327 uint32_t wgmi_type;
328 uint32_t wgmi_sender;
329 uint8_t wgmi_ephemeral[WG_EPHEMERAL_KEY_LEN];
330 uint8_t wgmi_static[WG_STATIC_KEY_LEN + WG_AUTHTAG_LEN];
331 uint8_t wgmi_timestamp[WG_TIMESTAMP_LEN + WG_AUTHTAG_LEN];
332 uint8_t wgmi_mac1[WG_MAC_LEN];
333 uint8_t wgmi_mac2[WG_MAC_LEN];
334 } __packed;
335
336 /* [W] 5.4.3 Second Message: Responder to Initiator */
337 struct wg_msg_resp {
338 uint32_t wgmr_type;
339 uint32_t wgmr_sender;
340 uint32_t wgmr_receiver;
341 uint8_t wgmr_ephemeral[WG_EPHEMERAL_KEY_LEN];
342 uint8_t wgmr_empty[0 + WG_AUTHTAG_LEN];
343 uint8_t wgmr_mac1[WG_MAC_LEN];
344 uint8_t wgmr_mac2[WG_MAC_LEN];
345 } __packed;
346
347 /* [W] 5.4.6 Subsequent Messages: Transport Data Messages */
348 struct wg_msg_data {
349 uint32_t wgmd_type;
350 uint32_t wgmd_receiver;
351 uint64_t wgmd_counter;
352 uint32_t wgmd_packet[0];
353 } __packed;
354
355 /* [W] 5.4.7 Under Load: Cookie Reply Message */
356 struct wg_msg_cookie {
357 uint32_t wgmc_type;
358 uint32_t wgmc_receiver;
359 uint8_t wgmc_salt[WG_SALT_LEN];
360 uint8_t wgmc_cookie[WG_COOKIE_LEN + WG_AUTHTAG_LEN];
361 } __packed;
362
363 #define WG_MSG_TYPE_INIT 1
364 #define WG_MSG_TYPE_RESP 2
365 #define WG_MSG_TYPE_COOKIE 3
366 #define WG_MSG_TYPE_DATA 4
367 #define WG_MSG_TYPE_MAX WG_MSG_TYPE_DATA
368
369 /* Sliding windows */
370
371 #define SLIWIN_BITS 2048u
372 #define SLIWIN_TYPE uint32_t
373 #define SLIWIN_BPW NBBY*sizeof(SLIWIN_TYPE)
374 #define SLIWIN_WORDS howmany(SLIWIN_BITS, SLIWIN_BPW)
375 #define SLIWIN_NPKT (SLIWIN_BITS - NBBY*sizeof(SLIWIN_TYPE))
376
377 struct sliwin {
378 SLIWIN_TYPE B[SLIWIN_WORDS];
379 uint64_t T;
380 };
381
382 static void
383 sliwin_reset(struct sliwin *W)
384 {
385
386 memset(W, 0, sizeof(*W));
387 }
388
389 static int
390 sliwin_check_fast(const volatile struct sliwin *W, uint64_t S)
391 {
392
393 /*
394 * If it's more than one window older than the highest sequence
395 * number we've seen, reject.
396 */
397 #ifdef __HAVE_ATOMIC64_LOADSTORE
398 if (S + SLIWIN_NPKT < atomic_load_relaxed(&W->T))
399 return EAUTH;
400 #endif
401
402 /*
403 * Otherwise, we need to take the lock to decide, so don't
404 * reject just yet. Caller must serialize a call to
405 * sliwin_update in this case.
406 */
407 return 0;
408 }
409
410 static int
411 sliwin_update(struct sliwin *W, uint64_t S)
412 {
413 unsigned word, bit;
414
415 /*
416 * If it's more than one window older than the highest sequence
417 * number we've seen, reject.
418 */
419 if (S + SLIWIN_NPKT < W->T)
420 return EAUTH;
421
422 /*
423 * If it's higher than the highest sequence number we've seen,
424 * advance the window.
425 */
426 if (S > W->T) {
427 uint64_t i = W->T / SLIWIN_BPW;
428 uint64_t j = S / SLIWIN_BPW;
429 unsigned k;
430
431 for (k = 0; k < MIN(j - i, SLIWIN_WORDS); k++)
432 W->B[(i + k + 1) % SLIWIN_WORDS] = 0;
433 #ifdef __HAVE_ATOMIC64_LOADSTORE
434 atomic_store_relaxed(&W->T, S);
435 #else
436 W->T = S;
437 #endif
438 }
439
440 /* Test and set the bit -- if already set, reject. */
441 word = (S / SLIWIN_BPW) % SLIWIN_WORDS;
442 bit = S % SLIWIN_BPW;
443 if (W->B[word] & (1UL << bit))
444 return EAUTH;
445 W->B[word] |= 1UL << bit;
446
447 /* Accept! */
448 return 0;
449 }
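/*
 * Illustrative sketch (an assumption about intended use, not a copy of
 * the receive path): sliwin_check_fast() is a cheap, lock-free early
 * reject, and sliwin_update() makes the authoritative replay decision
 * under the per-session receive-window lock.  The function name
 * example_check_replay is hypothetical.
 *
 *	static int
 *	example_check_replay(struct wg_session *wgs, uint64_t seqno)
 *	{
 *		int error;
 *
 *		// Fast path: reject clearly stale counters without the lock.
 *		error = sliwin_check_fast(&wgs->wgs_recvwin->window, seqno);
 *		if (error)
 *			return error;
 *
 *		// Slow path: serialize the test-and-set of the window bit.
 *		mutex_enter(&wgs->wgs_recvwin->lock);
 *		error = sliwin_update(&wgs->wgs_recvwin->window, seqno);
 *		mutex_exit(&wgs->wgs_recvwin->lock);
 *
 *		return error;	// 0 accepts, EAUTH rejects
 *	}
 */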
450
451 struct wg_session {
452 struct wg_peer *wgs_peer;
453 struct psref_target
454 wgs_psref;
455
456 int wgs_state;
457 #define WGS_STATE_UNKNOWN 0
458 #define WGS_STATE_INIT_ACTIVE 1
459 #define WGS_STATE_INIT_PASSIVE 2
460 #define WGS_STATE_ESTABLISHED 3
461 #define WGS_STATE_DESTROYING 4
462
463 time_t wgs_time_established;
464 time_t wgs_time_last_data_sent;
465 bool wgs_is_initiator;
466
467 uint32_t wgs_local_index;
468 uint32_t wgs_remote_index;
469 #ifdef __HAVE_ATOMIC64_LOADSTORE
470 volatile uint64_t
471 wgs_send_counter;
472 #else
473 kmutex_t wgs_send_counter_lock;
474 uint64_t wgs_send_counter;
475 #endif
476
477 struct {
478 kmutex_t lock;
479 struct sliwin window;
480 } *wgs_recvwin;
481
482 uint8_t wgs_handshake_hash[WG_HASH_LEN];
483 uint8_t wgs_chaining_key[WG_CHAINING_KEY_LEN];
484 uint8_t wgs_ephemeral_key_pub[WG_EPHEMERAL_KEY_LEN];
485 uint8_t wgs_ephemeral_key_priv[WG_EPHEMERAL_KEY_LEN];
486 uint8_t wgs_ephemeral_key_peer[WG_EPHEMERAL_KEY_LEN];
487 uint8_t wgs_tkey_send[WG_DATA_KEY_LEN];
488 uint8_t wgs_tkey_recv[WG_DATA_KEY_LEN];
489 };
490
491 struct wg_sockaddr {
492 union {
493 struct sockaddr_storage _ss;
494 struct sockaddr _sa;
495 struct sockaddr_in _sin;
496 struct sockaddr_in6 _sin6;
497 };
498 struct psref_target wgsa_psref;
499 };
500
501 #define wgsatoss(wgsa) (&(wgsa)->_ss)
502 #define wgsatosa(wgsa) (&(wgsa)->_sa)
503 #define wgsatosin(wgsa) (&(wgsa)->_sin)
504 #define wgsatosin6(wgsa) (&(wgsa)->_sin6)
505
506 #define wgsa_family(wgsa) (wgsatosa(wgsa)->sa_family)
507
508 struct wg_peer;
509 struct wg_allowedip {
510 struct radix_node wga_nodes[2];
511 struct wg_sockaddr _wga_sa_addr;
512 struct wg_sockaddr _wga_sa_mask;
513 #define wga_sa_addr _wga_sa_addr._sa
514 #define wga_sa_mask _wga_sa_mask._sa
515
516 int wga_family;
517 uint8_t wga_cidr;
518 union {
519 struct in_addr _ip4;
520 struct in6_addr _ip6;
521 } wga_addr;
522 #define wga_addr4 wga_addr._ip4
523 #define wga_addr6 wga_addr._ip6
524
525 struct wg_peer *wga_peer;
526 };
527
528 typedef uint8_t wg_timestamp_t[WG_TIMESTAMP_LEN];
529
530 struct wg_ppsratecheck {
531 struct timeval wgprc_lasttime;
532 int wgprc_curpps;
533 };
534
535 struct wg_softc;
536 struct wg_peer {
537 struct wg_softc *wgp_sc;
538 char wgp_name[WG_PEER_NAME_MAXLEN + 1];
539 struct pslist_entry wgp_peerlist_entry;
540 pserialize_t wgp_psz;
541 struct psref_target wgp_psref;
542 kmutex_t *wgp_lock;
543 kmutex_t *wgp_intr_lock;
544
545 uint8_t wgp_pubkey[WG_STATIC_KEY_LEN];
546 struct wg_sockaddr *wgp_endpoint;
547 struct wg_sockaddr *wgp_endpoint0;
548 volatile unsigned wgp_endpoint_changing;
549 bool wgp_endpoint_available;
550
551 /* The preshared key (optional) */
552 uint8_t wgp_psk[WG_PRESHARED_KEY_LEN];
553
554 struct wg_session *wgp_session_stable;
555 struct wg_session *wgp_session_unstable;
556
557 /* first outgoing packet awaiting session initiation */
558 struct mbuf *wgp_pending;
559
560 /* timestamp in big-endian */
561 wg_timestamp_t wgp_timestamp_latest_init;
562
563 struct timespec wgp_last_handshake_time;
564
565 callout_t wgp_rekey_timer;
566 callout_t wgp_handshake_timeout_timer;
567 callout_t wgp_session_dtor_timer;
568
569 time_t wgp_handshake_start_time;
570
571 int wgp_n_allowedips;
572 struct wg_allowedip wgp_allowedips[WG_ALLOWEDIPS];
573
574 time_t wgp_latest_cookie_time;
575 uint8_t wgp_latest_cookie[WG_COOKIE_LEN];
576 uint8_t wgp_last_sent_mac1[WG_MAC_LEN];
577 bool wgp_last_sent_mac1_valid;
578 uint8_t wgp_last_sent_cookie[WG_COOKIE_LEN];
579 bool wgp_last_sent_cookie_valid;
580
581 time_t wgp_last_msg_received_time[WG_MSG_TYPE_MAX];
582
583 time_t wgp_last_genrandval_time;
584 uint32_t wgp_randval;
585
586 struct wg_ppsratecheck wgp_ppsratecheck;
587
588 struct work wgp_work;
589 unsigned int wgp_tasks;
590 #define WGP_TASK_SEND_INIT_MESSAGE __BIT(0)
591 #define WGP_TASK_RETRY_HANDSHAKE __BIT(1)
592 #define WGP_TASK_ESTABLISH_SESSION __BIT(2)
593 #define WGP_TASK_ENDPOINT_CHANGED __BIT(3)
594 #define WGP_TASK_SEND_KEEPALIVE_MESSAGE __BIT(4)
595 #define WGP_TASK_DESTROY_PREV_SESSION __BIT(5)
596 };
597
598 struct wg_ops;
599
600 struct wg_softc {
601 struct ifnet wg_if;
602 LIST_ENTRY(wg_softc) wg_list;
603 kmutex_t *wg_lock;
604 kmutex_t *wg_intr_lock;
605 krwlock_t *wg_rwlock;
606
607 uint8_t wg_privkey[WG_STATIC_KEY_LEN];
608 uint8_t wg_pubkey[WG_STATIC_KEY_LEN];
609
610 int wg_npeers;
611 struct pslist_head wg_peers;
612 struct thmap *wg_peers_bypubkey;
613 struct thmap *wg_peers_byname;
614 struct thmap *wg_sessions_byindex;
615 uint16_t wg_listen_port;
616
617 struct threadpool *wg_threadpool;
618
619 struct threadpool_job wg_job;
620 int wg_upcalls;
621 #define WG_UPCALL_INET __BIT(0)
622 #define WG_UPCALL_INET6 __BIT(1)
623
624 #ifdef INET
625 struct socket *wg_so4;
626 struct radix_node_head *wg_rtable_ipv4;
627 #endif
628 #ifdef INET6
629 struct socket *wg_so6;
630 struct radix_node_head *wg_rtable_ipv6;
631 #endif
632
633 struct wg_ppsratecheck wg_ppsratecheck;
634
635 struct wg_ops *wg_ops;
636
637 #ifdef WG_RUMPKERNEL
638 struct wg_user *wg_user;
639 #endif
640 };
641
642 /* [W] 6.1 Preliminaries */
643 #define WG_REKEY_AFTER_MESSAGES (1ULL << 60)
644 #define WG_REJECT_AFTER_MESSAGES (UINT64_MAX - (1 << 13))
645 #define WG_REKEY_AFTER_TIME 120
646 #define WG_REJECT_AFTER_TIME 180
647 #define WG_REKEY_ATTEMPT_TIME 90
648 #define WG_REKEY_TIMEOUT 5
649 #define WG_KEEPALIVE_TIMEOUT 10
650
651 #define WG_COOKIE_TIME 120
652 #define WG_RANDVAL_TIME (2 * 60)
653
654 static uint64_t wg_rekey_after_messages = WG_REKEY_AFTER_MESSAGES;
655 static uint64_t wg_reject_after_messages = WG_REJECT_AFTER_MESSAGES;
656 static unsigned wg_rekey_after_time = WG_REKEY_AFTER_TIME;
657 static unsigned wg_reject_after_time = WG_REJECT_AFTER_TIME;
658 static unsigned wg_rekey_attempt_time = WG_REKEY_ATTEMPT_TIME;
659 static unsigned wg_rekey_timeout = WG_REKEY_TIMEOUT;
660 static unsigned wg_keepalive_timeout = WG_KEEPALIVE_TIMEOUT;
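/*
 * Rough timeline implied by the constants above (an interpretation of
 * [W] 6.1/6.2, not a quote): an initiator starts rekeying
 * wg_rekey_after_time (120s) after a session is established, retrying a
 * stalled handshake every wg_rekey_timeout (5s) for up to
 * wg_rekey_attempt_time (90s); both sides stop using the session
 * wg_reject_after_time (180s) after establishment, leaving a rekey that
 * starts at t=120s a 60-second margin before the old keys expire.
 */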
661
662 static struct mbuf *
663 wg_get_mbuf(size_t, size_t);
664
665 static int wg_send_data_msg(struct wg_peer *, struct wg_session *,
666 struct mbuf *);
667 static int wg_send_cookie_msg(struct wg_softc *, struct wg_peer *,
668 const uint32_t, const uint8_t [], const struct sockaddr *);
669 static int wg_send_handshake_msg_resp(struct wg_softc *, struct wg_peer *,
670 struct wg_session *, const struct wg_msg_init *);
671 static void wg_send_keepalive_msg(struct wg_peer *, struct wg_session *);
672
673 static struct wg_peer *
674 wg_pick_peer_by_sa(struct wg_softc *, const struct sockaddr *,
675 struct psref *);
676 static struct wg_peer *
677 wg_lookup_peer_by_pubkey(struct wg_softc *,
678 const uint8_t [], struct psref *);
679
680 static struct wg_session *
681 wg_lookup_session_by_index(struct wg_softc *,
682 const uint32_t, struct psref *);
683
684 static void wg_update_endpoint_if_necessary(struct wg_peer *,
685 const struct sockaddr *);
686
687 static void wg_schedule_rekey_timer(struct wg_peer *);
688 static void wg_schedule_session_dtor_timer(struct wg_peer *);
689
690 static bool wg_is_underload(struct wg_softc *, struct wg_peer *, int);
691 static void wg_calculate_keys(struct wg_session *, const bool);
692
693 static void wg_clear_states(struct wg_session *);
694
695 static void wg_get_peer(struct wg_peer *, struct psref *);
696 static void wg_put_peer(struct wg_peer *, struct psref *);
697
698 static int wg_send_so(struct wg_peer *, struct mbuf *);
699 static int wg_send_udp(struct wg_peer *, struct mbuf *);
700 static int wg_output(struct ifnet *, struct mbuf *,
701 const struct sockaddr *, const struct rtentry *);
702 static void wg_input(struct ifnet *, struct mbuf *, const int);
703 static int wg_ioctl(struct ifnet *, u_long, void *);
704 static int wg_bind_port(struct wg_softc *, const uint16_t);
705 static int wg_init(struct ifnet *);
706 static void wg_stop(struct ifnet *, int);
707
708 static void wg_peer_work(struct work *, void *);
709 static void wg_job(struct threadpool_job *);
710 static void wgintr(void *);
711 static void wg_purge_pending_packets(struct wg_peer *);
712
713 static int wg_clone_create(struct if_clone *, int);
714 static int wg_clone_destroy(struct ifnet *);
715
716 struct wg_ops {
717 int (*send_hs_msg)(struct wg_peer *, struct mbuf *);
718 int (*send_data_msg)(struct wg_peer *, struct mbuf *);
719 void (*input)(struct ifnet *, struct mbuf *, const int);
720 int (*bind_port)(struct wg_softc *, const uint16_t);
721 };
722
723 struct wg_ops wg_ops_rumpkernel = {
724 .send_hs_msg = wg_send_so,
725 .send_data_msg = wg_send_udp,
726 .input = wg_input,
727 .bind_port = wg_bind_port,
728 };
729
730 #ifdef WG_RUMPKERNEL
731 static bool wg_user_mode(struct wg_softc *);
732 static int wg_ioctl_linkstr(struct wg_softc *, struct ifdrv *);
733
734 static int wg_send_user(struct wg_peer *, struct mbuf *);
735 static void wg_input_user(struct ifnet *, struct mbuf *, const int);
736 static int wg_bind_port_user(struct wg_softc *, const uint16_t);
737
738 struct wg_ops wg_ops_rumpuser = {
739 .send_hs_msg = wg_send_user,
740 .send_data_msg = wg_send_user,
741 .input = wg_input_user,
742 .bind_port = wg_bind_port_user,
743 };
744 #endif
745
746 #define WG_PEER_READER_FOREACH(wgp, wg) \
747 PSLIST_READER_FOREACH((wgp), &(wg)->wg_peers, struct wg_peer, \
748 wgp_peerlist_entry)
749 #define WG_PEER_WRITER_FOREACH(wgp, wg) \
750 PSLIST_WRITER_FOREACH((wgp), &(wg)->wg_peers, struct wg_peer, \
751 wgp_peerlist_entry)
752 #define WG_PEER_WRITER_INSERT_HEAD(wgp, wg) \
753 PSLIST_WRITER_INSERT_HEAD(&(wg)->wg_peers, (wgp), wgp_peerlist_entry)
754 #define WG_PEER_WRITER_REMOVE(wgp) \
755 PSLIST_WRITER_REMOVE((wgp), wgp_peerlist_entry)
756
757 struct wg_route {
758 struct radix_node wgr_nodes[2];
759 struct wg_peer *wgr_peer;
760 };
761
762 static struct radix_node_head *
763 wg_rnh(struct wg_softc *wg, const int family)
764 {
765
766 switch (family) {
767 case AF_INET:
768 return wg->wg_rtable_ipv4;
769 #ifdef INET6
770 case AF_INET6:
771 return wg->wg_rtable_ipv6;
772 #endif
773 default:
774 return NULL;
775 }
776 }
777
778
779 /*
780 * Global variables
781 */
782 LIST_HEAD(wg_sclist, wg_softc);
783 static struct {
784 struct wg_sclist list;
785 kmutex_t lock;
786 } wg_softcs __cacheline_aligned;
787
788 struct psref_class *wg_psref_class __read_mostly;
789
790 static struct if_clone wg_cloner =
791 IF_CLONE_INITIALIZER("wg", wg_clone_create, wg_clone_destroy);
792
793 static struct pktqueue *wg_pktq __read_mostly;
794 static struct workqueue *wg_wq __read_mostly;
795
796 void wgattach(int);
797 /* ARGSUSED */
798 void
799 wgattach(int count)
800 {
801 /*
802 * Nothing to do here; initialization is handled by the
803 * module initialization code in wginit() below.
804 */
805 }
806
807 static void
808 wginit(void)
809 {
810 int error __diagused;
811
812 wg_psref_class = psref_class_create("wg", IPL_SOFTNET);
813
814 mutex_init(&wg_softcs.lock, MUTEX_DEFAULT, IPL_NONE);
815 LIST_INIT(&wg_softcs.list);
816
817 wg_pktq = pktq_create(IFQ_MAXLEN, wgintr, NULL);
818 KASSERT(wg_pktq != NULL);
819
820 error = workqueue_create(&wg_wq, "wgpeer", wg_peer_work, NULL,
821 PRI_NONE, IPL_SOFTNET, WQ_MPSAFE|WQ_PERCPU);
822 KASSERT(error == 0);
823
824 if_clone_attach(&wg_cloner);
825 }
826
827 static int
828 wgdetach(void)
829 {
830 int error = 0;
831
832 mutex_enter(&wg_softcs.lock);
833 if (!LIST_EMPTY(&wg_softcs.list))
834 error = EBUSY;
835 mutex_exit(&wg_softcs.lock);
837
838 if (error == 0) {
839 psref_class_destroy(wg_psref_class);
840
841 if_clone_detach(&wg_cloner);
842 }
843
844 return error;
845 }
846
847 static void
848 wg_init_key_and_hash(uint8_t ckey[WG_CHAINING_KEY_LEN],
849 uint8_t hash[WG_HASH_LEN])
850 {
851 /* [W] 5.4: CONSTRUCTION */
852 const char *signature = "Noise_IKpsk2_25519_ChaChaPoly_BLAKE2s";
853 /* [W] 5.4: IDENTIFIER */
854 const char *id = "WireGuard v1 zx2c4 Jason@zx2c4.com";
855 struct blake2s state;
856
857 blake2s(ckey, WG_CHAINING_KEY_LEN, NULL, 0,
858 signature, strlen(signature));
859
860 CTASSERT(WG_HASH_LEN == WG_CHAINING_KEY_LEN);
861 memcpy(hash, ckey, WG_CHAINING_KEY_LEN);
862
863 blake2s_init(&state, WG_HASH_LEN, NULL, 0);
864 blake2s_update(&state, ckey, WG_CHAINING_KEY_LEN);
865 blake2s_update(&state, id, strlen(id));
866 blake2s_final(&state, hash);
867
868 WG_DUMP_HASH("ckey", ckey);
869 WG_DUMP_HASH("hash", hash);
870 }
871
872 static void
873 wg_algo_hash(uint8_t hash[WG_HASH_LEN], const uint8_t input[],
874 const size_t inputsize)
875 {
876 struct blake2s state;
877
878 blake2s_init(&state, WG_HASH_LEN, NULL, 0);
879 blake2s_update(&state, hash, WG_HASH_LEN);
880 blake2s_update(&state, input, inputsize);
881 blake2s_final(&state, hash);
882 }
883
884 static void
885 wg_algo_mac(uint8_t out[], const size_t outsize,
886 const uint8_t key[], const size_t keylen,
887 const uint8_t input1[], const size_t input1len,
888 const uint8_t input2[], const size_t input2len)
889 {
890 struct blake2s state;
891
892 blake2s_init(&state, outsize, key, keylen);
893
894 blake2s_update(&state, input1, input1len);
895 if (input2 != NULL)
896 blake2s_update(&state, input2, input2len);
897 blake2s_final(&state, out);
898 }
899
900 static void
901 wg_algo_mac_mac1(uint8_t out[], const size_t outsize,
902 const uint8_t input1[], const size_t input1len,
903 const uint8_t input2[], const size_t input2len)
904 {
905 struct blake2s state;
906 /* [W] 5.4: LABEL-MAC1 */
907 const char *label = "mac1----";
908 uint8_t key[WG_HASH_LEN];
909
910 blake2s_init(&state, sizeof(key), NULL, 0);
911 blake2s_update(&state, label, strlen(label));
912 blake2s_update(&state, input1, input1len);
913 blake2s_final(&state, key);
914
915 blake2s_init(&state, outsize, key, sizeof(key));
916 if (input2 != NULL)
917 blake2s_update(&state, input2, input2len);
918 blake2s_final(&state, out);
919 }
920
921 static void
922 wg_algo_mac_cookie(uint8_t out[], const size_t outsize,
923 const uint8_t input1[], const size_t input1len)
924 {
925 struct blake2s state;
926 /* [W] 5.4: LABEL-COOKIE */
927 const char *label = "cookie--";
928
929 blake2s_init(&state, outsize, NULL, 0);
930 blake2s_update(&state, label, strlen(label));
931 blake2s_update(&state, input1, input1len);
932 blake2s_final(&state, out);
933 }
934
935 static void
936 wg_algo_generate_keypair(uint8_t pubkey[WG_EPHEMERAL_KEY_LEN],
937 uint8_t privkey[WG_EPHEMERAL_KEY_LEN])
938 {
939
940 CTASSERT(WG_EPHEMERAL_KEY_LEN == crypto_scalarmult_curve25519_BYTES);
941
942 cprng_strong(kern_cprng, privkey, WG_EPHEMERAL_KEY_LEN, 0);
943 crypto_scalarmult_base(pubkey, privkey);
944 }
945
946 static void
947 wg_algo_dh(uint8_t out[WG_DH_OUTPUT_LEN],
948 const uint8_t privkey[WG_STATIC_KEY_LEN],
949 const uint8_t pubkey[WG_STATIC_KEY_LEN])
950 {
951
952 CTASSERT(WG_STATIC_KEY_LEN == crypto_scalarmult_curve25519_BYTES);
953
954 int ret __diagused = crypto_scalarmult(out, privkey, pubkey);
955 KASSERT(ret == 0);
956 }
957
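/*
 * wg_algo_hmac(out, outlen, key, keylen, in, inlen)
 *
 *	HMAC with BLAKE2s as the hash, following the usual RFC 2104
 *	construction used by [N] 4.3's HMAC-HASH:
 *
 *		HMAC(K, m) = H((K' ^ opad) || H((K' ^ ipad) || m))
 *
 *	where K' is the key zero-padded to the 64-byte block size and
 *	H is unkeyed BLAKE2s with a 32-byte output.
 */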
958 static void
959 wg_algo_hmac(uint8_t out[], const size_t outlen,
960 const uint8_t key[], const size_t keylen,
961 const uint8_t in[], const size_t inlen)
962 {
963 #define IPAD 0x36
964 #define OPAD 0x5c
965 uint8_t hmackey[HMAC_BLOCK_LEN] = {0};
966 uint8_t ipad[HMAC_BLOCK_LEN];
967 uint8_t opad[HMAC_BLOCK_LEN];
968 int i;
969 struct blake2s state;
970
971 KASSERT(outlen == WG_HASH_LEN);
972 KASSERT(keylen <= HMAC_BLOCK_LEN);
973
974 memcpy(hmackey, key, keylen);
975
976 for (i = 0; i < sizeof(hmackey); i++) {
977 ipad[i] = hmackey[i] ^ IPAD;
978 opad[i] = hmackey[i] ^ OPAD;
979 }
980
981 blake2s_init(&state, WG_HASH_LEN, NULL, 0);
982 blake2s_update(&state, ipad, sizeof(ipad));
983 blake2s_update(&state, in, inlen);
984 blake2s_final(&state, out);
985
986 blake2s_init(&state, WG_HASH_LEN, NULL, 0);
987 blake2s_update(&state, opad, sizeof(opad));
988 blake2s_update(&state, out, WG_HASH_LEN);
989 blake2s_final(&state, out);
990 #undef IPAD
991 #undef OPAD
992 }
993
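/*
 * wg_algo_kdf(out1, out2, out3, ckey, input, inputlen)
 *
 *	HKDF as specified in [N] 4.3, producing up to three 32-byte
 *	outputs:
 *
 *		prk  = HMAC(ckey, input)
 *		out1 = HMAC(prk, 0x01)
 *		out2 = HMAC(prk, out1 || 0x02)
 *		out3 = HMAC(prk, out2 || 0x03)
 *
 *	out2 and out3 may be NULL when fewer outputs are needed.
 */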
994 static void
995 wg_algo_kdf(uint8_t out1[WG_KDF_OUTPUT_LEN], uint8_t out2[WG_KDF_OUTPUT_LEN],
996 uint8_t out3[WG_KDF_OUTPUT_LEN], const uint8_t ckey[WG_CHAINING_KEY_LEN],
997 const uint8_t input[], const size_t inputlen)
998 {
999 uint8_t tmp1[WG_KDF_OUTPUT_LEN], tmp2[WG_KDF_OUTPUT_LEN + 1];
1000 uint8_t one[1];
1001
1002 /*
1003 * [N] 4.3: "an input_key_material byte sequence with length
1004 * either zero bytes, 32 bytes, or DHLEN bytes."
1005 */
1006 KASSERT(inputlen == 0 || inputlen == 32 || inputlen == NOISE_DHLEN);
1007
1008 WG_DUMP_HASH("ckey", ckey);
1009 if (input != NULL)
1010 WG_DUMP_HASH("input", input);
1011 wg_algo_hmac(tmp1, sizeof(tmp1), ckey, WG_CHAINING_KEY_LEN,
1012 input, inputlen);
1013 WG_DUMP_HASH("tmp1", tmp1);
1014 one[0] = 1;
1015 wg_algo_hmac(out1, WG_KDF_OUTPUT_LEN, tmp1, sizeof(tmp1),
1016 one, sizeof(one));
1017 WG_DUMP_HASH("out1", out1);
1018 if (out2 == NULL)
1019 return;
1020 memcpy(tmp2, out1, WG_KDF_OUTPUT_LEN);
1021 tmp2[WG_KDF_OUTPUT_LEN] = 2;
1022 wg_algo_hmac(out2, WG_KDF_OUTPUT_LEN, tmp1, sizeof(tmp1),
1023 tmp2, sizeof(tmp2));
1024 WG_DUMP_HASH("out2", out2);
1025 if (out3 == NULL)
1026 return;
1027 memcpy(tmp2, out2, WG_KDF_OUTPUT_LEN);
1028 tmp2[WG_KDF_OUTPUT_LEN] = 3;
1029 wg_algo_hmac(out3, WG_KDF_OUTPUT_LEN, tmp1, sizeof(tmp1),
1030 tmp2, sizeof(tmp2));
1031 WG_DUMP_HASH("out3", out3);
1032 }
1033
1034 static void
1035 wg_algo_dh_kdf(uint8_t ckey[WG_CHAINING_KEY_LEN],
1036 uint8_t cipher_key[WG_CIPHER_KEY_LEN],
1037 const uint8_t local_key[WG_STATIC_KEY_LEN],
1038 const uint8_t remote_key[WG_STATIC_KEY_LEN])
1039 {
1040 uint8_t dhout[WG_DH_OUTPUT_LEN];
1041
1042 wg_algo_dh(dhout, local_key, remote_key);
1043 wg_algo_kdf(ckey, cipher_key, NULL, ckey, dhout, sizeof(dhout));
1044
1045 WG_DUMP_HASH("dhout", dhout);
1046 WG_DUMP_HASH("ckey", ckey);
1047 if (cipher_key != NULL)
1048 WG_DUMP_HASH("cipher_key", cipher_key);
1049 }
1050
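/*
 * wg_algo_aead_enc/wg_algo_aead_dec
 *
 *	ChaCha20-Poly1305 (IETF variant) with the 96-bit nonce laid out
 *	as WireGuard requires ([W] 5.4.6): 32 bits of zeros followed by
 *	the 64-bit little-endian counter.  For example, counter 5 gives
 *	the nonce bytes 00 00 00 00 05 00 00 00 00 00 00 00.
 */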
1051 static void
1052 wg_algo_aead_enc(uint8_t out[], size_t expected_outsize, const uint8_t key[],
1053 const uint64_t counter, const uint8_t plain[], const size_t plainsize,
1054 const uint8_t auth[], size_t authlen)
1055 {
1056 uint8_t nonce[(32 + 64) / 8] = {0};
1057 long long unsigned int outsize;
1058 int error __diagused;
1059
1060 le64enc(&nonce[4], counter);
1061
1062 error = crypto_aead_chacha20poly1305_ietf_encrypt(out, &outsize, plain,
1063 plainsize, auth, authlen, NULL, nonce, key);
1064 KASSERT(error == 0);
1065 KASSERT(outsize == expected_outsize);
1066 }
1067
1068 static int
1069 wg_algo_aead_dec(uint8_t out[], size_t expected_outsize, const uint8_t key[],
1070 const uint64_t counter, const uint8_t encrypted[],
1071 const size_t encryptedsize, const uint8_t auth[], size_t authlen)
1072 {
1073 uint8_t nonce[(32 + 64) / 8] = {0};
1074 long long unsigned int outsize;
1075 int error;
1076
1077 le64enc(&nonce[4], counter);
1078
1079 error = crypto_aead_chacha20poly1305_ietf_decrypt(out, &outsize, NULL,
1080 encrypted, encryptedsize, auth, authlen, nonce, key);
1081 if (error == 0)
1082 KASSERT(outsize == expected_outsize);
1083 return error;
1084 }
1085
1086 static void
1087 wg_algo_xaead_enc(uint8_t out[], const size_t expected_outsize,
1088 const uint8_t key[], const uint8_t plain[], const size_t plainsize,
1089 const uint8_t auth[], size_t authlen,
1090 const uint8_t nonce[WG_SALT_LEN])
1091 {
1092 long long unsigned int outsize;
1093 int error __diagused;
1094
1095 CTASSERT(WG_SALT_LEN == crypto_aead_xchacha20poly1305_ietf_NPUBBYTES);
1096 error = crypto_aead_xchacha20poly1305_ietf_encrypt(out, &outsize,
1097 plain, plainsize, auth, authlen, NULL, nonce, key);
1098 KASSERT(error == 0);
1099 KASSERT(outsize == expected_outsize);
1100 }
1101
1102 static int
1103 wg_algo_xaead_dec(uint8_t out[], const size_t expected_outsize,
1104 const uint8_t key[], const uint8_t encrypted[], const size_t encryptedsize,
1105 const uint8_t auth[], size_t authlen,
1106 const uint8_t nonce[WG_SALT_LEN])
1107 {
1108 long long unsigned int outsize;
1109 int error;
1110
1111 error = crypto_aead_xchacha20poly1305_ietf_decrypt(out, &outsize, NULL,
1112 encrypted, encryptedsize, auth, authlen, nonce, key);
1113 if (error == 0)
1114 KASSERT(outsize == expected_outsize);
1115 return error;
1116 }
1117
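/*
 * wg_algo_tai64n(timestamp)
 *
 *	12-byte TAI64N-style timestamp: a big-endian 64-bit label of
 *	0x4000000000000000 + seconds, followed by a big-endian 32-bit
 *	nanosecond count.  Worked example, assuming tv_sec = 1600000000
 *	(0x5f5e1000) and tv_nsec = 123456789 (0x075bcd15):
 *
 *		40 00 00 00 5f 5e 10 00 07 5b cd 15
 *
 *	The seconds come from getnanotime(9), i.e. UTC rather than true
 *	TAI; hence the FIXME inside the function.
 */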
1118 static void
1119 wg_algo_tai64n(wg_timestamp_t timestamp)
1120 {
1121 struct timespec ts;
1122
1123 /* FIXME strict TAI64N (https://cr.yp.to/libtai/tai64.html) */
1124 getnanotime(&ts);
1125 /* TAI64 label in external TAI64 format */
1126 be32enc(timestamp, 0x40000000UL + (ts.tv_sec >> 32));
1127 /* second beginning from 1970 TAI */
1128 be32enc(timestamp + 4, ts.tv_sec & 0xffffffffU);
1129 /* nanosecond in big-endian format */
1130 be32enc(timestamp + 8, ts.tv_nsec);
1131 }
1132
1133 /*
1134 * wg_get_stable_session(wgp, psref)
1135 *
1136 * Get a passive reference to the current stable session, or
1137 * return NULL if there is no current stable session.
1138 *
1139 * The pointer is always there but the session is not necessarily
1140 * ESTABLISHED; if it is not ESTABLISHED, return NULL. However,
1141 * the session may transition from ESTABLISHED to DESTROYING while
1142 * the caller holds the passive reference.
1143 */
1144 static struct wg_session *
1145 wg_get_stable_session(struct wg_peer *wgp, struct psref *psref)
1146 {
1147 int s;
1148 struct wg_session *wgs;
1149
1150 s = pserialize_read_enter();
1151 wgs = atomic_load_consume(&wgp->wgp_session_stable);
1152 if (__predict_false(wgs->wgs_state != WGS_STATE_ESTABLISHED))
1153 wgs = NULL;
1154 else
1155 psref_acquire(psref, &wgs->wgs_psref, wg_psref_class);
1156 pserialize_read_exit(s);
1157
1158 return wgs;
1159 }
1160
1161 static void
1162 wg_put_session(struct wg_session *wgs, struct psref *psref)
1163 {
1164
1165 psref_release(psref, &wgs->wgs_psref, wg_psref_class);
1166 }
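/*
 * Illustrative usage sketch (not part of the driver): a softint-context
 * caller pairs wg_get_stable_session() with wg_put_session() around any
 * use of the stable session.  The function name example_use_stable is
 * hypothetical.
 *
 *	static void
 *	example_use_stable(struct wg_peer *wgp)
 *	{
 *		struct wg_session *wgs;
 *		struct psref psref;
 *
 *		wgs = wg_get_stable_session(wgp, &psref);
 *		if (wgs == NULL)
 *			return;		// no ESTABLISHED session yet
 *		// ... encrypt and transmit using wgs ...
 *		wg_put_session(wgs, &psref);
 *	}
 */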
1167
1168 static void
1169 wg_destroy_session(struct wg_softc *wg, struct wg_session *wgs)
1170 {
1171 struct wg_peer *wgp = wgs->wgs_peer;
1172 struct wg_session *wgs0 __diagused;
1173 void *garbage;
1174
1175 KASSERT(mutex_owned(wgp->wgp_lock));
1176 KASSERT(wgs->wgs_state != WGS_STATE_UNKNOWN);
1177
1178 /* Remove the session from the table. */
1179 wgs0 = thmap_del(wg->wg_sessions_byindex,
1180 &wgs->wgs_local_index, sizeof(wgs->wgs_local_index));
1181 KASSERT(wgs0 == wgs);
1182 garbage = thmap_stage_gc(wg->wg_sessions_byindex);
1183
1184 /* Wait for passive references to drain. */
1185 pserialize_perform(wgp->wgp_psz);
1186 psref_target_destroy(&wgs->wgs_psref, wg_psref_class);
1187
1188 /* Free memory, zero state, and transition to UNKNOWN. */
1189 thmap_gc(wg->wg_sessions_byindex, garbage);
1190 wg_clear_states(wgs);
1191 wgs->wgs_state = WGS_STATE_UNKNOWN;
1192 }
1193
1194 /*
1195 * wg_get_session_index(wg, wgs)
1196 *
1197 * Choose a session index for wgs->wgs_local_index, and store it
1198 * in wg's table of sessions by index.
1199 *
1200 * wgs must be the unstable session of its peer, and must be
1201 * transitioning out of the UNKNOWN state.
1202 */
1203 static void
1204 wg_get_session_index(struct wg_softc *wg, struct wg_session *wgs)
1205 {
1206 struct wg_peer *wgp __diagused = wgs->wgs_peer;
1207 struct wg_session *wgs0;
1208 uint32_t index;
1209
1210 KASSERT(mutex_owned(wgp->wgp_lock));
1211 KASSERT(wgs == wgp->wgp_session_unstable);
1212 KASSERT(wgs->wgs_state == WGS_STATE_UNKNOWN);
1213
1214 do {
1215 /* Pick a uniform random index. */
1216 index = cprng_strong32();
1217
1218 /* Try to take it. */
1219 wgs->wgs_local_index = index;
1220 wgs0 = thmap_put(wg->wg_sessions_byindex,
1221 &wgs->wgs_local_index, sizeof wgs->wgs_local_index, wgs);
1222
1223 /* If someone else beat us, start over. */
1224 } while (__predict_false(wgs0 != wgs));
1225 }
1226
1227 /*
1228 * wg_put_session_index(wg, wgs)
1229 *
1230 * Remove wgs from the table of sessions by index, wait for any
1231 * passive references to drain, and transition the session to the
1232 * UNKNOWN state.
1233 *
1234 * wgs must be the unstable session of its peer, and must not be
1235 * UNKNOWN or ESTABLISHED.
1236 */
1237 static void
1238 wg_put_session_index(struct wg_softc *wg, struct wg_session *wgs)
1239 {
1240 struct wg_peer *wgp __diagused = wgs->wgs_peer;
1241
1242 KASSERT(mutex_owned(wgp->wgp_lock));
1243 KASSERT(wgs == wgp->wgp_session_unstable);
1244 KASSERT(wgs->wgs_state != WGS_STATE_UNKNOWN);
1245 KASSERT(wgs->wgs_state != WGS_STATE_ESTABLISHED);
1246
1247 wg_destroy_session(wg, wgs);
1248 psref_target_init(&wgs->wgs_psref, wg_psref_class);
1249 }
1250
1251 /*
1252 * Handshake patterns
1253 *
1254 * [W] 5: "These messages use the "IK" pattern from Noise"
1255 * [N] 7.5. Interactive handshake patterns (fundamental)
1256 * "The first character refers to the initiators static key:"
1257 * "I = Static key for initiator Immediately transmitted to responder,
1258 * despite reduced or absent identity hiding"
1259 * "The second character refers to the responders static key:"
1260 * "K = Static key for responder Known to initiator"
1261 * "IK:
1262 * <- s
1263 * ...
1264 * -> e, es, s, ss
1265 * <- e, ee, se"
1266 * [N] 9.4. Pattern modifiers
1267 * "IKpsk2:
1268 * <- s
1269 * ...
1270 * -> e, es, s, ss
1271 * <- e, ee, se, psk"
1272 */
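/*
 * For orientation, how the IKpsk2 tokens map onto the messages built
 * and parsed below (a summary drawn from [W] 5.4.2/5.4.3 and the code,
 * not a quote):
 *
 *	-> e, es, s, ss		wg_msg_init: wgmi_ephemeral carries "e";
 *				"es" and "ss" are the DH/KDF steps that key
 *				the AEADs for wgmi_static and wgmi_timestamp
 *	<- e, ee, se, psk	wg_msg_resp: wgmr_ephemeral carries "e";
 *				"ee", "se" and the preshared key are mixed
 *				into the chaining key before wgmr_empty
 */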
1273 static void
1274 wg_fill_msg_init(struct wg_softc *wg, struct wg_peer *wgp,
1275 struct wg_session *wgs, struct wg_msg_init *wgmi)
1276 {
1277 uint8_t ckey[WG_CHAINING_KEY_LEN]; /* [W] 5.4.2: Ci */
1278 uint8_t hash[WG_HASH_LEN]; /* [W] 5.4.2: Hi */
1279 uint8_t cipher_key[WG_CIPHER_KEY_LEN];
1280 uint8_t pubkey[WG_EPHEMERAL_KEY_LEN];
1281 uint8_t privkey[WG_EPHEMERAL_KEY_LEN];
1282
1283 KASSERT(mutex_owned(wgp->wgp_lock));
1284 KASSERT(wgs == wgp->wgp_session_unstable);
1285 KASSERT(wgs->wgs_state == WGS_STATE_INIT_ACTIVE);
1286
1287 wgmi->wgmi_type = htole32(WG_MSG_TYPE_INIT);
1288 wgmi->wgmi_sender = wgs->wgs_local_index;
1289
1290 /* [W] 5.4.2: First Message: Initiator to Responder */
1291
1292 /* Ci := HASH(CONSTRUCTION) */
1293 /* Hi := HASH(Ci || IDENTIFIER) */
1294 wg_init_key_and_hash(ckey, hash);
1295 /* Hi := HASH(Hi || Sr^pub) */
1296 wg_algo_hash(hash, wgp->wgp_pubkey, sizeof(wgp->wgp_pubkey));
1297
1298 WG_DUMP_HASH("hash", hash);
1299
1300 /* [N] 2.2: "e" */
1301 /* Ei^priv, Ei^pub := DH-GENERATE() */
1302 wg_algo_generate_keypair(pubkey, privkey);
1303 /* Ci := KDF1(Ci, Ei^pub) */
1304 wg_algo_kdf(ckey, NULL, NULL, ckey, pubkey, sizeof(pubkey));
1305 /* msg.ephemeral := Ei^pub */
1306 memcpy(wgmi->wgmi_ephemeral, pubkey, sizeof(wgmi->wgmi_ephemeral));
1307 /* Hi := HASH(Hi || msg.ephemeral) */
1308 wg_algo_hash(hash, pubkey, sizeof(pubkey));
1309
1310 WG_DUMP_HASH("ckey", ckey);
1311 WG_DUMP_HASH("hash", hash);
1312
1313 /* [N] 2.2: "es" */
1314 /* Ci, k := KDF2(Ci, DH(Ei^priv, Sr^pub)) */
1315 wg_algo_dh_kdf(ckey, cipher_key, privkey, wgp->wgp_pubkey);
1316
1317 /* [N] 2.2: "s" */
1318 /* msg.static := AEAD(k, 0, Si^pub, Hi) */
1319 wg_algo_aead_enc(wgmi->wgmi_static, sizeof(wgmi->wgmi_static),
1320 cipher_key, 0, wg->wg_pubkey, sizeof(wg->wg_pubkey),
1321 hash, sizeof(hash));
1322 /* Hi := HASH(Hi || msg.static) */
1323 wg_algo_hash(hash, wgmi->wgmi_static, sizeof(wgmi->wgmi_static));
1324
1325 WG_DUMP_HASH48("wgmi_static", wgmi->wgmi_static);
1326
1327 /* [N] 2.2: "ss" */
1328 /* Ci, k := KDF2(Ci, DH(Si^priv, Sr^pub)) */
1329 wg_algo_dh_kdf(ckey, cipher_key, wg->wg_privkey, wgp->wgp_pubkey);
1330
1331 /* msg.timestamp := AEAD(k, TIMESTAMP(), Hi) */
1332 wg_timestamp_t timestamp;
1333 wg_algo_tai64n(timestamp);
1334 wg_algo_aead_enc(wgmi->wgmi_timestamp, sizeof(wgmi->wgmi_timestamp),
1335 cipher_key, 0, timestamp, sizeof(timestamp), hash, sizeof(hash));
1336 /* Hi := HASH(Hi || msg.timestamp) */
1337 wg_algo_hash(hash, wgmi->wgmi_timestamp, sizeof(wgmi->wgmi_timestamp));
1338
1339 /* [W] 5.4.4 Cookie MACs */
1340 wg_algo_mac_mac1(wgmi->wgmi_mac1, sizeof(wgmi->wgmi_mac1),
1341 wgp->wgp_pubkey, sizeof(wgp->wgp_pubkey),
1342 (const uint8_t *)wgmi, offsetof(struct wg_msg_init, wgmi_mac1));
1343 /* Need mac1 to decrypt a cookie from a cookie message */
1344 memcpy(wgp->wgp_last_sent_mac1, wgmi->wgmi_mac1,
1345 sizeof(wgp->wgp_last_sent_mac1));
1346 wgp->wgp_last_sent_mac1_valid = true;
1347
1348 if (wgp->wgp_latest_cookie_time == 0 ||
1349 (time_uptime - wgp->wgp_latest_cookie_time) >= WG_COOKIE_TIME)
1350 memset(wgmi->wgmi_mac2, 0, sizeof(wgmi->wgmi_mac2));
1351 else {
1352 wg_algo_mac(wgmi->wgmi_mac2, sizeof(wgmi->wgmi_mac2),
1353 wgp->wgp_latest_cookie, WG_COOKIE_LEN,
1354 (const uint8_t *)wgmi,
1355 offsetof(struct wg_msg_init, wgmi_mac2),
1356 NULL, 0);
1357 }
1358
1359 memcpy(wgs->wgs_ephemeral_key_pub, pubkey, sizeof(pubkey));
1360 memcpy(wgs->wgs_ephemeral_key_priv, privkey, sizeof(privkey));
1361 memcpy(wgs->wgs_handshake_hash, hash, sizeof(hash));
1362 memcpy(wgs->wgs_chaining_key, ckey, sizeof(ckey));
1363 WG_DLOG("sender=%x\n", wgs->wgs_local_index);
1364 }
1365
1366 static void
1367 wg_handle_msg_init(struct wg_softc *wg, const struct wg_msg_init *wgmi,
1368 const struct sockaddr *src)
1369 {
1370 uint8_t ckey[WG_CHAINING_KEY_LEN]; /* [W] 5.4.2: Ci */
1371 uint8_t hash[WG_HASH_LEN]; /* [W] 5.4.2: Hi */
1372 uint8_t cipher_key[WG_CIPHER_KEY_LEN];
1373 uint8_t peer_pubkey[WG_STATIC_KEY_LEN];
1374 struct wg_peer *wgp;
1375 struct wg_session *wgs;
1376 int error, ret;
1377 struct psref psref_peer;
1378 uint8_t mac1[WG_MAC_LEN];
1379
1380 WG_TRACE("init msg received");
1381
1382 wg_algo_mac_mac1(mac1, sizeof(mac1),
1383 wg->wg_pubkey, sizeof(wg->wg_pubkey),
1384 (const uint8_t *)wgmi, offsetof(struct wg_msg_init, wgmi_mac1));
1385
1386 /*
1387 * [W] 5.3: Denial of Service Mitigation & Cookies
1388 * "the responder, ..., must always reject messages with an invalid
1389 * msg.mac1"
1390 */
1391 if (!consttime_memequal(mac1, wgmi->wgmi_mac1, sizeof(mac1))) {
1392 WG_DLOG("mac1 is invalid\n");
1393 return;
1394 }
1395
1396 /*
1397 * [W] 5.4.2: First Message: Initiator to Responder
1398 * "When the responder receives this message, it does the same
1399 * operations so that its final state variables are identical,
1400 * replacing the operands of the DH function to produce equivalent
1401 * values."
1402 * Note that the following comments on the operations are just
1403 * copies of the initiator's.
1404 */
1405
1406 /* Ci := HASH(CONSTRUCTION) */
1407 /* Hi := HASH(Ci || IDENTIFIER) */
1408 wg_init_key_and_hash(ckey, hash);
1409 /* Hi := HASH(Hi || Sr^pub) */
1410 wg_algo_hash(hash, wg->wg_pubkey, sizeof(wg->wg_pubkey));
1411
1412 /* [N] 2.2: "e" */
1413 /* Ci := KDF1(Ci, Ei^pub) */
1414 wg_algo_kdf(ckey, NULL, NULL, ckey, wgmi->wgmi_ephemeral,
1415 sizeof(wgmi->wgmi_ephemeral));
1416 /* Hi := HASH(Hi || msg.ephemeral) */
1417 wg_algo_hash(hash, wgmi->wgmi_ephemeral, sizeof(wgmi->wgmi_ephemeral));
1418
1419 WG_DUMP_HASH("ckey", ckey);
1420
1421 /* [N] 2.2: "es" */
1422 /* Ci, k := KDF2(Ci, DH(Ei^priv, Sr^pub)) */
1423 wg_algo_dh_kdf(ckey, cipher_key, wg->wg_privkey, wgmi->wgmi_ephemeral);
1424
1425 WG_DUMP_HASH48("wgmi_static", wgmi->wgmi_static);
1426
1427 /* [N] 2.2: "s" */
1428 /* msg.static := AEAD(k, 0, Si^pub, Hi) */
1429 error = wg_algo_aead_dec(peer_pubkey, WG_STATIC_KEY_LEN, cipher_key, 0,
1430 wgmi->wgmi_static, sizeof(wgmi->wgmi_static), hash, sizeof(hash));
1431 if (error != 0) {
1432 WG_LOG_RATECHECK(&wg->wg_ppsratecheck, LOG_DEBUG,
1433 "wg_algo_aead_dec for secret key failed\n");
1434 return;
1435 }
1436 /* Hi := HASH(Hi || msg.static) */
1437 wg_algo_hash(hash, wgmi->wgmi_static, sizeof(wgmi->wgmi_static));
1438
1439 wgp = wg_lookup_peer_by_pubkey(wg, peer_pubkey, &psref_peer);
1440 if (wgp == NULL) {
1441 WG_DLOG("peer not found\n");
1442 return;
1443 }
1444
1445 /*
1446 * Lock the peer to serialize access to cookie state.
1447 *
1448 * XXX Can we safely avoid holding the lock across DH? Take it
1449 * just to verify mac2 and then unlock/DH/lock?
1450 */
1451 mutex_enter(wgp->wgp_lock);
1452
1453 if (__predict_false(wg_is_underload(wg, wgp, WG_MSG_TYPE_INIT))) {
1454 WG_TRACE("under load");
1455 /*
1456 * [W] 5.3: Denial of Service Mitigation & Cookies
1457 * "the responder, ..., and when under load may reject messages
1458 * with an invalid msg.mac2. If the responder receives a
1459 * message with a valid msg.mac1 yet with an invalid msg.mac2,
1460 * and is under load, it may respond with a cookie reply
1461 * message"
1462 */
1463 uint8_t zero[WG_MAC_LEN] = {0};
1464 if (consttime_memequal(wgmi->wgmi_mac2, zero, sizeof(zero))) {
1465 WG_TRACE("sending a cookie message: no cookie included");
1466 (void)wg_send_cookie_msg(wg, wgp, wgmi->wgmi_sender,
1467 wgmi->wgmi_mac1, src);
1468 goto out;
1469 }
1470 if (!wgp->wgp_last_sent_cookie_valid) {
1471 WG_TRACE("sending a cookie message: no cookie sent ever");
1472 (void)wg_send_cookie_msg(wg, wgp, wgmi->wgmi_sender,
1473 wgmi->wgmi_mac1, src);
1474 goto out;
1475 }
1476 uint8_t mac2[WG_MAC_LEN];
1477 wg_algo_mac(mac2, sizeof(mac2), wgp->wgp_last_sent_cookie,
1478 WG_COOKIE_LEN, (const uint8_t *)wgmi,
1479 offsetof(struct wg_msg_init, wgmi_mac2), NULL, 0);
1480 if (!consttime_memequal(mac2, wgmi->wgmi_mac2, sizeof(mac2))) {
1481 WG_DLOG("mac2 is invalid\n");
1482 goto out;
1483 }
1484 WG_TRACE("under load, but continue to sending");
1485 }
1486
1487 /* [N] 2.2: "ss" */
1488 /* Ci, k := KDF2(Ci, DH(Si^priv, Sr^pub)) */
1489 wg_algo_dh_kdf(ckey, cipher_key, wg->wg_privkey, wgp->wgp_pubkey);
1490
1491 /* msg.timestamp := AEAD(k, TIMESTAMP(), Hi) */
1492 wg_timestamp_t timestamp;
1493 error = wg_algo_aead_dec(timestamp, sizeof(timestamp), cipher_key, 0,
1494 wgmi->wgmi_timestamp, sizeof(wgmi->wgmi_timestamp),
1495 hash, sizeof(hash));
1496 if (error != 0) {
1497 WG_LOG_RATECHECK(&wgp->wgp_ppsratecheck, LOG_DEBUG,
1498 "wg_algo_aead_dec for timestamp failed\n");
1499 goto out;
1500 }
1501 /* Hi := HASH(Hi || msg.timestamp) */
1502 wg_algo_hash(hash, wgmi->wgmi_timestamp, sizeof(wgmi->wgmi_timestamp));
1503
1504 /*
1505 * [W] 5.1 "The responder keeps track of the greatest timestamp
1506 * received per peer and discards packets containing
1507 * timestamps less than or equal to it."
1508 */
1509 ret = memcmp(timestamp, wgp->wgp_timestamp_latest_init,
1510 sizeof(timestamp));
1511 if (ret <= 0) {
1512 WG_LOG_RATECHECK(&wgp->wgp_ppsratecheck, LOG_DEBUG,
1513 "invalid init msg: timestamp is old\n");
1514 goto out;
1515 }
1516 memcpy(wgp->wgp_timestamp_latest_init, timestamp, sizeof(timestamp));
1517
1518 /*
1519 * Message is good -- we're committing to handle it now, unless
1520 * we were already initiating a session.
1521 */
1522 wgs = wgp->wgp_session_unstable;
1523 switch (wgs->wgs_state) {
1524 case WGS_STATE_UNKNOWN: /* new session initiated by peer */
1525 wg_get_session_index(wg, wgs);
1526 break;
1527 case WGS_STATE_INIT_ACTIVE: /* we're already initiating, drop */
1528 WG_TRACE("Session already initializing, ignoring the message");
1529 goto out;
1530 case WGS_STATE_INIT_PASSIVE: /* peer is retrying, start over */
1531 WG_TRACE("Session already initializing, destroying old states");
1532 wg_clear_states(wgs);
1533 /* keep session index */
1534 break;
1535 case WGS_STATE_ESTABLISHED: /* can't happen */
1536 panic("unstable session can't be established");
1537 break;
1538 case WGS_STATE_DESTROYING: /* rekey initiated by peer */
1539 WG_TRACE("Session destroying, but force to clear");
1540 callout_stop(&wgp->wgp_session_dtor_timer);
1541 wg_clear_states(wgs);
1542 /* keep session index */
1543 break;
1544 default:
1545 panic("invalid session state: %d", wgs->wgs_state);
1546 }
1547 wgs->wgs_state = WGS_STATE_INIT_PASSIVE;
1548
1549 memcpy(wgs->wgs_handshake_hash, hash, sizeof(hash));
1550 memcpy(wgs->wgs_chaining_key, ckey, sizeof(ckey));
1551 memcpy(wgs->wgs_ephemeral_key_peer, wgmi->wgmi_ephemeral,
1552 sizeof(wgmi->wgmi_ephemeral));
1553
1554 wg_update_endpoint_if_necessary(wgp, src);
1555
1556 (void)wg_send_handshake_msg_resp(wg, wgp, wgs, wgmi);
1557
1558 wg_calculate_keys(wgs, false);
1559 wg_clear_states(wgs);
1560
1561 out:
1562 mutex_exit(wgp->wgp_lock);
1563 wg_put_peer(wgp, &psref_peer);
1564 }
1565
1566 static struct socket *
1567 wg_get_so_by_af(struct wg_softc *wg, const int af)
1568 {
1569
1570 return (af == AF_INET) ? wg->wg_so4 : wg->wg_so6;
1571 }
1572
1573 static struct socket *
1574 wg_get_so_by_peer(struct wg_peer *wgp, struct wg_sockaddr *wgsa)
1575 {
1576
1577 return wg_get_so_by_af(wgp->wgp_sc, wgsa_family(wgsa));
1578 }
1579
1580 static struct wg_sockaddr *
1581 wg_get_endpoint_sa(struct wg_peer *wgp, struct psref *psref)
1582 {
1583 struct wg_sockaddr *wgsa;
1584 int s;
1585
1586 s = pserialize_read_enter();
1587 wgsa = atomic_load_consume(&wgp->wgp_endpoint);
1588 psref_acquire(psref, &wgsa->wgsa_psref, wg_psref_class);
1589 pserialize_read_exit(s);
1590
1591 return wgsa;
1592 }
1593
1594 static void
1595 wg_put_sa(struct wg_peer *wgp, struct wg_sockaddr *wgsa, struct psref *psref)
1596 {
1597
1598 psref_release(psref, &wgsa->wgsa_psref, wg_psref_class);
1599 }
1600
1601 static int
1602 wg_send_so(struct wg_peer *wgp, struct mbuf *m)
1603 {
1604 int error;
1605 struct socket *so;
1606 struct psref psref;
1607 struct wg_sockaddr *wgsa;
1608
1609 wgsa = wg_get_endpoint_sa(wgp, &psref);
1610 so = wg_get_so_by_peer(wgp, wgsa);
1611 error = sosend(so, wgsatosa(wgsa), NULL, m, NULL, 0, curlwp);
1612 wg_put_sa(wgp, wgsa, &psref);
1613
1614 return error;
1615 }
1616
1617 static int
1618 wg_send_handshake_msg_init(struct wg_softc *wg, struct wg_peer *wgp)
1619 {
1620 int error;
1621 struct mbuf *m;
1622 struct wg_msg_init *wgmi;
1623 struct wg_session *wgs;
1624
1625 KASSERT(mutex_owned(wgp->wgp_lock));
1626
1627 wgs = wgp->wgp_session_unstable;
1628 /* XXX pull dispatch out into wg_task_send_init_message */
1629 switch (wgs->wgs_state) {
1630 case WGS_STATE_UNKNOWN: /* new session initiated by us */
1631 wg_get_session_index(wg, wgs);
1632 break;
1633 case WGS_STATE_INIT_ACTIVE: /* we're already initiating, stop */
1634 WG_TRACE("Session already initializing, skip starting new one");
1635 return EBUSY;
1636 case WGS_STATE_INIT_PASSIVE: /* peer was trying -- XXX what now? */
1637 WG_TRACE("Session already initializing, destroying old states");
1638 wg_clear_states(wgs);
1639 /* keep session index */
1640 break;
1641 case WGS_STATE_ESTABLISHED: /* can't happen */
1642 panic("unstable session can't be established");
1643 break;
1644 case WGS_STATE_DESTROYING: /* rekey initiated by us too early */
1645 WG_TRACE("Session destroying");
1646 /* XXX should wait? */
1647 return EBUSY;
1648 }
1649 wgs->wgs_state = WGS_STATE_INIT_ACTIVE;
1650
1651 m = m_gethdr(M_WAIT, MT_DATA);
1652 m->m_pkthdr.len = m->m_len = sizeof(*wgmi);
1653 wgmi = mtod(m, struct wg_msg_init *);
1654 wg_fill_msg_init(wg, wgp, wgs, wgmi);
1655
1656 error = wg->wg_ops->send_hs_msg(wgp, m);
1657 if (error == 0) {
1658 WG_TRACE("init msg sent");
1659
1660 if (wgp->wgp_handshake_start_time == 0)
1661 wgp->wgp_handshake_start_time = time_uptime;
1662 callout_schedule(&wgp->wgp_handshake_timeout_timer,
1663 MIN(wg_rekey_timeout, INT_MAX/hz) * hz);
1664 } else {
1665 wg_put_session_index(wg, wgs);
1666 /* Initiation failed; toss packet waiting for it if any. */
1667 if ((m = atomic_swap_ptr(&wgp->wgp_pending, NULL)) != NULL)
1668 m_freem(m);
1669 }
1670
1671 return error;
1672 }
1673
1674 static void
1675 wg_fill_msg_resp(struct wg_softc *wg, struct wg_peer *wgp,
1676 struct wg_session *wgs, struct wg_msg_resp *wgmr,
1677 const struct wg_msg_init *wgmi)
1678 {
1679 uint8_t ckey[WG_CHAINING_KEY_LEN]; /* [W] 5.4.3: Cr */
1680 uint8_t hash[WG_HASH_LEN]; /* [W] 5.4.3: Hr */
1681 uint8_t cipher_key[WG_KDF_OUTPUT_LEN];
1682 uint8_t pubkey[WG_EPHEMERAL_KEY_LEN];
1683 uint8_t privkey[WG_EPHEMERAL_KEY_LEN];
1684
1685 KASSERT(mutex_owned(wgp->wgp_lock));
1686 KASSERT(wgs == wgp->wgp_session_unstable);
1687 KASSERT(wgs->wgs_state == WGS_STATE_INIT_PASSIVE);
1688
1689 memcpy(hash, wgs->wgs_handshake_hash, sizeof(hash));
1690 memcpy(ckey, wgs->wgs_chaining_key, sizeof(ckey));
1691
1692 wgmr->wgmr_type = htole32(WG_MSG_TYPE_RESP);
1693 wgmr->wgmr_sender = wgs->wgs_local_index;
1694 wgmr->wgmr_receiver = wgmi->wgmi_sender;
1695
1696 /* [W] 5.4.3 Second Message: Responder to Initiator */
1697
1698 /* [N] 2.2: "e" */
1699 /* Er^priv, Er^pub := DH-GENERATE() */
1700 wg_algo_generate_keypair(pubkey, privkey);
1701 /* Cr := KDF1(Cr, Er^pub) */
1702 wg_algo_kdf(ckey, NULL, NULL, ckey, pubkey, sizeof(pubkey));
1703 /* msg.ephemeral := Er^pub */
1704 memcpy(wgmr->wgmr_ephemeral, pubkey, sizeof(wgmr->wgmr_ephemeral));
1705 /* Hr := HASH(Hr || msg.ephemeral) */
1706 wg_algo_hash(hash, pubkey, sizeof(pubkey));
1707
1708 WG_DUMP_HASH("ckey", ckey);
1709 WG_DUMP_HASH("hash", hash);
1710
1711 /* [N] 2.2: "ee" */
1712 /* Cr := KDF1(Cr, DH(Er^priv, Ei^pub)) */
1713 wg_algo_dh_kdf(ckey, NULL, privkey, wgs->wgs_ephemeral_key_peer);
1714
1715 /* [N] 2.2: "se" */
1716 /* Cr := KDF1(Cr, DH(Er^priv, Si^pub)) */
1717 wg_algo_dh_kdf(ckey, NULL, privkey, wgp->wgp_pubkey);
1718
1719 /* [N] 9.2: "psk" */
1720 {
1721 uint8_t kdfout[WG_KDF_OUTPUT_LEN];
1722 /* Cr, r, k := KDF3(Cr, Q) */
1723 wg_algo_kdf(ckey, kdfout, cipher_key, ckey, wgp->wgp_psk,
1724 sizeof(wgp->wgp_psk));
1725 /* Hr := HASH(Hr || r) */
1726 wg_algo_hash(hash, kdfout, sizeof(kdfout));
1727 }
1728
1729 /* msg.empty := AEAD(k, 0, e, Hr) */
1730 wg_algo_aead_enc(wgmr->wgmr_empty, sizeof(wgmr->wgmr_empty),
1731 cipher_key, 0, NULL, 0, hash, sizeof(hash));
1732 /* Hr := HASH(Hr || msg.empty) */
1733 wg_algo_hash(hash, wgmr->wgmr_empty, sizeof(wgmr->wgmr_empty));
1734
1735 WG_DUMP_HASH("wgmr_empty", wgmr->wgmr_empty);
1736
1737 /* [W] 5.4.4: Cookie MACs */
1738 /* msg.mac1 := MAC(HASH(LABEL-MAC1 || Sm'^pub), msg_a) */
1739 wg_algo_mac_mac1(wgmr->wgmr_mac1, sizeof(wgmr->wgmr_mac1),
1740 wgp->wgp_pubkey, sizeof(wgp->wgp_pubkey),
1741 (const uint8_t *)wgmr, offsetof(struct wg_msg_resp, wgmr_mac1));
1742 /* Need mac1 to decrypt a cookie from a cookie message */
1743 memcpy(wgp->wgp_last_sent_mac1, wgmr->wgmr_mac1,
1744 sizeof(wgp->wgp_last_sent_mac1));
1745 wgp->wgp_last_sent_mac1_valid = true;
1746
1747 if (wgp->wgp_latest_cookie_time == 0 ||
1748 (time_uptime - wgp->wgp_latest_cookie_time) >= WG_COOKIE_TIME)
1749 /* msg.mac2 := 0^16 */
1750 memset(wgmr->wgmr_mac2, 0, sizeof(wgmr->wgmr_mac2));
1751 else {
1752 /* msg.mac2 := MAC(Lm, msg_b) */
1753 wg_algo_mac(wgmr->wgmr_mac2, sizeof(wgmr->wgmr_mac2),
1754 wgp->wgp_latest_cookie, WG_COOKIE_LEN,
1755 (const uint8_t *)wgmr,
1756 offsetof(struct wg_msg_resp, wgmr_mac2),
1757 NULL, 0);
1758 }
1759
1760 memcpy(wgs->wgs_handshake_hash, hash, sizeof(hash));
1761 memcpy(wgs->wgs_chaining_key, ckey, sizeof(ckey));
1762 memcpy(wgs->wgs_ephemeral_key_pub, pubkey, sizeof(pubkey));
1763 memcpy(wgs->wgs_ephemeral_key_priv, privkey, sizeof(privkey));
1764 wgs->wgs_remote_index = wgmi->wgmi_sender;
1765 WG_DLOG("sender=%x\n", wgs->wgs_local_index);
1766 WG_DLOG("receiver=%x\n", wgs->wgs_remote_index);
1767 }
1768
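/*
 * wg_swap_sessions(wgp)
 *
 *	Promote the unstable, newly ESTABLISHED session to stable and
 *	demote the old stable session to unstable.  The release store
 *	pairs with the atomic_load_consume in wg_get_stable_session so
 *	that readers observe fully initialized session state; per the
 *	locking notes at the top of the file, the caller holds wgp_lock
 *	and must wait for passive references to drain before reusing
 *	the demoted session.
 */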
1769 static void
1770 wg_swap_sessions(struct wg_peer *wgp)
1771 {
1772 struct wg_session *wgs, *wgs_prev;
1773
1774 KASSERT(mutex_owned(wgp->wgp_lock));
1775
1776 wgs = wgp->wgp_session_unstable;
1777 KASSERT(wgs->wgs_state == WGS_STATE_ESTABLISHED);
1778
1779 wgs_prev = wgp->wgp_session_stable;
1780 KASSERT(wgs_prev->wgs_state == WGS_STATE_ESTABLISHED ||
1781 wgs_prev->wgs_state == WGS_STATE_UNKNOWN);
1782 atomic_store_release(&wgp->wgp_session_stable, wgs);
1783 wgp->wgp_session_unstable = wgs_prev;
1784 }
1785
1786 static void
1787 wg_handle_msg_resp(struct wg_softc *wg, const struct wg_msg_resp *wgmr,
1788 const struct sockaddr *src)
1789 {
1790 uint8_t ckey[WG_CHAINING_KEY_LEN]; /* [W] 5.4.3: Cr */
1791 uint8_t hash[WG_HASH_LEN]; /* [W] 5.4.3: Hr */
1792 uint8_t cipher_key[WG_KDF_OUTPUT_LEN];
1793 struct wg_peer *wgp;
1794 struct wg_session *wgs;
1795 struct psref psref;
1796 int error;
1797 uint8_t mac1[WG_MAC_LEN];
1798 struct wg_session *wgs_prev;
1799 struct mbuf *m;
1800
1801 wg_algo_mac_mac1(mac1, sizeof(mac1),
1802 wg->wg_pubkey, sizeof(wg->wg_pubkey),
1803 (const uint8_t *)wgmr, offsetof(struct wg_msg_resp, wgmr_mac1));
1804
1805 /*
1806 * [W] 5.3: Denial of Service Mitigation & Cookies
1807 * "the responder, ..., must always reject messages with an invalid
1808 * msg.mac1"
1809 */
1810 if (!consttime_memequal(mac1, wgmr->wgmr_mac1, sizeof(mac1))) {
1811 WG_DLOG("mac1 is invalid\n");
1812 return;
1813 }
1814
1815 WG_TRACE("resp msg received");
1816 wgs = wg_lookup_session_by_index(wg, wgmr->wgmr_receiver, &psref);
1817 if (wgs == NULL) {
1818 WG_TRACE("No session found");
1819 return;
1820 }
1821
1822 wgp = wgs->wgs_peer;
1823
1824 mutex_enter(wgp->wgp_lock);
1825
1826 /* If we weren't waiting for a handshake response, drop it. */
1827 if (wgs->wgs_state != WGS_STATE_INIT_ACTIVE) {
1828 WG_TRACE("peer sent spurious handshake response, ignoring");
1829 goto out;
1830 }
1831
1832 if (__predict_false(wg_is_underload(wg, wgp, WG_MSG_TYPE_RESP))) {
1833 WG_TRACE("under load");
1834 /*
1835 * [W] 5.3: Denial of Service Mitigation & Cookies
1836 * "the responder, ..., and when under load may reject messages
1837 * with an invalid msg.mac2. If the responder receives a
1838 * message with a valid msg.mac1 yet with an invalid msg.mac2,
1839 * and is under load, it may respond with a cookie reply
1840 * message"
1841 */
1842 uint8_t zero[WG_MAC_LEN] = {0};
1843 if (consttime_memequal(wgmr->wgmr_mac2, zero, sizeof(zero))) {
1844 WG_TRACE("sending a cookie message: no cookie included");
1845 (void)wg_send_cookie_msg(wg, wgp, wgmr->wgmr_sender,
1846 wgmr->wgmr_mac1, src);
1847 goto out;
1848 }
1849 if (!wgp->wgp_last_sent_cookie_valid) {
1850 WG_TRACE("sending a cookie message: no cookie sent ever");
1851 (void)wg_send_cookie_msg(wg, wgp, wgmr->wgmr_sender,
1852 wgmr->wgmr_mac1, src);
1853 goto out;
1854 }
1855 uint8_t mac2[WG_MAC_LEN];
1856 wg_algo_mac(mac2, sizeof(mac2), wgp->wgp_last_sent_cookie,
1857 WG_COOKIE_LEN, (const uint8_t *)wgmr,
1858 offsetof(struct wg_msg_resp, wgmr_mac2), NULL, 0);
1859 if (!consttime_memequal(mac2, wgmr->wgmr_mac2, sizeof(mac2))) {
1860 WG_DLOG("mac2 is invalid\n");
1861 goto out;
1862 }
1863 WG_TRACE("under load, but continue to sending");
1864 }
1865
1866 memcpy(hash, wgs->wgs_handshake_hash, sizeof(hash));
1867 memcpy(ckey, wgs->wgs_chaining_key, sizeof(ckey));
1868
1869 /*
1870 * [W] 5.4.3 Second Message: Responder to Initiator
1871 * "When the initiator receives this message, it does the same
1872 * operations so that its final state variables are identical,
1873 * replacing the operands of the DH function to produce equivalent
1874 * values."
1875 * Note that the following comments of operations are just copies of
1876 * the initiator's ones.
1877 */
1878
1879 /* [N] 2.2: "e" */
1880 /* Cr := KDF1(Cr, Er^pub) */
1881 wg_algo_kdf(ckey, NULL, NULL, ckey, wgmr->wgmr_ephemeral,
1882 sizeof(wgmr->wgmr_ephemeral));
1883 /* Hr := HASH(Hr || msg.ephemeral) */
1884 wg_algo_hash(hash, wgmr->wgmr_ephemeral, sizeof(wgmr->wgmr_ephemeral));
1885
1886 WG_DUMP_HASH("ckey", ckey);
1887 WG_DUMP_HASH("hash", hash);
1888
1889 /* [N] 2.2: "ee" */
1890 /* Cr := KDF1(Cr, DH(Er^priv, Ei^pub)) */
1891 wg_algo_dh_kdf(ckey, NULL, wgs->wgs_ephemeral_key_priv,
1892 wgmr->wgmr_ephemeral);
1893
1894 /* [N] 2.2: "se" */
1895 /* Cr := KDF1(Cr, DH(Er^priv, Si^pub)) */
1896 wg_algo_dh_kdf(ckey, NULL, wg->wg_privkey, wgmr->wgmr_ephemeral);
1897
1898 /* [N] 9.2: "psk" */
1899 {
1900 uint8_t kdfout[WG_KDF_OUTPUT_LEN];
1901 /* Cr, r, k := KDF3(Cr, Q) */
1902 wg_algo_kdf(ckey, kdfout, cipher_key, ckey, wgp->wgp_psk,
1903 sizeof(wgp->wgp_psk));
1904 /* Hr := HASH(Hr || r) */
1905 wg_algo_hash(hash, kdfout, sizeof(kdfout));
1906 }
1907
1908 {
1909 uint8_t out[sizeof(wgmr->wgmr_empty)]; /* for safety */
1910 /* msg.empty := AEAD(k, 0, e, Hr) */
1911 error = wg_algo_aead_dec(out, 0, cipher_key, 0, wgmr->wgmr_empty,
1912 sizeof(wgmr->wgmr_empty), hash, sizeof(hash));
1913 WG_DUMP_HASH("wgmr_empty", wgmr->wgmr_empty);
1914 if (error != 0) {
1915 WG_LOG_RATECHECK(&wgp->wgp_ppsratecheck, LOG_DEBUG,
1916 "wg_algo_aead_dec for empty message failed\n");
1917 goto out;
1918 }
1919 /* Hr := HASH(Hr || msg.empty) */
1920 wg_algo_hash(hash, wgmr->wgmr_empty, sizeof(wgmr->wgmr_empty));
1921 }
1922
1923 memcpy(wgs->wgs_handshake_hash, hash, sizeof(wgs->wgs_handshake_hash));
1924 memcpy(wgs->wgs_chaining_key, ckey, sizeof(wgs->wgs_chaining_key));
1925 wgs->wgs_remote_index = wgmr->wgmr_sender;
1926 WG_DLOG("receiver=%x\n", wgs->wgs_remote_index);
1927
1928 KASSERT(wgs->wgs_state == WGS_STATE_INIT_ACTIVE);
1929 wgs->wgs_state = WGS_STATE_ESTABLISHED;
1930 wgs->wgs_time_established = time_uptime;
1931 wgs->wgs_time_last_data_sent = 0;
1932 wgs->wgs_is_initiator = true;
1933 wg_calculate_keys(wgs, true);
1934 wg_clear_states(wgs);
1935 WG_TRACE("WGS_STATE_ESTABLISHED");
1936
1937 callout_stop(&wgp->wgp_handshake_timeout_timer);
1938
1939 wg_swap_sessions(wgp);
1940 KASSERT(wgs == wgp->wgp_session_stable);
1941 wgs_prev = wgp->wgp_session_unstable;
1942 getnanotime(&wgp->wgp_last_handshake_time);
1943 wgp->wgp_handshake_start_time = 0;
1944 wgp->wgp_last_sent_mac1_valid = false;
1945 wgp->wgp_last_sent_cookie_valid = false;
1946
1947 wg_schedule_rekey_timer(wgp);
1948
1949 wg_update_endpoint_if_necessary(wgp, src);
1950
1951 /*
1952 * If we had a data packet queued up, send it; otherwise send a
1953 * keepalive message -- either way we have to send something
1954 * immediately or else the responder will never answer.
1955 */
1956 if ((m = atomic_swap_ptr(&wgp->wgp_pending, NULL)) != NULL) {
1957 const uint32_t h = curcpu()->ci_index; // pktq_rps_hash(m)
1958
1959 M_SETCTX(m, wgp);
1960 if (__predict_false(!pktq_enqueue(wg_pktq, m, h))) {
1961 WGLOG(LOG_ERR, "pktq full, dropping\n");
1962 m_freem(m);
1963 }
1964 } else {
1965 wg_send_keepalive_msg(wgp, wgs);
1966 }
1967
1968 if (wgs_prev->wgs_state == WGS_STATE_ESTABLISHED) {
1969 /* Wait for wg_get_stable_session to drain. */
1970 pserialize_perform(wgp->wgp_psz);
1971
1972 /* Transition ESTABLISHED->DESTROYING. */
1973 wgs_prev->wgs_state = WGS_STATE_DESTROYING;
1974
1975 /* We can't destroy the old session immediately */
1976 wg_schedule_session_dtor_timer(wgp);
1977 } else {
1978 KASSERTMSG(wgs_prev->wgs_state == WGS_STATE_UNKNOWN,
1979 "state=%d", wgs_prev->wgs_state);
1980 }
1981
1982 out:
1983 mutex_exit(wgp->wgp_lock);
1984 wg_put_session(wgs, &psref);
1985 }
1986
1987 static int
1988 wg_send_handshake_msg_resp(struct wg_softc *wg, struct wg_peer *wgp,
1989 struct wg_session *wgs, const struct wg_msg_init *wgmi)
1990 {
1991 int error;
1992 struct mbuf *m;
1993 struct wg_msg_resp *wgmr;
1994
1995 KASSERT(mutex_owned(wgp->wgp_lock));
1996 KASSERT(wgs == wgp->wgp_session_unstable);
1997 KASSERT(wgs->wgs_state == WGS_STATE_INIT_PASSIVE);
1998
1999 m = m_gethdr(M_WAIT, MT_DATA);
2000 m->m_pkthdr.len = m->m_len = sizeof(*wgmr);
2001 wgmr = mtod(m, struct wg_msg_resp *);
2002 wg_fill_msg_resp(wg, wgp, wgs, wgmr, wgmi);
2003
2004 error = wg->wg_ops->send_hs_msg(wgp, m);
2005 if (error == 0)
2006 WG_TRACE("resp msg sent");
2007 return error;
2008 }
2009
2010 static struct wg_peer *
2011 wg_lookup_peer_by_pubkey(struct wg_softc *wg,
2012 const uint8_t pubkey[WG_STATIC_KEY_LEN], struct psref *psref)
2013 {
2014 struct wg_peer *wgp;
2015
2016 int s = pserialize_read_enter();
2017 wgp = thmap_get(wg->wg_peers_bypubkey, pubkey, WG_STATIC_KEY_LEN);
2018 if (wgp != NULL)
2019 wg_get_peer(wgp, psref);
2020 pserialize_read_exit(s);
2021
2022 return wgp;
2023 }
2024
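/*
 * wg_fill_msg_cookie(wg, wgp, wgmc, sender, mac1, src)
 *
 *	Construct a cookie reply ([W] 5.4.7): compute the cookie as a
 *	MAC of the peer's transport address keyed by a secret random
 *	value that is rotated every WG_RANDVAL_TIME seconds, then
 *	encrypt it with the XAEAD construction under a key derived
 *	from our public key, using the received mac1 as the additional
 *	authenticated data.  The plaintext cookie is remembered so the
 *	msg.mac2 of subsequent handshake messages can be verified.
 */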
2025 static void
2026 wg_fill_msg_cookie(struct wg_softc *wg, struct wg_peer *wgp,
2027 struct wg_msg_cookie *wgmc, const uint32_t sender,
2028 const uint8_t mac1[WG_MAC_LEN], const struct sockaddr *src)
2029 {
2030 uint8_t cookie[WG_COOKIE_LEN];
2031 uint8_t key[WG_HASH_LEN];
2032 uint8_t addr[sizeof(struct in6_addr)];
2033 size_t addrlen;
2034 uint16_t uh_sport; /* network byte order */
2035
2036 KASSERT(mutex_owned(wgp->wgp_lock));
2037
2038 wgmc->wgmc_type = htole32(WG_MSG_TYPE_COOKIE);
2039 wgmc->wgmc_receiver = sender;
2040 cprng_fast(wgmc->wgmc_salt, sizeof(wgmc->wgmc_salt));
2041
2042 /*
2043 * [W] 5.4.7: Under Load: Cookie Reply Message
2044 * "The secret variable, Rm, changes every two minutes to a
2045 * random value"
2046 */
2047 if ((time_uptime - wgp->wgp_last_genrandval_time) > WG_RANDVAL_TIME) {
2048 wgp->wgp_randval = cprng_strong32();
2049 wgp->wgp_last_genrandval_time = time_uptime;
2050 }
2051
2052 switch (src->sa_family) {
2053 case AF_INET: {
2054 const struct sockaddr_in *sin = satocsin(src);
2055 addrlen = sizeof(sin->sin_addr);
2056 memcpy(addr, &sin->sin_addr, addrlen);
2057 uh_sport = sin->sin_port;
2058 break;
2059 }
2060 #ifdef INET6
2061 case AF_INET6: {
2062 const struct sockaddr_in6 *sin6 = satocsin6(src);
2063 addrlen = sizeof(sin6->sin6_addr);
2064 memcpy(addr, &sin6->sin6_addr, addrlen);
2065 uh_sport = sin6->sin6_port;
2066 break;
2067 }
2068 #endif
2069 default:
2070 panic("invalid af=%d", src->sa_family);
2071 }
2072
2073 wg_algo_mac(cookie, sizeof(cookie),
2074 (const uint8_t *)&wgp->wgp_randval, sizeof(wgp->wgp_randval),
2075 addr, addrlen, (const uint8_t *)&uh_sport, sizeof(uh_sport));
2076 wg_algo_mac_cookie(key, sizeof(key), wg->wg_pubkey,
2077 sizeof(wg->wg_pubkey));
2078 wg_algo_xaead_enc(wgmc->wgmc_cookie, sizeof(wgmc->wgmc_cookie), key,
2079 cookie, sizeof(cookie), mac1, WG_MAC_LEN, wgmc->wgmc_salt);
2080
2081 /* Need to store to calculate mac2 */
2082 memcpy(wgp->wgp_last_sent_cookie, cookie, sizeof(cookie));
2083 wgp->wgp_last_sent_cookie_valid = true;
2084 }
2085
2086 static int
2087 wg_send_cookie_msg(struct wg_softc *wg, struct wg_peer *wgp,
2088 const uint32_t sender, const uint8_t mac1[WG_MAC_LEN],
2089 const struct sockaddr *src)
2090 {
2091 int error;
2092 struct mbuf *m;
2093 struct wg_msg_cookie *wgmc;
2094
2095 KASSERT(mutex_owned(wgp->wgp_lock));
2096
2097 m = m_gethdr(M_WAIT, MT_DATA);
2098 m->m_pkthdr.len = m->m_len = sizeof(*wgmc);
2099 wgmc = mtod(m, struct wg_msg_cookie *);
2100 wg_fill_msg_cookie(wg, wgp, wgmc, sender, mac1, src);
2101
2102 error = wg->wg_ops->send_hs_msg(wgp, m);
2103 if (error == 0)
2104 WG_TRACE("cookie msg sent");
2105 return error;
2106 }
2107
2108 static bool
2109 wg_is_underload(struct wg_softc *wg, struct wg_peer *wgp, int msgtype)
2110 {
2111 #ifdef WG_DEBUG_PARAMS
2112 if (wg_force_underload)
2113 return true;
2114 #endif
2115
2116 /*
2117 * XXX we don't have a means of load estimation.  The purpose of
2118 * the mechanism is DoS mitigation, so we consider frequent handshake
2119 * messages as (a kind of) load; if a message of the same type comes
2120 * to a peer within 1 second, we consider ourselves under load.
2121 */
2122 time_t last = wgp->wgp_last_msg_received_time[msgtype];
2123 wgp->wgp_last_msg_received_time[msgtype] = time_uptime;
2124 return (time_uptime - last) == 0;
2125 }
2126
2127 static void
2128 wg_calculate_keys(struct wg_session *wgs, const bool initiator)
2129 {
2130
2131 KASSERT(mutex_owned(wgs->wgs_peer->wgp_lock));
2132
2133 /*
2134 * [W] 5.4.5: Ti^send = Tr^recv, Ti^recv = Tr^send := KDF2(Ci = Cr, e)
2135 */
2136 if (initiator) {
2137 wg_algo_kdf(wgs->wgs_tkey_send, wgs->wgs_tkey_recv, NULL,
2138 wgs->wgs_chaining_key, NULL, 0);
2139 } else {
2140 wg_algo_kdf(wgs->wgs_tkey_recv, wgs->wgs_tkey_send, NULL,
2141 wgs->wgs_chaining_key, NULL, 0);
2142 }
2143 WG_DUMP_HASH("wgs_tkey_send", wgs->wgs_tkey_send);
2144 WG_DUMP_HASH("wgs_tkey_recv", wgs->wgs_tkey_recv);
2145 }
2146
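/*
 * Send counter accessors.  On platforms with 64-bit atomic
 * load/store the per-session nonce counter is maintained lock-free;
 * otherwise a small mutex serializes access.
 * wg_session_inc_send_counter returns the value prior to the
 * increment, i.e. the nonce for the message about to be sent.
 */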
2147 static uint64_t
2148 wg_session_get_send_counter(struct wg_session *wgs)
2149 {
2150 #ifdef __HAVE_ATOMIC64_LOADSTORE
2151 return atomic_load_relaxed(&wgs->wgs_send_counter);
2152 #else
2153 uint64_t send_counter;
2154
2155 mutex_enter(&wgs->wgs_send_counter_lock);
2156 send_counter = wgs->wgs_send_counter;
2157 mutex_exit(&wgs->wgs_send_counter_lock);
2158
2159 return send_counter;
2160 #endif
2161 }
2162
2163 static uint64_t
2164 wg_session_inc_send_counter(struct wg_session *wgs)
2165 {
2166 #ifdef __HAVE_ATOMIC64_LOADSTORE
2167 return atomic_inc_64_nv(&wgs->wgs_send_counter) - 1;
2168 #else
2169 uint64_t send_counter;
2170
2171 mutex_enter(&wgs->wgs_send_counter_lock);
2172 send_counter = wgs->wgs_send_counter++;
2173 mutex_exit(&wgs->wgs_send_counter_lock);
2174
2175 return send_counter;
2176 #endif
2177 }
2178
2179 static void
2180 wg_clear_states(struct wg_session *wgs)
2181 {
2182
2183 KASSERT(mutex_owned(wgs->wgs_peer->wgp_lock));
2184
2185 wgs->wgs_send_counter = 0;
2186 sliwin_reset(&wgs->wgs_recvwin->window);
2187
2188 #define wgs_clear(v) explicit_memset(wgs->wgs_##v, 0, sizeof(wgs->wgs_##v))
2189 wgs_clear(handshake_hash);
2190 wgs_clear(chaining_key);
2191 wgs_clear(ephemeral_key_pub);
2192 wgs_clear(ephemeral_key_priv);
2193 wgs_clear(ephemeral_key_peer);
2194 #undef wgs_clear
2195 }
2196
2197 static struct wg_session *
2198 wg_lookup_session_by_index(struct wg_softc *wg, const uint32_t index,
2199 struct psref *psref)
2200 {
2201 struct wg_session *wgs;
2202
2203 int s = pserialize_read_enter();
2204 wgs = thmap_get(wg->wg_sessions_byindex, &index, sizeof index);
2205 if (wgs != NULL) {
2206 KASSERT(atomic_load_relaxed(&wgs->wgs_state) !=
2207 WGS_STATE_UNKNOWN);
2208 psref_acquire(psref, &wgs->wgs_psref, wg_psref_class);
2209 }
2210 pserialize_read_exit(s);
2211
2212 return wgs;
2213 }
2214
2215 static void
2216 wg_schedule_rekey_timer(struct wg_peer *wgp)
2217 {
2218 int timeout = MIN(wg_rekey_after_time, INT_MAX/hz);
2219
2220 callout_schedule(&wgp->wgp_rekey_timer, timeout * hz);
2221 }
2222
2223 static void
2224 wg_send_keepalive_msg(struct wg_peer *wgp, struct wg_session *wgs)
2225 {
2226 struct mbuf *m;
2227
2228 /*
2229 * [W] 6.5 Passive Keepalive
2230 * "A keepalive message is simply a transport data message with
2231 * a zero-length encapsulated encrypted inner-packet."
2232 */
2233 m = m_gethdr(M_WAIT, MT_DATA);
2234 wg_send_data_msg(wgp, wgs, m);
2235 }
2236
2237 static bool
2238 wg_need_to_send_init_message(struct wg_session *wgs)
2239 {
2240 /*
2241 * [W] 6.2 Transport Message Limits
2242 * "if a peer is the initiator of a current secure session,
2243 * WireGuard will send a handshake initiation message to begin
2244 * a new secure session ... if after receiving a transport data
2245 * message, the current secure session is (REJECT-AFTER-TIME -
2246 * KEEPALIVE-TIMEOUT - REKEY-TIMEOUT) seconds old and it has
2247 * not yet acted upon this event."
2248 */
2249 return wgs->wgs_is_initiator && wgs->wgs_time_last_data_sent == 0 &&
2250 (time_uptime - wgs->wgs_time_established) >=
2251 (wg_reject_after_time - wg_keepalive_timeout - wg_rekey_timeout);
2252 }
2253
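/*
 * wg_schedule_peer_task(wgp, task)
 *
 *	Record a deferred task bit for the peer under wgp_intr_lock
 *	and, if no tasks were already pending, enqueue the peer's work
 *	item on the global workqueue.  The accumulated bits are
 *	drained by wg_peer_work.
 */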
2254 static void
2255 wg_schedule_peer_task(struct wg_peer *wgp, int task)
2256 {
2257
2258 mutex_enter(wgp->wgp_intr_lock);
2259 WG_DLOG("tasks=%d, task=%d\n", wgp->wgp_tasks, task);
2260 if (wgp->wgp_tasks == 0)
2261 /*
2262 * XXX If the current CPU is already loaded -- e.g., if
2263 * there's already a bunch of handshakes queued up --
2264 * consider tossing this over to another CPU to
2265 * distribute the load.
2266 */
2267 workqueue_enqueue(wg_wq, &wgp->wgp_work, NULL);
2268 wgp->wgp_tasks |= task;
2269 mutex_exit(wgp->wgp_intr_lock);
2270 }
2271
2272 static void
2273 wg_change_endpoint(struct wg_peer *wgp, const struct sockaddr *new)
2274 {
2275 struct wg_sockaddr *wgsa_prev;
2276
2277 WG_TRACE("Changing endpoint");
2278
2279 memcpy(wgp->wgp_endpoint0, new, new->sa_len);
2280 wgsa_prev = wgp->wgp_endpoint;
2281 atomic_store_release(&wgp->wgp_endpoint, wgp->wgp_endpoint0);
2282 wgp->wgp_endpoint0 = wgsa_prev;
2283 atomic_store_release(&wgp->wgp_endpoint_available, true);
2284
2285 wg_schedule_peer_task(wgp, WGP_TASK_ENDPOINT_CHANGED);
2286 }
2287
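/*
 * wg_validate_inner_packet(packet, decrypted_len, af)
 *
 *	Sanity-check a decrypted inner packet: determine IPv4 vs. IPv6
 *	from the IP version nibble and verify that the length recorded
 *	in the inner IP header does not exceed the decrypted payload.
 *	Returns true and sets *af on success.
 */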
2288 static bool
2289 wg_validate_inner_packet(const char *packet, size_t decrypted_len, int *af)
2290 {
2291 uint16_t packet_len;
2292 const struct ip *ip;
2293
2294 if (__predict_false(decrypted_len < sizeof(struct ip)))
2295 return false;
2296
2297 ip = (const struct ip *)packet;
2298 if (ip->ip_v == 4)
2299 *af = AF_INET;
2300 else if (ip->ip_v == 6)
2301 *af = AF_INET6;
2302 else
2303 return false;
2304
2305 WG_DLOG("af=%d\n", *af);
2306
2307 if (*af == AF_INET) {
2308 packet_len = ntohs(ip->ip_len);
2309 } else {
2310 const struct ip6_hdr *ip6;
2311
2312 if (__predict_false(decrypted_len < sizeof(struct ip6_hdr)))
2313 return false;
2314
2315 ip6 = (const struct ip6_hdr *)packet;
2316 packet_len = sizeof(struct ip6_hdr) + ntohs(ip6->ip6_plen);
2317 }
2318
2319 WG_DLOG("packet_len=%u\n", packet_len);
2320 if (packet_len > decrypted_len)
2321 return false;
2322
2323 return true;
2324 }
2325
2326 static bool
2327 wg_validate_route(struct wg_softc *wg, struct wg_peer *wgp_expected,
2328 int af, char *packet)
2329 {
2330 struct sockaddr_storage ss;
2331 struct sockaddr *sa;
2332 struct psref psref;
2333 struct wg_peer *wgp;
2334 bool ok;
2335
2336 /*
2337 * II CRYPTOKEY ROUTING
2338 * "it will only accept it if its source IP resolves in the
2339 * table to the public key used in the secure session for
2340 * decrypting it."
2341 */
2342
2343 if (af == AF_INET) {
2344 const struct ip *ip = (const struct ip *)packet;
2345 struct sockaddr_in *sin = (struct sockaddr_in *)&ss;
2346 sockaddr_in_init(sin, &ip->ip_src, 0);
2347 sa = sintosa(sin);
2348 #ifdef INET6
2349 } else {
2350 const struct ip6_hdr *ip6 = (const struct ip6_hdr *)packet;
2351 struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&ss;
2352 sockaddr_in6_init(sin6, &ip6->ip6_src, 0, 0, 0);
2353 sa = sin6tosa(sin6);
2354 #endif
2355 }
2356
2357 wgp = wg_pick_peer_by_sa(wg, sa, &psref);
2358 ok = (wgp == wgp_expected);
2359 if (wgp != NULL)
2360 wg_put_peer(wgp, &psref);
2361
2362 return ok;
2363 }
2364
2365 static void
2366 wg_session_dtor_timer(void *arg)
2367 {
2368 struct wg_peer *wgp = arg;
2369
2370 WG_TRACE("enter");
2371
2372 wg_schedule_peer_task(wgp, WGP_TASK_DESTROY_PREV_SESSION);
2373 }
2374
2375 static void
2376 wg_schedule_session_dtor_timer(struct wg_peer *wgp)
2377 {
2378
2379 /* 1 second grace period */
2380 callout_schedule(&wgp->wgp_session_dtor_timer, hz);
2381 }
2382
2383 static bool
2384 sockaddr_port_match(const struct sockaddr *sa1, const struct sockaddr *sa2)
2385 {
2386 if (sa1->sa_family != sa2->sa_family)
2387 return false;
2388
2389 switch (sa1->sa_family) {
2390 case AF_INET:
2391 return satocsin(sa1)->sin_port == satocsin(sa2)->sin_port;
2392 case AF_INET6:
2393 return satocsin6(sa1)->sin6_port == satocsin6(sa2)->sin6_port;
2394 default:
2395 return true;
2396 }
2397 }
2398
2399 static void
2400 wg_update_endpoint_if_necessary(struct wg_peer *wgp,
2401 const struct sockaddr *src)
2402 {
2403 struct wg_sockaddr *wgsa;
2404 struct psref psref;
2405
2406 wgsa = wg_get_endpoint_sa(wgp, &psref);
2407
2408 #ifdef WG_DEBUG_LOG
2409 char oldaddr[128], newaddr[128];
2410 sockaddr_format(wgsatosa(wgsa), oldaddr, sizeof(oldaddr));
2411 sockaddr_format(src, newaddr, sizeof(newaddr));
2412 WG_DLOG("old=%s, new=%s\n", oldaddr, newaddr);
2413 #endif
2414
2415 /*
2416 * III: "Since the packet has authenticated correctly, the source IP of
2417 * the outer UDP/IP packet is used to update the endpoint for peer..."
2418 */
2419 if (__predict_false(sockaddr_cmp(src, wgsatosa(wgsa)) != 0 ||
2420 !sockaddr_port_match(src, wgsatosa(wgsa)))) {
2421 /* XXX We can't change the endpoint twice in a short period */
2422 if (atomic_swap_uint(&wgp->wgp_endpoint_changing, 1) == 0) {
2423 wg_change_endpoint(wgp, src);
2424 }
2425 }
2426
2427 wg_put_sa(wgp, wgsa, &psref);
2428 }
2429
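/*
 * wg_handle_msg_data(wg, m, src)
 *
 *	Handle a transport data message: look up the session by the
 *	receiver index, cheaply reject counters far outside the replay
 *	window before doing any crypto, decrypt and authenticate the
 *	payload, commit the counter to the sliding window, validate
 *	the inner packet against the cryptokey routing table, and pass
 *	it to the network stack.  Consumes m.
 */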
2430 static void
2431 wg_handle_msg_data(struct wg_softc *wg, struct mbuf *m,
2432 const struct sockaddr *src)
2433 {
2434 struct wg_msg_data *wgmd;
2435 char *encrypted_buf = NULL, *decrypted_buf;
2436 size_t encrypted_len, decrypted_len;
2437 struct wg_session *wgs;
2438 struct wg_peer *wgp;
2439 int state;
2440 size_t mlen;
2441 struct psref psref;
2442 int error, af;
2443 bool success, free_encrypted_buf = false, ok;
2444 struct mbuf *n;
2445
2446 KASSERT(m->m_len >= sizeof(struct wg_msg_data));
2447 wgmd = mtod(m, struct wg_msg_data *);
2448
2449 KASSERT(wgmd->wgmd_type == htole32(WG_MSG_TYPE_DATA));
2450 WG_TRACE("data");
2451
2452 /* Find the putative session, or drop. */
2453 wgs = wg_lookup_session_by_index(wg, wgmd->wgmd_receiver, &psref);
2454 if (wgs == NULL) {
2455 WG_TRACE("No session found");
2456 m_freem(m);
2457 return;
2458 }
2459
2460 /*
2461 * We are only ready to handle data when in INIT_PASSIVE,
2462 * ESTABLISHED, or DESTROYING. All transitions out of that
2463 * state dissociate the session index and drain psrefs.
2464 */
2465 state = atomic_load_relaxed(&wgs->wgs_state);
2466 switch (state) {
2467 case WGS_STATE_UNKNOWN:
2468 panic("wg session %p in unknown state has session index %u",
2469 wgs, wgmd->wgmd_receiver);
2470 case WGS_STATE_INIT_ACTIVE:
2471 WG_TRACE("not yet ready for data");
2472 goto out;
2473 case WGS_STATE_INIT_PASSIVE:
2474 case WGS_STATE_ESTABLISHED:
2475 case WGS_STATE_DESTROYING:
2476 break;
2477 }
2478
2479 /*
2480 * Get the peer, for rate-limited logs (XXX MPSAFE, dtrace) and
2481 * to update the endpoint if authentication succeeds.
2482 */
2483 wgp = wgs->wgs_peer;
2484
2485 /*
2486 * Reject outrageously wrong sequence numbers before doing any
2487 * crypto work or taking any locks.
2488 */
2489 error = sliwin_check_fast(&wgs->wgs_recvwin->window,
2490 le64toh(wgmd->wgmd_counter));
2491 if (error) {
2492 WG_LOG_RATECHECK(&wgp->wgp_ppsratecheck, LOG_DEBUG,
2493 "out-of-window packet: %"PRIu64"\n",
2494 le64toh(wgmd->wgmd_counter));
2495 goto out;
2496 }
2497
2498 /* Ensure the payload and authenticator are contiguous. */
2499 mlen = m_length(m);
2500 encrypted_len = mlen - sizeof(*wgmd);
2501 if (encrypted_len < WG_AUTHTAG_LEN) {
2502 WG_DLOG("Short encrypted_len: %lu\n", encrypted_len);
2503 goto out;
2504 }
2505 success = m_ensure_contig(&m, sizeof(*wgmd) + encrypted_len);
2506 if (success) {
2507 encrypted_buf = mtod(m, char *) + sizeof(*wgmd);
2508 } else {
2509 encrypted_buf = kmem_intr_alloc(encrypted_len, KM_NOSLEEP);
2510 if (encrypted_buf == NULL) {
2511 WG_DLOG("failed to allocate encrypted_buf\n");
2512 goto out;
2513 }
2514 m_copydata(m, sizeof(*wgmd), encrypted_len, encrypted_buf);
2515 free_encrypted_buf = true;
2516 }
2517 /* m_ensure_contig may change m regardless of its result */
2518 KASSERT(m->m_len >= sizeof(*wgmd));
2519 wgmd = mtod(m, struct wg_msg_data *);
2520
2521 /*
2522 * Get a buffer for the plaintext. Add WG_AUTHTAG_LEN to avoid
2523 * a zero-length buffer (XXX). Drop if plaintext is longer
2524 * than MCLBYTES (XXX).
2525 */
2526 decrypted_len = encrypted_len - WG_AUTHTAG_LEN;
2527 if (decrypted_len > MCLBYTES) {
2528 /* FIXME handle data larger than MCLBYTES */
2529 WG_DLOG("can't handle data larger than MCLBYTES\n");
2530 goto out;
2531 }
2532 n = wg_get_mbuf(0, decrypted_len + WG_AUTHTAG_LEN);
2533 if (n == NULL) {
2534 WG_DLOG("wg_get_mbuf failed\n");
2535 goto out;
2536 }
2537 decrypted_buf = mtod(n, char *);
2538
2539 /* Decrypt and verify the packet. */
2540 WG_DLOG("mlen=%lu, encrypted_len=%lu\n", mlen, encrypted_len);
2541 error = wg_algo_aead_dec(decrypted_buf,
2542 encrypted_len - WG_AUTHTAG_LEN /* can be 0 */,
2543 wgs->wgs_tkey_recv, le64toh(wgmd->wgmd_counter), encrypted_buf,
2544 encrypted_len, NULL, 0);
2545 if (error != 0) {
2546 WG_LOG_RATECHECK(&wgp->wgp_ppsratecheck, LOG_DEBUG,
2547 "failed to wg_algo_aead_dec\n");
2548 m_freem(n);
2549 goto out;
2550 }
2551 WG_DLOG("outsize=%u\n", (u_int)decrypted_len);
2552
2553 /* Packet is genuine. Reject it if a replay or just too old. */
2554 mutex_enter(&wgs->wgs_recvwin->lock);
2555 error = sliwin_update(&wgs->wgs_recvwin->window,
2556 le64toh(wgmd->wgmd_counter));
2557 mutex_exit(&wgs->wgs_recvwin->lock);
2558 if (error) {
2559 WG_LOG_RATECHECK(&wgp->wgp_ppsratecheck, LOG_DEBUG,
2560 "replay or out-of-window packet: %"PRIu64"\n",
2561 le64toh(wgmd->wgmd_counter));
2562 m_freem(n);
2563 goto out;
2564 }
2565
2566 /* We're done with m now; free it and chuck the pointers. */
2567 m_freem(m);
2568 m = NULL;
2569 wgmd = NULL;
2570
2571 /*
2572 * Validate the encapsulated packet header and get the address
2573 * family, or drop.
2574 */
2575 ok = wg_validate_inner_packet(decrypted_buf, decrypted_len, &af);
2576 if (!ok) {
2577 m_freem(n);
2578 goto out;
2579 }
2580
2581 /*
2582 * The packet is genuine. Update the peer's endpoint if the
2583 * source address changed.
2584 *
2585 * XXX How to prevent DoS by replaying genuine packets from the
2586 * wrong source address?
2587 */
2588 wg_update_endpoint_if_necessary(wgp, src);
2589
2590 /* Submit it into our network stack if routable. */
2591 ok = wg_validate_route(wg, wgp, af, decrypted_buf);
2592 if (ok) {
2593 wg->wg_ops->input(&wg->wg_if, n, af);
2594 } else {
2595 WG_LOG_RATECHECK(&wgp->wgp_ppsratecheck, LOG_DEBUG,
2596 "invalid source address\n");
2597 m_freem(n);
2598 /*
2599 * The inner address is invalid; however, the session is valid,
2600 * so continue the session processing below.
2601 */
2602 }
2603 n = NULL;
2604
2605 /* Update the state machine if necessary. */
2606 if (__predict_false(state == WGS_STATE_INIT_PASSIVE)) {
2607 /*
2608 * We were waiting for the initiator to send their
2609 * first data transport message, and that has happened.
2610 * Schedule a task to establish this session.
2611 */
2612 wg_schedule_peer_task(wgp, WGP_TASK_ESTABLISH_SESSION);
2613 } else {
2614 if (__predict_false(wg_need_to_send_init_message(wgs))) {
2615 wg_schedule_peer_task(wgp, WGP_TASK_SEND_INIT_MESSAGE);
2616 }
2617 /*
2618 * [W] 6.5 Passive Keepalive
2619 * "If a peer has received a validly-authenticated transport
2620 * data message (section 5.4.6), but does not have any packets
2621 * itself to send back for KEEPALIVE-TIMEOUT seconds, it sends
2622 * a keepalive message."
2623 */
2624 WG_DLOG("time_uptime=%ju wgs_time_last_data_sent=%ju\n",
2625 (uintmax_t)time_uptime,
2626 (uintmax_t)wgs->wgs_time_last_data_sent);
2627 if ((time_uptime - wgs->wgs_time_last_data_sent) >=
2628 wg_keepalive_timeout) {
2629 WG_TRACE("Schedule sending keepalive message");
2630 /*
2631 * We can't send a keepalive message here to avoid
2632 * a deadlock; we already hold the solock of a socket
2633 * that is used to send the message.
2634 */
2635 wg_schedule_peer_task(wgp,
2636 WGP_TASK_SEND_KEEPALIVE_MESSAGE);
2637 }
2638 }
2639 out:
2640 wg_put_session(wgs, &psref);
2641 if (m != NULL)
2642 m_freem(m);
2643 if (free_encrypted_buf)
2644 kmem_intr_free(encrypted_buf, encrypted_len);
2645 }
2646
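/*
 * wg_handle_msg_cookie(wg, wgmc)
 *
 *	Handle a cookie reply: decrypt the cookie using the mac1 we
 *	last sent as the additional authenticated data and store it,
 *	so that msg.mac2 can be filled in when the handshake message
 *	is retransmitted ([W] 6.6).
 */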
2647 static void
2648 wg_handle_msg_cookie(struct wg_softc *wg, const struct wg_msg_cookie *wgmc)
2649 {
2650 struct wg_session *wgs;
2651 struct wg_peer *wgp;
2652 struct psref psref;
2653 int error;
2654 uint8_t key[WG_HASH_LEN];
2655 uint8_t cookie[WG_COOKIE_LEN];
2656
2657 WG_TRACE("cookie msg received");
2658
2659 /* Find the putative session. */
2660 wgs = wg_lookup_session_by_index(wg, wgmc->wgmc_receiver, &psref);
2661 if (wgs == NULL) {
2662 WG_TRACE("No session found");
2663 return;
2664 }
2665
2666 /* Lock the peer so we can update the cookie state. */
2667 wgp = wgs->wgs_peer;
2668 mutex_enter(wgp->wgp_lock);
2669
2670 if (!wgp->wgp_last_sent_mac1_valid) {
2671 WG_TRACE("No valid mac1 sent (or expired)");
2672 goto out;
2673 }
2674
2675 /* Decrypt the cookie and store it for later handshake retry. */
2676 wg_algo_mac_cookie(key, sizeof(key), wgp->wgp_pubkey,
2677 sizeof(wgp->wgp_pubkey));
2678 error = wg_algo_xaead_dec(cookie, sizeof(cookie), key,
2679 wgmc->wgmc_cookie, sizeof(wgmc->wgmc_cookie),
2680 wgp->wgp_last_sent_mac1, sizeof(wgp->wgp_last_sent_mac1),
2681 wgmc->wgmc_salt);
2682 if (error != 0) {
2683 WG_LOG_RATECHECK(&wgp->wgp_ppsratecheck, LOG_DEBUG,
2684 "wg_algo_aead_dec for cookie failed: error=%d\n", error);
2685 goto out;
2686 }
2687 /*
2688 * [W] 6.6: Interaction with Cookie Reply System
2689 * "it should simply store the decrypted cookie value from the cookie
2690 * reply message, and wait for the expiration of the REKEY-TIMEOUT
2691 * timer for retrying a handshake initiation message."
2692 */
2693 wgp->wgp_latest_cookie_time = time_uptime;
2694 memcpy(wgp->wgp_latest_cookie, cookie, sizeof(wgp->wgp_latest_cookie));
2695 out:
2696 mutex_exit(wgp->wgp_lock);
2697 wg_put_session(wgs, &psref);
2698 }
2699
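/*
 * wg_validate_msg_header(wg, m)
 *
 *	Map the 32-bit message type to the expected message length,
 *	drop the packet if the mbuf chain is shorter than that, and
 *	make the whole message header contiguous.  Returns the
 *	(possibly reallocated) mbuf, or NULL if it was freed.
 */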
2700 static struct mbuf *
2701 wg_validate_msg_header(struct wg_softc *wg, struct mbuf *m)
2702 {
2703 struct wg_msg wgm;
2704 size_t mbuflen;
2705 size_t msglen;
2706
2707 /*
2708 * Get the mbuf chain length. It is already guaranteed, by
2709 * wg_overudp_cb, to be large enough for a struct wg_msg.
2710 */
2711 mbuflen = m_length(m);
2712 KASSERT(mbuflen >= sizeof(struct wg_msg));
2713
2714 /*
2715 * Copy the message header (32-bit message type) out -- we'll
2716 * worry about contiguity and alignment later.
2717 */
2718 m_copydata(m, 0, sizeof(wgm), &wgm);
2719 switch (le32toh(wgm.wgm_type)) {
2720 case WG_MSG_TYPE_INIT:
2721 msglen = sizeof(struct wg_msg_init);
2722 break;
2723 case WG_MSG_TYPE_RESP:
2724 msglen = sizeof(struct wg_msg_resp);
2725 break;
2726 case WG_MSG_TYPE_COOKIE:
2727 msglen = sizeof(struct wg_msg_cookie);
2728 break;
2729 case WG_MSG_TYPE_DATA:
2730 msglen = sizeof(struct wg_msg_data);
2731 break;
2732 default:
2733 WG_LOG_RATECHECK(&wg->wg_ppsratecheck, LOG_DEBUG,
2734 "Unexpected msg type: %u\n", le32toh(wgm.wgm_type));
2735 goto error;
2736 }
2737
2738 /* Verify the mbuf chain is long enough for this type of message. */
2739 if (__predict_false(mbuflen < msglen)) {
2740 WG_DLOG("Invalid msg size: mbuflen=%lu type=%u\n", mbuflen,
2741 le32toh(wgm.wgm_type));
2742 goto error;
2743 }
2744
2745 /* Make the message header contiguous if necessary. */
2746 if (__predict_false(m->m_len < msglen)) {
2747 m = m_pullup(m, msglen);
2748 if (m == NULL)
2749 return NULL;
2750 }
2751
2752 return m;
2753
2754 error:
2755 m_freem(m);
2756 return NULL;
2757 }
2758
2759 static void
2760 wg_handle_packet(struct wg_softc *wg, struct mbuf *m,
2761 const struct sockaddr *src)
2762 {
2763 struct wg_msg *wgm;
2764
2765 m = wg_validate_msg_header(wg, m);
2766 if (__predict_false(m == NULL))
2767 return;
2768
2769 KASSERT(m->m_len >= sizeof(struct wg_msg));
2770 wgm = mtod(m, struct wg_msg *);
2771 switch (le32toh(wgm->wgm_type)) {
2772 case WG_MSG_TYPE_INIT:
2773 wg_handle_msg_init(wg, (struct wg_msg_init *)wgm, src);
2774 break;
2775 case WG_MSG_TYPE_RESP:
2776 wg_handle_msg_resp(wg, (struct wg_msg_resp *)wgm, src);
2777 break;
2778 case WG_MSG_TYPE_COOKIE:
2779 wg_handle_msg_cookie(wg, (struct wg_msg_cookie *)wgm);
2780 break;
2781 case WG_MSG_TYPE_DATA:
2782 wg_handle_msg_data(wg, m, src);
2783 /* wg_handle_msg_data frees m for us */
2784 return;
2785 default:
2786 panic("invalid message type: %d", le32toh(wgm->wgm_type));
2787 }
2788
2789 m_freem(m);
2790 }
2791
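/*
 * wg_receive_packets(wg, af)
 *
 *	Drain the af socket from the worker thread: handshake messages
 *	left on the socket by wg_overudp_cb are dequeued with
 *	so_receive and dispatched to wg_handle_packet until the socket
 *	is empty.
 */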
2792 static void
2793 wg_receive_packets(struct wg_softc *wg, const int af)
2794 {
2795
2796 for (;;) {
2797 int error, flags;
2798 struct socket *so;
2799 struct mbuf *m = NULL;
2800 struct uio dummy_uio;
2801 struct mbuf *paddr = NULL;
2802 struct sockaddr *src;
2803
2804 so = wg_get_so_by_af(wg, af);
2805 flags = MSG_DONTWAIT;
2806 dummy_uio.uio_resid = 1000000000;
2807
2808 error = so->so_receive(so, &paddr, &dummy_uio, &m, NULL,
2809 &flags);
2810 if (error || m == NULL) {
2811 /* if (error == EWOULDBLOCK) */
2812 return;
2813 }
2814
2815 KASSERT(paddr != NULL);
2816 KASSERT(paddr->m_len >= sizeof(struct sockaddr));
2817 src = mtod(paddr, struct sockaddr *);
2818
2819 wg_handle_packet(wg, m, src);
2820 }
2821 }
2822
2823 static void
2824 wg_get_peer(struct wg_peer *wgp, struct psref *psref)
2825 {
2826
2827 psref_acquire(psref, &wgp->wgp_psref, wg_psref_class);
2828 }
2829
2830 static void
2831 wg_put_peer(struct wg_peer *wgp, struct psref *psref)
2832 {
2833
2834 psref_release(psref, &wgp->wgp_psref, wg_psref_class);
2835 }
2836
2837 static void
2838 wg_task_send_init_message(struct wg_softc *wg, struct wg_peer *wgp)
2839 {
2840 struct wg_session *wgs;
2841
2842 WG_TRACE("WGP_TASK_SEND_INIT_MESSAGE");
2843
2844 KASSERT(mutex_owned(wgp->wgp_lock));
2845
2846 if (!atomic_load_acquire(&wgp->wgp_endpoint_available)) {
2847 WGLOG(LOG_DEBUG, "No endpoint available\n");
2848 /* XXX should do something? */
2849 return;
2850 }
2851
2852 wgs = wgp->wgp_session_stable;
2853 if (wgs->wgs_state == WGS_STATE_UNKNOWN) {
2854 /* XXX What if the unstable session is already INIT_ACTIVE? */
2855 wg_send_handshake_msg_init(wg, wgp);
2856 } else {
2857 /* rekey */
2858 wgs = wgp->wgp_session_unstable;
2859 if (wgs->wgs_state != WGS_STATE_INIT_ACTIVE)
2860 wg_send_handshake_msg_init(wg, wgp);
2861 }
2862 }
2863
2864 static void
2865 wg_task_retry_handshake(struct wg_softc *wg, struct wg_peer *wgp)
2866 {
2867 struct wg_session *wgs;
2868
2869 WG_TRACE("WGP_TASK_RETRY_HANDSHAKE");
2870
2871 KASSERT(mutex_owned(wgp->wgp_lock));
2872 KASSERT(wgp->wgp_handshake_start_time != 0);
2873
2874 wgs = wgp->wgp_session_unstable;
2875 if (wgs->wgs_state != WGS_STATE_INIT_ACTIVE)
2876 return;
2877
2878 /*
2879 * XXX no real need to assign a new index here, but we do need
2880 * to transition to UNKNOWN temporarily
2881 */
2882 wg_put_session_index(wg, wgs);
2883
2884 /* [W] 6.4 Handshake Initiation Retransmission */
2885 if ((time_uptime - wgp->wgp_handshake_start_time) >
2886 wg_rekey_attempt_time) {
2887 /* Give up handshaking */
2888 wgp->wgp_handshake_start_time = 0;
2889 WG_TRACE("give up");
2890
2891 /*
2892 * If a new data packet arrives, handshaking will be retried
2893 * and a new session established at that time; however, we
2894 * don't want to send the pending packets then.
2895 */
2896 wg_purge_pending_packets(wgp);
2897 return;
2898 }
2899
2900 wg_task_send_init_message(wg, wgp);
2901 }
2902
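/*
 * wg_task_establish_session(wg, wgp)
 *
 *	Responder-side establishment: once the initiator's first
 *	authenticated data message has arrived, promote the passive
 *	session to ESTABLISHED, swap it into the stable slot, send any
 *	pending packet, and arrange for the previous session to be
 *	destroyed after readers have drained.
 */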
2903 static void
2904 wg_task_establish_session(struct wg_softc *wg, struct wg_peer *wgp)
2905 {
2906 struct wg_session *wgs, *wgs_prev;
2907 struct mbuf *m;
2908
2909 KASSERT(mutex_owned(wgp->wgp_lock));
2910
2911 wgs = wgp->wgp_session_unstable;
2912 if (wgs->wgs_state != WGS_STATE_INIT_PASSIVE)
2913 /* XXX Can this happen? */
2914 return;
2915
2916 wgs->wgs_state = WGS_STATE_ESTABLISHED;
2917 wgs->wgs_time_established = time_uptime;
2918 wgs->wgs_time_last_data_sent = 0;
2919 wgs->wgs_is_initiator = false;
2920 WG_TRACE("WGS_STATE_ESTABLISHED");
2921
2922 wg_swap_sessions(wgp);
2923 KASSERT(wgs == wgp->wgp_session_stable);
2924 wgs_prev = wgp->wgp_session_unstable;
2925 getnanotime(&wgp->wgp_last_handshake_time);
2926 wgp->wgp_handshake_start_time = 0;
2927 wgp->wgp_last_sent_mac1_valid = false;
2928 wgp->wgp_last_sent_cookie_valid = false;
2929
2930 /* If we had a data packet queued up, send it. */
2931 if ((m = atomic_swap_ptr(&wgp->wgp_pending, NULL)) != NULL) {
2932 const uint32_t h = curcpu()->ci_index; // pktq_rps_hash(m)
2933
2934 M_SETCTX(m, wgp);
2935 if (__predict_false(!pktq_enqueue(wg_pktq, m, h))) {
2936 WGLOG(LOG_ERR, "pktq full, dropping\n");
2937 m_freem(m);
2938 }
2939 }
2940
2941 if (wgs_prev->wgs_state == WGS_STATE_ESTABLISHED) {
2942 /* Wait for wg_get_stable_session to drain. */
2943 pserialize_perform(wgp->wgp_psz);
2944
2945 /* Transition ESTABLISHED->DESTROYING. */
2946 wgs_prev->wgs_state = WGS_STATE_DESTROYING;
2947
2948 /* We can't destroy the old session immediately */
2949 wg_schedule_session_dtor_timer(wgp);
2950 } else {
2951 KASSERTMSG(wgs_prev->wgs_state == WGS_STATE_UNKNOWN,
2952 "state=%d", wgs_prev->wgs_state);
2953 wg_clear_states(wgs_prev);
2954 wgs_prev->wgs_state = WGS_STATE_UNKNOWN;
2955 }
2956 }
2957
2958 static void
2959 wg_task_endpoint_changed(struct wg_softc *wg, struct wg_peer *wgp)
2960 {
2961
2962 WG_TRACE("WGP_TASK_ENDPOINT_CHANGED");
2963
2964 KASSERT(mutex_owned(wgp->wgp_lock));
2965
2966 if (atomic_load_relaxed(&wgp->wgp_endpoint_changing)) {
2967 pserialize_perform(wgp->wgp_psz);
2968 mutex_exit(wgp->wgp_lock);
2969 psref_target_destroy(&wgp->wgp_endpoint0->wgsa_psref,
2970 wg_psref_class);
2971 psref_target_init(&wgp->wgp_endpoint0->wgsa_psref,
2972 wg_psref_class);
2973 mutex_enter(wgp->wgp_lock);
2974 atomic_store_release(&wgp->wgp_endpoint_changing, 0);
2975 }
2976 }
2977
2978 static void
2979 wg_task_send_keepalive_message(struct wg_softc *wg, struct wg_peer *wgp)
2980 {
2981 struct wg_session *wgs;
2982
2983 WG_TRACE("WGP_TASK_SEND_KEEPALIVE_MESSAGE");
2984
2985 KASSERT(mutex_owned(wgp->wgp_lock));
2986
2987 wgs = wgp->wgp_session_stable;
2988 if (wgs->wgs_state != WGS_STATE_ESTABLISHED)
2989 return;
2990
2991 wg_send_keepalive_msg(wgp, wgs);
2992 }
2993
2994 static void
2995 wg_task_destroy_prev_session(struct wg_softc *wg, struct wg_peer *wgp)
2996 {
2997 struct wg_session *wgs;
2998
2999 WG_TRACE("WGP_TASK_DESTROY_PREV_SESSION");
3000
3001 KASSERT(mutex_owned(wgp->wgp_lock));
3002
3003 wgs = wgp->wgp_session_unstable;
3004 if (wgs->wgs_state == WGS_STATE_DESTROYING) {
3005 wg_put_session_index(wg, wgs);
3006 }
3007 }
3008
3009 static void
3010 wg_peer_work(struct work *wk, void *cookie)
3011 {
3012 struct wg_peer *wgp = container_of(wk, struct wg_peer, wgp_work);
3013 struct wg_softc *wg = wgp->wgp_sc;
3014 int tasks;
3015
3016 mutex_enter(wgp->wgp_intr_lock);
3017 while ((tasks = wgp->wgp_tasks) != 0) {
3018 wgp->wgp_tasks = 0;
3019 mutex_exit(wgp->wgp_intr_lock);
3020
3021 mutex_enter(wgp->wgp_lock);
3022 if (ISSET(tasks, WGP_TASK_SEND_INIT_MESSAGE))
3023 wg_task_send_init_message(wg, wgp);
3024 if (ISSET(tasks, WGP_TASK_RETRY_HANDSHAKE))
3025 wg_task_retry_handshake(wg, wgp);
3026 if (ISSET(tasks, WGP_TASK_ESTABLISH_SESSION))
3027 wg_task_establish_session(wg, wgp);
3028 if (ISSET(tasks, WGP_TASK_ENDPOINT_CHANGED))
3029 wg_task_endpoint_changed(wg, wgp);
3030 if (ISSET(tasks, WGP_TASK_SEND_KEEPALIVE_MESSAGE))
3031 wg_task_send_keepalive_message(wg, wgp);
3032 if (ISSET(tasks, WGP_TASK_DESTROY_PREV_SESSION))
3033 wg_task_destroy_prev_session(wg, wgp);
3034 mutex_exit(wgp->wgp_lock);
3035
3036 mutex_enter(wgp->wgp_intr_lock);
3037 }
3038 mutex_exit(wgp->wgp_intr_lock);
3039 }
3040
3041 static void
3042 wg_job(struct threadpool_job *job)
3043 {
3044 struct wg_softc *wg = container_of(job, struct wg_softc, wg_job);
3045 int bound, upcalls;
3046
3047 mutex_enter(wg->wg_intr_lock);
3048 while ((upcalls = wg->wg_upcalls) != 0) {
3049 wg->wg_upcalls = 0;
3050 mutex_exit(wg->wg_intr_lock);
3051 bound = curlwp_bind();
3052 if (ISSET(upcalls, WG_UPCALL_INET))
3053 wg_receive_packets(wg, AF_INET);
3054 if (ISSET(upcalls, WG_UPCALL_INET6))
3055 wg_receive_packets(wg, AF_INET6);
3056 curlwp_bindx(bound);
3057 mutex_enter(wg->wg_intr_lock);
3058 }
3059 threadpool_job_done(job);
3060 mutex_exit(wg->wg_intr_lock);
3061 }
3062
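/*
 * wg_bind_port(wg, port)
 *
 *	Bind the interface's IPv4 (and, with INET6, IPv6) UDP sockets
 *	to the given listen port on the wildcard address.  Binding
 *	again to the already-bound nonzero port is a no-op.
 */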
3063 static int
3064 wg_bind_port(struct wg_softc *wg, const uint16_t port)
3065 {
3066 int error;
3067 uint16_t old_port = wg->wg_listen_port;
3068
3069 if (port != 0 && old_port == port)
3070 return 0;
3071
3072 struct sockaddr_in _sin, *sin = &_sin;
3073 sin->sin_len = sizeof(*sin);
3074 sin->sin_family = AF_INET;
3075 sin->sin_addr.s_addr = INADDR_ANY;
3076 sin->sin_port = htons(port);
3077
3078 error = sobind(wg->wg_so4, sintosa(sin), curlwp);
3079 if (error != 0)
3080 return error;
3081
3082 #ifdef INET6
3083 struct sockaddr_in6 _sin6, *sin6 = &_sin6;
3084 sin6->sin6_len = sizeof(*sin6);
3085 sin6->sin6_family = AF_INET6;
3086 sin6->sin6_addr = in6addr_any;
3087 sin6->sin6_port = htons(port);
3088
3089 error = sobind(wg->wg_so6, sin6tosa(sin6), curlwp);
3090 if (error != 0)
3091 return error;
3092 #endif
3093
3094 wg->wg_listen_port = port;
3095
3096 return 0;
3097 }
3098
3099 static void
3100 wg_so_upcall(struct socket *so, void *cookie, int events, int waitflag)
3101 {
3102 struct wg_softc *wg = cookie;
3103 int reason;
3104
3105 reason = (so->so_proto->pr_domain->dom_family == AF_INET) ?
3106 WG_UPCALL_INET :
3107 WG_UPCALL_INET6;
3108
3109 mutex_enter(wg->wg_intr_lock);
3110 wg->wg_upcalls |= reason;
3111 threadpool_schedule_job(wg->wg_threadpool, &wg->wg_job);
3112 mutex_exit(wg->wg_intr_lock);
3113 }
3114
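/*
 * wg_overudp_cb(mp, offset, so, src, arg)
 *
 *	Called from the UDP input path before the packet is queued on
 *	the socket.  Data messages are latency-sensitive and handled
 *	here immediately (the mbuf is consumed and *mp set to NULL);
 *	handshake messages are left on the socket for the worker
 *	thread; anything else is dropped.
 */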
3115 static int
3116 wg_overudp_cb(struct mbuf **mp, int offset, struct socket *so,
3117 struct sockaddr *src, void *arg)
3118 {
3119 struct wg_softc *wg = arg;
3120 struct wg_msg wgm;
3121 struct mbuf *m = *mp;
3122
3123 WG_TRACE("enter");
3124
3125 /* Verify the mbuf chain is long enough to have a wg msg header. */
3126 KASSERT(offset <= m_length(m));
3127 if (__predict_false(m_length(m) - offset < sizeof(struct wg_msg))) {
3128 /* drop on the floor */
3129 m_freem(m);
3130 return -1;
3131 }
3132
3133 /*
3134 * Copy the message header (32-bit message type) out -- we'll
3135 * worry about contiguity and alignment later.
3136 */
3137 m_copydata(m, offset, sizeof(struct wg_msg), &wgm);
3138 WG_DLOG("type=%d\n", le32toh(wgm.wgm_type));
3139
3140 /*
3141 * Handle DATA packets promptly as they arrive. Other packets
3142 * may require expensive public-key crypto and are not as
3143 * sensitive to latency, so defer them to the worker thread.
3144 */
3145 switch (le32toh(wgm.wgm_type)) {
3146 case WG_MSG_TYPE_DATA:
3147 /* handle immediately */
3148 m_adj(m, offset);
3149 if (__predict_false(m->m_len < sizeof(struct wg_msg_data))) {
3150 m = m_pullup(m, sizeof(struct wg_msg_data));
3151 if (m == NULL)
3152 return -1;
3153 }
3154 wg_handle_msg_data(wg, m, src);
3155 *mp = NULL;
3156 return 1;
3157 case WG_MSG_TYPE_INIT:
3158 case WG_MSG_TYPE_RESP:
3159 case WG_MSG_TYPE_COOKIE:
3160 /* pass through to so_receive in wg_receive_packets */
3161 return 0;
3162 default:
3163 /* drop on the floor */
3164 m_freem(m);
3165 return -1;
3166 }
3167 }
3168
3169 static int
3170 wg_socreate(struct wg_softc *wg, int af, struct socket **sop)
3171 {
3172 int error;
3173 struct socket *so;
3174
3175 error = socreate(af, &so, SOCK_DGRAM, 0, curlwp, NULL);
3176 if (error != 0)
3177 return error;
3178
3179 solock(so);
3180 so->so_upcallarg = wg;
3181 so->so_upcall = wg_so_upcall;
3182 so->so_rcv.sb_flags |= SB_UPCALL;
3183 if (af == AF_INET)
3184 in_pcb_register_overudp_cb(sotoinpcb(so), wg_overudp_cb, wg);
3185 #ifdef INET6
3186 else
3187 in6_pcb_register_overudp_cb(sotoin6pcb(so), wg_overudp_cb, wg);
3188 #endif
3189 sounlock(so);
3190
3191 *sop = so;
3192
3193 return 0;
3194 }
3195
3196 static bool
3197 wg_session_hit_limits(struct wg_session *wgs)
3198 {
3199
3200 /*
3201 * [W] 6.2: Transport Message Limits
3202 * "After REJECT-AFTER-MESSAGES transport data messages or after the
3203 * current secure session is REJECT-AFTER-TIME seconds old, whichever
3204 * comes first, WireGuard will refuse to send any more transport data
3205 * messages using the current secure session, ..."
3206 */
3207 KASSERT(wgs->wgs_time_established != 0);
3208 if ((time_uptime - wgs->wgs_time_established) > wg_reject_after_time) {
3209 WG_DLOG("The session hits REJECT_AFTER_TIME\n");
3210 return true;
3211 } else if (wg_session_get_send_counter(wgs) >
3212 wg_reject_after_messages) {
3213 WG_DLOG("The session hits REJECT_AFTER_MESSAGES\n");
3214 return true;
3215 }
3216
3217 return false;
3218 }
3219
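/*
 * wgintr(cookie)
 *
 *	Software interrupt handler for the transmit packet queue: for
 *	each queued mbuf, fetch the peer stashed in the packet header
 *	context and send it over the peer's stable session if one is
 *	established and within its limits; otherwise schedule a new
 *	handshake and drop the packet.
 */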
3220 static void
3221 wgintr(void *cookie)
3222 {
3223 struct wg_peer *wgp;
3224 struct wg_session *wgs;
3225 struct mbuf *m;
3226 struct psref psref;
3227
3228 while ((m = pktq_dequeue(wg_pktq)) != NULL) {
3229 wgp = M_GETCTX(m, struct wg_peer *);
3230 if ((wgs = wg_get_stable_session(wgp, &psref)) == NULL) {
3231 WG_TRACE("no stable session");
3232 wg_schedule_peer_task(wgp, WGP_TASK_SEND_INIT_MESSAGE);
3233 goto next0;
3234 }
3235 if (__predict_false(wg_session_hit_limits(wgs))) {
3236 WG_TRACE("stable session hit limits");
3237 wg_schedule_peer_task(wgp, WGP_TASK_SEND_INIT_MESSAGE);
3238 goto next1;
3239 }
3240 wg_send_data_msg(wgp, wgs, m);
3241 m = NULL; /* consumed */
3242 next1: wg_put_session(wgs, &psref);
3243 next0: if (m)
3244 m_freem(m);
3245 /* XXX Yield to avoid userland starvation? */
3246 }
3247 }
3248
3249 static void
3250 wg_rekey_timer(void *arg)
3251 {
3252 struct wg_peer *wgp = arg;
3253
3254 wg_schedule_peer_task(wgp, WGP_TASK_SEND_INIT_MESSAGE);
3255 }
3256
3257 static void
3258 wg_purge_pending_packets(struct wg_peer *wgp)
3259 {
3260 struct mbuf *m;
3261
3262 if ((m = atomic_swap_ptr(&wgp->wgp_pending, NULL)) != NULL)
3263 m_freem(m);
3264 pktq_barrier(wg_pktq);
3265 }
3266
3267 static void
3268 wg_handshake_timeout_timer(void *arg)
3269 {
3270 struct wg_peer *wgp = arg;
3271
3272 WG_TRACE("enter");
3273
3274 wg_schedule_peer_task(wgp, WGP_TASK_RETRY_HANDSHAKE);
3275 }
3276
3277 static struct wg_peer *
3278 wg_alloc_peer(struct wg_softc *wg)
3279 {
3280 struct wg_peer *wgp;
3281
3282 wgp = kmem_zalloc(sizeof(*wgp), KM_SLEEP);
3283
3284 wgp->wgp_sc = wg;
3285 callout_init(&wgp->wgp_rekey_timer, CALLOUT_MPSAFE);
3286 callout_setfunc(&wgp->wgp_rekey_timer, wg_rekey_timer, wgp);
3287 callout_init(&wgp->wgp_handshake_timeout_timer, CALLOUT_MPSAFE);
3288 callout_setfunc(&wgp->wgp_handshake_timeout_timer,
3289 wg_handshake_timeout_timer, wgp);
3290 callout_init(&wgp->wgp_session_dtor_timer, CALLOUT_MPSAFE);
3291 callout_setfunc(&wgp->wgp_session_dtor_timer,
3292 wg_session_dtor_timer, wgp);
3293 PSLIST_ENTRY_INIT(wgp, wgp_peerlist_entry);
3294 wgp->wgp_endpoint_changing = false;
3295 wgp->wgp_endpoint_available = false;
3296 wgp->wgp_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NONE);
3297 wgp->wgp_intr_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_SOFTNET);
3298 wgp->wgp_psz = pserialize_create();
3299 psref_target_init(&wgp->wgp_psref, wg_psref_class);
3300
3301 wgp->wgp_endpoint = kmem_zalloc(sizeof(*wgp->wgp_endpoint), KM_SLEEP);
3302 wgp->wgp_endpoint0 = kmem_zalloc(sizeof(*wgp->wgp_endpoint0), KM_SLEEP);
3303 psref_target_init(&wgp->wgp_endpoint->wgsa_psref, wg_psref_class);
3304 psref_target_init(&wgp->wgp_endpoint0->wgsa_psref, wg_psref_class);
3305
3306 struct wg_session *wgs;
3307 wgp->wgp_session_stable =
3308 kmem_zalloc(sizeof(*wgp->wgp_session_stable), KM_SLEEP);
3309 wgp->wgp_session_unstable =
3310 kmem_zalloc(sizeof(*wgp->wgp_session_unstable), KM_SLEEP);
3311 wgs = wgp->wgp_session_stable;
3312 wgs->wgs_peer = wgp;
3313 wgs->wgs_state = WGS_STATE_UNKNOWN;
3314 psref_target_init(&wgs->wgs_psref, wg_psref_class);
3315 #ifndef __HAVE_ATOMIC64_LOADSTORE
3316 mutex_init(&wgs->wgs_send_counter_lock, MUTEX_DEFAULT, IPL_SOFTNET);
3317 #endif
3318 wgs->wgs_recvwin = kmem_zalloc(sizeof(*wgs->wgs_recvwin), KM_SLEEP);
3319 mutex_init(&wgs->wgs_recvwin->lock, MUTEX_DEFAULT, IPL_SOFTNET);
3320
3321 wgs = wgp->wgp_session_unstable;
3322 wgs->wgs_peer = wgp;
3323 wgs->wgs_state = WGS_STATE_UNKNOWN;
3324 psref_target_init(&wgs->wgs_psref, wg_psref_class);
3325 #ifndef __HAVE_ATOMIC64_LOADSTORE
3326 mutex_init(&wgs->wgs_send_counter_lock, MUTEX_DEFAULT, IPL_SOFTNET);
3327 #endif
3328 wgs->wgs_recvwin = kmem_zalloc(sizeof(*wgs->wgs_recvwin), KM_SLEEP);
3329 mutex_init(&wgs->wgs_recvwin->lock, MUTEX_DEFAULT, IPL_SOFTNET);
3330
3331 return wgp;
3332 }
3333
3334 static void
3335 wg_destroy_peer(struct wg_peer *wgp)
3336 {
3337 struct wg_session *wgs;
3338 struct wg_softc *wg = wgp->wgp_sc;
3339
3340 /* Prevent new packets from this peer on any source address. */
3341 rw_enter(wg->wg_rwlock, RW_WRITER);
3342 for (int i = 0; i < wgp->wgp_n_allowedips; i++) {
3343 struct wg_allowedip *wga = &wgp->wgp_allowedips[i];
3344 struct radix_node_head *rnh = wg_rnh(wg, wga->wga_family);
3345 struct radix_node *rn;
3346
3347 KASSERT(rnh != NULL);
3348 rn = rnh->rnh_deladdr(&wga->wga_sa_addr,
3349 &wga->wga_sa_mask, rnh);
3350 if (rn == NULL) {
3351 char addrstr[128];
3352 sockaddr_format(&wga->wga_sa_addr, addrstr,
3353 sizeof(addrstr));
3354 WGLOG(LOG_WARNING, "Couldn't delete %s\n", addrstr);
3355 }
3356 }
3357 rw_exit(wg->wg_rwlock);
3358
3359 /* Purge pending packets. */
3360 wg_purge_pending_packets(wgp);
3361
3362 /* Halt all packet processing and timeouts. */
3363 callout_halt(&wgp->wgp_rekey_timer, NULL);
3364 callout_halt(&wgp->wgp_handshake_timeout_timer, NULL);
3365 callout_halt(&wgp->wgp_session_dtor_timer, NULL);
3366
3367 /* Wait for any queued work to complete. */
3368 workqueue_wait(wg_wq, &wgp->wgp_work);
3369
3370 wgs = wgp->wgp_session_unstable;
3371 if (wgs->wgs_state != WGS_STATE_UNKNOWN) {
3372 mutex_enter(wgp->wgp_lock);
3373 wg_destroy_session(wg, wgs);
3374 mutex_exit(wgp->wgp_lock);
3375 }
3376 mutex_destroy(&wgs->wgs_recvwin->lock);
3377 kmem_free(wgs->wgs_recvwin, sizeof(*wgs->wgs_recvwin));
3378 #ifndef __HAVE_ATOMIC64_LOADSTORE
3379 mutex_destroy(&wgs->wgs_send_counter_lock);
3380 #endif
3381 kmem_free(wgs, sizeof(*wgs));
3382
3383 wgs = wgp->wgp_session_stable;
3384 if (wgs->wgs_state != WGS_STATE_UNKNOWN) {
3385 mutex_enter(wgp->wgp_lock);
3386 wg_destroy_session(wg, wgs);
3387 mutex_exit(wgp->wgp_lock);
3388 }
3389 mutex_destroy(&wgs->wgs_recvwin->lock);
3390 kmem_free(wgs->wgs_recvwin, sizeof(*wgs->wgs_recvwin));
3391 #ifndef __HAVE_ATOMIC64_LOADSTORE
3392 mutex_destroy(&wgs->wgs_send_counter_lock);
3393 #endif
3394 kmem_free(wgs, sizeof(*wgs));
3395
3396 psref_target_destroy(&wgp->wgp_endpoint->wgsa_psref, wg_psref_class);
3397 psref_target_destroy(&wgp->wgp_endpoint0->wgsa_psref, wg_psref_class);
3398 kmem_free(wgp->wgp_endpoint, sizeof(*wgp->wgp_endpoint));
3399 kmem_free(wgp->wgp_endpoint0, sizeof(*wgp->wgp_endpoint0));
3400
3401 pserialize_destroy(wgp->wgp_psz);
3402 mutex_obj_free(wgp->wgp_intr_lock);
3403 mutex_obj_free(wgp->wgp_lock);
3404
3405 kmem_free(wgp, sizeof(*wgp));
3406 }
3407
3408 static void
3409 wg_destroy_all_peers(struct wg_softc *wg)
3410 {
3411 struct wg_peer *wgp, *wgp0 __diagused;
3412 void *garbage_byname, *garbage_bypubkey;
3413
3414 restart:
3415 garbage_byname = garbage_bypubkey = NULL;
3416 mutex_enter(wg->wg_lock);
3417 WG_PEER_WRITER_FOREACH(wgp, wg) {
3418 if (wgp->wgp_name[0]) {
3419 wgp0 = thmap_del(wg->wg_peers_byname, wgp->wgp_name,
3420 strlen(wgp->wgp_name));
3421 KASSERT(wgp0 == wgp);
3422 garbage_byname = thmap_stage_gc(wg->wg_peers_byname);
3423 }
3424 wgp0 = thmap_del(wg->wg_peers_bypubkey, wgp->wgp_pubkey,
3425 sizeof(wgp->wgp_pubkey));
3426 KASSERT(wgp0 == wgp);
3427 garbage_bypubkey = thmap_stage_gc(wg->wg_peers_bypubkey);
3428 WG_PEER_WRITER_REMOVE(wgp);
3429 wg->wg_npeers--;
3430 mutex_enter(wgp->wgp_lock);
3431 pserialize_perform(wgp->wgp_psz);
3432 mutex_exit(wgp->wgp_lock);
3433 PSLIST_ENTRY_DESTROY(wgp, wgp_peerlist_entry);
3434 break;
3435 }
3436 mutex_exit(wg->wg_lock);
3437
3438 if (wgp == NULL)
3439 return;
3440
3441 psref_target_destroy(&wgp->wgp_psref, wg_psref_class);
3442
3443 wg_destroy_peer(wgp);
3444 thmap_gc(wg->wg_peers_byname, garbage_byname);
3445 thmap_gc(wg->wg_peers_bypubkey, garbage_bypubkey);
3446
3447 goto restart;
3448 }
3449
3450 static int
3451 wg_destroy_peer_name(struct wg_softc *wg, const char *name)
3452 {
3453 struct wg_peer *wgp, *wgp0 __diagused;
3454 void *garbage_byname, *garbage_bypubkey;
3455
3456 mutex_enter(wg->wg_lock);
3457 wgp = thmap_del(wg->wg_peers_byname, name, strlen(name));
3458 if (wgp != NULL) {
3459 wgp0 = thmap_del(wg->wg_peers_bypubkey, wgp->wgp_pubkey,
3460 sizeof(wgp->wgp_pubkey));
3461 KASSERT(wgp0 == wgp);
3462 garbage_byname = thmap_stage_gc(wg->wg_peers_byname);
3463 garbage_bypubkey = thmap_stage_gc(wg->wg_peers_bypubkey);
3464 WG_PEER_WRITER_REMOVE(wgp);
3465 wg->wg_npeers--;
3466 mutex_enter(wgp->wgp_lock);
3467 pserialize_perform(wgp->wgp_psz);
3468 mutex_exit(wgp->wgp_lock);
3469 PSLIST_ENTRY_DESTROY(wgp, wgp_peerlist_entry);
3470 }
3471 mutex_exit(wg->wg_lock);
3472
3473 if (wgp == NULL)
3474 return ENOENT;
3475
3476 psref_target_destroy(&wgp->wgp_psref, wg_psref_class);
3477
3478 wg_destroy_peer(wgp);
3479 thmap_gc(wg->wg_peers_byname, garbage_byname);
3480 thmap_gc(wg->wg_peers_bypubkey, garbage_bypubkey);
3481
3482 return 0;
3483 }
3484
3485 static int
3486 wg_if_attach(struct wg_softc *wg)
3487 {
3488 int error;
3489
3490 wg->wg_if.if_addrlen = 0;
3491 wg->wg_if.if_mtu = WG_MTU;
3492 wg->wg_if.if_flags = IFF_MULTICAST;
3493 wg->wg_if.if_extflags = IFEF_NO_LINK_STATE_CHANGE;
3494 wg->wg_if.if_extflags |= IFEF_MPSAFE;
3495 wg->wg_if.if_ioctl = wg_ioctl;
3496 wg->wg_if.if_output = wg_output;
3497 wg->wg_if.if_init = wg_init;
3498 wg->wg_if.if_stop = wg_stop;
3499 wg->wg_if.if_type = IFT_OTHER;
3500 wg->wg_if.if_dlt = DLT_NULL;
3501 wg->wg_if.if_softc = wg;
3502 IFQ_SET_READY(&wg->wg_if.if_snd);
3503
3504 error = if_initialize(&wg->wg_if);
3505 if (error != 0)
3506 return error;
3507
3508 if_alloc_sadl(&wg->wg_if);
3509 if_register(&wg->wg_if);
3510
3511 bpf_attach(&wg->wg_if, DLT_NULL, sizeof(uint32_t));
3512
3513 return 0;
3514 }
3515
3516 static void
3517 wg_if_detach(struct wg_softc *wg)
3518 {
3519 struct ifnet *ifp = &wg->wg_if;
3520
3521 bpf_detach(ifp);
3522 if_detach(ifp);
3523 }
3524
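/*
 * wg_clone_create(ifc, unit)
 *
 *	Create a wg(4) interface instance: allocate the softc, set up
 *	the peer and session lookup maps, the receive threadpool job,
 *	and the IPv4/IPv6 UDP sockets with their overudp callbacks,
 *	then attach the interface.
 */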
3525 static int
3526 wg_clone_create(struct if_clone *ifc, int unit)
3527 {
3528 struct wg_softc *wg;
3529 int error;
3530
3531 wg = kmem_zalloc(sizeof(*wg), KM_SLEEP);
3532
3533 if_initname(&wg->wg_if, ifc->ifc_name, unit);
3534
3535 PSLIST_INIT(&wg->wg_peers);
3536 wg->wg_peers_bypubkey = thmap_create(0, NULL, THMAP_NOCOPY);
3537 wg->wg_peers_byname = thmap_create(0, NULL, THMAP_NOCOPY);
3538 wg->wg_sessions_byindex = thmap_create(0, NULL, THMAP_NOCOPY);
3539 wg->wg_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NONE);
3540 wg->wg_intr_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_SOFTNET);
3541 wg->wg_rwlock = rw_obj_alloc();
3542 threadpool_job_init(&wg->wg_job, wg_job, wg->wg_intr_lock,
3543 "%s", if_name(&wg->wg_if));
3544 wg->wg_ops = &wg_ops_rumpkernel;
3545
3546 error = threadpool_get(&wg->wg_threadpool, PRI_NONE);
3547 if (error)
3548 goto fail0;
3549
3550 #ifdef INET
3551 error = wg_socreate(wg, AF_INET, &wg->wg_so4);
3552 if (error)
3553 goto fail1;
3554 rn_inithead((void **)&wg->wg_rtable_ipv4,
3555 offsetof(struct sockaddr_in, sin_addr) * NBBY);
3556 #endif
3557 #ifdef INET6
3558 error = wg_socreate(wg, AF_INET6, &wg->wg_so6);
3559 if (error)
3560 goto fail2;
3561 rn_inithead((void **)&wg->wg_rtable_ipv6,
3562 offsetof(struct sockaddr_in6, sin6_addr) * NBBY);
3563 #endif
3564
3565 error = wg_if_attach(wg);
3566 if (error)
3567 goto fail3;
3568
3569 mutex_enter(&wg_softcs.lock);
3570 LIST_INSERT_HEAD(&wg_softcs.list, wg, wg_list);
3571 mutex_exit(&wg_softcs.lock);
3572
3573 return 0;
3574
3575 fail4: __unused
3576 mutex_enter(&wg_softcs.lock);
3577 LIST_REMOVE(wg, wg_list);
3578 mutex_exit(&wg_softcs.lock);
3579 wg_if_detach(wg);
3580 fail3: wg_destroy_all_peers(wg);
3581 #ifdef INET6
3582 solock(wg->wg_so6);
3583 wg->wg_so6->so_rcv.sb_flags &= ~SB_UPCALL;
3584 sounlock(wg->wg_so6);
3585 #endif
3586 #ifdef INET
3587 solock(wg->wg_so4);
3588 wg->wg_so4->so_rcv.sb_flags &= ~SB_UPCALL;
3589 sounlock(wg->wg_so4);
3590 #endif
3591 mutex_enter(wg->wg_intr_lock);
3592 threadpool_cancel_job(wg->wg_threadpool, &wg->wg_job);
3593 mutex_exit(wg->wg_intr_lock);
3594 #ifdef INET6
3595 if (wg->wg_rtable_ipv6 != NULL)
3596 free(wg->wg_rtable_ipv6, M_RTABLE);
3597 soclose(wg->wg_so6);
3598 fail2:
3599 #endif
3600 #ifdef INET
3601 if (wg->wg_rtable_ipv4 != NULL)
3602 free(wg->wg_rtable_ipv4, M_RTABLE);
3603 soclose(wg->wg_so4);
3604 fail1:
3605 #endif
3606 threadpool_put(wg->wg_threadpool, PRI_NONE);
3607 fail0: threadpool_job_destroy(&wg->wg_job);
3608 rw_obj_free(wg->wg_rwlock);
3609 mutex_obj_free(wg->wg_intr_lock);
3610 mutex_obj_free(wg->wg_lock);
3611 thmap_destroy(wg->wg_sessions_byindex);
3612 thmap_destroy(wg->wg_peers_byname);
3613 thmap_destroy(wg->wg_peers_bypubkey);
3614 PSLIST_DESTROY(&wg->wg_peers);
3615 kmem_free(wg, sizeof(*wg));
3616 return error;
3617 }
3618
3619 static int
3620 wg_clone_destroy(struct ifnet *ifp)
3621 {
3622 struct wg_softc *wg = container_of(ifp, struct wg_softc, wg_if);
3623
3624 #ifdef WG_RUMPKERNEL
3625 if (wg_user_mode(wg)) {
3626 rumpuser_wg_destroy(wg->wg_user);
3627 wg->wg_user = NULL;
3628 }
3629 #endif
3630
3631 mutex_enter(&wg_softcs.lock);
3632 LIST_REMOVE(wg, wg_list);
3633 mutex_exit(&wg_softcs.lock);
3634 wg_if_detach(wg);
3635 wg_destroy_all_peers(wg);
3636 #ifdef INET6
3637 solock(wg->wg_so6);
3638 wg->wg_so6->so_rcv.sb_flags &= ~SB_UPCALL;
3639 sounlock(wg->wg_so6);
3640 #endif
3641 #ifdef INET
3642 solock(wg->wg_so4);
3643 wg->wg_so4->so_rcv.sb_flags &= ~SB_UPCALL;
3644 sounlock(wg->wg_so4);
3645 #endif
3646 mutex_enter(wg->wg_intr_lock);
3647 threadpool_cancel_job(wg->wg_threadpool, &wg->wg_job);
3648 mutex_exit(wg->wg_intr_lock);
3649 #ifdef INET6
3650 if (wg->wg_rtable_ipv6 != NULL)
3651 free(wg->wg_rtable_ipv6, M_RTABLE);
3652 soclose(wg->wg_so6);
3653 #endif
3654 #ifdef INET
3655 if (wg->wg_rtable_ipv4 != NULL)
3656 free(wg->wg_rtable_ipv4, M_RTABLE);
3657 soclose(wg->wg_so4);
3658 #endif
3659 threadpool_put(wg->wg_threadpool, PRI_NONE);
3660 threadpool_job_destroy(&wg->wg_job);
3661 rw_obj_free(wg->wg_rwlock);
3662 mutex_obj_free(wg->wg_intr_lock);
3663 mutex_obj_free(wg->wg_lock);
3664 thmap_destroy(wg->wg_sessions_byindex);
3665 thmap_destroy(wg->wg_peers_byname);
3666 thmap_destroy(wg->wg_peers_bypubkey);
3667 PSLIST_DESTROY(&wg->wg_peers);
3668 kmem_free(wg, sizeof(*wg));
3669
3670 return 0;
3671 }
3672
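/*
 * wg_pick_peer_by_sa: cryptokey routing for outbound packets ([W] 2).
 * The destination address of the inner packet is looked up in the
 * per-address-family radix tree of allowed IPs; the longest-prefix
 * match determines the peer the packet must be encrypted to.  Returns
 * the peer with a psref held, or NULL if no allowed IP covers the
 * address.
 */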
3673 static struct wg_peer *
3674 wg_pick_peer_by_sa(struct wg_softc *wg, const struct sockaddr *sa,
3675 struct psref *psref)
3676 {
3677 struct radix_node_head *rnh;
3678 struct radix_node *rn;
3679 struct wg_peer *wgp = NULL;
3680 struct wg_allowedip *wga;
3681
3682 #ifdef WG_DEBUG_LOG
3683 char addrstr[128];
3684 sockaddr_format(sa, addrstr, sizeof(addrstr));
3685 WG_DLOG("sa=%s\n", addrstr);
3686 #endif
3687
3688 rw_enter(wg->wg_rwlock, RW_READER);
3689
3690 rnh = wg_rnh(wg, sa->sa_family);
3691 if (rnh == NULL)
3692 goto out;
3693
3694 rn = rnh->rnh_matchaddr(sa, rnh);
3695 if (rn == NULL || (rn->rn_flags & RNF_ROOT) != 0)
3696 goto out;
3697
3698 WG_TRACE("success");
3699
3700 wga = container_of(rn, struct wg_allowedip, wga_nodes[0]);
3701 wgp = wga->wga_peer;
3702 wg_get_peer(wgp, psref);
3703
3704 out:
3705 rw_exit(wg->wg_rwlock);
3706 return wgp;
3707 }
3708
3709 static void
3710 wg_fill_msg_data(struct wg_softc *wg, struct wg_peer *wgp,
3711 struct wg_session *wgs, struct wg_msg_data *wgmd)
3712 {
3713
3714 memset(wgmd, 0, sizeof(*wgmd));
3715 wgmd->wgmd_type = htole32(WG_MSG_TYPE_DATA);
3716 wgmd->wgmd_receiver = wgs->wgs_remote_index;
3717 /* [W] 5.4.6: msg.counter := Nm^send */
3718 /* [W] 5.4.6: Nm^send := Nm^send + 1 */
3719 wgmd->wgmd_counter = htole64(wg_session_inc_send_counter(wgs));
3720 WG_DLOG("counter=%"PRIu64"\n", le64toh(wgmd->wgmd_counter));
3721 }
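
/*
 * Wire format of a transport data message ([W] 5.4.6), as filled in by
 * wg_fill_msg_data() and completed by wg_send_data_msg():
 *
 *	u32 (le)	type		WG_MSG_TYPE_DATA
 *	u32		receiver	remote session index, copied as stored
 *	u64 (le)	counter		per-session send counter (AEAD nonce)
 *	u8[]		packet		ChaCha20-Poly1305 ciphertext of the
 *					inner packet padded to a multiple of
 *					16 bytes, followed by the 16-byte tag
 *
 * This mirrors struct wg_msg_data declared earlier in this file.
 */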
3722
3723 static int
3724 wg_output(struct ifnet *ifp, struct mbuf *m, const struct sockaddr *dst,
3725 const struct rtentry *rt)
3726 {
3727 struct wg_softc *wg = ifp->if_softc;
3728 struct wg_peer *wgp = NULL;
3729 struct wg_session *wgs = NULL;
3730 struct psref wgp_psref, wgs_psref;
3731 int bound;
3732 int error;
3733
3734 bound = curlwp_bind();
3735
3736 /* TODO make the nest limit configurable via sysctl */
3737 error = if_tunnel_check_nesting(ifp, m, 1);
3738 if (error) {
3739 WGLOG(LOG_ERR, "tunneling loop detected and packet dropped\n");
3740 goto out0;
3741 }
3742
3743 IFQ_CLASSIFY(&ifp->if_snd, m, dst->sa_family);
3744
3745 bpf_mtap_af(ifp, dst->sa_family, m, BPF_D_OUT);
3746
3747 m->m_flags &= ~(M_BCAST|M_MCAST);
3748
3749 wgp = wg_pick_peer_by_sa(wg, dst, &wgp_psref);
3750 if (wgp == NULL) {
3751 WG_TRACE("peer not found");
3752 error = EHOSTUNREACH;
3753 goto out0;
3754 }
3755
3756 /* Clear checksum-offload flags. */
3757 m->m_pkthdr.csum_flags = 0;
3758 m->m_pkthdr.csum_data = 0;
3759
3760 /* Check whether there's an established session. */
3761 wgs = wg_get_stable_session(wgp, &wgs_psref);
3762 if (wgs == NULL) {
3763 /*
3764 * No established session. If we're the first to try
3765 * sending data, schedule a handshake and queue the
3766 * packet for when the handshake is done; otherwise
3767 * just drop the packet and let the ongoing handshake
3768 * attempt continue. We could queue more data packets
3769 * but it's not clear that's worthwhile.
3770 */
3771 if (atomic_cas_ptr(&wgp->wgp_pending, NULL, m) == NULL) {
3772 m = NULL; /* consume */
3773 WG_TRACE("queued first packet; init handshake");
3774 wg_schedule_peer_task(wgp, WGP_TASK_SEND_INIT_MESSAGE);
3775 } else {
3776 WG_TRACE("first packet already queued, dropping");
3777 }
3778 goto out1;
3779 }
3780
3781 /* There's an established session. Toss it in the queue. */
3782 kpreempt_disable();
3783 const uint32_t h = curcpu()->ci_index; /* XXX pktq_rps_hash(m) */
3784 M_SETCTX(m, wgp);
3785 if (__predict_false(!pktq_enqueue(wg_pktq, m, h))) {
3786 WGLOG(LOG_ERR, "pktq full, dropping\n");
3787 error = ENOBUFS;
3788 goto out2;
3789 }
3790 m = NULL; /* consumed */
3791 error = 0;
3792 out2: kpreempt_enable();
3793
3794 wg_put_session(wgs, &wgs_psref);
3795 out1: wg_put_peer(wgp, &wgp_psref);
3796 out0: if (m)
3797 m_freem(m);
3798 curlwp_bindx(bound);
3799 return error;
3800 }
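
/*
 * Note on the transmit path: wg_output() itself does not encrypt.  For
 * an established session the mbuf is tagged with the destination peer
 * (M_SETCTX) and enqueued on the driver's private packet queue; the
 * dequeue side earlier in this file recovers the peer with M_GETCTX,
 * reacquires a session reference and calls wg_send_data_msg() from
 * softint context, so the crypto does not run in the caller of
 * if_output.
 */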
3801
3802 static int
3803 wg_send_udp(struct wg_peer *wgp, struct mbuf *m)
3804 {
3805 struct psref psref;
3806 struct wg_sockaddr *wgsa;
3807 int error;
3808 struct socket *so;
3809
3810 wgsa = wg_get_endpoint_sa(wgp, &psref);
3811 so = wg_get_so_by_peer(wgp, wgsa);
3812 solock(so);
3813 if (wgsatosa(wgsa)->sa_family == AF_INET) {
3814 error = udp_send(so, m, wgsatosa(wgsa), NULL, curlwp);
3815 } else {
3816 #ifdef INET6
3817 error = udp6_output(sotoin6pcb(so), m, wgsatosin6(wgsa),
3818 NULL, curlwp);
3819 #else
3820 m_freem(m);
3821 error = EPFNOSUPPORT;
3822 #endif
3823 }
3824 sounlock(so);
3825 wg_put_sa(wgp, wgsa, &psref);
3826
3827 return error;
3828 }
3829
3830 /* Inspired by pppoe_get_mbuf */
3831 static struct mbuf *
3832 wg_get_mbuf(size_t leading_len, size_t len)
3833 {
3834 struct mbuf *m;
3835
3836 KASSERT(leading_len <= MCLBYTES);
3837 KASSERT(len <= MCLBYTES - leading_len);
3838
3839 m = m_gethdr(M_DONTWAIT, MT_DATA);
3840 if (m == NULL)
3841 return NULL;
3842 if (len + leading_len > MHLEN) {
3843 m_clget(m, M_DONTWAIT);
3844 if ((m->m_flags & M_EXT) == 0) {
3845 m_free(m);
3846 return NULL;
3847 }
3848 }
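	/*
	 * Reserve leading_len bytes of headroom so that the UDP, IP and
	 * link-layer headers can be prepended later without allocating
	 * another mbuf.
	 */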
3849 m->m_data += leading_len;
3850 m->m_pkthdr.len = m->m_len = len;
3851
3852 return m;
3853 }
3854
3855 static int
3856 wg_send_data_msg(struct wg_peer *wgp, struct wg_session *wgs,
3857 struct mbuf *m)
3858 {
3859 struct wg_softc *wg = wgp->wgp_sc;
3860 int error;
3861 size_t inner_len, padded_len, encrypted_len;
3862 char *padded_buf = NULL;
3863 size_t mlen;
3864 struct wg_msg_data *wgmd;
3865 bool free_padded_buf = false;
3866 struct mbuf *n;
3867 size_t leading_len = max_linkhdr + sizeof(struct ip6_hdr) +
3868 sizeof(struct udphdr);
3869
3870 mlen = m_length(m);
3871 inner_len = mlen;
3872 padded_len = roundup(mlen, 16);
3873 encrypted_len = padded_len + WG_AUTHTAG_LEN;
3874 WG_DLOG("inner=%lu, padded=%lu, encrypted_len=%lu\n",
3875 inner_len, padded_len, encrypted_len);
3876 if (mlen != 0) {
3877 bool success;
3878 success = m_ensure_contig(&m, padded_len);
3879 if (success) {
3880 padded_buf = mtod(m, char *);
3881 } else {
3882 padded_buf = kmem_intr_alloc(padded_len, KM_NOSLEEP);
3883 if (padded_buf == NULL) {
3884 error = ENOBUFS;
3885 goto end;
3886 }
3887 free_padded_buf = true;
3888 m_copydata(m, 0, mlen, padded_buf);
3889 }
3890 memset(padded_buf + mlen, 0, padded_len - inner_len);
3891 }
3892
3893 n = wg_get_mbuf(leading_len, sizeof(*wgmd) + encrypted_len);
3894 if (n == NULL) {
3895 error = ENOBUFS;
3896 goto end;
3897 }
3898 KASSERT(n->m_len >= sizeof(*wgmd));
3899 wgmd = mtod(n, struct wg_msg_data *);
3900 wg_fill_msg_data(wg, wgp, wgs, wgmd);
3901 /* [W] 5.4.6: AEAD(Tm^send, Nm^send, P, e) */
3902 wg_algo_aead_enc((char *)wgmd + sizeof(*wgmd), encrypted_len,
3903 wgs->wgs_tkey_send, le64toh(wgmd->wgmd_counter),
3904 padded_buf, padded_len,
3905 NULL, 0);
3906
3907 error = wg->wg_ops->send_data_msg(wgp, n);
3908 if (error == 0) {
3909 struct ifnet *ifp = &wg->wg_if;
3910 if_statadd(ifp, if_obytes, mlen);
3911 if_statinc(ifp, if_opackets);
3912 if (wgs->wgs_is_initiator &&
3913 wgs->wgs_time_last_data_sent == 0) {
3914 /*
3915 * [W] 6.2 Transport Message Limits
3916 * "if a peer is the initiator of a current secure
3917 * session, WireGuard will send a handshake initiation
3918 * message to begin a new secure session if, after
3919 * transmitting a transport data message, the current
3920 * secure session is REKEY-AFTER-TIME seconds old,"
3921 */
3922 wg_schedule_rekey_timer(wgp);
3923 }
3924 wgs->wgs_time_last_data_sent = time_uptime;
3925 if (wg_session_get_send_counter(wgs) >=
3926 wg_rekey_after_messages) {
3927 /*
3928 * [W] 6.2 Transport Message Limits
3929 * "WireGuard will try to create a new session, by
3930 * sending a handshake initiation message (section
3931 * 5.4.2), after it has sent REKEY-AFTER-MESSAGES
3932 * transport data messages..."
3933 */
3934 wg_schedule_peer_task(wgp, WGP_TASK_SEND_INIT_MESSAGE);
3935 }
3936 }
3937 end:
3938 m_freem(m);
3939 if (free_padded_buf)
3940 kmem_intr_free(padded_buf, padded_len);
3941 return error;
3942 }
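
/*
 * Worked size example for wg_send_data_msg(), to make the arithmetic
 * above concrete: for a 1420-byte inner packet, padded_len =
 * roundup(1420, 16) = 1424 and encrypted_len = 1424 + WG_AUTHTAG_LEN
 * (16) = 1440, so the mbuf obtained from wg_get_mbuf() carries
 * sizeof(struct wg_msg_data) (16 bytes on the wire) + 1440 = 1456
 * bytes of WireGuard payload before the outer UDP/IP headers are
 * prepended in the reserved headroom.
 */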
3943
3944 static void
3945 wg_input(struct ifnet *ifp, struct mbuf *m, const int af)
3946 {
3947 pktqueue_t *pktq;
3948 size_t pktlen;
3949
3950 KASSERT(af == AF_INET || af == AF_INET6);
3951
3952 WG_TRACE("");
3953
3954 m_set_rcvif(m, ifp);
3955 pktlen = m->m_pkthdr.len;
3956
3957 bpf_mtap_af(ifp, af, m, BPF_D_IN);
3958
3959 switch (af) {
3960 case AF_INET:
3961 pktq = ip_pktq;
3962 break;
3963 #ifdef INET6
3964 case AF_INET6:
3965 pktq = ip6_pktq;
3966 break;
3967 #endif
3968 default:
3969 panic("invalid af=%d", af);
3970 }
3971
3972 const u_int h = curcpu()->ci_index;
3973 if (__predict_true(pktq_enqueue(pktq, m, h))) {
3974 if_statadd(ifp, if_ibytes, pktlen);
3975 if_statinc(ifp, if_ipackets);
3976 } else {
3977 m_freem(m);
3978 }
3979 }
3980
3981 static void
3982 wg_calc_pubkey(uint8_t pubkey[WG_STATIC_KEY_LEN],
3983 const uint8_t privkey[WG_STATIC_KEY_LEN])
3984 {
3985
3986 crypto_scalarmult_base(pubkey, privkey);
3987 }
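
/*
 * wg_calc_pubkey() is plain X25519 base-point multiplication.  As an
 * illustration only (not part of the driver), the same derivation can
 * be done in userland with libsodium, whose crypto_scalarmult_base()
 * implements Curve25519 with the standard base point:
 *
 *	#include <sodium.h>
 *
 *	static int
 *	derive_wg_pubkey(unsigned char pub[crypto_scalarmult_BYTES],
 *	    const unsigned char priv[crypto_scalarmult_SCALARBYTES])
 *	{
 *
 *		if (sodium_init() == -1)
 *			return -1;
 *		return crypto_scalarmult_base(pub, priv);
 *	}
 */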
3988
3989 static int
3990 wg_rtable_add_route(struct wg_softc *wg, struct wg_allowedip *wga)
3991 {
3992 struct radix_node_head *rnh;
3993 struct radix_node *rn;
3994 int error = 0;
3995
3996 rw_enter(wg->wg_rwlock, RW_WRITER);
3997 rnh = wg_rnh(wg, wga->wga_family);
3998 KASSERT(rnh != NULL);
3999 rn = rnh->rnh_addaddr(&wga->wga_sa_addr, &wga->wga_sa_mask, rnh,
4000 wga->wga_nodes);
4001 rw_exit(wg->wg_rwlock);
4002
4003 if (rn == NULL)
4004 error = EEXIST;
4005
4006 return error;
4007 }
4008
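/*
 * wg_handle_prop_peer: construct a struct wg_peer from a proplib
 * dictionary handed in over SIOCSDRVSPEC (WG_IOCTL_ADD_PEER).  The
 * keys consumed below are:
 *
 *	"name"		string, at most WG_PEER_NAME_MAXLEN bytes (optional)
 *	"public_key"	data, WG_STATIC_KEY_LEN bytes (required)
 *	"preshared_key"	data, WG_PRESHARED_KEY_LEN bytes (optional)
 *	"endpoint"	data, a struct sockaddr_in or sockaddr_in6 (optional)
 *	"allowedips"	array of dictionaries with "family" (int),
 *			"ip" (data) and "cidr" (uint8) (optional)
 */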
4009 static int
4010 wg_handle_prop_peer(struct wg_softc *wg, prop_dictionary_t peer,
4011 struct wg_peer **wgpp)
4012 {
4013 int error = 0;
4014 const void *pubkey;
4015 size_t pubkey_len;
4016 const void *psk;
4017 size_t psk_len;
4018 const char *name = NULL;
4019
4020 if (prop_dictionary_get_string(peer, "name", &name)) {
4021 if (strlen(name) > WG_PEER_NAME_MAXLEN) {
4022 error = EINVAL;
4023 goto out;
4024 }
4025 }
4026
4027 if (!prop_dictionary_get_data(peer, "public_key",
4028 &pubkey, &pubkey_len)) {
4029 error = EINVAL;
4030 goto out;
4031 }
	if (pubkey_len != WG_STATIC_KEY_LEN) {
		error = EINVAL;
		goto out;
	}
4032 #ifdef WG_DEBUG_DUMP
4033 {
4034 char *hex = gethexdump(pubkey, pubkey_len);
4035 log(LOG_DEBUG, "pubkey=%p, pubkey_len=%lu\n%s\n",
4036 pubkey, pubkey_len, hex);
4037 puthexdump(hex, pubkey, pubkey_len);
4038 }
4039 #endif
4040
4041 struct wg_peer *wgp = wg_alloc_peer(wg);
4042 memcpy(wgp->wgp_pubkey, pubkey, sizeof(wgp->wgp_pubkey));
4043 if (name != NULL)
4044 strncpy(wgp->wgp_name, name, sizeof(wgp->wgp_name));
4045
4046 if (prop_dictionary_get_data(peer, "preshared_key", &psk, &psk_len)) {
4047 if (psk_len != sizeof(wgp->wgp_psk)) {
4048 error = EINVAL;
4049 goto out;
4050 }
4051 memcpy(wgp->wgp_psk, psk, sizeof(wgp->wgp_psk));
4052 }
4053
4054 const void *addr;
4055 size_t addr_len;
4056 struct wg_sockaddr *wgsa = wgp->wgp_endpoint;
4057
4058 if (!prop_dictionary_get_data(peer, "endpoint", &addr, &addr_len))
4059 goto skip_endpoint;
4060 if (addr_len < sizeof(*wgsatosa(wgsa)) ||
4061 addr_len > sizeof(*wgsatoss(wgsa))) {
4062 error = EINVAL;
4063 goto out;
4064 }
4065 memcpy(wgsatoss(wgsa), addr, addr_len);
4066 switch (wgsa_family(wgsa)) {
4067 case AF_INET:
4068 #ifdef INET6
4069 case AF_INET6:
4070 #endif
4071 break;
4072 default:
4073 error = EPFNOSUPPORT;
4074 goto out;
4075 }
4076 if (addr_len != sockaddr_getsize_by_family(wgsa_family(wgsa))) {
4077 error = EINVAL;
4078 goto out;
4079 }
4080 {
4081 char addrstr[128];
4082 sockaddr_format(wgsatosa(wgsa), addrstr, sizeof(addrstr));
4083 WG_DLOG("addr=%s\n", addrstr);
4084 }
4085 wgp->wgp_endpoint_available = true;
4086
4087 prop_array_t allowedips;
4088 skip_endpoint:
4089 allowedips = prop_dictionary_get(peer, "allowedips");
4090 if (allowedips == NULL)
4091 goto skip;
4092
4093 prop_object_iterator_t _it = prop_array_iterator(allowedips);
4094 prop_dictionary_t prop_allowedip;
4095 int j = 0;
4096 while ((prop_allowedip = prop_object_iterator_next(_it)) != NULL) {
4097 struct wg_allowedip *wga = &wgp->wgp_allowedips[j];
4098
4099 if (!prop_dictionary_get_int(prop_allowedip, "family",
4100 &wga->wga_family))
4101 continue;
4102 if (!prop_dictionary_get_data(prop_allowedip, "ip",
4103 &addr, &addr_len))
4104 continue;
4105 if (!prop_dictionary_get_uint8(prop_allowedip, "cidr",
4106 &wga->wga_cidr))
4107 continue;
4108
4109 switch (wga->wga_family) {
4110 case AF_INET: {
4111 struct sockaddr_in sin;
4112 char addrstr[128];
4113 struct in_addr mask;
4114 struct sockaddr_in sin_mask;
4115
4116 if (addr_len != sizeof(struct in_addr))
4117 return EINVAL;
4118 memcpy(&wga->wga_addr4, addr, addr_len);
4119
4120 sockaddr_in_init(&sin, (const struct in_addr *)addr,
4121 0);
4122 sockaddr_copy(&wga->wga_sa_addr,
4123 sizeof(sin), sintosa(&sin));
4124
4125 sockaddr_format(sintosa(&sin),
4126 addrstr, sizeof(addrstr));
4127 WG_DLOG("addr=%s/%d\n", addrstr, wga->wga_cidr);
4128
4129 in_len2mask(&mask, wga->wga_cidr);
4130 sockaddr_in_init(&sin_mask, &mask, 0);
4131 sockaddr_copy(&wga->wga_sa_mask,
4132 sizeof(sin_mask), sintosa(&sin_mask));
4133
4134 break;
4135 }
4136 #ifdef INET6
4137 case AF_INET6: {
4138 struct sockaddr_in6 sin6;
4139 char addrstr[128];
4140 struct in6_addr mask;
4141 struct sockaddr_in6 sin6_mask;
4142
4143 if (addr_len != sizeof(struct in6_addr))
4144 return EINVAL;
4145 memcpy(&wga->wga_addr6, addr, addr_len);
4146
4147 sockaddr_in6_init(&sin6, (const struct in6_addr *)addr,
4148 0, 0, 0);
4149 sockaddr_copy(&wga->wga_sa_addr,
4150 sizeof(sin6), sin6tosa(&sin6));
4151
4152 sockaddr_format(sin6tosa(&sin6),
4153 addrstr, sizeof(addrstr));
4154 WG_DLOG("addr=%s/%d\n", addrstr, wga->wga_cidr);
4155
4156 in6_prefixlen2mask(&mask, wga->wga_cidr);
4157 sockaddr_in6_init(&sin6_mask, &mask, 0, 0, 0);
4158 sockaddr_copy(&wga->wga_sa_mask,
4159 sizeof(sin6_mask), sin6tosa(&sin6_mask));
4160
4161 break;
4162 }
4163 #endif
4164 default:
4165 error = EINVAL;
4166 goto out;
4167 }
4168 wga->wga_peer = wgp;
4169
4170 error = wg_rtable_add_route(wg, wga);
4171 if (error != 0)
4172 goto out;
4173
4174 j++;
4175 }
	prop_object_iterator_release(_it);
4176 wgp->wgp_n_allowedips = j;
4177 skip:
4178 *wgpp = wgp;
4179 out:
4180 return error;
4181 }
4182
4183 static int
4184 wg_alloc_prop_buf(char **_buf, struct ifdrv *ifd)
4185 {
4186 int error;
4187 char *buf;
4188
4189 WG_DLOG("buf=%p, len=%lu\n", ifd->ifd_data, ifd->ifd_len);
4190 buf = kmem_alloc(ifd->ifd_len + 1, KM_SLEEP);
4191 error = copyin(ifd->ifd_data, buf, ifd->ifd_len);
4192 if (error != 0) {
		kmem_free(buf, ifd->ifd_len + 1);
4193 		return error;
	}
4194 buf[ifd->ifd_len] = '\0';
4195 #ifdef WG_DEBUG_DUMP
4196 log(LOG_DEBUG, "%.*s\n",
4197 (int)MIN(INT_MAX, ifd->ifd_len),
4198 (const char *)buf);
4199 #endif
4200 *_buf = buf;
4201 return 0;
4202 }
4203
4204 static int
4205 wg_ioctl_set_private_key(struct wg_softc *wg, struct ifdrv *ifd)
4206 {
4207 int error;
4208 prop_dictionary_t prop_dict;
4209 char *buf = NULL;
4210 const void *privkey;
4211 size_t privkey_len;
4212
4213 error = wg_alloc_prop_buf(&buf, ifd);
4214 if (error != 0)
4215 return error;
4216 error = EINVAL;
4217 prop_dict = prop_dictionary_internalize(buf);
4218 if (prop_dict == NULL)
4219 goto out;
4220 if (!prop_dictionary_get_data(prop_dict, "private_key",
4221 &privkey, &privkey_len))
4222 goto out;
4223 #ifdef WG_DEBUG_DUMP
4224 {
4225 char *hex = gethexdump(privkey, privkey_len);
4226 log(LOG_DEBUG, "privkey=%p, privkey_len=%lu\n%s\n",
4227 privkey, privkey_len, hex);
4228 puthexdump(hex, privkey, privkey_len);
4229 }
4230 #endif
4231 if (privkey_len != WG_STATIC_KEY_LEN)
4232 goto out;
4233 memcpy(wg->wg_privkey, privkey, WG_STATIC_KEY_LEN);
4234 wg_calc_pubkey(wg->wg_pubkey, wg->wg_privkey);
4235 error = 0;
4236
4237 out:
	if (prop_dict != NULL)
		prop_object_release(prop_dict);
4238 kmem_free(buf, ifd->ifd_len + 1);
4239 return error;
4240 }
4241
4242 static int
4243 wg_ioctl_set_listen_port(struct wg_softc *wg, struct ifdrv *ifd)
4244 {
4245 int error;
4246 prop_dictionary_t prop_dict;
4247 char *buf = NULL;
4248 uint16_t port;
4249
4250 error = wg_alloc_prop_buf(&buf, ifd);
4251 if (error != 0)
4252 return error;
4253 error = EINVAL;
4254 prop_dict = prop_dictionary_internalize(buf);
4255 if (prop_dict == NULL)
4256 goto out;
4257 if (!prop_dictionary_get_uint16(prop_dict, "listen_port", &port))
4258 goto out;
4259
4260 error = wg->wg_ops->bind_port(wg, (uint16_t)port);
4261
4262 out:
	if (prop_dict != NULL)
		prop_object_release(prop_dict);
4263 kmem_free(buf, ifd->ifd_len + 1);
4264 return error;
4265 }
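
/*
 * Rebinding the listen port goes through wg->wg_ops->bind_port, so the
 * same ioctl works both with the in-kernel UDP sockets and, in a rump
 * kernel in user mode, with the host socket managed by
 * wg_bind_port_user() below.
 */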
4266
4267 static int
4268 wg_ioctl_add_peer(struct wg_softc *wg, struct ifdrv *ifd)
4269 {
4270 int error;
4271 prop_dictionary_t prop_dict;
4272 char *buf = NULL;
4273 struct wg_peer *wgp = NULL, *wgp0 __diagused;
4274
4275 error = wg_alloc_prop_buf(&buf, ifd);
4276 if (error != 0)
4277 return error;
4278 error = EINVAL;
4279 prop_dict = prop_dictionary_internalize(buf);
4280 if (prop_dict == NULL)
4281 goto out;
4282
4283 error = wg_handle_prop_peer(wg, prop_dict, &wgp);
4284 if (error != 0)
4285 goto out;
4286
4287 mutex_enter(wg->wg_lock);
4288 if (thmap_get(wg->wg_peers_bypubkey, wgp->wgp_pubkey,
4289 sizeof(wgp->wgp_pubkey)) != NULL ||
4290 (wgp->wgp_name[0] &&
4291 thmap_get(wg->wg_peers_byname, wgp->wgp_name,
4292 strlen(wgp->wgp_name)) != NULL)) {
4293 mutex_exit(wg->wg_lock);
4294 wg_destroy_peer(wgp);
4295 error = EEXIST;
4296 goto out;
4297 }
4298 wgp0 = thmap_put(wg->wg_peers_bypubkey, wgp->wgp_pubkey,
4299 sizeof(wgp->wgp_pubkey), wgp);
4300 KASSERT(wgp0 == wgp);
4301 if (wgp->wgp_name[0]) {
4302 wgp0 = thmap_put(wg->wg_peers_byname, wgp->wgp_name,
4303 strlen(wgp->wgp_name), wgp);
4304 KASSERT(wgp0 == wgp);
4305 }
4306 WG_PEER_WRITER_INSERT_HEAD(wgp, wg);
4307 wg->wg_npeers++;
4308 mutex_exit(wg->wg_lock);
4309
4310 out:
	if (prop_dict != NULL)
		prop_object_release(prop_dict);
4311 kmem_free(buf, ifd->ifd_len + 1);
4312 return error;
4313 }
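
/*
 * Userland sketch of driving WG_IOCTL_ADD_PEER above.  This is only an
 * illustration of the interface (in the spirit of a wgconfig(8)-like
 * tool), with error handling elided; it assumes the proplib(3)
 * userland library and the WG_* constants from <net/if_wg.h>.  The
 * socket passed in can be any datagram socket, e.g.
 * socket(AF_INET, SOCK_DGRAM, 0).
 *
 *	#include <sys/ioctl.h>
 *	#include <net/if.h>
 *	#include <net/if_wg.h>
 *	#include <prop/proplib.h>
 *	#include <stdint.h>
 *	#include <stdlib.h>
 *	#include <string.h>
 *
 *	static int
 *	wg_add_peer_example(int sock, const char *ifname,
 *	    const uint8_t pubkey[WG_STATIC_KEY_LEN])
 *	{
 *		prop_dictionary_t d = prop_dictionary_create();
 *		struct ifdrv ifd;
 *		char *buf;
 *		int error;
 *
 *		prop_dictionary_set_string(d, "name", "peer0");
 *		prop_dictionary_set_data(d, "public_key", pubkey,
 *		    WG_STATIC_KEY_LEN);
 *		buf = prop_dictionary_externalize(d);
 *
 *		memset(&ifd, 0, sizeof(ifd));
 *		strlcpy(ifd.ifd_name, ifname, sizeof(ifd.ifd_name));
 *		ifd.ifd_cmd = WG_IOCTL_ADD_PEER;
 *		ifd.ifd_data = buf;
 *		ifd.ifd_len = strlen(buf);
 *		error = ioctl(sock, SIOCSDRVSPEC, &ifd);
 *
 *		free(buf);
 *		prop_object_release(d);
 *		return error;
 *	}
 */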
4314
4315 static int
4316 wg_ioctl_delete_peer(struct wg_softc *wg, struct ifdrv *ifd)
4317 {
4318 int error;
4319 prop_dictionary_t prop_dict;
4320 char *buf = NULL;
4321 const char *name;
4322
4323 error = wg_alloc_prop_buf(&buf, ifd);
4324 if (error != 0)
4325 return error;
4326 error = EINVAL;
4327 prop_dict = prop_dictionary_internalize(buf);
4328 if (prop_dict == NULL)
4329 goto out;
4330
4331 if (!prop_dictionary_get_string(prop_dict, "name", &name))
4332 goto out;
4333 if (strlen(name) > WG_PEER_NAME_MAXLEN)
4334 goto out;
4335
4336 error = wg_destroy_peer_name(wg, name);
4337 out:
	if (prop_dict != NULL)
		prop_object_release(prop_dict);
4338 kmem_free(buf, ifd->ifd_len + 1);
4339 return error;
4340 }
4341
4342 static int
4343 wg_ioctl_get(struct wg_softc *wg, struct ifdrv *ifd)
4344 {
4345 int error = ENOMEM;
4346 prop_dictionary_t prop_dict;
4347 prop_array_t peers = NULL;
4348 char *buf;
4349 struct wg_peer *wgp;
4350 int s, i;
4351
4352 prop_dict = prop_dictionary_create();
4353 if (prop_dict == NULL)
4354 goto error;
4355
4356 if (!prop_dictionary_set_data(prop_dict, "private_key", wg->wg_privkey,
4357 WG_STATIC_KEY_LEN))
4358 goto error;
4359
4360 if (wg->wg_listen_port != 0) {
4361 if (!prop_dictionary_set_uint16(prop_dict, "listen_port",
4362 wg->wg_listen_port))
4363 goto error;
4364 }
4365
4366 if (wg->wg_npeers == 0)
4367 goto skip_peers;
4368
4369 peers = prop_array_create();
4370 if (peers == NULL)
4371 goto error;
4372
4373 s = pserialize_read_enter();
4374 i = 0;
4375 WG_PEER_READER_FOREACH(wgp, wg) {
4376 struct wg_sockaddr *wgsa;
4377 struct psref wgp_psref, wgsa_psref;
4378 prop_dictionary_t prop_peer;
4379
4380 wg_get_peer(wgp, &wgp_psref);
4381 pserialize_read_exit(s);
4382
4383 prop_peer = prop_dictionary_create();
4384 if (prop_peer == NULL)
4385 goto next;
4386
4387 if (strlen(wgp->wgp_name) > 0) {
4388 if (!prop_dictionary_set_string(prop_peer, "name",
4389 wgp->wgp_name))
4390 goto next;
4391 }
4392
4393 if (!prop_dictionary_set_data(prop_peer, "public_key",
4394 wgp->wgp_pubkey, sizeof(wgp->wgp_pubkey)))
4395 goto next;
4396
4397 uint8_t psk_zero[WG_PRESHARED_KEY_LEN] = {0};
4398 if (!consttime_memequal(wgp->wgp_psk, psk_zero,
4399 sizeof(wgp->wgp_psk))) {
4400 if (!prop_dictionary_set_data(prop_peer,
4401 "preshared_key",
4402 wgp->wgp_psk, sizeof(wgp->wgp_psk)))
4403 goto next;
4404 }
4405
4406 wgsa = wg_get_endpoint_sa(wgp, &wgsa_psref);
4407 CTASSERT(AF_UNSPEC == 0);
4408 if (wgsa_family(wgsa) != 0 /*AF_UNSPEC*/ &&
4409 !prop_dictionary_set_data(prop_peer, "endpoint",
4410 wgsatoss(wgsa),
4411 sockaddr_getsize_by_family(wgsa_family(wgsa)))) {
4412 wg_put_sa(wgp, wgsa, &wgsa_psref);
4413 goto next;
4414 }
4415 wg_put_sa(wgp, wgsa, &wgsa_psref);
4416
4417 const struct timespec *t = &wgp->wgp_last_handshake_time;
4418
4419 if (!prop_dictionary_set_uint64(prop_peer,
4420 "last_handshake_time_sec", t->tv_sec))
4421 goto next;
4422 if (!prop_dictionary_set_uint32(prop_peer,
4423 "last_handshake_time_nsec", t->tv_nsec))
4424 goto next;
4425
4426 if (wgp->wgp_n_allowedips == 0)
4427 goto skip_allowedips;
4428
4429 prop_array_t allowedips = prop_array_create();
4430 if (allowedips == NULL)
4431 goto next;
4432 for (int j = 0; j < wgp->wgp_n_allowedips; j++) {
4433 struct wg_allowedip *wga = &wgp->wgp_allowedips[j];
4434 prop_dictionary_t prop_allowedip;
4435
4436 prop_allowedip = prop_dictionary_create();
4437 if (prop_allowedip == NULL)
4438 break;
4439
4440 if (!prop_dictionary_set_int(prop_allowedip, "family",
4441 wga->wga_family))
4442 goto _next;
4443 if (!prop_dictionary_set_uint8(prop_allowedip, "cidr",
4444 wga->wga_cidr))
4445 goto _next;
4446
4447 switch (wga->wga_family) {
4448 case AF_INET:
4449 if (!prop_dictionary_set_data(prop_allowedip,
4450 "ip", &wga->wga_addr4,
4451 sizeof(wga->wga_addr4)))
4452 goto _next;
4453 break;
4454 #ifdef INET6
4455 case AF_INET6:
4456 if (!prop_dictionary_set_data(prop_allowedip,
4457 "ip", &wga->wga_addr6,
4458 sizeof(wga->wga_addr6)))
4459 goto _next;
4460 break;
4461 #endif
4462 default:
4463 break;
4464 }
4465 prop_array_set(allowedips, j, prop_allowedip);
4466 _next:
4467 prop_object_release(prop_allowedip);
4468 }
4469 prop_dictionary_set(prop_peer, "allowedips", allowedips);
4470 prop_object_release(allowedips);
4471
4472 skip_allowedips:
4473
4474 prop_array_set(peers, i, prop_peer);
4475 next:
4476 if (prop_peer)
4477 prop_object_release(prop_peer);
4478 i++;
4479
4480 s = pserialize_read_enter();
4481 wg_put_peer(wgp, &wgp_psref);
4482 }
4483 pserialize_read_exit(s);
4484
4485 prop_dictionary_set(prop_dict, "peers", peers);
4486 prop_object_release(peers);
4487 peers = NULL;
4488
4489 skip_peers:
4490 buf = prop_dictionary_externalize(prop_dict);
4491 if (buf == NULL)
4492 goto error;
4493 if (ifd->ifd_len < (strlen(buf) + 1)) {
4494 error = EINVAL;
4495 goto error;
4496 }
4497 error = copyout(buf, ifd->ifd_data, strlen(buf) + 1);
4498
4499 free(buf, 0);
4500 error:
4501 if (peers != NULL)
4502 prop_object_release(peers);
4503 if (prop_dict != NULL)
4504 prop_object_release(prop_dict);
4505
4506 return error;
4507 }
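
/*
 * For SIOCGDRVSPEC the caller supplies the output buffer: ifd_data
 * points at it and ifd_len gives its size, and EINVAL is returned if
 * the externalized plist (including its terminating NUL) does not fit.
 * Userland turns the copied-out string back into a dictionary with
 * prop_dictionary_internalize().
 */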
4508
4509 static int
4510 wg_ioctl(struct ifnet *ifp, u_long cmd, void *data)
4511 {
4512 struct wg_softc *wg = ifp->if_softc;
4513 struct ifreq *ifr = data;
4514 struct ifaddr *ifa = data;
4515 struct ifdrv *ifd = data;
4516 int error = 0;
4517
4518 switch (cmd) {
4519 case SIOCINITIFADDR:
4520 if (ifa->ifa_addr->sa_family != AF_LINK &&
4521 (ifp->if_flags & (IFF_UP | IFF_RUNNING)) !=
4522 (IFF_UP | IFF_RUNNING)) {
4523 ifp->if_flags |= IFF_UP;
4524 error = ifp->if_init(ifp);
4525 }
4526 return error;
4527 case SIOCADDMULTI:
4528 case SIOCDELMULTI:
4529 switch (ifr->ifr_addr.sa_family) {
4530 case AF_INET: /* IP supports Multicast */
4531 break;
4532 #ifdef INET6
4533 case AF_INET6: /* IP6 supports Multicast */
4534 break;
4535 #endif
4536 default: /* Other protocols don't support Multicast */
4537 error = EAFNOSUPPORT;
4538 break;
4539 }
4540 return error;
4541 case SIOCSDRVSPEC:
4542 switch (ifd->ifd_cmd) {
4543 case WG_IOCTL_SET_PRIVATE_KEY:
4544 error = wg_ioctl_set_private_key(wg, ifd);
4545 break;
4546 case WG_IOCTL_SET_LISTEN_PORT:
4547 error = wg_ioctl_set_listen_port(wg, ifd);
4548 break;
4549 case WG_IOCTL_ADD_PEER:
4550 error = wg_ioctl_add_peer(wg, ifd);
4551 break;
4552 case WG_IOCTL_DELETE_PEER:
4553 error = wg_ioctl_delete_peer(wg, ifd);
4554 break;
4555 default:
4556 error = EINVAL;
4557 break;
4558 }
4559 return error;
4560 case SIOCGDRVSPEC:
4561 return wg_ioctl_get(wg, ifd);
4562 case SIOCSIFFLAGS:
4563 if ((error = ifioctl_common(ifp, cmd, data)) != 0)
4564 break;
4565 switch (ifp->if_flags & (IFF_UP|IFF_RUNNING)) {
4566 case IFF_RUNNING:
4567 /*
4568 * If interface is marked down and it is running,
4569 * then stop and disable it.
4570 */
4571 (*ifp->if_stop)(ifp, 1);
4572 break;
4573 case IFF_UP:
4574 /*
4575 * If interface is marked up and it is stopped, then
4576 * start it.
4577 */
4578 error = (*ifp->if_init)(ifp);
4579 break;
4580 default:
4581 break;
4582 }
4583 return error;
4584 #ifdef WG_RUMPKERNEL
4585 case SIOCSLINKSTR:
4586 error = wg_ioctl_linkstr(wg, ifd);
4587 if (error == 0)
4588 wg->wg_ops = &wg_ops_rumpuser;
4589 return error;
4590 #endif
4591 default:
4592 break;
4593 }
4594
4595 error = ifioctl_common(ifp, cmd, data);
4596
4597 #ifdef WG_RUMPKERNEL
4598 if (!wg_user_mode(wg))
4599 return error;
4600
4601 /* Do the same to the corresponding tun device on the host */
4602 /*
4603 * XXX Actually the command has not been handled yet. It
4604 * will be handled via pr_ioctl from doifioctl later.
4605 */
4606 switch (cmd) {
4607 case SIOCAIFADDR:
4608 case SIOCDIFADDR: {
4609 struct in_aliasreq _ifra = *(const struct in_aliasreq *)data;
4610 struct in_aliasreq *ifra = &_ifra;
4611 KASSERT(error == ENOTTY);
4612 strncpy(ifra->ifra_name, rumpuser_wg_get_tunname(wg->wg_user),
4613 IFNAMSIZ);
4614 error = rumpuser_wg_ioctl(wg->wg_user, cmd, ifra, AF_INET);
4615 if (error == 0)
4616 error = ENOTTY;
4617 break;
4618 }
4619 #ifdef INET6
4620 case SIOCAIFADDR_IN6:
4621 case SIOCDIFADDR_IN6: {
4622 struct in6_aliasreq _ifra = *(const struct in6_aliasreq *)data;
4623 struct in6_aliasreq *ifra = &_ifra;
4624 KASSERT(error == ENOTTY);
4625 strncpy(ifra->ifra_name, rumpuser_wg_get_tunname(wg->wg_user),
4626 IFNAMSIZ);
4627 error = rumpuser_wg_ioctl(wg->wg_user, cmd, ifra, AF_INET6);
4628 if (error == 0)
4629 error = ENOTTY;
4630 break;
4631 }
4632 #endif
4633 }
4634 #endif /* WG_RUMPKERNEL */
4635
4636 return error;
4637 }
4638
4639 static int
4640 wg_init(struct ifnet *ifp)
4641 {
4642
4643 ifp->if_flags |= IFF_RUNNING;
4644
4645 /* TODO flush pending packets. */
4646 return 0;
4647 }
4648
4649 static void
4650 wg_stop(struct ifnet *ifp, int disable)
4651 {
4652
4653 KASSERT((ifp->if_flags & IFF_RUNNING) != 0);
4654 ifp->if_flags &= ~IFF_RUNNING;
4655
4656 /* Need to do something? */
4657 }
4658
4659 #ifdef WG_DEBUG_PARAMS
4660 SYSCTL_SETUP(sysctl_net_wg_setup, "sysctl net.wg setup")
4661 {
4662 const struct sysctlnode *node = NULL;
4663
4664 sysctl_createv(clog, 0, NULL, &node,
4665 CTLFLAG_PERMANENT,
4666 CTLTYPE_NODE, "wg",
4667 SYSCTL_DESCR("wg(4)"),
4668 NULL, 0, NULL, 0,
4669 CTL_NET, CTL_CREATE, CTL_EOL);
4670 sysctl_createv(clog, 0, &node, NULL,
4671 CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
4672 CTLTYPE_QUAD, "rekey_after_messages",
4673 SYSCTL_DESCR("session lifetime in messages"),
4674 NULL, 0, &wg_rekey_after_messages, 0, CTL_CREATE, CTL_EOL);
4675 sysctl_createv(clog, 0, &node, NULL,
4676 CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
4677 CTLTYPE_INT, "rekey_after_time",
4678 SYSCTL_DESCR("session lifetime in seconds"),
4679 NULL, 0, &wg_rekey_after_time, 0, CTL_CREATE, CTL_EOL);
4680 sysctl_createv(clog, 0, &node, NULL,
4681 CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
4682 CTLTYPE_INT, "rekey_timeout",
4683 SYSCTL_DESCR("session handshake retry time"),
4684 NULL, 0, &wg_rekey_timeout, 0, CTL_CREATE, CTL_EOL);
4685 sysctl_createv(clog, 0, &node, NULL,
4686 CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
4687 CTLTYPE_INT, "rekey_attempt_time",
4688 SYSCTL_DESCR("session handshake timeout"),
4689 NULL, 0, &wg_rekey_attempt_time, 0, CTL_CREATE, CTL_EOL);
4690 sysctl_createv(clog, 0, &node, NULL,
4691 CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
4692 CTLTYPE_INT, "keepalive_timeout",
4693 SYSCTL_DESCR("keepalive timeout"),
4694 NULL, 0, &wg_keepalive_timeout, 0, CTL_CREATE, CTL_EOL);
4695 sysctl_createv(clog, 0, &node, NULL,
4696 CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
4697 CTLTYPE_BOOL, "force_underload",
4698 SYSCTL_DESCR("force the under-load condition"),
4699 NULL, 0, &wg_force_underload, 0, CTL_CREATE, CTL_EOL);
4700 }
4701 #endif
4702
4703 #ifdef WG_RUMPKERNEL
4704 static bool
4705 wg_user_mode(struct wg_softc *wg)
4706 {
4707
4708 return wg->wg_user != NULL;
4709 }
4710
4711 static int
4712 wg_ioctl_linkstr(struct wg_softc *wg, struct ifdrv *ifd)
4713 {
4714 struct ifnet *ifp = &wg->wg_if;
4715 int error;
4716
4717 if (ifp->if_flags & IFF_UP)
4718 return EBUSY;
4719
4720 if (ifd->ifd_cmd == IFLINKSTR_UNSET) {
4721 /* XXX do nothing */
4722 return 0;
4723 } else if (ifd->ifd_cmd != 0) {
4724 return EINVAL;
4725 } else if (wg->wg_user != NULL) {
4726 return EBUSY;
4727 }
4728
4729 /* ifd_len is assumed to include the terminating '\0'. */
4730 if (ifd->ifd_len > IFNAMSIZ) {
4731 return E2BIG;
4732 } else if (ifd->ifd_len < 1) {
4733 return EINVAL;
4734 }
4735
4736 char tun_name[IFNAMSIZ];
4737 error = copyinstr(ifd->ifd_data, tun_name, ifd->ifd_len, NULL);
4738 if (error != 0)
4739 return error;
4740
4741 if (strncmp(tun_name, "tun", 3) != 0)
4742 return EINVAL;
4743
4744 error = rumpuser_wg_create(tun_name, wg, &wg->wg_user);
4745
4746 return error;
4747 }
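
/*
 * Once a link string naming a host tun(4) device (e.g. "tun0") has
 * been accepted here via SIOCSLINKSTR, wg_ioctl() switches wg_ops to
 * wg_ops_rumpuser, and packet I/O is proxied to the host through the
 * rumpuser_wg_* hypercalls used below instead of the in-kernel
 * sockets.
 */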
4748
4749 static int
4750 wg_send_user(struct wg_peer *wgp, struct mbuf *m)
4751 {
4752 int error;
4753 struct psref psref;
4754 struct wg_sockaddr *wgsa;
4755 struct wg_softc *wg = wgp->wgp_sc;
4756 struct iovec iov[1];
4757
4758 wgsa = wg_get_endpoint_sa(wgp, &psref);
4759
4760 iov[0].iov_base = mtod(m, void *);
4761 iov[0].iov_len = m->m_len;
4762
4763 /* Send messages to a peer via an ordinary socket. */
4764 error = rumpuser_wg_send_peer(wg->wg_user, wgsatosa(wgsa), iov, 1);
4765
4766 wg_put_sa(wgp, wgsa, &psref);
4767
4768 m_freem(m);
4769
4770 return error;
4771 }
4772
4773 static void
4774 wg_input_user(struct ifnet *ifp, struct mbuf *m, const int af)
4775 {
4776 struct wg_softc *wg = ifp->if_softc;
4777 struct iovec iov[2];
4778 struct sockaddr_storage ss;
4779
4780 KASSERT(af == AF_INET || af == AF_INET6);
4781
4782 WG_TRACE("");
4783
4784 if (af == AF_INET) {
4785 struct sockaddr_in *sin = (struct sockaddr_in *)&ss;
4786 struct ip *ip;
4787
4788 KASSERT(m->m_len >= sizeof(struct ip));
4789 ip = mtod(m, struct ip *);
4790 sockaddr_in_init(sin, &ip->ip_dst, 0);
4791 } else {
4792 struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&ss;
4793 struct ip6_hdr *ip6;
4794
4795 KASSERT(m->m_len >= sizeof(struct ip6_hdr));
4796 ip6 = mtod(m, struct ip6_hdr *);
4797 sockaddr_in6_init(sin6, &ip6->ip6_dst, 0, 0, 0);
4798 }
4799
4800 iov[0].iov_base = &ss;
4801 iov[0].iov_len = ss.ss_len;
4802 iov[1].iov_base = mtod(m, void *);
4803 iov[1].iov_len = m->m_len;
4804
4805 WG_DUMP_BUF(iov[1].iov_base, iov[1].iov_len);
4806
4807 /* Send decrypted packets to users via a tun. */
4808 rumpuser_wg_send_user(wg->wg_user, iov, 2);
4809
4810 m_freem(m);
4811 }
4812
4813 static int
4814 wg_bind_port_user(struct wg_softc *wg, const uint16_t port)
4815 {
4816 int error;
4817 uint16_t old_port = wg->wg_listen_port;
4818
4819 if (port != 0 && old_port == port)
4820 return 0;
4821
4822 error = rumpuser_wg_sock_bind(wg->wg_user, port);
4823 if (error == 0)
4824 wg->wg_listen_port = port;
4825 return error;
4826 }
4827
4828 /*
4829 * Receive user packets.
4830 */
4831 void
4832 rumpkern_wg_recv_user(struct wg_softc *wg, struct iovec *iov, size_t iovlen)
4833 {
4834 struct ifnet *ifp = &wg->wg_if;
4835 struct mbuf *m;
4836 const struct sockaddr *dst;
4837
4838 WG_TRACE("");
4839
4840 dst = iov[0].iov_base;
4841
4842 m = m_gethdr(M_DONTWAIT, MT_DATA);
4843 if (m == NULL)
4844 return;
4845 m->m_len = m->m_pkthdr.len = 0;
4846 m_copyback(m, 0, iov[1].iov_len, iov[1].iov_base);
4847
4848 WG_DLOG("iov_len=%lu\n", iov[1].iov_len);
4849 WG_DUMP_BUF(iov[1].iov_base, iov[1].iov_len);
4850
4851 (void)wg_output(ifp, m, dst, NULL);
4852 }
4853
4854 /*
4855 * Receive packets from a peer.
4856 */
4857 void
4858 rumpkern_wg_recv_peer(struct wg_softc *wg, struct iovec *iov, size_t iovlen)
4859 {
4860 struct mbuf *m;
4861 const struct sockaddr *src;
4862
4863 WG_TRACE("");
4864
4865 src = iov[0].iov_base;
4866
4867 m = m_gethdr(M_DONTWAIT, MT_DATA);
4868 if (m == NULL)
4869 return;
4870 m->m_len = m->m_pkthdr.len = 0;
4871 m_copyback(m, 0, iov[1].iov_len, iov[1].iov_base);
4872
4873 WG_DLOG("iov_len=%lu\n", iov[1].iov_len);
4874 WG_DUMP_BUF(iov[1].iov_base, iov[1].iov_len);
4875
4876 wg_handle_packet(wg, m, src);
4877 }
4878 #endif /* WG_RUMPKERNEL */
4879
4880 /*
4881 * Module infrastructure
4882 */
4883 #include "if_module.h"
4884
4885 IF_MODULE(MODULE_CLASS_DRIVER, wg, "")
4886