/*	$NetBSD: if_wg.c,v 1.62 2020/11/11 18:08:34 riastradh Exp $	*/
2
3 /*
 * Copyright (C) Ryota Ozaki <ozaki.ryota@gmail.com>
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. Neither the name of the project nor the names of its contributors
16 * may be used to endorse or promote products derived from this software
17 * without specific prior written permission.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
20 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
23 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 * SUCH DAMAGE.
30 */
31
32 /*
33 * This network interface aims to implement the WireGuard protocol.
 * The implementation is based on the WireGuard paper as of
 * 2018-06-30 [1].  The paper is referred to in the source code with
 * the label [W].  The specification of the Noise protocol framework
 * as of 2018-07-11 [2] is referred to with the label [N].
38 *
39 * [1] https://www.wireguard.com/papers/wireguard.pdf
40 * [2] http://noiseprotocol.org/noise.pdf
41 */
42
43 #include <sys/cdefs.h>
44 __KERNEL_RCSID(0, "$NetBSD: if_wg.c,v 1.62 2020/11/11 18:08:34 riastradh Exp $");
45
46 #ifdef _KERNEL_OPT
47 #include "opt_altq_enabled.h"
48 #include "opt_inet.h"
49 #endif
50
51 #include <sys/param.h>
52 #include <sys/types.h>
53
54 #include <sys/atomic.h>
55 #include <sys/callout.h>
56 #include <sys/cprng.h>
57 #include <sys/cpu.h>
58 #include <sys/device.h>
59 #include <sys/domain.h>
60 #include <sys/errno.h>
61 #include <sys/intr.h>
62 #include <sys/ioctl.h>
63 #include <sys/kernel.h>
64 #include <sys/kmem.h>
65 #include <sys/mbuf.h>
66 #include <sys/module.h>
67 #include <sys/mutex.h>
68 #include <sys/once.h>
69 #include <sys/percpu.h>
70 #include <sys/pserialize.h>
71 #include <sys/psref.h>
72 #include <sys/queue.h>
73 #include <sys/rwlock.h>
74 #include <sys/socket.h>
75 #include <sys/socketvar.h>
76 #include <sys/sockio.h>
77 #include <sys/sysctl.h>
78 #include <sys/syslog.h>
79 #include <sys/systm.h>
80 #include <sys/thmap.h>
81 #include <sys/threadpool.h>
82 #include <sys/time.h>
83 #include <sys/timespec.h>
84 #include <sys/workqueue.h>
85
86 #include <net/bpf.h>
87 #include <net/if.h>
88 #include <net/if_types.h>
89 #include <net/if_wg.h>
90 #include <net/pktqueue.h>
91 #include <net/route.h>
92
93 #include <netinet/in.h>
94 #include <netinet/in_pcb.h>
95 #include <netinet/in_var.h>
96 #include <netinet/ip.h>
97 #include <netinet/ip_var.h>
98 #include <netinet/udp.h>
99 #include <netinet/udp_var.h>
100
101 #ifdef INET6
102 #include <netinet/ip6.h>
103 #include <netinet6/in6_pcb.h>
104 #include <netinet6/in6_var.h>
105 #include <netinet6/ip6_var.h>
106 #include <netinet6/udp6_var.h>
107 #endif /* INET6 */
108
109 #include <prop/proplib.h>
110
111 #include <crypto/blake2/blake2s.h>
112 #include <crypto/sodium/crypto_aead_chacha20poly1305.h>
113 #include <crypto/sodium/crypto_aead_xchacha20poly1305.h>
114 #include <crypto/sodium/crypto_scalarmult.h>
115
116 #include "ioconf.h"
117
118 #ifdef WG_RUMPKERNEL
119 #include "wg_user.h"
120 #endif
121
/*
 * Data structures
 * - struct wg_softc is an instance of a wg interface
 *   - It has a list of peers (struct wg_peer)
 *   - It has a threadpool job that sends/receives handshake messages and
 *     runs event handlers
 *   - It has its own two routing tables: one for IPv4 and the other for IPv6
 * - struct wg_peer represents a peer
 *   - It has a struct work to handle handshakes and timer tasks
 *   - It has a pair of session instances (struct wg_session)
 *   - It has a pair of endpoint instances (struct wg_sockaddr)
 *     - Normally one endpoint is used and the second one is used only
 *       during a peer migration (a change of the peer's IP address)
 *   - It has a list of IP addresses and subnetworks called allowedips
 *     (struct wg_allowedip)
 *     - A packet sent over a session is allowed if its destination matches
 *       any of the IP addresses or subnetworks in the list
 * - struct wg_session represents a session of a secure tunnel with a peer
 *   - Two session instances belong to a peer: a stable session and an
 *     unstable session
 *   - A handshake process of a session always starts with an unstable
 *     instance
 *   - Once a session is established, its instance becomes stable and the
 *     other becomes unstable instead
 *   - Data messages are always sent via a stable session
 *
 * Locking notes:
 * - Each wg has a mutex(9) wg_lock, and a rwlock(9) wg_rwlock
 *   - Changes to the peer list are serialized by wg_lock
 *   - The peer list may be read with pserialize(9) and psref(9)
 *   - The rwlock (wg_rwlock) protects the routing tables (wg_rtable_ipv[46])
 *     => XXX replace by pserialize when routing table is psz-safe
 * - Each peer (struct wg_peer, wgp) has a mutex wgp_lock, which can be taken
 *   only in thread context and serializes:
 *   - the stable and unstable session pointers
 *   - all unstable session state
 * - Packet processing may be done in softint context:
 *   - The stable session can be read under pserialize(9) or psref(9)
 *   - The stable session is always ESTABLISHED
 *   - On a session swap, we must wait for all readers to release a
 *     reference to a stable session before changing wgs_state and
 *     session states
 * - Lock order: wg_lock -> wgp_lock
 *   (A short sketch of these rules is given below.)
 */
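
/*
 * An illustrative sketch of the write-side rules above; the read side
 * is the psref pattern shown at wg_get_stable_session() below.
 * Modifying the peer list takes both locks, in lock order:
 *
 *	mutex_enter(wg->wg_lock);
 *	mutex_enter(wgp->wgp_lock);
 *	...modify the peer list and per-peer state...
 *	mutex_exit(wgp->wgp_lock);
 *	mutex_exit(wg->wg_lock);
 *
 * wgp_lock alone suffices when only per-peer state changes, as in
 * wg_handle_msg_init(); wg_lock is needed when the peer list itself is
 * modified.
 */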
165
166
167 #define WGLOG(level, fmt, args...) \
168 log(level, "%s: " fmt, __func__, ##args)
169
170 /* Debug options */
171 #ifdef WG_DEBUG
172 /* Output debug logs */
173 #ifndef WG_DEBUG_LOG
174 #define WG_DEBUG_LOG
175 #endif
176 /* Output trace logs */
177 #ifndef WG_DEBUG_TRACE
178 #define WG_DEBUG_TRACE
179 #endif
180 /* Output hash values, etc. */
181 #ifndef WG_DEBUG_DUMP
182 #define WG_DEBUG_DUMP
183 #endif
184 /* Make some internal parameters configurable for testing and debugging */
185 #ifndef WG_DEBUG_PARAMS
186 #define WG_DEBUG_PARAMS
187 #endif
188 #endif
189
190 #ifdef WG_DEBUG_TRACE
191 #define WG_TRACE(msg) \
192 log(LOG_DEBUG, "%s:%d: %s\n", __func__, __LINE__, (msg))
193 #else
194 #define WG_TRACE(msg) __nothing
195 #endif
196
197 #ifdef WG_DEBUG_LOG
198 #define WG_DLOG(fmt, args...) log(LOG_DEBUG, "%s: " fmt, __func__, ##args)
199 #else
200 #define WG_DLOG(fmt, args...) __nothing
201 #endif
202
203 #define WG_LOG_RATECHECK(wgprc, level, fmt, args...) do { \
204 if (ppsratecheck(&(wgprc)->wgprc_lasttime, \
205 &(wgprc)->wgprc_curpps, 1)) { \
206 log(level, fmt, ##args); \
207 } \
208 } while (0)
209
210 #ifdef WG_DEBUG_PARAMS
211 static bool wg_force_underload = false;
212 #endif
213
214 #ifdef WG_DEBUG_DUMP
215
216 static char *
217 gethexdump(const char *p, size_t n)
218 {
219 char *buf;
220 size_t i;
221
222 if (n > SIZE_MAX/3 - 1)
223 return NULL;
224 buf = kmem_alloc(3*n + 1, KM_NOSLEEP);
225 if (buf == NULL)
226 return NULL;
227 for (i = 0; i < n; i++)
228 snprintf(buf + 3*i, 3 + 1, " %02hhx", p[i]);
229 return buf;
230 }
231
232 static void
233 puthexdump(char *buf, const void *p, size_t n)
234 {
235
236 if (buf == NULL)
237 return;
238 kmem_free(buf, 3*n + 1);
239 }
240
241 #ifdef WG_RUMPKERNEL
242 static void
243 wg_dump_buf(const char *func, const char *buf, const size_t size)
244 {
245 char *hex = gethexdump(buf, size);
246
247 log(LOG_DEBUG, "%s: %s\n", func, hex ? hex : "(enomem)");
248 puthexdump(hex, buf, size);
249 }
250 #endif
251
252 static void
253 wg_dump_hash(const uint8_t *func, const uint8_t *name, const uint8_t *hash,
254 const size_t size)
255 {
256 char *hex = gethexdump(hash, size);
257
258 log(LOG_DEBUG, "%s: %s: %s\n", func, name, hex ? hex : "(enomem)");
259 puthexdump(hex, hash, size);
260 }
261
262 #define WG_DUMP_HASH(name, hash) \
263 wg_dump_hash(__func__, name, hash, WG_HASH_LEN)
264 #define WG_DUMP_HASH48(name, hash) \
265 wg_dump_hash(__func__, name, hash, 48)
266 #define WG_DUMP_BUF(buf, size) \
267 wg_dump_buf(__func__, buf, size)
268 #else
269 #define WG_DUMP_HASH(name, hash) __nothing
270 #define WG_DUMP_HASH48(name, hash) __nothing
271 #define WG_DUMP_BUF(buf, size) __nothing
272 #endif /* WG_DEBUG_DUMP */
273
274 #define WG_MTU 1420
275 #define WG_ALLOWEDIPS 16
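
/*
 * The default MTU of 1420 appears to leave room for the worst-case
 * tunnel overhead on a 1500-byte path:
 *
 *	40 (outer IPv6) + 8 (UDP) + 16 (wg_msg_data header)
 *	  + 16 (poly1305 tag) = 80, and 1500 - 80 = 1420.
 */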
276
277 #define CURVE25519_KEY_LEN 32
278 #define TAI64N_LEN sizeof(uint32_t) * 3
279 #define POLY1305_AUTHTAG_LEN 16
280 #define HMAC_BLOCK_LEN 64
281
282 /* [N] 4.1: "DHLEN must be 32 or greater." WireGuard chooses 32. */
283 /* [N] 4.3: Hash functions */
284 #define NOISE_DHLEN 32
285 /* [N] 4.3: "Must be 32 or 64." WireGuard chooses 32. */
286 #define NOISE_HASHLEN 32
287 #define NOISE_BLOCKLEN 64
288 #define NOISE_HKDF_OUTPUT_LEN NOISE_HASHLEN
289 /* [N] 5.1: "k" */
290 #define NOISE_CIPHER_KEY_LEN 32
291 /*
292 * [N] 9.2: "psk"
293 * "... psk is a 32-byte secret value provided by the application."
294 */
295 #define NOISE_PRESHARED_KEY_LEN 32
296
297 #define WG_STATIC_KEY_LEN CURVE25519_KEY_LEN
298 #define WG_TIMESTAMP_LEN TAI64N_LEN
299
300 #define WG_PRESHARED_KEY_LEN NOISE_PRESHARED_KEY_LEN
301
302 #define WG_COOKIE_LEN 16
303 #define WG_MAC_LEN 16
304 #define WG_RANDVAL_LEN 24
305
306 #define WG_EPHEMERAL_KEY_LEN CURVE25519_KEY_LEN
307 /* [N] 5.2: "ck: A chaining key of HASHLEN bytes" */
308 #define WG_CHAINING_KEY_LEN NOISE_HASHLEN
309 /* [N] 5.2: "h: A hash output of HASHLEN bytes" */
310 #define WG_HASH_LEN NOISE_HASHLEN
311 #define WG_CIPHER_KEY_LEN NOISE_CIPHER_KEY_LEN
312 #define WG_DH_OUTPUT_LEN NOISE_DHLEN
313 #define WG_KDF_OUTPUT_LEN NOISE_HKDF_OUTPUT_LEN
314 #define WG_AUTHTAG_LEN POLY1305_AUTHTAG_LEN
315 #define WG_DATA_KEY_LEN 32
316 #define WG_SALT_LEN 24
317
318 /*
319 * The protocol messages
320 */
321 struct wg_msg {
322 uint32_t wgm_type;
323 } __packed;
324
325 /* [W] 5.4.2 First Message: Initiator to Responder */
326 struct wg_msg_init {
327 uint32_t wgmi_type;
328 uint32_t wgmi_sender;
329 uint8_t wgmi_ephemeral[WG_EPHEMERAL_KEY_LEN];
330 uint8_t wgmi_static[WG_STATIC_KEY_LEN + WG_AUTHTAG_LEN];
331 uint8_t wgmi_timestamp[WG_TIMESTAMP_LEN + WG_AUTHTAG_LEN];
332 uint8_t wgmi_mac1[WG_MAC_LEN];
333 uint8_t wgmi_mac2[WG_MAC_LEN];
334 } __packed;
335
336 /* [W] 5.4.3 Second Message: Responder to Initiator */
337 struct wg_msg_resp {
338 uint32_t wgmr_type;
339 uint32_t wgmr_sender;
340 uint32_t wgmr_receiver;
341 uint8_t wgmr_ephemeral[WG_EPHEMERAL_KEY_LEN];
342 uint8_t wgmr_empty[0 + WG_AUTHTAG_LEN];
343 uint8_t wgmr_mac1[WG_MAC_LEN];
344 uint8_t wgmr_mac2[WG_MAC_LEN];
345 } __packed;
346
347 /* [W] 5.4.6 Subsequent Messages: Transport Data Messages */
348 struct wg_msg_data {
349 uint32_t wgmd_type;
350 uint32_t wgmd_receiver;
351 uint64_t wgmd_counter;
352 uint32_t wgmd_packet[0];
353 } __packed;
354
355 /* [W] 5.4.7 Under Load: Cookie Reply Message */
356 struct wg_msg_cookie {
357 uint32_t wgmc_type;
358 uint32_t wgmc_receiver;
359 uint8_t wgmc_salt[WG_SALT_LEN];
360 uint8_t wgmc_cookie[WG_COOKIE_LEN + WG_AUTHTAG_LEN];
361 } __packed;
362
363 #define WG_MSG_TYPE_INIT 1
364 #define WG_MSG_TYPE_RESP 2
365 #define WG_MSG_TYPE_COOKIE 3
366 #define WG_MSG_TYPE_DATA 4
367 #define WG_MSG_TYPE_MAX WG_MSG_TYPE_DATA
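
/*
 * The structures above are __packed, so their wire sizes are the plain
 * sums of their fields; they match the fixed message sizes of [W] 5.4:
 *
 *	sizeof(struct wg_msg_init)   = 4+4+32+48+28+16+16 = 148
 *	sizeof(struct wg_msg_resp)   = 4+4+4+32+16+16+16  =  92
 *	sizeof(struct wg_msg_cookie) = 4+4+24+32           =  64
 *	sizeof(struct wg_msg_data)   = 4+4+8               =  16 + payload
 *
 * If desired these could be checked at build time, e.g.
 *	CTASSERT(sizeof(struct wg_msg_init) == 148);
 */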
368
369 /* Sliding windows */
370
371 #define SLIWIN_BITS 2048u
372 #define SLIWIN_TYPE uint32_t
373 #define SLIWIN_BPW NBBY*sizeof(SLIWIN_TYPE)
374 #define SLIWIN_WORDS howmany(SLIWIN_BITS, SLIWIN_BPW)
375 #define SLIWIN_NPKT (SLIWIN_BITS - NBBY*sizeof(SLIWIN_TYPE))
376
377 struct sliwin {
378 SLIWIN_TYPE B[SLIWIN_WORDS];
379 uint64_t T;
380 };
381
382 static void
383 sliwin_reset(struct sliwin *W)
384 {
385
386 memset(W, 0, sizeof(*W));
387 }
388
389 static int
390 sliwin_check_fast(const volatile struct sliwin *W, uint64_t S)
391 {
392
393 /*
394 * If it's more than one window older than the highest sequence
395 * number we've seen, reject.
396 */
397 #ifdef __HAVE_ATOMIC64_LOADSTORE
398 if (S + SLIWIN_NPKT < atomic_load_relaxed(&W->T))
399 return EAUTH;
400 #endif
401
402 /*
403 * Otherwise, we need to take the lock to decide, so don't
404 * reject just yet. Caller must serialize a call to
405 * sliwin_update in this case.
406 */
407 return 0;
408 }
409
410 static int
411 sliwin_update(struct sliwin *W, uint64_t S)
412 {
413 unsigned word, bit;
414
415 /*
416 * If it's more than one window older than the highest sequence
417 * number we've seen, reject.
418 */
419 if (S + SLIWIN_NPKT < W->T)
420 return EAUTH;
421
422 /*
423 * If it's higher than the highest sequence number we've seen,
424 * advance the window.
425 */
426 if (S > W->T) {
427 uint64_t i = W->T / SLIWIN_BPW;
428 uint64_t j = S / SLIWIN_BPW;
429 unsigned k;
430
431 for (k = 0; k < MIN(j - i, SLIWIN_WORDS); k++)
432 W->B[(i + k + 1) % SLIWIN_WORDS] = 0;
433 #ifdef __HAVE_ATOMIC64_LOADSTORE
434 atomic_store_relaxed(&W->T, S);
435 #else
436 W->T = S;
437 #endif
438 }
439
440 /* Test and set the bit -- if already set, reject. */
441 word = (S / SLIWIN_BPW) % SLIWIN_WORDS;
442 bit = S % SLIWIN_BPW;
443 if (W->B[word] & (1UL << bit))
444 return EAUTH;
445 W->B[word] |= 1UL << bit;
446
447 /* Accept! */
448 return 0;
449 }
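
/*
 * A worked example of the window above, with SLIWIN_BITS = 2048,
 * SLIWIN_BPW = 32 and therefore SLIWIN_NPKT = 2016:
 *
 * - Start with T = 0 and an empty bitmap; the first packet S = 0 is
 *   accepted and bit 0 is set.
 * - S = 5 arrives: 5 > T, so T advances to 5 and bit 5 is set.  A
 *   replayed S = 5 later finds its bit already set and gets EAUTH.
 * - S = 3 arrives out of order: 3 + 2016 >= 5, so it is still inside
 *   the window; its bit is clear, so it is accepted and marked.
 * - Much later, with T = 10000, a replayed S = 100 is rejected
 *   because 100 + 2016 < 10000.
 *
 * sliwin_check_fast() performs only the too-old test, locklessly, as a
 * cheap prefilter; sliwin_update() repeats it and does the rest under
 * the caller's serialization (the wgs_recvwin lock).
 */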
450
451 struct wg_session {
452 struct wg_peer *wgs_peer;
453 struct psref_target
454 wgs_psref;
455
456 int wgs_state;
457 #define WGS_STATE_UNKNOWN 0
458 #define WGS_STATE_INIT_ACTIVE 1
459 #define WGS_STATE_INIT_PASSIVE 2
460 #define WGS_STATE_ESTABLISHED 3
461 #define WGS_STATE_DESTROYING 4
462
463 time_t wgs_time_established;
464 time_t wgs_time_last_data_sent;
465 bool wgs_is_initiator;
466
467 uint32_t wgs_local_index;
468 uint32_t wgs_remote_index;
469 #ifdef __HAVE_ATOMIC64_LOADSTORE
470 volatile uint64_t
471 wgs_send_counter;
472 #else
473 kmutex_t wgs_send_counter_lock;
474 uint64_t wgs_send_counter;
475 #endif
476
477 struct {
478 kmutex_t lock;
479 struct sliwin window;
480 } *wgs_recvwin;
481
482 uint8_t wgs_handshake_hash[WG_HASH_LEN];
483 uint8_t wgs_chaining_key[WG_CHAINING_KEY_LEN];
484 uint8_t wgs_ephemeral_key_pub[WG_EPHEMERAL_KEY_LEN];
485 uint8_t wgs_ephemeral_key_priv[WG_EPHEMERAL_KEY_LEN];
486 uint8_t wgs_ephemeral_key_peer[WG_EPHEMERAL_KEY_LEN];
487 uint8_t wgs_tkey_send[WG_DATA_KEY_LEN];
488 uint8_t wgs_tkey_recv[WG_DATA_KEY_LEN];
489 };
490
491 struct wg_sockaddr {
492 union {
493 struct sockaddr_storage _ss;
494 struct sockaddr _sa;
495 struct sockaddr_in _sin;
496 struct sockaddr_in6 _sin6;
497 };
498 struct psref_target wgsa_psref;
499 };
500
501 #define wgsatoss(wgsa) (&(wgsa)->_ss)
502 #define wgsatosa(wgsa) (&(wgsa)->_sa)
503 #define wgsatosin(wgsa) (&(wgsa)->_sin)
504 #define wgsatosin6(wgsa) (&(wgsa)->_sin6)
505
506 #define wgsa_family(wgsa) (wgsatosa(wgsa)->sa_family)
507
508 struct wg_peer;
509 struct wg_allowedip {
510 struct radix_node wga_nodes[2];
511 struct wg_sockaddr _wga_sa_addr;
512 struct wg_sockaddr _wga_sa_mask;
513 #define wga_sa_addr _wga_sa_addr._sa
514 #define wga_sa_mask _wga_sa_mask._sa
515
516 int wga_family;
517 uint8_t wga_cidr;
518 union {
519 struct in_addr _ip4;
520 struct in6_addr _ip6;
521 } wga_addr;
522 #define wga_addr4 wga_addr._ip4
523 #define wga_addr6 wga_addr._ip6
524
525 struct wg_peer *wga_peer;
526 };
527
528 typedef uint8_t wg_timestamp_t[WG_TIMESTAMP_LEN];
529
530 struct wg_ppsratecheck {
531 struct timeval wgprc_lasttime;
532 int wgprc_curpps;
533 };
534
535 struct wg_softc;
536 struct wg_peer {
537 struct wg_softc *wgp_sc;
538 char wgp_name[WG_PEER_NAME_MAXLEN + 1];
539 struct pslist_entry wgp_peerlist_entry;
540 pserialize_t wgp_psz;
541 struct psref_target wgp_psref;
542 kmutex_t *wgp_lock;
543 kmutex_t *wgp_intr_lock;
544
545 uint8_t wgp_pubkey[WG_STATIC_KEY_LEN];
546 struct wg_sockaddr *wgp_endpoint;
547 struct wg_sockaddr *wgp_endpoint0;
548 volatile unsigned wgp_endpoint_changing;
549 bool wgp_endpoint_available;
550
551 /* The preshared key (optional) */
552 uint8_t wgp_psk[WG_PRESHARED_KEY_LEN];
553
554 struct wg_session *wgp_session_stable;
555 struct wg_session *wgp_session_unstable;
556
557 /* first outgoing packet awaiting session initiation */
558 struct mbuf *wgp_pending;
559
560 /* timestamp in big-endian */
561 wg_timestamp_t wgp_timestamp_latest_init;
562
563 struct timespec wgp_last_handshake_time;
564
565 callout_t wgp_rekey_timer;
566 callout_t wgp_handshake_timeout_timer;
567 callout_t wgp_session_dtor_timer;
568
569 time_t wgp_handshake_start_time;
570
571 int wgp_n_allowedips;
572 struct wg_allowedip wgp_allowedips[WG_ALLOWEDIPS];
573
574 time_t wgp_latest_cookie_time;
575 uint8_t wgp_latest_cookie[WG_COOKIE_LEN];
576 uint8_t wgp_last_sent_mac1[WG_MAC_LEN];
577 bool wgp_last_sent_mac1_valid;
578 uint8_t wgp_last_sent_cookie[WG_COOKIE_LEN];
579 bool wgp_last_sent_cookie_valid;
580
581 time_t wgp_last_msg_received_time[WG_MSG_TYPE_MAX];
582
583 time_t wgp_last_genrandval_time;
584 uint32_t wgp_randval;
585
586 struct wg_ppsratecheck wgp_ppsratecheck;
587
588 struct work wgp_work;
589 unsigned int wgp_tasks;
590 #define WGP_TASK_SEND_INIT_MESSAGE __BIT(0)
591 #define WGP_TASK_RETRY_HANDSHAKE __BIT(1)
592 #define WGP_TASK_ESTABLISH_SESSION __BIT(2)
593 #define WGP_TASK_ENDPOINT_CHANGED __BIT(3)
594 #define WGP_TASK_SEND_KEEPALIVE_MESSAGE __BIT(4)
595 #define WGP_TASK_DESTROY_PREV_SESSION __BIT(5)
596 };
597
598 struct wg_ops;
599
600 struct wg_softc {
601 struct ifnet wg_if;
602 LIST_ENTRY(wg_softc) wg_list;
603 kmutex_t *wg_lock;
604 kmutex_t *wg_intr_lock;
605 krwlock_t *wg_rwlock;
606
607 uint8_t wg_privkey[WG_STATIC_KEY_LEN];
608 uint8_t wg_pubkey[WG_STATIC_KEY_LEN];
609
610 int wg_npeers;
611 struct pslist_head wg_peers;
612 struct thmap *wg_peers_bypubkey;
613 struct thmap *wg_peers_byname;
614 struct thmap *wg_sessions_byindex;
615 uint16_t wg_listen_port;
616
617 struct threadpool *wg_threadpool;
618
619 struct threadpool_job wg_job;
620 int wg_upcalls;
621 #define WG_UPCALL_INET __BIT(0)
622 #define WG_UPCALL_INET6 __BIT(1)
623
624 #ifdef INET
625 struct socket *wg_so4;
626 struct radix_node_head *wg_rtable_ipv4;
627 #endif
628 #ifdef INET6
629 struct socket *wg_so6;
630 struct radix_node_head *wg_rtable_ipv6;
631 #endif
632
633 struct wg_ppsratecheck wg_ppsratecheck;
634
635 struct wg_ops *wg_ops;
636
637 #ifdef WG_RUMPKERNEL
638 struct wg_user *wg_user;
639 #endif
640 };
641
642 /* [W] 6.1 Preliminaries */
643 #define WG_REKEY_AFTER_MESSAGES (1ULL << 60)
644 #define WG_REJECT_AFTER_MESSAGES (UINT64_MAX - (1 << 13))
645 #define WG_REKEY_AFTER_TIME 120
646 #define WG_REJECT_AFTER_TIME 180
647 #define WG_REKEY_ATTEMPT_TIME 90
648 #define WG_REKEY_TIMEOUT 5
649 #define WG_KEEPALIVE_TIMEOUT 10
650
651 #define WG_COOKIE_TIME 120
652 #define WG_RANDVAL_TIME (2 * 60)
653
654 static uint64_t wg_rekey_after_messages = WG_REKEY_AFTER_MESSAGES;
655 static uint64_t wg_reject_after_messages = WG_REJECT_AFTER_MESSAGES;
656 static unsigned wg_rekey_after_time = WG_REKEY_AFTER_TIME;
657 static unsigned wg_reject_after_time = WG_REJECT_AFTER_TIME;
658 static unsigned wg_rekey_attempt_time = WG_REKEY_ATTEMPT_TIME;
659 static unsigned wg_rekey_timeout = WG_REKEY_TIMEOUT;
660 static unsigned wg_keepalive_timeout = WG_KEEPALIVE_TIMEOUT;
661
662 static struct mbuf *
663 wg_get_mbuf(size_t, size_t);
664
665 static int wg_send_data_msg(struct wg_peer *, struct wg_session *,
666 struct mbuf *);
667 static int wg_send_cookie_msg(struct wg_softc *, struct wg_peer *,
668 const uint32_t, const uint8_t [], const struct sockaddr *);
669 static int wg_send_handshake_msg_resp(struct wg_softc *, struct wg_peer *,
670 struct wg_session *, const struct wg_msg_init *);
671 static void wg_send_keepalive_msg(struct wg_peer *, struct wg_session *);
672
673 static struct wg_peer *
674 wg_pick_peer_by_sa(struct wg_softc *, const struct sockaddr *,
675 struct psref *);
676 static struct wg_peer *
677 wg_lookup_peer_by_pubkey(struct wg_softc *,
678 const uint8_t [], struct psref *);
679
680 static struct wg_session *
681 wg_lookup_session_by_index(struct wg_softc *,
682 const uint32_t, struct psref *);
683
684 static void wg_update_endpoint_if_necessary(struct wg_peer *,
685 const struct sockaddr *);
686
687 static void wg_schedule_rekey_timer(struct wg_peer *);
688 static void wg_schedule_session_dtor_timer(struct wg_peer *);
689
690 static bool wg_is_underload(struct wg_softc *, struct wg_peer *, int);
691 static void wg_calculate_keys(struct wg_session *, const bool);
692
693 static void wg_clear_states(struct wg_session *);
694
695 static void wg_get_peer(struct wg_peer *, struct psref *);
696 static void wg_put_peer(struct wg_peer *, struct psref *);
697
698 static int wg_send_so(struct wg_peer *, struct mbuf *);
699 static int wg_send_udp(struct wg_peer *, struct mbuf *);
700 static int wg_output(struct ifnet *, struct mbuf *,
701 const struct sockaddr *, const struct rtentry *);
702 static void wg_input(struct ifnet *, struct mbuf *, const int);
703 static int wg_ioctl(struct ifnet *, u_long, void *);
704 static int wg_bind_port(struct wg_softc *, const uint16_t);
705 static int wg_init(struct ifnet *);
706 #ifdef ALTQ
707 static void wg_start(struct ifnet *);
708 #endif
709 static void wg_stop(struct ifnet *, int);
710
711 static void wg_peer_work(struct work *, void *);
712 static void wg_job(struct threadpool_job *);
713 static void wgintr(void *);
714 static void wg_purge_pending_packets(struct wg_peer *);
715
716 static int wg_clone_create(struct if_clone *, int);
717 static int wg_clone_destroy(struct ifnet *);
718
719 struct wg_ops {
720 int (*send_hs_msg)(struct wg_peer *, struct mbuf *);
721 int (*send_data_msg)(struct wg_peer *, struct mbuf *);
722 void (*input)(struct ifnet *, struct mbuf *, const int);
723 int (*bind_port)(struct wg_softc *, const uint16_t);
724 };
725
726 struct wg_ops wg_ops_rumpkernel = {
727 .send_hs_msg = wg_send_so,
728 .send_data_msg = wg_send_udp,
729 .input = wg_input,
730 .bind_port = wg_bind_port,
731 };
732
733 #ifdef WG_RUMPKERNEL
734 static bool wg_user_mode(struct wg_softc *);
735 static int wg_ioctl_linkstr(struct wg_softc *, struct ifdrv *);
736
737 static int wg_send_user(struct wg_peer *, struct mbuf *);
738 static void wg_input_user(struct ifnet *, struct mbuf *, const int);
739 static int wg_bind_port_user(struct wg_softc *, const uint16_t);
740
741 struct wg_ops wg_ops_rumpuser = {
742 .send_hs_msg = wg_send_user,
743 .send_data_msg = wg_send_user,
744 .input = wg_input_user,
745 .bind_port = wg_bind_port_user,
746 };
747 #endif
748
749 #define WG_PEER_READER_FOREACH(wgp, wg) \
750 PSLIST_READER_FOREACH((wgp), &(wg)->wg_peers, struct wg_peer, \
751 wgp_peerlist_entry)
752 #define WG_PEER_WRITER_FOREACH(wgp, wg) \
753 PSLIST_WRITER_FOREACH((wgp), &(wg)->wg_peers, struct wg_peer, \
754 wgp_peerlist_entry)
755 #define WG_PEER_WRITER_INSERT_HEAD(wgp, wg) \
756 PSLIST_WRITER_INSERT_HEAD(&(wg)->wg_peers, (wgp), wgp_peerlist_entry)
757 #define WG_PEER_WRITER_REMOVE(wgp) \
758 PSLIST_WRITER_REMOVE((wgp), wgp_peerlist_entry)
759
760 struct wg_route {
761 struct radix_node wgr_nodes[2];
762 struct wg_peer *wgr_peer;
763 };
764
765 static struct radix_node_head *
766 wg_rnh(struct wg_softc *wg, const int family)
767 {
768
769 switch (family) {
770 case AF_INET:
771 return wg->wg_rtable_ipv4;
772 #ifdef INET6
773 case AF_INET6:
774 return wg->wg_rtable_ipv6;
775 #endif
776 default:
777 return NULL;
778 }
779 }
780
781
782 /*
783 * Global variables
784 */
785 static volatile unsigned wg_count __cacheline_aligned;
786
787 struct psref_class *wg_psref_class __read_mostly;
788
789 static struct if_clone wg_cloner =
790 IF_CLONE_INITIALIZER("wg", wg_clone_create, wg_clone_destroy);
791
792 static struct pktqueue *wg_pktq __read_mostly;
793 static struct workqueue *wg_wq __read_mostly;
794
795 void wgattach(int);
796 /* ARGSUSED */
797 void
798 wgattach(int count)
799 {
800 /*
801 * Nothing to do here, initialization is handled by the
	 * module initialization code in wginit() below.
803 */
804 }
805
806 static void
807 wginit(void)
808 {
809
810 wg_psref_class = psref_class_create("wg", IPL_SOFTNET);
811
812 if_clone_attach(&wg_cloner);
813 }
814
815 /*
816 * XXX Kludge: This should just happen in wginit, but workqueue_create
817 * cannot be run until after CPUs have been detected, and wginit runs
818 * before configure.
819 */
820 static int
821 wginitqueues(void)
822 {
823 int error __diagused;
824
825 wg_pktq = pktq_create(IFQ_MAXLEN, wgintr, NULL);
826 KASSERT(wg_pktq != NULL);
827
828 error = workqueue_create(&wg_wq, "wgpeer", wg_peer_work, NULL,
829 PRI_NONE, IPL_SOFTNET, WQ_MPSAFE|WQ_PERCPU);
830 KASSERT(error == 0);
831
832 return 0;
833 }
834
835 static void
836 wg_guarantee_initialized(void)
837 {
838 static ONCE_DECL(init);
839 int error __diagused;
840
841 error = RUN_ONCE(&init, wginitqueues);
842 KASSERT(error == 0);
843 }
844
845 static int
846 wg_count_inc(void)
847 {
848 unsigned o, n;
849
850 do {
851 o = atomic_load_relaxed(&wg_count);
852 if (o == UINT_MAX)
853 return ENFILE;
854 n = o + 1;
855 } while (atomic_cas_uint(&wg_count, o, n) != o);
856
857 return 0;
858 }
859
860 static void
861 wg_count_dec(void)
862 {
863 unsigned c __diagused;
864
865 c = atomic_dec_uint_nv(&wg_count);
866 KASSERT(c != UINT_MAX);
867 }
868
869 static int
870 wgdetach(void)
871 {
872
873 /* Prevent new interface creation. */
874 if_clone_detach(&wg_cloner);
875
876 /* Check whether there are any existing interfaces. */
877 if (atomic_load_relaxed(&wg_count)) {
878 /* Back out -- reattach the cloner. */
879 if_clone_attach(&wg_cloner);
880 return EBUSY;
881 }
882
883 /* No interfaces left. Nuke it. */
884 workqueue_destroy(wg_wq);
885 pktq_destroy(wg_pktq);
886 psref_class_destroy(wg_psref_class);
887
888 return 0;
889 }
890
891 static void
892 wg_init_key_and_hash(uint8_t ckey[WG_CHAINING_KEY_LEN],
893 uint8_t hash[WG_HASH_LEN])
894 {
895 /* [W] 5.4: CONSTRUCTION */
896 const char *signature = "Noise_IKpsk2_25519_ChaChaPoly_BLAKE2s";
897 /* [W] 5.4: IDENTIFIER */
	const char *id = "WireGuard v1 zx2c4 Jason@zx2c4.com";
899 struct blake2s state;
900
901 blake2s(ckey, WG_CHAINING_KEY_LEN, NULL, 0,
902 signature, strlen(signature));
903
904 CTASSERT(WG_HASH_LEN == WG_CHAINING_KEY_LEN);
905 memcpy(hash, ckey, WG_CHAINING_KEY_LEN);
906
907 blake2s_init(&state, WG_HASH_LEN, NULL, 0);
908 blake2s_update(&state, ckey, WG_CHAINING_KEY_LEN);
909 blake2s_update(&state, id, strlen(id));
910 blake2s_final(&state, hash);
911
912 WG_DUMP_HASH("ckey", ckey);
913 WG_DUMP_HASH("hash", hash);
914 }
915
916 static void
917 wg_algo_hash(uint8_t hash[WG_HASH_LEN], const uint8_t input[],
918 const size_t inputsize)
919 {
920 struct blake2s state;
921
922 blake2s_init(&state, WG_HASH_LEN, NULL, 0);
923 blake2s_update(&state, hash, WG_HASH_LEN);
924 blake2s_update(&state, input, inputsize);
925 blake2s_final(&state, hash);
926 }
927
928 static void
929 wg_algo_mac(uint8_t out[], const size_t outsize,
930 const uint8_t key[], const size_t keylen,
931 const uint8_t input1[], const size_t input1len,
932 const uint8_t input2[], const size_t input2len)
933 {
934 struct blake2s state;
935
936 blake2s_init(&state, outsize, key, keylen);
937
938 blake2s_update(&state, input1, input1len);
939 if (input2 != NULL)
940 blake2s_update(&state, input2, input2len);
941 blake2s_final(&state, out);
942 }
943
944 static void
945 wg_algo_mac_mac1(uint8_t out[], const size_t outsize,
946 const uint8_t input1[], const size_t input1len,
947 const uint8_t input2[], const size_t input2len)
948 {
949 struct blake2s state;
950 /* [W] 5.4: LABEL-MAC1 */
951 const char *label = "mac1----";
952 uint8_t key[WG_HASH_LEN];
953
954 blake2s_init(&state, sizeof(key), NULL, 0);
955 blake2s_update(&state, label, strlen(label));
956 blake2s_update(&state, input1, input1len);
957 blake2s_final(&state, key);
958
959 blake2s_init(&state, outsize, key, sizeof(key));
960 if (input2 != NULL)
961 blake2s_update(&state, input2, input2len);
962 blake2s_final(&state, out);
963 }
964
965 static void
966 wg_algo_mac_cookie(uint8_t out[], const size_t outsize,
967 const uint8_t input1[], const size_t input1len)
968 {
969 struct blake2s state;
970 /* [W] 5.4: LABEL-COOKIE */
971 const char *label = "cookie--";
972
973 blake2s_init(&state, outsize, NULL, 0);
974 blake2s_update(&state, label, strlen(label));
975 blake2s_update(&state, input1, input1len);
976 blake2s_final(&state, out);
977 }
978
979 static void
980 wg_algo_generate_keypair(uint8_t pubkey[WG_EPHEMERAL_KEY_LEN],
981 uint8_t privkey[WG_EPHEMERAL_KEY_LEN])
982 {
983
984 CTASSERT(WG_EPHEMERAL_KEY_LEN == crypto_scalarmult_curve25519_BYTES);
985
986 cprng_strong(kern_cprng, privkey, WG_EPHEMERAL_KEY_LEN, 0);
987 crypto_scalarmult_base(pubkey, privkey);
988 }
989
990 static void
991 wg_algo_dh(uint8_t out[WG_DH_OUTPUT_LEN],
992 const uint8_t privkey[WG_STATIC_KEY_LEN],
993 const uint8_t pubkey[WG_STATIC_KEY_LEN])
994 {
995
996 CTASSERT(WG_STATIC_KEY_LEN == crypto_scalarmult_curve25519_BYTES);
997
998 int ret __diagused = crypto_scalarmult(out, privkey, pubkey);
999 KASSERT(ret == 0);
1000 }
1001
1002 static void
1003 wg_algo_hmac(uint8_t out[], const size_t outlen,
1004 const uint8_t key[], const size_t keylen,
1005 const uint8_t in[], const size_t inlen)
1006 {
1007 #define IPAD 0x36
1008 #define OPAD 0x5c
1009 uint8_t hmackey[HMAC_BLOCK_LEN] = {0};
1010 uint8_t ipad[HMAC_BLOCK_LEN];
1011 uint8_t opad[HMAC_BLOCK_LEN];
1012 int i;
1013 struct blake2s state;
1014
1015 KASSERT(outlen == WG_HASH_LEN);
1016 KASSERT(keylen <= HMAC_BLOCK_LEN);
1017
1018 memcpy(hmackey, key, keylen);
1019
1020 for (i = 0; i < sizeof(hmackey); i++) {
1021 ipad[i] = hmackey[i] ^ IPAD;
1022 opad[i] = hmackey[i] ^ OPAD;
1023 }
1024
1025 blake2s_init(&state, WG_HASH_LEN, NULL, 0);
1026 blake2s_update(&state, ipad, sizeof(ipad));
1027 blake2s_update(&state, in, inlen);
1028 blake2s_final(&state, out);
1029
1030 blake2s_init(&state, WG_HASH_LEN, NULL, 0);
1031 blake2s_update(&state, opad, sizeof(opad));
1032 blake2s_update(&state, out, WG_HASH_LEN);
1033 blake2s_final(&state, out);
1034 #undef IPAD
1035 #undef OPAD
1036 }
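
/*
 * wg_algo_hmac() above is the standard HMAC construction ("Hmac" in
 * [W] 5.4) instantiated with BLAKE2s as the hash:
 *
 *	HMAC(K, m) = H((K' ^ opad) || H((K' ^ ipad) || m))
 *
 * where K' is K zero-padded to the 64-byte block size, ipad is 0x36
 * repeated and opad is 0x5c repeated.  Per the KASSERT, keys longer
 * than one block are not handled; the callers here only ever pass
 * 32-byte chaining keys or 32-byte PRKs.
 */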
1037
1038 static void
1039 wg_algo_kdf(uint8_t out1[WG_KDF_OUTPUT_LEN], uint8_t out2[WG_KDF_OUTPUT_LEN],
1040 uint8_t out3[WG_KDF_OUTPUT_LEN], const uint8_t ckey[WG_CHAINING_KEY_LEN],
1041 const uint8_t input[], const size_t inputlen)
1042 {
1043 uint8_t tmp1[WG_KDF_OUTPUT_LEN], tmp2[WG_KDF_OUTPUT_LEN + 1];
1044 uint8_t one[1];
1045
1046 /*
1047 * [N] 4.3: "an input_key_material byte sequence with length
1048 * either zero bytes, 32 bytes, or DHLEN bytes."
1049 */
1050 KASSERT(inputlen == 0 || inputlen == 32 || inputlen == NOISE_DHLEN);
1051
1052 WG_DUMP_HASH("ckey", ckey);
1053 if (input != NULL)
1054 WG_DUMP_HASH("input", input);
1055 wg_algo_hmac(tmp1, sizeof(tmp1), ckey, WG_CHAINING_KEY_LEN,
1056 input, inputlen);
1057 WG_DUMP_HASH("tmp1", tmp1);
1058 one[0] = 1;
1059 wg_algo_hmac(out1, WG_KDF_OUTPUT_LEN, tmp1, sizeof(tmp1),
1060 one, sizeof(one));
1061 WG_DUMP_HASH("out1", out1);
1062 if (out2 == NULL)
1063 return;
1064 memcpy(tmp2, out1, WG_KDF_OUTPUT_LEN);
1065 tmp2[WG_KDF_OUTPUT_LEN] = 2;
1066 wg_algo_hmac(out2, WG_KDF_OUTPUT_LEN, tmp1, sizeof(tmp1),
1067 tmp2, sizeof(tmp2));
1068 WG_DUMP_HASH("out2", out2);
1069 if (out3 == NULL)
1070 return;
1071 memcpy(tmp2, out2, WG_KDF_OUTPUT_LEN);
1072 tmp2[WG_KDF_OUTPUT_LEN] = 3;
1073 wg_algo_hmac(out3, WG_KDF_OUTPUT_LEN, tmp1, sizeof(tmp1),
1074 tmp2, sizeof(tmp2));
1075 WG_DUMP_HASH("out3", out3);
1076 }
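
/*
 * wg_algo_kdf() above is HKDF as specified in [N] 4.3 and written
 * Kdf1/Kdf2/Kdf3 in [W] 5.4:
 *
 *	tmp1 = HMAC(ckey, input)		(extract)
 *	out1 = HMAC(tmp1, 0x1)			(expand)
 *	out2 = HMAC(tmp1, out1 || 0x2)
 *	out3 = HMAC(tmp1, out2 || 0x3)
 *
 * Passing NULL for out2 (or out3) stops after out1 (or out2), which
 * gives Kdf1 and Kdf2 respectively.
 */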
1077
1078 static void
1079 wg_algo_dh_kdf(uint8_t ckey[WG_CHAINING_KEY_LEN],
1080 uint8_t cipher_key[WG_CIPHER_KEY_LEN],
1081 const uint8_t local_key[WG_STATIC_KEY_LEN],
1082 const uint8_t remote_key[WG_STATIC_KEY_LEN])
1083 {
1084 uint8_t dhout[WG_DH_OUTPUT_LEN];
1085
1086 wg_algo_dh(dhout, local_key, remote_key);
1087 wg_algo_kdf(ckey, cipher_key, NULL, ckey, dhout, sizeof(dhout));
1088
1089 WG_DUMP_HASH("dhout", dhout);
1090 WG_DUMP_HASH("ckey", ckey);
1091 if (cipher_key != NULL)
1092 WG_DUMP_HASH("cipher_key", cipher_key);
1093 }
1094
1095 static void
1096 wg_algo_aead_enc(uint8_t out[], size_t expected_outsize, const uint8_t key[],
1097 const uint64_t counter, const uint8_t plain[], const size_t plainsize,
1098 const uint8_t auth[], size_t authlen)
1099 {
1100 uint8_t nonce[(32 + 64) / 8] = {0};
1101 long long unsigned int outsize;
1102 int error __diagused;
1103
1104 le64enc(&nonce[4], counter);
1105
1106 error = crypto_aead_chacha20poly1305_ietf_encrypt(out, &outsize, plain,
1107 plainsize, auth, authlen, NULL, nonce, key);
1108 KASSERT(error == 0);
1109 KASSERT(outsize == expected_outsize);
1110 }
1111
1112 static int
1113 wg_algo_aead_dec(uint8_t out[], size_t expected_outsize, const uint8_t key[],
1114 const uint64_t counter, const uint8_t encrypted[],
1115 const size_t encryptedsize, const uint8_t auth[], size_t authlen)
1116 {
1117 uint8_t nonce[(32 + 64) / 8] = {0};
1118 long long unsigned int outsize;
1119 int error;
1120
1121 le64enc(&nonce[4], counter);
1122
1123 error = crypto_aead_chacha20poly1305_ietf_decrypt(out, &outsize, NULL,
1124 encrypted, encryptedsize, auth, authlen, nonce, key);
1125 if (error == 0)
1126 KASSERT(outsize == expected_outsize);
1127 return error;
1128 }
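
/*
 * Nonce layout used by the two functions above, per the AEAD
 * definition in [W] 5.4: the 96-bit ChaCha20-Poly1305 (IETF) nonce is
 * 32 bits of zeros followed by the 64-bit little-endian counter,
 *
 *	nonce[0..3]  = 00 00 00 00
 *	nonce[4..11] = le64(counter)
 *
 * so, e.g., counter 1 encodes as 00 00 00 00 01 00 00 00 00 00 00 00.
 */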
1129
1130 static void
1131 wg_algo_xaead_enc(uint8_t out[], const size_t expected_outsize,
1132 const uint8_t key[], const uint8_t plain[], const size_t plainsize,
1133 const uint8_t auth[], size_t authlen,
1134 const uint8_t nonce[WG_SALT_LEN])
1135 {
1136 long long unsigned int outsize;
1137 int error __diagused;
1138
1139 CTASSERT(WG_SALT_LEN == crypto_aead_xchacha20poly1305_ietf_NPUBBYTES);
1140 error = crypto_aead_xchacha20poly1305_ietf_encrypt(out, &outsize,
1141 plain, plainsize, auth, authlen, NULL, nonce, key);
1142 KASSERT(error == 0);
1143 KASSERT(outsize == expected_outsize);
1144 }
1145
1146 static int
1147 wg_algo_xaead_dec(uint8_t out[], const size_t expected_outsize,
1148 const uint8_t key[], const uint8_t encrypted[], const size_t encryptedsize,
1149 const uint8_t auth[], size_t authlen,
1150 const uint8_t nonce[WG_SALT_LEN])
1151 {
1152 long long unsigned int outsize;
1153 int error;
1154
1155 error = crypto_aead_xchacha20poly1305_ietf_decrypt(out, &outsize, NULL,
1156 encrypted, encryptedsize, auth, authlen, nonce, key);
1157 if (error == 0)
1158 KASSERT(outsize == expected_outsize);
1159 return error;
1160 }
1161
1162 static void
1163 wg_algo_tai64n(wg_timestamp_t timestamp)
1164 {
1165 struct timespec ts;
1166
1167 /* FIXME strict TAI64N (https://cr.yp.to/libtai/tai64.html) */
1168 getnanotime(&ts);
1169 /* TAI64 label in external TAI64 format */
1170 be32enc(timestamp, 0x40000000UL + (ts.tv_sec >> 32));
1171 /* second beginning from 1970 TAI */
1172 be32enc(timestamp + 4, ts.tv_sec & 0xffffffffU);
1173 /* nanosecond in big-endian format */
1174 be32enc(timestamp + 8, ts.tv_nsec);
1175 }
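
/*
 * An example of the encoding above (as the FIXME notes, these are
 * really UTC-based seconds labeled as TAI64N, without the TAI offset).
 * For ts.tv_sec = 1600000000 (0x5f5e1000) and ts.tv_nsec = 123456789
 * (0x075bcd15) the 12-byte timestamp is
 *
 *	40 00 00 00  5f 5e 10 00  07 5b cd 15
 *
 * i.e. big-endian (2^62 + seconds) followed by big-endian nanoseconds.
 * Because the whole value is big-endian, later timestamps compare
 * greater under memcmp(), which is what wg_handle_msg_init() relies on
 * when it discards old timestamps.
 */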
1176
1177 /*
1178 * wg_get_stable_session(wgp, psref)
1179 *
1180 * Get a passive reference to the current stable session, or
1181 * return NULL if there is no current stable session.
1182 *
 * The pointer is always there, but the session is not necessarily
 * ESTABLISHED; if it is not ESTABLISHED, return NULL.  However,
 * the session may transition from ESTABLISHED to DESTROYING while
 * the caller holds the passive reference.
1187 */
1188 static struct wg_session *
1189 wg_get_stable_session(struct wg_peer *wgp, struct psref *psref)
1190 {
1191 int s;
1192 struct wg_session *wgs;
1193
1194 s = pserialize_read_enter();
1195 wgs = atomic_load_consume(&wgp->wgp_session_stable);
1196 if (__predict_false(wgs->wgs_state != WGS_STATE_ESTABLISHED))
1197 wgs = NULL;
1198 else
1199 psref_acquire(psref, &wgs->wgs_psref, wg_psref_class);
1200 pserialize_read_exit(s);
1201
1202 return wgs;
1203 }
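
/*
 * A sketch of how callers use the above (the real users are on the
 * transmit and receive paths):
 *
 *	wgs = wg_get_stable_session(wgp, &psref);
 *	if (wgs == NULL) {
 *		...no established session: stash the packet in
 *		   wgp_pending and schedule WGP_TASK_SEND_INIT_MESSAGE...
 *	} else {
 *		...encrypt and send the packet under this session...
 *		wg_put_session(wgs, &psref);
 *	}
 */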
1204
1205 static void
1206 wg_put_session(struct wg_session *wgs, struct psref *psref)
1207 {
1208
1209 psref_release(psref, &wgs->wgs_psref, wg_psref_class);
1210 }
1211
1212 static void
1213 wg_destroy_session(struct wg_softc *wg, struct wg_session *wgs)
1214 {
1215 struct wg_peer *wgp = wgs->wgs_peer;
1216 struct wg_session *wgs0 __diagused;
1217 void *garbage;
1218
1219 KASSERT(mutex_owned(wgp->wgp_lock));
1220 KASSERT(wgs->wgs_state != WGS_STATE_UNKNOWN);
1221
1222 /* Remove the session from the table. */
1223 wgs0 = thmap_del(wg->wg_sessions_byindex,
1224 &wgs->wgs_local_index, sizeof(wgs->wgs_local_index));
1225 KASSERT(wgs0 == wgs);
1226 garbage = thmap_stage_gc(wg->wg_sessions_byindex);
1227
1228 /* Wait for passive references to drain. */
1229 pserialize_perform(wgp->wgp_psz);
1230 psref_target_destroy(&wgs->wgs_psref, wg_psref_class);
1231
1232 /* Free memory, zero state, and transition to UNKNOWN. */
1233 thmap_gc(wg->wg_sessions_byindex, garbage);
1234 wg_clear_states(wgs);
1235 wgs->wgs_state = WGS_STATE_UNKNOWN;
1236 }
1237
1238 /*
1239 * wg_get_session_index(wg, wgs)
1240 *
1241 * Choose a session index for wgs->wgs_local_index, and store it
1242 * in wg's table of sessions by index.
1243 *
1244 * wgs must be the unstable session of its peer, and must be
1245 * transitioning out of the UNKNOWN state.
1246 */
1247 static void
1248 wg_get_session_index(struct wg_softc *wg, struct wg_session *wgs)
1249 {
1250 struct wg_peer *wgp __diagused = wgs->wgs_peer;
1251 struct wg_session *wgs0;
1252 uint32_t index;
1253
1254 KASSERT(mutex_owned(wgp->wgp_lock));
1255 KASSERT(wgs == wgp->wgp_session_unstable);
1256 KASSERT(wgs->wgs_state == WGS_STATE_UNKNOWN);
1257
1258 do {
1259 /* Pick a uniform random index. */
1260 index = cprng_strong32();
1261
1262 /* Try to take it. */
1263 wgs->wgs_local_index = index;
1264 wgs0 = thmap_put(wg->wg_sessions_byindex,
1265 &wgs->wgs_local_index, sizeof wgs->wgs_local_index, wgs);
1266
1267 /* If someone else beat us, start over. */
1268 } while (__predict_false(wgs0 != wgs));
1269 }
1270
1271 /*
1272 * wg_put_session_index(wg, wgs)
1273 *
1274 * Remove wgs from the table of sessions by index, wait for any
1275 * passive references to drain, and transition the session to the
1276 * UNKNOWN state.
1277 *
1278 * wgs must be the unstable session of its peer, and must not be
1279 * UNKNOWN or ESTABLISHED.
1280 */
1281 static void
1282 wg_put_session_index(struct wg_softc *wg, struct wg_session *wgs)
1283 {
1284 struct wg_peer *wgp __diagused = wgs->wgs_peer;
1285
1286 KASSERT(mutex_owned(wgp->wgp_lock));
1287 KASSERT(wgs == wgp->wgp_session_unstable);
1288 KASSERT(wgs->wgs_state != WGS_STATE_UNKNOWN);
1289 KASSERT(wgs->wgs_state != WGS_STATE_ESTABLISHED);
1290
1291 wg_destroy_session(wg, wgs);
1292 psref_target_init(&wgs->wgs_psref, wg_psref_class);
1293 }
1294
1295 /*
1296 * Handshake patterns
1297 *
1298 * [W] 5: "These messages use the "IK" pattern from Noise"
1299 * [N] 7.5. Interactive handshake patterns (fundamental)
 * "The first character refers to the initiator's static key:"
 * "I = Static key for initiator Immediately transmitted to responder,
 * despite reduced or absent identity hiding"
 * "The second character refers to the responder's static key:"
 * "K = Static key for responder Known to initiator"
1305 * "IK:
1306 * <- s
1307 * ...
1308 * -> e, es, s, ss
1309 * <- e, ee, se"
1310 * [N] 9.4. Pattern modifiers
1311 * "IKpsk2:
1312 * <- s
1313 * ...
1314 * -> e, es, s, ss
1315 * <- e, ee, se, psk"
1316 */
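
/*
 * How the pattern above maps onto the code that follows, using the
 * paper's names (a summary, not a specification):
 *
 *	-> e, es, s, ss     first message, wg_fill_msg_init():
 *			    e maps to wgmi_ephemeral; s is Si^pub,
 *			    AEAD-sealed into wgmi_static; es and ss each
 *			    update the chaining key via wg_algo_dh_kdf()
 *	<- e, ee, se, psk   second message, wg_fill_msg_resp():
 *			    e maps to wgmr_ephemeral; ee and se update
 *			    the chaining key; psk is mixed in with KDF3
 *			    just before wgmr_empty is sealed
 *
 * The pre-message "<- s" is the responder's static key, which the
 * initiator must already know (wgp_pubkey of the peer).
 */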
1317 static void
1318 wg_fill_msg_init(struct wg_softc *wg, struct wg_peer *wgp,
1319 struct wg_session *wgs, struct wg_msg_init *wgmi)
1320 {
1321 uint8_t ckey[WG_CHAINING_KEY_LEN]; /* [W] 5.4.2: Ci */
1322 uint8_t hash[WG_HASH_LEN]; /* [W] 5.4.2: Hi */
1323 uint8_t cipher_key[WG_CIPHER_KEY_LEN];
1324 uint8_t pubkey[WG_EPHEMERAL_KEY_LEN];
1325 uint8_t privkey[WG_EPHEMERAL_KEY_LEN];
1326
1327 KASSERT(mutex_owned(wgp->wgp_lock));
1328 KASSERT(wgs == wgp->wgp_session_unstable);
1329 KASSERT(wgs->wgs_state == WGS_STATE_INIT_ACTIVE);
1330
1331 wgmi->wgmi_type = htole32(WG_MSG_TYPE_INIT);
1332 wgmi->wgmi_sender = wgs->wgs_local_index;
1333
1334 /* [W] 5.4.2: First Message: Initiator to Responder */
1335
1336 /* Ci := HASH(CONSTRUCTION) */
1337 /* Hi := HASH(Ci || IDENTIFIER) */
1338 wg_init_key_and_hash(ckey, hash);
1339 /* Hi := HASH(Hi || Sr^pub) */
1340 wg_algo_hash(hash, wgp->wgp_pubkey, sizeof(wgp->wgp_pubkey));
1341
1342 WG_DUMP_HASH("hash", hash);
1343
1344 /* [N] 2.2: "e" */
1345 /* Ei^priv, Ei^pub := DH-GENERATE() */
1346 wg_algo_generate_keypair(pubkey, privkey);
1347 /* Ci := KDF1(Ci, Ei^pub) */
1348 wg_algo_kdf(ckey, NULL, NULL, ckey, pubkey, sizeof(pubkey));
1349 /* msg.ephemeral := Ei^pub */
1350 memcpy(wgmi->wgmi_ephemeral, pubkey, sizeof(wgmi->wgmi_ephemeral));
1351 /* Hi := HASH(Hi || msg.ephemeral) */
1352 wg_algo_hash(hash, pubkey, sizeof(pubkey));
1353
1354 WG_DUMP_HASH("ckey", ckey);
1355 WG_DUMP_HASH("hash", hash);
1356
1357 /* [N] 2.2: "es" */
1358 /* Ci, k := KDF2(Ci, DH(Ei^priv, Sr^pub)) */
1359 wg_algo_dh_kdf(ckey, cipher_key, privkey, wgp->wgp_pubkey);
1360
1361 /* [N] 2.2: "s" */
1362 /* msg.static := AEAD(k, 0, Si^pub, Hi) */
1363 wg_algo_aead_enc(wgmi->wgmi_static, sizeof(wgmi->wgmi_static),
1364 cipher_key, 0, wg->wg_pubkey, sizeof(wg->wg_pubkey),
1365 hash, sizeof(hash));
1366 /* Hi := HASH(Hi || msg.static) */
1367 wg_algo_hash(hash, wgmi->wgmi_static, sizeof(wgmi->wgmi_static));
1368
1369 WG_DUMP_HASH48("wgmi_static", wgmi->wgmi_static);
1370
1371 /* [N] 2.2: "ss" */
1372 /* Ci, k := KDF2(Ci, DH(Si^priv, Sr^pub)) */
1373 wg_algo_dh_kdf(ckey, cipher_key, wg->wg_privkey, wgp->wgp_pubkey);
1374
1375 /* msg.timestamp := AEAD(k, TIMESTAMP(), Hi) */
1376 wg_timestamp_t timestamp;
1377 wg_algo_tai64n(timestamp);
1378 wg_algo_aead_enc(wgmi->wgmi_timestamp, sizeof(wgmi->wgmi_timestamp),
1379 cipher_key, 0, timestamp, sizeof(timestamp), hash, sizeof(hash));
1380 /* Hi := HASH(Hi || msg.timestamp) */
1381 wg_algo_hash(hash, wgmi->wgmi_timestamp, sizeof(wgmi->wgmi_timestamp));
1382
1383 /* [W] 5.4.4 Cookie MACs */
1384 wg_algo_mac_mac1(wgmi->wgmi_mac1, sizeof(wgmi->wgmi_mac1),
1385 wgp->wgp_pubkey, sizeof(wgp->wgp_pubkey),
1386 (const uint8_t *)wgmi, offsetof(struct wg_msg_init, wgmi_mac1));
1387 /* Need mac1 to decrypt a cookie from a cookie message */
1388 memcpy(wgp->wgp_last_sent_mac1, wgmi->wgmi_mac1,
1389 sizeof(wgp->wgp_last_sent_mac1));
1390 wgp->wgp_last_sent_mac1_valid = true;
1391
1392 if (wgp->wgp_latest_cookie_time == 0 ||
1393 (time_uptime - wgp->wgp_latest_cookie_time) >= WG_COOKIE_TIME)
1394 memset(wgmi->wgmi_mac2, 0, sizeof(wgmi->wgmi_mac2));
1395 else {
1396 wg_algo_mac(wgmi->wgmi_mac2, sizeof(wgmi->wgmi_mac2),
1397 wgp->wgp_latest_cookie, WG_COOKIE_LEN,
1398 (const uint8_t *)wgmi,
1399 offsetof(struct wg_msg_init, wgmi_mac2),
1400 NULL, 0);
1401 }
1402
1403 memcpy(wgs->wgs_ephemeral_key_pub, pubkey, sizeof(pubkey));
1404 memcpy(wgs->wgs_ephemeral_key_priv, privkey, sizeof(privkey));
1405 memcpy(wgs->wgs_handshake_hash, hash, sizeof(hash));
1406 memcpy(wgs->wgs_chaining_key, ckey, sizeof(ckey));
	WG_DLOG("sender=%x\n", wgs->wgs_local_index);
1408 }
1409
1410 static void
1411 wg_handle_msg_init(struct wg_softc *wg, const struct wg_msg_init *wgmi,
1412 const struct sockaddr *src)
1413 {
1414 uint8_t ckey[WG_CHAINING_KEY_LEN]; /* [W] 5.4.2: Ci */
1415 uint8_t hash[WG_HASH_LEN]; /* [W] 5.4.2: Hi */
1416 uint8_t cipher_key[WG_CIPHER_KEY_LEN];
1417 uint8_t peer_pubkey[WG_STATIC_KEY_LEN];
1418 struct wg_peer *wgp;
1419 struct wg_session *wgs;
1420 int error, ret;
1421 struct psref psref_peer;
1422 uint8_t mac1[WG_MAC_LEN];
1423
1424 WG_TRACE("init msg received");
1425
1426 wg_algo_mac_mac1(mac1, sizeof(mac1),
1427 wg->wg_pubkey, sizeof(wg->wg_pubkey),
1428 (const uint8_t *)wgmi, offsetof(struct wg_msg_init, wgmi_mac1));
1429
1430 /*
1431 * [W] 5.3: Denial of Service Mitigation & Cookies
1432 * "the responder, ..., must always reject messages with an invalid
1433 * msg.mac1"
1434 */
1435 if (!consttime_memequal(mac1, wgmi->wgmi_mac1, sizeof(mac1))) {
1436 WG_DLOG("mac1 is invalid\n");
1437 return;
1438 }
1439
1440 /*
1441 * [W] 5.4.2: First Message: Initiator to Responder
1442 * "When the responder receives this message, it does the same
1443 * operations so that its final state variables are identical,
1444 * replacing the operands of the DH function to produce equivalent
1445 * values."
1446 * Note that the following comments of operations are just copies of
1447 * the initiator's ones.
1448 */
1449
1450 /* Ci := HASH(CONSTRUCTION) */
1451 /* Hi := HASH(Ci || IDENTIFIER) */
1452 wg_init_key_and_hash(ckey, hash);
1453 /* Hi := HASH(Hi || Sr^pub) */
1454 wg_algo_hash(hash, wg->wg_pubkey, sizeof(wg->wg_pubkey));
1455
1456 /* [N] 2.2: "e" */
1457 /* Ci := KDF1(Ci, Ei^pub) */
1458 wg_algo_kdf(ckey, NULL, NULL, ckey, wgmi->wgmi_ephemeral,
1459 sizeof(wgmi->wgmi_ephemeral));
1460 /* Hi := HASH(Hi || msg.ephemeral) */
1461 wg_algo_hash(hash, wgmi->wgmi_ephemeral, sizeof(wgmi->wgmi_ephemeral));
1462
1463 WG_DUMP_HASH("ckey", ckey);
1464
1465 /* [N] 2.2: "es" */
1466 /* Ci, k := KDF2(Ci, DH(Ei^priv, Sr^pub)) */
1467 wg_algo_dh_kdf(ckey, cipher_key, wg->wg_privkey, wgmi->wgmi_ephemeral);
1468
1469 WG_DUMP_HASH48("wgmi_static", wgmi->wgmi_static);
1470
1471 /* [N] 2.2: "s" */
1472 /* msg.static := AEAD(k, 0, Si^pub, Hi) */
1473 error = wg_algo_aead_dec(peer_pubkey, WG_STATIC_KEY_LEN, cipher_key, 0,
1474 wgmi->wgmi_static, sizeof(wgmi->wgmi_static), hash, sizeof(hash));
1475 if (error != 0) {
1476 WG_LOG_RATECHECK(&wg->wg_ppsratecheck, LOG_DEBUG,
1477 "wg_algo_aead_dec for secret key failed\n");
1478 return;
1479 }
1480 /* Hi := HASH(Hi || msg.static) */
1481 wg_algo_hash(hash, wgmi->wgmi_static, sizeof(wgmi->wgmi_static));
1482
1483 wgp = wg_lookup_peer_by_pubkey(wg, peer_pubkey, &psref_peer);
1484 if (wgp == NULL) {
1485 WG_DLOG("peer not found\n");
1486 return;
1487 }
1488
1489 /*
1490 * Lock the peer to serialize access to cookie state.
1491 *
1492 * XXX Can we safely avoid holding the lock across DH? Take it
1493 * just to verify mac2 and then unlock/DH/lock?
1494 */
1495 mutex_enter(wgp->wgp_lock);
1496
1497 if (__predict_false(wg_is_underload(wg, wgp, WG_MSG_TYPE_INIT))) {
1498 WG_TRACE("under load");
1499 /*
1500 * [W] 5.3: Denial of Service Mitigation & Cookies
1501 * "the responder, ..., and when under load may reject messages
1502 * with an invalid msg.mac2. If the responder receives a
1503 * message with a valid msg.mac1 yet with an invalid msg.mac2,
1504 * and is under load, it may respond with a cookie reply
1505 * message"
1506 */
1507 uint8_t zero[WG_MAC_LEN] = {0};
1508 if (consttime_memequal(wgmi->wgmi_mac2, zero, sizeof(zero))) {
1509 WG_TRACE("sending a cookie message: no cookie included");
1510 (void)wg_send_cookie_msg(wg, wgp, wgmi->wgmi_sender,
1511 wgmi->wgmi_mac1, src);
1512 goto out;
1513 }
1514 if (!wgp->wgp_last_sent_cookie_valid) {
1515 WG_TRACE("sending a cookie message: no cookie sent ever");
1516 (void)wg_send_cookie_msg(wg, wgp, wgmi->wgmi_sender,
1517 wgmi->wgmi_mac1, src);
1518 goto out;
1519 }
1520 uint8_t mac2[WG_MAC_LEN];
1521 wg_algo_mac(mac2, sizeof(mac2), wgp->wgp_last_sent_cookie,
1522 WG_COOKIE_LEN, (const uint8_t *)wgmi,
1523 offsetof(struct wg_msg_init, wgmi_mac2), NULL, 0);
1524 if (!consttime_memequal(mac2, wgmi->wgmi_mac2, sizeof(mac2))) {
1525 WG_DLOG("mac2 is invalid\n");
1526 goto out;
1527 }
		WG_TRACE("under load, but continuing anyway");
1529 }
1530
1531 /* [N] 2.2: "ss" */
1532 /* Ci, k := KDF2(Ci, DH(Si^priv, Sr^pub)) */
1533 wg_algo_dh_kdf(ckey, cipher_key, wg->wg_privkey, wgp->wgp_pubkey);
1534
1535 /* msg.timestamp := AEAD(k, TIMESTAMP(), Hi) */
1536 wg_timestamp_t timestamp;
1537 error = wg_algo_aead_dec(timestamp, sizeof(timestamp), cipher_key, 0,
1538 wgmi->wgmi_timestamp, sizeof(wgmi->wgmi_timestamp),
1539 hash, sizeof(hash));
1540 if (error != 0) {
1541 WG_LOG_RATECHECK(&wgp->wgp_ppsratecheck, LOG_DEBUG,
1542 "wg_algo_aead_dec for timestamp failed\n");
1543 goto out;
1544 }
1545 /* Hi := HASH(Hi || msg.timestamp) */
1546 wg_algo_hash(hash, wgmi->wgmi_timestamp, sizeof(wgmi->wgmi_timestamp));
1547
1548 /*
1549 * [W] 5.1 "The responder keeps track of the greatest timestamp
1550 * received per peer and discards packets containing
1551 * timestamps less than or equal to it."
1552 */
1553 ret = memcmp(timestamp, wgp->wgp_timestamp_latest_init,
1554 sizeof(timestamp));
1555 if (ret <= 0) {
1556 WG_LOG_RATECHECK(&wgp->wgp_ppsratecheck, LOG_DEBUG,
1557 "invalid init msg: timestamp is old\n");
1558 goto out;
1559 }
1560 memcpy(wgp->wgp_timestamp_latest_init, timestamp, sizeof(timestamp));
1561
1562 /*
1563 * Message is good -- we're committing to handle it now, unless
1564 * we were already initiating a session.
1565 */
1566 wgs = wgp->wgp_session_unstable;
1567 switch (wgs->wgs_state) {
1568 case WGS_STATE_UNKNOWN: /* new session initiated by peer */
1569 wg_get_session_index(wg, wgs);
1570 break;
1571 case WGS_STATE_INIT_ACTIVE: /* we're already initiating, drop */
1572 WG_TRACE("Session already initializing, ignoring the message");
1573 goto out;
1574 case WGS_STATE_INIT_PASSIVE: /* peer is retrying, start over */
1575 WG_TRACE("Session already initializing, destroying old states");
1576 wg_clear_states(wgs);
1577 /* keep session index */
1578 break;
1579 case WGS_STATE_ESTABLISHED: /* can't happen */
1580 panic("unstable session can't be established");
1581 break;
1582 case WGS_STATE_DESTROYING: /* rekey initiated by peer */
1583 WG_TRACE("Session destroying, but force to clear");
1584 callout_stop(&wgp->wgp_session_dtor_timer);
1585 wg_clear_states(wgs);
1586 /* keep session index */
1587 break;
1588 default:
1589 panic("invalid session state: %d", wgs->wgs_state);
1590 }
1591 wgs->wgs_state = WGS_STATE_INIT_PASSIVE;
1592
1593 memcpy(wgs->wgs_handshake_hash, hash, sizeof(hash));
1594 memcpy(wgs->wgs_chaining_key, ckey, sizeof(ckey));
1595 memcpy(wgs->wgs_ephemeral_key_peer, wgmi->wgmi_ephemeral,
1596 sizeof(wgmi->wgmi_ephemeral));
1597
1598 wg_update_endpoint_if_necessary(wgp, src);
1599
1600 (void)wg_send_handshake_msg_resp(wg, wgp, wgs, wgmi);
1601
1602 wg_calculate_keys(wgs, false);
1603 wg_clear_states(wgs);
1604
1605 out:
1606 mutex_exit(wgp->wgp_lock);
1607 wg_put_peer(wgp, &psref_peer);
1608 }
1609
1610 static struct socket *
1611 wg_get_so_by_af(struct wg_softc *wg, const int af)
1612 {
1613
1614 switch (af) {
1615 #ifdef INET
1616 case AF_INET:
1617 return wg->wg_so4;
1618 #endif
1619 #ifdef INET6
1620 case AF_INET6:
1621 return wg->wg_so6;
1622 #endif
1623 default:
1624 panic("wg: no such af: %d", af);
1625 }
1626 }
1627
1628 static struct socket *
1629 wg_get_so_by_peer(struct wg_peer *wgp, struct wg_sockaddr *wgsa)
1630 {
1631
1632 return wg_get_so_by_af(wgp->wgp_sc, wgsa_family(wgsa));
1633 }
1634
1635 static struct wg_sockaddr *
1636 wg_get_endpoint_sa(struct wg_peer *wgp, struct psref *psref)
1637 {
1638 struct wg_sockaddr *wgsa;
1639 int s;
1640
1641 s = pserialize_read_enter();
1642 wgsa = atomic_load_consume(&wgp->wgp_endpoint);
1643 psref_acquire(psref, &wgsa->wgsa_psref, wg_psref_class);
1644 pserialize_read_exit(s);
1645
1646 return wgsa;
1647 }
1648
1649 static void
1650 wg_put_sa(struct wg_peer *wgp, struct wg_sockaddr *wgsa, struct psref *psref)
1651 {
1652
1653 psref_release(psref, &wgsa->wgsa_psref, wg_psref_class);
1654 }
1655
1656 static int
1657 wg_send_so(struct wg_peer *wgp, struct mbuf *m)
1658 {
1659 int error;
1660 struct socket *so;
1661 struct psref psref;
1662 struct wg_sockaddr *wgsa;
1663
1664 wgsa = wg_get_endpoint_sa(wgp, &psref);
1665 so = wg_get_so_by_peer(wgp, wgsa);
1666 error = sosend(so, wgsatosa(wgsa), NULL, m, NULL, 0, curlwp);
1667 wg_put_sa(wgp, wgsa, &psref);
1668
1669 return error;
1670 }
1671
1672 static int
1673 wg_send_handshake_msg_init(struct wg_softc *wg, struct wg_peer *wgp)
1674 {
1675 int error;
1676 struct mbuf *m;
1677 struct wg_msg_init *wgmi;
1678 struct wg_session *wgs;
1679
1680 KASSERT(mutex_owned(wgp->wgp_lock));
1681
1682 wgs = wgp->wgp_session_unstable;
1683 /* XXX pull dispatch out into wg_task_send_init_message */
1684 switch (wgs->wgs_state) {
1685 case WGS_STATE_UNKNOWN: /* new session initiated by us */
1686 wg_get_session_index(wg, wgs);
1687 break;
1688 case WGS_STATE_INIT_ACTIVE: /* we're already initiating, stop */
1689 WG_TRACE("Session already initializing, skip starting new one");
1690 return EBUSY;
1691 case WGS_STATE_INIT_PASSIVE: /* peer was trying -- XXX what now? */
1692 WG_TRACE("Session already initializing, destroying old states");
1693 wg_clear_states(wgs);
1694 /* keep session index */
1695 break;
1696 case WGS_STATE_ESTABLISHED: /* can't happen */
1697 panic("unstable session can't be established");
1698 break;
1699 case WGS_STATE_DESTROYING: /* rekey initiated by us too early */
1700 WG_TRACE("Session destroying");
1701 /* XXX should wait? */
1702 return EBUSY;
1703 }
1704 wgs->wgs_state = WGS_STATE_INIT_ACTIVE;
1705
1706 m = m_gethdr(M_WAIT, MT_DATA);
1707 m->m_pkthdr.len = m->m_len = sizeof(*wgmi);
1708 wgmi = mtod(m, struct wg_msg_init *);
1709 wg_fill_msg_init(wg, wgp, wgs, wgmi);
1710
1711 error = wg->wg_ops->send_hs_msg(wgp, m);
1712 if (error == 0) {
1713 WG_TRACE("init msg sent");
1714
1715 if (wgp->wgp_handshake_start_time == 0)
1716 wgp->wgp_handshake_start_time = time_uptime;
1717 callout_schedule(&wgp->wgp_handshake_timeout_timer,
1718 MIN(wg_rekey_timeout, INT_MAX/hz) * hz);
1719 } else {
1720 wg_put_session_index(wg, wgs);
1721 /* Initiation failed; toss packet waiting for it if any. */
1722 if ((m = atomic_swap_ptr(&wgp->wgp_pending, NULL)) != NULL)
1723 m_freem(m);
1724 }
1725
1726 return error;
1727 }
1728
1729 static void
1730 wg_fill_msg_resp(struct wg_softc *wg, struct wg_peer *wgp,
1731 struct wg_session *wgs, struct wg_msg_resp *wgmr,
1732 const struct wg_msg_init *wgmi)
1733 {
1734 uint8_t ckey[WG_CHAINING_KEY_LEN]; /* [W] 5.4.3: Cr */
1735 uint8_t hash[WG_HASH_LEN]; /* [W] 5.4.3: Hr */
1736 uint8_t cipher_key[WG_KDF_OUTPUT_LEN];
1737 uint8_t pubkey[WG_EPHEMERAL_KEY_LEN];
1738 uint8_t privkey[WG_EPHEMERAL_KEY_LEN];
1739
1740 KASSERT(mutex_owned(wgp->wgp_lock));
1741 KASSERT(wgs == wgp->wgp_session_unstable);
1742 KASSERT(wgs->wgs_state == WGS_STATE_INIT_PASSIVE);
1743
1744 memcpy(hash, wgs->wgs_handshake_hash, sizeof(hash));
1745 memcpy(ckey, wgs->wgs_chaining_key, sizeof(ckey));
1746
1747 wgmr->wgmr_type = htole32(WG_MSG_TYPE_RESP);
1748 wgmr->wgmr_sender = wgs->wgs_local_index;
1749 wgmr->wgmr_receiver = wgmi->wgmi_sender;
1750
1751 /* [W] 5.4.3 Second Message: Responder to Initiator */
1752
1753 /* [N] 2.2: "e" */
1754 /* Er^priv, Er^pub := DH-GENERATE() */
1755 wg_algo_generate_keypair(pubkey, privkey);
1756 /* Cr := KDF1(Cr, Er^pub) */
1757 wg_algo_kdf(ckey, NULL, NULL, ckey, pubkey, sizeof(pubkey));
1758 /* msg.ephemeral := Er^pub */
1759 memcpy(wgmr->wgmr_ephemeral, pubkey, sizeof(wgmr->wgmr_ephemeral));
1760 /* Hr := HASH(Hr || msg.ephemeral) */
1761 wg_algo_hash(hash, pubkey, sizeof(pubkey));
1762
1763 WG_DUMP_HASH("ckey", ckey);
1764 WG_DUMP_HASH("hash", hash);
1765
1766 /* [N] 2.2: "ee" */
1767 /* Cr := KDF1(Cr, DH(Er^priv, Ei^pub)) */
1768 wg_algo_dh_kdf(ckey, NULL, privkey, wgs->wgs_ephemeral_key_peer);
1769
1770 /* [N] 2.2: "se" */
1771 /* Cr := KDF1(Cr, DH(Er^priv, Si^pub)) */
1772 wg_algo_dh_kdf(ckey, NULL, privkey, wgp->wgp_pubkey);
1773
1774 /* [N] 9.2: "psk" */
1775 {
1776 uint8_t kdfout[WG_KDF_OUTPUT_LEN];
1777 /* Cr, r, k := KDF3(Cr, Q) */
1778 wg_algo_kdf(ckey, kdfout, cipher_key, ckey, wgp->wgp_psk,
1779 sizeof(wgp->wgp_psk));
1780 /* Hr := HASH(Hr || r) */
1781 wg_algo_hash(hash, kdfout, sizeof(kdfout));
1782 }
1783
1784 /* msg.empty := AEAD(k, 0, e, Hr) */
1785 wg_algo_aead_enc(wgmr->wgmr_empty, sizeof(wgmr->wgmr_empty),
1786 cipher_key, 0, NULL, 0, hash, sizeof(hash));
1787 /* Hr := HASH(Hr || msg.empty) */
1788 wg_algo_hash(hash, wgmr->wgmr_empty, sizeof(wgmr->wgmr_empty));
1789
1790 WG_DUMP_HASH("wgmr_empty", wgmr->wgmr_empty);
1791
1792 /* [W] 5.4.4: Cookie MACs */
1793 /* msg.mac1 := MAC(HASH(LABEL-MAC1 || Sm'^pub), msg_a) */
	wg_algo_mac_mac1(wgmr->wgmr_mac1, sizeof(wgmr->wgmr_mac1),
1795 wgp->wgp_pubkey, sizeof(wgp->wgp_pubkey),
1796 (const uint8_t *)wgmr, offsetof(struct wg_msg_resp, wgmr_mac1));
1797 /* Need mac1 to decrypt a cookie from a cookie message */
1798 memcpy(wgp->wgp_last_sent_mac1, wgmr->wgmr_mac1,
1799 sizeof(wgp->wgp_last_sent_mac1));
1800 wgp->wgp_last_sent_mac1_valid = true;
1801
1802 if (wgp->wgp_latest_cookie_time == 0 ||
1803 (time_uptime - wgp->wgp_latest_cookie_time) >= WG_COOKIE_TIME)
1804 /* msg.mac2 := 0^16 */
1805 memset(wgmr->wgmr_mac2, 0, sizeof(wgmr->wgmr_mac2));
1806 else {
1807 /* msg.mac2 := MAC(Lm, msg_b) */
1808 wg_algo_mac(wgmr->wgmr_mac2, sizeof(wgmr->wgmr_mac2),
1809 wgp->wgp_latest_cookie, WG_COOKIE_LEN,
1810 (const uint8_t *)wgmr,
1811 offsetof(struct wg_msg_resp, wgmr_mac2),
1812 NULL, 0);
1813 }
1814
1815 memcpy(wgs->wgs_handshake_hash, hash, sizeof(hash));
1816 memcpy(wgs->wgs_chaining_key, ckey, sizeof(ckey));
1817 memcpy(wgs->wgs_ephemeral_key_pub, pubkey, sizeof(pubkey));
1818 memcpy(wgs->wgs_ephemeral_key_priv, privkey, sizeof(privkey));
1819 wgs->wgs_remote_index = wgmi->wgmi_sender;
1820 WG_DLOG("sender=%x\n", wgs->wgs_local_index);
1821 WG_DLOG("receiver=%x\n", wgs->wgs_remote_index);
1822 }
1823
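/*
 * wg_swap_sessions(wgp)
 *
 *	Promote the newly established session (wgp_session_unstable) to
 *	the stable slot and demote the previous stable session.  The
 *	stable pointer is published with release ordering so that
 *	lock-free readers observe a fully initialized session.
 */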
1824 static void
1825 wg_swap_sessions(struct wg_peer *wgp)
1826 {
1827 struct wg_session *wgs, *wgs_prev;
1828
1829 KASSERT(mutex_owned(wgp->wgp_lock));
1830
1831 wgs = wgp->wgp_session_unstable;
1832 KASSERT(wgs->wgs_state == WGS_STATE_ESTABLISHED);
1833
1834 wgs_prev = wgp->wgp_session_stable;
1835 KASSERT(wgs_prev->wgs_state == WGS_STATE_ESTABLISHED ||
1836 wgs_prev->wgs_state == WGS_STATE_UNKNOWN);
1837 atomic_store_release(&wgp->wgp_session_stable, wgs);
1838 wgp->wgp_session_unstable = wgs_prev;
1839 }
1840
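/*
 * wg_handle_msg_resp(wg, wgmr, src)
 *
 *	Handle a handshake response as the initiator: verify mac1 (and,
 *	when under load, mac2, possibly answering with a cookie reply),
 *	finish the Noise handshake, derive the transport keys, mark the
 *	session ESTABLISHED and swap it into the stable slot, then send
 *	either the pending data packet or a keepalive.
 */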
1841 static void
1842 wg_handle_msg_resp(struct wg_softc *wg, const struct wg_msg_resp *wgmr,
1843 const struct sockaddr *src)
1844 {
1845 uint8_t ckey[WG_CHAINING_KEY_LEN]; /* [W] 5.4.3: Cr */
1846 uint8_t hash[WG_HASH_LEN]; /* [W] 5.4.3: Hr */
1847 uint8_t cipher_key[WG_KDF_OUTPUT_LEN];
1848 struct wg_peer *wgp;
1849 struct wg_session *wgs;
1850 struct psref psref;
1851 int error;
1852 uint8_t mac1[WG_MAC_LEN];
1853 struct wg_session *wgs_prev;
1854 struct mbuf *m;
1855
1856 wg_algo_mac_mac1(mac1, sizeof(mac1),
1857 wg->wg_pubkey, sizeof(wg->wg_pubkey),
1858 (const uint8_t *)wgmr, offsetof(struct wg_msg_resp, wgmr_mac1));
1859
1860 /*
1861 * [W] 5.3: Denial of Service Mitigation & Cookies
1862 * "the responder, ..., must always reject messages with an invalid
1863 * msg.mac1"
1864 */
1865 if (!consttime_memequal(mac1, wgmr->wgmr_mac1, sizeof(mac1))) {
1866 WG_DLOG("mac1 is invalid\n");
1867 return;
1868 }
1869
1870 WG_TRACE("resp msg received");
1871 wgs = wg_lookup_session_by_index(wg, wgmr->wgmr_receiver, &psref);
1872 if (wgs == NULL) {
1873 WG_TRACE("No session found");
1874 return;
1875 }
1876
1877 wgp = wgs->wgs_peer;
1878
1879 mutex_enter(wgp->wgp_lock);
1880
1881 /* If we weren't waiting for a handshake response, drop it. */
1882 if (wgs->wgs_state != WGS_STATE_INIT_ACTIVE) {
1883 WG_TRACE("peer sent spurious handshake response, ignoring");
1884 goto out;
1885 }
1886
1887 if (__predict_false(wg_is_underload(wg, wgp, WG_MSG_TYPE_RESP))) {
1888 WG_TRACE("under load");
1889 /*
1890 * [W] 5.3: Denial of Service Mitigation & Cookies
1891 * "the responder, ..., and when under load may reject messages
1892 * with an invalid msg.mac2. If the responder receives a
1893 * message with a valid msg.mac1 yet with an invalid msg.mac2,
1894 * and is under load, it may respond with a cookie reply
1895 * message"
1896 */
1897 uint8_t zero[WG_MAC_LEN] = {0};
1898 if (consttime_memequal(wgmr->wgmr_mac2, zero, sizeof(zero))) {
1899 WG_TRACE("sending a cookie message: no cookie included");
1900 (void)wg_send_cookie_msg(wg, wgp, wgmr->wgmr_sender,
1901 wgmr->wgmr_mac1, src);
1902 goto out;
1903 }
1904 if (!wgp->wgp_last_sent_cookie_valid) {
1905 WG_TRACE("sending a cookie message: no cookie sent ever");
1906 (void)wg_send_cookie_msg(wg, wgp, wgmr->wgmr_sender,
1907 wgmr->wgmr_mac1, src);
1908 goto out;
1909 }
1910 uint8_t mac2[WG_MAC_LEN];
1911 wg_algo_mac(mac2, sizeof(mac2), wgp->wgp_last_sent_cookie,
1912 WG_COOKIE_LEN, (const uint8_t *)wgmr,
1913 offsetof(struct wg_msg_resp, wgmr_mac2), NULL, 0);
1914 if (!consttime_memequal(mac2, wgmr->wgmr_mac2, sizeof(mac2))) {
1915 WG_DLOG("mac2 is invalid\n");
1916 goto out;
1917 }
1918 WG_TRACE("under load, but continue to sending");
1919 }
1920
1921 memcpy(hash, wgs->wgs_handshake_hash, sizeof(hash));
1922 memcpy(ckey, wgs->wgs_chaining_key, sizeof(ckey));
1923
1924 /*
1925 * [W] 5.4.3 Second Message: Responder to Initiator
1926 * "When the initiator receives this message, it does the same
1927 * operations so that its final state variables are identical,
1928 * replacing the operands of the DH function to produce equivalent
1929 * values."
1930 * Note that the comments on the following operations are just
1931 * copies of the initiator's.
1932 */
1933
1934 /* [N] 2.2: "e" */
1935 /* Cr := KDF1(Cr, Er^pub) */
1936 wg_algo_kdf(ckey, NULL, NULL, ckey, wgmr->wgmr_ephemeral,
1937 sizeof(wgmr->wgmr_ephemeral));
1938 /* Hr := HASH(Hr || msg.ephemeral) */
1939 wg_algo_hash(hash, wgmr->wgmr_ephemeral, sizeof(wgmr->wgmr_ephemeral));
1940
1941 WG_DUMP_HASH("ckey", ckey);
1942 WG_DUMP_HASH("hash", hash);
1943
1944 /* [N] 2.2: "ee" */
1945 /* Cr := KDF1(Cr, DH(Er^priv, Ei^pub)) */
1946 wg_algo_dh_kdf(ckey, NULL, wgs->wgs_ephemeral_key_priv,
1947 wgmr->wgmr_ephemeral);
1948
1949 /* [N] 2.2: "se" */
1950 /* Cr := KDF1(Cr, DH(Er^priv, Si^pub)) */
1951 wg_algo_dh_kdf(ckey, NULL, wg->wg_privkey, wgmr->wgmr_ephemeral);
1952
1953 /* [N] 9.2: "psk" */
1954 {
1955 uint8_t kdfout[WG_KDF_OUTPUT_LEN];
1956 /* Cr, r, k := KDF3(Cr, Q) */
1957 wg_algo_kdf(ckey, kdfout, cipher_key, ckey, wgp->wgp_psk,
1958 sizeof(wgp->wgp_psk));
1959 /* Hr := HASH(Hr || r) */
1960 wg_algo_hash(hash, kdfout, sizeof(kdfout));
1961 }
1962
1963 {
1964 uint8_t out[sizeof(wgmr->wgmr_empty)]; /* for safety */
1965 /* msg.empty := AEAD(k, 0, e, Hr) */
1966 error = wg_algo_aead_dec(out, 0, cipher_key, 0, wgmr->wgmr_empty,
1967 sizeof(wgmr->wgmr_empty), hash, sizeof(hash));
1968 WG_DUMP_HASH("wgmr_empty", wgmr->wgmr_empty);
1969 if (error != 0) {
1970 WG_LOG_RATECHECK(&wgp->wgp_ppsratecheck, LOG_DEBUG,
1971 "wg_algo_aead_dec for empty message failed\n");
1972 goto out;
1973 }
1974 /* Hr := HASH(Hr || msg.empty) */
1975 wg_algo_hash(hash, wgmr->wgmr_empty, sizeof(wgmr->wgmr_empty));
1976 }
1977
1978 memcpy(wgs->wgs_handshake_hash, hash, sizeof(wgs->wgs_handshake_hash));
1979 memcpy(wgs->wgs_chaining_key, ckey, sizeof(wgs->wgs_chaining_key));
1980 wgs->wgs_remote_index = wgmr->wgmr_sender;
1981 WG_DLOG("receiver=%x\n", wgs->wgs_remote_index);
1982
1983 KASSERT(wgs->wgs_state == WGS_STATE_INIT_ACTIVE);
1984 wgs->wgs_state = WGS_STATE_ESTABLISHED;
1985 wgs->wgs_time_established = time_uptime;
1986 wgs->wgs_time_last_data_sent = 0;
1987 wgs->wgs_is_initiator = true;
1988 wg_calculate_keys(wgs, true);
1989 wg_clear_states(wgs);
1990 WG_TRACE("WGS_STATE_ESTABLISHED");
1991
1992 callout_stop(&wgp->wgp_handshake_timeout_timer);
1993
1994 wg_swap_sessions(wgp);
1995 KASSERT(wgs == wgp->wgp_session_stable);
1996 wgs_prev = wgp->wgp_session_unstable;
1997 getnanotime(&wgp->wgp_last_handshake_time);
1998 wgp->wgp_handshake_start_time = 0;
1999 wgp->wgp_last_sent_mac1_valid = false;
2000 wgp->wgp_last_sent_cookie_valid = false;
2001
2002 wg_schedule_rekey_timer(wgp);
2003
2004 wg_update_endpoint_if_necessary(wgp, src);
2005
2006 /*
2007 * If we had a data packet queued up, send it; otherwise send a
2008 * keepalive message -- either way we have to send something
2009 * immediately or else the responder will never answer.
2010 */
2011 if ((m = atomic_swap_ptr(&wgp->wgp_pending, NULL)) != NULL) {
2012 kpreempt_disable();
2013 const uint32_t h = curcpu()->ci_index; // pktq_rps_hash(m)
2014 M_SETCTX(m, wgp);
2015 if (__predict_false(!pktq_enqueue(wg_pktq, m, h))) {
2016 WGLOG(LOG_ERR, "pktq full, dropping\n");
2017 m_freem(m);
2018 }
2019 kpreempt_enable();
2020 } else {
2021 wg_send_keepalive_msg(wgp, wgs);
2022 }
2023
2024 if (wgs_prev->wgs_state == WGS_STATE_ESTABLISHED) {
2025 /* Wait for wg_get_stable_session to drain. */
2026 pserialize_perform(wgp->wgp_psz);
2027
2028 /* Transition ESTABLISHED->DESTROYING. */
2029 wgs_prev->wgs_state = WGS_STATE_DESTROYING;
2030
2031 /* We can't destroy the old session immediately */
2032 wg_schedule_session_dtor_timer(wgp);
2033 } else {
2034 KASSERTMSG(wgs_prev->wgs_state == WGS_STATE_UNKNOWN,
2035 "state=%d", wgs_prev->wgs_state);
2036 }
2037
2038 out:
2039 mutex_exit(wgp->wgp_lock);
2040 wg_put_session(wgs, &psref);
2041 }
2042
2043 static int
2044 wg_send_handshake_msg_resp(struct wg_softc *wg, struct wg_peer *wgp,
2045 struct wg_session *wgs, const struct wg_msg_init *wgmi)
2046 {
2047 int error;
2048 struct mbuf *m;
2049 struct wg_msg_resp *wgmr;
2050
2051 KASSERT(mutex_owned(wgp->wgp_lock));
2052 KASSERT(wgs == wgp->wgp_session_unstable);
2053 KASSERT(wgs->wgs_state == WGS_STATE_INIT_PASSIVE);
2054
2055 m = m_gethdr(M_WAIT, MT_DATA);
2056 m->m_pkthdr.len = m->m_len = sizeof(*wgmr);
2057 wgmr = mtod(m, struct wg_msg_resp *);
2058 wg_fill_msg_resp(wg, wgp, wgs, wgmr, wgmi);
2059
2060 error = wg->wg_ops->send_hs_msg(wgp, m);
2061 if (error == 0)
2062 WG_TRACE("resp msg sent");
2063 return error;
2064 }
2065
2066 static struct wg_peer *
2067 wg_lookup_peer_by_pubkey(struct wg_softc *wg,
2068 const uint8_t pubkey[WG_STATIC_KEY_LEN], struct psref *psref)
2069 {
2070 struct wg_peer *wgp;
2071
2072 int s = pserialize_read_enter();
2073 wgp = thmap_get(wg->wg_peers_bypubkey, pubkey, WG_STATIC_KEY_LEN);
2074 if (wgp != NULL)
2075 wg_get_peer(wgp, psref);
2076 pserialize_read_exit(s);
2077
2078 return wgp;
2079 }
2080
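/*
 * wg_fill_msg_cookie(wg, wgp, wgmc, sender, mac1, src)
 *
 *	Build a cookie reply message ([W] 5.4.7).  The cookie is a MAC of
 *	the sender's address and port, keyed with a random value that is
 *	regenerated every WG_RANDVAL_TIME seconds, and is then encrypted
 *	under a key derived from our public key with the peer's mac1 as
 *	additional data.
 */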
2081 static void
2082 wg_fill_msg_cookie(struct wg_softc *wg, struct wg_peer *wgp,
2083 struct wg_msg_cookie *wgmc, const uint32_t sender,
2084 const uint8_t mac1[WG_MAC_LEN], const struct sockaddr *src)
2085 {
2086 uint8_t cookie[WG_COOKIE_LEN];
2087 uint8_t key[WG_HASH_LEN];
2088 uint8_t addr[sizeof(struct in6_addr)];
2089 size_t addrlen;
2090 uint16_t uh_sport; /* be */
2091
2092 KASSERT(mutex_owned(wgp->wgp_lock));
2093
2094 wgmc->wgmc_type = htole32(WG_MSG_TYPE_COOKIE);
2095 wgmc->wgmc_receiver = sender;
2096 cprng_fast(wgmc->wgmc_salt, sizeof(wgmc->wgmc_salt));
2097
2098 /*
2099 * [W] 5.4.7: Under Load: Cookie Reply Message
2100 * "The secret variable, Rm, changes every two minutes to a
2101 * random value"
2102 */
2103 if ((time_uptime - wgp->wgp_last_genrandval_time) > WG_RANDVAL_TIME) {
2104 wgp->wgp_randval = cprng_strong32();
2105 wgp->wgp_last_genrandval_time = time_uptime;
2106 }
2107
2108 switch (src->sa_family) {
2109 case AF_INET: {
2110 const struct sockaddr_in *sin = satocsin(src);
2111 addrlen = sizeof(sin->sin_addr);
2112 memcpy(addr, &sin->sin_addr, addrlen);
2113 uh_sport = sin->sin_port;
2114 break;
2115 }
2116 #ifdef INET6
2117 case AF_INET6: {
2118 const struct sockaddr_in6 *sin6 = satocsin6(src);
2119 addrlen = sizeof(sin6->sin6_addr);
2120 memcpy(addr, &sin6->sin6_addr, addrlen);
2121 uh_sport = sin6->sin6_port;
2122 break;
2123 }
2124 #endif
2125 default:
2126 panic("invalid af=%d", src->sa_family);
2127 }
2128
2129 wg_algo_mac(cookie, sizeof(cookie),
2130 (const uint8_t *)&wgp->wgp_randval, sizeof(wgp->wgp_randval),
2131 addr, addrlen, (const uint8_t *)&uh_sport, sizeof(uh_sport));
2132 wg_algo_mac_cookie(key, sizeof(key), wg->wg_pubkey,
2133 sizeof(wg->wg_pubkey));
2134 wg_algo_xaead_enc(wgmc->wgmc_cookie, sizeof(wgmc->wgmc_cookie), key,
2135 cookie, sizeof(cookie), mac1, WG_MAC_LEN, wgmc->wgmc_salt);
2136
2137 /* Need to store to calculate mac2 */
2138 memcpy(wgp->wgp_last_sent_cookie, cookie, sizeof(cookie));
2139 wgp->wgp_last_sent_cookie_valid = true;
2140 }
2141
2142 static int
2143 wg_send_cookie_msg(struct wg_softc *wg, struct wg_peer *wgp,
2144 const uint32_t sender, const uint8_t mac1[WG_MAC_LEN],
2145 const struct sockaddr *src)
2146 {
2147 int error;
2148 struct mbuf *m;
2149 struct wg_msg_cookie *wgmc;
2150
2151 KASSERT(mutex_owned(wgp->wgp_lock));
2152
2153 m = m_gethdr(M_WAIT, MT_DATA);
2154 m->m_pkthdr.len = m->m_len = sizeof(*wgmc);
2155 wgmc = mtod(m, struct wg_msg_cookie *);
2156 wg_fill_msg_cookie(wg, wgp, wgmc, sender, mac1, src);
2157
2158 error = wg->wg_ops->send_hs_msg(wgp, m);
2159 if (error == 0)
2160 WG_TRACE("cookie msg sent");
2161 return error;
2162 }
2163
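/*
 * wg_is_underload(wg, wgp, msgtype)
 *
 *	Crude load heuristic: we consider ourselves under load if a
 *	handshake message of the same type arrived from this peer within
 *	the last second (see the comment below).
 */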
2164 static bool
2165 wg_is_underload(struct wg_softc *wg, struct wg_peer *wgp, int msgtype)
2166 {
2167 #ifdef WG_DEBUG_PARAMS
2168 if (wg_force_underload)
2169 return true;
2170 #endif
2171
2172 /*
2173 * XXX We have no means of load estimation.  For DoS-mitigation
2174 * purposes we treat frequent handshake messages as (a kind of)
2175 * load: if a message of the same type arrives from a peer within
2176 * one second of the previous one, we consider ourselves under load.
2177 */
2178 time_t last = wgp->wgp_last_msg_received_time[msgtype];
2179 wgp->wgp_last_msg_received_time[msgtype] = time_uptime;
2180 return (time_uptime - last) == 0;
2181 }
2182
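/*
 * wg_calculate_keys(wgs, initiator)
 *
 *	Derive the transport keys from the final chaining key
 *	([W] 5.4.5); the send and receive keys are swapped depending on
 *	whether we were the initiator of the handshake.
 */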
2183 static void
2184 wg_calculate_keys(struct wg_session *wgs, const bool initiator)
2185 {
2186
2187 KASSERT(mutex_owned(wgs->wgs_peer->wgp_lock));
2188
2189 /*
2190 * [W] 5.4.5: Ti^send = Tr^recv, Ti^recv = Tr^send := KDF2(Ci = Cr, e)
2191 */
2192 if (initiator) {
2193 wg_algo_kdf(wgs->wgs_tkey_send, wgs->wgs_tkey_recv, NULL,
2194 wgs->wgs_chaining_key, NULL, 0);
2195 } else {
2196 wg_algo_kdf(wgs->wgs_tkey_recv, wgs->wgs_tkey_send, NULL,
2197 wgs->wgs_chaining_key, NULL, 0);
2198 }
2199 WG_DUMP_HASH("wgs_tkey_send", wgs->wgs_tkey_send);
2200 WG_DUMP_HASH("wgs_tkey_recv", wgs->wgs_tkey_recv);
2201 }
2202
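/*
 * The per-session send counter doubles as the data-message nonce.  On
 * platforms with 64-bit atomic load/store it is maintained with
 * atomics; otherwise wgs_send_counter_lock protects it.
 * wg_session_inc_send_counter returns the pre-increment value.
 */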
2203 static uint64_t
2204 wg_session_get_send_counter(struct wg_session *wgs)
2205 {
2206 #ifdef __HAVE_ATOMIC64_LOADSTORE
2207 return atomic_load_relaxed(&wgs->wgs_send_counter);
2208 #else
2209 uint64_t send_counter;
2210
2211 mutex_enter(&wgs->wgs_send_counter_lock);
2212 send_counter = wgs->wgs_send_counter;
2213 mutex_exit(&wgs->wgs_send_counter_lock);
2214
2215 return send_counter;
2216 #endif
2217 }
2218
2219 static uint64_t
2220 wg_session_inc_send_counter(struct wg_session *wgs)
2221 {
2222 #ifdef __HAVE_ATOMIC64_LOADSTORE
2223 return atomic_inc_64_nv(&wgs->wgs_send_counter) - 1;
2224 #else
2225 uint64_t send_counter;
2226
2227 mutex_enter(&wgs->wgs_send_counter_lock);
2228 send_counter = wgs->wgs_send_counter++;
2229 mutex_exit(&wgs->wgs_send_counter_lock);
2230
2231 return send_counter;
2232 #endif
2233 }
2234
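/*
 * wg_clear_states(wgs)
 *
 *	Reset the send counter and replay window and wipe the handshake
 *	state (hash, chaining key, ephemeral keys) once it is no longer
 *	needed.
 */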
2235 static void
2236 wg_clear_states(struct wg_session *wgs)
2237 {
2238
2239 KASSERT(mutex_owned(wgs->wgs_peer->wgp_lock));
2240
2241 wgs->wgs_send_counter = 0;
2242 sliwin_reset(&wgs->wgs_recvwin->window);
2243
2244 #define wgs_clear(v) explicit_memset(wgs->wgs_##v, 0, sizeof(wgs->wgs_##v))
2245 wgs_clear(handshake_hash);
2246 wgs_clear(chaining_key);
2247 wgs_clear(ephemeral_key_pub);
2248 wgs_clear(ephemeral_key_priv);
2249 wgs_clear(ephemeral_key_peer);
2250 #undef wgs_clear
2251 }
2252
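/*
 * wg_lookup_session_by_index(wg, index, psref)
 *
 *	Look up a session by its local index under pserialize and acquire
 *	a psref so it remains valid after the read section ends.
 */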
2253 static struct wg_session *
2254 wg_lookup_session_by_index(struct wg_softc *wg, const uint32_t index,
2255 struct psref *psref)
2256 {
2257 struct wg_session *wgs;
2258
2259 int s = pserialize_read_enter();
2260 wgs = thmap_get(wg->wg_sessions_byindex, &index, sizeof index);
2261 if (wgs != NULL) {
2262 KASSERT(atomic_load_relaxed(&wgs->wgs_state) !=
2263 WGS_STATE_UNKNOWN);
2264 psref_acquire(psref, &wgs->wgs_psref, wg_psref_class);
2265 }
2266 pserialize_read_exit(s);
2267
2268 return wgs;
2269 }
2270
2271 static void
2272 wg_schedule_rekey_timer(struct wg_peer *wgp)
2273 {
2274 int timeout = MIN(wg_rekey_after_time, INT_MAX/hz);
2275
2276 callout_schedule(&wgp->wgp_rekey_timer, timeout * hz);
2277 }
2278
2279 static void
2280 wg_send_keepalive_msg(struct wg_peer *wgp, struct wg_session *wgs)
2281 {
2282 struct mbuf *m;
2283
2284 /*
2285 * [W] 6.5 Passive Keepalive
2286 * "A keepalive message is simply a transport data message with
2287 * a zero-length encapsulated encrypted inner-packet."
2288 */
2289 m = m_gethdr(M_WAIT, MT_DATA);
2290 wg_send_data_msg(wgp, wgs, m);
2291 }
2292
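/*
 * wg_need_to_send_init_message(wgs)
 *
 *	[W] 6.2: an initiator that has received data but not yet sent any
 *	on a sufficiently old session must start a new handshake;
 *	wgs_time_last_data_sent == 0 means no data has been sent yet on
 *	this session.
 */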
2293 static bool
2294 wg_need_to_send_init_message(struct wg_session *wgs)
2295 {
2296 /*
2297 * [W] 6.2 Transport Message Limits
2298 * "if a peer is the initiator of a current secure session,
2299 * WireGuard will send a handshake initiation message to begin
2300 * a new secure session ... if after receiving a transport data
2301 * message, the current secure session is (REJECT-AFTER-TIME -
2302 * KEEPALIVE-TIMEOUT - REKEY-TIMEOUT) seconds old and it has
2303 * not yet acted upon this event."
2304 */
2305 return wgs->wgs_is_initiator && wgs->wgs_time_last_data_sent == 0 &&
2306 (time_uptime - wgs->wgs_time_established) >=
2307 (wg_reject_after_time - wg_keepalive_timeout - wg_rekey_timeout);
2308 }
2309
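/*
 * wg_schedule_peer_task(wgp, task)
 *
 *	Record the task bit under wgp_intr_lock and enqueue the peer's
 *	work item if no tasks were already pending; wg_peer_work will run
 *	the accumulated tasks with wgp_lock held.
 */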
2310 static void
2311 wg_schedule_peer_task(struct wg_peer *wgp, int task)
2312 {
2313
2314 mutex_enter(wgp->wgp_intr_lock);
2315 WG_DLOG("tasks=%d, task=%d\n", wgp->wgp_tasks, task);
2316 if (wgp->wgp_tasks == 0)
2317 /*
2318 * XXX If the current CPU is already loaded -- e.g., if
2319 * there's already a bunch of handshakes queued up --
2320 * consider tossing this over to another CPU to
2321 * distribute the load.
2322 */
2323 workqueue_enqueue(wg_wq, &wgp->wgp_work, NULL);
2324 wgp->wgp_tasks |= task;
2325 mutex_exit(wgp->wgp_intr_lock);
2326 }
2327
2328 static void
2329 wg_change_endpoint(struct wg_peer *wgp, const struct sockaddr *new)
2330 {
2331 struct wg_sockaddr *wgsa_prev;
2332
2333 WG_TRACE("Changing endpoint");
2334
2335 memcpy(wgp->wgp_endpoint0, new, new->sa_len);
2336 wgsa_prev = wgp->wgp_endpoint;
2337 atomic_store_release(&wgp->wgp_endpoint, wgp->wgp_endpoint0);
2338 wgp->wgp_endpoint0 = wgsa_prev;
2339 atomic_store_release(&wgp->wgp_endpoint_available, true);
2340
2341 wg_schedule_peer_task(wgp, WGP_TASK_ENDPOINT_CHANGED);
2342 }
2343
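/*
 * wg_validate_inner_packet(packet, decrypted_len, af)
 *
 *	Sanity-check the decrypted inner packet: determine the address
 *	family from the IP version field and make sure the length
 *	declared in the header fits within the decrypted data.
 */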
2344 static bool
2345 wg_validate_inner_packet(const char *packet, size_t decrypted_len, int *af)
2346 {
2347 uint16_t packet_len;
2348 const struct ip *ip;
2349
2350 if (__predict_false(decrypted_len < sizeof(struct ip)))
2351 return false;
2352
2353 ip = (const struct ip *)packet;
2354 if (ip->ip_v == 4)
2355 *af = AF_INET;
2356 else if (ip->ip_v == 6)
2357 *af = AF_INET6;
2358 else
2359 return false;
2360
2361 WG_DLOG("af=%d\n", *af);
2362
2363 switch (*af) {
2364 #ifdef INET
2365 case AF_INET:
2366 packet_len = ntohs(ip->ip_len);
2367 break;
2368 #endif
2369 #ifdef INET6
2370 case AF_INET6: {
2371 const struct ip6_hdr *ip6;
2372
2373 if (__predict_false(decrypted_len < sizeof(struct ip6_hdr)))
2374 return false;
2375
2376 ip6 = (const struct ip6_hdr *)packet;
2377 packet_len = sizeof(struct ip6_hdr) + ntohs(ip6->ip6_plen);
2378 break;
2379 }
2380 #endif
2381 default:
2382 return false;
2383 }
2384
2385 WG_DLOG("packet_len=%u\n", packet_len);
2386 if (packet_len > decrypted_len)
2387 return false;
2388
2389 return true;
2390 }
2391
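/*
 * wg_validate_route(wg, wgp_expected, af, packet)
 *
 *	Cryptokey routing on receive: the inner source address must
 *	resolve, via the allowed-ips radix tree, to the peer whose
 *	session decrypted the packet.
 */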
2392 static bool
2393 wg_validate_route(struct wg_softc *wg, struct wg_peer *wgp_expected,
2394 int af, char *packet)
2395 {
2396 struct sockaddr_storage ss;
2397 struct sockaddr *sa;
2398 struct psref psref;
2399 struct wg_peer *wgp;
2400 bool ok;
2401
2402 /*
2403 * [W] II. CRYPTOKEY ROUTING
2404 * "it will only accept it if its source IP resolves in the
2405 * table to the public key used in the secure session for
2406 * decrypting it."
2407 */
2408
2409 if (af == AF_INET) {
2410 const struct ip *ip = (const struct ip *)packet;
2411 struct sockaddr_in *sin = (struct sockaddr_in *)&ss;
2412 sockaddr_in_init(sin, &ip->ip_src, 0);
2413 sa = sintosa(sin);
2414 #ifdef INET6
2415 } else {
2416 const struct ip6_hdr *ip6 = (const struct ip6_hdr *)packet;
2417 struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&ss;
2418 sockaddr_in6_init(sin6, &ip6->ip6_src, 0, 0, 0);
2419 sa = sin6tosa(sin6);
2420 #endif
2421 }
2422
2423 wgp = wg_pick_peer_by_sa(wg, sa, &psref);
2424 ok = (wgp == wgp_expected);
2425 if (wgp != NULL)
2426 wg_put_peer(wgp, &psref);
2427
2428 return ok;
2429 }
2430
2431 static void
2432 wg_session_dtor_timer(void *arg)
2433 {
2434 struct wg_peer *wgp = arg;
2435
2436 WG_TRACE("enter");
2437
2438 wg_schedule_peer_task(wgp, WGP_TASK_DESTROY_PREV_SESSION);
2439 }
2440
2441 static void
2442 wg_schedule_session_dtor_timer(struct wg_peer *wgp)
2443 {
2444
2445 /* 1 second grace period */
2446 callout_schedule(&wgp->wgp_session_dtor_timer, hz);
2447 }
2448
2449 static bool
2450 sockaddr_port_match(const struct sockaddr *sa1, const struct sockaddr *sa2)
2451 {
2452 if (sa1->sa_family != sa2->sa_family)
2453 return false;
2454
2455 switch (sa1->sa_family) {
2456 #ifdef INET
2457 case AF_INET:
2458 return satocsin(sa1)->sin_port == satocsin(sa2)->sin_port;
2459 #endif
2460 #ifdef INET6
2461 case AF_INET6:
2462 return satocsin6(sa1)->sin6_port == satocsin6(sa2)->sin6_port;
2463 #endif
2464 default:
2465 return false;
2466 }
2467 }
2468
2469 static void
2470 wg_update_endpoint_if_necessary(struct wg_peer *wgp,
2471 const struct sockaddr *src)
2472 {
2473 struct wg_sockaddr *wgsa;
2474 struct psref psref;
2475
2476 wgsa = wg_get_endpoint_sa(wgp, &psref);
2477
2478 #ifdef WG_DEBUG_LOG
2479 char oldaddr[128], newaddr[128];
2480 sockaddr_format(wgsatosa(wgsa), oldaddr, sizeof(oldaddr));
2481 sockaddr_format(src, newaddr, sizeof(newaddr));
2482 WG_DLOG("old=%s, new=%s\n", oldaddr, newaddr);
2483 #endif
2484
2485 /*
2486 * [W] III: "Since the packet has authenticated correctly, the source IP of
2487 * the outer UDP/IP packet is used to update the endpoint for peer..."
2488 */
2489 if (__predict_false(sockaddr_cmp(src, wgsatosa(wgsa)) != 0 ||
2490 !sockaddr_port_match(src, wgsatosa(wgsa)))) {
2491 /* XXX We can't change the endpoint twice in a short period */
2492 if (atomic_swap_uint(&wgp->wgp_endpoint_changing, 1) == 0) {
2493 wg_change_endpoint(wgp, src);
2494 }
2495 }
2496
2497 wg_put_sa(wgp, wgsa, &psref);
2498 }
2499
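/*
 * wg_handle_msg_data(wg, m, src)
 *
 *	Handle a transport data message: look up the session by receiver
 *	index, screen the counter against the sliding window, decrypt and
 *	authenticate, then update the replay window and peer endpoint,
 *	validate the inner packet and its route, and hand it to the
 *	network stack.  Also drives the state machine: first data on an
 *	INIT_PASSIVE session establishes it, and rekey/keepalive tasks
 *	are scheduled as needed.  Consumes (or frees) m.
 */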
2500 static void
2501 wg_handle_msg_data(struct wg_softc *wg, struct mbuf *m,
2502 const struct sockaddr *src)
2503 {
2504 struct wg_msg_data *wgmd;
2505 char *encrypted_buf = NULL, *decrypted_buf;
2506 size_t encrypted_len, decrypted_len;
2507 struct wg_session *wgs;
2508 struct wg_peer *wgp;
2509 int state;
2510 size_t mlen;
2511 struct psref psref;
2512 int error, af;
2513 bool success, free_encrypted_buf = false, ok;
2514 struct mbuf *n;
2515
2516 KASSERT(m->m_len >= sizeof(struct wg_msg_data));
2517 wgmd = mtod(m, struct wg_msg_data *);
2518
2519 KASSERT(wgmd->wgmd_type == htole32(WG_MSG_TYPE_DATA));
2520 WG_TRACE("data");
2521
2522 /* Find the putative session, or drop. */
2523 wgs = wg_lookup_session_by_index(wg, wgmd->wgmd_receiver, &psref);
2524 if (wgs == NULL) {
2525 WG_TRACE("No session found");
2526 m_freem(m);
2527 return;
2528 }
2529
2530 /*
2531 * We are only ready to handle data when in INIT_PASSIVE,
2532 * ESTABLISHED, or DESTROYING. All transitions out of that
2533 * state dissociate the session index and drain psrefs.
2534 */
2535 state = atomic_load_relaxed(&wgs->wgs_state);
2536 switch (state) {
2537 case WGS_STATE_UNKNOWN:
2538 panic("wg session %p in unknown state has session index %u",
2539 wgs, wgmd->wgmd_receiver);
2540 case WGS_STATE_INIT_ACTIVE:
2541 WG_TRACE("not yet ready for data");
2542 goto out;
2543 case WGS_STATE_INIT_PASSIVE:
2544 case WGS_STATE_ESTABLISHED:
2545 case WGS_STATE_DESTROYING:
2546 break;
2547 }
2548
2549 /*
2550 * Get the peer, for rate-limited logs (XXX MPSAFE, dtrace) and
2551 * to update the endpoint if authentication succeeds.
2552 */
2553 wgp = wgs->wgs_peer;
2554
2555 /*
2556 * Reject outrageously wrong sequence numbers before doing any
2557 * crypto work or taking any locks.
2558 */
2559 error = sliwin_check_fast(&wgs->wgs_recvwin->window,
2560 le64toh(wgmd->wgmd_counter));
2561 if (error) {
2562 WG_LOG_RATECHECK(&wgp->wgp_ppsratecheck, LOG_DEBUG,
2563 "out-of-window packet: %"PRIu64"\n",
2564 le64toh(wgmd->wgmd_counter));
2565 goto out;
2566 }
2567
2568 /* Ensure the payload and authenticator are contiguous. */
2569 mlen = m_length(m);
2570 encrypted_len = mlen - sizeof(*wgmd);
2571 if (encrypted_len < WG_AUTHTAG_LEN) {
2572 WG_DLOG("Short encrypted_len: %lu\n", encrypted_len);
2573 goto out;
2574 }
2575 success = m_ensure_contig(&m, sizeof(*wgmd) + encrypted_len);
2576 if (success) {
2577 encrypted_buf = mtod(m, char *) + sizeof(*wgmd);
2578 } else {
2579 encrypted_buf = kmem_intr_alloc(encrypted_len, KM_NOSLEEP);
2580 if (encrypted_buf == NULL) {
2581 WG_DLOG("failed to allocate encrypted_buf\n");
2582 goto out;
2583 }
2584 m_copydata(m, sizeof(*wgmd), encrypted_len, encrypted_buf);
2585 free_encrypted_buf = true;
2586 }
2587 /* m_ensure_contig may change m regardless of its result */
2588 KASSERT(m->m_len >= sizeof(*wgmd));
2589 wgmd = mtod(m, struct wg_msg_data *);
2590
2591 /*
2592 * Get a buffer for the plaintext. Add WG_AUTHTAG_LEN to avoid
2593 * a zero-length buffer (XXX). Drop if plaintext is longer
2594 * than MCLBYTES (XXX).
2595 */
2596 decrypted_len = encrypted_len - WG_AUTHTAG_LEN;
2597 if (decrypted_len > MCLBYTES) {
2598 /* FIXME handle data larger than MCLBYTES */
2599 WG_DLOG("can't handle data larger than MCLBYTES\n");
2600 goto out;
2601 }
2602 n = wg_get_mbuf(0, decrypted_len + WG_AUTHTAG_LEN);
2603 if (n == NULL) {
2604 WG_DLOG("wg_get_mbuf failed\n");
2605 goto out;
2606 }
2607 decrypted_buf = mtod(n, char *);
2608
2609 /* Decrypt and verify the packet. */
2610 WG_DLOG("mlen=%lu, encrypted_len=%lu\n", mlen, encrypted_len);
2611 error = wg_algo_aead_dec(decrypted_buf,
2612 encrypted_len - WG_AUTHTAG_LEN /* can be 0 */,
2613 wgs->wgs_tkey_recv, le64toh(wgmd->wgmd_counter), encrypted_buf,
2614 encrypted_len, NULL, 0);
2615 if (error != 0) {
2616 WG_LOG_RATECHECK(&wgp->wgp_ppsratecheck, LOG_DEBUG,
2617 "failed to wg_algo_aead_dec\n");
2618 m_freem(n);
2619 goto out;
2620 }
2621 WG_DLOG("outsize=%u\n", (u_int)decrypted_len);
2622
2623 /* Packet is genuine. Reject it if a replay or just too old. */
2624 mutex_enter(&wgs->wgs_recvwin->lock);
2625 error = sliwin_update(&wgs->wgs_recvwin->window,
2626 le64toh(wgmd->wgmd_counter));
2627 mutex_exit(&wgs->wgs_recvwin->lock);
2628 if (error) {
2629 WG_LOG_RATECHECK(&wgp->wgp_ppsratecheck, LOG_DEBUG,
2630 "replay or out-of-window packet: %"PRIu64"\n",
2631 le64toh(wgmd->wgmd_counter));
2632 m_freem(n);
2633 goto out;
2634 }
2635
2636 /* We're done with m now; free it and chuck the pointers. */
2637 m_freem(m);
2638 m = NULL;
2639 wgmd = NULL;
2640
2641 /*
2642 * Validate the encapsulated packet header and get the address
2643 * family, or drop.
2644 */
2645 ok = wg_validate_inner_packet(decrypted_buf, decrypted_len, &af);
2646 if (!ok) {
2647 m_freem(n);
2648 goto out;
2649 }
2650
2651 /*
2652 * The packet is genuine. Update the peer's endpoint if the
2653 * source address changed.
2654 *
2655 * XXX How to prevent DoS by replaying genuine packets from the
2656 * wrong source address?
2657 */
2658 wg_update_endpoint_if_necessary(wgp, src);
2659
2660 /* Submit it into our network stack if routable. */
2661 ok = wg_validate_route(wg, wgp, af, decrypted_buf);
2662 if (ok) {
2663 wg->wg_ops->input(&wg->wg_if, n, af);
2664 } else {
2665 WG_LOG_RATECHECK(&wgp->wgp_ppsratecheck, LOG_DEBUG,
2666 "invalid source address\n");
2667 m_freem(n);
2668 /*
2669 * The inner address is invalid; however, the session itself is
2670 * valid, so continue the session processing below.
2671 */
2672 }
2673 n = NULL;
2674
2675 /* Update the state machine if necessary. */
2676 if (__predict_false(state == WGS_STATE_INIT_PASSIVE)) {
2677 /*
2678 * We were waiting for the initiator to send their
2679 * first data transport message, and that has happened.
2680 * Schedule a task to establish this session.
2681 */
2682 wg_schedule_peer_task(wgp, WGP_TASK_ESTABLISH_SESSION);
2683 } else {
2684 if (__predict_false(wg_need_to_send_init_message(wgs))) {
2685 wg_schedule_peer_task(wgp, WGP_TASK_SEND_INIT_MESSAGE);
2686 }
2687 /*
2688 * [W] 6.5 Passive Keepalive
2689 * "If a peer has received a validly-authenticated transport
2690 * data message (section 5.4.6), but does not have any packets
2691 * itself to send back for KEEPALIVE-TIMEOUT seconds, it sends
2692 * a keepalive message."
2693 */
2694 WG_DLOG("time_uptime=%ju wgs_time_last_data_sent=%ju\n",
2695 (uintmax_t)time_uptime,
2696 (uintmax_t)wgs->wgs_time_last_data_sent);
2697 if ((time_uptime - wgs->wgs_time_last_data_sent) >=
2698 wg_keepalive_timeout) {
2699 WG_TRACE("Schedule sending keepalive message");
2700 /*
2701 * We can't send a keepalive message here to avoid
2702 * a deadlock; we already hold the solock of a socket
2703 * that is used to send the message.
2704 */
2705 wg_schedule_peer_task(wgp,
2706 WGP_TASK_SEND_KEEPALIVE_MESSAGE);
2707 }
2708 }
2709 out:
2710 wg_put_session(wgs, &psref);
2711 if (m != NULL)
2712 m_freem(m);
2713 if (free_encrypted_buf)
2714 kmem_intr_free(encrypted_buf, encrypted_len);
2715 }
2716
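/*
 * wg_handle_msg_cookie(wg, wgmc)
 *
 *	Handle a cookie reply: decrypt the cookie using the mac1 we last
 *	sent as additional data and store it so that the next retried
 *	handshake message can carry a valid mac2 ([W] 6.6).
 */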
2717 static void
2718 wg_handle_msg_cookie(struct wg_softc *wg, const struct wg_msg_cookie *wgmc)
2719 {
2720 struct wg_session *wgs;
2721 struct wg_peer *wgp;
2722 struct psref psref;
2723 int error;
2724 uint8_t key[WG_HASH_LEN];
2725 uint8_t cookie[WG_COOKIE_LEN];
2726
2727 WG_TRACE("cookie msg received");
2728
2729 /* Find the putative session. */
2730 wgs = wg_lookup_session_by_index(wg, wgmc->wgmc_receiver, &psref);
2731 if (wgs == NULL) {
2732 WG_TRACE("No session found");
2733 return;
2734 }
2735
2736 /* Lock the peer so we can update the cookie state. */
2737 wgp = wgs->wgs_peer;
2738 mutex_enter(wgp->wgp_lock);
2739
2740 if (!wgp->wgp_last_sent_mac1_valid) {
2741 WG_TRACE("No valid mac1 sent (or expired)");
2742 goto out;
2743 }
2744
2745 /* Decrypt the cookie and store it for later handshake retry. */
2746 wg_algo_mac_cookie(key, sizeof(key), wgp->wgp_pubkey,
2747 sizeof(wgp->wgp_pubkey));
2748 error = wg_algo_xaead_dec(cookie, sizeof(cookie), key,
2749 wgmc->wgmc_cookie, sizeof(wgmc->wgmc_cookie),
2750 wgp->wgp_last_sent_mac1, sizeof(wgp->wgp_last_sent_mac1),
2751 wgmc->wgmc_salt);
2752 if (error != 0) {
2753 WG_LOG_RATECHECK(&wgp->wgp_ppsratecheck, LOG_DEBUG,
2754 "wg_algo_aead_dec for cookie failed: error=%d\n", error);
2755 goto out;
2756 }
2757 /*
2758 * [W] 6.6: Interaction with Cookie Reply System
2759 * "it should simply store the decrypted cookie value from the cookie
2760 * reply message, and wait for the expiration of the REKEY-TIMEOUT
2761 * timer for retrying a handshake initiation message."
2762 */
2763 wgp->wgp_latest_cookie_time = time_uptime;
2764 memcpy(wgp->wgp_latest_cookie, cookie, sizeof(wgp->wgp_latest_cookie));
2765 out:
2766 mutex_exit(wgp->wgp_lock);
2767 wg_put_session(wgs, &psref);
2768 }
2769
2770 static struct mbuf *
2771 wg_validate_msg_header(struct wg_softc *wg, struct mbuf *m)
2772 {
2773 struct wg_msg wgm;
2774 size_t mbuflen;
2775 size_t msglen;
2776
2777 /*
2778 * Get the mbuf chain length. It is already guaranteed, by
2779 * wg_overudp_cb, to be large enough for a struct wg_msg.
2780 */
2781 mbuflen = m_length(m);
2782 KASSERT(mbuflen >= sizeof(struct wg_msg));
2783
2784 /*
2785 * Copy the message header (32-bit message type) out -- we'll
2786 * worry about contiguity and alignment later.
2787 */
2788 m_copydata(m, 0, sizeof(wgm), &wgm);
2789 switch (le32toh(wgm.wgm_type)) {
2790 case WG_MSG_TYPE_INIT:
2791 msglen = sizeof(struct wg_msg_init);
2792 break;
2793 case WG_MSG_TYPE_RESP:
2794 msglen = sizeof(struct wg_msg_resp);
2795 break;
2796 case WG_MSG_TYPE_COOKIE:
2797 msglen = sizeof(struct wg_msg_cookie);
2798 break;
2799 case WG_MSG_TYPE_DATA:
2800 msglen = sizeof(struct wg_msg_data);
2801 break;
2802 default:
2803 WG_LOG_RATECHECK(&wg->wg_ppsratecheck, LOG_DEBUG,
2804 "Unexpected msg type: %u\n", le32toh(wgm.wgm_type));
2805 goto error;
2806 }
2807
2808 /* Verify the mbuf chain is long enough for this type of message. */
2809 if (__predict_false(mbuflen < msglen)) {
2810 WG_DLOG("Invalid msg size: mbuflen=%lu type=%u\n", mbuflen,
2811 le32toh(wgm.wgm_type));
2812 goto error;
2813 }
2814
2815 /* Make the message header contiguous if necessary. */
2816 if (__predict_false(m->m_len < msglen)) {
2817 m = m_pullup(m, msglen);
2818 if (m == NULL)
2819 return NULL;
2820 }
2821
2822 return m;
2823
2824 error:
2825 m_freem(m);
2826 return NULL;
2827 }
2828
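/*
 * wg_handle_packet(wg, m, src)
 *
 *	Validate the message header and dispatch by type.  Data messages
 *	consume the mbuf in wg_handle_msg_data; for handshake messages
 *	the handlers work on the contiguous header and the mbuf is freed
 *	here.
 */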
2829 static void
2830 wg_handle_packet(struct wg_softc *wg, struct mbuf *m,
2831 const struct sockaddr *src)
2832 {
2833 struct wg_msg *wgm;
2834
2835 m = wg_validate_msg_header(wg, m);
2836 if (__predict_false(m == NULL))
2837 return;
2838
2839 KASSERT(m->m_len >= sizeof(struct wg_msg));
2840 wgm = mtod(m, struct wg_msg *);
2841 switch (le32toh(wgm->wgm_type)) {
2842 case WG_MSG_TYPE_INIT:
2843 wg_handle_msg_init(wg, (struct wg_msg_init *)wgm, src);
2844 break;
2845 case WG_MSG_TYPE_RESP:
2846 wg_handle_msg_resp(wg, (struct wg_msg_resp *)wgm, src);
2847 break;
2848 case WG_MSG_TYPE_COOKIE:
2849 wg_handle_msg_cookie(wg, (struct wg_msg_cookie *)wgm);
2850 break;
2851 case WG_MSG_TYPE_DATA:
2852 wg_handle_msg_data(wg, m, src);
2853 /* wg_handle_msg_data frees m for us */
2854 return;
2855 default:
2856 panic("invalid message type: %d", le32toh(wgm->wgm_type));
2857 }
2858
2859 m_freem(m);
2860 }
2861
2862 static void
2863 wg_receive_packets(struct wg_softc *wg, const int af)
2864 {
2865
2866 for (;;) {
2867 int error, flags;
2868 struct socket *so;
2869 struct mbuf *m = NULL;
2870 struct uio dummy_uio;
2871 struct mbuf *paddr = NULL;
2872 struct sockaddr *src;
2873
2874 so = wg_get_so_by_af(wg, af);
2875 flags = MSG_DONTWAIT;
2876 dummy_uio.uio_resid = 1000000000;
2877
2878 error = so->so_receive(so, &paddr, &dummy_uio, &m, NULL,
2879 &flags);
2880 if (error || m == NULL) {
2881 //if (error == EWOULDBLOCK)
2882 return;
2883 }
2884
2885 KASSERT(paddr != NULL);
2886 KASSERT(paddr->m_len >= sizeof(struct sockaddr));
2887 src = mtod(paddr, struct sockaddr *);
2888
2889 wg_handle_packet(wg, m, src);
2890 }
2891 }
2892
2893 static void
2894 wg_get_peer(struct wg_peer *wgp, struct psref *psref)
2895 {
2896
2897 psref_acquire(psref, &wgp->wgp_psref, wg_psref_class);
2898 }
2899
2900 static void
2901 wg_put_peer(struct wg_peer *wgp, struct psref *psref)
2902 {
2903
2904 psref_release(psref, &wgp->wgp_psref, wg_psref_class);
2905 }
2906
2907 static void
2908 wg_task_send_init_message(struct wg_softc *wg, struct wg_peer *wgp)
2909 {
2910 struct wg_session *wgs;
2911
2912 WG_TRACE("WGP_TASK_SEND_INIT_MESSAGE");
2913
2914 KASSERT(mutex_owned(wgp->wgp_lock));
2915
2916 if (!atomic_load_acquire(&wgp->wgp_endpoint_available)) {
2917 WGLOG(LOG_DEBUG, "No endpoint available\n");
2918 /* XXX should do something? */
2919 return;
2920 }
2921
2922 wgs = wgp->wgp_session_stable;
2923 if (wgs->wgs_state == WGS_STATE_UNKNOWN) {
2924 /* XXX What if the unstable session is already INIT_ACTIVE? */
2925 wg_send_handshake_msg_init(wg, wgp);
2926 } else {
2927 /* rekey */
2928 wgs = wgp->wgp_session_unstable;
2929 if (wgs->wgs_state != WGS_STATE_INIT_ACTIVE)
2930 wg_send_handshake_msg_init(wg, wgp);
2931 }
2932 }
2933
2934 static void
2935 wg_task_retry_handshake(struct wg_softc *wg, struct wg_peer *wgp)
2936 {
2937 struct wg_session *wgs;
2938
2939 WG_TRACE("WGP_TASK_RETRY_HANDSHAKE");
2940
2941 KASSERT(mutex_owned(wgp->wgp_lock));
2942 KASSERT(wgp->wgp_handshake_start_time != 0);
2943
2944 wgs = wgp->wgp_session_unstable;
2945 if (wgs->wgs_state != WGS_STATE_INIT_ACTIVE)
2946 return;
2947
2948 /*
2949 * XXX no real need to assign a new index here, but we do need
2950 * to transition to UNKNOWN temporarily
2951 */
2952 wg_put_session_index(wg, wgs);
2953
2954 /* [W] 6.4 Handshake Initiation Retransmission */
2955 if ((time_uptime - wgp->wgp_handshake_start_time) >
2956 wg_rekey_attempt_time) {
2957 /* Give up handshaking */
2958 wgp->wgp_handshake_start_time = 0;
2959 WG_TRACE("give up");
2960
2961 /*
2962 * If a new data packet comes, handshaking will be retried
2963 * and a new session will be established at that time;
2964 * however, we don't want to send the pending packets then.
2965 */
2966 wg_purge_pending_packets(wgp);
2967 return;
2968 }
2969
2970 wg_task_send_init_message(wg, wgp);
2971 }
2972
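/*
 * wg_task_establish_session(wg, wgp)
 *
 *	Responder side of session establishment: once the initiator's
 *	first data message has arrived, promote the INIT_PASSIVE session
 *	to ESTABLISHED, swap it into the stable slot, send any pending
 *	packet, and retire the previous stable session.
 */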
2973 static void
2974 wg_task_establish_session(struct wg_softc *wg, struct wg_peer *wgp)
2975 {
2976 struct wg_session *wgs, *wgs_prev;
2977 struct mbuf *m;
2978
2979 KASSERT(mutex_owned(wgp->wgp_lock));
2980
2981 wgs = wgp->wgp_session_unstable;
2982 if (wgs->wgs_state != WGS_STATE_INIT_PASSIVE)
2983 /* XXX Can this happen? */
2984 return;
2985
2986 wgs->wgs_state = WGS_STATE_ESTABLISHED;
2987 wgs->wgs_time_established = time_uptime;
2988 wgs->wgs_time_last_data_sent = 0;
2989 wgs->wgs_is_initiator = false;
2990 WG_TRACE("WGS_STATE_ESTABLISHED");
2991
2992 wg_swap_sessions(wgp);
2993 KASSERT(wgs == wgp->wgp_session_stable);
2994 wgs_prev = wgp->wgp_session_unstable;
2995 getnanotime(&wgp->wgp_last_handshake_time);
2996 wgp->wgp_handshake_start_time = 0;
2997 wgp->wgp_last_sent_mac1_valid = false;
2998 wgp->wgp_last_sent_cookie_valid = false;
2999
3000 /* If we had a data packet queued up, send it. */
3001 if ((m = atomic_swap_ptr(&wgp->wgp_pending, NULL)) != NULL) {
3002 kpreempt_disable();
3003 const uint32_t h = curcpu()->ci_index; // pktq_rps_hash(m)
3004 M_SETCTX(m, wgp);
3005 if (__predict_false(!pktq_enqueue(wg_pktq, m, h))) {
3006 WGLOG(LOG_ERR, "pktq full, dropping\n");
3007 m_freem(m);
3008 }
3009 kpreempt_enable();
3010 }
3011
3012 if (wgs_prev->wgs_state == WGS_STATE_ESTABLISHED) {
3013 /* Wait for wg_get_stable_session to drain. */
3014 pserialize_perform(wgp->wgp_psz);
3015
3016 /* Transition ESTABLISHED->DESTROYING. */
3017 wgs_prev->wgs_state = WGS_STATE_DESTROYING;
3018
3019 /* We can't destroy the old session immediately */
3020 wg_schedule_session_dtor_timer(wgp);
3021 } else {
3022 KASSERTMSG(wgs_prev->wgs_state == WGS_STATE_UNKNOWN,
3023 "state=%d", wgs_prev->wgs_state);
3024 wg_clear_states(wgs_prev);
3025 wgs_prev->wgs_state = WGS_STATE_UNKNOWN;
3026 }
3027 }
3028
3029 static void
3030 wg_task_endpoint_changed(struct wg_softc *wg, struct wg_peer *wgp)
3031 {
3032
3033 WG_TRACE("WGP_TASK_ENDPOINT_CHANGED");
3034
3035 KASSERT(mutex_owned(wgp->wgp_lock));
3036
3037 if (atomic_load_relaxed(&wgp->wgp_endpoint_changing)) {
3038 pserialize_perform(wgp->wgp_psz);
3039 mutex_exit(wgp->wgp_lock);
3040 psref_target_destroy(&wgp->wgp_endpoint0->wgsa_psref,
3041 wg_psref_class);
3042 psref_target_init(&wgp->wgp_endpoint0->wgsa_psref,
3043 wg_psref_class);
3044 mutex_enter(wgp->wgp_lock);
3045 atomic_store_release(&wgp->wgp_endpoint_changing, 0);
3046 }
3047 }
3048
3049 static void
3050 wg_task_send_keepalive_message(struct wg_softc *wg, struct wg_peer *wgp)
3051 {
3052 struct wg_session *wgs;
3053
3054 WG_TRACE("WGP_TASK_SEND_KEEPALIVE_MESSAGE");
3055
3056 KASSERT(mutex_owned(wgp->wgp_lock));
3057
3058 wgs = wgp->wgp_session_stable;
3059 if (wgs->wgs_state != WGS_STATE_ESTABLISHED)
3060 return;
3061
3062 wg_send_keepalive_msg(wgp, wgs);
3063 }
3064
3065 static void
3066 wg_task_destroy_prev_session(struct wg_softc *wg, struct wg_peer *wgp)
3067 {
3068 struct wg_session *wgs;
3069
3070 WG_TRACE("WGP_TASK_DESTROY_PREV_SESSION");
3071
3072 KASSERT(mutex_owned(wgp->wgp_lock));
3073
3074 wgs = wgp->wgp_session_unstable;
3075 if (wgs->wgs_state == WGS_STATE_DESTROYING) {
3076 wg_put_session_index(wg, wgs);
3077 }
3078 }
3079
3080 static void
3081 wg_peer_work(struct work *wk, void *cookie)
3082 {
3083 struct wg_peer *wgp = container_of(wk, struct wg_peer, wgp_work);
3084 struct wg_softc *wg = wgp->wgp_sc;
3085 int tasks;
3086
3087 mutex_enter(wgp->wgp_intr_lock);
3088 while ((tasks = wgp->wgp_tasks) != 0) {
3089 wgp->wgp_tasks = 0;
3090 mutex_exit(wgp->wgp_intr_lock);
3091
3092 mutex_enter(wgp->wgp_lock);
3093 if (ISSET(tasks, WGP_TASK_SEND_INIT_MESSAGE))
3094 wg_task_send_init_message(wg, wgp);
3095 if (ISSET(tasks, WGP_TASK_RETRY_HANDSHAKE))
3096 wg_task_retry_handshake(wg, wgp);
3097 if (ISSET(tasks, WGP_TASK_ESTABLISH_SESSION))
3098 wg_task_establish_session(wg, wgp);
3099 if (ISSET(tasks, WGP_TASK_ENDPOINT_CHANGED))
3100 wg_task_endpoint_changed(wg, wgp);
3101 if (ISSET(tasks, WGP_TASK_SEND_KEEPALIVE_MESSAGE))
3102 wg_task_send_keepalive_message(wg, wgp);
3103 if (ISSET(tasks, WGP_TASK_DESTROY_PREV_SESSION))
3104 wg_task_destroy_prev_session(wg, wgp);
3105 mutex_exit(wgp->wgp_lock);
3106
3107 mutex_enter(wgp->wgp_intr_lock);
3108 }
3109 mutex_exit(wgp->wgp_intr_lock);
3110 }
3111
3112 static void
3113 wg_job(struct threadpool_job *job)
3114 {
3115 struct wg_softc *wg = container_of(job, struct wg_softc, wg_job);
3116 int bound, upcalls;
3117
3118 mutex_enter(wg->wg_intr_lock);
3119 while ((upcalls = wg->wg_upcalls) != 0) {
3120 wg->wg_upcalls = 0;
3121 mutex_exit(wg->wg_intr_lock);
3122 bound = curlwp_bind();
3123 if (ISSET(upcalls, WG_UPCALL_INET))
3124 wg_receive_packets(wg, AF_INET);
3125 if (ISSET(upcalls, WG_UPCALL_INET6))
3126 wg_receive_packets(wg, AF_INET6);
3127 curlwp_bindx(bound);
3128 mutex_enter(wg->wg_intr_lock);
3129 }
3130 threadpool_job_done(job);
3131 mutex_exit(wg->wg_intr_lock);
3132 }
3133
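/*
 * wg_bind_port(wg, port)
 *
 *	Bind the IPv4 (and, when configured, IPv6) UDP sockets to the
 *	given listen port.  Rebinding is skipped if the port is nonzero
 *	and unchanged.
 */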
3134 static int
3135 wg_bind_port(struct wg_softc *wg, const uint16_t port)
3136 {
3137 int error;
3138 uint16_t old_port = wg->wg_listen_port;
3139
3140 if (port != 0 && old_port == port)
3141 return 0;
3142
3143 struct sockaddr_in _sin, *sin = &_sin;
3144 sin->sin_len = sizeof(*sin);
3145 sin->sin_family = AF_INET;
3146 sin->sin_addr.s_addr = INADDR_ANY;
3147 sin->sin_port = htons(port);
3148
3149 error = sobind(wg->wg_so4, sintosa(sin), curlwp);
3150 if (error != 0)
3151 return error;
3152
3153 #ifdef INET6
3154 struct sockaddr_in6 _sin6, *sin6 = &_sin6;
3155 sin6->sin6_len = sizeof(*sin6);
3156 sin6->sin6_family = AF_INET6;
3157 sin6->sin6_addr = in6addr_any;
3158 sin6->sin6_port = htons(port);
3159
3160 error = sobind(wg->wg_so6, sin6tosa(sin6), curlwp);
3161 if (error != 0)
3162 return error;
3163 #endif
3164
3165 wg->wg_listen_port = port;
3166
3167 return 0;
3168 }
3169
3170 static void
3171 wg_so_upcall(struct socket *so, void *cookie, int events, int waitflag)
3172 {
3173 struct wg_softc *wg = cookie;
3174 int reason;
3175
3176 reason = (so->so_proto->pr_domain->dom_family == AF_INET) ?
3177 WG_UPCALL_INET :
3178 WG_UPCALL_INET6;
3179
3180 mutex_enter(wg->wg_intr_lock);
3181 wg->wg_upcalls |= reason;
3182 threadpool_schedule_job(wg->wg_threadpool, &wg->wg_job);
3183 mutex_exit(wg->wg_intr_lock);
3184 }
3185
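/*
 * wg_overudp_cb(mp, offset, so, src, arg)
 *
 *	Called from the UDP input path for packets arriving on our port.
 *	Data messages are handled immediately in this context; handshake
 *	messages are left on the socket so that the worker thread picks
 *	them up via so_receive (wg_receive_packets), since they involve
 *	expensive public-key operations.
 */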
3186 static int
3187 wg_overudp_cb(struct mbuf **mp, int offset, struct socket *so,
3188 struct sockaddr *src, void *arg)
3189 {
3190 struct wg_softc *wg = arg;
3191 struct wg_msg wgm;
3192 struct mbuf *m = *mp;
3193
3194 WG_TRACE("enter");
3195
3196 /* Verify the mbuf chain is long enough to have a wg msg header. */
3197 KASSERT(offset <= m_length(m));
3198 if (__predict_false(m_length(m) - offset < sizeof(struct wg_msg))) {
3199 /* drop on the floor */
3200 m_freem(m);
3201 return -1;
3202 }
3203
3204 /*
3205 * Copy the message header (32-bit message type) out -- we'll
3206 * worry about contiguity and alignment later.
3207 */
3208 m_copydata(m, offset, sizeof(struct wg_msg), &wgm);
3209 WG_DLOG("type=%d\n", le32toh(wgm.wgm_type));
3210
3211 /*
3212 * Handle DATA packets promptly as they arrive. Other packets
3213 * may require expensive public-key crypto and are not as
3214 * sensitive to latency, so defer them to the worker thread.
3215 */
3216 switch (le32toh(wgm.wgm_type)) {
3217 case WG_MSG_TYPE_DATA:
3218 /* handle immediately */
3219 m_adj(m, offset);
3220 if (__predict_false(m->m_len < sizeof(struct wg_msg_data))) {
3221 m = m_pullup(m, sizeof(struct wg_msg_data));
3222 if (m == NULL)
3223 return -1;
3224 }
3225 wg_handle_msg_data(wg, m, src);
3226 *mp = NULL;
3227 return 1;
3228 case WG_MSG_TYPE_INIT:
3229 case WG_MSG_TYPE_RESP:
3230 case WG_MSG_TYPE_COOKIE:
3231 /* pass through to so_receive in wg_receive_packets */
3232 return 0;
3233 default:
3234 /* drop on the floor */
3235 m_freem(m);
3236 return -1;
3237 }
3238 }
3239
3240 static int
3241 wg_socreate(struct wg_softc *wg, int af, struct socket **sop)
3242 {
3243 int error;
3244 struct socket *so;
3245
3246 error = socreate(af, &so, SOCK_DGRAM, 0, curlwp, NULL);
3247 if (error != 0)
3248 return error;
3249
3250 solock(so);
3251 so->so_upcallarg = wg;
3252 so->so_upcall = wg_so_upcall;
3253 so->so_rcv.sb_flags |= SB_UPCALL;
3254 if (af == AF_INET)
3255 in_pcb_register_overudp_cb(sotoinpcb(so), wg_overudp_cb, wg);
3256 #ifdef INET6
3257 else
3258 in6_pcb_register_overudp_cb(sotoin6pcb(so), wg_overudp_cb, wg);
3259 #endif
3260 sounlock(so);
3261
3262 *sop = so;
3263
3264 return 0;
3265 }
3266
3267 static bool
3268 wg_session_hit_limits(struct wg_session *wgs)
3269 {
3270
3271 /*
3272 * [W] 6.2: Transport Message Limits
3273 * "After REJECT-AFTER-MESSAGES transport data messages or after the
3274 * current secure session is REJECT-AFTER-TIME seconds old, whichever
3275 * comes first, WireGuard will refuse to send any more transport data
3276 * messages using the current secure session, ..."
3277 */
3278 KASSERT(wgs->wgs_time_established != 0);
3279 if ((time_uptime - wgs->wgs_time_established) > wg_reject_after_time) {
3280 WG_DLOG("The session hits REJECT_AFTER_TIME\n");
3281 return true;
3282 } else if (wg_session_get_send_counter(wgs) >
3283 wg_reject_after_messages) {
3284 WG_DLOG("The session hits REJECT_AFTER_MESSAGES\n");
3285 return true;
3286 }
3287
3288 return false;
3289 }
3290
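/*
 * wgintr(cookie)
 *
 *	Softint that drains wg_pktq: for each queued packet, take the
 *	peer's stable session and send the packet as a data message; if
 *	there is no usable session (none established, or limits hit),
 *	schedule a handshake and drop the packet.
 */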
3291 static void
3292 wgintr(void *cookie)
3293 {
3294 struct wg_peer *wgp;
3295 struct wg_session *wgs;
3296 struct mbuf *m;
3297 struct psref psref;
3298
3299 while ((m = pktq_dequeue(wg_pktq)) != NULL) {
3300 wgp = M_GETCTX(m, struct wg_peer *);
3301 if ((wgs = wg_get_stable_session(wgp, &psref)) == NULL) {
3302 WG_TRACE("no stable session");
3303 wg_schedule_peer_task(wgp, WGP_TASK_SEND_INIT_MESSAGE);
3304 goto next0;
3305 }
3306 if (__predict_false(wg_session_hit_limits(wgs))) {
3307 WG_TRACE("stable session hit limits");
3308 wg_schedule_peer_task(wgp, WGP_TASK_SEND_INIT_MESSAGE);
3309 goto next1;
3310 }
3311 wg_send_data_msg(wgp, wgs, m);
3312 m = NULL; /* consumed */
3313 next1: wg_put_session(wgs, &psref);
3314 next0: if (m)
3315 m_freem(m);
3316 /* XXX Yield to avoid userland starvation? */
3317 }
3318 }
3319
3320 static void
3321 wg_rekey_timer(void *arg)
3322 {
3323 struct wg_peer *wgp = arg;
3324
3325 wg_schedule_peer_task(wgp, WGP_TASK_SEND_INIT_MESSAGE);
3326 }
3327
3328 static void
3329 wg_purge_pending_packets(struct wg_peer *wgp)
3330 {
3331 struct mbuf *m;
3332
3333 if ((m = atomic_swap_ptr(&wgp->wgp_pending, NULL)) != NULL)
3334 m_freem(m);
3335 pktq_barrier(wg_pktq);
3336 }
3337
3338 static void
3339 wg_handshake_timeout_timer(void *arg)
3340 {
3341 struct wg_peer *wgp = arg;
3342
3343 WG_TRACE("enter");
3344
3345 wg_schedule_peer_task(wgp, WGP_TASK_RETRY_HANDSHAKE);
3346 }
3347
3348 static struct wg_peer *
3349 wg_alloc_peer(struct wg_softc *wg)
3350 {
3351 struct wg_peer *wgp;
3352
3353 wgp = kmem_zalloc(sizeof(*wgp), KM_SLEEP);
3354
3355 wgp->wgp_sc = wg;
3356 callout_init(&wgp->wgp_rekey_timer, CALLOUT_MPSAFE);
3357 callout_setfunc(&wgp->wgp_rekey_timer, wg_rekey_timer, wgp);
3358 callout_init(&wgp->wgp_handshake_timeout_timer, CALLOUT_MPSAFE);
3359 callout_setfunc(&wgp->wgp_handshake_timeout_timer,
3360 wg_handshake_timeout_timer, wgp);
3361 callout_init(&wgp->wgp_session_dtor_timer, CALLOUT_MPSAFE);
3362 callout_setfunc(&wgp->wgp_session_dtor_timer,
3363 wg_session_dtor_timer, wgp);
3364 PSLIST_ENTRY_INIT(wgp, wgp_peerlist_entry);
3365 wgp->wgp_endpoint_changing = false;
3366 wgp->wgp_endpoint_available = false;
3367 wgp->wgp_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NONE);
3368 wgp->wgp_intr_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_SOFTNET);
3369 wgp->wgp_psz = pserialize_create();
3370 psref_target_init(&wgp->wgp_psref, wg_psref_class);
3371
3372 wgp->wgp_endpoint = kmem_zalloc(sizeof(*wgp->wgp_endpoint), KM_SLEEP);
3373 wgp->wgp_endpoint0 = kmem_zalloc(sizeof(*wgp->wgp_endpoint0), KM_SLEEP);
3374 psref_target_init(&wgp->wgp_endpoint->wgsa_psref, wg_psref_class);
3375 psref_target_init(&wgp->wgp_endpoint0->wgsa_psref, wg_psref_class);
3376
3377 struct wg_session *wgs;
3378 wgp->wgp_session_stable =
3379 kmem_zalloc(sizeof(*wgp->wgp_session_stable), KM_SLEEP);
3380 wgp->wgp_session_unstable =
3381 kmem_zalloc(sizeof(*wgp->wgp_session_unstable), KM_SLEEP);
3382 wgs = wgp->wgp_session_stable;
3383 wgs->wgs_peer = wgp;
3384 wgs->wgs_state = WGS_STATE_UNKNOWN;
3385 psref_target_init(&wgs->wgs_psref, wg_psref_class);
3386 #ifndef __HAVE_ATOMIC64_LOADSTORE
3387 mutex_init(&wgs->wgs_send_counter_lock, MUTEX_DEFAULT, IPL_SOFTNET);
3388 #endif
3389 wgs->wgs_recvwin = kmem_zalloc(sizeof(*wgs->wgs_recvwin), KM_SLEEP);
3390 mutex_init(&wgs->wgs_recvwin->lock, MUTEX_DEFAULT, IPL_SOFTNET);
3391
3392 wgs = wgp->wgp_session_unstable;
3393 wgs->wgs_peer = wgp;
3394 wgs->wgs_state = WGS_STATE_UNKNOWN;
3395 psref_target_init(&wgs->wgs_psref, wg_psref_class);
3396 #ifndef __HAVE_ATOMIC64_LOADSTORE
3397 mutex_init(&wgs->wgs_send_counter_lock, MUTEX_DEFAULT, IPL_SOFTNET);
3398 #endif
3399 wgs->wgs_recvwin = kmem_zalloc(sizeof(*wgs->wgs_recvwin), KM_SLEEP);
3400 mutex_init(&wgs->wgs_recvwin->lock, MUTEX_DEFAULT, IPL_SOFTNET);
3401
3402 return wgp;
3403 }
3404
3405 static void
3406 wg_destroy_peer(struct wg_peer *wgp)
3407 {
3408 struct wg_session *wgs;
3409 struct wg_softc *wg = wgp->wgp_sc;
3410
3411 /* Prevent new packets from this peer on any source address. */
3412 rw_enter(wg->wg_rwlock, RW_WRITER);
3413 for (int i = 0; i < wgp->wgp_n_allowedips; i++) {
3414 struct wg_allowedip *wga = &wgp->wgp_allowedips[i];
3415 struct radix_node_head *rnh = wg_rnh(wg, wga->wga_family);
3416 struct radix_node *rn;
3417
3418 KASSERT(rnh != NULL);
3419 rn = rnh->rnh_deladdr(&wga->wga_sa_addr,
3420 &wga->wga_sa_mask, rnh);
3421 if (rn == NULL) {
3422 char addrstr[128];
3423 sockaddr_format(&wga->wga_sa_addr, addrstr,
3424 sizeof(addrstr));
3425 WGLOG(LOG_WARNING, "Couldn't delete %s\n", addrstr);
3426 }
3427 }
3428 rw_exit(wg->wg_rwlock);
3429
3430 /* Purge pending packets. */
3431 wg_purge_pending_packets(wgp);
3432
3433 /* Halt all packet processing and timeouts. */
3434 callout_halt(&wgp->wgp_rekey_timer, NULL);
3435 callout_halt(&wgp->wgp_handshake_timeout_timer, NULL);
3436 callout_halt(&wgp->wgp_session_dtor_timer, NULL);
3437
3438 /* Wait for any queued work to complete. */
3439 workqueue_wait(wg_wq, &wgp->wgp_work);
3440
3441 wgs = wgp->wgp_session_unstable;
3442 if (wgs->wgs_state != WGS_STATE_UNKNOWN) {
3443 mutex_enter(wgp->wgp_lock);
3444 wg_destroy_session(wg, wgs);
3445 mutex_exit(wgp->wgp_lock);
3446 }
3447 mutex_destroy(&wgs->wgs_recvwin->lock);
3448 kmem_free(wgs->wgs_recvwin, sizeof(*wgs->wgs_recvwin));
3449 #ifndef __HAVE_ATOMIC64_LOADSTORE
3450 mutex_destroy(&wgs->wgs_send_counter_lock);
3451 #endif
3452 kmem_free(wgs, sizeof(*wgs));
3453
3454 wgs = wgp->wgp_session_stable;
3455 if (wgs->wgs_state != WGS_STATE_UNKNOWN) {
3456 mutex_enter(wgp->wgp_lock);
3457 wg_destroy_session(wg, wgs);
3458 mutex_exit(wgp->wgp_lock);
3459 }
3460 mutex_destroy(&wgs->wgs_recvwin->lock);
3461 kmem_free(wgs->wgs_recvwin, sizeof(*wgs->wgs_recvwin));
3462 #ifndef __HAVE_ATOMIC64_LOADSTORE
3463 mutex_destroy(&wgs->wgs_send_counter_lock);
3464 #endif
3465 kmem_free(wgs, sizeof(*wgs));
3466
3467 psref_target_destroy(&wgp->wgp_endpoint->wgsa_psref, wg_psref_class);
3468 psref_target_destroy(&wgp->wgp_endpoint0->wgsa_psref, wg_psref_class);
3469 kmem_free(wgp->wgp_endpoint, sizeof(*wgp->wgp_endpoint));
3470 kmem_free(wgp->wgp_endpoint0, sizeof(*wgp->wgp_endpoint0));
3471
3472 pserialize_destroy(wgp->wgp_psz);
3473 mutex_obj_free(wgp->wgp_intr_lock);
3474 mutex_obj_free(wgp->wgp_lock);
3475
3476 kmem_free(wgp, sizeof(*wgp));
3477 }
3478
3479 static void
3480 wg_destroy_all_peers(struct wg_softc *wg)
3481 {
3482 struct wg_peer *wgp, *wgp0 __diagused;
3483 void *garbage_byname, *garbage_bypubkey;
3484
3485 restart:
3486 garbage_byname = garbage_bypubkey = NULL;
3487 mutex_enter(wg->wg_lock);
3488 WG_PEER_WRITER_FOREACH(wgp, wg) {
3489 if (wgp->wgp_name[0]) {
3490 wgp0 = thmap_del(wg->wg_peers_byname, wgp->wgp_name,
3491 strlen(wgp->wgp_name));
3492 KASSERT(wgp0 == wgp);
3493 garbage_byname = thmap_stage_gc(wg->wg_peers_byname);
3494 }
3495 wgp0 = thmap_del(wg->wg_peers_bypubkey, wgp->wgp_pubkey,
3496 sizeof(wgp->wgp_pubkey));
3497 KASSERT(wgp0 == wgp);
3498 garbage_bypubkey = thmap_stage_gc(wg->wg_peers_bypubkey);
3499 WG_PEER_WRITER_REMOVE(wgp);
3500 wg->wg_npeers--;
3501 mutex_enter(wgp->wgp_lock);
3502 pserialize_perform(wgp->wgp_psz);
3503 mutex_exit(wgp->wgp_lock);
3504 PSLIST_ENTRY_DESTROY(wgp, wgp_peerlist_entry);
3505 break;
3506 }
3507 mutex_exit(wg->wg_lock);
3508
3509 if (wgp == NULL)
3510 return;
3511
3512 psref_target_destroy(&wgp->wgp_psref, wg_psref_class);
3513
3514 wg_destroy_peer(wgp);
3515 thmap_gc(wg->wg_peers_byname, garbage_byname);
3516 thmap_gc(wg->wg_peers_bypubkey, garbage_bypubkey);
3517
3518 goto restart;
3519 }
3520
3521 static int
3522 wg_destroy_peer_name(struct wg_softc *wg, const char *name)
3523 {
3524 struct wg_peer *wgp, *wgp0 __diagused;
3525 void *garbage_byname, *garbage_bypubkey;
3526
3527 mutex_enter(wg->wg_lock);
3528 wgp = thmap_del(wg->wg_peers_byname, name, strlen(name));
3529 if (wgp != NULL) {
3530 wgp0 = thmap_del(wg->wg_peers_bypubkey, wgp->wgp_pubkey,
3531 sizeof(wgp->wgp_pubkey));
3532 KASSERT(wgp0 == wgp);
3533 garbage_byname = thmap_stage_gc(wg->wg_peers_byname);
3534 garbage_bypubkey = thmap_stage_gc(wg->wg_peers_bypubkey);
3535 WG_PEER_WRITER_REMOVE(wgp);
3536 wg->wg_npeers--;
3537 if (wg->wg_npeers == 0)
3538 if_link_state_change(&wg->wg_if, LINK_STATE_DOWN);
3539 mutex_enter(wgp->wgp_lock);
3540 pserialize_perform(wgp->wgp_psz);
3541 mutex_exit(wgp->wgp_lock);
3542 PSLIST_ENTRY_DESTROY(wgp, wgp_peerlist_entry);
3543 }
3544 mutex_exit(wg->wg_lock);
3545
3546 if (wgp == NULL)
3547 return ENOENT;
3548
3549 psref_target_destroy(&wgp->wgp_psref, wg_psref_class);
3550
3551 wg_destroy_peer(wgp);
3552 thmap_gc(wg->wg_peers_byname, garbage_byname);
3553 thmap_gc(wg->wg_peers_bypubkey, garbage_bypubkey);
3554
3555 return 0;
3556 }
3557
3558 static int
3559 wg_if_attach(struct wg_softc *wg)
3560 {
3561 int error;
3562
3563 wg->wg_if.if_addrlen = 0;
3564 wg->wg_if.if_mtu = WG_MTU;
3565 wg->wg_if.if_flags = IFF_MULTICAST;
3566 wg->wg_if.if_extflags = IFEF_MPSAFE;
3567 wg->wg_if.if_ioctl = wg_ioctl;
3568 wg->wg_if.if_output = wg_output;
3569 wg->wg_if.if_init = wg_init;
3570 #ifdef ALTQ
3571 wg->wg_if.if_start = wg_start;
3572 #endif
3573 wg->wg_if.if_stop = wg_stop;
3574 wg->wg_if.if_type = IFT_OTHER;
3575 wg->wg_if.if_dlt = DLT_NULL;
3576 wg->wg_if.if_softc = wg;
3577 #ifdef ALTQ
3578 IFQ_SET_READY(&wg->wg_if.if_snd);
3579 #endif
3580
3581 error = if_initialize(&wg->wg_if);
3582 if (error != 0)
3583 return error;
3584
3585 wg->wg_if.if_link_state = LINK_STATE_DOWN;
3586 if_alloc_sadl(&wg->wg_if);
3587 if_register(&wg->wg_if);
3588
3589 bpf_attach(&wg->wg_if, DLT_NULL, sizeof(uint32_t));
3590
3591 return 0;
3592 }
3593
3594 static void
3595 wg_if_detach(struct wg_softc *wg)
3596 {
3597 struct ifnet *ifp = &wg->wg_if;
3598
3599 bpf_detach(ifp);
3600 if_detach(ifp);
3601 }
3602
3603 static int
3604 wg_clone_create(struct if_clone *ifc, int unit)
3605 {
3606 struct wg_softc *wg;
3607 int error;
3608
3609 wg_guarantee_initialized();
3610
3611 error = wg_count_inc();
3612 if (error)
3613 return error;
3614
3615 wg = kmem_zalloc(sizeof(*wg), KM_SLEEP);
3616
3617 if_initname(&wg->wg_if, ifc->ifc_name, unit);
3618
3619 PSLIST_INIT(&wg->wg_peers);
3620 wg->wg_peers_bypubkey = thmap_create(0, NULL, THMAP_NOCOPY);
3621 wg->wg_peers_byname = thmap_create(0, NULL, THMAP_NOCOPY);
3622 wg->wg_sessions_byindex = thmap_create(0, NULL, THMAP_NOCOPY);
3623 wg->wg_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NONE);
3624 wg->wg_intr_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_SOFTNET);
3625 wg->wg_rwlock = rw_obj_alloc();
3626 threadpool_job_init(&wg->wg_job, wg_job, wg->wg_intr_lock,
3627 "%s", if_name(&wg->wg_if));
3628 wg->wg_ops = &wg_ops_rumpkernel;
3629
3630 error = threadpool_get(&wg->wg_threadpool, PRI_NONE);
3631 if (error)
3632 goto fail0;
3633
3634 #ifdef INET
3635 error = wg_socreate(wg, AF_INET, &wg->wg_so4);
3636 if (error)
3637 goto fail1;
3638 rn_inithead((void **)&wg->wg_rtable_ipv4,
3639 offsetof(struct sockaddr_in, sin_addr) * NBBY);
3640 #endif
3641 #ifdef INET6
3642 error = wg_socreate(wg, AF_INET6, &wg->wg_so6);
3643 if (error)
3644 goto fail2;
3645 rn_inithead((void **)&wg->wg_rtable_ipv6,
3646 offsetof(struct sockaddr_in6, sin6_addr) * NBBY);
3647 #endif
3648
3649 error = wg_if_attach(wg);
3650 if (error)
3651 goto fail3;
3652
3653 return 0;
3654
3655 fail4: __unused
3656 wg_if_detach(wg);
3657 fail3: wg_destroy_all_peers(wg);
3658 #ifdef INET6
3659 solock(wg->wg_so6);
3660 wg->wg_so6->so_rcv.sb_flags &= ~SB_UPCALL;
3661 sounlock(wg->wg_so6);
3662 #endif
3663 #ifdef INET
3664 solock(wg->wg_so4);
3665 wg->wg_so4->so_rcv.sb_flags &= ~SB_UPCALL;
3666 sounlock(wg->wg_so4);
3667 #endif
3668 mutex_enter(wg->wg_intr_lock);
3669 threadpool_cancel_job(wg->wg_threadpool, &wg->wg_job);
3670 mutex_exit(wg->wg_intr_lock);
3671 #ifdef INET6
3672 if (wg->wg_rtable_ipv6 != NULL)
3673 free(wg->wg_rtable_ipv6, M_RTABLE);
3674 soclose(wg->wg_so6);
3675 fail2:
3676 #endif
3677 #ifdef INET
3678 if (wg->wg_rtable_ipv4 != NULL)
3679 free(wg->wg_rtable_ipv4, M_RTABLE);
3680 soclose(wg->wg_so4);
3681 fail1:
3682 #endif
3683 threadpool_put(wg->wg_threadpool, PRI_NONE);
3684 fail0: threadpool_job_destroy(&wg->wg_job);
3685 rw_obj_free(wg->wg_rwlock);
3686 mutex_obj_free(wg->wg_intr_lock);
3687 mutex_obj_free(wg->wg_lock);
3688 thmap_destroy(wg->wg_sessions_byindex);
3689 thmap_destroy(wg->wg_peers_byname);
3690 thmap_destroy(wg->wg_peers_bypubkey);
3691 PSLIST_DESTROY(&wg->wg_peers);
3692 kmem_free(wg, sizeof(*wg));
3693 wg_count_dec();
3694 return error;
3695 }
3696
3697 static int
3698 wg_clone_destroy(struct ifnet *ifp)
3699 {
3700 struct wg_softc *wg = container_of(ifp, struct wg_softc, wg_if);
3701
3702 #ifdef WG_RUMPKERNEL
3703 if (wg_user_mode(wg)) {
3704 rumpuser_wg_destroy(wg->wg_user);
3705 wg->wg_user = NULL;
3706 }
3707 #endif
3708
3709 wg_if_detach(wg);
3710 wg_destroy_all_peers(wg);
3711 #ifdef INET6
3712 solock(wg->wg_so6);
3713 wg->wg_so6->so_rcv.sb_flags &= ~SB_UPCALL;
3714 sounlock(wg->wg_so6);
3715 #endif
3716 #ifdef INET
3717 solock(wg->wg_so4);
3718 wg->wg_so4->so_rcv.sb_flags &= ~SB_UPCALL;
3719 sounlock(wg->wg_so4);
3720 #endif
3721 mutex_enter(wg->wg_intr_lock);
3722 threadpool_cancel_job(wg->wg_threadpool, &wg->wg_job);
3723 mutex_exit(wg->wg_intr_lock);
3724 #ifdef INET6
3725 if (wg->wg_rtable_ipv6 != NULL)
3726 free(wg->wg_rtable_ipv6, M_RTABLE);
3727 soclose(wg->wg_so6);
3728 #endif
3729 #ifdef INET
3730 if (wg->wg_rtable_ipv4 != NULL)
3731 free(wg->wg_rtable_ipv4, M_RTABLE);
3732 soclose(wg->wg_so4);
3733 #endif
3734 threadpool_put(wg->wg_threadpool, PRI_NONE);
3735 threadpool_job_destroy(&wg->wg_job);
3736 rw_obj_free(wg->wg_rwlock);
3737 mutex_obj_free(wg->wg_intr_lock);
3738 mutex_obj_free(wg->wg_lock);
3739 thmap_destroy(wg->wg_sessions_byindex);
3740 thmap_destroy(wg->wg_peers_byname);
3741 thmap_destroy(wg->wg_peers_bypubkey);
3742 PSLIST_DESTROY(&wg->wg_peers);
3743 kmem_free(wg, sizeof(*wg));
3744 wg_count_dec();
3745
3746 return 0;
3747 }
3748
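/*
 * wg_pick_peer_by_sa implements the allowed-IPs lookup ("cryptokey
 * routing", [W] 2): the destination address is matched against the
 * per-address-family radix tree populated by wg_rtable_add_route(),
 * and the owning peer is returned with a psref held.  The caller must
 * release the reference with wg_put_peer().
 */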
3749 static struct wg_peer *
3750 wg_pick_peer_by_sa(struct wg_softc *wg, const struct sockaddr *sa,
3751 struct psref *psref)
3752 {
3753 struct radix_node_head *rnh;
3754 struct radix_node *rn;
3755 struct wg_peer *wgp = NULL;
3756 struct wg_allowedip *wga;
3757
3758 #ifdef WG_DEBUG_LOG
3759 char addrstr[128];
3760 sockaddr_format(sa, addrstr, sizeof(addrstr));
3761 WG_DLOG("sa=%s\n", addrstr);
3762 #endif
3763
3764 rw_enter(wg->wg_rwlock, RW_READER);
3765
3766 rnh = wg_rnh(wg, sa->sa_family);
3767 if (rnh == NULL)
3768 goto out;
3769
3770 rn = rnh->rnh_matchaddr(sa, rnh);
3771 if (rn == NULL || (rn->rn_flags & RNF_ROOT) != 0)
3772 goto out;
3773
3774 WG_TRACE("success");
3775
3776 wga = container_of(rn, struct wg_allowedip, wga_nodes[0]);
3777 wgp = wga->wga_peer;
3778 wg_get_peer(wgp, psref);
3779
3780 out:
3781 rw_exit(wg->wg_rwlock);
3782 return wgp;
3783 }
3784
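/*
 * wg_fill_msg_data fills in the fixed header of a transport data
 * message.  As an informal sketch (see [W] 5.4.6), the message built
 * here and completed by wg_send_data_msg() is laid out as:
 *
 *	le32 wgmd_type		WG_MSG_TYPE_DATA
 *	le32 wgmd_receiver	receiver's session index
 *	le64 wgmd_counter	send counter, used as the AEAD nonce
 *	ciphertext		inner packet padded to a 16-byte multiple,
 *				followed by a WG_AUTHTAG_LEN-byte tag
 */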
3785 static void
3786 wg_fill_msg_data(struct wg_softc *wg, struct wg_peer *wgp,
3787 struct wg_session *wgs, struct wg_msg_data *wgmd)
3788 {
3789
3790 memset(wgmd, 0, sizeof(*wgmd));
3791 wgmd->wgmd_type = htole32(WG_MSG_TYPE_DATA);
3792 wgmd->wgmd_receiver = wgs->wgs_remote_index;
3793 /* [W] 5.4.6: msg.counter := Nm^send */
3794 /* [W] 5.4.6: Nm^send := Nm^send + 1 */
3795 wgmd->wgmd_counter = htole64(wg_session_inc_send_counter(wgs));
3796 WG_DLOG("counter=%"PRIu64"\n", le64toh(wgmd->wgmd_counter));
3797 }
3798
3799 static int
3800 wg_output(struct ifnet *ifp, struct mbuf *m, const struct sockaddr *dst,
3801 const struct rtentry *rt)
3802 {
3803 struct wg_softc *wg = ifp->if_softc;
3804 struct wg_peer *wgp = NULL;
3805 struct wg_session *wgs = NULL;
3806 struct psref wgp_psref, wgs_psref;
3807 int bound;
3808 int error;
3809
3810 bound = curlwp_bind();
3811
3812 /* TODO make the nest limit configurable via sysctl */
3813 error = if_tunnel_check_nesting(ifp, m, 1);
3814 if (error) {
3815 WGLOG(LOG_ERR, "tunneling loop detected and packet dropped\n");
3816 goto out0;
3817 }
3818
3819 #ifdef ALTQ
3820 bool altq = atomic_load_relaxed(&ifp->if_snd.altq_flags)
3821 & ALTQF_ENABLED;
3822 if (altq)
3823 IFQ_CLASSIFY(&ifp->if_snd, m, dst->sa_family);
3824 #endif
3825
3826 bpf_mtap_af(ifp, dst->sa_family, m, BPF_D_OUT);
3827
3828 m->m_flags &= ~(M_BCAST|M_MCAST);
3829
3830 wgp = wg_pick_peer_by_sa(wg, dst, &wgp_psref);
3831 if (wgp == NULL) {
3832 WG_TRACE("peer not found");
3833 error = EHOSTUNREACH;
3834 goto out0;
3835 }
3836
3837 /* Clear checksum-offload flags. */
3838 m->m_pkthdr.csum_flags = 0;
3839 m->m_pkthdr.csum_data = 0;
3840
3841 /* Check whether there's an established session. */
3842 wgs = wg_get_stable_session(wgp, &wgs_psref);
3843 if (wgs == NULL) {
3844 /*
3845 * No established session. If we're the first to try
3846 * sending data, schedule a handshake and queue the
3847 * packet for when the handshake is done; otherwise
3848 * just drop the packet and let the ongoing handshake
3849 * attempt continue. We could queue more data packets
3850 * but it's not clear that's worthwhile.
3851 */
3852 if (atomic_cas_ptr(&wgp->wgp_pending, NULL, m) == NULL) {
3853 m = NULL; /* consume */
3854 WG_TRACE("queued first packet; init handshake");
3855 wg_schedule_peer_task(wgp, WGP_TASK_SEND_INIT_MESSAGE);
3856 } else {
3857 WG_TRACE("first packet already queued, dropping");
3858 }
3859 goto out1;
3860 }
3861
3862 /* There's an established session. Toss it in the queue. */
3863 #ifdef ALTQ
3864 if (altq) {
3865 mutex_enter(ifp->if_snd.ifq_lock);
3866 if (ALTQ_IS_ENABLED(&ifp->if_snd)) {
3867 M_SETCTX(m, wgp);
3868 ALTQ_ENQUEUE(&ifp->if_snd, m, error);
3869 m = NULL; /* consume */
3870 }
3871 mutex_exit(ifp->if_snd.ifq_lock);
3872 if (m == NULL) {
3873 wg_start(ifp);
3874 goto out2;
3875 }
3876 }
3877 #endif
3878 kpreempt_disable();
3879 const uint32_t h = curcpu()->ci_index; /* pktq_rps_hash(m) */
3880 M_SETCTX(m, wgp);
3881 if (__predict_false(!pktq_enqueue(wg_pktq, m, h))) {
3882 WGLOG(LOG_ERR, "pktq full, dropping\n");
3883 error = ENOBUFS;
3884 goto out3;
3885 }
3886 m = NULL; /* consumed */
3887 error = 0;
3888 out3: kpreempt_enable();
3889
3890 #ifdef ALTQ
3891 out2:
3892 #endif
3893 wg_put_session(wgs, &wgs_psref);
3894 out1: wg_put_peer(wgp, &wgp_psref);
3895 out0: if (m)
3896 m_freem(m);
3897 curlwp_bindx(bound);
3898 return error;
3899 }
3900
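/*
 * wg_send_udp transmits an already-encrypted message to the peer's
 * current endpoint over the UDP socket for that address family, via
 * udp_send() for IPv4 or udp6_output() for IPv6.
 */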
3901 static int
3902 wg_send_udp(struct wg_peer *wgp, struct mbuf *m)
3903 {
3904 struct psref psref;
3905 struct wg_sockaddr *wgsa;
3906 int error;
3907 struct socket *so;
3908
3909 wgsa = wg_get_endpoint_sa(wgp, &psref);
3910 so = wg_get_so_by_peer(wgp, wgsa);
3911 solock(so);
3912 if (wgsatosa(wgsa)->sa_family == AF_INET) {
3913 error = udp_send(so, m, wgsatosa(wgsa), NULL, curlwp);
3914 } else {
3915 #ifdef INET6
3916 error = udp6_output(sotoin6pcb(so), m, wgsatosin6(wgsa),
3917 NULL, curlwp);
3918 #else
3919 m_freem(m);
3920 error = EPFNOSUPPORT;
3921 #endif
3922 }
3923 sounlock(so);
3924 wg_put_sa(wgp, wgsa, &psref);
3925
3926 return error;
3927 }
3928
3929 /* Inspired by pppoe_get_mbuf */
3930 static struct mbuf *
3931 wg_get_mbuf(size_t leading_len, size_t len)
3932 {
3933 struct mbuf *m;
3934
3935 KASSERT(leading_len <= MCLBYTES);
3936 KASSERT(len <= MCLBYTES - leading_len);
3937
3938 m = m_gethdr(M_DONTWAIT, MT_DATA);
3939 if (m == NULL)
3940 return NULL;
3941 if (len + leading_len > MHLEN) {
3942 m_clget(m, M_DONTWAIT);
3943 if ((m->m_flags & M_EXT) == 0) {
3944 m_free(m);
3945 return NULL;
3946 }
3947 }
3948 m->m_data += leading_len;
3949 m->m_pkthdr.len = m->m_len = len;
3950
3951 return m;
3952 }
3953
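/*
 * wg_send_data_msg builds and sends one transport data message over an
 * established session: the inner packet is zero-padded to a 16-byte
 * multiple, encrypted with the session's send key using the message
 * counter as nonce ([W] 5.4.6), and handed to wg_ops->send_data_msg.
 * Rekeying is scheduled according to the limits in [W] 6.2.
 */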
3954 static int
3955 wg_send_data_msg(struct wg_peer *wgp, struct wg_session *wgs,
3956 struct mbuf *m)
3957 {
3958 struct wg_softc *wg = wgp->wgp_sc;
3959 int error;
3960 size_t inner_len, padded_len, encrypted_len;
3961 char *padded_buf = NULL;
3962 size_t mlen;
3963 struct wg_msg_data *wgmd;
3964 bool free_padded_buf = false;
3965 struct mbuf *n;
3966 size_t leading_len = max_hdr + sizeof(struct udphdr);
3967
3968 mlen = m_length(m);
3969 inner_len = mlen;
3970 padded_len = roundup(mlen, 16);
3971 encrypted_len = padded_len + WG_AUTHTAG_LEN;
3972 WG_DLOG("inner=%lu, padded=%lu, encrypted_len=%lu\n",
3973 inner_len, padded_len, encrypted_len);
3974 if (mlen != 0) {
3975 bool success;
3976 success = m_ensure_contig(&m, padded_len);
3977 if (success) {
3978 padded_buf = mtod(m, char *);
3979 } else {
3980 padded_buf = kmem_intr_alloc(padded_len, KM_NOSLEEP);
3981 if (padded_buf == NULL) {
3982 error = ENOBUFS;
3983 goto end;
3984 }
3985 free_padded_buf = true;
3986 m_copydata(m, 0, mlen, padded_buf);
3987 }
3988 memset(padded_buf + mlen, 0, padded_len - inner_len);
3989 }
3990
3991 n = wg_get_mbuf(leading_len, sizeof(*wgmd) + encrypted_len);
3992 if (n == NULL) {
3993 error = ENOBUFS;
3994 goto end;
3995 }
3996 KASSERT(n->m_len >= sizeof(*wgmd));
3997 wgmd = mtod(n, struct wg_msg_data *);
3998 wg_fill_msg_data(wg, wgp, wgs, wgmd);
3999 /* [W] 5.4.6: AEAD(Tm^send, Nm^send, P, e) */
4000 wg_algo_aead_enc((char *)wgmd + sizeof(*wgmd), encrypted_len,
4001 wgs->wgs_tkey_send, le64toh(wgmd->wgmd_counter),
4002 padded_buf, padded_len,
4003 NULL, 0);
4004
4005 error = wg->wg_ops->send_data_msg(wgp, n);
4006 if (error == 0) {
4007 struct ifnet *ifp = &wg->wg_if;
4008 if_statadd(ifp, if_obytes, mlen);
4009 if_statinc(ifp, if_opackets);
4010 if (wgs->wgs_is_initiator &&
4011 wgs->wgs_time_last_data_sent == 0) {
4012 /*
4013 * [W] 6.2 Transport Message Limits
4014 * "if a peer is the initiator of a current secure
4015 * session, WireGuard will send a handshake initiation
4016 * message to begin a new secure session if, after
4017 * transmitting a transport data message, the current
4018 * secure session is REKEY-AFTER-TIME seconds old,"
4019 */
4020 wg_schedule_rekey_timer(wgp);
4021 }
4022 wgs->wgs_time_last_data_sent = time_uptime;
4023 if (wg_session_get_send_counter(wgs) >=
4024 wg_rekey_after_messages) {
4025 /*
4026 * [W] 6.2 Transport Message Limits
4027 * "WireGuard will try to create a new session, by
4028 * sending a handshake initiation message (section
4029 * 5.4.2), after it has sent REKEY-AFTER-MESSAGES
4030 * transport data messages..."
4031 */
4032 wg_schedule_peer_task(wgp, WGP_TASK_SEND_INIT_MESSAGE);
4033 }
4034 }
4035 end:
4036 m_freem(m);
4037 if (free_padded_buf)
4038 kmem_intr_free(padded_buf, padded_len);
4039 return error;
4040 }
4041
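/*
 * wg_input hands a decrypted, decapsulated inner packet to the network
 * stack by enqueueing it on the IPv4 or IPv6 input pktqueue and
 * accounting it against the interface's input statistics.
 */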
4042 static void
4043 wg_input(struct ifnet *ifp, struct mbuf *m, const int af)
4044 {
4045 pktqueue_t *pktq;
4046 size_t pktlen;
4047
4048 KASSERT(af == AF_INET || af == AF_INET6);
4049
4050 WG_TRACE("");
4051
4052 m_set_rcvif(m, ifp);
4053 pktlen = m->m_pkthdr.len;
4054
4055 bpf_mtap_af(ifp, af, m, BPF_D_IN);
4056
4057 switch (af) {
4058 case AF_INET:
4059 pktq = ip_pktq;
4060 break;
4061 #ifdef INET6
4062 case AF_INET6:
4063 pktq = ip6_pktq;
4064 break;
4065 #endif
4066 default:
4067 panic("invalid af=%d", af);
4068 }
4069
4070 kpreempt_disable();
4071 const u_int h = curcpu()->ci_index;
4072 if (__predict_true(pktq_enqueue(pktq, m, h))) {
4073 if_statadd(ifp, if_ibytes, pktlen);
4074 if_statinc(ifp, if_ipackets);
4075 } else {
4076 m_freem(m);
4077 }
4078 kpreempt_enable();
4079 }
4080
4081 static void
4082 wg_calc_pubkey(uint8_t pubkey[WG_STATIC_KEY_LEN],
4083 const uint8_t privkey[WG_STATIC_KEY_LEN])
4084 {
4085
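	/*
	 * Curve25519 scalar multiplication by the base point: derive
	 * the public key from the private key.
	 */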
4086 crypto_scalarmult_base(pubkey, privkey);
4087 }
4088
4089 static int
4090 wg_rtable_add_route(struct wg_softc *wg, struct wg_allowedip *wga)
4091 {
4092 struct radix_node_head *rnh;
4093 struct radix_node *rn;
4094 int error = 0;
4095
4096 rw_enter(wg->wg_rwlock, RW_WRITER);
4097 rnh = wg_rnh(wg, wga->wga_family);
4098 KASSERT(rnh != NULL);
4099 rn = rnh->rnh_addaddr(&wga->wga_sa_addr, &wga->wga_sa_mask, rnh,
4100 wga->wga_nodes);
4101 rw_exit(wg->wg_rwlock);
4102
4103 if (rn == NULL)
4104 error = EEXIST;
4105
4106 return error;
4107 }
4108
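/*
 * wg_handle_prop_peer constructs a peer from a proplib dictionary
 * supplied by userland.  An informal summary of the keys parsed below
 * (not a formal schema):
 *
 *	"name"		string, at most WG_PEER_NAME_MAXLEN bytes
 *	"public_key"	data, the peer's static public key (required)
 *	"preshared_key"	data, optional
 *	"endpoint"	data, a raw sockaddr_in or sockaddr_in6
 *	"allowedips"	array of dictionaries, each carrying
 *			"family" (int), "ip" (data) and "cidr" (uint8)
 */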
4109 static int
4110 wg_handle_prop_peer(struct wg_softc *wg, prop_dictionary_t peer,
4111 struct wg_peer **wgpp)
4112 {
4113 int error = 0;
4114 const void *pubkey;
4115 size_t pubkey_len;
4116 const void *psk;
4117 size_t psk_len;
4118 const char *name = NULL;
4119
4120 if (prop_dictionary_get_string(peer, "name", &name)) {
4121 if (strlen(name) > WG_PEER_NAME_MAXLEN) {
4122 error = EINVAL;
4123 goto out;
4124 }
4125 }
4126
4127 if (!prop_dictionary_get_data(peer, "public_key",
4128 &pubkey, &pubkey_len)) {
4129 error = EINVAL;
4130 goto out;
4131 }
4132 #ifdef WG_DEBUG_DUMP
4133 {
4134 char *hex = gethexdump(pubkey, pubkey_len);
4135 log(LOG_DEBUG, "pubkey=%p, pubkey_len=%lu\n%s\n",
4136 pubkey, pubkey_len, hex);
4137 puthexdump(hex, pubkey, pubkey_len);
4138 }
4139 #endif
4140
4141 struct wg_peer *wgp = wg_alloc_peer(wg);
4142 memcpy(wgp->wgp_pubkey, pubkey, sizeof(wgp->wgp_pubkey));
4143 if (name != NULL)
4144 strncpy(wgp->wgp_name, name, sizeof(wgp->wgp_name));
4145
4146 if (prop_dictionary_get_data(peer, "preshared_key", &psk, &psk_len)) {
4147 if (psk_len != sizeof(wgp->wgp_psk)) {
4148 error = EINVAL;
4149 goto out;
4150 }
4151 memcpy(wgp->wgp_psk, psk, sizeof(wgp->wgp_psk));
4152 }
4153
4154 const void *addr;
4155 size_t addr_len;
4156 struct wg_sockaddr *wgsa = wgp->wgp_endpoint;
4157
4158 if (!prop_dictionary_get_data(peer, "endpoint", &addr, &addr_len))
4159 goto skip_endpoint;
4160 if (addr_len < sizeof(*wgsatosa(wgsa)) ||
4161 addr_len > sizeof(*wgsatoss(wgsa))) {
4162 error = EINVAL;
4163 goto out;
4164 }
4165 memcpy(wgsatoss(wgsa), addr, addr_len);
4166 switch (wgsa_family(wgsa)) {
4167 case AF_INET:
4168 #ifdef INET6
4169 case AF_INET6:
4170 #endif
4171 break;
4172 default:
4173 error = EPFNOSUPPORT;
4174 goto out;
4175 }
4176 if (addr_len != sockaddr_getsize_by_family(wgsa_family(wgsa))) {
4177 error = EINVAL;
4178 goto out;
4179 }
4180 {
4181 char addrstr[128];
4182 sockaddr_format(wgsatosa(wgsa), addrstr, sizeof(addrstr));
4183 WG_DLOG("addr=%s\n", addrstr);
4184 }
4185 wgp->wgp_endpoint_available = true;
4186
4187 prop_array_t allowedips;
4188 skip_endpoint:
4189 allowedips = prop_dictionary_get(peer, "allowedips");
4190 if (allowedips == NULL)
4191 goto skip;
4192
4193 prop_object_iterator_t _it = prop_array_iterator(allowedips);
4194 prop_dictionary_t prop_allowedip;
4195 int j = 0;
4196 while ((prop_allowedip = prop_object_iterator_next(_it)) != NULL) {
4197 struct wg_allowedip *wga = &wgp->wgp_allowedips[j];
4198
4199 if (!prop_dictionary_get_int(prop_allowedip, "family",
4200 &wga->wga_family))
4201 continue;
4202 if (!prop_dictionary_get_data(prop_allowedip, "ip",
4203 &addr, &addr_len))
4204 continue;
4205 if (!prop_dictionary_get_uint8(prop_allowedip, "cidr",
4206 &wga->wga_cidr))
4207 continue;
4208
4209 switch (wga->wga_family) {
4210 case AF_INET: {
4211 struct sockaddr_in sin;
4212 char addrstr[128];
4213 struct in_addr mask;
4214 struct sockaddr_in sin_mask;
4215
4216 if (addr_len != sizeof(struct in_addr))
4217 return EINVAL;
4218 memcpy(&wga->wga_addr4, addr, addr_len);
4219
4220 sockaddr_in_init(&sin, (const struct in_addr *)addr,
4221 0);
4222 sockaddr_copy(&wga->wga_sa_addr,
4223 sizeof(sin), sintosa(&sin));
4224
4225 sockaddr_format(sintosa(&sin),
4226 addrstr, sizeof(addrstr));
4227 WG_DLOG("addr=%s/%d\n", addrstr, wga->wga_cidr);
4228
4229 in_len2mask(&mask, wga->wga_cidr);
4230 sockaddr_in_init(&sin_mask, &mask, 0);
4231 sockaddr_copy(&wga->wga_sa_mask,
4232 sizeof(sin_mask), sintosa(&sin_mask));
4233
4234 break;
4235 }
4236 #ifdef INET6
4237 case AF_INET6: {
4238 struct sockaddr_in6 sin6;
4239 char addrstr[128];
4240 struct in6_addr mask;
4241 struct sockaddr_in6 sin6_mask;
4242
4243 if (addr_len != sizeof(struct in6_addr))
4244 return EINVAL;
4245 memcpy(&wga->wga_addr6, addr, addr_len);
4246
4247 sockaddr_in6_init(&sin6, (const struct in6_addr *)addr,
4248 0, 0, 0);
4249 sockaddr_copy(&wga->wga_sa_addr,
4250 sizeof(sin6), sin6tosa(&sin6));
4251
4252 sockaddr_format(sin6tosa(&sin6),
4253 addrstr, sizeof(addrstr));
4254 WG_DLOG("addr=%s/%d\n", addrstr, wga->wga_cidr);
4255
4256 in6_prefixlen2mask(&mask, wga->wga_cidr);
4257 sockaddr_in6_init(&sin6_mask, &mask, 0, 0, 0);
4258 sockaddr_copy(&wga->wga_sa_mask,
4259 sizeof(sin6_mask), sin6tosa(&sin6_mask));
4260
4261 break;
4262 }
4263 #endif
4264 default:
4265 error = EINVAL;
4266 goto out;
4267 }
4268 wga->wga_peer = wgp;
4269
4270 error = wg_rtable_add_route(wg, wga);
4271 if (error != 0)
4272 goto out;
4273
4274 j++;
4275 }
4276 wgp->wgp_n_allowedips = j;
4277 skip:
4278 *wgpp = wgp;
4279 out:
4280 return error;
4281 }
4282
4283 static int
4284 wg_alloc_prop_buf(char **_buf, struct ifdrv *ifd)
4285 {
4286 int error;
4287 char *buf;
4288
4289 WG_DLOG("buf=%p, len=%lu\n", ifd->ifd_data, ifd->ifd_len);
4290 buf = kmem_alloc(ifd->ifd_len + 1, KM_SLEEP);
4291 error = copyin(ifd->ifd_data, buf, ifd->ifd_len);
4292 if (error != 0)
4293 return error;
4294 buf[ifd->ifd_len] = '\0';
4295 #ifdef WG_DEBUG_DUMP
4296 log(LOG_DEBUG, "%.*s\n",
4297 (int)MIN(INT_MAX, ifd->ifd_len),
4298 (const char *)buf);
4299 #endif
4300 *_buf = buf;
4301 return 0;
4302 }
4303
4304 static int
4305 wg_ioctl_set_private_key(struct wg_softc *wg, struct ifdrv *ifd)
4306 {
4307 int error;
4308 prop_dictionary_t prop_dict;
4309 char *buf = NULL;
4310 const void *privkey;
4311 size_t privkey_len;
4312
4313 error = wg_alloc_prop_buf(&buf, ifd);
4314 if (error != 0)
4315 return error;
4316 error = EINVAL;
4317 prop_dict = prop_dictionary_internalize(buf);
4318 if (prop_dict == NULL)
4319 goto out;
4320 if (!prop_dictionary_get_data(prop_dict, "private_key",
4321 &privkey, &privkey_len))
4322 goto out;
4323 #ifdef WG_DEBUG_DUMP
4324 {
4325 char *hex = gethexdump(privkey, privkey_len);
4326 log(LOG_DEBUG, "privkey=%p, privkey_len=%lu\n%s\n",
4327 privkey, privkey_len, hex);
4328 puthexdump(hex, privkey, privkey_len);
4329 }
4330 #endif
4331 if (privkey_len != WG_STATIC_KEY_LEN)
4332 goto out;
4333 memcpy(wg->wg_privkey, privkey, WG_STATIC_KEY_LEN);
4334 wg_calc_pubkey(wg->wg_pubkey, wg->wg_privkey);
4335 error = 0;
4336
4337 out:
4338 kmem_free(buf, ifd->ifd_len + 1);
4339 return error;
4340 }
4341
4342 static int
4343 wg_ioctl_set_listen_port(struct wg_softc *wg, struct ifdrv *ifd)
4344 {
4345 int error;
4346 prop_dictionary_t prop_dict;
4347 char *buf = NULL;
4348 uint16_t port;
4349
4350 error = wg_alloc_prop_buf(&buf, ifd);
4351 if (error != 0)
4352 return error;
4353 error = EINVAL;
4354 prop_dict = prop_dictionary_internalize(buf);
4355 if (prop_dict == NULL)
4356 goto out;
4357 if (!prop_dictionary_get_uint16(prop_dict, "listen_port", &port))
4358 goto out;
4359
4360 error = wg->wg_ops->bind_port(wg, (uint16_t)port);
4361
4362 out:
4363 kmem_free(buf, ifd->ifd_len + 1);
4364 return error;
4365 }
4366
4367 static int
4368 wg_ioctl_add_peer(struct wg_softc *wg, struct ifdrv *ifd)
4369 {
4370 int error;
4371 prop_dictionary_t prop_dict;
4372 char *buf = NULL;
4373 struct wg_peer *wgp = NULL, *wgp0 __diagused;
4374
4375 error = wg_alloc_prop_buf(&buf, ifd);
4376 if (error != 0)
4377 return error;
4378 error = EINVAL;
4379 prop_dict = prop_dictionary_internalize(buf);
4380 if (prop_dict == NULL)
4381 goto out;
4382
4383 error = wg_handle_prop_peer(wg, prop_dict, &wgp);
4384 if (error != 0)
4385 goto out;
4386
4387 mutex_enter(wg->wg_lock);
4388 if (thmap_get(wg->wg_peers_bypubkey, wgp->wgp_pubkey,
4389 sizeof(wgp->wgp_pubkey)) != NULL ||
4390 (wgp->wgp_name[0] &&
4391 thmap_get(wg->wg_peers_byname, wgp->wgp_name,
4392 strlen(wgp->wgp_name)) != NULL)) {
4393 mutex_exit(wg->wg_lock);
4394 wg_destroy_peer(wgp);
4395 error = EEXIST;
4396 goto out;
4397 }
4398 wgp0 = thmap_put(wg->wg_peers_bypubkey, wgp->wgp_pubkey,
4399 sizeof(wgp->wgp_pubkey), wgp);
4400 KASSERT(wgp0 == wgp);
4401 if (wgp->wgp_name[0]) {
4402 wgp0 = thmap_put(wg->wg_peers_byname, wgp->wgp_name,
4403 strlen(wgp->wgp_name), wgp);
4404 KASSERT(wgp0 == wgp);
4405 }
4406 WG_PEER_WRITER_INSERT_HEAD(wgp, wg);
4407 wg->wg_npeers++;
4408 mutex_exit(wg->wg_lock);
4409
4410 if_link_state_change(&wg->wg_if, LINK_STATE_UP);
4411
4412 out:
4413 kmem_free(buf, ifd->ifd_len + 1);
4414 return error;
4415 }
4416
4417 static int
4418 wg_ioctl_delete_peer(struct wg_softc *wg, struct ifdrv *ifd)
4419 {
4420 int error;
4421 prop_dictionary_t prop_dict;
4422 char *buf = NULL;
4423 const char *name;
4424
4425 error = wg_alloc_prop_buf(&buf, ifd);
4426 if (error != 0)
4427 return error;
4428 error = EINVAL;
4429 prop_dict = prop_dictionary_internalize(buf);
4430 if (prop_dict == NULL)
4431 goto out;
4432
4433 if (!prop_dictionary_get_string(prop_dict, "name", &name))
4434 goto out;
4435 if (strlen(name) > WG_PEER_NAME_MAXLEN)
4436 goto out;
4437
4438 error = wg_destroy_peer_name(wg, name);
4439 out:
4440 kmem_free(buf, ifd->ifd_len + 1);
4441 return error;
4442 }
4443
4444 static int
4445 wg_ioctl_get(struct wg_softc *wg, struct ifdrv *ifd)
4446 {
4447 int error = ENOMEM;
4448 prop_dictionary_t prop_dict;
4449 prop_array_t peers = NULL;
4450 char *buf;
4451 struct wg_peer *wgp;
4452 int s, i;
4453
4454 prop_dict = prop_dictionary_create();
4455 if (prop_dict == NULL)
4456 goto error;
4457
4458 if (!prop_dictionary_set_data(prop_dict, "private_key", wg->wg_privkey,
4459 WG_STATIC_KEY_LEN))
4460 goto error;
4461
4462 if (wg->wg_listen_port != 0) {
4463 if (!prop_dictionary_set_uint16(prop_dict, "listen_port",
4464 wg->wg_listen_port))
4465 goto error;
4466 }
4467
4468 if (wg->wg_npeers == 0)
4469 goto skip_peers;
4470
4471 peers = prop_array_create();
4472 if (peers == NULL)
4473 goto error;
4474
4475 s = pserialize_read_enter();
4476 i = 0;
4477 WG_PEER_READER_FOREACH(wgp, wg) {
4478 struct wg_sockaddr *wgsa;
4479 struct psref wgp_psref, wgsa_psref;
4480 prop_dictionary_t prop_peer;
4481
4482 wg_get_peer(wgp, &wgp_psref);
4483 pserialize_read_exit(s);
4484
4485 prop_peer = prop_dictionary_create();
4486 if (prop_peer == NULL)
4487 goto next;
4488
4489 if (strlen(wgp->wgp_name) > 0) {
4490 if (!prop_dictionary_set_string(prop_peer, "name",
4491 wgp->wgp_name))
4492 goto next;
4493 }
4494
4495 if (!prop_dictionary_set_data(prop_peer, "public_key",
4496 wgp->wgp_pubkey, sizeof(wgp->wgp_pubkey)))
4497 goto next;
4498
4499 uint8_t psk_zero[WG_PRESHARED_KEY_LEN] = {0};
4500 if (!consttime_memequal(wgp->wgp_psk, psk_zero,
4501 sizeof(wgp->wgp_psk))) {
4502 if (!prop_dictionary_set_data(prop_peer,
4503 "preshared_key",
4504 wgp->wgp_psk, sizeof(wgp->wgp_psk)))
4505 goto next;
4506 }
4507
4508 wgsa = wg_get_endpoint_sa(wgp, &wgsa_psref);
4509 CTASSERT(AF_UNSPEC == 0);
4510 if (wgsa_family(wgsa) != 0 /*AF_UNSPEC*/ &&
4511 !prop_dictionary_set_data(prop_peer, "endpoint",
4512 wgsatoss(wgsa),
4513 sockaddr_getsize_by_family(wgsa_family(wgsa)))) {
4514 wg_put_sa(wgp, wgsa, &wgsa_psref);
4515 goto next;
4516 }
4517 wg_put_sa(wgp, wgsa, &wgsa_psref);
4518
4519 const struct timespec *t = &wgp->wgp_last_handshake_time;
4520
4521 if (!prop_dictionary_set_uint64(prop_peer,
4522 "last_handshake_time_sec", t->tv_sec))
4523 goto next;
4524 if (!prop_dictionary_set_uint32(prop_peer,
4525 "last_handshake_time_nsec", t->tv_nsec))
4526 goto next;
4527
4528 if (wgp->wgp_n_allowedips == 0)
4529 goto skip_allowedips;
4530
4531 prop_array_t allowedips = prop_array_create();
4532 if (allowedips == NULL)
4533 goto next;
4534 for (int j = 0; j < wgp->wgp_n_allowedips; j++) {
4535 struct wg_allowedip *wga = &wgp->wgp_allowedips[j];
4536 prop_dictionary_t prop_allowedip;
4537
4538 prop_allowedip = prop_dictionary_create();
4539 if (prop_allowedip == NULL)
4540 break;
4541
4542 if (!prop_dictionary_set_int(prop_allowedip, "family",
4543 wga->wga_family))
4544 goto _next;
4545 if (!prop_dictionary_set_uint8(prop_allowedip, "cidr",
4546 wga->wga_cidr))
4547 goto _next;
4548
4549 switch (wga->wga_family) {
4550 case AF_INET:
4551 if (!prop_dictionary_set_data(prop_allowedip,
4552 "ip", &wga->wga_addr4,
4553 sizeof(wga->wga_addr4)))
4554 goto _next;
4555 break;
4556 #ifdef INET6
4557 case AF_INET6:
4558 if (!prop_dictionary_set_data(prop_allowedip,
4559 "ip", &wga->wga_addr6,
4560 sizeof(wga->wga_addr6)))
4561 goto _next;
4562 break;
4563 #endif
4564 default:
4565 break;
4566 }
4567 prop_array_set(allowedips, j, prop_allowedip);
4568 _next:
4569 prop_object_release(prop_allowedip);
4570 }
4571 prop_dictionary_set(prop_peer, "allowedips", allowedips);
4572 prop_object_release(allowedips);
4573
4574 skip_allowedips:
4575
4576 prop_array_set(peers, i, prop_peer);
4577 next:
4578 if (prop_peer)
4579 prop_object_release(prop_peer);
4580 i++;
4581
4582 s = pserialize_read_enter();
4583 wg_put_peer(wgp, &wgp_psref);
4584 }
4585 pserialize_read_exit(s);
4586
4587 prop_dictionary_set(prop_dict, "peers", peers);
4588 prop_object_release(peers);
4589 peers = NULL;
4590
4591 skip_peers:
4592 buf = prop_dictionary_externalize(prop_dict);
4593 if (buf == NULL)
4594 goto error;
4595 if (ifd->ifd_len < (strlen(buf) + 1)) {
4596 error = EINVAL;
4597 goto error;
4598 }
4599 error = copyout(buf, ifd->ifd_data, strlen(buf) + 1);
4600
4601 free(buf, 0);
4602 error:
4603 if (peers != NULL)
4604 prop_object_release(peers);
4605 if (prop_dict != NULL)
4606 prop_object_release(prop_dict);
4607
4608 return error;
4609 }
4610
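/*
 * wg_ioctl dispatches interface ioctls.  Configuration is carried out
 * with SIOCSDRVSPEC and an externalized proplib dictionary, using the
 * WG_IOCTL_* subcommands handled above (set private key, set listen
 * port, add peer, delete peer); SIOCGDRVSPEC returns the current
 * configuration through wg_ioctl_get().
 */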
4611 static int
4612 wg_ioctl(struct ifnet *ifp, u_long cmd, void *data)
4613 {
4614 struct wg_softc *wg = ifp->if_softc;
4615 struct ifreq *ifr = data;
4616 struct ifaddr *ifa = data;
4617 struct ifdrv *ifd = data;
4618 int error = 0;
4619
4620 switch (cmd) {
4621 case SIOCINITIFADDR:
4622 if (ifa->ifa_addr->sa_family != AF_LINK &&
4623 (ifp->if_flags & (IFF_UP | IFF_RUNNING)) !=
4624 (IFF_UP | IFF_RUNNING)) {
4625 ifp->if_flags |= IFF_UP;
4626 error = ifp->if_init(ifp);
4627 }
4628 return error;
4629 case SIOCADDMULTI:
4630 case SIOCDELMULTI:
4631 switch (ifr->ifr_addr.sa_family) {
4632 case AF_INET: /* IP supports Multicast */
4633 break;
4634 #ifdef INET6
4635 case AF_INET6: /* IP6 supports Multicast */
4636 break;
4637 #endif
4638 default: /* Other protocols don't support Multicast */
4639 error = EAFNOSUPPORT;
4640 break;
4641 }
4642 return error;
4643 case SIOCSDRVSPEC:
4644 switch (ifd->ifd_cmd) {
4645 case WG_IOCTL_SET_PRIVATE_KEY:
4646 error = wg_ioctl_set_private_key(wg, ifd);
4647 break;
4648 case WG_IOCTL_SET_LISTEN_PORT:
4649 error = wg_ioctl_set_listen_port(wg, ifd);
4650 break;
4651 case WG_IOCTL_ADD_PEER:
4652 error = wg_ioctl_add_peer(wg, ifd);
4653 break;
4654 case WG_IOCTL_DELETE_PEER:
4655 error = wg_ioctl_delete_peer(wg, ifd);
4656 break;
4657 default:
4658 error = EINVAL;
4659 break;
4660 }
4661 return error;
4662 case SIOCGDRVSPEC:
4663 return wg_ioctl_get(wg, ifd);
4664 case SIOCSIFFLAGS:
4665 if ((error = ifioctl_common(ifp, cmd, data)) != 0)
4666 break;
4667 switch (ifp->if_flags & (IFF_UP|IFF_RUNNING)) {
4668 case IFF_RUNNING:
4669 /*
4670 * If interface is marked down and it is running,
4671 * then stop and disable it.
4672 */
4673 (*ifp->if_stop)(ifp, 1);
4674 break;
4675 case IFF_UP:
4676 /*
4677 * If interface is marked up and it is stopped, then
4678 * start it.
4679 */
4680 error = (*ifp->if_init)(ifp);
4681 break;
4682 default:
4683 break;
4684 }
4685 return error;
4686 #ifdef WG_RUMPKERNEL
4687 case SIOCSLINKSTR:
4688 error = wg_ioctl_linkstr(wg, ifd);
4689 if (error == 0)
4690 wg->wg_ops = &wg_ops_rumpuser;
4691 return error;
4692 #endif
4693 default:
4694 break;
4695 }
4696
4697 error = ifioctl_common(ifp, cmd, data);
4698
4699 #ifdef WG_RUMPKERNEL
4700 if (!wg_user_mode(wg))
4701 return error;
4702
4703 /* Do the same to the corresponding tun device on the host */
4704 /*
4705 * XXX Actually the command has not been handled yet. It
4706 * will be handled via pr_ioctl from doifioctl later.
4707 */
4708 switch (cmd) {
4709 case SIOCAIFADDR:
4710 case SIOCDIFADDR: {
4711 struct in_aliasreq _ifra = *(const struct in_aliasreq *)data;
4712 struct in_aliasreq *ifra = &_ifra;
4713 KASSERT(error == ENOTTY);
4714 strncpy(ifra->ifra_name, rumpuser_wg_get_tunname(wg->wg_user),
4715 IFNAMSIZ);
4716 error = rumpuser_wg_ioctl(wg->wg_user, cmd, ifra, AF_INET);
4717 if (error == 0)
4718 error = ENOTTY;
4719 break;
4720 }
4721 #ifdef INET6
4722 case SIOCAIFADDR_IN6:
4723 case SIOCDIFADDR_IN6: {
4724 struct in6_aliasreq _ifra = *(const struct in6_aliasreq *)data;
4725 struct in6_aliasreq *ifra = &_ifra;
4726 KASSERT(error == ENOTTY);
4727 strncpy(ifra->ifra_name, rumpuser_wg_get_tunname(wg->wg_user),
4728 IFNAMSIZ);
4729 error = rumpuser_wg_ioctl(wg->wg_user, cmd, ifra, AF_INET6);
4730 if (error == 0)
4731 error = ENOTTY;
4732 break;
4733 }
4734 #endif
4735 }
4736 #endif /* WG_RUMPKERNEL */
4737
4738 return error;
4739 }
4740
4741 static int
4742 wg_init(struct ifnet *ifp)
4743 {
4744
4745 ifp->if_flags |= IFF_RUNNING;
4746
4747 /* TODO flush pending packets. */
4748 return 0;
4749 }
4750
4751 #ifdef ALTQ
4752 static void
4753 wg_start(struct ifnet *ifp)
4754 {
4755 struct mbuf *m;
4756
4757 for (;;) {
4758 IFQ_DEQUEUE(&ifp->if_snd, m);
4759 if (m == NULL)
4760 break;
4761
4762 kpreempt_disable();
4763 const uint32_t h = curcpu()->ci_index; /* pktq_rps_hash(m) */
4764 if (__predict_false(!pktq_enqueue(wg_pktq, m, h))) {
4765 WGLOG(LOG_ERR, "pktq full, dropping\n");
4766 m_freem(m);
4767 }
4768 kpreempt_enable();
4769 }
4770 }
4771 #endif
4772
4773 static void
4774 wg_stop(struct ifnet *ifp, int disable)
4775 {
4776
4777 KASSERT((ifp->if_flags & IFF_RUNNING) != 0);
4778 ifp->if_flags &= ~IFF_RUNNING;
4779
4780 /* Need to do something? */
4781 }
4782
4783 #ifdef WG_DEBUG_PARAMS
4784 SYSCTL_SETUP(sysctl_net_wg_setup, "sysctl net.wg setup")
4785 {
4786 const struct sysctlnode *node = NULL;
4787
4788 sysctl_createv(clog, 0, NULL, &node,
4789 CTLFLAG_PERMANENT,
4790 CTLTYPE_NODE, "wg",
4791 SYSCTL_DESCR("wg(4)"),
4792 NULL, 0, NULL, 0,
4793 CTL_NET, CTL_CREATE, CTL_EOL);
4794 sysctl_createv(clog, 0, &node, NULL,
4795 CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
4796 CTLTYPE_QUAD, "rekey_after_messages",
4797 SYSCTL_DESCR("session lifetime in messages"),
4798 NULL, 0, &wg_rekey_after_messages, 0, CTL_CREATE, CTL_EOL);
4799 sysctl_createv(clog, 0, &node, NULL,
4800 CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
4801 CTLTYPE_INT, "rekey_after_time",
4802 SYSCTL_DESCR("session lifetime"),
4803 NULL, 0, &wg_rekey_after_time, 0, CTL_CREATE, CTL_EOL);
4804 sysctl_createv(clog, 0, &node, NULL,
4805 CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
4806 CTLTYPE_INT, "rekey_timeout",
4807 SYSCTL_DESCR("session handshake retry time"),
4808 NULL, 0, &wg_rekey_timeout, 0, CTL_CREATE, CTL_EOL);
4809 sysctl_createv(clog, 0, &node, NULL,
4810 CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
4811 CTLTYPE_INT, "rekey_attempt_time",
4812 SYSCTL_DESCR("session handshake timeout"),
4813 NULL, 0, &wg_rekey_attempt_time, 0, CTL_CREATE, CTL_EOL);
4814 sysctl_createv(clog, 0, &node, NULL,
4815 CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
4816 CTLTYPE_INT, "keepalive_timeout",
4817 SYSCTL_DESCR("keepalive timeout"),
4818 NULL, 0, &wg_keepalive_timeout, 0, CTL_CREATE, CTL_EOL);
4819 sysctl_createv(clog, 0, &node, NULL,
4820 CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
4821 CTLTYPE_BOOL, "force_underload",
4822 SYSCTL_DESCR("always treat the interface as under load"),
4823 NULL, 0, &wg_force_underload, 0, CTL_CREATE, CTL_EOL);
4824 }
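/*
 * Illustrative sysctl usage, assuming a kernel built with
 * WG_DEBUG_PARAMS (the node names follow from the setup above; the
 * values shown are arbitrary examples, not defaults):
 *
 *	# sysctl net.wg.rekey_after_time
 *	# sysctl -w net.wg.rekey_timeout=10
 */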
4825 #endif
4826
4827 #ifdef WG_RUMPKERNEL
4828 static bool
4829 wg_user_mode(struct wg_softc *wg)
4830 {
4831
4832 return wg->wg_user != NULL;
4833 }
4834
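/*
 * Illustrative (unverified) usage of user mode: wg_ioctl_linkstr below
 * accepts a host tun device name via SIOCSLINKSTR while the interface
 * is down, so a setup might look roughly like
 *
 *	# ifconfig wg0 linkstr tun0
 *
 * The name must begin with "tun".
 */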
4835 static int
4836 wg_ioctl_linkstr(struct wg_softc *wg, struct ifdrv *ifd)
4837 {
4838 struct ifnet *ifp = &wg->wg_if;
4839 int error;
4840
4841 if (ifp->if_flags & IFF_UP)
4842 return EBUSY;
4843
4844 if (ifd->ifd_cmd == IFLINKSTR_UNSET) {
4845 /* XXX do nothing */
4846 return 0;
4847 } else if (ifd->ifd_cmd != 0) {
4848 return EINVAL;
4849 } else if (wg->wg_user != NULL) {
4850 return EBUSY;
4851 }
4852
4853 /* The length is assumed to include the terminating NUL. */
4854 if (ifd->ifd_len > IFNAMSIZ) {
4855 return E2BIG;
4856 } else if (ifd->ifd_len < 1) {
4857 return EINVAL;
4858 }
4859
4860 char tun_name[IFNAMSIZ];
4861 error = copyinstr(ifd->ifd_data, tun_name, ifd->ifd_len, NULL);
4862 if (error != 0)
4863 return error;
4864
4865 if (strncmp(tun_name, "tun", 3) != 0)
4866 return EINVAL;
4867
4868 error = rumpuser_wg_create(tun_name, wg, &wg->wg_user);
4869
4870 return error;
4871 }
4872
4873 static int
4874 wg_send_user(struct wg_peer *wgp, struct mbuf *m)
4875 {
4876 int error;
4877 struct psref psref;
4878 struct wg_sockaddr *wgsa;
4879 struct wg_softc *wg = wgp->wgp_sc;
4880 struct iovec iov[1];
4881
4882 wgsa = wg_get_endpoint_sa(wgp, &psref);
4883
4884 iov[0].iov_base = mtod(m, void *);
4885 iov[0].iov_len = m->m_len;
4886
4887 /* Send messages to a peer via an ordinary socket. */
4888 error = rumpuser_wg_send_peer(wg->wg_user, wgsatosa(wgsa), iov, 1);
4889
4890 wg_put_sa(wgp, wgsa, &psref);
4891
4892 m_freem(m);
4893
4894 return error;
4895 }
4896
4897 static void
4898 wg_input_user(struct ifnet *ifp, struct mbuf *m, const int af)
4899 {
4900 struct wg_softc *wg = ifp->if_softc;
4901 struct iovec iov[2];
4902 struct sockaddr_storage ss;
4903
4904 KASSERT(af == AF_INET || af == AF_INET6);
4905
4906 WG_TRACE("");
4907
4908 if (af == AF_INET) {
4909 struct sockaddr_in *sin = (struct sockaddr_in *)&ss;
4910 struct ip *ip;
4911
4912 KASSERT(m->m_len >= sizeof(struct ip));
4913 ip = mtod(m, struct ip *);
4914 sockaddr_in_init(sin, &ip->ip_dst, 0);
4915 } else {
4916 struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&ss;
4917 struct ip6_hdr *ip6;
4918
4919 KASSERT(m->m_len >= sizeof(struct ip6_hdr));
4920 ip6 = mtod(m, struct ip6_hdr *);
4921 sockaddr_in6_init(sin6, &ip6->ip6_dst, 0, 0, 0);
4922 }
4923
4924 iov[0].iov_base = &ss;
4925 iov[0].iov_len = ss.ss_len;
4926 iov[1].iov_base = mtod(m, void *);
4927 iov[1].iov_len = m->m_len;
4928
4929 WG_DUMP_BUF(iov[1].iov_base, iov[1].iov_len);
4930
4931 /* Hand decrypted packets to the user via the host tun device. */
4932 rumpuser_wg_send_user(wg->wg_user, iov, 2);
4933
4934 m_freem(m);
4935 }
4936
4937 static int
4938 wg_bind_port_user(struct wg_softc *wg, const uint16_t port)
4939 {
4940 int error;
4941 uint16_t old_port = wg->wg_listen_port;
4942
4943 if (port != 0 && old_port == port)
4944 return 0;
4945
4946 error = rumpuser_wg_sock_bind(wg->wg_user, port);
4947 if (error == 0)
4948 wg->wg_listen_port = port;
4949 return error;
4950 }
4951
4952 /*
4953 * Receive packets from the user-side tun device and tunnel them out.
4954 */
4955 void
4956 rumpkern_wg_recv_user(struct wg_softc *wg, struct iovec *iov, size_t iovlen)
4957 {
4958 struct ifnet *ifp = &wg->wg_if;
4959 struct mbuf *m;
4960 const struct sockaddr *dst;
4961
4962 WG_TRACE("");
4963
4964 dst = iov[0].iov_base;
4965
4966 m = m_gethdr(M_DONTWAIT, MT_DATA);
4967 if (m == NULL)
4968 return;
4969 m->m_len = m->m_pkthdr.len = 0;
4970 m_copyback(m, 0, iov[1].iov_len, iov[1].iov_base);
4971
4972 WG_DLOG("iov_len=%lu\n", iov[1].iov_len);
4973 WG_DUMP_BUF(iov[1].iov_base, iov[1].iov_len);
4974
4975 (void)wg_output(ifp, m, dst, NULL);
4976 }
4977
4978 /*
4979 * Receive packets from a peer.
4980 */
4981 void
4982 rumpkern_wg_recv_peer(struct wg_softc *wg, struct iovec *iov, size_t iovlen)
4983 {
4984 struct mbuf *m;
4985 const struct sockaddr *src;
4986
4987 WG_TRACE("");
4988
4989 src = iov[0].iov_base;
4990
4991 m = m_gethdr(M_DONTWAIT, MT_DATA);
4992 if (m == NULL)
4993 return;
4994 m->m_len = m->m_pkthdr.len = 0;
4995 m_copyback(m, 0, iov[1].iov_len, iov[1].iov_base);
4996
4997 WG_DLOG("iov_len=%lu\n", iov[1].iov_len);
4998 WG_DUMP_BUF(iov[1].iov_base, iov[1].iov_len);
4999
5000 wg_handle_packet(wg, m, src);
5001 }
5002 #endif /* WG_RUMPKERNEL */
5003
5004 /*
5005 * Module infrastructure
5006 */
5007 #include "if_module.h"
5008
5009 IF_MODULE(MODULE_CLASS_DRIVER, wg, "")
5010