      1 /*	$NetBSD: ip_sync.c,v 1.2 2012/03/23 20:39:50 christos Exp $	*/
      2 
      3 /*
      4  * Copyright (C) 2011 by Darren Reed.
      5  *
      6  * See the IPFILTER.LICENCE file for details on licencing.
      7  */
      8 #if defined(KERNEL) || defined(_KERNEL)
      9 # undef KERNEL
     10 # undef _KERNEL
     11 # define        KERNEL	1
     12 # define        _KERNEL	1
     13 #endif
     14 #include <sys/errno.h>
     15 #include <sys/types.h>
     16 #include <sys/param.h>
     17 #include <sys/file.h>
     18 #if !defined(_KERNEL) && !defined(__KERNEL__)
     19 # include <stdio.h>
     20 # include <stdlib.h>
     21 # include <string.h>
     22 # define _KERNEL
     23 # define KERNEL
     24 # ifdef __OpenBSD__
     25 struct file;
     26 # endif
     27 # include <sys/uio.h>
     28 # undef _KERNEL
     29 # undef KERNEL
     30 #else
     31 # include <sys/systm.h>
     32 # if !defined(__SVR4) && !defined(__svr4__)
     33 #  include <sys/mbuf.h>
     34 # endif
     35 # include <sys/select.h>
     36 # if __FreeBSD_version >= 500000
     37 #  include <sys/selinfo.h>
     38 # endif
     39 #endif
     40 #if defined(__NetBSD__) && (__NetBSD_Version__ >= 104000000)
     41 # include <sys/proc.h>
     42 #endif
     43 #if defined(_KERNEL) && (__FreeBSD_version >= 220000)
     44 # include <sys/filio.h>
     45 # include <sys/fcntl.h>
     46 #else
     47 # include <sys/ioctl.h>
     48 #endif
     49 #include <sys/time.h>
     50 #if !defined(linux)
     51 # include <sys/protosw.h>
     52 #endif
     53 #include <sys/socket.h>
     54 #if defined(__SVR4) || defined(__svr4__)
     55 # include <sys/filio.h>
     56 # include <sys/byteorder.h>
     57 # ifdef _KERNEL
     58 #  include <sys/dditypes.h>
     59 # endif
     60 # include <sys/stream.h>
     61 # include <sys/kmem.h>
     62 #endif
     63 
     64 #include <net/if.h>
     65 #ifdef sun
     66 # include <net/af.h>
     67 #endif
     68 #include <netinet/in.h>
     69 #include <netinet/in_systm.h>
     70 #include <netinet/ip.h>
     71 #include <netinet/tcp.h>
     72 #if !defined(linux)
     73 # include <netinet/ip_var.h>
     74 #endif
     75 #if !defined(__hpux) && !defined(linux)
     76 # include <netinet/tcp_fsm.h>
     77 #endif
     78 #include <netinet/udp.h>
     79 #include <netinet/ip_icmp.h>
     80 #include "netinet/ip_compat.h"
     81 #include <netinet/tcpip.h>
     82 #include "netinet/ip_fil.h"
     83 #include "netinet/ip_nat.h"
     84 #include "netinet/ip_frag.h"
     85 #include "netinet/ip_state.h"
     86 #include "netinet/ip_proxy.h"
     87 #include "netinet/ip_sync.h"
     88 #ifdef  USE_INET6
     89 #include <netinet/icmp6.h>
     90 #endif
     91 #if (__FreeBSD_version >= 300000)
     92 # include <sys/malloc.h>
     93 # if defined(_KERNEL) && !defined(IPFILTER_LKM)
     94 #  include <sys/libkern.h>
     95 #  include <sys/systm.h>
     96 # endif
     97 #endif
     98 /* END OF INCLUDES */
     99 
    100 #if !defined(lint)
    101 #if defined(__NetBSD__)
    102 #include <sys/cdefs.h>
    103 __KERNEL_RCSID(0, "$NetBSD: ip_sync.c,v 1.2 2012/03/23 20:39:50 christos Exp $");
    104 #else
    105 static const char rcsid[] = "@(#)Id: ip_sync.c,v 2.68.2.4 2012/01/29 05:30:36 darrenr Exp";
    106 #endif
    107 #endif
    108 
    109 #define	SYNC_STATETABSZ	256
    110 #define	SYNC_NATTABSZ	256
    111 
    112 typedef struct ipf_sync_softc_s {
    113 	ipfmutex_t	ipf_syncadd;
    114 	ipfmutex_t	ipsl_mutex;
    115 	ipfrwlock_t	ipf_syncstate;
    116 	ipfrwlock_t	ipf_syncnat;
    117 #if SOLARIS && defined(_KERNEL)
    118 	kcondvar_t	ipslwait;
    119 #endif
    120 #if defined(linux) && defined(_KERNEL)
    121 	wait_queue_head_t	sl_tail_linux;
    122 #endif
    123 	synclist_t	**syncstatetab;
    124 	synclist_t	**syncnattab;
    125 	synclogent_t	*synclog;
    126 	syncupdent_t	*syncupd;
    127 	u_int		ipf_sync_num;
    128 	u_int		ipf_sync_wrap;
    129 	u_int		sl_idx;		/* next available sync log entry */
    130 	u_int		su_idx;		/* next available sync update entry */
    131 	u_int		sl_tail;	/* next sync log entry to read */
    132 	u_int		su_tail;	/* next sync update entry to read */
    133 	int		ipf_sync_log_sz;
    134 	int		ipf_sync_nat_tab_sz;
    135 	int		ipf_sync_state_tab_sz;
    136 	int		ipf_sync_debug;
    137 	int		ipf_sync_events;
    138 	u_32_t		ipf_sync_lastwakeup;
    139 	int		ipf_sync_wake_interval;
    140 	int		ipf_sync_event_high_wm;
    141 	int		ipf_sync_queue_high_wm;
    142 	int		ipf_sync_inited;
    143 } ipf_sync_softc_t;
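
         /*
          * The synclog and syncupd arrays are used as simple ring buffers: sl_idx
          * and su_idx are the producer indices (ipf_sync_new and ipf_sync_update
          * write new entries there) while sl_tail and su_tail are the consumer
          * indices used by ipf_sync_read, so the number of entries waiting to be
          * read is sl_idx - sl_tail (and su_idx - su_tail).  Once a reader has
          * drained a ring, both of its indices are reset to zero.
          */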
    144 
    145 static int ipf_sync_flush_table(ipf_sync_softc_t *, int, synclist_t **);
    146 static void ipf_sync_wakeup(ipf_main_softc_t *);
    147 static void ipf_sync_del(ipf_sync_softc_t *, synclist_t *);
    148 static void ipf_sync_poll_wakeup(ipf_main_softc_t *);
    149 static int ipf_sync_nat(ipf_main_softc_t *, synchdr_t *, void *);
    150 static int ipf_sync_state(ipf_main_softc_t *, synchdr_t *, void *);
    151 
    152 # if !defined(sparc) && !defined(__hppa)
    153 void ipf_sync_tcporder(int, struct tcpdata *);
    154 void ipf_sync_natorder(int, struct nat *);
    155 void ipf_sync_storder(int, struct ipstate *);
    156 # endif
    157 
    158 
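         /* ------------------------------------------------------------------------ */
         /* Function:    ipf_sync_soft_create                                        */
         /* Returns:     void * - NULL == failure, else new sync context structure   */
         /* Parameters:  softc(I) - main soft context                                */
         /*                                                                          */
         /* Allocate and zero the sync context structure and fill in the default     */
         /* log size, hash table sizes and high water marks for the wakeup           */
         /* heuristics.                                                              */
         /* ------------------------------------------------------------------------ */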
    159 void *
    160 ipf_sync_soft_create(ipf_main_softc_t *softc)
    161 {
    162 	ipf_sync_softc_t *softs;
    163 
    164 	KMALLOC(softs, ipf_sync_softc_t *);
    165 	if (softs == NULL)
    166 		return NULL;
    167 
    168 	bzero((char *)softs, sizeof(*softs));
    169 
    170 	softs->ipf_sync_log_sz = SYNCLOG_SZ;
     171 	softs->ipf_sync_nat_tab_sz = SYNC_NATTABSZ;
    172 	softs->ipf_sync_state_tab_sz = SYNC_STATETABSZ;
     173 	softs->ipf_sync_event_high_wm = SYNCLOG_SZ * 90 / 100;	/* 90% */
     174 	softs->ipf_sync_queue_high_wm = SYNCLOG_SZ * 90 / 100;	/* 90% */
    175 
    176 	return softs;
    177 }
    178 
    179 
    180 /* ------------------------------------------------------------------------ */
     181 /* Function:    ipf_sync_soft_init                                          */
     182 /* Returns:     int - 0 == success, else negative failure code              */
     183 /* Parameters:  softc(I) - main soft context, arg(I) - sync soft context    */
    184 /*                                                                          */
    185 /* Initialise all of the locks required for the sync code and initialise    */
    186 /* any data structures, as required.                                        */
    187 /* ------------------------------------------------------------------------ */
    188 int
    189 ipf_sync_soft_init(ipf_main_softc_t *softc, void *arg)
    190 {
    191 	ipf_sync_softc_t *softs = arg;
    192 
    193 	KMALLOCS(softs->synclog, synclogent_t *,
    194 		 softs->ipf_sync_log_sz * sizeof(*softs->synclog));
    195 	if (softs->synclog == NULL)
    196 		return -1;
    197 	bzero((char *)softs->synclog,
    198 	      softs->ipf_sync_log_sz * sizeof(*softs->synclog));
    199 
    200 	KMALLOCS(softs->syncupd, syncupdent_t *,
    201 		 softs->ipf_sync_log_sz * sizeof(*softs->syncupd));
    202 	if (softs->syncupd == NULL)
    203 		return -2;
    204 	bzero((char *)softs->syncupd,
    205 	      softs->ipf_sync_log_sz * sizeof(*softs->syncupd));
    206 
    207 	KMALLOCS(softs->syncstatetab, synclist_t **,
    208 		 softs->ipf_sync_state_tab_sz * sizeof(*softs->syncstatetab));
    209 	if (softs->syncstatetab == NULL)
    210 		return -3;
    211 	bzero((char *)softs->syncstatetab,
    212 	      softs->ipf_sync_state_tab_sz * sizeof(*softs->syncstatetab));
    213 
    214 	KMALLOCS(softs->syncnattab, synclist_t **,
    215 		 softs->ipf_sync_nat_tab_sz * sizeof(*softs->syncnattab));
    216 	if (softs->syncnattab == NULL)
     217 		return -4;
    218 	bzero((char *)softs->syncnattab,
    219 	      softs->ipf_sync_nat_tab_sz * sizeof(*softs->syncnattab));
    220 
    221 	softs->ipf_sync_num = 1;
    222 	softs->ipf_sync_wrap = 0;
    223 	softs->sl_idx = 0;
    224 	softs->su_idx = 0;
    225 	softs->sl_tail = 0;
    226 	softs->su_tail = 0;
    227 	softs->ipf_sync_events = 0;
    228 	softs->ipf_sync_lastwakeup = 0;
    229 
    230 
    231 # if SOLARIS && defined(_KERNEL)
    232 	cv_init(&softs->ipslwait, "ipsl condvar", CV_DRIVER, NULL);
    233 # endif
    234 	RWLOCK_INIT(&softs->ipf_syncstate, "add things to state sync table");
    235 	RWLOCK_INIT(&softs->ipf_syncnat, "add things to nat sync table");
    236 	MUTEX_INIT(&softs->ipf_syncadd, "add things to sync table");
    237 	MUTEX_INIT(&softs->ipsl_mutex, "read ring lock");
    238 
    239 	softs->ipf_sync_inited = 1;
    240 
    241 	return 0;
    242 }
    243 
    244 
    245 /* ------------------------------------------------------------------------ */
     246 /* Function:    ipf_sync_soft_fini                                          */
     247 /* Returns:     int - 0 == success (always)                                 */
     248 /* Parameters:  softc(I) - main soft context, arg(I) - sync soft context    */
    249 /*                                                                          */
    250 /* Destroy the locks created when initialising and free any memory in use   */
    251 /* with the synchronisation tables.                                         */
    252 /* ------------------------------------------------------------------------ */
    253 int
    254 ipf_sync_soft_fini(ipf_main_softc_t *softc, void *arg)
    255 {
    256 	ipf_sync_softc_t *softs = arg;
    257 
    258 	if (softs->syncnattab != NULL) {
    259 		ipf_sync_flush_table(softs, softs->ipf_sync_nat_tab_sz,
    260 				     softs->syncnattab);
    261 		KFREES(softs->syncnattab,
    262 		       softs->ipf_sync_nat_tab_sz * sizeof(*softs->syncnattab));
    263 		softs->syncnattab = NULL;
    264 	}
    265 
    266 	if (softs->syncstatetab != NULL) {
    267 		ipf_sync_flush_table(softs, softs->ipf_sync_state_tab_sz,
    268 				     softs->syncstatetab);
    269 		KFREES(softs->syncstatetab,
    270 		       softs->ipf_sync_state_tab_sz *
    271 		       sizeof(*softs->syncstatetab));
    272 		softs->syncstatetab = NULL;
    273 	}
    274 
    275 	if (softs->syncupd != NULL) {
    276 		KFREES(softs->syncupd,
    277 		       softs->ipf_sync_log_sz * sizeof(*softs->syncupd));
    278 		softs->syncupd = NULL;
    279 	}
    280 
    281 	if (softs->synclog != NULL) {
    282 		KFREES(softs->synclog,
    283 		       softs->ipf_sync_log_sz * sizeof(*softs->synclog));
    284 		softs->synclog = NULL;
    285 	}
    286 
    287 	if (softs->ipf_sync_inited == 1) {
    288 		MUTEX_DESTROY(&softs->ipsl_mutex);
    289 		MUTEX_DESTROY(&softs->ipf_syncadd);
    290 		RW_DESTROY(&softs->ipf_syncnat);
    291 		RW_DESTROY(&softs->ipf_syncstate);
    292 		softs->ipf_sync_inited = 0;
    293 	}
    294 
    295 	return 0;
    296 }
    297 
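         /* ------------------------------------------------------------------------ */
         /* Function:    ipf_sync_soft_destroy                                       */
         /* Returns:     Nil                                                         */
         /* Parameters:  softc(I) - main soft context                                */
         /*              arg(I)   - pointer to sync context structure                */
         /*                                                                          */
         /* Free the sync context structure allocated by ipf_sync_soft_create.       */
         /* ------------------------------------------------------------------------ */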
    298 void
    299 ipf_sync_soft_destroy(ipf_main_softc_t *softc, void *arg)
    300 {
    301 	ipf_sync_softc_t *softs = arg;
    302 
    303 	KFREE(softs);
    304 }
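
         /*
          * A rough sketch (the callers live outside this file) of the order in
          * which the main ipf code is assumed to invoke the four functions above
          * when attaching and detaching the sync subsystem:
          *
          *	void *soft = ipf_sync_soft_create(softc);
          *
          *	if (soft != NULL) {
          *		if (ipf_sync_soft_init(softc, soft) == 0) {
          *			softc->ipf_sync_soft = soft;
          *			... sync code in use ...
          *			ipf_sync_soft_fini(softc, soft);
          *		}
          *		ipf_sync_soft_destroy(softc, soft);
          *	}
          */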
    305 
    306 
    307 # if !defined(sparc) && !defined(__hppa)
    308 /* ------------------------------------------------------------------------ */
    309 /* Function:    ipf_sync_tcporder                                           */
    310 /* Returns:     Nil                                                         */
    311 /* Parameters:  way(I) - direction of byte order conversion.                */
    312 /*              td(IO) - pointer to data to be converted.                   */
    313 /*                                                                          */
    314 /* Do byte swapping on values in the TCP state information structure that   */
    315 /* need to be used at both ends by the host in their native byte order.     */
    316 /* ------------------------------------------------------------------------ */
    317 void
    318 ipf_sync_tcporder(int way, tcpdata_t *td)
    319 {
    320 	if (way) {
    321 		td->td_maxwin = htons(td->td_maxwin);
    322 		td->td_end = htonl(td->td_end);
    323 		td->td_maxend = htonl(td->td_maxend);
    324 	} else {
    325 		td->td_maxwin = ntohs(td->td_maxwin);
    326 		td->td_end = ntohl(td->td_end);
    327 		td->td_maxend = ntohl(td->td_maxend);
    328 	}
    329 }
    330 
    331 
    332 /* ------------------------------------------------------------------------ */
    333 /* Function:    ipf_sync_natorder                                           */
    334 /* Returns:     Nil                                                         */
    335 /* Parameters:  way(I)  - direction of byte order conversion.               */
    336 /*              nat(IO) - pointer to data to be converted.                  */
    337 /*                                                                          */
    338 /* Do byte swapping on values in the NAT data structure that need to be     */
    339 /* used at both ends by the host in their native byte order.                */
    340 /* ------------------------------------------------------------------------ */
    341 void
    342 ipf_sync_natorder(int way, nat_t *n)
    343 {
    344 	if (way) {
    345 		n->nat_age = htonl(n->nat_age);
    346 		n->nat_flags = htonl(n->nat_flags);
    347 		n->nat_ipsumd = htonl(n->nat_ipsumd);
    348 		n->nat_use = htonl(n->nat_use);
    349 		n->nat_dir = htonl(n->nat_dir);
    350 	} else {
    351 		n->nat_age = ntohl(n->nat_age);
    352 		n->nat_flags = ntohl(n->nat_flags);
    353 		n->nat_ipsumd = ntohl(n->nat_ipsumd);
    354 		n->nat_use = ntohl(n->nat_use);
    355 		n->nat_dir = ntohl(n->nat_dir);
    356 	}
    357 }
    358 
    359 
    360 /* ------------------------------------------------------------------------ */
    361 /* Function:    ipf_sync_storder                                            */
    362 /* Returns:     Nil                                                         */
    363 /* Parameters:  way(I)  - direction of byte order conversion.               */
    364 /*              ips(IO) - pointer to data to be converted.                  */
    365 /*                                                                          */
    366 /* Do byte swapping on values in the IP state data structure that need to   */
    367 /* be used at both ends by the host in their native byte order.             */
    368 /* ------------------------------------------------------------------------ */
    369 void
    370 ipf_sync_storder(int way, ipstate_t *ips)
    371 {
    372 	ipf_sync_tcporder(way, &ips->is_tcp.ts_data[0]);
    373 	ipf_sync_tcporder(way, &ips->is_tcp.ts_data[1]);
    374 
    375 	if (way) {
    376 		ips->is_hv = htonl(ips->is_hv);
    377 		ips->is_die = htonl(ips->is_die);
    378 		ips->is_pass = htonl(ips->is_pass);
    379 		ips->is_flags = htonl(ips->is_flags);
    380 		ips->is_opt[0] = htonl(ips->is_opt[0]);
    381 		ips->is_opt[1] = htonl(ips->is_opt[1]);
    382 		ips->is_optmsk[0] = htonl(ips->is_optmsk[0]);
    383 		ips->is_optmsk[1] = htonl(ips->is_optmsk[1]);
    384 		ips->is_sec = htons(ips->is_sec);
    385 		ips->is_secmsk = htons(ips->is_secmsk);
    386 		ips->is_auth = htons(ips->is_auth);
    387 		ips->is_authmsk = htons(ips->is_authmsk);
    388 		ips->is_s0[0] = htonl(ips->is_s0[0]);
    389 		ips->is_s0[1] = htonl(ips->is_s0[1]);
    390 		ips->is_smsk[0] = htons(ips->is_smsk[0]);
    391 		ips->is_smsk[1] = htons(ips->is_smsk[1]);
    392 	} else {
    393 		ips->is_hv = ntohl(ips->is_hv);
    394 		ips->is_die = ntohl(ips->is_die);
    395 		ips->is_pass = ntohl(ips->is_pass);
    396 		ips->is_flags = ntohl(ips->is_flags);
    397 		ips->is_opt[0] = ntohl(ips->is_opt[0]);
    398 		ips->is_opt[1] = ntohl(ips->is_opt[1]);
    399 		ips->is_optmsk[0] = ntohl(ips->is_optmsk[0]);
    400 		ips->is_optmsk[1] = ntohl(ips->is_optmsk[1]);
    401 		ips->is_sec = ntohs(ips->is_sec);
    402 		ips->is_secmsk = ntohs(ips->is_secmsk);
    403 		ips->is_auth = ntohs(ips->is_auth);
    404 		ips->is_authmsk = ntohs(ips->is_authmsk);
    405 		ips->is_s0[0] = ntohl(ips->is_s0[0]);
    406 		ips->is_s0[1] = ntohl(ips->is_s0[1]);
    407 		ips->is_smsk[0] = ntohl(ips->is_smsk[0]);
    408 		ips->is_smsk[1] = ntohl(ips->is_smsk[1]);
    409 	}
    410 }
    411 # else /* !defined(sparc) && !defined(__hppa) */
    412 #  define	ipf_sync_tcporder(x,y)
    413 #  define	ipf_sync_natorder(x,y)
    414 #  define	ipf_sync_storder(x,y)
    415 # endif /* !defined(sparc) && !defined(__hppa) */
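
         /*
          * The *order() helpers above convert the multi-byte fields to network
          * byte order when way != 0 (before an entry is copied into the sync log
          * by ipf_sync_new) and back to host order when way == 0 (after an entry
          * has been received via ipf_sync_write), so each field makes the round
          * trip unchanged, e.g.:
          *
          *	td->td_end = htonl(td->td_end);		sending side, way == 1
          *	td->td_end = ntohl(td->td_end);		receiving side, way == 0
          *
          * On sparc and hppa (big endian platforms) the conversions are identity
          * operations and are therefore compiled out above.
          */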
    416 
    417 
    418 /* ------------------------------------------------------------------------ */
    419 /* Function:    ipf_sync_write                                              */
    420 /* Returns:     int    - 0 == success, else error value.                    */
    421 /* Parameters:  uio(I) - pointer to information about data to write         */
    422 /*                                                                          */
    423 /* Moves data from user space into the kernel and uses it for updating data */
    424 /* structures in the state/NAT tables.                                      */
    425 /* ------------------------------------------------------------------------ */
    426 int
    427 ipf_sync_write(ipf_main_softc_t *softc, struct uio *uio)
    428 {
    429 	ipf_sync_softc_t *softs = softc->ipf_sync_soft;
    430 	synchdr_t sh;
    431 
    432 	/*
     433 	 * THIS MUST BE SUFFICIENTLY LARGE TO STORE
     434 	 * ANY POSSIBLE DATA TYPE
    435 	 */
    436 	char data[2048];
    437 
    438 	int err = 0;
    439 
    440 #  if BSD_GE_YEAR(199306) || defined(__FreeBSD__) || defined(__osf__)
    441 	uio->uio_rw = UIO_WRITE;
    442 #  endif
    443 
    444 	/* Try to get bytes */
    445 	while (uio->uio_resid > 0) {
    446 
    447 		if (uio->uio_resid >= sizeof(sh)) {
    448 
    449 			err = UIOMOVE((void *)&sh, sizeof(sh), UIO_WRITE, uio);
    450 
    451 			if (err) {
    452 				if (softs->ipf_sync_debug > 2)
    453 					printf("uiomove(header) failed: %d\n",
    454 						err);
    455 				return err;
    456 			}
    457 
    458 			/* convert to host order */
    459 			sh.sm_magic = ntohl(sh.sm_magic);
    460 			sh.sm_len = ntohl(sh.sm_len);
    461 			sh.sm_num = ntohl(sh.sm_num);
    462 
    463 			if (softs->ipf_sync_debug > 8)
    464 				printf("[%d] Read v:%d p:%d cmd:%d table:%d rev:%d len:%d magic:%x\n",
    465 					sh.sm_num, sh.sm_v, sh.sm_p, sh.sm_cmd,
    466 					sh.sm_table, sh.sm_rev, sh.sm_len,
    467 					sh.sm_magic);
    468 
    469 			if (sh.sm_magic != SYNHDRMAGIC) {
    470 				if (softs->ipf_sync_debug > 2)
    471 					printf("uiomove(header) invalid %s\n",
    472 						"magic");
    473 				IPFERROR(110001);
    474 				return EINVAL;
    475 			}
    476 
    477 			if (sh.sm_v != 4 && sh.sm_v != 6) {
    478 				if (softs->ipf_sync_debug > 2)
    479 					printf("uiomove(header) invalid %s\n",
    480 						"protocol");
    481 				IPFERROR(110002);
    482 				return EINVAL;
    483 			}
    484 
    485 			if (sh.sm_cmd > SMC_MAXCMD) {
    486 				if (softs->ipf_sync_debug > 2)
    487 					printf("uiomove(header) invalid %s\n",
    488 						"command");
    489 				IPFERROR(110003);
    490 				return EINVAL;
    491 			}
    492 
    493 
    494 			if (sh.sm_table > SMC_MAXTBL) {
    495 				if (softs->ipf_sync_debug > 2)
    496 					printf("uiomove(header) invalid %s\n",
    497 						"table");
    498 				IPFERROR(110004);
    499 				return EINVAL;
    500 			}
    501 
    502 		} else {
     503 			/* insufficient data, wait until next call */
     504 			if (softs->ipf_sync_debug > 2)
     505 				printf("uiomove(header) insufficient data\n");
     506 			IPFERROR(110005);
     507 			return EAGAIN;
     508 		}
    509 
    510 
    511 		/*
    512 		 * We have a header, so try to read the amount of data
    513 		 * needed for the request
    514 		 */
    515 
    516 		/* not supported */
    517 		if (sh.sm_len == 0) {
    518 			if (softs->ipf_sync_debug > 2)
     519 				printf("uiomove(data) zero length %s\n",
     520 					"not supported");
    521 			IPFERROR(110006);
    522 			return EINVAL;
    523 		}
    524 
    525 		if (uio->uio_resid >= sh.sm_len) {
    526 
    527 			err = UIOMOVE((void *)data, sh.sm_len, UIO_WRITE, uio);
    528 
    529 			if (err) {
    530 				if (softs->ipf_sync_debug > 2)
    531 					printf("uiomove(data) failed: %d\n",
    532 						err);
    533 				return err;
    534 			}
    535 
    536 			if (softs->ipf_sync_debug > 7)
    537 				printf("uiomove(data) %d bytes read\n",
    538 					sh.sm_len);
    539 
    540 			if (sh.sm_table == SMC_STATE)
    541 				err = ipf_sync_state(softc, &sh, data);
    542 			else if (sh.sm_table == SMC_NAT)
    543 				err = ipf_sync_nat(softc, &sh, data);
    544 			if (softs->ipf_sync_debug > 7)
    545 				printf("[%d] Finished with error %d\n",
    546 					sh.sm_num, err);
    547 
    548 		} else {
    549 			/* insufficient data, wait until next call */
    550 			if (softs->ipf_sync_debug > 2)
    551 				printf("uiomove(data) %s %d bytes, got %zu\n",
    552 					"insufficient data, need",
    553 					sh.sm_len, uio->uio_resid);
    554 			IPFERROR(110007);
    555 			return EAGAIN;
    556 		}
    557 	}
    558 
    559 	/* no more data */
    560 	return 0;
    561 }
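
         /*
          * What a user daemon feeds into ipf_sync_write() is a stream of records,
          * each a synchdr_t followed by sm_len bytes of payload, with sm_magic,
          * sm_len and sm_num in network byte order.  The header and its payload
          * must both be covered by a single write, otherwise the EAGAIN paths
          * above are taken and the partially consumed header is lost.  A rough
          * sketch of the sending side; the payload shown (an ipstate_t for an
          * SMC_STATE create) matches what ipf_sync_state() expects in "data",
          * while the variables and file descriptor are illustrative only:
          *
          *	char buf[sizeof(synchdr_t) + sizeof(ipstate_t)];
          *	synchdr_t sh;
          *	ipstate_t ips;			filled in elsewhere
          *	u_32_t num;			sync number of the entry
          *
          *	bzero((char *)&sh, sizeof(sh));
          *	sh.sm_magic = htonl(SYNHDRMAGIC);
          *	sh.sm_v = 4;
          *	sh.sm_p = IPPROTO_TCP;
          *	sh.sm_cmd = SMC_CREATE;
          *	sh.sm_table = SMC_STATE;
          *	sh.sm_num = htonl(num);
          *	sh.sm_len = htonl(sizeof(ipstate_t));
          *	bcopy((char *)&sh, buf, sizeof(sh));
          *	bcopy((char *)&ips, buf + sizeof(sh), sizeof(ips));
          *	(void) write(syncfd, buf, sizeof(buf));
          */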
    562 
    563 
    564 /* ------------------------------------------------------------------------ */
    565 /* Function:    ipf_sync_read                                               */
    566 /* Returns:     int    - 0 == success, else error value.                    */
    567 /* Parameters:  uio(O) - pointer to information about where to store data   */
    568 /*                                                                          */
    569 /* This function is called when a user program wants to read some data      */
    570 /* for pending state/NAT updates.  If no data is available, the caller is   */
    571 /* put to sleep, pending a wakeup from the "lower half" of this code.       */
    572 /* ------------------------------------------------------------------------ */
    573 int
    574 ipf_sync_read(ipf_main_softc_t *softc, struct uio *uio)
    575 {
    576 	ipf_sync_softc_t *softs = softc->ipf_sync_soft;
    577 	syncupdent_t *su;
    578 	synclogent_t *sl;
    579 	int err = 0;
    580 
    581 	if ((uio->uio_resid & 3) || (uio->uio_resid < 8)) {
    582 		IPFERROR(110008);
    583 		return EINVAL;
    584 	}
    585 
    586 #  if BSD_GE_YEAR(199306) || defined(__FreeBSD__) || defined(__osf__)
    587 	uio->uio_rw = UIO_READ;
    588 #  endif
    589 
    590 	MUTEX_ENTER(&softs->ipsl_mutex);
    591 	while ((softs->sl_tail == softs->sl_idx) &&
    592 	       (softs->su_tail == softs->su_idx)) {
    593 #  if defined(_KERNEL)
    594 #   if SOLARIS
    595 		if (!cv_wait_sig(&softs->ipslwait, &softs->ipsl_mutex.ipf_lk)) {
    596 			MUTEX_EXIT(&softs->ipsl_mutex);
    597 			IPFERROR(110009);
    598 			return EINTR;
    599 		}
    600 #   else
    601 #    ifdef __hpux
    602 		{
    603 		lock_t *l;
    604 
    605 		l = get_sleep_lock(&softs->sl_tail);
    606 		err = sleep(&softs->sl_tail, PZERO+1);
    607 		if (err) {
    608 			MUTEX_EXIT(&softs->ipsl_mutex);
    609 			IPFERROR(110010);
    610 			return EINTR;
    611 		}
    612 		spinunlock(l);
    613 		}
    614 #    else /* __hpux */
    615 #     ifdef __osf__
    616 		err = mpsleep(&softs->sl_tail, PSUSP|PCATCH,  "ipl sleep", 0,
    617 			      &softs->ipsl_mutex, MS_LOCK_SIMPLE);
    618 		if (err) {
    619 			IPFERROR(110011);
    620 			return EINTR;
    621 		}
    622 #     else
    623 		MUTEX_EXIT(&softs->ipsl_mutex);
    624 		err = SLEEP(&softs->sl_tail, "ipl sleep");
    625 		if (err) {
    626 			IPFERROR(110012);
    627 			return EINTR;
    628 		}
    629 		MUTEX_ENTER(&softs->ipsl_mutex);
    630 #     endif /* __osf__ */
    631 #    endif /* __hpux */
    632 #   endif /* SOLARIS */
    633 #  endif /* _KERNEL */
    634 	}
    635 
    636 	while ((softs->sl_tail < softs->sl_idx) &&
    637 	       (uio->uio_resid > sizeof(*sl))) {
    638 		sl = softs->synclog + softs->sl_tail++;
    639 		MUTEX_EXIT(&softs->ipsl_mutex);
    640 		err = UIOMOVE(sl, sizeof(*sl), UIO_READ, uio);
    641 		if (err != 0)
    642 			goto goterror;
    643 		MUTEX_ENTER(&softs->ipsl_mutex);
    644 	}
    645 
    646 	while ((softs->su_tail < softs->su_idx) &&
    647 	       (uio->uio_resid > sizeof(*su))) {
    648 		su = softs->syncupd + softs->su_tail;
    649 		softs->su_tail++;
    650 		MUTEX_EXIT(&softs->ipsl_mutex);
    651 		err = UIOMOVE(su, sizeof(*su), UIO_READ, uio);
    652 		if (err != 0)
    653 			goto goterror;
    654 		MUTEX_ENTER(&softs->ipsl_mutex);
    655 		if (su->sup_hdr.sm_sl != NULL)
    656 			su->sup_hdr.sm_sl->sl_idx = -1;
    657 	}
    658 	if (softs->sl_tail == softs->sl_idx)
    659 		softs->sl_tail = softs->sl_idx = 0;
    660 	if (softs->su_tail == softs->su_idx)
    661 		softs->su_tail = softs->su_idx = 0;
    662 	MUTEX_EXIT(&softs->ipsl_mutex);
    663 goterror:
    664 	return err;
    665 }
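
         /*
          * A rough sketch of the consuming side: a daemon reads whole
          * synclogent_t/syncupdent_t records from the sync device.  The supplied
          * buffer must be a multiple of 4 bytes and at least 8 bytes long (per
          * the check above) and the read blocks until ipf_sync_wakeup() reports
          * that something has been queued.  The device path and the handling of
          * the data are assumptions, not part of this file:
          *
          *	char buf[sizeof(synclogent_t) * 4];
          *	int fd = open("/dev/ipsync", O_RDONLY);
          *	ssize_t n;
          *
          *	while ((n = read(fd, buf, sizeof(buf))) > 0) {
          *		... forward the n bytes to the peer host ...
          *	}
          */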
    666 
    667 
    668 /* ------------------------------------------------------------------------ */
    669 /* Function:    ipf_sync_state                                              */
    670 /* Returns:     int    - 0 == success, else error value.                    */
     671 /* Parameters:  sp(I)   - pointer to sync packet data header                */
     672 /*              data(I) - pointer to data that followed the sync header     */
     673 /*                                                                          */
     674 /* Updates the state table according to information passed in the sync      */
     675 /* header.  The accompanying data is interpreted according to the contents  */
     676 /* of the sync header.  This function can                                   */
    677 /* create a new state entry or update one.  Deletion is left to the state   */
    678 /* structures being timed out correctly.                                    */
    679 /* ------------------------------------------------------------------------ */
    680 static int
    681 ipf_sync_state(ipf_main_softc_t *softc, synchdr_t *sp, void *data)
    682 {
    683 	ipf_sync_softc_t *softs = softc->ipf_sync_soft;
    684 	synctcp_update_t su;
    685 	ipstate_t *is, sn;
    686 	synclist_t *sl;
    687 	frentry_t *fr;
    688 	u_int hv;
    689 	int err = 0;
    690 
    691 	hv = sp->sm_num & (softs->ipf_sync_state_tab_sz - 1);
    692 
    693 	switch (sp->sm_cmd)
    694 	{
    695 	case SMC_CREATE :
    696 
    697 		bcopy(data, &sn, sizeof(sn));
    698 		KMALLOC(is, ipstate_t *);
    699 		if (is == NULL) {
    700 			IPFERROR(110013);
    701 			err = ENOMEM;
    702 			break;
    703 		}
    704 
    705 		KMALLOC(sl, synclist_t *);
    706 		if (sl == NULL) {
    707 			IPFERROR(110014);
    708 			err = ENOMEM;
    709 			KFREE(is);
    710 			break;
    711 		}
    712 
    713 		bzero((char *)is, offsetof(ipstate_t, is_die));
    714 		bcopy((char *)&sn.is_die, (char *)&is->is_die,
    715 		      sizeof(*is) - offsetof(ipstate_t, is_die));
    716 		ipf_sync_storder(0, is);
    717 
    718 		/*
    719 		 * We need to find the same rule on the slave as was used on
    720 		 * the master to create this state entry.
    721 		 */
    722 		READ_ENTER(&softc->ipf_mutex);
    723 		fr = ipf_getrulen(softc, IPL_LOGIPF, sn.is_group, sn.is_rulen);
    724 		if (fr != NULL) {
    725 			MUTEX_ENTER(&fr->fr_lock);
    726 			fr->fr_ref++;
    727 			fr->fr_statecnt++;
    728 			MUTEX_EXIT(&fr->fr_lock);
    729 		}
    730 		RWLOCK_EXIT(&softc->ipf_mutex);
    731 
    732 		if (softs->ipf_sync_debug > 4)
    733 			printf("[%d] Filter rules = %p\n", sp->sm_num, fr);
    734 
    735 		is->is_rule = fr;
    736 		is->is_sync = sl;
    737 
    738 		sl->sl_idx = -1;
    739 		sl->sl_ips = is;
    740 		bcopy(sp, &sl->sl_hdr, sizeof(struct synchdr));
    741 
    742 		WRITE_ENTER(&softs->ipf_syncstate);
    743 		WRITE_ENTER(&softc->ipf_state);
    744 
    745 		sl->sl_pnext = softs->syncstatetab + hv;
    746 		sl->sl_next = softs->syncstatetab[hv];
    747 		if (softs->syncstatetab[hv] != NULL)
    748 			softs->syncstatetab[hv]->sl_pnext = &sl->sl_next;
    749 		softs->syncstatetab[hv] = sl;
    750 		MUTEX_DOWNGRADE(&softs->ipf_syncstate);
    751 		ipf_state_insert(softc, is, sp->sm_rev);
    752 		/*
    753 		 * Do not initialise the interface pointers for the state
    754 		 * entry as the full complement of interface names may not
    755 		 * be present.
    756 		 *
    757 		 * Put this state entry on its timeout queue.
    758 		 */
    759 		/*fr_setstatequeue(is, sp->sm_rev);*/
    760 		break;
    761 
    762 	case SMC_UPDATE :
    763 		bcopy(data, &su, sizeof(su));
    764 
    765 		if (softs->ipf_sync_debug > 4)
    766 			printf("[%d] Update age %lu state %d/%d \n",
    767 				sp->sm_num, su.stu_age, su.stu_state[0],
    768 				su.stu_state[1]);
    769 
    770 		READ_ENTER(&softs->ipf_syncstate);
    771 		for (sl = softs->syncstatetab[hv]; (sl != NULL);
    772 		     sl = sl->sl_next)
    773 			if (sl->sl_hdr.sm_num == sp->sm_num)
    774 				break;
    775 		if (sl == NULL) {
    776 			if (softs->ipf_sync_debug > 1)
    777 				printf("[%d] State not found - can't update\n",
    778 					sp->sm_num);
    779 			RWLOCK_EXIT(&softs->ipf_syncstate);
    780 			IPFERROR(110015);
    781 			err = ENOENT;
    782 			break;
    783 		}
    784 
    785 		READ_ENTER(&softc->ipf_state);
    786 
    787 		if (softs->ipf_sync_debug > 6)
    788 			printf("[%d] Data from state v:%d p:%d cmd:%d table:%d rev:%d\n",
    789 				sp->sm_num, sl->sl_hdr.sm_v, sl->sl_hdr.sm_p,
    790 				sl->sl_hdr.sm_cmd, sl->sl_hdr.sm_table,
    791 				sl->sl_hdr.sm_rev);
    792 
    793 		is = sl->sl_ips;
    794 
    795 		MUTEX_ENTER(&is->is_lock);
    796 		switch (sp->sm_p)
    797 		{
    798 		case IPPROTO_TCP :
    799 			/* XXX FV --- shouldn't we do ntohl/htonl???? XXX */
    800 			is->is_send = su.stu_data[0].td_end;
    801 			is->is_maxsend = su.stu_data[0].td_maxend;
    802 			is->is_maxswin = su.stu_data[0].td_maxwin;
    803 			is->is_state[0] = su.stu_state[0];
    804 			is->is_dend = su.stu_data[1].td_end;
    805 			is->is_maxdend = su.stu_data[1].td_maxend;
    806 			is->is_maxdwin = su.stu_data[1].td_maxwin;
    807 			is->is_state[1] = su.stu_state[1];
    808 			break;
    809 		default :
    810 			break;
    811 		}
    812 
    813 		if (softs->ipf_sync_debug > 6)
    814 			printf("[%d] Setting timers for state\n", sp->sm_num);
    815 
    816 		ipf_state_setqueue(softc, is, sp->sm_rev);
    817 
    818 		MUTEX_EXIT(&is->is_lock);
    819 		break;
    820 
    821 	default :
    822 		IPFERROR(110016);
    823 		err = EINVAL;
    824 		break;
    825 	}
    826 
    827 	if (err == 0) {
    828 		RWLOCK_EXIT(&softc->ipf_state);
    829 		RWLOCK_EXIT(&softs->ipf_syncstate);
    830 	}
    831 
    832 	if (softs->ipf_sync_debug > 6)
    833 		printf("[%d] Update completed with error %d\n",
    834 			sp->sm_num, err);
    835 
    836 	return err;
    837 }
    838 
    839 
    840 /* ------------------------------------------------------------------------ */
    841 /* Function:    ipf_sync_del                                                */
    842 /* Returns:     Nil                                                         */
    843 /* Parameters:  sl(I) - pointer to synclist object to delete                */
    844 /*                                                                          */
    845 /* Deletes an object from the synclist.                                     */
    846 /* ------------------------------------------------------------------------ */
    847 static void
    848 ipf_sync_del(ipf_sync_softc_t *softs, synclist_t *sl)
    849 {
    850 	*sl->sl_pnext = sl->sl_next;
    851 	if (sl->sl_next != NULL)
    852 		sl->sl_next->sl_pnext = sl->sl_pnext;
    853 	if (sl->sl_idx != -1)
    854 		softs->syncupd[sl->sl_idx].sup_hdr.sm_sl = NULL;
    855 }
    856 
    857 
    858 /* ------------------------------------------------------------------------ */
    859 /* Function:    ipf_sync_del_state                                          */
    860 /* Returns:     Nil                                                         */
    861 /* Parameters:  sl(I) - pointer to synclist object to delete                */
    862 /*                                                                          */
     863 /* Deletes an object from the synclist state table and frees its memory.    */
    864 /* ------------------------------------------------------------------------ */
    865 void
    866 ipf_sync_del_state(void *arg, synclist_t *sl)
    867 {
    868 	ipf_sync_softc_t *softs = arg;
    869 
    870 	WRITE_ENTER(&softs->ipf_syncstate);
    871 	ipf_sync_del(softs, sl);
    872 	RWLOCK_EXIT(&softs->ipf_syncstate);
    873 	KFREE(sl);
    874 }
    875 
    876 
    877 /* ------------------------------------------------------------------------ */
    878 /* Function:    ipf_sync_del_nat                                            */
    879 /* Returns:     Nil                                                         */
    880 /* Parameters:  sl(I) - pointer to synclist object to delete                */
    881 /*                                                                          */
     882 /* Deletes an object from the synclist nat table and frees its memory.      */
    883 /* ------------------------------------------------------------------------ */
    884 void
    885 ipf_sync_del_nat(void *arg, synclist_t *sl)
    886 {
    887 	ipf_sync_softc_t *softs = arg;
    888 
    889 	WRITE_ENTER(&softs->ipf_syncnat);
    890 	ipf_sync_del(softs, sl);
    891 	RWLOCK_EXIT(&softs->ipf_syncnat);
    892 	KFREE(sl);
    893 }
    894 
    895 
    896 /* ------------------------------------------------------------------------ */
    897 /* Function:    ipf_sync_nat                                                */
    898 /* Returns:     int    - 0 == success, else error value.                    */
    899 /* Parameters:  sp(I)  - pointer to sync packet data header                 */
     900 /*              data(I) - pointer to data that followed the sync header     */
     901 /*                                                                          */
     902 /* Updates the NAT table according to information passed in the sync        */
     903 /* header.  The accompanying data is interpreted according to the contents  */
     904 /* of the sync header.  This function can                                   */
    905 /* create a new NAT entry or update one.  Deletion is left to the NAT       */
    906 /* structures being timed out correctly.                                    */
    907 /* ------------------------------------------------------------------------ */
    908 static int
    909 ipf_sync_nat(ipf_main_softc_t *softc, synchdr_t *sp, void *data)
    910 {
    911 	ipf_sync_softc_t *softs = softc->ipf_sync_soft;
    912 	syncupdent_t su;
    913 	nat_t *n, *nat;
    914 	synclist_t *sl;
    915 	u_int hv = 0;
     916 	int err = 0;
    917 
    918 	READ_ENTER(&softs->ipf_syncnat);
    919 
    920 	switch (sp->sm_cmd)
    921 	{
    922 	case SMC_CREATE :
    923 		KMALLOC(n, nat_t *);
    924 		if (n == NULL) {
    925 			IPFERROR(110017);
    926 			err = ENOMEM;
    927 			break;
    928 		}
    929 
    930 		KMALLOC(sl, synclist_t *);
    931 		if (sl == NULL) {
    932 			IPFERROR(110018);
    933 			err = ENOMEM;
    934 			KFREE(n);
    935 			break;
    936 		}
    937 
    938 		nat = (nat_t *)data;
    939 		bzero((char *)n, offsetof(nat_t, nat_age));
    940 		bcopy((char *)&nat->nat_age, (char *)&n->nat_age,
    941 		      sizeof(*n) - offsetof(nat_t, nat_age));
    942 		ipf_sync_natorder(0, n);
    943 		n->nat_sync = sl;
     944 		n->nat_rev = sl->sl_rev = sp->sm_rev;
    945 
    946 		sl->sl_idx = -1;
    947 		sl->sl_ipn = n;
     948 		sl->sl_num = sp->sm_num;	/* already host order from ipf_sync_write */
    949 
    950 		WRITE_ENTER(&softc->ipf_nat);
    951 		sl->sl_pnext = softs->syncnattab + hv;
    952 		sl->sl_next = softs->syncnattab[hv];
    953 		if (softs->syncnattab[hv] != NULL)
    954 			softs->syncnattab[hv]->sl_pnext = &sl->sl_next;
    955 		softs->syncnattab[hv] = sl;
    956 		(void) ipf_nat_insert(softc, softc->ipf_nat_soft, n);
    957 		RWLOCK_EXIT(&softc->ipf_nat);
    958 		break;
    959 
    960 	case SMC_UPDATE :
    961 		bcopy(data, &su, sizeof(su));
    962 
    963 		for (sl = softs->syncnattab[hv]; (sl != NULL);
    964 		     sl = sl->sl_next)
    965 			if (sl->sl_hdr.sm_num == sp->sm_num)
    966 				break;
    967 		if (sl == NULL) {
    968 			IPFERROR(110019);
    969 			err = ENOENT;
    970 			break;
    971 		}
    972 
    973 		READ_ENTER(&softc->ipf_nat);
    974 
    975 		nat = sl->sl_ipn;
    976 		nat->nat_rev = sl->sl_rev;
    977 
    978 		MUTEX_ENTER(&nat->nat_lock);
    979 		ipf_nat_setqueue(softc, softc->ipf_nat_soft, nat);
    980 		MUTEX_EXIT(&nat->nat_lock);
    981 
    982 		RWLOCK_EXIT(&softc->ipf_nat);
    983 
    984 		break;
    985 
    986 	default :
    987 		IPFERROR(110020);
    988 		err = EINVAL;
    989 		break;
    990 	}
    991 
    992 	RWLOCK_EXIT(&softs->ipf_syncnat);
     993 	return err;
    994 }
    995 
    996 
    997 /* ------------------------------------------------------------------------ */
    998 /* Function:    ipf_sync_new                                                */
    999 /* Returns:     synclist_t* - NULL == failure, else pointer to new synclist */
   1000 /*                            data structure.                               */
   1001 /* Parameters:  tab(I) - type of synclist_t to create                       */
   1002 /*              fin(I) - pointer to packet information                      */
   1003 /*              ptr(I) - pointer to owning object                           */
   1004 /*                                                                          */
   1005 /* Creates a new sync table entry and notifies any sleepers that it's there */
   1006 /* waiting to be processed.                                                 */
   1007 /* ------------------------------------------------------------------------ */
   1008 synclist_t *
   1009 ipf_sync_new(ipf_main_softc_t *softc, int tab, fr_info_t *fin, void *ptr)
   1010 {
   1011 	ipf_sync_softc_t *softs = softc->ipf_sync_soft;
   1012 	synclist_t *sl, *ss;
   1013 	synclogent_t *sle;
   1014 	u_int hv, sz;
   1015 
   1016 	if (softs->sl_idx == softs->ipf_sync_log_sz)
   1017 		return NULL;
   1018 	KMALLOC(sl, synclist_t *);
   1019 	if (sl == NULL)
   1020 		return NULL;
   1021 
   1022 	MUTEX_ENTER(&softs->ipf_syncadd);
   1023 	/*
   1024 	 * Get a unique number for this synclist_t.  The number is only meant
   1025 	 * to be unique for the lifetime of the structure and may be reused
   1026 	 * later.
   1027 	 */
   1028 	softs->ipf_sync_num++;
   1029 	if (softs->ipf_sync_num == 0) {
   1030 		softs->ipf_sync_num = 1;
   1031 		softs->ipf_sync_wrap++;
   1032 	}
   1033 
   1034 	/*
    1035 	 * Use the sync number of the object as the hash key.  This should end
    1036 	 * up with a relatively even distribution over time.
    1037 	 * XXX - an attacker could launch a DoS attack, of sorts, if they are
    1038 	 * the only one causing new table entries, by only keeping open every
    1039 	 * nth connection they make, where n is a value in the interval
    1040 	 * [0, SYNC_STATETABSZ-1].
   1041 	 */
   1042 	switch (tab)
   1043 	{
   1044 	case SMC_STATE :
   1045 		hv = softs->ipf_sync_num & (softs->ipf_sync_state_tab_sz - 1);
   1046 		while (softs->ipf_sync_wrap != 0) {
   1047 			for (ss = softs->syncstatetab[hv]; ss; ss = ss->sl_next)
   1048 				if (ss->sl_hdr.sm_num == softs->ipf_sync_num)
   1049 					break;
   1050 			if (ss == NULL)
   1051 				break;
   1052 			softs->ipf_sync_num++;
   1053 			hv = softs->ipf_sync_num &
   1054 			     (softs->ipf_sync_state_tab_sz - 1);
   1055 		}
   1056 		sl->sl_pnext = softs->syncstatetab + hv;
   1057 		sl->sl_next = softs->syncstatetab[hv];
   1058 		softs->syncstatetab[hv] = sl;
   1059 		break;
   1060 
   1061 	case SMC_NAT :
   1062 		hv = softs->ipf_sync_num & (softs->ipf_sync_nat_tab_sz - 1);
   1063 		while (softs->ipf_sync_wrap != 0) {
   1064 			for (ss = softs->syncnattab[hv]; ss; ss = ss->sl_next)
   1065 				if (ss->sl_hdr.sm_num == softs->ipf_sync_num)
   1066 					break;
   1067 			if (ss == NULL)
   1068 				break;
   1069 			softs->ipf_sync_num++;
   1070 			hv = softs->ipf_sync_num &
   1071 			     (softs->ipf_sync_nat_tab_sz - 1);
   1072 		}
   1073 		sl->sl_pnext = softs->syncnattab + hv;
   1074 		sl->sl_next = softs->syncnattab[hv];
   1075 		softs->syncnattab[hv] = sl;
   1076 		break;
   1077 
   1078 	default :
   1079 		break;
   1080 	}
   1081 
   1082 	sl->sl_num = softs->ipf_sync_num;
   1083 	MUTEX_EXIT(&softs->ipf_syncadd);
   1084 
   1085 	sl->sl_magic = htonl(SYNHDRMAGIC);
   1086 	sl->sl_v = fin->fin_v;
   1087 	sl->sl_p = fin->fin_p;
   1088 	sl->sl_cmd = SMC_CREATE;
   1089 	sl->sl_idx = -1;
   1090 	sl->sl_table = tab;
   1091 	sl->sl_rev = fin->fin_rev;
   1092 	if (tab == SMC_STATE) {
   1093 		sl->sl_ips = ptr;
   1094 		sz = sizeof(*sl->sl_ips);
   1095 	} else if (tab == SMC_NAT) {
   1096 		sl->sl_ipn = ptr;
   1097 		sz = sizeof(*sl->sl_ipn);
   1098 	} else {
   1099 		ptr = NULL;
   1100 		sz = 0;
   1101 	}
   1102 	sl->sl_len = sz;
   1103 
   1104 	/*
   1105 	 * Create the log entry to be read by a user daemon.  When it has been
   1106 	 * finished and put on the queue, send a signal to wakeup any waiters.
   1107 	 */
   1108 	MUTEX_ENTER(&softs->ipf_syncadd);
   1109 	sle = softs->synclog + softs->sl_idx++;
   1110 	bcopy((char *)&sl->sl_hdr, (char *)&sle->sle_hdr,
   1111 	      sizeof(sle->sle_hdr));
   1112 	sle->sle_hdr.sm_num = htonl(sle->sle_hdr.sm_num);
   1113 	sle->sle_hdr.sm_len = htonl(sle->sle_hdr.sm_len);
   1114 	if (ptr != NULL) {
   1115 		bcopy((char *)ptr, (char *)&sle->sle_un, sz);
   1116 		if (tab == SMC_STATE) {
   1117 			ipf_sync_storder(1, &sle->sle_un.sleu_ips);
   1118 		} else if (tab == SMC_NAT) {
   1119 			ipf_sync_natorder(1, &sle->sle_un.sleu_ipn);
   1120 		}
   1121 	}
   1122 	MUTEX_EXIT(&softs->ipf_syncadd);
   1123 
   1124 	ipf_sync_wakeup(softc);
   1125 	return sl;
   1126 }
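
         /*
          * A rough sketch of how the state and NAT code are assumed to hook a new
          * entry up to the sync machinery (the actual callers live in ip_state.c
          * and ip_nat.c, not here):
          *
          *	is->is_sync = ipf_sync_new(softc, SMC_STATE, fin, is);
          *	...
          *	nat->nat_sync = ipf_sync_new(softc, SMC_NAT, fin, nat);
          *
          * and, for outbound packets matching the entry, ipf_sync_update() below
          * is then called with the same table constant and the stored pointer.
          */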
   1127 
   1128 
   1129 /* ------------------------------------------------------------------------ */
   1130 /* Function:    ipf_sync_update                                             */
   1131 /* Returns:     Nil                                                         */
   1132 /* Parameters:  tab(I) - type of synclist_t to create                       */
   1133 /*              fin(I) - pointer to packet information                      */
   1134 /*              sl(I)  - pointer to synchronisation object                  */
   1135 /*                                                                          */
    1136 /* For outbound packets only, create a sync update record for the user      */
    1137 /* process to read.                                                         */
   1138 /* ------------------------------------------------------------------------ */
   1139 void
   1140 ipf_sync_update(ipf_main_softc_t *softc, int tab, fr_info_t *fin,
   1141     synclist_t *sl)
   1142 {
   1143 	ipf_sync_softc_t *softs = softc->ipf_sync_soft;
   1144 	synctcp_update_t *st;
   1145 	syncupdent_t *slu;
   1146 	ipstate_t *ips;
   1147 	nat_t *nat;
   1148 	ipfrwlock_t *lock;
   1149 
   1150 	if (fin->fin_out == 0 || sl == NULL)
   1151 		return;
   1152 
   1153 	if (tab == SMC_STATE) {
   1154 		lock = &softs->ipf_syncstate;
   1155 	} else {
   1156 		lock = &softs->ipf_syncnat;
   1157 	}
   1158 
   1159 	READ_ENTER(lock);
   1160 	if (sl->sl_idx == -1) {
   1161 		MUTEX_ENTER(&softs->ipf_syncadd);
   1162 		slu = softs->syncupd + softs->su_idx;
   1163 		sl->sl_idx = softs->su_idx++;
   1164 		MUTEX_EXIT(&softs->ipf_syncadd);
   1165 
   1166 		bcopy((char *)&sl->sl_hdr, (char *)&slu->sup_hdr,
   1167 		      sizeof(slu->sup_hdr));
   1168 		slu->sup_hdr.sm_magic = htonl(SYNHDRMAGIC);
   1169 		slu->sup_hdr.sm_sl = sl;
   1170 		slu->sup_hdr.sm_cmd = SMC_UPDATE;
   1171 		slu->sup_hdr.sm_table = tab;
   1172 		slu->sup_hdr.sm_num = htonl(sl->sl_num);
   1173 		slu->sup_hdr.sm_len = htonl(sizeof(struct synctcp_update));
   1174 		slu->sup_hdr.sm_rev = fin->fin_rev;
   1175 # if 0
   1176 		if (fin->fin_p == IPPROTO_TCP) {
   1177 			st->stu_len[0] = 0;
   1178 			st->stu_len[1] = 0;
   1179 		}
   1180 # endif
   1181 	} else
   1182 		slu = softs->syncupd + sl->sl_idx;
   1183 
   1184 	/*
   1185 	 * Only TCP has complex timeouts, others just use default timeouts.
   1186 	 * For TCP, we only need to track the connection state and window.
   1187 	 */
   1188 	if (fin->fin_p == IPPROTO_TCP) {
   1189 		st = &slu->sup_tcp;
   1190 		if (tab == SMC_STATE) {
   1191 			ips = sl->sl_ips;
   1192 			st->stu_age = htonl(ips->is_die);
   1193 			st->stu_data[0].td_end = ips->is_send;
   1194 			st->stu_data[0].td_maxend = ips->is_maxsend;
   1195 			st->stu_data[0].td_maxwin = ips->is_maxswin;
   1196 			st->stu_state[0] = ips->is_state[0];
   1197 			st->stu_data[1].td_end = ips->is_dend;
   1198 			st->stu_data[1].td_maxend = ips->is_maxdend;
   1199 			st->stu_data[1].td_maxwin = ips->is_maxdwin;
   1200 			st->stu_state[1] = ips->is_state[1];
   1201 		} else if (tab == SMC_NAT) {
   1202 			nat = sl->sl_ipn;
   1203 			st->stu_age = htonl(nat->nat_age);
   1204 		}
   1205 	}
   1206 	RWLOCK_EXIT(lock);
   1207 
   1208 	ipf_sync_wakeup(softc);
   1209 }
   1210 
   1211 
   1212 /* ------------------------------------------------------------------------ */
   1213 /* Function:    ipf_sync_flush_table                                        */
   1214 /* Returns:     int - number of entries freed by flushing table             */
   1215 /* Parameters:  tabsize(I) - size of the array pointed to by table          */
   1216 /*              table(I)   - pointer to sync table to empty                 */
   1217 /*                                                                          */
   1218 /* Walk through a table of sync entries and free each one.  It is assumed   */
   1219 /* that some lock is held so that nobody else tries to access the table     */
   1220 /* during this cleanup.                                                     */
   1221 /* ------------------------------------------------------------------------ */
   1222 static int
   1223 ipf_sync_flush_table(ipf_sync_softc_t *softs, int tabsize, synclist_t **table)
   1224 {
   1225 	synclist_t *sl;
   1226 	int i, items;
   1227 
   1228 	items = 0;
   1229 
   1230 	for (i = 0; i < tabsize; i++) {
   1231 		while ((sl = table[i]) != NULL) {
   1232 			switch (sl->sl_table) {
   1233 			case SMC_STATE :
   1234 				if (sl->sl_ips != NULL)
   1235 					sl->sl_ips->is_sync = NULL;
   1236 				break;
   1237 			case SMC_NAT :
   1238 				if (sl->sl_ipn != NULL)
   1239 					sl->sl_ipn->nat_sync = NULL;
   1240 				break;
   1241 			}
   1242 			if (sl->sl_next != NULL)
   1243 				sl->sl_next->sl_pnext = sl->sl_pnext;
   1244 			table[i] = sl->sl_next;
   1245 			if (sl->sl_idx != -1)
   1246 				softs->syncupd[sl->sl_idx].sup_hdr.sm_sl = NULL;
   1247 			KFREE(sl);
   1248 			items++;
   1249 		}
   1250 	}
   1251 
   1252 	return items;
   1253 }
   1254 
   1255 
   1256 /* ------------------------------------------------------------------------ */
   1257 /* Function:    ipf_sync_ioctl                                              */
   1258 /* Returns:     int - 0 == success, != 0 == failure                         */
   1259 /* Parameters:  data(I) - pointer to ioctl data                             */
   1260 /*              cmd(I)  - ioctl command integer                             */
   1261 /*              mode(I) - file mode bits used with open                     */
   1262 /*                                                                          */
    1263 /* This function handles the SIOCIPFFL ioctl, which flushes the sync tables */
    1264 /* or resets the sync log; all other ioctls return EINVAL.                  */
   1265 /* ------------------------------------------------------------------------ */
   1266 int
   1267 ipf_sync_ioctl(ipf_main_softc_t *softc, void *data, ioctlcmd_t cmd, int mode,
   1268     int uid, void *ctx)
   1269 {
   1270 	ipf_sync_softc_t *softs = softc->ipf_sync_soft;
   1271 	int error, i;
   1272 	SPL_INT(s);
   1273 
   1274 	switch (cmd)
   1275 	{
    1276 	case SIOCIPFFL :
   1277 		error = BCOPYIN(data, &i, sizeof(i));
   1278 		if (error != 0) {
   1279 			IPFERROR(110023);
   1280 			error = EFAULT;
   1281 			break;
   1282 		}
   1283 
   1284 		switch (i)
   1285 		{
   1286 		case SMC_RLOG :
   1287 			SPL_NET(s);
   1288 			MUTEX_ENTER(&softs->ipsl_mutex);
    1289 			i = (softs->sl_idx - softs->sl_tail) +
    1290 			    (softs->su_idx - softs->su_tail);
   1291 			softs->sl_idx = 0;
   1292 			softs->su_idx = 0;
   1293 			softs->sl_tail = 0;
   1294 			softs->su_tail = 0;
   1295 			MUTEX_EXIT(&softs->ipsl_mutex);
   1296 			SPL_X(s);
   1297 			break;
   1298 
   1299 		case SMC_NAT :
   1300 			SPL_NET(s);
   1301 			WRITE_ENTER(&softs->ipf_syncnat);
    1302 			i = ipf_sync_flush_table(softs, softs->ipf_sync_nat_tab_sz,
    1303 						 softs->syncnattab);
   1304 			RWLOCK_EXIT(&softs->ipf_syncnat);
   1305 			SPL_X(s);
   1306 			break;
   1307 
   1308 		case SMC_STATE :
   1309 			SPL_NET(s);
   1310 			WRITE_ENTER(&softs->ipf_syncstate);
    1311 			i = ipf_sync_flush_table(softs, softs->ipf_sync_state_tab_sz,
    1312 						 softs->syncstatetab);
   1313 			RWLOCK_EXIT(&softs->ipf_syncstate);
   1314 			SPL_X(s);
   1315 			break;
   1316 		}
   1317 
   1318 		error = BCOPYOUT(&i, data, sizeof(i));
   1319 		if (error != 0) {
   1320 			IPFERROR(110022);
   1321 			error = EFAULT;
   1322 		}
   1323 		break;
   1324 
   1325 	default :
   1326 		IPFERROR(110021);
   1327 		error = EINVAL;
   1328 		break;
   1329 	}
   1330 
   1331 	return error;
   1332 }
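
         /*
          * A rough sketch of the SIOCIPFFL usage from user space, assuming fd is
          * an open sync device descriptor:
          *
          *	int arg = SMC_STATE;		or SMC_NAT, or SMC_RLOG
          *
          *	if (ioctl(fd, SIOCIPFFL, &arg) == 0)
          *		printf("%d entries flushed\n", arg);
          *
          * On return the argument holds the number of sync table entries (or, for
          * SMC_RLOG, queued log and update records) that were discarded.
          */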
   1333 
   1334 
   1335 /* ------------------------------------------------------------------------ */
   1336 /* Function:    ipf_sync_canread                                            */
   1337 /* Returns:     int - 0 == success, != 0 == failure                         */
   1338 /* Parameters:  Nil                                                         */
   1339 /*                                                                          */
   1340 /* This function provides input to the poll handler about whether or not    */
   1341 /* there is data waiting to be read from the /dev/ipsync device.            */
   1342 /* ------------------------------------------------------------------------ */
   1343 int
   1344 ipf_sync_canread(void *arg)
   1345 {
   1346 	ipf_sync_softc_t *softs = arg;
   1347 	return !((softs->sl_tail == softs->sl_idx) &&
   1348 		 (softs->su_tail == softs->su_idx));
   1349 }
   1350 
   1351 
   1352 /* ------------------------------------------------------------------------ */
   1353 /* Function:    ipf_sync_canwrite                                           */
   1354 /* Returns:     int - 1 == can always write                                 */
   1355 /* Parameters:  Nil                                                         */
   1356 /*                                                                          */
    1357 /* This function lets the poll handler know that this device is always      */
    1358 /* ready and willing to accept write events.                                */
   1359 /* XXX Maybe this should return false if the sync table is full?            */
   1360 /* ------------------------------------------------------------------------ */
   1361 int
   1362 ipf_sync_canwrite(void *arg)
   1363 {
   1364 	return 1;
   1365 }
   1366 
   1367 
   1368 /* ------------------------------------------------------------------------ */
   1369 /* Function:    ipf_sync_wakeup                                             */
   1370 /* Parameters:  Nil                                                         */
   1371 /* Returns:     Nil                                                         */
   1372 /*                                                                          */
   1373 /* This function implements the heuristics that decide how often to         */
   1374 /* generate a poll wakeup for programs that are waiting for information     */
   1375 /* about when they can do a read on /dev/ipsync.                            */
   1376 /*                                                                          */
   1377 /* There are three different considerations here:                           */
   1378 /* - do not keep a program waiting too long: ipf_sync_wake_interval is the  */
   1379 /*   maximum number of ipf ticks to let pass by;                            */
    1380 /* - do not let the queue of outstanding entries awaiting notification      */
    1381 /*   grow too large (ipf_sync_queue_high_wm is the high water mark);        */
    1382 /* - do not let too many events get collapsed in before deciding that the   */
    1383 /*   other host(s) need an update (ipf_sync_event_high_wm is the high water */
    1384 /*   mark for this counter).                                                 */
   1385 /* ------------------------------------------------------------------------ */
   1386 static void
   1387 ipf_sync_wakeup(ipf_main_softc_t *softc)
   1388 {
   1389 	ipf_sync_softc_t *softs = softc->ipf_sync_soft;
   1390 
   1391 	softs->ipf_sync_events++;
   1392 	if ((softc->ipf_ticks >
   1393 	    softs->ipf_sync_lastwakeup + softs->ipf_sync_wake_interval) ||
   1394 	    (softs->ipf_sync_events > softs->ipf_sync_event_high_wm) ||
    1395 	    ((softs->sl_idx - softs->sl_tail) >
    1396 	     softs->ipf_sync_queue_high_wm) ||
    1397 	    ((softs->su_idx - softs->su_tail) >
    1398 	     softs->ipf_sync_queue_high_wm)) {
   1399 
   1400 		ipf_sync_poll_wakeup(softc);
   1401 	}
   1402 }
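
         /*
          * For example, with an illustrative ipf_sync_wake_interval of 10 ticks,
          * the first event arriving after 10 ticks have elapsed since the last
          * wakeup triggers a new one, as does the rolled-up event count passing
          * ipf_sync_event_high_wm or either ring holding more unread entries than
          * ipf_sync_queue_high_wm.
          */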
   1403 
   1404 
   1405 /* ------------------------------------------------------------------------ */
   1406 /* Function:    ipf_sync_poll_wakeup                                        */
   1407 /* Parameters:  Nil                                                         */
   1408 /* Returns:     Nil                                                         */
   1409 /*                                                                          */
   1410 /* Deliver a poll wakeup and reset counters for two of the three heuristics */
   1411 /* ------------------------------------------------------------------------ */
   1412 static void
   1413 ipf_sync_poll_wakeup(ipf_main_softc_t *softc)
   1414 {
   1415 	ipf_sync_softc_t *softs = softc->ipf_sync_soft;
   1416 
   1417 	softs->ipf_sync_events = 0;
   1418 	softs->ipf_sync_lastwakeup = softc->ipf_ticks;
   1419 
   1420 # ifdef _KERNEL
   1421 #  if SOLARIS
   1422 	MUTEX_ENTER(&softs->ipsl_mutex);
   1423 	cv_signal(&softs->ipslwait);
   1424 	MUTEX_EXIT(&softs->ipsl_mutex);
   1425 	pollwakeup(&softc->ipf_poll_head[IPL_LOGSYNC], POLLIN|POLLRDNORM);
   1426 #  else
   1427 	WAKEUP(&softs->sl_tail, 0);
   1428 	POLLWAKEUP(IPL_LOGSYNC);
   1429 #  endif
   1430 # endif
   1431 }
   1432 
   1433 
   1434 /* ------------------------------------------------------------------------ */
   1435 /* Function:    ipf_sync_expire                                             */
   1436 /* Parameters:  Nil                                                         */
   1437 /* Returns:     Nil                                                         */
   1438 /*                                                                          */
    1439 /* This function is called every ipf_tick.  It implements one of the three  */
    1440 /* heuristics above *IF* there are events waiting.                          */
   1441 /* ------------------------------------------------------------------------ */
   1442 void
   1443 ipf_sync_expire(ipf_main_softc_t *softc)
   1444 {
   1445 	ipf_sync_softc_t *softs = softc->ipf_sync_soft;
   1446 
   1447 	if ((softs->ipf_sync_events > 0) &&
   1448 	    (softc->ipf_ticks >
   1449 	     softs->ipf_sync_lastwakeup + softs->ipf_sync_wake_interval)) {
   1450 		ipf_sync_poll_wakeup(softc);
   1451 	}
   1452 }
   1453