ip_sync.c revision 1.1
      1 /*	$NetBSD: ip_sync.c,v 1.1 2012/03/23 20:37:04 christos Exp $	*/
      2 
      3 /*
      4  * Copyright (C) 2011 by Darren Reed.
      5  *
      6  * See the IPFILTER.LICENCE file for details on licencing.
      7  */
      8 #if defined(KERNEL) || defined(_KERNEL)
      9 # undef KERNEL
     10 # undef _KERNEL
     11 # define        KERNEL	1
     12 # define        _KERNEL	1
     13 #endif
     14 #include <sys/errno.h>
     15 #include <sys/types.h>
     16 #include <sys/param.h>
     17 #include <sys/file.h>
     18 #if !defined(_KERNEL) && !defined(__KERNEL__)
     19 # include <stdio.h>
     20 # include <stdlib.h>
     21 # include <string.h>
     22 # define _KERNEL
     23 # define KERNEL
     24 # ifdef __OpenBSD__
     25 struct file;
     26 # endif
     27 # include <sys/uio.h>
     28 # undef _KERNEL
     29 # undef KERNEL
     30 #else
     31 # include <sys/systm.h>
     32 # if !defined(__SVR4) && !defined(__svr4__)
     33 #  include <sys/mbuf.h>
     34 # endif
     35 # include <sys/select.h>
     36 # if __FreeBSD_version >= 500000
     37 #  include <sys/selinfo.h>
     38 # endif
     39 #endif
     40 #if defined(__NetBSD__) && (__NetBSD_Version__ >= 104000000)
     41 # include <sys/proc.h>
     42 #endif
     43 #if defined(_KERNEL) && (__FreeBSD_version >= 220000)
     44 # include <sys/filio.h>
     45 # include <sys/fcntl.h>
     46 #else
     47 # include <sys/ioctl.h>
     48 #endif
     49 #include <sys/time.h>
     50 #if !defined(linux)
     51 # include <sys/protosw.h>
     52 #endif
     53 #include <sys/socket.h>
     54 #if defined(__SVR4) || defined(__svr4__)
     55 # include <sys/filio.h>
     56 # include <sys/byteorder.h>
     57 # ifdef _KERNEL
     58 #  include <sys/dditypes.h>
     59 # endif
     60 # include <sys/stream.h>
     61 # include <sys/kmem.h>
     62 #endif
     63 
     64 #include <net/if.h>
     65 #ifdef sun
     66 # include <net/af.h>
     67 #endif
     68 #include <netinet/in.h>
     69 #include <netinet/in_systm.h>
     70 #include <netinet/ip.h>
     71 #include <netinet/tcp.h>
     72 #if !defined(linux)
     73 # include <netinet/ip_var.h>
     74 #endif
     75 #if !defined(__hpux) && !defined(linux)
     76 # include <netinet/tcp_fsm.h>
     77 #endif
     78 #include <netinet/udp.h>
     79 #include <netinet/ip_icmp.h>
     80 #include "netinet/ip_compat.h"
     81 #include <netinet/tcpip.h>
     82 #include "netinet/ip_fil.h"
     83 #include "netinet/ip_nat.h"
     84 #include "netinet/ip_frag.h"
     85 #include "netinet/ip_state.h"
     86 #include "netinet/ip_proxy.h"
     87 #include "netinet/ip_sync.h"
     88 #ifdef  USE_INET6
     89 #include <netinet/icmp6.h>
     90 #endif
     91 #if (__FreeBSD_version >= 300000)
     92 # include <sys/malloc.h>
     93 # if defined(_KERNEL) && !defined(IPFILTER_LKM)
     94 #  include <sys/libkern.h>
     95 #  include <sys/systm.h>
     96 # endif
     97 #endif
     98 /* END OF INCLUDES */
     99 
    100 #if !defined(lint)
    101 static const char rcsid[] = "@(#)Id";
    102 #endif
    103 
    104 #define	SYNC_STATETABSZ	256
    105 #define	SYNC_NATTABSZ	256
    106 
    107 typedef struct ipf_sync_softc_s {
    108 	ipfmutex_t	ipf_syncadd;
    109 	ipfmutex_t	ipsl_mutex;
    110 	ipfrwlock_t	ipf_syncstate;
    111 	ipfrwlock_t	ipf_syncnat;
    112 #if SOLARIS && defined(_KERNEL)
    113 	kcondvar_t	ipslwait;
    114 #endif
    115 #if defined(linux) && defined(_KERNEL)
    116 	wait_queue_head_t	sl_tail_linux;
    117 #endif
    118 	synclist_t	**syncstatetab;
    119 	synclist_t	**syncnattab;
    120 	synclogent_t	*synclog;
    121 	syncupdent_t	*syncupd;
    122 	u_int		ipf_sync_num;
    123 	u_int		ipf_sync_wrap;
    124 	u_int		sl_idx;		/* next available sync log entry */
    125 	u_int		su_idx;		/* next available sync update entry */
    126 	u_int		sl_tail;	/* next sync log entry to read */
    127 	u_int		su_tail;	/* next sync update entry to read */
    128 	int		ipf_sync_log_sz;
    129 	int		ipf_sync_nat_tab_sz;
    130 	int		ipf_sync_state_tab_sz;
    131 	int		ipf_sync_debug;
    132 	int		ipf_sync_events;
    133 	u_32_t		ipf_sync_lastwakeup;
    134 	int		ipf_sync_wake_interval;
    135 	int		ipf_sync_event_high_wm;
    136 	int		ipf_sync_queue_high_wm;
    137 	int		ipf_sync_inited;
    138 } ipf_sync_softc_t;
    139 
    140 static int ipf_sync_flush_table __P((ipf_sync_softc_t *, int, synclist_t **));
    141 static void ipf_sync_wakeup __P((ipf_main_softc_t *));
    142 static void ipf_sync_del __P((ipf_sync_softc_t *, synclist_t *));
    143 static void ipf_sync_poll_wakeup __P((ipf_main_softc_t *));
    144 static int ipf_sync_nat __P((ipf_main_softc_t *, synchdr_t *, void *));
    145 static int ipf_sync_state __P((ipf_main_softc_t *, synchdr_t *, void *));
    146 
    147 # if !defined(sparc) && !defined(__hppa)
    148 void ipf_sync_tcporder __P((int, struct tcpdata *));
    149 void ipf_sync_natorder __P((int, struct nat *));
    150 void ipf_sync_storder __P((int, struct ipstate *));
    151 # endif
    152 
    153 
    154 void *
    155 ipf_sync_soft_create(softc)
    156 	ipf_main_softc_t *softc;
    157 {
    158 	ipf_sync_softc_t *softs;
    159 
    160 	KMALLOC(softs, ipf_sync_softc_t *);
    161 	if (softs == NULL)
    162 		return NULL;
    163 
    164 	bzero((char *)softs, sizeof(*softs));
    165 
    166 	softs->ipf_sync_log_sz = SYNCLOG_SZ;
     167 	softs->ipf_sync_nat_tab_sz = SYNC_NATTABSZ;
    168 	softs->ipf_sync_state_tab_sz = SYNC_STATETABSZ;
     169 	softs->ipf_sync_event_high_wm = SYNCLOG_SZ * 90 / 100;	/* 90% */
     170 	softs->ipf_sync_queue_high_wm = SYNCLOG_SZ * 90 / 100;	/* 90% */
    171 
    172 	return softs;
    173 }
    174 
    175 
    176 /* ------------------------------------------------------------------------ */
     177 /* Function:    ipf_sync_soft_init                                          */
     178 /* Returns:     int - 0 == success, negative value == failure               */
     179 /* Parameters:  softc(I) - soft context; arg(I) - sync context pointer      */
    180 /*                                                                          */
    181 /* Initialise all of the locks required for the sync code and initialise    */
    182 /* any data structures, as required.                                        */
    183 /* ------------------------------------------------------------------------ */
    184 int
    185 ipf_sync_soft_init(softc, arg)
    186 	ipf_main_softc_t *softc;
    187 	void *arg;
    188 {
    189 	ipf_sync_softc_t *softs = arg;
    190 
    191 	KMALLOCS(softs->synclog, synclogent_t *,
    192 		 softs->ipf_sync_log_sz * sizeof(*softs->synclog));
    193 	if (softs->synclog == NULL)
    194 		return -1;
    195 	bzero((char *)softs->synclog,
    196 	      softs->ipf_sync_log_sz * sizeof(*softs->synclog));
    197 
    198 	KMALLOCS(softs->syncupd, syncupdent_t *,
    199 		 softs->ipf_sync_log_sz * sizeof(*softs->syncupd));
    200 	if (softs->syncupd == NULL)
    201 		return -2;
    202 	bzero((char *)softs->syncupd,
    203 	      softs->ipf_sync_log_sz * sizeof(*softs->syncupd));
    204 
    205 	KMALLOCS(softs->syncstatetab, synclist_t **,
    206 		 softs->ipf_sync_state_tab_sz * sizeof(*softs->syncstatetab));
    207 	if (softs->syncstatetab == NULL)
    208 		return -3;
    209 	bzero((char *)softs->syncstatetab,
    210 	      softs->ipf_sync_state_tab_sz * sizeof(*softs->syncstatetab));
    211 
    212 	KMALLOCS(softs->syncnattab, synclist_t **,
    213 		 softs->ipf_sync_nat_tab_sz * sizeof(*softs->syncnattab));
    214 	if (softs->syncnattab == NULL)
     215 		return -4;
    216 	bzero((char *)softs->syncnattab,
    217 	      softs->ipf_sync_nat_tab_sz * sizeof(*softs->syncnattab));
    218 
    219 	softs->ipf_sync_num = 1;
    220 	softs->ipf_sync_wrap = 0;
    221 	softs->sl_idx = 0;
    222 	softs->su_idx = 0;
    223 	softs->sl_tail = 0;
    224 	softs->su_tail = 0;
    225 	softs->ipf_sync_events = 0;
    226 	softs->ipf_sync_lastwakeup = 0;
    227 
    228 
    229 # if SOLARIS && defined(_KERNEL)
    230 	cv_init(&softs->ipslwait, "ipsl condvar", CV_DRIVER, NULL);
    231 # endif
    232 	RWLOCK_INIT(&softs->ipf_syncstate, "add things to state sync table");
    233 	RWLOCK_INIT(&softs->ipf_syncnat, "add things to nat sync table");
    234 	MUTEX_INIT(&softs->ipf_syncadd, "add things to sync table");
    235 	MUTEX_INIT(&softs->ipsl_mutex, "read ring lock");
    236 
    237 	softs->ipf_sync_inited = 1;
    238 
    239 	return 0;
    240 }
    241 
    242 
    243 /* ------------------------------------------------------------------------ */
     244 /* Function:    ipf_sync_soft_fini                                          */
     245 /* Returns:     int - always returns 0 (success)                            */
     246 /* Parameters:  softc(I) - soft context; arg(I) - sync context pointer      */
    247 /*                                                                          */
    248 /* Destroy the locks created when initialising and free any memory in use   */
    249 /* with the synchronisation tables.                                         */
    250 /* ------------------------------------------------------------------------ */
    251 int
    252 ipf_sync_soft_fini(softc, arg)
    253 	ipf_main_softc_t *softc;
    254 	void *arg;
    255 {
    256 	ipf_sync_softc_t *softs = arg;
    257 
    258 	if (softs->syncnattab != NULL) {
    259 		ipf_sync_flush_table(softs, softs->ipf_sync_nat_tab_sz,
    260 				     softs->syncnattab);
    261 		KFREES(softs->syncnattab,
    262 		       softs->ipf_sync_nat_tab_sz * sizeof(*softs->syncnattab));
    263 		softs->syncnattab = NULL;
    264 	}
    265 
    266 	if (softs->syncstatetab != NULL) {
    267 		ipf_sync_flush_table(softs, softs->ipf_sync_state_tab_sz,
    268 				     softs->syncstatetab);
    269 		KFREES(softs->syncstatetab,
    270 		       softs->ipf_sync_state_tab_sz *
    271 		       sizeof(*softs->syncstatetab));
    272 		softs->syncstatetab = NULL;
    273 	}
    274 
    275 	if (softs->syncupd != NULL) {
    276 		KFREES(softs->syncupd,
    277 		       softs->ipf_sync_log_sz * sizeof(*softs->syncupd));
    278 		softs->syncupd = NULL;
    279 	}
    280 
    281 	if (softs->synclog != NULL) {
    282 		KFREES(softs->synclog,
    283 		       softs->ipf_sync_log_sz * sizeof(*softs->synclog));
    284 		softs->synclog = NULL;
    285 	}
    286 
    287 	if (softs->ipf_sync_inited == 1) {
    288 		MUTEX_DESTROY(&softs->ipsl_mutex);
    289 		MUTEX_DESTROY(&softs->ipf_syncadd);
    290 		RW_DESTROY(&softs->ipf_syncnat);
    291 		RW_DESTROY(&softs->ipf_syncstate);
    292 		softs->ipf_sync_inited = 0;
    293 	}
    294 
    295 	return 0;
    296 }
    297 
    298 void
    299 ipf_sync_soft_destroy(softc, arg)
    300 	ipf_main_softc_t *softc;
    301 	void *arg;
    302 {
    303 	ipf_sync_softc_t *softs = arg;
    304 
    305 	KFREE(softs);
    306 }
    307 
    308 
    309 # if !defined(sparc) && !defined(__hppa)
    310 /* ------------------------------------------------------------------------ */
    311 /* Function:    ipf_sync_tcporder                                           */
    312 /* Returns:     Nil                                                         */
    313 /* Parameters:  way(I) - direction of byte order conversion.                */
    314 /*              td(IO) - pointer to data to be converted.                   */
    315 /*                                                                          */
    316 /* Do byte swapping on values in the TCP state information structure that   */
     317 /* need to be used at both ends by each host in its native byte order.      */
    318 /* ------------------------------------------------------------------------ */
    319 void
    320 ipf_sync_tcporder(way, td)
    321 	int way;
    322 	tcpdata_t *td;
    323 {
    324 	if (way) {
    325 		td->td_maxwin = htons(td->td_maxwin);
    326 		td->td_end = htonl(td->td_end);
    327 		td->td_maxend = htonl(td->td_maxend);
    328 	} else {
    329 		td->td_maxwin = ntohs(td->td_maxwin);
    330 		td->td_end = ntohl(td->td_end);
    331 		td->td_maxend = ntohl(td->td_maxend);
    332 	}
    333 }
    334 
    335 
    336 /* ------------------------------------------------------------------------ */
    337 /* Function:    ipf_sync_natorder                                           */
    338 /* Returns:     Nil                                                         */
    339 /* Parameters:  way(I)  - direction of byte order conversion.               */
    340 /*              nat(IO) - pointer to data to be converted.                  */
    341 /*                                                                          */
    342 /* Do byte swapping on values in the NAT data structure that need to be     */
     343 /* used at both ends by each host in its native byte order.                 */
    344 /* ------------------------------------------------------------------------ */
    345 void
    346 ipf_sync_natorder(way, n)
    347 	int way;
    348 	nat_t *n;
    349 {
    350 	if (way) {
    351 		n->nat_age = htonl(n->nat_age);
    352 		n->nat_flags = htonl(n->nat_flags);
    353 		n->nat_ipsumd = htonl(n->nat_ipsumd);
    354 		n->nat_use = htonl(n->nat_use);
    355 		n->nat_dir = htonl(n->nat_dir);
    356 	} else {
    357 		n->nat_age = ntohl(n->nat_age);
    358 		n->nat_flags = ntohl(n->nat_flags);
    359 		n->nat_ipsumd = ntohl(n->nat_ipsumd);
    360 		n->nat_use = ntohl(n->nat_use);
    361 		n->nat_dir = ntohl(n->nat_dir);
    362 	}
    363 }
    364 
    365 
    366 /* ------------------------------------------------------------------------ */
    367 /* Function:    ipf_sync_storder                                            */
    368 /* Returns:     Nil                                                         */
    369 /* Parameters:  way(I)  - direction of byte order conversion.               */
    370 /*              ips(IO) - pointer to data to be converted.                  */
    371 /*                                                                          */
    372 /* Do byte swapping on values in the IP state data structure that need to   */
     373 /* be used at both ends by each host in its native byte order.              */
    374 /* ------------------------------------------------------------------------ */
    375 void
    376 ipf_sync_storder(way, ips)
    377 	int way;
    378 	ipstate_t *ips;
    379 {
    380 	ipf_sync_tcporder(way, &ips->is_tcp.ts_data[0]);
    381 	ipf_sync_tcporder(way, &ips->is_tcp.ts_data[1]);
    382 
    383 	if (way) {
    384 		ips->is_hv = htonl(ips->is_hv);
    385 		ips->is_die = htonl(ips->is_die);
    386 		ips->is_pass = htonl(ips->is_pass);
    387 		ips->is_flags = htonl(ips->is_flags);
    388 		ips->is_opt[0] = htonl(ips->is_opt[0]);
    389 		ips->is_opt[1] = htonl(ips->is_opt[1]);
    390 		ips->is_optmsk[0] = htonl(ips->is_optmsk[0]);
    391 		ips->is_optmsk[1] = htonl(ips->is_optmsk[1]);
    392 		ips->is_sec = htons(ips->is_sec);
    393 		ips->is_secmsk = htons(ips->is_secmsk);
    394 		ips->is_auth = htons(ips->is_auth);
    395 		ips->is_authmsk = htons(ips->is_authmsk);
    396 		ips->is_s0[0] = htonl(ips->is_s0[0]);
    397 		ips->is_s0[1] = htonl(ips->is_s0[1]);
    398 		ips->is_smsk[0] = htons(ips->is_smsk[0]);
    399 		ips->is_smsk[1] = htons(ips->is_smsk[1]);
    400 	} else {
    401 		ips->is_hv = ntohl(ips->is_hv);
    402 		ips->is_die = ntohl(ips->is_die);
    403 		ips->is_pass = ntohl(ips->is_pass);
    404 		ips->is_flags = ntohl(ips->is_flags);
    405 		ips->is_opt[0] = ntohl(ips->is_opt[0]);
    406 		ips->is_opt[1] = ntohl(ips->is_opt[1]);
    407 		ips->is_optmsk[0] = ntohl(ips->is_optmsk[0]);
    408 		ips->is_optmsk[1] = ntohl(ips->is_optmsk[1]);
    409 		ips->is_sec = ntohs(ips->is_sec);
    410 		ips->is_secmsk = ntohs(ips->is_secmsk);
    411 		ips->is_auth = ntohs(ips->is_auth);
    412 		ips->is_authmsk = ntohs(ips->is_authmsk);
    413 		ips->is_s0[0] = ntohl(ips->is_s0[0]);
    414 		ips->is_s0[1] = ntohl(ips->is_s0[1]);
    415 		ips->is_smsk[0] = ntohl(ips->is_smsk[0]);
    416 		ips->is_smsk[1] = ntohl(ips->is_smsk[1]);
    417 	}
    418 }
    419 # else /* !defined(sparc) && !defined(__hppa) */
    420 #  define	ipf_sync_tcporder(x,y)
    421 #  define	ipf_sync_natorder(x,y)
    422 #  define	ipf_sync_storder(x,y)
    423 # endif /* !defined(sparc) && !defined(__hppa) */
    424 
    425 
    426 /* ------------------------------------------------------------------------ */
    427 /* Function:    ipf_sync_write                                              */
    428 /* Returns:     int    - 0 == success, else error value.                    */
    429 /* Parameters:  uio(I) - pointer to information about data to write         */
    430 /*                                                                          */
    431 /* Moves data from user space into the kernel and uses it for updating data */
    432 /* structures in the state/NAT tables.                                      */
    433 /* ------------------------------------------------------------------------ */
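/*
 * Illustrative sketch (not compiled): how a userland peer might feed this
 * function through the /dev/ipsync device.  Each record is a synchdr_t
 * whose sm_magic, sm_len and sm_num fields are in network byte order,
 * followed by sm_len bytes of payload.  The helper name, headers and error
 * handling below are assumptions made for the example, not part of ipf.
 */
#if 0
#include <sys/types.h>
#include <arpa/inet.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
/* plus the ipf headers that define synchdr_t and SYNHDRMAGIC */

static int
example_send_record(int fd, synchdr_t *sh, void *payload, size_t len)
{
	char buf[sizeof(synchdr_t) + 2048];
	synchdr_t hdr = *sh;

	if (len == 0 || len > 2048)
		return -1;

	/* The 32bit header fields travel in network byte order. */
	hdr.sm_magic = htonl(SYNHDRMAGIC);
	hdr.sm_len = htonl((uint32_t)len);
	hdr.sm_num = htonl(hdr.sm_num);

	memcpy(buf, &hdr, sizeof(hdr));
	memcpy(buf + sizeof(hdr), payload, len);

	/* One write() delivers header plus payload to ipf_sync_write(). */
	return (write(fd, buf, sizeof(hdr) + len) < 0) ? -1 : 0;
}
#endif
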
    434 int
    435 ipf_sync_write(softc, uio)
    436 	ipf_main_softc_t *softc;
    437 	struct uio *uio;
    438 {
    439 	ipf_sync_softc_t *softs = softc->ipf_sync_soft;
    440 	synchdr_t sh;
    441 
    442 	/*
     443 	 * THIS MUST BE SUFFICIENTLY LARGE TO STORE
    444 	 * ANY POSSIBLE DATA TYPE
    445 	 */
    446 	char data[2048];
    447 
    448 	int err = 0;
    449 
    450 #  if BSD_GE_YEAR(199306) || defined(__FreeBSD__) || defined(__osf__)
    451 	uio->uio_rw = UIO_WRITE;
    452 #  endif
    453 
    454 	/* Try to get bytes */
    455 	while (uio->uio_resid > 0) {
    456 
    457 		if (uio->uio_resid >= sizeof(sh)) {
    458 
    459 			err = UIOMOVE(&sh, sizeof(sh), UIO_WRITE, uio);
    460 
    461 			if (err) {
    462 				if (softs->ipf_sync_debug > 2)
    463 					printf("uiomove(header) failed: %d\n",
    464 						err);
    465 				return err;
    466 			}
    467 
    468 			/* convert to host order */
    469 			sh.sm_magic = ntohl(sh.sm_magic);
    470 			sh.sm_len = ntohl(sh.sm_len);
    471 			sh.sm_num = ntohl(sh.sm_num);
    472 
    473 			if (softs->ipf_sync_debug > 8)
    474 				printf("[%d] Read v:%d p:%d cmd:%d table:%d rev:%d len:%d magic:%x\n",
    475 					sh.sm_num, sh.sm_v, sh.sm_p, sh.sm_cmd,
    476 					sh.sm_table, sh.sm_rev, sh.sm_len,
    477 					sh.sm_magic);
    478 
    479 			if (sh.sm_magic != SYNHDRMAGIC) {
    480 				if (softs->ipf_sync_debug > 2)
    481 					printf("uiomove(header) invalid %s\n",
    482 						"magic");
    483 				IPFERROR(110001);
    484 				return EINVAL;
    485 			}
    486 
    487 			if (sh.sm_v != 4 && sh.sm_v != 6) {
    488 				if (softs->ipf_sync_debug > 2)
    489 					printf("uiomove(header) invalid %s\n",
    490 						"protocol");
    491 				IPFERROR(110002);
    492 				return EINVAL;
    493 			}
    494 
    495 			if (sh.sm_cmd > SMC_MAXCMD) {
    496 				if (softs->ipf_sync_debug > 2)
    497 					printf("uiomove(header) invalid %s\n",
    498 						"command");
    499 				IPFERROR(110003);
    500 				return EINVAL;
    501 			}
    502 
    503 
    504 			if (sh.sm_table > SMC_MAXTBL) {
    505 				if (softs->ipf_sync_debug > 2)
    506 					printf("uiomove(header) invalid %s\n",
    507 						"table");
    508 				IPFERROR(110004);
    509 				return EINVAL;
    510 			}
    511 
    512 		} else {
     513 			/* insufficient data, wait until next call */
    514 			if (softs->ipf_sync_debug > 2)
     515 				printf("uiomove(header) insufficient data\n");
    516 			IPFERROR(110005);
    517 			return EAGAIN;
     518 		}
    519 
    520 
    521 		/*
    522 		 * We have a header, so try to read the amount of data
    523 		 * needed for the request
    524 		 */
    525 
     526 		/* zero length or oversized records are not supported */
     527 		if (sh.sm_len == 0 || sh.sm_len > sizeof(data)) {
     528 			if (softs->ipf_sync_debug > 2)
     529 				printf("uiomove(data) bad length %d %s\n",
     530 					sh.sm_len, "not supported");
    531 			IPFERROR(110006);
    532 			return EINVAL;
    533 		}
    534 
    535 		if (uio->uio_resid >= sh.sm_len) {
    536 
    537 			err = UIOMOVE(data, sh.sm_len, UIO_WRITE, uio);
    538 
    539 			if (err) {
    540 				if (softs->ipf_sync_debug > 2)
    541 					printf("uiomove(data) failed: %d\n",
    542 						err);
    543 				return err;
    544 			}
    545 
    546 			if (softs->ipf_sync_debug > 7)
    547 				printf("uiomove(data) %d bytes read\n",
    548 					sh.sm_len);
    549 
    550 			if (sh.sm_table == SMC_STATE)
    551 				err = ipf_sync_state(softc, &sh, data);
    552 			else if (sh.sm_table == SMC_NAT)
    553 				err = ipf_sync_nat(softc, &sh, data);
    554 			if (softs->ipf_sync_debug > 7)
    555 				printf("[%d] Finished with error %d\n",
    556 					sh.sm_num, err);
    557 
    558 		} else {
    559 			/* insufficient data, wait until next call */
    560 			if (softs->ipf_sync_debug > 2)
    561 				printf("uiomove(data) %s %d bytes, got %d\n",
    562 					"insufficient data, need",
    563 					sh.sm_len, uio->uio_resid);
    564 			IPFERROR(110007);
    565 			return EAGAIN;
    566 		}
    567 	}
    568 
    569 	/* no more data */
    570 	return 0;
    571 }
    572 
    573 
    574 /* ------------------------------------------------------------------------ */
    575 /* Function:    ipf_sync_read                                               */
    576 /* Returns:     int    - 0 == success, else error value.                    */
    577 /* Parameters:  uio(O) - pointer to information about where to store data   */
    578 /*                                                                          */
    579 /* This function is called when a user program wants to read some data      */
    580 /* for pending state/NAT updates.  If no data is available, the caller is   */
    581 /* put to sleep, pending a wakeup from the "lower half" of this code.       */
    582 /* ------------------------------------------------------------------------ */
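/*
 * Illustrative sketch (not compiled) of the matching reader, an assumption
 * about how an ipfsyncd-style daemon might drain the device rather than a
 * description of an existing tool.  The kernel copies out whole
 * synclogent_t records for SMC_CREATE events and whole syncupdent_t
 * records for SMC_UPDATE events, each led by a synchdr_t, so the sm_cmd
 * field of the leading header tells the reader how far to advance.
 */
#if 0
#include <unistd.h>
/* plus the ipf headers that define the record structures */

static void
example_read_loop(int fd)
{
	union {
		synclogent_t align[8];
		char bytes[sizeof(synclogent_t) * 8];
	} buf;
	synchdr_t *sh;
	ssize_t n;
	char *s;

	while ((n = read(fd, buf.bytes, sizeof(buf.bytes))) > 0) {
		for (s = buf.bytes; s < buf.bytes + n; ) {
			sh = (synchdr_t *)s;
			/* forward the whole record to the peer here */
			if (sh->sm_cmd == SMC_CREATE)
				s += sizeof(synclogent_t);
			else
				s += sizeof(syncupdent_t);
		}
	}
}
#endif
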
    583 int
    584 ipf_sync_read(softc, uio)
    585 	ipf_main_softc_t *softc;
    586 	struct uio *uio;
    587 {
    588 	ipf_sync_softc_t *softs = softc->ipf_sync_soft;
    589 	syncupdent_t *su;
    590 	synclogent_t *sl;
    591 	int err = 0;
    592 
    593 	if ((uio->uio_resid & 3) || (uio->uio_resid < 8)) {
    594 		IPFERROR(110008);
    595 		return EINVAL;
    596 	}
    597 
    598 #  if BSD_GE_YEAR(199306) || defined(__FreeBSD__) || defined(__osf__)
    599 	uio->uio_rw = UIO_READ;
    600 #  endif
    601 
    602 	MUTEX_ENTER(&softs->ipsl_mutex);
    603 	while ((softs->sl_tail == softs->sl_idx) &&
    604 	       (softs->su_tail == softs->su_idx)) {
    605 #  if defined(_KERNEL)
    606 #   if SOLARIS
    607 		if (!cv_wait_sig(&softs->ipslwait, &softs->ipsl_mutex.ipf_lk)) {
    608 			MUTEX_EXIT(&softs->ipsl_mutex);
    609 			IPFERROR(110009);
    610 			return EINTR;
    611 		}
    612 #   else
    613 #    ifdef __hpux
    614 		{
    615 		lock_t *l;
    616 
    617 		l = get_sleep_lock(&softs->sl_tail);
    618 		err = sleep(&softs->sl_tail, PZERO+1);
    619 		if (err) {
    620 			MUTEX_EXIT(&softs->ipsl_mutex);
    621 			IPFERROR(110010);
    622 			return EINTR;
    623 		}
    624 		spinunlock(l);
    625 		}
    626 #    else /* __hpux */
    627 #     ifdef __osf__
    628 		err = mpsleep(&softs->sl_tail, PSUSP|PCATCH,  "ipl sleep", 0,
    629 			      &softs->ipsl_mutex, MS_LOCK_SIMPLE);
    630 		if (err) {
    631 			IPFERROR(110011);
    632 			return EINTR;
    633 		}
    634 #     else
    635 		MUTEX_EXIT(&softs->ipsl_mutex);
    636 		err = SLEEP(&softs->sl_tail, "ipl sleep");
    637 		if (err) {
    638 			IPFERROR(110012);
    639 			return EINTR;
    640 		}
    641 		MUTEX_ENTER(&softs->ipsl_mutex);
    642 #     endif /* __osf__ */
    643 #    endif /* __hpux */
    644 #   endif /* SOLARIS */
    645 #  endif /* _KERNEL */
    646 	}
    647 
    648 	while ((softs->sl_tail < softs->sl_idx) &&
    649 	       (uio->uio_resid > sizeof(*sl))) {
    650 		sl = softs->synclog + softs->sl_tail++;
    651 		MUTEX_EXIT(&softs->ipsl_mutex);
    652 		err = UIOMOVE(sl, sizeof(*sl), UIO_READ, uio);
    653 		if (err != 0)
    654 			goto goterror;
    655 		MUTEX_ENTER(&softs->ipsl_mutex);
    656 	}
    657 
    658 	while ((softs->su_tail < softs->su_idx) &&
    659 	       (uio->uio_resid > sizeof(*su))) {
    660 		su = softs->syncupd + softs->su_tail;
    661 		softs->su_tail++;
    662 		MUTEX_EXIT(&softs->ipsl_mutex);
    663 		err = UIOMOVE(su, sizeof(*su), UIO_READ, uio);
    664 		if (err != 0)
    665 			goto goterror;
    666 		MUTEX_ENTER(&softs->ipsl_mutex);
    667 		if (su->sup_hdr.sm_sl != NULL)
    668 			su->sup_hdr.sm_sl->sl_idx = -1;
    669 	}
    670 	if (softs->sl_tail == softs->sl_idx)
    671 		softs->sl_tail = softs->sl_idx = 0;
    672 	if (softs->su_tail == softs->su_idx)
    673 		softs->su_tail = softs->su_idx = 0;
    674 	MUTEX_EXIT(&softs->ipsl_mutex);
    675 goterror:
    676 	return err;
    677 }
    678 
    679 
    680 /* ------------------------------------------------------------------------ */
    681 /* Function:    ipf_sync_state                                              */
    682 /* Returns:     int    - 0 == success, else error value.                    */
    683 /* Parameters:  sp(I)  - pointer to sync packet data header                 */
     684 /*              data(I) - pointer to data that followed the sync header     */
    685 /*                                                                          */
    686 /* Updates the state table according to information passed in the sync      */
     687 /* header.  The data supplied with the header is interpreted in a manner    */
     688 /* that depends on the contents of the sync header.  This function can      */
    689 /* create a new state entry or update one.  Deletion is left to the state   */
    690 /* structures being timed out correctly.                                    */
    691 /* ------------------------------------------------------------------------ */
    692 static int
    693 ipf_sync_state(softc, sp, data)
    694 	ipf_main_softc_t *softc;
    695 	synchdr_t *sp;
    696 	void *data;
    697 {
    698 	ipf_sync_softc_t *softs = softc->ipf_sync_soft;
    699 	synctcp_update_t su;
    700 	ipstate_t *is, sn;
    701 	synclist_t *sl;
    702 	frentry_t *fr;
    703 	u_int hv;
    704 	int err = 0;
    705 
    706 	hv = sp->sm_num & (softs->ipf_sync_state_tab_sz - 1);
    707 
    708 	switch (sp->sm_cmd)
    709 	{
    710 	case SMC_CREATE :
    711 
    712 		bcopy(data, &sn, sizeof(sn));
    713 		KMALLOC(is, ipstate_t *);
    714 		if (is == NULL) {
    715 			IPFERROR(110013);
    716 			err = ENOMEM;
    717 			break;
    718 		}
    719 
    720 		KMALLOC(sl, synclist_t *);
    721 		if (sl == NULL) {
    722 			IPFERROR(110014);
    723 			err = ENOMEM;
    724 			KFREE(is);
    725 			break;
    726 		}
    727 
    728 		bzero((char *)is, offsetof(ipstate_t, is_die));
    729 		bcopy((char *)&sn.is_die, (char *)&is->is_die,
    730 		      sizeof(*is) - offsetof(ipstate_t, is_die));
    731 		ipf_sync_storder(0, is);
    732 
    733 		/*
    734 		 * We need to find the same rule on the slave as was used on
    735 		 * the master to create this state entry.
    736 		 */
    737 		READ_ENTER(&softc->ipf_mutex);
    738 		fr = ipf_getrulen(softc, IPL_LOGIPF, sn.is_group, sn.is_rulen);
    739 		if (fr != NULL) {
    740 			MUTEX_ENTER(&fr->fr_lock);
    741 			fr->fr_ref++;
    742 			fr->fr_statecnt++;
    743 			MUTEX_EXIT(&fr->fr_lock);
    744 		}
    745 		RWLOCK_EXIT(&softc->ipf_mutex);
    746 
    747 		if (softs->ipf_sync_debug > 4)
    748 			printf("[%d] Filter rules = %p\n", sp->sm_num, fr);
    749 
    750 		is->is_rule = fr;
    751 		is->is_sync = sl;
    752 
    753 		sl->sl_idx = -1;
    754 		sl->sl_ips = is;
    755 		bcopy(sp, &sl->sl_hdr, sizeof(struct synchdr));
    756 
    757 		WRITE_ENTER(&softs->ipf_syncstate);
    758 		WRITE_ENTER(&softc->ipf_state);
    759 
    760 		sl->sl_pnext = softs->syncstatetab + hv;
    761 		sl->sl_next = softs->syncstatetab[hv];
    762 		if (softs->syncstatetab[hv] != NULL)
    763 			softs->syncstatetab[hv]->sl_pnext = &sl->sl_next;
    764 		softs->syncstatetab[hv] = sl;
    765 		MUTEX_DOWNGRADE(&softs->ipf_syncstate);
    766 		ipf_state_insert(softc, is, sp->sm_rev);
    767 		/*
    768 		 * Do not initialise the interface pointers for the state
    769 		 * entry as the full complement of interface names may not
    770 		 * be present.
    771 		 *
    772 		 * Put this state entry on its timeout queue.
    773 		 */
    774 		/*fr_setstatequeue(is, sp->sm_rev);*/
    775 		break;
    776 
    777 	case SMC_UPDATE :
    778 		bcopy(data, &su, sizeof(su));
    779 
    780 		if (softs->ipf_sync_debug > 4)
    781 			printf("[%d] Update age %lu state %d/%d \n",
    782 				sp->sm_num, su.stu_age, su.stu_state[0],
    783 				su.stu_state[1]);
    784 
    785 		READ_ENTER(&softs->ipf_syncstate);
    786 		for (sl = softs->syncstatetab[hv]; (sl != NULL);
    787 		     sl = sl->sl_next)
    788 			if (sl->sl_hdr.sm_num == sp->sm_num)
    789 				break;
    790 		if (sl == NULL) {
    791 			if (softs->ipf_sync_debug > 1)
    792 				printf("[%d] State not found - can't update\n",
    793 					sp->sm_num);
    794 			RWLOCK_EXIT(&softs->ipf_syncstate);
    795 			IPFERROR(110015);
    796 			err = ENOENT;
    797 			break;
    798 		}
    799 
    800 		READ_ENTER(&softc->ipf_state);
    801 
    802 		if (softs->ipf_sync_debug > 6)
    803 			printf("[%d] Data from state v:%d p:%d cmd:%d table:%d rev:%d\n",
    804 				sp->sm_num, sl->sl_hdr.sm_v, sl->sl_hdr.sm_p,
    805 				sl->sl_hdr.sm_cmd, sl->sl_hdr.sm_table,
    806 				sl->sl_hdr.sm_rev);
    807 
    808 		is = sl->sl_ips;
    809 
    810 		MUTEX_ENTER(&is->is_lock);
    811 		switch (sp->sm_p)
    812 		{
    813 		case IPPROTO_TCP :
    814 			/* XXX FV --- shouldn't we do ntohl/htonl???? XXX */
    815 			is->is_send = su.stu_data[0].td_end;
    816 			is->is_maxsend = su.stu_data[0].td_maxend;
    817 			is->is_maxswin = su.stu_data[0].td_maxwin;
    818 			is->is_state[0] = su.stu_state[0];
    819 			is->is_dend = su.stu_data[1].td_end;
    820 			is->is_maxdend = su.stu_data[1].td_maxend;
    821 			is->is_maxdwin = su.stu_data[1].td_maxwin;
    822 			is->is_state[1] = su.stu_state[1];
    823 			break;
    824 		default :
    825 			break;
    826 		}
    827 
    828 		if (softs->ipf_sync_debug > 6)
    829 			printf("[%d] Setting timers for state\n", sp->sm_num);
    830 
    831 		ipf_state_setqueue(softc, is, sp->sm_rev);
    832 
    833 		MUTEX_EXIT(&is->is_lock);
    834 		break;
    835 
    836 	default :
    837 		IPFERROR(110016);
    838 		err = EINVAL;
    839 		break;
    840 	}
    841 
    842 	if (err == 0) {
    843 		RWLOCK_EXIT(&softc->ipf_state);
    844 		RWLOCK_EXIT(&softs->ipf_syncstate);
    845 	}
    846 
    847 	if (softs->ipf_sync_debug > 6)
    848 		printf("[%d] Update completed with error %d\n",
    849 			sp->sm_num, err);
    850 
    851 	return err;
    852 }
    853 
    854 
    855 /* ------------------------------------------------------------------------ */
    856 /* Function:    ipf_sync_del                                                */
    857 /* Returns:     Nil                                                         */
    858 /* Parameters:  sl(I) - pointer to synclist object to delete                */
    859 /*                                                                          */
    860 /* Deletes an object from the synclist.                                     */
    861 /* ------------------------------------------------------------------------ */
    862 static void
    863 ipf_sync_del(softs, sl)
    864 	ipf_sync_softc_t *softs;
    865 	synclist_t *sl;
    866 {
    867 	*sl->sl_pnext = sl->sl_next;
    868 	if (sl->sl_next != NULL)
    869 		sl->sl_next->sl_pnext = sl->sl_pnext;
    870 	if (sl->sl_idx != -1)
    871 		softs->syncupd[sl->sl_idx].sup_hdr.sm_sl = NULL;
    872 }
    873 
    874 
    875 /* ------------------------------------------------------------------------ */
    876 /* Function:    ipf_sync_del_state                                          */
    877 /* Returns:     Nil                                                         */
    878 /* Parameters:  sl(I) - pointer to synclist object to delete                */
    879 /*                                                                          */
     880 /* Deletes an object from the synclist state table and frees its memory.    */
    881 /* ------------------------------------------------------------------------ */
    882 void
    883 ipf_sync_del_state(arg, sl)
    884 	void *arg;
    885 	synclist_t *sl;
    886 {
    887 	ipf_sync_softc_t *softs = arg;
    888 
    889 	WRITE_ENTER(&softs->ipf_syncstate);
    890 	ipf_sync_del(softs, sl);
    891 	RWLOCK_EXIT(&softs->ipf_syncstate);
    892 	KFREE(sl);
    893 }
    894 
    895 
    896 /* ------------------------------------------------------------------------ */
    897 /* Function:    ipf_sync_del_nat                                            */
    898 /* Returns:     Nil                                                         */
    899 /* Parameters:  sl(I) - pointer to synclist object to delete                */
    900 /*                                                                          */
     901 /* Deletes an object from the synclist nat table and frees its memory.      */
    902 /* ------------------------------------------------------------------------ */
    903 void
    904 ipf_sync_del_nat(arg, sl)
    905 	void *arg;
    906 	synclist_t *sl;
    907 {
    908 	ipf_sync_softc_t *softs = arg;
    909 
    910 	WRITE_ENTER(&softs->ipf_syncnat);
    911 	ipf_sync_del(softs, sl);
    912 	RWLOCK_EXIT(&softs->ipf_syncnat);
    913 	KFREE(sl);
    914 }
    915 
    916 
    917 /* ------------------------------------------------------------------------ */
    918 /* Function:    ipf_sync_nat                                                */
    919 /* Returns:     int    - 0 == success, else error value.                    */
    920 /* Parameters:  sp(I)  - pointer to sync packet data header                 */
     921 /*              data(I) - pointer to data that followed the sync header     */
    922 /*                                                                          */
     923 /* Updates the NAT table according to information passed in the sync        */
     924 /* header.  The data supplied with the header is interpreted in a manner    */
     925 /* that depends on the contents of the sync header.  This function can      */
    926 /* create a new NAT entry or update one.  Deletion is left to the NAT       */
    927 /* structures being timed out correctly.                                    */
    928 /* ------------------------------------------------------------------------ */
    929 static int
    930 ipf_sync_nat(softc, sp, data)
    931 	ipf_main_softc_t *softc;
    932 	synchdr_t *sp;
    933 	void *data;
    934 {
    935 	ipf_sync_softc_t *softs = softc->ipf_sync_soft;
    936 	syncupdent_t su;
    937 	nat_t *n, *nat;
    938 	synclist_t *sl;
     939 	u_int hv = sp->sm_num & (softs->ipf_sync_nat_tab_sz - 1);
     940 	int err = 0;
    941 
    942 	READ_ENTER(&softs->ipf_syncnat);
    943 
    944 	switch (sp->sm_cmd)
    945 	{
    946 	case SMC_CREATE :
    947 		KMALLOC(n, nat_t *);
    948 		if (n == NULL) {
    949 			IPFERROR(110017);
    950 			err = ENOMEM;
    951 			break;
    952 		}
    953 
    954 		KMALLOC(sl, synclist_t *);
    955 		if (sl == NULL) {
    956 			IPFERROR(110018);
    957 			err = ENOMEM;
    958 			KFREE(n);
    959 			break;
    960 		}
    961 
    962 		nat = (nat_t *)data;
    963 		bzero((char *)n, offsetof(nat_t, nat_age));
    964 		bcopy((char *)&nat->nat_age, (char *)&n->nat_age,
    965 		      sizeof(*n) - offsetof(nat_t, nat_age));
    966 		ipf_sync_natorder(0, n);
    967 		n->nat_sync = sl;
    968 		n->nat_rev = sl->sl_rev;
    969 
    970 		sl->sl_idx = -1;
    971 		sl->sl_ipn = n;
     972 		sl->sl_num = sp->sm_num;	/* already in host byte order */
    973 
    974 		WRITE_ENTER(&softc->ipf_nat);
    975 		sl->sl_pnext = softs->syncnattab + hv;
    976 		sl->sl_next = softs->syncnattab[hv];
    977 		if (softs->syncnattab[hv] != NULL)
    978 			softs->syncnattab[hv]->sl_pnext = &sl->sl_next;
    979 		softs->syncnattab[hv] = sl;
    980 		(void) ipf_nat_insert(softc, softc->ipf_nat_soft, n);
    981 		RWLOCK_EXIT(&softc->ipf_nat);
    982 		break;
    983 
    984 	case SMC_UPDATE :
    985 		bcopy(data, &su, sizeof(su));
    986 
    987 		for (sl = softs->syncnattab[hv]; (sl != NULL);
    988 		     sl = sl->sl_next)
    989 			if (sl->sl_hdr.sm_num == sp->sm_num)
    990 				break;
    991 		if (sl == NULL) {
    992 			IPFERROR(110019);
    993 			err = ENOENT;
    994 			break;
    995 		}
    996 
    997 		READ_ENTER(&softc->ipf_nat);
    998 
    999 		nat = sl->sl_ipn;
   1000 		nat->nat_rev = sl->sl_rev;
   1001 
   1002 		MUTEX_ENTER(&nat->nat_lock);
   1003 		ipf_nat_setqueue(softc, softc->ipf_nat_soft, nat);
   1004 		MUTEX_EXIT(&nat->nat_lock);
   1005 
   1006 		RWLOCK_EXIT(&softc->ipf_nat);
   1007 
   1008 		break;
   1009 
   1010 	default :
   1011 		IPFERROR(110020);
   1012 		err = EINVAL;
   1013 		break;
   1014 	}
   1015 
   1016 	RWLOCK_EXIT(&softs->ipf_syncnat);
    1017 	return err;
   1018 }
   1019 
   1020 
   1021 /* ------------------------------------------------------------------------ */
   1022 /* Function:    ipf_sync_new                                                */
   1023 /* Returns:     synclist_t* - NULL == failure, else pointer to new synclist */
   1024 /*                            data structure.                               */
   1025 /* Parameters:  tab(I) - type of synclist_t to create                       */
   1026 /*              fin(I) - pointer to packet information                      */
   1027 /*              ptr(I) - pointer to owning object                           */
   1028 /*                                                                          */
   1029 /* Creates a new sync table entry and notifies any sleepers that it's there */
   1030 /* waiting to be processed.                                                 */
   1031 /* ------------------------------------------------------------------------ */
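/*
 * Illustrative fragment (an assumption about the callers in ip_state.c and
 * ip_nat.c, which live outside this file): the owning state or NAT entry
 * passes itself as the ptr argument and keeps the returned synclist_t so
 * that later ipf_sync_update() and ipf_sync_del_state()/ipf_sync_del_nat()
 * calls can find it.  A NULL return simply means the entry is not synced.
 */
#if 0
static void
example_attach_sync(ipf_main_softc_t *softc, fr_info_t *fin,
		    ipstate_t *is, nat_t *nat)
{
	if (is != NULL)
		is->is_sync = ipf_sync_new(softc, SMC_STATE, fin, is);
	if (nat != NULL)
		nat->nat_sync = ipf_sync_new(softc, SMC_NAT, fin, nat);
}
#endif
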
   1032 synclist_t *
   1033 ipf_sync_new(softc, tab, fin, ptr)
   1034 	ipf_main_softc_t *softc;
   1035 	int tab;
   1036 	fr_info_t *fin;
   1037 	void *ptr;
   1038 {
   1039 	ipf_sync_softc_t *softs = softc->ipf_sync_soft;
   1040 	synclist_t *sl, *ss;
   1041 	synclogent_t *sle;
   1042 	u_int hv, sz;
   1043 
   1044 	if (softs->sl_idx == softs->ipf_sync_log_sz)
   1045 		return NULL;
   1046 	KMALLOC(sl, synclist_t *);
   1047 	if (sl == NULL)
   1048 		return NULL;
   1049 
   1050 	MUTEX_ENTER(&softs->ipf_syncadd);
   1051 	/*
   1052 	 * Get a unique number for this synclist_t.  The number is only meant
   1053 	 * to be unique for the lifetime of the structure and may be reused
   1054 	 * later.
   1055 	 */
   1056 	softs->ipf_sync_num++;
   1057 	if (softs->ipf_sync_num == 0) {
   1058 		softs->ipf_sync_num = 1;
   1059 		softs->ipf_sync_wrap++;
   1060 	}
   1061 
   1062 	/*
    1063 	 * Use the sync number of the object as the hash key.  Should end up
   1064 	 * with relatively even distribution over time.
    1065 	 * XXX - an attacker could launch a DoS attack, of sorts, if they are
   1066 	 * the only one causing new table entries by only keeping open every
   1067 	 * nth connection they make, where n is a value in the interval
   1068 	 * [0, SYNC_STATETABSZ-1].
   1069 	 */
   1070 	switch (tab)
   1071 	{
   1072 	case SMC_STATE :
   1073 		hv = softs->ipf_sync_num & (softs->ipf_sync_state_tab_sz - 1);
   1074 		while (softs->ipf_sync_wrap != 0) {
   1075 			for (ss = softs->syncstatetab[hv]; ss; ss = ss->sl_next)
   1076 				if (ss->sl_hdr.sm_num == softs->ipf_sync_num)
   1077 					break;
   1078 			if (ss == NULL)
   1079 				break;
   1080 			softs->ipf_sync_num++;
   1081 			hv = softs->ipf_sync_num &
   1082 			     (softs->ipf_sync_state_tab_sz - 1);
   1083 		}
   1084 		sl->sl_pnext = softs->syncstatetab + hv;
   1085 		sl->sl_next = softs->syncstatetab[hv];
   1086 		softs->syncstatetab[hv] = sl;
   1087 		break;
   1088 
   1089 	case SMC_NAT :
   1090 		hv = softs->ipf_sync_num & (softs->ipf_sync_nat_tab_sz - 1);
   1091 		while (softs->ipf_sync_wrap != 0) {
   1092 			for (ss = softs->syncnattab[hv]; ss; ss = ss->sl_next)
   1093 				if (ss->sl_hdr.sm_num == softs->ipf_sync_num)
   1094 					break;
   1095 			if (ss == NULL)
   1096 				break;
   1097 			softs->ipf_sync_num++;
   1098 			hv = softs->ipf_sync_num &
   1099 			     (softs->ipf_sync_nat_tab_sz - 1);
   1100 		}
   1101 		sl->sl_pnext = softs->syncnattab + hv;
   1102 		sl->sl_next = softs->syncnattab[hv];
   1103 		softs->syncnattab[hv] = sl;
   1104 		break;
   1105 
   1106 	default :
   1107 		break;
   1108 	}
   1109 
   1110 	sl->sl_num = softs->ipf_sync_num;
   1111 	MUTEX_EXIT(&softs->ipf_syncadd);
   1112 
   1113 	sl->sl_magic = htonl(SYNHDRMAGIC);
   1114 	sl->sl_v = fin->fin_v;
   1115 	sl->sl_p = fin->fin_p;
   1116 	sl->sl_cmd = SMC_CREATE;
   1117 	sl->sl_idx = -1;
   1118 	sl->sl_table = tab;
   1119 	sl->sl_rev = fin->fin_rev;
   1120 	if (tab == SMC_STATE) {
   1121 		sl->sl_ips = ptr;
   1122 		sz = sizeof(*sl->sl_ips);
   1123 	} else if (tab == SMC_NAT) {
   1124 		sl->sl_ipn = ptr;
   1125 		sz = sizeof(*sl->sl_ipn);
   1126 	} else {
   1127 		ptr = NULL;
   1128 		sz = 0;
   1129 	}
   1130 	sl->sl_len = sz;
   1131 
   1132 	/*
   1133 	 * Create the log entry to be read by a user daemon.  When it has been
   1134 	 * finished and put on the queue, send a signal to wakeup any waiters.
   1135 	 */
   1136 	MUTEX_ENTER(&softs->ipf_syncadd);
   1137 	sle = softs->synclog + softs->sl_idx++;
   1138 	bcopy((char *)&sl->sl_hdr, (char *)&sle->sle_hdr,
   1139 	      sizeof(sle->sle_hdr));
   1140 	sle->sle_hdr.sm_num = htonl(sle->sle_hdr.sm_num);
   1141 	sle->sle_hdr.sm_len = htonl(sle->sle_hdr.sm_len);
   1142 	if (ptr != NULL) {
   1143 		bcopy((char *)ptr, (char *)&sle->sle_un, sz);
   1144 		if (tab == SMC_STATE) {
   1145 			ipf_sync_storder(1, &sle->sle_un.sleu_ips);
   1146 		} else if (tab == SMC_NAT) {
   1147 			ipf_sync_natorder(1, &sle->sle_un.sleu_ipn);
   1148 		}
   1149 	}
   1150 	MUTEX_EXIT(&softs->ipf_syncadd);
   1151 
   1152 	ipf_sync_wakeup(softc);
   1153 	return sl;
   1154 }
   1155 
   1156 
   1157 /* ------------------------------------------------------------------------ */
   1158 /* Function:    ipf_sync_update                                             */
   1159 /* Returns:     Nil                                                         */
   1160 /* Parameters:  tab(I) - type of synclist_t to create                       */
   1161 /*              fin(I) - pointer to packet information                      */
   1162 /*              sl(I)  - pointer to synchronisation object                  */
   1163 /*                                                                          */
    1164 /* For outbound packets only, create a sync update record for the user      */
   1165 /* process to read.                                                         */
   1166 /* ------------------------------------------------------------------------ */
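/*
 * Illustrative fragment (an assumption about the per-packet caller in
 * ip_state.c, outside this file): after a TCP state entry has been updated
 * for an outbound packet, its new window/sequence data is pushed into the
 * update ring for the reading daemon to pick up.  ipf_sync_update() itself
 * repeats the fin_out and NULL checks, so they are shown only for clarity.
 */
#if 0
static void
example_push_update(ipf_main_softc_t *softc, fr_info_t *fin, ipstate_t *is)
{
	if (fin->fin_out != 0 && is->is_sync != NULL)
		ipf_sync_update(softc, SMC_STATE, fin, is->is_sync);
}
#endif
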
   1167 void
   1168 ipf_sync_update(softc, tab, fin, sl)
   1169 	ipf_main_softc_t *softc;
   1170 	int tab;
   1171 	fr_info_t *fin;
   1172 	synclist_t *sl;
   1173 {
   1174 	ipf_sync_softc_t *softs = softc->ipf_sync_soft;
   1175 	synctcp_update_t *st;
   1176 	syncupdent_t *slu;
   1177 	ipstate_t *ips;
   1178 	nat_t *nat;
   1179 	ipfrwlock_t *lock;
   1180 
   1181 	if (fin->fin_out == 0 || sl == NULL)
   1182 		return;
   1183 
   1184 	if (tab == SMC_STATE) {
   1185 		lock = &softs->ipf_syncstate;
   1186 	} else {
   1187 		lock = &softs->ipf_syncnat;
   1188 	}
   1189 
   1190 	READ_ENTER(lock);
   1191 	if (sl->sl_idx == -1) {
   1192 		MUTEX_ENTER(&softs->ipf_syncadd);
   1193 		slu = softs->syncupd + softs->su_idx;
   1194 		sl->sl_idx = softs->su_idx++;
   1195 		MUTEX_EXIT(&softs->ipf_syncadd);
   1196 
   1197 		bcopy((char *)&sl->sl_hdr, (char *)&slu->sup_hdr,
   1198 		      sizeof(slu->sup_hdr));
   1199 		slu->sup_hdr.sm_magic = htonl(SYNHDRMAGIC);
   1200 		slu->sup_hdr.sm_sl = sl;
   1201 		slu->sup_hdr.sm_cmd = SMC_UPDATE;
   1202 		slu->sup_hdr.sm_table = tab;
   1203 		slu->sup_hdr.sm_num = htonl(sl->sl_num);
   1204 		slu->sup_hdr.sm_len = htonl(sizeof(struct synctcp_update));
   1205 		slu->sup_hdr.sm_rev = fin->fin_rev;
   1206 # if 0
   1207 		if (fin->fin_p == IPPROTO_TCP) {
   1208 			st->stu_len[0] = 0;
   1209 			st->stu_len[1] = 0;
   1210 		}
   1211 # endif
   1212 	} else
   1213 		slu = softs->syncupd + sl->sl_idx;
   1214 
   1215 	/*
   1216 	 * Only TCP has complex timeouts, others just use default timeouts.
   1217 	 * For TCP, we only need to track the connection state and window.
   1218 	 */
   1219 	if (fin->fin_p == IPPROTO_TCP) {
   1220 		st = &slu->sup_tcp;
   1221 		if (tab == SMC_STATE) {
   1222 			ips = sl->sl_ips;
   1223 			st->stu_age = htonl(ips->is_die);
   1224 			st->stu_data[0].td_end = ips->is_send;
   1225 			st->stu_data[0].td_maxend = ips->is_maxsend;
   1226 			st->stu_data[0].td_maxwin = ips->is_maxswin;
   1227 			st->stu_state[0] = ips->is_state[0];
   1228 			st->stu_data[1].td_end = ips->is_dend;
   1229 			st->stu_data[1].td_maxend = ips->is_maxdend;
   1230 			st->stu_data[1].td_maxwin = ips->is_maxdwin;
   1231 			st->stu_state[1] = ips->is_state[1];
   1232 		} else if (tab == SMC_NAT) {
   1233 			nat = sl->sl_ipn;
   1234 			st->stu_age = htonl(nat->nat_age);
   1235 		}
   1236 	}
   1237 	RWLOCK_EXIT(lock);
   1238 
   1239 	ipf_sync_wakeup(softc);
   1240 }
   1241 
   1242 
   1243 /* ------------------------------------------------------------------------ */
   1244 /* Function:    ipf_sync_flush_table                                        */
   1245 /* Returns:     int - number of entries freed by flushing table             */
   1246 /* Parameters:  tabsize(I) - size of the array pointed to by table          */
   1247 /*              table(I)   - pointer to sync table to empty                 */
   1248 /*                                                                          */
   1249 /* Walk through a table of sync entries and free each one.  It is assumed   */
   1250 /* that some lock is held so that nobody else tries to access the table     */
   1251 /* during this cleanup.                                                     */
   1252 /* ------------------------------------------------------------------------ */
   1253 static int
   1254 ipf_sync_flush_table(softs, tabsize, table)
   1255 	ipf_sync_softc_t *softs;
   1256 	int tabsize;
   1257 	synclist_t **table;
   1258 {
   1259 	synclist_t *sl;
   1260 	int i, items;
   1261 
   1262 	items = 0;
   1263 
   1264 	for (i = 0; i < tabsize; i++) {
   1265 		while ((sl = table[i]) != NULL) {
   1266 			switch (sl->sl_table) {
   1267 			case SMC_STATE :
   1268 				if (sl->sl_ips != NULL)
   1269 					sl->sl_ips->is_sync = NULL;
   1270 				break;
   1271 			case SMC_NAT :
   1272 				if (sl->sl_ipn != NULL)
   1273 					sl->sl_ipn->nat_sync = NULL;
   1274 				break;
   1275 			}
   1276 			if (sl->sl_next != NULL)
   1277 				sl->sl_next->sl_pnext = sl->sl_pnext;
   1278 			table[i] = sl->sl_next;
   1279 			if (sl->sl_idx != -1)
   1280 				softs->syncupd[sl->sl_idx].sup_hdr.sm_sl = NULL;
   1281 			KFREE(sl);
   1282 			items++;
   1283 		}
   1284 	}
   1285 
   1286 	return items;
   1287 }
   1288 
   1289 
   1290 /* ------------------------------------------------------------------------ */
   1291 /* Function:    ipf_sync_ioctl                                              */
   1292 /* Returns:     int - 0 == success, != 0 == failure                         */
   1293 /* Parameters:  data(I) - pointer to ioctl data                             */
   1294 /*              cmd(I)  - ioctl command integer                             */
   1295 /*              mode(I) - file mode bits used with open                     */
   1296 /*                                                                          */
    1297 /* This function handles the SIOCIPFFL ioctl (to flush the sync log or the  */
    1298 /* sync tables); all other ioctls are rejected with EINVAL.                 */
   1299 /* ------------------------------------------------------------------------ */
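/*
 * Illustrative sketch (not compiled) of driving the flush ioctl from
 * userland; the device path and error handling are assumptions made for
 * the example.  The argument selects what to flush: SMC_RLOG empties the
 * pending log/update rings while SMC_STATE and SMC_NAT empty the matching
 * sync tables, and the number of discarded entries is copied back out.
 */
#if 0
#include <sys/ioctl.h>
#include <fcntl.h>
#include <unistd.h>
/* plus the ipf headers that define SIOCIPFFL and the SMC_* values */

static int
example_flush_sync(int what)	/* SMC_RLOG, SMC_STATE or SMC_NAT */
{
	int fd, arg = what;

	fd = open("/dev/ipsync", O_RDWR);
	if (fd == -1)
		return -1;
	if (ioctl(fd, SIOCIPFFL, &arg) == -1)
		arg = -1;
	close(fd);
	return arg;	/* number of entries flushed, or -1 on error */
}
#endif
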
   1300 int
   1301 ipf_sync_ioctl(softc, data, cmd, mode, uid, ctx)
   1302 	ipf_main_softc_t *softc;
   1303 	caddr_t data;
   1304 	ioctlcmd_t cmd;
   1305 	int mode, uid;
   1306 	void *ctx;
   1307 {
   1308 	ipf_sync_softc_t *softs = softc->ipf_sync_soft;
   1309 	int error, i;
   1310 	SPL_INT(s);
   1311 
   1312 	switch (cmd)
   1313 	{
    1314 	case SIOCIPFFL :
   1315 		error = BCOPYIN(data, &i, sizeof(i));
   1316 		if (error != 0) {
   1317 			IPFERROR(110023);
   1318 			error = EFAULT;
   1319 			break;
   1320 		}
   1321 
   1322 		switch (i)
   1323 		{
   1324 		case SMC_RLOG :
   1325 			SPL_NET(s);
   1326 			MUTEX_ENTER(&softs->ipsl_mutex);
    1327 			i = (softs->sl_idx - softs->sl_tail) +
    1328 			    (softs->su_idx - softs->su_tail);
   1329 			softs->sl_idx = 0;
   1330 			softs->su_idx = 0;
   1331 			softs->sl_tail = 0;
   1332 			softs->su_tail = 0;
   1333 			MUTEX_EXIT(&softs->ipsl_mutex);
   1334 			SPL_X(s);
   1335 			break;
   1336 
   1337 		case SMC_NAT :
   1338 			SPL_NET(s);
   1339 			WRITE_ENTER(&softs->ipf_syncnat);
   1340 			i = ipf_sync_flush_table(softs, SYNC_NATTABSZ,
   1341 						 softs->syncnattab);
   1342 			RWLOCK_EXIT(&softs->ipf_syncnat);
   1343 			SPL_X(s);
   1344 			break;
   1345 
   1346 		case SMC_STATE :
   1347 			SPL_NET(s);
   1348 			WRITE_ENTER(&softs->ipf_syncstate);
   1349 			i = ipf_sync_flush_table(softs, SYNC_STATETABSZ,
   1350 						 softs->syncstatetab);
   1351 			RWLOCK_EXIT(&softs->ipf_syncstate);
   1352 			SPL_X(s);
   1353 			break;
   1354 		}
   1355 
   1356 		error = BCOPYOUT(&i, data, sizeof(i));
   1357 		if (error != 0) {
   1358 			IPFERROR(110022);
   1359 			error = EFAULT;
   1360 		}
   1361 		break;
   1362 
   1363 	default :
   1364 		IPFERROR(110021);
   1365 		error = EINVAL;
   1366 		break;
   1367 	}
   1368 
   1369 	return error;
   1370 }
   1371 
   1372 
   1373 /* ------------------------------------------------------------------------ */
   1374 /* Function:    ipf_sync_canread                                            */
   1375 /* Returns:     int - 0 == success, != 0 == failure                         */
   1376 /* Parameters:  Nil                                                         */
   1377 /*                                                                          */
   1378 /* This function provides input to the poll handler about whether or not    */
   1379 /* there is data waiting to be read from the /dev/ipsync device.            */
   1380 /* ------------------------------------------------------------------------ */
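/*
 * Illustrative sketch (not compiled) of the userland side of this poll
 * support, an assumption about the consumer rather than part of ipf:
 * block in poll(2) until the kernel signals that sync records are queued,
 * then go and read them with the loop sketched alongside ipf_sync_read().
 */
#if 0
#include <poll.h>

static int
example_wait_readable(int fd)
{
	struct pollfd pfd;

	pfd.fd = fd;
	pfd.events = POLLIN | POLLRDNORM;
	pfd.revents = 0;
	return poll(&pfd, 1, -1);	/* 1 when readable, -1 on error */
}
#endif
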
   1381 int
   1382 ipf_sync_canread(arg)
   1383 	void *arg;
   1384 {
   1385 	ipf_sync_softc_t *softs = arg;
   1386 	return !((softs->sl_tail == softs->sl_idx) &&
   1387 		 (softs->su_tail == softs->su_idx));
   1388 }
   1389 
   1390 
   1391 /* ------------------------------------------------------------------------ */
   1392 /* Function:    ipf_sync_canwrite                                           */
   1393 /* Returns:     int - 1 == can always write                                 */
   1394 /* Parameters:  Nil                                                         */
   1395 /*                                                                          */
    1396 /* This function lets the poll handler know that it is always ready and     */
    1397 /* willing to accept write events.                                          */
   1398 /* XXX Maybe this should return false if the sync table is full?            */
   1399 /* ------------------------------------------------------------------------ */
   1400 int
   1401 ipf_sync_canwrite(arg)
   1402 	void *arg;
   1403 {
   1404 	return 1;
   1405 }
   1406 
   1407 
   1408 /* ------------------------------------------------------------------------ */
   1409 /* Function:    ipf_sync_wakeup                                             */
   1410 /* Parameters:  Nil                                                         */
   1411 /* Returns:     Nil                                                         */
   1412 /*                                                                          */
   1413 /* This function implements the heuristics that decide how often to         */
   1414 /* generate a poll wakeup for programs that are waiting for information     */
   1415 /* about when they can do a read on /dev/ipsync.                            */
   1416 /*                                                                          */
   1417 /* There are three different considerations here:                           */
   1418 /* - do not keep a program waiting too long: ipf_sync_wake_interval is the  */
   1419 /*   maximum number of ipf ticks to let pass by;                            */
    1420 /* - do not let the queue of outstanding records awaiting notification      */
    1421 /*   grow too large (ipf_sync_queue_high_wm is the high water mark);        */
   1422 /* - do not let too many events get collapsed in before deciding that the   */
   1423 /*   other host(s) need an update (ipf_sync_event_high_wm is the high water */
   1424 /*   mark for this counter.)                                                */
   1425 /* ------------------------------------------------------------------------ */
   1426 static void
   1427 ipf_sync_wakeup(softc)
   1428 	ipf_main_softc_t *softc;
   1429 {
   1430 	ipf_sync_softc_t *softs = softc->ipf_sync_soft;
   1431 
   1432 	softs->ipf_sync_events++;
   1433 	if ((softc->ipf_ticks >
   1434 	    softs->ipf_sync_lastwakeup + softs->ipf_sync_wake_interval) ||
   1435 	    (softs->ipf_sync_events > softs->ipf_sync_event_high_wm) ||
    1436 	    ((softs->sl_idx - softs->sl_tail) >
   1437 	     softs->ipf_sync_queue_high_wm) ||
    1438 	    ((softs->su_idx - softs->su_tail) >
   1439 	     softs->ipf_sync_queue_high_wm)) {
   1440 
   1441 		ipf_sync_poll_wakeup(softc);
   1442 	}
   1443 }
   1444 
   1445 
   1446 /* ------------------------------------------------------------------------ */
   1447 /* Function:    ipf_sync_poll_wakeup                                        */
   1448 /* Parameters:  Nil                                                         */
   1449 /* Returns:     Nil                                                         */
   1450 /*                                                                          */
   1451 /* Deliver a poll wakeup and reset counters for two of the three heuristics */
   1452 /* ------------------------------------------------------------------------ */
   1453 static void
   1454 ipf_sync_poll_wakeup(softc)
   1455 	ipf_main_softc_t *softc;
   1456 {
   1457 	ipf_sync_softc_t *softs = softc->ipf_sync_soft;
   1458 
   1459 	softs->ipf_sync_events = 0;
   1460 	softs->ipf_sync_lastwakeup = softc->ipf_ticks;
   1461 
   1462 # ifdef _KERNEL
   1463 #  if SOLARIS
   1464 	MUTEX_ENTER(&softs->ipsl_mutex);
   1465 	cv_signal(&softs->ipslwait);
   1466 	MUTEX_EXIT(&softs->ipsl_mutex);
   1467 	pollwakeup(&softc->ipf_poll_head[IPL_LOGSYNC], POLLIN|POLLRDNORM);
   1468 #  else
   1469 	WAKEUP(&softs->sl_tail, 0);
   1470 	POLLWAKEUP(IPL_LOGSYNC);
   1471 #  endif
   1472 # endif
   1473 }
   1474 
   1475 
   1476 /* ------------------------------------------------------------------------ */
   1477 /* Function:    ipf_sync_expire                                             */
   1478 /* Parameters:  Nil                                                         */
   1479 /* Returns:     Nil                                                         */
   1480 /*                                                                          */
    1481 /* This is the function called every ipf_tick.  It implements one of the    */
   1482 /* three heuristics above *IF* there are events waiting.                    */
   1483 /* ------------------------------------------------------------------------ */
   1484 void
   1485 ipf_sync_expire(softc)
   1486 	ipf_main_softc_t *softc;
   1487 {
   1488 	ipf_sync_softc_t *softs = softc->ipf_sync_soft;
   1489 
   1490 	if ((softs->ipf_sync_events > 0) &&
   1491 	    (softc->ipf_ticks >
   1492 	     softs->ipf_sync_lastwakeup + softs->ipf_sync_wake_interval)) {
   1493 		ipf_sync_poll_wakeup(softc);
   1494 	}
   1495 }
   1496