/*-
 * Copyright (c) 2009-2019 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This material is based upon work partially supported by The
 * NetBSD Foundation under a contract with Mindaugas Rasiukevicius.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * NPF tableset module.
 *
 * Notes
 *
 *	The tableset is an array of tables.  After creation, the array
 *	is immutable.  The caller is responsible for synchronising access
 *	to the tableset.
 *
 * Warning (not applicable for the userspace npfkern):
 *
 *	The thmap_put()/thmap_del() calls are not made from the interrupt
 *	context and are protected by a mutex(9); therefore they do not
 *	need SPL wrappers -- see the comment at the top of the npf_conndb.c
 *	source file.
 */
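
/*
 * Illustrative lifecycle sketch (not part of the module; the table name
 * and the tableset size are example values, and error handling is omitted
 * for brevity):
 *
 *	npf_tableset_t *ts = npf_tableset_create(2);
 *	npf_table_t *t = npf_table_create("blocklist", 0,
 *	    NPF_TABLE_IPSET, NULL, 0);
 *	npf_tableset_insert(ts, t);	<- takes a reference on the table
 *	...
 *	npf_tableset_destroy(ts);	<- drops the tables with it
 */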

#ifdef _KERNEL
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: npf_tableset.c,v 1.33 2019/07/23 00:52:01 rmind Exp $");

#include <sys/param.h>
#include <sys/types.h>

#include <sys/atomic.h>
#include <sys/cdbr.h>
#include <sys/kmem.h>
#include <sys/pool.h>
#include <sys/queue.h>
#include <sys/mutex.h>
#include <sys/thmap.h>

#include "lpm.h"
#endif

#include "npf_impl.h"

typedef struct npf_tblent {
	LIST_ENTRY(npf_tblent)	te_listent;
	uint16_t		te_preflen;
	uint16_t		te_alen;
	npf_addr_t		te_addr;
} npf_tblent_t;

#define	NPF_ADDRLEN2IDX(alen)	((alen) >> 4)
#define	NPF_ADDR_SLOTS		(2)

struct npf_table {
	/*
	 * The storage type can be: a) hashmap b) LPM c) cdb or d) a plain
	 * array of interface addresses, with separate IPv4 and IPv6 slots.
	 */
	union {
		struct {
			thmap_t *		t_map;
			LIST_HEAD(, npf_tblent)	t_gc;
		};
		lpm_t *			t_lpm;
		struct {
			void *		t_blob;
			size_t		t_bsize;
			struct cdbr *	t_cdb;
		};
		struct {
			npf_tblent_t **	t_elements[NPF_ADDR_SLOTS];
			unsigned	t_allocated[NPF_ADDR_SLOTS];
			unsigned	t_used[NPF_ADDR_SLOTS];
		};
	} /* C11 */;
	LIST_HEAD(, npf_tblent)	t_list;
	unsigned		t_nitems;

	/*
	 * Table ID, type and lock.  The ID may change during a config
	 * reload; it is protected by the npf_config_lock.
	 */
	int			t_type;
	unsigned		t_id;
	kmutex_t		t_lock;

	/* Reference count and table name. */
	unsigned		t_refcnt;
	char			t_name[NPF_TABLE_MAXNAMELEN];
};

struct npf_tableset {
	unsigned		ts_nitems;
	npf_table_t *		ts_map[];
};

#define	NPF_TABLESET_SIZE(n)	\
    (offsetof(npf_tableset_t, ts_map[n]) * sizeof(npf_table_t *))

#define	NPF_IFADDR_STEP		4

static pool_cache_t		tblent_cache	__read_mostly;

/*
 * npf_tableset_sysinit: initialise the tableset structures.
 */
void
npf_tableset_sysinit(void)
{
	tblent_cache = pool_cache_init(sizeof(npf_tblent_t), 0,
	    0, 0, "npftblpl", NULL, IPL_NONE, NULL, NULL, NULL);
}

void
npf_tableset_sysfini(void)
{
	pool_cache_destroy(tblent_cache);
}

npf_tableset_t *
npf_tableset_create(u_int nitems)
{
	npf_tableset_t *ts = kmem_zalloc(NPF_TABLESET_SIZE(nitems), KM_SLEEP);
	ts->ts_nitems = nitems;
	return ts;
}

void
npf_tableset_destroy(npf_tableset_t *ts)
{
	/*
	 * Destroy all tables (no references should be held, since the
	 * ruleset should be destroyed before).
	 */
	for (u_int tid = 0; tid < ts->ts_nitems; tid++) {
		npf_table_t *t = ts->ts_map[tid];

		if (t && atomic_dec_uint_nv(&t->t_refcnt) == 0) {
			npf_table_destroy(t);
		}
	}
	kmem_free(ts, NPF_TABLESET_SIZE(ts->ts_nitems));
}

/*
 * npf_tableset_insert: insert the table into the specified tableset.
 *
 * => Returns 0 on success.  Fails and returns error if ID is already used.
 */
int
npf_tableset_insert(npf_tableset_t *ts, npf_table_t *t)
{
	const u_int tid = t->t_id;
	int error;

	KASSERT((u_int)tid < ts->ts_nitems);

	if (ts->ts_map[tid] == NULL) {
		atomic_inc_uint(&t->t_refcnt);
		ts->ts_map[tid] = t;
		error = 0;
	} else {
		error = EEXIST;
	}
	return error;
}

npf_table_t *
npf_tableset_swap(npf_tableset_t *ts, npf_table_t *newt)
{
	const u_int tid = newt->t_id;
	npf_table_t *oldt = ts->ts_map[tid];

	KASSERT(tid < ts->ts_nitems);
	KASSERT(oldt->t_id == newt->t_id);

	newt->t_refcnt = oldt->t_refcnt;
	oldt->t_refcnt = 0;

	return atomic_swap_ptr(&ts->ts_map[tid], newt);
}

/*
 * npf_tableset_getbyname: look for a table in the set given the name.
 */
npf_table_t *
npf_tableset_getbyname(npf_tableset_t *ts, const char *name)
{
	npf_table_t *t;

	for (u_int tid = 0; tid < ts->ts_nitems; tid++) {
		if ((t = ts->ts_map[tid]) == NULL)
			continue;
		if (strcmp(name, t->t_name) == 0)
			return t;
	}
	return NULL;
}

npf_table_t *
npf_tableset_getbyid(npf_tableset_t *ts, u_int tid)
{
	if (__predict_true(tid < ts->ts_nitems)) {
		return ts->ts_map[tid];
	}
	return NULL;
}

/*
 * npf_tableset_reload: iterate all tables and if the new table is of the
 * same type and has no items, then we preserve the old one and its entries.
 *
 * => The caller is responsible for providing synchronisation.
 */
void
npf_tableset_reload(npf_t *npf, npf_tableset_t *nts, npf_tableset_t *ots)
{
	for (u_int tid = 0; tid < nts->ts_nitems; tid++) {
		npf_table_t *t, *ot;

		if ((t = nts->ts_map[tid]) == NULL) {
			continue;
		}

		/* If our table has entries, just load it. */
		if (t->t_nitems) {
			continue;
		}

		/* Look for a currently existing table with such name. */
		ot = npf_tableset_getbyname(ots, t->t_name);
		if (ot == NULL) {
			/* Not found: we have a new table. */
			continue;
		}

		/* Found.  Did the type change? */
		if (t->t_type != ot->t_type) {
			/* Yes, load the new. */
			continue;
		}

		/*
		 * Preserve the current table.  Acquire a reference since
		 * we are keeping it in the old table set.  Update its ID.
		 */
		atomic_inc_uint(&ot->t_refcnt);
		nts->ts_map[tid] = ot;

		KASSERT(npf_config_locked_p(npf));
		ot->t_id = tid;

		/* Destroy the new table (we hold the only reference). */
		t->t_refcnt--;
		npf_table_destroy(t);
	}
}

int
npf_tableset_export(npf_t *npf, const npf_tableset_t *ts, nvlist_t *npf_dict)
{
	const npf_table_t *t;

	KASSERT(npf_config_locked_p(npf));

	for (u_int tid = 0; tid < ts->ts_nitems; tid++) {
		nvlist_t *table;

		if ((t = ts->ts_map[tid]) == NULL) {
			continue;
		}
		table = nvlist_create(0);
		nvlist_add_string(table, "name", t->t_name);
		nvlist_add_number(table, "type", t->t_type);
		nvlist_add_number(table, "id", tid);

		nvlist_append_nvlist_array(npf_dict, "tables", table);
		nvlist_destroy(table);
	}
	return 0;
}
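
/*
 * Shape of the export above (illustrative; the table name is an example):
 * each table contributes one nvlist appended to the "tables" array of
 * npf_dict, carrying only the metadata, e.g.
 *
 *	tables[0] = { "name": "blocklist", "type": NPF_TABLE_IPSET, "id": 0 }
 *
 * The entries themselves are not exported here; they are copied out
 * separately via npf_table_list().
 */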

/*
 * Few helper routines.
 */

static void
table_ipset_flush(npf_table_t *t)
{
	npf_tblent_t *ent;

	while ((ent = LIST_FIRST(&t->t_list)) != NULL) {
		thmap_del(t->t_map, &ent->te_addr, ent->te_alen);
		LIST_REMOVE(ent, te_listent);
		pool_cache_put(tblent_cache, ent);
	}
	t->t_nitems = 0;
}

static void
table_tree_flush(npf_table_t *t)
{
	npf_tblent_t *ent;

	while ((ent = LIST_FIRST(&t->t_list)) != NULL) {
		LIST_REMOVE(ent, te_listent);
		pool_cache_put(tblent_cache, ent);
	}
	lpm_clear(t->t_lpm, NULL, NULL);
	t->t_nitems = 0;
}

static void
table_ifaddr_flush(npf_table_t *t)
{
	npf_tblent_t *ent;

	for (unsigned i = 0; i < NPF_ADDR_SLOTS; i++) {
		size_t len;

		if (!t->t_allocated[i]) {
			KASSERT(t->t_elements[i] == NULL);
			continue;
		}
		len = t->t_allocated[i] * sizeof(npf_tblent_t *);
		kmem_free(t->t_elements[i], len);
		t->t_elements[i] = NULL;
		t->t_allocated[i] = 0;
		t->t_used[i] = 0;
	}
	while ((ent = LIST_FIRST(&t->t_list)) != NULL) {
		LIST_REMOVE(ent, te_listent);
		pool_cache_put(tblent_cache, ent);
	}
	t->t_nitems = 0;
}

/*
 * npf_table_create: create table with a specified ID.
 */
npf_table_t *
npf_table_create(const char *name, u_int tid, int type,
    const void *blob, size_t size)
{
	npf_table_t *t;

	t = kmem_zalloc(sizeof(npf_table_t), KM_SLEEP);
	strlcpy(t->t_name, name, NPF_TABLE_MAXNAMELEN);

	switch (type) {
	case NPF_TABLE_LPM:
		t->t_lpm = lpm_create(KM_NOSLEEP);
		if (t->t_lpm == NULL) {
			goto out;
		}
		LIST_INIT(&t->t_list);
		break;
	case NPF_TABLE_IPSET:
		t->t_map = thmap_create(0, NULL, THMAP_NOCOPY);
		if (t->t_map == NULL) {
			goto out;
		}
		break;
	case NPF_TABLE_CONST:
		t->t_blob = kmem_alloc(size, KM_SLEEP);
		if (t->t_blob == NULL) {
			goto out;
		}
		memcpy(t->t_blob, blob, size);
		t->t_bsize = size;

		t->t_cdb = cdbr_open_mem(t->t_blob, size,
		    CDBR_DEFAULT, NULL, NULL);
		if (t->t_cdb == NULL) {
			kmem_free(t->t_blob, t->t_bsize);
			goto out;
		}
		t->t_nitems = cdbr_entries(t->t_cdb);
		break;
	case NPF_TABLE_IFADDR:
		break;
	default:
		KASSERT(false);
	}
	mutex_init(&t->t_lock, MUTEX_DEFAULT, IPL_NET);
	t->t_type = type;
	t->t_id = tid;
	return t;
out:
	kmem_free(t, sizeof(npf_table_t));
	return NULL;
}

/*
 * npf_table_destroy: free all table entries and table itself.
 */
void
npf_table_destroy(npf_table_t *t)
{
	KASSERT(t->t_refcnt == 0);

	switch (t->t_type) {
	case NPF_TABLE_IPSET:
		table_ipset_flush(t);
		npf_table_gc(NULL, t);
		thmap_destroy(t->t_map);
		break;
	case NPF_TABLE_LPM:
		table_tree_flush(t);
		lpm_destroy(t->t_lpm);
		break;
	case NPF_TABLE_CONST:
		cdbr_close(t->t_cdb);
		kmem_free(t->t_blob, t->t_bsize);
		break;
	case NPF_TABLE_IFADDR:
		table_ifaddr_flush(t);
		break;
	default:
		KASSERT(false);
	}
	mutex_destroy(&t->t_lock);
	kmem_free(t, sizeof(npf_table_t));
}

u_int
npf_table_getid(npf_table_t *t)
{
	return t->t_id;
}

/*
 * npf_table_check: validate the name, ID and type.
 */
int
npf_table_check(npf_tableset_t *ts, const char *name, uint64_t tid, uint64_t type)
{
	if (tid >= ts->ts_nitems) {
		return EINVAL;
	}
	if (ts->ts_map[tid] != NULL) {
		return EEXIST;
	}
	switch (type) {
	case NPF_TABLE_LPM:
	case NPF_TABLE_IPSET:
	case NPF_TABLE_CONST:
	case NPF_TABLE_IFADDR:
		break;
	default:
		return EINVAL;
	}
	if (strlen(name) >= NPF_TABLE_MAXNAMELEN) {
		return ENAMETOOLONG;
	}
	if (npf_tableset_getbyname(ts, name)) {
		return EEXIST;
	}
	return 0;
}

static int
table_ifaddr_insert(npf_table_t *t, const int alen, npf_tblent_t *ent)
{
	const unsigned aidx = NPF_ADDRLEN2IDX(alen);
	const unsigned allocated = t->t_allocated[aidx];
	const unsigned used = t->t_used[aidx];

	/*
	 * No need to check for duplicates.
	 */
	if (allocated <= used) {
		npf_tblent_t **old_elements = t->t_elements[aidx];
		npf_tblent_t **elements;
		size_t toalloc, newsize;

		toalloc = roundup2(allocated + 1, NPF_IFADDR_STEP);
		newsize = toalloc * sizeof(npf_tblent_t *);

		elements = kmem_zalloc(newsize, KM_NOSLEEP);
		if (elements == NULL) {
			return ENOMEM;
		}
		for (unsigned i = 0; i < used; i++) {
			elements[i] = old_elements[i];
		}
		if (allocated) {
			const size_t len = allocated * sizeof(npf_tblent_t *);
			KASSERT(old_elements != NULL);
			kmem_free(old_elements, len);
		}
		t->t_elements[aidx] = elements;
		t->t_allocated[aidx] = toalloc;
	}
	t->t_elements[aidx][used] = ent;
	t->t_used[aidx]++;
	return 0;
}
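
/*
 * Worked example for the growth step above (illustrative): with
 * NPF_IFADDR_STEP == 4, the per-family element array grows in aligned
 * steps of four pointers:
 *
 *	allocated = 0, used = 0  ->  toalloc = roundup2(1, 4) = 4
 *	allocated = 4, used = 4  ->  toalloc = roundup2(5, 4) = 8
 *
 * Note that roundup2() requires a power-of-2 alignment, which holds
 * for the current step value of 4.
 */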

/*
 * npf_table_insert: add an IP CIDR entry into the table.
 */
int
npf_table_insert(npf_table_t *t, const int alen,
    const npf_addr_t *addr, const npf_netmask_t mask)
{
	npf_tblent_t *ent;
	int error;

	error = npf_netmask_check(alen, mask);
	if (error) {
		return error;
	}
	ent = pool_cache_get(tblent_cache, PR_WAITOK);
	memcpy(&ent->te_addr, addr, alen);
	ent->te_alen = alen;
	ent->te_preflen = 0;

	/*
	 * Insert the entry.  Return an error on duplicate.
	 */
	mutex_enter(&t->t_lock);
	switch (t->t_type) {
	case NPF_TABLE_IPSET:
		/*
		 * Hashmap supports only IPs.
		 *
		 * Note: the key must be already persistent, since we
		 * use THMAP_NOCOPY.
		 */
		if (mask != NPF_NO_NETMASK) {
			error = EINVAL;
			break;
		}
		if (thmap_put(t->t_map, &ent->te_addr, alen, ent) == ent) {
			LIST_INSERT_HEAD(&t->t_list, ent, te_listent);
			t->t_nitems++;
		} else {
			error = EEXIST;
		}
		break;
	case NPF_TABLE_LPM: {
		const unsigned preflen =
		    (mask == NPF_NO_NETMASK) ? (alen * 8) : mask;
		ent->te_preflen = preflen;

		if (lpm_lookup(t->t_lpm, addr, alen) == NULL &&
		    lpm_insert(t->t_lpm, addr, alen, preflen, ent) == 0) {
			LIST_INSERT_HEAD(&t->t_list, ent, te_listent);
			t->t_nitems++;
			error = 0;
		} else {
			error = EEXIST;
		}
		break;
	}
	case NPF_TABLE_CONST:
		error = EINVAL;
		break;
	case NPF_TABLE_IFADDR:
		if ((error = table_ifaddr_insert(t, alen, ent)) != 0) {
			break;
		}
		LIST_INSERT_HEAD(&t->t_list, ent, te_listent);
		t->t_nitems++;
		break;
	default:
		KASSERT(false);
	}
	mutex_exit(&t->t_lock);

	if (error) {
		pool_cache_put(tblent_cache, ent);
	}
	return error;
}
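
/*
 * Illustrative call (error handling elided; the address below is an
 * example value from the 192.0.2.0/24 documentation range, stored in
 * network byte order, inserted into an NPF_TABLE_LPM table -- the
 * NPF_TABLE_IPSET type accepts only NPF_NO_NETMASK):
 *
 *	npf_addr_t addr;
 *	memset(&addr, 0, sizeof(addr));
 *	memcpy(&addr, "\xc0\x00\x02\x00", 4);
 *	error = npf_table_insert(t, 4, &addr, 24);
 *
 * The same (alen, addr, mask) tuple can later be passed to
 * npf_table_remove() to delete the entry.
 */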

/*
 * npf_table_remove: remove the IP CIDR entry from the table.
 */
int
npf_table_remove(npf_table_t *t, const int alen,
    const npf_addr_t *addr, const npf_netmask_t mask)
{
	npf_tblent_t *ent = NULL;
	int error;

	error = npf_netmask_check(alen, mask);
	if (error) {
		return error;
	}

	mutex_enter(&t->t_lock);
	switch (t->t_type) {
	case NPF_TABLE_IPSET:
		ent = thmap_del(t->t_map, addr, alen);
		if (__predict_true(ent != NULL)) {
			LIST_REMOVE(ent, te_listent);
			LIST_INSERT_HEAD(&t->t_gc, ent, te_listent);
			ent = NULL; // to be G/C'ed
			t->t_nitems--;
		} else {
			error = ENOENT;
		}
		break;
	case NPF_TABLE_LPM:
		ent = lpm_lookup(t->t_lpm, addr, alen);
		if (__predict_true(ent != NULL)) {
			LIST_REMOVE(ent, te_listent);
			lpm_remove(t->t_lpm, &ent->te_addr,
			    ent->te_alen, ent->te_preflen);
			t->t_nitems--;
		} else {
			error = ENOENT;
		}
		break;
	case NPF_TABLE_CONST:
	case NPF_TABLE_IFADDR:
		error = EINVAL;
		break;
	default:
		KASSERT(false);
		ent = NULL;
	}
	mutex_exit(&t->t_lock);

	if (ent) {
		pool_cache_put(tblent_cache, ent);
	}
	return error;
}

/*
 * npf_table_lookup: lookup the specified IP address in the given table.
 *
 * => Returns zero if the address matches an entry, otherwise ENOENT.
 */
int
npf_table_lookup(npf_table_t *t, const int alen, const npf_addr_t *addr)
{
	const void *data;
	size_t dlen;
	bool found;
	int error;

	error = npf_netmask_check(alen, NPF_NO_NETMASK);
	if (error) {
		return error;
	}

	switch (t->t_type) {
	case NPF_TABLE_IPSET:
		found = thmap_get(t->t_map, addr, alen) != NULL;
		break;
	case NPF_TABLE_LPM:
		mutex_enter(&t->t_lock);
		found = lpm_lookup(t->t_lpm, addr, alen) != NULL;
		mutex_exit(&t->t_lock);
		break;
	case NPF_TABLE_CONST:
		if (cdbr_find(t->t_cdb, addr, alen, &data, &dlen) == 0) {
			found = dlen == (unsigned)alen &&
			    memcmp(addr, data, dlen) == 0;
		} else {
			found = false;
		}
		break;
	case NPF_TABLE_IFADDR: {
		const unsigned aidx = NPF_ADDRLEN2IDX(alen);

		found = false;
		for (unsigned i = 0; i < t->t_used[aidx]; i++) {
			const npf_tblent_t *elm = t->t_elements[aidx][i];

			KASSERT(elm->te_alen == alen);

			if (memcmp(&elm->te_addr, addr, alen) == 0) {
				found = true;
				break;
			}
		}
		break;
	}
	default:
		KASSERT(false);
		found = false;
	}

	return found ? 0 : ENOENT;
}

npf_addr_t *
npf_table_getsome(npf_table_t *t, const int alen, unsigned idx)
{
	const unsigned aidx = NPF_ADDRLEN2IDX(alen);
	npf_tblent_t *elm;
	unsigned nitems;

	KASSERT(t->t_type == NPF_TABLE_IFADDR);
	KASSERT(aidx < NPF_ADDR_SLOTS);

	nitems = t->t_used[aidx];
	if (nitems == 0) {
		return NULL;
	}

	/*
	 * No need to acquire the lock, since the table is immutable.
	 */
	elm = t->t_elements[aidx][idx % nitems];
	return &elm->te_addr;
}
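
/*
 * Illustrative use of npf_table_getsome() above: a caller can spread
 * its picks over the interface addresses by passing a varying index,
 * e.g. a hash or a counter (the "hash" variable is hypothetical):
 *
 *	npf_addr_t *taddr = npf_table_getsome(t, 4, hash);
 *
 * Since the selection is (idx % nitems), any unsigned value is valid.
 */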

static int
table_ent_copyout(const npf_addr_t *addr, const int alen, npf_netmask_t mask,
    void *ubuf, size_t len, size_t *off)
{
	void *ubufp = (uint8_t *)ubuf + *off;
	npf_ioctl_ent_t uent;

	if ((*off += sizeof(npf_ioctl_ent_t)) > len) {
		return ENOMEM;
	}
	uent.alen = alen;
	memcpy(&uent.addr, addr, sizeof(npf_addr_t));
	uent.mask = mask;

	return copyout(&uent, ubufp, sizeof(npf_ioctl_ent_t));
}

static int
table_generic_list(const npf_table_t *t, void *ubuf, size_t len)
{
	npf_tblent_t *ent;
	size_t off = 0;
	int error = 0;

	LIST_FOREACH(ent, &t->t_list, te_listent) {
		error = table_ent_copyout(&ent->te_addr,
		    ent->te_alen, ent->te_preflen, ubuf, len, &off);
		if (error)
			break;
	}
	return error;
}

static int
table_cdb_list(npf_table_t *t, void *ubuf, size_t len)
{
	size_t off = 0, dlen;
	const void *data;
	int error = 0;

	for (size_t i = 0; i < t->t_nitems; i++) {
		if (cdbr_get(t->t_cdb, i, &data, &dlen) != 0) {
			return EINVAL;
		}
		error = table_ent_copyout(data, dlen, 0, ubuf, len, &off);
		if (error)
			break;
	}
	return error;
}

/*
 * npf_table_list: copy a list of all table entries into a userspace buffer.
 */
int
npf_table_list(npf_table_t *t, void *ubuf, size_t len)
{
	int error = 0;

	mutex_enter(&t->t_lock);
	switch (t->t_type) {
	case NPF_TABLE_IPSET:
		error = table_generic_list(t, ubuf, len);
		break;
	case NPF_TABLE_LPM:
		error = table_generic_list(t, ubuf, len);
		break;
	case NPF_TABLE_CONST:
		error = table_cdb_list(t, ubuf, len);
		break;
	case NPF_TABLE_IFADDR:
		error = table_generic_list(t, ubuf, len);
		break;
	default:
		KASSERT(false);
	}
	mutex_exit(&t->t_lock);

	return error;
}
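
/*
 * Sizing note for npf_table_list() above: each entry is copied out as
 * one npf_ioctl_ent_t at an increasing offset, so the destination buffer
 * must provide at least (number of entries) * sizeof(npf_ioctl_ent_t)
 * bytes, e.g. 100 entries need 100 * sizeof(npf_ioctl_ent_t) bytes;
 * table_ent_copyout() returns ENOMEM once the offset would exceed len.
 */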

/*
 * npf_table_flush: remove all table entries.
 */
int
npf_table_flush(npf_table_t *t)
{
	int error = 0;

	mutex_enter(&t->t_lock);
	switch (t->t_type) {
	case NPF_TABLE_IPSET:
		table_ipset_flush(t);
		break;
	case NPF_TABLE_LPM:
		table_tree_flush(t);
		break;
	case NPF_TABLE_CONST:
	case NPF_TABLE_IFADDR:
		error = EINVAL;
		break;
	default:
		KASSERT(false);
	}
	mutex_exit(&t->t_lock);
	return error;
}

void
npf_table_gc(npf_t *npf, npf_table_t *t)
{
	npf_tblent_t *ent;
	void *ref;

	if (t->t_type != NPF_TABLE_IPSET || LIST_EMPTY(&t->t_gc)) {
		return;
	}

	ref = thmap_stage_gc(t->t_map);
	if (npf) {
		KASSERT(npf_config_locked_p(npf));
		npf_config_sync(npf);
	}
	thmap_gc(t->t_map, ref);

	while ((ent = LIST_FIRST(&t->t_gc)) != NULL) {
		LIST_REMOVE(ent, te_listent);
		pool_cache_put(tblent_cache, ent);
	}
}