/*	$NetBSD: npf_tableset.c,v 1.18 2013/05/19 20:45:34 rmind Exp $	*/

/*-
 * Copyright (c) 2009-2012 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This material is based upon work partially supported by The
 * NetBSD Foundation under a contract with Mindaugas Rasiukevicius.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * NPF tableset module.
 *
 * Notes
 *
 *	The tableset is an array of tables.  After creation, the array
 *	is immutable.  The caller is responsible for synchronising access
 *	to the tableset.  A table can be either a hash table or a tree;
 *	its entries are protected by a read-write lock.
 */
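
/*
 * Illustrative usage sketch (not compiled in): roughly how a caller could
 * create a tableset, attach a hash table and query it, using only the
 * functions defined in this file.  The table ID (0), the hash size (1024)
 * and the "addr" variable are arbitrary example values, not part of the
 * actual NPF configuration path.
 */
#if 0
	npf_tableset_t *tset = npf_tableset_create();
	npf_table_t *t = npf_table_create(0, NPF_TABLE_HASH, 1024);
	npf_addr_t addr;	/* assume this holds an IPv4 address */
	int error;

	/* Attach the table; the tableset now holds a reference to it. */
	error = npf_tableset_insert(tset, t);

	/* Hash tables store single addresses, so pass NPF_NO_NETMASK. */
	error = npf_table_insert(tset, 0, sizeof(struct in_addr),
	    &addr, NPF_NO_NETMASK);

	/* Returns 0 if the address is in the table, ENOENT otherwise. */
	error = npf_table_lookup(tset, 0, sizeof(struct in_addr), &addr);

	/* Drop the tableset together with its tables. */
	npf_tableset_destroy(tset);
#endif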

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: npf_tableset.c,v 1.18 2013/05/19 20:45:34 rmind Exp $");

#include <sys/param.h>
#include <sys/types.h>

#include <sys/atomic.h>
#include <sys/hash.h>
#include <sys/kmem.h>
#include <sys/pool.h>
#include <sys/queue.h>
#include <sys/rwlock.h>
#include <sys/systm.h>

#include "npf_impl.h"

/*
 * Table structures.
 */

typedef struct npf_tblent {
	union {
		LIST_ENTRY(npf_tblent)	hashq;
		pt_node_t		node;
	} te_entry;
	int			te_alen;
	npf_addr_t		te_addr;
} npf_tblent_t;

LIST_HEAD(npf_hashl, npf_tblent);

struct npf_table {
	char			t_name[16];
	/* Lock and reference count. */
	krwlock_t		t_lock;
	u_int			t_refcnt;
	/* Total number of items. */
	u_int			t_nitems;
	/* Table ID. */
	u_int			t_id;
	/* The storage type can be: a) hash b) tree. */
	int			t_type;
	struct npf_hashl *	t_hashl;
	u_long			t_hashmask;
	/* Separate trees for IPv4 and IPv6. */
	pt_tree_t		t_tree[2];
};

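/*
 * NPF_ADDRLEN2TREE: map the address length to the tree index used below,
 * i.e. sizeof(struct in_addr) == 4 maps to 0 (IPv4) and
 * sizeof(struct in6_addr) == 16 maps to 1 (IPv6).
 */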
#define	NPF_ADDRLEN2TREE(alen)	((alen) >> 4)

static pool_cache_t		tblent_cache	__read_mostly;

/*
 * npf_tableset_sysinit: initialise the tableset structures.
 */
void
npf_tableset_sysinit(void)
{

	tblent_cache = pool_cache_init(sizeof(npf_tblent_t), coherency_unit,
	    0, 0, "npftblpl", NULL, IPL_NONE, NULL, NULL, NULL);
}

void
npf_tableset_sysfini(void)
{

	pool_cache_destroy(tblent_cache);
}

npf_tableset_t *
npf_tableset_create(void)
{
	const size_t sz = NPF_TABLE_SLOTS * sizeof(npf_table_t *);

	return kmem_zalloc(sz, KM_SLEEP);
}

void
npf_tableset_destroy(npf_tableset_t *tblset)
{
	const size_t sz = NPF_TABLE_SLOTS * sizeof(npf_table_t *);
	npf_table_t *t;
	u_int tid;

	/*
	 * Destroy all tables (no references should be held, since the
	 * ruleset should have been destroyed first).
	 */
	for (tid = 0; tid < NPF_TABLE_SLOTS; tid++) {
		t = tblset[tid];
		if (t && atomic_dec_uint_nv(&t->t_refcnt) == 0) {
			npf_table_destroy(t);
		}
	}
	kmem_free(tblset, sz);
}

/*
 * npf_tableset_insert: insert the table into the specified tableset.
 *
 * => Returns 0 on success; fails with EEXIST if the ID is already used.
 */
int
npf_tableset_insert(npf_tableset_t *tblset, npf_table_t *t)
{
	const u_int tid = t->t_id;
	int error;

	KASSERT((u_int)tid < NPF_TABLE_SLOTS);

	if (tblset[tid] == NULL) {
		atomic_inc_uint(&t->t_refcnt);
		tblset[tid] = t;
		error = 0;
	} else {
		error = EEXIST;
	}
	return error;
}

/*
 * npf_tableset_reload: iterate over all tables and, if a new table is of
 * the same type as the old one and has no items yet, preserve the old
 * table together with its entries (see the illustrative sketch below).
 *
 * => The caller is responsible for providing synchronisation.
 */
void
npf_tableset_reload(npf_tableset_t *ntset, npf_tableset_t *otset)
{
	for (int i = 0; i < NPF_TABLE_SLOTS; i++) {
		npf_table_t *t = ntset[i], *ot = otset[i];

		if (t == NULL || ot == NULL) {
			continue;
		}
		if (t->t_nitems || t->t_type != ot->t_type) {
			continue;
		}

		/*
		 * Acquire an additional reference, since the table is now
		 * referenced by both the old and the new tableset.
		 */
		atomic_inc_uint(&ot->t_refcnt);
		ntset[i] = ot;

		/* The new table was never visible; drop its only reference. */
		t->t_refcnt--;
		npf_table_destroy(t);
	}
}
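
/*
 * Illustrative sketch (not compiled in): roughly how a reload path could
 * use npf_tableset_reload().  The "new_tset"/"old_tset" names are
 * hypothetical; the actual configuration reload logic lives elsewhere
 * in NPF.
 */
#if 0
	/* Build a tableset from the new configuration. */
	npf_tableset_t *new_tset = npf_tableset_create();
	/* ... npf_table_create() and npf_tableset_insert() per table ... */

	/* Carry over still-populated tables of an unchanged type. */
	npf_tableset_reload(new_tset, old_tset);

	/* After new_tset has been installed, drop the old tableset. */
	npf_tableset_destroy(old_tset);
#endif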

/*
 * A few helper routines.
 */

static npf_tblent_t *
table_hash_lookup(const npf_table_t *t, const npf_addr_t *addr,
    const int alen, struct npf_hashl **rhtbl)
{
	const uint32_t hidx = hash32_buf(addr, alen, HASH32_BUF_INIT);
	struct npf_hashl *htbl = &t->t_hashl[hidx & t->t_hashmask];
	npf_tblent_t *ent;

	/*
	 * Lookup the hash table and check for duplicates.
	 * Note: the mask is ignored for the hash storage.
	 */
	LIST_FOREACH(ent, htbl, te_entry.hashq) {
		if (ent->te_alen != alen) {
			continue;
		}
		if (memcmp(&ent->te_addr, addr, alen) == 0) {
			break;
		}
	}
	*rhtbl = htbl;
	return ent;
}

static void
table_hash_destroy(npf_table_t *t)
{
	for (unsigned n = 0; n <= t->t_hashmask; n++) {
		npf_tblent_t *ent;

		while ((ent = LIST_FIRST(&t->t_hashl[n])) != NULL) {
			LIST_REMOVE(ent, te_entry.hashq);
			pool_cache_put(tblent_cache, ent);
		}
	}
}

static void
table_tree_destroy(pt_tree_t *tree)
{
	npf_tblent_t *ent;

	while ((ent = ptree_iterate(tree, NULL, PT_ASCENDING)) != NULL) {
		ptree_remove_node(tree, ent);
		pool_cache_put(tblent_cache, ent);
	}
}

/*
 * npf_table_create: create a table with the specified ID.
 */
npf_table_t *
npf_table_create(u_int tid, int type, size_t hsize)
{
	npf_table_t *t;

	KASSERT((u_int)tid < NPF_TABLE_SLOTS);

	t = kmem_zalloc(sizeof(npf_table_t), KM_SLEEP);
	switch (type) {
	case NPF_TABLE_TREE:
		ptree_init(&t->t_tree[0], &npf_table_ptree_ops,
		    (void *)(sizeof(struct in_addr) / sizeof(uint32_t)),
		    offsetof(npf_tblent_t, te_entry.node),
		    offsetof(npf_tblent_t, te_addr));
		ptree_init(&t->t_tree[1], &npf_table_ptree_ops,
		    (void *)(sizeof(struct in6_addr) / sizeof(uint32_t)),
		    offsetof(npf_tblent_t, te_entry.node),
		    offsetof(npf_tblent_t, te_addr));
		break;
	case NPF_TABLE_HASH:
		t->t_hashl = hashinit(hsize, HASH_LIST, true, &t->t_hashmask);
		if (t->t_hashl == NULL) {
			kmem_free(t, sizeof(npf_table_t));
			return NULL;
		}
		break;
	default:
		KASSERT(false);
	}
	rw_init(&t->t_lock);
	t->t_type = type;
	t->t_id = tid;

	return t;
}

/*
 * npf_table_destroy: free all table entries and the table itself.
 */
void
npf_table_destroy(npf_table_t *t)
{
	KASSERT(t->t_refcnt == 0);

	switch (t->t_type) {
	case NPF_TABLE_HASH:
		table_hash_destroy(t);
		hashdone(t->t_hashl, HASH_LIST, t->t_hashmask);
		break;
	case NPF_TABLE_TREE:
		table_tree_destroy(&t->t_tree[0]);
		table_tree_destroy(&t->t_tree[1]);
		break;
	default:
		KASSERT(false);
	}
	rw_destroy(&t->t_lock);
	kmem_free(t, sizeof(npf_table_t));
}

/*
 * npf_table_check: validate the ID and type.
 */
int
npf_table_check(const npf_tableset_t *tset, u_int tid, int type)
{

	if ((u_int)tid >= NPF_TABLE_SLOTS) {
		return EINVAL;
	}
	if (tset[tid] != NULL) {
		return EEXIST;
	}
	if (type != NPF_TABLE_TREE && type != NPF_TABLE_HASH) {
		return EINVAL;
	}
	return 0;
}
330 1.1 rmind
331 1.13 rmind static int
332 1.15 rmind table_cidr_check(const u_int aidx, const npf_addr_t *addr,
333 1.13 rmind const npf_netmask_t mask)
334 1.13 rmind {
335 1.13 rmind
336 1.13 rmind if (mask > NPF_MAX_NETMASK && mask != NPF_NO_NETMASK) {
337 1.13 rmind return EINVAL;
338 1.13 rmind }
339 1.13 rmind if (aidx > 1) {
340 1.13 rmind return EINVAL;
341 1.13 rmind }
342 1.13 rmind
343 1.13 rmind /*
344 1.13 rmind * For IPv4 (aidx = 0) - 32 and for IPv6 (aidx = 1) - 128.
345 1.13 rmind * If it is a host - shall use NPF_NO_NETMASK.
346 1.13 rmind */
347 1.13 rmind if (mask >= (aidx ? 128 : 32) && mask != NPF_NO_NETMASK) {
348 1.13 rmind return EINVAL;
349 1.13 rmind }
350 1.13 rmind return 0;
351 1.13 rmind }
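
/*
 * For illustration, a few example argument combinations and the result
 * table_cidr_check() would return for them (derived from the checks above):
 *
 *	aidx = 0 (IPv4), mask = 24		-> 0 (accepted)
 *	aidx = 0 (IPv4), mask = 32		-> EINVAL (use NPF_NO_NETMASK)
 *	aidx = 1 (IPv6), mask = NPF_NO_NETMASK	-> 0 (accepted)
 *	aidx = 2				-> EINVAL (invalid address length)
 */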

/*
 * npf_table_insert: add an IP CIDR entry into the table.
 */
int
npf_table_insert(npf_tableset_t *tset, u_int tid, const int alen,
    const npf_addr_t *addr, const npf_netmask_t mask)
{
	const u_int aidx = NPF_ADDRLEN2TREE(alen);
	npf_tblent_t *ent;
	npf_table_t *t;
	int error;

	if ((u_int)tid >= NPF_TABLE_SLOTS || (t = tset[tid]) == NULL) {
		return EINVAL;
	}

	error = table_cidr_check(aidx, addr, mask);
	if (error) {
		return error;
	}
	ent = pool_cache_get(tblent_cache, PR_WAITOK);
	memcpy(&ent->te_addr, addr, alen);
	ent->te_alen = alen;

	/*
	 * Insert the entry.  Return an error on duplicate.
	 */
	rw_enter(&t->t_lock, RW_WRITER);
	switch (t->t_type) {
	case NPF_TABLE_HASH: {
		struct npf_hashl *htbl;

		/*
		 * Hash tables, by design, support only single addresses
		 * and not prefixes (masks).
		 */
		if (mask != NPF_NO_NETMASK) {
			error = EINVAL;
			break;
		}
		if (!table_hash_lookup(t, addr, alen, &htbl)) {
			LIST_INSERT_HEAD(htbl, ent, te_entry.hashq);
			t->t_nitems++;
		} else {
			error = EEXIST;
		}
		break;
	}
	case NPF_TABLE_TREE: {
		pt_tree_t *tree = &t->t_tree[aidx];
		bool ok;

		/*
		 * If no mask is specified, use the maximum mask.
		 */
		ok = (mask != NPF_NO_NETMASK) ?
		    ptree_insert_mask_node(tree, ent, mask) :
		    ptree_insert_node(tree, ent);
		if (ok) {
			t->t_nitems++;
			error = 0;
		} else {
			error = EEXIST;
		}
		break;
	}
	default:
		KASSERT(false);
	}
	rw_exit(&t->t_lock);

	if (error) {
		pool_cache_put(tblent_cache, ent);
	}
	return error;
}

/*
 * npf_table_remove: remove the IP CIDR entry from the table.
 */
int
npf_table_remove(npf_tableset_t *tset, u_int tid, const int alen,
    const npf_addr_t *addr, const npf_netmask_t mask)
{
	const u_int aidx = NPF_ADDRLEN2TREE(alen);
	npf_tblent_t *ent;
	npf_table_t *t;
	int error;

	error = table_cidr_check(aidx, addr, mask);
	if (error) {
		return error;
	}

	if ((u_int)tid >= NPF_TABLE_SLOTS || (t = tset[tid]) == NULL) {
		return EINVAL;
	}

	rw_enter(&t->t_lock, RW_WRITER);
	switch (t->t_type) {
	case NPF_TABLE_HASH: {
		struct npf_hashl *htbl;

		ent = table_hash_lookup(t, addr, alen, &htbl);
		if (__predict_true(ent != NULL)) {
			LIST_REMOVE(ent, te_entry.hashq);
			t->t_nitems--;
		}
		break;
	}
	case NPF_TABLE_TREE: {
		pt_tree_t *tree = &t->t_tree[aidx];

		ent = ptree_find_node(tree, addr);
		if (__predict_true(ent != NULL)) {
			ptree_remove_node(tree, ent);
			t->t_nitems--;
		}
		break;
	}
	default:
		KASSERT(false);
		ent = NULL;
	}
	rw_exit(&t->t_lock);

	if (ent == NULL) {
		return ENOENT;
	}
	pool_cache_put(tblent_cache, ent);
	return 0;
}

/*
 * npf_table_lookup: find the table by the given ID and check whether
 * it contains the specified IP address.
 */
int
npf_table_lookup(npf_tableset_t *tset, u_int tid,
    const int alen, const npf_addr_t *addr)
{
	const u_int aidx = NPF_ADDRLEN2TREE(alen);
	npf_tblent_t *ent;
	npf_table_t *t;

	if (__predict_false(aidx > 1)) {
		return EINVAL;
	}

	if ((u_int)tid >= NPF_TABLE_SLOTS || (t = tset[tid]) == NULL) {
		return EINVAL;
	}

	rw_enter(&t->t_lock, RW_READER);
	switch (t->t_type) {
	case NPF_TABLE_HASH: {
		struct npf_hashl *htbl;
		ent = table_hash_lookup(t, addr, alen, &htbl);
		break;
	}
	case NPF_TABLE_TREE: {
		ent = ptree_find_node(&t->t_tree[aidx], addr);
		break;
	}
	default:
		KASSERT(false);
		ent = NULL;
	}
	rw_exit(&t->t_lock);

	return ent ? 0 : ENOENT;
}

static int
table_ent_copyout(npf_tblent_t *ent, npf_netmask_t mask,
    void *ubuf, size_t len, size_t *off)
{
	void *ubufp = (uint8_t *)ubuf + *off;
	npf_ioctl_ent_t uent;

	if ((*off += sizeof(npf_ioctl_ent_t)) > len) {
		return ENOMEM;
	}
	uent.alen = ent->te_alen;
	memcpy(&uent.addr, &ent->te_addr, sizeof(npf_addr_t));
	uent.mask = mask;

	return copyout(&uent, ubufp, sizeof(npf_ioctl_ent_t));
}

static int
table_tree_list(pt_tree_t *tree, npf_netmask_t maxmask, void *ubuf,
    size_t len, size_t *off)
{
	npf_tblent_t *ent = NULL;
	int error = 0;

	while ((ent = ptree_iterate(tree, ent, PT_ASCENDING)) != NULL) {
		pt_bitlen_t blen;

		if (!ptree_mask_node_p(tree, ent, &blen)) {
			blen = maxmask;
		}
		error = table_ent_copyout(ent, blen, ubuf, len, off);
		if (error)
			break;
	}
	return error;
}

/*
 * npf_table_list: copy a list of all table entries into a userspace buffer.
 */
int
npf_table_list(npf_tableset_t *tset, u_int tid, void *ubuf, size_t len)
{
	npf_table_t *t;
	size_t off = 0;
	int error = 0;

	if ((u_int)tid >= NPF_TABLE_SLOTS || (t = tset[tid]) == NULL) {
		return EINVAL;
	}

	rw_enter(&t->t_lock, RW_READER);
	switch (t->t_type) {
	case NPF_TABLE_HASH:
		for (unsigned n = 0; n <= t->t_hashmask; n++) {
			npf_tblent_t *ent;

			LIST_FOREACH(ent, &t->t_hashl[n], te_entry.hashq)
				if ((error = table_ent_copyout(ent, 0, ubuf,
				    len, &off)) != 0)
					break;
		}
		break;
	case NPF_TABLE_TREE:
		error = table_tree_list(&t->t_tree[0], 32, ubuf, len, &off);
		if (error)
			break;
		error = table_tree_list(&t->t_tree[1], 128, ubuf, len, &off);
		break;
	default:
		KASSERT(false);
	}
	rw_exit(&t->t_lock);

	return error;
}
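
/*
 * Note on the buffer sizing for npf_table_list(): each entry consumes
 * sizeof(npf_ioctl_ent_t) bytes of the user buffer and 'off' tracks the
 * space consumed so far, so the caller is expected to supply a buffer of
 * at least t_nitems * sizeof(npf_ioctl_ent_t) bytes; ENOMEM is returned
 * once an entry does not fit.
 */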

/*
 * npf_table_flush: remove all table entries.
 */
int
npf_table_flush(npf_tableset_t *tset, u_int tid)
{
	npf_table_t *t;

	if ((u_int)tid >= NPF_TABLE_SLOTS || (t = tset[tid]) == NULL) {
		return EINVAL;
	}

	rw_enter(&t->t_lock, RW_WRITER);
	switch (t->t_type) {
	case NPF_TABLE_HASH:
		table_hash_destroy(t);
		t->t_nitems = 0;
		break;
	case NPF_TABLE_TREE:
		table_tree_destroy(&t->t_tree[0]);
		table_tree_destroy(&t->t_tree[1]);
		t->t_nitems = 0;
		break;
	default:
		KASSERT(false);
	}
	rw_exit(&t->t_lock);

	return 0;
}