/*	$NetBSD: npf_tableset.c,v 1.20 2013/11/22 00:25:51 rmind Exp $	*/

/*-
 * Copyright (c) 2009-2013 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This material is based upon work partially supported by The
 * NetBSD Foundation under a contract with Mindaugas Rasiukevicius.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * NPF tableset module.
 *
 * Notes
 *
 *	The tableset is an array of tables.  After creation, the array
 *	is immutable.  The caller is responsible for synchronising access
 *	to the tableset.  Each table can be either a hash or a tree.  Its
 *	entries are protected by a read-write lock.
 */
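
/*
 * Typical usage of this module (an illustrative sketch only; error
 * handling is omitted and the actual call sites live elsewhere in NPF):
 *
 *	npf_tableset_t *ts = npf_tableset_create(ntables);
 *	npf_table_t *t = npf_table_create("example", tid, NPF_TABLE_TREE, 0);
 *
 *	npf_tableset_insert(ts, t);		// tableset takes a reference
 *	npf_table_insert(t, alen, addr, mask);	// alen: 4 (IPv4) or 16 (IPv6)
 *	npf_table_lookup(t, alen, addr);	// 0 if present, ENOENT if not
 *
 *	npf_tableset_destroy(ts);		// drops references, frees tables
 */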

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: npf_tableset.c,v 1.20 2013/11/22 00:25:51 rmind Exp $");

#include <sys/param.h>
#include <sys/types.h>

#include <sys/atomic.h>
#include <sys/hash.h>
#include <sys/kmem.h>
#include <sys/pool.h>
#include <sys/queue.h>
#include <sys/rwlock.h>
#include <sys/systm.h>
#include <sys/types.h>

#include "npf_impl.h"

typedef struct npf_tblent {
	union {
		LIST_ENTRY(npf_tblent) hashq;
		pt_node_t	node;
	} te_entry;
	int		te_alen;
	npf_addr_t	te_addr;
} npf_tblent_t;

LIST_HEAD(npf_hashl, npf_tblent);

struct npf_table {
	/*
	 * The storage type can be: a) hash  b) tree.
	 * There are separate trees for IPv4 and IPv6.
	 */
	struct npf_hashl *	t_hashl;
	u_long			t_hashmask;
	pt_tree_t		t_tree[2];

	/*
	 * Table ID, type and lock.  The ID may change during the
	 * config reload; it is protected by npf_config_lock.
	 */
	int			t_type;
	u_int			t_id;
	krwlock_t		t_lock;

	/* The number of items, reference count and table name. */
	u_int			t_nitems;
	u_int			t_refcnt;
	char			t_name[NPF_TABLE_MAXNAMELEN];
};

struct npf_tableset {
	u_int			ts_nitems;
	npf_table_t *		ts_map[];
};

#define	NPF_TABLESET_SIZE(n)	\
    (offsetof(npf_tableset_t, ts_map[n]) * sizeof(npf_table_t *))

#define	NPF_ADDRLEN2TREE(alen)	((alen) >> 4)

static pool_cache_t		tblent_cache	__read_mostly;
/*
 * npf_tableset_sysinit: initialise the tableset structures.
 */
void
npf_tableset_sysinit(void)
{
	tblent_cache = pool_cache_init(sizeof(npf_tblent_t), coherency_unit,
	    0, 0, "npftblpl", NULL, IPL_NONE, NULL, NULL, NULL);
}

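/*
 * npf_tableset_sysfini: destroy the tableset structures (i.e. the
 * table entry pool cache).
 */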
void
npf_tableset_sysfini(void)
{
	pool_cache_destroy(tblent_cache);
}

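/*
 * npf_tableset_create: allocate and initialise a tableset with slots
 * for the given number of tables.
 */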
npf_tableset_t *
npf_tableset_create(u_int nitems)
{
	npf_tableset_t *ts = kmem_zalloc(NPF_TABLESET_SIZE(nitems), KM_SLEEP);
	ts->ts_nitems = nitems;
	return ts;
}

void
npf_tableset_destroy(npf_tableset_t *ts)
{
	/*
	 * Destroy all tables (no references should be held, as the
	 * ruleset should have been destroyed first).
	 */
	for (u_int tid = 0; tid < ts->ts_nitems; tid++) {
		npf_table_t *t = ts->ts_map[tid];

		if (t && atomic_dec_uint_nv(&t->t_refcnt) == 0) {
			npf_table_destroy(t);
		}
	}
	kmem_free(ts, NPF_TABLESET_SIZE(ts->ts_nitems));
}

/*
 * npf_tableset_insert: insert the table into the specified tableset.
 *
 * => Returns 0 on success, or EEXIST if the table ID is already used.
 */
int
npf_tableset_insert(npf_tableset_t *ts, npf_table_t *t)
{
	const u_int tid = t->t_id;
	int error;

	KASSERT((u_int)tid < ts->ts_nitems);

	if (ts->ts_map[tid] == NULL) {
		atomic_inc_uint(&t->t_refcnt);
		ts->ts_map[tid] = t;
		error = 0;
	} else {
		error = EEXIST;
	}
	return error;
}

/*
 * npf_tableset_getbyname: look for a table in the set given the name.
 */
npf_table_t *
npf_tableset_getbyname(npf_tableset_t *ts, const char *name)
{
	npf_table_t *t;

	for (u_int tid = 0; tid < ts->ts_nitems; tid++) {
		if ((t = ts->ts_map[tid]) == NULL)
			continue;
		if (strcmp(name, t->t_name) == 0)
			return t;
	}
	return NULL;
}

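/*
 * npf_tableset_getbyid: return the table with the given ID, or NULL if
 * the ID is out of range or no table occupies that slot.
 */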
npf_table_t *
npf_tableset_getbyid(npf_tableset_t *ts, u_int tid)
{
	if (__predict_true(tid < ts->ts_nitems)) {
		return ts->ts_map[tid];
	}
	return NULL;
}

/*
 * npf_tableset_reload: iterate over the tables in the new set; if a new
 * table has no items and an existing table with the same name and type
 * is found, then preserve the old table and its entries.
 *
 * => The caller is responsible for providing synchronisation.
 */
void
npf_tableset_reload(npf_tableset_t *nts, npf_tableset_t *ots)
{
	for (u_int tid = 0; tid < nts->ts_nitems; tid++) {
		npf_table_t *t, *ot;

		if ((t = nts->ts_map[tid]) == NULL) {
			continue;
		}

		/* If our table has entries, just load it. */
		if (t->t_nitems) {
			continue;
		}

		/* Look for an existing table with the same name. */
		ot = npf_tableset_getbyname(ots, t->t_name);
		if (ot == NULL) {
			/* Not found: we have a new table. */
			continue;
		}

		/* Found.  Did the type change? */
		if (t->t_type != ot->t_type) {
			/* Yes: load the new table. */
			continue;
		}

		/*
		 * Preserve the existing table.  Acquire a reference, since
		 * we are keeping it in the new tableset.  Update its ID.
		 */
		atomic_inc_uint(&ot->t_refcnt);
		nts->ts_map[tid] = ot;

		KASSERT(npf_config_locked_p());
		ot->t_id = tid;

		/* Destroy the new table (we hold the only reference). */
		t->t_refcnt--;
		npf_table_destroy(t);
	}
}

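/*
 * npf_tableset_syncdict: rebuild the "tables" array in the given proplib
 * dictionary so that it describes the current tableset (the name, type
 * and ID of each table).
 */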
void
npf_tableset_syncdict(const npf_tableset_t *ts, prop_dictionary_t ndict)
{
	prop_array_t tables = prop_array_create();
	const npf_table_t *t;

	KASSERT(npf_config_locked_p());

	for (u_int tid = 0; tid < ts->ts_nitems; tid++) {
		if ((t = ts->ts_map[tid]) == NULL) {
			continue;
		}
		prop_dictionary_t tdict = prop_dictionary_create();
		prop_dictionary_set_cstring(tdict, "name", t->t_name);
		prop_dictionary_set_uint32(tdict, "type", t->t_type);
		prop_dictionary_set_uint32(tdict, "id", tid);

		prop_array_add(tables, tdict);
		prop_object_release(tdict);
	}
	prop_dictionary_remove(ndict, "tables");
	prop_dictionary_set(ndict, "tables", tables);
	prop_object_release(tables);
}

/*
 * Few helper routines.
 */

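/*
 * table_hash_lookup: find the hash bucket for the given address and
 * return the matching entry, if any.  The bucket is always returned
 * via 'rhtbl'.
 */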
static npf_tblent_t *
table_hash_lookup(const npf_table_t *t, const npf_addr_t *addr,
    const int alen, struct npf_hashl **rhtbl)
{
	const uint32_t hidx = hash32_buf(addr, alen, HASH32_BUF_INIT);
	struct npf_hashl *htbl = &t->t_hashl[hidx & t->t_hashmask];
	npf_tblent_t *ent;

	/*
	 * Look up the hash table and check for duplicates.
	 * Note: the mask is ignored for the hash storage.
	 */
	LIST_FOREACH(ent, htbl, te_entry.hashq) {
		if (ent->te_alen != alen) {
			continue;
		}
		if (memcmp(&ent->te_addr, addr, alen) == 0) {
			break;
		}
	}
	*rhtbl = htbl;
	return ent;
}

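/*
 * table_hash_destroy: free all entries in the hash buckets (the hash
 * array itself is released by the caller).
 */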
static void
table_hash_destroy(npf_table_t *t)
{
	for (unsigned n = 0; n <= t->t_hashmask; n++) {
		npf_tblent_t *ent;

		while ((ent = LIST_FIRST(&t->t_hashl[n])) != NULL) {
			LIST_REMOVE(ent, te_entry.hashq);
			pool_cache_put(tblent_cache, ent);
		}
	}
}

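/*
 * table_tree_destroy: remove and free all entries in the given ptree.
 */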
static void
table_tree_destroy(pt_tree_t *tree)
{
	npf_tblent_t *ent;

	while ((ent = ptree_iterate(tree, NULL, PT_ASCENDING)) != NULL) {
		ptree_remove_node(tree, ent);
		pool_cache_put(tblent_cache, ent);
	}
}

/*
 * npf_table_create: create table with a specified ID.
 */
npf_table_t *
npf_table_create(const char *name, u_int tid, int type, size_t hsize)
{
	npf_table_t *t;

	t = kmem_zalloc(sizeof(npf_table_t), KM_SLEEP);
	strlcpy(t->t_name, name, NPF_TABLE_MAXNAMELEN);

	switch (type) {
	case NPF_TABLE_TREE:
		ptree_init(&t->t_tree[0], &npf_table_ptree_ops,
		    (void *)(sizeof(struct in_addr) / sizeof(uint32_t)),
		    offsetof(npf_tblent_t, te_entry.node),
		    offsetof(npf_tblent_t, te_addr));
		ptree_init(&t->t_tree[1], &npf_table_ptree_ops,
		    (void *)(sizeof(struct in6_addr) / sizeof(uint32_t)),
		    offsetof(npf_tblent_t, te_entry.node),
		    offsetof(npf_tblent_t, te_addr));
		break;
	case NPF_TABLE_HASH:
		t->t_hashl = hashinit(hsize, HASH_LIST, true, &t->t_hashmask);
		if (t->t_hashl == NULL) {
			kmem_free(t, sizeof(npf_table_t));
			return NULL;
		}
		break;
	default:
		KASSERT(false);
	}
	rw_init(&t->t_lock);
	t->t_type = type;
	t->t_id = tid;

	return t;
}

/*
 * npf_table_destroy: free all table entries and table itself.
 */
void
npf_table_destroy(npf_table_t *t)
{
	KASSERT(t->t_refcnt == 0);

	switch (t->t_type) {
	case NPF_TABLE_HASH:
		table_hash_destroy(t);
		hashdone(t->t_hashl, HASH_LIST, t->t_hashmask);
		break;
	case NPF_TABLE_TREE:
		table_tree_destroy(&t->t_tree[0]);
		table_tree_destroy(&t->t_tree[1]);
		break;
	default:
		KASSERT(false);
	}
	rw_destroy(&t->t_lock);
	kmem_free(t, sizeof(npf_table_t));
}

/*
 * npf_table_check: validate the name, ID and type.
 */
int
npf_table_check(npf_tableset_t *ts, const char *name, u_int tid, int type)
{
	if ((u_int)tid >= ts->ts_nitems) {
		return EINVAL;
	}
	if (ts->ts_map[tid] != NULL) {
		return EEXIST;
	}
	if (type != NPF_TABLE_TREE && type != NPF_TABLE_HASH) {
		return EINVAL;
	}
	if (strlen(name) >= NPF_TABLE_MAXNAMELEN) {
		return ENAMETOOLONG;
	}
	if (npf_tableset_getbyname(ts, name)) {
		return EEXIST;
	}
	return 0;
}

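/*
 * table_cidr_check: validate the address family index and the netmask.
 */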
static int
table_cidr_check(const u_int aidx, const npf_addr_t *addr,
    const npf_netmask_t mask)
{
	if (aidx > 1) {
		return EINVAL;
	}
	if (mask > NPF_MAX_NETMASK && mask != NPF_NO_NETMASK) {
		return EINVAL;
	}

	/*
	 * The maximum mask length is 32 for IPv4 (aidx = 0) and 128 for
	 * IPv6 (aidx = 1).  A single host must be specified with
	 * NPF_NO_NETMASK.
	 */
	if (mask >= (aidx ? 128 : 32) && mask != NPF_NO_NETMASK) {
		return EINVAL;
	}
	return 0;
}

/*
 * npf_table_insert: add an IP CIDR entry into the table.
 */
int
npf_table_insert(npf_table_t *t, const int alen,
    const npf_addr_t *addr, const npf_netmask_t mask)
{
	const u_int aidx = NPF_ADDRLEN2TREE(alen);
	npf_tblent_t *ent;
	int error;

	error = table_cidr_check(aidx, addr, mask);
	if (error) {
		return error;
	}
	ent = pool_cache_get(tblent_cache, PR_WAITOK);
	memcpy(&ent->te_addr, addr, alen);
	ent->te_alen = alen;

	/*
	 * Insert the entry.  Return an error on duplicate.
	 */
	rw_enter(&t->t_lock, RW_WRITER);
	switch (t->t_type) {
	case NPF_TABLE_HASH: {
		struct npf_hashl *htbl;

		/*
		 * Hash tables, by design, support only single IP
		 * addresses and not masked prefixes.
		 */
		if (mask != NPF_NO_NETMASK) {
			error = EINVAL;
			break;
		}
		if (!table_hash_lookup(t, addr, alen, &htbl)) {
			LIST_INSERT_HEAD(htbl, ent, te_entry.hashq);
			t->t_nitems++;
		} else {
			error = EEXIST;
		}
		break;
	}
	case NPF_TABLE_TREE: {
		pt_tree_t *tree = &t->t_tree[aidx];
		bool ok;

		/*
		 * If no mask specified, use maximum mask.
		 */
		ok = (mask != NPF_NO_NETMASK) ?
		    ptree_insert_mask_node(tree, ent, mask) :
		    ptree_insert_node(tree, ent);
		if (ok) {
			t->t_nitems++;
			error = 0;
		} else {
			error = EEXIST;
		}
		break;
	}
	default:
		KASSERT(false);
	}
	rw_exit(&t->t_lock);

	if (error) {
		pool_cache_put(tblent_cache, ent);
	}
	return error;
}

/*
 * npf_table_remove: remove the IP CIDR entry from the table.
 */
int
npf_table_remove(npf_table_t *t, const int alen,
    const npf_addr_t *addr, const npf_netmask_t mask)
{
	const u_int aidx = NPF_ADDRLEN2TREE(alen);
	npf_tblent_t *ent;
	int error;

	error = table_cidr_check(aidx, addr, mask);
	if (error) {
		return error;
	}

	rw_enter(&t->t_lock, RW_WRITER);
	switch (t->t_type) {
	case NPF_TABLE_HASH: {
		struct npf_hashl *htbl;

		ent = table_hash_lookup(t, addr, alen, &htbl);
		if (__predict_true(ent != NULL)) {
			LIST_REMOVE(ent, te_entry.hashq);
			t->t_nitems--;
		}
		break;
	}
	case NPF_TABLE_TREE: {
		pt_tree_t *tree = &t->t_tree[aidx];

		ent = ptree_find_node(tree, addr);
		if (__predict_true(ent != NULL)) {
			ptree_remove_node(tree, ent);
			t->t_nitems--;
		}
		break;
	}
	default:
		KASSERT(false);
		ent = NULL;
	}
	rw_exit(&t->t_lock);

	if (ent == NULL) {
		return ENOENT;
	}
	pool_cache_put(tblent_cache, ent);
	return 0;
}

/*
 * npf_table_lookup: look up the entry matching the specified IP address
 * in the table.  Returns 0 on a match, ENOENT otherwise.
 */
int
npf_table_lookup(npf_table_t *t, const int alen, const npf_addr_t *addr)
{
	const u_int aidx = NPF_ADDRLEN2TREE(alen);
	npf_tblent_t *ent;

	if (__predict_false(aidx > 1)) {
		return EINVAL;
	}

	rw_enter(&t->t_lock, RW_READER);
	switch (t->t_type) {
	case NPF_TABLE_HASH: {
		struct npf_hashl *htbl;
		ent = table_hash_lookup(t, addr, alen, &htbl);
		break;
	}
	case NPF_TABLE_TREE: {
		ent = ptree_find_node(&t->t_tree[aidx], addr);
		break;
	}
	default:
		KASSERT(false);
		ent = NULL;
	}
	rw_exit(&t->t_lock);

	return ent ? 0 : ENOENT;
}

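/*
 * table_ent_copyout: copy a single entry into the userspace buffer at
 * the given offset; advances the offset and returns ENOMEM if the entry
 * would not fit.
 */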
static int
table_ent_copyout(npf_tblent_t *ent, npf_netmask_t mask,
    void *ubuf, size_t len, size_t *off)
{
	void *ubufp = (uint8_t *)ubuf + *off;
	npf_ioctl_ent_t uent;

	if ((*off += sizeof(npf_ioctl_ent_t)) > len) {
		return ENOMEM;
	}
	uent.alen = ent->te_alen;
	memcpy(&uent.addr, &ent->te_addr, sizeof(npf_addr_t));
	uent.mask = mask;

	return copyout(&uent, ubufp, sizeof(npf_ioctl_ent_t));
}

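/*
 * table_tree_list: walk the tree in ascending order and copy out each
 * entry; entries without an explicit mask are reported with 'maxmask'.
 */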
static int
table_tree_list(pt_tree_t *tree, npf_netmask_t maxmask, void *ubuf,
    size_t len, size_t *off)
{
	npf_tblent_t *ent = NULL;
	int error = 0;

	while ((ent = ptree_iterate(tree, ent, PT_ASCENDING)) != NULL) {
		pt_bitlen_t blen;

		if (!ptree_mask_node_p(tree, ent, &blen)) {
			blen = maxmask;
		}
		error = table_ent_copyout(ent, blen, ubuf, len, off);
		if (error)
			break;
	}
	return error;
}

/*
 * npf_table_list: copy a list of all table entries into a userspace buffer.
 */
int
npf_table_list(npf_table_t *t, void *ubuf, size_t len)
{
	size_t off = 0;
	int error = 0;

	rw_enter(&t->t_lock, RW_READER);
	switch (t->t_type) {
	case NPF_TABLE_HASH:
		for (unsigned n = 0; n <= t->t_hashmask; n++) {
			npf_tblent_t *ent;

			LIST_FOREACH(ent, &t->t_hashl[n], te_entry.hashq)
				if ((error = table_ent_copyout(ent, 0, ubuf,
				    len, &off)) != 0)
					break;
		}
		break;
	case NPF_TABLE_TREE:
		error = table_tree_list(&t->t_tree[0], 32, ubuf, len, &off);
		if (error)
			break;
		error = table_tree_list(&t->t_tree[1], 128, ubuf, len, &off);
		break;
	default:
		KASSERT(false);
	}
	rw_exit(&t->t_lock);

	return error;
}

/*
 * npf_table_flush: remove all table entries.
 */
int
npf_table_flush(npf_table_t *t)
{
	rw_enter(&t->t_lock, RW_WRITER);
	switch (t->t_type) {
	case NPF_TABLE_HASH:
		table_hash_destroy(t);
		t->t_nitems = 0;
		break;
	case NPF_TABLE_TREE:
		table_tree_destroy(&t->t_tree[0]);
		table_tree_destroy(&t->t_tree[1]);
		t->t_nitems = 0;
		break;
	default:
		KASSERT(false);
	}
	rw_exit(&t->t_lock);
	return 0;
}