/*	$NetBSD: npf_handler.c,v 1.28 2013/11/08 00:38:26 rmind Exp $	*/

/*-
 * Copyright (c) 2009-2013 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This material is based upon work partially supported by The
 * NetBSD Foundation under a contract with Mindaugas Rasiukevicius.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * NPF packet handler.
 *
 * Note: pfil(9) hooks are currently locked by softnet_lock and kernel-lock.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: npf_handler.c,v 1.28 2013/11/08 00:38:26 rmind Exp $");

#include <sys/types.h>
#include <sys/param.h>

#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <net/if.h>
#include <net/pfil.h>
#include <sys/socketvar.h>

#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet/ip_var.h>
#include <netinet/ip6.h>
#include <netinet6/ip6_var.h>

#include "npf_impl.h"

static bool pfil_registered = false;
static pfil_head_t * npf_ph_if = NULL;
static pfil_head_t * npf_ph_inet = NULL;
static pfil_head_t * npf_ph_inet6 = NULL;

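/*
 * Kernels built without INET6 do not provide ip6_reass_packet(); define
 * it away so that IPv6 reassembly simply fails with ENOTSUP below.
 */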
#ifndef INET6
#define	ip6_reass_packet(x, y)	ENOTSUP
#endif

/*
 * npf_ifhook: hook handling interface changes.
 */
static int
npf_ifhook(void *arg, struct mbuf **mp, ifnet_t *ifp, int di)
{
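	/*
	 * Note: for interface hooks, pfil(9) passes the command value
	 * in place of the mbuf pointer, so recover it with a cast.
	 */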
	u_long cmd = (u_long)mp;

	if (di == PFIL_IFNET) {
		switch (cmd) {
		case PFIL_IFNET_ATTACH:
			npf_ifmap_attach(ifp);
			break;
		case PFIL_IFNET_DETACH:
			npf_ifmap_detach(ifp);
			break;
		}
	}
	return 0;
}

static int
npf_reassembly(npf_cache_t *npc, nbuf_t *nbuf, struct mbuf **mp)
{
	int error = EINVAL;

	/* Reset the mbuf as it may have changed. */
	*mp = nbuf_head_mbuf(nbuf);
	nbuf_reset(nbuf);

	if (npf_iscached(npc, NPC_IP4)) {
		struct ip *ip = nbuf_dataptr(nbuf);
		error = ip_reass_packet(mp, ip);
	} else if (npf_iscached(npc, NPC_IP6)) {
		/*
		 * Note: ip6_reass_packet() offset is the start of
		 * the fragment header.
		 */
		error = ip6_reass_packet(mp, npc->npc_hlen);
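		/*
		 * On failure the fragment chain may already have been
		 * freed by ip6_reass_packet(); clear the nbuf so the
		 * caller does not dereference a stale mbuf pointer.
		 */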
		if (error && *mp == NULL) {
			memset(nbuf, 0, sizeof(nbuf_t));
		}
	}
	if (error) {
		npf_stats_inc(NPF_STAT_REASSFAIL);
		return error;
	}
	if (*mp == NULL) {
		/* More fragments should come. */
		npf_stats_inc(NPF_STAT_FRAGMENTS);
		return 0;
	}

	/*
	 * Reassembly is complete, we have the final packet.
	 * Cache again, since layer 4 data is accessible now.
	 */
	nbuf_init(nbuf, *mp, nbuf->nb_ifp);
	npc->npc_info = 0;

	if (npf_cache_all(npc, nbuf) & NPC_IPFRAG) {
		return EINVAL;
	}
	npf_stats_inc(NPF_STAT_REASSEMBLY);
	return 0;
}

/*
 * npf_packet_handler: main packet handling routine for layer 3.
 *
 * Note: packet flow and inspection logic is in strict order.
 *
 * Returns 0 if the packet is passed (or held for reassembly, in which
 * case *mp is set to NULL); otherwise the packet is consumed here and
 * an error is returned to the caller.
 */
int
npf_packet_handler(void *arg, struct mbuf **mp, ifnet_t *ifp, int di)
{
	nbuf_t nbuf;
	npf_cache_t npc;
	npf_session_t *se;
	npf_rule_t *rl;
	npf_rproc_t *rp;
	int error, retfl;
	int decision;

	/*
	 * Initialise packet information cache.
	 * Note: it is enough to clear the info bits.
	 */
	KASSERT(ifp != NULL);
	nbuf_init(&nbuf, *mp, ifp);
	npc.npc_info = 0;
	decision = NPF_DECISION_BLOCK;
	error = 0;
	retfl = 0;
	rp = NULL;

	/* Cache everything.  Determine whether it is an IP fragment. */
	if (npf_cache_all(&npc, &nbuf) & NPC_IPFRAG) {
		/*
		 * Pass to IPv4 or IPv6 reassembly mechanism.
		 */
		error = npf_reassembly(&npc, &nbuf, mp);
		if (error) {
			se = NULL;
			goto out;
		}
		if (*mp == NULL) {
			/* More fragments should come; return. */
			return 0;
		}
	}

	/* Inspect the list of sessions (if found, acquires a reference). */
	se = npf_session_inspect(&npc, &nbuf, di, &error);

	/* If "passing" session found - skip the ruleset inspection. */
	if (se && npf_session_pass(se, &rp)) {
		npf_stats_inc(NPF_STAT_PASS_SESSION);
		KASSERT(error == 0);
		goto pass;
	}
	if (error) {
		if (error == ENETUNREACH)
			goto block;
		goto out;
	}

	/* Acquire the lock, inspect the ruleset using this packet. */
	int slock = npf_config_read_enter();
	npf_ruleset_t *rlset = npf_config_ruleset();

	rl = npf_ruleset_inspect(&npc, &nbuf, rlset, di, NPF_LAYER_3);
	if (rl == NULL) {
		const bool pass = npf_default_pass();
		npf_config_read_exit(slock);

		if (pass) {
			npf_stats_inc(NPF_STAT_PASS_DEFAULT);
			goto pass;
		}
		npf_stats_inc(NPF_STAT_BLOCK_DEFAULT);
		goto block;
	}

	/*
	 * Get the rule procedure (acquires a reference) for association
	 * with a session (if any) and execution.
	 */
	KASSERT(rp == NULL);
	rp = npf_rule_getrproc(rl);

	/* Conclude with the rule and release the lock. */
	error = npf_rule_conclude(rl, &retfl);
	npf_config_read_exit(slock);

	if (error) {
		npf_stats_inc(NPF_STAT_BLOCK_RULESET);
		goto block;
	}
	npf_stats_inc(NPF_STAT_PASS_RULESET);

	/*
	 * Establish a "pass" session, if required.  Just proceed,
	 * if session creation fails (e.g. due to unsupported protocol).
	 */
	if ((retfl & NPF_RULE_STATEFUL) != 0 && !se) {
		se = npf_session_establish(&npc, &nbuf, di);
		if (se) {
			/*
			 * Note: the reference on the rule procedure is
			 * transferred to the session.  It will be released
			 * on session destruction.
			 */
			npf_session_setpass(se, rp);
		}
	}
pass:
	decision = NPF_DECISION_PASS;
	KASSERT(error == 0);
	/*
	 * Perform NAT.
	 */
	error = npf_do_nat(&npc, se, &nbuf, di);
block:
	/*
	 * Execute the rule procedure, if any is associated.
	 * It may reverse the decision from pass to block.
	 */
	if (rp) {
		npf_rproc_run(&npc, &nbuf, rp, &decision);
	}
out:
	/*
	 * Release the reference on a session.  Release the reference on a
	 * rule procedure only if there was no association.
	 */
	if (se) {
		npf_session_release(se);
	} else if (rp) {
		npf_rproc_release(rp);
	}

	/* Reset mbuf pointer before returning to the caller. */
	if ((*mp = nbuf_head_mbuf(&nbuf)) == NULL) {
		return error ? error : ENOMEM;
	}

	/* Pass the packet if decided and there is no error. */
	if (decision == NPF_DECISION_PASS && !error) {
		/*
		 * XXX: Disable for now, it will be set accordingly later,
		 * for optimisations (to reduce inspection).
		 */
		(*mp)->m_flags &= ~M_CANFASTFWD;
		return 0;
	}

	/*
	 * Block the packet.  ENETUNREACH is used to indicate blocking.
	 * Depending on the flags and protocol, return TCP reset (RST) or
	 * ICMP destination unreachable.
	 */
	if (retfl && npf_return_block(&npc, &nbuf, retfl)) {
		*mp = NULL;
	}

	if (!error) {
		error = ENETUNREACH;
	}

	if (*mp) {
		m_freem(*mp);
		*mp = NULL;
	}
	return error;
}

/*
 * npf_pfil_register: register pfil(9) hooks.
 */
int
npf_pfil_register(bool init)
{
	int error = 0;

	mutex_enter(softnet_lock);
	KERNEL_LOCK(1, NULL);

	/* Init: interface re-config and attach/detach hook. */
	if (!npf_ph_if) {
		npf_ph_if = pfil_head_get(PFIL_TYPE_IFNET, 0);
		if (!npf_ph_if) {
			error = ENOENT;
			goto out;
		}
		error = pfil_add_hook(npf_ifhook, NULL,
		    PFIL_IFADDR | PFIL_IFNET, npf_ph_if);
		KASSERT(error == 0);
	}
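	/*
	 * When called during initialisation (init == true), only the
	 * interface hook above is registered; the packet handlers below
	 * are registered by a later call with init set to false.
	 */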
	if (init) {
		goto out;
	}

	/* Check if pfil hooks are not already registered. */
	if (pfil_registered) {
		error = EEXIST;
		goto out;
	}

	/* Capture points of the activity in the IP layer. */
	npf_ph_inet = pfil_head_get(PFIL_TYPE_AF, (void *)AF_INET);
	npf_ph_inet6 = pfil_head_get(PFIL_TYPE_AF, (void *)AF_INET6);
	if (!npf_ph_inet && !npf_ph_inet6) {
		error = ENOENT;
		goto out;
	}

	/* Packet IN/OUT handlers for IP layer. */
	if (npf_ph_inet) {
		error = pfil_add_hook(npf_packet_handler, NULL,
		    PFIL_ALL, npf_ph_inet);
		KASSERT(error == 0);
	}
	if (npf_ph_inet6) {
		error = pfil_add_hook(npf_packet_handler, NULL,
		    PFIL_ALL, npf_ph_inet6);
		KASSERT(error == 0);
	}
	pfil_registered = true;
out:
	KERNEL_UNLOCK_ONE(NULL);
	mutex_exit(softnet_lock);

	return error;
}

/*
 * npf_pfil_unregister: unregister pfil(9) hooks.
 */
void
npf_pfil_unregister(bool fini)
{
	mutex_enter(softnet_lock);
	KERNEL_LOCK(1, NULL);

	if (fini && npf_ph_if) {
		(void)pfil_remove_hook(npf_ifhook, NULL,
		    PFIL_IFADDR | PFIL_IFNET, npf_ph_if);
	}
	if (npf_ph_inet) {
		(void)pfil_remove_hook(npf_packet_handler, NULL,
		    PFIL_ALL, npf_ph_inet);
	}
	if (npf_ph_inet6) {
		(void)pfil_remove_hook(npf_packet_handler, NULL,
		    PFIL_ALL, npf_ph_inet6);
	}
	pfil_registered = false;

	KERNEL_UNLOCK_ONE(NULL);
	mutex_exit(softnet_lock);
}

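/*
 * npf_pfil_registered_p: return true if the packet handler hooks
 * are currently registered.
 */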
bool
npf_pfil_registered_p(void)
{
	return pfil_registered;
}