/*	$NetBSD: npf_mbuf.c,v 1.18.14.1 2018/07/28 04:38:10 pgoyette Exp $	*/
2
3 /*-
4 * Copyright (c) 2009-2012 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This material is based upon work partially supported by The
8 * NetBSD Foundation under a contract with Mindaugas Rasiukevicius.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE.
30 */
31
32 /*
33 * NPF network buffer management interface.
34 *
35 * Network buffer in NetBSD is mbuf. Internal mbuf structures are
36 * abstracted within this source.
37 */
38
39 #ifdef _KERNEL
40 #include <sys/cdefs.h>
41 __KERNEL_RCSID(0, "$NetBSD: npf_mbuf.c,v 1.18.14.1 2018/07/28 04:38:10 pgoyette Exp $");
42
43 #include <sys/param.h>
44 #include <sys/mbuf.h>
45 #include <netinet/in_offload.h>
46 #endif
47
48 #include "npf_impl.h"
49
50 #if defined(_NPF_STANDALONE)
51 #define m_length(m) (nbuf)->nb_mops->getchainlen(m)
52 #define m_buflen(m) (nbuf)->nb_mops->getlen(m)
53 #define m_next_ptr(m) (nbuf)->nb_mops->getnext(m)
54 #define m_ensure_contig(m,t) (nbuf)->nb_mops->ensure_contig((m), (t))
55 #define m_makewritable(m,o,l,f) (nbuf)->nb_mops->ensure_writable((m), (o+l))
56 #define mtod(m,t) ((t)((nbuf)->nb_mops->getdata(m)))
57 #define m_flags_p(m,f) true
58 #else
59 #define m_next_ptr(m) (m)->m_next
60 #define m_buflen(m) (m)->m_len
61 #define m_flags_p(m,f) (((m)->m_flags & (f)) != 0)
62 #endif
63
64 #define NBUF_ENSURE_ALIGN (MAX(COHERENCY_UNIT, 64))
65 #define NBUF_ENSURE_MASK (NBUF_ENSURE_ALIGN - 1)
66 #define NBUF_ENSURE_ROUNDUP(x) (((x) + NBUF_ENSURE_ALIGN) & ~NBUF_ENSURE_MASK)
67
68 void
69 nbuf_init(npf_t *npf, nbuf_t *nbuf, struct mbuf *m, const ifnet_t *ifp)
70 {
71 u_int ifid = npf_ifmap_getid(npf, ifp);
72
73 KASSERT(m_flags_p(m, M_PKTHDR));
74 nbuf->nb_mops = npf->mbufops;
75
76 nbuf->nb_mbuf0 = m;
77 nbuf->nb_ifp = ifp;
78 nbuf->nb_ifid = ifid;
79 nbuf_reset(nbuf);
80 }
81
82 void
83 nbuf_reset(nbuf_t *nbuf)
84 {
85 struct mbuf *m = nbuf->nb_mbuf0;
86
87 nbuf->nb_mbuf = m;
88 nbuf->nb_nptr = mtod(m, void *);
89 }
90
91 void *
92 nbuf_dataptr(nbuf_t *nbuf)
93 {
94 KASSERT(nbuf->nb_nptr);
95 return nbuf->nb_nptr;
96 }
97
98 size_t
99 nbuf_offset(const nbuf_t *nbuf)
100 {
101 const struct mbuf *m = nbuf->nb_mbuf;
102 const u_int off = (uintptr_t)nbuf->nb_nptr - mtod(m, uintptr_t);
103 const int poff = m_length(nbuf->nb_mbuf0) - m_length(m) + off;
104
105 return poff;
106 }
107
108 struct mbuf *
109 nbuf_head_mbuf(nbuf_t *nbuf)
110 {
111 return nbuf->nb_mbuf0;
112 }
113
114 bool
115 nbuf_flag_p(const nbuf_t *nbuf, int flag)
116 {
117 return (nbuf->nb_flags & flag) != 0;
118 }
119
120 void
121 nbuf_unset_flag(nbuf_t *nbuf, int flag)
122 {
123 nbuf->nb_flags &= ~flag;
124 }
125
/*
 * nbuf_advance: advance in nbuf or chain by specified amount of bytes and,
 * if requested, ensure that the area *after* advance is contiguous.
 *
 * => Returns new pointer to data in nbuf or NULL if offset is invalid.
 * => Current nbuf and the offset is stored in the nbuf metadata.
 */
void *
nbuf_advance(nbuf_t *nbuf, size_t len, size_t ensure)
{
	struct mbuf *m = nbuf->nb_mbuf;
	u_int off, wmark;
	uint8_t *d;

	/*
	 * Offset with amount to advance.  Note: 'off' is relative to the
	 * start of the *current* mbuf, not the start of the packet.
	 */
	off = (uintptr_t)nbuf->nb_nptr - mtod(m, uintptr_t) + len;
	/* Watermark: offset just past the end of the mbuf being examined. */
	wmark = m_buflen(m);

	/* Find the mbuf according to offset. */
	while (__predict_false(wmark <= off)) {
		m = m_next_ptr(m);
		if (__predict_false(m == NULL)) {
			/*
			 * If end of the chain, then the offset is
			 * higher than packet length.
			 */
			return NULL;
		}
		/* Accumulate the watermark as we walk down the chain. */
		wmark += m_buflen(m);
	}
	KASSERT(off < m_length(nbuf->nb_mbuf0));

	/* Offset in mbuf data: 'wmark - m_buflen(m)' is the offset of
	 * the start of mbuf 'm', so the difference is within 'm'. */
	d = mtod(m, uint8_t *);
	KASSERT(off >= (wmark - m_buflen(m)));
	d += (off - (wmark - m_buflen(m)));

	/* Save the new position in the nbuf metadata. */
	nbuf->nb_mbuf = m;
	nbuf->nb_nptr = d;

	if (ensure) {
		/* Ensure contiguousness (may change nbuf chain). */
		d = nbuf_ensure_contig(nbuf, ensure);
	}
	return d;
}
172
/*
 * nbuf_ensure_contig: check whether the specified length from the current
 * point in the nbuf is contiguous.  If not, rearrange the chain to be so.
 *
 * => Returns pointer to the data at the current offset in the buffer.
 * => Returns NULL on failure and nbuf becomes invalid.
 */
void *
nbuf_ensure_contig(nbuf_t *nbuf, size_t len)
{
	const struct mbuf * const n = nbuf->nb_mbuf;
	/* Offset within the current mbuf only. */
	const size_t off = (uintptr_t)nbuf->nb_nptr - mtod(n, uintptr_t);

	KASSERT(off <= m_buflen(n));

	/* Fast path: the requested range fits in the current mbuf. */
	if (__predict_false(m_buflen(n) < (off + len))) {
		struct mbuf *m = nbuf->nb_mbuf0;
		/* Offset from the start of the packet (whole chain). */
		const size_t foff = nbuf_offset(nbuf);
		const size_t plen = m_length(m);
		const size_t mlen = m_buflen(m);
		size_t target;
		bool success;

		//npf_stats_inc(npf, NPF_STAT_NBUF_NONCONTIG);

		/*
		 * Attempt to round-up to NBUF_ENSURE_ALIGN bytes, so that
		 * likely subsequent accesses are contiguous too; cap the
		 * target at the actual packet length.
		 */
		if ((target = NBUF_ENSURE_ROUNDUP(foff + len)) > plen) {
			target = foff + len;
		}

		/* Rearrange the chain to be contiguous. */
		KASSERT(m_flags_p(m, M_PKTHDR));
		success = m_ensure_contig(&m, target);
		KASSERT(m != NULL);

		/* If no change in the chain: return what we have. */
		if (m == nbuf->nb_mbuf0 && m_buflen(m) == mlen) {
			return success ? nbuf->nb_nptr : NULL;
		}

		/*
		 * The mbuf chain was re-arranged.  Update the pointers
		 * accordingly and indicate that the references to the data
		 * might need a reset.
		 */
		KASSERT(m_flags_p(m, M_PKTHDR));
		nbuf->nb_mbuf0 = m;
		nbuf->nb_mbuf = m;

		/* Re-derive the data pointer from the packet-level offset. */
		KASSERT(foff < m_buflen(m) && foff < m_length(m));
		nbuf->nb_nptr = mtod(m, uint8_t *) + foff;
		/* Callers holding raw data pointers must re-fetch them. */
		nbuf->nb_flags |= NBUF_DATAREF_RESET;

		if (!success) {
			//npf_stats_inc(npf, NPF_STAT_NBUF_CONTIG_FAIL);
			return NULL;
		}
	}
	return nbuf->nb_nptr;
}
233
/*
 * nbuf_ensure_writable: ensure that the specified length from the current
 * point in the nbuf is writable, un-sharing the data if necessary.
 *
 * => Returns pointer to the data at the current offset in the buffer.
 * => Returns NULL on failure, in which case the nbuf is zeroed out and
 *    must be considered invalid.
 */
void *
nbuf_ensure_writable(nbuf_t *nbuf, size_t len)
{
	struct mbuf *m = nbuf->nb_mbuf;
	/* Offset within the current mbuf. */
	const u_int off = (uintptr_t)nbuf->nb_nptr - mtod(m, uintptr_t);
	const int tlen = off + len;
	bool head_buf;

	KASSERT(off < m_length(nbuf->nb_mbuf0));

	/* Already writable: nothing to do. */
	if (!M_UNWRITABLE(m, tlen)) {
		return nbuf->nb_nptr;
	}
	/* Remember whether we are at the head of the chain. */
	head_buf = (nbuf->nb_mbuf0 == m);
	/* m_makewritable() returns non-zero on failure. */
	if (m_makewritable(&m, 0, tlen, M_NOWAIT)) {
		/* On failure, invalidate the whole nbuf. */
		memset(nbuf, 0, sizeof(nbuf_t));
		return NULL;
	}
	if (head_buf) {
		/* The chain head may have been replaced: update it. */
		KASSERT(m_flags_p(m, M_PKTHDR));
		KASSERT(off < m_length(m));
		nbuf->nb_mbuf0 = m;
	}
	/* Re-derive the data pointer in the (possibly new) mbuf. */
	nbuf->nb_mbuf = m;
	nbuf->nb_nptr = mtod(m, uint8_t *) + off;

	return nbuf->nb_nptr;
}
262
/*
 * nbuf_cksum_barrier: for outgoing packets with a deferred (offloaded)
 * TCP/UDP checksum, compute the checksum now and clear the offload flags,
 * so that subsequent NPF modifications see a finalized packet.
 *
 * => Returns true if a deferred checksum was finalized, false otherwise.
 * => No-op (always false) outside the kernel or for non-outbound traffic.
 */
bool
nbuf_cksum_barrier(nbuf_t *nbuf, int di)
{
#ifdef _KERNEL
	struct mbuf *m;

	/* Only outbound packets carry deferred checksums to finalize. */
	if (di != PFIL_OUT) {
		return false;
	}
	m = nbuf->nb_mbuf0;
	KASSERT(m_flags_p(m, M_PKTHDR));

	if (m->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_UDPv4)) {
		in_undefer_cksum_tcpudp(m);
		m->m_pkthdr.csum_flags &= ~(M_CSUM_TCPv4 | M_CSUM_UDPv4);
		return true;
	}
#ifdef INET6
	if (m->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
		in6_delayed_cksum(m);
		m->m_pkthdr.csum_flags &= ~(M_CSUM_TCPv6 | M_CSUM_UDPv6);
		return true;
	}
#endif
#else
	(void)nbuf; (void)di;
#endif
	return false;
}
292
293 /*
294 * nbuf_add_tag: add a tag to specified network buffer.
295 *
296 * => Returns 0 on success or errno on failure.
297 */
298 int
299 nbuf_add_tag(nbuf_t *nbuf, uint32_t val)
300 {
301 #ifdef _KERNEL
302 struct mbuf *m = nbuf->nb_mbuf0;
303 struct m_tag *mt;
304 uint32_t *dat;
305
306 KASSERT(m_flags_p(m, M_PKTHDR));
307
308 mt = m_tag_get(PACKET_TAG_NPF, sizeof(uint32_t), M_NOWAIT);
309 if (mt == NULL) {
310 return ENOMEM;
311 }
312 dat = (uint32_t *)(mt + 1);
313 *dat = val;
314 m_tag_prepend(m, mt);
315 return 0;
316 #else
317 (void)nbuf; (void)val;
318 return ENOTSUP;
319 #endif
320 }
321
322 /*
323 * nbuf_find_tag: find a tag in specified network buffer.
324 *
325 * => Returns 0 on success or errno on failure.
326 */
327 int
328 nbuf_find_tag(nbuf_t *nbuf, uint32_t *val)
329 {
330 #ifdef _KERNEL
331 struct mbuf *m = nbuf->nb_mbuf0;
332 struct m_tag *mt;
333
334 KASSERT(m_flags_p(m, M_PKTHDR));
335
336 mt = m_tag_find(m, PACKET_TAG_NPF, NULL);
337 if (mt == NULL) {
338 return EINVAL;
339 }
340 *val = *(uint32_t *)(mt + 1);
341 return 0;
342 #else
343 (void)nbuf; (void)val;
344 return ENOTSUP;
345 #endif
346 }
347