/*	$NetBSD: npf_mbuf.c,v 1.18.14.2 2018/09/06 06:56:44 pgoyette Exp $	*/
2
3 /*-
4 * Copyright (c) 2009-2012 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This material is based upon work partially supported by The
8 * NetBSD Foundation under a contract with Mindaugas Rasiukevicius.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE.
30 */
31
32 /*
33 * NPF network buffer management interface.
34 *
35 * Network buffer in NetBSD is mbuf. Internal mbuf structures are
36 * abstracted within this source.
37 */
38
39 #ifdef _KERNEL
40 #include <sys/cdefs.h>
41 __KERNEL_RCSID(0, "$NetBSD: npf_mbuf.c,v 1.18.14.2 2018/09/06 06:56:44 pgoyette Exp $");
42
43 #include <sys/param.h>
44 #include <sys/mbuf.h>
45 #include <netinet/in_offload.h>
46 #endif
47
48 #include "npf_impl.h"
49
50 #ifdef _KERNEL
51 #ifdef INET6
52 #include <netinet6/in6.h>
53 #include <netinet6/in6_offload.h>
54 #endif
55 #endif
56
57 #if defined(_NPF_STANDALONE)
58 #define m_length(m) (nbuf)->nb_mops->getchainlen(m)
59 #define m_buflen(m) (nbuf)->nb_mops->getlen(m)
60 #define m_next_ptr(m) (nbuf)->nb_mops->getnext(m)
61 #define m_ensure_contig(m,t) (nbuf)->nb_mops->ensure_contig((m), (t))
62 #define m_makewritable(m,o,l,f) (nbuf)->nb_mops->ensure_writable((m), (o+l))
63 #define mtod(m,t) ((t)((nbuf)->nb_mops->getdata(m)))
64 #define m_flags_p(m,f) true
65 #else
66 #define m_next_ptr(m) (m)->m_next
67 #define m_buflen(m) (m)->m_len
68 #define m_flags_p(m,f) (((m)->m_flags & (f)) != 0)
69 #endif
70
71 #define NBUF_ENSURE_ALIGN (MAX(COHERENCY_UNIT, 64))
72 #define NBUF_ENSURE_MASK (NBUF_ENSURE_ALIGN - 1)
73 #define NBUF_ENSURE_ROUNDUP(x) (((x) + NBUF_ENSURE_ALIGN) & ~NBUF_ENSURE_MASK)
74
75 void
76 nbuf_init(npf_t *npf, nbuf_t *nbuf, struct mbuf *m, const ifnet_t *ifp)
77 {
78 u_int ifid = npf_ifmap_getid(npf, ifp);
79
80 KASSERT(m_flags_p(m, M_PKTHDR));
81 nbuf->nb_mops = npf->mbufops;
82
83 nbuf->nb_mbuf0 = m;
84 nbuf->nb_ifp = ifp;
85 nbuf->nb_ifid = ifid;
86 nbuf_reset(nbuf);
87 }
88
89 void
90 nbuf_reset(nbuf_t *nbuf)
91 {
92 struct mbuf *m = nbuf->nb_mbuf0;
93
94 nbuf->nb_mbuf = m;
95 nbuf->nb_nptr = mtod(m, void *);
96 }
97
98 void *
99 nbuf_dataptr(nbuf_t *nbuf)
100 {
101 KASSERT(nbuf->nb_nptr);
102 return nbuf->nb_nptr;
103 }
104
105 size_t
106 nbuf_offset(const nbuf_t *nbuf)
107 {
108 const struct mbuf *m = nbuf->nb_mbuf;
109 const u_int off = (uintptr_t)nbuf->nb_nptr - mtod(m, uintptr_t);
110 const int poff = m_length(nbuf->nb_mbuf0) - m_length(m) + off;
111
112 return poff;
113 }
114
115 struct mbuf *
116 nbuf_head_mbuf(nbuf_t *nbuf)
117 {
118 return nbuf->nb_mbuf0;
119 }
120
121 bool
122 nbuf_flag_p(const nbuf_t *nbuf, int flag)
123 {
124 return (nbuf->nb_flags & flag) != 0;
125 }
126
127 void
128 nbuf_unset_flag(nbuf_t *nbuf, int flag)
129 {
130 nbuf->nb_flags &= ~flag;
131 }
132
133 /*
134 * nbuf_advance: advance in nbuf or chain by specified amount of bytes and,
135 * if requested, ensure that the area *after* advance is contiguous.
136 *
137 * => Returns new pointer to data in nbuf or NULL if offset is invalid.
138 * => Current nbuf and the offset is stored in the nbuf metadata.
139 */
140 void *
141 nbuf_advance(nbuf_t *nbuf, size_t len, size_t ensure)
142 {
143 struct mbuf *m = nbuf->nb_mbuf;
144 u_int off, wmark;
145 uint8_t *d;
146
147 /* Offset with amount to advance. */
148 off = (uintptr_t)nbuf->nb_nptr - mtod(m, uintptr_t) + len;
149 wmark = m_buflen(m);
150
151 /* Find the mbuf according to offset. */
152 while (__predict_false(wmark <= off)) {
153 m = m_next_ptr(m);
154 if (__predict_false(m == NULL)) {
155 /*
156 * If end of the chain, then the offset is
157 * higher than packet length.
158 */
159 return NULL;
160 }
161 wmark += m_buflen(m);
162 }
163 KASSERT(off < m_length(nbuf->nb_mbuf0));
164
165 /* Offset in mbuf data. */
166 d = mtod(m, uint8_t *);
167 KASSERT(off >= (wmark - m_buflen(m)));
168 d += (off - (wmark - m_buflen(m)));
169
170 nbuf->nb_mbuf = m;
171 nbuf->nb_nptr = d;
172
173 if (ensure) {
174 /* Ensure contiguousness (may change nbuf chain). */
175 d = nbuf_ensure_contig(nbuf, ensure);
176 }
177 return d;
178 }
179
180 /*
181 * nbuf_ensure_contig: check whether the specified length from the current
182 * point in the nbuf is contiguous. If not, rearrange the chain to be so.
183 *
184 * => Returns pointer to the data at the current offset in the buffer.
185 * => Returns NULL on failure and nbuf becomes invalid.
186 */
187 void *
188 nbuf_ensure_contig(nbuf_t *nbuf, size_t len)
189 {
190 const struct mbuf * const n = nbuf->nb_mbuf;
191 const size_t off = (uintptr_t)nbuf->nb_nptr - mtod(n, uintptr_t);
192
193 KASSERT(off <= m_buflen(n));
194
195 if (__predict_false(m_buflen(n) < (off + len))) {
196 struct mbuf *m = nbuf->nb_mbuf0;
197 const size_t foff = nbuf_offset(nbuf);
198 const size_t plen = m_length(m);
199 const size_t mlen = m_buflen(m);
200 size_t target;
201 bool success;
202
203 //npf_stats_inc(npf, NPF_STAT_NBUF_NONCONTIG);
204
205 /* Attempt to round-up to NBUF_ENSURE_ALIGN bytes. */
206 if ((target = NBUF_ENSURE_ROUNDUP(foff + len)) > plen) {
207 target = foff + len;
208 }
209
210 /* Rearrange the chain to be contiguous. */
211 KASSERT(m_flags_p(m, M_PKTHDR));
212 success = m_ensure_contig(&m, target);
213 KASSERT(m != NULL);
214
215 /* If no change in the chain: return what we have. */
216 if (m == nbuf->nb_mbuf0 && m_buflen(m) == mlen) {
217 return success ? nbuf->nb_nptr : NULL;
218 }
219
220 /*
221 * The mbuf chain was re-arranged. Update the pointers
222 * accordingly and indicate that the references to the data
223 * might need a reset.
224 */
225 KASSERT(m_flags_p(m, M_PKTHDR));
226 nbuf->nb_mbuf0 = m;
227 nbuf->nb_mbuf = m;
228
229 KASSERT(foff < m_buflen(m) && foff < m_length(m));
230 nbuf->nb_nptr = mtod(m, uint8_t *) + foff;
231 nbuf->nb_flags |= NBUF_DATAREF_RESET;
232
233 if (!success) {
234 //npf_stats_inc(npf, NPF_STAT_NBUF_CONTIG_FAIL);
235 return NULL;
236 }
237 }
238 return nbuf->nb_nptr;
239 }
240
/*
 * nbuf_ensure_writable: ensure that the specified length from the current
 * point in the nbuf is writable, copying/re-arranging the mbufs if needed.
 *
 * => Returns pointer to the data at the current offset in the buffer.
 * => Returns NULL on failure; the nbuf is zeroed and becomes invalid.
 */
void *
nbuf_ensure_writable(nbuf_t *nbuf, size_t len)
{
	struct mbuf *m = nbuf->nb_mbuf;
	const u_int off = (uintptr_t)nbuf->nb_nptr - mtod(m, uintptr_t);
	const int tlen = off + len;
	bool head_buf;

	KASSERT(off < m_length(nbuf->nb_mbuf0));

	/* Fast path: already writable -- nothing to do. */
	if (!M_UNWRITABLE(m, tlen)) {
		return nbuf->nb_nptr;
	}
	/* Remember whether 'm' is the head, as it may be replaced below. */
	head_buf = (nbuf->nb_mbuf0 == m);
	if (m_makewritable(&m, 0, tlen, M_NOWAIT)) {
		/* Failure: invalidate the nbuf entirely. */
		memset(nbuf, 0, sizeof(nbuf_t));
		return NULL;
	}
	if (head_buf) {
		/* The head mbuf may have been replaced -- update it. */
		KASSERT(m_flags_p(m, M_PKTHDR));
		KASSERT(off < m_length(m));
		nbuf->nb_mbuf0 = m;
	}
	nbuf->nb_mbuf = m;
	nbuf->nb_nptr = mtod(m, uint8_t *) + off;

	return nbuf->nb_nptr;
}
269
/*
 * nbuf_cksum_barrier: finalize any deferred (offloaded) TCP/UDP checksums
 * on an outbound packet, so that subsequent data modifications are safe.
 *
 * => Only acts on the PFIL_OUT path; inbound packets are left untouched.
 * => Returns true if a checksum was computed and its flags cleared.
 */
bool
nbuf_cksum_barrier(nbuf_t *nbuf, int di)
{
#ifdef _KERNEL
	struct mbuf *m;

	if (di != PFIL_OUT) {
		return false;
	}
	m = nbuf->nb_mbuf0;
	KASSERT(m_flags_p(m, M_PKTHDR));

	/* IPv4: compute the pending TCP/UDP checksum in software. */
	if (m->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_UDPv4)) {
		in_undefer_cksum_tcpudp(m);
		m->m_pkthdr.csum_flags &= ~(M_CSUM_TCPv4 | M_CSUM_UDPv4);
		return true;
	}
#ifdef INET6
	/* IPv6: same, via the IPv6 offload path. */
	if (m->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
		in6_undefer_cksum_tcpudp(m);
		m->m_pkthdr.csum_flags &= ~(M_CSUM_TCPv6 | M_CSUM_UDPv6);
		return true;
	}
#endif
#else
	(void)nbuf; (void)di;
#endif
	return false;
}
299
300 /*
301 * nbuf_add_tag: add a tag to specified network buffer.
302 *
303 * => Returns 0 on success or errno on failure.
304 */
305 int
306 nbuf_add_tag(nbuf_t *nbuf, uint32_t val)
307 {
308 #ifdef _KERNEL
309 struct mbuf *m = nbuf->nb_mbuf0;
310 struct m_tag *mt;
311 uint32_t *dat;
312
313 KASSERT(m_flags_p(m, M_PKTHDR));
314
315 mt = m_tag_get(PACKET_TAG_NPF, sizeof(uint32_t), M_NOWAIT);
316 if (mt == NULL) {
317 return ENOMEM;
318 }
319 dat = (uint32_t *)(mt + 1);
320 *dat = val;
321 m_tag_prepend(m, mt);
322 return 0;
323 #else
324 (void)nbuf; (void)val;
325 return ENOTSUP;
326 #endif
327 }
328
329 /*
330 * nbuf_find_tag: find a tag in specified network buffer.
331 *
332 * => Returns 0 on success or errno on failure.
333 */
334 int
335 nbuf_find_tag(nbuf_t *nbuf, uint32_t *val)
336 {
337 #ifdef _KERNEL
338 struct mbuf *m = nbuf->nb_mbuf0;
339 struct m_tag *mt;
340
341 KASSERT(m_flags_p(m, M_PKTHDR));
342
343 mt = m_tag_find(m, PACKET_TAG_NPF, NULL);
344 if (mt == NULL) {
345 return EINVAL;
346 }
347 *val = *(uint32_t *)(mt + 1);
348 return 0;
349 #else
350 (void)nbuf; (void)val;
351 return ENOTSUP;
352 #endif
353 }
354