/******************************************************************************

  Copyright (c) 2001-2015, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
/*
 * Copyright (c) 2011 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Coyote Point Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*$FreeBSD: head/sys/dev/ixgbe/ix_txrx.c 301538 2016-06-07 04:51:50Z sephe $*/
/*$NetBSD: ix_txrx.c,v 1.15 2017/01/18 08:15:22 msaitoh Exp $*/

#include "opt_inet.h"
#include "opt_inet6.h"

#include "ixgbe.h"

#ifdef DEV_NETMAP
#include <net/netmap.h>
#include <sys/selinfo.h>
#include <dev/netmap/netmap_kern.h>

extern int ix_crcstrip;
#endif
/*
** HW RSC control:
**  this feature only works with
**  IPv4, and only on 82599 and later.
**  Also this will cause IP forwarding to
**  fail and that can't be controlled by
**  the stack as LRO can. For all these
**  reasons I've deemed it best to leave
**  this off by default and not bother
**  with a tunable interface; enabling it
**  requires recompiling the driver.
*/
static bool ixgbe_rsc_enable = FALSE;

#ifdef IXGBE_FDIR
/*
** For Flow Director: this is the
** number of TX packets we sample
** for the filter pool; with the
** default of 20, every 20th packet
** is probed.
**
** This feature can be disabled by
** setting this to 0.
*/
static int atr_sample_rate = 20;
#endif

/*********************************************************************
 *  Local Function prototypes
 *********************************************************************/
static void	ixgbe_setup_transmit_ring(struct tx_ring *);
static void	ixgbe_free_transmit_buffers(struct tx_ring *);
static int	ixgbe_setup_receive_ring(struct rx_ring *);
static void	ixgbe_free_receive_buffers(struct rx_ring *);

static void	ixgbe_rx_checksum(u32, struct mbuf *, u32,
		    struct ixgbe_hw_stats *);
static void	ixgbe_refresh_mbufs(struct rx_ring *, int);
static int	ixgbe_xmit(struct tx_ring *, struct mbuf *);
static int	ixgbe_tx_ctx_setup(struct tx_ring *,
		    struct mbuf *, u32 *, u32 *);
static int	ixgbe_tso_setup(struct tx_ring *,
		    struct mbuf *, u32 *, u32 *);
#ifdef IXGBE_FDIR
static void	ixgbe_atr(struct tx_ring *, struct mbuf *);
#endif
static __inline void ixgbe_rx_discard(struct rx_ring *, int);
static __inline void ixgbe_rx_input(struct rx_ring *, struct ifnet *,
		    struct mbuf *, u32);

static void	ixgbe_setup_hw_rsc(struct rx_ring *);

#ifdef IXGBE_LEGACY_TX
/*********************************************************************
 *  Transmit entry point
 *
 *  ixgbe_start is called by the stack to initiate a transmit.
 *  The driver will remain in this routine as long as there are
 *  packets to transmit and transmit resources are available.
 *  In case resources are not available, the stack is notified
 *  and the packet is requeued.
 **********************************************************************/

void
ixgbe_start_locked(struct tx_ring *txr, struct ifnet * ifp)
{
	int rc;
	struct mbuf    *m_head;
	struct adapter *adapter = txr->adapter;

	IXGBE_TX_LOCK_ASSERT(txr);

	if ((ifp->if_flags & IFF_RUNNING) == 0)
		return;
	if (!adapter->link_active)
		return;

	while (!IFQ_IS_EMPTY(&ifp->if_snd)) {
		if (txr->tx_avail <= IXGBE_QUEUE_MIN_FREE)
			break;

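		/*
		 * Peek first with IFQ_POLL: the mbuf stays on if_snd
		 * until ixgbe_xmit() reports something other than
		 * EAGAIN, so a transient failure (ring full, DMA maps
		 * busy) simply leaves the packet queued for the next
		 * call into this routine.
		 */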
		IFQ_POLL(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

		if ((rc = ixgbe_xmit(txr, m_head)) == EAGAIN) {
			break;
		}
		IFQ_DEQUEUE(&ifp->if_snd, m_head);
		if (rc == EFBIG) {
			struct mbuf *mtmp;

			if ((mtmp = m_defrag(m_head, M_NOWAIT)) != NULL) {
				m_head = mtmp;
				rc = ixgbe_xmit(txr, m_head);
				if (rc != 0)
					adapter->efbig2_tx_dma_setup.ev_count++;
			} else
				adapter->m_defrag_failed.ev_count++;
		}
		if (rc != 0) {
			m_freem(m_head);
			continue;
		}

		/* Send a copy of the frame to the BPF listener */
		bpf_mtap(ifp, m_head);
	}
	return;
}

/*
 * Legacy TX start - called by the stack, this
 * always uses the first tx ring, and should
 * not be used with multiqueue tx enabled.
 */
void
ixgbe_start(struct ifnet *ifp)
{
	struct adapter *adapter = ifp->if_softc;
	struct tx_ring *txr = adapter->tx_rings;

	if (ifp->if_flags & IFF_RUNNING) {
		IXGBE_TX_LOCK(txr);
		ixgbe_start_locked(txr, ifp);
		IXGBE_TX_UNLOCK(txr);
	}
	return;
}

#else /* ! IXGBE_LEGACY_TX */

/*
** Multiqueue Transmit Entry Point
** (if_transmit function)
*/
int
ixgbe_mq_start(struct ifnet *ifp, struct mbuf *m)
{
	struct adapter	*adapter = ifp->if_softc;
	struct ix_queue	*que;
	struct tx_ring	*txr;
	int		i, err = 0;
#ifdef RSS
	uint32_t bucket_id;
#endif

	/*
	 * When doing RSS, map it to the same outbound queue
	 * as the incoming flow would be mapped to.
	 *
	 * If everything is setup correctly, it should be the
	 * same bucket that the current CPU we're on is.
	 */
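	/*
	 * For example, with adapter->num_queues == 8 a flow whose
	 * RSS hash (flowid) is 42 is sent on queue 42 % 8 == 2,
	 * the same queue its receive side is steered to, keeping
	 * both directions of the flow on one CPU.
	 */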
#if __FreeBSD_version < 1100054
	if (m->m_flags & M_FLOWID) {
#else
	if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) {
#endif
#ifdef RSS
		if (rss_hash2bucket(m->m_pkthdr.flowid,
		    M_HASHTYPE_GET(m), &bucket_id) == 0) {
			/* TODO: spit out something if bucket_id > num_queues? */
			i = bucket_id % adapter->num_queues;
#ifdef IXGBE_DEBUG
			if (bucket_id > adapter->num_queues)
				if_printf(ifp, "bucket_id (%d) > num_queues "
				    "(%d)\n", bucket_id, adapter->num_queues);
#endif
		} else
#endif
			i = m->m_pkthdr.flowid % adapter->num_queues;
	} else
		i = curcpu % adapter->num_queues;

	/* Check for a hung queue and pick alternative */
	if (((1 << i) & adapter->active_queues) == 0)
		i = ffsl(adapter->active_queues) - 1; /* ffsl() is 1-based */

	txr = &adapter->tx_rings[i];
	que = &adapter->queues[i];

	err = drbr_enqueue(ifp, txr->br, m);
	if (err)
		return (err);
	if (IXGBE_TX_TRYLOCK(txr)) {
		ixgbe_mq_start_locked(ifp, txr);
		IXGBE_TX_UNLOCK(txr);
	} else
		softint_schedule(txr->txq_si);

	return (0);
}

int
ixgbe_mq_start_locked(struct ifnet *ifp, struct tx_ring *txr)
{
	struct adapter  *adapter = txr->adapter;
	struct mbuf     *next;
	int             enqueued = 0, err = 0;

	if (((ifp->if_flags & IFF_RUNNING) == 0) ||
	    adapter->link_active == 0)
		return (ENETDOWN);

	/* Process the queue */
#if __FreeBSD_version < 901504
	next = drbr_dequeue(ifp, txr->br);
	while (next != NULL) {
		if ((err = ixgbe_xmit(txr, &next)) != 0) {
			if (next != NULL)
				err = drbr_enqueue(ifp, txr->br, next);
#else
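	/*
	 * drbr_peek() leaves the mbuf on the ring: on success
	 * drbr_advance() consumes it, while on failure
	 * drbr_putback() restores whatever ixgbe_xmit() left in
	 * 'next' (it may have been replaced by a defragged chain,
	 * or freed and set to NULL, in which case we advance).
	 */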
	while ((next = drbr_peek(ifp, txr->br)) != NULL) {
		if ((err = ixgbe_xmit(txr, &next)) != 0) {
			if (next == NULL) {
				drbr_advance(ifp, txr->br);
			} else {
				drbr_putback(ifp, txr->br, next);
			}
#endif
			break;
		}
#if __FreeBSD_version >= 901504
		drbr_advance(ifp, txr->br);
#endif
		enqueued++;
#if 0 // this is VF-only
#if __FreeBSD_version >= 1100036
		/*
		 * Since we're looking at the tx ring, we can check
		 * to see if we're a VF by examining our tail register
		 * address.
		 */
		if (txr->tail < IXGBE_TDT(0) && next->m_flags & M_MCAST)
			if_inc_counter(ifp, IFCOUNTER_OMCASTS, 1);
#endif
#endif
		/* Send a copy of the frame to the BPF listener */
		bpf_mtap(ifp, next);
		if ((ifp->if_flags & IFF_RUNNING) == 0)
			break;
#if __FreeBSD_version < 901504
		next = drbr_dequeue(ifp, txr->br);
#endif
	}

	if (txr->tx_avail < IXGBE_TX_CLEANUP_THRESHOLD)
		ixgbe_txeof(txr);

	return (err);
}

/*
 * Called from a taskqueue to drain queued transmit packets.
 */
void
ixgbe_deferred_mq_start(void *arg, int pending)
{
	struct tx_ring *txr = arg;
	struct adapter *adapter = txr->adapter;
	struct ifnet *ifp = adapter->ifp;

	IXGBE_TX_LOCK(txr);
	if (!drbr_empty(ifp, txr->br))
		ixgbe_mq_start_locked(ifp, txr);
	IXGBE_TX_UNLOCK(txr);
}

/*
 * Flush all ring buffers
 */
void
ixgbe_qflush(struct ifnet *ifp)
{
	struct adapter	*adapter = ifp->if_softc;
	struct tx_ring	*txr = adapter->tx_rings;
	struct mbuf	*m;

	for (int i = 0; i < adapter->num_queues; i++, txr++) {
		IXGBE_TX_LOCK(txr);
		while ((m = buf_ring_dequeue_sc(txr->br)) != NULL)
			m_freem(m);
		IXGBE_TX_UNLOCK(txr);
	}
	if_qflush(ifp);
}
#endif /* IXGBE_LEGACY_TX */

/*********************************************************************
 *
 *  This routine maps the mbufs to tx descriptors, allowing the
 *  TX engine to transmit the packets.
 *	- return 0 on success, positive on failure
 *
 **********************************************************************/

static int
ixgbe_xmit(struct tx_ring *txr, struct mbuf *m_head)
{
	struct m_tag *mtag;
	struct adapter  *adapter = txr->adapter;
	struct ethercom *ec = &adapter->osdep.ec;
	u32		olinfo_status = 0, cmd_type_len;
	int             i, j, error;
	int		first;
	bus_dmamap_t	map;
	struct ixgbe_tx_buf *txbuf;
	union ixgbe_adv_tx_desc *txd = NULL;

	/* Basic descriptor defines */
	cmd_type_len = (IXGBE_ADVTXD_DTYP_DATA |
	    IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT);

	if ((mtag = VLAN_OUTPUT_TAG(ec, m_head)) != NULL)
		cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE;

	/*
	 * Important to capture the first descriptor
	 * used because it will contain the index of
	 * the one we tell the hardware to report back
	 */
	first = txr->next_avail_desc;
	txbuf = &txr->tx_buffers[first];
	map = txbuf->map;

	/*
	 * Map the packet for DMA.
	 */
	error = bus_dmamap_load_mbuf(txr->txtag->dt_dmat, map,
	    m_head, BUS_DMA_NOWAIT);

	if (__predict_false(error)) {

		switch (error) {
		case EAGAIN:
			adapter->eagain_tx_dma_setup.ev_count++;
			return EAGAIN;
		case ENOMEM:
			adapter->enomem_tx_dma_setup.ev_count++;
			return EAGAIN;
		case EFBIG:
			/*
			 * XXX Try it again?
			 * do m_defrag() and retry bus_dmamap_load_mbuf().
			 */
			adapter->efbig_tx_dma_setup.ev_count++;
			return error;
		case EINVAL:
			adapter->einval_tx_dma_setup.ev_count++;
			return error;
		default:
			adapter->other_tx_dma_setup.ev_count++;
			return error;
		}
	}

	/* Make certain there are enough descriptors */
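	/*
	 * (The +2 appears to leave room for the offload context
	 * descriptor that may be consumed below, plus one descriptor
	 * of slack so the ring never fills completely.)
	 */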
	if (txr->tx_avail < (map->dm_nsegs + 2)) {
		txr->no_desc_avail.ev_count++;
		ixgbe_dmamap_unload(txr->txtag, txbuf->map);
		return EAGAIN;
	}

	/*
	 * Set up the appropriate offload context;
	 * this will consume the first descriptor.
	 */
	error = ixgbe_tx_ctx_setup(txr, m_head, &cmd_type_len, &olinfo_status);
	if (__predict_false(error)) {
		return (error);
	}

#ifdef IXGBE_FDIR
	/* Do the flow director magic */
	if ((txr->atr_sample) && (!adapter->fdir_reinit)) {
		++txr->atr_count;
		if (txr->atr_count >= atr_sample_rate) {
			ixgbe_atr(txr, m_head);
			txr->atr_count = 0;
		}
	}
#endif

	olinfo_status |= IXGBE_ADVTXD_CC;
	i = txr->next_avail_desc;
	for (j = 0; j < map->dm_nsegs; j++) {
		bus_size_t seglen;
		bus_addr_t segaddr;

		txbuf = &txr->tx_buffers[i];
		txd = &txr->tx_base[i];
		seglen = map->dm_segs[j].ds_len;
		segaddr = htole64(map->dm_segs[j].ds_addr);

		txd->read.buffer_addr = segaddr;
		txd->read.cmd_type_len = htole32(txr->txd_cmd |
		    cmd_type_len | seglen);
		txd->read.olinfo_status = htole32(olinfo_status);

		if (++i == txr->num_desc)
			i = 0;
	}

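	/*
	 * EOP marks the end of the frame; RS (Report Status) asks
	 * the hardware to write back the DD bit for this descriptor,
	 * which is what ixgbe_txeof() polls to reclaim the chain.
	 */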
	txd->read.cmd_type_len |=
	    htole32(IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS);
	txr->tx_avail -= map->dm_nsegs;
	txr->next_avail_desc = i;

	txbuf->m_head = m_head;
	/*
	 * Here we swap the map so the last descriptor,
	 * which gets the completion interrupt, has the
	 * real map, and the first descriptor gets the
	 * unused map from this descriptor.
	 */
	txr->tx_buffers[first].map = txbuf->map;
	txbuf->map = map;
	bus_dmamap_sync(txr->txtag->dt_dmat, map, 0, m_head->m_pkthdr.len,
	    BUS_DMASYNC_PREWRITE);

	/* Set the EOP descriptor that will be marked done */
	txbuf = &txr->tx_buffers[first];
	txbuf->eop = txd;

	ixgbe_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	/*
	 * Advance the Transmit Descriptor Tail (Tdt); this tells the
	 * hardware that this frame is available to transmit.
	 */
	++txr->total_packets.ev_count;
	IXGBE_WRITE_REG(&adapter->hw, txr->tail, i);

	/* Mark queue as having work */
	if (txr->busy == 0)
		txr->busy = 1;

	return 0;
}

/*********************************************************************
 *
 *  Allocate memory for tx_buffer structures. The tx_buffer stores all
 *  the information needed to transmit a packet on the wire. This is
 *  called only once at attach; setup is done every reset.
 *
 **********************************************************************/
int
ixgbe_allocate_transmit_buffers(struct tx_ring *txr)
{
	struct adapter *adapter = txr->adapter;
	device_t dev = adapter->dev;
	struct ixgbe_tx_buf *txbuf;
	int error, i;

	/*
	 * Setup DMA descriptor areas.
	 */
	if ((error = ixgbe_dma_tag_create(adapter->osdep.dmat,	/* parent */
			       1, 0,			/* alignment, bounds */
			       IXGBE_TSO_SIZE,		/* maxsize */
			       adapter->num_segs,	/* nsegments */
			       PAGE_SIZE,		/* maxsegsize */
			       0,			/* flags */
			       &txr->txtag))) {
		aprint_error_dev(dev, "Unable to allocate TX DMA tag\n");
		goto fail;
	}

	if (!(txr->tx_buffers =
	    (struct ixgbe_tx_buf *) malloc(sizeof(struct ixgbe_tx_buf) *
	    adapter->num_tx_desc, M_DEVBUF, M_NOWAIT | M_ZERO))) {
		aprint_error_dev(dev, "Unable to allocate tx_buffer memory\n");
		error = ENOMEM;
		goto fail;
	}

	/* Create the descriptor buffer dma maps */
	txbuf = txr->tx_buffers;
	for (i = 0; i < adapter->num_tx_desc; i++, txbuf++) {
		error = ixgbe_dmamap_create(txr->txtag, 0, &txbuf->map);
		if (error != 0) {
			aprint_error_dev(dev,
			    "Unable to create TX DMA map (%d)\n", error);
			goto fail;
		}
	}

	return 0;
fail:
	/* We free all, it handles case where we are in the middle */
#if 0 /* XXX was FreeBSD */
	ixgbe_free_transmit_structures(adapter);
#else
	ixgbe_free_transmit_buffers(txr);
#endif
	return (error);
}

/*********************************************************************
 *
 *  Initialize a transmit ring.
 *
 **********************************************************************/
static void
ixgbe_setup_transmit_ring(struct tx_ring *txr)
{
	struct adapter *adapter = txr->adapter;
	struct ixgbe_tx_buf *txbuf;
#ifdef DEV_NETMAP
	struct netmap_adapter *na = NA(adapter->ifp);
	struct netmap_slot *slot;
#endif /* DEV_NETMAP */

	/* Clear the old ring contents */
	IXGBE_TX_LOCK(txr);
#ifdef DEV_NETMAP
	/*
	 * (under lock): if in netmap mode, do some consistency
	 * checks and set slot to entry 0 of the netmap ring.
	 */
	slot = netmap_reset(na, NR_TX, txr->me, 0);
#endif /* DEV_NETMAP */
	bzero((void *)txr->tx_base,
	    (sizeof(union ixgbe_adv_tx_desc)) * adapter->num_tx_desc);
	/* Reset indices */
	txr->next_avail_desc = 0;
	txr->next_to_clean = 0;

	/* Free any existing tx buffers. */
	txbuf = txr->tx_buffers;
	for (int i = 0; i < txr->num_desc; i++, txbuf++) {
		if (txbuf->m_head != NULL) {
			bus_dmamap_sync(txr->txtag->dt_dmat, txbuf->map,
			    0, txbuf->m_head->m_pkthdr.len,
			    BUS_DMASYNC_POSTWRITE);
			ixgbe_dmamap_unload(txr->txtag, txbuf->map);
			m_freem(txbuf->m_head);
			txbuf->m_head = NULL;
		}
#ifdef DEV_NETMAP
		/*
		 * In netmap mode, set the map for the packet buffer.
		 * NOTE: Some drivers (not this one) also need to set
		 * the physical buffer address in the NIC ring.
		 * Slots in the netmap ring (indexed by "si") are
		 * kring->nkr_hwofs positions "ahead" wrt the
		 * corresponding slot in the NIC ring. In some drivers
		 * (not here) nkr_hwofs can be negative. Function
		 * netmap_idx_n2k() handles wraparounds properly.
		 */
		if (slot) {
			int si = netmap_idx_n2k(&na->tx_rings[txr->me], i);
			netmap_load_map(na, txr->txtag,
			    txbuf->map, NMB(na, slot + si));
		}
#endif /* DEV_NETMAP */
		/* Clear the EOP descriptor pointer */
		txbuf->eop = NULL;
	}

#ifdef IXGBE_FDIR
	/* Set the rate at which we sample packets */
	if (adapter->hw.mac.type != ixgbe_mac_82598EB)
		txr->atr_sample = atr_sample_rate;
#endif

	/* Set number of descriptors available */
	txr->tx_avail = adapter->num_tx_desc;

	ixgbe_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	IXGBE_TX_UNLOCK(txr);
}

/*********************************************************************
 *
 *  Initialize all transmit rings.
 *
 **********************************************************************/
int
ixgbe_setup_transmit_structures(struct adapter *adapter)
{
	struct tx_ring *txr = adapter->tx_rings;

	for (int i = 0; i < adapter->num_queues; i++, txr++)
		ixgbe_setup_transmit_ring(txr);

	return (0);
}

/*********************************************************************
 *
 *  Free all transmit rings.
 *
 **********************************************************************/
void
ixgbe_free_transmit_structures(struct adapter *adapter)
{
	struct tx_ring *txr = adapter->tx_rings;

	for (int i = 0; i < adapter->num_queues; i++, txr++) {
		ixgbe_free_transmit_buffers(txr);
		ixgbe_dma_free(adapter, &txr->txdma);
		IXGBE_TX_LOCK_DESTROY(txr);
	}
	free(adapter->tx_rings, M_DEVBUF);
}

/*********************************************************************
 *
 *  Free transmit ring related data structures.
 *
 **********************************************************************/
static void
ixgbe_free_transmit_buffers(struct tx_ring *txr)
{
	struct adapter *adapter = txr->adapter;
	struct ixgbe_tx_buf *tx_buffer;
	int             i;

	INIT_DEBUGOUT("ixgbe_free_transmit_buffers: begin");

	if (txr->tx_buffers == NULL)
		return;

	tx_buffer = txr->tx_buffers;
	for (i = 0; i < adapter->num_tx_desc; i++, tx_buffer++) {
		if (tx_buffer->m_head != NULL) {
			bus_dmamap_sync(txr->txtag->dt_dmat, tx_buffer->map,
			    0, tx_buffer->m_head->m_pkthdr.len,
			    BUS_DMASYNC_POSTWRITE);
			ixgbe_dmamap_unload(txr->txtag, tx_buffer->map);
			m_freem(tx_buffer->m_head);
			tx_buffer->m_head = NULL;
			if (tx_buffer->map != NULL) {
				ixgbe_dmamap_destroy(txr->txtag,
				    tx_buffer->map);
				tx_buffer->map = NULL;
			}
		} else if (tx_buffer->map != NULL) {
			ixgbe_dmamap_unload(txr->txtag, tx_buffer->map);
			ixgbe_dmamap_destroy(txr->txtag, tx_buffer->map);
			tx_buffer->map = NULL;
		}
	}
#ifndef IXGBE_LEGACY_TX
	if (txr->br != NULL)
		buf_ring_free(txr->br, M_DEVBUF);
#endif
	if (txr->tx_buffers != NULL) {
		free(txr->tx_buffers, M_DEVBUF);
		txr->tx_buffers = NULL;
	}
	if (txr->txtag != NULL) {
		ixgbe_dma_tag_destroy(txr->txtag);
		txr->txtag = NULL;
	}
	return;
}

/*********************************************************************
 *
 *  Advanced Context Descriptor setup for VLAN, CSUM or TSO
 *
 **********************************************************************/

static int
ixgbe_tx_ctx_setup(struct tx_ring *txr, struct mbuf *mp,
    u32 *cmd_type_len, u32 *olinfo_status)
{
	struct adapter *adapter = txr->adapter;
	struct ethercom *ec = &adapter->osdep.ec;
	struct m_tag *mtag;
	struct ixgbe_adv_tx_context_desc *TXD;
	struct ether_vlan_header *eh;
#ifdef INET
	struct ip *ip;
#endif
#ifdef INET6
	struct ip6_hdr *ip6;
#endif
	u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0;
	int	ehdrlen, ip_hlen = 0;
	u16	etype;
	u8	ipproto = 0;
	int	offload = TRUE;
	int	ctxd = txr->next_avail_desc;
	u16	vtag = 0;
	char	*l3d;

	/* First check if TSO is to be used */
	if (mp->m_pkthdr.csum_flags & (M_CSUM_TSOv4|M_CSUM_TSOv6))
		return (ixgbe_tso_setup(txr, mp, cmd_type_len, olinfo_status));

	if ((mp->m_pkthdr.csum_flags & M_CSUM_OFFLOAD) == 0)
		offload = FALSE;

	/* Indicate the whole packet as payload when not doing TSO */
	*olinfo_status |= mp->m_pkthdr.len << IXGBE_ADVTXD_PAYLEN_SHIFT;

	/* Now ready a context descriptor */
	TXD = (struct ixgbe_adv_tx_context_desc *) &txr->tx_base[ctxd];

	/*
	** In advanced descriptors the vlan tag must
	** be placed into the context descriptor. Hence
	** we need to make one even if not doing offloads.
	*/
	if ((mtag = VLAN_OUTPUT_TAG(ec, mp)) != NULL) {
		vtag = htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
		vlan_macip_lens |= (vtag << IXGBE_ADVTXD_VLAN_SHIFT);
	} else if (!IXGBE_IS_X550VF(adapter) && (offload == FALSE))
		return (0);

	/*
	 * Determine where frame payload starts.
	 * Jump over vlan headers if already present,
	 * helpful for QinQ too.
	 */
	KASSERT(mp->m_len >= offsetof(struct ether_vlan_header, evl_tag));
	eh = mtod(mp, struct ether_vlan_header *);
	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
		KASSERT(mp->m_len >= sizeof(struct ether_vlan_header));
		etype = ntohs(eh->evl_proto);
		ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
	} else {
		etype = ntohs(eh->evl_encap_proto);
		ehdrlen = ETHER_HDR_LEN;
	}

	/* Set the ether header length */
	vlan_macip_lens |= ehdrlen << IXGBE_ADVTXD_MACLEN_SHIFT;

	if (offload == FALSE)
		goto no_offloads;

	/*
	 * If the first mbuf only includes the ethernet header,
	 * jump to the next one.
	 * XXX: This assumes the stack splits mbufs containing headers
	 *      on header boundaries.
	 * XXX: And assumes the entire IP header is contained in one mbuf.
	 */
	if (mp->m_len == ehdrlen && mp->m_next)
		l3d = mtod(mp->m_next, char *);
	else
		l3d = mtod(mp, char *) + ehdrlen;

	switch (etype) {
#ifdef INET
	case ETHERTYPE_IP:
		ip = (struct ip *)(l3d);
		ip_hlen = ip->ip_hl << 2;
		ipproto = ip->ip_p;
		type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
		KASSERT((mp->m_pkthdr.csum_flags & M_CSUM_IPv4) == 0 ||
		    ip->ip_sum == 0);
		break;
#endif
#ifdef INET6
	case ETHERTYPE_IPV6:
		ip6 = (struct ip6_hdr *)(l3d);
		ip_hlen = sizeof(struct ip6_hdr);
		ipproto = ip6->ip6_nxt;
		type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV6;
		break;
#endif
	default:
		offload = false;
		break;
	}

	if ((mp->m_pkthdr.csum_flags & M_CSUM_IPv4) != 0)
		*olinfo_status |= IXGBE_TXD_POPTS_IXSM << 8;

	vlan_macip_lens |= ip_hlen;

	/* No support for offloads for non-L4 next headers */
	switch (ipproto) {
	case IPPROTO_TCP:
		if (mp->m_pkthdr.csum_flags & (M_CSUM_TCPv4|M_CSUM_TCPv6))
			type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
		else
			offload = false;
		break;
	case IPPROTO_UDP:
		if (mp->m_pkthdr.csum_flags & (M_CSUM_UDPv4|M_CSUM_UDPv6))
			type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_UDP;
		else
			offload = false;
		break;
	default:
		offload = false;
		break;
	}

	if (offload) /* Insert L4 checksum into data descriptors */
		*olinfo_status |= IXGBE_TXD_POPTS_TXSM << 8;

no_offloads:
	type_tucmd_mlhl |= IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;

	/* Now copy bits into descriptor */
	TXD->vlan_macip_lens = htole32(vlan_macip_lens);
	TXD->type_tucmd_mlhl = htole32(type_tucmd_mlhl);
	TXD->seqnum_seed = htole32(0);
	TXD->mss_l4len_idx = htole32(0);

	/* We've consumed the first desc, adjust counters */
	if (++ctxd == txr->num_desc)
		ctxd = 0;
	txr->next_avail_desc = ctxd;
	--txr->tx_avail;

	return 0;
}

/**********************************************************************
 *
 *  Setup work for hardware segmentation offload (TSO) on
 *  adapters using advanced tx descriptors
 *
 **********************************************************************/
static int
ixgbe_tso_setup(struct tx_ring *txr, struct mbuf *mp,
    u32 *cmd_type_len, u32 *olinfo_status)
{
	struct m_tag *mtag;
	struct adapter *adapter = txr->adapter;
	struct ethercom *ec = &adapter->osdep.ec;
	struct ixgbe_adv_tx_context_desc *TXD;
	u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0;
	u32 mss_l4len_idx = 0, paylen;
	u16 vtag = 0, eh_type;
	int ctxd, ehdrlen, ip_hlen, tcp_hlen;
	struct ether_vlan_header *eh;
#ifdef INET6
	struct ip6_hdr *ip6;
#endif
#ifdef INET
	struct ip *ip;
#endif
	struct tcphdr *th;

	/*
	 * Determine where frame payload starts.
	 * Jump over vlan headers if already present
	 */
	eh = mtod(mp, struct ether_vlan_header *);
	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
		ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
		eh_type = eh->evl_proto;
	} else {
		ehdrlen = ETHER_HDR_LEN;
		eh_type = eh->evl_encap_proto;
	}

	switch (ntohs(eh_type)) {
#ifdef INET6
	case ETHERTYPE_IPV6:
		ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
		/* XXX-BZ For now we do not pretend to support ext. hdrs. */
		if (ip6->ip6_nxt != IPPROTO_TCP)
			return (ENXIO);
		ip_hlen = sizeof(struct ip6_hdr);
		ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
		th = (struct tcphdr *)((char *)ip6 + ip_hlen);
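		/*
		 * Seed the TCP checksum field with the pseudo-header
		 * sum (addresses and protocol only, zero length); the
		 * hardware folds in the per-segment payload bytes
		 * when it performs the TSO split.
		 */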
		th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
		    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
		type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV6;
		break;
#endif
#ifdef INET
	case ETHERTYPE_IP:
		ip = (struct ip *)(mp->m_data + ehdrlen);
		if (ip->ip_p != IPPROTO_TCP)
			return (ENXIO);
		ip->ip_sum = 0;
		ip_hlen = ip->ip_hl << 2;
		th = (struct tcphdr *)((char *)ip + ip_hlen);
		th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
		    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
		type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
		/* Tell transmit desc to also do IPv4 checksum. */
		*olinfo_status |= IXGBE_TXD_POPTS_IXSM << 8;
		break;
#endif
	default:
		panic("%s: CSUM_TSO but no supported IP version (0x%04x)",
		    __func__, ntohs(eh_type));
		break;
	}

	ctxd = txr->next_avail_desc;
	TXD = (struct ixgbe_adv_tx_context_desc *) &txr->tx_base[ctxd];

	tcp_hlen = th->th_off << 2;

	/* This is used in the transmit desc in encap */
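	/*
	 * paylen is the L4 payload only: total frame length minus
	 * the L2, L3 and TCP headers. The hardware combines it with
	 * the MSS below to size and count the segments it emits.
	 */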
	paylen = mp->m_pkthdr.len - ehdrlen - ip_hlen - tcp_hlen;

	/* VLAN MACLEN IPLEN */
	if ((mtag = VLAN_OUTPUT_TAG(ec, mp)) != NULL) {
		vtag = htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
		vlan_macip_lens |= (vtag << IXGBE_ADVTXD_VLAN_SHIFT);
	}

	vlan_macip_lens |= ehdrlen << IXGBE_ADVTXD_MACLEN_SHIFT;
	vlan_macip_lens |= ip_hlen;
	TXD->vlan_macip_lens = htole32(vlan_macip_lens);

	/* ADV DTYPE TUCMD */
	type_tucmd_mlhl |= IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
	type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
	TXD->type_tucmd_mlhl = htole32(type_tucmd_mlhl);

	/* MSS L4LEN IDX */
	mss_l4len_idx |= (mp->m_pkthdr.segsz << IXGBE_ADVTXD_MSS_SHIFT);
	mss_l4len_idx |= (tcp_hlen << IXGBE_ADVTXD_L4LEN_SHIFT);
	TXD->mss_l4len_idx = htole32(mss_l4len_idx);

	TXD->seqnum_seed = htole32(0);

	if (++ctxd == txr->num_desc)
		ctxd = 0;

	txr->tx_avail--;
	txr->next_avail_desc = ctxd;
	*cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;
	*olinfo_status |= IXGBE_TXD_POPTS_TXSM << 8;
	*olinfo_status |= paylen << IXGBE_ADVTXD_PAYLEN_SHIFT;
	++txr->tso_tx.ev_count;
	return (0);
}

/**********************************************************************
 *
 *  Examine each tx_buffer in the used queue. If the hardware is done
 *  processing the packet then free associated resources. The
 *  tx_buffer is put back on the free queue.
 *
 **********************************************************************/
void
ixgbe_txeof(struct tx_ring *txr)
{
	struct adapter		*adapter = txr->adapter;
	struct ifnet		*ifp = adapter->ifp;
	u32			work, processed = 0;
	u32			limit = adapter->tx_process_limit;
	struct ixgbe_tx_buf	*buf;
	union ixgbe_adv_tx_desc *txd;

	KASSERT(mutex_owned(&txr->tx_mtx));

#ifdef DEV_NETMAP
	if (ifp->if_capenable & IFCAP_NETMAP) {
		struct netmap_adapter *na = NA(ifp);
		struct netmap_kring *kring = &na->tx_rings[txr->me];
		txd = txr->tx_base;
		bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
		    BUS_DMASYNC_POSTREAD);
		/*
		 * In netmap mode, all the work is done in the context
		 * of the client thread. Interrupt handlers only wake up
		 * clients, which may be sleeping on individual rings
		 * or on a global resource for all rings.
		 * To implement tx interrupt mitigation, we wake up the client
		 * thread roughly every half ring, even if the NIC interrupts
		 * more frequently. This is implemented as follows:
		 * - ixgbe_txsync() sets kring->nr_kflags with the index of
		 *   the slot that should wake up the thread (nkr_num_slots
		 *   means the user thread should not be woken up);
		 * - the driver ignores tx interrupts unless netmap_mitigate=0
		 *   or the slot has the DD bit set.
		 */
		if (!netmap_mitigate ||
		    (kring->nr_kflags < kring->nkr_num_slots &&
		    txd[kring->nr_kflags].wb.status & IXGBE_TXD_STAT_DD)) {
			netmap_tx_irq(ifp, txr->me);
		}
		return;
	}
#endif /* DEV_NETMAP */

	if (txr->tx_avail == txr->num_desc) {
		txr->busy = 0;
		return;
	}

	/* Get work starting point */
	work = txr->next_to_clean;
	buf = &txr->tx_buffers[work];
	txd = &txr->tx_base[work];
	work -= txr->num_desc; /* The distance to ring end */
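	/*
	 * 'work' is now the (negative) distance to the end of the
	 * ring; it is incremented as descriptors are cleaned and
	 * reaches zero exactly at the wrap point, so the wrap tests
	 * below are just cheap (!work) checks.
	 */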
	ixgbe_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
	    BUS_DMASYNC_POSTREAD);

	do {
		union ixgbe_adv_tx_desc *eop = buf->eop;
		if (eop == NULL) /* No work */
			break;

		if ((eop->wb.status & IXGBE_TXD_STAT_DD) == 0)
			break;	/* I/O not complete */

		if (buf->m_head) {
			txr->bytes +=
			    buf->m_head->m_pkthdr.len;
			bus_dmamap_sync(txr->txtag->dt_dmat,
			    buf->map,
			    0, buf->m_head->m_pkthdr.len,
			    BUS_DMASYNC_POSTWRITE);
			ixgbe_dmamap_unload(txr->txtag,
			    buf->map);
			m_freem(buf->m_head);
			buf->m_head = NULL;
		}
		buf->eop = NULL;
		++txr->tx_avail;

		/* We clean the range if multi segment */
		while (txd != eop) {
			++txd;
			++buf;
			++work;
			/* wrap the ring? */
			if (__predict_false(!work)) {
				work -= txr->num_desc;
				buf = txr->tx_buffers;
				txd = txr->tx_base;
			}
			if (buf->m_head) {
				txr->bytes +=
				    buf->m_head->m_pkthdr.len;
				bus_dmamap_sync(txr->txtag->dt_dmat,
				    buf->map,
				    0, buf->m_head->m_pkthdr.len,
				    BUS_DMASYNC_POSTWRITE);
				ixgbe_dmamap_unload(txr->txtag,
				    buf->map);
				m_freem(buf->m_head);
				buf->m_head = NULL;
			}
			++txr->tx_avail;
			buf->eop = NULL;
		}
		++txr->packets;
		++processed;
		++ifp->if_opackets;

		/* Try the next packet */
		++txd;
		++buf;
		++work;
		/* reset with a wrap */
		if (__predict_false(!work)) {
			work -= txr->num_desc;
			buf = txr->tx_buffers;
			txd = txr->tx_base;
		}
		prefetch(txd);
	} while (__predict_true(--limit));

	ixgbe_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	work += txr->num_desc;
	txr->next_to_clean = work;

	/*
	** Queue Hang detection: we know there's
	** work outstanding or the first return
	** would have been taken, so increment busy
	** if nothing managed to get cleaned; then
	** in local_timer it will be checked and
	** marked as HUNG if it exceeds a MAX attempt.
	*/
	if ((processed == 0) && (txr->busy != IXGBE_QUEUE_HUNG))
		++txr->busy;
	/*
	** If anything gets cleaned we reset state to 1;
	** note this will turn off HUNG if it's set.
	*/
	if (processed)
		txr->busy = 1;

	if (txr->tx_avail == txr->num_desc)
		txr->busy = 0;

	return;
}

#ifdef IXGBE_FDIR
/*
** This routine parses packet headers so that Flow
** Director can make a hashed filter table entry
** allowing traffic flows to be identified and kept
** on the same cpu. This would be a performance
** hit, but we only do it for one packet in every
** atr_sample_rate.
*/
static void
ixgbe_atr(struct tx_ring *txr, struct mbuf *mp)
{
	struct adapter			*adapter = txr->adapter;
	struct ix_queue			*que;
	struct ip			*ip;
	struct tcphdr			*th;
	struct udphdr			*uh;
	struct ether_vlan_header	*eh;
	union ixgbe_atr_hash_dword	input = {.dword = 0};
	union ixgbe_atr_hash_dword	common = {.dword = 0};
	int				ehdrlen, ip_hlen;
	u16				etype;

	eh = mtod(mp, struct ether_vlan_header *);
	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
		ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
		etype = eh->evl_proto;
	} else {
		ehdrlen = ETHER_HDR_LEN;
		etype = eh->evl_encap_proto;
	}

	/* Only handling IPv4 */
	if (etype != htons(ETHERTYPE_IP))
		return;

	ip = (struct ip *)(mp->m_data + ehdrlen);
	ip_hlen = ip->ip_hl << 2;

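	/*
	 * The source and destination below are deliberately swapped:
	 * sampling a transmitted packet programs a filter keyed for
	 * the reverse (receive) direction of the same flow, so our
	 * TX source is the expected RX destination and vice versa.
	 */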
	/* check if we're UDP or TCP */
	switch (ip->ip_p) {
	case IPPROTO_TCP:
		th = (struct tcphdr *)((char *)ip + ip_hlen);
		/* src and dst are inverted */
		common.port.dst ^= th->th_sport;
		common.port.src ^= th->th_dport;
		input.formatted.flow_type ^= IXGBE_ATR_FLOW_TYPE_TCPV4;
		break;
	case IPPROTO_UDP:
		uh = (struct udphdr *)((char *)ip + ip_hlen);
		/* src and dst are inverted */
		common.port.dst ^= uh->uh_sport;
		common.port.src ^= uh->uh_dport;
		input.formatted.flow_type ^= IXGBE_ATR_FLOW_TYPE_UDPV4;
		break;
	default:
		return;
	}

	input.formatted.vlan_id = htobe16(mp->m_pkthdr.ether_vtag);
	if (mp->m_pkthdr.ether_vtag)
		common.flex_bytes ^= htons(ETHERTYPE_VLAN);
	else
		common.flex_bytes ^= etype;
	common.ip ^= ip->ip_src.s_addr ^ ip->ip_dst.s_addr;

	que = &adapter->queues[txr->me];
	/*
	** This assumes the Rx queue and Tx
	** queue are bound to the same CPU
	*/
	ixgbe_fdir_add_signature_filter_82599(&adapter->hw,
	    input, common, que->msix);
}
#endif /* IXGBE_FDIR */

/*
** Used to detect a descriptor that has
** been merged by Hardware RSC.
*/
static inline u32
ixgbe_rsc_count(union ixgbe_adv_rx_desc *rx)
{
	return (le32toh(rx->wb.lower.lo_dword.data) &
	    IXGBE_RXDADV_RSCCNT_MASK) >> IXGBE_RXDADV_RSCCNT_SHIFT;
}

/*********************************************************************
 *
 *  Initialize Hardware RSC (LRO) feature on 82599
 *  for an RX ring; this is toggled by the LRO capability
 *  even though it is transparent to the stack.
 *
 *  NOTE: since this HW feature only works with IPV4 and
 *        our testing has shown soft LRO to be as effective,
 *        I have decided to disable this by default.
 *
 **********************************************************************/
static void
ixgbe_setup_hw_rsc(struct rx_ring *rxr)
{
	struct	adapter		*adapter = rxr->adapter;
	struct	ixgbe_hw	*hw = &adapter->hw;
	u32			rscctrl, rdrxctl;

	/* If turning LRO/RSC off we need to disable it */
	if ((adapter->ifp->if_capenable & IFCAP_LRO) == 0) {
		rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(rxr->me));
		rscctrl &= ~IXGBE_RSCCTL_RSCEN;
		/* Write the cleared bit back so the disable takes effect */
		IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(rxr->me), rscctrl);
		return;
	}

	rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
	rdrxctl &= ~IXGBE_RDRXCTL_RSCFRSTSIZE;
#ifdef DEV_NETMAP /* crcstrip is optional in netmap */
	if (adapter->ifp->if_capenable & IFCAP_NETMAP && !ix_crcstrip)
#endif /* DEV_NETMAP */
	rdrxctl |= IXGBE_RDRXCTL_CRCSTRIP;
	rdrxctl |= IXGBE_RDRXCTL_RSCACKC;
	IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);

	rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(rxr->me));
	rscctrl |= IXGBE_RSCCTL_RSCEN;
	/*
	** Limit the total number of descriptors that
	** can be combined, so it does not exceed 64K
	*/
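	/*
	** MAXDESC_16 is the hardware maximum; small clusters can use
	** it safely (16 x 2KB = 32KB), while larger clusters must
	** scale the count down, e.g. 4 x 9KB = 36KB fits but
	** 8 x 9KB = 72KB would exceed the 64KB limit.
	*/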
	if (rxr->mbuf_sz == MCLBYTES)
		rscctrl |= IXGBE_RSCCTL_MAXDESC_16;
	else if (rxr->mbuf_sz == MJUMPAGESIZE)
		rscctrl |= IXGBE_RSCCTL_MAXDESC_8;
	else if (rxr->mbuf_sz == MJUM9BYTES)
		rscctrl |= IXGBE_RSCCTL_MAXDESC_4;
	else  /* Using 16K cluster */
		rscctrl |= IXGBE_RSCCTL_MAXDESC_1;

	IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(rxr->me), rscctrl);

	/* Enable TCP header recognition */
	IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0),
	    (IXGBE_READ_REG(hw, IXGBE_PSRTYPE(0)) |
	    IXGBE_PSRTYPE_TCPHDR));

	/* Disable RSC for ACK packets */
	IXGBE_WRITE_REG(hw, IXGBE_RSCDBU,
	    (IXGBE_RSCDBU_RSCACKDIS | IXGBE_READ_REG(hw, IXGBE_RSCDBU)));

	rxr->hw_rsc = TRUE;
}

/*********************************************************************
 *
 *  Refresh mbuf buffers for RX descriptor rings
 *   - now keeps its own state so discards due to resource
 *     exhaustion are unnecessary; if an mbuf cannot be obtained
 *     it just returns, keeping its placeholder, so it can simply
 *     be recalled to try again.
 *
 **********************************************************************/
static void
ixgbe_refresh_mbufs(struct rx_ring *rxr, int limit)
{
	struct adapter		*adapter = rxr->adapter;
	struct ixgbe_rx_buf	*rxbuf;
	struct mbuf		*mp;
	int			i, j, error;
	bool			refreshed = false;

	i = j = rxr->next_to_refresh;
	/* Control the loop with one beyond */
	if (++j == rxr->num_desc)
		j = 0;
1347 1.1 msaitoh
1348 1.1 msaitoh while (j != limit) {
1349 1.1 msaitoh rxbuf = &rxr->rx_buffers[i];
1350 1.1 msaitoh if (rxbuf->buf == NULL) {
1351 1.1 msaitoh mp = ixgbe_getjcl(&adapter->jcl_head, M_NOWAIT,
1352 1.1 msaitoh MT_DATA, M_PKTHDR, rxr->mbuf_sz);
1353 1.1 msaitoh if (mp == NULL) {
1354 1.1 msaitoh rxr->no_jmbuf.ev_count++;
1355 1.1 msaitoh goto update;
1356 1.1 msaitoh }
1357 1.1 msaitoh if (adapter->max_frame_size <= (MCLBYTES - ETHER_ALIGN))
1358 1.1 msaitoh m_adj(mp, ETHER_ALIGN);
1359 1.1 msaitoh } else
1360 1.1 msaitoh mp = rxbuf->buf;
1361 1.1 msaitoh
1362 1.1 msaitoh mp->m_pkthdr.len = mp->m_len = rxr->mbuf_sz;
1363 1.1 msaitoh
1364 1.1 msaitoh /* If we're dealing with an mbuf that was copied rather
1365 1.1 msaitoh * than replaced, there's no need to go through busdma.
1366 1.1 msaitoh */
1367 1.1 msaitoh if ((rxbuf->flags & IXGBE_RX_COPY) == 0) {
1368 1.1 msaitoh /* Get the memory mapping */
1369 1.4 msaitoh ixgbe_dmamap_unload(rxr->ptag, rxbuf->pmap);
1370 1.1 msaitoh error = bus_dmamap_load_mbuf(rxr->ptag->dt_dmat,
1371 1.1 msaitoh rxbuf->pmap, mp, BUS_DMA_NOWAIT);
1372 1.1 msaitoh if (error != 0) {
1373 1.1 msaitoh printf("Refresh mbufs: payload dmamap load"
1374 1.1 msaitoh " failure - %d\n", error);
1375 1.1 msaitoh m_free(mp);
1376 1.1 msaitoh rxbuf->buf = NULL;
1377 1.1 msaitoh goto update;
1378 1.1 msaitoh }
1379 1.1 msaitoh rxbuf->buf = mp;
1380 1.1 msaitoh bus_dmamap_sync(rxr->ptag->dt_dmat, rxbuf->pmap,
1381 1.1 msaitoh 0, mp->m_pkthdr.len, BUS_DMASYNC_PREREAD);
1382 1.1 msaitoh rxbuf->addr = rxr->rx_base[i].read.pkt_addr =
1383 1.1 msaitoh htole64(rxbuf->pmap->dm_segs[0].ds_addr);
1384 1.1 msaitoh } else {
1385 1.1 msaitoh rxr->rx_base[i].read.pkt_addr = rxbuf->addr;
1386 1.1 msaitoh rxbuf->flags &= ~IXGBE_RX_COPY;
1387 1.1 msaitoh }
1388 1.1 msaitoh
1389 1.1 msaitoh refreshed = true;
1390 1.1 msaitoh /* Next is precalculated */
1391 1.1 msaitoh i = j;
1392 1.1 msaitoh rxr->next_to_refresh = i;
1393 1.1 msaitoh if (++j == rxr->num_desc)
1394 1.1 msaitoh j = 0;
1395 1.1 msaitoh }
1396 1.1 msaitoh update:
1397 1.1 msaitoh if (refreshed) /* Update hardware tail index */
1398 1.1 msaitoh IXGBE_WRITE_REG(&adapter->hw,
1399 1.3 msaitoh rxr->tail, rxr->next_to_refresh);
1400 1.1 msaitoh return;
1401 1.1 msaitoh }
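/*
 * The i/j "one beyond" pattern above in miniature, as a sketch with
 * made-up numbers (num_desc, limit and the starting index are
 * hypothetical): j always runs one slot ahead of i modulo the ring
 * size, so the loop stops exactly when the slot after the one just
 * refreshed reaches 'limit'.
 */
#if 0 /* illustrative sketch, not compiled */
	int i, j;
	const int num_desc = 8, limit = 5;

	i = j = 2;			/* pretend next_to_refresh == 2 */
	if (++j == num_desc)
		j = 0;
	while (j != limit) {
		/* refresh slot i here */
		i = j;			/* i visits 2 and 3 ... */
		if (++j == num_desc)
			j = 0;
	}
	/* ... and i (the new next_to_refresh) is left at 4 */
#endif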
1402 1.1 msaitoh
1403 1.1 msaitoh /*********************************************************************
1404 1.1 msaitoh *
1405 1.1 msaitoh * Allocate memory for rx_buffer structures. Since we use one
1406 1.1 msaitoh * rx_buffer per received packet, the maximum number of rx_buffer's
1407 1.1 msaitoh * that we'll need is equal to the number of receive descriptors
1408 1.1 msaitoh * that we've allocated.
1409 1.1 msaitoh *
1410 1.1 msaitoh **********************************************************************/
1411 1.1 msaitoh int
1412 1.1 msaitoh ixgbe_allocate_receive_buffers(struct rx_ring *rxr)
1413 1.1 msaitoh {
1414 1.1 msaitoh struct adapter *adapter = rxr->adapter;
1415 1.1 msaitoh device_t dev = adapter->dev;
1416 1.1 msaitoh struct ixgbe_rx_buf *rxbuf;
1417 1.5 msaitoh int bsize, error;
1418 1.1 msaitoh
1419 1.1 msaitoh bsize = sizeof(struct ixgbe_rx_buf) * rxr->num_desc;
1420 1.1 msaitoh if (!(rxr->rx_buffers =
1421 1.1 msaitoh (struct ixgbe_rx_buf *) malloc(bsize,
1422 1.1 msaitoh M_DEVBUF, M_NOWAIT | M_ZERO))) {
1423 1.1 msaitoh aprint_error_dev(dev, "Unable to allocate rx_buffer memory\n");
1424 1.1 msaitoh error = ENOMEM;
1425 1.1 msaitoh goto fail;
1426 1.1 msaitoh }
1427 1.1 msaitoh
1428 1.1 msaitoh if ((error = ixgbe_dma_tag_create(adapter->osdep.dmat, /* parent */
1429 1.1 msaitoh 1, 0, /* alignment, bounds */
1430 1.1 msaitoh MJUM16BYTES, /* maxsize */
1431 1.1 msaitoh 1, /* nsegments */
1432 1.1 msaitoh MJUM16BYTES, /* maxsegsize */
1433 1.1 msaitoh 0, /* flags */
1434 1.1 msaitoh &rxr->ptag))) {
1435 1.1 msaitoh aprint_error_dev(dev, "Unable to create RX DMA tag\n");
1436 1.1 msaitoh goto fail;
1437 1.1 msaitoh }
1438 1.1 msaitoh
1439 1.5 msaitoh for (int i = 0; i < rxr->num_desc; i++) {
1440 1.1 msaitoh rxbuf = &rxr->rx_buffers[i];
1441 1.4 msaitoh error = ixgbe_dmamap_create(rxr->ptag, 0, &rxbuf->pmap);
1442 1.1 msaitoh if (error) {
1443 1.1 msaitoh aprint_error_dev(dev, "Unable to create RX dma map\n");
1444 1.1 msaitoh goto fail;
1445 1.1 msaitoh }
1446 1.1 msaitoh }
1447 1.1 msaitoh
1448 1.1 msaitoh return (0);
1449 1.1 msaitoh
1450 1.1 msaitoh fail:
1451 1.1 msaitoh /* Frees all, but can handle partial completion */
1452 1.1 msaitoh ixgbe_free_receive_structures(adapter);
1453 1.1 msaitoh return (error);
1454 1.1 msaitoh }
1455 1.1 msaitoh
1456 1.1 msaitoh static void
1457 1.1 msaitoh ixgbe_free_receive_ring(struct rx_ring *rxr)
1458 1.1 msaitoh {
1459 1.1 msaitoh struct ixgbe_rx_buf *rxbuf;
1460 1.1 msaitoh
1461 1.5 msaitoh for (int i = 0; i < rxr->num_desc; i++) {
1462 1.1 msaitoh rxbuf = &rxr->rx_buffers[i];
1463 1.1 msaitoh if (rxbuf->buf != NULL) {
1464 1.1 msaitoh bus_dmamap_sync(rxr->ptag->dt_dmat, rxbuf->pmap,
1465 1.1 msaitoh 0, rxbuf->buf->m_pkthdr.len,
1466 1.1 msaitoh BUS_DMASYNC_POSTREAD);
1467 1.1 msaitoh ixgbe_dmamap_unload(rxr->ptag, rxbuf->pmap);
1468 1.1 msaitoh rxbuf->buf->m_flags |= M_PKTHDR;
1469 1.1 msaitoh m_freem(rxbuf->buf);
1470 1.1 msaitoh rxbuf->buf = NULL;
1471 1.1 msaitoh rxbuf->flags = 0;
1472 1.1 msaitoh }
1473 1.1 msaitoh }
1474 1.1 msaitoh }
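/*
 * The teardown order used above is the canonical bus_dma RX-buffer
 * lifecycle and is worth spelling out. In this sketch, tag, map, len
 * and m are hypothetical bus_dma/mbuf handles standing in for the
 * ring's ptag->dt_dmat, the buffer's pmap, the mbuf length and the
 * mbuf itself:
 */
#if 0 /* illustrative sketch, not compiled */
	/* 1. complete any DMA the device did into the buffer */
	bus_dmamap_sync(tag, map, 0, len, BUS_DMASYNC_POSTREAD);
	/* 2. tear down the device-visible mapping */
	bus_dmamap_unload(tag, map);
	/* 3. only then give the memory back */
	m_freem(m);
#endif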
1475 1.1 msaitoh
1476 1.1 msaitoh /*********************************************************************
1477 1.1 msaitoh *
1478 1.1 msaitoh * Initialize a receive ring and its buffers.
1479 1.1 msaitoh *
1480 1.1 msaitoh **********************************************************************/
1481 1.1 msaitoh static int
1482 1.1 msaitoh ixgbe_setup_receive_ring(struct rx_ring *rxr)
1483 1.1 msaitoh {
1484 1.1 msaitoh struct adapter *adapter;
1485 1.1 msaitoh struct ixgbe_rx_buf *rxbuf;
1486 1.1 msaitoh #ifdef LRO
1487 1.1 msaitoh struct ifnet *ifp;
1488 1.1 msaitoh struct lro_ctrl *lro = &rxr->lro;
1489 1.1 msaitoh #endif /* LRO */
1490 1.1 msaitoh int rsize, error = 0;
1491 1.1 msaitoh #ifdef DEV_NETMAP
1492 1.1 msaitoh struct netmap_adapter *na = NA(rxr->adapter->ifp);
1493 1.1 msaitoh struct netmap_slot *slot;
1494 1.1 msaitoh #endif /* DEV_NETMAP */
1495 1.1 msaitoh
1496 1.1 msaitoh adapter = rxr->adapter;
1497 1.1 msaitoh #ifdef LRO
1498 1.1 msaitoh ifp = adapter->ifp;
1499 1.1 msaitoh #endif /* LRO */
1500 1.1 msaitoh
1501 1.1 msaitoh /* Clear the ring contents */
1502 1.1 msaitoh IXGBE_RX_LOCK(rxr);
1503 1.1 msaitoh #ifdef DEV_NETMAP
1504 1.1 msaitoh /* same as in ixgbe_setup_transmit_ring() */
1505 1.1 msaitoh slot = netmap_reset(na, NR_RX, rxr->me, 0);
1506 1.1 msaitoh #endif /* DEV_NETMAP */
1507 1.1 msaitoh rsize = roundup2(adapter->num_rx_desc *
1508 1.1 msaitoh sizeof(union ixgbe_adv_rx_desc), DBA_ALIGN);
1509 1.1 msaitoh bzero((void *)rxr->rx_base, rsize);
1510 1.1 msaitoh /* Cache the size */
1511 1.1 msaitoh rxr->mbuf_sz = adapter->rx_mbuf_sz;
1512 1.1 msaitoh
1513 1.1 msaitoh /* Free current RX buffer structs and their mbufs */
1514 1.1 msaitoh ixgbe_free_receive_ring(rxr);
1515 1.1 msaitoh
1516 1.1 msaitoh IXGBE_RX_UNLOCK(rxr);
1517 1.1 msaitoh
1518 1.1 msaitoh /* Now reinitialize our supply of jumbo mbufs. The number
1519 1.1 msaitoh * or size of jumbo mbufs may have changed.
1520 1.1 msaitoh */
1521 1.1 msaitoh ixgbe_jcl_reinit(&adapter->jcl_head, rxr->ptag->dt_dmat,
1522 1.1 msaitoh 2 * adapter->num_rx_desc, adapter->rx_mbuf_sz);
1523 1.1 msaitoh
1524 1.1 msaitoh IXGBE_RX_LOCK(rxr);
1525 1.1 msaitoh
1526 1.1 msaitoh /* Now replenish the mbufs */
1527 1.1 msaitoh for (int j = 0; j != rxr->num_desc; ++j) {
1528 1.1 msaitoh struct mbuf *mp;
1529 1.1 msaitoh
1530 1.1 msaitoh rxbuf = &rxr->rx_buffers[j];
1531 1.1 msaitoh #ifdef DEV_NETMAP
1532 1.1 msaitoh /*
1533 1.1 msaitoh * In netmap mode, fill the map and set the buffer
1534 1.1 msaitoh * address in the NIC ring, considering the offset
1535 1.1 msaitoh * between the netmap and NIC rings (see comment in
1536 1.1 msaitoh * ixgbe_setup_transmit_ring() ). No need to allocate
1537 1.1 msaitoh * an mbuf, so end the block with a continue;
1538 1.1 msaitoh */
1539 1.1 msaitoh if (slot) {
1540 1.1 msaitoh int sj = netmap_idx_n2k(&na->rx_rings[rxr->me], j);
1541 1.1 msaitoh uint64_t paddr;
1542 1.1 msaitoh void *addr;
1543 1.1 msaitoh
1544 1.1 msaitoh addr = PNMB(na, slot + sj, &paddr);
1545 1.1 msaitoh netmap_load_map(na, rxr->ptag, rxbuf->pmap, addr);
1546 1.1 msaitoh /* Update descriptor and the cached value */
1547 1.1 msaitoh rxr->rx_base[j].read.pkt_addr = htole64(paddr);
1548 1.1 msaitoh rxbuf->addr = htole64(paddr);
1549 1.1 msaitoh continue;
1550 1.1 msaitoh }
1551 1.1 msaitoh #endif /* DEV_NETMAP */
1552 1.1 msaitoh rxbuf->flags = 0;
1553 1.1 msaitoh rxbuf->buf = ixgbe_getjcl(&adapter->jcl_head, M_NOWAIT,
1554 1.1 msaitoh MT_DATA, M_PKTHDR, adapter->rx_mbuf_sz);
1555 1.1 msaitoh if (rxbuf->buf == NULL) {
1556 1.1 msaitoh error = ENOBUFS;
1557 1.1 msaitoh goto fail;
1558 1.1 msaitoh }
1559 1.1 msaitoh mp = rxbuf->buf;
1560 1.1 msaitoh mp->m_pkthdr.len = mp->m_len = rxr->mbuf_sz;
1561 1.1 msaitoh /* Get the memory mapping */
1562 1.1 msaitoh error = bus_dmamap_load_mbuf(rxr->ptag->dt_dmat,
1563 1.1 msaitoh rxbuf->pmap, mp, BUS_DMA_NOWAIT);
1564 1.1 msaitoh if (error != 0)
1565 1.1 msaitoh goto fail;
1566 1.1 msaitoh bus_dmamap_sync(rxr->ptag->dt_dmat, rxbuf->pmap,
1567 1.1 msaitoh 0, adapter->rx_mbuf_sz, BUS_DMASYNC_PREREAD);
1568 1.1 msaitoh /* Update the descriptor and the cached value */
1569 1.1 msaitoh rxr->rx_base[j].read.pkt_addr =
1570 1.1 msaitoh htole64(rxbuf->pmap->dm_segs[0].ds_addr);
1571 1.1 msaitoh rxbuf->addr = htole64(rxbuf->pmap->dm_segs[0].ds_addr);
1572 1.1 msaitoh }
1573 1.1 msaitoh
1574 1.1 msaitoh
1575 1.1 msaitoh /* Setup our descriptor indices */
1576 1.1 msaitoh rxr->next_to_check = 0;
1577 1.1 msaitoh rxr->next_to_refresh = 0;
1578 1.1 msaitoh rxr->lro_enabled = FALSE;
1579 1.1 msaitoh rxr->rx_copies.ev_count = 0;
1580 1.13 msaitoh #if 0 /* NetBSD */
1581 1.1 msaitoh rxr->rx_bytes.ev_count = 0;
1582 1.13 msaitoh #if 1 /* Fix inconsistency */
1583 1.13 msaitoh rxr->rx_packets.ev_count = 0;
1584 1.13 msaitoh #endif
1585 1.13 msaitoh #endif
1586 1.1 msaitoh rxr->vtag_strip = FALSE;
1587 1.1 msaitoh
1588 1.1 msaitoh ixgbe_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
1589 1.1 msaitoh BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1590 1.1 msaitoh
1591 1.1 msaitoh /*
1592 1.1 msaitoh ** Now set up the LRO interface:
1593 1.1 msaitoh */
1594 1.1 msaitoh if (ixgbe_rsc_enable)
1595 1.1 msaitoh ixgbe_setup_hw_rsc(rxr);
1596 1.1 msaitoh #ifdef LRO
1597 1.1 msaitoh else if (ifp->if_capenable & IFCAP_LRO) {
1598 1.1 msaitoh device_t dev = adapter->dev;
1599 1.1 msaitoh int err = tcp_lro_init(lro);
1600 1.1 msaitoh if (err) {
1601 1.1 msaitoh device_printf(dev, "LRO Initialization failed!\n");
1602 1.1 msaitoh goto fail;
1603 1.1 msaitoh }
1604 1.1 msaitoh INIT_DEBUGOUT("RX Soft LRO Initialized\n");
1605 1.1 msaitoh rxr->lro_enabled = TRUE;
1606 1.1 msaitoh lro->ifp = adapter->ifp;
1607 1.1 msaitoh }
1608 1.1 msaitoh #endif /* LRO */
1609 1.1 msaitoh
1610 1.1 msaitoh IXGBE_RX_UNLOCK(rxr);
1611 1.1 msaitoh return (0);
1612 1.1 msaitoh
1613 1.1 msaitoh fail:
1614 1.1 msaitoh ixgbe_free_receive_ring(rxr);
1615 1.1 msaitoh IXGBE_RX_UNLOCK(rxr);
1616 1.1 msaitoh return (error);
1617 1.1 msaitoh }
1618 1.1 msaitoh
1619 1.1 msaitoh /*********************************************************************
1620 1.1 msaitoh *
1621 1.1 msaitoh * Initialize all receive rings.
1622 1.1 msaitoh *
1623 1.1 msaitoh **********************************************************************/
1624 1.1 msaitoh int
1625 1.1 msaitoh ixgbe_setup_receive_structures(struct adapter *adapter)
1626 1.1 msaitoh {
1627 1.1 msaitoh struct rx_ring *rxr = adapter->rx_rings;
1628 1.1 msaitoh int j;
1629 1.1 msaitoh
1630 1.1 msaitoh for (j = 0; j < adapter->num_queues; j++, rxr++)
1631 1.1 msaitoh if (ixgbe_setup_receive_ring(rxr))
1632 1.1 msaitoh goto fail;
1633 1.1 msaitoh
1634 1.1 msaitoh return (0);
1635 1.1 msaitoh fail:
1636 1.1 msaitoh /*
1637 1.1 msaitoh * Free RX buffers allocated so far; we will only handle
1638 1.1 msaitoh * the rings that completed, since the failing case will have
1639 1.1 msaitoh * cleaned up for itself. 'j' failed, so it's the terminus.
1640 1.1 msaitoh */
1641 1.1 msaitoh for (int i = 0; i < j; ++i) {
1642 1.1 msaitoh rxr = &adapter->rx_rings[i];
1643 1.1 msaitoh ixgbe_free_receive_ring(rxr);
1644 1.1 msaitoh }
1645 1.1 msaitoh
1646 1.1 msaitoh return (ENOBUFS);
1647 1.1 msaitoh }
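/*
 * The partial-failure cleanup above follows a common C idiom: unwind
 * only the units that fully completed and let the failing call clean
 * up after itself. A minimal sketch of the shape, where setup_one()
 * and teardown_one() are hypothetical helpers:
 */
#if 0 /* illustrative sketch, not compiled */
	int j;

	for (j = 0; j < n; j++)
		if (setup_one(j) != 0)	/* cleans up its own partial work */
			goto fail;
	return (0);
fail:
	while (--j >= 0)		/* unwind completed units only */
		teardown_one(j);
	return (ENOBUFS);
#endif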
1648 1.1 msaitoh
1649 1.3 msaitoh
1650 1.1 msaitoh /*********************************************************************
1651 1.1 msaitoh *
1652 1.1 msaitoh * Free all receive rings.
1653 1.1 msaitoh *
1654 1.1 msaitoh **********************************************************************/
1655 1.1 msaitoh void
1656 1.1 msaitoh ixgbe_free_receive_structures(struct adapter *adapter)
1657 1.1 msaitoh {
1658 1.1 msaitoh struct rx_ring *rxr = adapter->rx_rings;
1659 1.1 msaitoh
1660 1.1 msaitoh INIT_DEBUGOUT("ixgbe_free_receive_structures: begin");
1661 1.1 msaitoh
1662 1.1 msaitoh for (int i = 0; i < adapter->num_queues; i++, rxr++) {
1663 1.1 msaitoh #ifdef LRO
1664 1.1 msaitoh struct lro_ctrl *lro = &rxr->lro;
1665 1.1 msaitoh #endif /* LRO */
1666 1.1 msaitoh ixgbe_free_receive_buffers(rxr);
1667 1.1 msaitoh #ifdef LRO
1668 1.1 msaitoh /* Free LRO memory */
1669 1.1 msaitoh tcp_lro_free(lro);
1670 1.1 msaitoh #endif /* LRO */
1671 1.1 msaitoh /* Free the ring memory as well */
1672 1.1 msaitoh ixgbe_dma_free(adapter, &rxr->rxdma);
1673 1.1 msaitoh IXGBE_RX_LOCK_DESTROY(rxr);
1674 1.1 msaitoh }
1675 1.1 msaitoh
1676 1.1 msaitoh free(adapter->rx_rings, M_DEVBUF);
1677 1.1 msaitoh }
1678 1.1 msaitoh
1679 1.1 msaitoh
1680 1.1 msaitoh /*********************************************************************
1681 1.1 msaitoh *
1682 1.1 msaitoh * Free receive ring data structures
1683 1.1 msaitoh *
1684 1.1 msaitoh **********************************************************************/
1685 1.1 msaitoh static void
1686 1.1 msaitoh ixgbe_free_receive_buffers(struct rx_ring *rxr)
1687 1.1 msaitoh {
1688 1.1 msaitoh struct adapter *adapter = rxr->adapter;
1689 1.1 msaitoh struct ixgbe_rx_buf *rxbuf;
1690 1.1 msaitoh
1691 1.1 msaitoh INIT_DEBUGOUT("ixgbe_free_receive_buffers: begin");
1692 1.1 msaitoh
1693 1.1 msaitoh /* Cleanup any existing buffers */
1694 1.1 msaitoh if (rxr->rx_buffers != NULL) {
1695 1.1 msaitoh for (int i = 0; i < adapter->num_rx_desc; i++) {
1696 1.1 msaitoh rxbuf = &rxr->rx_buffers[i];
1697 1.1 msaitoh if (rxbuf->buf != NULL) {
1698 1.1 msaitoh bus_dmamap_sync(rxr->ptag->dt_dmat,
1699 1.1 msaitoh rxbuf->pmap, 0, rxbuf->buf->m_pkthdr.len,
1700 1.1 msaitoh BUS_DMASYNC_POSTREAD);
1701 1.1 msaitoh ixgbe_dmamap_unload(rxr->ptag, rxbuf->pmap);
1702 1.1 msaitoh rxbuf->buf->m_flags |= M_PKTHDR;
1703 1.1 msaitoh m_freem(rxbuf->buf);
1704 1.1 msaitoh }
1705 1.1 msaitoh rxbuf->buf = NULL;
1706 1.1 msaitoh if (rxbuf->pmap != NULL) {
1707 1.1 msaitoh ixgbe_dmamap_destroy(rxr->ptag, rxbuf->pmap);
1708 1.1 msaitoh rxbuf->pmap = NULL;
1709 1.1 msaitoh }
1710 1.1 msaitoh }
1711 1.1 msaitoh if (rxr->rx_buffers != NULL) {
1712 1.1 msaitoh free(rxr->rx_buffers, M_DEVBUF);
1713 1.1 msaitoh rxr->rx_buffers = NULL;
1714 1.1 msaitoh }
1715 1.1 msaitoh }
1716 1.1 msaitoh
1717 1.1 msaitoh if (rxr->ptag != NULL) {
1718 1.1 msaitoh ixgbe_dma_tag_destroy(rxr->ptag);
1719 1.1 msaitoh rxr->ptag = NULL;
1720 1.1 msaitoh }
1721 1.1 msaitoh
1722 1.1 msaitoh return;
1723 1.1 msaitoh }
1724 1.1 msaitoh
1725 1.1 msaitoh static __inline void
1726 1.1 msaitoh ixgbe_rx_input(struct rx_ring *rxr, struct ifnet *ifp, struct mbuf *m, u32 ptype)
1727 1.1 msaitoh {
1728 1.1 msaitoh int s;
1729 1.1 msaitoh
1730 1.1 msaitoh #ifdef LRO
1731 1.1 msaitoh struct adapter *adapter = ifp->if_softc;
1732 1.1 msaitoh struct ethercom *ec = &adapter->osdep.ec;
1733 1.1 msaitoh
1734 1.1 msaitoh /*
1735 1.1 msaitoh * At the moment (ATM), LRO is only for IP/TCP packets whose TCP
1736 1.1 msaitoh * checksum has been computed by hardware, and which carry no VLAN
1737 1.1 msaitoh * tag in the ethernet header. For IPv6 we do not yet support ext. hdrs.
1738 1.1 msaitoh */
1739 1.1 msaitoh if (rxr->lro_enabled &&
1740 1.1 msaitoh (ec->ec_capenable & ETHERCAP_VLAN_HWTAGGING) != 0 &&
1741 1.1 msaitoh (ptype & IXGBE_RXDADV_PKTTYPE_ETQF) == 0 &&
1742 1.1 msaitoh ((ptype & (IXGBE_RXDADV_PKTTYPE_IPV4 | IXGBE_RXDADV_PKTTYPE_TCP)) ==
1743 1.1 msaitoh (IXGBE_RXDADV_PKTTYPE_IPV4 | IXGBE_RXDADV_PKTTYPE_TCP) ||
1744 1.1 msaitoh (ptype & (IXGBE_RXDADV_PKTTYPE_IPV6 | IXGBE_RXDADV_PKTTYPE_TCP)) ==
1745 1.1 msaitoh (IXGBE_RXDADV_PKTTYPE_IPV6 | IXGBE_RXDADV_PKTTYPE_TCP)) &&
1746 1.1 msaitoh (m->m_pkthdr.csum_flags & (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) ==
1747 1.1 msaitoh (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) {
1748 1.1 msaitoh /*
1749 1.1 msaitoh * Send to the stack if:
1750 1.1 msaitoh *  - LRO not enabled, or
1751 1.1 msaitoh *  - no LRO resources, or
1752 1.1 msaitoh *  - lro enqueue fails
1753 1.1 msaitoh */
1754 1.1 msaitoh if (rxr->lro.lro_cnt != 0)
1755 1.1 msaitoh if (tcp_lro_rx(&rxr->lro, m, 0) == 0)
1756 1.1 msaitoh return;
1757 1.1 msaitoh }
1758 1.1 msaitoh #endif /* LRO */
1759 1.1 msaitoh
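	/*
	 * Drop the ring lock across if_input(): the stack may call
	 * back into the driver (e.g. to transmit a reply) and must
	 * not find the RX ring mutex already held.
	 */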
1760 1.1 msaitoh IXGBE_RX_UNLOCK(rxr);
1761 1.1 msaitoh
1762 1.1 msaitoh s = splnet();
1763 1.1 msaitoh if_input(ifp, m);
1764 1.1 msaitoh splx(s);
1765 1.1 msaitoh
1766 1.1 msaitoh IXGBE_RX_LOCK(rxr);
1767 1.1 msaitoh }
1768 1.1 msaitoh
1769 1.1 msaitoh static __inline void
1770 1.1 msaitoh ixgbe_rx_discard(struct rx_ring *rxr, int i)
1771 1.1 msaitoh {
1772 1.1 msaitoh struct ixgbe_rx_buf *rbuf;
1773 1.1 msaitoh
1774 1.1 msaitoh rbuf = &rxr->rx_buffers[i];
1775 1.1 msaitoh
1776 1.1 msaitoh
1777 1.1 msaitoh /*
1778 1.1 msaitoh ** With advanced descriptors the writeback
1779 1.1 msaitoh ** clobbers the buffer addrs, so it's easier
1780 1.1 msaitoh ** to just free the existing mbufs and take
1781 1.1 msaitoh ** the normal refresh path to get new buffers
1782 1.1 msaitoh ** and mapping.
1783 1.1 msaitoh */
1784 1.1 msaitoh
1785 1.1 msaitoh 	if (rbuf->fmp != NULL) {	/* Partial chain ? */
1786 1.1 msaitoh rbuf->fmp->m_flags |= M_PKTHDR;
1787 1.1 msaitoh m_freem(rbuf->fmp);
1788 1.1 msaitoh rbuf->fmp = NULL;
1789 1.1 msaitoh rbuf->buf = NULL; /* rbuf->buf is part of fmp's chain */
1790 1.1 msaitoh } else if (rbuf->buf) {
1791 1.1 msaitoh m_free(rbuf->buf);
1792 1.1 msaitoh rbuf->buf = NULL;
1793 1.1 msaitoh }
1794 1.4 msaitoh ixgbe_dmamap_unload(rxr->ptag, rbuf->pmap);
1795 1.1 msaitoh
1796 1.1 msaitoh rbuf->flags = 0;
1797 1.1 msaitoh
1798 1.1 msaitoh return;
1799 1.1 msaitoh }
1800 1.1 msaitoh
1801 1.1 msaitoh
1802 1.1 msaitoh /*********************************************************************
1803 1.1 msaitoh *
1804 1.1 msaitoh * This routine executes in interrupt context. It replenishes
1805 1.1 msaitoh * the mbufs in the descriptor ring and sends data which has been
1806 1.1 msaitoh * DMA'ed into host memory to the upper layer.
1807 1.1 msaitoh *
1808 1.1 msaitoh * Return TRUE for more work, FALSE for all clean.
1809 1.1 msaitoh *********************************************************************/
1810 1.1 msaitoh bool
1811 1.1 msaitoh ixgbe_rxeof(struct ix_queue *que)
1812 1.1 msaitoh {
1813 1.1 msaitoh struct adapter *adapter = que->adapter;
1814 1.1 msaitoh struct rx_ring *rxr = que->rxr;
1815 1.1 msaitoh struct ifnet *ifp = adapter->ifp;
1816 1.1 msaitoh #ifdef LRO
1817 1.1 msaitoh struct lro_ctrl *lro = &rxr->lro;
1818 1.1 msaitoh #endif /* LRO */
1819 1.1 msaitoh int i, nextp, processed = 0;
1820 1.1 msaitoh u32 staterr = 0;
1821 1.7 msaitoh u32 count = adapter->rx_process_limit;
1822 1.1 msaitoh union ixgbe_adv_rx_desc *cur;
1823 1.1 msaitoh struct ixgbe_rx_buf *rbuf, *nbuf;
1824 1.1 msaitoh #ifdef RSS
1825 1.1 msaitoh u16 pkt_info;
1826 1.1 msaitoh #endif
1827 1.1 msaitoh
1828 1.1 msaitoh IXGBE_RX_LOCK(rxr);
1829 1.1 msaitoh
1830 1.1 msaitoh #ifdef DEV_NETMAP
1831 1.1 msaitoh /* Same as the txeof routine: wakeup clients on intr. */
1832 1.1 msaitoh if (netmap_rx_irq(ifp, rxr->me, &processed)) {
1833 1.1 msaitoh IXGBE_RX_UNLOCK(rxr);
1834 1.1 msaitoh return (FALSE);
1835 1.1 msaitoh }
1836 1.1 msaitoh #endif /* DEV_NETMAP */
1837 1.1 msaitoh
1838 1.1 msaitoh for (i = rxr->next_to_check; count != 0;) {
1839 1.1 msaitoh struct mbuf *sendmp, *mp;
1840 1.1 msaitoh u32 rsc, ptype;
1841 1.1 msaitoh u16 len;
1842 1.1 msaitoh u16 vtag = 0;
1843 1.1 msaitoh bool eop;
1844 1.1 msaitoh
1845 1.1 msaitoh /* Sync the ring. */
1846 1.1 msaitoh ixgbe_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
1847 1.1 msaitoh BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1848 1.1 msaitoh
1849 1.1 msaitoh cur = &rxr->rx_base[i];
1850 1.1 msaitoh staterr = le32toh(cur->wb.upper.status_error);
1851 1.1 msaitoh #ifdef RSS
1852 1.1 msaitoh pkt_info = le16toh(cur->wb.lower.lo_dword.hs_rss.pkt_info);
1853 1.1 msaitoh #endif
1854 1.1 msaitoh
1855 1.1 msaitoh if ((staterr & IXGBE_RXD_STAT_DD) == 0)
1856 1.1 msaitoh break;
1857 1.1 msaitoh if ((ifp->if_flags & IFF_RUNNING) == 0)
1858 1.1 msaitoh break;
1859 1.1 msaitoh
1860 1.1 msaitoh count--;
1861 1.1 msaitoh sendmp = NULL;
1862 1.1 msaitoh nbuf = NULL;
1863 1.1 msaitoh rsc = 0;
1864 1.1 msaitoh cur->wb.upper.status_error = 0;
1865 1.1 msaitoh rbuf = &rxr->rx_buffers[i];
1866 1.1 msaitoh mp = rbuf->buf;
1867 1.1 msaitoh
1868 1.1 msaitoh len = le16toh(cur->wb.upper.length);
1869 1.1 msaitoh ptype = le32toh(cur->wb.lower.lo_dword.data) &
1870 1.1 msaitoh IXGBE_RXDADV_PKTTYPE_MASK;
1871 1.1 msaitoh eop = ((staterr & IXGBE_RXD_STAT_EOP) != 0);
1872 1.1 msaitoh
1873 1.1 msaitoh /* Make sure bad packets are discarded */
1874 1.1 msaitoh if (eop && (staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK) != 0) {
1875 1.3 msaitoh #if __FreeBSD_version >= 1100036
1876 1.4 msaitoh if (IXGBE_IS_VF(adapter))
1877 1.4 msaitoh if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
1878 1.3 msaitoh #endif
1879 1.1 msaitoh rxr->rx_discarded.ev_count++;
1880 1.1 msaitoh ixgbe_rx_discard(rxr, i);
1881 1.1 msaitoh goto next_desc;
1882 1.1 msaitoh }
1883 1.1 msaitoh
1884 1.1 msaitoh /*
1885 1.1 msaitoh ** On 82599 which supports a hardware
1886 1.1 msaitoh ** LRO (called HW RSC), packets need
1887 1.1 msaitoh ** not be fragmented across sequential
1888 1.1 msaitoh ** descriptors, rather the next descriptor
1889 1.1 msaitoh ** is indicated in bits of the descriptor.
1890 1.1 msaitoh ** This also means that we might process
1891 1.1 msaitoh ** more than one packet at a time, something
1892 1.1 msaitoh ** that was never true before; it
1893 1.1 msaitoh ** required eliminating global chain pointers
1894 1.1 msaitoh ** in favor of what we are doing here. -jfv
1895 1.1 msaitoh */
1896 1.1 msaitoh if (!eop) {
1897 1.1 msaitoh /*
1898 1.1 msaitoh ** Figure out the next descriptor
1899 1.1 msaitoh ** of this frame.
1900 1.1 msaitoh */
1901 1.1 msaitoh if (rxr->hw_rsc == TRUE) {
1902 1.1 msaitoh rsc = ixgbe_rsc_count(cur);
1903 1.1 msaitoh rxr->rsc_num += (rsc - 1);
1904 1.1 msaitoh }
1905 1.1 msaitoh if (rsc) { /* Get hardware index */
1906 1.1 msaitoh nextp = ((staterr &
1907 1.1 msaitoh IXGBE_RXDADV_NEXTP_MASK) >>
1908 1.1 msaitoh IXGBE_RXDADV_NEXTP_SHIFT);
1909 1.1 msaitoh } else { /* Just sequential */
1910 1.1 msaitoh nextp = i + 1;
1911 1.1 msaitoh if (nextp == adapter->num_rx_desc)
1912 1.1 msaitoh nextp = 0;
1913 1.1 msaitoh }
1914 1.1 msaitoh nbuf = &rxr->rx_buffers[nextp];
1915 1.1 msaitoh prefetch(nbuf);
1916 1.1 msaitoh }
1917 1.1 msaitoh /*
1918 1.1 msaitoh ** Rather than using the fmp/lmp global pointers
1919 1.1 msaitoh ** we now keep the head of a packet chain in the
1920 1.1 msaitoh ** buffer struct and pass this along from one
1921 1.1 msaitoh ** descriptor to the next, until we get EOP.
1922 1.1 msaitoh */
1923 1.1 msaitoh mp->m_len = len;
1924 1.1 msaitoh /*
1925 1.1 msaitoh ** See if there is a stored head; if so,
1926 1.1 msaitoh ** this buffer continues an existing chain
1927 1.1 msaitoh */
1928 1.1 msaitoh sendmp = rbuf->fmp;
1929 1.1 msaitoh if (sendmp != NULL) { /* secondary frag */
1930 1.1 msaitoh rbuf->buf = rbuf->fmp = NULL;
1931 1.1 msaitoh mp->m_flags &= ~M_PKTHDR;
1932 1.1 msaitoh sendmp->m_pkthdr.len += mp->m_len;
1933 1.1 msaitoh } else {
1934 1.1 msaitoh /*
1935 1.1 msaitoh * Optimize. This might be a small packet,
1936 1.1 msaitoh * maybe just a TCP ACK. Do a fast copy that
1937 1.1 msaitoh * is cache aligned into a new mbuf, and
1938 1.1 msaitoh * leave the old mbuf+cluster for re-use.
1939 1.1 msaitoh */
1940 1.1 msaitoh if (eop && len <= IXGBE_RX_COPY_LEN) {
1941 1.1 msaitoh sendmp = m_gethdr(M_NOWAIT, MT_DATA);
1942 1.1 msaitoh if (sendmp != NULL) {
1943 1.1 msaitoh sendmp->m_data +=
1944 1.1 msaitoh IXGBE_RX_COPY_ALIGN;
1945 1.1 msaitoh ixgbe_bcopy(mp->m_data,
1946 1.1 msaitoh sendmp->m_data, len);
1947 1.1 msaitoh sendmp->m_len = len;
1948 1.1 msaitoh rxr->rx_copies.ev_count++;
1949 1.1 msaitoh rbuf->flags |= IXGBE_RX_COPY;
1950 1.1 msaitoh }
1951 1.1 msaitoh }
1952 1.1 msaitoh if (sendmp == NULL) {
1953 1.1 msaitoh rbuf->buf = rbuf->fmp = NULL;
1954 1.1 msaitoh sendmp = mp;
1955 1.1 msaitoh }
1956 1.1 msaitoh
1957 1.1 msaitoh /* first desc of a non-ps chain */
1958 1.1 msaitoh sendmp->m_flags |= M_PKTHDR;
1959 1.1 msaitoh sendmp->m_pkthdr.len = mp->m_len;
1960 1.1 msaitoh }
1961 1.1 msaitoh ++processed;
1962 1.1 msaitoh
1963 1.1 msaitoh /* Pass the head pointer on */
1964 1.1 msaitoh if (eop == 0) {
1965 1.1 msaitoh nbuf->fmp = sendmp;
1966 1.1 msaitoh sendmp = NULL;
1967 1.1 msaitoh mp->m_next = nbuf->buf;
1968 1.1 msaitoh } else { /* Sending this frame */
1969 1.1 msaitoh m_set_rcvif(sendmp, ifp);
1970 1.1 msaitoh ifp->if_ipackets++;
1971 1.1 msaitoh rxr->rx_packets.ev_count++;
1972 1.1 msaitoh /* capture data for AIM */
1973 1.1 msaitoh rxr->bytes += sendmp->m_pkthdr.len;
1974 1.1 msaitoh rxr->rx_bytes.ev_count += sendmp->m_pkthdr.len;
1975 1.1 msaitoh /* Process vlan info */
1976 1.1 msaitoh if ((rxr->vtag_strip) &&
1977 1.1 msaitoh (staterr & IXGBE_RXD_STAT_VP))
1978 1.1 msaitoh vtag = le16toh(cur->wb.upper.vlan);
1979 1.1 msaitoh if (vtag) {
1980 1.1 msaitoh VLAN_INPUT_TAG(ifp, sendmp, vtag,
1981 1.1 msaitoh printf("%s: could not apply VLAN "
1982 1.1 msaitoh "tag", __func__));
1983 1.1 msaitoh }
1984 1.1 msaitoh if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) {
1985 1.1 msaitoh ixgbe_rx_checksum(staterr, sendmp, ptype,
1986 1.3 msaitoh &adapter->stats.pf);
1987 1.1 msaitoh }
1988 1.8 msaitoh
1989 1.6 msaitoh #if 0 /* FreeBSD */
1990 1.6 msaitoh /*
1991 1.6 msaitoh * In case of multiqueue, we have RXCSUM.PCSD bit set
1992 1.6 msaitoh * and never cleared. This means we have RSS hash
1993 1.6 msaitoh * available to be used.
1994 1.6 msaitoh */
1995 1.6 msaitoh if (adapter->num_queues > 1) {
1996 1.6 msaitoh sendmp->m_pkthdr.flowid =
1997 1.6 msaitoh le32toh(cur->wb.lower.hi_dword.rss);
1998 1.6 msaitoh switch (pkt_info & IXGBE_RXDADV_RSSTYPE_MASK) {
1999 1.6 msaitoh case IXGBE_RXDADV_RSSTYPE_IPV4:
2000 1.6 msaitoh M_HASHTYPE_SET(sendmp,
2001 1.6 msaitoh M_HASHTYPE_RSS_IPV4);
2002 1.6 msaitoh break;
2003 1.6 msaitoh case IXGBE_RXDADV_RSSTYPE_IPV4_TCP:
2004 1.6 msaitoh M_HASHTYPE_SET(sendmp,
2005 1.6 msaitoh M_HASHTYPE_RSS_TCP_IPV4);
2006 1.6 msaitoh break;
2007 1.6 msaitoh case IXGBE_RXDADV_RSSTYPE_IPV6:
2008 1.6 msaitoh M_HASHTYPE_SET(sendmp,
2009 1.6 msaitoh M_HASHTYPE_RSS_IPV6);
2010 1.6 msaitoh break;
2011 1.6 msaitoh case IXGBE_RXDADV_RSSTYPE_IPV6_TCP:
2012 1.6 msaitoh M_HASHTYPE_SET(sendmp,
2013 1.6 msaitoh M_HASHTYPE_RSS_TCP_IPV6);
2014 1.6 msaitoh break;
2015 1.6 msaitoh case IXGBE_RXDADV_RSSTYPE_IPV6_EX:
2016 1.6 msaitoh M_HASHTYPE_SET(sendmp,
2017 1.6 msaitoh M_HASHTYPE_RSS_IPV6_EX);
2018 1.6 msaitoh break;
2019 1.6 msaitoh case IXGBE_RXDADV_RSSTYPE_IPV6_TCP_EX:
2020 1.6 msaitoh M_HASHTYPE_SET(sendmp,
2021 1.6 msaitoh M_HASHTYPE_RSS_TCP_IPV6_EX);
2022 1.6 msaitoh break;
2023 1.6 msaitoh #if __FreeBSD_version > 1100000
2024 1.6 msaitoh case IXGBE_RXDADV_RSSTYPE_IPV4_UDP:
2025 1.6 msaitoh M_HASHTYPE_SET(sendmp,
2026 1.6 msaitoh M_HASHTYPE_RSS_UDP_IPV4);
2027 1.6 msaitoh break;
2028 1.6 msaitoh case IXGBE_RXDADV_RSSTYPE_IPV6_UDP:
2029 1.6 msaitoh M_HASHTYPE_SET(sendmp,
2030 1.6 msaitoh M_HASHTYPE_RSS_UDP_IPV6);
2031 1.6 msaitoh break;
2032 1.6 msaitoh case IXGBE_RXDADV_RSSTYPE_IPV6_UDP_EX:
2033 1.6 msaitoh M_HASHTYPE_SET(sendmp,
2034 1.6 msaitoh M_HASHTYPE_RSS_UDP_IPV6_EX);
2035 1.6 msaitoh break;
2036 1.6 msaitoh #endif
2037 1.6 msaitoh default:
2038 1.6 msaitoh M_HASHTYPE_SET(sendmp,
2039 1.10 msaitoh M_HASHTYPE_OPAQUE_HASH);
2040 1.6 msaitoh }
2041 1.6 msaitoh } else {
2042 1.6 msaitoh sendmp->m_pkthdr.flowid = que->msix;
2043 1.1 msaitoh M_HASHTYPE_SET(sendmp, M_HASHTYPE_OPAQUE);
2044 1.1 msaitoh }
2045 1.8 msaitoh #endif
2046 1.1 msaitoh }
2047 1.1 msaitoh next_desc:
2048 1.1 msaitoh ixgbe_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
2049 1.1 msaitoh BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2050 1.1 msaitoh
2051 1.1 msaitoh /* Advance our pointers to the next descriptor. */
2052 1.1 msaitoh if (++i == rxr->num_desc)
2053 1.1 msaitoh i = 0;
2054 1.1 msaitoh
2055 1.1 msaitoh /* Now send to the stack or do LRO */
2056 1.1 msaitoh if (sendmp != NULL) {
2057 1.1 msaitoh rxr->next_to_check = i;
2058 1.1 msaitoh ixgbe_rx_input(rxr, ifp, sendmp, ptype);
2059 1.1 msaitoh i = rxr->next_to_check;
2060 1.1 msaitoh }
2061 1.1 msaitoh
2062 1.1 msaitoh /* Refresh mbufs every 8 processed descriptors */
2063 1.1 msaitoh if (processed == 8) {
2064 1.1 msaitoh ixgbe_refresh_mbufs(rxr, i);
2065 1.1 msaitoh processed = 0;
2066 1.1 msaitoh }
2067 1.1 msaitoh }
2068 1.1 msaitoh
2069 1.1 msaitoh /* Refresh any remaining buf structs */
2070 1.1 msaitoh if (ixgbe_rx_unrefreshed(rxr))
2071 1.1 msaitoh ixgbe_refresh_mbufs(rxr, i);
2072 1.1 msaitoh
2073 1.1 msaitoh rxr->next_to_check = i;
2074 1.1 msaitoh
2075 1.1 msaitoh #ifdef LRO
2076 1.1 msaitoh /*
2077 1.1 msaitoh * Flush any outstanding LRO work
2078 1.1 msaitoh */
2079 1.10 msaitoh tcp_lro_flush_all(lro);
2080 1.1 msaitoh #endif /* LRO */
2081 1.1 msaitoh
2082 1.1 msaitoh IXGBE_RX_UNLOCK(rxr);
2083 1.1 msaitoh
2084 1.1 msaitoh /*
2085 1.1 msaitoh ** Still have cleaning to do?
2086 1.1 msaitoh */
2087 1.1 msaitoh if ((staterr & IXGBE_RXD_STAT_DD) != 0)
2088 1.1 msaitoh return true;
2089 1.1 msaitoh else
2090 1.1 msaitoh return false;
2091 1.1 msaitoh }
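/*
 * A worked example of the RSC next-descriptor lookup used in
 * ixgbe_rxeof() above, as a sketch (the mask and shift are the
 * ixgbe_type.h values for the NEXTP field at the time of writing;
 * 'staterr' is hypothetical):
 */
#if 0 /* illustrative sketch, not compiled */
	/*
	 * With IXGBE_RXDADV_NEXTP_MASK == 0x000FFFF0 and
	 * IXGBE_RXDADV_NEXTP_SHIFT == 4:
	 *
	 *	(0x00000150 & 0x000FFFF0) >> 4 == 0x15 == 21
	 *
	 * so the frame continues at descriptor 21 rather than i + 1.
	 */
	u32 staterr = 0x00000150;
	int nextp = (staterr & IXGBE_RXDADV_NEXTP_MASK) >>
	    IXGBE_RXDADV_NEXTP_SHIFT;		/* == 21 */
#endif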
2092 1.1 msaitoh
2093 1.1 msaitoh
2094 1.1 msaitoh /*********************************************************************
2095 1.1 msaitoh *
2096 1.1 msaitoh * Verify that the hardware indicated that the checksum is valid.
2097 1.1 msaitoh * Inform the stack about the checksum status so that the stack
2098 1.1 msaitoh * doesn't spend time verifying it again.
2099 1.1 msaitoh *
2100 1.1 msaitoh *********************************************************************/
2101 1.1 msaitoh static void
2102 1.1 msaitoh ixgbe_rx_checksum(u32 staterr, struct mbuf * mp, u32 ptype,
2103 1.1 msaitoh struct ixgbe_hw_stats *stats)
2104 1.1 msaitoh {
2105 1.1 msaitoh u16 status = (u16) staterr;
2106 1.1 msaitoh u8 errors = (u8) (staterr >> 24);
2107 1.1 msaitoh #if 0
2108 1.8 msaitoh bool sctp = false;
2109 1.1 msaitoh
2110 1.1 msaitoh if ((ptype & IXGBE_RXDADV_PKTTYPE_ETQF) == 0 &&
2111 1.1 msaitoh (ptype & IXGBE_RXDADV_PKTTYPE_SCTP) != 0)
2112 1.8 msaitoh sctp = true;
2113 1.1 msaitoh #endif
2114 1.1 msaitoh
2115 1.8 msaitoh /* IPv4 checksum */
2116 1.1 msaitoh if (status & IXGBE_RXD_STAT_IPCS) {
2117 1.1 msaitoh stats->ipcs.ev_count++;
2118 1.1 msaitoh if (!(errors & IXGBE_RXD_ERR_IPE)) {
2119 1.1 msaitoh /* IP Checksum Good */
2120 1.1 msaitoh mp->m_pkthdr.csum_flags = M_CSUM_IPv4;
2121 1.1 msaitoh } else {
2122 1.1 msaitoh stats->ipcs_bad.ev_count++;
2123 1.1 msaitoh mp->m_pkthdr.csum_flags = M_CSUM_IPv4|M_CSUM_IPv4_BAD;
2124 1.1 msaitoh }
2125 1.1 msaitoh }
2126 1.8 msaitoh /* TCP/UDP/SCTP checksum */
2127 1.1 msaitoh if (status & IXGBE_RXD_STAT_L4CS) {
2128 1.1 msaitoh stats->l4cs.ev_count++;
2129 1.1 msaitoh int type = M_CSUM_TCPv4|M_CSUM_TCPv6|M_CSUM_UDPv4|M_CSUM_UDPv6;
2130 1.1 msaitoh if (!(errors & IXGBE_RXD_ERR_TCPE)) {
2131 1.1 msaitoh mp->m_pkthdr.csum_flags |= type;
2132 1.1 msaitoh } else {
2133 1.1 msaitoh stats->l4cs_bad.ev_count++;
2134 1.1 msaitoh mp->m_pkthdr.csum_flags |= type | M_CSUM_TCP_UDP_BAD;
2135 1.1 msaitoh }
2136 1.1 msaitoh }
2137 1.1 msaitoh }
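/*
 * The status/errors split above in miniature, as a sketch with a
 * hypothetical staterr value (the bit names are the ixgbe_type.h
 * STAT/ERR definitions):
 */
#if 0 /* illustrative sketch, not compiled */
	/*
	 * The low 16 bits of staterr carry the STAT flags and the top
	 * byte carries the ERR flags, so for staterr == 0x80000061:
	 *
	 *	status = (u16)staterr         == 0x0061 (DD|L4CS|IPCS)
	 *	errors = (u8)(staterr >> 24)  == 0x80   (IXGBE_RXD_ERR_IPE)
	 *
	 * i.e. both checksums were attempted and the IPv4 header
	 * checksum failed, so the path above would set
	 * M_CSUM_IPv4 | M_CSUM_IPv4_BAD.
	 */
	u32 staterr = 0x80000061;
	u16 status = (u16)staterr;		/* == 0x0061 */
	u8 errors = (u8)(staterr >> 24);	/* == 0x80 */
#endif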
2138 1.1 msaitoh
2139 1.1 msaitoh
2140 1.1 msaitoh /********************************************************************
2141 1.1 msaitoh * Manage DMA'able memory.
2142 1.1 msaitoh *******************************************************************/
2143 1.1 msaitoh
2144 1.1 msaitoh int
2145 1.1 msaitoh ixgbe_dma_malloc(struct adapter *adapter, const bus_size_t size,
2146 1.1 msaitoh struct ixgbe_dma_alloc *dma, const int mapflags)
2147 1.1 msaitoh {
2148 1.1 msaitoh device_t dev = adapter->dev;
2149 1.1 msaitoh int r, rsegs;
2150 1.1 msaitoh
2151 1.1 msaitoh r = ixgbe_dma_tag_create(adapter->osdep.dmat, /* parent */
2152 1.1 msaitoh DBA_ALIGN, 0, /* alignment, bounds */
2153 1.1 msaitoh size, /* maxsize */
2154 1.1 msaitoh 1, /* nsegments */
2155 1.1 msaitoh size, /* maxsegsize */
2156 1.1 msaitoh BUS_DMA_ALLOCNOW, /* flags */
2157 1.1 msaitoh &dma->dma_tag);
2158 1.1 msaitoh if (r != 0) {
2159 1.1 msaitoh aprint_error_dev(dev,
2160 1.1 msaitoh "%s: ixgbe_dma_tag_create failed; error %d\n", __func__, r);
2161 1.1 msaitoh goto fail_0;
2162 1.1 msaitoh }
2163 1.1 msaitoh
2164 1.1 msaitoh r = bus_dmamem_alloc(dma->dma_tag->dt_dmat,
2165 1.1 msaitoh size,
2166 1.1 msaitoh dma->dma_tag->dt_alignment,
2167 1.1 msaitoh dma->dma_tag->dt_boundary,
2168 1.1 msaitoh &dma->dma_seg, 1, &rsegs, BUS_DMA_NOWAIT);
2169 1.1 msaitoh if (r != 0) {
2170 1.1 msaitoh aprint_error_dev(dev,
2171 1.1 msaitoh "%s: bus_dmamem_alloc failed; error %d\n", __func__, r);
2172 1.1 msaitoh goto fail_1;
2173 1.1 msaitoh }
2174 1.1 msaitoh
2175 1.1 msaitoh r = bus_dmamem_map(dma->dma_tag->dt_dmat, &dma->dma_seg, rsegs,
2176 1.1 msaitoh size, &dma->dma_vaddr, BUS_DMA_NOWAIT);
2177 1.1 msaitoh if (r != 0) {
2178 1.1 msaitoh aprint_error_dev(dev, "%s: bus_dmamem_map failed; error %d\n",
2179 1.1 msaitoh __func__, r);
2180 1.1 msaitoh goto fail_2;
2181 1.1 msaitoh }
2182 1.1 msaitoh
2183 1.1 msaitoh r = ixgbe_dmamap_create(dma->dma_tag, 0, &dma->dma_map);
2184 1.1 msaitoh if (r != 0) {
2185 1.1 msaitoh aprint_error_dev(dev, "%s: ixgbe_dmamap_create failed; error %d\n",
2186 1.1 msaitoh __func__, r);
2187 1.1 msaitoh goto fail_3;
2188 1.1 msaitoh }
2189 1.1 msaitoh
2190 1.1 msaitoh r = bus_dmamap_load(dma->dma_tag->dt_dmat, dma->dma_map, dma->dma_vaddr,
2191 1.1 msaitoh size,
2192 1.1 msaitoh NULL,
2193 1.1 msaitoh mapflags | BUS_DMA_NOWAIT);
2194 1.1 msaitoh if (r != 0) {
2195 1.1 msaitoh aprint_error_dev(dev, "%s: bus_dmamap_load failed; error %d\n",
2196 1.1 msaitoh __func__, r);
2197 1.1 msaitoh goto fail_4;
2198 1.1 msaitoh }
2199 1.1 msaitoh dma->dma_paddr = dma->dma_map->dm_segs[0].ds_addr;
2200 1.1 msaitoh dma->dma_size = size;
2201 1.1 msaitoh return 0;
2202 1.1 msaitoh fail_4:
2203 1.1 msaitoh ixgbe_dmamap_destroy(dma->dma_tag, dma->dma_map);
2204 1.1 msaitoh fail_3:
2205 1.1 msaitoh bus_dmamem_unmap(dma->dma_tag->dt_dmat, dma->dma_vaddr, size);
2206 1.1 msaitoh fail_2:
2207 1.1 msaitoh bus_dmamem_free(dma->dma_tag->dt_dmat, &dma->dma_seg, rsegs);
2208 1.1 msaitoh fail_1:
2209 1.1 msaitoh ixgbe_dma_tag_destroy(dma->dma_tag);
2210 1.1 msaitoh fail_0:
2211 1.1 msaitoh return r;
2212 1.1 msaitoh }
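/*
 * The fail_0..fail_4 labels above form the classic goto-unwind
 * ladder: each label releases exactly the resources acquired before
 * the failing step, in reverse order of acquisition. The shape in
 * miniature, as a sketch with hypothetical step/undo helpers:
 */
#if 0 /* illustrative sketch, not compiled */
	if ((r = step_a()) != 0)
		goto fail_0;
	if ((r = step_b()) != 0)
		goto fail_1;	/* step_a succeeded, undo it */
	if ((r = step_c()) != 0)
		goto fail_2;	/* steps a and b succeeded */
	return (0);
fail_2:
	undo_b();
fail_1:
	undo_a();
fail_0:
	return (r);
#endif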
2213 1.1 msaitoh
2214 1.3 msaitoh void
2215 1.1 msaitoh ixgbe_dma_free(struct adapter *adapter, struct ixgbe_dma_alloc *dma)
2216 1.1 msaitoh {
2217 1.1 msaitoh bus_dmamap_sync(dma->dma_tag->dt_dmat, dma->dma_map, 0, dma->dma_size,
2218 1.1 msaitoh BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2219 1.1 msaitoh ixgbe_dmamap_unload(dma->dma_tag, dma->dma_map);
2220 1.1 msaitoh bus_dmamem_free(dma->dma_tag->dt_dmat, &dma->dma_seg, 1);
2221 1.1 msaitoh ixgbe_dma_tag_destroy(dma->dma_tag);
2222 1.1 msaitoh }
2223 1.1 msaitoh
2224 1.1 msaitoh
2225 1.1 msaitoh /*********************************************************************
2226 1.1 msaitoh *
2227 1.1 msaitoh * Allocate memory for the transmit and receive rings, and then
2228 1.1 msaitoh * the descriptors associated with each, called only once at attach.
2229 1.1 msaitoh *
2230 1.1 msaitoh **********************************************************************/
2231 1.1 msaitoh int
2232 1.1 msaitoh ixgbe_allocate_queues(struct adapter *adapter)
2233 1.1 msaitoh {
2234 1.1 msaitoh device_t dev = adapter->dev;
2235 1.1 msaitoh struct ix_queue *que;
2236 1.1 msaitoh struct tx_ring *txr;
2237 1.1 msaitoh struct rx_ring *rxr;
2238 1.1 msaitoh int rsize, tsize, error = IXGBE_SUCCESS;
2239 1.1 msaitoh int txconf = 0, rxconf = 0;
2240 1.5 msaitoh #ifdef PCI_IOV
2241 1.5 msaitoh enum ixgbe_iov_mode iov_mode;
2242 1.5 msaitoh #endif
2243 1.1 msaitoh
2244 1.1 msaitoh /* First allocate the top level queue structs */
2245 1.1 msaitoh if (!(adapter->queues =
2246 1.1 msaitoh (struct ix_queue *) malloc(sizeof(struct ix_queue) *
2247 1.1 msaitoh adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
2248 1.1 msaitoh aprint_error_dev(dev, "Unable to allocate queue memory\n");
2249 1.1 msaitoh error = ENOMEM;
2250 1.1 msaitoh goto fail;
2251 1.1 msaitoh }
2252 1.1 msaitoh
2253 1.1 msaitoh /* Next allocate the TX ring struct memory */
2254 1.1 msaitoh if (!(adapter->tx_rings =
2255 1.1 msaitoh (struct tx_ring *) malloc(sizeof(struct tx_ring) *
2256 1.1 msaitoh adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
2257 1.1 msaitoh aprint_error_dev(dev, "Unable to allocate TX ring memory\n");
2258 1.1 msaitoh error = ENOMEM;
2259 1.1 msaitoh goto tx_fail;
2260 1.1 msaitoh }
2261 1.1 msaitoh
2262 1.1 msaitoh /* Next allocate the RX */
2263 1.1 msaitoh if (!(adapter->rx_rings =
2264 1.1 msaitoh (struct rx_ring *) malloc(sizeof(struct rx_ring) *
2265 1.1 msaitoh adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
2266 1.1 msaitoh aprint_error_dev(dev, "Unable to allocate RX ring memory\n");
2267 1.1 msaitoh error = ENOMEM;
2268 1.1 msaitoh goto rx_fail;
2269 1.1 msaitoh }
2270 1.1 msaitoh
2271 1.1 msaitoh /* For the ring itself */
2272 1.1 msaitoh tsize = roundup2(adapter->num_tx_desc *
2273 1.1 msaitoh sizeof(union ixgbe_adv_tx_desc), DBA_ALIGN);
2274 1.1 msaitoh
2275 1.5 msaitoh #ifdef PCI_IOV
2276 1.5 msaitoh iov_mode = ixgbe_get_iov_mode(adapter);
2277 1.5 msaitoh adapter->pool = ixgbe_max_vfs(iov_mode);
2278 1.5 msaitoh #else
2279 1.5 msaitoh adapter->pool = 0;
2280 1.5 msaitoh #endif
2281 1.1 msaitoh /*
2282 1.1 msaitoh * Now set up the TX queues, txconf is needed to handle the
2283 1.1 msaitoh * possibility that things fail midcourse and we need to
2284 1.1 msaitoh * undo memory gracefully
2285 1.1 msaitoh */
2286 1.1 msaitoh for (int i = 0; i < adapter->num_queues; i++, txconf++) {
2287 1.1 msaitoh /* Set up some basics */
2288 1.1 msaitoh txr = &adapter->tx_rings[i];
2289 1.1 msaitoh txr->adapter = adapter;
2290 1.5 msaitoh #ifdef PCI_IOV
2291 1.5 msaitoh txr->me = ixgbe_pf_que_index(iov_mode, i);
2292 1.5 msaitoh #else
2293 1.1 msaitoh txr->me = i;
2294 1.5 msaitoh #endif
2295 1.1 msaitoh txr->num_desc = adapter->num_tx_desc;
2296 1.1 msaitoh
2297 1.1 msaitoh /* Initialize the TX side lock */
2298 1.1 msaitoh snprintf(txr->mtx_name, sizeof(txr->mtx_name), "%s:tx(%d)",
2299 1.1 msaitoh device_xname(dev), txr->me);
2300 1.1 msaitoh mutex_init(&txr->tx_mtx, MUTEX_DEFAULT, IPL_NET);
2301 1.1 msaitoh
2302 1.1 msaitoh if (ixgbe_dma_malloc(adapter, tsize,
2303 1.1 msaitoh &txr->txdma, BUS_DMA_NOWAIT)) {
2304 1.1 msaitoh aprint_error_dev(dev,
2305 1.1 msaitoh "Unable to allocate TX Descriptor memory\n");
2306 1.1 msaitoh error = ENOMEM;
2307 1.1 msaitoh goto err_tx_desc;
2308 1.1 msaitoh }
2309 1.1 msaitoh txr->tx_base = (union ixgbe_adv_tx_desc *)txr->txdma.dma_vaddr;
2310 1.1 msaitoh bzero((void *)txr->tx_base, tsize);
2311 1.1 msaitoh
2312 1.1 msaitoh /* Now allocate transmit buffers for the ring */
2313 1.1 msaitoh if (ixgbe_allocate_transmit_buffers(txr)) {
2314 1.1 msaitoh aprint_error_dev(dev,
2315 1.1 msaitoh "Critical Failure setting up transmit buffers\n");
2316 1.1 msaitoh error = ENOMEM;
2317 1.1 msaitoh goto err_tx_desc;
2318 1.1 msaitoh }
2319 1.1 msaitoh #ifndef IXGBE_LEGACY_TX
2320 1.1 msaitoh /* Allocate a buf ring */
2321 1.1 msaitoh txr->br = buf_ring_alloc(IXGBE_BR_SIZE, M_DEVBUF,
2322 1.1 msaitoh M_WAITOK, &txr->tx_mtx);
2323 1.1 msaitoh if (txr->br == NULL) {
2324 1.1 msaitoh aprint_error_dev(dev,
2325 1.1 msaitoh "Critical Failure setting up buf ring\n");
2326 1.1 msaitoh error = ENOMEM;
2327 1.1 msaitoh goto err_tx_desc;
2328 1.1 msaitoh }
2329 1.1 msaitoh #endif
2330 1.1 msaitoh }
2331 1.1 msaitoh
2332 1.1 msaitoh /*
2333 1.1 msaitoh * Next the RX queues...
2334 1.1 msaitoh */
2335 1.1 msaitoh rsize = roundup2(adapter->num_rx_desc *
2336 1.1 msaitoh sizeof(union ixgbe_adv_rx_desc), DBA_ALIGN);
2337 1.1 msaitoh for (int i = 0; i < adapter->num_queues; i++, rxconf++) {
2338 1.1 msaitoh rxr = &adapter->rx_rings[i];
2339 1.1 msaitoh /* Set up some basics */
2340 1.1 msaitoh rxr->adapter = adapter;
2341 1.5 msaitoh #ifdef PCI_IOV
2342 1.5 msaitoh rxr->me = ixgbe_pf_que_index(iov_mode, i);
2343 1.5 msaitoh #else
2344 1.1 msaitoh rxr->me = i;
2345 1.5 msaitoh #endif
2346 1.1 msaitoh rxr->num_desc = adapter->num_rx_desc;
2347 1.1 msaitoh
2348 1.1 msaitoh /* Initialize the RX side lock */
2349 1.1 msaitoh snprintf(rxr->mtx_name, sizeof(rxr->mtx_name), "%s:rx(%d)",
2350 1.1 msaitoh device_xname(dev), rxr->me);
2351 1.1 msaitoh mutex_init(&rxr->rx_mtx, MUTEX_DEFAULT, IPL_NET);
2352 1.1 msaitoh
2353 1.1 msaitoh if (ixgbe_dma_malloc(adapter, rsize,
2354 1.1 msaitoh &rxr->rxdma, BUS_DMA_NOWAIT)) {
2355 1.1 msaitoh aprint_error_dev(dev,
2356 1.1 msaitoh "Unable to allocate RX Descriptor memory\n");
2357 1.1 msaitoh error = ENOMEM;
2358 1.1 msaitoh goto err_rx_desc;
2359 1.1 msaitoh }
2360 1.1 msaitoh rxr->rx_base = (union ixgbe_adv_rx_desc *)rxr->rxdma.dma_vaddr;
2361 1.1 msaitoh bzero((void *)rxr->rx_base, rsize);
2362 1.1 msaitoh
2363 1.1 msaitoh /* Allocate receive buffers for the ring */
2364 1.1 msaitoh if (ixgbe_allocate_receive_buffers(rxr)) {
2365 1.1 msaitoh aprint_error_dev(dev,
2366 1.1 msaitoh "Critical Failure setting up receive buffers\n");
2367 1.1 msaitoh error = ENOMEM;
2368 1.1 msaitoh goto err_rx_desc;
2369 1.1 msaitoh }
2370 1.1 msaitoh }
2371 1.1 msaitoh
2372 1.1 msaitoh /*
2373 1.1 msaitoh ** Finally set up the queue holding structs
2374 1.1 msaitoh */
2375 1.1 msaitoh for (int i = 0; i < adapter->num_queues; i++) {
2376 1.1 msaitoh que = &adapter->queues[i];
2377 1.1 msaitoh que->adapter = adapter;
2378 1.3 msaitoh que->me = i;
2379 1.1 msaitoh que->txr = &adapter->tx_rings[i];
2380 1.1 msaitoh que->rxr = &adapter->rx_rings[i];
2381 1.1 msaitoh }
2382 1.1 msaitoh
2383 1.1 msaitoh return (0);
2384 1.1 msaitoh
2385 1.1 msaitoh err_rx_desc:
2386 1.1 msaitoh for (rxr = adapter->rx_rings; rxconf > 0; rxr++, rxconf--)
2387 1.1 msaitoh ixgbe_dma_free(adapter, &rxr->rxdma);
2388 1.1 msaitoh err_tx_desc:
2389 1.1 msaitoh for (txr = adapter->tx_rings; txconf > 0; txr++, txconf--)
2390 1.1 msaitoh ixgbe_dma_free(adapter, &txr->txdma);
2391 1.1 msaitoh free(adapter->rx_rings, M_DEVBUF);
2392 1.1 msaitoh rx_fail:
2393 1.1 msaitoh free(adapter->tx_rings, M_DEVBUF);
2394 1.1 msaitoh tx_fail:
2395 1.1 msaitoh free(adapter->queues, M_DEVBUF);
2396 1.1 msaitoh fail:
2397 1.1 msaitoh return (error);
2398 1.1 msaitoh }
2399 1.1 msaitoh