/******************************************************************************

  Copyright (c) 2001-2015, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
/*
 * Copyright (c) 2011 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Coyote Point Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*$FreeBSD: head/sys/dev/ixgbe/ix_txrx.c 282289 2015-04-30 22:53:27Z erj $*/
/*$NetBSD: ix_txrx.c,v 1.4 2016/12/01 06:56:28 msaitoh Exp $*/

#include "ixgbe.h"

#ifdef DEV_NETMAP
#include <net/netmap.h>
#include <sys/selinfo.h>
#include <dev/netmap/netmap_kern.h>

extern int ix_crcstrip;
#endif

/*
** HW RSC control:
**  this feature only works with
**  IPv4, and only on 82599 and later.
**  It also causes IP forwarding to
**  fail, and unlike LRO that can't be
**  controlled by the stack. For all these
**  reasons I've deemed it best to leave
**  it off by default; there is no tunable
**  interface, so enabling it requires
**  recompiling with this set to TRUE.
*/
static bool ixgbe_rsc_enable = FALSE;

#ifdef IXGBE_FDIR
/*
** For Flow Director: this is the
** rate at which we sample TX packets
** for the filter pool; with the default
** of 20, every 20th packet is probed.
**
** This feature can be disabled by
** setting this to 0.
*/
static int atr_sample_rate = 20;
#endif

/* Shared PCI config read/write */
u16
ixgbe_read_pci_cfg(struct ixgbe_hw *hw, u32 reg)
{
	switch (reg % 4) {
	case 0:
		return pci_conf_read(hw->back->pc, hw->back->tag, reg) &
		    __BITS(15, 0);
	case 2:
		return __SHIFTOUT(pci_conf_read(hw->back->pc, hw->back->tag,
		    reg - 2), __BITS(31, 16));
	default:
		panic("%s: invalid register (%" PRIx32 ")", __func__, reg);
		break;
	}
}

void
ixgbe_write_pci_cfg(struct ixgbe_hw *hw, u32 reg, u16 value)
{
	pcireg_t old;

	switch (reg % 4) {
	case 0:
		old = pci_conf_read(hw->back->pc, hw->back->tag, reg) &
		    __BITS(31, 16);
		pci_conf_write(hw->back->pc, hw->back->tag, reg, value | old);
		break;
	case 2:
		old = pci_conf_read(hw->back->pc, hw->back->tag, reg - 2) &
		    __BITS(15, 0);
		pci_conf_write(hw->back->pc, hw->back->tag, reg - 2,
		    __SHIFTIN(value, __BITS(31, 16)) | old);
		break;
	default:
		panic("%s: invalid register (%" PRIx32 ")", __func__, reg);
		break;
	}

	return;
}
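
/*
 * Worked example (an illustrative sketch, not compiled into the driver):
 * a 16-bit read at config offset 2 is serviced by reading the 32-bit
 * dword at offset 0 and shifting out bits 31:16, so fetching the PCI
 * device ID would look like the following.  PCI_ID_REG is the standard
 * NetBSD name for offset 0.
 */
#if 0
	u16 devid = ixgbe_read_pci_cfg(hw, PCI_ID_REG + 2);
	/* equivalent to:
	 * __SHIFTOUT(pci_conf_read(pc, tag, PCI_ID_REG), __BITS(31, 16))
	 */
#endif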

/*********************************************************************
 *  Local Function prototypes
 *********************************************************************/
static void	ixgbe_setup_transmit_ring(struct tx_ring *);
static void	ixgbe_free_transmit_buffers(struct tx_ring *);
static int	ixgbe_setup_receive_ring(struct rx_ring *);
static void	ixgbe_free_receive_buffers(struct rx_ring *);

static void	ixgbe_rx_checksum(u32, struct mbuf *, u32,
		    struct ixgbe_hw_stats *);
static void	ixgbe_refresh_mbufs(struct rx_ring *, int);
static int	ixgbe_xmit(struct tx_ring *, struct mbuf *);
static int	ixgbe_tx_ctx_setup(struct tx_ring *,
		    struct mbuf *, u32 *, u32 *);
static int	ixgbe_tso_setup(struct tx_ring *,
		    struct mbuf *, u32 *, u32 *);
#ifdef IXGBE_FDIR
static void	ixgbe_atr(struct tx_ring *, struct mbuf *);
#endif
static __inline void ixgbe_rx_discard(struct rx_ring *, int);
static __inline void ixgbe_rx_input(struct rx_ring *, struct ifnet *,
		    struct mbuf *, u32);

static void	ixgbe_setup_hw_rsc(struct rx_ring *);

#ifdef IXGBE_LEGACY_TX
/*********************************************************************
 *  Transmit entry point
 *
 *  ixgbe_start is called by the stack to initiate a transmit.
 *  The driver will remain in this routine as long as there are
 *  packets to transmit and transmit resources are available.
 *  If resources are not available, the stack is notified and
 *  the packet is requeued.
 **********************************************************************/

void
ixgbe_start_locked(struct tx_ring *txr, struct ifnet *ifp)
{
	int rc;
	struct mbuf    *m_head;
	struct adapter *adapter = txr->adapter;

	IXGBE_TX_LOCK_ASSERT(txr);

	if ((ifp->if_flags & IFF_RUNNING) == 0)
		return;
	if (!adapter->link_active)
		return;

	while (!IFQ_IS_EMPTY(&ifp->if_snd)) {
		if (txr->tx_avail <= IXGBE_QUEUE_MIN_FREE)
			break;

		IFQ_POLL(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

		if ((rc = ixgbe_xmit(txr, m_head)) == EAGAIN) {
			break;
		}
		IFQ_DEQUEUE(&ifp->if_snd, m_head);
		if (rc == EFBIG) {
			struct mbuf *mtmp;

			if ((mtmp = m_defrag(m_head, M_NOWAIT)) != NULL) {
				m_head = mtmp;
				rc = ixgbe_xmit(txr, m_head);
				if (rc != 0)
					adapter->efbig2_tx_dma_setup.ev_count++;
			} else
				adapter->m_defrag_failed.ev_count++;
		}
		if (rc != 0) {
			m_freem(m_head);
			continue;
		}

		/* Send a copy of the frame to the BPF listener */
		bpf_mtap(ifp, m_head);
	}
	return;
}

/*
 * Legacy TX start - called by the stack, this
 * always uses the first tx ring, and should
 * not be used with multiqueue tx enabled.
 */
void
ixgbe_start(struct ifnet *ifp)
{
	struct adapter *adapter = ifp->if_softc;
	struct tx_ring *txr = adapter->tx_rings;

	if (ifp->if_flags & IFF_RUNNING) {
		IXGBE_TX_LOCK(txr);
		ixgbe_start_locked(txr, ifp);
		IXGBE_TX_UNLOCK(txr);
	}
	return;
}

#else /* ! IXGBE_LEGACY_TX */

/*
** Multiqueue Transmit driver
**
*/
int
ixgbe_mq_start(struct ifnet *ifp, struct mbuf *m)
{
	struct adapter	*adapter = ifp->if_softc;
	struct ix_queue	*que;
	struct tx_ring	*txr;
	int		i, err = 0;
#ifdef RSS
	uint32_t bucket_id;
#endif

	/*
	 * When doing RSS, map it to the same outbound queue
	 * as the incoming flow would be mapped to.
	 *
	 * If everything is set up correctly, it should be the
	 * same bucket that the current CPU we're on is in.
	 */
#if __FreeBSD_version < 1100054
	if (m->m_flags & M_FLOWID) {
#else
	if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) {
#endif
#ifdef RSS
		if (rss_hash2bucket(m->m_pkthdr.flowid,
		    M_HASHTYPE_GET(m), &bucket_id) == 0)
			/* TODO: spit out something if bucket_id > num_queues? */
			i = bucket_id % adapter->num_queues;
		else
#endif
			i = m->m_pkthdr.flowid % adapter->num_queues;
	} else
		i = curcpu % adapter->num_queues;

	/* Check for a hung queue and pick alternative */
	if (((1 << i) & adapter->active_queues) == 0)
		i = ffsl(adapter->active_queues);

	txr = &adapter->tx_rings[i];
	que = &adapter->queues[i];

	err = drbr_enqueue(ifp, txr->br, m);
	if (err)
		return (err);
	if (IXGBE_TX_TRYLOCK(txr)) {
		ixgbe_mq_start_locked(ifp, txr);
		IXGBE_TX_UNLOCK(txr);
	} else
		softint_schedule(txr->txq_si);

	return (0);
}
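
/*
 * Queue selection above, in summary: a flow hash (the RSS bucket when
 * available, otherwise the raw flowid) picks the ring, so a flow's TX
 * work lands on the same queue its RX traffic maps to; without a hash
 * we fall back to curcpu, and a queue flagged inactive (hung) in
 * adapter->active_queues is sidestepped via ffsl().
 */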

int
ixgbe_mq_start_locked(struct ifnet *ifp, struct tx_ring *txr)
{
	struct adapter	*adapter = txr->adapter;
	struct mbuf	*next;
	int		enqueued = 0, err = 0;

	if (((ifp->if_flags & IFF_RUNNING) == 0) ||
	    adapter->link_active == 0)
		return (ENETDOWN);

	/* Process the queue */
#if __FreeBSD_version < 901504
	next = drbr_dequeue(ifp, txr->br);
	while (next != NULL) {
		if ((err = ixgbe_xmit(txr, &next)) != 0) {
			if (next != NULL)
				err = drbr_enqueue(ifp, txr->br, next);
#else
	while ((next = drbr_peek(ifp, txr->br)) != NULL) {
		if ((err = ixgbe_xmit(txr, &next)) != 0) {
			if (next == NULL) {
				drbr_advance(ifp, txr->br);
			} else {
				drbr_putback(ifp, txr->br, next);
			}
#endif
			break;
		}
#if __FreeBSD_version >= 901504
		drbr_advance(ifp, txr->br);
#endif
		enqueued++;
#if 0 // this is VF-only
#if __FreeBSD_version >= 1100036
		/*
		 * Since we're looking at the tx ring, we can check
		 * to see if we're a VF by examining our tail register
		 * address.
		 */
		if (txr->tail < IXGBE_TDT(0) && next->m_flags & M_MCAST)
			if_inc_counter(ifp, IFCOUNTER_OMCASTS, 1);
#endif
#endif
		/* Send a copy of the frame to the BPF listener */
		bpf_mtap(ifp, next);
		if ((ifp->if_flags & IFF_RUNNING) == 0)
			break;
#if __FreeBSD_version < 901504
		next = drbr_dequeue(ifp, txr->br);
#endif
	}

	if (txr->tx_avail < IXGBE_TX_CLEANUP_THRESHOLD)
		ixgbe_txeof(txr);

	return (err);
}

/*
 * Called from a taskqueue to drain queued transmit packets.
 */
void
ixgbe_deferred_mq_start(void *arg, int pending)
{
	struct tx_ring *txr = arg;
	struct adapter *adapter = txr->adapter;
	struct ifnet *ifp = adapter->ifp;

	IXGBE_TX_LOCK(txr);
	if (!drbr_empty(ifp, txr->br))
		ixgbe_mq_start_locked(ifp, txr);
	IXGBE_TX_UNLOCK(txr);
}

/*
 * Flush all ring buffers
 */
void
ixgbe_qflush(struct ifnet *ifp)
{
	struct adapter	*adapter = ifp->if_softc;
	struct tx_ring	*txr = adapter->tx_rings;
	struct mbuf	*m;

	for (int i = 0; i < adapter->num_queues; i++, txr++) {
		IXGBE_TX_LOCK(txr);
		while ((m = buf_ring_dequeue_sc(txr->br)) != NULL)
			m_freem(m);
		IXGBE_TX_UNLOCK(txr);
	}
	if_qflush(ifp);
}
#endif /* IXGBE_LEGACY_TX */

/*********************************************************************
 *
 *  This routine maps the mbufs to tx descriptors, allowing the
 *  TX engine to transmit the packets.
 *	- return 0 on success, positive on failure
 *
 **********************************************************************/

static int
ixgbe_xmit(struct tx_ring *txr, struct mbuf *m_head)
{
	struct m_tag *mtag;
	struct adapter	*adapter = txr->adapter;
	struct ethercom	*ec = &adapter->osdep.ec;
	u32		olinfo_status = 0, cmd_type_len;
	int		i, j, error;
	int		first;
	bus_dmamap_t	map;
	struct ixgbe_tx_buf *txbuf;
	union ixgbe_adv_tx_desc *txd = NULL;

	/* Basic descriptor defines */
	cmd_type_len = (IXGBE_ADVTXD_DTYP_DATA |
	    IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT);

	if ((mtag = VLAN_OUTPUT_TAG(ec, m_head)) != NULL)
		cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE;

	/*
	 * Important to capture the first descriptor
	 * used, because it will contain the index of
	 * the one we tell the hardware to report back
	 */
	first = txr->next_avail_desc;
	txbuf = &txr->tx_buffers[first];
	map = txbuf->map;

	/*
	 * Map the packet for DMA.
	 */
	error = bus_dmamap_load_mbuf(txr->txtag->dt_dmat, map,
	    m_head, BUS_DMA_NOWAIT);

	if (__predict_false(error)) {
		switch (error) {
		case EAGAIN:
			adapter->eagain_tx_dma_setup.ev_count++;
			return EAGAIN;
		case ENOMEM:
			adapter->enomem_tx_dma_setup.ev_count++;
			return EAGAIN;
		case EFBIG:
			/*
			 * XXX Try it again?
			 * do m_defrag() and retry bus_dmamap_load_mbuf().
			 */
			adapter->efbig_tx_dma_setup.ev_count++;
			return error;
		case EINVAL:
			adapter->einval_tx_dma_setup.ev_count++;
			return error;
		default:
			adapter->other_tx_dma_setup.ev_count++;
			return error;
		}
	}

	/* Make certain there are enough descriptors */
	if (map->dm_nsegs > txr->tx_avail - 2) {
		txr->no_desc_avail.ev_count++;
		ixgbe_dmamap_unload(txr->txtag, txbuf->map);
		return EAGAIN;
	}

	/*
	 * Set up the appropriate offload context;
	 * this will consume the first descriptor.
	 */
	error = ixgbe_tx_ctx_setup(txr, m_head, &cmd_type_len, &olinfo_status);
	if (__predict_false(error)) {
		return (error);
	}

#ifdef IXGBE_FDIR
	/* Do the flow director magic */
	if ((txr->atr_sample) && (!adapter->fdir_reinit)) {
		++txr->atr_count;
		if (txr->atr_count >= atr_sample_rate) {
			ixgbe_atr(txr, m_head);
			txr->atr_count = 0;
		}
	}
#endif

	i = txr->next_avail_desc;
	for (j = 0; j < map->dm_nsegs; j++) {
		bus_size_t seglen;
		bus_addr_t segaddr;

		txbuf = &txr->tx_buffers[i];
		txd = &txr->tx_base[i];
		seglen = map->dm_segs[j].ds_len;
		segaddr = htole64(map->dm_segs[j].ds_addr);

		txd->read.buffer_addr = segaddr;
		txd->read.cmd_type_len = htole32(txr->txd_cmd |
		    cmd_type_len | seglen);
		txd->read.olinfo_status = htole32(olinfo_status);

		if (++i == txr->num_desc)
			i = 0;
	}

	txd->read.cmd_type_len |=
	    htole32(IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS);
	txr->tx_avail -= map->dm_nsegs;
	txr->next_avail_desc = i;

	txbuf->m_head = m_head;
	/*
	 * Here we swap the map so the last descriptor,
	 * which gets the completion interrupt, has the
	 * real map, and the first descriptor gets the
	 * unused map from this last descriptor.
	 */
	txr->tx_buffers[first].map = txbuf->map;
	txbuf->map = map;
	bus_dmamap_sync(txr->txtag->dt_dmat, map, 0, m_head->m_pkthdr.len,
	    BUS_DMASYNC_PREWRITE);

	/* Set the EOP descriptor that will be marked done */
	txbuf = &txr->tx_buffers[first];
	txbuf->eop = txd;

	ixgbe_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	/*
	 * Advance the Transmit Descriptor Tail (TDT); this tells the
	 * hardware that this frame is available to transmit.
	 */
	++txr->total_packets.ev_count;
	IXGBE_WRITE_REG(&adapter->hw, txr->tail, i);

	/* Mark queue as having work */
	if (txr->busy == 0)
		txr->busy = 1;

	return 0;
}
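
/*
 * Illustrative sketch of the map swap in ixgbe_xmit() above (assumes a
 * packet occupying descriptors first..last):
 *
 *	before:	tx_buffers[first].map -> loaded map (holds the packet DMA)
 *		tx_buffers[last].map  -> an idle map
 *	after:	tx_buffers[first].map -> idle map
 *		tx_buffers[last].map  -> loaded map
 *
 * m_head is stored on the last buffer, and ixgbe_txeof() unloads
 * buf->map on whichever buffer carries m_head, so the loaded map must
 * travel with it; the eop pointer stays on the first buffer.
 */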

/*********************************************************************
 *
 *  Allocate memory for tx_buffer structures. The tx_buffer stores all
 *  the information needed to transmit a packet on the wire. This is
 *  called only once at attach, setup is done every reset.
 *
 **********************************************************************/
int
ixgbe_allocate_transmit_buffers(struct tx_ring *txr)
{
	struct adapter *adapter = txr->adapter;
	device_t dev = adapter->dev;
	struct ixgbe_tx_buf *txbuf;
	int error, i;

	/*
	 * Setup DMA descriptor areas.
	 */
	if ((error = ixgbe_dma_tag_create(adapter->osdep.dmat, /* parent */
	    1, 0,		/* alignment, bounds */
	    IXGBE_TSO_SIZE,	/* maxsize */
	    adapter->num_segs,	/* nsegments */
	    PAGE_SIZE,		/* maxsegsize */
	    0,			/* flags */
	    &txr->txtag))) {
		aprint_error_dev(dev, "Unable to allocate TX DMA tag\n");
		goto fail;
	}

	if (!(txr->tx_buffers =
	    (struct ixgbe_tx_buf *) malloc(sizeof(struct ixgbe_tx_buf) *
	    adapter->num_tx_desc, M_DEVBUF, M_NOWAIT | M_ZERO))) {
		aprint_error_dev(dev, "Unable to allocate tx_buffer memory\n");
		error = ENOMEM;
		goto fail;
	}

	/* Create the descriptor buffer dma maps */
	txbuf = txr->tx_buffers;
	for (i = 0; i < adapter->num_tx_desc; i++, txbuf++) {
		error = ixgbe_dmamap_create(txr->txtag, 0, &txbuf->map);
		if (error != 0) {
			aprint_error_dev(dev,
			    "Unable to create TX DMA map (%d)\n", error);
			goto fail;
		}
	}

	return 0;
fail:
	/* Free everything; this handles the case where we failed partway */
	ixgbe_free_transmit_structures(adapter);
	return (error);
}

/*********************************************************************
 *
 *  Initialize a transmit ring.
 *
 **********************************************************************/
static void
ixgbe_setup_transmit_ring(struct tx_ring *txr)
{
	struct adapter *adapter = txr->adapter;
	struct ixgbe_tx_buf *txbuf;
	int i;
#ifdef DEV_NETMAP
	struct netmap_adapter *na = NA(adapter->ifp);
	struct netmap_slot *slot;
#endif /* DEV_NETMAP */

	/* Clear the old ring contents */
	IXGBE_TX_LOCK(txr);
#ifdef DEV_NETMAP
	/*
	 * (under lock): if in netmap mode, do some consistency
	 * checks and set slot to entry 0 of the netmap ring.
	 */
	slot = netmap_reset(na, NR_TX, txr->me, 0);
#endif /* DEV_NETMAP */
	bzero((void *)txr->tx_base,
	    (sizeof(union ixgbe_adv_tx_desc)) * adapter->num_tx_desc);
	/* Reset indices */
	txr->next_avail_desc = 0;
	txr->next_to_clean = 0;

	/* Free any existing tx buffers. */
	txbuf = txr->tx_buffers;
	for (i = 0; i < txr->num_desc; i++, txbuf++) {
		if (txbuf->m_head != NULL) {
			bus_dmamap_sync(txr->txtag->dt_dmat, txbuf->map,
			    0, txbuf->m_head->m_pkthdr.len,
			    BUS_DMASYNC_POSTWRITE);
			ixgbe_dmamap_unload(txr->txtag, txbuf->map);
			m_freem(txbuf->m_head);
			txbuf->m_head = NULL;
		}
#ifdef DEV_NETMAP
		/*
		 * In netmap mode, set the map for the packet buffer.
		 * NOTE: Some drivers (not this one) also need to set
		 * the physical buffer address in the NIC ring.
		 * Slots in the netmap ring (indexed by "si") are
		 * kring->nkr_hwofs positions "ahead" wrt the
		 * corresponding slot in the NIC ring. In some drivers
		 * (not here) nkr_hwofs can be negative. Function
		 * netmap_idx_n2k() handles wraparounds properly.
		 */
		if (slot) {
			int si = netmap_idx_n2k(&na->tx_rings[txr->me], i);
			netmap_load_map(na, txr->txtag, txbuf->map,
			    NMB(na, slot + si));
		}
#endif /* DEV_NETMAP */
		/* Clear the EOP descriptor pointer */
		txbuf->eop = NULL;
	}

#ifdef IXGBE_FDIR
	/* Set the rate at which we sample packets */
	if (adapter->hw.mac.type != ixgbe_mac_82598EB)
		txr->atr_sample = atr_sample_rate;
#endif

	/* Set number of descriptors available */
	txr->tx_avail = adapter->num_tx_desc;

	ixgbe_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	IXGBE_TX_UNLOCK(txr);
}

/*********************************************************************
 *
 *  Initialize all transmit rings.
 *
 **********************************************************************/
int
ixgbe_setup_transmit_structures(struct adapter *adapter)
{
	struct tx_ring *txr = adapter->tx_rings;

	for (int i = 0; i < adapter->num_queues; i++, txr++)
		ixgbe_setup_transmit_ring(txr);

	return (0);
}

/*********************************************************************
 *
 *  Free all transmit rings.
 *
 **********************************************************************/
void
ixgbe_free_transmit_structures(struct adapter *adapter)
{
	struct tx_ring *txr = adapter->tx_rings;

	for (int i = 0; i < adapter->num_queues; i++, txr++) {
		ixgbe_free_transmit_buffers(txr);
		ixgbe_dma_free(adapter, &txr->txdma);
		IXGBE_TX_LOCK_DESTROY(txr);
	}
	free(adapter->tx_rings, M_DEVBUF);
}

/*********************************************************************
 *
 *  Free transmit ring related data structures.
 *
 **********************************************************************/
static void
ixgbe_free_transmit_buffers(struct tx_ring *txr)
{
	struct adapter *adapter = txr->adapter;
	struct ixgbe_tx_buf *tx_buffer;
	int i;

	INIT_DEBUGOUT("ixgbe_free_transmit_ring: begin");

	if (txr->tx_buffers == NULL)
		return;

	tx_buffer = txr->tx_buffers;
	for (i = 0; i < adapter->num_tx_desc; i++, tx_buffer++) {
		if (tx_buffer->m_head != NULL) {
			bus_dmamap_sync(txr->txtag->dt_dmat, tx_buffer->map,
			    0, tx_buffer->m_head->m_pkthdr.len,
			    BUS_DMASYNC_POSTWRITE);
			ixgbe_dmamap_unload(txr->txtag, tx_buffer->map);
			m_freem(tx_buffer->m_head);
			tx_buffer->m_head = NULL;
			if (tx_buffer->map != NULL) {
				ixgbe_dmamap_destroy(txr->txtag,
				    tx_buffer->map);
				tx_buffer->map = NULL;
			}
		} else if (tx_buffer->map != NULL) {
			ixgbe_dmamap_unload(txr->txtag, tx_buffer->map);
			ixgbe_dmamap_destroy(txr->txtag, tx_buffer->map);
			tx_buffer->map = NULL;
		}
	}
#ifndef IXGBE_LEGACY_TX
	if (txr->br != NULL)
		buf_ring_free(txr->br, M_DEVBUF);
#endif
	if (txr->tx_buffers != NULL) {
		free(txr->tx_buffers, M_DEVBUF);
		txr->tx_buffers = NULL;
	}
	if (txr->txtag != NULL) {
		ixgbe_dma_tag_destroy(txr->txtag);
		txr->txtag = NULL;
	}
	return;
}

/*********************************************************************
 *
 *  Advanced Context Descriptor setup for VLAN, CSUM or TSO
 *
 **********************************************************************/

static int
ixgbe_tx_ctx_setup(struct tx_ring *txr, struct mbuf *mp,
    u32 *cmd_type_len, u32 *olinfo_status)
{
	struct adapter *adapter = txr->adapter;
	struct ethercom *ec = &adapter->osdep.ec;
	struct m_tag *mtag;
	struct ixgbe_adv_tx_context_desc *TXD;
	struct ether_vlan_header *eh;
	struct ip ip;
	struct ip6_hdr ip6;
	u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0;
	int	ehdrlen, ip_hlen = 0;
	u16	etype;
	u8	ipproto __diagused = 0;
	int	offload = TRUE;
	int	ctxd = txr->next_avail_desc;
	u16	vtag = 0;

	/* First check if TSO is to be used */
	if (mp->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6))
		return (ixgbe_tso_setup(txr, mp, cmd_type_len, olinfo_status));

	if ((mp->m_pkthdr.csum_flags & M_CSUM_OFFLOAD) == 0)
		offload = FALSE;

	/* Indicate the whole packet as payload when not doing TSO */
	*olinfo_status |= mp->m_pkthdr.len << IXGBE_ADVTXD_PAYLEN_SHIFT;

	/* Now ready a context descriptor */
	TXD = (struct ixgbe_adv_tx_context_desc *) &txr->tx_base[ctxd];

	/*
	** In advanced descriptors the vlan tag must
	** be placed into the context descriptor. Hence
	** we need to make one even if not doing offloads.
	*/
	if ((mtag = VLAN_OUTPUT_TAG(ec, mp)) != NULL) {
		vtag = htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
		vlan_macip_lens |= (vtag << IXGBE_ADVTXD_VLAN_SHIFT);
	} else if (!IXGBE_IS_X550VF(adapter) && (offload == FALSE))
		return (0);

	/*
	 * Determine where frame payload starts.
	 * Jump over vlan headers if already present,
	 * helpful for QinQ too.
	 */
	KASSERT(mp->m_len >= offsetof(struct ether_vlan_header, evl_tag));
	eh = mtod(mp, struct ether_vlan_header *);
	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
		KASSERT(mp->m_len >= sizeof(struct ether_vlan_header));
		etype = ntohs(eh->evl_proto);
		ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
	} else {
		etype = ntohs(eh->evl_encap_proto);
		ehdrlen = ETHER_HDR_LEN;
	}

	/* Set the ether header length */
	vlan_macip_lens |= ehdrlen << IXGBE_ADVTXD_MACLEN_SHIFT;

	if (offload == FALSE)
		goto no_offloads;

	switch (etype) {
	case ETHERTYPE_IP:
		m_copydata(mp, ehdrlen, sizeof(ip), &ip);
		ip_hlen = ip.ip_hl << 2;
		ipproto = ip.ip_p;
#if 0
		ip.ip_sum = 0;
		m_copyback(mp, ehdrlen, sizeof(ip), &ip);
#else
		KASSERT((mp->m_pkthdr.csum_flags & M_CSUM_IPv4) == 0 ||
		    ip.ip_sum == 0);
#endif
		type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
		break;
	case ETHERTYPE_IPV6:
		m_copydata(mp, ehdrlen, sizeof(ip6), &ip6);
		ip_hlen = sizeof(ip6);
		/* XXX-BZ this will go badly in case of ext hdrs. */
		ipproto = ip6.ip6_nxt;
		type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV6;
		break;
	default:
		break;
	}

	if ((mp->m_pkthdr.csum_flags & M_CSUM_IPv4) != 0)
		*olinfo_status |= IXGBE_TXD_POPTS_IXSM << 8;

	vlan_macip_lens |= ip_hlen;

	if (mp->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TCPv6)) {
		type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
		*olinfo_status |= IXGBE_TXD_POPTS_TXSM << 8;
		KASSERT(ipproto == IPPROTO_TCP);
	} else if (mp->m_pkthdr.csum_flags & (M_CSUM_UDPv4 | M_CSUM_UDPv6)) {
		type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_UDP;
		*olinfo_status |= IXGBE_TXD_POPTS_TXSM << 8;
		KASSERT(ipproto == IPPROTO_UDP);
	}

no_offloads:
	type_tucmd_mlhl |= IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;

	/* Now copy bits into descriptor */
	TXD->vlan_macip_lens = htole32(vlan_macip_lens);
	TXD->type_tucmd_mlhl = htole32(type_tucmd_mlhl);
	TXD->seqnum_seed = htole32(0);
	TXD->mss_l4len_idx = htole32(0);

	/* We've consumed the first desc, adjust counters */
	if (++ctxd == txr->num_desc)
		ctxd = 0;
	txr->next_avail_desc = ctxd;
	--txr->tx_avail;

	return 0;
}

/**********************************************************************
 *
 *  Setup work for hardware segmentation offload (TSO) on
 *  adapters using advanced tx descriptors
 *
 **********************************************************************/
static int
ixgbe_tso_setup(struct tx_ring *txr, struct mbuf *mp,
    u32 *cmd_type_len, u32 *olinfo_status)
{
	struct m_tag *mtag;
	struct adapter *adapter = txr->adapter;
	struct ethercom *ec = &adapter->osdep.ec;
	struct ixgbe_adv_tx_context_desc *TXD;
	u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0;
	u32 mss_l4len_idx = 0, paylen;
	u16 vtag = 0, eh_type;
	int ctxd, ehdrlen, ip_hlen, tcp_hlen;
	struct ether_vlan_header *eh;
#ifdef INET6
	struct ip6_hdr *ip6;
#endif
#ifdef INET
	struct ip *ip;
#endif
	struct tcphdr *th;

	/*
	 * Determine where frame payload starts.
	 * Jump over vlan headers if already present
	 */
	eh = mtod(mp, struct ether_vlan_header *);
	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
		ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
		eh_type = eh->evl_proto;
	} else {
		ehdrlen = ETHER_HDR_LEN;
		eh_type = eh->evl_encap_proto;
	}

	switch (ntohs(eh_type)) {
#ifdef INET6
	case ETHERTYPE_IPV6:
		ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
		/* XXX-BZ For now we do not pretend to support ext. hdrs. */
		if (ip6->ip6_nxt != IPPROTO_TCP)
			return (ENXIO);
		ip_hlen = sizeof(struct ip6_hdr);
		ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
		th = (struct tcphdr *)((char *)ip6 + ip_hlen);
		th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
		    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
		type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV6;
		break;
#endif
#ifdef INET
	case ETHERTYPE_IP:
		ip = (struct ip *)(mp->m_data + ehdrlen);
		if (ip->ip_p != IPPROTO_TCP)
			return (ENXIO);
		ip->ip_sum = 0;
		ip_hlen = ip->ip_hl << 2;
		th = (struct tcphdr *)((char *)ip + ip_hlen);
		th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
		    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
		type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
		/* Tell transmit desc to also do IPv4 checksum. */
		*olinfo_status |= IXGBE_TXD_POPTS_IXSM << 8;
		break;
#endif
	default:
		panic("%s: CSUM_TSO but no supported IP version (0x%04x)",
		    __func__, ntohs(eh_type));
		break;
	}

	ctxd = txr->next_avail_desc;
	TXD = (struct ixgbe_adv_tx_context_desc *) &txr->tx_base[ctxd];

	tcp_hlen = th->th_off << 2;

	/* This is used in the transmit desc in encap */
	paylen = mp->m_pkthdr.len - ehdrlen - ip_hlen - tcp_hlen;

	/* VLAN MACLEN IPLEN */
	if ((mtag = VLAN_OUTPUT_TAG(ec, mp)) != NULL) {
		vtag = htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
		vlan_macip_lens |= (vtag << IXGBE_ADVTXD_VLAN_SHIFT);
	}

	vlan_macip_lens |= ehdrlen << IXGBE_ADVTXD_MACLEN_SHIFT;
	vlan_macip_lens |= ip_hlen;
	TXD->vlan_macip_lens = htole32(vlan_macip_lens);

	/* ADV DTYPE TUCMD */
	type_tucmd_mlhl |= IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
	type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
	TXD->type_tucmd_mlhl = htole32(type_tucmd_mlhl);

	/* MSS L4LEN IDX */
	mss_l4len_idx |= (mp->m_pkthdr.segsz << IXGBE_ADVTXD_MSS_SHIFT);
	mss_l4len_idx |= (tcp_hlen << IXGBE_ADVTXD_L4LEN_SHIFT);
	TXD->mss_l4len_idx = htole32(mss_l4len_idx);

	TXD->seqnum_seed = htole32(0);

	if (++ctxd == txr->num_desc)
		ctxd = 0;

	txr->tx_avail--;
	txr->next_avail_desc = ctxd;
	*cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;
	*olinfo_status |= IXGBE_TXD_POPTS_TXSM << 8;
	*olinfo_status |= paylen << IXGBE_ADVTXD_PAYLEN_SHIFT;
	++txr->tso_tx.ev_count;
	return (0);
}
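
/*
 * Note on the pseudo-header checksums seeded above: for TSO the
 * hardware recomputes the TCP checksum for every segment it emits,
 * but it expects th_sum to already contain the pseudo-header sum
 * *without* the length field, which is why in_cksum_phdr() and
 * in6_cksum_phdr() are invoked with a zero length and only the
 * protocol number.
 */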

/**********************************************************************
 *
 *  Examine each tx_buffer in the used queue. If the hardware is done
 *  processing the packet, then free associated resources. The
 *  tx_buffer is put back on the free queue.
 *
 **********************************************************************/
void
ixgbe_txeof(struct tx_ring *txr)
{
	struct adapter		*adapter = txr->adapter;
	struct ifnet		*ifp = adapter->ifp;
	u32			work, processed = 0;
	u16			limit = txr->process_limit;
	struct ixgbe_tx_buf	*buf;
	union ixgbe_adv_tx_desc *txd;

	KASSERT(mutex_owned(&txr->tx_mtx));

#ifdef DEV_NETMAP
	if (ifp->if_capenable & IFCAP_NETMAP) {
		struct netmap_adapter *na = NA(ifp);
		struct netmap_kring *kring = &na->tx_rings[txr->me];
		txd = txr->tx_base;
		bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
		    BUS_DMASYNC_POSTREAD);
		/*
		 * In netmap mode, all the work is done in the context
		 * of the client thread. Interrupt handlers only wake up
		 * clients, which may be sleeping on individual rings
		 * or on a global resource for all rings.
		 * To implement tx interrupt mitigation, we wake up the client
		 * thread roughly every half ring, even if the NIC interrupts
		 * more frequently. This is implemented as follows:
		 * - ixgbe_txsync() sets kring->nr_kflags with the index of
		 *   the slot that should wake up the thread (nkr_num_slots
		 *   means the user thread should not be woken up);
		 * - the driver ignores tx interrupts unless netmap_mitigate=0
		 *   or the slot has the DD bit set.
		 */
		if (!netmap_mitigate ||
		    (kring->nr_kflags < kring->nkr_num_slots &&
		     txd[kring->nr_kflags].wb.status & IXGBE_TXD_STAT_DD)) {
			netmap_tx_irq(ifp, txr->me);
		}
		return;
	}
#endif /* DEV_NETMAP */

	if (txr->tx_avail == txr->num_desc) {
		txr->busy = 0;
		return;
	}

	/* Get work starting point */
	work = txr->next_to_clean;
	buf = &txr->tx_buffers[work];
	txd = &txr->tx_base[work];
	work -= txr->num_desc; /* The distance to ring end */
	ixgbe_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
	    BUS_DMASYNC_POSTREAD);
	do {
		union ixgbe_adv_tx_desc *eop = buf->eop;
		if (eop == NULL) /* No work */
			break;

		if ((eop->wb.status & IXGBE_TXD_STAT_DD) == 0)
			break;	/* I/O not complete */

		if (buf->m_head) {
			txr->bytes += buf->m_head->m_pkthdr.len;
			bus_dmamap_sync(txr->txtag->dt_dmat, buf->map,
			    0, buf->m_head->m_pkthdr.len,
			    BUS_DMASYNC_POSTWRITE);
			ixgbe_dmamap_unload(txr->txtag, buf->map);
			m_freem(buf->m_head);
			buf->m_head = NULL;
		}
		buf->eop = NULL;
		++txr->tx_avail;

		/* Clean the whole range if the packet is multi-segment */
		while (txd != eop) {
			++txd;
			++buf;
			++work;
			/* wrap the ring? */
			if (__predict_false(!work)) {
				work -= txr->num_desc;
				buf = txr->tx_buffers;
				txd = txr->tx_base;
			}
			if (buf->m_head) {
				txr->bytes += buf->m_head->m_pkthdr.len;
				bus_dmamap_sync(txr->txtag->dt_dmat, buf->map,
				    0, buf->m_head->m_pkthdr.len,
				    BUS_DMASYNC_POSTWRITE);
				ixgbe_dmamap_unload(txr->txtag, buf->map);
				m_freem(buf->m_head);
				buf->m_head = NULL;
			}
			++txr->tx_avail;
			buf->eop = NULL;
		}
		++txr->packets;
		++processed;
		++ifp->if_opackets;

		/* Try the next packet */
		++txd;
		++buf;
		++work;
		/* reset with a wrap */
		if (__predict_false(!work)) {
			work -= txr->num_desc;
			buf = txr->tx_buffers;
			txd = txr->tx_base;
		}
		prefetch(txd);
	} while (__predict_true(--limit));

	ixgbe_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	work += txr->num_desc;
	txr->next_to_clean = work;

	/*
	** Queue hang detection: we know there is
	** work outstanding or the early return above
	** would have been taken, so increment busy
	** if nothing managed to get cleaned; the
	** local timer will then check it and mark
	** the queue as HUNG if it exceeds the
	** maximum number of attempts.
	*/
	if ((processed == 0) && (txr->busy != IXGBE_QUEUE_HUNG))
		++txr->busy;
	/*
	** If anything was cleaned we reset state to 1;
	** note this will turn off HUNG if it's set.
	*/
	if (processed)
		txr->busy = 1;

	if (txr->tx_avail == txr->num_desc)
		txr->busy = 0;

	return;
}
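
/*
 * Summary of the txr->busy accounting driven by ixgbe_txeof() above:
 *
 *	0			ring fully cleaned, no work outstanding
 *	1			work outstanding and progress being made
 *	2 .. HUNG-1		consecutive calls that cleaned nothing
 *	IXGBE_QUEUE_HUNG	saturation value; the local timer treats
 *				the queue as hung once busy reaches it
 */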

#ifdef IXGBE_FDIR
/*
** This routine parses packet headers so that Flow
** Director can make a hashed filter table entry
** allowing traffic flows to be identified and kept
** on the same cpu. This would be a performance
** hit, but we only do it on a sampled subset of
** packets (every atr_sample_rate'th packet).
*/
static void
ixgbe_atr(struct tx_ring *txr, struct mbuf *mp)
{
	struct adapter			*adapter = txr->adapter;
	struct ix_queue			*que;
	struct ip			*ip;
	struct tcphdr			*th;
	struct udphdr			*uh;
	struct ether_vlan_header	*eh;
	union ixgbe_atr_hash_dword	input = {.dword = 0};
	union ixgbe_atr_hash_dword	common = {.dword = 0};
	int				ehdrlen, ip_hlen;
	u16				etype;

	eh = mtod(mp, struct ether_vlan_header *);
	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
		ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
		etype = eh->evl_proto;
	} else {
		ehdrlen = ETHER_HDR_LEN;
		etype = eh->evl_encap_proto;
	}

	/* Only handling IPv4 */
	if (etype != htons(ETHERTYPE_IP))
		return;

	ip = (struct ip *)(mp->m_data + ehdrlen);
	ip_hlen = ip->ip_hl << 2;

	/* check if we're UDP or TCP */
	switch (ip->ip_p) {
	case IPPROTO_TCP:
		th = (struct tcphdr *)((char *)ip + ip_hlen);
		/* src and dst are inverted */
		common.port.dst ^= th->th_sport;
		common.port.src ^= th->th_dport;
		input.formatted.flow_type ^= IXGBE_ATR_FLOW_TYPE_TCPV4;
		break;
	case IPPROTO_UDP:
		uh = (struct udphdr *)((char *)ip + ip_hlen);
		/* src and dst are inverted */
		common.port.dst ^= uh->uh_sport;
		common.port.src ^= uh->uh_dport;
		input.formatted.flow_type ^= IXGBE_ATR_FLOW_TYPE_UDPV4;
		break;
	default:
		return;
	}

	input.formatted.vlan_id = htobe16(mp->m_pkthdr.ether_vtag);
	if (mp->m_pkthdr.ether_vtag)
		common.flex_bytes ^= htons(ETHERTYPE_VLAN);
	else
		common.flex_bytes ^= etype;
	common.ip ^= ip->ip_src.s_addr ^ ip->ip_dst.s_addr;

	que = &adapter->queues[txr->me];
	/*
	** This assumes the Rx queue and Tx
	** queue are bound to the same CPU.
	*/
	ixgbe_fdir_add_signature_filter_82599(&adapter->hw,
	    input, common, que->msix);
}
#endif /* IXGBE_FDIR */

/*
** Used to detect a descriptor that has
** been merged by Hardware RSC.
*/
static inline u32
ixgbe_rsc_count(union ixgbe_adv_rx_desc *rx)
{
	return (le32toh(rx->wb.lower.lo_dword.data) &
	    IXGBE_RXDADV_RSCCNT_MASK) >> IXGBE_RXDADV_RSCCNT_SHIFT;
}

/*********************************************************************
 *
 *  Initialize Hardware RSC (LRO) feature on 82599
 *  for an RX ring; this is toggled by the LRO capability
 *  even though it is transparent to the stack.
 *
 *  NOTE: since this HW feature only works with IPV4 and
 *        our testing has shown soft LRO to be as effective,
 *        I have decided to disable this by default.
 *
 **********************************************************************/
static void
ixgbe_setup_hw_rsc(struct rx_ring *rxr)
{
	struct adapter	*adapter = rxr->adapter;
	struct ixgbe_hw	*hw = &adapter->hw;
	u32		rscctrl, rdrxctl;

	/* If turning LRO/RSC off we need to disable it */
	if ((adapter->ifp->if_capenable & IFCAP_LRO) == 0) {
		rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(rxr->me));
		rscctrl &= ~IXGBE_RSCCTL_RSCEN;
		/*
		 * XXX write the cleared RSCEN bit back; the original
		 * code returned without this write, leaving RSC enabled.
		 */
		IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(rxr->me), rscctrl);
		return;
	}

	rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
	rdrxctl &= ~IXGBE_RDRXCTL_RSCFRSTSIZE;
#ifdef DEV_NETMAP /* crcstrip is optional in netmap */
	if (adapter->ifp->if_capenable & IFCAP_NETMAP && !ix_crcstrip)
#endif /* DEV_NETMAP */
		rdrxctl |= IXGBE_RDRXCTL_CRCSTRIP;
	rdrxctl |= IXGBE_RDRXCTL_RSCACKC;
	IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);

	rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(rxr->me));
	rscctrl |= IXGBE_RSCCTL_RSCEN;
	/*
	** Limit the total number of descriptors that
	** can be combined, so it does not exceed 64K
	*/
	if (rxr->mbuf_sz == MCLBYTES)
		rscctrl |= IXGBE_RSCCTL_MAXDESC_16;
	else if (rxr->mbuf_sz == MJUMPAGESIZE)
		rscctrl |= IXGBE_RSCCTL_MAXDESC_8;
	else if (rxr->mbuf_sz == MJUM9BYTES)
		rscctrl |= IXGBE_RSCCTL_MAXDESC_4;
	else  /* Using 16K cluster */
		rscctrl |= IXGBE_RSCCTL_MAXDESC_1;

	IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(rxr->me), rscctrl);

	/* Enable TCP header recognition */
	IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0),
	    (IXGBE_READ_REG(hw, IXGBE_PSRTYPE(0)) |
	    IXGBE_PSRTYPE_TCPHDR));

	/* Disable RSC for ACK packets */
	IXGBE_WRITE_REG(hw, IXGBE_RSCDBU,
	    (IXGBE_RSCDBU_RSCACKDIS | IXGBE_READ_REG(hw, IXGBE_RSCDBU)));

	rxr->hw_rsc = TRUE;
}
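
/*
 * Worked arithmetic for the MAXDESC choices above, using the usual
 * cluster sizes (MCLBYTES 2KB, MJUMPAGESIZE one 4KB page on common
 * configurations, MJUM9BYTES 9KB, MJUM16BYTES 16KB):
 *
 *	16 x 2KB = 32KB,  8 x 4KB = 32KB,  4 x 9KB = 36KB,  1 x 16KB = 16KB
 *
 * all of which stay below the 64KB RSC aggregation ceiling.
 */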
1326 1.1 msaitoh /*********************************************************************
1327 1.1 msaitoh *
1328 1.1 msaitoh * Refresh mbuf buffers for RX descriptor rings
1329 1.1 msaitoh * - now keeps its own state, so discards due to resource
1330 1.1 msaitoh * exhaustion are unnecessary; if an mbuf cannot be obtained
1331 1.1 msaitoh * it just returns, keeping its placeholder, so it can simply
1332 1.1 msaitoh * be called again later to retry.
1333 1.1 msaitoh *
1334 1.1 msaitoh **********************************************************************/
1335 1.1 msaitoh static void
1336 1.1 msaitoh ixgbe_refresh_mbufs(struct rx_ring *rxr, int limit)
1337 1.1 msaitoh {
1338 1.1 msaitoh struct adapter *adapter = rxr->adapter;
1339 1.1 msaitoh struct ixgbe_rx_buf *rxbuf;
1340 1.1 msaitoh struct mbuf *mp;
1341 1.1 msaitoh int i, j, error;
1342 1.1 msaitoh bool refreshed = false;
1343 1.1 msaitoh
1344 1.1 msaitoh i = j = rxr->next_to_refresh;
1345 1.1 msaitoh /* Control the loop with one beyond */
1346 1.1 msaitoh if (++j == rxr->num_desc)
1347 1.1 msaitoh j = 0;
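/*
** Worked example (illustrative, not from the original sources): with
** num_desc = 4, next_to_refresh = 2 and limit = 1, we enter the loop
** with i = 2, j = 3; it refreshes descriptors 2 and 3, leaves
** next_to_refresh = 0, and stops when j reaches limit.  'limit' is
** thus the first descriptor we must not touch (the caller's
** next_to_check position).
*/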
1348 1.1 msaitoh
1349 1.1 msaitoh while (j != limit) {
1350 1.1 msaitoh rxbuf = &rxr->rx_buffers[i];
1351 1.1 msaitoh if (rxbuf->buf == NULL) {
1352 1.1 msaitoh mp = ixgbe_getjcl(&adapter->jcl_head, M_NOWAIT,
1353 1.1 msaitoh MT_DATA, M_PKTHDR, rxr->mbuf_sz);
1354 1.1 msaitoh if (mp == NULL) {
1355 1.1 msaitoh rxr->no_jmbuf.ev_count++;
1356 1.1 msaitoh goto update;
1357 1.1 msaitoh }
1358 1.1 msaitoh if (adapter->max_frame_size <= (MCLBYTES - ETHER_ALIGN))
1359 1.1 msaitoh m_adj(mp, ETHER_ALIGN);
1360 1.1 msaitoh } else
1361 1.1 msaitoh mp = rxbuf->buf;
1362 1.1 msaitoh
1363 1.1 msaitoh mp->m_pkthdr.len = mp->m_len = rxr->mbuf_sz;
1364 1.1 msaitoh
1365 1.1 msaitoh /* If we're dealing with an mbuf that was copied rather
1366 1.1 msaitoh * than replaced, there's no need to go through busdma.
1367 1.1 msaitoh */
1368 1.1 msaitoh if ((rxbuf->flags & IXGBE_RX_COPY) == 0) {
1369 1.1 msaitoh /* Get the memory mapping */
1370 1.4 msaitoh ixgbe_dmamap_unload(rxr->ptag, rxbuf->pmap);
1371 1.1 msaitoh error = bus_dmamap_load_mbuf(rxr->ptag->dt_dmat,
1372 1.1 msaitoh rxbuf->pmap, mp, BUS_DMA_NOWAIT);
1373 1.1 msaitoh if (error != 0) {
1374 1.1 msaitoh printf("Refresh mbufs: payload dmamap load"
1375 1.1 msaitoh " failure - %d\n", error);
1376 1.1 msaitoh m_free(mp);
1377 1.1 msaitoh rxbuf->buf = NULL;
1378 1.1 msaitoh goto update;
1379 1.1 msaitoh }
1380 1.1 msaitoh rxbuf->buf = mp;
1381 1.1 msaitoh bus_dmamap_sync(rxr->ptag->dt_dmat, rxbuf->pmap,
1382 1.1 msaitoh 0, mp->m_pkthdr.len, BUS_DMASYNC_PREREAD);
1383 1.1 msaitoh rxbuf->addr = rxr->rx_base[i].read.pkt_addr =
1384 1.1 msaitoh htole64(rxbuf->pmap->dm_segs[0].ds_addr);
1385 1.1 msaitoh } else {
1386 1.1 msaitoh rxr->rx_base[i].read.pkt_addr = rxbuf->addr;
1387 1.1 msaitoh rxbuf->flags &= ~IXGBE_RX_COPY;
1388 1.1 msaitoh }
1389 1.1 msaitoh
1390 1.1 msaitoh refreshed = true;
1391 1.1 msaitoh /* Next is precalculated */
1392 1.1 msaitoh i = j;
1393 1.1 msaitoh rxr->next_to_refresh = i;
1394 1.1 msaitoh if (++j == rxr->num_desc)
1395 1.1 msaitoh j = 0;
1396 1.1 msaitoh }
1397 1.1 msaitoh update:
1398 1.1 msaitoh if (refreshed) /* Update hardware tail index */
1399 1.1 msaitoh IXGBE_WRITE_REG(&adapter->hw,
1400 1.3 msaitoh rxr->tail, rxr->next_to_refresh);
1401 1.1 msaitoh return;
1402 1.1 msaitoh }
1403 1.1 msaitoh
1404 1.1 msaitoh /*********************************************************************
1405 1.1 msaitoh *
1406 1.1 msaitoh * Allocate memory for rx_buffer structures. Since we use one
1407 1.1 msaitoh * rx_buffer per received packet, the maximum number of rx_buffers
1408 1.1 msaitoh * that we'll need is equal to the number of receive descriptors
1409 1.1 msaitoh * that we've allocated.
1410 1.1 msaitoh *
1411 1.1 msaitoh **********************************************************************/
1412 1.1 msaitoh int
1413 1.1 msaitoh ixgbe_allocate_receive_buffers(struct rx_ring *rxr)
1414 1.1 msaitoh {
1415 1.1 msaitoh struct adapter *adapter = rxr->adapter;
1416 1.1 msaitoh device_t dev = adapter->dev;
1417 1.1 msaitoh struct ixgbe_rx_buf *rxbuf;
1418 1.1 msaitoh int i, bsize, error;
1419 1.1 msaitoh
1420 1.1 msaitoh bsize = sizeof(struct ixgbe_rx_buf) * rxr->num_desc;
1421 1.1 msaitoh if (!(rxr->rx_buffers =
1422 1.1 msaitoh (struct ixgbe_rx_buf *) malloc(bsize,
1423 1.1 msaitoh M_DEVBUF, M_NOWAIT | M_ZERO))) {
1424 1.1 msaitoh aprint_error_dev(dev, "Unable to allocate rx_buffer memory\n");
1425 1.1 msaitoh error = ENOMEM;
1426 1.1 msaitoh goto fail;
1427 1.1 msaitoh }
1428 1.1 msaitoh
1429 1.1 msaitoh if ((error = ixgbe_dma_tag_create(adapter->osdep.dmat, /* parent */
1430 1.1 msaitoh 1, 0, /* alignment, bounds */
1431 1.1 msaitoh MJUM16BYTES, /* maxsize */
1432 1.1 msaitoh 1, /* nsegments */
1433 1.1 msaitoh MJUM16BYTES, /* maxsegsize */
1434 1.1 msaitoh 0, /* flags */
1435 1.1 msaitoh &rxr->ptag))) {
1436 1.1 msaitoh aprint_error_dev(dev, "Unable to create RX DMA tag\n");
1437 1.1 msaitoh goto fail;
1438 1.1 msaitoh }
1439 1.1 msaitoh
1440 1.1 msaitoh for (i = 0; i < rxr->num_desc; i++) {
1441 1.1 msaitoh rxbuf = &rxr->rx_buffers[i];
1442 1.4 msaitoh error = ixgbe_dmamap_create(rxr->ptag, 0, &rxbuf->pmap);
1443 1.1 msaitoh if (error) {
1444 1.1 msaitoh aprint_error_dev(dev, "Unable to create RX dma map\n");
1445 1.1 msaitoh goto fail;
1446 1.1 msaitoh }
1447 1.1 msaitoh }
1448 1.1 msaitoh
1449 1.1 msaitoh return (0);
1450 1.1 msaitoh
1451 1.1 msaitoh fail:
1452 1.1 msaitoh /* Frees all, but can handle partial completion */
1453 1.1 msaitoh ixgbe_free_receive_structures(adapter);
1454 1.1 msaitoh return (error);
1455 1.1 msaitoh }
1456 1.1 msaitoh
1457 1.3 msaitoh
1458 1.1 msaitoh static void
1459 1.1 msaitoh ixgbe_free_receive_ring(struct rx_ring *rxr)
1460 1.1 msaitoh {
1461 1.1 msaitoh struct ixgbe_rx_buf *rxbuf;
1462 1.1 msaitoh int i;
1463 1.1 msaitoh
1464 1.1 msaitoh for (i = 0; i < rxr->num_desc; i++) {
1465 1.1 msaitoh rxbuf = &rxr->rx_buffers[i];
1466 1.1 msaitoh if (rxbuf->buf != NULL) {
1467 1.1 msaitoh bus_dmamap_sync(rxr->ptag->dt_dmat, rxbuf->pmap,
1468 1.1 msaitoh 0, rxbuf->buf->m_pkthdr.len,
1469 1.1 msaitoh BUS_DMASYNC_POSTREAD);
1470 1.1 msaitoh ixgbe_dmamap_unload(rxr->ptag, rxbuf->pmap);
1471 1.1 msaitoh rxbuf->buf->m_flags |= M_PKTHDR;
1472 1.1 msaitoh m_freem(rxbuf->buf);
1473 1.1 msaitoh rxbuf->buf = NULL;
1474 1.1 msaitoh rxbuf->flags = 0;
1475 1.1 msaitoh }
1476 1.1 msaitoh }
1477 1.1 msaitoh }
1478 1.1 msaitoh
1479 1.1 msaitoh
1480 1.1 msaitoh /*********************************************************************
1481 1.1 msaitoh *
1482 1.1 msaitoh * Initialize a receive ring and its buffers.
1483 1.1 msaitoh *
1484 1.1 msaitoh **********************************************************************/
1485 1.1 msaitoh static int
1486 1.1 msaitoh ixgbe_setup_receive_ring(struct rx_ring *rxr)
1487 1.1 msaitoh {
1488 1.1 msaitoh struct adapter *adapter;
1489 1.1 msaitoh struct ixgbe_rx_buf *rxbuf;
1490 1.1 msaitoh #ifdef LRO
1491 1.1 msaitoh struct ifnet *ifp;
1492 1.1 msaitoh struct lro_ctrl *lro = &rxr->lro;
1493 1.1 msaitoh #endif /* LRO */
1494 1.1 msaitoh int rsize, error = 0;
1495 1.1 msaitoh #ifdef DEV_NETMAP
1496 1.1 msaitoh struct netmap_adapter *na = NA(rxr->adapter->ifp);
1497 1.1 msaitoh struct netmap_slot *slot;
1498 1.1 msaitoh #endif /* DEV_NETMAP */
1499 1.1 msaitoh
1500 1.1 msaitoh adapter = rxr->adapter;
1501 1.1 msaitoh #ifdef LRO
1502 1.1 msaitoh ifp = adapter->ifp;
1503 1.1 msaitoh #endif /* LRO */
1504 1.1 msaitoh
1505 1.1 msaitoh /* Clear the ring contents */
1506 1.1 msaitoh IXGBE_RX_LOCK(rxr);
1507 1.1 msaitoh #ifdef DEV_NETMAP
1508 1.1 msaitoh /* same as in ixgbe_setup_transmit_ring() */
1509 1.1 msaitoh slot = netmap_reset(na, NR_RX, rxr->me, 0);
1510 1.1 msaitoh #endif /* DEV_NETMAP */
1511 1.1 msaitoh rsize = roundup2(adapter->num_rx_desc *
1512 1.1 msaitoh sizeof(union ixgbe_adv_rx_desc), DBA_ALIGN);
1513 1.1 msaitoh bzero((void *)rxr->rx_base, rsize);
1514 1.1 msaitoh /* Cache the size */
1515 1.1 msaitoh rxr->mbuf_sz = adapter->rx_mbuf_sz;
1516 1.1 msaitoh
1517 1.1 msaitoh /* Free current RX buffer structs and their mbufs */
1518 1.1 msaitoh ixgbe_free_receive_ring(rxr);
1519 1.1 msaitoh
1520 1.1 msaitoh IXGBE_RX_UNLOCK(rxr);
1521 1.1 msaitoh
1522 1.1 msaitoh /* Now reinitialize our supply of jumbo mbufs. The number
1523 1.1 msaitoh * or size of jumbo mbufs may have changed.
1524 1.1 msaitoh */
1525 1.1 msaitoh ixgbe_jcl_reinit(&adapter->jcl_head, rxr->ptag->dt_dmat,
1526 1.1 msaitoh 2 * adapter->num_rx_desc, adapter->rx_mbuf_sz);
1527 1.1 msaitoh
1528 1.1 msaitoh IXGBE_RX_LOCK(rxr);
1529 1.1 msaitoh
1530 1.1 msaitoh /* Now replenish the mbufs */
1531 1.1 msaitoh for (int j = 0; j != rxr->num_desc; ++j) {
1532 1.1 msaitoh struct mbuf *mp;
1533 1.1 msaitoh
1534 1.1 msaitoh rxbuf = &rxr->rx_buffers[j];
1535 1.1 msaitoh #ifdef DEV_NETMAP
1536 1.1 msaitoh /*
1537 1.1 msaitoh * In netmap mode, fill the map and set the buffer
1538 1.1 msaitoh * address in the NIC ring, considering the offset
1539 1.1 msaitoh * between the netmap and NIC rings (see comment in
1540 1.1 msaitoh * ixgbe_setup_transmit_ring() ). No need to allocate
1541 1.1 msaitoh * an mbuf, so end the block with a continue;
1542 1.1 msaitoh */
1543 1.1 msaitoh if (slot) {
1544 1.1 msaitoh int sj = netmap_idx_n2k(&na->rx_rings[rxr->me], j);
1545 1.1 msaitoh uint64_t paddr;
1546 1.1 msaitoh void *addr;
1547 1.1 msaitoh
1548 1.1 msaitoh addr = PNMB(na, slot + sj, &paddr);
1549 1.1 msaitoh netmap_load_map(na, rxr->ptag, rxbuf->pmap, addr);
1550 1.1 msaitoh /* Update descriptor and the cached value */
1551 1.1 msaitoh rxr->rx_base[j].read.pkt_addr = htole64(paddr);
1552 1.1 msaitoh rxbuf->addr = htole64(paddr);
1553 1.1 msaitoh continue;
1554 1.1 msaitoh }
1555 1.1 msaitoh #endif /* DEV_NETMAP */
1556 1.1 msaitoh rxbuf->flags = 0;
1557 1.1 msaitoh rxbuf->buf = ixgbe_getjcl(&adapter->jcl_head, M_NOWAIT,
1558 1.1 msaitoh MT_DATA, M_PKTHDR, adapter->rx_mbuf_sz);
1559 1.1 msaitoh if (rxbuf->buf == NULL) {
1560 1.1 msaitoh error = ENOBUFS;
1561 1.1 msaitoh goto fail;
1562 1.1 msaitoh }
1563 1.1 msaitoh mp = rxbuf->buf;
1564 1.1 msaitoh mp->m_pkthdr.len = mp->m_len = rxr->mbuf_sz;
1565 1.1 msaitoh /* Get the memory mapping */
1566 1.1 msaitoh error = bus_dmamap_load_mbuf(rxr->ptag->dt_dmat,
1567 1.1 msaitoh rxbuf->pmap, mp, BUS_DMA_NOWAIT);
1568 1.1 msaitoh if (error != 0)
1569 1.1 msaitoh goto fail;
1570 1.1 msaitoh bus_dmamap_sync(rxr->ptag->dt_dmat, rxbuf->pmap,
1571 1.1 msaitoh 0, adapter->rx_mbuf_sz, BUS_DMASYNC_PREREAD);
1572 1.1 msaitoh /* Update the descriptor and the cached value */
1573 1.1 msaitoh rxr->rx_base[j].read.pkt_addr =
1574 1.1 msaitoh htole64(rxbuf->pmap->dm_segs[0].ds_addr);
1575 1.1 msaitoh rxbuf->addr = htole64(rxbuf->pmap->dm_segs[0].ds_addr);
1576 1.1 msaitoh }
1577 1.1 msaitoh
1578 1.1 msaitoh
1579 1.1 msaitoh /* Setup our descriptor indices */
1580 1.1 msaitoh rxr->next_to_check = 0;
1581 1.1 msaitoh rxr->next_to_refresh = 0;
1582 1.1 msaitoh rxr->lro_enabled = FALSE;
1583 1.1 msaitoh rxr->rx_copies.ev_count = 0;
1584 1.1 msaitoh rxr->rx_bytes.ev_count = 0;
1585 1.1 msaitoh rxr->vtag_strip = FALSE;
1586 1.1 msaitoh
1587 1.1 msaitoh ixgbe_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
1588 1.1 msaitoh BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1589 1.1 msaitoh
1590 1.1 msaitoh /*
1591 1.1 msaitoh ** Now set up the LRO interface:
1592 1.1 msaitoh */
1593 1.1 msaitoh if (ixgbe_rsc_enable)
1594 1.1 msaitoh ixgbe_setup_hw_rsc(rxr);
1595 1.1 msaitoh #ifdef LRO
1596 1.1 msaitoh else if (ifp->if_capenable & IFCAP_LRO) {
1597 1.1 msaitoh device_t dev = adapter->dev;
1598 1.1 msaitoh int err = tcp_lro_init(lro);
1599 1.1 msaitoh if (err) {
1600 1.1 msaitoh device_printf(dev, "LRO Initialization failed!\n");
1601 1.1 msaitoh goto fail;
1602 1.1 msaitoh }
1603 1.1 msaitoh INIT_DEBUGOUT("RX Soft LRO Initialized\n");
1604 1.1 msaitoh rxr->lro_enabled = TRUE;
1605 1.1 msaitoh lro->ifp = adapter->ifp;
1606 1.1 msaitoh }
1607 1.1 msaitoh #endif /* LRO */
1608 1.1 msaitoh
1609 1.1 msaitoh IXGBE_RX_UNLOCK(rxr);
1610 1.1 msaitoh return (0);
1611 1.1 msaitoh
1612 1.1 msaitoh fail:
1613 1.1 msaitoh ixgbe_free_receive_ring(rxr);
1614 1.1 msaitoh IXGBE_RX_UNLOCK(rxr);
1615 1.1 msaitoh return (error);
1616 1.1 msaitoh }
1617 1.1 msaitoh
1618 1.1 msaitoh /*********************************************************************
1619 1.1 msaitoh *
1620 1.1 msaitoh * Initialize all receive rings.
1621 1.1 msaitoh *
1622 1.1 msaitoh **********************************************************************/
1623 1.1 msaitoh int
1624 1.1 msaitoh ixgbe_setup_receive_structures(struct adapter *adapter)
1625 1.1 msaitoh {
1626 1.1 msaitoh struct rx_ring *rxr = adapter->rx_rings;
1627 1.1 msaitoh int j;
1628 1.1 msaitoh
1629 1.1 msaitoh for (j = 0; j < adapter->num_queues; j++, rxr++)
1630 1.1 msaitoh if (ixgbe_setup_receive_ring(rxr))
1631 1.1 msaitoh goto fail;
1632 1.1 msaitoh
1633 1.1 msaitoh return (0);
1634 1.1 msaitoh fail:
1635 1.1 msaitoh /*
1636 1.1 msaitoh * Free RX buffers allocated so far; we will only handle
1637 1.1 msaitoh * the rings that completed, since the failing case will have
1638 1.1 msaitoh * cleaned up after itself. 'j' failed, so it's the terminus.
1639 1.1 msaitoh */
1640 1.1 msaitoh for (int i = 0; i < j; ++i) {
1641 1.1 msaitoh rxr = &adapter->rx_rings[i];
1642 1.1 msaitoh ixgbe_free_receive_ring(rxr);
1643 1.1 msaitoh }
1644 1.1 msaitoh
1645 1.1 msaitoh return (ENOBUFS);
1646 1.1 msaitoh }
1647 1.1 msaitoh
1648 1.3 msaitoh
1649 1.1 msaitoh /*********************************************************************
1650 1.1 msaitoh *
1651 1.1 msaitoh * Free all receive rings.
1652 1.1 msaitoh *
1653 1.1 msaitoh **********************************************************************/
1654 1.1 msaitoh void
1655 1.1 msaitoh ixgbe_free_receive_structures(struct adapter *adapter)
1656 1.1 msaitoh {
1657 1.1 msaitoh struct rx_ring *rxr = adapter->rx_rings;
1658 1.1 msaitoh
1659 1.1 msaitoh INIT_DEBUGOUT("ixgbe_free_receive_structures: begin");
1660 1.1 msaitoh
1661 1.1 msaitoh for (int i = 0; i < adapter->num_queues; i++, rxr++) {
1662 1.1 msaitoh #ifdef LRO
1663 1.1 msaitoh struct lro_ctrl *lro = &rxr->lro;
1664 1.1 msaitoh #endif /* LRO */
1665 1.1 msaitoh ixgbe_free_receive_buffers(rxr);
1666 1.1 msaitoh #ifdef LRO
1667 1.1 msaitoh /* Free LRO memory */
1668 1.1 msaitoh tcp_lro_free(lro);
1669 1.1 msaitoh #endif /* LRO */
1670 1.1 msaitoh /* Free the ring memory as well */
1671 1.1 msaitoh ixgbe_dma_free(adapter, &rxr->rxdma);
1672 1.1 msaitoh IXGBE_RX_LOCK_DESTROY(rxr);
1673 1.1 msaitoh }
1674 1.1 msaitoh
1675 1.1 msaitoh free(adapter->rx_rings, M_DEVBUF);
1676 1.1 msaitoh }
1677 1.1 msaitoh
1678 1.1 msaitoh
1679 1.1 msaitoh /*********************************************************************
1680 1.1 msaitoh *
1681 1.1 msaitoh * Free receive ring data structures
1682 1.1 msaitoh *
1683 1.1 msaitoh **********************************************************************/
1684 1.1 msaitoh static void
1685 1.1 msaitoh ixgbe_free_receive_buffers(struct rx_ring *rxr)
1686 1.1 msaitoh {
1687 1.1 msaitoh struct adapter *adapter = rxr->adapter;
1688 1.1 msaitoh struct ixgbe_rx_buf *rxbuf;
1689 1.1 msaitoh
1690 1.1 msaitoh INIT_DEBUGOUT("ixgbe_free_receive_buffers: begin");
1691 1.1 msaitoh
1692 1.1 msaitoh /* Cleanup any existing buffers */
1693 1.1 msaitoh if (rxr->rx_buffers != NULL) {
1694 1.1 msaitoh for (int i = 0; i < adapter->num_rx_desc; i++) {
1695 1.1 msaitoh rxbuf = &rxr->rx_buffers[i];
1696 1.1 msaitoh if (rxbuf->buf != NULL) {
1697 1.1 msaitoh bus_dmamap_sync(rxr->ptag->dt_dmat,
1698 1.1 msaitoh rxbuf->pmap, 0, rxbuf->buf->m_pkthdr.len,
1699 1.1 msaitoh BUS_DMASYNC_POSTREAD);
1700 1.1 msaitoh ixgbe_dmamap_unload(rxr->ptag, rxbuf->pmap);
1701 1.1 msaitoh rxbuf->buf->m_flags |= M_PKTHDR;
1702 1.1 msaitoh m_freem(rxbuf->buf);
1703 1.1 msaitoh }
1704 1.1 msaitoh rxbuf->buf = NULL;
1705 1.1 msaitoh if (rxbuf->pmap != NULL) {
1706 1.1 msaitoh ixgbe_dmamap_destroy(rxr->ptag, rxbuf->pmap);
1707 1.1 msaitoh rxbuf->pmap = NULL;
1708 1.1 msaitoh }
1709 1.1 msaitoh }
1710 1.1 msaitoh if (rxr->rx_buffers != NULL) {
1711 1.1 msaitoh free(rxr->rx_buffers, M_DEVBUF);
1712 1.1 msaitoh rxr->rx_buffers = NULL;
1713 1.1 msaitoh }
1714 1.1 msaitoh }
1715 1.1 msaitoh
1716 1.1 msaitoh if (rxr->ptag != NULL) {
1717 1.1 msaitoh ixgbe_dma_tag_destroy(rxr->ptag);
1718 1.1 msaitoh rxr->ptag = NULL;
1719 1.1 msaitoh }
1720 1.1 msaitoh
1721 1.1 msaitoh return;
1722 1.1 msaitoh }
1723 1.1 msaitoh
1724 1.1 msaitoh static __inline void
1725 1.1 msaitoh ixgbe_rx_input(struct rx_ring *rxr, struct ifnet *ifp, struct mbuf *m, u32 ptype)
1726 1.1 msaitoh {
1727 1.1 msaitoh int s;
1728 1.1 msaitoh
1729 1.1 msaitoh #ifdef LRO
1730 1.1 msaitoh struct adapter *adapter = ifp->if_softc;
1731 1.1 msaitoh struct ethercom *ec = &adapter->osdep.ec;
1732 1.1 msaitoh
1733 1.1 msaitoh /*
1734 1.1 msaitoh * At the moment LRO is only for IP/TCP packets, and the TCP checksum
1735 1.1 msaitoh * of the packet must have been computed by hardware. Also the packet
1736 1.1 msaitoh * must not have a VLAN tag in its ethernet header. For IPv6 we do not yet support extension headers.
1737 1.1 msaitoh */
1738 1.1 msaitoh if (rxr->lro_enabled &&
1739 1.1 msaitoh (ec->ec_capenable & ETHERCAP_VLAN_HWTAGGING) != 0 &&
1740 1.1 msaitoh (ptype & IXGBE_RXDADV_PKTTYPE_ETQF) == 0 &&
1741 1.1 msaitoh ((ptype & (IXGBE_RXDADV_PKTTYPE_IPV4 | IXGBE_RXDADV_PKTTYPE_TCP)) ==
1742 1.1 msaitoh (IXGBE_RXDADV_PKTTYPE_IPV4 | IXGBE_RXDADV_PKTTYPE_TCP) ||
1743 1.1 msaitoh (ptype & (IXGBE_RXDADV_PKTTYPE_IPV6 | IXGBE_RXDADV_PKTTYPE_TCP)) ==
1744 1.1 msaitoh (IXGBE_RXDADV_PKTTYPE_IPV6 | IXGBE_RXDADV_PKTTYPE_TCP)) &&
1745 1.1 msaitoh (m->m_pkthdr.csum_flags & (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) ==
1746 1.1 msaitoh (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) {
1747 1.1 msaitoh /*
1748 1.1 msaitoh * Send to the stack if:
1749 1.1 msaitoh ** - LRO not enabled, or
1750 1.1 msaitoh ** - no LRO resources, or
1751 1.1 msaitoh ** - lro enqueue fails
1752 1.1 msaitoh */
1753 1.1 msaitoh if (rxr->lro.lro_cnt != 0)
1754 1.1 msaitoh if (tcp_lro_rx(&rxr->lro, m, 0) == 0)
1755 1.1 msaitoh return;
1756 1.1 msaitoh }
1757 1.1 msaitoh #endif /* LRO */
1758 1.1 msaitoh
1759 1.1 msaitoh IXGBE_RX_UNLOCK(rxr);
1760 1.1 msaitoh
1761 1.1 msaitoh s = splnet();
1762 1.1 msaitoh /* Pass this up to any BPF listeners. */
1763 1.1 msaitoh bpf_mtap(ifp, m);
1764 1.1 msaitoh if_input(ifp, m);
1765 1.1 msaitoh splx(s);
1766 1.1 msaitoh
1767 1.1 msaitoh IXGBE_RX_LOCK(rxr);
1768 1.1 msaitoh }
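
/*
** (A note on the locking above, our reading of the code: the RX ring
** lock is dropped around if_input() so the ring is not held while the
** stack processes the packet, and the splnet()/splx() pair brackets
** the legacy NetBSD stack entry points, which still expect to run at
** IPL_NET.)
*/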
1769 1.1 msaitoh
1770 1.1 msaitoh static __inline void
1771 1.1 msaitoh ixgbe_rx_discard(struct rx_ring *rxr, int i)
1772 1.1 msaitoh {
1773 1.1 msaitoh struct ixgbe_rx_buf *rbuf;
1774 1.1 msaitoh
1775 1.1 msaitoh rbuf = &rxr->rx_buffers[i];
1776 1.1 msaitoh
1777 1.1 msaitoh
1778 1.1 msaitoh /*
1779 1.1 msaitoh ** With advanced descriptors the writeback
1780 1.1 msaitoh ** clobbers the buffer addresses, so it's easier
1781 1.1 msaitoh ** to just free the existing mbufs and take
1782 1.1 msaitoh ** the normal refresh path to get new buffers
1783 1.1 msaitoh ** and mapping.
1784 1.1 msaitoh */
1785 1.1 msaitoh
1786 1.1 msaitoh if (rbuf->fmp != NULL) { /* Partial chain ? */
1787 1.1 msaitoh rbuf->fmp->m_flags |= M_PKTHDR;
1788 1.1 msaitoh m_freem(rbuf->fmp);
1789 1.1 msaitoh rbuf->fmp = NULL;
1790 1.1 msaitoh rbuf->buf = NULL; /* rbuf->buf is part of fmp's chain */
1791 1.1 msaitoh } else if (rbuf->buf) {
1792 1.1 msaitoh m_free(rbuf->buf);
1793 1.1 msaitoh rbuf->buf = NULL;
1794 1.1 msaitoh }
1795 1.4 msaitoh ixgbe_dmamap_unload(rxr->ptag, rbuf->pmap);
1796 1.1 msaitoh
1797 1.1 msaitoh rbuf->flags = 0;
1798 1.1 msaitoh
1799 1.1 msaitoh return;
1800 1.1 msaitoh }
1801 1.1 msaitoh
1802 1.1 msaitoh
1803 1.1 msaitoh /*********************************************************************
1804 1.1 msaitoh *
1805 1.1 msaitoh * This routine executes in interrupt context. It replenishes
1806 1.1 msaitoh * the mbufs in the descriptor ring and sends data which has been
1807 1.1 msaitoh * DMA'ed into host memory up to the upper layer.
1808 1.1 msaitoh *
1809 1.1 msaitoh * Return TRUE for more work, FALSE for all clean.
1810 1.1 msaitoh *********************************************************************/
1811 1.1 msaitoh bool
1812 1.1 msaitoh ixgbe_rxeof(struct ix_queue *que)
1813 1.1 msaitoh {
1814 1.1 msaitoh struct adapter *adapter = que->adapter;
1815 1.1 msaitoh struct rx_ring *rxr = que->rxr;
1816 1.1 msaitoh struct ifnet *ifp = adapter->ifp;
1817 1.1 msaitoh #ifdef LRO
1818 1.1 msaitoh struct lro_ctrl *lro = &rxr->lro;
1819 1.1 msaitoh struct lro_entry *queued;
1820 1.1 msaitoh #endif /* LRO */
1821 1.1 msaitoh int i, nextp, processed = 0;
1822 1.1 msaitoh u32 staterr = 0;
1823 1.1 msaitoh u16 count = rxr->process_limit;
1824 1.1 msaitoh union ixgbe_adv_rx_desc *cur;
1825 1.1 msaitoh struct ixgbe_rx_buf *rbuf, *nbuf;
1826 1.1 msaitoh #ifdef RSS
1827 1.1 msaitoh u16 pkt_info;
1828 1.1 msaitoh #endif
1829 1.1 msaitoh
1830 1.1 msaitoh IXGBE_RX_LOCK(rxr);
1831 1.1 msaitoh
1832 1.1 msaitoh #ifdef DEV_NETMAP
1833 1.1 msaitoh /* Same as the txeof routine: wakeup clients on intr. */
1834 1.1 msaitoh if (netmap_rx_irq(ifp, rxr->me, &processed)) {
1835 1.1 msaitoh IXGBE_RX_UNLOCK(rxr);
1836 1.1 msaitoh return (FALSE);
1837 1.1 msaitoh }
1838 1.1 msaitoh #endif /* DEV_NETMAP */
1839 1.1 msaitoh
1840 1.1 msaitoh for (i = rxr->next_to_check; count != 0;) {
1841 1.1 msaitoh struct mbuf *sendmp, *mp;
1842 1.1 msaitoh u32 rsc, ptype;
1843 1.1 msaitoh u16 len;
1844 1.1 msaitoh u16 vtag = 0;
1845 1.1 msaitoh bool eop;
1846 1.1 msaitoh
1847 1.1 msaitoh /* Sync the ring. */
1848 1.1 msaitoh ixgbe_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
1849 1.1 msaitoh BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1850 1.1 msaitoh
1851 1.1 msaitoh cur = &rxr->rx_base[i];
1852 1.1 msaitoh staterr = le32toh(cur->wb.upper.status_error);
1853 1.1 msaitoh #ifdef RSS
1854 1.1 msaitoh pkt_info = le16toh(cur->wb.lower.lo_dword.hs_rss.pkt_info);
1855 1.1 msaitoh #endif
1856 1.1 msaitoh
1857 1.1 msaitoh if ((staterr & IXGBE_RXD_STAT_DD) == 0)
1858 1.1 msaitoh break;
1859 1.1 msaitoh if ((ifp->if_flags & IFF_RUNNING) == 0)
1860 1.1 msaitoh break;
1861 1.1 msaitoh
1862 1.1 msaitoh count--;
1863 1.1 msaitoh sendmp = NULL;
1864 1.1 msaitoh nbuf = NULL;
1865 1.1 msaitoh rsc = 0;
1866 1.1 msaitoh cur->wb.upper.status_error = 0;
1867 1.1 msaitoh rbuf = &rxr->rx_buffers[i];
1868 1.1 msaitoh mp = rbuf->buf;
1869 1.1 msaitoh
1870 1.1 msaitoh len = le16toh(cur->wb.upper.length);
1871 1.1 msaitoh ptype = le32toh(cur->wb.lower.lo_dword.data) &
1872 1.1 msaitoh IXGBE_RXDADV_PKTTYPE_MASK;
1873 1.1 msaitoh eop = ((staterr & IXGBE_RXD_STAT_EOP) != 0);
1874 1.1 msaitoh
1875 1.1 msaitoh /* Make sure bad packets are discarded */
1876 1.1 msaitoh if (eop && (staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK) != 0) {
1877 1.3 msaitoh #if __FreeBSD_version >= 1100036
1878 1.4 msaitoh if (IXGBE_IS_VF(adapter))
1879 1.4 msaitoh if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
1880 1.3 msaitoh #endif
1881 1.1 msaitoh rxr->rx_discarded.ev_count++;
1882 1.1 msaitoh ixgbe_rx_discard(rxr, i);
1883 1.1 msaitoh goto next_desc;
1884 1.1 msaitoh }
1885 1.1 msaitoh
1886 1.1 msaitoh /*
1887 1.1 msaitoh ** On the 82599, which supports a hardware
1888 1.1 msaitoh ** LRO (called HW RSC), packets need
1889 1.1 msaitoh ** not be fragmented across sequential
1890 1.1 msaitoh ** descriptors; rather, the next descriptor
1891 1.1 msaitoh ** is indicated in bits of the descriptor.
1892 1.1 msaitoh ** This also means that we might process
1893 1.1 msaitoh ** more than one packet at a time, something
1894 1.1 msaitoh ** that has never been true before; it
1895 1.1 msaitoh ** required eliminating global chain pointers
1896 1.1 msaitoh ** in favor of what we are doing here. -jfv
1897 1.1 msaitoh */
1898 1.1 msaitoh if (!eop) {
1899 1.1 msaitoh /*
1900 1.1 msaitoh ** Figure out the next descriptor
1901 1.1 msaitoh ** of this frame.
1902 1.1 msaitoh */
1903 1.1 msaitoh if (rxr->hw_rsc == TRUE) {
1904 1.1 msaitoh rsc = ixgbe_rsc_count(cur);
1905 1.1 msaitoh rxr->rsc_num += (rsc - 1);
1906 1.1 msaitoh }
1907 1.1 msaitoh if (rsc) { /* Get hardware index */
1908 1.1 msaitoh nextp = ((staterr &
1909 1.1 msaitoh IXGBE_RXDADV_NEXTP_MASK) >>
1910 1.1 msaitoh IXGBE_RXDADV_NEXTP_SHIFT);
1911 1.1 msaitoh } else { /* Just sequential */
1912 1.1 msaitoh nextp = i + 1;
1913 1.1 msaitoh if (nextp == adapter->num_rx_desc)
1914 1.1 msaitoh nextp = 0;
1915 1.1 msaitoh }
1916 1.1 msaitoh nbuf = &rxr->rx_buffers[nextp];
1917 1.1 msaitoh prefetch(nbuf);
1918 1.1 msaitoh }
1919 1.1 msaitoh /*
1920 1.1 msaitoh ** Rather than using the fmp/lmp global pointers
1921 1.1 msaitoh ** we now keep the head of a packet chain in the
1922 1.1 msaitoh ** buffer struct and pass this along from one
1923 1.1 msaitoh ** descriptor to the next, until we get EOP.
1924 1.1 msaitoh */
1925 1.1 msaitoh mp->m_len = len;
1926 1.1 msaitoh /*
1927 1.1 msaitoh ** See if there is a stored head from a previous
1928 1.1 msaitoh ** fragment; that determines whether this is a secondary frag
1929 1.1 msaitoh */
1930 1.1 msaitoh sendmp = rbuf->fmp;
1931 1.1 msaitoh if (sendmp != NULL) { /* secondary frag */
1932 1.1 msaitoh rbuf->buf = rbuf->fmp = NULL;
1933 1.1 msaitoh mp->m_flags &= ~M_PKTHDR;
1934 1.1 msaitoh sendmp->m_pkthdr.len += mp->m_len;
1935 1.1 msaitoh } else {
1936 1.1 msaitoh /*
1937 1.1 msaitoh * Optimize. This might be a small packet,
1938 1.1 msaitoh * maybe just a TCP ACK. Do a fast copy that
1939 1.1 msaitoh * is cache aligned into a new mbuf, and
1940 1.1 msaitoh * leave the old mbuf+cluster for re-use.
1941 1.1 msaitoh */
1942 1.1 msaitoh if (eop && len <= IXGBE_RX_COPY_LEN) {
1943 1.1 msaitoh sendmp = m_gethdr(M_NOWAIT, MT_DATA);
1944 1.1 msaitoh if (sendmp != NULL) {
1945 1.1 msaitoh sendmp->m_data +=
1946 1.1 msaitoh IXGBE_RX_COPY_ALIGN;
1947 1.1 msaitoh ixgbe_bcopy(mp->m_data,
1948 1.1 msaitoh sendmp->m_data, len);
1949 1.1 msaitoh sendmp->m_len = len;
1950 1.1 msaitoh rxr->rx_copies.ev_count++;
1951 1.1 msaitoh rbuf->flags |= IXGBE_RX_COPY;
1952 1.1 msaitoh }
1953 1.1 msaitoh }
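/*
** Illustrative note: with the stock threshold
** (IXGBE_RX_COPY_LEN is 160 in ixgbe.h, as far as
** we know), a ~64-byte TCP ACK is copied into a
** small fresh mbuf while the original cluster keeps
** its DMA mapping; the IXGBE_RX_COPY flag then lets
** ixgbe_refresh_mbufs() skip the busdma reload and
** reuse the cached descriptor address.
*/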
1954 1.1 msaitoh if (sendmp == NULL) {
1955 1.1 msaitoh rbuf->buf = rbuf->fmp = NULL;
1956 1.1 msaitoh sendmp = mp;
1957 1.1 msaitoh }
1958 1.1 msaitoh
1959 1.1 msaitoh /* first desc of a non-packet-split chain */
1960 1.1 msaitoh sendmp->m_flags |= M_PKTHDR;
1961 1.1 msaitoh sendmp->m_pkthdr.len = mp->m_len;
1962 1.1 msaitoh }
1963 1.1 msaitoh ++processed;
1964 1.1 msaitoh
1965 1.1 msaitoh /* Pass the head pointer on */
1966 1.1 msaitoh if (eop == 0) {
1967 1.1 msaitoh nbuf->fmp = sendmp;
1968 1.1 msaitoh sendmp = NULL;
1969 1.1 msaitoh mp->m_next = nbuf->buf;
1970 1.1 msaitoh } else { /* Sending this frame */
1971 1.1 msaitoh m_set_rcvif(sendmp, ifp);
1972 1.1 msaitoh ifp->if_ipackets++;
1973 1.1 msaitoh rxr->rx_packets.ev_count++;
1974 1.1 msaitoh /* capture data for AIM */
1975 1.1 msaitoh rxr->bytes += sendmp->m_pkthdr.len;
1976 1.1 msaitoh rxr->rx_bytes.ev_count += sendmp->m_pkthdr.len;
1977 1.1 msaitoh /* Process vlan info */
1978 1.1 msaitoh if ((rxr->vtag_strip) &&
1979 1.1 msaitoh (staterr & IXGBE_RXD_STAT_VP))
1980 1.1 msaitoh vtag = le16toh(cur->wb.upper.vlan);
1981 1.1 msaitoh if (vtag) {
1982 1.1 msaitoh VLAN_INPUT_TAG(ifp, sendmp, vtag,
1983 1.1 msaitoh printf("%s: could not apply VLAN "
1984 1.1 msaitoh "tag", __func__));
1985 1.1 msaitoh }
1986 1.1 msaitoh if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) {
1987 1.1 msaitoh ixgbe_rx_checksum(staterr, sendmp, ptype,
1988 1.3 msaitoh &adapter->stats.pf);
1989 1.1 msaitoh }
1990 1.1 msaitoh #if __FreeBSD_version >= 800000
1991 1.1 msaitoh #ifdef RSS
1992 1.1 msaitoh sendmp->m_pkthdr.flowid =
1993 1.1 msaitoh le32toh(cur->wb.lower.hi_dword.rss);
1994 1.4 msaitoh #if __FreeBSD_version < 1100054
1995 1.4 msaitoh sendmp->m_flags |= M_FLOWID;
1996 1.4 msaitoh #endif
1997 1.1 msaitoh switch (pkt_info & IXGBE_RXDADV_RSSTYPE_MASK) {
1998 1.1 msaitoh case IXGBE_RXDADV_RSSTYPE_IPV4_TCP:
1999 1.1 msaitoh M_HASHTYPE_SET(sendmp, M_HASHTYPE_RSS_TCP_IPV4);
2000 1.1 msaitoh break;
2001 1.1 msaitoh case IXGBE_RXDADV_RSSTYPE_IPV4:
2002 1.1 msaitoh M_HASHTYPE_SET(sendmp, M_HASHTYPE_RSS_IPV4);
2003 1.1 msaitoh break;
2004 1.1 msaitoh case IXGBE_RXDADV_RSSTYPE_IPV6_TCP:
2005 1.1 msaitoh M_HASHTYPE_SET(sendmp, M_HASHTYPE_RSS_TCP_IPV6);
2006 1.1 msaitoh break;
2007 1.1 msaitoh case IXGBE_RXDADV_RSSTYPE_IPV6_EX:
2008 1.1 msaitoh M_HASHTYPE_SET(sendmp, M_HASHTYPE_RSS_IPV6_EX);
2009 1.1 msaitoh break;
2010 1.1 msaitoh case IXGBE_RXDADV_RSSTYPE_IPV6:
2011 1.1 msaitoh M_HASHTYPE_SET(sendmp, M_HASHTYPE_RSS_IPV6);
2012 1.1 msaitoh break;
2013 1.1 msaitoh case IXGBE_RXDADV_RSSTYPE_IPV6_TCP_EX:
2014 1.1 msaitoh M_HASHTYPE_SET(sendmp, M_HASHTYPE_RSS_TCP_IPV6_EX);
2015 1.1 msaitoh break;
2016 1.1 msaitoh case IXGBE_RXDADV_RSSTYPE_IPV4_UDP:
2017 1.1 msaitoh M_HASHTYPE_SET(sendmp, M_HASHTYPE_RSS_UDP_IPV4);
2018 1.1 msaitoh break;
2019 1.1 msaitoh case IXGBE_RXDADV_RSSTYPE_IPV6_UDP:
2020 1.1 msaitoh M_HASHTYPE_SET(sendmp, M_HASHTYPE_RSS_UDP_IPV6);
2021 1.1 msaitoh break;
2022 1.1 msaitoh case IXGBE_RXDADV_RSSTYPE_IPV6_UDP_EX:
2023 1.1 msaitoh M_HASHTYPE_SET(sendmp, M_HASHTYPE_RSS_UDP_IPV6_EX);
2024 1.1 msaitoh break;
2025 1.1 msaitoh default:
2026 1.1 msaitoh M_HASHTYPE_SET(sendmp, M_HASHTYPE_OPAQUE);
2027 1.1 msaitoh }
2028 1.1 msaitoh #else /* RSS */
2029 1.1 msaitoh sendmp->m_pkthdr.flowid = que->msix;
2030 1.4 msaitoh #if __FreeBSD_version >= 1100054
2031 1.1 msaitoh M_HASHTYPE_SET(sendmp, M_HASHTYPE_OPAQUE);
2032 1.4 msaitoh #else
2033 1.4 msaitoh sendmp->m_flags |= M_FLOWID;
2034 1.4 msaitoh #endif
2035 1.1 msaitoh #endif /* RSS */
2036 1.1 msaitoh #endif /* FreeBSD_version */
2037 1.1 msaitoh }
2038 1.1 msaitoh next_desc:
2039 1.1 msaitoh ixgbe_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
2040 1.1 msaitoh BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2041 1.1 msaitoh
2042 1.1 msaitoh /* Advance our pointers to the next descriptor. */
2043 1.1 msaitoh if (++i == rxr->num_desc)
2044 1.1 msaitoh i = 0;
2045 1.1 msaitoh
2046 1.1 msaitoh /* Now send to the stack or do LRO */
2047 1.1 msaitoh if (sendmp != NULL) {
2048 1.1 msaitoh rxr->next_to_check = i;
2049 1.1 msaitoh ixgbe_rx_input(rxr, ifp, sendmp, ptype);
2050 1.1 msaitoh i = rxr->next_to_check;
2051 1.1 msaitoh }
2052 1.1 msaitoh
2053 1.1 msaitoh /* Every 8 descriptors we go to refresh mbufs */
2054 1.1 msaitoh if (processed == 8) {
2055 1.1 msaitoh ixgbe_refresh_mbufs(rxr, i);
2056 1.1 msaitoh processed = 0;
2057 1.1 msaitoh }
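/*
** (Batching note, our reading of the code: refreshing in
** groups of eight amortizes the RDT tail-register write,
** an uncached MMIO access, over several descriptors
** instead of paying for it on every packet.)
*/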
2058 1.1 msaitoh }
2059 1.1 msaitoh
2060 1.1 msaitoh /* Refresh any remaining buf structs */
2061 1.1 msaitoh if (ixgbe_rx_unrefreshed(rxr))
2062 1.1 msaitoh ixgbe_refresh_mbufs(rxr, i);
2063 1.1 msaitoh
2064 1.1 msaitoh rxr->next_to_check = i;
2065 1.1 msaitoh
2066 1.1 msaitoh #ifdef LRO
2067 1.1 msaitoh /*
2068 1.1 msaitoh * Flush any outstanding LRO work
2069 1.1 msaitoh */
2070 1.1 msaitoh while ((queued = SLIST_FIRST(&lro->lro_active)) != NULL) {
2071 1.1 msaitoh SLIST_REMOVE_HEAD(&lro->lro_active, next);
2072 1.1 msaitoh tcp_lro_flush(lro, queued);
2073 1.1 msaitoh }
2074 1.1 msaitoh #endif /* LRO */
2075 1.1 msaitoh
2076 1.1 msaitoh IXGBE_RX_UNLOCK(rxr);
2077 1.1 msaitoh
2078 1.1 msaitoh /*
2079 1.1 msaitoh ** Still have cleaning to do?
2080 1.1 msaitoh */
2081 1.1 msaitoh if ((staterr & IXGBE_RXD_STAT_DD) != 0)
2082 1.1 msaitoh return true;
2083 1.1 msaitoh else
2084 1.1 msaitoh return false;
2085 1.1 msaitoh }
2086 1.1 msaitoh
2087 1.1 msaitoh
2088 1.1 msaitoh /*********************************************************************
2089 1.1 msaitoh *
2090 1.1 msaitoh * Verify that the hardware indicated that the checksum is valid.
2091 1.1 msaitoh * Inform the stack about the checksum status so that the stack
2092 1.1 msaitoh * doesn't spend time verifying the checksum itself.
2093 1.1 msaitoh *
2094 1.1 msaitoh *********************************************************************/
2095 1.1 msaitoh static void
2096 1.1 msaitoh ixgbe_rx_checksum(u32 staterr, struct mbuf * mp, u32 ptype,
2097 1.1 msaitoh struct ixgbe_hw_stats *stats)
2098 1.1 msaitoh {
2099 1.1 msaitoh u16 status = (u16) staterr;
2100 1.1 msaitoh u8 errors = (u8) (staterr >> 24);
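/*
** Layout note (a sketch of the descriptor format as we understand
** it): in the writeback status_error dword the low 16 bits carry
** the IXGBE_RXD_STAT_* bits and bits 24-31 carry the byte-sized
** IXGBE_RXD_ERR_* codes, which is why 'errors' is taken from
** staterr >> 24 before the IXGBE_RXD_ERR_* tests below.
*/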
2101 1.1 msaitoh #if 0
2102 1.1 msaitoh bool sctp = FALSE;
2103 1.1 msaitoh
2104 1.1 msaitoh if ((ptype & IXGBE_RXDADV_PKTTYPE_ETQF) == 0 &&
2105 1.1 msaitoh (ptype & IXGBE_RXDADV_PKTTYPE_SCTP) != 0)
2106 1.1 msaitoh sctp = TRUE;
2107 1.1 msaitoh #endif
2108 1.1 msaitoh
2109 1.1 msaitoh if (status & IXGBE_RXD_STAT_IPCS) {
2110 1.1 msaitoh stats->ipcs.ev_count++;
2111 1.1 msaitoh if (!(errors & IXGBE_RXD_ERR_IPE)) {
2112 1.1 msaitoh /* IP Checksum Good */
2113 1.1 msaitoh mp->m_pkthdr.csum_flags = M_CSUM_IPv4;
2114 1.1 msaitoh
2115 1.1 msaitoh } else {
2116 1.1 msaitoh stats->ipcs_bad.ev_count++;
2117 1.1 msaitoh mp->m_pkthdr.csum_flags = M_CSUM_IPv4|M_CSUM_IPv4_BAD;
2118 1.1 msaitoh }
2119 1.1 msaitoh }
2120 1.1 msaitoh if (status & IXGBE_RXD_STAT_L4CS) {
2121 1.1 msaitoh stats->l4cs.ev_count++;
2122 1.1 msaitoh int type = M_CSUM_TCPv4|M_CSUM_TCPv6|M_CSUM_UDPv4|M_CSUM_UDPv6;
2123 1.1 msaitoh if (!(errors & IXGBE_RXD_ERR_TCPE)) {
2124 1.1 msaitoh mp->m_pkthdr.csum_flags |= type;
2125 1.1 msaitoh } else {
2126 1.1 msaitoh stats->l4cs_bad.ev_count++;
2127 1.1 msaitoh mp->m_pkthdr.csum_flags |= type | M_CSUM_TCP_UDP_BAD;
2128 1.1 msaitoh }
2129 1.1 msaitoh }
2130 1.1 msaitoh return;
2131 1.1 msaitoh }
2132 1.1 msaitoh
2133 1.1 msaitoh
2134 1.1 msaitoh /********************************************************************
2135 1.1 msaitoh * Manage DMA'able memory.
2136 1.1 msaitoh *******************************************************************/
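
/*
** A minimal usage sketch (illustrative, mirroring the calls below):
** the NetBSD bus_dma sequence wrapped by ixgbe_dma_malloc() is
**
**	ixgbe_dma_tag_create()   -> dma->dma_tag
**	bus_dmamem_alloc()       -> dma->dma_seg
**	bus_dmamem_map()         -> dma->dma_vaddr
**	ixgbe_dmamap_create()    -> dma->dma_map
**	bus_dmamap_load()        -> dma->dma_paddr
**
** and the fail_4..fail_0 labels unwind it in exactly the
** reverse order.
*/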
2137 1.1 msaitoh
2138 1.1 msaitoh int
2139 1.1 msaitoh ixgbe_dma_malloc(struct adapter *adapter, const bus_size_t size,
2140 1.1 msaitoh struct ixgbe_dma_alloc *dma, const int mapflags)
2141 1.1 msaitoh {
2142 1.1 msaitoh device_t dev = adapter->dev;
2143 1.1 msaitoh int r, rsegs;
2144 1.1 msaitoh
2145 1.1 msaitoh r = ixgbe_dma_tag_create(adapter->osdep.dmat, /* parent */
2146 1.1 msaitoh DBA_ALIGN, 0, /* alignment, bounds */
2147 1.1 msaitoh size, /* maxsize */
2148 1.1 msaitoh 1, /* nsegments */
2149 1.1 msaitoh size, /* maxsegsize */
2150 1.1 msaitoh BUS_DMA_ALLOCNOW, /* flags */
2151 1.1 msaitoh &dma->dma_tag);
2152 1.1 msaitoh if (r != 0) {
2153 1.1 msaitoh aprint_error_dev(dev,
2154 1.1 msaitoh "%s: ixgbe_dma_tag_create failed; error %d\n", __func__, r);
2155 1.1 msaitoh goto fail_0;
2156 1.1 msaitoh }
2157 1.1 msaitoh
2158 1.1 msaitoh r = bus_dmamem_alloc(dma->dma_tag->dt_dmat,
2159 1.1 msaitoh size,
2160 1.1 msaitoh dma->dma_tag->dt_alignment,
2161 1.1 msaitoh dma->dma_tag->dt_boundary,
2162 1.1 msaitoh &dma->dma_seg, 1, &rsegs, BUS_DMA_NOWAIT);
2163 1.1 msaitoh if (r != 0) {
2164 1.1 msaitoh aprint_error_dev(dev,
2165 1.1 msaitoh "%s: bus_dmamem_alloc failed; error %d\n", __func__, r);
2166 1.1 msaitoh goto fail_1;
2167 1.1 msaitoh }
2168 1.1 msaitoh
2169 1.1 msaitoh r = bus_dmamem_map(dma->dma_tag->dt_dmat, &dma->dma_seg, rsegs,
2170 1.1 msaitoh size, &dma->dma_vaddr, BUS_DMA_NOWAIT);
2171 1.1 msaitoh if (r != 0) {
2172 1.1 msaitoh aprint_error_dev(dev, "%s: bus_dmamem_map failed; error %d\n",
2173 1.1 msaitoh __func__, r);
2174 1.1 msaitoh goto fail_2;
2175 1.1 msaitoh }
2176 1.1 msaitoh
2177 1.1 msaitoh r = ixgbe_dmamap_create(dma->dma_tag, 0, &dma->dma_map);
2178 1.1 msaitoh if (r != 0) {
2179 1.1 msaitoh aprint_error_dev(dev, "%s: bus_dmamem_map failed; error %d\n",
2180 1.1 msaitoh __func__, r);
2181 1.1 msaitoh goto fail_3;
2182 1.1 msaitoh }
2183 1.1 msaitoh
2184 1.1 msaitoh r = bus_dmamap_load(dma->dma_tag->dt_dmat, dma->dma_map, dma->dma_vaddr,
2185 1.1 msaitoh size,
2186 1.1 msaitoh NULL,
2187 1.1 msaitoh mapflags | BUS_DMA_NOWAIT);
2188 1.1 msaitoh if (r != 0) {
2189 1.1 msaitoh aprint_error_dev(dev, "%s: bus_dmamap_load failed; error %d\n",
2190 1.1 msaitoh __func__, r);
2191 1.1 msaitoh goto fail_4;
2192 1.1 msaitoh }
2193 1.1 msaitoh dma->dma_paddr = dma->dma_map->dm_segs[0].ds_addr;
2194 1.1 msaitoh dma->dma_size = size;
2195 1.1 msaitoh return 0;
2196 1.1 msaitoh fail_4:
2197 1.1 msaitoh ixgbe_dmamap_destroy(dma->dma_tag, dma->dma_map);
2198 1.1 msaitoh fail_3:
2199 1.1 msaitoh bus_dmamem_unmap(dma->dma_tag->dt_dmat, dma->dma_vaddr, size);
2200 1.1 msaitoh fail_2:
2201 1.1 msaitoh bus_dmamem_free(dma->dma_tag->dt_dmat, &dma->dma_seg, rsegs);
2202 1.1 msaitoh fail_1:
2203 1.1 msaitoh ixgbe_dma_tag_destroy(dma->dma_tag);
2204 1.1 msaitoh fail_0:
2205 1.1 msaitoh return r;
2206 1.1 msaitoh }
2207 1.1 msaitoh
2208 1.3 msaitoh void
2209 1.1 msaitoh ixgbe_dma_free(struct adapter *adapter, struct ixgbe_dma_alloc *dma)
2210 1.1 msaitoh {
2211 1.1 msaitoh bus_dmamap_sync(dma->dma_tag->dt_dmat, dma->dma_map, 0, dma->dma_size,
2212 1.1 msaitoh BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2213 1.1 msaitoh ixgbe_dmamap_unload(dma->dma_tag, dma->dma_map);
2214 1.1 msaitoh bus_dmamem_free(dma->dma_tag->dt_dmat, &dma->dma_seg, 1);
2215 1.1 msaitoh ixgbe_dma_tag_destroy(dma->dma_tag);
2216 1.1 msaitoh }
2217 1.1 msaitoh
2218 1.1 msaitoh
2219 1.1 msaitoh /*********************************************************************
2220 1.1 msaitoh *
2221 1.1 msaitoh * Allocate memory for the transmit and receive rings, and then
2222 1.1 msaitoh * the descriptors associated with each, called only once at attach.
2223 1.1 msaitoh *
2224 1.1 msaitoh **********************************************************************/
2225 1.1 msaitoh int
2226 1.1 msaitoh ixgbe_allocate_queues(struct adapter *adapter)
2227 1.1 msaitoh {
2228 1.1 msaitoh device_t dev = adapter->dev;
2229 1.1 msaitoh struct ix_queue *que;
2230 1.1 msaitoh struct tx_ring *txr;
2231 1.1 msaitoh struct rx_ring *rxr;
2232 1.1 msaitoh int rsize, tsize, error = IXGBE_SUCCESS;
2233 1.1 msaitoh int txconf = 0, rxconf = 0;
2234 1.1 msaitoh
2235 1.1 msaitoh /* First allocate the top level queue structs */
2236 1.1 msaitoh if (!(adapter->queues =
2237 1.1 msaitoh (struct ix_queue *) malloc(sizeof(struct ix_queue) *
2238 1.1 msaitoh adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
2239 1.1 msaitoh aprint_error_dev(dev, "Unable to allocate queue memory\n");
2240 1.1 msaitoh error = ENOMEM;
2241 1.1 msaitoh goto fail;
2242 1.1 msaitoh }
2243 1.1 msaitoh
2244 1.1 msaitoh /* First allocate the TX ring struct memory */
2245 1.1 msaitoh if (!(adapter->tx_rings =
2246 1.1 msaitoh (struct tx_ring *) malloc(sizeof(struct tx_ring) *
2247 1.1 msaitoh adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
2248 1.1 msaitoh aprint_error_dev(dev, "Unable to allocate TX ring memory\n");
2249 1.1 msaitoh error = ENOMEM;
2250 1.1 msaitoh goto tx_fail;
2251 1.1 msaitoh }
2252 1.1 msaitoh
2253 1.1 msaitoh /* Next allocate the RX */
2254 1.1 msaitoh if (!(adapter->rx_rings =
2255 1.1 msaitoh (struct rx_ring *) malloc(sizeof(struct rx_ring) *
2256 1.1 msaitoh adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
2257 1.1 msaitoh aprint_error_dev(dev, "Unable to allocate RX ring memory\n");
2258 1.1 msaitoh error = ENOMEM;
2259 1.1 msaitoh goto rx_fail;
2260 1.1 msaitoh }
2261 1.1 msaitoh
2262 1.1 msaitoh /* For the ring itself */
2263 1.1 msaitoh tsize = roundup2(adapter->num_tx_desc *
2264 1.1 msaitoh sizeof(union ixgbe_adv_tx_desc), DBA_ALIGN);
2265 1.1 msaitoh
2266 1.1 msaitoh /*
2267 1.1 msaitoh * Now set up the TX queues, txconf is needed to handle the
2268 1.1 msaitoh * possibility that things fail midcourse and we need to
2269 1.1 msaitoh * undo memory gracefully
2270 1.1 msaitoh */
2271 1.1 msaitoh for (int i = 0; i < adapter->num_queues; i++, txconf++) {
2272 1.1 msaitoh /* Set up some basics */
2273 1.1 msaitoh txr = &adapter->tx_rings[i];
2274 1.1 msaitoh txr->adapter = adapter;
2275 1.1 msaitoh txr->me = i;
2276 1.1 msaitoh txr->num_desc = adapter->num_tx_desc;
2277 1.1 msaitoh
2278 1.1 msaitoh /* Initialize the TX side lock */
2279 1.1 msaitoh snprintf(txr->mtx_name, sizeof(txr->mtx_name), "%s:tx(%d)",
2280 1.1 msaitoh device_xname(dev), txr->me);
2281 1.1 msaitoh mutex_init(&txr->tx_mtx, MUTEX_DEFAULT, IPL_NET);
2282 1.1 msaitoh
2283 1.1 msaitoh if (ixgbe_dma_malloc(adapter, tsize,
2284 1.1 msaitoh &txr->txdma, BUS_DMA_NOWAIT)) {
2285 1.1 msaitoh aprint_error_dev(dev,
2286 1.1 msaitoh "Unable to allocate TX Descriptor memory\n");
2287 1.1 msaitoh error = ENOMEM;
2288 1.1 msaitoh goto err_tx_desc;
2289 1.1 msaitoh }
2290 1.1 msaitoh txr->tx_base = (union ixgbe_adv_tx_desc *)txr->txdma.dma_vaddr;
2291 1.1 msaitoh bzero((void *)txr->tx_base, tsize);
2292 1.1 msaitoh
2293 1.1 msaitoh /* Now allocate transmit buffers for the ring */
2294 1.1 msaitoh if (ixgbe_allocate_transmit_buffers(txr)) {
2295 1.1 msaitoh aprint_error_dev(dev,
2296 1.1 msaitoh "Critical Failure setting up transmit buffers\n");
2297 1.1 msaitoh error = ENOMEM;
2298 1.1 msaitoh goto err_tx_desc;
2299 1.1 msaitoh }
2300 1.1 msaitoh #ifndef IXGBE_LEGACY_TX
2301 1.1 msaitoh /* Allocate a buf ring */
2302 1.1 msaitoh txr->br = buf_ring_alloc(IXGBE_BR_SIZE, M_DEVBUF,
2303 1.1 msaitoh M_WAITOK, &txr->tx_mtx);
2304 1.1 msaitoh if (txr->br == NULL) {
2305 1.1 msaitoh aprint_error_dev(dev,
2306 1.1 msaitoh "Critical Failure setting up buf ring\n");
2307 1.1 msaitoh error = ENOMEM;
2308 1.1 msaitoh goto err_tx_desc;
2309 1.1 msaitoh }
2310 1.1 msaitoh #endif
2311 1.1 msaitoh }
2312 1.1 msaitoh
2313 1.1 msaitoh /*
2314 1.1 msaitoh * Next the RX queues...
2315 1.1 msaitoh */
2316 1.1 msaitoh rsize = roundup2(adapter->num_rx_desc *
2317 1.1 msaitoh sizeof(union ixgbe_adv_rx_desc), DBA_ALIGN);
2318 1.1 msaitoh for (int i = 0; i < adapter->num_queues; i++, rxconf++) {
2319 1.1 msaitoh rxr = &adapter->rx_rings[i];
2320 1.1 msaitoh /* Set up some basics */
2321 1.1 msaitoh rxr->adapter = adapter;
2322 1.1 msaitoh rxr->me = i;
2323 1.1 msaitoh rxr->num_desc = adapter->num_rx_desc;
2324 1.1 msaitoh
2325 1.1 msaitoh /* Initialize the RX side lock */
2326 1.1 msaitoh snprintf(rxr->mtx_name, sizeof(rxr->mtx_name), "%s:rx(%d)",
2327 1.1 msaitoh device_xname(dev), rxr->me);
2328 1.1 msaitoh mutex_init(&rxr->rx_mtx, MUTEX_DEFAULT, IPL_NET);
2329 1.1 msaitoh
2330 1.1 msaitoh if (ixgbe_dma_malloc(adapter, rsize,
2331 1.1 msaitoh &rxr->rxdma, BUS_DMA_NOWAIT)) {
2332 1.1 msaitoh aprint_error_dev(dev,
2333 1.1 msaitoh "Unable to allocate RxDescriptor memory\n");
2334 1.1 msaitoh error = ENOMEM;
2335 1.1 msaitoh goto err_rx_desc;
2336 1.1 msaitoh }
2337 1.1 msaitoh rxr->rx_base = (union ixgbe_adv_rx_desc *)rxr->rxdma.dma_vaddr;
2338 1.1 msaitoh bzero((void *)rxr->rx_base, rsize);
2339 1.1 msaitoh
2340 1.1 msaitoh /* Allocate receive buffers for the ring*/
2341 1.1 msaitoh if (ixgbe_allocate_receive_buffers(rxr)) {
2342 1.1 msaitoh aprint_error_dev(dev,
2343 1.1 msaitoh "Critical Failure setting up receive buffers\n");
2344 1.1 msaitoh error = ENOMEM;
2345 1.1 msaitoh goto err_rx_desc;
2346 1.1 msaitoh }
2347 1.1 msaitoh }
2348 1.1 msaitoh
2349 1.1 msaitoh /*
2350 1.1 msaitoh ** Finally set up the queue holding structs
2351 1.1 msaitoh */
2352 1.1 msaitoh for (int i = 0; i < adapter->num_queues; i++) {
2353 1.1 msaitoh que = &adapter->queues[i];
2354 1.1 msaitoh que->adapter = adapter;
2355 1.3 msaitoh que->me = i;
2356 1.1 msaitoh que->txr = &adapter->tx_rings[i];
2357 1.1 msaitoh que->rxr = &adapter->rx_rings[i];
2358 1.1 msaitoh }
2359 1.1 msaitoh
2360 1.1 msaitoh return (0);
2361 1.1 msaitoh
2362 1.1 msaitoh err_rx_desc:
2363 1.1 msaitoh for (rxr = adapter->rx_rings; rxconf > 0; rxr++, rxconf--)
2364 1.1 msaitoh ixgbe_dma_free(adapter, &rxr->rxdma);
2365 1.1 msaitoh err_tx_desc:
2366 1.1 msaitoh for (txr = adapter->tx_rings; txconf > 0; txr++, txconf--)
2367 1.1 msaitoh ixgbe_dma_free(adapter, &txr->txdma);
2368 1.1 msaitoh free(adapter->rx_rings, M_DEVBUF);
2369 1.1 msaitoh rx_fail:
2370 1.1 msaitoh free(adapter->tx_rings, M_DEVBUF);
2371 1.1 msaitoh tx_fail:
2372 1.1 msaitoh free(adapter->queues, M_DEVBUF);
2373 1.1 msaitoh fail:
2374 1.1 msaitoh return (error);
2375 1.1 msaitoh }
2376 1.1 msaitoh
2377