1 1.43 msaitoh /* $NetBSD: ix_txrx.c,v 1.43 2018/05/16 07:51:17 msaitoh Exp $ */
2 1.28 msaitoh
3 1.1 msaitoh /******************************************************************************
4 1.1 msaitoh
5 1.28 msaitoh Copyright (c) 2001-2017, Intel Corporation
6 1.1 msaitoh All rights reserved.
7 1.28 msaitoh
8 1.28 msaitoh Redistribution and use in source and binary forms, with or without
9 1.1 msaitoh modification, are permitted provided that the following conditions are met:
10 1.28 msaitoh
11 1.28 msaitoh 1. Redistributions of source code must retain the above copyright notice,
12 1.1 msaitoh this list of conditions and the following disclaimer.
13 1.28 msaitoh
14 1.28 msaitoh 2. Redistributions in binary form must reproduce the above copyright
15 1.28 msaitoh notice, this list of conditions and the following disclaimer in the
16 1.1 msaitoh documentation and/or other materials provided with the distribution.
17 1.28 msaitoh
18 1.28 msaitoh 3. Neither the name of the Intel Corporation nor the names of its
19 1.28 msaitoh contributors may be used to endorse or promote products derived from
20 1.1 msaitoh this software without specific prior written permission.
21 1.28 msaitoh
22 1.1 msaitoh THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
23 1.28 msaitoh AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 1.28 msaitoh IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 1.28 msaitoh ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
26 1.28 msaitoh LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27 1.28 msaitoh CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28 1.28 msaitoh SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29 1.28 msaitoh INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30 1.28 msaitoh CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31 1.1 msaitoh ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
32 1.1 msaitoh POSSIBILITY OF SUCH DAMAGE.
33 1.1 msaitoh
34 1.1 msaitoh ******************************************************************************/
35 1.39 msaitoh /*$FreeBSD: head/sys/dev/ixgbe/ix_txrx.c 327031 2017-12-20 18:15:06Z erj $*/
36 1.28 msaitoh
37 1.1 msaitoh /*
38 1.1 msaitoh * Copyright (c) 2011 The NetBSD Foundation, Inc.
39 1.1 msaitoh * All rights reserved.
40 1.1 msaitoh *
41 1.1 msaitoh * This code is derived from software contributed to The NetBSD Foundation
42 1.1 msaitoh * by Coyote Point Systems, Inc.
43 1.1 msaitoh *
44 1.1 msaitoh * Redistribution and use in source and binary forms, with or without
45 1.1 msaitoh * modification, are permitted provided that the following conditions
46 1.1 msaitoh * are met:
47 1.1 msaitoh * 1. Redistributions of source code must retain the above copyright
48 1.1 msaitoh * notice, this list of conditions and the following disclaimer.
49 1.1 msaitoh * 2. Redistributions in binary form must reproduce the above copyright
50 1.1 msaitoh * notice, this list of conditions and the following disclaimer in the
51 1.1 msaitoh * documentation and/or other materials provided with the distribution.
52 1.1 msaitoh *
53 1.1 msaitoh * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
54 1.1 msaitoh * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
55 1.1 msaitoh * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
56 1.1 msaitoh * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
57 1.1 msaitoh * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
58 1.1 msaitoh * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
59 1.1 msaitoh * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
60 1.1 msaitoh * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
61 1.1 msaitoh * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
62 1.1 msaitoh * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
63 1.1 msaitoh * POSSIBILITY OF SUCH DAMAGE.
64 1.1 msaitoh */
65 1.1 msaitoh
66 1.8 msaitoh #include "opt_inet.h"
67 1.8 msaitoh #include "opt_inet6.h"
68 1.8 msaitoh
69 1.1 msaitoh #include "ixgbe.h"
70 1.1 msaitoh
71 1.1 msaitoh /*
72 1.28 msaitoh  * HW RSC control:
73 1.28 msaitoh  *  This feature only works with IPv4,
74 1.28 msaitoh  *  and only on 82599 and later adapters.
75 1.28 msaitoh  *  It also causes IP forwarding to fail,
76 1.28 msaitoh  *  and unlike LRO that cannot be
77 1.28 msaitoh  *  controlled by the stack. For all these
78 1.28 msaitoh  *  reasons it is best left off; there is
79 1.28 msaitoh  *  no tunable interface, so enabling it
80 1.28 msaitoh  *  requires recompiling with this set
81 1.28 msaitoh  *  to TRUE.
82 1.28 msaitoh */
83 1.1 msaitoh static bool ixgbe_rsc_enable = FALSE;
84 1.1 msaitoh
85 1.3 msaitoh /*
86 1.28 msaitoh  * For Flow Director: this is the
87 1.28 msaitoh  * sample rate of TX packets fed to
88 1.28 msaitoh  * the filter pool; at the default of
89 1.28 msaitoh  * 20, every 20th packet is probed.
90 1.28 msaitoh  *
91 1.28 msaitoh  * Setting this to 0 disables the
92 1.28 msaitoh  * feature.
93 1.28 msaitoh */
94 1.3 msaitoh static int atr_sample_rate = 20;
95 1.3 msaitoh
96 1.28 msaitoh /************************************************************************
97 1.3 msaitoh * Local Function prototypes
98 1.28 msaitoh ************************************************************************/
99 1.28 msaitoh static void ixgbe_setup_transmit_ring(struct tx_ring *);
100 1.28 msaitoh static void ixgbe_free_transmit_buffers(struct tx_ring *);
101 1.28 msaitoh static int ixgbe_setup_receive_ring(struct rx_ring *);
102 1.28 msaitoh static void ixgbe_free_receive_buffers(struct rx_ring *);
103 1.28 msaitoh static void ixgbe_rx_checksum(u32, struct mbuf *, u32,
104 1.28 msaitoh struct ixgbe_hw_stats *);
105 1.28 msaitoh static void ixgbe_refresh_mbufs(struct rx_ring *, int);
106 1.38 knakahar static void ixgbe_drain(struct ifnet *, struct tx_ring *);
107 1.28 msaitoh static int ixgbe_xmit(struct tx_ring *, struct mbuf *);
108 1.28 msaitoh static int ixgbe_tx_ctx_setup(struct tx_ring *,
109 1.28 msaitoh struct mbuf *, u32 *, u32 *);
110 1.28 msaitoh static int ixgbe_tso_setup(struct tx_ring *,
111 1.28 msaitoh struct mbuf *, u32 *, u32 *);
112 1.1 msaitoh static __inline void ixgbe_rx_discard(struct rx_ring *, int);
113 1.1 msaitoh static __inline void ixgbe_rx_input(struct rx_ring *, struct ifnet *,
114 1.28 msaitoh struct mbuf *, u32);
115 1.28 msaitoh static int ixgbe_dma_malloc(struct adapter *, bus_size_t,
116 1.28 msaitoh struct ixgbe_dma_alloc *, int);
117 1.28 msaitoh static void ixgbe_dma_free(struct adapter *, struct ixgbe_dma_alloc *);
118 1.1 msaitoh
119 1.1 msaitoh static void ixgbe_setup_hw_rsc(struct rx_ring *);
120 1.1 msaitoh
121 1.28 msaitoh /************************************************************************
122 1.28 msaitoh * ixgbe_legacy_start_locked - Transmit entry point
123 1.1 msaitoh *
124 1.28 msaitoh * Called by the stack to initiate a transmit.
125 1.28 msaitoh * The driver will remain in this routine as long as there are
126 1.28 msaitoh * packets to transmit and transmit resources are available.
127 1.28 msaitoh  * If transmit resources are not available, the packet is
128 1.28 msaitoh  * left on the interface queue to be retried later.
129 1.28 msaitoh ************************************************************************/
130 1.28 msaitoh int
131 1.28 msaitoh ixgbe_legacy_start_locked(struct ifnet *ifp, struct tx_ring *txr)
132 1.1 msaitoh {
133 1.1 msaitoh struct mbuf *m_head;
134 1.1 msaitoh struct adapter *adapter = txr->adapter;
135 1.42 msaitoh int enqueued = 0;
136 1.42 msaitoh int rc;
137 1.1 msaitoh
138 1.1 msaitoh IXGBE_TX_LOCK_ASSERT(txr);
139 1.1 msaitoh
140 1.38 knakahar if (!adapter->link_active) {
141 1.38 knakahar /*
142 1.38 knakahar 		 * Discard all packets buffered in the IFQ to avoid
143 1.38 knakahar 		 * sending stale packets when the link comes back up.
144 1.38 knakahar */
145 1.38 knakahar ixgbe_drain(ifp, txr);
146 1.38 knakahar return (ENETDOWN);
147 1.38 knakahar }
148 1.1 msaitoh if ((ifp->if_flags & IFF_RUNNING) == 0)
149 1.28 msaitoh return (ENETDOWN);
150 1.1 msaitoh
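	/*
	 * IFQ_POLL only peeks: on EAGAIN (ring full) the packet stays
	 * on if_snd for a later retry, and is dequeued only once
	 * ixgbe_xmit() has accepted it or failed permanently.
	 */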
151 1.1 msaitoh while (!IFQ_IS_EMPTY(&ifp->if_snd)) {
152 1.1 msaitoh if (txr->tx_avail <= IXGBE_QUEUE_MIN_FREE)
153 1.1 msaitoh break;
154 1.1 msaitoh
155 1.1 msaitoh IFQ_POLL(&ifp->if_snd, m_head);
156 1.1 msaitoh if (m_head == NULL)
157 1.1 msaitoh break;
158 1.1 msaitoh
159 1.1 msaitoh if ((rc = ixgbe_xmit(txr, m_head)) == EAGAIN) {
160 1.1 msaitoh break;
161 1.1 msaitoh }
162 1.42 msaitoh enqueued++;
163 1.1 msaitoh IFQ_DEQUEUE(&ifp->if_snd, m_head);
164 1.1 msaitoh if (rc != 0) {
165 1.1 msaitoh m_freem(m_head);
166 1.1 msaitoh continue;
167 1.1 msaitoh }
168 1.1 msaitoh
169 1.1 msaitoh /* Send a copy of the frame to the BPF listener */
170 1.1 msaitoh bpf_mtap(ifp, m_head);
171 1.1 msaitoh }
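	/*
	 * Note when this ring last queued work; lastsent/sending are
	 * presumably consumed by the TX hang detection logic.
	 */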
172 1.42 msaitoh if (enqueued) {
173 1.42 msaitoh txr->lastsent = time_uptime;
174 1.42 msaitoh txr->sending = true;
175 1.42 msaitoh }
176 1.1 msaitoh
177 1.28 msaitoh return IXGBE_SUCCESS;
178 1.28 msaitoh } /* ixgbe_legacy_start_locked */
179 1.28 msaitoh
180 1.28 msaitoh /************************************************************************
181 1.28 msaitoh * ixgbe_legacy_start
182 1.28 msaitoh *
183 1.28 msaitoh * Called by the stack, this always uses the first tx ring,
184 1.28 msaitoh * and should not be used with multiqueue tx enabled.
185 1.28 msaitoh ************************************************************************/
186 1.1 msaitoh void
187 1.28 msaitoh ixgbe_legacy_start(struct ifnet *ifp)
188 1.1 msaitoh {
189 1.1 msaitoh struct adapter *adapter = ifp->if_softc;
190 1.28 msaitoh struct tx_ring *txr = adapter->tx_rings;
191 1.1 msaitoh
192 1.1 msaitoh if (ifp->if_flags & IFF_RUNNING) {
193 1.1 msaitoh IXGBE_TX_LOCK(txr);
194 1.28 msaitoh ixgbe_legacy_start_locked(ifp, txr);
195 1.1 msaitoh IXGBE_TX_UNLOCK(txr);
196 1.1 msaitoh }
197 1.28 msaitoh } /* ixgbe_legacy_start */
198 1.1 msaitoh
199 1.28 msaitoh /************************************************************************
200 1.28 msaitoh * ixgbe_mq_start - Multiqueue Transmit Entry Point
201 1.28 msaitoh *
202 1.28 msaitoh * (if_transmit function)
203 1.28 msaitoh ************************************************************************/
204 1.1 msaitoh int
205 1.1 msaitoh ixgbe_mq_start(struct ifnet *ifp, struct mbuf *m)
206 1.1 msaitoh {
207 1.1 msaitoh struct adapter *adapter = ifp->if_softc;
208 1.1 msaitoh struct tx_ring *txr;
209 1.1 msaitoh int i, err = 0;
210 1.28 msaitoh #ifdef RSS
211 1.1 msaitoh uint32_t bucket_id;
212 1.1 msaitoh #endif
213 1.1 msaitoh
214 1.1 msaitoh /*
215 1.1 msaitoh * When doing RSS, map it to the same outbound queue
216 1.1 msaitoh * as the incoming flow would be mapped to.
217 1.1 msaitoh *
218 1.1 msaitoh * If everything is setup correctly, it should be the
219 1.1 msaitoh * same bucket that the current CPU we're on is.
220 1.1 msaitoh */
221 1.28 msaitoh #ifdef RSS
222 1.1 msaitoh if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) {
223 1.28 msaitoh if ((adapter->feat_en & IXGBE_FEATURE_RSS) &&
224 1.28 msaitoh (rss_hash2bucket(m->m_pkthdr.flowid, M_HASHTYPE_GET(m),
225 1.28 msaitoh &bucket_id) == 0)) {
226 1.1 msaitoh i = bucket_id % adapter->num_queues;
227 1.8 msaitoh #ifdef IXGBE_DEBUG
228 1.8 msaitoh if (bucket_id > adapter->num_queues)
229 1.28 msaitoh if_printf(ifp,
230 1.28 msaitoh "bucket_id (%d) > num_queues (%d)\n",
231 1.28 msaitoh bucket_id, adapter->num_queues);
232 1.8 msaitoh #endif
233 1.8 msaitoh } else
234 1.1 msaitoh i = m->m_pkthdr.flowid % adapter->num_queues;
235 1.3 msaitoh } else
236 1.28 msaitoh #endif /* RSS */
237 1.18 msaitoh i = cpu_index(curcpu()) % adapter->num_queues;
238 1.3 msaitoh
239 1.3 msaitoh /* Check for a hung queue and pick alternative */
240 1.3 msaitoh 	if (((1ULL << i) & adapter->active_queues) == 0)
241 1.18 msaitoh i = ffs64(adapter->active_queues);
242 1.1 msaitoh
243 1.1 msaitoh txr = &adapter->tx_rings[i];
244 1.1 msaitoh
245 1.18 msaitoh err = pcq_put(txr->txr_interq, m);
246 1.18 msaitoh if (err == false) {
247 1.18 msaitoh m_freem(m);
248 1.18 msaitoh txr->pcq_drops.ev_count++;
249 1.1 msaitoh 		return (ENOBUFS);
250 1.18 msaitoh }
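	/*
	 * If another context already holds the ring lock, defer the
	 * drain to a softint or workqueue instead of spinning here.
	 */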
251 1.1 msaitoh if (IXGBE_TX_TRYLOCK(txr)) {
252 1.1 msaitoh ixgbe_mq_start_locked(ifp, txr);
253 1.1 msaitoh IXGBE_TX_UNLOCK(txr);
254 1.34 knakahar } else {
255 1.34 knakahar if (adapter->txrx_use_workqueue) {
256 1.34 knakahar /*
257 1.34 knakahar 			 * This function itself is not called in interrupt
258 1.34 knakahar 			 * context, but it can run in fast softint context
259 1.34 knakahar 			 * right after receiving forwarded packets. Guard
260 1.34 knakahar 			 * against enqueueing the workqueue item twice when
261 1.34 knakahar 			 * the machine handles both locally generated and
262 1.34 knakahar 			 * forwarded packets.
263 1.34 knakahar */
264 1.34 knakahar u_int *enqueued = percpu_getref(adapter->txr_wq_enqueued);
265 1.34 knakahar if (*enqueued == 0) {
266 1.34 knakahar *enqueued = 1;
267 1.34 knakahar percpu_putref(adapter->txr_wq_enqueued);
268 1.34 knakahar workqueue_enqueue(adapter->txr_wq, &txr->wq_cookie, curcpu());
269 1.34 knakahar } else
270 1.34 knakahar percpu_putref(adapter->txr_wq_enqueued);
271 1.34 knakahar } else
272 1.34 knakahar softint_schedule(txr->txr_si);
273 1.34 knakahar }
274 1.1 msaitoh
275 1.1 msaitoh return (0);
276 1.28 msaitoh } /* ixgbe_mq_start */
277 1.1 msaitoh
278 1.28 msaitoh /************************************************************************
279 1.28 msaitoh * ixgbe_mq_start_locked
280 1.28 msaitoh ************************************************************************/
281 1.1 msaitoh int
282 1.1 msaitoh ixgbe_mq_start_locked(struct ifnet *ifp, struct tx_ring *txr)
283 1.1 msaitoh {
284 1.28 msaitoh struct mbuf *next;
285 1.28 msaitoh int enqueued = 0, err = 0;
286 1.1 msaitoh
287 1.38 knakahar if (!txr->adapter->link_active) {
288 1.38 knakahar /*
289 1.38 knakahar 		 * Discard all packets buffered in txr_interq to avoid
290 1.38 knakahar 		 * sending stale packets when the link comes back up.
291 1.38 knakahar */
292 1.38 knakahar ixgbe_drain(ifp, txr);
293 1.38 knakahar return (ENETDOWN);
294 1.38 knakahar }
295 1.28 msaitoh if ((ifp->if_flags & IFF_RUNNING) == 0)
296 1.28 msaitoh return (ENETDOWN);
297 1.1 msaitoh
298 1.1 msaitoh /* Process the queue */
299 1.18 msaitoh while ((next = pcq_get(txr->txr_interq)) != NULL) {
300 1.18 msaitoh if ((err = ixgbe_xmit(txr, next)) != 0) {
301 1.18 msaitoh m_freem(next);
302 1.18 msaitoh /* All errors are counted in ixgbe_xmit() */
303 1.1 msaitoh break;
304 1.1 msaitoh }
305 1.1 msaitoh enqueued++;
306 1.3 msaitoh #if __FreeBSD_version >= 1100036
307 1.4 msaitoh /*
308 1.4 msaitoh * Since we're looking at the tx ring, we can check
309 1.4 msaitoh 		 * to see if we're a VF by examining our tail register
310 1.4 msaitoh * address.
311 1.4 msaitoh */
312 1.28 msaitoh if ((txr->adapter->feat_en & IXGBE_FEATURE_VF) &&
313 1.28 msaitoh (next->m_flags & M_MCAST))
314 1.3 msaitoh if_inc_counter(ifp, IFCOUNTER_OMCASTS, 1);
315 1.3 msaitoh #endif
316 1.1 msaitoh /* Send a copy of the frame to the BPF listener */
317 1.1 msaitoh bpf_mtap(ifp, next);
318 1.1 msaitoh if ((ifp->if_flags & IFF_RUNNING) == 0)
319 1.1 msaitoh break;
320 1.1 msaitoh }
321 1.1 msaitoh
322 1.42 msaitoh if (enqueued) {
323 1.42 msaitoh txr->lastsent = time_uptime;
324 1.42 msaitoh txr->sending = true;
325 1.42 msaitoh }
326 1.42 msaitoh
327 1.28 msaitoh if (txr->tx_avail < IXGBE_TX_CLEANUP_THRESHOLD(txr->adapter))
328 1.1 msaitoh ixgbe_txeof(txr);
329 1.1 msaitoh
330 1.1 msaitoh return (err);
331 1.28 msaitoh } /* ixgbe_mq_start_locked */
332 1.1 msaitoh
333 1.28 msaitoh /************************************************************************
334 1.28 msaitoh * ixgbe_deferred_mq_start
335 1.28 msaitoh *
336 1.34 knakahar  * Called from a softint, and indirectly from a workqueue,
337 1.34 knakahar  * to drain queued transmit packets.
338 1.28 msaitoh ************************************************************************/
339 1.1 msaitoh void
340 1.18 msaitoh ixgbe_deferred_mq_start(void *arg)
341 1.1 msaitoh {
342 1.1 msaitoh struct tx_ring *txr = arg;
343 1.1 msaitoh struct adapter *adapter = txr->adapter;
344 1.28 msaitoh struct ifnet *ifp = adapter->ifp;
345 1.1 msaitoh
346 1.1 msaitoh IXGBE_TX_LOCK(txr);
347 1.18 msaitoh if (pcq_peek(txr->txr_interq) != NULL)
348 1.1 msaitoh ixgbe_mq_start_locked(ifp, txr);
349 1.1 msaitoh IXGBE_TX_UNLOCK(txr);
350 1.28 msaitoh } /* ixgbe_deferred_mq_start */
351 1.3 msaitoh
352 1.28 msaitoh /************************************************************************
353 1.34 knakahar * ixgbe_deferred_mq_start_work
354 1.34 knakahar *
355 1.34 knakahar * Called from a workqueue to drain queued transmit packets.
356 1.34 knakahar ************************************************************************/
357 1.34 knakahar void
358 1.34 knakahar ixgbe_deferred_mq_start_work(struct work *wk, void *arg)
359 1.34 knakahar {
360 1.34 knakahar struct tx_ring *txr = container_of(wk, struct tx_ring, wq_cookie);
361 1.34 knakahar struct adapter *adapter = txr->adapter;
362 1.34 knakahar u_int *enqueued = percpu_getref(adapter->txr_wq_enqueued);
363 1.34 knakahar *enqueued = 0;
364 1.34 knakahar percpu_putref(adapter->txr_wq_enqueued);
365 1.34 knakahar
366 1.34 knakahar ixgbe_deferred_mq_start(txr);
367 1.34 knakahar } /* ixgbe_deferred_mq_start_work */
368 1.34 knakahar
369 1.38 knakahar /************************************************************************
370 1.38 knakahar * ixgbe_drain_all
371 1.38 knakahar ************************************************************************/
372 1.38 knakahar void
373 1.38 knakahar ixgbe_drain_all(struct adapter *adapter)
374 1.38 knakahar {
375 1.38 knakahar struct ifnet *ifp = adapter->ifp;
376 1.38 knakahar struct ix_queue *que = adapter->queues;
377 1.38 knakahar
378 1.38 knakahar for (int i = 0; i < adapter->num_queues; i++, que++) {
379 1.38 knakahar struct tx_ring *txr = que->txr;
380 1.38 knakahar
381 1.38 knakahar IXGBE_TX_LOCK(txr);
382 1.38 knakahar ixgbe_drain(ifp, txr);
383 1.38 knakahar IXGBE_TX_UNLOCK(txr);
384 1.38 knakahar }
385 1.38 knakahar }
386 1.34 knakahar
387 1.34 knakahar /************************************************************************
388 1.28 msaitoh * ixgbe_xmit
389 1.1 msaitoh *
390 1.28 msaitoh * Maps the mbufs to tx descriptors, allowing the
391 1.28 msaitoh * TX engine to transmit the packets.
392 1.1 msaitoh *
393 1.28 msaitoh * Return 0 on success, positive on failure
394 1.28 msaitoh ************************************************************************/
395 1.1 msaitoh static int
396 1.1 msaitoh ixgbe_xmit(struct tx_ring *txr, struct mbuf *m_head)
397 1.1 msaitoh {
398 1.28 msaitoh struct adapter *adapter = txr->adapter;
399 1.28 msaitoh struct ixgbe_tx_buf *txbuf;
400 1.1 msaitoh union ixgbe_adv_tx_desc *txd = NULL;
401 1.28 msaitoh struct ifnet *ifp = adapter->ifp;
402 1.28 msaitoh int i, j, error;
403 1.28 msaitoh int first;
404 1.28 msaitoh u32 olinfo_status = 0, cmd_type_len;
405 1.28 msaitoh bool remap = TRUE;
406 1.28 msaitoh bus_dmamap_t map;
407 1.1 msaitoh
408 1.1 msaitoh /* Basic descriptor defines */
409 1.28 msaitoh cmd_type_len = (IXGBE_ADVTXD_DTYP_DATA |
410 1.1 msaitoh IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT);
411 1.1 msaitoh
412 1.29 knakahar if (vlan_has_tag(m_head))
413 1.28 msaitoh cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE;
414 1.1 msaitoh
415 1.28 msaitoh /*
416 1.28 msaitoh 	 * Remember the first descriptor used: its tx_buffer
417 1.28 msaitoh 	 * entry later records the EOP descriptor on which
418 1.28 msaitoh 	 * the hardware reports completion.
419 1.28 msaitoh */
420 1.28 msaitoh first = txr->next_avail_desc;
421 1.1 msaitoh txbuf = &txr->tx_buffers[first];
422 1.1 msaitoh map = txbuf->map;
423 1.1 msaitoh
424 1.1 msaitoh /*
425 1.1 msaitoh * Map the packet for DMA.
426 1.1 msaitoh */
427 1.22 msaitoh retry:
428 1.28 msaitoh error = bus_dmamap_load_mbuf(txr->txtag->dt_dmat, map, m_head,
429 1.28 msaitoh BUS_DMA_NOWAIT);
430 1.1 msaitoh
431 1.1 msaitoh if (__predict_false(error)) {
432 1.22 msaitoh struct mbuf *m;
433 1.1 msaitoh
434 1.1 msaitoh switch (error) {
435 1.1 msaitoh case EAGAIN:
436 1.35 msaitoh txr->q_eagain_tx_dma_setup++;
437 1.1 msaitoh return EAGAIN;
438 1.1 msaitoh case ENOMEM:
439 1.35 msaitoh txr->q_enomem_tx_dma_setup++;
440 1.1 msaitoh return EAGAIN;
441 1.1 msaitoh case EFBIG:
442 1.22 msaitoh 			/* Try it again? Only one retry is allowed */
443 1.22 msaitoh if (remap == TRUE) {
444 1.22 msaitoh remap = FALSE;
445 1.22 msaitoh /*
446 1.22 msaitoh * XXX: m_defrag will choke on
447 1.22 msaitoh * non-MCLBYTES-sized clusters
448 1.22 msaitoh */
449 1.35 msaitoh txr->q_efbig_tx_dma_setup++;
450 1.22 msaitoh m = m_defrag(m_head, M_NOWAIT);
451 1.22 msaitoh if (m == NULL) {
452 1.35 msaitoh txr->q_mbuf_defrag_failed++;
453 1.22 msaitoh return ENOBUFS;
454 1.22 msaitoh }
455 1.22 msaitoh m_head = m;
456 1.22 msaitoh goto retry;
457 1.22 msaitoh } else {
458 1.35 msaitoh txr->q_efbig2_tx_dma_setup++;
459 1.22 msaitoh return error;
460 1.22 msaitoh }
461 1.1 msaitoh case EINVAL:
462 1.35 msaitoh txr->q_einval_tx_dma_setup++;
463 1.1 msaitoh return error;
464 1.1 msaitoh default:
465 1.35 msaitoh txr->q_other_tx_dma_setup++;
466 1.1 msaitoh return error;
467 1.1 msaitoh }
468 1.1 msaitoh }
469 1.1 msaitoh
470 1.1 msaitoh /* Make certain there are enough descriptors */
471 1.10 msaitoh if (txr->tx_avail < (map->dm_nsegs + 2)) {
472 1.1 msaitoh txr->no_desc_avail.ev_count++;
473 1.1 msaitoh ixgbe_dmamap_unload(txr->txtag, txbuf->map);
474 1.1 msaitoh return EAGAIN;
475 1.1 msaitoh }
476 1.1 msaitoh
477 1.1 msaitoh /*
478 1.4 msaitoh 	 * Set up the appropriate offload context;
479 1.4 msaitoh 	 * this will consume the first descriptor.
480 1.4 msaitoh */
481 1.1 msaitoh error = ixgbe_tx_ctx_setup(txr, m_head, &cmd_type_len, &olinfo_status);
482 1.1 msaitoh if (__predict_false(error)) {
483 1.1 msaitoh return (error);
484 1.1 msaitoh }
485 1.1 msaitoh
486 1.1 msaitoh /* Do the flow director magic */
487 1.28 msaitoh if ((adapter->feat_en & IXGBE_FEATURE_FDIR) &&
488 1.28 msaitoh (txr->atr_sample) && (!adapter->fdir_reinit)) {
489 1.1 msaitoh ++txr->atr_count;
490 1.1 msaitoh if (txr->atr_count >= atr_sample_rate) {
491 1.1 msaitoh ixgbe_atr(txr, m_head);
492 1.1 msaitoh txr->atr_count = 0;
493 1.1 msaitoh }
494 1.1 msaitoh }
495 1.1 msaitoh
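	/*
	 * CC (check context) ties these data descriptors to the
	 * context descriptor set up above.
	 */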
496 1.8 msaitoh olinfo_status |= IXGBE_ADVTXD_CC;
497 1.1 msaitoh i = txr->next_avail_desc;
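	/* One advanced data descriptor is built per DMA segment. */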
498 1.1 msaitoh for (j = 0; j < map->dm_nsegs; j++) {
499 1.1 msaitoh bus_size_t seglen;
500 1.1 msaitoh bus_addr_t segaddr;
501 1.1 msaitoh
502 1.1 msaitoh txbuf = &txr->tx_buffers[i];
503 1.1 msaitoh txd = &txr->tx_base[i];
504 1.1 msaitoh seglen = map->dm_segs[j].ds_len;
505 1.1 msaitoh segaddr = htole64(map->dm_segs[j].ds_addr);
506 1.1 msaitoh
507 1.1 msaitoh txd->read.buffer_addr = segaddr;
508 1.40 msaitoh txd->read.cmd_type_len = htole32(cmd_type_len | seglen);
509 1.1 msaitoh txd->read.olinfo_status = htole32(olinfo_status);
510 1.1 msaitoh
511 1.1 msaitoh if (++i == txr->num_desc)
512 1.1 msaitoh i = 0;
513 1.1 msaitoh }
514 1.1 msaitoh
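	/*
	 * The frame's final descriptor gets EOP (end of packet) and
	 * RS (report status); its DD bit later tells ixgbe_txeof()
	 * that the whole frame has been transmitted.
	 */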
515 1.28 msaitoh txd->read.cmd_type_len |= htole32(IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS);
516 1.1 msaitoh txr->tx_avail -= map->dm_nsegs;
517 1.1 msaitoh txr->next_avail_desc = i;
518 1.1 msaitoh
519 1.1 msaitoh txbuf->m_head = m_head;
520 1.1 msaitoh /*
521 1.4 msaitoh 	 * Here we swap the map so the last descriptor,
522 1.4 msaitoh 	 * which gets the completion interrupt, has the
523 1.4 msaitoh 	 * real map, and the first descriptor gets the
524 1.4 msaitoh 	 * unused map from this descriptor.
525 1.4 msaitoh */
526 1.1 msaitoh txr->tx_buffers[first].map = txbuf->map;
527 1.1 msaitoh txbuf->map = map;
528 1.1 msaitoh bus_dmamap_sync(txr->txtag->dt_dmat, map, 0, m_head->m_pkthdr.len,
529 1.1 msaitoh BUS_DMASYNC_PREWRITE);
530 1.1 msaitoh
531 1.28 msaitoh /* Set the EOP descriptor that will be marked done */
532 1.28 msaitoh txbuf = &txr->tx_buffers[first];
533 1.1 msaitoh txbuf->eop = txd;
534 1.1 msaitoh
535 1.28 msaitoh ixgbe_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
536 1.1 msaitoh BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
537 1.1 msaitoh /*
538 1.1 msaitoh 	 * Advance the Transmit Descriptor Tail (TDT); this tells the
539 1.1 msaitoh 	 * hardware that this frame is available to transmit.
540 1.1 msaitoh */
541 1.1 msaitoh ++txr->total_packets.ev_count;
542 1.3 msaitoh IXGBE_WRITE_REG(&adapter->hw, txr->tail, i);
543 1.3 msaitoh
544 1.23 msaitoh /*
545 1.23 msaitoh * XXXX NOMPSAFE: ifp->if_data should be percpu.
546 1.23 msaitoh */
547 1.23 msaitoh ifp->if_obytes += m_head->m_pkthdr.len;
548 1.23 msaitoh if (m_head->m_flags & M_MCAST)
549 1.23 msaitoh ifp->if_omcasts++;
550 1.23 msaitoh
551 1.28 msaitoh return (0);
552 1.28 msaitoh } /* ixgbe_xmit */
553 1.1 msaitoh
554 1.38 knakahar /************************************************************************
555 1.38 knakahar * ixgbe_drain
556 1.38 knakahar ************************************************************************/
557 1.38 knakahar static void
558 1.38 knakahar ixgbe_drain(struct ifnet *ifp, struct tx_ring *txr)
559 1.38 knakahar {
560 1.38 knakahar struct mbuf *m;
561 1.38 knakahar
562 1.38 knakahar IXGBE_TX_LOCK_ASSERT(txr);
563 1.38 knakahar
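	/* Only the first ring services the shared if_snd queue. */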
564 1.38 knakahar if (txr->me == 0) {
565 1.38 knakahar while (!IFQ_IS_EMPTY(&ifp->if_snd)) {
566 1.38 knakahar IFQ_DEQUEUE(&ifp->if_snd, m);
567 1.38 knakahar m_freem(m);
568 1.38 knakahar IF_DROP(&ifp->if_snd);
569 1.38 knakahar }
570 1.38 knakahar }
571 1.38 knakahar
572 1.38 knakahar while ((m = pcq_get(txr->txr_interq)) != NULL) {
573 1.38 knakahar m_freem(m);
574 1.38 knakahar txr->pcq_drops.ev_count++;
575 1.38 knakahar }
576 1.38 knakahar }
577 1.16 msaitoh
578 1.28 msaitoh /************************************************************************
579 1.28 msaitoh * ixgbe_allocate_transmit_buffers
580 1.1 msaitoh *
581 1.28 msaitoh * Allocate memory for tx_buffer structures. The tx_buffer stores all
582 1.28 msaitoh * the information needed to transmit a packet on the wire. This is
583 1.28 msaitoh  * called only once at attach; setup is done on every reset.
584 1.28 msaitoh ************************************************************************/
585 1.28 msaitoh static int
586 1.1 msaitoh ixgbe_allocate_transmit_buffers(struct tx_ring *txr)
587 1.1 msaitoh {
588 1.28 msaitoh struct adapter *adapter = txr->adapter;
589 1.28 msaitoh device_t dev = adapter->dev;
590 1.1 msaitoh struct ixgbe_tx_buf *txbuf;
591 1.28 msaitoh int error, i;
592 1.1 msaitoh
593 1.1 msaitoh /*
594 1.1 msaitoh * Setup DMA descriptor areas.
595 1.1 msaitoh */
596 1.28 msaitoh error = ixgbe_dma_tag_create(
597 1.28 msaitoh /* parent */ adapter->osdep.dmat,
598 1.28 msaitoh /* alignment */ 1,
599 1.28 msaitoh /* bounds */ 0,
600 1.28 msaitoh /* maxsize */ IXGBE_TSO_SIZE,
601 1.28 msaitoh /* nsegments */ adapter->num_segs,
602 1.28 msaitoh /* maxsegsize */ PAGE_SIZE,
603 1.28 msaitoh /* flags */ 0,
604 1.28 msaitoh &txr->txtag);
605 1.28 msaitoh if (error != 0) {
606 1.1 msaitoh 		aprint_error_dev(dev, "Unable to allocate TX DMA tag\n");
607 1.1 msaitoh goto fail;
608 1.1 msaitoh }
609 1.1 msaitoh
610 1.28 msaitoh txr->tx_buffers =
611 1.1 msaitoh (struct ixgbe_tx_buf *) malloc(sizeof(struct ixgbe_tx_buf) *
612 1.28 msaitoh adapter->num_tx_desc, M_DEVBUF, M_NOWAIT | M_ZERO);
613 1.28 msaitoh if (txr->tx_buffers == NULL) {
614 1.1 msaitoh aprint_error_dev(dev, "Unable to allocate tx_buffer memory\n");
615 1.1 msaitoh error = ENOMEM;
616 1.1 msaitoh goto fail;
617 1.1 msaitoh }
618 1.1 msaitoh
619 1.28 msaitoh /* Create the descriptor buffer dma maps */
620 1.1 msaitoh txbuf = txr->tx_buffers;
621 1.1 msaitoh for (i = 0; i < adapter->num_tx_desc; i++, txbuf++) {
622 1.1 msaitoh error = ixgbe_dmamap_create(txr->txtag, 0, &txbuf->map);
623 1.1 msaitoh if (error != 0) {
624 1.1 msaitoh aprint_error_dev(dev,
625 1.1 msaitoh "Unable to create TX DMA map (%d)\n", error);
626 1.1 msaitoh goto fail;
627 1.1 msaitoh }
628 1.1 msaitoh }
629 1.1 msaitoh
630 1.1 msaitoh return 0;
631 1.1 msaitoh fail:
632 1.1 msaitoh 	/* Free everything; this handles the case where we failed partway through */
633 1.15 msaitoh #if 0 /* XXX was FreeBSD */
634 1.1 msaitoh ixgbe_free_transmit_structures(adapter);
635 1.15 msaitoh #else
636 1.15 msaitoh ixgbe_free_transmit_buffers(txr);
637 1.15 msaitoh #endif
638 1.1 msaitoh return (error);
639 1.28 msaitoh } /* ixgbe_allocate_transmit_buffers */
640 1.1 msaitoh
641 1.28 msaitoh /************************************************************************
642 1.28 msaitoh * ixgbe_setup_transmit_ring - Initialize a transmit ring.
643 1.28 msaitoh ************************************************************************/
644 1.1 msaitoh static void
645 1.1 msaitoh ixgbe_setup_transmit_ring(struct tx_ring *txr)
646 1.1 msaitoh {
647 1.28 msaitoh struct adapter *adapter = txr->adapter;
648 1.28 msaitoh struct ixgbe_tx_buf *txbuf;
649 1.1 msaitoh #ifdef DEV_NETMAP
650 1.1 msaitoh struct netmap_adapter *na = NA(adapter->ifp);
651 1.28 msaitoh struct netmap_slot *slot;
652 1.1 msaitoh #endif /* DEV_NETMAP */
653 1.1 msaitoh
654 1.1 msaitoh /* Clear the old ring contents */
655 1.1 msaitoh IXGBE_TX_LOCK(txr);
656 1.28 msaitoh
657 1.1 msaitoh #ifdef DEV_NETMAP
658 1.28 msaitoh if (adapter->feat_en & IXGBE_FEATURE_NETMAP) {
659 1.28 msaitoh /*
660 1.28 msaitoh * (under lock): if in netmap mode, do some consistency
661 1.28 msaitoh * checks and set slot to entry 0 of the netmap ring.
662 1.28 msaitoh */
663 1.28 msaitoh slot = netmap_reset(na, NR_TX, txr->me, 0);
664 1.28 msaitoh }
665 1.1 msaitoh #endif /* DEV_NETMAP */
666 1.28 msaitoh
667 1.1 msaitoh bzero((void *)txr->tx_base,
668 1.28 msaitoh (sizeof(union ixgbe_adv_tx_desc)) * adapter->num_tx_desc);
669 1.1 msaitoh /* Reset indices */
670 1.1 msaitoh txr->next_avail_desc = 0;
671 1.1 msaitoh txr->next_to_clean = 0;
672 1.1 msaitoh
673 1.1 msaitoh /* Free any existing tx buffers. */
674 1.28 msaitoh txbuf = txr->tx_buffers;
675 1.42 msaitoh 	txr->sending = false;
676 1.5 msaitoh 	for (int i = 0; i < txr->num_desc; i++, txbuf++) {
677 1.1 msaitoh if (txbuf->m_head != NULL) {
678 1.1 msaitoh bus_dmamap_sync(txr->txtag->dt_dmat, txbuf->map,
679 1.1 msaitoh 0, txbuf->m_head->m_pkthdr.len,
680 1.1 msaitoh BUS_DMASYNC_POSTWRITE);
681 1.1 msaitoh ixgbe_dmamap_unload(txr->txtag, txbuf->map);
682 1.1 msaitoh m_freem(txbuf->m_head);
683 1.1 msaitoh txbuf->m_head = NULL;
684 1.1 msaitoh }
685 1.28 msaitoh
686 1.1 msaitoh #ifdef DEV_NETMAP
687 1.1 msaitoh /*
688 1.1 msaitoh * In netmap mode, set the map for the packet buffer.
689 1.1 msaitoh * NOTE: Some drivers (not this one) also need to set
690 1.1 msaitoh * the physical buffer address in the NIC ring.
691 1.1 msaitoh * Slots in the netmap ring (indexed by "si") are
692 1.1 msaitoh * kring->nkr_hwofs positions "ahead" wrt the
693 1.1 msaitoh * corresponding slot in the NIC ring. In some drivers
694 1.1 msaitoh * (not here) nkr_hwofs can be negative. Function
695 1.1 msaitoh * netmap_idx_n2k() handles wraparounds properly.
696 1.1 msaitoh */
697 1.28 msaitoh if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) && slot) {
698 1.1 msaitoh int si = netmap_idx_n2k(&na->tx_rings[txr->me], i);
699 1.5 msaitoh netmap_load_map(na, txr->txtag,
700 1.5 msaitoh txbuf->map, NMB(na, slot + si));
701 1.1 msaitoh }
702 1.1 msaitoh #endif /* DEV_NETMAP */
703 1.28 msaitoh
704 1.1 msaitoh /* Clear the EOP descriptor pointer */
705 1.1 msaitoh txbuf->eop = NULL;
706 1.28 msaitoh }
707 1.1 msaitoh
708 1.1 msaitoh /* Set the rate at which we sample packets */
709 1.28 msaitoh if (adapter->feat_en & IXGBE_FEATURE_FDIR)
710 1.1 msaitoh txr->atr_sample = atr_sample_rate;
711 1.1 msaitoh
712 1.1 msaitoh /* Set number of descriptors available */
713 1.1 msaitoh txr->tx_avail = adapter->num_tx_desc;
714 1.1 msaitoh
715 1.1 msaitoh ixgbe_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
716 1.1 msaitoh BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
717 1.1 msaitoh IXGBE_TX_UNLOCK(txr);
718 1.28 msaitoh } /* ixgbe_setup_transmit_ring */
719 1.1 msaitoh
720 1.28 msaitoh /************************************************************************
721 1.28 msaitoh * ixgbe_setup_transmit_structures - Initialize all transmit rings.
722 1.28 msaitoh ************************************************************************/
723 1.1 msaitoh int
724 1.1 msaitoh ixgbe_setup_transmit_structures(struct adapter *adapter)
725 1.1 msaitoh {
726 1.1 msaitoh struct tx_ring *txr = adapter->tx_rings;
727 1.1 msaitoh
728 1.1 msaitoh for (int i = 0; i < adapter->num_queues; i++, txr++)
729 1.1 msaitoh ixgbe_setup_transmit_ring(txr);
730 1.1 msaitoh
731 1.1 msaitoh return (0);
732 1.28 msaitoh } /* ixgbe_setup_transmit_structures */
733 1.1 msaitoh
734 1.28 msaitoh /************************************************************************
735 1.28 msaitoh * ixgbe_free_transmit_structures - Free all transmit rings.
736 1.28 msaitoh ************************************************************************/
737 1.1 msaitoh void
738 1.1 msaitoh ixgbe_free_transmit_structures(struct adapter *adapter)
739 1.1 msaitoh {
740 1.1 msaitoh struct tx_ring *txr = adapter->tx_rings;
741 1.1 msaitoh
742 1.1 msaitoh for (int i = 0; i < adapter->num_queues; i++, txr++) {
743 1.1 msaitoh ixgbe_free_transmit_buffers(txr);
744 1.1 msaitoh ixgbe_dma_free(adapter, &txr->txdma);
745 1.1 msaitoh IXGBE_TX_LOCK_DESTROY(txr);
746 1.1 msaitoh }
747 1.1 msaitoh free(adapter->tx_rings, M_DEVBUF);
748 1.28 msaitoh } /* ixgbe_free_transmit_structures */
749 1.1 msaitoh
750 1.28 msaitoh /************************************************************************
751 1.28 msaitoh * ixgbe_free_transmit_buffers
752 1.1 msaitoh *
753 1.28 msaitoh * Free transmit ring related data structures.
754 1.28 msaitoh ************************************************************************/
755 1.1 msaitoh static void
756 1.1 msaitoh ixgbe_free_transmit_buffers(struct tx_ring *txr)
757 1.1 msaitoh {
758 1.28 msaitoh struct adapter *adapter = txr->adapter;
759 1.1 msaitoh struct ixgbe_tx_buf *tx_buffer;
760 1.28 msaitoh int i;
761 1.1 msaitoh
762 1.14 msaitoh INIT_DEBUGOUT("ixgbe_free_transmit_buffers: begin");
763 1.1 msaitoh
764 1.1 msaitoh if (txr->tx_buffers == NULL)
765 1.1 msaitoh return;
766 1.1 msaitoh
767 1.1 msaitoh tx_buffer = txr->tx_buffers;
768 1.1 msaitoh for (i = 0; i < adapter->num_tx_desc; i++, tx_buffer++) {
769 1.1 msaitoh if (tx_buffer->m_head != NULL) {
770 1.1 msaitoh bus_dmamap_sync(txr->txtag->dt_dmat, tx_buffer->map,
771 1.1 msaitoh 0, tx_buffer->m_head->m_pkthdr.len,
772 1.1 msaitoh BUS_DMASYNC_POSTWRITE);
773 1.1 msaitoh ixgbe_dmamap_unload(txr->txtag, tx_buffer->map);
774 1.1 msaitoh m_freem(tx_buffer->m_head);
775 1.1 msaitoh tx_buffer->m_head = NULL;
776 1.1 msaitoh if (tx_buffer->map != NULL) {
777 1.1 msaitoh ixgbe_dmamap_destroy(txr->txtag,
778 1.1 msaitoh tx_buffer->map);
779 1.1 msaitoh tx_buffer->map = NULL;
780 1.1 msaitoh }
781 1.1 msaitoh } else if (tx_buffer->map != NULL) {
782 1.1 msaitoh ixgbe_dmamap_unload(txr->txtag, tx_buffer->map);
783 1.1 msaitoh ixgbe_dmamap_destroy(txr->txtag, tx_buffer->map);
784 1.1 msaitoh tx_buffer->map = NULL;
785 1.1 msaitoh }
786 1.1 msaitoh }
787 1.18 msaitoh if (txr->txr_interq != NULL) {
788 1.18 msaitoh struct mbuf *m;
789 1.18 msaitoh
790 1.18 msaitoh while ((m = pcq_get(txr->txr_interq)) != NULL)
791 1.18 msaitoh m_freem(m);
792 1.18 msaitoh pcq_destroy(txr->txr_interq);
793 1.18 msaitoh }
794 1.1 msaitoh if (txr->tx_buffers != NULL) {
795 1.1 msaitoh free(txr->tx_buffers, M_DEVBUF);
796 1.1 msaitoh txr->tx_buffers = NULL;
797 1.1 msaitoh }
798 1.1 msaitoh if (txr->txtag != NULL) {
799 1.1 msaitoh ixgbe_dma_tag_destroy(txr->txtag);
800 1.1 msaitoh txr->txtag = NULL;
801 1.1 msaitoh }
802 1.28 msaitoh } /* ixgbe_free_transmit_buffers */
803 1.1 msaitoh
804 1.28 msaitoh /************************************************************************
805 1.28 msaitoh * ixgbe_tx_ctx_setup
806 1.1 msaitoh *
807 1.28 msaitoh * Advanced Context Descriptor setup for VLAN, CSUM or TSO
808 1.28 msaitoh ************************************************************************/
809 1.1 msaitoh static int
810 1.1 msaitoh ixgbe_tx_ctx_setup(struct tx_ring *txr, struct mbuf *mp,
811 1.1 msaitoh u32 *cmd_type_len, u32 *olinfo_status)
812 1.1 msaitoh {
813 1.28 msaitoh struct adapter *adapter = txr->adapter;
814 1.1 msaitoh struct ixgbe_adv_tx_context_desc *TXD;
815 1.28 msaitoh struct ether_vlan_header *eh;
816 1.8 msaitoh #ifdef INET
817 1.28 msaitoh struct ip *ip;
818 1.8 msaitoh #endif
819 1.8 msaitoh #ifdef INET6
820 1.28 msaitoh struct ip6_hdr *ip6;
821 1.8 msaitoh #endif
822 1.28 msaitoh int ehdrlen, ip_hlen = 0;
823 1.28 msaitoh int offload = TRUE;
824 1.28 msaitoh int ctxd = txr->next_avail_desc;
825 1.28 msaitoh u32 vlan_macip_lens = 0;
826 1.28 msaitoh u32 type_tucmd_mlhl = 0;
827 1.28 msaitoh u16 vtag = 0;
828 1.28 msaitoh u16 etype;
829 1.28 msaitoh u8 ipproto = 0;
830 1.28 msaitoh char *l3d;
831 1.8 msaitoh
832 1.1 msaitoh
833 1.1 msaitoh /* First check if TSO is to be used */
834 1.28 msaitoh if (mp->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) {
835 1.17 msaitoh int rv = ixgbe_tso_setup(txr, mp, cmd_type_len, olinfo_status);
836 1.17 msaitoh
837 1.21 msaitoh if (rv != 0)
838 1.17 msaitoh ++adapter->tso_err.ev_count;
839 1.21 msaitoh return rv;
840 1.17 msaitoh }
841 1.1 msaitoh
842 1.1 msaitoh if ((mp->m_pkthdr.csum_flags & M_CSUM_OFFLOAD) == 0)
843 1.1 msaitoh offload = FALSE;
844 1.1 msaitoh
845 1.1 msaitoh /* Indicate the whole packet as payload when not doing TSO */
846 1.28 msaitoh *olinfo_status |= mp->m_pkthdr.len << IXGBE_ADVTXD_PAYLEN_SHIFT;
847 1.1 msaitoh
848 1.1 msaitoh /* Now ready a context descriptor */
849 1.28 msaitoh TXD = (struct ixgbe_adv_tx_context_desc *)&txr->tx_base[ctxd];
850 1.1 msaitoh
851 1.1 msaitoh /*
852 1.28 msaitoh * In advanced descriptors the vlan tag must
853 1.28 msaitoh * be placed into the context descriptor. Hence
854 1.28 msaitoh * we need to make one even if not doing offloads.
855 1.28 msaitoh */
856 1.29 knakahar if (vlan_has_tag(mp)) {
857 1.29 knakahar vtag = htole16(vlan_get_tag(mp));
858 1.1 msaitoh vlan_macip_lens |= (vtag << IXGBE_ADVTXD_VLAN_SHIFT);
859 1.28 msaitoh } else if (!(txr->adapter->feat_en & IXGBE_FEATURE_NEEDS_CTXD) &&
860 1.28 msaitoh (offload == FALSE))
861 1.4 msaitoh return (0);
862 1.1 msaitoh
863 1.1 msaitoh /*
864 1.1 msaitoh * Determine where frame payload starts.
865 1.1 msaitoh * Jump over vlan headers if already present,
866 1.1 msaitoh * helpful for QinQ too.
867 1.1 msaitoh */
868 1.1 msaitoh KASSERT(mp->m_len >= offsetof(struct ether_vlan_header, evl_tag));
869 1.1 msaitoh eh = mtod(mp, struct ether_vlan_header *);
870 1.1 msaitoh if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
871 1.1 msaitoh KASSERT(mp->m_len >= sizeof(struct ether_vlan_header));
872 1.1 msaitoh etype = ntohs(eh->evl_proto);
873 1.1 msaitoh ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
874 1.1 msaitoh } else {
875 1.1 msaitoh etype = ntohs(eh->evl_encap_proto);
876 1.1 msaitoh ehdrlen = ETHER_HDR_LEN;
877 1.1 msaitoh }
878 1.1 msaitoh
879 1.1 msaitoh /* Set the ether header length */
880 1.1 msaitoh vlan_macip_lens |= ehdrlen << IXGBE_ADVTXD_MACLEN_SHIFT;
881 1.1 msaitoh
882 1.3 msaitoh if (offload == FALSE)
883 1.3 msaitoh goto no_offloads;
884 1.3 msaitoh
885 1.8 msaitoh /*
886 1.28 msaitoh * If the first mbuf only includes the ethernet header,
887 1.28 msaitoh * jump to the next one
888 1.28 msaitoh * XXX: This assumes the stack splits mbufs containing headers
889 1.28 msaitoh * on header boundaries
890 1.8 msaitoh * XXX: And assumes the entire IP header is contained in one mbuf
891 1.8 msaitoh */
892 1.8 msaitoh if (mp->m_len == ehdrlen && mp->m_next)
893 1.8 msaitoh l3d = mtod(mp->m_next, char *);
894 1.8 msaitoh else
895 1.8 msaitoh l3d = mtod(mp, char *) + ehdrlen;
896 1.8 msaitoh
897 1.1 msaitoh switch (etype) {
898 1.9 msaitoh #ifdef INET
899 1.1 msaitoh case ETHERTYPE_IP:
900 1.8 msaitoh ip = (struct ip *)(l3d);
901 1.8 msaitoh ip_hlen = ip->ip_hl << 2;
902 1.8 msaitoh ipproto = ip->ip_p;
903 1.8 msaitoh type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
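		/*
		 * The stack zeroes ip_sum when requesting IPv4 csum
		 * offload; the KASSERT below checks that contract.
		 */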
904 1.1 msaitoh KASSERT((mp->m_pkthdr.csum_flags & M_CSUM_IPv4) == 0 ||
905 1.8 msaitoh ip->ip_sum == 0);
906 1.1 msaitoh break;
907 1.9 msaitoh #endif
908 1.9 msaitoh #ifdef INET6
909 1.1 msaitoh case ETHERTYPE_IPV6:
910 1.8 msaitoh ip6 = (struct ip6_hdr *)(l3d);
911 1.8 msaitoh ip_hlen = sizeof(struct ip6_hdr);
912 1.8 msaitoh ipproto = ip6->ip6_nxt;
913 1.1 msaitoh type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV6;
914 1.1 msaitoh break;
915 1.9 msaitoh #endif
916 1.1 msaitoh default:
917 1.11 msaitoh offload = false;
918 1.1 msaitoh break;
919 1.1 msaitoh }
920 1.1 msaitoh
921 1.1 msaitoh if ((mp->m_pkthdr.csum_flags & M_CSUM_IPv4) != 0)
922 1.1 msaitoh *olinfo_status |= IXGBE_TXD_POPTS_IXSM << 8;
923 1.1 msaitoh
924 1.1 msaitoh vlan_macip_lens |= ip_hlen;
925 1.1 msaitoh
926 1.8 msaitoh /* No support for offloads for non-L4 next headers */
927 1.8 msaitoh switch (ipproto) {
928 1.36 msaitoh case IPPROTO_TCP:
929 1.36 msaitoh if (mp->m_pkthdr.csum_flags &
930 1.36 msaitoh (M_CSUM_TCPv4 | M_CSUM_TCPv6))
931 1.36 msaitoh type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
932 1.36 msaitoh else
933 1.36 msaitoh offload = false;
934 1.36 msaitoh break;
935 1.36 msaitoh case IPPROTO_UDP:
936 1.36 msaitoh if (mp->m_pkthdr.csum_flags &
937 1.36 msaitoh (M_CSUM_UDPv4 | M_CSUM_UDPv6))
938 1.36 msaitoh type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_UDP;
939 1.36 msaitoh else
940 1.11 msaitoh offload = false;
941 1.36 msaitoh break;
942 1.36 msaitoh default:
943 1.36 msaitoh offload = false;
944 1.36 msaitoh break;
945 1.8 msaitoh }
946 1.8 msaitoh
947 1.8 msaitoh if (offload) /* Insert L4 checksum into data descriptors */
948 1.1 msaitoh *olinfo_status |= IXGBE_TXD_POPTS_TXSM << 8;
949 1.1 msaitoh
950 1.3 msaitoh no_offloads:
951 1.3 msaitoh type_tucmd_mlhl |= IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
952 1.3 msaitoh
953 1.1 msaitoh /* Now copy bits into descriptor */
954 1.1 msaitoh TXD->vlan_macip_lens = htole32(vlan_macip_lens);
955 1.1 msaitoh TXD->type_tucmd_mlhl = htole32(type_tucmd_mlhl);
956 1.1 msaitoh TXD->seqnum_seed = htole32(0);
957 1.1 msaitoh TXD->mss_l4len_idx = htole32(0);
958 1.1 msaitoh
959 1.1 msaitoh /* We've consumed the first desc, adjust counters */
960 1.1 msaitoh if (++ctxd == txr->num_desc)
961 1.1 msaitoh ctxd = 0;
962 1.1 msaitoh txr->next_avail_desc = ctxd;
963 1.1 msaitoh --txr->tx_avail;
964 1.1 msaitoh
965 1.28 msaitoh return (0);
966 1.28 msaitoh } /* ixgbe_tx_ctx_setup */
967 1.1 msaitoh
968 1.28 msaitoh /************************************************************************
969 1.28 msaitoh * ixgbe_tso_setup
970 1.1 msaitoh *
971 1.28 msaitoh * Setup work for hardware segmentation offload (TSO) on
972 1.28 msaitoh * adapters using advanced tx descriptors
973 1.28 msaitoh ************************************************************************/
974 1.1 msaitoh static int
975 1.28 msaitoh ixgbe_tso_setup(struct tx_ring *txr, struct mbuf *mp, u32 *cmd_type_len,
976 1.28 msaitoh u32 *olinfo_status)
977 1.1 msaitoh {
978 1.1 msaitoh struct ixgbe_adv_tx_context_desc *TXD;
979 1.28 msaitoh struct ether_vlan_header *eh;
980 1.1 msaitoh #ifdef INET6
981 1.28 msaitoh struct ip6_hdr *ip6;
982 1.1 msaitoh #endif
983 1.1 msaitoh #ifdef INET
984 1.28 msaitoh struct ip *ip;
985 1.1 msaitoh #endif
986 1.28 msaitoh struct tcphdr *th;
987 1.28 msaitoh int ctxd, ehdrlen, ip_hlen, tcp_hlen;
988 1.28 msaitoh u32 vlan_macip_lens = 0;
989 1.28 msaitoh u32 type_tucmd_mlhl = 0;
990 1.28 msaitoh u32 mss_l4len_idx = 0, paylen;
991 1.28 msaitoh u16 vtag = 0, eh_type;
992 1.1 msaitoh
993 1.1 msaitoh /*
994 1.1 msaitoh * Determine where frame payload starts.
995 1.1 msaitoh * Jump over vlan headers if already present
996 1.1 msaitoh */
997 1.1 msaitoh eh = mtod(mp, struct ether_vlan_header *);
998 1.1 msaitoh if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
999 1.1 msaitoh ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
1000 1.1 msaitoh eh_type = eh->evl_proto;
1001 1.1 msaitoh } else {
1002 1.1 msaitoh ehdrlen = ETHER_HDR_LEN;
1003 1.1 msaitoh eh_type = eh->evl_encap_proto;
1004 1.1 msaitoh }
1005 1.1 msaitoh
1006 1.1 msaitoh switch (ntohs(eh_type)) {
1007 1.1 msaitoh #ifdef INET
1008 1.1 msaitoh case ETHERTYPE_IP:
1009 1.1 msaitoh ip = (struct ip *)(mp->m_data + ehdrlen);
1010 1.1 msaitoh if (ip->ip_p != IPPROTO_TCP)
1011 1.1 msaitoh return (ENXIO);
1012 1.1 msaitoh ip->ip_sum = 0;
1013 1.1 msaitoh ip_hlen = ip->ip_hl << 2;
1014 1.1 msaitoh th = (struct tcphdr *)((char *)ip + ip_hlen);
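		/*
		 * Prefill th_sum with the pseudo-header checksum (no
		 * length); the hardware completes it per segment.
		 */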
1015 1.1 msaitoh th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
1016 1.1 msaitoh ip->ip_dst.s_addr, htons(IPPROTO_TCP));
1017 1.1 msaitoh type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
1018 1.1 msaitoh /* Tell transmit desc to also do IPv4 checksum. */
1019 1.1 msaitoh *olinfo_status |= IXGBE_TXD_POPTS_IXSM << 8;
1020 1.1 msaitoh break;
1021 1.1 msaitoh #endif
1022 1.28 msaitoh #ifdef INET6
1023 1.28 msaitoh case ETHERTYPE_IPV6:
1024 1.28 msaitoh ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
1025 1.28 msaitoh /* XXX-BZ For now we do not pretend to support ext. hdrs. */
1026 1.28 msaitoh if (ip6->ip6_nxt != IPPROTO_TCP)
1027 1.28 msaitoh return (ENXIO);
1028 1.28 msaitoh ip_hlen = sizeof(struct ip6_hdr);
1030 1.28 msaitoh th = (struct tcphdr *)((char *)ip6 + ip_hlen);
1031 1.28 msaitoh th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
1032 1.28 msaitoh &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
1033 1.28 msaitoh type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV6;
1034 1.28 msaitoh break;
1035 1.28 msaitoh #endif
1036 1.1 msaitoh default:
1037 1.1 msaitoh panic("%s: CSUM_TSO but no supported IP version (0x%04x)",
1038 1.1 msaitoh __func__, ntohs(eh_type));
1039 1.1 msaitoh break;
1040 1.1 msaitoh }
1041 1.1 msaitoh
1042 1.1 msaitoh ctxd = txr->next_avail_desc;
1043 1.28 msaitoh TXD = (struct ixgbe_adv_tx_context_desc *)&txr->tx_base[ctxd];
1044 1.1 msaitoh
1045 1.1 msaitoh tcp_hlen = th->th_off << 2;
1046 1.1 msaitoh
1047 1.1 msaitoh 	/* Payload length, stored into olinfo_status for the transmit descriptor */
1048 1.1 msaitoh paylen = mp->m_pkthdr.len - ehdrlen - ip_hlen - tcp_hlen;
1049 1.1 msaitoh
1050 1.1 msaitoh /* VLAN MACLEN IPLEN */
1051 1.29 knakahar if (vlan_has_tag(mp)) {
1052 1.29 knakahar vtag = htole16(vlan_get_tag(mp));
1053 1.28 msaitoh vlan_macip_lens |= (vtag << IXGBE_ADVTXD_VLAN_SHIFT);
1054 1.1 msaitoh }
1055 1.1 msaitoh
1056 1.1 msaitoh vlan_macip_lens |= ehdrlen << IXGBE_ADVTXD_MACLEN_SHIFT;
1057 1.1 msaitoh vlan_macip_lens |= ip_hlen;
1058 1.1 msaitoh TXD->vlan_macip_lens = htole32(vlan_macip_lens);
1059 1.1 msaitoh
1060 1.1 msaitoh /* ADV DTYPE TUCMD */
1061 1.1 msaitoh type_tucmd_mlhl |= IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
1062 1.1 msaitoh type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
1063 1.1 msaitoh TXD->type_tucmd_mlhl = htole32(type_tucmd_mlhl);
1064 1.1 msaitoh
1065 1.1 msaitoh /* MSS L4LEN IDX */
1066 1.1 msaitoh mss_l4len_idx |= (mp->m_pkthdr.segsz << IXGBE_ADVTXD_MSS_SHIFT);
1067 1.1 msaitoh mss_l4len_idx |= (tcp_hlen << IXGBE_ADVTXD_L4LEN_SHIFT);
1068 1.1 msaitoh TXD->mss_l4len_idx = htole32(mss_l4len_idx);
1069 1.1 msaitoh
1070 1.1 msaitoh TXD->seqnum_seed = htole32(0);
1071 1.1 msaitoh
1072 1.1 msaitoh if (++ctxd == txr->num_desc)
1073 1.1 msaitoh ctxd = 0;
1074 1.1 msaitoh
1075 1.1 msaitoh txr->tx_avail--;
1076 1.1 msaitoh txr->next_avail_desc = ctxd;
1077 1.1 msaitoh *cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;
1078 1.1 msaitoh *olinfo_status |= IXGBE_TXD_POPTS_TXSM << 8;
1079 1.1 msaitoh *olinfo_status |= paylen << IXGBE_ADVTXD_PAYLEN_SHIFT;
1080 1.1 msaitoh ++txr->tso_tx.ev_count;
1081 1.28 msaitoh
1082 1.1 msaitoh return (0);
1083 1.28 msaitoh } /* ixgbe_tso_setup */
1084 1.1 msaitoh
1085 1.3 msaitoh
1086 1.28 msaitoh /************************************************************************
1087 1.28 msaitoh * ixgbe_txeof
1088 1.1 msaitoh *
1089 1.28 msaitoh * Examine each tx_buffer in the used queue. If the hardware is done
1090 1.28 msaitoh * processing the packet then free associated resources. The
1091 1.28 msaitoh * tx_buffer is put back on the free queue.
1092 1.28 msaitoh ************************************************************************/
1093 1.32 msaitoh bool
1094 1.1 msaitoh ixgbe_txeof(struct tx_ring *txr)
1095 1.1 msaitoh {
1096 1.1 msaitoh struct adapter *adapter = txr->adapter;
1097 1.1 msaitoh struct ifnet *ifp = adapter->ifp;
1098 1.28 msaitoh struct ixgbe_tx_buf *buf;
1099 1.28 msaitoh union ixgbe_adv_tx_desc *txd;
1100 1.1 msaitoh u32 work, processed = 0;
1101 1.7 msaitoh u32 limit = adapter->tx_process_limit;
1102 1.1 msaitoh
1103 1.1 msaitoh KASSERT(mutex_owned(&txr->tx_mtx));
1104 1.1 msaitoh
1105 1.1 msaitoh #ifdef DEV_NETMAP
1106 1.28 msaitoh if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) &&
1107 1.28 msaitoh (adapter->ifp->if_capenable & IFCAP_NETMAP)) {
1108 1.28 msaitoh struct netmap_adapter *na = NA(adapter->ifp);
1109 1.1 msaitoh struct netmap_kring *kring = &na->tx_rings[txr->me];
1110 1.1 msaitoh txd = txr->tx_base;
1111 1.1 msaitoh bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
1112 1.1 msaitoh BUS_DMASYNC_POSTREAD);
1113 1.1 msaitoh /*
1114 1.1 msaitoh * In netmap mode, all the work is done in the context
1115 1.1 msaitoh * of the client thread. Interrupt handlers only wake up
1116 1.1 msaitoh * clients, which may be sleeping on individual rings
1117 1.1 msaitoh * or on a global resource for all rings.
1118 1.1 msaitoh * To implement tx interrupt mitigation, we wake up the client
1119 1.1 msaitoh * thread roughly every half ring, even if the NIC interrupts
1120 1.1 msaitoh * more frequently. This is implemented as follows:
1121 1.1 msaitoh * - ixgbe_txsync() sets kring->nr_kflags with the index of
1122 1.1 msaitoh * the slot that should wake up the thread (nkr_num_slots
1123 1.1 msaitoh * means the user thread should not be woken up);
1124 1.1 msaitoh * - the driver ignores tx interrupts unless netmap_mitigate=0
1125 1.1 msaitoh * or the slot has the DD bit set.
1126 1.1 msaitoh */
1127 1.1 msaitoh if (!netmap_mitigate ||
1128 1.1 msaitoh (kring->nr_kflags < kring->nkr_num_slots &&
1129 1.28 msaitoh txd[kring->nr_kflags].wb.status & IXGBE_TXD_STAT_DD)) {
1130 1.1 msaitoh netmap_tx_irq(ifp, txr->me);
1131 1.1 msaitoh }
1132 1.32 msaitoh return false;
1133 1.1 msaitoh }
1134 1.1 msaitoh #endif /* DEV_NETMAP */
1135 1.1 msaitoh
1136 1.1 msaitoh if (txr->tx_avail == txr->num_desc) {
1137 1.42 msaitoh txr->sending = false;
1138 1.32 msaitoh return false;
1139 1.1 msaitoh }
1140 1.1 msaitoh
1141 1.1 msaitoh /* Get work starting point */
1142 1.1 msaitoh work = txr->next_to_clean;
1143 1.1 msaitoh buf = &txr->tx_buffers[work];
1144 1.1 msaitoh txd = &txr->tx_base[work];
1145 1.1 msaitoh work -= txr->num_desc; /* The distance to ring end */
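	/*
	 * "work" is now negative; it counts up toward zero, and
	 * reaching zero means the index has wrapped to ring start.
	 */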
1146 1.28 msaitoh ixgbe_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
1147 1.1 msaitoh BUS_DMASYNC_POSTREAD);
1148 1.8 msaitoh
1149 1.1 msaitoh do {
1150 1.8 msaitoh union ixgbe_adv_tx_desc *eop = buf->eop;
1151 1.1 msaitoh if (eop == NULL) /* No work */
1152 1.1 msaitoh break;
1153 1.1 msaitoh
1154 1.1 msaitoh if ((eop->wb.status & IXGBE_TXD_STAT_DD) == 0)
1155 1.1 msaitoh break; /* I/O not complete */
1156 1.1 msaitoh
1157 1.1 msaitoh if (buf->m_head) {
1158 1.28 msaitoh txr->bytes += buf->m_head->m_pkthdr.len;
1159 1.28 msaitoh bus_dmamap_sync(txr->txtag->dt_dmat, buf->map,
1160 1.1 msaitoh 0, buf->m_head->m_pkthdr.len,
1161 1.1 msaitoh BUS_DMASYNC_POSTWRITE);
1162 1.28 msaitoh ixgbe_dmamap_unload(txr->txtag, buf->map);
1163 1.1 msaitoh m_freem(buf->m_head);
1164 1.1 msaitoh buf->m_head = NULL;
1165 1.1 msaitoh }
1166 1.1 msaitoh buf->eop = NULL;
1167 1.1 msaitoh ++txr->tx_avail;
1168 1.1 msaitoh
1169 1.1 msaitoh /* We clean the range if multi segment */
1170 1.1 msaitoh while (txd != eop) {
1171 1.1 msaitoh ++txd;
1172 1.1 msaitoh ++buf;
1173 1.1 msaitoh ++work;
1174 1.1 msaitoh /* wrap the ring? */
1175 1.1 msaitoh if (__predict_false(!work)) {
1176 1.1 msaitoh work -= txr->num_desc;
1177 1.1 msaitoh buf = txr->tx_buffers;
1178 1.1 msaitoh txd = txr->tx_base;
1179 1.1 msaitoh }
1180 1.1 msaitoh if (buf->m_head) {
1181 1.1 msaitoh txr->bytes +=
1182 1.1 msaitoh buf->m_head->m_pkthdr.len;
1183 1.1 msaitoh bus_dmamap_sync(txr->txtag->dt_dmat,
1184 1.1 msaitoh buf->map,
1185 1.1 msaitoh 0, buf->m_head->m_pkthdr.len,
1186 1.1 msaitoh BUS_DMASYNC_POSTWRITE);
1187 1.1 msaitoh ixgbe_dmamap_unload(txr->txtag,
1188 1.1 msaitoh buf->map);
1189 1.1 msaitoh m_freem(buf->m_head);
1190 1.1 msaitoh buf->m_head = NULL;
1191 1.1 msaitoh }
1192 1.1 msaitoh ++txr->tx_avail;
1193 1.1 msaitoh buf->eop = NULL;
1194 1.1 msaitoh
1195 1.1 msaitoh }
1196 1.1 msaitoh ++txr->packets;
1197 1.1 msaitoh ++processed;
1198 1.1 msaitoh ++ifp->if_opackets;
1199 1.1 msaitoh
1200 1.1 msaitoh /* Try the next packet */
1201 1.1 msaitoh ++txd;
1202 1.1 msaitoh ++buf;
1203 1.1 msaitoh ++work;
1204 1.1 msaitoh /* reset with a wrap */
1205 1.1 msaitoh if (__predict_false(!work)) {
1206 1.1 msaitoh work -= txr->num_desc;
1207 1.1 msaitoh buf = txr->tx_buffers;
1208 1.1 msaitoh txd = txr->tx_base;
1209 1.1 msaitoh }
1210 1.1 msaitoh prefetch(txd);
1211 1.1 msaitoh } while (__predict_true(--limit));
1212 1.1 msaitoh
1213 1.1 msaitoh ixgbe_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
1214 1.1 msaitoh BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1215 1.1 msaitoh
1216 1.1 msaitoh work += txr->num_desc;
1217 1.1 msaitoh txr->next_to_clean = work;
1218 1.1 msaitoh
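	/*
	 * Everything has been reclaimed, so the ring is idle; clearing
	 * "sending" presumably disarms the TX hang detection.
	 */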
1219 1.43 msaitoh if (txr->tx_avail == txr->num_desc)
1220 1.43 msaitoh txr->sending = false;
1221 1.43 msaitoh
1222 1.32 msaitoh return ((limit > 0) ? false : true);
1223 1.28 msaitoh } /* ixgbe_txeof */
1224 1.1 msaitoh
1225 1.28 msaitoh /************************************************************************
1226 1.28 msaitoh * ixgbe_rsc_count
1227 1.28 msaitoh *
1228 1.28 msaitoh * Used to detect a descriptor that has been merged by Hardware RSC.
1229 1.28 msaitoh ************************************************************************/
1230 1.1 msaitoh static inline u32
1231 1.1 msaitoh ixgbe_rsc_count(union ixgbe_adv_rx_desc *rx)
1232 1.1 msaitoh {
1233 1.1 msaitoh return (le32toh(rx->wb.lower.lo_dword.data) &
1234 1.1 msaitoh IXGBE_RXDADV_RSCCNT_MASK) >> IXGBE_RXDADV_RSCCNT_SHIFT;
1235 1.28 msaitoh } /* ixgbe_rsc_count */
1236 1.1 msaitoh
1237 1.28 msaitoh /************************************************************************
1238 1.28 msaitoh * ixgbe_setup_hw_rsc
1239 1.1 msaitoh *
1240 1.28 msaitoh * Initialize Hardware RSC (LRO) feature on 82599
1241 1.28 msaitoh  * Initialize the Hardware RSC (LRO) feature on 82599
1242 1.28 msaitoh  * for an RX ring; it is toggled by the LRO capability
1243 1.28 msaitoh *
1244 1.28 msaitoh * NOTE: Since this HW feature only works with IPv4 and
1245 1.28 msaitoh * testing has shown soft LRO to be as effective,
1246 1.28 msaitoh * this feature will be disabled by default.
1247 1.28 msaitoh ************************************************************************/
1248 1.1 msaitoh static void
1249 1.1 msaitoh ixgbe_setup_hw_rsc(struct rx_ring *rxr)
1250 1.1 msaitoh {
1251 1.28 msaitoh struct adapter *adapter = rxr->adapter;
1252 1.28 msaitoh struct ixgbe_hw *hw = &adapter->hw;
1253 1.28 msaitoh u32 rscctrl, rdrxctl;
1254 1.1 msaitoh
1255 1.1 msaitoh /* If turning LRO/RSC off we need to disable it */
1256 1.1 msaitoh 	if ((adapter->ifp->if_capenable & IFCAP_LRO) == 0) {
1257 1.1 msaitoh 		rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(rxr->me));
1258 1.1 msaitoh 		rscctrl &= ~IXGBE_RSCCTL_RSCEN;
		/*
		 * Write the cleared enable bit back; otherwise the
		 * read-modify above has no effect on the chip.
		 */
		IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(rxr->me), rscctrl);
1259 1.1 msaitoh 		return;
1260 1.1 msaitoh 	}
1261 1.1 msaitoh
1262 1.1 msaitoh rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
1263 1.1 msaitoh rdrxctl &= ~IXGBE_RDRXCTL_RSCFRSTSIZE;
1264 1.28 msaitoh #ifdef DEV_NETMAP
1265 1.28 msaitoh /* Always strip CRC unless Netmap disabled it */
1266 1.28 msaitoh if (!(adapter->feat_en & IXGBE_FEATURE_NETMAP) ||
1267 1.28 msaitoh !(adapter->ifp->if_capenable & IFCAP_NETMAP) ||
1268 1.28 msaitoh ix_crcstrip)
1269 1.1 msaitoh #endif /* DEV_NETMAP */
1270 1.28 msaitoh rdrxctl |= IXGBE_RDRXCTL_CRCSTRIP;
1271 1.1 msaitoh rdrxctl |= IXGBE_RDRXCTL_RSCACKC;
1272 1.1 msaitoh IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);
1273 1.1 msaitoh
1274 1.1 msaitoh rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(rxr->me));
1275 1.1 msaitoh rscctrl |= IXGBE_RSCCTL_RSCEN;
1276 1.1 msaitoh /*
1277 1.28 msaitoh * Limit the total number of descriptors that
1278 1.28 msaitoh * can be combined, so it does not exceed 64K
1279 1.28 msaitoh */
1280 1.1 msaitoh if (rxr->mbuf_sz == MCLBYTES)
1281 1.1 msaitoh rscctrl |= IXGBE_RSCCTL_MAXDESC_16;
1282 1.1 msaitoh else if (rxr->mbuf_sz == MJUMPAGESIZE)
1283 1.1 msaitoh rscctrl |= IXGBE_RSCCTL_MAXDESC_8;
1284 1.1 msaitoh else if (rxr->mbuf_sz == MJUM9BYTES)
1285 1.1 msaitoh rscctrl |= IXGBE_RSCCTL_MAXDESC_4;
1286 1.1 msaitoh else /* Using 16K cluster */
1287 1.1 msaitoh rscctrl |= IXGBE_RSCCTL_MAXDESC_1;
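	/*
	 * For reference (assuming 4 KB MJUMPAGESIZE pages), the cap on
	 * one coalesced frame works out to roughly:
	 *   16 * 2 KB = 32 KB,  8 * 4 KB = 32 KB,
	 *    4 * 9 KB = 36 KB,  1 * 16 KB = 16 KB,
	 * all safely under the 64 KB limit noted above.
	 */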
1288 1.1 msaitoh
1289 1.1 msaitoh IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(rxr->me), rscctrl);
1290 1.1 msaitoh
1291 1.1 msaitoh /* Enable TCP header recognition */
1292 1.1 msaitoh IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0),
1293 1.28 msaitoh (IXGBE_READ_REG(hw, IXGBE_PSRTYPE(0)) | IXGBE_PSRTYPE_TCPHDR));
1294 1.1 msaitoh
1295 1.1 msaitoh /* Disable RSC for ACK packets */
1296 1.1 msaitoh IXGBE_WRITE_REG(hw, IXGBE_RSCDBU,
1297 1.1 msaitoh (IXGBE_RSCDBU_RSCACKDIS | IXGBE_READ_REG(hw, IXGBE_RSCDBU)));
1298 1.1 msaitoh
1299 1.1 msaitoh rxr->hw_rsc = TRUE;
1300 1.28 msaitoh } /* ixgbe_setup_hw_rsc */
1301 1.8 msaitoh
1302 1.28 msaitoh /************************************************************************
1303 1.28 msaitoh * ixgbe_refresh_mbufs
1304 1.1 msaitoh *
1305 1.28 msaitoh * Refresh mbuf buffers for RX descriptor rings
1306 1.28 msaitoh  *   - now keeps its own state so discards due to resource
1307 1.28 msaitoh  *     exhaustion are unnecessary; if an mbuf cannot be obtained
1308 1.28 msaitoh  *     it just returns, keeping its placeholder, so it can simply
1309 1.28 msaitoh  *     be recalled to try again.
1310 1.28 msaitoh ************************************************************************/
1311 1.1 msaitoh static void
1312 1.1 msaitoh ixgbe_refresh_mbufs(struct rx_ring *rxr, int limit)
1313 1.1 msaitoh {
1314 1.28 msaitoh struct adapter *adapter = rxr->adapter;
1315 1.28 msaitoh struct ixgbe_rx_buf *rxbuf;
1316 1.28 msaitoh struct mbuf *mp;
1317 1.28 msaitoh int i, j, error;
1318 1.28 msaitoh bool refreshed = false;
1319 1.1 msaitoh
1320 1.1 msaitoh i = j = rxr->next_to_refresh;
1321 1.1 msaitoh /* Control the loop with one beyond */
1322 1.1 msaitoh if (++j == rxr->num_desc)
1323 1.1 msaitoh j = 0;
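	/*
	 * Example (illustrative): with num_desc = 4 and
	 * next_to_refresh = 3, we start at i = 3 with j = 0 and
	 * refresh slots 3, 0, 1, ... until j catches up with 'limit',
	 * the next descriptor the caller will look at.
	 */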
1324 1.1 msaitoh
1325 1.1 msaitoh while (j != limit) {
1326 1.1 msaitoh rxbuf = &rxr->rx_buffers[i];
1327 1.1 msaitoh if (rxbuf->buf == NULL) {
1328 1.1 msaitoh mp = ixgbe_getjcl(&adapter->jcl_head, M_NOWAIT,
1329 1.1 msaitoh MT_DATA, M_PKTHDR, rxr->mbuf_sz);
1330 1.1 msaitoh if (mp == NULL) {
1331 1.1 msaitoh rxr->no_jmbuf.ev_count++;
1332 1.1 msaitoh goto update;
1333 1.1 msaitoh }
1334 1.1 msaitoh if (adapter->max_frame_size <= (MCLBYTES - ETHER_ALIGN))
1335 1.1 msaitoh m_adj(mp, ETHER_ALIGN);
1336 1.1 msaitoh } else
1337 1.1 msaitoh mp = rxbuf->buf;
1338 1.1 msaitoh
1339 1.1 msaitoh mp->m_pkthdr.len = mp->m_len = rxr->mbuf_sz;
1340 1.1 msaitoh
1341 1.1 msaitoh /* If we're dealing with an mbuf that was copied rather
1342 1.1 msaitoh * than replaced, there's no need to go through busdma.
1343 1.1 msaitoh */
1344 1.1 msaitoh if ((rxbuf->flags & IXGBE_RX_COPY) == 0) {
1345 1.1 msaitoh /* Get the memory mapping */
1346 1.4 msaitoh ixgbe_dmamap_unload(rxr->ptag, rxbuf->pmap);
1347 1.1 msaitoh error = bus_dmamap_load_mbuf(rxr->ptag->dt_dmat,
1348 1.1 msaitoh rxbuf->pmap, mp, BUS_DMA_NOWAIT);
1349 1.1 msaitoh if (error != 0) {
1350 1.28 msaitoh printf("Refresh mbufs: payload dmamap load failure - %d\n", error);
1351 1.1 msaitoh m_free(mp);
1352 1.1 msaitoh rxbuf->buf = NULL;
1353 1.1 msaitoh goto update;
1354 1.1 msaitoh }
1355 1.1 msaitoh rxbuf->buf = mp;
1356 1.1 msaitoh bus_dmamap_sync(rxr->ptag->dt_dmat, rxbuf->pmap,
1357 1.1 msaitoh 0, mp->m_pkthdr.len, BUS_DMASYNC_PREREAD);
1358 1.1 msaitoh rxbuf->addr = rxr->rx_base[i].read.pkt_addr =
1359 1.1 msaitoh htole64(rxbuf->pmap->dm_segs[0].ds_addr);
1360 1.1 msaitoh } else {
1361 1.1 msaitoh rxr->rx_base[i].read.pkt_addr = rxbuf->addr;
1362 1.1 msaitoh rxbuf->flags &= ~IXGBE_RX_COPY;
1363 1.1 msaitoh }
1364 1.1 msaitoh
1365 1.1 msaitoh refreshed = true;
1366 1.1 msaitoh /* Next is precalculated */
1367 1.1 msaitoh i = j;
1368 1.1 msaitoh rxr->next_to_refresh = i;
1369 1.1 msaitoh if (++j == rxr->num_desc)
1370 1.1 msaitoh j = 0;
1371 1.1 msaitoh }
1372 1.28 msaitoh
1373 1.1 msaitoh update:
1374 1.1 msaitoh if (refreshed) /* Update hardware tail index */
1375 1.28 msaitoh IXGBE_WRITE_REG(&adapter->hw, rxr->tail, rxr->next_to_refresh);
1376 1.28 msaitoh
1377 1.1 msaitoh return;
1378 1.28 msaitoh } /* ixgbe_refresh_mbufs */
1379 1.1 msaitoh
1380 1.28 msaitoh /************************************************************************
1381 1.28 msaitoh * ixgbe_allocate_receive_buffers
1382 1.1 msaitoh *
1383 1.28 msaitoh * Allocate memory for rx_buffer structures. Since we use one
1384 1.28 msaitoh * rx_buffer per received packet, the maximum number of rx_buffer's
1385 1.28 msaitoh * that we'll need is equal to the number of receive descriptors
1386 1.28 msaitoh * that we've allocated.
1387 1.28 msaitoh ************************************************************************/
1388 1.28 msaitoh static int
1389 1.1 msaitoh ixgbe_allocate_receive_buffers(struct rx_ring *rxr)
1390 1.1 msaitoh {
1391 1.28 msaitoh struct adapter *adapter = rxr->adapter;
1392 1.28 msaitoh device_t dev = adapter->dev;
1393 1.28 msaitoh struct ixgbe_rx_buf *rxbuf;
1394 1.28 msaitoh int bsize, error;
1395 1.1 msaitoh
1396 1.1 msaitoh bsize = sizeof(struct ixgbe_rx_buf) * rxr->num_desc;
1397 1.28 msaitoh rxr->rx_buffers = (struct ixgbe_rx_buf *)malloc(bsize, M_DEVBUF,
1398 1.28 msaitoh M_NOWAIT | M_ZERO);
1399 1.28 msaitoh if (rxr->rx_buffers == NULL) {
1400 1.1 msaitoh aprint_error_dev(dev, "Unable to allocate rx_buffer memory\n");
1401 1.1 msaitoh error = ENOMEM;
1402 1.1 msaitoh goto fail;
1403 1.1 msaitoh }
1404 1.1 msaitoh
1405 1.28 msaitoh error = ixgbe_dma_tag_create(
1406 1.28 msaitoh /* parent */ adapter->osdep.dmat,
1407 1.28 msaitoh /* alignment */ 1,
1408 1.28 msaitoh /* bounds */ 0,
1409 1.28 msaitoh /* maxsize */ MJUM16BYTES,
1410 1.28 msaitoh /* nsegments */ 1,
1411 1.28 msaitoh /* maxsegsize */ MJUM16BYTES,
1412 1.28 msaitoh /* flags */ 0,
1413 1.28 msaitoh &rxr->ptag);
1414 1.28 msaitoh if (error != 0) {
1415 1.1 msaitoh aprint_error_dev(dev, "Unable to create RX DMA tag\n");
1416 1.1 msaitoh goto fail;
1417 1.1 msaitoh }
1418 1.1 msaitoh
1419 1.5 msaitoh 	for (int i = 0; i < rxr->num_desc; i++) {
1420 1.1 msaitoh rxbuf = &rxr->rx_buffers[i];
1421 1.4 msaitoh error = ixgbe_dmamap_create(rxr->ptag, 0, &rxbuf->pmap);
1422 1.1 msaitoh if (error) {
1423 1.1 msaitoh aprint_error_dev(dev, "Unable to create RX dma map\n");
1424 1.1 msaitoh goto fail;
1425 1.1 msaitoh }
1426 1.1 msaitoh }
1427 1.1 msaitoh
1428 1.1 msaitoh return (0);
1429 1.1 msaitoh
1430 1.1 msaitoh fail:
1431 1.1 msaitoh /* Frees all, but can handle partial completion */
1432 1.1 msaitoh ixgbe_free_receive_structures(adapter);
1433 1.28 msaitoh
1434 1.1 msaitoh return (error);
1435 1.28 msaitoh } /* ixgbe_allocate_receive_buffers */
1436 1.1 msaitoh
1437 1.28 msaitoh /************************************************************************
1438 1.30 msaitoh * ixgbe_free_receive_ring
1439 1.28 msaitoh ************************************************************************/
1440 1.28 msaitoh static void
1441 1.1 msaitoh ixgbe_free_receive_ring(struct rx_ring *rxr)
1442 1.27 msaitoh {
1443 1.5 msaitoh for (int i = 0; i < rxr->num_desc; i++) {
1444 1.27 msaitoh ixgbe_rx_discard(rxr, i);
1445 1.1 msaitoh }
1446 1.28 msaitoh } /* ixgbe_free_receive_ring */
1447 1.1 msaitoh
1448 1.28 msaitoh /************************************************************************
1449 1.28 msaitoh * ixgbe_setup_receive_ring
1450 1.1 msaitoh *
1451 1.28 msaitoh * Initialize a receive ring and its buffers.
1452 1.28 msaitoh ************************************************************************/
1453 1.1 msaitoh static int
1454 1.1 msaitoh ixgbe_setup_receive_ring(struct rx_ring *rxr)
1455 1.1 msaitoh {
1456 1.28 msaitoh struct adapter *adapter;
1457 1.28 msaitoh struct ixgbe_rx_buf *rxbuf;
1458 1.1 msaitoh #ifdef LRO
1459 1.28 msaitoh struct ifnet *ifp;
1460 1.28 msaitoh struct lro_ctrl *lro = &rxr->lro;
1461 1.1 msaitoh #endif /* LRO */
1462 1.1 msaitoh #ifdef DEV_NETMAP
1463 1.1 msaitoh struct netmap_adapter *na = NA(rxr->adapter->ifp);
1464 1.28 msaitoh struct netmap_slot *slot;
1465 1.1 msaitoh #endif /* DEV_NETMAP */
1466 1.28 msaitoh int rsize, error = 0;
1467 1.1 msaitoh
1468 1.1 msaitoh adapter = rxr->adapter;
1469 1.1 msaitoh #ifdef LRO
1470 1.1 msaitoh ifp = adapter->ifp;
1471 1.1 msaitoh #endif /* LRO */
1472 1.1 msaitoh
1473 1.1 msaitoh /* Clear the ring contents */
1474 1.1 msaitoh IXGBE_RX_LOCK(rxr);
1475 1.28 msaitoh
1476 1.1 msaitoh #ifdef DEV_NETMAP
1477 1.28 msaitoh if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
1478 1.28 msaitoh slot = netmap_reset(na, NR_RX, rxr->me, 0);
1479 1.1 msaitoh #endif /* DEV_NETMAP */
1480 1.28 msaitoh
1481 1.1 msaitoh rsize = roundup2(adapter->num_rx_desc *
1482 1.1 msaitoh sizeof(union ixgbe_adv_rx_desc), DBA_ALIGN);
1483 1.1 msaitoh bzero((void *)rxr->rx_base, rsize);
1484 1.1 msaitoh /* Cache the size */
1485 1.1 msaitoh rxr->mbuf_sz = adapter->rx_mbuf_sz;
1486 1.1 msaitoh
1487 1.1 msaitoh /* Free current RX buffer structs and their mbufs */
1488 1.1 msaitoh ixgbe_free_receive_ring(rxr);
1489 1.1 msaitoh
1490 1.1 msaitoh /* Now replenish the mbufs */
1491 1.1 msaitoh for (int j = 0; j != rxr->num_desc; ++j) {
1492 1.28 msaitoh struct mbuf *mp;
1493 1.1 msaitoh
1494 1.1 msaitoh rxbuf = &rxr->rx_buffers[j];
1495 1.28 msaitoh
1496 1.1 msaitoh #ifdef DEV_NETMAP
1497 1.1 msaitoh /*
1498 1.1 msaitoh * In netmap mode, fill the map and set the buffer
1499 1.1 msaitoh * address in the NIC ring, considering the offset
1500 1.1 msaitoh * between the netmap and NIC rings (see comment in
1501 1.1 msaitoh 	 * ixgbe_setup_transmit_ring()). No need to allocate
1502 1.1 msaitoh * an mbuf, so end the block with a continue;
1503 1.1 msaitoh */
1504 1.28 msaitoh if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) && slot) {
1505 1.1 msaitoh int sj = netmap_idx_n2k(&na->rx_rings[rxr->me], j);
1506 1.1 msaitoh uint64_t paddr;
1507 1.1 msaitoh void *addr;
1508 1.1 msaitoh
1509 1.1 msaitoh addr = PNMB(na, slot + sj, &paddr);
1510 1.1 msaitoh netmap_load_map(na, rxr->ptag, rxbuf->pmap, addr);
1511 1.1 msaitoh /* Update descriptor and the cached value */
1512 1.1 msaitoh rxr->rx_base[j].read.pkt_addr = htole64(paddr);
1513 1.1 msaitoh rxbuf->addr = htole64(paddr);
1514 1.1 msaitoh continue;
1515 1.1 msaitoh }
1516 1.1 msaitoh #endif /* DEV_NETMAP */
1517 1.28 msaitoh
1518 1.28 msaitoh rxbuf->flags = 0;
1519 1.1 msaitoh rxbuf->buf = ixgbe_getjcl(&adapter->jcl_head, M_NOWAIT,
1520 1.1 msaitoh MT_DATA, M_PKTHDR, adapter->rx_mbuf_sz);
1521 1.1 msaitoh if (rxbuf->buf == NULL) {
1522 1.1 msaitoh error = ENOBUFS;
1523 1.28 msaitoh goto fail;
1524 1.1 msaitoh }
1525 1.1 msaitoh mp = rxbuf->buf;
1526 1.1 msaitoh mp->m_pkthdr.len = mp->m_len = rxr->mbuf_sz;
1527 1.1 msaitoh /* Get the memory mapping */
1528 1.28 msaitoh error = bus_dmamap_load_mbuf(rxr->ptag->dt_dmat, rxbuf->pmap,
1529 1.28 msaitoh mp, BUS_DMA_NOWAIT);
1530 1.1 msaitoh if (error != 0)
1531 1.1 msaitoh goto fail;
1532 1.1 msaitoh bus_dmamap_sync(rxr->ptag->dt_dmat, rxbuf->pmap,
1533 1.1 msaitoh 0, adapter->rx_mbuf_sz, BUS_DMASYNC_PREREAD);
1534 1.1 msaitoh /* Update the descriptor and the cached value */
1535 1.1 msaitoh rxr->rx_base[j].read.pkt_addr =
1536 1.1 msaitoh htole64(rxbuf->pmap->dm_segs[0].ds_addr);
1537 1.1 msaitoh rxbuf->addr = htole64(rxbuf->pmap->dm_segs[0].ds_addr);
1538 1.1 msaitoh }
1539 1.1 msaitoh
1540 1.1 msaitoh
1541 1.1 msaitoh /* Setup our descriptor indices */
1542 1.1 msaitoh rxr->next_to_check = 0;
1543 1.1 msaitoh rxr->next_to_refresh = 0;
1544 1.1 msaitoh rxr->lro_enabled = FALSE;
1545 1.1 msaitoh rxr->rx_copies.ev_count = 0;
1546 1.13 msaitoh #if 0 /* NetBSD */
1547 1.1 msaitoh rxr->rx_bytes.ev_count = 0;
1548 1.13 msaitoh #if 1 /* Fix inconsistency */
1549 1.13 msaitoh rxr->rx_packets.ev_count = 0;
1550 1.13 msaitoh #endif
1551 1.13 msaitoh #endif
1552 1.1 msaitoh rxr->vtag_strip = FALSE;
1553 1.1 msaitoh
1554 1.1 msaitoh ixgbe_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
1555 1.1 msaitoh BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1556 1.1 msaitoh
1557 1.1 msaitoh /*
1558 1.28 msaitoh * Now set up the LRO interface
1559 1.28 msaitoh */
1560 1.1 msaitoh if (ixgbe_rsc_enable)
1561 1.1 msaitoh ixgbe_setup_hw_rsc(rxr);
1562 1.1 msaitoh #ifdef LRO
1563 1.1 msaitoh else if (ifp->if_capenable & IFCAP_LRO) {
1564 1.1 msaitoh device_t dev = adapter->dev;
1565 1.1 msaitoh int err = tcp_lro_init(lro);
1566 1.1 msaitoh if (err) {
1567 1.1 msaitoh device_printf(dev, "LRO Initialization failed!\n");
1568 1.1 msaitoh goto fail;
1569 1.1 msaitoh }
1570 1.1 msaitoh INIT_DEBUGOUT("RX Soft LRO Initialized\n");
1571 1.1 msaitoh rxr->lro_enabled = TRUE;
1572 1.1 msaitoh lro->ifp = adapter->ifp;
1573 1.1 msaitoh }
1574 1.1 msaitoh #endif /* LRO */
1575 1.1 msaitoh
1576 1.1 msaitoh IXGBE_RX_UNLOCK(rxr);
1577 1.28 msaitoh
1578 1.1 msaitoh return (0);
1579 1.1 msaitoh
1580 1.1 msaitoh fail:
1581 1.1 msaitoh ixgbe_free_receive_ring(rxr);
1582 1.1 msaitoh IXGBE_RX_UNLOCK(rxr);
1583 1.28 msaitoh
1584 1.1 msaitoh return (error);
1585 1.28 msaitoh } /* ixgbe_setup_receive_ring */
1586 1.1 msaitoh
1587 1.28 msaitoh /************************************************************************
1588 1.28 msaitoh * ixgbe_setup_receive_structures - Initialize all receive rings.
1589 1.28 msaitoh ************************************************************************/
1590 1.1 msaitoh int
1591 1.1 msaitoh ixgbe_setup_receive_structures(struct adapter *adapter)
1592 1.1 msaitoh {
1593 1.1 msaitoh struct rx_ring *rxr = adapter->rx_rings;
1594 1.28 msaitoh int j;
1595 1.1 msaitoh
1596 1.30 msaitoh /*
1597 1.30 msaitoh * Now reinitialize our supply of jumbo mbufs. The number
1598 1.30 msaitoh * or size of jumbo mbufs may have changed.
1599 1.30 msaitoh * Assume all of rxr->ptag are the same.
1600 1.30 msaitoh */
1601 1.41 msaitoh ixgbe_jcl_reinit(adapter, rxr->ptag->dt_dmat,
1602 1.30 msaitoh (2 * adapter->num_rx_desc) * adapter->num_queues,
1603 1.30 msaitoh adapter->rx_mbuf_sz);
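	/*
	 * Worked example (numbers illustrative; both knobs are
	 * tunable): with 1024 RX descriptors and 4 queues this
	 * pre-allocates 2 * 1024 * 4 = 8192 clusters of rx_mbuf_sz
	 * bytes each.
	 */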
1604 1.30 msaitoh
1605 1.1 msaitoh for (j = 0; j < adapter->num_queues; j++, rxr++)
1606 1.1 msaitoh if (ixgbe_setup_receive_ring(rxr))
1607 1.1 msaitoh goto fail;
1608 1.1 msaitoh
1609 1.1 msaitoh return (0);
1610 1.1 msaitoh fail:
1611 1.1 msaitoh /*
1612 1.1 msaitoh 	 * Free RX buffers allocated so far; we will only handle
1613 1.1 msaitoh 	 * the rings that completed, as the failing case will have
1614 1.1 msaitoh 	 * cleaned up for itself. 'j' failed, so it's the terminus.
1615 1.1 msaitoh */
1616 1.1 msaitoh for (int i = 0; i < j; ++i) {
1617 1.1 msaitoh rxr = &adapter->rx_rings[i];
1618 1.27 msaitoh IXGBE_RX_LOCK(rxr);
1619 1.1 msaitoh ixgbe_free_receive_ring(rxr);
1620 1.27 msaitoh IXGBE_RX_UNLOCK(rxr);
1621 1.1 msaitoh }
1622 1.1 msaitoh
1623 1.1 msaitoh return (ENOBUFS);
1624 1.28 msaitoh } /* ixgbe_setup_receive_structures */
1625 1.1 msaitoh
1626 1.3 msaitoh
1627 1.28 msaitoh /************************************************************************
1628 1.28 msaitoh * ixgbe_free_receive_structures - Free all receive rings.
1629 1.28 msaitoh ************************************************************************/
1630 1.1 msaitoh void
1631 1.1 msaitoh ixgbe_free_receive_structures(struct adapter *adapter)
1632 1.1 msaitoh {
1633 1.1 msaitoh struct rx_ring *rxr = adapter->rx_rings;
1634 1.1 msaitoh
1635 1.1 msaitoh INIT_DEBUGOUT("ixgbe_free_receive_structures: begin");
1636 1.1 msaitoh
1637 1.1 msaitoh for (int i = 0; i < adapter->num_queues; i++, rxr++) {
1638 1.1 msaitoh ixgbe_free_receive_buffers(rxr);
1639 1.1 msaitoh #ifdef LRO
1640 1.1 msaitoh /* Free LRO memory */
1641 1.28 msaitoh tcp_lro_free(&rxr->lro);
1642 1.1 msaitoh #endif /* LRO */
1643 1.1 msaitoh /* Free the ring memory as well */
1644 1.1 msaitoh ixgbe_dma_free(adapter, &rxr->rxdma);
1645 1.1 msaitoh IXGBE_RX_LOCK_DESTROY(rxr);
1646 1.1 msaitoh }
1647 1.1 msaitoh
1648 1.1 msaitoh free(adapter->rx_rings, M_DEVBUF);
1649 1.28 msaitoh } /* ixgbe_free_receive_structures */
1650 1.1 msaitoh
1651 1.1 msaitoh
1652 1.28 msaitoh /************************************************************************
1653 1.28 msaitoh * ixgbe_free_receive_buffers - Free receive ring data structures
1654 1.28 msaitoh ************************************************************************/
1655 1.1 msaitoh static void
1656 1.1 msaitoh ixgbe_free_receive_buffers(struct rx_ring *rxr)
1657 1.1 msaitoh {
1658 1.28 msaitoh struct adapter *adapter = rxr->adapter;
1659 1.28 msaitoh struct ixgbe_rx_buf *rxbuf;
1660 1.1 msaitoh
1661 1.1 msaitoh INIT_DEBUGOUT("ixgbe_free_receive_buffers: begin");
1662 1.1 msaitoh
1663 1.1 msaitoh /* Cleanup any existing buffers */
1664 1.1 msaitoh if (rxr->rx_buffers != NULL) {
1665 1.1 msaitoh for (int i = 0; i < adapter->num_rx_desc; i++) {
1666 1.1 msaitoh rxbuf = &rxr->rx_buffers[i];
1667 1.27 msaitoh ixgbe_rx_discard(rxr, i);
1668 1.1 msaitoh if (rxbuf->pmap != NULL) {
1669 1.1 msaitoh ixgbe_dmamap_destroy(rxr->ptag, rxbuf->pmap);
1670 1.1 msaitoh rxbuf->pmap = NULL;
1671 1.1 msaitoh }
1672 1.1 msaitoh }
1673 1.1 msaitoh if (rxr->rx_buffers != NULL) {
1674 1.1 msaitoh free(rxr->rx_buffers, M_DEVBUF);
1675 1.1 msaitoh rxr->rx_buffers = NULL;
1676 1.1 msaitoh }
1677 1.1 msaitoh }
1678 1.1 msaitoh
1679 1.1 msaitoh if (rxr->ptag != NULL) {
1680 1.1 msaitoh ixgbe_dma_tag_destroy(rxr->ptag);
1681 1.1 msaitoh rxr->ptag = NULL;
1682 1.1 msaitoh }
1683 1.1 msaitoh
1684 1.1 msaitoh return;
1685 1.28 msaitoh } /* ixgbe_free_receive_buffers */
1686 1.1 msaitoh
1687 1.28 msaitoh /************************************************************************
1688 1.28 msaitoh * ixgbe_rx_input
1689 1.28 msaitoh ************************************************************************/
1690 1.1 msaitoh static __inline void
1691 1.28 msaitoh ixgbe_rx_input(struct rx_ring *rxr, struct ifnet *ifp, struct mbuf *m,
1692 1.28 msaitoh u32 ptype)
1693 1.1 msaitoh {
1694 1.20 msaitoh struct adapter *adapter = ifp->if_softc;
1695 1.1 msaitoh
1696 1.1 msaitoh #ifdef LRO
1697 1.1 msaitoh struct ethercom *ec = &adapter->osdep.ec;
1698 1.1 msaitoh
1699 1.28 msaitoh /*
1700 1.28 msaitoh 	 * At the moment LRO is only for IP/TCP packets whose TCP checksum
1701 1.28 msaitoh 	 * has been computed by hardware, and which carry no VLAN tag in the
1702 1.28 msaitoh 	 * Ethernet header. For IPv6 we do not yet support extension headers.
1703 1.28 msaitoh */
1704 1.1 msaitoh if (rxr->lro_enabled &&
1705 1.1 msaitoh (ec->ec_capenable & ETHERCAP_VLAN_HWTAGGING) != 0 &&
1706 1.1 msaitoh (ptype & IXGBE_RXDADV_PKTTYPE_ETQF) == 0 &&
1707 1.1 msaitoh ((ptype & (IXGBE_RXDADV_PKTTYPE_IPV4 | IXGBE_RXDADV_PKTTYPE_TCP)) ==
1708 1.1 msaitoh (IXGBE_RXDADV_PKTTYPE_IPV4 | IXGBE_RXDADV_PKTTYPE_TCP) ||
1709 1.1 msaitoh (ptype & (IXGBE_RXDADV_PKTTYPE_IPV6 | IXGBE_RXDADV_PKTTYPE_TCP)) ==
1710 1.1 msaitoh (IXGBE_RXDADV_PKTTYPE_IPV6 | IXGBE_RXDADV_PKTTYPE_TCP)) &&
1711 1.1 msaitoh (m->m_pkthdr.csum_flags & (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) ==
1712 1.1 msaitoh (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) {
1713 1.1 msaitoh 		/*
1714 1.1 msaitoh 		 * Send to the stack if:
1715 1.1 msaitoh 		 *  - LRO not enabled, or
1716 1.1 msaitoh 		 *  - no LRO resources, or
1717 1.1 msaitoh 		 *  - lro enqueue fails
1718 1.1 msaitoh 		 */
1719 1.1 msaitoh if (rxr->lro.lro_cnt != 0)
1720 1.1 msaitoh if (tcp_lro_rx(&rxr->lro, m, 0) == 0)
1721 1.1 msaitoh return;
1722 1.1 msaitoh }
1723 1.1 msaitoh #endif /* LRO */
1724 1.1 msaitoh
1725 1.20 msaitoh if_percpuq_enqueue(adapter->ipq, m);
1726 1.28 msaitoh } /* ixgbe_rx_input */
1727 1.1 msaitoh
1728 1.28 msaitoh /************************************************************************
1729 1.28 msaitoh * ixgbe_rx_discard
1730 1.28 msaitoh ************************************************************************/
1731 1.1 msaitoh static __inline void
1732 1.1 msaitoh ixgbe_rx_discard(struct rx_ring *rxr, int i)
1733 1.1 msaitoh {
1734 1.28 msaitoh struct ixgbe_rx_buf *rbuf;
1735 1.1 msaitoh
1736 1.1 msaitoh rbuf = &rxr->rx_buffers[i];
1737 1.1 msaitoh
1738 1.1 msaitoh /*
1739 1.28 msaitoh * With advanced descriptors the writeback
1740 1.28 msaitoh 	 * With advanced descriptors the writeback clobbers the buffer
1741 1.28 msaitoh 	 * addresses, so it's easier to just free the existing mbufs and
1742 1.28 msaitoh 	 * take the normal refresh path to get new buffers and mapping.
1745 1.1 msaitoh
1746 1.26 msaitoh if (rbuf->fmp != NULL) {/* Partial chain ? */
1747 1.27 msaitoh bus_dmamap_sync(rxr->ptag->dt_dmat, rbuf->pmap, 0,
1748 1.27 msaitoh rbuf->buf->m_pkthdr.len, BUS_DMASYNC_POSTREAD);
1749 1.1 msaitoh m_freem(rbuf->fmp);
1750 1.1 msaitoh rbuf->fmp = NULL;
1751 1.1 msaitoh rbuf->buf = NULL; /* rbuf->buf is part of fmp's chain */
1752 1.1 msaitoh } else if (rbuf->buf) {
1753 1.27 msaitoh bus_dmamap_sync(rxr->ptag->dt_dmat, rbuf->pmap, 0,
1754 1.27 msaitoh rbuf->buf->m_pkthdr.len, BUS_DMASYNC_POSTREAD);
1755 1.1 msaitoh m_free(rbuf->buf);
1756 1.1 msaitoh rbuf->buf = NULL;
1757 1.1 msaitoh }
1758 1.4 msaitoh ixgbe_dmamap_unload(rxr->ptag, rbuf->pmap);
1759 1.1 msaitoh
1760 1.1 msaitoh rbuf->flags = 0;
1761 1.1 msaitoh
1762 1.1 msaitoh return;
1763 1.28 msaitoh } /* ixgbe_rx_discard */
1764 1.1 msaitoh
1765 1.1 msaitoh
1766 1.28 msaitoh /************************************************************************
1767 1.28 msaitoh * ixgbe_rxeof
1768 1.1 msaitoh *
1769 1.28 msaitoh  *   Executes in interrupt context. It replenishes the mbufs in the
1770 1.28 msaitoh  *   descriptor ring and sends data which has been DMA'ed into host
1771 1.28 msaitoh  *   memory to the upper layer.
1772 1.1 msaitoh *
1773 1.28 msaitoh * Return TRUE for more work, FALSE for all clean.
1774 1.28 msaitoh ************************************************************************/
1775 1.1 msaitoh bool
1776 1.1 msaitoh ixgbe_rxeof(struct ix_queue *que)
1777 1.1 msaitoh {
1778 1.1 msaitoh struct adapter *adapter = que->adapter;
1779 1.1 msaitoh struct rx_ring *rxr = que->rxr;
1780 1.1 msaitoh struct ifnet *ifp = adapter->ifp;
1781 1.1 msaitoh #ifdef LRO
1782 1.1 msaitoh struct lro_ctrl *lro = &rxr->lro;
1783 1.1 msaitoh #endif /* LRO */
1784 1.28 msaitoh union ixgbe_adv_rx_desc *cur;
1785 1.28 msaitoh struct ixgbe_rx_buf *rbuf, *nbuf;
1786 1.1 msaitoh int i, nextp, processed = 0;
1787 1.1 msaitoh u32 staterr = 0;
1788 1.7 msaitoh u32 count = adapter->rx_process_limit;
1789 1.1 msaitoh #ifdef RSS
1790 1.1 msaitoh u16 pkt_info;
1791 1.1 msaitoh #endif
1792 1.1 msaitoh
1793 1.1 msaitoh IXGBE_RX_LOCK(rxr);
1794 1.1 msaitoh
1795 1.1 msaitoh #ifdef DEV_NETMAP
1796 1.28 msaitoh if (adapter->feat_en & IXGBE_FEATURE_NETMAP) {
1797 1.28 msaitoh /* Same as the txeof routine: wakeup clients on intr. */
1798 1.28 msaitoh if (netmap_rx_irq(ifp, rxr->me, &processed)) {
1799 1.28 msaitoh IXGBE_RX_UNLOCK(rxr);
1800 1.28 msaitoh return (FALSE);
1801 1.28 msaitoh }
1802 1.1 msaitoh }
1803 1.1 msaitoh #endif /* DEV_NETMAP */
1804 1.1 msaitoh
1805 1.1 msaitoh for (i = rxr->next_to_check; count != 0;) {
1806 1.28 msaitoh struct mbuf *sendmp, *mp;
1807 1.28 msaitoh u32 rsc, ptype;
1808 1.28 msaitoh u16 len;
1809 1.28 msaitoh u16 vtag = 0;
1810 1.28 msaitoh bool eop;
1811 1.1 msaitoh
1812 1.1 msaitoh /* Sync the ring. */
1813 1.1 msaitoh ixgbe_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
1814 1.1 msaitoh BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1815 1.1 msaitoh
1816 1.1 msaitoh cur = &rxr->rx_base[i];
1817 1.1 msaitoh staterr = le32toh(cur->wb.upper.status_error);
1818 1.1 msaitoh #ifdef RSS
1819 1.1 msaitoh pkt_info = le16toh(cur->wb.lower.lo_dword.hs_rss.pkt_info);
1820 1.1 msaitoh #endif
1821 1.1 msaitoh
1822 1.1 msaitoh if ((staterr & IXGBE_RXD_STAT_DD) == 0)
1823 1.1 msaitoh break;
1824 1.1 msaitoh if ((ifp->if_flags & IFF_RUNNING) == 0)
1825 1.1 msaitoh break;
1826 1.1 msaitoh
1827 1.1 msaitoh count--;
1828 1.1 msaitoh sendmp = NULL;
1829 1.1 msaitoh nbuf = NULL;
1830 1.1 msaitoh rsc = 0;
1831 1.1 msaitoh cur->wb.upper.status_error = 0;
1832 1.1 msaitoh rbuf = &rxr->rx_buffers[i];
1833 1.1 msaitoh mp = rbuf->buf;
1834 1.1 msaitoh
1835 1.1 msaitoh len = le16toh(cur->wb.upper.length);
1836 1.1 msaitoh ptype = le32toh(cur->wb.lower.lo_dword.data) &
1837 1.1 msaitoh IXGBE_RXDADV_PKTTYPE_MASK;
1838 1.1 msaitoh eop = ((staterr & IXGBE_RXD_STAT_EOP) != 0);
1839 1.1 msaitoh
1840 1.1 msaitoh /* Make sure bad packets are discarded */
1841 1.1 msaitoh if (eop && (staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK) != 0) {
1842 1.3 msaitoh #if __FreeBSD_version >= 1100036
1843 1.28 msaitoh if (adapter->feat_en & IXGBE_FEATURE_VF)
1844 1.4 msaitoh if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
1845 1.3 msaitoh #endif
1846 1.1 msaitoh rxr->rx_discarded.ev_count++;
1847 1.1 msaitoh ixgbe_rx_discard(rxr, i);
1848 1.1 msaitoh goto next_desc;
1849 1.1 msaitoh }
1850 1.1 msaitoh
1851 1.27 msaitoh bus_dmamap_sync(rxr->ptag->dt_dmat, rbuf->pmap, 0,
1852 1.27 msaitoh rbuf->buf->m_pkthdr.len, BUS_DMASYNC_POSTREAD);
1853 1.27 msaitoh
1854 1.1 msaitoh /*
1855 1.28 msaitoh 		 * On the 82599, which supports a hardware
1856 1.28 msaitoh 		 * LRO (called HW RSC), packets need
1857 1.28 msaitoh 		 * not be fragmented across sequential
1858 1.28 msaitoh 		 * descriptors; rather, the next descriptor
1859 1.28 msaitoh 		 * is indicated in bits of the descriptor.
1860 1.28 msaitoh 		 * This also means that we might process
1861 1.28 msaitoh 		 * more than one packet at a time, something
1862 1.28 msaitoh 		 * that has never been true before; it
1863 1.28 msaitoh 		 * required eliminating global chain pointers
1864 1.28 msaitoh 		 * in favor of what we are doing here. -jfv
1865 1.28 msaitoh */
1866 1.1 msaitoh if (!eop) {
1867 1.1 msaitoh /*
1868 1.28 msaitoh * Figure out the next descriptor
1869 1.28 msaitoh * of this frame.
1870 1.28 msaitoh */
1871 1.1 msaitoh if (rxr->hw_rsc == TRUE) {
1872 1.1 msaitoh rsc = ixgbe_rsc_count(cur);
1873 1.1 msaitoh rxr->rsc_num += (rsc - 1);
1874 1.1 msaitoh }
1875 1.1 msaitoh if (rsc) { /* Get hardware index */
1876 1.28 msaitoh nextp = ((staterr & IXGBE_RXDADV_NEXTP_MASK) >>
1877 1.1 msaitoh IXGBE_RXDADV_NEXTP_SHIFT);
1878 1.1 msaitoh } else { /* Just sequential */
1879 1.1 msaitoh nextp = i + 1;
1880 1.1 msaitoh if (nextp == adapter->num_rx_desc)
1881 1.1 msaitoh nextp = 0;
1882 1.1 msaitoh }
1883 1.1 msaitoh nbuf = &rxr->rx_buffers[nextp];
1884 1.1 msaitoh prefetch(nbuf);
1885 1.1 msaitoh }
1886 1.1 msaitoh /*
1887 1.28 msaitoh * Rather than using the fmp/lmp global pointers
1888 1.28 msaitoh * we now keep the head of a packet chain in the
1889 1.28 msaitoh * buffer struct and pass this along from one
1890 1.28 msaitoh * descriptor to the next, until we get EOP.
1891 1.28 msaitoh */
1892 1.1 msaitoh mp->m_len = len;
1893 1.1 msaitoh /*
1894 1.28 msaitoh 		 * See if there is a stored head
1895 1.28 msaitoh 		 * that determines what we are assembling
1896 1.28 msaitoh */
1897 1.1 msaitoh sendmp = rbuf->fmp;
1898 1.1 msaitoh if (sendmp != NULL) { /* secondary frag */
1899 1.1 msaitoh rbuf->buf = rbuf->fmp = NULL;
1900 1.1 msaitoh mp->m_flags &= ~M_PKTHDR;
1901 1.1 msaitoh sendmp->m_pkthdr.len += mp->m_len;
1902 1.1 msaitoh } else {
1903 1.1 msaitoh /*
1904 1.1 msaitoh * Optimize. This might be a small packet,
1905 1.1 msaitoh * maybe just a TCP ACK. Do a fast copy that
1906 1.1 msaitoh * is cache aligned into a new mbuf, and
1907 1.1 msaitoh * leave the old mbuf+cluster for re-use.
1908 1.1 msaitoh */
1909 1.1 msaitoh if (eop && len <= IXGBE_RX_COPY_LEN) {
1910 1.1 msaitoh sendmp = m_gethdr(M_NOWAIT, MT_DATA);
1911 1.1 msaitoh if (sendmp != NULL) {
1912 1.28 msaitoh sendmp->m_data += IXGBE_RX_COPY_ALIGN;
1913 1.28 msaitoh ixgbe_bcopy(mp->m_data, sendmp->m_data,
1914 1.28 msaitoh len);
1915 1.1 msaitoh sendmp->m_len = len;
1916 1.1 msaitoh rxr->rx_copies.ev_count++;
1917 1.1 msaitoh rbuf->flags |= IXGBE_RX_COPY;
1918 1.1 msaitoh }
1919 1.1 msaitoh }
1920 1.1 msaitoh if (sendmp == NULL) {
1921 1.1 msaitoh rbuf->buf = rbuf->fmp = NULL;
1922 1.1 msaitoh sendmp = mp;
1923 1.1 msaitoh }
1924 1.1 msaitoh
1925 1.1 msaitoh /* first desc of a non-ps chain */
1926 1.1 msaitoh sendmp->m_flags |= M_PKTHDR;
1927 1.1 msaitoh sendmp->m_pkthdr.len = mp->m_len;
1928 1.1 msaitoh }
1929 1.1 msaitoh ++processed;
1930 1.1 msaitoh
1931 1.1 msaitoh /* Pass the head pointer on */
1932 1.1 msaitoh if (eop == 0) {
1933 1.1 msaitoh nbuf->fmp = sendmp;
1934 1.1 msaitoh sendmp = NULL;
1935 1.1 msaitoh mp->m_next = nbuf->buf;
1936 1.1 msaitoh } else { /* Sending this frame */
1937 1.1 msaitoh m_set_rcvif(sendmp, ifp);
1938 1.31 msaitoh ++rxr->packets;
1939 1.1 msaitoh rxr->rx_packets.ev_count++;
1940 1.1 msaitoh /* capture data for AIM */
1941 1.1 msaitoh rxr->bytes += sendmp->m_pkthdr.len;
1942 1.1 msaitoh rxr->rx_bytes.ev_count += sendmp->m_pkthdr.len;
1943 1.1 msaitoh /* Process vlan info */
1944 1.28 msaitoh if ((rxr->vtag_strip) && (staterr & IXGBE_RXD_STAT_VP))
1945 1.1 msaitoh vtag = le16toh(cur->wb.upper.vlan);
1946 1.1 msaitoh if (vtag) {
1947 1.29 knakahar vlan_set_tag(sendmp, vtag);
1948 1.1 msaitoh }
1949 1.1 msaitoh if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) {
1950 1.1 msaitoh ixgbe_rx_checksum(staterr, sendmp, ptype,
1951 1.3 msaitoh &adapter->stats.pf);
1952 1.1 msaitoh }
1953 1.8 msaitoh
1954 1.6 msaitoh #if 0 /* FreeBSD */
1955 1.28 msaitoh /*
1956 1.28 msaitoh * In case of multiqueue, we have RXCSUM.PCSD bit set
1957 1.28 msaitoh * and never cleared. This means we have RSS hash
1958 1.28 msaitoh * available to be used.
1959 1.28 msaitoh */
1960 1.28 msaitoh if (adapter->num_queues > 1) {
1961 1.28 msaitoh sendmp->m_pkthdr.flowid =
1962 1.28 msaitoh le32toh(cur->wb.lower.hi_dword.rss);
1963 1.28 msaitoh switch (pkt_info & IXGBE_RXDADV_RSSTYPE_MASK) {
1964 1.28 msaitoh case IXGBE_RXDADV_RSSTYPE_IPV4:
1965 1.28 msaitoh M_HASHTYPE_SET(sendmp,
1966 1.28 msaitoh M_HASHTYPE_RSS_IPV4);
1967 1.28 msaitoh break;
1968 1.28 msaitoh case IXGBE_RXDADV_RSSTYPE_IPV4_TCP:
1969 1.28 msaitoh M_HASHTYPE_SET(sendmp,
1970 1.28 msaitoh M_HASHTYPE_RSS_TCP_IPV4);
1971 1.28 msaitoh break;
1972 1.28 msaitoh case IXGBE_RXDADV_RSSTYPE_IPV6:
1973 1.28 msaitoh M_HASHTYPE_SET(sendmp,
1974 1.28 msaitoh M_HASHTYPE_RSS_IPV6);
1975 1.28 msaitoh break;
1976 1.28 msaitoh case IXGBE_RXDADV_RSSTYPE_IPV6_TCP:
1977 1.28 msaitoh M_HASHTYPE_SET(sendmp,
1978 1.28 msaitoh M_HASHTYPE_RSS_TCP_IPV6);
1979 1.28 msaitoh break;
1980 1.28 msaitoh case IXGBE_RXDADV_RSSTYPE_IPV6_EX:
1981 1.28 msaitoh M_HASHTYPE_SET(sendmp,
1982 1.28 msaitoh M_HASHTYPE_RSS_IPV6_EX);
1983 1.28 msaitoh break;
1984 1.28 msaitoh case IXGBE_RXDADV_RSSTYPE_IPV6_TCP_EX:
1985 1.28 msaitoh M_HASHTYPE_SET(sendmp,
1986 1.28 msaitoh M_HASHTYPE_RSS_TCP_IPV6_EX);
1987 1.28 msaitoh break;
1988 1.6 msaitoh #if __FreeBSD_version > 1100000
1989 1.28 msaitoh case IXGBE_RXDADV_RSSTYPE_IPV4_UDP:
1990 1.28 msaitoh M_HASHTYPE_SET(sendmp,
1991 1.28 msaitoh M_HASHTYPE_RSS_UDP_IPV4);
1992 1.28 msaitoh break;
1993 1.28 msaitoh case IXGBE_RXDADV_RSSTYPE_IPV6_UDP:
1994 1.28 msaitoh M_HASHTYPE_SET(sendmp,
1995 1.28 msaitoh M_HASHTYPE_RSS_UDP_IPV6);
1996 1.28 msaitoh break;
1997 1.28 msaitoh case IXGBE_RXDADV_RSSTYPE_IPV6_UDP_EX:
1998 1.28 msaitoh M_HASHTYPE_SET(sendmp,
1999 1.28 msaitoh M_HASHTYPE_RSS_UDP_IPV6_EX);
2000 1.28 msaitoh break;
2001 1.28 msaitoh #endif
2002 1.28 msaitoh default:
2003 1.28 msaitoh M_HASHTYPE_SET(sendmp,
2004 1.28 msaitoh M_HASHTYPE_OPAQUE_HASH);
2005 1.28 msaitoh }
2006 1.28 msaitoh } else {
2007 1.28 msaitoh sendmp->m_pkthdr.flowid = que->msix;
2008 1.1 msaitoh M_HASHTYPE_SET(sendmp, M_HASHTYPE_OPAQUE);
2009 1.1 msaitoh }
2010 1.8 msaitoh #endif
2011 1.1 msaitoh }
2012 1.1 msaitoh next_desc:
2013 1.1 msaitoh ixgbe_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
2014 1.1 msaitoh BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2015 1.1 msaitoh
2016 1.1 msaitoh /* Advance our pointers to the next descriptor. */
2017 1.1 msaitoh if (++i == rxr->num_desc)
2018 1.1 msaitoh i = 0;
2019 1.1 msaitoh
2020 1.1 msaitoh /* Now send to the stack or do LRO */
2021 1.1 msaitoh if (sendmp != NULL) {
2022 1.1 msaitoh rxr->next_to_check = i;
2023 1.28 msaitoh IXGBE_RX_UNLOCK(rxr);
2024 1.1 msaitoh ixgbe_rx_input(rxr, ifp, sendmp, ptype);
2025 1.28 msaitoh IXGBE_RX_LOCK(rxr);
2026 1.1 msaitoh i = rxr->next_to_check;
2027 1.1 msaitoh }
2028 1.1 msaitoh
2029 1.28 msaitoh /* Every 8 descriptors we go to refresh mbufs */
2030 1.1 msaitoh if (processed == 8) {
2031 1.1 msaitoh ixgbe_refresh_mbufs(rxr, i);
2032 1.1 msaitoh processed = 0;
2033 1.1 msaitoh }
2034 1.1 msaitoh }
2035 1.1 msaitoh
2036 1.1 msaitoh /* Refresh any remaining buf structs */
2037 1.1 msaitoh if (ixgbe_rx_unrefreshed(rxr))
2038 1.1 msaitoh ixgbe_refresh_mbufs(rxr, i);
2039 1.1 msaitoh
2040 1.1 msaitoh rxr->next_to_check = i;
2041 1.1 msaitoh
2042 1.28 msaitoh IXGBE_RX_UNLOCK(rxr);
2043 1.28 msaitoh
2044 1.1 msaitoh #ifdef LRO
2045 1.1 msaitoh /*
2046 1.1 msaitoh * Flush any outstanding LRO work
2047 1.1 msaitoh */
2048 1.10 msaitoh tcp_lro_flush_all(lro);
2049 1.1 msaitoh #endif /* LRO */
2050 1.1 msaitoh
2051 1.1 msaitoh /*
2052 1.28 msaitoh * Still have cleaning to do?
2053 1.28 msaitoh */
2054 1.1 msaitoh if ((staterr & IXGBE_RXD_STAT_DD) != 0)
2055 1.28 msaitoh return (TRUE);
2056 1.28 msaitoh
2057 1.28 msaitoh return (FALSE);
2058 1.28 msaitoh } /* ixgbe_rxeof */
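
/*
 * A minimal sketch of how ixgbe_rxeof() above is expected to be
 * driven (illustrative only; the real MSI-X and softint handlers
 * live in ixgbe.c, and 'que_si' is assumed to be the queue's
 * softint handle):
 */
#if 0 /* example, not compiled */
static void
example_que_work(struct ix_queue *que)
{
	bool more = ixgbe_rxeof(que);	/* TRUE while work remains */

	if (more)
		softint_schedule(que->que_si);	/* keep polling */
	else	/* all clean: re-enable the queue interrupt */
		ixgbe_enable_queue(que->adapter, que->msix);
}
#endif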
2059 1.1 msaitoh
2060 1.1 msaitoh
2061 1.28 msaitoh /************************************************************************
2062 1.28 msaitoh * ixgbe_rx_checksum
2063 1.1 msaitoh *
2064 1.28 msaitoh * Verify that the hardware indicated that the checksum is valid.
2065 1.28 msaitoh * Inform the stack about the status of checksum so that stack
2066 1.28 msaitoh * doesn't spend time verifying the checksum.
2067 1.28 msaitoh ************************************************************************/
2068 1.1 msaitoh static void
2069 1.1 msaitoh ixgbe_rx_checksum(u32 staterr, struct mbuf * mp, u32 ptype,
2070 1.1 msaitoh struct ixgbe_hw_stats *stats)
2071 1.1 msaitoh {
2072 1.28 msaitoh u16 status = (u16)staterr;
2073 1.28 msaitoh u8 errors = (u8)(staterr >> 24);
2074 1.1 msaitoh #if 0
2075 1.28 msaitoh bool sctp = false;
2076 1.1 msaitoh
2077 1.1 msaitoh if ((ptype & IXGBE_RXDADV_PKTTYPE_ETQF) == 0 &&
2078 1.1 msaitoh (ptype & IXGBE_RXDADV_PKTTYPE_SCTP) != 0)
2079 1.8 msaitoh sctp = true;
2080 1.1 msaitoh #endif
2081 1.1 msaitoh
2082 1.8 msaitoh /* IPv4 checksum */
2083 1.1 msaitoh if (status & IXGBE_RXD_STAT_IPCS) {
2084 1.1 msaitoh stats->ipcs.ev_count++;
2085 1.1 msaitoh if (!(errors & IXGBE_RXD_ERR_IPE)) {
2086 1.1 msaitoh /* IP Checksum Good */
2087 1.1 msaitoh mp->m_pkthdr.csum_flags = M_CSUM_IPv4;
2088 1.1 msaitoh } else {
2089 1.1 msaitoh stats->ipcs_bad.ev_count++;
2090 1.1 msaitoh mp->m_pkthdr.csum_flags = M_CSUM_IPv4|M_CSUM_IPv4_BAD;
2091 1.1 msaitoh }
2092 1.1 msaitoh }
2093 1.8 msaitoh /* TCP/UDP/SCTP checksum */
2094 1.1 msaitoh if (status & IXGBE_RXD_STAT_L4CS) {
2095 1.1 msaitoh stats->l4cs.ev_count++;
2096 1.1 msaitoh int type = M_CSUM_TCPv4|M_CSUM_TCPv6|M_CSUM_UDPv4|M_CSUM_UDPv6;
2097 1.1 msaitoh if (!(errors & IXGBE_RXD_ERR_TCPE)) {
2098 1.1 msaitoh mp->m_pkthdr.csum_flags |= type;
2099 1.1 msaitoh } else {
2100 1.1 msaitoh stats->l4cs_bad.ev_count++;
2101 1.1 msaitoh mp->m_pkthdr.csum_flags |= type | M_CSUM_TCP_UDP_BAD;
2102 1.1 msaitoh }
2103 1.1 msaitoh }
2104 1.28 msaitoh } /* ixgbe_rx_checksum */
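
/*
 * Consumer-side sketch (illustrative only): upper layers test the
 * flags set above rather than recomputing checksums in software.
 */
#if 0 /* example, not compiled */
static bool
example_rx_csum_ok(const struct mbuf *m)
{
	const int bad = M_CSUM_IPv4_BAD | M_CSUM_TCP_UDP_BAD;

	/* FALSE if hardware flagged a bad checksum. */
	return ((m->m_pkthdr.csum_flags & bad) == 0);
}
#endif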
2105 1.1 msaitoh
2106 1.28 msaitoh /************************************************************************
2107 1.28 msaitoh * ixgbe_dma_malloc
2108 1.28 msaitoh ************************************************************************/
2109 1.1 msaitoh int
2110 1.1 msaitoh ixgbe_dma_malloc(struct adapter *adapter, const bus_size_t size,
2111 1.1 msaitoh struct ixgbe_dma_alloc *dma, const int mapflags)
2112 1.1 msaitoh {
2113 1.1 msaitoh device_t dev = adapter->dev;
2114 1.28 msaitoh int r, rsegs;
2115 1.1 msaitoh
2116 1.28 msaitoh r = ixgbe_dma_tag_create(
2117 1.28 msaitoh /* parent */ adapter->osdep.dmat,
2118 1.28 msaitoh /* alignment */ DBA_ALIGN,
2119 1.28 msaitoh /* bounds */ 0,
2120 1.28 msaitoh /* maxsize */ size,
2121 1.28 msaitoh /* nsegments */ 1,
2122 1.28 msaitoh /* maxsegsize */ size,
2123 1.28 msaitoh /* flags */ BUS_DMA_ALLOCNOW,
2124 1.1 msaitoh &dma->dma_tag);
2125 1.1 msaitoh if (r != 0) {
2126 1.1 msaitoh aprint_error_dev(dev,
2127 1.1 msaitoh "%s: ixgbe_dma_tag_create failed; error %d\n", __func__, r);
2128 1.1 msaitoh goto fail_0;
2129 1.1 msaitoh }
2130 1.1 msaitoh
2131 1.28 msaitoh r = bus_dmamem_alloc(dma->dma_tag->dt_dmat, size,
2132 1.28 msaitoh dma->dma_tag->dt_alignment, dma->dma_tag->dt_boundary,
2133 1.28 msaitoh &dma->dma_seg, 1, &rsegs, BUS_DMA_NOWAIT);
2134 1.1 msaitoh if (r != 0) {
2135 1.1 msaitoh aprint_error_dev(dev,
2136 1.1 msaitoh "%s: bus_dmamem_alloc failed; error %d\n", __func__, r);
2137 1.1 msaitoh goto fail_1;
2138 1.1 msaitoh }
2139 1.1 msaitoh
2140 1.1 msaitoh r = bus_dmamem_map(dma->dma_tag->dt_dmat, &dma->dma_seg, rsegs,
2141 1.1 msaitoh size, &dma->dma_vaddr, BUS_DMA_NOWAIT);
2142 1.1 msaitoh if (r != 0) {
2143 1.1 msaitoh aprint_error_dev(dev, "%s: bus_dmamem_map failed; error %d\n",
2144 1.1 msaitoh __func__, r);
2145 1.1 msaitoh goto fail_2;
2146 1.1 msaitoh }
2147 1.1 msaitoh
2148 1.1 msaitoh r = ixgbe_dmamap_create(dma->dma_tag, 0, &dma->dma_map);
2149 1.1 msaitoh if (r != 0) {
2150 1.1 msaitoh 		aprint_error_dev(dev, "%s: ixgbe_dmamap_create failed; error %d\n",
2151 1.1 msaitoh __func__, r);
2152 1.1 msaitoh goto fail_3;
2153 1.1 msaitoh }
2154 1.1 msaitoh
2155 1.28 msaitoh r = bus_dmamap_load(dma->dma_tag->dt_dmat, dma->dma_map,
2156 1.28 msaitoh dma->dma_vaddr, size, NULL, mapflags | BUS_DMA_NOWAIT);
2157 1.1 msaitoh if (r != 0) {
2158 1.1 msaitoh aprint_error_dev(dev, "%s: bus_dmamap_load failed; error %d\n",
2159 1.1 msaitoh __func__, r);
2160 1.1 msaitoh goto fail_4;
2161 1.1 msaitoh }
2162 1.1 msaitoh dma->dma_paddr = dma->dma_map->dm_segs[0].ds_addr;
2163 1.1 msaitoh dma->dma_size = size;
2164 1.1 msaitoh return 0;
2165 1.1 msaitoh fail_4:
2166 1.1 msaitoh ixgbe_dmamap_destroy(dma->dma_tag, dma->dma_map);
2167 1.1 msaitoh fail_3:
2168 1.1 msaitoh bus_dmamem_unmap(dma->dma_tag->dt_dmat, dma->dma_vaddr, size);
2169 1.1 msaitoh fail_2:
2170 1.1 msaitoh bus_dmamem_free(dma->dma_tag->dt_dmat, &dma->dma_seg, rsegs);
2171 1.1 msaitoh fail_1:
2172 1.1 msaitoh ixgbe_dma_tag_destroy(dma->dma_tag);
2173 1.1 msaitoh fail_0:
2174 1.1 msaitoh
2175 1.28 msaitoh return (r);
2176 1.28 msaitoh } /* ixgbe_dma_malloc */
2177 1.28 msaitoh
2178 1.28 msaitoh /************************************************************************
2179 1.28 msaitoh * ixgbe_dma_free
2180 1.28 msaitoh ************************************************************************/
2181 1.3 msaitoh void
2182 1.1 msaitoh ixgbe_dma_free(struct adapter *adapter, struct ixgbe_dma_alloc *dma)
2183 1.1 msaitoh {
2184 1.1 msaitoh bus_dmamap_sync(dma->dma_tag->dt_dmat, dma->dma_map, 0, dma->dma_size,
2185 1.1 msaitoh BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2186 1.1 msaitoh ixgbe_dmamap_unload(dma->dma_tag, dma->dma_map);
2187 1.1 msaitoh bus_dmamem_free(dma->dma_tag->dt_dmat, &dma->dma_seg, 1);
2188 1.1 msaitoh ixgbe_dma_tag_destroy(dma->dma_tag);
2189 1.28 msaitoh } /* ixgbe_dma_free */
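
/*
 * Usage sketch for the pair above (illustrative; 'adapter' and the
 * descriptor count are placeholders): allocate DMA-safe ring memory,
 * program the bus address into the hardware, and free it on detach.
 */
#if 0 /* example, not compiled */
	struct ixgbe_dma_alloc dma;
	int rsize = roundup2(1024 * sizeof(union ixgbe_adv_rx_desc),
	    DBA_ALIGN);

	if (ixgbe_dma_malloc(adapter, rsize, &dma, BUS_DMA_NOWAIT) == 0) {
		/* dma.dma_vaddr: kernel VA of the ring */
		/* dma.dma_paddr: bus address to program into the NIC */
		ixgbe_dma_free(adapter, &dma);
	}
#endif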
2190 1.1 msaitoh
2191 1.1 msaitoh
2192 1.28 msaitoh /************************************************************************
2193 1.28 msaitoh * ixgbe_allocate_queues
2194 1.1 msaitoh *
2195 1.28 msaitoh * Allocate memory for the transmit and receive rings, and then
2196 1.28 msaitoh * the descriptors associated with each, called only once at attach.
2197 1.28 msaitoh ************************************************************************/
2198 1.1 msaitoh int
2199 1.1 msaitoh ixgbe_allocate_queues(struct adapter *adapter)
2200 1.1 msaitoh {
2201 1.1 msaitoh device_t dev = adapter->dev;
2202 1.1 msaitoh struct ix_queue *que;
2203 1.1 msaitoh struct tx_ring *txr;
2204 1.1 msaitoh struct rx_ring *rxr;
2205 1.28 msaitoh int rsize, tsize, error = IXGBE_SUCCESS;
2206 1.28 msaitoh int txconf = 0, rxconf = 0;
2207 1.1 msaitoh
2208 1.28 msaitoh /* First, allocate the top level queue structs */
2209 1.28 msaitoh adapter->queues = (struct ix_queue *)malloc(sizeof(struct ix_queue) *
2210 1.28 msaitoh adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO);
2211 1.28 msaitoh if (adapter->queues == NULL) {
2212 1.28 msaitoh aprint_error_dev(dev, "Unable to allocate queue memory\n");
2213 1.1 msaitoh error = ENOMEM;
2214 1.1 msaitoh goto fail;
2215 1.1 msaitoh }
2216 1.1 msaitoh
2217 1.28 msaitoh /* Second, allocate the TX ring struct memory */
2218 1.28 msaitoh adapter->tx_rings = (struct tx_ring *)malloc(sizeof(struct tx_ring) *
2219 1.28 msaitoh adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO);
2220 1.28 msaitoh if (adapter->tx_rings == NULL) {
2221 1.1 msaitoh aprint_error_dev(dev, "Unable to allocate TX ring memory\n");
2222 1.1 msaitoh error = ENOMEM;
2223 1.1 msaitoh goto tx_fail;
2224 1.1 msaitoh }
2225 1.1 msaitoh
2226 1.28 msaitoh /* Third, allocate the RX ring */
2227 1.28 msaitoh adapter->rx_rings = (struct rx_ring *)malloc(sizeof(struct rx_ring) *
2228 1.28 msaitoh adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO);
2229 1.28 msaitoh if (adapter->rx_rings == NULL) {
2230 1.1 msaitoh aprint_error_dev(dev, "Unable to allocate RX ring memory\n");
2231 1.1 msaitoh error = ENOMEM;
2232 1.1 msaitoh goto rx_fail;
2233 1.1 msaitoh }
2234 1.1 msaitoh
2235 1.1 msaitoh /* For the ring itself */
2236 1.28 msaitoh tsize = roundup2(adapter->num_tx_desc * sizeof(union ixgbe_adv_tx_desc),
2237 1.28 msaitoh DBA_ALIGN);
2238 1.1 msaitoh
2239 1.1 msaitoh /*
2240 1.1 msaitoh * Now set up the TX queues, txconf is needed to handle the
2241 1.1 msaitoh * possibility that things fail midcourse and we need to
2242 1.1 msaitoh * undo memory gracefully
2243 1.28 msaitoh */
2244 1.1 msaitoh for (int i = 0; i < adapter->num_queues; i++, txconf++) {
2245 1.1 msaitoh /* Set up some basics */
2246 1.1 msaitoh txr = &adapter->tx_rings[i];
2247 1.1 msaitoh txr->adapter = adapter;
2248 1.28 msaitoh txr->txr_interq = NULL;
2249 1.28 msaitoh /* In case SR-IOV is enabled, align the index properly */
2250 1.5 msaitoh #ifdef PCI_IOV
2251 1.28 msaitoh txr->me = ixgbe_vf_que_index(adapter->iov_mode, adapter->pool,
2252 1.28 msaitoh i);
2253 1.5 msaitoh #else
2254 1.1 msaitoh txr->me = i;
2255 1.5 msaitoh #endif
2256 1.1 msaitoh txr->num_desc = adapter->num_tx_desc;
2257 1.1 msaitoh
2258 1.1 msaitoh /* Initialize the TX side lock */
2259 1.1 msaitoh mutex_init(&txr->tx_mtx, MUTEX_DEFAULT, IPL_NET);
2260 1.1 msaitoh
2261 1.28 msaitoh if (ixgbe_dma_malloc(adapter, tsize, &txr->txdma,
2262 1.28 msaitoh BUS_DMA_NOWAIT)) {
2263 1.1 msaitoh aprint_error_dev(dev,
2264 1.1 msaitoh "Unable to allocate TX Descriptor memory\n");
2265 1.1 msaitoh error = ENOMEM;
2266 1.1 msaitoh goto err_tx_desc;
2267 1.1 msaitoh }
2268 1.1 msaitoh txr->tx_base = (union ixgbe_adv_tx_desc *)txr->txdma.dma_vaddr;
2269 1.1 msaitoh bzero((void *)txr->tx_base, tsize);
2270 1.1 msaitoh
2271 1.28 msaitoh /* Now allocate transmit buffers for the ring */
2272 1.28 msaitoh if (ixgbe_allocate_transmit_buffers(txr)) {
2273 1.1 msaitoh aprint_error_dev(dev,
2274 1.1 msaitoh "Critical Failure setting up transmit buffers\n");
2275 1.1 msaitoh error = ENOMEM;
2276 1.1 msaitoh goto err_tx_desc;
2277 1.1 msaitoh }
2278 1.28 msaitoh if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX)) {
2279 1.28 msaitoh /* Allocate a buf ring */
2280 1.28 msaitoh txr->txr_interq = pcq_create(IXGBE_BR_SIZE, KM_SLEEP);
2281 1.28 msaitoh if (txr->txr_interq == NULL) {
2282 1.28 msaitoh aprint_error_dev(dev,
2283 1.28 msaitoh "Critical Failure setting up buf ring\n");
2284 1.28 msaitoh error = ENOMEM;
2285 1.28 msaitoh goto err_tx_desc;
2286 1.28 msaitoh }
2287 1.28 msaitoh }
2288 1.1 msaitoh }
2289 1.1 msaitoh
2290 1.1 msaitoh /*
2291 1.1 msaitoh * Next the RX queues...
2292 1.1 msaitoh */
2293 1.28 msaitoh rsize = roundup2(adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc),
2294 1.28 msaitoh DBA_ALIGN);
2295 1.1 msaitoh for (int i = 0; i < adapter->num_queues; i++, rxconf++) {
2296 1.1 msaitoh rxr = &adapter->rx_rings[i];
2297 1.1 msaitoh /* Set up some basics */
2298 1.1 msaitoh rxr->adapter = adapter;
2299 1.5 msaitoh #ifdef PCI_IOV
2300 1.28 msaitoh /* In case SR-IOV is enabled, align the index properly */
2301 1.28 msaitoh rxr->me = ixgbe_vf_que_index(adapter->iov_mode, adapter->pool,
2302 1.28 msaitoh i);
2303 1.5 msaitoh #else
2304 1.1 msaitoh rxr->me = i;
2305 1.5 msaitoh #endif
2306 1.1 msaitoh rxr->num_desc = adapter->num_rx_desc;
2307 1.1 msaitoh
2308 1.1 msaitoh /* Initialize the RX side lock */
2309 1.1 msaitoh mutex_init(&rxr->rx_mtx, MUTEX_DEFAULT, IPL_NET);
2310 1.1 msaitoh
2311 1.28 msaitoh if (ixgbe_dma_malloc(adapter, rsize, &rxr->rxdma,
2312 1.28 msaitoh BUS_DMA_NOWAIT)) {
2313 1.1 msaitoh aprint_error_dev(dev,
2314 1.1 msaitoh 			    "Unable to allocate RX Descriptor memory\n");
2315 1.1 msaitoh error = ENOMEM;
2316 1.1 msaitoh goto err_rx_desc;
2317 1.1 msaitoh }
2318 1.1 msaitoh rxr->rx_base = (union ixgbe_adv_rx_desc *)rxr->rxdma.dma_vaddr;
2319 1.1 msaitoh bzero((void *)rxr->rx_base, rsize);
2320 1.1 msaitoh
2321 1.28 msaitoh /* Allocate receive buffers for the ring */
2322 1.1 msaitoh if (ixgbe_allocate_receive_buffers(rxr)) {
2323 1.1 msaitoh aprint_error_dev(dev,
2324 1.1 msaitoh "Critical Failure setting up receive buffers\n");
2325 1.1 msaitoh error = ENOMEM;
2326 1.1 msaitoh goto err_rx_desc;
2327 1.1 msaitoh }
2328 1.1 msaitoh }
2329 1.1 msaitoh
2330 1.1 msaitoh /*
2331 1.28 msaitoh * Finally set up the queue holding structs
2332 1.28 msaitoh */
2333 1.1 msaitoh for (int i = 0; i < adapter->num_queues; i++) {
2334 1.1 msaitoh que = &adapter->queues[i];
2335 1.1 msaitoh que->adapter = adapter;
2336 1.3 msaitoh que->me = i;
2337 1.1 msaitoh que->txr = &adapter->tx_rings[i];
2338 1.1 msaitoh que->rxr = &adapter->rx_rings[i];
2339 1.33 knakahar
2340 1.37 knakahar mutex_init(&que->dc_mtx, MUTEX_DEFAULT, IPL_NET);
2341 1.37 knakahar que->disabled_count = 0;
2342 1.1 msaitoh }
2343 1.1 msaitoh
2344 1.1 msaitoh return (0);
2345 1.1 msaitoh
2346 1.1 msaitoh err_rx_desc:
2347 1.1 msaitoh for (rxr = adapter->rx_rings; rxconf > 0; rxr++, rxconf--)
2348 1.1 msaitoh ixgbe_dma_free(adapter, &rxr->rxdma);
2349 1.1 msaitoh err_tx_desc:
2350 1.1 msaitoh for (txr = adapter->tx_rings; txconf > 0; txr++, txconf--)
2351 1.1 msaitoh ixgbe_dma_free(adapter, &txr->txdma);
2352 1.1 msaitoh free(adapter->rx_rings, M_DEVBUF);
2353 1.1 msaitoh rx_fail:
2354 1.1 msaitoh free(adapter->tx_rings, M_DEVBUF);
2355 1.1 msaitoh tx_fail:
2356 1.1 msaitoh free(adapter->queues, M_DEVBUF);
2357 1.1 msaitoh fail:
2358 1.1 msaitoh return (error);
2359 1.28 msaitoh } /* ixgbe_allocate_queues */
2360