/* $NetBSD: ix_txrx.c,v 1.52 2019/02/22 06:49:15 msaitoh Exp $ */

/******************************************************************************

  Copyright (c) 2001-2017, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
/*$FreeBSD: head/sys/dev/ixgbe/ix_txrx.c 327031 2017-12-20 18:15:06Z erj $*/

/*
 * Copyright (c) 2011 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Coyote Point Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "opt_inet.h"
#include "opt_inet6.h"

#include "ixgbe.h"

/*
 * HW RSC control:
 *  This feature only works with IPv4, and only on 82599 and later.
 *  It also causes IP forwarding to fail, and unlike LRO that cannot
 *  be controlled by the stack.  For all these reasons it is left
 *  off by default with no tunable interface; enabling it requires
 *  recompiling with this set to TRUE.
 */
static bool ixgbe_rsc_enable = FALSE;

/*
 * For Flow Director: this is the number of TX packets we sample
 * for the filter pool; with the default of 20, every 20th packet
 * is probed.
 *
 * This feature can be disabled by setting this to 0.
 */
static int atr_sample_rate = 20;

/************************************************************************
 * Local Function prototypes
 ************************************************************************/
static void	ixgbe_setup_transmit_ring(struct tx_ring *);
static void	ixgbe_free_transmit_buffers(struct tx_ring *);
static int	ixgbe_setup_receive_ring(struct rx_ring *);
static void	ixgbe_free_receive_buffers(struct rx_ring *);
static void	ixgbe_rx_checksum(u32, struct mbuf *, u32,
		    struct ixgbe_hw_stats *);
static void	ixgbe_refresh_mbufs(struct rx_ring *, int);
static void	ixgbe_drain(struct ifnet *, struct tx_ring *);
static int	ixgbe_xmit(struct tx_ring *, struct mbuf *);
static int	ixgbe_tx_ctx_setup(struct tx_ring *,
		    struct mbuf *, u32 *, u32 *);
static int	ixgbe_tso_setup(struct tx_ring *,
		    struct mbuf *, u32 *, u32 *);
static __inline void ixgbe_rx_discard(struct rx_ring *, int);
static __inline void ixgbe_rx_input(struct rx_ring *, struct ifnet *,
		    struct mbuf *, u32);
static int	ixgbe_dma_malloc(struct adapter *, bus_size_t,
		    struct ixgbe_dma_alloc *, int);
static void	ixgbe_dma_free(struct adapter *, struct ixgbe_dma_alloc *);

static void	ixgbe_setup_hw_rsc(struct rx_ring *);

/************************************************************************
 * ixgbe_legacy_start_locked - Transmit entry point
 *
 *   Called by the stack to initiate a transmit.
 *   The driver will remain in this routine as long as there are
 *   packets to transmit and transmit resources are available.
 *   In case resources are not available, the stack is notified
 *   and the packet is requeued.
 ************************************************************************/
int
ixgbe_legacy_start_locked(struct ifnet *ifp, struct tx_ring *txr)
{
	int rc;
	struct mbuf    *m_head;
	struct adapter *adapter = txr->adapter;

	IXGBE_TX_LOCK_ASSERT(txr);

	if (adapter->link_active != LINK_STATE_UP) {
		/*
		 * Discard all packets buffered in IFQ to avoid
		 * sending stale packets at the next link-up.
		 */
		ixgbe_drain(ifp, txr);
		return (ENETDOWN);
	}
	if ((ifp->if_flags & IFF_RUNNING) == 0)
		return (ENETDOWN);
	if (txr->txr_no_space)
		return (ENETDOWN);

	while (!IFQ_IS_EMPTY(&ifp->if_snd)) {
		if (txr->tx_avail <= IXGBE_QUEUE_MIN_FREE)
			break;

		IFQ_POLL(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

		if ((rc = ixgbe_xmit(txr, m_head)) == EAGAIN) {
			break;
		}
		IFQ_DEQUEUE(&ifp->if_snd, m_head);
		if (rc != 0) {
			m_freem(m_head);
			continue;
		}

		/* Send a copy of the frame to the BPF listener */
		bpf_mtap(ifp, m_head, BPF_D_OUT);
	}

	return IXGBE_SUCCESS;
} /* ixgbe_legacy_start_locked */

/************************************************************************
 * ixgbe_legacy_start
 *
 *   Called by the stack, this always uses the first tx ring,
 *   and should not be used with multiqueue tx enabled.
 ************************************************************************/
void
ixgbe_legacy_start(struct ifnet *ifp)
{
	struct adapter *adapter = ifp->if_softc;
	struct tx_ring *txr = adapter->tx_rings;

	if (ifp->if_flags & IFF_RUNNING) {
		IXGBE_TX_LOCK(txr);
		ixgbe_legacy_start_locked(ifp, txr);
		IXGBE_TX_UNLOCK(txr);
	}
} /* ixgbe_legacy_start */

/************************************************************************
 * ixgbe_mq_start - Multiqueue Transmit Entry Point
 *
 *   (if_transmit function)
 ************************************************************************/
int
ixgbe_mq_start(struct ifnet *ifp, struct mbuf *m)
{
	struct adapter	*adapter = ifp->if_softc;
	struct tx_ring	*txr;
	int		i;
#ifdef RSS
	uint32_t	bucket_id;
#endif

	/*
	 * When doing RSS, map it to the same outbound queue
	 * as the incoming flow would be mapped to.
	 *
	 * If everything is set up correctly, it should be the
	 * same bucket as the one the current CPU maps to.
	 */
#ifdef RSS
	if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) {
		if ((adapter->feat_en & IXGBE_FEATURE_RSS) &&
		    (rss_hash2bucket(m->m_pkthdr.flowid, M_HASHTYPE_GET(m),
		    &bucket_id) == 0)) {
			i = bucket_id % adapter->num_queues;
#ifdef IXGBE_DEBUG
			if (bucket_id >= adapter->num_queues)
				if_printf(ifp,
				    "bucket_id (%d) >= num_queues (%d)\n",
				    bucket_id, adapter->num_queues);
#endif
		} else
			i = m->m_pkthdr.flowid % adapter->num_queues;
	} else
#endif /* RSS */
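		/*
		 * Default mapping: derive the queue from the current
		 * CPU, spreading unclassified traffic across the
		 * available queues.
		 */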
		i = (cpu_index(curcpu()) % ncpu) % adapter->num_queues;

	/* Check for a hung queue and pick alternative */
	if (((1 << i) & adapter->active_queues) == 0)
		i = ffs64(adapter->active_queues);

	txr = &adapter->tx_rings[i];

	if (__predict_false(!pcq_put(txr->txr_interq, m))) {
		m_freem(m);
		txr->pcq_drops.ev_count++;
		return ENOBUFS;
	}
	if (IXGBE_TX_TRYLOCK(txr)) {
		ixgbe_mq_start_locked(ifp, txr);
		IXGBE_TX_UNLOCK(txr);
	} else {
		if (adapter->txrx_use_workqueue) {
			u_int *enqueued;

			/*
			 * This function itself is not called in interrupt
			 * context, but it can be called in fast softint
			 * context right after receiving forwarded packets.
			 * So we must protect the workqueue from being
			 * enqueued twice when the machine handles both
			 * locally generated and forwarded packets.
			 */
			enqueued = percpu_getref(adapter->txr_wq_enqueued);
			if (*enqueued == 0) {
				*enqueued = 1;
				percpu_putref(adapter->txr_wq_enqueued);
				workqueue_enqueue(adapter->txr_wq,
				    &txr->wq_cookie, curcpu());
			} else
				percpu_putref(adapter->txr_wq_enqueued);
		} else
			softint_schedule(txr->txr_si);
	}

	return (0);
} /* ixgbe_mq_start */

/************************************************************************
 * ixgbe_mq_start_locked
 ************************************************************************/
int
ixgbe_mq_start_locked(struct ifnet *ifp, struct tx_ring *txr)
{
	struct mbuf *next;
	int         enqueued = 0, err = 0;

	if (txr->adapter->link_active != LINK_STATE_UP) {
		/*
		 * Discard all packets buffered in txr_interq to avoid
		 * sending stale packets at the next link-up.
		 */
		ixgbe_drain(ifp, txr);
		return (ENETDOWN);
	}
	if ((ifp->if_flags & IFF_RUNNING) == 0)
		return (ENETDOWN);
	if (txr->txr_no_space)
		return (ENETDOWN);

	/* Process the queue */
	while ((next = pcq_get(txr->txr_interq)) != NULL) {
		if ((err = ixgbe_xmit(txr, next)) != 0) {
			m_freem(next);
			/* All errors are counted in ixgbe_xmit() */
			break;
		}
		enqueued++;
#if __FreeBSD_version >= 1100036
		/*
		 * Since we're looking at the tx ring, we can check
		 * to see if we're a VF by examining our tail register
		 * address.
		 */
		if ((txr->adapter->feat_en & IXGBE_FEATURE_VF) &&
		    (next->m_flags & M_MCAST))
			if_inc_counter(ifp, IFCOUNTER_OMCASTS, 1);
#endif
		/* Send a copy of the frame to the BPF listener */
		bpf_mtap(ifp, next, BPF_D_OUT);
		if ((ifp->if_flags & IFF_RUNNING) == 0)
			break;
	}

	if (txr->tx_avail < IXGBE_TX_CLEANUP_THRESHOLD(txr->adapter))
		ixgbe_txeof(txr);

	return (err);
} /* ixgbe_mq_start_locked */

/************************************************************************
 * ixgbe_deferred_mq_start
 *
 *   Called from a softint and workqueue (indirectly) to drain queued
 *   transmit packets.
 ************************************************************************/
void
ixgbe_deferred_mq_start(void *arg)
{
	struct tx_ring *txr = arg;
	struct adapter *adapter = txr->adapter;
	struct ifnet   *ifp = adapter->ifp;

	IXGBE_TX_LOCK(txr);
	if (pcq_peek(txr->txr_interq) != NULL)
		ixgbe_mq_start_locked(ifp, txr);
	IXGBE_TX_UNLOCK(txr);
} /* ixgbe_deferred_mq_start */

/************************************************************************
 * ixgbe_deferred_mq_start_work
 *
 *   Called from a workqueue to drain queued transmit packets.
 ************************************************************************/
void
ixgbe_deferred_mq_start_work(struct work *wk, void *arg)
{
	struct tx_ring *txr = container_of(wk, struct tx_ring, wq_cookie);
	struct adapter *adapter = txr->adapter;
	u_int *enqueued = percpu_getref(adapter->txr_wq_enqueued);
	*enqueued = 0;
	percpu_putref(adapter->txr_wq_enqueued);

	ixgbe_deferred_mq_start(txr);
} /* ixgbe_deferred_mq_start_work */

/************************************************************************
 * ixgbe_drain_all
 ************************************************************************/
void
ixgbe_drain_all(struct adapter *adapter)
{
	struct ifnet *ifp = adapter->ifp;
	struct ix_queue *que = adapter->queues;

	for (int i = 0; i < adapter->num_queues; i++, que++) {
		struct tx_ring *txr = que->txr;

		IXGBE_TX_LOCK(txr);
		ixgbe_drain(ifp, txr);
		IXGBE_TX_UNLOCK(txr);
	}
}

/************************************************************************
 * ixgbe_xmit
 *
 *   Maps the mbufs to tx descriptors, allowing the
 *   TX engine to transmit the packets.
 *
 *   Return 0 on success, positive on failure
 ************************************************************************/
static int
ixgbe_xmit(struct tx_ring *txr, struct mbuf *m_head)
{
	struct adapter          *adapter = txr->adapter;
	struct ixgbe_tx_buf     *txbuf;
	union ixgbe_adv_tx_desc *txd = NULL;
	struct ifnet            *ifp = adapter->ifp;
	int                     i, j, error;
	int                     first;
	u32                     olinfo_status = 0, cmd_type_len;
	bool                    remap = TRUE;
	bus_dmamap_t            map;

	/* Basic descriptor defines */
	cmd_type_len = (IXGBE_ADVTXD_DTYP_DATA |
	    IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT);

	if (vlan_has_tag(m_head))
		cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE;

	/*
	 * Important to capture the first descriptor
	 * used because it will contain the index of
	 * the one we tell the hardware to report back
	 */
	first = txr->next_avail_desc;
	txbuf = &txr->tx_buffers[first];
	map = txbuf->map;

	/*
	 * Map the packet for DMA.
	 */
retry:
	error = bus_dmamap_load_mbuf(txr->txtag->dt_dmat, map, m_head,
	    BUS_DMA_NOWAIT);

	if (__predict_false(error)) {
		struct mbuf *m;

		switch (error) {
		case EAGAIN:
			txr->q_eagain_tx_dma_setup++;
			return EAGAIN;
		case ENOMEM:
			txr->q_enomem_tx_dma_setup++;
			return EAGAIN;
		case EFBIG:
			/* Try it again? - one try */
			if (remap == TRUE) {
				remap = FALSE;
				/*
				 * XXX: m_defrag will choke on
				 * non-MCLBYTES-sized clusters
				 */
				txr->q_efbig_tx_dma_setup++;
				m = m_defrag(m_head, M_NOWAIT);
				if (m == NULL) {
					txr->q_mbuf_defrag_failed++;
					return ENOBUFS;
				}
				m_head = m;
				goto retry;
			} else {
				txr->q_efbig2_tx_dma_setup++;
				return error;
			}
		case EINVAL:
			txr->q_einval_tx_dma_setup++;
			return error;
		default:
			txr->q_other_tx_dma_setup++;
			return error;
		}
	}

	/* Make certain there are enough descriptors */
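	/* (the +2 leaves room for the offload context descriptor
	 *  set up below, plus a spare slot so the ring never fills) */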
	if (txr->tx_avail < (map->dm_nsegs + 2)) {
		txr->txr_no_space = true;
		txr->no_desc_avail.ev_count++;
		ixgbe_dmamap_unload(txr->txtag, txbuf->map);
		return EAGAIN;
	}

	/*
	 * Set up the appropriate offload context;
	 * this will consume the first descriptor.
	 */
	error = ixgbe_tx_ctx_setup(txr, m_head, &cmd_type_len, &olinfo_status);
	if (__predict_false(error)) {
		return (error);
	}

	/* Do the flow director magic */
	if ((adapter->feat_en & IXGBE_FEATURE_FDIR) &&
	    (txr->atr_sample) && (!adapter->fdir_reinit)) {
		++txr->atr_count;
		if (txr->atr_count >= atr_sample_rate) {
			ixgbe_atr(txr, m_head);
			txr->atr_count = 0;
		}
	}

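	/*
	 * Set the Check Context (CC) bit so the data descriptors
	 * use the offload context prepared above.
	 */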
	olinfo_status |= IXGBE_ADVTXD_CC;
	i = txr->next_avail_desc;
	for (j = 0; j < map->dm_nsegs; j++) {
		bus_size_t seglen;
		bus_addr_t segaddr;

		txbuf = &txr->tx_buffers[i];
		txd = &txr->tx_base[i];
		seglen = map->dm_segs[j].ds_len;
		segaddr = htole64(map->dm_segs[j].ds_addr);

		txd->read.buffer_addr = segaddr;
		txd->read.cmd_type_len = htole32(cmd_type_len | seglen);
		txd->read.olinfo_status = htole32(olinfo_status);

		if (++i == txr->num_desc)
			i = 0;
	}

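	/*
	 * EOP marks the last descriptor of the frame; RS asks the
	 * hardware to report status (DD) so ixgbe_txeof() can see
	 * when the frame has completed.
	 */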
	txd->read.cmd_type_len |= htole32(IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS);
	txr->tx_avail -= map->dm_nsegs;
	txr->next_avail_desc = i;

	txbuf->m_head = m_head;
	/*
	 * Here we swap the map so the last descriptor,
	 * which gets the completion interrupt, has the
	 * real map, and the first descriptor gets the
	 * unused map from this descriptor.
	 */
	txr->tx_buffers[first].map = txbuf->map;
	txbuf->map = map;
	bus_dmamap_sync(txr->txtag->dt_dmat, map, 0, m_head->m_pkthdr.len,
	    BUS_DMASYNC_PREWRITE);

	/* Set the EOP descriptor that will be marked done */
	txbuf = &txr->tx_buffers[first];
	txbuf->eop = txd;

	ixgbe_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	/*
	 * Advance the Transmit Descriptor Tail (Tdt), this tells the
	 * hardware that this frame is available to transmit.
	 */
	++txr->total_packets.ev_count;
	IXGBE_WRITE_REG(&adapter->hw, txr->tail, i);

	/*
	 * XXX NOMPSAFE: ifp->if_data should be percpu.
	 */
	ifp->if_obytes += m_head->m_pkthdr.len;
	if (m_head->m_flags & M_MCAST)
		ifp->if_omcasts++;

	/* Mark queue as having work */
	if (txr->busy == 0)
		txr->busy = 1;

	return (0);
} /* ixgbe_xmit */

/************************************************************************
 * ixgbe_drain
 ************************************************************************/
static void
ixgbe_drain(struct ifnet *ifp, struct tx_ring *txr)
{
	struct mbuf *m;

	IXGBE_TX_LOCK_ASSERT(txr);

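	/*
	 * Only ring 0 drains if_snd: the legacy (non-multiqueue)
	 * transmit path only ever queues to the first tx ring.
	 */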
	if (txr->me == 0) {
		while (!IFQ_IS_EMPTY(&ifp->if_snd)) {
			IFQ_DEQUEUE(&ifp->if_snd, m);
			m_freem(m);
			IF_DROP(&ifp->if_snd);
		}
	}

	while ((m = pcq_get(txr->txr_interq)) != NULL) {
		m_freem(m);
		txr->pcq_drops.ev_count++;
	}
}

/************************************************************************
 * ixgbe_allocate_transmit_buffers
 *
 *   Allocate memory for tx_buffer structures. The tx_buffer stores all
 *   the information needed to transmit a packet on the wire. This is
 *   called only once at attach, setup is done every reset.
 ************************************************************************/
static int
ixgbe_allocate_transmit_buffers(struct tx_ring *txr)
{
	struct adapter      *adapter = txr->adapter;
	device_t            dev = adapter->dev;
	struct ixgbe_tx_buf *txbuf;
	int                 error, i;

	/*
	 * Setup DMA descriptor areas.
	 */
	error = ixgbe_dma_tag_create(
	         /* parent */ adapter->osdep.dmat,
	         /* alignment */ 1,
	         /* bounds */ 0,
	         /* maxsize */ IXGBE_TSO_SIZE,
	         /* nsegments */ adapter->num_segs,
	         /* maxsegsize */ PAGE_SIZE,
	         /* flags */ 0,
	         &txr->txtag);
	if (error != 0) {
		aprint_error_dev(dev, "Unable to allocate TX DMA tag\n");
		goto fail;
	}

	txr->tx_buffers =
	    (struct ixgbe_tx_buf *) malloc(sizeof(struct ixgbe_tx_buf) *
	    adapter->num_tx_desc, M_DEVBUF, M_NOWAIT | M_ZERO);
	if (txr->tx_buffers == NULL) {
		aprint_error_dev(dev, "Unable to allocate tx_buffer memory\n");
		error = ENOMEM;
		goto fail;
	}

	/* Create the descriptor buffer dma maps */
	txbuf = txr->tx_buffers;
	for (i = 0; i < adapter->num_tx_desc; i++, txbuf++) {
		error = ixgbe_dmamap_create(txr->txtag, 0, &txbuf->map);
		if (error != 0) {
			aprint_error_dev(dev,
			    "Unable to create TX DMA map (%d)\n", error);
			goto fail;
		}
	}

	return 0;
fail:
	/* We free all, it handles case where we are in the middle */
#if 0 /* XXX was FreeBSD */
	ixgbe_free_transmit_structures(adapter);
#else
	ixgbe_free_transmit_buffers(txr);
#endif
	return (error);
} /* ixgbe_allocate_transmit_buffers */

/************************************************************************
 * ixgbe_setup_transmit_ring - Initialize a transmit ring.
 ************************************************************************/
static void
ixgbe_setup_transmit_ring(struct tx_ring *txr)
{
	struct adapter        *adapter = txr->adapter;
	struct ixgbe_tx_buf   *txbuf;
#ifdef DEV_NETMAP
	struct netmap_adapter *na = NA(adapter->ifp);
	struct netmap_slot    *slot;
#endif /* DEV_NETMAP */

	/* Clear the old ring contents */
	IXGBE_TX_LOCK(txr);

#ifdef DEV_NETMAP
	if (adapter->feat_en & IXGBE_FEATURE_NETMAP) {
		/*
		 * (under lock): if in netmap mode, do some consistency
		 * checks and set slot to entry 0 of the netmap ring.
		 */
		slot = netmap_reset(na, NR_TX, txr->me, 0);
	}
#endif /* DEV_NETMAP */

	bzero((void *)txr->tx_base,
	    (sizeof(union ixgbe_adv_tx_desc)) * adapter->num_tx_desc);
	/* Reset indices */
	txr->next_avail_desc = 0;
	txr->next_to_clean = 0;

	/* Free any existing tx buffers. */
	txbuf = txr->tx_buffers;
	for (int i = 0; i < txr->num_desc; i++, txbuf++) {
		if (txbuf->m_head != NULL) {
			bus_dmamap_sync(txr->txtag->dt_dmat, txbuf->map,
			    0, txbuf->m_head->m_pkthdr.len,
			    BUS_DMASYNC_POSTWRITE);
			ixgbe_dmamap_unload(txr->txtag, txbuf->map);
			m_freem(txbuf->m_head);
			txbuf->m_head = NULL;
		}

#ifdef DEV_NETMAP
		/*
		 * In netmap mode, set the map for the packet buffer.
		 * NOTE: Some drivers (not this one) also need to set
		 * the physical buffer address in the NIC ring.
		 * Slots in the netmap ring (indexed by "si") are
		 * kring->nkr_hwofs positions "ahead" wrt the
		 * corresponding slot in the NIC ring. In some drivers
		 * (not here) nkr_hwofs can be negative. Function
		 * netmap_idx_n2k() handles wraparounds properly.
		 */
		if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) && slot) {
			int si = netmap_idx_n2k(&na->tx_rings[txr->me], i);
			netmap_load_map(na, txr->txtag,
			    txbuf->map, NMB(na, slot + si));
		}
#endif /* DEV_NETMAP */

		/* Clear the EOP descriptor pointer */
		txbuf->eop = NULL;
	}

	/* Set the rate at which we sample packets */
	if (adapter->feat_en & IXGBE_FEATURE_FDIR)
		txr->atr_sample = atr_sample_rate;

	/* Set number of descriptors available */
	txr->tx_avail = adapter->num_tx_desc;

	ixgbe_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	IXGBE_TX_UNLOCK(txr);
} /* ixgbe_setup_transmit_ring */

/************************************************************************
 * ixgbe_setup_transmit_structures - Initialize all transmit rings.
 ************************************************************************/
int
ixgbe_setup_transmit_structures(struct adapter *adapter)
{
	struct tx_ring *txr = adapter->tx_rings;

	for (int i = 0; i < adapter->num_queues; i++, txr++)
		ixgbe_setup_transmit_ring(txr);

	return (0);
} /* ixgbe_setup_transmit_structures */

/************************************************************************
 * ixgbe_free_transmit_structures - Free all transmit rings.
 ************************************************************************/
void
ixgbe_free_transmit_structures(struct adapter *adapter)
{
	struct tx_ring *txr = adapter->tx_rings;

	for (int i = 0; i < adapter->num_queues; i++, txr++) {
		ixgbe_free_transmit_buffers(txr);
		ixgbe_dma_free(adapter, &txr->txdma);
		IXGBE_TX_LOCK_DESTROY(txr);
	}
	free(adapter->tx_rings, M_DEVBUF);
} /* ixgbe_free_transmit_structures */

/************************************************************************
 * ixgbe_free_transmit_buffers
 *
 *   Free transmit ring related data structures.
 ************************************************************************/
static void
ixgbe_free_transmit_buffers(struct tx_ring *txr)
{
	struct adapter      *adapter = txr->adapter;
	struct ixgbe_tx_buf *tx_buffer;
	int                 i;

	INIT_DEBUGOUT("ixgbe_free_transmit_buffers: begin");

	if (txr->tx_buffers == NULL)
		return;

	tx_buffer = txr->tx_buffers;
	for (i = 0; i < adapter->num_tx_desc; i++, tx_buffer++) {
		if (tx_buffer->m_head != NULL) {
			bus_dmamap_sync(txr->txtag->dt_dmat, tx_buffer->map,
			    0, tx_buffer->m_head->m_pkthdr.len,
			    BUS_DMASYNC_POSTWRITE);
			ixgbe_dmamap_unload(txr->txtag, tx_buffer->map);
			m_freem(tx_buffer->m_head);
			tx_buffer->m_head = NULL;
			if (tx_buffer->map != NULL) {
				ixgbe_dmamap_destroy(txr->txtag,
				    tx_buffer->map);
				tx_buffer->map = NULL;
			}
		} else if (tx_buffer->map != NULL) {
			ixgbe_dmamap_unload(txr->txtag, tx_buffer->map);
			ixgbe_dmamap_destroy(txr->txtag, tx_buffer->map);
			tx_buffer->map = NULL;
		}
	}
	if (txr->txr_interq != NULL) {
		struct mbuf *m;

		while ((m = pcq_get(txr->txr_interq)) != NULL)
			m_freem(m);
		pcq_destroy(txr->txr_interq);
	}
	if (txr->tx_buffers != NULL) {
		free(txr->tx_buffers, M_DEVBUF);
		txr->tx_buffers = NULL;
	}
	if (txr->txtag != NULL) {
		ixgbe_dma_tag_destroy(txr->txtag);
		txr->txtag = NULL;
	}
} /* ixgbe_free_transmit_buffers */

/************************************************************************
 * ixgbe_tx_ctx_setup
 *
 *   Advanced Context Descriptor setup for VLAN, CSUM or TSO
 ************************************************************************/
static int
ixgbe_tx_ctx_setup(struct tx_ring *txr, struct mbuf *mp,
    u32 *cmd_type_len, u32 *olinfo_status)
{
	struct adapter                   *adapter = txr->adapter;
	struct ixgbe_adv_tx_context_desc *TXD;
	struct ether_vlan_header         *eh;
#ifdef INET
	struct ip                        *ip;
#endif
#ifdef INET6
	struct ip6_hdr                   *ip6;
#endif
	int                              ehdrlen, ip_hlen = 0;
	int                              offload = TRUE;
	int                              ctxd = txr->next_avail_desc;
	u32                              vlan_macip_lens = 0;
	u32                              type_tucmd_mlhl = 0;
	u16                              vtag = 0;
	u16                              etype;
	u8                               ipproto = 0;
	char                             *l3d;

	/* First check if TSO is to be used */
	if (mp->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) {
		int rv = ixgbe_tso_setup(txr, mp, cmd_type_len, olinfo_status);

		if (rv != 0)
			++adapter->tso_err.ev_count;
		return rv;
	}

	if ((mp->m_pkthdr.csum_flags & M_CSUM_OFFLOAD) == 0)
		offload = FALSE;

	/* Indicate the whole packet as payload when not doing TSO */
	*olinfo_status |= mp->m_pkthdr.len << IXGBE_ADVTXD_PAYLEN_SHIFT;

	/* Now ready a context descriptor */
	TXD = (struct ixgbe_adv_tx_context_desc *)&txr->tx_base[ctxd];

	/*
	 * In advanced descriptors the vlan tag must
	 * be placed into the context descriptor. Hence
	 * we need to make one even if not doing offloads.
	 */
	if (vlan_has_tag(mp)) {
		vtag = htole16(vlan_get_tag(mp));
		vlan_macip_lens |= (vtag << IXGBE_ADVTXD_VLAN_SHIFT);
	} else if (!(txr->adapter->feat_en & IXGBE_FEATURE_NEEDS_CTXD) &&
	           (offload == FALSE))
		return (0);

	/*
	 * Determine where frame payload starts.
	 * Jump over vlan headers if already present,
	 * helpful for QinQ too.
	 */
	KASSERT(mp->m_len >= offsetof(struct ether_vlan_header, evl_tag));
	eh = mtod(mp, struct ether_vlan_header *);
	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
		KASSERT(mp->m_len >= sizeof(struct ether_vlan_header));
		etype = ntohs(eh->evl_proto);
		ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
	} else {
		etype = ntohs(eh->evl_encap_proto);
		ehdrlen = ETHER_HDR_LEN;
	}

	/* Set the ether header length */
	vlan_macip_lens |= ehdrlen << IXGBE_ADVTXD_MACLEN_SHIFT;

	if (offload == FALSE)
		goto no_offloads;

	/*
	 * If the first mbuf only includes the ethernet header,
	 * jump to the next one
	 * XXX: This assumes the stack splits mbufs containing headers
	 *      on header boundaries
	 * XXX: And assumes the entire IP header is contained in one mbuf
	 */
	if (mp->m_len == ehdrlen && mp->m_next)
		l3d = mtod(mp->m_next, char *);
	else
		l3d = mtod(mp, char *) + ehdrlen;

	switch (etype) {
#ifdef INET
	case ETHERTYPE_IP:
		ip = (struct ip *)(l3d);
		ip_hlen = ip->ip_hl << 2;
		ipproto = ip->ip_p;
		type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
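		/*
		 * With IPv4 checksum offload the stack leaves ip_sum
		 * zeroed for the hardware to fill in, which the
		 * assertion below relies on.
		 */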
		KASSERT((mp->m_pkthdr.csum_flags & M_CSUM_IPv4) == 0 ||
		    ip->ip_sum == 0);
		break;
#endif
#ifdef INET6
	case ETHERTYPE_IPV6:
		ip6 = (struct ip6_hdr *)(l3d);
		ip_hlen = sizeof(struct ip6_hdr);
		ipproto = ip6->ip6_nxt;
		type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV6;
		break;
#endif
	default:
		offload = false;
		break;
	}

	if ((mp->m_pkthdr.csum_flags & M_CSUM_IPv4) != 0)
		*olinfo_status |= IXGBE_TXD_POPTS_IXSM << 8;

	vlan_macip_lens |= ip_hlen;

	/* No support for offloads for non-L4 next headers */
	switch (ipproto) {
	case IPPROTO_TCP:
		if (mp->m_pkthdr.csum_flags &
		    (M_CSUM_TCPv4 | M_CSUM_TCPv6))
			type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
		else
			offload = false;
		break;
	case IPPROTO_UDP:
		if (mp->m_pkthdr.csum_flags &
		    (M_CSUM_UDPv4 | M_CSUM_UDPv6))
			type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_UDP;
		else
			offload = false;
		break;
	default:
		offload = false;
		break;
	}

	if (offload) /* Insert L4 checksum into data descriptors */
		*olinfo_status |= IXGBE_TXD_POPTS_TXSM << 8;

no_offloads:
	type_tucmd_mlhl |= IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;

	/* Now copy bits into descriptor */
	TXD->vlan_macip_lens = htole32(vlan_macip_lens);
	TXD->type_tucmd_mlhl = htole32(type_tucmd_mlhl);
	TXD->seqnum_seed = htole32(0);
	TXD->mss_l4len_idx = htole32(0);

	/* We've consumed the first desc, adjust counters */
	if (++ctxd == txr->num_desc)
		ctxd = 0;
	txr->next_avail_desc = ctxd;
	--txr->tx_avail;

	return (0);
} /* ixgbe_tx_ctx_setup */

/************************************************************************
 * ixgbe_tso_setup
 *
 *   Setup work for hardware segmentation offload (TSO) on
 *   adapters using advanced tx descriptors
 ************************************************************************/
static int
ixgbe_tso_setup(struct tx_ring *txr, struct mbuf *mp, u32 *cmd_type_len,
    u32 *olinfo_status)
{
	struct ixgbe_adv_tx_context_desc *TXD;
	struct ether_vlan_header         *eh;
#ifdef INET6
	struct ip6_hdr                   *ip6;
#endif
#ifdef INET
	struct ip                        *ip;
#endif
	struct tcphdr                    *th;
	int                              ctxd, ehdrlen, ip_hlen, tcp_hlen;
	u32                              vlan_macip_lens = 0;
	u32                              type_tucmd_mlhl = 0;
	u32                              mss_l4len_idx = 0, paylen;
	u16                              vtag = 0, eh_type;

	/*
	 * Determine where frame payload starts.
	 * Jump over vlan headers if already present
	 */
	eh = mtod(mp, struct ether_vlan_header *);
	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
		ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
		eh_type = eh->evl_proto;
	} else {
		ehdrlen = ETHER_HDR_LEN;
		eh_type = eh->evl_encap_proto;
	}

	switch (ntohs(eh_type)) {
#ifdef INET
	case ETHERTYPE_IP:
		ip = (struct ip *)(mp->m_data + ehdrlen);
		if (ip->ip_p != IPPROTO_TCP)
			return (ENXIO);
		ip->ip_sum = 0;
		ip_hlen = ip->ip_hl << 2;
		th = (struct tcphdr *)((char *)ip + ip_hlen);
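		/*
		 * Seed the TCP checksum field with the pseudo-header
		 * checksum; the hardware folds in the payload checksum
		 * for every segment it generates.
		 */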
		th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
		    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
		type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
		/* Tell transmit desc to also do IPv4 checksum. */
		*olinfo_status |= IXGBE_TXD_POPTS_IXSM << 8;
		break;
#endif
#ifdef INET6
	case ETHERTYPE_IPV6:
		ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
		/* XXX-BZ For now we do not pretend to support ext. hdrs. */
		if (ip6->ip6_nxt != IPPROTO_TCP)
			return (ENXIO);
		ip_hlen = sizeof(struct ip6_hdr);
		th = (struct tcphdr *)((char *)ip6 + ip_hlen);
		th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
		    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
		type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV6;
		break;
#endif
	default:
		panic("%s: CSUM_TSO but no supported IP version (0x%04x)",
		    __func__, ntohs(eh_type));
		break;
	}

	ctxd = txr->next_avail_desc;
	TXD = (struct ixgbe_adv_tx_context_desc *)&txr->tx_base[ctxd];

	tcp_hlen = th->th_off << 2;

	/* This is used in the transmit desc in encap */
	paylen = mp->m_pkthdr.len - ehdrlen - ip_hlen - tcp_hlen;

	/* VLAN MACLEN IPLEN */
	if (vlan_has_tag(mp)) {
		vtag = htole16(vlan_get_tag(mp));
		vlan_macip_lens |= (vtag << IXGBE_ADVTXD_VLAN_SHIFT);
	}

	vlan_macip_lens |= ehdrlen << IXGBE_ADVTXD_MACLEN_SHIFT;
	vlan_macip_lens |= ip_hlen;
	TXD->vlan_macip_lens = htole32(vlan_macip_lens);

	/* ADV DTYPE TUCMD */
	type_tucmd_mlhl |= IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
	type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
	TXD->type_tucmd_mlhl = htole32(type_tucmd_mlhl);

	/* MSS L4LEN IDX */
	mss_l4len_idx |= (mp->m_pkthdr.segsz << IXGBE_ADVTXD_MSS_SHIFT);
	mss_l4len_idx |= (tcp_hlen << IXGBE_ADVTXD_L4LEN_SHIFT);
	TXD->mss_l4len_idx = htole32(mss_l4len_idx);

	TXD->seqnum_seed = htole32(0);

	if (++ctxd == txr->num_desc)
		ctxd = 0;

	txr->tx_avail--;
	txr->next_avail_desc = ctxd;
	*cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;
	*olinfo_status |= IXGBE_TXD_POPTS_TXSM << 8;
	*olinfo_status |= paylen << IXGBE_ADVTXD_PAYLEN_SHIFT;
	++txr->tso_tx.ev_count;

	return (0);
} /* ixgbe_tso_setup */

/************************************************************************
 * ixgbe_txeof
 *
 *   Examine each tx_buffer in the used queue. If the hardware is done
 *   processing the packet then free associated resources. The
 *   tx_buffer is put back on the free queue.
 ************************************************************************/
bool
ixgbe_txeof(struct tx_ring *txr)
{
	struct adapter          *adapter = txr->adapter;
	struct ifnet            *ifp = adapter->ifp;
	struct ixgbe_tx_buf     *buf;
	union ixgbe_adv_tx_desc *txd;
	u32                     work, processed = 0;
	u32                     limit = adapter->tx_process_limit;

	KASSERT(mutex_owned(&txr->tx_mtx));

#ifdef DEV_NETMAP
	if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) &&
	    (adapter->ifp->if_capenable & IFCAP_NETMAP)) {
		struct netmap_adapter *na = NA(adapter->ifp);
		struct netmap_kring *kring = &na->tx_rings[txr->me];
		txd = txr->tx_base;
		bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
		    BUS_DMASYNC_POSTREAD);
		/*
		 * In netmap mode, all the work is done in the context
		 * of the client thread. Interrupt handlers only wake up
		 * clients, which may be sleeping on individual rings
		 * or on a global resource for all rings.
		 * To implement tx interrupt mitigation, we wake up the client
		 * thread roughly every half ring, even if the NIC interrupts
		 * more frequently. This is implemented as follows:
		 * - ixgbe_txsync() sets kring->nr_kflags with the index of
		 *   the slot that should wake up the thread (nkr_num_slots
		 *   means the user thread should not be woken up);
		 * - the driver ignores tx interrupts unless netmap_mitigate=0
		 *   or the slot has the DD bit set.
		 */
		if (!netmap_mitigate ||
		    (kring->nr_kflags < kring->nkr_num_slots &&
		     txd[kring->nr_kflags].wb.status & IXGBE_TXD_STAT_DD)) {
			netmap_tx_irq(ifp, txr->me);
		}
		return false;
	}
#endif /* DEV_NETMAP */

	if (txr->tx_avail == txr->num_desc) {
		txr->busy = 0;
		return false;
	}

	/* Get work starting point */
	work = txr->next_to_clean;
	buf = &txr->tx_buffers[work];
	txd = &txr->tx_base[work];
	work -= txr->num_desc; /* The distance to ring end */
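	/*
	 * "work" now counts up from -num_desc toward zero, reaching
	 * zero exactly at the ring boundary; the !work tests below
	 * use this to detect wraparound cheaply.
	 */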
	ixgbe_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
	    BUS_DMASYNC_POSTREAD);

	do {
		union ixgbe_adv_tx_desc *eop = buf->eop;
		if (eop == NULL) /* No work */
			break;

		if ((eop->wb.status & IXGBE_TXD_STAT_DD) == 0)
			break; /* I/O not complete */

		if (buf->m_head) {
			txr->bytes += buf->m_head->m_pkthdr.len;
			bus_dmamap_sync(txr->txtag->dt_dmat, buf->map,
			    0, buf->m_head->m_pkthdr.len,
			    BUS_DMASYNC_POSTWRITE);
			ixgbe_dmamap_unload(txr->txtag, buf->map);
			m_freem(buf->m_head);
			buf->m_head = NULL;
		}
		buf->eop = NULL;
		txr->txr_no_space = false;
		++txr->tx_avail;

		/* We clean the range if multi segment */
		while (txd != eop) {
			++txd;
			++buf;
			++work;
			/* wrap the ring? */
			if (__predict_false(!work)) {
				work -= txr->num_desc;
				buf = txr->tx_buffers;
				txd = txr->tx_base;
			}
			if (buf->m_head) {
				txr->bytes +=
				    buf->m_head->m_pkthdr.len;
				bus_dmamap_sync(txr->txtag->dt_dmat,
				    buf->map,
				    0, buf->m_head->m_pkthdr.len,
				    BUS_DMASYNC_POSTWRITE);
				ixgbe_dmamap_unload(txr->txtag,
				    buf->map);
				m_freem(buf->m_head);
				buf->m_head = NULL;
			}
			++txr->tx_avail;
			buf->eop = NULL;
		}
		++txr->packets;
		++processed;
		++ifp->if_opackets;

		/* Try the next packet */
		++txd;
		++buf;
		++work;
		/* reset with a wrap */
		if (__predict_false(!work)) {
			work -= txr->num_desc;
			buf = txr->tx_buffers;
			txd = txr->tx_base;
		}
		prefetch(txd);
	} while (__predict_true(--limit));

	ixgbe_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	work += txr->num_desc;
	txr->next_to_clean = work;

	/*
	 * Queue hang detection: we know there is work outstanding
	 * or the first return above would have been taken, so
	 * increment busy if nothing was cleaned; local_timer will
	 * check it and mark the queue HUNG if it exceeds the
	 * maximum number of attempts.
	 */
	if ((processed == 0) && (txr->busy != IXGBE_QUEUE_HUNG))
		++txr->busy;
	/*
	 * If anything was cleaned, reset the state to 1;
	 * note that this will turn off HUNG if it's set.
	 */
	if (processed)
		txr->busy = 1;

	if (txr->tx_avail == txr->num_desc)
		txr->busy = 0;

	return ((limit > 0) ? false : true);
} /* ixgbe_txeof */

/************************************************************************
 * ixgbe_rsc_count
 *
 *   Used to detect a descriptor that has been merged by Hardware RSC.
 ************************************************************************/
static inline u32
ixgbe_rsc_count(union ixgbe_adv_rx_desc *rx)
{
	return (le32toh(rx->wb.lower.lo_dword.data) &
	    IXGBE_RXDADV_RSCCNT_MASK) >> IXGBE_RXDADV_RSCCNT_SHIFT;
} /* ixgbe_rsc_count */

/************************************************************************
 * ixgbe_setup_hw_rsc
 *
 *   Initialize the Hardware RSC (LRO) feature on 82599 for an RX
 *   ring; it is toggled by the LRO capability even though it is
 *   transparent to the stack.
 *
 *   NOTE: Since this HW feature only works with IPv4 and
 *         testing has shown soft LRO to be as effective,
 *         this feature will be disabled by default.
 ************************************************************************/
1265 1.1 msaitoh static void
1266 1.1 msaitoh ixgbe_setup_hw_rsc(struct rx_ring *rxr)
1267 1.1 msaitoh {
1268 1.28 msaitoh struct adapter *adapter = rxr->adapter;
1269 1.28 msaitoh struct ixgbe_hw *hw = &adapter->hw;
1270 1.28 msaitoh u32 rscctrl, rdrxctl;
1271 1.1 msaitoh
1272 1.1 msaitoh /* If turning LRO/RSC off we need to disable it */
1273 1.1 msaitoh if ((adapter->ifp->if_capenable & IFCAP_LRO) == 0) {
1274 1.1 msaitoh rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(rxr->me));
1275 1.1 msaitoh rscctrl &= ~IXGBE_RSCCTL_RSCEN;
1276 1.1 msaitoh return;
1277 1.1 msaitoh }
1278 1.1 msaitoh
1279 1.1 msaitoh rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
1280 1.1 msaitoh rdrxctl &= ~IXGBE_RDRXCTL_RSCFRSTSIZE;
1281 1.28 msaitoh #ifdef DEV_NETMAP
1282 1.28 msaitoh /* Always strip CRC unless Netmap disabled it */
1283 1.28 msaitoh if (!(adapter->feat_en & IXGBE_FEATURE_NETMAP) ||
1284 1.28 msaitoh !(adapter->ifp->if_capenable & IFCAP_NETMAP) ||
1285 1.28 msaitoh ix_crcstrip)
1286 1.1 msaitoh #endif /* DEV_NETMAP */
1287 1.28 msaitoh rdrxctl |= IXGBE_RDRXCTL_CRCSTRIP;
1288 1.1 msaitoh rdrxctl |= IXGBE_RDRXCTL_RSCACKC;
1289 1.1 msaitoh IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);
1290 1.1 msaitoh
1291 1.1 msaitoh rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(rxr->me));
1292 1.1 msaitoh rscctrl |= IXGBE_RSCCTL_RSCEN;
1293 1.1 msaitoh /*
1294 1.28 msaitoh * Limit the total number of descriptors that
1295 1.28 msaitoh * can be combined, so it does not exceed 64K
1296 1.28 msaitoh */
1297 1.1 msaitoh if (rxr->mbuf_sz == MCLBYTES)
1298 1.1 msaitoh rscctrl |= IXGBE_RSCCTL_MAXDESC_16;
1299 1.1 msaitoh else if (rxr->mbuf_sz == MJUMPAGESIZE)
1300 1.1 msaitoh rscctrl |= IXGBE_RSCCTL_MAXDESC_8;
1301 1.1 msaitoh else if (rxr->mbuf_sz == MJUM9BYTES)
1302 1.1 msaitoh rscctrl |= IXGBE_RSCCTL_MAXDESC_4;
1303 1.1 msaitoh else /* Using 16K cluster */
1304 1.1 msaitoh rscctrl |= IXGBE_RSCCTL_MAXDESC_1;
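	/*
	 * Worked out with the standard cluster sizes (MCLBYTES = 2KB,
	 * MJUMPAGESIZE = one page, typically 4KB, MJUM9BYTES = 9KB,
	 * MJUM16BYTES = 16KB), the caps above keep a merged frame at
	 * or below the 64K limit:
	 *
	 *	16 descriptors * 2KB  = 32KB
	 *	 8 descriptors * 4KB  = 32KB
	 *	 4 descriptors * 9KB  = 36KB
	 *	 1 descriptor  * 16KB = 16KB (no merging)
	 */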
1305 1.1 msaitoh
1306 1.1 msaitoh IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(rxr->me), rscctrl);
1307 1.1 msaitoh
1308 1.1 msaitoh /* Enable TCP header recognition */
1309 1.1 msaitoh IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0),
1310 1.28 msaitoh (IXGBE_READ_REG(hw, IXGBE_PSRTYPE(0)) | IXGBE_PSRTYPE_TCPHDR));
1311 1.1 msaitoh
1312 1.1 msaitoh /* Disable RSC for ACK packets */
1313 1.1 msaitoh IXGBE_WRITE_REG(hw, IXGBE_RSCDBU,
1314 1.1 msaitoh (IXGBE_RSCDBU_RSCACKDIS | IXGBE_READ_REG(hw, IXGBE_RSCDBU)));
1315 1.1 msaitoh
1316 1.1 msaitoh rxr->hw_rsc = TRUE;
1317 1.28 msaitoh } /* ixgbe_setup_hw_rsc */
1318 1.8 msaitoh
1319 1.28 msaitoh /************************************************************************
1320 1.28 msaitoh * ixgbe_refresh_mbufs
1321 1.1 msaitoh *
1322 1.28 msaitoh * Refresh mbuf buffers for RX descriptor rings
1323 1.28 msaitoh  *   - now keeps its own state so discards due to resource
1324 1.28 msaitoh  *     exhaustion are unnecessary; if an mbuf cannot be obtained
1325 1.28 msaitoh  *     it just returns, keeping its placeholder, so it can simply
1326 1.28 msaitoh  *     be called again later to retry.
1327 1.28 msaitoh ************************************************************************/
1328 1.1 msaitoh static void
1329 1.1 msaitoh ixgbe_refresh_mbufs(struct rx_ring *rxr, int limit)
1330 1.1 msaitoh {
1331 1.28 msaitoh struct adapter *adapter = rxr->adapter;
1332 1.28 msaitoh struct ixgbe_rx_buf *rxbuf;
1333 1.28 msaitoh struct mbuf *mp;
1334 1.28 msaitoh int i, j, error;
1335 1.28 msaitoh bool refreshed = false;
1336 1.1 msaitoh
1337 1.1 msaitoh i = j = rxr->next_to_refresh;
1338 1.1 msaitoh /* Control the loop with one beyond */
1339 1.1 msaitoh if (++j == rxr->num_desc)
1340 1.1 msaitoh j = 0;
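
	/*
	 * Example of the "one beyond" control: with num_desc = 4,
	 * next_to_refresh = 2 and limit = 1, we start at i = 2, j = 3,
	 * refresh descriptors 2 and 3, and stop with next_to_refresh
	 * at 0, one slot short of limit.  next_to_refresh only
	 * advances after a slot has a buffer, so a failed allocation
	 * leaves it pointing at the slot to retry.
	 */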
1341 1.1 msaitoh
1342 1.1 msaitoh while (j != limit) {
1343 1.1 msaitoh rxbuf = &rxr->rx_buffers[i];
1344 1.1 msaitoh if (rxbuf->buf == NULL) {
1345 1.49 msaitoh mp = ixgbe_getjcl(&rxr->jcl_head, M_NOWAIT,
1346 1.1 msaitoh MT_DATA, M_PKTHDR, rxr->mbuf_sz);
1347 1.1 msaitoh if (mp == NULL) {
1348 1.1 msaitoh rxr->no_jmbuf.ev_count++;
1349 1.1 msaitoh goto update;
1350 1.1 msaitoh }
1351 1.1 msaitoh if (adapter->max_frame_size <= (MCLBYTES - ETHER_ALIGN))
1352 1.1 msaitoh m_adj(mp, ETHER_ALIGN);
1353 1.1 msaitoh } else
1354 1.1 msaitoh mp = rxbuf->buf;
1355 1.1 msaitoh
1356 1.1 msaitoh mp->m_pkthdr.len = mp->m_len = rxr->mbuf_sz;
1357 1.1 msaitoh
1358 1.1 msaitoh /* If we're dealing with an mbuf that was copied rather
1359 1.1 msaitoh * than replaced, there's no need to go through busdma.
1360 1.1 msaitoh */
1361 1.1 msaitoh if ((rxbuf->flags & IXGBE_RX_COPY) == 0) {
1362 1.1 msaitoh /* Get the memory mapping */
1363 1.4 msaitoh ixgbe_dmamap_unload(rxr->ptag, rxbuf->pmap);
1364 1.1 msaitoh error = bus_dmamap_load_mbuf(rxr->ptag->dt_dmat,
1365 1.1 msaitoh rxbuf->pmap, mp, BUS_DMA_NOWAIT);
1366 1.1 msaitoh if (error != 0) {
1367 1.28 msaitoh printf("Refresh mbufs: payload dmamap load failure - %d\n", error);
1368 1.1 msaitoh m_free(mp);
1369 1.1 msaitoh rxbuf->buf = NULL;
1370 1.1 msaitoh goto update;
1371 1.1 msaitoh }
1372 1.1 msaitoh rxbuf->buf = mp;
1373 1.1 msaitoh bus_dmamap_sync(rxr->ptag->dt_dmat, rxbuf->pmap,
1374 1.1 msaitoh 0, mp->m_pkthdr.len, BUS_DMASYNC_PREREAD);
1375 1.1 msaitoh rxbuf->addr = rxr->rx_base[i].read.pkt_addr =
1376 1.1 msaitoh htole64(rxbuf->pmap->dm_segs[0].ds_addr);
1377 1.1 msaitoh } else {
1378 1.1 msaitoh rxr->rx_base[i].read.pkt_addr = rxbuf->addr;
1379 1.1 msaitoh rxbuf->flags &= ~IXGBE_RX_COPY;
1380 1.1 msaitoh }
1381 1.1 msaitoh
1382 1.1 msaitoh refreshed = true;
1383 1.1 msaitoh /* Next is precalculated */
1384 1.1 msaitoh i = j;
1385 1.1 msaitoh rxr->next_to_refresh = i;
1386 1.1 msaitoh if (++j == rxr->num_desc)
1387 1.1 msaitoh j = 0;
1388 1.1 msaitoh }
1389 1.28 msaitoh
1390 1.1 msaitoh update:
1391 1.1 msaitoh if (refreshed) /* Update hardware tail index */
1392 1.28 msaitoh IXGBE_WRITE_REG(&adapter->hw, rxr->tail, rxr->next_to_refresh);
1393 1.28 msaitoh
1394 1.1 msaitoh return;
1395 1.28 msaitoh } /* ixgbe_refresh_mbufs */
1396 1.1 msaitoh
1397 1.28 msaitoh /************************************************************************
1398 1.28 msaitoh * ixgbe_allocate_receive_buffers
1399 1.1 msaitoh *
1400 1.28 msaitoh * Allocate memory for rx_buffer structures. Since we use one
1401 1.28 msaitoh  *   rx_buffer per received packet, the maximum number of rx_buffers
1402 1.28 msaitoh * that we'll need is equal to the number of receive descriptors
1403 1.28 msaitoh * that we've allocated.
1404 1.28 msaitoh ************************************************************************/
1405 1.28 msaitoh static int
1406 1.1 msaitoh ixgbe_allocate_receive_buffers(struct rx_ring *rxr)
1407 1.1 msaitoh {
1408 1.28 msaitoh struct adapter *adapter = rxr->adapter;
1409 1.28 msaitoh device_t dev = adapter->dev;
1410 1.28 msaitoh struct ixgbe_rx_buf *rxbuf;
1411 1.28 msaitoh int bsize, error;
1412 1.1 msaitoh
1413 1.1 msaitoh bsize = sizeof(struct ixgbe_rx_buf) * rxr->num_desc;
1414 1.28 msaitoh rxr->rx_buffers = (struct ixgbe_rx_buf *)malloc(bsize, M_DEVBUF,
1415 1.28 msaitoh M_NOWAIT | M_ZERO);
1416 1.28 msaitoh if (rxr->rx_buffers == NULL) {
1417 1.1 msaitoh aprint_error_dev(dev, "Unable to allocate rx_buffer memory\n");
1418 1.1 msaitoh error = ENOMEM;
1419 1.1 msaitoh goto fail;
1420 1.1 msaitoh }
1421 1.1 msaitoh
1422 1.28 msaitoh error = ixgbe_dma_tag_create(
1423 1.28 msaitoh /* parent */ adapter->osdep.dmat,
1424 1.28 msaitoh /* alignment */ 1,
1425 1.28 msaitoh /* bounds */ 0,
1426 1.28 msaitoh /* maxsize */ MJUM16BYTES,
1427 1.28 msaitoh /* nsegments */ 1,
1428 1.28 msaitoh /* maxsegsize */ MJUM16BYTES,
1429 1.28 msaitoh /* flags */ 0,
1430 1.28 msaitoh &rxr->ptag);
1431 1.28 msaitoh if (error != 0) {
1432 1.1 msaitoh aprint_error_dev(dev, "Unable to create RX DMA tag\n");
1433 1.1 msaitoh goto fail;
1434 1.1 msaitoh }
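
	/*
	 * Note that nsegments = 1 with a 16K maxsegsize means each RX
	 * buffer must map as a single physically contiguous segment,
	 * which the jumbo cluster allocations (ixgbe_getjcl) used
	 * below provide; only one buffer address (read.pkt_addr) is
	 * ever programmed into a descriptor.
	 */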
1435 1.1 msaitoh
1436 1.5 msaitoh 	for (int i = 0; i < rxr->num_desc; i++) {
1437 1.1 msaitoh rxbuf = &rxr->rx_buffers[i];
1438 1.4 msaitoh error = ixgbe_dmamap_create(rxr->ptag, 0, &rxbuf->pmap);
1439 1.1 msaitoh if (error) {
1440 1.1 msaitoh aprint_error_dev(dev, "Unable to create RX dma map\n");
1441 1.1 msaitoh goto fail;
1442 1.1 msaitoh }
1443 1.1 msaitoh }
1444 1.1 msaitoh
1445 1.1 msaitoh return (0);
1446 1.1 msaitoh
1447 1.1 msaitoh fail:
1448 1.1 msaitoh /* Frees all, but can handle partial completion */
1449 1.1 msaitoh ixgbe_free_receive_structures(adapter);
1450 1.28 msaitoh
1451 1.1 msaitoh return (error);
1452 1.28 msaitoh } /* ixgbe_allocate_receive_buffers */
1453 1.1 msaitoh
1454 1.28 msaitoh /************************************************************************
1455 1.30 msaitoh * ixgbe_free_receive_ring
1456 1.28 msaitoh ************************************************************************/
1457 1.28 msaitoh static void
1458 1.1 msaitoh ixgbe_free_receive_ring(struct rx_ring *rxr)
1459 1.27 msaitoh {
1460 1.5 msaitoh for (int i = 0; i < rxr->num_desc; i++) {
1461 1.27 msaitoh ixgbe_rx_discard(rxr, i);
1462 1.1 msaitoh }
1463 1.28 msaitoh } /* ixgbe_free_receive_ring */
1464 1.1 msaitoh
1465 1.28 msaitoh /************************************************************************
1466 1.28 msaitoh * ixgbe_setup_receive_ring
1467 1.1 msaitoh *
1468 1.28 msaitoh * Initialize a receive ring and its buffers.
1469 1.28 msaitoh ************************************************************************/
1470 1.1 msaitoh static int
1471 1.1 msaitoh ixgbe_setup_receive_ring(struct rx_ring *rxr)
1472 1.1 msaitoh {
1473 1.28 msaitoh struct adapter *adapter;
1474 1.28 msaitoh struct ixgbe_rx_buf *rxbuf;
1475 1.1 msaitoh #ifdef LRO
1476 1.28 msaitoh struct ifnet *ifp;
1477 1.28 msaitoh struct lro_ctrl *lro = &rxr->lro;
1478 1.1 msaitoh #endif /* LRO */
1479 1.1 msaitoh #ifdef DEV_NETMAP
1480 1.1 msaitoh struct netmap_adapter *na = NA(rxr->adapter->ifp);
1481 1.28 msaitoh struct netmap_slot *slot;
1482 1.1 msaitoh #endif /* DEV_NETMAP */
1483 1.28 msaitoh int rsize, error = 0;
1484 1.1 msaitoh
1485 1.1 msaitoh adapter = rxr->adapter;
1486 1.1 msaitoh #ifdef LRO
1487 1.1 msaitoh ifp = adapter->ifp;
1488 1.1 msaitoh #endif /* LRO */
1489 1.1 msaitoh
1490 1.1 msaitoh /* Clear the ring contents */
1491 1.1 msaitoh IXGBE_RX_LOCK(rxr);
1492 1.28 msaitoh
1493 1.1 msaitoh #ifdef DEV_NETMAP
1494 1.28 msaitoh if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
1495 1.28 msaitoh slot = netmap_reset(na, NR_RX, rxr->me, 0);
1496 1.1 msaitoh #endif /* DEV_NETMAP */
1497 1.28 msaitoh
1498 1.1 msaitoh rsize = roundup2(adapter->num_rx_desc *
1499 1.1 msaitoh sizeof(union ixgbe_adv_rx_desc), DBA_ALIGN);
1500 1.1 msaitoh bzero((void *)rxr->rx_base, rsize);
1501 1.1 msaitoh /* Cache the size */
1502 1.1 msaitoh rxr->mbuf_sz = adapter->rx_mbuf_sz;
1503 1.1 msaitoh
1504 1.1 msaitoh /* Free current RX buffer structs and their mbufs */
1505 1.1 msaitoh ixgbe_free_receive_ring(rxr);
1506 1.1 msaitoh
1507 1.49 msaitoh IXGBE_RX_UNLOCK(rxr);
1508 1.49 msaitoh /*
1509 1.49 msaitoh * Now reinitialize our supply of jumbo mbufs. The number
1510 1.49 msaitoh * or size of jumbo mbufs may have changed.
1511 1.49 msaitoh * Assume all of rxr->ptag are the same.
1512 1.49 msaitoh */
1513 1.49 msaitoh ixgbe_jcl_reinit(adapter, rxr->ptag->dt_dmat, rxr,
1514 1.49 msaitoh (2 * adapter->num_rx_desc), adapter->rx_mbuf_sz);
1515 1.49 msaitoh
1516 1.49 msaitoh IXGBE_RX_LOCK(rxr);
1517 1.49 msaitoh
1518 1.1 msaitoh /* Now replenish the mbufs */
1519 1.1 msaitoh for (int j = 0; j != rxr->num_desc; ++j) {
1520 1.28 msaitoh struct mbuf *mp;
1521 1.1 msaitoh
1522 1.1 msaitoh rxbuf = &rxr->rx_buffers[j];
1523 1.28 msaitoh
1524 1.1 msaitoh #ifdef DEV_NETMAP
1525 1.1 msaitoh /*
1526 1.1 msaitoh * In netmap mode, fill the map and set the buffer
1527 1.1 msaitoh * address in the NIC ring, considering the offset
1528 1.1 msaitoh * between the netmap and NIC rings (see comment in
1529 1.1 msaitoh * ixgbe_setup_transmit_ring() ). No need to allocate
1530 1.1 msaitoh * an mbuf, so end the block with a continue;
1531 1.1 msaitoh */
1532 1.28 msaitoh if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) && slot) {
1533 1.1 msaitoh int sj = netmap_idx_n2k(&na->rx_rings[rxr->me], j);
1534 1.1 msaitoh uint64_t paddr;
1535 1.1 msaitoh void *addr;
1536 1.1 msaitoh
1537 1.1 msaitoh addr = PNMB(na, slot + sj, &paddr);
1538 1.1 msaitoh netmap_load_map(na, rxr->ptag, rxbuf->pmap, addr);
1539 1.1 msaitoh /* Update descriptor and the cached value */
1540 1.1 msaitoh rxr->rx_base[j].read.pkt_addr = htole64(paddr);
1541 1.1 msaitoh rxbuf->addr = htole64(paddr);
1542 1.1 msaitoh continue;
1543 1.1 msaitoh }
1544 1.1 msaitoh #endif /* DEV_NETMAP */
1545 1.28 msaitoh
1546 1.28 msaitoh rxbuf->flags = 0;
1547 1.49 msaitoh rxbuf->buf = ixgbe_getjcl(&rxr->jcl_head, M_NOWAIT,
1548 1.1 msaitoh MT_DATA, M_PKTHDR, adapter->rx_mbuf_sz);
1549 1.1 msaitoh if (rxbuf->buf == NULL) {
1550 1.1 msaitoh error = ENOBUFS;
1551 1.28 msaitoh goto fail;
1552 1.1 msaitoh }
1553 1.1 msaitoh mp = rxbuf->buf;
1554 1.1 msaitoh mp->m_pkthdr.len = mp->m_len = rxr->mbuf_sz;
1555 1.1 msaitoh /* Get the memory mapping */
1556 1.28 msaitoh error = bus_dmamap_load_mbuf(rxr->ptag->dt_dmat, rxbuf->pmap,
1557 1.28 msaitoh mp, BUS_DMA_NOWAIT);
1558 1.1 msaitoh if (error != 0)
1559 1.1 msaitoh goto fail;
1560 1.1 msaitoh bus_dmamap_sync(rxr->ptag->dt_dmat, rxbuf->pmap,
1561 1.1 msaitoh 0, adapter->rx_mbuf_sz, BUS_DMASYNC_PREREAD);
1562 1.1 msaitoh /* Update the descriptor and the cached value */
1563 1.1 msaitoh rxr->rx_base[j].read.pkt_addr =
1564 1.1 msaitoh htole64(rxbuf->pmap->dm_segs[0].ds_addr);
1565 1.1 msaitoh rxbuf->addr = htole64(rxbuf->pmap->dm_segs[0].ds_addr);
1566 1.1 msaitoh }
1567 1.1 msaitoh
1568 1.1 msaitoh
1569 1.1 msaitoh /* Setup our descriptor indices */
1570 1.1 msaitoh rxr->next_to_check = 0;
1571 1.1 msaitoh rxr->next_to_refresh = 0;
1572 1.1 msaitoh rxr->lro_enabled = FALSE;
1573 1.1 msaitoh rxr->rx_copies.ev_count = 0;
1574 1.13 msaitoh #if 0 /* NetBSD */
1575 1.1 msaitoh rxr->rx_bytes.ev_count = 0;
1576 1.13 msaitoh #if 1 /* Fix inconsistency */
1577 1.13 msaitoh rxr->rx_packets.ev_count = 0;
1578 1.13 msaitoh #endif
1579 1.13 msaitoh #endif
1580 1.1 msaitoh rxr->vtag_strip = FALSE;
1581 1.1 msaitoh
1582 1.1 msaitoh ixgbe_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
1583 1.1 msaitoh BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1584 1.1 msaitoh
1585 1.1 msaitoh /*
1586 1.28 msaitoh * Now set up the LRO interface
1587 1.28 msaitoh */
1588 1.1 msaitoh if (ixgbe_rsc_enable)
1589 1.1 msaitoh ixgbe_setup_hw_rsc(rxr);
1590 1.1 msaitoh #ifdef LRO
1591 1.1 msaitoh else if (ifp->if_capenable & IFCAP_LRO) {
1592 1.1 msaitoh device_t dev = adapter->dev;
1593 1.1 msaitoh int err = tcp_lro_init(lro);
1594 1.1 msaitoh if (err) {
1595 1.1 msaitoh device_printf(dev, "LRO Initialization failed!\n");
1596 1.1 msaitoh goto fail;
1597 1.1 msaitoh }
1598 1.1 msaitoh INIT_DEBUGOUT("RX Soft LRO Initialized\n");
1599 1.1 msaitoh rxr->lro_enabled = TRUE;
1600 1.1 msaitoh lro->ifp = adapter->ifp;
1601 1.1 msaitoh }
1602 1.1 msaitoh #endif /* LRO */
1603 1.1 msaitoh
1604 1.1 msaitoh IXGBE_RX_UNLOCK(rxr);
1605 1.28 msaitoh
1606 1.1 msaitoh return (0);
1607 1.1 msaitoh
1608 1.1 msaitoh fail:
1609 1.1 msaitoh ixgbe_free_receive_ring(rxr);
1610 1.1 msaitoh IXGBE_RX_UNLOCK(rxr);
1611 1.28 msaitoh
1612 1.1 msaitoh return (error);
1613 1.28 msaitoh } /* ixgbe_setup_receive_ring */
1614 1.1 msaitoh
1615 1.28 msaitoh /************************************************************************
1616 1.28 msaitoh * ixgbe_setup_receive_structures - Initialize all receive rings.
1617 1.28 msaitoh ************************************************************************/
1618 1.1 msaitoh int
1619 1.1 msaitoh ixgbe_setup_receive_structures(struct adapter *adapter)
1620 1.1 msaitoh {
1621 1.1 msaitoh struct rx_ring *rxr = adapter->rx_rings;
1622 1.28 msaitoh int j;
1623 1.1 msaitoh
1624 1.1 msaitoh for (j = 0; j < adapter->num_queues; j++, rxr++)
1625 1.1 msaitoh if (ixgbe_setup_receive_ring(rxr))
1626 1.1 msaitoh goto fail;
1627 1.1 msaitoh
1628 1.1 msaitoh return (0);
1629 1.1 msaitoh fail:
1630 1.1 msaitoh /*
1631 1.1 msaitoh * Free RX buffers allocated so far, we will only handle
1632 1.1 msaitoh * the rings that completed, the failing case will have
1633 1.1 msaitoh 	 * cleaned up for itself. 'j' failed, so it's the terminus.
1634 1.1 msaitoh */
1635 1.1 msaitoh for (int i = 0; i < j; ++i) {
1636 1.1 msaitoh rxr = &adapter->rx_rings[i];
1637 1.27 msaitoh IXGBE_RX_LOCK(rxr);
1638 1.1 msaitoh ixgbe_free_receive_ring(rxr);
1639 1.27 msaitoh IXGBE_RX_UNLOCK(rxr);
1640 1.1 msaitoh }
1641 1.1 msaitoh
1642 1.1 msaitoh return (ENOBUFS);
1643 1.28 msaitoh } /* ixgbe_setup_receive_structures */
1644 1.1 msaitoh
1645 1.3 msaitoh
1646 1.28 msaitoh /************************************************************************
1647 1.28 msaitoh * ixgbe_free_receive_structures - Free all receive rings.
1648 1.28 msaitoh ************************************************************************/
1649 1.1 msaitoh void
1650 1.1 msaitoh ixgbe_free_receive_structures(struct adapter *adapter)
1651 1.1 msaitoh {
1652 1.1 msaitoh struct rx_ring *rxr = adapter->rx_rings;
1653 1.1 msaitoh
1654 1.1 msaitoh INIT_DEBUGOUT("ixgbe_free_receive_structures: begin");
1655 1.1 msaitoh
1656 1.1 msaitoh for (int i = 0; i < adapter->num_queues; i++, rxr++) {
1657 1.1 msaitoh ixgbe_free_receive_buffers(rxr);
1658 1.1 msaitoh #ifdef LRO
1659 1.1 msaitoh /* Free LRO memory */
1660 1.28 msaitoh tcp_lro_free(&rxr->lro);
1661 1.1 msaitoh #endif /* LRO */
1662 1.1 msaitoh /* Free the ring memory as well */
1663 1.1 msaitoh ixgbe_dma_free(adapter, &rxr->rxdma);
1664 1.1 msaitoh IXGBE_RX_LOCK_DESTROY(rxr);
1665 1.1 msaitoh }
1666 1.1 msaitoh
1667 1.1 msaitoh free(adapter->rx_rings, M_DEVBUF);
1668 1.28 msaitoh } /* ixgbe_free_receive_structures */
1669 1.1 msaitoh
1670 1.1 msaitoh
1671 1.28 msaitoh /************************************************************************
1672 1.28 msaitoh * ixgbe_free_receive_buffers - Free receive ring data structures
1673 1.28 msaitoh ************************************************************************/
1674 1.1 msaitoh static void
1675 1.1 msaitoh ixgbe_free_receive_buffers(struct rx_ring *rxr)
1676 1.1 msaitoh {
1677 1.28 msaitoh struct adapter *adapter = rxr->adapter;
1678 1.28 msaitoh struct ixgbe_rx_buf *rxbuf;
1679 1.1 msaitoh
1680 1.1 msaitoh INIT_DEBUGOUT("ixgbe_free_receive_buffers: begin");
1681 1.1 msaitoh
1682 1.1 msaitoh /* Cleanup any existing buffers */
1683 1.1 msaitoh if (rxr->rx_buffers != NULL) {
1684 1.1 msaitoh for (int i = 0; i < adapter->num_rx_desc; i++) {
1685 1.1 msaitoh rxbuf = &rxr->rx_buffers[i];
1686 1.27 msaitoh ixgbe_rx_discard(rxr, i);
1687 1.1 msaitoh if (rxbuf->pmap != NULL) {
1688 1.1 msaitoh ixgbe_dmamap_destroy(rxr->ptag, rxbuf->pmap);
1689 1.1 msaitoh rxbuf->pmap = NULL;
1690 1.1 msaitoh }
1691 1.1 msaitoh }
1693 1.1 msaitoh 		free(rxr->rx_buffers, M_DEVBUF);
1694 1.1 msaitoh 		rxr->rx_buffers = NULL;
1696 1.1 msaitoh }
1697 1.1 msaitoh
1698 1.1 msaitoh if (rxr->ptag != NULL) {
1699 1.1 msaitoh ixgbe_dma_tag_destroy(rxr->ptag);
1700 1.1 msaitoh rxr->ptag = NULL;
1701 1.1 msaitoh }
1702 1.1 msaitoh
1703 1.1 msaitoh return;
1704 1.28 msaitoh } /* ixgbe_free_receive_buffers */
1705 1.1 msaitoh
1706 1.28 msaitoh /************************************************************************
1707 1.28 msaitoh * ixgbe_rx_input
1708 1.28 msaitoh ************************************************************************/
1709 1.1 msaitoh static __inline void
1710 1.28 msaitoh ixgbe_rx_input(struct rx_ring *rxr, struct ifnet *ifp, struct mbuf *m,
1711 1.28 msaitoh u32 ptype)
1712 1.1 msaitoh {
1713 1.20 msaitoh struct adapter *adapter = ifp->if_softc;
1714 1.1 msaitoh
1715 1.1 msaitoh #ifdef LRO
1716 1.1 msaitoh struct ethercom *ec = &adapter->osdep.ec;
1717 1.1 msaitoh
1718 1.28 msaitoh /*
1719 1.28 msaitoh 	 * At the moment LRO is only for IP/TCP packets, the TCP checksum
1720 1.28 msaitoh 	 * must be computed by hardware, and there must be no VLAN tag in
1721 1.28 msaitoh 	 * the ethernet header.  IPv6 extension headers are not yet supported.
1722 1.28 msaitoh */
1723 1.1 msaitoh if (rxr->lro_enabled &&
1724 1.1 msaitoh (ec->ec_capenable & ETHERCAP_VLAN_HWTAGGING) != 0 &&
1725 1.1 msaitoh (ptype & IXGBE_RXDADV_PKTTYPE_ETQF) == 0 &&
1726 1.1 msaitoh ((ptype & (IXGBE_RXDADV_PKTTYPE_IPV4 | IXGBE_RXDADV_PKTTYPE_TCP)) ==
1727 1.1 msaitoh (IXGBE_RXDADV_PKTTYPE_IPV4 | IXGBE_RXDADV_PKTTYPE_TCP) ||
1728 1.1 msaitoh (ptype & (IXGBE_RXDADV_PKTTYPE_IPV6 | IXGBE_RXDADV_PKTTYPE_TCP)) ==
1729 1.1 msaitoh (IXGBE_RXDADV_PKTTYPE_IPV6 | IXGBE_RXDADV_PKTTYPE_TCP)) &&
1730 1.1 msaitoh (m->m_pkthdr.csum_flags & (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) ==
1731 1.1 msaitoh (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) {
1732 1.1 msaitoh /*
1733 1.1 msaitoh * Send to the stack if:
1734 1.1 msaitoh 		 *  - LRO not enabled, or
1735 1.1 msaitoh 		 *  - no LRO resources, or
1736 1.1 msaitoh 		 *  - lro enqueue fails
1737 1.1 msaitoh */
1738 1.1 msaitoh if (rxr->lro.lro_cnt != 0)
1739 1.1 msaitoh if (tcp_lro_rx(&rxr->lro, m, 0) == 0)
1740 1.1 msaitoh return;
1741 1.1 msaitoh }
1742 1.1 msaitoh #endif /* LRO */
1743 1.1 msaitoh
1744 1.20 msaitoh if_percpuq_enqueue(adapter->ipq, m);
1745 1.28 msaitoh } /* ixgbe_rx_input */
1746 1.1 msaitoh
1747 1.28 msaitoh /************************************************************************
1748 1.28 msaitoh * ixgbe_rx_discard
1749 1.28 msaitoh ************************************************************************/
1750 1.1 msaitoh static __inline void
1751 1.1 msaitoh ixgbe_rx_discard(struct rx_ring *rxr, int i)
1752 1.1 msaitoh {
1753 1.28 msaitoh struct ixgbe_rx_buf *rbuf;
1754 1.1 msaitoh
1755 1.1 msaitoh rbuf = &rxr->rx_buffers[i];
1756 1.1 msaitoh
1757 1.1 msaitoh /*
1758 1.28 msaitoh * With advanced descriptors the writeback
1759 1.28 msaitoh 	 * clobbers the buffer addrs, so it's easier
1760 1.28 msaitoh * to just free the existing mbufs and take
1761 1.28 msaitoh * the normal refresh path to get new buffers
1762 1.28 msaitoh * and mapping.
1763 1.28 msaitoh */
1764 1.1 msaitoh
1765 1.26 msaitoh 	if (rbuf->fmp != NULL) { /* Partial chain? */
1766 1.27 msaitoh bus_dmamap_sync(rxr->ptag->dt_dmat, rbuf->pmap, 0,
1767 1.27 msaitoh rbuf->buf->m_pkthdr.len, BUS_DMASYNC_POSTREAD);
1768 1.1 msaitoh m_freem(rbuf->fmp);
1769 1.1 msaitoh rbuf->fmp = NULL;
1770 1.1 msaitoh rbuf->buf = NULL; /* rbuf->buf is part of fmp's chain */
1771 1.1 msaitoh } else if (rbuf->buf) {
1772 1.27 msaitoh bus_dmamap_sync(rxr->ptag->dt_dmat, rbuf->pmap, 0,
1773 1.27 msaitoh rbuf->buf->m_pkthdr.len, BUS_DMASYNC_POSTREAD);
1774 1.1 msaitoh m_free(rbuf->buf);
1775 1.1 msaitoh rbuf->buf = NULL;
1776 1.1 msaitoh }
1777 1.4 msaitoh ixgbe_dmamap_unload(rxr->ptag, rbuf->pmap);
1778 1.1 msaitoh
1779 1.1 msaitoh rbuf->flags = 0;
1780 1.1 msaitoh
1781 1.1 msaitoh return;
1782 1.28 msaitoh } /* ixgbe_rx_discard */
1783 1.1 msaitoh
1784 1.1 msaitoh
1785 1.28 msaitoh /************************************************************************
1786 1.28 msaitoh * ixgbe_rxeof
1787 1.1 msaitoh *
1788 1.28 msaitoh * Executes in interrupt context. It replenishes the
1789 1.28 msaitoh  *   mbufs in the descriptor ring and sends data which has
1790 1.28 msaitoh  *   been DMA'd into host memory to the upper layer.
1791 1.1 msaitoh *
1792 1.28 msaitoh * Return TRUE for more work, FALSE for all clean.
1793 1.28 msaitoh ************************************************************************/
1794 1.1 msaitoh bool
1795 1.1 msaitoh ixgbe_rxeof(struct ix_queue *que)
1796 1.1 msaitoh {
1797 1.1 msaitoh struct adapter *adapter = que->adapter;
1798 1.1 msaitoh struct rx_ring *rxr = que->rxr;
1799 1.1 msaitoh struct ifnet *ifp = adapter->ifp;
1800 1.1 msaitoh #ifdef LRO
1801 1.1 msaitoh struct lro_ctrl *lro = &rxr->lro;
1802 1.1 msaitoh #endif /* LRO */
1803 1.28 msaitoh union ixgbe_adv_rx_desc *cur;
1804 1.28 msaitoh struct ixgbe_rx_buf *rbuf, *nbuf;
1805 1.1 msaitoh int i, nextp, processed = 0;
1806 1.1 msaitoh u32 staterr = 0;
1807 1.7 msaitoh u32 count = adapter->rx_process_limit;
1808 1.1 msaitoh #ifdef RSS
1809 1.1 msaitoh u16 pkt_info;
1810 1.1 msaitoh #endif
1811 1.1 msaitoh
1812 1.1 msaitoh IXGBE_RX_LOCK(rxr);
1813 1.1 msaitoh
1814 1.1 msaitoh #ifdef DEV_NETMAP
1815 1.28 msaitoh if (adapter->feat_en & IXGBE_FEATURE_NETMAP) {
1816 1.28 msaitoh /* Same as the txeof routine: wakeup clients on intr. */
1817 1.28 msaitoh if (netmap_rx_irq(ifp, rxr->me, &processed)) {
1818 1.28 msaitoh IXGBE_RX_UNLOCK(rxr);
1819 1.28 msaitoh return (FALSE);
1820 1.28 msaitoh }
1821 1.1 msaitoh }
1822 1.1 msaitoh #endif /* DEV_NETMAP */
1823 1.1 msaitoh
1824 1.1 msaitoh for (i = rxr->next_to_check; count != 0;) {
1825 1.28 msaitoh struct mbuf *sendmp, *mp;
1826 1.28 msaitoh u32 rsc, ptype;
1827 1.28 msaitoh u16 len;
1828 1.28 msaitoh u16 vtag = 0;
1829 1.28 msaitoh bool eop;
1830 1.1 msaitoh
1831 1.1 msaitoh /* Sync the ring. */
1832 1.1 msaitoh ixgbe_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
1833 1.1 msaitoh BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1834 1.1 msaitoh
1835 1.1 msaitoh cur = &rxr->rx_base[i];
1836 1.1 msaitoh staterr = le32toh(cur->wb.upper.status_error);
1837 1.1 msaitoh #ifdef RSS
1838 1.1 msaitoh pkt_info = le16toh(cur->wb.lower.lo_dword.hs_rss.pkt_info);
1839 1.1 msaitoh #endif
1840 1.1 msaitoh
1841 1.1 msaitoh if ((staterr & IXGBE_RXD_STAT_DD) == 0)
1842 1.1 msaitoh break;
1843 1.1 msaitoh
1844 1.1 msaitoh count--;
1845 1.1 msaitoh sendmp = NULL;
1846 1.1 msaitoh nbuf = NULL;
1847 1.1 msaitoh rsc = 0;
1848 1.1 msaitoh cur->wb.upper.status_error = 0;
1849 1.1 msaitoh rbuf = &rxr->rx_buffers[i];
1850 1.1 msaitoh mp = rbuf->buf;
1851 1.1 msaitoh
1852 1.1 msaitoh len = le16toh(cur->wb.upper.length);
1853 1.1 msaitoh ptype = le32toh(cur->wb.lower.lo_dword.data) &
1854 1.1 msaitoh IXGBE_RXDADV_PKTTYPE_MASK;
1855 1.1 msaitoh eop = ((staterr & IXGBE_RXD_STAT_EOP) != 0);
1856 1.1 msaitoh
1857 1.1 msaitoh /* Make sure bad packets are discarded */
1858 1.1 msaitoh if (eop && (staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK) != 0) {
1859 1.3 msaitoh #if __FreeBSD_version >= 1100036
1860 1.28 msaitoh if (adapter->feat_en & IXGBE_FEATURE_VF)
1861 1.4 msaitoh if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
1862 1.3 msaitoh #endif
1863 1.1 msaitoh rxr->rx_discarded.ev_count++;
1864 1.1 msaitoh ixgbe_rx_discard(rxr, i);
1865 1.1 msaitoh goto next_desc;
1866 1.1 msaitoh }
1867 1.1 msaitoh
1868 1.27 msaitoh bus_dmamap_sync(rxr->ptag->dt_dmat, rbuf->pmap, 0,
1869 1.27 msaitoh rbuf->buf->m_pkthdr.len, BUS_DMASYNC_POSTREAD);
1870 1.27 msaitoh
1871 1.1 msaitoh /*
1872 1.28 msaitoh 		 * On the 82599, which supports hardware LRO
1873 1.28 msaitoh 		 * (called HW RSC), packets need not be
1874 1.28 msaitoh 		 * fragmented across sequential descriptors;
1875 1.28 msaitoh 		 * rather, the next descriptor is indicated in
1876 1.28 msaitoh 		 * bits of the current descriptor.  This also
1877 1.28 msaitoh 		 * means that we might process more than one
1878 1.28 msaitoh 		 * packet at a time, something that has never
1879 1.28 msaitoh 		 * been true before; it required eliminating
1880 1.28 msaitoh 		 * global chain pointers in favor of what we
1881 1.28 msaitoh 		 * are doing here. -jfv
1882 1.28 msaitoh */
1883 1.1 msaitoh if (!eop) {
1884 1.1 msaitoh /*
1885 1.28 msaitoh * Figure out the next descriptor
1886 1.28 msaitoh * of this frame.
1887 1.28 msaitoh */
1888 1.1 msaitoh if (rxr->hw_rsc == TRUE) {
1889 1.1 msaitoh rsc = ixgbe_rsc_count(cur);
1890 1.1 msaitoh rxr->rsc_num += (rsc - 1);
1891 1.1 msaitoh }
1892 1.1 msaitoh if (rsc) { /* Get hardware index */
1893 1.28 msaitoh nextp = ((staterr & IXGBE_RXDADV_NEXTP_MASK) >>
1894 1.1 msaitoh IXGBE_RXDADV_NEXTP_SHIFT);
1895 1.1 msaitoh } else { /* Just sequential */
1896 1.1 msaitoh nextp = i + 1;
1897 1.1 msaitoh if (nextp == adapter->num_rx_desc)
1898 1.1 msaitoh nextp = 0;
1899 1.1 msaitoh }
1900 1.1 msaitoh nbuf = &rxr->rx_buffers[nextp];
1901 1.1 msaitoh prefetch(nbuf);
1902 1.1 msaitoh }
1903 1.1 msaitoh /*
1904 1.28 msaitoh * Rather than using the fmp/lmp global pointers
1905 1.28 msaitoh * we now keep the head of a packet chain in the
1906 1.28 msaitoh * buffer struct and pass this along from one
1907 1.28 msaitoh * descriptor to the next, until we get EOP.
1908 1.28 msaitoh */
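		/*
		 * Illustration: for a frame spanning descriptors
		 * d0..d2, the head mbuf allocated at d0 rides in the
		 * rx_buffers[] entry of the *next* descriptor each
		 * time:
		 *
		 *	d0: fmp == NULL, head = mp0;   buffers[d1].fmp = mp0
		 *	d1: fmp == mp0, add mp1's len; buffers[d2].fmp = mp0
		 *	d2: EOP set, fmp == mp0, pass the whole chain up
		 */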
1909 1.1 msaitoh mp->m_len = len;
1910 1.1 msaitoh /*
1911 1.28 msaitoh 		 * See if there is a stored head from a previous
1912 1.28 msaitoh 		 * descriptor that this buffer continues.
1913 1.28 msaitoh */
1914 1.1 msaitoh sendmp = rbuf->fmp;
1915 1.1 msaitoh if (sendmp != NULL) { /* secondary frag */
1916 1.1 msaitoh rbuf->buf = rbuf->fmp = NULL;
1917 1.1 msaitoh mp->m_flags &= ~M_PKTHDR;
1918 1.1 msaitoh sendmp->m_pkthdr.len += mp->m_len;
1919 1.1 msaitoh } else {
1920 1.1 msaitoh /*
1921 1.1 msaitoh * Optimize. This might be a small packet,
1922 1.1 msaitoh * maybe just a TCP ACK. Do a fast copy that
1923 1.1 msaitoh * is cache aligned into a new mbuf, and
1924 1.1 msaitoh * leave the old mbuf+cluster for re-use.
1925 1.1 msaitoh */
1926 1.1 msaitoh if (eop && len <= IXGBE_RX_COPY_LEN) {
1927 1.1 msaitoh sendmp = m_gethdr(M_NOWAIT, MT_DATA);
1928 1.1 msaitoh if (sendmp != NULL) {
1929 1.28 msaitoh sendmp->m_data += IXGBE_RX_COPY_ALIGN;
1930 1.28 msaitoh ixgbe_bcopy(mp->m_data, sendmp->m_data,
1931 1.28 msaitoh len);
1932 1.1 msaitoh sendmp->m_len = len;
1933 1.1 msaitoh rxr->rx_copies.ev_count++;
1934 1.1 msaitoh rbuf->flags |= IXGBE_RX_COPY;
1935 1.1 msaitoh }
1936 1.1 msaitoh }
1937 1.1 msaitoh if (sendmp == NULL) {
1938 1.1 msaitoh rbuf->buf = rbuf->fmp = NULL;
1939 1.1 msaitoh sendmp = mp;
1940 1.1 msaitoh }
1941 1.1 msaitoh
1942 1.1 msaitoh /* first desc of a non-ps chain */
1943 1.1 msaitoh sendmp->m_flags |= M_PKTHDR;
1944 1.1 msaitoh sendmp->m_pkthdr.len = mp->m_len;
1945 1.1 msaitoh }
1946 1.1 msaitoh ++processed;
1947 1.1 msaitoh
1948 1.1 msaitoh /* Pass the head pointer on */
1949 1.1 msaitoh if (eop == 0) {
1950 1.1 msaitoh nbuf->fmp = sendmp;
1951 1.1 msaitoh sendmp = NULL;
1952 1.1 msaitoh mp->m_next = nbuf->buf;
1953 1.1 msaitoh } else { /* Sending this frame */
1954 1.1 msaitoh m_set_rcvif(sendmp, ifp);
1955 1.31 msaitoh ++rxr->packets;
1956 1.1 msaitoh rxr->rx_packets.ev_count++;
1957 1.1 msaitoh /* capture data for AIM */
1958 1.1 msaitoh rxr->bytes += sendmp->m_pkthdr.len;
1959 1.1 msaitoh rxr->rx_bytes.ev_count += sendmp->m_pkthdr.len;
1960 1.1 msaitoh /* Process vlan info */
1961 1.28 msaitoh if ((rxr->vtag_strip) && (staterr & IXGBE_RXD_STAT_VP))
1962 1.1 msaitoh vtag = le16toh(cur->wb.upper.vlan);
1963 1.1 msaitoh if (vtag) {
1964 1.29 knakahar vlan_set_tag(sendmp, vtag);
1965 1.1 msaitoh }
1966 1.1 msaitoh if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) {
1967 1.1 msaitoh ixgbe_rx_checksum(staterr, sendmp, ptype,
1968 1.3 msaitoh &adapter->stats.pf);
1969 1.1 msaitoh }
1970 1.8 msaitoh
1971 1.6 msaitoh #if 0 /* FreeBSD */
1972 1.28 msaitoh /*
1973 1.28 msaitoh * In case of multiqueue, we have RXCSUM.PCSD bit set
1974 1.28 msaitoh * and never cleared. This means we have RSS hash
1975 1.28 msaitoh * available to be used.
1976 1.28 msaitoh */
1977 1.28 msaitoh if (adapter->num_queues > 1) {
1978 1.28 msaitoh sendmp->m_pkthdr.flowid =
1979 1.28 msaitoh le32toh(cur->wb.lower.hi_dword.rss);
1980 1.44 msaitoh switch (pkt_info & IXGBE_RXDADV_RSSTYPE_MASK) {
1981 1.44 msaitoh case IXGBE_RXDADV_RSSTYPE_IPV4:
1982 1.28 msaitoh M_HASHTYPE_SET(sendmp,
1983 1.28 msaitoh M_HASHTYPE_RSS_IPV4);
1984 1.28 msaitoh break;
1985 1.44 msaitoh case IXGBE_RXDADV_RSSTYPE_IPV4_TCP:
1986 1.28 msaitoh M_HASHTYPE_SET(sendmp,
1987 1.28 msaitoh M_HASHTYPE_RSS_TCP_IPV4);
1988 1.28 msaitoh break;
1989 1.44 msaitoh case IXGBE_RXDADV_RSSTYPE_IPV6:
1990 1.28 msaitoh M_HASHTYPE_SET(sendmp,
1991 1.28 msaitoh M_HASHTYPE_RSS_IPV6);
1992 1.28 msaitoh break;
1993 1.44 msaitoh case IXGBE_RXDADV_RSSTYPE_IPV6_TCP:
1994 1.28 msaitoh M_HASHTYPE_SET(sendmp,
1995 1.28 msaitoh M_HASHTYPE_RSS_TCP_IPV6);
1996 1.28 msaitoh break;
1997 1.44 msaitoh case IXGBE_RXDADV_RSSTYPE_IPV6_EX:
1998 1.28 msaitoh M_HASHTYPE_SET(sendmp,
1999 1.28 msaitoh M_HASHTYPE_RSS_IPV6_EX);
2000 1.28 msaitoh break;
2001 1.44 msaitoh case IXGBE_RXDADV_RSSTYPE_IPV6_TCP_EX:
2002 1.28 msaitoh M_HASHTYPE_SET(sendmp,
2003 1.28 msaitoh M_HASHTYPE_RSS_TCP_IPV6_EX);
2004 1.28 msaitoh break;
2005 1.6 msaitoh #if __FreeBSD_version > 1100000
2006 1.44 msaitoh case IXGBE_RXDADV_RSSTYPE_IPV4_UDP:
2007 1.28 msaitoh M_HASHTYPE_SET(sendmp,
2008 1.28 msaitoh M_HASHTYPE_RSS_UDP_IPV4);
2009 1.28 msaitoh break;
2010 1.44 msaitoh case IXGBE_RXDADV_RSSTYPE_IPV6_UDP:
2011 1.28 msaitoh M_HASHTYPE_SET(sendmp,
2012 1.28 msaitoh M_HASHTYPE_RSS_UDP_IPV6);
2013 1.28 msaitoh break;
2014 1.44 msaitoh case IXGBE_RXDADV_RSSTYPE_IPV6_UDP_EX:
2015 1.28 msaitoh M_HASHTYPE_SET(sendmp,
2016 1.28 msaitoh M_HASHTYPE_RSS_UDP_IPV6_EX);
2017 1.28 msaitoh break;
2018 1.28 msaitoh #endif
2019 1.44 msaitoh default:
2020 1.28 msaitoh M_HASHTYPE_SET(sendmp,
2021 1.28 msaitoh M_HASHTYPE_OPAQUE_HASH);
2022 1.28 msaitoh }
2023 1.28 msaitoh } else {
2024 1.28 msaitoh sendmp->m_pkthdr.flowid = que->msix;
2025 1.1 msaitoh M_HASHTYPE_SET(sendmp, M_HASHTYPE_OPAQUE);
2026 1.1 msaitoh }
2027 1.8 msaitoh #endif
2028 1.1 msaitoh }
2029 1.1 msaitoh next_desc:
2030 1.1 msaitoh ixgbe_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
2031 1.1 msaitoh BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2032 1.1 msaitoh
2033 1.1 msaitoh /* Advance our pointers to the next descriptor. */
2034 1.1 msaitoh if (++i == rxr->num_desc)
2035 1.1 msaitoh i = 0;
2036 1.1 msaitoh
2037 1.1 msaitoh /* Now send to the stack or do LRO */
2038 1.1 msaitoh if (sendmp != NULL) {
2039 1.1 msaitoh rxr->next_to_check = i;
2040 1.28 msaitoh IXGBE_RX_UNLOCK(rxr);
2041 1.1 msaitoh ixgbe_rx_input(rxr, ifp, sendmp, ptype);
2042 1.28 msaitoh IXGBE_RX_LOCK(rxr);
2043 1.1 msaitoh i = rxr->next_to_check;
2044 1.1 msaitoh }
2045 1.1 msaitoh
2046 1.28 msaitoh 		/* Refresh mbufs every eight processed descriptors */
2047 1.1 msaitoh if (processed == 8) {
2048 1.1 msaitoh ixgbe_refresh_mbufs(rxr, i);
2049 1.1 msaitoh processed = 0;
2050 1.1 msaitoh }
2051 1.1 msaitoh }
2052 1.1 msaitoh
2053 1.1 msaitoh /* Refresh any remaining buf structs */
2054 1.1 msaitoh if (ixgbe_rx_unrefreshed(rxr))
2055 1.1 msaitoh ixgbe_refresh_mbufs(rxr, i);
2056 1.1 msaitoh
2057 1.1 msaitoh rxr->next_to_check = i;
2058 1.1 msaitoh
2059 1.28 msaitoh IXGBE_RX_UNLOCK(rxr);
2060 1.28 msaitoh
2061 1.1 msaitoh #ifdef LRO
2062 1.1 msaitoh /*
2063 1.1 msaitoh * Flush any outstanding LRO work
2064 1.1 msaitoh */
2065 1.10 msaitoh tcp_lro_flush_all(lro);
2066 1.1 msaitoh #endif /* LRO */
2067 1.1 msaitoh
2068 1.1 msaitoh /*
2069 1.28 msaitoh * Still have cleaning to do?
2070 1.28 msaitoh */
2071 1.1 msaitoh if ((staterr & IXGBE_RXD_STAT_DD) != 0)
2072 1.28 msaitoh return (TRUE);
2073 1.28 msaitoh
2074 1.28 msaitoh return (FALSE);
2075 1.28 msaitoh } /* ixgbe_rxeof */
2076 1.1 msaitoh
2077 1.1 msaitoh
2078 1.28 msaitoh /************************************************************************
2079 1.28 msaitoh * ixgbe_rx_checksum
2080 1.1 msaitoh *
2081 1.28 msaitoh * Verify that the hardware indicated that the checksum is valid.
2082 1.28 msaitoh  *   Inform the stack about the status of the checksum so that
2083 1.28 msaitoh  *   it doesn't spend time verifying it.
2084 1.28 msaitoh ************************************************************************/
2085 1.1 msaitoh static void
2086 1.1 msaitoh ixgbe_rx_checksum(u32 staterr, struct mbuf * mp, u32 ptype,
2087 1.1 msaitoh struct ixgbe_hw_stats *stats)
2088 1.1 msaitoh {
2089 1.28 msaitoh u16 status = (u16)staterr;
2090 1.28 msaitoh u8 errors = (u8)(staterr >> 24);
2091 1.1 msaitoh #if 0
2092 1.28 msaitoh bool sctp = false;
2093 1.1 msaitoh
2094 1.1 msaitoh if ((ptype & IXGBE_RXDADV_PKTTYPE_ETQF) == 0 &&
2095 1.1 msaitoh (ptype & IXGBE_RXDADV_PKTTYPE_SCTP) != 0)
2096 1.8 msaitoh sctp = true;
2097 1.1 msaitoh #endif
2098 1.1 msaitoh
2099 1.8 msaitoh /* IPv4 checksum */
2100 1.1 msaitoh if (status & IXGBE_RXD_STAT_IPCS) {
2101 1.1 msaitoh stats->ipcs.ev_count++;
2102 1.1 msaitoh if (!(errors & IXGBE_RXD_ERR_IPE)) {
2103 1.1 msaitoh /* IP Checksum Good */
2104 1.1 msaitoh mp->m_pkthdr.csum_flags = M_CSUM_IPv4;
2105 1.1 msaitoh } else {
2106 1.1 msaitoh stats->ipcs_bad.ev_count++;
2107 1.1 msaitoh mp->m_pkthdr.csum_flags = M_CSUM_IPv4|M_CSUM_IPv4_BAD;
2108 1.1 msaitoh }
2109 1.1 msaitoh }
2110 1.8 msaitoh /* TCP/UDP/SCTP checksum */
2111 1.1 msaitoh if (status & IXGBE_RXD_STAT_L4CS) {
2112 1.1 msaitoh stats->l4cs.ev_count++;
2113 1.1 msaitoh int type = M_CSUM_TCPv4|M_CSUM_TCPv6|M_CSUM_UDPv4|M_CSUM_UDPv6;
2114 1.1 msaitoh if (!(errors & IXGBE_RXD_ERR_TCPE)) {
2115 1.1 msaitoh mp->m_pkthdr.csum_flags |= type;
2116 1.1 msaitoh } else {
2117 1.1 msaitoh stats->l4cs_bad.ev_count++;
2118 1.1 msaitoh mp->m_pkthdr.csum_flags |= type | M_CSUM_TCP_UDP_BAD;
2119 1.1 msaitoh }
2120 1.1 msaitoh }
2121 1.28 msaitoh } /* ixgbe_rx_checksum */
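
/*
 * Sketch of the consumer side (assumed, following the usual NetBSD
 * convention): the input paths test these flags instead of
 * recomputing the sum, roughly
 *
 *	if (m->m_pkthdr.csum_flags & M_CSUM_IPv4_BAD)
 *		(drop or count the bad packet)
 *	else if (m->m_pkthdr.csum_flags & M_CSUM_IPv4)
 *		(skip the software in_cksum() pass)
 *
 * so M_CSUM_IPv4 is set here only when hardware really did the check.
 */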
2122 1.1 msaitoh
2123 1.28 msaitoh /************************************************************************
2124 1.28 msaitoh * ixgbe_dma_malloc
2125 1.28 msaitoh ************************************************************************/
2126 1.1 msaitoh int
2127 1.1 msaitoh ixgbe_dma_malloc(struct adapter *adapter, const bus_size_t size,
2128 1.1 msaitoh struct ixgbe_dma_alloc *dma, const int mapflags)
2129 1.1 msaitoh {
2130 1.1 msaitoh device_t dev = adapter->dev;
2131 1.28 msaitoh int r, rsegs;
2132 1.1 msaitoh
2133 1.28 msaitoh r = ixgbe_dma_tag_create(
2134 1.28 msaitoh /* parent */ adapter->osdep.dmat,
2135 1.28 msaitoh /* alignment */ DBA_ALIGN,
2136 1.28 msaitoh /* bounds */ 0,
2137 1.28 msaitoh /* maxsize */ size,
2138 1.28 msaitoh /* nsegments */ 1,
2139 1.28 msaitoh /* maxsegsize */ size,
2140 1.28 msaitoh /* flags */ BUS_DMA_ALLOCNOW,
2141 1.1 msaitoh &dma->dma_tag);
2142 1.1 msaitoh if (r != 0) {
2143 1.1 msaitoh aprint_error_dev(dev,
2144 1.44 msaitoh "%s: ixgbe_dma_tag_create failed; error %d\n", __func__,
2145 1.44 msaitoh r);
2146 1.1 msaitoh goto fail_0;
2147 1.1 msaitoh }
2148 1.1 msaitoh
2149 1.28 msaitoh r = bus_dmamem_alloc(dma->dma_tag->dt_dmat, size,
2150 1.28 msaitoh dma->dma_tag->dt_alignment, dma->dma_tag->dt_boundary,
2151 1.28 msaitoh &dma->dma_seg, 1, &rsegs, BUS_DMA_NOWAIT);
2152 1.1 msaitoh if (r != 0) {
2153 1.1 msaitoh aprint_error_dev(dev,
2154 1.1 msaitoh "%s: bus_dmamem_alloc failed; error %d\n", __func__, r);
2155 1.1 msaitoh goto fail_1;
2156 1.1 msaitoh }
2157 1.1 msaitoh
2158 1.1 msaitoh r = bus_dmamem_map(dma->dma_tag->dt_dmat, &dma->dma_seg, rsegs,
2159 1.1 msaitoh size, &dma->dma_vaddr, BUS_DMA_NOWAIT);
2160 1.1 msaitoh if (r != 0) {
2161 1.1 msaitoh aprint_error_dev(dev, "%s: bus_dmamem_map failed; error %d\n",
2162 1.1 msaitoh __func__, r);
2163 1.1 msaitoh goto fail_2;
2164 1.1 msaitoh }
2165 1.1 msaitoh
2166 1.1 msaitoh r = ixgbe_dmamap_create(dma->dma_tag, 0, &dma->dma_map);
2167 1.1 msaitoh if (r != 0) {
2168 1.1 msaitoh 		aprint_error_dev(dev, "%s: ixgbe_dmamap_create failed; error %d\n",
2169 1.1 msaitoh __func__, r);
2170 1.1 msaitoh goto fail_3;
2171 1.1 msaitoh }
2172 1.1 msaitoh
2173 1.28 msaitoh r = bus_dmamap_load(dma->dma_tag->dt_dmat, dma->dma_map,
2174 1.28 msaitoh dma->dma_vaddr, size, NULL, mapflags | BUS_DMA_NOWAIT);
2175 1.1 msaitoh if (r != 0) {
2176 1.1 msaitoh aprint_error_dev(dev, "%s: bus_dmamap_load failed; error %d\n",
2177 1.1 msaitoh __func__, r);
2178 1.1 msaitoh goto fail_4;
2179 1.1 msaitoh }
2180 1.1 msaitoh dma->dma_paddr = dma->dma_map->dm_segs[0].ds_addr;
2181 1.1 msaitoh dma->dma_size = size;
2182 1.1 msaitoh return 0;
2183 1.1 msaitoh fail_4:
2184 1.1 msaitoh ixgbe_dmamap_destroy(dma->dma_tag, dma->dma_map);
2185 1.1 msaitoh fail_3:
2186 1.1 msaitoh bus_dmamem_unmap(dma->dma_tag->dt_dmat, dma->dma_vaddr, size);
2187 1.1 msaitoh fail_2:
2188 1.1 msaitoh bus_dmamem_free(dma->dma_tag->dt_dmat, &dma->dma_seg, rsegs);
2189 1.1 msaitoh fail_1:
2190 1.1 msaitoh ixgbe_dma_tag_destroy(dma->dma_tag);
2191 1.1 msaitoh fail_0:
2192 1.1 msaitoh
2193 1.28 msaitoh return (r);
2194 1.28 msaitoh } /* ixgbe_dma_malloc */
2195 1.28 msaitoh
2196 1.28 msaitoh /************************************************************************
2197 1.28 msaitoh * ixgbe_dma_free
2198 1.28 msaitoh ************************************************************************/
2199 1.3 msaitoh void
2200 1.1 msaitoh ixgbe_dma_free(struct adapter *adapter, struct ixgbe_dma_alloc *dma)
2201 1.1 msaitoh {
2202 1.1 msaitoh bus_dmamap_sync(dma->dma_tag->dt_dmat, dma->dma_map, 0, dma->dma_size,
2203 1.1 msaitoh BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2204 1.1 msaitoh ixgbe_dmamap_unload(dma->dma_tag, dma->dma_map);
2205 1.1 msaitoh bus_dmamem_free(dma->dma_tag->dt_dmat, &dma->dma_seg, 1);
2206 1.1 msaitoh ixgbe_dma_tag_destroy(dma->dma_tag);
2207 1.28 msaitoh } /* ixgbe_dma_free */
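
/*
 * Usage sketch, mirroring how the ring allocations below use this
 * pair (see ixgbe_allocate_queues()):
 *
 *	if (ixgbe_dma_malloc(adapter, tsize, &txr->txdma, BUS_DMA_NOWAIT))
 *		goto err;
 *	txr->tx_base = (union ixgbe_adv_tx_desc *)txr->txdma.dma_vaddr;
 *	...
 *	ixgbe_dma_free(adapter, &txr->txdma);
 *
 * dma_vaddr is the kernel mapping and dma_paddr the device-visible
 * address to program into the hardware ring registers.
 */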
2208 1.1 msaitoh
2209 1.1 msaitoh
2210 1.28 msaitoh /************************************************************************
2211 1.28 msaitoh * ixgbe_allocate_queues
2212 1.1 msaitoh *
2213 1.28 msaitoh  *   Allocate memory for the transmit and receive rings, and then
2214 1.28 msaitoh  *   the descriptors associated with each; called only once at attach.
2215 1.28 msaitoh ************************************************************************/
2216 1.1 msaitoh int
2217 1.1 msaitoh ixgbe_allocate_queues(struct adapter *adapter)
2218 1.1 msaitoh {
2219 1.1 msaitoh device_t dev = adapter->dev;
2220 1.1 msaitoh struct ix_queue *que;
2221 1.1 msaitoh struct tx_ring *txr;
2222 1.1 msaitoh struct rx_ring *rxr;
2223 1.28 msaitoh int rsize, tsize, error = IXGBE_SUCCESS;
2224 1.28 msaitoh int txconf = 0, rxconf = 0;
2225 1.1 msaitoh
2226 1.28 msaitoh /* First, allocate the top level queue structs */
2227 1.28 msaitoh adapter->queues = (struct ix_queue *)malloc(sizeof(struct ix_queue) *
2228 1.28 msaitoh adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO);
2229 1.28 msaitoh if (adapter->queues == NULL) {
2230 1.28 msaitoh aprint_error_dev(dev, "Unable to allocate queue memory\n");
2231 1.1 msaitoh error = ENOMEM;
2232 1.1 msaitoh goto fail;
2233 1.1 msaitoh }
2234 1.1 msaitoh
2235 1.28 msaitoh /* Second, allocate the TX ring struct memory */
2236 1.28 msaitoh adapter->tx_rings = (struct tx_ring *)malloc(sizeof(struct tx_ring) *
2237 1.28 msaitoh adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO);
2238 1.28 msaitoh if (adapter->tx_rings == NULL) {
2239 1.1 msaitoh aprint_error_dev(dev, "Unable to allocate TX ring memory\n");
2240 1.1 msaitoh error = ENOMEM;
2241 1.1 msaitoh goto tx_fail;
2242 1.1 msaitoh }
2243 1.1 msaitoh
2244 1.28 msaitoh /* Third, allocate the RX ring */
2245 1.28 msaitoh adapter->rx_rings = (struct rx_ring *)malloc(sizeof(struct rx_ring) *
2246 1.28 msaitoh adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO);
2247 1.28 msaitoh if (adapter->rx_rings == NULL) {
2248 1.1 msaitoh aprint_error_dev(dev, "Unable to allocate RX ring memory\n");
2249 1.1 msaitoh error = ENOMEM;
2250 1.1 msaitoh goto rx_fail;
2251 1.1 msaitoh }
2252 1.1 msaitoh
2253 1.1 msaitoh /* For the ring itself */
2254 1.28 msaitoh tsize = roundup2(adapter->num_tx_desc * sizeof(union ixgbe_adv_tx_desc),
2255 1.28 msaitoh DBA_ALIGN);
2256 1.1 msaitoh
2257 1.1 msaitoh /*
2258 1.1 msaitoh * Now set up the TX queues, txconf is needed to handle the
2259 1.1 msaitoh * possibility that things fail midcourse and we need to
2260 1.1 msaitoh * undo memory gracefully
2261 1.28 msaitoh */
2262 1.1 msaitoh for (int i = 0; i < adapter->num_queues; i++, txconf++) {
2263 1.1 msaitoh /* Set up some basics */
2264 1.1 msaitoh txr = &adapter->tx_rings[i];
2265 1.1 msaitoh txr->adapter = adapter;
2266 1.28 msaitoh txr->txr_interq = NULL;
2267 1.28 msaitoh /* In case SR-IOV is enabled, align the index properly */
2268 1.5 msaitoh #ifdef PCI_IOV
2269 1.28 msaitoh txr->me = ixgbe_vf_que_index(adapter->iov_mode, adapter->pool,
2270 1.28 msaitoh i);
2271 1.5 msaitoh #else
2272 1.1 msaitoh txr->me = i;
2273 1.5 msaitoh #endif
2274 1.1 msaitoh txr->num_desc = adapter->num_tx_desc;
2275 1.1 msaitoh
2276 1.1 msaitoh /* Initialize the TX side lock */
2277 1.1 msaitoh mutex_init(&txr->tx_mtx, MUTEX_DEFAULT, IPL_NET);
2278 1.1 msaitoh
2279 1.28 msaitoh if (ixgbe_dma_malloc(adapter, tsize, &txr->txdma,
2280 1.28 msaitoh BUS_DMA_NOWAIT)) {
2281 1.1 msaitoh aprint_error_dev(dev,
2282 1.1 msaitoh "Unable to allocate TX Descriptor memory\n");
2283 1.1 msaitoh error = ENOMEM;
2284 1.1 msaitoh goto err_tx_desc;
2285 1.1 msaitoh }
2286 1.1 msaitoh txr->tx_base = (union ixgbe_adv_tx_desc *)txr->txdma.dma_vaddr;
2287 1.1 msaitoh bzero((void *)txr->tx_base, tsize);
2288 1.1 msaitoh
2289 1.28 msaitoh /* Now allocate transmit buffers for the ring */
2290 1.28 msaitoh if (ixgbe_allocate_transmit_buffers(txr)) {
2291 1.1 msaitoh aprint_error_dev(dev,
2292 1.1 msaitoh "Critical Failure setting up transmit buffers\n");
2293 1.1 msaitoh error = ENOMEM;
2294 1.1 msaitoh goto err_tx_desc;
2295 1.1 msaitoh }
2296 1.28 msaitoh if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX)) {
2297 1.28 msaitoh /* Allocate a buf ring */
2298 1.28 msaitoh txr->txr_interq = pcq_create(IXGBE_BR_SIZE, KM_SLEEP);
2299 1.28 msaitoh if (txr->txr_interq == NULL) {
2300 1.28 msaitoh aprint_error_dev(dev,
2301 1.28 msaitoh "Critical Failure setting up buf ring\n");
2302 1.28 msaitoh error = ENOMEM;
2303 1.28 msaitoh goto err_tx_desc;
2304 1.28 msaitoh }
2305 1.28 msaitoh }
2306 1.1 msaitoh }
2307 1.1 msaitoh
2308 1.1 msaitoh /*
2309 1.1 msaitoh * Next the RX queues...
2310 1.1 msaitoh */
2311 1.28 msaitoh rsize = roundup2(adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc),
2312 1.28 msaitoh DBA_ALIGN);
2313 1.1 msaitoh for (int i = 0; i < adapter->num_queues; i++, rxconf++) {
2314 1.1 msaitoh rxr = &adapter->rx_rings[i];
2315 1.1 msaitoh /* Set up some basics */
2316 1.1 msaitoh rxr->adapter = adapter;
2317 1.5 msaitoh #ifdef PCI_IOV
2318 1.28 msaitoh /* In case SR-IOV is enabled, align the index properly */
2319 1.28 msaitoh rxr->me = ixgbe_vf_que_index(adapter->iov_mode, adapter->pool,
2320 1.28 msaitoh i);
2321 1.5 msaitoh #else
2322 1.1 msaitoh rxr->me = i;
2323 1.5 msaitoh #endif
2324 1.1 msaitoh rxr->num_desc = adapter->num_rx_desc;
2325 1.1 msaitoh
2326 1.1 msaitoh /* Initialize the RX side lock */
2327 1.1 msaitoh mutex_init(&rxr->rx_mtx, MUTEX_DEFAULT, IPL_NET);
2328 1.1 msaitoh
2329 1.28 msaitoh if (ixgbe_dma_malloc(adapter, rsize, &rxr->rxdma,
2330 1.28 msaitoh BUS_DMA_NOWAIT)) {
2331 1.1 msaitoh aprint_error_dev(dev,
2332 1.1 msaitoh 			    "Unable to allocate RX Descriptor memory\n");
2333 1.1 msaitoh error = ENOMEM;
2334 1.1 msaitoh goto err_rx_desc;
2335 1.1 msaitoh }
2336 1.1 msaitoh rxr->rx_base = (union ixgbe_adv_rx_desc *)rxr->rxdma.dma_vaddr;
2337 1.1 msaitoh bzero((void *)rxr->rx_base, rsize);
2338 1.1 msaitoh
2339 1.28 msaitoh /* Allocate receive buffers for the ring */
2340 1.1 msaitoh if (ixgbe_allocate_receive_buffers(rxr)) {
2341 1.1 msaitoh aprint_error_dev(dev,
2342 1.1 msaitoh "Critical Failure setting up receive buffers\n");
2343 1.1 msaitoh error = ENOMEM;
2344 1.1 msaitoh goto err_rx_desc;
2345 1.1 msaitoh }
2346 1.1 msaitoh }
2347 1.1 msaitoh
2348 1.1 msaitoh /*
2349 1.28 msaitoh * Finally set up the queue holding structs
2350 1.28 msaitoh */
2351 1.1 msaitoh for (int i = 0; i < adapter->num_queues; i++) {
2352 1.1 msaitoh que = &adapter->queues[i];
2353 1.1 msaitoh que->adapter = adapter;
2354 1.3 msaitoh que->me = i;
2355 1.1 msaitoh que->txr = &adapter->tx_rings[i];
2356 1.1 msaitoh que->rxr = &adapter->rx_rings[i];
2357 1.33 knakahar
2358 1.37 knakahar mutex_init(&que->dc_mtx, MUTEX_DEFAULT, IPL_NET);
2359 1.37 knakahar que->disabled_count = 0;
2360 1.1 msaitoh }
2361 1.1 msaitoh
2362 1.1 msaitoh return (0);
2363 1.1 msaitoh
2364 1.1 msaitoh err_rx_desc:
2365 1.1 msaitoh for (rxr = adapter->rx_rings; rxconf > 0; rxr++, rxconf--)
2366 1.1 msaitoh ixgbe_dma_free(adapter, &rxr->rxdma);
2367 1.1 msaitoh err_tx_desc:
2368 1.1 msaitoh for (txr = adapter->tx_rings; txconf > 0; txr++, txconf--)
2369 1.1 msaitoh ixgbe_dma_free(adapter, &txr->txdma);
2370 1.1 msaitoh free(adapter->rx_rings, M_DEVBUF);
2371 1.1 msaitoh rx_fail:
2372 1.1 msaitoh free(adapter->tx_rings, M_DEVBUF);
2373 1.1 msaitoh tx_fail:
2374 1.1 msaitoh free(adapter->queues, M_DEVBUF);
2375 1.1 msaitoh fail:
2376 1.1 msaitoh return (error);
2377 1.28 msaitoh } /* ixgbe_allocate_queues */
2378