/* $NetBSD: ix_txrx.c,v 1.45 2018/05/18 10:09:02 msaitoh Exp $ */

/******************************************************************************

  Copyright (c) 2001-2017, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

  1. Redistributions of source code must retain the above copyright notice,
     this list of conditions and the following disclaimer.

  2. Redistributions in binary form must reproduce the above copyright
     notice, this list of conditions and the following disclaimer in the
     documentation and/or other materials provided with the distribution.

  3. Neither the name of the Intel Corporation nor the names of its
     contributors may be used to endorse or promote products derived from
     this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
/*$FreeBSD: head/sys/dev/ixgbe/ix_txrx.c 327031 2017-12-20 18:15:06Z erj $*/

/*
 * Copyright (c) 2011 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Coyote Point Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "opt_inet.h"
#include "opt_inet6.h"

#include "ixgbe.h"

/*
 * HW RSC control:
 *  this feature only works with
 *  IPv4, and only on 82599 and later.
 *  It also causes IP forwarding to
 *  fail, and that can't be controlled
 *  by the stack as LRO can. For all
 *  these reasons it is best left off,
 *  with no tunable interface; enabling
 *  it requires a recompile.
 */
static bool ixgbe_rsc_enable = FALSE;

/*
 * For Flow Director: this is the
 * number of TX packets we sample
 * for the filter pool; this means
 * every 20th packet will be probed.
 *
 * This feature can be disabled by
 * setting this to 0.
 */
static int atr_sample_rate = 20;
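
/*
 * Example of the sampling mechanism (using the default value above):
 * ixgbe_xmit() increments txr->atr_count for every packet sent and,
 * once the count reaches atr_sample_rate (20), calls ixgbe_atr() to
 * program a Flow Director filter and resets the count.
 */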

/************************************************************************
 * Local Function prototypes
 ************************************************************************/
static void          ixgbe_setup_transmit_ring(struct tx_ring *);
static void          ixgbe_free_transmit_buffers(struct tx_ring *);
static int           ixgbe_setup_receive_ring(struct rx_ring *);
static void          ixgbe_free_receive_buffers(struct rx_ring *);
static void          ixgbe_rx_checksum(u32, struct mbuf *, u32,
                         struct ixgbe_hw_stats *);
static void          ixgbe_refresh_mbufs(struct rx_ring *, int);
static void          ixgbe_drain(struct ifnet *, struct tx_ring *);
static int           ixgbe_xmit(struct tx_ring *, struct mbuf *);
static int           ixgbe_tx_ctx_setup(struct tx_ring *,
                         struct mbuf *, u32 *, u32 *);
static int           ixgbe_tso_setup(struct tx_ring *,
                         struct mbuf *, u32 *, u32 *);
static __inline void ixgbe_rx_discard(struct rx_ring *, int);
static __inline void ixgbe_rx_input(struct rx_ring *, struct ifnet *,
                         struct mbuf *, u32);
static int           ixgbe_dma_malloc(struct adapter *, bus_size_t,
                         struct ixgbe_dma_alloc *, int);
static void          ixgbe_dma_free(struct adapter *, struct ixgbe_dma_alloc *);

static void          ixgbe_setup_hw_rsc(struct rx_ring *);

/************************************************************************
 * ixgbe_legacy_start_locked - Transmit entry point
 *
 *   Called by the stack to initiate a transmit.
 *   The driver will remain in this routine as long as there are
 *   packets to transmit and transmit resources are available.
 *   In case resources are not available, the stack is notified
 *   and the packet is requeued.
 ************************************************************************/
int
ixgbe_legacy_start_locked(struct ifnet *ifp, struct tx_ring *txr)
{
        int rc;
        struct mbuf *m_head;
        struct adapter *adapter = txr->adapter;

        IXGBE_TX_LOCK_ASSERT(txr);

        if (!adapter->link_active) {
                /*
                 * Discard all packets buffered in IFQ so that stale
                 * packets are not sent out when the link comes back up.
                 */
                ixgbe_drain(ifp, txr);
                return (ENETDOWN);
        }
        if ((ifp->if_flags & IFF_RUNNING) == 0)
                return (ENETDOWN);

        while (!IFQ_IS_EMPTY(&ifp->if_snd)) {
                if (txr->tx_avail <= IXGBE_QUEUE_MIN_FREE)
                        break;

                IFQ_POLL(&ifp->if_snd, m_head);
                if (m_head == NULL)
                        break;

                if ((rc = ixgbe_xmit(txr, m_head)) == EAGAIN) {
                        break;
                }
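                /*
                 * An EAGAIN from ixgbe_xmit() leaves the packet on if_snd
                 * (IFQ_POLL above only peeked at it), so it will be retried
                 * on the next call; any other error dequeues and frees the
                 * packet below.
                 */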
                IFQ_DEQUEUE(&ifp->if_snd, m_head);
                if (rc != 0) {
                        m_freem(m_head);
                        continue;
                }

                /* Send a copy of the frame to the BPF listener */
                bpf_mtap(ifp, m_head);
        }

        return IXGBE_SUCCESS;
} /* ixgbe_legacy_start_locked */

/************************************************************************
 * ixgbe_legacy_start
 *
 *   Called by the stack, this always uses the first tx ring,
 *   and should not be used with multiqueue tx enabled.
 ************************************************************************/
void
ixgbe_legacy_start(struct ifnet *ifp)
{
        struct adapter *adapter = ifp->if_softc;
        struct tx_ring *txr = adapter->tx_rings;

        if (ifp->if_flags & IFF_RUNNING) {
                IXGBE_TX_LOCK(txr);
                ixgbe_legacy_start_locked(ifp, txr);
                IXGBE_TX_UNLOCK(txr);
        }
} /* ixgbe_legacy_start */

/************************************************************************
 * ixgbe_mq_start - Multiqueue Transmit Entry Point
 *
 *   (if_transmit function)
 ************************************************************************/
int
ixgbe_mq_start(struct ifnet *ifp, struct mbuf *m)
{
        struct adapter *adapter = ifp->if_softc;
        struct tx_ring *txr;
        int i, err = 0;
#ifdef RSS
        uint32_t bucket_id;
#endif

        /*
         * When doing RSS, map it to the same outbound queue
         * as the incoming flow would be mapped to.
         *
         * If everything is set up correctly, it should be the
         * same bucket the current CPU is assigned to.
         */
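        /*
         * For instance (illustrative numbers only): with num_queues = 8,
         * a flow in RSS bucket 11 is transmitted on queue 11 % 8 = 3;
         * the mapping is simply the modulo below.
         */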
#ifdef RSS
        if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) {
                if ((adapter->feat_en & IXGBE_FEATURE_RSS) &&
                    (rss_hash2bucket(m->m_pkthdr.flowid, M_HASHTYPE_GET(m),
                    &bucket_id) == 0)) {
                        i = bucket_id % adapter->num_queues;
#ifdef IXGBE_DEBUG
                        if (bucket_id > adapter->num_queues)
                                if_printf(ifp,
                                    "bucket_id (%d) > num_queues (%d)\n",
                                    bucket_id, adapter->num_queues);
#endif
                } else
                        i = m->m_pkthdr.flowid % adapter->num_queues;
        } else
#endif /* RSS */
                i = cpu_index(curcpu()) % adapter->num_queues;

        /* Check for a hung queue and pick alternative */
        if (((1 << i) & adapter->active_queues) == 0)
                i = ffs64(adapter->active_queues);

        txr = &adapter->tx_rings[i];

        err = pcq_put(txr->txr_interq, m);
        if (err == false) {
                m_freem(m);
                txr->pcq_drops.ev_count++;
                return (err);
        }
        if (IXGBE_TX_TRYLOCK(txr)) {
                ixgbe_mq_start_locked(ifp, txr);
                IXGBE_TX_UNLOCK(txr);
        } else {
                if (adapter->txrx_use_workqueue) {
                        u_int *enqueued;

                        /*
                         * This function itself is not called in interrupt
                         * context, but it can be called in fast softint
                         * context right after receiving forwarded packets.
                         * So we must guard against enqueueing the same work
                         * twice when the machine handles both locally
                         * generated and forwarded packets.
                         */
                        enqueued = percpu_getref(adapter->txr_wq_enqueued);
                        if (*enqueued == 0) {
                                *enqueued = 1;
                                percpu_putref(adapter->txr_wq_enqueued);
                                workqueue_enqueue(adapter->txr_wq,
                                    &txr->wq_cookie, curcpu());
                        } else
                                percpu_putref(adapter->txr_wq_enqueued);
                } else
                        softint_schedule(txr->txr_si);
        }

        return (0);
} /* ixgbe_mq_start */

/************************************************************************
 * ixgbe_mq_start_locked
 ************************************************************************/
int
ixgbe_mq_start_locked(struct ifnet *ifp, struct tx_ring *txr)
{
        struct mbuf *next;
        int enqueued = 0, err = 0;

        if (!txr->adapter->link_active) {
                /*
                 * Discard all packets buffered in txr_interq so that stale
                 * packets are not sent out when the link comes back up.
                 */
                ixgbe_drain(ifp, txr);
                return (ENETDOWN);
        }
        if ((ifp->if_flags & IFF_RUNNING) == 0)
                return (ENETDOWN);

        /* Process the queue */
        while ((next = pcq_get(txr->txr_interq)) != NULL) {
                if ((err = ixgbe_xmit(txr, next)) != 0) {
                        m_freem(next);
                        /* All errors are counted in ixgbe_xmit() */
                        break;
                }
                enqueued++;
#if __FreeBSD_version >= 1100036
                /*
                 * Since we're looking at the tx ring, we can check
                 * to see if we're a VF by examining our tail register
                 * address.
                 */
                if ((txr->adapter->feat_en & IXGBE_FEATURE_VF) &&
                    (next->m_flags & M_MCAST))
                        if_inc_counter(ifp, IFCOUNTER_OMCASTS, 1);
#endif
                /* Send a copy of the frame to the BPF listener */
                bpf_mtap(ifp, next);
                if ((ifp->if_flags & IFF_RUNNING) == 0)
                        break;
        }

        if (txr->tx_avail < IXGBE_TX_CLEANUP_THRESHOLD(txr->adapter))
                ixgbe_txeof(txr);

        return (err);
} /* ixgbe_mq_start_locked */

/************************************************************************
 * ixgbe_deferred_mq_start
 *
 *   Called from a softint (and, indirectly, from a workqueue) to drain
 *   queued transmit packets.
 ************************************************************************/
void
ixgbe_deferred_mq_start(void *arg)
{
        struct tx_ring *txr = arg;
        struct adapter *adapter = txr->adapter;
        struct ifnet *ifp = adapter->ifp;

        IXGBE_TX_LOCK(txr);
        if (pcq_peek(txr->txr_interq) != NULL)
                ixgbe_mq_start_locked(ifp, txr);
        IXGBE_TX_UNLOCK(txr);
} /* ixgbe_deferred_mq_start */

/************************************************************************
 * ixgbe_deferred_mq_start_work
 *
 *   Called from a workqueue to drain queued transmit packets.
 ************************************************************************/
void
ixgbe_deferred_mq_start_work(struct work *wk, void *arg)
{
        struct tx_ring *txr = container_of(wk, struct tx_ring, wq_cookie);
        struct adapter *adapter = txr->adapter;
        u_int *enqueued = percpu_getref(adapter->txr_wq_enqueued);

        *enqueued = 0;
        percpu_putref(adapter->txr_wq_enqueued);

        ixgbe_deferred_mq_start(txr);
} /* ixgbe_deferred_mq_start_work */

/************************************************************************
 * ixgbe_drain_all
 ************************************************************************/
void
ixgbe_drain_all(struct adapter *adapter)
{
        struct ifnet *ifp = adapter->ifp;
        struct ix_queue *que = adapter->queues;

        for (int i = 0; i < adapter->num_queues; i++, que++) {
                struct tx_ring *txr = que->txr;

                IXGBE_TX_LOCK(txr);
                ixgbe_drain(ifp, txr);
                IXGBE_TX_UNLOCK(txr);
        }
} /* ixgbe_drain_all */

/************************************************************************
 * ixgbe_xmit
 *
 *   Maps the mbufs to tx descriptors, allowing the
 *   TX engine to transmit the packets.
 *
 *   Return 0 on success, positive on failure
 ************************************************************************/
static int
ixgbe_xmit(struct tx_ring *txr, struct mbuf *m_head)
{
        struct adapter *adapter = txr->adapter;
        struct ixgbe_tx_buf *txbuf;
        union ixgbe_adv_tx_desc *txd = NULL;
        struct ifnet *ifp = adapter->ifp;
        int i, j, error;
        int first;
        u32 olinfo_status = 0, cmd_type_len;
        bool remap = TRUE;
        bus_dmamap_t map;

        /* Basic descriptor defines */
        cmd_type_len = (IXGBE_ADVTXD_DTYP_DATA |
            IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT);

        if (vlan_has_tag(m_head))
                cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE;

        /*
         * Important to capture the first descriptor
         * used because it will contain the index of
         * the one we tell the hardware to report back
         */
        first = txr->next_avail_desc;
        txbuf = &txr->tx_buffers[first];
        map = txbuf->map;

        /*
         * Map the packet for DMA.
         */
retry:
        error = bus_dmamap_load_mbuf(txr->txtag->dt_dmat, map, m_head,
            BUS_DMA_NOWAIT);

        if (__predict_false(error)) {
                struct mbuf *m;

                switch (error) {
                case EAGAIN:
                        txr->q_eagain_tx_dma_setup++;
                        return EAGAIN;
                case ENOMEM:
                        txr->q_enomem_tx_dma_setup++;
                        return EAGAIN;
                case EFBIG:
                        /* Try it again? - one try */
                        if (remap == TRUE) {
                                remap = FALSE;
                                /*
                                 * XXX: m_defrag will choke on
                                 * non-MCLBYTES-sized clusters
                                 */
                                txr->q_efbig_tx_dma_setup++;
                                m = m_defrag(m_head, M_NOWAIT);
                                if (m == NULL) {
                                        txr->q_mbuf_defrag_failed++;
                                        return ENOBUFS;
                                }
                                m_head = m;
                                goto retry;
                        } else {
                                txr->q_efbig2_tx_dma_setup++;
                                return error;
                        }
                case EINVAL:
                        txr->q_einval_tx_dma_setup++;
                        return error;
                default:
                        txr->q_other_tx_dma_setup++;
                        return error;
                }
        }

        /* Make certain there are enough descriptors */
        if (txr->tx_avail < (map->dm_nsegs + 2)) {
                txr->no_desc_avail.ev_count++;
                ixgbe_dmamap_unload(txr->txtag, txbuf->map);
                return EAGAIN;
        }
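        /*
         * Note: the "+ 2" in the check above leaves room for the offload
         * context descriptor that ixgbe_tx_ctx_setup()/ixgbe_tso_setup()
         * may consume, plus one descriptor of slack (our reading of the
         * check, not a documented contract).
         */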

        /*
         * Set up the appropriate offload context;
         * this will consume the first descriptor.
         */
        error = ixgbe_tx_ctx_setup(txr, m_head, &cmd_type_len, &olinfo_status);
        if (__predict_false(error)) {
                return (error);
        }

        /* Do the flow director magic */
        if ((adapter->feat_en & IXGBE_FEATURE_FDIR) &&
            (txr->atr_sample) && (!adapter->fdir_reinit)) {
                ++txr->atr_count;
                if (txr->atr_count >= atr_sample_rate) {
                        ixgbe_atr(txr, m_head);
                        txr->atr_count = 0;
                }
        }

        olinfo_status |= IXGBE_ADVTXD_CC;
        i = txr->next_avail_desc;
        for (j = 0; j < map->dm_nsegs; j++) {
                bus_size_t seglen;
                bus_addr_t segaddr;

                txbuf = &txr->tx_buffers[i];
                txd = &txr->tx_base[i];
                seglen = map->dm_segs[j].ds_len;
                segaddr = htole64(map->dm_segs[j].ds_addr);

                txd->read.buffer_addr = segaddr;
                txd->read.cmd_type_len = htole32(cmd_type_len | seglen);
                txd->read.olinfo_status = htole32(olinfo_status);

                if (++i == txr->num_desc)
                        i = 0;
        }

        txd->read.cmd_type_len |= htole32(IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS);
        txr->tx_avail -= map->dm_nsegs;
        txr->next_avail_desc = i;

        txbuf->m_head = m_head;
        /*
         * Here we swap the map so the last descriptor,
         * which gets the completion interrupt, has the
         * real map, and the first descriptor gets the
         * unused map from this descriptor.
         */
        txr->tx_buffers[first].map = txbuf->map;
        txbuf->map = map;
        bus_dmamap_sync(txr->txtag->dt_dmat, map, 0, m_head->m_pkthdr.len,
            BUS_DMASYNC_PREWRITE);

        /* Set the EOP descriptor that will be marked done */
        txbuf = &txr->tx_buffers[first];
        txbuf->eop = txd;

        ixgbe_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
            BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
        /*
         * Advance the Transmit Descriptor Tail (Tdt); this tells the
         * hardware that this frame is available to transmit.
         */
        ++txr->total_packets.ev_count;
        IXGBE_WRITE_REG(&adapter->hw, txr->tail, i);

        /*
         * XXX NOMPSAFE: ifp->if_data should be percpu.
         */
        ifp->if_obytes += m_head->m_pkthdr.len;
        if (m_head->m_flags & M_MCAST)
                ifp->if_omcasts++;

        /* Mark queue as having work */
        if (txr->busy == 0)
                txr->busy = 1;

        return (0);
} /* ixgbe_xmit */

/************************************************************************
 * ixgbe_drain
 ************************************************************************/
static void
ixgbe_drain(struct ifnet *ifp, struct tx_ring *txr)
{
        struct mbuf *m;

        IXGBE_TX_LOCK_ASSERT(txr);

        if (txr->me == 0) {
                while (!IFQ_IS_EMPTY(&ifp->if_snd)) {
                        IFQ_DEQUEUE(&ifp->if_snd, m);
                        m_freem(m);
                        IF_DROP(&ifp->if_snd);
                }
        }

        while ((m = pcq_get(txr->txr_interq)) != NULL) {
                m_freem(m);
                txr->pcq_drops.ev_count++;
        }
} /* ixgbe_drain */

/************************************************************************
 * ixgbe_allocate_transmit_buffers
 *
 *   Allocate memory for tx_buffer structures. The tx_buffer stores all
 *   the information needed to transmit a packet on the wire. This is
 *   called only once at attach, setup is done every reset.
 ************************************************************************/
static int
ixgbe_allocate_transmit_buffers(struct tx_ring *txr)
{
        struct adapter *adapter = txr->adapter;
        device_t dev = adapter->dev;
        struct ixgbe_tx_buf *txbuf;
        int error, i;

        /*
         * Setup DMA descriptor areas.
         */
        error = ixgbe_dma_tag_create(
            /* parent */ adapter->osdep.dmat,
            /* alignment */ 1,
            /* bounds */ 0,
            /* maxsize */ IXGBE_TSO_SIZE,
            /* nsegments */ adapter->num_segs,
            /* maxsegsize */ PAGE_SIZE,
            /* flags */ 0,
            &txr->txtag);
        if (error != 0) {
                aprint_error_dev(dev, "Unable to allocate TX DMA tag\n");
                goto fail;
        }

        txr->tx_buffers =
            (struct ixgbe_tx_buf *) malloc(sizeof(struct ixgbe_tx_buf) *
            adapter->num_tx_desc, M_DEVBUF, M_NOWAIT | M_ZERO);
        if (txr->tx_buffers == NULL) {
                aprint_error_dev(dev, "Unable to allocate tx_buffer memory\n");
                error = ENOMEM;
                goto fail;
        }

        /* Create the descriptor buffer dma maps */
        txbuf = txr->tx_buffers;
        for (i = 0; i < adapter->num_tx_desc; i++, txbuf++) {
                error = ixgbe_dmamap_create(txr->txtag, 0, &txbuf->map);
                if (error != 0) {
                        aprint_error_dev(dev,
                            "Unable to create TX DMA map (%d)\n", error);
                        goto fail;
                }
        }

        return 0;
fail:
        /* We free all, it handles case where we are in the middle */
#if 0 /* XXX was FreeBSD */
        ixgbe_free_transmit_structures(adapter);
#else
        ixgbe_free_transmit_buffers(txr);
#endif
        return (error);
} /* ixgbe_allocate_transmit_buffers */

/************************************************************************
 * ixgbe_setup_transmit_ring - Initialize a transmit ring.
 ************************************************************************/
static void
ixgbe_setup_transmit_ring(struct tx_ring *txr)
{
        struct adapter *adapter = txr->adapter;
        struct ixgbe_tx_buf *txbuf;
#ifdef DEV_NETMAP
        struct netmap_adapter *na = NA(adapter->ifp);
        struct netmap_slot *slot;
#endif /* DEV_NETMAP */

        /* Clear the old ring contents */
        IXGBE_TX_LOCK(txr);

#ifdef DEV_NETMAP
        if (adapter->feat_en & IXGBE_FEATURE_NETMAP) {
                /*
                 * (under lock): if in netmap mode, do some consistency
                 * checks and set slot to entry 0 of the netmap ring.
                 */
                slot = netmap_reset(na, NR_TX, txr->me, 0);
        }
#endif /* DEV_NETMAP */

        bzero((void *)txr->tx_base,
            (sizeof(union ixgbe_adv_tx_desc)) * adapter->num_tx_desc);
        /* Reset indices */
        txr->next_avail_desc = 0;
        txr->next_to_clean = 0;

        /* Free any existing tx buffers. */
        txbuf = txr->tx_buffers;
        for (int i = 0; i < txr->num_desc; i++, txbuf++) {
                if (txbuf->m_head != NULL) {
                        bus_dmamap_sync(txr->txtag->dt_dmat, txbuf->map,
                            0, txbuf->m_head->m_pkthdr.len,
                            BUS_DMASYNC_POSTWRITE);
                        ixgbe_dmamap_unload(txr->txtag, txbuf->map);
                        m_freem(txbuf->m_head);
                        txbuf->m_head = NULL;
                }

#ifdef DEV_NETMAP
                /*
                 * In netmap mode, set the map for the packet buffer.
                 * NOTE: Some drivers (not this one) also need to set
                 * the physical buffer address in the NIC ring.
                 * Slots in the netmap ring (indexed by "si") are
                 * kring->nkr_hwofs positions "ahead" wrt the
                 * corresponding slot in the NIC ring. In some drivers
                 * (not here) nkr_hwofs can be negative. Function
                 * netmap_idx_n2k() handles wraparounds properly.
                 */
                if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) && slot) {
                        int si = netmap_idx_n2k(&na->tx_rings[txr->me], i);
                        netmap_load_map(na, txr->txtag,
                            txbuf->map, NMB(na, slot + si));
                }
#endif /* DEV_NETMAP */

                /* Clear the EOP descriptor pointer */
                txbuf->eop = NULL;
        }

        /* Set the rate at which we sample packets */
        if (adapter->feat_en & IXGBE_FEATURE_FDIR)
                txr->atr_sample = atr_sample_rate;

        /* Set number of descriptors available */
        txr->tx_avail = adapter->num_tx_desc;

        ixgbe_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
            BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
        IXGBE_TX_UNLOCK(txr);
} /* ixgbe_setup_transmit_ring */

/************************************************************************
 * ixgbe_setup_transmit_structures - Initialize all transmit rings.
 ************************************************************************/
int
ixgbe_setup_transmit_structures(struct adapter *adapter)
{
        struct tx_ring *txr = adapter->tx_rings;

        for (int i = 0; i < adapter->num_queues; i++, txr++)
                ixgbe_setup_transmit_ring(txr);

        return (0);
} /* ixgbe_setup_transmit_structures */

/************************************************************************
 * ixgbe_free_transmit_structures - Free all transmit rings.
 ************************************************************************/
void
ixgbe_free_transmit_structures(struct adapter *adapter)
{
        struct tx_ring *txr = adapter->tx_rings;

        for (int i = 0; i < adapter->num_queues; i++, txr++) {
                ixgbe_free_transmit_buffers(txr);
                ixgbe_dma_free(adapter, &txr->txdma);
                IXGBE_TX_LOCK_DESTROY(txr);
        }
        free(adapter->tx_rings, M_DEVBUF);
} /* ixgbe_free_transmit_structures */

/************************************************************************
 * ixgbe_free_transmit_buffers
 *
 *   Free transmit ring related data structures.
 ************************************************************************/
static void
ixgbe_free_transmit_buffers(struct tx_ring *txr)
{
        struct adapter *adapter = txr->adapter;
        struct ixgbe_tx_buf *tx_buffer;
        int i;

        INIT_DEBUGOUT("ixgbe_free_transmit_buffers: begin");

        if (txr->tx_buffers == NULL)
                return;

        tx_buffer = txr->tx_buffers;
        for (i = 0; i < adapter->num_tx_desc; i++, tx_buffer++) {
                if (tx_buffer->m_head != NULL) {
                        bus_dmamap_sync(txr->txtag->dt_dmat, tx_buffer->map,
                            0, tx_buffer->m_head->m_pkthdr.len,
                            BUS_DMASYNC_POSTWRITE);
                        ixgbe_dmamap_unload(txr->txtag, tx_buffer->map);
                        m_freem(tx_buffer->m_head);
                        tx_buffer->m_head = NULL;
                        if (tx_buffer->map != NULL) {
                                ixgbe_dmamap_destroy(txr->txtag,
                                    tx_buffer->map);
                                tx_buffer->map = NULL;
                        }
                } else if (tx_buffer->map != NULL) {
                        ixgbe_dmamap_unload(txr->txtag, tx_buffer->map);
                        ixgbe_dmamap_destroy(txr->txtag, tx_buffer->map);
                        tx_buffer->map = NULL;
                }
        }
        if (txr->txr_interq != NULL) {
                struct mbuf *m;

                while ((m = pcq_get(txr->txr_interq)) != NULL)
                        m_freem(m);
                pcq_destroy(txr->txr_interq);
        }
        if (txr->tx_buffers != NULL) {
                free(txr->tx_buffers, M_DEVBUF);
                txr->tx_buffers = NULL;
        }
        if (txr->txtag != NULL) {
                ixgbe_dma_tag_destroy(txr->txtag);
                txr->txtag = NULL;
        }
} /* ixgbe_free_transmit_buffers */

/************************************************************************
 * ixgbe_tx_ctx_setup
 *
 *   Advanced Context Descriptor setup for VLAN, CSUM or TSO
 ************************************************************************/
static int
ixgbe_tx_ctx_setup(struct tx_ring *txr, struct mbuf *mp,
    u32 *cmd_type_len, u32 *olinfo_status)
{
        struct adapter *adapter = txr->adapter;
        struct ixgbe_adv_tx_context_desc *TXD;
        struct ether_vlan_header *eh;
#ifdef INET
        struct ip *ip;
#endif
#ifdef INET6
        struct ip6_hdr *ip6;
#endif
        int ehdrlen, ip_hlen = 0;
        int offload = TRUE;
        int ctxd = txr->next_avail_desc;
        u32 vlan_macip_lens = 0;
        u32 type_tucmd_mlhl = 0;
        u16 vtag = 0;
        u16 etype;
        u8 ipproto = 0;
        char *l3d;

        /* First check if TSO is to be used */
        if (mp->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) {
                int rv = ixgbe_tso_setup(txr, mp, cmd_type_len, olinfo_status);

                if (rv != 0)
                        ++adapter->tso_err.ev_count;
                return rv;
        }

        if ((mp->m_pkthdr.csum_flags & M_CSUM_OFFLOAD) == 0)
                offload = FALSE;

        /* Indicate the whole packet as payload when not doing TSO */
        *olinfo_status |= mp->m_pkthdr.len << IXGBE_ADVTXD_PAYLEN_SHIFT;

        /* Now ready a context descriptor */
        TXD = (struct ixgbe_adv_tx_context_desc *)&txr->tx_base[ctxd];

        /*
         * In advanced descriptors the vlan tag must
         * be placed into the context descriptor. Hence
         * we need to make one even if not doing offloads.
         */
        if (vlan_has_tag(mp)) {
                vtag = htole16(vlan_get_tag(mp));
                vlan_macip_lens |= (vtag << IXGBE_ADVTXD_VLAN_SHIFT);
        } else if (!(txr->adapter->feat_en & IXGBE_FEATURE_NEEDS_CTXD) &&
            (offload == FALSE))
                return (0);

        /*
         * Determine where frame payload starts.
         * Jump over vlan headers if already present,
         * helpful for QinQ too.
         */
        KASSERT(mp->m_len >= offsetof(struct ether_vlan_header, evl_tag));
        eh = mtod(mp, struct ether_vlan_header *);
        if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
                KASSERT(mp->m_len >= sizeof(struct ether_vlan_header));
                etype = ntohs(eh->evl_proto);
                ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
        } else {
                etype = ntohs(eh->evl_encap_proto);
                ehdrlen = ETHER_HDR_LEN;
        }

        /* Set the ether header length */
        vlan_macip_lens |= ehdrlen << IXGBE_ADVTXD_MACLEN_SHIFT;
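        /*
         * At this point vlan_macip_lens packs the VLAN tag (upper bits)
         * and the MAC header length (ehdrlen, e.g. 14, or 18 with a VLAN
         * header); the IP header length is OR'd into the low bits below.
         * (Summary of the advanced TX context descriptor layout, for
         * orientation only.)
         */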

        if (offload == FALSE)
                goto no_offloads;

        /*
         * If the first mbuf only includes the ethernet header,
         * jump to the next one
         * XXX: This assumes the stack splits mbufs containing headers
         *      on header boundaries
         * XXX: And assumes the entire IP header is contained in one mbuf
         */
        if (mp->m_len == ehdrlen && mp->m_next)
                l3d = mtod(mp->m_next, char *);
        else
                l3d = mtod(mp, char *) + ehdrlen;

        switch (etype) {
#ifdef INET
        case ETHERTYPE_IP:
                ip = (struct ip *)(l3d);
                ip_hlen = ip->ip_hl << 2;
                ipproto = ip->ip_p;
                type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
                KASSERT((mp->m_pkthdr.csum_flags & M_CSUM_IPv4) == 0 ||
                    ip->ip_sum == 0);
                break;
#endif
#ifdef INET6
        case ETHERTYPE_IPV6:
                ip6 = (struct ip6_hdr *)(l3d);
                ip_hlen = sizeof(struct ip6_hdr);
                ipproto = ip6->ip6_nxt;
                type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV6;
                break;
#endif
        default:
                offload = false;
                break;
        }

        if ((mp->m_pkthdr.csum_flags & M_CSUM_IPv4) != 0)
                *olinfo_status |= IXGBE_TXD_POPTS_IXSM << 8;

        vlan_macip_lens |= ip_hlen;

        /* No support for offloads for non-L4 next headers */
        switch (ipproto) {
        case IPPROTO_TCP:
                if (mp->m_pkthdr.csum_flags &
                    (M_CSUM_TCPv4 | M_CSUM_TCPv6))
                        type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
                else
                        offload = false;
                break;
        case IPPROTO_UDP:
                if (mp->m_pkthdr.csum_flags &
                    (M_CSUM_UDPv4 | M_CSUM_UDPv6))
                        type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_UDP;
                else
                        offload = false;
                break;
        default:
                offload = false;
                break;
        }

        if (offload) /* Insert L4 checksum into data descriptors */
                *olinfo_status |= IXGBE_TXD_POPTS_TXSM << 8;

no_offloads:
        type_tucmd_mlhl |= IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;

        /* Now copy bits into descriptor */
        TXD->vlan_macip_lens = htole32(vlan_macip_lens);
        TXD->type_tucmd_mlhl = htole32(type_tucmd_mlhl);
        TXD->seqnum_seed = htole32(0);
        TXD->mss_l4len_idx = htole32(0);

        /* We've consumed the first desc, adjust counters */
        if (++ctxd == txr->num_desc)
                ctxd = 0;
        txr->next_avail_desc = ctxd;
        --txr->tx_avail;

        return (0);
} /* ixgbe_tx_ctx_setup */

/************************************************************************
 * ixgbe_tso_setup
 *
 *   Setup work for hardware segmentation offload (TSO) on
 *   adapters using advanced tx descriptors
 ************************************************************************/
static int
ixgbe_tso_setup(struct tx_ring *txr, struct mbuf *mp, u32 *cmd_type_len,
    u32 *olinfo_status)
{
        struct ixgbe_adv_tx_context_desc *TXD;
        struct ether_vlan_header *eh;
#ifdef INET6
        struct ip6_hdr *ip6;
#endif
#ifdef INET
        struct ip *ip;
#endif
        struct tcphdr *th;
        int ctxd, ehdrlen, ip_hlen, tcp_hlen;
        u32 vlan_macip_lens = 0;
        u32 type_tucmd_mlhl = 0;
        u32 mss_l4len_idx = 0, paylen;
        u16 vtag = 0, eh_type;

        /*
         * Determine where frame payload starts.
         * Jump over vlan headers if already present
         */
        eh = mtod(mp, struct ether_vlan_header *);
        if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
                ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
                eh_type = eh->evl_proto;
        } else {
                ehdrlen = ETHER_HDR_LEN;
                eh_type = eh->evl_encap_proto;
        }

        switch (ntohs(eh_type)) {
#ifdef INET
        case ETHERTYPE_IP:
                ip = (struct ip *)(mp->m_data + ehdrlen);
                if (ip->ip_p != IPPROTO_TCP)
                        return (ENXIO);
                ip->ip_sum = 0;
                ip_hlen = ip->ip_hl << 2;
                th = (struct tcphdr *)((char *)ip + ip_hlen);
                th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
                    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
                type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
                /* Tell transmit desc to also do IPv4 checksum. */
                *olinfo_status |= IXGBE_TXD_POPTS_IXSM << 8;
                break;
#endif
#ifdef INET6
        case ETHERTYPE_IPV6:
                ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
                /* XXX-BZ For now we do not pretend to support ext. hdrs. */
                if (ip6->ip6_nxt != IPPROTO_TCP)
                        return (ENXIO);
                ip_hlen = sizeof(struct ip6_hdr);
                th = (struct tcphdr *)((char *)ip6 + ip_hlen);
                th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
                    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
                type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV6;
                break;
#endif
        default:
                panic("%s: CSUM_TSO but no supported IP version (0x%04x)",
                    __func__, ntohs(eh_type));
                break;
        }

        ctxd = txr->next_avail_desc;
        TXD = (struct ixgbe_adv_tx_context_desc *)&txr->tx_base[ctxd];

        tcp_hlen = th->th_off << 2;

        /* This is used in the transmit desc in encap */
        paylen = mp->m_pkthdr.len - ehdrlen - ip_hlen - tcp_hlen;
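        /*
         * Illustrative arithmetic only: a 1514-byte frame with
         * ehdrlen = 14, ip_hlen = 20 and tcp_hlen = 20 gives
         * paylen = 1514 - 14 - 20 - 20 = 1460 bytes of TCP payload.
         */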

        /* VLAN MACLEN IPLEN */
        if (vlan_has_tag(mp)) {
                vtag = htole16(vlan_get_tag(mp));
                vlan_macip_lens |= (vtag << IXGBE_ADVTXD_VLAN_SHIFT);
        }

        vlan_macip_lens |= ehdrlen << IXGBE_ADVTXD_MACLEN_SHIFT;
        vlan_macip_lens |= ip_hlen;
        TXD->vlan_macip_lens = htole32(vlan_macip_lens);

        /* ADV DTYPE TUCMD */
        type_tucmd_mlhl |= IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
        type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
        TXD->type_tucmd_mlhl = htole32(type_tucmd_mlhl);

        /* MSS L4LEN IDX */
        mss_l4len_idx |= (mp->m_pkthdr.segsz << IXGBE_ADVTXD_MSS_SHIFT);
        mss_l4len_idx |= (tcp_hlen << IXGBE_ADVTXD_L4LEN_SHIFT);
        TXD->mss_l4len_idx = htole32(mss_l4len_idx);

        TXD->seqnum_seed = htole32(0);

        if (++ctxd == txr->num_desc)
                ctxd = 0;

        txr->tx_avail--;
        txr->next_avail_desc = ctxd;
        *cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;
        *olinfo_status |= IXGBE_TXD_POPTS_TXSM << 8;
        *olinfo_status |= paylen << IXGBE_ADVTXD_PAYLEN_SHIFT;
        ++txr->tso_tx.ev_count;

        return (0);
} /* ixgbe_tso_setup */

/************************************************************************
 * ixgbe_txeof
 *
 *   Examine each tx_buffer in the used queue. If the hardware is done
 *   processing the packet then free associated resources. The
 *   tx_buffer is put back on the free queue.
 ************************************************************************/
bool
ixgbe_txeof(struct tx_ring *txr)
{
        struct adapter *adapter = txr->adapter;
        struct ifnet *ifp = adapter->ifp;
        struct ixgbe_tx_buf *buf;
        union ixgbe_adv_tx_desc *txd;
        u32 work, processed = 0;
        u32 limit = adapter->tx_process_limit;

        KASSERT(mutex_owned(&txr->tx_mtx));

#ifdef DEV_NETMAP
        if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) &&
            (adapter->ifp->if_capenable & IFCAP_NETMAP)) {
                struct netmap_adapter *na = NA(adapter->ifp);
                struct netmap_kring *kring = &na->tx_rings[txr->me];
                txd = txr->tx_base;
                bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
                    BUS_DMASYNC_POSTREAD);
                /*
                 * In netmap mode, all the work is done in the context
                 * of the client thread. Interrupt handlers only wake up
                 * clients, which may be sleeping on individual rings
                 * or on a global resource for all rings.
                 * To implement tx interrupt mitigation, we wake up the client
                 * thread roughly every half ring, even if the NIC interrupts
                 * more frequently. This is implemented as follows:
                 * - ixgbe_txsync() sets kring->nr_kflags with the index of
                 *   the slot that should wake up the thread (nkr_num_slots
                 *   means the user thread should not be woken up);
                 * - the driver ignores tx interrupts unless netmap_mitigate=0
                 *   or the slot has the DD bit set.
                 */
                if (!netmap_mitigate ||
                    (kring->nr_kflags < kring->nkr_num_slots &&
                    txd[kring->nr_kflags].wb.status & IXGBE_TXD_STAT_DD)) {
                        netmap_tx_irq(ifp, txr->me);
                }
                return false;
        }
#endif /* DEV_NETMAP */

        if (txr->tx_avail == txr->num_desc) {
                txr->busy = 0;
                return false;
        }

        /* Get work starting point */
        work = txr->next_to_clean;
        buf = &txr->tx_buffers[work];
        txd = &txr->tx_base[work];
        work -= txr->num_desc; /* The distance to ring end */
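        /*
         * From here "work" counts up from -num_desc toward zero: reaching
         * zero means the index wrapped past the end of the ring, which is
         * why the wrap checks below can simply test !work and rewind.
         */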
        ixgbe_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
            BUS_DMASYNC_POSTREAD);

        do {
                union ixgbe_adv_tx_desc *eop = buf->eop;

                if (eop == NULL) /* No work */
                        break;

                if ((eop->wb.status & IXGBE_TXD_STAT_DD) == 0)
                        break; /* I/O not complete */

                if (buf->m_head) {
                        txr->bytes += buf->m_head->m_pkthdr.len;
                        bus_dmamap_sync(txr->txtag->dt_dmat, buf->map,
                            0, buf->m_head->m_pkthdr.len,
                            BUS_DMASYNC_POSTWRITE);
                        ixgbe_dmamap_unload(txr->txtag, buf->map);
                        m_freem(buf->m_head);
                        buf->m_head = NULL;
                }
                buf->eop = NULL;
                ++txr->tx_avail;

                /* We clean the range if multi segment */
                while (txd != eop) {
                        ++txd;
                        ++buf;
                        ++work;
                        /* wrap the ring? */
                        if (__predict_false(!work)) {
                                work -= txr->num_desc;
                                buf = txr->tx_buffers;
                                txd = txr->tx_base;
                        }
                        if (buf->m_head) {
                                txr->bytes +=
                                    buf->m_head->m_pkthdr.len;
                                bus_dmamap_sync(txr->txtag->dt_dmat,
                                    buf->map,
                                    0, buf->m_head->m_pkthdr.len,
                                    BUS_DMASYNC_POSTWRITE);
                                ixgbe_dmamap_unload(txr->txtag,
                                    buf->map);
                                m_freem(buf->m_head);
                                buf->m_head = NULL;
                        }
                        ++txr->tx_avail;
                        buf->eop = NULL;
                }
                ++txr->packets;
                ++processed;
                ++ifp->if_opackets;

                /* Try the next packet */
                ++txd;
                ++buf;
                ++work;
                /* reset with a wrap */
                if (__predict_false(!work)) {
                        work -= txr->num_desc;
                        buf = txr->tx_buffers;
                        txd = txr->tx_base;
                }
                prefetch(txd);
        } while (__predict_true(--limit));

        ixgbe_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
            BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

        work += txr->num_desc;
        txr->next_to_clean = work;

        /*
         * Queue Hang detection: we know there's work outstanding
         * or the first return would have been taken, so increment
         * busy if nothing managed to get cleaned; then in
         * local_timer it will be checked and marked as HUNG if it
         * exceeds a MAX attempt.
         */
        if ((processed == 0) && (txr->busy != IXGBE_QUEUE_HUNG))
                ++txr->busy;
        /*
         * If anything gets cleaned we reset state to 1;
         * note this will turn off HUNG if it's set.
         */
        if (processed)
                txr->busy = 1;

        if (txr->tx_avail == txr->num_desc)
                txr->busy = 0;

        return ((limit > 0) ? false : true);
} /* ixgbe_txeof */

/************************************************************************
 * ixgbe_rsc_count
 *
 *   Used to detect a descriptor that has been merged by Hardware RSC.
 ************************************************************************/
static inline u32
ixgbe_rsc_count(union ixgbe_adv_rx_desc *rx)
{
        return (le32toh(rx->wb.lower.lo_dword.data) &
            IXGBE_RXDADV_RSCCNT_MASK) >> IXGBE_RXDADV_RSCCNT_SHIFT;
} /* ixgbe_rsc_count */
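
/*
 * The value extracted above is the RSCCNT field of the descriptor's
 * write-back status: the number of descriptors the hardware coalesced
 * into this one. A nonzero count is how the RX cleanup path detects
 * that RSC merging took place (summary comment, not new behavior).
 */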

/************************************************************************
 * ixgbe_setup_hw_rsc
 *
 *   Initialize Hardware RSC (LRO) feature on 82599
 *   for an RX ring, this is toggled by the LRO capability
 *   even though it is transparent to the stack.
 *
 *   NOTE: Since this HW feature only works with IPv4 and
 *         testing has shown soft LRO to be as effective,
 *         this feature will be disabled by default.
 ************************************************************************/
static void
ixgbe_setup_hw_rsc(struct rx_ring *rxr)
{
        struct adapter *adapter = rxr->adapter;
        struct ixgbe_hw *hw = &adapter->hw;
        u32 rscctrl, rdrxctl;

        /* If turning LRO/RSC off we need to disable it */
        if ((adapter->ifp->if_capenable & IFCAP_LRO) == 0) {
                rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(rxr->me));
                rscctrl &= ~IXGBE_RSCCTL_RSCEN;
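                /*
                 * XXX: the cleared RSCEN bit is never written back to
                 * RSCCTL here, presumably relying on RSC being disabled
                 * by default (an observation, not a behavior change).
                 */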
1271 1.1 msaitoh return;
1272 1.1 msaitoh }
1273 1.1 msaitoh
1274 1.1 msaitoh rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
1275 1.1 msaitoh rdrxctl &= ~IXGBE_RDRXCTL_RSCFRSTSIZE;
1276 1.28 msaitoh #ifdef DEV_NETMAP
1277 1.28 msaitoh /* Always strip CRC unless Netmap disabled it */
1278 1.28 msaitoh if (!(adapter->feat_en & IXGBE_FEATURE_NETMAP) ||
1279 1.28 msaitoh !(adapter->ifp->if_capenable & IFCAP_NETMAP) ||
1280 1.28 msaitoh ix_crcstrip)
1281 1.1 msaitoh #endif /* DEV_NETMAP */
1282 1.28 msaitoh rdrxctl |= IXGBE_RDRXCTL_CRCSTRIP;
1283 1.1 msaitoh rdrxctl |= IXGBE_RDRXCTL_RSCACKC;
1284 1.1 msaitoh IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);
1285 1.1 msaitoh
1286 1.1 msaitoh rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(rxr->me));
1287 1.1 msaitoh rscctrl |= IXGBE_RSCCTL_RSCEN;
1288 1.1 msaitoh /*
1289 1.28 msaitoh * Limit the total number of descriptors that
1290 1.28 msaitoh * can be combined, so it does not exceed 64K
1291 1.28 msaitoh */
1292 1.1 msaitoh if (rxr->mbuf_sz == MCLBYTES)
1293 1.1 msaitoh rscctrl |= IXGBE_RSCCTL_MAXDESC_16;
1294 1.1 msaitoh else if (rxr->mbuf_sz == MJUMPAGESIZE)
1295 1.1 msaitoh rscctrl |= IXGBE_RSCCTL_MAXDESC_8;
1296 1.1 msaitoh else if (rxr->mbuf_sz == MJUM9BYTES)
1297 1.1 msaitoh rscctrl |= IXGBE_RSCCTL_MAXDESC_4;
1298 1.1 msaitoh else /* Using 16K cluster */
1299 1.1 msaitoh rscctrl |= IXGBE_RSCCTL_MAXDESC_1;
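                 /*
                  * For reference: the worst-case merge under these settings is
                  * 16 * 2KB (MCLBYTES) = 32KB, 8 * 4KB (MJUMPAGESIZE, one page on
                  * most ports) = 32KB, 4 * 9KB (MJUM9BYTES) = 36KB, or
                  * 1 * 16KB = 16KB, each safely below the 64KB ceiling noted
                  * above.
                  */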
1300 1.1 msaitoh
1301 1.1 msaitoh IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(rxr->me), rscctrl);
1302 1.1 msaitoh
1303 1.1 msaitoh /* Enable TCP header recognition */
1304 1.1 msaitoh IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0),
1305 1.28 msaitoh (IXGBE_READ_REG(hw, IXGBE_PSRTYPE(0)) | IXGBE_PSRTYPE_TCPHDR));
1306 1.1 msaitoh
1307 1.1 msaitoh /* Disable RSC for ACK packets */
1308 1.1 msaitoh IXGBE_WRITE_REG(hw, IXGBE_RSCDBU,
1309 1.1 msaitoh (IXGBE_RSCDBU_RSCACKDIS | IXGBE_READ_REG(hw, IXGBE_RSCDBU)));
1310 1.1 msaitoh
1311 1.1 msaitoh rxr->hw_rsc = TRUE;
1312 1.28 msaitoh } /* ixgbe_setup_hw_rsc */
1313 1.8 msaitoh
1314 1.28 msaitoh /************************************************************************
1315 1.28 msaitoh * ixgbe_refresh_mbufs
1316 1.1 msaitoh *
1317 1.28 msaitoh  * Refresh mbuf buffers for RX descriptor rings.
1318 1.28 msaitoh  * The routine keeps its own state, so discards due to resource
1319 1.28 msaitoh  * exhaustion are unnecessary: if an mbuf cannot be obtained it
1320 1.28 msaitoh  * simply returns, keeping its placeholder, and can be called
1321 1.28 msaitoh  * again later to retry.
1322 1.28 msaitoh ************************************************************************/
1323 1.1 msaitoh static void
1324 1.1 msaitoh ixgbe_refresh_mbufs(struct rx_ring *rxr, int limit)
1325 1.1 msaitoh {
1326 1.28 msaitoh struct adapter *adapter = rxr->adapter;
1327 1.28 msaitoh struct ixgbe_rx_buf *rxbuf;
1328 1.28 msaitoh struct mbuf *mp;
1329 1.28 msaitoh int i, j, error;
1330 1.28 msaitoh bool refreshed = false;
1331 1.1 msaitoh
1332 1.1 msaitoh i = j = rxr->next_to_refresh;
1333 1.1 msaitoh /* Control the loop with one beyond */
1334 1.1 msaitoh if (++j == rxr->num_desc)
1335 1.1 msaitoh j = 0;
1336 1.1 msaitoh
1337 1.1 msaitoh while (j != limit) {
1338 1.1 msaitoh rxbuf = &rxr->rx_buffers[i];
1339 1.1 msaitoh if (rxbuf->buf == NULL) {
1340 1.1 msaitoh mp = ixgbe_getjcl(&adapter->jcl_head, M_NOWAIT,
1341 1.1 msaitoh MT_DATA, M_PKTHDR, rxr->mbuf_sz);
1342 1.1 msaitoh if (mp == NULL) {
1343 1.1 msaitoh rxr->no_jmbuf.ev_count++;
1344 1.1 msaitoh goto update;
1345 1.1 msaitoh }
1346 1.1 msaitoh if (adapter->max_frame_size <= (MCLBYTES - ETHER_ALIGN))
1347 1.1 msaitoh m_adj(mp, ETHER_ALIGN);
1348 1.1 msaitoh } else
1349 1.1 msaitoh mp = rxbuf->buf;
1350 1.1 msaitoh
1351 1.1 msaitoh mp->m_pkthdr.len = mp->m_len = rxr->mbuf_sz;
1352 1.1 msaitoh
1353 1.1 msaitoh /* If we're dealing with an mbuf that was copied rather
1354 1.1 msaitoh * than replaced, there's no need to go through busdma.
1355 1.1 msaitoh */
1356 1.1 msaitoh if ((rxbuf->flags & IXGBE_RX_COPY) == 0) {
1357 1.1 msaitoh /* Get the memory mapping */
1358 1.4 msaitoh ixgbe_dmamap_unload(rxr->ptag, rxbuf->pmap);
1359 1.1 msaitoh error = bus_dmamap_load_mbuf(rxr->ptag->dt_dmat,
1360 1.1 msaitoh rxbuf->pmap, mp, BUS_DMA_NOWAIT);
1361 1.1 msaitoh if (error != 0) {
1362 1.28 msaitoh printf("Refresh mbufs: payload dmamap load failure - %d\n", error);
1363 1.1 msaitoh m_free(mp);
1364 1.1 msaitoh rxbuf->buf = NULL;
1365 1.1 msaitoh goto update;
1366 1.1 msaitoh }
1367 1.1 msaitoh rxbuf->buf = mp;
1368 1.1 msaitoh bus_dmamap_sync(rxr->ptag->dt_dmat, rxbuf->pmap,
1369 1.1 msaitoh 0, mp->m_pkthdr.len, BUS_DMASYNC_PREREAD);
1370 1.1 msaitoh rxbuf->addr = rxr->rx_base[i].read.pkt_addr =
1371 1.1 msaitoh htole64(rxbuf->pmap->dm_segs[0].ds_addr);
1372 1.1 msaitoh } else {
1373 1.1 msaitoh rxr->rx_base[i].read.pkt_addr = rxbuf->addr;
1374 1.1 msaitoh rxbuf->flags &= ~IXGBE_RX_COPY;
1375 1.1 msaitoh }
1376 1.1 msaitoh
1377 1.1 msaitoh refreshed = true;
1378 1.1 msaitoh /* Next is precalculated */
1379 1.1 msaitoh i = j;
1380 1.1 msaitoh rxr->next_to_refresh = i;
1381 1.1 msaitoh if (++j == rxr->num_desc)
1382 1.1 msaitoh j = 0;
1383 1.1 msaitoh }
1384 1.28 msaitoh
1385 1.1 msaitoh update:
1386 1.1 msaitoh if (refreshed) /* Update hardware tail index */
1387 1.28 msaitoh IXGBE_WRITE_REG(&adapter->hw, rxr->tail, rxr->next_to_refresh);
1388 1.28 msaitoh
1389 1.1 msaitoh return;
1390 1.28 msaitoh } /* ixgbe_refresh_mbufs */
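                 
                 /*
                  * Editor's note: within this file the consumer is ixgbe_rxeof(),
                  * which refreshes every eight processed descriptors and once more
                  * for any remainder before it returns, so a refresh that stops
                  * early for want of mbufs is simply retried on a later pass.
                  */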
1391 1.1 msaitoh
1392 1.28 msaitoh /************************************************************************
1393 1.28 msaitoh * ixgbe_allocate_receive_buffers
1394 1.1 msaitoh *
1395 1.28 msaitoh * Allocate memory for rx_buffer structures. Since we use one
1396 1.28 msaitoh * rx_buffer per received packet, the maximum number of rx_buffer's
1397 1.28 msaitoh * that we'll need is equal to the number of receive descriptors
1398 1.28 msaitoh * that we've allocated.
1399 1.28 msaitoh ************************************************************************/
1400 1.28 msaitoh static int
1401 1.1 msaitoh ixgbe_allocate_receive_buffers(struct rx_ring *rxr)
1402 1.1 msaitoh {
1403 1.28 msaitoh struct adapter *adapter = rxr->adapter;
1404 1.28 msaitoh device_t dev = adapter->dev;
1405 1.28 msaitoh struct ixgbe_rx_buf *rxbuf;
1406 1.28 msaitoh int bsize, error;
1407 1.1 msaitoh
1408 1.1 msaitoh bsize = sizeof(struct ixgbe_rx_buf) * rxr->num_desc;
1409 1.28 msaitoh rxr->rx_buffers = (struct ixgbe_rx_buf *)malloc(bsize, M_DEVBUF,
1410 1.28 msaitoh M_NOWAIT | M_ZERO);
1411 1.28 msaitoh if (rxr->rx_buffers == NULL) {
1412 1.1 msaitoh aprint_error_dev(dev, "Unable to allocate rx_buffer memory\n");
1413 1.1 msaitoh error = ENOMEM;
1414 1.1 msaitoh goto fail;
1415 1.1 msaitoh }
1416 1.1 msaitoh
1417 1.28 msaitoh error = ixgbe_dma_tag_create(
1418 1.28 msaitoh /* parent */ adapter->osdep.dmat,
1419 1.28 msaitoh /* alignment */ 1,
1420 1.28 msaitoh /* bounds */ 0,
1421 1.28 msaitoh /* maxsize */ MJUM16BYTES,
1422 1.28 msaitoh /* nsegments */ 1,
1423 1.28 msaitoh /* maxsegsize */ MJUM16BYTES,
1424 1.28 msaitoh /* flags */ 0,
1425 1.28 msaitoh &rxr->ptag);
1426 1.28 msaitoh if (error != 0) {
1427 1.1 msaitoh aprint_error_dev(dev, "Unable to create RX DMA tag\n");
1428 1.1 msaitoh goto fail;
1429 1.1 msaitoh }
1430 1.1 msaitoh
1431 1.5 msaitoh for (int i = 0; i < rxr->num_desc; i++) {
1432 1.1 msaitoh rxbuf = &rxr->rx_buffers[i];
1433 1.4 msaitoh error = ixgbe_dmamap_create(rxr->ptag, 0, &rxbuf->pmap);
1434 1.1 msaitoh if (error) {
1435 1.1 msaitoh aprint_error_dev(dev, "Unable to create RX dma map\n");
1436 1.1 msaitoh goto fail;
1437 1.1 msaitoh }
1438 1.1 msaitoh }
1439 1.1 msaitoh
1440 1.1 msaitoh return (0);
1441 1.1 msaitoh
1442 1.1 msaitoh fail:
1443 1.1 msaitoh /* Frees all, but can handle partial completion */
1444 1.1 msaitoh ixgbe_free_receive_structures(adapter);
1445 1.28 msaitoh
1446 1.1 msaitoh return (error);
1447 1.28 msaitoh } /* ixgbe_allocate_receive_buffers */
1448 1.1 msaitoh
1449 1.28 msaitoh /************************************************************************
1450 1.30 msaitoh * ixgbe_free_receive_ring
1451 1.28 msaitoh ************************************************************************/
1452 1.28 msaitoh static void
1453 1.1 msaitoh ixgbe_free_receive_ring(struct rx_ring *rxr)
1454 1.27 msaitoh {
1455 1.5 msaitoh for (int i = 0; i < rxr->num_desc; i++) {
1456 1.27 msaitoh ixgbe_rx_discard(rxr, i);
1457 1.1 msaitoh }
1458 1.28 msaitoh } /* ixgbe_free_receive_ring */
1459 1.1 msaitoh
1460 1.28 msaitoh /************************************************************************
1461 1.28 msaitoh * ixgbe_setup_receive_ring
1462 1.1 msaitoh *
1463 1.28 msaitoh * Initialize a receive ring and its buffers.
1464 1.28 msaitoh ************************************************************************/
1465 1.1 msaitoh static int
1466 1.1 msaitoh ixgbe_setup_receive_ring(struct rx_ring *rxr)
1467 1.1 msaitoh {
1468 1.28 msaitoh struct adapter *adapter;
1469 1.28 msaitoh struct ixgbe_rx_buf *rxbuf;
1470 1.1 msaitoh #ifdef LRO
1471 1.28 msaitoh struct ifnet *ifp;
1472 1.28 msaitoh struct lro_ctrl *lro = &rxr->lro;
1473 1.1 msaitoh #endif /* LRO */
1474 1.1 msaitoh #ifdef DEV_NETMAP
1475 1.1 msaitoh struct netmap_adapter *na = NA(rxr->adapter->ifp);
1476 1.28 msaitoh struct netmap_slot *slot = NULL;
1477 1.1 msaitoh #endif /* DEV_NETMAP */
1478 1.28 msaitoh int rsize, error = 0;
1479 1.1 msaitoh
1480 1.1 msaitoh adapter = rxr->adapter;
1481 1.1 msaitoh #ifdef LRO
1482 1.1 msaitoh ifp = adapter->ifp;
1483 1.1 msaitoh #endif /* LRO */
1484 1.1 msaitoh
1485 1.1 msaitoh /* Clear the ring contents */
1486 1.1 msaitoh IXGBE_RX_LOCK(rxr);
1487 1.28 msaitoh
1488 1.1 msaitoh #ifdef DEV_NETMAP
1489 1.28 msaitoh if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
1490 1.28 msaitoh slot = netmap_reset(na, NR_RX, rxr->me, 0);
1491 1.1 msaitoh #endif /* DEV_NETMAP */
1492 1.28 msaitoh
1493 1.1 msaitoh rsize = roundup2(adapter->num_rx_desc *
1494 1.1 msaitoh sizeof(union ixgbe_adv_rx_desc), DBA_ALIGN);
1495 1.1 msaitoh bzero((void *)rxr->rx_base, rsize);
1496 1.1 msaitoh /* Cache the size */
1497 1.1 msaitoh rxr->mbuf_sz = adapter->rx_mbuf_sz;
1498 1.1 msaitoh
1499 1.1 msaitoh /* Free current RX buffer structs and their mbufs */
1500 1.1 msaitoh ixgbe_free_receive_ring(rxr);
1501 1.1 msaitoh
1502 1.1 msaitoh /* Now replenish the mbufs */
1503 1.1 msaitoh for (int j = 0; j != rxr->num_desc; ++j) {
1504 1.28 msaitoh struct mbuf *mp;
1505 1.1 msaitoh
1506 1.1 msaitoh rxbuf = &rxr->rx_buffers[j];
1507 1.28 msaitoh
1508 1.1 msaitoh #ifdef DEV_NETMAP
1509 1.1 msaitoh /*
1510 1.1 msaitoh * In netmap mode, fill the map and set the buffer
1511 1.1 msaitoh * address in the NIC ring, considering the offset
1512 1.1 msaitoh * between the netmap and NIC rings (see comment in
1513 1.1 msaitoh * ixgbe_setup_transmit_ring() ). No need to allocate
1514 1.1 msaitoh * an mbuf, so end the block with a continue;
1515 1.1 msaitoh */
1516 1.28 msaitoh if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) && slot) {
1517 1.1 msaitoh int sj = netmap_idx_n2k(&na->rx_rings[rxr->me], j);
1518 1.1 msaitoh uint64_t paddr;
1519 1.1 msaitoh void *addr;
1520 1.1 msaitoh
1521 1.1 msaitoh addr = PNMB(na, slot + sj, &paddr);
1522 1.1 msaitoh netmap_load_map(na, rxr->ptag, rxbuf->pmap, addr);
1523 1.1 msaitoh /* Update descriptor and the cached value */
1524 1.1 msaitoh rxr->rx_base[j].read.pkt_addr = htole64(paddr);
1525 1.1 msaitoh rxbuf->addr = htole64(paddr);
1526 1.1 msaitoh continue;
1527 1.1 msaitoh }
1528 1.1 msaitoh #endif /* DEV_NETMAP */
1529 1.28 msaitoh
1530 1.28 msaitoh rxbuf->flags = 0;
1531 1.1 msaitoh rxbuf->buf = ixgbe_getjcl(&adapter->jcl_head, M_NOWAIT,
1532 1.1 msaitoh MT_DATA, M_PKTHDR, adapter->rx_mbuf_sz);
1533 1.1 msaitoh if (rxbuf->buf == NULL) {
1534 1.1 msaitoh error = ENOBUFS;
1535 1.28 msaitoh goto fail;
1536 1.1 msaitoh }
1537 1.1 msaitoh mp = rxbuf->buf;
1538 1.1 msaitoh mp->m_pkthdr.len = mp->m_len = rxr->mbuf_sz;
1539 1.1 msaitoh /* Get the memory mapping */
1540 1.28 msaitoh error = bus_dmamap_load_mbuf(rxr->ptag->dt_dmat, rxbuf->pmap,
1541 1.28 msaitoh mp, BUS_DMA_NOWAIT);
1542 1.1 msaitoh if (error != 0)
1543 1.1 msaitoh goto fail;
1544 1.1 msaitoh bus_dmamap_sync(rxr->ptag->dt_dmat, rxbuf->pmap,
1545 1.1 msaitoh 0, adapter->rx_mbuf_sz, BUS_DMASYNC_PREREAD);
1546 1.1 msaitoh /* Update the descriptor and the cached value */
1547 1.1 msaitoh rxr->rx_base[j].read.pkt_addr =
1548 1.1 msaitoh htole64(rxbuf->pmap->dm_segs[0].ds_addr);
1549 1.1 msaitoh rxbuf->addr = htole64(rxbuf->pmap->dm_segs[0].ds_addr);
1550 1.1 msaitoh }
1551 1.1 msaitoh
1552 1.1 msaitoh
1553 1.1 msaitoh /* Setup our descriptor indices */
1554 1.1 msaitoh rxr->next_to_check = 0;
1555 1.1 msaitoh rxr->next_to_refresh = 0;
1556 1.1 msaitoh rxr->lro_enabled = FALSE;
1557 1.1 msaitoh rxr->rx_copies.ev_count = 0;
1558 1.13 msaitoh #if 0 /* NetBSD */
1559 1.1 msaitoh rxr->rx_bytes.ev_count = 0;
1560 1.13 msaitoh #if 1 /* Fix inconsistency */
1561 1.13 msaitoh rxr->rx_packets.ev_count = 0;
1562 1.13 msaitoh #endif
1563 1.13 msaitoh #endif
1564 1.1 msaitoh rxr->vtag_strip = FALSE;
1565 1.1 msaitoh
1566 1.1 msaitoh ixgbe_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
1567 1.1 msaitoh BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1568 1.1 msaitoh
1569 1.1 msaitoh /*
1570 1.28 msaitoh * Now set up the LRO interface
1571 1.28 msaitoh */
1572 1.1 msaitoh if (ixgbe_rsc_enable)
1573 1.1 msaitoh ixgbe_setup_hw_rsc(rxr);
1574 1.1 msaitoh #ifdef LRO
1575 1.1 msaitoh else if (ifp->if_capenable & IFCAP_LRO) {
1576 1.1 msaitoh device_t dev = adapter->dev;
1577 1.1 msaitoh int err = tcp_lro_init(lro);
1578 1.1 msaitoh if (err) {
1579 1.1 msaitoh device_printf(dev, "LRO Initialization failed!\n");
1580 1.1 msaitoh goto fail;
1581 1.1 msaitoh }
1582 1.1 msaitoh INIT_DEBUGOUT("RX Soft LRO Initialized\n");
1583 1.1 msaitoh rxr->lro_enabled = TRUE;
1584 1.1 msaitoh lro->ifp = adapter->ifp;
1585 1.1 msaitoh }
1586 1.1 msaitoh #endif /* LRO */
1587 1.1 msaitoh
1588 1.1 msaitoh IXGBE_RX_UNLOCK(rxr);
1589 1.28 msaitoh
1590 1.1 msaitoh return (0);
1591 1.1 msaitoh
1592 1.1 msaitoh fail:
1593 1.1 msaitoh ixgbe_free_receive_ring(rxr);
1594 1.1 msaitoh IXGBE_RX_UNLOCK(rxr);
1595 1.28 msaitoh
1596 1.1 msaitoh return (error);
1597 1.28 msaitoh } /* ixgbe_setup_receive_ring */
1598 1.1 msaitoh
1599 1.28 msaitoh /************************************************************************
1600 1.28 msaitoh * ixgbe_setup_receive_structures - Initialize all receive rings.
1601 1.28 msaitoh ************************************************************************/
1602 1.1 msaitoh int
1603 1.1 msaitoh ixgbe_setup_receive_structures(struct adapter *adapter)
1604 1.1 msaitoh {
1605 1.1 msaitoh struct rx_ring *rxr = adapter->rx_rings;
1606 1.28 msaitoh int j;
1607 1.1 msaitoh
1608 1.30 msaitoh /*
1609 1.30 msaitoh * Now reinitialize our supply of jumbo mbufs. The number
1610 1.30 msaitoh * or size of jumbo mbufs may have changed.
1611 1.30 msaitoh * Assume all of rxr->ptag are the same.
1612 1.30 msaitoh */
1613 1.41 msaitoh ixgbe_jcl_reinit(adapter, rxr->ptag->dt_dmat,
1614 1.30 msaitoh (2 * adapter->num_rx_desc) * adapter->num_queues,
1615 1.30 msaitoh adapter->rx_mbuf_sz);
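                 /*
                  * E.g. (editor's note): four queues of 2048 RX descriptors each
                  * pre-provision 2 * 2048 * 4 = 16384 jumbo clusters here.
                  */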
1616 1.30 msaitoh
1617 1.1 msaitoh for (j = 0; j < adapter->num_queues; j++, rxr++)
1618 1.1 msaitoh if (ixgbe_setup_receive_ring(rxr))
1619 1.1 msaitoh goto fail;
1620 1.1 msaitoh
1621 1.1 msaitoh return (0);
1622 1.1 msaitoh fail:
1623 1.1 msaitoh /*
1624 1.1 msaitoh  * Free RX buffers allocated so far; we only handle rings
1625 1.1 msaitoh  * that completed, as the failing ring cleaned up after
1626 1.1 msaitoh  * itself. 'j' failed, so it's the terminus.
1627 1.1 msaitoh */
1628 1.1 msaitoh for (int i = 0; i < j; ++i) {
1629 1.1 msaitoh rxr = &adapter->rx_rings[i];
1630 1.27 msaitoh IXGBE_RX_LOCK(rxr);
1631 1.1 msaitoh ixgbe_free_receive_ring(rxr);
1632 1.27 msaitoh IXGBE_RX_UNLOCK(rxr);
1633 1.1 msaitoh }
1634 1.1 msaitoh
1635 1.1 msaitoh return (ENOBUFS);
1636 1.28 msaitoh } /* ixgbe_setup_receive_structures */
1637 1.1 msaitoh
1638 1.3 msaitoh
1639 1.28 msaitoh /************************************************************************
1640 1.28 msaitoh * ixgbe_free_receive_structures - Free all receive rings.
1641 1.28 msaitoh ************************************************************************/
1642 1.1 msaitoh void
1643 1.1 msaitoh ixgbe_free_receive_structures(struct adapter *adapter)
1644 1.1 msaitoh {
1645 1.1 msaitoh struct rx_ring *rxr = adapter->rx_rings;
1646 1.1 msaitoh
1647 1.1 msaitoh INIT_DEBUGOUT("ixgbe_free_receive_structures: begin");
1648 1.1 msaitoh
1649 1.1 msaitoh for (int i = 0; i < adapter->num_queues; i++, rxr++) {
1650 1.1 msaitoh ixgbe_free_receive_buffers(rxr);
1651 1.1 msaitoh #ifdef LRO
1652 1.1 msaitoh /* Free LRO memory */
1653 1.28 msaitoh tcp_lro_free(&rxr->lro);
1654 1.1 msaitoh #endif /* LRO */
1655 1.1 msaitoh /* Free the ring memory as well */
1656 1.1 msaitoh ixgbe_dma_free(adapter, &rxr->rxdma);
1657 1.1 msaitoh IXGBE_RX_LOCK_DESTROY(rxr);
1658 1.1 msaitoh }
1659 1.1 msaitoh
1660 1.1 msaitoh free(adapter->rx_rings, M_DEVBUF);
1661 1.28 msaitoh } /* ixgbe_free_receive_structures */
1662 1.1 msaitoh
1663 1.1 msaitoh
1664 1.28 msaitoh /************************************************************************
1665 1.28 msaitoh * ixgbe_free_receive_buffers - Free receive ring data structures
1666 1.28 msaitoh ************************************************************************/
1667 1.1 msaitoh static void
1668 1.1 msaitoh ixgbe_free_receive_buffers(struct rx_ring *rxr)
1669 1.1 msaitoh {
1670 1.28 msaitoh struct adapter *adapter = rxr->adapter;
1671 1.28 msaitoh struct ixgbe_rx_buf *rxbuf;
1672 1.1 msaitoh
1673 1.1 msaitoh INIT_DEBUGOUT("ixgbe_free_receive_buffers: begin");
1674 1.1 msaitoh
1675 1.1 msaitoh /* Cleanup any existing buffers */
1676 1.1 msaitoh if (rxr->rx_buffers != NULL) {
1677 1.1 msaitoh for (int i = 0; i < adapter->num_rx_desc; i++) {
1678 1.1 msaitoh rxbuf = &rxr->rx_buffers[i];
1679 1.27 msaitoh ixgbe_rx_discard(rxr, i);
1680 1.1 msaitoh if (rxbuf->pmap != NULL) {
1681 1.1 msaitoh ixgbe_dmamap_destroy(rxr->ptag, rxbuf->pmap);
1682 1.1 msaitoh rxbuf->pmap = NULL;
1683 1.1 msaitoh }
1684 1.1 msaitoh }
1685 1.1 msaitoh if (rxr->rx_buffers != NULL) {
1686 1.1 msaitoh free(rxr->rx_buffers, M_DEVBUF);
1687 1.1 msaitoh rxr->rx_buffers = NULL;
1688 1.1 msaitoh }
1689 1.1 msaitoh }
1690 1.1 msaitoh
1691 1.1 msaitoh if (rxr->ptag != NULL) {
1692 1.1 msaitoh ixgbe_dma_tag_destroy(rxr->ptag);
1693 1.1 msaitoh rxr->ptag = NULL;
1694 1.1 msaitoh }
1695 1.1 msaitoh
1696 1.1 msaitoh return;
1697 1.28 msaitoh } /* ixgbe_free_receive_buffers */
1698 1.1 msaitoh
1699 1.28 msaitoh /************************************************************************
1700 1.28 msaitoh * ixgbe_rx_input
1701 1.28 msaitoh ************************************************************************/
1702 1.1 msaitoh static __inline void
1703 1.28 msaitoh ixgbe_rx_input(struct rx_ring *rxr, struct ifnet *ifp, struct mbuf *m,
1704 1.28 msaitoh u32 ptype)
1705 1.1 msaitoh {
1706 1.20 msaitoh struct adapter *adapter = ifp->if_softc;
1707 1.1 msaitoh
1708 1.1 msaitoh #ifdef LRO
1709 1.1 msaitoh struct ethercom *ec = &adapter->osdep.ec;
1710 1.1 msaitoh
1711 1.28 msaitoh /*
1712 1.28 msaitoh  * At the moment LRO is only for IP/TCP packets whose TCP checksum
1713 1.28 msaitoh  * was computed by hardware and which carry no VLAN tag in the
1714 1.28 msaitoh  * ethernet header. For IPv6 we do not yet support extension headers.
1715 1.28 msaitoh */
1716 1.1 msaitoh if (rxr->lro_enabled &&
1717 1.1 msaitoh (ec->ec_capenable & ETHERCAP_VLAN_HWTAGGING) != 0 &&
1718 1.1 msaitoh (ptype & IXGBE_RXDADV_PKTTYPE_ETQF) == 0 &&
1719 1.1 msaitoh ((ptype & (IXGBE_RXDADV_PKTTYPE_IPV4 | IXGBE_RXDADV_PKTTYPE_TCP)) ==
1720 1.1 msaitoh (IXGBE_RXDADV_PKTTYPE_IPV4 | IXGBE_RXDADV_PKTTYPE_TCP) ||
1721 1.1 msaitoh (ptype & (IXGBE_RXDADV_PKTTYPE_IPV6 | IXGBE_RXDADV_PKTTYPE_TCP)) ==
1722 1.1 msaitoh (IXGBE_RXDADV_PKTTYPE_IPV6 | IXGBE_RXDADV_PKTTYPE_TCP)) &&
1723 1.1 msaitoh (m->m_pkthdr.csum_flags & (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) ==
1724 1.1 msaitoh (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) {
1725 1.1 msaitoh /*
1726 1.1 msaitoh * Send to the stack if:
1727 1.1 msaitoh  *  - LRO not enabled, or
1728 1.1 msaitoh  *  - no LRO resources, or
1729 1.1 msaitoh  *  - LRO enqueue fails
1730 1.1 msaitoh */
1731 1.1 msaitoh if (rxr->lro.lro_cnt != 0)
1732 1.1 msaitoh if (tcp_lro_rx(&rxr->lro, m, 0) == 0)
1733 1.1 msaitoh return;
1734 1.1 msaitoh }
1735 1.1 msaitoh #endif /* LRO */
1736 1.1 msaitoh
1737 1.20 msaitoh if_percpuq_enqueue(adapter->ipq, m);
1738 1.28 msaitoh } /* ixgbe_rx_input */
1739 1.1 msaitoh
1740 1.28 msaitoh /************************************************************************
1741 1.28 msaitoh * ixgbe_rx_discard
1742 1.28 msaitoh ************************************************************************/
1743 1.1 msaitoh static __inline void
1744 1.1 msaitoh ixgbe_rx_discard(struct rx_ring *rxr, int i)
1745 1.1 msaitoh {
1746 1.28 msaitoh struct ixgbe_rx_buf *rbuf;
1747 1.1 msaitoh
1748 1.1 msaitoh rbuf = &rxr->rx_buffers[i];
1749 1.1 msaitoh
1750 1.1 msaitoh /*
1751 1.28 msaitoh  * With advanced descriptors the writeback
1752 1.28 msaitoh  * clobbers the buffer addresses, so it's easier
1753 1.28 msaitoh  * to just free the existing mbufs and take
1754 1.28 msaitoh  * the normal refresh path to get new buffers
1755 1.28 msaitoh  * and mapping.
1756 1.28 msaitoh */
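                 /*
                  * Note that rbuf->addr, cached when the buffer was last loaded,
                  * is what allows the IXGBE_RX_COPY path in ixgbe_refresh_mbufs()
                  * to rewrite the descriptor without a fresh busdma load.
                  */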
1757 1.1 msaitoh
1758 1.26 msaitoh if (rbuf->fmp != NULL) {	/* Partial chain? */
1759 1.27 msaitoh bus_dmamap_sync(rxr->ptag->dt_dmat, rbuf->pmap, 0,
1760 1.27 msaitoh rbuf->buf->m_pkthdr.len, BUS_DMASYNC_POSTREAD);
1761 1.1 msaitoh m_freem(rbuf->fmp);
1762 1.1 msaitoh rbuf->fmp = NULL;
1763 1.1 msaitoh rbuf->buf = NULL; /* rbuf->buf is part of fmp's chain */
1764 1.1 msaitoh } else if (rbuf->buf) {
1765 1.27 msaitoh bus_dmamap_sync(rxr->ptag->dt_dmat, rbuf->pmap, 0,
1766 1.27 msaitoh rbuf->buf->m_pkthdr.len, BUS_DMASYNC_POSTREAD);
1767 1.1 msaitoh m_free(rbuf->buf);
1768 1.1 msaitoh rbuf->buf = NULL;
1769 1.1 msaitoh }
1770 1.4 msaitoh ixgbe_dmamap_unload(rxr->ptag, rbuf->pmap);
1771 1.1 msaitoh
1772 1.1 msaitoh rbuf->flags = 0;
1773 1.1 msaitoh
1774 1.1 msaitoh return;
1775 1.28 msaitoh } /* ixgbe_rx_discard */
1776 1.1 msaitoh
1777 1.1 msaitoh
1778 1.28 msaitoh /************************************************************************
1779 1.28 msaitoh * ixgbe_rxeof
1780 1.1 msaitoh *
1781 1.28 msaitoh  * Executes in interrupt context. Replenishes the mbufs
1782 1.28 msaitoh  * in the descriptor ring and passes data which has been
1783 1.28 msaitoh  * DMA'ed into host memory up to the network stack.
1784 1.1 msaitoh *
1785 1.28 msaitoh * Return TRUE for more work, FALSE for all clean.
1786 1.28 msaitoh ************************************************************************/
1787 1.1 msaitoh bool
1788 1.1 msaitoh ixgbe_rxeof(struct ix_queue *que)
1789 1.1 msaitoh {
1790 1.1 msaitoh struct adapter *adapter = que->adapter;
1791 1.1 msaitoh struct rx_ring *rxr = que->rxr;
1792 1.1 msaitoh struct ifnet *ifp = adapter->ifp;
1793 1.1 msaitoh #ifdef LRO
1794 1.1 msaitoh struct lro_ctrl *lro = &rxr->lro;
1795 1.1 msaitoh #endif /* LRO */
1796 1.28 msaitoh union ixgbe_adv_rx_desc *cur;
1797 1.28 msaitoh struct ixgbe_rx_buf *rbuf, *nbuf;
1798 1.1 msaitoh int i, nextp, processed = 0;
1799 1.1 msaitoh u32 staterr = 0;
1800 1.7 msaitoh u32 count = adapter->rx_process_limit;
1801 1.1 msaitoh #ifdef RSS
1802 1.1 msaitoh u16 pkt_info;
1803 1.1 msaitoh #endif
1804 1.1 msaitoh
1805 1.1 msaitoh IXGBE_RX_LOCK(rxr);
1806 1.1 msaitoh
1807 1.1 msaitoh #ifdef DEV_NETMAP
1808 1.28 msaitoh if (adapter->feat_en & IXGBE_FEATURE_NETMAP) {
1809 1.28 msaitoh /* Same as the txeof routine: wakeup clients on intr. */
1810 1.28 msaitoh if (netmap_rx_irq(ifp, rxr->me, &processed)) {
1811 1.28 msaitoh IXGBE_RX_UNLOCK(rxr);
1812 1.28 msaitoh return (FALSE);
1813 1.28 msaitoh }
1814 1.1 msaitoh }
1815 1.1 msaitoh #endif /* DEV_NETMAP */
1816 1.1 msaitoh
1817 1.1 msaitoh for (i = rxr->next_to_check; count != 0;) {
1818 1.28 msaitoh struct mbuf *sendmp, *mp;
1819 1.28 msaitoh u32 rsc, ptype;
1820 1.28 msaitoh u16 len;
1821 1.28 msaitoh u16 vtag = 0;
1822 1.28 msaitoh bool eop;
1823 1.1 msaitoh
1824 1.1 msaitoh /* Sync the ring. */
1825 1.1 msaitoh ixgbe_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
1826 1.1 msaitoh BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1827 1.1 msaitoh
1828 1.1 msaitoh cur = &rxr->rx_base[i];
1829 1.1 msaitoh staterr = le32toh(cur->wb.upper.status_error);
1830 1.1 msaitoh #ifdef RSS
1831 1.1 msaitoh pkt_info = le16toh(cur->wb.lower.lo_dword.hs_rss.pkt_info);
1832 1.1 msaitoh #endif
1833 1.1 msaitoh
1834 1.1 msaitoh if ((staterr & IXGBE_RXD_STAT_DD) == 0)
1835 1.1 msaitoh break;
1836 1.1 msaitoh if ((ifp->if_flags & IFF_RUNNING) == 0)
1837 1.1 msaitoh break;
1838 1.1 msaitoh
1839 1.1 msaitoh count--;
1840 1.1 msaitoh sendmp = NULL;
1841 1.1 msaitoh nbuf = NULL;
1842 1.1 msaitoh rsc = 0;
1843 1.1 msaitoh cur->wb.upper.status_error = 0;
1844 1.1 msaitoh rbuf = &rxr->rx_buffers[i];
1845 1.1 msaitoh mp = rbuf->buf;
1846 1.1 msaitoh
1847 1.1 msaitoh len = le16toh(cur->wb.upper.length);
1848 1.1 msaitoh ptype = le32toh(cur->wb.lower.lo_dword.data) &
1849 1.1 msaitoh IXGBE_RXDADV_PKTTYPE_MASK;
1850 1.1 msaitoh eop = ((staterr & IXGBE_RXD_STAT_EOP) != 0);
1851 1.1 msaitoh
1852 1.1 msaitoh /* Make sure bad packets are discarded */
1853 1.1 msaitoh if (eop && (staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK) != 0) {
1854 1.3 msaitoh #if __FreeBSD_version >= 1100036
1855 1.28 msaitoh if (adapter->feat_en & IXGBE_FEATURE_VF)
1856 1.4 msaitoh if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
1857 1.3 msaitoh #endif
1858 1.1 msaitoh rxr->rx_discarded.ev_count++;
1859 1.1 msaitoh ixgbe_rx_discard(rxr, i);
1860 1.1 msaitoh goto next_desc;
1861 1.1 msaitoh }
1862 1.1 msaitoh
1863 1.27 msaitoh bus_dmamap_sync(rxr->ptag->dt_dmat, rbuf->pmap, 0,
1864 1.27 msaitoh rbuf->buf->m_pkthdr.len, BUS_DMASYNC_POSTREAD);
1865 1.27 msaitoh
1866 1.1 msaitoh /*
1867 1.28 msaitoh * On the 82599, which supports hardware
1868 1.28 msaitoh * LRO (called HW RSC), packets need not
1869 1.28 msaitoh * be fragmented across sequential
1870 1.28 msaitoh * descriptors; rather, the next descriptor
1871 1.28 msaitoh * is indicated in bits of this descriptor.
1872 1.28 msaitoh * This also means we may process more
1873 1.28 msaitoh * than one packet at a time, something
1874 1.28 msaitoh * that was never true before, and it
1875 1.28 msaitoh * required eliminating global chain pointers
1876 1.28 msaitoh * in favor of what we are doing here. -jfv
1877 1.28 msaitoh */
1878 1.1 msaitoh if (!eop) {
1879 1.1 msaitoh /*
1880 1.28 msaitoh * Figure out the next descriptor
1881 1.28 msaitoh * of this frame.
1882 1.28 msaitoh */
1883 1.1 msaitoh if (rxr->hw_rsc == TRUE) {
1884 1.1 msaitoh rsc = ixgbe_rsc_count(cur);
1885 1.1 msaitoh rxr->rsc_num += (rsc - 1);
1886 1.1 msaitoh }
1887 1.1 msaitoh if (rsc) { /* Get hardware index */
1888 1.28 msaitoh nextp = ((staterr & IXGBE_RXDADV_NEXTP_MASK) >>
1889 1.1 msaitoh IXGBE_RXDADV_NEXTP_SHIFT);
1890 1.1 msaitoh } else { /* Just sequential */
1891 1.1 msaitoh nextp = i + 1;
1892 1.1 msaitoh if (nextp == adapter->num_rx_desc)
1893 1.1 msaitoh nextp = 0;
1894 1.1 msaitoh }
1895 1.1 msaitoh nbuf = &rxr->rx_buffers[nextp];
1896 1.1 msaitoh prefetch(nbuf);
1897 1.1 msaitoh }
1898 1.1 msaitoh /*
1899 1.28 msaitoh * Rather than using the fmp/lmp global pointers
1900 1.28 msaitoh * we now keep the head of a packet chain in the
1901 1.28 msaitoh * buffer struct and pass this along from one
1902 1.28 msaitoh * descriptor to the next, until we get EOP.
1903 1.28 msaitoh */
1904 1.1 msaitoh mp->m_len = len;
1905 1.1 msaitoh /*
1906 1.28 msaitoh * See if there is a stored head: its presence
1907 1.28 msaitoh * determines whether this is a secondary fragment.
1908 1.28 msaitoh */
1909 1.1 msaitoh sendmp = rbuf->fmp;
1910 1.1 msaitoh if (sendmp != NULL) { /* secondary frag */
1911 1.1 msaitoh rbuf->buf = rbuf->fmp = NULL;
1912 1.1 msaitoh mp->m_flags &= ~M_PKTHDR;
1913 1.1 msaitoh sendmp->m_pkthdr.len += mp->m_len;
1914 1.1 msaitoh } else {
1915 1.1 msaitoh /*
1916 1.1 msaitoh * Optimize. This might be a small packet,
1917 1.1 msaitoh * maybe just a TCP ACK. Do a fast copy that
1918 1.1 msaitoh * is cache aligned into a new mbuf, and
1919 1.1 msaitoh * leave the old mbuf+cluster for re-use.
1920 1.1 msaitoh */
1921 1.1 msaitoh if (eop && len <= IXGBE_RX_COPY_LEN) {
1922 1.1 msaitoh sendmp = m_gethdr(M_NOWAIT, MT_DATA);
1923 1.1 msaitoh if (sendmp != NULL) {
1924 1.28 msaitoh sendmp->m_data += IXGBE_RX_COPY_ALIGN;
1925 1.28 msaitoh ixgbe_bcopy(mp->m_data, sendmp->m_data,
1926 1.28 msaitoh len);
1927 1.1 msaitoh sendmp->m_len = len;
1928 1.1 msaitoh rxr->rx_copies.ev_count++;
1929 1.1 msaitoh rbuf->flags |= IXGBE_RX_COPY;
1930 1.1 msaitoh }
1931 1.1 msaitoh }
1932 1.1 msaitoh if (sendmp == NULL) {
1933 1.1 msaitoh rbuf->buf = rbuf->fmp = NULL;
1934 1.1 msaitoh sendmp = mp;
1935 1.1 msaitoh }
1936 1.1 msaitoh
1937 1.1 msaitoh /* first desc of a non-ps chain */
1938 1.1 msaitoh sendmp->m_flags |= M_PKTHDR;
1939 1.1 msaitoh sendmp->m_pkthdr.len = mp->m_len;
1940 1.1 msaitoh }
1941 1.1 msaitoh ++processed;
1942 1.1 msaitoh
1943 1.1 msaitoh /* Pass the head pointer on */
1944 1.1 msaitoh if (eop == 0) {
1945 1.1 msaitoh nbuf->fmp = sendmp;
1946 1.1 msaitoh sendmp = NULL;
1947 1.1 msaitoh mp->m_next = nbuf->buf;
1948 1.1 msaitoh } else { /* Sending this frame */
1949 1.1 msaitoh m_set_rcvif(sendmp, ifp);
1950 1.31 msaitoh ++rxr->packets;
1951 1.1 msaitoh rxr->rx_packets.ev_count++;
1952 1.1 msaitoh /* capture data for AIM */
1953 1.1 msaitoh rxr->bytes += sendmp->m_pkthdr.len;
1954 1.1 msaitoh rxr->rx_bytes.ev_count += sendmp->m_pkthdr.len;
1955 1.1 msaitoh /* Process vlan info */
1956 1.28 msaitoh if ((rxr->vtag_strip) && (staterr & IXGBE_RXD_STAT_VP))
1957 1.1 msaitoh vtag = le16toh(cur->wb.upper.vlan);
1958 1.1 msaitoh if (vtag) {
1959 1.29 knakahar vlan_set_tag(sendmp, vtag);
1960 1.1 msaitoh }
1961 1.1 msaitoh if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) {
1962 1.1 msaitoh ixgbe_rx_checksum(staterr, sendmp, ptype,
1963 1.3 msaitoh &adapter->stats.pf);
1964 1.1 msaitoh }
1965 1.8 msaitoh
1966 1.6 msaitoh #if 0 /* FreeBSD */
1967 1.28 msaitoh /*
1968 1.28 msaitoh * In case of multiqueue, we have RXCSUM.PCSD bit set
1969 1.28 msaitoh * and never cleared. This means we have RSS hash
1970 1.28 msaitoh * available to be used.
1971 1.28 msaitoh */
1972 1.28 msaitoh if (adapter->num_queues > 1) {
1973 1.28 msaitoh sendmp->m_pkthdr.flowid =
1974 1.28 msaitoh le32toh(cur->wb.lower.hi_dword.rss);
1975 1.44 msaitoh switch (pkt_info & IXGBE_RXDADV_RSSTYPE_MASK) {
1976 1.44 msaitoh case IXGBE_RXDADV_RSSTYPE_IPV4:
1977 1.28 msaitoh M_HASHTYPE_SET(sendmp,
1978 1.28 msaitoh M_HASHTYPE_RSS_IPV4);
1979 1.28 msaitoh break;
1980 1.44 msaitoh case IXGBE_RXDADV_RSSTYPE_IPV4_TCP:
1981 1.28 msaitoh M_HASHTYPE_SET(sendmp,
1982 1.28 msaitoh M_HASHTYPE_RSS_TCP_IPV4);
1983 1.28 msaitoh break;
1984 1.44 msaitoh case IXGBE_RXDADV_RSSTYPE_IPV6:
1985 1.28 msaitoh M_HASHTYPE_SET(sendmp,
1986 1.28 msaitoh M_HASHTYPE_RSS_IPV6);
1987 1.28 msaitoh break;
1988 1.44 msaitoh case IXGBE_RXDADV_RSSTYPE_IPV6_TCP:
1989 1.28 msaitoh M_HASHTYPE_SET(sendmp,
1990 1.28 msaitoh M_HASHTYPE_RSS_TCP_IPV6);
1991 1.28 msaitoh break;
1992 1.44 msaitoh case IXGBE_RXDADV_RSSTYPE_IPV6_EX:
1993 1.28 msaitoh M_HASHTYPE_SET(sendmp,
1994 1.28 msaitoh M_HASHTYPE_RSS_IPV6_EX);
1995 1.28 msaitoh break;
1996 1.44 msaitoh case IXGBE_RXDADV_RSSTYPE_IPV6_TCP_EX:
1997 1.28 msaitoh M_HASHTYPE_SET(sendmp,
1998 1.28 msaitoh M_HASHTYPE_RSS_TCP_IPV6_EX);
1999 1.28 msaitoh break;
2000 1.6 msaitoh #if __FreeBSD_version > 1100000
2001 1.44 msaitoh case IXGBE_RXDADV_RSSTYPE_IPV4_UDP:
2002 1.28 msaitoh M_HASHTYPE_SET(sendmp,
2003 1.28 msaitoh M_HASHTYPE_RSS_UDP_IPV4);
2004 1.28 msaitoh break;
2005 1.44 msaitoh case IXGBE_RXDADV_RSSTYPE_IPV6_UDP:
2006 1.28 msaitoh M_HASHTYPE_SET(sendmp,
2007 1.28 msaitoh M_HASHTYPE_RSS_UDP_IPV6);
2008 1.28 msaitoh break;
2009 1.44 msaitoh case IXGBE_RXDADV_RSSTYPE_IPV6_UDP_EX:
2010 1.28 msaitoh M_HASHTYPE_SET(sendmp,
2011 1.28 msaitoh M_HASHTYPE_RSS_UDP_IPV6_EX);
2012 1.28 msaitoh break;
2013 1.28 msaitoh #endif
2014 1.44 msaitoh default:
2015 1.28 msaitoh M_HASHTYPE_SET(sendmp,
2016 1.28 msaitoh M_HASHTYPE_OPAQUE_HASH);
2017 1.28 msaitoh }
2018 1.28 msaitoh } else {
2019 1.28 msaitoh sendmp->m_pkthdr.flowid = que->msix;
2020 1.1 msaitoh M_HASHTYPE_SET(sendmp, M_HASHTYPE_OPAQUE);
2021 1.1 msaitoh }
2022 1.8 msaitoh #endif
2023 1.1 msaitoh }
2024 1.1 msaitoh next_desc:
2025 1.1 msaitoh ixgbe_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
2026 1.1 msaitoh BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2027 1.1 msaitoh
2028 1.1 msaitoh /* Advance our pointers to the next descriptor. */
2029 1.1 msaitoh if (++i == rxr->num_desc)
2030 1.1 msaitoh i = 0;
2031 1.1 msaitoh
2032 1.1 msaitoh /* Now send to the stack or do LRO */
2033 1.1 msaitoh if (sendmp != NULL) {
2034 1.1 msaitoh rxr->next_to_check = i;
2035 1.28 msaitoh IXGBE_RX_UNLOCK(rxr);
2036 1.1 msaitoh ixgbe_rx_input(rxr, ifp, sendmp, ptype);
2037 1.28 msaitoh IXGBE_RX_LOCK(rxr);
2038 1.1 msaitoh i = rxr->next_to_check;
2039 1.1 msaitoh }
2040 1.1 msaitoh
2041 1.28 msaitoh /* Refresh the mbufs every eight processed descriptors */
2042 1.1 msaitoh if (processed == 8) {
2043 1.1 msaitoh ixgbe_refresh_mbufs(rxr, i);
2044 1.1 msaitoh processed = 0;
2045 1.1 msaitoh }
2046 1.1 msaitoh }
2047 1.1 msaitoh
2048 1.1 msaitoh /* Refresh any remaining buf structs */
2049 1.1 msaitoh if (ixgbe_rx_unrefreshed(rxr))
2050 1.1 msaitoh ixgbe_refresh_mbufs(rxr, i);
2051 1.1 msaitoh
2052 1.1 msaitoh rxr->next_to_check = i;
2053 1.1 msaitoh
2054 1.28 msaitoh IXGBE_RX_UNLOCK(rxr);
2055 1.28 msaitoh
2056 1.1 msaitoh #ifdef LRO
2057 1.1 msaitoh /*
2058 1.1 msaitoh * Flush any outstanding LRO work
2059 1.1 msaitoh */
2060 1.10 msaitoh tcp_lro_flush_all(lro);
2061 1.1 msaitoh #endif /* LRO */
2062 1.1 msaitoh
2063 1.1 msaitoh /*
2064 1.28 msaitoh * Still have cleaning to do?
2065 1.28 msaitoh */
2066 1.1 msaitoh if ((staterr & IXGBE_RXD_STAT_DD) != 0)
2067 1.28 msaitoh return (TRUE);
2068 1.28 msaitoh
2069 1.28 msaitoh return (FALSE);
2070 1.28 msaitoh } /* ixgbe_rxeof */
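                 
                 /*
                  * A hedged usage sketch (editor's illustration): a queue interrupt
                  * or softint handler might drain the ring with a loop like the one
                  * below, using the TRUE return to decide whether to reschedule;
                  * the budget variable and the deferral step are assumptions:
                  *
                  *	int budget = adapter->rx_process_limit;
                  *	while (ixgbe_rxeof(que)) {
                  *		if (--budget <= 0) {
                  *			(defer the remainder to a softint)
                  *			break;
                  *		}
                  *	}
                  */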
2071 1.1 msaitoh
2072 1.1 msaitoh
2073 1.28 msaitoh /************************************************************************
2074 1.28 msaitoh * ixgbe_rx_checksum
2075 1.1 msaitoh *
2076 1.28 msaitoh * Verify that the hardware indicated that the checksum is valid.
2077 1.28 msaitoh  * Inform the stack about the status of the checksum so that it
2078 1.28 msaitoh  * doesn't spend time verifying it again.
2079 1.28 msaitoh ************************************************************************/
2080 1.1 msaitoh static void
2081 1.1 msaitoh ixgbe_rx_checksum(u32 staterr, struct mbuf * mp, u32 ptype,
2082 1.1 msaitoh struct ixgbe_hw_stats *stats)
2083 1.1 msaitoh {
2084 1.28 msaitoh u16 status = (u16)staterr;
2085 1.28 msaitoh u8 errors = (u8)(staterr >> 24);
2086 1.1 msaitoh #if 0
2087 1.28 msaitoh bool sctp = false;
2088 1.1 msaitoh
2089 1.1 msaitoh if ((ptype & IXGBE_RXDADV_PKTTYPE_ETQF) == 0 &&
2090 1.1 msaitoh (ptype & IXGBE_RXDADV_PKTTYPE_SCTP) != 0)
2091 1.8 msaitoh sctp = true;
2092 1.1 msaitoh #endif
2093 1.1 msaitoh
2094 1.8 msaitoh /* IPv4 checksum */
2095 1.1 msaitoh if (status & IXGBE_RXD_STAT_IPCS) {
2096 1.1 msaitoh stats->ipcs.ev_count++;
2097 1.1 msaitoh if (!(errors & IXGBE_RXD_ERR_IPE)) {
2098 1.1 msaitoh /* IP Checksum Good */
2099 1.1 msaitoh mp->m_pkthdr.csum_flags = M_CSUM_IPv4;
2100 1.1 msaitoh } else {
2101 1.1 msaitoh stats->ipcs_bad.ev_count++;
2102 1.1 msaitoh mp->m_pkthdr.csum_flags = M_CSUM_IPv4|M_CSUM_IPv4_BAD;
2103 1.1 msaitoh }
2104 1.1 msaitoh }
2105 1.8 msaitoh /* TCP/UDP/SCTP checksum */
2106 1.1 msaitoh if (status & IXGBE_RXD_STAT_L4CS) {
2107 1.1 msaitoh stats->l4cs.ev_count++;
2108 1.1 msaitoh int type = M_CSUM_TCPv4|M_CSUM_TCPv6|M_CSUM_UDPv4|M_CSUM_UDPv6;
2109 1.1 msaitoh if (!(errors & IXGBE_RXD_ERR_TCPE)) {
2110 1.1 msaitoh mp->m_pkthdr.csum_flags |= type;
2111 1.1 msaitoh } else {
2112 1.1 msaitoh stats->l4cs_bad.ev_count++;
2113 1.1 msaitoh mp->m_pkthdr.csum_flags |= type | M_CSUM_TCP_UDP_BAD;
2114 1.1 msaitoh }
2115 1.1 msaitoh }
2116 1.28 msaitoh } /* ixgbe_rx_checksum */
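                 
                 /*
                  * Worked example (editor's note; bit values as defined in
                  * ixgbe_type.h): staterr = 0x00000060 has IXGBE_RXD_STAT_L4CS
                  * (0x20) and IXGBE_RXD_STAT_IPCS (0x40) set with a clean error
                  * byte, so the mbuf leaves here with csum_flags = M_CSUM_IPv4 |
                  * M_CSUM_TCPv4 | M_CSUM_TCPv6 | M_CSUM_UDPv4 | M_CSUM_UDPv6 and
                  * no _BAD bits.
                  */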
2117 1.1 msaitoh
2118 1.28 msaitoh /************************************************************************
2119 1.28 msaitoh * ixgbe_dma_malloc
2120 1.28 msaitoh ************************************************************************/
2121 1.1 msaitoh int
2122 1.1 msaitoh ixgbe_dma_malloc(struct adapter *adapter, const bus_size_t size,
2123 1.1 msaitoh struct ixgbe_dma_alloc *dma, const int mapflags)
2124 1.1 msaitoh {
2125 1.1 msaitoh device_t dev = adapter->dev;
2126 1.28 msaitoh int r, rsegs;
2127 1.1 msaitoh
2128 1.28 msaitoh r = ixgbe_dma_tag_create(
2129 1.28 msaitoh /* parent */ adapter->osdep.dmat,
2130 1.28 msaitoh /* alignment */ DBA_ALIGN,
2131 1.28 msaitoh /* bounds */ 0,
2132 1.28 msaitoh /* maxsize */ size,
2133 1.28 msaitoh /* nsegments */ 1,
2134 1.28 msaitoh /* maxsegsize */ size,
2135 1.28 msaitoh /* flags */ BUS_DMA_ALLOCNOW,
2136 1.1 msaitoh &dma->dma_tag);
2137 1.1 msaitoh if (r != 0) {
2138 1.1 msaitoh aprint_error_dev(dev,
2139 1.44 msaitoh "%s: ixgbe_dma_tag_create failed; error %d\n", __func__,
2140 1.44 msaitoh r);
2141 1.1 msaitoh goto fail_0;
2142 1.1 msaitoh }
2143 1.1 msaitoh
2144 1.28 msaitoh r = bus_dmamem_alloc(dma->dma_tag->dt_dmat, size,
2145 1.28 msaitoh dma->dma_tag->dt_alignment, dma->dma_tag->dt_boundary,
2146 1.28 msaitoh &dma->dma_seg, 1, &rsegs, BUS_DMA_NOWAIT);
2147 1.1 msaitoh if (r != 0) {
2148 1.1 msaitoh aprint_error_dev(dev,
2149 1.1 msaitoh "%s: bus_dmamem_alloc failed; error %d\n", __func__, r);
2150 1.1 msaitoh goto fail_1;
2151 1.1 msaitoh }
2152 1.1 msaitoh
2153 1.1 msaitoh r = bus_dmamem_map(dma->dma_tag->dt_dmat, &dma->dma_seg, rsegs,
2154 1.1 msaitoh size, &dma->dma_vaddr, BUS_DMA_NOWAIT);
2155 1.1 msaitoh if (r != 0) {
2156 1.1 msaitoh aprint_error_dev(dev, "%s: bus_dmamem_map failed; error %d\n",
2157 1.1 msaitoh __func__, r);
2158 1.1 msaitoh goto fail_2;
2159 1.1 msaitoh }
2160 1.1 msaitoh
2161 1.1 msaitoh r = ixgbe_dmamap_create(dma->dma_tag, 0, &dma->dma_map);
2162 1.1 msaitoh if (r != 0) {
2163 1.1 msaitoh aprint_error_dev(dev, "%s: ixgbe_dmamap_create failed; "
2164 1.1 msaitoh     "error %d\n", __func__, r);
2165 1.1 msaitoh goto fail_3;
2166 1.1 msaitoh }
2167 1.1 msaitoh
2168 1.28 msaitoh r = bus_dmamap_load(dma->dma_tag->dt_dmat, dma->dma_map,
2169 1.28 msaitoh dma->dma_vaddr, size, NULL, mapflags | BUS_DMA_NOWAIT);
2170 1.1 msaitoh if (r != 0) {
2171 1.1 msaitoh aprint_error_dev(dev, "%s: bus_dmamap_load failed; error %d\n",
2172 1.1 msaitoh __func__, r);
2173 1.1 msaitoh goto fail_4;
2174 1.1 msaitoh }
2175 1.1 msaitoh dma->dma_paddr = dma->dma_map->dm_segs[0].ds_addr;
2176 1.1 msaitoh dma->dma_size = size;
2177 1.1 msaitoh return 0;
2178 1.1 msaitoh fail_4:
2179 1.1 msaitoh ixgbe_dmamap_destroy(dma->dma_tag, dma->dma_map);
2180 1.1 msaitoh fail_3:
2181 1.1 msaitoh bus_dmamem_unmap(dma->dma_tag->dt_dmat, dma->dma_vaddr, size);
2182 1.1 msaitoh fail_2:
2183 1.1 msaitoh bus_dmamem_free(dma->dma_tag->dt_dmat, &dma->dma_seg, rsegs);
2184 1.1 msaitoh fail_1:
2185 1.1 msaitoh ixgbe_dma_tag_destroy(dma->dma_tag);
2186 1.1 msaitoh fail_0:
2187 1.1 msaitoh
2188 1.28 msaitoh return (r);
2189 1.28 msaitoh } /* ixgbe_dma_malloc */
2190 1.28 msaitoh
2191 1.28 msaitoh /************************************************************************
2192 1.28 msaitoh * ixgbe_dma_free
2193 1.28 msaitoh ************************************************************************/
2194 1.3 msaitoh void
2195 1.1 msaitoh ixgbe_dma_free(struct adapter *adapter, struct ixgbe_dma_alloc *dma)
2196 1.1 msaitoh {
2197 1.1 msaitoh bus_dmamap_sync(dma->dma_tag->dt_dmat, dma->dma_map, 0, dma->dma_size,
2198 1.1 msaitoh BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2199 1.1 msaitoh ixgbe_dmamap_unload(dma->dma_tag, dma->dma_map);
2200 1.1 msaitoh bus_dmamem_free(dma->dma_tag->dt_dmat, &dma->dma_seg, 1);
2201 1.1 msaitoh ixgbe_dma_tag_destroy(dma->dma_tag);
2202 1.28 msaitoh } /* ixgbe_dma_free */
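                 
                 /*
                  * Usage sketch, following the call sites later in this file: ring
                  * allocation pairs the two helpers, with "tsize" the rounded ring
                  * size computed by the caller:
                  *
                  *	if (ixgbe_dma_malloc(adapter, tsize, &txr->txdma,
                  *	    BUS_DMA_NOWAIT)) {
                  *		(report and unwind with ENOMEM)
                  *	}
                  *	...
                  *	ixgbe_dma_free(adapter, &txr->txdma);
                  */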
2203 1.1 msaitoh
2204 1.1 msaitoh
2205 1.28 msaitoh /************************************************************************
2206 1.28 msaitoh * ixgbe_allocate_queues
2207 1.1 msaitoh *
2208 1.28 msaitoh * Allocate memory for the transmit and receive rings, and then
2209 1.28 msaitoh * the descriptors associated with each, called only once at attach.
2210 1.28 msaitoh ************************************************************************/
2211 1.1 msaitoh int
2212 1.1 msaitoh ixgbe_allocate_queues(struct adapter *adapter)
2213 1.1 msaitoh {
2214 1.1 msaitoh device_t dev = adapter->dev;
2215 1.1 msaitoh struct ix_queue *que;
2216 1.1 msaitoh struct tx_ring *txr;
2217 1.1 msaitoh struct rx_ring *rxr;
2218 1.28 msaitoh int rsize, tsize, error = IXGBE_SUCCESS;
2219 1.28 msaitoh int txconf = 0, rxconf = 0;
2220 1.1 msaitoh
2221 1.28 msaitoh /* First, allocate the top level queue structs */
2222 1.28 msaitoh adapter->queues = (struct ix_queue *)malloc(sizeof(struct ix_queue) *
2223 1.28 msaitoh adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO);
2224 1.28 msaitoh if (adapter->queues == NULL) {
2225 1.28 msaitoh aprint_error_dev(dev, "Unable to allocate queue memory\n");
2226 1.1 msaitoh error = ENOMEM;
2227 1.1 msaitoh goto fail;
2228 1.1 msaitoh }
2229 1.1 msaitoh
2230 1.28 msaitoh /* Second, allocate the TX ring struct memory */
2231 1.28 msaitoh adapter->tx_rings = (struct tx_ring *)malloc(sizeof(struct tx_ring) *
2232 1.28 msaitoh adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO);
2233 1.28 msaitoh if (adapter->tx_rings == NULL) {
2234 1.1 msaitoh aprint_error_dev(dev, "Unable to allocate TX ring memory\n");
2235 1.1 msaitoh error = ENOMEM;
2236 1.1 msaitoh goto tx_fail;
2237 1.1 msaitoh }
2238 1.1 msaitoh
2239 1.28 msaitoh /* Third, allocate the RX ring */
2240 1.28 msaitoh adapter->rx_rings = (struct rx_ring *)malloc(sizeof(struct rx_ring) *
2241 1.28 msaitoh adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO);
2242 1.28 msaitoh if (adapter->rx_rings == NULL) {
2243 1.1 msaitoh aprint_error_dev(dev, "Unable to allocate RX ring memory\n");
2244 1.1 msaitoh error = ENOMEM;
2245 1.1 msaitoh goto rx_fail;
2246 1.1 msaitoh }
2247 1.1 msaitoh
2248 1.1 msaitoh /* For the ring itself */
2249 1.28 msaitoh tsize = roundup2(adapter->num_tx_desc * sizeof(union ixgbe_adv_tx_desc),
2250 1.28 msaitoh DBA_ALIGN);
2251 1.1 msaitoh
2252 1.1 msaitoh /*
2253 1.1 msaitoh * Now set up the TX queues, txconf is needed to handle the
2254 1.1 msaitoh * possibility that things fail midcourse and we need to
2255 1.1 msaitoh * undo memory gracefully
2256 1.28 msaitoh */
2257 1.1 msaitoh for (int i = 0; i < adapter->num_queues; i++, txconf++) {
2258 1.1 msaitoh /* Set up some basics */
2259 1.1 msaitoh txr = &adapter->tx_rings[i];
2260 1.1 msaitoh txr->adapter = adapter;
2261 1.28 msaitoh txr->txr_interq = NULL;
2262 1.28 msaitoh /* In case SR-IOV is enabled, align the index properly */
2263 1.5 msaitoh #ifdef PCI_IOV
2264 1.28 msaitoh txr->me = ixgbe_vf_que_index(adapter->iov_mode, adapter->pool,
2265 1.28 msaitoh i);
2266 1.5 msaitoh #else
2267 1.1 msaitoh txr->me = i;
2268 1.5 msaitoh #endif
2269 1.1 msaitoh txr->num_desc = adapter->num_tx_desc;
2270 1.1 msaitoh
2271 1.1 msaitoh /* Initialize the TX side lock */
2272 1.1 msaitoh mutex_init(&txr->tx_mtx, MUTEX_DEFAULT, IPL_NET);
2273 1.1 msaitoh
2274 1.28 msaitoh if (ixgbe_dma_malloc(adapter, tsize, &txr->txdma,
2275 1.28 msaitoh BUS_DMA_NOWAIT)) {
2276 1.1 msaitoh aprint_error_dev(dev,
2277 1.1 msaitoh "Unable to allocate TX Descriptor memory\n");
2278 1.1 msaitoh error = ENOMEM;
2279 1.1 msaitoh goto err_tx_desc;
2280 1.1 msaitoh }
2281 1.1 msaitoh txr->tx_base = (union ixgbe_adv_tx_desc *)txr->txdma.dma_vaddr;
2282 1.1 msaitoh bzero((void *)txr->tx_base, tsize);
2283 1.1 msaitoh
2284 1.28 msaitoh /* Now allocate transmit buffers for the ring */
2285 1.28 msaitoh if (ixgbe_allocate_transmit_buffers(txr)) {
2286 1.1 msaitoh aprint_error_dev(dev,
2287 1.1 msaitoh "Critical Failure setting up transmit buffers\n");
2288 1.1 msaitoh error = ENOMEM;
2289 1.1 msaitoh goto err_tx_desc;
2290 1.1 msaitoh }
2291 1.28 msaitoh if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX)) {
2292 1.28 msaitoh /* Allocate a buf ring */
2293 1.28 msaitoh txr->txr_interq = pcq_create(IXGBE_BR_SIZE, KM_SLEEP);
2294 1.28 msaitoh if (txr->txr_interq == NULL) {
2295 1.28 msaitoh aprint_error_dev(dev,
2296 1.28 msaitoh "Critical Failure setting up buf ring\n");
2297 1.28 msaitoh error = ENOMEM;
2298 1.28 msaitoh goto err_tx_desc;
2299 1.28 msaitoh }
2300 1.28 msaitoh }
2301 1.1 msaitoh }
2302 1.1 msaitoh
2303 1.1 msaitoh /*
2304 1.1 msaitoh * Next the RX queues...
2305 1.1 msaitoh */
2306 1.28 msaitoh rsize = roundup2(adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc),
2307 1.28 msaitoh DBA_ALIGN);
2308 1.1 msaitoh for (int i = 0; i < adapter->num_queues; i++, rxconf++) {
2309 1.1 msaitoh rxr = &adapter->rx_rings[i];
2310 1.1 msaitoh /* Set up some basics */
2311 1.1 msaitoh rxr->adapter = adapter;
2312 1.5 msaitoh #ifdef PCI_IOV
2313 1.28 msaitoh /* In case SR-IOV is enabled, align the index properly */
2314 1.28 msaitoh rxr->me = ixgbe_vf_que_index(adapter->iov_mode, adapter->pool,
2315 1.28 msaitoh i);
2316 1.5 msaitoh #else
2317 1.1 msaitoh rxr->me = i;
2318 1.5 msaitoh #endif
2319 1.1 msaitoh rxr->num_desc = adapter->num_rx_desc;
2320 1.1 msaitoh
2321 1.1 msaitoh /* Initialize the RX side lock */
2322 1.1 msaitoh mutex_init(&rxr->rx_mtx, MUTEX_DEFAULT, IPL_NET);
2323 1.1 msaitoh
2324 1.28 msaitoh if (ixgbe_dma_malloc(adapter, rsize, &rxr->rxdma,
2325 1.28 msaitoh BUS_DMA_NOWAIT)) {
2326 1.1 msaitoh aprint_error_dev(dev,
2327 1.1 msaitoh "Unable to allocate RxDescriptor memory\n");
2328 1.1 msaitoh error = ENOMEM;
2329 1.1 msaitoh goto err_rx_desc;
2330 1.1 msaitoh }
2331 1.1 msaitoh rxr->rx_base = (union ixgbe_adv_rx_desc *)rxr->rxdma.dma_vaddr;
2332 1.1 msaitoh bzero((void *)rxr->rx_base, rsize);
2333 1.1 msaitoh
2334 1.28 msaitoh /* Allocate receive buffers for the ring */
2335 1.1 msaitoh if (ixgbe_allocate_receive_buffers(rxr)) {
2336 1.1 msaitoh aprint_error_dev(dev,
2337 1.1 msaitoh "Critical Failure setting up receive buffers\n");
2338 1.1 msaitoh error = ENOMEM;
2339 1.1 msaitoh goto err_rx_desc;
2340 1.1 msaitoh }
2341 1.1 msaitoh }
2342 1.1 msaitoh
2343 1.1 msaitoh /*
2344 1.28 msaitoh * Finally set up the queue holding structs
2345 1.28 msaitoh */
2346 1.1 msaitoh for (int i = 0; i < adapter->num_queues; i++) {
2347 1.1 msaitoh que = &adapter->queues[i];
2348 1.1 msaitoh que->adapter = adapter;
2349 1.3 msaitoh que->me = i;
2350 1.1 msaitoh que->txr = &adapter->tx_rings[i];
2351 1.1 msaitoh que->rxr = &adapter->rx_rings[i];
2352 1.33 knakahar
2353 1.37 knakahar mutex_init(&que->dc_mtx, MUTEX_DEFAULT, IPL_NET);
2354 1.37 knakahar que->disabled_count = 0;
2355 1.1 msaitoh }
2356 1.1 msaitoh
2357 1.1 msaitoh return (0);
2358 1.1 msaitoh
2359 1.1 msaitoh err_rx_desc:
2360 1.1 msaitoh for (rxr = adapter->rx_rings; rxconf > 0; rxr++, rxconf--)
2361 1.1 msaitoh ixgbe_dma_free(adapter, &rxr->rxdma);
2362 1.1 msaitoh err_tx_desc:
2363 1.1 msaitoh for (txr = adapter->tx_rings; txconf > 0; txr++, txconf--)
2364 1.1 msaitoh ixgbe_dma_free(adapter, &txr->txdma);
2365 1.1 msaitoh free(adapter->rx_rings, M_DEVBUF);
2366 1.1 msaitoh rx_fail:
2367 1.1 msaitoh free(adapter->tx_rings, M_DEVBUF);
2368 1.1 msaitoh tx_fail:
2369 1.1 msaitoh free(adapter->queues, M_DEVBUF);
2370 1.1 msaitoh fail:
2371 1.1 msaitoh return (error);
2372 1.28 msaitoh } /* ixgbe_allocate_queues */
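                 
                 /*
                  * Editor's sketch of the attach-time call, per the header note
                  * that this runs only once at attach; the surrounding attach
                  * function and error label are assumptions, not from this file:
                  *
                  *	if (ixgbe_allocate_queues(adapter) != 0) {
                  *		aprint_error_dev(dev, "queue allocation failed\n");
                  *		goto err_out;
                  *	}
                  */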
2373