1 1.65 msaitoh /* $NetBSD: ix_txrx.c,v 1.65 2021/03/02 11:10:53 msaitoh Exp $ */
2 1.28 msaitoh
3 1.1 msaitoh /******************************************************************************
4 1.1 msaitoh
5 1.28 msaitoh Copyright (c) 2001-2017, Intel Corporation
6 1.1 msaitoh All rights reserved.
7 1.28 msaitoh
8 1.28 msaitoh Redistribution and use in source and binary forms, with or without
9 1.1 msaitoh modification, are permitted provided that the following conditions are met:
10 1.28 msaitoh
11 1.28 msaitoh 1. Redistributions of source code must retain the above copyright notice,
12 1.1 msaitoh this list of conditions and the following disclaimer.
13 1.28 msaitoh
14 1.28 msaitoh 2. Redistributions in binary form must reproduce the above copyright
15 1.28 msaitoh notice, this list of conditions and the following disclaimer in the
16 1.1 msaitoh documentation and/or other materials provided with the distribution.
17 1.28 msaitoh
18 1.28 msaitoh 3. Neither the name of the Intel Corporation nor the names of its
19 1.28 msaitoh contributors may be used to endorse or promote products derived from
20 1.1 msaitoh this software without specific prior written permission.
21 1.28 msaitoh
22 1.1 msaitoh THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
23 1.28 msaitoh AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 1.28 msaitoh IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 1.28 msaitoh ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
26 1.28 msaitoh LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27 1.28 msaitoh CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28 1.28 msaitoh SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29 1.28 msaitoh INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30 1.28 msaitoh CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31 1.1 msaitoh ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
32 1.1 msaitoh POSSIBILITY OF SUCH DAMAGE.
33 1.1 msaitoh
34 1.1 msaitoh ******************************************************************************/
35 1.39 msaitoh /*$FreeBSD: head/sys/dev/ixgbe/ix_txrx.c 327031 2017-12-20 18:15:06Z erj $*/
36 1.28 msaitoh
37 1.1 msaitoh /*
38 1.1 msaitoh * Copyright (c) 2011 The NetBSD Foundation, Inc.
39 1.1 msaitoh * All rights reserved.
40 1.1 msaitoh *
41 1.1 msaitoh * This code is derived from software contributed to The NetBSD Foundation
42 1.1 msaitoh * by Coyote Point Systems, Inc.
43 1.1 msaitoh *
44 1.1 msaitoh * Redistribution and use in source and binary forms, with or without
45 1.1 msaitoh * modification, are permitted provided that the following conditions
46 1.1 msaitoh * are met:
47 1.1 msaitoh * 1. Redistributions of source code must retain the above copyright
48 1.1 msaitoh * notice, this list of conditions and the following disclaimer.
49 1.1 msaitoh * 2. Redistributions in binary form must reproduce the above copyright
50 1.1 msaitoh * notice, this list of conditions and the following disclaimer in the
51 1.1 msaitoh * documentation and/or other materials provided with the distribution.
52 1.1 msaitoh *
53 1.1 msaitoh * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
54 1.1 msaitoh * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
55 1.1 msaitoh * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
56 1.1 msaitoh * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
57 1.1 msaitoh * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
58 1.1 msaitoh * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
59 1.1 msaitoh * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
60 1.1 msaitoh * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
61 1.1 msaitoh * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
62 1.1 msaitoh * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
63 1.1 msaitoh * POSSIBILITY OF SUCH DAMAGE.
64 1.1 msaitoh */
65 1.1 msaitoh
66 1.8 msaitoh #include "opt_inet.h"
67 1.8 msaitoh #include "opt_inet6.h"
68 1.8 msaitoh
69 1.1 msaitoh #include "ixgbe.h"
70 1.1 msaitoh
71 1.1 msaitoh /*
72 1.28 msaitoh  * HW RSC control:
73 1.28 msaitoh  *  This feature only works with
74 1.28 msaitoh  *  IPv4, and only on 82599 and later.
75 1.28 msaitoh  *  It also causes IP forwarding to
76 1.28 msaitoh  *  fail and, unlike LRO, that cannot
77 1.28 msaitoh  *  be controlled by the stack. For
78 1.28 msaitoh  *  these reasons it is best left off,
79 1.28 msaitoh  *  with no tunable interface; enabling
80 1.28 msaitoh  *  it requires recompiling with
81 1.28 msaitoh  *  ixgbe_rsc_enable set to TRUE.
82 1.28 msaitoh  */
83 1.1 msaitoh static bool ixgbe_rsc_enable = FALSE;
84 1.1 msaitoh
85 1.3 msaitoh /*
86 1.28 msaitoh  * For Flow Director: this is the
87 1.28 msaitoh  * TX packet sampling interval for
88 1.28 msaitoh  * the filter pool; with the default
89 1.28 msaitoh  * of 20, every 20th packet is probed.
90 1.28 msaitoh  *
91 1.28 msaitoh  * This feature can be disabled by
92 1.28 msaitoh  * setting this to 0.
93 1.28 msaitoh  */
94 1.3 msaitoh static int atr_sample_rate = 20;
95 1.3 msaitoh
96 1.28 msaitoh /************************************************************************
97 1.3 msaitoh * Local Function prototypes
98 1.28 msaitoh ************************************************************************/
99 1.28 msaitoh static void ixgbe_setup_transmit_ring(struct tx_ring *);
100 1.28 msaitoh static void ixgbe_free_transmit_buffers(struct tx_ring *);
101 1.28 msaitoh static int ixgbe_setup_receive_ring(struct rx_ring *);
102 1.28 msaitoh static void ixgbe_free_receive_buffers(struct rx_ring *);
103 1.28 msaitoh static void ixgbe_rx_checksum(u32, struct mbuf *, u32,
104 1.28 msaitoh struct ixgbe_hw_stats *);
105 1.28 msaitoh static void ixgbe_refresh_mbufs(struct rx_ring *, int);
106 1.38 knakahar static void ixgbe_drain(struct ifnet *, struct tx_ring *);
107 1.28 msaitoh static int ixgbe_xmit(struct tx_ring *, struct mbuf *);
108 1.28 msaitoh static int ixgbe_tx_ctx_setup(struct tx_ring *,
109 1.28 msaitoh struct mbuf *, u32 *, u32 *);
110 1.28 msaitoh static int ixgbe_tso_setup(struct tx_ring *,
111 1.28 msaitoh struct mbuf *, u32 *, u32 *);
112 1.1 msaitoh static __inline void ixgbe_rx_discard(struct rx_ring *, int);
113 1.1 msaitoh static __inline void ixgbe_rx_input(struct rx_ring *, struct ifnet *,
114 1.28 msaitoh struct mbuf *, u32);
115 1.28 msaitoh static int ixgbe_dma_malloc(struct adapter *, bus_size_t,
116 1.28 msaitoh struct ixgbe_dma_alloc *, int);
117 1.28 msaitoh static void ixgbe_dma_free(struct adapter *, struct ixgbe_dma_alloc *);
118 1.1 msaitoh
119 1.1 msaitoh static void ixgbe_setup_hw_rsc(struct rx_ring *);
120 1.1 msaitoh
121 1.28 msaitoh /************************************************************************
122 1.28 msaitoh * ixgbe_legacy_start_locked - Transmit entry point
123 1.1 msaitoh *
124 1.28 msaitoh * Called by the stack to initiate a transmit.
125 1.28 msaitoh * The driver will remain in this routine as long as there are
126 1.28 msaitoh * packets to transmit and transmit resources are available.
127 1.28 msaitoh * In case resources are not available, the stack is notified
128 1.28 msaitoh * and the packet is requeued.
129 1.28 msaitoh ************************************************************************/
130 1.28 msaitoh int
131 1.28 msaitoh ixgbe_legacy_start_locked(struct ifnet *ifp, struct tx_ring *txr)
132 1.1 msaitoh {
133 1.45 msaitoh int rc;
134 1.1 msaitoh struct mbuf *m_head;
135 1.1 msaitoh struct adapter *adapter = txr->adapter;
136 1.1 msaitoh
137 1.1 msaitoh IXGBE_TX_LOCK_ASSERT(txr);
138 1.1 msaitoh
139 1.52 msaitoh if (adapter->link_active != LINK_STATE_UP) {
140 1.38 knakahar /*
141 1.38 knakahar * discard all packets buffered in IFQ to avoid
142 1.38 knakahar * sending old packets at next link up timing.
143 1.38 knakahar */
144 1.38 knakahar ixgbe_drain(ifp, txr);
145 1.38 knakahar return (ENETDOWN);
146 1.38 knakahar }
147 1.1 msaitoh if ((ifp->if_flags & IFF_RUNNING) == 0)
148 1.28 msaitoh return (ENETDOWN);
149 1.47 msaitoh if (txr->txr_no_space)
150 1.47 msaitoh return (ENETDOWN);
151 1.58 msaitoh
152 1.1 msaitoh while (!IFQ_IS_EMPTY(&ifp->if_snd)) {
153 1.1 msaitoh if (txr->tx_avail <= IXGBE_QUEUE_MIN_FREE)
154 1.1 msaitoh break;
155 1.1 msaitoh
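		/*
		 * Poll first so the packet stays on if_snd when
		 * ixgbe_xmit() returns EAGAIN (no descriptors);
		 * dequeue it only once it has been handed to the
		 * ring or must be dropped.
		 */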
156 1.1 msaitoh IFQ_POLL(&ifp->if_snd, m_head);
157 1.1 msaitoh if (m_head == NULL)
158 1.1 msaitoh break;
159 1.1 msaitoh
160 1.1 msaitoh if ((rc = ixgbe_xmit(txr, m_head)) == EAGAIN) {
161 1.1 msaitoh break;
162 1.1 msaitoh }
163 1.1 msaitoh IFQ_DEQUEUE(&ifp->if_snd, m_head);
164 1.1 msaitoh if (rc != 0) {
165 1.1 msaitoh m_freem(m_head);
166 1.1 msaitoh continue;
167 1.1 msaitoh }
168 1.1 msaitoh
169 1.1 msaitoh /* Send a copy of the frame to the BPF listener */
170 1.48 msaitoh bpf_mtap(ifp, m_head, BPF_D_OUT);
171 1.1 msaitoh }
172 1.44 msaitoh
173 1.28 msaitoh return IXGBE_SUCCESS;
174 1.28 msaitoh } /* ixgbe_legacy_start_locked */
175 1.28 msaitoh
176 1.28 msaitoh /************************************************************************
177 1.28 msaitoh * ixgbe_legacy_start
178 1.28 msaitoh *
179 1.28 msaitoh * Called by the stack, this always uses the first tx ring,
180 1.28 msaitoh * and should not be used with multiqueue tx enabled.
181 1.28 msaitoh ************************************************************************/
182 1.1 msaitoh void
183 1.28 msaitoh ixgbe_legacy_start(struct ifnet *ifp)
184 1.1 msaitoh {
185 1.1 msaitoh struct adapter *adapter = ifp->if_softc;
186 1.28 msaitoh struct tx_ring *txr = adapter->tx_rings;
187 1.1 msaitoh
188 1.1 msaitoh if (ifp->if_flags & IFF_RUNNING) {
189 1.1 msaitoh IXGBE_TX_LOCK(txr);
190 1.28 msaitoh ixgbe_legacy_start_locked(ifp, txr);
191 1.1 msaitoh IXGBE_TX_UNLOCK(txr);
192 1.1 msaitoh }
193 1.28 msaitoh } /* ixgbe_legacy_start */
194 1.1 msaitoh
195 1.28 msaitoh /************************************************************************
196 1.28 msaitoh * ixgbe_mq_start - Multiqueue Transmit Entry Point
197 1.28 msaitoh *
198 1.28 msaitoh * (if_transmit function)
199 1.28 msaitoh ************************************************************************/
200 1.1 msaitoh int
201 1.1 msaitoh ixgbe_mq_start(struct ifnet *ifp, struct mbuf *m)
202 1.1 msaitoh {
203 1.1 msaitoh struct adapter *adapter = ifp->if_softc;
204 1.1 msaitoh struct tx_ring *txr;
205 1.50 msaitoh int i;
206 1.28 msaitoh #ifdef RSS
207 1.1 msaitoh uint32_t bucket_id;
208 1.1 msaitoh #endif
209 1.1 msaitoh
210 1.1 msaitoh /*
211 1.1 msaitoh * When doing RSS, map it to the same outbound queue
212 1.1 msaitoh * as the incoming flow would be mapped to.
213 1.1 msaitoh *
214 1.1 msaitoh * If everything is setup correctly, it should be the
215 1.1 msaitoh * same bucket that the current CPU we're on is.
216 1.1 msaitoh */
217 1.28 msaitoh #ifdef RSS
218 1.1 msaitoh if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) {
219 1.28 msaitoh if ((adapter->feat_en & IXGBE_FEATURE_RSS) &&
220 1.28 msaitoh (rss_hash2bucket(m->m_pkthdr.flowid, M_HASHTYPE_GET(m),
221 1.28 msaitoh &bucket_id) == 0)) {
222 1.1 msaitoh i = bucket_id % adapter->num_queues;
223 1.8 msaitoh #ifdef IXGBE_DEBUG
224 1.8 msaitoh if (bucket_id > adapter->num_queues)
225 1.28 msaitoh if_printf(ifp,
226 1.28 msaitoh "bucket_id (%d) > num_queues (%d)\n",
227 1.28 msaitoh bucket_id, adapter->num_queues);
228 1.8 msaitoh #endif
229 1.8 msaitoh } else
230 1.1 msaitoh i = m->m_pkthdr.flowid % adapter->num_queues;
231 1.3 msaitoh } else
232 1.28 msaitoh #endif /* RSS */
233 1.51 knakahar i = (cpu_index(curcpu()) % ncpu) % adapter->num_queues;
234 1.3 msaitoh
235 1.3 msaitoh /* Check for a hung queue and pick alternative */
236 1.54 msaitoh if (((1ULL << i) & adapter->active_queues) == 0)
237 1.18 msaitoh i = ffs64(adapter->active_queues);
238 1.1 msaitoh
239 1.1 msaitoh txr = &adapter->tx_rings[i];
240 1.1 msaitoh
241 1.50 msaitoh if (__predict_false(!pcq_put(txr->txr_interq, m))) {
242 1.18 msaitoh m_freem(m);
243 1.18 msaitoh txr->pcq_drops.ev_count++;
244 1.50 msaitoh return ENOBUFS;
245 1.18 msaitoh }
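	/*
	 * Try to transmit inline; if the TX lock is contended,
	 * defer the work to the softint or workqueue below.
	 */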
246 1.1 msaitoh if (IXGBE_TX_TRYLOCK(txr)) {
247 1.1 msaitoh ixgbe_mq_start_locked(ifp, txr);
248 1.1 msaitoh IXGBE_TX_UNLOCK(txr);
249 1.34 knakahar } else {
250 1.34 knakahar if (adapter->txrx_use_workqueue) {
251 1.44 msaitoh u_int *enqueued;
252 1.44 msaitoh
253 1.34 knakahar 			/*
254 1.34 knakahar 			 * This function itself is not called in interrupt
255 1.34 knakahar 			 * context; however, it can be called in fast softint
256 1.34 knakahar 			 * context right after receiving forwarded packets.
257 1.34 knakahar 			 * The workqueue therefore has to be protected against
258 1.34 knakahar 			 * double enqueueing when the machine handles both
259 1.34 knakahar 			 * locally generated and forwarded packets.
260 1.34 knakahar 			 */
261 1.44 msaitoh enqueued = percpu_getref(adapter->txr_wq_enqueued);
262 1.34 knakahar if (*enqueued == 0) {
263 1.34 knakahar *enqueued = 1;
264 1.34 knakahar percpu_putref(adapter->txr_wq_enqueued);
265 1.44 msaitoh workqueue_enqueue(adapter->txr_wq,
266 1.44 msaitoh &txr->wq_cookie, curcpu());
267 1.34 knakahar } else
268 1.34 knakahar percpu_putref(adapter->txr_wq_enqueued);
269 1.56 knakahar } else {
270 1.56 knakahar kpreempt_disable();
271 1.34 knakahar softint_schedule(txr->txr_si);
272 1.56 knakahar kpreempt_enable();
273 1.56 knakahar }
274 1.34 knakahar }
275 1.1 msaitoh
276 1.1 msaitoh return (0);
277 1.28 msaitoh } /* ixgbe_mq_start */
278 1.1 msaitoh
279 1.28 msaitoh /************************************************************************
280 1.28 msaitoh * ixgbe_mq_start_locked
281 1.28 msaitoh ************************************************************************/
282 1.1 msaitoh int
283 1.1 msaitoh ixgbe_mq_start_locked(struct ifnet *ifp, struct tx_ring *txr)
284 1.1 msaitoh {
285 1.28 msaitoh struct mbuf *next;
286 1.28 msaitoh int enqueued = 0, err = 0;
287 1.1 msaitoh
288 1.52 msaitoh if (txr->adapter->link_active != LINK_STATE_UP) {
289 1.38 knakahar /*
290 1.38 knakahar * discard all packets buffered in txr_interq to avoid
291 1.38 knakahar * sending old packets at next link up timing.
292 1.38 knakahar */
293 1.38 knakahar ixgbe_drain(ifp, txr);
294 1.38 knakahar return (ENETDOWN);
295 1.38 knakahar }
296 1.28 msaitoh if ((ifp->if_flags & IFF_RUNNING) == 0)
297 1.28 msaitoh return (ENETDOWN);
298 1.47 msaitoh if (txr->txr_no_space)
299 1.47 msaitoh return (ENETDOWN);
300 1.1 msaitoh
301 1.1 msaitoh /* Process the queue */
302 1.18 msaitoh while ((next = pcq_get(txr->txr_interq)) != NULL) {
303 1.18 msaitoh if ((err = ixgbe_xmit(txr, next)) != 0) {
304 1.18 msaitoh m_freem(next);
305 1.18 msaitoh /* All errors are counted in ixgbe_xmit() */
306 1.1 msaitoh break;
307 1.1 msaitoh }
308 1.1 msaitoh enqueued++;
309 1.3 msaitoh #if __FreeBSD_version >= 1100036
310 1.4 msaitoh /*
311 1.4 msaitoh * Since we're looking at the tx ring, we can check
312 1.4 msaitoh 		 * to see if we're a VF by examining our tail register
313 1.4 msaitoh * address.
314 1.4 msaitoh */
315 1.28 msaitoh if ((txr->adapter->feat_en & IXGBE_FEATURE_VF) &&
316 1.28 msaitoh (next->m_flags & M_MCAST))
317 1.3 msaitoh if_inc_counter(ifp, IFCOUNTER_OMCASTS, 1);
318 1.3 msaitoh #endif
319 1.1 msaitoh /* Send a copy of the frame to the BPF listener */
320 1.48 msaitoh bpf_mtap(ifp, next, BPF_D_OUT);
321 1.1 msaitoh if ((ifp->if_flags & IFF_RUNNING) == 0)
322 1.1 msaitoh break;
323 1.1 msaitoh }
324 1.1 msaitoh
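	/*
	 * Reclaim completed descriptors when the number available
	 * drops below the cleanup threshold.
	 */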
325 1.28 msaitoh if (txr->tx_avail < IXGBE_TX_CLEANUP_THRESHOLD(txr->adapter))
326 1.1 msaitoh ixgbe_txeof(txr);
327 1.1 msaitoh
328 1.1 msaitoh return (err);
329 1.28 msaitoh } /* ixgbe_mq_start_locked */
330 1.1 msaitoh
331 1.28 msaitoh /************************************************************************
332 1.28 msaitoh * ixgbe_deferred_mq_start
333 1.28 msaitoh *
334 1.34 knakahar * Called from a softint and workqueue (indirectly) to drain queued
335 1.34 knakahar * transmit packets.
336 1.28 msaitoh ************************************************************************/
337 1.1 msaitoh void
338 1.18 msaitoh ixgbe_deferred_mq_start(void *arg)
339 1.1 msaitoh {
340 1.1 msaitoh struct tx_ring *txr = arg;
341 1.1 msaitoh struct adapter *adapter = txr->adapter;
342 1.28 msaitoh struct ifnet *ifp = adapter->ifp;
343 1.1 msaitoh
344 1.1 msaitoh IXGBE_TX_LOCK(txr);
345 1.18 msaitoh if (pcq_peek(txr->txr_interq) != NULL)
346 1.1 msaitoh ixgbe_mq_start_locked(ifp, txr);
347 1.1 msaitoh IXGBE_TX_UNLOCK(txr);
348 1.28 msaitoh } /* ixgbe_deferred_mq_start */
349 1.3 msaitoh
350 1.28 msaitoh /************************************************************************
351 1.34 knakahar * ixgbe_deferred_mq_start_work
352 1.34 knakahar *
353 1.34 knakahar * Called from a workqueue to drain queued transmit packets.
354 1.34 knakahar ************************************************************************/
355 1.34 knakahar void
356 1.34 knakahar ixgbe_deferred_mq_start_work(struct work *wk, void *arg)
357 1.34 knakahar {
358 1.34 knakahar struct tx_ring *txr = container_of(wk, struct tx_ring, wq_cookie);
359 1.34 knakahar struct adapter *adapter = txr->adapter;
360 1.34 knakahar u_int *enqueued = percpu_getref(adapter->txr_wq_enqueued);
361 1.34 knakahar *enqueued = 0;
362 1.34 knakahar percpu_putref(adapter->txr_wq_enqueued);
363 1.34 knakahar
364 1.34 knakahar ixgbe_deferred_mq_start(txr);
365 1.34 knakahar } /* ixgbe_deferred_mq_start_work */
366 1.34 knakahar
367 1.38 knakahar /************************************************************************
368 1.38 knakahar * ixgbe_drain_all
369 1.38 knakahar ************************************************************************/
370 1.38 knakahar void
371 1.38 knakahar ixgbe_drain_all(struct adapter *adapter)
372 1.38 knakahar {
373 1.38 knakahar struct ifnet *ifp = adapter->ifp;
374 1.38 knakahar struct ix_queue *que = adapter->queues;
375 1.38 knakahar
376 1.38 knakahar for (int i = 0; i < adapter->num_queues; i++, que++) {
377 1.38 knakahar struct tx_ring *txr = que->txr;
378 1.38 knakahar
379 1.38 knakahar IXGBE_TX_LOCK(txr);
380 1.38 knakahar ixgbe_drain(ifp, txr);
381 1.38 knakahar IXGBE_TX_UNLOCK(txr);
382 1.38 knakahar }
383 1.38 knakahar }
384 1.34 knakahar
385 1.34 knakahar /************************************************************************
386 1.28 msaitoh * ixgbe_xmit
387 1.1 msaitoh *
388 1.28 msaitoh * Maps the mbufs to tx descriptors, allowing the
389 1.28 msaitoh * TX engine to transmit the packets.
390 1.1 msaitoh *
391 1.28 msaitoh * Return 0 on success, positive on failure
392 1.28 msaitoh ************************************************************************/
393 1.1 msaitoh static int
394 1.1 msaitoh ixgbe_xmit(struct tx_ring *txr, struct mbuf *m_head)
395 1.1 msaitoh {
396 1.28 msaitoh struct adapter *adapter = txr->adapter;
397 1.28 msaitoh struct ixgbe_tx_buf *txbuf;
398 1.1 msaitoh union ixgbe_adv_tx_desc *txd = NULL;
399 1.28 msaitoh struct ifnet *ifp = adapter->ifp;
400 1.28 msaitoh int i, j, error;
401 1.28 msaitoh int first;
402 1.28 msaitoh u32 olinfo_status = 0, cmd_type_len;
403 1.28 msaitoh bool remap = TRUE;
404 1.28 msaitoh bus_dmamap_t map;
405 1.1 msaitoh
406 1.1 msaitoh /* Basic descriptor defines */
407 1.28 msaitoh cmd_type_len = (IXGBE_ADVTXD_DTYP_DATA |
408 1.1 msaitoh IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT);
409 1.1 msaitoh
410 1.29 knakahar if (vlan_has_tag(m_head))
411 1.28 msaitoh cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE;
412 1.1 msaitoh
413 1.28 msaitoh /*
414 1.28 msaitoh * Important to capture the first descriptor
415 1.28 msaitoh * used because it will contain the index of
416 1.28 msaitoh * the one we tell the hardware to report back
417 1.28 msaitoh */
418 1.28 msaitoh first = txr->next_avail_desc;
419 1.1 msaitoh txbuf = &txr->tx_buffers[first];
420 1.1 msaitoh map = txbuf->map;
421 1.1 msaitoh
422 1.1 msaitoh /*
423 1.1 msaitoh * Map the packet for DMA.
424 1.1 msaitoh */
425 1.22 msaitoh retry:
426 1.28 msaitoh error = bus_dmamap_load_mbuf(txr->txtag->dt_dmat, map, m_head,
427 1.28 msaitoh BUS_DMA_NOWAIT);
428 1.1 msaitoh
429 1.1 msaitoh if (__predict_false(error)) {
430 1.22 msaitoh struct mbuf *m;
431 1.1 msaitoh
432 1.1 msaitoh switch (error) {
433 1.1 msaitoh case EAGAIN:
434 1.35 msaitoh txr->q_eagain_tx_dma_setup++;
435 1.1 msaitoh return EAGAIN;
436 1.1 msaitoh case ENOMEM:
437 1.35 msaitoh txr->q_enomem_tx_dma_setup++;
438 1.1 msaitoh return EAGAIN;
439 1.1 msaitoh case EFBIG:
440 1.22 msaitoh /* Try it again? - one try */
441 1.22 msaitoh if (remap == TRUE) {
442 1.22 msaitoh remap = FALSE;
443 1.22 msaitoh /*
444 1.22 msaitoh * XXX: m_defrag will choke on
445 1.22 msaitoh * non-MCLBYTES-sized clusters
446 1.22 msaitoh */
447 1.35 msaitoh txr->q_efbig_tx_dma_setup++;
448 1.22 msaitoh m = m_defrag(m_head, M_NOWAIT);
449 1.22 msaitoh if (m == NULL) {
450 1.35 msaitoh txr->q_mbuf_defrag_failed++;
451 1.22 msaitoh return ENOBUFS;
452 1.22 msaitoh }
453 1.22 msaitoh m_head = m;
454 1.22 msaitoh goto retry;
455 1.22 msaitoh } else {
456 1.35 msaitoh txr->q_efbig2_tx_dma_setup++;
457 1.22 msaitoh return error;
458 1.22 msaitoh }
459 1.1 msaitoh case EINVAL:
460 1.35 msaitoh txr->q_einval_tx_dma_setup++;
461 1.1 msaitoh return error;
462 1.1 msaitoh default:
463 1.35 msaitoh txr->q_other_tx_dma_setup++;
464 1.1 msaitoh return error;
465 1.1 msaitoh }
466 1.1 msaitoh }
467 1.1 msaitoh
468 1.1 msaitoh /* Make certain there are enough descriptors */
469 1.10 msaitoh if (txr->tx_avail < (map->dm_nsegs + 2)) {
470 1.47 msaitoh txr->txr_no_space = true;
471 1.1 msaitoh txr->no_desc_avail.ev_count++;
472 1.1 msaitoh ixgbe_dmamap_unload(txr->txtag, txbuf->map);
473 1.1 msaitoh return EAGAIN;
474 1.1 msaitoh }
475 1.1 msaitoh
476 1.1 msaitoh /*
477 1.4 msaitoh 	 * Set up the appropriate offload context;
478 1.4 msaitoh 	 * this will consume the first descriptor.
479 1.4 msaitoh */
480 1.1 msaitoh error = ixgbe_tx_ctx_setup(txr, m_head, &cmd_type_len, &olinfo_status);
481 1.1 msaitoh if (__predict_false(error)) {
482 1.1 msaitoh return (error);
483 1.1 msaitoh }
484 1.1 msaitoh
485 1.1 msaitoh /* Do the flow director magic */
486 1.28 msaitoh if ((adapter->feat_en & IXGBE_FEATURE_FDIR) &&
487 1.28 msaitoh (txr->atr_sample) && (!adapter->fdir_reinit)) {
488 1.1 msaitoh ++txr->atr_count;
489 1.1 msaitoh if (txr->atr_count >= atr_sample_rate) {
490 1.1 msaitoh ixgbe_atr(txr, m_head);
491 1.1 msaitoh txr->atr_count = 0;
492 1.1 msaitoh }
493 1.1 msaitoh }
494 1.1 msaitoh
495 1.8 msaitoh olinfo_status |= IXGBE_ADVTXD_CC;
496 1.1 msaitoh i = txr->next_avail_desc;
497 1.1 msaitoh for (j = 0; j < map->dm_nsegs; j++) {
498 1.1 msaitoh bus_size_t seglen;
499 1.1 msaitoh bus_addr_t segaddr;
500 1.1 msaitoh
501 1.1 msaitoh txbuf = &txr->tx_buffers[i];
502 1.1 msaitoh txd = &txr->tx_base[i];
503 1.1 msaitoh seglen = map->dm_segs[j].ds_len;
504 1.1 msaitoh segaddr = htole64(map->dm_segs[j].ds_addr);
505 1.1 msaitoh
506 1.1 msaitoh txd->read.buffer_addr = segaddr;
507 1.40 msaitoh txd->read.cmd_type_len = htole32(cmd_type_len | seglen);
508 1.1 msaitoh txd->read.olinfo_status = htole32(olinfo_status);
509 1.1 msaitoh
510 1.1 msaitoh if (++i == txr->num_desc)
511 1.1 msaitoh i = 0;
512 1.1 msaitoh }
513 1.1 msaitoh
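	/*
	 * Mark the last descriptor: end of packet (EOP), and request
	 * status write-back (RS) so ixgbe_txeof() can see the DD bit.
	 */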
514 1.28 msaitoh txd->read.cmd_type_len |= htole32(IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS);
515 1.1 msaitoh txr->tx_avail -= map->dm_nsegs;
516 1.1 msaitoh txr->next_avail_desc = i;
517 1.1 msaitoh
518 1.1 msaitoh txbuf->m_head = m_head;
519 1.1 msaitoh /*
520 1.4 msaitoh * Here we swap the map so the last descriptor,
521 1.4 msaitoh 	 * which gets the completion interrupt, has the
522 1.4 msaitoh * real map, and the first descriptor gets the
523 1.4 msaitoh * unused map from this descriptor.
524 1.4 msaitoh */
525 1.1 msaitoh txr->tx_buffers[first].map = txbuf->map;
526 1.1 msaitoh txbuf->map = map;
527 1.1 msaitoh bus_dmamap_sync(txr->txtag->dt_dmat, map, 0, m_head->m_pkthdr.len,
528 1.1 msaitoh BUS_DMASYNC_PREWRITE);
529 1.1 msaitoh
530 1.28 msaitoh /* Set the EOP descriptor that will be marked done */
531 1.28 msaitoh txbuf = &txr->tx_buffers[first];
532 1.1 msaitoh txbuf->eop = txd;
533 1.1 msaitoh
534 1.28 msaitoh ixgbe_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
535 1.1 msaitoh BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
536 1.1 msaitoh /*
537 1.1 msaitoh * Advance the Transmit Descriptor Tail (Tdt), this tells the
538 1.1 msaitoh * hardware that this frame is available to transmit.
539 1.1 msaitoh */
540 1.1 msaitoh ++txr->total_packets.ev_count;
541 1.3 msaitoh IXGBE_WRITE_REG(&adapter->hw, txr->tail, i);
542 1.3 msaitoh
543 1.61 thorpej net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
544 1.61 thorpej if_statadd_ref(nsr, if_obytes, m_head->m_pkthdr.len);
545 1.23 msaitoh if (m_head->m_flags & M_MCAST)
546 1.61 thorpej if_statinc_ref(nsr, if_omcasts);
547 1.61 thorpej IF_STAT_PUTREF(ifp);
548 1.23 msaitoh
549 1.45 msaitoh /* Mark queue as having work */
550 1.45 msaitoh if (txr->busy == 0)
551 1.45 msaitoh txr->busy = 1;
552 1.45 msaitoh
553 1.28 msaitoh return (0);
554 1.28 msaitoh } /* ixgbe_xmit */
555 1.1 msaitoh
556 1.38 knakahar /************************************************************************
557 1.38 knakahar * ixgbe_drain
558 1.38 knakahar ************************************************************************/
559 1.38 knakahar static void
560 1.38 knakahar ixgbe_drain(struct ifnet *ifp, struct tx_ring *txr)
561 1.38 knakahar {
562 1.38 knakahar struct mbuf *m;
563 1.38 knakahar
564 1.38 knakahar IXGBE_TX_LOCK_ASSERT(txr);
565 1.38 knakahar
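	/*
	 * The if_snd queue is only used by the legacy (non-multiqueue)
	 * transmit path, which always uses the first ring, so drain it
	 * for queue 0 only.
	 */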
566 1.38 knakahar if (txr->me == 0) {
567 1.38 knakahar while (!IFQ_IS_EMPTY(&ifp->if_snd)) {
568 1.38 knakahar IFQ_DEQUEUE(&ifp->if_snd, m);
569 1.38 knakahar m_freem(m);
570 1.38 knakahar IF_DROP(&ifp->if_snd);
571 1.38 knakahar }
572 1.38 knakahar }
573 1.38 knakahar
574 1.38 knakahar while ((m = pcq_get(txr->txr_interq)) != NULL) {
575 1.38 knakahar m_freem(m);
576 1.38 knakahar txr->pcq_drops.ev_count++;
577 1.38 knakahar }
578 1.38 knakahar }
579 1.16 msaitoh
580 1.28 msaitoh /************************************************************************
581 1.28 msaitoh * ixgbe_allocate_transmit_buffers
582 1.1 msaitoh *
583 1.28 msaitoh * Allocate memory for tx_buffer structures. The tx_buffer stores all
584 1.28 msaitoh * the information needed to transmit a packet on the wire. This is
585 1.28 msaitoh  * called only once at attach; setup is done on every reset.
586 1.28 msaitoh ************************************************************************/
587 1.28 msaitoh static int
588 1.1 msaitoh ixgbe_allocate_transmit_buffers(struct tx_ring *txr)
589 1.1 msaitoh {
590 1.28 msaitoh struct adapter *adapter = txr->adapter;
591 1.28 msaitoh device_t dev = adapter->dev;
592 1.1 msaitoh struct ixgbe_tx_buf *txbuf;
593 1.28 msaitoh int error, i;
594 1.1 msaitoh
595 1.1 msaitoh /*
596 1.1 msaitoh * Setup DMA descriptor areas.
597 1.1 msaitoh */
598 1.28 msaitoh error = ixgbe_dma_tag_create(
599 1.28 msaitoh /* parent */ adapter->osdep.dmat,
600 1.28 msaitoh /* alignment */ 1,
601 1.28 msaitoh /* bounds */ 0,
602 1.28 msaitoh /* maxsize */ IXGBE_TSO_SIZE,
603 1.28 msaitoh /* nsegments */ adapter->num_segs,
604 1.28 msaitoh /* maxsegsize */ PAGE_SIZE,
605 1.28 msaitoh /* flags */ 0,
606 1.28 msaitoh &txr->txtag);
607 1.28 msaitoh if (error != 0) {
608 1.1 msaitoh aprint_error_dev(dev,"Unable to allocate TX DMA tag\n");
609 1.1 msaitoh goto fail;
610 1.1 msaitoh }
611 1.1 msaitoh
612 1.57 chs txr->tx_buffers = malloc(sizeof(struct ixgbe_tx_buf) *
613 1.57 chs adapter->num_tx_desc, M_DEVBUF, M_WAITOK | M_ZERO);
614 1.1 msaitoh
615 1.28 msaitoh /* Create the descriptor buffer dma maps */
616 1.1 msaitoh txbuf = txr->tx_buffers;
617 1.1 msaitoh for (i = 0; i < adapter->num_tx_desc; i++, txbuf++) {
618 1.1 msaitoh error = ixgbe_dmamap_create(txr->txtag, 0, &txbuf->map);
619 1.1 msaitoh if (error != 0) {
620 1.1 msaitoh aprint_error_dev(dev,
621 1.1 msaitoh "Unable to create TX DMA map (%d)\n", error);
622 1.1 msaitoh goto fail;
623 1.1 msaitoh }
624 1.1 msaitoh }
625 1.1 msaitoh
626 1.1 msaitoh return 0;
627 1.1 msaitoh fail:
628 1.1 msaitoh /* We free all, it handles case where we are in the middle */
629 1.15 msaitoh #if 0 /* XXX was FreeBSD */
630 1.1 msaitoh ixgbe_free_transmit_structures(adapter);
631 1.15 msaitoh #else
632 1.15 msaitoh ixgbe_free_transmit_buffers(txr);
633 1.15 msaitoh #endif
634 1.1 msaitoh return (error);
635 1.28 msaitoh } /* ixgbe_allocate_transmit_buffers */
636 1.1 msaitoh
637 1.28 msaitoh /************************************************************************
638 1.28 msaitoh * ixgbe_setup_transmit_ring - Initialize a transmit ring.
639 1.28 msaitoh ************************************************************************/
640 1.1 msaitoh static void
641 1.1 msaitoh ixgbe_setup_transmit_ring(struct tx_ring *txr)
642 1.1 msaitoh {
643 1.28 msaitoh struct adapter *adapter = txr->adapter;
644 1.28 msaitoh struct ixgbe_tx_buf *txbuf;
645 1.1 msaitoh #ifdef DEV_NETMAP
646 1.1 msaitoh struct netmap_adapter *na = NA(adapter->ifp);
647 1.28 msaitoh struct netmap_slot *slot;
648 1.1 msaitoh #endif /* DEV_NETMAP */
649 1.1 msaitoh
650 1.1 msaitoh /* Clear the old ring contents */
651 1.1 msaitoh IXGBE_TX_LOCK(txr);
652 1.28 msaitoh
653 1.1 msaitoh #ifdef DEV_NETMAP
654 1.28 msaitoh if (adapter->feat_en & IXGBE_FEATURE_NETMAP) {
655 1.28 msaitoh /*
656 1.28 msaitoh * (under lock): if in netmap mode, do some consistency
657 1.28 msaitoh * checks and set slot to entry 0 of the netmap ring.
658 1.28 msaitoh */
659 1.28 msaitoh slot = netmap_reset(na, NR_TX, txr->me, 0);
660 1.28 msaitoh }
661 1.1 msaitoh #endif /* DEV_NETMAP */
662 1.28 msaitoh
663 1.1 msaitoh bzero((void *)txr->tx_base,
664 1.28 msaitoh (sizeof(union ixgbe_adv_tx_desc)) * adapter->num_tx_desc);
665 1.1 msaitoh /* Reset indices */
666 1.1 msaitoh txr->next_avail_desc = 0;
667 1.1 msaitoh txr->next_to_clean = 0;
668 1.1 msaitoh
669 1.1 msaitoh /* Free any existing tx buffers. */
670 1.28 msaitoh txbuf = txr->tx_buffers;
671 1.5 msaitoh for (int i = 0; i < txr->num_desc; i++, txbuf++) {
672 1.1 msaitoh if (txbuf->m_head != NULL) {
673 1.1 msaitoh bus_dmamap_sync(txr->txtag->dt_dmat, txbuf->map,
674 1.1 msaitoh 0, txbuf->m_head->m_pkthdr.len,
675 1.1 msaitoh BUS_DMASYNC_POSTWRITE);
676 1.1 msaitoh ixgbe_dmamap_unload(txr->txtag, txbuf->map);
677 1.1 msaitoh m_freem(txbuf->m_head);
678 1.1 msaitoh txbuf->m_head = NULL;
679 1.1 msaitoh }
680 1.28 msaitoh
681 1.1 msaitoh #ifdef DEV_NETMAP
682 1.1 msaitoh /*
683 1.1 msaitoh * In netmap mode, set the map for the packet buffer.
684 1.1 msaitoh * NOTE: Some drivers (not this one) also need to set
685 1.1 msaitoh * the physical buffer address in the NIC ring.
686 1.1 msaitoh * Slots in the netmap ring (indexed by "si") are
687 1.1 msaitoh * kring->nkr_hwofs positions "ahead" wrt the
688 1.1 msaitoh * corresponding slot in the NIC ring. In some drivers
689 1.1 msaitoh * (not here) nkr_hwofs can be negative. Function
690 1.1 msaitoh * netmap_idx_n2k() handles wraparounds properly.
691 1.1 msaitoh */
692 1.28 msaitoh if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) && slot) {
693 1.53 msaitoh int si = netmap_idx_n2k(na->tx_rings[txr->me], i);
694 1.5 msaitoh netmap_load_map(na, txr->txtag,
695 1.5 msaitoh txbuf->map, NMB(na, slot + si));
696 1.1 msaitoh }
697 1.1 msaitoh #endif /* DEV_NETMAP */
698 1.28 msaitoh
699 1.1 msaitoh /* Clear the EOP descriptor pointer */
700 1.1 msaitoh txbuf->eop = NULL;
701 1.28 msaitoh }
702 1.1 msaitoh
703 1.1 msaitoh /* Set the rate at which we sample packets */
704 1.28 msaitoh if (adapter->feat_en & IXGBE_FEATURE_FDIR)
705 1.1 msaitoh txr->atr_sample = atr_sample_rate;
706 1.1 msaitoh
707 1.1 msaitoh /* Set number of descriptors available */
708 1.1 msaitoh txr->tx_avail = adapter->num_tx_desc;
709 1.1 msaitoh
710 1.1 msaitoh ixgbe_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
711 1.1 msaitoh BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
712 1.1 msaitoh IXGBE_TX_UNLOCK(txr);
713 1.28 msaitoh } /* ixgbe_setup_transmit_ring */
714 1.1 msaitoh
715 1.28 msaitoh /************************************************************************
716 1.28 msaitoh * ixgbe_setup_transmit_structures - Initialize all transmit rings.
717 1.28 msaitoh ************************************************************************/
718 1.1 msaitoh int
719 1.1 msaitoh ixgbe_setup_transmit_structures(struct adapter *adapter)
720 1.1 msaitoh {
721 1.1 msaitoh struct tx_ring *txr = adapter->tx_rings;
722 1.1 msaitoh
723 1.1 msaitoh for (int i = 0; i < adapter->num_queues; i++, txr++)
724 1.1 msaitoh ixgbe_setup_transmit_ring(txr);
725 1.1 msaitoh
726 1.1 msaitoh return (0);
727 1.28 msaitoh } /* ixgbe_setup_transmit_structures */
728 1.1 msaitoh
729 1.28 msaitoh /************************************************************************
730 1.28 msaitoh * ixgbe_free_transmit_structures - Free all transmit rings.
731 1.28 msaitoh ************************************************************************/
732 1.1 msaitoh void
733 1.1 msaitoh ixgbe_free_transmit_structures(struct adapter *adapter)
734 1.1 msaitoh {
735 1.1 msaitoh struct tx_ring *txr = adapter->tx_rings;
736 1.1 msaitoh
737 1.1 msaitoh for (int i = 0; i < adapter->num_queues; i++, txr++) {
738 1.1 msaitoh ixgbe_free_transmit_buffers(txr);
739 1.1 msaitoh ixgbe_dma_free(adapter, &txr->txdma);
740 1.1 msaitoh IXGBE_TX_LOCK_DESTROY(txr);
741 1.1 msaitoh }
742 1.1 msaitoh free(adapter->tx_rings, M_DEVBUF);
743 1.28 msaitoh } /* ixgbe_free_transmit_structures */
744 1.1 msaitoh
745 1.28 msaitoh /************************************************************************
746 1.28 msaitoh * ixgbe_free_transmit_buffers
747 1.1 msaitoh *
748 1.28 msaitoh * Free transmit ring related data structures.
749 1.28 msaitoh ************************************************************************/
750 1.1 msaitoh static void
751 1.1 msaitoh ixgbe_free_transmit_buffers(struct tx_ring *txr)
752 1.1 msaitoh {
753 1.28 msaitoh struct adapter *adapter = txr->adapter;
754 1.1 msaitoh struct ixgbe_tx_buf *tx_buffer;
755 1.28 msaitoh int i;
756 1.1 msaitoh
757 1.14 msaitoh INIT_DEBUGOUT("ixgbe_free_transmit_buffers: begin");
758 1.1 msaitoh
759 1.1 msaitoh if (txr->tx_buffers == NULL)
760 1.1 msaitoh return;
761 1.1 msaitoh
762 1.1 msaitoh tx_buffer = txr->tx_buffers;
763 1.1 msaitoh for (i = 0; i < adapter->num_tx_desc; i++, tx_buffer++) {
764 1.1 msaitoh if (tx_buffer->m_head != NULL) {
765 1.1 msaitoh bus_dmamap_sync(txr->txtag->dt_dmat, tx_buffer->map,
766 1.1 msaitoh 0, tx_buffer->m_head->m_pkthdr.len,
767 1.1 msaitoh BUS_DMASYNC_POSTWRITE);
768 1.1 msaitoh ixgbe_dmamap_unload(txr->txtag, tx_buffer->map);
769 1.1 msaitoh m_freem(tx_buffer->m_head);
770 1.1 msaitoh tx_buffer->m_head = NULL;
771 1.1 msaitoh if (tx_buffer->map != NULL) {
772 1.1 msaitoh ixgbe_dmamap_destroy(txr->txtag,
773 1.1 msaitoh tx_buffer->map);
774 1.1 msaitoh tx_buffer->map = NULL;
775 1.1 msaitoh }
776 1.1 msaitoh } else if (tx_buffer->map != NULL) {
777 1.1 msaitoh ixgbe_dmamap_unload(txr->txtag, tx_buffer->map);
778 1.1 msaitoh ixgbe_dmamap_destroy(txr->txtag, tx_buffer->map);
779 1.1 msaitoh tx_buffer->map = NULL;
780 1.1 msaitoh }
781 1.1 msaitoh }
782 1.18 msaitoh if (txr->txr_interq != NULL) {
783 1.18 msaitoh struct mbuf *m;
784 1.18 msaitoh
785 1.18 msaitoh while ((m = pcq_get(txr->txr_interq)) != NULL)
786 1.18 msaitoh m_freem(m);
787 1.18 msaitoh pcq_destroy(txr->txr_interq);
788 1.18 msaitoh }
789 1.1 msaitoh if (txr->tx_buffers != NULL) {
790 1.1 msaitoh free(txr->tx_buffers, M_DEVBUF);
791 1.1 msaitoh txr->tx_buffers = NULL;
792 1.1 msaitoh }
793 1.1 msaitoh if (txr->txtag != NULL) {
794 1.1 msaitoh ixgbe_dma_tag_destroy(txr->txtag);
795 1.1 msaitoh txr->txtag = NULL;
796 1.1 msaitoh }
797 1.28 msaitoh } /* ixgbe_free_transmit_buffers */
798 1.1 msaitoh
799 1.28 msaitoh /************************************************************************
800 1.28 msaitoh * ixgbe_tx_ctx_setup
801 1.1 msaitoh *
802 1.28 msaitoh * Advanced Context Descriptor setup for VLAN, CSUM or TSO
803 1.28 msaitoh ************************************************************************/
804 1.1 msaitoh static int
805 1.1 msaitoh ixgbe_tx_ctx_setup(struct tx_ring *txr, struct mbuf *mp,
806 1.1 msaitoh u32 *cmd_type_len, u32 *olinfo_status)
807 1.1 msaitoh {
808 1.28 msaitoh struct adapter *adapter = txr->adapter;
809 1.1 msaitoh struct ixgbe_adv_tx_context_desc *TXD;
810 1.28 msaitoh struct ether_vlan_header *eh;
811 1.8 msaitoh #ifdef INET
812 1.28 msaitoh struct ip *ip;
813 1.8 msaitoh #endif
814 1.8 msaitoh #ifdef INET6
815 1.28 msaitoh struct ip6_hdr *ip6;
816 1.8 msaitoh #endif
817 1.28 msaitoh int ehdrlen, ip_hlen = 0;
818 1.28 msaitoh int offload = TRUE;
819 1.28 msaitoh int ctxd = txr->next_avail_desc;
820 1.28 msaitoh u32 vlan_macip_lens = 0;
821 1.28 msaitoh u32 type_tucmd_mlhl = 0;
822 1.28 msaitoh u16 vtag = 0;
823 1.28 msaitoh u16 etype;
824 1.28 msaitoh u8 ipproto = 0;
825 1.28 msaitoh char *l3d;
826 1.8 msaitoh
827 1.1 msaitoh
828 1.1 msaitoh /* First check if TSO is to be used */
829 1.28 msaitoh if (mp->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) {
830 1.17 msaitoh int rv = ixgbe_tso_setup(txr, mp, cmd_type_len, olinfo_status);
831 1.17 msaitoh
832 1.21 msaitoh if (rv != 0)
833 1.17 msaitoh ++adapter->tso_err.ev_count;
834 1.21 msaitoh return rv;
835 1.17 msaitoh }
836 1.1 msaitoh
837 1.1 msaitoh if ((mp->m_pkthdr.csum_flags & M_CSUM_OFFLOAD) == 0)
838 1.1 msaitoh offload = FALSE;
839 1.1 msaitoh
840 1.1 msaitoh /* Indicate the whole packet as payload when not doing TSO */
841 1.28 msaitoh *olinfo_status |= mp->m_pkthdr.len << IXGBE_ADVTXD_PAYLEN_SHIFT;
842 1.1 msaitoh
843 1.1 msaitoh /* Now ready a context descriptor */
844 1.28 msaitoh TXD = (struct ixgbe_adv_tx_context_desc *)&txr->tx_base[ctxd];
845 1.1 msaitoh
846 1.1 msaitoh /*
847 1.28 msaitoh * In advanced descriptors the vlan tag must
848 1.28 msaitoh * be placed into the context descriptor. Hence
849 1.28 msaitoh * we need to make one even if not doing offloads.
850 1.28 msaitoh */
851 1.29 knakahar if (vlan_has_tag(mp)) {
852 1.29 knakahar vtag = htole16(vlan_get_tag(mp));
853 1.1 msaitoh vlan_macip_lens |= (vtag << IXGBE_ADVTXD_VLAN_SHIFT);
854 1.28 msaitoh } else if (!(txr->adapter->feat_en & IXGBE_FEATURE_NEEDS_CTXD) &&
855 1.28 msaitoh (offload == FALSE))
856 1.4 msaitoh return (0);
857 1.1 msaitoh
858 1.1 msaitoh /*
859 1.1 msaitoh * Determine where frame payload starts.
860 1.1 msaitoh * Jump over vlan headers if already present,
861 1.1 msaitoh * helpful for QinQ too.
862 1.1 msaitoh */
863 1.1 msaitoh KASSERT(mp->m_len >= offsetof(struct ether_vlan_header, evl_tag));
864 1.1 msaitoh eh = mtod(mp, struct ether_vlan_header *);
865 1.1 msaitoh if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
866 1.1 msaitoh KASSERT(mp->m_len >= sizeof(struct ether_vlan_header));
867 1.1 msaitoh etype = ntohs(eh->evl_proto);
868 1.1 msaitoh ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
869 1.1 msaitoh } else {
870 1.1 msaitoh etype = ntohs(eh->evl_encap_proto);
871 1.1 msaitoh ehdrlen = ETHER_HDR_LEN;
872 1.1 msaitoh }
873 1.1 msaitoh
874 1.1 msaitoh /* Set the ether header length */
875 1.1 msaitoh vlan_macip_lens |= ehdrlen << IXGBE_ADVTXD_MACLEN_SHIFT;
876 1.1 msaitoh
877 1.3 msaitoh if (offload == FALSE)
878 1.3 msaitoh goto no_offloads;
879 1.3 msaitoh
880 1.8 msaitoh /*
881 1.28 msaitoh * If the first mbuf only includes the ethernet header,
882 1.28 msaitoh * jump to the next one
883 1.28 msaitoh * XXX: This assumes the stack splits mbufs containing headers
884 1.28 msaitoh * on header boundaries
885 1.8 msaitoh * XXX: And assumes the entire IP header is contained in one mbuf
886 1.8 msaitoh */
887 1.8 msaitoh if (mp->m_len == ehdrlen && mp->m_next)
888 1.8 msaitoh l3d = mtod(mp->m_next, char *);
889 1.8 msaitoh else
890 1.8 msaitoh l3d = mtod(mp, char *) + ehdrlen;
891 1.8 msaitoh
892 1.1 msaitoh switch (etype) {
893 1.9 msaitoh #ifdef INET
894 1.1 msaitoh case ETHERTYPE_IP:
895 1.8 msaitoh ip = (struct ip *)(l3d);
896 1.8 msaitoh ip_hlen = ip->ip_hl << 2;
897 1.8 msaitoh ipproto = ip->ip_p;
898 1.8 msaitoh type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
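		/*
		 * The stack is expected to leave ip_sum zeroed when it
		 * requests IPv4 checksum offload; the KASSERT below
		 * checks this.
		 */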
899 1.1 msaitoh KASSERT((mp->m_pkthdr.csum_flags & M_CSUM_IPv4) == 0 ||
900 1.8 msaitoh ip->ip_sum == 0);
901 1.1 msaitoh break;
902 1.9 msaitoh #endif
903 1.9 msaitoh #ifdef INET6
904 1.1 msaitoh case ETHERTYPE_IPV6:
905 1.8 msaitoh ip6 = (struct ip6_hdr *)(l3d);
906 1.8 msaitoh ip_hlen = sizeof(struct ip6_hdr);
907 1.8 msaitoh ipproto = ip6->ip6_nxt;
908 1.1 msaitoh type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV6;
909 1.1 msaitoh break;
910 1.9 msaitoh #endif
911 1.1 msaitoh default:
912 1.11 msaitoh offload = false;
913 1.1 msaitoh break;
914 1.1 msaitoh }
915 1.1 msaitoh
916 1.1 msaitoh if ((mp->m_pkthdr.csum_flags & M_CSUM_IPv4) != 0)
917 1.1 msaitoh *olinfo_status |= IXGBE_TXD_POPTS_IXSM << 8;
918 1.1 msaitoh
919 1.1 msaitoh vlan_macip_lens |= ip_hlen;
920 1.1 msaitoh
921 1.8 msaitoh /* No support for offloads for non-L4 next headers */
922 1.63 msaitoh switch (ipproto) {
923 1.36 msaitoh case IPPROTO_TCP:
924 1.36 msaitoh if (mp->m_pkthdr.csum_flags &
925 1.36 msaitoh (M_CSUM_TCPv4 | M_CSUM_TCPv6))
926 1.36 msaitoh type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
927 1.36 msaitoh else
928 1.36 msaitoh offload = false;
929 1.36 msaitoh break;
930 1.36 msaitoh case IPPROTO_UDP:
931 1.36 msaitoh if (mp->m_pkthdr.csum_flags &
932 1.36 msaitoh (M_CSUM_UDPv4 | M_CSUM_UDPv6))
933 1.36 msaitoh type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_UDP;
934 1.36 msaitoh else
935 1.11 msaitoh offload = false;
936 1.36 msaitoh break;
937 1.36 msaitoh default:
938 1.36 msaitoh offload = false;
939 1.36 msaitoh break;
940 1.8 msaitoh }
941 1.8 msaitoh
942 1.8 msaitoh if (offload) /* Insert L4 checksum into data descriptors */
943 1.1 msaitoh *olinfo_status |= IXGBE_TXD_POPTS_TXSM << 8;
944 1.1 msaitoh
945 1.3 msaitoh no_offloads:
946 1.3 msaitoh type_tucmd_mlhl |= IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
947 1.3 msaitoh
948 1.1 msaitoh /* Now copy bits into descriptor */
949 1.1 msaitoh TXD->vlan_macip_lens = htole32(vlan_macip_lens);
950 1.1 msaitoh TXD->type_tucmd_mlhl = htole32(type_tucmd_mlhl);
951 1.1 msaitoh TXD->seqnum_seed = htole32(0);
952 1.1 msaitoh TXD->mss_l4len_idx = htole32(0);
953 1.1 msaitoh
954 1.1 msaitoh /* We've consumed the first desc, adjust counters */
955 1.1 msaitoh if (++ctxd == txr->num_desc)
956 1.1 msaitoh ctxd = 0;
957 1.1 msaitoh txr->next_avail_desc = ctxd;
958 1.1 msaitoh --txr->tx_avail;
959 1.1 msaitoh
960 1.28 msaitoh return (0);
961 1.28 msaitoh } /* ixgbe_tx_ctx_setup */
962 1.1 msaitoh
963 1.28 msaitoh /************************************************************************
964 1.28 msaitoh * ixgbe_tso_setup
965 1.1 msaitoh *
966 1.28 msaitoh * Setup work for hardware segmentation offload (TSO) on
967 1.28 msaitoh * adapters using advanced tx descriptors
968 1.28 msaitoh ************************************************************************/
969 1.1 msaitoh static int
970 1.28 msaitoh ixgbe_tso_setup(struct tx_ring *txr, struct mbuf *mp, u32 *cmd_type_len,
971 1.28 msaitoh u32 *olinfo_status)
972 1.1 msaitoh {
973 1.1 msaitoh struct ixgbe_adv_tx_context_desc *TXD;
974 1.28 msaitoh struct ether_vlan_header *eh;
975 1.1 msaitoh #ifdef INET6
976 1.28 msaitoh struct ip6_hdr *ip6;
977 1.1 msaitoh #endif
978 1.1 msaitoh #ifdef INET
979 1.28 msaitoh struct ip *ip;
980 1.1 msaitoh #endif
981 1.28 msaitoh struct tcphdr *th;
982 1.28 msaitoh int ctxd, ehdrlen, ip_hlen, tcp_hlen;
983 1.28 msaitoh u32 vlan_macip_lens = 0;
984 1.28 msaitoh u32 type_tucmd_mlhl = 0;
985 1.28 msaitoh u32 mss_l4len_idx = 0, paylen;
986 1.28 msaitoh u16 vtag = 0, eh_type;
987 1.1 msaitoh
988 1.1 msaitoh /*
989 1.1 msaitoh * Determine where frame payload starts.
990 1.1 msaitoh * Jump over vlan headers if already present
991 1.1 msaitoh */
992 1.1 msaitoh eh = mtod(mp, struct ether_vlan_header *);
993 1.1 msaitoh if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
994 1.1 msaitoh ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
995 1.1 msaitoh eh_type = eh->evl_proto;
996 1.1 msaitoh } else {
997 1.1 msaitoh ehdrlen = ETHER_HDR_LEN;
998 1.1 msaitoh eh_type = eh->evl_encap_proto;
999 1.1 msaitoh }
1000 1.1 msaitoh
1001 1.1 msaitoh switch (ntohs(eh_type)) {
1002 1.1 msaitoh #ifdef INET
1003 1.1 msaitoh case ETHERTYPE_IP:
1004 1.1 msaitoh ip = (struct ip *)(mp->m_data + ehdrlen);
1005 1.1 msaitoh if (ip->ip_p != IPPROTO_TCP)
1006 1.1 msaitoh return (ENXIO);
1007 1.1 msaitoh ip->ip_sum = 0;
1008 1.1 msaitoh ip_hlen = ip->ip_hl << 2;
1009 1.1 msaitoh th = (struct tcphdr *)((char *)ip + ip_hlen);
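		/*
		 * Seed th_sum with the IPv4 pseudo-header checksum; the
		 * hardware completes the TCP checksum for each TSO segment.
		 */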
1010 1.1 msaitoh th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
1011 1.1 msaitoh ip->ip_dst.s_addr, htons(IPPROTO_TCP));
1012 1.1 msaitoh type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
1013 1.1 msaitoh /* Tell transmit desc to also do IPv4 checksum. */
1014 1.1 msaitoh *olinfo_status |= IXGBE_TXD_POPTS_IXSM << 8;
1015 1.1 msaitoh break;
1016 1.1 msaitoh #endif
1017 1.28 msaitoh #ifdef INET6
1018 1.28 msaitoh case ETHERTYPE_IPV6:
1019 1.28 msaitoh ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
1020 1.28 msaitoh /* XXX-BZ For now we do not pretend to support ext. hdrs. */
1021 1.28 msaitoh if (ip6->ip6_nxt != IPPROTO_TCP)
1022 1.28 msaitoh return (ENXIO);
1023 1.28 msaitoh ip_hlen = sizeof(struct ip6_hdr);
1024 1.28 msaitoh ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
1025 1.28 msaitoh th = (struct tcphdr *)((char *)ip6 + ip_hlen);
1026 1.28 msaitoh th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
1027 1.28 msaitoh &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
1028 1.28 msaitoh type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV6;
1029 1.28 msaitoh break;
1030 1.28 msaitoh #endif
1031 1.1 msaitoh default:
1032 1.1 msaitoh panic("%s: CSUM_TSO but no supported IP version (0x%04x)",
1033 1.1 msaitoh __func__, ntohs(eh_type));
1034 1.1 msaitoh break;
1035 1.1 msaitoh }
1036 1.1 msaitoh
1037 1.1 msaitoh ctxd = txr->next_avail_desc;
1038 1.28 msaitoh TXD = (struct ixgbe_adv_tx_context_desc *)&txr->tx_base[ctxd];
1039 1.1 msaitoh
1040 1.1 msaitoh tcp_hlen = th->th_off << 2;
1041 1.1 msaitoh
1042 1.1 msaitoh /* This is used in the transmit desc in encap */
1043 1.1 msaitoh paylen = mp->m_pkthdr.len - ehdrlen - ip_hlen - tcp_hlen;
1044 1.1 msaitoh
1045 1.1 msaitoh /* VLAN MACLEN IPLEN */
1046 1.29 knakahar if (vlan_has_tag(mp)) {
1047 1.29 knakahar vtag = htole16(vlan_get_tag(mp));
1048 1.28 msaitoh vlan_macip_lens |= (vtag << IXGBE_ADVTXD_VLAN_SHIFT);
1049 1.1 msaitoh }
1050 1.1 msaitoh
1051 1.1 msaitoh vlan_macip_lens |= ehdrlen << IXGBE_ADVTXD_MACLEN_SHIFT;
1052 1.1 msaitoh vlan_macip_lens |= ip_hlen;
1053 1.1 msaitoh TXD->vlan_macip_lens = htole32(vlan_macip_lens);
1054 1.1 msaitoh
1055 1.1 msaitoh /* ADV DTYPE TUCMD */
1056 1.1 msaitoh type_tucmd_mlhl |= IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
1057 1.1 msaitoh type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
1058 1.1 msaitoh TXD->type_tucmd_mlhl = htole32(type_tucmd_mlhl);
1059 1.1 msaitoh
1060 1.1 msaitoh /* MSS L4LEN IDX */
1061 1.1 msaitoh mss_l4len_idx |= (mp->m_pkthdr.segsz << IXGBE_ADVTXD_MSS_SHIFT);
1062 1.1 msaitoh mss_l4len_idx |= (tcp_hlen << IXGBE_ADVTXD_L4LEN_SHIFT);
1063 1.1 msaitoh TXD->mss_l4len_idx = htole32(mss_l4len_idx);
1064 1.1 msaitoh
1065 1.1 msaitoh TXD->seqnum_seed = htole32(0);
1066 1.1 msaitoh
1067 1.1 msaitoh if (++ctxd == txr->num_desc)
1068 1.1 msaitoh ctxd = 0;
1069 1.1 msaitoh
1070 1.1 msaitoh txr->tx_avail--;
1071 1.1 msaitoh txr->next_avail_desc = ctxd;
1072 1.1 msaitoh *cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;
1073 1.1 msaitoh *olinfo_status |= IXGBE_TXD_POPTS_TXSM << 8;
1074 1.1 msaitoh *olinfo_status |= paylen << IXGBE_ADVTXD_PAYLEN_SHIFT;
1075 1.1 msaitoh ++txr->tso_tx.ev_count;
1076 1.28 msaitoh
1077 1.1 msaitoh return (0);
1078 1.28 msaitoh } /* ixgbe_tso_setup */
1079 1.1 msaitoh
1080 1.3 msaitoh
1081 1.28 msaitoh /************************************************************************
1082 1.28 msaitoh * ixgbe_txeof
1083 1.1 msaitoh *
1084 1.28 msaitoh * Examine each tx_buffer in the used queue. If the hardware is done
1085 1.28 msaitoh * processing the packet then free associated resources. The
1086 1.28 msaitoh * tx_buffer is put back on the free queue.
1087 1.28 msaitoh ************************************************************************/
1088 1.32 msaitoh bool
1089 1.1 msaitoh ixgbe_txeof(struct tx_ring *txr)
1090 1.1 msaitoh {
1091 1.1 msaitoh struct adapter *adapter = txr->adapter;
1092 1.1 msaitoh struct ifnet *ifp = adapter->ifp;
1093 1.28 msaitoh struct ixgbe_tx_buf *buf;
1094 1.28 msaitoh union ixgbe_adv_tx_desc *txd;
1095 1.1 msaitoh u32 work, processed = 0;
1096 1.7 msaitoh u32 limit = adapter->tx_process_limit;
1097 1.1 msaitoh
1098 1.1 msaitoh KASSERT(mutex_owned(&txr->tx_mtx));
1099 1.1 msaitoh
1100 1.1 msaitoh #ifdef DEV_NETMAP
1101 1.28 msaitoh if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) &&
1102 1.28 msaitoh (adapter->ifp->if_capenable & IFCAP_NETMAP)) {
1103 1.28 msaitoh struct netmap_adapter *na = NA(adapter->ifp);
1104 1.53 msaitoh struct netmap_kring *kring = na->tx_rings[txr->me];
1105 1.1 msaitoh txd = txr->tx_base;
1106 1.1 msaitoh bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
1107 1.1 msaitoh BUS_DMASYNC_POSTREAD);
1108 1.1 msaitoh /*
1109 1.1 msaitoh * In netmap mode, all the work is done in the context
1110 1.1 msaitoh * of the client thread. Interrupt handlers only wake up
1111 1.1 msaitoh * clients, which may be sleeping on individual rings
1112 1.1 msaitoh * or on a global resource for all rings.
1113 1.1 msaitoh * To implement tx interrupt mitigation, we wake up the client
1114 1.1 msaitoh * thread roughly every half ring, even if the NIC interrupts
1115 1.1 msaitoh * more frequently. This is implemented as follows:
1116 1.1 msaitoh * - ixgbe_txsync() sets kring->nr_kflags with the index of
1117 1.1 msaitoh * the slot that should wake up the thread (nkr_num_slots
1118 1.1 msaitoh * means the user thread should not be woken up);
1119 1.1 msaitoh * - the driver ignores tx interrupts unless netmap_mitigate=0
1120 1.1 msaitoh * or the slot has the DD bit set.
1121 1.1 msaitoh */
1122 1.53 msaitoh if (kring->nr_kflags < kring->nkr_num_slots &&
1123 1.53 msaitoh txd[kring->nr_kflags].wb.status & IXGBE_TXD_STAT_DD) {
1124 1.1 msaitoh netmap_tx_irq(ifp, txr->me);
1125 1.1 msaitoh }
1126 1.32 msaitoh return false;
1127 1.1 msaitoh }
1128 1.1 msaitoh #endif /* DEV_NETMAP */
1129 1.1 msaitoh
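	/* All descriptors are free: nothing to clean. */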
1130 1.1 msaitoh if (txr->tx_avail == txr->num_desc) {
1131 1.45 msaitoh txr->busy = 0;
1132 1.32 msaitoh return false;
1133 1.1 msaitoh }
1134 1.1 msaitoh
1135 1.1 msaitoh /* Get work starting point */
1136 1.1 msaitoh work = txr->next_to_clean;
1137 1.1 msaitoh buf = &txr->tx_buffers[work];
1138 1.1 msaitoh txd = &txr->tx_base[work];
1139 1.1 msaitoh work -= txr->num_desc; /* The distance to ring end */
1140 1.28 msaitoh ixgbe_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
1141 1.1 msaitoh BUS_DMASYNC_POSTREAD);
1142 1.8 msaitoh
1143 1.1 msaitoh do {
1144 1.8 msaitoh union ixgbe_adv_tx_desc *eop = buf->eop;
1145 1.1 msaitoh if (eop == NULL) /* No work */
1146 1.1 msaitoh break;
1147 1.1 msaitoh
1148 1.1 msaitoh if ((eop->wb.status & IXGBE_TXD_STAT_DD) == 0)
1149 1.1 msaitoh break; /* I/O not complete */
1150 1.1 msaitoh
1151 1.1 msaitoh if (buf->m_head) {
1152 1.28 msaitoh txr->bytes += buf->m_head->m_pkthdr.len;
1153 1.28 msaitoh bus_dmamap_sync(txr->txtag->dt_dmat, buf->map,
1154 1.1 msaitoh 0, buf->m_head->m_pkthdr.len,
1155 1.1 msaitoh BUS_DMASYNC_POSTWRITE);
1156 1.28 msaitoh ixgbe_dmamap_unload(txr->txtag, buf->map);
1157 1.1 msaitoh m_freem(buf->m_head);
1158 1.1 msaitoh buf->m_head = NULL;
1159 1.1 msaitoh }
1160 1.1 msaitoh buf->eop = NULL;
1161 1.47 msaitoh txr->txr_no_space = false;
1162 1.1 msaitoh ++txr->tx_avail;
1163 1.1 msaitoh
1164 1.1 msaitoh /* We clean the range if multi segment */
1165 1.1 msaitoh while (txd != eop) {
1166 1.1 msaitoh ++txd;
1167 1.1 msaitoh ++buf;
1168 1.1 msaitoh ++work;
1169 1.1 msaitoh /* wrap the ring? */
1170 1.1 msaitoh if (__predict_false(!work)) {
1171 1.1 msaitoh work -= txr->num_desc;
1172 1.1 msaitoh buf = txr->tx_buffers;
1173 1.1 msaitoh txd = txr->tx_base;
1174 1.1 msaitoh }
1175 1.1 msaitoh if (buf->m_head) {
1176 1.1 msaitoh txr->bytes +=
1177 1.1 msaitoh buf->m_head->m_pkthdr.len;
1178 1.1 msaitoh bus_dmamap_sync(txr->txtag->dt_dmat,
1179 1.1 msaitoh buf->map,
1180 1.1 msaitoh 0, buf->m_head->m_pkthdr.len,
1181 1.1 msaitoh BUS_DMASYNC_POSTWRITE);
1182 1.1 msaitoh ixgbe_dmamap_unload(txr->txtag,
1183 1.1 msaitoh buf->map);
1184 1.1 msaitoh m_freem(buf->m_head);
1185 1.1 msaitoh buf->m_head = NULL;
1186 1.1 msaitoh }
1187 1.1 msaitoh ++txr->tx_avail;
1188 1.1 msaitoh buf->eop = NULL;
1189 1.1 msaitoh
1190 1.1 msaitoh }
1191 1.1 msaitoh ++txr->packets;
1192 1.1 msaitoh ++processed;
1193 1.61 thorpej if_statinc(ifp, if_opackets);
1194 1.1 msaitoh
1195 1.1 msaitoh /* Try the next packet */
1196 1.1 msaitoh ++txd;
1197 1.1 msaitoh ++buf;
1198 1.1 msaitoh ++work;
1199 1.1 msaitoh /* reset with a wrap */
1200 1.1 msaitoh if (__predict_false(!work)) {
1201 1.1 msaitoh work -= txr->num_desc;
1202 1.1 msaitoh buf = txr->tx_buffers;
1203 1.1 msaitoh txd = txr->tx_base;
1204 1.1 msaitoh }
1205 1.1 msaitoh prefetch(txd);
1206 1.1 msaitoh } while (__predict_true(--limit));
1207 1.1 msaitoh
1208 1.1 msaitoh ixgbe_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
1209 1.1 msaitoh BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1210 1.1 msaitoh
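	/* 'work' was biased by -num_desc above; convert it back to a ring index. */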
1211 1.1 msaitoh work += txr->num_desc;
1212 1.1 msaitoh txr->next_to_clean = work;
1213 1.1 msaitoh
1214 1.45 msaitoh 	/*
1215 1.45 msaitoh 	 * Queue hang detection: we know there is
1216 1.45 msaitoh 	 * work outstanding, or the first return
1217 1.45 msaitoh 	 * above would have been taken, so increment
1218 1.45 msaitoh 	 * busy if nothing was cleaned; the local
1219 1.45 msaitoh 	 * timer then checks this count and marks the
1220 1.45 msaitoh 	 * queue as HUNG if it exceeds a MAX attempt.
1221 1.45 msaitoh 	 */
1222 1.45 msaitoh if ((processed == 0) && (txr->busy != IXGBE_QUEUE_HUNG))
1223 1.45 msaitoh ++txr->busy;
1224 1.45 msaitoh /*
1225 1.45 msaitoh 	 * If anything gets cleaned we reset the state to 1;
1226 1.45 msaitoh 	 * note this will clear HUNG if it is set.
1227 1.45 msaitoh */
1228 1.45 msaitoh if (processed)
1229 1.45 msaitoh txr->busy = 1;
1230 1.45 msaitoh
1231 1.43 msaitoh if (txr->tx_avail == txr->num_desc)
1232 1.45 msaitoh txr->busy = 0;
1233 1.43 msaitoh
1234 1.32 msaitoh return ((limit > 0) ? false : true);
1235 1.28 msaitoh } /* ixgbe_txeof */
1236 1.1 msaitoh
1237 1.28 msaitoh /************************************************************************
1238 1.28 msaitoh * ixgbe_rsc_count
1239 1.28 msaitoh *
1240 1.28 msaitoh * Used to detect a descriptor that has been merged by Hardware RSC.
1241 1.28 msaitoh ************************************************************************/
1242 1.1 msaitoh static inline u32
1243 1.1 msaitoh ixgbe_rsc_count(union ixgbe_adv_rx_desc *rx)
1244 1.1 msaitoh {
1245 1.1 msaitoh return (le32toh(rx->wb.lower.lo_dword.data) &
1246 1.1 msaitoh IXGBE_RXDADV_RSCCNT_MASK) >> IXGBE_RXDADV_RSCCNT_SHIFT;
1247 1.28 msaitoh } /* ixgbe_rsc_count */
1248 1.1 msaitoh
1249 1.28 msaitoh /************************************************************************
1250 1.28 msaitoh * ixgbe_setup_hw_rsc
1251 1.1 msaitoh *
1252 1.28 msaitoh  * Initialize the Hardware RSC (LRO) feature on 82599
1253 1.28 msaitoh  * for an RX ring; it is toggled by the LRO capability
1254 1.28 msaitoh  * even though it is transparent to the stack.
1255 1.28 msaitoh *
1256 1.28 msaitoh * NOTE: Since this HW feature only works with IPv4 and
1257 1.28 msaitoh * testing has shown soft LRO to be as effective,
1258 1.28 msaitoh * this feature will be disabled by default.
1259 1.28 msaitoh ************************************************************************/
1260 1.1 msaitoh static void
1261 1.1 msaitoh ixgbe_setup_hw_rsc(struct rx_ring *rxr)
1262 1.1 msaitoh {
1263 1.28 msaitoh struct adapter *adapter = rxr->adapter;
1264 1.28 msaitoh struct ixgbe_hw *hw = &adapter->hw;
1265 1.28 msaitoh u32 rscctrl, rdrxctl;
1266 1.1 msaitoh
1267 1.1 msaitoh /* If turning LRO/RSC off we need to disable it */
1268 1.1 msaitoh if ((adapter->ifp->if_capenable & IFCAP_LRO) == 0) {
1269 1.1 msaitoh rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(rxr->me));
1270 1.1 msaitoh rscctrl &= ~IXGBE_RSCCTL_RSCEN;
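		/*
		 * XXX: the cleared RSCEN bit does not appear to be
		 * written back to RSCCTL before returning.
		 */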
1271 1.1 msaitoh return;
1272 1.1 msaitoh }
1273 1.1 msaitoh
1274 1.1 msaitoh rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
1275 1.1 msaitoh rdrxctl &= ~IXGBE_RDRXCTL_RSCFRSTSIZE;
1276 1.28 msaitoh #ifdef DEV_NETMAP
1277 1.28 msaitoh /* Always strip CRC unless Netmap disabled it */
1278 1.28 msaitoh if (!(adapter->feat_en & IXGBE_FEATURE_NETMAP) ||
1279 1.28 msaitoh !(adapter->ifp->if_capenable & IFCAP_NETMAP) ||
1280 1.28 msaitoh ix_crcstrip)
1281 1.1 msaitoh #endif /* DEV_NETMAP */
1282 1.28 msaitoh rdrxctl |= IXGBE_RDRXCTL_CRCSTRIP;
1283 1.1 msaitoh rdrxctl |= IXGBE_RDRXCTL_RSCACKC;
1284 1.1 msaitoh IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);
1285 1.1 msaitoh
1286 1.1 msaitoh rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(rxr->me));
1287 1.1 msaitoh rscctrl |= IXGBE_RSCCTL_RSCEN;
1288 1.1 msaitoh /*
1289 1.28 msaitoh * Limit the total number of descriptors that
1290 1.28 msaitoh * can be combined, so it does not exceed 64K
1291 1.28 msaitoh */
1292 1.1 msaitoh if (rxr->mbuf_sz == MCLBYTES)
1293 1.1 msaitoh rscctrl |= IXGBE_RSCCTL_MAXDESC_16;
1294 1.1 msaitoh else if (rxr->mbuf_sz == MJUMPAGESIZE)
1295 1.1 msaitoh rscctrl |= IXGBE_RSCCTL_MAXDESC_8;
1296 1.1 msaitoh else if (rxr->mbuf_sz == MJUM9BYTES)
1297 1.1 msaitoh rscctrl |= IXGBE_RSCCTL_MAXDESC_4;
1298 1.1 msaitoh else /* Using 16K cluster */
1299 1.1 msaitoh rscctrl |= IXGBE_RSCCTL_MAXDESC_1;
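	/*
	 * Worked example of the cap above, using the usual NetBSD cluster
	 * sizes (MJUMPAGESIZE is normally the page size, 4KB on most ports):
	 * 16 * 2KB = 32KB, 8 * 4KB = 32KB, 4 * 9KB = 36KB, or a single 16KB
	 * cluster -- all comfortably below the 64KB RSC aggregation limit.
	 */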
1300 1.1 msaitoh
1301 1.1 msaitoh IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(rxr->me), rscctrl);
1302 1.1 msaitoh
1303 1.1 msaitoh /* Enable TCP header recognition */
1304 1.1 msaitoh IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0),
1305 1.28 msaitoh (IXGBE_READ_REG(hw, IXGBE_PSRTYPE(0)) | IXGBE_PSRTYPE_TCPHDR));
1306 1.1 msaitoh
1307 1.1 msaitoh /* Disable RSC for ACK packets */
1308 1.1 msaitoh IXGBE_WRITE_REG(hw, IXGBE_RSCDBU,
1309 1.1 msaitoh (IXGBE_RSCDBU_RSCACKDIS | IXGBE_READ_REG(hw, IXGBE_RSCDBU)));
1310 1.1 msaitoh
1311 1.1 msaitoh rxr->hw_rsc = TRUE;
1312 1.28 msaitoh } /* ixgbe_setup_hw_rsc */
1313 1.8 msaitoh
1314 1.28 msaitoh /************************************************************************
1315 1.28 msaitoh * ixgbe_refresh_mbufs
1316 1.1 msaitoh *
1317 1.28 msaitoh * Refresh mbuf buffers for RX descriptor rings
1318 1.28 msaitoh  *    - now keeps its own state so discards due to resource
1319 1.28 msaitoh  *      exhaustion are unnecessary; if an mbuf cannot be obtained
1320 1.28 msaitoh  *      it just returns, keeping its placeholder, so it can simply
1321 1.28 msaitoh  *      be called again later to retry.
1322 1.65 msaitoh *
1323 1.65 msaitoh * XXX NetBSD TODO:
1324 1.65 msaitoh * - The ixgbe_rxeof() function always preallocates mbuf cluster (jcl),
1325 1.65 msaitoh * so the ixgbe_refresh_mbufs() function can be simplified.
1326 1.65 msaitoh *
1327 1.28 msaitoh ************************************************************************/
1328 1.1 msaitoh static void
1329 1.1 msaitoh ixgbe_refresh_mbufs(struct rx_ring *rxr, int limit)
1330 1.1 msaitoh {
1331 1.28 msaitoh struct adapter *adapter = rxr->adapter;
1332 1.28 msaitoh struct ixgbe_rx_buf *rxbuf;
1333 1.28 msaitoh struct mbuf *mp;
1334 1.28 msaitoh int i, j, error;
1335 1.28 msaitoh bool refreshed = false;
1336 1.1 msaitoh
1337 1.1 msaitoh i = j = rxr->next_to_refresh;
1338 1.1 msaitoh /* Control the loop with one beyond */
1339 1.1 msaitoh if (++j == rxr->num_desc)
1340 1.1 msaitoh j = 0;
1341 1.1 msaitoh
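	/*
	 * 'i' is the slot being refreshed and 'j' always runs one slot
	 * ahead of it ("one beyond").  For example, with num_desc = 8,
	 * next_to_refresh = 6 and limit = 2, slots 6, 7 and 0 are
	 * refreshed and next_to_refresh -- which is later written to the
	 * tail register -- ends up at 1.
	 */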
1342 1.1 msaitoh while (j != limit) {
1343 1.1 msaitoh rxbuf = &rxr->rx_buffers[i];
1344 1.1 msaitoh if (rxbuf->buf == NULL) {
1345 1.49 msaitoh mp = ixgbe_getjcl(&rxr->jcl_head, M_NOWAIT,
1346 1.1 msaitoh MT_DATA, M_PKTHDR, rxr->mbuf_sz);
1347 1.1 msaitoh if (mp == NULL) {
1348 1.1 msaitoh rxr->no_jmbuf.ev_count++;
1349 1.1 msaitoh goto update;
1350 1.1 msaitoh }
1351 1.1 msaitoh if (adapter->max_frame_size <= (MCLBYTES - ETHER_ALIGN))
1352 1.1 msaitoh m_adj(mp, ETHER_ALIGN);
1353 1.1 msaitoh } else
1354 1.1 msaitoh mp = rxbuf->buf;
1355 1.1 msaitoh
1356 1.1 msaitoh mp->m_pkthdr.len = mp->m_len = rxr->mbuf_sz;
1357 1.1 msaitoh
1358 1.1 msaitoh /* If we're dealing with an mbuf that was copied rather
1359 1.1 msaitoh * than replaced, there's no need to go through busdma.
1360 1.1 msaitoh */
1361 1.1 msaitoh if ((rxbuf->flags & IXGBE_RX_COPY) == 0) {
1362 1.1 msaitoh /* Get the memory mapping */
1363 1.4 msaitoh ixgbe_dmamap_unload(rxr->ptag, rxbuf->pmap);
1364 1.1 msaitoh error = bus_dmamap_load_mbuf(rxr->ptag->dt_dmat,
1365 1.1 msaitoh rxbuf->pmap, mp, BUS_DMA_NOWAIT);
1366 1.1 msaitoh if (error != 0) {
1367 1.55 msaitoh device_printf(adapter->dev, "Refresh mbufs: "
1368 1.55 msaitoh "payload dmamap load failure - %d\n",
1369 1.55 msaitoh error);
1370 1.1 msaitoh m_free(mp);
1371 1.1 msaitoh rxbuf->buf = NULL;
1372 1.1 msaitoh goto update;
1373 1.1 msaitoh }
1374 1.1 msaitoh rxbuf->buf = mp;
1375 1.1 msaitoh bus_dmamap_sync(rxr->ptag->dt_dmat, rxbuf->pmap,
1376 1.1 msaitoh 0, mp->m_pkthdr.len, BUS_DMASYNC_PREREAD);
1377 1.1 msaitoh rxbuf->addr = rxr->rx_base[i].read.pkt_addr =
1378 1.1 msaitoh htole64(rxbuf->pmap->dm_segs[0].ds_addr);
1379 1.1 msaitoh } else {
1380 1.1 msaitoh rxr->rx_base[i].read.pkt_addr = rxbuf->addr;
1381 1.1 msaitoh rxbuf->flags &= ~IXGBE_RX_COPY;
1382 1.1 msaitoh }
1383 1.1 msaitoh
1384 1.1 msaitoh refreshed = true;
1385 1.1 msaitoh /* Next is precalculated */
1386 1.1 msaitoh i = j;
1387 1.1 msaitoh rxr->next_to_refresh = i;
1388 1.1 msaitoh if (++j == rxr->num_desc)
1389 1.1 msaitoh j = 0;
1390 1.1 msaitoh }
1391 1.28 msaitoh
1392 1.1 msaitoh update:
1393 1.1 msaitoh if (refreshed) /* Update hardware tail index */
1394 1.28 msaitoh IXGBE_WRITE_REG(&adapter->hw, rxr->tail, rxr->next_to_refresh);
1395 1.28 msaitoh
1396 1.1 msaitoh return;
1397 1.28 msaitoh } /* ixgbe_refresh_mbufs */
1398 1.1 msaitoh
1399 1.28 msaitoh /************************************************************************
1400 1.28 msaitoh * ixgbe_allocate_receive_buffers
1401 1.1 msaitoh *
1402 1.28 msaitoh * Allocate memory for rx_buffer structures. Since we use one
1403 1.28 msaitoh * rx_buffer per received packet, the maximum number of rx_buffer's
1404 1.28 msaitoh * that we'll need is equal to the number of receive descriptors
1405 1.28 msaitoh * that we've allocated.
1406 1.28 msaitoh ************************************************************************/
1407 1.28 msaitoh static int
1408 1.1 msaitoh ixgbe_allocate_receive_buffers(struct rx_ring *rxr)
1409 1.1 msaitoh {
1410 1.53 msaitoh struct adapter *adapter = rxr->adapter;
1411 1.28 msaitoh device_t dev = adapter->dev;
1412 1.28 msaitoh struct ixgbe_rx_buf *rxbuf;
1413 1.28 msaitoh int bsize, error;
1414 1.1 msaitoh
1415 1.1 msaitoh bsize = sizeof(struct ixgbe_rx_buf) * rxr->num_desc;
1416 1.57 chs rxr->rx_buffers = malloc(bsize, M_DEVBUF, M_WAITOK | M_ZERO);
1417 1.1 msaitoh
1418 1.28 msaitoh error = ixgbe_dma_tag_create(
1419 1.28 msaitoh /* parent */ adapter->osdep.dmat,
1420 1.28 msaitoh /* alignment */ 1,
1421 1.28 msaitoh /* bounds */ 0,
1422 1.28 msaitoh /* maxsize */ MJUM16BYTES,
1423 1.28 msaitoh /* nsegments */ 1,
1424 1.28 msaitoh /* maxsegsize */ MJUM16BYTES,
1425 1.28 msaitoh /* flags */ 0,
1426 1.28 msaitoh &rxr->ptag);
1427 1.28 msaitoh if (error != 0) {
1428 1.1 msaitoh aprint_error_dev(dev, "Unable to create RX DMA tag\n");
1429 1.1 msaitoh goto fail;
1430 1.1 msaitoh }
1431 1.1 msaitoh
1432 1.5 msaitoh for (int i = 0; i < rxr->num_desc; i++, rxbuf++) {
1433 1.1 msaitoh rxbuf = &rxr->rx_buffers[i];
1434 1.4 msaitoh error = ixgbe_dmamap_create(rxr->ptag, 0, &rxbuf->pmap);
1435 1.1 msaitoh if (error) {
1436 1.1 msaitoh aprint_error_dev(dev, "Unable to create RX dma map\n");
1437 1.1 msaitoh goto fail;
1438 1.1 msaitoh }
1439 1.1 msaitoh }
1440 1.1 msaitoh
1441 1.1 msaitoh return (0);
1442 1.1 msaitoh
1443 1.1 msaitoh fail:
1444 1.1 msaitoh /* Frees all, but can handle partial completion */
1445 1.1 msaitoh ixgbe_free_receive_structures(adapter);
1446 1.28 msaitoh
1447 1.1 msaitoh return (error);
1448 1.28 msaitoh } /* ixgbe_allocate_receive_buffers */
1449 1.1 msaitoh
1450 1.28 msaitoh /************************************************************************
1451 1.30 msaitoh * ixgbe_free_receive_ring
1452 1.28 msaitoh ************************************************************************/
1453 1.28 msaitoh static void
1454 1.1 msaitoh ixgbe_free_receive_ring(struct rx_ring *rxr)
1455 1.27 msaitoh {
1456 1.5 msaitoh for (int i = 0; i < rxr->num_desc; i++) {
1457 1.27 msaitoh ixgbe_rx_discard(rxr, i);
1458 1.1 msaitoh }
1459 1.28 msaitoh } /* ixgbe_free_receive_ring */
1460 1.1 msaitoh
1461 1.28 msaitoh /************************************************************************
1462 1.28 msaitoh * ixgbe_setup_receive_ring
1463 1.1 msaitoh *
1464 1.28 msaitoh * Initialize a receive ring and its buffers.
1465 1.28 msaitoh ************************************************************************/
1466 1.1 msaitoh static int
1467 1.1 msaitoh ixgbe_setup_receive_ring(struct rx_ring *rxr)
1468 1.1 msaitoh {
1469 1.28 msaitoh struct adapter *adapter;
1470 1.28 msaitoh struct ixgbe_rx_buf *rxbuf;
1471 1.1 msaitoh #ifdef LRO
1472 1.28 msaitoh struct ifnet *ifp;
1473 1.28 msaitoh struct lro_ctrl *lro = &rxr->lro;
1474 1.1 msaitoh #endif /* LRO */
1475 1.1 msaitoh #ifdef DEV_NETMAP
1476 1.1 msaitoh struct netmap_adapter *na = NA(rxr->adapter->ifp);
1477 1.28 msaitoh struct netmap_slot *slot;
1478 1.1 msaitoh #endif /* DEV_NETMAP */
1479 1.28 msaitoh int rsize, error = 0;
1480 1.1 msaitoh
1481 1.1 msaitoh adapter = rxr->adapter;
1482 1.1 msaitoh #ifdef LRO
1483 1.1 msaitoh ifp = adapter->ifp;
1484 1.1 msaitoh #endif /* LRO */
1485 1.1 msaitoh
1486 1.1 msaitoh /* Clear the ring contents */
1487 1.1 msaitoh IXGBE_RX_LOCK(rxr);
1488 1.28 msaitoh
1489 1.1 msaitoh #ifdef DEV_NETMAP
1490 1.28 msaitoh if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
1491 1.28 msaitoh slot = netmap_reset(na, NR_RX, rxr->me, 0);
1492 1.1 msaitoh #endif /* DEV_NETMAP */
1493 1.28 msaitoh
1494 1.1 msaitoh rsize = roundup2(adapter->num_rx_desc *
1495 1.1 msaitoh sizeof(union ixgbe_adv_rx_desc), DBA_ALIGN);
1496 1.1 msaitoh bzero((void *)rxr->rx_base, rsize);
1497 1.1 msaitoh /* Cache the size */
1498 1.1 msaitoh rxr->mbuf_sz = adapter->rx_mbuf_sz;
1499 1.1 msaitoh
1500 1.1 msaitoh /* Free current RX buffer structs and their mbufs */
1501 1.1 msaitoh ixgbe_free_receive_ring(rxr);
1502 1.1 msaitoh
1503 1.49 msaitoh IXGBE_RX_UNLOCK(rxr);
1504 1.49 msaitoh /*
1505 1.49 msaitoh * Now reinitialize our supply of jumbo mbufs. The number
1506 1.49 msaitoh * or size of jumbo mbufs may have changed.
1507 1.49 msaitoh * Assume all of rxr->ptag are the same.
1508 1.49 msaitoh */
1509 1.49 msaitoh ixgbe_jcl_reinit(adapter, rxr->ptag->dt_dmat, rxr,
1510 1.49 msaitoh (2 * adapter->num_rx_desc), adapter->rx_mbuf_sz);
1511 1.49 msaitoh
1512 1.49 msaitoh IXGBE_RX_LOCK(rxr);
1513 1.49 msaitoh
1514 1.1 msaitoh /* Now replenish the mbufs */
1515 1.1 msaitoh for (int j = 0; j != rxr->num_desc; ++j) {
1516 1.28 msaitoh struct mbuf *mp;
1517 1.1 msaitoh
1518 1.1 msaitoh rxbuf = &rxr->rx_buffers[j];
1519 1.28 msaitoh
1520 1.1 msaitoh #ifdef DEV_NETMAP
1521 1.1 msaitoh /*
1522 1.1 msaitoh * In netmap mode, fill the map and set the buffer
1523 1.1 msaitoh * address in the NIC ring, considering the offset
1524 1.1 msaitoh * between the netmap and NIC rings (see comment in
1525 1.1 msaitoh * ixgbe_setup_transmit_ring() ). No need to allocate
1526 1.1 msaitoh * an mbuf, so end the block with a continue;
1527 1.1 msaitoh */
1528 1.28 msaitoh if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) && slot) {
1529 1.53 msaitoh int sj = netmap_idx_n2k(na->rx_rings[rxr->me], j);
1530 1.1 msaitoh uint64_t paddr;
1531 1.1 msaitoh void *addr;
1532 1.1 msaitoh
1533 1.1 msaitoh addr = PNMB(na, slot + sj, &paddr);
1534 1.1 msaitoh netmap_load_map(na, rxr->ptag, rxbuf->pmap, addr);
1535 1.1 msaitoh /* Update descriptor and the cached value */
1536 1.1 msaitoh rxr->rx_base[j].read.pkt_addr = htole64(paddr);
1537 1.1 msaitoh rxbuf->addr = htole64(paddr);
1538 1.1 msaitoh continue;
1539 1.1 msaitoh }
1540 1.1 msaitoh #endif /* DEV_NETMAP */
1541 1.28 msaitoh
1542 1.28 msaitoh rxbuf->flags = 0;
1543 1.49 msaitoh rxbuf->buf = ixgbe_getjcl(&rxr->jcl_head, M_NOWAIT,
1544 1.1 msaitoh MT_DATA, M_PKTHDR, adapter->rx_mbuf_sz);
1545 1.1 msaitoh if (rxbuf->buf == NULL) {
1546 1.1 msaitoh error = ENOBUFS;
1547 1.28 msaitoh goto fail;
1548 1.1 msaitoh }
1549 1.1 msaitoh mp = rxbuf->buf;
1550 1.1 msaitoh mp->m_pkthdr.len = mp->m_len = rxr->mbuf_sz;
1551 1.1 msaitoh /* Get the memory mapping */
1552 1.28 msaitoh error = bus_dmamap_load_mbuf(rxr->ptag->dt_dmat, rxbuf->pmap,
1553 1.28 msaitoh mp, BUS_DMA_NOWAIT);
1554 1.1 msaitoh if (error != 0)
1555 1.1 msaitoh goto fail;
1556 1.1 msaitoh bus_dmamap_sync(rxr->ptag->dt_dmat, rxbuf->pmap,
1557 1.1 msaitoh 0, adapter->rx_mbuf_sz, BUS_DMASYNC_PREREAD);
1558 1.1 msaitoh /* Update the descriptor and the cached value */
1559 1.1 msaitoh rxr->rx_base[j].read.pkt_addr =
1560 1.1 msaitoh htole64(rxbuf->pmap->dm_segs[0].ds_addr);
1561 1.1 msaitoh rxbuf->addr = htole64(rxbuf->pmap->dm_segs[0].ds_addr);
1562 1.1 msaitoh }
1563 1.1 msaitoh
1564 1.1 msaitoh /* Setup our descriptor indices */
1565 1.1 msaitoh rxr->next_to_check = 0;
1566 1.1 msaitoh rxr->next_to_refresh = 0;
1567 1.1 msaitoh rxr->lro_enabled = FALSE;
1568 1.1 msaitoh rxr->rx_copies.ev_count = 0;
1569 1.13 msaitoh #if 0 /* NetBSD */
1570 1.1 msaitoh rxr->rx_bytes.ev_count = 0;
1571 1.13 msaitoh #if 1 /* Fix inconsistency */
1572 1.13 msaitoh rxr->rx_packets.ev_count = 0;
1573 1.13 msaitoh #endif
1574 1.13 msaitoh #endif
1575 1.1 msaitoh rxr->vtag_strip = FALSE;
1576 1.1 msaitoh
1577 1.1 msaitoh ixgbe_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
1578 1.1 msaitoh BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1579 1.1 msaitoh
1580 1.1 msaitoh /*
1581 1.28 msaitoh * Now set up the LRO interface
1582 1.28 msaitoh */
1583 1.1 msaitoh if (ixgbe_rsc_enable)
1584 1.1 msaitoh ixgbe_setup_hw_rsc(rxr);
1585 1.1 msaitoh #ifdef LRO
1586 1.1 msaitoh else if (ifp->if_capenable & IFCAP_LRO) {
1587 1.1 msaitoh device_t dev = adapter->dev;
1588 1.1 msaitoh int err = tcp_lro_init(lro);
1589 1.1 msaitoh if (err) {
1590 1.1 msaitoh device_printf(dev, "LRO Initialization failed!\n");
1591 1.1 msaitoh goto fail;
1592 1.1 msaitoh }
1593 1.1 msaitoh INIT_DEBUGOUT("RX Soft LRO Initialized\n");
1594 1.1 msaitoh rxr->lro_enabled = TRUE;
1595 1.1 msaitoh lro->ifp = adapter->ifp;
1596 1.1 msaitoh }
1597 1.1 msaitoh #endif /* LRO */
1598 1.1 msaitoh
1599 1.1 msaitoh IXGBE_RX_UNLOCK(rxr);
1600 1.28 msaitoh
1601 1.1 msaitoh return (0);
1602 1.1 msaitoh
1603 1.1 msaitoh fail:
1604 1.1 msaitoh ixgbe_free_receive_ring(rxr);
1605 1.1 msaitoh IXGBE_RX_UNLOCK(rxr);
1606 1.28 msaitoh
1607 1.1 msaitoh return (error);
1608 1.28 msaitoh } /* ixgbe_setup_receive_ring */
1609 1.1 msaitoh
1610 1.28 msaitoh /************************************************************************
1611 1.28 msaitoh * ixgbe_setup_receive_structures - Initialize all receive rings.
1612 1.28 msaitoh ************************************************************************/
1613 1.1 msaitoh int
1614 1.1 msaitoh ixgbe_setup_receive_structures(struct adapter *adapter)
1615 1.1 msaitoh {
1616 1.1 msaitoh struct rx_ring *rxr = adapter->rx_rings;
1617 1.28 msaitoh int j;
1618 1.1 msaitoh
1619 1.62 msaitoh INIT_DEBUGOUT("ixgbe_setup_receive_structures");
1620 1.1 msaitoh for (j = 0; j < adapter->num_queues; j++, rxr++)
1621 1.1 msaitoh if (ixgbe_setup_receive_ring(rxr))
1622 1.1 msaitoh goto fail;
1623 1.1 msaitoh
1624 1.1 msaitoh return (0);
1625 1.1 msaitoh fail:
1626 1.1 msaitoh /*
1627 1.1 msaitoh 	 * Free RX buffers allocated so far; we only handle
1628 1.1 msaitoh 	 * the rings that completed, since the failing case will
1629 1.1 msaitoh 	 * have cleaned up for itself. 'j' failed, so it's the terminus.
1630 1.1 msaitoh */
1631 1.1 msaitoh for (int i = 0; i < j; ++i) {
1632 1.1 msaitoh rxr = &adapter->rx_rings[i];
1633 1.27 msaitoh IXGBE_RX_LOCK(rxr);
1634 1.1 msaitoh ixgbe_free_receive_ring(rxr);
1635 1.27 msaitoh IXGBE_RX_UNLOCK(rxr);
1636 1.1 msaitoh }
1637 1.1 msaitoh
1638 1.1 msaitoh return (ENOBUFS);
1639 1.28 msaitoh } /* ixgbe_setup_receive_structures */
1640 1.1 msaitoh
1641 1.3 msaitoh
1642 1.28 msaitoh /************************************************************************
1643 1.28 msaitoh * ixgbe_free_receive_structures - Free all receive rings.
1644 1.28 msaitoh ************************************************************************/
1645 1.1 msaitoh void
1646 1.1 msaitoh ixgbe_free_receive_structures(struct adapter *adapter)
1647 1.1 msaitoh {
1648 1.1 msaitoh struct rx_ring *rxr = adapter->rx_rings;
1649 1.1 msaitoh
1650 1.1 msaitoh INIT_DEBUGOUT("ixgbe_free_receive_structures: begin");
1651 1.1 msaitoh
1652 1.1 msaitoh for (int i = 0; i < adapter->num_queues; i++, rxr++) {
1653 1.1 msaitoh ixgbe_free_receive_buffers(rxr);
1654 1.1 msaitoh #ifdef LRO
1655 1.1 msaitoh /* Free LRO memory */
1656 1.28 msaitoh tcp_lro_free(&rxr->lro);
1657 1.1 msaitoh #endif /* LRO */
1658 1.1 msaitoh /* Free the ring memory as well */
1659 1.1 msaitoh ixgbe_dma_free(adapter, &rxr->rxdma);
1660 1.1 msaitoh IXGBE_RX_LOCK_DESTROY(rxr);
1661 1.1 msaitoh }
1662 1.1 msaitoh
1663 1.1 msaitoh free(adapter->rx_rings, M_DEVBUF);
1664 1.28 msaitoh } /* ixgbe_free_receive_structures */
1665 1.1 msaitoh
1666 1.1 msaitoh
1667 1.28 msaitoh /************************************************************************
1668 1.28 msaitoh * ixgbe_free_receive_buffers - Free receive ring data structures
1669 1.28 msaitoh ************************************************************************/
1670 1.1 msaitoh static void
1671 1.1 msaitoh ixgbe_free_receive_buffers(struct rx_ring *rxr)
1672 1.1 msaitoh {
1673 1.28 msaitoh struct adapter *adapter = rxr->adapter;
1674 1.28 msaitoh struct ixgbe_rx_buf *rxbuf;
1675 1.1 msaitoh
1676 1.1 msaitoh INIT_DEBUGOUT("ixgbe_free_receive_buffers: begin");
1677 1.1 msaitoh
1678 1.1 msaitoh /* Cleanup any existing buffers */
1679 1.1 msaitoh if (rxr->rx_buffers != NULL) {
1680 1.1 msaitoh for (int i = 0; i < adapter->num_rx_desc; i++) {
1681 1.1 msaitoh rxbuf = &rxr->rx_buffers[i];
1682 1.27 msaitoh ixgbe_rx_discard(rxr, i);
1683 1.1 msaitoh if (rxbuf->pmap != NULL) {
1684 1.1 msaitoh ixgbe_dmamap_destroy(rxr->ptag, rxbuf->pmap);
1685 1.1 msaitoh rxbuf->pmap = NULL;
1686 1.1 msaitoh }
1687 1.1 msaitoh }
1688 1.59 msaitoh
1689 1.59 msaitoh /* NetBSD specific. See ixgbe_netbsd.c */
1690 1.59 msaitoh ixgbe_jcl_destroy(adapter, rxr);
1691 1.59 msaitoh
1692 1.1 msaitoh if (rxr->rx_buffers != NULL) {
1693 1.1 msaitoh free(rxr->rx_buffers, M_DEVBUF);
1694 1.1 msaitoh rxr->rx_buffers = NULL;
1695 1.1 msaitoh }
1696 1.1 msaitoh }
1697 1.1 msaitoh
1698 1.1 msaitoh if (rxr->ptag != NULL) {
1699 1.1 msaitoh ixgbe_dma_tag_destroy(rxr->ptag);
1700 1.1 msaitoh rxr->ptag = NULL;
1701 1.1 msaitoh }
1702 1.1 msaitoh
1703 1.1 msaitoh return;
1704 1.28 msaitoh } /* ixgbe_free_receive_buffers */
1705 1.1 msaitoh
1706 1.28 msaitoh /************************************************************************
1707 1.28 msaitoh * ixgbe_rx_input
1708 1.28 msaitoh ************************************************************************/
1709 1.1 msaitoh static __inline void
1710 1.28 msaitoh ixgbe_rx_input(struct rx_ring *rxr, struct ifnet *ifp, struct mbuf *m,
1711 1.28 msaitoh u32 ptype)
1712 1.1 msaitoh {
1713 1.20 msaitoh struct adapter *adapter = ifp->if_softc;
1714 1.1 msaitoh
1715 1.1 msaitoh #ifdef LRO
1716 1.1 msaitoh struct ethercom *ec = &adapter->osdep.ec;
1717 1.1 msaitoh
1718 1.28 msaitoh /*
1719 1.28 msaitoh 	 * ATM LRO is only for IP/TCP packets whose TCP checksum has been
1720 1.28 msaitoh 	 * computed by hardware, and which carry no VLAN tag in the Ethernet
1721 1.28 msaitoh 	 * header.  In the IPv6 case we do not yet support extension headers.
1722 1.28 msaitoh */
1723 1.1 msaitoh if (rxr->lro_enabled &&
1724 1.1 msaitoh (ec->ec_capenable & ETHERCAP_VLAN_HWTAGGING) != 0 &&
1725 1.1 msaitoh (ptype & IXGBE_RXDADV_PKTTYPE_ETQF) == 0 &&
1726 1.1 msaitoh ((ptype & (IXGBE_RXDADV_PKTTYPE_IPV4 | IXGBE_RXDADV_PKTTYPE_TCP)) ==
1727 1.1 msaitoh (IXGBE_RXDADV_PKTTYPE_IPV4 | IXGBE_RXDADV_PKTTYPE_TCP) ||
1728 1.1 msaitoh (ptype & (IXGBE_RXDADV_PKTTYPE_IPV6 | IXGBE_RXDADV_PKTTYPE_TCP)) ==
1729 1.1 msaitoh (IXGBE_RXDADV_PKTTYPE_IPV6 | IXGBE_RXDADV_PKTTYPE_TCP)) &&
1730 1.1 msaitoh (m->m_pkthdr.csum_flags & (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) ==
1731 1.1 msaitoh (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) {
1732 1.1 msaitoh /*
1733 1.1 msaitoh * Send to the stack if:
1734 1.1 msaitoh 		 *  - LRO not enabled, or
1735 1.1 msaitoh 		 *  - no LRO resources, or
1736 1.1 msaitoh 		 *  - lro enqueue fails
1737 1.1 msaitoh */
1738 1.1 msaitoh if (rxr->lro.lro_cnt != 0)
1739 1.1 msaitoh if (tcp_lro_rx(&rxr->lro, m, 0) == 0)
1740 1.1 msaitoh return;
1741 1.1 msaitoh }
1742 1.1 msaitoh #endif /* LRO */
1743 1.1 msaitoh
1744 1.20 msaitoh if_percpuq_enqueue(adapter->ipq, m);
1745 1.28 msaitoh } /* ixgbe_rx_input */
1746 1.1 msaitoh
1747 1.28 msaitoh /************************************************************************
1748 1.28 msaitoh * ixgbe_rx_discard
1749 1.28 msaitoh ************************************************************************/
1750 1.1 msaitoh static __inline void
1751 1.1 msaitoh ixgbe_rx_discard(struct rx_ring *rxr, int i)
1752 1.1 msaitoh {
1753 1.28 msaitoh struct ixgbe_rx_buf *rbuf;
1754 1.1 msaitoh
1755 1.1 msaitoh rbuf = &rxr->rx_buffers[i];
1756 1.1 msaitoh
1757 1.1 msaitoh /*
1758 1.28 msaitoh * With advanced descriptors the writeback
1759 1.28 msaitoh 	 * clobbers the buffer addrs, so it's easier
1760 1.28 msaitoh * to just free the existing mbufs and take
1761 1.28 msaitoh * the normal refresh path to get new buffers
1762 1.28 msaitoh * and mapping.
1763 1.28 msaitoh */
1764 1.1 msaitoh
1765 1.26 msaitoh if (rbuf->fmp != NULL) {/* Partial chain ? */
1766 1.27 msaitoh bus_dmamap_sync(rxr->ptag->dt_dmat, rbuf->pmap, 0,
1767 1.27 msaitoh rbuf->buf->m_pkthdr.len, BUS_DMASYNC_POSTREAD);
1768 1.1 msaitoh m_freem(rbuf->fmp);
1769 1.1 msaitoh rbuf->fmp = NULL;
1770 1.1 msaitoh rbuf->buf = NULL; /* rbuf->buf is part of fmp's chain */
1771 1.1 msaitoh } else if (rbuf->buf) {
1772 1.27 msaitoh bus_dmamap_sync(rxr->ptag->dt_dmat, rbuf->pmap, 0,
1773 1.27 msaitoh rbuf->buf->m_pkthdr.len, BUS_DMASYNC_POSTREAD);
1774 1.1 msaitoh m_free(rbuf->buf);
1775 1.1 msaitoh rbuf->buf = NULL;
1776 1.1 msaitoh }
1777 1.4 msaitoh ixgbe_dmamap_unload(rxr->ptag, rbuf->pmap);
1778 1.1 msaitoh
1779 1.1 msaitoh rbuf->flags = 0;
1780 1.1 msaitoh
1781 1.1 msaitoh return;
1782 1.28 msaitoh } /* ixgbe_rx_discard */
1783 1.1 msaitoh
1784 1.1 msaitoh
1785 1.28 msaitoh /************************************************************************
1786 1.28 msaitoh * ixgbe_rxeof
1787 1.1 msaitoh *
1788 1.28 msaitoh  *   Executes in interrupt context. It replenishes the
1789 1.28 msaitoh  *   mbufs in the descriptor ring and sends data which has
1790 1.28 msaitoh  *   been DMA'ed into host memory to the upper layer.
1791 1.1 msaitoh *
1792 1.28 msaitoh * Return TRUE for more work, FALSE for all clean.
1793 1.28 msaitoh ************************************************************************/
1794 1.1 msaitoh bool
1795 1.1 msaitoh ixgbe_rxeof(struct ix_queue *que)
1796 1.1 msaitoh {
1797 1.1 msaitoh struct adapter *adapter = que->adapter;
1798 1.1 msaitoh struct rx_ring *rxr = que->rxr;
1799 1.1 msaitoh struct ifnet *ifp = adapter->ifp;
1800 1.1 msaitoh #ifdef LRO
1801 1.1 msaitoh struct lro_ctrl *lro = &rxr->lro;
1802 1.1 msaitoh #endif /* LRO */
1803 1.28 msaitoh union ixgbe_adv_rx_desc *cur;
1804 1.28 msaitoh struct ixgbe_rx_buf *rbuf, *nbuf;
1805 1.1 msaitoh int i, nextp, processed = 0;
1806 1.1 msaitoh u32 staterr = 0;
1807 1.65 msaitoh u32 count = 0;
1808 1.65 msaitoh u32 limit = adapter->rx_process_limit;
1809 1.65 msaitoh bool discard_multidesc = false;
1810 1.1 msaitoh #ifdef RSS
1811 1.1 msaitoh u16 pkt_info;
1812 1.1 msaitoh #endif
1813 1.1 msaitoh
1814 1.1 msaitoh IXGBE_RX_LOCK(rxr);
1815 1.1 msaitoh
1816 1.1 msaitoh #ifdef DEV_NETMAP
1817 1.28 msaitoh if (adapter->feat_en & IXGBE_FEATURE_NETMAP) {
1818 1.28 msaitoh /* Same as the txeof routine: wakeup clients on intr. */
1819 1.28 msaitoh if (netmap_rx_irq(ifp, rxr->me, &processed)) {
1820 1.28 msaitoh IXGBE_RX_UNLOCK(rxr);
1821 1.28 msaitoh return (FALSE);
1822 1.28 msaitoh }
1823 1.1 msaitoh }
1824 1.1 msaitoh #endif /* DEV_NETMAP */
1825 1.1 msaitoh
1826 1.65 msaitoh /*
1827 1.65 msaitoh 	 * The maximum number of loop iterations is rx_process_limit. If
1828 1.65 msaitoh 	 * discard_multidesc is true, keep processing so that a broken packet
1829 1.65 msaitoh 	 * is not passed to the upper layer.
1830 1.65 msaitoh */
1831 1.65 msaitoh for (i = rxr->next_to_check;
1832 1.65 msaitoh (count < limit) || (discard_multidesc == true);) {
1833 1.65 msaitoh
1834 1.28 msaitoh struct mbuf *sendmp, *mp;
1835 1.64 knakahar struct mbuf *newmp;
1836 1.28 msaitoh u32 rsc, ptype;
1837 1.28 msaitoh u16 len;
1838 1.28 msaitoh u16 vtag = 0;
1839 1.28 msaitoh bool eop;
1840 1.53 msaitoh
1841 1.1 msaitoh /* Sync the ring. */
1842 1.1 msaitoh ixgbe_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
1843 1.1 msaitoh BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1844 1.1 msaitoh
1845 1.1 msaitoh cur = &rxr->rx_base[i];
1846 1.1 msaitoh staterr = le32toh(cur->wb.upper.status_error);
1847 1.1 msaitoh #ifdef RSS
1848 1.1 msaitoh pkt_info = le16toh(cur->wb.lower.lo_dword.hs_rss.pkt_info);
1849 1.1 msaitoh #endif
1850 1.1 msaitoh
1851 1.1 msaitoh if ((staterr & IXGBE_RXD_STAT_DD) == 0)
1852 1.1 msaitoh break;
1853 1.1 msaitoh
1854 1.65 msaitoh count++;
1855 1.1 msaitoh sendmp = NULL;
1856 1.1 msaitoh nbuf = NULL;
1857 1.1 msaitoh rsc = 0;
1858 1.1 msaitoh cur->wb.upper.status_error = 0;
1859 1.1 msaitoh rbuf = &rxr->rx_buffers[i];
1860 1.1 msaitoh mp = rbuf->buf;
1861 1.1 msaitoh
1862 1.1 msaitoh len = le16toh(cur->wb.upper.length);
1863 1.1 msaitoh ptype = le32toh(cur->wb.lower.lo_dword.data) &
1864 1.1 msaitoh IXGBE_RXDADV_PKTTYPE_MASK;
1865 1.1 msaitoh eop = ((staterr & IXGBE_RXD_STAT_EOP) != 0);
1866 1.1 msaitoh
1867 1.1 msaitoh /* Make sure bad packets are discarded */
1868 1.1 msaitoh if (eop && (staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK) != 0) {
1869 1.3 msaitoh #if __FreeBSD_version >= 1100036
1870 1.28 msaitoh if (adapter->feat_en & IXGBE_FEATURE_VF)
1871 1.4 msaitoh if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
1872 1.3 msaitoh #endif
1873 1.1 msaitoh rxr->rx_discarded.ev_count++;
1874 1.1 msaitoh ixgbe_rx_discard(rxr, i);
1875 1.65 msaitoh discard_multidesc = false;
1876 1.1 msaitoh goto next_desc;
1877 1.1 msaitoh }
1878 1.1 msaitoh
1879 1.64 knakahar /* pre-alloc new mbuf */
1880 1.65 msaitoh if (!discard_multidesc)
1881 1.65 msaitoh newmp = ixgbe_getjcl(&rxr->jcl_head, M_NOWAIT, MT_DATA,
1882 1.65 msaitoh M_PKTHDR, rxr->mbuf_sz);
1883 1.65 msaitoh else
1884 1.65 msaitoh newmp = NULL;
1885 1.64 knakahar if (newmp == NULL) {
1886 1.64 knakahar rxr->rx_discarded.ev_count++;
1887 1.65 msaitoh /*
1888 1.65 msaitoh * Descriptor initialization is already done by the
1889 1.65 msaitoh * above code (cur->wb.upper.status_error = 0).
1890 1.65 msaitoh 			 * So, we can reuse the current rbuf->buf for the new packet.
1891 1.65 msaitoh *
1892 1.65 msaitoh * Rewrite the buffer addr, see comment in
1893 1.65 msaitoh * ixgbe_rx_discard().
1894 1.65 msaitoh */
1895 1.65 msaitoh cur->read.pkt_addr = rbuf->addr;
1896 1.65 msaitoh m_freem(rbuf->fmp);
1897 1.65 msaitoh rbuf->fmp = NULL;
1898 1.65 msaitoh if (!eop) {
1899 1.65 msaitoh /* Discard the entire packet. */
1900 1.65 msaitoh discard_multidesc = true;
1901 1.65 msaitoh } else
1902 1.65 msaitoh discard_multidesc = false;
1903 1.64 knakahar goto next_desc;
1904 1.64 knakahar }
1905 1.65 msaitoh discard_multidesc = false;
1906 1.64 knakahar
1907 1.27 msaitoh bus_dmamap_sync(rxr->ptag->dt_dmat, rbuf->pmap, 0,
1908 1.27 msaitoh rbuf->buf->m_pkthdr.len, BUS_DMASYNC_POSTREAD);
1909 1.27 msaitoh
1910 1.1 msaitoh /*
1911 1.28 msaitoh * On 82599 which supports a hardware
1912 1.28 msaitoh * LRO (called HW RSC), packets need
1913 1.28 msaitoh * not be fragmented across sequential
1914 1.28 msaitoh * descriptors, rather the next descriptor
1915 1.28 msaitoh * is indicated in bits of the descriptor.
1916 1.28 msaitoh 		 * This also means that we might process
1917 1.28 msaitoh 		 * more than one packet at a time, something
1918 1.28 msaitoh 		 * that has never been true before; it
1919 1.28 msaitoh * required eliminating global chain pointers
1920 1.28 msaitoh * in favor of what we are doing here. -jfv
1921 1.28 msaitoh */
1922 1.1 msaitoh if (!eop) {
1923 1.1 msaitoh /*
1924 1.28 msaitoh * Figure out the next descriptor
1925 1.28 msaitoh * of this frame.
1926 1.28 msaitoh */
1927 1.1 msaitoh if (rxr->hw_rsc == TRUE) {
1928 1.1 msaitoh rsc = ixgbe_rsc_count(cur);
1929 1.1 msaitoh rxr->rsc_num += (rsc - 1);
1930 1.1 msaitoh }
1931 1.1 msaitoh if (rsc) { /* Get hardware index */
1932 1.28 msaitoh nextp = ((staterr & IXGBE_RXDADV_NEXTP_MASK) >>
1933 1.1 msaitoh IXGBE_RXDADV_NEXTP_SHIFT);
1934 1.1 msaitoh } else { /* Just sequential */
1935 1.1 msaitoh nextp = i + 1;
1936 1.1 msaitoh if (nextp == adapter->num_rx_desc)
1937 1.1 msaitoh nextp = 0;
1938 1.1 msaitoh }
1939 1.1 msaitoh nbuf = &rxr->rx_buffers[nextp];
1940 1.1 msaitoh prefetch(nbuf);
1941 1.1 msaitoh }
1942 1.1 msaitoh /*
1943 1.28 msaitoh * Rather than using the fmp/lmp global pointers
1944 1.28 msaitoh * we now keep the head of a packet chain in the
1945 1.28 msaitoh * buffer struct and pass this along from one
1946 1.28 msaitoh * descriptor to the next, until we get EOP.
1947 1.28 msaitoh */
1948 1.1 msaitoh mp->m_len = len;
1949 1.1 msaitoh /*
1950 1.28 msaitoh 		 * See if there is a stored head (a partially
1951 1.28 msaitoh 		 * assembled chain) that this buffer continues.
1952 1.28 msaitoh */
1953 1.1 msaitoh sendmp = rbuf->fmp;
1954 1.1 msaitoh if (sendmp != NULL) { /* secondary frag */
1955 1.64 knakahar rbuf->buf = newmp;
1956 1.64 knakahar rbuf->fmp = NULL;
1957 1.1 msaitoh mp->m_flags &= ~M_PKTHDR;
1958 1.1 msaitoh sendmp->m_pkthdr.len += mp->m_len;
1959 1.1 msaitoh } else {
1960 1.1 msaitoh /*
1961 1.1 msaitoh * Optimize. This might be a small packet,
1962 1.1 msaitoh * maybe just a TCP ACK. Do a fast copy that
1963 1.1 msaitoh * is cache aligned into a new mbuf, and
1964 1.1 msaitoh * leave the old mbuf+cluster for re-use.
1965 1.1 msaitoh */
1966 1.1 msaitoh if (eop && len <= IXGBE_RX_COPY_LEN) {
1967 1.1 msaitoh sendmp = m_gethdr(M_NOWAIT, MT_DATA);
1968 1.1 msaitoh if (sendmp != NULL) {
1969 1.28 msaitoh sendmp->m_data += IXGBE_RX_COPY_ALIGN;
1970 1.28 msaitoh ixgbe_bcopy(mp->m_data, sendmp->m_data,
1971 1.28 msaitoh len);
1972 1.1 msaitoh sendmp->m_len = len;
1973 1.1 msaitoh rxr->rx_copies.ev_count++;
1974 1.1 msaitoh rbuf->flags |= IXGBE_RX_COPY;
1975 1.64 knakahar
1976 1.64 knakahar m_freem(newmp);
1977 1.1 msaitoh }
1978 1.1 msaitoh }
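			/*
			 * The IXGBE_RX_COPY flag set above is what lets
			 * ixgbe_refresh_mbufs() skip the busdma reload for
			 * this slot: the original cluster was never handed
			 * to the stack, so its cached mapping (rbuf->addr)
			 * can simply be written back into the descriptor.
			 */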
1979 1.1 msaitoh if (sendmp == NULL) {
1980 1.64 knakahar rbuf->buf = newmp;
1981 1.64 knakahar rbuf->fmp = NULL;
1982 1.1 msaitoh sendmp = mp;
1983 1.1 msaitoh }
1984 1.1 msaitoh
1985 1.1 msaitoh /* first desc of a non-ps chain */
1986 1.1 msaitoh sendmp->m_flags |= M_PKTHDR;
1987 1.1 msaitoh sendmp->m_pkthdr.len = mp->m_len;
1988 1.1 msaitoh }
1989 1.1 msaitoh ++processed;
1990 1.1 msaitoh
1991 1.1 msaitoh /* Pass the head pointer on */
1992 1.1 msaitoh if (eop == 0) {
1993 1.1 msaitoh nbuf->fmp = sendmp;
1994 1.1 msaitoh sendmp = NULL;
1995 1.1 msaitoh mp->m_next = nbuf->buf;
1996 1.1 msaitoh } else { /* Sending this frame */
1997 1.1 msaitoh m_set_rcvif(sendmp, ifp);
1998 1.31 msaitoh ++rxr->packets;
1999 1.1 msaitoh rxr->rx_packets.ev_count++;
2000 1.1 msaitoh /* capture data for AIM */
2001 1.1 msaitoh rxr->bytes += sendmp->m_pkthdr.len;
2002 1.1 msaitoh rxr->rx_bytes.ev_count += sendmp->m_pkthdr.len;
2003 1.1 msaitoh /* Process vlan info */
2004 1.28 msaitoh if ((rxr->vtag_strip) && (staterr & IXGBE_RXD_STAT_VP))
2005 1.1 msaitoh vtag = le16toh(cur->wb.upper.vlan);
2006 1.1 msaitoh if (vtag) {
2007 1.29 knakahar vlan_set_tag(sendmp, vtag);
2008 1.1 msaitoh }
2009 1.1 msaitoh if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) {
2010 1.1 msaitoh ixgbe_rx_checksum(staterr, sendmp, ptype,
2011 1.3 msaitoh &adapter->stats.pf);
2012 1.1 msaitoh }
2013 1.8 msaitoh
2014 1.6 msaitoh #if 0 /* FreeBSD */
2015 1.28 msaitoh /*
2016 1.28 msaitoh * In case of multiqueue, we have RXCSUM.PCSD bit set
2017 1.28 msaitoh * and never cleared. This means we have RSS hash
2018 1.28 msaitoh * available to be used.
2019 1.28 msaitoh */
2020 1.28 msaitoh if (adapter->num_queues > 1) {
2021 1.28 msaitoh sendmp->m_pkthdr.flowid =
2022 1.28 msaitoh le32toh(cur->wb.lower.hi_dword.rss);
2023 1.44 msaitoh switch (pkt_info & IXGBE_RXDADV_RSSTYPE_MASK) {
2024 1.44 msaitoh case IXGBE_RXDADV_RSSTYPE_IPV4:
2025 1.28 msaitoh M_HASHTYPE_SET(sendmp,
2026 1.28 msaitoh M_HASHTYPE_RSS_IPV4);
2027 1.28 msaitoh break;
2028 1.44 msaitoh case IXGBE_RXDADV_RSSTYPE_IPV4_TCP:
2029 1.28 msaitoh M_HASHTYPE_SET(sendmp,
2030 1.28 msaitoh M_HASHTYPE_RSS_TCP_IPV4);
2031 1.28 msaitoh break;
2032 1.44 msaitoh case IXGBE_RXDADV_RSSTYPE_IPV6:
2033 1.28 msaitoh M_HASHTYPE_SET(sendmp,
2034 1.28 msaitoh M_HASHTYPE_RSS_IPV6);
2035 1.28 msaitoh break;
2036 1.44 msaitoh case IXGBE_RXDADV_RSSTYPE_IPV6_TCP:
2037 1.28 msaitoh M_HASHTYPE_SET(sendmp,
2038 1.28 msaitoh M_HASHTYPE_RSS_TCP_IPV6);
2039 1.28 msaitoh break;
2040 1.44 msaitoh case IXGBE_RXDADV_RSSTYPE_IPV6_EX:
2041 1.28 msaitoh M_HASHTYPE_SET(sendmp,
2042 1.28 msaitoh M_HASHTYPE_RSS_IPV6_EX);
2043 1.28 msaitoh break;
2044 1.44 msaitoh case IXGBE_RXDADV_RSSTYPE_IPV6_TCP_EX:
2045 1.28 msaitoh M_HASHTYPE_SET(sendmp,
2046 1.28 msaitoh M_HASHTYPE_RSS_TCP_IPV6_EX);
2047 1.28 msaitoh break;
2048 1.6 msaitoh #if __FreeBSD_version > 1100000
2049 1.44 msaitoh case IXGBE_RXDADV_RSSTYPE_IPV4_UDP:
2050 1.28 msaitoh M_HASHTYPE_SET(sendmp,
2051 1.28 msaitoh M_HASHTYPE_RSS_UDP_IPV4);
2052 1.28 msaitoh break;
2053 1.44 msaitoh case IXGBE_RXDADV_RSSTYPE_IPV6_UDP:
2054 1.28 msaitoh M_HASHTYPE_SET(sendmp,
2055 1.28 msaitoh M_HASHTYPE_RSS_UDP_IPV6);
2056 1.28 msaitoh break;
2057 1.44 msaitoh case IXGBE_RXDADV_RSSTYPE_IPV6_UDP_EX:
2058 1.28 msaitoh M_HASHTYPE_SET(sendmp,
2059 1.28 msaitoh M_HASHTYPE_RSS_UDP_IPV6_EX);
2060 1.28 msaitoh break;
2061 1.28 msaitoh #endif
2062 1.44 msaitoh default:
2063 1.28 msaitoh M_HASHTYPE_SET(sendmp,
2064 1.28 msaitoh M_HASHTYPE_OPAQUE_HASH);
2065 1.28 msaitoh }
2066 1.28 msaitoh } else {
2067 1.28 msaitoh sendmp->m_pkthdr.flowid = que->msix;
2068 1.1 msaitoh M_HASHTYPE_SET(sendmp, M_HASHTYPE_OPAQUE);
2069 1.1 msaitoh }
2070 1.8 msaitoh #endif
2071 1.1 msaitoh }
2072 1.1 msaitoh next_desc:
2073 1.1 msaitoh ixgbe_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
2074 1.1 msaitoh BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2075 1.1 msaitoh
2076 1.1 msaitoh /* Advance our pointers to the next descriptor. */
2077 1.1 msaitoh if (++i == rxr->num_desc)
2078 1.1 msaitoh i = 0;
2079 1.1 msaitoh
2080 1.1 msaitoh /* Now send to the stack or do LRO */
2081 1.1 msaitoh if (sendmp != NULL) {
2082 1.1 msaitoh rxr->next_to_check = i;
2083 1.28 msaitoh IXGBE_RX_UNLOCK(rxr);
2084 1.1 msaitoh ixgbe_rx_input(rxr, ifp, sendmp, ptype);
2085 1.28 msaitoh IXGBE_RX_LOCK(rxr);
2086 1.1 msaitoh i = rxr->next_to_check;
2087 1.1 msaitoh }
2088 1.1 msaitoh
2089 1.28 msaitoh /* Every 8 descriptors we go to refresh mbufs */
2090 1.1 msaitoh if (processed == 8) {
2091 1.1 msaitoh ixgbe_refresh_mbufs(rxr, i);
2092 1.1 msaitoh processed = 0;
2093 1.1 msaitoh }
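		/*
		 * The batching above (every eight descriptors rather than
		 * per packet) keeps down the number of RDT tail-register
		 * writes that ixgbe_refresh_mbufs() performs.
		 */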
2094 1.1 msaitoh }
2095 1.1 msaitoh
2096 1.1 msaitoh /* Refresh any remaining buf structs */
2097 1.1 msaitoh if (ixgbe_rx_unrefreshed(rxr))
2098 1.1 msaitoh ixgbe_refresh_mbufs(rxr, i);
2099 1.1 msaitoh
2100 1.1 msaitoh rxr->next_to_check = i;
2101 1.1 msaitoh
2102 1.28 msaitoh IXGBE_RX_UNLOCK(rxr);
2103 1.28 msaitoh
2104 1.1 msaitoh #ifdef LRO
2105 1.1 msaitoh /*
2106 1.1 msaitoh * Flush any outstanding LRO work
2107 1.1 msaitoh */
2108 1.10 msaitoh tcp_lro_flush_all(lro);
2109 1.1 msaitoh #endif /* LRO */
2110 1.1 msaitoh
2111 1.1 msaitoh /*
2112 1.28 msaitoh * Still have cleaning to do?
2113 1.28 msaitoh */
2114 1.1 msaitoh if ((staterr & IXGBE_RXD_STAT_DD) != 0)
2115 1.28 msaitoh return (TRUE);
2116 1.28 msaitoh
2117 1.28 msaitoh return (FALSE);
2118 1.28 msaitoh } /* ixgbe_rxeof */
2119 1.1 msaitoh
2120 1.1 msaitoh
2121 1.28 msaitoh /************************************************************************
2122 1.28 msaitoh * ixgbe_rx_checksum
2123 1.1 msaitoh *
2124 1.28 msaitoh * Verify that the hardware indicated that the checksum is valid.
2125 1.28 msaitoh * Inform the stack about the status of checksum so that stack
2126 1.28 msaitoh  *   Inform the stack about the status of the checksum so that the stack
2127 1.28 msaitoh ************************************************************************/
2128 1.1 msaitoh static void
2129 1.1 msaitoh ixgbe_rx_checksum(u32 staterr, struct mbuf * mp, u32 ptype,
2130 1.1 msaitoh struct ixgbe_hw_stats *stats)
2131 1.1 msaitoh {
2132 1.28 msaitoh u16 status = (u16)staterr;
2133 1.28 msaitoh u8 errors = (u8)(staterr >> 24);
2134 1.1 msaitoh #if 0
2135 1.28 msaitoh bool sctp = false;
2136 1.1 msaitoh
2137 1.1 msaitoh if ((ptype & IXGBE_RXDADV_PKTTYPE_ETQF) == 0 &&
2138 1.1 msaitoh (ptype & IXGBE_RXDADV_PKTTYPE_SCTP) != 0)
2139 1.8 msaitoh sctp = true;
2140 1.1 msaitoh #endif
2141 1.1 msaitoh
2142 1.8 msaitoh /* IPv4 checksum */
2143 1.1 msaitoh if (status & IXGBE_RXD_STAT_IPCS) {
2144 1.1 msaitoh stats->ipcs.ev_count++;
2145 1.1 msaitoh if (!(errors & IXGBE_RXD_ERR_IPE)) {
2146 1.1 msaitoh /* IP Checksum Good */
2147 1.1 msaitoh mp->m_pkthdr.csum_flags = M_CSUM_IPv4;
2148 1.1 msaitoh } else {
2149 1.1 msaitoh stats->ipcs_bad.ev_count++;
2150 1.1 msaitoh mp->m_pkthdr.csum_flags = M_CSUM_IPv4|M_CSUM_IPv4_BAD;
2151 1.1 msaitoh }
2152 1.1 msaitoh }
2153 1.8 msaitoh /* TCP/UDP/SCTP checksum */
2154 1.1 msaitoh if (status & IXGBE_RXD_STAT_L4CS) {
2155 1.1 msaitoh stats->l4cs.ev_count++;
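		/*
		 * The descriptor's L4CS/TCPE bits do not indicate whether
		 * the payload was TCP or UDP (nor v4/v6), so all four flags
		 * are claimed below; the stack only consults the bit that
		 * matches the protocol it actually parses.
		 */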
2156 1.1 msaitoh int type = M_CSUM_TCPv4|M_CSUM_TCPv6|M_CSUM_UDPv4|M_CSUM_UDPv6;
2157 1.1 msaitoh if (!(errors & IXGBE_RXD_ERR_TCPE)) {
2158 1.1 msaitoh mp->m_pkthdr.csum_flags |= type;
2159 1.1 msaitoh } else {
2160 1.1 msaitoh stats->l4cs_bad.ev_count++;
2161 1.1 msaitoh mp->m_pkthdr.csum_flags |= type | M_CSUM_TCP_UDP_BAD;
2162 1.1 msaitoh }
2163 1.1 msaitoh }
2164 1.28 msaitoh } /* ixgbe_rx_checksum */
2165 1.1 msaitoh
2166 1.28 msaitoh /************************************************************************
2167 1.28 msaitoh * ixgbe_dma_malloc
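 *
 *   Allocate a DMA-safe descriptor area: create a tag, allocate and map
 *   the memory, create a map and load it, returning the kernel virtual
 *   and bus addresses in 'dma'.  The fail_* labels unwind these steps in
 *   reverse order.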
2168 1.28 msaitoh ************************************************************************/
2169 1.1 msaitoh int
2170 1.1 msaitoh ixgbe_dma_malloc(struct adapter *adapter, const bus_size_t size,
2171 1.1 msaitoh struct ixgbe_dma_alloc *dma, const int mapflags)
2172 1.1 msaitoh {
2173 1.1 msaitoh device_t dev = adapter->dev;
2174 1.28 msaitoh int r, rsegs;
2175 1.1 msaitoh
2176 1.28 msaitoh r = ixgbe_dma_tag_create(
2177 1.28 msaitoh /* parent */ adapter->osdep.dmat,
2178 1.28 msaitoh /* alignment */ DBA_ALIGN,
2179 1.28 msaitoh /* bounds */ 0,
2180 1.28 msaitoh /* maxsize */ size,
2181 1.28 msaitoh /* nsegments */ 1,
2182 1.28 msaitoh /* maxsegsize */ size,
2183 1.28 msaitoh /* flags */ BUS_DMA_ALLOCNOW,
2184 1.1 msaitoh &dma->dma_tag);
2185 1.1 msaitoh if (r != 0) {
2186 1.1 msaitoh aprint_error_dev(dev,
2187 1.44 msaitoh "%s: ixgbe_dma_tag_create failed; error %d\n", __func__,
2188 1.44 msaitoh r);
2189 1.1 msaitoh goto fail_0;
2190 1.1 msaitoh }
2191 1.1 msaitoh
2192 1.28 msaitoh r = bus_dmamem_alloc(dma->dma_tag->dt_dmat, size,
2193 1.28 msaitoh dma->dma_tag->dt_alignment, dma->dma_tag->dt_boundary,
2194 1.28 msaitoh &dma->dma_seg, 1, &rsegs, BUS_DMA_NOWAIT);
2195 1.1 msaitoh if (r != 0) {
2196 1.1 msaitoh aprint_error_dev(dev,
2197 1.1 msaitoh "%s: bus_dmamem_alloc failed; error %d\n", __func__, r);
2198 1.1 msaitoh goto fail_1;
2199 1.1 msaitoh }
2200 1.1 msaitoh
2201 1.1 msaitoh r = bus_dmamem_map(dma->dma_tag->dt_dmat, &dma->dma_seg, rsegs,
2202 1.1 msaitoh size, &dma->dma_vaddr, BUS_DMA_NOWAIT);
2203 1.1 msaitoh if (r != 0) {
2204 1.1 msaitoh aprint_error_dev(dev, "%s: bus_dmamem_map failed; error %d\n",
2205 1.1 msaitoh __func__, r);
2206 1.1 msaitoh goto fail_2;
2207 1.1 msaitoh }
2208 1.1 msaitoh
2209 1.1 msaitoh r = ixgbe_dmamap_create(dma->dma_tag, 0, &dma->dma_map);
2210 1.1 msaitoh if (r != 0) {
2211 1.1 msaitoh 		aprint_error_dev(dev,
		    "%s: ixgbe_dmamap_create failed; error %d\n",
2212 1.1 msaitoh __func__, r);
2213 1.1 msaitoh goto fail_3;
2214 1.1 msaitoh }
2215 1.1 msaitoh
2216 1.28 msaitoh r = bus_dmamap_load(dma->dma_tag->dt_dmat, dma->dma_map,
2217 1.28 msaitoh dma->dma_vaddr, size, NULL, mapflags | BUS_DMA_NOWAIT);
2218 1.1 msaitoh if (r != 0) {
2219 1.1 msaitoh aprint_error_dev(dev, "%s: bus_dmamap_load failed; error %d\n",
2220 1.1 msaitoh __func__, r);
2221 1.1 msaitoh goto fail_4;
2222 1.1 msaitoh }
2223 1.1 msaitoh dma->dma_paddr = dma->dma_map->dm_segs[0].ds_addr;
2224 1.1 msaitoh dma->dma_size = size;
2225 1.1 msaitoh return 0;
2226 1.1 msaitoh fail_4:
2227 1.1 msaitoh ixgbe_dmamap_destroy(dma->dma_tag, dma->dma_map);
2228 1.1 msaitoh fail_3:
2229 1.1 msaitoh bus_dmamem_unmap(dma->dma_tag->dt_dmat, dma->dma_vaddr, size);
2230 1.1 msaitoh fail_2:
2231 1.1 msaitoh bus_dmamem_free(dma->dma_tag->dt_dmat, &dma->dma_seg, rsegs);
2232 1.1 msaitoh fail_1:
2233 1.1 msaitoh ixgbe_dma_tag_destroy(dma->dma_tag);
2234 1.1 msaitoh fail_0:
2235 1.1 msaitoh
2236 1.28 msaitoh return (r);
2237 1.28 msaitoh } /* ixgbe_dma_malloc */
2238 1.28 msaitoh
2239 1.28 msaitoh /************************************************************************
2240 1.28 msaitoh * ixgbe_dma_free
2241 1.28 msaitoh ************************************************************************/
2242 1.3 msaitoh void
2243 1.1 msaitoh ixgbe_dma_free(struct adapter *adapter, struct ixgbe_dma_alloc *dma)
2244 1.1 msaitoh {
2245 1.1 msaitoh bus_dmamap_sync(dma->dma_tag->dt_dmat, dma->dma_map, 0, dma->dma_size,
2246 1.1 msaitoh BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2247 1.1 msaitoh 	ixgbe_dmamap_unload(dma->dma_tag, dma->dma_map);
	ixgbe_dmamap_destroy(dma->dma_tag, dma->dma_map);
	bus_dmamem_unmap(dma->dma_tag->dt_dmat, dma->dma_vaddr, dma->dma_size);
2248 1.1 msaitoh 	bus_dmamem_free(dma->dma_tag->dt_dmat, &dma->dma_seg, 1);
2249 1.1 msaitoh ixgbe_dma_tag_destroy(dma->dma_tag);
2250 1.28 msaitoh } /* ixgbe_dma_free */
2251 1.1 msaitoh
2252 1.1 msaitoh
2253 1.28 msaitoh /************************************************************************
2254 1.28 msaitoh * ixgbe_allocate_queues
2255 1.1 msaitoh *
2256 1.28 msaitoh * Allocate memory for the transmit and receive rings, and then
2257 1.28 msaitoh  *   the descriptors associated with each; called only once at attach.
2258 1.28 msaitoh ************************************************************************/
2259 1.1 msaitoh int
2260 1.1 msaitoh ixgbe_allocate_queues(struct adapter *adapter)
2261 1.1 msaitoh {
2262 1.1 msaitoh device_t dev = adapter->dev;
2263 1.1 msaitoh struct ix_queue *que;
2264 1.1 msaitoh struct tx_ring *txr;
2265 1.1 msaitoh struct rx_ring *rxr;
2266 1.28 msaitoh int rsize, tsize, error = IXGBE_SUCCESS;
2267 1.28 msaitoh int txconf = 0, rxconf = 0;
2268 1.1 msaitoh
2269 1.28 msaitoh /* First, allocate the top level queue structs */
2270 1.28 msaitoh adapter->queues = (struct ix_queue *)malloc(sizeof(struct ix_queue) *
2271 1.63 msaitoh adapter->num_queues, M_DEVBUF, M_WAITOK | M_ZERO);
2272 1.1 msaitoh
2273 1.28 msaitoh /* Second, allocate the TX ring struct memory */
2274 1.57 chs adapter->tx_rings = malloc(sizeof(struct tx_ring) *
2275 1.57 chs adapter->num_queues, M_DEVBUF, M_WAITOK | M_ZERO);
2276 1.1 msaitoh
2277 1.28 msaitoh /* Third, allocate the RX ring */
2278 1.28 msaitoh adapter->rx_rings = (struct rx_ring *)malloc(sizeof(struct rx_ring) *
2279 1.57 chs adapter->num_queues, M_DEVBUF, M_WAITOK | M_ZERO);
2280 1.1 msaitoh
2281 1.1 msaitoh /* For the ring itself */
2282 1.28 msaitoh tsize = roundup2(adapter->num_tx_desc * sizeof(union ixgbe_adv_tx_desc),
2283 1.28 msaitoh DBA_ALIGN);
2284 1.1 msaitoh
2285 1.1 msaitoh /*
2286 1.1 msaitoh 	 * Now set up the TX queues; txconf is needed to handle the
2287 1.1 msaitoh 	 * possibility that things fail midcourse and we need to
2288 1.1 msaitoh 	 * undo the memory allocations gracefully
2289 1.28 msaitoh */
2290 1.1 msaitoh for (int i = 0; i < adapter->num_queues; i++, txconf++) {
2291 1.1 msaitoh /* Set up some basics */
2292 1.1 msaitoh txr = &adapter->tx_rings[i];
2293 1.1 msaitoh txr->adapter = adapter;
2294 1.28 msaitoh txr->txr_interq = NULL;
2295 1.28 msaitoh /* In case SR-IOV is enabled, align the index properly */
2296 1.5 msaitoh #ifdef PCI_IOV
2297 1.28 msaitoh txr->me = ixgbe_vf_que_index(adapter->iov_mode, adapter->pool,
2298 1.28 msaitoh i);
2299 1.5 msaitoh #else
2300 1.1 msaitoh txr->me = i;
2301 1.5 msaitoh #endif
2302 1.1 msaitoh txr->num_desc = adapter->num_tx_desc;
2303 1.1 msaitoh
2304 1.1 msaitoh /* Initialize the TX side lock */
2305 1.1 msaitoh mutex_init(&txr->tx_mtx, MUTEX_DEFAULT, IPL_NET);
2306 1.1 msaitoh
2307 1.28 msaitoh if (ixgbe_dma_malloc(adapter, tsize, &txr->txdma,
2308 1.28 msaitoh BUS_DMA_NOWAIT)) {
2309 1.1 msaitoh aprint_error_dev(dev,
2310 1.1 msaitoh "Unable to allocate TX Descriptor memory\n");
2311 1.1 msaitoh error = ENOMEM;
2312 1.1 msaitoh goto err_tx_desc;
2313 1.1 msaitoh }
2314 1.1 msaitoh txr->tx_base = (union ixgbe_adv_tx_desc *)txr->txdma.dma_vaddr;
2315 1.1 msaitoh bzero((void *)txr->tx_base, tsize);
2316 1.1 msaitoh
2317 1.28 msaitoh /* Now allocate transmit buffers for the ring */
2318 1.28 msaitoh if (ixgbe_allocate_transmit_buffers(txr)) {
2319 1.1 msaitoh aprint_error_dev(dev,
2320 1.1 msaitoh "Critical Failure setting up transmit buffers\n");
2321 1.1 msaitoh error = ENOMEM;
2322 1.1 msaitoh goto err_tx_desc;
2323 1.63 msaitoh }
2324 1.28 msaitoh if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX)) {
2325 1.28 msaitoh /* Allocate a buf ring */
2326 1.28 msaitoh txr->txr_interq = pcq_create(IXGBE_BR_SIZE, KM_SLEEP);
2327 1.28 msaitoh if (txr->txr_interq == NULL) {
2328 1.28 msaitoh aprint_error_dev(dev,
2329 1.28 msaitoh "Critical Failure setting up buf ring\n");
2330 1.28 msaitoh error = ENOMEM;
2331 1.28 msaitoh goto err_tx_desc;
2332 1.28 msaitoh }
2333 1.28 msaitoh }
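		/*
		 * The pcq created above is the per-queue software ring used
		 * by the multiqueue transmit path (ixgbe_mq_start()); with
		 * IXGBE_FEATURE_LEGACY_TX the driver uses the single if_snd
		 * queue instead, so no pcq is needed.
		 */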
2334 1.1 msaitoh }
2335 1.1 msaitoh
2336 1.1 msaitoh /*
2337 1.1 msaitoh * Next the RX queues...
2338 1.53 msaitoh */
2339 1.28 msaitoh rsize = roundup2(adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc),
2340 1.28 msaitoh DBA_ALIGN);
2341 1.1 msaitoh for (int i = 0; i < adapter->num_queues; i++, rxconf++) {
2342 1.1 msaitoh rxr = &adapter->rx_rings[i];
2343 1.1 msaitoh /* Set up some basics */
2344 1.1 msaitoh rxr->adapter = adapter;
2345 1.5 msaitoh #ifdef PCI_IOV
2346 1.28 msaitoh /* In case SR-IOV is enabled, align the index properly */
2347 1.28 msaitoh rxr->me = ixgbe_vf_que_index(adapter->iov_mode, adapter->pool,
2348 1.28 msaitoh i);
2349 1.5 msaitoh #else
2350 1.1 msaitoh rxr->me = i;
2351 1.5 msaitoh #endif
2352 1.1 msaitoh rxr->num_desc = adapter->num_rx_desc;
2353 1.1 msaitoh
2354 1.1 msaitoh /* Initialize the RX side lock */
2355 1.1 msaitoh mutex_init(&rxr->rx_mtx, MUTEX_DEFAULT, IPL_NET);
2356 1.1 msaitoh
2357 1.28 msaitoh if (ixgbe_dma_malloc(adapter, rsize, &rxr->rxdma,
2358 1.28 msaitoh BUS_DMA_NOWAIT)) {
2359 1.1 msaitoh aprint_error_dev(dev,
2360 1.1 msaitoh 		    "Unable to allocate RX Descriptor memory\n");
2361 1.1 msaitoh error = ENOMEM;
2362 1.1 msaitoh goto err_rx_desc;
2363 1.1 msaitoh }
2364 1.1 msaitoh rxr->rx_base = (union ixgbe_adv_rx_desc *)rxr->rxdma.dma_vaddr;
2365 1.1 msaitoh bzero((void *)rxr->rx_base, rsize);
2366 1.1 msaitoh
2367 1.28 msaitoh /* Allocate receive buffers for the ring */
2368 1.1 msaitoh if (ixgbe_allocate_receive_buffers(rxr)) {
2369 1.1 msaitoh aprint_error_dev(dev,
2370 1.1 msaitoh "Critical Failure setting up receive buffers\n");
2371 1.1 msaitoh error = ENOMEM;
2372 1.1 msaitoh goto err_rx_desc;
2373 1.1 msaitoh }
2374 1.1 msaitoh }
2375 1.1 msaitoh
2376 1.1 msaitoh /*
2377 1.28 msaitoh * Finally set up the queue holding structs
2378 1.28 msaitoh */
2379 1.1 msaitoh for (int i = 0; i < adapter->num_queues; i++) {
2380 1.1 msaitoh que = &adapter->queues[i];
2381 1.1 msaitoh que->adapter = adapter;
2382 1.3 msaitoh que->me = i;
2383 1.1 msaitoh que->txr = &adapter->tx_rings[i];
2384 1.1 msaitoh que->rxr = &adapter->rx_rings[i];
2385 1.33 knakahar
2386 1.37 knakahar mutex_init(&que->dc_mtx, MUTEX_DEFAULT, IPL_NET);
2387 1.37 knakahar que->disabled_count = 0;
2388 1.1 msaitoh }
2389 1.1 msaitoh
2390 1.1 msaitoh return (0);
2391 1.1 msaitoh
2392 1.1 msaitoh err_rx_desc:
2393 1.1 msaitoh for (rxr = adapter->rx_rings; rxconf > 0; rxr++, rxconf--)
2394 1.1 msaitoh ixgbe_dma_free(adapter, &rxr->rxdma);
2395 1.1 msaitoh err_tx_desc:
2396 1.1 msaitoh for (txr = adapter->tx_rings; txconf > 0; txr++, txconf--)
2397 1.1 msaitoh ixgbe_dma_free(adapter, &txr->txdma);
2398 1.1 msaitoh free(adapter->rx_rings, M_DEVBUF);
2399 1.1 msaitoh free(adapter->tx_rings, M_DEVBUF);
2400 1.1 msaitoh free(adapter->queues, M_DEVBUF);
2401 1.1 msaitoh return (error);
2402 1.28 msaitoh } /* ixgbe_allocate_queues */
2403 1.60 msaitoh
2404 1.60 msaitoh /************************************************************************
2405 1.60 msaitoh * ixgbe_free_queues
2406 1.60 msaitoh *
2407 1.60 msaitoh * Free descriptors for the transmit and receive rings, and then
2408 1.60 msaitoh * the memory associated with each.
2409 1.60 msaitoh ************************************************************************/
2410 1.60 msaitoh void
2411 1.60 msaitoh ixgbe_free_queues(struct adapter *adapter)
2412 1.60 msaitoh {
2413 1.60 msaitoh struct ix_queue *que;
2414 1.60 msaitoh int i;
2415 1.60 msaitoh
2416 1.60 msaitoh ixgbe_free_transmit_structures(adapter);
2417 1.60 msaitoh ixgbe_free_receive_structures(adapter);
2418 1.60 msaitoh for (i = 0; i < adapter->num_queues; i++) {
2419 1.60 msaitoh que = &adapter->queues[i];
2420 1.60 msaitoh mutex_destroy(&que->dc_mtx);
2421 1.60 msaitoh }
2422 1.60 msaitoh free(adapter->queues, M_DEVBUF);
2423 1.60 msaitoh } /* ixgbe_free_queues */
2424