/*-
 * BSD LICENSE
 *
 * Copyright (c) 2015-2017 Amazon.com, Inc. or its affiliates.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/cdefs.h>
#if 0
__FBSDID("$FreeBSD: head/sys/dev/ena/ena.c 333456 2018-05-10 09:37:54Z mw $");
#endif
__KERNEL_RCSID(0, "$NetBSD: if_ena.c,v 1.20 2020/02/01 02:32:40 riastradh Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/time.h>
#include <sys/workqueue.h>
#include <sys/callout.h>
#include <sys/interrupt.h>
#include <sys/cpu.h>

#include <net/if_ether.h>
#include <net/if_vlanvar.h>

#include <dev/pci/if_enavar.h>

/*********************************************************
 *  Function prototypes
 *********************************************************/
static int	ena_probe(device_t, cfdata_t, void *);
static int	ena_intr_msix_mgmnt(void *);
static int	ena_allocate_pci_resources(struct pci_attach_args *,
    struct ena_adapter *);
static void	ena_free_pci_resources(struct ena_adapter *);
static int	ena_change_mtu(struct ifnet *, int);
static void	ena_init_io_rings_common(struct ena_adapter *,
    struct ena_ring *, uint16_t);
static void	ena_init_io_rings(struct ena_adapter *);
static void	ena_free_io_ring_resources(struct ena_adapter *, unsigned int);
static void	ena_free_all_io_rings_resources(struct ena_adapter *);
#if 0
static int	ena_setup_tx_dma_tag(struct ena_adapter *);
static int	ena_free_tx_dma_tag(struct ena_adapter *);
static int	ena_setup_rx_dma_tag(struct ena_adapter *);
static int	ena_free_rx_dma_tag(struct ena_adapter *);
#endif
static int	ena_setup_tx_resources(struct ena_adapter *, int);
static void	ena_free_tx_resources(struct ena_adapter *, int);
static int	ena_setup_all_tx_resources(struct ena_adapter *);
static void	ena_free_all_tx_resources(struct ena_adapter *);
static inline int validate_rx_req_id(struct ena_ring *, uint16_t);
static int	ena_setup_rx_resources(struct ena_adapter *, unsigned int);
static void	ena_free_rx_resources(struct ena_adapter *, unsigned int);
static int	ena_setup_all_rx_resources(struct ena_adapter *);
static void	ena_free_all_rx_resources(struct ena_adapter *);
static inline int ena_alloc_rx_mbuf(struct ena_adapter *, struct ena_ring *,
    struct ena_rx_buffer *);
static void	ena_free_rx_mbuf(struct ena_adapter *, struct ena_ring *,
    struct ena_rx_buffer *);
static int	ena_refill_rx_bufs(struct ena_ring *, uint32_t);
static void	ena_free_rx_bufs(struct ena_adapter *, unsigned int);
static void	ena_refill_all_rx_bufs(struct ena_adapter *);
static void	ena_free_all_rx_bufs(struct ena_adapter *);
static void	ena_free_tx_bufs(struct ena_adapter *, unsigned int);
static void	ena_free_all_tx_bufs(struct ena_adapter *);
static void	ena_destroy_all_tx_queues(struct ena_adapter *);
static void	ena_destroy_all_rx_queues(struct ena_adapter *);
static void	ena_destroy_all_io_queues(struct ena_adapter *);
static int	ena_create_io_queues(struct ena_adapter *);
static int	ena_tx_cleanup(struct ena_ring *);
static void	ena_deferred_rx_cleanup(struct work *, void *);
static int	ena_rx_cleanup(struct ena_ring *);
static inline int validate_tx_req_id(struct ena_ring *, uint16_t);
#if 0
static void	ena_rx_hash_mbuf(struct ena_ring *, struct ena_com_rx_ctx *,
    struct mbuf *);
#endif
static struct mbuf *ena_rx_mbuf(struct ena_ring *,
    struct ena_com_rx_buf_info *, struct ena_com_rx_ctx *, uint16_t *);
static inline void ena_rx_checksum(struct ena_ring *, struct ena_com_rx_ctx *,
    struct mbuf *);
static int	ena_handle_msix(void *);
static int	ena_enable_msix(struct ena_adapter *);
static int	ena_request_mgmnt_irq(struct ena_adapter *);
static int	ena_request_io_irq(struct ena_adapter *);
static void	ena_free_mgmnt_irq(struct ena_adapter *);
static void	ena_free_io_irq(struct ena_adapter *);
static void	ena_free_irqs(struct ena_adapter *);
static void	ena_disable_msix(struct ena_adapter *);
static void	ena_unmask_all_io_irqs(struct ena_adapter *);
static int	ena_rss_configure(struct ena_adapter *);
static int	ena_up_complete(struct ena_adapter *);
static int	ena_up(struct ena_adapter *);
static void	ena_down(struct ena_adapter *);
#if 0
static uint64_t	ena_get_counter(struct ifnet *, ift_counter);
#endif
static int	ena_media_change(struct ifnet *);
static void	ena_media_status(struct ifnet *, struct ifmediareq *);
static int	ena_init(struct ifnet *);
static int	ena_ioctl(struct ifnet *, u_long, void *);
static int	ena_get_dev_offloads(struct ena_com_dev_get_features_ctx *);
static void	ena_update_host_info(struct ena_admin_host_info *,
    struct ifnet *);
static void	ena_update_hwassist(struct ena_adapter *);
static int	ena_setup_ifnet(device_t, struct ena_adapter *,
    struct ena_com_dev_get_features_ctx *);
static void	ena_tx_csum(struct ena_com_tx_ctx *, struct mbuf *);
static int	ena_check_and_collapse_mbuf(struct ena_ring *tx_ring,
    struct mbuf **mbuf);
static int	ena_xmit_mbuf(struct ena_ring *, struct mbuf **);
static void	ena_start_xmit(struct ena_ring *);
static int	ena_mq_start(struct ifnet *, struct mbuf *);
static void	ena_deferred_mq_start(struct work *, void *);
#if 0
static void	ena_qflush(struct ifnet *);
#endif
static int	ena_calc_io_queue_num(struct pci_attach_args *,
    struct ena_adapter *, struct ena_com_dev_get_features_ctx *);
static int	ena_calc_queue_size(struct ena_adapter *, uint16_t *,
    uint16_t *, struct ena_com_dev_get_features_ctx *);
#if 0
static int	ena_rss_init_default(struct ena_adapter *);
static void	ena_rss_init_default_deferred(void *);
#endif
static void	ena_config_host_info(struct ena_com_dev *);
static void	ena_attach(device_t, device_t, void *);
static int	ena_detach(device_t, int);
static int	ena_device_init(struct ena_adapter *, device_t,
    struct ena_com_dev_get_features_ctx *, int *);
static int	ena_enable_msix_and_set_admin_interrupts(struct ena_adapter *,
    int);
static void	ena_update_on_link_change(void *,
    struct ena_admin_aenq_entry *);
static void	unimplemented_aenq_handler(void *,
    struct ena_admin_aenq_entry *);
static void	ena_timer_service(void *);

static const char ena_version[] =
    DEVICE_NAME DRV_MODULE_NAME " v" DRV_MODULE_VERSION;

#if 0
static SYSCTL_NODE(_hw, OID_AUTO, ena, CTLFLAG_RD, 0, "ENA driver parameters");
#endif

/*
 * Tuneable number of buffers in the buf-ring (drbr)
 */
static int ena_buf_ring_size = 4096;
#if 0
SYSCTL_INT(_hw_ena, OID_AUTO, buf_ring_size, CTLFLAG_RWTUN,
    &ena_buf_ring_size, 0, "Size of the bufring");
#endif

/*
 * Logging level for changing verbosity of the output
 */
int ena_log_level = ENA_ALERT | ENA_WARNING;
#if 0
SYSCTL_INT(_hw_ena, OID_AUTO, log_level, CTLFLAG_RWTUN,
    &ena_log_level, 0, "Logging level indicating verbosity of the logs");
#endif

static const ena_vendor_info_t ena_vendor_info_array[] = {
	{ PCI_VENDOR_ID_AMAZON, PCI_DEV_ID_ENA_PF, 0 },
	{ PCI_VENDOR_ID_AMAZON, PCI_DEV_ID_ENA_LLQ_PF, 0 },
	{ PCI_VENDOR_ID_AMAZON, PCI_DEV_ID_ENA_VF, 0 },
	{ PCI_VENDOR_ID_AMAZON, PCI_DEV_ID_ENA_LLQ_VF, 0 },
	/* Last entry */
	{ 0, 0, 0 }
};

/*
 * Contains pointers to event handlers, e.g. link state change.
 */
static struct ena_aenq_handlers aenq_handlers;

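/*
 * Allocate a DMA-safe buffer with the usual bus_dma(9) sequence: create
 * a map, allocate memory, map it into kernel virtual space, and load the
 * map.  On failure, every completed step is unwound in reverse order.
 */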
int
ena_dma_alloc(device_t dmadev, bus_size_t size,
    ena_mem_handle_t *dma, int mapflags)
{
	struct ena_adapter *adapter = device_private(dmadev);
	uint32_t maxsize;
	bus_dma_segment_t seg;
	int error, nsegs;

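	/* Round the requested size up to a multiple of PAGE_SIZE. */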
	maxsize = ((size - 1) / PAGE_SIZE + 1) * PAGE_SIZE;

#if 0
	/* XXX what is this needed for ? */
	dma_space_addr = ENA_DMA_BIT_MASK(adapter->dma_width);
	if (unlikely(dma_space_addr == 0))
		dma_space_addr = BUS_SPACE_MAXADDR;
#endif

	dma->tag = adapter->sc_dmat;

	if ((error = bus_dmamap_create(dma->tag, maxsize, 1, maxsize, 0,
	    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &dma->map)) != 0) {
		ena_trace(ENA_ALERT, "bus_dmamap_create(%ju) failed: %d\n",
		    (uintmax_t)maxsize, error);
		goto fail_create;
	}

	error = bus_dmamem_alloc(dma->tag, maxsize, 8, 0, &seg, 1, &nsegs,
	    BUS_DMA_ALLOCNOW);
	if (error) {
		ena_trace(ENA_ALERT, "bus_dmamem_alloc(%ju) failed: %d\n",
		    (uintmax_t)maxsize, error);
		goto fail_alloc;
	}

	error = bus_dmamem_map(dma->tag, &seg, nsegs, maxsize,
	    &dma->vaddr, BUS_DMA_COHERENT);
	if (error) {
		ena_trace(ENA_ALERT, "bus_dmamem_map(%ju) failed: %d\n",
		    (uintmax_t)maxsize, error);
		goto fail_map;
	}
	memset(dma->vaddr, 0, maxsize);

	error = bus_dmamap_load(dma->tag, dma->map, dma->vaddr,
	    maxsize, NULL, mapflags);
	if (error) {
		ena_trace(ENA_ALERT, ": bus_dmamap_load failed: %d\n", error);
		goto fail_load;
	}
	dma->paddr = dma->map->dm_segs[0].ds_addr;

	return (0);

fail_load:
	bus_dmamem_unmap(dma->tag, dma->vaddr, maxsize);
fail_map:
	bus_dmamem_free(dma->tag, &seg, nsegs);
fail_alloc:
	bus_dmamap_destroy(adapter->sc_dmat, dma->map);
fail_create:
	return (error);
}
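
/*
 * Minimal usage sketch (hypothetical caller): on success the CPU may
 * access the buffer through dma->vaddr while the device is handed
 * dma->paddr:
 *
 *	ena_mem_handle_t mem;
 *
 *	if (ena_dma_alloc(self, len, &mem, 0) != 0)
 *		return ENOMEM;
 *	memset(mem.vaddr, 0xff, len);
 */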

static int
ena_allocate_pci_resources(struct pci_attach_args *pa,
    struct ena_adapter *adapter)
{
	pcireg_t memtype, reg;
	bus_addr_t memaddr;
	bus_size_t mapsize;
	int flags, error;
	int msixoff;

	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, ENA_REG_BAR);
	if (PCI_MAPREG_TYPE(memtype) != PCI_MAPREG_TYPE_MEM) {
		aprint_error_dev(adapter->pdev, "invalid type (type=0x%x)\n",
		    memtype);
		return ENXIO;
	}
	reg = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	if (((reg & PCI_COMMAND_MASTER_ENABLE) == 0) ||
	    ((reg & PCI_COMMAND_MEM_ENABLE) == 0)) {
		/*
		 * Enable address decoding for memory range in case BIOS or
		 * UEFI didn't set it.
		 */
		reg |= PCI_COMMAND_MASTER_ENABLE | PCI_COMMAND_MEM_ENABLE;
		pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG,
		    reg);
	}

	adapter->sc_btag = pa->pa_memt;
	error = pci_mapreg_info(pa->pa_pc, pa->pa_tag, ENA_REG_BAR,
	    memtype, &memaddr, &mapsize, &flags);
	if (error) {
		aprint_error_dev(adapter->pdev, "can't get map info\n");
		return ENXIO;
	}

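	/*
	 * If the MSI-X table shares this BAR, map only up to the start of
	 * the table; the interrupt code maps the table itself separately.
	 */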
	if (pci_get_capability(pa->pa_pc, pa->pa_tag, PCI_CAP_MSIX, &msixoff,
	    NULL)) {
		pcireg_t msixtbl;
		uint32_t table_offset;
		int bir;

		msixtbl = pci_conf_read(pa->pa_pc, pa->pa_tag,
		    msixoff + PCI_MSIX_TBLOFFSET);
		table_offset = msixtbl & PCI_MSIX_TBLOFFSET_MASK;
		bir = msixtbl & PCI_MSIX_TBLBIR_MASK;
		if (bir == PCI_MAPREG_NUM(ENA_REG_BAR))
			mapsize = table_offset;
	}

	error = bus_space_map(adapter->sc_btag, memaddr, mapsize, flags,
	    &adapter->sc_bhandle);
	if (error != 0) {
		aprint_error_dev(adapter->pdev,
		    "can't map mem space (error=%d)\n", error);
		return ENXIO;
	}

	return (0);
}

static void
ena_free_pci_resources(struct ena_adapter *adapter)
{
	/* Nothing to do */
}

static int
ena_probe(device_t parent, cfdata_t match, void *aux)
{
	struct pci_attach_args *pa = aux;
	const ena_vendor_info_t *ent;

	for (int i = 0; i < __arraycount(ena_vendor_info_array); i++) {
		ent = &ena_vendor_info_array[i];

		if ((PCI_VENDOR(pa->pa_id) == ent->vendor_id) &&
		    (PCI_PRODUCT(pa->pa_id) == ent->device_id)) {
			return 1;
		}
	}

	return 0;
}

static int
ena_change_mtu(struct ifnet *ifp, int new_mtu)
{
	struct ena_adapter *adapter = if_getsoftc(ifp);
	int rc;

	if ((new_mtu > adapter->max_mtu) || (new_mtu < ENA_MIN_MTU)) {
		device_printf(adapter->pdev, "Invalid MTU setting. "
		    "new_mtu: %d max mtu: %d min mtu: %d\n",
		    new_mtu, adapter->max_mtu, ENA_MIN_MTU);
		return (EINVAL);
	}

	rc = ena_com_set_dev_mtu(adapter->ena_dev, new_mtu);
	if (likely(rc == 0)) {
		ena_trace(ENA_DBG, "set MTU to %d\n", new_mtu);
		if_setmtu(ifp, new_mtu);
	} else {
		device_printf(adapter->pdev, "Failed to set MTU to %d\n",
		    new_mtu);
	}

	return (rc);
}

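/*
 * Attach one dynamically named event counter: "st" supplies the group
 * name through its "name" member, and the field name itself doubles as
 * the counter description.
 */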
#define EVCNT_INIT(st, f)					\
    do {							\
	evcnt_attach_dynamic(&st->f, EVCNT_TYPE_MISC, NULL,	\
	    st->name, #f);					\
    } while (0)

static inline void
ena_alloc_counters_rx(struct ena_stats_rx *st, int queue)
{
	snprintf(st->name, sizeof(st->name), "ena rxq%d", queue);

	EVCNT_INIT(st, cnt);
	EVCNT_INIT(st, bytes);
	EVCNT_INIT(st, refil_partial);
	EVCNT_INIT(st, bad_csum);
	EVCNT_INIT(st, mjum_alloc_fail);
	EVCNT_INIT(st, mbuf_alloc_fail);
	EVCNT_INIT(st, dma_mapping_err);
	EVCNT_INIT(st, bad_desc_num);
	EVCNT_INIT(st, bad_req_id);
	EVCNT_INIT(st, empty_rx_ring);

	/* Make sure all code is updated when new fields are added */
	CTASSERT(offsetof(struct ena_stats_rx, empty_rx_ring)
	    + sizeof(st->empty_rx_ring) == sizeof(*st));
}

static inline void
ena_alloc_counters_tx(struct ena_stats_tx *st, int queue)
{
	snprintf(st->name, sizeof(st->name), "ena txq%d", queue);

	EVCNT_INIT(st, cnt);
	EVCNT_INIT(st, bytes);
	EVCNT_INIT(st, prepare_ctx_err);
	EVCNT_INIT(st, dma_mapping_err);
	EVCNT_INIT(st, doorbells);
	EVCNT_INIT(st, missing_tx_comp);
	EVCNT_INIT(st, bad_req_id);
	EVCNT_INIT(st, collapse);
	EVCNT_INIT(st, collapse_err);

	/* Make sure all code is updated when new fields are added */
	CTASSERT(offsetof(struct ena_stats_tx, collapse_err)
	    + sizeof(st->collapse_err) == sizeof(*st));
}

static inline void
ena_alloc_counters_dev(struct ena_stats_dev *st, int queue)
{
	snprintf(st->name, sizeof(st->name), "ena dev ioq%d", queue);

	EVCNT_INIT(st, wd_expired);
	EVCNT_INIT(st, interface_up);
	EVCNT_INIT(st, interface_down);
	EVCNT_INIT(st, admin_q_pause);

	/* Make sure all code is updated when new fields are added */
	CTASSERT(offsetof(struct ena_stats_dev, admin_q_pause)
	    + sizeof(st->admin_q_pause) == sizeof(*st));
}

static inline void
ena_alloc_counters_hwstats(struct ena_hw_stats *st, int queue)
{
	snprintf(st->name, sizeof(st->name), "ena hw ioq%d", queue);

	EVCNT_INIT(st, rx_packets);
	EVCNT_INIT(st, tx_packets);
	EVCNT_INIT(st, rx_bytes);
	EVCNT_INIT(st, tx_bytes);
	EVCNT_INIT(st, rx_drops);

	/* Make sure all code is updated when new fields are added */
	CTASSERT(offsetof(struct ena_hw_stats, rx_drops)
	    + sizeof(st->rx_drops) == sizeof(*st));
}

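/*
 * The stats structs are plain sequences of event counters (the
 * CTASSERTs above pin that layout), so the helpers below can walk
 * them as arrays of struct evcnt.
 */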
static inline void
ena_free_counters(struct evcnt *begin, int size)
{
	struct evcnt *end = (struct evcnt *)((char *)begin + size);

	for (; begin < end; ++begin)
		counter_u64_free(*begin);
}

static inline void
ena_reset_counters(struct evcnt *begin, int size)
{
	struct evcnt *end = (struct evcnt *)((char *)begin + size);

	for (; begin < end; ++begin)
		counter_u64_zero(*begin);
}

static void
ena_init_io_rings_common(struct ena_adapter *adapter, struct ena_ring *ring,
    uint16_t qid)
{

	ring->qid = qid;
	ring->adapter = adapter;
	ring->ena_dev = adapter->ena_dev;
}

static void
ena_init_io_rings(struct ena_adapter *adapter)
{
	struct ena_com_dev *ena_dev;
	struct ena_ring *txr, *rxr;
	struct ena_que *que;
	int i;

	ena_dev = adapter->ena_dev;

	for (i = 0; i < adapter->num_queues; i++) {
		txr = &adapter->tx_ring[i];
		rxr = &adapter->rx_ring[i];

		/* TX/RX common ring state */
		ena_init_io_rings_common(adapter, txr, i);
		ena_init_io_rings_common(adapter, rxr, i);

		/* TX specific ring state */
		txr->ring_size = adapter->tx_ring_size;
		txr->tx_max_header_size = ena_dev->tx_max_header_size;
		txr->tx_mem_queue_type = ena_dev->tx_mem_queue_type;
		txr->smoothed_interval =
		    ena_com_get_nonadaptive_moderation_interval_tx(ena_dev);

		/* Allocate a buf ring */
		txr->br = buf_ring_alloc(ena_buf_ring_size, M_DEVBUF,
		    M_WAITOK, &txr->ring_mtx);

		/* Alloc TX statistics. */
		ena_alloc_counters_tx(&txr->tx_stats, i);

		/* RX specific ring state */
		rxr->ring_size = adapter->rx_ring_size;
		rxr->smoothed_interval =
		    ena_com_get_nonadaptive_moderation_interval_rx(ena_dev);

		/* Alloc RX statistics. */
		ena_alloc_counters_rx(&rxr->rx_stats, i);

		/* Initialize locks */
		snprintf(txr->mtx_name, sizeof(txr->mtx_name), "%s:tx(%d)",
		    device_xname(adapter->pdev), i);
		snprintf(rxr->mtx_name, sizeof(rxr->mtx_name), "%s:rx(%d)",
		    device_xname(adapter->pdev), i);

		mutex_init(&txr->ring_mtx, MUTEX_DEFAULT, IPL_NET);
		mutex_init(&rxr->ring_mtx, MUTEX_DEFAULT, IPL_NET);

		que = &adapter->que[i];
		que->adapter = adapter;
		que->id = i;
		que->tx_ring = txr;
		que->rx_ring = rxr;

		txr->que = que;
		rxr->que = que;

		rxr->empty_rx_queue = 0;
	}
}

static void
ena_free_io_ring_resources(struct ena_adapter *adapter, unsigned int qid)
{
	struct ena_ring *txr = &adapter->tx_ring[qid];
	struct ena_ring *rxr = &adapter->rx_ring[qid];

	ena_free_counters((struct evcnt *)&txr->tx_stats,
	    sizeof(txr->tx_stats));
	ena_free_counters((struct evcnt *)&rxr->rx_stats,
	    sizeof(rxr->rx_stats));

	ENA_RING_MTX_LOCK(txr);
	drbr_free(txr->br, M_DEVBUF);
	ENA_RING_MTX_UNLOCK(txr);

	mutex_destroy(&txr->ring_mtx);
	mutex_destroy(&rxr->ring_mtx);
}

static void
ena_free_all_io_rings_resources(struct ena_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_queues; i++)
		ena_free_io_ring_resources(adapter, i);
}

#if 0
static int
ena_setup_tx_dma_tag(struct ena_adapter *adapter)
{
	int ret;

	/* Create DMA tag for Tx buffers */
	ret = bus_dma_tag_create(bus_get_dma_tag(adapter->pdev),
	    1, 0,				/* alignment, bounds */
	    ENA_DMA_BIT_MASK(adapter->dma_width), /* lowaddr of excl window */
	    BUS_SPACE_MAXADDR,			/* highaddr of excl window */
	    NULL, NULL,				/* filter, filterarg */
	    ENA_TSO_MAXSIZE,			/* maxsize */
	    adapter->max_tx_sgl_size - 1,	/* nsegments */
	    ENA_TSO_MAXSIZE,			/* maxsegsize */
	    0,					/* flags */
	    NULL,				/* lockfunc */
	    NULL,				/* lockfuncarg */
	    &adapter->tx_buf_tag);

	return (ret);
}
#endif

#if 0
static int
ena_setup_rx_dma_tag(struct ena_adapter *adapter)
{
	int ret;

	/* Create DMA tag for Rx buffers */
	ret = bus_dma_tag_create(bus_get_dma_tag(adapter->pdev), /* parent */
	    1, 0,				/* alignment, bounds */
	    ENA_DMA_BIT_MASK(adapter->dma_width), /* lowaddr of excl window */
	    BUS_SPACE_MAXADDR,			/* highaddr of excl window */
	    NULL, NULL,				/* filter, filterarg */
	    MJUM16BYTES,			/* maxsize */
	    adapter->max_rx_sgl_size,		/* nsegments */
	    MJUM16BYTES,			/* maxsegsize */
	    0,					/* flags */
	    NULL,				/* lockfunc */
	    NULL,				/* lockarg */
	    &adapter->rx_buf_tag);

	return (ret);
}
#endif


/**
 * ena_setup_tx_resources - allocate Tx resources (Descriptors)
 * @adapter: network interface device structure
 * @qid: queue index
 *
 * Returns 0 on success, or an error code on failure.
 **/
static int
ena_setup_tx_resources(struct ena_adapter *adapter, int qid)
{
	struct ena_que *que = &adapter->que[qid];
	struct ena_ring *tx_ring = que->tx_ring;
	int size, i, err;
#ifdef RSS
	cpuset_t cpu_mask;
#endif

	size = sizeof(struct ena_tx_buffer) * tx_ring->ring_size;
	tx_ring->tx_buffer_info = malloc(size, M_DEVBUF, M_WAITOK | M_ZERO);

	size = sizeof(uint16_t) * tx_ring->ring_size;
	tx_ring->free_tx_ids = malloc(size, M_DEVBUF, M_WAITOK | M_ZERO);

	/* Req id stack for TX OOO completions */
	for (i = 0; i < tx_ring->ring_size; i++)
		tx_ring->free_tx_ids[i] = i;

	/* Reset TX statistics. */
	ena_reset_counters((struct evcnt *)&tx_ring->tx_stats,
	    sizeof(tx_ring->tx_stats));

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;

	/* Make sure that drbr is empty */
	ENA_RING_MTX_LOCK(tx_ring);
	drbr_flush(adapter->ifp, tx_ring->br);
	ENA_RING_MTX_UNLOCK(tx_ring);

	/* ... and create the buffer DMA maps */
	for (i = 0; i < tx_ring->ring_size; i++) {
		err = bus_dmamap_create(adapter->sc_dmat,
		    ENA_TSO_MAXSIZE, adapter->max_tx_sgl_size - 1,
		    ENA_TSO_MAXSIZE, 0, 0,
		    &tx_ring->tx_buffer_info[i].map);
		if (unlikely(err != 0)) {
			ena_trace(ENA_ALERT,
			    "Unable to create Tx DMA map for buffer %d\n", i);
			goto err_buf_info_unmap;
		}
	}

	/* Allocate workqueues */
	int rc = workqueue_create(&tx_ring->enqueue_tq, "ena_tx_enq",
	    ena_deferred_mq_start, tx_ring, 0, IPL_NET, WQ_PERCPU | WQ_MPSAFE);
	if (unlikely(rc != 0)) {
		ena_trace(ENA_ALERT,
		    "Unable to create workqueue for enqueue task\n");
		i = tx_ring->ring_size;
		goto err_buf_info_unmap;
	}

#if 0
	/* RSS set cpu for thread */
#ifdef RSS
	CPU_SETOF(que->cpu, &cpu_mask);
	taskqueue_start_threads_cpuset(&tx_ring->enqueue_tq, 1, IPL_NET,
	    &cpu_mask, "%s tx_ring enq (bucket %d)",
	    device_xname(adapter->pdev), que->cpu);
#else /* RSS */
	taskqueue_start_threads(&tx_ring->enqueue_tq, 1, IPL_NET,
	    "%s txeq %d", device_xname(adapter->pdev), que->cpu);
#endif /* RSS */
#endif

	return (0);

err_buf_info_unmap:
	while (i--) {
		bus_dmamap_destroy(adapter->sc_dmat,
		    tx_ring->tx_buffer_info[i].map);
	}
	free(tx_ring->free_tx_ids, M_DEVBUF);
	tx_ring->free_tx_ids = NULL;
	free(tx_ring->tx_buffer_info, M_DEVBUF);
	tx_ring->tx_buffer_info = NULL;

	return (ENOMEM);
}

/**
 * ena_free_tx_resources - Free Tx Resources per Queue
 * @adapter: network interface device structure
 * @qid: queue index
 *
 * Free all transmit software resources
 **/
static void
ena_free_tx_resources(struct ena_adapter *adapter, int qid)
{
	struct ena_ring *tx_ring = &adapter->tx_ring[qid];

	workqueue_wait(tx_ring->enqueue_tq, &tx_ring->enqueue_task);
	workqueue_destroy(tx_ring->enqueue_tq);
	tx_ring->enqueue_tq = NULL;

	ENA_RING_MTX_LOCK(tx_ring);
	/* Flush buffer ring, */
	drbr_flush(adapter->ifp, tx_ring->br);

	/* Free buffer DMA maps, */
	for (int i = 0; i < tx_ring->ring_size; i++) {
		m_freem(tx_ring->tx_buffer_info[i].mbuf);
		tx_ring->tx_buffer_info[i].mbuf = NULL;
		bus_dmamap_unload(adapter->sc_dmat,
		    tx_ring->tx_buffer_info[i].map);
		bus_dmamap_destroy(adapter->sc_dmat,
		    tx_ring->tx_buffer_info[i].map);
	}
	ENA_RING_MTX_UNLOCK(tx_ring);

	/* And free allocated memory. */
	free(tx_ring->tx_buffer_info, M_DEVBUF);
	tx_ring->tx_buffer_info = NULL;

	free(tx_ring->free_tx_ids, M_DEVBUF);
	tx_ring->free_tx_ids = NULL;
}

/**
 * ena_setup_all_tx_resources - allocate all queues Tx resources
 * @adapter: network interface device structure
 *
 * Returns 0 on success, or an error code on failure.
 **/
static int
ena_setup_all_tx_resources(struct ena_adapter *adapter)
{
	int i, rc;

	for (i = 0; i < adapter->num_queues; i++) {
		rc = ena_setup_tx_resources(adapter, i);
		if (rc != 0) {
			device_printf(adapter->pdev,
			    "Allocation for Tx Queue %u failed\n", i);
			goto err_setup_tx;
		}
	}

	return (0);

err_setup_tx:
	/* Rewind the index freeing the rings as we go */
	while (i--)
		ena_free_tx_resources(adapter, i);
	return (rc);
}

/**
 * ena_free_all_tx_resources - Free Tx Resources for All Queues
 * @adapter: network interface device structure
 *
 * Free all transmit software resources
 **/
static void
ena_free_all_tx_resources(struct ena_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_queues; i++)
		ena_free_tx_resources(adapter, i);
}

static inline int
validate_rx_req_id(struct ena_ring *rx_ring, uint16_t req_id)
{
	if (likely(req_id < rx_ring->ring_size))
		return (0);

	device_printf(rx_ring->adapter->pdev, "Invalid rx req_id: %hu\n",
	    req_id);
	counter_u64_add(rx_ring->rx_stats.bad_req_id, 1);

	/* Trigger device reset */
	rx_ring->adapter->reset_reason = ENA_REGS_RESET_INV_RX_REQ_ID;
	rx_ring->adapter->trigger_reset = true;

	return (EFAULT);
}

/**
 * ena_setup_rx_resources - allocate Rx resources (Descriptors)
 * @adapter: network interface device structure
 * @qid: queue index
 *
 * Returns 0 on success, or an error code on failure.
 **/
static int
ena_setup_rx_resources(struct ena_adapter *adapter, unsigned int qid)
{
	struct ena_que *que = &adapter->que[qid];
	struct ena_ring *rx_ring = que->rx_ring;
	int size, err, i;
#ifdef RSS
	cpuset_t cpu_mask;
#endif

	size = sizeof(struct ena_rx_buffer) * rx_ring->ring_size;

	/*
	 * Alloc an extra element, so that in the rx path we can always
	 * prefetch rx_info + 1.
	 */
	size += sizeof(struct ena_rx_buffer);

	rx_ring->rx_buffer_info = malloc(size, M_DEVBUF, M_WAITOK | M_ZERO);

	size = sizeof(uint16_t) * rx_ring->ring_size;
	rx_ring->free_rx_ids = malloc(size, M_DEVBUF, M_WAITOK);

	for (i = 0; i < rx_ring->ring_size; i++)
		rx_ring->free_rx_ids[i] = i;

	/* Reset RX statistics. */
	ena_reset_counters((struct evcnt *)&rx_ring->rx_stats,
	    sizeof(rx_ring->rx_stats));

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;

	/* ... and create the buffer DMA maps */
	for (i = 0; i < rx_ring->ring_size; i++) {
		err = bus_dmamap_create(adapter->sc_dmat,
		    MJUM16BYTES, adapter->max_rx_sgl_size, MJUM16BYTES,
		    0, 0,
		    &(rx_ring->rx_buffer_info[i].map));
		if (err != 0) {
			ena_trace(ENA_ALERT,
			    "Unable to create Rx DMA map for buffer %d\n", i);
			goto err_buf_info_unmap;
		}
	}

#ifdef LRO
	/* Create LRO for the ring */
	if ((adapter->ifp->if_capenable & IFCAP_LRO) != 0) {
		int err = tcp_lro_init(&rx_ring->lro);
		if (err != 0) {
			device_printf(adapter->pdev,
			    "LRO[%d] Initialization failed!\n", qid);
		} else {
			ena_trace(ENA_INFO,
			    "RX Soft LRO[%d] Initialized\n", qid);
			rx_ring->lro.ifp = adapter->ifp;
		}
	}
#endif

	/* Allocate workqueues */
	int rc = workqueue_create(&rx_ring->cmpl_tq, "ena_rx_comp",
	    ena_deferred_rx_cleanup, rx_ring, 0, IPL_NET,
	    WQ_PERCPU | WQ_MPSAFE);
	if (unlikely(rc != 0)) {
		ena_trace(ENA_ALERT,
		    "Unable to create workqueue for RX completion task\n");
		goto err_buf_info_unmap;
	}

#if 0
	/* RSS set cpu for thread */
#ifdef RSS
	CPU_SETOF(que->cpu, &cpu_mask);
	taskqueue_start_threads_cpuset(&rx_ring->cmpl_tq, 1, IPL_NET, &cpu_mask,
	    "%s rx_ring cmpl (bucket %d)",
	    device_xname(adapter->pdev), que->cpu);
#else
	taskqueue_start_threads(&rx_ring->cmpl_tq, 1, IPL_NET,
	    "%s rx_ring cmpl %d", device_xname(adapter->pdev), que->cpu);
#endif
#endif

	return (0);

err_buf_info_unmap:
	while (i--) {
		bus_dmamap_destroy(adapter->sc_dmat,
		    rx_ring->rx_buffer_info[i].map);
	}

	free(rx_ring->free_rx_ids, M_DEVBUF);
	rx_ring->free_rx_ids = NULL;
	free(rx_ring->rx_buffer_info, M_DEVBUF);
	rx_ring->rx_buffer_info = NULL;
	return (ENOMEM);
}

/**
 * ena_free_rx_resources - Free Rx Resources
 * @adapter: network interface device structure
 * @qid: queue index
 *
 * Free all receive software resources
 **/
static void
ena_free_rx_resources(struct ena_adapter *adapter, unsigned int qid)
{
	struct ena_ring *rx_ring = &adapter->rx_ring[qid];

	workqueue_wait(rx_ring->cmpl_tq, &rx_ring->cmpl_task);
	workqueue_destroy(rx_ring->cmpl_tq);
	rx_ring->cmpl_tq = NULL;

	/* Free buffer DMA maps, */
	for (int i = 0; i < rx_ring->ring_size; i++) {
		m_freem(rx_ring->rx_buffer_info[i].mbuf);
		rx_ring->rx_buffer_info[i].mbuf = NULL;
		bus_dmamap_unload(adapter->sc_dmat,
		    rx_ring->rx_buffer_info[i].map);
		bus_dmamap_destroy(adapter->sc_dmat,
		    rx_ring->rx_buffer_info[i].map);
	}

#ifdef LRO
	/* free LRO resources, */
	tcp_lro_free(&rx_ring->lro);
#endif

	/* free allocated memory */
	free(rx_ring->rx_buffer_info, M_DEVBUF);
	rx_ring->rx_buffer_info = NULL;

	free(rx_ring->free_rx_ids, M_DEVBUF);
	rx_ring->free_rx_ids = NULL;
}

/**
 * ena_setup_all_rx_resources - allocate all queues Rx resources
 * @adapter: network interface device structure
 *
 * Returns 0 on success, or an error code on failure.
 **/
static int
ena_setup_all_rx_resources(struct ena_adapter *adapter)
{
	int i, rc = 0;

	for (i = 0; i < adapter->num_queues; i++) {
		rc = ena_setup_rx_resources(adapter, i);
		if (rc != 0) {
			device_printf(adapter->pdev,
			    "Allocation for Rx Queue %u failed\n", i);
			goto err_setup_rx;
		}
	}
	return (0);

err_setup_rx:
	/* rewind the index freeing the rings as we go */
	while (i--)
		ena_free_rx_resources(adapter, i);
	return (rc);
}

/**
 * ena_free_all_rx_resources - Free Rx resources for all queues
 * @adapter: network interface device structure
 *
 * Free all receive software resources
 **/
static void
ena_free_all_rx_resources(struct ena_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_queues; i++)
		ena_free_rx_resources(adapter, i);
}

static inline int
ena_alloc_rx_mbuf(struct ena_adapter *adapter,
    struct ena_ring *rx_ring, struct ena_rx_buffer *rx_info)
{
	struct ena_com_buf *ena_buf;
	int error;
	int mlen;

	/* If the previously allocated frag is still unused, keep it. */
	if (unlikely(rx_info->mbuf != NULL))
		return (0);

	/*
	 * Prefer a 16KB jumbo cluster; if that fails, fall back to a
	 * standard cluster and account the failed jumbo allocation.
	 */
	rx_info->mbuf = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MJUM16BYTES);

	if (unlikely(rx_info->mbuf == NULL)) {
		counter_u64_add(rx_ring->rx_stats.mjum_alloc_fail, 1);
		rx_info->mbuf = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
		if (unlikely(rx_info->mbuf == NULL)) {
			counter_u64_add(rx_ring->rx_stats.mbuf_alloc_fail, 1);
			return (ENOMEM);
		}
		mlen = MCLBYTES;
	} else {
		mlen = MJUM16BYTES;
	}
	/* Set mbuf length */
	rx_info->mbuf->m_pkthdr.len = rx_info->mbuf->m_len = mlen;

	/* Map packets for DMA */
	ena_trace(ENA_DBG | ENA_RSC | ENA_RXPTH,
	    "Using tag %p for buffers' DMA mapping, mbuf %p len: %d",
	    adapter->sc_dmat, rx_info->mbuf, rx_info->mbuf->m_len);
	error = bus_dmamap_load_mbuf(adapter->sc_dmat, rx_info->map,
	    rx_info->mbuf, BUS_DMA_NOWAIT);
	if (unlikely((error != 0) || (rx_info->map->dm_nsegs != 1))) {
		ena_trace(ENA_WARNING, "failed to map mbuf, error: %d, "
		    "nsegs: %d\n", error, rx_info->map->dm_nsegs);
		counter_u64_add(rx_ring->rx_stats.dma_mapping_err, 1);
		goto exit;
	}

	bus_dmamap_sync(adapter->sc_dmat, rx_info->map, 0,
	    rx_info->map->dm_mapsize, BUS_DMASYNC_PREREAD);

	ena_buf = &rx_info->ena_buf;
	ena_buf->paddr = rx_info->map->dm_segs[0].ds_addr;
	ena_buf->len = mlen;

	ena_trace(ENA_DBG | ENA_RSC | ENA_RXPTH,
	    "ALLOC RX BUF: mbuf %p, rx_info %p, len %d, paddr %#jx\n",
	    rx_info->mbuf, rx_info, ena_buf->len, (uintmax_t)ena_buf->paddr);

	return (0);

exit:
	m_freem(rx_info->mbuf);
	rx_info->mbuf = NULL;
	return (EFAULT);
}

static void
ena_free_rx_mbuf(struct ena_adapter *adapter, struct ena_ring *rx_ring,
    struct ena_rx_buffer *rx_info)
{

	if (rx_info->mbuf == NULL) {
		ena_trace(ENA_WARNING, "Trying to free unallocated buffer\n");
		return;
	}

	bus_dmamap_unload(adapter->sc_dmat, rx_info->map);
	m_freem(rx_info->mbuf);
	rx_info->mbuf = NULL;
}

/**
 * ena_refill_rx_bufs - Refills ring with descriptors
 * @rx_ring: the ring which we want to feed with free descriptors
 * @num: number of descriptors to refill
 * Refills the ring with newly allocated DMA-mapped mbufs for receiving
 **/
static int
ena_refill_rx_bufs(struct ena_ring *rx_ring, uint32_t num)
{
	struct ena_adapter *adapter = rx_ring->adapter;
	uint16_t next_to_use, req_id;
	uint32_t i;
	int rc;

	ena_trace(ENA_DBG | ENA_RXPTH | ENA_RSC, "refill qid: %d",
	    rx_ring->qid);

	next_to_use = rx_ring->next_to_use;

	for (i = 0; i < num; i++) {
		struct ena_rx_buffer *rx_info;

		ena_trace(ENA_DBG | ENA_RXPTH | ENA_RSC,
		    "RX buffer - next to use: %d", next_to_use);

		req_id = rx_ring->free_rx_ids[next_to_use];
		rc = validate_rx_req_id(rx_ring, req_id);
		if (unlikely(rc != 0))
			break;

		rx_info = &rx_ring->rx_buffer_info[req_id];

		rc = ena_alloc_rx_mbuf(adapter, rx_ring, rx_info);
		if (unlikely(rc != 0)) {
			ena_trace(ENA_WARNING,
			    "failed to alloc buffer for rx queue %d\n",
			    rx_ring->qid);
			break;
		}
		rc = ena_com_add_single_rx_desc(rx_ring->ena_com_io_sq,
		    &rx_info->ena_buf, req_id);
		if (unlikely(rc != 0)) {
			ena_trace(ENA_WARNING,
			    "failed to add buffer for rx queue %d\n",
			    rx_ring->qid);
			break;
		}
		next_to_use = ENA_RX_RING_IDX_NEXT(next_to_use,
		    rx_ring->ring_size);
	}

	if (unlikely(i < num)) {
		counter_u64_add(rx_ring->rx_stats.refil_partial, 1);
		ena_trace(ENA_WARNING,
		    "refilled rx qid %d with only %d mbufs (from %d)\n",
		    rx_ring->qid, i, num);
	}

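	/*
	 * Ensure the new descriptors are globally visible before the
	 * doorbell write hands them over to the device.
	 */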
	if (likely(i != 0)) {
		wmb();
		ena_com_write_sq_doorbell(rx_ring->ena_com_io_sq);
	}
	rx_ring->next_to_use = next_to_use;
	return (i);
}

static void
ena_free_rx_bufs(struct ena_adapter *adapter, unsigned int qid)
{
	struct ena_ring *rx_ring = &adapter->rx_ring[qid];
	unsigned int i;

	for (i = 0; i < rx_ring->ring_size; i++) {
		struct ena_rx_buffer *rx_info = &rx_ring->rx_buffer_info[i];

		if (rx_info->mbuf != NULL)
			ena_free_rx_mbuf(adapter, rx_ring, rx_info);
	}
}

/**
 * ena_refill_all_rx_bufs - allocate all queues Rx buffers
 * @adapter: network interface device structure
 *
 */
static void
ena_refill_all_rx_bufs(struct ena_adapter *adapter)
{
	struct ena_ring *rx_ring;
	int i, rc, bufs_num;

	for (i = 0; i < adapter->num_queues; i++) {
		rx_ring = &adapter->rx_ring[i];
		bufs_num = rx_ring->ring_size - 1;
		rc = ena_refill_rx_bufs(rx_ring, bufs_num);

		if (unlikely(rc != bufs_num))
			ena_trace(ENA_WARNING, "refilling Queue %d failed. "
			    "Allocated %d buffers from: %d\n", i, rc, bufs_num);
	}
}

static void
ena_free_all_rx_bufs(struct ena_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_queues; i++)
		ena_free_rx_bufs(adapter, i);
}

/**
 * ena_free_tx_bufs - Free Tx Buffers per Queue
 * @adapter: network interface device structure
 * @qid: queue index
 **/
static void
ena_free_tx_bufs(struct ena_adapter *adapter, unsigned int qid)
{
	bool print_once = true;
	struct ena_ring *tx_ring = &adapter->tx_ring[qid];

	ENA_RING_MTX_LOCK(tx_ring);
	for (int i = 0; i < tx_ring->ring_size; i++) {
		struct ena_tx_buffer *tx_info = &tx_ring->tx_buffer_info[i];

		if (tx_info->mbuf == NULL)
			continue;

		if (print_once) {
			device_printf(adapter->pdev,
			    "free uncompleted tx mbuf qid %d idx 0x%x",
			    qid, i);
			print_once = false;
		} else {
			ena_trace(ENA_DBG,
			    "free uncompleted tx mbuf qid %d idx 0x%x",
			    qid, i);
		}

		bus_dmamap_unload(adapter->sc_dmat, tx_info->map);
		m_free(tx_info->mbuf);
		tx_info->mbuf = NULL;
	}
	ENA_RING_MTX_UNLOCK(tx_ring);
}

static void
ena_free_all_tx_bufs(struct ena_adapter *adapter)
{

	for (int i = 0; i < adapter->num_queues; i++)
		ena_free_tx_bufs(adapter, i);
}

static void
ena_destroy_all_tx_queues(struct ena_adapter *adapter)
{
	uint16_t ena_qid;
	int i;

	for (i = 0; i < adapter->num_queues; i++) {
		ena_qid = ENA_IO_TXQ_IDX(i);
		ena_com_destroy_io_queue(adapter->ena_dev, ena_qid);
	}
}

static void
ena_destroy_all_rx_queues(struct ena_adapter *adapter)
{
	uint16_t ena_qid;
	int i;

	for (i = 0; i < adapter->num_queues; i++) {
		ena_qid = ENA_IO_RXQ_IDX(i);
		ena_com_destroy_io_queue(adapter->ena_dev, ena_qid);
	}
}

static void
ena_destroy_all_io_queues(struct ena_adapter *adapter)
{
	ena_destroy_all_tx_queues(adapter);
	ena_destroy_all_rx_queues(adapter);
}

static inline int
validate_tx_req_id(struct ena_ring *tx_ring, uint16_t req_id)
{
	struct ena_adapter *adapter = tx_ring->adapter;
	struct ena_tx_buffer *tx_info = NULL;

	if (likely(req_id < tx_ring->ring_size)) {
		tx_info = &tx_ring->tx_buffer_info[req_id];
		if (tx_info->mbuf != NULL)
			return (0);
	}

	/*
	 * tx_info is still NULL when req_id was out of range, so it must
	 * not be dereferenced in that case.
	 */
	if (tx_info == NULL)
		device_printf(adapter->pdev, "Invalid req_id: %hu\n", req_id);
	else
		device_printf(adapter->pdev,
		    "tx_info doesn't have valid mbuf\n");

	counter_u64_add(tx_ring->tx_stats.bad_req_id, 1);

	return (EFAULT);
}

static int
ena_create_io_queues(struct ena_adapter *adapter)
{
	struct ena_com_dev *ena_dev = adapter->ena_dev;
	struct ena_com_create_io_ctx ctx;
	struct ena_ring *ring;
	uint16_t ena_qid;
	uint32_t msix_vector;
	int rc, i;

	/* Create TX queues */
	for (i = 0; i < adapter->num_queues; i++) {
		msix_vector = ENA_IO_IRQ_IDX(i);
		ena_qid = ENA_IO_TXQ_IDX(i);
		ctx.mem_queue_type = ena_dev->tx_mem_queue_type;
		ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_TX;
		ctx.queue_size = adapter->tx_ring_size;
		ctx.msix_vector = msix_vector;
		ctx.qid = ena_qid;
		rc = ena_com_create_io_queue(ena_dev, &ctx);
		if (rc != 0) {
			device_printf(adapter->pdev,
			    "Failed to create io TX queue #%d rc: %d\n", i, rc);
			goto err_tx;
		}
		ring = &adapter->tx_ring[i];
		rc = ena_com_get_io_handlers(ena_dev, ena_qid,
		    &ring->ena_com_io_sq,
		    &ring->ena_com_io_cq);
		if (rc != 0) {
			device_printf(adapter->pdev,
			    "Failed to get TX queue handlers. TX queue num"
			    " %d rc: %d\n", i, rc);
			ena_com_destroy_io_queue(ena_dev, ena_qid);
			goto err_tx;
		}
	}

	/* Create RX queues */
	for (i = 0; i < adapter->num_queues; i++) {
		msix_vector = ENA_IO_IRQ_IDX(i);
		ena_qid = ENA_IO_RXQ_IDX(i);
		ctx.mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
		ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_RX;
		ctx.queue_size = adapter->rx_ring_size;
		ctx.msix_vector = msix_vector;
		ctx.qid = ena_qid;
		rc = ena_com_create_io_queue(ena_dev, &ctx);
		if (unlikely(rc != 0)) {
			device_printf(adapter->pdev,
			    "Failed to create io RX queue[%d] rc: %d\n", i, rc);
			goto err_rx;
		}

		ring = &adapter->rx_ring[i];
		rc = ena_com_get_io_handlers(ena_dev, ena_qid,
		    &ring->ena_com_io_sq,
		    &ring->ena_com_io_cq);
		if (unlikely(rc != 0)) {
			device_printf(adapter->pdev,
			    "Failed to get RX queue handlers. RX queue num"
			    " %d rc: %d\n", i, rc);
			ena_com_destroy_io_queue(ena_dev, ena_qid);
			goto err_rx;
		}
	}

	return (0);

err_rx:
	while (i--)
		ena_com_destroy_io_queue(ena_dev, ENA_IO_RXQ_IDX(i));
	i = adapter->num_queues;
err_tx:
	while (i--)
		ena_com_destroy_io_queue(ena_dev, ENA_IO_TXQ_IDX(i));

	return (ENXIO);
}

/**
 * ena_tx_cleanup - clear sent packets and corresponding descriptors
 * @tx_ring: ring for which we want to clean packets
 *
 * Once packets are sent, we ask the device in a loop for no longer used
 * descriptors.  We find the related mbuf chain in a map (index in an array)
 * and free it, then update the ring state.
 * This is performed in an "endless" loop, updating the ring pointers every
 * TX_COMMIT descriptors.  The first check for a free descriptor is performed
 * before the actual loop, and is then repeated at the loop end.
 **/
static int
ena_tx_cleanup(struct ena_ring *tx_ring)
{
	struct ena_adapter *adapter;
	struct ena_com_io_cq *io_cq;
	uint16_t next_to_clean;
	uint16_t req_id;
	uint16_t ena_qid;
	unsigned int total_done = 0;
	int rc;
	int commit = TX_COMMIT;
	int budget = TX_BUDGET;
	int work_done;

	adapter = tx_ring->que->adapter;
	ena_qid = ENA_IO_TXQ_IDX(tx_ring->que->id);
	io_cq = &adapter->ena_dev->io_cq_queues[ena_qid];
	next_to_clean = tx_ring->next_to_clean;

	do {
		struct ena_tx_buffer *tx_info;
		struct mbuf *mbuf;

		rc = ena_com_tx_comp_req_id_get(io_cq, &req_id);
		if (unlikely(rc != 0))
			break;

		rc = validate_tx_req_id(tx_ring, req_id);
		if (unlikely(rc != 0))
			break;

		tx_info = &tx_ring->tx_buffer_info[req_id];

		mbuf = tx_info->mbuf;

		tx_info->mbuf = NULL;
		bintime_clear(&tx_info->timestamp);

		if (likely(tx_info->num_of_bufs != 0)) {
			/* Map is no longer required */
			bus_dmamap_unload(adapter->sc_dmat, tx_info->map);
		}

		ena_trace(ENA_DBG | ENA_TXPTH, "tx: q %d mbuf %p completed",
		    tx_ring->qid, mbuf);

		m_freem(mbuf);

		total_done += tx_info->tx_descs;

		tx_ring->free_tx_ids[next_to_clean] = req_id;
		next_to_clean = ENA_TX_RING_IDX_NEXT(next_to_clean,
		    tx_ring->ring_size);

		if (unlikely(--commit == 0)) {
			commit = TX_COMMIT;
			/* update ring state every TX_COMMIT descriptor */
			tx_ring->next_to_clean = next_to_clean;
			ena_com_comp_ack(
			    &adapter->ena_dev->io_sq_queues[ena_qid],
			    total_done);
			ena_com_update_dev_comp_head(io_cq);
			total_done = 0;
		}
	} while (likely(--budget));

	work_done = TX_BUDGET - budget;

	ena_trace(ENA_DBG | ENA_TXPTH, "tx: q %d done. total pkts: %d",
	    tx_ring->qid, work_done);

	/* If there is still something to commit update ring state */
	if (likely(commit != TX_COMMIT)) {
		tx_ring->next_to_clean = next_to_clean;
		ena_com_comp_ack(&adapter->ena_dev->io_sq_queues[ena_qid],
		    total_done);
		ena_com_update_dev_comp_head(io_cq);
	}

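	/*
	 * Kick the deferred transmit task at most once; task_pending
	 * guards against enqueueing the same work item twice.
	 */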
	if (atomic_cas_uint(&tx_ring->task_pending, 0, 1) == 0)
		workqueue_enqueue(tx_ring->enqueue_tq,
		    &tx_ring->enqueue_task, NULL);

	return (work_done);
}

#if 0
static void
ena_rx_hash_mbuf(struct ena_ring *rx_ring, struct ena_com_rx_ctx *ena_rx_ctx,
    struct mbuf *mbuf)
{
	struct ena_adapter *adapter = rx_ring->adapter;

	if (likely(adapter->rss_support)) {
		mbuf->m_pkthdr.flowid = ena_rx_ctx->hash;

		if (ena_rx_ctx->frag &&
		    (ena_rx_ctx->l3_proto != ENA_ETH_IO_L3_PROTO_UNKNOWN)) {
			M_HASHTYPE_SET(mbuf, M_HASHTYPE_OPAQUE_HASH);
			return;
		}

		switch (ena_rx_ctx->l3_proto) {
		case ENA_ETH_IO_L3_PROTO_IPV4:
			switch (ena_rx_ctx->l4_proto) {
			case ENA_ETH_IO_L4_PROTO_TCP:
				M_HASHTYPE_SET(mbuf, M_HASHTYPE_RSS_TCP_IPV4);
				break;
			case ENA_ETH_IO_L4_PROTO_UDP:
				M_HASHTYPE_SET(mbuf, M_HASHTYPE_RSS_UDP_IPV4);
				break;
			default:
				M_HASHTYPE_SET(mbuf, M_HASHTYPE_RSS_IPV4);
			}
			break;
		case ENA_ETH_IO_L3_PROTO_IPV6:
			switch (ena_rx_ctx->l4_proto) {
			case ENA_ETH_IO_L4_PROTO_TCP:
				M_HASHTYPE_SET(mbuf, M_HASHTYPE_RSS_TCP_IPV6);
				break;
			case ENA_ETH_IO_L4_PROTO_UDP:
				M_HASHTYPE_SET(mbuf, M_HASHTYPE_RSS_UDP_IPV6);
				break;
			default:
				M_HASHTYPE_SET(mbuf, M_HASHTYPE_RSS_IPV6);
			}
			break;
		case ENA_ETH_IO_L3_PROTO_UNKNOWN:
			M_HASHTYPE_SET(mbuf, M_HASHTYPE_NONE);
			break;
		default:
			M_HASHTYPE_SET(mbuf, M_HASHTYPE_OPAQUE_HASH);
		}
	} else {
		mbuf->m_pkthdr.flowid = rx_ring->qid;
		M_HASHTYPE_SET(mbuf, M_HASHTYPE_NONE);
	}
}
#endif

/**
 * ena_rx_mbuf - assemble mbuf from descriptors
 * @rx_ring: ring for which we want to clean packets
 * @ena_bufs: buffer info
 * @ena_rx_ctx: metadata for this packet(s)
 * @next_to_clean: ring pointer, will be updated only upon success
 *
 **/
static struct mbuf *
ena_rx_mbuf(struct ena_ring *rx_ring, struct ena_com_rx_buf_info *ena_bufs,
    struct ena_com_rx_ctx *ena_rx_ctx, uint16_t *next_to_clean)
{
	struct mbuf *mbuf;
	struct ena_rx_buffer *rx_info;
	struct ena_adapter *adapter;
	unsigned int descs = ena_rx_ctx->descs;
	uint16_t ntc, len, req_id, buf = 0;

	ntc = *next_to_clean;
	adapter = rx_ring->adapter;
	rx_info = &rx_ring->rx_buffer_info[ntc];

	if (unlikely(rx_info->mbuf == NULL)) {
		device_printf(adapter->pdev, "NULL mbuf in rx_info\n");
		return (NULL);
	}

	len = ena_bufs[buf].len;
	req_id = ena_bufs[buf].req_id;
	rx_info = &rx_ring->rx_buffer_info[req_id];

	ena_trace(ENA_DBG | ENA_RXPTH, "rx_info %p, mbuf %p, paddr %jx",
	    rx_info, rx_info->mbuf, (uintmax_t)rx_info->ena_buf.paddr);

	mbuf = rx_info->mbuf;
	KASSERT(mbuf->m_flags & M_PKTHDR);
	mbuf->m_pkthdr.len = len;
	mbuf->m_len = len;
	m_set_rcvif(mbuf, rx_ring->que->adapter->ifp);

	/* Fill mbuf with hash key and its interpretation for optimization */
#if 0
	ena_rx_hash_mbuf(rx_ring, ena_rx_ctx, mbuf);
#endif

	ena_trace(ENA_DBG | ENA_RXPTH, "rx mbuf 0x%p, flags=0x%x, len: %d",
	    mbuf, mbuf->m_flags, mbuf->m_pkthdr.len);

	/* DMA address is not needed anymore, unmap it */
	bus_dmamap_unload(rx_ring->adapter->sc_dmat, rx_info->map);

	rx_info->mbuf = NULL;
	rx_ring->free_rx_ids[ntc] = req_id;
	ntc = ENA_RX_RING_IDX_NEXT(ntc, rx_ring->ring_size);

	/*
	 * While we have more than one descriptor for the received packet,
	 * append the other mbufs to the main one.
	 */
	while (--descs) {
		++buf;
		len = ena_bufs[buf].len;
		req_id = ena_bufs[buf].req_id;
		rx_info = &rx_ring->rx_buffer_info[req_id];

		if (unlikely(rx_info->mbuf == NULL)) {
			device_printf(adapter->pdev, "NULL mbuf in rx_info\n");
			/*
			 * If one of the required mbufs was not allocated
			 * yet, we can break here.
			 * All earlier used descriptors will be reallocated
			 * later and unused mbufs can be reused.
			 * The next_to_clean pointer will not be updated in
			 * case of an error, so the caller should advance it
			 * manually in the error handling routine to keep it
			 * up to date with the hw ring.
			 */
			m_freem(mbuf);
			return (NULL);
		}

		if (unlikely(m_append(mbuf, len, rx_info->mbuf->m_data) == 0)) {
			counter_u64_add(rx_ring->rx_stats.mbuf_alloc_fail, 1);
			ena_trace(ENA_WARNING, "Failed to append Rx mbuf %p",
			    mbuf);
		}

		ena_trace(ENA_DBG | ENA_RXPTH,
		    "rx mbuf updated. len %d", mbuf->m_pkthdr.len);

		/* Free already appended mbuf, it won't be useful anymore */
		bus_dmamap_unload(rx_ring->adapter->sc_dmat, rx_info->map);
		m_freem(rx_info->mbuf);
		rx_info->mbuf = NULL;

		rx_ring->free_rx_ids[ntc] = req_id;
		ntc = ENA_RX_RING_IDX_NEXT(ntc, rx_ring->ring_size);
	}

	*next_to_clean = ntc;

	return (mbuf);
}

/**
 * ena_rx_checksum - indicate in mbuf if hw indicated a good cksum
 **/
static inline void
ena_rx_checksum(struct ena_ring *rx_ring, struct ena_com_rx_ctx *ena_rx_ctx,
    struct mbuf *mbuf)
{

	/* IPv4 */
	if (ena_rx_ctx->l3_proto == ENA_ETH_IO_L3_PROTO_IPV4) {
		mbuf->m_pkthdr.csum_flags |= M_CSUM_IPv4;
		if (ena_rx_ctx->l3_csum_err) {
			/* ipv4 checksum error */
			mbuf->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
			counter_u64_add(rx_ring->rx_stats.bad_csum, 1);
			ena_trace(ENA_DBG, "RX IPv4 header checksum error");
			return;
		}

		/* TCP/UDP */
		if ((ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_TCP) ||
		    (ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_UDP)) {
			mbuf->m_pkthdr.csum_flags |=
			    (ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_TCP) ?
			    M_CSUM_TCPv4 : M_CSUM_UDPv4;
			if (ena_rx_ctx->l4_csum_err) {
				/* TCP/UDP checksum error */
				mbuf->m_pkthdr.csum_flags |=
				    M_CSUM_TCP_UDP_BAD;
				counter_u64_add(rx_ring->rx_stats.bad_csum, 1);
				ena_trace(ENA_DBG, "RX L4 checksum error");
			}
		}
	}
	/* IPv6 */
	else if (ena_rx_ctx->l3_proto == ENA_ETH_IO_L3_PROTO_IPV6) {
		/* TCP/UDP */
		if ((ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_TCP) ||
		    (ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_UDP)) {
			mbuf->m_pkthdr.csum_flags |=
			    (ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_TCP) ?
			    M_CSUM_TCPv6 : M_CSUM_UDPv6;
			if (ena_rx_ctx->l4_csum_err) {
				/* TCP/UDP checksum error */
				mbuf->m_pkthdr.csum_flags |=
				    M_CSUM_TCP_UDP_BAD;
				counter_u64_add(rx_ring->rx_stats.bad_csum, 1);
				ena_trace(ENA_DBG, "RX L4 checksum error");
			}
		}
	}
}

static void
ena_deferred_rx_cleanup(struct work *wk, void *arg)
{
	struct ena_ring *rx_ring = arg;
	int budget = CLEAN_BUDGET;

	atomic_swap_uint(&rx_ring->task_pending, 0);

	ENA_RING_MTX_LOCK(rx_ring);
	/*
	 * If deferred task was executed, perform cleanup of all awaiting
	 * descs (or until given budget is depleted to avoid infinite loop).
	 */
	while (likely(budget--)) {
		if (ena_rx_cleanup(rx_ring) == 0)
			break;
	}
	ENA_RING_MTX_UNLOCK(rx_ring);
}

/**
 * ena_rx_cleanup - handle rx irq
 * @rx_ring: ring for which the irq is being handled
 **/
static int
ena_rx_cleanup(struct ena_ring *rx_ring)
{
	struct ena_adapter *adapter;
	struct mbuf *mbuf;
	struct ena_com_rx_ctx ena_rx_ctx;
	struct ena_com_io_cq *io_cq;
	struct ena_com_io_sq *io_sq;
	struct ifnet *ifp;
	uint16_t ena_qid;
	uint16_t next_to_clean;
	uint32_t refill_required;
	uint32_t refill_threshold;
	uint32_t do_if_input = 0;
	unsigned int qid;
	int rc, i;
	int budget = RX_BUDGET;

	adapter = rx_ring->que->adapter;
	ifp = adapter->ifp;
	qid = rx_ring->que->id;
	ena_qid = ENA_IO_RXQ_IDX(qid);
	io_cq = &adapter->ena_dev->io_cq_queues[ena_qid];
	io_sq = &adapter->ena_dev->io_sq_queues[ena_qid];
	next_to_clean = rx_ring->next_to_clean;

	ena_trace(ENA_DBG, "rx: qid %d", qid);

	do {
		ena_rx_ctx.ena_bufs = rx_ring->ena_bufs;
		ena_rx_ctx.max_bufs = adapter->max_rx_sgl_size;
		ena_rx_ctx.descs = 0;
		rc = ena_com_rx_pkt(io_cq, io_sq, &ena_rx_ctx);

		if (unlikely(rc != 0))
			goto error;

		if (unlikely(ena_rx_ctx.descs == 0))
			break;

		ena_trace(ENA_DBG | ENA_RXPTH, "rx: q %d got packet from ena. "
		    "descs #: %d l3 proto %d l4 proto %d hash: %x",
		    rx_ring->qid, ena_rx_ctx.descs, ena_rx_ctx.l3_proto,
		    ena_rx_ctx.l4_proto, ena_rx_ctx.hash);

		/* Receive mbuf from the ring */
		mbuf = ena_rx_mbuf(rx_ring, rx_ring->ena_bufs,
		    &ena_rx_ctx, &next_to_clean);

		/* Exit if we failed to retrieve a buffer */
		if (unlikely(mbuf == NULL)) {
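			/*
			 * Return the req_ids of the failed packet's
			 * descriptors to the free list so their buffers
			 * are picked up again by the next refill.
			 */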
			for (i = 0; i < ena_rx_ctx.descs; ++i) {
				rx_ring->free_rx_ids[next_to_clean] =
				    rx_ring->ena_bufs[i].req_id;
				next_to_clean =
				    ENA_RX_RING_IDX_NEXT(next_to_clean,
				    rx_ring->ring_size);
			}
			break;
		}

		if (((ifp->if_capenable & IFCAP_CSUM_IPv4_Rx) != 0) ||
		    ((ifp->if_capenable & IFCAP_CSUM_TCPv4_Rx) != 0) ||
		    ((ifp->if_capenable & IFCAP_CSUM_UDPv4_Rx) != 0) ||
		    ((ifp->if_capenable & IFCAP_CSUM_TCPv6_Rx) != 0) ||
		    ((ifp->if_capenable & IFCAP_CSUM_UDPv6_Rx) != 0)) {
			ena_rx_checksum(rx_ring, &ena_rx_ctx, mbuf);
		}

		counter_enter();
		counter_u64_add_protected(rx_ring->rx_stats.bytes,
		    mbuf->m_pkthdr.len);
		counter_u64_add_protected(adapter->hw_stats.rx_bytes,
		    mbuf->m_pkthdr.len);
		counter_exit();
		/*
		 * LRO is only for IP/TCP packets and TCP checksum of the
		 * packet should be computed by hardware.
		 */
		do_if_input = 1;
#ifdef LRO
		if (((ifp->if_capenable & IFCAP_LRO) != 0) &&
		    ((mbuf->m_pkthdr.csum_flags & CSUM_IP_VALID) != 0) &&
		    (ena_rx_ctx.l4_proto == ENA_ETH_IO_L4_PROTO_TCP)) {
			/*
			 * Send to the stack if:
			 *  - LRO not enabled, or
			 *  - no LRO resources, or
			 *  - lro enqueue fails
			 */
			if ((rx_ring->lro.lro_cnt != 0) &&
			    (tcp_lro_rx(&rx_ring->lro, mbuf, 0) == 0))
				do_if_input = 0;
		}
#endif
		if (do_if_input != 0) {
			ena_trace(ENA_DBG | ENA_RXPTH,
			    "calling if_input() with mbuf %p", mbuf);
			if_percpuq_enqueue(ifp->if_percpuq, mbuf);
		}

		counter_enter();
		counter_u64_add_protected(rx_ring->rx_stats.cnt, 1);
		counter_u64_add_protected(adapter->hw_stats.rx_packets, 1);
		counter_exit();
	} while (--budget);

	rx_ring->next_to_clean = next_to_clean;

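	/*
	 * Refill in batches: only touch the submission queue once at least
	 * ring_size / ENA_RX_REFILL_THRESH_DIVIDER descriptors are free.
	 */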
	refill_required = ena_com_free_desc(io_sq);
	refill_threshold = rx_ring->ring_size / ENA_RX_REFILL_THRESH_DIVIDER;

	if (refill_required > refill_threshold) {
		ena_com_update_dev_comp_head(rx_ring->ena_com_io_cq);
		ena_refill_rx_bufs(rx_ring, refill_required);
	}

#ifdef LRO
	tcp_lro_flush_all(&rx_ring->lro);
#endif

	return (RX_BUDGET - budget);

error:
	counter_u64_add(rx_ring->rx_stats.bad_desc_num, 1);
	return (RX_BUDGET - budget);
}

/*********************************************************************
 *
 *  MSIX & Interrupt Service routine
 *
 **********************************************************************/

/**
 * ena_intr_msix_mgmnt - MSI-X Interrupt Handler for admin/async queue
 * @arg: pointer to the adapter
 **/
static int
ena_intr_msix_mgmnt(void *arg)
{
	struct ena_adapter *adapter = (struct ena_adapter *)arg;

	ena_com_admin_q_comp_intr_handler(adapter->ena_dev);
	if (likely(adapter->running))
		ena_com_aenq_intr_handler(adapter->ena_dev, arg);

	return 1;
}

/**
 * ena_handle_msix - MSI-X Interrupt Handler for Tx/Rx
 * @arg: queue for which the interrupt is being handled
 **/
static int
ena_handle_msix(void *arg)
{
	struct ena_que *que = arg;
	struct ena_adapter *adapter = que->adapter;
	struct ifnet *ifp = adapter->ifp;
	struct ena_ring *tx_ring;
	struct ena_ring *rx_ring;
	struct ena_com_io_cq *io_cq;
	struct ena_eth_io_intr_reg intr_reg;
	int qid, ena_qid;
	int txc, rxc, i;

	if (unlikely((if_getdrvflags(ifp) & IFF_RUNNING) == 0))
		return 0;

	ena_trace(ENA_DBG, "MSI-X TX/RX routine");

	tx_ring = que->tx_ring;
	rx_ring = que->rx_ring;
	qid = que->id;
	ena_qid = ENA_IO_TXQ_IDX(qid);
	io_cq = &adapter->ena_dev->io_cq_queues[ena_qid];

1873 for (i = 0; i < CLEAN_BUDGET; ++i) {
1874 /*
1875 * If the lock cannot be acquired, the deferred cleanup task is
1876 * already running and the rx ring is being cleaned up in
1877 * another thread.
1878 */
1879 if (likely(ENA_RING_MTX_TRYLOCK(rx_ring) != 0)) {
1880 rxc = ena_rx_cleanup(rx_ring);
1881 ENA_RING_MTX_UNLOCK(rx_ring);
1882 } else {
1883 rxc = 0;
1884 }
1885
1886 /* Protect ena_tx_cleanup() from racing with ena_start_xmit() */
1887 ENA_RING_MTX_LOCK(tx_ring);
1888 txc = ena_tx_cleanup(tx_ring);
1889 ENA_RING_MTX_UNLOCK(tx_ring);
1890
1891 if (unlikely((if_getdrvflags(ifp) & IFF_RUNNING) == 0))
1892 return 0;
1893
1894 if ((txc != TX_BUDGET) && (rxc != RX_BUDGET))
1895 break;
1896 }
1897
1898 /* Signal that work is done and unmask interrupt */
1899 ena_com_update_intr_reg(&intr_reg,
1900 RX_IRQ_INTERVAL,
1901 TX_IRQ_INTERVAL,
1902 true);
1903 ena_com_unmask_intr(io_cq, &intr_reg);
1904
1905 return 1;
1906 }
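/*
 * Note: the CLEAN_BUDGET loop above bounds the work done per interrupt.
 * Another cleanup pass is started only while at least one ring consumed
 * its full TX_BUDGET/RX_BUDGET (a sign of more pending work); the loop
 * stops as soon as both rings come in under budget.
 */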
1907
1908 static int
1909 ena_enable_msix(struct ena_adapter *adapter)
1910 {
1911 int msix_req;
1912 int counts[PCI_INTR_TYPE_SIZE];
1913 int max_type;
1914
1915 /* Reserve the maximum number of MSI-X vectors we might need */
1916 msix_req = ENA_MAX_MSIX_VEC(adapter->num_queues);
1917
1918 counts[PCI_INTR_TYPE_INTX] = 0;
1919 counts[PCI_INTR_TYPE_MSI] = 0;
1920 counts[PCI_INTR_TYPE_MSIX] = msix_req;
1921 max_type = PCI_INTR_TYPE_MSIX;
1922
1923 if (pci_intr_alloc(&adapter->sc_pa, &adapter->sc_intrs, counts,
1924 max_type) != 0) {
1925 aprint_error_dev(adapter->pdev,
1926 "failed to allocate interrupt\n");
1927 return ENOSPC;
1928 }
1929
1930 adapter->sc_nintrs = counts[PCI_INTR_TYPE_MSIX];
1931
1932 if (counts[PCI_INTR_TYPE_MSIX] != msix_req) {
1933 device_printf(adapter->pdev,
1934 "Enable only %d MSI-x (out of %d), reduce "
1935 "the number of queues\n", adapter->sc_nintrs, msix_req);
1936 adapter->num_queues = adapter->sc_nintrs - ENA_ADMIN_MSIX_VEC;
1937 }
1938
1939 return 0;
1940 }
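/*
 * A minimal sketch (not driver code) of the pci_intr_alloc(9) counts[]
 * convention used above: the array is indexed by interrupt type, types
 * are tried from max_type downwards, and on success counts[] is updated
 * with the number of vectors actually allocated.  A driver willing to
 * fall back to MSI or INTx would fill in those slots as well:
 */
#if 0
	int fallback_counts[PCI_INTR_TYPE_SIZE];

	fallback_counts[PCI_INTR_TYPE_INTX] = 1;	/* accept one INTx */
	fallback_counts[PCI_INTR_TYPE_MSI] = 1;		/* accept one MSI */
	fallback_counts[PCI_INTR_TYPE_MSIX] = msix_req;	/* prefer MSI-X */

	if (pci_intr_alloc(&adapter->sc_pa, &adapter->sc_intrs,
	    fallback_counts, PCI_INTR_TYPE_MSIX) != 0)
		return ENOSPC;
#endif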
1941
1942 #if 0
1943 static void
1944 ena_setup_io_intr(struct ena_adapter *adapter)
1945 {
1946 static int last_bind_cpu = -1;
1947 int irq_idx;
1948
1949 for (int i = 0; i < adapter->num_queues; i++) {
1950 irq_idx = ENA_IO_IRQ_IDX(i);
1951
1952 snprintf(adapter->irq_tbl[irq_idx].name, ENA_IRQNAME_SIZE,
1953 "%s-TxRx-%d", device_xname(adapter->pdev), i);
1954 adapter->irq_tbl[irq_idx].handler = ena_handle_msix;
1955 adapter->irq_tbl[irq_idx].data = &adapter->que[i];
1956 adapter->irq_tbl[irq_idx].vector =
1957 adapter->msix_entries[irq_idx].vector;
1958 ena_trace(ENA_INFO | ENA_IOQ, "ena_setup_io_intr vector: %d\n",
1959 adapter->msix_entries[irq_idx].vector);
1960 #ifdef RSS
1961 adapter->que[i].cpu = adapter->irq_tbl[irq_idx].cpu =
1962 rss_getcpu(i % rss_getnumbuckets());
1963 #else
1964 /*
1965 * We still want to bind rings to the corresponding cpu
1966 * using something similar to the RSS round-robin technique.
1967 */
1968 if (unlikely(last_bind_cpu < 0))
1969 last_bind_cpu = CPU_FIRST();
1970 adapter->que[i].cpu = adapter->irq_tbl[irq_idx].cpu =
1971 last_bind_cpu;
1972 last_bind_cpu = CPU_NEXT(last_bind_cpu);
1973 #endif
1974 }
1975 }
1976 #endif
1977
1978 static int
1979 ena_request_mgmnt_irq(struct ena_adapter *adapter)
1980 {
1981 const char *intrstr;
1982 char intrbuf[PCI_INTRSTR_LEN];
1983 char intr_xname[INTRDEVNAMEBUF];
1984 pci_chipset_tag_t pc = adapter->sc_pa.pa_pc;
1985 const int irq_slot = ENA_MGMNT_IRQ_IDX;
1986
1987 KASSERT(adapter->sc_intrs != NULL);
1988 KASSERT(adapter->sc_ihs[irq_slot] == NULL);
1989
1990 snprintf(intr_xname, sizeof(intr_xname), "%s mgmnt",
1991 device_xname(adapter->pdev));
1992 intrstr = pci_intr_string(pc, adapter->sc_intrs[irq_slot],
1993 intrbuf, sizeof(intrbuf));
1994
1995 adapter->sc_ihs[irq_slot] = pci_intr_establish_xname(
1996 pc, adapter->sc_intrs[irq_slot],
1997 IPL_NET, ena_intr_msix_mgmnt, adapter, intr_xname);
1998
1999 if (adapter->sc_ihs[irq_slot] == NULL) {
2000 device_printf(adapter->pdev, "failed to register "
2001 "interrupt handler for MGMNT irq %s\n",
2002 intrstr);
2003 return ENOMEM;
2004 }
2005
2006 aprint_normal_dev(adapter->pdev,
2007 "for MGMNT interrupting at %s\n", intrstr);
2008
2009 return 0;
2010 }
2011
2012 static int
2013 ena_request_io_irq(struct ena_adapter *adapter)
2014 {
2015 const char *intrstr;
2016 char intrbuf[PCI_INTRSTR_LEN];
2017 char intr_xname[INTRDEVNAMEBUF];
2018 pci_chipset_tag_t pc = adapter->sc_pa.pa_pc;
2019 const int irq_off = ENA_IO_IRQ_FIRST_IDX;
2020 void *vih;
2021 kcpuset_t *affinity;
2022 int i;
2023
2024 KASSERT(adapter->sc_intrs != NULL);
2025
2026 kcpuset_create(&affinity, false);
2027
2028 for (i = 0; i < adapter->num_queues; i++) {
2029 int irq_slot = i + irq_off;
2030 int affinity_to = (irq_slot) % ncpu;
2031
2032 KASSERT((void *)adapter->sc_intrs[irq_slot] != NULL);
2033 KASSERT(adapter->sc_ihs[irq_slot] == NULL);
2034
2035 snprintf(intr_xname, sizeof(intr_xname), "%s ioq%d",
2036 device_xname(adapter->pdev), i);
2037 intrstr = pci_intr_string(pc, adapter->sc_intrs[irq_slot],
2038 intrbuf, sizeof(intrbuf));
2039
2040 vih = pci_intr_establish_xname(adapter->sc_pa.pa_pc,
2041 adapter->sc_intrs[irq_slot], IPL_NET,
2042 ena_handle_msix, &adapter->que[i], intr_xname);
2043
2044 if (vih == NULL) {
2045 device_printf(adapter->pdev, "failed to register "
2046 "interrupt handler for IO queue %d irq %s\n",
2047 i, intrstr);
2048 goto err;
2049 }
2050
2051 kcpuset_zero(affinity);
2052 /* Round-robin affinity */
2053 kcpuset_set(affinity, affinity_to);
2054 int error = interrupt_distribute(vih, affinity, NULL);
2055 if (error == 0) {
2056 aprint_normal_dev(adapter->pdev,
2057 "for IO queue %d interrupting at %s"
2058 " affinity to %u\n", i, intrstr, affinity_to);
2059 } else {
2060 aprint_normal_dev(adapter->pdev,
2061 "for IO queue %d interrupting at %s\n", i, intrstr);
2062 }
2063
2064 adapter->sc_ihs[irq_slot] = vih;
2065
2066 #ifdef RSS
2067 ena_trace(ENA_INFO, "queue %d - RSS bucket %d\n",
2068 i - ENA_IO_IRQ_FIRST_IDX, irq->cpu);
2069 #else
2070 ena_trace(ENA_INFO, "queue %d - cpu %d\n",
2071 i - ENA_IO_IRQ_FIRST_IDX, affinity_to);
2072 #endif
2073 }
2074
2075 kcpuset_destroy(affinity);
2076 return 0;
2077
2078 err:
2079 kcpuset_destroy(affinity);
2080
2081 for (i--; i >= 0; i--) {
2082 int irq_slot = i + irq_off;
2083
2084 KASSERT(adapter->sc_ihs[irq_slot] != NULL);
2085 pci_intr_disestablish(adapter->sc_pa.pa_pc,
2086 adapter->sc_ihs[irq_slot]);
2087 adapter->sc_ihs[irq_slot] = NULL;
2088 }
2089
2090 return ENOSPC;
2091 }
2092
2093 static void
2094 ena_free_mgmnt_irq(struct ena_adapter *adapter)
2095 {
2096 const int irq_slot = ENA_MGMNT_IRQ_IDX;
2097
2098 if (adapter->sc_ihs[irq_slot]) {
2099 pci_intr_disestablish(adapter->sc_pa.pa_pc,
2100 adapter->sc_ihs[irq_slot]);
2101 adapter->sc_ihs[irq_slot] = NULL;
2102 }
2103 }
2104
2105 static void
2106 ena_free_io_irq(struct ena_adapter *adapter)
2107 {
2108 const int irq_off = ENA_IO_IRQ_FIRST_IDX;
2109
2110 for (int i = 0; i < adapter->num_queues; i++) {
2111 int irq_slot = i + irq_off;
2112
2113 if (adapter->sc_ihs[irq_slot]) {
2114 pci_intr_disestablish(adapter->sc_pa.pa_pc,
2115 adapter->sc_ihs[irq_slot]);
2116 adapter->sc_ihs[irq_slot] = NULL;
2117 }
2118 }
2119 }
2120
2121 static void
2122 ena_free_irqs(struct ena_adapter* adapter)
2123 {
2124
2125 ena_free_io_irq(adapter);
2126 ena_free_mgmnt_irq(adapter);
2127 ena_disable_msix(adapter);
2128 }
2129
2130 static void
2131 ena_disable_msix(struct ena_adapter *adapter)
2132 {
2133 pci_intr_release(adapter->sc_pa.pa_pc, adapter->sc_intrs,
2134 adapter->sc_nintrs);
2135 }
2136
2137 static void
2138 ena_unmask_all_io_irqs(struct ena_adapter *adapter)
2139 {
2140 struct ena_com_io_cq* io_cq;
2141 struct ena_eth_io_intr_reg intr_reg;
2142 uint16_t ena_qid;
2143 int i;
2144
2145 /* Unmask interrupts for all queues */
2146 for (i = 0; i < adapter->num_queues; i++) {
2147 ena_qid = ENA_IO_TXQ_IDX(i);
2148 io_cq = &adapter->ena_dev->io_cq_queues[ena_qid];
2149 ena_com_update_intr_reg(&intr_reg, 0, 0, true);
2150 ena_com_unmask_intr(io_cq, &intr_reg);
2151 }
2152 }
2153
2154 /* Configure RSS: indirection table, hash function and hash inputs */
2155 static int
2156 ena_rss_configure(struct ena_adapter *adapter)
2157 {
2158 struct ena_com_dev *ena_dev = adapter->ena_dev;
2159 int rc;
2160
2161 /* Set indirect table */
2162 rc = ena_com_indirect_table_set(ena_dev);
2163 if (unlikely((rc != 0) && (rc != EOPNOTSUPP)))
2164 return (rc);
2165
2166 /* Configure hash function (if supported) */
2167 rc = ena_com_set_hash_function(ena_dev);
2168 if (unlikely((rc != 0) && (rc != EOPNOTSUPP)))
2169 return (rc);
2170
2171 /* Configure hash inputs (if supported) */
2172 rc = ena_com_set_hash_ctrl(ena_dev);
2173 if (unlikely((rc != 0) && (rc != EOPNOTSUPP)))
2174 return (rc);
2175
2176 return (0);
2177 }
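/*
 * Each step above deliberately tolerates EOPNOTSUPP, so devices that do
 * not implement one of the RSS admin commands can still be brought up;
 * only real failures abort the configuration.
 */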
2178
2179 static int
2180 ena_up_complete(struct ena_adapter *adapter)
2181 {
2182 int rc;
2183
2184 if (likely(adapter->rss_support)) {
2185 rc = ena_rss_configure(adapter);
2186 if (rc != 0)
2187 return (rc);
2188 }
2189
2190 rc = ena_change_mtu(adapter->ifp, adapter->ifp->if_mtu);
2191 if (unlikely(rc != 0))
2192 return (rc);
2193
2194 ena_refill_all_rx_bufs(adapter);
2195 ena_reset_counters((struct evcnt *)&adapter->hw_stats,
2196 sizeof(adapter->hw_stats));
2197
2198 return (0);
2199 }
2200
2201 static int
2202 ena_up(struct ena_adapter *adapter)
2203 {
2204 int rc = 0;
2205
2206 #if 0
2207 if (unlikely(device_is_attached(adapter->pdev) == 0)) {
2208 device_printf(adapter->pdev, "device is not attached!\n");
2209 return (ENXIO);
2210 }
2211 #endif
2212
2213 if (unlikely(!adapter->running)) {
2214 device_printf(adapter->pdev, "device is not running!\n");
2215 return (ENXIO);
2216 }
2217
2218 if (!adapter->up) {
2219 device_printf(adapter->pdev, "device is going UP\n");
2220
2221 /* setup interrupts for IO queues */
2222 rc = ena_request_io_irq(adapter);
2223 if (unlikely(rc != 0)) {
2224 ena_trace(ENA_ALERT, "err_req_irq");
2225 goto err_req_irq;
2226 }
2227
2228 /* allocate transmit descriptors */
2229 rc = ena_setup_all_tx_resources(adapter);
2230 if (unlikely(rc != 0)) {
2231 ena_trace(ENA_ALERT, "err_setup_tx");
2232 goto err_setup_tx;
2233 }
2234
2235 /* allocate receive descriptors */
2236 rc = ena_setup_all_rx_resources(adapter);
2237 if (unlikely(rc != 0)) {
2238 ena_trace(ENA_ALERT, "err_setup_rx");
2239 goto err_setup_rx;
2240 }
2241
2242 /* create IO queues for Rx & Tx */
2243 rc = ena_create_io_queues(adapter);
2244 if (unlikely(rc != 0)) {
2245 ena_trace(ENA_ALERT,
2246 "create IO queues failed");
2247 goto err_io_que;
2248 }
2249
2250 if (unlikely(adapter->link_status))
2251 if_link_state_change(adapter->ifp, LINK_STATE_UP);
2252
2253 rc = ena_up_complete(adapter);
2254 if (unlikely(rc != 0))
2255 goto err_up_complete;
2256
2257 counter_u64_add(adapter->dev_stats.interface_up, 1);
2258
2259 ena_update_hwassist(adapter);
2260
2261 if_setdrvflagbits(adapter->ifp, IFF_RUNNING,
2262 IFF_OACTIVE);
2263
2264 callout_reset(&adapter->timer_service, hz,
2265 ena_timer_service, (void *)adapter);
2266
2267 adapter->up = true;
2268
2269 ena_unmask_all_io_irqs(adapter);
2270 }
2271
2272 return (0);
2273
2274 err_up_complete:
2275 ena_destroy_all_io_queues(adapter);
2276 err_io_que:
2277 ena_free_all_rx_resources(adapter);
2278 err_setup_rx:
2279 ena_free_all_tx_resources(adapter);
2280 err_setup_tx:
2281 ena_free_io_irq(adapter);
2282 err_req_irq:
2283 return (rc);
2284 }
2285
2286 #if 0
2287 static uint64_t
2288 ena_get_counter(struct ifnet *ifp, ift_counter cnt)
2289 {
2290 struct ena_adapter *adapter;
2291 struct ena_hw_stats *stats;
2292
2293 adapter = if_getsoftc(ifp);
2294 stats = &adapter->hw_stats;
2295
2296 switch (cnt) {
2297 case IFCOUNTER_IPACKETS:
2298 return (counter_u64_fetch(stats->rx_packets));
2299 case IFCOUNTER_OPACKETS:
2300 return (counter_u64_fetch(stats->tx_packets));
2301 case IFCOUNTER_IBYTES:
2302 return (counter_u64_fetch(stats->rx_bytes));
2303 case IFCOUNTER_OBYTES:
2304 return (counter_u64_fetch(stats->tx_bytes));
2305 case IFCOUNTER_IQDROPS:
2306 return (counter_u64_fetch(stats->rx_drops));
2307 default:
2308 return (if_get_counter_default(ifp, cnt));
2309 }
2310 }
2311 #endif
2312
2313 static int
2314 ena_media_change(struct ifnet *ifp)
2315 {
2316 /* Media Change is not supported by firmware */
2317 return (0);
2318 }
2319
2320 static void
2321 ena_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
2322 {
2323 struct ena_adapter *adapter = if_getsoftc(ifp);
2324 ena_trace(ENA_DBG, "enter");
2325
2326 mutex_enter(&adapter->global_mtx);
2327
2328 ifmr->ifm_status = IFM_AVALID;
2329 ifmr->ifm_active = IFM_ETHER;
2330
2331 if (!adapter->link_status) {
2332 mutex_exit(&adapter->global_mtx);
2333 ena_trace(ENA_INFO, "link_status = false");
2334 return;
2335 }
2336
2337 ifmr->ifm_status |= IFM_ACTIVE;
2338 ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
2339
2340 mutex_exit(&adapter->global_mtx);
2341 }
2342
2343 static int
2344 ena_init(struct ifnet *ifp)
2345 {
2346 struct ena_adapter *adapter = if_getsoftc(ifp);
2347
2348 if (!adapter->up) {
2349 rw_enter(&adapter->ioctl_sx, RW_WRITER);
2350 ena_up(adapter);
2351 rw_exit(&adapter->ioctl_sx);
2352 }
2353
2354 return 0;
2355 }
2356
2357 static int
2358 ena_ioctl(struct ifnet *ifp, u_long command, void *data)
2359 {
2360 struct ena_adapter *adapter;
2361 struct ifreq *ifr;
2362 int rc;
2363
2364 adapter = ifp->if_softc;
2365 ifr = (struct ifreq *)data;
2366
2367 /*
2368 * Acquire the lock to prevent the up and down routines from running in parallel.
2369 */
2370 rc = 0;
2371 switch (command) {
2372 case SIOCSIFMTU:
2373 if (ifp->if_mtu == ifr->ifr_mtu)
2374 break;
2375 rw_enter(&adapter->ioctl_sx, RW_WRITER);
2376 ena_down(adapter);
2377
2378 ena_change_mtu(ifp, ifr->ifr_mtu);
2379
2380 rc = ena_up(adapter);
2381 rw_exit(&adapter->ioctl_sx);
2382 break;
2383
2384 case SIOCSIFFLAGS:
2385 if ((ifp->if_flags & IFF_UP) != 0) {
2386 if ((if_getdrvflags(ifp) & IFF_RUNNING) != 0) {
2387 if ((ifp->if_flags & (IFF_PROMISC |
2388 IFF_ALLMULTI)) != 0) {
2389 device_printf(adapter->pdev,
2390 "ioctl promisc/allmulti\n");
2391 }
2392 } else {
2393 rw_enter(&adapter->ioctl_sx, RW_WRITER);
2394 rc = ena_up(adapter);
2395 rw_exit(&adapter->ioctl_sx);
2396 }
2397 } else {
2398 if ((if_getdrvflags(ifp) & IFF_RUNNING) != 0) {
2399 rw_enter(&adapter->ioctl_sx, RW_WRITER);
2400 ena_down(adapter);
2401 rw_exit(&adapter->ioctl_sx);
2402 }
2403 }
2404 break;
2405
2406 case SIOCADDMULTI:
2407 case SIOCDELMULTI:
2408 break;
2409
2410 case SIOCSIFCAP:
2411 {
2412 struct ifcapreq *ifcr = data;
2413 int reinit = 0;
2414
2415 if (ifcr->ifcr_capenable != ifp->if_capenable) {
2416 ifp->if_capenable = ifcr->ifcr_capenable;
2417 reinit = 1;
2418 }
2419
2420 if ((reinit != 0) &&
2421 ((if_getdrvflags(ifp) & IFF_RUNNING) != 0)) {
2422 rw_enter(&adapter->ioctl_sx, RW_WRITER);
2423 ena_down(adapter);
2424 rc = ena_up(adapter);
2425 rw_exit(&adapter->ioctl_sx);
2426 }
2427 }
2428
2429 break;
2430 default:
2431 rc = ether_ioctl(ifp, command, data);
2432 break;
2433 }
2434
2435 return (rc);
2436 }
2437
2438 static int
2439 ena_get_dev_offloads(struct ena_com_dev_get_features_ctx *feat)
2440 {
2441 int caps = 0;
2442
2443 if ((feat->offload.tx &
2444 (ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_FULL_MASK |
2445 ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_MASK |
2446 ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L3_CSUM_IPV4_MASK)) != 0)
2447 caps |= IFCAP_CSUM_IPv4_Tx;
2448
2449 if ((feat->offload.tx &
2450 (ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_FULL_MASK |
2451 ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_PART_MASK)) != 0)
2452 caps |= IFCAP_CSUM_TCPv6_Tx | IFCAP_CSUM_UDPv6_Tx;
2453
2454 if ((feat->offload.tx &
2455 ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_MASK) != 0)
2456 caps |= IFCAP_TSOv4;
2457
2458 if ((feat->offload.tx &
2459 ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV6_MASK) != 0)
2460 caps |= IFCAP_TSOv6;
2461
2462 if ((feat->offload.rx_supported &
2463 (ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_MASK |
2464 ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L3_CSUM_IPV4_MASK)) != 0)
2465 caps |= IFCAP_CSUM_IPv4_Rx;
2466
2467 if ((feat->offload.rx_supported &
2468 ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV6_CSUM_MASK) != 0)
2469 caps |= IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
2470
2471 caps |= IFCAP_LRO;
2472
2473 return (caps);
2474 }
2475
2476 static void
2477 ena_update_host_info(struct ena_admin_host_info *host_info, struct ifnet *ifp)
2478 {
2479
2480 host_info->supported_network_features[0] =
2481 (uint32_t)if_getcapabilities(ifp);
2482 }
2483
2484 static void
2485 ena_update_hwassist(struct ena_adapter *adapter)
2486 {
2487 struct ifnet *ifp = adapter->ifp;
2488 uint32_t feat = adapter->tx_offload_cap;
2489 int cap = if_getcapenable(ifp);
2490 int flags = 0;
2491
2492 if_clearhwassist(ifp);
2493
2494 if ((cap & (IFCAP_CSUM_IPv4_Tx|IFCAP_CSUM_TCPv4_Tx|IFCAP_CSUM_UDPv4_Tx))
2495 != 0) {
2496 if ((feat &
2497 ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L3_CSUM_IPV4_MASK) != 0)
2498 flags |= M_CSUM_IPv4;
2499 if ((feat &
2500 (ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_FULL_MASK |
2501 ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_MASK)) != 0)
2502 flags |= M_CSUM_TCPv4 | M_CSUM_UDPv4;
2503 }
2504
2505 if ((cap & IFCAP_CSUM_TCPv6_Tx) != 0)
2506 flags |= M_CSUM_TCPv6;
2507
2508 if ((cap & IFCAP_CSUM_UDPv6_Tx) != 0)
2509 flags |= M_CSUM_UDPv6;
2510
2511 if ((cap & IFCAP_TSOv4) != 0)
2512 flags |= M_CSUM_TSOv4;
2513
2514 if ((cap & IFCAP_TSOv6) != 0)
2515 flags |= M_CSUM_TSOv6;
2516
2517 if_sethwassistbits(ifp, flags, 0);
2518 }
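/*
 * Worked example (hypothetical configuration): with IFCAP_CSUM_IPv4_Tx
 * and IFCAP_TSOv4 enabled and a device advertising the L3 and L4 IPv4
 * checksum feature bits, the function above yields a hwassist of
 * M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 | M_CSUM_TSOv4.
 */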
2519
2520 static int
2521 ena_setup_ifnet(device_t pdev, struct ena_adapter *adapter,
2522 struct ena_com_dev_get_features_ctx *feat)
2523 {
2524 struct ifnet *ifp;
2525 int caps = 0;
2526
2527 ifp = adapter->ifp = &adapter->sc_ec.ec_if;
2528 if (unlikely(ifp == NULL)) {
2529 ena_trace(ENA_ALERT, "can not allocate ifnet structure\n");
2530 return (ENXIO);
2531 }
2532 if_initname(ifp, "ena", device_unit(pdev));
2533 if_setdev(ifp, pdev);
2534 if_setsoftc(ifp, adapter);
2535
2536 if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
2537 if_setinitfn(ifp, ena_init);
2538 if_settransmitfn(ifp, ena_mq_start);
2539 #if 0
2540 if_setqflushfn(ifp, ena_qflush);
2541 #endif
2542 if_setioctlfn(ifp, ena_ioctl);
2543 #if 0
2544 if_setgetcounterfn(ifp, ena_get_counter);
2545 #endif
2546
2547 if_setsendqlen(ifp, adapter->tx_ring_size);
2548 if_setsendqready(ifp);
2549 if_setmtu(ifp, ETHERMTU);
2550 if_setbaudrate(ifp, 0);
2551 /* Zeroize capabilities... */
2552 if_setcapabilities(ifp, 0);
2553 if_setcapenable(ifp, 0);
2554 /* check hardware support */
2555 caps = ena_get_dev_offloads(feat);
2556 /* ... and set them */
2557 if_setcapabilitiesbit(ifp, caps, 0);
2558 adapter->sc_ec.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2559
2560 #if 0
2561 /* TSO parameters */
2562 /* XXX no limits on NetBSD, guarded by virtue of dmamap load failing */
2563 ifp->if_hw_tsomax = ENA_TSO_MAXSIZE -
2564 (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN);
2565 ifp->if_hw_tsomaxsegcount = adapter->max_tx_sgl_size - 1;
2566 ifp->if_hw_tsomaxsegsize = ENA_TSO_MAXSIZE;
2567 #endif
2568
2569 if_setifheaderlen(ifp, sizeof(struct ether_vlan_header));
2570 if_setcapenable(ifp, if_getcapabilities(ifp));
2571
2572 /*
2573 * Specify the media types supported by this adapter and register
2574 * callbacks to update media and link information
2575 */
2576 adapter->sc_ec.ec_ifmedia = &adapter->media;
2577 ifmedia_init(&adapter->media, IFM_IMASK,
2578 ena_media_change, ena_media_status);
2579 ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
2580 ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
2581
2582 if_attach(ifp);
2583 if_deferred_start_init(ifp, NULL);
2584
2585 ether_ifattach(ifp, adapter->mac_addr);
2586
2587 return (0);
2588 }
2589
2590 static void
2591 ena_down(struct ena_adapter *adapter)
2592 {
2593 int rc;
2594
2595 if (adapter->up) {
2596 device_printf(adapter->pdev, "device is going DOWN\n");
2597
2598 callout_halt(&adapter->timer_service, &adapter->global_mtx);
2599
2600 adapter->up = false;
2601 if_setdrvflagbits(adapter->ifp, IFF_OACTIVE,
2602 IFF_RUNNING);
2603
2604 ena_free_io_irq(adapter);
2605
2606 if (adapter->trigger_reset) {
2607 rc = ena_com_dev_reset(adapter->ena_dev,
2608 adapter->reset_reason);
2609 if (unlikely(rc != 0))
2610 device_printf(adapter->pdev,
2611 "Device reset failed\n");
2612 }
2613
2614 ena_destroy_all_io_queues(adapter);
2615
2616 ena_free_all_tx_bufs(adapter);
2617 ena_free_all_rx_bufs(adapter);
2618 ena_free_all_tx_resources(adapter);
2619 ena_free_all_rx_resources(adapter);
2620
2621 counter_u64_add(adapter->dev_stats.interface_down, 1);
2622 }
2623 }
2624
2625 static void
2626 ena_tx_csum(struct ena_com_tx_ctx *ena_tx_ctx, struct mbuf *mbuf)
2627 {
2628 struct ena_com_tx_meta *ena_meta;
2629 struct ether_vlan_header *eh;
2630 u32 mss;
2631 bool offload;
2632 uint16_t etype;
2633 int ehdrlen;
2634 struct ip *ip;
2635 int iphlen;
2636 struct tcphdr *th;
2637
2638 offload = false;
2639 ena_meta = &ena_tx_ctx->ena_meta;
2640
2641 #if 0
2642 u32 mss = mbuf->m_pkthdr.tso_segsz;
2643
2644 if (mss != 0)
2645 offload = true;
2646 #else
2647 mss = mbuf->m_pkthdr.len; /* XXX don't have tso_segsz */
2648 #endif
2649
2650 if ((mbuf->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0)
2651 offload = true;
2652
2653 if ((mbuf->m_pkthdr.csum_flags & CSUM_OFFLOAD) != 0)
2654 offload = true;
2655
2656 if (!offload) {
2657 ena_tx_ctx->meta_valid = 0;
2658 return;
2659 }
2660
2661 /* Determine where frame payload starts. */
2662 eh = mtod(mbuf, struct ether_vlan_header *);
2663 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
2664 etype = ntohs(eh->evl_proto);
2665 ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
2666 } else {
2667 etype = ntohs(eh->evl_encap_proto);
2668 ehdrlen = ETHER_HDR_LEN;
2669 }
2670
2671 ip = (struct ip *)(mbuf->m_data + ehdrlen);
2672 iphlen = ip->ip_hl << 2;
2673 th = (struct tcphdr *)((vaddr_t)ip + iphlen);
2674
2675 if ((mbuf->m_pkthdr.csum_flags & M_CSUM_IPv4) != 0) {
2676 ena_tx_ctx->l3_csum_enable = 1;
2677 }
2678 if ((mbuf->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
2679 ena_tx_ctx->tso_enable = 1;
2680 ena_meta->l4_hdr_len = (th->th_off);
2681 }
2682
2683 switch (etype) {
2684 case ETHERTYPE_IP:
2685 ena_tx_ctx->l3_proto = ENA_ETH_IO_L3_PROTO_IPV4;
2686 if ((ip->ip_off & htons(IP_DF)) != 0)
2687 ena_tx_ctx->df = 1;
2688 break;
2689 case ETHERTYPE_IPV6:
2690 ena_tx_ctx->l3_proto = ENA_ETH_IO_L3_PROTO_IPV6;
2691 break;
2692 default:
2693 break;
2694 }
2695
2696 if (ip->ip_p == IPPROTO_TCP) {
2697 ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_TCP;
2698 if ((mbuf->m_pkthdr.csum_flags &
2699 (M_CSUM_TCPv4 | M_CSUM_TCPv6)) != 0)
2700 ena_tx_ctx->l4_csum_enable = 1;
2701 else
2702 ena_tx_ctx->l4_csum_enable = 0;
2703 } else if (ip->ip_p == IPPROTO_UDP) {
2704 ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_UDP;
2705 if ((mbuf->m_pkthdr.csum_flags &
2706 (M_CSUM_UDPv4 | M_CSUM_UDPv6)) != 0)
2707 ena_tx_ctx->l4_csum_enable = 1;
2708 else
2709 ena_tx_ctx->l4_csum_enable = 0;
2710 } else {
2711 ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_UNKNOWN;
2712 ena_tx_ctx->l4_csum_enable = 0;
2713 }
2714
2715 ena_meta->mss = mss;
2716 ena_meta->l3_hdr_len = iphlen;
2717 ena_meta->l3_hdr_offset = ehdrlen;
2718 ena_tx_ctx->meta_valid = 1;
2719 }
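/*
 * Note on the XXX above: NetBSD carries the TSO segment size in
 * m_pkthdr.segsz when M_CSUM_TSOv4/TSOv6 is set, so the mss hint could
 * plausibly be derived as sketched below (an untested assumption, kept
 * out of the build on purpose):
 */
#if 0
	if ((mbuf->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0)
		mss = mbuf->m_pkthdr.segsz;
	else
		mss = mbuf->m_pkthdr.len;
#endif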
2720
2721 static int
2722 ena_check_and_collapse_mbuf(struct ena_ring *tx_ring, struct mbuf **mbuf)
2723 {
2724 struct ena_adapter *adapter;
2725 struct mbuf *collapsed_mbuf;
2726 int num_frags;
2727
2728 adapter = tx_ring->adapter;
2729 num_frags = ena_mbuf_count(*mbuf);
2730
2731 /* One segment must be reserved for configuration descriptor. */
2732 if (num_frags < adapter->max_tx_sgl_size)
2733 return (0);
2734 counter_u64_add(tx_ring->tx_stats.collapse, 1);
2735
2736 collapsed_mbuf = m_collapse(*mbuf, M_NOWAIT,
2737 adapter->max_tx_sgl_size - 1);
2738 if (unlikely(collapsed_mbuf == NULL)) {
2739 counter_u64_add(tx_ring->tx_stats.collapse_err, 1);
2740 return (ENOMEM);
2741 }
2742
2743 /* If the mbuf was collapsed successfully, the original mbuf was released. */
2744 *mbuf = collapsed_mbuf;
2745
2746 return (0);
2747 }
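/*
 * Worked example (hypothetical sizes): with max_tx_sgl_size = 32, a
 * chain of up to 31 fragments is passed through untouched, while a
 * 35-fragment chain is collapsed into at most 31 fragments, leaving one
 * descriptor free for the configuration descriptor mentioned above.
 */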
2748
2749 static int
2750 ena_xmit_mbuf(struct ena_ring *tx_ring, struct mbuf **mbuf)
2751 {
2752 struct ena_adapter *adapter;
2753 struct ena_tx_buffer *tx_info;
2754 struct ena_com_tx_ctx ena_tx_ctx;
2755 struct ena_com_dev *ena_dev;
2756 struct ena_com_buf *ena_buf;
2757 struct ena_com_io_sq* io_sq;
2758 void *push_hdr;
2759 uint16_t next_to_use;
2760 uint16_t req_id;
2761 uint16_t ena_qid;
2762 uint32_t header_len;
2763 int i, rc;
2764 int nb_hw_desc;
2765
2766 ena_qid = ENA_IO_TXQ_IDX(tx_ring->que->id);
2767 adapter = tx_ring->que->adapter;
2768 ena_dev = adapter->ena_dev;
2769 io_sq = &ena_dev->io_sq_queues[ena_qid];
2770
2771 rc = ena_check_and_collapse_mbuf(tx_ring, mbuf);
2772 if (unlikely(rc != 0)) {
2773 ena_trace(ENA_WARNING,
2774 "Failed to collapse mbuf! err: %d", rc);
2775 return (rc);
2776 }
2777
2778 next_to_use = tx_ring->next_to_use;
2779 req_id = tx_ring->free_tx_ids[next_to_use];
2780 tx_info = &tx_ring->tx_buffer_info[req_id];
2781
2782 tx_info->mbuf = *mbuf;
2783 tx_info->num_of_bufs = 0;
2784
2785 ena_buf = tx_info->bufs;
2786
2787 ena_trace(ENA_DBG | ENA_TXPTH, "Tx: %d bytes", (*mbuf)->m_pkthdr.len);
2788
2789 /*
2790 * header_len is just a hint for the device. Because the stack does
2791 * not give us the packet header length and it is not guaranteed
2792 * that all packet headers will be in the first mbuf, setting
2793 * header_len to 0 makes the device ignore this value and resolve
2794 * the header on its own.
2795 */
2796 header_len = 0;
2797 push_hdr = NULL;
2798
2799 rc = bus_dmamap_load_mbuf(adapter->sc_dmat, tx_info->map,
2800 *mbuf, BUS_DMA_NOWAIT);
2801
2802 if (unlikely((rc != 0) || (tx_info->map->dm_nsegs == 0))) {
2803 ena_trace(ENA_WARNING,
2804 "dmamap load failed! err: %d nsegs: %d", rc,
2805 tx_info->map->dm_nsegs);
2806 counter_u64_add(tx_ring->tx_stats.dma_mapping_err, 1);
2807 tx_info->mbuf = NULL;
2808 if (rc == ENOMEM)
2809 return (ENA_COM_NO_MEM);
2810 else
2811 return (ENA_COM_INVAL);
2812 }
2813
2814 for (i = 0; i < tx_info->map->dm_nsegs; i++) {
2815 ena_buf->len = tx_info->map->dm_segs[i].ds_len;
2816 ena_buf->paddr = tx_info->map->dm_segs[i].ds_addr;
2817 ena_buf++;
2818 }
2819 tx_info->num_of_bufs = tx_info->map->dm_nsegs;
2820
2821 memset(&ena_tx_ctx, 0x0, sizeof(struct ena_com_tx_ctx));
2822 ena_tx_ctx.ena_bufs = tx_info->bufs;
2823 ena_tx_ctx.push_header = push_hdr;
2824 ena_tx_ctx.num_bufs = tx_info->num_of_bufs;
2825 ena_tx_ctx.req_id = req_id;
2826 ena_tx_ctx.header_len = header_len;
2827
2828 /* Set flags and meta data */
2829 ena_tx_csum(&ena_tx_ctx, *mbuf);
2830 /* Prepare the packet's descriptors and send them to device */
2831 rc = ena_com_prepare_tx(io_sq, &ena_tx_ctx, &nb_hw_desc);
2832 if (unlikely(rc != 0)) {
2833 device_printf(adapter->pdev, "failed to prepare tx bufs\n");
2834 counter_u64_add(tx_ring->tx_stats.prepare_ctx_err, 1);
2835 goto dma_error;
2836 }
2837
2838 counter_enter();
2839 counter_u64_add_protected(tx_ring->tx_stats.cnt, 1);
2840 counter_u64_add_protected(tx_ring->tx_stats.bytes,
2841 (*mbuf)->m_pkthdr.len);
2842
2843 counter_u64_add_protected(adapter->hw_stats.tx_packets, 1);
2844 counter_u64_add_protected(adapter->hw_stats.tx_bytes,
2845 (*mbuf)->m_pkthdr.len);
2846 counter_exit();
2847
2848 tx_info->tx_descs = nb_hw_desc;
2849 getbinuptime(&tx_info->timestamp);
2850 tx_info->print_once = false;
2851
2852 tx_ring->next_to_use = ENA_TX_RING_IDX_NEXT(next_to_use,
2853 tx_ring->ring_size);
2854
2855 bus_dmamap_sync(adapter->sc_dmat, tx_info->map, 0,
2856 tx_info->map->dm_mapsize, BUS_DMASYNC_PREWRITE);
2857
2858 return (0);
2859
2860 dma_error:
2861 tx_info->mbuf = NULL;
2862 bus_dmamap_unload(adapter->sc_dmat, tx_info->map);
2863
2864 return (rc);
2865 }
2866
2867 static void
2868 ena_start_xmit(struct ena_ring *tx_ring)
2869 {
2870 struct mbuf *mbuf;
2871 struct ena_adapter *adapter = tx_ring->adapter;
2872 struct ena_com_io_sq* io_sq;
2873 int ena_qid;
2874 int acum_pkts = 0;
2875 int ret = 0;
2876
2877 if (unlikely((if_getdrvflags(adapter->ifp) & IFF_RUNNING) == 0))
2878 return;
2879
2880 if (unlikely(!adapter->link_status))
2881 return;
2882
2883 ena_qid = ENA_IO_TXQ_IDX(tx_ring->que->id);
2884 io_sq = &adapter->ena_dev->io_sq_queues[ena_qid];
2885
2886 while ((mbuf = drbr_peek(adapter->ifp, tx_ring->br)) != NULL) {
2887 ena_trace(ENA_DBG | ENA_TXPTH, "\ndequeued mbuf %p with flags %#x and"
2888 " header csum flags %#jx",
2889 mbuf, mbuf->m_flags, (uintmax_t)mbuf->m_pkthdr.csum_flags);
2890
2891 if (unlikely(!ena_com_sq_have_enough_space(io_sq,
2892 ENA_TX_CLEANUP_THRESHOLD)))
2893 ena_tx_cleanup(tx_ring);
2894
2895 if (unlikely((ret = ena_xmit_mbuf(tx_ring, &mbuf)) != 0)) {
2896 if (ret == ENA_COM_NO_MEM) {
2897 drbr_putback(adapter->ifp, tx_ring->br, mbuf);
2898 } else if (ret == ENA_COM_NO_SPACE) {
2899 drbr_putback(adapter->ifp, tx_ring->br, mbuf);
2900 } else {
2901 m_freem(mbuf);
2902 drbr_advance(adapter->ifp, tx_ring->br);
2903 }
2904
2905 break;
2906 }
2907
2908 drbr_advance(adapter->ifp, tx_ring->br);
2909
2910 if (unlikely((if_getdrvflags(adapter->ifp) &
2911 IFF_RUNNING) == 0))
2912 return;
2913
2914 acum_pkts++;
2915
2916 /*
2917 * If there's a BPF listener, bounce a copy of this frame
2918 * to him.
2919 */
2920 bpf_mtap(adapter->ifp, mbuf, BPF_D_OUT);
2921
2922 if (unlikely(acum_pkts == DB_THRESHOLD)) {
2923 acum_pkts = 0;
2924 wmb();
2925 /* Trigger the dma engine */
2926 ena_com_write_sq_doorbell(io_sq);
2927 counter_u64_add(tx_ring->tx_stats.doorbells, 1);
2928 }
2929
2930 }
2931
2932 if (likely(acum_pkts != 0)) {
2933 wmb();
2934 /* Trigger the dma engine */
2935 ena_com_write_sq_doorbell(io_sq);
2936 counter_u64_add(tx_ring->tx_stats.doorbells, 1);
2937 }
2938
2939 if (!ena_com_sq_have_enough_space(io_sq, ENA_TX_CLEANUP_THRESHOLD))
2940 ena_tx_cleanup(tx_ring);
2941 }
2942
2943 static void
2944 ena_deferred_mq_start(struct work *wk, void *arg)
2945 {
2946 struct ena_ring *tx_ring = (struct ena_ring *)arg;
2947 struct ifnet *ifp = tx_ring->adapter->ifp;
2948
2949 atomic_swap_uint(&tx_ring->task_pending, 0);
2950
2951 while (!drbr_empty(ifp, tx_ring->br) &&
2952 (if_getdrvflags(ifp) & IFF_RUNNING) != 0) {
2953 ENA_RING_MTX_LOCK(tx_ring);
2954 ena_start_xmit(tx_ring);
2955 ENA_RING_MTX_UNLOCK(tx_ring);
2956 }
2957 }
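/*
 * The task_pending flag makes the enqueue single-shot: ena_mq_start()
 * below only schedules the workqueue on a 0 -> 1 transition, and the
 * worker above clears the flag with atomic_swap_uint() before draining,
 * so a packet queued while the worker is running re-arms the task
 * instead of being left behind.
 */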
2958
2959 static int
2960 ena_mq_start(struct ifnet *ifp, struct mbuf *m)
2961 {
2962 struct ena_adapter *adapter = ifp->if_softc;
2963 struct ena_ring *tx_ring;
2964 int ret, is_drbr_empty;
2965 uint32_t i;
2966
2967 if (unlikely((if_getdrvflags(adapter->ifp) & IFF_RUNNING) == 0))
2968 return (ENODEV);
2969
2970 /* Which queue to use */
2971 /*
2972 * If everything is set up correctly, it should map to the same
2973 * bucket as the CPU we are currently running on, which should
2974 * improve performance.
2975 */
2976 #if 0
2977 if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) {
2978 #ifdef RSS
2979 if (rss_hash2bucket(m->m_pkthdr.flowid,
2980 M_HASHTYPE_GET(m), &i) == 0) {
2981 i = i % adapter->num_queues;
2982
2983 } else
2984 #endif
2985 {
2986 i = m->m_pkthdr.flowid % adapter->num_queues;
2987 }
2988 } else {
2989 #endif
2990 i = cpu_index(curcpu()) % adapter->num_queues;
2991 #if 0
2992 }
2993 #endif
2994 tx_ring = &adapter->tx_ring[i];
2995
2996 /* Check if drbr is empty before putting packet */
2997 is_drbr_empty = drbr_empty(ifp, tx_ring->br);
2998 ret = drbr_enqueue(ifp, tx_ring->br, m);
2999 if (unlikely(ret != 0)) {
3000 if (atomic_cas_uint(&tx_ring->task_pending, 0, 1) == 0)
3001 workqueue_enqueue(tx_ring->enqueue_tq, &tx_ring->enqueue_task,
3002 curcpu());
3003 return (ret);
3004 }
3005
3006 if ((is_drbr_empty != 0) && (ENA_RING_MTX_TRYLOCK(tx_ring) != 0)) {
3007 ena_start_xmit(tx_ring);
3008 ENA_RING_MTX_UNLOCK(tx_ring);
3009 } else {
3010 if (atomic_cas_uint(&tx_ring->task_pending, 0, 1) == 0)
3011 workqueue_enqueue(tx_ring->enqueue_tq, &tx_ring->enqueue_task,
3012 curcpu());
3013 }
3014
3015 return (0);
3016 }
3017
3018 #if 0
3019 static void
3020 ena_qflush(struct ifnet *ifp)
3021 {
3022 struct ena_adapter *adapter = ifp->if_softc;
3023 struct ena_ring *tx_ring = adapter->tx_ring;
3024 int i;
3025
3026 for(i = 0; i < adapter->num_queues; ++i, ++tx_ring)
3027 if (!drbr_empty(ifp, tx_ring->br)) {
3028 ENA_RING_MTX_LOCK(tx_ring);
3029 drbr_flush(ifp, tx_ring->br);
3030 ENA_RING_MTX_UNLOCK(tx_ring);
3031 }
3032
3033 if_qflush(ifp);
3034 }
3035 #endif
3036
3037 static int
3038 ena_calc_io_queue_num(struct pci_attach_args *pa,
3039 struct ena_adapter *adapter,
3040 struct ena_com_dev_get_features_ctx *get_feat_ctx)
3041 {
3042 int io_sq_num, io_cq_num, io_queue_num;
3043
3044 io_sq_num = get_feat_ctx->max_queues.max_sq_num;
3045 io_cq_num = get_feat_ctx->max_queues.max_cq_num;
3046
3047 io_queue_num = min_t(int, mp_ncpus, ENA_MAX_NUM_IO_QUEUES);
3048 io_queue_num = min_t(int, io_queue_num, io_sq_num);
3049 io_queue_num = min_t(int, io_queue_num, io_cq_num);
3050 /* 1 IRQ for mgmnt and 1 IRQ for each TX/RX pair */
3051 io_queue_num = min_t(int, io_queue_num,
3052 pci_msix_count(pa->pa_pc, pa->pa_tag) - 1);
3053 #ifdef RSS
3054 io_queue_num = min_t(int, io_queue_num, rss_getnumbuckets());
3055 #endif
3056
3057 return (io_queue_num);
3058 }
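/*
 * Worked example (hypothetical numbers): on an 8-CPU instance whose
 * device reports 8 SQs, 8 CQs and 9 MSI-X vectors, the chain above
 * yields min(8, 8, 8, 9 - 1) = 8 IO queue pairs.
 */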
3059
3060 static int
3061 ena_calc_queue_size(struct ena_adapter *adapter, uint16_t *max_tx_sgl_size,
3062 uint16_t *max_rx_sgl_size, struct ena_com_dev_get_features_ctx *feat)
3063 {
3064 uint32_t queue_size = ENA_DEFAULT_RING_SIZE;
3067
3068 queue_size = min_t(uint32_t, queue_size,
3069 feat->max_queues.max_cq_depth);
3070 queue_size = min_t(uint32_t, queue_size,
3071 feat->max_queues.max_sq_depth);
3072
3073 /* round down to the nearest power of 2 */
3074 while (!powerof2(queue_size))
3075 queue_size &= queue_size - 1; /* clear the lowest set bit */
3085
3086 if (unlikely(queue_size == 0)) {
3087 device_printf(adapter->pdev, "Invalid queue size\n");
3088 return (ENA_COM_FAULT);
3089 }
3090
3091 *max_tx_sgl_size = min_t(uint16_t, ENA_PKT_MAX_BUFS,
3092 feat->max_queues.max_packet_tx_descs);
3093 *max_rx_sgl_size = min_t(uint16_t, ENA_PKT_MAX_BUFS,
3094 feat->max_queues.max_packet_rx_descs);
3095
3096 return (queue_size);
3097 }
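/*
 * Worked example (hypothetical numbers): with a default ring size of
 * 1024 and a device reporting max_cq_depth = max_sq_depth = 700,
 * queue_size becomes min(1024, 700, 700) = 700, which the rounding
 * above reduces to 512.
 */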
3098
3099 #if 0
3100 static int
3101 ena_rss_init_default(struct ena_adapter *adapter)
3102 {
3103 struct ena_com_dev *ena_dev = adapter->ena_dev;
3104 device_t dev = adapter->pdev;
3105 int qid, rc, i;
3106
3107 rc = ena_com_rss_init(ena_dev, ENA_RX_RSS_TABLE_LOG_SIZE);
3108 if (unlikely(rc != 0)) {
3109 device_printf(dev, "Cannot init indirect table\n");
3110 return (rc);
3111 }
3112
3113 for (i = 0; i < ENA_RX_RSS_TABLE_SIZE; i++) {
3114 #ifdef RSS
3115 qid = rss_get_indirection_to_bucket(i);
3116 qid = qid % adapter->num_queues;
3117 #else
3118 qid = i % adapter->num_queues;
3119 #endif
3120 rc = ena_com_indirect_table_fill_entry(ena_dev, i,
3121 ENA_IO_RXQ_IDX(qid));
3122 if (unlikely((rc != 0) && (rc != EOPNOTSUPP))) {
3123 device_printf(dev, "Cannot fill indirect table\n");
3124 goto err_rss_destroy;
3125 }
3126 }
3127
3128 rc = ena_com_fill_hash_function(ena_dev, ENA_ADMIN_CRC32, NULL,
3129 ENA_HASH_KEY_SIZE, 0xFFFFFFFF);
3130 if (unlikely((rc != 0) && (rc != EOPNOTSUPP))) {
3131 device_printf(dev, "Cannot fill hash function\n");
3132 goto err_rss_destroy;
3133 }
3134
3135 rc = ena_com_set_default_hash_ctrl(ena_dev);
3136 if (unlikely((rc != 0) && (rc != EOPNOTSUPP))) {
3137 device_printf(dev, "Cannot fill hash control\n");
3138 goto err_rss_destroy;
3139 }
3140
3141 return (0);
3142
3143 err_rss_destroy:
3144 ena_com_rss_destroy(ena_dev);
3145 return (rc);
3146 }
3147
3148 static void
3149 ena_rss_init_default_deferred(void *arg)
3150 {
3151 struct ena_adapter *adapter;
3152 devclass_t dc;
3153 int max;
3154 int rc;
3155
3156 dc = devclass_find("ena");
3157 if (unlikely(dc == NULL)) {
3158 ena_trace(ENA_ALERT, "No devclass ena\n");
3159 return;
3160 }
3161
3162 max = devclass_get_maxunit(dc);
3163 while (max-- >= 0) {
3164 adapter = devclass_get_softc(dc, max);
3165 if (adapter != NULL) {
3166 rc = ena_rss_init_default(adapter);
3167 adapter->rss_support = true;
3168 if (unlikely(rc != 0)) {
3169 device_printf(adapter->pdev,
3170 "WARNING: RSS was not properly initialized,"
3171 " it will affect bandwidth\n");
3172 adapter->rss_support = false;
3173 }
3174 }
3175 }
3176 }
3177 SYSINIT(ena_rss_init, SI_SUB_KICK_SCHEDULER, SI_ORDER_SECOND, ena_rss_init_default_deferred, NULL);
3178 #endif
3179
3180 static void
3181 ena_config_host_info(struct ena_com_dev *ena_dev)
3182 {
3183 struct ena_admin_host_info *host_info;
3184 int rc;
3185
3186 /* Allocate only the host info */
3187 rc = ena_com_allocate_host_info(ena_dev);
3188 if (unlikely(rc != 0)) {
3189 ena_trace(ENA_ALERT, "Cannot allocate host info\n");
3190 return;
3191 }
3192
3193 host_info = ena_dev->host_attr.host_info;
3194
3195 host_info->os_type = ENA_ADMIN_OS_FREEBSD;
3196 host_info->kernel_ver = osreldate;
3197
3198 snprintf(host_info->kernel_ver_str, sizeof(host_info->kernel_ver_str),
3199 "%d", osreldate);
3200 host_info->os_dist = 0;
3201 strncpy(host_info->os_dist_str, osrelease,
3202 sizeof(host_info->os_dist_str) - 1);
3203
3204 host_info->driver_version =
3205 (DRV_MODULE_VER_MAJOR) |
3206 (DRV_MODULE_VER_MINOR << ENA_ADMIN_HOST_INFO_MINOR_SHIFT) |
3207 (DRV_MODULE_VER_SUBMINOR << ENA_ADMIN_HOST_INFO_SUB_MINOR_SHIFT);
3208
3209 rc = ena_com_set_host_attributes(ena_dev);
3210 if (unlikely(rc != 0)) {
3211 if (rc == EOPNOTSUPP)
3212 ena_trace(ENA_WARNING, "Cannot set host attributes\n");
3213 else
3214 ena_trace(ENA_ALERT, "Cannot set host attributes\n");
3215
3216 goto err;
3217 }
3218
3219 return;
3220
3221 err:
3222 ena_com_delete_host_info(ena_dev);
3223 }
3224
3225 static int
3226 ena_device_init(struct ena_adapter *adapter, device_t pdev,
3227 struct ena_com_dev_get_features_ctx *get_feat_ctx, int *wd_active)
3228 {
3229 struct ena_com_dev* ena_dev = adapter->ena_dev;
3230 bool readless_supported;
3231 uint32_t aenq_groups;
3232 int dma_width;
3233 int rc;
3234
3235 rc = ena_com_mmio_reg_read_request_init(ena_dev);
3236 if (unlikely(rc != 0)) {
3237 device_printf(pdev, "failed to init mmio read less\n");
3238 return (rc);
3239 }
3240
3241 /*
3242 * The PCIe configuration space revision ID indicates whether mmio
3243 * register read is disabled.
3244 */
3245 const int rev = PCI_REVISION(adapter->sc_pa.pa_class);
3246 readless_supported = ((rev & ENA_MMIO_DISABLE_REG_READ) == 0);
3247 ena_com_set_mmio_read_mode(ena_dev, readless_supported);
3248
3249 rc = ena_com_dev_reset(ena_dev, ENA_REGS_RESET_NORMAL);
3250 if (unlikely(rc != 0)) {
3251 device_printf(pdev, "Can not reset device\n");
3252 goto err_mmio_read_less;
3253 }
3254
3255 rc = ena_com_validate_version(ena_dev);
3256 if (unlikely(rc != 0)) {
3257 device_printf(pdev, "device version is too low\n");
3258 goto err_mmio_read_less;
3259 }
3260
3261 dma_width = ena_com_get_dma_width(ena_dev);
3262 if (unlikely(dma_width < 0)) {
3263 device_printf(pdev, "Invalid dma width value %d", dma_width);
3264 rc = dma_width;
3265 goto err_mmio_read_less;
3266 }
3267 adapter->dma_width = dma_width;
3268
3269 /* ENA admin level init */
3270 rc = ena_com_admin_init(ena_dev, &aenq_handlers, true);
3271 if (unlikely(rc != 0)) {
3272 device_printf(pdev,
3273 "Can not initialize ena admin queue with device\n");
3274 goto err_mmio_read_less;
3275 }
3276
3277 /*
3278 * To enable MSI-X interrupts, the driver needs to know the number
3279 * of queues, so it uses polling mode to retrieve this information.
3280 */
3282 ena_com_set_admin_polling_mode(ena_dev, true);
3283
3284 ena_config_host_info(ena_dev);
3285
3286 /* Get Device Attributes */
3287 rc = ena_com_get_dev_attr_feat(ena_dev, get_feat_ctx);
3288 if (unlikely(rc != 0)) {
3289 device_printf(pdev,
3290 "Cannot get attribute for ena device rc: %d\n", rc);
3291 goto err_admin_init;
3292 }
3293
3294 aenq_groups = BIT(ENA_ADMIN_LINK_CHANGE) | BIT(ENA_ADMIN_KEEP_ALIVE);
3295
3296 aenq_groups &= get_feat_ctx->aenq.supported_groups;
3297 rc = ena_com_set_aenq_config(ena_dev, aenq_groups);
3298 if (unlikely(rc != 0)) {
3299 device_printf(pdev, "Cannot configure aenq groups rc: %d\n", rc);
3300 goto err_admin_init;
3301 }
3302
3303 *wd_active = !!(aenq_groups & BIT(ENA_ADMIN_KEEP_ALIVE));
3304
3305 return (0);
3306
3307 err_admin_init:
3308 ena_com_delete_host_info(ena_dev);
3309 ena_com_admin_destroy(ena_dev);
3310 err_mmio_read_less:
3311 ena_com_mmio_reg_read_request_destroy(ena_dev);
3312
3313 return (rc);
3314 }
3315
3316 static int ena_enable_msix_and_set_admin_interrupts(struct ena_adapter *adapter,
3317 int io_vectors)
3318 {
3319 struct ena_com_dev *ena_dev = adapter->ena_dev;
3320 int rc;
3321
3322 rc = ena_enable_msix(adapter);
3323 if (unlikely(rc != 0)) {
3324 device_printf(adapter->pdev, "Error with MSI-X enablement\n");
3325 return (rc);
3326 }
3327
3328 rc = ena_request_mgmnt_irq(adapter);
3329 if (unlikely(rc != 0)) {
3330 device_printf(adapter->pdev, "Cannot setup mgmnt queue intr\n");
3331 goto err_disable_msix;
3332 }
3333
3334 ena_com_set_admin_polling_mode(ena_dev, false);
3335
3336 ena_com_admin_aenq_enable(ena_dev);
3337
3338 return (0);
3339
3340 err_disable_msix:
3341 ena_disable_msix(adapter);
3342
3343 return (rc);
3344 }
3345
3346 /* Function called on ENA_ADMIN_KEEP_ALIVE event */
3347 static void ena_keep_alive_wd(void *adapter_data,
3348 struct ena_admin_aenq_entry *aenq_e)
3349 {
3350 struct ena_adapter *adapter = (struct ena_adapter *)adapter_data;
3351 struct ena_admin_aenq_keep_alive_desc *desc;
3352 uint64_t rx_drops;
3353
3354 desc = (struct ena_admin_aenq_keep_alive_desc *)aenq_e;
3355
3356 rx_drops = ((uint64_t)desc->rx_drops_high << 32) | desc->rx_drops_low;
3357 counter_u64_zero(adapter->hw_stats.rx_drops);
3358 counter_u64_add(adapter->hw_stats.rx_drops, rx_drops);
3359
3360 atomic_store_release(&adapter->keep_alive_timestamp, getsbinuptime());
3361 }
3362
3363 /* Check for keep alive expiration */
3364 static void check_for_missing_keep_alive(struct ena_adapter *adapter)
3365 {
3366 sbintime_t timestamp, time;
3367
3368 if (adapter->wd_active == 0)
3369 return;
3370
3371 if (unlikely(adapter->keep_alive_timeout == 0))
3372 return;
3373
3374 timestamp = atomic_load_acquire(&adapter->keep_alive_timestamp);
3375
3376 time = getsbinuptime() - timestamp;
3377 if (unlikely(time > adapter->keep_alive_timeout)) {
3378 device_printf(adapter->pdev,
3379 "Keep alive watchdog timeout.\n");
3380 counter_u64_add(adapter->dev_stats.wd_expired, 1);
3381 adapter->reset_reason = ENA_REGS_RESET_KEEP_ALIVE_TO;
3382 adapter->trigger_reset = true;
3383 }
3384 }
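/*
 * The atomic_store_release() in ena_keep_alive_wd() pairs with the
 * atomic_load_acquire() above, so the watchdog always reads a complete,
 * up-to-date 64-bit timestamp even though the AENQ handler may run on
 * another CPU.
 */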
3385
3386 /* Check if admin queue is enabled */
3387 static void check_for_admin_com_state(struct ena_adapter *adapter)
3388 {
3389 if (unlikely(ena_com_get_admin_running_state(adapter->ena_dev) ==
3390 false)) {
3391 device_printf(adapter->pdev,
3392 "ENA admin queue is not in running state!\n");
3393 counter_u64_add(adapter->dev_stats.admin_q_pause, 1);
3394 adapter->reset_reason = ENA_REGS_RESET_ADMIN_TO;
3395 adapter->trigger_reset = true;
3396 }
3397 }
3398
3399 static int
3400 check_missing_comp_in_queue(struct ena_adapter *adapter,
3401 struct ena_ring *tx_ring)
3402 {
3403 struct bintime curtime, time;
3404 struct ena_tx_buffer *tx_buf;
3405 uint32_t missed_tx = 0;
3406 int i;
3407
3408 getbinuptime(&curtime);
3409
3410 for (i = 0; i < tx_ring->ring_size; i++) {
3411 tx_buf = &tx_ring->tx_buffer_info[i];
3412
3413 if (bintime_isset(&tx_buf->timestamp) == 0)
3414 continue;
3415
3416 time = curtime;
3417 bintime_sub(&time, &tx_buf->timestamp);
3418
3419 /* Check again if packet is still waiting */
3420 if (unlikely(bttosbt(time) > adapter->missing_tx_timeout)) {
3421
3422 if (!tx_buf->print_once)
3423 ena_trace(ENA_WARNING, "Found a Tx that wasn't "
3424 "completed on time, qid %d, index %d.\n",
3425 tx_ring->qid, i);
3426
3427 tx_buf->print_once = true;
3428 missed_tx++;
3429 counter_u64_add(tx_ring->tx_stats.missing_tx_comp, 1);
3430
3431 if (unlikely(missed_tx >
3432 adapter->missing_tx_threshold)) {
3433 device_printf(adapter->pdev,
3434 "The number of lost tx completion "
3435 "is above the threshold (%d > %d). "
3436 "Reset the device\n",
3437 missed_tx, adapter->missing_tx_threshold);
3438 adapter->reset_reason =
3439 ENA_REGS_RESET_MISS_TX_CMPL;
3440 adapter->trigger_reset = true;
3441 return (EIO);
3442 }
3443 }
3444 }
3445
3446 return (0);
3447 }
3448
3449 /*
3450 * Check for TX packets which were not completed on time.
3451 * The timeout is defined by "missing_tx_timeout".
3452 * A reset will be performed if the number of incomplete
3453 * transactions exceeds "missing_tx_threshold".
3454 */
3455 static void
3456 check_for_missing_tx_completions(struct ena_adapter *adapter)
3457 {
3458 struct ena_ring *tx_ring;
3459 int i, budget, rc;
3460
3461 /* Make sure we observe up/reset flags set by other threads */
3462 rmb();
3463
3464 if (!adapter->up)
3465 return;
3466
3467 if (adapter->trigger_reset)
3468 return;
3469
3470 if (adapter->missing_tx_timeout == 0)
3471 return;
3472
3473 budget = adapter->missing_tx_max_queues;
3474
3475 for (i = adapter->next_monitored_tx_qid; i < adapter->num_queues; i++) {
3476 tx_ring = &adapter->tx_ring[i];
3477
3478 rc = check_missing_comp_in_queue(adapter, tx_ring);
3479 if (unlikely(rc != 0))
3480 return;
3481
3482 budget--;
3483 if (budget == 0) {
3484 i++;
3485 break;
3486 }
3487 }
3488
3489 adapter->next_monitored_tx_qid = i % adapter->num_queues;
3490 }
3491
3492 /* trigger deferred rx cleanup after 2 consecutive detections */
3493 #define EMPTY_RX_REFILL 2
3494 /* For the rare case where the device runs out of Rx descriptors and the
3495 * MSI-X handler failed to refill new Rx descriptors (due to a lack of
3496 * memory, for example).
3497 * This case will lead to a deadlock: the device won't send interrupts
3498 * since all the new Rx packets will be dropped, and the MSI-X handler
3499 * won't allocate new Rx descriptors, so the device won't be able to
3500 * send new packets.
3501 *
3502 * When such a situation is detected, execute the rx cleanup task in another thread.
3503 */
3504 static void
3505 check_for_empty_rx_ring(struct ena_adapter *adapter)
3506 {
3507 struct ena_ring *rx_ring;
3508 int i, refill_required;
3509
3510 if (!adapter->up)
3511 return;
3512
3513 if (adapter->trigger_reset)
3514 return;
3515
3516 for (i = 0; i < adapter->num_queues; i++) {
3517 rx_ring = &adapter->rx_ring[i];
3518
3519 refill_required = ena_com_free_desc(rx_ring->ena_com_io_sq);
3520 if (unlikely(refill_required == (rx_ring->ring_size - 1))) {
3521 rx_ring->empty_rx_queue++;
3522
3523 if (rx_ring->empty_rx_queue >= EMPTY_RX_REFILL) {
3524 counter_u64_add(rx_ring->rx_stats.empty_rx_ring,
3525 1);
3526
3527 device_printf(adapter->pdev,
3528 "trigger refill for ring %d\n", i);
3529
3530 if (atomic_cas_uint(&rx_ring->task_pending, 0, 1) == 0)
3531 workqueue_enqueue(rx_ring->cmpl_tq,
3532 &rx_ring->cmpl_task, curcpu());
3533 rx_ring->empty_rx_queue = 0;
3534 }
3535 } else {
3536 rx_ring->empty_rx_queue = 0;
3537 }
3538 }
3539 }
3540
3541 static void
3542 ena_timer_service(void *data)
3543 {
3544 struct ena_adapter *adapter = (struct ena_adapter *)data;
3545 struct ena_admin_host_info *host_info =
3546 adapter->ena_dev->host_attr.host_info;
3547
3548 check_for_missing_keep_alive(adapter);
3549
3550 check_for_admin_com_state(adapter);
3551
3552 check_for_missing_tx_completions(adapter);
3553
3554 check_for_empty_rx_ring(adapter);
3555
3556 if (host_info != NULL)
3557 ena_update_host_info(host_info, adapter->ifp);
3558
3559 if (unlikely(adapter->trigger_reset)) {
3560 device_printf(adapter->pdev, "Trigger reset is on\n");
3561 workqueue_enqueue(adapter->reset_tq, &adapter->reset_task,
3562 curcpu());
3563 return;
3564 }
3565
3566 /*
3567 * Schedule another timeout one second from now.
3568 */
3569 callout_schedule(&adapter->timer_service, hz);
3570 }
3571
3572 static void
3573 ena_reset_task(struct work *wk, void *arg)
3574 {
3575 struct ena_com_dev_get_features_ctx get_feat_ctx;
3576 struct ena_adapter *adapter = (struct ena_adapter *)arg;
3577 struct ena_com_dev *ena_dev = adapter->ena_dev;
3578 bool dev_up;
3579 int rc;
3580
3581 if (unlikely(!adapter->trigger_reset)) {
3582 device_printf(adapter->pdev,
3583 "device reset scheduled but trigger_reset is off\n");
3584 return;
3585 }
3586
3587 rw_enter(&adapter->ioctl_sx, RW_WRITER);
3588
3589 callout_halt(&adapter->timer_service, &adapter->global_mtx);
3590
3591 dev_up = adapter->up;
3592
3593 ena_com_set_admin_running_state(ena_dev, false);
3594 ena_down(adapter);
3595 ena_free_mgmnt_irq(adapter);
3596 ena_disable_msix(adapter);
3597 ena_com_abort_admin_commands(ena_dev);
3598 ena_com_wait_for_abort_completion(ena_dev);
3599 ena_com_admin_destroy(ena_dev);
3600 ena_com_mmio_reg_read_request_destroy(ena_dev);
3601
3602 adapter->reset_reason = ENA_REGS_RESET_NORMAL;
3603 adapter->trigger_reset = false;
3604
3605 /* Finished destroy part. Restart the device */
3606 rc = ena_device_init(adapter, adapter->pdev, &get_feat_ctx,
3607 &adapter->wd_active);
3608 if (unlikely(rc != 0)) {
3609 device_printf(adapter->pdev,
3610 "ENA device init failed! (err: %d)\n", rc);
3611 goto err_dev_free;
3612 }
3613
3614 /* XXX dealloc and realloc MSI-X, probably a waste */
3615 rc = ena_enable_msix_and_set_admin_interrupts(adapter,
3616 adapter->num_queues);
3617 if (unlikely(rc != 0)) {
3618 device_printf(adapter->pdev, "Enable MSI-X failed\n");
3619 goto err_com_free;
3620 }
3621
3622 /* If the interface was up before the reset bring it up */
3623 if (dev_up) {
3624 rc = ena_up(adapter);
3625 if (unlikely(rc != 0)) {
3626 device_printf(adapter->pdev,
3627 "Failed to create I/O queues\n");
3628 goto err_msix_free;
3629 }
3630 }
3631
3632 callout_reset(&adapter->timer_service, hz,
3633 ena_timer_service, (void *)adapter);
3634
3635 rw_exit(&adapter->ioctl_sx);
3636
3637 return;
3638
3639 err_msix_free:
3640 ena_free_mgmnt_irq(adapter);
3641 ena_disable_msix(adapter);
3642 err_com_free:
3643 ena_com_admin_destroy(ena_dev);
3644 err_dev_free:
3645 device_printf(adapter->pdev, "ENA reset failed!\n");
3646 adapter->running = false;
3647 rw_exit(&adapter->ioctl_sx);
3648 }
3649
3650 /**
3651 * ena_attach - Device Initialization Routine
3652 * @parent, @self, @aux: autoconf(9) attachment arguments
3653 *
3654 * ena_attach initializes an adapter identified by a device structure.
3655 * The OS initialization, configuring of the adapter private structure,
3656 * and a hardware reset occur.  Failures are reported with device_printf,
3657 * since autoconf attach functions return no value.
3658 **/
3660 static void
3661 ena_attach(device_t parent, device_t self, void *aux)
3662 {
3663 struct pci_attach_args *pa = aux;
3664 struct ena_com_dev_get_features_ctx get_feat_ctx;
3665 static int version_printed;
3666 struct ena_adapter *adapter = device_private(self);
3667 struct ena_com_dev *ena_dev = NULL;
3668 uint16_t tx_sgl_size = 0;
3669 uint16_t rx_sgl_size = 0;
3670 pcireg_t reg;
3671 int io_queue_num;
3672 int queue_size;
3673 int rc;
3674
3675 adapter->pdev = self;
3676 adapter->ifp = &adapter->sc_ec.ec_if;
3677 adapter->sc_pa = *pa; /* used after attach for adapter reset too */
3678
3679 if (pci_dma64_available(pa))
3680 adapter->sc_dmat = pa->pa_dmat64;
3681 else
3682 adapter->sc_dmat = pa->pa_dmat;
3683
3684 pci_aprint_devinfo(pa, NULL);
3685
3686 reg = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
3687 if ((reg & PCI_COMMAND_MASTER_ENABLE) == 0) {
3688 reg |= PCI_COMMAND_MASTER_ENABLE;
3689 pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, reg);
3690 }
3691
3692 mutex_init(&adapter->global_mtx, MUTEX_DEFAULT, IPL_NET);
3693 rw_init(&adapter->ioctl_sx);
3694
3695 /* Set up the timer service */
3696 adapter->keep_alive_timeout = DEFAULT_KEEP_ALIVE_TO;
3697 adapter->missing_tx_timeout = DEFAULT_TX_CMP_TO;
3698 adapter->missing_tx_max_queues = DEFAULT_TX_MONITORED_QUEUES;
3699 adapter->missing_tx_threshold = DEFAULT_TX_CMP_THRESHOLD;
3700
3701 if (version_printed++ == 0)
3702 device_printf(parent, "%s\n", ena_version);
3703
3704 rc = ena_allocate_pci_resources(pa, adapter);
3705 if (unlikely(rc != 0)) {
3706 device_printf(parent, "PCI resource allocation failed!\n");
3707 ena_free_pci_resources(adapter);
3708 return;
3709 }
3710
3711 /* Allocate memory for ena_dev structure */
3712 ena_dev = malloc(sizeof(struct ena_com_dev), M_DEVBUF,
3713 M_WAITOK | M_ZERO);
3714
3715 adapter->ena_dev = ena_dev;
3716 ena_dev->dmadev = self;
3717 ena_dev->bus = malloc(sizeof(struct ena_bus), M_DEVBUF,
3718 M_WAITOK | M_ZERO);
3719
3720 /* Store register resources */
3721 ((struct ena_bus*)(ena_dev->bus))->reg_bar_t = adapter->sc_btag;
3722 ((struct ena_bus*)(ena_dev->bus))->reg_bar_h = adapter->sc_bhandle;
3723
3724 ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
3725
3726 /* Device initialization */
3727 rc = ena_device_init(adapter, self, &get_feat_ctx, &adapter->wd_active);
3728 if (unlikely(rc != 0)) {
3729 device_printf(self, "ENA device init failed! (err: %d)\n", rc);
3730 rc = ENXIO;
3731 goto err_bus_free;
3732 }
3733
3734 adapter->keep_alive_timestamp = getsbinuptime();
3735
3736 adapter->tx_offload_cap = get_feat_ctx.offload.tx;
3737
3738 /* Make sure the interface starts out marked down */
3739 adapter->up = false;
3740
3741 memcpy(adapter->mac_addr, get_feat_ctx.dev_attr.mac_addr,
3742 ETHER_ADDR_LEN);
3743
3744 /* calculate IO queue number to create */
3745 io_queue_num = ena_calc_io_queue_num(pa, adapter, &get_feat_ctx);
3746
3747 ENA_ASSERT(io_queue_num > 0, "Invalid queue number: %d\n",
3748 io_queue_num);
3749 adapter->num_queues = io_queue_num;
3750
3751 adapter->max_mtu = get_feat_ctx.dev_attr.max_mtu;
3752
3753 /* calculate ring sizes */
3754 queue_size = ena_calc_queue_size(adapter, &tx_sgl_size,
3755 &rx_sgl_size, &get_feat_ctx);
3756 if (unlikely((queue_size <= 0) || (io_queue_num <= 0))) {
3757 rc = ENA_COM_FAULT;
3758 goto err_com_free;
3759 }
3760
3761 adapter->reset_reason = ENA_REGS_RESET_NORMAL;
3762
3763 adapter->tx_ring_size = queue_size;
3764 adapter->rx_ring_size = queue_size;
3765
3766 adapter->max_tx_sgl_size = tx_sgl_size;
3767 adapter->max_rx_sgl_size = rx_sgl_size;
3768
3769 #if 0
3770 /* set up dma tags for rx and tx buffers */
3771 rc = ena_setup_tx_dma_tag(adapter);
3772 if (unlikely(rc != 0)) {
3773 device_printf(self, "Failed to create TX DMA tag\n");
3774 goto err_com_free;
3775 }
3776
3777 rc = ena_setup_rx_dma_tag(adapter);
3778 if (unlikely(rc != 0)) {
3779 device_printf(self, "Failed to create RX DMA tag\n");
3780 goto err_tx_tag_free;
3781 }
3782 #endif
3783
3784 /* initialize rings basic information */
3785 device_printf(self, "initialize %d io queues\n", io_queue_num);
3786 ena_init_io_rings(adapter);
3787
3788 /* setup network interface */
3789 rc = ena_setup_ifnet(self, adapter, &get_feat_ctx);
3790 if (unlikely(rc != 0)) {
3791 device_printf(self, "Error with network interface setup\n");
3792 goto err_io_free;
3793 }
3794
3795 rc = ena_enable_msix_and_set_admin_interrupts(adapter, io_queue_num);
3796 if (unlikely(rc != 0)) {
3797 device_printf(self,
3798 "Failed to enable and set the admin interrupts\n");
3799 goto err_ifp_free;
3800 }
3801
3802 callout_init(&adapter->timer_service, CALLOUT_MPSAFE);
3803
3804 /* Initialize reset task queue */
3805 rc = workqueue_create(&adapter->reset_tq, "ena_reset_enq",
3806 ena_reset_task, adapter, 0, IPL_NET, WQ_PERCPU | WQ_MPSAFE);
3807 if (unlikely(rc != 0)) {
3808 ena_trace(ENA_ALERT,
3809 "Unable to create workqueue for reset task\n");
3810 goto err_ifp_free;
3811 }
3812
3813 /* Initialize statistics */
3814 ena_alloc_counters_dev(&adapter->dev_stats, io_queue_num);
3815 ena_alloc_counters_hwstats(&adapter->hw_stats, io_queue_num);
3816 #if 0
3817 ena_sysctl_add_nodes(adapter);
3818 #endif
3819
3820 /* Tell the stack that the interface is not active */
3821 if_setdrvflagbits(adapter->ifp, IFF_OACTIVE, IFF_RUNNING);
3822
3823 adapter->running = true;
3824 return;
3825
err_ifp_free:
	if_detach(adapter->ifp);
	if_free(adapter->ifp);
err_io_free:
	ena_free_all_io_rings_resources(adapter);
#if 0
	ena_free_rx_dma_tag(adapter);
err_tx_tag_free:
	ena_free_tx_dma_tag(adapter);
#endif
err_com_free:
	ena_com_admin_destroy(ena_dev);
	ena_com_delete_host_info(ena_dev);
	ena_com_mmio_reg_read_request_destroy(ena_dev);
err_bus_free:
	free(ena_dev->bus, M_DEVBUF);
	free(ena_dev, M_DEVBUF);
	ena_free_pci_resources(adapter);
}

/**
 * ena_detach - Device Removal Routine
 * @pdev: device information struct
 *
 * ena_detach is called by the device subsystem to alert the driver
 * that it should release a PCI device.
 **/
static int
ena_detach(device_t pdev, int flags)
{
	struct ena_adapter *adapter = device_private(pdev);
	struct ena_com_dev *ena_dev = adapter->ena_dev;
#if 0
	int rc;
#endif

	/* Make sure no VLANs are still using the driver */
	if (VLAN_ATTACHED(&adapter->sc_ec)) {
		device_printf(adapter->pdev, "VLAN is in use, detach first\n");
		return (EBUSY);
	}

	/*
	 * Halt the timer callout and drain the reset workqueue first,
	 * so that no reset task can run concurrently with the
	 * teardown below.
	 */
	callout_halt(&adapter->timer_service, &adapter->global_mtx);
	callout_destroy(&adapter->timer_service);
	workqueue_wait(adapter->reset_tq, &adapter->reset_task);
	workqueue_destroy(adapter->reset_tq);
	adapter->reset_tq = NULL;

	rw_enter(&adapter->ioctl_sx, RW_WRITER);
	ena_down(adapter);
	rw_exit(&adapter->ioctl_sx);

	if (adapter->ifp != NULL) {
		ether_ifdetach(adapter->ifp);
		if_free(adapter->ifp);
	}

	ena_free_all_io_rings_resources(adapter);

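	/*
	 * The stats structures are treated as flat arrays of
	 * struct evcnt (note the casts) and freed generically by
	 * size.
	 */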
	ena_free_counters((struct evcnt *)&adapter->hw_stats,
	    sizeof(struct ena_hw_stats));
	ena_free_counters((struct evcnt *)&adapter->dev_stats,
	    sizeof(struct ena_stats_dev));

	if (likely(adapter->rss_support))
		ena_com_rss_destroy(ena_dev);

#if 0
	rc = ena_free_rx_dma_tag(adapter);
	if (unlikely(rc != 0))
		device_printf(adapter->pdev,
		    "Failed to free RX DMA tag\n");

	rc = ena_free_tx_dma_tag(adapter);
	if (unlikely(rc != 0))
		device_printf(adapter->pdev,
		    "Failed to free TX DMA tag\n");
#endif

	/* Reset the device only if the device is running. */
	if (adapter->running)
		ena_com_dev_reset(ena_dev, adapter->reset_reason);

	ena_com_delete_host_info(ena_dev);

	ena_free_irqs(adapter);

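	/*
	 * Abort any outstanding admin commands and wait for them to
	 * complete before destroying the admin queue itself.
	 */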
	ena_com_abort_admin_commands(ena_dev);

	ena_com_wait_for_abort_completion(ena_dev);

	ena_com_admin_destroy(ena_dev);

	ena_com_mmio_reg_read_request_destroy(ena_dev);

	ena_free_pci_resources(adapter);

	mutex_destroy(&adapter->global_mtx);
	rw_destroy(&adapter->ioctl_sx);

	if (ena_dev->bus != NULL)
		free(ena_dev->bus, M_DEVBUF);

	if (ena_dev != NULL)
		free(ena_dev, M_DEVBUF);

	return 0;
}

/******************************************************************************
 ******************************** AENQ Handlers *******************************
 *****************************************************************************/
/**
 * ena_update_on_link_change:
 * Notify the network interface about the change in link status
 **/
static void
ena_update_on_link_change(void *adapter_data,
    struct ena_admin_aenq_entry *aenq_e)
{
	struct ena_adapter *adapter = (struct ena_adapter *)adapter_data;
	struct ena_admin_aenq_link_change_desc *aenq_desc;
	int status;
	struct ifnet *ifp;

	aenq_desc = (struct ena_admin_aenq_link_change_desc *)aenq_e;
	ifp = adapter->ifp;
	status = aenq_desc->flags &
	    ENA_ADMIN_AENQ_LINK_CHANGE_DESC_LINK_STATUS_MASK;

	/*
	 * Only the link-status bit of the flags field is examined,
	 * so status is either zero (link down) or non-zero (link up).
	 */
	if (status != 0) {
		device_printf(adapter->pdev, "link is UP\n");
		if_link_state_change(ifp, LINK_STATE_UP);
	} else {
		device_printf(adapter->pdev, "link is DOWN\n");
		if_link_state_change(ifp, LINK_STATE_DOWN);
	}

	adapter->link_status = status;
}

/**
 * This handler will be called for an unknown event group or for
 * unimplemented handlers.
 **/
static void
unimplemented_aenq_handler(void *data,
    struct ena_admin_aenq_entry *aenq_e)
{
	return;
}

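/*
 * AENQ event dispatch table: any event without a dedicated handler
 * here is routed to unimplemented_aenq_handler.
 */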
static struct ena_aenq_handlers aenq_handlers = {
	.handlers = {
		[ENA_ADMIN_LINK_CHANGE] = ena_update_on_link_change,
		[ENA_ADMIN_KEEP_ALIVE] = ena_keep_alive_wd,
	},
	.unimplemented_handler = unimplemented_aenq_handler
};

#ifdef __FreeBSD__
/*********************************************************************
 *  FreeBSD Device Interface Entry Points
 *********************************************************************/

static device_method_t ena_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, ena_probe),
	DEVMETHOD(device_attach, ena_attach),
	DEVMETHOD(device_detach, ena_detach),
	DEVMETHOD_END
};

static driver_t ena_driver = {
	"ena", ena_methods, sizeof(struct ena_adapter),
};

devclass_t ena_devclass;
DRIVER_MODULE(ena, pci, ena_driver, ena_devclass, 0, 0);
MODULE_DEPEND(ena, pci, 1, 1, 1);
MODULE_DEPEND(ena, ether, 1, 1, 1);

/*********************************************************************/
#endif /* __FreeBSD__ */

#ifdef __NetBSD__
CFATTACH_DECL_NEW(ena, sizeof(struct ena_adapter), ena_probe, ena_attach,
    ena_detach, NULL);
#endif /* __NetBSD__ */
