/src/sys/fs/udf/

udf_strat_sequential.c
      96  struct bufq_state *queues[UDF_SHED_MAX];  (member in struct:strat_private)
     284  bufq_put(priv->queues[queue], nestbuf);
     486  buf = bufq_get(priv->queues[priv->cur_queue]);
     529  if (bufq_peek(priv->queues[UDF_SHED_READING]))
     531  if (bufq_peek(priv->queues[UDF_SHED_WRITING]))  /* only for unmount */
     533  if (bufq_peek(priv->queues[UDF_SHED_SEQWRITING]))
     579  empty = (bufq_peek(priv->queues[UDF_SHED_READING]) == NULL);
     580  empty &= (bufq_peek(priv->queues[UDF_SHED_WRITING]) == NULL);
     581  empty &= (bufq_peek(priv->queues[UDF_SHED_SEQWRITING]) == NULL);
     657  bufq_alloc(&priv->queues[UDF_SHED_READING], "disksort"
    [all...]

udf_strat_rmw.c
     136  struct bufq_state *queues[UDF_SHED_MAX];  (member in struct:strat_private)
     260  bufq_put(priv->queues[newqueue], eccline->buf);
     279  buf = bufq_peek(priv->queues[queued_on]);
     313  buf = bufq_get(priv->queues[queued_on]);
     356  ret = bufq_cancel(priv->queues[eccline->queued_on], eccline->buf);
    1299  if (bufq_peek(priv->queues[UDF_SHED_READING]))
    1301  if (bufq_peek(priv->queues[UDF_SHED_WRITING]))
    1303  if (bufq_peek(priv->queues[UDF_SHED_SEQWRITING]))
    1323  work = (bufq_peek(priv->queues[UDF_SHED_WAITING]) != NULL);
    1324  work |= (bufq_peek(priv->queues[UDF_SHED_READING]) != NULL)
    [all...]
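Both UDF strategy modules drive NetBSD's generic buffer-queue API: one bufq_state per scheduling class, filled with bufq_put(), polled with bufq_peek(), and drained with bufq_get(). A minimal sketch of that pattern follows; only the bufq_*() calls and BUFQ_SORT_RAWBLOCK are the real API, the my_* names are hypothetical.

/*
 * Minimal sketch of the bufq pattern in the hits above (NetBSD kernel).
 * The my_* names are invented; bufq_alloc/put/peek/get are real.
 */
#include <sys/param.h>
#include <sys/buf.h>
#include <sys/bufq.h>

struct my_strat {
	struct bufq_state *queues[2];	/* e.g. one per scheduling class */
};

static void
my_device_strategy(struct buf *bp)
{
	(void)bp;	/* the real driver would issue the I/O here */
}

static int
my_strat_init(struct my_strat *priv)
{
	/* "disksort" orders buffers by block number to reduce seeking */
	return bufq_alloc(&priv->queues[0], "disksort", BUFQ_SORT_RAWBLOCK);
}

static void
my_strat_queue(struct my_strat *priv, struct buf *bp)
{
	bufq_put(priv->queues[0], bp);	/* enqueue for the worker */
}

static void
my_strat_run(struct my_strat *priv)
{
	struct buf *bp;

	/* bufq_peek() checks for pending work without dequeueing... */
	if (bufq_peek(priv->queues[0]) == NULL)
		return;
	/* ...bufq_get() then removes buffers one at a time */
	while ((bp = bufq_get(priv->queues[0])) != NULL)
		my_device_strategy(bp);
}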
/src/sys/dev/pci/ixgbe/

ixv.c
     186  /* Number of Queues - do not exceed MSI-X vectors - 1 */
     530  /* Allocate our TX/RX Queues */
     654  evcnt_detach(&sc->queues[i].irqs);
     655  evcnt_detach(&sc->queues[i].handleq);
     656  evcnt_detach(&sc->queues[i].req);
     724  for (i = 0, que = sc->queues; i < sc->num_queues; i++, que++)
     784  for (i = 0, que = sc->queues; i < sc->num_queues; i++, que++)
     833  struct ix_queue *que = &sc->queues[vector];
     854  struct ix_queue *que = &sc->queues[vector];
     870  ixv_rearm_queues(struct ixgbe_softc *sc, u64 queues)
    1286  u64 queues = 0;  (local in function:ixv_handle_timer)
    3528  int want, queues, msgs;  (local in function:ixv_configure_interrupts)
    [all...]

ixgbe.c
     358  * Number of Queues, can be set to 0,
     365  "Number of queues to configure, 0 indicates autoconfigure");
     590  int regnum = i / 4;	/* 1 register per 4 queues */
     681  int regnum = i / 4;	/* 1 register per 4 queues */
     989  /* Allocate our TX/RX Queues */
    1151  /* Allocate our TX/RX Queues again */
    1890  snprintf(sc->queues[i].evnamebuf,
    1891  sizeof(sc->queues[i].evnamebuf), "%s q%d", xname, i);
    1892  snprintf(sc->queues[i].namebuf,
    1893  sizeof(sc->queues[i].namebuf), "q%d", i);
    4617  u64 queues = 0;  (local in function:ixgbe_handle_timer)
    7066  int want, queues, msgs;  (local in function:ixgbe_configure_interrupts)
    [all...]

if_fdir.c
     142  que = &sc->queues[txr->me];

ix_txrx.c
     392  struct ix_queue *que = sc->queues;
    2366  sc->queues = kmem_zalloc(sizeof(struct ix_queue) * sc->num_queues,
    2382  * Now set up the TX queues, txconf is needed to handle the
    2433  * Next the RX queues...
    2476  que = &sc->queues[i];
    2496  kmem_free(sc->queues, sizeof(struct ix_queue) * sc->num_queues);
    2515  que = &sc->queues[i];
    2518  kmem_free(sc->queues, sizeof(struct ix_queue) * sc->num_queues);

ixgbe.h
     475  struct if_percpuq *ipq;	/* softint-based input queues */
     555  * Queues:
     560  struct ix_queue *queues;  (member in struct:ixgbe_softc)
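The ix_txrx.c hits show the driver sizing its ix_queue array once at attach time with kmem_zalloc() and releasing it with a size-matched kmem_free(). A hedged sketch of that allocation pattern, with invented my_* names standing in for the driver structures:

/*
 * Sketch of the attach-time queue-array pattern seen in ix_txrx.c.
 * my_softc/my_queue are hypothetical; kmem_zalloc/kmem_free are the
 * real NetBSD allocator interface.
 */
#include <sys/param.h>
#include <sys/kmem.h>

struct my_queue { int me; };
struct my_softc {
	struct my_queue *queues;
	int num_queues;
};

static void
my_alloc_queues(struct my_softc *sc)
{
	/* zeroed array, one entry per MSI-X queue pair; KM_SLEEP cannot fail */
	sc->queues = kmem_zalloc(sizeof(struct my_queue) * sc->num_queues,
	    KM_SLEEP);
}

static void
my_free_queues(struct my_softc *sc)
{
	/* kmem_free() must be passed the same size the allocation used */
	kmem_free(sc->queues, sizeof(struct my_queue) * sc->num_queues);
	sc->queues = NULL;
}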
/src/sys/external/gpl2/dts/dist/arch/arm64/boot/dts/freescale/

imx8-ss-conn.dtsi
     86  fsl,num-tx-queues = <3>;
     87  fsl,num-rx-queues = <3>;
    106  fsl,num-tx-queues = <3>;
    107  fsl,num-rx-queues = <3>;
/src/sys/external/gpl2/dts/dist/arch/arm/boot/dts/

artpec6.dtsi
    319  mtl_rx_setup: rx-queues-config {
    320  snps,rx-queues-to-use = <1>;
    324  mtl_tx_setup: tx-queues-config {
    325  snps,tx-queues-to-use = <2>;

imx7d.dtsi
    160  fsl,num-tx-queues = <3>;
    161  fsl,num-rx-queues = <3>;

keystone-k2hk-netcp.dtsi
     24  managed-queues = <0 0x2000>;
     36  managed-queues = <0x2000 0x2000>;

keystone-k2g-netcp.dtsi
     26  managed-queues = <0 0x80>;

keystone-k2e-netcp.dtsi
     24  managed-queues = <0 0x2000>;

keystone-k2l-netcp.dtsi
     24  managed-queues = <0 0x2000>;
/src/sys/dist/pf/net/ |
pf_ruleset.c | 150 TAILQ_INIT(&ruleset->rules[i].queues[0]); 151 TAILQ_INIT(&ruleset->rules[i].queues[1]); 152 ruleset->rules[i].active.ptr = &ruleset->rules[i].queues[0]; 153 ruleset->rules[i].inactive.ptr = &ruleset->rules[i].queues[1];
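pf_ruleset.c initializes two rule queues per ruleset and points active/inactive at them: the classic double-buffer that lets a new ruleset be staged and then swapped in atomically. An illustrative sketch using <sys/queue.h>; everything except the TAILQ macros is invented for the example:

/*
 * Illustrative double-buffered rule list, after the pf_ruleset.c
 * pattern above.  Only the TAILQ macros are the real API.
 */
#include <sys/queue.h>

struct rule {
	TAILQ_ENTRY(rule) entries;
};
TAILQ_HEAD(rulequeue, rule);

struct rule_pair {
	struct rulequeue queues[2];	/* two generations of the list */
	struct rulequeue *active;	/* what the packet path reads */
	struct rulequeue *inactive;	/* what a pending commit edits */
};

static void
rule_pair_init(struct rule_pair *rp)
{
	TAILQ_INIT(&rp->queues[0]);
	TAILQ_INIT(&rp->queues[1]);
	rp->active = &rp->queues[0];
	rp->inactive = &rp->queues[1];
}

static void
rule_pair_commit(struct rule_pair *rp)
{
	/* swap: staged rules go live, old set becomes the next scratch copy */
	struct rulequeue *tmp = rp->active;

	rp->active = rp->inactive;
	rp->inactive = tmp;
}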
/src/sys/external/bsd/drm2/dist/drm/amd/amdkfd/

kfd_process_queue_manager.c
     41  list_for_each_entry(pqn, &pqm->queues, process_queue_list) {
     61  pr_info("Cannot open more queues for process with pasid 0x%x\n",
    142  INIT_LIST_HEAD(&pqm->queues);
    157  list_for_each_entry_safe(pqn, next, &pqm->queues, process_queue_list) {
    221  * for debug process, verify that it is within the static queues limit
    326  list_add(&pqn->process_queue_list, &pqm->queues);
    338  /* check if queues list is empty unregister process from device */
    518  list_for_each_entry(pqn, &pqm->queues, process_queue_list) {

kfd_device_queue_manager.h
     53  * @exeute_queues: Dispatches the queues list to the H/W.
     61  * @start: Initializes the resources/modules the the device needs for queues
     78  * @process_termination: Clears all process queues belongs to that device.
     80  * @evict_process_queues: Evict all active queues of a process
     82  * @restore_process_queues: Restore all evicted queues queues of a process
    165  * This struct is a base class for the kfd queues scheduler in the
    169  * concrete device. This class is the only class in the queues scheduler
    182  struct list_head queues;  (member in struct:device_queue_manager)

kfd_packet_manager.c
    125  struct list_head *queues,
    148  pr_debug("Building runlist ib process count: %d queues count %d\n",
    152  list_for_each_entry(cur, queues, list) {
    209  pr_debug("Finished map process and queues to runlist\n");

kfd_device_queue_manager.c
    151  /* For SDMA queues on SOC15 with 8-byte doorbell, use static
    164  /* For CP queues on SOC15 reserve a free doorbell ID */
    294  pr_warn("Can't create new usermode queue because %d queues were already created\n",
    307  * Eviction state logic: mark all queues as evicted, even ones
    308  * not currently active. Restoring inactive queues later only
    379  pr_debug("Total of %d queues are accountable so far\n",
    612  pr_info_ratelimited("Evicting PASID 0x%x queues\n",
    615  /* Mark all queues as evicted. Deactivate all active queues on
    658  pr_info_ratelimited("Evicting PASID 0x%x queues\n"
    [all...]
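The amdkfd process-queue manager keeps each process's queues on a plain Linux linked list: INIT_LIST_HEAD() at manager creation, list_add() on queue creation, and list_for_each_entry() walks for lookup and teardown. A reduced sketch of that bookkeeping; the list API is real, the my_* types are hypothetical:

/*
 * Reduced sketch of the pqm->queues bookkeeping above (Linux list API;
 * the struct and function names here are invented).
 */
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/slab.h>

struct my_queue_node {
	unsigned int qid;
	struct list_head process_queue_list;
};

struct my_queue_manager {
	struct list_head queues;
};

static void my_pqm_init(struct my_queue_manager *pqm)
{
	INIT_LIST_HEAD(&pqm->queues);
}

static int my_pqm_create(struct my_queue_manager *pqm, unsigned int qid)
{
	struct my_queue_node *pqn = kzalloc(sizeof(*pqn), GFP_KERNEL);

	if (!pqn)
		return -ENOMEM;
	pqn->qid = qid;
	list_add(&pqn->process_queue_list, &pqm->queues);
	return 0;
}

static struct my_queue_node *
my_pqm_find(struct my_queue_manager *pqm, unsigned int qid)
{
	struct my_queue_node *pqn;

	/* same walk the kfd code does when it looks a queue up by id */
	list_for_each_entry(pqn, &pqm->queues, process_queue_list)
		if (pqn->qid == qid)
			return pqn;
	return NULL;
}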
/src/sys/dev/marvell/

if_mvxpevar.h
     88  #define MVXPE_IS_QUEUE_BUSY(queues, q) \
     89  ((((queues) >> (q)) & 0x1))
    104  * the ethernet device has 8 rx/tx DMA queues. each of queue has its own

if_mvxpe.c
      97  /* Descriptor Ring Control for each of queues */
     959  * Descriptor Ring Controls for each of queues
     969  * Allocate the descriptor queues.
    1435  uint32_t ic, queues, datum = 0;  (local in function:mvxpe_rxtxth_intr)
    1470  queues = MVXPE_PRXTXTI_GET_RBICTAPQ(ic);
    1471  if (queues) {
    1473  mvxpe_rx(sc, queues);
    1475  queues = MVXPE_PRXTXTI_GET_TBTCQ(ic);
    1476  if (queues) {
    1478  mvxpe_tx_complete(sc, queues);
    [all...]
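MVXPE_IS_QUEUE_BUSY() tests one bit per queue in an interrupt-cause word, and the rxtxth handler extracts per-queue bitmaps from the cause register before dispatching rx/tx work. A hedged sketch of that test-each-set-bit dispatch; the macro is the one from if_mvxpevar.h, the rest is invented:

/*
 * Sketch of the per-queue bitmap dispatch used above.  The macro is
 * quoted from if_mvxpevar.h; everything else is hypothetical.
 */
#include <stdint.h>

#define MVXPE_IS_QUEUE_BUSY(queues, q)	((((queues) >> (q)) & 0x1))
#define MY_NQUEUES	8	/* the chip has 8 rx/tx DMA queues */

static void
my_handle_rx_queue(int q)
{
	(void)q;	/* per-queue receive processing would go here */
}

static void
my_dispatch(uint32_t queues)
{
	int q;

	/* visit every queue whose bit is set in the cause bitmap */
	for (q = 0; q < MY_NQUEUES; q++) {
		if (MVXPE_IS_QUEUE_BUSY(queues, q))
			my_handle_rx_queue(q);
	}
}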
/src/sys/dev/pci/igc/

if_igc.c
    398  /* Allocate TX/RX queues */
    400  aprint_error_dev(sc->sc_dev, "unable to allocate queues\n");
    644  sc->queues =
    655  /* Set up the TX queues. */
    663  txr->txr_igcq = &sc->queues[iq];
    680  /* Set up the RX queues. */
    688  rxr->rxr_igcq = &sc->queues[iq];
    712  struct igc_queue *q = &sc->queues[iq];
    731  kmem_free(sc->queues, sc->sc_nqueues * sizeof(struct igc_queue));
    732  sc->queues = NULL;
    [all...]

if_igc.h
    358  struct igc_queue *queues;  (member in struct:igc_softc)
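if_igc.c follows the same allocate-an-array scheme as ixgbe, then cross-links each TX/RX ring and its queue with back-pointers (txr->txr_igcq = &sc->queues[iq]). A sketch of that wiring under invented names; only kmem_zalloc() is the real API, and the ring arrays are assumed already allocated:

/*
 * Sketch of the queue/ring cross-linking done in if_igc.c above.
 * my_* names are hypothetical; tx_rings/rx_rings are assumed to be
 * allocated elsewhere before this runs.
 */
#include <sys/param.h>
#include <sys/kmem.h>

struct my_queue;
struct my_txring { struct my_queue *txr_q; };
struct my_rxring { struct my_queue *rxr_q; };
struct my_queue  { struct my_txring *txr; struct my_rxring *rxr; };

struct my_sc {
	int nqueues;
	struct my_queue *queues;
	struct my_txring *tx_rings;
	struct my_rxring *rx_rings;
};

static void
my_link_queues(struct my_sc *sc)
{
	int iq;

	sc->queues = kmem_zalloc(sc->nqueues * sizeof(struct my_queue),
	    KM_SLEEP);
	/* each ring gets a back-pointer to its queue, and vice versa */
	for (iq = 0; iq < sc->nqueues; iq++) {
		sc->tx_rings[iq].txr_q = &sc->queues[iq];
		sc->rx_rings[iq].rxr_q = &sc->queues[iq];
		sc->queues[iq].txr = &sc->tx_rings[iq];
		sc->queues[iq].rxr = &sc->rx_rings[iq];
	}
}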
/src/dist/pf/sbin/pfctl/

parse.y
     171  } *queues = NULL;
     203  struct node_qassign queues;
    1308  yyerror("no child queues specified");
    2009  if ($9.queues.qname != NULL) {
    2010  if (strlcpy(r.qname, $9.queues.qname,
    2016  free($9.queues.qname);
    2018  if ($9.queues.pqname != NULL) {
    2019  if (strlcpy(r.pqname, $9.queues.pqname,
    2025  free($9.queues.pqname);
    2126  if (filter_opts.queues.qname)
    [all...]
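parse.y carries the parsed queue assignment as heap-allocated strings and copies them into fixed-size rule fields with strlcpy(), rejecting overlong names and freeing the originals afterwards. A small userland sketch of that copy-and-check; node_qassign mirrors the grammar's struct name, the rest is invented:

/*
 * Sketch of the strlcpy() copy-and-check done when a rule names a
 * queue.  node_qassign echoes the grammar; my_rule is hypothetical.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct node_qassign {
	char *qname;	/* queue name from the parser, malloc'd */
	char *pqname;	/* optional priority-queue name */
};

struct my_rule {
	char qname[64];	/* fixed-size field, like pf's rule struct */
};

static int
assign_queue(struct my_rule *r, struct node_qassign *qa)
{
	if (qa->qname != NULL) {
		/* strlcpy() returns the source length; >= size means truncation */
		if (strlcpy(r->qname, qa->qname,
		    sizeof(r->qname)) >= sizeof(r->qname)) {
			fprintf(stderr, "queue name too long (max %zu)\n",
			    sizeof(r->qname) - 1);
			return -1;
		}
		free(qa->qname);
		qa->qname = NULL;
	}
	return 0;
}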
/src/sys/external/gpl2/dts/dist/arch/arm64/boot/dts/mediatek/

mt2712e.dtsi
    698  mtl_rx_setup: rx-queues-config {
    699  snps,rx-queues-to-use = <1>;
    708  mtl_tx_setup: tx-queues-config {
    709  snps,tx-queues-to-use = <3>;