/src/sys/external/bsd/drm2/dist/drm/amd/amdkfd/

kfd_device_queue_manager.h:
    53: * @execute_queues: Dispatches the queues list to the H/W.
    61: * @start: Initializes the resources/modules the device needs for queues
    78: * @process_termination: Clears all process queues belonging to that device.
    80: * @evict_process_queues: Evict all active queues of a process
    82: * @restore_process_queues: Restore all evicted queues of a process
    165: * This struct is a base class for the kfd queues scheduler in the
    169: * concrete device. This class is the only class in the queues scheduler
    182: struct list_head queues;        member in struct:device_queue_manager
    [all...]
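
The hits above outline the amdkfd device queue manager: per-device queues chained on a struct list_head, with ops such as @execute_queues dispatching the whole list to the hardware. A minimal sketch of that pattern, assuming only the Linux list API; every my_* name is illustrative, not the kfd code:

/*
 * Hypothetical sketch (not the actual amdkfd code): a queue manager
 * that chains per-device queues on a list_head, mirroring the
 * "struct list_head queues" member flagged at line 182 above.
 */
#include <linux/list.h>
#include <linux/mutex.h>

struct my_queue {                      /* hypothetical queue record */
	int qid;
	struct list_head list;         /* links into dqm->queues */
};

struct my_device_queue_manager {       /* hypothetical DQM */
	struct list_head queues;       /* all queues on this device */
	struct mutex lock;
};

/* Walk and dispatch every queued entry, in the spirit of @execute_queues. */
static int my_execute_queues(struct my_device_queue_manager *dqm)
{
	struct my_queue *q;

	mutex_lock(&dqm->lock);
	list_for_each_entry(q, &dqm->queues, list) {
		/* ... program queue q->qid into the H/W ... */
	}
	mutex_unlock(&dqm->lock);
	return 0;
}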

kfd_priv.h:
    82: * definitions for Kaveri. In Kaveri only the first ME queues participate
    125: * Kernel module parameter to specify maximum number of supported queues per
    327: * @KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES: Preempts all queues in the
    328: * running queues list.
    330: * @KFD_UNMAP_QUEUES_FILTER_BY_PASID: Preempts queues that belong to
    381: * @priority: Defines the queue priority relative to other queues in the
    386: * currently all queues are initialized with the highest priority.
    405: * @is_evicted: Defines if the queue is evicted. Only active queues
    433: /* Not relevant for user mode queues in cp scheduling */
    435: /* Relevant only for sdma queues */
    539: struct list_head queues;        member in struct:process_queue_manager
    [all...]
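
kfd_priv.h tracks per-queue properties such as @priority and @is_evicted, and the note at line 405 says only active queues get marked evicted. A hedged sketch of that bookkeeping, with hypothetical my_* types standing in for the kfd structures:

/*
 * Hypothetical sketch of the eviction bookkeeping hinted at above:
 * only active queues are flagged evicted, matching the @is_evicted
 * note at line 405. Names are illustrative, not the kfd API.
 */
#include <linux/list.h>
#include <linux/types.h>

struct my_queue_properties {
	int priority;           /* relative to the process's other queues */
	bool is_active;
	bool is_evicted;        /* set only while an active queue is evicted */
};

struct my_pqm_queue {
	struct my_queue_properties props;
	struct list_head list;  /* links into the per-process queue list */
};

/* Evict every active queue of one process. */
static void my_evict_process_queues(struct list_head *queues)
{
	struct my_pqm_queue *q;

	list_for_each_entry(q, queues, list) {
		if (q->props.is_active)
			q->props.is_evicted = true;
	}
}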

/src/sys/fs/udf/

udf_strat_sequential.c:
    96: struct bufq_state *queues[UDF_SHED_MAX];        member in struct:strat_private
    284: bufq_put(priv->queues[queue], nestbuf);
    486: buf = bufq_get(priv->queues[priv->cur_queue]);
    529: if (bufq_peek(priv->queues[UDF_SHED_READING]))
    531: if (bufq_peek(priv->queues[UDF_SHED_WRITING]))    /* only for unmount */
    533: if (bufq_peek(priv->queues[UDF_SHED_SEQWRITING]))
    579: empty = (bufq_peek(priv->queues[UDF_SHED_READING]) == NULL);
    580: empty &= (bufq_peek(priv->queues[UDF_SHED_WRITING]) == NULL);
    581: empty &= (bufq_peek(priv->queues[UDF_SHED_SEQWRITING]) == NULL);
    657: bufq_alloc(&priv->queues[UDF_SHED_READING], "disksort"
    [all...]
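
The sequential UDF strategy keeps one bufq per scheduling class (reading, writing, sequential writing) and polls them with bufq_peek. A minimal sketch of the same pattern against NetBSD's bufq(9) API; the MY_SHED_* constants mirror the UDF_SHED_* indices above, everything else is illustrative:

/*
 * Sketch, assuming NetBSD bufq(9): one sorted queue per scheduling
 * class, allocated up front and peeked to decide what work remains.
 */
#include <sys/param.h>
#include <sys/buf.h>
#include <sys/bufq.h>

#define MY_SHED_READING    0
#define MY_SHED_WRITING    1
#define MY_SHED_SEQWRITING 2
#define MY_SHED_MAX        3

struct my_strat_private {
	struct bufq_state *queues[MY_SHED_MAX];
	int cur_queue;
};

static int
my_strat_init(struct my_strat_private *priv)
{
	int i, error;

	/* one disksort-sorted queue per class, as at line 657 */
	for (i = 0; i < MY_SHED_MAX; i++) {
		error = bufq_alloc(&priv->queues[i], "disksort",
		    BUFQ_SORT_RAWBLOCK);
		if (error)
			return error;
	}
	return 0;
}

static bool
my_strat_empty(struct my_strat_private *priv)
{
	/* mirrors the peek chain at lines 579-581 */
	return bufq_peek(priv->queues[MY_SHED_READING]) == NULL &&
	       bufq_peek(priv->queues[MY_SHED_WRITING]) == NULL &&
	       bufq_peek(priv->queues[MY_SHED_SEQWRITING]) == NULL;
}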

udf_strat_rmw.c:
    136: struct bufq_state *queues[UDF_SHED_MAX];        member in struct:strat_private
    260: bufq_put(priv->queues[newqueue], eccline->buf);
    279: buf = bufq_peek(priv->queues[queued_on]);
    313: buf = bufq_get(priv->queues[queued_on]);
    356: ret = bufq_cancel(priv->queues[eccline->queued_on], eccline->buf);
    1299: if (bufq_peek(priv->queues[UDF_SHED_READING]))
    1301: if (bufq_peek(priv->queues[UDF_SHED_WRITING]))
    1303: if (bufq_peek(priv->queues[UDF_SHED_SEQWRITING]))
    1323: work = (bufq_peek(priv->queues[UDF_SHED_WAITING]) != NULL);
    1324: work |= (bufq_peek(priv->queues[UDF_SHED_READING]) != NULL)
    [all...]
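
The read-modify-write strategy additionally moves ECC-line buffers between classes, which is what the bufq_cancel call at line 356 enables: pull a buffer off its current queue, then bufq_put it elsewhere (line 260). A sketch of that requeue step, assuming bufq_cancel(9)'s return-the-buf-or-NULL contract; the helper name is hypothetical:

/*
 * Sketch of the requeue step implied by lines 260 and 356: cancel a
 * buffer out of its current bufq and move it to another class.
 */
#include <sys/param.h>
#include <sys/buf.h>
#include <sys/bufq.h>
#include <sys/errno.h>

static int
my_requeue(struct bufq_state *queues[], struct buf *bp,
    int oldqueue, int newqueue)
{
	/* take it off the old queue first; NULL means it was not queued */
	if (bufq_cancel(queues[oldqueue], bp) == NULL)
		return EBUSY;
	bufq_put(queues[newqueue], bp);
	return 0;
}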

/src/sys/dev/pci/igc/

if_igc.h:
    358: struct igc_queue *queues;        member in struct:igc_softc

/src/sys/dev/pci/ixgbe/

ixv.c:
    186: /* Number of Queues - do not exceed MSI-X vectors - 1 */
    530: /* Allocate our TX/RX Queues */
    654: evcnt_detach(&sc->queues[i].irqs);
    655: evcnt_detach(&sc->queues[i].handleq);
    656: evcnt_detach(&sc->queues[i].req);
    724: for (i = 0, que = sc->queues; i < sc->num_queues; i++, que++)
    784: for (i = 0, que = sc->queues; i < sc->num_queues; i++, que++)
    833: struct ix_queue *que = &sc->queues[vector];
    854: struct ix_queue *que = &sc->queues[vector];
    870: ixv_rearm_queues(struct ixgbe_softc *sc, u64 queues)
    1286: u64 queues = 0;        local in function:ixv_handle_timer
    3528: int want, queues, msgs;        local in function:ixv_configure_interrupts
    [all...]
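
ixv_rearm_queues takes a u64 bitmask with one bit per queue, and ixv_handle_timer builds such a mask before rearming (the "u64 queues = 0" local at line 1286). A sketch of that mask-then-rearm shape; the my_* names and the commented register write are assumptions, not the ixv code:

#include <stdint.h>

struct my_softc {
	int num_queues;
	uint64_t busy;                  /* per-queue "still has work" bits */
};

/* Re-trigger the interrupt for every queue named in the mask.
 * In a real driver this is one register write; here it is a stub. */
static void
my_rearm_queues(struct my_softc *sc, uint64_t queues)
{
	(void)sc;
	(void)queues;   /* e.g. write `queues` to an interrupt-set register */
}

/* Periodic timer: collect stuck queues into one bitmask, rearm once. */
static void
my_handle_timer(struct my_softc *sc)
{
	uint64_t queues = 0;

	for (int i = 0; i < sc->num_queues; i++)
		if (sc->busy & ((uint64_t)1 << i))
			queues |= (uint64_t)1 << i;

	if (queues != 0)
		my_rearm_queues(sc, queues);
}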

ixgbe.c:
    358: * Number of Queues, can be set to 0,
    365: "Number of queues to configure, 0 indicates autoconfigure");
    590: int regnum = i / 4;    /* 1 register per 4 queues */
    681: int regnum = i / 4;    /* 1 register per 4 queues */
    989: /* Allocate our TX/RX Queues */
    1151: /* Allocate our TX/RX Queues again */
    1890: snprintf(sc->queues[i].evnamebuf,
    1891:     sizeof(sc->queues[i].evnamebuf), "%s q%d", xname, i);
    1892: snprintf(sc->queues[i].namebuf,
    1893:     sizeof(sc->queues[i].namebuf), "q%d", i)
    4617: u64 queues = 0;        local in function:ixgbe_handle_timer
    7066: int want, queues, msgs;        local in function:ixgbe_configure_interrupts
    [all...]
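
The regnum = i / 4 lines encode a common packing: four 8-bit per-queue fields fit in each 32-bit register, so queue i maps to register i / 4 at byte lane i % 4. A worked example of the indexing; the helper name and the register naming are illustrative:

#include <stdint.h>

/* Update queue i's 8-bit field inside its shared 32-bit register
 * value; the caller writes the result back to register i / 4. */
static uint32_t
my_pack_queue_field(uint32_t regval, int i, uint8_t field)
{
	int shift = (i % 4) * 8;        /* byte lane within the register */

	regval &= ~((uint32_t)0xff << shift);
	regval |= (uint32_t)field << shift;
	return regval;
}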

ixgbe.h:
    475: struct if_percpuq *ipq;    /* softint-based input queues */
    555: * Queues:
    560: struct ix_queue *queues;        member in struct:ixgbe_softc

/src/sys/dev/marvell/

if_mvxpe.c:
    97: /* Descriptor Ring Control for each of the queues */
    959: * Descriptor Ring Controls for each of the queues
    969: * Allocate the descriptor queues.
    1435: uint32_t ic, queues, datum = 0;        local in function:mvxpe_rxtxth_intr
    1470: queues = MVXPE_PRXTXTI_GET_RBICTAPQ(ic);
    1471: if (queues) {
    1473:     mvxpe_rx(sc, queues);
    1475: queues = MVXPE_PRXTXTI_GET_TBTCQ(ic);
    1476: if (queues) {
    1478:     mvxpe_tx_complete(sc, queues);
    [all...]
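
mvxpe_rxtxth_intr reads one interrupt-cause word, extracts the RX and TX queue bitmasks with field macros, and hands each non-zero mask to the matching service routine. A standalone sketch of the decode; the MY_* field layout is assumed, not the Marvell register format:

#include <stdint.h>
#include <stdio.h>

#define MY_GET_RX_QUEUES(ic)  (((ic) >> 0) & 0xffu)  /* assumed bit layout */
#define MY_GET_TX_QUEUES(ic)  (((ic) >> 8) & 0xffu)  /* assumed bit layout */

static void
my_rx(uint32_t queues)
{
	printf("rx queues 0x%02x\n", queues);
}

static void
my_tx_complete(uint32_t queues)
{
	printf("tx-complete queues 0x%02x\n", queues);
}

static void
my_rxtx_intr(uint32_t ic)
{
	uint32_t queues;

	queues = MY_GET_RX_QUEUES(ic);
	if (queues)
		my_rx(queues);           /* service flagged RX queues */

	queues = MY_GET_TX_QUEUES(ic);
	if (queues)
		my_tx_complete(queues);  /* reap flagged TX queues */
}

int
main(void)
{
	my_rxtx_intr(0x0305);  /* RX queues 0 and 2; TX queues 0 and 1 */
	return 0;
}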

/src/sys/dist/pf/net/

pfvar.h:
    890: struct pf_rulequeue queues[2];        member in struct:pf_ruleset::__anon30a5ecce1408
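
pf keeps two rule queues per ruleset so a new rule list can be staged on the inactive queue and swapped in atomically on commit, which is what queues[2] supports. A sketch of that double-buffering with queue(3) TAILQs; the my_* wrapper is illustrative, not the pf API:

#include <sys/queue.h>

struct my_rule {
	TAILQ_ENTRY(my_rule) entries;
};
TAILQ_HEAD(my_rulequeue, my_rule);

struct my_ruleset {
	struct my_rulequeue queues[2];   /* the two lists take turns */
	struct my_rulequeue *active;     /* rules being evaluated */
	struct my_rulequeue *inactive;   /* rules being staged */
};

static void
my_ruleset_init(struct my_ruleset *rs)
{
	TAILQ_INIT(&rs->queues[0]);
	TAILQ_INIT(&rs->queues[1]);
	rs->active = &rs->queues[0];
	rs->inactive = &rs->queues[1];
}

/* Commit: the staged queue becomes the active one in one swap. */
static void
my_ruleset_commit(struct my_ruleset *rs)
{
	struct my_rulequeue *tmp = rs->active;

	rs->active = rs->inactive;
	rs->inactive = tmp;
}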