HomeSort by: relevance | last modified time | path
    Searched refs:sge (Results 1 - 15 of 15) sorted by relevance

  /src/sys/dev/ic/
ld_aac.c 146 struct aac_sg_entryraw *sge; local in function:ld_aac_dobio
162 sge = sgt->SgEntryRaw;
164 for (i = 0; i < xfer->dm_nsegs; i++, sge++) {
165 sge->SgAddress = htole64(xfer->dm_segs[i].ds_addr);
166 sge->SgByteCount = htole32(xfer->dm_segs[i].ds_len);
167 sge->Next = 0;
168 sge->Prev = 0;
169 sge->Flags = 0;
177 struct aac_sg_entry *sge; local in function:ld_aac_dobio
205 sge = sgt->SgEntry
222 struct aac_sg_entry64 *sge; local in function:ld_aac_dobio
    [all...]
nvmereg.h 138 struct nvme_sge sge; member in union:nvme_sqe::__anon46b1826e010a
191 struct nvme_sge sge; member in union:nvme_sqe_io::__anon46b1826e020a
mpt.c 1303 mpt2host_sge_simple_union(SGE_SIMPLE_UNION *sge)
1306 MPT_2_HOST32(sge, FlagsLength);
1307 MPT_2_HOST32(sge, _u.Address64.Low);
1308 MPT_2_HOST32(sge, _u.Address64.High);
mlx.c 1915 struct mlx_sgentry *sge; local in function:mlx_ccb_map
1934 sge = (struct mlx_sgentry *)((char *)mlx->mlx_sgls + sgloff);
1936 for (i = 0; i < nsegs; i++, sge++) {
1937 sge->sge_addr = htole32(xfer->dm_segs[i].ds_addr);
1938 sge->sge_count = htole32(xfer->dm_segs[i].ds_len);
  /src/sys/dev/pci/cxgb/
cxgb_sge.c 181 * Reclaims Tx descriptors that the SGE has indicated it has processed,
216 * t3_sge_init - initialize SGE
218 * @p: the SGE parameters
220 * Performs SGE initialization needed every time after a chip reset.
369 CH_ALERT(adapter, "SGE response queue credit overflow\n");
451 * refill_fl - refill an SGE free-buffer list
456 * (Re)populate an SGE free-buffer list with up to @n new packet buffers.
521 * free_rx_bufs - free the Rx buffers on an SGE free list
523 * @q: the SGE free list to clean up
525 * Release the buffers on an SGE free-buffer Rx queue. HW fetching from
    [all...]
cxgb_offload.c 253 whole pdu + cpl headers has to fit into one sge buffer */
256 (adapter->sge.qs[0].fl[1].buf_size -
293 mtx_lock(&adapter->sge.reg_lock);
296 mtx_unlock(&adapter->sge.reg_lock);
322 mtx_lock(&adapter->sge.reg_lock);
327 mtx_unlock(&adapter->sge.reg_lock);
331 mtx_lock(&adapter->sge.reg_lock);
333 mtx_unlock(&adapter->sge.reg_lock);
338 mtx_lock(&adapter->sge.reg_lock);
343 mtx_unlock(&adapter->sge.reg_lock)
    [all...]
cxgb_adapter.h 304 struct sge { struct
365 struct sge sge; member in struct:adapter
541 * XXX figure out how we can return this to being private to sge
cxgb_main.c 352 MTX_INIT(&sc->sge.reg_lock, sc->reglockbuf, NULL, MTX_DEF);
483 /* initialize sge private state */
565 MTX_DESTROY(&sc->sge.reg_lock);
573 * setup_sge_qsets - configure SGE Tx/Rx/response queues
576 * Determines how many sets of SGE queues to use and initializes them.
600 &sc->params.sge.qset[qset_idx], ntxq, pi);
1390 qs = &p->adapter->sge.qs[p->first_qset];
1464 qs = &pi->adapter->sge.qs[pi->first_qset];
1483 qs = &pi->adapter->sge.qs[pi->first_qset];
cxgb_common.h 106 SGE_QSETS = 8, /* # of SGE Tx/Rx/RspQ sets */
111 enum sge_context_type { /* SGE egress context types */
123 struct sg_ent { /* SGE scatter/gather entry */
232 u64 rx_cong_drops; /* # of Rx drops due to SGE congestion */
306 struct qset_params { /* SGE queue set parameters */
374 struct sge_params sge; member in struct:adapter_params
cxgb_t3_hw.c 1503 { F_SGE_FRAMING_ERROR, "CPL switch SGE framing error", -1, 1 },
1770 * concentrator and the SGE data interrupts.
1871 * t3_sge_write_context - write an SGE context
1876 * Program an SGE context with the values already loaded in the
1893 * t3_sge_init_ecntxt - initialize an SGE egress context
1905 * Initialize an SGE egress context and make it ready for use. If the
1937 * t3_sge_init_flcntxt - initialize an SGE free-buffer list context
1948 * Initialize an SGE free list context and make it ready for use. The
1977 * t3_sge_init_rspcntxt - initialize an SGE response queue context
1987 * Initialize an SGE response queue context and make it ready for use
    [all...]
  /src/sys/dev/pci/
mpii.c 397 mpii_dvatosge(struct mpii_sge *sge, u_int64_t dva)
399 sge->sg_addr_lo = htole32(dva);
400 sge->sg_addr_hi = htole32(dva >> 32);
828 struct mpii_ieee_sge *csge, *nsge, *sge; local in function:mpii_load_xs_sas3
836 /* zero length transfer still requires an SGE */
849 sge = nsge;
853 /* offset to the chain sge from the beginning */
857 /* address of the next sge */
861 sizeof(*sge));
864 sge = nsge
886 struct mpii_sge *csge, *nsge, *sge; local in function:mpii_load_xs
    [all...]
mfii.c 1250 struct mfii_sge *sge = (struct mfii_sge *)(ctx + 1); local in function:mfii_dcmd_start
1253 io->sgl_offset0 = (uint32_t *)sge - (uint32_t *)io;
1256 sge->sg_addr = htole64(ccb->ccb_sense_dva);
1257 sge->sg_len = htole32(sizeof(*ccb->ccb_sense));
1258 sge->sg_flags = MFII_SGE_CHAIN_ELEMENT | MFII_SGE_ADDR_IOCPLBNTA;
1925 struct mfii_sge *sge = (struct mfii_sge *)(ctx + 1); local in function:mfii_do_mgmt
1965 io->sgl_offset0 = ((u_int8_t *)sge - (u_int8_t *)io) / 4;
1966 io->chain_offset = ((u_int8_t *)sge - (u_int8_t *)io) / 16;
1968 sge->sg_addr = htole64(ccb->ccb_mfi_dva);
1969 sge->sg_len = htole32(MFI_FRAME_SIZE)
2644 struct mfii_sge *sge = NULL, *nsge = sglp; local in function:mfii_load_ccb
    [all...]
pvscsi.c 234 struct pvscsi_sg_element sge[PVSCSI_MAX_SG_ENTRIES_PER_SEGMENT]; member in struct:pvscsi_sg_list
1476 struct pvscsi_sg_element *sge; local in function:pvscsi_scsipi_request
1481 sge = hcb->sg_list->sge;
1485 sge[i].addr = segs[i].ds_addr;
1486 sge[i].length = segs[i].ds_len;
1487 sge[i].flags = 0;
1494 sizeof(*sge) * nseg, BUS_DMASYNC_PREWRITE);
amr.c 1122 struct amr_sgentry *sge; local in function:amr_ccb_map
1155 sge = (struct amr_sgentry *)((char *)amr->amr_sgls + sgloff);
1156 for (i = 0; i < nsegs; i++, sge++) {
1157 sge->sge_addr = htole32(xfer->dm_segs[i].ds_addr);
1158 sge->sge_count = htole32(xfer->dm_segs[i].ds_len);
arcmsr.c 484 struct arc_sge *sgl = ccb->ccb_cmd->sgl, *sge;
502 sge = &sgl[i];
504 sge->sg_hdr = htole32(ARC_SGE_64BIT | dmap->dm_segs[i].ds_len);
506 sge->sg_hi_addr = htole32((uint32_t)(addr >> 32));
507 sge->sg_lo_addr = htole32((uint32_t)addr);

Completed in 27 milliseconds