/src/sys/external/bsd/ipf/netinet/

radix_ipf.c
     33  ipf_rdx_node_t nodes[2], int *);
     90  /* nodes(O) - pair of ipf_rdx_node_t's to initialise with data */
     93  /* Initialise the fields in a pair of radix tree nodes according to the */
     99  buildnodes(addrfamily_t *addr, addrfamily_t *mask, ipf_rdx_node_t nodes[2])
    116  bzero(&nodes[0], sizeof(ipf_rdx_node_t) * 2);
    117  nodes[0].maskbitcount = maskbits;
    118  nodes[0].index = -1 - (ADF_OFF_BITS + maskbits);
    119  nodes[0].addrkey = (u_32_t *)addr;
    120  nodes[0].maskkey = (u_32_t *)mask;
    121  nodes[0].addroff = nodes[0].addrkey + masklen
    919  struct ipf_rdx_node nodes[2];    member in struct:myst
    [all...]

radix_ipf.h
     60  ipf_rdx_node_t nodes[3];    member in struct:ipf_rdx_head
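
The buildnodes() lines above fill in a pair of radix-tree nodes from an address/mask pair: the number of set bits in the mask becomes maskbitcount, and the leaf's index is encoded as -1 - (ADF_OFF_BITS + maskbits). Below is a minimal userland sketch of just that derivation; struct rdx_node, the ADF_OFF_BITS value and the popcount loop are simplified stand-ins, not the real ipf definitions.

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Hypothetical stand-in for ipf_rdx_node_t: only the fields the excerpt touches. */
    struct rdx_node {
        int      maskbitcount;  /* number of set bits in the netmask */
        int      index;         /* bit index, encoded negatively for leaves */
        uint32_t *addrkey;
        uint32_t *maskkey;
    };

    #define ADF_OFF_BITS 32     /* assumed offset of the address within the key */

    /* Initialise a node pair from an address/mask, mirroring the buildnodes() pattern. */
    static void
    build_node_pair(uint32_t *addr, uint32_t *mask, struct rdx_node nodes[2])
    {
        uint32_t m = *mask;
        int maskbits = 0;

        while (m != 0) {        /* popcount of the netmask */
            maskbits += m & 1;
            m >>= 1;
        }
        memset(&nodes[0], 0, sizeof(struct rdx_node) * 2);
        nodes[0].maskbitcount = maskbits;
        nodes[0].index = -1 - (ADF_OFF_BITS + maskbits);
        nodes[0].addrkey = addr;
        nodes[0].maskkey = mask;
    }

    int
    main(void)
    {
        uint32_t addr = 0xc0a80100, mask = 0xffffff00;  /* 192.168.1.0/24 */
        struct rdx_node pair[2];

        build_node_pair(&addr, &mask, pair);
        printf("maskbits=%d index=%d\n", pair[0].maskbitcount, pair[0].index);
        return 0;
    }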
|
/src/bin/sh/

Makefile
     11  GENSRCS=builtins.c init.c nodes.c
     12  GENHDRS=builtins.h nodes.h token.h nodenames.h optinit.h
     74  .ORDER: nodes.h nodes.c
     75  nodes.c nodes.h: mknodes.sh nodetypes nodes.c.pat
     78  [ -f nodes.h ]
     80  nodenames.h: mknodenames.sh nodes.h
|
/src/sys/external/bsd/drm2/dist/drm/amd/amdgpu/

amdgpu_vram_mgr.c
    282  struct drm_mm_node *nodes = mem->mm_node;    local
    292  for (usage = 0; nodes && pages; pages -= nodes->size, nodes++)
    293  usage += amdgpu_vram_mgr_vis_size(adev, nodes);
    338  struct drm_mm_node *nodes;    local
    375  nodes = kvmalloc_array((uint32_t)num_nodes, sizeof(*nodes),
    377  if (!nodes) {
    393  r = drm_mm_insert_node_in_range(mm, &nodes[i], pages
    456  struct drm_mm_node *nodes = mem->mm_node;    local
    [all...]
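
The loop at lines 292-293 above steps through the array of drm_mm_node entries that back a buffer and sums how much of it lies in the CPU-visible part of VRAM. Below is a reduced sketch of that walk; struct mm_node, VISIBLE_LIMIT and vis_size() are stand-ins for struct drm_mm_node, the visible-window size and amdgpu_vram_mgr_vis_size(), not the driver's actual definitions.

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical stand-in for struct drm_mm_node: a start offset and a size in pages. */
    struct mm_node {
        uint64_t start;
        uint64_t size;
    };

    #define VISIBLE_LIMIT 256   /* assumed size of the CPU-visible window, in pages */

    /* Pages of one node that land below the visible limit
     * (placeholder for amdgpu_vram_mgr_vis_size()). */
    static uint64_t
    vis_size(const struct mm_node *node)
    {
        if (node->start >= VISIBLE_LIMIT)
            return 0;
        if (node->start + node->size <= VISIBLE_LIMIT)
            return node->size;
        return VISIBLE_LIMIT - node->start;
    }

    int
    main(void)
    {
        struct mm_node nodes[] = { { 0, 128 }, { 200, 100 }, { 512, 64 } };
        uint64_t pages = 128 + 100 + 64;
        uint64_t usage;
        const struct mm_node *n = nodes;

        /* Same shape as the excerpt: step through the nodes until the
         * page budget is exhausted, summing the visible portion of each. */
        for (usage = 0; n && pages; pages -= n->size, n++)
            usage += vis_size(n);
        printf("visible pages: %llu\n", (unsigned long long)usage);
        return 0;
    }
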
ta_xgmi_if.h
     94  struct ta_xgmi_node_info nodes[TA_XGMI__MAX_CONNECTED_NODES];    member in struct:ta_xgmi_cmd_get_topology_info_input
     99  struct ta_xgmi_node_info nodes[TA_XGMI__MAX_CONNECTED_NODES];    member in struct:ta_xgmi_cmd_get_topology_info_output
    104  struct ta_xgmi_node_info nodes[TA_XGMI__MAX_CONNECTED_NODES];    member in struct:ta_xgmi_cmd_set_topology_info_input
|
amdgpu_psp_v11_0.c
    762  topology_info_input->nodes[i].node_id = topology->nodes[i].node_id;
    763  topology_info_input->nodes[i].num_hops = topology->nodes[i].num_hops;
    764  topology_info_input->nodes[i].is_sharing_enabled = topology->nodes[i].is_sharing_enabled;
    765  topology_info_input->nodes[i].sdma_engine = topology->nodes[i].sdma_engine;
    777  topology->nodes[i].node_id = topology_info_output->nodes[i].node_id
    [all...]

/src/sys/external/bsd/drm2/dist/drm/selftests/

test-drm_mm.c
    272  struct drm_mm_node nodes[2];    local
    275  /* Create a small drm_mm with a couple of nodes and a few holes, and
    281  memset(nodes, 0, sizeof(nodes));
    282  nodes[0].start = 512;
    283  nodes[0].size = 1024;
    284  ret = drm_mm_reserve_node(&mm, &nodes[0]);
    287  nodes[0].start, nodes[0].size);
    291  nodes[1].size = 1024
    380  struct drm_mm_node tmp, *nodes, *node, *next;    local
    578  struct drm_mm_node *nodes, *node, *next;    local
    885  struct drm_mm_node *nodes, *node, *next;    local
   1046  struct drm_mm_node *nodes, *node, *next;    local
   1412  struct evict_node *nodes;    local
   1525  struct evict_node *nodes;    local
   1626  struct drm_mm_node *nodes, *node, *next;    local
   1740  struct drm_mm_node *nodes, *node, *next;    local
   2164  struct evict_node *nodes;    local
   2267  struct evict_node *nodes;    local
    [all...]
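
The self-test excerpt at lines 272-291 sets up two pre-positioned nodes and reserves them in a fresh drm_mm so the surrounding holes can then be checked. The sketch below imitates only the reservation step with a toy manager that rejects overlapping ranges; drm_mm_reserve_node() itself does considerably more, so treat this purely as an illustration of the test's setup.

    #include <stdint.h>
    #include <stdio.h>

    struct node { uint64_t start, size; };  /* hypothetical stand-in for drm_mm_node */

    struct toy_mm {
        struct node reserved[8];
        int count;
        uint64_t total;                     /* size of the managed range, starting at 0 */
    };

    /* Reserve an explicit [start, start+size) range, failing on overlap or overflow. */
    static int
    toy_reserve(struct toy_mm *mm, const struct node *n)
    {
        int i;

        if (n->start + n->size > mm->total || mm->count == 8)
            return -1;
        for (i = 0; i < mm->count; i++) {
            const struct node *r = &mm->reserved[i];
            if (n->start < r->start + r->size && r->start < n->start + n->size)
                return -1;                  /* ranges overlap */
        }
        mm->reserved[mm->count++] = *n;
        return 0;
    }

    int
    main(void)
    {
        struct toy_mm mm = { .count = 0, .total = 4096 };
        struct node nodes[2] = { { 512, 1024 }, { 2048, 1024 } };

        /* Mirrors the test setup: two nodes, leaving holes before, between and after. */
        if (toy_reserve(&mm, &nodes[0]) || toy_reserve(&mm, &nodes[1]))
            printf("reserve failed\n");
        else
            printf("reserved %d nodes\n", mm.count);
        return 0;
    }
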
/src/sys/dev/raidframe/

rf_dagffwr.c
     62  * through the graph, blindly executing nodes until it reaches the end.
    142  * All nodes are before the commit node (Cmt) are assumed to be atomic and
    200  /* alloc the nodes: Wnd, xor, commit, block, term, and Wnp */
    205  tmpNode->list_next = dag_h->nodes;
    206  dag_h->nodes = tmpNode;
    208  wndNodes = dag_h->nodes;
    211  xorNode->list_next = dag_h->nodes;
    212  dag_h->nodes = xorNode;
    215  wnpNode->list_next = dag_h->nodes;
    216  dag_h->nodes = wnpNode
    [all...]

rf_dagffrd.c
     60  * through the graph, blindly executing nodes until it reaches the end.
    108  * There is one disk node per stripe unit accessed, and all disk nodes are in
    112  * normally. Subsequent disk nodes are created by copying the first one,
    117  * in ONLY ONE of the read nodes. This does not apply to the "params" field
    189  tmpNode->list_next = dag_h->nodes;
    190  dag_h->nodes = tmpNode;
    192  diskNodes = dag_h->nodes;
    195  blockNode->list_next = dag_h->nodes;
    196  dag_h->nodes = blockNode;
    199  commitNode->list_next = dag_h->nodes;
    [all...]

rf_dagdegrd.c
     62  * through the graph, blindly executing nodes until it reaches the end.
    136  /* alloc the Wnd nodes and the Wmir node */
    142  /* total number of nodes = 1 + (block + commit + terminator) */
    145  rdNode->list_next = dag_h->nodes;
    146  dag_h->nodes = rdNode;
    149  blockNode->list_next = dag_h->nodes;
    150  dag_h->nodes = blockNode;
    153  commitNode->list_next = dag_h->nodes;
    154  dag_h->nodes = commitNode;
    157  termNode->list_next = dag_h->nodes;
    580  RF_DagNode_t *nodes, *rdNode, *blockNode, *commitNode, *termNode;    local
   1046  RF_DagNode_t *nodes, *rudNodes, *rrdNodes, *recoveryNode, *blockNode,    local
    [all...]

rf_parityscan.c
    408  * nNodes nodes at level 2, an unblock-recon node at level 3, and a
    410  * and unblock nodes are not touched, nor are the pda fields in the
    411  * second-level nodes, so they must be filled in later.
    426  RF_DagNode_t *nodes, *termNode, *blockNode, *unblockNode, *tmpNode;    local
    443  /* create the nodes, the block & unblock nodes, and the terminator
    448  tmpNode->list_next = dag_h->nodes;
    449  dag_h->nodes = tmpNode;
    451  nodes = dag_h->nodes;
    [all...]

rf_dagdegwr.c
     62  * through the graph, blindly executing nodes until it reaches the end.
    134  * commit nodes: Xor, Wnd
    147  * The block & unblock nodes are leftovers from a previous version. They
    210  /* create all the nodes at once */
    223  * DAG generator, so here's what I'm gonna do- if there's no read nodes,
    236  blockNode->list_next = dag_h->nodes;
    237  dag_h->nodes = blockNode;
    240  commitNode->list_next = dag_h->nodes;
    241  dag_h->nodes = commitNode;
    244  unblockNode->list_next = dag_h->nodes;
    721  RF_DagNode_t *nodes, *wudNodes, *rrdNodes, *recoveryNode, *blockNode,    local
    [all...]

rf_parityloggingdags.c
     87  RF_DagNode_t *nodes, *wndNodes, *rodNodes = NULL, *syncNode, *xorNode,    local
    103  /* alloc the Wnd nodes, the xor node, and the Lpo node */
    105  nodes = RF_MallocAndAdd((nWndNodes + 6) * sizeof(*nodes), allocList);
    107  wndNodes = &nodes[i];
    109  xorNode = &nodes[i];
    111  lpoNode = &nodes[i];
    113  blockNode = &nodes[i];
    115  syncNode = &nodes[i];
    117  unblockNode = &nodes[i]
    331  RF_DagNode_t *xorNodes, *blockNode, *unblockNode, *nodes;    local
    [all...]

rf_dagutils.c
     69  /* The maximum number of nodes in a DAG is bounded by
    204  while (dag_h->nodes) {
    205  tmpnode = dag_h->nodes;
    206  dag_h->nodes = dag_h->nodes->list_next;
    703  RF_DagNode_t **nodes, int unvisited)
    709  nodes[node->nodeNum] = node;
    751  acount, nodes, unvisited)) {
    796  * -- all nodes have status wait
    797  * -- numAntDone is zero in all nodes
    812  RF_DagNode_t **nodes; /* array of ptrs to nodes in dag */    local
    [all...]

rf_dag.h
    122  RF_DagNode_t *list_next; /* next in the list of DAG nodes for this DAG */
    124  int visited; /* used to avoid re-visiting nodes on DAG
    156  int numCommitNodes; /* number of commit nodes in graph */
    157  int numCommits; /* number of commit nodes which have been
    175  RF_DagNode_t *nodes; /* linked list of nodes used in this DAG */    member in struct:RF_DagHeader_s
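
A pattern repeats across the rf_dag*.c files above: each newly created node is pushed onto the singly linked list headed by dag_h->nodes through its list_next pointer, and rf_dagutils.c (lines 204-206) later tears the DAG down by popping that same list. A minimal sketch of the push/pop idiom, using a trimmed-down node type in place of RF_DagNode_t:

    #include <stdio.h>
    #include <stdlib.h>

    /* Hypothetical, trimmed-down DAG node and header. */
    struct dag_node {
        const char      *name;
        struct dag_node *list_next;     /* next in the DAG's list of all nodes */
    };

    struct dag_header {
        struct dag_node *nodes;         /* head of the linked list of nodes */
    };

    /* Allocate a node and prepend it to the header's list, like the excerpts do. */
    static struct dag_node *
    dag_alloc_node(struct dag_header *dag_h, const char *name)
    {
        struct dag_node *n = malloc(sizeof(*n));

        if (n == NULL)
            abort();
        n->name = name;
        n->list_next = dag_h->nodes;
        dag_h->nodes = n;
        return n;
    }

    /* Free every node by repeatedly unlinking the head, as in rf_dagutils.c. */
    static void
    dag_free_nodes(struct dag_header *dag_h)
    {
        while (dag_h->nodes) {
            struct dag_node *tmp = dag_h->nodes;
            dag_h->nodes = dag_h->nodes->list_next;
            free(tmp);
        }
    }

    int
    main(void)
    {
        struct dag_header dag_h = { NULL };
        struct dag_node *n;

        dag_alloc_node(&dag_h, "block");
        dag_alloc_node(&dag_h, "commit");
        dag_alloc_node(&dag_h, "term");
        for (n = dag_h.nodes; n != NULL; n = n->list_next)
            printf("%s\n", n->name);    /* prints in reverse allocation order */
        dag_free_nodes(&dag_h);
        return 0;
    }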
|
/src/sys/external/bsd/drm2/dist/drm/nouveau/nvkm/core/

nouveau_nvkm_core_mm.c
     31  #define node(root, dir) ((root)->nl_entry.dir == &mm->nodes) ? NULL : \
     41  list_for_each_entry(node, &mm->nodes, nl_entry) {
    251  prev = list_last_entry(&mm->nodes, typeof(*node), nl_entry);
    260  list_add_tail(&node->nl_entry, &mm->nodes);
    264  INIT_LIST_HEAD(&mm->nodes);
    280  list_add_tail(&node->nl_entry, &mm->nodes);
    291  int nodes = 0;    local
    296  list_for_each_entry(node, &mm->nodes, nl_entry) {
    298  if (++nodes > mm->heap_nodes) {
    305  list_for_each_entry_safe(node, temp, &mm->nodes, nl_entry)
    [all...]

/src/sys/external/bsd/drm2/dist/drm/nouveau/include/nvkm/core/

mm.h
     23  struct list_head nodes;    member in struct:nvkm_mm
     50  list_for_each_entry(node, &mm->nodes, nl_entry) {
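
Both nouveau files keep the region list as an intrusive doubly linked list: nvkm_mm has a struct list_head nodes member, entries are appended with list_add_tail() and walked with list_for_each_entry(). The sketch below re-implements just enough of that list idiom (init, tail insert, container_of-based traversal) to show the shape; it is not the kernel's list.h, and the mm_node fields are made up.

    #include <stddef.h>
    #include <stdio.h>

    /* Minimal intrusive doubly linked list in the style used by the excerpt. */
    struct list_head {
        struct list_head *prev, *next;
    };

    static void
    list_init(struct list_head *head)
    {
        head->prev = head->next = head;
    }

    static void
    list_add_tail(struct list_head *entry, struct list_head *head)
    {
        entry->prev = head->prev;
        entry->next = head;
        head->prev->next = entry;
        head->prev = entry;
    }

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    /* Hypothetical region node with its list linkage, like nvkm_mm_node.nl_entry. */
    struct mm_node {
        int length;
        struct list_head nl_entry;
    };

    int
    main(void)
    {
        struct list_head nodes;             /* plays the role of mm->nodes */
        struct mm_node a = { 16 }, b = { 32 };
        struct list_head *p;
        int count = 0;

        list_init(&nodes);
        list_add_tail(&a.nl_entry, &nodes);
        list_add_tail(&b.nl_entry, &nodes);

        /* Equivalent of list_for_each_entry(node, &mm->nodes, nl_entry). */
        for (p = nodes.next; p != &nodes; p = p->next) {
            struct mm_node *node = container_of(p, struct mm_node, nl_entry);
            printf("node length %d\n", node->length);
            count++;
        }
        printf("%d nodes\n", count);
        return 0;
    }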
|
/src/usr.bin/make/unit-tests/

varname-dot-alltargets.mk
     20  # about all nodes, therefore source is also included.
|
/src/sys/fs/tmpfs/

tmpfs_vfsops.c
     94  ino_t nodes;    local
    146  nodes = 3 + (memlimit / 1024);
    149  nodes = args->ta_nodes_max;
    152  nodes = MIN(nodes, INT_MAX);
    153  KASSERT(nodes >= 3);
    157  if (set_nodes && nodes < tmp->tm_nodes_cnt)
    173  tmp->tm_nodes_max = nodes;
    191  tmp->tm_nodes_max = nodes;
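
The tmpfs mount path above computes the inode limit as three reserved nodes plus one node per KiB of the memory limit, clamps it to INT_MAX, and (per line 157) refuses to shrink it below the number of nodes already allocated. A small sketch of just that default calculation, with the mount plumbing omitted:

    #include <assert.h>
    #include <limits.h>
    #include <stdint.h>
    #include <stdio.h>

    #define MIN(a, b)   ((a) < (b) ? (a) : (b))

    /* Default node limit for a given memory limit, as in the excerpt:
     * 3 reserved nodes plus one node per KiB, clamped to INT_MAX. */
    static uint64_t
    tmpfs_default_nodes(uint64_t memlimit)
    {
        uint64_t nodes;

        nodes = 3 + (memlimit / 1024);
        nodes = MIN(nodes, INT_MAX);
        assert(nodes >= 3);     /* mirrors the KASSERT */
        return nodes;
    }

    int
    main(void)
    {
        printf("64 MiB  -> %llu nodes\n",
            (unsigned long long)tmpfs_default_nodes(64ULL * 1024 * 1024));
        printf("512 MiB -> %llu nodes\n",
            (unsigned long long)tmpfs_default_nodes(512ULL * 1024 * 1024));
        return 0;
    }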
|
/src/sys/arch/powerpc/booke/dev/

cpunode.c
     54  static u_int nodes;    variable
     63  if (ma->ma_node > 8 || (nodes & (1 << ma->ma_node)))
    105  nodes |= 1 << ma->ma_node;
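
cpunode.c tracks which node numbers have already attached in a single static bitmap word: attachment is rejected if the number is out of range or its bit is already set, and the bit is recorded on success. A compact sketch of the same guard (the range check mirrors the excerpt's `> 8` test):

    #include <stdio.h>

    static unsigned int nodes;  /* bitmap of node numbers already attached */

    /* Return 0 and record the node if it can attach, -1 if it is out of range
     * or a node with that number was already seen (same test as the excerpt). */
    static int
    cpunode_try_attach(unsigned int node)
    {
        if (node > 8 || (nodes & (1U << node)))
            return -1;
        nodes |= 1U << node;
        return 0;
    }

    int
    main(void)
    {
        printf("%d\n", cpunode_try_attach(3));  /* 0: first time */
        printf("%d\n", cpunode_try_attach(3));  /* -1: duplicate */
        printf("%d\n", cpunode_try_attach(9));  /* -1: out of range */
        return 0;
    }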
|
/src/sbin/routed/

radix.h
    117  struct radix_node_head *head, struct radix_node nodes[]);
    120  struct radix_node_head *head, struct radix_node nodes[]);
|
/src/sys/net/

radix.h
    110  struct radix_node_head *head, struct radix_node nodes[]);
    113  struct radix_node_head *head, struct radix_node nodes[]);
|
/src/usr.bin/make/

compat.c
    475  GNode **nodes;    local
    481  nodes = vec.items;
    486  if (nodes[i]->type & OP_WAIT) {
    487  MakeInRandomOrder(nodes + start, nodes + i, pgn);
    488  Compat_Make(nodes[i], pgn);
    492  MakeInRandomOrder(nodes + start, nodes + i, pgn);
    747  * Expand .USE nodes right now, because they can modify the structure
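
In the compat.c excerpt, a target's children are gathered into a vector and built in order, but each run of nodes between .WAIT barriers (nodes with OP_WAIT set) is first shuffled by MakeInRandomOrder(). The sketch below shows that segment-wise shuffle over a plain int array; the WAIT_MARKER value and the Fisher-Yates helper are illustrative stand-ins, not make's real types.

    #include <stdio.h>
    #include <stdlib.h>

    #define WAIT_MARKER (-1)    /* hypothetical stand-in for an OP_WAIT node */

    /* Fisher-Yates shuffle of items[lo..hi), loosely like MakeInRandomOrder. */
    static void
    shuffle_range(int *items, size_t lo, size_t hi)
    {
        size_t i;

        for (i = hi; i > lo + 1; i--) {
            size_t j = lo + (size_t)(rand() % (int)(i - lo));
            int tmp = items[i - 1];
            items[i - 1] = items[j];
            items[j] = tmp;
        }
    }

    int
    main(void)
    {
        /* Two segments separated by a WAIT barrier. */
        int items[] = { 1, 2, 3, WAIT_MARKER, 4, 5, 6 };
        size_t n = sizeof(items) / sizeof(items[0]);
        size_t i, start = 0;

        srand(42);
        for (i = 0; i < n; i++) {
            if (items[i] == WAIT_MARKER) {
                shuffle_range(items, start, i);     /* shuffle the segment before the barrier */
                start = i + 1;
            }
        }
        shuffle_range(items, start, n);             /* and the trailing segment */
        for (i = 0; i < n; i++)
            printf("%d ", items[i]);
        printf("\n");
        return 0;
    }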
|
/src/sys/kern/

kern_crashme.c
     35  * supports crashme sysctl nodes, to test various ways the system can
     36  * panic or crash. you can add and remove nodes.
     83  static crashme_node nodes[] = {    variable
    217  * register the various nodes with sysctl.
    253  for (n = 0; n < __arraycount(nodes); n++) {
    254  if (crashme_add(&nodes[n]))
    256  " debug.crashme.%s\n", __func__, nodes[n].cn_name);
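
kern_crashme.c keeps its test cases in a static nodes[] table and registers each one under debug.crashme in a loop bounded by __arraycount(nodes). Below is a reduced sketch of that table-plus-loop shape; the entry names and the crashme_add() stub are made up, and the real code hooks the entries into sysctl rather than printing them.

    #include <stdio.h>

    /* Hypothetical, trimmed-down version of crashme_node. */
    typedef struct {
        const char *cn_name;
        const char *cn_descr;
    } crashme_node;

    static crashme_node nodes[] = {
        { "panic",      "call panic()" },
        { "null_deref", "dereference a NULL pointer" },
        { "ddb",        "enter the kernel debugger" },
    };

    #define __arraycount(a) (sizeof(a) / sizeof((a)[0]))

    /* Placeholder for crashme_add(): pretend to hook the node up under debug.crashme. */
    static int
    crashme_add(const crashme_node *n)
    {
        printf("registered debug.crashme.%s (%s)\n", n->cn_name, n->cn_descr);
        return 0;
    }

    int
    main(void)
    {
        size_t n;

        for (n = 0; n < __arraycount(nodes); n++) {
            if (crashme_add(&nodes[n]))
                fprintf(stderr, "%s: failed to add debug.crashme.%s\n",
                    __func__, nodes[n].cn_name);
        }
        return 0;
    }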
|
/src/sys/arch/arm/sunxi/

sunxi_sramc.c
    101  TAILQ_ENTRY(sunxi_sramc_node) nodes;
    134  TAILQ_INSERT_TAIL(&sc->sc_nodes, node, nodes);
    254  TAILQ_FOREACH(node, &sc->sc_nodes, nodes)
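
sunxi_sramc.c chains its records on a TAILQ from <sys/queue.h>: the linkage field is declared with TAILQ_ENTRY (line 101), entries are appended with TAILQ_INSERT_TAIL (line 134) and later walked with TAILQ_FOREACH (line 254). A self-contained userland sketch of the same queue operations, with a made-up record type in place of the driver's node:

    #include <sys/queue.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Hypothetical record; the real driver stores the OF node and an SRAM area id. */
    struct sramc_node {
        int id;
        TAILQ_ENTRY(sramc_node) nodes;              /* linkage, as at line 101 */
    };

    TAILQ_HEAD(sramc_node_list, sramc_node);

    int
    main(void)
    {
        struct sramc_node_list head = TAILQ_HEAD_INITIALIZER(head);
        struct sramc_node *node;
        int i;

        for (i = 0; i < 3; i++) {
            node = calloc(1, sizeof(*node));
            if (node == NULL)
                abort();
            node->id = i;
            TAILQ_INSERT_TAIL(&head, node, nodes);  /* append, as at line 134 */
        }

        TAILQ_FOREACH(node, &head, nodes)           /* walk, as at line 254 */
            printf("node %d\n", node->id);

        while ((node = TAILQ_FIRST(&head)) != NULL) {
            TAILQ_REMOVE(&head, node, nodes);
            free(node);
        }
        return 0;
    }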