    Searched refs:nodes (Results 1 - 25 of 124) sorted by relevancy


  /src/sys/external/bsd/ipf/netinet/
radix_ipf.c 33 ipf_rdx_node_t nodes[2], int *);
90 /* nodes(O) - pair of ipf_rdx_node_t's to initialise with data */
93 /* Initialise the fields in a pair of radix tree nodes according to the */
99 buildnodes(addrfamily_t *addr, addrfamily_t *mask, ipf_rdx_node_t nodes[2])
116 bzero(&nodes[0], sizeof(ipf_rdx_node_t) * 2);
117 nodes[0].maskbitcount = maskbits;
118 nodes[0].index = -1 - (ADF_OFF_BITS + maskbits);
119 nodes[0].addrkey = (u_32_t *)addr;
120 nodes[0].maskkey = (u_32_t *)mask;
121 nodes[0].addroff = nodes[0].addrkey + masklen
919 struct ipf_rdx_node nodes[2]; member in struct:myst
    [all...]
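
The buildnodes() hits above show the idiom: zero a pair of nodes, then fill the first one's mask and key fields from the caller's address/mask pair. A minimal userland sketch of that initialisation follows; the struct is a hypothetical, much-reduced stand-in for ipf_rdx_node_t and keeps only the fields visible in the hits.

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* hypothetical, much-reduced stand-in for ipf_rdx_node_t */
    struct rdx_node {
        int         maskbitcount;
        int         index;
        uint32_t    *addrkey;
        uint32_t    *maskkey;
    };

    static void
    build_node_pair(uint32_t *addr, uint32_t *mask, int maskbits,
        struct rdx_node nodes[2])
    {
        memset(nodes, 0, sizeof(struct rdx_node) * 2);
        nodes[0].maskbitcount = maskbits;
        nodes[0].index = -1 - maskbits;   /* simplified -1 - (ADF_OFF_BITS + maskbits) */
        nodes[0].addrkey = addr;
        nodes[0].maskkey = mask;
        nodes[1] = nodes[0];              /* second node reuses the same key buffers */
    }

    int
    main(void)
    {
        uint32_t addr = 0xc0a80000, mask = 0xffff0000;
        struct rdx_node pair[2];

        build_node_pair(&addr, &mask, 16, pair);
        printf("index=%d maskbits=%d\n", pair[0].index, pair[0].maskbitcount);
        return 0;
    }
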
  /src/bin/sh/
Makefile 11 GENSRCS=builtins.c init.c nodes.c
12 GENHDRS=builtins.h nodes.h token.h nodenames.h optinit.h
74 .ORDER: nodes.h nodes.c
75 nodes.c nodes.h: mknodes.sh nodetypes nodes.c.pat
78 [ -f nodes.h ]
80 nodenames.h: mknodenames.sh nodes.h
  /src/sys/external/bsd/drm2/dist/drm/amd/amdgpu/
amdgpu_vram_mgr.c 282 struct drm_mm_node *nodes = mem->mm_node; local in function:amdgpu_vram_mgr_bo_visible_size
292 for (usage = 0; nodes && pages; pages -= nodes->size, nodes++)
293 usage += amdgpu_vram_mgr_vis_size(adev, nodes);
338 struct drm_mm_node *nodes; local in function:amdgpu_vram_mgr_new
375 nodes = kvmalloc_array((uint32_t)num_nodes, sizeof(*nodes),
377 if (!nodes) {
393 r = drm_mm_insert_node_in_range(mm, &nodes[i], pages
456 struct drm_mm_node *nodes = mem->mm_node; local in function:amdgpu_vram_mgr_del
    [all...]
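
The amdgpu_vram_mgr_bo_visible_size hit (lines 282-293 above) walks the array of memory nodes backing a buffer and totals the pages that fall inside the CPU-visible window. A self-contained sketch of that walk; the node type and VISIBLE_PAGES window below are simplified stand-ins, not the real drm_mm_node or adev fields.

    #include <stdint.h>
    #include <stdio.h>

    struct mm_node {
        uint64_t start;     /* first page covered by this node */
        uint64_t size;      /* number of pages in this node */
    };

    #define VISIBLE_PAGES   65536ULL    /* assumed size of the visible window */

    static uint64_t
    visible_pages(const struct mm_node *node)
    {
        if (node->start >= VISIBLE_PAGES)
            return 0;
        if (node->start + node->size <= VISIBLE_PAGES)
            return node->size;
        return VISIBLE_PAGES - node->start;
    }

    int
    main(void)
    {
        struct mm_node nodes[] = { { 0, 1024 }, { 60000, 16384 } };
        uint64_t pages = 1024 + 16384, usage = 0;
        const struct mm_node *n = nodes;

        /* same shape as the loop in the hit: consume pages node by node */
        for (; pages != 0; pages -= n->size, n++)
            usage += visible_pages(n);
        printf("visible usage: %llu pages\n", (unsigned long long)usage);
        return 0;
    }
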
ta_xgmi_if.h 94 struct ta_xgmi_node_info nodes[TA_XGMI__MAX_CONNECTED_NODES]; member in struct:ta_xgmi_cmd_get_topology_info_input
99 struct ta_xgmi_node_info nodes[TA_XGMI__MAX_CONNECTED_NODES]; member in struct:ta_xgmi_cmd_get_topology_info_output
104 struct ta_xgmi_node_info nodes[TA_XGMI__MAX_CONNECTED_NODES]; member in struct:ta_xgmi_cmd_set_topology_info_input
amdgpu_psp_v11_0.c 762 topology_info_input->nodes[i].node_id = topology->nodes[i].node_id;
763 topology_info_input->nodes[i].num_hops = topology->nodes[i].num_hops;
764 topology_info_input->nodes[i].is_sharing_enabled = topology->nodes[i].is_sharing_enabled;
765 topology_info_input->nodes[i].sdma_engine = topology->nodes[i].sdma_engine;
777 topology->nodes[i].node_id = topology_info_output->nodes[i].node_id
    [all...]
  /src/sys/external/gpl2/dts/dist/arch/arm/boot/dts/
s3c6400.dtsi 7 * Samsung's S3C6400 SoC device nodes are listed in this file. S3C6400
11 * Note: This file does not include device nodes for all the controllers in
13 * nodes can be added to this file.
s3c6410.dtsi 7 * Samsung's S3C6410 SoC device nodes are listed in this file. S3C6410
11 * Note: This file does not include device nodes for all the controllers in
13 * nodes can be added to this file.
exynos-syscon-restart.dtsi 3 * Samsung's Exynos SoC syscon reboot/poweroff nodes common definition.
  /src/sys/external/bsd/drm2/dist/drm/selftests/
test-drm_mm.c 272 struct drm_mm_node nodes[2]; local in function:igt_debug
275 /* Create a small drm_mm with a couple of nodes and a few holes, and
281 memset(nodes, 0, sizeof(nodes));
282 nodes[0].start = 512;
283 nodes[0].size = 1024;
284 ret = drm_mm_reserve_node(&mm, &nodes[0]);
287 nodes[0].start, nodes[0].size);
291 nodes[1].size = 1024
380 struct drm_mm_node tmp, *nodes, *node, *next; local in function:__igt_reserve
578 struct drm_mm_node *nodes, *node, *next; local in function:__igt_insert
885 struct drm_mm_node *nodes, *node, *next; local in function:__igt_insert_range
1046 struct drm_mm_node *nodes, *node, *next; local in function:igt_align
1412 struct evict_node *nodes; local in function:igt_evict
1525 struct evict_node *nodes; local in function:igt_evict_range
1626 struct drm_mm_node *nodes, *node, *next; local in function:igt_topdown
1740 struct drm_mm_node *nodes, *node, *next; local in function:igt_bottomup
2164 struct evict_node *nodes; local in function:igt_color_evict
2267 struct evict_node *nodes; local in function:igt_color_evict_range
    [all...]
  /src/sys/dev/raidframe/
rf_dagffwr.c 62 * through the graph, blindly executing nodes until it reaches the end.
142 * All nodes are before the commit node (Cmt) are assumed to be atomic and
200 /* alloc the nodes: Wnd, xor, commit, block, term, and Wnp */
205 tmpNode->list_next = dag_h->nodes;
206 dag_h->nodes = tmpNode;
208 wndNodes = dag_h->nodes;
211 xorNode->list_next = dag_h->nodes;
212 dag_h->nodes = xorNode;
215 wnpNode->list_next = dag_h->nodes;
216 dag_h->nodes = wnpNode
    [all...]
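
Every rf_dag*.c hit in this directory uses the same bookkeeping idiom: each freshly allocated DAG node is pushed onto the header's singly linked nodes list through list_next, so the header owns every node for later teardown. A stand-alone sketch of that prepend, with hypothetical dag_hdr/dag_node types in place of RF_DagHeader_t/RF_DagNode_t:

    #include <stdio.h>
    #include <stdlib.h>

    struct dag_node {
        const char      *name;
        struct dag_node *list_next;
    };

    struct dag_hdr {
        struct dag_node *nodes;     /* head of the allocation list */
    };

    static struct dag_node *
    dag_alloc_node(struct dag_hdr *dag_h, const char *name)
    {
        struct dag_node *n = calloc(1, sizeof(*n));

        if (n == NULL)
            return NULL;
        n->name = name;
        n->list_next = dag_h->nodes;    /* prepend, as in the RAIDframe hits */
        dag_h->nodes = n;
        return n;
    }

    int
    main(void)
    {
        struct dag_hdr dag_h = { NULL };
        struct dag_node *n;

        dag_alloc_node(&dag_h, "block");
        dag_alloc_node(&dag_h, "commit");
        dag_alloc_node(&dag_h, "term");
        for (n = dag_h.nodes; n != NULL; n = n->list_next)
            printf("%s\n", n->name);    /* prints term, commit, block */
        /* teardown of this list is sketched with the rf_dagutils.c hit below */
        return 0;
    }
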
rf_dagffrd.c 60 * through the graph, blindly executing nodes until it reaches the end.
108 * There is one disk node per stripe unit accessed, and all disk nodes are in
112 * normally. Subsequent disk nodes are created by copying the first one,
117 * in ONLY ONE of the read nodes. This does not apply to the "params" field
189 tmpNode->list_next = dag_h->nodes;
190 dag_h->nodes = tmpNode;
192 diskNodes = dag_h->nodes;
195 blockNode->list_next = dag_h->nodes;
196 dag_h->nodes = blockNode;
199 commitNode->list_next = dag_h->nodes;
    [all...]
rf_dagdegrd.c 62 * through the graph, blindly executing nodes until it reaches the end.
136 /* alloc the Wnd nodes and the Wmir node */
142 /* total number of nodes = 1 + (block + commit + terminator) */
145 rdNode->list_next = dag_h->nodes;
146 dag_h->nodes = rdNode;
149 blockNode->list_next = dag_h->nodes;
150 dag_h->nodes = blockNode;
153 commitNode->list_next = dag_h->nodes;
154 dag_h->nodes = commitNode;
157 termNode->list_next = dag_h->nodes;
580 RF_DagNode_t *nodes, *rdNode, *blockNode, *commitNode, *termNode; local in function:rf_CreateRaidCDegradedReadDAG
1046 RF_DagNode_t *nodes, *rudNodes, *rrdNodes, *recoveryNode, *blockNode, local in function:rf_DoubleDegRead
    [all...]
rf_parityscan.c 408 * nNodes nodes at level 2, an unblock-recon node at level 3, and a
410 * and unblock nodes are not touched, nor are the pda fields in the
411 * second-level nodes, so they must be filled in later.
426 RF_DagNode_t *nodes, *termNode, *blockNode, *unblockNode, *tmpNode; local in function:rf_MakeSimpleDAG
443 /* create the nodes, the block & unblock nodes, and the terminator
448 tmpNode->list_next = dag_h->nodes;
449 dag_h->nodes = tmpNode;
451 nodes = dag_h->nodes;
    [all...]
rf_dagdegwr.c 62 * through the graph, blindly executing nodes until it reaches the end.
134 * commit nodes: Xor, Wnd
147 * The block & unblock nodes are leftovers from a previous version. They
210 /* create all the nodes at once */
223 * DAG generator, so here's what I'm gonna do- if there's no read nodes,
236 blockNode->list_next = dag_h->nodes;
237 dag_h->nodes = blockNode;
240 commitNode->list_next = dag_h->nodes;
241 dag_h->nodes = commitNode;
244 unblockNode->list_next = dag_h->nodes;
721 RF_DagNode_t *nodes, *wudNodes, *rrdNodes, *recoveryNode, *blockNode, local in function:rf_DoubleDegSmallWrite
    [all...]
rf_parityloggingdags.c 87 RF_DagNode_t *nodes, *wndNodes, *rodNodes = NULL, *syncNode, *xorNode, local in function:rf_CommonCreateParityLoggingLargeWriteDAG
103 /* alloc the Wnd nodes, the xor node, and the Lpo node */
105 nodes = RF_MallocAndAdd((nWndNodes + 6) * sizeof(*nodes), allocList);
107 wndNodes = &nodes[i];
109 xorNode = &nodes[i];
111 lpoNode = &nodes[i];
113 blockNode = &nodes[i];
115 syncNode = &nodes[i];
117 unblockNode = &nodes[i]
331 RF_DagNode_t *xorNodes, *blockNode, *unblockNode, *nodes; local in function:rf_CommonCreateParityLoggingSmallWriteDAG
    [all...]
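
The rf_CommonCreateParityLoggingLargeWriteDAG hit (lines 103-117 above) makes one allocation big enough for every node in the DAG and then carves named pointers out of it with a running index. A reduced sketch of just that carving, with a hypothetical node type and fewer fixed nodes than the original:

    #include <stdio.h>
    #include <stdlib.h>

    struct dag_node { int num; };

    int
    main(void)
    {
        int nWndNodes = 4, i = 0;
        struct dag_node *nodes, *wndNodes, *xorNode, *blockNode, *unblockNode;

        /* one allocation covers the write nodes plus the fixed nodes */
        nodes = calloc(nWndNodes + 3, sizeof(*nodes));
        if (nodes == NULL)
            return 1;
        wndNodes = &nodes[i];       i += nWndNodes;
        xorNode = &nodes[i];        i += 1;
        blockNode = &nodes[i];      i += 1;
        unblockNode = &nodes[i];    i += 1;
        printf("%d nodes, xor at slot %td\n", i, xorNode - nodes);
        (void)wndNodes; (void)blockNode; (void)unblockNode;
        free(nodes);
        return 0;
    }
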
rf_dagutils.c 69 /* The maximum number of nodes in a DAG is bounded by
204 while (dag_h->nodes) {
205 tmpnode = dag_h->nodes;
206 dag_h->nodes = dag_h->nodes->list_next;
703 RF_DagNode_t **nodes, int unvisited)
709 nodes[node->nodeNum] = node;
751 acount, nodes, unvisited)) {
796 * -- all nodes have status wait
797 * -- numAntDone is zero in all nodes
812 RF_DagNode_t **nodes; /* array of ptrs to nodes in dag */ local in function:rf_ValidateDAG
    [all...]
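
The loop at rf_dagutils.c lines 204-206 above tears that list down by repeatedly popping the head node. A stand-alone version of the same teardown, assuming each node was malloc'd individually:

    #include <stdlib.h>

    struct dag_node {
        struct dag_node *list_next;
    };

    struct dag_hdr {
        struct dag_node *nodes;
    };

    static void
    dag_free_all(struct dag_hdr *dag_h)
    {
        struct dag_node *tmpnode;

        while (dag_h->nodes != NULL) {
            tmpnode = dag_h->nodes;
            dag_h->nodes = dag_h->nodes->list_next;
            free(tmpnode);
        }
    }

    int
    main(void)
    {
        struct dag_hdr dag_h = { NULL };
        struct dag_node *n = malloc(sizeof(*n));

        if (n != NULL) {
            n->list_next = dag_h.nodes;     /* prepend one node */
            dag_h.nodes = n;
        }
        dag_free_all(&dag_h);
        return 0;
    }
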
  /src/usr.bin/make/unit-tests/
deptgt-end-fail-all.mk 7 # Until 2020-12-07, the .END node was made even if the main nodes had failed.
9 # dependency of the main nodes had failed, just not if one of the main nodes
depsrc-end.mk 4 # paradox but works since these special nodes are not in the dependency
depsrc-wait.mk 4 # which adds a sequence point between the nodes to its left and the nodes
varname-dot-alltargets.mk 20 # about all nodes, therefore source is also included.
depsrc-usebefore.mk 6 # If a target depends on several .USE or .USEBEFORE nodes, the commands get
7 # appended or prepended in declaration order. For .USE nodes, this is the
8 # expected order, for .USEBEFORE nodes the order is somewhat reversed, and for
9 # .USE or .USEBEFORE nodes that depend on other .USE or .USEBEFORE nodes, it
  /src/sys/external/bsd/drm2/dist/drm/nouveau/nvkm/core/
nouveau_nvkm_core_mm.c 31 #define node(root, dir) ((root)->nl_entry.dir == &mm->nodes) ? NULL : \
41 list_for_each_entry(node, &mm->nodes, nl_entry) {
251 prev = list_last_entry(&mm->nodes, typeof(*node), nl_entry);
260 list_add_tail(&node->nl_entry, &mm->nodes);
264 INIT_LIST_HEAD(&mm->nodes);
280 list_add_tail(&node->nl_entry, &mm->nodes);
291 int nodes = 0; local in function:nvkm_mm_fini
296 list_for_each_entry(node, &mm->nodes, nl_entry) {
298 if (++nodes > mm->heap_nodes) {
305 list_for_each_entry_safe(node, temp, &mm->nodes, nl_entry)
    [all...]
  /src/sys/external/bsd/drm2/dist/drm/nouveau/include/nvkm/core/
mm.h 23 struct list_head nodes; member in struct:nvkm_mm
50 list_for_each_entry(node, &mm->nodes, nl_entry) {
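
Both nouveau hits keep the allocator's nodes on a kernel-style intrusive list: struct nvkm_mm holds the list head (mm.h line 23) and every node embeds its own nl_entry link, which list_add_tail and list_for_each_entry then operate on. A compact userland approximation of that pattern follows; it re-creates only the idea, not the kernel's list.h macros.

    #include <stddef.h>
    #include <stdio.h>

    struct list_entry {
        struct list_entry *prev, *next;
    };

    struct mm_node {
        int offset;
        struct list_entry nl_entry;     /* embedded link, as in nvkm_mm_node */
    };

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    static void
    list_init(struct list_entry *head)
    {
        head->prev = head->next = head;
    }

    static void
    list_add_tail(struct list_entry *item, struct list_entry *head)
    {
        item->prev = head->prev;
        item->next = head;
        head->prev->next = item;
        head->prev = item;
    }

    int
    main(void)
    {
        struct list_entry nodes;        /* plays the role of mm->nodes */
        struct mm_node a = { 0 }, b = { 64 };
        struct list_entry *p;

        list_init(&nodes);
        list_add_tail(&a.nl_entry, &nodes);
        list_add_tail(&b.nl_entry, &nodes);
        for (p = nodes.next; p != &nodes; p = p->next)
            printf("node at offset %d\n",
                container_of(p, struct mm_node, nl_entry)->offset);
        return 0;
    }
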
  /src/sys/fs/tmpfs/
tmpfs_vfsops.c 94 ino_t nodes; local in function:tmpfs_mount
146 nodes = 3 + (memlimit / 1024);
149 nodes = args->ta_nodes_max;
152 nodes = MIN(nodes, INT_MAX);
153 KASSERT(nodes >= 3);
157 if (set_nodes && nodes < tmp->tm_nodes_cnt)
173 tmp->tm_nodes_max = nodes;
191 tmp->tm_nodes_max = nodes;
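
The tmpfs_mount hit derives the mount's inode limit from its memory limit when the user does not supply ta_nodes_max: three nodes plus roughly one per kilobyte, clamped to INT_MAX and asserted to stay at or above 3 (mirroring the KASSERT in the hit). A hedged stand-alone version of just that computation:

    #include <assert.h>
    #include <limits.h>
    #include <stdint.h>
    #include <stdio.h>

    static uint64_t
    tmpfs_node_limit(uint64_t memlimit, uint64_t requested)
    {
        uint64_t nodes;

        if (requested != 0)
            nodes = requested;              /* explicit ta_nodes_max */
        else
            nodes = 3 + (memlimit / 1024);  /* default: ~1 node per KB */
        if (nodes > INT_MAX)
            nodes = INT_MAX;
        assert(nodes >= 3);                 /* mirrors KASSERT(nodes >= 3) */
        return nodes;
    }

    int
    main(void)
    {
        printf("%llu\n",
            (unsigned long long)tmpfs_node_limit(16 * 1024 * 1024, 0));
        return 0;
    }
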
  /src/sys/arch/powerpc/booke/dev/
cpunode.c 54 static u_int nodes; variable in typeref:typename:u_int
63 if (ma->ma_node > 8 || (nodes & (1 << ma->ma_node)))
105 nodes |= 1 << ma->ma_node;
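
cpunode.c tracks which node numbers have already attached with a plain bitmask: refuse anything above 8 or already seen, otherwise set that node's bit. The same check-and-set outside autoconf(9), as a sketch:

    #include <stdbool.h>
    #include <stdio.h>

    static unsigned int nodes;      /* bit N set once node N has attached */

    static bool
    node_attach(unsigned int node)
    {
        if (node > 8 || (nodes & (1U << node)))
            return false;           /* out of range or already attached */
        nodes |= 1U << node;
        return true;
    }

    int
    main(void)
    {
        bool first = node_attach(2);
        bool again = node_attach(2);

        printf("%d %d\n", first, again);    /* 1 then 0: duplicate rejected */
        return 0;
    }
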
