    Searched refs:rb (Results 1 - 25 of 147), sorted by relevance


  /src/sys/external/bsd/drm2/dist/drm/amd/display/dmub/inc/
dmub_rb.h 58 static inline bool dmub_rb_empty(struct dmub_rb *rb)
60 return (rb->wrpt == rb->rptr);
63 static inline bool dmub_rb_full(struct dmub_rb *rb)
67 if (rb->wrpt >= rb->rptr)
68 data_count = rb->wrpt - rb->rptr;
70 data_count = rb->capacity - (rb->rptr - rb->wrpt)
    [all...]
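  Note: the dmub_rb.h hit shows the usual write-pointer/read-pointer ring-buffer test: empty when wrpt == rptr, occupancy computed with a wraparound branch. Below is a minimal standalone sketch of the same arithmetic; the names ring_buf and ENTRY_SIZE are illustrative, not taken from the driver.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Minimal sketch of the wrpt/rptr ring-buffer checks seen in dmub_rb.h.
 * ring_buf and ENTRY_SIZE are illustrative names, not from the driver.
 */
#define ENTRY_SIZE 64u

struct ring_buf {
	uint32_t rptr;     /* read pointer (byte offset) */
	uint32_t wrpt;     /* write pointer (byte offset) */
	uint32_t capacity; /* total size in bytes */
};

static inline bool ring_empty(const struct ring_buf *rb)
{
	/* Empty when the writer has not moved past the reader. */
	return rb->wrpt == rb->rptr;
}

static inline bool ring_full(const struct ring_buf *rb)
{
	uint32_t data_count;

	if (rb->wrpt >= rb->rptr)
		data_count = rb->wrpt - rb->rptr;                   /* no wrap */
	else
		data_count = rb->capacity - (rb->rptr - rb->wrpt);  /* wrapped */

	/* Full when only one entry slot remains, so full never looks like empty. */
	return data_count == rb->capacity - ENTRY_SIZE;
}

int main(void)
{
	struct ring_buf rb = { .rptr = 0, .wrpt = 0, .capacity = 4 * ENTRY_SIZE };

	printf("empty=%d full=%d\n", ring_empty(&rb), ring_full(&rb));
	rb.wrpt = 3 * ENTRY_SIZE;  /* three entries written */
	printf("empty=%d full=%d\n", ring_empty(&rb), ring_full(&rb));
	return 0;
}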
  /src/sys/sys/
rbtree.h 62 #define RB_FATHER(rb) \
63 ((struct rb_node *)((rb)->rb_info & ~RB_FLAG_MASK))
64 #define RB_SET_FATHER(rb, father) \
65 ((void)((rb)->rb_info = (uintptr_t)(father)|((rb)->rb_info & RB_FLAG_MASK)))
67 #define RB_SENTINEL_P(rb) ((rb) == NULL)
68 #define RB_LEFT_SENTINEL_P(rb) RB_SENTINEL_P((rb)->rb_left)
69 #define RB_RIGHT_SENTINEL_P(rb) RB_SENTINEL_P((rb)->rb_right)
    [all...]
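  Note: the rbtree.h macros above pack the parent pointer and a few flag bits into one rb_info word, relying on node alignment to keep the low pointer bits free. A rough standalone sketch of that packing, with illustrative names (node, FLAG_MASK, FLAG_RED):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Sketch of the trick behind RB_FATHER()/RB_SET_FATHER(): nodes are at
 * least word aligned, so the low bits of the parent pointer are always
 * zero and can carry per-node flags.
 */
#define FLAG_MASK ((uintptr_t)0x3)  /* two low bits available for flags */
#define FLAG_RED  ((uintptr_t)0x1)

struct node {
	uintptr_t info;  /* parent pointer | flag bits */
};

static struct node *node_parent(const struct node *n)
{
	return (struct node *)(n->info & ~FLAG_MASK);
}

static void node_set_parent(struct node *n, struct node *parent)
{
	/* Keep the existing flag bits, replace only the pointer part. */
	n->info = (uintptr_t)parent | (n->info & FLAG_MASK);
}

int main(void)
{
	static struct node parent, child;

	child.info = FLAG_RED;           /* flag set, no parent yet */
	node_set_parent(&child, &parent);

	assert(node_parent(&child) == &parent);
	assert(child.info & FLAG_RED);   /* flag survived the update */
	printf("parent=%p red=%lu\n", (void *)node_parent(&child),
	    (unsigned long)(child.info & FLAG_RED));
	return 0;
}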
  /src/sys/dev/cardbus/
rbus.c 206 rbus_tag_t rb; local in function:rbus_new_body
220 if (NULL == (rb = (rbus_tag_t)malloc(sizeof(struct rbustag), M_DEVBUF,
225 rb->rb_bt = bt;
226 rb->rb_parent = parent;
227 rb->rb_start = start;
228 rb->rb_end = end;
229 rb->rb_offset = offset;
230 rb->rb_flags = flags;
231 rb->rb_ext = ex;
240 return rb;
254 rbus_tag_t rb; local in function:rbus_new
294 rbus_tag_t rb; local in function:rbus_new_root_delegate
    [all...]
  /src/sys/arch/hpcmips/dev/
ucbsnd.c 655 ringbuf_allocate(struct ring_buf *rb, size_t blksize, int maxblk)
657 rb->rb_bufsize = blksize * maxblk;
658 rb->rb_blksize = blksize;
659 rb->rb_maxblks = maxblk;
661 rb->rb_buf = (u_int32_t)kmem_alloc(rb->rb_bufsize, KM_SLEEP);
663 rb->rb_buf = (u_int32_t)dmabuf_static;
665 if (rb->rb_buf == 0) {
669 memset((char*)rb->rb_buf, 0, rb->rb_bufsize)
    [all...]
  /src/sys/arch/hp300/dev/
rbox.c 275 volatile struct rboxfb *rb = (struct rboxfb *)fb->regkva; local in function:rbox_restore
278 rb_waitbusy(rb);
280 rb->regs.id = GRFHWID; /* trigger reset */
283 rb->regs.interrupt = 0x04;
284 rb->video_enable = 0x01;
285 rb->drive = 0x01;
286 rb->vdrive = 0x0;
288 rb->opwen = 0xFF;
305 rb->rep_rule = RBOX_DUALROP(RR_COPY);
326 rb->blink = 0x00
376 volatile struct rboxfb *rb = (struct rboxfb *)fb->regkva; local in function:rbox_windowmove
    [all...]
  /src/sys/external/bsd/drm2/dist/drm/
drm_mm.c 39 * an RB-tree is used instead. At least if we expect heavy fragmentation.
161 INTERVAL_TREE_DEFINE(struct drm_mm_node, rb,
189 struct rb_node **link, *rb; local in function:drm_mm_interval_tree_add_node
196 rb = &hole_node->rb;
197 while (rb) {
198 parent = rb_entry(rb, struct drm_mm_node, rb);
203 rb = rb_parent(rb);
440 struct rb_node *rb = mm->holes_addr.rbr_tree.rbt_root; local in function:find_hole
    [all...]
drm_prime.c 186 struct rb_node **p, *rb; local in function:drm_prime_add_buf_handle
202 rb = NULL;
207 rb = *p;
208 pos = rb_entry(rb, struct drm_prime_member, dmabuf_rb);
210 p = &rb->rb_right;
212 p = &rb->rb_left;
214 rb_link_node(&member->dmabuf_rb, rb, p);
223 rb = NULL;
228 rb = *p;
229 pos = rb_entry(rb, struct drm_prime_member, handle_rb)
    [all...]
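  Note: the drm_prime.c hit is the common descend-and-link insertion loop: walk the pointer-to-link **p down the tree, remember the last visited node as the parent, then splice the new node in at *p via rb_link_node(). A standalone sketch of the same pattern follows, using a plain (unbalanced) binary search tree so it compiles on its own; the real code also calls rb_insert_color() to rebalance, and the struct and key names below are illustrative.

#include <stdio.h>
#include <stdlib.h>

struct member {
	unsigned long key;            /* stands in for the dma-buf/handle key */
	struct member *left, *right;
};

static void insert_member(struct member **root, struct member *new)
{
	struct member **p = root, *parent = NULL;

	/* Descend: p always points at the link we would rewrite. */
	while (*p) {
		parent = *p;
		if (new->key < parent->key)
			p = &parent->left;
		else
			p = &parent->right;
	}
	/* Equivalent of rb_link_node(&new->node, parent, p). */
	new->left = new->right = NULL;
	*p = new;
	(void)parent;  /* a real rb-tree stores the parent here, then rebalances */
}

int main(void)
{
	struct member *root = NULL;
	unsigned long keys[] = { 42, 7, 99 };

	for (size_t i = 0; i < 3; i++) {
		struct member *m = calloc(1, sizeof(*m));
		m->key = keys[i];
		insert_member(&root, m);
	}
	printf("root key = %lu\n", root->key);  /* 42: the first insert becomes the root */
	return 0;
}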
  /src/usr.sbin/mlxctl/
cmds.c 370 struct mlx_rebuild_request rb; local in function:cmd_rebuild
381 rb.rr_channel = (int)strtol(*argv, &p, 0);
385 rb.rr_target = (int)strtol(*argv, &p, 0);
389 if (ioctl(mlxfd, MLX_REBUILDASYNC, &rb) == 0)
392 switch (rb.rr_status) {
394 warnx("the drive at %d:%d is already ONLINE", rb.rr_channel,
395 rb.rr_target);
403 warnx("there is no drive at %d:%d", rb.rr_channel,
404 rb.rr_target);
  /src/sys/external/bsd/drm2/dist/drm/i915/
i915_scheduler.c 50 static inline struct i915_priolist *to_priolist(struct rb_node *rb)
52 return rb_entry(rb, struct i915_priolist, node);
57 struct rb_node *rb; local in function:assert_priolists
67 for (rb = rb_first_cached(&execlists->queue);
68 rb;
69 rb = rb_next2(&execlists->queue.rb_root, rb)) {
70 const struct i915_priolist *p = to_priolist(rb);
138 struct rb_node **parent, *rb; local in function:i915_sched_lookup_priolist
156 __USE(rb);
    [all...]
  /src/sys/arch/powerpc/fpu/
fpu_emu.c 349 int ra, rb, rc, rt, type, mask, fsr, cx, bf, setcr, cond; local in function:fpu_execute
402 rb = instr.i_x.i_rb;
404 ra, tf->tf_fixreg[ra], rb, tf->tf_fixreg[rb]));
406 addr = tf->tf_fixreg[rb];
434 rb = instr.i_x.i_rb;
436 ra, tf->tf_fixreg[ra], rb, tf->tf_fixreg[rb]));
437 addr = tf->tf_fixreg[rb];
518 rb = instr.i_x.i_rb
    [all...]
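  Note: fpu_emu.c pulls the RA/RB register numbers out of the instruction word through the instr.i_x bitfield union. A hedged shift-and-mask equivalent for the X-form fields (RT in bits 6-10, RA in 11-15, RB in 16-20, IBM bit numbering):

#include <stdint.h>
#include <stdio.h>

/*
 * Sketch of decoding the RT/RA/RB fields of a PowerPC X-form instruction
 * from a raw 32-bit word; the kernel reaches the same fields through the
 * instr.i_x bitfield union instead of explicit shifts.
 */
static unsigned xform_rt(uint32_t insn) { return (insn >> 21) & 0x1f; }
static unsigned xform_ra(uint32_t insn) { return (insn >> 16) & 0x1f; }
static unsigned xform_rb(uint32_t insn) { return (insn >> 11) & 0x1f; }

int main(void)
{
	/* Hypothetical X-form word with RT=1, RA=3, RB=4 in the register fields. */
	uint32_t insn = (31u << 26) | (1u << 21) | (3u << 16) | (4u << 11);

	printf("rt=%u ra=%u rb=%u\n", xform_rt(insn), xform_ra(insn), xform_rb(insn));
	return 0;
}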
  /src/games/gomoku/
bdinit.c 192 adjust_overlap(u_char ov, int ra, int offa, int rb, int offb, int mask)
195 if (rb != ra)
239 for (int rb = 4; --rb >= 0;) { local in function:init_overlap_frame
240 int db = dd[rb];
247 if (is_blocked(spb0, rb))
250 frame_index fib = spb0->s_frame[rb];
253 *op = adjust_overlap(*op, ra, offa, rb, offb, mask);
makemove.c 280 * with direction 'rb' that cross frame 'a' in 'os'. Since the spot 'os'
285 update_overlap_different_direction(spot_index os, frame_index a, direction rb)
288 int db = dd[rb];
293 if (is_blocked(sp, rb))
296 frame_index b = sp->s_frame[rb];
340 for (direction rb = 0; rb < r; rb++) local in function:update_overlap
341 update_overlap_different_direction(os, a, rb);
  /src/sys/arch/x86/include/
pmap_pv.h 73 rb_tree_t rb; member in union:pmap_page::__anond0af0539010a
86 #define pp_rb pp_u.rb
  /src/sys/external/bsd/drm2/include/linux/
interval_tree.h 46 struct rb_node rb; member in struct:interval_tree_node
84 .rbto_node_offset = offsetof(struct interval_tree_node, rb),
  /src/sys/dev/ieee1394/
firewire.c 1057 fw_rcv(struct fw_rcv_buf *rb)
1076 fp = (struct fw_pkt *)rb->vec[0].iov_base;
1083 rb->xfer = fw_tl2xfer(rb->fc, fp->mode.hdr.src,
1085 if (rb->xfer == NULL) {
1086 aprint_error_dev(rb->fc->bdev, "unknown response"
1095 rb->xfer = fw_tl2xfer(rb->fc, fp->mode.hdr.src,
1097 if (rb->xfer == NULL) {
1105 fw_rcv_copy(rb);
    [all...]
  /src/sys/external/bsd/drm2/dist/drm/i915/gt/uc/
intel_guc_submission.c 62 static inline struct i915_priolist *to_priolist(struct rb_node *rb)
64 return rb_entry(rb, struct i915_priolist, node);
300 struct rb_node *rb; local in function:__guc_dequeue
317 while ((rb = rb_first_cached(&execlists->queue))) {
318 struct i915_priolist *p = to_priolist(rb);
343 rb ? to_priolist(rb)->priority : INT_MIN;
438 struct rb_node *rb; local in function:guc_reset_cancel
471 while ((rb = rb_first_cached(&execlists->queue))) {
472 struct i915_priolist *p = to_priolist(rb);
    [all...]
  /src/sys/arch/arm/include/arm32/
frame.h 138 #define GET_CPSR(rb) /* nothing */
139 #define CPSID_I(ra,rb) cpsid i
140 #define CPSIE_I(ra,rb) cpsie i
142 #define GET_CPSR(rb) \
143 mrs rb, cpsr /* fetch CPSR */
145 #define CPSID_I(ra,rb) \
146 orr ra, rb, #(IF32_bits) ;\
149 #define CPSIE_I(ra,rb) \
150 bic ra, rb, #(IF32_bits) ;\
  /src/sys/arch/powerpc/powerpc/
fixup.c 128 const u_int rb = i.i_x.i_rb; local in function:powerpc_fixup_stubs
132 const u_int spr = (rb << 5) | ra;
141 const u_int spr = (rb << 5) | ra;
151 KASSERT(valid_mask & (1 << rb));
153 fixreg[ra] = fixreg[rs] | fixreg[rb];
  /src/sys/dev/i2o/
iopsp.c 535 struct i2o_scsi_reply *rb; local in function:iopsp_intr
542 rb = reply;
546 if ((rb->msgflags & I2O_MSGFLAGS_FAIL) != 0) {
550 if (rb->hbastatus != I2O_SCSI_DSC_SUCCESS) {
551 switch (rb->hbastatus) {
571 rb->hbastatus);
572 } else if (rb->scsistatus != SCSI_OK) {
573 switch (rb->scsistatus) {
576 sl = le32toh(rb->senselen);
579 memcpy(&xs->sense.scsi_sense, rb->sense, sl)
    [all...]
ld_iop.c 487 struct i2o_rbs_reply *rb; local in function:ld_iop_intr
494 rb = reply;
499 err = ((rb->msgflags & I2O_MSGFLAGS_FAIL) != 0);
501 if (!err && rb->reqstatus != I2O_STATUS_SUCCESS) {
502 detail = le16toh(rb->detail);
515 bp->b_resid = bp->b_bcount - le32toh(rb->transfercount);
525 struct i2o_util_event_register_reply *rb; local in function:ld_iop_intr_event
530 rb = reply;
532 if ((rb->msgflags & I2O_MSGFLAGS_FAIL) != 0)
535 event = le32toh(rb->event)
    [all...]
iop.c 1613 struct i2o_reply *rb; local in function:iop_handle_reply
1621 rb = (struct i2o_reply *)((char *)sc->sc_rep + off);
1628 if ((le32toh(rb->msgflags) & I2O_MSGFLAGS_64BIT) != 0)
1634 ictx = le32toh(rb->msgictx);
1644 iop_reply_print(sc, rb);
1657 if ((rb->msgflags & I2O_MSGFLAGS_FAIL) != 0) {
1660 fn = (struct i2o_fault_notify *)rb;
1665 status = rb->reqstatus;
1666 tctx = le32toh(rb->msgtctx);
1685 if ((rb->msgflags & I2O_MSGFLAGS_FAIL) == 0
1782 struct i2o_util_event_register_reply *rb; local in function:iop_intr_event
    [all...]
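  Note: the i2o handlers above convert reply fields with le16toh()/le32toh() from <sys/endian.h> because the reply frame fields are little-endian. A portable standalone sketch of the same conversion (the reply bytes below are made up):

#include <stdint.h>
#include <stdio.h>

/* Assemble little-endian values byte by byte, independent of host endianness. */
static uint32_t get_le32(const uint8_t *p)
{
	return (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
	    ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
}

static uint16_t get_le16(const uint8_t *p)
{
	return (uint16_t)(p[0] | (p[1] << 8));
}

int main(void)
{
	/* Hypothetical reply bytes: a 32-bit transfer count, then a 16-bit detail code. */
	const uint8_t reply[] = { 0x00, 0x02, 0x00, 0x00, 0x2a, 0x00 };

	printf("transfercount=%u detail=%u\n", get_le32(reply), get_le16(reply + 4));
	return 0;
}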
  /src/sys/external/bsd/drm2/dist/drm/i915/gt/
intel_lrc.c 210 struct rb_node rb; member in struct:virtual_engine::ve_node
268 .rbto_node_offset = offsetof(struct ve_node, rb),
339 static inline struct i915_priolist *to_priolist(struct rb_node *rb)
341 return rb_entry(rb, struct i915_priolist, node);
381 struct rb_node *rb; local in function:queue_prio
383 rb = rb_first_cached(&execlists->queue);
384 if (!rb)
391 p = to_priolist(rb);
397 struct rb_node *rb)
434 if (rb) {
1830 struct rb_node *rb; local in function:execlists_dequeue
3681 struct rb_node *rb; local in function:execlists_reset_cancel
4870 struct rb_node **parent, *rb; local in function:virtual_submission_tasklet
5252 struct rb_node *rb; local in function:intel_execlists_show_requests
    [all...]
  /src/sys/ufs/chfs/
ebh.h 109 * @rb: RB-node of the tree
115 RB_ENTRY(chfs_ltree_entry) rb;
131 * @u.rb: link in the used RB-tree of chfs_scan_info
140 RB_ENTRY(chfs_scan_leb) rb;
154 * @used: RB-tree of used PEBs describing by chfs_scan_leb
178 * @u.rb: link to the RB-tree to the free PEBs
185 RB_ENTRY(chfs_peb) rb;
    [all...]
  /src/sys/external/bsd/drm2/dist/drm/i915/gem/
i915_gem_mman.c 741 struct rb_node *rb;
744 rb = obj->mmo.offsets.rb_node;
745 while (rb) {
747 rb_entry(rb, typeof(*mmo), offset);
755 rb = rb->rb_right;
757 rb = rb->rb_left;
790 struct rb_node *rb, **p;
793 rb = NULL
    [all...]
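  Note: the i915_gem_mman.c hit is a plain binary-search descent keyed on the mmap offset, with rb_entry() recovering the containing object from the embedded rb_node. A self-contained sketch of the same lookup loop, with the key stored directly in the node (names are illustrative):

#include <stdio.h>

struct mmap_offset {
	unsigned long offset;
	struct mmap_offset *left, *right;
};

static struct mmap_offset *
lookup_mmo(struct mmap_offset *root, unsigned long offset)
{
	struct mmap_offset *node = root;

	while (node) {
		if (offset == node->offset)
			return node;          /* found */
		if (offset > node->offset)
			node = node->right;   /* key is larger: go right */
		else
			node = node->left;    /* key is smaller: go left */
	}
	return NULL;                          /* not present */
}

int main(void)
{
	struct mmap_offset a = { 100, NULL, NULL }, b = { 300, NULL, NULL };
	struct mmap_offset root = { 200, &a, &b };

	printf("found 300: %s\n", lookup_mmo(&root, 300) ? "yes" : "no");
	printf("found 250: %s\n", lookup_mmo(&root, 250) ? "yes" : "no");
	return 0;
}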
  /src/sys/external/gpl2/dts/dist/arch/arm/boot/dts/
at91-linea.dtsi 63 atmel,rb = <0>;
