/src/sys/external/bsd/drm2/dist/include/drm/ttm/ |
ttm_execbuf_util.h |
    57 * @ticket: ww_acquire_ctx from reserve call
    64 extern void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket,
    70 * @ticket: [out] ww_acquire_ctx filled in by call, or NULL if only
    102 extern int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
    109 * @ticket: ww_acquire_ctx from reserve call
    119 extern void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket,
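The prototypes above are the whole ttm_eu_* ticket API: reserve a list of buffers under one ww_acquire_ctx, then either fence them or back off. A minimal sketch of that flow for a single buffer follows; it is not taken from the listed files, and the ttm_validate_buffer field names (.bo, .num_shared) are assumed from this tree's headers.

/*
 * Sketch: reserve one BO under a ticket, attach a fence, release.
 * ttm_eu_fence_buffer_objects() also drops every reservation and
 * finishes the ww_acquire_ctx, so no explicit backoff is needed on
 * the success path.
 */
#include <drm/ttm/ttm_execbuf_util.h>

static int
submit_one_bo(struct ttm_buffer_object *bo, struct dma_fence *fence)
{
	struct ttm_validate_buffer tv = { .bo = bo, .num_shared = 1 };
	struct ww_acquire_ctx ticket;
	struct list_head list;
	int ret;

	INIT_LIST_HEAD(&list);
	list_add(&tv.head, &list);

	/* Fills in the ticket and reserves every BO on the list. */
	ret = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
	if (ret != 0)
		return ret;

	/* ... validate the BO and queue the work that signals @fence ... */

	/* Publish the fence on the BO, drop the reservation and the ticket. */
	ttm_eu_fence_buffer_objects(&ticket, &list, fence);
	return 0;
}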
|
ttm_bo_driver.h |
    665 * @ticket: ticket used to acquire the ww_mutex.
    677 * -EALREADY: Bo already reserved using @ticket. This error code will only
    682 struct ww_acquire_ctx *ticket)
    688 if (WARN_ON(ticket))
    696 ret = dma_resv_lock_interruptible(bo->base.resv, ticket);
    698 ret = dma_resv_lock(bo->base.resv, ticket);
    710 * @ticket: ticket used to acquire the ww_mutex.
    724 * and call this function with @use_ticket == 1 and @ticket->stamp == the uniqu
    [all...] |
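Read together, the fragments above show how the per-BO reserve helper threads the caller's ticket into dma_resv_lock(). A simplified sketch of that helper (argument names assumed, structure following the fragments) is:

/*
 * Sketch of the reserve step: a trylock may not be combined with a
 * ticket, otherwise the ticket goes straight to dma_resv_lock(),
 * which reports -EDEADLK on a lock-order reversal and -EALREADY when
 * this ticket already holds the BO.
 */
static int
reserve_bo_sketch(struct ttm_buffer_object *bo, bool interruptible,
    bool no_wait, struct ww_acquire_ctx *ticket)
{
	int ret;

	if (no_wait) {
		if (WARN_ON(ticket))	/* trylock plus ticket is a bug */
			return -EBUSY;
		return dma_resv_trylock(bo->base.resv) ? 0 : -EBUSY;
	}

	if (interruptible)
		ret = dma_resv_lock_interruptible(bo->base.resv, ticket);
	else
		ret = dma_resv_lock(bo->base.resv, ticket);
	if (ret == -EINTR)
		ret = -ERESTARTSYS;
	return ret;
}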
/src/sys/external/bsd/drm2/include/linux/ |
smp.h |
    68 uint64_t ticket; local in function:on_each_cpu
    70 ticket = xc_broadcast(0, &on_each_cpu_xc, &f, cookie);
    72 xc_wait(ticket);
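Here the ticket is NetBSD's cross-call ticket rather than a ww_acquire_ctx: xc_broadcast() queues the function on every CPU and returns a uint64_t that xc_wait() later blocks on. A minimal sketch of the same pattern (callback and arguments illustrative):

#include <sys/xcall.h>

static void
example_xc(void *arg1, void *arg2)
{
	/* Runs once on each CPU. */
}

static void
run_on_all_cpus(void *cookie)
{
	uint64_t ticket;

	/* The ticket identifies this broadcast until xc_wait() returns. */
	ticket = xc_broadcast(0, example_xc, cookie, NULL);
	xc_wait(ticket);
}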
|
/src/sys/external/bsd/drm2/dist/drm/ttm/ |
ttm_execbuf_util.c |
    51 void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket,
    68 if (ticket)
    69 ww_acquire_fini(ticket);
    85 int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
    95 if (ticket)
    96 ww_acquire_init(ticket, &reservation_ww_class);
    101 ret = __ttm_bo_reserve(bo, intr, (ticket == NULL), ticket);
    129 ticket);
    131 dma_resv_lock_slow(bo->base.resv, ticket);
    [all...] |
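Lines 101-131 above are the interesting part of ttm_eu_reserve_buffers(): the ww-mutex backoff dance. A condensed, array-based sketch of that loop follows (the real code walks the validation list, handles LRU bookkeeping and has an interruptible slow path); it is illustrative, not the listed function.

/*
 * Condensed sketch of the reserve/backoff loop: on -EDEADLK every
 * held reservation is dropped, the contended BO is then taken with
 * dma_resv_lock_slow() under the same ticket, and the walk restarts.
 * Moving the contended BO to slot 0 guarantees a later backoff
 * releases it as well.
 */
static int
reserve_all_sketch(struct ww_acquire_ctx *ticket,
    struct ttm_buffer_object **bos, unsigned int n, bool intr)
{
	unsigned int i, held;
	int ret;

	ww_acquire_init(ticket, &reservation_ww_class);
restart:
	for (held = 0; held < n; held++) {
		ret = intr ?
		    dma_resv_lock_interruptible(bos[held]->base.resv, ticket) :
		    dma_resv_lock(bos[held]->base.resv, ticket);
		if (ret == -EALREADY)	/* already held under this ticket */
			continue;
		if (ret == -EDEADLK) {
			struct ttm_buffer_object *contended = bos[held];

			for (i = 0; i < held; i++)
				dma_resv_unlock(bos[i]->base.resv);
			/* Sleep on the contended lock, then retry the walk. */
			dma_resv_lock_slow(contended->base.resv, ticket);
			bos[held] = bos[0];
			bos[0] = contended;
			goto restart;
		}
		if (ret != 0) {
			for (i = 0; i < held; i++)
				dma_resv_unlock(bos[i]->base.resv);
			ww_acquire_fini(ticket);
			return ret;
		}
	}
	ww_acquire_done(ticket);
	return 0;
}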
ttm_bo.c |
    821 * @ticket: acquire ticket
    827 struct ww_acquire_ctx *ticket)
    831 if (!busy_bo || !ticket)
    836 ticket);
    838 r = dma_resv_lock(busy_bo->base.resv, ticket);
    855 struct ww_acquire_ctx *ticket)
    870 if (busy && !busy_bo && ticket !=
    896 ret = ttm_mem_evict_wait_busy(busy_bo, ctx, ticket);
    975 struct ww_acquire_ctx *ticket; local in function:ttm_bo_mem_force_space
    [all...]
/src/lib/libtelnet/ |
forward.c |
    38 rd_and_store_for_creds(context, auth_context, inbuf, ticket)
    42 krb5_ticket *ticket;
    58 if ((retval = krb5_cc_initialize(context, ccache, ticket->enc_part2->client)) != 0)
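These fragments come from storing a forwarded credential: the client principal carried in the decrypted ticket names the owner of the new credential cache. A hedged sketch of that sequence (the cache name, helper shape and error handling are illustrative, not the listed function):

#include <krb5.h>

static krb5_error_code
store_forwarded_creds(krb5_context context, krb5_auth_context auth_context,
    krb5_data *inbuf, krb5_ticket *ticket)
{
	krb5_creds **creds = NULL;
	krb5_ccache ccache = NULL;
	krb5_error_code retval;

	/* Decrypt the KRB-CRED message carrying the forwarded credentials. */
	retval = krb5_rd_cred(context, auth_context, inbuf, &creds, NULL);
	if (retval)
		return retval;

	/* Create a cache owned by the authenticated client principal. */
	retval = krb5_cc_resolve(context, "MEMORY:fwd", &ccache);
	if (retval == 0)
		retval = krb5_cc_initialize(context, ccache,
		    ticket->enc_part2->client);
	if (retval == 0)
		retval = krb5_cc_store_cred(context, ccache, creds[0]);

	if (ccache != NULL)
		krb5_cc_close(context, ccache);
	krb5_free_tgt_creds(context, creds);
	return retval;
}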
|
kerberos5.c |
    93 static krb5_ticket *ticket; variable in typeref:typename:krb5_ticket *
    316 server, NULL, NULL, &ticket);
    401 if (krb5_unparse_name(telnet_context, ticket->client, &name))
    405 ticket->client, UserNameRequested)) {
    466 ticket->client);
    601 krb5_kuserok(telnet_context, ticket->client, UserNameRequested)) {
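The checks above boil down to one question: may the principal in the received ticket act as the requested local user? A small sketch of that authorization step (the function shape and the printf are illustrative):

#include <krb5.h>
#include <stdio.h>

static int
authorized(krb5_context ctx, krb5_ticket *ticket, const char *localuser)
{
	char *name = NULL;

	if (krb5_unparse_name(ctx, ticket->client, &name) == 0) {
		printf("authenticated principal: %s\n", name);
		krb5_free_unparsed_name(ctx, name);
	}
	/* Non-zero means the principal is allowed to act as localuser. */
	return krb5_kuserok(ctx, ticket->client, localuser);
}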
|
/src/sys/external/bsd/drm2/dist/drm/amd/amdgpu/ |
amdgpu_csa.c |
    74 struct ww_acquire_ctx ticket; local in function:amdgpu_map_static_csa
    88 r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
    96 ttm_eu_backoff_reservation(&ticket, &list);
    108 ttm_eu_backoff_reservation(&ticket, &list);
    112 ttm_eu_backoff_reservation(&ticket, &list);
|
amdgpu_gem.c |
    180 struct ww_acquire_ctx ticket; local in function:amdgpu_gem_object_close
    193 r = ttm_eu_reserve_buffers(&ticket, &list, false, &duplicates);
    218 ttm_eu_backoff_reservation(&ticket, &list);
    597 struct ww_acquire_ctx ticket; local in function:amdgpu_gem_va_ioctl
    659 r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates);
    705 ttm_eu_backoff_reservation(&ticket, &list);
|
amdgpu_amdkfd_gpuvm.c |
    584 struct ww_acquire_ctx ticket; /* Reservation ticket */ member in struct:bo_vm_reservation_context
    629 ret = ttm_eu_reserve_buffers(&ctx->ticket, &ctx->list,
    702 ret = ttm_eu_reserve_buffers(&ctx->ticket, &ctx->list,
    736 ttm_eu_backoff_reservation(&ctx->ticket, &ctx->list);
    1790 struct ww_acquire_ctx ticket; local in function:validate_invalid_user_pages
    1826 ret = ttm_eu_reserve_buffers(&ticket, &resv_list, false, &duplicates);
    1884 ttm_eu_backoff_reservation(&ticket, &resv_list);
    2023 ret = ttm_eu_reserve_buffers(&ctx.ticket, &ctx.list,
    2115 ttm_eu_backoff_reservation(&ctx.ticket, &ctx.list)
    [all...] |
/src/sys/external/bsd/drm2/linux/ |
linux_dma_resv.c |
    382 * dma_resv_write_begin(robj, ticket)
    385 * ticket for it. The ticket must be passed to
    395 struct dma_resv_write_ticket *ticket)
    404 * dma_resv_write_commit(robj, ticket)
    407 * dma_resv_write_begin that returned ticket.
    416 struct dma_resv_write_ticket *ticket)
    429 * dma_resv_read_begin(robj, ticket)
    431 * Begin a read section, and initialize opaque ticket for it. The
    432 * ticket must be passed to dma_resv_read_exit, and th
    573 struct dma_resv_write_ticket ticket; local in function:dma_resv_add_excl_fence
    632 struct dma_resv_write_ticket ticket; local in function:dma_resv_add_shared_fence
    772 struct dma_resv_read_ticket ticket; local in function:dma_resv_get_fences_rcu
    1047 struct dma_resv_read_ticket ticket; local in function:dma_resv_test_signaled_rcu
    1134 struct dma_resv_read_ticket ticket; local in function:dma_resv_wait_timeout_rcu
    1290 struct dma_resv_read_ticket ticket; local in function:dma_resv_do_poll
    [all...] |
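In NetBSD's dma_resv shim the ticket is a read/write marker for lockless readers rather than a ww_acquire_ctx. Based only on the names visible above, a hedged sketch of the writer side (the exact begin/commit signatures are assumed, and the update in the middle is illustrative, not the real dma_resv_add_excl_fence()):

static void
write_section_sketch(struct dma_resv *robj)
{
	struct dma_resv_write_ticket ticket;

	/* Caller holds the reservation lock; readers run locklessly. */
	dma_resv_write_begin(robj, &ticket);
	/* ... publish updated exclusive/shared fence pointers ... */
	dma_resv_write_commit(robj, &ticket);
}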
/src/sys/external/bsd/drm2/dist/drm/vmwgfx/ |
vmwgfx_validation.h |
    66 * @ticket: Ticked used for ww mutex locking
    83 struct ww_acquire_ctx ticket; member in struct:vmw_validation_context
    174 return ttm_eu_reserve_buffers(&ctx->ticket, &ctx->bo_list, intr,
    190 ttm_eu_fence_buffer_objects(&ctx->ticket, &ctx->bo_list,
|
vmwgfx_resource.c |
    543 * @ticket: The ww aqcquire context to use, or NULL if trylocking.
    551 vmw_resource_check_buffer(struct ww_acquire_ctx *ticket,
    572 ret = ttm_eu_reserve_buffers(ticket, &val_list, interruptible, NULL);
    590 ttm_eu_backoff_reservation(ticket, &val_list);
    638 * @ticket: The ww acquire ctx used for reservation.
    642 vmw_resource_backoff_reservation(struct ww_acquire_ctx *ticket,
    652 ttm_eu_backoff_reservation(ticket, &val_list);
    661 * @ticket: The ww acquire ticket to use, or NULL if trylocking.
    665 static int vmw_resource_do_evict(struct ww_acquire_ctx *ticket,
    932 struct ww_acquire_ctx ticket; local in function:vmw_resource_evict_type
    [all...] |
/src/sys/arch/xen/xen/ |
xen_clock.c |
    191 * Enter a vCPU time read section and store a ticket in *tp, which
    217 * Exit a vCPU time read section with the ticket in *tp from
    276 struct xen_vcputime_ticket ticket; local in function:xen_vcputime_systime_ns
    291 vt = xen_vcputime_enter(&ticket);
    303 } while (!xen_vcputime_exit(vt, &ticket));
    433 struct xen_vcputime_ticket ticket; local in function:xen_vcputime_raw_systime_ns
    437 vt = xen_vcputime_enter(&ticket);
    439 } while (!xen_vcputime_exit(vt, &ticket));
    460 * Enter a wall clock read section and store a ticket in *tp,
    482 * Exit a wall clock read section with the ticket in *tp fro
    603 struct xen_vcputime_ticket ticket; local in function:xen_delay
    1117 struct xen_wallclock_ticket ticket; local in function:xen_wallclock_time
    [all...] |
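The do/while fragments above are version-counter read sections: the ticket records the hypervisor's version on entry, and xen_vcputime_exit() reports whether the snapshot stayed consistent, so the body re-reads until it does. A hedged sketch of that loop (the return type of xen_vcputime_enter() and the field read inside are assumptions):

static uint64_t
read_vcputime_sketch(void)
{
	struct xen_vcputime_ticket ticket;
	volatile struct vcpu_time_info *vt;
	uint64_t system_time;

	do {
		vt = xen_vcputime_enter(&ticket);
		system_time = vt->system_time;	/* illustrative field */
	} while (!xen_vcputime_exit(vt, &ticket));

	return system_time;
}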
/src/dist/pf/libexec/tftp-proxy/ |
filter.c |
    251 pfr.ticket = pfte[TRANS_FILTER].ticket;
    254 pfr.ticket = pfte[TRANS_NAT].ticket;
    257 pfr.ticket = pfte[TRANS_RDR].ticket;
    265 pfr.pool_ticket = pfp.ticket;
|
/src/dist/pf/usr.sbin/ftp-proxy/ |
filter.c |
    273 pfr.ticket = pfte[TRANS_FILTER].ticket;
    276 pfr.ticket = pfte[TRANS_NAT].ticket;
    279 pfr.ticket = pfte[TRANS_RDR].ticket;
    287 pfr.pool_ticket = pfp.ticket;
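Both proxies' filter.c get these tickets from the same pf ioctl transaction: DIOCXBEGIN hands back one ticket per ruleset in the pfioc_trans_e array, DIOCBEGINADDRS supplies the pool ticket, and every DIOCADDRULE must present both before DIOCXCOMMIT makes the rules active. A hedged, stripped-down sketch of that sequence (single ruleset, rule contents and error cleanup omitted):

#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <net/pfvar.h>
#include <string.h>

static int
add_one_rule(int dev)	/* dev = open("/dev/pf", O_RDWR) */
{
	struct pfioc_trans pft;
	struct pfioc_trans_e pfte;
	struct pfioc_pooladdr pfp;
	struct pfioc_rule pfr;

	/* Open a transaction on the main filter ruleset. */
	memset(&pft, 0, sizeof(pft));
	memset(&pfte, 0, sizeof(pfte));
	pfte.rs_num = PF_RULESET_FILTER;
	pft.size = 1;
	pft.esize = sizeof(pfte);
	pft.array = &pfte;
	if (ioctl(dev, DIOCXBEGIN, &pft) == -1)
		return -1;

	/* Get the address-pool ticket for the rule being added. */
	memset(&pfp, 0, sizeof(pfp));
	if (ioctl(dev, DIOCBEGINADDRS, &pfp) == -1)
		return -1;

	memset(&pfr, 0, sizeof(pfr));
	pfr.ticket = pfte.ticket;	/* transaction ticket */
	pfr.pool_ticket = pfp.ticket;	/* address pool ticket */
	/* ... fill in pfr.rule ... */
	if (ioctl(dev, DIOCADDRULE, &pfr) == -1)
		return -1;

	return ioctl(dev, DIOCXCOMMIT, &pft);
}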
|
/src/sys/dist/pf/net/ |
pf_ioctl.c |
    368 u_int32_t ticket; local in function:pfdetach
    385 if (pf_begin_rules(&ticket, i, &r) == 0)
    386 pf_commit_rules(ticket, i, &r);
    388 if (pf_begin_altq(&ticket) == 0)
    389 pf_commit_altq(ticket);
    422 if (pf_begin_rules(&ticket, i, anchor->name) == 0)
    423 pf_commit_rules(ticket, i, anchor->name);
    476 pf_get_pool(char *anchor, u_int32_t ticket, u_int8_t rule_action,
    491 if (check_ticket && ticket !=
    492 ruleset->rules[rs_num].active.ticket)
    [all...] |
/src/sys/external/bsd/common/linux/ |
linux_srcu.c |
    145 * Enter an srcu read section and return a ticket for it. Any
    147 * srcu_read_unlock(srcu, ticket).
    175 * srcu_read_unlock(srcu, ticket)
    178 * ticket. If there is a pending synchronize_srcu and we might be
    184 srcu_read_unlock(struct srcu_struct *srcu, int ticket)
    186 unsigned gen = ticket;
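Here the ticket is just the epoch returned by srcu_read_lock(); the same value must be handed back to srcu_read_unlock() so a pending synchronize_srcu() can tell when all readers from that epoch have drained. A minimal sketch of the read side (the protected access is illustrative):

#include <linux/srcu.h>

static void
srcu_reader_sketch(struct srcu_struct *srcu, void (*consume)(void *), void *p)
{
	int ticket;

	ticket = srcu_read_lock(srcu);
	consume(p);			/* access SRCU-protected data */
	srcu_read_unlock(srcu, ticket);
}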
|
/src/dist/pf/sbin/pfctl/ |
pfctl_qstats.c |
    141 if (pa.ticket != last_ticket && *root != NULL) {
    145 last_ticket = pa.ticket;
    156 pq.ticket = pa.ticket;
|
/src/sys/external/bsd/drm2/dist/drm/qxl/ |
qxl_release.c |
    267 ret = ttm_eu_reserve_buffers(&release->ticket, &release->bos,
    277 ttm_eu_backoff_reservation(&release->ticket, &release->bos);
    291 ttm_eu_backoff_reservation(&release->ticket, &release->bos);
    468 ww_acquire_fini(&release->ticket);
|
/src/sys/external/bsd/drm2/dist/drm/virtio/ |
virtgpu_gem.c |
    213 &objs->ticket);
    224 &objs->ticket);
|
/src/usr.bin/login/ |
k5login.c |
    114 * Verify the Kerberos ticket-granting ticket just retrieved for the
    132 krb5_ticket *ticket = NULL; local in function:k5_verify_creds
    154 /* talk to the kdc and construct the ticket */
    184 /* got ticket, try to use it */
    186 princ, NULL, NULL, &ticket);
    207 krb5_warn(kcontext, kerror, "Unable to verify host ticket");
    208 k5_log(kcontext, kerror, "can't verify v5 ticket (%s)",
    214 * The host/<host> ticket has been received _and_ verified.
    223 /* possibly ticket and packet need freeing here as well *
    [all...] |
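The fragments above verify a freshly obtained TGT by using it: request a service ticket for the local host and decode it against the host key, so a spoofed KDC cannot pass. A hedged sketch of that idea (the service name, helper shape and cleanup are illustrative, not the listed function):

#include <krb5.h>
#include <string.h>

static krb5_error_code
verify_tgt_sketch(krb5_context ctx, krb5_ccache ccache,
    krb5_principal host_princ, const char *hostname)
{
	krb5_auth_context auth = NULL;
	krb5_data packet;
	krb5_ticket *ticket = NULL;
	krb5_error_code kerror;

	memset(&packet, 0, sizeof(packet));

	/* Ask the KDC for a host/<hostname> ticket using the new TGT. */
	kerror = krb5_mk_req(ctx, &auth, 0, "host", (char *)hostname,
	    NULL, ccache, &packet);
	if (kerror)
		return kerror;

	/* Fresh auth context, then decode the ticket with the local key;
	 * success means the TGT really came from our KDC. */
	krb5_auth_con_free(ctx, auth);
	auth = NULL;
	kerror = krb5_rd_req(ctx, &auth, &packet, host_princ, NULL, NULL,
	    &ticket);

	if (ticket != NULL)
		krb5_free_ticket(ctx, ticket);
	krb5_free_data_contents(ctx, &packet);
	if (auth != NULL)
		krb5_auth_con_free(ctx, auth);
	return kerror;
}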
/src/sys/kern/ |
kern_heartbeat.c |
    297 const uint64_t ticket = local in function:set_max_period
    299 xc_wait(ticket);
|
/src/sys/external/bsd/drm2/dist/drm/radeon/ |
radeon_object.h | 146 struct ww_acquire_ctx *ticket,
|
/src/sys/external/bsd/drm2/dist/drm/nouveau/ |
nouveau_gem.c |
    337 struct ww_acquire_ctx ticket; member in struct:validate_op
    382 ww_acquire_fini(&op->ticket);
    398 ww_acquire_init(&op->ticket, &reservation_ww_class);
    431 ret = ttm_bo_reserve(&nvbo->bo, true, false, &op->ticket);
    439 &op->ticket);
    486 ww_acquire_done(&op->ticket);
|