    Searched refs:job (Results 1 - 25 of 124) sorted by relevancy

  /src/sys/external/bsd/drm2/dist/drm/amd/amdgpu/
amdgpu_job.c 39 struct amdgpu_job *job = to_amdgpu_job(s_job); local in function:amdgpu_job_timedout
44 if (amdgpu_ring_soft_recovery(ring, job->vmid, s_job->s_fence->parent)) {
50 amdgpu_vm_get_task_info(ring->adev, job->pasid, &ti);
52 job->base.sched->name, atomic_read(&ring->fence_drv.last_seq),
58 amdgpu_device_gpu_recover(ring->adev, job);
64 struct amdgpu_job **job, struct amdgpu_vm *vm)
73 *job = kzalloc(size, GFP_KERNEL);
74 if (!*job)
81 (*job)->base.sched = &adev->rings[0]->sched;
82 (*job)->vm = vm
126 struct amdgpu_job *job = to_amdgpu_job(s_job); local in function:amdgpu_job_free_cb
191 struct amdgpu_job *job = to_amdgpu_job(sched_job); local in function:amdgpu_job_dependency
223 struct amdgpu_job *job; local in function:amdgpu_job_run
    [all...]
amdgpu_job.h 40 #define AMDGPU_JOB_GET_VMID(job) ((job) ? (job)->vmid : 0)
70 struct amdgpu_job **job, struct amdgpu_vm *vm);
72 struct amdgpu_job **job);
74 void amdgpu_job_free_resources(struct amdgpu_job *job);
75 void amdgpu_job_free(struct amdgpu_job *job);
76 int amdgpu_job_submit(struct amdgpu_job *job, struct drm_sched_entity *entity,
78 int amdgpu_job_submit_direct(struct amdgpu_job *job, struct amdgpu_ring *ring,
amdgpu_ib.c 128 struct amdgpu_ib *ibs, struct amdgpu_job *job,
148 /* ring tests don't use a job */
149 if (job) {
150 vm = job->vm;
151 fence_ctx = job->base.s_fence ?
152 job->base.s_fence->scheduled.context : 0;
163 if (vm && !job->vmid) {
178 if (ring->funcs->emit_pipeline_sync && job &&
179 ((tmp = amdgpu_sync_get_fence(&job->sched_sync, NULL)) ||
181 amdgpu_vm_need_pipeline_sync(ring, job))) {
    [all...]
amdgpu_trace.h 37 #define AMDGPU_JOB_GET_TIMELINE_NAME(job) \
38 job->base.s_fence->finished.ops->get_timeline_name(&job->base.s_fence->finished)
157 __entry->dw = p->job->ibs[i].length_dw;
167 TP_PROTO(struct amdgpu_job *job),
168 TP_ARGS(job),
171 __string(timeline, AMDGPU_JOB_GET_TIMELINE_NAME(job))
175 __string(ring, to_amdgpu_ring(job->base.sched)->name)
180 __entry->sched_job_id = job->base.id;
181 __assign_str(timeline, AMDGPU_JOB_GET_TIMELINE_NAME(job))
    [all...]
amdgpu_vm_sdma.c 72 r = amdgpu_job_alloc_with_ib(p->adev, ndw * 4, &p->job);
79 r = amdgpu_sync_fence(&p->job->sync, exclusive, false);
87 return amdgpu_sync_resv(p->adev, &p->job->sync, root->tbo.base.resv,
103 struct amdgpu_ib *ib = p->job->ibs;
115 r = amdgpu_job_submit(p->job, entity, AMDGPU_FENCE_OWNER_VM, &f);
132 amdgpu_job_free(p->job);
150 struct amdgpu_ib *ib = p->job->ibs;
180 struct amdgpu_ib *ib = p->job->ibs;
218 ndw -= p->job->ibs->length_dw;
232 r = amdgpu_job_alloc_with_ib(p->adev, ndw * 4, &p->job);
    [all...]
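
The amdgpu_job.h prototypes and the amdgpu_vm_sdma.c hits above outline the usual lifecycle of a job: allocate it together with an indirect buffer, fill the IB, then either hand it to a scheduler entity with amdgpu_job_submit() or release it with amdgpu_job_free() on failure. A minimal sketch of that pattern, using only the calls visible in the excerpts (the IB contents and error paths are simplified placeholders, not the real amdgpu_vm_sdma.c code):

/* Illustrative sketch only; follows the calls shown in the hits above. */
static int
example_submit_job(struct amdgpu_device *adev, struct drm_sched_entity *entity,
    unsigned ndw)
{
	struct amdgpu_job *job;
	struct dma_fence *fence;
	int r;

	/* Allocate a job with one indirect buffer of ndw dwords. */
	r = amdgpu_job_alloc_with_ib(adev, ndw * 4, &job);
	if (r)
		return r;

	/* ... fill job->ibs[0] with packets and set its length_dw ... */

	/* Hand the job to the scheduler; on success the scheduler owns it. */
	r = amdgpu_job_submit(job, entity, AMDGPU_FENCE_OWNER_VM, &fence);
	if (r) {
		amdgpu_job_free(job);	/* submission failed: free it ourselves */
		return r;
	}

	dma_fence_put(fence);		/* drop our reference to the returned fence */
	return 0;
}
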
amdgpu_ids.c 200 * @ring: ring we want to submit job to
269 * @ring: ring we want to submit job to
272 * @job: job who wants to use the VMID
280 struct amdgpu_job *job,
297 job->vm_pd_addr != (*id)->pd_gpu_addr ||
325 job->vm_needs_flush = needs_flush;
333 * @ring: ring we want to submit job to
336 * @job: job who wants to use the VMI
    [all...]
jpeg_v2_0.h 33 void jpeg_v2_0_dec_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_job *job,
  /src/sys/arch/mips/atheros/dev/
arspi.c 93 #define JOB_WAIT 0x8 /* job must wait for WIP bits */
260 struct arspi_job *job = st->st_busprivate; local in function:arspi_transfer
262 kmem_free(job, sizeof(*job));
283 struct arspi_job *job; local in function:arspi_sched
297 job = st->st_busprivate;
306 if (job->job_flags & JOB_WAIT) {
312 } else if (job->job_flags & JOB_WREN) {
319 PUTREG(sc, ARSPI_REG_DATA, job->job_data);
322 PUTREG(sc, ARSPI_REG_OPCODE, job->job_opcode
347 struct arspi_job *job; local in function:arspi_done
466 struct arspi_job *job; local in function:arspi_make_job
579 struct arspi_job *job = st->st_busprivate; local in function:arspi_update_job
609 struct arspi_job *job = st->st_busprivate; local in function:arspi_finish_job
    [all...]
  /src/bin/sh/
jobs.h 50 #define SHOW_ISSIG 0x20 /* job was signalled */
51 #define SHOW_NO_FREE 0x40 /* do not free job */
56 * A job structure contains information about a job. A job is either a
69 struct job { struct
74 pid_t pgrp; /* process group of this job */
82 #define JOBWANTED 2 /* set if this is a job being sought */
83 #define JPIPEFAIL 4 /* set if -o pipefail when job created */
85 char jobctl; /* job running under job control *
    [all...]
jobs.c 95 static struct job *jobtab; /* array of jobs */
101 static int curjob = -1; /* current job */
105 STATIC void restartjob(struct job *);
106 STATIC void freejob(struct job *);
107 STATIC struct job *getjob(const char *, int);
108 STATIC int dowait(int, struct job *, struct job **);
112 STATIC int jobstatus(const struct job *, int);
113 STATIC int waitproc(int, struct job *, int *);
154 * Turn job control on and off
702 struct job *job, *last; local in function:waitcmd
944 int pg = 0, onep = 0, job = 0; local in function:jobidcmd
    [all...]
eval.h 47 struct job *jp; /* job structure for command */
  /src/usr.bin/make/
job.c 1 /* $NetBSD: job.c,v 1.519 2025/08/04 15:40:39 sjg Exp $ */
93 * job table is empty.
119 #include "job.h"
126 /* "@(#)job.c 8.2 (Berkeley) 3/19/94" */
127 MAKE_RCSID("$NetBSD: job.c,v 1.519 2025/08/04 15:40:39 sjg Exp $");
155 JOB_ST_FREE, /* Job is available */
156 JOB_ST_SET_UP, /* Job is allocated but otherwise invalid */
157 JOB_ST_RUNNING, /* Job is running, pid valid */
158 JOB_ST_FINISHED /* Job is done (i.e. after SIGCHLD) */
169 * A Job manages the shell commands that are run to create a single target
575 const Job *job; local in function:JobTable_Dump
680 Job *job; local in function:JobCondPassSig
794 Job *job; local in function:JobFindPid
1676 Job *job; local in function:Job_Make
1906 Job *job; local in function:JobReapChild
1966 Job *job; local in function:ContinueJobs
1982 Job *job; local in function:Job_CatchOutput
2397 Job *job; local in function:JobInterrupt
2475 Job *job; local in function:Job_AbortAll
    [all...]
trace.c 42 * Trace_Log Log an event about a particular make job.
48 #include "job.h"
61 "JOB",
84 Trace_Log(TrEvent event, Job *job)
104 if (job != NULL) {
105 GNode *gn = Job_Node(job);
108 Job_FlagsToString(job, flags, sizeof flags);
110 gn->name, Job_Pid(job), flags, type);
  /src/usr.bin/make/unit-tests/
job-output-long-lines.mk 1 # $NetBSD: job-output-long-lines.mk,v 1.4 2020/11/01 17:29:13 rillig Exp $
10 # As of 2020-09-27, the default job buffer size is 1024. When a job produces
12 # not terminated by a newline. Because of this missing newline, the job
13 # markers "--- job-a ---" and "--- job-b ---" are not always written at the
22 all: job-a job-b
24 job-a:
29 job-b
    [all...]
opt-tracefile.exp 5 <timestamp> 1 JOB <make-pid> <curdir> dependency1 <job-pid> --- OP_DEPENDS|OP_PHONY|OP_HAS_COMMANDS|OP_DEPS_FOUND|OP_MARK
6 <timestamp> 1 DON <make-pid> <curdir> dependency1 <job-pid> --- OP_DEPENDS|OP_PHONY|OP_HAS_COMMANDS|OP_DEPS_FOUND|OP_MARK
7 <timestamp> 1 JOB <make-pid> <curdir> dependency2 <job-pid> --- OP_DEPENDS|OP_PHONY|OP_HAS_COMMANDS|OP_DEPS_FOUND|OP_MARK
8 <timestamp> 1 DON <make-pid> <curdir> dependency2 <job-pid> --- OP_DEPENDS|OP_PHONY|OP_HAS_COMMANDS|OP_DEPS_FOUND|OP_MARK
9 <timestamp> 1 JOB <make-pid> <curdir> trace <job-pid> --- OP_DEPENDS|OP_PHONY|OP_HAS_COMMANDS|OP_DEPS_FOUND
10 <timestamp> 1 DON <make-pid> <curdir> trace <job-pid> --- OP_DEPENDS|OP_PHONY|OP_HAS_COMMANDS|OP_DEPS_FOUND
sh-errctl.exp 20 job started, job table:
21 job 0, status running, flags ---, pid <pid>
opt-debug-jobs.exp 18 job started, job table:
19 job 0, status running, flags ---, pid <pid>
jobs-empty-commands-error.mk 5 # dependencies, these files would end up empty. Since job.c 1.399 from
8 # After 2021-01-29, before job.c 1.435 2021-06-16, targets that could not be
opt-tracefile.mk 4 # record to a trace log whenever a job is started or completed.
9 @awk '{ $$1 = "<timestamp>"; $$4 = "<make-pid>"; if (NF >= 7) $$7 = "<job-pid>"; print }' opt-tracefile.log
  /src/sys/kern/
sys_aio.c 38 * Workers sleep on service_cv until a job is assigned.
42 * Job distribution:
54 * Job tracking:
55 * A hash table (by userspace aiocb pointer) maps aiocb -> kernel job.
64 * Enables future enhancements like dynamic job appending during processing.
324 * Destroy job structure
327 aio_job_fini(struct aio_job *job)
329 mutex_enter(&job->mtx);
330 aiowaitgrouplk_fini(&job->lk);
331 mutex_exit(&job->mtx)
447 struct aio_job *job, *tmp; local in function:aiosp_distribute_jobs
533 struct aio_job *job; local in function:aiosp_suspend
776 for (struct aio_job *job;;) { local in function:aiost_process_fg
843 struct aio_job *job = st->job; local in function:aiost_entry
890 for (struct aio_job *job;;) { local in function:aiost_entry
1136 struct aio_job *job; local in function:aiosp_validate_conflicts
1171 struct aio_job *job; local in function:aiosp_error
1197 struct aio_job *job = NULL; local in function:aiosp_return
1260 struct aio_job *job = NULL; local in function:aiocbp_lookup_job
1293 struct aio_job *job = NULL; local in function:aiocbp_remove_job
1741 struct aio_job *job; local in function:sys_aio_cancel
2175 struct aio_job *job; local in function:aio_print_jobs
    [all...]
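
The sys_aio.c comments above describe the job-tracking scheme: every submitted aiocb is hashed by its userspace pointer so that aio_error(), aio_return() and aio_cancel() can find the kernel-side job again. A schematic sketch of that lookup idea follows; the bucket array, the a_list link and the aiocb_uptr field are assumed names for illustration and do not claim to match the real aiocbp_lookup_job():

/* Schematic only; names below are assumptions, not the sys_aio.c identifiers. */
#define AIO_HASH_SIZE	64

static LIST_HEAD(, aio_job) jobs_hash[AIO_HASH_SIZE];

static u_int
aiocbp_hash_bucket(const void *aiocb_uptr)
{
	/* Hash the userspace aiocb pointer into a bucket index. */
	return ((uintptr_t)aiocb_uptr >> 4) & (AIO_HASH_SIZE - 1);
}

static struct aio_job *
example_lookup_job(const void *aiocb_uptr)
{
	struct aio_job *job;

	LIST_FOREACH(job, &jobs_hash[aiocbp_hash_bucket(aiocb_uptr)], a_list) {
		if (job->aiocb_uptr == aiocb_uptr)
			return job;	/* found the kernel job for this aiocb */
	}
	return NULL;
}
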
kern_threadpool.c 37 * can be given jobs to assign to a worker thread. Scheduling a job in
76 * touching remote CPUs' memory when scheduling a job, but that still
141 "struct threadpool *"/*pool*/, "struct threadpool_job *"/*job*/);
143 "struct threadpool *"/*pool*/, "struct threadpool_job *"/*job*/);
145 "struct threadpool *"/*pool*/, "struct threadpool_job *"/*job*/);
148 "struct threadpool_job *"/*job*/,
159 "struct threadpool_job *"/*job*/);
162 "struct threadpool_job *"/*job*/,
172 "struct threadpool *"/*pool*/, "struct threadpool_job *"/*job*/);
752 threadpool_job_init(struct threadpool_job *job, threadpool_job_fn_t fn
1043 struct threadpool_job *const job = TAILQ_FIRST(&pool->tp_jobs); local in function:threadpool_dispatcher_thread
1136 struct threadpool_job *const job = thread->tpt_job; local in function:threadpool_thread
    [all...]
  /src/tests/rump/kernspace/
threadpool.c 99 struct threadpool_job job; member in struct:test_job_data
105 test_job_func_schedule(struct threadpool_job *job)
108 container_of(job, struct test_job_data, job);
114 threadpool_job_done(job);
119 test_job_func_cancel(struct threadpool_job *job)
122 container_of(job, struct test_job_data, job);
133 threadpool_job_done(job);
142 threadpool_job_init(&data->job, fn, &data->mutex, "testjob")
    [all...]
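
The rump test above demonstrates the intended threadpool_job usage: embed the job in a caller-owned structure, recover the container with container_of() inside the job function, and call threadpool_job_done() under the interlock when the work is finished. A minimal sketch of that pattern, following the calls visible in the excerpt; the scheduling and teardown calls (threadpool_schedule_job(), threadpool_job_destroy()) are taken from the threadpool(9) interface, and the wait for completion is elided:

/* Minimal sketch of the embed-and-container_of pattern from the test above. */
struct example_work {
	kmutex_t		lock;	/* interlock passed to threadpool_job_init() */
	struct threadpool_job	job;	/* embedded threadpool job */
	int			result;
};

static void
example_job_func(struct threadpool_job *job)
{
	struct example_work *w = container_of(job, struct example_work, job);

	w->result = 42;			/* the actual work */

	mutex_enter(&w->lock);
	threadpool_job_done(job);	/* mark the job complete, interlock held */
	mutex_exit(&w->lock);
}

static void
example_run(struct threadpool *pool)
{
	struct example_work w;

	mutex_init(&w.lock, MUTEX_DEFAULT, IPL_NONE);
	threadpool_job_init(&w.job, example_job_func, &w.lock, "examplejob");

	mutex_enter(&w.lock);
	threadpool_schedule_job(pool, &w.job);	/* queue it on the pool */
	/* ... wait for completion here, e.g. on a condvar tied to w.lock ... */
	mutex_exit(&w.lock);

	threadpool_job_destroy(&w.job);
	mutex_destroy(&w.lock);
}
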
  /src/sys/external/bsd/drm2/drm/
drm_writeback.c 56 drm_writeback_cleanup_job(struct drm_writeback_job *job)
  /src/sys/external/bsd/drm2/dist/drm/scheduler/
sched_main.c 34 * backend operations to the scheduler like submitting a job to hardware run queue,
35 * returning the dependencies of a job etc.
127 * drm_sched_rq_select_entity - Select an entity which could provide a job to run
224 * drm_sched_suspend_timeout - Suspend scheduler job timeout
261 * drm_sched_resume_timeout - Resume scheduler job timeout
299 struct drm_sched_job *job; local in function:drm_sched_job_timedout
306 job = list_first_entry_or_null(&sched->ring_mirror_list,
309 if (job) {
311 * Remove the bad job so it cannot be freed by concurrent
315 list_del_init(&job->node)
694 struct drm_sched_job *job; local in function:drm_sched_get_cleanup_job
    [all...]
sched_entity.c 50 * @guilty: atomic_t set to 1 when a job on this queue
114 * Return true if entity could provide a job.
240 struct drm_sched_job *job = container_of(cb, struct drm_sched_job, local in function:drm_sched_entity_kill_jobs_cb
243 drm_sched_fence_finished(job->s_fence);
244 WARN_ON(job->s_fence->parent);
245 job->sched->ops->free_job(job);
258 struct drm_sched_job *job; local in function:drm_sched_entity_kill_jobs
261 while ((job = to_drm_sched_job(spsc_queue_pop(&entity->job_queue)))) {
262 struct drm_sched_fence *s_fence = job->s_fence
    [all...]
