Lines Matching defs:job
38 * Workers sleep on service_cv until a job is assigned.
42 * Job distribution:
54 * Job tracking:
55 * A hash table (by userspace aiocb pointer) maps aiocb -> kernel job.
64 * Enables future enhancements like dynamic job appending during processing.
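
For orientation, the matched lines below touch the following fields of struct aio_job; the job-tracking hash described above maps the userspace aiocb pointer to one of these. This reconstruction is a sketch only: field types, ordering, and any name not visible in the listing (e.g. the exact type of lio) are assumptions.

/*
 * Sketch of struct aio_job as implied by the matched lines; not the
 * file's actual definition.
 */
struct aio_job {
	kmutex_t		 mtx;		/* protects the job state */
	struct aiowaitgrouplk	 lk;		/* wait-group link, flushed by aio_job_mark_complete() */
	struct aiocb		 aiocbp;	/* kernel copy of the user aiocb (holds _errno/_retval/_state) */
	void			*aiocb_uptr;	/* userspace aiocb pointer, hash key */
	int			 aio_op;	/* AIO_READ / AIO_WRITE / AIO_SYNC / AIO_DSYNC */
	struct file		*fp;		/* held file, released on completion */
	struct proc		*p;		/* owning process, for vmspace and signal delivery */
	bool			 completed;	/* set by aio_job_mark_complete() */
	bool			 on_queue;	/* true while linked on aiosp->jobs */
	void			*lio;		/* list-I/O bookkeeping; real type not visible here */
	TAILQ_ENTRY(aio_job)	 list;		/* linkage for aiosp->jobs or fg->queue */
};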
324 * Destroy job structure
327 aio_job_fini(struct aio_job *job)
329 mutex_enter(&job->mtx);
330 aiowaitgrouplk_fini(&job->lk);
331 mutex_exit(&job->mtx);
332 mutex_destroy(&job->mtx);
336 * Mark job as complete
339 aio_job_mark_complete(struct aio_job *job)
341 mutex_enter(&job->mtx);
342 job->completed = true;
343 aio_file_release(job->fp);
344 job->fp = NULL;
346 aiowaitgrouplk_flush(&job->lk);
347 mutex_exit(&job->mtx);
349 aiost_sigsend(job->p, &job->aiocbp.aio_sigevent);
380 * Release a job back to the pool
383 aio_job_release(struct aio_job *job)
385 if (job->fp) {
386 aio_file_release(job->fp);
387 job->fp = NULL;
390 aio_job_fini(job);
391 pool_put(&aio_job_pool, job);
396 * Cancel a job pending on aiosp->jobs
399 aio_job_cancel(struct aiosp *aiosp, struct aio_job *job)
401 mutex_enter(&job->mtx);
402 TAILQ_REMOVE(&aiosp->jobs, job, list);
404 job->on_queue = false;
405 job->aiocbp._errno = ECANCELED;
406 mutex_exit(&job->mtx);
447 struct aio_job *job, *tmp;
457 TAILQ_FOREACH_SAFE(job, &sp->jobs, list, tmp) {
458 fp = job->fp;
485 aiost->job = NULL;
496 aiost->job = job;
499 TAILQ_REMOVE(&sp->jobs, job, list);
501 job->on_queue = false;
505 TAILQ_INSERT_TAIL(&fg->queue, job, list);
526 * AIOSP_SUSPEND_ANY returns when any job completes
533 struct aio_job *job;
554 error = aiocbp_lookup_job(aiosp, aiocbp_list[i], &job);
558 if (job == NULL) {
562 if (job->completed) {
567 mutex_exit(&job->mtx);
571 aiowaitgroup_join(wg, &job->lk);
573 mutex_exit(&job->mtx);
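
Putting the suspend fragments above together, the AIOSP_SUSPEND_ANY case amounts roughly to the loop below. This is a sketch under assumptions: struct aiowaitgroup and aiowaitgroup_wait() are invented names for the wait-group object and the final sleep, the parameter types are guesses, and error unwinding of a partially joined group is omitted.

static int
aiosp_suspend_any_sketch(struct aiosp *aiosp, struct aiowaitgroup *wg,
    const struct aiocb * const *aiocbp_list, int nent)
{
	struct aio_job *job;
	int error;

	for (int i = 0; i < nent; i++) {
		if (aiocbp_list[i] == NULL)
			continue;
		/* Lookup returns the job with job->mtx held, as in the lines above. */
		error = aiocbp_lookup_job(aiosp, aiocbp_list[i], &job);
		if (error)
			return error;
		if (job == NULL)
			continue;			/* not a known request */
		if (job->completed) {
			mutex_exit(&job->mtx);
			return 0;			/* ANY: one completion is enough */
		}
		aiowaitgroup_join(wg, &job->lk);	/* wake us when this job completes */
		mutex_exit(&job->mtx);
	}
	/* Sleep until aio_job_mark_complete() flushes a joined job's wait groups. */
	return aiowaitgroup_wait(wg);			/* hypothetical helper */
}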
697 * Enqueue a job for processing by the process's servicing pool
700 aiosp_enqueue_job(struct aiosp *aiosp, struct aio_job *job)
704 TAILQ_INSERT_TAIL(&aiosp->jobs, job, list);
706 job->on_queue = true;
727 st->job = NULL;
751 * Process a single job without coalescing.
754 aiost_process_singleton(struct aio_job *job)
756 if ((job->aio_op & AIO_READ) == AIO_READ) {
757 io_read(job);
758 } else if ((job->aio_op & AIO_WRITE) == AIO_WRITE) {
759 io_write(job);
760 } else if ((job->aio_op & AIO_SYNC) == AIO_SYNC) {
761 io_sync(job);
764 job->aio_op);
767 aio_job_mark_complete(job);
776 for (struct aio_job *job;;) {
778 job = TAILQ_FIRST(&fg->queue);
779 if (job) {
780 TAILQ_REMOVE(&fg->queue, job, list);
784 if (job == NULL) {
788 aiost_process_singleton(job);
794 * Handles both singleton jobs and file-grouped job batches.
842 } else if (st->job) {
843 struct aio_job *job = st->job;
846 aiost_process_singleton(job);
854 * a job
861 st->job = NULL;
884 if (st->job) {
885 aio_job_release(st->job);
890 for (struct aio_job *job;;) {
892 job = TAILQ_FIRST(&fg->queue);
893 if (job) {
894 TAILQ_REMOVE(&fg->queue, job, list);
898 if (job == NULL) {
902 aio_job_release(job);
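
The dispatch described at line 794 ("Handles both singleton jobs and file-grouped job batches") is spread across the fragments above. A condensed sketch of that dispatch follows; the worker and group type names (struct aiost, struct aio_fgroup) and the group lock do not appear in this listing and are assumptions.

static void
aiost_dispatch_sketch(struct aiost *st)			/* type name assumed */
{
	if (st->fg != NULL) {
		/* File-grouped batch: drain every job queued on the group. */
		struct aio_fgroup *fg = st->fg;		/* type name assumed */

		for (struct aio_job *job;;) {
			mutex_enter(&fg->mtx);		/* group lock name assumed */
			job = TAILQ_FIRST(&fg->queue);
			if (job)
				TAILQ_REMOVE(&fg->queue, job, list);
			mutex_exit(&fg->mtx);
			if (job == NULL)
				break;
			aiost_process_singleton(job);
		}
	} else if (st->job != NULL) {
		/* Singleton job assigned directly to this worker. */
		struct aio_job *job = st->job;

		aiost_process_singleton(job);
		st->job = NULL;
	}
}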
950 io_write(struct aio_job *job)
952 return io_write_fallback(job);
959 io_read(struct aio_job *job)
961 return io_read_fallback(job);
968 uio_construct(struct aio_job *job, struct file **fp, struct iovec *aiov,
971 struct aiocb *aiocbp = &job->aiocbp;
976 *fp = job->fp;
988 auio->uio_vmspace = job->p->p_vmspace;
997 io_write_fallback(struct aio_job *job)
1002 struct aiocb *aiocbp = &job->aiocbp;
1005 error = uio_construct(job, &fp, &aiov, &auio);
1021 job->aiocbp.aio_nbytes -= auio.uio_resid;
1022 job->aiocbp._retval = (error == 0) ? job->aiocbp.aio_nbytes : -1;
1024 job->aiocbp._errno = error;
1025 job->aiocbp._state = JOB_DONE;
1033 io_read_fallback(struct aio_job *job)
1038 struct aiocb *aiocbp = &job->aiocbp;
1041 error = uio_construct(job, &fp, &aiov, &auio);
1055 job->aiocbp.aio_nbytes -= auio.uio_resid;
1056 job->aiocbp._retval = (error == 0) ? job->aiocbp.aio_nbytes : -1;
1058 job->aiocbp._errno = error;
1059 job->aiocbp._state = JOB_DONE;
1067 io_sync(struct aio_job *job)
1069 struct file *fp = job->fp;
1085 if (job->aio_op & AIO_DSYNC) {
1094 job->aiocbp._retval = (error == 0) ? 0 : -1;
1096 job->aiocbp._errno = error;
1097 job->aiocbp._state = JOB_DONE;
1099 copyout(&job->aiocbp, job->aiocb_uptr, sizeof(job->aiocbp));
1130 * Ensure that the same job cannot be enqueued twice.
1136 struct aio_job *job;
1142 job = st->job;
1143 if (job && st->job->aiocb_uptr == uptr) {
1148 TAILQ_FOREACH(job, &st->fg->queue, list) {
1149 if (job->aiocb_uptr == uptr) {
1171 struct aio_job *job;
1174 error = aiocbp_lookup_job(aiosp, uptr, &job);
1175 if (error || job == NULL) {
1179 if (job->aiocbp._state == JOB_NONE) {
1180 mutex_exit(&job->mtx);
1184 *retval = job->aiocbp._errno;
1185 mutex_exit(&job->mtx);
1197 struct aio_job *job = NULL;
1200 error = aiocbp_remove_job(aiosp, uptr, &job, &handle);
1205 if (job == NULL) {
1212 if (job->aiocbp._state != JOB_DONE) {
1213 mutex_exit(&job->mtx);
1220 *retval = job->aiocbp._retval;
1222 if (job->fp) {
1223 aio_file_release(job->fp);
1224 job->fp = NULL;
1227 job->aiocbp._errno = 0;
1228 job->aiocbp._retval = -1;
1229 job->aiocbp._state = JOB_NONE;
1231 mutex_exit(&job->mtx);
1236 aio_job_fini(job);
1237 pool_put(&aio_job_pool, job);
1260 struct aio_job *job = NULL;
1269 job = aiocbp->job;
1270 if (job) {
1271 mutex_enter(&job->mtx);
1275 *jobp = job;
1286 * Detach the job and return it with job->mtx held
1293 struct aio_job *job = NULL;
1305 job = aiocbp->job;
1306 if (job) {
1307 mutex_enter(&job->mtx);
1315 *jobp = job;
1341 found->job = aiocbp->job;
1453 * Initialize wait group link for job tracking.
1466 * Caller must hold job->mtx
1502 * Notify all wait groups of job completion.
1567 * Enqueue the job.
1616 * Look for an already existing job. If found, the job is in progress.
1649 /* Allocate and initialize a new AIO job */
1675 aiocbp->job = a_job;
1688 * Add the job to the queue, update the counters, and
1689 * notify the AIO worker thread to handle the job.
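
The enqueue step documented at lines 1688-1689 pairs the insertion shown at lines 700-706 with a wakeup of the servicing pool. A minimal sketch, assuming the pool lock, the jobs_count counter, and the placement of service_cv on struct aiosp (the header comment only says workers sleep on service_cv):

static void
aiosp_enqueue_and_notify_sketch(struct aiosp *aiosp, struct aio_job *job)
{
	mutex_enter(&aiosp->mtx);			/* pool lock name assumed */
	TAILQ_INSERT_TAIL(&aiosp->jobs, job, list);	/* as in aiosp_enqueue_job() */
	job->on_queue = true;
	aiosp->jobs_count++;				/* hypothetical counter */
	cv_signal(&aiosp->service_cv);			/* wake a sleeping worker */
	mutex_exit(&aiosp->mtx);
}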
1741 struct aio_job *job;
1783 * if aiocbp_uptr != NULL, then just cancel the job associated with that
1788 error = aiocbp_lookup_job(aiosp, aiocbp_uptr, &job);
1789 if (error || job == NULL) {
1794 if (job->completed) {
1801 * If the job is on sp->jobs (signified by job->on_queue)
1806 if (job->on_queue) {
1807 aio_job_cancel(aiosp, job);
1808 aio_job_mark_complete(job);
1812 mutex_exit(&job->mtx);
1818 TAILQ_FOREACH_SAFE(job, &aiosp->jobs, list, tmp) {
1819 if (job->aiocbp.aio_fildes == (int)fildes) {
1820 aio_job_cancel(aiosp, job);
1821 aio_job_mark_complete(job);
2175 struct aio_job *job;
2201 TAILQ_FOREACH(job, &sp->jobs, list) {
2203 job->aio_op, job->aiocbp._errno, job->aiocbp._state,
2204 job->aiocb_uptr, job->completed);
2206 job->aiocbp.aio_fildes,
2207 (unsigned long long)job->aiocbp.aio_offset,
2208 (void *)job->aiocbp.aio_buf,
2209 (size_t)job->aiocbp.aio_nbytes,
2210 job->lio);
2219 if (st->job) {
2220 struct aio_job *j = st->job;
2222 " job: op=%d err=%d state=%d uptr=%p\n",
2242 /* aiocbp hash maps user aiocbp to kernel job */
2254 (*pr)(" uptr=%p job=%p", hc->uptr, hc->job);