/*	$NetBSD: scsipi_base.c,v 1.178.4.2 2018/11/26 01:52:47 pgoyette Exp $	*/

/*-
 * Copyright (c) 1998, 1999, 2000, 2002, 2003, 2004 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Charles M. Hannum; by Jason R. Thorpe of the Numerical Aerospace
 * Simulation Facility, NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: scsipi_base.c,v 1.178.4.2 2018/11/26 01:52:47 pgoyette Exp $");

#ifdef _KERNEL_OPT
#include "opt_scsi.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/buf.h>
#include <sys/uio.h>
#include <sys/malloc.h>
#include <sys/pool.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/proc.h>
#include <sys/kthread.h>
#include <sys/hash.h>
#include <sys/atomic.h>

#include <dev/scsipi/scsi_spc.h>
#include <dev/scsipi/scsipi_all.h>
#include <dev/scsipi/scsipi_disk.h>
#include <dev/scsipi/scsipiconf.h>
#include <dev/scsipi/scsipi_base.h>

#include <dev/scsipi/scsi_all.h>
#include <dev/scsipi/scsi_message.h>

#include <machine/param.h>

static int	scsipi_complete(struct scsipi_xfer *);
static void	scsipi_request_sense(struct scsipi_xfer *);
static int	scsipi_enqueue(struct scsipi_xfer *);
static void	scsipi_run_queue(struct scsipi_channel *chan);

static void	scsipi_completion_thread(void *);

static void	scsipi_get_tag(struct scsipi_xfer *);
static void	scsipi_put_tag(struct scsipi_xfer *);

static int	scsipi_get_resource(struct scsipi_channel *);
static void	scsipi_put_resource(struct scsipi_channel *);

static void	scsipi_async_event_max_openings(struct scsipi_channel *,
		    struct scsipi_max_openings *);
static void	scsipi_async_event_channel_reset(struct scsipi_channel *);

static void	scsipi_channel_freeze_locked(struct scsipi_channel *, int);

static void	scsipi_adapter_lock(struct scsipi_adapter *adapt);
static void	scsipi_adapter_unlock(struct scsipi_adapter *adapt);

static struct pool scsipi_xfer_pool;

int scsipi_xs_count = 0;

/*
 * scsipi_init:
 *
 *	Called when a scsibus or atapibus is attached to the system
 *	to initialize shared data structures.
 */
void
scsipi_init(void)
{
	static int scsipi_init_done;

	if (scsipi_init_done)
		return;
	scsipi_init_done = 1;

	/* Initialize the scsipi_xfer pool. */
	pool_init(&scsipi_xfer_pool, sizeof(struct scsipi_xfer), 0,
	    0, 0, "scxspl", NULL, IPL_BIO);
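	/* Pre-allocate one page's worth of xfers up front. */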
	if (pool_prime(&scsipi_xfer_pool,
	    PAGE_SIZE / sizeof(struct scsipi_xfer)) == ENOMEM) {
		printf("WARNING: not enough memory for scsipi_xfer_pool\n");
	}

	scsipi_ioctl_init();
}

/*
 * scsipi_channel_init:
 *
 *	Initialize a scsipi_channel when it is attached.
 */
int
scsipi_channel_init(struct scsipi_channel *chan)
{
	struct scsipi_adapter *adapt = chan->chan_adapter;
	int i;

	/* Initialize shared data. */
	scsipi_init();

	/* Initialize the queues. */
	TAILQ_INIT(&chan->chan_queue);
	TAILQ_INIT(&chan->chan_complete);

	for (i = 0; i < SCSIPI_CHAN_PERIPH_BUCKETS; i++)
		LIST_INIT(&chan->chan_periphtab[i]);

	/*
	 * Create the asynchronous completion thread.
	 */
	if (kthread_create(PRI_NONE, 0, NULL, scsipi_completion_thread, chan,
	    &chan->chan_thread, "%s", chan->chan_name)) {
		aprint_error_dev(adapt->adapt_dev,
		    "unable to create completion thread for "
		    "channel %d\n", chan->chan_channel);
		panic("scsipi_channel_init");
	}

	return 0;
}

/*
 * scsipi_channel_shutdown:
 *
 *	Shutdown a scsipi_channel.
 */
void
scsipi_channel_shutdown(struct scsipi_channel *chan)
{

	mutex_enter(chan_mtx(chan));
	/*
	 * Shut down the completion thread.
	 */
	chan->chan_tflags |= SCSIPI_CHANT_SHUTDOWN;
	cv_broadcast(chan_cv_complete(chan));

	/*
	 * Now wait for the thread to exit.
	 */
	while (chan->chan_thread != NULL)
		cv_wait(chan_cv_thread(chan), chan_mtx(chan));
	mutex_exit(chan_mtx(chan));
}

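/*
 * scsipi_chan_periph_hash:
 *
 *	Hash a (target, lun) pair to a bucket index in the channel's
 *	periph hash table.
 */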
static uint32_t
scsipi_chan_periph_hash(uint64_t t, uint64_t l)
{
	uint32_t hash;

	hash = hash32_buf(&t, sizeof(t), HASH32_BUF_INIT);
	hash = hash32_buf(&l, sizeof(l), hash);

	return hash & SCSIPI_CHAN_PERIPH_HASHMASK;
}

/*
 * scsipi_insert_periph:
 *
 *	Insert a periph into the channel.
 */
void
scsipi_insert_periph(struct scsipi_channel *chan, struct scsipi_periph *periph)
{
	uint32_t hash;

	hash = scsipi_chan_periph_hash(periph->periph_target,
	    periph->periph_lun);

	mutex_enter(chan_mtx(chan));
	LIST_INSERT_HEAD(&chan->chan_periphtab[hash], periph, periph_hash);
	mutex_exit(chan_mtx(chan));
}

/*
 * scsipi_remove_periph:
 *
 *	Remove a periph from the channel.
 */
void
scsipi_remove_periph(struct scsipi_channel *chan,
    struct scsipi_periph *periph)
{

	LIST_REMOVE(periph, periph_hash);
}

/*
 * scsipi_lookup_periph:
 *
 *	Lookup a periph on the specified channel.
 */
static struct scsipi_periph *
scsipi_lookup_periph_internal(struct scsipi_channel *chan, int target, int lun,
    bool lock)
{
	struct scsipi_periph *periph;
	uint32_t hash;

	if (target >= chan->chan_ntargets ||
	    lun >= chan->chan_nluns)
		return NULL;

	hash = scsipi_chan_periph_hash(target, lun);

	if (lock)
		mutex_enter(chan_mtx(chan));
	LIST_FOREACH(periph, &chan->chan_periphtab[hash], periph_hash) {
		if (periph->periph_target == target &&
		    periph->periph_lun == lun)
			break;
	}
	if (lock)
		mutex_exit(chan_mtx(chan));

	return periph;
}

struct scsipi_periph *
scsipi_lookup_periph_locked(struct scsipi_channel *chan, int target, int lun)
{
	return scsipi_lookup_periph_internal(chan, target, lun, false);
}

struct scsipi_periph *
scsipi_lookup_periph(struct scsipi_channel *chan, int target, int lun)
{
	return scsipi_lookup_periph_internal(chan, target, lun, true);
}

/*
 * scsipi_get_resource:
 *
 *	Allocate a single xfer `resource' from the channel.
 *
 *	NOTE: Must be called with channel lock held
 */
static int
scsipi_get_resource(struct scsipi_channel *chan)
{
	struct scsipi_adapter *adapt = chan->chan_adapter;

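	/*
	 * Openings are counted per-channel if the adapter says so;
	 * otherwise a single count is shared by all channels on the
	 * adapter.
	 */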
	if (chan->chan_flags & SCSIPI_CHAN_OPENINGS) {
		if (chan->chan_openings > 0) {
			chan->chan_openings--;
			return 1;
		}
		return 0;
	}

	if (adapt->adapt_openings > 0) {
		adapt->adapt_openings--;
		return 1;
	}
	return 0;
}

/*
 * scsipi_grow_resources:
 *
 *	Attempt to grow resources for a channel.  If this succeeds,
 *	we allocate one for our caller.
 *
 *	NOTE: Must be called with channel lock held
 */
static inline int
scsipi_grow_resources(struct scsipi_channel *chan)
{

	if (chan->chan_flags & SCSIPI_CHAN_CANGROW) {
		if ((chan->chan_flags & SCSIPI_CHAN_TACTIVE) == 0) {
			mutex_exit(chan_mtx(chan));
			scsipi_adapter_request(chan,
			    ADAPTER_REQ_GROW_RESOURCES, NULL);
			mutex_enter(chan_mtx(chan));
			return scsipi_get_resource(chan);
		}
		/*
		 * Ask the channel thread to do it.  It'll have to thaw
		 * the queue.
		 */
		scsipi_channel_freeze_locked(chan, 1);
		chan->chan_tflags |= SCSIPI_CHANT_GROWRES;
		cv_broadcast(chan_cv_complete(chan));
		return 0;
	}

	return 0;
}

/*
 * scsipi_put_resource:
 *
 *	Free a single xfer `resource' to the channel.
 *
 *	NOTE: Must be called with channel lock held
 */
static void
scsipi_put_resource(struct scsipi_channel *chan)
{
	struct scsipi_adapter *adapt = chan->chan_adapter;

	if (chan->chan_flags & SCSIPI_CHAN_OPENINGS)
		chan->chan_openings++;
	else
		adapt->adapt_openings++;
}

/*
 * scsipi_get_tag:
 *
 *	Get a tag ID for the specified xfer.
 *
 *	NOTE: Must be called with channel lock held
 */
static void
scsipi_get_tag(struct scsipi_xfer *xs)
{
	struct scsipi_periph *periph = xs->xs_periph;
	int bit, tag;
	u_int word;

	KASSERT(mutex_owned(chan_mtx(periph->periph_channel)));

	bit = 0;	/* XXX gcc */
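	/*
	 * Each periph_freetags[] word tracks 32 tag IDs; ffs() returns a
	 * 1-based bit index (0 if no bit is set), hence the decrement
	 * below before the tag is formed as (word << 5) | bit.
	 */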
	for (word = 0; word < PERIPH_NTAGWORDS; word++) {
		bit = ffs(periph->periph_freetags[word]);
		if (bit != 0)
			break;
	}
#ifdef DIAGNOSTIC
	if (word == PERIPH_NTAGWORDS) {
		scsipi_printaddr(periph);
		printf("no free tags\n");
		panic("scsipi_get_tag");
	}
#endif

	bit -= 1;
	periph->periph_freetags[word] &= ~(1 << bit);
	tag = (word << 5) | bit;

	/* XXX Should eventually disallow this completely. */
	if (tag >= periph->periph_openings) {
		scsipi_printaddr(periph);
		printf("WARNING: tag %d greater than available openings %d\n",
		    tag, periph->periph_openings);
	}

	xs->xs_tag_id = tag;
}

/*
 * scsipi_put_tag:
 *
 *	Put the tag ID for the specified xfer back into the pool.
 *
 *	NOTE: Must be called with channel lock held
 */
static void
scsipi_put_tag(struct scsipi_xfer *xs)
{
	struct scsipi_periph *periph = xs->xs_periph;
	int word, bit;

	KASSERT(mutex_owned(chan_mtx(periph->periph_channel)));

	word = xs->xs_tag_id >> 5;
	bit = xs->xs_tag_id & 0x1f;

	periph->periph_freetags[word] |= (1 << bit);
}

/*
 * scsipi_get_xs:
 *
 *	Allocate an xfer descriptor and associate it with the
 *	specified peripheral.  If the peripheral has no more
 *	available command openings, we either block waiting for
 *	one to become available, or fail.
 *
 *	When this routine is called with the channel lock held
 *	the flags must include XS_CTL_NOSLEEP.
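 *	Otherwise the lock is taken and released internally, and
 *	this routine may sleep waiting for an opening.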
 */
struct scsipi_xfer *
scsipi_get_xs(struct scsipi_periph *periph, int flags)
{
	struct scsipi_xfer *xs;
	bool lock = (flags & XS_CTL_NOSLEEP) == 0;

	SC_DEBUG(periph, SCSIPI_DB3, ("scsipi_get_xs\n"));

	KASSERT(!cold);

#ifdef DIAGNOSTIC
	/*
	 * URGENT commands can never be ASYNC.
	 */
	if ((flags & (XS_CTL_URGENT|XS_CTL_ASYNC)) ==
	    (XS_CTL_URGENT|XS_CTL_ASYNC)) {
		scsipi_printaddr(periph);
		printf("URGENT and ASYNC\n");
		panic("scsipi_get_xs");
	}
#endif

	/*
	 * Wait for a command opening to become available.  Rules:
	 *
	 *	- All xfers must wait for an available opening.
	 *	  Exception: URGENT xfers can proceed when
	 *	  active == openings, because we use the opening
	 *	  of the command we're recovering for.
	 *	- if the periph has sense pending, only URGENT & REQSENSE
	 *	  xfers may proceed.
	 *
	 *	- If the periph is recovering, only URGENT xfers may
	 *	  proceed.
	 *
	 *	- If the periph is currently executing a recovery
	 *	  command, URGENT commands must block, because only
	 *	  one recovery command can execute at a time.
	 */
	if (lock)
		mutex_enter(chan_mtx(periph->periph_channel));
	for (;;) {
		if (flags & XS_CTL_URGENT) {
			if (periph->periph_active > periph->periph_openings)
				goto wait_for_opening;
			if (periph->periph_flags & PERIPH_SENSE) {
				if ((flags & XS_CTL_REQSENSE) == 0)
					goto wait_for_opening;
			} else {
				if ((periph->periph_flags &
				    PERIPH_RECOVERY_ACTIVE) != 0)
					goto wait_for_opening;
				periph->periph_flags |= PERIPH_RECOVERY_ACTIVE;
			}
			break;
		}
		if (periph->periph_active >= periph->periph_openings ||
		    (periph->periph_flags & PERIPH_RECOVERING) != 0)
			goto wait_for_opening;
		periph->periph_active++;
		KASSERT(mutex_owned(chan_mtx(periph->periph_channel)));
		break;

 wait_for_opening:
		if (flags & XS_CTL_NOSLEEP) {
			KASSERT(!lock);
			return NULL;
		}
		KASSERT(lock);
		SC_DEBUG(periph, SCSIPI_DB3, ("sleeping\n"));
		periph->periph_flags |= PERIPH_WAITING;
		cv_wait(periph_cv_periph(periph),
		    chan_mtx(periph->periph_channel));
	}
	if (lock)
		mutex_exit(chan_mtx(periph->periph_channel));

	SC_DEBUG(periph, SCSIPI_DB3, ("calling pool_get\n"));
	xs = pool_get(&scsipi_xfer_pool,
	    ((flags & XS_CTL_NOSLEEP) != 0 ? PR_NOWAIT : PR_WAITOK));
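	/*
	 * If allocation failed, undo the opening/recovery accounting
	 * done above before reporting the failure.
	 */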
	if (xs == NULL) {
		if (lock)
			mutex_enter(chan_mtx(periph->periph_channel));
		if (flags & XS_CTL_URGENT) {
			if ((flags & XS_CTL_REQSENSE) == 0)
				periph->periph_flags &= ~PERIPH_RECOVERY_ACTIVE;
		} else
			periph->periph_active--;
		if (lock)
			mutex_exit(chan_mtx(periph->periph_channel));
		scsipi_printaddr(periph);
		printf("unable to allocate %sscsipi_xfer\n",
		    (flags & XS_CTL_URGENT) ? "URGENT " : "");
	}

	SC_DEBUG(periph, SCSIPI_DB3, ("returning\n"));

	if (xs != NULL) {
		memset(xs, 0, sizeof(*xs));
		callout_init(&xs->xs_callout, 0);
		xs->xs_periph = periph;
		xs->xs_control = flags;
		xs->xs_status = 0;
		if ((flags & XS_CTL_NOSLEEP) == 0)
			mutex_enter(chan_mtx(periph->periph_channel));
		TAILQ_INSERT_TAIL(&periph->periph_xferq, xs, device_q);
		KASSERT(mutex_owned(chan_mtx(periph->periph_channel)));
		if ((flags & XS_CTL_NOSLEEP) == 0)
			mutex_exit(chan_mtx(periph->periph_channel));
	}
	return xs;
}

/*
 * scsipi_put_xs:
 *
 *	Release an xfer descriptor, decreasing the outstanding command
 *	count for the peripheral.  If there is a thread waiting for
 *	an opening, wake it up.  If not, kick any queued I/O the
 *	peripheral may have.
 *
 *	NOTE: Must be called with channel lock held
 */
void
scsipi_put_xs(struct scsipi_xfer *xs)
{
	struct scsipi_periph *periph = xs->xs_periph;
	int flags = xs->xs_control;

	SC_DEBUG(periph, SCSIPI_DB3, ("scsipi_free_xs\n"));
	KASSERT(mutex_owned(chan_mtx(periph->periph_channel)));

	TAILQ_REMOVE(&periph->periph_xferq, xs, device_q);
	callout_destroy(&xs->xs_callout);
	pool_put(&scsipi_xfer_pool, xs);

#ifdef DIAGNOSTIC
	if ((periph->periph_flags & PERIPH_RECOVERY_ACTIVE) != 0 &&
	    periph->periph_active == 0) {
		scsipi_printaddr(periph);
553 printf("recovery without a command to recovery for\n");
		panic("scsipi_put_xs");
	}
#endif

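	/*
	 * URGENT xfers borrowed the opening of the command being
	 * recovered, so only non-URGENT xfers give back an opening here.
	 */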
	if (flags & XS_CTL_URGENT) {
		if ((flags & XS_CTL_REQSENSE) == 0)
			periph->periph_flags &= ~PERIPH_RECOVERY_ACTIVE;
	} else
		periph->periph_active--;
	if (periph->periph_active == 0 &&
	    (periph->periph_flags & PERIPH_WAITDRAIN) != 0) {
		periph->periph_flags &= ~PERIPH_WAITDRAIN;
		cv_broadcast(periph_cv_active(periph));
	}

	if (periph->periph_flags & PERIPH_WAITING) {
		periph->periph_flags &= ~PERIPH_WAITING;
		cv_broadcast(periph_cv_periph(periph));
	} else {
		if (periph->periph_switch->psw_start != NULL &&
		    device_is_active(periph->periph_dev)) {
			SC_DEBUG(periph, SCSIPI_DB2,
			    ("calling private start()\n"));
			(*periph->periph_switch->psw_start)(periph);
		}
	}
}

/*
 * scsipi_channel_freeze:
 *
 *	Freeze a channel's xfer queue.
 */
void
scsipi_channel_freeze(struct scsipi_channel *chan, int count)
{
	bool lock = chan_running(chan) > 0;

	if (lock)
		mutex_enter(chan_mtx(chan));
	chan->chan_qfreeze += count;
	if (lock)
		mutex_exit(chan_mtx(chan));
}

static void
scsipi_channel_freeze_locked(struct scsipi_channel *chan, int count)
{

	chan->chan_qfreeze += count;
}

/*
 * scsipi_channel_thaw:
 *
 *	Thaw a channel's xfer queue.
 */
void
scsipi_channel_thaw(struct scsipi_channel *chan, int count)
{
	bool lock = chan_running(chan) > 0;

	if (lock)
		mutex_enter(chan_mtx(chan));
	chan->chan_qfreeze -= count;
	/*
	 * Don't let the freeze count go negative.
	 *
	 * Presumably the adapter driver could keep track of this,
	 * but it might just be easier to do this here so as to allow
	 * multiple callers, including those outside the adapter driver.
	 */
	if (chan->chan_qfreeze < 0) {
		chan->chan_qfreeze = 0;
	}
	if (lock)
		mutex_exit(chan_mtx(chan));

	/*
	 * Don't kick the queue until the channel is running.
	 */
	if (!lock)
		return;

	/*
	 * Kick the channel's queue here.  Note, we may be running in
	 * interrupt context (softclock or HBA's interrupt), so the adapter
	 * driver had better not sleep.
	 */
	if (chan->chan_qfreeze == 0)
		scsipi_run_queue(chan);
}

/*
 * scsipi_channel_timed_thaw:
 *
 *	Thaw a channel after some time has expired.  This will also
 *	run the channel's queue if the freeze count has reached 0.
 */
void
scsipi_channel_timed_thaw(void *arg)
{
	struct scsipi_channel *chan = arg;

	scsipi_channel_thaw(chan, 1);
}

/*
 * scsipi_periph_freeze:
 *
 *	Freeze a device's xfer queue.
 */
void
scsipi_periph_freeze_locked(struct scsipi_periph *periph, int count)
{

	periph->periph_qfreeze += count;
}

/*
 * scsipi_periph_thaw:
 *
 *	Thaw a device's xfer queue.
 */
void
scsipi_periph_thaw_locked(struct scsipi_periph *periph, int count)
{

	periph->periph_qfreeze -= count;
#ifdef DIAGNOSTIC
	if (periph->periph_qfreeze < 0) {
		static const char pc[] = "periph freeze count < 0";
		scsipi_printaddr(periph);
		printf("%s\n", pc);
		panic(pc);
	}
#endif
	if (periph->periph_qfreeze == 0 &&
	    (periph->periph_flags & PERIPH_WAITING) != 0)
		cv_broadcast(periph_cv_periph(periph));
}

void
scsipi_periph_freeze(struct scsipi_periph *periph, int count)
{

	mutex_enter(chan_mtx(periph->periph_channel));
	scsipi_periph_freeze_locked(periph, count);
	mutex_exit(chan_mtx(periph->periph_channel));
}

void
scsipi_periph_thaw(struct scsipi_periph *periph, int count)
{

	mutex_enter(chan_mtx(periph->periph_channel));
	scsipi_periph_thaw_locked(periph, count);
	mutex_exit(chan_mtx(periph->periph_channel));
}

/*
 * scsipi_periph_timed_thaw:
 *
 *	Thaw a device after some time has expired.
 */
void
scsipi_periph_timed_thaw(void *arg)
{
	struct scsipi_periph *periph = arg;
	struct scsipi_channel *chan = periph->periph_channel;

	callout_stop(&periph->periph_callout);

	mutex_enter(chan_mtx(chan));
	scsipi_periph_thaw_locked(periph, 1);
	if ((periph->periph_channel->chan_flags & SCSIPI_CHAN_TACTIVE) == 0) {
		/*
		 * Kick the channel's queue here.  Note, we're running in
		 * interrupt context (softclock), so the adapter driver
		 * had better not sleep.
		 */
		mutex_exit(chan_mtx(chan));
		scsipi_run_queue(periph->periph_channel);
	} else {
		/*
		 * Tell the completion thread to kick the channel's queue here.
		 */
		periph->periph_channel->chan_tflags |= SCSIPI_CHANT_KICK;
		cv_broadcast(chan_cv_complete(chan));
		mutex_exit(chan_mtx(chan));
	}
}

/*
 * scsipi_wait_drain:
 *
 *	Wait for a periph's pending xfers to drain.
 */
void
scsipi_wait_drain(struct scsipi_periph *periph)
{
	struct scsipi_channel *chan = periph->periph_channel;

	mutex_enter(chan_mtx(chan));
	while (periph->periph_active != 0) {
		periph->periph_flags |= PERIPH_WAITDRAIN;
		cv_wait(periph_cv_active(periph), chan_mtx(chan));
	}
	mutex_exit(chan_mtx(chan));
}

/*
 * scsipi_kill_pending:
 *
 *	Kill off all pending xfers for a periph.
 *
 *	NOTE: Must be called with channel lock held
 */
void
scsipi_kill_pending(struct scsipi_periph *periph)
{
	struct scsipi_channel *chan = periph->periph_channel;

	(*chan->chan_bustype->bustype_kill_pending)(periph);
	while (periph->periph_active != 0) {
		periph->periph_flags |= PERIPH_WAITDRAIN;
		cv_wait(periph_cv_active(periph), chan_mtx(chan));
	}
}

/*
 * scsipi_print_cdb:
 *
 *	Print a command descriptor block (for debugging purposes, error
 *	messages, SCSIVERBOSE, ...).
 */
void
scsipi_print_cdb(struct scsipi_generic *cmd)
{
	int i, j;

	printf("0x%02x", cmd->opcode);

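	/* The opcode's group ID implies the CDB length. */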
	switch (CDB_GROUPID(cmd->opcode)) {
	case CDB_GROUPID_0:
		j = CDB_GROUP0;
		break;
	case CDB_GROUPID_1:
		j = CDB_GROUP1;
		break;
	case CDB_GROUPID_2:
		j = CDB_GROUP2;
		break;
	case CDB_GROUPID_3:
		j = CDB_GROUP3;
		break;
	case CDB_GROUPID_4:
		j = CDB_GROUP4;
		break;
	case CDB_GROUPID_5:
		j = CDB_GROUP5;
		break;
	case CDB_GROUPID_6:
		j = CDB_GROUP6;
		break;
	case CDB_GROUPID_7:
		j = CDB_GROUP7;
		break;
	default:
		j = 0;
	}
	if (j == 0)
		j = sizeof (cmd->bytes);
	for (i = 0; i < j - 1; i++)	/* already done the opcode */
		printf(" %02x", cmd->bytes[i]);
}

/*
 * scsipi_interpret_sense:
 *
 *	Look at the returned sense and act on the error, determining
 *	the unix error number to pass back.  (0 = report no error)
 *
 *	NOTE: If we return ERESTART, we are expected to have
 *	thawed the device!
 *
 *	THIS IS THE DEFAULT ERROR HANDLER FOR SCSI DEVICES.
 */
int
scsipi_interpret_sense(struct scsipi_xfer *xs)
{
	struct scsi_sense_data *sense;
	struct scsipi_periph *periph = xs->xs_periph;
	u_int8_t key;
	int error;
	u_int32_t info;
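	/* Error descriptions, indexed by (sense key - 1); used below. */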
	static const char *error_mes[] = {
		"soft error (corrected)",
		"not ready", "medium error",
		"non-media hardware failure", "illegal request",
		"unit attention", "readonly device",
		"no data found", "vendor unique",
		"copy aborted", "command aborted",
		"search returned equal", "volume overflow",
		"verify miscompare", "unknown error key"
	};

	sense = &xs->sense.scsi_sense;
#ifdef SCSIPI_DEBUG
	if (periph->periph_flags & SCSIPI_DB1) {
		int count;
		scsipi_printaddr(periph);
		printf(" sense debug information:\n");
		printf("\tcode 0x%x valid %d\n",
		    SSD_RCODE(sense->response_code),
		    sense->response_code & SSD_RCODE_VALID ? 1 : 0);
		printf("\tseg 0x%x key 0x%x ili 0x%x eom 0x%x fmark 0x%x\n",
		    sense->segment,
		    SSD_SENSE_KEY(sense->flags),
		    sense->flags & SSD_ILI ? 1 : 0,
		    sense->flags & SSD_EOM ? 1 : 0,
		    sense->flags & SSD_FILEMARK ? 1 : 0);
		printf("\ninfo: 0x%x 0x%x 0x%x 0x%x followed by %d "
		    "extra bytes\n",
		    sense->info[0],
		    sense->info[1],
		    sense->info[2],
		    sense->info[3],
		    sense->extra_len);
		printf("\textra: ");
		for (count = 0; count < SSD_ADD_BYTES_LIM(sense); count++)
			printf("0x%x ", sense->csi[count]);
		printf("\n");
	}
#endif

	/*
	 * If the periph has its own error handler, call it first.
	 * If it returns a legit error value, return that, otherwise
	 * it wants us to continue with normal error processing.
	 */
	if (periph->periph_switch->psw_error != NULL) {
		SC_DEBUG(periph, SCSIPI_DB2,
		    ("calling private err_handler()\n"));
		error = (*periph->periph_switch->psw_error)(xs);
		if (error != EJUSTRETURN)
			return error;
	}
	/* otherwise use the default */
	switch (SSD_RCODE(sense->response_code)) {

	/*
	 * Old SCSI-1 and SASI devices respond with
	 * codes other than 70.
	 */
	case 0x00:	/* no error (command completed OK) */
		return 0;
	case 0x04:	/* drive not ready after it was selected */
		if ((periph->periph_flags & PERIPH_REMOVABLE) != 0)
			periph->periph_flags &= ~PERIPH_MEDIA_LOADED;
		if ((xs->xs_control & XS_CTL_IGNORE_NOT_READY) != 0)
			return 0;
		/* XXX - display some sort of error here? */
		return EIO;
	case 0x20:	/* invalid command */
		if ((xs->xs_control &
		    XS_CTL_IGNORE_ILLEGAL_REQUEST) != 0)
			return 0;
		return EINVAL;
	case 0x25:	/* invalid LUN (Adaptec ACB-4000) */
		return EACCES;

	/*
	 * If it's code 70, use the extended stuff and
	 * interpret the key
	 */
	case 0x71:	/* delayed error */
		scsipi_printaddr(periph);
		key = SSD_SENSE_KEY(sense->flags);
		printf(" DEFERRED ERROR, key = 0x%x\n", key);
		/* FALLTHROUGH */
	case 0x70:
		if ((sense->response_code & SSD_RCODE_VALID) != 0)
			info = _4btol(sense->info);
		else
			info = 0;
		key = SSD_SENSE_KEY(sense->flags);

		switch (key) {
		case SKEY_NO_SENSE:
		case SKEY_RECOVERED_ERROR:
			if (xs->resid == xs->datalen && xs->datalen) {
				/*
				 * Why is this here?
				 */
				xs->resid = 0;	/* not short read */
			}
		case SKEY_EQUAL:
			error = 0;
			break;
		case SKEY_NOT_READY:
			if ((periph->periph_flags & PERIPH_REMOVABLE) != 0)
				periph->periph_flags &= ~PERIPH_MEDIA_LOADED;
			if ((xs->xs_control & XS_CTL_IGNORE_NOT_READY) != 0)
				return 0;
			if (sense->asc == 0x3A) {
				error = ENODEV; /* Medium not present */
				if (xs->xs_control & XS_CTL_SILENT_NODEV)
					return error;
			} else
				error = EIO;
			if ((xs->xs_control & XS_CTL_SILENT) != 0)
				return error;
			break;
		case SKEY_ILLEGAL_REQUEST:
			if ((xs->xs_control &
			    XS_CTL_IGNORE_ILLEGAL_REQUEST) != 0)
				return 0;
			/*
			 * Handle the case where a device reports
			 * Logical Unit Not Supported during discovery.
			 */
			if ((xs->xs_control & XS_CTL_DISCOVERY) != 0 &&
			    sense->asc == 0x25 &&
			    sense->ascq == 0x00)
				return EINVAL;
			if ((xs->xs_control & XS_CTL_SILENT) != 0)
				return EIO;
			error = EINVAL;
			break;
		case SKEY_UNIT_ATTENTION:
			if (sense->asc == 0x29 &&
			    sense->ascq == 0x00) {
				/* device or bus reset */
				return ERESTART;
			}
			if ((periph->periph_flags & PERIPH_REMOVABLE) != 0)
				periph->periph_flags &= ~PERIPH_MEDIA_LOADED;
			if ((xs->xs_control &
			    XS_CTL_IGNORE_MEDIA_CHANGE) != 0 ||
			    /* XXX Should reupload any transient state. */
			    (periph->periph_flags &
			    PERIPH_REMOVABLE) == 0) {
				return ERESTART;
			}
			if ((xs->xs_control & XS_CTL_SILENT) != 0)
				return EIO;
			error = EIO;
			break;
		case SKEY_DATA_PROTECT:
			error = EROFS;
			break;
		case SKEY_BLANK_CHECK:
			error = 0;
			break;
		case SKEY_ABORTED_COMMAND:
			if (xs->xs_retries != 0) {
				xs->xs_retries--;
				error = ERESTART;
			} else
				error = EIO;
			break;
		case SKEY_VOLUME_OVERFLOW:
			error = ENOSPC;
			break;
		default:
			error = EIO;
			break;
		}

		/* Print verbose decode if appropriate and possible */
		if ((key == 0) ||
		    ((xs->xs_control & XS_CTL_SILENT) != 0) ||
		    (scsipi_print_sense(xs, 0) != 0))
			return error;

		/* Print brief(er) sense information */
		scsipi_printaddr(periph);
		printf("%s", error_mes[key - 1]);
		if ((sense->response_code & SSD_RCODE_VALID) != 0) {
			switch (key) {
			case SKEY_NOT_READY:
			case SKEY_ILLEGAL_REQUEST:
			case SKEY_UNIT_ATTENTION:
			case SKEY_DATA_PROTECT:
				break;
			case SKEY_BLANK_CHECK:
				printf(", requested size: %d (decimal)",
				    info);
				break;
			case SKEY_ABORTED_COMMAND:
				if (xs->xs_retries)
					printf(", retrying");
				printf(", cmd 0x%x, info 0x%x",
				    xs->cmd->opcode, info);
				break;
			default:
				printf(", info = %d (decimal)", info);
			}
		}
		if (sense->extra_len != 0) {
			int n;
			printf(", data =");
			for (n = 0; n < sense->extra_len; n++)
				printf(" %02x",
				    sense->csi[n]);
		}
		printf("\n");
		return error;

	/*
	 * Some other code, just report it
	 */
	default:
#if defined(SCSIDEBUG) || defined(DEBUG)
	{
		static const char *uc = "undecodable sense error";
		int i;
		u_int8_t *cptr = (u_int8_t *) sense;
		scsipi_printaddr(periph);
		if (xs->cmd == &xs->cmdstore) {
			printf("%s for opcode 0x%x, data=",
			    uc, xs->cmdstore.opcode);
		} else {
			printf("%s, data=", uc);
		}
		for (i = 0; i < sizeof(*sense); i++)
			printf(" 0x%02x", *(cptr++) & 0xff);
		printf("\n");
	}
#else
		scsipi_printaddr(periph);
		printf("Sense Error Code 0x%x",
		    SSD_RCODE(sense->response_code));
		if ((sense->response_code & SSD_RCODE_VALID) != 0) {
			struct scsi_sense_data_unextended *usense =
			    (struct scsi_sense_data_unextended *)sense;
			printf(" at block no. %d (decimal)",
			    _3btol(usense->block));
		}
		printf("\n");
#endif
		return EIO;
	}
}

/*
 * scsipi_test_unit_ready:
 *
 *	Issue a `test unit ready' request.
 */
int
scsipi_test_unit_ready(struct scsipi_periph *periph, int flags)
{
	struct scsi_test_unit_ready cmd;
	int retries;

	/* some ATAPI drives don't support TEST UNIT READY. Sigh */
	if (periph->periph_quirks & PQUIRK_NOTUR)
		return 0;

	if (flags & XS_CTL_DISCOVERY)
		retries = 0;
	else
		retries = SCSIPIRETRIES;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = SCSI_TEST_UNIT_READY;

	return scsipi_command(periph, (void *)&cmd, sizeof(cmd), 0, 0,
	    retries, 10000, NULL, flags);
}

static const struct scsipi_inquiry3_pattern {
	const char vendor[8];
	const char product[16];
	const char revision[4];
} scsipi_inquiry3_quirk[] = {
	{ "ES-6600 ", "", "" },
};

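/*
 * scsipi_inquiry3_ok:
 *
 *	Return nonzero if it is safe to issue the longer SCSI-3 INQUIRY
 *	to this device; empty quirk fields match anything.
 */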
static int
scsipi_inquiry3_ok(const struct scsipi_inquiry_data *ib)
{
	for (size_t i = 0; i < __arraycount(scsipi_inquiry3_quirk); i++) {
		const struct scsipi_inquiry3_pattern *q =
		    &scsipi_inquiry3_quirk[i];
#define MATCH(field) \
    (q->field[0] ? memcmp(ib->field, q->field, sizeof(ib->field)) == 0 : 1)
		if (MATCH(vendor) && MATCH(product) && MATCH(revision))
			return 0;
	}
	return 1;
}

/*
 * scsipi_inquire:
 *
 *	Ask the device about itself.
 */
int
scsipi_inquire(struct scsipi_periph *periph, struct scsipi_inquiry_data *inqbuf,
    int flags)
{
	struct scsipi_inquiry cmd;
	int error;
	int retries;

	if (flags & XS_CTL_DISCOVERY)
		retries = 0;
	else
		retries = SCSIPIRETRIES;

	/*
	 * If we request more data than the device can provide, it SHOULD just
	 * return a short response.  However, some devices error with an
	 * ILLEGAL REQUEST sense code, and yet others have even more special
	 * failure modes (such as the GL641USB flash adapter, which goes loony
	 * and sends corrupted CRCs).  To work around this, and to bring our
	 * behavior more in line with other OSes, we do a shorter inquiry,
	 * covering all the SCSI-2 information, first, and then request more
	 * data iff the "additional length" field indicates there is more.
	 * - mycroft, 2003/10/16
	 */
	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = INQUIRY;
	cmd.length = SCSIPI_INQUIRY_LENGTH_SCSI2;
	error = scsipi_command(periph, (void *)&cmd, sizeof(cmd),
	    (void *)inqbuf, SCSIPI_INQUIRY_LENGTH_SCSI2, retries,
	    10000, NULL, flags | XS_CTL_DATA_IN);
	if (!error &&
	    inqbuf->additional_length > SCSIPI_INQUIRY_LENGTH_SCSI2 - 4) {
		if (scsipi_inquiry3_ok(inqbuf)) {
#if 0
printf("inquire: addlen=%d, retrying\n", inqbuf->additional_length);
#endif
			cmd.length = SCSIPI_INQUIRY_LENGTH_SCSI3;
			error = scsipi_command(periph, (void *)&cmd, sizeof(cmd),
			    (void *)inqbuf, SCSIPI_INQUIRY_LENGTH_SCSI3, retries,
			    10000, NULL, flags | XS_CTL_DATA_IN);
#if 0
printf("inquire: error=%d\n", error);
#endif
		}
	}

#ifdef SCSI_OLD_NOINQUIRY
	/*
	 * Kludge for the Adaptec ACB-4000 SCSI->MFM translator.
	 * This board doesn't support the INQUIRY command at all.
	 */
	if (error == EINVAL || error == EACCES) {
		/*
		 * Conjure up an INQUIRY response.
		 */
		inqbuf->device = (error == EINVAL ?
		    SID_QUAL_LU_PRESENT :
		    SID_QUAL_LU_NOTPRESENT) | T_DIRECT;
		inqbuf->dev_qual2 = 0;
		inqbuf->version = 0;
		inqbuf->response_format = SID_FORMAT_SCSI1;
		inqbuf->additional_length = SCSIPI_INQUIRY_LENGTH_SCSI2 - 4;
		inqbuf->flags1 = inqbuf->flags2 = inqbuf->flags3 = 0;
1216 memcpy(inqbuf->vendor, "ADAPTEC ACB-4000 ", 28);
		error = 0;
	}

	/*
	 * Kludge for the Emulex MT-02 SCSI->QIC translator.
	 * This board gives an empty response to an INQUIRY command.
	 */
	else if (error == 0 &&
	    inqbuf->device == (SID_QUAL_LU_PRESENT | T_DIRECT) &&
	    inqbuf->dev_qual2 == 0 &&
	    inqbuf->version == 0 &&
	    inqbuf->response_format == SID_FORMAT_SCSI1) {
		/*
		 * Fill out the INQUIRY response.
		 */
		inqbuf->device = (SID_QUAL_LU_PRESENT | T_SEQUENTIAL);
		inqbuf->dev_qual2 = SID_REMOVABLE;
		inqbuf->additional_length = SCSIPI_INQUIRY_LENGTH_SCSI2 - 4;
		inqbuf->flags1 = inqbuf->flags2 = inqbuf->flags3 = 0;
1236 memcpy(inqbuf->vendor, "EMULEX MT-02 QIC ", 28);
	}
#endif /* SCSI_OLD_NOINQUIRY */

	return error;
}

/*
 * scsipi_prevent:
 *
 *	Prevent or allow the user to remove the media
 */
int
scsipi_prevent(struct scsipi_periph *periph, int type, int flags)
{
	struct scsi_prevent_allow_medium_removal cmd;

	if (periph->periph_quirks & PQUIRK_NODOORLOCK)
		return 0;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = SCSI_PREVENT_ALLOW_MEDIUM_REMOVAL;
	cmd.how = type;

	return (scsipi_command(periph, (void *)&cmd, sizeof(cmd), 0, 0,
	    SCSIPIRETRIES, 5000, NULL, flags));
}

/*
 * scsipi_start:
 *
 *	Send a START UNIT.
 */
int
scsipi_start(struct scsipi_periph *periph, int type, int flags)
{
	struct scsipi_start_stop cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = START_STOP;
	cmd.byte2 = 0x00;
	cmd.how = type;

	return scsipi_command(periph, (void *)&cmd, sizeof(cmd), 0, 0,
	    SCSIPIRETRIES, (type & SSS_START) ? 60000 : 10000, NULL, flags);
}

/*
 * scsipi_mode_sense, scsipi_mode_sense_big:
 *	get a sense page from a device
 */

int
scsipi_mode_sense(struct scsipi_periph *periph, int byte2, int page,
    struct scsi_mode_parameter_header_6 *data, int len, int flags, int retries,
    int timeout)
{
	struct scsi_mode_sense_6 cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = SCSI_MODE_SENSE_6;
	cmd.byte2 = byte2;
	cmd.page = page;
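	/* The 6-byte MODE SENSE allocation length is one byte (max 255). */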
	cmd.length = len & 0xff;

	return scsipi_command(periph, (void *)&cmd, sizeof(cmd),
	    (void *)data, len, retries, timeout, NULL, flags | XS_CTL_DATA_IN);
}

int
scsipi_mode_sense_big(struct scsipi_periph *periph, int byte2, int page,
    struct scsi_mode_parameter_header_10 *data, int len, int flags, int retries,
    int timeout)
{
	struct scsi_mode_sense_10 cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = SCSI_MODE_SENSE_10;
	cmd.byte2 = byte2;
	cmd.page = page;
	_lto2b(len, cmd.length);

	return scsipi_command(periph, (void *)&cmd, sizeof(cmd),
	    (void *)data, len, retries, timeout, NULL, flags | XS_CTL_DATA_IN);
}

int
scsipi_mode_select(struct scsipi_periph *periph, int byte2,
    struct scsi_mode_parameter_header_6 *data, int len, int flags, int retries,
    int timeout)
{
	struct scsi_mode_select_6 cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = SCSI_MODE_SELECT_6;
	cmd.byte2 = byte2;
	cmd.length = len & 0xff;

	return scsipi_command(periph, (void *)&cmd, sizeof(cmd),
	    (void *)data, len, retries, timeout, NULL, flags | XS_CTL_DATA_OUT);
}

int
scsipi_mode_select_big(struct scsipi_periph *periph, int byte2,
    struct scsi_mode_parameter_header_10 *data, int len, int flags, int retries,
    int timeout)
{
	struct scsi_mode_select_10 cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = SCSI_MODE_SELECT_10;
	cmd.byte2 = byte2;
	_lto2b(len, cmd.length);

	return scsipi_command(periph, (void *)&cmd, sizeof(cmd),
	    (void *)data, len, retries, timeout, NULL, flags | XS_CTL_DATA_OUT);
}

/*
 * scsipi_done:
 *
 *	This routine is called by an adapter's interrupt handler when
 *	an xfer is completed.
 */
void
scsipi_done(struct scsipi_xfer *xs)
{
	struct scsipi_periph *periph = xs->xs_periph;
	struct scsipi_channel *chan = periph->periph_channel;
	int freezecnt;

	SC_DEBUG(periph, SCSIPI_DB2, ("scsipi_done\n"));
#ifdef SCSIPI_DEBUG
	if (periph->periph_dbflags & SCSIPI_DB1)
		show_scsipi_cmd(xs);
#endif

	mutex_enter(chan_mtx(chan));
	/*
	 * The resource this command was using is now free.
	 */
	if (xs->xs_status & XS_STS_DONE) {
		/* XXX in certain circumstances, such as a device
		 * being detached, a xs that has already been
		 * scsipi_done()'d by the main thread will be done'd
		 * again by scsibusdetach(). Putting the xs on the
		 * chan_complete queue causes list corruption and
		 * everyone dies. This prevents that, but perhaps
		 * there should be better coordination somewhere such
		 * that this won't ever happen (and can be turned into
		 * a KASSERT()).
		 */
		mutex_exit(chan_mtx(chan));
		goto out;
	}
	scsipi_put_resource(chan);
	xs->xs_periph->periph_sent--;

	/*
	 * If the command was tagged, free the tag.
	 */
	if (XS_CTL_TAGTYPE(xs) != 0)
		scsipi_put_tag(xs);
	else
		periph->periph_flags &= ~PERIPH_UNTAG;

	/* Mark the command as `done'. */
	xs->xs_status |= XS_STS_DONE;

#ifdef DIAGNOSTIC
	if ((xs->xs_control & (XS_CTL_ASYNC|XS_CTL_POLL)) ==
	    (XS_CTL_ASYNC|XS_CTL_POLL))
		panic("scsipi_done: ASYNC and POLL");
#endif

	/*
	 * If the xfer had an error of any sort, freeze the
	 * periph's queue.  Freeze it again if we were requested
	 * to do so in the xfer.
	 */
	freezecnt = 0;
	if (xs->error != XS_NOERROR)
		freezecnt++;
	if (xs->xs_control & XS_CTL_FREEZE_PERIPH)
		freezecnt++;
	if (freezecnt != 0)
		scsipi_periph_freeze_locked(periph, freezecnt);

	/*
	 * Record the xfer with a pending sense, in case a SCSI reset is
	 * received before the thread is woken up.
	 */
	if (xs->error == XS_BUSY && xs->status == SCSI_CHECK) {
		periph->periph_flags |= PERIPH_SENSE;
		periph->periph_xscheck = xs;
	}

	/*
	 * If this was an xfer that was not to complete asynchronously,
	 * let the requesting thread perform error checking/handling
	 * in its context.
	 */
	if ((xs->xs_control & XS_CTL_ASYNC) == 0) {
		/*
		 * If it's a polling job, just return, to unwind the
		 * call graph.  We don't need to restart the queue,
		 * because polling jobs are treated specially, and
		 * are really only used during crash dumps anyway
		 * (XXX or during boot-time autoconfiguration of
		 * ATAPI devices).
		 */
		if (xs->xs_control & XS_CTL_POLL) {
			mutex_exit(chan_mtx(chan));
			return;
		}
		cv_broadcast(xs_cv(xs));
		mutex_exit(chan_mtx(chan));
		goto out;
	}

	/*
	 * Catch the extremely common case of I/O completing
	 * without error; no use in taking a context switch
	 * if we can handle it in interrupt context.
	 */
	if (xs->error == XS_NOERROR) {
		mutex_exit(chan_mtx(chan));
		(void) scsipi_complete(xs);
		goto out;
	}

	/*
	 * There is an error on this xfer.  Put it on the channel's
	 * completion queue, and wake up the completion thread.
	 */
	TAILQ_INSERT_TAIL(&chan->chan_complete, xs, channel_q);
	cv_broadcast(chan_cv_complete(chan));
	mutex_exit(chan_mtx(chan));

 out:
	/*
	 * If there are more xfers on the channel's queue, attempt to
	 * run them.
	 */
	scsipi_run_queue(chan);
}

/*
 * scsipi_complete:
 *
 *	Completion of a scsipi_xfer.  This is the guts of scsipi_done().
 *
 *	NOTE: This routine MUST be called with valid thread context
 *	except for the case where the following two conditions are
 *	true:
 *
 *		xs->error == XS_NOERROR
 *		XS_CTL_ASYNC is set in xs->xs_control
 *
 *	The semantics of this routine can be tricky, so here is an
 *	explanation:
 *
 *		0		Xfer completed successfully.
 *
 *		ERESTART	Xfer had an error, but was restarted.
 *
 *		anything else	Xfer had an error, return value is Unix
 *				errno.
 *
 *	If the return value is anything but ERESTART:
 *
 *		- If XS_CTL_ASYNC is set, `xs' has been freed back to
 *		  the pool.
 *		- If there is a buf associated with the xfer,
 *		  it has been biodone()'d.
 */
static int
scsipi_complete(struct scsipi_xfer *xs)
{
	struct scsipi_periph *periph = xs->xs_periph;
	struct scsipi_channel *chan = periph->periph_channel;
	int error;

#ifdef DIAGNOSTIC
	if ((xs->xs_control & XS_CTL_ASYNC) != 0 && xs->bp == NULL)
		panic("scsipi_complete: XS_CTL_ASYNC but no buf");
#endif
	/*
	 * If command terminated with a CHECK CONDITION, we need to issue a
	 * REQUEST_SENSE command. Once the REQUEST_SENSE has been processed
	 * we'll have the real status.
	 * Must be processed with channel lock held to avoid missing
	 * a SCSI bus reset for this command.
	 */
	mutex_enter(chan_mtx(chan));
	if (xs->error == XS_BUSY && xs->status == SCSI_CHECK) {
		/* request sense for a request sense? */
		if (xs->xs_control & XS_CTL_REQSENSE) {
			scsipi_printaddr(periph);
			printf("request sense for a request sense?\n");
			/* XXX maybe we should reset the device? */
			/* we've been frozen because xs->error != XS_NOERROR */
			scsipi_periph_thaw_locked(periph, 1);
			mutex_exit(chan_mtx(chan));
			if (xs->resid < xs->datalen) {
				printf("we read %d bytes of sense anyway:\n",
				    xs->datalen - xs->resid);
				scsipi_print_sense_data((void *)xs->data, 0);
			}
			return EINVAL;
		}
		/* XXX allows other commands to queue or run */
		mutex_exit(chan_mtx(chan));
		scsipi_request_sense(xs);
	} else
		mutex_exit(chan_mtx(chan));

	/*
	 * If it's a user level request, bypass all usual completion
	 * processing, let the user work it out..
	 */
	if ((xs->xs_control & XS_CTL_USERCMD) != 0) {
		SC_DEBUG(periph, SCSIPI_DB3, ("calling user done()\n"));
		mutex_enter(chan_mtx(chan));
		if (xs->error != XS_NOERROR)
			scsipi_periph_thaw_locked(periph, 1);
		mutex_exit(chan_mtx(chan));
		scsipi_user_done(xs);
		SC_DEBUG(periph, SCSIPI_DB3, ("returned from user done()\n "));
		return 0;
	}

	switch (xs->error) {
	case XS_NOERROR:
		error = 0;
		break;

	case XS_SENSE:
	case XS_SHORTSENSE:
		error = (*chan->chan_bustype->bustype_interpret_sense)(xs);
		break;

	case XS_RESOURCE_SHORTAGE:
		/*
		 * XXX Should freeze channel's queue.
		 */
		scsipi_printaddr(periph);
		printf("adapter resource shortage\n");
		/* FALLTHROUGH */

	case XS_BUSY:
		if (xs->error == XS_BUSY && xs->status == SCSI_QUEUE_FULL) {
			struct scsipi_max_openings mo;

			/*
			 * We set the openings to active - 1, assuming that
			 * the command that got us here is the first one that
			 * can't fit into the device's queue.  If that's not
			 * the case, I guess we'll find out soon enough.
			 */
			mo.mo_target = periph->periph_target;
			mo.mo_lun = periph->periph_lun;
			if (periph->periph_active < periph->periph_openings)
				mo.mo_openings = periph->periph_active - 1;
			else
				mo.mo_openings = periph->periph_openings - 1;
#ifdef DIAGNOSTIC
			if (mo.mo_openings < 0) {
				scsipi_printaddr(periph);
				printf("QUEUE FULL resulted in < 0 openings\n");
				panic("scsipi_done");
			}
#endif
			if (mo.mo_openings == 0) {
				scsipi_printaddr(periph);
				printf("QUEUE FULL resulted in 0 openings\n");
				mo.mo_openings = 1;
			}
			scsipi_async_event(chan, ASYNC_EVENT_MAX_OPENINGS, &mo);
			error = ERESTART;
		} else if (xs->xs_retries != 0) {
			xs->xs_retries--;
			/*
			 * Wait one second, and try again.
			 */
			mutex_enter(chan_mtx(chan));
			if ((xs->xs_control & XS_CTL_POLL) ||
			    (chan->chan_flags & SCSIPI_CHAN_TACTIVE) == 0) {
				/* XXX: quite extreme */
				kpause("xsbusy", false, hz, chan_mtx(chan));
			} else if (!callout_pending(&periph->periph_callout)) {
				scsipi_periph_freeze_locked(periph, 1);
				callout_reset(&periph->periph_callout,
				    hz, scsipi_periph_timed_thaw, periph);
			}
			mutex_exit(chan_mtx(chan));
			error = ERESTART;
		} else
			error = EBUSY;
		break;

	case XS_REQUEUE:
		error = ERESTART;
		break;

	case XS_SELTIMEOUT:
	case XS_TIMEOUT:
		/*
		 * If the device hasn't gone away, honor retry counts.
		 *
		 * Note that if we're in the middle of probing it,
		 * it won't be found because it isn't here yet so
		 * we won't honor the retry count in that case.
		 */
		if (scsipi_lookup_periph(chan, periph->periph_target,
		    periph->periph_lun) && xs->xs_retries != 0) {
			xs->xs_retries--;
			error = ERESTART;
		} else
			error = EIO;
		break;

	case XS_RESET:
		if (xs->xs_control & XS_CTL_REQSENSE) {
			/*
			 * request sense interrupted by reset: signal it
			 * with EINTR return code.
			 */
			error = EINTR;
		} else {
			if (xs->xs_retries != 0) {
				xs->xs_retries--;
				error = ERESTART;
			} else
				error = EIO;
		}
		break;

	case XS_DRIVER_STUFFUP:
		scsipi_printaddr(periph);
		printf("generic HBA error\n");
		error = EIO;
		break;
	default:
		scsipi_printaddr(periph);
		printf("invalid return code from adapter: %d\n", xs->error);
		error = EIO;
		break;
	}

	mutex_enter(chan_mtx(chan));
	if (error == ERESTART) {
		/*
		 * If we get here, the periph has been thawed and frozen
		 * again if we had to issue recovery commands.  Alternatively,
		 * it may have been frozen again and in a timed thaw.  In
		 * any case, we thaw the periph once we re-enqueue the
		 * command.  Once the periph is fully thawed, it will begin
		 * operation again.
		 */
		xs->error = XS_NOERROR;
		xs->status = SCSI_OK;
		xs->xs_status &= ~XS_STS_DONE;
		xs->xs_requeuecnt++;
		error = scsipi_enqueue(xs);
		if (error == 0) {
			scsipi_periph_thaw_locked(periph, 1);
			mutex_exit(chan_mtx(chan));
			return ERESTART;
		}
	}

	/*
	 * scsipi_done() freezes the queue if not XS_NOERROR.
	 * Thaw it here.
	 */
	if (xs->error != XS_NOERROR)
		scsipi_periph_thaw_locked(periph, 1);
	mutex_exit(chan_mtx(chan));

	if (periph->periph_switch->psw_done)
		periph->periph_switch->psw_done(xs, error);

	mutex_enter(chan_mtx(chan));
	if (xs->xs_control & XS_CTL_ASYNC)
		scsipi_put_xs(xs);
	mutex_exit(chan_mtx(chan));

	return error;
}

/*
 * Issue a request sense for the given scsipi_xfer.  Called when the xfer
 * returns with a CHECK_CONDITION status.  Must be called in valid thread
 * context.
 */

static void
scsipi_request_sense(struct scsipi_xfer *xs)
{
	struct scsipi_periph *periph = xs->xs_periph;
	int flags, error;
	struct scsi_request_sense cmd;

	periph->periph_flags |= PERIPH_SENSE;

	/* if command was polling, request sense will too */
	flags = xs->xs_control & XS_CTL_POLL;
	/* Polling commands can't sleep */
	if (flags)
		flags |= XS_CTL_NOSLEEP;

	flags |= XS_CTL_REQSENSE | XS_CTL_URGENT | XS_CTL_DATA_IN |
	    XS_CTL_THAW_PERIPH | XS_CTL_FREEZE_PERIPH;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = SCSI_REQUEST_SENSE;
	cmd.length = sizeof(struct scsi_sense_data);

	error = scsipi_command(periph, (void *)&cmd, sizeof(cmd),
	    (void *)&xs->sense.scsi_sense, sizeof(struct scsi_sense_data),
	    0, 1000, NULL, flags);
	periph->periph_flags &= ~PERIPH_SENSE;
	periph->periph_xscheck = NULL;
	switch (error) {
	case 0:
		/* we have a valid sense */
		xs->error = XS_SENSE;
		return;
	case EINTR:
		/* REQUEST_SENSE interrupted by bus reset. */
		xs->error = XS_RESET;
		return;
	case EIO:
		/* request sense couldn't be performed */
		/*
		 * XXX this isn't quite right but we don't have anything
		 * better for now
		 */
		xs->error = XS_DRIVER_STUFFUP;
		return;
	default:
		/* Notify that request sense failed. */
		xs->error = XS_DRIVER_STUFFUP;
		scsipi_printaddr(periph);
		printf("request sense failed with error %d\n", error);
		return;
	}
}

/*
 * scsipi_enqueue:
 *
 *	Enqueue an xfer on a channel.
 */
static int
scsipi_enqueue(struct scsipi_xfer *xs)
{
	struct scsipi_channel *chan = xs->xs_periph->periph_channel;
	struct scsipi_xfer *qxs;

	/*
	 * If the xfer is to be polled, and there are already jobs on
	 * the queue, we can't proceed.
	 */
	KASSERT(mutex_owned(chan_mtx(chan)));
	if ((xs->xs_control & XS_CTL_POLL) != 0 &&
	    TAILQ_FIRST(&chan->chan_queue) != NULL) {
		xs->error = XS_DRIVER_STUFFUP;
		return EAGAIN;
	}

	/*
	 * If we have an URGENT xfer, it's an error recovery command
	 * and it should just go on the head of the channel's queue.
	 */
	if (xs->xs_control & XS_CTL_URGENT) {
		TAILQ_INSERT_HEAD(&chan->chan_queue, xs, channel_q);
		goto out;
	}

	/*
	 * If this xfer has already been on the queue before, we
	 * need to reinsert it in the correct order.  That order is:
	 *
	 *	Immediately before the first xfer for this periph
	 *	with a requeuecnt less than xs->xs_requeuecnt.
	 *
	 * Failing that, at the end of the queue.  (We'll end up
	 * there naturally.)
	 */
	if (xs->xs_requeuecnt != 0) {
		for (qxs = TAILQ_FIRST(&chan->chan_queue); qxs != NULL;
		     qxs = TAILQ_NEXT(qxs, channel_q)) {
			if (qxs->xs_periph == xs->xs_periph &&
			    qxs->xs_requeuecnt < xs->xs_requeuecnt)
				break;
		}
		if (qxs != NULL) {
			TAILQ_INSERT_AFTER(&chan->chan_queue, qxs, xs,
			    channel_q);
			goto out;
		}
	}
	TAILQ_INSERT_TAIL(&chan->chan_queue, xs, channel_q);
 out:
	if (xs->xs_control & XS_CTL_THAW_PERIPH)
		scsipi_periph_thaw_locked(xs->xs_periph, 1);
	return 0;
}

/*
 * scsipi_run_queue:
 *
 *	Start as many xfers as possible running on the channel.
 */
static void
scsipi_run_queue(struct scsipi_channel *chan)
{
	struct scsipi_xfer *xs;
	struct scsipi_periph *periph;

	for (;;) {
		mutex_enter(chan_mtx(chan));

		/*
		 * If the channel is frozen, we can't do any work right
		 * now.
		 */
		if (chan->chan_qfreeze != 0) {
			mutex_exit(chan_mtx(chan));
			return;
		}

		/*
		 * Look for work to do, and make sure we can do it.
		 */
		for (xs = TAILQ_FIRST(&chan->chan_queue); xs != NULL;
		     xs = TAILQ_NEXT(xs, channel_q)) {
			periph = xs->xs_periph;

			if ((periph->periph_sent >= periph->periph_openings) ||
			    periph->periph_qfreeze != 0 ||
			    (periph->periph_flags & PERIPH_UNTAG) != 0)
				continue;

			if ((periph->periph_flags &
			    (PERIPH_RECOVERING | PERIPH_SENSE)) != 0 &&
			    (xs->xs_control & XS_CTL_URGENT) == 0)
				continue;

			/*
			 * We can issue this xfer!
			 */
			goto got_one;
		}

		/*
		 * Can't find any work to do right now.
		 */
		mutex_exit(chan_mtx(chan));
		return;

 got_one:
		/*
		 * Have an xfer to run.  Allocate a resource from
		 * the adapter to run it.  If we can't allocate that
		 * resource, we don't dequeue the xfer.
		 */
		if (scsipi_get_resource(chan) == 0) {
			/*
			 * Adapter is out of resources.  If the adapter
			 * supports it, attempt to grow them.
			 */
			if (scsipi_grow_resources(chan) == 0) {
				/*
				 * Wasn't able to grow resources,
				 * nothing more we can do.
				 */
				if (xs->xs_control & XS_CTL_POLL) {
					scsipi_printaddr(xs->xs_periph);
					printf("polling command but no "
					    "adapter resources");
					/* We'll panic shortly... */
				}
				mutex_exit(chan_mtx(chan));

				/*
				 * XXX: We should be able to note that
				 * XXX: resources are needed here!
				 */
				return;
			}
			/*
			 * scsipi_grow_resources() allocated the resource
			 * for us.
			 */
		}

		/*
		 * We have a resource to run this xfer, do it!
		 */
		TAILQ_REMOVE(&chan->chan_queue, xs, channel_q);

		/*
		 * If the command is to be tagged, allocate a tag ID
		 * for it.
		 */
		if (XS_CTL_TAGTYPE(xs) != 0)
			scsipi_get_tag(xs);
		else
			periph->periph_flags |= PERIPH_UNTAG;
		periph->periph_sent++;
		mutex_exit(chan_mtx(chan));

		scsipi_adapter_request(chan, ADAPTER_REQ_RUN_XFER, xs);
	}
#ifdef DIAGNOSTIC
	panic("scsipi_run_queue: impossible");
#endif
}

/*
 * scsipi_execute_xs:
 *
 *	Begin execution of an xfer, waiting for it to complete, if necessary.
 */
int
scsipi_execute_xs(struct scsipi_xfer *xs)
{
	struct scsipi_periph *periph = xs->xs_periph;
	struct scsipi_channel *chan = periph->periph_channel;
	int oasync, async, poll, error;

	KASSERT(!cold);

	(chan->chan_bustype->bustype_cmd)(xs);

	xs->xs_status &= ~XS_STS_DONE;
	xs->error = XS_NOERROR;
	xs->resid = xs->datalen;
	xs->status = SCSI_OK;

#ifdef SCSIPI_DEBUG
	if (xs->xs_periph->periph_dbflags & SCSIPI_DB3) {
		printf("scsipi_execute_xs: ");
		show_scsipi_xs(xs);
		printf("\n");
	}
#endif

	/*
	 * Deal with command tagging:
	 *
	 *	- If the device's current operating mode doesn't
	 *	  include tagged queueing, clear the tag mask.
	 *
	 *	- If the device's current operating mode *does*
	 *	  include tagged queueing, set the tag_type in
	 *	  the xfer to the appropriate byte for the tag
	 *	  message.
	 */
	if ((PERIPH_XFER_MODE(periph) & PERIPH_CAP_TQING) == 0 ||
	    (xs->xs_control & XS_CTL_REQSENSE)) {
		xs->xs_control &= ~XS_CTL_TAGMASK;
		xs->xs_tag_type = 0;
	} else {
		/*
		 * If the request doesn't specify a tag, give Head
		 * tags to URGENT operations and Simple tags to
		 * everything else.
		 */
		if (XS_CTL_TAGTYPE(xs) == 0) {
			if (xs->xs_control & XS_CTL_URGENT)
				xs->xs_control |= XS_CTL_HEAD_TAG;
			else
				xs->xs_control |= XS_CTL_SIMPLE_TAG;
		}

		switch (XS_CTL_TAGTYPE(xs)) {
		case XS_CTL_ORDERED_TAG:
			xs->xs_tag_type = MSG_ORDERED_Q_TAG;
			break;

		case XS_CTL_SIMPLE_TAG:
			xs->xs_tag_type = MSG_SIMPLE_Q_TAG;
			break;

		case XS_CTL_HEAD_TAG:
			xs->xs_tag_type = MSG_HEAD_OF_Q_TAG;
			break;

		default:
			scsipi_printaddr(periph);
			printf("invalid tag mask 0x%08x\n",
			    XS_CTL_TAGTYPE(xs));
			panic("scsipi_execute_xs");
		}
	}

2035 /* If the adaptor wants us to poll, poll. */
2036 if (chan->chan_adapter->adapt_flags & SCSIPI_ADAPT_POLL_ONLY)
2037 xs->xs_control |= XS_CTL_POLL;
2038
2039 /*
2040 * If we don't yet have a completion thread, or we are to poll for
2041 * completion, clear the ASYNC flag.
2042 */
2043 oasync = (xs->xs_control & XS_CTL_ASYNC);
2044 if (chan->chan_thread == NULL || (xs->xs_control & XS_CTL_POLL) != 0)
2045 xs->xs_control &= ~XS_CTL_ASYNC;
2046
2047 async = (xs->xs_control & XS_CTL_ASYNC);
2048 poll = (xs->xs_control & XS_CTL_POLL);
2049
2050 #ifdef DIAGNOSTIC
2051 if (oasync != 0 && xs->bp == NULL)
2052 panic("scsipi_execute_xs: XS_CTL_ASYNC but no buf");
2053 #endif
2054
2055 /*
2056 * Enqueue the transfer. If we're not polling for completion, this
2057 * should ALWAYS return `no error'.
2058 */
2059 error = scsipi_enqueue(xs);
2060 if (error) {
2061 if (poll == 0) {
2062 scsipi_printaddr(periph);
2063 printf("not polling, but enqueue failed with %d\n",
2064 error);
2065 panic("scsipi_execute_xs");
2066 }
2067
2068 scsipi_printaddr(periph);
2069 printf("should have flushed queue?\n");
2070 goto free_xs;
2071 }
2072
2073 mutex_exit(chan_mtx(chan));
2074 restarted:
2075 scsipi_run_queue(chan);
2076 mutex_enter(chan_mtx(chan));
2077
2078 /*
2079 * The xfer is enqueued, and possibly running. If it's to be
2080 * completed asynchronously, just return now.
2081 */
2082 if (async)
2083 return 0;
2084
2085 /*
2086 * Not an asynchronous command; wait for it to complete.
2087 */
2088 while ((xs->xs_status & XS_STS_DONE) == 0) {
2089 if (poll) {
2090 scsipi_printaddr(periph);
2091 printf("polling command not done\n");
2092 panic("scsipi_execute_xs");
2093 }
2094 cv_wait(xs_cv(xs), chan_mtx(chan));
2095 }
2096
2097 /*
2098 * Command is complete. scsipi_done() has awakened us to perform
2099 * the error handling.
2100 */
2101 mutex_exit(chan_mtx(chan));
2102 error = scsipi_complete(xs);
2103 if (error == ERESTART)
2104 goto restarted;
2105
2106 /*
2107 * If it was meant to run async and we cleared aync ourselve,
2108 * don't return an error here. It has already been handled
2109 */
2110 if (oasync)
2111 error = 0;
2112 /*
2113 * Command completed successfully or fatal error occurred. Fall
2114 * into....
2115 */
2116 mutex_enter(chan_mtx(chan));
2117 free_xs:
2118 scsipi_put_xs(xs);
2119 mutex_exit(chan_mtx(chan));
2120
2121 /*
2122 * Kick the queue, keep it running in case it stopped for some
2123 * reason.
2124 */
2125 scsipi_run_queue(chan);
2126
2127 mutex_enter(chan_mtx(chan));
2128 return error;
2129 }
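
/*
 * Example (illustrative sketch, not code from this file): periph drivers
 * normally reach scsipi_execute_xs() through scsipi_command(), which
 * allocates the xfer and fills in the fields initialized above.  Given a
 * valid periph, a polled TEST UNIT READY might look like:
 *
 *	struct scsi_test_unit_ready cmd;
 *	int error;
 *
 *	memset(&cmd, 0, sizeof(cmd));
 *	cmd.opcode = SCSI_TEST_UNIT_READY;
 *	error = scsipi_command(periph, (void *)&cmd, sizeof(cmd),
 *	    NULL, 0, SCSIPIRETRIES, 10000, NULL, XS_CTL_POLL);
 *
 * With XS_CTL_POLL set, the adapter must complete the command before
 * returning; scsipi_execute_xs() panics if a polled command comes back
 * without XS_STS_DONE set.
 */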

/*
 * scsipi_completion_thread:
 *
 *	This is the completion thread.  We wait for errors on
 *	asynchronous xfers, and perform the error handling
 *	function, restarting the command, if necessary.
 */
static void
scsipi_completion_thread(void *arg)
{
	struct scsipi_channel *chan = arg;
	struct scsipi_xfer *xs;

	if (chan->chan_init_cb)
		(*chan->chan_init_cb)(chan, chan->chan_init_cb_arg);

	mutex_enter(chan_mtx(chan));
	chan->chan_flags |= SCSIPI_CHAN_TACTIVE;
	for (;;) {
		xs = TAILQ_FIRST(&chan->chan_complete);
		if (xs == NULL && chan->chan_tflags == 0) {
			/* nothing to do; wait */
			cv_wait(chan_cv_complete(chan), chan_mtx(chan));
			continue;
		}
		if (chan->chan_tflags & SCSIPI_CHANT_CALLBACK) {
			/* call chan_callback from thread context */
			chan->chan_tflags &= ~SCSIPI_CHANT_CALLBACK;
			chan->chan_callback(chan, chan->chan_callback_arg);
			continue;
		}
		if (chan->chan_tflags & SCSIPI_CHANT_GROWRES) {
			/* attempt to get more openings for this channel */
			chan->chan_tflags &= ~SCSIPI_CHANT_GROWRES;
			mutex_exit(chan_mtx(chan));
			scsipi_adapter_request(chan,
			    ADAPTER_REQ_GROW_RESOURCES, NULL);
			scsipi_channel_thaw(chan, 1);
			if (chan->chan_tflags & SCSIPI_CHANT_GROWRES)
				kpause("scsizzz", FALSE, hz/10, NULL);
			mutex_enter(chan_mtx(chan));
			continue;
		}
		if (chan->chan_tflags & SCSIPI_CHANT_KICK) {
			/* explicitly run the queues for this channel */
			chan->chan_tflags &= ~SCSIPI_CHANT_KICK;
			mutex_exit(chan_mtx(chan));
			scsipi_run_queue(chan);
			mutex_enter(chan_mtx(chan));
			continue;
		}
		if (chan->chan_tflags & SCSIPI_CHANT_SHUTDOWN) {
			break;
		}
		if (xs) {
			TAILQ_REMOVE(&chan->chan_complete, xs, channel_q);
			mutex_exit(chan_mtx(chan));

			/*
			 * Have an xfer with an error; process it.
			 */
			(void) scsipi_complete(xs);

			/*
			 * Kick the queue; keep it running if it was stopped
			 * for some reason.
			 */
			scsipi_run_queue(chan);
			mutex_enter(chan_mtx(chan));
		}
	}

	chan->chan_thread = NULL;

	/* In case parent is waiting for us to exit. */
	cv_broadcast(chan_cv_thread(chan));
	mutex_exit(chan_mtx(chan));

	kthread_exit(0);
}

/*
 * scsipi_thread_call_callback:
 *
 *	Request that a callback be invoked from the completion thread.
 */
int
scsipi_thread_call_callback(struct scsipi_channel *chan,
    void (*callback)(struct scsipi_channel *, void *), void *arg)
{

	mutex_enter(chan_mtx(chan));
	if ((chan->chan_flags & SCSIPI_CHAN_TACTIVE) == 0) {
		/* kernel thread doesn't exist yet */
		mutex_exit(chan_mtx(chan));
		return ESRCH;
	}
	if (chan->chan_tflags & SCSIPI_CHANT_CALLBACK) {
		mutex_exit(chan_mtx(chan));
		return EBUSY;
	}
	scsipi_channel_freeze(chan, 1);
	chan->chan_callback = callback;
	chan->chan_callback_arg = arg;
	chan->chan_tflags |= SCSIPI_CHANT_CALLBACK;
	cv_broadcast(chan_cv_complete(chan));
	mutex_exit(chan_mtx(chan));
	return 0;
}
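
/*
 * Illustrative sketch (hypothetical adapter code; the foo_* names are
 * assumptions, not part of this file): an HBA driver can defer work that
 * needs thread context to the completion thread.  Note that the freeze
 * taken by scsipi_thread_call_callback() is not released by the thread
 * itself, so the callback should thaw the channel when it is done:
 *
 *	static void
 *	foo_resume_callback(struct scsipi_channel *chan, void *arg)
 *	{
 *		struct foo_softc *sc = arg;
 *
 *		foo_reinit(sc);
 *		scsipi_channel_thaw(chan, 1);
 *	}
 *
 *	if (scsipi_thread_call_callback(chan, foo_resume_callback, sc) != 0)
 *		printf("callback already pending or thread not running\n");
 */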

/*
 * scsipi_async_event:
 *
 *	Handle an asynchronous event from an adapter.
 */
void
scsipi_async_event(struct scsipi_channel *chan, scsipi_async_event_t event,
    void *arg)
{
	bool lock = chan_running(chan) > 0;

	if (lock)
		mutex_enter(chan_mtx(chan));
	switch (event) {
	case ASYNC_EVENT_MAX_OPENINGS:
		scsipi_async_event_max_openings(chan,
		    (struct scsipi_max_openings *)arg);
		break;

	case ASYNC_EVENT_XFER_MODE:
		if (chan->chan_bustype->bustype_async_event_xfer_mode) {
			chan->chan_bustype->bustype_async_event_xfer_mode(
			    chan, arg);
		}
		break;

	case ASYNC_EVENT_RESET:
		scsipi_async_event_channel_reset(chan);
		break;
	}
	if (lock)
		mutex_exit(chan_mtx(chan));
}
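
/*
 * Illustrative sketch (hypothetical adapter, not code from this file):
 * an HBA that discovers a target can only queue `depth' commands would
 * report it with a wildcarded LUN:
 *
 *	struct scsipi_max_openings mo;
 *
 *	mo.mo_target = target;
 *	mo.mo_lun = -1;			(-1 means: apply to every LUN)
 *	mo.mo_openings = depth;
 *	scsipi_async_event(chan, ASYNC_EVENT_MAX_OPENINGS, &mo);
 */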

/*
 * scsipi_async_event_max_openings:
 *
 *	Update the maximum number of outstanding commands a
 *	device may have.
 */
static void
scsipi_async_event_max_openings(struct scsipi_channel *chan,
    struct scsipi_max_openings *mo)
{
	struct scsipi_periph *periph;
	int minlun, maxlun;

	if (mo->mo_lun == -1) {
		/*
		 * Wildcarded; apply it to all LUNs.
		 */
		minlun = 0;
		maxlun = chan->chan_nluns - 1;
	} else
		minlun = maxlun = mo->mo_lun;

	/* XXX This could really suck with a large LUN space. */
	for (; minlun <= maxlun; minlun++) {
		periph = scsipi_lookup_periph_locked(chan, mo->mo_target,
		    minlun);
		if (periph == NULL)
			continue;

		if (mo->mo_openings < periph->periph_openings)
			periph->periph_openings = mo->mo_openings;
		else if (mo->mo_openings > periph->periph_openings &&
		    (periph->periph_flags & PERIPH_GROW_OPENINGS) != 0)
			periph->periph_openings = mo->mo_openings;
	}
}

/*
 * scsipi_set_xfer_mode:
 *
 *	Set the xfer mode for the specified I_T Nexus.
 */
void
scsipi_set_xfer_mode(struct scsipi_channel *chan, int target, int immed)
{
	struct scsipi_xfer_mode xm;
	struct scsipi_periph *itperiph;
	int lun;

	/*
	 * Go to the minimal xfer mode.
	 */
	xm.xm_target = target;
	xm.xm_mode = 0;
	xm.xm_period = 0;			/* ignored */
	xm.xm_offset = 0;			/* ignored */

	/*
	 * Find the first LUN we know about on this I_T Nexus.
	 */
	for (itperiph = NULL, lun = 0; lun < chan->chan_nluns; lun++) {
		itperiph = scsipi_lookup_periph(chan, target, lun);
		if (itperiph != NULL)
			break;
	}
	if (itperiph != NULL) {
		xm.xm_mode = itperiph->periph_cap;
		/*
		 * Now issue the request to the adapter.
		 */
		scsipi_adapter_request(chan, ADAPTER_REQ_SET_XFER_MODE, &xm);
		/*
		 * If we want this to happen immediately, issue a dummy
		 * command, since most adapters can't really negotiate unless
		 * they're executing a job.
		 */
		if (immed != 0) {
			(void) scsipi_test_unit_ready(itperiph,
			    XS_CTL_DISCOVERY | XS_CTL_IGNORE_ILLEGAL_REQUEST |
			    XS_CTL_IGNORE_NOT_READY |
			    XS_CTL_IGNORE_MEDIA_CHANGE);
		}
	}
}
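
/*
 * Illustrative call (sketch): a periph driver that has just changed a
 * parameter affecting the transfer agreement can force an immediate
 * renegotiation on its I_T nexus with:
 *
 *	scsipi_set_xfer_mode(periph->periph_channel,
 *	    periph->periph_target, 1);
 */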

/*
 * scsipi_async_event_channel_reset:
 *
 *	Handle a SCSI bus reset.  Called with the channel lock held.
 */
static void
scsipi_async_event_channel_reset(struct scsipi_channel *chan)
{
	struct scsipi_xfer *xs, *xs_next;
	struct scsipi_periph *periph;
	int target, lun;

	/*
	 * Channel has been reset.  Also mark pending REQUEST_SENSE
	 * commands as reset, since the sense data is no longer available.
	 * We can't call scsipi_done() from here, as the command has not
	 * been sent to the adapter yet (that would corrupt the accounting).
	 */

	for (xs = TAILQ_FIRST(&chan->chan_queue); xs != NULL; xs = xs_next) {
		xs_next = TAILQ_NEXT(xs, channel_q);
		if (xs->xs_control & XS_CTL_REQSENSE) {
			TAILQ_REMOVE(&chan->chan_queue, xs, channel_q);
			xs->error = XS_RESET;
			if ((xs->xs_control & XS_CTL_ASYNC) != 0)
				TAILQ_INSERT_TAIL(&chan->chan_complete, xs,
				    channel_q);
		}
	}
	cv_broadcast(chan_cv_complete(chan));

	/* Catch xs with pending sense which may not have a REQSENSE xs yet */
	for (target = 0; target < chan->chan_ntargets; target++) {
		if (target == chan->chan_id)
			continue;
		for (lun = 0; lun < chan->chan_nluns; lun++) {
			periph = scsipi_lookup_periph_locked(chan, target,
			    lun);
			if (periph) {
				xs = periph->periph_xscheck;
				if (xs)
					xs->error = XS_RESET;
			}
		}
	}
}

/*
 * scsipi_target_detach:
 *
 *	Detach all periphs associated with an I_T nexus.
 *	Must be called from valid thread context.
 */
int
scsipi_target_detach(struct scsipi_channel *chan, int target, int lun,
    int flags)
{
	struct scsipi_periph *periph;
	device_t tdev;
	int ctarget, mintarget, maxtarget;
	int clun, minlun, maxlun;
	int error = 0;

	if (target == -1) {
		mintarget = 0;
		maxtarget = chan->chan_ntargets;
	} else {
		if (target == chan->chan_id)
			return EINVAL;
		if (target < 0 || target >= chan->chan_ntargets)
			return EINVAL;
		mintarget = target;
		maxtarget = target + 1;
	}

	if (lun == -1) {
		minlun = 0;
		maxlun = chan->chan_nluns;
	} else {
		if (lun < 0 || lun >= chan->chan_nluns)
			return EINVAL;
		minlun = lun;
		maxlun = lun + 1;
	}

	/* for config_detach */
	KERNEL_LOCK(1, curlwp);

	mutex_enter(chan_mtx(chan));
	for (ctarget = mintarget; ctarget < maxtarget; ctarget++) {
		if (ctarget == chan->chan_id)
			continue;

		for (clun = minlun; clun < maxlun; clun++) {
			periph = scsipi_lookup_periph_locked(chan, ctarget,
			    clun);
			if (periph == NULL)
				continue;
			tdev = periph->periph_dev;
			mutex_exit(chan_mtx(chan));
			error = config_detach(tdev, flags);
			if (error)
				goto out;
			mutex_enter(chan_mtx(chan));
			KASSERT(scsipi_lookup_periph_locked(chan, ctarget,
			    clun) == NULL);
		}
	}
	mutex_exit(chan_mtx(chan));

 out:
	KERNEL_UNLOCK_ONE(curlwp);

	return error;
}
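
/*
 * Illustrative call (sketch): an adapter driver being unconfigured can
 * use the wildcard form to detach every periph on the channel before
 * tearing down the bus:
 *
 *	error = scsipi_target_detach(chan, -1, -1, DETACH_FORCE);
 */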

/*
 * scsipi_adapter_addref:
 *
 *	Add a reference to the adapter pointed to by the provided
 *	link, enabling the adapter if necessary.
 */
int
scsipi_adapter_addref(struct scsipi_adapter *adapt)
{
	int error = 0;

	if (atomic_inc_uint_nv(&adapt->adapt_refcnt) == 1
	    && adapt->adapt_enable != NULL) {
		scsipi_adapter_lock(adapt);
		error = scsipi_adapter_enable(adapt, 1);
		scsipi_adapter_unlock(adapt);
		if (error)
			atomic_dec_uint(&adapt->adapt_refcnt);
	}
	return error;
}

/*
 * scsipi_adapter_delref:
 *
 *	Delete a reference to the adapter pointed to by the provided
 *	link, disabling the adapter if possible.
 */
void
scsipi_adapter_delref(struct scsipi_adapter *adapt)
{

	if (atomic_dec_uint_nv(&adapt->adapt_refcnt) == 0
	    && adapt->adapt_enable != NULL) {
		scsipi_adapter_lock(adapt);
		(void) scsipi_adapter_enable(adapt, 0);
		scsipi_adapter_unlock(adapt);
	}
}
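
/*
 * Illustrative usage (sketch): the two calls above bracket any period
 * during which the adapter must stay enabled, typically an open/close
 * pair in a periph driver:
 *
 *	if ((error = scsipi_adapter_addref(adapt)) != 0)
 *		return error;
 *	... device is usable here ...
 *	scsipi_adapter_delref(adapt);
 */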

static struct scsipi_syncparam {
	int	ss_factor;
	int	ss_period;	/* ns * 100 */
} scsipi_syncparams[] = {
	{ 0x08,		 625 },	/* FAST-160 (Ultra320) */
	{ 0x09,		1250 },	/* FAST-80 (Ultra160) */
	{ 0x0a,		2500 },	/* FAST-40 40MHz (Ultra2) */
	{ 0x0b,		3030 },	/* FAST-40 33MHz (Ultra2) */
	{ 0x0c,		5000 },	/* FAST-20 (Ultra) */
};
static const int scsipi_nsyncparams =
    sizeof(scsipi_syncparams) / sizeof(scsipi_syncparams[0]);

int
scsipi_sync_period_to_factor(int period /* ns * 100 */)
{
	int i;

	for (i = 0; i < scsipi_nsyncparams; i++) {
		if (period <= scsipi_syncparams[i].ss_period)
			return scsipi_syncparams[i].ss_factor;
	}

	return (period / 100) / 4;
}

int
scsipi_sync_factor_to_period(int factor)
{
	int i;

	for (i = 0; i < scsipi_nsyncparams; i++) {
		if (factor == scsipi_syncparams[i].ss_factor)
			return scsipi_syncparams[i].ss_period;
	}

	return (factor * 4) * 100;
}

int
scsipi_sync_factor_to_freq(int factor)
{
	int i;

	for (i = 0; i < scsipi_nsyncparams; i++) {
		if (factor == scsipi_syncparams[i].ss_factor)
			return 100000000 / scsipi_syncparams[i].ss_period;
	}

	return 10000000 / ((factor * 4) * 10);
}
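
/*
 * Worked example of the conversions above: factor 0x0c (FAST-20) is in
 * the table with ss_period 5000, i.e. a 50.00 ns period, so
 * scsipi_sync_factor_to_freq(0x0c) returns 100000000 / 5000 = 20000 kHz.
 * A factor outside the table, e.g. 0x32 (50), falls through to the
 * legacy formulas: period = (50 * 4) * 100 = 20000 (200.00 ns) and
 * frequency = 10000000 / ((50 * 4) * 10) = 5000 kHz.
 */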

static inline void
scsipi_adapter_lock(struct scsipi_adapter *adapt)
{

	if ((adapt->adapt_flags & SCSIPI_ADAPT_MPSAFE) == 0)
		KERNEL_LOCK(1, NULL);
}

static inline void
scsipi_adapter_unlock(struct scsipi_adapter *adapt)
{

	if ((adapt->adapt_flags & SCSIPI_ADAPT_MPSAFE) == 0)
		KERNEL_UNLOCK_ONE(NULL);
}

void
scsipi_adapter_minphys(struct scsipi_channel *chan, struct buf *bp)
{
	struct scsipi_adapter *adapt = chan->chan_adapter;

	scsipi_adapter_lock(adapt);
	(adapt->adapt_minphys)(bp);
	scsipi_adapter_unlock(adapt);
}

void
scsipi_adapter_request(struct scsipi_channel *chan,
    scsipi_adapter_req_t req, void *arg)
{
	struct scsipi_adapter *adapt = chan->chan_adapter;

	scsipi_adapter_lock(adapt);
	(adapt->adapt_request)(chan, req, arg);
	scsipi_adapter_unlock(adapt);
}

int
scsipi_adapter_ioctl(struct scsipi_channel *chan, u_long cmd,
    void *data, int flag, struct proc *p)
{
	struct scsipi_adapter *adapt = chan->chan_adapter;
	int error;

	if (adapt->adapt_ioctl == NULL)
		return ENOTTY;

	scsipi_adapter_lock(adapt);
	error = (adapt->adapt_ioctl)(chan, cmd, data, flag, p);
	scsipi_adapter_unlock(adapt);
	return error;
}

int
scsipi_adapter_enable(struct scsipi_adapter *adapt, int enable)
{
	int error;

	scsipi_adapter_lock(adapt);
	error = (adapt->adapt_enable)(adapt->adapt_dev, enable);
	scsipi_adapter_unlock(adapt);
	return error;
}

#ifdef SCSIPI_DEBUG
/*
 * Given a scsipi_xfer, dump the request, in all its glory
 */
void
show_scsipi_xs(struct scsipi_xfer *xs)
{

	printf("xs(%p): ", xs);
	printf("xs_control(0x%08x)", xs->xs_control);
	printf("xs_status(0x%08x)", xs->xs_status);
	printf("periph(%p)", xs->xs_periph);
	printf("retr(0x%x)", xs->xs_retries);
	printf("timo(0x%x)", xs->timeout);
	printf("cmd(%p)", xs->cmd);
	printf("len(0x%x)", xs->cmdlen);
	printf("data(%p)", xs->data);
	printf("len(0x%x)", xs->datalen);
	printf("res(0x%x)", xs->resid);
	printf("err(0x%x)", xs->error);
	printf("bp(%p)", xs->bp);
	show_scsipi_cmd(xs);
}

void
show_scsipi_cmd(struct scsipi_xfer *xs)
{
	u_char *b = (u_char *) xs->cmd;
	int i = 0;

	scsipi_printaddr(xs->xs_periph);
	printf(" command: ");

	if ((xs->xs_control & XS_CTL_RESET) == 0) {
		while (i < xs->cmdlen) {
			if (i)
				printf(",");
			printf("0x%x", b[i++]);
		}
		printf("-[%d bytes]\n", xs->datalen);
		if (xs->datalen)
			show_mem(xs->data, uimin(64, xs->datalen));
	} else
		printf("-RESET-\n");
}

void
show_mem(u_char *address, int num)
{
	int x;

	printf("------------------------------");
	for (x = 0; x < num; x++) {
		if ((x % 16) == 0)
			printf("\n%03d: ", x);
		printf("%02x ", *address++);
	}
	printf("\n------------------------------\n");
}
#endif /* SCSIPI_DEBUG */