/*	$NetBSD: scsipi_base.c,v 1.103 2004/03/15 22:43:43 bouyer Exp $	*/

/*-
 * Copyright (c) 1998, 1999, 2000, 2002, 2003 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Charles M. Hannum; by Jason R. Thorpe of the Numerical Aerospace
 * Simulation Facility, NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: scsipi_base.c,v 1.103 2004/03/15 22:43:43 bouyer Exp $");

#include "opt_scsi.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/buf.h>
#include <sys/uio.h>
#include <sys/malloc.h>
#include <sys/pool.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/proc.h>
#include <sys/kthread.h>
#include <sys/hash.h>

#include <dev/scsipi/scsipi_all.h>
#include <dev/scsipi/scsipi_disk.h>
#include <dev/scsipi/scsipiconf.h>
#include <dev/scsipi/scsipi_base.h>

#include <dev/scsipi/scsi_all.h>
#include <dev/scsipi/scsi_message.h>

int	scsipi_complete __P((struct scsipi_xfer *));
void	scsipi_request_sense __P((struct scsipi_xfer *));
int	scsipi_enqueue __P((struct scsipi_xfer *));
void	scsipi_run_queue __P((struct scsipi_channel *chan));

void	scsipi_completion_thread __P((void *));

void	scsipi_get_tag __P((struct scsipi_xfer *));
void	scsipi_put_tag __P((struct scsipi_xfer *));

int	scsipi_get_resource __P((struct scsipi_channel *));
void	scsipi_put_resource __P((struct scsipi_channel *));
__inline int scsipi_grow_resources __P((struct scsipi_channel *));

void	scsipi_async_event_max_openings __P((struct scsipi_channel *,
	    struct scsipi_max_openings *));
void	scsipi_async_event_xfer_mode __P((struct scsipi_channel *,
	    struct scsipi_xfer_mode *));
void	scsipi_async_event_channel_reset __P((struct scsipi_channel *));

struct pool scsipi_xfer_pool;

/*
 * scsipi_init:
 *
 *	Called when a scsibus or atapibus is attached to the system
 *	to initialize shared data structures.
 */
void
scsipi_init()
{
	static int scsipi_init_done;

	if (scsipi_init_done)
		return;
	scsipi_init_done = 1;

	/* Initialize the scsipi_xfer pool. */
	pool_init(&scsipi_xfer_pool, sizeof(struct scsipi_xfer), 0,
	    0, 0, "scxspl", NULL);
}

/*
 * scsipi_channel_init:
 *
 *	Initialize a scsipi_channel when it is attached.
 */
int
scsipi_channel_init(chan)
	struct scsipi_channel *chan;
{
	int i;

	/* Initialize shared data. */
	scsipi_init();

	/* Initialize the queues. */
	TAILQ_INIT(&chan->chan_queue);
	TAILQ_INIT(&chan->chan_complete);

	for (i = 0; i < SCSIPI_CHAN_PERIPH_BUCKETS; i++)
		LIST_INIT(&chan->chan_periphtab[i]);

	/*
	 * Create the asynchronous completion thread.
	 */
	kthread_create(scsipi_create_completion_thread, chan);
	return (0);
}

/*
 * scsipi_channel_shutdown:
 *
 *	Shutdown a scsipi_channel.
 */
void
scsipi_channel_shutdown(chan)
	struct scsipi_channel *chan;
{

	/*
	 * Shut down the completion thread.
	 */
	chan->chan_tflags |= SCSIPI_CHANT_SHUTDOWN;
	wakeup(&chan->chan_complete);

	/*
	 * Now wait for the thread to exit.
	 */
	while (chan->chan_thread != NULL)
		(void) tsleep(&chan->chan_thread, PRIBIO, "scshut", 0);
}

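/*
 * scsipi_chan_periph_hash:
 *
 *	Hash a (target, lun) pair into an index into the channel's
 *	periph hash table.
 */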
static uint32_t
scsipi_chan_periph_hash(uint64_t t, uint64_t l)
{
	uint32_t hash;

	hash = hash32_buf(&t, sizeof(t), HASH32_BUF_INIT);
	hash = hash32_buf(&l, sizeof(l), hash);

	return (hash & SCSIPI_CHAN_PERIPH_HASHMASK);
}

/*
 * scsipi_insert_periph:
 *
 *	Insert a periph into the channel.
 */
void
scsipi_insert_periph(chan, periph)
	struct scsipi_channel *chan;
	struct scsipi_periph *periph;
{
	uint32_t hash;
	int s;

	hash = scsipi_chan_periph_hash(periph->periph_target,
	    periph->periph_lun);

	s = splbio();
	LIST_INSERT_HEAD(&chan->chan_periphtab[hash], periph, periph_hash);
	splx(s);
}

/*
 * scsipi_remove_periph:
 *
 *	Remove a periph from the channel.
 */
void
scsipi_remove_periph(chan, periph)
	struct scsipi_channel *chan;
	struct scsipi_periph *periph;
{
	int s;

	s = splbio();
	LIST_REMOVE(periph, periph_hash);
	splx(s);
}

/*
 * scsipi_lookup_periph:
 *
 *	Lookup a periph on the specified channel.
 */
struct scsipi_periph *
scsipi_lookup_periph(chan, target, lun)
	struct scsipi_channel *chan;
	int target, lun;
{
	struct scsipi_periph *periph;
	uint32_t hash;
	int s;

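	/* Reject out-of-range target/lun values before consulting the table. */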
	if (target >= chan->chan_ntargets ||
	    lun >= chan->chan_nluns)
		return (NULL);

	hash = scsipi_chan_periph_hash(target, lun);

	s = splbio();
	LIST_FOREACH(periph, &chan->chan_periphtab[hash], periph_hash) {
		if (periph->periph_target == target &&
		    periph->periph_lun == lun)
			break;
	}
	splx(s);

	return (periph);
}

/*
 * scsipi_get_resource:
 *
 *	Allocate a single xfer `resource' from the channel.
 *
 *	NOTE: Must be called at splbio().
 */
int
scsipi_get_resource(chan)
	struct scsipi_channel *chan;
{
	struct scsipi_adapter *adapt = chan->chan_adapter;

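	/*
	 * Channels with SCSIPI_CHAN_OPENINGS manage their own openings;
	 * all others draw from the adapter-wide pool of openings.
	 */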
	if (chan->chan_flags & SCSIPI_CHAN_OPENINGS) {
		if (chan->chan_openings > 0) {
			chan->chan_openings--;
			return (1);
		}
		return (0);
	}

	if (adapt->adapt_openings > 0) {
		adapt->adapt_openings--;
		return (1);
	}
	return (0);
}

/*
 * scsipi_grow_resources:
 *
 *	Attempt to grow resources for a channel.  If this succeeds,
 *	we allocate one for our caller.
 *
 *	NOTE: Must be called at splbio().
 */
__inline int
scsipi_grow_resources(chan)
	struct scsipi_channel *chan;
{

	if (chan->chan_flags & SCSIPI_CHAN_CANGROW) {
		if ((chan->chan_flags & SCSIPI_CHAN_TACTIVE) == 0) {
			scsipi_adapter_request(chan,
			    ADAPTER_REQ_GROW_RESOURCES, NULL);
			return (scsipi_get_resource(chan));
		}
		/*
		 * ask the channel thread to do it. It'll have to thaw the
		 * queue
		 */
		scsipi_channel_freeze(chan, 1);
		chan->chan_tflags |= SCSIPI_CHANT_GROWRES;
		wakeup(&chan->chan_complete);
		return (0);
	}

	return (0);
}

/*
 * scsipi_put_resource:
 *
 *	Free a single xfer `resource' to the channel.
 *
 *	NOTE: Must be called at splbio().
 */
void
scsipi_put_resource(chan)
	struct scsipi_channel *chan;
{
	struct scsipi_adapter *adapt = chan->chan_adapter;

	if (chan->chan_flags & SCSIPI_CHAN_OPENINGS)
		chan->chan_openings++;
	else
		adapt->adapt_openings++;
}

/*
 * scsipi_get_tag:
 *
 *	Get a tag ID for the specified xfer.
 *
 *	NOTE: Must be called at splbio().
 */
void
scsipi_get_tag(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_periph *periph = xs->xs_periph;
	int bit, tag;
	u_int word;

	bit = 0;	/* XXX gcc */
	for (word = 0; word < PERIPH_NTAGWORDS; word++) {
		bit = ffs(periph->periph_freetags[word]);
		if (bit != 0)
			break;
	}
#ifdef DIAGNOSTIC
	if (word == PERIPH_NTAGWORDS) {
		scsipi_printaddr(periph);
		printf("no free tags\n");
		panic("scsipi_get_tag");
	}
#endif

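	/*
	 * ffs() returns a 1-based bit number; convert it to a 0-based
	 * index and encode the tag as (word * 32) + bit.
	 */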
	bit -= 1;
	periph->periph_freetags[word] &= ~(1 << bit);
	tag = (word << 5) | bit;

	/* XXX Should eventually disallow this completely. */
	if (tag >= periph->periph_openings) {
		scsipi_printaddr(periph);
		printf("WARNING: tag %d greater than available openings %d\n",
		    tag, periph->periph_openings);
	}

	xs->xs_tag_id = tag;
}

/*
 * scsipi_put_tag:
 *
 *	Put the tag ID for the specified xfer back into the pool.
 *
 *	NOTE: Must be called at splbio().
 */
void
scsipi_put_tag(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_periph *periph = xs->xs_periph;
	int word, bit;

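	/* Decode the tag back into its word index and bit position. */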
	word = xs->xs_tag_id >> 5;
	bit = xs->xs_tag_id & 0x1f;

	periph->periph_freetags[word] |= (1 << bit);
}

/*
 * scsipi_get_xs:
 *
 *	Allocate an xfer descriptor and associate it with the
 *	specified peripheral.  If the peripheral has no more
 *	available command openings, we either block waiting for
 *	one to become available, or fail.
 */
struct scsipi_xfer *
scsipi_get_xs(periph, flags)
	struct scsipi_periph *periph;
	int flags;
{
	struct scsipi_xfer *xs;
	int s;

	SC_DEBUG(periph, SCSIPI_DB3, ("scsipi_get_xs\n"));

	/*
	 * If we're cold, make sure we poll.
	 */
	if (cold)
		flags |= XS_CTL_NOSLEEP | XS_CTL_POLL;

#ifdef DIAGNOSTIC
	/*
	 * URGENT commands can never be ASYNC.
	 */
	if ((flags & (XS_CTL_URGENT|XS_CTL_ASYNC)) ==
	    (XS_CTL_URGENT|XS_CTL_ASYNC)) {
		scsipi_printaddr(periph);
		printf("URGENT and ASYNC\n");
		panic("scsipi_get_xs");
	}
#endif

	s = splbio();
	/*
	 * Wait for a command opening to become available.  Rules:
	 *
	 *	- All xfers must wait for an available opening.
	 *	  Exception: URGENT xfers can proceed when
	 *	  active == openings, because we use the opening
	 *	  of the command we're recovering for.
	 *	- If the periph has sense pending, only URGENT & REQSENSE
	 *	  xfers may proceed.
	 *
	 *	- If the periph is recovering, only URGENT xfers may
	 *	  proceed.
	 *
	 *	- If the periph is currently executing a recovery
	 *	  command, URGENT commands must block, because only
	 *	  one recovery command can execute at a time.
	 */
	for (;;) {
		if (flags & XS_CTL_URGENT) {
			if (periph->periph_active > periph->periph_openings)
				goto wait_for_opening;
			if (periph->periph_flags & PERIPH_SENSE) {
				if ((flags & XS_CTL_REQSENSE) == 0)
					goto wait_for_opening;
			} else {
				if ((periph->periph_flags &
				    PERIPH_RECOVERY_ACTIVE) != 0)
					goto wait_for_opening;
				periph->periph_flags |= PERIPH_RECOVERY_ACTIVE;
			}
			break;
		}
		if (periph->periph_active >= periph->periph_openings ||
		    (periph->periph_flags & PERIPH_RECOVERING) != 0)
			goto wait_for_opening;
		periph->periph_active++;
		break;

 wait_for_opening:
		if (flags & XS_CTL_NOSLEEP) {
			splx(s);
			return (NULL);
		}
		SC_DEBUG(periph, SCSIPI_DB3, ("sleeping\n"));
		periph->periph_flags |= PERIPH_WAITING;
		(void) tsleep(periph, PRIBIO, "getxs", 0);
	}
	SC_DEBUG(periph, SCSIPI_DB3, ("calling pool_get\n"));
	xs = pool_get(&scsipi_xfer_pool,
	    ((flags & XS_CTL_NOSLEEP) != 0 ? PR_NOWAIT : PR_WAITOK));
	if (xs == NULL) {
		if (flags & XS_CTL_URGENT) {
			if ((flags & XS_CTL_REQSENSE) == 0)
				periph->periph_flags &= ~PERIPH_RECOVERY_ACTIVE;
		} else
			periph->periph_active--;
		scsipi_printaddr(periph);
		printf("unable to allocate %sscsipi_xfer\n",
		    (flags & XS_CTL_URGENT) ? "URGENT " : "");
	}
	splx(s);

	SC_DEBUG(periph, SCSIPI_DB3, ("returning\n"));

	if (xs != NULL) {
		memset(xs, 0, sizeof(*xs));
		callout_init(&xs->xs_callout);
		xs->xs_periph = periph;
		xs->xs_control = flags;
		xs->xs_status = 0;
		s = splbio();
		TAILQ_INSERT_TAIL(&periph->periph_xferq, xs, device_q);
		splx(s);
	}
	return (xs);
}

/*
 * scsipi_put_xs:
 *
 *	Release an xfer descriptor, decreasing the outstanding command
 *	count for the peripheral.  If there is a thread waiting for
 *	an opening, wake it up.  If not, kick any queued I/O the
 *	peripheral may have.
 *
 *	NOTE: Must be called at splbio().
 */
void
scsipi_put_xs(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_periph *periph = xs->xs_periph;
	int flags = xs->xs_control;

	SC_DEBUG(periph, SCSIPI_DB3, ("scsipi_free_xs\n"));

	TAILQ_REMOVE(&periph->periph_xferq, xs, device_q);
	pool_put(&scsipi_xfer_pool, xs);

#ifdef DIAGNOSTIC
	if ((periph->periph_flags & PERIPH_RECOVERY_ACTIVE) != 0 &&
	    periph->periph_active == 0) {
		scsipi_printaddr(periph);
		printf("recovery without a command to recover for\n");
		panic("scsipi_put_xs");
	}
#endif

	if (flags & XS_CTL_URGENT) {
		if ((flags & XS_CTL_REQSENSE) == 0)
			periph->periph_flags &= ~PERIPH_RECOVERY_ACTIVE;
	} else
		periph->periph_active--;
	if (periph->periph_active == 0 &&
	    (periph->periph_flags & PERIPH_WAITDRAIN) != 0) {
		periph->periph_flags &= ~PERIPH_WAITDRAIN;
		wakeup(&periph->periph_active);
	}

	if (periph->periph_flags & PERIPH_WAITING) {
		periph->periph_flags &= ~PERIPH_WAITING;
		wakeup(periph);
	} else {
		if (periph->periph_switch->psw_start != NULL) {
			SC_DEBUG(periph, SCSIPI_DB2,
			    ("calling private start()\n"));
			(*periph->periph_switch->psw_start)(periph);
		}
	}
}

/*
 * scsipi_channel_freeze:
 *
 *	Freeze a channel's xfer queue.
 */
void
scsipi_channel_freeze(chan, count)
	struct scsipi_channel *chan;
	int count;
{
	int s;

	s = splbio();
	chan->chan_qfreeze += count;
	splx(s);
}

/*
 * scsipi_channel_thaw:
 *
 *	Thaw a channel's xfer queue.
 */
void
scsipi_channel_thaw(chan, count)
	struct scsipi_channel *chan;
	int count;
{
	int s;

	s = splbio();
	chan->chan_qfreeze -= count;
	/*
	 * Don't let the freeze count go negative.
	 *
	 * Presumably the adapter driver could keep track of this,
	 * but it might just be easier to do this here so as to allow
	 * multiple callers, including those outside the adapter driver.
	 */
	if (chan->chan_qfreeze < 0) {
		chan->chan_qfreeze = 0;
	}
	splx(s);
	/*
	 * Kick the channel's queue here.  Note, we may be running in
	 * interrupt context (softclock or HBA's interrupt), so the adapter
	 * driver had better not sleep.
	 */
	if (chan->chan_qfreeze == 0)
		scsipi_run_queue(chan);
}

/*
 * scsipi_channel_timed_thaw:
 *
 *	Thaw a channel after some time has expired.  This will also
 *	run the channel's queue if the freeze count has reached 0.
 */
void
scsipi_channel_timed_thaw(arg)
	void *arg;
{
	struct scsipi_channel *chan = arg;

	scsipi_channel_thaw(chan, 1);
}

/*
 * scsipi_periph_freeze:
 *
 *	Freeze a device's xfer queue.
 */
void
scsipi_periph_freeze(periph, count)
	struct scsipi_periph *periph;
	int count;
{
	int s;

	s = splbio();
	periph->periph_qfreeze += count;
	splx(s);
}

/*
 * scsipi_periph_thaw:
 *
 *	Thaw a device's xfer queue.
 */
void
scsipi_periph_thaw(periph, count)
	struct scsipi_periph *periph;
	int count;
{
	int s;

	s = splbio();
	periph->periph_qfreeze -= count;
#ifdef DIAGNOSTIC
	if (periph->periph_qfreeze < 0) {
		static const char pc[] = "periph freeze count < 0";
		scsipi_printaddr(periph);
		printf("%s\n", pc);
		panic(pc);
	}
#endif
	if (periph->periph_qfreeze == 0 &&
	    (periph->periph_flags & PERIPH_WAITING) != 0)
		wakeup(periph);
	splx(s);
}

/*
 * scsipi_periph_timed_thaw:
 *
 *	Thaw a device after some time has expired.
 */
void
scsipi_periph_timed_thaw(arg)
	void *arg;
{
	int s;
	struct scsipi_periph *periph = arg;

	callout_stop(&periph->periph_callout);

	s = splbio();
	scsipi_periph_thaw(periph, 1);
	if ((periph->periph_channel->chan_flags & SCSIPI_CHAN_TACTIVE) == 0) {
		/*
		 * Kick the channel's queue here.  Note, we're running in
		 * interrupt context (softclock), so the adapter driver
		 * had better not sleep.
		 */
		scsipi_run_queue(periph->periph_channel);
	} else {
		/*
		 * Tell the completion thread to kick the channel's queue here.
		 */
		periph->periph_channel->chan_tflags |= SCSIPI_CHANT_KICK;
		wakeup(&periph->periph_channel->chan_complete);
	}
	splx(s);
}

/*
 * scsipi_wait_drain:
 *
 *	Wait for a periph's pending xfers to drain.
 */
void
scsipi_wait_drain(periph)
	struct scsipi_periph *periph;
{
	int s;

	s = splbio();
	while (periph->periph_active != 0) {
		periph->periph_flags |= PERIPH_WAITDRAIN;
		(void) tsleep(&periph->periph_active, PRIBIO, "sxdrn", 0);
	}
	splx(s);
}

/*
 * scsipi_kill_pending:
 *
 *	Kill off all pending xfers for a periph.
 *
 *	NOTE: Must be called at splbio().
 */
void
scsipi_kill_pending(periph)
	struct scsipi_periph *periph;
{

	(*periph->periph_channel->chan_bustype->bustype_kill_pending)(periph);
#ifdef DIAGNOSTIC
	if (TAILQ_FIRST(&periph->periph_xferq) != NULL)
		panic("scsipi_kill_pending");
#endif
	scsipi_wait_drain(periph);
}

/*
 * scsipi_print_cbd:
 *	prints a command block descriptor (for debug purposes, error messages,
 *	SCSIPI_VERBOSE, ...)
 */
void
scsipi_print_cbd(cmd)
	struct scsipi_generic *cmd;
{
	int i, j;

	printf("0x%02x", cmd->opcode);

	switch (CDB_GROUPID(cmd->opcode)) {
	case CDB_GROUPID_0:
		j = CDB_GROUP0;
		break;
	case CDB_GROUPID_1:
		j = CDB_GROUP1;
		break;
	case CDB_GROUPID_2:
		j = CDB_GROUP2;
		break;
	case CDB_GROUPID_3:
		j = CDB_GROUP3;
		break;
	case CDB_GROUPID_4:
		j = CDB_GROUP4;
		break;
	case CDB_GROUPID_5:
		j = CDB_GROUP5;
		break;
	case CDB_GROUPID_6:
		j = CDB_GROUP6;
		break;
	case CDB_GROUPID_7:
		j = CDB_GROUP7;
		break;
	default:
		j = 0;
	}
	if (j == 0)
		j = sizeof (cmd->bytes);
	for (i = 0; i < j-1; i++)	/* already done the opcode */
		printf(" %02x", cmd->bytes[i]);
}

/*
 * scsipi_interpret_sense:
 *
 *	Look at the returned sense and act on the error, determining
 *	the unix error number to pass back.  (0 = report no error)
 *
 *	NOTE: If we return ERESTART, we are expected to have
 *	thawed the device!
 *
 *	THIS IS THE DEFAULT ERROR HANDLER FOR SCSI DEVICES.
 */
int
scsipi_interpret_sense(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_sense_data *sense;
	struct scsipi_periph *periph = xs->xs_periph;
	u_int8_t key;
	int error;
#ifndef SCSIVERBOSE
	u_int32_t info;
	static char *error_mes[] = {
		"soft error (corrected)",
		"not ready", "medium error",
		"non-media hardware failure", "illegal request",
		"unit attention", "readonly device",
		"no data found", "vendor unique",
		"copy aborted", "command aborted",
		"search returned equal", "volume overflow",
		"verify miscompare", "unknown error key"
	};
#endif

	sense = &xs->sense.scsi_sense;
#ifdef SCSIPI_DEBUG
	if (periph->periph_flags & SCSIPI_DB1) {
		int count;
		scsipi_printaddr(periph);
		printf(" sense debug information:\n");
		printf("\tcode 0x%x valid 0x%x\n",
			sense->error_code & SSD_ERRCODE,
			sense->error_code & SSD_ERRCODE_VALID ? 1 : 0);
		printf("\tseg 0x%x key 0x%x ili 0x%x eom 0x%x fmark 0x%x\n",
			sense->segment,
			sense->flags & SSD_KEY,
			sense->flags & SSD_ILI ? 1 : 0,
			sense->flags & SSD_EOM ? 1 : 0,
			sense->flags & SSD_FILEMARK ? 1 : 0);
		printf("\ninfo: 0x%x 0x%x 0x%x 0x%x followed by %d "
			"extra bytes\n",
			sense->info[0],
			sense->info[1],
			sense->info[2],
			sense->info[3],
			sense->extra_len);
		printf("\textra: ");
		for (count = 0; count < ADD_BYTES_LIM(sense); count++)
			printf("0x%x ", sense->cmd_spec_info[count]);
		printf("\n");
	}
#endif

	/*
	 * If the periph has its own error handler, call it first.
	 * If it returns a legit error value, return that, otherwise
	 * it wants us to continue with normal error processing.
	 */
	if (periph->periph_switch->psw_error != NULL) {
		SC_DEBUG(periph, SCSIPI_DB2,
		    ("calling private err_handler()\n"));
		error = (*periph->periph_switch->psw_error)(xs);
		if (error != EJUSTRETURN)
			return (error);
	}
	/* otherwise use the default */
	switch (sense->error_code & SSD_ERRCODE) {

		/*
		 * Old SCSI-1 and SASI devices respond with
		 * codes other than 70.
		 */
	case 0x00:		/* no error (command completed OK) */
		return (0);
	case 0x04:		/* drive not ready after it was selected */
		if ((periph->periph_flags & PERIPH_REMOVABLE) != 0)
			periph->periph_flags &= ~PERIPH_MEDIA_LOADED;
		if ((xs->xs_control & XS_CTL_IGNORE_NOT_READY) != 0)
			return (0);
		/* XXX - display some sort of error here? */
		return (EIO);
	case 0x20:		/* invalid command */
		if ((xs->xs_control &
		    XS_CTL_IGNORE_ILLEGAL_REQUEST) != 0)
			return (0);
		return (EINVAL);
	case 0x25:		/* invalid LUN (Adaptec ACB-4000) */
		return (EACCES);

		/*
		 * If it's code 70, use the extended stuff and
		 * interpret the key
		 */
	case 0x71:		/* delayed error */
		scsipi_printaddr(periph);
		key = sense->flags & SSD_KEY;
		printf(" DEFERRED ERROR, key = 0x%x\n", key);
		/* FALLTHROUGH */
	case 0x70:
#ifndef SCSIVERBOSE
		if ((sense->error_code & SSD_ERRCODE_VALID) != 0)
			info = _4btol(sense->info);
		else
			info = 0;
#endif
		key = sense->flags & SSD_KEY;

		switch (key) {
		case SKEY_NO_SENSE:
		case SKEY_RECOVERED_ERROR:
			if (xs->resid == xs->datalen && xs->datalen) {
				/*
				 * Why is this here?
				 */
				xs->resid = 0;	/* not short read */
			}
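			/* FALLTHROUGH */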
		case SKEY_EQUAL:
			error = 0;
			break;
		case SKEY_NOT_READY:
			if ((periph->periph_flags & PERIPH_REMOVABLE) != 0)
				periph->periph_flags &= ~PERIPH_MEDIA_LOADED;
			if ((xs->xs_control & XS_CTL_IGNORE_NOT_READY) != 0)
				return (0);
			if (sense->add_sense_code == 0x3A) {
				error = ENODEV; /* Medium not present */
				if (xs->xs_control & XS_CTL_SILENT_NODEV)
					return (error);
			} else
				error = EIO;
			if ((xs->xs_control & XS_CTL_SILENT) != 0)
				return (error);
			break;
		case SKEY_ILLEGAL_REQUEST:
			if ((xs->xs_control &
			    XS_CTL_IGNORE_ILLEGAL_REQUEST) != 0)
				return (0);
			/*
			 * Handle the case where a device reports
			 * Logical Unit Not Supported during discovery.
			 */
			if ((xs->xs_control & XS_CTL_DISCOVERY) != 0 &&
			    sense->add_sense_code == 0x25 &&
			    sense->add_sense_code_qual == 0x00)
				return (EINVAL);
			if ((xs->xs_control & XS_CTL_SILENT) != 0)
				return (EIO);
			error = EINVAL;
			break;
		case SKEY_UNIT_ATTENTION:
			if (sense->add_sense_code == 0x29 &&
			    sense->add_sense_code_qual == 0x00) {
				/* device or bus reset */
				if (xs->xs_retries != 0) {
					xs->xs_retries--;
					error = ERESTART;
				} else
					error = EIO;
				return (error);
			}
			if ((periph->periph_flags & PERIPH_REMOVABLE) != 0)
				periph->periph_flags &= ~PERIPH_MEDIA_LOADED;
			if ((xs->xs_control &
			    XS_CTL_IGNORE_MEDIA_CHANGE) != 0 ||
			    /* XXX Should reupload any transient state. */
			    (periph->periph_flags &
			     PERIPH_REMOVABLE) == 0) {
				return (ERESTART);
			}
			if ((xs->xs_control & XS_CTL_SILENT) != 0)
				return (EIO);
			error = EIO;
			break;
		case SKEY_WRITE_PROTECT:
			error = EROFS;
			break;
		case SKEY_BLANK_CHECK:
			error = 0;
			break;
		case SKEY_ABORTED_COMMAND:
			if (xs->xs_retries != 0) {
				xs->xs_retries--;
				error = ERESTART;
			} else
				error = EIO;
			break;
		case SKEY_VOLUME_OVERFLOW:
			error = ENOSPC;
			break;
		default:
			error = EIO;
			break;
		}

#ifdef SCSIVERBOSE
		if (key && (xs->xs_control & XS_CTL_SILENT) == 0)
			scsipi_print_sense(xs, 0);
#else
		if (key) {
			scsipi_printaddr(periph);
			printf("%s", error_mes[key - 1]);
			if ((sense->error_code & SSD_ERRCODE_VALID) != 0) {
				switch (key) {
				case SKEY_NOT_READY:
				case SKEY_ILLEGAL_REQUEST:
				case SKEY_UNIT_ATTENTION:
				case SKEY_WRITE_PROTECT:
					break;
				case SKEY_BLANK_CHECK:
					printf(", requested size: %d (decimal)",
					    info);
					break;
				case SKEY_ABORTED_COMMAND:
					if (xs->xs_retries)
						printf(", retrying");
					printf(", cmd 0x%x, info 0x%x",
					    xs->cmd->opcode, info);
					break;
				default:
					printf(", info = %d (decimal)", info);
				}
			}
			if (sense->extra_len != 0) {
				int n;
				printf(", data =");
				for (n = 0; n < sense->extra_len; n++)
					printf(" %02x",
					    sense->cmd_spec_info[n]);
			}
			printf("\n");
		}
#endif
		return (error);

	/*
	 * Some other code, just report it
	 */
	default:
#if defined(SCSIDEBUG) || defined(DEBUG)
	{
		static char *uc = "undecodable sense error";
		int i;
		u_int8_t *cptr = (u_int8_t *) sense;
		scsipi_printaddr(periph);
		if (xs->cmd == &xs->cmdstore) {
			printf("%s for opcode 0x%x, data=",
			    uc, xs->cmdstore.opcode);
		} else {
			printf("%s, data=", uc);
		}
		for (i = 0; i < sizeof (sense); i++)
			printf(" 0x%02x", *(cptr++) & 0xff);
		printf("\n");
	}
#else
		scsipi_printaddr(periph);
		printf("Sense Error Code 0x%x",
		    sense->error_code & SSD_ERRCODE);
		if ((sense->error_code & SSD_ERRCODE_VALID) != 0) {
			struct scsipi_sense_data_unextended *usense =
			    (struct scsipi_sense_data_unextended *)sense;
			printf(" at block no. %d (decimal)",
			    _3btol(usense->block));
		}
		printf("\n");
#endif
		return (EIO);
	}
}

/*
 * scsipi_size:
 *
 *	Find out from the device what its capacity is.
 */
u_int64_t
scsipi_size(periph, flags)
	struct scsipi_periph *periph;
	int flags;
{
	struct scsipi_read_cap_data rdcap;
	struct scsipi_read_capacity scsipi_cmd;

	memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
	scsipi_cmd.opcode = READ_CAPACITY;

	/*
	 * If the command works, interpret the result as a 4 byte
	 * number of blocks
	 */
	if (scsipi_command(periph, (struct scsipi_generic *)&scsipi_cmd,
	    sizeof(scsipi_cmd), (u_char *)&rdcap, sizeof(rdcap),
	    SCSIPIRETRIES, 20000, NULL,
	    flags | XS_CTL_DATA_IN | XS_CTL_DATA_ONSTACK | XS_CTL_SILENT) != 0)
		return (0);

	return (_4btol(rdcap.addr) + 1);
}

/*
 * scsipi_test_unit_ready:
 *
 *	Issue a `test unit ready' request.
 */
int
scsipi_test_unit_ready(periph, flags)
	struct scsipi_periph *periph;
	int flags;
{
	int retries;
	struct scsipi_test_unit_ready scsipi_cmd;

	/* some ATAPI drives don't support TEST_UNIT_READY. Sigh */
	if (periph->periph_quirks & PQUIRK_NOTUR)
		return (0);

	memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
	scsipi_cmd.opcode = TEST_UNIT_READY;

	if (flags & XS_CTL_DISCOVERY)
		retries = 0;
	else
		retries = SCSIPIRETRIES;

	return (scsipi_command(periph,
	    (struct scsipi_generic *)&scsipi_cmd, sizeof(scsipi_cmd),
	    0, 0, retries, 10000, NULL, flags));
}

/*
 * scsipi_inquire:
 *
 *	Ask the device about itself.
 */
int
scsipi_inquire(periph, inqbuf, flags)
	struct scsipi_periph *periph;
	struct scsipi_inquiry_data *inqbuf;
	int flags;
{
	int retries;
	struct scsipi_inquiry scsipi_cmd;
	int error;

	memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
	scsipi_cmd.opcode = INQUIRY;

	if (flags & XS_CTL_DISCOVERY)
		retries = 0;
	else
		retries = SCSIPIRETRIES;

	/*
	 * If we request more data than the device can provide, it SHOULD just
	 * return a short response.  However, some devices error with an
	 * ILLEGAL REQUEST sense code, and yet others have even more special
	 * failure modes (such as the GL641USB flash adapter, which goes loony
	 * and sends corrupted CRCs).  To work around this, and to bring our
	 * behavior more in line with other OSes, we do a shorter inquiry,
	 * covering all the SCSI-2 information, first, and then request more
	 * data iff the "additional length" field indicates there is more.
	 * - mycroft, 2003/10/16
	 */
	scsipi_cmd.length = SCSIPI_INQUIRY_LENGTH_SCSI2;
	error = scsipi_command(periph,
	    (struct scsipi_generic *) &scsipi_cmd, sizeof(scsipi_cmd),
	    (u_char *) inqbuf, SCSIPI_INQUIRY_LENGTH_SCSI2,
	    retries, 10000, NULL, XS_CTL_DATA_IN | flags);
	if (!error &&
	    inqbuf->additional_length > SCSIPI_INQUIRY_LENGTH_SCSI2 - 4) {
		scsipi_cmd.length = SCSIPI_INQUIRY_LENGTH_SCSI3;
		error = scsipi_command(periph,
		    (struct scsipi_generic *) &scsipi_cmd, sizeof(scsipi_cmd),
		    (u_char *) inqbuf, SCSIPI_INQUIRY_LENGTH_SCSI3,
		    retries, 10000, NULL, XS_CTL_DATA_IN | flags);
	}

#ifdef SCSI_OLD_NOINQUIRY
	/*
	 * Kludge for the Adaptec ACB-4000 SCSI->MFM translator.
	 * This board doesn't support the INQUIRY command at all.
	 */
	if (error == EINVAL || error == EACCES) {
		/*
		 * Conjure up an INQUIRY response.
		 */
		inqbuf->device = (error == EINVAL ?
		    SID_QUAL_LU_PRESENT :
		    SID_QUAL_LU_NOTPRESENT) | T_DIRECT;
		inqbuf->dev_qual2 = 0;
		inqbuf->version = 0;
		inqbuf->response_format = SID_FORMAT_SCSI1;
		inqbuf->additional_length = SCSIPI_INQUIRY_LENGTH_SCSI2 - 4;
		inqbuf->flags1 = inqbuf->flags2 = inqbuf->flags3 = 0;
		memcpy(inqbuf->vendor, "ADAPTEC ACB-4000            ", 28);
		error = 0;
	}

	/*
	 * Kludge for the Emulex MT-02 SCSI->QIC translator.
	 * This board gives an empty response to an INQUIRY command.
	 */
	else if (error == 0 &&
	    inqbuf->device == (SID_QUAL_LU_PRESENT | T_DIRECT) &&
	    inqbuf->dev_qual2 == 0 &&
	    inqbuf->version == 0 &&
	    inqbuf->response_format == SID_FORMAT_SCSI1) {
		/*
		 * Fill out the INQUIRY response.
		 */
		inqbuf->device = (SID_QUAL_LU_PRESENT | T_SEQUENTIAL);
		inqbuf->dev_qual2 = SID_REMOVABLE;
		inqbuf->additional_length = SCSIPI_INQUIRY_LENGTH_SCSI2 - 4;
		inqbuf->flags1 = inqbuf->flags2 = inqbuf->flags3 = 0;
		memcpy(inqbuf->vendor, "EMULEX MT-02 QIC            ", 28);
	}
#endif /* SCSI_OLD_NOINQUIRY */

	return error;
}

/*
 * scsipi_prevent:
 *
 *	Prevent or allow the user to remove the media
 */
int
scsipi_prevent(periph, type, flags)
	struct scsipi_periph *periph;
	int type, flags;
{
	struct scsipi_prevent scsipi_cmd;

	memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
	scsipi_cmd.opcode = PREVENT_ALLOW;
	scsipi_cmd.how = type;

	return (scsipi_command(periph,
	    (struct scsipi_generic *) &scsipi_cmd, sizeof(scsipi_cmd),
	    0, 0, SCSIPIRETRIES, 5000, NULL, flags));
}

/*
 * scsipi_start:
 *
 *	Send a START UNIT.
 */
int
scsipi_start(periph, type, flags)
	struct scsipi_periph *periph;
	int type, flags;
{
	struct scsipi_start_stop scsipi_cmd;

	memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
	scsipi_cmd.opcode = START_STOP;
	scsipi_cmd.byte2 = 0x00;
	scsipi_cmd.how = type;

	return (scsipi_command(periph,
	    (struct scsipi_generic *) &scsipi_cmd, sizeof(scsipi_cmd),
	    0, 0, SCSIPIRETRIES, (type & SSS_START) ? 60000 : 10000,
	    NULL, flags));
}

/*
 * scsipi_mode_sense, scsipi_mode_sense_big:
 *	get a sense page from a device
 */

int
scsipi_mode_sense(periph, byte2, page, data, len, flags, retries, timeout)
	struct scsipi_periph *periph;
	int byte2, page, len, flags, retries, timeout;
	struct scsipi_mode_header *data;
{
	struct scsipi_mode_sense scsipi_cmd;
	int error;

	memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
	scsipi_cmd.opcode = MODE_SENSE;
	scsipi_cmd.byte2 = byte2;
	scsipi_cmd.page = page;
	scsipi_cmd.length = len & 0xff;
	error = scsipi_command(periph, (struct scsipi_generic *)&scsipi_cmd,
	    sizeof(scsipi_cmd), (void *)data, len, retries, timeout, NULL,
	    flags | XS_CTL_DATA_IN);
	SC_DEBUG(periph, SCSIPI_DB2,
	    ("scsipi_mode_sense: error=%d\n", error));
	return (error);
}

int
scsipi_mode_sense_big(periph, byte2, page, data, len, flags, retries, timeout)
	struct scsipi_periph *periph;
	int byte2, page, len, flags, retries, timeout;
	struct scsipi_mode_header_big *data;
{
	struct scsipi_mode_sense_big scsipi_cmd;
	int error;

	memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
	scsipi_cmd.opcode = MODE_SENSE_BIG;
	scsipi_cmd.byte2 = byte2;
	scsipi_cmd.page = page;
	_lto2b(len, scsipi_cmd.length);
	error = scsipi_command(periph, (struct scsipi_generic *)&scsipi_cmd,
	    sizeof(scsipi_cmd), (void *)data, len, retries, timeout, NULL,
	    flags | XS_CTL_DATA_IN);
	SC_DEBUG(periph, SCSIPI_DB2,
	    ("scsipi_mode_sense_big: error=%d\n", error));
	return (error);
}

int
scsipi_mode_select(periph, byte2, data, len, flags, retries, timeout)
	struct scsipi_periph *periph;
	int byte2, len, flags, retries, timeout;
	struct scsipi_mode_header *data;
{
	struct scsipi_mode_select scsipi_cmd;
	int error;

	memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
	scsipi_cmd.opcode = MODE_SELECT;
	scsipi_cmd.byte2 = byte2;
	scsipi_cmd.length = len & 0xff;
	error = scsipi_command(periph, (struct scsipi_generic *)&scsipi_cmd,
	    sizeof(scsipi_cmd), (void *)data, len, retries, timeout, NULL,
	    flags | XS_CTL_DATA_OUT);
	SC_DEBUG(periph, SCSIPI_DB2,
	    ("scsipi_mode_select: error=%d\n", error));
	return (error);
}

int
scsipi_mode_select_big(periph, byte2, data, len, flags, retries, timeout)
	struct scsipi_periph *periph;
	int byte2, len, flags, retries, timeout;
	struct scsipi_mode_header_big *data;
{
	struct scsipi_mode_select_big scsipi_cmd;
	int error;

	memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
	scsipi_cmd.opcode = MODE_SELECT_BIG;
	scsipi_cmd.byte2 = byte2;
	_lto2b(len, scsipi_cmd.length);
	error = scsipi_command(periph, (struct scsipi_generic *)&scsipi_cmd,
	    sizeof(scsipi_cmd), (void *)data, len, retries, timeout, NULL,
	    flags | XS_CTL_DATA_OUT);
	SC_DEBUG(periph, SCSIPI_DB2,
1339 ("scsipi_mode_select: error=%d\n", error));
	return (error);
}

/*
 * scsipi_done:
 *
 *	This routine is called by an adapter's interrupt handler when
 *	an xfer is completed.
 */
void
scsipi_done(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_periph *periph = xs->xs_periph;
	struct scsipi_channel *chan = periph->periph_channel;
	int s, freezecnt;

	SC_DEBUG(periph, SCSIPI_DB2, ("scsipi_done\n"));
#ifdef SCSIPI_DEBUG
	if (periph->periph_dbflags & SCSIPI_DB1)
		show_scsipi_cmd(xs);
#endif

	s = splbio();
	/*
	 * The resource this command was using is now free.
	 */
	scsipi_put_resource(chan);
	xs->xs_periph->periph_sent--;

	/*
	 * If the command was tagged, free the tag.
	 */
	if (XS_CTL_TAGTYPE(xs) != 0)
		scsipi_put_tag(xs);
	else
		periph->periph_flags &= ~PERIPH_UNTAG;

	/* Mark the command as `done'. */
	xs->xs_status |= XS_STS_DONE;

#ifdef DIAGNOSTIC
	if ((xs->xs_control & (XS_CTL_ASYNC|XS_CTL_POLL)) ==
	    (XS_CTL_ASYNC|XS_CTL_POLL))
		panic("scsipi_done: ASYNC and POLL");
#endif

	/*
	 * If the xfer had an error of any sort, freeze the
	 * periph's queue.  Freeze it again if we were requested
	 * to do so in the xfer.
	 */
	freezecnt = 0;
	if (xs->error != XS_NOERROR)
		freezecnt++;
	if (xs->xs_control & XS_CTL_FREEZE_PERIPH)
		freezecnt++;
	if (freezecnt != 0)
		scsipi_periph_freeze(periph, freezecnt);

	/*
	 * record the xfer with a pending sense, in case a SCSI reset is
	 * received before the thread is woken up.
	 */
	if (xs->error == XS_BUSY && xs->status == SCSI_CHECK) {
		periph->periph_flags |= PERIPH_SENSE;
		periph->periph_xscheck = xs;
	}

	/*
	 * If this was an xfer that was not to complete asynchronously,
	 * let the requesting thread perform error checking/handling
	 * in its context.
	 */
	if ((xs->xs_control & XS_CTL_ASYNC) == 0) {
		splx(s);
		/*
		 * If it's a polling job, just return, to unwind the
		 * call graph.  We don't need to restart the queue,
		 * because polling jobs are treated specially, and
		 * are really only used during crash dumps anyway
		 * (XXX or during boot-time autoconfiguration of
		 * ATAPI devices).
		 */
		if (xs->xs_control & XS_CTL_POLL)
			return;
		wakeup(xs);
		goto out;
	}

	/*
	 * Catch the extremely common case of I/O completing
	 * without error; no use in taking a context switch
	 * if we can handle it in interrupt context.
	 */
	if (xs->error == XS_NOERROR) {
		splx(s);
		(void) scsipi_complete(xs);
		goto out;
	}

	/*
	 * There is an error on this xfer.  Put it on the channel's
	 * completion queue, and wake up the completion thread.
	 */
	TAILQ_INSERT_TAIL(&chan->chan_complete, xs, channel_q);
	splx(s);
	wakeup(&chan->chan_complete);

 out:
	/*
	 * If there are more xfers on the channel's queue, attempt to
	 * run them.
	 */
	scsipi_run_queue(chan);
}

/*
 * scsipi_complete:
 *
 *	Completion of a scsipi_xfer.  This is the guts of scsipi_done().
 *
 *	NOTE: This routine MUST be called with valid thread context
 *	except for the case where the following two conditions are
 *	true:
 *
 *		xs->error == XS_NOERROR
 *		XS_CTL_ASYNC is set in xs->xs_control
 *
 *	The semantics of this routine can be tricky, so here is an
 *	explanation:
 *
 *		0		Xfer completed successfully.
 *
 *		ERESTART	Xfer had an error, but was restarted.
 *
 *		anything else	Xfer had an error, return value is Unix
 *				errno.
 *
 *	If the return value is anything but ERESTART:
 *
 *		- If XS_CTL_ASYNC is set, `xs' has been freed back to
 *		  the pool.
 *		- If there is a buf associated with the xfer,
 *		  it has been biodone()'d.
 */
int
scsipi_complete(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_periph *periph = xs->xs_periph;
	struct scsipi_channel *chan = periph->periph_channel;
	struct buf *bp;
	int error, s;

#ifdef DIAGNOSTIC
	if ((xs->xs_control & XS_CTL_ASYNC) != 0 && xs->bp == NULL)
		panic("scsipi_complete: XS_CTL_ASYNC but no buf");
#endif
	/*
	 * If command terminated with a CHECK CONDITION, we need to issue a
	 * REQUEST_SENSE command. Once the REQUEST_SENSE has been processed
	 * we'll have the real status.
	 * Must be processed at splbio() to avoid missing a SCSI bus reset
	 * for this command.
	 */
	s = splbio();
	if (xs->error == XS_BUSY && xs->status == SCSI_CHECK) {
		/* request sense for a request sense ? */
		if (xs->xs_control & XS_CTL_REQSENSE) {
			scsipi_printaddr(periph);
			printf("request sense for a request sense ?\n");
			/* XXX maybe we should reset the device ? */
			/* we've been frozen because xs->error != XS_NOERROR */
			scsipi_periph_thaw(periph, 1);
			splx(s);
			if (xs->resid < xs->datalen) {
				printf("we read %d bytes of sense anyway:\n",
				    xs->datalen - xs->resid);
#ifdef SCSIVERBOSE
				scsipi_print_sense_data((void *)xs->data, 0);
#endif
			}
			return EINVAL;
		}
		scsipi_request_sense(xs);
	}
	splx(s);

	/*
	 * If it's a user level request, bypass all usual completion
	 * processing, let the user work it out..
	 */
	if ((xs->xs_control & XS_CTL_USERCMD) != 0) {
		SC_DEBUG(periph, SCSIPI_DB3, ("calling user done()\n"));
		if (xs->error != XS_NOERROR)
			scsipi_periph_thaw(periph, 1);
		scsipi_user_done(xs);
1538 SC_DEBUG(periph, SCSIPI_DB3, ("returned from user done()\n "));
		return 0;
	}

	switch (xs->error) {
	case XS_NOERROR:
		error = 0;
		break;

	case XS_SENSE:
	case XS_SHORTSENSE:
		error = (*chan->chan_bustype->bustype_interpret_sense)(xs);
		break;

	case XS_RESOURCE_SHORTAGE:
		/*
		 * XXX Should freeze channel's queue.
		 */
		scsipi_printaddr(periph);
		printf("adapter resource shortage\n");
		/* FALLTHROUGH */

	case XS_BUSY:
		if (xs->error == XS_BUSY && xs->status == SCSI_QUEUE_FULL) {
			struct scsipi_max_openings mo;

			/*
			 * We set the openings to active - 1, assuming that
			 * the command that got us here is the first one that
			 * can't fit into the device's queue.  If that's not
			 * the case, I guess we'll find out soon enough.
			 */
			mo.mo_target = periph->periph_target;
			mo.mo_lun = periph->periph_lun;
			if (periph->periph_active < periph->periph_openings)
				mo.mo_openings = periph->periph_active - 1;
			else
				mo.mo_openings = periph->periph_openings - 1;
#ifdef DIAGNOSTIC
			if (mo.mo_openings < 0) {
				scsipi_printaddr(periph);
				printf("QUEUE FULL resulted in < 0 openings\n");
				panic("scsipi_done");
			}
#endif
			if (mo.mo_openings == 0) {
				scsipi_printaddr(periph);
				printf("QUEUE FULL resulted in 0 openings\n");
				mo.mo_openings = 1;
			}
			scsipi_async_event(chan, ASYNC_EVENT_MAX_OPENINGS, &mo);
			error = ERESTART;
		} else if (xs->xs_retries != 0) {
			xs->xs_retries--;
			/*
			 * Wait one second, and try again.
			 */
			if ((xs->xs_control & XS_CTL_POLL) ||
			    (chan->chan_flags & SCSIPI_CHAN_TACTIVE) == 0) {
				delay(1000000);
			} else if (!callout_pending(&periph->periph_callout)) {
				scsipi_periph_freeze(periph, 1);
				callout_reset(&periph->periph_callout,
				    hz, scsipi_periph_timed_thaw, periph);
			}
			error = ERESTART;
		} else
			error = EBUSY;
		break;

	case XS_REQUEUE:
		error = ERESTART;
		break;

	case XS_SELTIMEOUT:
	case XS_TIMEOUT:
		/*
		 * If the device hasn't gone away, honor retry counts.
		 *
		 * Note that if we're in the middle of probing it,
		 * it won't be found because it isn't here yet so
		 * we won't honor the retry count in that case.
		 */
		if (scsipi_lookup_periph(chan, periph->periph_target,
		    periph->periph_lun) && xs->xs_retries != 0) {
			xs->xs_retries--;
			error = ERESTART;
		} else
			error = EIO;
		break;

	case XS_RESET:
		if (xs->xs_control & XS_CTL_REQSENSE) {
			/*
			 * request sense interrupted by reset: signal it
			 * with EINTR return code.
			 */
			error = EINTR;
		} else {
			if (xs->xs_retries != 0) {
				xs->xs_retries--;
				error = ERESTART;
			} else
				error = EIO;
		}
		break;

	case XS_DRIVER_STUFFUP:
		scsipi_printaddr(periph);
		printf("generic HBA error\n");
		error = EIO;
		break;
	default:
		scsipi_printaddr(periph);
		printf("invalid return code from adapter: %d\n", xs->error);
		error = EIO;
		break;
	}

	s = splbio();
	if (error == ERESTART) {
		/*
		 * If we get here, the periph has been thawed and frozen
		 * again if we had to issue recovery commands.  Alternatively,
		 * it may have been frozen again and in a timed thaw.  In
		 * any case, we thaw the periph once we re-enqueue the
		 * command.  Once the periph is fully thawed, it will begin
		 * operation again.
		 */
		xs->error = XS_NOERROR;
		xs->status = SCSI_OK;
		xs->xs_status &= ~XS_STS_DONE;
		xs->xs_requeuecnt++;
		error = scsipi_enqueue(xs);
		if (error == 0) {
			scsipi_periph_thaw(periph, 1);
			splx(s);
			return (ERESTART);
		}
	}

	/*
	 * scsipi_done() freezes the queue if not XS_NOERROR.
	 * Thaw it here.
	 */
	if (xs->error != XS_NOERROR)
		scsipi_periph_thaw(periph, 1);

	/*
	 * Set buffer fields in case the periph
	 * switch done func uses them
	 */
	if ((bp = xs->bp) != NULL) {
		if (error) {
			bp->b_error = error;
			bp->b_flags |= B_ERROR;
			bp->b_resid = bp->b_bcount;
		} else {
			bp->b_error = 0;
			bp->b_resid = xs->resid;
		}
	}

	if (periph->periph_switch->psw_done)
		periph->periph_switch->psw_done(xs);

	if (bp)
		biodone(bp);

	if (xs->xs_control & XS_CTL_ASYNC)
		scsipi_put_xs(xs);
	splx(s);

	return (error);
}

/*
 * Issue a request sense for the given scsipi_xfer.  Called when the xfer
 * returns with a CHECK_CONDITION status.  Must be called in valid thread
 * context and at splbio().
 */

void
scsipi_request_sense(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_periph *periph = xs->xs_periph;
	int flags, error;
	struct scsipi_sense cmd;

	periph->periph_flags |= PERIPH_SENSE;

	/* if command was polling, request sense will too */
	flags = xs->xs_control & XS_CTL_POLL;
	/* Polling commands can't sleep */
	if (flags)
		flags |= XS_CTL_NOSLEEP;

	flags |= XS_CTL_REQSENSE | XS_CTL_URGENT | XS_CTL_DATA_IN |
	    XS_CTL_THAW_PERIPH | XS_CTL_FREEZE_PERIPH;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = REQUEST_SENSE;
	cmd.length = sizeof(struct scsipi_sense_data);

	error = scsipi_command(periph,
	    (struct scsipi_generic *) &cmd, sizeof(cmd),
	    (u_char*)&xs->sense.scsi_sense, sizeof(struct scsipi_sense_data),
	    0, 1000, NULL, flags);
	periph->periph_flags &= ~PERIPH_SENSE;
	periph->periph_xscheck = NULL;
	switch(error) {
	case 0:
		/* we have a valid sense */
		xs->error = XS_SENSE;
		return;
	case EINTR:
		/* REQUEST_SENSE interrupted by bus reset. */
		xs->error = XS_RESET;
		return;
	case EIO:
		/* request sense couldn't be performed */
		/*
		 * XXX this isn't quite right but we don't have anything
		 * better for now
		 */
		xs->error = XS_DRIVER_STUFFUP;
		return;
	default:
		/* Notify that request sense failed. */
		xs->error = XS_DRIVER_STUFFUP;
		scsipi_printaddr(periph);
		printf("request sense failed with error %d\n", error);
		return;
	}
}

/*
 * scsipi_enqueue:
 *
 *	Enqueue an xfer on a channel.
 */
int
scsipi_enqueue(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_channel *chan = xs->xs_periph->periph_channel;
	struct scsipi_xfer *qxs;
	int s;

	s = splbio();

	/*
	 * If the xfer is to be polled, and there are already jobs on
	 * the queue, we can't proceed.
	 */
	if ((xs->xs_control & XS_CTL_POLL) != 0 &&
	    TAILQ_FIRST(&chan->chan_queue) != NULL) {
		splx(s);
		xs->error = XS_DRIVER_STUFFUP;
		return (EAGAIN);
	}

	/*
	 * If we have an URGENT xfer, it's an error recovery command
	 * and it should just go on the head of the channel's queue.
	 */
	if (xs->xs_control & XS_CTL_URGENT) {
		TAILQ_INSERT_HEAD(&chan->chan_queue, xs, channel_q);
		goto out;
	}

	/*
	 * If this xfer has already been on the queue before, we
	 * need to reinsert it in the correct order.  That order is:
	 *
	 *	Immediately before the first xfer for this periph
	 *	with a requeuecnt less than xs->xs_requeuecnt.
	 *
	 * Failing that, at the end of the queue.  (We'll end up
	 * there naturally.)
	 */
	if (xs->xs_requeuecnt != 0) {
		for (qxs = TAILQ_FIRST(&chan->chan_queue); qxs != NULL;
		     qxs = TAILQ_NEXT(qxs, channel_q)) {
			if (qxs->xs_periph == xs->xs_periph &&
			    qxs->xs_requeuecnt < xs->xs_requeuecnt)
				break;
		}
		if (qxs != NULL) {
			TAILQ_INSERT_AFTER(&chan->chan_queue, qxs, xs,
			    channel_q);
			goto out;
		}
	}
	TAILQ_INSERT_TAIL(&chan->chan_queue, xs, channel_q);
 out:
	if (xs->xs_control & XS_CTL_THAW_PERIPH)
		scsipi_periph_thaw(xs->xs_periph, 1);
	splx(s);
	return (0);
}

/*
 * scsipi_run_queue:
 *
 *	Start as many xfers as possible running on the channel.
 */
void
scsipi_run_queue(chan)
	struct scsipi_channel *chan;
{
	struct scsipi_xfer *xs;
	struct scsipi_periph *periph;
	int s;

	for (;;) {
		s = splbio();

		/*
		 * If the channel is frozen, we can't do any work right
		 * now.
		 */
		if (chan->chan_qfreeze != 0) {
			splx(s);
			return;
		}

		/*
		 * Look for work to do, and make sure we can do it.
		 */
		for (xs = TAILQ_FIRST(&chan->chan_queue); xs != NULL;
		     xs = TAILQ_NEXT(xs, channel_q)) {
			periph = xs->xs_periph;

			if ((periph->periph_sent >= periph->periph_openings) ||
			    periph->periph_qfreeze != 0 ||
			    (periph->periph_flags & PERIPH_UNTAG) != 0)
				continue;

			if ((periph->periph_flags &
			    (PERIPH_RECOVERING | PERIPH_SENSE)) != 0 &&
			    (xs->xs_control & XS_CTL_URGENT) == 0)
				continue;

			/*
			 * We can issue this xfer!
			 */
			goto got_one;
		}

		/*
		 * Can't find any work to do right now.
		 */
		splx(s);
		return;

 got_one:
		/*
		 * Have an xfer to run.  Allocate a resource from
		 * the adapter to run it.  If we can't allocate that
		 * resource, we don't dequeue the xfer.
		 */
		if (scsipi_get_resource(chan) == 0) {
			/*
			 * Adapter is out of resources.  If the adapter
			 * supports it, attempt to grow them.
			 */
			if (scsipi_grow_resources(chan) == 0) {
				/*
				 * Wasn't able to grow resources,
				 * nothing more we can do.
				 */
				if (xs->xs_control & XS_CTL_POLL) {
					scsipi_printaddr(xs->xs_periph);
					printf("polling command but no "
					    "adapter resources");
					/* We'll panic shortly... */
				}
				splx(s);

				/*
				 * XXX: We should be able to note that
				 * XXX: resources are needed here!
				 */
				return;
			}
			/*
			 * scsipi_grow_resources() allocated the resource
			 * for us.
			 */
		}

		/*
		 * We have a resource to run this xfer, do it!
		 */
		TAILQ_REMOVE(&chan->chan_queue, xs, channel_q);

		/*
		 * If the command is to be tagged, allocate a tag ID
		 * for it.
		 */
		if (XS_CTL_TAGTYPE(xs) != 0)
			scsipi_get_tag(xs);
		else
			periph->periph_flags |= PERIPH_UNTAG;
		periph->periph_sent++;
		splx(s);

		scsipi_adapter_request(chan, ADAPTER_REQ_RUN_XFER, xs);
	}
#ifdef DIAGNOSTIC
	panic("scsipi_run_queue: impossible");
#endif
}

/*
 * scsipi_execute_xs:
 *
 *	Begin execution of an xfer, waiting for it to complete, if necessary.
 */
int
scsipi_execute_xs(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_periph *periph = xs->xs_periph;
	struct scsipi_channel *chan = periph->periph_channel;
	int oasync, async, poll, retries, error, s;

	xs->xs_status &= ~XS_STS_DONE;
	xs->error = XS_NOERROR;
	xs->resid = xs->datalen;
	xs->status = SCSI_OK;

#ifdef SCSIPI_DEBUG
	if (xs->xs_periph->periph_dbflags & SCSIPI_DB3) {
		printf("scsipi_execute_xs: ");
		show_scsipi_xs(xs);
		printf("\n");
	}
#endif

	/*
	 * Deal with command tagging:
	 *
	 *	- If the device's current operating mode doesn't
	 *	  include tagged queueing, clear the tag mask.
	 *
	 *	- If the device's current operating mode *does*
	 *	  include tagged queueing, set the tag_type in
	 *	  the xfer to the appropriate byte for the tag
	 *	  message.
	 */
	if ((PERIPH_XFER_MODE(periph) & PERIPH_CAP_TQING) == 0 ||
	    (xs->xs_control & XS_CTL_REQSENSE)) {
		xs->xs_control &= ~XS_CTL_TAGMASK;
		xs->xs_tag_type = 0;
	} else {
		/*
		 * If the request doesn't specify a tag, give Head
		 * tags to URGENT operations and Ordered tags to
		 * everything else.
		 */
		if (XS_CTL_TAGTYPE(xs) == 0) {
			if (xs->xs_control & XS_CTL_URGENT)
				xs->xs_control |= XS_CTL_HEAD_TAG;
			else
				xs->xs_control |= XS_CTL_ORDERED_TAG;
		}

		switch (XS_CTL_TAGTYPE(xs)) {
		case XS_CTL_ORDERED_TAG:
			xs->xs_tag_type = MSG_ORDERED_Q_TAG;
			break;

		case XS_CTL_SIMPLE_TAG:
			xs->xs_tag_type = MSG_SIMPLE_Q_TAG;
			break;

		case XS_CTL_HEAD_TAG:
			xs->xs_tag_type = MSG_HEAD_OF_Q_TAG;
			break;

		default:
			scsipi_printaddr(periph);
			printf("invalid tag mask 0x%08x\n",
			    XS_CTL_TAGTYPE(xs));
			panic("scsipi_execute_xs");
		}
	}

	/* If the adapter wants us to poll, poll. */
	if (chan->chan_adapter->adapt_flags & SCSIPI_ADAPT_POLL_ONLY)
		xs->xs_control |= XS_CTL_POLL;

	/*
	 * If we don't yet have a completion thread, or we are to poll for
	 * completion, clear the ASYNC flag.
	 */
	oasync = (xs->xs_control & XS_CTL_ASYNC);
	if (chan->chan_thread == NULL || (xs->xs_control & XS_CTL_POLL) != 0)
		xs->xs_control &= ~XS_CTL_ASYNC;

	async = (xs->xs_control & XS_CTL_ASYNC);
	poll = (xs->xs_control & XS_CTL_POLL);
	retries = xs->xs_retries;	/* for polling commands */

#ifdef DIAGNOSTIC
	if (oasync != 0 && xs->bp == NULL)
		panic("scsipi_execute_xs: XS_CTL_ASYNC but no buf");
#endif

	/*
	 * Enqueue the transfer.  If we're not polling for completion, this
	 * should ALWAYS return `no error'.
	 */
 try_again:
	error = scsipi_enqueue(xs);
	if (error) {
		if (poll == 0) {
			scsipi_printaddr(periph);
			printf("not polling, but enqueue failed with %d\n",
			    error);
			panic("scsipi_execute_xs");
		}

		scsipi_printaddr(periph);
		printf("failed to enqueue polling command");
		if (retries != 0) {
			printf(", retrying...\n");
			delay(1000000);
			retries--;
			goto try_again;
		}
		printf("\n");
		goto free_xs;
	}

 restarted:
	scsipi_run_queue(chan);

	/*
	 * The xfer is enqueued, and possibly running.  If it's to be
	 * completed asynchronously, just return now.
	 */
	if (async)
		return (EJUSTRETURN);

	/*
	 * Not an asynchronous command; wait for it to complete.
	 */
	s = splbio();
	while ((xs->xs_status & XS_STS_DONE) == 0) {
		if (poll) {
			scsipi_printaddr(periph);
			printf("polling command not done\n");
			panic("scsipi_execute_xs");
		}
		(void) tsleep(xs, PRIBIO, "xscmd", 0);
	}
	splx(s);

	/*
	 * Command is complete.  scsipi_done() has awakened us to perform
	 * the error handling.
	 */
	error = scsipi_complete(xs);
	if (error == ERESTART)
		goto restarted;

2108 	/*
2109 	 * If the command was marked async and we cleared the async flag
2110 	 * ourselves above, don't return an error; it's already handled.
2111 	 */
2112 if (oasync)
2113 error = EJUSTRETURN;
2114 /*
2115 * Command completed successfully or fatal error occurred. Fall
2116 * into....
2117 */
2118 free_xs:
2119 s = splbio();
2120 scsipi_put_xs(xs);
2121 splx(s);
2122
2123 /*
2124 * Kick the queue, keep it running in case it stopped for some
2125 * reason.
2126 */
2127 scsipi_run_queue(chan);
2128
2129 return (error);
2130 }
2131
2132 /*
2133 * scsipi_completion_thread:
2134 *
2135 * This is the completion thread. We wait for errors on
2136 * asynchronous xfers, and perform the error handling
2137 * function, restarting the command, if necessary.
2138 */
2139 void
2140 scsipi_completion_thread(arg)
2141 void *arg;
2142 {
2143 struct scsipi_channel *chan = arg;
2144 struct scsipi_xfer *xs;
2145 int s;
2146
2147 if (chan->chan_init_cb)
2148 (*chan->chan_init_cb)(chan, chan->chan_init_cb_arg);
2149
2150 s = splbio();
2151 chan->chan_flags |= SCSIPI_CHAN_TACTIVE;
2152 splx(s);
2153 for (;;) {
2154 s = splbio();
2155 xs = TAILQ_FIRST(&chan->chan_complete);
2156 if (xs == NULL && chan->chan_tflags == 0) {
2157 /* nothing to do; wait */
2158 (void) tsleep(&chan->chan_complete, PRIBIO,
2159 "sccomp", 0);
2160 splx(s);
2161 continue;
2162 }
2163 if (chan->chan_tflags & SCSIPI_CHANT_CALLBACK) {
2164 /* call chan_callback from thread context */
2165 chan->chan_tflags &= ~SCSIPI_CHANT_CALLBACK;
2166 chan->chan_callback(chan, chan->chan_callback_arg);
2167 splx(s);
2168 continue;
2169 }
2170 if (chan->chan_tflags & SCSIPI_CHANT_GROWRES) {
2171 /* attempt to get more openings for this channel */
2172 chan->chan_tflags &= ~SCSIPI_CHANT_GROWRES;
2173 scsipi_adapter_request(chan,
2174 ADAPTER_REQ_GROW_RESOURCES, NULL);
2175 scsipi_channel_thaw(chan, 1);
2176 splx(s);
2177 continue;
2178 }
2179 if (chan->chan_tflags & SCSIPI_CHANT_KICK) {
2180 /* explicitly run the queues for this channel */
2181 chan->chan_tflags &= ~SCSIPI_CHANT_KICK;
2182 scsipi_run_queue(chan);
2183 splx(s);
2184 continue;
2185 }
2186 if (chan->chan_tflags & SCSIPI_CHANT_SHUTDOWN) {
2187 splx(s);
2188 break;
2189 }
2190 if (xs) {
2191 TAILQ_REMOVE(&chan->chan_complete, xs, channel_q);
2192 splx(s);
2193
2194 /*
2195 * Have an xfer with an error; process it.
2196 */
2197 (void) scsipi_complete(xs);
2198
2199 /*
2200 * Kick the queue; keep it running if it was stopped
2201 * for some reason.
2202 */
2203 scsipi_run_queue(chan);
2204 } else {
2205 splx(s);
2206 }
2207 }
2208
2209 chan->chan_thread = NULL;
2210
2211 /* In case parent is waiting for us to exit. */
2212 wakeup(&chan->chan_thread);
2213
2214 kthread_exit(0);
2215 }
2216
2217 /*
2218 * scsipi_create_completion_thread:
2219 *
2220 * Callback to actually create the completion thread.
2221 */
2222 void
2223 scsipi_create_completion_thread(arg)
2224 void *arg;
2225 {
2226 struct scsipi_channel *chan = arg;
2227 struct scsipi_adapter *adapt = chan->chan_adapter;
2228
2229 if (kthread_create1(scsipi_completion_thread, chan,
2230 &chan->chan_thread, "%s", chan->chan_name)) {
2231 printf("%s: unable to create completion thread for "
2232 "channel %d\n", adapt->adapt_dev->dv_xname,
2233 chan->chan_channel);
2234 panic("scsipi_create_completion_thread");
2235 }
2236 }
2237
2238 /*
2239 * scsipi_thread_call_callback:
2240 *
2241  *	Request that a callback be invoked from the completion thread.
2242 */
2243 int
2244 scsipi_thread_call_callback(chan, callback, arg)
2245 struct scsipi_channel *chan;
2246 void (*callback) __P((struct scsipi_channel *, void *));
2247 void *arg;
2248 {
2249 int s;
2250
2251 s = splbio();
2252 if ((chan->chan_flags & SCSIPI_CHAN_TACTIVE) == 0) {
2253 /* kernel thread doesn't exist yet */
2254 splx(s);
2255 return ESRCH;
2256 }
2257 if (chan->chan_tflags & SCSIPI_CHANT_CALLBACK) {
2258 splx(s);
2259 return EBUSY;
2260 }
2261 scsipi_channel_freeze(chan, 1);
2262 chan->chan_callback = callback;
2263 chan->chan_callback_arg = arg;
2264 chan->chan_tflags |= SCSIPI_CHANT_CALLBACK;
2265 wakeup(&chan->chan_complete);
2266 splx(s);
2267 return(0);
2268 }
2269
2270 /*
2271 * scsipi_async_event:
2272 *
2273 * Handle an asynchronous event from an adapter.
2274 */
2275 void
2276 scsipi_async_event(chan, event, arg)
2277 struct scsipi_channel *chan;
2278 scsipi_async_event_t event;
2279 void *arg;
2280 {
2281 int s;
2282
2283 s = splbio();
2284 switch (event) {
2285 case ASYNC_EVENT_MAX_OPENINGS:
2286 scsipi_async_event_max_openings(chan,
2287 (struct scsipi_max_openings *)arg);
2288 break;
2289
2290 case ASYNC_EVENT_XFER_MODE:
2291 scsipi_async_event_xfer_mode(chan,
2292 (struct scsipi_xfer_mode *)arg);
2293 break;

2294 	case ASYNC_EVENT_RESET:
2295 scsipi_async_event_channel_reset(chan);
2296 break;
2297 }
2298 splx(s);
2299 }
2300
2301 /*
2302 * scsipi_print_xfer_mode:
2303 *
2304 * Print a periph's capabilities.
2305 */
2306 void
2307 scsipi_print_xfer_mode(periph)
2308 struct scsipi_periph *periph;
2309 {
2310 int period, freq, speed, mbs;
2311
2312 if ((periph->periph_flags & PERIPH_MODE_VALID) == 0)
2313 return;
2314
2315 aprint_normal("%s: ", periph->periph_dev->dv_xname);
2316 if (periph->periph_mode & (PERIPH_CAP_SYNC | PERIPH_CAP_DT)) {
2317 period = scsipi_sync_factor_to_period(periph->periph_period);
2318 aprint_normal("sync (%d.%02dns offset %d)",
2319 period / 100, period % 100, periph->periph_offset);
2320 } else
2321 aprint_normal("async");
2322
2323 if (periph->periph_mode & PERIPH_CAP_WIDE32)
2324 aprint_normal(", 32-bit");
2325 else if (periph->periph_mode & (PERIPH_CAP_WIDE16 | PERIPH_CAP_DT))
2326 aprint_normal(", 16-bit");
2327 else
2328 aprint_normal(", 8-bit");
2329
2330 if (periph->periph_mode & (PERIPH_CAP_SYNC | PERIPH_CAP_DT)) {
2331 freq = scsipi_sync_factor_to_freq(periph->periph_period);
2332 speed = freq;
2333 if (periph->periph_mode & PERIPH_CAP_WIDE32)
2334 speed *= 4;
2335 else if (periph->periph_mode &
2336 (PERIPH_CAP_WIDE16 | PERIPH_CAP_DT))
2337 speed *= 2;
2338 mbs = speed / 1000;
2339 if (mbs > 0)
2340 aprint_normal(" (%d.%03dMB/s)", mbs, speed % 1000);
2341 else
2342 			aprint_normal(" (%dKB/s)", speed);
2343 }
2344
2345 aprint_normal(" transfers");
2346
2347 if (periph->periph_mode & PERIPH_CAP_TQING)
2348 aprint_normal(", tagged queueing");
2349
2350 aprint_normal("\n");
2351 }
2352
2353 /*
2354 * scsipi_async_event_max_openings:
2355 *
2356 * Update the maximum number of outstanding commands a
2357 * device may have.
2358 */
2359 void
2360 scsipi_async_event_max_openings(chan, mo)
2361 struct scsipi_channel *chan;
2362 struct scsipi_max_openings *mo;
2363 {
2364 struct scsipi_periph *periph;
2365 int minlun, maxlun;
2366
2367 if (mo->mo_lun == -1) {
2368 /*
2369 * Wildcarded; apply it to all LUNs.
2370 */
2371 minlun = 0;
2372 maxlun = chan->chan_nluns - 1;
2373 } else
2374 minlun = maxlun = mo->mo_lun;
2375
2376 /* XXX This could really suck with a large LUN space. */
2377 for (; minlun <= maxlun; minlun++) {
2378 periph = scsipi_lookup_periph(chan, mo->mo_target, minlun);
2379 if (periph == NULL)
2380 continue;
2381
2382 if (mo->mo_openings < periph->periph_openings)
2383 periph->periph_openings = mo->mo_openings;
2384 else if (mo->mo_openings > periph->periph_openings &&
2385 (periph->periph_flags & PERIPH_GROW_OPENINGS) != 0)
2386 periph->periph_openings = mo->mo_openings;
2387 }
2388 }
2389
2390 /*
2391 * scsipi_async_event_xfer_mode:
2392 *
2393 * Update the xfer mode for all periphs sharing the
2394 * specified I_T Nexus.
2395 */
2396 void
2397 scsipi_async_event_xfer_mode(chan, xm)
2398 struct scsipi_channel *chan;
2399 struct scsipi_xfer_mode *xm;
2400 {
2401 struct scsipi_periph *periph;
2402 int lun, announce, mode, period, offset;
2403
2404 for (lun = 0; lun < chan->chan_nluns; lun++) {
2405 periph = scsipi_lookup_periph(chan, xm->xm_target, lun);
2406 if (periph == NULL)
2407 continue;
2408 announce = 0;
2409
2410 /*
2411 * Clamp the xfer mode down to this periph's capabilities.
2412 */
2413 mode = xm->xm_mode & periph->periph_cap;
2414 if (mode & PERIPH_CAP_SYNC) {
2415 period = xm->xm_period;
2416 offset = xm->xm_offset;
2417 } else {
2418 period = 0;
2419 offset = 0;
2420 }
2421
2422 /*
2423 * If we do not have a valid xfer mode yet, or the parameters
2424 * are different, announce them.
2425 */
2426 if ((periph->periph_flags & PERIPH_MODE_VALID) == 0 ||
2427 periph->periph_mode != mode ||
2428 periph->periph_period != period ||
2429 periph->periph_offset != offset)
2430 announce = 1;
2431
2432 periph->periph_mode = mode;
2433 periph->periph_period = period;
2434 periph->periph_offset = offset;
2435 periph->periph_flags |= PERIPH_MODE_VALID;
2436
2437 if (announce)
2438 scsipi_print_xfer_mode(periph);
2439 }
2440 }
2441
2442 /*
2443 * scsipi_set_xfer_mode:
2444 *
2445 * Set the xfer mode for the specified I_T Nexus.
2446 */
2447 void
2448 scsipi_set_xfer_mode(chan, target, immed)
2449 struct scsipi_channel *chan;
2450 int target, immed;
2451 {
2452 struct scsipi_xfer_mode xm;
2453 struct scsipi_periph *itperiph;
2454 int lun, s;
2455
2456 /*
2457 * Go to the minimal xfer mode.
2458 */
2459 xm.xm_target = target;
2460 xm.xm_mode = 0;
2461 xm.xm_period = 0; /* ignored */
2462 xm.xm_offset = 0; /* ignored */
2463
2464 /*
2465 * Find the first LUN we know about on this I_T Nexus.
2466 */
2467 for (itperiph = NULL, lun = 0; lun < chan->chan_nluns; lun++) {
2468 itperiph = scsipi_lookup_periph(chan, target, lun);
2469 if (itperiph != NULL)
2470 break;
2471 }
2472 if (itperiph != NULL) {
2473 xm.xm_mode = itperiph->periph_cap;
2474 /*
2475 * Now issue the request to the adapter.
2476 */
2477 s = splbio();
2478 scsipi_adapter_request(chan, ADAPTER_REQ_SET_XFER_MODE, &xm);
2479 splx(s);
2480 /*
2481 * If we want this to happen immediately, issue a dummy
2482 * command, since most adapters can't really negotiate unless
2483 * they're executing a job.
2484 */
2485 if (immed != 0) {
2486 (void) scsipi_test_unit_ready(itperiph,
2487 XS_CTL_DISCOVERY | XS_CTL_IGNORE_ILLEGAL_REQUEST |
2488 XS_CTL_IGNORE_NOT_READY |
2489 XS_CTL_IGNORE_MEDIA_CHANGE);
2490 }
2491 }
2492 }
2493
2494 /*
2495  * scsipi_async_event_channel_reset:
2496  *
2497  *	Handle a SCSI bus reset.
2498  *	Called at splbio.
2499 */
2500 void
2501 scsipi_async_event_channel_reset(chan)
2502 struct scsipi_channel *chan;
2503 {
2504 struct scsipi_xfer *xs, *xs_next;
2505 struct scsipi_periph *periph;
2506 int target, lun;
2507
2508 	/*
2509 	 * Channel has been reset.  Also mark pending REQUEST_SENSE
2510 	 * commands as reset, since the sense data is no longer available.
2511 	 * We can't call scsipi_done() from here, as the command has not
2512 	 * been sent to the adapter yet (doing so would corrupt accounting).
2513 	 */
2514
2515 for (xs = TAILQ_FIRST(&chan->chan_queue); xs != NULL; xs = xs_next) {
2516 xs_next = TAILQ_NEXT(xs, channel_q);
2517 if (xs->xs_control & XS_CTL_REQSENSE) {
2518 TAILQ_REMOVE(&chan->chan_queue, xs, channel_q);
2519 xs->error = XS_RESET;
2520 if ((xs->xs_control & XS_CTL_ASYNC) != 0)
2521 TAILQ_INSERT_TAIL(&chan->chan_complete, xs,
2522 channel_q);
2523 }
2524 }
2525 wakeup(&chan->chan_complete);
2526 	/* Also flag xfers with pending sense that lack a REQSENSE xfer yet */
2527 for (target = 0; target < chan->chan_ntargets; target++) {
2528 if (target == chan->chan_id)
2529 continue;
2530 for (lun = 0; lun < chan->chan_nluns; lun++) {
2531 periph = scsipi_lookup_periph(chan, target, lun);
2532 if (periph) {
2533 xs = periph->periph_xscheck;
2534 if (xs)
2535 xs->error = XS_RESET;
2536 }
2537 }
2538 }
2539 }
2540
2541 /*
2542 * scsipi_target_detach:
2543 *
2544  *	Detach all periphs associated with the specified I_T Nexus.
2545  *	Must be called from valid thread context.
2546 */
2547 int
2548 scsipi_target_detach(chan, target, lun, flags)
2549 struct scsipi_channel *chan;
2550 int target, lun;
2551 int flags;
2552 {
2553 struct scsipi_periph *periph;
2554 int ctarget, mintarget, maxtarget;
2555 int clun, minlun, maxlun;
2556 int error;
2557
2558 if (target == -1) {
2559 mintarget = 0;
2560 maxtarget = chan->chan_ntargets;
2561 } else {
2562 if (target == chan->chan_id)
2563 return EINVAL;
2564 if (target < 0 || target >= chan->chan_ntargets)
2565 return EINVAL;
2566 mintarget = target;
2567 maxtarget = target + 1;
2568 }
2569
2570 if (lun == -1) {
2571 minlun = 0;
2572 maxlun = chan->chan_nluns;
2573 } else {
2574 if (lun < 0 || lun >= chan->chan_nluns)
2575 return EINVAL;
2576 minlun = lun;
2577 maxlun = lun + 1;
2578 }
2579
2580 for (ctarget = mintarget; ctarget < maxtarget; ctarget++) {
2581 if (ctarget == chan->chan_id)
2582 continue;
2583
2584 for (clun = minlun; clun < maxlun; clun++) {
2585 periph = scsipi_lookup_periph(chan, ctarget, clun);
2586 if (periph == NULL)
2587 continue;
2588 error = config_detach(periph->periph_dev, flags);
2589 if (error)
2590 return (error);
2591 scsipi_remove_periph(chan, periph);
2592 free(periph, M_DEVBUF);
2593 }
2594 }
2595 return(0);
2596 }
2597
2598 /*
2599 * scsipi_adapter_addref:
2600 *
2601 * Add a reference to the adapter pointed to by the provided
2602 * link, enabling the adapter if necessary.
2603 */
2604 int
2605 scsipi_adapter_addref(adapt)
2606 struct scsipi_adapter *adapt;
2607 {
2608 int s, error = 0;
2609
2610 s = splbio();
2611 if (adapt->adapt_refcnt++ == 0 && adapt->adapt_enable != NULL) {
2612 error = (*adapt->adapt_enable)(adapt->adapt_dev, 1);
2613 if (error)
2614 adapt->adapt_refcnt--;
2615 }
2616 splx(s);
2617 return (error);
2618 }
2619
2620 /*
2621 * scsipi_adapter_delref:
2622 *
2623 * Delete a reference to the adapter pointed to by the provided
2624 * link, disabling the adapter if possible.
2625 */
2626 void
2627 scsipi_adapter_delref(adapt)
2628 struct scsipi_adapter *adapt;
2629 {
2630 int s;
2631
2632 s = splbio();
2633 if (adapt->adapt_refcnt-- == 1 && adapt->adapt_enable != NULL)
2634 (void) (*adapt->adapt_enable)(adapt->adapt_dev, 0);
2635 splx(s);
2636 }
2637
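/*
 * Table of the synchronous transfer rates whose sync factors are not
 * expressed by the generic "period / 4" encoding; the conversion
 * routines below fall back to that encoding for slower rates.
 */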
2638 struct scsipi_syncparam {
2639 int ss_factor;
2640 int ss_period; /* ns * 100 */
2641 } scsipi_syncparams[] = {
2642 { 0x08, 625 }, /* FAST-160 (Ultra320) */
2643 { 0x09, 1250 }, /* FAST-80 (Ultra160) */
2644 { 0x0a, 2500 }, /* FAST-40 40MHz (Ultra2) */
2645 { 0x0b, 3030 }, /* FAST-40 33MHz (Ultra2) */
2646 { 0x0c, 5000 }, /* FAST-20 (Ultra) */
2647 };
2648 const int scsipi_nsyncparams =
2649 sizeof(scsipi_syncparams) / sizeof(scsipi_syncparams[0]);
2650
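/*
 * scsipi_sync_period_to_factor:
 *
 *	Convert a synchronous transfer period (in units of ns * 100)
 *	to the corresponding sync factor, using the table above for
 *	the fast rates.  For example, a period of 2500 (25.00 ns)
 *	maps to factor 0x0a (FAST-40).
 */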
2651 int
2652 scsipi_sync_period_to_factor(period)
2653 int period; /* ns * 100 */
2654 {
2655 int i;
2656
2657 for (i = 0; i < scsipi_nsyncparams; i++) {
2658 if (period <= scsipi_syncparams[i].ss_period)
2659 return (scsipi_syncparams[i].ss_factor);
2660 }
2661
2662 return ((period / 100) / 4);
2663 }
2664
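/*
 * scsipi_sync_factor_to_period:
 *
 *	Convert a sync factor back to a transfer period (in units of
 *	ns * 100), using the table above for the fast rates and
 *	factor * 4 * 100 for everything else.
 */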
2665 int
2666 scsipi_sync_factor_to_period(factor)
2667 int factor;
2668 {
2669 int i;
2670
2671 for (i = 0; i < scsipi_nsyncparams; i++) {
2672 if (factor == scsipi_syncparams[i].ss_factor)
2673 return (scsipi_syncparams[i].ss_period);
2674 }
2675
2676 return ((factor * 4) * 100);
2677 }
2678
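/*
 * scsipi_sync_factor_to_freq:
 *
 *	Convert a sync factor to a transfer frequency in kHz; e.g.
 *	factor 0x0a yields 100000000 / 2500 = 40000 kHz (40 MHz).
 */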
2679 int
2680 scsipi_sync_factor_to_freq(factor)
2681 int factor;
2682 {
2683 int i;
2684
2685 for (i = 0; i < scsipi_nsyncparams; i++) {
2686 if (factor == scsipi_syncparams[i].ss_factor)
2687 return (100000000 / scsipi_syncparams[i].ss_period);
2688 }
2689
2690 return (10000000 / ((factor * 4) * 10));
2691 }
2692
2693 #ifdef SCSIPI_DEBUG
2694 /*
2695  * Given a scsipi_xfer, dump the request in all its glory
2696 */
2697 void
2698 show_scsipi_xs(xs)
2699 struct scsipi_xfer *xs;
2700 {
2701
2702 printf("xs(%p): ", xs);
2703 printf("xs_control(0x%08x)", xs->xs_control);
2704 printf("xs_status(0x%08x)", xs->xs_status);
2705 printf("periph(%p)", xs->xs_periph);
2706 printf("retr(0x%x)", xs->xs_retries);
2707 printf("timo(0x%x)", xs->timeout);
2708 printf("cmd(%p)", xs->cmd);
2709 printf("len(0x%x)", xs->cmdlen);
2710 printf("data(%p)", xs->data);
2711 printf("len(0x%x)", xs->datalen);
2712 printf("res(0x%x)", xs->resid);
2713 printf("err(0x%x)", xs->error);
2714 printf("bp(%p)", xs->bp);
2715 show_scsipi_cmd(xs);
2716 }
2717
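/*
 * Dump the bytes of the command (or note a reset), followed by up to
 * 64 bytes of the associated data buffer.
 */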
2718 void
2719 show_scsipi_cmd(xs)
2720 struct scsipi_xfer *xs;
2721 {
2722 u_char *b = (u_char *) xs->cmd;
2723 int i = 0;
2724
2725 scsipi_printaddr(xs->xs_periph);
2726 printf(" command: ");
2727
2728 if ((xs->xs_control & XS_CTL_RESET) == 0) {
2729 while (i < xs->cmdlen) {
2730 if (i)
2731 printf(",");
2732 printf("0x%x", b[i++]);
2733 }
2734 printf("-[%d bytes]\n", xs->datalen);
2735 if (xs->datalen)
2736 show_mem(xs->data, min(64, xs->datalen));
2737 } else
2738 printf("-RESET-\n");
2739 }
2740
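/*
 * Hex-dump `num' bytes starting at `address', 16 per line.
 */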
2741 void
2742 show_mem(address, num)
2743 u_char *address;
2744 int num;
2745 {
2746 int x;
2747
2748 printf("------------------------------");
2749 for (x = 0; x < num; x++) {
2750 if ((x % 16) == 0)
2751 printf("\n%03d: ", x);
2752 printf("%02x ", *address++);
2753 }
2754 printf("\n------------------------------\n");
2755 }
2756 #endif /* SCSIPI_DEBUG */
2757