/*	$NetBSD: scsipi_base.c,v 1.102 2004/03/10 21:57:31 bouyer Exp $	*/

/*-
 * Copyright (c) 1998, 1999, 2000, 2002, 2003 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Charles M. Hannum; by Jason R. Thorpe of the Numerical Aerospace
 * Simulation Facility, NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: scsipi_base.c,v 1.102 2004/03/10 21:57:31 bouyer Exp $");

#include "opt_scsi.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/buf.h>
#include <sys/uio.h>
#include <sys/malloc.h>
#include <sys/pool.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/proc.h>
#include <sys/kthread.h>
#include <sys/hash.h>

#include <dev/scsipi/scsipi_all.h>
#include <dev/scsipi/scsipi_disk.h>
#include <dev/scsipi/scsipiconf.h>
#include <dev/scsipi/scsipi_base.h>

#include <dev/scsipi/scsi_all.h>
#include <dev/scsipi/scsi_message.h>

int	scsipi_complete __P((struct scsipi_xfer *));
void	scsipi_request_sense __P((struct scsipi_xfer *));
int	scsipi_enqueue __P((struct scsipi_xfer *));
void	scsipi_run_queue __P((struct scsipi_channel *chan));

void	scsipi_completion_thread __P((void *));

void	scsipi_get_tag __P((struct scsipi_xfer *));
void	scsipi_put_tag __P((struct scsipi_xfer *));

int	scsipi_get_resource __P((struct scsipi_channel *));
void	scsipi_put_resource __P((struct scsipi_channel *));
__inline int scsipi_grow_resources __P((struct scsipi_channel *));

void	scsipi_async_event_max_openings __P((struct scsipi_channel *,
	    struct scsipi_max_openings *));
void	scsipi_async_event_xfer_mode __P((struct scsipi_channel *,
	    struct scsipi_xfer_mode *));
void	scsipi_async_event_channel_reset __P((struct scsipi_channel *));

struct pool scsipi_xfer_pool;

/*
 * scsipi_init:
 *
 *	Called when a scsibus or atapibus is attached to the system
 *	to initialize shared data structures.
 */
void
scsipi_init()
{
	static int scsipi_init_done;

	if (scsipi_init_done)
		return;
	scsipi_init_done = 1;

	/* Initialize the scsipi_xfer pool. */
	pool_init(&scsipi_xfer_pool, sizeof(struct scsipi_xfer), 0,
	    0, 0, "scxspl", NULL);
}

/*
 * scsipi_channel_init:
 *
 *	Initialize a scsipi_channel when it is attached.
 */
int
scsipi_channel_init(chan)
	struct scsipi_channel *chan;
{
	int i;

	/* Initialize shared data. */
	scsipi_init();

	/* Initialize the queues. */
	TAILQ_INIT(&chan->chan_queue);
	TAILQ_INIT(&chan->chan_complete);

	for (i = 0; i < SCSIPI_CHAN_PERIPH_BUCKETS; i++)
		LIST_INIT(&chan->chan_periphtab[i]);

	/*
	 * Create the asynchronous completion thread.
	 */
	kthread_create(scsipi_create_completion_thread, chan);
	return (0);
}

/*
 * scsipi_channel_shutdown:
 *
 *	Shutdown a scsipi_channel.
 */
void
scsipi_channel_shutdown(chan)
	struct scsipi_channel *chan;
{

	/*
	 * Shut down the completion thread.
	 */
	chan->chan_tflags |= SCSIPI_CHANT_SHUTDOWN;
	wakeup(&chan->chan_complete);

	/*
	 * Now wait for the thread to exit.
	 */
	while (chan->chan_thread != NULL)
		(void) tsleep(&chan->chan_thread, PRIBIO, "scshut", 0);
}
static uint32_t
scsipi_chan_periph_hash(uint64_t t, uint64_t l)
{
	uint32_t hash;

	hash = hash32_buf(&t, sizeof(t), HASH32_BUF_INIT);
	hash = hash32_buf(&l, sizeof(l), hash);

	return (hash & SCSIPI_CHAN_PERIPH_HASHMASK);
}
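
/*
 * Illustrative sketch (not part of the compiled driver): the periph
 * table is a small hash keyed on the <target, lun> pair, so insertion
 * and lookup below derive the same bucket from the same inputs:
 *
 *	uint32_t hash = scsipi_chan_periph_hash(target, lun);
 *	... LIST_INSERT_HEAD/LIST_FOREACH on chan->chan_periphtab[hash] ...
 *
 * This assumes SCSIPI_CHAN_PERIPH_HASHMASK is SCSIPI_CHAN_PERIPH_BUCKETS
 * minus one, i.e. the bucket count is a power of two.
 */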

/*
 * scsipi_insert_periph:
 *
 *	Insert a periph into the channel.
 */
void
scsipi_insert_periph(chan, periph)
	struct scsipi_channel *chan;
	struct scsipi_periph *periph;
{
	uint32_t hash;
	int s;

	hash = scsipi_chan_periph_hash(periph->periph_target,
	    periph->periph_lun);

	s = splbio();
	LIST_INSERT_HEAD(&chan->chan_periphtab[hash], periph, periph_hash);
	splx(s);
}

/*
 * scsipi_remove_periph:
 *
 *	Remove a periph from the channel.
 */
void
scsipi_remove_periph(chan, periph)
	struct scsipi_channel *chan;
	struct scsipi_periph *periph;
{
	int s;

	s = splbio();
	LIST_REMOVE(periph, periph_hash);
	splx(s);
}

/*
 * scsipi_lookup_periph:
 *
 *	Lookup a periph on the specified channel.
 */
struct scsipi_periph *
scsipi_lookup_periph(chan, target, lun)
	struct scsipi_channel *chan;
	int target, lun;
{
	struct scsipi_periph *periph;
	uint32_t hash;
	int s;

	if (target >= chan->chan_ntargets ||
	    lun >= chan->chan_nluns)
		return (NULL);

	hash = scsipi_chan_periph_hash(target, lun);

	s = splbio();
	LIST_FOREACH(periph, &chan->chan_periphtab[hash], periph_hash) {
		if (periph->periph_target == target &&
		    periph->periph_lun == lun)
			break;
	}
	splx(s);

	return (periph);
}

/*
 * scsipi_get_resource:
 *
 *	Allocate a single xfer `resource' from the channel.
 *
 *	NOTE: Must be called at splbio().
 */
int
scsipi_get_resource(chan)
	struct scsipi_channel *chan;
{
	struct scsipi_adapter *adapt = chan->chan_adapter;

	if (chan->chan_flags & SCSIPI_CHAN_OPENINGS) {
		if (chan->chan_openings > 0) {
			chan->chan_openings--;
			return (1);
		}
		return (0);
	}

	if (adapt->adapt_openings > 0) {
		adapt->adapt_openings--;
		return (1);
	}
	return (0);
}

/*
 * scsipi_grow_resources:
 *
 *	Attempt to grow resources for a channel.  If this succeeds,
 *	we allocate one for our caller.
 *
 *	NOTE: Must be called at splbio().
 */
__inline int
scsipi_grow_resources(chan)
	struct scsipi_channel *chan;
{

	if (chan->chan_flags & SCSIPI_CHAN_CANGROW) {
		if ((chan->chan_flags & SCSIPI_CHAN_TACTIVE) == 0) {
			scsipi_adapter_request(chan,
			    ADAPTER_REQ_GROW_RESOURCES, NULL);
			return (scsipi_get_resource(chan));
		}
		/*
		 * ask the channel thread to do it. It'll have to thaw the
		 * queue
		 */
		scsipi_channel_freeze(chan, 1);
		chan->chan_tflags |= SCSIPI_CHANT_GROWRES;
		wakeup(&chan->chan_complete);
		return (0);
	}

	return (0);
}

/*
 * scsipi_put_resource:
 *
 *	Free a single xfer `resource' to the channel.
 *
 *	NOTE: Must be called at splbio().
 */
void
scsipi_put_resource(chan)
	struct scsipi_channel *chan;
{
	struct scsipi_adapter *adapt = chan->chan_adapter;

	if (chan->chan_flags & SCSIPI_CHAN_OPENINGS)
		chan->chan_openings++;
	else
		adapt->adapt_openings++;
}

/*
 * scsipi_get_tag:
 *
 *	Get a tag ID for the specified xfer.
 *
 *	NOTE: Must be called at splbio().
 */
void
scsipi_get_tag(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_periph *periph = xs->xs_periph;
	int bit, tag;
	u_int word;

	bit = 0;	/* XXX gcc */
	for (word = 0; word < PERIPH_NTAGWORDS; word++) {
		bit = ffs(periph->periph_freetags[word]);
		if (bit != 0)
			break;
	}
#ifdef DIAGNOSTIC
	if (word == PERIPH_NTAGWORDS) {
		scsipi_printaddr(periph);
		printf("no free tags\n");
		panic("scsipi_get_tag");
	}
#endif

	bit -= 1;
	periph->periph_freetags[word] &= ~(1 << bit);
	tag = (word << 5) | bit;

	/* XXX Should eventually disallow this completely. */
	if (tag >= periph->periph_openings) {
		scsipi_printaddr(periph);
		printf("WARNING: tag %d greater than available openings %d\n",
		    tag, periph->periph_openings);
	}

	xs->xs_tag_id = tag;
}

/*
 * scsipi_put_tag:
 *
 *	Put the tag ID for the specified xfer back into the pool.
 *
 *	NOTE: Must be called at splbio().
 */
void
scsipi_put_tag(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_periph *periph = xs->xs_periph;
	int word, bit;

	word = xs->xs_tag_id >> 5;
	bit = xs->xs_tag_id & 0x1f;

	periph->periph_freetags[word] |= (1 << bit);
}
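
/*
 * A quick sketch of the tag encoding shared by the two routines above
 * (illustrative only): tag IDs pack 32 tags per periph_freetags[] word,
 * so the encode and decode steps are exact inverses:
 *
 *	tag  = (word << 5) | bit;	assembled in scsipi_get_tag()
 *	word = tag >> 5;		recovered in scsipi_put_tag()
 *	bit  = tag & 0x1f;
 */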

/*
 * scsipi_get_xs:
 *
 *	Allocate an xfer descriptor and associate it with the
 *	specified peripheral.  If the peripheral has no more
 *	available command openings, we either block waiting for
 *	one to become available, or fail.
 */
struct scsipi_xfer *
scsipi_get_xs(periph, flags)
	struct scsipi_periph *periph;
	int flags;
{
	struct scsipi_xfer *xs;
	int s;

	SC_DEBUG(periph, SCSIPI_DB3, ("scsipi_get_xs\n"));

	/*
	 * If we're cold, make sure we poll.
	 */
	if (cold)
		flags |= XS_CTL_NOSLEEP | XS_CTL_POLL;

#ifdef DIAGNOSTIC
	/*
	 * URGENT commands can never be ASYNC.
	 */
	if ((flags & (XS_CTL_URGENT|XS_CTL_ASYNC)) ==
	    (XS_CTL_URGENT|XS_CTL_ASYNC)) {
		scsipi_printaddr(periph);
		printf("URGENT and ASYNC\n");
		panic("scsipi_get_xs");
	}
#endif

	s = splbio();
	/*
	 * Wait for a command opening to become available.  Rules:
	 *
	 *	- All xfers must wait for an available opening.
	 *	  Exception: URGENT xfers can proceed when
	 *	  active == openings, because we use the opening
	 *	  of the command we're recovering for.
	 *	- if the periph has sense pending, only URGENT & REQSENSE
	 *	  xfers may proceed.
	 *
	 *	- If the periph is recovering, only URGENT xfers may
	 *	  proceed.
	 *
	 *	- If the periph is currently executing a recovery
	 *	  command, URGENT commands must block, because only
	 *	  one recovery command can execute at a time.
	 */
	for (;;) {
		if (flags & XS_CTL_URGENT) {
			if (periph->periph_active > periph->periph_openings)
				goto wait_for_opening;
			if (periph->periph_flags & PERIPH_SENSE) {
				if ((flags & XS_CTL_REQSENSE) == 0)
					goto wait_for_opening;
			} else {
				if ((periph->periph_flags &
				    PERIPH_RECOVERY_ACTIVE) != 0)
					goto wait_for_opening;
				periph->periph_flags |= PERIPH_RECOVERY_ACTIVE;
			}
			break;
		}
		if (periph->periph_active >= periph->periph_openings ||
		    (periph->periph_flags & PERIPH_RECOVERING) != 0)
			goto wait_for_opening;
		periph->periph_active++;
		break;

 wait_for_opening:
		if (flags & XS_CTL_NOSLEEP) {
			splx(s);
			return (NULL);
		}
		SC_DEBUG(periph, SCSIPI_DB3, ("sleeping\n"));
		periph->periph_flags |= PERIPH_WAITING;
		(void) tsleep(periph, PRIBIO, "getxs", 0);
	}
	SC_DEBUG(periph, SCSIPI_DB3, ("calling pool_get\n"));
	xs = pool_get(&scsipi_xfer_pool,
	    ((flags & XS_CTL_NOSLEEP) != 0 ? PR_NOWAIT : PR_WAITOK));
	if (xs == NULL) {
		if (flags & XS_CTL_URGENT) {
			if ((flags & XS_CTL_REQSENSE) == 0)
				periph->periph_flags &= ~PERIPH_RECOVERY_ACTIVE;
		} else
			periph->periph_active--;
		scsipi_printaddr(periph);
		printf("unable to allocate %sscsipi_xfer\n",
		    (flags & XS_CTL_URGENT) ? "URGENT " : "");
	}
	splx(s);

	SC_DEBUG(periph, SCSIPI_DB3, ("returning\n"));

	if (xs != NULL) {
		memset(xs, 0, sizeof(*xs));
		callout_init(&xs->xs_callout);
		xs->xs_periph = periph;
		xs->xs_control = flags;
		xs->xs_status = 0;
		s = splbio();
		TAILQ_INSERT_TAIL(&periph->periph_xferq, xs, device_q);
		splx(s);
	}
	return (xs);
}
/*
 * scsipi_put_xs:
 *
 *	Release an xfer descriptor, decreasing the outstanding command
 *	count for the peripheral.  If there is a thread waiting for
 *	an opening, wake it up.  If not, kick any queued I/O the
 *	peripheral may have.
 *
 *	NOTE: Must be called at splbio().
 */
void
scsipi_put_xs(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_periph *periph = xs->xs_periph;
	int flags = xs->xs_control;

	SC_DEBUG(periph, SCSIPI_DB3, ("scsipi_free_xs\n"));

	TAILQ_REMOVE(&periph->periph_xferq, xs, device_q);
	pool_put(&scsipi_xfer_pool, xs);

#ifdef DIAGNOSTIC
	if ((periph->periph_flags & PERIPH_RECOVERY_ACTIVE) != 0 &&
	    periph->periph_active == 0) {
		scsipi_printaddr(periph);
		printf("recovery without a command to recover for\n");
		panic("scsipi_put_xs");
	}
#endif

	if (flags & XS_CTL_URGENT) {
		if ((flags & XS_CTL_REQSENSE) == 0)
			periph->periph_flags &= ~PERIPH_RECOVERY_ACTIVE;
	} else
		periph->periph_active--;
	if (periph->periph_active == 0 &&
	    (periph->periph_flags & PERIPH_WAITDRAIN) != 0) {
		periph->periph_flags &= ~PERIPH_WAITDRAIN;
		wakeup(&periph->periph_active);
	}

	if (periph->periph_flags & PERIPH_WAITING) {
		periph->periph_flags &= ~PERIPH_WAITING;
		wakeup(periph);
	} else {
		if (periph->periph_switch->psw_start != NULL) {
			SC_DEBUG(periph, SCSIPI_DB2,
			    ("calling private start()\n"));
			(*periph->periph_switch->psw_start)(periph);
		}
	}
}
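
/*
 * Usage sketch (illustrative only): a periph driver pairs the two
 * routines above, and must release the xfer at splbio():
 *
 *	struct scsipi_xfer *xs;
 *	int s;
 *
 *	xs = scsipi_get_xs(periph, XS_CTL_NOSLEEP);
 *	if (xs == NULL)
 *		return;			(no opening, or no memory)
 *	... fill in the xfer and hand it to the midlayer ...
 *	s = splbio();
 *	scsipi_put_xs(xs);
 *	splx(s);
 */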

/*
 * scsipi_channel_freeze:
 *
 *	Freeze a channel's xfer queue.
 */
void
scsipi_channel_freeze(chan, count)
	struct scsipi_channel *chan;
	int count;
{
	int s;

	s = splbio();
	chan->chan_qfreeze += count;
	splx(s);
}

/*
 * scsipi_channel_thaw:
 *
 *	Thaw a channel's xfer queue.
 */
void
scsipi_channel_thaw(chan, count)
	struct scsipi_channel *chan;
	int count;
{
	int s;

	s = splbio();
	chan->chan_qfreeze -= count;
	/*
	 * Don't let the freeze count go negative.
	 *
	 * Presumably the adapter driver could keep track of this,
	 * but it might just be easier to do this here so as to allow
	 * multiple callers, including those outside the adapter driver.
	 */
	if (chan->chan_qfreeze < 0) {
		chan->chan_qfreeze = 0;
	}
	splx(s);
	/*
	 * Kick the channel's queue here.  Note, we may be running in
	 * interrupt context (softclock or HBA's interrupt), so the adapter
	 * driver had better not sleep.
	 */
	if (chan->chan_qfreeze == 0)
		scsipi_run_queue(chan);
}

/*
 * scsipi_channel_timed_thaw:
 *
 *	Thaw a channel after some time has expired.  This will also
 *	run the channel's queue if the freeze count has reached 0.
 */
void
scsipi_channel_timed_thaw(arg)
	void *arg;
{
	struct scsipi_channel *chan = arg;

	scsipi_channel_thaw(chan, 1);
}
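
/*
 * Usage sketch (illustrative only): scsipi_channel_timed_thaw() is
 * shaped as a callout handler, so an adapter that must back off briefly
 * can freeze the channel and schedule the thaw; "sc->sc_callout" here
 * is an assumed, caller-owned callout, named only for illustration:
 *
 *	scsipi_channel_freeze(chan, 1);
 *	callout_reset(&sc->sc_callout, hz,
 *	    scsipi_channel_timed_thaw, chan);
 */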

/*
 * scsipi_periph_freeze:
 *
 *	Freeze a device's xfer queue.
 */
void
scsipi_periph_freeze(periph, count)
	struct scsipi_periph *periph;
	int count;
{
	int s;

	s = splbio();
	periph->periph_qfreeze += count;
	splx(s);
}

/*
 * scsipi_periph_thaw:
 *
 *	Thaw a device's xfer queue.
 */
void
scsipi_periph_thaw(periph, count)
	struct scsipi_periph *periph;
	int count;
{
	int s;

	s = splbio();
	periph->periph_qfreeze -= count;
#ifdef DIAGNOSTIC
	if (periph->periph_qfreeze < 0) {
		static const char pc[] = "periph freeze count < 0";
		scsipi_printaddr(periph);
		printf("%s\n", pc);
		panic(pc);
	}
#endif
	if (periph->periph_qfreeze == 0 &&
	    (periph->periph_flags & PERIPH_WAITING) != 0)
		wakeup(periph);
	splx(s);
}

/*
 * scsipi_periph_timed_thaw:
 *
 *	Thaw a device after some time has expired.
 */
void
scsipi_periph_timed_thaw(arg)
	void *arg;
{
	int s;
	struct scsipi_periph *periph = arg;

	callout_stop(&periph->periph_callout);

	s = splbio();
	scsipi_periph_thaw(periph, 1);
	if ((periph->periph_channel->chan_flags & SCSIPI_CHAN_TACTIVE) == 0) {
		/*
		 * Kick the channel's queue here.  Note, we're running in
		 * interrupt context (softclock), so the adapter driver
		 * had better not sleep.
		 */
		scsipi_run_queue(periph->periph_channel);
	} else {
		/*
		 * Tell the completion thread to kick the channel's queue here.
		 */
		periph->periph_channel->chan_tflags |= SCSIPI_CHANT_KICK;
		wakeup(&periph->periph_channel->chan_complete);
	}
	splx(s);
}

/*
 * scsipi_wait_drain:
 *
 *	Wait for a periph's pending xfers to drain.
 */
void
scsipi_wait_drain(periph)
	struct scsipi_periph *periph;
{
	int s;

	s = splbio();
	while (periph->periph_active != 0) {
		periph->periph_flags |= PERIPH_WAITDRAIN;
		(void) tsleep(&periph->periph_active, PRIBIO, "sxdrn", 0);
	}
	splx(s);
}

/*
 * scsipi_kill_pending:
 *
 *	Kill off all pending xfers for a periph.
 *
 *	NOTE: Must be called at splbio().
 */
void
scsipi_kill_pending(periph)
	struct scsipi_periph *periph;
{

	(*periph->periph_channel->chan_bustype->bustype_kill_pending)(periph);
#ifdef DIAGNOSTIC
	if (TAILQ_FIRST(&periph->periph_xferq) != NULL)
		panic("scsipi_kill_pending");
#endif
	scsipi_wait_drain(periph);
}
/*
 * scsipi_interpret_sense:
 *
 *	Look at the returned sense and act on the error, determining
 *	the unix error number to pass back.  (0 = report no error)
 *
 *	NOTE: If we return ERESTART, we are expected to have
 *	thawed the device!
 *
 *	THIS IS THE DEFAULT ERROR HANDLER FOR SCSI DEVICES.
 */
int
scsipi_interpret_sense(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_sense_data *sense;
	struct scsipi_periph *periph = xs->xs_periph;
	u_int8_t key;
	int error;
#ifndef SCSIVERBOSE
	u_int32_t info;
	static char *error_mes[] = {
		"soft error (corrected)",
		"not ready", "medium error",
		"non-media hardware failure", "illegal request",
		"unit attention", "readonly device",
		"no data found", "vendor unique",
		"copy aborted", "command aborted",
		"search returned equal", "volume overflow",
		"verify miscompare", "unknown error key"
	};
#endif

	sense = &xs->sense.scsi_sense;
#ifdef SCSIPI_DEBUG
	if (periph->periph_flags & SCSIPI_DB1) {
		int count;
		scsipi_printaddr(periph);
		printf(" sense debug information:\n");
		printf("\tcode 0x%x valid 0x%x\n",
		    sense->error_code & SSD_ERRCODE,
		    sense->error_code & SSD_ERRCODE_VALID ? 1 : 0);
		printf("\tseg 0x%x key 0x%x ili 0x%x eom 0x%x fmark 0x%x\n",
		    sense->segment,
		    sense->flags & SSD_KEY,
		    sense->flags & SSD_ILI ? 1 : 0,
		    sense->flags & SSD_EOM ? 1 : 0,
		    sense->flags & SSD_FILEMARK ? 1 : 0);
		printf("\ninfo: 0x%x 0x%x 0x%x 0x%x followed by %d "
		    "extra bytes\n",
		    sense->info[0],
		    sense->info[1],
		    sense->info[2],
		    sense->info[3],
		    sense->extra_len);
		printf("\textra: ");
		for (count = 0; count < ADD_BYTES_LIM(sense); count++)
			printf("0x%x ", sense->cmd_spec_info[count]);
		printf("\n");
	}
#endif

	/*
	 * If the periph has its own error handler, call it first.
	 * If it returns a legit error value, return that, otherwise
	 * it wants us to continue with normal error processing.
	 */
	if (periph->periph_switch->psw_error != NULL) {
		SC_DEBUG(periph, SCSIPI_DB2,
		    ("calling private err_handler()\n"));
		error = (*periph->periph_switch->psw_error)(xs);
		if (error != EJUSTRETURN)
			return (error);
	}
	/* otherwise use the default */
	switch (sense->error_code & SSD_ERRCODE) {

		/*
		 * Old SCSI-1 and SASI devices respond with
		 * codes other than 70.
		 */
	case 0x00:		/* no error (command completed OK) */
		return (0);
	case 0x04:		/* drive not ready after it was selected */
		if ((periph->periph_flags & PERIPH_REMOVABLE) != 0)
			periph->periph_flags &= ~PERIPH_MEDIA_LOADED;
		if ((xs->xs_control & XS_CTL_IGNORE_NOT_READY) != 0)
			return (0);
		/* XXX - display some sort of error here? */
		return (EIO);
	case 0x20:		/* invalid command */
		if ((xs->xs_control &
		    XS_CTL_IGNORE_ILLEGAL_REQUEST) != 0)
			return (0);
		return (EINVAL);
	case 0x25:		/* invalid LUN (Adaptec ACB-4000) */
		return (EACCES);

		/*
		 * If it's code 70, use the extended stuff and
		 * interpret the key
		 */
	case 0x71:		/* delayed error */
		scsipi_printaddr(periph);
		key = sense->flags & SSD_KEY;
		printf(" DEFERRED ERROR, key = 0x%x\n", key);
		/* FALLTHROUGH */
	case 0x70:
#ifndef SCSIVERBOSE
		if ((sense->error_code & SSD_ERRCODE_VALID) != 0)
			info = _4btol(sense->info);
		else
			info = 0;
#endif
		key = sense->flags & SSD_KEY;

		switch (key) {
		case SKEY_NO_SENSE:
		case SKEY_RECOVERED_ERROR:
			if (xs->resid == xs->datalen && xs->datalen) {
				/*
				 * Why is this here?
				 */
				xs->resid = 0;	/* not short read */
			}
		case SKEY_EQUAL:
			error = 0;
			break;
		case SKEY_NOT_READY:
			if ((periph->periph_flags & PERIPH_REMOVABLE) != 0)
				periph->periph_flags &= ~PERIPH_MEDIA_LOADED;
			if ((xs->xs_control & XS_CTL_IGNORE_NOT_READY) != 0)
				return (0);
			if (sense->add_sense_code == 0x3A) {
				error = ENODEV; /* Medium not present */
				if (xs->xs_control & XS_CTL_SILENT_NODEV)
					return (error);
			} else
				error = EIO;
			if ((xs->xs_control & XS_CTL_SILENT) != 0)
				return (error);
			break;
		case SKEY_ILLEGAL_REQUEST:
			if ((xs->xs_control &
			    XS_CTL_IGNORE_ILLEGAL_REQUEST) != 0)
				return (0);
			/*
			 * Handle the case where a device reports
			 * Logical Unit Not Supported during discovery.
			 */
			if ((xs->xs_control & XS_CTL_DISCOVERY) != 0 &&
			    sense->add_sense_code == 0x25 &&
			    sense->add_sense_code_qual == 0x00)
				return (EINVAL);
			if ((xs->xs_control & XS_CTL_SILENT) != 0)
				return (EIO);
			error = EINVAL;
			break;
		case SKEY_UNIT_ATTENTION:
			if (sense->add_sense_code == 0x29 &&
			    sense->add_sense_code_qual == 0x00) {
				/* device or bus reset */
				if (xs->xs_retries != 0) {
					xs->xs_retries--;
					error = ERESTART;
				} else
					error = EIO;
				return (error);
			}
			if ((periph->periph_flags & PERIPH_REMOVABLE) != 0)
				periph->periph_flags &= ~PERIPH_MEDIA_LOADED;
			if ((xs->xs_control &
			     XS_CTL_IGNORE_MEDIA_CHANGE) != 0 ||
			    /* XXX Should reupload any transient state. */
			    (periph->periph_flags &
			     PERIPH_REMOVABLE) == 0) {
				return (ERESTART);
			}
			if ((xs->xs_control & XS_CTL_SILENT) != 0)
				return (EIO);
			error = EIO;
			break;
		case SKEY_WRITE_PROTECT:
			error = EROFS;
			break;
		case SKEY_BLANK_CHECK:
			error = 0;
			break;
		case SKEY_ABORTED_COMMAND:
			if (xs->xs_retries != 0) {
				xs->xs_retries--;
				error = ERESTART;
			} else
				error = EIO;
			break;
		case SKEY_VOLUME_OVERFLOW:
			error = ENOSPC;
			break;
		default:
			error = EIO;
			break;
		}

#ifdef SCSIVERBOSE
		if (key && (xs->xs_control & XS_CTL_SILENT) == 0)
			scsipi_print_sense(xs, 0);
#else
		if (key) {
			scsipi_printaddr(periph);
			printf("%s", error_mes[key - 1]);
			if ((sense->error_code & SSD_ERRCODE_VALID) != 0) {
				switch (key) {
				case SKEY_NOT_READY:
				case SKEY_ILLEGAL_REQUEST:
				case SKEY_UNIT_ATTENTION:
				case SKEY_WRITE_PROTECT:
					break;
				case SKEY_BLANK_CHECK:
					printf(", requested size: %d (decimal)",
					    info);
					break;
				case SKEY_ABORTED_COMMAND:
					if (xs->xs_retries)
						printf(", retrying");
					printf(", cmd 0x%x, info 0x%x",
					    xs->cmd->opcode, info);
					break;
				default:
					printf(", info = %d (decimal)", info);
				}
			}
			if (sense->extra_len != 0) {
				int n;
				printf(", data =");
				for (n = 0; n < sense->extra_len; n++)
					printf(" %02x",
					    sense->cmd_spec_info[n]);
			}
			printf("\n");
		}
#endif
		return (error);

	/*
	 * Some other code, just report it
	 */
	default:
#if defined(SCSIDEBUG) || defined(DEBUG)
	{
		static char *uc = "undecodable sense error";
		int i;
		u_int8_t *cptr = (u_int8_t *) sense;
		scsipi_printaddr(periph);
		if (xs->cmd == &xs->cmdstore) {
			printf("%s for opcode 0x%x, data=",
			    uc, xs->cmdstore.opcode);
		} else {
			printf("%s, data=", uc);
		}
		for (i = 0; i < sizeof (*sense); i++)
			printf(" 0x%02x", *(cptr++) & 0xff);
		printf("\n");
	}
#else
		scsipi_printaddr(periph);
		printf("Sense Error Code 0x%x",
		    sense->error_code & SSD_ERRCODE);
		if ((sense->error_code & SSD_ERRCODE_VALID) != 0) {
			struct scsipi_sense_data_unextended *usense =
			    (struct scsipi_sense_data_unextended *)sense;
			printf(" at block no. %d (decimal)",
			    _3btol(usense->block));
		}
		printf("\n");
#endif
		return (EIO);
	}
}

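/*
 * Sketch of a periph-level error handler (illustrative; "foo" is a
 * hypothetical driver): psw_error is consulted before the default
 * handling above, and EJUSTRETURN means "continue with the default":
 *
 *	int
 *	foo_err_handler(struct scsipi_xfer *xs)
 *	{
 *		if (the driver recognizes and handles this sense data)
 *			return (0);
 *		return (EJUSTRETURN);	fall back to the code above
 *	}
 */
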
/*
 * scsipi_size:
 *
 *	Find out from the device what its capacity is.
 */
u_int64_t
scsipi_size(periph, flags)
	struct scsipi_periph *periph;
	int flags;
{
	struct scsipi_read_cap_data rdcap;
	struct scsipi_read_capacity scsipi_cmd;

	memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
	scsipi_cmd.opcode = READ_CAPACITY;

	/*
	 * If the command works, interpret the result as a 4 byte
	 * number of blocks
	 */
	if (scsipi_command(periph, (struct scsipi_generic *)&scsipi_cmd,
	    sizeof(scsipi_cmd), (u_char *)&rdcap, sizeof(rdcap),
	    SCSIPIRETRIES, 20000, NULL,
	    flags | XS_CTL_DATA_IN | XS_CTL_DATA_ONSTACK | XS_CTL_SILENT) != 0)
		return (0);

	return (_4btol(rdcap.addr) + 1);
}

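/*
 * Usage sketch (illustrative only): READ CAPACITY reports the address
 * of the last block, hence the "+ 1" above; a caller gets the unit
 * size in blocks, with 0 meaning the command failed:
 *
 *	u_int64_t nblks = scsipi_size(periph, XS_CTL_SILENT);
 *	if (nblks == 0)
 *		... fall back to other means, or fail the attach ...
 */
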
/*
 * scsipi_test_unit_ready:
 *
 *	Issue a `test unit ready' request.
 */
int
scsipi_test_unit_ready(periph, flags)
	struct scsipi_periph *periph;
	int flags;
{
	int retries;
	struct scsipi_test_unit_ready scsipi_cmd;

	/* some ATAPI drives don't support TEST_UNIT_READY. Sigh */
	if (periph->periph_quirks & PQUIRK_NOTUR)
		return (0);

	memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
	scsipi_cmd.opcode = TEST_UNIT_READY;

	if (flags & XS_CTL_DISCOVERY)
		retries = 0;
	else
		retries = SCSIPIRETRIES;

	return (scsipi_command(periph,
	    (struct scsipi_generic *)&scsipi_cmd, sizeof(scsipi_cmd),
	    0, 0, retries, 10000, NULL, flags));
}

/*
 * scsipi_inquire:
 *
 *	Ask the device about itself.
 */
int
scsipi_inquire(periph, inqbuf, flags)
	struct scsipi_periph *periph;
	struct scsipi_inquiry_data *inqbuf;
	int flags;
{
	int retries;
	struct scsipi_inquiry scsipi_cmd;
	int error;

	memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
	scsipi_cmd.opcode = INQUIRY;

	if (flags & XS_CTL_DISCOVERY)
		retries = 0;
	else
		retries = SCSIPIRETRIES;

	/*
	 * If we request more data than the device can provide, it SHOULD just
	 * return a short response.  However, some devices error with an
	 * ILLEGAL REQUEST sense code, and yet others have even more special
	 * failure modes (such as the GL641USB flash adapter, which goes loony
	 * and sends corrupted CRCs).  To work around this, and to bring our
	 * behavior more in line with other OSes, we do a shorter inquiry,
	 * covering all the SCSI-2 information, first, and then request more
	 * data iff the "additional length" field indicates there is more.
	 * - mycroft, 2003/10/16
	 */
	scsipi_cmd.length = SCSIPI_INQUIRY_LENGTH_SCSI2;
	error = scsipi_command(periph,
	    (struct scsipi_generic *) &scsipi_cmd, sizeof(scsipi_cmd),
	    (u_char *) inqbuf, SCSIPI_INQUIRY_LENGTH_SCSI2,
	    retries, 10000, NULL, XS_CTL_DATA_IN | flags);
	if (!error && inqbuf->additional_length > SCSIPI_INQUIRY_LENGTH_SCSI2 - 4) {
		scsipi_cmd.length = SCSIPI_INQUIRY_LENGTH_SCSI3;
		error = scsipi_command(periph,
		    (struct scsipi_generic *) &scsipi_cmd, sizeof(scsipi_cmd),
		    (u_char *) inqbuf, SCSIPI_INQUIRY_LENGTH_SCSI3,
		    retries, 10000, NULL, XS_CTL_DATA_IN | flags);
	}

#ifdef SCSI_OLD_NOINQUIRY
	/*
	 * Kludge for the Adaptec ACB-4000 SCSI->MFM translator.
	 * This board doesn't support the INQUIRY command at all.
	 */
	if (error == EINVAL || error == EACCES) {
		/*
		 * Conjure up an INQUIRY response.
		 */
		inqbuf->device = (error == EINVAL ?
		    SID_QUAL_LU_PRESENT :
		    SID_QUAL_LU_NOTPRESENT) | T_DIRECT;
		inqbuf->dev_qual2 = 0;
		inqbuf->version = 0;
		inqbuf->response_format = SID_FORMAT_SCSI1;
		inqbuf->additional_length = SCSIPI_INQUIRY_LENGTH_SCSI2 - 4;
		inqbuf->flags1 = inqbuf->flags2 = inqbuf->flags3 = 0;
		memcpy(inqbuf->vendor, "ADAPTEC ACB-4000            ", 28);
		error = 0;
	}

	/*
	 * Kludge for the Emulex MT-02 SCSI->QIC translator.
	 * This board gives an empty response to an INQUIRY command.
	 */
	else if (error == 0 &&
	    inqbuf->device == (SID_QUAL_LU_PRESENT | T_DIRECT) &&
	    inqbuf->dev_qual2 == 0 &&
	    inqbuf->version == 0 &&
	    inqbuf->response_format == SID_FORMAT_SCSI1) {
		/*
		 * Fill out the INQUIRY response.
		 */
		inqbuf->device = (SID_QUAL_LU_PRESENT | T_SEQUENTIAL);
		inqbuf->dev_qual2 = SID_REMOVABLE;
		inqbuf->additional_length = SCSIPI_INQUIRY_LENGTH_SCSI2 - 4;
		inqbuf->flags1 = inqbuf->flags2 = inqbuf->flags3 = 0;
		memcpy(inqbuf->vendor, "EMULEX  MT-02 QIC           ", 28);
	}
#endif /* SCSI_OLD_NOINQUIRY */

	return error;
}

/*
 * scsipi_prevent:
 *
 *	Prevent or allow the user to remove the media
 */
int
scsipi_prevent(periph, type, flags)
	struct scsipi_periph *periph;
	int type, flags;
{
	struct scsipi_prevent scsipi_cmd;

	memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
	scsipi_cmd.opcode = PREVENT_ALLOW;
	scsipi_cmd.how = type;

	return (scsipi_command(periph,
	    (struct scsipi_generic *) &scsipi_cmd, sizeof(scsipi_cmd),
	    0, 0, SCSIPIRETRIES, 5000, NULL, flags));
}

/*
 * scsipi_start:
 *
 *	Send a START UNIT.
 */
int
scsipi_start(periph, type, flags)
	struct scsipi_periph *periph;
	int type, flags;
{
	struct scsipi_start_stop scsipi_cmd;

	memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
	scsipi_cmd.opcode = START_STOP;
	scsipi_cmd.byte2 = 0x00;
	scsipi_cmd.how = type;

	return (scsipi_command(periph,
	    (struct scsipi_generic *) &scsipi_cmd, sizeof(scsipi_cmd),
	    0, 0, SCSIPIRETRIES, (type & SSS_START) ? 60000 : 10000,
	    NULL, flags));
}

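/*
 * Usage sketch (illustrative only): a removable-media driver typically
 * spins the unit up and then locks the door; PR_PREVENT is assumed here
 * to be the PREVENT/ALLOW "prevent" value from scsi_all.h, named only
 * for illustration:
 *
 *	(void) scsipi_start(periph, SSS_START, XS_CTL_SILENT);
 *	(void) scsipi_prevent(periph, PR_PREVENT, XS_CTL_SILENT);
 */
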
/*
 * scsipi_mode_sense, scsipi_mode_sense_big:
 *	get a sense page from a device
 */

int
scsipi_mode_sense(periph, byte2, page, data, len, flags, retries, timeout)
	struct scsipi_periph *periph;
	int byte2, page, len, flags, retries, timeout;
	struct scsipi_mode_header *data;
{
	struct scsipi_mode_sense scsipi_cmd;
	int error;

	memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
	scsipi_cmd.opcode = MODE_SENSE;
	scsipi_cmd.byte2 = byte2;
	scsipi_cmd.page = page;
	scsipi_cmd.length = len & 0xff;
	error = scsipi_command(periph, (struct scsipi_generic *)&scsipi_cmd,
	    sizeof(scsipi_cmd), (void *)data, len, retries, timeout, NULL,
	    flags | XS_CTL_DATA_IN);
	SC_DEBUG(periph, SCSIPI_DB2,
	    ("scsipi_mode_sense: error=%d\n", error));
	return (error);
}

int
scsipi_mode_sense_big(periph, byte2, page, data, len, flags, retries, timeout)
	struct scsipi_periph *periph;
	int byte2, page, len, flags, retries, timeout;
	struct scsipi_mode_header_big *data;
{
	struct scsipi_mode_sense_big scsipi_cmd;
	int error;

	memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
	scsipi_cmd.opcode = MODE_SENSE_BIG;
	scsipi_cmd.byte2 = byte2;
	scsipi_cmd.page = page;
	_lto2b(len, scsipi_cmd.length);
	error = scsipi_command(periph, (struct scsipi_generic *)&scsipi_cmd,
	    sizeof(scsipi_cmd), (void *)data, len, retries, timeout, NULL,
	    flags | XS_CTL_DATA_IN);
	SC_DEBUG(periph, SCSIPI_DB2,
	    ("scsipi_mode_sense_big: error=%d\n", error));
	return (error);
}

int
scsipi_mode_select(periph, byte2, data, len, flags, retries, timeout)
	struct scsipi_periph *periph;
	int byte2, len, flags, retries, timeout;
	struct scsipi_mode_header *data;
{
	struct scsipi_mode_select scsipi_cmd;
	int error;

	memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
	scsipi_cmd.opcode = MODE_SELECT;
	scsipi_cmd.byte2 = byte2;
	scsipi_cmd.length = len & 0xff;
	error = scsipi_command(periph, (struct scsipi_generic *)&scsipi_cmd,
	    sizeof(scsipi_cmd), (void *)data, len, retries, timeout, NULL,
	    flags | XS_CTL_DATA_OUT);
	SC_DEBUG(periph, SCSIPI_DB2,
	    ("scsipi_mode_select: error=%d\n", error));
	return (error);
}

int
scsipi_mode_select_big(periph, byte2, data, len, flags, retries, timeout)
	struct scsipi_periph *periph;
	int byte2, len, flags, retries, timeout;
	struct scsipi_mode_header_big *data;
{
	struct scsipi_mode_select_big scsipi_cmd;
	int error;

	memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
	scsipi_cmd.opcode = MODE_SELECT_BIG;
	scsipi_cmd.byte2 = byte2;
	_lto2b(len, scsipi_cmd.length);
	error = scsipi_command(periph, (struct scsipi_generic *)&scsipi_cmd,
	    sizeof(scsipi_cmd), (void *)data, len, retries, timeout, NULL,
	    flags | XS_CTL_DATA_OUT);
	SC_DEBUG(periph, SCSIPI_DB2,
	    ("scsipi_mode_select_big: error=%d\n", error));
	return (error);
}

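/*
 * Usage sketch (illustrative; "struct foo_mode_page" is a hypothetical
 * caller-defined buffer that begins with the mode header): fetching a
 * page with the 6-byte MODE SENSE above:
 *
 *	struct foo_mode_page mp;
 *	int error;
 *
 *	error = scsipi_mode_sense(periph, 0, page, &mp.header,
 *	    sizeof(mp), XS_CTL_SILENT, SCSIPIRETRIES, 10000);
 */
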
/*
 * scsipi_done:
 *
 *	This routine is called by an adapter's interrupt handler when
 *	an xfer is completed.
 */
void
scsipi_done(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_periph *periph = xs->xs_periph;
	struct scsipi_channel *chan = periph->periph_channel;
	int s, freezecnt;

	SC_DEBUG(periph, SCSIPI_DB2, ("scsipi_done\n"));
#ifdef SCSIPI_DEBUG
	if (periph->periph_dbflags & SCSIPI_DB1)
		show_scsipi_cmd(xs);
#endif

	s = splbio();
	/*
	 * The resource this command was using is now free.
	 */
	scsipi_put_resource(chan);
	xs->xs_periph->periph_sent--;

	/*
	 * If the command was tagged, free the tag.
	 */
	if (XS_CTL_TAGTYPE(xs) != 0)
		scsipi_put_tag(xs);
	else
		periph->periph_flags &= ~PERIPH_UNTAG;

	/* Mark the command as `done'. */
	xs->xs_status |= XS_STS_DONE;

#ifdef DIAGNOSTIC
	if ((xs->xs_control & (XS_CTL_ASYNC|XS_CTL_POLL)) ==
	    (XS_CTL_ASYNC|XS_CTL_POLL))
		panic("scsipi_done: ASYNC and POLL");
#endif

	/*
	 * If the xfer had an error of any sort, freeze the
	 * periph's queue.  Freeze it again if we were requested
	 * to do so in the xfer.
	 */
	freezecnt = 0;
	if (xs->error != XS_NOERROR)
		freezecnt++;
	if (xs->xs_control & XS_CTL_FREEZE_PERIPH)
		freezecnt++;
	if (freezecnt != 0)
		scsipi_periph_freeze(periph, freezecnt);

	/*
	 * record the xfer with a pending sense, in case a SCSI reset is
	 * received before the thread is woken up.
	 */
	if (xs->error == XS_BUSY && xs->status == SCSI_CHECK) {
		periph->periph_flags |= PERIPH_SENSE;
		periph->periph_xscheck = xs;
	}

	/*
	 * If this was an xfer that was not to complete asynchronously,
	 * let the requesting thread perform error checking/handling
	 * in its context.
	 */
	if ((xs->xs_control & XS_CTL_ASYNC) == 0) {
		splx(s);
		/*
		 * If it's a polling job, just return, to unwind the
		 * call graph.  We don't need to restart the queue,
		 * because polling jobs are treated specially, and
		 * are really only used during crash dumps anyway
		 * (XXX or during boot-time autoconfiguration of
		 * ATAPI devices).
		 */
		if (xs->xs_control & XS_CTL_POLL)
			return;
		wakeup(xs);
		goto out;
	}

	/*
	 * Catch the extremely common case of I/O completing
	 * without error; no use in taking a context switch
	 * if we can handle it in interrupt context.
	 */
	if (xs->error == XS_NOERROR) {
		splx(s);
		(void) scsipi_complete(xs);
		goto out;
	}

	/*
	 * There is an error on this xfer.  Put it on the channel's
	 * completion queue, and wake up the completion thread.
	 */
	TAILQ_INSERT_TAIL(&chan->chan_complete, xs, channel_q);
	splx(s);
	wakeup(&chan->chan_complete);

 out:
	/*
	 * If there are more xfers on the channel's queue, attempt to
	 * run them.
	 */
	scsipi_run_queue(chan);
}

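/*
 * Sketch of the adapter side (illustrative only): an HBA's interrupt
 * handler fills in the completion fields and then calls scsipi_done()
 * exactly once per xfer; the values below are just examples:
 *
 *	xs->status = SCSI_OK;		SCSI status byte from the HBA
 *	xs->resid = 0;			all data was transferred
 *	xs->error = XS_NOERROR;		or XS_SENSE, XS_TIMEOUT, ...
 *	scsipi_done(xs);
 */
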
/*
 * scsipi_complete:
 *
 *	Completion of a scsipi_xfer.  This is the guts of scsipi_done().
 *
 *	NOTE: This routine MUST be called with valid thread context
 *	except for the case where the following two conditions are
 *	true:
 *
 *		xs->error == XS_NOERROR
 *		XS_CTL_ASYNC is set in xs->xs_control
 *
 *	The semantics of this routine can be tricky, so here is an
 *	explanation:
 *
 *		0		Xfer completed successfully.
 *
 *		ERESTART	Xfer had an error, but was restarted.
 *
 *		anything else	Xfer had an error, return value is Unix
 *				errno.
 *
 *	If the return value is anything but ERESTART:
 *
 *		- If XS_CTL_ASYNC is set, `xs' has been freed back to
 *		  the pool.
 *		- If there is a buf associated with the xfer,
 *		  it has been biodone()'d.
 */
int
scsipi_complete(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_periph *periph = xs->xs_periph;
	struct scsipi_channel *chan = periph->periph_channel;
	struct buf *bp;
	int error, s;

#ifdef DIAGNOSTIC
	if ((xs->xs_control & XS_CTL_ASYNC) != 0 && xs->bp == NULL)
		panic("scsipi_complete: XS_CTL_ASYNC but no buf");
#endif
	/*
	 * If command terminated with a CHECK CONDITION, we need to issue a
	 * REQUEST_SENSE command.  Once the REQUEST_SENSE has been processed
	 * we'll have the real status.
	 * Must be processed at splbio() to avoid missing a SCSI bus reset
	 * for this command.
	 */
	s = splbio();
	if (xs->error == XS_BUSY && xs->status == SCSI_CHECK) {
		/* request sense for a request sense? */
		if (xs->xs_control & XS_CTL_REQSENSE) {
			scsipi_printaddr(periph);
			printf("request sense for a request sense?\n");
			/* XXX maybe we should reset the device? */
			/* we've been frozen because xs->error != XS_NOERROR */
			scsipi_periph_thaw(periph, 1);
			splx(s);
			if (xs->resid < xs->datalen) {
				printf("we read %d bytes of sense anyway:\n",
				    xs->datalen - xs->resid);
#ifdef SCSIVERBOSE
				scsipi_print_sense_data((void *)xs->data, 0);
#endif
			}
			return EINVAL;
		}
		scsipi_request_sense(xs);
	}
	splx(s);

	/*
	 * If it's a user level request, bypass all usual completion
	 * processing, let the user work it out..
	 */
	if ((xs->xs_control & XS_CTL_USERCMD) != 0) {
		SC_DEBUG(periph, SCSIPI_DB3, ("calling user done()\n"));
		if (xs->error != XS_NOERROR)
			scsipi_periph_thaw(periph, 1);
		scsipi_user_done(xs);
		SC_DEBUG(periph, SCSIPI_DB3, ("returned from user done()\n "));
		return 0;
	}

	switch (xs->error) {
	case XS_NOERROR:
		error = 0;
		break;

	case XS_SENSE:
	case XS_SHORTSENSE:
		error = (*chan->chan_bustype->bustype_interpret_sense)(xs);
		break;

	case XS_RESOURCE_SHORTAGE:
		/*
		 * XXX Should freeze channel's queue.
		 */
		scsipi_printaddr(periph);
		printf("adapter resource shortage\n");
		/* FALLTHROUGH */

	case XS_BUSY:
		if (xs->error == XS_BUSY && xs->status == SCSI_QUEUE_FULL) {
			struct scsipi_max_openings mo;

			/*
			 * We set the openings to active - 1, assuming that
			 * the command that got us here is the first one that
			 * can't fit into the device's queue.  If that's not
			 * the case, I guess we'll find out soon enough.
			 */
			mo.mo_target = periph->periph_target;
			mo.mo_lun = periph->periph_lun;
			if (periph->periph_active < periph->periph_openings)
				mo.mo_openings = periph->periph_active - 1;
			else
				mo.mo_openings = periph->periph_openings - 1;
#ifdef DIAGNOSTIC
			if (mo.mo_openings < 0) {
				scsipi_printaddr(periph);
				printf("QUEUE FULL resulted in < 0 openings\n");
				panic("scsipi_done");
			}
#endif
			if (mo.mo_openings == 0) {
				scsipi_printaddr(periph);
				printf("QUEUE FULL resulted in 0 openings\n");
				mo.mo_openings = 1;
			}
			scsipi_async_event(chan, ASYNC_EVENT_MAX_OPENINGS, &mo);
			error = ERESTART;
		} else if (xs->xs_retries != 0) {
			xs->xs_retries--;
			/*
			 * Wait one second, and try again.
			 */
			if ((xs->xs_control & XS_CTL_POLL) ||
			    (chan->chan_flags & SCSIPI_CHAN_TACTIVE) == 0) {
				delay(1000000);
			} else if (!callout_pending(&periph->periph_callout)) {
				scsipi_periph_freeze(periph, 1);
				callout_reset(&periph->periph_callout,
				    hz, scsipi_periph_timed_thaw, periph);
			}
			error = ERESTART;
		} else
			error = EBUSY;
		break;

	case XS_REQUEUE:
		error = ERESTART;
		break;

	case XS_SELTIMEOUT:
	case XS_TIMEOUT:
		/*
		 * If the device hasn't gone away, honor retry counts.
		 *
		 * Note that if we're in the middle of probing it,
		 * it won't be found because it isn't here yet so
		 * we won't honor the retry count in that case.
		 */
		if (scsipi_lookup_periph(chan, periph->periph_target,
		    periph->periph_lun) && xs->xs_retries != 0) {
			xs->xs_retries--;
			error = ERESTART;
		} else
			error = EIO;
		break;

	case XS_RESET:
		if (xs->xs_control & XS_CTL_REQSENSE) {
			/*
			 * request sense interrupted by reset: signal it
			 * with EINTR return code.
			 */
			error = EINTR;
		} else {
			if (xs->xs_retries != 0) {
				xs->xs_retries--;
				error = ERESTART;
			} else
				error = EIO;
		}
		break;

	case XS_DRIVER_STUFFUP:
		scsipi_printaddr(periph);
		printf("generic HBA error\n");
		error = EIO;
		break;
	default:
		scsipi_printaddr(periph);
		printf("invalid return code from adapter: %d\n", xs->error);
		error = EIO;
		break;
	}

	s = splbio();
	if (error == ERESTART) {
		/*
		 * If we get here, the periph has been thawed and frozen
		 * again if we had to issue recovery commands.  Alternatively,
		 * it may have been frozen again and in a timed thaw.  In
		 * any case, we thaw the periph once we re-enqueue the
		 * command.  Once the periph is fully thawed, it will begin
		 * operation again.
		 */
		xs->error = XS_NOERROR;
		xs->status = SCSI_OK;
		xs->xs_status &= ~XS_STS_DONE;
		xs->xs_requeuecnt++;
		error = scsipi_enqueue(xs);
		if (error == 0) {
			scsipi_periph_thaw(periph, 1);
			splx(s);
			return (ERESTART);
		}
	}

	/*
	 * scsipi_done() freezes the queue if not XS_NOERROR.
	 * Thaw it here.
	 */
	if (xs->error != XS_NOERROR)
		scsipi_periph_thaw(periph, 1);

	/*
	 * Set buffer fields in case the periph
	 * switch done func uses them
	 */
	if ((bp = xs->bp) != NULL) {
		if (error) {
			bp->b_error = error;
			bp->b_flags |= B_ERROR;
			bp->b_resid = bp->b_bcount;
		} else {
			bp->b_error = 0;
			bp->b_resid = xs->resid;
		}
	}

	if (periph->periph_switch->psw_done)
		periph->periph_switch->psw_done(xs);

	if (bp)
		biodone(bp);

	if (xs->xs_control & XS_CTL_ASYNC)
		scsipi_put_xs(xs);
	splx(s);

	return (error);
}

/*
 * Issue a request sense for the given scsipi_xfer.  Called when the xfer
 * returns with a CHECK_CONDITION status.  Must be called in valid thread
 * context and at splbio().
 */

void
scsipi_request_sense(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_periph *periph = xs->xs_periph;
	int flags, error;
	struct scsipi_sense cmd;

	periph->periph_flags |= PERIPH_SENSE;

	/* if command was polling, request sense will too */
	flags = xs->xs_control & XS_CTL_POLL;
	/* Polling commands can't sleep */
	if (flags)
		flags |= XS_CTL_NOSLEEP;

	flags |= XS_CTL_REQSENSE | XS_CTL_URGENT | XS_CTL_DATA_IN |
	    XS_CTL_THAW_PERIPH | XS_CTL_FREEZE_PERIPH;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = REQUEST_SENSE;
	cmd.length = sizeof(struct scsipi_sense_data);

	error = scsipi_command(periph,
	    (struct scsipi_generic *) &cmd, sizeof(cmd),
	    (u_char*)&xs->sense.scsi_sense, sizeof(struct scsipi_sense_data),
	    0, 1000, NULL, flags);
	periph->periph_flags &= ~PERIPH_SENSE;
	periph->periph_xscheck = NULL;
	switch (error) {
	case 0:
		/* we have a valid sense */
		xs->error = XS_SENSE;
		return;
	case EINTR:
		/* REQUEST_SENSE interrupted by bus reset. */
		xs->error = XS_RESET;
		return;
	case EIO:
		/* request sense couldn't be performed */
		/*
		 * XXX this isn't quite right but we don't have anything
		 * better for now
		 */
		xs->error = XS_DRIVER_STUFFUP;
		return;
	default:
		/* Notify that request sense failed. */
		xs->error = XS_DRIVER_STUFFUP;
		scsipi_printaddr(periph);
		printf("request sense failed with error %d\n", error);
		return;
	}
}


/*
 * scsipi_enqueue:
 *
 *	Enqueue an xfer on a channel.
 */
int
scsipi_enqueue(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_channel *chan = xs->xs_periph->periph_channel;
	struct scsipi_xfer *qxs;
	int s;

	s = splbio();

	/*
	 * If the xfer is to be polled, and there are already jobs on
	 * the queue, we can't proceed.
	 */
	if ((xs->xs_control & XS_CTL_POLL) != 0 &&
	    TAILQ_FIRST(&chan->chan_queue) != NULL) {
		splx(s);
		xs->error = XS_DRIVER_STUFFUP;
		return (EAGAIN);
	}

	/*
	 * If we have an URGENT xfer, it's an error recovery command
	 * and it should just go on the head of the channel's queue.
	 */
	if (xs->xs_control & XS_CTL_URGENT) {
		TAILQ_INSERT_HEAD(&chan->chan_queue, xs, channel_q);
		goto out;
	}

	/*
	 * If this xfer has already been on the queue before, we
	 * need to reinsert it in the correct order.  That order is:
	 *
	 *	Immediately before the first xfer for this periph
	 *	with a requeuecnt less than xs->xs_requeuecnt.
	 *
	 * Failing that, at the end of the queue.  (We'll end up
	 * there naturally.)
	 */
	if (xs->xs_requeuecnt != 0) {
		for (qxs = TAILQ_FIRST(&chan->chan_queue); qxs != NULL;
		     qxs = TAILQ_NEXT(qxs, channel_q)) {
			if (qxs->xs_periph == xs->xs_periph &&
			    qxs->xs_requeuecnt < xs->xs_requeuecnt)
				break;
		}
		if (qxs != NULL) {
			TAILQ_INSERT_AFTER(&chan->chan_queue, qxs, xs,
			    channel_q);
			goto out;
		}
	}
	TAILQ_INSERT_TAIL(&chan->chan_queue, xs, channel_q);
 out:
	if (xs->xs_control & XS_CTL_THAW_PERIPH)
		scsipi_periph_thaw(xs->xs_periph, 1);
	splx(s);
	return (0);
}

/*
 * scsipi_run_queue:
 *
 *	Start as many xfers as possible running on the channel.
 */
void
scsipi_run_queue(chan)
	struct scsipi_channel *chan;
{
	struct scsipi_xfer *xs;
	struct scsipi_periph *periph;
	int s;

	for (;;) {
		s = splbio();

		/*
		 * If the channel is frozen, we can't do any work right
		 * now.
		 */
		if (chan->chan_qfreeze != 0) {
			splx(s);
			return;
		}

		/*
		 * Look for work to do, and make sure we can do it.
		 */
		for (xs = TAILQ_FIRST(&chan->chan_queue); xs != NULL;
		     xs = TAILQ_NEXT(xs, channel_q)) {
			periph = xs->xs_periph;

			if ((periph->periph_sent >= periph->periph_openings) ||
			    periph->periph_qfreeze != 0 ||
			    (periph->periph_flags & PERIPH_UNTAG) != 0)
				continue;

			if ((periph->periph_flags &
			    (PERIPH_RECOVERING | PERIPH_SENSE)) != 0 &&
			    (xs->xs_control & XS_CTL_URGENT) == 0)
				continue;

			/*
			 * We can issue this xfer!
			 */
			goto got_one;
		}

		/*
		 * Can't find any work to do right now.
		 */
		splx(s);
		return;

 got_one:
		/*
		 * Have an xfer to run.  Allocate a resource from
		 * the adapter to run it.  If we can't allocate that
		 * resource, we don't dequeue the xfer.
		 */
		if (scsipi_get_resource(chan) == 0) {
			/*
			 * Adapter is out of resources.  If the adapter
			 * supports it, attempt to grow them.
			 */
			if (scsipi_grow_resources(chan) == 0) {
				/*
				 * Wasn't able to grow resources,
				 * nothing more we can do.
				 */
				if (xs->xs_control & XS_CTL_POLL) {
					scsipi_printaddr(xs->xs_periph);
					printf("polling command but no "
					    "adapter resources\n");
					/* We'll panic shortly... */
				}
				splx(s);

				/*
				 * XXX: We should be able to note
				 * XXX: that resources are needed here!
				 */
				return;
			}
			/*
			 * scsipi_grow_resources() allocated the resource
			 * for us.
			 */
		}

		/*
		 * We have a resource to run this xfer, do it!
		 */
		TAILQ_REMOVE(&chan->chan_queue, xs, channel_q);

		/*
		 * If the command is to be tagged, allocate a tag ID
		 * for it.
		 */
		if (XS_CTL_TAGTYPE(xs) != 0)
			scsipi_get_tag(xs);
		else
			periph->periph_flags |= PERIPH_UNTAG;
		periph->periph_sent++;
		splx(s);

		scsipi_adapter_request(chan, ADAPTER_REQ_RUN_XFER, xs);
	}
#ifdef DIAGNOSTIC
	panic("scsipi_run_queue: impossible");
#endif
}

/*
 * scsipi_execute_xs:
 *
 *	Begin execution of an xfer, waiting for it to complete, if necessary.
 */
int
scsipi_execute_xs(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_periph *periph = xs->xs_periph;
	struct scsipi_channel *chan = periph->periph_channel;
	int oasync, async, poll, retries, error, s;

	xs->xs_status &= ~XS_STS_DONE;
	xs->error = XS_NOERROR;
	xs->resid = xs->datalen;
	xs->status = SCSI_OK;

#ifdef SCSIPI_DEBUG
	if (xs->xs_periph->periph_dbflags & SCSIPI_DB3) {
		printf("scsipi_execute_xs: ");
		show_scsipi_xs(xs);
		printf("\n");
	}
#endif

	/*
	 * Deal with command tagging:
	 *
	 *	- If the device's current operating mode doesn't
	 *	  include tagged queueing, clear the tag mask.
	 *
	 *	- If the device's current operating mode *does*
	 *	  include tagged queueing, set the tag_type in
	 *	  the xfer to the appropriate byte for the tag
	 *	  message.
	 */
	if ((PERIPH_XFER_MODE(periph) & PERIPH_CAP_TQING) == 0 ||
	    (xs->xs_control & XS_CTL_REQSENSE)) {
		xs->xs_control &= ~XS_CTL_TAGMASK;
		xs->xs_tag_type = 0;
	} else {
		/*
		 * If the request doesn't specify a tag, give Head
		 * tags to URGENT operations and Ordered tags to
		 * everything else.
		 */
		if (XS_CTL_TAGTYPE(xs) == 0) {
			if (xs->xs_control & XS_CTL_URGENT)
				xs->xs_control |= XS_CTL_HEAD_TAG;
			else
				xs->xs_control |= XS_CTL_ORDERED_TAG;
		}

		switch (XS_CTL_TAGTYPE(xs)) {
		case XS_CTL_ORDERED_TAG:
			xs->xs_tag_type = MSG_ORDERED_Q_TAG;
			break;

		case XS_CTL_SIMPLE_TAG:
			xs->xs_tag_type = MSG_SIMPLE_Q_TAG;
			break;

		case XS_CTL_HEAD_TAG:
			xs->xs_tag_type = MSG_HEAD_OF_Q_TAG;
			break;

		default:
			scsipi_printaddr(periph);
			printf("invalid tag mask 0x%08x\n",
			    XS_CTL_TAGTYPE(xs));
			panic("scsipi_execute_xs");
		}
	}

	/* If the adapter wants us to poll, poll. */
	if (chan->chan_adapter->adapt_flags & SCSIPI_ADAPT_POLL_ONLY)
		xs->xs_control |= XS_CTL_POLL;

	/*
	 * If we don't yet have a completion thread, or we are to poll for
	 * completion, clear the ASYNC flag.
	 */
	oasync = (xs->xs_control & XS_CTL_ASYNC);
	if (chan->chan_thread == NULL || (xs->xs_control & XS_CTL_POLL) != 0)
		xs->xs_control &= ~XS_CTL_ASYNC;

	async = (xs->xs_control & XS_CTL_ASYNC);
	poll = (xs->xs_control & XS_CTL_POLL);
	retries = xs->xs_retries;	/* for polling commands */

#ifdef DIAGNOSTIC
	if (oasync != 0 && xs->bp == NULL)
		panic("scsipi_execute_xs: XS_CTL_ASYNC but no buf");
#endif

	/*
	 * Enqueue the transfer.  If we're not polling for completion, this
	 * should ALWAYS return `no error'.
	 */
 try_again:
	error = scsipi_enqueue(xs);
	if (error) {
		if (poll == 0) {
			scsipi_printaddr(periph);
			printf("not polling, but enqueue failed with %d\n",
			    error);
			panic("scsipi_execute_xs");
		}

		scsipi_printaddr(periph);
		printf("failed to enqueue polling command");
		if (retries != 0) {
			printf(", retrying...\n");
			delay(1000000);
			retries--;
			goto try_again;
		}
		printf("\n");
		goto free_xs;
	}

 restarted:
	scsipi_run_queue(chan);

	/*
	 * The xfer is enqueued, and possibly running.  If it's to be
	 * completed asynchronously, just return now.
	 */
	if (async)
		return (EJUSTRETURN);

	/*
	 * Not an asynchronous command; wait for it to complete.
	 */
	s = splbio();
	while ((xs->xs_status & XS_STS_DONE) == 0) {
		if (poll) {
			scsipi_printaddr(periph);
			printf("polling command not done\n");
			panic("scsipi_execute_xs");
		}
		(void) tsleep(xs, PRIBIO, "xscmd", 0);
	}
	splx(s);

	/*
	 * Command is complete.  scsipi_done() has awakened us to perform
	 * the error handling.
	 */
	error = scsipi_complete(xs);
	if (error == ERESTART)
		goto restarted;

	/*
	 * If it was meant to run async and we cleared async ourselves,
	 * don't return an error here.  It has already been handled.
	 */
	if (oasync)
		error = EJUSTRETURN;
	/*
	 * Command completed successfully or fatal error occurred.  Fall
	 * into....
	 */
 free_xs:
	s = splbio();
	scsipi_put_xs(xs);
	splx(s);

	/*
	 * Kick the queue, keep it running in case it stopped for some
	 * reason.
	 */
	scsipi_run_queue(chan);

	return (error);
}
2084
2085 /*
2086 * scsipi_completion_thread:
2087 *
2088 * This is the completion thread. We wait for errors on
2089 * asynchronous xfers, and perform the error handling
2090 * function, restarting the command, if necessary.
2091 */
2092 void
2093 scsipi_completion_thread(arg)
2094 void *arg;
2095 {
2096 struct scsipi_channel *chan = arg;
2097 struct scsipi_xfer *xs;
2098 int s;
2099
2100 if (chan->chan_init_cb)
2101 (*chan->chan_init_cb)(chan, chan->chan_init_cb_arg);
2102
2103 s = splbio();
2104 chan->chan_flags |= SCSIPI_CHAN_TACTIVE;
2105 splx(s);
2106 for (;;) {
2107 s = splbio();
2108 xs = TAILQ_FIRST(&chan->chan_complete);
2109 if (xs == NULL && chan->chan_tflags == 0) {
2110 /* nothing to do; wait */
2111 (void) tsleep(&chan->chan_complete, PRIBIO,
2112 "sccomp", 0);
2113 splx(s);
2114 continue;
2115 }
2116 if (chan->chan_tflags & SCSIPI_CHANT_CALLBACK) {
2117 /* call chan_callback from thread context */
2118 chan->chan_tflags &= ~SCSIPI_CHANT_CALLBACK;
2119 chan->chan_callback(chan, chan->chan_callback_arg);
2120 splx(s);
2121 continue;
2122 }
2123 if (chan->chan_tflags & SCSIPI_CHANT_GROWRES) {
2124 /* attempt to get more openings for this channel */
2125 chan->chan_tflags &= ~SCSIPI_CHANT_GROWRES;
2126 scsipi_adapter_request(chan,
2127 ADAPTER_REQ_GROW_RESOURCES, NULL);
2128 scsipi_channel_thaw(chan, 1);
2129 splx(s);
2130 continue;
2131 }
2132 if (chan->chan_tflags & SCSIPI_CHANT_KICK) {
2133 /* explicitly run the queues for this channel */
2134 chan->chan_tflags &= ~SCSIPI_CHANT_KICK;
2135 scsipi_run_queue(chan);
2136 splx(s);
2137 continue;
2138 }
2139 if (chan->chan_tflags & SCSIPI_CHANT_SHUTDOWN) {
2140 splx(s);
2141 break;
2142 }
2143 if (xs) {
2144 TAILQ_REMOVE(&chan->chan_complete, xs, channel_q);
2145 splx(s);
2146
2147 /*
2148 * Have an xfer with an error; process it.
2149 */
2150 (void) scsipi_complete(xs);
2151
2152 /*
2153 * Kick the queue; keep it running if it was stopped
2154 * for some reason.
2155 */
2156 scsipi_run_queue(chan);
2157 } else {
2158 splx(s);
2159 }
2160 }
2161
2162 chan->chan_thread = NULL;
2163
2164 /* In case parent is waiting for us to exit. */
2165 wakeup(&chan->chan_thread);
2166
2167 kthread_exit(0);
2168 }
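
/*
 * Illustrative sketch (not from the original source): the loop above
 * sleeps on &chan->chan_complete and re-checks chan_tflags on wakeup,
 * so code that needs work done in thread context sets a flag and
 * wakes the thread, all at splbio.  For example, to have the queue
 * explicitly re-run:
 *
 *	s = splbio();
 *	chan->chan_tflags |= SCSIPI_CHANT_KICK;
 *	wakeup(&chan->chan_complete);
 *	splx(s);
 */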
2169
2170 /*
2171 * scsipi_create_completion_thread:
2172 *
2173 * Callback to actually create the completion thread.
2174 */
2175 void
2176 scsipi_create_completion_thread(arg)
2177 void *arg;
2178 {
2179 struct scsipi_channel *chan = arg;
2180 struct scsipi_adapter *adapt = chan->chan_adapter;
2181
2182 if (kthread_create1(scsipi_completion_thread, chan,
2183 &chan->chan_thread, "%s", chan->chan_name)) {
2184 printf("%s: unable to create completion thread for "
2185 "channel %d\n", adapt->adapt_dev->dv_xname,
2186 chan->chan_channel);
2187 panic("scsipi_create_completion_thread");
2188 }
2189 }
2190
2191 /*
2192 * scsipi_thread_call_callback:
2193 *
2194  * 	Request that the completion thread call the given callback.
2195 */
2196 int
2197 scsipi_thread_call_callback(chan, callback, arg)
2198 struct scsipi_channel *chan;
2199 void (*callback) __P((struct scsipi_channel *, void *));
2200 void *arg;
2201 {
2202 int s;
2203
2204 s = splbio();
2205 if ((chan->chan_flags & SCSIPI_CHAN_TACTIVE) == 0) {
2206 /* kernel thread doesn't exist yet */
2207 splx(s);
2208 return ESRCH;
2209 }
2210 if (chan->chan_tflags & SCSIPI_CHANT_CALLBACK) {
2211 splx(s);
2212 return EBUSY;
2213 }
2214 scsipi_channel_freeze(chan, 1);
2215 chan->chan_callback = callback;
2216 chan->chan_callback_arg = arg;
2217 chan->chan_tflags |= SCSIPI_CHANT_CALLBACK;
2218 wakeup(&chan->chan_complete);
2219 splx(s);
2220 return(0);
2221 }
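
/*
 * Usage sketch (illustrative; mydrv_callback and sc are hypothetical
 * names): an interrupt handler that needs work done in thread context
 * queues it here and handles ESRCH (no thread yet) or EBUSY (callback
 * already pending).  Note the channel is frozen by one count before
 * the callback is scheduled, and the CALLBACK branch of the thread
 * does not thaw it (compare the GROWRES case above), so the callback
 * is expected to call scsipi_channel_thaw() itself.
 *
 *	error = scsipi_thread_call_callback(chan, mydrv_callback, sc);
 *	if (error != 0)
 *		return (error);
 */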
2222
2223 /*
2224 * scsipi_async_event:
2225 *
2226 * Handle an asynchronous event from an adapter.
2227 */
2228 void
2229 scsipi_async_event(chan, event, arg)
2230 struct scsipi_channel *chan;
2231 scsipi_async_event_t event;
2232 void *arg;
2233 {
2234 int s;
2235
2236 s = splbio();
2237 switch (event) {
2238 case ASYNC_EVENT_MAX_OPENINGS:
2239 scsipi_async_event_max_openings(chan,
2240 (struct scsipi_max_openings *)arg);
2241 break;
2242
2243 case ASYNC_EVENT_XFER_MODE:
2244 scsipi_async_event_xfer_mode(chan,
2245 (struct scsipi_xfer_mode *)arg);
2246 break;
2247 case ASYNC_EVENT_RESET:
2248 scsipi_async_event_channel_reset(chan);
2249 break;
2250 }
2251 splx(s);
2252 }
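
/*
 * Usage sketch (illustrative, not from the original source): an
 * adapter driver reports the result of a completed negotiation by
 * filling in a struct scsipi_xfer_mode and posting the event; all
 * periphs on the I_T nexus are then updated and the new mode is
 * announced:
 *
 *	struct scsipi_xfer_mode xm;
 *
 *	xm.xm_target = target;
 *	xm.xm_mode = PERIPH_CAP_SYNC | PERIPH_CAP_WIDE16;
 *	xm.xm_period = factor;
 *	xm.xm_offset = offset;
 *	scsipi_async_event(chan, ASYNC_EVENT_XFER_MODE, &xm);
 */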
2253
2254 /*
2255 * scsipi_print_xfer_mode:
2256 *
2257 * Print a periph's capabilities.
2258 */
2259 void
2260 scsipi_print_xfer_mode(periph)
2261 struct scsipi_periph *periph;
2262 {
2263 int period, freq, speed, mbs;
2264
2265 if ((periph->periph_flags & PERIPH_MODE_VALID) == 0)
2266 return;
2267
2268 aprint_normal("%s: ", periph->periph_dev->dv_xname);
2269 if (periph->periph_mode & (PERIPH_CAP_SYNC | PERIPH_CAP_DT)) {
2270 period = scsipi_sync_factor_to_period(periph->periph_period);
2271 aprint_normal("sync (%d.%02dns offset %d)",
2272 period / 100, period % 100, periph->periph_offset);
2273 } else
2274 aprint_normal("async");
2275
2276 if (periph->periph_mode & PERIPH_CAP_WIDE32)
2277 aprint_normal(", 32-bit");
2278 else if (periph->periph_mode & (PERIPH_CAP_WIDE16 | PERIPH_CAP_DT))
2279 aprint_normal(", 16-bit");
2280 else
2281 aprint_normal(", 8-bit");
2282
2283 if (periph->periph_mode & (PERIPH_CAP_SYNC | PERIPH_CAP_DT)) {
2284 freq = scsipi_sync_factor_to_freq(periph->periph_period);
2285 speed = freq;
2286 if (periph->periph_mode & PERIPH_CAP_WIDE32)
2287 speed *= 4;
2288 else if (periph->periph_mode &
2289 (PERIPH_CAP_WIDE16 | PERIPH_CAP_DT))
2290 speed *= 2;
2291 mbs = speed / 1000;
2292 if (mbs > 0)
2293 aprint_normal(" (%d.%03dMB/s)", mbs, speed % 1000);
2294 else
2295 aprint_normal(" (%dKB/s)", speed % 1000);
2296 }
2297
2298 aprint_normal(" transfers");
2299
2300 if (periph->periph_mode & PERIPH_CAP_TQING)
2301 aprint_normal(", tagged queueing");
2302
2303 aprint_normal("\n");
2304 }
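
/*
 * Worked example of the arithmetic above (factors from the sync
 * parameter table later in this file): factor 0x0a has a 25.00ns
 * period, so scsipi_sync_factor_to_freq() yields 40000 (KHz).  With
 * PERIPH_CAP_WIDE16 the speed doubles to 80000, and the line printed
 * is "sync (25.00ns offset N), 16-bit (80.000MB/s) transfers".
 */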
2305
2306 /*
2307 * scsipi_async_event_max_openings:
2308 *
2309 * Update the maximum number of outstanding commands a
2310 * device may have.
2311 */
2312 void
2313 scsipi_async_event_max_openings(chan, mo)
2314 struct scsipi_channel *chan;
2315 struct scsipi_max_openings *mo;
2316 {
2317 struct scsipi_periph *periph;
2318 int minlun, maxlun;
2319
2320 if (mo->mo_lun == -1) {
2321 /*
2322 * Wildcarded; apply it to all LUNs.
2323 */
2324 minlun = 0;
2325 maxlun = chan->chan_nluns - 1;
2326 } else
2327 minlun = maxlun = mo->mo_lun;
2328
2329 /* XXX This could really suck with a large LUN space. */
2330 for (; minlun <= maxlun; minlun++) {
2331 periph = scsipi_lookup_periph(chan, mo->mo_target, minlun);
2332 if (periph == NULL)
2333 continue;
2334
2335 if (mo->mo_openings < periph->periph_openings)
2336 periph->periph_openings = mo->mo_openings;
2337 else if (mo->mo_openings > periph->periph_openings &&
2338 (periph->periph_flags & PERIPH_GROW_OPENINGS) != 0)
2339 periph->periph_openings = mo->mo_openings;
2340 }
2341 }
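
/*
 * Usage sketch (illustrative): an adapter that detects a QUEUE FULL
 * condition, or that can now support more tags, reports the new limit
 * for one LUN (or -1 to wildcard all LUNs on the target).  Note that
 * openings only grow if the periph has PERIPH_GROW_OPENINGS set.
 *
 *	struct scsipi_max_openings mo;
 *
 *	mo.mo_target = periph->periph_target;
 *	mo.mo_lun = periph->periph_lun;
 *	mo.mo_openings = openings;
 *	scsipi_async_event(chan, ASYNC_EVENT_MAX_OPENINGS, &mo);
 */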
2342
2343 /*
2344 * scsipi_async_event_xfer_mode:
2345 *
2346 * Update the xfer mode for all periphs sharing the
2347 * specified I_T Nexus.
2348 */
2349 void
2350 scsipi_async_event_xfer_mode(chan, xm)
2351 struct scsipi_channel *chan;
2352 struct scsipi_xfer_mode *xm;
2353 {
2354 struct scsipi_periph *periph;
2355 int lun, announce, mode, period, offset;
2356
2357 for (lun = 0; lun < chan->chan_nluns; lun++) {
2358 periph = scsipi_lookup_periph(chan, xm->xm_target, lun);
2359 if (periph == NULL)
2360 continue;
2361 announce = 0;
2362
2363 /*
2364 * Clamp the xfer mode down to this periph's capabilities.
2365 */
2366 mode = xm->xm_mode & periph->periph_cap;
2367 if (mode & PERIPH_CAP_SYNC) {
2368 period = xm->xm_period;
2369 offset = xm->xm_offset;
2370 } else {
2371 period = 0;
2372 offset = 0;
2373 }
2374
2375 /*
2376 * If we do not have a valid xfer mode yet, or the parameters
2377 * are different, announce them.
2378 */
2379 if ((periph->periph_flags & PERIPH_MODE_VALID) == 0 ||
2380 periph->periph_mode != mode ||
2381 periph->periph_period != period ||
2382 periph->periph_offset != offset)
2383 announce = 1;
2384
2385 periph->periph_mode = mode;
2386 periph->periph_period = period;
2387 periph->periph_offset = offset;
2388 periph->periph_flags |= PERIPH_MODE_VALID;
2389
2390 if (announce)
2391 scsipi_print_xfer_mode(periph);
2392 }
2393 }
2394
2395 /*
2396 * scsipi_set_xfer_mode:
2397 *
2398 * Set the xfer mode for the specified I_T Nexus.
2399 */
2400 void
2401 scsipi_set_xfer_mode(chan, target, immed)
2402 struct scsipi_channel *chan;
2403 int target, immed;
2404 {
2405 struct scsipi_xfer_mode xm;
2406 struct scsipi_periph *itperiph;
2407 int lun, s;
2408
2409 /*
2410 * Go to the minimal xfer mode.
2411 */
2412 xm.xm_target = target;
2413 xm.xm_mode = 0;
2414 xm.xm_period = 0; /* ignored */
2415 xm.xm_offset = 0; /* ignored */
2416
2417 /*
2418 * Find the first LUN we know about on this I_T Nexus.
2419 */
2420 for (itperiph = NULL, lun = 0; lun < chan->chan_nluns; lun++) {
2421 itperiph = scsipi_lookup_periph(chan, target, lun);
2422 if (itperiph != NULL)
2423 break;
2424 }
2425 if (itperiph != NULL) {
2426 xm.xm_mode = itperiph->periph_cap;
2427 /*
2428 * Now issue the request to the adapter.
2429 */
2430 s = splbio();
2431 scsipi_adapter_request(chan, ADAPTER_REQ_SET_XFER_MODE, &xm);
2432 splx(s);
2433 /*
2434 * If we want this to happen immediately, issue a dummy
2435 * command, since most adapters can't really negotiate unless
2436 * they're executing a job.
2437 */
2438 if (immed != 0) {
2439 (void) scsipi_test_unit_ready(itperiph,
2440 XS_CTL_DISCOVERY | XS_CTL_IGNORE_ILLEGAL_REQUEST |
2441 XS_CTL_IGNORE_NOT_READY |
2442 XS_CTL_IGNORE_MEDIA_CHANGE);
2443 }
2444 }
2445 }
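
/*
 * Usage sketch (illustrative): a caller that wants the renegotiation
 * to take effect right away passes a non-zero `immed', so the dummy
 * TEST UNIT READY above gives the adapter a job to negotiate on:
 *
 *	scsipi_set_xfer_mode(chan, periph->periph_target, 1);
 */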
2446
2447 /*
2448  * scsipi_async_event_channel_reset:
2449  *
2450  *	Handle a SCSI bus reset.
2451  *	Called at splbio.
2452 */
2453 void
2454 scsipi_async_event_channel_reset(chan)
2455 struct scsipi_channel *chan;
2456 {
2457 struct scsipi_xfer *xs, *xs_next;
2458 struct scsipi_periph *periph;
2459 int target, lun;
2460
2461 	/*
2462 	 * Channel has been reset.  Also mark pending REQUEST_SENSE
2463 	 * commands with XS_RESET; their sense data is no longer valid.
2464 	 * We can't call scsipi_done() from here: the commands have not
2465 	 * been sent to the adapter yet (this would corrupt accounting).
2466 	 */
2467
2468 for (xs = TAILQ_FIRST(&chan->chan_queue); xs != NULL; xs = xs_next) {
2469 xs_next = TAILQ_NEXT(xs, channel_q);
2470 if (xs->xs_control & XS_CTL_REQSENSE) {
2471 TAILQ_REMOVE(&chan->chan_queue, xs, channel_q);
2472 xs->error = XS_RESET;
2473 if ((xs->xs_control & XS_CTL_ASYNC) != 0)
2474 TAILQ_INSERT_TAIL(&chan->chan_complete, xs,
2475 channel_q);
2476 }
2477 }
2478 wakeup(&chan->chan_complete);
2479 /* Catch xs with pending sense which may not have a REQSENSE xs yet */
2480 for (target = 0; target < chan->chan_ntargets; target++) {
2481 if (target == chan->chan_id)
2482 continue;
2483 for (lun = 0; lun < chan->chan_nluns; lun++) {
2484 periph = scsipi_lookup_periph(chan, target, lun);
2485 if (periph) {
2486 xs = periph->periph_xscheck;
2487 if (xs)
2488 xs->error = XS_RESET;
2489 }
2490 }
2491 }
2492 }
2493
2494 /*
2495 * scsipi_target_detach:
2496 *
2497  *	Detach all periphs associated with the specified I_T nexus.
2498  *	Must be called from valid thread context.
2499 */
2500 int
2501 scsipi_target_detach(chan, target, lun, flags)
2502 struct scsipi_channel *chan;
2503 int target, lun;
2504 int flags;
2505 {
2506 struct scsipi_periph *periph;
2507 int ctarget, mintarget, maxtarget;
2508 int clun, minlun, maxlun;
2509 int error;
2510
2511 if (target == -1) {
2512 mintarget = 0;
2513 maxtarget = chan->chan_ntargets;
2514 } else {
2515 if (target == chan->chan_id)
2516 return EINVAL;
2517 if (target < 0 || target >= chan->chan_ntargets)
2518 return EINVAL;
2519 mintarget = target;
2520 maxtarget = target + 1;
2521 }
2522
2523 if (lun == -1) {
2524 minlun = 0;
2525 maxlun = chan->chan_nluns;
2526 } else {
2527 if (lun < 0 || lun >= chan->chan_nluns)
2528 return EINVAL;
2529 minlun = lun;
2530 maxlun = lun + 1;
2531 }
2532
2533 for (ctarget = mintarget; ctarget < maxtarget; ctarget++) {
2534 if (ctarget == chan->chan_id)
2535 continue;
2536
2537 for (clun = minlun; clun < maxlun; clun++) {
2538 periph = scsipi_lookup_periph(chan, ctarget, clun);
2539 if (periph == NULL)
2540 continue;
2541 error = config_detach(periph->periph_dev, flags);
2542 if (error)
2543 return (error);
2544 scsipi_remove_periph(chan, periph);
2545 free(periph, M_DEVBUF);
2546 }
2547 }
2548 return(0);
2549 }
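
/*
 * Usage sketch (illustrative): an adapter's detach routine can tear
 * down every periph on the channel with the wildcard arguments:
 *
 *	error = scsipi_target_detach(chan, -1, -1, flags);
 *	if (error)
 *		return (error);
 */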
2550
2551 /*
2552 * scsipi_adapter_addref:
2553 *
2554  *	Add a reference to the specified adapter, enabling the
2555  *	adapter if necessary.
2556 */
2557 int
2558 scsipi_adapter_addref(adapt)
2559 struct scsipi_adapter *adapt;
2560 {
2561 int s, error = 0;
2562
2563 s = splbio();
2564 if (adapt->adapt_refcnt++ == 0 && adapt->adapt_enable != NULL) {
2565 error = (*adapt->adapt_enable)(adapt->adapt_dev, 1);
2566 if (error)
2567 adapt->adapt_refcnt--;
2568 }
2569 splx(s);
2570 return (error);
2571 }
2572
2573 /*
2574 * scsipi_adapter_delref:
2575 *
2576  *	Delete a reference to the specified adapter, disabling the
2577  *	adapter when the last reference is dropped.
2578 */
2579 void
2580 scsipi_adapter_delref(adapt)
2581 struct scsipi_adapter *adapt;
2582 {
2583 int s;
2584
2585 s = splbio();
2586 if (adapt->adapt_refcnt-- == 1 && adapt->adapt_enable != NULL)
2587 (void) (*adapt->adapt_enable)(adapt->adapt_dev, 0);
2588 splx(s);
2589 }
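
/*
 * Usage sketch (illustrative): periph drivers bracket device opens
 * and closes with these calls, so an adapter's adapt_enable hook is
 * invoked on first use and again when the last reference goes away:
 *
 *	if ((error = scsipi_adapter_addref(adapt)) != 0)
 *		return (error);
 *	...
 *	scsipi_adapter_delref(adapt);
 */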
2590
2591 struct scsipi_syncparam {
2592 int ss_factor;
2593 int ss_period; /* ns * 100 */
2594 } scsipi_syncparams[] = {
2595 { 0x08, 625 }, /* FAST-160 (Ultra320) */
2596 { 0x09, 1250 }, /* FAST-80 (Ultra160) */
2597 { 0x0a, 2500 }, /* FAST-40 40MHz (Ultra2) */
2598 { 0x0b, 3030 }, /* FAST-40 33MHz (Ultra2) */
2599 { 0x0c, 5000 }, /* FAST-20 (Ultra) */
2600 };
2601 const int scsipi_nsyncparams =
2602 sizeof(scsipi_syncparams) / sizeof(scsipi_syncparams[0]);
2603
2604 int
2605 scsipi_sync_period_to_factor(period)
2606 int period; /* ns * 100 */
2607 {
2608 int i;
2609
2610 for (i = 0; i < scsipi_nsyncparams; i++) {
2611 if (period <= scsipi_syncparams[i].ss_period)
2612 return (scsipi_syncparams[i].ss_factor);
2613 }
2614
2615 return ((period / 100) / 4);
2616 }
2617
2618 int
2619 scsipi_sync_factor_to_period(factor)
2620 int factor;
2621 {
2622 int i;
2623
2624 for (i = 0; i < scsipi_nsyncparams; i++) {
2625 if (factor == scsipi_syncparams[i].ss_factor)
2626 return (scsipi_syncparams[i].ss_period);
2627 }
2628
2629 return ((factor * 4) * 100);
2630 }
2631
2632 int
2633 scsipi_sync_factor_to_freq(factor)
2634 int factor;
2635 {
2636 int i;
2637
2638 for (i = 0; i < scsipi_nsyncparams; i++) {
2639 if (factor == scsipi_syncparams[i].ss_factor)
2640 return (100000000 / scsipi_syncparams[i].ss_period);
2641 }
2642
2643 return (10000000 / ((factor * 4) * 10));
2644 }
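
/*
 * Worked examples (derived from the table above):
 *
 *	scsipi_sync_period_to_factor(2500) == 0x0a	(25.00ns, FAST-40)
 *	scsipi_sync_factor_to_period(0x0c) == 5000	(50.00ns)
 *	scsipi_sync_factor_to_freq(0x0c) == 20000	(KHz, i.e. 20MHz)
 *
 * Factors outside the table fall back to the legacy encoding, where
 * the factor is the period in ns divided by 4:
 *
 *	scsipi_sync_factor_to_period(0x19) == 10000	(100.00ns, 10MHz)
 */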
2645
2646 #ifdef SCSIPI_DEBUG
2647 /*
2648  * Given a scsipi_xfer, dump the request, in all its glory.
2649 */
2650 void
2651 show_scsipi_xs(xs)
2652 struct scsipi_xfer *xs;
2653 {
2654
2655 printf("xs(%p): ", xs);
2656 printf("xs_control(0x%08x)", xs->xs_control);
2657 printf("xs_status(0x%08x)", xs->xs_status);
2658 printf("periph(%p)", xs->xs_periph);
2659 printf("retr(0x%x)", xs->xs_retries);
2660 printf("timo(0x%x)", xs->timeout);
2661 printf("cmd(%p)", xs->cmd);
2662 printf("len(0x%x)", xs->cmdlen);
2663 printf("data(%p)", xs->data);
2664 printf("len(0x%x)", xs->datalen);
2665 printf("res(0x%x)", xs->resid);
2666 printf("err(0x%x)", xs->error);
2667 printf("bp(%p)", xs->bp);
2668 show_scsipi_cmd(xs);
2669 }
2670
2671 void
2672 show_scsipi_cmd(xs)
2673 struct scsipi_xfer *xs;
2674 {
2675 u_char *b = (u_char *) xs->cmd;
2676 int i = 0;
2677
2678 scsipi_printaddr(xs->xs_periph);
2679 printf(" command: ");
2680
2681 if ((xs->xs_control & XS_CTL_RESET) == 0) {
2682 while (i < xs->cmdlen) {
2683 if (i)
2684 printf(",");
2685 printf("0x%x", b[i++]);
2686 }
2687 printf("-[%d bytes]\n", xs->datalen);
2688 if (xs->datalen)
2689 show_mem(xs->data, min(64, xs->datalen));
2690 } else
2691 printf("-RESET-\n");
2692 }
2693
2694 void
2695 show_mem(address, num)
2696 u_char *address;
2697 int num;
2698 {
2699 int x;
2700
2701 printf("------------------------------");
2702 for (x = 0; x < num; x++) {
2703 if ((x % 16) == 0)
2704 printf("\n%03d: ", x);
2705 printf("%02x ", *address++);
2706 }
2707 printf("\n------------------------------\n");
2708 }
2709 #endif /* SCSIPI_DEBUG */
2710