/*	$NetBSD: scsipi_base.c,v 1.74 2002/05/16 02:54:20 thorpej Exp $	*/

/*-
 * Copyright (c) 1998, 1999, 2000, 2002 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Charles M. Hannum; by Jason R. Thorpe of the Numerical Aerospace
 * Simulation Facility, NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: scsipi_base.c,v 1.74 2002/05/16 02:54:20 thorpej Exp $");

#include "opt_scsi.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/buf.h>
#include <sys/uio.h>
#include <sys/malloc.h>
#include <sys/pool.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/proc.h>
#include <sys/kthread.h>
#include <sys/hash.h>

#include <dev/scsipi/scsipi_all.h>
#include <dev/scsipi/scsipi_disk.h>
#include <dev/scsipi/scsipiconf.h>
#include <dev/scsipi/scsipi_base.h>

#include <dev/scsipi/scsi_all.h>
#include <dev/scsipi/scsi_message.h>
int	scsipi_complete __P((struct scsipi_xfer *));
void	scsipi_request_sense __P((struct scsipi_xfer *));
int	scsipi_enqueue __P((struct scsipi_xfer *));
void	scsipi_run_queue __P((struct scsipi_channel *chan));

void	scsipi_completion_thread __P((void *));

void	scsipi_get_tag __P((struct scsipi_xfer *));
void	scsipi_put_tag __P((struct scsipi_xfer *));

int	scsipi_get_resource __P((struct scsipi_channel *));
void	scsipi_put_resource __P((struct scsipi_channel *));
__inline int scsipi_grow_resources __P((struct scsipi_channel *));

void	scsipi_async_event_max_openings __P((struct scsipi_channel *,
	    struct scsipi_max_openings *));
void	scsipi_async_event_xfer_mode __P((struct scsipi_channel *,
	    struct scsipi_xfer_mode *));
void	scsipi_async_event_channel_reset __P((struct scsipi_channel *));

struct pool scsipi_xfer_pool;

/*
 * scsipi_init:
 *
 *	Called when a scsibus or atapibus is attached to the system
 *	to initialize shared data structures.
 */
void
scsipi_init()
{
	static int scsipi_init_done;

	if (scsipi_init_done)
		return;
	scsipi_init_done = 1;

	/* Initialize the scsipi_xfer pool. */
	pool_init(&scsipi_xfer_pool, sizeof(struct scsipi_xfer), 0,
	    0, 0, "scxspl", NULL);
}

/*
 * scsipi_channel_init:
 *
 *	Initialize a scsipi_channel when it is attached.
 */
int
scsipi_channel_init(chan)
	struct scsipi_channel *chan;
{
	int i;

	/* Initialize shared data. */
	scsipi_init();

	/* Initialize the queues. */
	TAILQ_INIT(&chan->chan_queue);
	TAILQ_INIT(&chan->chan_complete);

	for (i = 0; i < SCSIPI_CHAN_PERIPH_BUCKETS; i++)
		LIST_INIT(&chan->chan_periphtab[i]);

	/*
	 * Create the asynchronous completion thread.
	 */
	kthread_create(scsipi_create_completion_thread, chan);
	return (0);
}

/*
 * scsipi_channel_shutdown:
 *
 *	Shutdown a scsipi_channel.
 */
void
scsipi_channel_shutdown(chan)
	struct scsipi_channel *chan;
{

	/*
	 * Shut down the completion thread.
	 */
	chan->chan_tflags |= SCSIPI_CHANT_SHUTDOWN;
	wakeup(&chan->chan_complete);

	/*
	 * Now wait for the thread to exit.
	 */
	while (chan->chan_thread != NULL)
		(void) tsleep(&chan->chan_thread, PRIBIO, "scshut", 0);
}

static uint32_t
scsipi_chan_periph_hash(uint64_t t, uint64_t l)
{
	uint32_t hash;

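	/*
	 * Chain the two hash steps so that both the target and the
	 * LUN contribute to the final bucket index.
	 */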
	hash = hash32_buf(&t, sizeof(t), HASH32_BUF_INIT);
	hash = hash32_buf(&l, sizeof(l), hash);

	return (hash & SCSIPI_CHAN_PERIPH_HASHMASK);
}

/*
 * scsipi_insert_periph:
 *
 *	Insert a periph into the channel.
 */
void
scsipi_insert_periph(chan, periph)
	struct scsipi_channel *chan;
	struct scsipi_periph *periph;
{
	uint32_t hash;
	int s;

	hash = scsipi_chan_periph_hash(periph->periph_target,
	    periph->periph_lun);

	s = splbio();
	LIST_INSERT_HEAD(&chan->chan_periphtab[hash], periph, periph_hash);
	splx(s);
}

/*
 * scsipi_remove_periph:
 *
 *	Remove a periph from the channel.
 */
void
scsipi_remove_periph(chan, periph)
	struct scsipi_channel *chan;
	struct scsipi_periph *periph;
{
	int s;

	s = splbio();
	LIST_REMOVE(periph, periph_hash);
	splx(s);
}

/*
 * scsipi_lookup_periph:
 *
 *	Lookup a periph on the specified channel.
 */
struct scsipi_periph *
scsipi_lookup_periph(chan, target, lun)
	struct scsipi_channel *chan;
	int target, lun;
{
	struct scsipi_periph *periph;
	uint32_t hash;
	int s;

	if (target >= chan->chan_ntargets ||
	    lun >= chan->chan_nluns)
		return (NULL);

	hash = scsipi_chan_periph_hash(target, lun);

	s = splbio();
	LIST_FOREACH(periph, &chan->chan_periphtab[hash], periph_hash) {
		if (periph->periph_target == target &&
		    periph->periph_lun == lun)
			break;
	}
	splx(s);

	return (periph);
}

/*
 * scsipi_get_resource:
 *
 *	Allocate a single xfer `resource' from the channel.
 *
 *	NOTE: Must be called at splbio().
 */
int
scsipi_get_resource(chan)
	struct scsipi_channel *chan;
{
	struct scsipi_adapter *adapt = chan->chan_adapter;

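	/*
	 * SCSIPI_CHAN_OPENINGS means command openings are accounted
	 * per-channel; otherwise they are drawn from a pool shared by
	 * the whole adapter.
	 */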
	if (chan->chan_flags & SCSIPI_CHAN_OPENINGS) {
		if (chan->chan_openings > 0) {
			chan->chan_openings--;
			return (1);
		}
		return (0);
	}

	if (adapt->adapt_openings > 0) {
		adapt->adapt_openings--;
		return (1);
	}
	return (0);
}

/*
 * scsipi_grow_resources:
 *
 *	Attempt to grow resources for a channel.  If this succeeds,
 *	we allocate one for our caller.
 *
 *	NOTE: Must be called at splbio().
 */
__inline int
scsipi_grow_resources(chan)
	struct scsipi_channel *chan;
{

	if (chan->chan_flags & SCSIPI_CHAN_CANGROW) {
		if ((chan->chan_flags & SCSIPI_CHAN_TACTIVE) == 0) {
			scsipi_adapter_request(chan,
			    ADAPTER_REQ_GROW_RESOURCES, NULL);
			return (scsipi_get_resource(chan));
		}
		/*
		 * ask the channel thread to do it. It'll have to thaw the
		 * queue
		 */
		scsipi_channel_freeze(chan, 1);
		chan->chan_tflags |= SCSIPI_CHANT_GROWRES;
		wakeup(&chan->chan_complete);
		return (0);
	}

	return (0);
}

/*
 * scsipi_put_resource:
 *
 *	Free a single xfer `resource' to the channel.
 *
 *	NOTE: Must be called at splbio().
 */
void
scsipi_put_resource(chan)
	struct scsipi_channel *chan;
{
	struct scsipi_adapter *adapt = chan->chan_adapter;

	if (chan->chan_flags & SCSIPI_CHAN_OPENINGS)
		chan->chan_openings++;
	else
		adapt->adapt_openings++;
}

/*
 * scsipi_get_tag:
 *
 *	Get a tag ID for the specified xfer.
 *
 *	NOTE: Must be called at splbio().
 */
void
scsipi_get_tag(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_periph *periph = xs->xs_periph;
	int word, bit, tag;

	for (word = 0; word < PERIPH_NTAGWORDS; word++) {
		bit = ffs(periph->periph_freetags[word]);
		if (bit != 0)
			break;
	}
#ifdef DIAGNOSTIC
	if (word == PERIPH_NTAGWORDS) {
		scsipi_printaddr(periph);
		printf("no free tags\n");
		panic("scsipi_get_tag");
	}
#endif

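	/*
	 * Each word of periph_freetags holds 32 tag bits, so the tag
	 * ID is (word * 32) + bit.  ffs() returns a 1-based bit
	 * number, hence the adjustment below.
	 */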
	bit -= 1;
	periph->periph_freetags[word] &= ~(1 << bit);
	tag = (word << 5) | bit;

	/* XXX Should eventually disallow this completely. */
	if (tag >= periph->periph_openings) {
		scsipi_printaddr(periph);
		printf("WARNING: tag %d greater than available openings %d\n",
		    tag, periph->periph_openings);
	}

	xs->xs_tag_id = tag;
}

/*
 * scsipi_put_tag:
 *
 *	Put the tag ID for the specified xfer back into the pool.
 *
 *	NOTE: Must be called at splbio().
 */
void
scsipi_put_tag(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_periph *periph = xs->xs_periph;
	int word, bit;

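	/* Decode the tag ID back into its free-tag word and bit. */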
	word = xs->xs_tag_id >> 5;
	bit = xs->xs_tag_id & 0x1f;

	periph->periph_freetags[word] |= (1 << bit);
}

/*
 * scsipi_get_xs:
 *
 *	Allocate an xfer descriptor and associate it with the
 *	specified peripheral.  If the peripheral has no more
 *	available command openings, we either block waiting for
 *	one to become available, or fail.
 */
struct scsipi_xfer *
scsipi_get_xs(periph, flags)
	struct scsipi_periph *periph;
	int flags;
{
	struct scsipi_xfer *xs;
	int s;

	SC_DEBUG(periph, SCSIPI_DB3, ("scsipi_get_xs\n"));

	/*
	 * If we're cold, make sure we poll.
	 */
	if (cold)
		flags |= XS_CTL_NOSLEEP | XS_CTL_POLL;

#ifdef DIAGNOSTIC
	/*
	 * URGENT commands can never be ASYNC.
	 */
	if ((flags & (XS_CTL_URGENT|XS_CTL_ASYNC)) ==
	    (XS_CTL_URGENT|XS_CTL_ASYNC)) {
		scsipi_printaddr(periph);
		printf("URGENT and ASYNC\n");
		panic("scsipi_get_xs");
	}
#endif

	s = splbio();
	/*
	 * Wait for a command opening to become available.  Rules:
	 *
	 *	- All xfers must wait for an available opening.
	 *	  Exception: URGENT xfers can proceed when
	 *	  active == openings, because we use the opening
	 *	  of the command we're recovering for.
	 *	- if the periph has sense pending, only URGENT & REQSENSE
	 *	  xfers may proceed.
	 *
	 *	- If the periph is recovering, only URGENT xfers may
	 *	  proceed.
	 *
	 *	- If the periph is currently executing a recovery
	 *	  command, URGENT commands must block, because only
	 *	  one recovery command can execute at a time.
	 */
	for (;;) {
		if (flags & XS_CTL_URGENT) {
			if (periph->periph_active > periph->periph_openings)
				goto wait_for_opening;
			if (periph->periph_flags & PERIPH_SENSE) {
				if ((flags & XS_CTL_REQSENSE) == 0)
					goto wait_for_opening;
			} else {
				if ((periph->periph_flags &
				    PERIPH_RECOVERY_ACTIVE) != 0)
					goto wait_for_opening;
				periph->periph_flags |= PERIPH_RECOVERY_ACTIVE;
			}
			break;
		}
		if (periph->periph_active >= periph->periph_openings ||
		    (periph->periph_flags & PERIPH_RECOVERING) != 0)
			goto wait_for_opening;
		periph->periph_active++;
		break;

 wait_for_opening:
		if (flags & XS_CTL_NOSLEEP) {
			splx(s);
			return (NULL);
		}
		SC_DEBUG(periph, SCSIPI_DB3, ("sleeping\n"));
		periph->periph_flags |= PERIPH_WAITING;
		(void) tsleep(periph, PRIBIO, "getxs", 0);
	}
	SC_DEBUG(periph, SCSIPI_DB3, ("calling pool_get\n"));
	xs = pool_get(&scsipi_xfer_pool,
	    ((flags & XS_CTL_NOSLEEP) != 0 ? PR_NOWAIT : PR_WAITOK));
	if (xs == NULL) {
		if (flags & XS_CTL_URGENT) {
			if ((flags & XS_CTL_REQSENSE) == 0)
				periph->periph_flags &= ~PERIPH_RECOVERY_ACTIVE;
		} else
			periph->periph_active--;
		scsipi_printaddr(periph);
		printf("unable to allocate %sscsipi_xfer\n",
		    (flags & XS_CTL_URGENT) ? "URGENT " : "");
	}
	splx(s);

	SC_DEBUG(periph, SCSIPI_DB3, ("returning\n"));

	if (xs != NULL) {
		memset(xs, 0, sizeof(*xs));
		callout_init(&xs->xs_callout);
		xs->xs_periph = periph;
		xs->xs_control = flags;
		xs->xs_status = 0;
		s = splbio();
		TAILQ_INSERT_TAIL(&periph->periph_xferq, xs, device_q);
		splx(s);
	}
	return (xs);
}

/*
 * scsipi_put_xs:
 *
 *	Release an xfer descriptor, decreasing the outstanding command
 *	count for the peripheral.  If there is a thread waiting for
 *	an opening, wake it up.  If not, kick any queued I/O the
 *	peripheral may have.
 *
 *	NOTE: Must be called at splbio().
 */
void
scsipi_put_xs(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_periph *periph = xs->xs_periph;
	int flags = xs->xs_control;

	SC_DEBUG(periph, SCSIPI_DB3, ("scsipi_free_xs\n"));

	TAILQ_REMOVE(&periph->periph_xferq, xs, device_q);
	pool_put(&scsipi_xfer_pool, xs);

#ifdef DIAGNOSTIC
	if ((periph->periph_flags & PERIPH_RECOVERY_ACTIVE) != 0 &&
	    periph->periph_active == 0) {
		scsipi_printaddr(periph);
		printf("recovery without a command to recover for\n");
		panic("scsipi_put_xs");
	}
#endif

	if (flags & XS_CTL_URGENT) {
		if ((flags & XS_CTL_REQSENSE) == 0)
			periph->periph_flags &= ~PERIPH_RECOVERY_ACTIVE;
	} else
		periph->periph_active--;
	if (periph->periph_active == 0 &&
	    (periph->periph_flags & PERIPH_WAITDRAIN) != 0) {
		periph->periph_flags &= ~PERIPH_WAITDRAIN;
		wakeup(&periph->periph_active);
	}

	if (periph->periph_flags & PERIPH_WAITING) {
		periph->periph_flags &= ~PERIPH_WAITING;
		wakeup(periph);
	} else {
		if (periph->periph_switch->psw_start != NULL) {
			SC_DEBUG(periph, SCSIPI_DB2,
			    ("calling private start()\n"));
			(*periph->periph_switch->psw_start)(periph);
		}
	}
}

/*
 * scsipi_channel_freeze:
 *
 *	Freeze a channel's xfer queue.
 */
void
scsipi_channel_freeze(chan, count)
	struct scsipi_channel *chan;
	int count;
{
	int s;

	s = splbio();
	chan->chan_qfreeze += count;
	splx(s);
}

/*
 * scsipi_channel_thaw:
 *
 *	Thaw a channel's xfer queue.
 */
void
scsipi_channel_thaw(chan, count)
	struct scsipi_channel *chan;
	int count;
{
	int s;

	s = splbio();
	chan->chan_qfreeze -= count;
	/*
	 * Don't let the freeze count go negative.
	 *
	 * Presumably the adapter driver could keep track of this,
	 * but it might just be easier to do this here so as to allow
	 * multiple callers, including those outside the adapter driver.
	 */
	if (chan->chan_qfreeze < 0) {
		chan->chan_qfreeze = 0;
	}
	splx(s);
	/*
	 * Kick the channel's queue here.  Note, we may be running in
	 * interrupt context (softclock or HBA's interrupt), so the adapter
	 * driver had better not sleep.
	 */
	if (chan->chan_qfreeze == 0)
		scsipi_run_queue(chan);
}

/*
 * scsipi_channel_timed_thaw:
 *
 *	Thaw a channel after some time has expired.  This will also
 *	run the channel's queue if the freeze count has reached 0.
 */
void
scsipi_channel_timed_thaw(arg)
	void *arg;
{
	struct scsipi_channel *chan = arg;

	scsipi_channel_thaw(chan, 1);
}

/*
 * scsipi_periph_freeze:
 *
 *	Freeze a device's xfer queue.
 */
void
scsipi_periph_freeze(periph, count)
	struct scsipi_periph *periph;
	int count;
{
	int s;

	s = splbio();
	periph->periph_qfreeze += count;
	splx(s);
}

/*
 * scsipi_periph_thaw:
 *
 *	Thaw a device's xfer queue.
 */
void
scsipi_periph_thaw(periph, count)
	struct scsipi_periph *periph;
	int count;
{
	int s;

	s = splbio();
	periph->periph_qfreeze -= count;
#ifdef DIAGNOSTIC
	if (periph->periph_qfreeze < 0) {
		static const char pc[] = "periph freeze count < 0";
		scsipi_printaddr(periph);
		printf("%s\n", pc);
		panic(pc);
	}
#endif
	if (periph->periph_qfreeze == 0 &&
	    (periph->periph_flags & PERIPH_WAITING) != 0)
		wakeup(periph);
	splx(s);
}

/*
 * scsipi_periph_timed_thaw:
 *
 *	Thaw a device after some time has expired.
 */
void
scsipi_periph_timed_thaw(arg)
	void *arg;
{
	int s;
	struct scsipi_periph *periph = arg;

	callout_stop(&periph->periph_callout);

	s = splbio();
	scsipi_periph_thaw(periph, 1);
	if ((periph->periph_channel->chan_flags & SCSIPI_CHAN_TACTIVE) == 0) {
		/*
		 * Kick the channel's queue here.  Note, we're running in
		 * interrupt context (softclock), so the adapter driver
		 * had better not sleep.
		 */
		scsipi_run_queue(periph->periph_channel);
	} else {
		/*
		 * Tell the completion thread to kick the channel's queue here.
		 */
		periph->periph_channel->chan_tflags |= SCSIPI_CHANT_KICK;
		wakeup(&periph->periph_channel->chan_complete);
	}
	splx(s);
}

/*
 * scsipi_wait_drain:
 *
 *	Wait for a periph's pending xfers to drain.
 */
void
scsipi_wait_drain(periph)
	struct scsipi_periph *periph;
{
	int s;

	s = splbio();
	while (periph->periph_active != 0) {
		periph->periph_flags |= PERIPH_WAITDRAIN;
		(void) tsleep(&periph->periph_active, PRIBIO, "sxdrn", 0);
	}
	splx(s);
}

/*
 * scsipi_kill_pending:
 *
 *	Kill off all pending xfers for a periph.
 *
 *	NOTE: Must be called at splbio().
 */
void
scsipi_kill_pending(periph)
	struct scsipi_periph *periph;
{

	(*periph->periph_channel->chan_bustype->bustype_kill_pending)(periph);
#ifdef DIAGNOSTIC
	if (TAILQ_FIRST(&periph->periph_xferq) != NULL)
		panic("scsipi_kill_pending");
#endif
	scsipi_wait_drain(periph);
}

/*
 * scsipi_interpret_sense:
 *
 *	Look at the returned sense and act on the error, determining
 *	the unix error number to pass back.  (0 = report no error)
 *
 *	NOTE: If we return ERESTART, we are expected to have
 *	thawed the device!
 *
 *	THIS IS THE DEFAULT ERROR HANDLER FOR SCSI DEVICES.
 */
int
scsipi_interpret_sense(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_sense_data *sense;
	struct scsipi_periph *periph = xs->xs_periph;
	u_int8_t key;
	u_int32_t info;
	int error;
#ifndef SCSIVERBOSE
	static char *error_mes[] = {
		"soft error (corrected)",
		"not ready", "medium error",
		"non-media hardware failure", "illegal request",
		"unit attention", "readonly device",
		"no data found", "vendor unique",
		"copy aborted", "command aborted",
		"search returned equal", "volume overflow",
		"verify miscompare", "unknown error key"
	};
#endif

	sense = &xs->sense.scsi_sense;
#ifdef SCSIPI_DEBUG
	if (periph->periph_flags & SCSIPI_DB1) {
		int count;
		scsipi_printaddr(periph);
		printf(" sense debug information:\n");
		printf("\tcode 0x%x valid 0x%x\n",
		    sense->error_code & SSD_ERRCODE,
		    sense->error_code & SSD_ERRCODE_VALID ? 1 : 0);
		printf("\tseg 0x%x key 0x%x ili 0x%x eom 0x%x fmark 0x%x\n",
		    sense->segment,
		    sense->flags & SSD_KEY,
		    sense->flags & SSD_ILI ? 1 : 0,
		    sense->flags & SSD_EOM ? 1 : 0,
		    sense->flags & SSD_FILEMARK ? 1 : 0);
		printf("\ninfo: 0x%x 0x%x 0x%x 0x%x followed by %d "
		    "extra bytes\n",
		    sense->info[0],
		    sense->info[1],
		    sense->info[2],
		    sense->info[3],
		    sense->extra_len);
		printf("\textra: ");
		for (count = 0; count < ADD_BYTES_LIM(sense); count++)
			printf("0x%x ", sense->cmd_spec_info[count]);
		printf("\n");
	}
#endif

	/*
	 * If the periph has its own error handler, call it first.
	 * If it returns a legit error value, return that, otherwise
	 * it wants us to continue with normal error processing.
	 */
	if (periph->periph_switch->psw_error != NULL) {
		SC_DEBUG(periph, SCSIPI_DB2,
		    ("calling private err_handler()\n"));
		error = (*periph->periph_switch->psw_error)(xs);
		if (error != EJUSTRETURN)
			return (error);
	}
	/* otherwise use the default */
	switch (sense->error_code & SSD_ERRCODE) {

		/*
		 * Old SCSI-1 and SASI devices respond with
		 * codes other than 70.
		 */
	case 0x00:		/* no error (command completed OK) */
		return (0);
	case 0x04:		/* drive not ready after it was selected */
		if ((periph->periph_flags & PERIPH_REMOVABLE) != 0)
			periph->periph_flags &= ~PERIPH_MEDIA_LOADED;
		if ((xs->xs_control & XS_CTL_IGNORE_NOT_READY) != 0)
			return (0);
		/* XXX - display some sort of error here? */
		return (EIO);
	case 0x20:		/* invalid command */
		if ((xs->xs_control &
		     XS_CTL_IGNORE_ILLEGAL_REQUEST) != 0)
			return (0);
		return (EINVAL);
	case 0x25:		/* invalid LUN (Adaptec ACB-4000) */
		return (EACCES);

		/*
		 * If it's code 70, use the extended stuff and
		 * interpret the key
		 */
	case 0x71:		/* delayed error */
		scsipi_printaddr(periph);
		key = sense->flags & SSD_KEY;
		printf(" DEFERRED ERROR, key = 0x%x\n", key);
		/* FALLTHROUGH */
	case 0x70:
		if ((sense->error_code & SSD_ERRCODE_VALID) != 0)
			info = _4btol(sense->info);
		else
			info = 0;
		key = sense->flags & SSD_KEY;

		switch (key) {
		case SKEY_NO_SENSE:
		case SKEY_RECOVERED_ERROR:
			if (xs->resid == xs->datalen && xs->datalen) {
				/*
				 * Why is this here?
				 */
				xs->resid = 0;	/* not short read */
			}
		case SKEY_EQUAL:
			error = 0;
			break;
		case SKEY_NOT_READY:
			if ((periph->periph_flags & PERIPH_REMOVABLE) != 0)
				periph->periph_flags &= ~PERIPH_MEDIA_LOADED;
			if ((xs->xs_control & XS_CTL_IGNORE_NOT_READY) != 0)
				return (0);
			if (sense->add_sense_code == 0x3A) {
				error = ENODEV; /* Medium not present */
				if (xs->xs_control & XS_CTL_SILENT_NODEV)
					return (error);
			} else
				error = EIO;
			if ((xs->xs_control & XS_CTL_SILENT) != 0)
				return (error);
			break;
		case SKEY_ILLEGAL_REQUEST:
			if ((xs->xs_control &
			     XS_CTL_IGNORE_ILLEGAL_REQUEST) != 0)
				return (0);
			/*
			 * Handle the case where a device reports
			 * Logical Unit Not Supported during discovery.
			 */
			if ((xs->xs_control & XS_CTL_DISCOVERY) != 0 &&
			    sense->add_sense_code == 0x25 &&
			    sense->add_sense_code_qual == 0x00)
				return (EINVAL);
			if ((xs->xs_control & XS_CTL_SILENT) != 0)
				return (EIO);
			error = EINVAL;
			break;
		case SKEY_UNIT_ATTENTION:
			if (sense->add_sense_code == 0x29 &&
			    sense->add_sense_code_qual == 0x00) {
				/* device or bus reset */
				return (ERESTART);
			}
			if ((periph->periph_flags & PERIPH_REMOVABLE) != 0)
				periph->periph_flags &= ~PERIPH_MEDIA_LOADED;
			if ((xs->xs_control &
			     XS_CTL_IGNORE_MEDIA_CHANGE) != 0 ||
			    /* XXX Should reupload any transient state. */
			    (periph->periph_flags &
			     PERIPH_REMOVABLE) == 0) {
				return (ERESTART);
			}
			if ((xs->xs_control & XS_CTL_SILENT) != 0)
				return (EIO);
			error = EIO;
			break;
		case SKEY_WRITE_PROTECT:
			error = EROFS;
			break;
		case SKEY_BLANK_CHECK:
			error = 0;
			break;
		case SKEY_ABORTED_COMMAND:
			error = ERESTART;
			break;
		case SKEY_VOLUME_OVERFLOW:
			error = ENOSPC;
			break;
		default:
			error = EIO;
			break;
		}

#ifdef SCSIVERBOSE
		if (key && (xs->xs_control & XS_CTL_SILENT) == 0)
			scsipi_print_sense(xs, 0);
#else
		if (key) {
			scsipi_printaddr(periph);
			printf("%s", error_mes[key - 1]);
			if ((sense->error_code & SSD_ERRCODE_VALID) != 0) {
				switch (key) {
				case SKEY_NOT_READY:
				case SKEY_ILLEGAL_REQUEST:
				case SKEY_UNIT_ATTENTION:
				case SKEY_WRITE_PROTECT:
					break;
				case SKEY_BLANK_CHECK:
					printf(", requested size: %d (decimal)",
					    info);
					break;
				case SKEY_ABORTED_COMMAND:
					if (xs->xs_retries)
						printf(", retrying");
					printf(", cmd 0x%x, info 0x%x",
					    xs->cmd->opcode, info);
					break;
				default:
					printf(", info = %d (decimal)", info);
				}
			}
			if (sense->extra_len != 0) {
				int n;
				printf(", data =");
				for (n = 0; n < sense->extra_len; n++)
					printf(" %02x",
					    sense->cmd_spec_info[n]);
			}
			printf("\n");
		}
#endif
		return (error);

	/*
	 * Some other code, just report it
	 */
	default:
#if defined(SCSIDEBUG) || defined(DEBUG)
	{
		static char *uc = "undecodable sense error";
		int i;
		u_int8_t *cptr = (u_int8_t *) sense;
		scsipi_printaddr(periph);
		if (xs->cmd == &xs->cmdstore) {
			printf("%s for opcode 0x%x, data=",
			    uc, xs->cmdstore.opcode);
		} else {
			printf("%s, data=", uc);
		}
		for (i = 0; i < sizeof (*sense); i++)
			printf(" 0x%02x", *(cptr++) & 0xff);
		printf("\n");
	}
#else
		scsipi_printaddr(periph);
		printf("Sense Error Code 0x%x",
		    sense->error_code & SSD_ERRCODE);
		if ((sense->error_code & SSD_ERRCODE_VALID) != 0) {
			struct scsipi_sense_data_unextended *usense =
			    (struct scsipi_sense_data_unextended *)sense;
			printf(" at block no. %d (decimal)",
			    _3btol(usense->block));
		}
		printf("\n");
#endif
		return (EIO);
	}
}

/*
 * scsipi_size:
 *
 *	Find out from the device what its capacity is.
 */
u_long
scsipi_size(periph, flags)
	struct scsipi_periph *periph;
	int flags;
{
	struct scsipi_read_cap_data rdcap;
	struct scsipi_read_capacity scsipi_cmd;

	memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
	scsipi_cmd.opcode = READ_CAPACITY;

	/*
	 * If the command works, interpret the result as a 4 byte
	 * number of blocks
	 */
	if (scsipi_command(periph, (struct scsipi_generic *)&scsipi_cmd,
	    sizeof(scsipi_cmd), (u_char *)&rdcap, sizeof(rdcap),
	    SCSIPIRETRIES, 20000, NULL,
	    flags | XS_CTL_DATA_IN | XS_CTL_DATA_ONSTACK) != 0) {
		scsipi_printaddr(periph);
		printf("could not get size\n");
		return (0);
	}

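	/*
	 * READ CAPACITY returns the address of the last block, so the
	 * total number of blocks is that value plus one.
	 */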
	return (_4btol(rdcap.addr) + 1);
}

/*
 * scsipi_test_unit_ready:
 *
 *	Issue a `test unit ready' request.
 */
int
scsipi_test_unit_ready(periph, flags)
	struct scsipi_periph *periph;
	int flags;
{
	struct scsipi_test_unit_ready scsipi_cmd;

	/* some ATAPI drives don't support TEST_UNIT_READY. Sigh */
	if (periph->periph_quirks & PQUIRK_NOTUR)
		return (0);

	memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
	scsipi_cmd.opcode = TEST_UNIT_READY;

	return (scsipi_command(periph,
	    (struct scsipi_generic *)&scsipi_cmd, sizeof(scsipi_cmd),
	    0, 0, SCSIPIRETRIES, 10000, NULL, flags));
}

/*
 * scsipi_inquire:
 *
 *	Ask the device about itself.
 */
int
scsipi_inquire(periph, inqbuf, flags)
	struct scsipi_periph *periph;
	struct scsipi_inquiry_data *inqbuf;
	int flags;
{
	struct scsipi_inquiry scsipi_cmd;
	int error;

	memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
	scsipi_cmd.opcode = INQUIRY;
	scsipi_cmd.length = sizeof(struct scsipi_inquiry_data);

	error = scsipi_command(periph,
	    (struct scsipi_generic *) &scsipi_cmd, sizeof(scsipi_cmd),
	    (u_char *) inqbuf, sizeof(struct scsipi_inquiry_data),
	    SCSIPIRETRIES, 10000, NULL, XS_CTL_DATA_IN | flags);

#ifdef SCSI_OLD_NOINQUIRY
	/*
	 * Kludge for the Adaptec ACB-4000 SCSI->MFM translator.
	 * This board doesn't support the INQUIRY command at all.
	 */
	if (error == EINVAL || error == EACCES) {
		/*
		 * Conjure up an INQUIRY response.
		 */
		inqbuf->device = (error == EINVAL ?
			 SID_QUAL_LU_PRESENT :
			 SID_QUAL_LU_NOTPRESENT) | T_DIRECT;
		inqbuf->dev_qual2 = 0;
		inqbuf->version = 0;
		inqbuf->response_format = SID_FORMAT_SCSI1;
		inqbuf->additional_length = 3 + 28;
		inqbuf->flags1 = inqbuf->flags2 = inqbuf->flags3 = 0;
		memcpy(inqbuf->vendor, "ADAPTEC ", sizeof(inqbuf->vendor));
		memcpy(inqbuf->product, "ACB-4000        ",
		    sizeof(inqbuf->product));
		memcpy(inqbuf->revision, "    ", sizeof(inqbuf->revision));
		error = 0;
	}

	/*
	 * Kludge for the Emulex MT-02 SCSI->QIC translator.
	 * This board gives an empty response to an INQUIRY command.
	 */
	else if (error == 0 &&
	    inqbuf->device == (SID_QUAL_LU_PRESENT | T_DIRECT) &&
	    inqbuf->dev_qual2 == 0 &&
	    inqbuf->version == 0 &&
	    inqbuf->response_format == SID_FORMAT_SCSI1) {
		/*
		 * Fill out the INQUIRY response.
		 */
		inqbuf->device = (SID_QUAL_LU_PRESENT | T_SEQUENTIAL);
		inqbuf->dev_qual2 = SID_REMOVABLE;
		inqbuf->additional_length = 3 + 28;
		inqbuf->flags1 = inqbuf->flags2 = inqbuf->flags3 = 0;
		memcpy(inqbuf->vendor, "EMULEX  ", sizeof(inqbuf->vendor));
		memcpy(inqbuf->product, "MT-02 QIC       ",
		    sizeof(inqbuf->product));
		memcpy(inqbuf->revision, "    ", sizeof(inqbuf->revision));
	}
#endif /* SCSI_OLD_NOINQUIRY */

	return error;
}

/*
 * scsipi_prevent:
 *
 *	Prevent or allow the user to remove the media
 */
int
scsipi_prevent(periph, type, flags)
	struct scsipi_periph *periph;
	int type, flags;
{
	struct scsipi_prevent scsipi_cmd;

	if (periph->periph_quirks & PQUIRK_NODOORLOCK)
		return (0);

	memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
	scsipi_cmd.opcode = PREVENT_ALLOW;
	scsipi_cmd.how = type;

	return (scsipi_command(periph,
	    (struct scsipi_generic *) &scsipi_cmd, sizeof(scsipi_cmd),
	    0, 0, SCSIPIRETRIES, 5000, NULL, flags));
}

/*
 * scsipi_start:
 *
 *	Send a START UNIT.
 */
int
scsipi_start(periph, type, flags)
	struct scsipi_periph *periph;
	int type, flags;
{
	struct scsipi_start_stop scsipi_cmd;

	if (periph->periph_quirks & PQUIRK_NOSTARTUNIT)
		return 0;

	memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
	scsipi_cmd.opcode = START_STOP;
	scsipi_cmd.byte2 = 0x00;
	scsipi_cmd.how = type;

	return (scsipi_command(periph,
	    (struct scsipi_generic *) &scsipi_cmd, sizeof(scsipi_cmd),
	    0, 0, SCSIPIRETRIES, (type & SSS_START) ? 60000 : 10000,
	    NULL, flags));
}

/*
 * scsipi_mode_sense, scsipi_mode_sense_big:
 *	get a sense page from a device
 */

int
scsipi_mode_sense(periph, byte2, page, data, len, flags, retries, timeout)
	struct scsipi_periph *periph;
	int byte2, page, len, flags, retries, timeout;
	struct scsipi_mode_header *data;
{
	struct scsipi_mode_sense scsipi_cmd;
	int error;

	memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
	scsipi_cmd.opcode = MODE_SENSE;
	scsipi_cmd.byte2 = byte2;
	scsipi_cmd.page = page;
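	/*
	 * ATAPI uses the 10-byte MODE SENSE, which carries a 2-byte
	 * big-endian allocation length; the SCSI 6-byte form has only
	 * a single length byte.
	 */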
	if (scsipi_periph_bustype(periph) == SCSIPI_BUSTYPE_ATAPI)
		_lto2b(len, scsipi_cmd.u_len.atapi.length);
	else
		scsipi_cmd.u_len.scsi.length = len & 0xff;
	error = scsipi_command(periph, (struct scsipi_generic *)&scsipi_cmd,
	    sizeof(scsipi_cmd), (void *)data, len, retries, timeout, NULL,
	    flags | XS_CTL_DATA_IN);
	SC_DEBUG(periph, SCSIPI_DB2,
	    ("scsipi_mode_sense: error=%d\n", error));
	return (error);
}

int
scsipi_mode_sense_big(periph, byte2, page, data, len, flags, retries, timeout)
	struct scsipi_periph *periph;
	int byte2, page, len, flags, retries, timeout;
	struct scsipi_mode_header_big *data;
{
	struct scsipi_mode_sense_big scsipi_cmd;
	int error;

	memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
	scsipi_cmd.opcode = MODE_SENSE_BIG;
	scsipi_cmd.byte2 = byte2;
	scsipi_cmd.page = page;
	_lto2b(len, scsipi_cmd.length);
	error = scsipi_command(periph, (struct scsipi_generic *)&scsipi_cmd,
	    sizeof(scsipi_cmd), (void *)data, len, retries, timeout, NULL,
	    flags | XS_CTL_DATA_IN);
	SC_DEBUG(periph, SCSIPI_DB2,
	    ("scsipi_mode_sense_big: error=%d\n", error));
	return (error);
}

int
scsipi_mode_select(periph, byte2, data, len, flags, retries, timeout)
	struct scsipi_periph *periph;
	int byte2, len, flags, retries, timeout;
	struct scsipi_mode_header *data;
{
	struct scsipi_mode_select scsipi_cmd;
	int error;

	memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
	scsipi_cmd.opcode = MODE_SELECT;
	scsipi_cmd.byte2 = byte2;
	if (scsipi_periph_bustype(periph) == SCSIPI_BUSTYPE_ATAPI)
		_lto2b(len, scsipi_cmd.u_len.atapi.length);
	else
		scsipi_cmd.u_len.scsi.length = len & 0xff;
	error = scsipi_command(periph, (struct scsipi_generic *)&scsipi_cmd,
	    sizeof(scsipi_cmd), (void *)data, len, retries, timeout, NULL,
	    flags | XS_CTL_DATA_OUT);
	SC_DEBUG(periph, SCSIPI_DB2,
	    ("scsipi_mode_select: error=%d\n", error));
	return (error);
}

int
scsipi_mode_select_big(periph, byte2, data, len, flags, retries, timeout)
	struct scsipi_periph *periph;
	int byte2, len, flags, retries, timeout;
	struct scsipi_mode_header_big *data;
{
	struct scsipi_mode_select_big scsipi_cmd;
	int error;

	memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
	scsipi_cmd.opcode = MODE_SELECT_BIG;
	scsipi_cmd.byte2 = byte2;
	_lto2b(len, scsipi_cmd.length);
	error = scsipi_command(periph, (struct scsipi_generic *)&scsipi_cmd,
	    sizeof(scsipi_cmd), (void *)data, len, retries, timeout, NULL,
	    flags | XS_CTL_DATA_OUT);
	SC_DEBUG(periph, SCSIPI_DB2,
	    ("scsipi_mode_select_big: error=%d\n", error));
	return (error);
}

/*
 * scsipi_done:
 *
 *	This routine is called by an adapter's interrupt handler when
 *	an xfer is completed.
 */
void
scsipi_done(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_periph *periph = xs->xs_periph;
	struct scsipi_channel *chan = periph->periph_channel;
	int s, freezecnt;

	SC_DEBUG(periph, SCSIPI_DB2, ("scsipi_done\n"));
#ifdef SCSIPI_DEBUG
	if (periph->periph_dbflags & SCSIPI_DB1)
		show_scsipi_cmd(xs);
#endif

	s = splbio();
	/*
	 * The resource this command was using is now free.
	 */
	scsipi_put_resource(chan);
	xs->xs_periph->periph_sent--;

	/*
	 * If the command was tagged, free the tag.
	 */
	if (XS_CTL_TAGTYPE(xs) != 0)
		scsipi_put_tag(xs);
	else
		periph->periph_flags &= ~PERIPH_UNTAG;

	/* Mark the command as `done'. */
	xs->xs_status |= XS_STS_DONE;

#ifdef DIAGNOSTIC
	if ((xs->xs_control & (XS_CTL_ASYNC|XS_CTL_POLL)) ==
	    (XS_CTL_ASYNC|XS_CTL_POLL))
		panic("scsipi_done: ASYNC and POLL");
#endif

	/*
	 * If the xfer had an error of any sort, freeze the
	 * periph's queue.  Freeze it again if we were requested
	 * to do so in the xfer.
	 */
	freezecnt = 0;
	if (xs->error != XS_NOERROR)
		freezecnt++;
	if (xs->xs_control & XS_CTL_FREEZE_PERIPH)
		freezecnt++;
	if (freezecnt != 0)
		scsipi_periph_freeze(periph, freezecnt);

	/*
	 * Record the xfer with a pending sense, in case a SCSI reset is
	 * received before the thread is woken up.
	 */
	if (xs->error == XS_BUSY && xs->status == SCSI_CHECK) {
		periph->periph_flags |= PERIPH_SENSE;
		periph->periph_xscheck = xs;
	}

	/*
	 * If this was an xfer that was not to complete asynchronously,
	 * let the requesting thread perform error checking/handling
	 * in its context.
	 */
	if ((xs->xs_control & XS_CTL_ASYNC) == 0) {
		splx(s);
		/*
		 * If it's a polling job, just return, to unwind the
		 * call graph.  We don't need to restart the queue,
		 * because polling jobs are treated specially, and
		 * are really only used during crash dumps anyway
		 * (XXX or during boot-time autoconfiguration of
		 * ATAPI devices).
		 */
		if (xs->xs_control & XS_CTL_POLL)
			return;
		wakeup(xs);
		goto out;
	}

	/*
	 * Catch the extremely common case of I/O completing
	 * without error; no use in taking a context switch
	 * if we can handle it in interrupt context.
	 */
	if (xs->error == XS_NOERROR) {
		splx(s);
		(void) scsipi_complete(xs);
		goto out;
	}

	/*
	 * There is an error on this xfer.  Put it on the channel's
	 * completion queue, and wake up the completion thread.
	 */
	TAILQ_INSERT_TAIL(&chan->chan_complete, xs, channel_q);
	splx(s);
	wakeup(&chan->chan_complete);

 out:
	/*
	 * If there are more xfers on the channel's queue, attempt to
	 * run them.
	 */
	scsipi_run_queue(chan);
}

/*
 * scsipi_complete:
 *
 *	Completion of a scsipi_xfer.  This is the guts of scsipi_done().
 *
 *	NOTE: This routine MUST be called with valid thread context
 *	except for the case where the following two conditions are
 *	true:
 *
 *		xs->error == XS_NOERROR
 *		XS_CTL_ASYNC is set in xs->xs_control
 *
 *	The semantics of this routine can be tricky, so here is an
 *	explanation:
 *
 *		0		Xfer completed successfully.
 *
 *		ERESTART	Xfer had an error, but was restarted.
 *
 *		anything else	Xfer had an error, return value is Unix
 *				errno.
 *
 *	If the return value is anything but ERESTART:
 *
 *		- If XS_CTL_ASYNC is set, `xs' has been freed back to
 *		  the pool.
 *		- If there is a buf associated with the xfer,
 *		  it has been biodone()'d.
 */
int
scsipi_complete(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_periph *periph = xs->xs_periph;
	struct scsipi_channel *chan = periph->periph_channel;
	struct buf *bp;
	int error, s;

#ifdef DIAGNOSTIC
	if ((xs->xs_control & XS_CTL_ASYNC) != 0 && xs->bp == NULL)
		panic("scsipi_complete: XS_CTL_ASYNC but no buf");
#endif
	/*
	 * If command terminated with a CHECK CONDITION, we need to issue a
	 * REQUEST_SENSE command. Once the REQUEST_SENSE has been processed
	 * we'll have the real status.
	 * Must be processed at splbio() to avoid missing a SCSI bus reset
	 * for this command.
	 */
	s = splbio();
	if (xs->error == XS_BUSY && xs->status == SCSI_CHECK) {
		/* request sense for a request sense ? */
		if (xs->xs_control & XS_CTL_REQSENSE) {
			scsipi_printaddr(periph);
			printf("request sense for a request sense ?\n");
			/* XXX maybe we should reset the device ? */
			/* we've been frozen because xs->error != XS_NOERROR */
			scsipi_periph_thaw(periph, 1);
			splx(s);
			if (xs->resid < xs->datalen) {
				printf("we read %d bytes of sense anyway:\n",
				    xs->datalen - xs->resid);
#ifdef SCSIVERBOSE
				scsipi_print_sense_data((void *)xs->data, 0);
#endif
			}
			return EINVAL;
		}
		scsipi_request_sense(xs);
	}
	splx(s);

	/*
	 * If it's a user level request, bypass all usual completion
	 * processing and let the user work it out.
	 */
	if ((xs->xs_control & XS_CTL_USERCMD) != 0) {
		SC_DEBUG(periph, SCSIPI_DB3, ("calling user done()\n"));
		if (xs->error != XS_NOERROR)
			scsipi_periph_thaw(periph, 1);
		scsipi_user_done(xs);
		SC_DEBUG(periph, SCSIPI_DB3, ("returned from user done()\n"));
		return 0;
	}

	switch (xs->error) {
	case XS_NOERROR:
		error = 0;
		break;

	case XS_SENSE:
	case XS_SHORTSENSE:
		error = (*chan->chan_bustype->bustype_interpret_sense)(xs);
		break;

	case XS_RESOURCE_SHORTAGE:
		/*
		 * XXX Should freeze channel's queue.
		 */
		scsipi_printaddr(periph);
		printf("adapter resource shortage\n");
		/* FALLTHROUGH */

	case XS_BUSY:
		if (xs->error == XS_BUSY && xs->status == SCSI_QUEUE_FULL) {
			struct scsipi_max_openings mo;

			/*
			 * We set the openings to active - 1, assuming that
			 * the command that got us here is the first one that
			 * can't fit into the device's queue.  If that's not
			 * the case, I guess we'll find out soon enough.
			 */
			mo.mo_target = periph->periph_target;
			mo.mo_lun = periph->periph_lun;
			if (periph->periph_active < periph->periph_openings)
				mo.mo_openings = periph->periph_active - 1;
			else
				mo.mo_openings = periph->periph_openings - 1;
#ifdef DIAGNOSTIC
			if (mo.mo_openings < 0) {
				scsipi_printaddr(periph);
				printf("QUEUE FULL resulted in < 0 openings\n");
				panic("scsipi_done");
			}
#endif
			if (mo.mo_openings == 0) {
				scsipi_printaddr(periph);
				printf("QUEUE FULL resulted in 0 openings\n");
				mo.mo_openings = 1;
			}
			scsipi_async_event(chan, ASYNC_EVENT_MAX_OPENINGS, &mo);
			error = ERESTART;
		} else if (xs->xs_retries != 0) {
			xs->xs_retries--;
			/*
			 * Wait one second, and try again.
			 */
			if ((xs->xs_control & XS_CTL_POLL) ||
			    (chan->chan_flags & SCSIPI_CHAN_TACTIVE) == 0) {
				delay(1000000);
			} else if (!callout_active(&periph->periph_callout)) {
				scsipi_periph_freeze(periph, 1);
				callout_reset(&periph->periph_callout,
				    hz, scsipi_periph_timed_thaw, periph);
			}
			error = ERESTART;
		} else
			error = EBUSY;
		break;

	case XS_REQUEUE:
		error = ERESTART;
		break;

	case XS_TIMEOUT:
		if (xs->xs_retries != 0) {
			xs->xs_retries--;
			error = ERESTART;
		} else
			error = EIO;
		break;

	case XS_SELTIMEOUT:
		/* XXX Disable device? */
		error = EIO;
		break;

	case XS_RESET:
		if (xs->xs_control & XS_CTL_REQSENSE) {
			/*
			 * request sense interrupted by reset: signal it
			 * with EINTR return code.
			 */
			error = EINTR;
		} else {
			if (xs->xs_retries != 0) {
				xs->xs_retries--;
				error = ERESTART;
			} else
				error = EIO;
		}
		break;

	default:
		scsipi_printaddr(periph);
		printf("invalid return code from adapter: %d\n", xs->error);
		error = EIO;
		break;
	}

	s = splbio();
	if (error == ERESTART) {
		/*
		 * If we get here, the periph has been thawed and frozen
		 * again if we had to issue recovery commands.  Alternatively,
		 * it may have been frozen again and in a timed thaw.  In
		 * any case, we thaw the periph once we re-enqueue the
		 * command.  Once the periph is fully thawed, it will begin
		 * operation again.
		 */
		xs->error = XS_NOERROR;
		xs->status = SCSI_OK;
		xs->xs_status &= ~XS_STS_DONE;
		xs->xs_requeuecnt++;
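		/*
		 * The requeue count lets scsipi_enqueue() reinsert this
		 * xfer ahead of less-requeued xfers for the same periph,
		 * preserving ordering across restarts.
		 */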
		error = scsipi_enqueue(xs);
		if (error == 0) {
			scsipi_periph_thaw(periph, 1);
			splx(s);
			return (ERESTART);
		}
	}

	/*
	 * scsipi_done() freezes the queue if not XS_NOERROR.
	 * Thaw it here.
	 */
	if (xs->error != XS_NOERROR)
		scsipi_periph_thaw(periph, 1);

	/*
	 * Set buffer fields in case the periph
	 * switch done func uses them
	 */
	if ((bp = xs->bp) != NULL) {
		if (error) {
			bp->b_error = error;
			bp->b_flags |= B_ERROR;
			bp->b_resid = bp->b_bcount;
		} else {
			bp->b_error = 0;
			bp->b_resid = xs->resid;
		}
	}

	if (periph->periph_switch->psw_done)
		periph->periph_switch->psw_done(xs);

	if (bp)
		biodone(bp);

	if (xs->xs_control & XS_CTL_ASYNC)
		scsipi_put_xs(xs);
	splx(s);

	return (error);
}

/*
 * Issue a request sense for the given scsipi_xfer.  Called when the xfer
 * returns with a CHECK_CONDITION status.  Must be called in valid thread
 * context and at splbio().
 */

void
scsipi_request_sense(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_periph *periph = xs->xs_periph;
	int flags, error;
	struct scsipi_sense cmd;

	periph->periph_flags |= PERIPH_SENSE;

	/* if command was polling, request sense will too */
	flags = xs->xs_control & XS_CTL_POLL;
	/* Polling commands can't sleep */
	if (flags)
		flags |= XS_CTL_NOSLEEP;

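	/*
	 * XS_CTL_THAW_PERIPH undoes the freeze taken when the original
	 * command failed, so this xfer can be enqueued, and
	 * XS_CTL_FREEZE_PERIPH re-freezes the periph on completion,
	 * holding off other xfers until error handling finishes.
	 */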
	flags |= XS_CTL_REQSENSE | XS_CTL_URGENT | XS_CTL_DATA_IN |
	    XS_CTL_THAW_PERIPH | XS_CTL_FREEZE_PERIPH;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = REQUEST_SENSE;
	cmd.length = sizeof(struct scsipi_sense_data);

	error = scsipi_command(periph,
	    (struct scsipi_generic *) &cmd, sizeof(cmd),
	    (u_char*)&xs->sense.scsi_sense, sizeof(struct scsipi_sense_data),
	    0, 1000, NULL, flags);
	periph->periph_flags &= ~PERIPH_SENSE;
	periph->periph_xscheck = NULL;
	switch (error) {
	case 0:
		/* we have a valid sense */
		xs->error = XS_SENSE;
		return;
	case EINTR:
		/* REQUEST_SENSE interrupted by bus reset. */
		xs->error = XS_RESET;
		return;
	case EIO:
		/* request sense couldn't be performed */
		/*
		 * XXX this isn't quite right but we don't have anything
		 * better for now
		 */
		xs->error = XS_DRIVER_STUFFUP;
		return;
	default:
		/* Notify that request sense failed. */
		xs->error = XS_DRIVER_STUFFUP;
		scsipi_printaddr(periph);
		printf("request sense failed with error %d\n", error);
		return;
	}
}

/*
 * scsipi_enqueue:
 *
 *	Enqueue an xfer on a channel.
 */
int
scsipi_enqueue(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_channel *chan = xs->xs_periph->periph_channel;
	struct scsipi_xfer *qxs;
	int s;

	s = splbio();

	/*
	 * If the xfer is to be polled, and there are already jobs on
	 * the queue, we can't proceed.
	 */
	if ((xs->xs_control & XS_CTL_POLL) != 0 &&
	    TAILQ_FIRST(&chan->chan_queue) != NULL) {
		splx(s);
		xs->error = XS_DRIVER_STUFFUP;
		return (EAGAIN);
	}

	/*
	 * If we have an URGENT xfer, it's an error recovery command
	 * and it should just go on the head of the channel's queue.
	 */
	if (xs->xs_control & XS_CTL_URGENT) {
		TAILQ_INSERT_HEAD(&chan->chan_queue, xs, channel_q);
		goto out;
	}

	/*
	 * If this xfer has already been on the queue before, we
	 * need to reinsert it in the correct order.  That order is:
	 *
	 *	Immediately before the first xfer for this periph
	 *	with a requeuecnt less than xs->xs_requeuecnt.
	 *
	 * Failing that, at the end of the queue.  (We'll end up
	 * there naturally.)
	 */
	if (xs->xs_requeuecnt != 0) {
		for (qxs = TAILQ_FIRST(&chan->chan_queue); qxs != NULL;
		     qxs = TAILQ_NEXT(qxs, channel_q)) {
			if (qxs->xs_periph == xs->xs_periph &&
			    qxs->xs_requeuecnt < xs->xs_requeuecnt)
				break;
		}
		if (qxs != NULL) {
			TAILQ_INSERT_AFTER(&chan->chan_queue, qxs, xs,
			    channel_q);
			goto out;
		}
	}
	TAILQ_INSERT_TAIL(&chan->chan_queue, xs, channel_q);
 out:
	if (xs->xs_control & XS_CTL_THAW_PERIPH)
		scsipi_periph_thaw(xs->xs_periph, 1);
	splx(s);
	return (0);
}

/*
 * scsipi_run_queue:
 *
 *	Start as many xfers as possible running on the channel.
 */
void
scsipi_run_queue(chan)
	struct scsipi_channel *chan;
{
	struct scsipi_xfer *xs;
	struct scsipi_periph *periph;
	int s;

	for (;;) {
		s = splbio();

		/*
		 * If the channel is frozen, we can't do any work right
		 * now.
		 */
		if (chan->chan_qfreeze != 0) {
			splx(s);
			return;
		}

		/*
		 * Look for work to do, and make sure we can do it.
		 */
		for (xs = TAILQ_FIRST(&chan->chan_queue); xs != NULL;
		     xs = TAILQ_NEXT(xs, channel_q)) {
			periph = xs->xs_periph;

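			/*
			 * Skip this xfer if the periph has used up its
			 * openings, is frozen, or already has an untagged
			 * command outstanding (only one untagged command
			 * may be in flight per periph).
			 */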
			if ((periph->periph_sent >= periph->periph_openings) ||
			    periph->periph_qfreeze != 0 ||
			    (periph->periph_flags & PERIPH_UNTAG) != 0)
				continue;

			if ((periph->periph_flags &
			    (PERIPH_RECOVERING | PERIPH_SENSE)) != 0 &&
			    (xs->xs_control & XS_CTL_URGENT) == 0)
				continue;

			/*
			 * We can issue this xfer!
			 */
			goto got_one;
		}

		/*
		 * Can't find any work to do right now.
		 */
		splx(s);
		return;

 got_one:
		/*
		 * Have an xfer to run.  Allocate a resource from
		 * the adapter to run it.  If we can't allocate that
		 * resource, we don't dequeue the xfer.
		 */
		if (scsipi_get_resource(chan) == 0) {
			/*
			 * Adapter is out of resources.  If the adapter
			 * supports it, attempt to grow them.
			 */
			if (scsipi_grow_resources(chan) == 0) {
				/*
				 * Wasn't able to grow resources,
				 * nothing more we can do.
				 */
				if (xs->xs_control & XS_CTL_POLL) {
					scsipi_printaddr(xs->xs_periph);
					printf("polling command but no "
					    "adapter resources");
					/* We'll panic shortly... */
				}
				splx(s);

				/*
				 * XXX: We should be able to note
				 * XXX: that resources are needed here!
				 */
				return;
			}
			/*
			 * scsipi_grow_resources() allocated the resource
			 * for us.
			 */
		}

		/*
		 * We have a resource to run this xfer, do it!
		 */
		TAILQ_REMOVE(&chan->chan_queue, xs, channel_q);

		/*
		 * If the command is to be tagged, allocate a tag ID
		 * for it.
		 */
		if (XS_CTL_TAGTYPE(xs) != 0)
			scsipi_get_tag(xs);
		else
			periph->periph_flags |= PERIPH_UNTAG;
		periph->periph_sent++;
		splx(s);

		scsipi_adapter_request(chan, ADAPTER_REQ_RUN_XFER, xs);
	}
#ifdef DIAGNOSTIC
	panic("scsipi_run_queue: impossible");
#endif
}

/*
 * scsipi_execute_xs:
 *
 *	Begin execution of an xfer, waiting for it to complete, if necessary.
 */
int
scsipi_execute_xs(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_periph *periph = xs->xs_periph;
	struct scsipi_channel *chan = periph->periph_channel;
	int oasync, async, poll, retries, error, s;

	xs->xs_status &= ~XS_STS_DONE;
	xs->error = XS_NOERROR;
	xs->resid = xs->datalen;
	xs->status = SCSI_OK;

#ifdef SCSIPI_DEBUG
	if (xs->xs_periph->periph_dbflags & SCSIPI_DB3) {
		printf("scsipi_execute_xs: ");
		show_scsipi_xs(xs);
		printf("\n");
	}
#endif

	/*
	 * Deal with command tagging:
	 *
	 *	- If the device's current operating mode doesn't
	 *	  include tagged queueing, clear the tag mask.
	 *
	 *	- If the device's current operating mode *does*
	 *	  include tagged queueing, set the tag_type in
	 *	  the xfer to the appropriate byte for the tag
	 *	  message.
	 */
	if ((PERIPH_XFER_MODE(periph) & PERIPH_CAP_TQING) == 0 ||
	    (xs->xs_control & XS_CTL_REQSENSE)) {
		xs->xs_control &= ~XS_CTL_TAGMASK;
		xs->xs_tag_type = 0;
	} else {
		/*
		 * If the request doesn't specify a tag, give Head
		 * tags to URGENT operations and Ordered tags to
		 * everything else.
		 */
		if (XS_CTL_TAGTYPE(xs) == 0) {
			if (xs->xs_control & XS_CTL_URGENT)
				xs->xs_control |= XS_CTL_HEAD_TAG;
			else
				xs->xs_control |= XS_CTL_ORDERED_TAG;
		}

		switch (XS_CTL_TAGTYPE(xs)) {
		case XS_CTL_ORDERED_TAG:
			xs->xs_tag_type = MSG_ORDERED_Q_TAG;
			break;

		case XS_CTL_SIMPLE_TAG:
			xs->xs_tag_type = MSG_SIMPLE_Q_TAG;
			break;

		case XS_CTL_HEAD_TAG:
			xs->xs_tag_type = MSG_HEAD_OF_Q_TAG;
			break;

		default:
			scsipi_printaddr(periph);
			printf("invalid tag mask 0x%08x\n",
			    XS_CTL_TAGTYPE(xs));
			panic("scsipi_execute_xs");
		}
	}

	/* If the adapter wants us to poll, poll. */
	if (chan->chan_adapter->adapt_flags & SCSIPI_ADAPT_POLL_ONLY)
		xs->xs_control |= XS_CTL_POLL;

	/*
	 * If we don't yet have a completion thread, or we are to poll for
	 * completion, clear the ASYNC flag.
	 */
	oasync = (xs->xs_control & XS_CTL_ASYNC);
	if (chan->chan_thread == NULL || (xs->xs_control & XS_CTL_POLL) != 0)
		xs->xs_control &= ~XS_CTL_ASYNC;

	async = (xs->xs_control & XS_CTL_ASYNC);
	poll = (xs->xs_control & XS_CTL_POLL);
	retries = xs->xs_retries;	/* for polling commands */

#ifdef DIAGNOSTIC
	if (oasync != 0 && xs->bp == NULL)
		panic("scsipi_execute_xs: XS_CTL_ASYNC but no buf");
#endif

	/*
	 * Enqueue the transfer.  If we're not polling for completion, this
	 * should ALWAYS return `no error'.
	 */
 try_again:
	error = scsipi_enqueue(xs);
	if (error) {
		if (poll == 0) {
			scsipi_printaddr(periph);
			printf("not polling, but enqueue failed with %d\n",
			    error);
			panic("scsipi_execute_xs");
		}

		scsipi_printaddr(periph);
		printf("failed to enqueue polling command");
		if (retries != 0) {
			printf(", retrying...\n");
			delay(1000000);
			retries--;
			goto try_again;
		}
		printf("\n");
		goto free_xs;
	}

 restarted:
	scsipi_run_queue(chan);

	/*
	 * The xfer is enqueued, and possibly running.  If it's to be
	 * completed asynchronously, just return now.
	 */
	if (async)
		return (EJUSTRETURN);

	/*
	 * Not an asynchronous command; wait for it to complete.
	 */
	s = splbio();
	while ((xs->xs_status & XS_STS_DONE) == 0) {
		if (poll) {
			scsipi_printaddr(periph);
			printf("polling command not done\n");
			panic("scsipi_execute_xs");
		}
		(void) tsleep(xs, PRIBIO, "xscmd", 0);
	}
	splx(s);

	/*
	 * Command is complete.  scsipi_done() has awakened us to perform
	 * the error handling.
	 */
	error = scsipi_complete(xs);
	if (error == ERESTART)
		goto restarted;

	/*
	 * If it was meant to run async and we cleared async ourselves,
	 * don't return an error here; it has already been handled.
	 */
	if (oasync)
		error = EJUSTRETURN;
	/*
	 * Command completed successfully or fatal error occurred.  Fall
	 * into....
	 */
 free_xs:
	s = splbio();
	scsipi_put_xs(xs);
	splx(s);

	/*
	 * Kick the queue, keep it running in case it stopped for some
	 * reason.
	 */
	scsipi_run_queue(chan);

	return (error);
}

/*
 * scsipi_completion_thread:
 *
 *	This is the completion thread.  We wait for errors on
 *	asynchronous xfers, and perform the error handling
 *	function, restarting the command, if necessary.
 */
void
scsipi_completion_thread(arg)
	void *arg;
{
	struct scsipi_channel *chan = arg;
	struct scsipi_xfer *xs;
	int s;

	s = splbio();
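	/*
	 * SCSIPI_CHAN_TACTIVE tells the rest of the code that a thread
	 * is servicing this channel, so work such as resource growth
	 * and queue kicks can be deferred to us.
	 */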
2070 chan->chan_flags |= SCSIPI_CHAN_TACTIVE;
2071 splx(s);
2072 for (;;) {
2073 s = splbio();
2074 xs = TAILQ_FIRST(&chan->chan_complete);
2075 if (xs == NULL && chan->chan_tflags == 0) {
2076 /* nothing to do; wait */
2077 (void) tsleep(&chan->chan_complete, PRIBIO,
2078 "sccomp", 0);
2079 splx(s);
2080 continue;
2081 }
2082 if (chan->chan_tflags & SCSIPI_CHANT_CALLBACK) {
2083 /* call chan_callback from thread context */
2084 chan->chan_tflags &= ~SCSIPI_CHANT_CALLBACK;
2085 chan->chan_callback(chan, chan->chan_callback_arg);
2086 splx(s);
2087 continue;
2088 }
2089 if (chan->chan_tflags & SCSIPI_CHANT_GROWRES) {
2090 /* attempt to get more openings for this channel */
2091 chan->chan_tflags &= ~SCSIPI_CHANT_GROWRES;
2092 scsipi_adapter_request(chan,
2093 ADAPTER_REQ_GROW_RESOURCES, NULL);
2094 scsipi_channel_thaw(chan, 1);
2095 splx(s);
2096 continue;
2097 }
2098 if (chan->chan_tflags & SCSIPI_CHANT_KICK) {
2099 /* explicitly run the queues for this channel */
2100 chan->chan_tflags &= ~SCSIPI_CHANT_KICK;
2101 scsipi_run_queue(chan);
2102 splx(s);
2103 continue;
2104 }
2105 if (chan->chan_tflags & SCSIPI_CHANT_SHUTDOWN) {
2106 splx(s);
2107 break;
2108 }
2109 if (xs) {
2110 TAILQ_REMOVE(&chan->chan_complete, xs, channel_q);
2111 splx(s);
2112
2113 /*
2114 * Have an xfer with an error; process it.
2115 */
2116 (void) scsipi_complete(xs);
2117
2118 /*
2119 * Kick the queue; keep it running if it was stopped
2120 * for some reason.
2121 */
2122 scsipi_run_queue(chan);
2123 } else {
2124 splx(s);
2125 }
2126 }
2127
2128 chan->chan_thread = NULL;
2129
2130 /* In case parent is waiting for us to exit. */
2131 wakeup(&chan->chan_thread);
2132
2133 kthread_exit(0);
2134 }
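
/*
 * Illustrative sketch (not code from this file): a caller tearing the
 * channel down would request shutdown by setting SCSIPI_CHANT_SHUTDOWN
 * and waking the thread, then waiting for it to clear chan_thread on
 * its way out (the wmesg "scshut" is invented for the example):
 *
 *	s = splbio();
 *	chan->chan_tflags |= SCSIPI_CHANT_SHUTDOWN;
 *	wakeup(&chan->chan_complete);
 *	while (chan->chan_thread != NULL)
 *		(void) tsleep(&chan->chan_thread, PRIBIO, "scshut", 0);
 *	splx(s);
 */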
2135
2136 /*
2137 * scsipi_create_completion_thread:
2138 *
2139 * Callback to actually create the completion thread.
2140 */
2141 void
2142 scsipi_create_completion_thread(arg)
2143 void *arg;
2144 {
2145 struct scsipi_channel *chan = arg;
2146 struct scsipi_adapter *adapt = chan->chan_adapter;
2147
2148 if (kthread_create1(scsipi_completion_thread, chan,
2149 &chan->chan_thread, "%s", chan->chan_name)) {
2150 printf("%s: unable to create completion thread for "
2151 "channel %d\n", adapt->adapt_dev->dv_xname,
2152 chan->chan_channel);
2153 panic("scsipi_create_completion_thread");
2154 }
2155 }
2156
2157 /*
2158 * scsipi_thread_call_callback:
2159 *
 *	Request that the given callback be invoked from the completion
 *	thread's context.
2161 */
2162 int
2163 scsipi_thread_call_callback(chan, callback, arg)
2164 struct scsipi_channel *chan;
2165 void (*callback) __P((struct scsipi_channel *, void *));
2166 void *arg;
2167 {
2168 int s;
2169
2170 s = splbio();
2171 if ((chan->chan_flags & SCSIPI_CHAN_TACTIVE) == 0) {
2172 /* kernel thread doesn't exist yet */
2173 splx(s);
2174 return ESRCH;
2175 }
2176 if (chan->chan_tflags & SCSIPI_CHANT_CALLBACK) {
2177 splx(s);
2178 return EBUSY;
2179 }
2180 scsipi_channel_freeze(chan, 1);
2181 chan->chan_callback = callback;
2182 chan->chan_callback_arg = arg;
2183 chan->chan_tflags |= SCSIPI_CHANT_CALLBACK;
2184 wakeup(&chan->chan_complete);
2185 splx(s);
	return (0);
2187 }
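
/*
 * Illustrative sketch (hypothetical driver code): an adapter that must
 * do non-interrupt-level work after an event seen in its interrupt
 * handler could hand it off like so ("xx_softc" and "xx_rescan" are
 * invented names):
 *
 *	void
 *	xx_rescan(struct scsipi_channel *chan, void *arg)
 *	{
 *		struct xx_softc *sc = arg;
 *
 *		... work that needs thread context ...
 *		scsipi_channel_thaw(chan, 1);
 *	}
 *
 *	if (scsipi_thread_call_callback(&sc->sc_channel, xx_rescan, sc))
 *		printf("callback busy or thread not yet running\n");
 *
 * Note that the channel is frozen by one count on the caller's behalf;
 * the callback is expected to thaw it when done, as above.
 */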
2188
2189 /*
2190 * scsipi_async_event:
2191 *
2192 * Handle an asynchronous event from an adapter.
2193 */
2194 void
2195 scsipi_async_event(chan, event, arg)
2196 struct scsipi_channel *chan;
2197 scsipi_async_event_t event;
2198 void *arg;
2199 {
2200 int s;
2201
2202 s = splbio();
2203 switch (event) {
2204 case ASYNC_EVENT_MAX_OPENINGS:
2205 scsipi_async_event_max_openings(chan,
2206 (struct scsipi_max_openings *)arg);
2207 break;
2208
2209 case ASYNC_EVENT_XFER_MODE:
2210 scsipi_async_event_xfer_mode(chan,
2211 (struct scsipi_xfer_mode *)arg);
		break;

2213 case ASYNC_EVENT_RESET:
2214 scsipi_async_event_channel_reset(chan);
2215 break;
2216 }
2217 splx(s);
2218 }
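
/*
 * Illustrative sketch (hypothetical driver code): an adapter that has
 * just finished negotiating with a target, at interrupt time, would
 * report the result like so:
 *
 *	struct scsipi_xfer_mode xm;
 *
 *	xm.xm_target = target;
 *	xm.xm_mode = PERIPH_CAP_SYNC | PERIPH_CAP_WIDE16;
 *	xm.xm_period = period;	(sync period factor)
 *	xm.xm_offset = offset;
 *	scsipi_async_event(chan, ASYNC_EVENT_XFER_MODE, &xm);
 *
 * scsipi_async_event_xfer_mode() below then clamps the reported mode
 * to each periph's capabilities and announces any change.
 */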
2219
2220 /*
2221 * scsipi_print_xfer_mode:
2222 *
2223 * Print a periph's capabilities.
2224 */
2225 void
2226 scsipi_print_xfer_mode(periph)
2227 struct scsipi_periph *periph;
2228 {
2229 int period, freq, speed, mbs;
2230
2231 if ((periph->periph_flags & PERIPH_MODE_VALID) == 0)
2232 return;
2233
2234 printf("%s: ", periph->periph_dev->dv_xname);
2235 if (periph->periph_mode & (PERIPH_CAP_SYNC | PERIPH_CAP_DT)) {
2236 period = scsipi_sync_factor_to_period(periph->periph_period);
2237 printf("sync (%d.%dns offset %d)",
2238 period / 10, period % 10, periph->periph_offset);
2239 } else
2240 printf("async");
2241
2242 if (periph->periph_mode & PERIPH_CAP_WIDE32)
2243 printf(", 32-bit");
2244 else if (periph->periph_mode & (PERIPH_CAP_WIDE16 | PERIPH_CAP_DT))
2245 printf(", 16-bit");
2246 else
2247 printf(", 8-bit");
2248
2249 if (periph->periph_mode & (PERIPH_CAP_SYNC | PERIPH_CAP_DT)) {
2250 freq = scsipi_sync_factor_to_freq(periph->periph_period);
2251 speed = freq;
2252 if (periph->periph_mode & PERIPH_CAP_WIDE32)
2253 speed *= 4;
2254 else if (periph->periph_mode &
2255 (PERIPH_CAP_WIDE16 | PERIPH_CAP_DT))
2256 speed *= 2;
2257 mbs = speed / 1000;
2258 if (mbs > 0)
2259 printf(" (%d.%03dMB/s)", mbs, speed % 1000);
2260 else
2261 printf(" (%dKB/s)", speed % 1000);
2262 }
2263
2264 printf(" transfers");
2265
2266 if (periph->periph_mode & PERIPH_CAP_TQING)
2267 printf(", tagged queueing");
2268
2269 printf("\n");
2270 }
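
/*
 * For example, a periph running at sync factor 0x0a (25.0 ns, 40 MHz)
 * with offset 15 on a 16-bit bus with tagged queueing enabled would be
 * announced as (device name invented):
 *
 *	sd0: sync (25.0ns offset 15), 16-bit (80.000MB/s) transfers, tagged queueing
 */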
2271
2272 /*
2273 * scsipi_async_event_max_openings:
2274 *
2275 * Update the maximum number of outstanding commands a
2276 * device may have.
2277 */
2278 void
2279 scsipi_async_event_max_openings(chan, mo)
2280 struct scsipi_channel *chan;
2281 struct scsipi_max_openings *mo;
2282 {
2283 struct scsipi_periph *periph;
2284 int minlun, maxlun;
2285
2286 if (mo->mo_lun == -1) {
2287 /*
2288 * Wildcarded; apply it to all LUNs.
2289 */
2290 minlun = 0;
2291 maxlun = chan->chan_nluns - 1;
2292 } else
2293 minlun = maxlun = mo->mo_lun;
2294
2295 /* XXX This could really suck with a large LUN space. */
2296 for (; minlun <= maxlun; minlun++) {
2297 periph = scsipi_lookup_periph(chan, mo->mo_target, minlun);
2298 if (periph == NULL)
2299 continue;
2300
2301 if (mo->mo_openings < periph->periph_openings)
2302 periph->periph_openings = mo->mo_openings;
2303 else if (mo->mo_openings > periph->periph_openings &&
2304 (periph->periph_flags & PERIPH_GROW_OPENINGS) != 0)
2305 periph->periph_openings = mo->mo_openings;
2306 }
2307 }
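
/*
 * Illustrative sketch (hypothetical driver code): an adapter that gets
 * a QUEUE FULL status from target 3 LUN 0 with four commands still
 * outstanding might throttle the device this way:
 *
 *	struct scsipi_max_openings mo;
 *
 *	mo.mo_target = 3;
 *	mo.mo_lun = 0;
 *	mo.mo_openings = 4;
 *	scsipi_async_event(chan, ASYNC_EVENT_MAX_OPENINGS, &mo);
 *
 * Openings are only ever raised again for periphs that have
 * PERIPH_GROW_OPENINGS set.
 */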
2308
2309 /*
2310 * scsipi_async_event_xfer_mode:
2311 *
2312 * Update the xfer mode for all periphs sharing the
2313 * specified I_T Nexus.
2314 */
2315 void
2316 scsipi_async_event_xfer_mode(chan, xm)
2317 struct scsipi_channel *chan;
2318 struct scsipi_xfer_mode *xm;
2319 {
2320 struct scsipi_periph *periph;
2321 int lun, announce, mode, period, offset;
2322
2323 for (lun = 0; lun < chan->chan_nluns; lun++) {
2324 periph = scsipi_lookup_periph(chan, xm->xm_target, lun);
2325 if (periph == NULL)
2326 continue;
2327 announce = 0;
2328
2329 /*
2330 * Clamp the xfer mode down to this periph's capabilities.
2331 */
2332 mode = xm->xm_mode & periph->periph_cap;
2333 if (mode & PERIPH_CAP_SYNC) {
2334 period = xm->xm_period;
2335 offset = xm->xm_offset;
2336 } else {
2337 period = 0;
2338 offset = 0;
2339 }
2340
2341 /*
2342 * If we do not have a valid xfer mode yet, or the parameters
2343 * are different, announce them.
2344 */
2345 if ((periph->periph_flags & PERIPH_MODE_VALID) == 0 ||
2346 periph->periph_mode != mode ||
2347 periph->periph_period != period ||
2348 periph->periph_offset != offset)
2349 announce = 1;
2350
2351 periph->periph_mode = mode;
2352 periph->periph_period = period;
2353 periph->periph_offset = offset;
2354 periph->periph_flags |= PERIPH_MODE_VALID;
2355
2356 if (announce)
2357 scsipi_print_xfer_mode(periph);
2358 }
2359 }
2360
2361 /*
2362 * scsipi_set_xfer_mode:
2363 *
2364 * Set the xfer mode for the specified I_T Nexus.
2365 */
2366 void
2367 scsipi_set_xfer_mode(chan, target, immed)
2368 struct scsipi_channel *chan;
2369 int target, immed;
2370 {
2371 struct scsipi_xfer_mode xm;
	struct scsipi_periph *itperiph = NULL;
2373 int lun, s;
2374
2375 /*
2376 * Go to the minimal xfer mode.
2377 */
2378 xm.xm_target = target;
2379 xm.xm_mode = 0;
2380 xm.xm_period = 0; /* ignored */
2381 xm.xm_offset = 0; /* ignored */
2382
2383 /*
2384 * Find the first LUN we know about on this I_T Nexus.
2385 */
2386 for (lun = 0; lun < chan->chan_nluns; lun++) {
2387 itperiph = scsipi_lookup_periph(chan, target, lun);
2388 if (itperiph != NULL)
2389 break;
2390 }
2391 if (itperiph != NULL) {
2392 xm.xm_mode = itperiph->periph_cap;
2393 /*
2394 * Now issue the request to the adapter.
2395 */
2396 s = splbio();
2397 scsipi_adapter_request(chan, ADAPTER_REQ_SET_XFER_MODE, &xm);
2398 splx(s);
2399 /*
2400 * If we want this to happen immediately, issue a dummy
2401 * command, since most adapters can't really negotiate unless
2402 * they're executing a job.
2403 */
2404 if (immed != 0) {
2405 (void) scsipi_test_unit_ready(itperiph,
2406 XS_CTL_DISCOVERY | XS_CTL_IGNORE_ILLEGAL_REQUEST |
2407 XS_CTL_IGNORE_NOT_READY |
2408 XS_CTL_IGNORE_MEDIA_CHANGE);
2409 }
2410 }
2411 }
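
/*
 * For example, to renegotiate the transfer mode for target 2 right
 * away (say, after a bus reset), a caller would use:
 *
 *	scsipi_set_xfer_mode(chan, 2, 1);
 *
 * With immed == 0 the mode is only passed to the adapter, and the
 * actual negotiation happens on the next real command to the target.
 */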
2412
/*
 * scsipi_async_event_channel_reset:
 *
 *	Handle a SCSI bus reset event.  Called at splbio.
 */
2419 void
2420 scsipi_async_event_channel_reset(chan)
2421 struct scsipi_channel *chan;
2422 {
2423 struct scsipi_xfer *xs, *xs_next;
2424 struct scsipi_periph *periph;
2425 int target, lun;
2426
	/*
	 * The channel has been reset.  Also mark pending REQUEST_SENSE
	 * commands as failed with XS_RESET, since their sense data is no
	 * longer available.  We can't call scsipi_done() from here, as
	 * the commands have not been sent to the adapter yet (doing so
	 * would corrupt the accounting).
	 */
2433
2434 for (xs = TAILQ_FIRST(&chan->chan_queue); xs != NULL; xs = xs_next) {
2435 xs_next = TAILQ_NEXT(xs, channel_q);
2436 if (xs->xs_control & XS_CTL_REQSENSE) {
2437 TAILQ_REMOVE(&chan->chan_queue, xs, channel_q);
2438 xs->error = XS_RESET;
2439 if ((xs->xs_control & XS_CTL_ASYNC) != 0)
2440 TAILQ_INSERT_TAIL(&chan->chan_complete, xs,
2441 channel_q);
2442 }
2443 }
2444 wakeup(&chan->chan_complete);
2445 /* Catch xs with pending sense which may not have a REQSENSE xs yet */
2446 for (target = 0; target < chan->chan_ntargets; target++) {
2447 if (target == chan->chan_id)
2448 continue;
2449 for (lun = 0; lun < chan->chan_nluns; lun++) {
			periph = scsipi_lookup_periph(chan, target, lun);
			if (periph) {
2452 xs = periph->periph_xscheck;
2453 if (xs)
2454 xs->error = XS_RESET;
2455 }
2456 }
2457 }
2458 }
2459
/*
 * scsipi_target_detach:
 *
 *	Detach all periphs associated with the specified I_T Nexus.
 *	Must be called from valid thread context.
 */
2466 int
2467 scsipi_target_detach(chan, target, lun, flags)
2468 struct scsipi_channel *chan;
2469 int target, lun;
2470 int flags;
2471 {
2472 struct scsipi_periph *periph;
2473 int ctarget, mintarget, maxtarget;
2474 int clun, minlun, maxlun;
2475 int error;
2476
2477 if (target == -1) {
2478 mintarget = 0;
2479 maxtarget = chan->chan_ntargets;
2480 } else {
2481 if (target == chan->chan_id)
2482 return EINVAL;
2483 if (target < 0 || target >= chan->chan_ntargets)
2484 return EINVAL;
2485 mintarget = target;
2486 maxtarget = target + 1;
2487 }
2488
2489 if (lun == -1) {
2490 minlun = 0;
2491 maxlun = chan->chan_nluns;
2492 } else {
2493 if (lun < 0 || lun >= chan->chan_nluns)
2494 return EINVAL;
2495 minlun = lun;
2496 maxlun = lun + 1;
2497 }
2498
2499 for (ctarget = mintarget; ctarget < maxtarget; ctarget++) {
2500 if (ctarget == chan->chan_id)
2501 continue;
2502
2503 for (clun = minlun; clun < maxlun; clun++) {
2504 periph = scsipi_lookup_periph(chan, ctarget, clun);
2505 if (periph == NULL)
2506 continue;
2507 error = config_detach(periph->periph_dev, flags);
2508 if (error)
2509 return (error);
2510 scsipi_remove_periph(chan, periph);
2511 free(periph, M_DEVBUF);
2512 }
2513 }
	return (0);
2515 }
2516
2517 /*
2518 * scsipi_adapter_addref:
2519 *
2520 * Add a reference to the adapter pointed to by the provided
2521 * link, enabling the adapter if necessary.
2522 */
2523 int
2524 scsipi_adapter_addref(adapt)
2525 struct scsipi_adapter *adapt;
2526 {
2527 int s, error = 0;
2528
2529 s = splbio();
2530 if (adapt->adapt_refcnt++ == 0 && adapt->adapt_enable != NULL) {
2531 error = (*adapt->adapt_enable)(adapt->adapt_dev, 1);
2532 if (error)
2533 adapt->adapt_refcnt--;
2534 }
2535 splx(s);
2536 return (error);
2537 }
2538
2539 /*
2540 * scsipi_adapter_delref:
2541 *
2542 * Delete a reference to the adapter pointed to by the provided
2543 * link, disabling the adapter if possible.
2544 */
2545 void
2546 scsipi_adapter_delref(adapt)
2547 struct scsipi_adapter *adapt;
2548 {
2549 int s;
2550
2551 s = splbio();
2552 if (adapt->adapt_refcnt-- == 1 && adapt->adapt_enable != NULL)
2553 (void) (*adapt->adapt_enable)(adapt->adapt_dev, 0);
2554 splx(s);
2555 }
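
/*
 * Illustrative sketch (hypothetical driver code): a periph driver
 * typically brackets its open/close paths with these, so adapters
 * that support it (adapt_enable != NULL) are only powered up while
 * one of their devices is actually in use:
 *
 *	if ((error = scsipi_adapter_addref(adapt)) != 0)
 *		return (error);
 *	... use the device ...
 *	scsipi_adapter_delref(adapt);
 */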
2556
2557 struct scsipi_syncparam {
2558 int ss_factor;
2559 int ss_period; /* ns * 10 */
2560 } scsipi_syncparams[] = {
	{ 0x09, 125 },	/* 12.5 ns (80 MHz) */
	{ 0x0a, 250 },	/* 25.0 ns (40 MHz) */
	{ 0x0b, 303 },	/* 30.3 ns (33 MHz) */
	{ 0x0c, 500 },	/* 50.0 ns (20 MHz) */
2565 };
2566 const int scsipi_nsyncparams =
2567 sizeof(scsipi_syncparams) / sizeof(scsipi_syncparams[0]);
2568
2569 int
2570 scsipi_sync_period_to_factor(period)
2571 int period; /* ns * 10 */
2572 {
2573 int i;
2574
2575 for (i = 0; i < scsipi_nsyncparams; i++) {
2576 if (period <= scsipi_syncparams[i].ss_period)
2577 return (scsipi_syncparams[i].ss_factor);
2578 }
2579
2580 return ((period / 10) / 4);
2581 }
2582
2583 int
2584 scsipi_sync_factor_to_period(factor)
2585 int factor;
2586 {
2587 int i;
2588
2589 for (i = 0; i < scsipi_nsyncparams; i++) {
2590 if (factor == scsipi_syncparams[i].ss_factor)
2591 return (scsipi_syncparams[i].ss_period);
2592 }
2593
2594 return ((factor * 4) * 10);
2595 }
2596
2597 int
2598 scsipi_sync_factor_to_freq(factor)
2599 int factor;
2600 {
2601 int i;
2602
2603 for (i = 0; i < scsipi_nsyncparams; i++) {
2604 if (factor == scsipi_syncparams[i].ss_factor)
2605 return (10000000 / scsipi_syncparams[i].ss_period);
2606 }
2607
2608 return (10000000 / ((factor * 4) * 10));
2609 }
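
/*
 * Worked example: factor 0x0a is in the table above, so it converts to
 * a period of 250 (25.0 ns) and a frequency of 10000000 / 250 = 40000
 * (kHz, i.e. 40 MHz).  A factor outside the table, say 0x19 (25),
 * falls back to the linear rule: period = 25 * 4 * 10 = 1000
 * (100.0 ns), giving 10000000 / 1000 = 10000 kHz (10 MHz).
 */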
2610
2611 #ifdef SCSIPI_DEBUG
2612 /*
 * Given a scsipi_xfer, dump the request, in all its glory.
2614 */
2615 void
2616 show_scsipi_xs(xs)
2617 struct scsipi_xfer *xs;
2618 {
2619
2620 printf("xs(%p): ", xs);
2621 printf("xs_control(0x%08x)", xs->xs_control);
2622 printf("xs_status(0x%08x)", xs->xs_status);
2623 printf("periph(%p)", xs->xs_periph);
2624 printf("retr(0x%x)", xs->xs_retries);
2625 printf("timo(0x%x)", xs->timeout);
2626 printf("cmd(%p)", xs->cmd);
2627 printf("len(0x%x)", xs->cmdlen);
2628 printf("data(%p)", xs->data);
2629 printf("len(0x%x)", xs->datalen);
2630 printf("res(0x%x)", xs->resid);
2631 printf("err(0x%x)", xs->error);
2632 printf("bp(%p)", xs->bp);
2633 show_scsipi_cmd(xs);
2634 }
2635
2636 void
2637 show_scsipi_cmd(xs)
2638 struct scsipi_xfer *xs;
2639 {
2640 u_char *b = (u_char *) xs->cmd;
2641 int i = 0;
2642
2643 scsipi_printaddr(xs->xs_periph);
2644 printf(" command: ");
2645
2646 if ((xs->xs_control & XS_CTL_RESET) == 0) {
2647 while (i < xs->cmdlen) {
2648 if (i)
2649 printf(",");
2650 printf("0x%x", b[i++]);
2651 }
2652 printf("-[%d bytes]\n", xs->datalen);
2653 if (xs->datalen)
2654 show_mem(xs->data, min(64, xs->datalen));
2655 } else
2656 printf("-RESET-\n");
2657 }
2658
2659 void
2660 show_mem(address, num)
2661 u_char *address;
2662 int num;
2663 {
2664 int x;
2665
2666 printf("------------------------------");
2667 for (x = 0; x < num; x++) {
2668 if ((x % 16) == 0)
2669 printf("\n%03d: ", x);
2670 printf("%02x ", *address++);
2671 }
2672 printf("\n------------------------------\n");
2673 }
2674 #endif /* SCSIPI_DEBUG */
2675