/*	$NetBSD: scsipi_base.c,v 1.26.2.14 2001/04/11 01:16:05 mjacob Exp $	*/

/*-
 * Copyright (c) 1998, 1999, 2000 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Charles M. Hannum; by Jason R. Thorpe of the Numerical Aerospace
 * Simulation Facility, NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "opt_scsi.h"

#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/buf.h>
#include <sys/uio.h>
#include <sys/malloc.h>
#include <sys/pool.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/proc.h>
#include <sys/kthread.h>

#include <dev/scsipi/scsipi_all.h>
#include <dev/scsipi/scsipi_disk.h>
#include <dev/scsipi/scsipiconf.h>
#include <dev/scsipi/scsipi_base.h>

#include <dev/scsipi/scsi_all.h>
#include <dev/scsipi/scsi_message.h>

int	scsipi_complete __P((struct scsipi_xfer *));
void	scsipi_request_sense __P((struct scsipi_xfer *));
int	scsipi_enqueue __P((struct scsipi_xfer *));
void	scsipi_run_queue __P((struct scsipi_channel *chan));

void	scsipi_completion_thread __P((void *));

void	scsipi_get_tag __P((struct scsipi_xfer *));
void	scsipi_put_tag __P((struct scsipi_xfer *));

int	scsipi_get_resource __P((struct scsipi_channel *));
void	scsipi_put_resource __P((struct scsipi_channel *));
__inline int scsipi_grow_resources __P((struct scsipi_channel *));

void	scsipi_async_event_max_openings __P((struct scsipi_channel *,
	    struct scsipi_max_openings *));
void	scsipi_async_event_xfer_mode __P((struct scsipi_channel *,
	    struct scsipi_xfer_mode *));
void	scsipi_async_event_channel_reset __P((struct scsipi_channel *));

struct pool scsipi_xfer_pool;

/*
 * scsipi_init:
 *
 *	Called when a scsibus or atapibus is attached to the system
 *	to initialize shared data structures.
 */
void
scsipi_init()
{
	static int scsipi_init_done;

	if (scsipi_init_done)
		return;
	scsipi_init_done = 1;

	/* Initialize the scsipi_xfer pool. */
	pool_init(&scsipi_xfer_pool, sizeof(struct scsipi_xfer), 0,
	    0, 0, "scxspl", 0, NULL, NULL, M_DEVBUF);
}

/*
 * scsipi_channel_init:
 *
 *	Initialize a scsipi_channel when it is attached.
 */
int
scsipi_channel_init(chan)
	struct scsipi_channel *chan;
{
	size_t nbytes;
	int i;

	/* Initialize shared data. */
	scsipi_init();

	/* Initialize the queues. */
	TAILQ_INIT(&chan->chan_queue);
	TAILQ_INIT(&chan->chan_complete);

	nbytes = chan->chan_ntargets * sizeof(struct scsipi_periph **);
	chan->chan_periphs = malloc(nbytes, M_DEVBUF, M_NOWAIT);
	if (chan->chan_periphs == NULL)
		return (ENOMEM);

	nbytes = chan->chan_nluns * sizeof(struct scsipi_periph *);
	for (i = 0; i < chan->chan_ntargets; i++) {
		chan->chan_periphs[i] = malloc(nbytes, M_DEVBUF, M_NOWAIT);
		if (chan->chan_periphs[i] == NULL) {
			while (--i >= 0) {
				free(chan->chan_periphs[i], M_DEVBUF);
			}
			free(chan->chan_periphs, M_DEVBUF);
			return (ENOMEM);
		}
		memset(chan->chan_periphs[i], 0, nbytes);
	}

	/*
	 * Create the asynchronous completion thread.
	 */
	kthread_create(scsipi_create_completion_thread, chan);
	return (0);
}

/*
 * scsipi_channel_shutdown:
 *
 *	Shutdown a scsipi_channel.
 */
void
scsipi_channel_shutdown(chan)
	struct scsipi_channel *chan;
{

	/*
	 * Shut down the completion thread.
	 */
	chan->chan_flags |= SCSIPI_CHAN_SHUTDOWN;
	wakeup(&chan->chan_complete);

	/*
	 * Now wait for the thread to exit.
	 */
	while (chan->chan_thread != NULL)
		(void) tsleep(&chan->chan_thread, PRIBIO, "scshut", 0);
}
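
/*
 * Note: the handshake above pairs with scsipi_completion_thread().
 * Setting SCSIPI_CHAN_SHUTDOWN and waking &chan->chan_complete causes
 * the thread to break out of its loop, clear chan->chan_thread, and
 * issue the wakeup on &chan->chan_thread that ends the tsleep() loop
 * above.
 */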

/*
 * scsipi_insert_periph:
 *
 *	Insert a periph into the channel.
 */
void
scsipi_insert_periph(chan, periph)
	struct scsipi_channel *chan;
	struct scsipi_periph *periph;
{
	int s;

	s = splbio();
	chan->chan_periphs[periph->periph_target][periph->periph_lun] = periph;
	splx(s);
}

/*
 * scsipi_remove_periph:
 *
 *	Remove a periph from the channel.
 */
void
scsipi_remove_periph(chan, periph)
	struct scsipi_channel *chan;
	struct scsipi_periph *periph;
{
	int s;

	s = splbio();
	chan->chan_periphs[periph->periph_target][periph->periph_lun] = NULL;
	splx(s);
}

/*
 * scsipi_lookup_periph:
 *
 *	Lookup a periph on the specified channel.
 */
struct scsipi_periph *
scsipi_lookup_periph(chan, target, lun)
	struct scsipi_channel *chan;
	int target, lun;
{
	struct scsipi_periph *periph;
	int s;

	if (target >= chan->chan_ntargets ||
	    lun >= chan->chan_nluns)
		return (NULL);

	s = splbio();
	periph = chan->chan_periphs[target][lun];
	splx(s);

	return (periph);
}

/*
 * scsipi_get_resource:
 *
 *	Allocate a single xfer `resource' from the channel.
 *
 *	NOTE: Must be called at splbio().
 */
int
scsipi_get_resource(chan)
	struct scsipi_channel *chan;
{
	struct scsipi_adapter *adapt = chan->chan_adapter;

	if (chan->chan_flags & SCSIPI_CHAN_OPENINGS) {
		if (chan->chan_openings > 0) {
			chan->chan_openings--;
			return (1);
		}
		return (0);
	}

	if (adapt->adapt_openings > 0) {
		adapt->adapt_openings--;
		return (1);
	}
	return (0);
}

/*
 * scsipi_grow_resources:
 *
 *	Attempt to grow resources for a channel.  If this succeeds,
 *	we allocate one for our caller.
 *
 *	NOTE: Must be called at splbio().
 */
__inline int
scsipi_grow_resources(chan)
	struct scsipi_channel *chan;
{

	if (chan->chan_flags & SCSIPI_CHAN_CANGROW) {
		scsipi_adapter_request(chan, ADAPTER_REQ_GROW_RESOURCES, NULL);
		return (scsipi_get_resource(chan));
	}

	return (0);
}

/*
 * scsipi_put_resource:
 *
 *	Free a single xfer `resource' to the channel.
 *
 *	NOTE: Must be called at splbio().
 */
void
scsipi_put_resource(chan)
	struct scsipi_channel *chan;
{
	struct scsipi_adapter *adapt = chan->chan_adapter;

	if (chan->chan_flags & SCSIPI_CHAN_OPENINGS)
		chan->chan_openings++;
	else
		adapt->adapt_openings++;
}
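
/*
 * Note on the resource accounting above: a channel either draws
 * openings from a private per-channel pool (SCSIPI_CHAN_OPENINGS set,
 * counted in chan_openings) or from a pool shared by all of the
 * adapter's channels (counted in adapt_openings).  A resource obtained
 * with scsipi_get_resource() must therefore be returned via
 * scsipi_put_resource() on the same channel.
 */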

/*
 * scsipi_get_tag:
 *
 *	Get a tag ID for the specified xfer.
 *
 *	NOTE: Must be called at splbio().
 */
void
scsipi_get_tag(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_periph *periph = xs->xs_periph;
	int word, bit, tag;

	for (word = 0; word < PERIPH_NTAGWORDS; word++) {
		bit = ffs(periph->periph_freetags[word]);
		if (bit != 0)
			break;
	}
#ifdef DIAGNOSTIC
	if (word == PERIPH_NTAGWORDS) {
		scsipi_printaddr(periph);
		printf("no free tags\n");
		panic("scsipi_get_tag");
	}
#endif

	bit -= 1;
	periph->periph_freetags[word] &= ~(1 << bit);
	tag = (word << 5) | bit;

	/* XXX Should eventually disallow this completely. */
	if (tag >= periph->periph_openings) {
		scsipi_printaddr(periph);
		printf("WARNING: tag %d greater than available openings %d\n",
		    tag, periph->periph_openings);
	}

	xs->xs_tag_id = tag;
}
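
/*
 * Tag IDs are kept in a bitmap: each of the PERIPH_NTAGWORDS 32-bit
 * words of periph_freetags has one bit per free tag, and a tag ID is
 * encoded as (word << 5) | bit.  For example, tag 37 lives in word 1
 * (37 >> 5), bit 5 (37 & 0x1f); scsipi_put_tag() below reverses
 * exactly this encoding when it returns the tag to the pool.
 */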

/*
 * scsipi_put_tag:
 *
 *	Put the tag ID for the specified xfer back into the pool.
 *
 *	NOTE: Must be called at splbio().
 */
void
scsipi_put_tag(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_periph *periph = xs->xs_periph;
	int word, bit;

	word = xs->xs_tag_id >> 5;
	bit = xs->xs_tag_id & 0x1f;

	periph->periph_freetags[word] |= (1 << bit);
}

/*
 * scsipi_get_xs:
 *
 *	Allocate an xfer descriptor and associate it with the
 *	specified peripheral.  If the peripheral has no more
 *	available command openings, we either block waiting for
 *	one to become available, or fail.
 */
struct scsipi_xfer *
scsipi_get_xs(periph, flags)
	struct scsipi_periph *periph;
	int flags;
{
	struct scsipi_xfer *xs;
	int s;

	SC_DEBUG(periph, SCSIPI_DB3, ("scsipi_get_xs\n"));

	/*
	 * If we're cold, make sure we poll.
	 */
	if (cold)
		flags |= XS_CTL_NOSLEEP | XS_CTL_POLL;

#ifdef DIAGNOSTIC
	/*
	 * URGENT commands can never be ASYNC.
	 */
	if ((flags & (XS_CTL_URGENT|XS_CTL_ASYNC)) ==
	    (XS_CTL_URGENT|XS_CTL_ASYNC)) {
		scsipi_printaddr(periph);
		printf("URGENT and ASYNC\n");
		panic("scsipi_get_xs");
	}
#endif

	s = splbio();
	/*
	 * Wait for a command opening to become available.  Rules:
	 *
	 *	- All xfers must wait for an available opening.
	 *	  Exception: URGENT xfers can proceed when
	 *	  active == openings, because we use the opening
	 *	  of the command we're recovering for.
	 *	- If the periph has sense pending, only URGENT & REQSENSE
	 *	  xfers may proceed.
	 *
	 *	- If the periph is recovering, only URGENT xfers may
	 *	  proceed.
	 *
	 *	- If the periph is currently executing a recovery
	 *	  command, URGENT commands must block, because only
	 *	  one recovery command can execute at a time.
	 */
	for (;;) {
		if (flags & XS_CTL_URGENT) {
			if (periph->periph_active > periph->periph_openings)
				goto wait_for_opening;
			if (periph->periph_flags & PERIPH_SENSE) {
				if ((flags & XS_CTL_REQSENSE) == 0)
					goto wait_for_opening;
			} else {
				if ((periph->periph_flags &
				    PERIPH_RECOVERY_ACTIVE) != 0)
					goto wait_for_opening;
				periph->periph_flags |= PERIPH_RECOVERY_ACTIVE;
			}
			break;
		}
		if (periph->periph_active >= periph->periph_openings ||
		    (periph->periph_flags & PERIPH_RECOVERING) != 0)
			goto wait_for_opening;
		periph->periph_active++;
		break;

 wait_for_opening:
		if (flags & XS_CTL_NOSLEEP) {
			splx(s);
			return (NULL);
		}
		SC_DEBUG(periph, SCSIPI_DB3, ("sleeping\n"));
		periph->periph_flags |= PERIPH_WAITING;
		(void) tsleep(periph, PRIBIO, "getxs", 0);
	}
	SC_DEBUG(periph, SCSIPI_DB3, ("calling pool_get\n"));
	xs = pool_get(&scsipi_xfer_pool,
	    ((flags & XS_CTL_NOSLEEP) != 0 ? PR_NOWAIT : PR_WAITOK));
	if (xs == NULL) {
		if (flags & XS_CTL_URGENT) {
			if ((flags & XS_CTL_REQSENSE) == 0)
				periph->periph_flags &= ~PERIPH_RECOVERY_ACTIVE;
		} else
			periph->periph_active--;
		scsipi_printaddr(periph);
		printf("unable to allocate %sscsipi_xfer\n",
		    (flags & XS_CTL_URGENT) ? "URGENT " : "");
	}
	splx(s);

	SC_DEBUG(periph, SCSIPI_DB3, ("returning\n"));

	if (xs != NULL) {
		memset(xs, 0, sizeof(*xs));
		callout_init(&xs->xs_callout);
		xs->xs_periph = periph;
		xs->xs_control = flags;
		xs->xs_status = 0;
		s = splbio();
		TAILQ_INSERT_TAIL(&periph->periph_xferq, xs, device_q);
		splx(s);
	}
	return (xs);
}

/*
 * scsipi_put_xs:
 *
 *	Release an xfer descriptor, decreasing the outstanding command
 *	count for the peripheral.  If there is a thread waiting for
 *	an opening, wake it up.  If not, kick any queued I/O the
 *	peripheral may have.
 *
 *	NOTE: Must be called at splbio().
 */
void
scsipi_put_xs(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_periph *periph = xs->xs_periph;
	int flags = xs->xs_control;

	SC_DEBUG(periph, SCSIPI_DB3, ("scsipi_free_xs\n"));

	TAILQ_REMOVE(&periph->periph_xferq, xs, device_q);
	pool_put(&scsipi_xfer_pool, xs);

#ifdef DIAGNOSTIC
	if ((periph->periph_flags & PERIPH_RECOVERY_ACTIVE) != 0 &&
	    periph->periph_active == 0) {
		scsipi_printaddr(periph);
499 printf("recovery without a command to recovery for\n");
		panic("scsipi_put_xs");
	}
#endif

	if (flags & XS_CTL_URGENT) {
		if ((flags & XS_CTL_REQSENSE) == 0)
			periph->periph_flags &= ~PERIPH_RECOVERY_ACTIVE;
	} else
		periph->periph_active--;
	if (periph->periph_active == 0 &&
	    (periph->periph_flags & PERIPH_WAITDRAIN) != 0) {
		periph->periph_flags &= ~PERIPH_WAITDRAIN;
		wakeup(&periph->periph_active);
	}

	if (periph->periph_flags & PERIPH_WAITING) {
		periph->periph_flags &= ~PERIPH_WAITING;
		wakeup(periph);
	} else {
		if (periph->periph_switch->psw_start != NULL) {
			SC_DEBUG(periph, SCSIPI_DB2,
			    ("calling private start()\n"));
			(*periph->periph_switch->psw_start)(periph);
		}
	}
}

/*
 * scsipi_channel_freeze:
 *
 *	Freeze a channel's xfer queue.
 */
void
scsipi_channel_freeze(chan, count)
	struct scsipi_channel *chan;
	int count;
{
	int s;

	s = splbio();
	chan->chan_qfreeze += count;
	splx(s);
}

/*
 * scsipi_channel_thaw:
 *
 *	Thaw a channel's xfer queue.
 */
void
scsipi_channel_thaw(chan, count)
	struct scsipi_channel *chan;
	int count;
{
	int s;

	s = splbio();
	chan->chan_qfreeze -= count;
	splx(s);
}

/*
 * scsipi_channel_timed_thaw:
 *
 *	Thaw a channel after some time has expired.
 */
void
scsipi_channel_timed_thaw(arg)
	void *arg;
{
	struct scsipi_channel *chan = arg;

	scsipi_channel_thaw(chan, 1);

	/*
	 * Kick the channel's queue here.  Note, we're running in
	 * interrupt context (softclock), so the adapter driver
	 * had better not sleep.
	 */
	scsipi_run_queue(chan);
}

/*
 * scsipi_periph_freeze:
 *
 *	Freeze a device's xfer queue.
 */
void
scsipi_periph_freeze(periph, count)
	struct scsipi_periph *periph;
	int count;
{
	int s;

	s = splbio();
	periph->periph_qfreeze += count;
	splx(s);
}

/*
 * scsipi_periph_thaw:
 *
 *	Thaw a device's xfer queue.
 */
void
scsipi_periph_thaw(periph, count)
	struct scsipi_periph *periph;
	int count;
{
	int s;

	s = splbio();
	periph->periph_qfreeze -= count;
	if (periph->periph_qfreeze == 0 &&
	    (periph->periph_flags & PERIPH_WAITING) != 0)
		wakeup(periph);
	splx(s);
}

/*
 * scsipi_periph_timed_thaw:
 *
 *	Thaw a device after some time has expired.
 */
void
scsipi_periph_timed_thaw(arg)
	void *arg;
{
	struct scsipi_periph *periph = arg;

	callout_stop(&periph->periph_callout);
	scsipi_periph_thaw(periph, 1);

	/*
	 * Kick the channel's queue here.  Note, we're running in
	 * interrupt context (softclock), so the adapter driver
	 * had better not sleep.
	 */
	scsipi_run_queue(periph->periph_channel);
}

/*
 * scsipi_wait_drain:
 *
 *	Wait for a periph's pending xfers to drain.
 */
void
scsipi_wait_drain(periph)
	struct scsipi_periph *periph;
{
	int s;

	s = splbio();
	while (periph->periph_active != 0) {
		periph->periph_flags |= PERIPH_WAITDRAIN;
		(void) tsleep(&periph->periph_active, PRIBIO, "sxdrn", 0);
	}
	splx(s);
}

/*
 * scsipi_kill_pending:
 *
 *	Kill off all pending xfers for a periph.
 *
 *	NOTE: Must be called at splbio().
 */
void
scsipi_kill_pending(periph)
	struct scsipi_periph *periph;
{

	(*periph->periph_channel->chan_bustype->bustype_kill_pending)(periph);
#ifdef DIAGNOSTIC
	if (TAILQ_FIRST(&periph->periph_xferq) != NULL)
		panic("scsipi_kill_pending");
#endif
	scsipi_wait_drain(periph);
}

/*
 * scsipi_interpret_sense:
 *
 *	Look at the returned sense and act on the error, determining
 *	the unix error number to pass back.  (0 = report no error)
 *	NOTE: If we return ERESTART, we are expected to have
 *	thawed the device!
 *
 *	THIS IS THE DEFAULT ERROR HANDLER FOR SCSI DEVICES.
 */
int
scsipi_interpret_sense(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_sense_data *sense;
	struct scsipi_periph *periph = xs->xs_periph;
	u_int8_t key;
	u_int32_t info;
	int error;
#ifndef SCSIVERBOSE
	static char *error_mes[] = {
		"soft error (corrected)",
		"not ready", "medium error",
		"non-media hardware failure", "illegal request",
		"unit attention", "readonly device",
		"no data found", "vendor unique",
		"copy aborted", "command aborted",
		"search returned equal", "volume overflow",
		"verify miscompare", "unknown error key"
	};
#endif

	sense = &xs->sense.scsi_sense;
#ifdef SCSIPI_DEBUG
	if (periph->periph_flags & SCSIPI_DB1) {
		int count;
		scsipi_printaddr(periph);
		printf(" sense debug information:\n");
		printf("\tcode 0x%x valid 0x%x\n",
		    sense->error_code & SSD_ERRCODE,
		    sense->error_code & SSD_ERRCODE_VALID ? 1 : 0);
		printf("\tseg 0x%x key 0x%x ili 0x%x eom 0x%x fmark 0x%x\n",
		    sense->segment,
		    sense->flags & SSD_KEY,
		    sense->flags & SSD_ILI ? 1 : 0,
		    sense->flags & SSD_EOM ? 1 : 0,
		    sense->flags & SSD_FILEMARK ? 1 : 0);
728 printf("\ninfo: 0x%x 0x%x 0x%x 0x%x followed by %d "
729 "extra bytes\n",
730 sense->info[0],
731 sense->info[1],
732 sense->info[2],
733 sense->info[3],
734 sense->extra_len);
735 printf("\textra: ");
736 for (count = 0; count < ADD_BYTES_LIM(sense); count++)
737 printf("0x%x ", sense->cmd_spec_info[count]);
738 printf("\n");
739 }
740 #endif
741
742 /*
	 * If the periph has its own error handler, call it first.
	 * If it returns a legit error value, return that, otherwise
	 * it wants us to continue with normal error processing.
	 */
	if (periph->periph_switch->psw_error != NULL) {
		SC_DEBUG(periph, SCSIPI_DB2,
		    ("calling private err_handler()\n"));
		error = (*periph->periph_switch->psw_error)(xs);
		if (error != EJUSTRETURN)
			return (error);
	}
	/* otherwise use the default */
	switch (sense->error_code & SSD_ERRCODE) {
		/*
		 * If it's code 70, use the extended stuff and
		 * interpret the key
		 */
	case 0x71:		/* delayed error */
		scsipi_printaddr(periph);
		key = sense->flags & SSD_KEY;
		printf(" DEFERRED ERROR, key = 0x%x\n", key);
		/* FALLTHROUGH */
	case 0x70:
		if ((sense->error_code & SSD_ERRCODE_VALID) != 0)
			info = _4btol(sense->info);
		else
			info = 0;
		key = sense->flags & SSD_KEY;

		switch (key) {
		case SKEY_NO_SENSE:
		case SKEY_RECOVERED_ERROR:
			if (xs->resid == xs->datalen && xs->datalen) {
				/*
				 * Why is this here?
				 */
				xs->resid = 0;	/* not short read */
			}
			/* FALLTHROUGH */
		case SKEY_EQUAL:
			error = 0;
			break;
		case SKEY_NOT_READY:
			if ((periph->periph_flags & PERIPH_REMOVABLE) != 0)
				periph->periph_flags &= ~PERIPH_MEDIA_LOADED;
			if ((xs->xs_control & XS_CTL_IGNORE_NOT_READY) != 0)
				return (0);
			if (sense->add_sense_code == 0x3A &&
			    sense->add_sense_code_qual == 0x00)
				error = ENODEV;	/* Medium not present */
			else
				error = EIO;
			if ((xs->xs_control & XS_CTL_SILENT) != 0)
				return (error);
			break;
		case SKEY_ILLEGAL_REQUEST:
			if ((xs->xs_control &
			    XS_CTL_IGNORE_ILLEGAL_REQUEST) != 0)
				return (0);
			/*
			 * Handle the case where a device reports
			 * Logical Unit Not Supported during discovery.
			 */
			if ((xs->xs_control & XS_CTL_DISCOVERY) != 0 &&
			    sense->add_sense_code == 0x25 &&
			    sense->add_sense_code_qual == 0x00)
				return (EINVAL);
			if ((xs->xs_control & XS_CTL_SILENT) != 0)
				return (EIO);
			error = EINVAL;
			break;
		case SKEY_UNIT_ATTENTION:
			if (sense->add_sense_code == 0x29 &&
			    sense->add_sense_code_qual == 0x00) {
				/* device or bus reset */
				return (ERESTART);
			}
			if ((periph->periph_flags & PERIPH_REMOVABLE) != 0)
				periph->periph_flags &= ~PERIPH_MEDIA_LOADED;
			if ((xs->xs_control &
			    XS_CTL_IGNORE_MEDIA_CHANGE) != 0 ||
			    /* XXX Should reupload any transient state. */
			    (periph->periph_flags &
			    PERIPH_REMOVABLE) == 0) {
				return (ERESTART);
			}
			if ((xs->xs_control & XS_CTL_SILENT) != 0)
				return (EIO);
			error = EIO;
			break;
		case SKEY_WRITE_PROTECT:
			error = EROFS;
			break;
		case SKEY_BLANK_CHECK:
			error = 0;
			break;
		case SKEY_ABORTED_COMMAND:
			error = ERESTART;
			break;
		case SKEY_VOLUME_OVERFLOW:
			error = ENOSPC;
			break;
		default:
			error = EIO;
			break;
		}

#ifdef SCSIVERBOSE
		if (key && (xs->xs_control & XS_CTL_SILENT) == 0)
			scsipi_print_sense(xs, 0);
#else
		if (key) {
			scsipi_printaddr(periph);
			printf("%s", error_mes[key - 1]);
			if ((sense->error_code & SSD_ERRCODE_VALID) != 0) {
				switch (key) {
				case SKEY_NOT_READY:
				case SKEY_ILLEGAL_REQUEST:
				case SKEY_UNIT_ATTENTION:
				case SKEY_WRITE_PROTECT:
					break;
				case SKEY_BLANK_CHECK:
					printf(", requested size: %d (decimal)",
					    info);
					break;
				case SKEY_ABORTED_COMMAND:
					if (xs->xs_retries)
						printf(", retrying");
					printf(", cmd 0x%x, info 0x%x",
					    xs->cmd->opcode, info);
					break;
				default:
					printf(", info = %d (decimal)", info);
				}
			}
			if (sense->extra_len != 0) {
				int n;
				printf(", data =");
				for (n = 0; n < sense->extra_len; n++)
					printf(" %02x",
					    sense->cmd_spec_info[n]);
			}
			printf("\n");
		}
#endif
		return (error);

	/*
	 * Not code 70, just report it
	 */
	default:
#if defined(SCSIDEBUG) || defined(DEBUG)
	{
		static char *uc = "undecodable sense error";
		int i;
		u_int8_t *cptr = (u_int8_t *) sense;
		scsipi_printaddr(periph);
		if (xs->cmd == &xs->cmdstore) {
			printf("%s for opcode 0x%x, data=",
			    uc, xs->cmdstore.opcode);
		} else {
			printf("%s, data=", uc);
		}
		for (i = 0; i < sizeof (*sense); i++)
906 printf(" 0x%02x", *(cptr++) & 0xff);
907 printf("\n");
908 }
909 #else
910
911 scsipi_printaddr(periph);
912 printf("Sense Error Code 0x%x",
913 sense->error_code & SSD_ERRCODE);
914 if ((sense->error_code & SSD_ERRCODE_VALID) != 0) {
915 struct scsipi_sense_data_unextended *usense =
916 (struct scsipi_sense_data_unextended *)sense;
917 printf(" at block no. %d (decimal)",
918 _3btol(usense->block));
919 }
920 printf("\n");
921 #endif
922 return (EIO);
923 }
924 }
925
926 /*
927 * scsipi_size:
928 *
929 * Find out from the device what its capacity is.
930 */
931 u_long
932 scsipi_size(periph, flags)
933 struct scsipi_periph *periph;
934 int flags;
935 {
936 struct scsipi_read_cap_data rdcap;
937 struct scsipi_read_capacity scsipi_cmd;
938
939 bzero(&scsipi_cmd, sizeof(scsipi_cmd));
940 scsipi_cmd.opcode = READ_CAPACITY;
941
942 /*
943 * If the command works, interpret the result as a 4 byte
944 * number of blocks
945 */
946 if (scsipi_command(periph, (struct scsipi_generic *)&scsipi_cmd,
947 sizeof(scsipi_cmd), (u_char *)&rdcap, sizeof(rdcap),
948 SCSIPIRETRIES, 20000, NULL,
949 flags | XS_CTL_DATA_IN | XS_CTL_DATA_ONSTACK) != 0) {
950 scsipi_printaddr(periph);
951 printf("could not get size\n");
952 return (0);
953 }
954
955 return (_4btol(rdcap.addr) + 1);
956 }
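
/*
 * Note: READ CAPACITY returns the address of the last block, which is
 * why one is added to the result above to turn it into a block count.
 * For example, a device whose last block is number 4194303 (0x3fffff)
 * has a capacity of 4194304 blocks.
 */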

/*
 * scsipi_test_unit_ready:
 *
 *	Issue a `test unit ready' request.
 */
int
scsipi_test_unit_ready(periph, flags)
	struct scsipi_periph *periph;
	int flags;
{
	struct scsipi_test_unit_ready scsipi_cmd;

	/* some ATAPI drives don't support TEST_UNIT_READY. Sigh */
	if (periph->periph_quirks & PQUIRK_NOTUR)
		return (0);

	bzero(&scsipi_cmd, sizeof(scsipi_cmd));
	scsipi_cmd.opcode = TEST_UNIT_READY;

	return (scsipi_command(periph,
	    (struct scsipi_generic *)&scsipi_cmd, sizeof(scsipi_cmd),
	    0, 0, SCSIPIRETRIES, 10000, NULL, flags));
}

/*
 * scsipi_inquire:
 *
 *	Ask the device about itself.
 */
int
scsipi_inquire(periph, inqbuf, flags)
	struct scsipi_periph *periph;
	struct scsipi_inquiry_data *inqbuf;
	int flags;
{
	struct scsipi_inquiry scsipi_cmd;

	bzero(&scsipi_cmd, sizeof(scsipi_cmd));
	scsipi_cmd.opcode = INQUIRY;
	scsipi_cmd.length = sizeof(struct scsipi_inquiry_data);

	return (scsipi_command(periph,
	    (struct scsipi_generic *) &scsipi_cmd, sizeof(scsipi_cmd),
	    (u_char *) inqbuf, sizeof(struct scsipi_inquiry_data),
	    SCSIPIRETRIES, 10000, NULL, XS_CTL_DATA_IN | flags));
}

/*
 * scsipi_prevent:
 *
 *	Prevent or allow the user to remove the media
 */
int
scsipi_prevent(periph, type, flags)
	struct scsipi_periph *periph;
	int type, flags;
{
	struct scsipi_prevent scsipi_cmd;

	if (periph->periph_quirks & PQUIRK_NODOORLOCK)
		return (0);

	bzero(&scsipi_cmd, sizeof(scsipi_cmd));
	scsipi_cmd.opcode = PREVENT_ALLOW;
	scsipi_cmd.how = type;

	return (scsipi_command(periph,
	    (struct scsipi_generic *) &scsipi_cmd, sizeof(scsipi_cmd),
	    0, 0, SCSIPIRETRIES, 5000, NULL, flags));
}

/*
 * scsipi_start:
 *
 *	Send a START UNIT.
 */
int
scsipi_start(periph, type, flags)
	struct scsipi_periph *periph;
	int type, flags;
{
	struct scsipi_start_stop scsipi_cmd;

	if (periph->periph_quirks & PQUIRK_NOSTARTUNIT)
		return 0;

	bzero(&scsipi_cmd, sizeof(scsipi_cmd));
	scsipi_cmd.opcode = START_STOP;
	scsipi_cmd.byte2 = 0x00;
	scsipi_cmd.how = type;

	return (scsipi_command(periph,
	    (struct scsipi_generic *) &scsipi_cmd, sizeof(scsipi_cmd),
	    0, 0, SCSIPIRETRIES, (type & SSS_START) ? 60000 : 10000,
	    NULL, flags));
}

/*
 * scsipi_done:
 *
 *	This routine is called by an adapter's interrupt handler when
 *	an xfer is completed.
 */
void
scsipi_done(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_periph *periph = xs->xs_periph;
	struct scsipi_channel *chan = periph->periph_channel;
	int s, freezecnt;

	SC_DEBUG(periph, SCSIPI_DB2, ("scsipi_done\n"));
#ifdef SCSIPI_DEBUG
	if (periph->periph_dbflags & SCSIPI_DB1)
		show_scsipi_cmd(xs);
#endif

	s = splbio();
	/*
	 * The resource this command was using is now free.
	 */
	scsipi_put_resource(chan);

	/*
	 * If the command was tagged, free the tag.
	 */
	if (XS_CTL_TAGTYPE(xs) != 0)
		scsipi_put_tag(xs);
	else
		periph->periph_flags &= ~PERIPH_UNTAG;

	/* Mark the command as `done'. */
	xs->xs_status |= XS_STS_DONE;

#ifdef DIAGNOSTIC
	if ((xs->xs_control & (XS_CTL_ASYNC|XS_CTL_POLL)) ==
	    (XS_CTL_ASYNC|XS_CTL_POLL))
		panic("scsipi_done: ASYNC and POLL");
#endif

	/*
	 * If the xfer had an error of any sort, freeze the
	 * periph's queue.  Freeze it again if we were requested
	 * to do so in the xfer.
	 */
	freezecnt = 0;
	if (xs->error != XS_NOERROR)
		freezecnt++;
	if (xs->xs_control & XS_CTL_FREEZE_PERIPH)
		freezecnt++;
	if (freezecnt != 0)
		scsipi_periph_freeze(periph, freezecnt);

	/*
	 * Record the xfer with a pending sense, in case a SCSI reset is
	 * received before the thread is woken up.
	 */
	if (xs->error == XS_BUSY && xs->status == SCSI_CHECK) {
		periph->periph_flags |= PERIPH_SENSE;
		periph->periph_xscheck = xs;
	}

	/*
	 * If this was an xfer that was not to complete asynchronously,
	 * let the requesting thread perform error checking/handling
	 * in its context.
	 */
	if ((xs->xs_control & XS_CTL_ASYNC) == 0) {
		splx(s);
		/*
		 * If it's a polling job, just return, to unwind the
		 * call graph.  We don't need to restart the queue,
		 * because polling jobs are treated specially, and
		 * are really only used during crash dumps anyway
		 * (XXX or during boot-time autoconfiguration of
		 * ATAPI devices).
		 */
		if (xs->xs_control & XS_CTL_POLL)
			return;
		wakeup(xs);
		goto out;
	}

	/*
	 * Catch the extremely common case of I/O completing
	 * without error; no use in taking a context switch
	 * if we can handle it in interrupt context.
	 */
	if (xs->error == XS_NOERROR) {
		splx(s);
		(void) scsipi_complete(xs);
		goto out;
	}

	/*
	 * There is an error on this xfer.  Put it on the channel's
	 * completion queue, and wake up the completion thread.
	 */
	TAILQ_INSERT_TAIL(&chan->chan_complete, xs, channel_q);
	splx(s);
	wakeup(&chan->chan_complete);

 out:
	/*
	 * If there are more xfers on the channel's queue, attempt to
	 * run them.
	 */
	scsipi_run_queue(chan);
}

/*
 * scsipi_complete:
 *
 *	Completion of a scsipi_xfer.  This is the guts of scsipi_done().
 *
 *	NOTE: This routine MUST be called with valid thread context
 *	except for the case where the following two conditions are
 *	true:
 *
 *		xs->error == XS_NOERROR
 *		XS_CTL_ASYNC is set in xs->xs_control
 *
 *	The semantics of this routine can be tricky, so here is an
 *	explanation:
 *
 *		0		Xfer completed successfully.
 *
 *		ERESTART	Xfer had an error, but was restarted.
 *
 *		anything else	Xfer had an error, return value is Unix
 *				errno.
 *
 *	If the return value is anything but ERESTART:
 *
 *		- If XS_CTL_ASYNC is set, `xs' has been freed back to
 *		  the pool.
 *		- If there is a buf associated with the xfer,
 *		  it has been biodone()'d.
 */
int
scsipi_complete(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_periph *periph = xs->xs_periph;
	struct scsipi_channel *chan = periph->periph_channel;
	struct buf *bp;
	int error, s;

#ifdef DIAGNOSTIC
	if ((xs->xs_control & XS_CTL_ASYNC) != 0 && xs->bp == NULL)
		panic("scsipi_complete: XS_CTL_ASYNC but no buf");
#endif
	/*
	 * If command terminated with a CHECK CONDITION, we need to issue a
	 * REQUEST_SENSE command. Once the REQUEST_SENSE has been processed
	 * we'll have the real status.
	 * Must be processed at splbio() to avoid missing a SCSI bus reset
	 * for this command.
	 */
	s = splbio();
	if (xs->error == XS_BUSY && xs->status == SCSI_CHECK) {
		/* request sense for a request sense ? */
		if (xs->xs_control & XS_CTL_REQSENSE) {
			scsipi_printaddr(periph);
			printf("request sense for a request sense ?\n");
			/* XXX maybe we should reset the device ? */
			/* we've been frozen because xs->error != XS_NOERROR */
			scsipi_periph_thaw(periph, 1);
			splx(s);
			return (EINVAL);
		}
		scsipi_request_sense(xs);
	}
	splx(s);
	/*
	 * If it's a user level request, bypass all usual completion
	 * processing, let the user work it out..
	 */
	if ((xs->xs_control & XS_CTL_USERCMD) != 0) {
		SC_DEBUG(periph, SCSIPI_DB3, ("calling user done()\n"));
		if (xs->error != XS_NOERROR)
			scsipi_periph_thaw(periph, 1);
		scsipi_user_done(xs);
		SC_DEBUG(periph, SCSIPI_DB3, ("returned from user done()\n"));
		return (0);
	}

	switch (xs->error) {
	case XS_NOERROR:
		error = 0;
		break;

	case XS_SENSE:
	case XS_SHORTSENSE:
		error = (*chan->chan_bustype->bustype_interpret_sense)(xs);
		break;

	case XS_RESOURCE_SHORTAGE:
		/*
		 * XXX Should freeze channel's queue.
		 */
		scsipi_printaddr(periph);
		printf("adapter resource shortage\n");
		/* FALLTHROUGH */

	case XS_BUSY:
		if (xs->error == XS_BUSY && xs->status == SCSI_QUEUE_FULL) {
			struct scsipi_max_openings mo;

			/*
			 * We set the openings to active - 1, assuming that
			 * the command that got us here is the first one that
			 * can't fit into the device's queue.  If that's not
			 * the case, I guess we'll find out soon enough.
			 */
			mo.mo_target = periph->periph_target;
			mo.mo_lun = periph->periph_lun;
			mo.mo_openings = periph->periph_active - 1;
#ifdef DIAGNOSTIC
			if (mo.mo_openings < 0) {
				scsipi_printaddr(periph);
				printf("QUEUE FULL resulted in < 0 openings\n");
				panic("scsipi_done");
			}
#endif
			if (mo.mo_openings == 0) {
				scsipi_printaddr(periph);
				printf("QUEUE FULL resulted in 0 openings\n");
				mo.mo_openings = 1;
			}
			scsipi_async_event(chan, ASYNC_EVENT_MAX_OPENINGS, &mo);
			error = ERESTART;
		} else if (xs->xs_retries != 0) {
			xs->xs_retries--;
			/*
			 * Wait one second, and try again.
			 */
			if (xs->xs_control & XS_CTL_POLL)
				delay(1000000);
			else {
				scsipi_periph_freeze(periph, 1);
				callout_reset(&periph->periph_callout,
				    hz, scsipi_periph_timed_thaw, periph);
			}
			error = ERESTART;
		} else
			error = EBUSY;
		break;

	case XS_REQUEUE:
		error = ERESTART;
		break;

	case XS_TIMEOUT:
		if (xs->xs_retries != 0) {
			xs->xs_retries--;
			error = ERESTART;
		} else
			error = EIO;
		break;

	case XS_SELTIMEOUT:
		/* XXX Disable device? */
		error = EIO;
		break;

	case XS_RESET:
		if (xs->xs_control & XS_CTL_REQSENSE) {
			/*
			 * request sense interrupted by reset: signal it
			 * with EINTR return code.
			 */
			error = EINTR;
		} else {
			if (xs->xs_retries != 0) {
				xs->xs_retries--;
				error = ERESTART;
			} else
				error = EIO;
		}
		break;

	default:
		scsipi_printaddr(periph);
		printf("invalid return code from adapter: %d\n", xs->error);
		error = EIO;
		break;
	}

	s = splbio();
	if (error == ERESTART) {
		/*
		 * If we get here, the periph has been thawed and frozen
		 * again if we had to issue recovery commands.  Alternatively,
		 * it may have been frozen again and in a timed thaw.  In
		 * any case, we thaw the periph once we re-enqueue the
		 * command.  Once the periph is fully thawed, it will begin
		 * operation again.
		 */
		xs->error = XS_NOERROR;
		xs->status = SCSI_OK;
		xs->xs_status &= ~XS_STS_DONE;
		xs->xs_requeuecnt++;
		error = scsipi_enqueue(xs);
		if (error == 0) {
			scsipi_periph_thaw(periph, 1);
			splx(s);
			return (ERESTART);
		}
	}

	/*
	 * scsipi_done() freezes the queue if not XS_NOERROR.
	 * Thaw it here.
	 */
	if (xs->error != XS_NOERROR)
		scsipi_periph_thaw(periph, 1);

	if ((bp = xs->bp) != NULL) {
		if (error) {
			bp->b_error = error;
			bp->b_flags |= B_ERROR;
			bp->b_resid = bp->b_bcount;
		} else {
			bp->b_error = 0;
			bp->b_resid = xs->resid;
		}
		biodone(bp);
	}

	if (xs->xs_control & XS_CTL_ASYNC)
		scsipi_put_xs(xs);
	splx(s);

	return (error);
}

/*
 * Issue a request sense for the given scsipi_xfer.  Called when the xfer
 * returns with a CHECK_CONDITION status.  Must be called in valid thread
 * context and at splbio().
 */

void
scsipi_request_sense(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_periph *periph = xs->xs_periph;
	int flags, error;
	struct scsipi_sense cmd;

	periph->periph_flags |= PERIPH_SENSE;

	/* if command was polling, request sense will too */
	flags = xs->xs_control & XS_CTL_POLL;
	/* Polling commands can't sleep */
	if (flags)
		flags |= XS_CTL_NOSLEEP;

	flags |= XS_CTL_REQSENSE | XS_CTL_URGENT | XS_CTL_DATA_IN |
	    XS_CTL_THAW_PERIPH | XS_CTL_FREEZE_PERIPH;

	bzero(&cmd, sizeof(cmd));
	cmd.opcode = REQUEST_SENSE;
	cmd.length = sizeof(struct scsipi_sense_data);

	error = scsipi_command(periph,
	    (struct scsipi_generic *) &cmd, sizeof(cmd),
	    (u_char*)&xs->sense.scsi_sense, sizeof(struct scsipi_sense_data),
	    0, 1000, NULL, flags);
	periph->periph_flags &= ~PERIPH_SENSE;
	periph->periph_xscheck = NULL;
	switch(error) {
	case 0:
		/* we have a valid sense */
		xs->error = XS_SENSE;
		return;
	case EINTR:
		/* REQUEST_SENSE interrupted by bus reset. */
		xs->error = XS_RESET;
		return;
	case EIO:
		/* request sense couldn't be performed */
		/*
		 * XXX this isn't quite right but we don't have anything
		 * better for now
		 */
		xs->error = XS_DRIVER_STUFFUP;
		return;
	default:
		/* Notify that request sense failed. */
		xs->error = XS_DRIVER_STUFFUP;
		scsipi_printaddr(periph);
		printf("request sense failed with error %d\n", error);
		return;
	}
}

/*
 * scsipi_enqueue:
 *
 *	Enqueue an xfer on a channel.
 */
int
scsipi_enqueue(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_channel *chan = xs->xs_periph->periph_channel;
	struct scsipi_xfer *qxs;
	int s;

	s = splbio();

	/*
	 * If the xfer is to be polled, and there are already jobs on
	 * the queue, we can't proceed.
	 */
	if ((xs->xs_control & XS_CTL_POLL) != 0 &&
	    TAILQ_FIRST(&chan->chan_queue) != NULL) {
		splx(s);
		xs->error = XS_DRIVER_STUFFUP;
		return (EAGAIN);
	}

	/*
	 * If we have an URGENT xfer, it's an error recovery command
	 * and it should just go on the head of the channel's queue.
	 */
	if (xs->xs_control & XS_CTL_URGENT) {
		TAILQ_INSERT_HEAD(&chan->chan_queue, xs, channel_q);
		goto out;
	}

	/*
	 * If this xfer has already been on the queue before, we
	 * need to reinsert it in the correct order.  That order is:
	 *
	 *	Immediately before the first xfer for this periph
	 *	with a requeuecnt less than xs->xs_requeuecnt.
	 *
	 *	Failing that, at the end of the queue.  (We'll end up
	 *	there naturally.)
	 */
	if (xs->xs_requeuecnt != 0) {
		for (qxs = TAILQ_FIRST(&chan->chan_queue); qxs != NULL;
		     qxs = TAILQ_NEXT(qxs, channel_q)) {
			if (qxs->xs_periph == xs->xs_periph &&
			    qxs->xs_requeuecnt < xs->xs_requeuecnt)
				break;
		}
		if (qxs != NULL) {
			TAILQ_INSERT_AFTER(&chan->chan_queue, qxs, xs,
			    channel_q);
			goto out;
		}
	}
	TAILQ_INSERT_TAIL(&chan->chan_queue, xs, channel_q);
 out:
	if (xs->xs_control & XS_CTL_THAW_PERIPH)
		scsipi_periph_thaw(xs->xs_periph, 1);
	splx(s);
	return (0);
}

/*
 * scsipi_run_queue:
 *
 *	Start as many xfers as possible running on the channel.
 */
void
scsipi_run_queue(chan)
	struct scsipi_channel *chan;
{
	struct scsipi_xfer *xs;
	struct scsipi_periph *periph;
	int s;

	for (;;) {
		s = splbio();

		/*
		 * If the channel is frozen, we can't do any work right
		 * now.
		 */
		if (chan->chan_qfreeze != 0) {
			splx(s);
			return;
		}

		/*
		 * Look for work to do, and make sure we can do it.
		 */
		for (xs = TAILQ_FIRST(&chan->chan_queue); xs != NULL;
		     xs = TAILQ_NEXT(xs, channel_q)) {
			periph = xs->xs_periph;

			if (periph->periph_active > periph->periph_openings ||
			    periph->periph_qfreeze != 0 ||
			    (periph->periph_flags & PERIPH_UNTAG) != 0)
				continue;

			if ((periph->periph_flags &
			    (PERIPH_RECOVERING | PERIPH_SENSE)) != 0 &&
			    (xs->xs_control & XS_CTL_URGENT) == 0)
				continue;

			/*
			 * We can issue this xfer!
			 */
			goto got_one;
		}

		/*
		 * Can't find any work to do right now.
		 */
		splx(s);
		return;

 got_one:
		/*
		 * Have an xfer to run.  Allocate a resource from
		 * the adapter to run it.  If we can't allocate that
		 * resource, we don't dequeue the xfer.
		 */
		if (scsipi_get_resource(chan) == 0) {
			/*
			 * Adapter is out of resources.  If the adapter
			 * supports it, attempt to grow them.
			 */
			if (scsipi_grow_resources(chan) == 0) {
				/*
				 * Wasn't able to grow resources,
				 * nothing more we can do.
				 */
				if (xs->xs_control & XS_CTL_POLL) {
					scsipi_printaddr(xs->xs_periph);
					printf("polling command but no "
					    "adapter resources\n");
					/* We'll panic shortly... */
				}
				splx(s);
				return;
			}
			/*
			 * scsipi_grow_resources() allocated the resource
			 * for us.
			 */
		}

		/*
		 * We have a resource to run this xfer, do it!
		 */
		TAILQ_REMOVE(&chan->chan_queue, xs, channel_q);

		/*
		 * If the command is to be tagged, allocate a tag ID
		 * for it.
		 */
		if (XS_CTL_TAGTYPE(xs) != 0)
			scsipi_get_tag(xs);
		else
			periph->periph_flags |= PERIPH_UNTAG;
		splx(s);

		scsipi_adapter_request(chan, ADAPTER_REQ_RUN_XFER, xs);
	}
#ifdef DIAGNOSTIC
	panic("scsipi_run_queue: impossible");
#endif
}

/*
 * scsipi_execute_xs:
 *
 *	Begin execution of an xfer, waiting for it to complete, if necessary.
 */
int
scsipi_execute_xs(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_periph *periph = xs->xs_periph;
	struct scsipi_channel *chan = periph->periph_channel;
	int async, poll, retries, error, s;

	xs->xs_status &= ~XS_STS_DONE;
	xs->error = XS_NOERROR;
	xs->resid = xs->datalen;
	xs->status = SCSI_OK;

#ifdef SCSIPI_DEBUG
	if (xs->xs_periph->periph_dbflags & SCSIPI_DB3) {
		printf("scsipi_execute_xs: ");
		show_scsipi_xs(xs);
		printf("\n");
	}
#endif

	/*
	 * Deal with command tagging:
	 *
	 *	- If the device's current operating mode doesn't
	 *	  include tagged queueing, clear the tag mask.
	 *
	 *	- If the device's current operating mode *does*
	 *	  include tagged queueing, set the tag_type in
	 *	  the xfer to the appropriate byte for the tag
	 *	  message.
	 */
	if ((PERIPH_XFER_MODE(periph) & PERIPH_CAP_TQING) == 0 ||
	    (xs->xs_control & XS_CTL_REQSENSE)) {
		xs->xs_control &= ~XS_CTL_TAGMASK;
		xs->xs_tag_type = 0;
	} else {
		/*
		 * If the request doesn't specify a tag, give Head
		 * tags to URGENT operations and Ordered tags to
		 * everything else.
		 */
		if (XS_CTL_TAGTYPE(xs) == 0) {
			if (xs->xs_control & XS_CTL_URGENT)
				xs->xs_control |= XS_CTL_HEAD_TAG;
			else
				xs->xs_control |= XS_CTL_ORDERED_TAG;
		}

		switch (XS_CTL_TAGTYPE(xs)) {
		case XS_CTL_ORDERED_TAG:
			xs->xs_tag_type = MSG_ORDERED_Q_TAG;
			break;

		case XS_CTL_SIMPLE_TAG:
			xs->xs_tag_type = MSG_SIMPLE_Q_TAG;
			break;

		case XS_CTL_HEAD_TAG:
			xs->xs_tag_type = MSG_HEAD_OF_Q_TAG;
			break;

		default:
			scsipi_printaddr(periph);
			printf("invalid tag mask 0x%08x\n",
			    XS_CTL_TAGTYPE(xs));
			panic("scsipi_execute_xs");
		}
	}

	/*
	 * If we don't yet have a completion thread, or we are to poll for
	 * completion, clear the ASYNC flag.
	 */
	if (chan->chan_thread == NULL || (xs->xs_control & XS_CTL_POLL) != 0)
		xs->xs_control &= ~XS_CTL_ASYNC;

	async = (xs->xs_control & XS_CTL_ASYNC);
	poll = (xs->xs_control & XS_CTL_POLL);
	retries = xs->xs_retries;		/* for polling commands */

#ifdef DIAGNOSTIC
	if (async != 0 && xs->bp == NULL)
		panic("scsipi_execute_xs: XS_CTL_ASYNC but no buf");
#endif

	/*
	 * Enqueue the transfer.  If we're not polling for completion, this
	 * should ALWAYS return `no error'.
	 */
 try_again:
	error = scsipi_enqueue(xs);
	if (error) {
		if (poll == 0) {
			scsipi_printaddr(periph);
			printf("not polling, but enqueue failed with %d\n",
			    error);
			panic("scsipi_execute_xs");
		}

		scsipi_printaddr(periph);
		printf("failed to enqueue polling command");
		if (retries != 0) {
			printf(", retrying...\n");
			delay(1000000);
			retries--;
			goto try_again;
		}
		printf("\n");
		goto free_xs;
	}

 restarted:
	scsipi_run_queue(chan);

	/*
	 * The xfer is enqueued, and possibly running.  If it's to be
	 * completed asynchronously, just return now.
	 */
	if (async)
		return (EJUSTRETURN);

	/*
	 * Not an asynchronous command; wait for it to complete.
	 */
	while ((xs->xs_status & XS_STS_DONE) == 0) {
		if (poll) {
			scsipi_printaddr(periph);
			printf("polling command not done\n");
			panic("scsipi_execute_xs");
		}
		(void) tsleep(xs, PRIBIO, "xscmd", 0);
	}

	/*
	 * Command is complete.  scsipi_done() has awakened us to perform
	 * the error handling.
	 */
	error = scsipi_complete(xs);
	if (error == ERESTART)
		goto restarted;

	/*
	 * Command completed successfully or fatal error occurred.  Fall
	 * into....
	 */
 free_xs:
	s = splbio();
	scsipi_put_xs(xs);
	splx(s);

	/*
	 * Kick the queue, keep it running in case it stopped for some
	 * reason.
	 */
	scsipi_run_queue(chan);

	return (error);
}

/*
 * scsipi_completion_thread:
 *
 *	This is the completion thread.  We wait for errors on
 *	asynchronous xfers, and perform the error handling
 *	function, restarting the command, if necessary.
 */
void
scsipi_completion_thread(arg)
	void *arg;
{
	struct scsipi_channel *chan = arg;
	struct scsipi_xfer *xs;
	int s;

	for (;;) {
		s = splbio();
		xs = TAILQ_FIRST(&chan->chan_complete);
		if (xs == NULL &&
		    (chan->chan_flags & SCSIPI_CHAN_SHUTDOWN) == 0) {
			splx(s);
			(void) tsleep(&chan->chan_complete, PRIBIO,
			    "sccomp", 0);
			continue;
		}
		if (chan->chan_flags & SCSIPI_CHAN_SHUTDOWN) {
			splx(s);
			break;
		}
		TAILQ_REMOVE(&chan->chan_complete, xs, channel_q);
		splx(s);

		/*
		 * Have an xfer with an error; process it.
		 */
		(void) scsipi_complete(xs);

		/*
		 * Kick the queue; keep it running if it was stopped
		 * for some reason.
		 */
		scsipi_run_queue(chan);
	}

	chan->chan_thread = NULL;

	/* In case parent is waiting for us to exit. */
	wakeup(&chan->chan_thread);

	kthread_exit(0);
}

/*
 * scsipi_create_completion_thread:
 *
 *	Callback to actually create the completion thread.
 */
void
scsipi_create_completion_thread(arg)
	void *arg;
{
	struct scsipi_channel *chan = arg;
	struct scsipi_adapter *adapt = chan->chan_adapter;

	if (kthread_create1(scsipi_completion_thread, chan,
	    &chan->chan_thread, "%s:%d", adapt->adapt_dev->dv_xname,
	    chan->chan_channel)) {
		printf("%s: unable to create completion thread for "
		    "channel %d\n", adapt->adapt_dev->dv_xname,
		    chan->chan_channel);
		panic("scsipi_create_completion_thread");
	}
}

/*
 * scsipi_async_event:
 *
 *	Handle an asynchronous event from an adapter.
 */
void
scsipi_async_event(chan, event, arg)
	struct scsipi_channel *chan;
	scsipi_async_event_t event;
	void *arg;
{
	int s;

	s = splbio();
	switch (event) {
	case ASYNC_EVENT_MAX_OPENINGS:
		scsipi_async_event_max_openings(chan,
		    (struct scsipi_max_openings *)arg);
		break;

	case ASYNC_EVENT_XFER_MODE:
		scsipi_async_event_xfer_mode(chan,
		    (struct scsipi_xfer_mode *)arg);
		break;
	case ASYNC_EVENT_RESET:
		scsipi_async_event_channel_reset(chan);
		break;
	}
	splx(s);
}

/*
 * scsipi_print_xfer_mode:
 *
 *	Print a periph's currently-negotiated xfer mode.
 */
void
scsipi_print_xfer_mode(periph)
	struct scsipi_periph *periph;
{
	int period, freq, speed, mbs;

	if ((periph->periph_flags & PERIPH_MODE_VALID) == 0)
		return;

	printf("%s: ", periph->periph_dev->dv_xname);
	if (periph->periph_mode & PERIPH_CAP_SYNC) {
		period = scsipi_sync_factor_to_period(periph->periph_period);
		printf("Sync (%d.%dns offset %d)",
		    period / 10, period % 10, periph->periph_offset);
	} else
		printf("Async");

	if (periph->periph_mode & PERIPH_CAP_WIDE32)
		printf(", 32-bit");
	else if (periph->periph_mode & PERIPH_CAP_WIDE16)
		printf(", 16-bit");
	else
		printf(", 8-bit");

	if (periph->periph_mode & PERIPH_CAP_SYNC) {
		freq = scsipi_sync_factor_to_freq(periph->periph_period);
		speed = freq;
		if (periph->periph_mode & PERIPH_CAP_WIDE32)
			speed *= 4;
		else if (periph->periph_mode & PERIPH_CAP_WIDE16)
			speed *= 2;
		mbs = speed / 1000;
		if (mbs > 0)
			printf(" (%d.%03dMB/s)", mbs, speed % 1000);
		else
			printf(" (%dKB/s)", speed % 1000);
	}

	printf(" transfers");

	if (periph->periph_mode & PERIPH_CAP_TQING)
		printf(", tagged queueing");

	printf("\n");
}
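
/*
 * Example of the speed computation above: sync factor 0x0c corresponds
 * to a 50.0ns period, for which scsipi_sync_factor_to_freq() below
 * returns 20000 (the transfer rate in kHz).  On a 16-bit wide bus the
 * rate doubles to 40000, so the mode is printed as
 * "Sync (50.0ns offset N), 16-bit (40.000MB/s) transfers".
 */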

/*
 * scsipi_async_event_max_openings:
 *
 *	Update the maximum number of outstanding commands a
 *	device may have.
 */
void
scsipi_async_event_max_openings(chan, mo)
	struct scsipi_channel *chan;
	struct scsipi_max_openings *mo;
{
	struct scsipi_periph *periph;
	int minlun, maxlun;

	if (mo->mo_lun == -1) {
		/*
		 * Wildcarded; apply it to all LUNs.
		 */
		minlun = 0;
		maxlun = chan->chan_nluns - 1;
	} else
		minlun = maxlun = mo->mo_lun;

	for (; minlun <= maxlun; minlun++) {
		periph = scsipi_lookup_periph(chan, mo->mo_target, minlun);
		if (periph == NULL)
			continue;

		if (mo->mo_openings < periph->periph_openings)
			periph->periph_openings = mo->mo_openings;
		else if (mo->mo_openings > periph->periph_openings &&
		    (periph->periph_flags & PERIPH_GROW_OPENINGS) != 0)
			periph->periph_openings = mo->mo_openings;
	}
}

/*
 * scsipi_async_event_xfer_mode:
 *
 *	Update the xfer mode for all periphs sharing the
 *	specified I_T Nexus.
 */
void
scsipi_async_event_xfer_mode(chan, xm)
	struct scsipi_channel *chan;
	struct scsipi_xfer_mode *xm;
{
	struct scsipi_periph *periph;
	int lun, announce, mode, period, offset;

	for (lun = 0; lun < chan->chan_nluns; lun++) {
		periph = scsipi_lookup_periph(chan, xm->xm_target, lun);
		if (periph == NULL)
			continue;
		announce = 0;

		/*
		 * Clamp the xfer mode down to this periph's capabilities.
		 */
		mode = xm->xm_mode & periph->periph_cap;
		if (mode & PERIPH_CAP_SYNC) {
			period = xm->xm_period;
			offset = xm->xm_offset;
		} else {
			period = 0;
			offset = 0;
		}

		/*
		 * If we do not have a valid xfer mode yet, or the parameters
		 * are different, announce them.
		 */
		if ((periph->periph_flags & PERIPH_MODE_VALID) == 0 ||
		    periph->periph_mode != mode ||
		    periph->periph_period != period ||
		    periph->periph_offset != offset)
			announce = 1;

		periph->periph_mode = mode;
		periph->periph_period = period;
		periph->periph_offset = offset;
		periph->periph_flags |= PERIPH_MODE_VALID;

		if (announce)
			scsipi_print_xfer_mode(periph);
	}
}

/*
 * scsipi_set_xfer_mode:
 *
 *	Set the xfer mode for the specified I_T Nexus.
 */
void
scsipi_set_xfer_mode(chan, target, immed)
	struct scsipi_channel *chan;
	int target, immed;
{
	struct scsipi_xfer_mode xm;
	struct scsipi_periph *itperiph;
	int lun, s;

	/*
	 * Go to the minimal xfer mode.
	 */
	xm.xm_target = target;
	xm.xm_mode = 0;
	xm.xm_period = 0;			/* ignored */
	xm.xm_offset = 0;			/* ignored */

	/*
	 * Find the first LUN we know about on this I_T Nexus.
	 */
	for (lun = 0; lun < chan->chan_nluns; lun++) {
		itperiph = scsipi_lookup_periph(chan, target, lun);
		if (itperiph != NULL)
			break;
	}
	if (itperiph != NULL)
		xm.xm_mode = itperiph->periph_cap;

	/*
	 * Now issue the request to the adapter.
	 */
	s = splbio();
	scsipi_adapter_request(chan, ADAPTER_REQ_SET_XFER_MODE, &xm);
	splx(s);

	/*
	 * If we want this to happen immediately, issue a dummy command,
	 * since most adapters can't really negotiate unless they're
	 * executing a job.
	 */
	if (immed != 0 && itperiph != NULL) {
		(void) scsipi_test_unit_ready(itperiph,
		    XS_CTL_DISCOVERY | XS_CTL_IGNORE_ILLEGAL_REQUEST |
		    XS_CTL_IGNORE_NOT_READY |
		    XS_CTL_IGNORE_MEDIA_CHANGE);
	}
}

/*
 * scsipi_async_event_channel_reset:
 *
 *	Handle a SCSI bus reset.
 */
void
scsipi_async_event_channel_reset(chan)
	struct scsipi_channel *chan;
{
	struct scsipi_xfer *xs, *xs_next;
	struct scsipi_periph *periph;
	int target, lun;

	/*
	 * Channel has been reset.  Also mark pending REQUEST_SENSE
	 * commands as reset, as their sense data is no longer available.
	 */

	for (xs = TAILQ_FIRST(&chan->chan_queue); xs != NULL; xs = xs_next) {
		xs_next = TAILQ_NEXT(xs, channel_q);
		if (xs->xs_control & XS_CTL_REQSENSE) {
			TAILQ_REMOVE(&chan->chan_queue, xs, channel_q);
			xs->error = XS_RESET;
			scsipi_done(xs);
		}
	}
	/* Catch xs with pending sense which may not have a REQSENSE xs yet */
	for (target = 0; target < chan->chan_ntargets; target++) {
		if (target == chan->chan_id)
			continue;
		for (lun = 0; lun < chan->chan_nluns; lun++) {
			periph = chan->chan_periphs[target][lun];
			if (periph) {
				xs = periph->periph_xscheck;
				if (xs)
					xs->error = XS_RESET;
			}
		}
	}
}

/*
 * scsipi_adapter_addref:
 *
 *	Add a reference to the specified adapter, enabling it if
 *	necessary.
 */
int
scsipi_adapter_addref(adapt)
	struct scsipi_adapter *adapt;
{
	int s, error = 0;

	s = splbio();
	if (adapt->adapt_refcnt++ == 0 && adapt->adapt_enable != NULL) {
		error = (*adapt->adapt_enable)(adapt->adapt_dev, 1);
		if (error)
			adapt->adapt_refcnt--;
	}
	splx(s);
	return (error);
}

/*
 * scsipi_adapter_delref:
 *
 *	Delete a reference to the specified adapter, disabling it
 *	if possible.
 */
void
scsipi_adapter_delref(adapt)
	struct scsipi_adapter *adapt;
{
	int s;

	s = splbio();
	if (adapt->adapt_refcnt-- == 1 && adapt->adapt_enable != NULL)
		(void) (*adapt->adapt_enable)(adapt->adapt_dev, 0);
	splx(s);
}

struct scsipi_syncparam {
	int	ss_factor;
	int	ss_period;	/* ns * 10 */
} scsipi_syncparams[] = {
	{ 0x0a,		250 },
	{ 0x0b,		303 },
	{ 0x0c,		500 },
};
const int scsipi_nsyncparams =
    sizeof(scsipi_syncparams) / sizeof(scsipi_syncparams[0]);
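
/*
 * ss_period is stored in units of ns * 10 so that periods which are
 * not a whole number of nanoseconds (e.g. 30.3ns for factor 0x0b) can
 * be represented.  Factors not covered by the table fall back to the
 * linear rule period = factor * 4ns; for example,
 * scsipi_sync_factor_to_period(0x19) yields 1000 (100.0ns), and
 * scsipi_sync_period_to_factor(1000) maps back to 0x19 (25).
 */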

int
scsipi_sync_period_to_factor(period)
	int period;		/* ns * 10 */
{
	int i;

	for (i = 0; i < scsipi_nsyncparams; i++) {
		if (period <= scsipi_syncparams[i].ss_period)
			return (scsipi_syncparams[i].ss_factor);
	}

	return ((period / 10) / 4);
}

int
scsipi_sync_factor_to_period(factor)
	int factor;
{
	int i;

	for (i = 0; i < scsipi_nsyncparams; i++) {
		if (factor == scsipi_syncparams[i].ss_factor)
			return (scsipi_syncparams[i].ss_period);
	}

	return ((factor * 4) * 10);
}

int
scsipi_sync_factor_to_freq(factor)
	int factor;
{
	int i;

	for (i = 0; i < scsipi_nsyncparams; i++) {
		if (factor == scsipi_syncparams[i].ss_factor)
			return (10000000 / scsipi_syncparams[i].ss_period);
	}

	return (10000000 / ((factor * 4) * 10));
}

#ifdef SCSIPI_DEBUG
/*
 * Given a scsipi_xfer, dump the request, in all its glory
 */
void
show_scsipi_xs(xs)
	struct scsipi_xfer *xs;
{

	printf("xs(%p): ", xs);
	printf("xs_control(0x%08x)", xs->xs_control);
	printf("xs_status(0x%08x)", xs->xs_status);
	printf("periph(%p)", xs->xs_periph);
	printf("retr(0x%x)", xs->xs_retries);
	printf("timo(0x%x)", xs->timeout);
	printf("cmd(%p)", xs->cmd);
	printf("len(0x%x)", xs->cmdlen);
	printf("data(%p)", xs->data);
	printf("len(0x%x)", xs->datalen);
	printf("res(0x%x)", xs->resid);
	printf("err(0x%x)", xs->error);
	printf("bp(%p)", xs->bp);
	show_scsipi_cmd(xs);
}

void
show_scsipi_cmd(xs)
	struct scsipi_xfer *xs;
{
	u_char *b = (u_char *) xs->cmd;
	int i = 0;

	scsipi_printaddr(xs->xs_periph);
	printf(" command: ");

	if ((xs->xs_control & XS_CTL_RESET) == 0) {
		while (i < xs->cmdlen) {
			if (i)
				printf(",");
			printf("0x%x", b[i++]);
		}
		printf("-[%d bytes]\n", xs->datalen);
		if (xs->datalen)
			show_mem(xs->data, min(64, xs->datalen));
	} else
		printf("-RESET-\n");
}

void
show_mem(address, num)
	u_char *address;
	int num;
{
	int x;

	printf("------------------------------");
	for (x = 0; x < num; x++) {
		if ((x % 16) == 0)
			printf("\n%03d: ", x);
		printf("%02x ", *address++);
	}
	printf("\n------------------------------\n");
}
#endif /* SCSIPI_DEBUG */