/*	$NetBSD: scsipi_base.c,v 1.26.2.13 2001/04/03 15:27:18 bouyer Exp $	*/

/*-
 * Copyright (c) 1998, 1999, 2000 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Charles M. Hannum; by Jason R. Thorpe of the Numerical Aerospace
 * Simulation Facility, NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "opt_scsi.h"

#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/buf.h>
#include <sys/uio.h>
#include <sys/malloc.h>
#include <sys/pool.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/proc.h>
#include <sys/kthread.h>

#include <dev/scsipi/scsipi_all.h>
#include <dev/scsipi/scsipi_disk.h>
#include <dev/scsipi/scsipiconf.h>
#include <dev/scsipi/scsipi_base.h>

#include <dev/scsipi/scsi_all.h>
#include <dev/scsipi/scsi_message.h>

int	scsipi_complete __P((struct scsipi_xfer *));
void	scsipi_request_sense __P((struct scsipi_xfer *));
int	scsipi_enqueue __P((struct scsipi_xfer *));
void	scsipi_run_queue __P((struct scsipi_channel *chan));

void	scsipi_completion_thread __P((void *));

void	scsipi_get_tag __P((struct scsipi_xfer *));
void	scsipi_put_tag __P((struct scsipi_xfer *));

int	scsipi_get_resource __P((struct scsipi_channel *));
void	scsipi_put_resource __P((struct scsipi_channel *));
__inline int scsipi_grow_resources __P((struct scsipi_channel *));

void	scsipi_async_event_max_openings __P((struct scsipi_channel *,
	    struct scsipi_max_openings *));
void	scsipi_async_event_xfer_mode __P((struct scsipi_channel *,
	    struct scsipi_xfer_mode *));
void	scsipi_async_event_channel_reset __P((struct scsipi_channel *));

struct pool scsipi_xfer_pool;

/*
 * scsipi_init:
 *
 *	Called when a scsibus or atapibus is attached to the system
 *	to initialize shared data structures.
 */
void
scsipi_init()
{
	static int scsipi_init_done;

	if (scsipi_init_done)
		return;
	scsipi_init_done = 1;

	/* Initialize the scsipi_xfer pool. */
	pool_init(&scsipi_xfer_pool, sizeof(struct scsipi_xfer), 0,
	    0, 0, "scxspl", 0, NULL, NULL, M_DEVBUF);
}

/*
 * scsipi_channel_init:
 *
 *	Initialize a scsipi_channel when it is attached.
 */
void
scsipi_channel_init(chan)
	struct scsipi_channel *chan;
{
	size_t nbytes;
	int i;

	/* Initialize shared data. */
	scsipi_init();

	/* Initialize the queues. */
	TAILQ_INIT(&chan->chan_queue);
	TAILQ_INIT(&chan->chan_complete);

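	/*
	 * Allocate the two-level table of peripherals: one array of
	 * per-LUN pointers for each target.
	 */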
	nbytes = chan->chan_ntargets * sizeof(struct scsipi_periph **);
	chan->chan_periphs = malloc(nbytes, M_DEVBUF, M_WAITOK);

	nbytes = chan->chan_nluns * sizeof(struct scsipi_periph *);
	for (i = 0; i < chan->chan_ntargets; i++) {
		chan->chan_periphs[i] = malloc(nbytes, M_DEVBUF, M_WAITOK);
		memset(chan->chan_periphs[i], 0, nbytes);
	}

	/*
	 * Create the asynchronous completion thread.
	 */
	kthread_create(scsipi_create_completion_thread, chan);
}

/*
 * scsipi_channel_shutdown:
 *
 *	Shutdown a scsipi_channel.
 */
void
scsipi_channel_shutdown(chan)
	struct scsipi_channel *chan;
{

	/*
	 * Shut down the completion thread.
	 */
	chan->chan_flags |= SCSIPI_CHAN_SHUTDOWN;
	wakeup(&chan->chan_complete);

	/*
	 * Now wait for the thread to exit.
	 */
	while (chan->chan_thread != NULL)
		(void) tsleep(&chan->chan_thread, PRIBIO, "scshut", 0);
}

/*
 * scsipi_insert_periph:
 *
 *	Insert a periph into the channel.
 */
void
scsipi_insert_periph(chan, periph)
	struct scsipi_channel *chan;
	struct scsipi_periph *periph;
{
	int s;

	s = splbio();
	chan->chan_periphs[periph->periph_target][periph->periph_lun] = periph;
	splx(s);
}

/*
 * scsipi_remove_periph:
 *
 *	Remove a periph from the channel.
 */
void
scsipi_remove_periph(chan, periph)
	struct scsipi_channel *chan;
	struct scsipi_periph *periph;
{
	int s;

	s = splbio();
	chan->chan_periphs[periph->periph_target][periph->periph_lun] = NULL;
	splx(s);
}

/*
 * scsipi_lookup_periph:
 *
 *	Lookup a periph on the specified channel.
 */
struct scsipi_periph *
scsipi_lookup_periph(chan, target, lun)
	struct scsipi_channel *chan;
	int target, lun;
{
	struct scsipi_periph *periph;
	int s;

	if (target >= chan->chan_ntargets ||
	    lun >= chan->chan_nluns)
		return (NULL);

	s = splbio();
	periph = chan->chan_periphs[target][lun];
	splx(s);

	return (periph);
}

/*
 * scsipi_get_resource:
 *
 *	Allocate a single xfer `resource' from the channel.
 *
 *	NOTE: Must be called at splbio().
 */
int
scsipi_get_resource(chan)
	struct scsipi_channel *chan;
{
	struct scsipi_adapter *adapt = chan->chan_adapter;

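	/*
	 * SCSIPI_CHAN_OPENINGS means openings are accounted per-channel;
	 * otherwise a single pool of openings is shared by all channels
	 * on the adapter.
	 */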
	if (chan->chan_flags & SCSIPI_CHAN_OPENINGS) {
		if (chan->chan_openings > 0) {
			chan->chan_openings--;
			return (1);
		}
		return (0);
	}

	if (adapt->adapt_openings > 0) {
		adapt->adapt_openings--;
		return (1);
	}
	return (0);
}

/*
 * scsipi_grow_resources:
 *
 *	Attempt to grow resources for a channel.  If this succeeds,
 *	we allocate one for our caller.
 *
 *	NOTE: Must be called at splbio().
 */
__inline int
scsipi_grow_resources(chan)
	struct scsipi_channel *chan;
{

	if (chan->chan_flags & SCSIPI_CHAN_CANGROW) {
		scsipi_adapter_request(chan, ADAPTER_REQ_GROW_RESOURCES, NULL);
		return (scsipi_get_resource(chan));
	}

	return (0);
}

/*
 * scsipi_put_resource:
 *
 *	Free a single xfer `resource' to the channel.
 *
 *	NOTE: Must be called at splbio().
 */
void
scsipi_put_resource(chan)
	struct scsipi_channel *chan;
{
	struct scsipi_adapter *adapt = chan->chan_adapter;

	if (chan->chan_flags & SCSIPI_CHAN_OPENINGS)
		chan->chan_openings++;
	else
		adapt->adapt_openings++;
}

/*
 * scsipi_get_tag:
 *
 *	Get a tag ID for the specified xfer.
 *
 *	NOTE: Must be called at splbio().
 */
void
scsipi_get_tag(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_periph *periph = xs->xs_periph;
	int word, bit, tag;

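	/*
	 * periph_freetags[] is a bitmap of free tag IDs, 32 per word.
	 * ffs() returns the 1-based index of the least significant set
	 * bit, or 0 if the word is empty.
	 */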
	for (word = 0; word < PERIPH_NTAGWORDS; word++) {
		bit = ffs(periph->periph_freetags[word]);
		if (bit != 0)
			break;
	}
#ifdef DIAGNOSTIC
	if (word == PERIPH_NTAGWORDS) {
		scsipi_printaddr(periph);
		printf("no free tags\n");
		panic("scsipi_get_tag");
	}
#endif

	bit -= 1;
	periph->periph_freetags[word] &= ~(1 << bit);
	tag = (word << 5) | bit;

	/* XXX Should eventually disallow this completely. */
	if (tag >= periph->periph_openings) {
		scsipi_printaddr(periph);
		printf("WARNING: tag %d greater than available openings %d\n",
		    tag, periph->periph_openings);
	}

	xs->xs_tag_id = tag;
}

/*
 * scsipi_put_tag:
 *
 *	Put the tag ID for the specified xfer back into the pool.
 *
 *	NOTE: Must be called at splbio().
 */
void
scsipi_put_tag(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_periph *periph = xs->xs_periph;
	int word, bit;

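	/* Recover the bitmap position from the tag ID (32 tags per word). */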
	word = xs->xs_tag_id >> 5;
	bit = xs->xs_tag_id & 0x1f;

	periph->periph_freetags[word] |= (1 << bit);
}

/*
 * scsipi_get_xs:
 *
 *	Allocate an xfer descriptor and associate it with the
 *	specified peripheral.  If the peripheral has no more
 *	available command openings, we either block waiting for
 *	one to become available, or fail.
 */
struct scsipi_xfer *
scsipi_get_xs(periph, flags)
	struct scsipi_periph *periph;
	int flags;
{
	struct scsipi_xfer *xs;
	int s;

	SC_DEBUG(periph, SCSIPI_DB3, ("scsipi_get_xs\n"));

	/*
	 * If we're cold, make sure we poll.
	 */
	if (cold)
		flags |= XS_CTL_NOSLEEP | XS_CTL_POLL;

#ifdef DIAGNOSTIC
	/*
	 * URGENT commands can never be ASYNC.
	 */
	if ((flags & (XS_CTL_URGENT|XS_CTL_ASYNC)) ==
	    (XS_CTL_URGENT|XS_CTL_ASYNC)) {
		scsipi_printaddr(periph);
		printf("URGENT and ASYNC\n");
		panic("scsipi_get_xs");
	}
#endif

	s = splbio();
	/*
	 * Wait for a command opening to become available.  Rules:
	 *
	 *	- All xfers must wait for an available opening.
	 *	  Exception: URGENT xfers can proceed when
	 *	  active == openings, because we use the opening
	 *	  of the command we're recovering for.
	 *	- if the periph has sense pending, only URGENT & REQSENSE
	 *	  xfers may proceed.
	 *
	 *	- If the periph is recovering, only URGENT xfers may
	 *	  proceed.
	 *
	 *	- If the periph is currently executing a recovery
	 *	  command, URGENT commands must block, because only
	 *	  one recovery command can execute at a time.
	 */
	for (;;) {
		if (flags & XS_CTL_URGENT) {
			if (periph->periph_active > periph->periph_openings)
				goto wait_for_opening;
			if (periph->periph_flags & PERIPH_SENSE) {
				if ((flags & XS_CTL_REQSENSE) == 0)
					goto wait_for_opening;
			} else {
				if ((periph->periph_flags &
				    PERIPH_RECOVERY_ACTIVE) != 0)
					goto wait_for_opening;
				periph->periph_flags |= PERIPH_RECOVERY_ACTIVE;
			}
			break;
		}
		if (periph->periph_active >= periph->periph_openings ||
		    (periph->periph_flags & PERIPH_RECOVERING) != 0)
			goto wait_for_opening;
		periph->periph_active++;
		break;

 wait_for_opening:
		if (flags & XS_CTL_NOSLEEP) {
			splx(s);
			return (NULL);
		}
		SC_DEBUG(periph, SCSIPI_DB3, ("sleeping\n"));
		periph->periph_flags |= PERIPH_WAITING;
		(void) tsleep(periph, PRIBIO, "getxs", 0);
	}
	SC_DEBUG(periph, SCSIPI_DB3, ("calling pool_get\n"));
	xs = pool_get(&scsipi_xfer_pool,
	    ((flags & XS_CTL_NOSLEEP) != 0 ? PR_NOWAIT : PR_WAITOK));
	if (xs == NULL) {
		if (flags & XS_CTL_URGENT) {
			if ((flags & XS_CTL_REQSENSE) == 0)
				periph->periph_flags &= ~PERIPH_RECOVERY_ACTIVE;
		} else
			periph->periph_active--;
		scsipi_printaddr(periph);
		printf("unable to allocate %sscsipi_xfer\n",
		    (flags & XS_CTL_URGENT) ? "URGENT " : "");
	}
	splx(s);

	SC_DEBUG(periph, SCSIPI_DB3, ("returning\n"));

	if (xs != NULL) {
		memset(xs, 0, sizeof(*xs));
		callout_init(&xs->xs_callout);
		xs->xs_periph = periph;
		xs->xs_control = flags;
		xs->xs_status = 0;
		s = splbio();
		TAILQ_INSERT_TAIL(&periph->periph_xferq, xs, device_q);
		splx(s);
	}
	return (xs);
}

/*
 * scsipi_put_xs:
 *
 *	Release an xfer descriptor, decreasing the outstanding command
 *	count for the peripheral.  If there is a thread waiting for
 *	an opening, wake it up.  If not, kick any queued I/O the
 *	peripheral may have.
 *
 *	NOTE: Must be called at splbio().
 */
void
scsipi_put_xs(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_periph *periph = xs->xs_periph;
	int flags = xs->xs_control;

	SC_DEBUG(periph, SCSIPI_DB3, ("scsipi_free_xs\n"));

	TAILQ_REMOVE(&periph->periph_xferq, xs, device_q);
	pool_put(&scsipi_xfer_pool, xs);

#ifdef DIAGNOSTIC
	if ((periph->periph_flags & PERIPH_RECOVERY_ACTIVE) != 0 &&
	    periph->periph_active == 0) {
		scsipi_printaddr(periph);
489 printf("recovery without a command to recovery for\n");
		panic("scsipi_put_xs");
	}
#endif

	if (flags & XS_CTL_URGENT) {
		if ((flags & XS_CTL_REQSENSE) == 0)
			periph->periph_flags &= ~PERIPH_RECOVERY_ACTIVE;
	} else
		periph->periph_active--;
	if (periph->periph_active == 0 &&
	    (periph->periph_flags & PERIPH_WAITDRAIN) != 0) {
		periph->periph_flags &= ~PERIPH_WAITDRAIN;
		wakeup(&periph->periph_active);
	}

	if (periph->periph_flags & PERIPH_WAITING) {
		periph->periph_flags &= ~PERIPH_WAITING;
		wakeup(periph);
	} else {
		if (periph->periph_switch->psw_start != NULL) {
			SC_DEBUG(periph, SCSIPI_DB2,
			    ("calling private start()\n"));
			(*periph->periph_switch->psw_start)(periph);
		}
	}
}

/*
 * scsipi_channel_freeze:
 *
 *	Freeze a channel's xfer queue.
 */
void
scsipi_channel_freeze(chan, count)
	struct scsipi_channel *chan;
	int count;
{
	int s;

	s = splbio();
	chan->chan_qfreeze += count;
	splx(s);
}

/*
 * scsipi_channel_thaw:
 *
 *	Thaw a channel's xfer queue.
 */
void
scsipi_channel_thaw(chan, count)
	struct scsipi_channel *chan;
	int count;
{
	int s;

	s = splbio();
	chan->chan_qfreeze -= count;
	splx(s);
}

/*
 * scsipi_channel_timed_thaw:
 *
 *	Thaw a channel after some time has expired.
 */
void
scsipi_channel_timed_thaw(arg)
	void *arg;
{
	struct scsipi_channel *chan = arg;

	scsipi_channel_thaw(chan, 1);

	/*
	 * Kick the channel's queue here.  Note, we're running in
	 * interrupt context (softclock), so the adapter driver
	 * had better not sleep.
	 */
	scsipi_run_queue(chan);
}

/*
 * scsipi_periph_freeze:
 *
 *	Freeze a device's xfer queue.
 */
void
scsipi_periph_freeze(periph, count)
	struct scsipi_periph *periph;
	int count;
{
	int s;

	s = splbio();
	periph->periph_qfreeze += count;
	splx(s);
}

/*
 * scsipi_periph_thaw:
 *
 *	Thaw a device's xfer queue.
 */
void
scsipi_periph_thaw(periph, count)
	struct scsipi_periph *periph;
	int count;
{
	int s;

	s = splbio();
	periph->periph_qfreeze -= count;
	if (periph->periph_qfreeze == 0 &&
	    (periph->periph_flags & PERIPH_WAITING) != 0)
		wakeup(periph);
	splx(s);
}

/*
 * scsipi_periph_timed_thaw:
 *
 *	Thaw a device after some time has expired.
 */
void
scsipi_periph_timed_thaw(arg)
	void *arg;
{
	struct scsipi_periph *periph = arg;

	callout_stop(&periph->periph_callout);
	scsipi_periph_thaw(periph, 1);

	/*
	 * Kick the channel's queue here.  Note, we're running in
	 * interrupt context (softclock), so the adapter driver
	 * had better not sleep.
	 */
	scsipi_run_queue(periph->periph_channel);
}

/*
 * scsipi_wait_drain:
 *
 *	Wait for a periph's pending xfers to drain.
 */
void
scsipi_wait_drain(periph)
	struct scsipi_periph *periph;
{
	int s;

	s = splbio();
	while (periph->periph_active != 0) {
		periph->periph_flags |= PERIPH_WAITDRAIN;
		(void) tsleep(&periph->periph_active, PRIBIO, "sxdrn", 0);
	}
	splx(s);
}

/*
 * scsipi_kill_pending:
 *
 *	Kill off all pending xfers for a periph.
 *
 *	NOTE: Must be called at splbio().
 */
void
scsipi_kill_pending(periph)
	struct scsipi_periph *periph;
{

	(*periph->periph_channel->chan_bustype->bustype_kill_pending)(periph);
#ifdef DIAGNOSTIC
	if (TAILQ_FIRST(&periph->periph_xferq) != NULL)
		panic("scsipi_kill_pending");
#endif
	scsipi_wait_drain(periph);
}

/*
 * scsipi_interpret_sense:
 *
 *	Look at the returned sense and act on the error, determining
 *	the unix error number to pass back.  (0 = report no error)
 *
 *	NOTE: If we return ERESTART, we are expected to have
 *	thawed the device!
 *
 *	THIS IS THE DEFAULT ERROR HANDLER FOR SCSI DEVICES.
 */
int
scsipi_interpret_sense(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_sense_data *sense;
	struct scsipi_periph *periph = xs->xs_periph;
	u_int8_t key;
	u_int32_t info;
	int error;
#ifndef SCSIVERBOSE
	static char *error_mes[] = {
		"soft error (corrected)",
		"not ready", "medium error",
		"non-media hardware failure", "illegal request",
		"unit attention", "readonly device",
		"no data found", "vendor unique",
		"copy aborted", "command aborted",
		"search returned equal", "volume overflow",
		"verify miscompare", "unknown error key"
	};
#endif

	sense = &xs->sense.scsi_sense;
#ifdef SCSIPI_DEBUG
	if (periph->periph_dbflags & SCSIPI_DB1) {
		int count;
		scsipi_printaddr(periph);
		printf(" sense debug information:\n");
		printf("\tcode 0x%x valid 0x%x\n",
		    sense->error_code & SSD_ERRCODE,
		    sense->error_code & SSD_ERRCODE_VALID ? 1 : 0);
		printf("\tseg 0x%x key 0x%x ili 0x%x eom 0x%x fmark 0x%x\n",
		    sense->segment,
		    sense->flags & SSD_KEY,
		    sense->flags & SSD_ILI ? 1 : 0,
		    sense->flags & SSD_EOM ? 1 : 0,
		    sense->flags & SSD_FILEMARK ? 1 : 0);
		printf("\ninfo: 0x%x 0x%x 0x%x 0x%x followed by %d "
		    "extra bytes\n",
		    sense->info[0],
		    sense->info[1],
		    sense->info[2],
		    sense->info[3],
		    sense->extra_len);
		printf("\textra: ");
		for (count = 0; count < ADD_BYTES_LIM(sense); count++)
			printf("0x%x ", sense->cmd_spec_info[count]);
		printf("\n");
	}
#endif

	/*
	 * If the periph has its own error handler, call it first.
	 * If it returns a legit error value, return that, otherwise
	 * it wants us to continue with normal error processing.
	 */
	if (periph->periph_switch->psw_error != NULL) {
		SC_DEBUG(periph, SCSIPI_DB2,
		    ("calling private err_handler()\n"));
		error = (*periph->periph_switch->psw_error)(xs);
		if (error != EJUSTRETURN)
			return (error);
	}
	/* otherwise use the default */
	switch (sense->error_code & SSD_ERRCODE) {
	/*
	 * Error codes 0x70 (current) and 0x71 (deferred) both use
	 * the extended sense format; interpret the sense key.
	 */
	case 0x71:		/* delayed error */
		scsipi_printaddr(periph);
		key = sense->flags & SSD_KEY;
		printf(" DEFERRED ERROR, key = 0x%x\n", key);
		/* FALLTHROUGH */
	case 0x70:
		if ((sense->error_code & SSD_ERRCODE_VALID) != 0)
			info = _4btol(sense->info);
		else
			info = 0;
		key = sense->flags & SSD_KEY;

		switch (key) {
		case SKEY_NO_SENSE:
		case SKEY_RECOVERED_ERROR:
			if (xs->resid == xs->datalen && xs->datalen) {
				/*
				 * Why is this here?
				 */
				xs->resid = 0;	/* not short read */
			}
			/* FALLTHROUGH */
		case SKEY_EQUAL:
			error = 0;
			break;
		case SKEY_NOT_READY:
			if ((periph->periph_flags & PERIPH_REMOVABLE) != 0)
				periph->periph_flags &= ~PERIPH_MEDIA_LOADED;
			if ((xs->xs_control & XS_CTL_IGNORE_NOT_READY) != 0)
				return (0);
			if (sense->add_sense_code == 0x3A &&
			    sense->add_sense_code_qual == 0x00)
				error = ENODEV; /* Medium not present */
			else
				error = EIO;
			if ((xs->xs_control & XS_CTL_SILENT) != 0)
				return (error);
			break;
		case SKEY_ILLEGAL_REQUEST:
			if ((xs->xs_control &
			    XS_CTL_IGNORE_ILLEGAL_REQUEST) != 0)
				return (0);
			/*
			 * Handle the case where a device reports
			 * Logical Unit Not Supported during discovery.
			 */
			if ((xs->xs_control & XS_CTL_DISCOVERY) != 0 &&
			    sense->add_sense_code == 0x25 &&
			    sense->add_sense_code_qual == 0x00)
				return (EINVAL);
			if ((xs->xs_control & XS_CTL_SILENT) != 0)
				return (EIO);
			error = EINVAL;
			break;
		case SKEY_UNIT_ATTENTION:
			if (sense->add_sense_code == 0x29 &&
			    sense->add_sense_code_qual == 0x00) {
				/* device or bus reset */
				return (ERESTART);
			}
			if ((periph->periph_flags & PERIPH_REMOVABLE) != 0)
				periph->periph_flags &= ~PERIPH_MEDIA_LOADED;
			if ((xs->xs_control &
			    XS_CTL_IGNORE_MEDIA_CHANGE) != 0 ||
			    /* XXX Should reupload any transient state. */
			    (periph->periph_flags &
			    PERIPH_REMOVABLE) == 0) {
				return (ERESTART);
			}
			if ((xs->xs_control & XS_CTL_SILENT) != 0)
				return (EIO);
			error = EIO;
			break;
		case SKEY_WRITE_PROTECT:
			error = EROFS;
			break;
		case SKEY_BLANK_CHECK:
			error = 0;
			break;
		case SKEY_ABORTED_COMMAND:
			error = ERESTART;
			break;
		case SKEY_VOLUME_OVERFLOW:
			error = ENOSPC;
			break;
		default:
			error = EIO;
			break;
		}

#ifdef SCSIVERBOSE
		if (key && (xs->xs_control & XS_CTL_SILENT) == 0)
			scsipi_print_sense(xs, 0);
#else
		if (key) {
			scsipi_printaddr(periph);
			printf("%s", error_mes[key - 1]);
			if ((sense->error_code & SSD_ERRCODE_VALID) != 0) {
				switch (key) {
				case SKEY_NOT_READY:
				case SKEY_ILLEGAL_REQUEST:
				case SKEY_UNIT_ATTENTION:
				case SKEY_WRITE_PROTECT:
					break;
				case SKEY_BLANK_CHECK:
					printf(", requested size: %d (decimal)",
					    info);
					break;
				case SKEY_ABORTED_COMMAND:
					if (xs->xs_retries)
						printf(", retrying");
					printf(", cmd 0x%x, info 0x%x",
					    xs->cmd->opcode, info);
					break;
				default:
					printf(", info = %d (decimal)", info);
				}
			}
			if (sense->extra_len != 0) {
				int n;
				printf(", data =");
				for (n = 0; n < sense->extra_len; n++)
					printf(" %02x",
					    sense->cmd_spec_info[n]);
			}
			printf("\n");
		}
#endif
		return (error);

	/*
	 * Not code 70, just report it
	 */
	default:
#if defined(SCSIDEBUG) || defined(DEBUG)
	{
		static char *uc = "undecodable sense error";
		int i;
		u_int8_t *cptr = (u_int8_t *) sense;
		scsipi_printaddr(periph);
		if (xs->cmd == &xs->cmdstore) {
			printf("%s for opcode 0x%x, data=",
			    uc, xs->cmdstore.opcode);
		} else {
			printf("%s, data=", uc);
		}
		for (i = 0; i < sizeof (*sense); i++)
896 printf(" 0x%02x", *(cptr++) & 0xff);
897 printf("\n");
898 }
899 #else
900
901 scsipi_printaddr(periph);
902 printf("Sense Error Code 0x%x",
903 sense->error_code & SSD_ERRCODE);
904 if ((sense->error_code & SSD_ERRCODE_VALID) != 0) {
905 struct scsipi_sense_data_unextended *usense =
906 (struct scsipi_sense_data_unextended *)sense;
907 printf(" at block no. %d (decimal)",
908 _3btol(usense->block));
909 }
910 printf("\n");
911 #endif
912 return (EIO);
913 }
914 }
915
916 /*
917 * scsipi_size:
918 *
919 * Find out from the device what its capacity is.
920 */
921 u_long
922 scsipi_size(periph, flags)
923 struct scsipi_periph *periph;
924 int flags;
925 {
926 struct scsipi_read_cap_data rdcap;
927 struct scsipi_read_capacity scsipi_cmd;
928
929 bzero(&scsipi_cmd, sizeof(scsipi_cmd));
930 scsipi_cmd.opcode = READ_CAPACITY;
931
932 /*
933 * If the command works, interpret the result as a 4 byte
934 * number of blocks
935 */
936 if (scsipi_command(periph, (struct scsipi_generic *)&scsipi_cmd,
937 sizeof(scsipi_cmd), (u_char *)&rdcap, sizeof(rdcap),
938 SCSIPIRETRIES, 20000, NULL,
939 flags | XS_CTL_DATA_IN | XS_CTL_DATA_ONSTACK) != 0) {
940 scsipi_printaddr(periph);
941 printf("could not get size\n");
942 return (0);
943 }
944
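	/*
	 * READ CAPACITY returns the address of the last block; adding
	 * one yields the total number of blocks.
	 */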
	return (_4btol(rdcap.addr) + 1);
}

/*
 * scsipi_test_unit_ready:
 *
 *	Issue a `test unit ready' request.
 */
int
scsipi_test_unit_ready(periph, flags)
	struct scsipi_periph *periph;
	int flags;
{
	struct scsipi_test_unit_ready scsipi_cmd;

	/* some ATAPI drives don't support TEST_UNIT_READY. Sigh */
	if (periph->periph_quirks & PQUIRK_NOTUR)
		return (0);

	bzero(&scsipi_cmd, sizeof(scsipi_cmd));
	scsipi_cmd.opcode = TEST_UNIT_READY;

	return (scsipi_command(periph,
	    (struct scsipi_generic *)&scsipi_cmd, sizeof(scsipi_cmd),
	    0, 0, SCSIPIRETRIES, 10000, NULL, flags));
}

/*
 * scsipi_inquire:
 *
 *	Ask the device about itself.
 */
int
scsipi_inquire(periph, inqbuf, flags)
	struct scsipi_periph *periph;
	struct scsipi_inquiry_data *inqbuf;
	int flags;
{
	struct scsipi_inquiry scsipi_cmd;

	bzero(&scsipi_cmd, sizeof(scsipi_cmd));
	scsipi_cmd.opcode = INQUIRY;
	scsipi_cmd.length = sizeof(struct scsipi_inquiry_data);

	return (scsipi_command(periph,
	    (struct scsipi_generic *) &scsipi_cmd, sizeof(scsipi_cmd),
	    (u_char *) inqbuf, sizeof(struct scsipi_inquiry_data),
	    SCSIPIRETRIES, 10000, NULL, XS_CTL_DATA_IN | flags));
}

/*
 * scsipi_prevent:
 *
 *	Prevent or allow the user to remove the media
 */
int
scsipi_prevent(periph, type, flags)
	struct scsipi_periph *periph;
	int type, flags;
{
	struct scsipi_prevent scsipi_cmd;

	if (periph->periph_quirks & PQUIRK_NODOORLOCK)
		return (0);

	bzero(&scsipi_cmd, sizeof(scsipi_cmd));
	scsipi_cmd.opcode = PREVENT_ALLOW;
	scsipi_cmd.how = type;

	return (scsipi_command(periph,
	    (struct scsipi_generic *) &scsipi_cmd, sizeof(scsipi_cmd),
	    0, 0, SCSIPIRETRIES, 5000, NULL, flags));
}

/*
 * scsipi_start:
 *
 *	Send a START UNIT.
 */
int
scsipi_start(periph, type, flags)
	struct scsipi_periph *periph;
	int type, flags;
{
	struct scsipi_start_stop scsipi_cmd;

	if (periph->periph_quirks & PQUIRK_NOSTARTUNIT)
		return 0;

	bzero(&scsipi_cmd, sizeof(scsipi_cmd));
	scsipi_cmd.opcode = START_STOP;
	scsipi_cmd.byte2 = 0x00;
	scsipi_cmd.how = type;

	return (scsipi_command(periph,
	    (struct scsipi_generic *) &scsipi_cmd, sizeof(scsipi_cmd),
	    0, 0, SCSIPIRETRIES, (type & SSS_START) ? 60000 : 10000,
	    NULL, flags));
}

/*
 * scsipi_done:
 *
 *	This routine is called by an adapter's interrupt handler when
 *	an xfer is completed.
 */
void
scsipi_done(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_periph *periph = xs->xs_periph;
	struct scsipi_channel *chan = periph->periph_channel;
	int s, freezecnt;

	SC_DEBUG(periph, SCSIPI_DB2, ("scsipi_done\n"));
#ifdef SCSIPI_DEBUG
	if (periph->periph_dbflags & SCSIPI_DB1)
		show_scsipi_cmd(xs);
#endif

	s = splbio();
	/*
	 * The resource this command was using is now free.
	 */
	scsipi_put_resource(chan);

	/*
	 * If the command was tagged, free the tag.
	 */
	if (XS_CTL_TAGTYPE(xs) != 0)
		scsipi_put_tag(xs);
	else
		periph->periph_flags &= ~PERIPH_UNTAG;

	/* Mark the command as `done'. */
	xs->xs_status |= XS_STS_DONE;

#ifdef DIAGNOSTIC
	if ((xs->xs_control & (XS_CTL_ASYNC|XS_CTL_POLL)) ==
	    (XS_CTL_ASYNC|XS_CTL_POLL))
		panic("scsipi_done: ASYNC and POLL");
#endif

	/*
	 * If the xfer had an error of any sort, freeze the
	 * periph's queue.  Freeze it again if we were requested
	 * to do so in the xfer.
	 */
	freezecnt = 0;
	if (xs->error != XS_NOERROR)
		freezecnt++;
	if (xs->xs_control & XS_CTL_FREEZE_PERIPH)
		freezecnt++;
	if (freezecnt != 0)
		scsipi_periph_freeze(periph, freezecnt);

	/*
	 * Record the xfer with a pending sense, in case a SCSI reset is
	 * received before the thread is woken up.
	 */
	if (xs->error == XS_BUSY && xs->status == SCSI_CHECK) {
		periph->periph_flags |= PERIPH_SENSE;
		periph->periph_xscheck = xs;
	}

	/*
	 * If this was an xfer that was not to complete asynchronously,
	 * let the requesting thread perform error checking/handling
	 * in its context.
	 */
	if ((xs->xs_control & XS_CTL_ASYNC) == 0) {
		splx(s);
		/*
		 * If it's a polling job, just return, to unwind the
		 * call graph.  We don't need to restart the queue,
		 * because polling jobs are treated specially, and
		 * are really only used during crash dumps anyway
		 * (XXX or during boot-time autoconfiguration of
		 * ATAPI devices).
		 */
		if (xs->xs_control & XS_CTL_POLL)
			return;
		wakeup(xs);
		goto out;
	}

	/*
	 * Catch the extremely common case of I/O completing
	 * without error; no use in taking a context switch
	 * if we can handle it in interrupt context.
	 */
	if (xs->error == XS_NOERROR) {
		splx(s);
		(void) scsipi_complete(xs);
		goto out;
	}

	/*
	 * There is an error on this xfer.  Put it on the channel's
	 * completion queue, and wake up the completion thread.
	 */
	TAILQ_INSERT_TAIL(&chan->chan_complete, xs, channel_q);
	splx(s);
	wakeup(&chan->chan_complete);

 out:
	/*
	 * If there are more xfers on the channel's queue, attempt to
	 * run them.
	 */
	scsipi_run_queue(chan);
}

/*
 * scsipi_complete:
 *
 *	Completion of a scsipi_xfer.  This is the guts of scsipi_done().
 *
 *	NOTE: This routine MUST be called with valid thread context
 *	except for the case where the following two conditions are
 *	true:
 *
 *		xs->error == XS_NOERROR
 *		XS_CTL_ASYNC is set in xs->xs_control
 *
 *	The semantics of this routine can be tricky, so here is an
 *	explanation:
 *
 *		0		Xfer completed successfully.
 *
 *		ERESTART	Xfer had an error, but was restarted.
 *
 *		anything else	Xfer had an error, return value is Unix
 *				errno.
 *
 *	If the return value is anything but ERESTART:
 *
 *		- If XS_CTL_ASYNC is set, `xs' has been freed back to
 *		  the pool.
 *		- If there is a buf associated with the xfer,
 *		  it has been biodone()'d.
 */
int
scsipi_complete(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_periph *periph = xs->xs_periph;
	struct scsipi_channel *chan = periph->periph_channel;
	struct buf *bp;
	int error, s;

#ifdef DIAGNOSTIC
	if ((xs->xs_control & XS_CTL_ASYNC) != 0 && xs->bp == NULL)
		panic("scsipi_complete: XS_CTL_ASYNC but no buf");
#endif
	/*
	 * If command terminated with a CHECK CONDITION, we need to issue a
	 * REQUEST_SENSE command.  Once the REQUEST_SENSE has been processed
	 * we'll have the real status.
	 * Must be processed at splbio() to avoid missing a SCSI bus reset
	 * for this command.
	 */
	s = splbio();
	if (xs->error == XS_BUSY && xs->status == SCSI_CHECK) {
		/* request sense for a request sense ? */
		if (xs->xs_control & XS_CTL_REQSENSE) {
			scsipi_printaddr(periph);
			/* XXX maybe we should reset the device ? */
			/* we've been frozen because xs->error != XS_NOERROR */
			scsipi_periph_thaw(periph, 1);
			splx(s);
			return EINVAL;
		}
		scsipi_request_sense(xs);
	}
	splx(s);
	/*
	 * If it's a user level request, bypass all usual completion
	 * processing, let the user work it out..
	 */
	if ((xs->xs_control & XS_CTL_USERCMD) != 0) {
		SC_DEBUG(periph, SCSIPI_DB3, ("calling user done()\n"));
		if (xs->error != XS_NOERROR)
			scsipi_periph_thaw(periph, 1);
		scsipi_user_done(xs);
		SC_DEBUG(periph, SCSIPI_DB3, ("returned from user done()\n"));
		return 0;
	}

	switch (xs->error) {
	case XS_NOERROR:
		error = 0;
		break;

	case XS_SENSE:
	case XS_SHORTSENSE:
		error = (*chan->chan_bustype->bustype_interpret_sense)(xs);
		break;

	case XS_RESOURCE_SHORTAGE:
		/*
		 * XXX Should freeze channel's queue.
		 */
		scsipi_printaddr(periph);
		printf("adapter resource shortage\n");
		/* FALLTHROUGH */

	case XS_BUSY:
		if (xs->error == XS_BUSY && xs->status == SCSI_QUEUE_FULL) {
			struct scsipi_max_openings mo;

			/*
			 * We set the openings to active - 1, assuming that
			 * the command that got us here is the first one that
			 * can't fit into the device's queue.  If that's not
			 * the case, I guess we'll find out soon enough.
			 */
			mo.mo_target = periph->periph_target;
			mo.mo_lun = periph->periph_lun;
			mo.mo_openings = periph->periph_active - 1;
#ifdef DIAGNOSTIC
			if (mo.mo_openings < 0) {
				scsipi_printaddr(periph);
				printf("QUEUE FULL resulted in < 0 openings\n");
				panic("scsipi_done");
			}
#endif
			if (mo.mo_openings == 0) {
				scsipi_printaddr(periph);
				printf("QUEUE FULL resulted in 0 openings\n");
				mo.mo_openings = 1;
			}
			scsipi_async_event(chan, ASYNC_EVENT_MAX_OPENINGS, &mo);
			error = ERESTART;
		} else if (xs->xs_retries != 0) {
			xs->xs_retries--;
			/*
			 * Wait one second, and try again.
			 */
			if (xs->xs_control & XS_CTL_POLL)
				delay(1000000);
			else {
				scsipi_periph_freeze(periph, 1);
				callout_reset(&periph->periph_callout,
				    hz, scsipi_periph_timed_thaw, periph);
			}
			error = ERESTART;
		} else
			error = EBUSY;
		break;

	case XS_REQUEUE:
		error = ERESTART;
		break;

	case XS_TIMEOUT:
		if (xs->xs_retries != 0) {
			xs->xs_retries--;
			error = ERESTART;
		} else
			error = EIO;
		break;

	case XS_SELTIMEOUT:
		/* XXX Disable device? */
		error = EIO;
		break;

	case XS_RESET:
		if (xs->xs_control & XS_CTL_REQSENSE) {
			/*
			 * request sense interrupted by reset: signal it
			 * with EINTR return code.
			 */
			error = EINTR;
		} else {
			if (xs->xs_retries != 0) {
				xs->xs_retries--;
				error = ERESTART;
			} else
				error = EIO;
		}
		break;

	default:
		scsipi_printaddr(periph);
		printf("invalid return code from adapter: %d\n", xs->error);
		error = EIO;
		break;
	}

	s = splbio();
	if (error == ERESTART) {
		/*
		 * If we get here, the periph has been thawed and frozen
		 * again if we had to issue recovery commands.  Alternatively,
		 * it may have been frozen again and in a timed thaw.  In
		 * any case, we thaw the periph once we re-enqueue the
		 * command.  Once the periph is fully thawed, it will begin
		 * operation again.
		 */
		xs->error = XS_NOERROR;
		xs->status = SCSI_OK;
		xs->xs_status &= ~XS_STS_DONE;
		xs->xs_requeuecnt++;
		error = scsipi_enqueue(xs);
		if (error == 0) {
			scsipi_periph_thaw(periph, 1);
			splx(s);
			return (ERESTART);
		}
	}

	/*
	 * scsipi_done() freezes the queue if not XS_NOERROR.
	 * Thaw it here.
	 */
	if (xs->error != XS_NOERROR)
		scsipi_periph_thaw(periph, 1);

	if ((bp = xs->bp) != NULL) {
		if (error) {
			bp->b_error = error;
			bp->b_flags |= B_ERROR;
			bp->b_resid = bp->b_bcount;
		} else {
			bp->b_error = 0;
			bp->b_resid = xs->resid;
		}
		biodone(bp);
	}

	if (xs->xs_control & XS_CTL_ASYNC)
		scsipi_put_xs(xs);
	splx(s);

	return (error);
}

/*
 * Issue a request sense for the given scsipi_xfer.  Called when the xfer
 * returns with a CHECK_CONDITION status.  Must be called in valid thread
 * context and at splbio().
 */

void
scsipi_request_sense(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_periph *periph = xs->xs_periph;
	int flags, error;
	struct scsipi_sense cmd;

	periph->periph_flags |= PERIPH_SENSE;

	/* if command was polling, request sense will too */
	flags = xs->xs_control & XS_CTL_POLL;
	/* Polling commands can't sleep */
	if (flags)
		flags |= XS_CTL_NOSLEEP;

	flags |= XS_CTL_REQSENSE | XS_CTL_URGENT | XS_CTL_DATA_IN |
	    XS_CTL_THAW_PERIPH | XS_CTL_FREEZE_PERIPH;

	bzero(&cmd, sizeof(cmd));
	cmd.opcode = REQUEST_SENSE;
	cmd.length = sizeof(struct scsipi_sense_data);

	error = scsipi_command(periph,
	    (struct scsipi_generic *) &cmd, sizeof(cmd),
	    (u_char*)&xs->sense.scsi_sense, sizeof(struct scsipi_sense_data),
	    0, 1000, NULL, flags);
	periph->periph_flags &= ~PERIPH_SENSE;
	periph->periph_xscheck = NULL;
	switch (error) {
	case 0:
		/* we have a valid sense */
		xs->error = XS_SENSE;
		return;
	case EINTR:
		/* REQUEST_SENSE interrupted by bus reset. */
		xs->error = XS_RESET;
		return;
	case EIO:
		/* request sense couldn't be performed */
		/*
		 * XXX this isn't quite right but we don't have anything
		 * better for now
		 */
		xs->error = XS_DRIVER_STUFFUP;
		return;
	default:
		/* Notify that request sense failed. */
		xs->error = XS_DRIVER_STUFFUP;
		scsipi_printaddr(periph);
		printf("request sense failed with error %d\n", error);
		return;
	}
}

/*
 * scsipi_enqueue:
 *
 *	Enqueue an xfer on a channel.
 */
int
scsipi_enqueue(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_channel *chan = xs->xs_periph->periph_channel;
	struct scsipi_xfer *qxs;
	int s;

	s = splbio();

	/*
	 * If the xfer is to be polled, and there are already jobs on
	 * the queue, we can't proceed.
	 */
	if ((xs->xs_control & XS_CTL_POLL) != 0 &&
	    TAILQ_FIRST(&chan->chan_queue) != NULL) {
		splx(s);
		xs->error = XS_DRIVER_STUFFUP;
		return (EAGAIN);
	}

	/*
	 * If we have an URGENT xfer, it's an error recovery command
	 * and it should just go on the head of the channel's queue.
	 */
	if (xs->xs_control & XS_CTL_URGENT) {
		TAILQ_INSERT_HEAD(&chan->chan_queue, xs, channel_q);
		goto out;
	}

	/*
	 * If this xfer has already been on the queue before, we
	 * need to reinsert it in the correct order.  That order is:
	 *
	 *	Immediately before the first xfer for this periph
	 *	with a requeuecnt less than xs->xs_requeuecnt.
	 *
	 *	Failing that, at the end of the queue.  (We'll end up
	 *	there naturally.)
	 */
	if (xs->xs_requeuecnt != 0) {
		for (qxs = TAILQ_FIRST(&chan->chan_queue); qxs != NULL;
		     qxs = TAILQ_NEXT(qxs, channel_q)) {
			if (qxs->xs_periph == xs->xs_periph &&
			    qxs->xs_requeuecnt < xs->xs_requeuecnt)
				break;
		}
		if (qxs != NULL) {
			TAILQ_INSERT_AFTER(&chan->chan_queue, qxs, xs,
			    channel_q);
			goto out;
		}
	}
	TAILQ_INSERT_TAIL(&chan->chan_queue, xs, channel_q);
 out:
	if (xs->xs_control & XS_CTL_THAW_PERIPH)
		scsipi_periph_thaw(xs->xs_periph, 1);
	splx(s);
	return (0);
}

/*
 * scsipi_run_queue:
 *
 *	Start as many xfers as possible running on the channel.
 */
void
scsipi_run_queue(chan)
	struct scsipi_channel *chan;
{
	struct scsipi_xfer *xs;
	struct scsipi_periph *periph;
	int s;

	for (;;) {
		s = splbio();

		/*
		 * If the channel is frozen, we can't do any work right
		 * now.
		 */
		if (chan->chan_qfreeze != 0) {
			splx(s);
			return;
		}

		/*
		 * Look for work to do, and make sure we can do it.
		 */
		for (xs = TAILQ_FIRST(&chan->chan_queue); xs != NULL;
		     xs = TAILQ_NEXT(xs, channel_q)) {
			periph = xs->xs_periph;

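			/*
			 * Skip a periph that is at its opening limit,
			 * frozen, or already running an untagged command
			 * (only one untagged command may be outstanding
			 * per periph).
			 */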
			if (periph->periph_active > periph->periph_openings ||
			    periph->periph_qfreeze != 0 ||
			    (periph->periph_flags & PERIPH_UNTAG) != 0)
				continue;

			if ((periph->periph_flags &
			    (PERIPH_RECOVERING | PERIPH_SENSE)) != 0 &&
			    (xs->xs_control & XS_CTL_URGENT) == 0)
				continue;

			/*
			 * We can issue this xfer!
			 */
			goto got_one;
		}

		/*
		 * Can't find any work to do right now.
		 */
		splx(s);
		return;

 got_one:
		/*
		 * Have an xfer to run.  Allocate a resource from
		 * the adapter to run it.  If we can't allocate that
		 * resource, we don't dequeue the xfer.
		 */
		if (scsipi_get_resource(chan) == 0) {
			/*
			 * Adapter is out of resources.  If the adapter
			 * supports it, attempt to grow them.
			 */
			if (scsipi_grow_resources(chan) == 0) {
				/*
				 * Wasn't able to grow resources,
				 * nothing more we can do.
				 */
				if (xs->xs_control & XS_CTL_POLL) {
					scsipi_printaddr(xs->xs_periph);
1583 printf("polling command but no "
1584 "adapter resources");
					/* We'll panic shortly... */
				}
				splx(s);
				return;
			}
			/*
			 * scsipi_grow_resources() allocated the resource
			 * for us.
			 */
		}

		/*
		 * We have a resource to run this xfer, do it!
		 */
		TAILQ_REMOVE(&chan->chan_queue, xs, channel_q);

		/*
		 * If the command is to be tagged, allocate a tag ID
		 * for it.
		 */
		if (XS_CTL_TAGTYPE(xs) != 0)
			scsipi_get_tag(xs);
		else
			periph->periph_flags |= PERIPH_UNTAG;
		splx(s);

		scsipi_adapter_request(chan, ADAPTER_REQ_RUN_XFER, xs);
	}
#ifdef DIAGNOSTIC
	panic("scsipi_run_queue: impossible");
#endif
}

/*
 * scsipi_execute_xs:
 *
 *	Begin execution of an xfer, waiting for it to complete, if necessary.
 */
int
scsipi_execute_xs(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_periph *periph = xs->xs_periph;
	struct scsipi_channel *chan = periph->periph_channel;
	int async, poll, retries, error, s;

	xs->xs_status &= ~XS_STS_DONE;
	xs->error = XS_NOERROR;
	xs->resid = xs->datalen;
	xs->status = SCSI_OK;

#ifdef SCSIPI_DEBUG
	if (xs->xs_periph->periph_dbflags & SCSIPI_DB3) {
		printf("scsipi_execute_xs: ");
		show_scsipi_xs(xs);
		printf("\n");
	}
#endif

	/*
	 * Deal with command tagging:
	 *
	 *	- If the device's current operating mode doesn't
	 *	  include tagged queueing, clear the tag mask.
	 *
	 *	- If the device's current operating mode *does*
	 *	  include tagged queueing, set the tag_type in
	 *	  the xfer to the appropriate byte for the tag
	 *	  message.
	 */
	if ((PERIPH_XFER_MODE(periph) & PERIPH_CAP_TQING) == 0 ||
	    (xs->xs_control & XS_CTL_REQSENSE)) {
		xs->xs_control &= ~XS_CTL_TAGMASK;
		xs->xs_tag_type = 0;
	} else {
		/*
		 * If the request doesn't specify a tag, give Head
		 * tags to URGENT operations and Ordered tags to
		 * everything else.
		 */
		if (XS_CTL_TAGTYPE(xs) == 0) {
			if (xs->xs_control & XS_CTL_URGENT)
				xs->xs_control |= XS_CTL_HEAD_TAG;
			else
				xs->xs_control |= XS_CTL_ORDERED_TAG;
		}

		switch (XS_CTL_TAGTYPE(xs)) {
		case XS_CTL_ORDERED_TAG:
			xs->xs_tag_type = MSG_ORDERED_Q_TAG;
			break;

		case XS_CTL_SIMPLE_TAG:
			xs->xs_tag_type = MSG_SIMPLE_Q_TAG;
			break;

		case XS_CTL_HEAD_TAG:
			xs->xs_tag_type = MSG_HEAD_OF_Q_TAG;
			break;

		default:
			scsipi_printaddr(periph);
			printf("invalid tag mask 0x%08x\n",
			    XS_CTL_TAGTYPE(xs));
			panic("scsipi_execute_xs");
		}
	}

	/*
	 * If we don't yet have a completion thread, or we are to poll for
	 * completion, clear the ASYNC flag.
	 */
	if (chan->chan_thread == NULL || (xs->xs_control & XS_CTL_POLL) != 0)
		xs->xs_control &= ~XS_CTL_ASYNC;

	async = (xs->xs_control & XS_CTL_ASYNC);
	poll = (xs->xs_control & XS_CTL_POLL);
	retries = xs->xs_retries;		/* for polling commands */

#ifdef DIAGNOSTIC
	if (async != 0 && xs->bp == NULL)
		panic("scsipi_execute_xs: XS_CTL_ASYNC but no buf");
#endif

	/*
	 * Enqueue the transfer.  If we're not polling for completion, this
	 * should ALWAYS return `no error'.
	 */
 try_again:
	error = scsipi_enqueue(xs);
	if (error) {
		if (poll == 0) {
			scsipi_printaddr(periph);
			printf("not polling, but enqueue failed with %d\n",
			    error);
			panic("scsipi_execute_xs");
		}

		scsipi_printaddr(periph);
		printf("failed to enqueue polling command");
		if (retries != 0) {
			printf(", retrying...\n");
			delay(1000000);
			retries--;
			goto try_again;
		}
		printf("\n");
		goto free_xs;
	}

 restarted:
	scsipi_run_queue(chan);

	/*
	 * The xfer is enqueued, and possibly running.  If it's to be
	 * completed asynchronously, just return now.
	 */
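	/*
	 * EJUSTRETURN hands ownership of the xfer to the completion
	 * path; scsipi_complete() will release it.
	 */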
	if (async)
		return (EJUSTRETURN);

	/*
	 * Not an asynchronous command; wait for it to complete.
	 */
	while ((xs->xs_status & XS_STS_DONE) == 0) {
		if (poll) {
			scsipi_printaddr(periph);
			printf("polling command not done\n");
			panic("scsipi_execute_xs");
		}
		(void) tsleep(xs, PRIBIO, "xscmd", 0);
	}

	/*
	 * Command is complete.  scsipi_done() has awakened us to perform
	 * the error handling.
	 */
	error = scsipi_complete(xs);
	if (error == ERESTART)
		goto restarted;

	/*
	 * Command completed successfully or fatal error occurred.  Fall
	 * into....
	 */
 free_xs:
	s = splbio();
	scsipi_put_xs(xs);
	splx(s);

	/*
	 * Kick the queue, keep it running in case it stopped for some
	 * reason.
	 */
	scsipi_run_queue(chan);

	return (error);
}

/*
 * scsipi_completion_thread:
 *
 *	This is the completion thread.  We wait for errors on
 *	asynchronous xfers, and perform the error handling
 *	function, restarting the command, if necessary.
 */
void
scsipi_completion_thread(arg)
	void *arg;
{
	struct scsipi_channel *chan = arg;
	struct scsipi_xfer *xs;
	int s;

	for (;;) {
		s = splbio();
		xs = TAILQ_FIRST(&chan->chan_complete);
		if (xs == NULL &&
		    (chan->chan_flags & SCSIPI_CHAN_SHUTDOWN) == 0) {
			splx(s);
			(void) tsleep(&chan->chan_complete, PRIBIO,
			    "sccomp", 0);
			continue;
		}
		if (chan->chan_flags & SCSIPI_CHAN_SHUTDOWN) {
			splx(s);
			break;
		}
		TAILQ_REMOVE(&chan->chan_complete, xs, channel_q);
		splx(s);

		/*
		 * Have an xfer with an error; process it.
		 */
		(void) scsipi_complete(xs);

		/*
		 * Kick the queue; keep it running if it was stopped
		 * for some reason.
		 */
		scsipi_run_queue(chan);
	}

	chan->chan_thread = NULL;

	/* In case parent is waiting for us to exit. */
	wakeup(&chan->chan_thread);

	kthread_exit(0);
}

/*
 * scsipi_create_completion_thread:
 *
 *	Callback to actually create the completion thread.
 */
void
scsipi_create_completion_thread(arg)
	void *arg;
{
	struct scsipi_channel *chan = arg;
	struct scsipi_adapter *adapt = chan->chan_adapter;

	if (kthread_create1(scsipi_completion_thread, chan,
	    &chan->chan_thread, "%s:%d", adapt->adapt_dev->dv_xname,
	    chan->chan_channel)) {
		printf("%s: unable to create completion thread for "
		    "channel %d\n", adapt->adapt_dev->dv_xname,
		    chan->chan_channel);
		panic("scsipi_create_completion_thread");
	}
}

/*
 * scsipi_async_event:
 *
 *	Handle an asynchronous event from an adapter.
 */
void
scsipi_async_event(chan, event, arg)
	struct scsipi_channel *chan;
	scsipi_async_event_t event;
	void *arg;
{
	int s;

	s = splbio();
	switch (event) {
	case ASYNC_EVENT_MAX_OPENINGS:
		scsipi_async_event_max_openings(chan,
		    (struct scsipi_max_openings *)arg);
		break;

	case ASYNC_EVENT_XFER_MODE:
		scsipi_async_event_xfer_mode(chan,
		    (struct scsipi_xfer_mode *)arg);
		break;
	case ASYNC_EVENT_RESET:
		scsipi_async_event_channel_reset(chan);
		break;
	}
	splx(s);
}

/*
 * scsipi_print_xfer_mode:
 *
 *	Print a periph's capabilities.
 */
void
scsipi_print_xfer_mode(periph)
	struct scsipi_periph *periph;
{
	int period, freq, speed, mbs;

	if ((periph->periph_flags & PERIPH_MODE_VALID) == 0)
		return;

	printf("%s: ", periph->periph_dev->dv_xname);
	if (periph->periph_mode & PERIPH_CAP_SYNC) {
		period = scsipi_sync_factor_to_period(periph->periph_period);
		printf("Sync (%d.%dns offset %d)",
		    period / 10, period % 10, periph->periph_offset);
	} else
		printf("Async");

	if (periph->periph_mode & PERIPH_CAP_WIDE32)
		printf(", 32-bit");
	else if (periph->periph_mode & PERIPH_CAP_WIDE16)
		printf(", 16-bit");
	else
		printf(", 8-bit");

	if (periph->periph_mode & PERIPH_CAP_SYNC) {
		freq = scsipi_sync_factor_to_freq(periph->periph_period);
		speed = freq;
		if (periph->periph_mode & PERIPH_CAP_WIDE32)
			speed *= 4;
		else if (periph->periph_mode & PERIPH_CAP_WIDE16)
			speed *= 2;
		mbs = speed / 1000;
		if (mbs > 0)
			printf(" (%d.%03dMB/s)", mbs, speed % 1000);
		else
			printf(" (%dKB/s)", speed % 1000);
	}

	printf(" transfers");

	if (periph->periph_mode & PERIPH_CAP_TQING)
		printf(", tagged queueing");

	printf("\n");
}

/*
 * scsipi_async_event_max_openings:
 *
 *	Update the maximum number of outstanding commands a
 *	device may have.
 */
void
scsipi_async_event_max_openings(chan, mo)
	struct scsipi_channel *chan;
	struct scsipi_max_openings *mo;
{
	struct scsipi_periph *periph;
	int minlun, maxlun;

	if (mo->mo_lun == -1) {
		/*
		 * Wildcarded; apply it to all LUNs.
		 */
		minlun = 0;
		maxlun = chan->chan_nluns - 1;
	} else
		minlun = maxlun = mo->mo_lun;

	for (; minlun <= maxlun; minlun++) {
		periph = scsipi_lookup_periph(chan, mo->mo_target, minlun);
		if (periph == NULL)
			continue;

		if (mo->mo_openings < periph->periph_openings)
			periph->periph_openings = mo->mo_openings;
		else if (mo->mo_openings > periph->periph_openings &&
		    (periph->periph_flags & PERIPH_GROW_OPENINGS) != 0)
			periph->periph_openings = mo->mo_openings;
	}
}

/*
 * scsipi_async_event_xfer_mode:
 *
 *	Update the xfer mode for all periphs sharing the
 *	specified I_T Nexus.
 */
void
scsipi_async_event_xfer_mode(chan, xm)
	struct scsipi_channel *chan;
	struct scsipi_xfer_mode *xm;
{
	struct scsipi_periph *periph;
	int lun, announce, mode, period, offset;

	for (lun = 0; lun < chan->chan_nluns; lun++) {
		periph = scsipi_lookup_periph(chan, xm->xm_target, lun);
		if (periph == NULL)
			continue;
		announce = 0;

		/*
		 * Clamp the xfer mode down to this periph's capabilities.
		 */
		mode = xm->xm_mode & periph->periph_cap;
		if (mode & PERIPH_CAP_SYNC) {
			period = xm->xm_period;
			offset = xm->xm_offset;
		} else {
			period = 0;
			offset = 0;
		}

		/*
		 * If we do not have a valid xfer mode yet, or the parameters
		 * are different, announce them.
		 */
		if ((periph->periph_flags & PERIPH_MODE_VALID) == 0 ||
		    periph->periph_mode != mode ||
		    periph->periph_period != period ||
		    periph->periph_offset != offset)
			announce = 1;

		periph->periph_mode = mode;
		periph->periph_period = period;
		periph->periph_offset = offset;
		periph->periph_flags |= PERIPH_MODE_VALID;

		if (announce)
			scsipi_print_xfer_mode(periph);
	}
}

/*
 * scsipi_set_xfer_mode:
 *
 *	Set the xfer mode for the specified I_T Nexus.
 */
void
scsipi_set_xfer_mode(chan, target, immed)
	struct scsipi_channel *chan;
	int target, immed;
{
	struct scsipi_xfer_mode xm;
	struct scsipi_periph *itperiph;
	int lun, s;

	/*
	 * Go to the minimal xfer mode.
	 */
	xm.xm_target = target;
	xm.xm_mode = 0;
	xm.xm_period = 0;			/* ignored */
	xm.xm_offset = 0;			/* ignored */

	/*
	 * Find the first LUN we know about on this I_T Nexus.
	 */
	for (lun = 0; lun < chan->chan_nluns; lun++) {
		itperiph = scsipi_lookup_periph(chan, target, lun);
		if (itperiph != NULL)
			break;
	}
	if (itperiph != NULL)
		xm.xm_mode = itperiph->periph_cap;

	/*
	 * Now issue the request to the adapter.
	 */
	s = splbio();
	scsipi_adapter_request(chan, ADAPTER_REQ_SET_XFER_MODE, &xm);
	splx(s);

	/*
	 * If we want this to happen immediately, issue a dummy command,
	 * since most adapters can't really negotiate unless they're
	 * executing a job.
	 */
	if (immed != 0 && itperiph != NULL) {
		(void) scsipi_test_unit_ready(itperiph,
		    XS_CTL_DISCOVERY | XS_CTL_IGNORE_ILLEGAL_REQUEST |
		    XS_CTL_IGNORE_NOT_READY |
		    XS_CTL_IGNORE_MEDIA_CHANGE);
	}
}

/*
 * scsipi_async_event_channel_reset:
 *
 *	Handle a SCSI bus reset.
 */
void
scsipi_async_event_channel_reset(chan)
	struct scsipi_channel *chan;
{
	struct scsipi_xfer *xs, *xs_next;
	struct scsipi_periph *periph;
	int target, lun;

	/*
	 * Channel has been reset.  Also mark pending REQUEST_SENSE
	 * commands as reset, since their sense data is no longer
	 * available.
	 */

	for (xs = TAILQ_FIRST(&chan->chan_queue); xs != NULL; xs = xs_next) {
		xs_next = TAILQ_NEXT(xs, channel_q);
		if (xs->xs_control & XS_CTL_REQSENSE) {
			TAILQ_REMOVE(&chan->chan_queue, xs, channel_q);
			xs->error = XS_RESET;
			scsipi_done(xs);
		}
	}
	/* Catch xs with pending sense which may not have a REQSENSE xs yet */
	for (target = 0; target < chan->chan_ntargets; target++) {
		if (target == chan->chan_id)
			continue;
		for (lun = 0; lun < chan->chan_nluns; lun++) {
			periph = chan->chan_periphs[target][lun];
			if (periph) {
				xs = periph->periph_xscheck;
				if (xs)
					xs->error = XS_RESET;
			}
		}
	}
}

/*
 * scsipi_adapter_addref:
 *
 *	Add a reference to the adapter pointed to by the provided
 *	link, enabling the adapter if necessary.
 */
int
scsipi_adapter_addref(adapt)
	struct scsipi_adapter *adapt;
{
	int s, error = 0;

	s = splbio();
	if (adapt->adapt_refcnt++ == 0 && adapt->adapt_enable != NULL) {
		error = (*adapt->adapt_enable)(adapt->adapt_dev, 1);
		if (error)
			adapt->adapt_refcnt--;
	}
	splx(s);
	return (error);
}

/*
 * scsipi_adapter_delref:
 *
 *	Delete a reference to the adapter pointed to by the provided
 *	link, disabling the adapter if possible.
 */
void
scsipi_adapter_delref(adapt)
	struct scsipi_adapter *adapt;
{
	int s;

	s = splbio();
	if (adapt->adapt_refcnt-- == 1 && adapt->adapt_enable != NULL)
		(void) (*adapt->adapt_enable)(adapt->adapt_dev, 0);
	splx(s);
}

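/*
 * Sync factors that do not follow the generic factor * 4ns rule;
 * ss_period is stored in units of ns * 10.
 */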
struct scsipi_syncparam {
	int	ss_factor;
	int	ss_period;	/* ns * 10 */
} scsipi_syncparams[] = {
	{ 0x0a,		250 },
	{ 0x0b,		303 },
	{ 0x0c,		500 },
};
const int scsipi_nsyncparams =
    sizeof(scsipi_syncparams) / sizeof(scsipi_syncparams[0]);

int
scsipi_sync_period_to_factor(period)
	int period;	/* ns * 10 */
{
	int i;

	for (i = 0; i < scsipi_nsyncparams; i++) {
		if (period <= scsipi_syncparams[i].ss_period)
			return (scsipi_syncparams[i].ss_factor);
	}

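	/* Not a special factor; generically, factor = period(ns) / 4. */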
	return ((period / 10) / 4);
}

int
scsipi_sync_factor_to_period(factor)
	int factor;
{
	int i;

	for (i = 0; i < scsipi_nsyncparams; i++) {
		if (factor == scsipi_syncparams[i].ss_factor)
			return (scsipi_syncparams[i].ss_period);
	}

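	/* Not a special factor; generically, period(ns) = factor * 4. */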
	return ((factor * 4) * 10);
}

int
scsipi_sync_factor_to_freq(factor)
	int factor;
{
	int i;

	for (i = 0; i < scsipi_nsyncparams; i++) {
		if (factor == scsipi_syncparams[i].ss_factor)
			return (10000000 / scsipi_syncparams[i].ss_period);
	}

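	/* The period is in units of ns * 10, so the result is in kHz. */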
	return (10000000 / ((factor * 4) * 10));
}

#ifdef SCSIPI_DEBUG
/*
 * Given a scsipi_xfer, dump the request, in all its glory
 */
void
show_scsipi_xs(xs)
	struct scsipi_xfer *xs;
{

	printf("xs(%p): ", xs);
	printf("xs_control(0x%08x)", xs->xs_control);
	printf("xs_status(0x%08x)", xs->xs_status);
	printf("periph(%p)", xs->xs_periph);
	printf("retr(0x%x)", xs->xs_retries);
	printf("timo(0x%x)", xs->timeout);
	printf("cmd(%p)", xs->cmd);
	printf("len(0x%x)", xs->cmdlen);
	printf("data(%p)", xs->data);
	printf("len(0x%x)", xs->datalen);
	printf("res(0x%x)", xs->resid);
	printf("err(0x%x)", xs->error);
	printf("bp(%p)", xs->bp);
	show_scsipi_cmd(xs);
}

void
show_scsipi_cmd(xs)
	struct scsipi_xfer *xs;
{
	u_char *b = (u_char *) xs->cmd;
	int i = 0;

	scsipi_printaddr(xs->xs_periph);
	printf(" command: ");

	if ((xs->xs_control & XS_CTL_RESET) == 0) {
		while (i < xs->cmdlen) {
			if (i)
				printf(",");
			printf("0x%x", b[i++]);
		}
		printf("-[%d bytes]\n", xs->datalen);
		if (xs->datalen)
			show_mem(xs->data, min(64, xs->datalen));
	} else
		printf("-RESET-\n");
}

void
show_mem(address, num)
	u_char *address;
	int num;
{
	int x;

	printf("------------------------------");
	for (x = 0; x < num; x++) {
		if ((x % 16) == 0)
			printf("\n%03d: ", x);
		printf("%02x ", *address++);
	}
	printf("\n------------------------------\n");
}
#endif /* SCSIPI_DEBUG */