scsipi_base.c revision 1.26.2.15 1 /* $NetBSD: scsipi_base.c,v 1.26.2.15 2001/04/22 16:40:29 bouyer Exp $ */
2
3 /*-
4 * Copyright (c) 1998, 1999, 2000 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Charles M. Hannum; by Jason R. Thorpe of the Numerical Aerospace
9 * Simulation Facility, NASA Ames Research Center.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution.
19 * 3. All advertising materials mentioning features or use of this software
20 * must display the following acknowledgement:
21 * This product includes software developed by the NetBSD
22 * Foundation, Inc. and its contributors.
23 * 4. Neither the name of The NetBSD Foundation nor the names of its
24 * contributors may be used to endorse or promote products derived
25 * from this software without specific prior written permission.
26 *
27 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
28 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
29 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
30 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
31 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
32 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
33 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
34 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
35 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
36 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
37 * POSSIBILITY OF SUCH DAMAGE.
38 */
39
40 #include "opt_scsi.h"
41
42 #include <sys/types.h>
43 #include <sys/param.h>
44 #include <sys/systm.h>
45 #include <sys/kernel.h>
46 #include <sys/buf.h>
47 #include <sys/uio.h>
48 #include <sys/malloc.h>
49 #include <sys/pool.h>
50 #include <sys/errno.h>
51 #include <sys/device.h>
52 #include <sys/proc.h>
53 #include <sys/kthread.h>
54
55 #include <dev/scsipi/scsipi_all.h>
56 #include <dev/scsipi/scsipi_disk.h>
57 #include <dev/scsipi/scsipiconf.h>
58 #include <dev/scsipi/scsipi_base.h>
59
60 #include <dev/scsipi/scsi_all.h>
61 #include <dev/scsipi/scsi_message.h>
62
63 int scsipi_complete __P((struct scsipi_xfer *));
64 void scsipi_request_sense __P((struct scsipi_xfer *));
65 int scsipi_enqueue __P((struct scsipi_xfer *));
66 void scsipi_run_queue __P((struct scsipi_channel *chan));
67
68 void scsipi_completion_thread __P((void *));
69
70 void scsipi_get_tag __P((struct scsipi_xfer *));
71 void scsipi_put_tag __P((struct scsipi_xfer *));
72
73 int scsipi_get_resource __P((struct scsipi_channel *));
74 void scsipi_put_resource __P((struct scsipi_channel *));
75 __inline int scsipi_grow_resources __P((struct scsipi_channel *));
76
77 void scsipi_async_event_max_openings __P((struct scsipi_channel *,
78 struct scsipi_max_openings *));
79 void scsipi_async_event_xfer_mode __P((struct scsipi_channel *,
80 struct scsipi_xfer_mode *));
81 void scsipi_async_event_channel_reset __P((struct scsipi_channel *));
82
83 struct pool scsipi_xfer_pool;
84
85 /*
86 * scsipi_init:
87 *
88 * Called when a scsibus or atapibus is attached to the system
89 * to initialize shared data structures.
90 */
91 void
92 scsipi_init()
93 {
94 static int scsipi_init_done;
95
96 if (scsipi_init_done)
97 return;
98 scsipi_init_done = 1;
99
100 /* Initialize the scsipi_xfer pool. */
101 pool_init(&scsipi_xfer_pool, sizeof(struct scsipi_xfer), 0,
102 0, 0, "scxspl", 0, NULL, NULL, M_DEVBUF);
103 }
104
105 /*
106 * scsipi_channel_init:
107 *
108 * Initialize a scsipi_channel when it is attached.
109 */
110 int
111 scsipi_channel_init(chan)
112 struct scsipi_channel *chan;
113 {
114 size_t nbytes;
115 int i;
116
117 /* Initialize shared data. */
118 scsipi_init();
119
120 /* Initialize the queues. */
121 TAILQ_INIT(&chan->chan_queue);
122 TAILQ_INIT(&chan->chan_complete);
123
124 nbytes = chan->chan_ntargets * sizeof(struct scsipi_periph **);
125 chan->chan_periphs = malloc(nbytes, M_DEVBUF, M_NOWAIT);
126 if (chan->chan_periphs == NULL)
127 return (ENOMEM);
128
129
130 nbytes = chan->chan_nluns * sizeof(struct scsipi_periph *);
131 for (i = 0; i < chan->chan_ntargets; i++) {
132 chan->chan_periphs[i] = malloc(nbytes, M_DEVBUF, M_NOWAIT);
133 if (chan->chan_periphs[i] == NULL) {
134 while (--i >= 0) {
135 free(chan->chan_periphs[i], M_DEVBUF);
136 }
137 return (ENOMEM);
138 }
139 memset(chan->chan_periphs[i], 0, nbytes);
140 }
141
142 /*
143 * Create the asynchronous completion thread.
144 */
145 kthread_create(scsipi_create_completion_thread, chan);
146 return (0);
147 }
148
/*
 * scsipi_channel_shutdown:
 *
 *	Shutdown a scsipi_channel: stop its completion thread and
 *	wait for it to exit.
 */
void
scsipi_channel_shutdown(chan)
	struct scsipi_channel *chan;
{

	/*
	 * Shut down the completion thread: raise the shutdown flag,
	 * then wake the thread (it sleeps on chan_complete).
	 */
	chan->chan_flags |= SCSIPI_CHAN_SHUTDOWN;
	wakeup(&chan->chan_complete);

	/*
	 * Now wait for the thread to exit.
	 * NOTE(review): assumes the completion thread clears
	 * chan_thread and wakes &chan->chan_thread on exit --
	 * confirm in scsipi_completion_thread().
	 */
	while (chan->chan_thread != NULL)
		(void) tsleep(&chan->chan_thread, PRIBIO, "scshut", 0);
}
171
172 /*
173 * scsipi_insert_periph:
174 *
175 * Insert a periph into the channel.
176 */
177 void
178 scsipi_insert_periph(chan, periph)
179 struct scsipi_channel *chan;
180 struct scsipi_periph *periph;
181 {
182 int s;
183
184 s = splbio();
185 chan->chan_periphs[periph->periph_target][periph->periph_lun] = periph;
186 splx(s);
187 }
188
189 /*
190 * scsipi_remove_periph:
191 *
192 * Remove a periph from the channel.
193 */
194 void
195 scsipi_remove_periph(chan, periph)
196 struct scsipi_channel *chan;
197 struct scsipi_periph *periph;
198 {
199 int s;
200
201 s = splbio();
202 chan->chan_periphs[periph->periph_target][periph->periph_lun] = NULL;
203 splx(s);
204 }
205
206 /*
207 * scsipi_lookup_periph:
208 *
209 * Lookup a periph on the specified channel.
210 */
211 struct scsipi_periph *
212 scsipi_lookup_periph(chan, target, lun)
213 struct scsipi_channel *chan;
214 int target, lun;
215 {
216 struct scsipi_periph *periph;
217 int s;
218
219 if (target >= chan->chan_ntargets ||
220 lun >= chan->chan_nluns)
221 return (NULL);
222
223 s = splbio();
224 periph = chan->chan_periphs[target][lun];
225 splx(s);
226
227 return (periph);
228 }
229
230 /*
231 * scsipi_get_resource:
232 *
233 * Allocate a single xfer `resource' from the channel.
234 *
235 * NOTE: Must be called at splbio().
236 */
237 int
238 scsipi_get_resource(chan)
239 struct scsipi_channel *chan;
240 {
241 struct scsipi_adapter *adapt = chan->chan_adapter;
242
243 if (chan->chan_flags & SCSIPI_CHAN_OPENINGS) {
244 if (chan->chan_openings > 0) {
245 chan->chan_openings--;
246 return (1);
247 }
248 return (0);
249 }
250
251 if (adapt->adapt_openings > 0) {
252 adapt->adapt_openings--;
253 return (1);
254 }
255 return (0);
256 }
257
258 /*
259 * scsipi_grow_resources:
260 *
261 * Attempt to grow resources for a channel. If this succeeds,
262 * we allocate one for our caller.
263 *
264 * NOTE: Must be called at splbio().
265 */
266 __inline int
267 scsipi_grow_resources(chan)
268 struct scsipi_channel *chan;
269 {
270
271 if (chan->chan_flags & SCSIPI_CHAN_CANGROW) {
272 scsipi_adapter_request(chan, ADAPTER_REQ_GROW_RESOURCES, NULL);
273 return (scsipi_get_resource(chan));
274 }
275
276 return (0);
277 }
278
279 /*
280 * scsipi_put_resource:
281 *
282 * Free a single xfer `resource' to the channel.
283 *
284 * NOTE: Must be called at splbio().
285 */
286 void
287 scsipi_put_resource(chan)
288 struct scsipi_channel *chan;
289 {
290 struct scsipi_adapter *adapt = chan->chan_adapter;
291
292 if (chan->chan_flags & SCSIPI_CHAN_OPENINGS)
293 chan->chan_openings++;
294 else
295 adapt->adapt_openings++;
296 }
297
/*
 * scsipi_get_tag:
 *
 *	Get a tag ID for the specified xfer from the periph's
 *	free-tag bitmap and store it in xs->xs_tag_id.
 *
 *	NOTE: Must be called at splbio().
 */
void
scsipi_get_tag(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_periph *periph = xs->xs_periph;
	int word, bit, tag;

	/*
	 * Find the first bitmap word with a free tag.  ffs() returns
	 * the 1-based index of the lowest set bit, or 0 if the word
	 * is all-zero (no free tags in that word).
	 */
	for (word = 0; word < PERIPH_NTAGWORDS; word++) {
		bit = ffs(periph->periph_freetags[word]);
		if (bit != 0)
			break;
	}
#ifdef DIAGNOSTIC
	if (word == PERIPH_NTAGWORDS) {
		scsipi_printaddr(periph);
		printf("no free tags\n");
		panic("scsipi_get_tag");
	}
#endif
	/*
	 * NOTE(review): if the bitmap were exhausted and DIAGNOSTIC
	 * is not defined, `bit' would be 0 here and the shift below
	 * would misbehave; callers must guarantee a free tag exists.
	 */

	/* Convert ffs()'s 1-based result to a 0-based bit index. */
	bit -= 1;
	periph->periph_freetags[word] &= ~(1 << bit);
	/* 32 tags per bitmap word. */
	tag = (word << 5) | bit;

	/* XXX Should eventually disallow this completely. */
	if (tag >= periph->periph_openings) {
		scsipi_printaddr(periph);
		printf("WARNING: tag %d greater than available openings %d\n",
		    tag, periph->periph_openings);
	}

	xs->xs_tag_id = tag;
}
338
339 /*
340 * scsipi_put_tag:
341 *
342 * Put the tag ID for the specified xfer back into the pool.
343 *
344 * NOTE: Must be called at splbio().
345 */
346 void
347 scsipi_put_tag(xs)
348 struct scsipi_xfer *xs;
349 {
350 struct scsipi_periph *periph = xs->xs_periph;
351 int word, bit;
352
353 word = xs->xs_tag_id >> 5;
354 bit = xs->xs_tag_id & 0x1f;
355
356 periph->periph_freetags[word] |= (1 << bit);
357 }
358
359 /*
360 * scsipi_get_xs:
361 *
362 * Allocate an xfer descriptor and associate it with the
363 * specified peripherial. If the peripherial has no more
364 * available command openings, we either block waiting for
365 * one to become available, or fail.
366 */
367 struct scsipi_xfer *
368 scsipi_get_xs(periph, flags)
369 struct scsipi_periph *periph;
370 int flags;
371 {
372 struct scsipi_xfer *xs;
373 int s;
374
375 SC_DEBUG(periph, SCSIPI_DB3, ("scsipi_get_xs\n"));
376
377 /*
378 * If we're cold, make sure we poll.
379 */
380 if (cold)
381 flags |= XS_CTL_NOSLEEP | XS_CTL_POLL;
382
383 #ifdef DIAGNOSTIC
384 /*
385 * URGENT commands can never be ASYNC.
386 */
387 if ((flags & (XS_CTL_URGENT|XS_CTL_ASYNC)) ==
388 (XS_CTL_URGENT|XS_CTL_ASYNC)) {
389 scsipi_printaddr(periph);
390 printf("URGENT and ASYNC\n");
391 panic("scsipi_get_xs");
392 }
393 #endif
394
395 s = splbio();
396 /*
397 * Wait for a command opening to become available. Rules:
398 *
399 * - All xfers must wait for an available opening.
400 * Exception: URGENT xfers can proceed when
401 * active == openings, because we use the opening
402 * of the command we're recovering for.
403 * - if the periph has sense pending, only URGENT & REQSENSE
404 * xfers may proceed.
405 *
406 * - If the periph is recovering, only URGENT xfers may
407 * proceed.
408 *
409 * - If the periph is currently executing a recovery
410 * command, URGENT commands must block, because only
411 * one recovery command can execute at a time.
412 */
413 for (;;) {
414 if (flags & XS_CTL_URGENT) {
415 if (periph->periph_active > periph->periph_openings)
416 goto wait_for_opening;
417 if (periph->periph_flags & PERIPH_SENSE) {
418 if ((flags & XS_CTL_REQSENSE) == 0)
419 goto wait_for_opening;
420 } else {
421 if ((periph->periph_flags &
422 PERIPH_RECOVERY_ACTIVE) != 0)
423 goto wait_for_opening;
424 periph->periph_flags |= PERIPH_RECOVERY_ACTIVE;
425 }
426 break;
427 }
428 if (periph->periph_active >= periph->periph_openings ||
429 (periph->periph_flags & PERIPH_RECOVERING) != 0)
430 goto wait_for_opening;
431 periph->periph_active++;
432 break;
433
434 wait_for_opening:
435 if (flags & XS_CTL_NOSLEEP) {
436 splx(s);
437 return (NULL);
438 }
439 SC_DEBUG(periph, SCSIPI_DB3, ("sleeping\n"));
440 periph->periph_flags |= PERIPH_WAITING;
441 (void) tsleep(periph, PRIBIO, "getxs", 0);
442 }
443 SC_DEBUG(periph, SCSIPI_DB3, ("calling pool_get\n"));
444 xs = pool_get(&scsipi_xfer_pool,
445 ((flags & XS_CTL_NOSLEEP) != 0 ? PR_NOWAIT : PR_WAITOK));
446 if (xs == NULL) {
447 if (flags & XS_CTL_URGENT) {
448 if ((flags & XS_CTL_REQSENSE) == 0)
449 periph->periph_flags &= ~PERIPH_RECOVERY_ACTIVE;
450 } else
451 periph->periph_active--;
452 scsipi_printaddr(periph);
453 printf("unable to allocate %sscsipi_xfer\n",
454 (flags & XS_CTL_URGENT) ? "URGENT " : "");
455 }
456 splx(s);
457
458 SC_DEBUG(periph, SCSIPI_DB3, ("returning\n"));
459
460 if (xs != NULL) {
461 callout_init(&xs->xs_callout);
462 memset(xs, 0, sizeof(*xs));
463 xs->xs_periph = periph;
464 xs->xs_control = flags;
465 xs->xs_status = 0;
466 s = splbio();
467 TAILQ_INSERT_TAIL(&periph->periph_xferq, xs, device_q);
468 splx(s);
469 }
470 return (xs);
471 }
472
473 /*
474 * scsipi_put_xs:
475 *
476 * Release an xfer descriptor, decreasing the outstanding command
477 * count for the peripherial. If there is a thread waiting for
478 * an opening, wake it up. If not, kick any queued I/O the
479 * peripherial may have.
480 *
481 * NOTE: Must be called at splbio().
482 */
483 void
484 scsipi_put_xs(xs)
485 struct scsipi_xfer *xs;
486 {
487 struct scsipi_periph *periph = xs->xs_periph;
488 int flags = xs->xs_control;
489
490 SC_DEBUG(periph, SCSIPI_DB3, ("scsipi_free_xs\n"));
491
492 TAILQ_REMOVE(&periph->periph_xferq, xs, device_q);
493 pool_put(&scsipi_xfer_pool, xs);
494
495 #ifdef DIAGNOSTIC
496 if ((periph->periph_flags & PERIPH_RECOVERY_ACTIVE) != 0 &&
497 periph->periph_active == 0) {
498 scsipi_printaddr(periph);
499 printf("recovery without a command to recovery for\n");
500 panic("scsipi_put_xs");
501 }
502 #endif
503
504 if (flags & XS_CTL_URGENT) {
505 if ((flags & XS_CTL_REQSENSE) == 0)
506 periph->periph_flags &= ~PERIPH_RECOVERY_ACTIVE;
507 } else
508 periph->periph_active--;
509 if (periph->periph_active == 0 &&
510 (periph->periph_flags & PERIPH_WAITDRAIN) != 0) {
511 periph->periph_flags &= ~PERIPH_WAITDRAIN;
512 wakeup(&periph->periph_active);
513 }
514
515 if (periph->periph_flags & PERIPH_WAITING) {
516 periph->periph_flags &= ~PERIPH_WAITING;
517 wakeup(periph);
518 } else {
519 if (periph->periph_switch->psw_start != NULL) {
520 SC_DEBUG(periph, SCSIPI_DB2,
521 ("calling private start()\n"));
522 (*periph->periph_switch->psw_start)(periph);
523 }
524 }
525 }
526
527 /*
528 * scsipi_channel_freeze:
529 *
530 * Freeze a channel's xfer queue.
531 */
532 void
533 scsipi_channel_freeze(chan, count)
534 struct scsipi_channel *chan;
535 int count;
536 {
537 int s;
538
539 s = splbio();
540 chan->chan_qfreeze += count;
541 splx(s);
542 }
543
544 /*
545 * scsipi_channel_thaw:
546 *
547 * Thaw a channel's xfer queue.
548 */
549 void
550 scsipi_channel_thaw(chan, count)
551 struct scsipi_channel *chan;
552 int count;
553 {
554 int s;
555
556 s = splbio();
557 chan->chan_qfreeze -= count;
558 splx(s);
559 }
560
/*
 * scsipi_channel_timed_thaw:
 *
 *	Thaw a channel after some time has expired.  Intended for use
 *	as a timeout/callout handler (takes a void * argument that is
 *	the channel).
 */
void
scsipi_channel_timed_thaw(arg)
	void *arg;
{
	struct scsipi_channel *chan = arg;

	/* Undo exactly one freeze count. */
	scsipi_channel_thaw(chan, 1);

	/*
	 * Kick the channel's queue here.  Note, we're running in
	 * interrupt context (softclock), so the adapter driver
	 * had better not sleep.
	 */
	scsipi_run_queue(chan);
}
581
582 /*
583 * scsipi_periph_freeze:
584 *
585 * Freeze a device's xfer queue.
586 */
587 void
588 scsipi_periph_freeze(periph, count)
589 struct scsipi_periph *periph;
590 int count;
591 {
592 int s;
593
594 s = splbio();
595 periph->periph_qfreeze += count;
596 splx(s);
597 }
598
/*
 * scsipi_periph_thaw:
 *
 *	Thaw a device's xfer queue.  If the queue becomes fully
 *	thawed and a thread is waiting on the periph, wake it.
 */
void
scsipi_periph_thaw(periph, count)
	struct scsipi_periph *periph;
	int count;
{
	int s;

	s = splbio();
	periph->periph_qfreeze -= count;
	/*
	 * Wake a waiter (e.g. a thread blocked in scsipi_get_xs())
	 * only when the last freeze count has been released.
	 */
	if (periph->periph_qfreeze == 0 &&
	    (periph->periph_flags & PERIPH_WAITING) != 0)
		wakeup(periph);
	splx(s);
}
618
/*
 * scsipi_periph_timed_thaw:
 *
 *	Thaw a device after some time has expired.  Intended for use
 *	as a callout handler (takes a void * argument that is the
 *	periph).
 */
void
scsipi_periph_timed_thaw(arg)
	void *arg;
{
	struct scsipi_periph *periph = arg;

	/* Cancel any still-pending thaw callout before thawing. */
	callout_stop(&periph->periph_callout);
	scsipi_periph_thaw(periph, 1);

	/*
	 * Kick the channel's queue here.  Note, we're running in
	 * interrupt context (softclock), so the adapter driver
	 * had better not sleep.
	 */
	scsipi_run_queue(periph->periph_channel);
}
640
/*
 * scsipi_wait_drain:
 *
 *	Wait (sleep) until the periph has no xfers outstanding.
 */
void
scsipi_wait_drain(periph)
	struct scsipi_periph *periph;
{
	int s;

	s = splbio();
	/*
	 * PERIPH_WAITDRAIN asks scsipi_put_xs() to wake us when
	 * periph_active drops to zero; re-check after each wakeup.
	 */
	while (periph->periph_active != 0) {
		periph->periph_flags |= PERIPH_WAITDRAIN;
		(void) tsleep(&periph->periph_active, PRIBIO, "sxdrn", 0);
	}
	splx(s);
}
659
/*
 * scsipi_kill_pending:
 *
 *	Kill off all pending xfers for a periph, via the bus-specific
 *	kill routine, then wait for any remaining activity to drain.
 *
 *	NOTE: Must be called at splbio().
 */
void
scsipi_kill_pending(periph)
	struct scsipi_periph *periph;
{

	(*periph->periph_channel->chan_bustype->bustype_kill_pending)(periph);
#ifdef DIAGNOSTIC
	/* The bus-specific routine must have emptied the xfer queue. */
	if (TAILQ_FIRST(&periph->periph_xferq) != NULL)
		panic("scsipi_kill_pending");
#endif
	scsipi_wait_drain(periph);
}
679
680 /*
681 * scsipi_interpret_sense:
682 *
683 * Look at the returned sense and act on the error, determining
684 * the unix error number to pass back. (0 = report no error)
685 *
686 * NOTE: If we return ERESTART, we are expected to haved
687 * thawed the device!
688 *
689 * THIS IS THE DEFAULT ERROR HANDLER FOR SCSI DEVICES.
690 */
691 int
692 scsipi_interpret_sense(xs)
693 struct scsipi_xfer *xs;
694 {
695 struct scsipi_sense_data *sense;
696 struct scsipi_periph *periph = xs->xs_periph;
697 u_int8_t key;
698 u_int32_t info;
699 int error;
700 #ifndef SCSIVERBOSE
701 static char *error_mes[] = {
702 "soft error (corrected)",
703 "not ready", "medium error",
704 "non-media hardware failure", "illegal request",
705 "unit attention", "readonly device",
706 "no data found", "vendor unique",
707 "copy aborted", "command aborted",
708 "search returned equal", "volume overflow",
709 "verify miscompare", "unknown error key"
710 };
711 #endif
712
713 sense = &xs->sense.scsi_sense;
714 #ifdef SCSIPI_DEBUG
715 if (periph->periph_flags & SCSIPI_DB1) {
716 int count;
717 scsipi_printaddr(periph);
718 printf(" sense debug information:\n");
719 printf("\tcode 0x%x valid 0x%x\n",
720 sense->error_code & SSD_ERRCODE,
721 sense->error_code & SSD_ERRCODE_VALID ? 1 : 0);
722 printf("\tseg 0x%x key 0x%x ili 0x%x eom 0x%x fmark 0x%x\n",
723 sense->segment,
724 sense->flags & SSD_KEY,
725 sense->flags & SSD_ILI ? 1 : 0,
726 sense->flags & SSD_EOM ? 1 : 0,
727 sense->flags & SSD_FILEMARK ? 1 : 0);
728 printf("\ninfo: 0x%x 0x%x 0x%x 0x%x followed by %d "
729 "extra bytes\n",
730 sense->info[0],
731 sense->info[1],
732 sense->info[2],
733 sense->info[3],
734 sense->extra_len);
735 printf("\textra: ");
736 for (count = 0; count < ADD_BYTES_LIM(sense); count++)
737 printf("0x%x ", sense->cmd_spec_info[count]);
738 printf("\n");
739 }
740 #endif
741
742 /*
743 * If the periph has it's own error handler, call it first.
744 * If it returns a legit error value, return that, otherwise
745 * it wants us to continue with normal error processing.
746 */
747 if (periph->periph_switch->psw_error != NULL) {
748 SC_DEBUG(periph, SCSIPI_DB2,
749 ("calling private err_handler()\n"));
750 error = (*periph->periph_switch->psw_error)(xs);
751 if (error != EJUSTRETURN)
752 return (error);
753 }
754 /* otherwise use the default */
755 switch (sense->error_code & SSD_ERRCODE) {
756 /*
757 * If it's code 70, use the extended stuff and
758 * interpret the key
759 */
760 case 0x71: /* delayed error */
761 scsipi_printaddr(periph);
762 key = sense->flags & SSD_KEY;
763 printf(" DEFERRED ERROR, key = 0x%x\n", key);
764 /* FALLTHROUGH */
765 case 0x70:
766 if ((sense->error_code & SSD_ERRCODE_VALID) != 0)
767 info = _4btol(sense->info);
768 else
769 info = 0;
770 key = sense->flags & SSD_KEY;
771
772 switch (key) {
773 case SKEY_NO_SENSE:
774 case SKEY_RECOVERED_ERROR:
775 if (xs->resid == xs->datalen && xs->datalen) {
776 /*
777 * Why is this here?
778 */
779 xs->resid = 0; /* not short read */
780 }
781 case SKEY_EQUAL:
782 error = 0;
783 break;
784 case SKEY_NOT_READY:
785 if ((periph->periph_flags & PERIPH_REMOVABLE) != 0)
786 periph->periph_flags &= ~PERIPH_MEDIA_LOADED;
787 if ((xs->xs_control & XS_CTL_IGNORE_NOT_READY) != 0)
788 return (0);
789 if (sense->add_sense_code == 0x3A &&
790 sense->add_sense_code_qual == 0x00)
791 error = ENODEV; /* Medium not present */
792 else
793 error = EIO;
794 if ((xs->xs_control & XS_CTL_SILENT) != 0)
795 return (error);
796 break;
797 case SKEY_ILLEGAL_REQUEST:
798 if ((xs->xs_control &
799 XS_CTL_IGNORE_ILLEGAL_REQUEST) != 0)
800 return (0);
801 /*
802 * Handle the case where a device reports
803 * Logical Unit Not Supported during discovery.
804 */
805 if ((xs->xs_control & XS_CTL_DISCOVERY) != 0 &&
806 sense->add_sense_code == 0x25 &&
807 sense->add_sense_code_qual == 0x00)
808 return (EINVAL);
809 if ((xs->xs_control & XS_CTL_SILENT) != 0)
810 return (EIO);
811 error = EINVAL;
812 break;
813 case SKEY_UNIT_ATTENTION:
814 if (sense->add_sense_code == 0x29 &&
815 sense->add_sense_code_qual == 0x00) {
816 /* device or bus reset */
817 return (ERESTART);
818 }
819 if ((periph->periph_flags & PERIPH_REMOVABLE) != 0)
820 periph->periph_flags &= ~PERIPH_MEDIA_LOADED;
821 if ((xs->xs_control &
822 XS_CTL_IGNORE_MEDIA_CHANGE) != 0 ||
823 /* XXX Should reupload any transient state. */
824 (periph->periph_flags &
825 PERIPH_REMOVABLE) == 0) {
826 return (ERESTART);
827 }
828 if ((xs->xs_control & XS_CTL_SILENT) != 0)
829 return (EIO);
830 error = EIO;
831 break;
832 case SKEY_WRITE_PROTECT:
833 error = EROFS;
834 break;
835 case SKEY_BLANK_CHECK:
836 error = 0;
837 break;
838 case SKEY_ABORTED_COMMAND:
839 error = ERESTART;
840 break;
841 case SKEY_VOLUME_OVERFLOW:
842 error = ENOSPC;
843 break;
844 default:
845 error = EIO;
846 break;
847 }
848
849 #ifdef SCSIVERBOSE
850 if (key && (xs->xs_control & XS_CTL_SILENT) == 0)
851 scsipi_print_sense(xs, 0);
852 #else
853 if (key) {
854 scsipi_printaddr(periph);
855 printf("%s", error_mes[key - 1]);
856 if ((sense->error_code & SSD_ERRCODE_VALID) != 0) {
857 switch (key) {
858 case SKEY_NOT_READY:
859 case SKEY_ILLEGAL_REQUEST:
860 case SKEY_UNIT_ATTENTION:
861 case SKEY_WRITE_PROTECT:
862 break;
863 case SKEY_BLANK_CHECK:
864 printf(", requested size: %d (decimal)",
865 info);
866 break;
867 case SKEY_ABORTED_COMMAND:
868 if (xs->xs_retries)
869 printf(", retrying");
870 printf(", cmd 0x%x, info 0x%x",
871 xs->cmd->opcode, info);
872 break;
873 default:
874 printf(", info = %d (decimal)", info);
875 }
876 }
877 if (sense->extra_len != 0) {
878 int n;
879 printf(", data =");
880 for (n = 0; n < sense->extra_len; n++)
881 printf(" %02x",
882 sense->cmd_spec_info[n]);
883 }
884 printf("\n");
885 }
886 #endif
887 return (error);
888
889 /*
890 * Not code 70, just report it
891 */
892 default:
893 #if defined(SCSIDEBUG) || defined(DEBUG)
894 {
895 static char *uc = "undecodable sense error";
896 int i;
897 u_int8_t *cptr = (u_int8_t *) sense;
898 scsipi_printaddr(periph);
899 if (xs->cmd == &xs->cmdstore) {
900 printf("%s for opcode 0x%x, data=",
901 uc, xs->cmdstore.opcode);
902 } else {
903 printf("%s, data=", uc);
904 }
905 for (i = 0; i < sizeof (sense); i++)
906 printf(" 0x%02x", *(cptr++) & 0xff);
907 printf("\n");
908 }
909 #else
910
911 scsipi_printaddr(periph);
912 printf("Sense Error Code 0x%x",
913 sense->error_code & SSD_ERRCODE);
914 if ((sense->error_code & SSD_ERRCODE_VALID) != 0) {
915 struct scsipi_sense_data_unextended *usense =
916 (struct scsipi_sense_data_unextended *)sense;
917 printf(" at block no. %d (decimal)",
918 _3btol(usense->block));
919 }
920 printf("\n");
921 #endif
922 return (EIO);
923 }
924 }
925
926 /*
927 * scsipi_size:
928 *
929 * Find out from the device what its capacity is.
930 */
931 u_long
932 scsipi_size(periph, flags)
933 struct scsipi_periph *periph;
934 int flags;
935 {
936 struct scsipi_read_cap_data rdcap;
937 struct scsipi_read_capacity scsipi_cmd;
938
939 bzero(&scsipi_cmd, sizeof(scsipi_cmd));
940 scsipi_cmd.opcode = READ_CAPACITY;
941
942 /*
943 * If the command works, interpret the result as a 4 byte
944 * number of blocks
945 */
946 if (scsipi_command(periph, (struct scsipi_generic *)&scsipi_cmd,
947 sizeof(scsipi_cmd), (u_char *)&rdcap, sizeof(rdcap),
948 SCSIPIRETRIES, 20000, NULL,
949 flags | XS_CTL_DATA_IN | XS_CTL_DATA_ONSTACK) != 0) {
950 scsipi_printaddr(periph);
951 printf("could not get size\n");
952 return (0);
953 }
954
955 return (_4btol(rdcap.addr) + 1);
956 }
957
958 /*
959 * scsipi_test_unit_ready:
960 *
961 * Issue a `test unit ready' request.
962 */
963 int
964 scsipi_test_unit_ready(periph, flags)
965 struct scsipi_periph *periph;
966 int flags;
967 {
968 struct scsipi_test_unit_ready scsipi_cmd;
969
970 /* some ATAPI drives don't support TEST_UNIT_READY. Sigh */
971 if (periph->periph_quirks & PQUIRK_NOTUR)
972 return (0);
973
974 bzero(&scsipi_cmd, sizeof(scsipi_cmd));
975 scsipi_cmd.opcode = TEST_UNIT_READY;
976
977 return (scsipi_command(periph,
978 (struct scsipi_generic *)&scsipi_cmd, sizeof(scsipi_cmd),
979 0, 0, SCSIPIRETRIES, 10000, NULL, flags));
980 }
981
982 /*
983 * scsipi_inquire:
984 *
985 * Ask the device about itself.
986 */
987 int
988 scsipi_inquire(periph, inqbuf, flags)
989 struct scsipi_periph *periph;
990 struct scsipi_inquiry_data *inqbuf;
991 int flags;
992 {
993 struct scsipi_inquiry scsipi_cmd;
994
995 bzero(&scsipi_cmd, sizeof(scsipi_cmd));
996 scsipi_cmd.opcode = INQUIRY;
997 scsipi_cmd.length = sizeof(struct scsipi_inquiry_data);
998
999 return (scsipi_command(periph,
1000 (struct scsipi_generic *) &scsipi_cmd, sizeof(scsipi_cmd),
1001 (u_char *) inqbuf, sizeof(struct scsipi_inquiry_data),
1002 SCSIPIRETRIES, 10000, NULL, XS_CTL_DATA_IN | flags));
1003 }
1004
1005 /*
1006 * scsipi_prevent:
1007 *
1008 * Prevent or allow the user to remove the media
1009 */
1010 int
1011 scsipi_prevent(periph, type, flags)
1012 struct scsipi_periph *periph;
1013 int type, flags;
1014 {
1015 struct scsipi_prevent scsipi_cmd;
1016
1017 if (periph->periph_quirks & PQUIRK_NODOORLOCK)
1018 return (0);
1019
1020 bzero(&scsipi_cmd, sizeof(scsipi_cmd));
1021 scsipi_cmd.opcode = PREVENT_ALLOW;
1022 scsipi_cmd.how = type;
1023
1024 return (scsipi_command(periph,
1025 (struct scsipi_generic *) &scsipi_cmd, sizeof(scsipi_cmd),
1026 0, 0, SCSIPIRETRIES, 5000, NULL, flags));
1027 }
1028
1029 /*
1030 * scsipi_start:
1031 *
1032 * Send a START UNIT.
1033 */
1034 int
1035 scsipi_start(periph, type, flags)
1036 struct scsipi_periph *periph;
1037 int type, flags;
1038 {
1039 struct scsipi_start_stop scsipi_cmd;
1040
1041 if (periph->periph_quirks & PQUIRK_NOSTARTUNIT)
1042 return 0;
1043
1044 bzero(&scsipi_cmd, sizeof(scsipi_cmd));
1045 scsipi_cmd.opcode = START_STOP;
1046 scsipi_cmd.byte2 = 0x00;
1047 scsipi_cmd.how = type;
1048
1049 return (scsipi_command(periph,
1050 (struct scsipi_generic *) &scsipi_cmd, sizeof(scsipi_cmd),
1051 0, 0, SCSIPIRETRIES, (type & SSS_START) ? 60000 : 10000,
1052 NULL, flags));
1053 }
1054
/*
 * scsipi_done:
 *
 *	This routine is called by an adapter's interrupt handler when
 *	an xfer is completed.  Releases the xfer's channel resource
 *	and tag, then either completes in place (poll/no-error cases)
 *	or hands the xfer to the channel's completion thread.
 */
void
scsipi_done(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_periph *periph = xs->xs_periph;
	struct scsipi_channel *chan = periph->periph_channel;
	int s, freezecnt;

	SC_DEBUG(periph, SCSIPI_DB2, ("scsipi_done\n"));
#ifdef SCSIPI_DEBUG
	if (periph->periph_dbflags & SCSIPI_DB1)
		show_scsipi_cmd(xs);
#endif

	s = splbio();
	/*
	 * The resource this command was using is now free.
	 */
	scsipi_put_resource(chan);
	xs->xs_periph->periph_sent--;

	/*
	 * If the command was tagged, free the tag.
	 */
	if (XS_CTL_TAGTYPE(xs) != 0)
		scsipi_put_tag(xs);
	else
		periph->periph_flags &= ~PERIPH_UNTAG;

	/* Mark the command as `done'. */
	xs->xs_status |= XS_STS_DONE;

#ifdef DIAGNOSTIC
	if ((xs->xs_control & (XS_CTL_ASYNC|XS_CTL_POLL)) ==
	    (XS_CTL_ASYNC|XS_CTL_POLL))
		panic("scsipi_done: ASYNC and POLL");
#endif

	/*
	 * If the xfer had an error of any sort, freeze the
	 * periph's queue.  Freeze it again if we were requested
	 * to do so in the xfer.
	 */
	freezecnt = 0;
	if (xs->error != XS_NOERROR)
		freezecnt++;
	if (xs->xs_control & XS_CTL_FREEZE_PERIPH)
		freezecnt++;
	if (freezecnt != 0)
		scsipi_periph_freeze(periph, freezecnt);

	/*
	 * record the xfer with a pending sense, in case a SCSI reset is
	 * received before the thread is waked up.
	 */
	if (xs->error == XS_BUSY && xs->status == SCSI_CHECK) {
		periph->periph_flags |= PERIPH_SENSE;
		periph->periph_xscheck = xs;
	}

	/*
	 * If this was an xfer that was not to complete asynchrnously,
	 * let the requesting thread perform error checking/handling
	 * in its context.
	 */
	if ((xs->xs_control & XS_CTL_ASYNC) == 0) {
		splx(s);
		/*
		 * If it's a polling job, just return, to unwind the
		 * call graph.  We don't need to restart the queue,
		 * because pollings jobs are treated specially, and
		 * are really only used during crash dumps anyway
		 * (XXX or during boot-time autconfiguration of
		 * ATAPI devices).
		 */
		if (xs->xs_control & XS_CTL_POLL)
			return;
		/* Wake the thread sleeping on this xfer. */
		wakeup(xs);
		goto out;
	}

	/*
	 * Catch the extremely common case of I/O completing
	 * without error; no use in taking a context switch
	 * if we can handle it in interrupt context.
	 */
	if (xs->error == XS_NOERROR) {
		splx(s);
		(void) scsipi_complete(xs);
		goto out;
	}

	/*
	 * There is an error on this xfer.  Put it on the channel's
	 * completion queue, and wake up the completion thread.
	 */
	TAILQ_INSERT_TAIL(&chan->chan_complete, xs, channel_q);
	splx(s);
	wakeup(&chan->chan_complete);

 out:
	/*
	 * If there are more xfers on the channel's queue, attempt to
	 * run them.
	 */
	scsipi_run_queue(chan);
}
1168
1169 /*
1170 * scsipi_complete:
1171 *
1172 * Completion of a scsipi_xfer. This is the guts of scsipi_done().
1173 *
1174 * NOTE: This routine MUST be called with valid thread context
1175 * except for the case where the following two conditions are
1176 * true:
1177 *
1178 * xs->error == XS_NOERROR
1179 * XS_CTL_ASYNC is set in xs->xs_control
1180 *
1181 * The semantics of this routine can be tricky, so here is an
1182 * explanation:
1183 *
1184 * 0 Xfer completed successfully.
1185 *
1186 * ERESTART Xfer had an error, but was restarted.
1187 *
1188 * anything else Xfer had an error, return value is Unix
1189 * errno.
1190 *
1191 * If the return value is anything but ERESTART:
1192 *
1193 * - If XS_CTL_ASYNC is set, `xs' has been freed back to
1194 * the pool.
1195 * - If there is a buf associated with the xfer,
1196 * it has been biodone()'d.
1197 */
int
scsipi_complete(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_periph *periph = xs->xs_periph;
	struct scsipi_channel *chan = periph->periph_channel;
	struct buf *bp;
	int error, s;

#ifdef DIAGNOSTIC
	if ((xs->xs_control & XS_CTL_ASYNC) != 0 && xs->bp == NULL)
		panic("scsipi_complete: XS_CTL_ASYNC but no buf");
#endif
	/*
	 * If command terminated with a CHECK CONDITION, we need to issue a
	 * REQUEST_SENSE command. Once the REQUEST_SENSE has been processed
	 * we'll have the real status.
	 * Must be processed at splbio() to avoid missing a SCSI bus reset
	 * for this command.
	 */
	s = splbio();
	if (xs->error == XS_BUSY && xs->status == SCSI_CHECK) {
		/* request sense for a request sense ? */
		if (xs->xs_control & XS_CTL_REQSENSE) {
			scsipi_printaddr(periph);
			/*
			 * NOTE(review): no message is printed after the
			 * address above; possibly a missing printf().
			 */
			/* XXX maybe we should reset the device ? */
			/* we've been frozen because xs->error != XS_NOERROR */
			scsipi_periph_thaw(periph, 1);
			splx(s);
			return EINVAL;
		}
		scsipi_request_sense(xs);
	}
	splx(s);
	/*
	 * If it's a user level request, bypass all usual completion
	 * processing, let the user work it out..
	 */
	if ((xs->xs_control & XS_CTL_USERCMD) != 0) {
		SC_DEBUG(periph, SCSIPI_DB3, ("calling user done()\n"));
		/* scsipi_done() froze the queue on error; undo that here. */
		if (xs->error != XS_NOERROR)
			scsipi_periph_thaw(periph, 1);
		scsipi_user_done(xs);
		SC_DEBUG(periph, SCSIPI_DB3, ("returned from user done()\n "));
		return 0;
	}


	/*
	 * Map the transport-level completion status onto a Unix errno,
	 * or onto ERESTART if the xfer should be requeued and retried.
	 */
	switch (xs->error) {
	case XS_NOERROR:
		error = 0;
		break;

	case XS_SENSE:
	case XS_SHORTSENSE:
		/* Let the bus-specific code interpret the sense data. */
		error = (*chan->chan_bustype->bustype_interpret_sense)(xs);
		break;

	case XS_RESOURCE_SHORTAGE:
		/*
		 * XXX Should freeze channel's queue.
		 */
		scsipi_printaddr(periph);
		printf("adapter resource shortage\n");
		/* FALLTHROUGH */

	case XS_BUSY:
		if (xs->error == XS_BUSY && xs->status == SCSI_QUEUE_FULL) {
			struct scsipi_max_openings mo;

			/*
			 * We set the openings to active - 1, assuming that
			 * the command that got us here is the first one that
			 * can't fit into the device's queue.  If that's not
			 * the case, I guess we'll find out soon enough.
			 */
			mo.mo_target = periph->periph_target;
			mo.mo_lun = periph->periph_lun;
			mo.mo_openings = periph->periph_active - 1;
#ifdef DIAGNOSTIC
			if (mo.mo_openings < 0) {
				scsipi_printaddr(periph);
				printf("QUEUE FULL resulted in < 0 openings\n");
				panic("scsipi_done");
			}
#endif
			if (mo.mo_openings == 0) {
				scsipi_printaddr(periph);
				printf("QUEUE FULL resulted in 0 openings\n");
				mo.mo_openings = 1;
			}
			scsipi_async_event(chan, ASYNC_EVENT_MAX_OPENINGS, &mo);
			error = ERESTART;
		} else if (xs->xs_retries != 0) {
			xs->xs_retries--;
			/*
			 * Wait one second, and try again.
			 */
			if (xs->xs_control & XS_CTL_POLL)
				delay(1000000);
			else {
				/* Freeze until the timed thaw fires. */
				scsipi_periph_freeze(periph, 1);
				callout_reset(&periph->periph_callout,
				    hz, scsipi_periph_timed_thaw, periph);
			}
			error = ERESTART;
		} else
			error = EBUSY;
		break;

	case XS_REQUEUE:
		error = ERESTART;
		break;

	case XS_TIMEOUT:
		if (xs->xs_retries != 0) {
			xs->xs_retries--;
			error = ERESTART;
		} else
			error = EIO;
		break;

	case XS_SELTIMEOUT:
		/* XXX Disable device? */
		error = EIO;
		break;

	case XS_RESET:
		if (xs->xs_control & XS_CTL_REQSENSE) {
			/*
			 * request sense interrupted by reset: signal it
			 * with EINTR return code.
			 */
			error = EINTR;
		} else {
			if (xs->xs_retries != 0) {
				xs->xs_retries--;
				error = ERESTART;
			} else
				error = EIO;
		}
		break;

	default:
		scsipi_printaddr(periph);
		printf("invalid return code from adapter: %d\n", xs->error);
		error = EIO;
		break;
	}

	s = splbio();
	if (error == ERESTART) {
		/*
		 * If we get here, the periph has been thawed and frozen
		 * again if we had to issue recovery commands.  Alternatively,
		 * it may have been frozen again and in a timed thaw.  In
		 * any case, we thaw the periph once we re-enqueue the
		 * command.  Once the periph is fully thawed, it will begin
		 * operation again.
		 */
		xs->error = XS_NOERROR;
		xs->status = SCSI_OK;
		xs->xs_status &= ~XS_STS_DONE;
		xs->xs_requeuecnt++;
		error = scsipi_enqueue(xs);
		if (error == 0) {
			scsipi_periph_thaw(periph, 1);
			splx(s);
			return (ERESTART);
		}
	}

	/*
	 * scsipi_done() freezes the queue if not XS_NOERROR.
	 * Thaw it here.
	 */
	if (xs->error != XS_NOERROR)
		scsipi_periph_thaw(periph, 1);

	/*
	 * Propagate the result to the associated buf, if any, and
	 * complete the buffer-level I/O.
	 */
	if ((bp = xs->bp) != NULL) {
		if (error) {
			bp->b_error = error;
			bp->b_flags |= B_ERROR;
			bp->b_resid = bp->b_bcount;
		} else {
			bp->b_error = 0;
			bp->b_resid = xs->resid;
		}
		biodone(bp);
	}

	/* Async xfers are not waited upon; return the xfer to the pool. */
	if (xs->xs_control & XS_CTL_ASYNC)
		scsipi_put_xs(xs);
	splx(s);

	return (error);
}
1395
1396 /*
1397 * Issue a request sense for the given scsipi_xfer. Called when the xfer
1398 * returns with a CHECK_CONDITION status. Must be called in valid thread
1399 * context and at splbio().
1400 */
1401
void
scsipi_request_sense(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_periph *periph = xs->xs_periph;
	int flags, error;
	struct scsipi_sense cmd;

	/* Mark the periph as having a sense operation in progress. */
	periph->periph_flags |= PERIPH_SENSE;

	/* if command was polling, request sense will too */
	flags = xs->xs_control & XS_CTL_POLL;
	/* Polling commands can't sleep */
	if (flags)
		flags |= XS_CTL_NOSLEEP;

	/*
	 * URGENT places the sense command at the head of the channel
	 * queue; THAW/FREEZE keep the periph quiescent around it.
	 */
	flags |= XS_CTL_REQSENSE | XS_CTL_URGENT | XS_CTL_DATA_IN |
	    XS_CTL_THAW_PERIPH | XS_CTL_FREEZE_PERIPH;

	bzero(&cmd, sizeof(cmd));
	cmd.opcode = REQUEST_SENSE;
	cmd.length = sizeof(struct scsipi_sense_data);

	/*
	 * Read the sense data into the original xfer's sense buffer.
	 * NOTE(review): trailing args appear to be retries=0,
	 * timeout=1000ms, bp=NULL -- confirm against scsipi_command().
	 */
	error = scsipi_command(periph,
	    (struct scsipi_generic *) &cmd, sizeof(cmd),
	    (u_char*)&xs->sense.scsi_sense, sizeof(struct scsipi_sense_data),
	    0, 1000, NULL, flags);
	/* Sense operation finished; clear the in-progress state. */
	periph->periph_flags &= ~PERIPH_SENSE;
	periph->periph_xscheck = NULL;
	/* Translate the outcome of the sense command onto the original xfer. */
	switch(error) {
	case 0:
		/* we have a valid sense */
		xs->error = XS_SENSE;
		return;
	case EINTR:
		/* REQUEST_SENSE interrupted by bus reset. */
		xs->error = XS_RESET;
		return;
	case EIO:
		 /* request sense couldn't be performed */
		/*
		 * XXX this isn't quite right but we don't have anything
		 * better for now
		 */
		xs->error = XS_DRIVER_STUFFUP;
		return;
	default:
		 /* Notify that request sense failed. */
		xs->error = XS_DRIVER_STUFFUP;
		scsipi_printaddr(periph);
		printf("request sense failed with error %d\n", error);
		return;
	}
}
1456
1457 /*
1458 * scsipi_enqueue:
1459 *
1460 * Enqueue an xfer on a channel.
1461 */
int
scsipi_enqueue(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_channel *chan = xs->xs_periph->periph_channel;
	struct scsipi_xfer *qxs;
	int s;

	s = splbio();

	/*
	 * If the xfer is to be polled, and there are already jobs on
	 * the queue, we can't proceed.
	 */
	if ((xs->xs_control & XS_CTL_POLL) != 0 &&
	    TAILQ_FIRST(&chan->chan_queue) != NULL) {
		splx(s);
		xs->error = XS_DRIVER_STUFFUP;
		return (EAGAIN);
	}

	/*
	 * If we have an URGENT xfer, it's an error recovery command
	 * and it should just go on the head of the channel's queue.
	 */
	if (xs->xs_control & XS_CTL_URGENT) {
		TAILQ_INSERT_HEAD(&chan->chan_queue, xs, channel_q);
		goto out;
	}

	/*
	 * If this xfer has already been on the queue before, we
	 * need to reinsert it in the correct order.  That order is:
	 *
	 *	Immediately before the first xfer for this periph
	 *	with a requeuecnt less than xs->xs_requeuecnt.
	 *
	 * Failing that, at the end of the queue.  (We'll end up
	 * there naturally.)
	 */
	if (xs->xs_requeuecnt != 0) {
		for (qxs = TAILQ_FIRST(&chan->chan_queue); qxs != NULL;
		     qxs = TAILQ_NEXT(qxs, channel_q)) {
			if (qxs->xs_periph == xs->xs_periph &&
			    qxs->xs_requeuecnt < xs->xs_requeuecnt)
				break;
		}
		if (qxs != NULL) {
			TAILQ_INSERT_AFTER(&chan->chan_queue, qxs, xs,
			    channel_q);
			goto out;
		}
	}
	TAILQ_INSERT_TAIL(&chan->chan_queue, xs, channel_q);
 out:
	/*
	 * If requested, undo one level of periph freeze now that the
	 * xfer is safely on the queue.
	 */
	if (xs->xs_control & XS_CTL_THAW_PERIPH)
		scsipi_periph_thaw(xs->xs_periph, 1);
	splx(s);
	return (0);
}
1522
1523 /*
1524 * scsipi_run_queue:
1525 *
1526 * Start as many xfers as possible running on the channel.
1527 */
void
scsipi_run_queue(chan)
	struct scsipi_channel *chan;
{
	struct scsipi_xfer *xs;
	struct scsipi_periph *periph;
	int s;

	for (;;) {
		s = splbio();

		/*
		 * If the channel is frozen, we can't do any work right
		 * now.
		 */
		if (chan->chan_qfreeze != 0) {
			splx(s);
			return;
		}

		/*
		 * Look for work to do, and make sure we can do it.
		 */
		for (xs = TAILQ_FIRST(&chan->chan_queue); xs != NULL;
		     xs = TAILQ_NEXT(xs, channel_q)) {
			periph = xs->xs_periph;

			/*
			 * Skip periphs that have all their openings in
			 * use, are frozen, or are currently running an
			 * untagged command.
			 */
			if ((periph->periph_sent >= periph->periph_openings) ||
			    periph->periph_qfreeze != 0 ||
			    (periph->periph_flags & PERIPH_UNTAG) != 0)
				continue;

			/*
			 * While the periph is recovering or sensing, only
			 * URGENT (error recovery) xfers may be started.
			 */
			if ((periph->periph_flags &
			    (PERIPH_RECOVERING | PERIPH_SENSE)) != 0 &&
			    (xs->xs_control & XS_CTL_URGENT) == 0)
				continue;

			/*
			 * We can issue this xfer!
			 */
			goto got_one;
		}

		/*
		 * Can't find any work to do right now.
		 */
		splx(s);
		return;

 got_one:
		/*
		 * Have an xfer to run.  Allocate a resource from
		 * the adapter to run it.  If we can't allocate that
		 * resource, we don't dequeue the xfer.
		 */
		if (scsipi_get_resource(chan) == 0) {
			/*
			 * Adapter is out of resources.  If the adapter
			 * supports it, attempt to grow them.
			 */
			if (scsipi_grow_resources(chan) == 0) {
				/*
				 * Wasn't able to grow resources,
				 * nothing more we can do.
				 */
				if (xs->xs_control & XS_CTL_POLL) {
					scsipi_printaddr(xs->xs_periph);
					printf("polling command but no "
					    "adapter resources");
					/* We'll panic shortly... */
				}
				splx(s);
				return;
			}
			/*
			 * scsipi_grow_resources() allocated the resource
			 * for us.
			 */
		}

		/*
		 * We have a resource to run this xfer, do it!
		 */
		TAILQ_REMOVE(&chan->chan_queue, xs, channel_q);

		/*
		 * If the command is to be tagged, allocate a tag ID
		 * for it.
		 */
		if (XS_CTL_TAGTYPE(xs) != 0)
			scsipi_get_tag(xs);
		else
			/* Untagged: lock out further commands to this periph. */
			periph->periph_flags |= PERIPH_UNTAG;
		periph->periph_sent++;
		splx(s);

		/* Hand the xfer to the adapter outside of splbio(). */
		scsipi_adapter_request(chan, ADAPTER_REQ_RUN_XFER, xs);
	}
#ifdef DIAGNOSTIC
	panic("scsipi_run_queue: impossible");
#endif
}
1630
1631 /*
1632 * scsipi_execute_xs:
1633 *
1634 * Begin execution of an xfer, waiting for it to complete, if necessary.
1635 */
int
scsipi_execute_xs(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_periph *periph = xs->xs_periph;
	struct scsipi_channel *chan = periph->periph_channel;
	int async, poll, retries, error, s;

	/* Reset the per-attempt completion state before (re)issuing. */
	xs->xs_status &= ~XS_STS_DONE;
	xs->error = XS_NOERROR;
	xs->resid = xs->datalen;
	xs->status = SCSI_OK;

#ifdef SCSIPI_DEBUG
	if (xs->xs_periph->periph_dbflags & SCSIPI_DB3) {
		printf("scsipi_execute_xs: ");
		show_scsipi_xs(xs);
		printf("\n");
	}
#endif

	/*
	 * Deal with command tagging:
	 *
	 *	- If the device's current operating mode doesn't
	 *	  include tagged queueing, clear the tag mask.
	 *
	 *	- If the device's current operating mode *does*
	 *	  include tagged queueing, set the tag_type in
	 *	  the xfer to the appropriate byte for the tag
	 *	  message.
	 */
	if ((PERIPH_XFER_MODE(periph) & PERIPH_CAP_TQING) == 0 ||
	    (xs->xs_control & XS_CTL_REQSENSE)) {
		xs->xs_control &= ~XS_CTL_TAGMASK;
		xs->xs_tag_type = 0;
	} else {
		/*
		 * If the request doesn't specify a tag, give Head
		 * tags to URGENT operations and Ordered tags to
		 * everything else.
		 */
		if (XS_CTL_TAGTYPE(xs) == 0) {
			if (xs->xs_control & XS_CTL_URGENT)
				xs->xs_control |= XS_CTL_HEAD_TAG;
			else
				xs->xs_control |= XS_CTL_ORDERED_TAG;
		}

		/* Translate the tag flag into the SCSI tag message byte. */
		switch (XS_CTL_TAGTYPE(xs)) {
		case XS_CTL_ORDERED_TAG:
			xs->xs_tag_type = MSG_ORDERED_Q_TAG;
			break;

		case XS_CTL_SIMPLE_TAG:
			xs->xs_tag_type = MSG_SIMPLE_Q_TAG;
			break;

		case XS_CTL_HEAD_TAG:
			xs->xs_tag_type = MSG_HEAD_OF_Q_TAG;
			break;

		default:
			scsipi_printaddr(periph);
			printf("invalid tag mask 0x%08x\n",
			    XS_CTL_TAGTYPE(xs));
			panic("scsipi_execute_xs");
		}
	}

	/*
	 * If we don't yet have a completion thread, or we are to poll for
	 * completion, clear the ASYNC flag.
	 */
	if (chan->chan_thread == NULL || (xs->xs_control & XS_CTL_POLL) != 0)
		xs->xs_control &= ~XS_CTL_ASYNC;

	async = (xs->xs_control & XS_CTL_ASYNC);
	poll = (xs->xs_control & XS_CTL_POLL);
	retries = xs->xs_retries;	/* for polling commands */

#ifdef DIAGNOSTIC
	if (async != 0 && xs->bp == NULL)
		panic("scsipi_execute_xs: XS_CTL_ASYNC but no buf");
#endif

	/*
	 * Enqueue the transfer.  If we're not polling for completion, this
	 * should ALWAYS return `no error'.
	 */
 try_again:
	error = scsipi_enqueue(xs);
	if (error) {
		if (poll == 0) {
			scsipi_printaddr(periph);
			printf("not polling, but enqueue failed with %d\n",
			    error);
			panic("scsipi_execute_xs");
		}

		scsipi_printaddr(periph);
		printf("failed to enqueue polling command");
		if (retries != 0) {
			/* Busy-wait one second between enqueue attempts. */
			printf(", retrying...\n");
			delay(1000000);
			retries--;
			goto try_again;
		}
		printf("\n");
		goto free_xs;
	}

 restarted:
	scsipi_run_queue(chan);

	/*
	 * The xfer is enqueued, and possibly running.  If it's to be
	 * completed asynchronously, just return now.  The completion
	 * thread handles errors and frees the xfer in that case.
	 */
	if (async)
		return (EJUSTRETURN);

	/*
	 * Not an asynchronous command; wait for it to complete.
	 */
	while ((xs->xs_status & XS_STS_DONE) == 0) {
		/* A polled command must already be done at this point. */
		if (poll) {
			scsipi_printaddr(periph);
			printf("polling command not done\n");
			panic("scsipi_execute_xs");
		}
		(void) tsleep(xs, PRIBIO, "xscmd", 0);
	}

	/*
	 * Command is complete.  scsipi_done() has awakened us to perform
	 * the error handling.
	 */
	error = scsipi_complete(xs);
	if (error == ERESTART)
		goto restarted;

	/*
	 * Command completed successfully or fatal error occurred.  Fall
	 * into....
	 */
 free_xs:
	s = splbio();
	scsipi_put_xs(xs);
	splx(s);

	/*
	 * Kick the queue, keep it running in case it stopped for some
	 * reason.
	 */
	scsipi_run_queue(chan);

	return (error);
}
1795
1796 /*
1797 * scsipi_completion_thread:
1798 *
1799 * This is the completion thread. We wait for errors on
1800 * asynchronous xfers, and perform the error handling
1801 * function, restarting the command, if necessary.
1802 */
void
scsipi_completion_thread(arg)
	void *arg;
{
	struct scsipi_channel *chan = arg;
	struct scsipi_xfer *xs;
	int s;

	for (;;) {
		s = splbio();
		xs = TAILQ_FIRST(&chan->chan_complete);
		if (xs == NULL &&
		    (chan->chan_flags & SCSIPI_CHAN_SHUTDOWN) == 0) {
			/* Nothing to do and not shutting down; wait. */
			splx(s);
			(void) tsleep(&chan->chan_complete, PRIBIO,
			    "sccomp", 0);
			continue;
		}
		if (chan->chan_flags & SCSIPI_CHAN_SHUTDOWN) {
			/*
			 * Channel is being torn down; leave the loop.
			 * NOTE(review): xfers still on chan_complete at
			 * this point are not processed -- confirm callers
			 * drain the queue before setting SHUTDOWN.
			 */
			splx(s);
			break;
		}
		TAILQ_REMOVE(&chan->chan_complete, xs, channel_q);
		splx(s);

		/*
		 * Have an xfer with an error; process it.
		 */
		(void) scsipi_complete(xs);

		/*
		 * Kick the queue; keep it running if it was stopped
		 * for some reason.
		 */
		scsipi_run_queue(chan);
	}

	/* Clear the thread pointer so others can see we are gone. */
	chan->chan_thread = NULL;

	/* In case parent is waiting for us to exit. */
	wakeup(&chan->chan_thread);

	kthread_exit(0);
}
1847
1848 /*
1849 * scsipi_create_completion_thread:
1850 *
1851 * Callback to actually create the completion thread.
1852 */
1853 void
1854 scsipi_create_completion_thread(arg)
1855 void *arg;
1856 {
1857 struct scsipi_channel *chan = arg;
1858 struct scsipi_adapter *adapt = chan->chan_adapter;
1859
1860 if (kthread_create1(scsipi_completion_thread, chan,
1861 &chan->chan_thread, "%s:%d", adapt->adapt_dev->dv_xname,
1862 chan->chan_channel)) {
1863 printf("%s: unable to create completion thread for "
1864 "channel %d\n", adapt->adapt_dev->dv_xname,
1865 chan->chan_channel);
1866 panic("scsipi_create_completion_thread");
1867 }
1868 }
1869
1870 /*
1871 * scsipi_async_event:
1872 *
1873 * Handle an asynchronous event from an adapter.
1874 */
void
scsipi_async_event(chan, event, arg)
	struct scsipi_channel *chan;
	scsipi_async_event_t event;
	void *arg;
{
	int s;

	/* All event handlers run at splbio(). */
	s = splbio();
	switch (event) {
	case ASYNC_EVENT_MAX_OPENINGS:
		/* Adapter reports a new limit on outstanding commands. */
		scsipi_async_event_max_openings(chan,
		    (struct scsipi_max_openings *)arg);
		break;

	case ASYNC_EVENT_XFER_MODE:
		/* Adapter reports (re)negotiated transfer parameters. */
		scsipi_async_event_xfer_mode(chan,
		    (struct scsipi_xfer_mode *)arg);
		break;
	case ASYNC_EVENT_RESET:
		/* Bus reset: invalidate pending sense state. */
		scsipi_async_event_channel_reset(chan);
		break;
	}
	splx(s);
}
1900
1901 /*
1902 * scsipi_print_xfer_mode:
1903 *
1904 * Print a periph's capabilities.
1905 */
1906 void
1907 scsipi_print_xfer_mode(periph)
1908 struct scsipi_periph *periph;
1909 {
1910 int period, freq, speed, mbs;
1911
1912 if ((periph->periph_flags & PERIPH_MODE_VALID) == 0)
1913 return;
1914
1915 printf("%s: ", periph->periph_dev->dv_xname);
1916 if (periph->periph_mode & PERIPH_CAP_SYNC) {
1917 period = scsipi_sync_factor_to_period(periph->periph_period);
1918 printf("Sync (%d.%dns offset %d)",
1919 period / 10, period % 10, periph->periph_offset);
1920 } else
1921 printf("Async");
1922
1923 if (periph->periph_mode & PERIPH_CAP_WIDE32)
1924 printf(", 32-bit");
1925 else if (periph->periph_mode & PERIPH_CAP_WIDE16)
1926 printf(", 16-bit");
1927 else
1928 printf(", 8-bit");
1929
1930 if (periph->periph_mode & PERIPH_CAP_SYNC) {
1931 freq = scsipi_sync_factor_to_freq(periph->periph_period);
1932 speed = freq;
1933 if (periph->periph_mode & PERIPH_CAP_WIDE32)
1934 speed *= 4;
1935 else if (periph->periph_mode & PERIPH_CAP_WIDE16)
1936 speed *= 2;
1937 mbs = speed / 1000;
1938 if (mbs > 0)
1939 printf(" (%d.%03dMB/s)", mbs, speed % 1000);
1940 else
1941 printf(" (%dKB/s)", speed % 1000);
1942 }
1943
1944 printf(" transfers");
1945
1946 if (periph->periph_mode & PERIPH_CAP_TQING)
1947 printf(", tagged queueing");
1948
1949 printf("\n");
1950 }
1951
1952 /*
1953 * scsipi_async_event_max_openings:
1954 *
1955 * Update the maximum number of outstanding commands a
1956 * device may have.
1957 */
1958 void
1959 scsipi_async_event_max_openings(chan, mo)
1960 struct scsipi_channel *chan;
1961 struct scsipi_max_openings *mo;
1962 {
1963 struct scsipi_periph *periph;
1964 int minlun, maxlun;
1965
1966 if (mo->mo_lun == -1) {
1967 /*
1968 * Wildcarded; apply it to all LUNs.
1969 */
1970 minlun = 0;
1971 maxlun = chan->chan_nluns - 1;
1972 } else
1973 minlun = maxlun = mo->mo_lun;
1974
1975 for (; minlun <= maxlun; minlun++) {
1976 periph = scsipi_lookup_periph(chan, mo->mo_target, minlun);
1977 if (periph == NULL)
1978 continue;
1979
1980 if (mo->mo_openings < periph->periph_openings)
1981 periph->periph_openings = mo->mo_openings;
1982 else if (mo->mo_openings > periph->periph_openings &&
1983 (periph->periph_flags & PERIPH_GROW_OPENINGS) != 0)
1984 periph->periph_openings = mo->mo_openings;
1985 }
1986 }
1987
1988 /*
1989 * scsipi_async_event_xfer_mode:
1990 *
1991 * Update the xfer mode for all periphs sharing the
1992 * specified I_T Nexus.
1993 */
void
scsipi_async_event_xfer_mode(chan, xm)
	struct scsipi_channel *chan;
	struct scsipi_xfer_mode *xm;
{
	struct scsipi_periph *periph;
	int lun, announce, mode, period, offset;

	/* Apply the new mode to every known LUN on the target. */
	for (lun = 0; lun < chan->chan_nluns; lun++) {
		periph = scsipi_lookup_periph(chan, xm->xm_target, lun);
		if (periph == NULL)
			continue;
		announce = 0;

		/*
		 * Clamp the xfer mode down to this periph's capabilities.
		 */
		mode = xm->xm_mode & periph->periph_cap;
		if (mode & PERIPH_CAP_SYNC) {
			period = xm->xm_period;
			offset = xm->xm_offset;
		} else {
			/* Async transfers: period/offset are meaningless. */
			period = 0;
			offset = 0;
		}

		/*
		 * If we do not have a valid xfer mode yet, or the parameters
		 * are different, announce them.
		 */
		if ((periph->periph_flags & PERIPH_MODE_VALID) == 0 ||
		    periph->periph_mode != mode ||
		    periph->periph_period != period ||
		    periph->periph_offset != offset)
			announce = 1;

		/* Record the (possibly clamped) parameters. */
		periph->periph_mode = mode;
		periph->periph_period = period;
		periph->periph_offset = offset;
		periph->periph_flags |= PERIPH_MODE_VALID;

		if (announce)
			scsipi_print_xfer_mode(periph);
	}
}
2039
2040 /*
2041 * scsipi_set_xfer_mode:
2042 *
2043 * Set the xfer mode for the specified I_T Nexus.
2044 */
2045 void
2046 scsipi_set_xfer_mode(chan, target, immed)
2047 struct scsipi_channel *chan;
2048 int target, immed;
2049 {
2050 struct scsipi_xfer_mode xm;
2051 struct scsipi_periph *itperiph;
2052 int lun, s;
2053
2054 /*
2055 * Go to the minimal xfer mode.
2056 */
2057 xm.xm_target = target;
2058 xm.xm_mode = 0;
2059 xm.xm_period = 0; /* ignored */
2060 xm.xm_offset = 0; /* ignored */
2061
2062 /*
2063 * Find the first LUN we know about on this I_T Nexus.
2064 */
2065 for (lun = 0; lun < chan->chan_nluns; lun++) {
2066 itperiph = scsipi_lookup_periph(chan, target, lun);
2067 if (itperiph != NULL)
2068 break;
2069 }
2070 if (itperiph != NULL)
2071 xm.xm_mode = itperiph->periph_cap;
2072
2073 /*
2074 * Now issue the request to the adapter.
2075 */
2076 s = splbio();
2077 scsipi_adapter_request(chan, ADAPTER_REQ_SET_XFER_MODE, &xm);
2078 splx(s);
2079
2080 /*
2081 * If we want this to happen immediately, issue a dummy command,
2082 * since most adapters can't really negotiate unless they're
2083 * executing a job.
2084 */
2085 if (immed != 0 && itperiph != NULL) {
2086 (void) scsipi_test_unit_ready(itperiph,
2087 XS_CTL_DISCOVERY | XS_CTL_IGNORE_ILLEGAL_REQUEST |
2088 XS_CTL_IGNORE_NOT_READY |
2089 XS_CTL_IGNORE_MEDIA_CHANGE);
2090 }
2091 }
2092
2093 /*
2094 * scsipi_channel_reset:
2095 *
2096 * handle scsi bus reset
2097 * called at splbio
2098 */
void
scsipi_async_event_channel_reset(chan)
	struct scsipi_channel *chan;
{
	struct scsipi_xfer *xs, *xs_next;
	struct scsipi_periph *periph;
	int target, lun;

	/*
	 * Channel has been reset. Also mark as reset pending REQUEST_SENSE
	 * commands; as the sense is not available any more.
	 * can't call scsipi_done() from here, as the command has not been
	 * sent to the adapter yet (this would corrupt accounting).
	 */

	for (xs = TAILQ_FIRST(&chan->chan_queue); xs != NULL; xs = xs_next) {
		/* Grab the next pointer first; we may unlink xs below. */
		xs_next = TAILQ_NEXT(xs, channel_q);
		if (xs->xs_control & XS_CTL_REQSENSE) {
			TAILQ_REMOVE(&chan->chan_queue, xs, channel_q);
			xs->error = XS_RESET;
			/* Async xfers go to the completion thread. */
			if ((xs->xs_control & XS_CTL_ASYNC) != 0)
				TAILQ_INSERT_TAIL(&chan->chan_complete, xs,
				    channel_q);
		}
	}
	wakeup(&chan->chan_complete);
	/* Catch xs with pending sense which may not have a REQSENSE xs yet */
	for (target = 0; target < chan->chan_ntargets; target++) {
		/* Skip the host adapter's own ID. */
		if (target == chan->chan_id)
			continue;
		for (lun = 0; lun <  chan->chan_nluns; lun++) {
			periph = chan->chan_periphs[target][lun];
			if (periph) {
				xs = periph->periph_xscheck;
				if (xs)
					xs->error = XS_RESET;
			}
		}
	}
}
2139
2140
2141 /*
2142 * scsipi_adapter_addref:
2143 *
2144 * Add a reference to the adapter pointed to by the provided
2145 * link, enabling the adapter if necessary.
2146 */
2147 int
2148 scsipi_adapter_addref(adapt)
2149 struct scsipi_adapter *adapt;
2150 {
2151 int s, error = 0;
2152
2153 s = splbio();
2154 if (adapt->adapt_refcnt++ == 0 && adapt->adapt_enable != NULL) {
2155 error = (*adapt->adapt_enable)(adapt->adapt_dev, 1);
2156 if (error)
2157 adapt->adapt_refcnt--;
2158 }
2159 splx(s);
2160 return (error);
2161 }
2162
2163 /*
2164 * scsipi_adapter_delref:
2165 *
2166 * Delete a reference to the adapter pointed to by the provided
2167 * link, disabling the adapter if possible.
2168 */
2169 void
2170 scsipi_adapter_delref(adapt)
2171 struct scsipi_adapter *adapt;
2172 {
2173 int s;
2174
2175 s = splbio();
2176 if (adapt->adapt_refcnt-- == 1 && adapt->adapt_enable != NULL)
2177 (void) (*adapt->adapt_enable)(adapt->adapt_dev, 0);
2178 splx(s);
2179 }
2180
/*
 * Table of the "special" SCSI synchronous transfer factor codes whose
 * period is not given by the standard factor * 4ns rule.  Periods are
 * expressed in units of ns * 10 so fractional nanoseconds can be
 * represented.  Factors not listed fall back to the standard rule in
 * the conversion routines below.
 */
struct scsipi_syncparam {
	int	ss_factor;
	int	ss_period;	/* ns * 10 */
} scsipi_syncparams[] = {
	{ 0x0a,		250 },
	{ 0x0b,		303 },
	{ 0x0c,		500 },
};
const int scsipi_nsyncparams =
    sizeof(scsipi_syncparams) / sizeof(scsipi_syncparams[0]);
2191
2192 int
2193 scsipi_sync_period_to_factor(period)
2194 int period; /* ns * 10 */
2195 {
2196 int i;
2197
2198 for (i = 0; i < scsipi_nsyncparams; i++) {
2199 if (period <= scsipi_syncparams[i].ss_period)
2200 return (scsipi_syncparams[i].ss_factor);
2201 }
2202
2203 return ((period / 10) / 4);
2204 }
2205
2206 int
2207 scsipi_sync_factor_to_period(factor)
2208 int factor;
2209 {
2210 int i;
2211
2212 for (i = 0; i < scsipi_nsyncparams; i++) {
2213 if (factor == scsipi_syncparams[i].ss_factor)
2214 return (scsipi_syncparams[i].ss_period);
2215 }
2216
2217 return ((factor * 4) * 10);
2218 }
2219
2220 int
2221 scsipi_sync_factor_to_freq(factor)
2222 int factor;
2223 {
2224 int i;
2225
2226 for (i = 0; i < scsipi_nsyncparams; i++) {
2227 if (factor == scsipi_syncparams[i].ss_factor)
2228 return (10000000 / scsipi_syncparams[i].ss_period);
2229 }
2230
2231 return (10000000 / ((factor * 4) * 10));
2232 }
2233
2234 #ifdef SCSIPI_DEBUG
2235 /*
2236 * Given a scsipi_xfer, dump the request, in all it's glory
2237 */
void
show_scsipi_xs(xs)
	struct scsipi_xfer *xs;
{

	/* Dump each interesting field of the xfer, then decode the CDB. */
	printf("xs(%p): ", xs);
	printf("xs_control(0x%08x)", xs->xs_control);
	printf("xs_status(0x%08x)", xs->xs_status);
	printf("periph(%p)", xs->xs_periph);
	printf("retr(0x%x)", xs->xs_retries);
	printf("timo(0x%x)", xs->timeout);
	printf("cmd(%p)", xs->cmd);
	printf("len(0x%x)", xs->cmdlen);
	printf("data(%p)", xs->data);
	printf("len(0x%x)", xs->datalen);
	printf("res(0x%x)", xs->resid);
	printf("err(0x%x)", xs->error);
	printf("bp(%p)", xs->bp);
	show_scsipi_cmd(xs);
}
2258
void
show_scsipi_cmd(xs)
	struct scsipi_xfer *xs;
{
	u_char *b = (u_char *) xs->cmd;
	int i = 0;

	scsipi_printaddr(xs->xs_periph);
	printf(" command: ");

	if ((xs->xs_control & XS_CTL_RESET) == 0) {
		/* Print the CDB bytes, comma-separated. */
		while (i < xs->cmdlen) {
			if (i)
				printf(",");
			printf("0x%x", b[i++]);
		}
		printf("-[%d bytes]\n", xs->datalen);
		/* Dump at most the first 64 bytes of associated data. */
		if (xs->datalen)
			show_mem(xs->data, min(64, xs->datalen));
	} else
		printf("-RESET-\n");
}
2281
2282 void
2283 show_mem(address, num)
2284 u_char *address;
2285 int num;
2286 {
2287 int x;
2288
2289 printf("------------------------------");
2290 for (x = 0; x < num; x++) {
2291 if ((x % 16) == 0)
2292 printf("\n%03d: ", x);
2293 printf("%02x ", *address++);
2294 }
2295 printf("\n------------------------------\n");
2296 }
2297 #endif /* SCSIPI_DEBUG */
2298