scsipi_base.c revision 1.40

1 /*	$NetBSD: scsipi_base.c,v 1.40 2001/04/27 21:36:58 bouyer Exp $	*/
2
3 /*-
4 * Copyright (c) 1998, 1999, 2000 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Charles M. Hannum; by Jason R. Thorpe of the Numerical Aerospace
9 * Simulation Facility, NASA Ames Research Center.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution.
19 * 3. All advertising materials mentioning features or use of this software
20 * must display the following acknowledgement:
21 * This product includes software developed by the NetBSD
22 * Foundation, Inc. and its contributors.
23 * 4. Neither the name of The NetBSD Foundation nor the names of its
24 * contributors may be used to endorse or promote products derived
25 * from this software without specific prior written permission.
26 *
27 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
28 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
29 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
30 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
31 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
32 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
33 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
34 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
35 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
36 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
37 * POSSIBILITY OF SUCH DAMAGE.
38 */
39
40 #include "opt_scsi.h"
41
42 #include <sys/types.h>
43 #include <sys/param.h>
44 #include <sys/systm.h>
45 #include <sys/kernel.h>
46 #include <sys/buf.h>
47 #include <sys/uio.h>
48 #include <sys/malloc.h>
49 #include <sys/pool.h>
50 #include <sys/errno.h>
51 #include <sys/device.h>
52 #include <sys/proc.h>
53 #include <sys/kthread.h>
54
55 #include <dev/scsipi/scsipi_all.h>
56 #include <dev/scsipi/scsipi_disk.h>
57 #include <dev/scsipi/scsipiconf.h>
58 #include <dev/scsipi/scsipi_base.h>
59
60 #include <dev/scsipi/scsi_all.h>
61 #include <dev/scsipi/scsi_message.h>
62
63 int scsipi_complete __P((struct scsipi_xfer *));
64 void scsipi_request_sense __P((struct scsipi_xfer *));
65 int scsipi_enqueue __P((struct scsipi_xfer *));
66 void scsipi_run_queue __P((struct scsipi_channel *chan));
67
68 void scsipi_completion_thread __P((void *));
69
70 void scsipi_get_tag __P((struct scsipi_xfer *));
71 void scsipi_put_tag __P((struct scsipi_xfer *));
72
73 int scsipi_get_resource __P((struct scsipi_channel *));
74 void scsipi_put_resource __P((struct scsipi_channel *));
75 __inline int scsipi_grow_resources __P((struct scsipi_channel *));
76
77 void scsipi_async_event_max_openings __P((struct scsipi_channel *,
78 struct scsipi_max_openings *));
79 void scsipi_async_event_xfer_mode __P((struct scsipi_channel *,
80 struct scsipi_xfer_mode *));
81 void scsipi_async_event_channel_reset __P((struct scsipi_channel *));
82
83 struct pool scsipi_xfer_pool;
84
85 /*
86 * scsipi_init:
87 *
88 * Called when a scsibus or atapibus is attached to the system
89 * to initialize shared data structures.
90 */
91 void
92 scsipi_init()
93 {
94 static int scsipi_init_done;
95
96 if (scsipi_init_done)
97 return;
98 scsipi_init_done = 1;
99
100 /* Initialize the scsipi_xfer pool. */
101 pool_init(&scsipi_xfer_pool, sizeof(struct scsipi_xfer), 0,
102 0, 0, "scxspl", 0, NULL, NULL, M_DEVBUF);
103 }
104
105 /*
106 * scsipi_channel_init:
107 *
108 * Initialize a scsipi_channel when it is attached.
109 */
110 int
111 scsipi_channel_init(chan)
112 struct scsipi_channel *chan;
113 {
114 size_t nbytes;
115 int i;
116
117 /* Initialize shared data. */
118 scsipi_init();
119
120 /* Initialize the queues. */
121 TAILQ_INIT(&chan->chan_queue);
122 TAILQ_INIT(&chan->chan_complete);
123
124 nbytes = chan->chan_ntargets * sizeof(struct scsipi_periph **);
125 chan->chan_periphs = malloc(nbytes, M_DEVBUF, M_NOWAIT);
126 if (chan->chan_periphs == NULL)
127 return (ENOMEM);
128
129
130 nbytes = chan->chan_nluns * sizeof(struct scsipi_periph *);
131 for (i = 0; i < chan->chan_ntargets; i++) {
132 chan->chan_periphs[i] = malloc(nbytes, M_DEVBUF, M_NOWAIT);
133 if (chan->chan_periphs[i] == NULL) {
134 			while (--i >= 0) {
135 				free(chan->chan_periphs[i], M_DEVBUF);
136 			}
			free(chan->chan_periphs, M_DEVBUF);	/* also free the target array */
137 			return (ENOMEM);
138 }
139 memset(chan->chan_periphs[i], 0, nbytes);
140 }
141
142 /*
143 * Create the asynchronous completion thread.
144 */
145 kthread_create(scsipi_create_completion_thread, chan);
146 return (0);
147 }
148
149 /*
150 * scsipi_channel_shutdown:
151 *
152 * Shutdown a scsipi_channel.
153 */
154 void
155 scsipi_channel_shutdown(chan)
156 struct scsipi_channel *chan;
157 {
158
159 /*
160 * Shut down the completion thread.
161 */
162 chan->chan_flags |= SCSIPI_CHAN_SHUTDOWN;
163 wakeup(&chan->chan_complete);
164
165 /*
166 * Now wait for the thread to exit.
167 */
168 while (chan->chan_thread != NULL)
169 (void) tsleep(&chan->chan_thread, PRIBIO, "scshut", 0);
170 }
171
172 /*
173 * scsipi_insert_periph:
174 *
175 * Insert a periph into the channel.
176 */
177 void
178 scsipi_insert_periph(chan, periph)
179 struct scsipi_channel *chan;
180 struct scsipi_periph *periph;
181 {
182 int s;
183
184 s = splbio();
185 chan->chan_periphs[periph->periph_target][periph->periph_lun] = periph;
186 splx(s);
187 }
188
189 /*
190 * scsipi_remove_periph:
191 *
192 * Remove a periph from the channel.
193 */
194 void
195 scsipi_remove_periph(chan, periph)
196 struct scsipi_channel *chan;
197 struct scsipi_periph *periph;
198 {
199 int s;
200
201 s = splbio();
202 chan->chan_periphs[periph->periph_target][periph->periph_lun] = NULL;
203 splx(s);
204 }
205
206 /*
207 * scsipi_lookup_periph:
208 *
209  *	Look up a periph on the specified channel.
210 */
211 struct scsipi_periph *
212 scsipi_lookup_periph(chan, target, lun)
213 struct scsipi_channel *chan;
214 int target, lun;
215 {
216 struct scsipi_periph *periph;
217 int s;
218
219 if (target >= chan->chan_ntargets ||
220 lun >= chan->chan_nluns)
221 return (NULL);
222
223 s = splbio();
224 periph = chan->chan_periphs[target][lun];
225 splx(s);
226
227 return (periph);
228 }
229
230 /*
231 * scsipi_get_resource:
232 *
233 * Allocate a single xfer `resource' from the channel.
234 *
235 * NOTE: Must be called at splbio().
236 */
237 int
238 scsipi_get_resource(chan)
239 struct scsipi_channel *chan;
240 {
241 struct scsipi_adapter *adapt = chan->chan_adapter;
242
243 if (chan->chan_flags & SCSIPI_CHAN_OPENINGS) {
244 if (chan->chan_openings > 0) {
245 chan->chan_openings--;
246 return (1);
247 }
248 return (0);
249 }
250
251 if (adapt->adapt_openings > 0) {
252 adapt->adapt_openings--;
253 return (1);
254 }
255 return (0);
256 }
257
258 /*
259 * scsipi_grow_resources:
260 *
261 * Attempt to grow resources for a channel. If this succeeds,
262 * we allocate one for our caller.
263 *
264 * NOTE: Must be called at splbio().
265 */
266 __inline int
267 scsipi_grow_resources(chan)
268 struct scsipi_channel *chan;
269 {
270
271 if (chan->chan_flags & SCSIPI_CHAN_CANGROW) {
272 scsipi_adapter_request(chan, ADAPTER_REQ_GROW_RESOURCES, NULL);
273 return (scsipi_get_resource(chan));
274 }
275
276 return (0);
277 }
278
279 /*
280 * scsipi_put_resource:
281 *
282 * Free a single xfer `resource' to the channel.
283 *
284 * NOTE: Must be called at splbio().
285 */
286 void
287 scsipi_put_resource(chan)
288 struct scsipi_channel *chan;
289 {
290 struct scsipi_adapter *adapt = chan->chan_adapter;
291
292 if (chan->chan_flags & SCSIPI_CHAN_OPENINGS)
293 chan->chan_openings++;
294 else
295 adapt->adapt_openings++;
296 }
297
298 /*
299 * scsipi_get_tag:
300 *
301 * Get a tag ID for the specified xfer.
302 *
303 * NOTE: Must be called at splbio().
304 */
305 void
306 scsipi_get_tag(xs)
307 struct scsipi_xfer *xs;
308 {
309 struct scsipi_periph *periph = xs->xs_periph;
310 int word, bit, tag;
311
312 for (word = 0; word < PERIPH_NTAGWORDS; word++) {
313 bit = ffs(periph->periph_freetags[word]);
314 if (bit != 0)
315 break;
316 }
317 #ifdef DIAGNOSTIC
318 if (word == PERIPH_NTAGWORDS) {
319 scsipi_printaddr(periph);
320 printf("no free tags\n");
321 panic("scsipi_get_tag");
322 }
323 #endif
324
325 bit -= 1;
326 periph->periph_freetags[word] &= ~(1 << bit);
327 tag = (word << 5) | bit;
328
329 /* XXX Should eventually disallow this completely. */
330 if (tag >= periph->periph_openings) {
331 scsipi_printaddr(periph);
332 printf("WARNING: tag %d greater than available openings %d\n",
333 tag, periph->periph_openings);
334 }
335
336 xs->xs_tag_id = tag;
337 }
338
339 /*
340 * scsipi_put_tag:
341 *
342 * Put the tag ID for the specified xfer back into the pool.
343 *
344 * NOTE: Must be called at splbio().
345 */
346 void
347 scsipi_put_tag(xs)
348 struct scsipi_xfer *xs;
349 {
350 struct scsipi_periph *periph = xs->xs_periph;
351 int word, bit;
352
353 word = xs->xs_tag_id >> 5;
354 bit = xs->xs_tag_id & 0x1f;
355
356 periph->periph_freetags[word] |= (1 << bit);
357 }
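/*
 * Illustrative sketch (not part of the driver; wrapped in #if 0 so it
 * stays out of any build): the tag ID above packs a word index and a
 * bit index as (word << 5) | bit, so scsipi_put_tag() recovers them
 * with word = tag >> 5 and bit = tag & 0x1f.  A standalone round-trip
 * check of that encoding:
 */
#if 0
#include <assert.h>

static void
tag_roundtrip_demo(void)
{
	int word, bit, tag;

	for (word = 0; word < 4; word++) {
		for (bit = 0; bit < 32; bit++) {
			tag = (word << 5) | bit;	/* as in scsipi_get_tag() */
			assert(tag >> 5 == word);	/* as in scsipi_put_tag() */
			assert((tag & 0x1f) == bit);
		}
	}
}
#endif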
358
359 /*
360 * scsipi_get_xs:
361 *
362 * Allocate an xfer descriptor and associate it with the
363  *	specified peripheral. If the peripheral has no more
364 * available command openings, we either block waiting for
365 * one to become available, or fail.
366 */
367 struct scsipi_xfer *
368 scsipi_get_xs(periph, flags)
369 struct scsipi_periph *periph;
370 int flags;
371 {
372 struct scsipi_xfer *xs;
373 int s;
374
375 SC_DEBUG(periph, SCSIPI_DB3, ("scsipi_get_xs\n"));
376
377 /*
378 * If we're cold, make sure we poll.
379 */
380 if (cold)
381 flags |= XS_CTL_NOSLEEP | XS_CTL_POLL;
382
383 #ifdef DIAGNOSTIC
384 /*
385 * URGENT commands can never be ASYNC.
386 */
387 if ((flags & (XS_CTL_URGENT|XS_CTL_ASYNC)) ==
388 (XS_CTL_URGENT|XS_CTL_ASYNC)) {
389 scsipi_printaddr(periph);
390 printf("URGENT and ASYNC\n");
391 panic("scsipi_get_xs");
392 }
393 #endif
394
395 s = splbio();
396 /*
397 * Wait for a command opening to become available. Rules:
398 *
399 * - All xfers must wait for an available opening.
400 * Exception: URGENT xfers can proceed when
401 * active == openings, because we use the opening
402 * of the command we're recovering for.
403 	 *	- If the periph has sense pending, only URGENT & REQSENSE
404 * xfers may proceed.
405 *
406 * - If the periph is recovering, only URGENT xfers may
407 * proceed.
408 *
409 * - If the periph is currently executing a recovery
410 * command, URGENT commands must block, because only
411 * one recovery command can execute at a time.
412 */
413 for (;;) {
414 if (flags & XS_CTL_URGENT) {
415 if (periph->periph_active > periph->periph_openings)
416 goto wait_for_opening;
417 if (periph->periph_flags & PERIPH_SENSE) {
418 if ((flags & XS_CTL_REQSENSE) == 0)
419 goto wait_for_opening;
420 } else {
421 if ((periph->periph_flags &
422 PERIPH_RECOVERY_ACTIVE) != 0)
423 goto wait_for_opening;
424 periph->periph_flags |= PERIPH_RECOVERY_ACTIVE;
425 }
426 break;
427 }
428 if (periph->periph_active >= periph->periph_openings ||
429 (periph->periph_flags & PERIPH_RECOVERING) != 0)
430 goto wait_for_opening;
431 periph->periph_active++;
432 break;
433
434 wait_for_opening:
435 if (flags & XS_CTL_NOSLEEP) {
436 splx(s);
437 return (NULL);
438 }
439 SC_DEBUG(periph, SCSIPI_DB3, ("sleeping\n"));
440 periph->periph_flags |= PERIPH_WAITING;
441 (void) tsleep(periph, PRIBIO, "getxs", 0);
442 }
443 SC_DEBUG(periph, SCSIPI_DB3, ("calling pool_get\n"));
444 xs = pool_get(&scsipi_xfer_pool,
445 ((flags & XS_CTL_NOSLEEP) != 0 ? PR_NOWAIT : PR_WAITOK));
446 if (xs == NULL) {
447 if (flags & XS_CTL_URGENT) {
448 if ((flags & XS_CTL_REQSENSE) == 0)
449 periph->periph_flags &= ~PERIPH_RECOVERY_ACTIVE;
450 } else
451 periph->periph_active--;
452 scsipi_printaddr(periph);
453 printf("unable to allocate %sscsipi_xfer\n",
454 (flags & XS_CTL_URGENT) ? "URGENT " : "");
455 }
456 splx(s);
457
458 SC_DEBUG(periph, SCSIPI_DB3, ("returning\n"));
459
460 if (xs != NULL) {
461 		memset(xs, 0, sizeof(*xs));
462 		callout_init(&xs->xs_callout);
463 xs->xs_periph = periph;
464 xs->xs_control = flags;
465 xs->xs_status = 0;
466 s = splbio();
467 TAILQ_INSERT_TAIL(&periph->periph_xferq, xs, device_q);
468 splx(s);
469 }
470 return (xs);
471 }
472
473 /*
474 * scsipi_put_xs:
475 *
476 * Release an xfer descriptor, decreasing the outstanding command
477  *	count for the peripheral. If there is a thread waiting for
478  *	an opening, wake it up. If not, kick any queued I/O the
479  *	peripheral may have.
480 *
481 * NOTE: Must be called at splbio().
482 */
483 void
484 scsipi_put_xs(xs)
485 struct scsipi_xfer *xs;
486 {
487 struct scsipi_periph *periph = xs->xs_periph;
488 int flags = xs->xs_control;
489
490 SC_DEBUG(periph, SCSIPI_DB3, ("scsipi_free_xs\n"));
491
492 TAILQ_REMOVE(&periph->periph_xferq, xs, device_q);
493 pool_put(&scsipi_xfer_pool, xs);
494
495 #ifdef DIAGNOSTIC
496 if ((periph->periph_flags & PERIPH_RECOVERY_ACTIVE) != 0 &&
497 periph->periph_active == 0) {
498 scsipi_printaddr(periph);
499 		printf("recovery without a command to recover for\n");
500 panic("scsipi_put_xs");
501 }
502 #endif
503
504 if (flags & XS_CTL_URGENT) {
505 if ((flags & XS_CTL_REQSENSE) == 0)
506 periph->periph_flags &= ~PERIPH_RECOVERY_ACTIVE;
507 } else
508 periph->periph_active--;
509 if (periph->periph_active == 0 &&
510 (periph->periph_flags & PERIPH_WAITDRAIN) != 0) {
511 periph->periph_flags &= ~PERIPH_WAITDRAIN;
512 wakeup(&periph->periph_active);
513 }
514
515 if (periph->periph_flags & PERIPH_WAITING) {
516 periph->periph_flags &= ~PERIPH_WAITING;
517 wakeup(periph);
518 } else {
519 if (periph->periph_switch->psw_start != NULL) {
520 SC_DEBUG(periph, SCSIPI_DB2,
521 ("calling private start()\n"));
522 (*periph->periph_switch->psw_start)(periph);
523 }
524 }
525 }
526
527 /*
528 * scsipi_channel_freeze:
529 *
530 * Freeze a channel's xfer queue.
531 */
532 void
533 scsipi_channel_freeze(chan, count)
534 struct scsipi_channel *chan;
535 int count;
536 {
537 int s;
538
539 s = splbio();
540 chan->chan_qfreeze += count;
541 splx(s);
542 }
543
544 /*
545 * scsipi_channel_thaw:
546 *
547 * Thaw a channel's xfer queue.
548 */
549 void
550 scsipi_channel_thaw(chan, count)
551 struct scsipi_channel *chan;
552 int count;
553 {
554 int s;
555
556 s = splbio();
557 chan->chan_qfreeze -= count;
558 /*
559 * Don't let the freeze count go negative.
560 *
561 * Presumably the adapter driver could keep track of this,
562 * but it might just be easier to do this here so as to allow
563 * multiple callers, including those outside the adapter driver.
564 */
565 if (chan->chan_qfreeze < 0) {
566 chan->chan_qfreeze = 0;
567 }
568 splx(s);
569 }
570
571 /*
572 * scsipi_channel_timed_thaw:
573 *
574 * Thaw a channel after some time has expired.
575 */
576 void
577 scsipi_channel_timed_thaw(arg)
578 void *arg;
579 {
580 struct scsipi_channel *chan = arg;
581
582 scsipi_channel_thaw(chan, 1);
583
584 /*
585 * Kick the channel's queue here. Note, we're running in
586 * interrupt context (softclock), so the adapter driver
587 * had better not sleep.
588 */
589 scsipi_run_queue(chan);
590 }
591
592 /*
593 * scsipi_periph_freeze:
594 *
595 * Freeze a device's xfer queue.
596 */
597 void
598 scsipi_periph_freeze(periph, count)
599 struct scsipi_periph *periph;
600 int count;
601 {
602 int s;
603
604 s = splbio();
605 periph->periph_qfreeze += count;
606 splx(s);
607 }
608
609 /*
610 * scsipi_periph_thaw:
611 *
612 * Thaw a device's xfer queue.
613 */
614 void
615 scsipi_periph_thaw(periph, count)
616 struct scsipi_periph *periph;
617 int count;
618 {
619 int s;
620
621 s = splbio();
622 periph->periph_qfreeze -= count;
623 if (periph->periph_qfreeze == 0 &&
624 (periph->periph_flags & PERIPH_WAITING) != 0)
625 wakeup(periph);
626 splx(s);
627 }
628
629 /*
630 * scsipi_periph_timed_thaw:
631 *
632 * Thaw a device after some time has expired.
633 */
634 void
635 scsipi_periph_timed_thaw(arg)
636 void *arg;
637 {
638 struct scsipi_periph *periph = arg;
639
640 callout_stop(&periph->periph_callout);
641 scsipi_periph_thaw(periph, 1);
642
643 /*
644 * Kick the channel's queue here. Note, we're running in
645 * interrupt context (softclock), so the adapter driver
646 * had better not sleep.
647 */
648 scsipi_run_queue(periph->periph_channel);
649 }
650
651 /*
652 * scsipi_wait_drain:
653 *
654 * Wait for a periph's pending xfers to drain.
655 */
656 void
657 scsipi_wait_drain(periph)
658 struct scsipi_periph *periph;
659 {
660 int s;
661
662 s = splbio();
663 while (periph->periph_active != 0) {
664 periph->periph_flags |= PERIPH_WAITDRAIN;
665 (void) tsleep(&periph->periph_active, PRIBIO, "sxdrn", 0);
666 }
667 splx(s);
668 }
669
670 /*
671 * scsipi_kill_pending:
672 *
673 * Kill off all pending xfers for a periph.
674 *
675 * NOTE: Must be called at splbio().
676 */
677 void
678 scsipi_kill_pending(periph)
679 struct scsipi_periph *periph;
680 {
681
682 (*periph->periph_channel->chan_bustype->bustype_kill_pending)(periph);
683 #ifdef DIAGNOSTIC
684 if (TAILQ_FIRST(&periph->periph_xferq) != NULL)
685 panic("scsipi_kill_pending");
686 #endif
687 scsipi_wait_drain(periph);
688 }
689
690 /*
691 * scsipi_interpret_sense:
692 *
693 * Look at the returned sense and act on the error, determining
694 * the unix error number to pass back. (0 = report no error)
695 *
696  *	NOTE: If we return ERESTART, we are expected to have
697 * thawed the device!
698 *
699 * THIS IS THE DEFAULT ERROR HANDLER FOR SCSI DEVICES.
700 */
701 int
702 scsipi_interpret_sense(xs)
703 struct scsipi_xfer *xs;
704 {
705 struct scsipi_sense_data *sense;
706 struct scsipi_periph *periph = xs->xs_periph;
707 u_int8_t key;
708 u_int32_t info;
709 int error;
710 #ifndef SCSIVERBOSE
711 static char *error_mes[] = {
712 "soft error (corrected)",
713 "not ready", "medium error",
714 "non-media hardware failure", "illegal request",
715 "unit attention", "readonly device",
716 "no data found", "vendor unique",
717 "copy aborted", "command aborted",
718 "search returned equal", "volume overflow",
719 "verify miscompare", "unknown error key"
720 };
721 #endif
722
723 sense = &xs->sense.scsi_sense;
724 #ifdef SCSIPI_DEBUG
725 if (periph->periph_flags & SCSIPI_DB1) {
726 int count;
727 scsipi_printaddr(periph);
728 printf(" sense debug information:\n");
729 printf("\tcode 0x%x valid 0x%x\n",
730 sense->error_code & SSD_ERRCODE,
731 sense->error_code & SSD_ERRCODE_VALID ? 1 : 0);
732 printf("\tseg 0x%x key 0x%x ili 0x%x eom 0x%x fmark 0x%x\n",
733 sense->segment,
734 sense->flags & SSD_KEY,
735 sense->flags & SSD_ILI ? 1 : 0,
736 sense->flags & SSD_EOM ? 1 : 0,
737 sense->flags & SSD_FILEMARK ? 1 : 0);
738 printf("\ninfo: 0x%x 0x%x 0x%x 0x%x followed by %d "
739 "extra bytes\n",
740 sense->info[0],
741 sense->info[1],
742 sense->info[2],
743 sense->info[3],
744 sense->extra_len);
745 printf("\textra: ");
746 for (count = 0; count < ADD_BYTES_LIM(sense); count++)
747 printf("0x%x ", sense->cmd_spec_info[count]);
748 printf("\n");
749 }
750 #endif
751
752 /*
753 	 * If the periph has its own error handler, call it first.
754 * If it returns a legit error value, return that, otherwise
755 * it wants us to continue with normal error processing.
756 */
757 if (periph->periph_switch->psw_error != NULL) {
758 SC_DEBUG(periph, SCSIPI_DB2,
759 ("calling private err_handler()\n"));
760 error = (*periph->periph_switch->psw_error)(xs);
761 if (error != EJUSTRETURN)
762 return (error);
763 }
764 /* otherwise use the default */
765 switch (sense->error_code & SSD_ERRCODE) {
766 /*
767 * If it's code 70, use the extended stuff and
768 * interpret the key
769 */
770 case 0x71: /* delayed error */
771 scsipi_printaddr(periph);
772 key = sense->flags & SSD_KEY;
773 printf(" DEFERRED ERROR, key = 0x%x\n", key);
774 /* FALLTHROUGH */
775 case 0x70:
776 if ((sense->error_code & SSD_ERRCODE_VALID) != 0)
777 info = _4btol(sense->info);
778 else
779 info = 0;
780 key = sense->flags & SSD_KEY;
781
782 switch (key) {
783 case SKEY_NO_SENSE:
784 case SKEY_RECOVERED_ERROR:
785 if (xs->resid == xs->datalen && xs->datalen) {
786 /*
787 * Why is this here?
788 */
789 xs->resid = 0; /* not short read */
790 			}
			/* FALLTHROUGH */
791 		case SKEY_EQUAL:
792 error = 0;
793 break;
794 case SKEY_NOT_READY:
795 if ((periph->periph_flags & PERIPH_REMOVABLE) != 0)
796 periph->periph_flags &= ~PERIPH_MEDIA_LOADED;
797 if ((xs->xs_control & XS_CTL_IGNORE_NOT_READY) != 0)
798 return (0);
799 if (sense->add_sense_code == 0x3A &&
800 sense->add_sense_code_qual == 0x00)
801 error = ENODEV; /* Medium not present */
802 else
803 error = EIO;
804 if ((xs->xs_control & XS_CTL_SILENT) != 0)
805 return (error);
806 break;
807 case SKEY_ILLEGAL_REQUEST:
808 if ((xs->xs_control &
809 XS_CTL_IGNORE_ILLEGAL_REQUEST) != 0)
810 return (0);
811 /*
812 * Handle the case where a device reports
813 * Logical Unit Not Supported during discovery.
814 */
815 if ((xs->xs_control & XS_CTL_DISCOVERY) != 0 &&
816 sense->add_sense_code == 0x25 &&
817 sense->add_sense_code_qual == 0x00)
818 return (EINVAL);
819 if ((xs->xs_control & XS_CTL_SILENT) != 0)
820 return (EIO);
821 error = EINVAL;
822 break;
823 case SKEY_UNIT_ATTENTION:
824 if (sense->add_sense_code == 0x29 &&
825 sense->add_sense_code_qual == 0x00) {
826 /* device or bus reset */
827 return (ERESTART);
828 }
829 if ((periph->periph_flags & PERIPH_REMOVABLE) != 0)
830 periph->periph_flags &= ~PERIPH_MEDIA_LOADED;
831 if ((xs->xs_control &
832 XS_CTL_IGNORE_MEDIA_CHANGE) != 0 ||
833 /* XXX Should reupload any transient state. */
834 (periph->periph_flags &
835 PERIPH_REMOVABLE) == 0) {
836 return (ERESTART);
837 }
838 if ((xs->xs_control & XS_CTL_SILENT) != 0)
839 return (EIO);
840 error = EIO;
841 break;
842 case SKEY_WRITE_PROTECT:
843 error = EROFS;
844 break;
845 case SKEY_BLANK_CHECK:
846 error = 0;
847 break;
848 case SKEY_ABORTED_COMMAND:
849 error = ERESTART;
850 break;
851 case SKEY_VOLUME_OVERFLOW:
852 error = ENOSPC;
853 break;
854 default:
855 error = EIO;
856 break;
857 }
858
859 #ifdef SCSIVERBOSE
860 if (key && (xs->xs_control & XS_CTL_SILENT) == 0)
861 scsipi_print_sense(xs, 0);
862 #else
863 if (key) {
864 scsipi_printaddr(periph);
865 printf("%s", error_mes[key - 1]);
866 if ((sense->error_code & SSD_ERRCODE_VALID) != 0) {
867 switch (key) {
868 case SKEY_NOT_READY:
869 case SKEY_ILLEGAL_REQUEST:
870 case SKEY_UNIT_ATTENTION:
871 case SKEY_WRITE_PROTECT:
872 break;
873 case SKEY_BLANK_CHECK:
874 printf(", requested size: %d (decimal)",
875 info);
876 break;
877 case SKEY_ABORTED_COMMAND:
878 if (xs->xs_retries)
879 printf(", retrying");
880 printf(", cmd 0x%x, info 0x%x",
881 xs->cmd->opcode, info);
882 break;
883 default:
884 printf(", info = %d (decimal)", info);
885 }
886 }
887 if (sense->extra_len != 0) {
888 int n;
889 printf(", data =");
890 for (n = 0; n < sense->extra_len; n++)
891 printf(" %02x",
892 sense->cmd_spec_info[n]);
893 }
894 printf("\n");
895 }
896 #endif
897 return (error);
898
899 /*
900 * Not code 70, just report it
901 */
902 default:
903 #if defined(SCSIDEBUG) || defined(DEBUG)
904 {
905 static char *uc = "undecodable sense error";
906 int i;
907 u_int8_t *cptr = (u_int8_t *) sense;
908 scsipi_printaddr(periph);
909 if (xs->cmd == &xs->cmdstore) {
910 printf("%s for opcode 0x%x, data=",
911 uc, xs->cmdstore.opcode);
912 } else {
913 printf("%s, data=", uc);
914 }
915 		for (i = 0; i < sizeof (*sense); i++)
916 printf(" 0x%02x", *(cptr++) & 0xff);
917 printf("\n");
918 }
919 #else
920
921 scsipi_printaddr(periph);
922 printf("Sense Error Code 0x%x",
923 sense->error_code & SSD_ERRCODE);
924 if ((sense->error_code & SSD_ERRCODE_VALID) != 0) {
925 struct scsipi_sense_data_unextended *usense =
926 (struct scsipi_sense_data_unextended *)sense;
927 printf(" at block no. %d (decimal)",
928 _3btol(usense->block));
929 }
930 printf("\n");
931 #endif
932 return (EIO);
933 }
934 }
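/*
 * Illustrative sketch (wrapped in #if 0; decode_sense_demo is a made-up
 * name): the extended-sense handling above reads a few fixed offsets of
 * the sense buffer.  In the standard layout, byte 0 carries the error
 * code (0x7f) and the validity bit (0x80), byte 2 the sense key (0x0f),
 * bytes 3-6 the big-endian information field, and bytes 12/13 the
 * additional sense code and qualifier tested against values like
 * 0x3A/0x00 and 0x29/0x00 above.
 */
#if 0
#include <stdio.h>

static void
decode_sense_demo(const unsigned char *s)
{
	unsigned int info;

	printf("error code 0x%x, valid %d\n",
	    s[0] & 0x7f, (s[0] & 0x80) != 0);
	printf("sense key 0x%x\n", s[2] & 0x0f);
	info = ((unsigned int)s[3] << 24) | (s[4] << 16) |
	    (s[5] << 8) | s[6];
	printf("info 0x%x, asc 0x%x, ascq 0x%x\n", info, s[12], s[13]);
}
#endif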
935
936 /*
937 * scsipi_size:
938 *
939 * Find out from the device what its capacity is.
940 */
941 u_long
942 scsipi_size(periph, flags)
943 struct scsipi_periph *periph;
944 int flags;
945 {
946 struct scsipi_read_cap_data rdcap;
947 struct scsipi_read_capacity scsipi_cmd;
948
949 bzero(&scsipi_cmd, sizeof(scsipi_cmd));
950 scsipi_cmd.opcode = READ_CAPACITY;
951
952 /*
953 * If the command works, interpret the result as a 4 byte
954 * number of blocks
955 */
956 if (scsipi_command(periph, (struct scsipi_generic *)&scsipi_cmd,
957 sizeof(scsipi_cmd), (u_char *)&rdcap, sizeof(rdcap),
958 SCSIPIRETRIES, 20000, NULL,
959 flags | XS_CTL_DATA_IN | XS_CTL_DATA_ONSTACK) != 0) {
960 scsipi_printaddr(periph);
961 printf("could not get size\n");
962 return (0);
963 }
964
965 return (_4btol(rdcap.addr) + 1);
966 }
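/*
 * Illustrative sketch (wrapped in #if 0): READ CAPACITY returns the
 * address of the *last* logical block as a 4-byte big-endian value,
 * which is why scsipi_size() returns _4btol(rdcap.addr) + 1.  A
 * standalone equivalent of that decode and off-by-one:
 */
#if 0
static unsigned long
read_cap_nblocks_demo(const unsigned char addr[4])
{
	unsigned long last_lba;

	last_lba = ((unsigned long)addr[0] << 24) |
	    ((unsigned long)addr[1] << 16) |
	    ((unsigned long)addr[2] << 8) |
	    (unsigned long)addr[3];
	return (last_lba + 1);		/* capacity in blocks */
}
#endif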
967
968 /*
969 * scsipi_test_unit_ready:
970 *
971 * Issue a `test unit ready' request.
972 */
973 int
974 scsipi_test_unit_ready(periph, flags)
975 struct scsipi_periph *periph;
976 int flags;
977 {
978 struct scsipi_test_unit_ready scsipi_cmd;
979
980 /* some ATAPI drives don't support TEST_UNIT_READY. Sigh */
981 if (periph->periph_quirks & PQUIRK_NOTUR)
982 return (0);
983
984 bzero(&scsipi_cmd, sizeof(scsipi_cmd));
985 scsipi_cmd.opcode = TEST_UNIT_READY;
986
987 return (scsipi_command(periph,
988 (struct scsipi_generic *)&scsipi_cmd, sizeof(scsipi_cmd),
989 0, 0, SCSIPIRETRIES, 10000, NULL, flags));
990 }
991
992 /*
993 * scsipi_inquire:
994 *
995 * Ask the device about itself.
996 */
997 int
998 scsipi_inquire(periph, inqbuf, flags)
999 struct scsipi_periph *periph;
1000 struct scsipi_inquiry_data *inqbuf;
1001 int flags;
1002 {
1003 struct scsipi_inquiry scsipi_cmd;
1004
1005 bzero(&scsipi_cmd, sizeof(scsipi_cmd));
1006 scsipi_cmd.opcode = INQUIRY;
1007 scsipi_cmd.length = sizeof(struct scsipi_inquiry_data);
1008
1009 return (scsipi_command(periph,
1010 (struct scsipi_generic *) &scsipi_cmd, sizeof(scsipi_cmd),
1011 (u_char *) inqbuf, sizeof(struct scsipi_inquiry_data),
1012 SCSIPIRETRIES, 10000, NULL, XS_CTL_DATA_IN | flags));
1013 }
1014
1015 /*
1016 * scsipi_prevent:
1017 *
1018 * Prevent or allow the user to remove the media
1019 */
1020 int
1021 scsipi_prevent(periph, type, flags)
1022 struct scsipi_periph *periph;
1023 int type, flags;
1024 {
1025 struct scsipi_prevent scsipi_cmd;
1026
1027 if (periph->periph_quirks & PQUIRK_NODOORLOCK)
1028 return (0);
1029
1030 bzero(&scsipi_cmd, sizeof(scsipi_cmd));
1031 scsipi_cmd.opcode = PREVENT_ALLOW;
1032 scsipi_cmd.how = type;
1033
1034 return (scsipi_command(periph,
1035 (struct scsipi_generic *) &scsipi_cmd, sizeof(scsipi_cmd),
1036 0, 0, SCSIPIRETRIES, 5000, NULL, flags));
1037 }
1038
1039 /*
1040 * scsipi_start:
1041 *
1042 * Send a START UNIT.
1043 */
1044 int
1045 scsipi_start(periph, type, flags)
1046 struct scsipi_periph *periph;
1047 int type, flags;
1048 {
1049 struct scsipi_start_stop scsipi_cmd;
1050
1051 if (periph->periph_quirks & PQUIRK_NOSTARTUNIT)
1052 return 0;
1053
1054 bzero(&scsipi_cmd, sizeof(scsipi_cmd));
1055 scsipi_cmd.opcode = START_STOP;
1056 scsipi_cmd.byte2 = 0x00;
1057 scsipi_cmd.how = type;
1058
1059 return (scsipi_command(periph,
1060 (struct scsipi_generic *) &scsipi_cmd, sizeof(scsipi_cmd),
1061 0, 0, SCSIPIRETRIES, (type & SSS_START) ? 60000 : 10000,
1062 NULL, flags));
1063 }
1064
1065 /*
1066 * scsipi_done:
1067 *
1068 * This routine is called by an adapter's interrupt handler when
1069 * an xfer is completed.
1070 */
1071 void
1072 scsipi_done(xs)
1073 struct scsipi_xfer *xs;
1074 {
1075 struct scsipi_periph *periph = xs->xs_periph;
1076 struct scsipi_channel *chan = periph->periph_channel;
1077 int s, freezecnt;
1078
1079 SC_DEBUG(periph, SCSIPI_DB2, ("scsipi_done\n"));
1080 #ifdef SCSIPI_DEBUG
1081 if (periph->periph_dbflags & SCSIPI_DB1)
1082 show_scsipi_cmd(xs);
1083 #endif
1084
1085 s = splbio();
1086 /*
1087 * The resource this command was using is now free.
1088 */
1089 scsipi_put_resource(chan);
1090 xs->xs_periph->periph_sent--;
1091
1092 /*
1093 * If the command was tagged, free the tag.
1094 */
1095 if (XS_CTL_TAGTYPE(xs) != 0)
1096 scsipi_put_tag(xs);
1097 else
1098 periph->periph_flags &= ~PERIPH_UNTAG;
1099
1100 /* Mark the command as `done'. */
1101 xs->xs_status |= XS_STS_DONE;
1102
1103 #ifdef DIAGNOSTIC
1104 if ((xs->xs_control & (XS_CTL_ASYNC|XS_CTL_POLL)) ==
1105 (XS_CTL_ASYNC|XS_CTL_POLL))
1106 panic("scsipi_done: ASYNC and POLL");
1107 #endif
1108
1109 /*
1110 * If the xfer had an error of any sort, freeze the
1111 * periph's queue. Freeze it again if we were requested
1112 * to do so in the xfer.
1113 */
1114 freezecnt = 0;
1115 if (xs->error != XS_NOERROR)
1116 freezecnt++;
1117 if (xs->xs_control & XS_CTL_FREEZE_PERIPH)
1118 freezecnt++;
1119 if (freezecnt != 0)
1120 scsipi_periph_freeze(periph, freezecnt);
1121
1122 /*
1123 * record the xfer with a pending sense, in case a SCSI reset is
1124 	 * received before the thread is woken up.
1125 */
1126 if (xs->error == XS_BUSY && xs->status == SCSI_CHECK) {
1127 periph->periph_flags |= PERIPH_SENSE;
1128 periph->periph_xscheck = xs;
1129 }
1130
1131 /*
1132 	 * If this was an xfer that was not to complete asynchronously,
1133 * let the requesting thread perform error checking/handling
1134 * in its context.
1135 */
1136 if ((xs->xs_control & XS_CTL_ASYNC) == 0) {
1137 splx(s);
1138 /*
1139 * If it's a polling job, just return, to unwind the
1140 * call graph. We don't need to restart the queue,
1141 		 * because polling jobs are treated specially, and
1142 		 * are really only used during crash dumps anyway
1143 		 * (XXX or during boot-time autoconfiguration of
1144 * ATAPI devices).
1145 */
1146 if (xs->xs_control & XS_CTL_POLL)
1147 return;
1148 wakeup(xs);
1149 goto out;
1150 }
1151
1152 /*
1153 * Catch the extremely common case of I/O completing
1154 * without error; no use in taking a context switch
1155 * if we can handle it in interrupt context.
1156 */
1157 if (xs->error == XS_NOERROR) {
1158 splx(s);
1159 (void) scsipi_complete(xs);
1160 goto out;
1161 }
1162
1163 /*
1164 * There is an error on this xfer. Put it on the channel's
1165 * completion queue, and wake up the completion thread.
1166 */
1167 TAILQ_INSERT_TAIL(&chan->chan_complete, xs, channel_q);
1168 splx(s);
1169 wakeup(&chan->chan_complete);
1170
1171 out:
1172 /*
1173 * If there are more xfers on the channel's queue, attempt to
1174 * run them.
1175 */
1176 scsipi_run_queue(chan);
1177 }
1178
1179 /*
1180 * scsipi_complete:
1181 *
1182 * Completion of a scsipi_xfer. This is the guts of scsipi_done().
1183 *
1184 * NOTE: This routine MUST be called with valid thread context
1185 * except for the case where the following two conditions are
1186 * true:
1187 *
1188 * xs->error == XS_NOERROR
1189 * XS_CTL_ASYNC is set in xs->xs_control
1190 *
1191  *	The semantics of this routine can be tricky, so here is an
1192  *	explanation of the return values:
1193 *
1194 * 0 Xfer completed successfully.
1195 *
1196 * ERESTART Xfer had an error, but was restarted.
1197 *
1198 * anything else Xfer had an error, return value is Unix
1199 * errno.
1200 *
1201 * If the return value is anything but ERESTART:
1202 *
1203 * - If XS_CTL_ASYNC is set, `xs' has been freed back to
1204 * the pool.
1205 * - If there is a buf associated with the xfer,
1206 * it has been biodone()'d.
1207 */
1208 int
1209 scsipi_complete(xs)
1210 struct scsipi_xfer *xs;
1211 {
1212 struct scsipi_periph *periph = xs->xs_periph;
1213 struct scsipi_channel *chan = periph->periph_channel;
1214 struct buf *bp;
1215 int error, s;
1216
1217 #ifdef DIAGNOSTIC
1218 if ((xs->xs_control & XS_CTL_ASYNC) != 0 && xs->bp == NULL)
1219 panic("scsipi_complete: XS_CTL_ASYNC but no buf");
1220 #endif
1221 /*
1222 * If command terminated with a CHECK CONDITION, we need to issue a
1223 * REQUEST_SENSE command. Once the REQUEST_SENSE has been processed
1224 * we'll have the real status.
1225 * Must be processed at splbio() to avoid missing a SCSI bus reset
1226 * for this command.
1227 */
1228 s = splbio();
1229 if (xs->error == XS_BUSY && xs->status == SCSI_CHECK) {
1230 		/* request sense for a request sense? */
1231 if (xs->xs_control & XS_CTL_REQSENSE) {
1232 scsipi_printaddr(periph);
1233 /* XXX maybe we should reset the device ? */
1234 /* we've been frozen because xs->error != XS_NOERROR */
1235 scsipi_periph_thaw(periph, 1);
1236 splx(s);
1237 return EINVAL;
1238 }
1239 scsipi_request_sense(xs);
1240 }
1241 splx(s);
1242 /*
1243 * If it's a user level request, bypass all usual completion
1244 	 * processing, let the user work it out.
1245 */
1246 if ((xs->xs_control & XS_CTL_USERCMD) != 0) {
1247 SC_DEBUG(periph, SCSIPI_DB3, ("calling user done()\n"));
1248 if (xs->error != XS_NOERROR)
1249 scsipi_periph_thaw(periph, 1);
1250 scsipi_user_done(xs);
1251 SC_DEBUG(periph, SCSIPI_DB3, ("returned from user done()\n "));
1252 return 0;
1253 }
1254
1255
1256 switch (xs->error) {
1257 case XS_NOERROR:
1258 error = 0;
1259 break;
1260
1261 case XS_SENSE:
1262 case XS_SHORTSENSE:
1263 error = (*chan->chan_bustype->bustype_interpret_sense)(xs);
1264 break;
1265
1266 case XS_RESOURCE_SHORTAGE:
1267 /*
1268 * XXX Should freeze channel's queue.
1269 */
1270 scsipi_printaddr(periph);
1271 printf("adapter resource shortage\n");
1272 /* FALLTHROUGH */
1273
1274 case XS_BUSY:
1275 if (xs->error == XS_BUSY && xs->status == SCSI_QUEUE_FULL) {
1276 struct scsipi_max_openings mo;
1277
1278 /*
1279 * We set the openings to active - 1, assuming that
1280 * the command that got us here is the first one that
1281 * can't fit into the device's queue. If that's not
1282 * the case, I guess we'll find out soon enough.
1283 */
1284 mo.mo_target = periph->periph_target;
1285 mo.mo_lun = periph->periph_lun;
1286 mo.mo_openings = periph->periph_active - 1;
1287 #ifdef DIAGNOSTIC
1288 if (mo.mo_openings < 0) {
1289 scsipi_printaddr(periph);
1290 printf("QUEUE FULL resulted in < 0 openings\n");
1291 panic("scsipi_done");
1292 }
1293 #endif
1294 if (mo.mo_openings == 0) {
1295 scsipi_printaddr(periph);
1296 printf("QUEUE FULL resulted in 0 openings\n");
1297 mo.mo_openings = 1;
1298 }
1299 scsipi_async_event(chan, ASYNC_EVENT_MAX_OPENINGS, &mo);
1300 error = ERESTART;
1301 } else if (xs->xs_retries != 0) {
1302 xs->xs_retries--;
1303 /*
1304 * Wait one second, and try again.
1305 */
1306 if (xs->xs_control & XS_CTL_POLL)
1307 delay(1000000);
1308 else {
1309 scsipi_periph_freeze(periph, 1);
1310 callout_reset(&periph->periph_callout,
1311 hz, scsipi_periph_timed_thaw, periph);
1312 }
1313 error = ERESTART;
1314 } else
1315 error = EBUSY;
1316 break;
1317
1318 case XS_REQUEUE:
1319 error = ERESTART;
1320 break;
1321
1322 case XS_TIMEOUT:
1323 if (xs->xs_retries != 0) {
1324 xs->xs_retries--;
1325 error = ERESTART;
1326 } else
1327 error = EIO;
1328 break;
1329
1330 case XS_SELTIMEOUT:
1331 /* XXX Disable device? */
1332 error = EIO;
1333 break;
1334
1335 case XS_RESET:
1336 if (xs->xs_control & XS_CTL_REQSENSE) {
1337 /*
1338 * request sense interrupted by reset: signal it
1339 * with EINTR return code.
1340 */
1341 error = EINTR;
1342 } else {
1343 if (xs->xs_retries != 0) {
1344 xs->xs_retries--;
1345 error = ERESTART;
1346 } else
1347 error = EIO;
1348 }
1349 break;
1350
1351 default:
1352 scsipi_printaddr(periph);
1353 printf("invalid return code from adapter: %d\n", xs->error);
1354 error = EIO;
1355 break;
1356 }
1357
1358 s = splbio();
1359 if (error == ERESTART) {
1360 /*
1361 * If we get here, the periph has been thawed and frozen
1362 * again if we had to issue recovery commands. Alternatively,
1363 * it may have been frozen again and in a timed thaw. In
1364 * any case, we thaw the periph once we re-enqueue the
1365 * command. Once the periph is fully thawed, it will begin
1366 * operation again.
1367 */
1368 xs->error = XS_NOERROR;
1369 xs->status = SCSI_OK;
1370 xs->xs_status &= ~XS_STS_DONE;
1371 xs->xs_requeuecnt++;
1372 error = scsipi_enqueue(xs);
1373 if (error == 0) {
1374 scsipi_periph_thaw(periph, 1);
1375 splx(s);
1376 return (ERESTART);
1377 }
1378 }
1379
1380 /*
1381 * scsipi_done() freezes the queue if not XS_NOERROR.
1382 * Thaw it here.
1383 */
1384 if (xs->error != XS_NOERROR)
1385 scsipi_periph_thaw(periph, 1);
1386
1387
1388 if (periph->periph_switch->psw_done)
1389 periph->periph_switch->psw_done(xs);
1390 if ((bp = xs->bp) != NULL) {
1391 if (error) {
1392 bp->b_error = error;
1393 bp->b_flags |= B_ERROR;
1394 bp->b_resid = bp->b_bcount;
1395 } else {
1396 bp->b_error = 0;
1397 bp->b_resid = xs->resid;
1398 }
1399 biodone(bp);
1400 }
1401
1402 if (xs->xs_control & XS_CTL_ASYNC)
1403 scsipi_put_xs(xs);
1404 splx(s);
1405
1406 return (error);
1407 }
1408
1409 /*
1410 * Issue a request sense for the given scsipi_xfer. Called when the xfer
1411 * returns with a CHECK_CONDITION status. Must be called in valid thread
1412 * context and at splbio().
1413 */
1414
1415 void
1416 scsipi_request_sense(xs)
1417 struct scsipi_xfer *xs;
1418 {
1419 struct scsipi_periph *periph = xs->xs_periph;
1420 int flags, error;
1421 struct scsipi_sense cmd;
1422
1423 periph->periph_flags |= PERIPH_SENSE;
1424
1425 /* if command was polling, request sense will too */
1426 flags = xs->xs_control & XS_CTL_POLL;
1427 /* Polling commands can't sleep */
1428 if (flags)
1429 flags |= XS_CTL_NOSLEEP;
1430
1431 flags |= XS_CTL_REQSENSE | XS_CTL_URGENT | XS_CTL_DATA_IN |
1432 XS_CTL_THAW_PERIPH | XS_CTL_FREEZE_PERIPH;
1433
1434 bzero(&cmd, sizeof(cmd));
1435 cmd.opcode = REQUEST_SENSE;
1436 cmd.length = sizeof(struct scsipi_sense_data);
1437
1438 error = scsipi_command(periph,
1439 (struct scsipi_generic *) &cmd, sizeof(cmd),
1440 (u_char*)&xs->sense.scsi_sense, sizeof(struct scsipi_sense_data),
1441 0, 1000, NULL, flags);
1442 periph->periph_flags &= ~PERIPH_SENSE;
1443 periph->periph_xscheck = NULL;
1444 switch(error) {
1445 case 0:
1446 /* we have a valid sense */
1447 xs->error = XS_SENSE;
1448 return;
1449 case EINTR:
1450 /* REQUEST_SENSE interrupted by bus reset. */
1451 xs->error = XS_RESET;
1452 return;
1453 case EIO:
1454 		/* request sense couldn't be performed */
1455 		/*
1456 		 * XXX this isn't quite right but we don't have anything
1457 * better for now
1458 */
1459 xs->error = XS_DRIVER_STUFFUP;
1460 return;
1461 default:
1462 /* Notify that request sense failed. */
1463 xs->error = XS_DRIVER_STUFFUP;
1464 scsipi_printaddr(periph);
1465 printf("request sense failed with error %d\n", error);
1466 return;
1467 }
1468 }
1469
1470 /*
1471 * scsipi_enqueue:
1472 *
1473 * Enqueue an xfer on a channel.
1474 */
1475 int
1476 scsipi_enqueue(xs)
1477 struct scsipi_xfer *xs;
1478 {
1479 struct scsipi_channel *chan = xs->xs_periph->periph_channel;
1480 struct scsipi_xfer *qxs;
1481 int s;
1482
1483 s = splbio();
1484
1485 /*
1486 * If the xfer is to be polled, and there are already jobs on
1487 * the queue, we can't proceed.
1488 */
1489 if ((xs->xs_control & XS_CTL_POLL) != 0 &&
1490 TAILQ_FIRST(&chan->chan_queue) != NULL) {
1491 splx(s);
1492 xs->error = XS_DRIVER_STUFFUP;
1493 return (EAGAIN);
1494 }
1495
1496 /*
1497 * If we have an URGENT xfer, it's an error recovery command
1498 * and it should just go on the head of the channel's queue.
1499 */
1500 if (xs->xs_control & XS_CTL_URGENT) {
1501 TAILQ_INSERT_HEAD(&chan->chan_queue, xs, channel_q);
1502 goto out;
1503 }
1504
1505 /*
1506 * If this xfer has already been on the queue before, we
1507 * need to reinsert it in the correct order. That order is:
1508 *
1509 * Immediately before the first xfer for this periph
1510 * with a requeuecnt less than xs->xs_requeuecnt.
1511 *
1512 * Failing that, at the end of the queue. (We'll end up
1513 * there naturally.)
1514 */
1515 if (xs->xs_requeuecnt != 0) {
1516 for (qxs = TAILQ_FIRST(&chan->chan_queue); qxs != NULL;
1517 qxs = TAILQ_NEXT(qxs, channel_q)) {
1518 if (qxs->xs_periph == xs->xs_periph &&
1519 qxs->xs_requeuecnt < xs->xs_requeuecnt)
1520 break;
1521 }
1522 if (qxs != NULL) {
1523 TAILQ_INSERT_AFTER(&chan->chan_queue, qxs, xs,
1524 channel_q);
1525 goto out;
1526 }
1527 }
1528 TAILQ_INSERT_TAIL(&chan->chan_queue, xs, channel_q);
1529 out:
1530 if (xs->xs_control & XS_CTL_THAW_PERIPH)
1531 scsipi_periph_thaw(xs->xs_periph, 1);
1532 splx(s);
1533 return (0);
1534 }
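/*
 * Illustrative sketch (wrapped in #if 0; the demo_* names are made up):
 * a cut-down model of the requeue-ordering scan above, using the same
 * <sys/queue.h> primitives.  A requeued entry goes right after the first
 * entry for the same periph with a smaller requeue count, and otherwise
 * falls to the tail, so more-requeued xfers drift toward the head.
 */
#if 0
#include <sys/queue.h>

struct demo_xs {
	TAILQ_ENTRY(demo_xs) q;
	void *periph;
	int requeuecnt;
};
TAILQ_HEAD(demo_queue, demo_xs);

static void
demo_enqueue(struct demo_queue *head, struct demo_xs *xs)
{
	struct demo_xs *qxs;

	TAILQ_FOREACH(qxs, head, q) {
		if (qxs->periph == xs->periph &&
		    qxs->requeuecnt < xs->requeuecnt) {
			TAILQ_INSERT_AFTER(head, qxs, xs, q);
			return;
		}
	}
	TAILQ_INSERT_TAIL(head, xs, q);
}
#endif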
1535
1536 /*
1537 * scsipi_run_queue:
1538 *
1539 * Start as many xfers as possible running on the channel.
1540 */
1541 void
1542 scsipi_run_queue(chan)
1543 struct scsipi_channel *chan;
1544 {
1545 struct scsipi_xfer *xs;
1546 struct scsipi_periph *periph;
1547 int s;
1548
1549 for (;;) {
1550 s = splbio();
1551
1552 /*
1553 * If the channel is frozen, we can't do any work right
1554 * now.
1555 */
1556 if (chan->chan_qfreeze != 0) {
1557 splx(s);
1558 return;
1559 }
1560
1561 /*
1562 * Look for work to do, and make sure we can do it.
1563 */
1564 for (xs = TAILQ_FIRST(&chan->chan_queue); xs != NULL;
1565 xs = TAILQ_NEXT(xs, channel_q)) {
1566 periph = xs->xs_periph;
1567
1568 if ((periph->periph_sent >= periph->periph_openings) ||
1569 periph->periph_qfreeze != 0 ||
1570 (periph->periph_flags & PERIPH_UNTAG) != 0)
1571 continue;
1572
1573 if ((periph->periph_flags &
1574 (PERIPH_RECOVERING | PERIPH_SENSE)) != 0 &&
1575 (xs->xs_control & XS_CTL_URGENT) == 0)
1576 continue;
1577
1578 /*
1579 * We can issue this xfer!
1580 */
1581 goto got_one;
1582 }
1583
1584 /*
1585 * Can't find any work to do right now.
1586 */
1587 splx(s);
1588 return;
1589
1590 got_one:
1591 /*
1592 * Have an xfer to run. Allocate a resource from
1593 * the adapter to run it. If we can't allocate that
1594 * resource, we don't dequeue the xfer.
1595 */
1596 if (scsipi_get_resource(chan) == 0) {
1597 /*
1598 * Adapter is out of resources. If the adapter
1599 * supports it, attempt to grow them.
1600 */
1601 if (scsipi_grow_resources(chan) == 0) {
1602 /*
1603 * Wasn't able to grow resources,
1604 * nothing more we can do.
1605 */
1606 if (xs->xs_control & XS_CTL_POLL) {
1607 scsipi_printaddr(xs->xs_periph);
1608 					printf("polling command but no "
1609 					    "adapter resources\n");
1610 /* We'll panic shortly... */
1611 }
1612 splx(s);
1613
1614 /*
1615 				 * XXX: We should be able to note
1616 				 * XXX: that resources are needed here!
1617 */
1618 return;
1619 }
1620 /*
1621 * scsipi_grow_resources() allocated the resource
1622 * for us.
1623 */
1624 }
1625
1626 /*
1627 * We have a resource to run this xfer, do it!
1628 */
1629 TAILQ_REMOVE(&chan->chan_queue, xs, channel_q);
1630
1631 /*
1632 * If the command is to be tagged, allocate a tag ID
1633 * for it.
1634 */
1635 if (XS_CTL_TAGTYPE(xs) != 0)
1636 scsipi_get_tag(xs);
1637 else
1638 periph->periph_flags |= PERIPH_UNTAG;
1639 periph->periph_sent++;
1640 splx(s);
1641
1642 scsipi_adapter_request(chan, ADAPTER_REQ_RUN_XFER, xs);
1643 }
1644 #ifdef DIAGNOSTIC
1645 panic("scsipi_run_queue: impossible");
1646 #endif
1647 }
1648
1649 /*
1650 * scsipi_execute_xs:
1651 *
1652 * Begin execution of an xfer, waiting for it to complete, if necessary.
1653 */
1654 int
1655 scsipi_execute_xs(xs)
1656 struct scsipi_xfer *xs;
1657 {
1658 struct scsipi_periph *periph = xs->xs_periph;
1659 struct scsipi_channel *chan = periph->periph_channel;
1660 int async, poll, retries, error, s;
1661
1662 xs->xs_status &= ~XS_STS_DONE;
1663 xs->error = XS_NOERROR;
1664 xs->resid = xs->datalen;
1665 xs->status = SCSI_OK;
1666
1667 #ifdef SCSIPI_DEBUG
1668 if (xs->xs_periph->periph_dbflags & SCSIPI_DB3) {
1669 printf("scsipi_execute_xs: ");
1670 show_scsipi_xs(xs);
1671 printf("\n");
1672 }
1673 #endif
1674
1675 /*
1676 * Deal with command tagging:
1677 *
1678 * - If the device's current operating mode doesn't
1679 * include tagged queueing, clear the tag mask.
1680 *
1681 * - If the device's current operating mode *does*
1682 * include tagged queueing, set the tag_type in
1683 * the xfer to the appropriate byte for the tag
1684 * message.
1685 */
1686 if ((PERIPH_XFER_MODE(periph) & PERIPH_CAP_TQING) == 0 ||
1687 (xs->xs_control & XS_CTL_REQSENSE)) {
1688 xs->xs_control &= ~XS_CTL_TAGMASK;
1689 xs->xs_tag_type = 0;
1690 } else {
1691 /*
1692 * If the request doesn't specify a tag, give Head
1693 * tags to URGENT operations and Ordered tags to
1694 * everything else.
1695 */
1696 if (XS_CTL_TAGTYPE(xs) == 0) {
1697 if (xs->xs_control & XS_CTL_URGENT)
1698 xs->xs_control |= XS_CTL_HEAD_TAG;
1699 else
1700 xs->xs_control |= XS_CTL_ORDERED_TAG;
1701 }
1702
1703 switch (XS_CTL_TAGTYPE(xs)) {
1704 case XS_CTL_ORDERED_TAG:
1705 xs->xs_tag_type = MSG_ORDERED_Q_TAG;
1706 break;
1707
1708 case XS_CTL_SIMPLE_TAG:
1709 xs->xs_tag_type = MSG_SIMPLE_Q_TAG;
1710 break;
1711
1712 case XS_CTL_HEAD_TAG:
1713 xs->xs_tag_type = MSG_HEAD_OF_Q_TAG;
1714 break;
1715
1716 default:
1717 scsipi_printaddr(periph);
1718 printf("invalid tag mask 0x%08x\n",
1719 XS_CTL_TAGTYPE(xs));
1720 panic("scsipi_execute_xs");
1721 }
1722 }
1723
1724 /*
1725 * If we don't yet have a completion thread, or we are to poll for
1726 * completion, clear the ASYNC flag.
1727 */
1728 if (chan->chan_thread == NULL || (xs->xs_control & XS_CTL_POLL) != 0)
1729 xs->xs_control &= ~XS_CTL_ASYNC;
1730
1731 async = (xs->xs_control & XS_CTL_ASYNC);
1732 poll = (xs->xs_control & XS_CTL_POLL);
1733 retries = xs->xs_retries; /* for polling commands */
1734
1735 #ifdef DIAGNOSTIC
1736 if (async != 0 && xs->bp == NULL)
1737 panic("scsipi_execute_xs: XS_CTL_ASYNC but no buf");
1738 #endif
1739
1740 /*
1741 * Enqueue the transfer. If we're not polling for completion, this
1742 * should ALWAYS return `no error'.
1743 */
1744 try_again:
1745 error = scsipi_enqueue(xs);
1746 if (error) {
1747 if (poll == 0) {
1748 scsipi_printaddr(periph);
1749 printf("not polling, but enqueue failed with %d\n",
1750 error);
1751 panic("scsipi_execute_xs");
1752 }
1753
1754 scsipi_printaddr(periph);
1755 printf("failed to enqueue polling command");
1756 if (retries != 0) {
1757 printf(", retrying...\n");
1758 delay(1000000);
1759 retries--;
1760 goto try_again;
1761 }
1762 printf("\n");
1763 goto free_xs;
1764 }
1765
1766 restarted:
1767 scsipi_run_queue(chan);
1768
1769 /*
1770 * The xfer is enqueued, and possibly running. If it's to be
1771 * completed asynchronously, just return now.
1772 */
1773 if (async)
1774 return (EJUSTRETURN);
1775
1776 /*
1777 * Not an asynchronous command; wait for it to complete.
1778 */
1779 s = splbio();
1780 while ((xs->xs_status & XS_STS_DONE) == 0) {
1781 if (poll) {
1782 scsipi_printaddr(periph);
1783 printf("polling command not done\n");
1784 panic("scsipi_execute_xs");
1785 }
1786 (void) tsleep(xs, PRIBIO, "xscmd", 0);
1787 }
1788 splx(s);
1789
1790 /*
1791 * Command is complete. scsipi_done() has awakened us to perform
1792 * the error handling.
1793 */
1794 error = scsipi_complete(xs);
1795 if (error == ERESTART)
1796 goto restarted;
1797
1798 /*
1799 * Command completed successfully or fatal error occurred. Fall
1800 * into....
1801 */
1802 free_xs:
1803 s = splbio();
1804 scsipi_put_xs(xs);
1805 splx(s);
1806
1807 /*
1808 * Kick the queue, keep it running in case it stopped for some
1809 * reason.
1810 */
1811 scsipi_run_queue(chan);
1812
1813 return (error);
1814 }
1815
1816 /*
1817 * scsipi_completion_thread:
1818 *
1819 * This is the completion thread. We wait for errors on
1820 * asynchronous xfers, and perform the error handling
1821 * function, restarting the command, if necessary.
1822 */
1823 void
1824 scsipi_completion_thread(arg)
1825 void *arg;
1826 {
1827 struct scsipi_channel *chan = arg;
1828 struct scsipi_xfer *xs;
1829 int s;
1830
1831 for (;;) {
1832 s = splbio();
1833 xs = TAILQ_FIRST(&chan->chan_complete);
1834 if (xs == NULL &&
1835 (chan->chan_flags & SCSIPI_CHAN_SHUTDOWN) == 0) {
1836 (void) tsleep(&chan->chan_complete, PRIBIO,
1837 "sccomp", 0);
1838 splx(s);
1839 continue;
1840 }
1841 if (chan->chan_flags & SCSIPI_CHAN_SHUTDOWN) {
1842 splx(s);
1843 break;
1844 }
1845 TAILQ_REMOVE(&chan->chan_complete, xs, channel_q);
1846 splx(s);
1847
1848 /*
1849 * Have an xfer with an error; process it.
1850 */
1851 (void) scsipi_complete(xs);
1852
1853 /*
1854 * Kick the queue; keep it running if it was stopped
1855 * for some reason.
1856 */
1857 scsipi_run_queue(chan);
1858 }
1859
1860 chan->chan_thread = NULL;
1861
1862 /* In case parent is waiting for us to exit. */
1863 wakeup(&chan->chan_thread);
1864
1865 kthread_exit(0);
1866 }
1867
1868 /*
1869 * scsipi_create_completion_thread:
1870 *
1871 * Callback to actually create the completion thread.
1872 */
1873 void
1874 scsipi_create_completion_thread(arg)
1875 void *arg;
1876 {
1877 struct scsipi_channel *chan = arg;
1878 struct scsipi_adapter *adapt = chan->chan_adapter;
1879
1880 if (kthread_create1(scsipi_completion_thread, chan,
1881 &chan->chan_thread, "%s:%d", adapt->adapt_dev->dv_xname,
1882 chan->chan_channel)) {
1883 printf("%s: unable to create completion thread for "
1884 "channel %d\n", adapt->adapt_dev->dv_xname,
1885 chan->chan_channel);
1886 panic("scsipi_create_completion_thread");
1887 }
1888 }
1889
1890 /*
1891 * scsipi_async_event:
1892 *
1893 * Handle an asynchronous event from an adapter.
1894 */
1895 void
1896 scsipi_async_event(chan, event, arg)
1897 struct scsipi_channel *chan;
1898 scsipi_async_event_t event;
1899 void *arg;
1900 {
1901 int s;
1902
1903 s = splbio();
1904 switch (event) {
1905 case ASYNC_EVENT_MAX_OPENINGS:
1906 scsipi_async_event_max_openings(chan,
1907 (struct scsipi_max_openings *)arg);
1908 break;
1909
1910 case ASYNC_EVENT_XFER_MODE:
1911 scsipi_async_event_xfer_mode(chan,
1912 (struct scsipi_xfer_mode *)arg);
1913 break;
1914 case ASYNC_EVENT_RESET:
1915 scsipi_async_event_channel_reset(chan);
1916 break;
1917 }
1918 splx(s);
1919 }
1920
1921 /*
1922 * scsipi_print_xfer_mode:
1923 *
1924 * Print a periph's capabilities.
1925 */
1926 void
1927 scsipi_print_xfer_mode(periph)
1928 struct scsipi_periph *periph;
1929 {
1930 int period, freq, speed, mbs;
1931
1932 if ((periph->periph_flags & PERIPH_MODE_VALID) == 0)
1933 return;
1934
1935 printf("%s: ", periph->periph_dev->dv_xname);
1936 if (periph->periph_mode & PERIPH_CAP_SYNC) {
1937 period = scsipi_sync_factor_to_period(periph->periph_period);
1938 printf("Sync (%d.%dns offset %d)",
1939 period / 10, period % 10, periph->periph_offset);
1940 } else
1941 printf("Async");
1942
1943 if (periph->periph_mode & PERIPH_CAP_WIDE32)
1944 printf(", 32-bit");
1945 else if (periph->periph_mode & PERIPH_CAP_WIDE16)
1946 printf(", 16-bit");
1947 else
1948 printf(", 8-bit");
1949
1950 if (periph->periph_mode & PERIPH_CAP_SYNC) {
1951 freq = scsipi_sync_factor_to_freq(periph->periph_period);
1952 speed = freq;
1953 if (periph->periph_mode & PERIPH_CAP_WIDE32)
1954 speed *= 4;
1955 else if (periph->periph_mode & PERIPH_CAP_WIDE16)
1956 speed *= 2;
1957 mbs = speed / 1000;
1958 if (mbs > 0)
1959 printf(" (%d.%03dMB/s)", mbs, speed % 1000);
1960 else
1961 printf(" (%dKB/s)", speed % 1000);
1962 }
1963
1964 printf(" transfers");
1965
1966 if (periph->periph_mode & PERIPH_CAP_TQING)
1967 printf(", tagged queueing");
1968
1969 printf("\n");
1970 }
1971
1972 /*
1973 * scsipi_async_event_max_openings:
1974 *
1975 * Update the maximum number of outstanding commands a
1976 * device may have.
1977 */
1978 void
1979 scsipi_async_event_max_openings(chan, mo)
1980 struct scsipi_channel *chan;
1981 struct scsipi_max_openings *mo;
1982 {
1983 struct scsipi_periph *periph;
1984 int minlun, maxlun;
1985
1986 if (mo->mo_lun == -1) {
1987 /*
1988 * Wildcarded; apply it to all LUNs.
1989 */
1990 minlun = 0;
1991 maxlun = chan->chan_nluns - 1;
1992 } else
1993 minlun = maxlun = mo->mo_lun;
1994
1995 for (; minlun <= maxlun; minlun++) {
1996 periph = scsipi_lookup_periph(chan, mo->mo_target, minlun);
1997 if (periph == NULL)
1998 continue;
1999
2000 if (mo->mo_openings < periph->periph_openings)
2001 periph->periph_openings = mo->mo_openings;
2002 else if (mo->mo_openings > periph->periph_openings &&
2003 (periph->periph_flags & PERIPH_GROW_OPENINGS) != 0)
2004 periph->periph_openings = mo->mo_openings;
2005 }
2006 }
2007
2008 /*
2009 * scsipi_async_event_xfer_mode:
2010 *
2011 * Update the xfer mode for all periphs sharing the
2012 * specified I_T Nexus.
2013 */
2014 void
2015 scsipi_async_event_xfer_mode(chan, xm)
2016 struct scsipi_channel *chan;
2017 struct scsipi_xfer_mode *xm;
2018 {
2019 struct scsipi_periph *periph;
2020 int lun, announce, mode, period, offset;
2021
2022 for (lun = 0; lun < chan->chan_nluns; lun++) {
2023 periph = scsipi_lookup_periph(chan, xm->xm_target, lun);
2024 if (periph == NULL)
2025 continue;
2026 announce = 0;
2027
2028 /*
2029 * Clamp the xfer mode down to this periph's capabilities.
2030 */
2031 mode = xm->xm_mode & periph->periph_cap;
2032 if (mode & PERIPH_CAP_SYNC) {
2033 period = xm->xm_period;
2034 offset = xm->xm_offset;
2035 } else {
2036 period = 0;
2037 offset = 0;
2038 }
2039
2040 /*
2041 * If we do not have a valid xfer mode yet, or the parameters
2042 * are different, announce them.
2043 */
2044 if ((periph->periph_flags & PERIPH_MODE_VALID) == 0 ||
2045 periph->periph_mode != mode ||
2046 periph->periph_period != period ||
2047 periph->periph_offset != offset)
2048 announce = 1;
2049
2050 periph->periph_mode = mode;
2051 periph->periph_period = period;
2052 periph->periph_offset = offset;
2053 periph->periph_flags |= PERIPH_MODE_VALID;
2054
2055 if (announce)
2056 scsipi_print_xfer_mode(periph);
2057 }
2058 }
2059
2060 /*
2061 * scsipi_set_xfer_mode:
2062 *
2063 * Set the xfer mode for the specified I_T Nexus.
2064 */
2065 void
2066 scsipi_set_xfer_mode(chan, target, immed)
2067 struct scsipi_channel *chan;
2068 int target, immed;
2069 {
2070 struct scsipi_xfer_mode xm;
2071 	struct scsipi_periph *itperiph = NULL;
2072 int lun, s;
2073
2074 /*
2075 * Go to the minimal xfer mode.
2076 */
2077 xm.xm_target = target;
2078 xm.xm_mode = 0;
2079 xm.xm_period = 0; /* ignored */
2080 xm.xm_offset = 0; /* ignored */
2081
2082 /*
2083 * Find the first LUN we know about on this I_T Nexus.
2084 */
2085 for (lun = 0; lun < chan->chan_nluns; lun++) {
2086 itperiph = scsipi_lookup_periph(chan, target, lun);
2087 if (itperiph != NULL)
2088 break;
2089 }
2090 if (itperiph != NULL)
2091 xm.xm_mode = itperiph->periph_cap;
2092
2093 /*
2094 * Now issue the request to the adapter.
2095 */
2096 s = splbio();
2097 scsipi_adapter_request(chan, ADAPTER_REQ_SET_XFER_MODE, &xm);
2098 splx(s);
2099
2100 /*
2101 * If we want this to happen immediately, issue a dummy command,
2102 * since most adapters can't really negotiate unless they're
2103 * executing a job.
2104 */
2105 if (immed != 0 && itperiph != NULL) {
2106 (void) scsipi_test_unit_ready(itperiph,
2107 XS_CTL_DISCOVERY | XS_CTL_IGNORE_ILLEGAL_REQUEST |
2108 XS_CTL_IGNORE_NOT_READY |
2109 XS_CTL_IGNORE_MEDIA_CHANGE);
2110 }
2111 }
2112
2113 /*
2114  * scsipi_async_event_channel_reset:
2115  *
2116  *	Handle a SCSI bus reset.
2117  *	NOTE: Must be called at splbio().
2118 */
2119 void
2120 scsipi_async_event_channel_reset(chan)
2121 struct scsipi_channel *chan;
2122 {
2123 struct scsipi_xfer *xs, *xs_next;
2124 struct scsipi_periph *periph;
2125 int target, lun;
2126
2127 /*
2128 	 * The channel has been reset. Also mark pending REQUEST_SENSE
2129 	 * commands as reset, as the sense is not available any more.
2130 	 * We can't call scsipi_done() from here, as the command has not
2131 	 * been sent to the adapter yet (this would corrupt accounting).
2132 */
2133
2134 for (xs = TAILQ_FIRST(&chan->chan_queue); xs != NULL; xs = xs_next) {
2135 xs_next = TAILQ_NEXT(xs, channel_q);
2136 if (xs->xs_control & XS_CTL_REQSENSE) {
2137 TAILQ_REMOVE(&chan->chan_queue, xs, channel_q);
2138 xs->error = XS_RESET;
2139 if ((xs->xs_control & XS_CTL_ASYNC) != 0)
2140 TAILQ_INSERT_TAIL(&chan->chan_complete, xs,
2141 channel_q);
2142 }
2143 }
2144 wakeup(&chan->chan_complete);
2145 /* Catch xs with pending sense which may not have a REQSENSE xs yet */
2146 for (target = 0; target < chan->chan_ntargets; target++) {
2147 if (target == chan->chan_id)
2148 continue;
2149 for (lun = 0; lun < chan->chan_nluns; lun++) {
2150 periph = chan->chan_periphs[target][lun];
2151 if (periph) {
2152 xs = periph->periph_xscheck;
2153 if (xs)
2154 xs->error = XS_RESET;
2155 }
2156 }
2157 }
2158 }
2159
2160
2161 /*
2162 * scsipi_adapter_addref:
2163 *
2164 * Add a reference to the adapter pointed to by the provided
2165 * link, enabling the adapter if necessary.
2166 */
2167 int
2168 scsipi_adapter_addref(adapt)
2169 struct scsipi_adapter *adapt;
2170 {
2171 int s, error = 0;
2172
2173 s = splbio();
2174 if (adapt->adapt_refcnt++ == 0 && adapt->adapt_enable != NULL) {
2175 error = (*adapt->adapt_enable)(adapt->adapt_dev, 1);
2176 if (error)
2177 adapt->adapt_refcnt--;
2178 }
2179 splx(s);
2180 return (error);
2181 }
2182
2183 /*
2184 * scsipi_adapter_delref:
2185 *
2186 * Delete a reference to the adapter pointed to by the provided
2187 * link, disabling the adapter if possible.
2188 */
2189 void
2190 scsipi_adapter_delref(adapt)
2191 struct scsipi_adapter *adapt;
2192 {
2193 int s;
2194
2195 s = splbio();
2196 if (adapt->adapt_refcnt-- == 1 && adapt->adapt_enable != NULL)
2197 (void) (*adapt->adapt_enable)(adapt->adapt_dev, 0);
2198 splx(s);
2199 }
2200
2201 struct scsipi_syncparam {
2202 int ss_factor;
2203 int ss_period; /* ns * 10 */
2204 } scsipi_syncparams[] = {
2205 { 0x0a, 250 },
2206 { 0x0b, 303 },
2207 { 0x0c, 500 },
2208 };
2209 const int scsipi_nsyncparams =
2210 sizeof(scsipi_syncparams) / sizeof(scsipi_syncparams[0]);
2211
2212 int
2213 scsipi_sync_period_to_factor(period)
2214 int period; /* ns * 10 */
2215 {
2216 int i;
2217
2218 for (i = 0; i < scsipi_nsyncparams; i++) {
2219 if (period <= scsipi_syncparams[i].ss_period)
2220 return (scsipi_syncparams[i].ss_factor);
2221 }
2222
2223 return ((period / 10) / 4);
2224 }
2225
2226 int
2227 scsipi_sync_factor_to_period(factor)
2228 int factor;
2229 {
2230 int i;
2231
2232 for (i = 0; i < scsipi_nsyncparams; i++) {
2233 if (factor == scsipi_syncparams[i].ss_factor)
2234 return (scsipi_syncparams[i].ss_period);
2235 }
2236
2237 return ((factor * 4) * 10);
2238 }
2239
2240 int
2241 scsipi_sync_factor_to_freq(factor)
2242 int factor;
2243 {
2244 int i;
2245
2246 for (i = 0; i < scsipi_nsyncparams; i++) {
2247 if (factor == scsipi_syncparams[i].ss_factor)
2248 return (10000000 / scsipi_syncparams[i].ss_period);
2249 }
2250
2251 return (10000000 / ((factor * 4) * 10));
2252 }
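/*
 * Illustrative worked example (wrapped in #if 0): with the table above,
 * factor 0x0c maps to a period of 500 (ns * 10, i.e. 50.0ns), so
 * scsipi_sync_factor_to_freq() yields 10000000 / 500 = 20000 (kHz,
 * i.e. 20 MHz).  A factor outside the table, e.g. 25, falls back to
 * 25 * 4 * 10 = 1000 (100.0ns) and 10000 kHz.
 */
#if 0
#include <stdio.h>

static void
sync_factor_demo(void)
{
	int factor = 0x0c;
	int period = scsipi_sync_factor_to_period(factor);	/* 500 */
	int freq = scsipi_sync_factor_to_freq(factor);		/* 20000 */

	printf("factor 0x%x: %d.%dns, %d kHz\n",
	    factor, period / 10, period % 10, freq);
}
#endif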
2253
2254 #ifdef SCSIPI_DEBUG
2255 /*
2256  * Given a scsipi_xfer, dump the request, in all its glory
2257 */
2258 void
2259 show_scsipi_xs(xs)
2260 struct scsipi_xfer *xs;
2261 {
2262
2263 printf("xs(%p): ", xs);
2264 printf("xs_control(0x%08x)", xs->xs_control);
2265 printf("xs_status(0x%08x)", xs->xs_status);
2266 printf("periph(%p)", xs->xs_periph);
2267 printf("retr(0x%x)", xs->xs_retries);
2268 printf("timo(0x%x)", xs->timeout);
2269 printf("cmd(%p)", xs->cmd);
2270 printf("len(0x%x)", xs->cmdlen);
2271 printf("data(%p)", xs->data);
2272 printf("len(0x%x)", xs->datalen);
2273 printf("res(0x%x)", xs->resid);
2274 printf("err(0x%x)", xs->error);
2275 printf("bp(%p)", xs->bp);
2276 show_scsipi_cmd(xs);
2277 }
2278
2279 void
2280 show_scsipi_cmd(xs)
2281 struct scsipi_xfer *xs;
2282 {
2283 u_char *b = (u_char *) xs->cmd;
2284 int i = 0;
2285
2286 scsipi_printaddr(xs->xs_periph);
2287 printf(" command: ");
2288
2289 if ((xs->xs_control & XS_CTL_RESET) == 0) {
2290 while (i < xs->cmdlen) {
2291 if (i)
2292 printf(",");
2293 printf("0x%x", b[i++]);
2294 }
2295 printf("-[%d bytes]\n", xs->datalen);
2296 if (xs->datalen)
2297 show_mem(xs->data, min(64, xs->datalen));
2298 } else
2299 printf("-RESET-\n");
2300 }
2301
2302 void
2303 show_mem(address, num)
2304 u_char *address;
2305 int num;
2306 {
2307 int x;
2308
2309 printf("------------------------------");
2310 for (x = 0; x < num; x++) {
2311 if ((x % 16) == 0)
2312 printf("\n%03d: ", x);
2313 printf("%02x ", *address++);
2314 }
2315 printf("\n------------------------------\n");
2316 }
2317 #endif /* SCSIPI_DEBUG */
2318