/*	$NetBSD: scsipi_base.c,v 1.43 2001/05/18 16:25:07 enami Exp $	*/

/*-
 * Copyright (c) 1998, 1999, 2000 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Charles M. Hannum; by Jason R. Thorpe of the Numerical Aerospace
 * Simulation Facility, NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "opt_scsi.h"

#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/buf.h>
#include <sys/uio.h>
#include <sys/malloc.h>
#include <sys/pool.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/proc.h>
#include <sys/kthread.h>

#include <dev/scsipi/scsipi_all.h>
#include <dev/scsipi/scsipi_disk.h>
#include <dev/scsipi/scsipiconf.h>
#include <dev/scsipi/scsipi_base.h>

#include <dev/scsipi/scsi_all.h>
#include <dev/scsipi/scsi_message.h>

int	scsipi_complete __P((struct scsipi_xfer *));
void	scsipi_request_sense __P((struct scsipi_xfer *));
int	scsipi_enqueue __P((struct scsipi_xfer *));
void	scsipi_run_queue __P((struct scsipi_channel *chan));

void	scsipi_completion_thread __P((void *));

void	scsipi_get_tag __P((struct scsipi_xfer *));
void	scsipi_put_tag __P((struct scsipi_xfer *));

int	scsipi_get_resource __P((struct scsipi_channel *));
void	scsipi_put_resource __P((struct scsipi_channel *));
__inline int scsipi_grow_resources __P((struct scsipi_channel *));

void	scsipi_async_event_max_openings __P((struct scsipi_channel *,
	    struct scsipi_max_openings *));
void	scsipi_async_event_xfer_mode __P((struct scsipi_channel *,
	    struct scsipi_xfer_mode *));
void	scsipi_async_event_channel_reset __P((struct scsipi_channel *));

struct pool scsipi_xfer_pool;

/*
 * scsipi_init:
 *
 *	Called when a scsibus or atapibus is attached to the system
 *	to initialize shared data structures.
 */
void
scsipi_init()
{
	static int scsipi_init_done;

	if (scsipi_init_done)
		return;
	scsipi_init_done = 1;

	/* Initialize the scsipi_xfer pool. */
	pool_init(&scsipi_xfer_pool, sizeof(struct scsipi_xfer), 0,
	    0, 0, "scxspl", 0, NULL, NULL, M_DEVBUF);
}

/*
 * scsipi_channel_init:
 *
 *	Initialize a scsipi_channel when it is attached.
 */
int
scsipi_channel_init(chan)
	struct scsipi_channel *chan;
{
	size_t nbytes;
	int i;

	/* Initialize shared data. */
	scsipi_init();

	/* Initialize the queues. */
	TAILQ_INIT(&chan->chan_queue);
	TAILQ_INIT(&chan->chan_complete);

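	/*
	 * chan_periphs[] is a two-level table: one array of LUN
	 * pointers per target, so a periph is found (and installed)
	 * as chan_periphs[target][lun]; see scsipi_lookup_periph().
	 */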
	nbytes = chan->chan_ntargets * sizeof(struct scsipi_periph **);
	chan->chan_periphs = malloc(nbytes, M_DEVBUF, M_NOWAIT);
	if (chan->chan_periphs == NULL)
		return (ENOMEM);

	nbytes = chan->chan_nluns * sizeof(struct scsipi_periph *);
	for (i = 0; i < chan->chan_ntargets; i++) {
		chan->chan_periphs[i] = malloc(nbytes, M_DEVBUF, M_NOWAIT);
		if (chan->chan_periphs[i] == NULL) {
			while (--i >= 0) {
				free(chan->chan_periphs[i], M_DEVBUF);
			}
			return (ENOMEM);
		}
		memset(chan->chan_periphs[i], 0, nbytes);
	}

	/*
	 * Create the asynchronous completion thread.
	 */
	kthread_create(scsipi_create_completion_thread, chan);
	return (0);
}

/*
 * scsipi_channel_shutdown:
 *
 *	Shutdown a scsipi_channel.
 */
void
scsipi_channel_shutdown(chan)
	struct scsipi_channel *chan;
{

	/*
	 * Shut down the completion thread.
	 */
	chan->chan_flags |= SCSIPI_CHAN_SHUTDOWN;
	wakeup(&chan->chan_complete);

	/*
	 * Now wait for the thread to exit.
	 */
	while (chan->chan_thread != NULL)
		(void) tsleep(&chan->chan_thread, PRIBIO, "scshut", 0);
}

/*
 * scsipi_insert_periph:
 *
 *	Insert a periph into the channel.
 */
void
scsipi_insert_periph(chan, periph)
	struct scsipi_channel *chan;
	struct scsipi_periph *periph;
{
	int s;

	s = splbio();
	chan->chan_periphs[periph->periph_target][periph->periph_lun] = periph;
	splx(s);
}

/*
 * scsipi_remove_periph:
 *
 *	Remove a periph from the channel.
 */
void
scsipi_remove_periph(chan, periph)
	struct scsipi_channel *chan;
	struct scsipi_periph *periph;
{
	int s;

	s = splbio();
	chan->chan_periphs[periph->periph_target][periph->periph_lun] = NULL;
	splx(s);
}

/*
 * scsipi_lookup_periph:
 *
 *	Lookup a periph on the specified channel.
 */
struct scsipi_periph *
scsipi_lookup_periph(chan, target, lun)
	struct scsipi_channel *chan;
	int target, lun;
{
	struct scsipi_periph *periph;
	int s;

	if (target >= chan->chan_ntargets ||
	    lun >= chan->chan_nluns)
		return (NULL);

	s = splbio();
	periph = chan->chan_periphs[target][lun];
	splx(s);

	return (periph);
}

/*
 * scsipi_get_resource:
 *
 *	Allocate a single xfer `resource' from the channel.
 *
 *	NOTE: Must be called at splbio().
 */
int
scsipi_get_resource(chan)
	struct scsipi_channel *chan;
{
	struct scsipi_adapter *adapt = chan->chan_adapter;

	if (chan->chan_flags & SCSIPI_CHAN_OPENINGS) {
		if (chan->chan_openings > 0) {
			chan->chan_openings--;
			return (1);
		}
		return (0);
	}

	if (adapt->adapt_openings > 0) {
		adapt->adapt_openings--;
		return (1);
	}
	return (0);
}

/*
 * scsipi_grow_resources:
 *
 *	Attempt to grow resources for a channel.  If this succeeds,
 *	we allocate one for our caller.
 *
 *	NOTE: Must be called at splbio().
 */
__inline int
scsipi_grow_resources(chan)
	struct scsipi_channel *chan;
{

	if (chan->chan_flags & SCSIPI_CHAN_CANGROW) {
		scsipi_adapter_request(chan, ADAPTER_REQ_GROW_RESOURCES, NULL);
		return (scsipi_get_resource(chan));
	}

	return (0);
}

/*
 * scsipi_put_resource:
 *
 *	Free a single xfer `resource' to the channel.
 *
 *	NOTE: Must be called at splbio().
 */
void
scsipi_put_resource(chan)
	struct scsipi_channel *chan;
{
	struct scsipi_adapter *adapt = chan->chan_adapter;

	if (chan->chan_flags & SCSIPI_CHAN_OPENINGS)
		chan->chan_openings++;
	else
		adapt->adapt_openings++;
}

/*
 * scsipi_get_tag:
 *
 *	Get a tag ID for the specified xfer.
 *
 *	NOTE: Must be called at splbio().
 */
void
scsipi_get_tag(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_periph *periph = xs->xs_periph;
	int word, bit, tag;

	for (word = 0; word < PERIPH_NTAGWORDS; word++) {
		bit = ffs(periph->periph_freetags[word]);
		if (bit != 0)
			break;
	}
#ifdef DIAGNOSTIC
	if (word == PERIPH_NTAGWORDS) {
		scsipi_printaddr(periph);
		printf("no free tags\n");
		panic("scsipi_get_tag");
	}
#endif

	bit -= 1;
	periph->periph_freetags[word] &= ~(1 << bit);
	tag = (word << 5) | bit;
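	/*
	 * Each word of periph_freetags holds 32 tag bits, so the tag
	 * ID encodes as (word * 32) + bit; scsipi_put_tag() inverts
	 * this with tag >> 5 and tag & 0x1f.
	 */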

	/* XXX Should eventually disallow this completely. */
	if (tag >= periph->periph_openings) {
		scsipi_printaddr(periph);
		printf("WARNING: tag %d greater than available openings %d\n",
		    tag, periph->periph_openings);
	}

	xs->xs_tag_id = tag;
}

/*
 * scsipi_put_tag:
 *
 *	Put the tag ID for the specified xfer back into the pool.
 *
 *	NOTE: Must be called at splbio().
 */
void
scsipi_put_tag(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_periph *periph = xs->xs_periph;
	int word, bit;

	word = xs->xs_tag_id >> 5;
	bit = xs->xs_tag_id & 0x1f;

	periph->periph_freetags[word] |= (1 << bit);
}

/*
 * scsipi_get_xs:
 *
 *	Allocate an xfer descriptor and associate it with the
 *	specified peripheral.  If the peripheral has no more
 *	available command openings, we either block waiting for
 *	one to become available, or fail.
 */
struct scsipi_xfer *
scsipi_get_xs(periph, flags)
	struct scsipi_periph *periph;
	int flags;
{
	struct scsipi_xfer *xs;
	int s;

	SC_DEBUG(periph, SCSIPI_DB3, ("scsipi_get_xs\n"));

	/*
	 * If we're cold, make sure we poll.
	 */
	if (cold)
		flags |= XS_CTL_NOSLEEP | XS_CTL_POLL;

#ifdef DIAGNOSTIC
	/*
	 * URGENT commands can never be ASYNC.
	 */
	if ((flags & (XS_CTL_URGENT|XS_CTL_ASYNC)) ==
	    (XS_CTL_URGENT|XS_CTL_ASYNC)) {
		scsipi_printaddr(periph);
		printf("URGENT and ASYNC\n");
		panic("scsipi_get_xs");
	}
#endif

	s = splbio();
	/*
	 * Wait for a command opening to become available.  Rules:
	 *
	 *	- All xfers must wait for an available opening.
	 *	  Exception: URGENT xfers can proceed when
	 *	  active == openings, because we use the opening
	 *	  of the command we're recovering for.
	 *
	 *	- If the periph has sense pending, only URGENT & REQSENSE
	 *	  xfers may proceed.
	 *
	 *	- If the periph is recovering, only URGENT xfers may
	 *	  proceed.
	 *
	 *	- If the periph is currently executing a recovery
	 *	  command, URGENT commands must block, because only
	 *	  one recovery command can execute at a time.
	 */
	for (;;) {
		if (flags & XS_CTL_URGENT) {
			if (periph->periph_active > periph->periph_openings)
				goto wait_for_opening;
			if (periph->periph_flags & PERIPH_SENSE) {
				if ((flags & XS_CTL_REQSENSE) == 0)
					goto wait_for_opening;
			} else {
				if ((periph->periph_flags &
				    PERIPH_RECOVERY_ACTIVE) != 0)
					goto wait_for_opening;
				periph->periph_flags |= PERIPH_RECOVERY_ACTIVE;
			}
			break;
		}
		if (periph->periph_active >= periph->periph_openings ||
		    (periph->periph_flags & PERIPH_RECOVERING) != 0)
			goto wait_for_opening;
		periph->periph_active++;
		break;

 wait_for_opening:
		if (flags & XS_CTL_NOSLEEP) {
			splx(s);
			return (NULL);
		}
		SC_DEBUG(periph, SCSIPI_DB3, ("sleeping\n"));
		periph->periph_flags |= PERIPH_WAITING;
		(void) tsleep(periph, PRIBIO, "getxs", 0);
	}
	SC_DEBUG(periph, SCSIPI_DB3, ("calling pool_get\n"));
	xs = pool_get(&scsipi_xfer_pool,
	    ((flags & XS_CTL_NOSLEEP) != 0 ? PR_NOWAIT : PR_WAITOK));
	if (xs == NULL) {
		if (flags & XS_CTL_URGENT) {
			if ((flags & XS_CTL_REQSENSE) == 0)
				periph->periph_flags &= ~PERIPH_RECOVERY_ACTIVE;
		} else
			periph->periph_active--;
		scsipi_printaddr(periph);
		printf("unable to allocate %sscsipi_xfer\n",
		    (flags & XS_CTL_URGENT) ? "URGENT " : "");
	}
	splx(s);

	SC_DEBUG(periph, SCSIPI_DB3, ("returning\n"));

	if (xs != NULL) {
		memset(xs, 0, sizeof(*xs));
		callout_init(&xs->xs_callout);
		xs->xs_periph = periph;
		xs->xs_control = flags;
		xs->xs_status = 0;
		s = splbio();
		TAILQ_INSERT_TAIL(&periph->periph_xferq, xs, device_q);
		splx(s);
	}
	return (xs);
}

/*
 * scsipi_put_xs:
 *
 *	Release an xfer descriptor, decreasing the outstanding command
 *	count for the peripheral.  If there is a thread waiting for
 *	an opening, wake it up.  If not, kick any queued I/O the
 *	peripheral may have.
 *
 *	NOTE: Must be called at splbio().
 */
void
scsipi_put_xs(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_periph *periph = xs->xs_periph;
	int flags = xs->xs_control;

	SC_DEBUG(periph, SCSIPI_DB3, ("scsipi_free_xs\n"));

	TAILQ_REMOVE(&periph->periph_xferq, xs, device_q);
	pool_put(&scsipi_xfer_pool, xs);

#ifdef DIAGNOSTIC
	if ((periph->periph_flags & PERIPH_RECOVERY_ACTIVE) != 0 &&
	    periph->periph_active == 0) {
		scsipi_printaddr(periph);
		printf("recovery without a command to recover for\n");
		panic("scsipi_put_xs");
	}
#endif

	if (flags & XS_CTL_URGENT) {
		if ((flags & XS_CTL_REQSENSE) == 0)
			periph->periph_flags &= ~PERIPH_RECOVERY_ACTIVE;
	} else
		periph->periph_active--;
	if (periph->periph_active == 0 &&
	    (periph->periph_flags & PERIPH_WAITDRAIN) != 0) {
		periph->periph_flags &= ~PERIPH_WAITDRAIN;
		wakeup(&periph->periph_active);
	}

	if (periph->periph_flags & PERIPH_WAITING) {
		periph->periph_flags &= ~PERIPH_WAITING;
		wakeup(periph);
	} else {
		if (periph->periph_switch->psw_start != NULL) {
			SC_DEBUG(periph, SCSIPI_DB2,
			    ("calling private start()\n"));
			(*periph->periph_switch->psw_start)(periph);
		}
	}
}

/*
 * scsipi_channel_freeze:
 *
 *	Freeze a channel's xfer queue.
 */
void
scsipi_channel_freeze(chan, count)
	struct scsipi_channel *chan;
	int count;
{
	int s;

	s = splbio();
	chan->chan_qfreeze += count;
	splx(s);
}

/*
 * scsipi_channel_thaw:
 *
 *	Thaw a channel's xfer queue.
 */
void
scsipi_channel_thaw(chan, count)
	struct scsipi_channel *chan;
	int count;
{
	int s;

	s = splbio();
	chan->chan_qfreeze -= count;
	/*
	 * Don't let the freeze count go negative.
	 *
	 * Presumably the adapter driver could keep track of this,
	 * but it might just be easier to do this here so as to allow
	 * multiple callers, including those outside the adapter driver.
	 */
	if (chan->chan_qfreeze < 0) {
		chan->chan_qfreeze = 0;
	}
	splx(s);
}

/*
 * scsipi_channel_timed_thaw:
 *
 *	Thaw a channel after some time has expired.
 */
void
scsipi_channel_timed_thaw(arg)
	void *arg;
{
	struct scsipi_channel *chan = arg;

	scsipi_channel_thaw(chan, 1);

	/*
	 * Kick the channel's queue here.  Note, we're running in
	 * interrupt context (softclock), so the adapter driver
	 * had better not sleep.
	 */
	scsipi_run_queue(chan);
}

/*
 * scsipi_periph_freeze:
 *
 *	Freeze a device's xfer queue.
 */
void
scsipi_periph_freeze(periph, count)
	struct scsipi_periph *periph;
	int count;
{
	int s;

	s = splbio();
	periph->periph_qfreeze += count;
	splx(s);
}

/*
 * scsipi_periph_thaw:
 *
 *	Thaw a device's xfer queue.
 */
void
scsipi_periph_thaw(periph, count)
	struct scsipi_periph *periph;
	int count;
{
	int s;

	s = splbio();
	periph->periph_qfreeze -= count;
	if (periph->periph_qfreeze == 0 &&
	    (periph->periph_flags & PERIPH_WAITING) != 0)
		wakeup(periph);
	splx(s);
}

/*
 * scsipi_periph_timed_thaw:
 *
 *	Thaw a device after some time has expired.
 */
void
scsipi_periph_timed_thaw(arg)
	void *arg;
{
	struct scsipi_periph *periph = arg;

	callout_stop(&periph->periph_callout);
	scsipi_periph_thaw(periph, 1);

	/*
	 * Kick the channel's queue here.  Note, we're running in
	 * interrupt context (softclock), so the adapter driver
	 * had better not sleep.
	 */
	scsipi_run_queue(periph->periph_channel);
}

/*
 * scsipi_wait_drain:
 *
 *	Wait for a periph's pending xfers to drain.
 */
void
scsipi_wait_drain(periph)
	struct scsipi_periph *periph;
{
	int s;

	s = splbio();
	while (periph->periph_active != 0) {
		periph->periph_flags |= PERIPH_WAITDRAIN;
		(void) tsleep(&periph->periph_active, PRIBIO, "sxdrn", 0);
	}
	splx(s);
}

/*
 * scsipi_kill_pending:
 *
 *	Kill off all pending xfers for a periph.
 *
 *	NOTE: Must be called at splbio().
 */
void
scsipi_kill_pending(periph)
	struct scsipi_periph *periph;
{

	(*periph->periph_channel->chan_bustype->bustype_kill_pending)(periph);
#ifdef DIAGNOSTIC
	if (TAILQ_FIRST(&periph->periph_xferq) != NULL)
		panic("scsipi_kill_pending");
#endif
	scsipi_wait_drain(periph);
}

/*
 * scsipi_interpret_sense:
 *
 *	Look at the returned sense and act on the error, determining
 *	the unix error number to pass back.  (0 = report no error)
 *
 *	NOTE: If we return ERESTART, we are expected to have
 *	thawed the device!
 *
 *	THIS IS THE DEFAULT ERROR HANDLER FOR SCSI DEVICES.
 */
int
scsipi_interpret_sense(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_sense_data *sense;
	struct scsipi_periph *periph = xs->xs_periph;
	u_int8_t key;
	u_int32_t info;
	int error;
#ifndef SCSIVERBOSE
	static char *error_mes[] = {
		"soft error (corrected)",
		"not ready", "medium error",
		"non-media hardware failure", "illegal request",
		"unit attention", "readonly device",
		"no data found", "vendor unique",
		"copy aborted", "command aborted",
		"search returned equal", "volume overflow",
		"verify miscompare", "unknown error key"
	};
#endif

	sense = &xs->sense.scsi_sense;
#ifdef SCSIPI_DEBUG
	if (periph->periph_dbflags & SCSIPI_DB1) {
		int count;
		scsipi_printaddr(periph);
		printf(" sense debug information:\n");
		printf("\tcode 0x%x valid 0x%x\n",
		    sense->error_code & SSD_ERRCODE,
		    sense->error_code & SSD_ERRCODE_VALID ? 1 : 0);
		printf("\tseg 0x%x key 0x%x ili 0x%x eom 0x%x fmark 0x%x\n",
		    sense->segment,
		    sense->flags & SSD_KEY,
		    sense->flags & SSD_ILI ? 1 : 0,
		    sense->flags & SSD_EOM ? 1 : 0,
		    sense->flags & SSD_FILEMARK ? 1 : 0);
		printf("\ninfo: 0x%x 0x%x 0x%x 0x%x followed by %d "
		    "extra bytes\n",
		    sense->info[0],
		    sense->info[1],
		    sense->info[2],
		    sense->info[3],
		    sense->extra_len);
		printf("\textra: ");
		for (count = 0; count < ADD_BYTES_LIM(sense); count++)
			printf("0x%x ", sense->cmd_spec_info[count]);
		printf("\n");
	}
#endif

	/*
	 * If the periph has its own error handler, call it first.
	 * If it returns a legit error value, return that, otherwise
	 * it wants us to continue with normal error processing.
	 */
	if (periph->periph_switch->psw_error != NULL) {
		SC_DEBUG(periph, SCSIPI_DB2,
		    ("calling private err_handler()\n"));
		error = (*periph->periph_switch->psw_error)(xs);
		if (error != EJUSTRETURN)
			return (error);
	}
	/* otherwise use the default */
	switch (sense->error_code & SSD_ERRCODE) {
		/*
		 * If it's code 70, use the extended stuff and
		 * interpret the key
		 */
	case 0x71:		/* delayed error */
		scsipi_printaddr(periph);
		key = sense->flags & SSD_KEY;
		printf(" DEFERRED ERROR, key = 0x%x\n", key);
		/* FALLTHROUGH */
	case 0x70:
		if ((sense->error_code & SSD_ERRCODE_VALID) != 0)
			info = _4btol(sense->info);
		else
			info = 0;
		key = sense->flags & SSD_KEY;

		switch (key) {
		case SKEY_NO_SENSE:
		case SKEY_RECOVERED_ERROR:
			if (xs->resid == xs->datalen && xs->datalen) {
				/*
				 * Why is this here?
				 */
				xs->resid = 0;	/* not short read */
			}
		case SKEY_EQUAL:
			error = 0;
			break;
		case SKEY_NOT_READY:
			if ((periph->periph_flags & PERIPH_REMOVABLE) != 0)
				periph->periph_flags &= ~PERIPH_MEDIA_LOADED;
			if ((xs->xs_control & XS_CTL_IGNORE_NOT_READY) != 0)
				return (0);
			if (sense->add_sense_code == 0x3A &&
			    sense->add_sense_code_qual == 0x00)
				error = ENODEV;	/* Medium not present */
			else
				error = EIO;
			if ((xs->xs_control & XS_CTL_SILENT) != 0)
				return (error);
			break;
		case SKEY_ILLEGAL_REQUEST:
			if ((xs->xs_control &
			    XS_CTL_IGNORE_ILLEGAL_REQUEST) != 0)
				return (0);
			/*
			 * Handle the case where a device reports
			 * Logical Unit Not Supported during discovery.
			 */
			if ((xs->xs_control & XS_CTL_DISCOVERY) != 0 &&
			    sense->add_sense_code == 0x25 &&
			    sense->add_sense_code_qual == 0x00)
				return (EINVAL);
			if ((xs->xs_control & XS_CTL_SILENT) != 0)
				return (EIO);
			error = EINVAL;
			break;
		case SKEY_UNIT_ATTENTION:
			if (sense->add_sense_code == 0x29 &&
			    sense->add_sense_code_qual == 0x00) {
				/* device or bus reset */
				return (ERESTART);
			}
			if ((periph->periph_flags & PERIPH_REMOVABLE) != 0)
				periph->periph_flags &= ~PERIPH_MEDIA_LOADED;
			if ((xs->xs_control &
			    XS_CTL_IGNORE_MEDIA_CHANGE) != 0 ||
			    /* XXX Should reupload any transient state. */
			    (periph->periph_flags &
			     PERIPH_REMOVABLE) == 0) {
				return (ERESTART);
			}
			if ((xs->xs_control & XS_CTL_SILENT) != 0)
				return (EIO);
			error = EIO;
			break;
		case SKEY_WRITE_PROTECT:
			error = EROFS;
			break;
		case SKEY_BLANK_CHECK:
			error = 0;
			break;
		case SKEY_ABORTED_COMMAND:
			error = ERESTART;
			break;
		case SKEY_VOLUME_OVERFLOW:
			error = ENOSPC;
			break;
		default:
			error = EIO;
			break;
		}

#ifdef SCSIVERBOSE
		if (key && (xs->xs_control & XS_CTL_SILENT) == 0)
			scsipi_print_sense(xs, 0);
#else
		if (key) {
			scsipi_printaddr(periph);
			printf("%s", error_mes[key - 1]);
			if ((sense->error_code & SSD_ERRCODE_VALID) != 0) {
				switch (key) {
				case SKEY_NOT_READY:
				case SKEY_ILLEGAL_REQUEST:
				case SKEY_UNIT_ATTENTION:
				case SKEY_WRITE_PROTECT:
					break;
				case SKEY_BLANK_CHECK:
					printf(", requested size: %d (decimal)",
					    info);
					break;
				case SKEY_ABORTED_COMMAND:
					if (xs->xs_retries)
						printf(", retrying");
					printf(", cmd 0x%x, info 0x%x",
					    xs->cmd->opcode, info);
					break;
				default:
					printf(", info = %d (decimal)", info);
				}
			}
			if (sense->extra_len != 0) {
				int n;
				printf(", data =");
				for (n = 0; n < sense->extra_len; n++)
					printf(" %02x",
					    sense->cmd_spec_info[n]);
			}
			printf("\n");
		}
#endif
		return (error);

	/*
	 * Not code 70, just report it
	 */
	default:
#if defined(SCSIDEBUG) || defined(DEBUG)
	{
		static char *uc = "undecodable sense error";
		int i;
		u_int8_t *cptr = (u_int8_t *) sense;
		scsipi_printaddr(periph);
		if (xs->cmd == &xs->cmdstore) {
			printf("%s for opcode 0x%x, data=",
			    uc, xs->cmdstore.opcode);
		} else {
			printf("%s, data=", uc);
		}
		for (i = 0; i < sizeof (*sense); i++)
			printf(" 0x%02x", *(cptr++) & 0xff);
		printf("\n");
	}
#else

		scsipi_printaddr(periph);
		printf("Sense Error Code 0x%x",
		    sense->error_code & SSD_ERRCODE);
		if ((sense->error_code & SSD_ERRCODE_VALID) != 0) {
			struct scsipi_sense_data_unextended *usense =
			    (struct scsipi_sense_data_unextended *)sense;
			printf(" at block no. %d (decimal)",
			    _3btol(usense->block));
		}
		printf("\n");
#endif
		return (EIO);
	}
}

/*
 * scsipi_size:
 *
 *	Find out from the device what its capacity is.
 */
u_long
scsipi_size(periph, flags)
	struct scsipi_periph *periph;
	int flags;
{
	struct scsipi_read_cap_data rdcap;
	struct scsipi_read_capacity scsipi_cmd;

	bzero(&scsipi_cmd, sizeof(scsipi_cmd));
	scsipi_cmd.opcode = READ_CAPACITY;

	/*
	 * If the command works, interpret the result as a 4 byte
	 * number of blocks
	 */
	if (scsipi_command(periph, (struct scsipi_generic *)&scsipi_cmd,
	    sizeof(scsipi_cmd), (u_char *)&rdcap, sizeof(rdcap),
	    SCSIPIRETRIES, 20000, NULL,
	    flags | XS_CTL_DATA_IN | XS_CTL_DATA_ONSTACK) != 0) {
		scsipi_printaddr(periph);
		printf("could not get size\n");
		return (0);
	}

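	/*
	 * READ CAPACITY returns the address of the device's last
	 * logical block; the total block count is therefore that
	 * value plus one.
	 */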
	return (_4btol(rdcap.addr) + 1);
}

/*
 * scsipi_test_unit_ready:
 *
 *	Issue a `test unit ready' request.
 */
int
scsipi_test_unit_ready(periph, flags)
	struct scsipi_periph *periph;
	int flags;
{
	struct scsipi_test_unit_ready scsipi_cmd;

	/* some ATAPI drives don't support TEST_UNIT_READY. Sigh */
	if (periph->periph_quirks & PQUIRK_NOTUR)
		return (0);

	bzero(&scsipi_cmd, sizeof(scsipi_cmd));
	scsipi_cmd.opcode = TEST_UNIT_READY;

	return (scsipi_command(periph,
	    (struct scsipi_generic *)&scsipi_cmd, sizeof(scsipi_cmd),
	    0, 0, SCSIPIRETRIES, 10000, NULL, flags));
}

/*
 * scsipi_inquire:
 *
 *	Ask the device about itself.
 */
int
scsipi_inquire(periph, inqbuf, flags)
	struct scsipi_periph *periph;
	struct scsipi_inquiry_data *inqbuf;
	int flags;
{
	struct scsipi_inquiry scsipi_cmd;

	bzero(&scsipi_cmd, sizeof(scsipi_cmd));
	scsipi_cmd.opcode = INQUIRY;
	scsipi_cmd.length = sizeof(struct scsipi_inquiry_data);

	return (scsipi_command(periph,
	    (struct scsipi_generic *) &scsipi_cmd, sizeof(scsipi_cmd),
	    (u_char *) inqbuf, sizeof(struct scsipi_inquiry_data),
	    SCSIPIRETRIES, 10000, NULL, XS_CTL_DATA_IN | flags));
}

/*
 * scsipi_prevent:
 *
 *	Prevent or allow the user to remove the media
 */
int
scsipi_prevent(periph, type, flags)
	struct scsipi_periph *periph;
	int type, flags;
{
	struct scsipi_prevent scsipi_cmd;

	if (periph->periph_quirks & PQUIRK_NODOORLOCK)
		return (0);

	bzero(&scsipi_cmd, sizeof(scsipi_cmd));
	scsipi_cmd.opcode = PREVENT_ALLOW;
	scsipi_cmd.how = type;

	return (scsipi_command(periph,
	    (struct scsipi_generic *) &scsipi_cmd, sizeof(scsipi_cmd),
	    0, 0, SCSIPIRETRIES, 5000, NULL, flags));
}

/*
 * scsipi_start:
 *
 *	Send a START UNIT.
 */
int
scsipi_start(periph, type, flags)
	struct scsipi_periph *periph;
	int type, flags;
{
	struct scsipi_start_stop scsipi_cmd;

	if (periph->periph_quirks & PQUIRK_NOSTARTUNIT)
		return 0;

	bzero(&scsipi_cmd, sizeof(scsipi_cmd));
	scsipi_cmd.opcode = START_STOP;
	scsipi_cmd.byte2 = 0x00;
	scsipi_cmd.how = type;

	return (scsipi_command(periph,
	    (struct scsipi_generic *) &scsipi_cmd, sizeof(scsipi_cmd),
	    0, 0, SCSIPIRETRIES, (type & SSS_START) ? 60000 : 10000,
	    NULL, flags));
}

/*
 * scsipi_mode_sense, scsipi_mode_sense_big:
 *	get a sense page from a device
 */

int
scsipi_mode_sense(periph, byte2, page, data, len, flags, retries, timeout)
	struct scsipi_periph *periph;
	int byte2, page, len, flags, retries, timeout;
	struct scsipi_mode_header *data;
{
	struct scsipi_mode_sense scsipi_cmd;
	int error;

	bzero(&scsipi_cmd, sizeof(scsipi_cmd));
	scsipi_cmd.opcode = MODE_SENSE;
	scsipi_cmd.byte2 = byte2;
	scsipi_cmd.page = page;
	if (scsipi_periph_bustype(periph) == SCSIPI_BUSTYPE_ATAPI)
		_lto2b(len, scsipi_cmd.u_len.atapi.length);
	else
		scsipi_cmd.u_len.scsi.length = len & 0xff;
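	/*
	 * Note that ATAPI uses the 10-byte MODE SENSE, which carries
	 * a 2-byte allocation length, while the SCSI 6-byte MODE
	 * SENSE has room for only a single length byte.
	 */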
	error = scsipi_command(periph, (struct scsipi_generic *)&scsipi_cmd,
	    sizeof(scsipi_cmd), (void *)data, len, retries, timeout, NULL,
	    flags | XS_CTL_DATA_IN);
	SC_DEBUG(periph, SCSIPI_DB2,
	    ("scsipi_mode_sense: error=%d\n", error));
	return (error);
}

int
scsipi_mode_sense_big(periph, byte2, page, data, len, flags, retries, timeout)
	struct scsipi_periph *periph;
	int byte2, page, len, flags, retries, timeout;
	struct scsipi_mode_header_big *data;
{
	struct scsipi_mode_sense_big scsipi_cmd;
	int error;

	bzero(&scsipi_cmd, sizeof(scsipi_cmd));
	scsipi_cmd.opcode = MODE_SENSE_BIG;
	scsipi_cmd.byte2 = byte2;
	scsipi_cmd.page = page;
	_lto2b(len, scsipi_cmd.length);
	error = scsipi_command(periph, (struct scsipi_generic *)&scsipi_cmd,
	    sizeof(scsipi_cmd), (void *)data, len, retries, timeout, NULL,
	    flags | XS_CTL_DATA_IN);
	SC_DEBUG(periph, SCSIPI_DB2,
	    ("scsipi_mode_sense_big: error=%d\n", error));
	return (error);
}

int
scsipi_mode_select(periph, byte2, data, len, flags, retries, timeout)
	struct scsipi_periph *periph;
	int byte2, len, flags, retries, timeout;
	struct scsipi_mode_header *data;
{
	struct scsipi_mode_select scsipi_cmd;
	int error;

	bzero(&scsipi_cmd, sizeof(scsipi_cmd));
	scsipi_cmd.opcode = MODE_SELECT;
	scsipi_cmd.byte2 = byte2;
	if (scsipi_periph_bustype(periph) == SCSIPI_BUSTYPE_ATAPI)
		_lto2b(len, scsipi_cmd.u_len.atapi.length);
	else
		scsipi_cmd.u_len.scsi.length = len & 0xff;
	error = scsipi_command(periph, (struct scsipi_generic *)&scsipi_cmd,
	    sizeof(scsipi_cmd), (void *)data, len, retries, timeout, NULL,
	    flags | XS_CTL_DATA_OUT);
	SC_DEBUG(periph, SCSIPI_DB2,
	    ("scsipi_mode_select: error=%d\n", error));
	return (error);
}

int
scsipi_mode_select_big(periph, byte2, data, len, flags, retries, timeout)
	struct scsipi_periph *periph;
	int byte2, len, flags, retries, timeout;
	struct scsipi_mode_header_big *data;
{
	struct scsipi_mode_select_big scsipi_cmd;
	int error;

	bzero(&scsipi_cmd, sizeof(scsipi_cmd));
	scsipi_cmd.opcode = MODE_SELECT_BIG;
	scsipi_cmd.byte2 = byte2;
	_lto2b(len, scsipi_cmd.length);
	error = scsipi_command(periph, (struct scsipi_generic *)&scsipi_cmd,
	    sizeof(scsipi_cmd), (void *)data, len, retries, timeout, NULL,
	    flags | XS_CTL_DATA_OUT);
	SC_DEBUG(periph, SCSIPI_DB2,
	    ("scsipi_mode_select_big: error=%d\n", error));
	return (error);
}

/*
 * scsipi_done:
 *
 *	This routine is called by an adapter's interrupt handler when
 *	an xfer is completed.
 */
void
scsipi_done(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_periph *periph = xs->xs_periph;
	struct scsipi_channel *chan = periph->periph_channel;
	int s, freezecnt;

	SC_DEBUG(periph, SCSIPI_DB2, ("scsipi_done\n"));
#ifdef SCSIPI_DEBUG
	if (periph->periph_dbflags & SCSIPI_DB1)
		show_scsipi_cmd(xs);
#endif

	s = splbio();
	/*
	 * The resource this command was using is now free.
	 */
	scsipi_put_resource(chan);
	xs->xs_periph->periph_sent--;

	/*
	 * If the command was tagged, free the tag.
	 */
	if (XS_CTL_TAGTYPE(xs) != 0)
		scsipi_put_tag(xs);
	else
		periph->periph_flags &= ~PERIPH_UNTAG;
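	/*
	 * (PERIPH_UNTAG is set by scsipi_run_queue() when it issues
	 * an untagged command; only one untagged command may be
	 * outstanding per periph, and clearing the flag here lets
	 * the next one through.)
	 */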

	/* Mark the command as `done'. */
	xs->xs_status |= XS_STS_DONE;

#ifdef DIAGNOSTIC
	if ((xs->xs_control & (XS_CTL_ASYNC|XS_CTL_POLL)) ==
	    (XS_CTL_ASYNC|XS_CTL_POLL))
		panic("scsipi_done: ASYNC and POLL");
#endif

	/*
	 * If the xfer had an error of any sort, freeze the
	 * periph's queue.  Freeze it again if we were requested
	 * to do so in the xfer.
	 */
	freezecnt = 0;
	if (xs->error != XS_NOERROR)
		freezecnt++;
	if (xs->xs_control & XS_CTL_FREEZE_PERIPH)
		freezecnt++;
	if (freezecnt != 0)
		scsipi_periph_freeze(periph, freezecnt);

	/*
	 * Record the xfer with a pending sense, in case a SCSI reset is
	 * received before the thread is woken up.
	 */
	if (xs->error == XS_BUSY && xs->status == SCSI_CHECK) {
		periph->periph_flags |= PERIPH_SENSE;
		periph->periph_xscheck = xs;
	}

	/*
	 * If this was an xfer that was not to complete asynchronously,
	 * let the requesting thread perform error checking/handling
	 * in its context.
	 */
	if ((xs->xs_control & XS_CTL_ASYNC) == 0) {
		splx(s);
		/*
		 * If it's a polling job, just return, to unwind the
		 * call graph.  We don't need to restart the queue,
		 * because polling jobs are treated specially, and
		 * are really only used during crash dumps anyway
		 * (XXX or during boot-time autoconfiguration of
		 * ATAPI devices).
		 */
		if (xs->xs_control & XS_CTL_POLL)
			return;
		wakeup(xs);
		goto out;
	}

	/*
	 * Catch the extremely common case of I/O completing
	 * without error; no use in taking a context switch
	 * if we can handle it in interrupt context.
	 */
	if (xs->error == XS_NOERROR) {
		splx(s);
		(void) scsipi_complete(xs);
		goto out;
	}

	/*
	 * There is an error on this xfer.  Put it on the channel's
	 * completion queue, and wake up the completion thread.
	 */
	TAILQ_INSERT_TAIL(&chan->chan_complete, xs, channel_q);
	splx(s);
	wakeup(&chan->chan_complete);

 out:
	/*
	 * If there are more xfers on the channel's queue, attempt to
	 * run them.
	 */
	scsipi_run_queue(chan);
}

/*
 * scsipi_complete:
 *
 *	Completion of a scsipi_xfer.  This is the guts of scsipi_done().
 *
 *	NOTE: This routine MUST be called with valid thread context
 *	except for the case where the following two conditions are
 *	true:
 *
 *		xs->error == XS_NOERROR
 *		XS_CTL_ASYNC is set in xs->xs_control
 *
 *	The semantics of this routine can be tricky, so here is an
 *	explanation:
 *
 *		0		Xfer completed successfully.
 *
 *		ERESTART	Xfer had an error, but was restarted.
 *
 *		anything else	Xfer had an error, return value is Unix
 *				errno.
 *
 *	If the return value is anything but ERESTART:
 *
 *		- If XS_CTL_ASYNC is set, `xs' has been freed back to
 *		  the pool.
 *		- If there is a buf associated with the xfer,
 *		  it has been biodone()'d.
 */
int
scsipi_complete(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_periph *periph = xs->xs_periph;
	struct scsipi_channel *chan = periph->periph_channel;
	struct buf *bp;
	int error, s;

#ifdef DIAGNOSTIC
	if ((xs->xs_control & XS_CTL_ASYNC) != 0 && xs->bp == NULL)
		panic("scsipi_complete: XS_CTL_ASYNC but no buf");
#endif
	/*
	 * If command terminated with a CHECK CONDITION, we need to issue a
	 * REQUEST_SENSE command.  Once the REQUEST_SENSE has been processed
	 * we'll have the real status.
	 * Must be processed at splbio() to avoid missing a SCSI bus reset
	 * for this command.
	 */
	s = splbio();
	if (xs->error == XS_BUSY && xs->status == SCSI_CHECK) {
		/* request sense for a request sense? */
		if (xs->xs_control & XS_CTL_REQSENSE) {
			scsipi_printaddr(periph);
			/* XXX maybe we should reset the device? */
			/* we've been frozen because xs->error != XS_NOERROR */
			scsipi_periph_thaw(periph, 1);
			splx(s);
			return EINVAL;
		}
		scsipi_request_sense(xs);
	}
	splx(s);
	/*
	 * If it's a user level request, bypass all usual completion
	 * processing, let the user work it out.
	 */
	if ((xs->xs_control & XS_CTL_USERCMD) != 0) {
		SC_DEBUG(periph, SCSIPI_DB3, ("calling user done()\n"));
		if (xs->error != XS_NOERROR)
			scsipi_periph_thaw(periph, 1);
		scsipi_user_done(xs);
		SC_DEBUG(periph, SCSIPI_DB3, ("returned from user done()\n"));
		return 0;
	}

	switch (xs->error) {
	case XS_NOERROR:
		error = 0;
		break;

	case XS_SENSE:
	case XS_SHORTSENSE:
		error = (*chan->chan_bustype->bustype_interpret_sense)(xs);
		break;

	case XS_RESOURCE_SHORTAGE:
		/*
		 * XXX Should freeze channel's queue.
		 */
		scsipi_printaddr(periph);
		printf("adapter resource shortage\n");
		/* FALLTHROUGH */

	case XS_BUSY:
		if (xs->error == XS_BUSY && xs->status == SCSI_QUEUE_FULL) {
			struct scsipi_max_openings mo;

			/*
			 * We set the openings to active - 1, assuming that
			 * the command that got us here is the first one that
			 * can't fit into the device's queue.  If that's not
			 * the case, I guess we'll find out soon enough.
			 */
			mo.mo_target = periph->periph_target;
			mo.mo_lun = periph->periph_lun;
			if (periph->periph_active < periph->periph_openings)
				mo.mo_openings = periph->periph_active - 1;
			else
				mo.mo_openings = periph->periph_openings - 1;
#ifdef DIAGNOSTIC
			if (mo.mo_openings < 0) {
				scsipi_printaddr(periph);
				printf("QUEUE FULL resulted in < 0 openings\n");
				panic("scsipi_done");
			}
#endif
			if (mo.mo_openings == 0) {
				scsipi_printaddr(periph);
				printf("QUEUE FULL resulted in 0 openings\n");
				mo.mo_openings = 1;
			}
			scsipi_async_event(chan, ASYNC_EVENT_MAX_OPENINGS, &mo);
			error = ERESTART;
		} else if (xs->xs_retries != 0) {
			xs->xs_retries--;
			/*
			 * Wait one second, and try again.
			 */
			if (xs->xs_control & XS_CTL_POLL)
				delay(1000000);
			else {
				scsipi_periph_freeze(periph, 1);
				callout_reset(&periph->periph_callout,
				    hz, scsipi_periph_timed_thaw, periph);
			}
			error = ERESTART;
		} else
			error = EBUSY;
		break;

	case XS_REQUEUE:
		error = ERESTART;
		break;

	case XS_TIMEOUT:
		if (xs->xs_retries != 0) {
			xs->xs_retries--;
			error = ERESTART;
		} else
			error = EIO;
		break;

	case XS_SELTIMEOUT:
		/* XXX Disable device? */
		error = EIO;
		break;

	case XS_RESET:
		if (xs->xs_control & XS_CTL_REQSENSE) {
			/*
			 * request sense interrupted by reset: signal it
			 * with EINTR return code.
			 */
			error = EINTR;
		} else {
			if (xs->xs_retries != 0) {
				xs->xs_retries--;
				error = ERESTART;
			} else
				error = EIO;
		}
		break;

	default:
		scsipi_printaddr(periph);
		printf("invalid return code from adapter: %d\n", xs->error);
		error = EIO;
		break;
	}

	s = splbio();
	if (error == ERESTART) {
		/*
		 * If we get here, the periph has been thawed and frozen
		 * again if we had to issue recovery commands.  Alternatively,
		 * it may have been frozen again and in a timed thaw.  In
		 * any case, we thaw the periph once we re-enqueue the
		 * command.  Once the periph is fully thawed, it will begin
		 * operation again.
		 */
		xs->error = XS_NOERROR;
		xs->status = SCSI_OK;
		xs->xs_status &= ~XS_STS_DONE;
		xs->xs_requeuecnt++;
		error = scsipi_enqueue(xs);
		if (error == 0) {
			scsipi_periph_thaw(periph, 1);
			splx(s);
			return (ERESTART);
		}
	}

	/*
	 * scsipi_done() freezes the queue if not XS_NOERROR.
	 * Thaw it here.
	 */
	if (xs->error != XS_NOERROR)
		scsipi_periph_thaw(periph, 1);

	if (periph->periph_switch->psw_done)
		periph->periph_switch->psw_done(xs);
	if ((bp = xs->bp) != NULL) {
		if (error) {
			bp->b_error = error;
			bp->b_flags |= B_ERROR;
			bp->b_resid = bp->b_bcount;
		} else {
			bp->b_error = 0;
			bp->b_resid = xs->resid;
		}
		biodone(bp);
	}

	if (xs->xs_control & XS_CTL_ASYNC)
		scsipi_put_xs(xs);
	splx(s);

	return (error);
}

/*
 * Issue a request sense for the given scsipi_xfer.  Called when the xfer
 * returns with a CHECK_CONDITION status.  Must be called in valid thread
 * context and at splbio().
 */

void
scsipi_request_sense(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_periph *periph = xs->xs_periph;
	int flags, error;
	struct scsipi_sense cmd;

	periph->periph_flags |= PERIPH_SENSE;

	/* if command was polling, request sense will too */
	flags = xs->xs_control & XS_CTL_POLL;
	/* Polling commands can't sleep */
	if (flags)
		flags |= XS_CTL_NOSLEEP;

	flags |= XS_CTL_REQSENSE | XS_CTL_URGENT | XS_CTL_DATA_IN |
	    XS_CTL_THAW_PERIPH | XS_CTL_FREEZE_PERIPH;
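	/*
	 * XS_CTL_THAW_PERIPH undoes the freeze scsipi_done() applied
	 * when the original command failed, allowing the sense
	 * command to run; XS_CTL_FREEZE_PERIPH re-freezes the queue
	 * when the sense completes, holding other commands off until
	 * error handling is finished.
	 */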

	bzero(&cmd, sizeof(cmd));
	cmd.opcode = REQUEST_SENSE;
	cmd.length = sizeof(struct scsipi_sense_data);

	error = scsipi_command(periph,
	    (struct scsipi_generic *) &cmd, sizeof(cmd),
	    (u_char*)&xs->sense.scsi_sense, sizeof(struct scsipi_sense_data),
	    0, 1000, NULL, flags);
	periph->periph_flags &= ~PERIPH_SENSE;
	periph->periph_xscheck = NULL;
	switch (error) {
	case 0:
		/* we have a valid sense */
		xs->error = XS_SENSE;
		return;
	case EINTR:
		/* REQUEST_SENSE interrupted by bus reset. */
		xs->error = XS_RESET;
		return;
	case EIO:
		/* request sense couldn't be performed */
		/*
		 * XXX this isn't quite right but we don't have anything
		 * better for now
		 */
		xs->error = XS_DRIVER_STUFFUP;
		return;
	default:
		/* Notify that request sense failed. */
		xs->error = XS_DRIVER_STUFFUP;
		scsipi_printaddr(periph);
		printf("request sense failed with error %d\n", error);
		return;
	}
}

/*
 * scsipi_enqueue:
 *
 *	Enqueue an xfer on a channel.
 */
int
scsipi_enqueue(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_channel *chan = xs->xs_periph->periph_channel;
	struct scsipi_xfer *qxs;
	int s;

	s = splbio();

	/*
	 * If the xfer is to be polled, and there are already jobs on
	 * the queue, we can't proceed.
	 */
	if ((xs->xs_control & XS_CTL_POLL) != 0 &&
	    TAILQ_FIRST(&chan->chan_queue) != NULL) {
		splx(s);
		xs->error = XS_DRIVER_STUFFUP;
		return (EAGAIN);
	}

	/*
	 * If we have an URGENT xfer, it's an error recovery command
	 * and it should just go on the head of the channel's queue.
	 */
	if (xs->xs_control & XS_CTL_URGENT) {
		TAILQ_INSERT_HEAD(&chan->chan_queue, xs, channel_q);
		goto out;
	}

	/*
	 * If this xfer has already been on the queue before, we
	 * need to reinsert it in the correct order.  That order is:
	 *
	 *	Immediately before the first xfer for this periph
	 *	with a requeuecnt less than xs->xs_requeuecnt.
	 *
	 * Failing that, at the end of the queue.  (We'll end up
	 * there naturally.)
	 */
	if (xs->xs_requeuecnt != 0) {
		for (qxs = TAILQ_FIRST(&chan->chan_queue); qxs != NULL;
		     qxs = TAILQ_NEXT(qxs, channel_q)) {
			if (qxs->xs_periph == xs->xs_periph &&
			    qxs->xs_requeuecnt < xs->xs_requeuecnt)
				break;
		}
		if (qxs != NULL) {
			TAILQ_INSERT_AFTER(&chan->chan_queue, qxs, xs,
			    channel_q);
			goto out;
		}
	}
	TAILQ_INSERT_TAIL(&chan->chan_queue, xs, channel_q);
 out:
	if (xs->xs_control & XS_CTL_THAW_PERIPH)
		scsipi_periph_thaw(xs->xs_periph, 1);
	splx(s);
	return (0);
}

/*
 * scsipi_run_queue:
 *
 *	Start as many xfers as possible running on the channel.
 */
void
scsipi_run_queue(chan)
	struct scsipi_channel *chan;
{
	struct scsipi_xfer *xs;
	struct scsipi_periph *periph;
	int s;

	for (;;) {
		s = splbio();

		/*
		 * If the channel is frozen, we can't do any work right
		 * now.
		 */
		if (chan->chan_qfreeze != 0) {
			splx(s);
			return;
		}

		/*
		 * Look for work to do, and make sure we can do it.
		 */
		for (xs = TAILQ_FIRST(&chan->chan_queue); xs != NULL;
		     xs = TAILQ_NEXT(xs, channel_q)) {
			periph = xs->xs_periph;

			if ((periph->periph_sent >= periph->periph_openings) ||
			    periph->periph_qfreeze != 0 ||
			    (periph->periph_flags & PERIPH_UNTAG) != 0)
				continue;

			if ((periph->periph_flags &
			    (PERIPH_RECOVERING | PERIPH_SENSE)) != 0 &&
			    (xs->xs_control & XS_CTL_URGENT) == 0)
				continue;

			/*
			 * We can issue this xfer!
			 */
			goto got_one;
		}

		/*
		 * Can't find any work to do right now.
		 */
		splx(s);
		return;

 got_one:
		/*
		 * Have an xfer to run.  Allocate a resource from
		 * the adapter to run it.  If we can't allocate that
		 * resource, we don't dequeue the xfer.
		 */
		if (scsipi_get_resource(chan) == 0) {
			/*
			 * Adapter is out of resources.  If the adapter
			 * supports it, attempt to grow them.
			 */
			if (scsipi_grow_resources(chan) == 0) {
				/*
				 * Wasn't able to grow resources,
				 * nothing more we can do.
				 */
				if (xs->xs_control & XS_CTL_POLL) {
					scsipi_printaddr(xs->xs_periph);
					printf("polling command but no "
					    "adapter resources\n");
					/* We'll panic shortly... */
				}
				splx(s);

				/*
				 * XXX: We should be able to note that
				 * XXX: resources are needed here!
				 */
				return;
			}
			/*
			 * scsipi_grow_resources() allocated the resource
			 * for us.
			 */
		}

		/*
		 * We have a resource to run this xfer, do it!
		 */
		TAILQ_REMOVE(&chan->chan_queue, xs, channel_q);

		/*
		 * If the command is to be tagged, allocate a tag ID
		 * for it.
		 */
		if (XS_CTL_TAGTYPE(xs) != 0)
			scsipi_get_tag(xs);
		else
			periph->periph_flags |= PERIPH_UNTAG;
		periph->periph_sent++;
		splx(s);

		scsipi_adapter_request(chan, ADAPTER_REQ_RUN_XFER, xs);
	}
#ifdef DIAGNOSTIC
	panic("scsipi_run_queue: impossible");
#endif
}

/*
 * scsipi_execute_xs:
 *
 *	Begin execution of an xfer, waiting for it to complete, if necessary.
 */
int
scsipi_execute_xs(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_periph *periph = xs->xs_periph;
	struct scsipi_channel *chan = periph->periph_channel;
	int async, poll, retries, error, s;

	xs->xs_status &= ~XS_STS_DONE;
	xs->error = XS_NOERROR;
	xs->resid = xs->datalen;
	xs->status = SCSI_OK;

#ifdef SCSIPI_DEBUG
	if (xs->xs_periph->periph_dbflags & SCSIPI_DB3) {
		printf("scsipi_execute_xs: ");
		show_scsipi_xs(xs);
		printf("\n");
	}
#endif

	/*
	 * Deal with command tagging:
	 *
	 *	- If the device's current operating mode doesn't
	 *	  include tagged queueing, clear the tag mask.
	 *
	 *	- If the device's current operating mode *does*
	 *	  include tagged queueing, set the tag_type in
	 *	  the xfer to the appropriate byte for the tag
	 *	  message.
	 */
	if ((PERIPH_XFER_MODE(periph) & PERIPH_CAP_TQING) == 0 ||
	    (xs->xs_control & XS_CTL_REQSENSE)) {
		xs->xs_control &= ~XS_CTL_TAGMASK;
		xs->xs_tag_type = 0;
	} else {
		/*
		 * If the request doesn't specify a tag, give Head
		 * tags to URGENT operations and Ordered tags to
		 * everything else.
		 */
		if (XS_CTL_TAGTYPE(xs) == 0) {
			if (xs->xs_control & XS_CTL_URGENT)
				xs->xs_control |= XS_CTL_HEAD_TAG;
			else
				xs->xs_control |= XS_CTL_ORDERED_TAG;
		}

		switch (XS_CTL_TAGTYPE(xs)) {
		case XS_CTL_ORDERED_TAG:
			xs->xs_tag_type = MSG_ORDERED_Q_TAG;
			break;

		case XS_CTL_SIMPLE_TAG:
			xs->xs_tag_type = MSG_SIMPLE_Q_TAG;
			break;

		case XS_CTL_HEAD_TAG:
			xs->xs_tag_type = MSG_HEAD_OF_Q_TAG;
			break;

		default:
			scsipi_printaddr(periph);
			printf("invalid tag mask 0x%08x\n",
			    XS_CTL_TAGTYPE(xs));
			panic("scsipi_execute_xs");
		}
	}

	/*
	 * If we don't yet have a completion thread, or we are to poll for
	 * completion, clear the ASYNC flag.
	 */
	if (chan->chan_thread == NULL || (xs->xs_control & XS_CTL_POLL) != 0)
		xs->xs_control &= ~XS_CTL_ASYNC;

	async = (xs->xs_control & XS_CTL_ASYNC);
	poll = (xs->xs_control & XS_CTL_POLL);
	retries = xs->xs_retries;	/* for polling commands */

#ifdef DIAGNOSTIC
	if (async != 0 && xs->bp == NULL)
		panic("scsipi_execute_xs: XS_CTL_ASYNC but no buf");
#endif

	/*
	 * Enqueue the transfer.  If we're not polling for completion, this
	 * should ALWAYS return `no error'.
	 */
 try_again:
	error = scsipi_enqueue(xs);
	if (error) {
		if (poll == 0) {
			scsipi_printaddr(periph);
			printf("not polling, but enqueue failed with %d\n",
			    error);
			panic("scsipi_execute_xs");
		}

		scsipi_printaddr(periph);
		printf("failed to enqueue polling command");
		if (retries != 0) {
			printf(", retrying...\n");
			delay(1000000);
			retries--;
			goto try_again;
		}
		printf("\n");
		goto free_xs;
	}

 restarted:
	scsipi_run_queue(chan);

	/*
	 * The xfer is enqueued, and possibly running.  If it's to be
	 * completed asynchronously, just return now.
	 */
	if (async)
		return (EJUSTRETURN);

	/*
	 * Not an asynchronous command; wait for it to complete.
	 */
	s = splbio();
	while ((xs->xs_status & XS_STS_DONE) == 0) {
		if (poll) {
			scsipi_printaddr(periph);
			printf("polling command not done\n");
			panic("scsipi_execute_xs");
		}
		(void) tsleep(xs, PRIBIO, "xscmd", 0);
	}
	splx(s);

	/*
	 * Command is complete.  scsipi_done() has awakened us to perform
	 * the error handling.
	 */
	error = scsipi_complete(xs);
	if (error == ERESTART)
		goto restarted;

	/*
	 * Command completed successfully or fatal error occurred.  Fall
	 * into....
	 */
 free_xs:
	s = splbio();
	scsipi_put_xs(xs);
	splx(s);

	/*
	 * Kick the queue, keep it running in case it stopped for some
	 * reason.
	 */
	scsipi_run_queue(chan);

	return (error);
}

/*
 * scsipi_completion_thread:
 *
 *	This is the completion thread.  We wait for errors on
 *	asynchronous xfers, and perform the error handling
 *	function, restarting the command, if necessary.
 */
void
scsipi_completion_thread(arg)
	void *arg;
{
	struct scsipi_channel *chan = arg;
	struct scsipi_xfer *xs;
	int s;

	for (;;) {
		s = splbio();
		xs = TAILQ_FIRST(&chan->chan_complete);
		if (xs == NULL &&
		    (chan->chan_flags & SCSIPI_CHAN_SHUTDOWN) == 0) {
			(void) tsleep(&chan->chan_complete, PRIBIO,
			    "sccomp", 0);
			splx(s);
			continue;
		}
		if (chan->chan_flags & SCSIPI_CHAN_SHUTDOWN) {
			splx(s);
			break;
		}
		TAILQ_REMOVE(&chan->chan_complete, xs, channel_q);
		splx(s);

		/*
		 * Have an xfer with an error; process it.
		 */
		(void) scsipi_complete(xs);

		/*
		 * Kick the queue; keep it running if it was stopped
		 * for some reason.
		 */
		scsipi_run_queue(chan);
	}

	chan->chan_thread = NULL;

	/* In case parent is waiting for us to exit. */
	wakeup(&chan->chan_thread);

	kthread_exit(0);
}

/*
 * scsipi_create_completion_thread:
 *
 *	Callback to actually create the completion thread.
 */
void
scsipi_create_completion_thread(arg)
	void *arg;
{
	struct scsipi_channel *chan = arg;
	struct scsipi_adapter *adapt = chan->chan_adapter;

	if (kthread_create1(scsipi_completion_thread, chan,
	    &chan->chan_thread, "%s:%d", adapt->adapt_dev->dv_xname,
	    chan->chan_channel)) {
		printf("%s: unable to create completion thread for "
		    "channel %d\n", adapt->adapt_dev->dv_xname,
		    chan->chan_channel);
		panic("scsipi_create_completion_thread");
	}
}

/*
 * scsipi_async_event:
 *
 *	Handle an asynchronous event from an adapter.
 */
void
scsipi_async_event(chan, event, arg)
	struct scsipi_channel *chan;
	scsipi_async_event_t event;
	void *arg;
{
	int s;

	s = splbio();
	switch (event) {
	case ASYNC_EVENT_MAX_OPENINGS:
		scsipi_async_event_max_openings(chan,
		    (struct scsipi_max_openings *)arg);
		break;

	case ASYNC_EVENT_XFER_MODE:
		scsipi_async_event_xfer_mode(chan,
		    (struct scsipi_xfer_mode *)arg);
		break;
	case ASYNC_EVENT_RESET:
		scsipi_async_event_channel_reset(chan);
		break;
	}
	splx(s);
}

/*
 * scsipi_print_xfer_mode:
 *
 *	Print a periph's capabilities.
 */
void
scsipi_print_xfer_mode(periph)
	struct scsipi_periph *periph;
{
	int period, freq, speed, mbs;

	if ((periph->periph_flags & PERIPH_MODE_VALID) == 0)
		return;

	printf("%s: ", periph->periph_dev->dv_xname);
	if (periph->periph_mode & PERIPH_CAP_SYNC) {
		period = scsipi_sync_factor_to_period(periph->periph_period);
		printf("sync (%d.%dns offset %d)",
		    period / 10, period % 10, periph->periph_offset);
	} else
		printf("async");

	if (periph->periph_mode & PERIPH_CAP_WIDE32)
		printf(", 32-bit");
	else if (periph->periph_mode & PERIPH_CAP_WIDE16)
		printf(", 16-bit");
	else
		printf(", 8-bit");

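	/*
	 * scsipi_sync_factor_to_freq() returns kHz, so once scaled
	 * by the bus width the speed is in KB/s; e.g. a 20000kHz
	 * (20MHz) bus that is 16 bits wide yields 40000KB/s, printed
	 * as 40.000MB/s.
	 */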
	if (periph->periph_mode & PERIPH_CAP_SYNC) {
		freq = scsipi_sync_factor_to_freq(periph->periph_period);
		speed = freq;
		if (periph->periph_mode & PERIPH_CAP_WIDE32)
			speed *= 4;
		else if (periph->periph_mode & PERIPH_CAP_WIDE16)
			speed *= 2;
		mbs = speed / 1000;
		if (mbs > 0)
			printf(" (%d.%03dMB/s)", mbs, speed % 1000);
		else
			printf(" (%dKB/s)", speed % 1000);
	}

	printf(" transfers");

	if (periph->periph_mode & PERIPH_CAP_TQING)
		printf(", tagged queueing");

	printf("\n");
}

/*
 * scsipi_async_event_max_openings:
 *
 *	Update the maximum number of outstanding commands a
 *	device may have.
 */
void
scsipi_async_event_max_openings(chan, mo)
	struct scsipi_channel *chan;
	struct scsipi_max_openings *mo;
{
	struct scsipi_periph *periph;
	int minlun, maxlun;

	if (mo->mo_lun == -1) {
		/*
		 * Wildcarded; apply it to all LUNs.
		 */
		minlun = 0;
		maxlun = chan->chan_nluns - 1;
	} else
		minlun = maxlun = mo->mo_lun;

	for (; minlun <= maxlun; minlun++) {
		periph = scsipi_lookup_periph(chan, mo->mo_target, minlun);
		if (periph == NULL)
			continue;

		if (mo->mo_openings < periph->periph_openings)
			periph->periph_openings = mo->mo_openings;
		else if (mo->mo_openings > periph->periph_openings &&
		    (periph->periph_flags & PERIPH_GROW_OPENINGS) != 0)
			periph->periph_openings = mo->mo_openings;
	}
}

/*
 * scsipi_async_event_xfer_mode:
 *
 *	Update the xfer mode for all periphs sharing the
 *	specified I_T Nexus.
 */
void
scsipi_async_event_xfer_mode(chan, xm)
	struct scsipi_channel *chan;
	struct scsipi_xfer_mode *xm;
{
	struct scsipi_periph *periph;
	int lun, announce, mode, period, offset;

	for (lun = 0; lun < chan->chan_nluns; lun++) {
		periph = scsipi_lookup_periph(chan, xm->xm_target, lun);
		if (periph == NULL)
			continue;
		announce = 0;

		/*
		 * Clamp the xfer mode down to this periph's capabilities.
		 */
		mode = xm->xm_mode & periph->periph_cap;
		if (mode & PERIPH_CAP_SYNC) {
			period = xm->xm_period;
			offset = xm->xm_offset;
		} else {
			period = 0;
			offset = 0;
		}

		/*
		 * If we do not have a valid xfer mode yet, or the parameters
		 * are different, announce them.
		 */
		if ((periph->periph_flags & PERIPH_MODE_VALID) == 0 ||
		    periph->periph_mode != mode ||
		    periph->periph_period != period ||
		    periph->periph_offset != offset)
			announce = 1;

		periph->periph_mode = mode;
		periph->periph_period = period;
		periph->periph_offset = offset;
		periph->periph_flags |= PERIPH_MODE_VALID;

		if (announce)
			scsipi_print_xfer_mode(periph);
	}
}

/*
 * scsipi_set_xfer_mode:
 *
 *	Set the xfer mode for the specified I_T Nexus.
 */
void
scsipi_set_xfer_mode(chan, target, immed)
	struct scsipi_channel *chan;
	int target, immed;
{
	struct scsipi_xfer_mode xm;
	struct scsipi_periph *itperiph;
	int lun, s;

	/*
	 * Go to the minimal xfer mode.
	 */
	xm.xm_target = target;
	xm.xm_mode = 0;
	xm.xm_period = 0;	/* ignored */
	xm.xm_offset = 0;	/* ignored */

	/*
	 * Find the first LUN we know about on this I_T Nexus.
	 */
	for (lun = 0; lun < chan->chan_nluns; lun++) {
		itperiph = scsipi_lookup_periph(chan, target, lun);
		if (itperiph != NULL)
			break;
	}
	if (itperiph != NULL)
		xm.xm_mode = itperiph->periph_cap;

	/*
	 * Now issue the request to the adapter.
	 */
	s = splbio();
	scsipi_adapter_request(chan, ADAPTER_REQ_SET_XFER_MODE, &xm);
	splx(s);

	/*
	 * If we want this to happen immediately, issue a dummy command,
	 * since most adapters can't really negotiate unless they're
	 * executing a job.
	 */
	if (immed != 0 && itperiph != NULL) {
		(void) scsipi_test_unit_ready(itperiph,
		    XS_CTL_DISCOVERY | XS_CTL_IGNORE_ILLEGAL_REQUEST |
		    XS_CTL_IGNORE_NOT_READY |
		    XS_CTL_IGNORE_MEDIA_CHANGE);
	}
}

/*
 * scsipi_async_event_channel_reset:
 *
 *	handle scsi bus reset
 *	called at splbio
 */
void
scsipi_async_event_channel_reset(chan)
	struct scsipi_channel *chan;
{
	struct scsipi_xfer *xs, *xs_next;
	struct scsipi_periph *periph;
	int target, lun;

	/*
	 * Channel has been reset.  Also mark as reset any pending
	 * REQUEST_SENSE commands, since the sense is no longer
	 * available.  We can't call scsipi_done() from here, as the
	 * command has not been sent to the adapter yet (that would
	 * corrupt the accounting).
	 */

	for (xs = TAILQ_FIRST(&chan->chan_queue); xs != NULL; xs = xs_next) {
		xs_next = TAILQ_NEXT(xs, channel_q);
		if (xs->xs_control & XS_CTL_REQSENSE) {
			TAILQ_REMOVE(&chan->chan_queue, xs, channel_q);
			xs->error = XS_RESET;
			if ((xs->xs_control & XS_CTL_ASYNC) != 0)
				TAILQ_INSERT_TAIL(&chan->chan_complete, xs,
				    channel_q);
		}
	}
	wakeup(&chan->chan_complete);
	/* Catch xs with pending sense which may not have a REQSENSE xs yet */
	for (target = 0; target < chan->chan_ntargets; target++) {
		if (target == chan->chan_id)
			continue;
		for (lun = 0; lun < chan->chan_nluns; lun++) {
			periph = chan->chan_periphs[target][lun];
			if (periph) {
				xs = periph->periph_xscheck;
				if (xs)
					xs->error = XS_RESET;
			}
		}
	}
}

/*
 * scsipi_adapter_addref:
 *
 *	Add a reference to the adapter pointed to by the provided
 *	link, enabling the adapter if necessary.
 */
int
scsipi_adapter_addref(adapt)
	struct scsipi_adapter *adapt;
{
	int s, error = 0;

	s = splbio();
	if (adapt->adapt_refcnt++ == 0 && adapt->adapt_enable != NULL) {
		error = (*adapt->adapt_enable)(adapt->adapt_dev, 1);
		if (error)
			adapt->adapt_refcnt--;
	}
	splx(s);
	return (error);
}

/*
 * scsipi_adapter_delref:
 *
 *	Delete a reference to the adapter pointed to by the provided
 *	link, disabling the adapter if possible.
 */
void
scsipi_adapter_delref(adapt)
	struct scsipi_adapter *adapt;
{
	int s;

	s = splbio();
	if (adapt->adapt_refcnt-- == 1 && adapt->adapt_enable != NULL)
		(void) (*adapt->adapt_enable)(adapt->adapt_dev, 0);
	splx(s);
}

struct scsipi_syncparam {
	int	ss_factor;
	int	ss_period;	/* ns * 10 */
} scsipi_syncparams[] = {
	{ 0x0a, 250 },
	{ 0x0b, 303 },
	{ 0x0c, 500 },
};
const int scsipi_nsyncparams =
    sizeof(scsipi_syncparams) / sizeof(scsipi_syncparams[0]);
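
/*
 * The table above covers the sync factors whose periods are not the
 * linear factor * 4ns; everything else falls through to the linear
 * conversions below (periods are stored scaled by 10).  For example,
 * factor 0x0c maps to 500 (50.0ns), i.e. 10000000 / 500 = 20000kHz
 * = 20MHz.
 */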

int
scsipi_sync_period_to_factor(period)
	int period;	/* ns * 10 */
{
	int i;

	for (i = 0; i < scsipi_nsyncparams; i++) {
		if (period <= scsipi_syncparams[i].ss_period)
			return (scsipi_syncparams[i].ss_factor);
	}

	return ((period / 10) / 4);
}

int
scsipi_sync_factor_to_period(factor)
	int factor;
{
	int i;

	for (i = 0; i < scsipi_nsyncparams; i++) {
		if (factor == scsipi_syncparams[i].ss_factor)
			return (scsipi_syncparams[i].ss_period);
	}

	return ((factor * 4) * 10);
}

int
scsipi_sync_factor_to_freq(factor)
	int factor;
{
	int i;

	for (i = 0; i < scsipi_nsyncparams; i++) {
		if (factor == scsipi_syncparams[i].ss_factor)
			return (10000000 / scsipi_syncparams[i].ss_period);
	}

	return (10000000 / ((factor * 4) * 10));
}

#ifdef SCSIPI_DEBUG
/*
 * Given a scsipi_xfer, dump the request, in all its glory
 */
void
show_scsipi_xs(xs)
	struct scsipi_xfer *xs;
{

	printf("xs(%p): ", xs);
	printf("xs_control(0x%08x)", xs->xs_control);
	printf("xs_status(0x%08x)", xs->xs_status);
	printf("periph(%p)", xs->xs_periph);
	printf("retr(0x%x)", xs->xs_retries);
	printf("timo(0x%x)", xs->timeout);
	printf("cmd(%p)", xs->cmd);
	printf("len(0x%x)", xs->cmdlen);
	printf("data(%p)", xs->data);
	printf("len(0x%x)", xs->datalen);
	printf("res(0x%x)", xs->resid);
	printf("err(0x%x)", xs->error);
	printf("bp(%p)", xs->bp);
	show_scsipi_cmd(xs);
}

void
show_scsipi_cmd(xs)
	struct scsipi_xfer *xs;
{
	u_char *b = (u_char *) xs->cmd;
	int i = 0;

	scsipi_printaddr(xs->xs_periph);
	printf(" command: ");

	if ((xs->xs_control & XS_CTL_RESET) == 0) {
		while (i < xs->cmdlen) {
			if (i)
				printf(",");
			printf("0x%x", b[i++]);
		}
		printf("-[%d bytes]\n", xs->datalen);
		if (xs->datalen)
			show_mem(xs->data, min(64, xs->datalen));
	} else
		printf("-RESET-\n");
}

void
show_mem(address, num)
	u_char *address;
	int num;
{
	int x;

	printf("------------------------------");
	for (x = 0; x < num; x++) {
		if ((x % 16) == 0)
			printf("\n%03d: ", x);
		printf("%02x ", *address++);
	}
	printf("\n------------------------------\n");
}
#endif /* SCSIPI_DEBUG */