/*	$NetBSD: scsipi_base.c,v 1.63 2001/11/15 09:48:17 lukem Exp $	*/

/*-
 * Copyright (c) 1998, 1999, 2000 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Charles M. Hannum; by Jason R. Thorpe of the Numerical Aerospace
 * Simulation Facility, NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: scsipi_base.c,v 1.63 2001/11/15 09:48:17 lukem Exp $");

#include "opt_scsi.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/buf.h>
#include <sys/uio.h>
#include <sys/malloc.h>
#include <sys/pool.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/proc.h>
#include <sys/kthread.h>

#include <dev/scsipi/scsipi_all.h>
#include <dev/scsipi/scsipi_disk.h>
#include <dev/scsipi/scsipiconf.h>
#include <dev/scsipi/scsipi_base.h>

#include <dev/scsipi/scsi_all.h>
#include <dev/scsipi/scsi_message.h>

int	scsipi_complete __P((struct scsipi_xfer *));
void	scsipi_request_sense __P((struct scsipi_xfer *));
int	scsipi_enqueue __P((struct scsipi_xfer *));
void	scsipi_run_queue __P((struct scsipi_channel *chan));

void	scsipi_completion_thread __P((void *));

void	scsipi_get_tag __P((struct scsipi_xfer *));
void	scsipi_put_tag __P((struct scsipi_xfer *));

int	scsipi_get_resource __P((struct scsipi_channel *));
void	scsipi_put_resource __P((struct scsipi_channel *));
__inline int scsipi_grow_resources __P((struct scsipi_channel *));

void	scsipi_async_event_max_openings __P((struct scsipi_channel *,
	    struct scsipi_max_openings *));
void	scsipi_async_event_xfer_mode __P((struct scsipi_channel *,
	    struct scsipi_xfer_mode *));
void	scsipi_async_event_channel_reset __P((struct scsipi_channel *));

struct pool scsipi_xfer_pool;

/*
 * scsipi_init:
 *
 *	Called when a scsibus or atapibus is attached to the system
 *	to initialize shared data structures.
 */
void
scsipi_init()
{
	static int scsipi_init_done;

	if (scsipi_init_done)
		return;
	scsipi_init_done = 1;

	/* Initialize the scsipi_xfer pool. */
	pool_init(&scsipi_xfer_pool, sizeof(struct scsipi_xfer), 0,
	    0, 0, "scxspl", 0, NULL, NULL, M_DEVBUF);
}

/*
 * scsipi_channel_init:
 *
 *	Initialize a scsipi_channel when it is attached.
 */
int
scsipi_channel_init(chan)
	struct scsipi_channel *chan;
{
	size_t nbytes;
	int i;

	/* Initialize shared data. */
	scsipi_init();

	/* Initialize the queues. */
	TAILQ_INIT(&chan->chan_queue);
	TAILQ_INIT(&chan->chan_complete);

	nbytes = chan->chan_ntargets * sizeof(struct scsipi_periph **);
	chan->chan_periphs = malloc(nbytes, M_DEVBUF, M_NOWAIT);
	if (chan->chan_periphs == NULL)
		return (ENOMEM);

	nbytes = chan->chan_nluns * sizeof(struct scsipi_periph *);
	for (i = 0; i < chan->chan_ntargets; i++) {
		chan->chan_periphs[i] = malloc(nbytes, M_DEVBUF, M_NOWAIT);
		if (chan->chan_periphs[i] == NULL) {
			while (--i >= 0) {
				free(chan->chan_periphs[i], M_DEVBUF);
			}
			/* Don't leak the target array itself. */
			free(chan->chan_periphs, M_DEVBUF);
			return (ENOMEM);
		}
		memset(chan->chan_periphs[i], 0, nbytes);
	}

	/*
	 * Create the asynchronous completion thread.
	 */
	kthread_create(scsipi_create_completion_thread, chan);
	return (0);
}
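
/*
 * Illustrative sketch (not part of the driver): the per-channel periph
 * table built above is a two-level array indexed first by target, then
 * by LUN.  A walk over all attached periphs therefore looks like the
 * function below.  Guarded by SCSIPI_EXAMPLE, which is never defined;
 * this is a hedged example only.
 */
#ifdef SCSIPI_EXAMPLE
static int
example_count_periphs(struct scsipi_channel *chan)
{
	int target, lun, count = 0;

	for (target = 0; target < chan->chan_ntargets; target++)
		for (lun = 0; lun < chan->chan_nluns; lun++)
			if (chan->chan_periphs[target][lun] != NULL)
				count++;
	return (count);
}
#endif /* SCSIPI_EXAMPLE */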

/*
 * scsipi_channel_shutdown:
 *
 *	Shutdown a scsipi_channel.
 */
void
scsipi_channel_shutdown(chan)
	struct scsipi_channel *chan;
{

	/*
	 * Shut down the completion thread.
	 */
	chan->chan_tflags |= SCSIPI_CHANT_SHUTDOWN;
	wakeup(&chan->chan_complete);

	/*
	 * Now wait for the thread to exit.
	 */
	while (chan->chan_thread != NULL)
		(void) tsleep(&chan->chan_thread, PRIBIO, "scshut", 0);
}

/*
 * scsipi_insert_periph:
 *
 *	Insert a periph into the channel.
 */
void
scsipi_insert_periph(chan, periph)
	struct scsipi_channel *chan;
	struct scsipi_periph *periph;
{
	int s;

	s = splbio();
	chan->chan_periphs[periph->periph_target][periph->periph_lun] = periph;
	splx(s);
}

/*
 * scsipi_remove_periph:
 *
 *	Remove a periph from the channel.
 */
void
scsipi_remove_periph(chan, periph)
	struct scsipi_channel *chan;
	struct scsipi_periph *periph;
{
	int s;

	s = splbio();
	chan->chan_periphs[periph->periph_target][periph->periph_lun] = NULL;
	splx(s);
}

/*
 * scsipi_lookup_periph:
 *
 *	Lookup a periph on the specified channel.
 */
struct scsipi_periph *
scsipi_lookup_periph(chan, target, lun)
	struct scsipi_channel *chan;
	int target, lun;
{
	struct scsipi_periph *periph;
	int s;

	if (target >= chan->chan_ntargets ||
	    lun >= chan->chan_nluns)
		return (NULL);

	s = splbio();
	periph = chan->chan_periphs[target][lun];
	splx(s);

	return (periph);
}

/*
 * scsipi_get_resource:
 *
 *	Allocate a single xfer `resource' from the channel.
 *
 *	NOTE: Must be called at splbio().
 */
int
scsipi_get_resource(chan)
	struct scsipi_channel *chan;
{
	struct scsipi_adapter *adapt = chan->chan_adapter;

	if (chan->chan_flags & SCSIPI_CHAN_OPENINGS) {
		if (chan->chan_openings > 0) {
			chan->chan_openings--;
			return (1);
		}
		return (0);
	}

	if (adapt->adapt_openings > 0) {
		adapt->adapt_openings--;
		return (1);
	}
	return (0);
}

/*
 * scsipi_grow_resources:
 *
 *	Attempt to grow resources for a channel.  If this succeeds,
 *	we allocate one for our caller.
 *
 *	NOTE: Must be called at splbio().
 */
__inline int
scsipi_grow_resources(chan)
	struct scsipi_channel *chan;
{

	if (chan->chan_flags & SCSIPI_CHAN_CANGROW) {
		if ((chan->chan_flags & SCSIPI_CHAN_TACTIVE) == 0) {
			scsipi_adapter_request(chan,
			    ADAPTER_REQ_GROW_RESOURCES, NULL);
			return (scsipi_get_resource(chan));
		}
		/*
		 * Ask the channel thread to do it.  It'll have to thaw
		 * the queue.
		 */
		scsipi_channel_freeze(chan, 1);
		chan->chan_tflags |= SCSIPI_CHANT_GROWRES;
		wakeup(&chan->chan_complete);
		return (0);
	}

	return (0);
}

/*
 * scsipi_put_resource:
 *
 *	Free a single xfer `resource' to the channel.
 *
 *	NOTE: Must be called at splbio().
 */
void
scsipi_put_resource(chan)
	struct scsipi_channel *chan;
{
	struct scsipi_adapter *adapt = chan->chan_adapter;

	if (chan->chan_flags & SCSIPI_CHAN_OPENINGS)
		chan->chan_openings++;
	else
		adapt->adapt_openings++;
}

/*
 * scsipi_get_tag:
 *
 *	Get a tag ID for the specified xfer.
 *
 *	NOTE: Must be called at splbio().
 */
void
scsipi_get_tag(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_periph *periph = xs->xs_periph;
	int word, bit, tag;

	for (word = 0; word < PERIPH_NTAGWORDS; word++) {
		bit = ffs(periph->periph_freetags[word]);
		if (bit != 0)
			break;
	}
#ifdef DIAGNOSTIC
	if (word == PERIPH_NTAGWORDS) {
		scsipi_printaddr(periph);
		printf("no free tags\n");
		panic("scsipi_get_tag");
	}
#endif

	bit -= 1;
	periph->periph_freetags[word] &= ~(1 << bit);
	tag = (word << 5) | bit;

	/* XXX Should eventually disallow this completely. */
	if (tag >= periph->periph_openings) {
		scsipi_printaddr(periph);
		printf("WARNING: tag %d greater than available openings %d\n",
		    tag, periph->periph_openings);
	}

	xs->xs_tag_id = tag;
}

/*
 * scsipi_put_tag:
 *
 *	Put the tag ID for the specified xfer back into the pool.
 *
 *	NOTE: Must be called at splbio().
 */
void
scsipi_put_tag(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_periph *periph = xs->xs_periph;
	int word, bit;

	word = xs->xs_tag_id >> 5;
	bit = xs->xs_tag_id & 0x1f;

	periph->periph_freetags[word] |= (1 << bit);
}
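
/*
 * Illustrative sketch (not part of the driver): tag IDs are packed as
 * a word index into the periph_freetags bitmap plus a bit within that
 * 32-bit word, so encode and decode are just the shifts used above.
 * Guarded by SCSIPI_EXAMPLE, which is never defined; a hedged example.
 */
#ifdef SCSIPI_EXAMPLE
static void
example_tag_round_trip(void)
{
	int word = 1, bit = 7;
	int tag = (word << 5) | bit;	/* encode: tag ID 39 */

	/* decode recovers the original word/bit coordinates */
	KASSERT((tag >> 5) == word && (tag & 0x1f) == bit);
}
#endif /* SCSIPI_EXAMPLE */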

/*
 * scsipi_get_xs:
 *
 *	Allocate an xfer descriptor and associate it with the
 *	specified peripheral.  If the peripheral has no more
 *	available command openings, we either block waiting for
 *	one to become available, or fail.
 */
struct scsipi_xfer *
scsipi_get_xs(periph, flags)
	struct scsipi_periph *periph;
	int flags;
{
	struct scsipi_xfer *xs;
	int s;

	SC_DEBUG(periph, SCSIPI_DB3, ("scsipi_get_xs\n"));

	/*
	 * If we're cold, make sure we poll.
	 */
	if (cold)
		flags |= XS_CTL_NOSLEEP | XS_CTL_POLL;

#ifdef DIAGNOSTIC
	/*
	 * URGENT commands can never be ASYNC.
	 */
	if ((flags & (XS_CTL_URGENT|XS_CTL_ASYNC)) ==
	    (XS_CTL_URGENT|XS_CTL_ASYNC)) {
		scsipi_printaddr(periph);
		printf("URGENT and ASYNC\n");
		panic("scsipi_get_xs");
	}
#endif

	s = splbio();
	/*
	 * Wait for a command opening to become available.  Rules:
	 *
	 *	- All xfers must wait for an available opening.
	 *	  Exception: URGENT xfers can proceed when
	 *	  active == openings, because we use the opening
	 *	  of the command we're recovering for.
	 *
	 *	- If the periph has sense pending, only URGENT & REQSENSE
	 *	  xfers may proceed.
	 *
	 *	- If the periph is recovering, only URGENT xfers may
	 *	  proceed.
	 *
	 *	- If the periph is currently executing a recovery
	 *	  command, URGENT commands must block, because only
	 *	  one recovery command can execute at a time.
	 */
	for (;;) {
		if (flags & XS_CTL_URGENT) {
			if (periph->periph_active > periph->periph_openings)
				goto wait_for_opening;
			if (periph->periph_flags & PERIPH_SENSE) {
				if ((flags & XS_CTL_REQSENSE) == 0)
					goto wait_for_opening;
			} else {
				if ((periph->periph_flags &
				    PERIPH_RECOVERY_ACTIVE) != 0)
					goto wait_for_opening;
				periph->periph_flags |= PERIPH_RECOVERY_ACTIVE;
			}
			break;
		}
		if (periph->periph_active >= periph->periph_openings ||
		    (periph->periph_flags & PERIPH_RECOVERING) != 0)
			goto wait_for_opening;
		periph->periph_active++;
		break;

 wait_for_opening:
		if (flags & XS_CTL_NOSLEEP) {
			splx(s);
			return (NULL);
		}
		SC_DEBUG(periph, SCSIPI_DB3, ("sleeping\n"));
		periph->periph_flags |= PERIPH_WAITING;
		(void) tsleep(periph, PRIBIO, "getxs", 0);
	}
	SC_DEBUG(periph, SCSIPI_DB3, ("calling pool_get\n"));
	xs = pool_get(&scsipi_xfer_pool,
	    ((flags & XS_CTL_NOSLEEP) != 0 ? PR_NOWAIT : PR_WAITOK));
	if (xs == NULL) {
		if (flags & XS_CTL_URGENT) {
			if ((flags & XS_CTL_REQSENSE) == 0)
				periph->periph_flags &= ~PERIPH_RECOVERY_ACTIVE;
		} else
			periph->periph_active--;
		scsipi_printaddr(periph);
		printf("unable to allocate %sscsipi_xfer\n",
		    (flags & XS_CTL_URGENT) ? "URGENT " : "");
	}
	splx(s);

	SC_DEBUG(periph, SCSIPI_DB3, ("returning\n"));

	if (xs != NULL) {
		/* Zero the xfer first, so the callout isn't wiped out. */
		memset(xs, 0, sizeof(*xs));
		callout_init(&xs->xs_callout);
		xs->xs_periph = periph;
		xs->xs_control = flags;
		xs->xs_status = 0;
		s = splbio();
		TAILQ_INSERT_TAIL(&periph->periph_xferq, xs, device_q);
		splx(s);
	}
	return (xs);
}

/*
 * scsipi_put_xs:
 *
 *	Release an xfer descriptor, decreasing the outstanding command
 *	count for the peripheral.  If there is a thread waiting for
 *	an opening, wake it up.  If not, kick any queued I/O the
 *	peripheral may have.
 *
 *	NOTE: Must be called at splbio().
 */
void
scsipi_put_xs(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_periph *periph = xs->xs_periph;
	int flags = xs->xs_control;

	SC_DEBUG(periph, SCSIPI_DB3, ("scsipi_free_xs\n"));

	TAILQ_REMOVE(&periph->periph_xferq, xs, device_q);
	pool_put(&scsipi_xfer_pool, xs);

#ifdef DIAGNOSTIC
	if ((periph->periph_flags & PERIPH_RECOVERY_ACTIVE) != 0 &&
	    periph->periph_active == 0) {
		scsipi_printaddr(periph);
		printf("recovery without a command to recover for\n");
		panic("scsipi_put_xs");
	}
#endif

	if (flags & XS_CTL_URGENT) {
		if ((flags & XS_CTL_REQSENSE) == 0)
			periph->periph_flags &= ~PERIPH_RECOVERY_ACTIVE;
	} else
		periph->periph_active--;
	if (periph->periph_active == 0 &&
	    (periph->periph_flags & PERIPH_WAITDRAIN) != 0) {
		periph->periph_flags &= ~PERIPH_WAITDRAIN;
		wakeup(&periph->periph_active);
	}

	if (periph->periph_flags & PERIPH_WAITING) {
		periph->periph_flags &= ~PERIPH_WAITING;
		wakeup(periph);
	} else {
		if (periph->periph_switch->psw_start != NULL) {
			SC_DEBUG(periph, SCSIPI_DB2,
			    ("calling private start()\n"));
			(*periph->periph_switch->psw_start)(periph);
		}
	}
}
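
/*
 * Illustrative sketch (not part of the driver): a non-blocking caller
 * of scsipi_get_xs() must be prepared for a NULL return, and must be
 * back at splbio() before handing the xfer back with scsipi_put_xs().
 * Guarded by SCSIPI_EXAMPLE, which is never defined; a hedged example.
 */
#ifdef SCSIPI_EXAMPLE
static int
example_try_get_xs(struct scsipi_periph *periph)
{
	struct scsipi_xfer *xs;
	int s;

	xs = scsipi_get_xs(periph, XS_CTL_NOSLEEP);
	if (xs == NULL)
		return (ENOMEM);	/* no opening, or pool exhausted */

	/* ... fill in the cdb and run the command here ... */

	s = splbio();
	scsipi_put_xs(xs);		/* release the opening, wake waiters */
	splx(s);
	return (0);
}
#endif /* SCSIPI_EXAMPLE */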

/*
 * scsipi_channel_freeze:
 *
 *	Freeze a channel's xfer queue.
 */
void
scsipi_channel_freeze(chan, count)
	struct scsipi_channel *chan;
	int count;
{
	int s;

	s = splbio();
	chan->chan_qfreeze += count;
	splx(s);
}

/*
 * scsipi_channel_thaw:
 *
 *	Thaw a channel's xfer queue.
 */
void
scsipi_channel_thaw(chan, count)
	struct scsipi_channel *chan;
	int count;
{
	int s;

	s = splbio();
	chan->chan_qfreeze -= count;
	/*
	 * Don't let the freeze count go negative.
	 *
	 * Presumably the adapter driver could keep track of this,
	 * but it might just be easier to do this here so as to allow
	 * multiple callers, including those outside the adapter driver.
	 */
	if (chan->chan_qfreeze < 0) {
		chan->chan_qfreeze = 0;
	}
	splx(s);
	/*
	 * Kick the channel's queue here.  Note, we may be running in
	 * interrupt context (softclock or HBA's interrupt), so the adapter
	 * driver had better not sleep.
	 */
	if (chan->chan_qfreeze == 0)
		scsipi_run_queue(chan);
}

/*
 * scsipi_channel_timed_thaw:
 *
 *	Thaw a channel after some time has expired.  This will also
 *	run the channel's queue if the freeze count has reached 0.
 */
void
scsipi_channel_timed_thaw(arg)
	void *arg;
{
	struct scsipi_channel *chan = arg;

	scsipi_channel_thaw(chan, 1);
}
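
/*
 * Illustrative sketch (not part of the driver): the usual pattern for
 * deferring a channel is to freeze it now and let the clock thaw it,
 * much as scsipi_complete() does for periphs below.  The softc and its
 * callout here are hypothetical.  Guarded by SCSIPI_EXAMPLE, which is
 * never defined; a hedged example only.
 */
#ifdef SCSIPI_EXAMPLE
struct example_softc {
	struct scsipi_channel sc_channel;
	struct callout sc_callout;
};

static void
example_defer_channel(struct example_softc *sc)
{

	/* Hold the queue now... */
	scsipi_channel_freeze(&sc->sc_channel, 1);
	/* ...and thaw it (kicking the queue) one second from now. */
	callout_reset(&sc->sc_callout, hz, scsipi_channel_timed_thaw,
	    &sc->sc_channel);
}
#endif /* SCSIPI_EXAMPLE */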

/*
 * scsipi_periph_freeze:
 *
 *	Freeze a device's xfer queue.
 */
void
scsipi_periph_freeze(periph, count)
	struct scsipi_periph *periph;
	int count;
{
	int s;

	s = splbio();
	periph->periph_qfreeze += count;
	splx(s);
}

/*
 * scsipi_periph_thaw:
 *
 *	Thaw a device's xfer queue.
 */
void
scsipi_periph_thaw(periph, count)
	struct scsipi_periph *periph;
	int count;
{
	int s;

	s = splbio();
	periph->periph_qfreeze -= count;
#ifdef DIAGNOSTIC
	if (periph->periph_qfreeze < 0) {
		static const char pc[] = "periph freeze count < 0";
		scsipi_printaddr(periph);
		printf("%s\n", pc);
		panic(pc);
	}
#endif
	if (periph->periph_qfreeze == 0 &&
	    (periph->periph_flags & PERIPH_WAITING) != 0)
		wakeup(periph);
	splx(s);
}

/*
 * scsipi_periph_timed_thaw:
 *
 *	Thaw a device after some time has expired.
 */
void
scsipi_periph_timed_thaw(arg)
	void *arg;
{
	int s;
	struct scsipi_periph *periph = arg;

	callout_stop(&periph->periph_callout);

	s = splbio();
	scsipi_periph_thaw(periph, 1);
	if ((periph->periph_channel->chan_flags & SCSIPI_CHAN_TACTIVE) == 0) {
		/*
		 * Kick the channel's queue here.  Note, we're running in
		 * interrupt context (softclock), so the adapter driver
		 * had better not sleep.
		 */
		scsipi_run_queue(periph->periph_channel);
	} else {
		/*
		 * Tell the completion thread to kick the channel's queue here.
		 */
		periph->periph_channel->chan_tflags |= SCSIPI_CHANT_KICK;
		wakeup(&periph->periph_channel->chan_complete);
	}
	splx(s);
}

/*
 * scsipi_wait_drain:
 *
 *	Wait for a periph's pending xfers to drain.
 */
void
scsipi_wait_drain(periph)
	struct scsipi_periph *periph;
{
	int s;

	s = splbio();
	while (periph->periph_active != 0) {
		periph->periph_flags |= PERIPH_WAITDRAIN;
		(void) tsleep(&periph->periph_active, PRIBIO, "sxdrn", 0);
	}
	splx(s);
}
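
/*
 * Illustrative sketch (not part of the driver): a periph driver's
 * detach path would typically drain outstanding commands before
 * unlinking the periph from the channel.  Guarded by SCSIPI_EXAMPLE,
 * which is never defined; a hedged example only.
 */
#ifdef SCSIPI_EXAMPLE
static void
example_detach(struct scsipi_periph *periph)
{

	/* Block until periph_active drops to zero... */
	scsipi_wait_drain(periph);
	/* ...then it is safe to remove the periph. */
	scsipi_remove_periph(periph->periph_channel, periph);
}
#endif /* SCSIPI_EXAMPLE */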

/*
 * scsipi_kill_pending:
 *
 *	Kill off all pending xfers for a periph.
 *
 *	NOTE: Must be called at splbio().
 */
void
scsipi_kill_pending(periph)
	struct scsipi_periph *periph;
{

	(*periph->periph_channel->chan_bustype->bustype_kill_pending)(periph);
#ifdef DIAGNOSTIC
	if (TAILQ_FIRST(&periph->periph_xferq) != NULL)
		panic("scsipi_kill_pending");
#endif
	scsipi_wait_drain(periph);
}

/*
 * scsipi_interpret_sense:
 *
 *	Look at the returned sense and act on the error, determining
 *	the unix error number to pass back.  (0 = report no error)
 *
 *	NOTE: If we return ERESTART, we are expected to have
 *	thawed the device!
 *
 *	THIS IS THE DEFAULT ERROR HANDLER FOR SCSI DEVICES.
 */
int
scsipi_interpret_sense(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_sense_data *sense;
	struct scsipi_periph *periph = xs->xs_periph;
	u_int8_t key;
	u_int32_t info;
	int error;
#ifndef SCSIVERBOSE
	static char *error_mes[] = {
		"soft error (corrected)",
		"not ready", "medium error",
		"non-media hardware failure", "illegal request",
		"unit attention", "readonly device",
		"no data found", "vendor unique",
		"copy aborted", "command aborted",
		"search returned equal", "volume overflow",
		"verify miscompare", "unknown error key"
	};
#endif

	sense = &xs->sense.scsi_sense;
#ifdef SCSIPI_DEBUG
	if (periph->periph_flags & SCSIPI_DB1) {
		int count;
		scsipi_printaddr(periph);
		printf(" sense debug information:\n");
		printf("\tcode 0x%x valid 0x%x\n",
		    sense->error_code & SSD_ERRCODE,
		    sense->error_code & SSD_ERRCODE_VALID ? 1 : 0);
		printf("\tseg 0x%x key 0x%x ili 0x%x eom 0x%x fmark 0x%x\n",
		    sense->segment,
		    sense->flags & SSD_KEY,
		    sense->flags & SSD_ILI ? 1 : 0,
		    sense->flags & SSD_EOM ? 1 : 0,
		    sense->flags & SSD_FILEMARK ? 1 : 0);
		printf("\ninfo: 0x%x 0x%x 0x%x 0x%x followed by %d "
		    "extra bytes\n",
		    sense->info[0],
		    sense->info[1],
		    sense->info[2],
		    sense->info[3],
		    sense->extra_len);
		printf("\textra: ");
		for (count = 0; count < ADD_BYTES_LIM(sense); count++)
			printf("0x%x ", sense->cmd_spec_info[count]);
		printf("\n");
	}
#endif

	/*
	 * If the periph has its own error handler, call it first.
	 * If it returns a legit error value, return that, otherwise
	 * it wants us to continue with normal error processing.
	 */
	if (periph->periph_switch->psw_error != NULL) {
		SC_DEBUG(periph, SCSIPI_DB2,
		    ("calling private err_handler()\n"));
		error = (*periph->periph_switch->psw_error)(xs);
		if (error != EJUSTRETURN)
			return (error);
	}
	/* otherwise use the default */
	switch (sense->error_code & SSD_ERRCODE) {
	/*
	 * If it's code 70, use the extended stuff and
	 * interpret the key
	 */
	case 0x71:		/* delayed error */
		scsipi_printaddr(periph);
		key = sense->flags & SSD_KEY;
		printf(" DEFERRED ERROR, key = 0x%x\n", key);
		/* FALLTHROUGH */
	case 0x70:
		if ((sense->error_code & SSD_ERRCODE_VALID) != 0)
			info = _4btol(sense->info);
		else
			info = 0;
		key = sense->flags & SSD_KEY;

		switch (key) {
		case SKEY_NO_SENSE:
		case SKEY_RECOVERED_ERROR:
			if (xs->resid == xs->datalen && xs->datalen) {
				/*
				 * Why is this here?
				 */
				xs->resid = 0;	/* not short read */
			}
			/* FALLTHROUGH */
		case SKEY_EQUAL:
			error = 0;
			break;
		case SKEY_NOT_READY:
			if ((periph->periph_flags & PERIPH_REMOVABLE) != 0)
				periph->periph_flags &= ~PERIPH_MEDIA_LOADED;
			if ((xs->xs_control & XS_CTL_IGNORE_NOT_READY) != 0)
				return (0);
			if (sense->add_sense_code == 0x3A) {
				error = ENODEV; /* Medium not present */
				if (xs->xs_control & XS_CTL_SILENT_NODEV)
					return (error);
			} else
				error = EIO;
			if ((xs->xs_control & XS_CTL_SILENT) != 0)
				return (error);
			break;
		case SKEY_ILLEGAL_REQUEST:
			if ((xs->xs_control &
			    XS_CTL_IGNORE_ILLEGAL_REQUEST) != 0)
				return (0);
			/*
			 * Handle the case where a device reports
			 * Logical Unit Not Supported during discovery.
			 */
			if ((xs->xs_control & XS_CTL_DISCOVERY) != 0 &&
			    sense->add_sense_code == 0x25 &&
			    sense->add_sense_code_qual == 0x00)
				return (EINVAL);
			if ((xs->xs_control & XS_CTL_SILENT) != 0)
				return (EIO);
			error = EINVAL;
			break;
		case SKEY_UNIT_ATTENTION:
			if (sense->add_sense_code == 0x29 &&
			    sense->add_sense_code_qual == 0x00) {
				/* device or bus reset */
				return (ERESTART);
			}
			if ((periph->periph_flags & PERIPH_REMOVABLE) != 0)
				periph->periph_flags &= ~PERIPH_MEDIA_LOADED;
			if ((xs->xs_control &
			    XS_CTL_IGNORE_MEDIA_CHANGE) != 0 ||
			    /* XXX Should reupload any transient state. */
			    (periph->periph_flags &
			     PERIPH_REMOVABLE) == 0) {
				return (ERESTART);
			}
			if ((xs->xs_control & XS_CTL_SILENT) != 0)
				return (EIO);
			error = EIO;
			break;
		case SKEY_WRITE_PROTECT:
			error = EROFS;
			break;
		case SKEY_BLANK_CHECK:
			error = 0;
			break;
		case SKEY_ABORTED_COMMAND:
			error = ERESTART;
			break;
		case SKEY_VOLUME_OVERFLOW:
			error = ENOSPC;
			break;
		default:
			error = EIO;
			break;
		}

#ifdef SCSIVERBOSE
		if (key && (xs->xs_control & XS_CTL_SILENT) == 0)
			scsipi_print_sense(xs, 0);
#else
		if (key) {
			scsipi_printaddr(periph);
			printf("%s", error_mes[key - 1]);
			if ((sense->error_code & SSD_ERRCODE_VALID) != 0) {
				switch (key) {
				case SKEY_NOT_READY:
				case SKEY_ILLEGAL_REQUEST:
				case SKEY_UNIT_ATTENTION:
				case SKEY_WRITE_PROTECT:
					break;
				case SKEY_BLANK_CHECK:
					printf(", requested size: %d (decimal)",
					    info);
					break;
				case SKEY_ABORTED_COMMAND:
					if (xs->xs_retries)
						printf(", retrying");
					printf(", cmd 0x%x, info 0x%x",
					    xs->cmd->opcode, info);
					break;
				default:
					printf(", info = %d (decimal)", info);
				}
			}
			if (sense->extra_len != 0) {
				int n;
				printf(", data =");
				for (n = 0; n < sense->extra_len; n++)
					printf(" %02x",
					    sense->cmd_spec_info[n]);
			}
			printf("\n");
		}
#endif
		return (error);

	/*
	 * Not code 70, just report it
	 */
	default:
#if defined(SCSIDEBUG) || defined(DEBUG)
	{
		static char *uc = "undecodable sense error";
		int i;
		u_int8_t *cptr = (u_int8_t *) sense;
		scsipi_printaddr(periph);
		if (xs->cmd == &xs->cmdstore) {
			printf("%s for opcode 0x%x, data=",
			    uc, xs->cmdstore.opcode);
		} else {
			printf("%s, data=", uc);
		}
		for (i = 0; i < sizeof (*sense); i++)
			printf(" 0x%02x", *(cptr++) & 0xff);
		printf("\n");
	}
#else
		scsipi_printaddr(periph);
		printf("Sense Error Code 0x%x",
		    sense->error_code & SSD_ERRCODE);
		if ((sense->error_code & SSD_ERRCODE_VALID) != 0) {
			struct scsipi_sense_data_unextended *usense =
			    (struct scsipi_sense_data_unextended *)sense;
			printf(" at block no. %d (decimal)",
			    _3btol(usense->block));
		}
		printf("\n");
#endif
		return (EIO);
	}
}

/*
 * scsipi_size:
 *
 *	Find out from the device what its capacity is.
 */
u_long
scsipi_size(periph, flags)
	struct scsipi_periph *periph;
	int flags;
{
	struct scsipi_read_cap_data rdcap;
	struct scsipi_read_capacity scsipi_cmd;

	memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
	scsipi_cmd.opcode = READ_CAPACITY;

	/*
	 * If the command works, interpret the result as a 4 byte
	 * number of blocks
	 */
	if (scsipi_command(periph, (struct scsipi_generic *)&scsipi_cmd,
	    sizeof(scsipi_cmd), (u_char *)&rdcap, sizeof(rdcap),
	    SCSIPIRETRIES, 20000, NULL,
	    flags | XS_CTL_DATA_IN | XS_CTL_DATA_ONSTACK) != 0) {
		scsipi_printaddr(periph);
		printf("could not get size\n");
		return (0);
	}

	return (_4btol(rdcap.addr) + 1);
}
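
/*
 * Illustrative sketch (not part of the driver): READ CAPACITY returns
 * the big-endian address of the *last* block, which is why the code
 * above adds one to _4btol() to get the device's size in blocks.
 * Guarded by SCSIPI_EXAMPLE, which is never defined; a hedged example.
 */
#ifdef SCSIPI_EXAMPLE
static u_long
example_capacity_to_blocks(struct scsipi_read_cap_data *rdcap)
{

	/* e.g. a last LBA of 0x003fffff decodes to 0x00400000 blocks */
	return (_4btol(rdcap->addr) + 1);
}
#endif /* SCSIPI_EXAMPLE */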

/*
 * scsipi_test_unit_ready:
 *
 *	Issue a `test unit ready' request.
 */
int
scsipi_test_unit_ready(periph, flags)
	struct scsipi_periph *periph;
	int flags;
{
	struct scsipi_test_unit_ready scsipi_cmd;

	/* some ATAPI drives don't support TEST_UNIT_READY. Sigh */
	if (periph->periph_quirks & PQUIRK_NOTUR)
		return (0);

	memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
	scsipi_cmd.opcode = TEST_UNIT_READY;

	return (scsipi_command(periph,
	    (struct scsipi_generic *)&scsipi_cmd, sizeof(scsipi_cmd),
	    0, 0, SCSIPIRETRIES, 10000, NULL, flags));
}

/*
 * scsipi_inquire:
 *
 *	Ask the device about itself.
 */
int
scsipi_inquire(periph, inqbuf, flags)
	struct scsipi_periph *periph;
	struct scsipi_inquiry_data *inqbuf;
	int flags;
{
	struct scsipi_inquiry scsipi_cmd;

	memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
	scsipi_cmd.opcode = INQUIRY;
	scsipi_cmd.length = sizeof(struct scsipi_inquiry_data);

	return (scsipi_command(periph,
	    (struct scsipi_generic *) &scsipi_cmd, sizeof(scsipi_cmd),
	    (u_char *) inqbuf, sizeof(struct scsipi_inquiry_data),
	    SCSIPIRETRIES, 10000, NULL, XS_CTL_DATA_IN | flags));
}

/*
 * scsipi_prevent:
 *
 *	Prevent or allow the user to remove the media
 */
int
scsipi_prevent(periph, type, flags)
	struct scsipi_periph *periph;
	int type, flags;
{
	struct scsipi_prevent scsipi_cmd;

	if (periph->periph_quirks & PQUIRK_NODOORLOCK)
		return (0);

	memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
	scsipi_cmd.opcode = PREVENT_ALLOW;
	scsipi_cmd.how = type;

	return (scsipi_command(periph,
	    (struct scsipi_generic *) &scsipi_cmd, sizeof(scsipi_cmd),
	    0, 0, SCSIPIRETRIES, 5000, NULL, flags));
}

/*
 * scsipi_start:
 *
 *	Send a START UNIT.
 */
int
scsipi_start(periph, type, flags)
	struct scsipi_periph *periph;
	int type, flags;
{
	struct scsipi_start_stop scsipi_cmd;

	if (periph->periph_quirks & PQUIRK_NOSTARTUNIT)
		return 0;

	memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
	scsipi_cmd.opcode = START_STOP;
	scsipi_cmd.byte2 = 0x00;
	scsipi_cmd.how = type;

	return (scsipi_command(periph,
	    (struct scsipi_generic *) &scsipi_cmd, sizeof(scsipi_cmd),
	    0, 0, SCSIPIRETRIES, (type & SSS_START) ? 60000 : 10000,
	    NULL, flags));
}
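
/*
 * Illustrative sketch (not part of the driver): during attach a periph
 * driver typically combines the simple commands above, polling rather
 * than sleeping so the sequence also works before interrupts are
 * running.  Guarded by SCSIPI_EXAMPLE, which is never defined; a
 * hedged example only.
 */
#ifdef SCSIPI_EXAMPLE
static u_long
example_probe_media(struct scsipi_periph *periph)
{
	int flags = XS_CTL_NOSLEEP | XS_CTL_POLL;

	/* Is a medium present and the unit ready? */
	if (scsipi_test_unit_ready(periph, flags) != 0)
		return (0);
	/* Then ask how big it is, in blocks. */
	return (scsipi_size(periph, flags));
}
#endif /* SCSIPI_EXAMPLE */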

/*
 * scsipi_mode_sense, scsipi_mode_sense_big:
 *	get a sense page from a device
 */

int
scsipi_mode_sense(periph, byte2, page, data, len, flags, retries, timeout)
	struct scsipi_periph *periph;
	int byte2, page, len, flags, retries, timeout;
	struct scsipi_mode_header *data;
{
	struct scsipi_mode_sense scsipi_cmd;
	int error;

	memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
	scsipi_cmd.opcode = MODE_SENSE;
	scsipi_cmd.byte2 = byte2;
	scsipi_cmd.page = page;
	if (scsipi_periph_bustype(periph) == SCSIPI_BUSTYPE_ATAPI)
		_lto2b(len, scsipi_cmd.u_len.atapi.length);
	else
		scsipi_cmd.u_len.scsi.length = len & 0xff;
	error = scsipi_command(periph, (struct scsipi_generic *)&scsipi_cmd,
	    sizeof(scsipi_cmd), (void *)data, len, retries, timeout, NULL,
	    flags | XS_CTL_DATA_IN);
	SC_DEBUG(periph, SCSIPI_DB2,
	    ("scsipi_mode_sense: error=%d\n", error));
	return (error);
}

int
scsipi_mode_sense_big(periph, byte2, page, data, len, flags, retries, timeout)
	struct scsipi_periph *periph;
	int byte2, page, len, flags, retries, timeout;
	struct scsipi_mode_header_big *data;
{
	struct scsipi_mode_sense_big scsipi_cmd;
	int error;

	memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
	scsipi_cmd.opcode = MODE_SENSE_BIG;
	scsipi_cmd.byte2 = byte2;
	scsipi_cmd.page = page;
	_lto2b(len, scsipi_cmd.length);
	error = scsipi_command(periph, (struct scsipi_generic *)&scsipi_cmd,
	    sizeof(scsipi_cmd), (void *)data, len, retries, timeout, NULL,
	    flags | XS_CTL_DATA_IN);
	SC_DEBUG(periph, SCSIPI_DB2,
	    ("scsipi_mode_sense_big: error=%d\n", error));
	return (error);
}

int
scsipi_mode_select(periph, byte2, data, len, flags, retries, timeout)
	struct scsipi_periph *periph;
	int byte2, len, flags, retries, timeout;
	struct scsipi_mode_header *data;
{
	struct scsipi_mode_select scsipi_cmd;
	int error;

	memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
	scsipi_cmd.opcode = MODE_SELECT;
	scsipi_cmd.byte2 = byte2;
	if (scsipi_periph_bustype(periph) == SCSIPI_BUSTYPE_ATAPI)
		_lto2b(len, scsipi_cmd.u_len.atapi.length);
	else
		scsipi_cmd.u_len.scsi.length = len & 0xff;
	error = scsipi_command(periph, (struct scsipi_generic *)&scsipi_cmd,
	    sizeof(scsipi_cmd), (void *)data, len, retries, timeout, NULL,
	    flags | XS_CTL_DATA_OUT);
	SC_DEBUG(periph, SCSIPI_DB2,
	    ("scsipi_mode_select: error=%d\n", error));
	return (error);
}

int
scsipi_mode_select_big(periph, byte2, data, len, flags, retries, timeout)
	struct scsipi_periph *periph;
	int byte2, len, flags, retries, timeout;
	struct scsipi_mode_header_big *data;
{
	struct scsipi_mode_select_big scsipi_cmd;
	int error;

	memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
	scsipi_cmd.opcode = MODE_SELECT_BIG;
	scsipi_cmd.byte2 = byte2;
	_lto2b(len, scsipi_cmd.length);
	error = scsipi_command(periph, (struct scsipi_generic *)&scsipi_cmd,
	    sizeof(scsipi_cmd), (void *)data, len, retries, timeout, NULL,
	    flags | XS_CTL_DATA_OUT);
	SC_DEBUG(periph, SCSIPI_DB2,
	    ("scsipi_mode_select_big: error=%d\n", error));
	return (error);
}
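
/*
 * Illustrative sketch (not part of the driver): callers of
 * scsipi_mode_sense() pass a buffer that starts with the mode header;
 * the page data follows it.  The buffer layout and page size below are
 * hypothetical.  Guarded by SCSIPI_EXAMPLE, which is never defined; a
 * hedged example only.
 */
#ifdef SCSIPI_EXAMPLE
struct example_mode_buf {
	struct scsipi_mode_header header;
	u_int8_t page[0x20];
};

static int
example_get_mode_page(struct scsipi_periph *periph, int page)
{
	struct example_mode_buf buf;

	/* byte2 == 0: DBD clear, report current values */
	return (scsipi_mode_sense(periph, 0, page, &buf.header,
	    sizeof(buf), XS_CTL_DATA_ONSTACK, SCSIPIRETRIES, 10000));
}
#endif /* SCSIPI_EXAMPLE */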

/*
 * scsipi_done:
 *
 *	This routine is called by an adapter's interrupt handler when
 *	an xfer is completed.
 */
void
scsipi_done(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_periph *periph = xs->xs_periph;
	struct scsipi_channel *chan = periph->periph_channel;
	int s, freezecnt;

	SC_DEBUG(periph, SCSIPI_DB2, ("scsipi_done\n"));
#ifdef SCSIPI_DEBUG
	if (periph->periph_dbflags & SCSIPI_DB1)
		show_scsipi_cmd(xs);
#endif

	s = splbio();
	/*
	 * The resource this command was using is now free.
	 */
	scsipi_put_resource(chan);
	xs->xs_periph->periph_sent--;

	/*
	 * If the command was tagged, free the tag.
	 */
	if (XS_CTL_TAGTYPE(xs) != 0)
		scsipi_put_tag(xs);
	else
		periph->periph_flags &= ~PERIPH_UNTAG;

	/* Mark the command as `done'. */
	xs->xs_status |= XS_STS_DONE;

#ifdef DIAGNOSTIC
	if ((xs->xs_control & (XS_CTL_ASYNC|XS_CTL_POLL)) ==
	    (XS_CTL_ASYNC|XS_CTL_POLL))
		panic("scsipi_done: ASYNC and POLL");
#endif

	/*
	 * If the xfer had an error of any sort, freeze the
	 * periph's queue.  Freeze it again if we were requested
	 * to do so in the xfer.
	 */
	freezecnt = 0;
	if (xs->error != XS_NOERROR)
		freezecnt++;
	if (xs->xs_control & XS_CTL_FREEZE_PERIPH)
		freezecnt++;
	if (freezecnt != 0)
		scsipi_periph_freeze(periph, freezecnt);

	/*
	 * Record the xfer with a pending sense, in case a SCSI reset is
	 * received before the thread is woken up.
	 */
	if (xs->error == XS_BUSY && xs->status == SCSI_CHECK) {
		periph->periph_flags |= PERIPH_SENSE;
		periph->periph_xscheck = xs;
	}

	/*
	 * If this was an xfer that was not to complete asynchronously,
	 * let the requesting thread perform error checking/handling
	 * in its context.
	 */
	if ((xs->xs_control & XS_CTL_ASYNC) == 0) {
		splx(s);
		/*
		 * If it's a polling job, just return, to unwind the
		 * call graph.  We don't need to restart the queue,
		 * because polling jobs are treated specially, and
		 * are really only used during crash dumps anyway
		 * (XXX or during boot-time autoconfiguration of
		 * ATAPI devices).
		 */
		if (xs->xs_control & XS_CTL_POLL)
			return;
		wakeup(xs);
		goto out;
	}

	/*
	 * Catch the extremely common case of I/O completing
	 * without error; no use in taking a context switch
	 * if we can handle it in interrupt context.
	 */
	if (xs->error == XS_NOERROR) {
		splx(s);
		(void) scsipi_complete(xs);
		goto out;
	}

	/*
	 * There is an error on this xfer.  Put it on the channel's
	 * completion queue, and wake up the completion thread.
	 */
	TAILQ_INSERT_TAIL(&chan->chan_complete, xs, channel_q);
	splx(s);
	wakeup(&chan->chan_complete);

 out:
	/*
	 * If there are more xfers on the channel's queue, attempt to
	 * run them.
	 */
	scsipi_run_queue(chan);
}

/*
 * scsipi_complete:
 *
 *	Completion of a scsipi_xfer.  This is the guts of scsipi_done().
 *
 *	NOTE: This routine MUST be called with valid thread context
 *	except for the case where the following two conditions are
 *	true:
 *
 *		xs->error == XS_NOERROR
 *		XS_CTL_ASYNC is set in xs->xs_control
 *
 *	The semantics of this routine can be tricky, so here is an
 *	explanation:
 *
 *		0		Xfer completed successfully.
 *
 *		ERESTART	Xfer had an error, but was restarted.
 *
 *		anything else	Xfer had an error, return value is Unix
 *				errno.
 *
 *	If the return value is anything but ERESTART:
 *
 *		- If XS_CTL_ASYNC is set, `xs' has been freed back to
 *		  the pool.
 *		- If there is a buf associated with the xfer,
 *		  it has been biodone()'d.
 */
int
scsipi_complete(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_periph *periph = xs->xs_periph;
	struct scsipi_channel *chan = periph->periph_channel;
	struct buf *bp;
	int error, s;

#ifdef DIAGNOSTIC
	if ((xs->xs_control & XS_CTL_ASYNC) != 0 && xs->bp == NULL)
		panic("scsipi_complete: XS_CTL_ASYNC but no buf");
#endif
	/*
	 * If command terminated with a CHECK CONDITION, we need to issue a
	 * REQUEST_SENSE command.  Once the REQUEST_SENSE has been processed
	 * we'll have the real status.
	 * Must be processed at splbio() to avoid missing a SCSI bus reset
	 * for this command.
	 */
	s = splbio();
	if (xs->error == XS_BUSY && xs->status == SCSI_CHECK) {
		/* request sense for a request sense? */
		if (xs->xs_control & XS_CTL_REQSENSE) {
			scsipi_printaddr(periph);
			printf("request sense for a request sense?\n");
			/* XXX maybe we should reset the device? */
			/* we've been frozen because xs->error != XS_NOERROR */
			scsipi_periph_thaw(periph, 1);
			splx(s);
			if (xs->resid < xs->datalen) {
				printf("we read %d bytes of sense anyway:\n",
				    xs->datalen - xs->resid);
#ifdef SCSIVERBOSE
				scsipi_print_sense_data((void *)xs->data, 0);
#endif
			}
			return (EINVAL);
		}
		scsipi_request_sense(xs);
	}
	splx(s);

	/*
	 * If it's a user level request, bypass all usual completion
	 * processing, let the user work it out..
	 */
	if ((xs->xs_control & XS_CTL_USERCMD) != 0) {
		SC_DEBUG(periph, SCSIPI_DB3, ("calling user done()\n"));
		if (xs->error != XS_NOERROR)
			scsipi_periph_thaw(periph, 1);
		scsipi_user_done(xs);
		SC_DEBUG(periph, SCSIPI_DB3, ("returned from user done()\n"));
		return (0);
	}

	switch (xs->error) {
	case XS_NOERROR:
		error = 0;
		break;

	case XS_SENSE:
	case XS_SHORTSENSE:
		error = (*chan->chan_bustype->bustype_interpret_sense)(xs);
		break;

	case XS_RESOURCE_SHORTAGE:
		/*
		 * XXX Should freeze channel's queue.
		 */
		scsipi_printaddr(periph);
		printf("adapter resource shortage\n");
		/* FALLTHROUGH */

	case XS_BUSY:
		if (xs->error == XS_BUSY && xs->status == SCSI_QUEUE_FULL) {
			struct scsipi_max_openings mo;

			/*
			 * We set the openings to active - 1, assuming that
			 * the command that got us here is the first one that
			 * can't fit into the device's queue.  If that's not
			 * the case, I guess we'll find out soon enough.
			 */
			mo.mo_target = periph->periph_target;
			mo.mo_lun = periph->periph_lun;
			if (periph->periph_active < periph->periph_openings)
				mo.mo_openings = periph->periph_active - 1;
			else
				mo.mo_openings = periph->periph_openings - 1;
#ifdef DIAGNOSTIC
			if (mo.mo_openings < 0) {
				scsipi_printaddr(periph);
				printf("QUEUE FULL resulted in < 0 openings\n");
				panic("scsipi_done");
			}
#endif
			if (mo.mo_openings == 0) {
				scsipi_printaddr(periph);
				printf("QUEUE FULL resulted in 0 openings\n");
				mo.mo_openings = 1;
			}
			scsipi_async_event(chan, ASYNC_EVENT_MAX_OPENINGS, &mo);
			error = ERESTART;
		} else if (xs->xs_retries != 0) {
			xs->xs_retries--;
			/*
			 * Wait one second, and try again.
			 */
			if ((xs->xs_control & XS_CTL_POLL) ||
			    (chan->chan_flags & SCSIPI_CHAN_TACTIVE) == 0) {
				delay(1000000);
			} else {
				scsipi_periph_freeze(periph, 1);
				callout_reset(&periph->periph_callout,
				    hz, scsipi_periph_timed_thaw, periph);
			}
			error = ERESTART;
		} else
			error = EBUSY;
		break;

	case XS_REQUEUE:
		error = ERESTART;
		break;

	case XS_TIMEOUT:
		if (xs->xs_retries != 0) {
			xs->xs_retries--;
			error = ERESTART;
		} else
			error = EIO;
		break;

	case XS_SELTIMEOUT:
		/* XXX Disable device? */
		error = EIO;
		break;

	case XS_RESET:
		if (xs->xs_control & XS_CTL_REQSENSE) {
			/*
			 * request sense interrupted by reset: signal it
			 * with EINTR return code.
			 */
			error = EINTR;
		} else {
			if (xs->xs_retries != 0) {
				xs->xs_retries--;
				error = ERESTART;
			} else
				error = EIO;
		}
		break;

	default:
		scsipi_printaddr(periph);
		printf("invalid return code from adapter: %d\n", xs->error);
		error = EIO;
		break;
	}

	s = splbio();
	if (error == ERESTART) {
		/*
		 * If we get here, the periph has been thawed and frozen
		 * again if we had to issue recovery commands.  Alternatively,
		 * it may have been frozen again and in a timed thaw.  In
		 * any case, we thaw the periph once we re-enqueue the
		 * command.  Once the periph is fully thawed, it will begin
		 * operation again.
		 */
		xs->error = XS_NOERROR;
		xs->status = SCSI_OK;
		xs->xs_status &= ~XS_STS_DONE;
		xs->xs_requeuecnt++;
		error = scsipi_enqueue(xs);
		if (error == 0) {
			scsipi_periph_thaw(periph, 1);
			splx(s);
			return (ERESTART);
		}
	}

	/*
	 * scsipi_done() freezes the queue if not XS_NOERROR.
	 * Thaw it here.
	 */
	if (xs->error != XS_NOERROR)
		scsipi_periph_thaw(periph, 1);

	if (periph->periph_switch->psw_done)
		periph->periph_switch->psw_done(xs);
	if ((bp = xs->bp) != NULL) {
		if (error) {
			bp->b_error = error;
			bp->b_flags |= B_ERROR;
			bp->b_resid = bp->b_bcount;
		} else {
			bp->b_error = 0;
			bp->b_resid = xs->resid;
		}
		biodone(bp);
	}

	if (xs->xs_control & XS_CTL_ASYNC)
		scsipi_put_xs(xs);
	splx(s);

	return (error);
}

/*
 * scsipi_request_sense:
 *
 *	Issue a request sense for the given scsipi_xfer.  Called when
 *	the xfer returns with a CHECK_CONDITION status.  Must be called
 *	in valid thread context and at splbio().
 */
void
scsipi_request_sense(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_periph *periph = xs->xs_periph;
	int flags, error;
	struct scsipi_sense cmd;

	periph->periph_flags |= PERIPH_SENSE;

	/* if command was polling, request sense will too */
	flags = xs->xs_control & XS_CTL_POLL;
	/* Polling commands can't sleep */
	if (flags)
		flags |= XS_CTL_NOSLEEP;

	flags |= XS_CTL_REQSENSE | XS_CTL_URGENT | XS_CTL_DATA_IN |
	    XS_CTL_THAW_PERIPH | XS_CTL_FREEZE_PERIPH;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = REQUEST_SENSE;
	cmd.length = sizeof(struct scsipi_sense_data);

	error = scsipi_command(periph,
	    (struct scsipi_generic *) &cmd, sizeof(cmd),
	    (u_char*)&xs->sense.scsi_sense, sizeof(struct scsipi_sense_data),
	    0, 1000, NULL, flags);
	periph->periph_flags &= ~PERIPH_SENSE;
	periph->periph_xscheck = NULL;
	switch (error) {
	case 0:
		/* we have a valid sense */
		xs->error = XS_SENSE;
		return;
	case EINTR:
		/* REQUEST_SENSE interrupted by bus reset. */
		xs->error = XS_RESET;
		return;
	case EIO:
		/* request sense couldn't be performed */
		/*
		 * XXX this isn't quite right but we don't have anything
		 * better for now
		 */
		xs->error = XS_DRIVER_STUFFUP;
		return;
	default:
		/* Notify that request sense failed. */
		xs->error = XS_DRIVER_STUFFUP;
		scsipi_printaddr(periph);
		printf("request sense failed with error %d\n", error);
		return;
	}
}

/*
 * scsipi_enqueue:
 *
 *	Enqueue an xfer on a channel.
 */
int
scsipi_enqueue(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_channel *chan = xs->xs_periph->periph_channel;
	struct scsipi_xfer *qxs;
	int s;

	s = splbio();

	/*
	 * If the xfer is to be polled, and there are already jobs on
	 * the queue, we can't proceed.
	 */
	if ((xs->xs_control & XS_CTL_POLL) != 0 &&
	    TAILQ_FIRST(&chan->chan_queue) != NULL) {
		splx(s);
		xs->error = XS_DRIVER_STUFFUP;
		return (EAGAIN);
	}

	/*
	 * If we have an URGENT xfer, it's an error recovery command
	 * and it should just go on the head of the channel's queue.
	 */
	if (xs->xs_control & XS_CTL_URGENT) {
		TAILQ_INSERT_HEAD(&chan->chan_queue, xs, channel_q);
		goto out;
	}

	/*
	 * If this xfer has already been on the queue before, we
	 * need to reinsert it in the correct order.  That order is:
	 *
	 *	Immediately before the first xfer for this periph
	 *	with a requeuecnt less than xs->xs_requeuecnt.
	 *
	 * Failing that, at the end of the queue.  (We'll end up
	 * there naturally.)
	 */
	if (xs->xs_requeuecnt != 0) {
		for (qxs = TAILQ_FIRST(&chan->chan_queue); qxs != NULL;
		     qxs = TAILQ_NEXT(qxs, channel_q)) {
			if (qxs->xs_periph == xs->xs_periph &&
			    qxs->xs_requeuecnt < xs->xs_requeuecnt)
				break;
		}
		if (qxs != NULL) {
			TAILQ_INSERT_AFTER(&chan->chan_queue, qxs, xs,
			    channel_q);
			goto out;
		}
	}
	TAILQ_INSERT_TAIL(&chan->chan_queue, xs, channel_q);
 out:
	if (xs->xs_control & XS_CTL_THAW_PERIPH)
		scsipi_periph_thaw(xs->xs_periph, 1);
	splx(s);
	return (0);
}

/*
 * scsipi_run_queue:
 *
 *	Start as many xfers as possible running on the channel.
 */
void
scsipi_run_queue(chan)
	struct scsipi_channel *chan;
{
	struct scsipi_xfer *xs;
	struct scsipi_periph *periph;
	int s;

	for (;;) {
		s = splbio();

		/*
		 * If the channel is frozen, we can't do any work right
		 * now.
		 */
		if (chan->chan_qfreeze != 0) {
			splx(s);
			return;
		}

		/*
		 * Look for work to do, and make sure we can do it.
		 */
		for (xs = TAILQ_FIRST(&chan->chan_queue); xs != NULL;
		     xs = TAILQ_NEXT(xs, channel_q)) {
			periph = xs->xs_periph;

			if ((periph->periph_sent >= periph->periph_openings) ||
			    periph->periph_qfreeze != 0 ||
			    (periph->periph_flags & PERIPH_UNTAG) != 0)
				continue;

			if ((periph->periph_flags &
			    (PERIPH_RECOVERING | PERIPH_SENSE)) != 0 &&
			    (xs->xs_control & XS_CTL_URGENT) == 0)
				continue;

			/*
			 * We can issue this xfer!
			 */
			goto got_one;
		}

		/*
		 * Can't find any work to do right now.
		 */
		splx(s);
		return;

 got_one:
		/*
		 * Have an xfer to run.  Allocate a resource from
		 * the adapter to run it.  If we can't allocate that
		 * resource, we don't dequeue the xfer.
		 */
		if (scsipi_get_resource(chan) == 0) {
			/*
			 * Adapter is out of resources.  If the adapter
			 * supports it, attempt to grow them.
			 */
			if (scsipi_grow_resources(chan) == 0) {
				/*
				 * Wasn't able to grow resources,
				 * nothing more we can do.
				 */
				if (xs->xs_control & XS_CTL_POLL) {
					scsipi_printaddr(xs->xs_periph);
					printf("polling command but no "
					    "adapter resources\n");
					/* We'll panic shortly... */
				}
				splx(s);

				/*
				 * XXX: We should be able to note that
				 * XXX: resources are needed here!
				 */
				return;
			}
			/*
			 * scsipi_grow_resources() allocated the resource
			 * for us.
			 */
		}

		/*
		 * We have a resource to run this xfer, do it!
		 */
		TAILQ_REMOVE(&chan->chan_queue, xs, channel_q);

		/*
		 * If the command is to be tagged, allocate a tag ID
		 * for it.
		 */
		if (XS_CTL_TAGTYPE(xs) != 0)
			scsipi_get_tag(xs);
		else
			periph->periph_flags |= PERIPH_UNTAG;
		periph->periph_sent++;
		splx(s);

		scsipi_adapter_request(chan, ADAPTER_REQ_RUN_XFER, xs);
	}
#ifdef DIAGNOSTIC
	panic("scsipi_run_queue: impossible");
#endif
}

/*
 * scsipi_execute_xs:
 *
 *	Begin execution of an xfer, waiting for it to complete, if necessary.
 */
int
scsipi_execute_xs(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_periph *periph = xs->xs_periph;
	struct scsipi_channel *chan = periph->periph_channel;
	int async, poll, retries, error, s;

	xs->xs_status &= ~XS_STS_DONE;
	xs->error = XS_NOERROR;
	xs->resid = xs->datalen;
	xs->status = SCSI_OK;

#ifdef SCSIPI_DEBUG
	if (xs->xs_periph->periph_dbflags & SCSIPI_DB3) {
		printf("scsipi_execute_xs: ");
		show_scsipi_xs(xs);
		printf("\n");
	}
#endif

	/*
	 * Deal with command tagging:
	 *
	 *	- If the device's current operating mode doesn't
	 *	  include tagged queueing, clear the tag mask.
	 *
	 *	- If the device's current operating mode *does*
	 *	  include tagged queueing, set the tag_type in
	 *	  the xfer to the appropriate byte for the tag
	 *	  message.
	 */
	if ((PERIPH_XFER_MODE(periph) & PERIPH_CAP_TQING) == 0 ||
	    (xs->xs_control & XS_CTL_REQSENSE)) {
		xs->xs_control &= ~XS_CTL_TAGMASK;
		xs->xs_tag_type = 0;
	} else {
		/*
		 * If the request doesn't specify a tag, give Head
		 * tags to URGENT operations and Ordered tags to
		 * everything else.
		 */
		if (XS_CTL_TAGTYPE(xs) == 0) {
			if (xs->xs_control & XS_CTL_URGENT)
				xs->xs_control |= XS_CTL_HEAD_TAG;
			else
				xs->xs_control |= XS_CTL_ORDERED_TAG;
		}

		switch (XS_CTL_TAGTYPE(xs)) {
		case XS_CTL_ORDERED_TAG:
			xs->xs_tag_type = MSG_ORDERED_Q_TAG;
			break;

		case XS_CTL_SIMPLE_TAG:
			xs->xs_tag_type = MSG_SIMPLE_Q_TAG;
			break;

		case XS_CTL_HEAD_TAG:
			xs->xs_tag_type = MSG_HEAD_OF_Q_TAG;
			break;

		default:
			scsipi_printaddr(periph);
			printf("invalid tag mask 0x%08x\n",
			    XS_CTL_TAGTYPE(xs));
			panic("scsipi_execute_xs");
		}
	}

	/* If the adapter wants us to poll, poll. */
	if (chan->chan_adapter->adapt_flags & SCSIPI_ADAPT_POLL_ONLY)
		xs->xs_control |= XS_CTL_POLL;

	/*
	 * If we don't yet have a completion thread, or we are to poll for
	 * completion, clear the ASYNC flag.
	 */
	if (chan->chan_thread == NULL || (xs->xs_control & XS_CTL_POLL) != 0)
		xs->xs_control &= ~XS_CTL_ASYNC;

	async = (xs->xs_control & XS_CTL_ASYNC);
	poll = (xs->xs_control & XS_CTL_POLL);
	retries = xs->xs_retries;	/* for polling commands */

#ifdef DIAGNOSTIC
	if (async != 0 && xs->bp == NULL)
		panic("scsipi_execute_xs: XS_CTL_ASYNC but no buf");
#endif

	/*
	 * Enqueue the transfer.  If we're not polling for completion, this
	 * should ALWAYS return `no error'.
	 */
 try_again:
	error = scsipi_enqueue(xs);
	if (error) {
		if (poll == 0) {
			scsipi_printaddr(periph);
			printf("not polling, but enqueue failed with %d\n",
			    error);
			panic("scsipi_execute_xs");
		}

		scsipi_printaddr(periph);
		printf("failed to enqueue polling command");
		if (retries != 0) {
			printf(", retrying...\n");
			delay(1000000);
			retries--;
			goto try_again;
		}
		printf("\n");
		goto free_xs;
	}

 restarted:
	scsipi_run_queue(chan);

	/*
	 * The xfer is enqueued, and possibly running.  If it's to be
	 * completed asynchronously, just return now.
	 */
	if (async)
		return (EJUSTRETURN);

	/*
	 * Not an asynchronous command; wait for it to complete.
	 */
	s = splbio();
	while ((xs->xs_status & XS_STS_DONE) == 0) {
		if (poll) {
			scsipi_printaddr(periph);
			printf("polling command not done\n");
			panic("scsipi_execute_xs");
		}
		(void) tsleep(xs, PRIBIO, "xscmd", 0);
	}
	splx(s);

	/*
	 * Command is complete.  scsipi_done() has awakened us to perform
	 * the error handling.
	 */
	error = scsipi_complete(xs);
	if (error == ERESTART)
		goto restarted;

	/*
	 * Command completed successfully or fatal error occurred.  Fall
	 * into....
	 */
 free_xs:
	s = splbio();
	scsipi_put_xs(xs);
	splx(s);

	/*
	 * Kick the queue, keep it running in case it stopped for some
	 * reason.
	 */
	scsipi_run_queue(chan);

	return (error);
}

/*
 * scsipi_completion_thread:
 *
 *	This is the completion thread.  We wait for errors on
 *	asynchronous xfers, and perform the error handling
 *	function, restarting the command, if necessary.
 */
void
scsipi_completion_thread(arg)
	void *arg;
{
	struct scsipi_channel *chan = arg;
	struct scsipi_xfer *xs;
	int s;

	s = splbio();
	chan->chan_flags |= SCSIPI_CHAN_TACTIVE;
	splx(s);
	for (;;) {
		s = splbio();
		xs = TAILQ_FIRST(&chan->chan_complete);
		if (xs == NULL && chan->chan_tflags == 0) {
			/* nothing to do; wait */
			(void) tsleep(&chan->chan_complete, PRIBIO,
			    "sccomp", 0);
			splx(s);
			continue;
		}
		if (chan->chan_tflags & SCSIPI_CHANT_CALLBACK) {
			/* call chan_callback from thread context */
			chan->chan_tflags &= ~SCSIPI_CHANT_CALLBACK;
			chan->chan_callback(chan, chan->chan_callback_arg);
			splx(s);
			continue;
		}
		if (chan->chan_tflags & SCSIPI_CHANT_GROWRES) {
			/* attempt to get more openings for this channel */
			chan->chan_tflags &= ~SCSIPI_CHANT_GROWRES;
			scsipi_adapter_request(chan,
			    ADAPTER_REQ_GROW_RESOURCES, NULL);
			scsipi_channel_thaw(chan, 1);
			splx(s);
			continue;
		}
		if (chan->chan_tflags & SCSIPI_CHANT_KICK) {
			/* explicitly run the queues for this channel */
			chan->chan_tflags &= ~SCSIPI_CHANT_KICK;
			scsipi_run_queue(chan);
			splx(s);
			continue;
		}
		if (chan->chan_tflags & SCSIPI_CHANT_SHUTDOWN) {
			splx(s);
			break;
		}
		if (xs) {
			TAILQ_REMOVE(&chan->chan_complete, xs, channel_q);
			splx(s);

			/*
			 * Have an xfer with an error; process it.
			 */
			(void) scsipi_complete(xs);

			/*
			 * Kick the queue; keep it running if it was stopped
			 * for some reason.
			 */
			scsipi_run_queue(chan);
		} else {
			splx(s);
		}
	}

	chan->chan_thread = NULL;

	/* In case parent is waiting for us to exit. */
	wakeup(&chan->chan_thread);

	kthread_exit(0);
}

/*
 * scsipi_create_completion_thread:
 *
 *	Callback to actually create the completion thread.
 */
void
scsipi_create_completion_thread(arg)
	void *arg;
{
	struct scsipi_channel *chan = arg;
	struct scsipi_adapter *adapt = chan->chan_adapter;

	if (kthread_create1(scsipi_completion_thread, chan,
	    &chan->chan_thread, "%s:%d", adapt->adapt_dev->dv_xname,
	    chan->chan_channel)) {
		printf("%s: unable to create completion thread for "
		    "channel %d\n", adapt->adapt_dev->dv_xname,
		    chan->chan_channel);
		panic("scsipi_create_completion_thread");
	}
}

/*
 * scsipi_thread_call_callback:
 *
 *	Request to call a callback from the completion thread.
 */
int
scsipi_thread_call_callback(chan, callback, arg)
	struct scsipi_channel *chan;
	void (*callback) __P((struct scsipi_channel *, void *));
	void *arg;
{
	int s;

	s = splbio();
	if ((chan->chan_flags & SCSIPI_CHAN_TACTIVE) == 0) {
		/* kernel thread doesn't exist yet */
		splx(s);
		return (ESRCH);
	}
	if (chan->chan_tflags & SCSIPI_CHANT_CALLBACK) {
		splx(s);
		return (EBUSY);
	}
	scsipi_channel_freeze(chan, 1);
	chan->chan_callback = callback;
	chan->chan_callback_arg = arg;
	chan->chan_tflags |= SCSIPI_CHANT_CALLBACK;
	wakeup(&chan->chan_complete);
	splx(s);
	return (0);
}
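
/*
 * Illustrative sketch (not part of the driver): an adapter that must
 * do non-interrupt-safe work can hand a callback to the completion
 * thread with scsipi_thread_call_callback().  The callback below is
 * hypothetical.  Guarded by SCSIPI_EXAMPLE, which is never defined;
 * a hedged example only.
 */
#ifdef SCSIPI_EXAMPLE
static void
example_thread_cb(struct scsipi_channel *chan, void *arg)
{

	/* Runs in the completion thread, with the channel frozen. */
}

static void
example_queue_cb(struct scsipi_channel *chan)
{

	/* ESRCH: no thread yet; EBUSY: a callback is already queued. */
	if (scsipi_thread_call_callback(chan, example_thread_cb, NULL) != 0)
		printf("callback not queued\n");
}
#endif /* SCSIPI_EXAMPLE */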

/*
 * scsipi_async_event:
 *
 *	Handle an asynchronous event from an adapter.
 */
void
scsipi_async_event(chan, event, arg)
	struct scsipi_channel *chan;
	scsipi_async_event_t event;
	void *arg;
{
	int s;

	s = splbio();
	switch (event) {
	case ASYNC_EVENT_MAX_OPENINGS:
		scsipi_async_event_max_openings(chan,
		    (struct scsipi_max_openings *)arg);
		break;

	case ASYNC_EVENT_XFER_MODE:
		scsipi_async_event_xfer_mode(chan,
		    (struct scsipi_xfer_mode *)arg);
		break;

	case ASYNC_EVENT_RESET:
		scsipi_async_event_channel_reset(chan);
		break;
	}
	splx(s);
}

/*
 * scsipi_print_xfer_mode:
 *
 *	Print a periph's capabilities.
 */
void
scsipi_print_xfer_mode(periph)
	struct scsipi_periph *periph;
{
	int period, freq, speed, mbs;

	if ((periph->periph_flags & PERIPH_MODE_VALID) == 0)
		return;

	printf("%s: ", periph->periph_dev->dv_xname);
	if (periph->periph_mode & PERIPH_CAP_SYNC) {
		period = scsipi_sync_factor_to_period(periph->periph_period);
		printf("sync (%d.%dns offset %d)",
		    period / 10, period % 10, periph->periph_offset);
	} else
		printf("async");

	if (periph->periph_mode & PERIPH_CAP_WIDE32)
		printf(", 32-bit");
	else if (periph->periph_mode & PERIPH_CAP_WIDE16)
		printf(", 16-bit");
	else
		printf(", 8-bit");

	if (periph->periph_mode & PERIPH_CAP_SYNC) {
		freq = scsipi_sync_factor_to_freq(periph->periph_period);
		speed = freq;
		if (periph->periph_mode & PERIPH_CAP_WIDE32)
			speed *= 4;
		else if (periph->periph_mode & PERIPH_CAP_WIDE16)
			speed *= 2;
		mbs = speed / 1000;
		if (mbs > 0)
			printf(" (%d.%03dMB/s)", mbs, speed % 1000);
		else
			printf(" (%dKB/s)", speed % 1000);
	}

	printf(" transfers");

	if (periph->periph_mode & PERIPH_CAP_TQING)
		printf(", tagged queueing");

	printf("\n");
}
2179
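/*
* Worked example of the arithmetic above: with periph_period == 0x0c
* (50.0ns) and PERIPH_CAP_WIDE16 set, freq == 10000000 / 500 == 20000
* (kHz), speed == 40000 (KB/s), and the output looks like:
*
*	sd0: sync (50.0ns offset 15), 16-bit (40.000MB/s) transfers,
*	tagged queueing
*/
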
2180 /*
2181 * scsipi_async_event_max_openings:
2182 *
2183 * Update the maximum number of outstanding commands a
2184 * device may have.
2185 */
2186 void
2187 scsipi_async_event_max_openings(chan, mo)
2188 struct scsipi_channel *chan;
2189 struct scsipi_max_openings *mo;
2190 {
2191 struct scsipi_periph *periph;
2192 int minlun, maxlun;
2193
2194 if (mo->mo_lun == -1) {
2195 /*
2196 * Wildcarded; apply it to all LUNs.
2197 */
2198 minlun = 0;
2199 maxlun = chan->chan_nluns - 1;
2200 } else
2201 minlun = maxlun = mo->mo_lun;
2202
2203 for (; minlun <= maxlun; minlun++) {
2204 periph = scsipi_lookup_periph(chan, mo->mo_target, minlun);
2205 if (periph == NULL)
2206 continue;
2207
2208 if (mo->mo_openings < periph->periph_openings)
2209 periph->periph_openings = mo->mo_openings;
2210 else if (mo->mo_openings > periph->periph_openings &&
2211 (periph->periph_flags & PERIPH_GROW_OPENINGS) != 0)
2212 periph->periph_openings = mo->mo_openings;
2213 }
2214 }
2215
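/*
* A typical producer (sketch; `periph' and `n' are assumed from the
* caller's context): an error-recovery path that sees QUEUE FULL with
* n commands outstanding can clamp a device's openings.  Note that
* raising openings is only honoured when PERIPH_GROW_OPENINGS is set:
*
*	struct scsipi_max_openings mo;
*
*	mo.mo_target = periph->periph_target;
*	mo.mo_lun = periph->periph_lun;
*	mo.mo_openings = n;
*	scsipi_async_event(chan, ASYNC_EVENT_MAX_OPENINGS, &mo);
*/
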
2216 /*
2217 * scsipi_async_event_xfer_mode:
2218 *
2219 * Update the xfer mode for all periphs sharing the
2220 * specified I_T Nexus.
2221 */
2222 void
2223 scsipi_async_event_xfer_mode(chan, xm)
2224 struct scsipi_channel *chan;
2225 struct scsipi_xfer_mode *xm;
2226 {
2227 struct scsipi_periph *periph;
2228 int lun, announce, mode, period, offset;
2229
2230 for (lun = 0; lun < chan->chan_nluns; lun++) {
2231 periph = scsipi_lookup_periph(chan, xm->xm_target, lun);
2232 if (periph == NULL)
2233 continue;
2234 announce = 0;
2235
2236 /*
2237 * Clamp the xfer mode down to this periph's capabilities.
2238 */
2239 mode = xm->xm_mode & periph->periph_cap;
2240 if (mode & PERIPH_CAP_SYNC) {
2241 period = xm->xm_period;
2242 offset = xm->xm_offset;
2243 } else {
2244 period = 0;
2245 offset = 0;
2246 }
2247
2248 /*
2249 * If we do not have a valid xfer mode yet, or the parameters
2250 * are different, announce them.
2251 */
2252 if ((periph->periph_flags & PERIPH_MODE_VALID) == 0 ||
2253 periph->periph_mode != mode ||
2254 periph->periph_period != period ||
2255 periph->periph_offset != offset)
2256 announce = 1;
2257
2258 periph->periph_mode = mode;
2259 periph->periph_period = period;
2260 periph->periph_offset = offset;
2261 periph->periph_flags |= PERIPH_MODE_VALID;
2262
2263 if (announce)
2264 scsipi_print_xfer_mode(periph);
2265 }
2266 }
2267
2268 /*
2269 * scsipi_set_xfer_mode:
2270 *
2271 * Set the xfer mode for the specified I_T Nexus.
2272 */
2273 void
2274 scsipi_set_xfer_mode(chan, target, immed)
2275 struct scsipi_channel *chan;
2276 int target, immed;
2277 {
2278 struct scsipi_xfer_mode xm;
2279 struct scsipi_periph *itperiph;
2280 int lun, s;
2281
2282 /*
2283 * Go to the minimal xfer mode.
2284 */
2285 xm.xm_target = target;
2286 xm.xm_mode = 0;
2287 xm.xm_period = 0; /* ignored */
2288 xm.xm_offset = 0; /* ignored */
2289
2290 /*
2291 * Find the first LUN we know about on this I_T Nexus.
2292 */
2293 for (lun = 0; lun < chan->chan_nluns; lun++) {
2294 itperiph = scsipi_lookup_periph(chan, target, lun);
2295 if (itperiph != NULL)
2296 break;
2297 }
2298 if (itperiph != NULL) {
2299 xm.xm_mode = itperiph->periph_cap;
2300 /*
2301 * Now issue the request to the adapter.
2302 */
2303 s = splbio();
2304 scsipi_adapter_request(chan, ADAPTER_REQ_SET_XFER_MODE, &xm);
2305 splx(s);
2306 /*
2307 * If we want this to happen immediately, issue a dummy
2308 * command, since most adapters can't really negotiate unless
2309 * they're executing a job.
2310 */
2311 if (immed != 0) {
2312 (void) scsipi_test_unit_ready(itperiph,
2313 XS_CTL_DISCOVERY | XS_CTL_IGNORE_ILLEGAL_REQUEST |
2314 XS_CTL_IGNORE_NOT_READY |
2315 XS_CTL_IGNORE_MEDIA_CHANGE);
2316 }
2317 }
2318 }
2319
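/*
* Example (sketch): renegotiate target 2 to the best mode its periphs
* advertise, issuing the dummy TEST UNIT READY immediately so the
* adapter has a job on which to carry out the negotiation:
*
*	scsipi_set_xfer_mode(chan, 2, 1);
*/
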
2320 /*
2321 * scsipi_async_event_channel_reset:
2322 *
2323 *	Handle a SCSI bus reset.
2324 *	Called at splbio.
2325 */
2326 void
2327 scsipi_async_event_channel_reset(chan)
2328 struct scsipi_channel *chan;
2329 {
2330 struct scsipi_xfer *xs, *xs_next;
2331 struct scsipi_periph *periph;
2332 int target, lun;
2333
2334 /*
2335 * The channel has been reset.  Also mark any pending REQUEST_SENSE
2336 * commands as reset, since their sense data is no longer valid.  We
2337 * can't call scsipi_done() from here, as the commands have not been
2338 * sent to the adapter yet (this would corrupt the accounting).
2339 */
2340
2341 for (xs = TAILQ_FIRST(&chan->chan_queue); xs != NULL; xs = xs_next) {
2342 xs_next = TAILQ_NEXT(xs, channel_q);
2343 if (xs->xs_control & XS_CTL_REQSENSE) {
2344 TAILQ_REMOVE(&chan->chan_queue, xs, channel_q);
2345 xs->error = XS_RESET;
2346 if ((xs->xs_control & XS_CTL_ASYNC) != 0)
2347 TAILQ_INSERT_TAIL(&chan->chan_complete, xs,
2348 channel_q);
2349 }
2350 }
2351 wakeup(&chan->chan_complete);
2352 /* Catch xfers with pending sense that may not have a REQSENSE xs yet */
2353 for (target = 0; target < chan->chan_ntargets; target++) {
2354 if (target == chan->chan_id)
2355 continue;
2356 for (lun = 0; lun < chan->chan_nluns; lun++) {
2357 periph = chan->chan_periphs[target][lun];
2358 if (periph) {
2359 xs = periph->periph_xscheck;
2360 if (xs)
2361 xs->error = XS_RESET;
2362 }
2363 }
2364 }
2365 }
2366
2367 /*
2368 * scsipi_target_detach:
2369 *
2370 *	Detach all periphs associated with an I_T Nexus.
2371 *	Must be called from a valid thread context.
2372 */
2373 int
2374 scsipi_target_detach(chan, target, lun, flags)
2375 struct scsipi_channel *chan;
2376 int target, lun;
2377 int flags;
2378 {
2379 struct scsipi_periph *periph;
2380 int ctarget, mintarget, maxtarget;
2381 int clun, minlun, maxlun;
2382 int error;
2383
2384 if (target == -1) {
2385 mintarget = 0;
2386 maxtarget = chan->chan_ntargets;
2387 } else {
2388 if (target == chan->chan_id)
2389 return EINVAL;
2390 if (target < 0 || target >= chan->chan_ntargets)
2391 return EINVAL;
2392 mintarget = target;
2393 maxtarget = target + 1;
2394 }
2395
2396 if (lun == -1) {
2397 minlun = 0;
2398 maxlun = chan->chan_nluns;
2399 } else {
2400 if (lun < 0 || lun >= chan->chan_nluns)
2401 return EINVAL;
2402 minlun = lun;
2403 maxlun = lun + 1;
2404 }
2405
2406 for (ctarget = mintarget; ctarget < maxtarget; ctarget++) {
2407 if (ctarget == chan->chan_id)
2408 continue;
2409
2410 for (clun = minlun; clun < maxlun; clun++) {
2411 periph = scsipi_lookup_periph(chan, ctarget, clun);
2412 if (periph == NULL)
2413 continue;
2414 error = config_detach(periph->periph_dev, flags);
2415 if (error)
2416 return (error);
2417 scsipi_remove_periph(chan, periph);
2418 free(periph, M_DEVBUF);
2419 }
2420 }
2421 return(0);
2422 }
2423
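/*
* Example (sketch): force-detach every configured periph on target 3;
* -1 wildcards a target or LUN as described above, and DETACH_FORCE is
* the config_detach() flag from <sys/device.h>:
*
*	error = scsipi_target_detach(chan, 3, -1, DETACH_FORCE);
*/
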
2424 /*
2425 * scsipi_adapter_addref:
2426 *
2427 *	Add a reference to the specified adapter, enabling the
2428 *	adapter if necessary.
2429 */
2430 int
2431 scsipi_adapter_addref(adapt)
2432 struct scsipi_adapter *adapt;
2433 {
2434 int s, error = 0;
2435
2436 s = splbio();
2437 if (adapt->adapt_refcnt++ == 0 && adapt->adapt_enable != NULL) {
2438 error = (*adapt->adapt_enable)(adapt->adapt_dev, 1);
2439 if (error)
2440 adapt->adapt_refcnt--;
2441 }
2442 splx(s);
2443 return (error);
2444 }
2445
2446 /*
2447 * scsipi_adapter_delref:
2448 *
2449 *	Delete a reference to the specified adapter, disabling the
2450 *	adapter when the last reference is dropped.
2451 */
2452 void
2453 scsipi_adapter_delref(adapt)
2454 struct scsipi_adapter *adapt;
2455 {
2456 int s;
2457
2458 s = splbio();
2459 if (adapt->adapt_refcnt-- == 1 && adapt->adapt_enable != NULL)
2460 (void) (*adapt->adapt_enable)(adapt->adapt_dev, 0);
2461 splx(s);
2462 }
2463
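/*
* The addref/delref pair is meant to bracket use of the adapter, for
* example around a periph driver's open/close (sketch):
*
*	if ((error = scsipi_adapter_addref(adapt)) != 0)
*		return (error);
*	... adapter is enabled; do I/O ...
*	scsipi_adapter_delref(adapt);
*
* Only the 0->1 and 1->0 reference transitions call adapt_enable().
*/
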
2464 struct scsipi_syncparam {
2465 int ss_factor;
2466 int ss_period; /* ns * 10 */
2467 } scsipi_syncparams[] = {
2468 { 0x09, 125 },
2469 { 0x0a, 250 },
2470 { 0x0b, 303 },
2471 { 0x0c, 500 },
2472 };
2473 const int scsipi_nsyncparams =
2474 sizeof(scsipi_syncparams) / sizeof(scsipi_syncparams[0]);
2475
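/*
* Since ss_period is in units of ns * 10, the table reads: factor
* 0x09 == 12.5ns (80MHz), 0x0a == 25.0ns (40MHz), 0x0b == 30.3ns
* (33MHz), 0x0c == 50.0ns (20MHz).  Factors outside the table fall
* back to the linear encoding of factor * 4 ns in the helpers below.
*/
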
2476 int
2477 scsipi_sync_period_to_factor(period)
2478 int period; /* ns * 10 */
2479 {
2480 int i;
2481
2482 for (i = 0; i < scsipi_nsyncparams; i++) {
2483 if (period <= scsipi_syncparams[i].ss_period)
2484 return (scsipi_syncparams[i].ss_factor);
2485 }
2486
2487 return ((period / 10) / 4);
2488 }
2489
2490 int
2491 scsipi_sync_factor_to_period(factor)
2492 int factor;
2493 {
2494 int i;
2495
2496 for (i = 0; i < scsipi_nsyncparams; i++) {
2497 if (factor == scsipi_syncparams[i].ss_factor)
2498 return (scsipi_syncparams[i].ss_period);
2499 }
2500
2501 return ((factor * 4) * 10);
2502 }
2503
2504 int
2505 scsipi_sync_factor_to_freq(factor)
2506 int factor;
2507 {
2508 int i;
2509
2510 for (i = 0; i < scsipi_nsyncparams; i++) {
2511 if (factor == scsipi_syncparams[i].ss_factor)
2512 return (10000000 / scsipi_syncparams[i].ss_period);
2513 }
2514
2515 return (10000000 / ((factor * 4) * 10));
2516 }
2517
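/*
* Worked examples of the three conversions above:
*
*	scsipi_sync_period_to_factor(250) == 0x0a	(25.0ns)
*	scsipi_sync_factor_to_period(0x0a) == 250	(ns * 10)
*	scsipi_sync_factor_to_freq(0x0a) == 40000	(kHz)
*
* and through the linear fallback: a 100.0ns period (1000) maps to
* factor 0x19, and factor 0x19 maps back to 1000.
*/
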
2518 #ifdef SCSIPI_DEBUG
2519 /*
2520 * Given a scsipi_xfer, dump the request, in all its glory
2521 */
2522 void
2523 show_scsipi_xs(xs)
2524 struct scsipi_xfer *xs;
2525 {
2526
2527 printf("xs(%p): ", xs);
2528 printf("xs_control(0x%08x)", xs->xs_control);
2529 printf("xs_status(0x%08x)", xs->xs_status);
2530 printf("periph(%p)", xs->xs_periph);
2531 printf("retr(0x%x)", xs->xs_retries);
2532 printf("timo(0x%x)", xs->timeout);
2533 printf("cmd(%p)", xs->cmd);
2534 printf("len(0x%x)", xs->cmdlen);
2535 printf("data(%p)", xs->data);
2536 printf("len(0x%x)", xs->datalen);
2537 printf("res(0x%x)", xs->resid);
2538 printf("err(0x%x)", xs->error);
2539 printf("bp(%p)", xs->bp);
2540 show_scsipi_cmd(xs);
2541 }
2542
2543 void
2544 show_scsipi_cmd(xs)
2545 struct scsipi_xfer *xs;
2546 {
2547 u_char *b = (u_char *) xs->cmd;
2548 int i = 0;
2549
2550 scsipi_printaddr(xs->xs_periph);
2551 printf(" command: ");
2552
2553 if ((xs->xs_control & XS_CTL_RESET) == 0) {
2554 while (i < xs->cmdlen) {
2555 if (i)
2556 printf(",");
2557 printf("0x%x", b[i++]);
2558 }
2559 printf("-[%d bytes]\n", xs->datalen);
2560 if (xs->datalen)
2561 show_mem(xs->data, min(64, xs->datalen));
2562 } else
2563 printf("-RESET-\n");
2564 }
2565
2566 void
2567 show_mem(address, num)
2568 u_char *address;
2569 int num;
2570 {
2571 int x;
2572
2573 printf("------------------------------");
2574 for (x = 0; x < num; x++) {
2575 if ((x % 16) == 0)
2576 printf("\n%03d: ", x);
2577 printf("%02x ", *address++);
2578 }
2579 printf("\n------------------------------\n");
2580 }
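
/*
* For reference, show_mem() output has this shape (bytes illustrative):
*
*	------------------------------
*	000: 00 08 00 2e 00 00 00 00 00 00 00 00 00 00 00 00
*	016: 00 00 00 00 00 00 00 00
*	------------------------------
*/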
2581 #endif /* SCSIPI_DEBUG */
2582