/*	$NetBSD: scsipi_base.c,v 1.60 2001/10/14 20:31:24 bouyer Exp $	*/

/*-
 * Copyright (c) 1998, 1999, 2000 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Charles M. Hannum; by Jason R. Thorpe of the Numerical Aerospace
 * Simulation Facility, NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "opt_scsi.h"

#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/buf.h>
#include <sys/uio.h>
#include <sys/malloc.h>
#include <sys/pool.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/proc.h>
#include <sys/kthread.h>

#include <dev/scsipi/scsipi_all.h>
#include <dev/scsipi/scsipi_disk.h>
#include <dev/scsipi/scsipiconf.h>
#include <dev/scsipi/scsipi_base.h>

#include <dev/scsipi/scsi_all.h>
#include <dev/scsipi/scsi_message.h>

int	scsipi_complete __P((struct scsipi_xfer *));
void	scsipi_request_sense __P((struct scsipi_xfer *));
int	scsipi_enqueue __P((struct scsipi_xfer *));
void	scsipi_run_queue __P((struct scsipi_channel *chan));

void	scsipi_completion_thread __P((void *));

void	scsipi_get_tag __P((struct scsipi_xfer *));
void	scsipi_put_tag __P((struct scsipi_xfer *));

int	scsipi_get_resource __P((struct scsipi_channel *));
void	scsipi_put_resource __P((struct scsipi_channel *));
__inline int scsipi_grow_resources __P((struct scsipi_channel *));

void	scsipi_async_event_max_openings __P((struct scsipi_channel *,
	    struct scsipi_max_openings *));
void	scsipi_async_event_xfer_mode __P((struct scsipi_channel *,
	    struct scsipi_xfer_mode *));
void	scsipi_async_event_channel_reset __P((struct scsipi_channel *));

struct pool scsipi_xfer_pool;

/*
 * scsipi_init:
 *
 *	Called when a scsibus or atapibus is attached to the system
 *	to initialize shared data structures.
 */
void
scsipi_init()
{
	static int scsipi_init_done;

	if (scsipi_init_done)
		return;
	scsipi_init_done = 1;

	/* Initialize the scsipi_xfer pool. */
	pool_init(&scsipi_xfer_pool, sizeof(struct scsipi_xfer), 0,
	    0, 0, "scxspl", 0, NULL, NULL, M_DEVBUF);
}

/*
 * scsipi_channel_init:
 *
 *	Initialize a scsipi_channel when it is attached.
 */
int
scsipi_channel_init(chan)
	struct scsipi_channel *chan;
{
	size_t nbytes;
	int i;

	/* Initialize shared data. */
	scsipi_init();

	/* Initialize the queues. */
	TAILQ_INIT(&chan->chan_queue);
	TAILQ_INIT(&chan->chan_complete);

	nbytes = chan->chan_ntargets * sizeof(struct scsipi_periph **);
	chan->chan_periphs = malloc(nbytes, M_DEVBUF, M_NOWAIT);
	if (chan->chan_periphs == NULL)
		return (ENOMEM);

	nbytes = chan->chan_nluns * sizeof(struct scsipi_periph *);
	for (i = 0; i < chan->chan_ntargets; i++) {
		chan->chan_periphs[i] = malloc(nbytes, M_DEVBUF, M_NOWAIT);
		if (chan->chan_periphs[i] == NULL) {
			while (--i >= 0) {
				free(chan->chan_periphs[i], M_DEVBUF);
			}
			return (ENOMEM);
		}
		memset(chan->chan_periphs[i], 0, nbytes);
	}

	/*
	 * Create the asynchronous completion thread.
	 */
	kthread_create(scsipi_create_completion_thread, chan);
	return (0);
}

/*
 * scsipi_channel_shutdown:
 *
 *	Shutdown a scsipi_channel.
 */
void
scsipi_channel_shutdown(chan)
	struct scsipi_channel *chan;
{

	/*
	 * Shut down the completion thread.
	 */
	chan->chan_tflags |= SCSIPI_CHANT_SHUTDOWN;
	wakeup(&chan->chan_complete);

	/*
	 * Now wait for the thread to exit.
	 */
	while (chan->chan_thread != NULL)
		(void) tsleep(&chan->chan_thread, PRIBIO, "scshut", 0);
}

/*
 * scsipi_insert_periph:
 *
 *	Insert a periph into the channel.
 */
void
scsipi_insert_periph(chan, periph)
	struct scsipi_channel *chan;
	struct scsipi_periph *periph;
{
	int s;

	s = splbio();
	chan->chan_periphs[periph->periph_target][periph->periph_lun] = periph;
	splx(s);
}

/*
 * scsipi_remove_periph:
 *
 *	Remove a periph from the channel.
 */
void
scsipi_remove_periph(chan, periph)
	struct scsipi_channel *chan;
	struct scsipi_periph *periph;
{
	int s;

	s = splbio();
	chan->chan_periphs[periph->periph_target][periph->periph_lun] = NULL;
	splx(s);
}

/*
 * scsipi_lookup_periph:
 *
 *	Lookup a periph on the specified channel.
 */
struct scsipi_periph *
scsipi_lookup_periph(chan, target, lun)
	struct scsipi_channel *chan;
	int target, lun;
{
	struct scsipi_periph *periph;
	int s;

	if (target >= chan->chan_ntargets ||
	    lun >= chan->chan_nluns)
		return (NULL);

	s = splbio();
	periph = chan->chan_periphs[target][lun];
	splx(s);

	return (periph);
}

/*
 * scsipi_get_resource:
 *
 *	Allocate a single xfer `resource' from the channel.
 *
 *	NOTE: Must be called at splbio().
 */
int
scsipi_get_resource(chan)
	struct scsipi_channel *chan;
{
	struct scsipi_adapter *adapt = chan->chan_adapter;

	if (chan->chan_flags & SCSIPI_CHAN_OPENINGS) {
		if (chan->chan_openings > 0) {
			chan->chan_openings--;
			return (1);
		}
		return (0);
	}

	if (adapt->adapt_openings > 0) {
		adapt->adapt_openings--;
		return (1);
	}
	return (0);
}

/*
 * scsipi_grow_resources:
 *
 *	Attempt to grow resources for a channel.  If this succeeds,
 *	we allocate one for our caller.
 *
 *	NOTE: Must be called at splbio().
 */
__inline int
scsipi_grow_resources(chan)
	struct scsipi_channel *chan;
{

	if (chan->chan_flags & SCSIPI_CHAN_CANGROW) {
		if ((chan->chan_flags & SCSIPI_CHAN_TACTIVE) == 0) {
			scsipi_adapter_request(chan,
			    ADAPTER_REQ_GROW_RESOURCES, NULL);
			return (scsipi_get_resource(chan));
		}
		/*
		 * Otherwise, ask the channel thread to do it; it will
		 * have to thaw the queue once it has grown the
		 * resources.
		 */
		scsipi_channel_freeze(chan, 1);
		chan->chan_tflags |= SCSIPI_CHANT_GROWRES;
		wakeup(&chan->chan_complete);
		return (0);
	}

	return (0);
}

/*
 * scsipi_put_resource:
 *
 *	Free a single xfer `resource' to the channel.
 *
 *	NOTE: Must be called at splbio().
 */
void
scsipi_put_resource(chan)
	struct scsipi_channel *chan;
{
	struct scsipi_adapter *adapt = chan->chan_adapter;

	if (chan->chan_flags & SCSIPI_CHAN_OPENINGS)
		chan->chan_openings++;
	else
		adapt->adapt_openings++;
}

/*
 * scsipi_get_tag:
 *
 *	Get a tag ID for the specified xfer.
 *
 *	NOTE: Must be called at splbio().
 */
void
scsipi_get_tag(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_periph *periph = xs->xs_periph;
	int word, bit, tag;

	for (word = 0; word < PERIPH_NTAGWORDS; word++) {
		bit = ffs(periph->periph_freetags[word]);
		if (bit != 0)
			break;
	}
#ifdef DIAGNOSTIC
	if (word == PERIPH_NTAGWORDS) {
		scsipi_printaddr(periph);
		printf("no free tags\n");
		panic("scsipi_get_tag");
	}
#endif

	bit -= 1;
	periph->periph_freetags[word] &= ~(1 << bit);
	tag = (word << 5) | bit;

	/* XXX Should eventually disallow this completely. */
	if (tag >= periph->periph_openings) {
		scsipi_printaddr(periph);
		printf("WARNING: tag %d greater than available openings %d\n",
		    tag, periph->periph_openings);
	}

	xs->xs_tag_id = tag;
}
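
/*
 * Illustrative note (an editorial sketch, not from the original
 * source): with the 5-bit encoding above, tag ID 37 decodes as
 * word = 37 >> 5 = 1 and bit = 37 & 0x1f = 5, i.e. bit 5 of
 * periph_freetags[1].  The PERIPH_NTAGWORDS 32-bit words therefore
 * cover tag IDs 0 through (PERIPH_NTAGWORDS * 32) - 1, and
 * scsipi_put_tag() below simply reverses the mapping.
 */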

/*
 * scsipi_put_tag:
 *
 *	Put the tag ID for the specified xfer back into the pool.
 *
 *	NOTE: Must be called at splbio().
 */
void
scsipi_put_tag(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_periph *periph = xs->xs_periph;
	int word, bit;

	word = xs->xs_tag_id >> 5;
	bit = xs->xs_tag_id & 0x1f;

	periph->periph_freetags[word] |= (1 << bit);
}

/*
 * scsipi_get_xs:
 *
 *	Allocate an xfer descriptor and associate it with the
 *	specified peripheral.  If the peripheral has no more
 *	available command openings, we either block waiting for
 *	one to become available, or fail.
 */
struct scsipi_xfer *
scsipi_get_xs(periph, flags)
	struct scsipi_periph *periph;
	int flags;
{
	struct scsipi_xfer *xs;
	int s;

	SC_DEBUG(periph, SCSIPI_DB3, ("scsipi_get_xs\n"));

	/*
	 * If we're cold, make sure we poll.
	 */
	if (cold)
		flags |= XS_CTL_NOSLEEP | XS_CTL_POLL;

#ifdef DIAGNOSTIC
	/*
	 * URGENT commands can never be ASYNC.
	 */
	if ((flags & (XS_CTL_URGENT|XS_CTL_ASYNC)) ==
	    (XS_CTL_URGENT|XS_CTL_ASYNC)) {
		scsipi_printaddr(periph);
		printf("URGENT and ASYNC\n");
		panic("scsipi_get_xs");
	}
#endif

	s = splbio();
	/*
	 * Wait for a command opening to become available.  Rules:
	 *
	 *	- All xfers must wait for an available opening.
	 *	  Exception: URGENT xfers can proceed when
	 *	  active == openings, because we use the opening
	 *	  of the command we're recovering for.
	 *
	 *	- If the periph has sense pending, only URGENT & REQSENSE
	 *	  xfers may proceed.
	 *
	 *	- If the periph is recovering, only URGENT xfers may
	 *	  proceed.
	 *
	 *	- If the periph is currently executing a recovery
	 *	  command, URGENT commands must block, because only
	 *	  one recovery command can execute at a time.
	 */
	for (;;) {
		if (flags & XS_CTL_URGENT) {
			if (periph->periph_active > periph->periph_openings)
				goto wait_for_opening;
			if (periph->periph_flags & PERIPH_SENSE) {
				if ((flags & XS_CTL_REQSENSE) == 0)
					goto wait_for_opening;
			} else {
				if ((periph->periph_flags &
				    PERIPH_RECOVERY_ACTIVE) != 0)
					goto wait_for_opening;
				periph->periph_flags |= PERIPH_RECOVERY_ACTIVE;
			}
			break;
		}
		if (periph->periph_active >= periph->periph_openings ||
		    (periph->periph_flags & PERIPH_RECOVERING) != 0)
			goto wait_for_opening;
		periph->periph_active++;
		break;

 wait_for_opening:
		if (flags & XS_CTL_NOSLEEP) {
			splx(s);
			return (NULL);
		}
		SC_DEBUG(periph, SCSIPI_DB3, ("sleeping\n"));
		periph->periph_flags |= PERIPH_WAITING;
		(void) tsleep(periph, PRIBIO, "getxs", 0);
	}
	SC_DEBUG(periph, SCSIPI_DB3, ("calling pool_get\n"));
	xs = pool_get(&scsipi_xfer_pool,
	    ((flags & XS_CTL_NOSLEEP) != 0 ? PR_NOWAIT : PR_WAITOK));
	if (xs == NULL) {
		if (flags & XS_CTL_URGENT) {
			if ((flags & XS_CTL_REQSENSE) == 0)
				periph->periph_flags &= ~PERIPH_RECOVERY_ACTIVE;
		} else
			periph->periph_active--;
		scsipi_printaddr(periph);
		printf("unable to allocate %sscsipi_xfer\n",
		    (flags & XS_CTL_URGENT) ? "URGENT " : "");
	}
	splx(s);

	SC_DEBUG(periph, SCSIPI_DB3, ("returning\n"));

	if (xs != NULL) {
		/*
		 * Zero the xfer before initializing the callout, so
		 * that the memset doesn't clobber it.
		 */
		memset(xs, 0, sizeof(*xs));
		callout_init(&xs->xs_callout);
		xs->xs_periph = periph;
		xs->xs_control = flags;
		xs->xs_status = 0;
		s = splbio();
		TAILQ_INSERT_TAIL(&periph->periph_xferq, xs, device_q);
		splx(s);
	}
	return (xs);
}

/*
 * scsipi_put_xs:
 *
 *	Release an xfer descriptor, decreasing the outstanding command
 *	count for the peripheral.  If there is a thread waiting for
 *	an opening, wake it up.  If not, kick any queued I/O the
 *	peripheral may have.
 *
 *	NOTE: Must be called at splbio().
 */
void
scsipi_put_xs(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_periph *periph = xs->xs_periph;
	int flags = xs->xs_control;

	SC_DEBUG(periph, SCSIPI_DB3, ("scsipi_free_xs\n"));

	TAILQ_REMOVE(&periph->periph_xferq, xs, device_q);
	pool_put(&scsipi_xfer_pool, xs);

#ifdef DIAGNOSTIC
	if ((periph->periph_flags & PERIPH_RECOVERY_ACTIVE) != 0 &&
	    periph->periph_active == 0) {
		scsipi_printaddr(periph);
		printf("recovery without a command to recover for\n");
		panic("scsipi_put_xs");
	}
#endif

	if (flags & XS_CTL_URGENT) {
		if ((flags & XS_CTL_REQSENSE) == 0)
			periph->periph_flags &= ~PERIPH_RECOVERY_ACTIVE;
	} else
		periph->periph_active--;
	if (periph->periph_active == 0 &&
	    (periph->periph_flags & PERIPH_WAITDRAIN) != 0) {
		periph->periph_flags &= ~PERIPH_WAITDRAIN;
		wakeup(&periph->periph_active);
	}

	if (periph->periph_flags & PERIPH_WAITING) {
		periph->periph_flags &= ~PERIPH_WAITING;
		wakeup(periph);
	} else {
		if (periph->periph_switch->psw_start != NULL) {
			SC_DEBUG(periph, SCSIPI_DB2,
			    ("calling private start()\n"));
			(*periph->periph_switch->psw_start)(periph);
		}
	}
}

/*
 * scsipi_channel_freeze:
 *
 *	Freeze a channel's xfer queue.
 */
void
scsipi_channel_freeze(chan, count)
	struct scsipi_channel *chan;
	int count;
{
	int s;

	s = splbio();
	chan->chan_qfreeze += count;
	splx(s);
}

/*
 * scsipi_channel_thaw:
 *
 *	Thaw a channel's xfer queue.
 */
void
scsipi_channel_thaw(chan, count)
	struct scsipi_channel *chan;
	int count;
{
	int s;

	s = splbio();
	chan->chan_qfreeze -= count;
	/*
	 * Don't let the freeze count go negative.
	 *
	 * Presumably the adapter driver could keep track of this,
	 * but it might just be easier to do this here so as to allow
	 * multiple callers, including those outside the adapter driver.
	 */
	if (chan->chan_qfreeze < 0) {
		chan->chan_qfreeze = 0;
	}
	splx(s);
	/*
	 * Kick the channel's queue here.  Note, we may be running in
	 * interrupt context (softclock or HBA's interrupt), so the adapter
	 * driver had better not sleep.
	 */
	if (chan->chan_qfreeze == 0)
		scsipi_run_queue(chan);
}

/*
 * scsipi_channel_timed_thaw:
 *
 *	Thaw a channel after some time has expired.  This will also
 *	run the channel's queue if the freeze count has reached 0.
 */
void
scsipi_channel_timed_thaw(arg)
	void *arg;
{
	struct scsipi_channel *chan = arg;

	scsipi_channel_thaw(chan, 1);
}
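
/*
 * Usage sketch (illustrative only; sc and sc_callout are assumed
 * names for an adapter driver's softc and a callout it owns): a
 * driver that temporarily runs out of resources can freeze the
 * channel and schedule a timed thaw, which re-runs the queue once
 * the freeze count drops back to zero:
 *
 *	scsipi_channel_freeze(chan, 1);
 *	callout_reset(&sc->sc_callout, hz,
 *	    scsipi_channel_timed_thaw, chan);
 */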

/*
 * scsipi_periph_freeze:
 *
 *	Freeze a device's xfer queue.
 */
void
scsipi_periph_freeze(periph, count)
	struct scsipi_periph *periph;
	int count;
{
	int s;

	s = splbio();
	periph->periph_qfreeze += count;
	splx(s);
}

/*
 * scsipi_periph_thaw:
 *
 *	Thaw a device's xfer queue.
 */
void
scsipi_periph_thaw(periph, count)
	struct scsipi_periph *periph;
	int count;
{
	int s;

	s = splbio();
	periph->periph_qfreeze -= count;
#ifdef DIAGNOSTIC
	if (periph->periph_qfreeze < 0) {
		static const char pc[] = "periph freeze count < 0";
		scsipi_printaddr(periph);
		printf("%s\n", pc);
		panic(pc);
	}
#endif
	if (periph->periph_qfreeze == 0 &&
	    (periph->periph_flags & PERIPH_WAITING) != 0)
		wakeup(periph);
	splx(s);
}

/*
 * scsipi_periph_timed_thaw:
 *
 *	Thaw a device after some time has expired.
 */
void
scsipi_periph_timed_thaw(arg)
	void *arg;
{
	int s;
	struct scsipi_periph *periph = arg;

	callout_stop(&periph->periph_callout);

	s = splbio();
	scsipi_periph_thaw(periph, 1);
	if ((periph->periph_channel->chan_flags & SCSIPI_CHAN_TACTIVE) == 0) {
		/*
		 * Kick the channel's queue here.  Note, we're running in
		 * interrupt context (softclock), so the adapter driver
		 * had better not sleep.
		 */
		scsipi_run_queue(periph->periph_channel);
	} else {
		/*
		 * Tell the completion thread to kick the channel's queue here.
		 */
		periph->periph_channel->chan_tflags |= SCSIPI_CHANT_KICK;
		wakeup(&periph->periph_channel->chan_complete);
	}
	splx(s);
}

/*
 * scsipi_wait_drain:
 *
 *	Wait for a periph's pending xfers to drain.
 */
void
scsipi_wait_drain(periph)
	struct scsipi_periph *periph;
{
	int s;

	s = splbio();
	while (periph->periph_active != 0) {
		periph->periph_flags |= PERIPH_WAITDRAIN;
		(void) tsleep(&periph->periph_active, PRIBIO, "sxdrn", 0);
	}
	splx(s);
}

/*
 * scsipi_kill_pending:
 *
 *	Kill off all pending xfers for a periph.
 *
 *	NOTE: Must be called at splbio().
 */
void
scsipi_kill_pending(periph)
	struct scsipi_periph *periph;
{

	(*periph->periph_channel->chan_bustype->bustype_kill_pending)(periph);
#ifdef DIAGNOSTIC
	if (TAILQ_FIRST(&periph->periph_xferq) != NULL)
		panic("scsipi_kill_pending");
#endif
	scsipi_wait_drain(periph);
}

/*
 * scsipi_interpret_sense:
 *
 *	Look at the returned sense and act on the error, determining
 *	the unix error number to pass back.  (0 = report no error)
 *
 *	NOTE: If we return ERESTART, we are expected to have
 *	thawed the device!
 *
 *	THIS IS THE DEFAULT ERROR HANDLER FOR SCSI DEVICES.
 */
int
scsipi_interpret_sense(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_sense_data *sense;
	struct scsipi_periph *periph = xs->xs_periph;
	u_int8_t key;
	u_int32_t info;
	int error;
#ifndef	SCSIVERBOSE
	static char *error_mes[] = {
		"soft error (corrected)",
		"not ready", "medium error",
		"non-media hardware failure", "illegal request",
		"unit attention", "readonly device",
		"no data found", "vendor unique",
		"copy aborted", "command aborted",
		"search returned equal", "volume overflow",
		"verify miscompare", "unknown error key"
	};
#endif

	sense = &xs->sense.scsi_sense;
#ifdef SCSIPI_DEBUG
	if (periph->periph_dbflags & SCSIPI_DB1) {
		int count;
		scsipi_printaddr(periph);
		printf(" sense debug information:\n");
		printf("\tcode 0x%x valid 0x%x\n",
		    sense->error_code & SSD_ERRCODE,
		    sense->error_code & SSD_ERRCODE_VALID ? 1 : 0);
		printf("\tseg 0x%x key 0x%x ili 0x%x eom 0x%x fmark 0x%x\n",
		    sense->segment,
		    sense->flags & SSD_KEY,
		    sense->flags & SSD_ILI ? 1 : 0,
		    sense->flags & SSD_EOM ? 1 : 0,
		    sense->flags & SSD_FILEMARK ? 1 : 0);
		printf("\ninfo: 0x%x 0x%x 0x%x 0x%x followed by %d "
		    "extra bytes\n",
		    sense->info[0],
		    sense->info[1],
		    sense->info[2],
		    sense->info[3],
		    sense->extra_len);
		printf("\textra: ");
		for (count = 0; count < ADD_BYTES_LIM(sense); count++)
			printf("0x%x ", sense->cmd_spec_info[count]);
		printf("\n");
	}
#endif

	/*
	 * If the periph has its own error handler, call it first.
	 * If it returns a legit error value, return that, otherwise
	 * it wants us to continue with normal error processing.
	 */
	if (periph->periph_switch->psw_error != NULL) {
		SC_DEBUG(periph, SCSIPI_DB2,
		    ("calling private err_handler()\n"));
		error = (*periph->periph_switch->psw_error)(xs);
		if (error != EJUSTRETURN)
			return (error);
	}
	/* otherwise use the default */
	switch (sense->error_code & SSD_ERRCODE) {
		/*
		 * If it's code 70, use the extended stuff and
		 * interpret the key
		 */
	case 0x71:		/* delayed error */
		scsipi_printaddr(periph);
		key = sense->flags & SSD_KEY;
		printf(" DEFERRED ERROR, key = 0x%x\n", key);
		/* FALLTHROUGH */
	case 0x70:
		if ((sense->error_code & SSD_ERRCODE_VALID) != 0)
			info = _4btol(sense->info);
		else
			info = 0;
		key = sense->flags & SSD_KEY;

		switch (key) {
		case SKEY_NO_SENSE:
		case SKEY_RECOVERED_ERROR:
			if (xs->resid == xs->datalen && xs->datalen) {
				/*
				 * Why is this here?
				 */
				xs->resid = 0;	/* not short read */
			}
		case SKEY_EQUAL:
			error = 0;
			break;
		case SKEY_NOT_READY:
			if ((periph->periph_flags & PERIPH_REMOVABLE) != 0)
				periph->periph_flags &= ~PERIPH_MEDIA_LOADED;
			if ((xs->xs_control & XS_CTL_IGNORE_NOT_READY) != 0)
				return (0);
			if (sense->add_sense_code == 0x3A) {
				error = ENODEV; /* Medium not present */
				if (xs->xs_control & XS_CTL_SILENT_NODEV)
					return (error);
			} else
				error = EIO;
			if ((xs->xs_control & XS_CTL_SILENT) != 0)
				return (error);
			break;
		case SKEY_ILLEGAL_REQUEST:
			if ((xs->xs_control &
			    XS_CTL_IGNORE_ILLEGAL_REQUEST) != 0)
				return (0);
			/*
			 * Handle the case where a device reports
			 * Logical Unit Not Supported during discovery.
			 */
			if ((xs->xs_control & XS_CTL_DISCOVERY) != 0 &&
			    sense->add_sense_code == 0x25 &&
			    sense->add_sense_code_qual == 0x00)
				return (EINVAL);
			if ((xs->xs_control & XS_CTL_SILENT) != 0)
				return (EIO);
			error = EINVAL;
			break;
		case SKEY_UNIT_ATTENTION:
			if (sense->add_sense_code == 0x29 &&
			    sense->add_sense_code_qual == 0x00) {
				/* device or bus reset */
				return (ERESTART);
			}
			if ((periph->periph_flags & PERIPH_REMOVABLE) != 0)
				periph->periph_flags &= ~PERIPH_MEDIA_LOADED;
			if ((xs->xs_control &
			    XS_CTL_IGNORE_MEDIA_CHANGE) != 0 ||
			    /* XXX Should reupload any transient state. */
			    (periph->periph_flags &
			    PERIPH_REMOVABLE) == 0) {
				return (ERESTART);
			}
			if ((xs->xs_control & XS_CTL_SILENT) != 0)
				return (EIO);
			error = EIO;
			break;
		case SKEY_WRITE_PROTECT:
			error = EROFS;
			break;
		case SKEY_BLANK_CHECK:
			error = 0;
			break;
		case SKEY_ABORTED_COMMAND:
			error = ERESTART;
			break;
		case SKEY_VOLUME_OVERFLOW:
			error = ENOSPC;
			break;
		default:
			error = EIO;
			break;
		}

#ifdef SCSIVERBOSE
		if (key && (xs->xs_control & XS_CTL_SILENT) == 0)
			scsipi_print_sense(xs, 0);
#else
		if (key) {
			scsipi_printaddr(periph);
			printf("%s", error_mes[key - 1]);
			if ((sense->error_code & SSD_ERRCODE_VALID) != 0) {
				switch (key) {
				case SKEY_NOT_READY:
				case SKEY_ILLEGAL_REQUEST:
				case SKEY_UNIT_ATTENTION:
				case SKEY_WRITE_PROTECT:
					break;
				case SKEY_BLANK_CHECK:
					printf(", requested size: %d (decimal)",
					    info);
					break;
				case SKEY_ABORTED_COMMAND:
					if (xs->xs_retries)
						printf(", retrying");
					printf(", cmd 0x%x, info 0x%x",
					    xs->cmd->opcode, info);
					break;
				default:
					printf(", info = %d (decimal)", info);
				}
			}
			if (sense->extra_len != 0) {
				int n;
				printf(", data =");
				for (n = 0; n < sense->extra_len; n++)
					printf(" %02x",
					    sense->cmd_spec_info[n]);
			}
			printf("\n");
		}
#endif
		return (error);

	/*
	 * Not code 70, just report it
	 */
	default:
#if defined(SCSIDEBUG) || defined(DEBUG)
	{
		static char *uc = "undecodable sense error";
		int i;
		u_int8_t *cptr = (u_int8_t *) sense;
		scsipi_printaddr(periph);
		if (xs->cmd == &xs->cmdstore) {
			printf("%s for opcode 0x%x, data=",
			    uc, xs->cmdstore.opcode);
		} else {
			printf("%s, data=", uc);
		}
		for (i = 0; i < sizeof (*sense); i++)
			printf(" 0x%02x", *(cptr++) & 0xff);
		printf("\n");
	}
#else
		scsipi_printaddr(periph);
		printf("Sense Error Code 0x%x",
		    sense->error_code & SSD_ERRCODE);
		if ((sense->error_code & SSD_ERRCODE_VALID) != 0) {
			struct scsipi_sense_data_unextended *usense =
			    (struct scsipi_sense_data_unextended *)sense;
			printf(" at block no. %d (decimal)",
			    _3btol(usense->block));
		}
		printf("\n");
#endif
		return (EIO);
	}
}

/*
 * scsipi_size:
 *
 *	Find out from the device what its capacity is.
 */
u_long
scsipi_size(periph, flags)
	struct scsipi_periph *periph;
	int flags;
{
	struct scsipi_read_cap_data rdcap;
	struct scsipi_read_capacity scsipi_cmd;

	memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
	scsipi_cmd.opcode = READ_CAPACITY;

	/*
	 * If the command works, interpret the result as a 4 byte
	 * number of blocks
	 */
	if (scsipi_command(periph, (struct scsipi_generic *)&scsipi_cmd,
	    sizeof(scsipi_cmd), (u_char *)&rdcap, sizeof(rdcap),
	    SCSIPIRETRIES, 20000, NULL,
	    flags | XS_CTL_DATA_IN | XS_CTL_DATA_ONSTACK) != 0) {
		scsipi_printaddr(periph);
		printf("could not get size\n");
		return (0);
	}

	return (_4btol(rdcap.addr) + 1);
}

/*
 * scsipi_test_unit_ready:
 *
 *	Issue a `test unit ready' request.
 */
int
scsipi_test_unit_ready(periph, flags)
	struct scsipi_periph *periph;
	int flags;
{
	struct scsipi_test_unit_ready scsipi_cmd;

	/* some ATAPI drives don't support TEST_UNIT_READY. Sigh */
	if (periph->periph_quirks & PQUIRK_NOTUR)
		return (0);

	memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
	scsipi_cmd.opcode = TEST_UNIT_READY;

	return (scsipi_command(periph,
	    (struct scsipi_generic *)&scsipi_cmd, sizeof(scsipi_cmd),
	    0, 0, SCSIPIRETRIES, 10000, NULL, flags));
}

/*
 * scsipi_inquire:
 *
 *	Ask the device about itself.
 */
int
scsipi_inquire(periph, inqbuf, flags)
	struct scsipi_periph *periph;
	struct scsipi_inquiry_data *inqbuf;
	int flags;
{
	struct scsipi_inquiry scsipi_cmd;

	memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
	scsipi_cmd.opcode = INQUIRY;
	scsipi_cmd.length = sizeof(struct scsipi_inquiry_data);

	return (scsipi_command(periph,
	    (struct scsipi_generic *) &scsipi_cmd, sizeof(scsipi_cmd),
	    (u_char *) inqbuf, sizeof(struct scsipi_inquiry_data),
	    SCSIPIRETRIES, 10000, NULL, XS_CTL_DATA_IN | flags));
}

/*
 * scsipi_prevent:
 *
 *	Prevent or allow the user to remove the media
 */
int
scsipi_prevent(periph, type, flags)
	struct scsipi_periph *periph;
	int type, flags;
{
	struct scsipi_prevent scsipi_cmd;

	if (periph->periph_quirks & PQUIRK_NODOORLOCK)
		return (0);

	memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
	scsipi_cmd.opcode = PREVENT_ALLOW;
	scsipi_cmd.how = type;

	return (scsipi_command(periph,
	    (struct scsipi_generic *) &scsipi_cmd, sizeof(scsipi_cmd),
	    0, 0, SCSIPIRETRIES, 5000, NULL, flags));
}

/*
 * scsipi_start:
 *
 *	Send a START UNIT.
 */
int
scsipi_start(periph, type, flags)
	struct scsipi_periph *periph;
	int type, flags;
{
	struct scsipi_start_stop scsipi_cmd;

	if (periph->periph_quirks & PQUIRK_NOSTARTUNIT)
		return 0;

	memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
	scsipi_cmd.opcode = START_STOP;
	scsipi_cmd.byte2 = 0x00;
	scsipi_cmd.how = type;

	return (scsipi_command(periph,
	    (struct scsipi_generic *) &scsipi_cmd, sizeof(scsipi_cmd),
	    0, 0, SCSIPIRETRIES, (type & SSS_START) ? 60000 : 10000,
	    NULL, flags));
}

/*
 * scsipi_mode_sense, scsipi_mode_sense_big:
 *	get a sense page from a device
 */

int
scsipi_mode_sense(periph, byte2, page, data, len, flags, retries, timeout)
	struct scsipi_periph *periph;
	int byte2, page, len, flags, retries, timeout;
	struct scsipi_mode_header *data;
{
	struct scsipi_mode_sense scsipi_cmd;
	int error;

	memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
	scsipi_cmd.opcode = MODE_SENSE;
	scsipi_cmd.byte2 = byte2;
	scsipi_cmd.page = page;
	if (scsipi_periph_bustype(periph) == SCSIPI_BUSTYPE_ATAPI)
		_lto2b(len, scsipi_cmd.u_len.atapi.length);
	else
		scsipi_cmd.u_len.scsi.length = len & 0xff;
	error = scsipi_command(periph, (struct scsipi_generic *)&scsipi_cmd,
	    sizeof(scsipi_cmd), (void *)data, len, retries, timeout, NULL,
	    flags | XS_CTL_DATA_IN);
	SC_DEBUG(periph, SCSIPI_DB2,
	    ("scsipi_mode_sense: error=%d\n", error));
	return (error);
}
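
/*
 * Example call (an editorial sketch, not from the original source;
 * the buffer layout, page size and page number are assumptions):
 * fetching mode page 8 (caching parameters) with a 10-second timeout
 * from a stack buffer might look like:
 *
 *	struct {
 *		struct scsipi_mode_header hdr;
 *		u_int8_t page[20];
 *	} buf;
 *
 *	error = scsipi_mode_sense(periph, 0, 8, &buf.hdr, sizeof(buf),
 *	    XS_CTL_DATA_ONSTACK, SCSIPIRETRIES, 10000);
 */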

int
scsipi_mode_sense_big(periph, byte2, page, data, len, flags, retries, timeout)
	struct scsipi_periph *periph;
	int byte2, page, len, flags, retries, timeout;
	struct scsipi_mode_header_big *data;
{
	struct scsipi_mode_sense_big scsipi_cmd;
	int error;

	memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
	scsipi_cmd.opcode = MODE_SENSE_BIG;
	scsipi_cmd.byte2 = byte2;
	scsipi_cmd.page = page;
	_lto2b(len, scsipi_cmd.length);
	error = scsipi_command(periph, (struct scsipi_generic *)&scsipi_cmd,
	    sizeof(scsipi_cmd), (void *)data, len, retries, timeout, NULL,
	    flags | XS_CTL_DATA_IN);
	SC_DEBUG(periph, SCSIPI_DB2,
	    ("scsipi_mode_sense_big: error=%d\n", error));
	return (error);
}

int
scsipi_mode_select(periph, byte2, data, len, flags, retries, timeout)
	struct scsipi_periph *periph;
	int byte2, len, flags, retries, timeout;
	struct scsipi_mode_header *data;
{
	struct scsipi_mode_select scsipi_cmd;
	int error;

	memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
	scsipi_cmd.opcode = MODE_SELECT;
	scsipi_cmd.byte2 = byte2;
	if (scsipi_periph_bustype(periph) == SCSIPI_BUSTYPE_ATAPI)
		_lto2b(len, scsipi_cmd.u_len.atapi.length);
	else
		scsipi_cmd.u_len.scsi.length = len & 0xff;
	error = scsipi_command(periph, (struct scsipi_generic *)&scsipi_cmd,
	    sizeof(scsipi_cmd), (void *)data, len, retries, timeout, NULL,
	    flags | XS_CTL_DATA_OUT);
	SC_DEBUG(periph, SCSIPI_DB2,
	    ("scsipi_mode_select: error=%d\n", error));
	return (error);
}

int
scsipi_mode_select_big(periph, byte2, data, len, flags, retries, timeout)
	struct scsipi_periph *periph;
	int byte2, len, flags, retries, timeout;
	struct scsipi_mode_header_big *data;
{
	struct scsipi_mode_select_big scsipi_cmd;
	int error;

	memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
	scsipi_cmd.opcode = MODE_SELECT_BIG;
	scsipi_cmd.byte2 = byte2;
	_lto2b(len, scsipi_cmd.length);
	error = scsipi_command(periph, (struct scsipi_generic *)&scsipi_cmd,
	    sizeof(scsipi_cmd), (void *)data, len, retries, timeout, NULL,
	    flags | XS_CTL_DATA_OUT);
	SC_DEBUG(periph, SCSIPI_DB2,
	    ("scsipi_mode_select_big: error=%d\n", error));
	return (error);
}

/*
 * scsipi_done:
 *
 *	This routine is called by an adapter's interrupt handler when
 *	an xfer is completed.
 */
void
scsipi_done(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_periph *periph = xs->xs_periph;
	struct scsipi_channel *chan = periph->periph_channel;
	int s, freezecnt;

	SC_DEBUG(periph, SCSIPI_DB2, ("scsipi_done\n"));
#ifdef SCSIPI_DEBUG
	if (periph->periph_dbflags & SCSIPI_DB1)
		show_scsipi_cmd(xs);
#endif

	s = splbio();
	/*
	 * The resource this command was using is now free.
	 */
	scsipi_put_resource(chan);
	xs->xs_periph->periph_sent--;

	/*
	 * If the command was tagged, free the tag.
	 */
	if (XS_CTL_TAGTYPE(xs) != 0)
		scsipi_put_tag(xs);
	else
		periph->periph_flags &= ~PERIPH_UNTAG;

	/* Mark the command as `done'. */
	xs->xs_status |= XS_STS_DONE;

#ifdef DIAGNOSTIC
	if ((xs->xs_control & (XS_CTL_ASYNC|XS_CTL_POLL)) ==
	    (XS_CTL_ASYNC|XS_CTL_POLL))
		panic("scsipi_done: ASYNC and POLL");
#endif

	/*
	 * If the xfer had an error of any sort, freeze the
	 * periph's queue.  Freeze it again if we were requested
	 * to do so in the xfer.
	 */
	freezecnt = 0;
	if (xs->error != XS_NOERROR)
		freezecnt++;
	if (xs->xs_control & XS_CTL_FREEZE_PERIPH)
		freezecnt++;
	if (freezecnt != 0)
		scsipi_periph_freeze(periph, freezecnt);

	/*
	 * record the xfer with a pending sense, in case a SCSI reset is
	 * received before the thread is woken up.
	 */
	if (xs->error == XS_BUSY && xs->status == SCSI_CHECK) {
		periph->periph_flags |= PERIPH_SENSE;
		periph->periph_xscheck = xs;
	}

	/*
	 * If this was an xfer that was not to complete asynchronously,
	 * let the requesting thread perform error checking/handling
	 * in its context.
	 */
	if ((xs->xs_control & XS_CTL_ASYNC) == 0) {
		splx(s);
		/*
		 * If it's a polling job, just return, to unwind the
		 * call graph.  We don't need to restart the queue,
		 * because polling jobs are treated specially, and
		 * are really only used during crash dumps anyway
		 * (XXX or during boot-time autoconfiguration of
		 * ATAPI devices).
		 */
		if (xs->xs_control & XS_CTL_POLL)
			return;
		wakeup(xs);
		goto out;
	}

	/*
	 * Catch the extremely common case of I/O completing
	 * without error; no use in taking a context switch
	 * if we can handle it in interrupt context.
	 */
	if (xs->error == XS_NOERROR) {
		splx(s);
		(void) scsipi_complete(xs);
		goto out;
	}

	/*
	 * There is an error on this xfer.  Put it on the channel's
	 * completion queue, and wake up the completion thread.
	 */
	TAILQ_INSERT_TAIL(&chan->chan_complete, xs, channel_q);
	splx(s);
	wakeup(&chan->chan_complete);

 out:
	/*
	 * If there are more xfers on the channel's queue, attempt to
	 * run them.
	 */
	scsipi_run_queue(chan);
}

/*
 * scsipi_complete:
 *
 *	Completion of a scsipi_xfer.  This is the guts of scsipi_done().
 *
 *	NOTE: This routine MUST be called with valid thread context
 *	except for the case where the following two conditions are
 *	true:
 *
 *		xs->error == XS_NOERROR
 *		XS_CTL_ASYNC is set in xs->xs_control
 *
 *	The semantics of this routine can be tricky, so here is an
 *	explanation:
 *
 *		0		Xfer completed successfully.
 *
 *		ERESTART	Xfer had an error, but was restarted.
 *
 *		anything else	Xfer had an error, return value is Unix
 *				errno.
 *
 *	If the return value is anything but ERESTART:
 *
 *		- If XS_CTL_ASYNC is set, `xs' has been freed back to
 *		  the pool.
 *		- If there is a buf associated with the xfer,
 *		  it has been biodone()'d.
 */
int
scsipi_complete(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_periph *periph = xs->xs_periph;
	struct scsipi_channel *chan = periph->periph_channel;
	struct buf *bp;
	int error, s;

#ifdef DIAGNOSTIC
	if ((xs->xs_control & XS_CTL_ASYNC) != 0 && xs->bp == NULL)
		panic("scsipi_complete: XS_CTL_ASYNC but no buf");
#endif
	/*
	 * If command terminated with a CHECK CONDITION, we need to issue a
	 * REQUEST_SENSE command.  Once the REQUEST_SENSE has been processed
	 * we'll have the real status.
	 * Must be processed at splbio() to avoid missing a SCSI bus reset
	 * for this command.
	 */
	s = splbio();
	if (xs->error == XS_BUSY && xs->status == SCSI_CHECK) {
		/* request sense for a request sense? */
		if (xs->xs_control & XS_CTL_REQSENSE) {
			scsipi_printaddr(periph);
			printf("request sense for a request sense?\n");
			/* XXX maybe we should reset the device? */
			/* we've been frozen because xs->error != XS_NOERROR */
			scsipi_periph_thaw(periph, 1);
			splx(s);
			if (xs->resid < xs->datalen) {
				printf("we read %d bytes of sense anyway:\n",
				    xs->datalen - xs->resid);
#ifdef SCSIVERBOSE
				scsipi_print_sense_data((void *)xs->data, 0);
#endif
			}
			return EINVAL;
		}
		scsipi_request_sense(xs);
	}
	splx(s);

	/*
	 * If it's a user level request, bypass all usual completion
	 * processing, let the user work it out..
	 */
	if ((xs->xs_control & XS_CTL_USERCMD) != 0) {
		SC_DEBUG(periph, SCSIPI_DB3, ("calling user done()\n"));
		if (xs->error != XS_NOERROR)
			scsipi_periph_thaw(periph, 1);
		scsipi_user_done(xs);
		SC_DEBUG(periph, SCSIPI_DB3, ("returned from user done()\n "));
		return 0;
	}

	switch (xs->error) {
	case XS_NOERROR:
		error = 0;
		break;

	case XS_SENSE:
	case XS_SHORTSENSE:
		error = (*chan->chan_bustype->bustype_interpret_sense)(xs);
		break;

	case XS_RESOURCE_SHORTAGE:
		/*
		 * XXX Should freeze channel's queue.
		 */
		scsipi_printaddr(periph);
		printf("adapter resource shortage\n");
		/* FALLTHROUGH */

	case XS_BUSY:
		if (xs->error == XS_BUSY && xs->status == SCSI_QUEUE_FULL) {
			struct scsipi_max_openings mo;

			/*
			 * We set the openings to active - 1, assuming that
			 * the command that got us here is the first one that
			 * can't fit into the device's queue.  If that's not
			 * the case, I guess we'll find out soon enough.
			 */
			mo.mo_target = periph->periph_target;
			mo.mo_lun = periph->periph_lun;
			if (periph->periph_active < periph->periph_openings)
				mo.mo_openings = periph->periph_active - 1;
			else
				mo.mo_openings = periph->periph_openings - 1;
#ifdef DIAGNOSTIC
			if (mo.mo_openings < 0) {
				scsipi_printaddr(periph);
				printf("QUEUE FULL resulted in < 0 openings\n");
				panic("scsipi_done");
			}
#endif
			if (mo.mo_openings == 0) {
				scsipi_printaddr(periph);
				printf("QUEUE FULL resulted in 0 openings\n");
				mo.mo_openings = 1;
			}
			scsipi_async_event(chan, ASYNC_EVENT_MAX_OPENINGS, &mo);
			error = ERESTART;
		} else if (xs->xs_retries != 0) {
			xs->xs_retries--;
			/*
			 * Wait one second, and try again.
			 */
			if ((xs->xs_control & XS_CTL_POLL) ||
			    (chan->chan_flags & SCSIPI_CHAN_TACTIVE) == 0) {
				delay(1000000);
			} else {
				scsipi_periph_freeze(periph, 1);
				callout_reset(&periph->periph_callout,
				    hz, scsipi_periph_timed_thaw, periph);
			}
			error = ERESTART;
		} else
			error = EBUSY;
		break;

	case XS_REQUEUE:
		error = ERESTART;
		break;

	case XS_TIMEOUT:
		if (xs->xs_retries != 0) {
			xs->xs_retries--;
			error = ERESTART;
		} else
			error = EIO;
		break;

	case XS_SELTIMEOUT:
		/* XXX Disable device? */
		error = EIO;
		break;

	case XS_RESET:
		if (xs->xs_control & XS_CTL_REQSENSE) {
			/*
			 * request sense interrupted by reset: signal it
			 * with EINTR return code.
			 */
			error = EINTR;
		} else {
			if (xs->xs_retries != 0) {
				xs->xs_retries--;
				error = ERESTART;
			} else
				error = EIO;
		}
		break;

	default:
		scsipi_printaddr(periph);
		printf("invalid return code from adapter: %d\n", xs->error);
		error = EIO;
		break;
	}

	s = splbio();
	if (error == ERESTART) {
		/*
		 * If we get here, the periph has been thawed and frozen
		 * again if we had to issue recovery commands.  Alternatively,
		 * it may have been frozen again and in a timed thaw.  In
		 * any case, we thaw the periph once we re-enqueue the
		 * command.  Once the periph is fully thawed, it will begin
		 * operation again.
		 */
		xs->error = XS_NOERROR;
		xs->status = SCSI_OK;
		xs->xs_status &= ~XS_STS_DONE;
		xs->xs_requeuecnt++;
		error = scsipi_enqueue(xs);
		if (error == 0) {
			scsipi_periph_thaw(periph, 1);
			splx(s);
			return (ERESTART);
		}
	}

	/*
	 * scsipi_done() freezes the queue if not XS_NOERROR.
	 * Thaw it here.
	 */
	if (xs->error != XS_NOERROR)
		scsipi_periph_thaw(periph, 1);

	if (periph->periph_switch->psw_done)
		periph->periph_switch->psw_done(xs);
	if ((bp = xs->bp) != NULL) {
		if (error) {
			bp->b_error = error;
			bp->b_flags |= B_ERROR;
			bp->b_resid = bp->b_bcount;
		} else {
			bp->b_error = 0;
			bp->b_resid = xs->resid;
		}
		biodone(bp);
	}

	if (xs->xs_control & XS_CTL_ASYNC)
		scsipi_put_xs(xs);
	splx(s);

	return (error);
}

/*
 * Issue a request sense for the given scsipi_xfer.  Called when the xfer
 * returns with a CHECK_CONDITION status.  Must be called in valid thread
 * context and at splbio().
 */

void
scsipi_request_sense(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_periph *periph = xs->xs_periph;
	int flags, error;
	struct scsipi_sense cmd;

	periph->periph_flags |= PERIPH_SENSE;

	/* if command was polling, request sense will too */
	flags = xs->xs_control & XS_CTL_POLL;
	/* Polling commands can't sleep */
	if (flags)
		flags |= XS_CTL_NOSLEEP;

	flags |= XS_CTL_REQSENSE | XS_CTL_URGENT | XS_CTL_DATA_IN |
	    XS_CTL_THAW_PERIPH | XS_CTL_FREEZE_PERIPH;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = REQUEST_SENSE;
	cmd.length = sizeof(struct scsipi_sense_data);

	error = scsipi_command(periph,
	    (struct scsipi_generic *) &cmd, sizeof(cmd),
	    (u_char*)&xs->sense.scsi_sense, sizeof(struct scsipi_sense_data),
	    0, 1000, NULL, flags);
	periph->periph_flags &= ~PERIPH_SENSE;
	periph->periph_xscheck = NULL;
	switch (error) {
	case 0:
		/* we have a valid sense */
		xs->error = XS_SENSE;
		return;
	case EINTR:
		/* REQUEST_SENSE interrupted by bus reset. */
		xs->error = XS_RESET;
		return;
	case EIO:
		/* request sense couldn't be performed */
		/*
		 * XXX this isn't quite right but we don't have anything
		 * better for now
		 */
		xs->error = XS_DRIVER_STUFFUP;
		return;
	default:
		/* Notify that request sense failed. */
		xs->error = XS_DRIVER_STUFFUP;
		scsipi_printaddr(periph);
		printf("request sense failed with error %d\n", error);
		return;
	}
}

/*
 * scsipi_enqueue:
 *
 *	Enqueue an xfer on a channel.
 */
int
scsipi_enqueue(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_channel *chan = xs->xs_periph->periph_channel;
	struct scsipi_xfer *qxs;
	int s;

	s = splbio();

	/*
	 * If the xfer is to be polled, and there are already jobs on
	 * the queue, we can't proceed.
	 */
	if ((xs->xs_control & XS_CTL_POLL) != 0 &&
	    TAILQ_FIRST(&chan->chan_queue) != NULL) {
		splx(s);
		xs->error = XS_DRIVER_STUFFUP;
		return (EAGAIN);
	}

	/*
	 * If we have an URGENT xfer, it's an error recovery command
	 * and it should just go on the head of the channel's queue.
	 */
	if (xs->xs_control & XS_CTL_URGENT) {
		TAILQ_INSERT_HEAD(&chan->chan_queue, xs, channel_q);
		goto out;
	}

	/*
	 * If this xfer has already been on the queue before, we
	 * need to reinsert it in the correct order.  That order is:
	 *
	 *	Immediately before the first xfer for this periph
	 *	with a requeuecnt less than xs->xs_requeuecnt.
	 *
	 * Failing that, at the end of the queue.  (We'll end up
	 * there naturally.)
	 */
	if (xs->xs_requeuecnt != 0) {
		for (qxs = TAILQ_FIRST(&chan->chan_queue); qxs != NULL;
		     qxs = TAILQ_NEXT(qxs, channel_q)) {
			if (qxs->xs_periph == xs->xs_periph &&
			    qxs->xs_requeuecnt < xs->xs_requeuecnt)
				break;
		}
		if (qxs != NULL) {
			TAILQ_INSERT_AFTER(&chan->chan_queue, qxs, xs,
			    channel_q);
			goto out;
		}
	}
	TAILQ_INSERT_TAIL(&chan->chan_queue, xs, channel_q);
 out:
	if (xs->xs_control & XS_CTL_THAW_PERIPH)
		scsipi_periph_thaw(xs->xs_periph, 1);
	splx(s);
	return (0);
}

/*
 * scsipi_run_queue:
 *
 *	Start as many xfers as possible running on the channel.
 */
void
scsipi_run_queue(chan)
	struct scsipi_channel *chan;
{
	struct scsipi_xfer *xs;
	struct scsipi_periph *periph;
	int s;

	for (;;) {
		s = splbio();

		/*
		 * If the channel is frozen, we can't do any work right
		 * now.
		 */
		if (chan->chan_qfreeze != 0) {
			splx(s);
			return;
		}

		/*
		 * Look for work to do, and make sure we can do it.
		 */
		for (xs = TAILQ_FIRST(&chan->chan_queue); xs != NULL;
		     xs = TAILQ_NEXT(xs, channel_q)) {
			periph = xs->xs_periph;

			if ((periph->periph_sent >= periph->periph_openings) ||
			    periph->periph_qfreeze != 0 ||
			    (periph->periph_flags & PERIPH_UNTAG) != 0)
				continue;

			if ((periph->periph_flags &
			    (PERIPH_RECOVERING | PERIPH_SENSE)) != 0 &&
			    (xs->xs_control & XS_CTL_URGENT) == 0)
				continue;

			/*
			 * We can issue this xfer!
			 */
			goto got_one;
		}

		/*
		 * Can't find any work to do right now.
		 */
		splx(s);
		return;

 got_one:
		/*
		 * Have an xfer to run.  Allocate a resource from
		 * the adapter to run it.  If we can't allocate that
		 * resource, we don't dequeue the xfer.
		 */
		if (scsipi_get_resource(chan) == 0) {
			/*
			 * Adapter is out of resources.  If the adapter
			 * supports it, attempt to grow them.
			 */
			if (scsipi_grow_resources(chan) == 0) {
				/*
				 * Wasn't able to grow resources,
				 * nothing more we can do.
				 */
				if (xs->xs_control & XS_CTL_POLL) {
					scsipi_printaddr(xs->xs_periph);
					printf("polling command but no "
					    "adapter resources");
					/* We'll panic shortly... */
				}
				splx(s);

				/*
				 * XXX: We should be able to note that
				 * XXX: resources are needed here!
				 */
				return;
			}
			/*
			 * scsipi_grow_resources() allocated the resource
			 * for us.
			 */
		}

		/*
		 * We have a resource to run this xfer, do it!
		 */
		TAILQ_REMOVE(&chan->chan_queue, xs, channel_q);

		/*
		 * If the command is to be tagged, allocate a tag ID
		 * for it.
		 */
		if (XS_CTL_TAGTYPE(xs) != 0)
			scsipi_get_tag(xs);
		else
			periph->periph_flags |= PERIPH_UNTAG;
		periph->periph_sent++;
		splx(s);

		scsipi_adapter_request(chan, ADAPTER_REQ_RUN_XFER, xs);
	}
#ifdef DIAGNOSTIC
	panic("scsipi_run_queue: impossible");
#endif
}

/*
 * scsipi_execute_xs:
 *
 *	Begin execution of an xfer, waiting for it to complete, if necessary.
 */
int
scsipi_execute_xs(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_periph *periph = xs->xs_periph;
	struct scsipi_channel *chan = periph->periph_channel;
	int async, poll, retries, error, s;

	xs->xs_status &= ~XS_STS_DONE;
	xs->error = XS_NOERROR;
	xs->resid = xs->datalen;
	xs->status = SCSI_OK;

#ifdef SCSIPI_DEBUG
	if (xs->xs_periph->periph_dbflags & SCSIPI_DB3) {
		printf("scsipi_execute_xs: ");
		show_scsipi_xs(xs);
		printf("\n");
	}
#endif

	/*
	 * Deal with command tagging:
	 *
	 *	- If the device's current operating mode doesn't
	 *	  include tagged queueing, clear the tag mask.
	 *
	 *	- If the device's current operating mode *does*
	 *	  include tagged queueing, set the tag_type in
	 *	  the xfer to the appropriate byte for the tag
	 *	  message.
	 */
	if ((PERIPH_XFER_MODE(periph) & PERIPH_CAP_TQING) == 0 ||
	    (xs->xs_control & XS_CTL_REQSENSE)) {
		xs->xs_control &= ~XS_CTL_TAGMASK;
		xs->xs_tag_type = 0;
	} else {
		/*
		 * If the request doesn't specify a tag, give Head
		 * tags to URGENT operations and Ordered tags to
		 * everything else.
		 */
		if (XS_CTL_TAGTYPE(xs) == 0) {
			if (xs->xs_control & XS_CTL_URGENT)
				xs->xs_control |= XS_CTL_HEAD_TAG;
			else
				xs->xs_control |= XS_CTL_ORDERED_TAG;
		}

		switch (XS_CTL_TAGTYPE(xs)) {
		case XS_CTL_ORDERED_TAG:
			xs->xs_tag_type = MSG_ORDERED_Q_TAG;
			break;

		case XS_CTL_SIMPLE_TAG:
			xs->xs_tag_type = MSG_SIMPLE_Q_TAG;
			break;

		case XS_CTL_HEAD_TAG:
			xs->xs_tag_type = MSG_HEAD_OF_Q_TAG;
			break;

		default:
			scsipi_printaddr(periph);
			printf("invalid tag mask 0x%08x\n",
			    XS_CTL_TAGTYPE(xs));
			panic("scsipi_execute_xs");
		}
	}

	/* If the adapter wants us to poll, poll. */
	if (chan->chan_adapter->adapt_flags & SCSIPI_ADAPT_POLL_ONLY)
		xs->xs_control |= XS_CTL_POLL;

	/*
	 * If we don't yet have a completion thread, or we are to poll for
	 * completion, clear the ASYNC flag.
	 */
	if (chan->chan_thread == NULL || (xs->xs_control & XS_CTL_POLL) != 0)
		xs->xs_control &= ~XS_CTL_ASYNC;

	async = (xs->xs_control & XS_CTL_ASYNC);
	poll = (xs->xs_control & XS_CTL_POLL);
	retries = xs->xs_retries;		/* for polling commands */

#ifdef DIAGNOSTIC
	if (async != 0 && xs->bp == NULL)
		panic("scsipi_execute_xs: XS_CTL_ASYNC but no buf");
#endif

	/*
	 * Enqueue the transfer.  If we're not polling for completion, this
	 * should ALWAYS return `no error'.
	 */
 try_again:
	error = scsipi_enqueue(xs);
	if (error) {
		if (poll == 0) {
			scsipi_printaddr(periph);
			printf("not polling, but enqueue failed with %d\n",
			    error);
			panic("scsipi_execute_xs");
		}

		scsipi_printaddr(periph);
		printf("failed to enqueue polling command");
		if (retries != 0) {
			printf(", retrying...\n");
			delay(1000000);
			retries--;
			goto try_again;
		}
		printf("\n");
		goto free_xs;
	}

 restarted:
	scsipi_run_queue(chan);

	/*
	 * The xfer is enqueued, and possibly running.  If it's to be
	 * completed asynchronously, just return now.
	 */
	if (async)
		return (EJUSTRETURN);

	/*
	 * Not an asynchronous command; wait for it to complete.
	 */
	s = splbio();
	while ((xs->xs_status & XS_STS_DONE) == 0) {
		if (poll) {
			scsipi_printaddr(periph);
			printf("polling command not done\n");
			panic("scsipi_execute_xs");
		}
		(void) tsleep(xs, PRIBIO, "xscmd", 0);
	}
	splx(s);

	/*
	 * Command is complete.  scsipi_done() has awakened us to perform
	 * the error handling.
	 */
	error = scsipi_complete(xs);
	if (error == ERESTART)
		goto restarted;

	/*
	 * Command completed successfully or fatal error occurred.  Fall
	 * into....
	 */
 free_xs:
	s = splbio();
	scsipi_put_xs(xs);
	splx(s);

	/*
	 * Kick the queue, keep it running in case it stopped for some
	 * reason.
	 */
	scsipi_run_queue(chan);

	return (error);
}

/*
 * scsipi_completion_thread:
 *
 *	This is the completion thread.  We wait for errors on
 *	asynchronous xfers, and perform the error handling
 *	function, restarting the command, if necessary.
 */
void
scsipi_completion_thread(arg)
	void *arg;
{
	struct scsipi_channel *chan = arg;
	struct scsipi_xfer *xs;
	int s;

	s = splbio();
	chan->chan_flags |= SCSIPI_CHAN_TACTIVE;
	splx(s);
	for (;;) {
		s = splbio();
		xs = TAILQ_FIRST(&chan->chan_complete);
		if (xs == NULL && chan->chan_tflags == 0) {
			/* nothing to do; wait */
			(void) tsleep(&chan->chan_complete, PRIBIO,
			    "sccomp", 0);
			splx(s);
			continue;
		}
		if (chan->chan_tflags & SCSIPI_CHANT_CALLBACK) {
			/* call chan_callback from thread context */
			chan->chan_tflags &= ~SCSIPI_CHANT_CALLBACK;
			chan->chan_callback(chan, chan->chan_callback_arg);
			splx(s);
			continue;
		}
		if (chan->chan_tflags & SCSIPI_CHANT_GROWRES) {
			/* attempt to get more openings for this channel */
			chan->chan_tflags &= ~SCSIPI_CHANT_GROWRES;
			scsipi_adapter_request(chan,
			    ADAPTER_REQ_GROW_RESOURCES, NULL);
			scsipi_channel_thaw(chan, 1);
			splx(s);
			continue;
		}
		if (chan->chan_tflags & SCSIPI_CHANT_KICK) {
			/* explicitly run the queues for this channel */
			chan->chan_tflags &= ~SCSIPI_CHANT_KICK;
			scsipi_run_queue(chan);
			splx(s);
			continue;
		}
		if (chan->chan_tflags & SCSIPI_CHANT_SHUTDOWN) {
			splx(s);
			break;
		}
		if (xs) {
			TAILQ_REMOVE(&chan->chan_complete, xs, channel_q);
			splx(s);

			/*
			 * Have an xfer with an error; process it.
			 */
			(void) scsipi_complete(xs);

			/*
			 * Kick the queue; keep it running if it was stopped
			 * for some reason.
			 */
			scsipi_run_queue(chan);
		} else {
			splx(s);
		}
	}

	chan->chan_thread = NULL;

	/* In case parent is waiting for us to exit. */
	wakeup(&chan->chan_thread);

	kthread_exit(0);
}

/*
 * scsipi_create_completion_thread:
 *
 *	Callback to actually create the completion thread.
 */
void
scsipi_create_completion_thread(arg)
	void *arg;
{
	struct scsipi_channel *chan = arg;
	struct scsipi_adapter *adapt = chan->chan_adapter;

	if (kthread_create1(scsipi_completion_thread, chan,
	    &chan->chan_thread, "%s:%d", adapt->adapt_dev->dv_xname,
	    chan->chan_channel)) {
		printf("%s: unable to create completion thread for "
		    "channel %d\n", adapt->adapt_dev->dv_xname,
		    chan->chan_channel);
		panic("scsipi_create_completion_thread");
	}
}

/*
 * scsipi_thread_call_callback:
 *
 *	request to call a callback from the completion thread
 */
int
scsipi_thread_call_callback(chan, callback, arg)
	struct scsipi_channel *chan;
	void (*callback) __P((struct scsipi_channel *, void *));
	void *arg;
{
	int s;

	s = splbio();
	if (chan->chan_tflags & SCSIPI_CHANT_CALLBACK) {
		splx(s);
		return EBUSY;
	}
	scsipi_channel_freeze(chan, 1);
	chan->chan_callback = callback;
	chan->chan_callback_arg = arg;
	chan->chan_tflags |= SCSIPI_CHANT_CALLBACK;
	wakeup(&chan->chan_complete);
	splx(s);
	return (0);
}
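
/*
 * Usage sketch (illustrative; mydriver_rescan and sc are assumed
 * names for an adapter driver's callback and softc): a driver that
 * must do work in thread context can request it with:
 *
 *	error = scsipi_thread_call_callback(chan, mydriver_rescan, sc);
 *
 * Note that the channel is frozen here and the completion thread
 * does not thaw it after running the callback, so the callback (or
 * code it triggers) is responsible for the matching
 * scsipi_channel_thaw().
 */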
2090
2091 /*
2092 * scsipi_async_event:
2093 *
2094 * Handle an asynchronous event from an adapter.
2095 */
2096 void
2097 scsipi_async_event(chan, event, arg)
2098 struct scsipi_channel *chan;
2099 scsipi_async_event_t event;
2100 void *arg;
2101 {
2102 int s;
2103
2104 s = splbio();
2105 switch (event) {
2106 case ASYNC_EVENT_MAX_OPENINGS:
2107 scsipi_async_event_max_openings(chan,
2108 (struct scsipi_max_openings *)arg);
2109 break;
2110
2111 case ASYNC_EVENT_XFER_MODE:
2112 scsipi_async_event_xfer_mode(chan,
2113 (struct scsipi_xfer_mode *)arg);
2114 break;
2115 case ASYNC_EVENT_RESET:
2116 scsipi_async_event_channel_reset(chan);
2117 break;
2118 }
2119 splx(s);
2120 }
2121
2122 /*
2123 * scsipi_print_xfer_mode:
2124 *
2125 * Print a periph's capabilities.
2126 */
2127 void
2128 scsipi_print_xfer_mode(periph)
2129 struct scsipi_periph *periph;
2130 {
2131 int period, freq, speed, mbs;
2132
2133 if ((periph->periph_flags & PERIPH_MODE_VALID) == 0)
2134 return;
2135
2136 printf("%s: ", periph->periph_dev->dv_xname);
2137 if (periph->periph_mode & PERIPH_CAP_SYNC) {
2138 period = scsipi_sync_factor_to_period(periph->periph_period);
2139 printf("sync (%d.%dns offset %d)",
2140 period / 10, period % 10, periph->periph_offset);
2141 } else
2142 printf("async");
2143
2144 if (periph->periph_mode & PERIPH_CAP_WIDE32)
2145 printf(", 32-bit");
2146 else if (periph->periph_mode & PERIPH_CAP_WIDE16)
2147 printf(", 16-bit");
2148 else
2149 printf(", 8-bit");
2150
2151 if (periph->periph_mode & PERIPH_CAP_SYNC) {
2152 freq = scsipi_sync_factor_to_freq(periph->periph_period);
2153 speed = freq;
2154 if (periph->periph_mode & PERIPH_CAP_WIDE32)
2155 speed *= 4;
2156 else if (periph->periph_mode & PERIPH_CAP_WIDE16)
2157 speed *= 2;
2158 mbs = speed / 1000;
2159 if (mbs > 0)
2160 printf(" (%d.%03dMB/s)", mbs, speed % 1000);
2161 else
2162 			printf(" (%dKB/s)", speed);	/* speed < 1000 here */
2163 }
2164
2165 printf(" transfers");
2166
2167 if (periph->periph_mode & PERIPH_CAP_TQING)
2168 printf(", tagged queueing");
2169
2170 printf("\n");
2171 }
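
/*
 * For a Fast-20 wide device with tagged queueing this prints, for
 * example:
 *
 *	sd0: sync (50.0ns offset 8), 16-bit (40.000MB/s) transfers,
 *	tagged queueing
 *
 * (factor 0x0c -> 500 tenths of a ns; 20000 kHz * 2 bytes wide
 * = 40000 KB/s = 40.000MB/s).
 */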
2172
2173 /*
2174 * scsipi_async_event_max_openings:
2175 *
2176 * Update the maximum number of outstanding commands a
2177 * device may have.
2178 */
2179 void
2180 scsipi_async_event_max_openings(chan, mo)
2181 struct scsipi_channel *chan;
2182 struct scsipi_max_openings *mo;
2183 {
2184 struct scsipi_periph *periph;
2185 int minlun, maxlun;
2186
2187 if (mo->mo_lun == -1) {
2188 /*
2189 * Wildcarded; apply it to all LUNs.
2190 */
2191 minlun = 0;
2192 maxlun = chan->chan_nluns - 1;
2193 } else
2194 minlun = maxlun = mo->mo_lun;
2195
2196 for (; minlun <= maxlun; minlun++) {
2197 periph = scsipi_lookup_periph(chan, mo->mo_target, minlun);
2198 if (periph == NULL)
2199 continue;
2200
2201 if (mo->mo_openings < periph->periph_openings)
2202 periph->periph_openings = mo->mo_openings;
2203 else if (mo->mo_openings > periph->periph_openings &&
2204 (periph->periph_flags & PERIPH_GROW_OPENINGS) != 0)
2205 periph->periph_openings = mo->mo_openings;
2206 }
2207 }
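
/*
 * Example (hypothetical): on a QUEUE FULL status an adapter could
 * clamp a device to the number of commands it actually accepted:
 *
 *	struct scsipi_max_openings mo;
 *
 *	mo.mo_target = periph->periph_target;
 *	mo.mo_lun = periph->periph_lun;
 *	mo.mo_openings = n_accepted;	(hypothetical count)
 *	scsipi_async_event(chan, ASYNC_EVENT_MAX_OPENINGS, &mo);
 *
 * Openings can later grow again only if the periph has
 * PERIPH_GROW_OPENINGS set.
 */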
2208
2209 /*
2210 * scsipi_async_event_xfer_mode:
2211 *
2212 * Update the xfer mode for all periphs sharing the
2213 * specified I_T Nexus.
2214 */
2215 void
2216 scsipi_async_event_xfer_mode(chan, xm)
2217 struct scsipi_channel *chan;
2218 struct scsipi_xfer_mode *xm;
2219 {
2220 struct scsipi_periph *periph;
2221 int lun, announce, mode, period, offset;
2222
2223 for (lun = 0; lun < chan->chan_nluns; lun++) {
2224 periph = scsipi_lookup_periph(chan, xm->xm_target, lun);
2225 if (periph == NULL)
2226 continue;
2227 announce = 0;
2228
2229 /*
2230 * Clamp the xfer mode down to this periph's capabilities.
2231 */
2232 mode = xm->xm_mode & periph->periph_cap;
2233 if (mode & PERIPH_CAP_SYNC) {
2234 period = xm->xm_period;
2235 offset = xm->xm_offset;
2236 } else {
2237 period = 0;
2238 offset = 0;
2239 }
2240
2241 /*
2242 * If we do not have a valid xfer mode yet, or the parameters
2243 * are different, announce them.
2244 */
2245 if ((periph->periph_flags & PERIPH_MODE_VALID) == 0 ||
2246 periph->periph_mode != mode ||
2247 periph->periph_period != period ||
2248 periph->periph_offset != offset)
2249 announce = 1;
2250
2251 periph->periph_mode = mode;
2252 periph->periph_period = period;
2253 periph->periph_offset = offset;
2254 periph->periph_flags |= PERIPH_MODE_VALID;
2255
2256 if (announce)
2257 scsipi_print_xfer_mode(periph);
2258 }
2259 }
2260
2261 /*
2262 * scsipi_set_xfer_mode:
2263 *
2264 * Set the xfer mode for the specified I_T Nexus.
2265 */
2266 void
2267 scsipi_set_xfer_mode(chan, target, immed)
2268 struct scsipi_channel *chan;
2269 int target, immed;
2270 {
2271 struct scsipi_xfer_mode xm;
2272 struct scsipi_periph *itperiph;
2273 int lun, s;
2274
2275 /*
2276 	 * Start with the minimal (async, narrow) xfer mode.
2277 */
2278 xm.xm_target = target;
2279 xm.xm_mode = 0;
2280 xm.xm_period = 0; /* ignored */
2281 xm.xm_offset = 0; /* ignored */
2282
2283 /*
2284 * Find the first LUN we know about on this I_T Nexus.
2285 */
2286 for (lun = 0; lun < chan->chan_nluns; lun++) {
2287 itperiph = scsipi_lookup_periph(chan, target, lun);
2288 if (itperiph != NULL)
2289 break;
2290 }
2291 if (itperiph != NULL) {
2292 xm.xm_mode = itperiph->periph_cap;
2293 /*
2294 * Now issue the request to the adapter.
2295 */
2296 s = splbio();
2297 scsipi_adapter_request(chan, ADAPTER_REQ_SET_XFER_MODE, &xm);
2298 splx(s);
2299 /*
2300 * If we want this to happen immediately, issue a dummy
2301 * command, since most adapters can't really negotiate unless
2302 * they're executing a job.
2303 */
2304 if (immed != 0) {
2305 (void) scsipi_test_unit_ready(itperiph,
2306 XS_CTL_DISCOVERY | XS_CTL_IGNORE_ILLEGAL_REQUEST |
2307 XS_CTL_IGNORE_NOT_READY |
2308 XS_CTL_IGNORE_MEDIA_CHANGE);
2309 }
2310 }
2311 }
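
/*
 * A sketched use: after recovering from an error a driver can ask for
 * the best mode again, forcing immediate renegotiation:
 *
 *	scsipi_set_xfer_mode(chan, periph->periph_target, 1);
 *
 * With immed != 0 the TEST UNIT READY above gives the adapter a live
 * command to negotiate during.
 */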
2312
2313 /*
2314  * scsipi_async_event_channel_reset:
2315  *
2316  *	Handle a SCSI bus reset on the channel.
2317  *	Must be called at splbio().
2318 */
2319 void
2320 scsipi_async_event_channel_reset(chan)
2321 struct scsipi_channel *chan;
2322 {
2323 struct scsipi_xfer *xs, *xs_next;
2324 struct scsipi_periph *periph;
2325 int target, lun;
2326
2327 /*
2328 	 * The channel has been reset.  Mark pending REQUEST_SENSE commands
2329 	 * as reset as well, since their sense data is no longer available.
2330 	 * We can't call scsipi_done() from here, as the commands have not
2331 	 * been sent to the adapter yet (that would corrupt the accounting).
2332 */
2333
2334 for (xs = TAILQ_FIRST(&chan->chan_queue); xs != NULL; xs = xs_next) {
2335 xs_next = TAILQ_NEXT(xs, channel_q);
2336 if (xs->xs_control & XS_CTL_REQSENSE) {
2337 TAILQ_REMOVE(&chan->chan_queue, xs, channel_q);
2338 xs->error = XS_RESET;
2339 if ((xs->xs_control & XS_CTL_ASYNC) != 0)
2340 TAILQ_INSERT_TAIL(&chan->chan_complete, xs,
2341 channel_q);
2342 }
2343 }
2344 wakeup(&chan->chan_complete);
2345 	/* Catch xfers awaiting sense that have no REQUEST_SENSE xfer queued yet */
2346 for (target = 0; target < chan->chan_ntargets; target++) {
2347 if (target == chan->chan_id)
2348 continue;
2349 for (lun = 0; lun < chan->chan_nluns; lun++) {
2350 periph = chan->chan_periphs[target][lun];
2351 if (periph) {
2352 xs = periph->periph_xscheck;
2353 if (xs)
2354 xs->error = XS_RESET;
2355 }
2356 }
2357 }
2358 }
2359
2360 /*
2361 * scsipi_target_detach:
2362 *
2363  *	Detach all periphs associated with an I_T nexus.
2364  *	Must be called from a valid thread context.
2365 */
2366 int
2367 scsipi_target_detach(chan, target, lun, flags)
2368 struct scsipi_channel *chan;
2369 int target, lun;
2370 int flags;
2371 {
2372 struct scsipi_periph *periph;
2373 int ctarget, mintarget, maxtarget;
2374 int clun, minlun, maxlun;
2375 int error;
2376
2377 if (target == -1) {
2378 mintarget = 0;
2379 maxtarget = chan->chan_ntargets;
2380 } else {
2381 if (target == chan->chan_id)
2382 return EINVAL;
2383 if (target < 0 || target >= chan->chan_ntargets)
2384 return EINVAL;
2385 mintarget = target;
2386 maxtarget = target + 1;
2387 }
2388
2389 if (lun == -1) {
2390 minlun = 0;
2391 maxlun = chan->chan_nluns;
2392 } else {
2393 if (lun < 0 || lun >= chan->chan_nluns)
2394 return EINVAL;
2395 minlun = lun;
2396 maxlun = lun + 1;
2397 }
2398
2399 for (ctarget = mintarget; ctarget < maxtarget; ctarget++) {
2400 if (ctarget == chan->chan_id)
2401 continue;
2402
2403 for (clun = minlun; clun < maxlun; clun++) {
2404 periph = scsipi_lookup_periph(chan, ctarget, clun);
2405 if (periph == NULL)
2406 continue;
2407 error = config_detach(periph->periph_dev, flags);
2408 if (error)
2409 return (error);
2410 scsipi_remove_periph(chan, periph);
2411 free(periph, M_DEVBUF);
2412 }
2413 }
2414 	return (0);
2415 }
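
/*
 * For example, forcibly detaching every LUN of target 2 would be:
 *
 *	error = scsipi_target_detach(chan, 2, -1, DETACH_FORCE);
 *
 * while (-1, -1) sweeps the whole channel, skipping the adapter's own
 * ID in both cases.
 */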
2416
2417 /*
2418 * scsipi_adapter_addref:
2419 *
2420 * Add a reference to the adapter pointed to by the provided
2421 * link, enabling the adapter if necessary.
2422 */
2423 int
2424 scsipi_adapter_addref(adapt)
2425 struct scsipi_adapter *adapt;
2426 {
2427 int s, error = 0;
2428
2429 s = splbio();
2430 if (adapt->adapt_refcnt++ == 0 && adapt->adapt_enable != NULL) {
2431 error = (*adapt->adapt_enable)(adapt->adapt_dev, 1);
2432 if (error)
2433 adapt->adapt_refcnt--;
2434 }
2435 splx(s);
2436 return (error);
2437 }
2438
2439 /*
2440 * scsipi_adapter_delref:
2441 *
2442 * Delete a reference to the adapter pointed to by the provided
2443 * link, disabling the adapter if possible.
2444 */
2445 void
2446 scsipi_adapter_delref(adapt)
2447 struct scsipi_adapter *adapt;
2448 {
2449 int s;
2450
2451 s = splbio();
2452 if (adapt->adapt_refcnt-- == 1 && adapt->adapt_enable != NULL)
2453 (void) (*adapt->adapt_enable)(adapt->adapt_dev, 0);
2454 splx(s);
2455 }
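
/*
 * These are used in matched pairs, e.g. (sketch) around device open
 * and close, so that a PCMCIA- or CardBus-style adapter is only
 * enabled while a device is actually in use:
 *
 *	if ((error = scsipi_adapter_addref(adapt)) != 0)
 *		return (error);
 *	... use the device ...
 *	scsipi_adapter_delref(adapt);
 */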
2456
2457 struct scsipi_syncparam {
2458 int ss_factor;
2459 int ss_period; /* ns * 10 */
2460 } scsipi_syncparams[] = {
2461 { 0x09, 125 },
2462 { 0x0a, 250 },
2463 { 0x0b, 303 },
2464 { 0x0c, 500 },
2465 };
2466 const int scsipi_nsyncparams =
2467 sizeof(scsipi_syncparams) / sizeof(scsipi_syncparams[0]);
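
/*
 * These are the "special" short sync factors from the SPI spec, whose
 * periods are not simply factor * 4 ns: e.g. factor 0x0a means a 25ns
 * period (Fast-40/Ultra2), where the plain rule would give 40ns.
 * Factors not in the table fall back to the factor * 4 ns rule below.
 */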
2468
2469 int
2470 scsipi_sync_period_to_factor(period)
2471 int period; /* ns * 10 */
2472 {
2473 int i;
2474
2475 for (i = 0; i < scsipi_nsyncparams; i++) {
2476 if (period <= scsipi_syncparams[i].ss_period)
2477 return (scsipi_syncparams[i].ss_factor);
2478 }
2479
2480 return ((period / 10) / 4);
2481 }
2482
2483 int
2484 scsipi_sync_factor_to_period(factor)
2485 int factor;
2486 {
2487 int i;
2488
2489 for (i = 0; i < scsipi_nsyncparams; i++) {
2490 if (factor == scsipi_syncparams[i].ss_factor)
2491 return (scsipi_syncparams[i].ss_period);
2492 }
2493
2494 return ((factor * 4) * 10);
2495 }
2496
2497 int
2498 scsipi_sync_factor_to_freq(factor)
2499 int factor;
2500 {
2501 int i;
2502
2503 for (i = 0; i < scsipi_nsyncparams; i++) {
2504 if (factor == scsipi_syncparams[i].ss_factor)
2505 return (10000000 / scsipi_syncparams[i].ss_period);
2506 }
2507
2508 return (10000000 / ((factor * 4) * 10));
2509 }
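
/*
 * Worked examples of the three conversions above:
 *
 *	factor 0x0c -> period 500 (50.0ns) -> 10000000/500 = 20000 kHz
 *	factor 0x19 (25) -> period 25*4*10 = 1000 (100ns) -> 10000 kHz
 *	period 500 -> first table entry with 500 <= ss_period -> 0x0c
 *
 * Note the frequency is in kHz, which is what makes the KB/s and MB/s
 * arithmetic in scsipi_print_xfer_mode() come out right.
 */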
2510
2511 #ifdef SCSIPI_DEBUG
2512 /*
2513  * Given a scsipi_xfer, dump the request in all its glory.
2514 */
2515 void
2516 show_scsipi_xs(xs)
2517 struct scsipi_xfer *xs;
2518 {
2519
2520 printf("xs(%p): ", xs);
2521 printf("xs_control(0x%08x)", xs->xs_control);
2522 printf("xs_status(0x%08x)", xs->xs_status);
2523 printf("periph(%p)", xs->xs_periph);
2524 printf("retr(0x%x)", xs->xs_retries);
2525 printf("timo(0x%x)", xs->timeout);
2526 printf("cmd(%p)", xs->cmd);
2527 printf("len(0x%x)", xs->cmdlen);
2528 printf("data(%p)", xs->data);
2529 printf("len(0x%x)", xs->datalen);
2530 printf("res(0x%x)", xs->resid);
2531 printf("err(0x%x)", xs->error);
2532 printf("bp(%p)", xs->bp);
2533 show_scsipi_cmd(xs);
2534 }
2535
2536 void
2537 show_scsipi_cmd(xs)
2538 struct scsipi_xfer *xs;
2539 {
2540 u_char *b = (u_char *) xs->cmd;
2541 int i = 0;
2542
2543 scsipi_printaddr(xs->xs_periph);
2544 printf(" command: ");
2545
2546 if ((xs->xs_control & XS_CTL_RESET) == 0) {
2547 while (i < xs->cmdlen) {
2548 if (i)
2549 printf(",");
2550 printf("0x%x", b[i++]);
2551 }
2552 printf("-[%d bytes]\n", xs->datalen);
2553 if (xs->datalen)
2554 show_mem(xs->data, min(64, xs->datalen));
2555 } else
2556 printf("-RESET-\n");
2557 }
2558
2559 void
2560 show_mem(address, num)
2561 u_char *address;
2562 int num;
2563 {
2564 int x;
2565
2566 printf("------------------------------");
2567 for (x = 0; x < num; x++) {
2568 if ((x % 16) == 0)
2569 printf("\n%03d: ", x);
2570 printf("%02x ", *address++);
2571 }
2572 printf("\n------------------------------\n");
2573 }
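
/*
 * show_mem() produces a simple hex dump, e.g. (sketch):
 *
 *	------------------------------
 *	000: 00 00 03 00 51 80 0a 02 28 00 00 00 00 00 00 00
 *	016: 00 00 00 00 00 00 00 00
 *	------------------------------
 */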
2574 #endif /* SCSIPI_DEBUG */
2575