scsipi_base.c revision 1.97 1 /* $NetBSD: scsipi_base.c,v 1.97 2003/10/16 22:46:07 mycroft Exp $ */
2
3 /*-
4 * Copyright (c) 1998, 1999, 2000, 2002, 2003 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Charles M. Hannum; by Jason R. Thorpe of the Numerical Aerospace
9 * Simulation Facility, NASA Ames Research Center.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution.
19 * 3. All advertising materials mentioning features or use of this software
20 * must display the following acknowledgement:
21 * This product includes software developed by the NetBSD
22 * Foundation, Inc. and its contributors.
23 * 4. Neither the name of The NetBSD Foundation nor the names of its
24 * contributors may be used to endorse or promote products derived
25 * from this software without specific prior written permission.
26 *
27 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
28 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
29 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
30 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
31 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
32 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
33 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
34 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
35 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
36 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
37 * POSSIBILITY OF SUCH DAMAGE.
38 */
39
40 #include <sys/cdefs.h>
41 __KERNEL_RCSID(0, "$NetBSD: scsipi_base.c,v 1.97 2003/10/16 22:46:07 mycroft Exp $");
42
43 #include "opt_scsi.h"
44
45 #include <sys/param.h>
46 #include <sys/systm.h>
47 #include <sys/kernel.h>
48 #include <sys/buf.h>
49 #include <sys/uio.h>
50 #include <sys/malloc.h>
51 #include <sys/pool.h>
52 #include <sys/errno.h>
53 #include <sys/device.h>
54 #include <sys/proc.h>
55 #include <sys/kthread.h>
56 #include <sys/hash.h>
57
58 #include <dev/scsipi/scsipi_all.h>
59 #include <dev/scsipi/scsipi_disk.h>
60 #include <dev/scsipi/scsipiconf.h>
61 #include <dev/scsipi/scsipi_base.h>
62
63 #include <dev/scsipi/scsi_all.h>
64 #include <dev/scsipi/scsi_message.h>
65
/*
 * Prototypes for functions private to this file.
 */
int	scsipi_complete __P((struct scsipi_xfer *));
void	scsipi_request_sense __P((struct scsipi_xfer *));
int	scsipi_enqueue __P((struct scsipi_xfer *));
void	scsipi_run_queue __P((struct scsipi_channel *chan));

/* Body of the per-channel asynchronous completion thread. */
void	scsipi_completion_thread __P((void *));

/* Tagged-queueing tag allocation/release. */
void	scsipi_get_tag __P((struct scsipi_xfer *));
void	scsipi_put_tag __P((struct scsipi_xfer *));

/* Command-opening (resource) accounting helpers. */
int	scsipi_get_resource __P((struct scsipi_channel *));
void	scsipi_put_resource __P((struct scsipi_channel *));
__inline int	scsipi_grow_resources __P((struct scsipi_channel *));

/* Handlers for asynchronous events posted by adapter drivers. */
void	scsipi_async_event_max_openings __P((struct scsipi_channel *,
	    struct scsipi_max_openings *));
void	scsipi_async_event_xfer_mode __P((struct scsipi_channel *,
	    struct scsipi_xfer_mode *));
void	scsipi_async_event_channel_reset __P((struct scsipi_channel *));

/* Pool backing all scsipi_xfer allocations (set up in scsipi_init()). */
struct pool scsipi_xfer_pool;
87
88 /*
89 * scsipi_init:
90 *
91 * Called when a scsibus or atapibus is attached to the system
92 * to initialize shared data structures.
93 */
94 void
95 scsipi_init()
96 {
97 static int scsipi_init_done;
98
99 if (scsipi_init_done)
100 return;
101 scsipi_init_done = 1;
102
103 /* Initialize the scsipi_xfer pool. */
104 pool_init(&scsipi_xfer_pool, sizeof(struct scsipi_xfer), 0,
105 0, 0, "scxspl", NULL);
106 }
107
/*
 * scsipi_channel_init:
 *
 *	Initialize a scsipi_channel when it is attached.
 *	Always returns 0 (kept int for interface stability).
 */
int
scsipi_channel_init(chan)
	struct scsipi_channel *chan;
{
	int i;

	/* Initialize shared data (idempotent). */
	scsipi_init();

	/* Initialize the pending and complete xfer queues. */
	TAILQ_INIT(&chan->chan_queue);
	TAILQ_INIT(&chan->chan_complete);

	/* Empty every bucket of the periph hash table. */
	for (i = 0; i < SCSIPI_CHAN_PERIPH_BUCKETS; i++)
		LIST_INIT(&chan->chan_periphtab[i]);

	/*
	 * Create the asynchronous completion thread.
	 */
	kthread_create(scsipi_create_completion_thread, chan);
	return (0);
}
135
/*
 * scsipi_channel_shutdown:
 *
 *	Shutdown a scsipi_channel.
 */
void
scsipi_channel_shutdown(chan)
	struct scsipi_channel *chan;
{

	/*
	 * Shut down the completion thread: set the flag and wake it.
	 */
	chan->chan_tflags |= SCSIPI_CHANT_SHUTDOWN;
	wakeup(&chan->chan_complete);

	/*
	 * Now wait for the thread to exit; the thread is expected to
	 * clear chan_thread and wake us up when it does.
	 */
	while (chan->chan_thread != NULL)
		(void) tsleep(&chan->chan_thread, PRIBIO, "scshut", 0);
}
158
159 static uint32_t
160 scsipi_chan_periph_hash(uint64_t t, uint64_t l)
161 {
162 uint32_t hash;
163
164 hash = hash32_buf(&t, sizeof(t), HASH32_BUF_INIT);
165 hash = hash32_buf(&l, sizeof(l), hash);
166
167 return (hash & SCSIPI_CHAN_PERIPH_HASHMASK);
168 }
169
/*
 * scsipi_insert_periph:
 *
 *	Insert a periph into the channel.
 */
void
scsipi_insert_periph(chan, periph)
	struct scsipi_channel *chan;
	struct scsipi_periph *periph;
{
	uint32_t hash;
	int s;

	/* Pick the hash bucket from the periph's target/LUN address. */
	hash = scsipi_chan_periph_hash(periph->periph_target,
	    periph->periph_lun);

	/* Block I/O interrupts while the bucket list is modified. */
	s = splbio();
	LIST_INSERT_HEAD(&chan->chan_periphtab[hash], periph, periph_hash);
	splx(s);
}
190
/*
 * scsipi_remove_periph:
 *
 *	Remove a periph from the channel.
 */
void
scsipi_remove_periph(chan, periph)
	struct scsipi_channel *chan;
	struct scsipi_periph *periph;
{
	int s;

	/* Unlink from its hash bucket while blocking I/O interrupts. */
	s = splbio();
	LIST_REMOVE(periph, periph_hash);
	splx(s);
}
207
/*
 * scsipi_lookup_periph:
 *
 *	Lookup a periph on the specified channel.
 *	Returns NULL if no periph with that target/LUN is attached.
 */
struct scsipi_periph *
scsipi_lookup_periph(chan, target, lun)
	struct scsipi_channel *chan;
	int target, lun;
{
	struct scsipi_periph *periph;
	uint32_t hash;
	int s;

	/* Reject addresses outside the channel's configured range. */
	if (target >= chan->chan_ntargets ||
	    lun >= chan->chan_nluns)
		return (NULL);

	hash = scsipi_chan_periph_hash(target, lun);

	/* Scan the bucket at splbio; periph is NULL if nothing matched. */
	s = splbio();
	LIST_FOREACH(periph, &chan->chan_periphtab[hash], periph_hash) {
		if (periph->periph_target == target &&
		    periph->periph_lun == lun)
			break;
	}
	splx(s);

	return (periph);
}
238
239 /*
240 * scsipi_get_resource:
241 *
242 * Allocate a single xfer `resource' from the channel.
243 *
244 * NOTE: Must be called at splbio().
245 */
246 int
247 scsipi_get_resource(chan)
248 struct scsipi_channel *chan;
249 {
250 struct scsipi_adapter *adapt = chan->chan_adapter;
251
252 if (chan->chan_flags & SCSIPI_CHAN_OPENINGS) {
253 if (chan->chan_openings > 0) {
254 chan->chan_openings--;
255 return (1);
256 }
257 return (0);
258 }
259
260 if (adapt->adapt_openings > 0) {
261 adapt->adapt_openings--;
262 return (1);
263 }
264 return (0);
265 }
266
/*
 * scsipi_grow_resources:
 *
 *	Attempt to grow resources for a channel.  If this succeeds,
 *	we allocate one for our caller.
 *
 *	NOTE: Must be called at splbio().
 */
__inline int
scsipi_grow_resources(chan)
	struct scsipi_channel *chan;
{

	if (chan->chan_flags & SCSIPI_CHAN_CANGROW) {
		if ((chan->chan_flags & SCSIPI_CHAN_TACTIVE) == 0) {
			/* No completion thread yet: grow synchronously. */
			scsipi_adapter_request(chan,
			    ADAPTER_REQ_GROW_RESOURCES, NULL);
			return (scsipi_get_resource(chan));
		}
		/*
		 * ask the channel thread to do it. It'll have to thaw the
		 * queue (we freeze it here so nothing runs until the
		 * thread has grown the resources).
		 */
		scsipi_channel_freeze(chan, 1);
		chan->chan_tflags |= SCSIPI_CHANT_GROWRES;
		wakeup(&chan->chan_complete);
		return (0);
	}

	/* Channel cannot grow; caller gets nothing. */
	return (0);
}
298
299 /*
300 * scsipi_put_resource:
301 *
302 * Free a single xfer `resource' to the channel.
303 *
304 * NOTE: Must be called at splbio().
305 */
306 void
307 scsipi_put_resource(chan)
308 struct scsipi_channel *chan;
309 {
310 struct scsipi_adapter *adapt = chan->chan_adapter;
311
312 if (chan->chan_flags & SCSIPI_CHAN_OPENINGS)
313 chan->chan_openings++;
314 else
315 adapt->adapt_openings++;
316 }
317
/*
 * scsipi_get_tag:
 *
 *	Get a tag ID for the specified xfer.
 *
 *	NOTE: Must be called at splbio().
 */
void
scsipi_get_tag(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_periph *periph = xs->xs_periph;
	int bit, tag;
	u_int word;

	/* Find the first bitmap word with a free tag (a set bit). */
	for (word = 0; word < PERIPH_NTAGWORDS; word++) {
		bit = ffs(periph->periph_freetags[word]);
		if (bit != 0)
			break;
	}
#ifdef DIAGNOSTIC
	if (word == PERIPH_NTAGWORDS) {
		scsipi_printaddr(periph);
		printf("no free tags\n");
		panic("scsipi_get_tag");
	}
#endif
	/*
	 * NOTE(review): without DIAGNOSTIC, exhausting the tag space would
	 * index one past the end of periph_freetags below; callers are
	 * presumably expected never to exhaust it -- confirm.
	 */

	/* ffs() is 1-based; convert to a 0-based bit index. */
	bit -= 1;
	periph->periph_freetags[word] &= ~(1 << bit);
	tag = (word << 5) | bit;	/* 32 tags per bitmap word */

	/* XXX Should eventually disallow this completely. */
	if (tag >= periph->periph_openings) {
		scsipi_printaddr(periph);
		printf("WARNING: tag %d greater than available openings %d\n",
		    tag, periph->periph_openings);
	}

	xs->xs_tag_id = tag;
}
359
360 /*
361 * scsipi_put_tag:
362 *
363 * Put the tag ID for the specified xfer back into the pool.
364 *
365 * NOTE: Must be called at splbio().
366 */
367 void
368 scsipi_put_tag(xs)
369 struct scsipi_xfer *xs;
370 {
371 struct scsipi_periph *periph = xs->xs_periph;
372 int word, bit;
373
374 word = xs->xs_tag_id >> 5;
375 bit = xs->xs_tag_id & 0x1f;
376
377 periph->periph_freetags[word] |= (1 << bit);
378 }
379
380 /*
381 * scsipi_get_xs:
382 *
383 * Allocate an xfer descriptor and associate it with the
384 * specified peripherial. If the peripherial has no more
385 * available command openings, we either block waiting for
386 * one to become available, or fail.
387 */
388 struct scsipi_xfer *
389 scsipi_get_xs(periph, flags)
390 struct scsipi_periph *periph;
391 int flags;
392 {
393 struct scsipi_xfer *xs;
394 int s;
395
396 SC_DEBUG(periph, SCSIPI_DB3, ("scsipi_get_xs\n"));
397
398 /*
399 * If we're cold, make sure we poll.
400 */
401 if (cold)
402 flags |= XS_CTL_NOSLEEP | XS_CTL_POLL;
403
404 #ifdef DIAGNOSTIC
405 /*
406 * URGENT commands can never be ASYNC.
407 */
408 if ((flags & (XS_CTL_URGENT|XS_CTL_ASYNC)) ==
409 (XS_CTL_URGENT|XS_CTL_ASYNC)) {
410 scsipi_printaddr(periph);
411 printf("URGENT and ASYNC\n");
412 panic("scsipi_get_xs");
413 }
414 #endif
415
416 s = splbio();
417 /*
418 * Wait for a command opening to become available. Rules:
419 *
420 * - All xfers must wait for an available opening.
421 * Exception: URGENT xfers can proceed when
422 * active == openings, because we use the opening
423 * of the command we're recovering for.
424 * - if the periph has sense pending, only URGENT & REQSENSE
425 * xfers may proceed.
426 *
427 * - If the periph is recovering, only URGENT xfers may
428 * proceed.
429 *
430 * - If the periph is currently executing a recovery
431 * command, URGENT commands must block, because only
432 * one recovery command can execute at a time.
433 */
434 for (;;) {
435 if (flags & XS_CTL_URGENT) {
436 if (periph->periph_active > periph->periph_openings)
437 goto wait_for_opening;
438 if (periph->periph_flags & PERIPH_SENSE) {
439 if ((flags & XS_CTL_REQSENSE) == 0)
440 goto wait_for_opening;
441 } else {
442 if ((periph->periph_flags &
443 PERIPH_RECOVERY_ACTIVE) != 0)
444 goto wait_for_opening;
445 periph->periph_flags |= PERIPH_RECOVERY_ACTIVE;
446 }
447 break;
448 }
449 if (periph->periph_active >= periph->periph_openings ||
450 (periph->periph_flags & PERIPH_RECOVERING) != 0)
451 goto wait_for_opening;
452 periph->periph_active++;
453 break;
454
455 wait_for_opening:
456 if (flags & XS_CTL_NOSLEEP) {
457 splx(s);
458 return (NULL);
459 }
460 SC_DEBUG(periph, SCSIPI_DB3, ("sleeping\n"));
461 periph->periph_flags |= PERIPH_WAITING;
462 (void) tsleep(periph, PRIBIO, "getxs", 0);
463 }
464 SC_DEBUG(periph, SCSIPI_DB3, ("calling pool_get\n"));
465 xs = pool_get(&scsipi_xfer_pool,
466 ((flags & XS_CTL_NOSLEEP) != 0 ? PR_NOWAIT : PR_WAITOK));
467 if (xs == NULL) {
468 if (flags & XS_CTL_URGENT) {
469 if ((flags & XS_CTL_REQSENSE) == 0)
470 periph->periph_flags &= ~PERIPH_RECOVERY_ACTIVE;
471 } else
472 periph->periph_active--;
473 scsipi_printaddr(periph);
474 printf("unable to allocate %sscsipi_xfer\n",
475 (flags & XS_CTL_URGENT) ? "URGENT " : "");
476 }
477 splx(s);
478
479 SC_DEBUG(periph, SCSIPI_DB3, ("returning\n"));
480
481 if (xs != NULL) {
482 callout_init(&xs->xs_callout);
483 memset(xs, 0, sizeof(*xs));
484 xs->xs_periph = periph;
485 xs->xs_control = flags;
486 xs->xs_status = 0;
487 s = splbio();
488 TAILQ_INSERT_TAIL(&periph->periph_xferq, xs, device_q);
489 splx(s);
490 }
491 return (xs);
492 }
493
/*
 * scsipi_put_xs:
 *
 *	Release an xfer descriptor, decreasing the outstanding command
 *	count for the peripherial.  If there is a thread waiting for
 *	an opening, wake it up.  If not, kick any queued I/O the
 *	peripherial may have.
 *
 *	NOTE: Must be called at splbio().
 */
void
scsipi_put_xs(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_periph *periph = xs->xs_periph;
	int flags = xs->xs_control;	/* saved: xs is freed just below */

	SC_DEBUG(periph, SCSIPI_DB3, ("scsipi_free_xs\n"));

	/* Unlink the xfer and return it to the pool; xs is dead after this. */
	TAILQ_REMOVE(&periph->periph_xferq, xs, device_q);
	pool_put(&scsipi_xfer_pool, xs);

#ifdef DIAGNOSTIC
	if ((periph->periph_flags & PERIPH_RECOVERY_ACTIVE) != 0 &&
	    periph->periph_active == 0) {
		scsipi_printaddr(periph);
		printf("recovery without a command to recovery for\n");
		panic("scsipi_put_xs");
	}
#endif

	/*
	 * Undo the accounting done in scsipi_get_xs(): URGENT commands
	 * (other than REQSENSE) held the recovery slot; everything else
	 * held an ordinary opening.
	 */
	if (flags & XS_CTL_URGENT) {
		if ((flags & XS_CTL_REQSENSE) == 0)
			periph->periph_flags &= ~PERIPH_RECOVERY_ACTIVE;
	} else
		periph->periph_active--;

	/* Notify anyone waiting in scsipi_wait_drain(). */
	if (periph->periph_active == 0 &&
	    (periph->periph_flags & PERIPH_WAITDRAIN) != 0) {
		periph->periph_flags &= ~PERIPH_WAITDRAIN;
		wakeup(&periph->periph_active);
	}

	if (periph->periph_flags & PERIPH_WAITING) {
		/* A thread is blocked in scsipi_get_xs(); wake it. */
		periph->periph_flags &= ~PERIPH_WAITING;
		wakeup(periph);
	} else {
		/* Nobody waiting: kick the periph's own queued I/O. */
		if (periph->periph_switch->psw_start != NULL) {
			SC_DEBUG(periph, SCSIPI_DB2,
			    ("calling private start()\n"));
			(*periph->periph_switch->psw_start)(periph);
		}
	}
}
547
/*
 * scsipi_channel_freeze:
 *
 *	Freeze a channel's xfer queue.  Balanced by scsipi_channel_thaw();
 *	the queue is only kicked once the count drops back to zero.
 */
void
scsipi_channel_freeze(chan, count)
	struct scsipi_channel *chan;
	int count;
{
	int s;

	/* Bump the freeze count under splbio. */
	s = splbio();
	chan->chan_qfreeze += count;
	splx(s);
}
564
/*
 * scsipi_channel_thaw:
 *
 *	Thaw a channel's xfer queue.
 */
void
scsipi_channel_thaw(chan, count)
	struct scsipi_channel *chan;
	int count;
{
	int s;

	s = splbio();
	chan->chan_qfreeze -= count;
	/*
	 * Don't let the freeze count go negative.
	 *
	 * Presumably the adapter driver could keep track of this,
	 * but it might just be easier to do this here so as to allow
	 * multiple callers, including those outside the adapter driver.
	 */
	if (chan->chan_qfreeze < 0) {
		chan->chan_qfreeze = 0;
	}
	splx(s);
	/*
	 * Kick the channel's queue here.  Note, we may be running in
	 * interrupt context (softclock or HBA's interrupt), so the adapter
	 * driver had better not sleep.
	 *
	 * NOTE(review): chan_qfreeze is re-read here after splx(), so a
	 * concurrent freeze could race with this check -- confirm this is
	 * benign (worst case the queue is kicked or skipped spuriously).
	 */
	if (chan->chan_qfreeze == 0)
		scsipi_run_queue(chan);
}
598
/*
 * scsipi_channel_timed_thaw:
 *
 *	Thaw a channel after some time has expired.  This will also
 *	run the channel's queue if the freeze count has reached 0.
 */
void
scsipi_channel_timed_thaw(arg)
	void *arg;
{
	struct scsipi_channel *chan = arg;

	/* Callout-compatible (void *) wrapper around scsipi_channel_thaw(). */
	scsipi_channel_thaw(chan, 1);
}
613
/*
 * scsipi_periph_freeze:
 *
 *	Freeze a device's xfer queue.  Balanced by scsipi_periph_thaw().
 */
void
scsipi_periph_freeze(periph, count)
	struct scsipi_periph *periph;
	int count;
{
	int s;

	/* Bump the per-periph freeze count under splbio. */
	s = splbio();
	periph->periph_qfreeze += count;
	splx(s);
}
630
/*
 * scsipi_periph_thaw:
 *
 *	Thaw a device's xfer queue.
 */
void
scsipi_periph_thaw(periph, count)
	struct scsipi_periph *periph;
	int count;
{
	int s;

	s = splbio();
	periph->periph_qfreeze -= count;
#ifdef DIAGNOSTIC
	/* More thaws than freezes indicates an accounting bug. */
	if (periph->periph_qfreeze < 0) {
		static const char pc[] = "periph freeze count < 0";
		scsipi_printaddr(periph);
		printf("%s\n", pc);
		panic(pc);
	}
#endif
	/* Wake any thread blocked waiting for the periph to unfreeze. */
	if (periph->periph_qfreeze == 0 &&
	    (periph->periph_flags & PERIPH_WAITING) != 0)
		wakeup(periph);
	splx(s);
}
658
/*
 * scsipi_periph_timed_thaw:
 *
 *	Thaw a device after some time has expired.
 */
void
scsipi_periph_timed_thaw(arg)
	void *arg;
{
	int s;
	struct scsipi_periph *periph = arg;

	/* Stop the periph's callout before thawing. */
	callout_stop(&periph->periph_callout);

	s = splbio();
	scsipi_periph_thaw(periph, 1);
	if ((periph->periph_channel->chan_flags & SCSIPI_CHAN_TACTIVE) == 0) {
		/*
		 * Kick the channel's queue here.  Note, we're running in
		 * interrupt context (softclock), so the adapter driver
		 * had better not sleep.
		 */
		scsipi_run_queue(periph->periph_channel);
	} else {
		/*
		 * Tell the completion thread to kick the channel's queue here.
		 */
		periph->periph_channel->chan_tflags |= SCSIPI_CHANT_KICK;
		wakeup(&periph->periph_channel->chan_complete);
	}
	splx(s);
}
691
/*
 * scsipi_wait_drain:
 *
 *	Wait for a periph's pending xfers to drain.
 */
void
scsipi_wait_drain(periph)
	struct scsipi_periph *periph;
{
	int s;

	s = splbio();
	/*
	 * Sleep until the active count reaches zero; scsipi_put_xs()
	 * wakes us when it releases the last outstanding xfer.
	 */
	while (periph->periph_active != 0) {
		periph->periph_flags |= PERIPH_WAITDRAIN;
		(void) tsleep(&periph->periph_active, PRIBIO, "sxdrn", 0);
	}
	splx(s);
}
710
/*
 * scsipi_kill_pending:
 *
 *	Kill off all pending xfers for a periph.
 *
 *	NOTE: Must be called at splbio().
 */
void
scsipi_kill_pending(periph)
	struct scsipi_periph *periph;
{

	/* Let the bus-specific code abort everything queued for us. */
	(*periph->periph_channel->chan_bustype->bustype_kill_pending)(periph);
#ifdef DIAGNOSTIC
	if (TAILQ_FIRST(&periph->periph_xferq) != NULL)
		panic("scsipi_kill_pending");
#endif
	/* Wait for any xfers still in flight to complete. */
	scsipi_wait_drain(periph);
}
730
731 /*
732 * scsipi_interpret_sense:
733 *
734 * Look at the returned sense and act on the error, determining
735 * the unix error number to pass back. (0 = report no error)
736 *
737 * NOTE: If we return ERESTART, we are expected to haved
738 * thawed the device!
739 *
740 * THIS IS THE DEFAULT ERROR HANDLER FOR SCSI DEVICES.
741 */
742 int
743 scsipi_interpret_sense(xs)
744 struct scsipi_xfer *xs;
745 {
746 struct scsipi_sense_data *sense;
747 struct scsipi_periph *periph = xs->xs_periph;
748 u_int8_t key;
749 int error;
750 #ifndef SCSIVERBOSE
751 u_int32_t info;
752 static char *error_mes[] = {
753 "soft error (corrected)",
754 "not ready", "medium error",
755 "non-media hardware failure", "illegal request",
756 "unit attention", "readonly device",
757 "no data found", "vendor unique",
758 "copy aborted", "command aborted",
759 "search returned equal", "volume overflow",
760 "verify miscompare", "unknown error key"
761 };
762 #endif
763
764 sense = &xs->sense.scsi_sense;
765 #ifdef SCSIPI_DEBUG
766 if (periph->periph_flags & SCSIPI_DB1) {
767 int count;
768 scsipi_printaddr(periph);
769 printf(" sense debug information:\n");
770 printf("\tcode 0x%x valid 0x%x\n",
771 sense->error_code & SSD_ERRCODE,
772 sense->error_code & SSD_ERRCODE_VALID ? 1 : 0);
773 printf("\tseg 0x%x key 0x%x ili 0x%x eom 0x%x fmark 0x%x\n",
774 sense->segment,
775 sense->flags & SSD_KEY,
776 sense->flags & SSD_ILI ? 1 : 0,
777 sense->flags & SSD_EOM ? 1 : 0,
778 sense->flags & SSD_FILEMARK ? 1 : 0);
779 printf("\ninfo: 0x%x 0x%x 0x%x 0x%x followed by %d "
780 "extra bytes\n",
781 sense->info[0],
782 sense->info[1],
783 sense->info[2],
784 sense->info[3],
785 sense->extra_len);
786 printf("\textra: ");
787 for (count = 0; count < ADD_BYTES_LIM(sense); count++)
788 printf("0x%x ", sense->cmd_spec_info[count]);
789 printf("\n");
790 }
791 #endif
792
793 /*
794 * If the periph has it's own error handler, call it first.
795 * If it returns a legit error value, return that, otherwise
796 * it wants us to continue with normal error processing.
797 */
798 if (periph->periph_switch->psw_error != NULL) {
799 SC_DEBUG(periph, SCSIPI_DB2,
800 ("calling private err_handler()\n"));
801 error = (*periph->periph_switch->psw_error)(xs);
802 if (error != EJUSTRETURN)
803 return (error);
804 }
805 /* otherwise use the default */
806 switch (sense->error_code & SSD_ERRCODE) {
807
808 /*
809 * Old SCSI-1 and SASI devices respond with
810 * codes other than 70.
811 */
812 case 0x00: /* no error (command completed OK) */
813 return (0);
814 case 0x04: /* drive not ready after it was selected */
815 if ((periph->periph_flags & PERIPH_REMOVABLE) != 0)
816 periph->periph_flags &= ~PERIPH_MEDIA_LOADED;
817 if ((xs->xs_control & XS_CTL_IGNORE_NOT_READY) != 0)
818 return (0);
819 /* XXX - display some sort of error here? */
820 return (EIO);
821 case 0x20: /* invalid command */
822 if ((xs->xs_control &
823 XS_CTL_IGNORE_ILLEGAL_REQUEST) != 0)
824 return (0);
825 return (EINVAL);
826 case 0x25: /* invalid LUN (Adaptec ACB-4000) */
827 return (EACCES);
828
829 /*
830 * If it's code 70, use the extended stuff and
831 * interpret the key
832 */
833 case 0x71: /* delayed error */
834 scsipi_printaddr(periph);
835 key = sense->flags & SSD_KEY;
836 printf(" DEFERRED ERROR, key = 0x%x\n", key);
837 /* FALLTHROUGH */
838 case 0x70:
839 #ifndef SCSIVERBOSE
840 if ((sense->error_code & SSD_ERRCODE_VALID) != 0)
841 info = _4btol(sense->info);
842 else
843 info = 0;
844 #endif
845 key = sense->flags & SSD_KEY;
846
847 switch (key) {
848 case SKEY_NO_SENSE:
849 case SKEY_RECOVERED_ERROR:
850 if (xs->resid == xs->datalen && xs->datalen) {
851 /*
852 * Why is this here?
853 */
854 xs->resid = 0; /* not short read */
855 }
856 case SKEY_EQUAL:
857 error = 0;
858 break;
859 case SKEY_NOT_READY:
860 if ((periph->periph_flags & PERIPH_REMOVABLE) != 0)
861 periph->periph_flags &= ~PERIPH_MEDIA_LOADED;
862 if ((xs->xs_control & XS_CTL_IGNORE_NOT_READY) != 0)
863 return (0);
864 if (sense->add_sense_code == 0x3A) {
865 error = ENODEV; /* Medium not present */
866 if (xs->xs_control & XS_CTL_SILENT_NODEV)
867 return (error);
868 } else
869 error = EIO;
870 if ((xs->xs_control & XS_CTL_SILENT) != 0)
871 return (error);
872 break;
873 case SKEY_ILLEGAL_REQUEST:
874 if ((xs->xs_control &
875 XS_CTL_IGNORE_ILLEGAL_REQUEST) != 0)
876 return (0);
877 /*
878 * Handle the case where a device reports
879 * Logical Unit Not Supported during discovery.
880 */
881 if ((xs->xs_control & XS_CTL_DISCOVERY) != 0 &&
882 sense->add_sense_code == 0x25 &&
883 sense->add_sense_code_qual == 0x00)
884 return (EINVAL);
885 if ((xs->xs_control & XS_CTL_SILENT) != 0)
886 return (EIO);
887 error = EINVAL;
888 break;
889 case SKEY_UNIT_ATTENTION:
890 if (sense->add_sense_code == 0x29 &&
891 sense->add_sense_code_qual == 0x00) {
892 /* device or bus reset */
893 return (ERESTART);
894 }
895 if ((periph->periph_flags & PERIPH_REMOVABLE) != 0)
896 periph->periph_flags &= ~PERIPH_MEDIA_LOADED;
897 if ((xs->xs_control &
898 XS_CTL_IGNORE_MEDIA_CHANGE) != 0 ||
899 /* XXX Should reupload any transient state. */
900 (periph->periph_flags &
901 PERIPH_REMOVABLE) == 0) {
902 return (ERESTART);
903 }
904 if ((xs->xs_control & XS_CTL_SILENT) != 0)
905 return (EIO);
906 error = EIO;
907 break;
908 case SKEY_WRITE_PROTECT:
909 error = EROFS;
910 break;
911 case SKEY_BLANK_CHECK:
912 error = 0;
913 break;
914 case SKEY_ABORTED_COMMAND:
915 error = ERESTART;
916 break;
917 case SKEY_VOLUME_OVERFLOW:
918 error = ENOSPC;
919 break;
920 default:
921 error = EIO;
922 break;
923 }
924
925 #ifdef SCSIVERBOSE
926 if (key && (xs->xs_control & XS_CTL_SILENT) == 0)
927 scsipi_print_sense(xs, 0);
928 #else
929 if (key) {
930 scsipi_printaddr(periph);
931 printf("%s", error_mes[key - 1]);
932 if ((sense->error_code & SSD_ERRCODE_VALID) != 0) {
933 switch (key) {
934 case SKEY_NOT_READY:
935 case SKEY_ILLEGAL_REQUEST:
936 case SKEY_UNIT_ATTENTION:
937 case SKEY_WRITE_PROTECT:
938 break;
939 case SKEY_BLANK_CHECK:
940 printf(", requested size: %d (decimal)",
941 info);
942 break;
943 case SKEY_ABORTED_COMMAND:
944 if (xs->xs_retries)
945 printf(", retrying");
946 printf(", cmd 0x%x, info 0x%x",
947 xs->cmd->opcode, info);
948 break;
949 default:
950 printf(", info = %d (decimal)", info);
951 }
952 }
953 if (sense->extra_len != 0) {
954 int n;
955 printf(", data =");
956 for (n = 0; n < sense->extra_len; n++)
957 printf(" %02x",
958 sense->cmd_spec_info[n]);
959 }
960 printf("\n");
961 }
962 #endif
963 return (error);
964
965 /*
966 * Some other code, just report it
967 */
968 default:
969 #if defined(SCSIDEBUG) || defined(DEBUG)
970 {
971 static char *uc = "undecodable sense error";
972 int i;
973 u_int8_t *cptr = (u_int8_t *) sense;
974 scsipi_printaddr(periph);
975 if (xs->cmd == &xs->cmdstore) {
976 printf("%s for opcode 0x%x, data=",
977 uc, xs->cmdstore.opcode);
978 } else {
979 printf("%s, data=", uc);
980 }
981 for (i = 0; i < sizeof (sense); i++)
982 printf(" 0x%02x", *(cptr++) & 0xff);
983 printf("\n");
984 }
985 #else
986 scsipi_printaddr(periph);
987 printf("Sense Error Code 0x%x",
988 sense->error_code & SSD_ERRCODE);
989 if ((sense->error_code & SSD_ERRCODE_VALID) != 0) {
990 struct scsipi_sense_data_unextended *usense =
991 (struct scsipi_sense_data_unextended *)sense;
992 printf(" at block no. %d (decimal)",
993 _3btol(usense->block));
994 }
995 printf("\n");
996 #endif
997 return (EIO);
998 }
999 }
1000
/*
 * scsipi_size:
 *
 *	Find out from the device what its capacity is.
 *	Returns the block count, or 0 if READ CAPACITY failed.
 */
u_int64_t
scsipi_size(periph, flags)
	struct scsipi_periph *periph;
	int flags;
{
	struct scsipi_read_cap_data rdcap;
	struct scsipi_read_capacity scsipi_cmd;

	memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
	scsipi_cmd.opcode = READ_CAPACITY;

	/*
	 * If the command works, interpret the result as a 4 byte
	 * number of blocks
	 */
	if (scsipi_command(periph, (struct scsipi_generic *)&scsipi_cmd,
	    sizeof(scsipi_cmd), (u_char *)&rdcap, sizeof(rdcap),
	    SCSIPIRETRIES, 20000, NULL,
	    flags | XS_CTL_DATA_IN | XS_CTL_DATA_ONSTACK | XS_CTL_SILENT) != 0)
		return (0);

	/* The device reports its last block address; +1 gives the count. */
	return (_4btol(rdcap.addr) + 1);
}
1029
1030 /*
1031 * scsipi_test_unit_ready:
1032 *
1033 * Issue a `test unit ready' request.
1034 */
1035 int
1036 scsipi_test_unit_ready(periph, flags)
1037 struct scsipi_periph *periph;
1038 int flags;
1039 {
1040 int retries;
1041 struct scsipi_test_unit_ready scsipi_cmd;
1042
1043 /* some ATAPI drives don't support TEST_UNIT_READY. Sigh */
1044 if (periph->periph_quirks & PQUIRK_NOTUR)
1045 return (0);
1046
1047 memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
1048 scsipi_cmd.opcode = TEST_UNIT_READY;
1049
1050 if (flags & XS_CTL_DISCOVERY)
1051 retries = 0;
1052 else
1053 retries = SCSIPIRETRIES;
1054
1055 return (scsipi_command(periph,
1056 (struct scsipi_generic *)&scsipi_cmd, sizeof(scsipi_cmd),
1057 0, 0, retries, 10000, NULL, flags));
1058 }
1059
/*
 * scsipi_inquire:
 *
 *	Ask the device about itself.  Issues a SCSI-2 length INQUIRY
 *	first, then re-issues with the SCSI-3 length if the device
 *	advertises more inquiry data.
 */
int
scsipi_inquire(periph, inqbuf, flags)
	struct scsipi_periph *periph;
	struct scsipi_inquiry_data *inqbuf;
	int flags;
{
	int retries;
	struct scsipi_inquiry scsipi_cmd;
	int error;

	memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
	scsipi_cmd.opcode = INQUIRY;

	/* No retries during bus discovery. */
	if (flags & XS_CTL_DISCOVERY)
		retries = 0;
	else
		retries = SCSIPIRETRIES;

	/* First pass: request the SCSI-2 amount of inquiry data. */
	scsipi_cmd.length = SCSIPI_INQUIRY_LENGTH_SCSI2;
	error = scsipi_command(periph,
	    (struct scsipi_generic *) &scsipi_cmd, sizeof(scsipi_cmd),
	    (u_char *) inqbuf, SCSIPI_INQUIRY_LENGTH_SCSI2,
	    retries, 10000, NULL, XS_CTL_DATA_IN | flags);
	/* If the device says it has more to tell us, ask again. */
	if (!error && inqbuf->additional_length > SCSIPI_INQUIRY_LENGTH_SCSI2 - 4) {
		scsipi_cmd.length = SCSIPI_INQUIRY_LENGTH_SCSI3;
		error = scsipi_command(periph,
		    (struct scsipi_generic *) &scsipi_cmd, sizeof(scsipi_cmd),
		    (u_char *) inqbuf, SCSIPI_INQUIRY_LENGTH_SCSI3,
		    retries, 10000, NULL, XS_CTL_DATA_IN | flags);
	}

#ifdef SCSI_OLD_NOINQUIRY
	/*
	 * Kludge for the Adaptec ACB-4000 SCSI->MFM translator.
	 * This board doesn't support the INQUIRY command at all.
	 */
	if (error == EINVAL || error == EACCES) {
		/*
		 * Conjure up an INQUIRY response.
		 */
		inqbuf->device = (error == EINVAL ?
			 SID_QUAL_LU_PRESENT :
			 SID_QUAL_LU_NOTPRESENT) | T_DIRECT;
		inqbuf->dev_qual2 = 0;
		inqbuf->version = 0;
		inqbuf->response_format = SID_FORMAT_SCSI1;
		inqbuf->additional_length = SCSIPI_INQUIRY_LENGTH_SCSI2 - 4;
		inqbuf->flags1 = inqbuf->flags2 = inqbuf->flags3 = 0;
		/* 28 bytes: vendor(8) + product(16) + revision(4). */
		memcpy(inqbuf->vendor, "ADAPTEC ACB-4000            ", 28);
		error = 0;
	}

	/*
	 * Kludge for the Emulex MT-02 SCSI->QIC translator.
	 * This board gives an empty response to an INQUIRY command.
	 */
	else if (error == 0 &&
	    inqbuf->device == (SID_QUAL_LU_PRESENT | T_DIRECT) &&
	    inqbuf->dev_qual2 == 0 &&
	    inqbuf->version == 0 &&
	    inqbuf->response_format == SID_FORMAT_SCSI1) {
		/*
		 * Fill out the INQUIRY response.
		 */
		inqbuf->device = (SID_QUAL_LU_PRESENT | T_SEQUENTIAL);
		inqbuf->dev_qual2 = SID_REMOVABLE;
		inqbuf->additional_length = SCSIPI_INQUIRY_LENGTH_SCSI2 - 4;
		inqbuf->flags1 = inqbuf->flags2 = inqbuf->flags3 = 0;
		/* 28 bytes: vendor(8) + product(16) + revision(4). */
		memcpy(inqbuf->vendor, "EMULEX  MT-02 QIC           ", 28);
	}
#endif /* SCSI_OLD_NOINQUIRY */

	return error;
}
1139
1140 /*
1141 * scsipi_prevent:
1142 *
1143 * Prevent or allow the user to remove the media
1144 */
1145 int
1146 scsipi_prevent(periph, type, flags)
1147 struct scsipi_periph *periph;
1148 int type, flags;
1149 {
1150 struct scsipi_prevent scsipi_cmd;
1151
1152 memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
1153 scsipi_cmd.opcode = PREVENT_ALLOW;
1154 scsipi_cmd.how = type;
1155
1156 return (scsipi_command(periph,
1157 (struct scsipi_generic *) &scsipi_cmd, sizeof(scsipi_cmd),
1158 0, 0, SCSIPIRETRIES, 5000, NULL, flags));
1159 }
1160
1161 /*
1162 * scsipi_start:
1163 *
1164 * Send a START UNIT.
1165 */
1166 int
1167 scsipi_start(periph, type, flags)
1168 struct scsipi_periph *periph;
1169 int type, flags;
1170 {
1171 struct scsipi_start_stop scsipi_cmd;
1172
1173 memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
1174 scsipi_cmd.opcode = START_STOP;
1175 scsipi_cmd.byte2 = 0x00;
1176 scsipi_cmd.how = type;
1177
1178 return (scsipi_command(periph,
1179 (struct scsipi_generic *) &scsipi_cmd, sizeof(scsipi_cmd),
1180 0, 0, SCSIPIRETRIES, (type & SSS_START) ? 60000 : 10000,
1181 NULL, flags));
1182 }
1183
1184 /*
1185 * scsipi_mode_sense, scsipi_mode_sense_big:
1186 * get a sense page from a device
1187 */
1188
1189 int
1190 scsipi_mode_sense(periph, byte2, page, data, len, flags, retries, timeout)
1191 struct scsipi_periph *periph;
1192 int byte2, page, len, flags, retries, timeout;
1193 struct scsipi_mode_header *data;
1194 {
1195 struct scsipi_mode_sense scsipi_cmd;
1196 int error;
1197
1198 memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
1199 scsipi_cmd.opcode = MODE_SENSE;
1200 scsipi_cmd.byte2 = byte2;
1201 scsipi_cmd.page = page;
1202 scsipi_cmd.length = len & 0xff;
1203 error = scsipi_command(periph, (struct scsipi_generic *)&scsipi_cmd,
1204 sizeof(scsipi_cmd), (void *)data, len, retries, timeout, NULL,
1205 flags | XS_CTL_DATA_IN);
1206 SC_DEBUG(periph, SCSIPI_DB2,
1207 ("scsipi_mode_sense: error=%d\n", error));
1208 return (error);
1209 }
1210
1211 int
1212 scsipi_mode_sense_big(periph, byte2, page, data, len, flags, retries, timeout)
1213 struct scsipi_periph *periph;
1214 int byte2, page, len, flags, retries, timeout;
1215 struct scsipi_mode_header_big *data;
1216 {
1217 struct scsipi_mode_sense_big scsipi_cmd;
1218 int error;
1219
1220 memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
1221 scsipi_cmd.opcode = MODE_SENSE_BIG;
1222 scsipi_cmd.byte2 = byte2;
1223 scsipi_cmd.page = page;
1224 _lto2b(len, scsipi_cmd.length);
1225 error = scsipi_command(periph, (struct scsipi_generic *)&scsipi_cmd,
1226 sizeof(scsipi_cmd), (void *)data, len, retries, timeout, NULL,
1227 flags | XS_CTL_DATA_IN);
1228 SC_DEBUG(periph, SCSIPI_DB2,
1229 ("scsipi_mode_sense_big: error=%d\n", error));
1230 return (error);
1231 }
1232
1233 int
1234 scsipi_mode_select(periph, byte2, data, len, flags, retries, timeout)
1235 struct scsipi_periph *periph;
1236 int byte2, len, flags, retries, timeout;
1237 struct scsipi_mode_header *data;
1238 {
1239 struct scsipi_mode_select scsipi_cmd;
1240 int error;
1241
1242 memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
1243 scsipi_cmd.opcode = MODE_SELECT;
1244 scsipi_cmd.byte2 = byte2;
1245 scsipi_cmd.length = len & 0xff;
1246 error = scsipi_command(periph, (struct scsipi_generic *)&scsipi_cmd,
1247 sizeof(scsipi_cmd), (void *)data, len, retries, timeout, NULL,
1248 flags | XS_CTL_DATA_OUT);
1249 SC_DEBUG(periph, SCSIPI_DB2,
1250 ("scsipi_mode_select: error=%d\n", error));
1251 return (error);
1252 }
1253
1254 int
1255 scsipi_mode_select_big(periph, byte2, data, len, flags, retries, timeout)
1256 struct scsipi_periph *periph;
1257 int byte2, len, flags, retries, timeout;
1258 struct scsipi_mode_header_big *data;
1259 {
1260 struct scsipi_mode_select_big scsipi_cmd;
1261 int error;
1262
1263 memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
1264 scsipi_cmd.opcode = MODE_SELECT_BIG;
1265 scsipi_cmd.byte2 = byte2;
1266 _lto2b(len, scsipi_cmd.length);
1267 error = scsipi_command(periph, (struct scsipi_generic *)&scsipi_cmd,
1268 sizeof(scsipi_cmd), (void *)data, len, retries, timeout, NULL,
1269 flags | XS_CTL_DATA_OUT);
1270 SC_DEBUG(periph, SCSIPI_DB2,
1271 ("scsipi_mode_select: error=%d\n", error));
1272 return (error);
1273 }
1274
1275 /*
1276 * scsipi_done:
1277 *
1278 * This routine is called by an adapter's interrupt handler when
1279 * an xfer is completed.
1280 */
1281 void
1282 scsipi_done(xs)
1283 struct scsipi_xfer *xs;
1284 {
1285 struct scsipi_periph *periph = xs->xs_periph;
1286 struct scsipi_channel *chan = periph->periph_channel;
1287 int s, freezecnt;
1288
1289 SC_DEBUG(periph, SCSIPI_DB2, ("scsipi_done\n"));
1290 #ifdef SCSIPI_DEBUG
1291 if (periph->periph_dbflags & SCSIPI_DB1)
1292 show_scsipi_cmd(xs);
1293 #endif
1294
1295 s = splbio();
1296 /*
1297 * The resource this command was using is now free.
1298 */
1299 scsipi_put_resource(chan);
1300 xs->xs_periph->periph_sent--;
1301
1302 /*
1303 * If the command was tagged, free the tag.
1304 */
1305 if (XS_CTL_TAGTYPE(xs) != 0)
1306 scsipi_put_tag(xs);
1307 else
1308 periph->periph_flags &= ~PERIPH_UNTAG;
1309
1310 /* Mark the command as `done'. */
1311 xs->xs_status |= XS_STS_DONE;
1312
1313 #ifdef DIAGNOSTIC
1314 if ((xs->xs_control & (XS_CTL_ASYNC|XS_CTL_POLL)) ==
1315 (XS_CTL_ASYNC|XS_CTL_POLL))
1316 panic("scsipi_done: ASYNC and POLL");
1317 #endif
1318
1319 /*
1320 * If the xfer had an error of any sort, freeze the
1321 * periph's queue. Freeze it again if we were requested
1322 * to do so in the xfer.
1323 */
1324 freezecnt = 0;
1325 if (xs->error != XS_NOERROR)
1326 freezecnt++;
1327 if (xs->xs_control & XS_CTL_FREEZE_PERIPH)
1328 freezecnt++;
1329 if (freezecnt != 0)
1330 scsipi_periph_freeze(periph, freezecnt);
1331
1332 /*
1333 * record the xfer with a pending sense, in case a SCSI reset is
1334 * received before the thread is waked up.
1335 */
1336 if (xs->error == XS_BUSY && xs->status == SCSI_CHECK) {
1337 periph->periph_flags |= PERIPH_SENSE;
1338 periph->periph_xscheck = xs;
1339 }
1340
1341 /*
1342 * If this was an xfer that was not to complete asynchronously,
1343 * let the requesting thread perform error checking/handling
1344 * in its context.
1345 */
1346 if ((xs->xs_control & XS_CTL_ASYNC) == 0) {
1347 splx(s);
1348 /*
1349 * If it's a polling job, just return, to unwind the
1350 * call graph. We don't need to restart the queue,
1351 * because pollings jobs are treated specially, and
1352 * are really only used during crash dumps anyway
1353 * (XXX or during boot-time autconfiguration of
1354 * ATAPI devices).
1355 */
1356 if (xs->xs_control & XS_CTL_POLL)
1357 return;
1358 wakeup(xs);
1359 goto out;
1360 }
1361
1362 /*
1363 * Catch the extremely common case of I/O completing
1364 * without error; no use in taking a context switch
1365 * if we can handle it in interrupt context.
1366 */
1367 if (xs->error == XS_NOERROR) {
1368 splx(s);
1369 (void) scsipi_complete(xs);
1370 goto out;
1371 }
1372
1373 /*
1374 * There is an error on this xfer. Put it on the channel's
1375 * completion queue, and wake up the completion thread.
1376 */
1377 TAILQ_INSERT_TAIL(&chan->chan_complete, xs, channel_q);
1378 splx(s);
1379 wakeup(&chan->chan_complete);
1380
1381 out:
1382 /*
1383 * If there are more xfers on the channel's queue, attempt to
1384 * run them.
1385 */
1386 scsipi_run_queue(chan);
1387 }
1388
1389 /*
1390 * scsipi_complete:
1391 *
1392 * Completion of a scsipi_xfer. This is the guts of scsipi_done().
1393 *
1394 * NOTE: This routine MUST be called with valid thread context
1395 * except for the case where the following two conditions are
1396 * true:
1397 *
1398 * xs->error == XS_NOERROR
1399 * XS_CTL_ASYNC is set in xs->xs_control
1400 *
1401 * The semantics of this routine can be tricky, so here is an
1402 * explanation:
1403 *
1404 * 0 Xfer completed successfully.
1405 *
1406 * ERESTART Xfer had an error, but was restarted.
1407 *
1408 * anything else Xfer had an error, return value is Unix
1409 * errno.
1410 *
1411 * If the return value is anything but ERESTART:
1412 *
1413 * - If XS_CTL_ASYNC is set, `xs' has been freed back to
1414 * the pool.
1415 * - If there is a buf associated with the xfer,
1416 * it has been biodone()'d.
1417 */
1418 int
1419 scsipi_complete(xs)
1420 struct scsipi_xfer *xs;
1421 {
1422 struct scsipi_periph *periph = xs->xs_periph;
1423 struct scsipi_channel *chan = periph->periph_channel;
1424 struct buf *bp;
1425 int error, s;
1426
1427 #ifdef DIAGNOSTIC
1428 if ((xs->xs_control & XS_CTL_ASYNC) != 0 && xs->bp == NULL)
1429 panic("scsipi_complete: XS_CTL_ASYNC but no buf");
1430 #endif
1431 /*
1432 * If command terminated with a CHECK CONDITION, we need to issue a
1433 * REQUEST_SENSE command. Once the REQUEST_SENSE has been processed
1434 * we'll have the real status.
1435 * Must be processed at splbio() to avoid missing a SCSI bus reset
1436 * for this command.
1437 */
1438 s = splbio();
1439 if (xs->error == XS_BUSY && xs->status == SCSI_CHECK) {
1440 /* request sense for a request sense ? */
1441 if (xs->xs_control & XS_CTL_REQSENSE) {
1442 scsipi_printaddr(periph);
1443 printf("request sense for a request sense ?\n");
1444 /* XXX maybe we should reset the device ? */
1445 /* we've been frozen because xs->error != XS_NOERROR */
1446 scsipi_periph_thaw(periph, 1);
1447 splx(s);
1448 if (xs->resid < xs->datalen) {
1449 printf("we read %d bytes of sense anyway:\n",
1450 xs->datalen - xs->resid);
1451 #ifdef SCSIVERBOSE
1452 scsipi_print_sense_data((void *)xs->data, 0);
1453 #endif
1454 }
1455 return EINVAL;
1456 }
1457 scsipi_request_sense(xs);
1458 }
1459 splx(s);
1460
1461 /*
1462 * If it's a user level request, bypass all usual completion
1463 * processing, let the user work it out..
1464 */
1465 if ((xs->xs_control & XS_CTL_USERCMD) != 0) {
1466 SC_DEBUG(periph, SCSIPI_DB3, ("calling user done()\n"));
1467 if (xs->error != XS_NOERROR)
1468 scsipi_periph_thaw(periph, 1);
1469 scsipi_user_done(xs);
1470 SC_DEBUG(periph, SCSIPI_DB3, ("returned from user done()\n "));
1471 return 0;
1472 }
1473
1474 switch (xs->error) {
1475 case XS_NOERROR:
1476 error = 0;
1477 break;
1478
1479 case XS_SENSE:
1480 case XS_SHORTSENSE:
1481 error = (*chan->chan_bustype->bustype_interpret_sense)(xs);
1482 break;
1483
1484 case XS_RESOURCE_SHORTAGE:
1485 /*
1486 * XXX Should freeze channel's queue.
1487 */
1488 scsipi_printaddr(periph);
1489 printf("adapter resource shortage\n");
1490 /* FALLTHROUGH */
1491
1492 case XS_BUSY:
1493 if (xs->error == XS_BUSY && xs->status == SCSI_QUEUE_FULL) {
1494 struct scsipi_max_openings mo;
1495
1496 /*
1497 * We set the openings to active - 1, assuming that
1498 * the command that got us here is the first one that
1499 * can't fit into the device's queue. If that's not
1500 * the case, I guess we'll find out soon enough.
1501 */
1502 mo.mo_target = periph->periph_target;
1503 mo.mo_lun = periph->periph_lun;
1504 if (periph->periph_active < periph->periph_openings)
1505 mo.mo_openings = periph->periph_active - 1;
1506 else
1507 mo.mo_openings = periph->periph_openings - 1;
1508 #ifdef DIAGNOSTIC
1509 if (mo.mo_openings < 0) {
1510 scsipi_printaddr(periph);
1511 printf("QUEUE FULL resulted in < 0 openings\n");
1512 panic("scsipi_done");
1513 }
1514 #endif
1515 if (mo.mo_openings == 0) {
1516 scsipi_printaddr(periph);
1517 printf("QUEUE FULL resulted in 0 openings\n");
1518 mo.mo_openings = 1;
1519 }
1520 scsipi_async_event(chan, ASYNC_EVENT_MAX_OPENINGS, &mo);
1521 error = ERESTART;
1522 } else if (xs->xs_retries != 0) {
1523 xs->xs_retries--;
1524 /*
1525 * Wait one second, and try again.
1526 */
1527 if ((xs->xs_control & XS_CTL_POLL) ||
1528 (chan->chan_flags & SCSIPI_CHAN_TACTIVE) == 0) {
1529 delay(1000000);
1530 } else if (!callout_pending(&periph->periph_callout)) {
1531 scsipi_periph_freeze(periph, 1);
1532 callout_reset(&periph->periph_callout,
1533 hz, scsipi_periph_timed_thaw, periph);
1534 }
1535 error = ERESTART;
1536 } else
1537 error = EBUSY;
1538 break;
1539
1540 case XS_REQUEUE:
1541 error = ERESTART;
1542 break;
1543
1544 case XS_SELTIMEOUT:
1545 case XS_TIMEOUT:
1546 /*
1547 * If the device hasn't gone away, honor retry counts.
1548 *
1549 * Note that if we're in the middle of probing it,
1550 * it won't be found because it isn't here yet so
1551 * we won't honor the retry count in that case.
1552 */
1553 if (scsipi_lookup_periph(chan, periph->periph_target,
1554 periph->periph_lun) && xs->xs_retries != 0) {
1555 xs->xs_retries--;
1556 error = ERESTART;
1557 } else
1558 error = EIO;
1559 break;
1560
1561 case XS_RESET:
1562 if (xs->xs_control & XS_CTL_REQSENSE) {
1563 /*
1564 * request sense interrupted by reset: signal it
1565 * with EINTR return code.
1566 */
1567 error = EINTR;
1568 } else {
1569 if (xs->xs_retries != 0) {
1570 xs->xs_retries--;
1571 error = ERESTART;
1572 } else
1573 error = EIO;
1574 }
1575 break;
1576
1577 case XS_DRIVER_STUFFUP:
1578 scsipi_printaddr(periph);
1579 printf("generic HBA error\n");
1580 error = EIO;
1581 break;
1582 default:
1583 scsipi_printaddr(periph);
1584 printf("invalid return code from adapter: %d\n", xs->error);
1585 error = EIO;
1586 break;
1587 }
1588
1589 s = splbio();
1590 if (error == ERESTART) {
1591 /*
1592 * If we get here, the periph has been thawed and frozen
1593 * again if we had to issue recovery commands. Alternatively,
1594 * it may have been frozen again and in a timed thaw. In
1595 * any case, we thaw the periph once we re-enqueue the
1596 * command. Once the periph is fully thawed, it will begin
1597 * operation again.
1598 */
1599 xs->error = XS_NOERROR;
1600 xs->status = SCSI_OK;
1601 xs->xs_status &= ~XS_STS_DONE;
1602 xs->xs_requeuecnt++;
1603 error = scsipi_enqueue(xs);
1604 if (error == 0) {
1605 scsipi_periph_thaw(periph, 1);
1606 splx(s);
1607 return (ERESTART);
1608 }
1609 }
1610
1611 /*
1612 * scsipi_done() freezes the queue if not XS_NOERROR.
1613 * Thaw it here.
1614 */
1615 if (xs->error != XS_NOERROR)
1616 scsipi_periph_thaw(periph, 1);
1617
1618 /*
1619 * Set buffer fields in case the periph
1620 * switch done func uses them
1621 */
1622 if ((bp = xs->bp) != NULL) {
1623 if (error) {
1624 bp->b_error = error;
1625 bp->b_flags |= B_ERROR;
1626 bp->b_resid = bp->b_bcount;
1627 } else {
1628 bp->b_error = 0;
1629 bp->b_resid = xs->resid;
1630 }
1631 }
1632
1633 if (periph->periph_switch->psw_done)
1634 periph->periph_switch->psw_done(xs);
1635
1636 if (bp)
1637 biodone(bp);
1638
1639 if (xs->xs_control & XS_CTL_ASYNC)
1640 scsipi_put_xs(xs);
1641 splx(s);
1642
1643 return (error);
1644 }
1645
1646 /*
1647 * Issue a request sense for the given scsipi_xfer. Called when the xfer
1648 * returns with a CHECK_CONDITION status. Must be called in valid thread
1649 * context and at splbio().
1650 */
1651
1652 void
1653 scsipi_request_sense(xs)
1654 struct scsipi_xfer *xs;
1655 {
1656 struct scsipi_periph *periph = xs->xs_periph;
1657 int flags, error;
1658 struct scsipi_sense cmd;
1659
1660 periph->periph_flags |= PERIPH_SENSE;
1661
1662 /* if command was polling, request sense will too */
1663 flags = xs->xs_control & XS_CTL_POLL;
1664 /* Polling commands can't sleep */
1665 if (flags)
1666 flags |= XS_CTL_NOSLEEP;
1667
1668 flags |= XS_CTL_REQSENSE | XS_CTL_URGENT | XS_CTL_DATA_IN |
1669 XS_CTL_THAW_PERIPH | XS_CTL_FREEZE_PERIPH;
1670
1671 memset(&cmd, 0, sizeof(cmd));
1672 cmd.opcode = REQUEST_SENSE;
1673 cmd.length = sizeof(struct scsipi_sense_data);
1674
1675 error = scsipi_command(periph,
1676 (struct scsipi_generic *) &cmd, sizeof(cmd),
1677 (u_char*)&xs->sense.scsi_sense, sizeof(struct scsipi_sense_data),
1678 0, 1000, NULL, flags);
1679 periph->periph_flags &= ~PERIPH_SENSE;
1680 periph->periph_xscheck = NULL;
1681 switch(error) {
1682 case 0:
1683 /* we have a valid sense */
1684 xs->error = XS_SENSE;
1685 return;
1686 case EINTR:
1687 /* REQUEST_SENSE interrupted by bus reset. */
1688 xs->error = XS_RESET;
1689 return;
1690 case EIO:
1691 /* request sense coudn't be performed */
1692 /*
1693 * XXX this isn't quite right but we don't have anything
1694 * better for now
1695 */
1696 xs->error = XS_DRIVER_STUFFUP;
1697 return;
1698 default:
1699 /* Notify that request sense failed. */
1700 xs->error = XS_DRIVER_STUFFUP;
1701 scsipi_printaddr(periph);
1702 printf("request sense failed with error %d\n", error);
1703 return;
1704 }
1705 }
1706
1707 /*
1708 * scsipi_enqueue:
1709 *
1710 * Enqueue an xfer on a channel.
1711 */
1712 int
1713 scsipi_enqueue(xs)
1714 struct scsipi_xfer *xs;
1715 {
1716 struct scsipi_channel *chan = xs->xs_periph->periph_channel;
1717 struct scsipi_xfer *qxs;
1718 int s;
1719
1720 s = splbio();
1721
1722 /*
1723 * If the xfer is to be polled, and there are already jobs on
1724 * the queue, we can't proceed.
1725 */
1726 if ((xs->xs_control & XS_CTL_POLL) != 0 &&
1727 TAILQ_FIRST(&chan->chan_queue) != NULL) {
1728 splx(s);
1729 xs->error = XS_DRIVER_STUFFUP;
1730 return (EAGAIN);
1731 }
1732
1733 /*
1734 * If we have an URGENT xfer, it's an error recovery command
1735 * and it should just go on the head of the channel's queue.
1736 */
1737 if (xs->xs_control & XS_CTL_URGENT) {
1738 TAILQ_INSERT_HEAD(&chan->chan_queue, xs, channel_q);
1739 goto out;
1740 }
1741
1742 /*
1743 * If this xfer has already been on the queue before, we
1744 * need to reinsert it in the correct order. That order is:
1745 *
1746 * Immediately before the first xfer for this periph
1747 * with a requeuecnt less than xs->xs_requeuecnt.
1748 *
1749 * Failing that, at the end of the queue. (We'll end up
1750 * there naturally.)
1751 */
1752 if (xs->xs_requeuecnt != 0) {
1753 for (qxs = TAILQ_FIRST(&chan->chan_queue); qxs != NULL;
1754 qxs = TAILQ_NEXT(qxs, channel_q)) {
1755 if (qxs->xs_periph == xs->xs_periph &&
1756 qxs->xs_requeuecnt < xs->xs_requeuecnt)
1757 break;
1758 }
1759 if (qxs != NULL) {
1760 TAILQ_INSERT_AFTER(&chan->chan_queue, qxs, xs,
1761 channel_q);
1762 goto out;
1763 }
1764 }
1765 TAILQ_INSERT_TAIL(&chan->chan_queue, xs, channel_q);
1766 out:
1767 if (xs->xs_control & XS_CTL_THAW_PERIPH)
1768 scsipi_periph_thaw(xs->xs_periph, 1);
1769 splx(s);
1770 return (0);
1771 }
1772
1773 /*
1774 * scsipi_run_queue:
1775 *
1776 * Start as many xfers as possible running on the channel.
1777 */
1778 void
1779 scsipi_run_queue(chan)
1780 struct scsipi_channel *chan;
1781 {
1782 struct scsipi_xfer *xs;
1783 struct scsipi_periph *periph;
1784 int s;
1785
1786 for (;;) {
1787 s = splbio();
1788
1789 /*
1790 * If the channel is frozen, we can't do any work right
1791 * now.
1792 */
1793 if (chan->chan_qfreeze != 0) {
1794 splx(s);
1795 return;
1796 }
1797
1798 /*
1799 * Look for work to do, and make sure we can do it.
1800 */
1801 for (xs = TAILQ_FIRST(&chan->chan_queue); xs != NULL;
1802 xs = TAILQ_NEXT(xs, channel_q)) {
1803 periph = xs->xs_periph;
1804
1805 if ((periph->periph_sent >= periph->periph_openings) ||
1806 periph->periph_qfreeze != 0 ||
1807 (periph->periph_flags & PERIPH_UNTAG) != 0)
1808 continue;
1809
1810 if ((periph->periph_flags &
1811 (PERIPH_RECOVERING | PERIPH_SENSE)) != 0 &&
1812 (xs->xs_control & XS_CTL_URGENT) == 0)
1813 continue;
1814
1815 /*
1816 * We can issue this xfer!
1817 */
1818 goto got_one;
1819 }
1820
1821 /*
1822 * Can't find any work to do right now.
1823 */
1824 splx(s);
1825 return;
1826
1827 got_one:
1828 /*
1829 * Have an xfer to run. Allocate a resource from
1830 * the adapter to run it. If we can't allocate that
1831 * resource, we don't dequeue the xfer.
1832 */
1833 if (scsipi_get_resource(chan) == 0) {
1834 /*
1835 * Adapter is out of resources. If the adapter
1836 * supports it, attempt to grow them.
1837 */
1838 if (scsipi_grow_resources(chan) == 0) {
1839 /*
1840 * Wasn't able to grow resources,
1841 * nothing more we can do.
1842 */
1843 if (xs->xs_control & XS_CTL_POLL) {
1844 scsipi_printaddr(xs->xs_periph);
1845 printf("polling command but no "
1846 "adapter resources");
1847 /* We'll panic shortly... */
1848 }
1849 splx(s);
1850
1851 /*
1852 * XXX: We should be able to note that
1853 * XXX: that resources are needed here!
1854 */
1855 return;
1856 }
1857 /*
1858 * scsipi_grow_resources() allocated the resource
1859 * for us.
1860 */
1861 }
1862
1863 /*
1864 * We have a resource to run this xfer, do it!
1865 */
1866 TAILQ_REMOVE(&chan->chan_queue, xs, channel_q);
1867
1868 /*
1869 * If the command is to be tagged, allocate a tag ID
1870 * for it.
1871 */
1872 if (XS_CTL_TAGTYPE(xs) != 0)
1873 scsipi_get_tag(xs);
1874 else
1875 periph->periph_flags |= PERIPH_UNTAG;
1876 periph->periph_sent++;
1877 splx(s);
1878
1879 scsipi_adapter_request(chan, ADAPTER_REQ_RUN_XFER, xs);
1880 }
1881 #ifdef DIAGNOSTIC
1882 panic("scsipi_run_queue: impossible");
1883 #endif
1884 }
1885
1886 /*
1887 * scsipi_execute_xs:
1888 *
1889 * Begin execution of an xfer, waiting for it to complete, if necessary.
1890 */
1891 int
1892 scsipi_execute_xs(xs)
1893 struct scsipi_xfer *xs;
1894 {
1895 struct scsipi_periph *periph = xs->xs_periph;
1896 struct scsipi_channel *chan = periph->periph_channel;
1897 int oasync, async, poll, retries, error, s;
1898
1899 xs->xs_status &= ~XS_STS_DONE;
1900 xs->error = XS_NOERROR;
1901 xs->resid = xs->datalen;
1902 xs->status = SCSI_OK;
1903
1904 #ifdef SCSIPI_DEBUG
1905 if (xs->xs_periph->periph_dbflags & SCSIPI_DB3) {
1906 printf("scsipi_execute_xs: ");
1907 show_scsipi_xs(xs);
1908 printf("\n");
1909 }
1910 #endif
1911
1912 /*
1913 * Deal with command tagging:
1914 *
1915 * - If the device's current operating mode doesn't
1916 * include tagged queueing, clear the tag mask.
1917 *
1918 * - If the device's current operating mode *does*
1919 * include tagged queueing, set the tag_type in
1920 * the xfer to the appropriate byte for the tag
1921 * message.
1922 */
1923 if ((PERIPH_XFER_MODE(periph) & PERIPH_CAP_TQING) == 0 ||
1924 (xs->xs_control & XS_CTL_REQSENSE)) {
1925 xs->xs_control &= ~XS_CTL_TAGMASK;
1926 xs->xs_tag_type = 0;
1927 } else {
1928 /*
1929 * If the request doesn't specify a tag, give Head
1930 * tags to URGENT operations and Ordered tags to
1931 * everything else.
1932 */
1933 if (XS_CTL_TAGTYPE(xs) == 0) {
1934 if (xs->xs_control & XS_CTL_URGENT)
1935 xs->xs_control |= XS_CTL_HEAD_TAG;
1936 else
1937 xs->xs_control |= XS_CTL_ORDERED_TAG;
1938 }
1939
1940 switch (XS_CTL_TAGTYPE(xs)) {
1941 case XS_CTL_ORDERED_TAG:
1942 xs->xs_tag_type = MSG_ORDERED_Q_TAG;
1943 break;
1944
1945 case XS_CTL_SIMPLE_TAG:
1946 xs->xs_tag_type = MSG_SIMPLE_Q_TAG;
1947 break;
1948
1949 case XS_CTL_HEAD_TAG:
1950 xs->xs_tag_type = MSG_HEAD_OF_Q_TAG;
1951 break;
1952
1953 default:
1954 scsipi_printaddr(periph);
1955 printf("invalid tag mask 0x%08x\n",
1956 XS_CTL_TAGTYPE(xs));
1957 panic("scsipi_execute_xs");
1958 }
1959 }
1960
1961 /* If the adaptor wants us to poll, poll. */
1962 if (chan->chan_adapter->adapt_flags & SCSIPI_ADAPT_POLL_ONLY)
1963 xs->xs_control |= XS_CTL_POLL;
1964
1965 /*
1966 * If we don't yet have a completion thread, or we are to poll for
1967 * completion, clear the ASYNC flag.
1968 */
1969 oasync = (xs->xs_control & XS_CTL_ASYNC);
1970 if (chan->chan_thread == NULL || (xs->xs_control & XS_CTL_POLL) != 0)
1971 xs->xs_control &= ~XS_CTL_ASYNC;
1972
1973 async = (xs->xs_control & XS_CTL_ASYNC);
1974 poll = (xs->xs_control & XS_CTL_POLL);
1975 retries = xs->xs_retries; /* for polling commands */
1976
1977 #ifdef DIAGNOSTIC
1978 if (oasync != 0 && xs->bp == NULL)
1979 panic("scsipi_execute_xs: XS_CTL_ASYNC but no buf");
1980 #endif
1981
1982 /*
1983 * Enqueue the transfer. If we're not polling for completion, this
1984 * should ALWAYS return `no error'.
1985 */
1986 try_again:
1987 error = scsipi_enqueue(xs);
1988 if (error) {
1989 if (poll == 0) {
1990 scsipi_printaddr(periph);
1991 printf("not polling, but enqueue failed with %d\n",
1992 error);
1993 panic("scsipi_execute_xs");
1994 }
1995
1996 scsipi_printaddr(periph);
1997 printf("failed to enqueue polling command");
1998 if (retries != 0) {
1999 printf(", retrying...\n");
2000 delay(1000000);
2001 retries--;
2002 goto try_again;
2003 }
2004 printf("\n");
2005 goto free_xs;
2006 }
2007
2008 restarted:
2009 scsipi_run_queue(chan);
2010
2011 /*
2012 * The xfer is enqueued, and possibly running. If it's to be
2013 * completed asynchronously, just return now.
2014 */
2015 if (async)
2016 return (EJUSTRETURN);
2017
2018 /*
2019 * Not an asynchronous command; wait for it to complete.
2020 */
2021 s = splbio();
2022 while ((xs->xs_status & XS_STS_DONE) == 0) {
2023 if (poll) {
2024 scsipi_printaddr(periph);
2025 printf("polling command not done\n");
2026 panic("scsipi_execute_xs");
2027 }
2028 (void) tsleep(xs, PRIBIO, "xscmd", 0);
2029 }
2030 splx(s);
2031
2032 /*
2033 * Command is complete. scsipi_done() has awakened us to perform
2034 * the error handling.
2035 */
2036 error = scsipi_complete(xs);
2037 if (error == ERESTART)
2038 goto restarted;
2039
2040 /*
2041 * If it was meant to run async and we cleared aync ourselve,
2042 * don't return an error here. It has already been handled
2043 */
2044 if (oasync)
2045 error = EJUSTRETURN;
2046 /*
2047 * Command completed successfully or fatal error occurred. Fall
2048 * into....
2049 */
2050 free_xs:
2051 s = splbio();
2052 scsipi_put_xs(xs);
2053 splx(s);
2054
2055 /*
2056 * Kick the queue, keep it running in case it stopped for some
2057 * reason.
2058 */
2059 scsipi_run_queue(chan);
2060
2061 return (error);
2062 }
2063
2064 /*
2065 * scsipi_completion_thread:
2066 *
2067 * This is the completion thread. We wait for errors on
2068 * asynchronous xfers, and perform the error handling
2069 * function, restarting the command, if necessary.
2070 */
2071 void
2072 scsipi_completion_thread(arg)
2073 void *arg;
2074 {
2075 struct scsipi_channel *chan = arg;
2076 struct scsipi_xfer *xs;
2077 int s;
2078
2079 if (chan->chan_init_cb)
2080 (*chan->chan_init_cb)(chan, chan->chan_init_cb_arg);
2081
2082 s = splbio();
2083 chan->chan_flags |= SCSIPI_CHAN_TACTIVE;
2084 splx(s);
2085 for (;;) {
2086 s = splbio();
2087 xs = TAILQ_FIRST(&chan->chan_complete);
2088 if (xs == NULL && chan->chan_tflags == 0) {
2089 /* nothing to do; wait */
2090 (void) tsleep(&chan->chan_complete, PRIBIO,
2091 "sccomp", 0);
2092 splx(s);
2093 continue;
2094 }
2095 if (chan->chan_tflags & SCSIPI_CHANT_CALLBACK) {
2096 /* call chan_callback from thread context */
2097 chan->chan_tflags &= ~SCSIPI_CHANT_CALLBACK;
2098 chan->chan_callback(chan, chan->chan_callback_arg);
2099 splx(s);
2100 continue;
2101 }
2102 if (chan->chan_tflags & SCSIPI_CHANT_GROWRES) {
2103 /* attempt to get more openings for this channel */
2104 chan->chan_tflags &= ~SCSIPI_CHANT_GROWRES;
2105 scsipi_adapter_request(chan,
2106 ADAPTER_REQ_GROW_RESOURCES, NULL);
2107 scsipi_channel_thaw(chan, 1);
2108 splx(s);
2109 continue;
2110 }
2111 if (chan->chan_tflags & SCSIPI_CHANT_KICK) {
2112 /* explicitly run the queues for this channel */
2113 chan->chan_tflags &= ~SCSIPI_CHANT_KICK;
2114 scsipi_run_queue(chan);
2115 splx(s);
2116 continue;
2117 }
2118 if (chan->chan_tflags & SCSIPI_CHANT_SHUTDOWN) {
2119 splx(s);
2120 break;
2121 }
2122 if (xs) {
2123 TAILQ_REMOVE(&chan->chan_complete, xs, channel_q);
2124 splx(s);
2125
2126 /*
2127 * Have an xfer with an error; process it.
2128 */
2129 (void) scsipi_complete(xs);
2130
2131 /*
2132 * Kick the queue; keep it running if it was stopped
2133 * for some reason.
2134 */
2135 scsipi_run_queue(chan);
2136 } else {
2137 splx(s);
2138 }
2139 }
2140
2141 chan->chan_thread = NULL;
2142
2143 /* In case parent is waiting for us to exit. */
2144 wakeup(&chan->chan_thread);
2145
2146 kthread_exit(0);
2147 }
2148
2149 /*
2150 * scsipi_create_completion_thread:
2151 *
2152 * Callback to actually create the completion thread.
2153 */
2154 void
2155 scsipi_create_completion_thread(arg)
2156 void *arg;
2157 {
2158 struct scsipi_channel *chan = arg;
2159 struct scsipi_adapter *adapt = chan->chan_adapter;
2160
2161 if (kthread_create1(scsipi_completion_thread, chan,
2162 &chan->chan_thread, "%s", chan->chan_name)) {
2163 printf("%s: unable to create completion thread for "
2164 "channel %d\n", adapt->adapt_dev->dv_xname,
2165 chan->chan_channel);
2166 panic("scsipi_create_completion_thread");
2167 }
2168 }
2169
2170 /*
2171 * scsipi_thread_call_callback:
2172 *
2173 * request to call a callback from the completion thread
2174 */
2175 int
2176 scsipi_thread_call_callback(chan, callback, arg)
2177 struct scsipi_channel *chan;
2178 void (*callback) __P((struct scsipi_channel *, void *));
2179 void *arg;
2180 {
2181 int s;
2182
2183 s = splbio();
2184 if ((chan->chan_flags & SCSIPI_CHAN_TACTIVE) == 0) {
2185 /* kernel thread doesn't exist yet */
2186 splx(s);
2187 return ESRCH;
2188 }
2189 if (chan->chan_tflags & SCSIPI_CHANT_CALLBACK) {
2190 splx(s);
2191 return EBUSY;
2192 }
2193 scsipi_channel_freeze(chan, 1);
2194 chan->chan_callback = callback;
2195 chan->chan_callback_arg = arg;
2196 chan->chan_tflags |= SCSIPI_CHANT_CALLBACK;
2197 wakeup(&chan->chan_complete);
2198 splx(s);
2199 return(0);
2200 }
2201
2202 /*
2203 * scsipi_async_event:
2204 *
2205 * Handle an asynchronous event from an adapter.
2206 */
2207 void
2208 scsipi_async_event(chan, event, arg)
2209 struct scsipi_channel *chan;
2210 scsipi_async_event_t event;
2211 void *arg;
2212 {
2213 int s;
2214
2215 s = splbio();
2216 switch (event) {
2217 case ASYNC_EVENT_MAX_OPENINGS:
2218 scsipi_async_event_max_openings(chan,
2219 (struct scsipi_max_openings *)arg);
2220 break;
2221
2222 case ASYNC_EVENT_XFER_MODE:
2223 scsipi_async_event_xfer_mode(chan,
2224 (struct scsipi_xfer_mode *)arg);
2225 break;
2226 case ASYNC_EVENT_RESET:
2227 scsipi_async_event_channel_reset(chan);
2228 break;
2229 }
2230 splx(s);
2231 }
2232
2233 /*
2234 * scsipi_print_xfer_mode:
2235 *
2236 * Print a periph's capabilities.
2237 */
2238 void
2239 scsipi_print_xfer_mode(periph)
2240 struct scsipi_periph *periph;
2241 {
2242 int period, freq, speed, mbs;
2243
2244 if ((periph->periph_flags & PERIPH_MODE_VALID) == 0)
2245 return;
2246
2247 printf("%s: ", periph->periph_dev->dv_xname);
2248 if (periph->periph_mode & (PERIPH_CAP_SYNC | PERIPH_CAP_DT)) {
2249 period = scsipi_sync_factor_to_period(periph->periph_period);
2250 printf("sync (%d.%02dns offset %d)",
2251 period / 100, period % 100, periph->periph_offset);
2252 } else
2253 printf("async");
2254
2255 if (periph->periph_mode & PERIPH_CAP_WIDE32)
2256 printf(", 32-bit");
2257 else if (periph->periph_mode & (PERIPH_CAP_WIDE16 | PERIPH_CAP_DT))
2258 printf(", 16-bit");
2259 else
2260 printf(", 8-bit");
2261
2262 if (periph->periph_mode & (PERIPH_CAP_SYNC | PERIPH_CAP_DT)) {
2263 freq = scsipi_sync_factor_to_freq(periph->periph_period);
2264 speed = freq;
2265 if (periph->periph_mode & PERIPH_CAP_WIDE32)
2266 speed *= 4;
2267 else if (periph->periph_mode &
2268 (PERIPH_CAP_WIDE16 | PERIPH_CAP_DT))
2269 speed *= 2;
2270 mbs = speed / 1000;
2271 if (mbs > 0)
2272 printf(" (%d.%03dMB/s)", mbs, speed % 1000);
2273 else
2274 printf(" (%dKB/s)", speed % 1000);
2275 }
2276
2277 printf(" transfers");
2278
2279 if (periph->periph_mode & PERIPH_CAP_TQING)
2280 printf(", tagged queueing");
2281
2282 printf("\n");
2283 }
2284
2285 /*
2286 * scsipi_async_event_max_openings:
2287 *
2288 * Update the maximum number of outstanding commands a
2289 * device may have.
2290 */
2291 void
2292 scsipi_async_event_max_openings(chan, mo)
2293 struct scsipi_channel *chan;
2294 struct scsipi_max_openings *mo;
2295 {
2296 struct scsipi_periph *periph;
2297 int minlun, maxlun;
2298
2299 if (mo->mo_lun == -1) {
2300 /*
2301 * Wildcarded; apply it to all LUNs.
2302 */
2303 minlun = 0;
2304 maxlun = chan->chan_nluns - 1;
2305 } else
2306 minlun = maxlun = mo->mo_lun;
2307
2308 /* XXX This could really suck with a large LUN space. */
2309 for (; minlun <= maxlun; minlun++) {
2310 periph = scsipi_lookup_periph(chan, mo->mo_target, minlun);
2311 if (periph == NULL)
2312 continue;
2313
2314 if (mo->mo_openings < periph->periph_openings)
2315 periph->periph_openings = mo->mo_openings;
2316 else if (mo->mo_openings > periph->periph_openings &&
2317 (periph->periph_flags & PERIPH_GROW_OPENINGS) != 0)
2318 periph->periph_openings = mo->mo_openings;
2319 }
2320 }
2321
2322 /*
2323 * scsipi_async_event_xfer_mode:
2324 *
2325 * Update the xfer mode for all periphs sharing the
2326 * specified I_T Nexus.
2327 */
2328 void
2329 scsipi_async_event_xfer_mode(chan, xm)
2330 struct scsipi_channel *chan;
2331 struct scsipi_xfer_mode *xm;
2332 {
2333 struct scsipi_periph *periph;
2334 int lun, announce, mode, period, offset;
2335
2336 for (lun = 0; lun < chan->chan_nluns; lun++) {
2337 periph = scsipi_lookup_periph(chan, xm->xm_target, lun);
2338 if (periph == NULL)
2339 continue;
2340 announce = 0;
2341
2342 /*
2343 * Clamp the xfer mode down to this periph's capabilities.
2344 */
2345 mode = xm->xm_mode & periph->periph_cap;
2346 if (mode & PERIPH_CAP_SYNC) {
2347 period = xm->xm_period;
2348 offset = xm->xm_offset;
2349 } else {
2350 period = 0;
2351 offset = 0;
2352 }
2353
2354 /*
2355 * If we do not have a valid xfer mode yet, or the parameters
2356 * are different, announce them.
2357 */
2358 if ((periph->periph_flags & PERIPH_MODE_VALID) == 0 ||
2359 periph->periph_mode != mode ||
2360 periph->periph_period != period ||
2361 periph->periph_offset != offset)
2362 announce = 1;
2363
2364 periph->periph_mode = mode;
2365 periph->periph_period = period;
2366 periph->periph_offset = offset;
2367 periph->periph_flags |= PERIPH_MODE_VALID;
2368
2369 if (announce)
2370 scsipi_print_xfer_mode(periph);
2371 }
2372 }
2373
2374 /*
2375 * scsipi_set_xfer_mode:
2376 *
2377 * Set the xfer mode for the specified I_T Nexus.
2378 */
2379 void
2380 scsipi_set_xfer_mode(chan, target, immed)
2381 struct scsipi_channel *chan;
2382 int target, immed;
2383 {
2384 struct scsipi_xfer_mode xm;
2385 struct scsipi_periph *itperiph;
2386 int lun, s;
2387
2388 /*
2389 * Go to the minimal xfer mode.
2390 */
2391 xm.xm_target = target;
2392 xm.xm_mode = 0;
2393 xm.xm_period = 0; /* ignored */
2394 xm.xm_offset = 0; /* ignored */
2395
2396 /*
2397 * Find the first LUN we know about on this I_T Nexus.
2398 */
2399 for (itperiph = NULL, lun = 0; lun < chan->chan_nluns; lun++) {
2400 itperiph = scsipi_lookup_periph(chan, target, lun);
2401 if (itperiph != NULL)
2402 break;
2403 }
2404 if (itperiph != NULL) {
2405 xm.xm_mode = itperiph->periph_cap;
2406 /*
2407 * Now issue the request to the adapter.
2408 */
2409 s = splbio();
2410 scsipi_adapter_request(chan, ADAPTER_REQ_SET_XFER_MODE, &xm);
2411 splx(s);
2412 /*
2413 * If we want this to happen immediately, issue a dummy
2414 * command, since most adapters can't really negotiate unless
2415 * they're executing a job.
2416 */
2417 if (immed != 0) {
2418 (void) scsipi_test_unit_ready(itperiph,
2419 XS_CTL_DISCOVERY | XS_CTL_IGNORE_ILLEGAL_REQUEST |
2420 XS_CTL_IGNORE_NOT_READY |
2421 XS_CTL_IGNORE_MEDIA_CHANGE);
2422 }
2423 }
2424 }
2425
2426 /*
2427 * scsipi_channel_reset:
2428 *
2429 * handle scsi bus reset
2430 * called at splbio
2431 */
2432 void
2433 scsipi_async_event_channel_reset(chan)
2434 struct scsipi_channel *chan;
2435 {
2436 struct scsipi_xfer *xs, *xs_next;
2437 struct scsipi_periph *periph;
2438 int target, lun;
2439
2440 /*
2441 * Channel has been reset. Also mark as reset pending REQUEST_SENSE
2442 * commands; as the sense is not available any more.
2443 * can't call scsipi_done() from here, as the command has not been
2444 * sent to the adapter yet (this would corrupt accounting).
2445 */
2446
2447 for (xs = TAILQ_FIRST(&chan->chan_queue); xs != NULL; xs = xs_next) {
2448 xs_next = TAILQ_NEXT(xs, channel_q);
2449 if (xs->xs_control & XS_CTL_REQSENSE) {
2450 TAILQ_REMOVE(&chan->chan_queue, xs, channel_q);
2451 xs->error = XS_RESET;
2452 if ((xs->xs_control & XS_CTL_ASYNC) != 0)
2453 TAILQ_INSERT_TAIL(&chan->chan_complete, xs,
2454 channel_q);
2455 }
2456 }
2457 wakeup(&chan->chan_complete);
2458 /* Catch xs with pending sense which may not have a REQSENSE xs yet */
2459 for (target = 0; target < chan->chan_ntargets; target++) {
2460 if (target == chan->chan_id)
2461 continue;
2462 for (lun = 0; lun < chan->chan_nluns; lun++) {
2463 periph = scsipi_lookup_periph(chan, target, lun);
2464 if (periph) {
2465 xs = periph->periph_xscheck;
2466 if (xs)
2467 xs->error = XS_RESET;
2468 }
2469 }
2470 }
2471 }
2472
2473 /*
2474 * scsipi_target_detach:
2475 *
2476 * detach all periph associated with a I_T
2477 * must be called from valid thread context
2478 */
2479 int
2480 scsipi_target_detach(chan, target, lun, flags)
2481 struct scsipi_channel *chan;
2482 int target, lun;
2483 int flags;
2484 {
2485 struct scsipi_periph *periph;
2486 int ctarget, mintarget, maxtarget;
2487 int clun, minlun, maxlun;
2488 int error;
2489
2490 if (target == -1) {
2491 mintarget = 0;
2492 maxtarget = chan->chan_ntargets;
2493 } else {
2494 if (target == chan->chan_id)
2495 return EINVAL;
2496 if (target < 0 || target >= chan->chan_ntargets)
2497 return EINVAL;
2498 mintarget = target;
2499 maxtarget = target + 1;
2500 }
2501
2502 if (lun == -1) {
2503 minlun = 0;
2504 maxlun = chan->chan_nluns;
2505 } else {
2506 if (lun < 0 || lun >= chan->chan_nluns)
2507 return EINVAL;
2508 minlun = lun;
2509 maxlun = lun + 1;
2510 }
2511
2512 for (ctarget = mintarget; ctarget < maxtarget; ctarget++) {
2513 if (ctarget == chan->chan_id)
2514 continue;
2515
2516 for (clun = minlun; clun < maxlun; clun++) {
2517 periph = scsipi_lookup_periph(chan, ctarget, clun);
2518 if (periph == NULL)
2519 continue;
2520 error = config_detach(periph->periph_dev, flags);
2521 if (error)
2522 return (error);
2523 scsipi_remove_periph(chan, periph);
2524 free(periph, M_DEVBUF);
2525 }
2526 }
2527 return(0);
2528 }
2529
2530 /*
2531 * scsipi_adapter_addref:
2532 *
2533 * Add a reference to the adapter pointed to by the provided
2534 * link, enabling the adapter if necessary.
2535 */
2536 int
2537 scsipi_adapter_addref(adapt)
2538 struct scsipi_adapter *adapt;
2539 {
2540 int s, error = 0;
2541
2542 s = splbio();
2543 if (adapt->adapt_refcnt++ == 0 && adapt->adapt_enable != NULL) {
2544 error = (*adapt->adapt_enable)(adapt->adapt_dev, 1);
2545 if (error)
2546 adapt->adapt_refcnt--;
2547 }
2548 splx(s);
2549 return (error);
2550 }
2551
2552 /*
2553 * scsipi_adapter_delref:
2554 *
2555 * Delete a reference to the adapter pointed to by the provided
2556 * link, disabling the adapter if possible.
2557 */
2558 void
2559 scsipi_adapter_delref(adapt)
2560 struct scsipi_adapter *adapt;
2561 {
2562 int s;
2563
2564 s = splbio();
2565 if (adapt->adapt_refcnt-- == 1 && adapt->adapt_enable != NULL)
2566 (void) (*adapt->adapt_enable)(adapt->adapt_dev, 0);
2567 splx(s);
2568 }
2569
/*
 * Table of the special sync transfer factor codes and the transfer
 * periods they stand for.  Factors not listed here use the
 * conventional period = factor * 4ns encoding (see the conversion
 * functions below).
 */
struct scsipi_syncparam {
	int	ss_factor;	/* sync factor code */
	int	ss_period;	/* ns * 100 */
} scsipi_syncparams[] = {
	{ 0x08,		 625 },	/* FAST-160 (Ultra320) */
	{ 0x09,		1250 },	/* FAST-80 (Ultra160) */
	{ 0x0a,		2500 },	/* FAST-40 40MHz (Ultra2) */
	{ 0x0b,		3030 },	/* FAST-40 33MHz (Ultra2) */
	{ 0x0c,		5000 },	/* FAST-20 (Ultra) */
};
/* Number of entries in scsipi_syncparams[]. */
const int scsipi_nsyncparams =
    sizeof(scsipi_syncparams) / sizeof(scsipi_syncparams[0]);
2582
2583 int
2584 scsipi_sync_period_to_factor(period)
2585 int period; /* ns * 100 */
2586 {
2587 int i;
2588
2589 for (i = 0; i < scsipi_nsyncparams; i++) {
2590 if (period <= scsipi_syncparams[i].ss_period)
2591 return (scsipi_syncparams[i].ss_factor);
2592 }
2593
2594 return ((period / 100) / 4);
2595 }
2596
2597 int
2598 scsipi_sync_factor_to_period(factor)
2599 int factor;
2600 {
2601 int i;
2602
2603 for (i = 0; i < scsipi_nsyncparams; i++) {
2604 if (factor == scsipi_syncparams[i].ss_factor)
2605 return (scsipi_syncparams[i].ss_period);
2606 }
2607
2608 return ((factor * 4) * 100);
2609 }
2610
2611 int
2612 scsipi_sync_factor_to_freq(factor)
2613 int factor;
2614 {
2615 int i;
2616
2617 for (i = 0; i < scsipi_nsyncparams; i++) {
2618 if (factor == scsipi_syncparams[i].ss_factor)
2619 return (100000000 / scsipi_syncparams[i].ss_period);
2620 }
2621
2622 return (10000000 / ((factor * 4) * 10));
2623 }
2624
2625 #ifdef SCSIPI_DEBUG
2626 /*
2627 * Given a scsipi_xfer, dump the request, in all it's glory
2628 */
2629 void
2630 show_scsipi_xs(xs)
2631 struct scsipi_xfer *xs;
2632 {
2633
2634 printf("xs(%p): ", xs);
2635 printf("xs_control(0x%08x)", xs->xs_control);
2636 printf("xs_status(0x%08x)", xs->xs_status);
2637 printf("periph(%p)", xs->xs_periph);
2638 printf("retr(0x%x)", xs->xs_retries);
2639 printf("timo(0x%x)", xs->timeout);
2640 printf("cmd(%p)", xs->cmd);
2641 printf("len(0x%x)", xs->cmdlen);
2642 printf("data(%p)", xs->data);
2643 printf("len(0x%x)", xs->datalen);
2644 printf("res(0x%x)", xs->resid);
2645 printf("err(0x%x)", xs->error);
2646 printf("bp(%p)", xs->bp);
2647 show_scsipi_cmd(xs);
2648 }
2649
/*
 * Dump an xfer's CDB bytes (or note that it is a reset), followed by
 * the beginning of its data buffer.
 */
void
show_scsipi_cmd(xs)
	struct scsipi_xfer *xs;
{
	u_char *b = (u_char *) xs->cmd;
	int i = 0;

	scsipi_printaddr(xs->xs_periph);
	printf(" command: ");

	if ((xs->xs_control & XS_CTL_RESET) == 0) {
		/* Print each command byte, comma-separated. */
		while (i < xs->cmdlen) {
			if (i)
				printf(",");
			printf("0x%x", b[i++]);
		}
		printf("-[%d bytes]\n", xs->datalen);
		/* Dump at most the first 64 bytes of data. */
		if (xs->datalen)
			show_mem(xs->data, min(64, xs->datalen));
	} else
		printf("-RESET-\n");
}
2672
2673 void
2674 show_mem(address, num)
2675 u_char *address;
2676 int num;
2677 {
2678 int x;
2679
2680 printf("------------------------------");
2681 for (x = 0; x < num; x++) {
2682 if ((x % 16) == 0)
2683 printf("\n%03d: ", x);
2684 printf("%02x ", *address++);
2685 }
2686 printf("\n------------------------------\n");
2687 }
2688 #endif /* SCSIPI_DEBUG */
2689