/*	$NetBSD: scsipi_base.c,v 1.107 2004/08/18 11:50:59 drochner Exp $	*/

/*-
 * Copyright (c) 1998, 1999, 2000, 2002, 2003 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Charles M. Hannum; by Jason R. Thorpe of the Numerical Aerospace
 * Simulation Facility, NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: scsipi_base.c,v 1.107 2004/08/18 11:50:59 drochner Exp $");

#include "opt_scsi.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/buf.h>
#include <sys/uio.h>
#include <sys/malloc.h>
#include <sys/pool.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/proc.h>
#include <sys/kthread.h>
#include <sys/hash.h>

#include <dev/scsipi/scsipi_all.h>
#include <dev/scsipi/scsipi_disk.h>
#include <dev/scsipi/scsipiconf.h>
#include <dev/scsipi/scsipi_base.h>

#include <dev/scsipi/scsi_all.h>
#include <dev/scsipi/scsi_message.h>

int	scsipi_complete __P((struct scsipi_xfer *));
void	scsipi_request_sense __P((struct scsipi_xfer *));
int	scsipi_enqueue __P((struct scsipi_xfer *));
void	scsipi_run_queue __P((struct scsipi_channel *chan));

void	scsipi_completion_thread __P((void *));

void	scsipi_get_tag __P((struct scsipi_xfer *));
void	scsipi_put_tag __P((struct scsipi_xfer *));

int	scsipi_get_resource __P((struct scsipi_channel *));
void	scsipi_put_resource __P((struct scsipi_channel *));
__inline int scsipi_grow_resources __P((struct scsipi_channel *));

void	scsipi_async_event_max_openings __P((struct scsipi_channel *,
	    struct scsipi_max_openings *));
void	scsipi_async_event_xfer_mode __P((struct scsipi_channel *,
	    struct scsipi_xfer_mode *));
void	scsipi_async_event_channel_reset __P((struct scsipi_channel *));

struct pool scsipi_xfer_pool;

/*
 * scsipi_init:
 *
 *	Called when a scsibus or atapibus is attached to the system
 *	to initialize shared data structures.
 */
void
scsipi_init()
{
	static int scsipi_init_done;

	if (scsipi_init_done)
		return;
	scsipi_init_done = 1;

	/* Initialize the scsipi_xfer pool. */
	pool_init(&scsipi_xfer_pool, sizeof(struct scsipi_xfer), 0,
	    0, 0, "scxspl", NULL);
}

/*
 * scsipi_channel_init:
 *
 *	Initialize a scsipi_channel when it is attached.
 */
int
scsipi_channel_init(chan)
	struct scsipi_channel *chan;
{
	int i;

	/* Initialize shared data. */
	scsipi_init();

	/* Initialize the queues. */
	TAILQ_INIT(&chan->chan_queue);
	TAILQ_INIT(&chan->chan_complete);

	for (i = 0; i < SCSIPI_CHAN_PERIPH_BUCKETS; i++)
		LIST_INIT(&chan->chan_periphtab[i]);

	/*
	 * Create the asynchronous completion thread.
	 */
	kthread_create(scsipi_create_completion_thread, chan);
	return (0);
}

/*
 * scsipi_channel_shutdown:
 *
 *	Shutdown a scsipi_channel.
 */
void
scsipi_channel_shutdown(chan)
	struct scsipi_channel *chan;
{

	/*
	 * Shut down the completion thread.
	 */
	chan->chan_tflags |= SCSIPI_CHANT_SHUTDOWN;
	wakeup(&chan->chan_complete);

	/*
	 * Now wait for the thread to exit.
	 */
	while (chan->chan_thread != NULL)
		(void) tsleep(&chan->chan_thread, PRIBIO, "scshut", 0);
}

static uint32_t
scsipi_chan_periph_hash(uint64_t t, uint64_t l)
{
	uint32_t hash;

	hash = hash32_buf(&t, sizeof(t), HASH32_BUF_INIT);
	hash = hash32_buf(&l, sizeof(l), hash);

	return (hash & SCSIPI_CHAN_PERIPH_HASHMASK);
}

/*
 * scsipi_insert_periph:
 *
 *	Insert a periph into the channel.
 */
void
scsipi_insert_periph(chan, periph)
	struct scsipi_channel *chan;
	struct scsipi_periph *periph;
{
	uint32_t hash;
	int s;

	hash = scsipi_chan_periph_hash(periph->periph_target,
	    periph->periph_lun);

	s = splbio();
	LIST_INSERT_HEAD(&chan->chan_periphtab[hash], periph, periph_hash);
	splx(s);
}

/*
 * scsipi_remove_periph:
 *
 *	Remove a periph from the channel.
 */
void
scsipi_remove_periph(chan, periph)
	struct scsipi_channel *chan;
	struct scsipi_periph *periph;
{
	int s;

	s = splbio();
	LIST_REMOVE(periph, periph_hash);
	splx(s);
}

/*
 * scsipi_lookup_periph:
 *
 *	Lookup a periph on the specified channel.
 */
struct scsipi_periph *
scsipi_lookup_periph(chan, target, lun)
	struct scsipi_channel *chan;
	int target, lun;
{
	struct scsipi_periph *periph;
	uint32_t hash;
	int s;

	if (target >= chan->chan_ntargets ||
	    lun >= chan->chan_nluns)
		return (NULL);

	hash = scsipi_chan_periph_hash(target, lun);

	s = splbio();
	LIST_FOREACH(periph, &chan->chan_periphtab[hash], periph_hash) {
		if (periph->periph_target == target &&
		    periph->periph_lun == lun)
			break;
	}
	splx(s);

	return (periph);
}
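
#ifdef SCSIPI_EXAMPLES
/*
 * Illustrative sketch only; the SCSIPI_EXAMPLES guard and the function
 * below are hypothetical and not part of this file's API.  The periph
 * table above is a per-channel hash keyed on (target, lun), so a probe
 * for "is this target/lun already attached?" is a single lookup:
 */
static __inline int
scsipi_example_periph_exists(struct scsipi_channel *chan, int target, int lun)
{

	/* scsipi_lookup_periph() returns NULL if nothing is attached. */
	return (scsipi_lookup_periph(chan, target, lun) != NULL);
}
#endif /* SCSIPI_EXAMPLES */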

/*
 * scsipi_get_resource:
 *
 *	Allocate a single xfer `resource' from the channel.
 *
 *	NOTE: Must be called at splbio().
 */
int
scsipi_get_resource(chan)
	struct scsipi_channel *chan;
{
	struct scsipi_adapter *adapt = chan->chan_adapter;

	if (chan->chan_flags & SCSIPI_CHAN_OPENINGS) {
		if (chan->chan_openings > 0) {
			chan->chan_openings--;
			return (1);
		}
		return (0);
	}

	if (adapt->adapt_openings > 0) {
		adapt->adapt_openings--;
		return (1);
	}
	return (0);
}

/*
 * scsipi_grow_resources:
 *
 *	Attempt to grow resources for a channel.  If this succeeds,
 *	we allocate one for our caller.
 *
 *	NOTE: Must be called at splbio().
 */
__inline int
scsipi_grow_resources(chan)
	struct scsipi_channel *chan;
{

	if (chan->chan_flags & SCSIPI_CHAN_CANGROW) {
		if ((chan->chan_flags & SCSIPI_CHAN_TACTIVE) == 0) {
			scsipi_adapter_request(chan,
			    ADAPTER_REQ_GROW_RESOURCES, NULL);
			return (scsipi_get_resource(chan));
		}
		/*
		 * ask the channel thread to do it. It'll have to thaw the
		 * queue
		 */
		scsipi_channel_freeze(chan, 1);
		chan->chan_tflags |= SCSIPI_CHANT_GROWRES;
		wakeup(&chan->chan_complete);
		return (0);
	}

	return (0);
}

/*
 * scsipi_put_resource:
 *
 *	Free a single xfer `resource' to the channel.
 *
 *	NOTE: Must be called at splbio().
 */
void
scsipi_put_resource(chan)
	struct scsipi_channel *chan;
{
	struct scsipi_adapter *adapt = chan->chan_adapter;

	if (chan->chan_flags & SCSIPI_CHAN_OPENINGS)
		chan->chan_openings++;
	else
		adapt->adapt_openings++;
}

/*
 * scsipi_get_tag:
 *
 *	Get a tag ID for the specified xfer.
 *
 *	NOTE: Must be called at splbio().
 */
void
scsipi_get_tag(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_periph *periph = xs->xs_periph;
	int bit, tag;
	u_int word;

	bit = 0;	/* XXX gcc */
	for (word = 0; word < PERIPH_NTAGWORDS; word++) {
		bit = ffs(periph->periph_freetags[word]);
		if (bit != 0)
			break;
	}
#ifdef DIAGNOSTIC
	if (word == PERIPH_NTAGWORDS) {
		scsipi_printaddr(periph);
		printf("no free tags\n");
		panic("scsipi_get_tag");
	}
#endif

	bit -= 1;
	periph->periph_freetags[word] &= ~(1 << bit);
	tag = (word << 5) | bit;

	/* XXX Should eventually disallow this completely. */
	if (tag >= periph->periph_openings) {
		scsipi_printaddr(periph);
		printf("WARNING: tag %d greater than available openings %d\n",
		    tag, periph->periph_openings);
	}

	xs->xs_tag_id = tag;
}

/*
 * scsipi_put_tag:
 *
 *	Put the tag ID for the specified xfer back into the pool.
 *
 *	NOTE: Must be called at splbio().
 */
void
scsipi_put_tag(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_periph *periph = xs->xs_periph;
	int word, bit;

	word = xs->xs_tag_id >> 5;
	bit = xs->xs_tag_id & 0x1f;

	periph->periph_freetags[word] |= (1 << bit);
}
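
#ifdef SCSIPI_EXAMPLES
/*
 * Illustrative sketch (hypothetical, compiled only with the made-up
 * SCSIPI_EXAMPLES option): tag IDs are packed as (word << 5) | bit,
 * i.e. 32 tags per periph_freetags[] word, so the word and bit are
 * recovered with a shift and a 5-bit mask, exactly as scsipi_put_tag()
 * does above.
 */
static __inline void
scsipi_example_tag_decode(int tag, int *wordp, int *bitp)
{

	*wordp = tag >> 5;	/* which periph_freetags[] word */
	*bitp = tag & 0x1f;	/* which bit within that word */
}
#endif /* SCSIPI_EXAMPLES */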

/*
 * scsipi_get_xs:
 *
 *	Allocate an xfer descriptor and associate it with the
 *	specified peripheral.  If the peripheral has no more
 *	available command openings, we either block waiting for
 *	one to become available, or fail.
 */
struct scsipi_xfer *
scsipi_get_xs(periph, flags)
	struct scsipi_periph *periph;
	int flags;
{
	struct scsipi_xfer *xs;
	int s;

	SC_DEBUG(periph, SCSIPI_DB3, ("scsipi_get_xs\n"));

	/*
	 * If we're cold, make sure we poll.
	 */
	if (cold)
		flags |= XS_CTL_NOSLEEP | XS_CTL_POLL;

#ifdef DIAGNOSTIC
	/*
	 * URGENT commands can never be ASYNC.
	 */
	if ((flags & (XS_CTL_URGENT|XS_CTL_ASYNC)) ==
	    (XS_CTL_URGENT|XS_CTL_ASYNC)) {
		scsipi_printaddr(periph);
		printf("URGENT and ASYNC\n");
		panic("scsipi_get_xs");
	}
#endif

	s = splbio();
	/*
	 * Wait for a command opening to become available.  Rules:
	 *
	 *	- All xfers must wait for an available opening.
	 *	  Exception: URGENT xfers can proceed when
	 *	  active == openings, because we use the opening
	 *	  of the command we're recovering for.
	 *
	 *	- If the periph has sense pending, only URGENT & REQSENSE
	 *	  xfers may proceed.
	 *
	 *	- If the periph is recovering, only URGENT xfers may
	 *	  proceed.
	 *
	 *	- If the periph is currently executing a recovery
	 *	  command, URGENT commands must block, because only
	 *	  one recovery command can execute at a time.
	 */
	for (;;) {
		if (flags & XS_CTL_URGENT) {
			if (periph->periph_active > periph->periph_openings)
				goto wait_for_opening;
			if (periph->periph_flags & PERIPH_SENSE) {
				if ((flags & XS_CTL_REQSENSE) == 0)
					goto wait_for_opening;
			} else {
				if ((periph->periph_flags &
				    PERIPH_RECOVERY_ACTIVE) != 0)
					goto wait_for_opening;
				periph->periph_flags |= PERIPH_RECOVERY_ACTIVE;
			}
			break;
		}
		if (periph->periph_active >= periph->periph_openings ||
		    (periph->periph_flags & PERIPH_RECOVERING) != 0)
			goto wait_for_opening;
		periph->periph_active++;
		break;

 wait_for_opening:
		if (flags & XS_CTL_NOSLEEP) {
			splx(s);
			return (NULL);
		}
		SC_DEBUG(periph, SCSIPI_DB3, ("sleeping\n"));
		periph->periph_flags |= PERIPH_WAITING;
		(void) tsleep(periph, PRIBIO, "getxs", 0);
	}
	SC_DEBUG(periph, SCSIPI_DB3, ("calling pool_get\n"));
	xs = pool_get(&scsipi_xfer_pool,
	    ((flags & XS_CTL_NOSLEEP) != 0 ? PR_NOWAIT : PR_WAITOK));
	if (xs == NULL) {
		if (flags & XS_CTL_URGENT) {
			if ((flags & XS_CTL_REQSENSE) == 0)
				periph->periph_flags &= ~PERIPH_RECOVERY_ACTIVE;
		} else
			periph->periph_active--;
		scsipi_printaddr(periph);
		printf("unable to allocate %sscsipi_xfer\n",
		    (flags & XS_CTL_URGENT) ? "URGENT " : "");
	}
	splx(s);

	SC_DEBUG(periph, SCSIPI_DB3, ("returning\n"));

	if (xs != NULL) {
		memset(xs, 0, sizeof(*xs));
		callout_init(&xs->xs_callout);
		xs->xs_periph = periph;
		xs->xs_control = flags;
		xs->xs_status = 0;
		s = splbio();
		TAILQ_INSERT_TAIL(&periph->periph_xferq, xs, device_q);
		splx(s);
	}
	return (xs);
}

/*
 * scsipi_put_xs:
 *
 *	Release an xfer descriptor, decreasing the outstanding command
 *	count for the peripheral.  If there is a thread waiting for
 *	an opening, wake it up.  If not, kick any queued I/O the
 *	peripheral may have.
 *
 *	NOTE: Must be called at splbio().
 */
void
scsipi_put_xs(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_periph *periph = xs->xs_periph;
	int flags = xs->xs_control;

	SC_DEBUG(periph, SCSIPI_DB3, ("scsipi_free_xs\n"));

	TAILQ_REMOVE(&periph->periph_xferq, xs, device_q);
	pool_put(&scsipi_xfer_pool, xs);

#ifdef DIAGNOSTIC
	if ((periph->periph_flags & PERIPH_RECOVERY_ACTIVE) != 0 &&
	    periph->periph_active == 0) {
		scsipi_printaddr(periph);
		printf("recovery without a command to recover for\n");
		panic("scsipi_put_xs");
	}
#endif

	if (flags & XS_CTL_URGENT) {
		if ((flags & XS_CTL_REQSENSE) == 0)
			periph->periph_flags &= ~PERIPH_RECOVERY_ACTIVE;
	} else
		periph->periph_active--;
	if (periph->periph_active == 0 &&
	    (periph->periph_flags & PERIPH_WAITDRAIN) != 0) {
		periph->periph_flags &= ~PERIPH_WAITDRAIN;
		wakeup(&periph->periph_active);
	}

	if (periph->periph_flags & PERIPH_WAITING) {
		periph->periph_flags &= ~PERIPH_WAITING;
		wakeup(periph);
	} else {
		if (periph->periph_switch->psw_start != NULL &&
		    (periph->periph_dev->dv_flags & DVF_ACTIVE)) {
			SC_DEBUG(periph, SCSIPI_DB2,
			    ("calling private start()\n"));
			(*periph->periph_switch->psw_start)(periph);
		}
	}
}
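
#ifdef SCSIPI_EXAMPLES
/*
 * Illustrative sketch (hypothetical, guarded by the made-up
 * SCSIPI_EXAMPLES option): every successful scsipi_get_xs() must
 * eventually be balanced by a scsipi_put_xs() at splbio(), as in this
 * minimal allocate-then-release sequence.
 */
static int
scsipi_example_xs_cycle(struct scsipi_periph *periph)
{
	struct scsipi_xfer *xs;
	int s;

	xs = scsipi_get_xs(periph, XS_CTL_NOSLEEP);
	if (xs == NULL)
		return (ENOMEM);
	/* ... fill in the cdb and hand it to scsipi_execute_xs() ... */
	s = splbio();
	scsipi_put_xs(xs);
	splx(s);
	return (0);
}
#endif /* SCSIPI_EXAMPLES */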

/*
 * scsipi_channel_freeze:
 *
 *	Freeze a channel's xfer queue.
 */
void
scsipi_channel_freeze(chan, count)
	struct scsipi_channel *chan;
	int count;
{
	int s;

	s = splbio();
	chan->chan_qfreeze += count;
	splx(s);
}

/*
 * scsipi_channel_thaw:
 *
 *	Thaw a channel's xfer queue.
 */
void
scsipi_channel_thaw(chan, count)
	struct scsipi_channel *chan;
	int count;
{
	int s;

	s = splbio();
	chan->chan_qfreeze -= count;
	/*
	 * Don't let the freeze count go negative.
	 *
	 * Presumably the adapter driver could keep track of this,
	 * but it might just be easier to do this here so as to allow
	 * multiple callers, including those outside the adapter driver.
	 */
	if (chan->chan_qfreeze < 0) {
		chan->chan_qfreeze = 0;
	}
	splx(s);
	/*
	 * Kick the channel's queue here.  Note, we may be running in
	 * interrupt context (softclock or HBA's interrupt), so the adapter
	 * driver had better not sleep.
	 */
	if (chan->chan_qfreeze == 0)
		scsipi_run_queue(chan);
}

/*
 * scsipi_channel_timed_thaw:
 *
 *	Thaw a channel after some time has expired.  This will also
 *	run the channel's queue if the freeze count has reached 0.
 */
void
scsipi_channel_timed_thaw(arg)
	void *arg;
{
	struct scsipi_channel *chan = arg;

	scsipi_channel_thaw(chan, 1);
}
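
#ifdef SCSIPI_EXAMPLES
/*
 * Illustrative sketch (hypothetical, guarded by the made-up
 * SCSIPI_EXAMPLES option): an adapter that must back off for a while
 * freezes the channel and schedules the thaw with a callout, here for
 * one second.  scsipi_channel_timed_thaw() then thaws the queue and
 * kicks it from softclock context.
 */
static void
scsipi_example_backoff(struct scsipi_channel *chan, struct callout *co)
{

	scsipi_channel_freeze(chan, 1);
	callout_reset(co, hz, scsipi_channel_timed_thaw, chan);
}
#endif /* SCSIPI_EXAMPLES */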

/*
 * scsipi_periph_freeze:
 *
 *	Freeze a device's xfer queue.
 */
void
scsipi_periph_freeze(periph, count)
	struct scsipi_periph *periph;
	int count;
{
	int s;

	s = splbio();
	periph->periph_qfreeze += count;
	splx(s);
}

/*
 * scsipi_periph_thaw:
 *
 *	Thaw a device's xfer queue.
 */
void
scsipi_periph_thaw(periph, count)
	struct scsipi_periph *periph;
	int count;
{
	int s;

	s = splbio();
	periph->periph_qfreeze -= count;
#ifdef DIAGNOSTIC
	if (periph->periph_qfreeze < 0) {
		static const char pc[] = "periph freeze count < 0";
		scsipi_printaddr(periph);
		printf("%s\n", pc);
		panic(pc);
	}
#endif
	if (periph->periph_qfreeze == 0 &&
	    (periph->periph_flags & PERIPH_WAITING) != 0)
		wakeup(periph);
	splx(s);
}

/*
 * scsipi_periph_timed_thaw:
 *
 *	Thaw a device after some time has expired.
 */
void
scsipi_periph_timed_thaw(arg)
	void *arg;
{
	int s;
	struct scsipi_periph *periph = arg;

	callout_stop(&periph->periph_callout);

	s = splbio();
	scsipi_periph_thaw(periph, 1);
	if ((periph->periph_channel->chan_flags & SCSIPI_CHAN_TACTIVE) == 0) {
		/*
		 * Kick the channel's queue here.  Note, we're running in
		 * interrupt context (softclock), so the adapter driver
		 * had better not sleep.
		 */
		scsipi_run_queue(periph->periph_channel);
	} else {
		/*
		 * Tell the completion thread to kick the channel's queue here.
		 */
		periph->periph_channel->chan_tflags |= SCSIPI_CHANT_KICK;
		wakeup(&periph->periph_channel->chan_complete);
	}
	splx(s);
}

/*
 * scsipi_wait_drain:
 *
 *	Wait for a periph's pending xfers to drain.
 */
void
scsipi_wait_drain(periph)
	struct scsipi_periph *periph;
{
	int s;

	s = splbio();
	while (periph->periph_active != 0) {
		periph->periph_flags |= PERIPH_WAITDRAIN;
		(void) tsleep(&periph->periph_active, PRIBIO, "sxdrn", 0);
	}
	splx(s);
}

/*
 * scsipi_kill_pending:
 *
 *	Kill off all pending xfers for a periph.
 *
 *	NOTE: Must be called at splbio().
 */
void
scsipi_kill_pending(periph)
	struct scsipi_periph *periph;
{

	(*periph->periph_channel->chan_bustype->bustype_kill_pending)(periph);
	scsipi_wait_drain(periph);
}

/*
 * scsipi_print_cdb:
 *	prints a command descriptor block (for debugging, error messages,
 *	SCSIPI_VERBOSE, ...)
 */
void
scsipi_print_cdb(cmd)
	struct scsipi_generic *cmd;
{
	int i, j;

	printf("0x%02x", cmd->opcode);

	switch (CDB_GROUPID(cmd->opcode)) {
	case CDB_GROUPID_0:
		j = CDB_GROUP0;
		break;
	case CDB_GROUPID_1:
		j = CDB_GROUP1;
		break;
	case CDB_GROUPID_2:
		j = CDB_GROUP2;
		break;
	case CDB_GROUPID_3:
		j = CDB_GROUP3;
		break;
	case CDB_GROUPID_4:
		j = CDB_GROUP4;
		break;
	case CDB_GROUPID_5:
		j = CDB_GROUP5;
		break;
	case CDB_GROUPID_6:
		j = CDB_GROUP6;
		break;
	case CDB_GROUPID_7:
		j = CDB_GROUP7;
		break;
	default:
		j = 0;
	}
	if (j == 0)
		j = sizeof (cmd->bytes);
	for (i = 0; i < j - 1; i++)	/* already done the opcode */
		printf(" %02x", cmd->bytes[i]);
}
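
#ifdef SCSIPI_EXAMPLES
/*
 * Illustrative sketch (hypothetical, guarded by the made-up
 * SCSIPI_EXAMPLES option): the CDB length printed above is derived
 * from the group bits of the opcode.  A group 0 command such as
 * TEST UNIT READY (opcode 0x00) is a 6-byte CDB, so it would be
 * dumped as "0x00 00 00 00 00 00".
 */
static void
scsipi_example_dump_tur(void)
{
	struct scsipi_generic cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = TEST_UNIT_READY;	/* CDB_GROUPID_0 -> CDB_GROUP0 (6) */
	scsipi_print_cdb(&cmd);
	printf("\n");
}
#endif /* SCSIPI_EXAMPLES */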

/*
 * scsipi_interpret_sense:
 *
 *	Look at the returned sense and act on the error, determining
 *	the unix error number to pass back.  (0 = report no error)
 *
 *	NOTE: If we return ERESTART, we are expected to have
 *	thawed the device!
 *
 *	THIS IS THE DEFAULT ERROR HANDLER FOR SCSI DEVICES.
 */
int
scsipi_interpret_sense(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_sense_data *sense;
	struct scsipi_periph *periph = xs->xs_periph;
	u_int8_t key;
	int error;
#ifndef	SCSIVERBOSE
	u_int32_t info;
	static char *error_mes[] = {
		"soft error (corrected)",
		"not ready", "medium error",
		"non-media hardware failure", "illegal request",
		"unit attention", "readonly device",
		"no data found", "vendor unique",
		"copy aborted", "command aborted",
		"search returned equal", "volume overflow",
		"verify miscompare", "unknown error key"
	};
#endif

	sense = &xs->sense.scsi_sense;
#ifdef SCSIPI_DEBUG
	if (periph->periph_flags & SCSIPI_DB1) {
		int count;
		scsipi_printaddr(periph);
		printf(" sense debug information:\n");
		printf("\tcode 0x%x valid 0x%x\n",
		    sense->error_code & SSD_ERRCODE,
		    sense->error_code & SSD_ERRCODE_VALID ? 1 : 0);
		printf("\tseg 0x%x key 0x%x ili 0x%x eom 0x%x fmark 0x%x\n",
		    sense->segment,
		    sense->flags & SSD_KEY,
		    sense->flags & SSD_ILI ? 1 : 0,
		    sense->flags & SSD_EOM ? 1 : 0,
		    sense->flags & SSD_FILEMARK ? 1 : 0);
		printf("\ninfo: 0x%x 0x%x 0x%x 0x%x followed by %d "
		    "extra bytes\n",
		    sense->info[0],
		    sense->info[1],
		    sense->info[2],
		    sense->info[3],
		    sense->extra_len);
		printf("\textra: ");
		for (count = 0; count < ADD_BYTES_LIM(sense); count++)
			printf("0x%x ", sense->cmd_spec_info[count]);
		printf("\n");
	}
#endif

	/*
	 * If the periph has its own error handler, call it first.
	 * If it returns a legit error value, return that, otherwise
	 * it wants us to continue with normal error processing.
	 */
	if (periph->periph_switch->psw_error != NULL) {
		SC_DEBUG(periph, SCSIPI_DB2,
		    ("calling private err_handler()\n"));
		error = (*periph->periph_switch->psw_error)(xs);
		if (error != EJUSTRETURN)
			return (error);
	}
	/* otherwise use the default */
	switch (sense->error_code & SSD_ERRCODE) {

		/*
		 * Old SCSI-1 and SASI devices respond with
		 * codes other than 70.
		 */
	case 0x00:	/* no error (command completed OK) */
		return (0);
	case 0x04:	/* drive not ready after it was selected */
		if ((periph->periph_flags & PERIPH_REMOVABLE) != 0)
			periph->periph_flags &= ~PERIPH_MEDIA_LOADED;
		if ((xs->xs_control & XS_CTL_IGNORE_NOT_READY) != 0)
			return (0);
		/* XXX - display some sort of error here? */
		return (EIO);
	case 0x20:	/* invalid command */
		if ((xs->xs_control &
		    XS_CTL_IGNORE_ILLEGAL_REQUEST) != 0)
			return (0);
		return (EINVAL);
	case 0x25:	/* invalid LUN (Adaptec ACB-4000) */
		return (EACCES);

		/*
		 * If it's code 70, use the extended stuff and
		 * interpret the key
		 */
	case 0x71:	/* delayed error */
		scsipi_printaddr(periph);
		key = sense->flags & SSD_KEY;
		printf(" DEFERRED ERROR, key = 0x%x\n", key);
		/* FALLTHROUGH */
	case 0x70:
#ifndef	SCSIVERBOSE
		if ((sense->error_code & SSD_ERRCODE_VALID) != 0)
			info = _4btol(sense->info);
		else
			info = 0;
#endif
		key = sense->flags & SSD_KEY;

		switch (key) {
		case SKEY_NO_SENSE:
		case SKEY_RECOVERED_ERROR:
			if (xs->resid == xs->datalen && xs->datalen) {
				/*
				 * Why is this here?
				 */
				xs->resid = 0;	/* not short read */
			}
			/* FALLTHROUGH */
		case SKEY_EQUAL:
			error = 0;
			break;
		case SKEY_NOT_READY:
			if ((periph->periph_flags & PERIPH_REMOVABLE) != 0)
				periph->periph_flags &= ~PERIPH_MEDIA_LOADED;
			if ((xs->xs_control & XS_CTL_IGNORE_NOT_READY) != 0)
				return (0);
			if (sense->add_sense_code == 0x3A) {
				error = ENODEV; /* Medium not present */
				if (xs->xs_control & XS_CTL_SILENT_NODEV)
					return (error);
			} else
				error = EIO;
			if ((xs->xs_control & XS_CTL_SILENT) != 0)
				return (error);
			break;
		case SKEY_ILLEGAL_REQUEST:
			if ((xs->xs_control &
			    XS_CTL_IGNORE_ILLEGAL_REQUEST) != 0)
				return (0);
			/*
			 * Handle the case where a device reports
			 * Logical Unit Not Supported during discovery.
			 */
			if ((xs->xs_control & XS_CTL_DISCOVERY) != 0 &&
			    sense->add_sense_code == 0x25 &&
			    sense->add_sense_code_qual == 0x00)
				return (EINVAL);
			if ((xs->xs_control & XS_CTL_SILENT) != 0)
				return (EIO);
			error = EINVAL;
			break;
		case SKEY_UNIT_ATTENTION:
			if (sense->add_sense_code == 0x29 &&
			    sense->add_sense_code_qual == 0x00) {
				/* device or bus reset */
				return (ERESTART);
			}
			if ((periph->periph_flags & PERIPH_REMOVABLE) != 0)
				periph->periph_flags &= ~PERIPH_MEDIA_LOADED;
			if ((xs->xs_control &
			    XS_CTL_IGNORE_MEDIA_CHANGE) != 0 ||
			    /* XXX Should reupload any transient state. */
			    (periph->periph_flags &
			     PERIPH_REMOVABLE) == 0) {
				return (ERESTART);
			}
			if ((xs->xs_control & XS_CTL_SILENT) != 0)
				return (EIO);
			error = EIO;
			break;
		case SKEY_WRITE_PROTECT:
			error = EROFS;
			break;
		case SKEY_BLANK_CHECK:
			error = 0;
			break;
		case SKEY_ABORTED_COMMAND:
			if (xs->xs_retries != 0) {
				xs->xs_retries--;
				error = ERESTART;
			} else
				error = EIO;
			break;
		case SKEY_VOLUME_OVERFLOW:
			error = ENOSPC;
			break;
		default:
			error = EIO;
			break;
		}

#ifdef SCSIVERBOSE
		if (key && (xs->xs_control & XS_CTL_SILENT) == 0)
			scsipi_print_sense(xs, 0);
#else
		if (key) {
			scsipi_printaddr(periph);
			printf("%s", error_mes[key - 1]);
			if ((sense->error_code & SSD_ERRCODE_VALID) != 0) {
				switch (key) {
				case SKEY_NOT_READY:
				case SKEY_ILLEGAL_REQUEST:
				case SKEY_UNIT_ATTENTION:
				case SKEY_WRITE_PROTECT:
					break;
				case SKEY_BLANK_CHECK:
					printf(", requested size: %d (decimal)",
					    info);
					break;
				case SKEY_ABORTED_COMMAND:
					if (xs->xs_retries)
						printf(", retrying");
					printf(", cmd 0x%x, info 0x%x",
					    xs->cmd->opcode, info);
					break;
				default:
					printf(", info = %d (decimal)", info);
				}
			}
			if (sense->extra_len != 0) {
				int n;
				printf(", data =");
				for (n = 0; n < sense->extra_len; n++)
					printf(" %02x",
					    sense->cmd_spec_info[n]);
			}
			printf("\n");
		}
#endif
		return (error);

	/*
	 * Some other code, just report it
	 */
	default:
#if defined(SCSIDEBUG) || defined(DEBUG)
	{
		static char *uc = "undecodable sense error";
		int i;
		u_int8_t *cptr = (u_int8_t *) sense;
		scsipi_printaddr(periph);
		if (xs->cmd == &xs->cmdstore) {
			printf("%s for opcode 0x%x, data=",
			    uc, xs->cmdstore.opcode);
		} else {
			printf("%s, data=", uc);
		}
		for (i = 0; i < sizeof (sense); i++)
			printf(" 0x%02x", *(cptr++) & 0xff);
		printf("\n");
	}
#else
		scsipi_printaddr(periph);
		printf("Sense Error Code 0x%x",
		    sense->error_code & SSD_ERRCODE);
		if ((sense->error_code & SSD_ERRCODE_VALID) != 0) {
			struct scsipi_sense_data_unextended *usense =
			    (struct scsipi_sense_data_unextended *)sense;
			printf(" at block no. %d (decimal)",
			    _3btol(usense->block));
		}
		printf("\n");
#endif
		return (EIO);
	}
}
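
#ifdef SCSIPI_EXAMPLES
/*
 * Illustrative sketch (hypothetical, guarded by the made-up
 * SCSIPI_EXAMPLES option): a periph driver can override the default
 * handler above by providing psw_error in its periph switch.
 * Returning EJUSTRETURN hands the xfer back to the generic code for
 * normal error processing.
 */
static int
scsipi_example_psw_error(struct scsipi_xfer *xs)
{
	struct scsipi_sense_data *sense = &xs->sense.scsi_sense;

	/* Treat RECOVERED ERROR as success; defer everything else. */
	if ((sense->flags & SSD_KEY) == SKEY_RECOVERED_ERROR)
		return (0);
	return (EJUSTRETURN);
}
#endif /* SCSIPI_EXAMPLES */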

/*
 * scsipi_size:
 *
 *	Find out from the device what its capacity is.
 */
u_int64_t
scsipi_size(periph, flags)
	struct scsipi_periph *periph;
	int flags;
{
	struct scsipi_read_cap_data rdcap;
	struct scsipi_read_capacity scsipi_cmd;

	memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
	scsipi_cmd.opcode = READ_CAPACITY;

	/*
	 * If the command works, interpret the result as a 4 byte
	 * number of blocks
	 */
	if (scsipi_command(periph, (struct scsipi_generic *)&scsipi_cmd,
	    sizeof(scsipi_cmd), (u_char *)&rdcap, sizeof(rdcap),
	    SCSIPIRETRIES, 20000, NULL,
	    flags | XS_CTL_DATA_IN | XS_CTL_DATA_ONSTACK | XS_CTL_SILENT) != 0)
		return (0);

	return (_4btol(rdcap.addr) + 1);
}
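
#ifdef SCSIPI_EXAMPLES
/*
 * Illustrative sketch (hypothetical, guarded by the made-up
 * SCSIPI_EXAMPLES option): scsipi_size() returns the number of blocks
 * (last LBA + 1), or 0 on error, so a caller that knows the block size
 * can derive the capacity in bytes.
 */
static u_int64_t
scsipi_example_capacity_bytes(struct scsipi_periph *periph, u_int blksize)
{
	u_int64_t blocks;

	blocks = scsipi_size(periph, 0);	/* 0 on error */
	return (blocks * blksize);
}
#endif /* SCSIPI_EXAMPLES */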

/*
 * scsipi_test_unit_ready:
 *
 *	Issue a `test unit ready' request.
 */
int
scsipi_test_unit_ready(periph, flags)
	struct scsipi_periph *periph;
	int flags;
{
	int retries;
	struct scsipi_test_unit_ready scsipi_cmd;

	/* some ATAPI drives don't support TEST_UNIT_READY. Sigh */
	if (periph->periph_quirks & PQUIRK_NOTUR)
		return (0);

	memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
	scsipi_cmd.opcode = TEST_UNIT_READY;

	if (flags & XS_CTL_DISCOVERY)
		retries = 0;
	else
		retries = SCSIPIRETRIES;

	return (scsipi_command(periph,
	    (struct scsipi_generic *)&scsipi_cmd, sizeof(scsipi_cmd),
	    0, 0, retries, 10000, NULL, flags));
}

/*
 * scsipi_inquire:
 *
 *	Ask the device about itself.
 */
int
scsipi_inquire(periph, inqbuf, flags)
	struct scsipi_periph *periph;
	struct scsipi_inquiry_data *inqbuf;
	int flags;
{
	int retries;
	struct scsipi_inquiry scsipi_cmd;
	int error;

	memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
	scsipi_cmd.opcode = INQUIRY;

	if (flags & XS_CTL_DISCOVERY)
		retries = 0;
	else
		retries = SCSIPIRETRIES;

	/*
	 * If we request more data than the device can provide, it SHOULD just
	 * return a short response.  However, some devices error with an
	 * ILLEGAL REQUEST sense code, and yet others have even more special
	 * failure modes (such as the GL641USB flash adapter, which goes loony
	 * and sends corrupted CRCs).  To work around this, and to bring our
	 * behavior more in line with other OSes, we do a shorter inquiry,
	 * covering all the SCSI-2 information, first, and then request more
	 * data iff the "additional length" field indicates there is more.
	 * - mycroft, 2003/10/16
	 */
	scsipi_cmd.length = SCSIPI_INQUIRY_LENGTH_SCSI2;
	error = scsipi_command(periph,
	    (struct scsipi_generic *) &scsipi_cmd, sizeof(scsipi_cmd),
	    (u_char *) inqbuf, SCSIPI_INQUIRY_LENGTH_SCSI2,
	    retries, 10000, NULL, XS_CTL_DATA_IN | flags);
	if (!error &&
	    inqbuf->additional_length > SCSIPI_INQUIRY_LENGTH_SCSI2 - 4) {
		scsipi_cmd.length = SCSIPI_INQUIRY_LENGTH_SCSI3;
		error = scsipi_command(periph,
		    (struct scsipi_generic *) &scsipi_cmd, sizeof(scsipi_cmd),
		    (u_char *) inqbuf, SCSIPI_INQUIRY_LENGTH_SCSI3,
		    retries, 10000, NULL, XS_CTL_DATA_IN | flags);
	}

#ifdef SCSI_OLD_NOINQUIRY
	/*
	 * Kludge for the Adaptec ACB-4000 SCSI->MFM translator.
	 * This board doesn't support the INQUIRY command at all.
	 */
	if (error == EINVAL || error == EACCES) {
		/*
		 * Conjure up an INQUIRY response.
		 */
		inqbuf->device = (error == EINVAL ?
		    SID_QUAL_LU_PRESENT :
		    SID_QUAL_LU_NOTPRESENT) | T_DIRECT;
		inqbuf->dev_qual2 = 0;
		inqbuf->version = 0;
		inqbuf->response_format = SID_FORMAT_SCSI1;
		inqbuf->additional_length = SCSIPI_INQUIRY_LENGTH_SCSI2 - 4;
		inqbuf->flags1 = inqbuf->flags2 = inqbuf->flags3 = 0;
		memcpy(inqbuf->vendor, "ADAPTEC ACB-4000            ", 28);
		error = 0;
	}

	/*
	 * Kludge for the Emulex MT-02 SCSI->QIC translator.
	 * This board gives an empty response to an INQUIRY command.
	 */
	else if (error == 0 &&
	    inqbuf->device == (SID_QUAL_LU_PRESENT | T_DIRECT) &&
	    inqbuf->dev_qual2 == 0 &&
	    inqbuf->version == 0 &&
	    inqbuf->response_format == SID_FORMAT_SCSI1) {
		/*
		 * Fill out the INQUIRY response.
		 */
		inqbuf->device = (SID_QUAL_LU_PRESENT | T_SEQUENTIAL);
		inqbuf->dev_qual2 = SID_REMOVABLE;
		inqbuf->additional_length = SCSIPI_INQUIRY_LENGTH_SCSI2 - 4;
		inqbuf->flags1 = inqbuf->flags2 = inqbuf->flags3 = 0;
		memcpy(inqbuf->vendor, "EMULEX  MT-02 QIC           ", 28);
	}
#endif /* SCSI_OLD_NOINQUIRY */

	return error;
}

/*
 * scsipi_prevent:
 *
 *	Prevent or allow the user to remove the media
 */
int
scsipi_prevent(periph, type, flags)
	struct scsipi_periph *periph;
	int type, flags;
{
	struct scsipi_prevent scsipi_cmd;

	memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
	scsipi_cmd.opcode = PREVENT_ALLOW;
	scsipi_cmd.how = type;

	return (scsipi_command(periph,
	    (struct scsipi_generic *) &scsipi_cmd, sizeof(scsipi_cmd),
	    0, 0, SCSIPIRETRIES, 5000, NULL, flags));
}

/*
 * scsipi_start:
 *
 *	Send a START UNIT.
 */
int
scsipi_start(periph, type, flags)
	struct scsipi_periph *periph;
	int type, flags;
{
	struct scsipi_start_stop scsipi_cmd;

	memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
	scsipi_cmd.opcode = START_STOP;
	scsipi_cmd.byte2 = 0x00;
	scsipi_cmd.how = type;

	return (scsipi_command(periph,
	    (struct scsipi_generic *) &scsipi_cmd, sizeof(scsipi_cmd),
	    0, 0, SCSIPIRETRIES, (type & SSS_START) ? 60000 : 10000,
	    NULL, flags));
}

/*
 * scsipi_mode_sense, scsipi_mode_sense_big:
 *	get a sense page from a device
 */

int
scsipi_mode_sense(periph, byte2, page, data, len, flags, retries, timeout)
	struct scsipi_periph *periph;
	int byte2, page, len, flags, retries, timeout;
	struct scsipi_mode_header *data;
{
	struct scsipi_mode_sense scsipi_cmd;
	int error;

	memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
	scsipi_cmd.opcode = MODE_SENSE;
	scsipi_cmd.byte2 = byte2;
	scsipi_cmd.page = page;
	scsipi_cmd.length = len & 0xff;
	error = scsipi_command(periph, (struct scsipi_generic *)&scsipi_cmd,
	    sizeof(scsipi_cmd), (void *)data, len, retries, timeout, NULL,
	    flags | XS_CTL_DATA_IN);
	SC_DEBUG(periph, SCSIPI_DB2,
	    ("scsipi_mode_sense: error=%d\n", error));
	return (error);
}

int
scsipi_mode_sense_big(periph, byte2, page, data, len, flags, retries, timeout)
	struct scsipi_periph *periph;
	int byte2, page, len, flags, retries, timeout;
	struct scsipi_mode_header_big *data;
{
	struct scsipi_mode_sense_big scsipi_cmd;
	int error;

	memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
	scsipi_cmd.opcode = MODE_SENSE_BIG;
	scsipi_cmd.byte2 = byte2;
	scsipi_cmd.page = page;
	_lto2b(len, scsipi_cmd.length);
	error = scsipi_command(periph, (struct scsipi_generic *)&scsipi_cmd,
	    sizeof(scsipi_cmd), (void *)data, len, retries, timeout, NULL,
	    flags | XS_CTL_DATA_IN);
	SC_DEBUG(periph, SCSIPI_DB2,
	    ("scsipi_mode_sense_big: error=%d\n", error));
	return (error);
}

int
scsipi_mode_select(periph, byte2, data, len, flags, retries, timeout)
	struct scsipi_periph *periph;
	int byte2, len, flags, retries, timeout;
	struct scsipi_mode_header *data;
{
	struct scsipi_mode_select scsipi_cmd;
	int error;

	memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
	scsipi_cmd.opcode = MODE_SELECT;
	scsipi_cmd.byte2 = byte2;
	scsipi_cmd.length = len & 0xff;
	error = scsipi_command(periph, (struct scsipi_generic *)&scsipi_cmd,
	    sizeof(scsipi_cmd), (void *)data, len, retries, timeout, NULL,
	    flags | XS_CTL_DATA_OUT);
	SC_DEBUG(periph, SCSIPI_DB2,
	    ("scsipi_mode_select: error=%d\n", error));
	return (error);
}

int
scsipi_mode_select_big(periph, byte2, data, len, flags, retries, timeout)
	struct scsipi_periph *periph;
	int byte2, len, flags, retries, timeout;
	struct scsipi_mode_header_big *data;
{
	struct scsipi_mode_select_big scsipi_cmd;
	int error;

	memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
	scsipi_cmd.opcode = MODE_SELECT_BIG;
	scsipi_cmd.byte2 = byte2;
	_lto2b(len, scsipi_cmd.length);
	error = scsipi_command(periph, (struct scsipi_generic *)&scsipi_cmd,
	    sizeof(scsipi_cmd), (void *)data, len, retries, timeout, NULL,
	    flags | XS_CTL_DATA_OUT);
	SC_DEBUG(periph, SCSIPI_DB2,
	    ("scsipi_mode_select_big: error=%d\n", error));
	return (error);
}
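
#ifdef SCSIPI_EXAMPLES
/*
 * Illustrative sketch (hypothetical, guarded by the made-up
 * SCSIPI_EXAMPLES option): fetching a mode page with
 * scsipi_mode_sense().  The caller supplies a buffer that begins with
 * the mode header; here we only size it for the header itself, and
 * flag the on-stack buffer with XS_CTL_DATA_ONSTACK.
 */
static int
scsipi_example_mode_sense(struct scsipi_periph *periph)
{
	struct scsipi_mode_header header;

	/* Page 0x01 is the read-write error recovery page. */
	return (scsipi_mode_sense(periph, 0, 0x01, &header, sizeof(header),
	    XS_CTL_DATA_ONSTACK, SCSIPIRETRIES, 10000));
}
#endif /* SCSIPI_EXAMPLES */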

/*
 * scsipi_done:
 *
 *	This routine is called by an adapter's interrupt handler when
 *	an xfer is completed.
 */
void
scsipi_done(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_periph *periph = xs->xs_periph;
	struct scsipi_channel *chan = periph->periph_channel;
	int s, freezecnt;

	SC_DEBUG(periph, SCSIPI_DB2, ("scsipi_done\n"));
#ifdef SCSIPI_DEBUG
	if (periph->periph_dbflags & SCSIPI_DB1)
		show_scsipi_cmd(xs);
#endif

	s = splbio();
	/*
	 * The resource this command was using is now free.
	 */
	scsipi_put_resource(chan);
	xs->xs_periph->periph_sent--;

	/*
	 * If the command was tagged, free the tag.
	 */
	if (XS_CTL_TAGTYPE(xs) != 0)
		scsipi_put_tag(xs);
	else
		periph->periph_flags &= ~PERIPH_UNTAG;

	/* Mark the command as `done'. */
	xs->xs_status |= XS_STS_DONE;

#ifdef DIAGNOSTIC
	if ((xs->xs_control & (XS_CTL_ASYNC|XS_CTL_POLL)) ==
	    (XS_CTL_ASYNC|XS_CTL_POLL))
		panic("scsipi_done: ASYNC and POLL");
#endif

	/*
	 * If the xfer had an error of any sort, freeze the
	 * periph's queue.  Freeze it again if we were requested
	 * to do so in the xfer.
	 */
	freezecnt = 0;
	if (xs->error != XS_NOERROR)
		freezecnt++;
	if (xs->xs_control & XS_CTL_FREEZE_PERIPH)
		freezecnt++;
	if (freezecnt != 0)
		scsipi_periph_freeze(periph, freezecnt);

	/*
	 * record the xfer with a pending sense, in case a SCSI reset is
	 * received before the thread is woken up.
	 */
	if (xs->error == XS_BUSY && xs->status == SCSI_CHECK) {
		periph->periph_flags |= PERIPH_SENSE;
		periph->periph_xscheck = xs;
	}

	/*
	 * If this was an xfer that was not to complete asynchronously,
	 * let the requesting thread perform error checking/handling
	 * in its context.
	 */
	if ((xs->xs_control & XS_CTL_ASYNC) == 0) {
		splx(s);
		/*
		 * If it's a polling job, just return, to unwind the
		 * call graph.  We don't need to restart the queue,
		 * because polling jobs are treated specially, and
		 * are really only used during crash dumps anyway
		 * (XXX or during boot-time autoconfiguration of
		 * ATAPI devices).
		 */
		if (xs->xs_control & XS_CTL_POLL)
			return;
		wakeup(xs);
		goto out;
	}

	/*
	 * Catch the extremely common case of I/O completing
	 * without error; no use in taking a context switch
	 * if we can handle it in interrupt context.
	 */
	if (xs->error == XS_NOERROR) {
		splx(s);
		(void) scsipi_complete(xs);
		goto out;
	}

	/*
	 * There is an error on this xfer.  Put it on the channel's
	 * completion queue, and wake up the completion thread.
	 */
	TAILQ_INSERT_TAIL(&chan->chan_complete, xs, channel_q);
	splx(s);
	wakeup(&chan->chan_complete);

 out:
	/*
	 * If there are more xfers on the channel's queue, attempt to
	 * run them.
	 */
	scsipi_run_queue(chan);
}

/*
 * scsipi_complete:
 *
 *	Completion of a scsipi_xfer.  This is the guts of scsipi_done().
 *
 *	NOTE: This routine MUST be called with valid thread context
 *	except for the case where the following two conditions are
 *	true:
 *
 *		xs->error == XS_NOERROR
 *		XS_CTL_ASYNC is set in xs->xs_control
 *
 *	The semantics of this routine can be tricky, so here is an
 *	explanation:
 *
 *		0		Xfer completed successfully.
 *
 *		ERESTART	Xfer had an error, but was restarted.
 *
 *		anything else	Xfer had an error, return value is Unix
 *				errno.
 *
 *	If the return value is anything but ERESTART:
 *
 *		- If XS_CTL_ASYNC is set, `xs' has been freed back to
 *		  the pool.
 *		- If there is a buf associated with the xfer,
 *		  it has been biodone()'d.
 */
int
scsipi_complete(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_periph *periph = xs->xs_periph;
	struct scsipi_channel *chan = periph->periph_channel;
	struct buf *bp;
	int error, s;

#ifdef DIAGNOSTIC
	if ((xs->xs_control & XS_CTL_ASYNC) != 0 && xs->bp == NULL)
		panic("scsipi_complete: XS_CTL_ASYNC but no buf");
#endif
	/*
	 * If command terminated with a CHECK CONDITION, we need to issue a
	 * REQUEST_SENSE command. Once the REQUEST_SENSE has been processed
	 * we'll have the real status.
	 * Must be processed at splbio() to avoid missing a SCSI bus reset
	 * for this command.
	 */
	s = splbio();
	if (xs->error == XS_BUSY && xs->status == SCSI_CHECK) {
		/* request sense for a request sense ? */
		if (xs->xs_control & XS_CTL_REQSENSE) {
			scsipi_printaddr(periph);
			printf("request sense for a request sense ?\n");
			/* XXX maybe we should reset the device ? */
			/* we've been frozen because xs->error != XS_NOERROR */
			scsipi_periph_thaw(periph, 1);
			splx(s);
			if (xs->resid < xs->datalen) {
				printf("we read %d bytes of sense anyway:\n",
				    xs->datalen - xs->resid);
#ifdef SCSIVERBOSE
				scsipi_print_sense_data((void *)xs->data, 0);
#endif
			}
			return EINVAL;
		}
		scsipi_request_sense(xs);
	}
	splx(s);

	/*
	 * If it's a user level request, bypass all usual completion
	 * processing, let the user work it out.
	 */
	if ((xs->xs_control & XS_CTL_USERCMD) != 0) {
		SC_DEBUG(periph, SCSIPI_DB3, ("calling user done()\n"));
		if (xs->error != XS_NOERROR)
			scsipi_periph_thaw(periph, 1);
		scsipi_user_done(xs);
		SC_DEBUG(periph, SCSIPI_DB3, ("returned from user done()\n "));
		return 0;
	}

	switch (xs->error) {
	case XS_NOERROR:
		error = 0;
		break;

	case XS_SENSE:
	case XS_SHORTSENSE:
		error = (*chan->chan_bustype->bustype_interpret_sense)(xs);
		break;

	case XS_RESOURCE_SHORTAGE:
		/*
		 * XXX Should freeze channel's queue.
		 */
		scsipi_printaddr(periph);
		printf("adapter resource shortage\n");
		/* FALLTHROUGH */

	case XS_BUSY:
		if (xs->error == XS_BUSY && xs->status == SCSI_QUEUE_FULL) {
			struct scsipi_max_openings mo;

			/*
			 * We set the openings to active - 1, assuming that
			 * the command that got us here is the first one that
			 * can't fit into the device's queue.  If that's not
			 * the case, I guess we'll find out soon enough.
			 */
			mo.mo_target = periph->periph_target;
			mo.mo_lun = periph->periph_lun;
			if (periph->periph_active < periph->periph_openings)
				mo.mo_openings = periph->periph_active - 1;
			else
				mo.mo_openings = periph->periph_openings - 1;
#ifdef DIAGNOSTIC
			if (mo.mo_openings < 0) {
				scsipi_printaddr(periph);
				printf("QUEUE FULL resulted in < 0 openings\n");
				panic("scsipi_done");
			}
#endif
			if (mo.mo_openings == 0) {
				scsipi_printaddr(periph);
				printf("QUEUE FULL resulted in 0 openings\n");
				mo.mo_openings = 1;
			}
			scsipi_async_event(chan, ASYNC_EVENT_MAX_OPENINGS, &mo);
			error = ERESTART;
		} else if (xs->xs_retries != 0) {
			xs->xs_retries--;
			/*
			 * Wait one second, and try again.
			 */
			if ((xs->xs_control & XS_CTL_POLL) ||
			    (chan->chan_flags & SCSIPI_CHAN_TACTIVE) == 0) {
				delay(1000000);
			} else if (!callout_pending(&periph->periph_callout)) {
				scsipi_periph_freeze(periph, 1);
				callout_reset(&periph->periph_callout,
				    hz, scsipi_periph_timed_thaw, periph);
			}
			error = ERESTART;
		} else
			error = EBUSY;
		break;

	case XS_REQUEUE:
		error = ERESTART;
		break;

	case XS_SELTIMEOUT:
	case XS_TIMEOUT:
		/*
		 * If the device hasn't gone away, honor retry counts.
		 *
		 * Note that if we're in the middle of probing it,
		 * it won't be found because it isn't here yet so
		 * we won't honor the retry count in that case.
		 */
		if (scsipi_lookup_periph(chan, periph->periph_target,
		    periph->periph_lun) && xs->xs_retries != 0) {
			xs->xs_retries--;
			error = ERESTART;
		} else
			error = EIO;
		break;

	case XS_RESET:
		if (xs->xs_control & XS_CTL_REQSENSE) {
			/*
			 * request sense interrupted by reset: signal it
			 * with EINTR return code.
			 */
			error = EINTR;
		} else {
			if (xs->xs_retries != 0) {
				xs->xs_retries--;
				error = ERESTART;
			} else
				error = EIO;
		}
		break;

	case XS_DRIVER_STUFFUP:
		scsipi_printaddr(periph);
		printf("generic HBA error\n");
		error = EIO;
		break;
	default:
		scsipi_printaddr(periph);
		printf("invalid return code from adapter: %d\n", xs->error);
		error = EIO;
		break;
	}

	s = splbio();
	if (error == ERESTART) {
		/*
		 * If we get here, the periph has been thawed and frozen
		 * again if we had to issue recovery commands.  Alternatively,
		 * it may have been frozen again and in a timed thaw.  In
		 * any case, we thaw the periph once we re-enqueue the
		 * command.  Once the periph is fully thawed, it will begin
		 * operation again.
		 */
		xs->error = XS_NOERROR;
		xs->status = SCSI_OK;
		xs->xs_status &= ~XS_STS_DONE;
		xs->xs_requeuecnt++;
		error = scsipi_enqueue(xs);
		if (error == 0) {
			scsipi_periph_thaw(periph, 1);
			splx(s);
			return (ERESTART);
		}
	}

	/*
	 * scsipi_done() freezes the queue if not XS_NOERROR.
	 * Thaw it here.
	 */
	if (xs->error != XS_NOERROR)
		scsipi_periph_thaw(periph, 1);

	/*
	 * Set buffer fields in case the periph
	 * switch done func uses them
	 */
	if ((bp = xs->bp) != NULL) {
		if (error) {
			bp->b_error = error;
			bp->b_flags |= B_ERROR;
			bp->b_resid = bp->b_bcount;
		} else {
			bp->b_error = 0;
			bp->b_resid = xs->resid;
		}
	}

	if (periph->periph_switch->psw_done)
		periph->periph_switch->psw_done(xs);

	if (bp)
		biodone(bp);

	if (xs->xs_control & XS_CTL_ASYNC)
		scsipi_put_xs(xs);
	splx(s);

	return (error);
}
/*
 * Issue a request sense for the given scsipi_xfer.  Called when the xfer
 * returns with a CHECK_CONDITION status.  Must be called in valid thread
 * context and at splbio().
 */

void
scsipi_request_sense(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_periph *periph = xs->xs_periph;
	int flags, error;
	struct scsipi_sense cmd;

	periph->periph_flags |= PERIPH_SENSE;

	/* if command was polling, request sense will too */
	flags = xs->xs_control & XS_CTL_POLL;
	/* Polling commands can't sleep */
	if (flags)
		flags |= XS_CTL_NOSLEEP;

	flags |= XS_CTL_REQSENSE | XS_CTL_URGENT | XS_CTL_DATA_IN |
	    XS_CTL_THAW_PERIPH | XS_CTL_FREEZE_PERIPH;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = REQUEST_SENSE;
	cmd.length = sizeof(struct scsipi_sense_data);

	error = scsipi_command(periph,
	    (struct scsipi_generic *) &cmd, sizeof(cmd),
	    (u_char*)&xs->sense.scsi_sense, sizeof(struct scsipi_sense_data),
	    0, 1000, NULL, flags);
	periph->periph_flags &= ~PERIPH_SENSE;
	periph->periph_xscheck = NULL;
	switch (error) {
	case 0:
		/* we have a valid sense */
		xs->error = XS_SENSE;
		return;
	case EINTR:
		/* REQUEST_SENSE interrupted by bus reset. */
		xs->error = XS_RESET;
		return;
	case EIO:
		/* request sense couldn't be performed */
		/*
		 * XXX this isn't quite right but we don't have anything
		 * better for now
		 */
		xs->error = XS_DRIVER_STUFFUP;
		return;
	default:
		/* Notify that request sense failed. */
		xs->error = XS_DRIVER_STUFFUP;
		scsipi_printaddr(periph);
		printf("request sense failed with error %d\n", error);
		return;
	}
}

/*
 * scsipi_enqueue:
 *
 *	Enqueue an xfer on a channel.
 */
int
scsipi_enqueue(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_channel *chan = xs->xs_periph->periph_channel;
	struct scsipi_xfer *qxs;
	int s;

	s = splbio();

	/*
	 * If the xfer is to be polled, and there are already jobs on
	 * the queue, we can't proceed.
	 */
	if ((xs->xs_control & XS_CTL_POLL) != 0 &&
	    TAILQ_FIRST(&chan->chan_queue) != NULL) {
		splx(s);
		xs->error = XS_DRIVER_STUFFUP;
		return (EAGAIN);
	}

	/*
	 * If we have an URGENT xfer, it's an error recovery command
	 * and it should just go on the head of the channel's queue.
	 */
	if (xs->xs_control & XS_CTL_URGENT) {
		TAILQ_INSERT_HEAD(&chan->chan_queue, xs, channel_q);
		goto out;
	}

	/*
	 * If this xfer has already been on the queue before, we
	 * need to reinsert it in the correct order.  That order is:
	 *
	 *	Immediately before the first xfer for this periph
	 *	with a requeuecnt less than xs->xs_requeuecnt.
	 *
	 * Failing that, at the end of the queue.  (We'll end up
	 * there naturally.)
	 */
	if (xs->xs_requeuecnt != 0) {
		for (qxs = TAILQ_FIRST(&chan->chan_queue); qxs != NULL;
		     qxs = TAILQ_NEXT(qxs, channel_q)) {
			if (qxs->xs_periph == xs->xs_periph &&
			    qxs->xs_requeuecnt < xs->xs_requeuecnt)
				break;
		}
		if (qxs != NULL) {
			TAILQ_INSERT_AFTER(&chan->chan_queue, qxs, xs,
			    channel_q);
			goto out;
		}
	}
	TAILQ_INSERT_TAIL(&chan->chan_queue, xs, channel_q);
 out:
	if (xs->xs_control & XS_CTL_THAW_PERIPH)
		scsipi_periph_thaw(xs->xs_periph, 1);
	splx(s);
	return (0);
}

/*
 * scsipi_run_queue:
 *
 *	Start as many xfers as possible running on the channel.
 */
void
scsipi_run_queue(chan)
	struct scsipi_channel *chan;
{
	struct scsipi_xfer *xs;
	struct scsipi_periph *periph;
	int s;

	for (;;) {
		s = splbio();

		/*
		 * If the channel is frozen, we can't do any work right
		 * now.
		 */
		if (chan->chan_qfreeze != 0) {
			splx(s);
			return;
		}

		/*
		 * Look for work to do, and make sure we can do it.
		 */
		for (xs = TAILQ_FIRST(&chan->chan_queue); xs != NULL;
		     xs = TAILQ_NEXT(xs, channel_q)) {
			periph = xs->xs_periph;

			if ((periph->periph_sent >= periph->periph_openings) ||
			    periph->periph_qfreeze != 0 ||
			    (periph->periph_flags & PERIPH_UNTAG) != 0)
				continue;

			if ((periph->periph_flags &
			    (PERIPH_RECOVERING | PERIPH_SENSE)) != 0 &&
			    (xs->xs_control & XS_CTL_URGENT) == 0)
				continue;

			/*
			 * We can issue this xfer!
			 */
			goto got_one;
		}

		/*
		 * Can't find any work to do right now.
		 */
		splx(s);
		return;

 got_one:
		/*
		 * Have an xfer to run.  Allocate a resource from
		 * the adapter to run it.  If we can't allocate that
		 * resource, we don't dequeue the xfer.
		 */
		if (scsipi_get_resource(chan) == 0) {
			/*
			 * Adapter is out of resources.  If the adapter
			 * supports it, attempt to grow them.
			 */
			if (scsipi_grow_resources(chan) == 0) {
				/*
				 * Wasn't able to grow resources,
				 * nothing more we can do.
				 */
				if (xs->xs_control & XS_CTL_POLL) {
					scsipi_printaddr(xs->xs_periph);
					printf("polling command but no "
					    "adapter resources");
					/* We'll panic shortly... */
				}
				splx(s);

				/*
				 * XXX: We should be able to note that
				 * XXX: resources are needed here!
				 */
				return;
			}
			/*
			 * scsipi_grow_resources() allocated the resource
			 * for us.
			 */
		}

		/*
		 * We have a resource to run this xfer, do it!
		 */
		TAILQ_REMOVE(&chan->chan_queue, xs, channel_q);

		/*
		 * If the command is to be tagged, allocate a tag ID
		 * for it.
		 */
		if (XS_CTL_TAGTYPE(xs) != 0)
			scsipi_get_tag(xs);
		else
			periph->periph_flags |= PERIPH_UNTAG;
		periph->periph_sent++;
		splx(s);

		scsipi_adapter_request(chan, ADAPTER_REQ_RUN_XFER, xs);
	}
#ifdef DIAGNOSTIC
	panic("scsipi_run_queue: impossible");
#endif
}
1945
1946 /*
1947 * scsipi_execute_xs:
1948 *
1949 * Begin execution of an xfer, waiting for it to complete, if necessary.
1950 */
1951 int
1952 scsipi_execute_xs(xs)
1953 struct scsipi_xfer *xs;
1954 {
1955 struct scsipi_periph *periph = xs->xs_periph;
1956 struct scsipi_channel *chan = periph->periph_channel;
1957 int oasync, async, poll, retries, error, s;
1958
1959 xs->xs_status &= ~XS_STS_DONE;
1960 xs->error = XS_NOERROR;
1961 xs->resid = xs->datalen;
1962 xs->status = SCSI_OK;
1963
1964 #ifdef SCSIPI_DEBUG
1965 if (xs->xs_periph->periph_dbflags & SCSIPI_DB3) {
1966 printf("scsipi_execute_xs: ");
1967 show_scsipi_xs(xs);
1968 printf("\n");
1969 }
1970 #endif
1971
1972 /*
1973 * Deal with command tagging:
1974 *
1975 * - If the device's current operating mode doesn't
1976 * include tagged queueing, clear the tag mask.
1977 *
1978 * - If the device's current operating mode *does*
1979 * include tagged queueing, set the tag_type in
1980 * the xfer to the appropriate byte for the tag
1981 * message.
1982 */
1983 if ((PERIPH_XFER_MODE(periph) & PERIPH_CAP_TQING) == 0 ||
1984 (xs->xs_control & XS_CTL_REQSENSE)) {
1985 xs->xs_control &= ~XS_CTL_TAGMASK;
1986 xs->xs_tag_type = 0;
1987 } else {
1988 /*
1989 * If the request doesn't specify a tag, give Head
1990 * tags to URGENT operations and Ordered tags to
1991 * everything else.
1992 */
1993 if (XS_CTL_TAGTYPE(xs) == 0) {
1994 if (xs->xs_control & XS_CTL_URGENT)
1995 xs->xs_control |= XS_CTL_HEAD_TAG;
1996 else
1997 xs->xs_control |= XS_CTL_ORDERED_TAG;
1998 }
1999
2000 switch (XS_CTL_TAGTYPE(xs)) {
2001 case XS_CTL_ORDERED_TAG:
2002 xs->xs_tag_type = MSG_ORDERED_Q_TAG;
2003 break;
2004
2005 case XS_CTL_SIMPLE_TAG:
2006 xs->xs_tag_type = MSG_SIMPLE_Q_TAG;
2007 break;
2008
2009 case XS_CTL_HEAD_TAG:
2010 xs->xs_tag_type = MSG_HEAD_OF_Q_TAG;
2011 break;
2012
2013 default:
2014 scsipi_printaddr(periph);
2015 printf("invalid tag mask 0x%08x\n",
2016 XS_CTL_TAGTYPE(xs));
2017 panic("scsipi_execute_xs");
2018 }
2019 }
2020
2021 /* If the adaptor wants us to poll, poll. */
2022 if (chan->chan_adapter->adapt_flags & SCSIPI_ADAPT_POLL_ONLY)
2023 xs->xs_control |= XS_CTL_POLL;
2024
2025 /*
2026 * If we don't yet have a completion thread, or we are to poll for
2027 * completion, clear the ASYNC flag.
2028 */
2029 oasync = (xs->xs_control & XS_CTL_ASYNC);
2030 if (chan->chan_thread == NULL || (xs->xs_control & XS_CTL_POLL) != 0)
2031 xs->xs_control &= ~XS_CTL_ASYNC;
2032
2033 async = (xs->xs_control & XS_CTL_ASYNC);
2034 poll = (xs->xs_control & XS_CTL_POLL);
2035 retries = xs->xs_retries; /* for polling commands */
2036
2037 #ifdef DIAGNOSTIC
2038 if (oasync != 0 && xs->bp == NULL)
2039 panic("scsipi_execute_xs: XS_CTL_ASYNC but no buf");
2040 #endif
2041
2042 /*
2043 * Enqueue the transfer. If we're not polling for completion, this
2044 * should ALWAYS return `no error'.
2045 */
2046 try_again:
2047 error = scsipi_enqueue(xs);
2048 if (error) {
2049 if (poll == 0) {
2050 scsipi_printaddr(periph);
2051 printf("not polling, but enqueue failed with %d\n",
2052 error);
2053 panic("scsipi_execute_xs");
2054 }
2055
2056 scsipi_printaddr(periph);
2057 printf("failed to enqueue polling command");
2058 if (retries != 0) {
2059 printf(", retrying...\n");
2060 delay(1000000);
2061 retries--;
2062 goto try_again;
2063 }
2064 printf("\n");
2065 goto free_xs;
2066 }
2067
2068 restarted:
2069 scsipi_run_queue(chan);
2070
2071 /*
2072 * The xfer is enqueued, and possibly running. If it's to be
2073 * completed asynchronously, just return now.
2074 */
2075 if (async)
2076 return (EJUSTRETURN);
2077
2078 /*
2079 * Not an asynchronous command; wait for it to complete.
2080 */
2081 s = splbio();
2082 while ((xs->xs_status & XS_STS_DONE) == 0) {
2083 if (poll) {
2084 scsipi_printaddr(periph);
2085 printf("polling command not done\n");
2086 panic("scsipi_execute_xs");
2087 }
2088 (void) tsleep(xs, PRIBIO, "xscmd", 0);
2089 }
2090 splx(s);
2091
2092 /*
2093 * Command is complete. scsipi_done() has awakened us to perform
2094 * the error handling.
2095 */
2096 error = scsipi_complete(xs);
2097 if (error == ERESTART)
2098 goto restarted;
2099
	/*
	 * If it was meant to run async and we cleared the async flag
	 * ourselves, don't return an error here.  It has already been
	 * handled.
	 */
	if (oasync)
		error = EJUSTRETURN;
	/*
	 * Command completed successfully or fatal error occurred.  Fall
	 * into....
	 */
 free_xs:
	s = splbio();
	scsipi_put_xs(xs);
	splx(s);

	/*
	 * Kick the queue, keep it running in case it stopped for some
	 * reason.
	 */
	scsipi_run_queue(chan);

	return (error);
}

/*
 * scsipi_completion_thread:
 *
 *	This is the completion thread.  We wait for errors on
 *	asynchronous xfers, and perform the error handling
 *	function, restarting the command, if necessary.
 */
void
scsipi_completion_thread(arg)
	void *arg;
{
	struct scsipi_channel *chan = arg;
	struct scsipi_xfer *xs;
	int s;

	if (chan->chan_init_cb)
		(*chan->chan_init_cb)(chan, chan->chan_init_cb_arg);

	s = splbio();
	chan->chan_flags |= SCSIPI_CHAN_TACTIVE;
	splx(s);
	for (;;) {
		s = splbio();
		xs = TAILQ_FIRST(&chan->chan_complete);
		if (xs == NULL && chan->chan_tflags == 0) {
			/* nothing to do; wait */
			(void) tsleep(&chan->chan_complete, PRIBIO,
			    "sccomp", 0);
			splx(s);
			continue;
		}
		if (chan->chan_tflags & SCSIPI_CHANT_CALLBACK) {
			/* call chan_callback from thread context */
			chan->chan_tflags &= ~SCSIPI_CHANT_CALLBACK;
			chan->chan_callback(chan, chan->chan_callback_arg);
			splx(s);
			continue;
		}
		if (chan->chan_tflags & SCSIPI_CHANT_GROWRES) {
			/* attempt to get more openings for this channel */
			chan->chan_tflags &= ~SCSIPI_CHANT_GROWRES;
			scsipi_adapter_request(chan,
			    ADAPTER_REQ_GROW_RESOURCES, NULL);
			scsipi_channel_thaw(chan, 1);
			splx(s);
			continue;
		}
		if (chan->chan_tflags & SCSIPI_CHANT_KICK) {
			/* explicitly run the queues for this channel */
			chan->chan_tflags &= ~SCSIPI_CHANT_KICK;
			scsipi_run_queue(chan);
			splx(s);
			continue;
		}
		if (chan->chan_tflags & SCSIPI_CHANT_SHUTDOWN) {
			splx(s);
			break;
		}
		if (xs) {
			TAILQ_REMOVE(&chan->chan_complete, xs, channel_q);
			splx(s);

			/*
			 * Have an xfer with an error; process it.
			 */
			(void) scsipi_complete(xs);

			/*
			 * Kick the queue; keep it running if it was stopped
			 * for some reason.
			 */
			scsipi_run_queue(chan);
		} else {
			splx(s);
		}
	}

	chan->chan_thread = NULL;

	/* In case parent is waiting for us to exit. */
	wakeup(&chan->chan_thread);

	kthread_exit(0);
}

/*
 * scsipi_create_completion_thread:
 *
 *	Callback to actually create the completion thread.
 */
void
scsipi_create_completion_thread(arg)
	void *arg;
{
	struct scsipi_channel *chan = arg;
	struct scsipi_adapter *adapt = chan->chan_adapter;

	if (kthread_create1(scsipi_completion_thread, chan,
	    &chan->chan_thread, "%s", chan->chan_name)) {
		printf("%s: unable to create completion thread for "
		    "channel %d\n", adapt->adapt_dev->dv_xname,
		    chan->chan_channel);
		panic("scsipi_create_completion_thread");
	}
}

/*
 * scsipi_thread_call_callback:
 *
 *	Request that a callback be run from the completion thread.
 */
int
scsipi_thread_call_callback(chan, callback, arg)
	struct scsipi_channel *chan;
	void (*callback) __P((struct scsipi_channel *, void *));
	void *arg;
{
	int s;

	s = splbio();
	if ((chan->chan_flags & SCSIPI_CHAN_TACTIVE) == 0) {
		/* kernel thread doesn't exist yet */
		splx(s);
		return (ESRCH);
	}
	if (chan->chan_tflags & SCSIPI_CHANT_CALLBACK) {
		splx(s);
		return (EBUSY);
	}
	scsipi_channel_freeze(chan, 1);
	chan->chan_callback = callback;
	chan->chan_callback_arg = arg;
	chan->chan_tflags |= SCSIPI_CHANT_CALLBACK;
	wakeup(&chan->chan_complete);
	splx(s);
	return (0);
}
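
/*
 * Usage sketch (illustrative, not part of this file): an adapter driver
 * that needs to do work in thread context, e.g. recover after an event,
 * could queue a callback like this.  "mydriver_recover" and "sc" are
 * hypothetical names.  Note that scsipi_thread_call_callback() freezes
 * the channel once and the completion thread does not thaw it, so the
 * callback is expected to call scsipi_channel_thaw(chan, 1) when done.
 *
 *	static void
 *	mydriver_recover(struct scsipi_channel *chan, void *arg)
 *	{
 *		struct mydriver_softc *sc = arg;
 *
 *		... runs in the completion thread, not interrupt context ...
 *		scsipi_channel_thaw(chan, 1);
 *	}
 *
 *	if (scsipi_thread_call_callback(chan, mydriver_recover, sc) != 0)
 *		printf("callback already pending or thread not running\n");
 */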

/*
 * scsipi_async_event:
 *
 *	Handle an asynchronous event from an adapter.
 */
void
scsipi_async_event(chan, event, arg)
	struct scsipi_channel *chan;
	scsipi_async_event_t event;
	void *arg;
{
	int s;

	s = splbio();
	switch (event) {
	case ASYNC_EVENT_MAX_OPENINGS:
		scsipi_async_event_max_openings(chan,
		    (struct scsipi_max_openings *)arg);
		break;

	case ASYNC_EVENT_XFER_MODE:
		scsipi_async_event_xfer_mode(chan,
		    (struct scsipi_xfer_mode *)arg);
		break;

	case ASYNC_EVENT_RESET:
		scsipi_async_event_channel_reset(chan);
		break;
	}
	splx(s);
}
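
/*
 * Usage sketch (illustrative): an adapter that has observed a bus reset
 * would typically report it to the midlayer with
 *
 *	scsipi_async_event(chan, ASYNC_EVENT_RESET, NULL);
 *
 * scsipi_async_event() raises splbio() itself, so no extra locking is
 * required of the caller.
 */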

/*
 * scsipi_print_xfer_mode:
 *
 *	Print a periph's capabilities.
 */
void
scsipi_print_xfer_mode(periph)
	struct scsipi_periph *periph;
{
	int period, freq, speed, mbs;

	if ((periph->periph_flags & PERIPH_MODE_VALID) == 0)
		return;

	aprint_normal("%s: ", periph->periph_dev->dv_xname);
	if (periph->periph_mode & (PERIPH_CAP_SYNC | PERIPH_CAP_DT)) {
		period = scsipi_sync_factor_to_period(periph->periph_period);
		aprint_normal("sync (%d.%02dns offset %d)",
		    period / 100, period % 100, periph->periph_offset);
	} else
		aprint_normal("async");

	if (periph->periph_mode & PERIPH_CAP_WIDE32)
		aprint_normal(", 32-bit");
	else if (periph->periph_mode & (PERIPH_CAP_WIDE16 | PERIPH_CAP_DT))
		aprint_normal(", 16-bit");
	else
		aprint_normal(", 8-bit");

	if (periph->periph_mode & (PERIPH_CAP_SYNC | PERIPH_CAP_DT)) {
		freq = scsipi_sync_factor_to_freq(periph->periph_period);
		speed = freq;
		if (periph->periph_mode & PERIPH_CAP_WIDE32)
			speed *= 4;
		else if (periph->periph_mode &
		    (PERIPH_CAP_WIDE16 | PERIPH_CAP_DT))
			speed *= 2;
		mbs = speed / 1000;
		if (mbs > 0)
			aprint_normal(" (%d.%03dMB/s)", mbs, speed % 1000);
		else
			aprint_normal(" (%dKB/s)", speed % 1000);
	}

	aprint_normal(" transfers");

	if (periph->periph_mode & PERIPH_CAP_TQING)
		aprint_normal(", tagged queueing");

	aprint_normal("\n");
}
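
/*
 * Worked example (illustrative): for an Ultra2 Wide periph with sync
 * factor 0x0a and offset 31, scsipi_sync_factor_to_period() yields
 * 2500 (25.00 ns), scsipi_sync_factor_to_freq() yields 40000 (kHz),
 * and the 16-bit bus doubles that to speed = 80000, so the line
 * printed takes the form (device name hypothetical):
 *
 *	sd0: sync (25.00ns offset 31), 16-bit (80.000MB/s) transfers
 */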

/*
 * scsipi_async_event_max_openings:
 *
 *	Update the maximum number of outstanding commands a
 *	device may have.
 */
void
scsipi_async_event_max_openings(chan, mo)
	struct scsipi_channel *chan;
	struct scsipi_max_openings *mo;
{
	struct scsipi_periph *periph;
	int minlun, maxlun;

	if (mo->mo_lun == -1) {
		/*
		 * Wildcarded; apply it to all LUNs.
		 */
		minlun = 0;
		maxlun = chan->chan_nluns - 1;
	} else
		minlun = maxlun = mo->mo_lun;

	/* XXX This could really suck with a large LUN space. */
	for (; minlun <= maxlun; minlun++) {
		periph = scsipi_lookup_periph(chan, mo->mo_target, minlun);
		if (periph == NULL)
			continue;

		if (mo->mo_openings < periph->periph_openings)
			periph->periph_openings = mo->mo_openings;
		else if (mo->mo_openings > periph->periph_openings &&
		    (periph->periph_flags & PERIPH_GROW_OPENINGS) != 0)
			periph->periph_openings = mo->mo_openings;
	}
}

/*
 * scsipi_async_event_xfer_mode:
 *
 *	Update the xfer mode for all periphs sharing the
 *	specified I_T Nexus.
 */
void
scsipi_async_event_xfer_mode(chan, xm)
	struct scsipi_channel *chan;
	struct scsipi_xfer_mode *xm;
{
	struct scsipi_periph *periph;
	int lun, announce, mode, period, offset;

	for (lun = 0; lun < chan->chan_nluns; lun++) {
		periph = scsipi_lookup_periph(chan, xm->xm_target, lun);
		if (periph == NULL)
			continue;
		announce = 0;

		/*
		 * Clamp the xfer mode down to this periph's capabilities.
		 */
		mode = xm->xm_mode & periph->periph_cap;
		if (mode & PERIPH_CAP_SYNC) {
			period = xm->xm_period;
			offset = xm->xm_offset;
		} else {
			period = 0;
			offset = 0;
		}

		/*
		 * If we do not have a valid xfer mode yet, or the parameters
		 * are different, announce them.
		 */
		if ((periph->periph_flags & PERIPH_MODE_VALID) == 0 ||
		    periph->periph_mode != mode ||
		    periph->periph_period != period ||
		    periph->periph_offset != offset)
			announce = 1;

		periph->periph_mode = mode;
		periph->periph_period = period;
		periph->periph_offset = offset;
		periph->periph_flags |= PERIPH_MODE_VALID;

		if (announce)
			scsipi_print_xfer_mode(periph);
	}
}
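
/*
 * Usage sketch (illustrative): after completing negotiation, an adapter
 * would typically fill in a struct scsipi_xfer_mode and report it, so
 * that every periph on the I_T Nexus is updated and any changed
 * parameters are announced.  The values shown are hypothetical; note
 * that xm_period is a sync factor, not a time in ns.
 *
 *	struct scsipi_xfer_mode xm;
 *
 *	xm.xm_target = 3;
 *	xm.xm_mode = PERIPH_CAP_SYNC | PERIPH_CAP_WIDE16;
 *	xm.xm_period = 0x0a;
 *	xm.xm_offset = 31;
 *	scsipi_async_event(chan, ASYNC_EVENT_XFER_MODE, &xm);
 */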

/*
 * scsipi_set_xfer_mode:
 *
 *	Set the xfer mode for the specified I_T Nexus.
 */
void
scsipi_set_xfer_mode(chan, target, immed)
	struct scsipi_channel *chan;
	int target, immed;
{
	struct scsipi_xfer_mode xm;
	struct scsipi_periph *itperiph;
	int lun, s;

	/*
	 * Go to the minimal xfer mode.
	 */
	xm.xm_target = target;
	xm.xm_mode = 0;
	xm.xm_period = 0;		/* ignored */
	xm.xm_offset = 0;		/* ignored */

	/*
	 * Find the first LUN we know about on this I_T Nexus.
	 */
	for (itperiph = NULL, lun = 0; lun < chan->chan_nluns; lun++) {
		itperiph = scsipi_lookup_periph(chan, target, lun);
		if (itperiph != NULL)
			break;
	}
	if (itperiph != NULL) {
		xm.xm_mode = itperiph->periph_cap;
		/*
		 * Now issue the request to the adapter.
		 */
		s = splbio();
		scsipi_adapter_request(chan, ADAPTER_REQ_SET_XFER_MODE, &xm);
		splx(s);
		/*
		 * If we want this to happen immediately, issue a dummy
		 * command, since most adapters can't really negotiate unless
		 * they're executing a job.
		 */
		if (immed != 0) {
			(void) scsipi_test_unit_ready(itperiph,
			    XS_CTL_DISCOVERY | XS_CTL_IGNORE_ILLEGAL_REQUEST |
			    XS_CTL_IGNORE_NOT_READY |
			    XS_CTL_IGNORE_MEDIA_CHANGE);
		}
	}
}

/*
 * scsipi_async_event_channel_reset:
 *
 *	Handle a SCSI bus reset.  Called at splbio().
 */
void
scsipi_async_event_channel_reset(chan)
	struct scsipi_channel *chan;
{
	struct scsipi_xfer *xs, *xs_next;
	struct scsipi_periph *periph;
	int target, lun;

	/*
	 * The channel has been reset.  Also mark any pending REQUEST_SENSE
	 * commands with XS_RESET, since the sense data is no longer
	 * available.  We can't call scsipi_done() from here, as the command
	 * has not been sent to the adapter yet (that would corrupt the
	 * accounting).
	 */

	for (xs = TAILQ_FIRST(&chan->chan_queue); xs != NULL; xs = xs_next) {
		xs_next = TAILQ_NEXT(xs, channel_q);
		if (xs->xs_control & XS_CTL_REQSENSE) {
			TAILQ_REMOVE(&chan->chan_queue, xs, channel_q);
			xs->error = XS_RESET;
			if ((xs->xs_control & XS_CTL_ASYNC) != 0)
				TAILQ_INSERT_TAIL(&chan->chan_complete, xs,
				    channel_q);
		}
	}
	wakeup(&chan->chan_complete);
	/* Catch xs with pending sense which may not have a REQSENSE xs yet */
	for (target = 0; target < chan->chan_ntargets; target++) {
		if (target == chan->chan_id)
			continue;
		for (lun = 0; lun < chan->chan_nluns; lun++) {
			periph = scsipi_lookup_periph(chan, target, lun);
			if (periph) {
				xs = periph->periph_xscheck;
				if (xs)
					xs->error = XS_RESET;
			}
		}
	}
}

/*
 * scsipi_target_detach:
 *
 *	Detach all periphs associated with an I_T Nexus.
 *	Must be called from valid thread context.
 */
int
scsipi_target_detach(chan, target, lun, flags)
	struct scsipi_channel *chan;
	int target, lun;
	int flags;
{
	struct scsipi_periph *periph;
	int ctarget, mintarget, maxtarget;
	int clun, minlun, maxlun;
	int error;

	if (target == -1) {
		mintarget = 0;
		maxtarget = chan->chan_ntargets;
	} else {
		if (target == chan->chan_id)
			return (EINVAL);
		if (target < 0 || target >= chan->chan_ntargets)
			return (EINVAL);
		mintarget = target;
		maxtarget = target + 1;
	}

	if (lun == -1) {
		minlun = 0;
		maxlun = chan->chan_nluns;
	} else {
		if (lun < 0 || lun >= chan->chan_nluns)
			return (EINVAL);
		minlun = lun;
		maxlun = lun + 1;
	}

	for (ctarget = mintarget; ctarget < maxtarget; ctarget++) {
		if (ctarget == chan->chan_id)
			continue;

		for (clun = minlun; clun < maxlun; clun++) {
			periph = scsipi_lookup_periph(chan, ctarget, clun);
			if (periph == NULL)
				continue;
			error = config_detach(periph->periph_dev, flags);
			if (error)
				return (error);
		}
	}
	return (0);
}
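
/*
 * Usage sketch (illustrative): -1 wildcards either coordinate, so a
 * caller tearing down a whole channel could detach every periph with
 *
 *	error = scsipi_target_detach(chan, -1, -1, DETACH_FORCE);
 *
 * DETACH_FORCE is the usual config_detach() flag from <sys/device.h>;
 * any error from config_detach() aborts the walk and is returned.
 */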

/*
 * scsipi_adapter_addref:
 *
 *	Add a reference to the specified adapter, enabling the
 *	adapter if necessary.
 */
int
scsipi_adapter_addref(adapt)
	struct scsipi_adapter *adapt;
{
	int s, error = 0;

	s = splbio();
	if (adapt->adapt_refcnt++ == 0 && adapt->adapt_enable != NULL) {
		error = (*adapt->adapt_enable)(adapt->adapt_dev, 1);
		if (error)
			adapt->adapt_refcnt--;
	}
	splx(s);
	return (error);
}

/*
 * scsipi_adapter_delref:
 *
 *	Delete a reference to the specified adapter, disabling the
 *	adapter if possible.
 */
void
scsipi_adapter_delref(adapt)
	struct scsipi_adapter *adapt;
{
	int s;

	s = splbio();
	if (adapt->adapt_refcnt-- == 1 && adapt->adapt_enable != NULL)
		(void) (*adapt->adapt_enable)(adapt->adapt_dev, 0);
	splx(s);
}
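
/*
 * Usage sketch (illustrative): a periph driver brackets periods of
 * activity with these calls so the adapter is only enabled while
 * someone is actually using it, e.g. in its open and close paths:
 *
 *	if ((error = scsipi_adapter_addref(adapt)) != 0)
 *		return (error);
 *	... device is open; adapter stays enabled ...
 *	scsipi_adapter_delref(adapt);
 */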

struct scsipi_syncparam {
	int	ss_factor;
	int	ss_period;	/* ns * 100 */
} scsipi_syncparams[] = {
	{ 0x08,		 625 },	/* FAST-160 (Ultra320) */
	{ 0x09,		1250 },	/* FAST-80 (Ultra160) */
	{ 0x0a,		2500 },	/* FAST-40 40MHz (Ultra2) */
	{ 0x0b,		3030 },	/* FAST-40 33MHz (Ultra2) */
	{ 0x0c,		5000 },	/* FAST-20 (Ultra) */
};
const int scsipi_nsyncparams =
    sizeof(scsipi_syncparams) / sizeof(scsipi_syncparams[0]);
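
/*
 * Worked example (illustrative): a factor of 0x0c maps to ss_period
 * 5000, i.e. 50.00 ns, which is a 20 MHz (FAST-20/Ultra) transfer
 * clock.  Factors not in the table fall back to the classic SCSI-2
 * formula, period = factor * 4 ns, in the conversion routines below.
 */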

int
scsipi_sync_period_to_factor(period)
	int period;	/* ns * 100 */
{
	int i;

	for (i = 0; i < scsipi_nsyncparams; i++) {
		if (period <= scsipi_syncparams[i].ss_period)
			return (scsipi_syncparams[i].ss_factor);
	}

	return ((period / 100) / 4);
}

int
scsipi_sync_factor_to_period(factor)
	int factor;
{
	int i;

	for (i = 0; i < scsipi_nsyncparams; i++) {
		if (factor == scsipi_syncparams[i].ss_factor)
			return (scsipi_syncparams[i].ss_period);
	}

	return ((factor * 4) * 100);
}

int
scsipi_sync_factor_to_freq(factor)
	int factor;
{
	int i;

	for (i = 0; i < scsipi_nsyncparams; i++) {
		if (factor == scsipi_syncparams[i].ss_factor)
			return (100000000 / scsipi_syncparams[i].ss_period);
	}

	return (10000000 / ((factor * 4) * 10));
}
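
/*
 * Worked example (illustrative): for a non-table factor of 0x19 (25),
 * scsipi_sync_factor_to_period() returns 25 * 4 * 100 = 10000, i.e.
 * 100.00 ns, and scsipi_sync_factor_to_freq() returns
 * 10000000 / (25 * 4 * 10) = 10000, i.e. 10 MHz expressed in kHz.
 * For the table factor 0x0a, the frequency is 100000000 / 2500 = 40000
 * (40 MHz).
 */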

#ifdef SCSIPI_DEBUG
/*
 * Given a scsipi_xfer, dump the request, in all its glory.
 */
void
show_scsipi_xs(xs)
	struct scsipi_xfer *xs;
{

	printf("xs(%p): ", xs);
	printf("xs_control(0x%08x)", xs->xs_control);
	printf("xs_status(0x%08x)", xs->xs_status);
	printf("periph(%p)", xs->xs_periph);
	printf("retr(0x%x)", xs->xs_retries);
	printf("timo(0x%x)", xs->timeout);
	printf("cmd(%p)", xs->cmd);
	printf("len(0x%x)", xs->cmdlen);
	printf("data(%p)", xs->data);
	printf("len(0x%x)", xs->datalen);
	printf("res(0x%x)", xs->resid);
	printf("err(0x%x)", xs->error);
	printf("bp(%p)", xs->bp);
	show_scsipi_cmd(xs);
}

void
show_scsipi_cmd(xs)
	struct scsipi_xfer *xs;
{
	u_char *b = (u_char *) xs->cmd;
	int i = 0;

	scsipi_printaddr(xs->xs_periph);
	printf(" command: ");

	if ((xs->xs_control & XS_CTL_RESET) == 0) {
		while (i < xs->cmdlen) {
			if (i)
				printf(",");
			printf("0x%x", b[i++]);
		}
		printf("-[%d bytes]\n", xs->datalen);
		if (xs->datalen)
			show_mem(xs->data, min(64, xs->datalen));
	} else
		printf("-RESET-\n");
}

void
show_mem(address, num)
	u_char *address;
	int num;
{
	int x;

	printf("------------------------------");
	for (x = 0; x < num; x++) {
		if ((x % 16) == 0)
			printf("\n%03d: ", x);
		printf("%02x ", *address++);
	}
	printf("\n------------------------------\n");
}
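
/*
 * Sample output (illustrative): for a 20-byte buffer, show_mem()
 * prints something like:
 *
 *	------------------------------
 *	000: 00 01 02 03 04 05 06 07 08 09 0a 0b 0c 0d 0e 0f
 *	016: 10 11 12 13
 *	------------------------------
 */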
#endif /* SCSIPI_DEBUG */