/*	$NetBSD: scsipi_base.c,v 1.150.2.2 2010/08/17 06:46:38 uebayasi Exp $	*/

/*-
 * Copyright (c) 1998, 1999, 2000, 2002, 2003, 2004 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Charles M. Hannum; by Jason R. Thorpe of the Numerical Aerospace
 * Simulation Facility, NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: scsipi_base.c,v 1.150.2.2 2010/08/17 06:46:38 uebayasi Exp $");

#include "opt_scsi.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/buf.h>
#include <sys/uio.h>
#include <sys/malloc.h>
#include <sys/pool.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/proc.h>
#include <sys/kthread.h>
#include <sys/hash.h>

#include <uvm/uvm_extern.h>

#include <dev/scsipi/scsi_spc.h>
#include <dev/scsipi/scsipi_all.h>
#include <dev/scsipi/scsipi_disk.h>
#include <dev/scsipi/scsipiconf.h>
#include <dev/scsipi/scsipi_base.h>

#include <dev/scsipi/scsi_all.h>
#include <dev/scsipi/scsi_message.h>

#include <machine/param.h>

static int	scsipi_complete(struct scsipi_xfer *);
static void	scsipi_request_sense(struct scsipi_xfer *);
static int	scsipi_enqueue(struct scsipi_xfer *);
static void	scsipi_run_queue(struct scsipi_channel *chan);

static void	scsipi_completion_thread(void *);

static void	scsipi_get_tag(struct scsipi_xfer *);
static void	scsipi_put_tag(struct scsipi_xfer *);

static int	scsipi_get_resource(struct scsipi_channel *);
static void	scsipi_put_resource(struct scsipi_channel *);

static void	scsipi_async_event_max_openings(struct scsipi_channel *,
		    struct scsipi_max_openings *);
static void	scsipi_async_event_xfer_mode(struct scsipi_channel *,
		    struct scsipi_xfer_mode *);
static void	scsipi_async_event_channel_reset(struct scsipi_channel *);

static struct pool scsipi_xfer_pool;

/*
 * scsipi_init:
 *
 *	Called when a scsibus or atapibus is attached to the system
 *	to initialize shared data structures.
 */
void
scsipi_init(void)
{
	static int scsipi_init_done;

	if (scsipi_init_done)
		return;
	scsipi_init_done = 1;

	/* Initialize the scsipi_xfer pool. */
	pool_init(&scsipi_xfer_pool, sizeof(struct scsipi_xfer), 0,
	    0, 0, "scxspl", NULL, IPL_BIO);
	if (pool_prime(&scsipi_xfer_pool,
	    PAGE_SIZE / sizeof(struct scsipi_xfer)) == ENOMEM) {
		printf("WARNING: not enough memory for scsipi_xfer_pool\n");
	}
}

/*
 * scsipi_channel_init:
 *
 *	Initialize a scsipi_channel when it is attached.
 */
int
scsipi_channel_init(struct scsipi_channel *chan)
{
	struct scsipi_adapter *adapt = chan->chan_adapter;
	int i;

	/* Initialize shared data. */
	scsipi_init();

	/* Initialize the queues. */
	TAILQ_INIT(&chan->chan_queue);
	TAILQ_INIT(&chan->chan_complete);

	for (i = 0; i < SCSIPI_CHAN_PERIPH_BUCKETS; i++)
		LIST_INIT(&chan->chan_periphtab[i]);

	/*
	 * Create the asynchronous completion thread.
	 */
	if (kthread_create(PRI_NONE, 0, NULL, scsipi_completion_thread, chan,
	    &chan->chan_thread, "%s", chan->chan_name)) {
		aprint_error_dev(adapt->adapt_dev,
		    "unable to create completion thread for "
		    "channel %d\n", chan->chan_channel);
		panic("scsipi_channel_init");
	}

	return (0);
}

/*
 * scsipi_channel_shutdown:
 *
 *	Shutdown a scsipi_channel.
 */
void
scsipi_channel_shutdown(struct scsipi_channel *chan)
{

	/*
	 * Shut down the completion thread.
	 */
	chan->chan_tflags |= SCSIPI_CHANT_SHUTDOWN;
	wakeup(&chan->chan_complete);

	/*
	 * Now wait for the thread to exit.
	 */
	while (chan->chan_thread != NULL)
		(void) tsleep(&chan->chan_thread, PRIBIO, "scshut", 0);
}

static uint32_t
scsipi_chan_periph_hash(uint64_t t, uint64_t l)
{
	uint32_t hash;

	hash = hash32_buf(&t, sizeof(t), HASH32_BUF_INIT);
	hash = hash32_buf(&l, sizeof(l), hash);

	return (hash & SCSIPI_CHAN_PERIPH_HASHMASK);
}
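
/*
 * Illustrative note (a sketch, not from the original source): the hash
 * runs the target through hash32_buf() and then mixes in the LUN, so a
 * periph at a given target/LUN always lands in the same bucket, e.g.:
 *
 *	hash = scsipi_chan_periph_hash(3, 0);
 *	periph = LIST_FIRST(&chan->chan_periphtab[hash]);
 */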

/*
 * scsipi_insert_periph:
 *
 *	Insert a periph into the channel.
 */
void
scsipi_insert_periph(struct scsipi_channel *chan, struct scsipi_periph *periph)
{
	uint32_t hash;
	int s;

	hash = scsipi_chan_periph_hash(periph->periph_target,
	    periph->periph_lun);

	s = splbio();
	LIST_INSERT_HEAD(&chan->chan_periphtab[hash], periph, periph_hash);
	splx(s);
}

/*
 * scsipi_remove_periph:
 *
 *	Remove a periph from the channel.
 */
void
scsipi_remove_periph(struct scsipi_channel *chan,
    struct scsipi_periph *periph)
{
	int s;

	s = splbio();
	LIST_REMOVE(periph, periph_hash);
	splx(s);
}

/*
 * scsipi_lookup_periph:
 *
 *	Lookup a periph on the specified channel.
 */
struct scsipi_periph *
scsipi_lookup_periph(struct scsipi_channel *chan, int target, int lun)
{
	struct scsipi_periph *periph;
	uint32_t hash;
	int s;

	if (target >= chan->chan_ntargets ||
	    lun >= chan->chan_nluns)
		return (NULL);

	hash = scsipi_chan_periph_hash(target, lun);

	s = splbio();
	LIST_FOREACH(periph, &chan->chan_periphtab[hash], periph_hash) {
		if (periph->periph_target == target &&
		    periph->periph_lun == lun)
			break;
	}
	splx(s);

	return (periph);
}

/*
 * scsipi_get_resource:
 *
 *	Allocate a single xfer `resource' from the channel.
 *
 *	NOTE: Must be called at splbio().
 */
static int
scsipi_get_resource(struct scsipi_channel *chan)
{
	struct scsipi_adapter *adapt = chan->chan_adapter;

	if (chan->chan_flags & SCSIPI_CHAN_OPENINGS) {
		if (chan->chan_openings > 0) {
			chan->chan_openings--;
			return (1);
		}
		return (0);
	}

	if (adapt->adapt_openings > 0) {
		adapt->adapt_openings--;
		return (1);
	}
	return (0);
}

/*
 * scsipi_grow_resources:
 *
 *	Attempt to grow resources for a channel.  If this succeeds,
 *	we allocate one for our caller.
 *
 *	NOTE: Must be called at splbio().
 */
static inline int
scsipi_grow_resources(struct scsipi_channel *chan)
{

	if (chan->chan_flags & SCSIPI_CHAN_CANGROW) {
		if ((chan->chan_flags & SCSIPI_CHAN_TACTIVE) == 0) {
			scsipi_adapter_request(chan,
			    ADAPTER_REQ_GROW_RESOURCES, NULL);
			return (scsipi_get_resource(chan));
		}
		/*
		 * Ask the channel thread to do it.  It'll have to thaw
		 * the queue.
		 */
		scsipi_channel_freeze(chan, 1);
		chan->chan_tflags |= SCSIPI_CHANT_GROWRES;
		wakeup(&chan->chan_complete);
		return (0);
	}

	return (0);
}

/*
 * scsipi_put_resource:
 *
 *	Free a single xfer `resource' to the channel.
 *
 *	NOTE: Must be called at splbio().
 */
static void
scsipi_put_resource(struct scsipi_channel *chan)
{
	struct scsipi_adapter *adapt = chan->chan_adapter;

	if (chan->chan_flags & SCSIPI_CHAN_OPENINGS)
		chan->chan_openings++;
	else
		adapt->adapt_openings++;
}

/*
 * scsipi_get_tag:
 *
 *	Get a tag ID for the specified xfer.
 *
 *	NOTE: Must be called at splbio().
 */
static void
scsipi_get_tag(struct scsipi_xfer *xs)
{
	struct scsipi_periph *periph = xs->xs_periph;
	int bit, tag;
	u_int word;

	bit = 0;	/* XXX gcc */
	for (word = 0; word < PERIPH_NTAGWORDS; word++) {
		bit = ffs(periph->periph_freetags[word]);
		if (bit != 0)
			break;
	}
#ifdef DIAGNOSTIC
	if (word == PERIPH_NTAGWORDS) {
		scsipi_printaddr(periph);
		printf("no free tags\n");
		panic("scsipi_get_tag");
	}
#endif

	bit -= 1;
	periph->periph_freetags[word] &= ~(1 << bit);
	tag = (word << 5) | bit;

	/* XXX Should eventually disallow this completely. */
	if (tag >= periph->periph_openings) {
		scsipi_printaddr(periph);
		printf("WARNING: tag %d greater than available openings %d\n",
		    tag, periph->periph_openings);
	}

	xs->xs_tag_id = tag;
}
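
/*
 * Illustrative note (not part of the original source): each 32-bit
 * word of periph_freetags holds 32 tags, so the encoding above maps,
 * e.g., word 1, bit 3 to tag (1 << 5) | 3 == 35.  scsipi_put_tag()
 * below inverts this with xs_tag_id >> 5 and xs_tag_id & 0x1f.
 */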

/*
 * scsipi_put_tag:
 *
 *	Put the tag ID for the specified xfer back into the pool.
 *
 *	NOTE: Must be called at splbio().
 */
static void
scsipi_put_tag(struct scsipi_xfer *xs)
{
	struct scsipi_periph *periph = xs->xs_periph;
	int word, bit;

	word = xs->xs_tag_id >> 5;
	bit = xs->xs_tag_id & 0x1f;

	periph->periph_freetags[word] |= (1 << bit);
}

/*
 * scsipi_get_xs:
 *
 *	Allocate an xfer descriptor and associate it with the
 *	specified peripheral.  If the peripheral has no more
 *	available command openings, we either block waiting for
 *	one to become available, or fail.
 */
struct scsipi_xfer *
scsipi_get_xs(struct scsipi_periph *periph, int flags)
{
	struct scsipi_xfer *xs;
	int s;

	SC_DEBUG(periph, SCSIPI_DB3, ("scsipi_get_xs\n"));

	KASSERT(!cold);

#ifdef DIAGNOSTIC
	/*
	 * URGENT commands can never be ASYNC.
	 */
	if ((flags & (XS_CTL_URGENT|XS_CTL_ASYNC)) ==
	    (XS_CTL_URGENT|XS_CTL_ASYNC)) {
		scsipi_printaddr(periph);
		printf("URGENT and ASYNC\n");
		panic("scsipi_get_xs");
	}
#endif

	s = splbio();
	/*
	 * Wait for a command opening to become available.  Rules:
	 *
	 *	- All xfers must wait for an available opening.
	 *	  Exception: URGENT xfers can proceed when
	 *	  active == openings, because we use the opening
	 *	  of the command we're recovering for.
	 *
	 *	- If the periph has sense pending, only URGENT & REQSENSE
	 *	  xfers may proceed.
	 *
	 *	- If the periph is recovering, only URGENT xfers may
	 *	  proceed.
	 *
	 *	- If the periph is currently executing a recovery
	 *	  command, URGENT commands must block, because only
	 *	  one recovery command can execute at a time.
	 */
	for (;;) {
		if (flags & XS_CTL_URGENT) {
			if (periph->periph_active > periph->periph_openings)
				goto wait_for_opening;
			if (periph->periph_flags & PERIPH_SENSE) {
				if ((flags & XS_CTL_REQSENSE) == 0)
					goto wait_for_opening;
			} else {
				if ((periph->periph_flags &
				    PERIPH_RECOVERY_ACTIVE) != 0)
					goto wait_for_opening;
				periph->periph_flags |= PERIPH_RECOVERY_ACTIVE;
			}
			break;
		}
		if (periph->periph_active >= periph->periph_openings ||
		    (periph->periph_flags & PERIPH_RECOVERING) != 0)
			goto wait_for_opening;
		periph->periph_active++;
		break;

 wait_for_opening:
		if (flags & XS_CTL_NOSLEEP) {
			splx(s);
			return (NULL);
		}
		SC_DEBUG(periph, SCSIPI_DB3, ("sleeping\n"));
		periph->periph_flags |= PERIPH_WAITING;
		(void) tsleep(periph, PRIBIO, "getxs", 0);
	}
	SC_DEBUG(periph, SCSIPI_DB3, ("calling pool_get\n"));
	xs = pool_get(&scsipi_xfer_pool,
	    ((flags & XS_CTL_NOSLEEP) != 0 ? PR_NOWAIT : PR_WAITOK));
	if (xs == NULL) {
		if (flags & XS_CTL_URGENT) {
			if ((flags & XS_CTL_REQSENSE) == 0)
				periph->periph_flags &= ~PERIPH_RECOVERY_ACTIVE;
		} else
			periph->periph_active--;
		scsipi_printaddr(periph);
		printf("unable to allocate %sscsipi_xfer\n",
		    (flags & XS_CTL_URGENT) ? "URGENT " : "");
	}
	splx(s);

	SC_DEBUG(periph, SCSIPI_DB3, ("returning\n"));

	if (xs != NULL) {
		memset(xs, 0, sizeof(*xs));
		callout_init(&xs->xs_callout, 0);
		xs->xs_periph = periph;
		xs->xs_control = flags;
		xs->xs_status = 0;
		s = splbio();
		TAILQ_INSERT_TAIL(&periph->periph_xferq, xs, device_q);
		splx(s);
	}
	return (xs);
}
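
#if 0
/*
 * Minimal usage sketch (an assumption for illustration, not code from
 * this file): periph drivers normally reach this pair indirectly via
 * scsipi_command()/scsipi_execute_xs(), but the raw cycle looks like
 * this.  The helper name is hypothetical.
 */
static void
example_xs_cycle(struct scsipi_periph *periph)
{
	struct scsipi_xfer *xs;
	int s;

	xs = scsipi_get_xs(periph, XS_CTL_NOSLEEP);
	if (xs == NULL)
		return;		/* no opening, or pool exhausted */
	/* ... fill in xs->cmd, xs->cmdlen, xs->data, xs->datalen ... */
	s = splbio();
	scsipi_put_xs(xs);	/* must be called at splbio() */
	splx(s);
}
#endif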

/*
 * scsipi_put_xs:
 *
 *	Release an xfer descriptor, decreasing the outstanding command
 *	count for the peripheral.  If there is a thread waiting for
 *	an opening, wake it up.  If not, kick any queued I/O the
 *	peripheral may have.
 *
 *	NOTE: Must be called at splbio().
 */
void
scsipi_put_xs(struct scsipi_xfer *xs)
{
	struct scsipi_periph *periph = xs->xs_periph;
	int flags = xs->xs_control;

	SC_DEBUG(periph, SCSIPI_DB3, ("scsipi_free_xs\n"));

	TAILQ_REMOVE(&periph->periph_xferq, xs, device_q);
	callout_destroy(&xs->xs_callout);
	pool_put(&scsipi_xfer_pool, xs);

#ifdef DIAGNOSTIC
	if ((periph->periph_flags & PERIPH_RECOVERY_ACTIVE) != 0 &&
	    periph->periph_active == 0) {
		scsipi_printaddr(periph);
		printf("recovery without a command to recover for\n");
		panic("scsipi_put_xs");
	}
#endif

	if (flags & XS_CTL_URGENT) {
		if ((flags & XS_CTL_REQSENSE) == 0)
			periph->periph_flags &= ~PERIPH_RECOVERY_ACTIVE;
	} else
		periph->periph_active--;
	if (periph->periph_active == 0 &&
	    (periph->periph_flags & PERIPH_WAITDRAIN) != 0) {
		periph->periph_flags &= ~PERIPH_WAITDRAIN;
		wakeup(&periph->periph_active);
	}

	if (periph->periph_flags & PERIPH_WAITING) {
		periph->periph_flags &= ~PERIPH_WAITING;
		wakeup(periph);
	} else {
		if (periph->periph_switch->psw_start != NULL &&
		    device_is_active(periph->periph_dev)) {
			SC_DEBUG(periph, SCSIPI_DB2,
			    ("calling private start()\n"));
			(*periph->periph_switch->psw_start)(periph);
		}
	}
}

/*
 * scsipi_channel_freeze:
 *
 *	Freeze a channel's xfer queue.
 */
void
scsipi_channel_freeze(struct scsipi_channel *chan, int count)
{
	int s;

	s = splbio();
	chan->chan_qfreeze += count;
	splx(s);
}

/*
 * scsipi_channel_thaw:
 *
 *	Thaw a channel's xfer queue.
 */
void
scsipi_channel_thaw(struct scsipi_channel *chan, int count)
{
	int s;

	s = splbio();
	chan->chan_qfreeze -= count;
	/*
	 * Don't let the freeze count go negative.
	 *
	 * Presumably the adapter driver could keep track of this,
	 * but it might just be easier to do this here so as to allow
	 * multiple callers, including those outside the adapter driver.
	 */
	if (chan->chan_qfreeze < 0) {
		chan->chan_qfreeze = 0;
	}
	splx(s);
	/*
	 * Kick the channel's queue here.  Note, we may be running in
	 * interrupt context (softclock or HBA's interrupt), so the adapter
	 * driver had better not sleep.
	 */
	if (chan->chan_qfreeze == 0)
		scsipi_run_queue(chan);
}

/*
 * scsipi_channel_timed_thaw:
 *
 *	Thaw a channel after some time has expired.  This will also
 *	run the channel's queue if the freeze count has reached 0.
 */
void
scsipi_channel_timed_thaw(void *arg)
{
	struct scsipi_channel *chan = arg;

	scsipi_channel_thaw(chan, 1);
}
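
#if 0
/*
 * Sketch of the intended freeze/timed-thaw pattern (an assumed usage
 * example, mirroring how scsipi_complete() uses the periph-level
 * variant below): freeze the channel once, then arm a caller-owned
 * callout that thaws it a second later.  The helper is hypothetical.
 */
static void
example_defer_thaw(struct scsipi_channel *chan, struct callout *co)
{
	scsipi_channel_freeze(chan, 1);
	callout_reset(co, hz, scsipi_channel_timed_thaw, chan);
}
#endif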

/*
 * scsipi_periph_freeze:
 *
 *	Freeze a device's xfer queue.
 */
void
scsipi_periph_freeze(struct scsipi_periph *periph, int count)
{
	int s;

	s = splbio();
	periph->periph_qfreeze += count;
	splx(s);
}

/*
 * scsipi_periph_thaw:
 *
 *	Thaw a device's xfer queue.
 */
void
scsipi_periph_thaw(struct scsipi_periph *periph, int count)
{
	int s;

	s = splbio();
	periph->periph_qfreeze -= count;
#ifdef DIAGNOSTIC
	if (periph->periph_qfreeze < 0) {
		static const char pc[] = "periph freeze count < 0";
		scsipi_printaddr(periph);
		printf("%s\n", pc);
		panic(pc);
	}
#endif
	if (periph->periph_qfreeze == 0 &&
	    (periph->periph_flags & PERIPH_WAITING) != 0)
		wakeup(periph);
	splx(s);
}

/*
 * scsipi_periph_timed_thaw:
 *
 *	Thaw a device after some time has expired.
 */
void
scsipi_periph_timed_thaw(void *arg)
{
	int s;
	struct scsipi_periph *periph = arg;

	callout_stop(&periph->periph_callout);

	s = splbio();
	scsipi_periph_thaw(periph, 1);
	if ((periph->periph_channel->chan_flags & SCSIPI_CHAN_TACTIVE) == 0) {
		/*
		 * Kick the channel's queue here.  Note, we're running in
		 * interrupt context (softclock), so the adapter driver
		 * had better not sleep.
		 */
		scsipi_run_queue(periph->periph_channel);
	} else {
		/*
		 * Tell the completion thread to kick the channel's queue here.
		 */
		periph->periph_channel->chan_tflags |= SCSIPI_CHANT_KICK;
		wakeup(&periph->periph_channel->chan_complete);
	}
	splx(s);
}

/*
 * scsipi_wait_drain:
 *
 *	Wait for a periph's pending xfers to drain.
 */
void
scsipi_wait_drain(struct scsipi_periph *periph)
{
	int s;

	s = splbio();
	while (periph->periph_active != 0) {
		periph->periph_flags |= PERIPH_WAITDRAIN;
		(void) tsleep(&periph->periph_active, PRIBIO, "sxdrn", 0);
	}
	splx(s);
}

/*
 * scsipi_kill_pending:
 *
 *	Kill off all pending xfers for a periph.
 *
 *	NOTE: Must be called at splbio().
 */
void
scsipi_kill_pending(struct scsipi_periph *periph)
{

	(*periph->periph_channel->chan_bustype->bustype_kill_pending)(periph);
	scsipi_wait_drain(periph);
}

/*
 * scsipi_print_cdb:
 *
 *	Print a command descriptor block (for debugging purposes,
 *	error messages, SCSIVERBOSE, ...).
 */
void
scsipi_print_cdb(struct scsipi_generic *cmd)
{
	int i, j;

	printf("0x%02x", cmd->opcode);

	switch (CDB_GROUPID(cmd->opcode)) {
	case CDB_GROUPID_0:
		j = CDB_GROUP0;
		break;
	case CDB_GROUPID_1:
		j = CDB_GROUP1;
		break;
	case CDB_GROUPID_2:
		j = CDB_GROUP2;
		break;
	case CDB_GROUPID_3:
		j = CDB_GROUP3;
		break;
	case CDB_GROUPID_4:
		j = CDB_GROUP4;
		break;
	case CDB_GROUPID_5:
		j = CDB_GROUP5;
		break;
	case CDB_GROUPID_6:
		j = CDB_GROUP6;
		break;
	case CDB_GROUPID_7:
		j = CDB_GROUP7;
		break;
	default:
		j = 0;
	}
	if (j == 0)
		j = sizeof (cmd->bytes);
	for (i = 0; i < j - 1; i++)	/* already done the opcode */
		printf(" %02x", cmd->bytes[i]);
}
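
/*
 * For example (illustrative only): a READ(6) CDB of
 * { 0x08, 0x00, 0x00, 0x40, 0x01, 0x00 } is in group 0, so j becomes
 * CDB_GROUP0 (6 bytes) and the routine prints "0x08 00 00 40 01 00".
 */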

/*
 * scsipi_interpret_sense:
 *
 *	Look at the returned sense and act on the error, determining
 *	the unix error number to pass back.  (0 = report no error)
 *
 *	NOTE: If we return ERESTART, we are expected to have
 *	thawed the device!
 *
 *	THIS IS THE DEFAULT ERROR HANDLER FOR SCSI DEVICES.
 */
int
scsipi_interpret_sense(struct scsipi_xfer *xs)
{
	struct scsi_sense_data *sense;
	struct scsipi_periph *periph = xs->xs_periph;
	u_int8_t key;
	int error;
	u_int32_t info;
	static const char *error_mes[] = {
		"soft error (corrected)",
		"not ready", "medium error",
		"non-media hardware failure", "illegal request",
		"unit attention", "readonly device",
		"no data found", "vendor unique",
		"copy aborted", "command aborted",
		"search returned equal", "volume overflow",
		"verify miscompare", "unknown error key"
	};

	sense = &xs->sense.scsi_sense;
#ifdef SCSIPI_DEBUG
	if (periph->periph_flags & SCSIPI_DB1) {
		int count;
		scsipi_printaddr(periph);
		printf(" sense debug information:\n");
		printf("\tcode 0x%x valid %d\n",
		    SSD_RCODE(sense->response_code),
		    sense->response_code & SSD_RCODE_VALID ? 1 : 0);
		printf("\tseg 0x%x key 0x%x ili 0x%x eom 0x%x fmark 0x%x\n",
		    sense->segment,
		    SSD_SENSE_KEY(sense->flags),
		    sense->flags & SSD_ILI ? 1 : 0,
		    sense->flags & SSD_EOM ? 1 : 0,
		    sense->flags & SSD_FILEMARK ? 1 : 0);
		printf("\ninfo: 0x%x 0x%x 0x%x 0x%x followed by %d "
		    "extra bytes\n",
		    sense->info[0],
		    sense->info[1],
		    sense->info[2],
		    sense->info[3],
		    sense->extra_len);
		printf("\textra: ");
		for (count = 0; count < SSD_ADD_BYTES_LIM(sense); count++)
			printf("0x%x ", sense->csi[count]);
		printf("\n");
	}
#endif

	/*
	 * If the periph has its own error handler, call it first.
	 * If it returns a legit error value, return that, otherwise
	 * it wants us to continue with normal error processing.
	 */
	if (periph->periph_switch->psw_error != NULL) {
		SC_DEBUG(periph, SCSIPI_DB2,
		    ("calling private err_handler()\n"));
		error = (*periph->periph_switch->psw_error)(xs);
		if (error != EJUSTRETURN)
			return (error);
	}
	/* Otherwise use the default. */
	switch (SSD_RCODE(sense->response_code)) {

	/*
	 * Old SCSI-1 and SASI devices respond with
	 * codes other than 70.
	 */
	case 0x00:	/* no error (command completed OK) */
		return (0);
	case 0x04:	/* drive not ready after it was selected */
		if ((periph->periph_flags & PERIPH_REMOVABLE) != 0)
			periph->periph_flags &= ~PERIPH_MEDIA_LOADED;
		if ((xs->xs_control & XS_CTL_IGNORE_NOT_READY) != 0)
			return (0);
		/* XXX - display some sort of error here? */
		return (EIO);
	case 0x20:	/* invalid command */
		if ((xs->xs_control &
		    XS_CTL_IGNORE_ILLEGAL_REQUEST) != 0)
			return (0);
		return (EINVAL);
	case 0x25:	/* invalid LUN (Adaptec ACB-4000) */
		return (EACCES);

	/*
	 * If it's code 70, use the extended stuff and
	 * interpret the key.
	 */
	case 0x71:	/* delayed error */
		scsipi_printaddr(periph);
		key = SSD_SENSE_KEY(sense->flags);
		printf(" DEFERRED ERROR, key = 0x%x\n", key);
		/* FALLTHROUGH */
	case 0x70:
		if ((sense->response_code & SSD_RCODE_VALID) != 0)
			info = _4btol(sense->info);
		else
			info = 0;
		key = SSD_SENSE_KEY(sense->flags);

		switch (key) {
		case SKEY_NO_SENSE:
		case SKEY_RECOVERED_ERROR:
			if (xs->resid == xs->datalen && xs->datalen) {
				/*
				 * Why is this here?
				 */
				xs->resid = 0;	/* not short read */
			}
		case SKEY_EQUAL:
			error = 0;
			break;
		case SKEY_NOT_READY:
			if ((periph->periph_flags & PERIPH_REMOVABLE) != 0)
				periph->periph_flags &= ~PERIPH_MEDIA_LOADED;
			if ((xs->xs_control & XS_CTL_IGNORE_NOT_READY) != 0)
				return (0);
			if (sense->asc == 0x3A) {
				error = ENODEV;	/* Medium not present */
				if (xs->xs_control & XS_CTL_SILENT_NODEV)
					return (error);
			} else
				error = EIO;
			if ((xs->xs_control & XS_CTL_SILENT) != 0)
				return (error);
			break;
		case SKEY_ILLEGAL_REQUEST:
			if ((xs->xs_control &
			    XS_CTL_IGNORE_ILLEGAL_REQUEST) != 0)
				return (0);
			/*
			 * Handle the case where a device reports
			 * Logical Unit Not Supported during discovery.
			 */
			if ((xs->xs_control & XS_CTL_DISCOVERY) != 0 &&
			    sense->asc == 0x25 &&
			    sense->ascq == 0x00)
				return (EINVAL);
			if ((xs->xs_control & XS_CTL_SILENT) != 0)
				return (EIO);
			error = EINVAL;
			break;
		case SKEY_UNIT_ATTENTION:
			if (sense->asc == 0x29 &&
			    sense->ascq == 0x00) {
				/* device or bus reset */
				return (ERESTART);
			}
			if ((periph->periph_flags & PERIPH_REMOVABLE) != 0)
				periph->periph_flags &= ~PERIPH_MEDIA_LOADED;
			if ((xs->xs_control &
			    XS_CTL_IGNORE_MEDIA_CHANGE) != 0 ||
			    /* XXX Should reupload any transient state. */
			    (periph->periph_flags &
			     PERIPH_REMOVABLE) == 0) {
				return (ERESTART);
			}
			if ((xs->xs_control & XS_CTL_SILENT) != 0)
				return (EIO);
			error = EIO;
			break;
		case SKEY_DATA_PROTECT:
			error = EROFS;
			break;
		case SKEY_BLANK_CHECK:
			error = 0;
			break;
		case SKEY_ABORTED_COMMAND:
			if (xs->xs_retries != 0) {
				xs->xs_retries--;
				error = ERESTART;
			} else
				error = EIO;
			break;
		case SKEY_VOLUME_OVERFLOW:
			error = ENOSPC;
			break;
		default:
			error = EIO;
			break;
		}

		/* Print verbose decode if appropriate and possible */
		if ((key == 0) ||
		    ((xs->xs_control & XS_CTL_SILENT) != 0) ||
		    (scsipi_print_sense(xs, 0) != 0))
			return (error);

		/* Print brief(er) sense information */
		scsipi_printaddr(periph);
		printf("%s", error_mes[key - 1]);
		if ((sense->response_code & SSD_RCODE_VALID) != 0) {
			switch (key) {
			case SKEY_NOT_READY:
			case SKEY_ILLEGAL_REQUEST:
			case SKEY_UNIT_ATTENTION:
			case SKEY_DATA_PROTECT:
				break;
			case SKEY_BLANK_CHECK:
				printf(", requested size: %d (decimal)",
				    info);
				break;
			case SKEY_ABORTED_COMMAND:
				if (xs->xs_retries)
					printf(", retrying");
				printf(", cmd 0x%x, info 0x%x",
				    xs->cmd->opcode, info);
				break;
			default:
				printf(", info = %d (decimal)", info);
			}
		}
		if (sense->extra_len != 0) {
			int n;
			printf(", data =");
			for (n = 0; n < sense->extra_len; n++)
				printf(" %02x",
				    sense->csi[n]);
		}
		printf("\n");
		return (error);

	/*
	 * Some other code, just report it.
	 */
	default:
#if defined(SCSIDEBUG) || defined(DEBUG)
	{
		static const char *uc = "undecodable sense error";
		int i;
		u_int8_t *cptr = (u_int8_t *) sense;
		scsipi_printaddr(periph);
		if (xs->cmd == &xs->cmdstore) {
			printf("%s for opcode 0x%x, data=",
			    uc, xs->cmdstore.opcode);
		} else {
			printf("%s, data=", uc);
		}
		for (i = 0; i < sizeof (*sense); i++)
			printf(" 0x%02x", *(cptr++) & 0xff);
		printf("\n");
	}
#else
		scsipi_printaddr(periph);
		printf("Sense Error Code 0x%x",
		    SSD_RCODE(sense->response_code));
		if ((sense->response_code & SSD_RCODE_VALID) != 0) {
			struct scsi_sense_data_unextended *usense =
			    (struct scsi_sense_data_unextended *)sense;
			printf(" at block no. %d (decimal)",
			    _3btol(usense->block));
		}
		printf("\n");
#endif
		return (EIO);
	}
}

/*
 * scsipi_test_unit_ready:
 *
 *	Issue a `test unit ready' request.
 */
int
scsipi_test_unit_ready(struct scsipi_periph *periph, int flags)
{
	struct scsi_test_unit_ready cmd;
	int retries;

	/* Some ATAPI drives don't support TEST UNIT READY.  Sigh. */
	if (periph->periph_quirks & PQUIRK_NOTUR)
		return (0);

	if (flags & XS_CTL_DISCOVERY)
		retries = 0;
	else
		retries = SCSIPIRETRIES;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = SCSI_TEST_UNIT_READY;

	return (scsipi_command(periph, (void *)&cmd, sizeof(cmd), 0, 0,
	    retries, 10000, NULL, flags));
}
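
#if 0
/*
 * Typical call (an illustrative sketch, not from this file): issue a
 * polled TEST UNIT READY and treat transient NOT READY sense as
 * success, e.g. while checking removable media.  The helper name is
 * hypothetical.
 */
static int
example_unit_ready(struct scsipi_periph *periph)
{
	return scsipi_test_unit_ready(periph,
	    XS_CTL_POLL | XS_CTL_IGNORE_NOT_READY);
}
#endif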

/*
 * scsipi_inquire:
 *
 *	Ask the device about itself.
 */
int
scsipi_inquire(struct scsipi_periph *periph, struct scsipi_inquiry_data *inqbuf,
    int flags)
{
	struct scsipi_inquiry cmd;
	int error;
	int retries;

	if (flags & XS_CTL_DISCOVERY)
		retries = 0;
	else
		retries = SCSIPIRETRIES;

	/*
	 * If we request more data than the device can provide, it SHOULD just
	 * return a short response.  However, some devices error with an
	 * ILLEGAL REQUEST sense code, and yet others have even more special
	 * failure modes (such as the GL641USB flash adapter, which goes loony
	 * and sends corrupted CRCs).  To work around this, and to bring our
	 * behavior more in line with other OSes, we do a shorter inquiry,
	 * covering all the SCSI-2 information, first, and then request more
	 * data iff the "additional length" field indicates there is more.
	 * - mycroft, 2003/10/16
	 */
	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = INQUIRY;
	cmd.length = SCSIPI_INQUIRY_LENGTH_SCSI2;
	error = scsipi_command(periph, (void *)&cmd, sizeof(cmd),
	    (void *)inqbuf, SCSIPI_INQUIRY_LENGTH_SCSI2, retries,
	    10000, NULL, flags | XS_CTL_DATA_IN);
	if (!error &&
	    inqbuf->additional_length > SCSIPI_INQUIRY_LENGTH_SCSI2 - 4) {
#if 0
		printf("inquire: addlen=%d, retrying\n",
		    inqbuf->additional_length);
#endif
		cmd.length = SCSIPI_INQUIRY_LENGTH_SCSI3;
		error = scsipi_command(periph, (void *)&cmd, sizeof(cmd),
		    (void *)inqbuf, SCSIPI_INQUIRY_LENGTH_SCSI3, retries,
		    10000, NULL, flags | XS_CTL_DATA_IN);
#if 0
		printf("inquire: error=%d\n", error);
#endif
	}

#ifdef SCSI_OLD_NOINQUIRY
	/*
	 * Kludge for the Adaptec ACB-4000 SCSI->MFM translator.
	 * This board doesn't support the INQUIRY command at all.
	 */
	if (error == EINVAL || error == EACCES) {
		/*
		 * Conjure up an INQUIRY response.
		 */
		inqbuf->device = (error == EINVAL ?
		    SID_QUAL_LU_PRESENT :
		    SID_QUAL_LU_NOTPRESENT) | T_DIRECT;
		inqbuf->dev_qual2 = 0;
		inqbuf->version = 0;
		inqbuf->response_format = SID_FORMAT_SCSI1;
		inqbuf->additional_length = SCSIPI_INQUIRY_LENGTH_SCSI2 - 4;
		inqbuf->flags1 = inqbuf->flags2 = inqbuf->flags3 = 0;
		memcpy(inqbuf->vendor, "ADAPTEC ACB-4000            ", 28);
		error = 0;
	}

	/*
	 * Kludge for the Emulex MT-02 SCSI->QIC translator.
	 * This board gives an empty response to an INQUIRY command.
	 */
	else if (error == 0 &&
	    inqbuf->device == (SID_QUAL_LU_PRESENT | T_DIRECT) &&
	    inqbuf->dev_qual2 == 0 &&
	    inqbuf->version == 0 &&
	    inqbuf->response_format == SID_FORMAT_SCSI1) {
		/*
		 * Fill out the INQUIRY response.
		 */
		inqbuf->device = (SID_QUAL_LU_PRESENT | T_SEQUENTIAL);
		inqbuf->dev_qual2 = SID_REMOVABLE;
		inqbuf->additional_length = SCSIPI_INQUIRY_LENGTH_SCSI2 - 4;
		inqbuf->flags1 = inqbuf->flags2 = inqbuf->flags3 = 0;
		memcpy(inqbuf->vendor, "EMULEX  MT-02 QIC           ", 28);
	}
#endif /* SCSI_OLD_NOINQUIRY */

	return error;
}
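
#if 0
/*
 * Usage sketch (an assumption for illustration): during bus scan the
 * attachment code fills a scsipi_inquiry_data from the device.  The
 * helper name and flag combination are hypothetical.
 */
static int
example_probe_identity(struct scsipi_periph *periph)
{
	struct scsipi_inquiry_data inqbuf;

	return scsipi_inquire(periph, &inqbuf,
	    XS_CTL_DISCOVERY | XS_CTL_SILENT);
}
#endif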

/*
 * scsipi_prevent:
 *
 *	Prevent or allow the user to remove the media.
 */
int
scsipi_prevent(struct scsipi_periph *periph, int type, int flags)
{
	struct scsi_prevent_allow_medium_removal cmd;

	if (periph->periph_quirks & PQUIRK_NODOORLOCK)
		return 0;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = SCSI_PREVENT_ALLOW_MEDIUM_REMOVAL;
	cmd.how = type;

	return (scsipi_command(periph, (void *)&cmd, sizeof(cmd), 0, 0,
	    SCSIPIRETRIES, 5000, NULL, flags));
}
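
#if 0
/*
 * Sketch of the usual pairing (an assumed example; the SPAMR_*
 * constants come from scsi_spc.h, the helper name is hypothetical):
 * lock the door while the device is open, allow removal on close.
 */
static void
example_door_lock_cycle(struct scsipi_periph *periph)
{
	(void) scsipi_prevent(periph, SPAMR_PREVENT_DT, 0);
	/* ... device in use ... */
	(void) scsipi_prevent(periph, SPAMR_ALLOW, 0);
}
#endif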

/*
 * scsipi_start:
 *
 *	Send a START UNIT.
 */
int
scsipi_start(struct scsipi_periph *periph, int type, int flags)
{
	struct scsipi_start_stop cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = START_STOP;
	cmd.byte2 = 0x00;
	cmd.how = type;

	return (scsipi_command(periph, (void *)&cmd, sizeof(cmd), 0, 0,
	    SCSIPIRETRIES, (type & SSS_START) ? 60000 : 10000, NULL, flags));
}

/*
 * scsipi_mode_sense, scsipi_mode_sense_big:
 *	get a sense page from a device
 */

int
scsipi_mode_sense(struct scsipi_periph *periph, int byte2, int page,
    struct scsi_mode_parameter_header_6 *data, int len, int flags, int retries,
    int timeout)
{
	struct scsi_mode_sense_6 cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = SCSI_MODE_SENSE_6;
	cmd.byte2 = byte2;
	cmd.page = page;
	cmd.length = len & 0xff;

	return (scsipi_command(periph, (void *)&cmd, sizeof(cmd),
	    (void *)data, len, retries, timeout, NULL, flags | XS_CTL_DATA_IN));
}

int
scsipi_mode_sense_big(struct scsipi_periph *periph, int byte2, int page,
    struct scsi_mode_parameter_header_10 *data, int len, int flags, int retries,
    int timeout)
{
	struct scsi_mode_sense_10 cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = SCSI_MODE_SENSE_10;
	cmd.byte2 = byte2;
	cmd.page = page;
	_lto2b(len, cmd.length);

	return (scsipi_command(periph, (void *)&cmd, sizeof(cmd),
	    (void *)data, len, retries, timeout, NULL, flags | XS_CTL_DATA_IN));
}

int
scsipi_mode_select(struct scsipi_periph *periph, int byte2,
    struct scsi_mode_parameter_header_6 *data, int len, int flags, int retries,
    int timeout)
{
	struct scsi_mode_select_6 cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = SCSI_MODE_SELECT_6;
	cmd.byte2 = byte2;
	cmd.length = len & 0xff;

	return (scsipi_command(periph, (void *)&cmd, sizeof(cmd),
	    (void *)data, len, retries, timeout, NULL, flags | XS_CTL_DATA_OUT));
}

int
scsipi_mode_select_big(struct scsipi_periph *periph, int byte2,
    struct scsi_mode_parameter_header_10 *data, int len, int flags, int retries,
    int timeout)
{
	struct scsi_mode_select_10 cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = SCSI_MODE_SELECT_10;
	cmd.byte2 = byte2;
	_lto2b(len, cmd.length);

	return (scsipi_command(periph, (void *)&cmd, sizeof(cmd),
	    (void *)data, len, retries, timeout, NULL, flags | XS_CTL_DATA_OUT));
}
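
#if 0
/*
 * Usage sketch (assumed, loosely modeled on the disk drivers): fetch
 * mode page 8 (caching) into a buffer that begins with the 6-byte
 * parameter header.  The helper name and struct layout are
 * hypothetical.
 */
static int
example_read_caching_page(struct scsipi_periph *periph)
{
	struct {
		struct scsi_mode_parameter_header_6 header;
		uint8_t page[128];
	} buf;

	return scsipi_mode_sense(periph, 0, 8, &buf.header,
	    sizeof(buf), 0, SCSIPIRETRIES, 10000);
}
#endif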

/*
 * scsipi_done:
 *
 *	This routine is called by an adapter's interrupt handler when
 *	an xfer is completed.
 */
void
scsipi_done(struct scsipi_xfer *xs)
{
	struct scsipi_periph *periph = xs->xs_periph;
	struct scsipi_channel *chan = periph->periph_channel;
	int s, freezecnt;

	SC_DEBUG(periph, SCSIPI_DB2, ("scsipi_done\n"));
#ifdef SCSIPI_DEBUG
	if (periph->periph_dbflags & SCSIPI_DB1)
		show_scsipi_cmd(xs);
#endif

	s = splbio();
	/*
	 * The resource this command was using is now free.
	 */
	if (xs->xs_status & XS_STS_DONE) {
		/*
		 * XXX In certain circumstances, such as a device
		 * being detached, an xs that has already been
		 * scsipi_done()'d by the main thread will be done'd
		 * again by scsibusdetach().  Putting the xs on the
		 * chan_complete queue causes list corruption and
		 * everyone dies.  This prevents that, but perhaps
		 * there should be better coordination somewhere such
		 * that this won't ever happen (and can be turned into
		 * a KASSERT()).
		 */
		splx(s);
		goto out;
	}
	scsipi_put_resource(chan);
	xs->xs_periph->periph_sent--;

	/*
	 * If the command was tagged, free the tag.
	 */
	if (XS_CTL_TAGTYPE(xs) != 0)
		scsipi_put_tag(xs);
	else
		periph->periph_flags &= ~PERIPH_UNTAG;

	/* Mark the command as `done'. */
	xs->xs_status |= XS_STS_DONE;

#ifdef DIAGNOSTIC
	if ((xs->xs_control & (XS_CTL_ASYNC|XS_CTL_POLL)) ==
	    (XS_CTL_ASYNC|XS_CTL_POLL))
		panic("scsipi_done: ASYNC and POLL");
#endif

	/*
	 * If the xfer had an error of any sort, freeze the
	 * periph's queue.  Freeze it again if we were requested
	 * to do so in the xfer.
	 */
	freezecnt = 0;
	if (xs->error != XS_NOERROR)
		freezecnt++;
	if (xs->xs_control & XS_CTL_FREEZE_PERIPH)
		freezecnt++;
	if (freezecnt != 0)
		scsipi_periph_freeze(periph, freezecnt);

	/*
	 * Record the xfer with a pending sense, in case a SCSI reset is
	 * received before the thread is waked up.
	 */
	if (xs->error == XS_BUSY && xs->status == SCSI_CHECK) {
		periph->periph_flags |= PERIPH_SENSE;
		periph->periph_xscheck = xs;
	}

	/*
	 * If this was an xfer that was not to complete asynchronously,
	 * let the requesting thread perform error checking/handling
	 * in its context.
	 */
	if ((xs->xs_control & XS_CTL_ASYNC) == 0) {
		splx(s);
		/*
		 * If it's a polling job, just return, to unwind the
		 * call graph.  We don't need to restart the queue,
		 * because polling jobs are treated specially, and
		 * are really only used during crash dumps anyway
		 * (XXX or during boot-time autoconfiguration of
		 * ATAPI devices).
		 */
		if (xs->xs_control & XS_CTL_POLL)
			return;
		wakeup(xs);
		goto out;
	}

	/*
	 * Catch the extremely common case of I/O completing
	 * without error; no use in taking a context switch
	 * if we can handle it in interrupt context.
	 */
	if (xs->error == XS_NOERROR) {
		splx(s);
		(void) scsipi_complete(xs);
		goto out;
	}

	/*
	 * There is an error on this xfer.  Put it on the channel's
	 * completion queue, and wake up the completion thread.
	 */
	TAILQ_INSERT_TAIL(&chan->chan_complete, xs, channel_q);
	splx(s);
	wakeup(&chan->chan_complete);

 out:
	/*
	 * If there are more xfers on the channel's queue, attempt to
	 * run them.
	 */
	scsipi_run_queue(chan);
}

/*
 * scsipi_complete:
 *
 *	Completion of a scsipi_xfer.  This is the guts of scsipi_done().
 *
 *	NOTE: This routine MUST be called with valid thread context
 *	except for the case where the following two conditions are
 *	true:
 *
 *		xs->error == XS_NOERROR
 *		XS_CTL_ASYNC is set in xs->xs_control
 *
 *	The semantics of this routine can be tricky, so here is an
 *	explanation:
 *
 *		0		Xfer completed successfully.
 *
 *		ERESTART	Xfer had an error, but was restarted.
 *
 *		anything else	Xfer had an error, return value is Unix
 *				errno.
 *
 *	If the return value is anything but ERESTART:
 *
 *		- If XS_CTL_ASYNC is set, `xs' has been freed back to
 *		  the pool.
 *		- If there is a buf associated with the xfer,
 *		  it has been biodone()'d.
 */
static int
scsipi_complete(struct scsipi_xfer *xs)
{
	struct scsipi_periph *periph = xs->xs_periph;
	struct scsipi_channel *chan = periph->periph_channel;
	int error, s;

#ifdef DIAGNOSTIC
	if ((xs->xs_control & XS_CTL_ASYNC) != 0 && xs->bp == NULL)
		panic("scsipi_complete: XS_CTL_ASYNC but no buf");
#endif
	/*
	 * If the command terminated with a CHECK CONDITION, we need to
	 * issue a REQUEST_SENSE command.  Once the REQUEST_SENSE has been
	 * processed, we'll have the real status.
	 * Must be processed at splbio() to avoid missing a SCSI bus reset
	 * for this command.
	 */
	s = splbio();
	if (xs->error == XS_BUSY && xs->status == SCSI_CHECK) {
		/* request sense for a request sense? */
		if (xs->xs_control & XS_CTL_REQSENSE) {
			scsipi_printaddr(periph);
			printf("request sense for a request sense?\n");
			/* XXX maybe we should reset the device? */
			/* We've been frozen because xs->error != XS_NOERROR */
			scsipi_periph_thaw(periph, 1);
			splx(s);
			if (xs->resid < xs->datalen) {
				printf("we read %d bytes of sense anyway:\n",
				    xs->datalen - xs->resid);
				scsipi_print_sense_data((void *)xs->data, 0);
			}
			return EINVAL;
		}
		scsipi_request_sense(xs);
	}
	splx(s);

	/*
	 * If it's a user level request, bypass all usual completion
	 * processing and let the user work it out.
	 */
	if ((xs->xs_control & XS_CTL_USERCMD) != 0) {
		SC_DEBUG(periph, SCSIPI_DB3, ("calling user done()\n"));
		if (xs->error != XS_NOERROR)
			scsipi_periph_thaw(periph, 1);
		scsipi_user_done(xs);
		SC_DEBUG(periph, SCSIPI_DB3, ("returned from user done()\n"));
		return 0;
	}

	switch (xs->error) {
	case XS_NOERROR:
		error = 0;
		break;

	case XS_SENSE:
	case XS_SHORTSENSE:
		error = (*chan->chan_bustype->bustype_interpret_sense)(xs);
		break;

	case XS_RESOURCE_SHORTAGE:
		/*
		 * XXX Should freeze channel's queue.
		 */
		scsipi_printaddr(periph);
		printf("adapter resource shortage\n");
		/* FALLTHROUGH */

	case XS_BUSY:
		if (xs->error == XS_BUSY && xs->status == SCSI_QUEUE_FULL) {
			struct scsipi_max_openings mo;

			/*
			 * We set the openings to active - 1, assuming that
			 * the command that got us here is the first one that
			 * can't fit into the device's queue.  If that's not
			 * the case, I guess we'll find out soon enough.
			 */
			mo.mo_target = periph->periph_target;
			mo.mo_lun = periph->periph_lun;
			if (periph->periph_active < periph->periph_openings)
				mo.mo_openings = periph->periph_active - 1;
			else
				mo.mo_openings = periph->periph_openings - 1;
#ifdef DIAGNOSTIC
			if (mo.mo_openings < 0) {
				scsipi_printaddr(periph);
				printf("QUEUE FULL resulted in < 0 openings\n");
				panic("scsipi_done");
			}
#endif
			if (mo.mo_openings == 0) {
				scsipi_printaddr(periph);
				printf("QUEUE FULL resulted in 0 openings\n");
				mo.mo_openings = 1;
			}
			scsipi_async_event(chan, ASYNC_EVENT_MAX_OPENINGS, &mo);
			error = ERESTART;
		} else if (xs->xs_retries != 0) {
			xs->xs_retries--;
			/*
			 * Wait one second, and try again.
			 */
			if ((xs->xs_control & XS_CTL_POLL) ||
			    (chan->chan_flags & SCSIPI_CHAN_TACTIVE) == 0) {
				delay(1000000);
			} else if (!callout_pending(&periph->periph_callout)) {
				scsipi_periph_freeze(periph, 1);
				callout_reset(&periph->periph_callout,
				    hz, scsipi_periph_timed_thaw, periph);
			}
			error = ERESTART;
		} else
			error = EBUSY;
		break;

	case XS_REQUEUE:
		error = ERESTART;
		break;

	case XS_SELTIMEOUT:
	case XS_TIMEOUT:
		/*
		 * If the device hasn't gone away, honor retry counts.
		 *
		 * Note that if we're in the middle of probing it,
		 * it won't be found because it isn't here yet so
		 * we won't honor the retry count in that case.
		 */
		if (scsipi_lookup_periph(chan, periph->periph_target,
		    periph->periph_lun) && xs->xs_retries != 0) {
			xs->xs_retries--;
			error = ERESTART;
		} else
			error = EIO;
		break;

	case XS_RESET:
		if (xs->xs_control & XS_CTL_REQSENSE) {
			/*
			 * Request sense interrupted by reset: signal it
			 * with EINTR return code.
			 */
			error = EINTR;
		} else {
			if (xs->xs_retries != 0) {
				xs->xs_retries--;
				error = ERESTART;
			} else
				error = EIO;
		}
		break;

	case XS_DRIVER_STUFFUP:
		scsipi_printaddr(periph);
		printf("generic HBA error\n");
		error = EIO;
		break;
	default:
		scsipi_printaddr(periph);
		printf("invalid return code from adapter: %d\n", xs->error);
		error = EIO;
		break;
	}

	s = splbio();
	if (error == ERESTART) {
		/*
		 * If we get here, the periph has been thawed and frozen
		 * again if we had to issue recovery commands.  Alternatively,
		 * it may have been frozen again and in a timed thaw.  In
		 * any case, we thaw the periph once we re-enqueue the
		 * command.  Once the periph is fully thawed, it will begin
		 * operation again.
		 */
		xs->error = XS_NOERROR;
		xs->status = SCSI_OK;
		xs->xs_status &= ~XS_STS_DONE;
		xs->xs_requeuecnt++;
		error = scsipi_enqueue(xs);
		if (error == 0) {
			scsipi_periph_thaw(periph, 1);
			splx(s);
			return (ERESTART);
		}
	}

	/*
	 * scsipi_done() freezes the queue if not XS_NOERROR.
	 * Thaw it here.
	 */
	if (xs->error != XS_NOERROR)
		scsipi_periph_thaw(periph, 1);

	if (periph->periph_switch->psw_done)
		periph->periph_switch->psw_done(xs, error);

	if (xs->xs_control & XS_CTL_ASYNC)
		scsipi_put_xs(xs);
	splx(s);

	return (error);
}

/*
 * Issue a request sense for the given scsipi_xfer.  Called when the xfer
 * returns with a CHECK_CONDITION status.  Must be called in valid thread
 * context and at splbio().
 */

static void
scsipi_request_sense(struct scsipi_xfer *xs)
{
	struct scsipi_periph *periph = xs->xs_periph;
	int flags, error;
	struct scsi_request_sense cmd;

	periph->periph_flags |= PERIPH_SENSE;

	/* If the command was polling, the request sense will be too. */
	flags = xs->xs_control & XS_CTL_POLL;
	/* Polling commands can't sleep. */
	if (flags)
		flags |= XS_CTL_NOSLEEP;

	flags |= XS_CTL_REQSENSE | XS_CTL_URGENT | XS_CTL_DATA_IN |
	    XS_CTL_THAW_PERIPH | XS_CTL_FREEZE_PERIPH;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = SCSI_REQUEST_SENSE;
	cmd.length = sizeof(struct scsi_sense_data);

	error = scsipi_command(periph, (void *)&cmd, sizeof(cmd),
	    (void *)&xs->sense.scsi_sense, sizeof(struct scsi_sense_data),
	    0, 1000, NULL, flags);
	periph->periph_flags &= ~PERIPH_SENSE;
	periph->periph_xscheck = NULL;
	switch (error) {
	case 0:
		/* we have a valid sense */
		xs->error = XS_SENSE;
		return;
	case EINTR:
		/* REQUEST_SENSE interrupted by bus reset. */
		xs->error = XS_RESET;
		return;
	case EIO:
		/* request sense couldn't be performed */
		/*
		 * XXX this isn't quite right but we don't have anything
		 * better for now
		 */
		xs->error = XS_DRIVER_STUFFUP;
		return;
	default:
		/* Notify that request sense failed. */
		xs->error = XS_DRIVER_STUFFUP;
		scsipi_printaddr(periph);
		printf("request sense failed with error %d\n", error);
		return;
	}
}

/*
 * scsipi_enqueue:
 *
 *	Enqueue an xfer on a channel.
 */
static int
scsipi_enqueue(struct scsipi_xfer *xs)
{
	struct scsipi_channel *chan = xs->xs_periph->periph_channel;
	struct scsipi_xfer *qxs;
	int s;

	s = splbio();

	/*
	 * If the xfer is to be polled, and there are already jobs on
	 * the queue, we can't proceed.
	 */
	if ((xs->xs_control & XS_CTL_POLL) != 0 &&
	    TAILQ_FIRST(&chan->chan_queue) != NULL) {
		splx(s);
		xs->error = XS_DRIVER_STUFFUP;
		return (EAGAIN);
	}

	/*
	 * If we have an URGENT xfer, it's an error recovery command
	 * and it should just go on the head of the channel's queue.
	 */
	if (xs->xs_control & XS_CTL_URGENT) {
		TAILQ_INSERT_HEAD(&chan->chan_queue, xs, channel_q);
		goto out;
	}

	/*
	 * If this xfer has already been on the queue before, we
	 * need to reinsert it in the correct order.  That order is:
	 *
	 *	Immediately before the first xfer for this periph
	 *	with a requeuecnt less than xs->xs_requeuecnt.
	 *
	 * Failing that, at the end of the queue.  (We'll end up
	 * there naturally.)
	 */
	if (xs->xs_requeuecnt != 0) {
		for (qxs = TAILQ_FIRST(&chan->chan_queue); qxs != NULL;
		     qxs = TAILQ_NEXT(qxs, channel_q)) {
			if (qxs->xs_periph == xs->xs_periph &&
			    qxs->xs_requeuecnt < xs->xs_requeuecnt)
				break;
		}
		if (qxs != NULL) {
			TAILQ_INSERT_AFTER(&chan->chan_queue, qxs, xs,
			    channel_q);
			goto out;
		}
	}
	TAILQ_INSERT_TAIL(&chan->chan_queue, xs, channel_q);
 out:
	if (xs->xs_control & XS_CTL_THAW_PERIPH)
		scsipi_periph_thaw(xs->xs_periph, 1);
	splx(s);
	return (0);
}

/*
 * scsipi_run_queue:
 *
 *	Start as many xfers as possible running on the channel.
 */
static void
scsipi_run_queue(struct scsipi_channel *chan)
{
	struct scsipi_xfer *xs;
	struct scsipi_periph *periph;
	int s;

	for (;;) {
		s = splbio();

		/*
		 * If the channel is frozen, we can't do any work right
		 * now.
		 */
		if (chan->chan_qfreeze != 0) {
			splx(s);
			return;
		}

		/*
		 * Look for work to do, and make sure we can do it.
		 */
		for (xs = TAILQ_FIRST(&chan->chan_queue); xs != NULL;
		     xs = TAILQ_NEXT(xs, channel_q)) {
			periph = xs->xs_periph;

			if ((periph->periph_sent >= periph->periph_openings) ||
			    periph->periph_qfreeze != 0 ||
			    (periph->periph_flags & PERIPH_UNTAG) != 0)
				continue;

			if ((periph->periph_flags &
			    (PERIPH_RECOVERING | PERIPH_SENSE)) != 0 &&
			    (xs->xs_control & XS_CTL_URGENT) == 0)
				continue;

			/*
			 * We can issue this xfer!
			 */
			goto got_one;
		}

		/*
		 * Can't find any work to do right now.
		 */
		splx(s);
		return;

 got_one:
		/*
		 * Have an xfer to run.  Allocate a resource from
		 * the adapter to run it.  If we can't allocate that
		 * resource, we don't dequeue the xfer.
		 */
		if (scsipi_get_resource(chan) == 0) {
			/*
			 * Adapter is out of resources.  If the adapter
			 * supports it, attempt to grow them.
			 */
			if (scsipi_grow_resources(chan) == 0) {
				/*
				 * Wasn't able to grow resources,
				 * nothing more we can do.
				 */
				if (xs->xs_control & XS_CTL_POLL) {
					scsipi_printaddr(xs->xs_periph);
					printf("polling command but no "
					    "adapter resources");
					/* We'll panic shortly... */
				}
				splx(s);

				/*
				 * XXX: We should be able to note that
				 * XXX: resources are needed here!
				 */
				return;
			}
			/*
			 * scsipi_grow_resources() allocated the resource
			 * for us.
			 */
		}

		/*
		 * We have a resource to run this xfer, do it!
		 */
		TAILQ_REMOVE(&chan->chan_queue, xs, channel_q);

		/*
		 * If the command is to be tagged, allocate a tag ID
		 * for it.
		 */
		if (XS_CTL_TAGTYPE(xs) != 0)
			scsipi_get_tag(xs);
		else
			periph->periph_flags |= PERIPH_UNTAG;
		periph->periph_sent++;
		splx(s);

		scsipi_adapter_request(chan, ADAPTER_REQ_RUN_XFER, xs);
	}
#ifdef DIAGNOSTIC
	panic("scsipi_run_queue: impossible");
#endif
}

/*
 * scsipi_execute_xs:
 *
 *	Begin execution of an xfer, waiting for it to complete, if necessary.
 */
int
scsipi_execute_xs(struct scsipi_xfer *xs)
{
	struct scsipi_periph *periph = xs->xs_periph;
	struct scsipi_channel *chan = periph->periph_channel;
	int oasync, async, poll, error, s;

	KASSERT(!cold);

	(chan->chan_bustype->bustype_cmd)(xs);

	xs->xs_status &= ~XS_STS_DONE;
	xs->error = XS_NOERROR;
	xs->resid = xs->datalen;
	xs->status = SCSI_OK;

#ifdef SCSIPI_DEBUG
	if (xs->xs_periph->periph_dbflags & SCSIPI_DB3) {
		printf("scsipi_execute_xs: ");
		show_scsipi_xs(xs);
		printf("\n");
	}
#endif

	/*
	 * Deal with command tagging:
	 *
	 *	- If the device's current operating mode doesn't
	 *	  include tagged queueing, clear the tag mask.
	 *
	 *	- If the device's current operating mode *does*
	 *	  include tagged queueing, set the tag_type in
	 *	  the xfer to the appropriate byte for the tag
	 *	  message.
	 */
	if ((PERIPH_XFER_MODE(periph) & PERIPH_CAP_TQING) == 0 ||
	    (xs->xs_control & XS_CTL_REQSENSE)) {
		xs->xs_control &= ~XS_CTL_TAGMASK;
		xs->xs_tag_type = 0;
	} else {
		/*
		 * If the request doesn't specify a tag, give Head
		 * tags to URGENT operations and Ordered tags to
		 * everything else.
		 */
		if (XS_CTL_TAGTYPE(xs) == 0) {
			if (xs->xs_control & XS_CTL_URGENT)
				xs->xs_control |= XS_CTL_HEAD_TAG;
			else
				xs->xs_control |= XS_CTL_ORDERED_TAG;
		}

		switch (XS_CTL_TAGTYPE(xs)) {
		case XS_CTL_ORDERED_TAG:
			xs->xs_tag_type = MSG_ORDERED_Q_TAG;
			break;

		case XS_CTL_SIMPLE_TAG:
			xs->xs_tag_type = MSG_SIMPLE_Q_TAG;
			break;

		case XS_CTL_HEAD_TAG:
			xs->xs_tag_type = MSG_HEAD_OF_Q_TAG;
			break;

		default:
			scsipi_printaddr(periph);
			printf("invalid tag mask 0x%08x\n",
			    XS_CTL_TAGTYPE(xs));
			panic("scsipi_execute_xs");
		}
	}

	/* If the adapter wants us to poll, poll. */
	if (chan->chan_adapter->adapt_flags & SCSIPI_ADAPT_POLL_ONLY)
		xs->xs_control |= XS_CTL_POLL;

	/*
	 * If we don't yet have a completion thread, or we are to poll for
	 * completion, clear the ASYNC flag.
	 */
	oasync = (xs->xs_control & XS_CTL_ASYNC);
	if (chan->chan_thread == NULL || (xs->xs_control & XS_CTL_POLL) != 0)
		xs->xs_control &= ~XS_CTL_ASYNC;

	async = (xs->xs_control & XS_CTL_ASYNC);
	poll = (xs->xs_control & XS_CTL_POLL);

#ifdef DIAGNOSTIC
	if (oasync != 0 && xs->bp == NULL)
		panic("scsipi_execute_xs: XS_CTL_ASYNC but no buf");
#endif

	/*
	 * Enqueue the transfer.  If we're not polling for completion, this
	 * should ALWAYS return `no error'.
	 */
	error = scsipi_enqueue(xs);
	if (error) {
		if (poll == 0) {
			scsipi_printaddr(periph);
			printf("not polling, but enqueue failed with %d\n",
			    error);
			panic("scsipi_execute_xs");
		}

		scsipi_printaddr(periph);
		printf("should have flushed queue?\n");
		goto free_xs;
	}

 restarted:
	scsipi_run_queue(chan);

	/*
	 * The xfer is enqueued, and possibly running.  If it's to be
	 * completed asynchronously, just return now.
	 */
	if (async)
		return (0);

	/*
	 * Not an asynchronous command; wait for it to complete.
	 */
	s = splbio();
	while ((xs->xs_status & XS_STS_DONE) == 0) {
		if (poll) {
			scsipi_printaddr(periph);
			printf("polling command not done\n");
			panic("scsipi_execute_xs");
		}
		(void) tsleep(xs, PRIBIO, "xscmd", 0);
	}
	splx(s);

	/*
	 * Command is complete.  scsipi_done() has awakened us to perform
	 * the error handling.
	 */
	error = scsipi_complete(xs);
	if (error == ERESTART)
		goto restarted;

	/*
	 * If it was meant to run async and we cleared async ourselves,
	 * don't return an error here.  It has already been handled.
	 */
	if (oasync)
		error = 0;
	/*
	 * Command completed successfully or fatal error occurred.  Fall
	 * into....
	 */
 free_xs:
	s = splbio();
	scsipi_put_xs(xs);
	splx(s);

	/*
	 * Kick the queue, keep it running in case it stopped for some
	 * reason.
	 */
	scsipi_run_queue(chan);

	return (error);
}

/*
 * scsipi_completion_thread:
 *
 *	This is the completion thread.  We wait for errors on
 *	asynchronous xfers, and perform the error handling
 *	function, restarting the command, if necessary.
 */
static void
scsipi_completion_thread(void *arg)
{
	struct scsipi_channel *chan = arg;
	struct scsipi_xfer *xs;
	int s;

	if (chan->chan_init_cb)
		(*chan->chan_init_cb)(chan, chan->chan_init_cb_arg);

	s = splbio();
	chan->chan_flags |= SCSIPI_CHAN_TACTIVE;
	splx(s);
	for (;;) {
		s = splbio();
		xs = TAILQ_FIRST(&chan->chan_complete);
		if (xs == NULL && chan->chan_tflags == 0) {
			/* nothing to do; wait */
			(void) tsleep(&chan->chan_complete, PRIBIO,
			    "sccomp", 0);
			splx(s);
			continue;
		}
		if (chan->chan_tflags & SCSIPI_CHANT_CALLBACK) {
			/* call chan_callback from thread context */
			chan->chan_tflags &= ~SCSIPI_CHANT_CALLBACK;
			chan->chan_callback(chan, chan->chan_callback_arg);
			splx(s);
			continue;
		}
		if (chan->chan_tflags & SCSIPI_CHANT_GROWRES) {
			/* attempt to get more openings for this channel */
			chan->chan_tflags &= ~SCSIPI_CHANT_GROWRES;
			scsipi_adapter_request(chan,
			    ADAPTER_REQ_GROW_RESOURCES, NULL);
			scsipi_channel_thaw(chan, 1);
			splx(s);
			if (chan->chan_tflags & SCSIPI_CHANT_GROWRES)
				kpause("scsizzz", FALSE, hz/10, NULL);
			continue;
		}
		if (chan->chan_tflags & SCSIPI_CHANT_KICK) {
			/* explicitly run the queues for this channel */
			chan->chan_tflags &= ~SCSIPI_CHANT_KICK;
			scsipi_run_queue(chan);
			splx(s);
			continue;
		}
		if (chan->chan_tflags & SCSIPI_CHANT_SHUTDOWN) {
			splx(s);
			break;
		}
		if (xs) {
			TAILQ_REMOVE(&chan->chan_complete, xs, channel_q);
			splx(s);

			/*
			 * Have an xfer with an error; process it.
			 */
			(void) scsipi_complete(xs);

			/*
			 * Kick the queue; keep it running if it was stopped
			 * for some reason.
			 */
			scsipi_run_queue(chan);
		} else {
			splx(s);
		}
	}

	chan->chan_thread = NULL;

	/* In case parent is waiting for us to exit. */
	wakeup(&chan->chan_thread);

	kthread_exit(0);
}

2108 /*
2109 * scsipi_thread_call_callback:
2110 *
2111  *	Request to call a callback from the completion thread.
2112 */
2113 int
2114 scsipi_thread_call_callback(struct scsipi_channel *chan,
2115 void (*callback)(struct scsipi_channel *, void *), void *arg)
2116 {
2117 int s;
2118
2119 s = splbio();
2120 if ((chan->chan_flags & SCSIPI_CHAN_TACTIVE) == 0) {
2121 /* kernel thread doesn't exist yet */
2122 splx(s);
2123 return ESRCH;
2124 }
2125 if (chan->chan_tflags & SCSIPI_CHANT_CALLBACK) {
2126 splx(s);
2127 return EBUSY;
2128 }
2129 scsipi_channel_freeze(chan, 1);
2130 chan->chan_callback = callback;
2131 chan->chan_callback_arg = arg;
2132 chan->chan_tflags |= SCSIPI_CHANT_CALLBACK;
2133 wakeup(&chan->chan_complete);
2134 splx(s);
2135 	return (0);
2136 }
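
/*
 * Usage sketch (hypothetical adapter code; my_callback and sc are
 * illustrative names): defer work that needs thread context.
 *
 *	if (scsipi_thread_call_callback(chan, my_callback, sc) != 0)
 *		...thread not running yet (ESRCH) or busy (EBUSY)...
 *
 * The channel is frozen by one count on success; the callback is
 * expected to arrange the matching scsipi_channel_thaw().
 */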
2137
2138 /*
2139 * scsipi_async_event:
2140 *
2141 * Handle an asynchronous event from an adapter.
2142 */
2143 void
2144 scsipi_async_event(struct scsipi_channel *chan, scsipi_async_event_t event,
2145 void *arg)
2146 {
2147 int s;
2148
2149 s = splbio();
2150 switch (event) {
2151 case ASYNC_EVENT_MAX_OPENINGS:
2152 scsipi_async_event_max_openings(chan,
2153 (struct scsipi_max_openings *)arg);
2154 break;
2155
2156 case ASYNC_EVENT_XFER_MODE:
2157 scsipi_async_event_xfer_mode(chan,
2158 (struct scsipi_xfer_mode *)arg);
2159 		break;

2160 	case ASYNC_EVENT_RESET:
2161 scsipi_async_event_channel_reset(chan);
2162 break;
2163 }
2164 splx(s);
2165 }
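
/*
 * Usage sketch (hypothetical adapter interrupt handler): report a
 * newly negotiated transfer mode.  The field names follow the
 * handlers below; the values are only an example.  Note that
 * xm_period carries the sync factor, not a period.
 *
 *	struct scsipi_xfer_mode xm;
 *
 *	xm.xm_target = target;
 *	xm.xm_mode = PERIPH_CAP_SYNC | PERIPH_CAP_WIDE16;
 *	xm.xm_period = 0x0a;		(FAST-40 sync factor)
 *	xm.xm_offset = offset;
 *	scsipi_async_event(chan, ASYNC_EVENT_XFER_MODE, &xm);
 */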
2166
2167 /*
2168 * scsipi_print_xfer_mode:
2169 *
2170 * Print a periph's capabilities.
2171 */
2172 void
2173 scsipi_print_xfer_mode(struct scsipi_periph *periph)
2174 {
2175 int period, freq, speed, mbs;
2176
2177 if ((periph->periph_flags & PERIPH_MODE_VALID) == 0)
2178 return;
2179
2180 aprint_normal_dev(periph->periph_dev, "");
2181 if (periph->periph_mode & (PERIPH_CAP_SYNC | PERIPH_CAP_DT)) {
2182 period = scsipi_sync_factor_to_period(periph->periph_period);
2183 aprint_normal("sync (%d.%02dns offset %d)",
2184 period / 100, period % 100, periph->periph_offset);
2185 } else
2186 aprint_normal("async");
2187
2188 if (periph->periph_mode & PERIPH_CAP_WIDE32)
2189 aprint_normal(", 32-bit");
2190 else if (periph->periph_mode & (PERIPH_CAP_WIDE16 | PERIPH_CAP_DT))
2191 aprint_normal(", 16-bit");
2192 else
2193 aprint_normal(", 8-bit");
2194
2195 if (periph->periph_mode & (PERIPH_CAP_SYNC | PERIPH_CAP_DT)) {
2196 freq = scsipi_sync_factor_to_freq(periph->periph_period);
2197 speed = freq;
2198 if (periph->periph_mode & PERIPH_CAP_WIDE32)
2199 speed *= 4;
2200 else if (periph->periph_mode &
2201 (PERIPH_CAP_WIDE16 | PERIPH_CAP_DT))
2202 speed *= 2;
2203 mbs = speed / 1000;
2204 if (mbs > 0)
2205 aprint_normal(" (%d.%03dMB/s)", mbs, speed % 1000);
2206 else
2207 aprint_normal(" (%dKB/s)", speed % 1000);
2208 }
2209
2210 aprint_normal(" transfers");
2211
2212 if (periph->periph_mode & PERIPH_CAP_TQING)
2213 aprint_normal(", tagged queueing");
2214
2215 aprint_normal("\n");
2216 }
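
/*
 * Worked example of the arithmetic above: a FAST-40 device (sync
 * factor 0x0a, 25.00 ns period, 40000 kHz) negotiated wide-16 gives
 * speed = 80000 and prints:
 *
 *	sync (25.00ns offset 15), 16-bit (80.000MB/s) transfers
 */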
2217
2218 /*
2219 * scsipi_async_event_max_openings:
2220 *
2221 * Update the maximum number of outstanding commands a
2222 * device may have.
2223 */
2224 static void
2225 scsipi_async_event_max_openings(struct scsipi_channel *chan,
2226 struct scsipi_max_openings *mo)
2227 {
2228 struct scsipi_periph *periph;
2229 int minlun, maxlun;
2230
2231 if (mo->mo_lun == -1) {
2232 /*
2233 * Wildcarded; apply it to all LUNs.
2234 */
2235 minlun = 0;
2236 maxlun = chan->chan_nluns - 1;
2237 } else
2238 minlun = maxlun = mo->mo_lun;
2239
2240 /* XXX This could really suck with a large LUN space. */
2241 for (; minlun <= maxlun; minlun++) {
2242 periph = scsipi_lookup_periph(chan, mo->mo_target, minlun);
2243 if (periph == NULL)
2244 continue;
2245
2246 if (mo->mo_openings < periph->periph_openings)
2247 periph->periph_openings = mo->mo_openings;
2248 else if (mo->mo_openings > periph->periph_openings &&
2249 (periph->periph_flags & PERIPH_GROW_OPENINGS) != 0)
2250 periph->periph_openings = mo->mo_openings;
2251 }
2252 }
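
/*
 * Usage sketch (hypothetical adapter code): throttle a device that
 * overran the adapter's command slots; mo_lun == -1 wildcards all
 * LUNs as handled above.  Note that openings can only grow if the
 * periph has set PERIPH_GROW_OPENINGS.
 *
 *	struct scsipi_max_openings mo;
 *
 *	mo.mo_target = target;
 *	mo.mo_lun = -1;
 *	mo.mo_openings = new_openings;
 *	scsipi_async_event(chan, ASYNC_EVENT_MAX_OPENINGS, &mo);
 */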
2253
2254 /*
2255 * scsipi_async_event_xfer_mode:
2256 *
2257 * Update the xfer mode for all periphs sharing the
2258 * specified I_T Nexus.
2259 */
2260 static void
2261 scsipi_async_event_xfer_mode(struct scsipi_channel *chan,
2262 struct scsipi_xfer_mode *xm)
2263 {
2264 struct scsipi_periph *periph;
2265 int lun, announce, mode, period, offset;
2266
2267 for (lun = 0; lun < chan->chan_nluns; lun++) {
2268 periph = scsipi_lookup_periph(chan, xm->xm_target, lun);
2269 if (periph == NULL)
2270 continue;
2271 announce = 0;
2272
2273 /*
2274 * Clamp the xfer mode down to this periph's capabilities.
2275 */
2276 mode = xm->xm_mode & periph->periph_cap;
2277 if (mode & PERIPH_CAP_SYNC) {
2278 period = xm->xm_period;
2279 offset = xm->xm_offset;
2280 } else {
2281 period = 0;
2282 offset = 0;
2283 }
2284
2285 /*
2286 * If we do not have a valid xfer mode yet, or the parameters
2287 * are different, announce them.
2288 */
2289 if ((periph->periph_flags & PERIPH_MODE_VALID) == 0 ||
2290 periph->periph_mode != mode ||
2291 periph->periph_period != period ||
2292 periph->periph_offset != offset)
2293 announce = 1;
2294
2295 periph->periph_mode = mode;
2296 periph->periph_period = period;
2297 periph->periph_offset = offset;
2298 periph->periph_flags |= PERIPH_MODE_VALID;
2299
2300 if (announce)
2301 scsipi_print_xfer_mode(periph);
2302 }
2303 }
2304
2305 /*
2306 * scsipi_set_xfer_mode:
2307 *
2308 * Set the xfer mode for the specified I_T Nexus.
2309 */
2310 void
2311 scsipi_set_xfer_mode(struct scsipi_channel *chan, int target, int immed)
2312 {
2313 struct scsipi_xfer_mode xm;
2314 struct scsipi_periph *itperiph;
2315 int lun, s;
2316
2317 /*
2318 * Go to the minimal xfer mode.
2319 */
2320 xm.xm_target = target;
2321 xm.xm_mode = 0;
2322 xm.xm_period = 0; /* ignored */
2323 xm.xm_offset = 0; /* ignored */
2324
2325 /*
2326 * Find the first LUN we know about on this I_T Nexus.
2327 */
2328 for (itperiph = NULL, lun = 0; lun < chan->chan_nluns; lun++) {
2329 itperiph = scsipi_lookup_periph(chan, target, lun);
2330 if (itperiph != NULL)
2331 break;
2332 }
2333 if (itperiph != NULL) {
2334 xm.xm_mode = itperiph->periph_cap;
2335 /*
2336 * Now issue the request to the adapter.
2337 */
2338 s = splbio();
2339 scsipi_adapter_request(chan, ADAPTER_REQ_SET_XFER_MODE, &xm);
2340 splx(s);
2341 /*
2342 * If we want this to happen immediately, issue a dummy
2343 * command, since most adapters can't really negotiate unless
2344 * they're executing a job.
2345 */
2346 if (immed != 0) {
2347 (void) scsipi_test_unit_ready(itperiph,
2348 XS_CTL_DISCOVERY | XS_CTL_IGNORE_ILLEGAL_REQUEST |
2349 XS_CTL_IGNORE_NOT_READY |
2350 XS_CTL_IGNORE_MEDIA_CHANGE);
2351 }
2352 }
2353 }
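
/*
 * Usage sketch: request the first attached periph's full capabilities
 * and force the renegotiation right away (the non-zero third argument
 * issues the dummy TEST UNIT READY above):
 *
 *	scsipi_set_xfer_mode(chan, periph->periph_target, 1);
 */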
2354
2355 /*
2356  * scsipi_async_event_channel_reset:
2357  *
2358  *	Handle a SCSI bus reset.
2359  *	Called at splbio().
2360 */
2361 static void
2362 scsipi_async_event_channel_reset(struct scsipi_channel *chan)
2363 {
2364 struct scsipi_xfer *xs, *xs_next;
2365 struct scsipi_periph *periph;
2366 int target, lun;
2367
2368 /*
2369 	 * The channel has been reset.  Also mark pending REQUEST_SENSE
2370 	 * commands as reset, since their sense data is no longer available.
2371 	 * We can't call scsipi_done() from here, as the command has not been
2372 	 * sent to the adapter yet (that would corrupt the accounting).
2373 */
2374
2375 for (xs = TAILQ_FIRST(&chan->chan_queue); xs != NULL; xs = xs_next) {
2376 xs_next = TAILQ_NEXT(xs, channel_q);
2377 if (xs->xs_control & XS_CTL_REQSENSE) {
2378 TAILQ_REMOVE(&chan->chan_queue, xs, channel_q);
2379 xs->error = XS_RESET;
2380 if ((xs->xs_control & XS_CTL_ASYNC) != 0)
2381 TAILQ_INSERT_TAIL(&chan->chan_complete, xs,
2382 channel_q);
2383 }
2384 }
2385 wakeup(&chan->chan_complete);
2386 /* Catch xs with pending sense which may not have a REQSENSE xs yet */
2387 for (target = 0; target < chan->chan_ntargets; target++) {
2388 if (target == chan->chan_id)
2389 continue;
2390 for (lun = 0; lun < chan->chan_nluns; lun++) {
2391 periph = scsipi_lookup_periph(chan, target, lun);
2392 if (periph) {
2393 xs = periph->periph_xscheck;
2394 if (xs)
2395 xs->error = XS_RESET;
2396 }
2397 }
2398 }
2399 }
2400
2401 /*
2402 * scsipi_target_detach:
2403 *
2404  *	Detach all periphs associated with an I_T nexus.
2405  *	Must be called from valid thread context.
2406 */
2407 int
2408 scsipi_target_detach(struct scsipi_channel *chan, int target, int lun,
2409 int flags)
2410 {
2411 struct scsipi_periph *periph;
2412 int ctarget, mintarget, maxtarget;
2413 int clun, minlun, maxlun;
2414 int error;
2415
2416 if (target == -1) {
2417 mintarget = 0;
2418 maxtarget = chan->chan_ntargets;
2419 } else {
2420 if (target == chan->chan_id)
2421 return EINVAL;
2422 if (target < 0 || target >= chan->chan_ntargets)
2423 return EINVAL;
2424 mintarget = target;
2425 maxtarget = target + 1;
2426 }
2427
2428 if (lun == -1) {
2429 minlun = 0;
2430 maxlun = chan->chan_nluns;
2431 } else {
2432 if (lun < 0 || lun >= chan->chan_nluns)
2433 return EINVAL;
2434 minlun = lun;
2435 maxlun = lun + 1;
2436 }
2437
2438 for (ctarget = mintarget; ctarget < maxtarget; ctarget++) {
2439 if (ctarget == chan->chan_id)
2440 continue;
2441
2442 for (clun = minlun; clun < maxlun; clun++) {
2443 periph = scsipi_lookup_periph(chan, ctarget, clun);
2444 if (periph == NULL)
2445 continue;
2446 error = config_detach(periph->periph_dev, flags);
2447 if (error)
2448 return (error);
2449 }
2450 }
2451 	return (0);
2452 }
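
/*
 * Usage sketch: detach everything on the channel by wildcarding both
 * target and lun; DETACH_FORCE is the usual autoconf(9) flag.
 *
 *	error = scsipi_target_detach(chan, -1, -1, DETACH_FORCE);
 */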
2453
2454 /*
2455 * scsipi_adapter_addref:
2456 *
2457 * Add a reference to the adapter pointed to by the provided
2458 * link, enabling the adapter if necessary.
2459 */
2460 int
2461 scsipi_adapter_addref(struct scsipi_adapter *adapt)
2462 {
2463 int s, error = 0;
2464
2465 s = splbio();
2466 if (adapt->adapt_refcnt++ == 0 && adapt->adapt_enable != NULL) {
2467 error = (*adapt->adapt_enable)(adapt->adapt_dev, 1);
2468 if (error)
2469 adapt->adapt_refcnt--;
2470 }
2471 splx(s);
2472 return (error);
2473 }
2474
2475 /*
2476 * scsipi_adapter_delref:
2477 *
2478 * Delete a reference to the adapter pointed to by the provided
2479 * link, disabling the adapter if possible.
2480 */
2481 void
2482 scsipi_adapter_delref(struct scsipi_adapter *adapt)
2483 {
2484 int s;
2485
2486 s = splbio();
2487 if (adapt->adapt_refcnt-- == 1 && adapt->adapt_enable != NULL)
2488 (void) (*adapt->adapt_enable)(adapt->adapt_dev, 0);
2489 splx(s);
2490 }
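
/*
 * Usage sketch: the pair above brackets any period during which a
 * periph driver needs the adapter enabled (e.g. across an open):
 *
 *	if ((error = scsipi_adapter_addref(adapt)) != 0)
 *		return error;
 *	...issue commands...
 *	scsipi_adapter_delref(adapt);
 */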
2491
2492 static struct scsipi_syncparam {
2493 int ss_factor;
2494 int ss_period; /* ns * 100 */
2495 } scsipi_syncparams[] = {
2496 { 0x08, 625 }, /* FAST-160 (Ultra320) */
2497 { 0x09, 1250 }, /* FAST-80 (Ultra160) */
2498 { 0x0a, 2500 }, /* FAST-40 40MHz (Ultra2) */
2499 { 0x0b, 3030 }, /* FAST-40 33MHz (Ultra2) */
2500 { 0x0c, 5000 }, /* FAST-20 (Ultra) */
2501 };
2502 static const int scsipi_nsyncparams =
2503 sizeof(scsipi_syncparams) / sizeof(scsipi_syncparams[0]);
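
/*
 * Example: factor 0x0a maps to ss_period 2500, i.e. 25.00 ns, i.e. a
 * 40 MHz (Ultra2) clock.  Factors outside the table fall back to the
 * linear "factor * 4 ns" rule in the converters below.
 */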
2504
2505 int
2506 scsipi_sync_period_to_factor(int period /* ns * 100 */)
2507 {
2508 int i;
2509
2510 for (i = 0; i < scsipi_nsyncparams; i++) {
2511 if (period <= scsipi_syncparams[i].ss_period)
2512 return (scsipi_syncparams[i].ss_factor);
2513 }
2514
2515 return ((period / 100) / 4);
2516 }
2517
2518 int
2519 scsipi_sync_factor_to_period(int factor)
2520 {
2521 int i;
2522
2523 for (i = 0; i < scsipi_nsyncparams; i++) {
2524 if (factor == scsipi_syncparams[i].ss_factor)
2525 return (scsipi_syncparams[i].ss_period);
2526 }
2527
2528 return ((factor * 4) * 100);
2529 }
2530
2531 int
2532 scsipi_sync_factor_to_freq(int factor)
2533 {
2534 int i;
2535
2536 for (i = 0; i < scsipi_nsyncparams; i++) {
2537 if (factor == scsipi_syncparams[i].ss_factor)
2538 return (100000000 / scsipi_syncparams[i].ss_period);
2539 }
2540
2541 return (10000000 / ((factor * 4) * 10));
2542 }
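
/*
 * Worked examples: scsipi_sync_period_to_factor(2500) == 0x0a;
 * scsipi_sync_factor_to_freq(0x0a) == 100000000 / 2500 == 40000 kHz.
 * A non-table factor such as 0x32 (50) gives 50 * 4 == 200 ns and
 * 10000000 / (50 * 4 * 10) == 5000 kHz.
 */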
2543
2544 #ifdef SCSIPI_DEBUG
2545 /*
2546  * Given a scsipi_xfer, dump the request, in all its glory.
2547 */
2548 void
2549 show_scsipi_xs(struct scsipi_xfer *xs)
2550 {
2551
2552 printf("xs(%p): ", xs);
2553 printf("xs_control(0x%08x)", xs->xs_control);
2554 printf("xs_status(0x%08x)", xs->xs_status);
2555 printf("periph(%p)", xs->xs_periph);
2556 printf("retr(0x%x)", xs->xs_retries);
2557 printf("timo(0x%x)", xs->timeout);
2558 printf("cmd(%p)", xs->cmd);
2559 printf("len(0x%x)", xs->cmdlen);
2560 printf("data(%p)", xs->data);
2561 printf("len(0x%x)", xs->datalen);
2562 printf("res(0x%x)", xs->resid);
2563 printf("err(0x%x)", xs->error);
2564 printf("bp(%p)", xs->bp);
2565 show_scsipi_cmd(xs);
2566 }
2567
2568 void
2569 show_scsipi_cmd(struct scsipi_xfer *xs)
2570 {
2571 u_char *b = (u_char *) xs->cmd;
2572 int i = 0;
2573
2574 scsipi_printaddr(xs->xs_periph);
2575 printf(" command: ");
2576
2577 if ((xs->xs_control & XS_CTL_RESET) == 0) {
2578 while (i < xs->cmdlen) {
2579 if (i)
2580 printf(",");
2581 printf("0x%x", b[i++]);
2582 }
2583 printf("-[%d bytes]\n", xs->datalen);
2584 if (xs->datalen)
2585 show_mem(xs->data, min(64, xs->datalen));
2586 } else
2587 printf("-RESET-\n");
2588 }
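
/*
 * Illustrative output (address format may vary): a 6-byte TEST UNIT
 * READY with no data phase would print something like:
 *
 *	sd0(ahc0:0:1:0):  command: 0x0,0x0,0x0,0x0,0x0,0x0-[0 bytes]
 */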
2589
2590 void
2591 show_mem(u_char *address, int num)
2592 {
2593 int x;
2594
2595 printf("------------------------------");
2596 for (x = 0; x < num; x++) {
2597 if ((x % 16) == 0)
2598 printf("\n%03d: ", x);
2599 printf("%02x ", *address++);
2600 }
2601 printf("\n------------------------------\n");
2602 }
2603 #endif /* SCSIPI_DEBUG */
2604