/*	$NetBSD: scsipi_base.c,v 1.110 2004/08/27 20:37:28 bouyer Exp $	*/

/*-
 * Copyright (c) 1998, 1999, 2000, 2002, 2003 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Charles M. Hannum; by Jason R. Thorpe of the Numerical Aerospace
 * Simulation Facility, NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: scsipi_base.c,v 1.110 2004/08/27 20:37:28 bouyer Exp $");

#include "opt_scsi.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/buf.h>
#include <sys/uio.h>
#include <sys/malloc.h>
#include <sys/pool.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/proc.h>
#include <sys/kthread.h>
#include <sys/hash.h>
#include <machine/vmparam.h>

#include <dev/scsipi/scsipi_all.h>
#include <dev/scsipi/scsipi_disk.h>
#include <dev/scsipi/scsipiconf.h>
#include <dev/scsipi/scsipi_base.h>

#include <dev/scsipi/scsi_all.h>
#include <dev/scsipi/scsi_message.h>

static int	scsipi_complete(struct scsipi_xfer *);
static void	scsipi_request_sense(struct scsipi_xfer *);
static int	scsipi_enqueue(struct scsipi_xfer *);
static void	scsipi_run_queue(struct scsipi_channel *chan);

static void	scsipi_completion_thread(void *);

static void	scsipi_get_tag(struct scsipi_xfer *);
static void	scsipi_put_tag(struct scsipi_xfer *);

static int	scsipi_get_resource(struct scsipi_channel *);
static void	scsipi_put_resource(struct scsipi_channel *);

static void	scsipi_async_event_max_openings(struct scsipi_channel *,
		    struct scsipi_max_openings *);
static void	scsipi_async_event_xfer_mode(struct scsipi_channel *,
		    struct scsipi_xfer_mode *);
static void	scsipi_async_event_channel_reset(struct scsipi_channel *);

static struct pool scsipi_xfer_pool;

/*
 * scsipi_init:
 *
 *	Called when a scsibus or atapibus is attached to the system
 *	to initialize shared data structures.
 */
void
scsipi_init(void)
{
	static int scsipi_init_done;

	if (scsipi_init_done)
		return;
	scsipi_init_done = 1;

	/* Initialize the scsipi_xfer pool. */
	pool_init(&scsipi_xfer_pool, sizeof(struct scsipi_xfer), 0,
	    0, 0, "scxspl", NULL);
	if (pool_prime(&scsipi_xfer_pool,
	    PAGE_SIZE / sizeof(struct scsipi_xfer)) == ENOMEM) {
		printf("WARNING: not enough memory for scsipi_xfer_pool\n");
	}
}

/*
 * scsipi_channel_init:
 *
 *	Initialize a scsipi_channel when it is attached.
 */
int
scsipi_channel_init(struct scsipi_channel *chan)
{
	int i;

	/* Initialize shared data. */
	scsipi_init();

	/* Initialize the queues. */
	TAILQ_INIT(&chan->chan_queue);
	TAILQ_INIT(&chan->chan_complete);

	for (i = 0; i < SCSIPI_CHAN_PERIPH_BUCKETS; i++)
		LIST_INIT(&chan->chan_periphtab[i]);

	/*
	 * Create the asynchronous completion thread.
	 */
	kthread_create(scsipi_create_completion_thread, chan);
	return (0);
}

/*
 * scsipi_channel_shutdown:
 *
 *	Shutdown a scsipi_channel.
 */
void
scsipi_channel_shutdown(struct scsipi_channel *chan)
{

	/*
	 * Shut down the completion thread.
	 */
	chan->chan_tflags |= SCSIPI_CHANT_SHUTDOWN;
	wakeup(&chan->chan_complete);

	/*
	 * Now wait for the thread to exit.
	 */
	while (chan->chan_thread != NULL)
		(void) tsleep(&chan->chan_thread, PRIBIO, "scshut", 0);
}

static uint32_t
scsipi_chan_periph_hash(uint64_t t, uint64_t l)
{
	uint32_t hash;

	hash = hash32_buf(&t, sizeof(t), HASH32_BUF_INIT);
	hash = hash32_buf(&l, sizeof(l), hash);

	return (hash & SCSIPI_CHAN_PERIPH_HASHMASK);
}
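
/*
 * Example (illustrative only): a periph at target 2, LUN 0 is filed
 * under chan_periphtab[scsipi_chan_periph_hash(2, 0)], and
 * scsipi_lookup_periph() below recomputes the same hash and walks
 * only that bucket's list, so lookups stay cheap even with many
 * periphs on one channel.
 */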

/*
 * scsipi_insert_periph:
 *
 *	Insert a periph into the channel.
 */
void
scsipi_insert_periph(struct scsipi_channel *chan, struct scsipi_periph *periph)
{
	uint32_t hash;
	int s;

	hash = scsipi_chan_periph_hash(periph->periph_target,
	    periph->periph_lun);

	s = splbio();
	LIST_INSERT_HEAD(&chan->chan_periphtab[hash], periph, periph_hash);
	splx(s);
}

/*
 * scsipi_remove_periph:
 *
 *	Remove a periph from the channel.
 */
void
scsipi_remove_periph(struct scsipi_channel *chan, struct scsipi_periph *periph)
{
	int s;

	s = splbio();
	LIST_REMOVE(periph, periph_hash);
	splx(s);
}

/*
 * scsipi_lookup_periph:
 *
 *	Lookup a periph on the specified channel.
 */
struct scsipi_periph *
scsipi_lookup_periph(struct scsipi_channel *chan, int target, int lun)
{
	struct scsipi_periph *periph;
	uint32_t hash;
	int s;

	if (target >= chan->chan_ntargets ||
	    lun >= chan->chan_nluns)
		return (NULL);

	hash = scsipi_chan_periph_hash(target, lun);

	s = splbio();
	LIST_FOREACH(periph, &chan->chan_periphtab[hash], periph_hash) {
		if (periph->periph_target == target &&
		    periph->periph_lun == lun)
			break;
	}
	splx(s);

	return (periph);
}

/*
 * scsipi_get_resource:
 *
 *	Allocate a single xfer `resource' from the channel.
 *
 *	NOTE: Must be called at splbio().
 */
static int
scsipi_get_resource(struct scsipi_channel *chan)
{
	struct scsipi_adapter *adapt = chan->chan_adapter;

	if (chan->chan_flags & SCSIPI_CHAN_OPENINGS) {
		if (chan->chan_openings > 0) {
			chan->chan_openings--;
			return (1);
		}
		return (0);
	}

	if (adapt->adapt_openings > 0) {
		adapt->adapt_openings--;
		return (1);
	}
	return (0);
}
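
/*
 * Illustrative note: with SCSIPI_CHAN_OPENINGS clear, every channel
 * on an adapter draws from the shared adapt->adapt_openings count
 * above; with it set, each channel spends its private
 * chan->chan_openings instead, so one busy channel does not drain
 * the openings of its siblings.
 */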

/*
 * scsipi_grow_resources:
 *
 *	Attempt to grow resources for a channel.  If this succeeds,
 *	we allocate one for our caller.
 *
 *	NOTE: Must be called at splbio().
 */
static __inline int
scsipi_grow_resources(struct scsipi_channel *chan)
{

	if (chan->chan_flags & SCSIPI_CHAN_CANGROW) {
		if ((chan->chan_flags & SCSIPI_CHAN_TACTIVE) == 0) {
			scsipi_adapter_request(chan,
			    ADAPTER_REQ_GROW_RESOURCES, NULL);
			return (scsipi_get_resource(chan));
		}
		/*
		 * Ask the channel thread to do it.  It'll have to thaw
		 * the queue.
		 */
		scsipi_channel_freeze(chan, 1);
		chan->chan_tflags |= SCSIPI_CHANT_GROWRES;
		wakeup(&chan->chan_complete);
		return (0);
	}

	return (0);
}

/*
 * scsipi_put_resource:
 *
 *	Free a single xfer `resource' to the channel.
 *
 *	NOTE: Must be called at splbio().
 */
static void
scsipi_put_resource(struct scsipi_channel *chan)
{
	struct scsipi_adapter *adapt = chan->chan_adapter;

	if (chan->chan_flags & SCSIPI_CHAN_OPENINGS)
		chan->chan_openings++;
	else
		adapt->adapt_openings++;
}

/*
 * scsipi_get_tag:
 *
 *	Get a tag ID for the specified xfer.
 *
 *	NOTE: Must be called at splbio().
 */
static void
scsipi_get_tag(struct scsipi_xfer *xs)
{
	struct scsipi_periph *periph = xs->xs_periph;
	int bit, tag;
	u_int word;

	bit = 0;	/* XXX gcc */
	for (word = 0; word < PERIPH_NTAGWORDS; word++) {
		bit = ffs(periph->periph_freetags[word]);
		if (bit != 0)
			break;
	}
#ifdef DIAGNOSTIC
	if (word == PERIPH_NTAGWORDS) {
		scsipi_printaddr(periph);
		printf("no free tags\n");
		panic("scsipi_get_tag");
	}
#endif

	bit -= 1;
	periph->periph_freetags[word] &= ~(1 << bit);
	tag = (word << 5) | bit;

	/* XXX Should eventually disallow this completely. */
	if (tag >= periph->periph_openings) {
		scsipi_printaddr(periph);
		printf("WARNING: tag %d greater than available openings %d\n",
		    tag, periph->periph_openings);
	}

	xs->xs_tag_id = tag;
}

/*
 * scsipi_put_tag:
 *
 *	Put the tag ID for the specified xfer back into the pool.
 *
 *	NOTE: Must be called at splbio().
 */
static void
scsipi_put_tag(struct scsipi_xfer *xs)
{
	struct scsipi_periph *periph = xs->xs_periph;
	int word, bit;

	word = xs->xs_tag_id >> 5;
	bit = xs->xs_tag_id & 0x1f;

	periph->periph_freetags[word] |= (1 << bit);
}
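
/*
 * Tag IDs pack a word index and a bit position: tag == (word << 5) | bit.
 * Worked example (illustrative): tag 37 lives in periph_freetags[1]
 * (37 >> 5 == 1) at bit 5 (37 & 0x1f == 5); scsipi_get_tag() clears
 * that bit to allocate the tag and scsipi_put_tag() sets it again to
 * free it.
 */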

/*
 * scsipi_get_xs:
 *
 *	Allocate an xfer descriptor and associate it with the
 *	specified peripheral.  If the peripheral has no more
 *	available command openings, we either block waiting for
 *	one to become available, or fail.
 */
struct scsipi_xfer *
scsipi_get_xs(struct scsipi_periph *periph, int flags)
{
	struct scsipi_xfer *xs;
	int s;

	SC_DEBUG(periph, SCSIPI_DB3, ("scsipi_get_xs\n"));

	/*
	 * If we're cold, make sure we poll.
	 */
	if (cold)
		flags |= XS_CTL_NOSLEEP | XS_CTL_POLL;

#ifdef DIAGNOSTIC
	/*
	 * URGENT commands can never be ASYNC.
	 */
	if ((flags & (XS_CTL_URGENT|XS_CTL_ASYNC)) ==
	    (XS_CTL_URGENT|XS_CTL_ASYNC)) {
		scsipi_printaddr(periph);
		printf("URGENT and ASYNC\n");
		panic("scsipi_get_xs");
	}
#endif

	s = splbio();
	/*
	 * Wait for a command opening to become available.  Rules:
	 *
	 *	- All xfers must wait for an available opening.
	 *	  Exception: URGENT xfers can proceed when
	 *	  active == openings, because we use the opening
	 *	  of the command we're recovering for.
	 *
	 *	- If the periph has sense pending, only URGENT & REQSENSE
	 *	  xfers may proceed.
	 *
	 *	- If the periph is recovering, only URGENT xfers may
	 *	  proceed.
	 *
	 *	- If the periph is currently executing a recovery
	 *	  command, URGENT commands must block, because only
	 *	  one recovery command can execute at a time.
	 */
	for (;;) {
		if (flags & XS_CTL_URGENT) {
			if (periph->periph_active > periph->periph_openings)
				goto wait_for_opening;
			if (periph->periph_flags & PERIPH_SENSE) {
				if ((flags & XS_CTL_REQSENSE) == 0)
					goto wait_for_opening;
			} else {
				if ((periph->periph_flags &
				    PERIPH_RECOVERY_ACTIVE) != 0)
					goto wait_for_opening;
				periph->periph_flags |= PERIPH_RECOVERY_ACTIVE;
			}
			break;
		}
		if (periph->periph_active >= periph->periph_openings ||
		    (periph->periph_flags & PERIPH_RECOVERING) != 0)
			goto wait_for_opening;
		periph->periph_active++;
		break;

 wait_for_opening:
		if (flags & XS_CTL_NOSLEEP) {
			splx(s);
			return (NULL);
		}
		SC_DEBUG(periph, SCSIPI_DB3, ("sleeping\n"));
		periph->periph_flags |= PERIPH_WAITING;
		(void) tsleep(periph, PRIBIO, "getxs", 0);
	}
	SC_DEBUG(periph, SCSIPI_DB3, ("calling pool_get\n"));
	xs = pool_get(&scsipi_xfer_pool,
	    ((flags & XS_CTL_NOSLEEP) != 0 ? PR_NOWAIT : PR_WAITOK));
	if (xs == NULL) {
		if (flags & XS_CTL_URGENT) {
			if ((flags & XS_CTL_REQSENSE) == 0)
				periph->periph_flags &= ~PERIPH_RECOVERY_ACTIVE;
		} else
			periph->periph_active--;
		scsipi_printaddr(periph);
		printf("unable to allocate %sscsipi_xfer\n",
		    (flags & XS_CTL_URGENT) ? "URGENT " : "");
	}
	splx(s);

	SC_DEBUG(periph, SCSIPI_DB3, ("returning\n"));

	if (xs != NULL) {
		memset(xs, 0, sizeof(*xs));
		callout_init(&xs->xs_callout);
		xs->xs_periph = periph;
		xs->xs_control = flags;
		xs->xs_status = 0;
		s = splbio();
		TAILQ_INSERT_TAIL(&periph->periph_xferq, xs, device_q);
		splx(s);
	}
	return (xs);
}

/*
 * scsipi_put_xs:
 *
 *	Release an xfer descriptor, decreasing the outstanding command
 *	count for the peripheral.  If there is a thread waiting for
 *	an opening, wake it up.  If not, kick any queued I/O the
 *	peripheral may have.
 *
 *	NOTE: Must be called at splbio().
 */
void
scsipi_put_xs(struct scsipi_xfer *xs)
{
	struct scsipi_periph *periph = xs->xs_periph;
	int flags = xs->xs_control;

	SC_DEBUG(periph, SCSIPI_DB3, ("scsipi_free_xs\n"));

	TAILQ_REMOVE(&periph->periph_xferq, xs, device_q);
	pool_put(&scsipi_xfer_pool, xs);

#ifdef DIAGNOSTIC
	if ((periph->periph_flags & PERIPH_RECOVERY_ACTIVE) != 0 &&
	    periph->periph_active == 0) {
		scsipi_printaddr(periph);
		printf("recovery without a command to recover for\n");
		panic("scsipi_put_xs");
	}
#endif

	if (flags & XS_CTL_URGENT) {
		if ((flags & XS_CTL_REQSENSE) == 0)
			periph->periph_flags &= ~PERIPH_RECOVERY_ACTIVE;
	} else
		periph->periph_active--;
	if (periph->periph_active == 0 &&
	    (periph->periph_flags & PERIPH_WAITDRAIN) != 0) {
		periph->periph_flags &= ~PERIPH_WAITDRAIN;
		wakeup(&periph->periph_active);
	}

	if (periph->periph_flags & PERIPH_WAITING) {
		periph->periph_flags &= ~PERIPH_WAITING;
		wakeup(periph);
	} else {
		if (periph->periph_switch->psw_start != NULL &&
		    (periph->periph_dev->dv_flags & DVF_ACTIVE)) {
			SC_DEBUG(periph, SCSIPI_DB2,
			    ("calling private start()\n"));
			(*periph->periph_switch->psw_start)(periph);
		}
	}
}

/*
 * scsipi_channel_freeze:
 *
 *	Freeze a channel's xfer queue.
 */
void
scsipi_channel_freeze(struct scsipi_channel *chan, int count)
{
	int s;

	s = splbio();
	chan->chan_qfreeze += count;
	splx(s);
}

/*
 * scsipi_channel_thaw:
 *
 *	Thaw a channel's xfer queue.
 */
void
scsipi_channel_thaw(struct scsipi_channel *chan, int count)
{
	int s;

	s = splbio();
	chan->chan_qfreeze -= count;
	/*
	 * Don't let the freeze count go negative.
	 *
	 * Presumably the adapter driver could keep track of this,
	 * but it might just be easier to do this here so as to allow
	 * multiple callers, including those outside the adapter driver.
	 */
	if (chan->chan_qfreeze < 0) {
		chan->chan_qfreeze = 0;
	}
	splx(s);
	/*
	 * Kick the channel's queue here.  Note, we may be running in
	 * interrupt context (softclock or HBA's interrupt), so the adapter
	 * driver had better not sleep.
	 */
	if (chan->chan_qfreeze == 0)
		scsipi_run_queue(chan);
}

/*
 * scsipi_channel_timed_thaw:
 *
 *	Thaw a channel after some time has expired.  This will also
 *	run the channel's queue if the freeze count has reached 0.
 */
void
scsipi_channel_timed_thaw(void *arg)
{
	struct scsipi_channel *chan = arg;

	scsipi_channel_thaw(chan, 1);
}
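
/*
 * A typical use of the timed-thaw callbacks (sketch; the callout
 * handle `sc->sc_callout' is hypothetical, owned by the caller):
 *
 *	scsipi_channel_freeze(chan, 1);
 *	callout_reset(&sc->sc_callout, hz, scsipi_channel_timed_thaw, chan);
 *
 * scsipi_complete() below uses the periph flavour of this pattern to
 * back off for one second before retrying a busy device.
 */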

/*
 * scsipi_periph_freeze:
 *
 *	Freeze a device's xfer queue.
 */
void
scsipi_periph_freeze(struct scsipi_periph *periph, int count)
{
	int s;

	s = splbio();
	periph->periph_qfreeze += count;
	splx(s);
}

/*
 * scsipi_periph_thaw:
 *
 *	Thaw a device's xfer queue.
 */
void
scsipi_periph_thaw(struct scsipi_periph *periph, int count)
{
	int s;

	s = splbio();
	periph->periph_qfreeze -= count;
#ifdef DIAGNOSTIC
	if (periph->periph_qfreeze < 0) {
		static const char pc[] = "periph freeze count < 0";
		scsipi_printaddr(periph);
		printf("%s\n", pc);
		panic(pc);
	}
#endif
	if (periph->periph_qfreeze == 0 &&
	    (periph->periph_flags & PERIPH_WAITING) != 0)
		wakeup(periph);
	splx(s);
}

/*
 * scsipi_periph_timed_thaw:
 *
 *	Thaw a device after some time has expired.
 */
void
scsipi_periph_timed_thaw(void *arg)
{
	int s;
	struct scsipi_periph *periph = arg;

	callout_stop(&periph->periph_callout);

	s = splbio();
	scsipi_periph_thaw(periph, 1);
	if ((periph->periph_channel->chan_flags & SCSIPI_CHAN_TACTIVE) == 0) {
		/*
		 * Kick the channel's queue here.  Note, we're running in
		 * interrupt context (softclock), so the adapter driver
		 * had better not sleep.
		 */
		scsipi_run_queue(periph->periph_channel);
	} else {
		/*
		 * Tell the completion thread to kick the channel's
		 * queue here.
		 */
		periph->periph_channel->chan_tflags |= SCSIPI_CHANT_KICK;
		wakeup(&periph->periph_channel->chan_complete);
	}
	splx(s);
}

/*
 * scsipi_wait_drain:
 *
 *	Wait for a periph's pending xfers to drain.
 */
void
scsipi_wait_drain(struct scsipi_periph *periph)
{
	int s;

	s = splbio();
	while (periph->periph_active != 0) {
		periph->periph_flags |= PERIPH_WAITDRAIN;
		(void) tsleep(&periph->periph_active, PRIBIO, "sxdrn", 0);
	}
	splx(s);
}

/*
 * scsipi_kill_pending:
 *
 *	Kill off all pending xfers for a periph.
 *
 *	NOTE: Must be called at splbio().
 */
void
scsipi_kill_pending(struct scsipi_periph *periph)
{

	(*periph->periph_channel->chan_bustype->bustype_kill_pending)(periph);
	scsipi_wait_drain(periph);
}

/*
 * scsipi_print_cdb:
 *	prints a command descriptor block (for debug purposes, error
 *	messages, SCSIPI_VERBOSE, ...)
 */
void
scsipi_print_cdb(struct scsipi_generic *cmd)
{
	int i, j;

	printf("0x%02x", cmd->opcode);

	switch (CDB_GROUPID(cmd->opcode)) {
	case CDB_GROUPID_0:
		j = CDB_GROUP0;
		break;
	case CDB_GROUPID_1:
		j = CDB_GROUP1;
		break;
	case CDB_GROUPID_2:
		j = CDB_GROUP2;
		break;
	case CDB_GROUPID_3:
		j = CDB_GROUP3;
		break;
	case CDB_GROUPID_4:
		j = CDB_GROUP4;
		break;
	case CDB_GROUPID_5:
		j = CDB_GROUP5;
		break;
	case CDB_GROUPID_6:
		j = CDB_GROUP6;
		break;
	case CDB_GROUPID_7:
		j = CDB_GROUP7;
		break;
	default:
		j = 0;
	}
	if (j == 0)
		j = sizeof (cmd->bytes);
	for (i = 0; i < j - 1; i++)	/* already done the opcode */
		printf(" %02x", cmd->bytes[i]);
}
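
/*
 * Example (illustrative): the CDB group is the top three bits of the
 * opcode, so READ(10) (opcode 0x28, group 1) prints CDB_GROUP1 == 10
 * bytes, while the 6-byte READ (opcode 0x08, group 0) prints
 * CDB_GROUP0 == 6 bytes.
 */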

/*
 * scsipi_interpret_sense:
 *
 *	Look at the returned sense and act on the error, determining
 *	the unix error number to pass back.  (0 = report no error)
 *
 *	NOTE: If we return ERESTART, we are expected to have
 *	thawed the device!
 *
 *	THIS IS THE DEFAULT ERROR HANDLER FOR SCSI DEVICES.
 */
int
scsipi_interpret_sense(struct scsipi_xfer *xs)
{
	struct scsipi_sense_data *sense;
	struct scsipi_periph *periph = xs->xs_periph;
	u_int8_t key;
	int error;
#ifndef SCSIVERBOSE
	u_int32_t info;
	static char *error_mes[] = {
		"soft error (corrected)",
		"not ready", "medium error",
		"non-media hardware failure", "illegal request",
		"unit attention", "readonly device",
		"no data found", "vendor unique",
		"copy aborted", "command aborted",
		"search returned equal", "volume overflow",
		"verify miscompare", "unknown error key"
	};
#endif

	sense = &xs->sense.scsi_sense;
#ifdef SCSIPI_DEBUG
	if (periph->periph_flags & SCSIPI_DB1) {
		int count;
		scsipi_printaddr(periph);
		printf(" sense debug information:\n");
		printf("\tcode 0x%x valid 0x%x\n",
		    sense->error_code & SSD_ERRCODE,
		    sense->error_code & SSD_ERRCODE_VALID ? 1 : 0);
		printf("\tseg 0x%x key 0x%x ili 0x%x eom 0x%x fmark 0x%x\n",
		    sense->segment,
		    sense->flags & SSD_KEY,
		    sense->flags & SSD_ILI ? 1 : 0,
		    sense->flags & SSD_EOM ? 1 : 0,
		    sense->flags & SSD_FILEMARK ? 1 : 0);
		printf("\ninfo: 0x%x 0x%x 0x%x 0x%x followed by %d "
		    "extra bytes\n",
		    sense->info[0],
		    sense->info[1],
		    sense->info[2],
		    sense->info[3],
		    sense->extra_len);
		printf("\textra: ");
		for (count = 0; count < ADD_BYTES_LIM(sense); count++)
			printf("0x%x ", sense->cmd_spec_info[count]);
		printf("\n");
	}
#endif

	/*
	 * If the periph has its own error handler, call it first.
	 * If it returns a legit error value, return that, otherwise
	 * it wants us to continue with normal error processing.
	 */
	if (periph->periph_switch->psw_error != NULL) {
		SC_DEBUG(periph, SCSIPI_DB2,
		    ("calling private err_handler()\n"));
		error = (*periph->periph_switch->psw_error)(xs);
		if (error != EJUSTRETURN)
			return (error);
	}
	/* otherwise use the default */
	switch (sense->error_code & SSD_ERRCODE) {

		/*
		 * Old SCSI-1 and SASI devices respond with
		 * codes other than 70.
		 */
	case 0x00:	/* no error (command completed OK) */
		return (0);
	case 0x04:	/* drive not ready after it was selected */
		if ((periph->periph_flags & PERIPH_REMOVABLE) != 0)
			periph->periph_flags &= ~PERIPH_MEDIA_LOADED;
		if ((xs->xs_control & XS_CTL_IGNORE_NOT_READY) != 0)
			return (0);
		/* XXX - display some sort of error here? */
		return (EIO);
	case 0x20:	/* invalid command */
		if ((xs->xs_control &
		    XS_CTL_IGNORE_ILLEGAL_REQUEST) != 0)
			return (0);
		return (EINVAL);
	case 0x25:	/* invalid LUN (Adaptec ACB-4000) */
		return (EACCES);

		/*
		 * If it's code 70, use the extended stuff and
		 * interpret the key
		 */
	case 0x71:	/* delayed error */
		scsipi_printaddr(periph);
		key = sense->flags & SSD_KEY;
		printf(" DEFERRED ERROR, key = 0x%x\n", key);
		/* FALLTHROUGH */
	case 0x70:
#ifndef SCSIVERBOSE
		if ((sense->error_code & SSD_ERRCODE_VALID) != 0)
			info = _4btol(sense->info);
		else
			info = 0;
#endif
		key = sense->flags & SSD_KEY;

		switch (key) {
		case SKEY_NO_SENSE:
		case SKEY_RECOVERED_ERROR:
			if (xs->resid == xs->datalen && xs->datalen) {
				/*
				 * Why is this here?
				 */
				xs->resid = 0;	/* not short read */
			}
		case SKEY_EQUAL:
			error = 0;
			break;
		case SKEY_NOT_READY:
			if ((periph->periph_flags & PERIPH_REMOVABLE) != 0)
				periph->periph_flags &= ~PERIPH_MEDIA_LOADED;
			if ((xs->xs_control & XS_CTL_IGNORE_NOT_READY) != 0)
				return (0);
			if (sense->add_sense_code == 0x3A) {
				error = ENODEV; /* Medium not present */
				if (xs->xs_control & XS_CTL_SILENT_NODEV)
					return (error);
			} else
				error = EIO;
			if ((xs->xs_control & XS_CTL_SILENT) != 0)
				return (error);
			break;
		case SKEY_ILLEGAL_REQUEST:
			if ((xs->xs_control &
			    XS_CTL_IGNORE_ILLEGAL_REQUEST) != 0)
				return (0);
			/*
			 * Handle the case where a device reports
			 * Logical Unit Not Supported during discovery.
			 */
			if ((xs->xs_control & XS_CTL_DISCOVERY) != 0 &&
			    sense->add_sense_code == 0x25 &&
			    sense->add_sense_code_qual == 0x00)
				return (EINVAL);
			if ((xs->xs_control & XS_CTL_SILENT) != 0)
				return (EIO);
			error = EINVAL;
			break;
		case SKEY_UNIT_ATTENTION:
			if (sense->add_sense_code == 0x29 &&
			    sense->add_sense_code_qual == 0x00) {
				/* device or bus reset */
				return (ERESTART);
			}
			if ((periph->periph_flags & PERIPH_REMOVABLE) != 0)
				periph->periph_flags &= ~PERIPH_MEDIA_LOADED;
			if ((xs->xs_control &
			    XS_CTL_IGNORE_MEDIA_CHANGE) != 0 ||
			    /* XXX Should reupload any transient state. */
			    (periph->periph_flags &
			     PERIPH_REMOVABLE) == 0) {
				return (ERESTART);
			}
			if ((xs->xs_control & XS_CTL_SILENT) != 0)
				return (EIO);
			error = EIO;
			break;
		case SKEY_WRITE_PROTECT:
			error = EROFS;
			break;
		case SKEY_BLANK_CHECK:
			error = 0;
			break;
		case SKEY_ABORTED_COMMAND:
			if (xs->xs_retries != 0) {
				xs->xs_retries--;
				error = ERESTART;
			} else
				error = EIO;
			break;
		case SKEY_VOLUME_OVERFLOW:
			error = ENOSPC;
			break;
		default:
			error = EIO;
			break;
		}

#ifdef SCSIVERBOSE
		if (key && (xs->xs_control & XS_CTL_SILENT) == 0)
			scsipi_print_sense(xs, 0);
#else
		if (key) {
			scsipi_printaddr(periph);
			printf("%s", error_mes[key - 1]);
			if ((sense->error_code & SSD_ERRCODE_VALID) != 0) {
				switch (key) {
				case SKEY_NOT_READY:
				case SKEY_ILLEGAL_REQUEST:
				case SKEY_UNIT_ATTENTION:
				case SKEY_WRITE_PROTECT:
					break;
				case SKEY_BLANK_CHECK:
					printf(", requested size: %d (decimal)",
					    info);
					break;
				case SKEY_ABORTED_COMMAND:
					if (xs->xs_retries)
						printf(", retrying");
					printf(", cmd 0x%x, info 0x%x",
					    xs->cmd->opcode, info);
					break;
				default:
					printf(", info = %d (decimal)", info);
				}
			}
			if (sense->extra_len != 0) {
				int n;
				printf(", data =");
				for (n = 0; n < sense->extra_len; n++)
					printf(" %02x",
					    sense->cmd_spec_info[n]);
			}
			printf("\n");
		}
#endif
		return (error);

	/*
	 * Some other code, just report it
	 */
	default:
#if defined(SCSIDEBUG) || defined(DEBUG)
	{
		static char *uc = "undecodable sense error";
		int i;
		u_int8_t *cptr = (u_int8_t *) sense;
		scsipi_printaddr(periph);
		if (xs->cmd == &xs->cmdstore) {
			printf("%s for opcode 0x%x, data=",
			    uc, xs->cmdstore.opcode);
		} else {
			printf("%s, data=", uc);
		}
		/* dump the whole sense structure, not just a pointer's worth */
		for (i = 0; i < sizeof (*sense); i++)
			printf(" 0x%02x", *(cptr++) & 0xff);
		printf("\n");
	}
#else
		scsipi_printaddr(periph);
		printf("Sense Error Code 0x%x",
		    sense->error_code & SSD_ERRCODE);
		if ((sense->error_code & SSD_ERRCODE_VALID) != 0) {
			struct scsipi_sense_data_unextended *usense =
			    (struct scsipi_sense_data_unextended *)sense;
			printf(" at block no. %d (decimal)",
			    _3btol(usense->block));
		}
		printf("\n");
#endif
		return (EIO);
	}
}
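
/*
 * Worked example of the default handler above (illustrative):
 * extended sense (error code 0x70) with key SKEY_NOT_READY and
 * ASC 0x3A ("medium not present") maps to ENODEV, returned silently
 * if XS_CTL_SILENT_NODEV is set; the same key with any other ASC
 * maps to EIO.
 */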

/*
 * scsipi_size:
 *
 *	Find out from the device what its capacity is.
 */
u_int64_t
scsipi_size(struct scsipi_periph *periph, int flags)
{
	struct scsipi_read_cap_data rdcap;
	struct scsipi_read_capacity scsipi_cmd;

	memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
	scsipi_cmd.opcode = READ_CAPACITY;

	/*
	 * If the command works, interpret the result as a 4 byte
	 * number of blocks
	 */
	if (scsipi_command(periph, (struct scsipi_generic *)&scsipi_cmd,
	    sizeof(scsipi_cmd), (u_char *)&rdcap, sizeof(rdcap),
	    SCSIPIRETRIES, 20000, NULL,
	    flags | XS_CTL_DATA_IN | XS_CTL_DATA_ONSTACK | XS_CTL_SILENT) != 0)
		return (0);

	return (_4btol(rdcap.addr) + 1);
}
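
/*
 * READ CAPACITY returns the address of the *last* block, hence the
 * "+ 1" above.  Example (illustrative): a device answering with
 * _4btol(rdcap.addr) == 0x003fffff has 0x00400000 (4194304) blocks.
 */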

/*
 * scsipi_test_unit_ready:
 *
 *	Issue a `test unit ready' request.
 */
int
scsipi_test_unit_ready(struct scsipi_periph *periph, int flags)
{
	int retries;
	struct scsipi_test_unit_ready scsipi_cmd;

	/* some ATAPI drives don't support TEST_UNIT_READY. Sigh */
	if (periph->periph_quirks & PQUIRK_NOTUR)
		return (0);

	memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
	scsipi_cmd.opcode = TEST_UNIT_READY;

	if (flags & XS_CTL_DISCOVERY)
		retries = 0;
	else
		retries = SCSIPIRETRIES;

	return (scsipi_command(periph,
	    (struct scsipi_generic *)&scsipi_cmd, sizeof(scsipi_cmd),
	    0, 0, retries, 10000, NULL, flags));
}

/*
 * scsipi_inquire:
 *
 *	Ask the device about itself.
 */
int
scsipi_inquire(struct scsipi_periph *periph, struct scsipi_inquiry_data *inqbuf,
    int flags)
{
	int retries;
	struct scsipi_inquiry scsipi_cmd;
	int error;

	memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
	scsipi_cmd.opcode = INQUIRY;

	if (flags & XS_CTL_DISCOVERY)
		retries = 0;
	else
		retries = SCSIPIRETRIES;

	/*
	 * If we request more data than the device can provide, it SHOULD just
	 * return a short response.  However, some devices error with an
	 * ILLEGAL REQUEST sense code, and yet others have even more special
	 * failure modes (such as the GL641USB flash adapter, which goes loony
	 * and sends corrupted CRCs).  To work around this, and to bring our
	 * behavior more in line with other OSes, we do a shorter inquiry,
	 * covering all the SCSI-2 information, first, and then request more
	 * data iff the "additional length" field indicates there is more.
	 * - mycroft, 2003/10/16
	 */
	scsipi_cmd.length = SCSIPI_INQUIRY_LENGTH_SCSI2;
	error = scsipi_command(periph,
	    (struct scsipi_generic *) &scsipi_cmd, sizeof(scsipi_cmd),
	    (u_char *) inqbuf, SCSIPI_INQUIRY_LENGTH_SCSI2,
	    retries, 10000, NULL, XS_CTL_DATA_IN | flags);
	if (!error &&
	    inqbuf->additional_length > SCSIPI_INQUIRY_LENGTH_SCSI2 - 4) {
		scsipi_cmd.length = SCSIPI_INQUIRY_LENGTH_SCSI3;
		error = scsipi_command(periph,
		    (struct scsipi_generic *) &scsipi_cmd, sizeof(scsipi_cmd),
		    (u_char *) inqbuf, SCSIPI_INQUIRY_LENGTH_SCSI3,
		    retries, 10000, NULL, XS_CTL_DATA_IN | flags);
	}

#ifdef SCSI_OLD_NOINQUIRY
	/*
	 * Kludge for the Adaptec ACB-4000 SCSI->MFM translator.
	 * This board doesn't support the INQUIRY command at all.
	 */
	if (error == EINVAL || error == EACCES) {
		/*
		 * Conjure up an INQUIRY response.
		 */
		inqbuf->device = (error == EINVAL ?
		    SID_QUAL_LU_PRESENT :
		    SID_QUAL_LU_NOTPRESENT) | T_DIRECT;
		inqbuf->dev_qual2 = 0;
		inqbuf->version = 0;
		inqbuf->response_format = SID_FORMAT_SCSI1;
		inqbuf->additional_length = SCSIPI_INQUIRY_LENGTH_SCSI2 - 4;
		inqbuf->flags1 = inqbuf->flags2 = inqbuf->flags3 = 0;
		memcpy(inqbuf->vendor, "ADAPTEC ACB-4000            ", 28);
		error = 0;
	}

	/*
	 * Kludge for the Emulex MT-02 SCSI->QIC translator.
	 * This board gives an empty response to an INQUIRY command.
	 */
	else if (error == 0 &&
	    inqbuf->device == (SID_QUAL_LU_PRESENT | T_DIRECT) &&
	    inqbuf->dev_qual2 == 0 &&
	    inqbuf->version == 0 &&
	    inqbuf->response_format == SID_FORMAT_SCSI1) {
		/*
		 * Fill out the INQUIRY response.
		 */
		inqbuf->device = (SID_QUAL_LU_PRESENT | T_SEQUENTIAL);
		inqbuf->dev_qual2 = SID_REMOVABLE;
		inqbuf->additional_length = SCSIPI_INQUIRY_LENGTH_SCSI2 - 4;
		inqbuf->flags1 = inqbuf->flags2 = inqbuf->flags3 = 0;
		memcpy(inqbuf->vendor, "EMULEX  MT-02 QIC           ", 28);
	}
#endif /* SCSI_OLD_NOINQUIRY */

	return error;
}
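
/*
 * Example of the two-step INQUIRY above (illustrative): if the short
 * SCSI-2-sized response carries additional_length == 70, then
 * 70 > SCSIPI_INQUIRY_LENGTH_SCSI2 - 4, so the command is reissued
 * with the larger SCSIPI_INQUIRY_LENGTH_SCSI3 allocation length to
 * fetch the bytes the first pass left behind.
 */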

/*
 * scsipi_prevent:
 *
 *	Prevent or allow the user to remove the media
 */
int
scsipi_prevent(struct scsipi_periph *periph, int type, int flags)
{
	struct scsipi_prevent scsipi_cmd;

	memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
	scsipi_cmd.opcode = PREVENT_ALLOW;
	scsipi_cmd.how = type;

	return (scsipi_command(periph,
	    (struct scsipi_generic *) &scsipi_cmd, sizeof(scsipi_cmd),
	    0, 0, SCSIPIRETRIES, 5000, NULL, flags));
}

/*
 * scsipi_start:
 *
 *	Send a START UNIT.
 */
int
scsipi_start(struct scsipi_periph *periph, int type, int flags)
{
	struct scsipi_start_stop scsipi_cmd;

	memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
	scsipi_cmd.opcode = START_STOP;
	scsipi_cmd.byte2 = 0x00;
	scsipi_cmd.how = type;

	return (scsipi_command(periph,
	    (struct scsipi_generic *) &scsipi_cmd, sizeof(scsipi_cmd),
	    0, 0, SCSIPIRETRIES, (type & SSS_START) ? 60000 : 10000,
	    NULL, flags));
}

/*
 * scsipi_mode_sense, scsipi_mode_sense_big:
 *	get a sense page from a device
 */

int
scsipi_mode_sense(struct scsipi_periph *periph, int byte2, int page,
    struct scsipi_mode_header *data, int len, int flags, int retries,
    int timeout)
{
	struct scsipi_mode_sense scsipi_cmd;
	int error;

	memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
	scsipi_cmd.opcode = MODE_SENSE;
	scsipi_cmd.byte2 = byte2;
	scsipi_cmd.page = page;
	scsipi_cmd.length = len & 0xff;
	error = scsipi_command(periph, (struct scsipi_generic *)&scsipi_cmd,
	    sizeof(scsipi_cmd), (void *)data, len, retries, timeout, NULL,
	    flags | XS_CTL_DATA_IN);
	SC_DEBUG(periph, SCSIPI_DB2,
	    ("scsipi_mode_sense: error=%d\n", error));
	return (error);
}

int
scsipi_mode_sense_big(struct scsipi_periph *periph, int byte2, int page,
    struct scsipi_mode_header_big *data, int len, int flags, int retries,
    int timeout)
{
	struct scsipi_mode_sense_big scsipi_cmd;
	int error;

	memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
	scsipi_cmd.opcode = MODE_SENSE_BIG;
	scsipi_cmd.byte2 = byte2;
	scsipi_cmd.page = page;
	_lto2b(len, scsipi_cmd.length);
	error = scsipi_command(periph, (struct scsipi_generic *)&scsipi_cmd,
	    sizeof(scsipi_cmd), (void *)data, len, retries, timeout, NULL,
	    flags | XS_CTL_DATA_IN);
	SC_DEBUG(periph, SCSIPI_DB2,
	    ("scsipi_mode_sense_big: error=%d\n", error));
	return (error);
}

int
scsipi_mode_select(struct scsipi_periph *periph, int byte2,
    struct scsipi_mode_header *data, int len, int flags, int retries,
    int timeout)
{
	struct scsipi_mode_select scsipi_cmd;
	int error;

	memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
	scsipi_cmd.opcode = MODE_SELECT;
	scsipi_cmd.byte2 = byte2;
	scsipi_cmd.length = len & 0xff;
	error = scsipi_command(periph, (struct scsipi_generic *)&scsipi_cmd,
	    sizeof(scsipi_cmd), (void *)data, len, retries, timeout, NULL,
	    flags | XS_CTL_DATA_OUT);
	SC_DEBUG(periph, SCSIPI_DB2,
	    ("scsipi_mode_select: error=%d\n", error));
	return (error);
}

int
scsipi_mode_select_big(struct scsipi_periph *periph, int byte2,
    struct scsipi_mode_header_big *data, int len, int flags, int retries,
    int timeout)
{
	struct scsipi_mode_select_big scsipi_cmd;
	int error;

	memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
	scsipi_cmd.opcode = MODE_SELECT_BIG;
	scsipi_cmd.byte2 = byte2;
	_lto2b(len, scsipi_cmd.length);
	error = scsipi_command(periph, (struct scsipi_generic *)&scsipi_cmd,
	    sizeof(scsipi_cmd), (void *)data, len, retries, timeout, NULL,
	    flags | XS_CTL_DATA_OUT);
	SC_DEBUG(periph, SCSIPI_DB2,
	    ("scsipi_mode_select_big: error=%d\n", error));
	return (error);
}

/*
 * scsipi_done:
 *
 *	This routine is called by an adapter's interrupt handler when
 *	an xfer is completed.
 */
void
scsipi_done(struct scsipi_xfer *xs)
{
	struct scsipi_periph *periph = xs->xs_periph;
	struct scsipi_channel *chan = periph->periph_channel;
	int s, freezecnt;

	SC_DEBUG(periph, SCSIPI_DB2, ("scsipi_done\n"));
#ifdef SCSIPI_DEBUG
	if (periph->periph_dbflags & SCSIPI_DB1)
		show_scsipi_cmd(xs);
#endif

	s = splbio();
	/*
	 * The resource this command was using is now free.
	 */
	scsipi_put_resource(chan);
	xs->xs_periph->periph_sent--;

	/*
	 * If the command was tagged, free the tag.
	 */
	if (XS_CTL_TAGTYPE(xs) != 0)
		scsipi_put_tag(xs);
	else
		periph->periph_flags &= ~PERIPH_UNTAG;

	/* Mark the command as `done'. */
	xs->xs_status |= XS_STS_DONE;

#ifdef DIAGNOSTIC
	if ((xs->xs_control & (XS_CTL_ASYNC|XS_CTL_POLL)) ==
	    (XS_CTL_ASYNC|XS_CTL_POLL))
		panic("scsipi_done: ASYNC and POLL");
#endif

	/*
	 * If the xfer had an error of any sort, freeze the
	 * periph's queue.  Freeze it again if we were requested
	 * to do so in the xfer.
	 */
	freezecnt = 0;
	if (xs->error != XS_NOERROR)
		freezecnt++;
	if (xs->xs_control & XS_CTL_FREEZE_PERIPH)
		freezecnt++;
	if (freezecnt != 0)
		scsipi_periph_freeze(periph, freezecnt);

	/*
	 * Record the xfer with a pending sense, in case a SCSI reset is
	 * received before the thread is woken up.
	 */
	if (xs->error == XS_BUSY && xs->status == SCSI_CHECK) {
		periph->periph_flags |= PERIPH_SENSE;
		periph->periph_xscheck = xs;
	}

	/*
	 * If this was an xfer that was not to complete asynchronously,
	 * let the requesting thread perform error checking/handling
	 * in its context.
	 */
	if ((xs->xs_control & XS_CTL_ASYNC) == 0) {
		splx(s);
		/*
		 * If it's a polling job, just return, to unwind the
		 * call graph.  We don't need to restart the queue,
		 * because polling jobs are treated specially, and
		 * are really only used during crash dumps anyway
		 * (XXX or during boot-time autoconfiguration of
		 * ATAPI devices).
		 */
		if (xs->xs_control & XS_CTL_POLL)
			return;
		wakeup(xs);
		goto out;
	}

	/*
	 * Catch the extremely common case of I/O completing
	 * without error; no use in taking a context switch
	 * if we can handle it in interrupt context.
	 */
	if (xs->error == XS_NOERROR) {
		splx(s);
		(void) scsipi_complete(xs);
		goto out;
	}

	/*
	 * There is an error on this xfer.  Put it on the channel's
	 * completion queue, and wake up the completion thread.
	 */
	TAILQ_INSERT_TAIL(&chan->chan_complete, xs, channel_q);
	splx(s);
	wakeup(&chan->chan_complete);

 out:
	/*
	 * If there are more xfers on the channel's queue, attempt to
	 * run them.
	 */
	scsipi_run_queue(chan);
}

/*
 * scsipi_complete:
 *
 *	Completion of a scsipi_xfer.  This is the guts of scsipi_done().
 *
 *	NOTE: This routine MUST be called with valid thread context
 *	except for the case where the following two conditions are
 *	true:
 *
 *		xs->error == XS_NOERROR
 *		XS_CTL_ASYNC is set in xs->xs_control
 *
 *	The semantics of this routine can be tricky, so here is an
 *	explanation:
 *
 *		0		Xfer completed successfully.
 *
 *		ERESTART	Xfer had an error, but was restarted.
 *
 *		anything else	Xfer had an error, return value is Unix
 *				errno.
 *
 *	If the return value is anything but ERESTART:
 *
 *		- If XS_CTL_ASYNC is set, `xs' has been freed back to
 *		  the pool.
 *
 *		- If there is a buf associated with the xfer,
 *		  it has been biodone()'d.
 */
static int
scsipi_complete(struct scsipi_xfer *xs)
{
	struct scsipi_periph *periph = xs->xs_periph;
	struct scsipi_channel *chan = periph->periph_channel;
	struct buf *bp;
	int error, s;

#ifdef DIAGNOSTIC
	if ((xs->xs_control & XS_CTL_ASYNC) != 0 && xs->bp == NULL)
		panic("scsipi_complete: XS_CTL_ASYNC but no buf");
#endif
	/*
	 * If command terminated with a CHECK CONDITION, we need to issue a
	 * REQUEST_SENSE command.  Once the REQUEST_SENSE has been processed
	 * we'll have the real status.
	 * Must be processed at splbio() to avoid missing a SCSI bus reset
	 * for this command.
	 */
	s = splbio();
	if (xs->error == XS_BUSY && xs->status == SCSI_CHECK) {
		/* request sense for a request sense? */
		if (xs->xs_control & XS_CTL_REQSENSE) {
			scsipi_printaddr(periph);
			printf("request sense for a request sense?\n");
			/* XXX maybe we should reset the device? */
			/* we've been frozen because xs->error != XS_NOERROR */
			scsipi_periph_thaw(periph, 1);
			splx(s);
			if (xs->resid < xs->datalen) {
				printf("we read %d bytes of sense anyway:\n",
				    xs->datalen - xs->resid);
#ifdef SCSIVERBOSE
				scsipi_print_sense_data((void *)xs->data, 0);
#endif
			}
			return EINVAL;
		}
		scsipi_request_sense(xs);
	}
	splx(s);

	/*
	 * If it's a user level request, bypass all usual completion
	 * processing, let the user work it out..
	 */
	if ((xs->xs_control & XS_CTL_USERCMD) != 0) {
		SC_DEBUG(periph, SCSIPI_DB3, ("calling user done()\n"));
		if (xs->error != XS_NOERROR)
			scsipi_periph_thaw(periph, 1);
		scsipi_user_done(xs);
		SC_DEBUG(periph, SCSIPI_DB3, ("returned from user done()\n"));
		return 0;
	}

	switch (xs->error) {
	case XS_NOERROR:
		error = 0;
		break;

	case XS_SENSE:
	case XS_SHORTSENSE:
		error = (*chan->chan_bustype->bustype_interpret_sense)(xs);
		break;

	case XS_RESOURCE_SHORTAGE:
		/*
		 * XXX Should freeze channel's queue.
		 */
		scsipi_printaddr(periph);
		printf("adapter resource shortage\n");
		/* FALLTHROUGH */

	case XS_BUSY:
		if (xs->error == XS_BUSY && xs->status == SCSI_QUEUE_FULL) {
			struct scsipi_max_openings mo;

			/*
			 * We set the openings to active - 1, assuming that
			 * the command that got us here is the first one that
			 * can't fit into the device's queue.  If that's not
			 * the case, I guess we'll find out soon enough.
			 */
			mo.mo_target = periph->periph_target;
			mo.mo_lun = periph->periph_lun;
			if (periph->periph_active < periph->periph_openings)
				mo.mo_openings = periph->periph_active - 1;
			else
				mo.mo_openings = periph->periph_openings - 1;
#ifdef DIAGNOSTIC
			if (mo.mo_openings < 0) {
				scsipi_printaddr(periph);
				printf("QUEUE FULL resulted in < 0 openings\n");
				panic("scsipi_done");
			}
#endif
			if (mo.mo_openings == 0) {
				scsipi_printaddr(periph);
				printf("QUEUE FULL resulted in 0 openings\n");
				mo.mo_openings = 1;
			}
			scsipi_async_event(chan, ASYNC_EVENT_MAX_OPENINGS, &mo);
			error = ERESTART;
		} else if (xs->xs_retries != 0) {
			xs->xs_retries--;
			/*
			 * Wait one second, and try again.
			 */
			if ((xs->xs_control & XS_CTL_POLL) ||
			    (chan->chan_flags & SCSIPI_CHAN_TACTIVE) == 0) {
				delay(1000000);
			} else if (!callout_pending(&periph->periph_callout)) {
				scsipi_periph_freeze(periph, 1);
				callout_reset(&periph->periph_callout,
				    hz, scsipi_periph_timed_thaw, periph);
			}
			error = ERESTART;
		} else
			error = EBUSY;
		break;

	case XS_REQUEUE:
		error = ERESTART;
		break;

	case XS_SELTIMEOUT:
	case XS_TIMEOUT:
		/*
		 * If the device hasn't gone away, honor retry counts.
		 *
		 * Note that if we're in the middle of probing it,
		 * it won't be found because it isn't here yet so
		 * we won't honor the retry count in that case.
		 */
		if (scsipi_lookup_periph(chan, periph->periph_target,
		    periph->periph_lun) && xs->xs_retries != 0) {
			xs->xs_retries--;
			error = ERESTART;
		} else
			error = EIO;
		break;

	case XS_RESET:
		if (xs->xs_control & XS_CTL_REQSENSE) {
			/*
			 * request sense interrupted by reset: signal it
			 * with EINTR return code.
			 */
			error = EINTR;
		} else {
			if (xs->xs_retries != 0) {
				xs->xs_retries--;
				error = ERESTART;
			} else
				error = EIO;
		}
		break;

	case XS_DRIVER_STUFFUP:
		scsipi_printaddr(periph);
		printf("generic HBA error\n");
		error = EIO;
		break;
	default:
		scsipi_printaddr(periph);
		printf("invalid return code from adapter: %d\n", xs->error);
		error = EIO;
		break;
	}

	s = splbio();
	if (error == ERESTART) {
		/*
		 * If we get here, the periph has been thawed and frozen
		 * again if we had to issue recovery commands.  Alternatively,
		 * it may have been frozen again and in a timed thaw.  In
		 * any case, we thaw the periph once we re-enqueue the
		 * command.  Once the periph is fully thawed, it will begin
		 * operation again.
		 */
		xs->error = XS_NOERROR;
		xs->status = SCSI_OK;
		xs->xs_status &= ~XS_STS_DONE;
		xs->xs_requeuecnt++;
		error = scsipi_enqueue(xs);
		if (error == 0) {
			scsipi_periph_thaw(periph, 1);
			splx(s);
			return (ERESTART);
		}
	}

	/*
	 * scsipi_done() freezes the queue if not XS_NOERROR.
	 * Thaw it here.
	 */
	if (xs->error != XS_NOERROR)
		scsipi_periph_thaw(periph, 1);

	/*
	 * Set buffer fields in case the periph
	 * switch done func uses them
	 */
	if ((bp = xs->bp) != NULL) {
		if (error) {
			bp->b_error = error;
			bp->b_flags |= B_ERROR;
			bp->b_resid = bp->b_bcount;
		} else {
			bp->b_error = 0;
			bp->b_resid = xs->resid;
		}
	}

	if (periph->periph_switch->psw_done)
		periph->periph_switch->psw_done(xs);

	if (bp)
		biodone(bp);

	if (xs->xs_control & XS_CTL_ASYNC)
		scsipi_put_xs(xs);
	splx(s);

	return (error);
}

/*
 * Issue a request sense for the given scsipi_xfer.  Called when the xfer
 * returns with a CHECK_CONDITION status.  Must be called in valid thread
 * context and at splbio().
 */

static void
scsipi_request_sense(struct scsipi_xfer *xs)
{
	struct scsipi_periph *periph = xs->xs_periph;
	int flags, error;
	struct scsipi_sense cmd;

	periph->periph_flags |= PERIPH_SENSE;

	/* if command was polling, request sense will too */
	flags = xs->xs_control & XS_CTL_POLL;
	/* Polling commands can't sleep */
	if (flags)
		flags |= XS_CTL_NOSLEEP;

	flags |= XS_CTL_REQSENSE | XS_CTL_URGENT | XS_CTL_DATA_IN |
	    XS_CTL_THAW_PERIPH | XS_CTL_FREEZE_PERIPH;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = REQUEST_SENSE;
	cmd.length = sizeof(struct scsipi_sense_data);

	error = scsipi_command(periph,
	    (struct scsipi_generic *) &cmd, sizeof(cmd),
	    (u_char*)&xs->sense.scsi_sense, sizeof(struct scsipi_sense_data),
	    0, 1000, NULL, flags);
	periph->periph_flags &= ~PERIPH_SENSE;
	periph->periph_xscheck = NULL;
	switch (error) {
	case 0:
		/* we have a valid sense */
		xs->error = XS_SENSE;
		return;
	case EINTR:
		/* REQUEST_SENSE interrupted by bus reset. */
		xs->error = XS_RESET;
		return;
	case EIO:
		/* request sense couldn't be performed */
		/*
		 * XXX this isn't quite right but we don't have anything
		 * better for now
		 */
		xs->error = XS_DRIVER_STUFFUP;
		return;
	default:
		/* Notify that request sense failed. */
		xs->error = XS_DRIVER_STUFFUP;
		scsipi_printaddr(periph);
		printf("request sense failed with error %d\n", error);
		return;
	}
}

/*
 * scsipi_enqueue:
 *
 *	Enqueue an xfer on a channel.
 */
static int
scsipi_enqueue(struct scsipi_xfer *xs)
{
	struct scsipi_channel *chan = xs->xs_periph->periph_channel;
	struct scsipi_xfer *qxs;
	int s;

	s = splbio();

	/*
	 * If the xfer is to be polled, and there are already jobs on
	 * the queue, we can't proceed.
	 */
	if ((xs->xs_control & XS_CTL_POLL) != 0 &&
	    TAILQ_FIRST(&chan->chan_queue) != NULL) {
		splx(s);
		xs->error = XS_DRIVER_STUFFUP;
		return (EAGAIN);
	}

	/*
	 * If we have an URGENT xfer, it's an error recovery command
	 * and it should just go on the head of the channel's queue.
	 */
	if (xs->xs_control & XS_CTL_URGENT) {
		TAILQ_INSERT_HEAD(&chan->chan_queue, xs, channel_q);
		goto out;
	}

	/*
	 * If this xfer has already been on the queue before, we
	 * need to reinsert it in the correct order.  That order is:
	 *
	 *	Immediately before the first xfer for this periph
	 *	with a requeuecnt less than xs->xs_requeuecnt.
	 *
	 * Failing that, at the end of the queue.  (We'll end up
	 * there naturally.)
	 */
	if (xs->xs_requeuecnt != 0) {
		for (qxs = TAILQ_FIRST(&chan->chan_queue); qxs != NULL;
		     qxs = TAILQ_NEXT(qxs, channel_q)) {
			if (qxs->xs_periph == xs->xs_periph &&
			    qxs->xs_requeuecnt < xs->xs_requeuecnt)
				break;
		}
		if (qxs != NULL) {
			TAILQ_INSERT_AFTER(&chan->chan_queue, qxs, xs,
			    channel_q);
			goto out;
		}
	}
	TAILQ_INSERT_TAIL(&chan->chan_queue, xs, channel_q);
 out:
	if (xs->xs_control & XS_CTL_THAW_PERIPH)
		scsipi_periph_thaw(xs->xs_periph, 1);
	splx(s);
	return (0);
}

/*
 * scsipi_run_queue:
 *
 *	Start as many xfers as possible running on the channel.
 */
static void
scsipi_run_queue(struct scsipi_channel *chan)
{
	struct scsipi_xfer *xs;
	struct scsipi_periph *periph;
	int s;

	for (;;) {
		s = splbio();

		/*
		 * If the channel is frozen, we can't do any work right
		 * now.
		 */
		if (chan->chan_qfreeze != 0) {
			splx(s);
			return;
		}

		/*
		 * Look for work to do, and make sure we can do it.
		 */
		for (xs = TAILQ_FIRST(&chan->chan_queue); xs != NULL;
		     xs = TAILQ_NEXT(xs, channel_q)) {
			periph = xs->xs_periph;

			if ((periph->periph_sent >= periph->periph_openings) ||
			    periph->periph_qfreeze != 0 ||
			    (periph->periph_flags & PERIPH_UNTAG) != 0)
				continue;

			if ((periph->periph_flags &
			    (PERIPH_RECOVERING | PERIPH_SENSE)) != 0 &&
			    (xs->xs_control & XS_CTL_URGENT) == 0)
				continue;

			/*
			 * We can issue this xfer!
			 */
			goto got_one;
		}

		/*
		 * Can't find any work to do right now.
		 */
		splx(s);
		return;

 got_one:
		/*
		 * Have an xfer to run.  Allocate a resource from
		 * the adapter to run it.  If we can't allocate that
		 * resource, we don't dequeue the xfer.
		 */
		if (scsipi_get_resource(chan) == 0) {
			/*
			 * Adapter is out of resources.  If the adapter
			 * supports it, attempt to grow them.
			 */
			if (scsipi_grow_resources(chan) == 0) {
				/*
				 * Wasn't able to grow resources,
				 * nothing more we can do.
				 */
				if (xs->xs_control & XS_CTL_POLL) {
					scsipi_printaddr(xs->xs_periph);
					printf("polling command but no "
					    "adapter resources");
					/* We'll panic shortly... */
				}
				splx(s);

				/*
				 * XXX: We should be able to note that
				 * XXX: resources are needed here!
				 */
				return;
			}
			/*
			 * scsipi_grow_resources() allocated the resource
			 * for us.
			 */
		}

		/*
		 * We have a resource to run this xfer, do it!
		 */
		TAILQ_REMOVE(&chan->chan_queue, xs, channel_q);

		/*
		 * If the command is to be tagged, allocate a tag ID
		 * for it.
		 */
		if (XS_CTL_TAGTYPE(xs) != 0)
			scsipi_get_tag(xs);
		else
			periph->periph_flags |= PERIPH_UNTAG;
		periph->periph_sent++;
		splx(s);

		scsipi_adapter_request(chan, ADAPTER_REQ_RUN_XFER, xs);
	}
#ifdef DIAGNOSTIC
	panic("scsipi_run_queue: impossible");
#endif
}

/*
 * scsipi_execute_xs:
 *
 *	Begin execution of an xfer, waiting for it to complete, if necessary.
 */
int
scsipi_execute_xs(struct scsipi_xfer *xs)
{
	struct scsipi_periph *periph = xs->xs_periph;
	struct scsipi_channel *chan = periph->periph_channel;
	int oasync, async, poll, retries, error, s;

	xs->xs_status &= ~XS_STS_DONE;
	xs->error = XS_NOERROR;
	xs->resid = xs->datalen;
	xs->status = SCSI_OK;

#ifdef SCSIPI_DEBUG
	if (xs->xs_periph->periph_dbflags & SCSIPI_DB3) {
		printf("scsipi_execute_xs: ");
		show_scsipi_xs(xs);
		printf("\n");
	}
#endif

	/*
	 * Deal with command tagging:
	 *
	 *	- If the device's current operating mode doesn't
	 *	  include tagged queueing, clear the tag mask.
	 *
	 *	- If the device's current operating mode *does*
	 *	  include tagged queueing, set the tag_type in
	 *	  the xfer to the appropriate byte for the tag
	 *	  message.
	 */
	if ((PERIPH_XFER_MODE(periph) & PERIPH_CAP_TQING) == 0 ||
	    (xs->xs_control & XS_CTL_REQSENSE)) {
		xs->xs_control &= ~XS_CTL_TAGMASK;
		xs->xs_tag_type = 0;
	} else {
		/*
		 * If the request doesn't specify a tag, give Head
		 * tags to URGENT operations and Ordered tags to
		 * everything else.
		 */
		if (XS_CTL_TAGTYPE(xs) == 0) {
			if (xs->xs_control & XS_CTL_URGENT)
				xs->xs_control |= XS_CTL_HEAD_TAG;
			else
				xs->xs_control |= XS_CTL_ORDERED_TAG;
		}

		switch (XS_CTL_TAGTYPE(xs)) {
		case XS_CTL_ORDERED_TAG:
			xs->xs_tag_type = MSG_ORDERED_Q_TAG;
			break;

		case XS_CTL_SIMPLE_TAG:
			xs->xs_tag_type = MSG_SIMPLE_Q_TAG;
			break;

		case XS_CTL_HEAD_TAG:
			xs->xs_tag_type = MSG_HEAD_OF_Q_TAG;
			break;

		default:
			scsipi_printaddr(periph);
			printf("invalid tag mask 0x%08x\n",
			    XS_CTL_TAGTYPE(xs));
			panic("scsipi_execute_xs");
		}
	}

	/* If the adapter wants us to poll, poll. */
	if (chan->chan_adapter->adapt_flags & SCSIPI_ADAPT_POLL_ONLY)
		xs->xs_control |= XS_CTL_POLL;

	/*
	 * If we don't yet have a completion thread, or we are to poll for
	 * completion, clear the ASYNC flag.
	 */
	oasync = (xs->xs_control & XS_CTL_ASYNC);
	if (chan->chan_thread == NULL || (xs->xs_control & XS_CTL_POLL) != 0)
		xs->xs_control &= ~XS_CTL_ASYNC;

	async = (xs->xs_control & XS_CTL_ASYNC);
	poll = (xs->xs_control & XS_CTL_POLL);
	retries = xs->xs_retries;	/* for polling commands */

#ifdef DIAGNOSTIC
	if (oasync != 0 && xs->bp == NULL)
		panic("scsipi_execute_xs: XS_CTL_ASYNC but no buf");
#endif

	/*
	 * Enqueue the transfer.  If we're not polling for completion, this
	 * should ALWAYS return `no error'.
	 */
 try_again:
	error = scsipi_enqueue(xs);
	if (error) {
		if (poll == 0) {
			scsipi_printaddr(periph);
			printf("not polling, but enqueue failed with %d\n",
			    error);
			panic("scsipi_execute_xs");
		}

		scsipi_printaddr(periph);
		printf("failed to enqueue polling command");
		if (retries != 0) {
			printf(", retrying...\n");
			delay(1000000);
			retries--;
			goto try_again;
		}
		printf("\n");
		goto free_xs;
	}

 restarted:
	scsipi_run_queue(chan);

	/*
	 * The xfer is enqueued, and possibly running.  If it's to be
	 * completed asynchronously, just return now.
	 */
	if (async)
		return (EJUSTRETURN);

	/*
	 * Not an asynchronous command; wait for it to complete.
	 */
	s = splbio();
	while ((xs->xs_status & XS_STS_DONE) == 0) {
		if (poll) {
			scsipi_printaddr(periph);
			printf("polling command not done\n");
			panic("scsipi_execute_xs");
		}
		(void) tsleep(xs, PRIBIO, "xscmd", 0);
	}
	splx(s);

	/*
	 * Command is complete.  scsipi_done() has awakened us to perform
	 * the error handling.
	 */
	error = scsipi_complete(xs);
	if (error == ERESTART)
		goto restarted;

	/*
	 * If it was meant to run async and we cleared async ourselves,
	 * don't return an error here; it has already been handled.
	 */
	if (oasync)
		error = EJUSTRETURN;
	/*
	 * Command completed successfully or fatal error occurred.  Fall
	 * into....
	 */
 free_xs:
	s = splbio();
	scsipi_put_xs(xs);
	splx(s);

	/*
	 * Kick the queue, keep it running in case it stopped for some
	 * reason.
	 */
	scsipi_run_queue(chan);

	return (error);
}

/*
 * scsipi_completion_thread:
 *
 *	This is the completion thread.  We wait for errors on
 *	asynchronous xfers, and perform the error handling
 *	function, restarting the command, if necessary.
 */
static void
scsipi_completion_thread(void *arg)
{
	struct scsipi_channel *chan = arg;
	struct scsipi_xfer *xs;
	int s;

	if (chan->chan_init_cb)
		(*chan->chan_init_cb)(chan, chan->chan_init_cb_arg);

	s = splbio();
	chan->chan_flags |= SCSIPI_CHAN_TACTIVE;
	splx(s);
	for (;;) {
		s = splbio();
		xs = TAILQ_FIRST(&chan->chan_complete);
		if (xs == NULL && chan->chan_tflags == 0) {
			/* nothing to do; wait */
			(void) tsleep(&chan->chan_complete, PRIBIO,
			    "sccomp", 0);
			splx(s);
			continue;
		}
		if (chan->chan_tflags & SCSIPI_CHANT_CALLBACK) {
			/* call chan_callback from thread context */
			chan->chan_tflags &= ~SCSIPI_CHANT_CALLBACK;
			chan->chan_callback(chan, chan->chan_callback_arg);
			splx(s);
			continue;
		}
		if (chan->chan_tflags & SCSIPI_CHANT_GROWRES) {
			/* attempt to get more openings for this channel */
			chan->chan_tflags &= ~SCSIPI_CHANT_GROWRES;
			scsipi_adapter_request(chan,
			    ADAPTER_REQ_GROW_RESOURCES, NULL);
			scsipi_channel_thaw(chan, 1);
			splx(s);
			continue;
		}
		if (chan->chan_tflags & SCSIPI_CHANT_KICK) {
			/* explicitly run the queues for this channel */
			chan->chan_tflags &= ~SCSIPI_CHANT_KICK;
			scsipi_run_queue(chan);
			splx(s);
			continue;
		}
		if (chan->chan_tflags & SCSIPI_CHANT_SHUTDOWN) {
			splx(s);
			break;
		}
		if (xs) {
			TAILQ_REMOVE(&chan->chan_complete, xs, channel_q);
			splx(s);

			/*
			 * Have an xfer with an error; process it.
			 */
			(void) scsipi_complete(xs);

			/*
			 * Kick the queue; keep it running if it was stopped
			 * for some reason.
			 */
			scsipi_run_queue(chan);
		} else {
			splx(s);
		}
	}

	chan->chan_thread = NULL;

	/* In case parent is waiting for us to exit. */
	wakeup(&chan->chan_thread);

	kthread_exit(0);
}
2161
2162 /*
2163 * scsipi_create_completion_thread:
2164 *
2165 * Callback to actually create the completion thread.
2166 */
2167 void
2168 scsipi_create_completion_thread(void *arg)
2169 {
2170 struct scsipi_channel *chan = arg;
2171 struct scsipi_adapter *adapt = chan->chan_adapter;
2172
2173 if (kthread_create1(scsipi_completion_thread, chan,
2174 &chan->chan_thread, "%s", chan->chan_name)) {
2175 printf("%s: unable to create completion thread for "
2176 "channel %d\n", adapt->adapt_dev->dv_xname,
2177 chan->chan_channel);
2178 panic("scsipi_create_completion_thread");
2179 }
2180 }
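
/*
 * (Channel initialization normally hands this function to
 * kthread_create(), so the thread is actually created once the
 * scheduler is up and running.)
 */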
2181
2182 /*
2183 * scsipi_thread_call_callback:
2184 *
 *	Request that a callback be invoked from the completion thread.
2186 */
2187 int
2188 scsipi_thread_call_callback(struct scsipi_channel *chan,
2189 void (*callback)(struct scsipi_channel *, void *), void *arg)
2190 {
2191 int s;
2192
2193 s = splbio();
2194 if ((chan->chan_flags & SCSIPI_CHAN_TACTIVE) == 0) {
2195 /* kernel thread doesn't exist yet */
2196 splx(s);
2197 return ESRCH;
2198 }
2199 if (chan->chan_tflags & SCSIPI_CHANT_CALLBACK) {
2200 splx(s);
2201 return EBUSY;
2202 }
2203 scsipi_channel_freeze(chan, 1);
2204 chan->chan_callback = callback;
2205 chan->chan_callback_arg = arg;
2206 chan->chan_tflags |= SCSIPI_CHANT_CALLBACK;
2207 wakeup(&chan->chan_complete);
2208 splx(s);
	return (0);
2210 }
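
#if 0
/*
 * Illustrative sketch (hypothetical driver code, not part of this file):
 * deferring work into the completion thread.  Note that the callback is
 * expected to thaw the channel, which scsipi_thread_call_callback()
 * froze on its behalf.  "mydrv_reconfig" and "mydrv_intr" are made-up
 * names.
 */
static void
mydrv_reconfig(struct scsipi_channel *chan, void *arg)
{

	/* Runs in thread context; it is safe to sleep here. */
	scsipi_channel_thaw(chan, 1);
}

static void
mydrv_intr(struct scsipi_channel *chan, void *arg)
{

	/* EBUSY means a callback is already pending for this channel. */
	(void) scsipi_thread_call_callback(chan, mydrv_reconfig, arg);
}
#endif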
2211
2212 /*
2213 * scsipi_async_event:
2214 *
2215 * Handle an asynchronous event from an adapter.
2216 */
2217 void
2218 scsipi_async_event(struct scsipi_channel *chan, scsipi_async_event_t event,
2219 void *arg)
2220 {
2221 int s;
2222
2223 s = splbio();
2224 switch (event) {
2225 case ASYNC_EVENT_MAX_OPENINGS:
2226 scsipi_async_event_max_openings(chan,
2227 (struct scsipi_max_openings *)arg);
2228 break;
2229
2230 case ASYNC_EVENT_XFER_MODE:
2231 scsipi_async_event_xfer_mode(chan,
2232 (struct scsipi_xfer_mode *)arg);
2233 break;
2234 case ASYNC_EVENT_RESET:
2235 scsipi_async_event_channel_reset(chan);
2236 break;
2237 }
2238 splx(s);
2239 }
2240
2241 /*
2242 * scsipi_print_xfer_mode:
2243 *
2244 * Print a periph's capabilities.
2245 */
2246 void
2247 scsipi_print_xfer_mode(struct scsipi_periph *periph)
2248 {
2249 int period, freq, speed, mbs;
2250
2251 if ((periph->periph_flags & PERIPH_MODE_VALID) == 0)
2252 return;
2253
2254 aprint_normal("%s: ", periph->periph_dev->dv_xname);
2255 if (periph->periph_mode & (PERIPH_CAP_SYNC | PERIPH_CAP_DT)) {
2256 period = scsipi_sync_factor_to_period(periph->periph_period);
2257 aprint_normal("sync (%d.%02dns offset %d)",
2258 period / 100, period % 100, periph->periph_offset);
2259 } else
2260 aprint_normal("async");
2261
2262 if (periph->periph_mode & PERIPH_CAP_WIDE32)
2263 aprint_normal(", 32-bit");
2264 else if (periph->periph_mode & (PERIPH_CAP_WIDE16 | PERIPH_CAP_DT))
2265 aprint_normal(", 16-bit");
2266 else
2267 aprint_normal(", 8-bit");
2268
2269 if (periph->periph_mode & (PERIPH_CAP_SYNC | PERIPH_CAP_DT)) {
2270 freq = scsipi_sync_factor_to_freq(periph->periph_period);
2271 speed = freq;
2272 if (periph->periph_mode & PERIPH_CAP_WIDE32)
2273 speed *= 4;
2274 else if (periph->periph_mode &
2275 (PERIPH_CAP_WIDE16 | PERIPH_CAP_DT))
2276 speed *= 2;
2277 mbs = speed / 1000;
2278 if (mbs > 0)
2279 aprint_normal(" (%d.%03dMB/s)", mbs, speed % 1000);
2280 else
2281 aprint_normal(" (%dKB/s)", speed % 1000);
2282 }
2283
2284 aprint_normal(" transfers");
2285
2286 if (periph->periph_mode & PERIPH_CAP_TQING)
2287 aprint_normal(", tagged queueing");
2288
2289 aprint_normal("\n");
2290 }
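
/*
 * Example output (illustrative; an Ultra160 disk negotiated to sync
 * factor 0x09 with offset 63, wide and tagged), all on one line:
 *
 *	sd0: sync (12.50ns offset 63), 16-bit (160.000MB/s) transfers,
 *	tagged queueing
 */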
2291
2292 /*
2293 * scsipi_async_event_max_openings:
2294 *
2295 * Update the maximum number of outstanding commands a
2296 * device may have.
2297 */
2298 static void
2299 scsipi_async_event_max_openings(struct scsipi_channel *chan,
2300 struct scsipi_max_openings *mo)
2301 {
2302 struct scsipi_periph *periph;
2303 int minlun, maxlun;
2304
2305 if (mo->mo_lun == -1) {
2306 /*
2307 * Wildcarded; apply it to all LUNs.
2308 */
2309 minlun = 0;
2310 maxlun = chan->chan_nluns - 1;
2311 } else
2312 minlun = maxlun = mo->mo_lun;
2313
2314 /* XXX This could really suck with a large LUN space. */
2315 for (; minlun <= maxlun; minlun++) {
2316 periph = scsipi_lookup_periph(chan, mo->mo_target, minlun);
2317 if (periph == NULL)
2318 continue;
2319
2320 if (mo->mo_openings < periph->periph_openings)
2321 periph->periph_openings = mo->mo_openings;
2322 else if (mo->mo_openings > periph->periph_openings &&
2323 (periph->periph_flags & PERIPH_GROW_OPENINGS) != 0)
2324 periph->periph_openings = mo->mo_openings;
2325 }
2326 }
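
#if 0
/*
 * Illustrative sketch (hypothetical adapter code): on a QUEUE FULL
 * status, clamp a periph's openings to the number of commands the
 * device proved able to accept.  "mydrv_queue_full" is a made-up name.
 */
static void
mydrv_queue_full(struct scsipi_periph *periph, int accepted)
{
	struct scsipi_max_openings mo;

	mo.mo_target = periph->periph_target;
	mo.mo_lun = periph->periph_lun;	/* or -1 to wildcard all LUNs */
	mo.mo_openings = accepted;
	scsipi_async_event(periph->periph_channel,
	    ASYNC_EVENT_MAX_OPENINGS, &mo);
}
#endif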
2327
2328 /*
2329 * scsipi_async_event_xfer_mode:
2330 *
2331 * Update the xfer mode for all periphs sharing the
2332 * specified I_T Nexus.
2333 */
2334 static void
2335 scsipi_async_event_xfer_mode(struct scsipi_channel *chan,
2336 struct scsipi_xfer_mode *xm)
2337 {
2338 struct scsipi_periph *periph;
2339 int lun, announce, mode, period, offset;
2340
2341 for (lun = 0; lun < chan->chan_nluns; lun++) {
2342 periph = scsipi_lookup_periph(chan, xm->xm_target, lun);
2343 if (periph == NULL)
2344 continue;
2345 announce = 0;
2346
2347 /*
2348 * Clamp the xfer mode down to this periph's capabilities.
2349 */
2350 mode = xm->xm_mode & periph->periph_cap;
2351 if (mode & PERIPH_CAP_SYNC) {
2352 period = xm->xm_period;
2353 offset = xm->xm_offset;
2354 } else {
2355 period = 0;
2356 offset = 0;
2357 }
2358
2359 /*
2360 * If we do not have a valid xfer mode yet, or the parameters
2361 * are different, announce them.
2362 */
2363 if ((periph->periph_flags & PERIPH_MODE_VALID) == 0 ||
2364 periph->periph_mode != mode ||
2365 periph->periph_period != period ||
2366 periph->periph_offset != offset)
2367 announce = 1;
2368
2369 periph->periph_mode = mode;
2370 periph->periph_period = period;
2371 periph->periph_offset = offset;
2372 periph->periph_flags |= PERIPH_MODE_VALID;
2373
2374 if (announce)
2375 scsipi_print_xfer_mode(periph);
2376 }
2377 }
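
#if 0
/*
 * Illustrative sketch (hypothetical adapter code): report a freshly
 * negotiated mode for a target, here Ultra160-style parameters
 * (sync factor 0x09 == 12.5ns, offset 63, wide, tagged).  The mode
 * bits are clamped to each periph's capabilities above.
 */
static void
mydrv_report_mode(struct scsipi_channel *chan, int target)
{
	struct scsipi_xfer_mode xm;

	xm.xm_target = target;
	xm.xm_mode = PERIPH_CAP_SYNC | PERIPH_CAP_WIDE16 | PERIPH_CAP_TQING;
	xm.xm_period = scsipi_sync_period_to_factor(1250);	/* 0x09 */
	xm.xm_offset = 63;
	scsipi_async_event(chan, ASYNC_EVENT_XFER_MODE, &xm);
}
#endif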
2378
2379 /*
2380 * scsipi_set_xfer_mode:
2381 *
2382 * Set the xfer mode for the specified I_T Nexus.
2383 */
2384 void
2385 scsipi_set_xfer_mode(struct scsipi_channel *chan, int target, int immed)
2386 {
2387 struct scsipi_xfer_mode xm;
2388 struct scsipi_periph *itperiph;
2389 int lun, s;
2390
2391 /*
2392 * Go to the minimal xfer mode.
2393 */
2394 xm.xm_target = target;
2395 xm.xm_mode = 0;
2396 xm.xm_period = 0; /* ignored */
2397 xm.xm_offset = 0; /* ignored */
2398
2399 /*
2400 * Find the first LUN we know about on this I_T Nexus.
2401 */
2402 for (itperiph = NULL, lun = 0; lun < chan->chan_nluns; lun++) {
2403 itperiph = scsipi_lookup_periph(chan, target, lun);
2404 if (itperiph != NULL)
2405 break;
2406 }
2407 if (itperiph != NULL) {
2408 xm.xm_mode = itperiph->periph_cap;
2409 /*
2410 * Now issue the request to the adapter.
2411 */
2412 s = splbio();
2413 scsipi_adapter_request(chan, ADAPTER_REQ_SET_XFER_MODE, &xm);
2414 splx(s);
2415 /*
2416 * If we want this to happen immediately, issue a dummy
2417 * command, since most adapters can't really negotiate unless
2418 * they're executing a job.
2419 */
2420 if (immed != 0) {
2421 (void) scsipi_test_unit_ready(itperiph,
2422 XS_CTL_DISCOVERY | XS_CTL_IGNORE_ILLEGAL_REQUEST |
2423 XS_CTL_IGNORE_NOT_READY |
2424 XS_CTL_IGNORE_MEDIA_CHANGE);
2425 }
2426 }
2427 }
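
#if 0
/*
 * Illustrative sketch (hypothetical caller): request renegotiation
 * with target 2 and make it take effect now via the dummy command
 * issued above.
 */
	scsipi_set_xfer_mode(chan, 2, 1);
#endif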
2428
/*
 * scsipi_async_event_channel_reset:
 *
 *	Handle a SCSI bus reset.  Must be called at splbio.
 */
2435 static void
2436 scsipi_async_event_channel_reset(struct scsipi_channel *chan)
2437 {
2438 struct scsipi_xfer *xs, *xs_next;
2439 struct scsipi_periph *periph;
2440 int target, lun;
2441
	/*
	 * The channel has been reset.  Also mark pending REQUEST_SENSE
	 * commands with XS_RESET, since their sense data is no longer
	 * available.  We can't call scsipi_done() from here, as these
	 * commands have not been sent to the adapter yet (doing so
	 * would corrupt the accounting).
	 */
2448
2449 for (xs = TAILQ_FIRST(&chan->chan_queue); xs != NULL; xs = xs_next) {
2450 xs_next = TAILQ_NEXT(xs, channel_q);
2451 if (xs->xs_control & XS_CTL_REQSENSE) {
2452 TAILQ_REMOVE(&chan->chan_queue, xs, channel_q);
2453 xs->error = XS_RESET;
2454 if ((xs->xs_control & XS_CTL_ASYNC) != 0)
2455 TAILQ_INSERT_TAIL(&chan->chan_complete, xs,
2456 channel_q);
2457 }
2458 }
2459 wakeup(&chan->chan_complete);
	/* Catch xfers with a pending sense that have no REQSENSE xs yet. */
2461 for (target = 0; target < chan->chan_ntargets; target++) {
2462 if (target == chan->chan_id)
2463 continue;
2464 for (lun = 0; lun < chan->chan_nluns; lun++) {
2465 periph = scsipi_lookup_periph(chan, target, lun);
2466 if (periph) {
2467 xs = periph->periph_xscheck;
2468 if (xs)
2469 xs->error = XS_RESET;
2470 }
2471 }
2472 }
2473 }
2474
/*
 * scsipi_target_detach:
 *
 *	Detach all periphs associated with an I_T nexus.
 *	Must be called from valid thread context.
 */
2481 int
2482 scsipi_target_detach(struct scsipi_channel *chan, int target, int lun,
2483 int flags)
2484 {
2485 struct scsipi_periph *periph;
2486 int ctarget, mintarget, maxtarget;
2487 int clun, minlun, maxlun;
2488 int error;
2489
2490 if (target == -1) {
2491 mintarget = 0;
2492 maxtarget = chan->chan_ntargets;
2493 } else {
2494 if (target == chan->chan_id)
2495 return EINVAL;
2496 if (target < 0 || target >= chan->chan_ntargets)
2497 return EINVAL;
2498 mintarget = target;
2499 maxtarget = target + 1;
2500 }
2501
2502 if (lun == -1) {
2503 minlun = 0;
2504 maxlun = chan->chan_nluns;
2505 } else {
2506 if (lun < 0 || lun >= chan->chan_nluns)
2507 return EINVAL;
2508 minlun = lun;
2509 maxlun = lun + 1;
2510 }
2511
2512 for (ctarget = mintarget; ctarget < maxtarget; ctarget++) {
2513 if (ctarget == chan->chan_id)
2514 continue;
2515
2516 for (clun = minlun; clun < maxlun; clun++) {
2517 periph = scsipi_lookup_periph(chan, ctarget, clun);
2518 if (periph == NULL)
2519 continue;
2520 error = config_detach(periph->periph_dev, flags);
2521 if (error)
2522 return (error);
2523 }
2524 }
	return (0);
2526 }
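
#if 0
/*
 * Illustrative sketch (hypothetical hot-unplug handler): forcibly
 * detach every LUN of target 3; a lun argument of -1 wildcards all
 * LUNs, as a target of -1 would wildcard all targets.
 */
	error = scsipi_target_detach(chan, 3, -1, DETACH_FORCE);
	if (error)
		printf("target 3 did not fully detach: %d\n", error);
#endif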
2527
2528 /*
2529 * scsipi_adapter_addref:
2530 *
 *	Add a reference to the specified adapter, enabling the
 *	adapter if necessary.
2533 */
2534 int
2535 scsipi_adapter_addref(struct scsipi_adapter *adapt)
2536 {
2537 int s, error = 0;
2538
2539 s = splbio();
2540 if (adapt->adapt_refcnt++ == 0 && adapt->adapt_enable != NULL) {
2541 error = (*adapt->adapt_enable)(adapt->adapt_dev, 1);
2542 if (error)
2543 adapt->adapt_refcnt--;
2544 }
2545 splx(s);
2546 return (error);
2547 }
2548
2549 /*
2550 * scsipi_adapter_delref:
2551 *
 *	Delete a reference to the specified adapter, disabling the
 *	adapter if possible.
2554 */
2555 void
2556 scsipi_adapter_delref(struct scsipi_adapter *adapt)
2557 {
2558 int s;
2559
2560 s = splbio();
2561 if (adapt->adapt_refcnt-- == 1 && adapt->adapt_enable != NULL)
2562 (void) (*adapt->adapt_enable)(adapt->adapt_dev, 0);
2563 splx(s);
2564 }
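
#if 0
/*
 * Illustrative sketch (hypothetical periph open/close path): the first
 * reference enables the adapter through its adapt_enable hook, and the
 * last delref allows it to be disabled again.
 */
	if ((error = scsipi_adapter_addref(adapt)) != 0)
		return (error);
	/* ... the device is usable here ... */
	scsipi_adapter_delref(adapt);
#endif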
2565
2566 static struct scsipi_syncparam {
2567 int ss_factor;
2568 int ss_period; /* ns * 100 */
2569 } scsipi_syncparams[] = {
2570 { 0x08, 625 }, /* FAST-160 (Ultra320) */
2571 { 0x09, 1250 }, /* FAST-80 (Ultra160) */
2572 { 0x0a, 2500 }, /* FAST-40 40MHz (Ultra2) */
2573 { 0x0b, 3030 }, /* FAST-40 33MHz (Ultra2) */
2574 { 0x0c, 5000 }, /* FAST-20 (Ultra) */
2575 };
2576 static const int scsipi_nsyncparams =
2577 sizeof(scsipi_syncparams) / sizeof(scsipi_syncparams[0]);
2578
2579 int
2580 scsipi_sync_period_to_factor(int period /* ns * 100 */)
2581 {
2582 int i;
2583
2584 for (i = 0; i < scsipi_nsyncparams; i++) {
2585 if (period <= scsipi_syncparams[i].ss_period)
2586 return (scsipi_syncparams[i].ss_factor);
2587 }
2588
2589 return ((period / 100) / 4);
2590 }
2591
2592 int
2593 scsipi_sync_factor_to_period(int factor)
2594 {
2595 int i;
2596
2597 for (i = 0; i < scsipi_nsyncparams; i++) {
2598 if (factor == scsipi_syncparams[i].ss_factor)
2599 return (scsipi_syncparams[i].ss_period);
2600 }
2601
2602 return ((factor * 4) * 100);
2603 }
2604
2605 int
2606 scsipi_sync_factor_to_freq(int factor)
2607 {
2608 int i;
2609
2610 for (i = 0; i < scsipi_nsyncparams; i++) {
2611 if (factor == scsipi_syncparams[i].ss_factor)
2612 return (100000000 / scsipi_syncparams[i].ss_period);
2613 }
2614
2615 return (10000000 / ((factor * 4) * 10));
2616 }
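
/*
 * Worked example (informal): the FAST-20 factor 0x0c maps to period
 * 5000 (i.e. 50.00ns) and frequency 100000000 / 5000 = 20000kHz (20MHz);
 * conversely scsipi_sync_period_to_factor(5000) picks 0x0c as the first
 * table entry whose period is >= 5000.  A factor outside the table, say
 * 0x32, falls back to the linear rules: period (0x32 * 4) * 100 = 20000
 * (200ns) and frequency 10000000 / (0x32 * 40) = 5000kHz.
 */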
2617
2618 #ifdef SCSIPI_DEBUG
2619 /*
 * Given a scsipi_xfer, dump the request, in all its glory
2621 */
2622 void
2623 show_scsipi_xs(struct scsipi_xfer *xs)
2624 {
2625
2626 printf("xs(%p): ", xs);
2627 printf("xs_control(0x%08x)", xs->xs_control);
2628 printf("xs_status(0x%08x)", xs->xs_status);
2629 printf("periph(%p)", xs->xs_periph);
2630 printf("retr(0x%x)", xs->xs_retries);
2631 printf("timo(0x%x)", xs->timeout);
2632 printf("cmd(%p)", xs->cmd);
2633 printf("len(0x%x)", xs->cmdlen);
2634 printf("data(%p)", xs->data);
2635 printf("len(0x%x)", xs->datalen);
2636 printf("res(0x%x)", xs->resid);
2637 printf("err(0x%x)", xs->error);
2638 printf("bp(%p)", xs->bp);
2639 show_scsipi_cmd(xs);
2640 }
2641
2642 void
2643 show_scsipi_cmd(struct scsipi_xfer *xs)
2644 {
2645 u_char *b = (u_char *) xs->cmd;
2646 int i = 0;
2647
2648 scsipi_printaddr(xs->xs_periph);
2649 printf(" command: ");
2650
2651 if ((xs->xs_control & XS_CTL_RESET) == 0) {
2652 while (i < xs->cmdlen) {
2653 if (i)
2654 printf(",");
2655 printf("0x%x", b[i++]);
2656 }
2657 printf("-[%d bytes]\n", xs->datalen);
2658 if (xs->datalen)
2659 show_mem(xs->data, min(64, xs->datalen));
2660 } else
2661 printf("-RESET-\n");
2662 }
2663
2664 void
2665 show_mem(u_char *address, int num)
2666 {
2667 int x;
2668
2669 printf("------------------------------");
2670 for (x = 0; x < num; x++) {
2671 if ((x % 16) == 0)
2672 printf("\n%03d: ", x);
2673 printf("%02x ", *address++);
2674 }
2675 printf("\n------------------------------\n");
2676 }
2677 #endif /* SCSIPI_DEBUG */
2678