/* $NetBSD: scsipi_base.c,v 1.88.2.3 2004/08/25 06:58:43 skrll Exp $ */

/*-
 * Copyright (c) 1998, 1999, 2000, 2002, 2003 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Charles M. Hannum; by Jason R. Thorpe of the Numerical Aerospace
 * Simulation Facility, NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: scsipi_base.c,v 1.88.2.3 2004/08/25 06:58:43 skrll Exp $");

#include "opt_scsi.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/buf.h>
#include <sys/uio.h>
#include <sys/malloc.h>
#include <sys/pool.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/proc.h>
#include <sys/kthread.h>
#include <sys/hash.h>

#include <dev/scsipi/scsipi_all.h>
#include <dev/scsipi/scsipi_disk.h>
#include <dev/scsipi/scsipiconf.h>
#include <dev/scsipi/scsipi_base.h>

#include <dev/scsipi/scsi_all.h>
#include <dev/scsipi/scsi_message.h>

static int	scsipi_complete(struct scsipi_xfer *);
static void	scsipi_request_sense(struct scsipi_xfer *);
static int	scsipi_enqueue(struct scsipi_xfer *);
static void	scsipi_run_queue(struct scsipi_channel *chan);

static void	scsipi_completion_thread(void *);

static void	scsipi_get_tag(struct scsipi_xfer *);
static void	scsipi_put_tag(struct scsipi_xfer *);

static int	scsipi_get_resource(struct scsipi_channel *);
static void	scsipi_put_resource(struct scsipi_channel *);

static void	scsipi_async_event_max_openings(struct scsipi_channel *,
		    struct scsipi_max_openings *);
static void	scsipi_async_event_xfer_mode(struct scsipi_channel *,
		    struct scsipi_xfer_mode *);
static void	scsipi_async_event_channel_reset(struct scsipi_channel *);

static struct pool scsipi_xfer_pool;

/*
 * scsipi_init:
 *
 *	Called when a scsibus or atapibus is attached to the system
 *	to initialize shared data structures.
 */
void
scsipi_init(void)
{
	static int scsipi_init_done;

	if (scsipi_init_done)
		return;
	scsipi_init_done = 1;

	/* Initialize the scsipi_xfer pool. */
	pool_init(&scsipi_xfer_pool, sizeof(struct scsipi_xfer), 0,
	    0, 0, "scxspl", NULL);
}

/*
 * scsipi_channel_init:
 *
 *	Initialize a scsipi_channel when it is attached.
 */
int
scsipi_channel_init(struct scsipi_channel *chan)
{
	int i;

	/* Initialize shared data. */
	scsipi_init();

	/* Initialize the queues. */
	TAILQ_INIT(&chan->chan_queue);
	TAILQ_INIT(&chan->chan_complete);

	for (i = 0; i < SCSIPI_CHAN_PERIPH_BUCKETS; i++)
		LIST_INIT(&chan->chan_periphtab[i]);

	/*
	 * Create the asynchronous completion thread.
	 */
	kthread_create(scsipi_create_completion_thread, chan);
	return (0);
}

/*
 * scsipi_channel_shutdown:
 *
 *	Shutdown a scsipi_channel.
 */
void
scsipi_channel_shutdown(struct scsipi_channel *chan)
{

	/*
	 * Shut down the completion thread.
	 */
	chan->chan_tflags |= SCSIPI_CHANT_SHUTDOWN;
	wakeup(&chan->chan_complete);

	/*
	 * Now wait for the thread to exit.
	 */
	while (chan->chan_thread != NULL)
		(void) tsleep(&chan->chan_thread, PRIBIO, "scshut", 0);
}

static uint32_t
scsipi_chan_periph_hash(uint64_t t, uint64_t l)
{
	uint32_t hash;

	hash = hash32_buf(&t, sizeof(t), HASH32_BUF_INIT);
	hash = hash32_buf(&l, sizeof(l), hash);

	return (hash & SCSIPI_CHAN_PERIPH_HASHMASK);
}

/*
 * scsipi_insert_periph:
 *
 *	Insert a periph into the channel.
 */
void
scsipi_insert_periph(struct scsipi_channel *chan, struct scsipi_periph *periph)
{
	uint32_t hash;
	int s;

	hash = scsipi_chan_periph_hash(periph->periph_target,
	    periph->periph_lun);

	s = splbio();
	LIST_INSERT_HEAD(&chan->chan_periphtab[hash], periph, periph_hash);
	splx(s);
}

/*
 * scsipi_remove_periph:
 *
 *	Remove a periph from the channel.
 */
void
scsipi_remove_periph(struct scsipi_channel *chan, struct scsipi_periph *periph)
{
	int s;

	s = splbio();
	LIST_REMOVE(periph, periph_hash);
	splx(s);
}

/*
 * scsipi_lookup_periph:
 *
 *	Lookup a periph on the specified channel.
 */
struct scsipi_periph *
scsipi_lookup_periph(struct scsipi_channel *chan, int target, int lun)
{
	struct scsipi_periph *periph;
	uint32_t hash;
	int s;

	if (target >= chan->chan_ntargets ||
	    lun >= chan->chan_nluns)
		return (NULL);

	hash = scsipi_chan_periph_hash(target, lun);

	s = splbio();
	LIST_FOREACH(periph, &chan->chan_periphtab[hash], periph_hash) {
		if (periph->periph_target == target &&
		    periph->periph_lun == lun)
			break;
	}
	splx(s);

	return (periph);
}
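
/*
 * Editor's example (a sketch, not part of the original source): the three
 * functions above implement a small per-channel target/LUN hash table.
 * A periph registered with scsipi_insert_periph() is found again by
 * recomputing the same hash in scsipi_lookup_periph().  Assuming a
 * channel `chan' with a device at target 2, LUN 0:
 *
 *	struct scsipi_periph *p;
 *
 *	p = scsipi_lookup_periph(chan, 2, 0);
 *	if (p != NULL)
 *		KASSERT(p->periph_target == 2 && p->periph_lun == 0);
 *
 * Out-of-range target/LUN values simply return NULL, which is why callers
 * such as scsipi_complete() can probe with them safely.
 */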

/*
 * scsipi_get_resource:
 *
 *	Allocate a single xfer `resource' from the channel.
 *
 *	NOTE: Must be called at splbio().
 */
static int
scsipi_get_resource(struct scsipi_channel *chan)
{
	struct scsipi_adapter *adapt = chan->chan_adapter;

	if (chan->chan_flags & SCSIPI_CHAN_OPENINGS) {
		if (chan->chan_openings > 0) {
			chan->chan_openings--;
			return (1);
		}
		return (0);
	}

	if (adapt->adapt_openings > 0) {
		adapt->adapt_openings--;
		return (1);
	}
	return (0);
}

/*
 * scsipi_grow_resources:
 *
 *	Attempt to grow resources for a channel.  If this succeeds,
 *	we allocate one for our caller.
 *
 *	NOTE: Must be called at splbio().
 */
static __inline int
scsipi_grow_resources(struct scsipi_channel *chan)
{

	if (chan->chan_flags & SCSIPI_CHAN_CANGROW) {
		if ((chan->chan_flags & SCSIPI_CHAN_TACTIVE) == 0) {
			scsipi_adapter_request(chan,
			    ADAPTER_REQ_GROW_RESOURCES, NULL);
			return (scsipi_get_resource(chan));
		}
		/*
		 * ask the channel thread to do it. It'll have to thaw the
		 * queue
		 */
		scsipi_channel_freeze(chan, 1);
		chan->chan_tflags |= SCSIPI_CHANT_GROWRES;
		wakeup(&chan->chan_complete);
		return (0);
	}

	return (0);
}

/*
 * scsipi_put_resource:
 *
 *	Free a single xfer `resource' to the channel.
 *
 *	NOTE: Must be called at splbio().
 */
static void
scsipi_put_resource(struct scsipi_channel *chan)
{
	struct scsipi_adapter *adapt = chan->chan_adapter;

	if (chan->chan_flags & SCSIPI_CHAN_OPENINGS)
		chan->chan_openings++;
	else
		adapt->adapt_openings++;
}

/*
 * scsipi_get_tag:
 *
 *	Get a tag ID for the specified xfer.
 *
 *	NOTE: Must be called at splbio().
 */
static void
scsipi_get_tag(struct scsipi_xfer *xs)
{
	struct scsipi_periph *periph = xs->xs_periph;
	int bit, tag;
	u_int word;

	bit = 0;	/* XXX gcc */
	for (word = 0; word < PERIPH_NTAGWORDS; word++) {
		bit = ffs(periph->periph_freetags[word]);
		if (bit != 0)
			break;
	}
#ifdef DIAGNOSTIC
	if (word == PERIPH_NTAGWORDS) {
		scsipi_printaddr(periph);
		printf("no free tags\n");
		panic("scsipi_get_tag");
	}
#endif

	bit -= 1;
	periph->periph_freetags[word] &= ~(1 << bit);
	tag = (word << 5) | bit;

	/* XXX Should eventually disallow this completely. */
	if (tag >= periph->periph_openings) {
		scsipi_printaddr(periph);
		printf("WARNING: tag %d greater than available openings %d\n",
		    tag, periph->periph_openings);
	}

	xs->xs_tag_id = tag;
}
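
/*
 * Editor's note (not in the original source): free tags are kept as a
 * bitmap, 32 tag IDs per word, so the (word, bit) pair found above maps
 * to a tag ID with shifts and masks.  For example, word 1, bit 3:
 *
 *	tag  = (1 << 5) | 3;		becomes tag 35
 *	word = 35 >> 5;			back to word 1
 *	bit  = 35 & 0x1f;		back to bit 3
 *
 * which is exactly the inverse computation scsipi_put_tag() performs
 * below when the tag is released.
 */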

/*
 * scsipi_put_tag:
 *
 *	Put the tag ID for the specified xfer back into the pool.
 *
 *	NOTE: Must be called at splbio().
 */
static void
scsipi_put_tag(struct scsipi_xfer *xs)
{
	struct scsipi_periph *periph = xs->xs_periph;
	int word, bit;

	word = xs->xs_tag_id >> 5;
	bit = xs->xs_tag_id & 0x1f;

	periph->periph_freetags[word] |= (1 << bit);
}

/*
 * scsipi_get_xs:
 *
 *	Allocate an xfer descriptor and associate it with the
 *	specified peripheral.  If the peripheral has no more
 *	available command openings, we either block waiting for
 *	one to become available, or fail.
 */
struct scsipi_xfer *
scsipi_get_xs(struct scsipi_periph *periph, int flags)
{
	struct scsipi_xfer *xs;
	int s;

	SC_DEBUG(periph, SCSIPI_DB3, ("scsipi_get_xs\n"));

	/*
	 * If we're cold, make sure we poll.
	 */
	if (cold)
		flags |= XS_CTL_NOSLEEP | XS_CTL_POLL;

#ifdef DIAGNOSTIC
	/*
	 * URGENT commands can never be ASYNC.
	 */
	if ((flags & (XS_CTL_URGENT|XS_CTL_ASYNC)) ==
	    (XS_CTL_URGENT|XS_CTL_ASYNC)) {
		scsipi_printaddr(periph);
		printf("URGENT and ASYNC\n");
		panic("scsipi_get_xs");
	}
#endif

	s = splbio();
	/*
	 * Wait for a command opening to become available.  Rules:
	 *
	 *	- All xfers must wait for an available opening.
	 *	  Exception: URGENT xfers can proceed when
	 *	  active == openings, because we use the opening
	 *	  of the command we're recovering for.
	 *
	 *	- If the periph has sense pending, only URGENT & REQSENSE
	 *	  xfers may proceed.
	 *
	 *	- If the periph is recovering, only URGENT xfers may
	 *	  proceed.
	 *
	 *	- If the periph is currently executing a recovery
	 *	  command, URGENT commands must block, because only
	 *	  one recovery command can execute at a time.
	 */
	for (;;) {
		if (flags & XS_CTL_URGENT) {
			if (periph->periph_active > periph->periph_openings)
				goto wait_for_opening;
			if (periph->periph_flags & PERIPH_SENSE) {
				if ((flags & XS_CTL_REQSENSE) == 0)
					goto wait_for_opening;
			} else {
				if ((periph->periph_flags &
				    PERIPH_RECOVERY_ACTIVE) != 0)
					goto wait_for_opening;
				periph->periph_flags |= PERIPH_RECOVERY_ACTIVE;
			}
			break;
		}
		if (periph->periph_active >= periph->periph_openings ||
		    (periph->periph_flags & PERIPH_RECOVERING) != 0)
			goto wait_for_opening;
		periph->periph_active++;
		break;

 wait_for_opening:
		if (flags & XS_CTL_NOSLEEP) {
			splx(s);
			return (NULL);
		}
		SC_DEBUG(periph, SCSIPI_DB3, ("sleeping\n"));
		periph->periph_flags |= PERIPH_WAITING;
		(void) tsleep(periph, PRIBIO, "getxs", 0);
	}
	SC_DEBUG(periph, SCSIPI_DB3, ("calling pool_get\n"));
	xs = pool_get(&scsipi_xfer_pool,
	    ((flags & XS_CTL_NOSLEEP) != 0 ? PR_NOWAIT : PR_WAITOK));
	if (xs == NULL) {
		if (flags & XS_CTL_URGENT) {
			if ((flags & XS_CTL_REQSENSE) == 0)
				periph->periph_flags &= ~PERIPH_RECOVERY_ACTIVE;
		} else
			periph->periph_active--;
		scsipi_printaddr(periph);
		printf("unable to allocate %sscsipi_xfer\n",
		    (flags & XS_CTL_URGENT) ? "URGENT " : "");
	}
	splx(s);

	SC_DEBUG(periph, SCSIPI_DB3, ("returning\n"));

	if (xs != NULL) {
		memset(xs, 0, sizeof(*xs));
		callout_init(&xs->xs_callout);
		xs->xs_periph = periph;
		xs->xs_control = flags;
		xs->xs_status = 0;
		s = splbio();
		TAILQ_INSERT_TAIL(&periph->periph_xferq, xs, device_q);
		splx(s);
	}
	return (xs);
}
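
/*
 * Editor's example (a sketch under assumptions, not in the original
 * source): the usual lifetime of an xfer descriptor, with error handling
 * elided.  XS_CTL_NOSLEEP makes the allocation fail with NULL instead of
 * sleeping for an opening:
 *
 *	struct scsipi_xfer *xs;
 *	int s;
 *
 *	xs = scsipi_get_xs(periph, XS_CTL_NOSLEEP);
 *	if (xs == NULL)
 *		return;			hypothetical caller policy
 *	... fill in xs->cmd, xs->data, xs->timeout, etc. and run it ...
 *	s = splbio();
 *	scsipi_put_xs(xs);		must be at splbio(); see below
 *	splx(s);
 *
 * In practice most callers go through scsipi_command() and
 * scsipi_execute_xs() rather than driving this pair directly.
 */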

/*
 * scsipi_put_xs:
 *
 *	Release an xfer descriptor, decreasing the outstanding command
 *	count for the peripheral.  If there is a thread waiting for
 *	an opening, wake it up.  If not, kick any queued I/O the
 *	peripheral may have.
 *
 *	NOTE: Must be called at splbio().
 */
void
scsipi_put_xs(struct scsipi_xfer *xs)
{
	struct scsipi_periph *periph = xs->xs_periph;
	int flags = xs->xs_control;

	SC_DEBUG(periph, SCSIPI_DB3, ("scsipi_free_xs\n"));

	TAILQ_REMOVE(&periph->periph_xferq, xs, device_q);
	pool_put(&scsipi_xfer_pool, xs);

#ifdef DIAGNOSTIC
	if ((periph->periph_flags & PERIPH_RECOVERY_ACTIVE) != 0 &&
	    periph->periph_active == 0) {
		scsipi_printaddr(periph);
		printf("recovery without a command to recover for\n");
		panic("scsipi_put_xs");
	}
#endif

	if (flags & XS_CTL_URGENT) {
		if ((flags & XS_CTL_REQSENSE) == 0)
			periph->periph_flags &= ~PERIPH_RECOVERY_ACTIVE;
	} else
		periph->periph_active--;
	if (periph->periph_active == 0 &&
	    (periph->periph_flags & PERIPH_WAITDRAIN) != 0) {
		periph->periph_flags &= ~PERIPH_WAITDRAIN;
		wakeup(&periph->periph_active);
	}

	if (periph->periph_flags & PERIPH_WAITING) {
		periph->periph_flags &= ~PERIPH_WAITING;
		wakeup(periph);
	} else {
		if (periph->periph_switch->psw_start != NULL &&
		    (periph->periph_dev->dv_flags & DVF_ACTIVE)) {
			SC_DEBUG(periph, SCSIPI_DB2,
			    ("calling private start()\n"));
			(*periph->periph_switch->psw_start)(periph);
		}
	}
}

/*
 * scsipi_channel_freeze:
 *
 *	Freeze a channel's xfer queue.
 */
void
scsipi_channel_freeze(struct scsipi_channel *chan, int count)
{
	int s;

	s = splbio();
	chan->chan_qfreeze += count;
	splx(s);
}

/*
 * scsipi_channel_thaw:
 *
 *	Thaw a channel's xfer queue.
 */
void
scsipi_channel_thaw(struct scsipi_channel *chan, int count)
{
	int s;

	s = splbio();
	chan->chan_qfreeze -= count;
	/*
	 * Don't let the freeze count go negative.
	 *
	 * Presumably the adapter driver could keep track of this,
	 * but it might just be easier to do this here so as to allow
	 * multiple callers, including those outside the adapter driver.
	 */
	if (chan->chan_qfreeze < 0) {
		chan->chan_qfreeze = 0;
	}
	splx(s);
	/*
	 * Kick the channel's queue here.  Note, we may be running in
	 * interrupt context (softclock or HBA's interrupt), so the adapter
	 * driver had better not sleep.
	 */
	if (chan->chan_qfreeze == 0)
		scsipi_run_queue(chan);
}

/*
 * scsipi_channel_timed_thaw:
 *
 *	Thaw a channel after some time has expired.  This will also
 *	run the channel's queue if the freeze count has reached 0.
 */
void
scsipi_channel_timed_thaw(void *arg)
{
	struct scsipi_channel *chan = arg;

	scsipi_channel_thaw(chan, 1);
}
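
/*
 * Editor's note (not in the original source): scsipi_channel_timed_thaw()
 * is shaped as a callout handler, so a driver can freeze a channel and
 * have it thawed automatically later.  Sketch, assuming a callout `c'
 * owned by the caller:
 *
 *	scsipi_channel_freeze(chan, 1);
 *	callout_reset(&c, hz, scsipi_channel_timed_thaw, chan);
 *
 * This mirrors the periph-level pattern used by scsipi_complete() below,
 * which arms periph_callout with scsipi_periph_timed_thaw.
 */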

/*
 * scsipi_periph_freeze:
 *
 *	Freeze a device's xfer queue.
 */
void
scsipi_periph_freeze(struct scsipi_periph *periph, int count)
{
	int s;

	s = splbio();
	periph->periph_qfreeze += count;
	splx(s);
}

/*
 * scsipi_periph_thaw:
 *
 *	Thaw a device's xfer queue.
 */
void
scsipi_periph_thaw(struct scsipi_periph *periph, int count)
{
	int s;

	s = splbio();
	periph->periph_qfreeze -= count;
#ifdef DIAGNOSTIC
	if (periph->periph_qfreeze < 0) {
		static const char pc[] = "periph freeze count < 0";
		scsipi_printaddr(periph);
		printf("%s\n", pc);
		panic(pc);
	}
#endif
	if (periph->periph_qfreeze == 0 &&
	    (periph->periph_flags & PERIPH_WAITING) != 0)
		wakeup(periph);
	splx(s);
}

/*
 * scsipi_periph_timed_thaw:
 *
 *	Thaw a device after some time has expired.
 */
void
scsipi_periph_timed_thaw(void *arg)
{
	int s;
	struct scsipi_periph *periph = arg;

	callout_stop(&periph->periph_callout);

	s = splbio();
	scsipi_periph_thaw(periph, 1);
	if ((periph->periph_channel->chan_flags & SCSIPI_CHAN_TACTIVE) == 0) {
		/*
		 * Kick the channel's queue here.  Note, we're running in
		 * interrupt context (softclock), so the adapter driver
		 * had better not sleep.
		 */
		scsipi_run_queue(periph->periph_channel);
	} else {
		/*
		 * Tell the completion thread to kick the channel's queue here.
		 */
		periph->periph_channel->chan_tflags |= SCSIPI_CHANT_KICK;
		wakeup(&periph->periph_channel->chan_complete);
	}
	splx(s);
}

/*
 * scsipi_wait_drain:
 *
 *	Wait for a periph's pending xfers to drain.
 */
void
scsipi_wait_drain(struct scsipi_periph *periph)
{
	int s;

	s = splbio();
	while (periph->periph_active != 0) {
		periph->periph_flags |= PERIPH_WAITDRAIN;
		(void) tsleep(&periph->periph_active, PRIBIO, "sxdrn", 0);
	}
	splx(s);
}

/*
 * scsipi_kill_pending:
 *
 *	Kill off all pending xfers for a periph.
 *
 *	NOTE: Must be called at splbio().
 */
void
scsipi_kill_pending(struct scsipi_periph *periph)
{

	(*periph->periph_channel->chan_bustype->bustype_kill_pending)(periph);
	scsipi_wait_drain(periph);
}

/*
 * scsipi_print_cdb:
 *	prints a command descriptor block (for debug purposes, error
 *	messages, SCSIPI_VERBOSE, ...)
 */
void
scsipi_print_cdb(struct scsipi_generic *cmd)
{
	int i, j;

	printf("0x%02x", cmd->opcode);

	switch (CDB_GROUPID(cmd->opcode)) {
	case CDB_GROUPID_0:
		j = CDB_GROUP0;
		break;
	case CDB_GROUPID_1:
		j = CDB_GROUP1;
		break;
	case CDB_GROUPID_2:
		j = CDB_GROUP2;
		break;
	case CDB_GROUPID_3:
		j = CDB_GROUP3;
		break;
	case CDB_GROUPID_4:
		j = CDB_GROUP4;
		break;
	case CDB_GROUPID_5:
		j = CDB_GROUP5;
		break;
	case CDB_GROUPID_6:
		j = CDB_GROUP6;
		break;
	case CDB_GROUPID_7:
		j = CDB_GROUP7;
		break;
	default:
		j = 0;
	}
	if (j == 0)
		j = sizeof (cmd->bytes);
	for (i = 0; i < j-1; i++)	/* already done the opcode */
		printf(" %02x", cmd->bytes[i]);
}
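
/*
 * Editor's note (not in the original source): the CDB group in the high
 * bits of the opcode determines the command length.  For example, READ(10)
 * has opcode 0x28, which falls in group 1; CDB_GROUPID(0x28) thus selects
 * CDB_GROUP1 (a 10-byte CDB), and the loop above prints the opcode plus
 * the remaining 9 bytes.
 */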

/*
 * scsipi_interpret_sense:
 *
 *	Look at the returned sense and act on the error, determining
 *	the unix error number to pass back.  (0 = report no error)
 *
 *	NOTE: If we return ERESTART, we are expected to have
 *	thawed the device!
 *
 *	THIS IS THE DEFAULT ERROR HANDLER FOR SCSI DEVICES.
 */
int
scsipi_interpret_sense(struct scsipi_xfer *xs)
{
	struct scsipi_sense_data *sense;
	struct scsipi_periph *periph = xs->xs_periph;
	u_int8_t key;
	int error;
#ifndef SCSIVERBOSE
	u_int32_t info;
	static char *error_mes[] = {
		"soft error (corrected)",
		"not ready", "medium error",
		"non-media hardware failure", "illegal request",
		"unit attention", "readonly device",
		"no data found", "vendor unique",
		"copy aborted", "command aborted",
		"search returned equal", "volume overflow",
		"verify miscompare", "unknown error key"
	};
#endif

	sense = &xs->sense.scsi_sense;
#ifdef SCSIPI_DEBUG
	if (periph->periph_flags & SCSIPI_DB1) {
		int count;
		scsipi_printaddr(periph);
		printf(" sense debug information:\n");
		printf("\tcode 0x%x valid 0x%x\n",
		    sense->error_code & SSD_ERRCODE,
		    sense->error_code & SSD_ERRCODE_VALID ? 1 : 0);
		printf("\tseg 0x%x key 0x%x ili 0x%x eom 0x%x fmark 0x%x\n",
		    sense->segment,
		    sense->flags & SSD_KEY,
		    sense->flags & SSD_ILI ? 1 : 0,
		    sense->flags & SSD_EOM ? 1 : 0,
		    sense->flags & SSD_FILEMARK ? 1 : 0);
		printf("\ninfo: 0x%x 0x%x 0x%x 0x%x followed by %d "
		    "extra bytes\n",
		    sense->info[0],
		    sense->info[1],
		    sense->info[2],
		    sense->info[3],
		    sense->extra_len);
		printf("\textra: ");
		for (count = 0; count < ADD_BYTES_LIM(sense); count++)
			printf("0x%x ", sense->cmd_spec_info[count]);
		printf("\n");
	}
#endif

	/*
	 * If the periph has its own error handler, call it first.
	 * If it returns a legit error value, return that, otherwise
	 * it wants us to continue with normal error processing.
	 */
	if (periph->periph_switch->psw_error != NULL) {
		SC_DEBUG(periph, SCSIPI_DB2,
		    ("calling private err_handler()\n"));
		error = (*periph->periph_switch->psw_error)(xs);
		if (error != EJUSTRETURN)
			return (error);
	}
	/* otherwise use the default */
	switch (sense->error_code & SSD_ERRCODE) {

		/*
		 * Old SCSI-1 and SASI devices respond with
		 * codes other than 70.
		 */
	case 0x00:	/* no error (command completed OK) */
		return (0);
	case 0x04:	/* drive not ready after it was selected */
		if ((periph->periph_flags & PERIPH_REMOVABLE) != 0)
			periph->periph_flags &= ~PERIPH_MEDIA_LOADED;
		if ((xs->xs_control & XS_CTL_IGNORE_NOT_READY) != 0)
			return (0);
		/* XXX - display some sort of error here? */
		return (EIO);
	case 0x20:	/* invalid command */
		if ((xs->xs_control &
		    XS_CTL_IGNORE_ILLEGAL_REQUEST) != 0)
			return (0);
		return (EINVAL);
	case 0x25:	/* invalid LUN (Adaptec ACB-4000) */
		return (EACCES);

		/*
		 * If it's code 70, use the extended stuff and
		 * interpret the key
		 */
	case 0x71:	/* delayed error */
		scsipi_printaddr(periph);
		key = sense->flags & SSD_KEY;
		printf(" DEFERRED ERROR, key = 0x%x\n", key);
		/* FALLTHROUGH */
	case 0x70:
#ifndef SCSIVERBOSE
		if ((sense->error_code & SSD_ERRCODE_VALID) != 0)
			info = _4btol(sense->info);
		else
			info = 0;
#endif
		key = sense->flags & SSD_KEY;

		switch (key) {
		case SKEY_NO_SENSE:
		case SKEY_RECOVERED_ERROR:
			if (xs->resid == xs->datalen && xs->datalen) {
				/*
				 * Why is this here?
				 */
				xs->resid = 0;	/* not short read */
			}
		case SKEY_EQUAL:
			error = 0;
			break;
		case SKEY_NOT_READY:
			if ((periph->periph_flags & PERIPH_REMOVABLE) != 0)
				periph->periph_flags &= ~PERIPH_MEDIA_LOADED;
			if ((xs->xs_control & XS_CTL_IGNORE_NOT_READY) != 0)
				return (0);
			if (sense->add_sense_code == 0x3A) {
				error = ENODEV; /* Medium not present */
				if (xs->xs_control & XS_CTL_SILENT_NODEV)
					return (error);
			} else
				error = EIO;
			if ((xs->xs_control & XS_CTL_SILENT) != 0)
				return (error);
			break;
		case SKEY_ILLEGAL_REQUEST:
			if ((xs->xs_control &
			    XS_CTL_IGNORE_ILLEGAL_REQUEST) != 0)
				return (0);
			/*
			 * Handle the case where a device reports
			 * Logical Unit Not Supported during discovery.
			 */
			if ((xs->xs_control & XS_CTL_DISCOVERY) != 0 &&
			    sense->add_sense_code == 0x25 &&
			    sense->add_sense_code_qual == 0x00)
				return (EINVAL);
			if ((xs->xs_control & XS_CTL_SILENT) != 0)
				return (EIO);
			error = EINVAL;
			break;
		case SKEY_UNIT_ATTENTION:
			if (sense->add_sense_code == 0x29 &&
			    sense->add_sense_code_qual == 0x00) {
				/* device or bus reset */
				return (ERESTART);
			}
			if ((periph->periph_flags & PERIPH_REMOVABLE) != 0)
				periph->periph_flags &= ~PERIPH_MEDIA_LOADED;
			if ((xs->xs_control &
			    XS_CTL_IGNORE_MEDIA_CHANGE) != 0 ||
			    /* XXX Should reupload any transient state. */
			    (periph->periph_flags &
			     PERIPH_REMOVABLE) == 0) {
				return (ERESTART);
			}
			if ((xs->xs_control & XS_CTL_SILENT) != 0)
				return (EIO);
			error = EIO;
			break;
		case SKEY_WRITE_PROTECT:
			error = EROFS;
			break;
		case SKEY_BLANK_CHECK:
			error = 0;
			break;
		case SKEY_ABORTED_COMMAND:
			if (xs->xs_retries != 0) {
				xs->xs_retries--;
				error = ERESTART;
			} else
				error = EIO;
			break;
		case SKEY_VOLUME_OVERFLOW:
			error = ENOSPC;
			break;
		default:
			error = EIO;
			break;
		}

#ifdef SCSIVERBOSE
		if (key && (xs->xs_control & XS_CTL_SILENT) == 0)
			scsipi_print_sense(xs, 0);
#else
		if (key) {
			scsipi_printaddr(periph);
			printf("%s", error_mes[key - 1]);
			if ((sense->error_code & SSD_ERRCODE_VALID) != 0) {
				switch (key) {
				case SKEY_NOT_READY:
				case SKEY_ILLEGAL_REQUEST:
				case SKEY_UNIT_ATTENTION:
				case SKEY_WRITE_PROTECT:
					break;
				case SKEY_BLANK_CHECK:
					printf(", requested size: %d (decimal)",
					    info);
					break;
				case SKEY_ABORTED_COMMAND:
					if (xs->xs_retries)
						printf(", retrying");
					printf(", cmd 0x%x, info 0x%x",
					    xs->cmd->opcode, info);
					break;
				default:
					printf(", info = %d (decimal)", info);
				}
			}
			if (sense->extra_len != 0) {
				int n;
				printf(", data =");
				for (n = 0; n < sense->extra_len; n++)
					printf(" %02x",
					    sense->cmd_spec_info[n]);
			}
			printf("\n");
		}
#endif
		return (error);

		/*
		 * Some other code, just report it
		 */
	default:
#if defined(SCSIDEBUG) || defined(DEBUG)
	{
		static char *uc = "undecodable sense error";
		int i;
		u_int8_t *cptr = (u_int8_t *) sense;
		scsipi_printaddr(periph);
		if (xs->cmd == &xs->cmdstore) {
			printf("%s for opcode 0x%x, data=",
			    uc, xs->cmdstore.opcode);
		} else {
			printf("%s, data=", uc);
		}
		for (i = 0; i < sizeof (sense); i++)
			printf(" 0x%02x", *(cptr++) & 0xff);
		printf("\n");
	}
#else
		scsipi_printaddr(periph);
		printf("Sense Error Code 0x%x",
		    sense->error_code & SSD_ERRCODE);
		if ((sense->error_code & SSD_ERRCODE_VALID) != 0) {
			struct scsipi_sense_data_unextended *usense =
			    (struct scsipi_sense_data_unextended *)sense;
			printf(" at block no. %d (decimal)",
			    _3btol(usense->block));
		}
		printf("\n");
#endif
		return (EIO);
	}
}

/*
 * scsipi_size:
 *
 *	Find out from the device what its capacity is.
 */
u_int64_t
scsipi_size(struct scsipi_periph *periph, int flags)
{
	struct scsipi_read_cap_data rdcap;
	struct scsipi_read_capacity scsipi_cmd;

	memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
	scsipi_cmd.opcode = READ_CAPACITY;

	/*
	 * If the command works, interpret the result as a 4 byte
	 * number of blocks
	 */
	if (scsipi_command(periph, (struct scsipi_generic *)&scsipi_cmd,
	    sizeof(scsipi_cmd), (u_char *)&rdcap, sizeof(rdcap),
	    SCSIPIRETRIES, 20000, NULL,
	    flags | XS_CTL_DATA_IN | XS_CTL_DATA_ONSTACK | XS_CTL_SILENT) != 0)
		return (0);

	return (_4btol(rdcap.addr) + 1);
}
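
/*
 * Editor's note (not in the original source): READ CAPACITY returns the
 * address of the last block, hence the `+ 1' above to turn it into a
 * block count.  Worked example: a disk whose last LBA is 0x003fffff
 * yields _4btol(rdcap.addr) == 0x003fffff, so scsipi_size() returns
 * 0x00400000 (4194304) blocks; at 512 bytes per block that is 2 GB.
 */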

/*
 * scsipi_test_unit_ready:
 *
 *	Issue a `test unit ready' request.
 */
int
scsipi_test_unit_ready(struct scsipi_periph *periph, int flags)
{
	int retries;
	struct scsipi_test_unit_ready scsipi_cmd;

	/* some ATAPI drives don't support TEST_UNIT_READY. Sigh */
	if (periph->periph_quirks & PQUIRK_NOTUR)
		return (0);

	memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
	scsipi_cmd.opcode = TEST_UNIT_READY;

	if (flags & XS_CTL_DISCOVERY)
		retries = 0;
	else
		retries = SCSIPIRETRIES;

	return (scsipi_command(periph,
	    (struct scsipi_generic *)&scsipi_cmd, sizeof(scsipi_cmd),
	    0, 0, retries, 10000, NULL, flags));
}

/*
 * scsipi_inquire:
 *
 *	Ask the device about itself.
 */
int
scsipi_inquire(struct scsipi_periph *periph, struct scsipi_inquiry_data *inqbuf,
    int flags)
{
	int retries;
	struct scsipi_inquiry scsipi_cmd;
	int error;

	memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
	scsipi_cmd.opcode = INQUIRY;

	if (flags & XS_CTL_DISCOVERY)
		retries = 0;
	else
		retries = SCSIPIRETRIES;

	/*
	 * If we request more data than the device can provide, it SHOULD just
	 * return a short response.  However, some devices error with an
	 * ILLEGAL REQUEST sense code, and yet others have even more special
	 * failure modes (such as the GL641USB flash adapter, which goes loony
	 * and sends corrupted CRCs).  To work around this, and to bring our
	 * behavior more in line with other OSes, we do a shorter inquiry,
	 * covering all the SCSI-2 information, first, and then request more
	 * data iff the "additional length" field indicates there is more.
	 * - mycroft, 2003/10/16
	 */
	scsipi_cmd.length = SCSIPI_INQUIRY_LENGTH_SCSI2;
	error = scsipi_command(periph,
	    (struct scsipi_generic *) &scsipi_cmd, sizeof(scsipi_cmd),
	    (u_char *) inqbuf, SCSIPI_INQUIRY_LENGTH_SCSI2,
	    retries, 10000, NULL, XS_CTL_DATA_IN | flags);
	if (!error &&
	    inqbuf->additional_length > SCSIPI_INQUIRY_LENGTH_SCSI2 - 4) {
		scsipi_cmd.length = SCSIPI_INQUIRY_LENGTH_SCSI3;
		error = scsipi_command(periph,
		    (struct scsipi_generic *) &scsipi_cmd, sizeof(scsipi_cmd),
		    (u_char *) inqbuf, SCSIPI_INQUIRY_LENGTH_SCSI3,
		    retries, 10000, NULL, XS_CTL_DATA_IN | flags);
	}

#ifdef SCSI_OLD_NOINQUIRY
	/*
	 * Kludge for the Adaptec ACB-4000 SCSI->MFM translator.
	 * This board doesn't support the INQUIRY command at all.
	 */
	if (error == EINVAL || error == EACCES) {
		/*
		 * Conjure up an INQUIRY response.
		 */
		inqbuf->device = (error == EINVAL ?
		    SID_QUAL_LU_PRESENT :
		    SID_QUAL_LU_NOTPRESENT) | T_DIRECT;
		inqbuf->dev_qual2 = 0;
		inqbuf->version = 0;
		inqbuf->response_format = SID_FORMAT_SCSI1;
		inqbuf->additional_length = SCSIPI_INQUIRY_LENGTH_SCSI2 - 4;
		inqbuf->flags1 = inqbuf->flags2 = inqbuf->flags3 = 0;
		memcpy(inqbuf->vendor, "ADAPTEC ACB-4000            ", 28);
		error = 0;
	}

	/*
	 * Kludge for the Emulex MT-02 SCSI->QIC translator.
	 * This board gives an empty response to an INQUIRY command.
	 */
	else if (error == 0 &&
	    inqbuf->device == (SID_QUAL_LU_PRESENT | T_DIRECT) &&
	    inqbuf->dev_qual2 == 0 &&
	    inqbuf->version == 0 &&
	    inqbuf->response_format == SID_FORMAT_SCSI1) {
		/*
		 * Fill out the INQUIRY response.
		 */
		inqbuf->device = (SID_QUAL_LU_PRESENT | T_SEQUENTIAL);
		inqbuf->dev_qual2 = SID_REMOVABLE;
		inqbuf->additional_length = SCSIPI_INQUIRY_LENGTH_SCSI2 - 4;
		inqbuf->flags1 = inqbuf->flags2 = inqbuf->flags3 = 0;
		memcpy(inqbuf->vendor, "EMULEX  MT-02 QIC           ", 28);
	}
#endif /* SCSI_OLD_NOINQUIRY */

	return error;
}
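
/*
 * Editor's note (a sketch under assumptions, not in the original source):
 * with SCSIPI_INQUIRY_LENGTH_SCSI2 assumed to be 36, the two-step INQUIRY
 * above behaves like this:
 *
 *	additional_length == 31:  31 <= 36 - 4, the short reply suffices
 *				  and no second command is sent.
 *	additional_length == 70:  70 >  36 - 4, so a second INQUIRY asking
 *				  for SCSIPI_INQUIRY_LENGTH_SCSI3 bytes is
 *				  issued into the same buffer.
 */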

/*
 * scsipi_prevent:
 *
 *	Prevent or allow the user to remove the media
 */
int
scsipi_prevent(struct scsipi_periph *periph, int type, int flags)
{
	struct scsipi_prevent scsipi_cmd;

	memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
	scsipi_cmd.opcode = PREVENT_ALLOW;
	scsipi_cmd.how = type;

	return (scsipi_command(periph,
	    (struct scsipi_generic *) &scsipi_cmd, sizeof(scsipi_cmd),
	    0, 0, SCSIPIRETRIES, 5000, NULL, flags));
}

/*
 * scsipi_start:
 *
 *	Send a START UNIT.
 */
int
scsipi_start(struct scsipi_periph *periph, int type, int flags)
{
	struct scsipi_start_stop scsipi_cmd;

	memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
	scsipi_cmd.opcode = START_STOP;
	scsipi_cmd.byte2 = 0x00;
	scsipi_cmd.how = type;

	return (scsipi_command(periph,
	    (struct scsipi_generic *) &scsipi_cmd, sizeof(scsipi_cmd),
	    0, 0, SCSIPIRETRIES, (type & SSS_START) ? 60000 : 10000,
	    NULL, flags));
}

/*
 * scsipi_mode_sense, scsipi_mode_sense_big:
 *	get a sense page from a device
 */

int
scsipi_mode_sense(struct scsipi_periph *periph, int byte2, int page,
    struct scsipi_mode_header *data, int len, int flags, int retries,
    int timeout)
{
	struct scsipi_mode_sense scsipi_cmd;
	int error;

	memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
	scsipi_cmd.opcode = MODE_SENSE;
	scsipi_cmd.byte2 = byte2;
	scsipi_cmd.page = page;
	scsipi_cmd.length = len & 0xff;
	error = scsipi_command(periph, (struct scsipi_generic *)&scsipi_cmd,
	    sizeof(scsipi_cmd), (void *)data, len, retries, timeout, NULL,
	    flags | XS_CTL_DATA_IN);
	SC_DEBUG(periph, SCSIPI_DB2,
	    ("scsipi_mode_sense: error=%d\n", error));
	return (error);
}

int
scsipi_mode_sense_big(struct scsipi_periph *periph, int byte2, int page,
    struct scsipi_mode_header_big *data, int len, int flags, int retries,
    int timeout)
{
	struct scsipi_mode_sense_big scsipi_cmd;
	int error;

	memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
	scsipi_cmd.opcode = MODE_SENSE_BIG;
	scsipi_cmd.byte2 = byte2;
	scsipi_cmd.page = page;
	_lto2b(len, scsipi_cmd.length);
	error = scsipi_command(periph, (struct scsipi_generic *)&scsipi_cmd,
	    sizeof(scsipi_cmd), (void *)data, len, retries, timeout, NULL,
	    flags | XS_CTL_DATA_IN);
	SC_DEBUG(periph, SCSIPI_DB2,
	    ("scsipi_mode_sense_big: error=%d\n", error));
	return (error);
}

int
scsipi_mode_select(struct scsipi_periph *periph, int byte2,
    struct scsipi_mode_header *data, int len, int flags, int retries,
    int timeout)
{
	struct scsipi_mode_select scsipi_cmd;
	int error;

	memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
	scsipi_cmd.opcode = MODE_SELECT;
	scsipi_cmd.byte2 = byte2;
	scsipi_cmd.length = len & 0xff;
	error = scsipi_command(periph, (struct scsipi_generic *)&scsipi_cmd,
	    sizeof(scsipi_cmd), (void *)data, len, retries, timeout, NULL,
	    flags | XS_CTL_DATA_OUT);
	SC_DEBUG(periph, SCSIPI_DB2,
	    ("scsipi_mode_select: error=%d\n", error));
	return (error);
}

int
scsipi_mode_select_big(struct scsipi_periph *periph, int byte2,
    struct scsipi_mode_header_big *data, int len, int flags, int retries,
    int timeout)
{
	struct scsipi_mode_select_big scsipi_cmd;
	int error;

	memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
	scsipi_cmd.opcode = MODE_SELECT_BIG;
	scsipi_cmd.byte2 = byte2;
	_lto2b(len, scsipi_cmd.length);
	error = scsipi_command(periph, (struct scsipi_generic *)&scsipi_cmd,
	    sizeof(scsipi_cmd), (void *)data, len, retries, timeout, NULL,
	    flags | XS_CTL_DATA_OUT);
	SC_DEBUG(periph, SCSIPI_DB2,
	    ("scsipi_mode_select_big: error=%d\n", error));
	return (error);
}
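
/*
 * Editor's example (a sketch under assumptions, not in the original
 * source): callers read a mode page into a buffer that begins with the
 * generic header; the page code, buffer size and timeout below are
 * illustrative only:
 *
 *	struct {
 *		struct scsipi_mode_header header;
 *		u_int8_t page[128];
 *	} data;
 *	int error;
 *
 *	error = scsipi_mode_sense(periph, 0, 8, &data.header,
 *	    sizeof(data), 0, SCSIPIRETRIES, 10000);
 *
 * The _big variants differ only in using a two-byte allocation length
 * (via _lto2b) and the larger header.
 */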

/*
 * scsipi_done:
 *
 *	This routine is called by an adapter's interrupt handler when
 *	an xfer is completed.
 */
void
scsipi_done(struct scsipi_xfer *xs)
{
	struct scsipi_periph *periph = xs->xs_periph;
	struct scsipi_channel *chan = periph->periph_channel;
	int s, freezecnt;

	SC_DEBUG(periph, SCSIPI_DB2, ("scsipi_done\n"));
#ifdef SCSIPI_DEBUG
	if (periph->periph_dbflags & SCSIPI_DB1)
		show_scsipi_cmd(xs);
#endif

	s = splbio();
	/*
	 * The resource this command was using is now free.
	 */
	scsipi_put_resource(chan);
	xs->xs_periph->periph_sent--;

	/*
	 * If the command was tagged, free the tag.
	 */
	if (XS_CTL_TAGTYPE(xs) != 0)
		scsipi_put_tag(xs);
	else
		periph->periph_flags &= ~PERIPH_UNTAG;

	/* Mark the command as `done'. */
	xs->xs_status |= XS_STS_DONE;

#ifdef DIAGNOSTIC
	if ((xs->xs_control & (XS_CTL_ASYNC|XS_CTL_POLL)) ==
	    (XS_CTL_ASYNC|XS_CTL_POLL))
		panic("scsipi_done: ASYNC and POLL");
#endif

	/*
	 * If the xfer had an error of any sort, freeze the
	 * periph's queue.  Freeze it again if we were requested
	 * to do so in the xfer.
	 */
	freezecnt = 0;
	if (xs->error != XS_NOERROR)
		freezecnt++;
	if (xs->xs_control & XS_CTL_FREEZE_PERIPH)
		freezecnt++;
	if (freezecnt != 0)
		scsipi_periph_freeze(periph, freezecnt);

	/*
	 * Record the xfer with a pending sense, in case a SCSI reset is
	 * received before the thread is woken up.
	 */
	if (xs->error == XS_BUSY && xs->status == SCSI_CHECK) {
		periph->periph_flags |= PERIPH_SENSE;
		periph->periph_xscheck = xs;
	}

	/*
	 * If this was an xfer that was not to complete asynchronously,
	 * let the requesting thread perform error checking/handling
	 * in its context.
	 */
	if ((xs->xs_control & XS_CTL_ASYNC) == 0) {
		splx(s);
		/*
		 * If it's a polling job, just return, to unwind the
		 * call graph.  We don't need to restart the queue,
		 * because polling jobs are treated specially, and
		 * are really only used during crash dumps anyway
		 * (XXX or during boot-time autoconfiguration of
		 * ATAPI devices).
		 */
		if (xs->xs_control & XS_CTL_POLL)
			return;
		wakeup(xs);
		goto out;
	}

	/*
	 * Catch the extremely common case of I/O completing
	 * without error; no use in taking a context switch
	 * if we can handle it in interrupt context.
	 */
	if (xs->error == XS_NOERROR) {
		splx(s);
		(void) scsipi_complete(xs);
		goto out;
	}

	/*
	 * There is an error on this xfer.  Put it on the channel's
	 * completion queue, and wake up the completion thread.
	 */
	TAILQ_INSERT_TAIL(&chan->chan_complete, xs, channel_q);
	splx(s);
	wakeup(&chan->chan_complete);

 out:
	/*
	 * If there are more xfers on the channel's queue, attempt to
	 * run them.
	 */
	scsipi_run_queue(chan);
}

/*
 * scsipi_complete:
 *
 *	Completion of a scsipi_xfer.  This is the guts of scsipi_done().
 *
 *	NOTE: This routine MUST be called with valid thread context
 *	except for the case where the following two conditions are
 *	true:
 *
 *		xs->error == XS_NOERROR
 *		XS_CTL_ASYNC is set in xs->xs_control
 *
 *	The semantics of this routine can be tricky, so here is an
 *	explanation:
 *
 *		0		Xfer completed successfully.
 *
 *		ERESTART	Xfer had an error, but was restarted.
 *
 *		anything else	Xfer had an error, return value is Unix
 *				errno.
 *
 *	If the return value is anything but ERESTART:
 *
 *		- If XS_CTL_ASYNC is set, `xs' has been freed back to
 *		  the pool.
 *
 *		- If there is a buf associated with the xfer,
 *		  it has been biodone()'d.
 */
static int
scsipi_complete(struct scsipi_xfer *xs)
{
	struct scsipi_periph *periph = xs->xs_periph;
	struct scsipi_channel *chan = periph->periph_channel;
	struct buf *bp;
	int error, s;

#ifdef DIAGNOSTIC
	if ((xs->xs_control & XS_CTL_ASYNC) != 0 && xs->bp == NULL)
		panic("scsipi_complete: XS_CTL_ASYNC but no buf");
#endif
	/*
	 * If command terminated with a CHECK CONDITION, we need to issue a
	 * REQUEST_SENSE command.  Once the REQUEST_SENSE has been processed
	 * we'll have the real status.
	 * Must be processed at splbio() to avoid missing a SCSI bus reset
	 * for this command.
	 */
	s = splbio();
	if (xs->error == XS_BUSY && xs->status == SCSI_CHECK) {
		/* request sense for a request sense ? */
		if (xs->xs_control & XS_CTL_REQSENSE) {
			scsipi_printaddr(periph);
			printf("request sense for a request sense ?\n");
			/* XXX maybe we should reset the device ? */
			/* we've been frozen because xs->error != XS_NOERROR */
			scsipi_periph_thaw(periph, 1);
			splx(s);
			if (xs->resid < xs->datalen) {
				printf("we read %d bytes of sense anyway:\n",
				    xs->datalen - xs->resid);
#ifdef SCSIVERBOSE
				scsipi_print_sense_data((void *)xs->data, 0);
#endif
			}
			return EINVAL;
		}
		scsipi_request_sense(xs);
	}
	splx(s);

	/*
	 * If it's a user level request, bypass all usual completion
	 * processing, let the user work it out..
	 */
	if ((xs->xs_control & XS_CTL_USERCMD) != 0) {
		SC_DEBUG(periph, SCSIPI_DB3, ("calling user done()\n"));
		if (xs->error != XS_NOERROR)
			scsipi_periph_thaw(periph, 1);
		scsipi_user_done(xs);
		SC_DEBUG(periph, SCSIPI_DB3, ("returned from user done()\n "));
		return 0;
	}

	switch (xs->error) {
	case XS_NOERROR:
		error = 0;
		break;

	case XS_SENSE:
	case XS_SHORTSENSE:
		error = (*chan->chan_bustype->bustype_interpret_sense)(xs);
		break;

	case XS_RESOURCE_SHORTAGE:
		/*
		 * XXX Should freeze channel's queue.
		 */
		scsipi_printaddr(periph);
		printf("adapter resource shortage\n");
		/* FALLTHROUGH */

	case XS_BUSY:
		if (xs->error == XS_BUSY && xs->status == SCSI_QUEUE_FULL) {
			struct scsipi_max_openings mo;

			/*
			 * We set the openings to active - 1, assuming that
			 * the command that got us here is the first one that
			 * can't fit into the device's queue.  If that's not
			 * the case, I guess we'll find out soon enough.
			 */
			mo.mo_target = periph->periph_target;
			mo.mo_lun = periph->periph_lun;
			if (periph->periph_active < periph->periph_openings)
				mo.mo_openings = periph->periph_active - 1;
			else
				mo.mo_openings = periph->periph_openings - 1;
#ifdef DIAGNOSTIC
			if (mo.mo_openings < 0) {
				scsipi_printaddr(periph);
				printf("QUEUE FULL resulted in < 0 openings\n");
				panic("scsipi_done");
			}
#endif
			if (mo.mo_openings == 0) {
				scsipi_printaddr(periph);
				printf("QUEUE FULL resulted in 0 openings\n");
				mo.mo_openings = 1;
			}
			scsipi_async_event(chan, ASYNC_EVENT_MAX_OPENINGS, &mo);
			error = ERESTART;
		} else if (xs->xs_retries != 0) {
			xs->xs_retries--;
			/*
			 * Wait one second, and try again.
			 */
			if ((xs->xs_control & XS_CTL_POLL) ||
			    (chan->chan_flags & SCSIPI_CHAN_TACTIVE) == 0) {
				delay(1000000);
			} else if (!callout_pending(&periph->periph_callout)) {
				scsipi_periph_freeze(periph, 1);
				callout_reset(&periph->periph_callout,
				    hz, scsipi_periph_timed_thaw, periph);
			}
			error = ERESTART;
		} else
			error = EBUSY;
		break;

	case XS_REQUEUE:
		error = ERESTART;
		break;

	case XS_SELTIMEOUT:
	case XS_TIMEOUT:
		/*
		 * If the device hasn't gone away, honor retry counts.
		 *
		 * Note that if we're in the middle of probing it,
		 * it won't be found because it isn't here yet so
		 * we won't honor the retry count in that case.
		 */
		if (scsipi_lookup_periph(chan, periph->periph_target,
		    periph->periph_lun) && xs->xs_retries != 0) {
			xs->xs_retries--;
			error = ERESTART;
		} else
			error = EIO;
		break;

	case XS_RESET:
		if (xs->xs_control & XS_CTL_REQSENSE) {
			/*
			 * request sense interrupted by reset: signal it
			 * with EINTR return code.
			 */
			error = EINTR;
		} else {
			if (xs->xs_retries != 0) {
				xs->xs_retries--;
				error = ERESTART;
			} else
				error = EIO;
		}
		break;

	case XS_DRIVER_STUFFUP:
		scsipi_printaddr(periph);
		printf("generic HBA error\n");
		error = EIO;
		break;
	default:
		scsipi_printaddr(periph);
		printf("invalid return code from adapter: %d\n", xs->error);
		error = EIO;
		break;
	}

	s = splbio();
	if (error == ERESTART) {
		/*
		 * If we get here, the periph has been thawed and frozen
		 * again if we had to issue recovery commands.  Alternatively,
		 * it may have been frozen again and in a timed thaw.  In
		 * any case, we thaw the periph once we re-enqueue the
		 * command.  Once the periph is fully thawed, it will begin
		 * operation again.
		 */
		xs->error = XS_NOERROR;
		xs->status = SCSI_OK;
		xs->xs_status &= ~XS_STS_DONE;
		xs->xs_requeuecnt++;
		error = scsipi_enqueue(xs);
		if (error == 0) {
			scsipi_periph_thaw(periph, 1);
			splx(s);
			return (ERESTART);
		}
	}

	/*
	 * scsipi_done() freezes the queue if not XS_NOERROR.
	 * Thaw it here.
	 */
	if (xs->error != XS_NOERROR)
		scsipi_periph_thaw(periph, 1);

	/*
	 * Set buffer fields in case the periph
	 * switch done func uses them
	 */
	if ((bp = xs->bp) != NULL) {
		if (error) {
			bp->b_error = error;
			bp->b_flags |= B_ERROR;
			bp->b_resid = bp->b_bcount;
		} else {
			bp->b_error = 0;
			bp->b_resid = xs->resid;
		}
	}

	if (periph->periph_switch->psw_done)
		periph->periph_switch->psw_done(xs);

	if (bp)
		biodone(bp);

	if (xs->xs_control & XS_CTL_ASYNC)
		scsipi_put_xs(xs);
	splx(s);

	return (error);
}

/*
 * Issue a request sense for the given scsipi_xfer.  Called when the xfer
 * returns with a CHECK_CONDITION status.  Must be called in valid thread
 * context and at splbio().
 */

static void
scsipi_request_sense(struct scsipi_xfer *xs)
{
	struct scsipi_periph *periph = xs->xs_periph;
	int flags, error;
	struct scsipi_sense cmd;

	periph->periph_flags |= PERIPH_SENSE;

	/* if command was polling, request sense will too */
	flags = xs->xs_control & XS_CTL_POLL;
	/* Polling commands can't sleep */
	if (flags)
		flags |= XS_CTL_NOSLEEP;

	flags |= XS_CTL_REQSENSE | XS_CTL_URGENT | XS_CTL_DATA_IN |
	    XS_CTL_THAW_PERIPH | XS_CTL_FREEZE_PERIPH;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = REQUEST_SENSE;
	cmd.length = sizeof(struct scsipi_sense_data);

	error = scsipi_command(periph,
	    (struct scsipi_generic *) &cmd, sizeof(cmd),
	    (u_char*)&xs->sense.scsi_sense, sizeof(struct scsipi_sense_data),
	    0, 1000, NULL, flags);
	periph->periph_flags &= ~PERIPH_SENSE;
	periph->periph_xscheck = NULL;
	switch (error) {
	case 0:
		/* we have a valid sense */
		xs->error = XS_SENSE;
		return;
	case EINTR:
		/* REQUEST_SENSE interrupted by bus reset. */
		xs->error = XS_RESET;
		return;
	case EIO:
		/* request sense couldn't be performed */
		/*
		 * XXX this isn't quite right but we don't have anything
		 * better for now
		 */
		xs->error = XS_DRIVER_STUFFUP;
		return;
	default:
		/* Notify that request sense failed. */
		xs->error = XS_DRIVER_STUFFUP;
		scsipi_printaddr(periph);
		printf("request sense failed with error %d\n", error);
		return;
	}
}

/*
 * scsipi_enqueue:
 *
 *	Enqueue an xfer on a channel.
 */
static int
scsipi_enqueue(struct scsipi_xfer *xs)
{
	struct scsipi_channel *chan = xs->xs_periph->periph_channel;
	struct scsipi_xfer *qxs;
	int s;

	s = splbio();

	/*
	 * If the xfer is to be polled, and there are already jobs on
	 * the queue, we can't proceed.
	 */
	if ((xs->xs_control & XS_CTL_POLL) != 0 &&
	    TAILQ_FIRST(&chan->chan_queue) != NULL) {
		splx(s);
		xs->error = XS_DRIVER_STUFFUP;
		return (EAGAIN);
	}

	/*
	 * If we have an URGENT xfer, it's an error recovery command
	 * and it should just go on the head of the channel's queue.
	 */
	if (xs->xs_control & XS_CTL_URGENT) {
		TAILQ_INSERT_HEAD(&chan->chan_queue, xs, channel_q);
		goto out;
	}

	/*
	 * If this xfer has already been on the queue before, we
	 * need to reinsert it in the correct order.  That order is:
	 *
	 *	Immediately before the first xfer for this periph
	 *	with a requeuecnt less than xs->xs_requeuecnt.
	 *
	 * Failing that, at the end of the queue.  (We'll end up
	 * there naturally.)
	 */
	if (xs->xs_requeuecnt != 0) {
		for (qxs = TAILQ_FIRST(&chan->chan_queue); qxs != NULL;
		     qxs = TAILQ_NEXT(qxs, channel_q)) {
			if (qxs->xs_periph == xs->xs_periph &&
			    qxs->xs_requeuecnt < xs->xs_requeuecnt)
				break;
		}
		if (qxs != NULL) {
			TAILQ_INSERT_AFTER(&chan->chan_queue, qxs, xs,
			    channel_q);
			goto out;
		}
	}
	TAILQ_INSERT_TAIL(&chan->chan_queue, xs, channel_q);
 out:
	if (xs->xs_control & XS_CTL_THAW_PERIPH)
		scsipi_periph_thaw(xs->xs_periph, 1);
	splx(s);
	return (0);
}
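
/*
 * Editor's note (not in the original source), illustrating the ordering
 * rules above: starting from an empty channel queue,
 *
 *	scsipi_enqueue(xs1);		ordinary xfer
 *	scsipi_enqueue(xs2);		ordinary xfer
 *	scsipi_enqueue(xs3);		XS_CTL_URGENT set
 *
 * leaves the queue as xs3, xs1, xs2: urgent recovery commands jump to
 * the head, everything else stays FIFO until a nonzero xs_requeuecnt
 * forces the reinsertion rule described above.
 */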

/*
 * scsipi_run_queue:
 *
 *	Start as many xfers as possible running on the channel.
 */
static void
scsipi_run_queue(struct scsipi_channel *chan)
{
	struct scsipi_xfer *xs;
	struct scsipi_periph *periph;
	int s;

	for (;;) {
		s = splbio();

		/*
		 * If the channel is frozen, we can't do any work right
		 * now.
		 */
		if (chan->chan_qfreeze != 0) {
			splx(s);
			return;
		}

		/*
		 * Look for work to do, and make sure we can do it.
		 */
		for (xs = TAILQ_FIRST(&chan->chan_queue); xs != NULL;
		     xs = TAILQ_NEXT(xs, channel_q)) {
			periph = xs->xs_periph;

			if ((periph->periph_sent >= periph->periph_openings) ||
			    periph->periph_qfreeze != 0 ||
			    (periph->periph_flags & PERIPH_UNTAG) != 0)
				continue;

			if ((periph->periph_flags &
			    (PERIPH_RECOVERING | PERIPH_SENSE)) != 0 &&
			    (xs->xs_control & XS_CTL_URGENT) == 0)
				continue;

			/*
			 * We can issue this xfer!
			 */
			goto got_one;
		}

		/*
		 * Can't find any work to do right now.
		 */
		splx(s);
		return;

 got_one:
		/*
		 * Have an xfer to run.  Allocate a resource from
		 * the adapter to run it.  If we can't allocate that
		 * resource, we don't dequeue the xfer.
		 */
		if (scsipi_get_resource(chan) == 0) {
			/*
			 * Adapter is out of resources.  If the adapter
			 * supports it, attempt to grow them.
			 */
			if (scsipi_grow_resources(chan) == 0) {
				/*
				 * Wasn't able to grow resources,
				 * nothing more we can do.
				 */
				if (xs->xs_control & XS_CTL_POLL) {
					scsipi_printaddr(xs->xs_periph);
					printf("polling command but no "
					    "adapter resources");
					/* We'll panic shortly... */
				}
				splx(s);

				/*
				 * XXX: We should be able to note that
				 * XXX: resources are needed here!
				 */
				return;
			}
			/*
			 * scsipi_grow_resources() allocated the resource
			 * for us.
			 */
		}

		/*
		 * We have a resource to run this xfer, do it!
		 */
		TAILQ_REMOVE(&chan->chan_queue, xs, channel_q);

		/*
		 * If the command is to be tagged, allocate a tag ID
		 * for it.
		 */
		if (XS_CTL_TAGTYPE(xs) != 0)
			scsipi_get_tag(xs);
		else
			periph->periph_flags |= PERIPH_UNTAG;
		periph->periph_sent++;
		splx(s);

		scsipi_adapter_request(chan, ADAPTER_REQ_RUN_XFER, xs);
	}
#ifdef DIAGNOSTIC
	panic("scsipi_run_queue: impossible");
#endif
}

/*
 * scsipi_execute_xs:
 *
 *	Begin execution of an xfer, waiting for it to complete, if necessary.
 */
int
scsipi_execute_xs(struct scsipi_xfer *xs)
{
	struct scsipi_periph *periph = xs->xs_periph;
	struct scsipi_channel *chan = periph->periph_channel;
	int oasync, async, poll, retries, error, s;

	xs->xs_status &= ~XS_STS_DONE;
	xs->error = XS_NOERROR;
	xs->resid = xs->datalen;
	xs->status = SCSI_OK;

#ifdef SCSIPI_DEBUG
	if (xs->xs_periph->periph_dbflags & SCSIPI_DB3) {
		printf("scsipi_execute_xs: ");
		show_scsipi_xs(xs);
		printf("\n");
	}
#endif

	/*
	 * Deal with command tagging:
	 *
	 *	- If the device's current operating mode doesn't
	 *	  include tagged queueing, clear the tag mask.
	 *
	 *	- If the device's current operating mode *does*
	 *	  include tagged queueing, set the tag_type in
	 *	  the xfer to the appropriate byte for the tag
	 *	  message.
	 */
	if ((PERIPH_XFER_MODE(periph) & PERIPH_CAP_TQING) == 0 ||
	    (xs->xs_control & XS_CTL_REQSENSE)) {
		xs->xs_control &= ~XS_CTL_TAGMASK;
		xs->xs_tag_type = 0;
	} else {
		/*
		 * If the request doesn't specify a tag, give Head
		 * tags to URGENT operations and Ordered tags to
		 * everything else.
		 */
		if (XS_CTL_TAGTYPE(xs) == 0) {
			if (xs->xs_control & XS_CTL_URGENT)
				xs->xs_control |= XS_CTL_HEAD_TAG;
			else
				xs->xs_control |= XS_CTL_ORDERED_TAG;
		}

		switch (XS_CTL_TAGTYPE(xs)) {
		case XS_CTL_ORDERED_TAG:
			xs->xs_tag_type = MSG_ORDERED_Q_TAG;
			break;

		case XS_CTL_SIMPLE_TAG:
			xs->xs_tag_type = MSG_SIMPLE_Q_TAG;
			break;

		case XS_CTL_HEAD_TAG:
			xs->xs_tag_type = MSG_HEAD_OF_Q_TAG;
			break;

		default:
			scsipi_printaddr(periph);
			printf("invalid tag mask 0x%08x\n",
			    XS_CTL_TAGTYPE(xs));
			panic("scsipi_execute_xs");
		}
	}

	/* If the adapter wants us to poll, poll. */
	if (chan->chan_adapter->adapt_flags & SCSIPI_ADAPT_POLL_ONLY)
		xs->xs_control |= XS_CTL_POLL;

	/*
	 * If we don't yet have a completion thread, or we are to poll for
	 * completion, clear the ASYNC flag.
	 */
	oasync = (xs->xs_control & XS_CTL_ASYNC);
	if (chan->chan_thread == NULL || (xs->xs_control & XS_CTL_POLL) != 0)
		xs->xs_control &= ~XS_CTL_ASYNC;

	async = (xs->xs_control & XS_CTL_ASYNC);
	poll = (xs->xs_control & XS_CTL_POLL);
	retries = xs->xs_retries;	/* for polling commands */

#ifdef DIAGNOSTIC
	if (oasync != 0 && xs->bp == NULL)
		panic("scsipi_execute_xs: XS_CTL_ASYNC but no buf");
#endif

	/*
	 * Enqueue the transfer.  If we're not polling for completion, this
	 * should ALWAYS return `no error'.
	 */
 try_again:
	error = scsipi_enqueue(xs);
	if (error) {
		if (poll == 0) {
			scsipi_printaddr(periph);
			printf("not polling, but enqueue failed with %d\n",
			    error);
			panic("scsipi_execute_xs");
		}

		scsipi_printaddr(periph);
		printf("failed to enqueue polling command");
		if (retries != 0) {
			printf(", retrying...\n");
			delay(1000000);
			retries--;
			goto try_again;
		}
		printf("\n");
		goto free_xs;
	}

 restarted:
	scsipi_run_queue(chan);

	/*
	 * The xfer is enqueued, and possibly running.  If it's to be
	 * completed asynchronously, just return now.
	 */
	if (async)
		return (EJUSTRETURN);

	/*
	 * Not an asynchronous command; wait for it to complete.
	 */
	s = splbio();
	while ((xs->xs_status & XS_STS_DONE) == 0) {
		if (poll) {
			scsipi_printaddr(periph);
			printf("polling command not done\n");
			panic("scsipi_execute_xs");
		}
		(void) tsleep(xs, PRIBIO, "xscmd", 0);
	}
	splx(s);

	/*
	 * Command is complete.  scsipi_done() has awakened us to perform
	 * the error handling.
	 */
	error = scsipi_complete(xs);
	if (error == ERESTART)
		goto restarted;

	/*
	 * If it was meant to run async and we cleared async ourselves,
	 * don't return an error here.  It has already been handled.
	 */
	if (oasync)
		error = EJUSTRETURN;
	/*
	 * Command completed successfully or fatal error occurred.  Fall
	 * into....
	 */
 free_xs:
	s = splbio();
	scsipi_put_xs(xs);
	splx(s);

	/*
	 * Kick the queue, keep it running in case it stopped for some
	 * reason.
	 */
	scsipi_run_queue(chan);

	return (error);
}
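
/*
 * Editor's note on the calling convention above (not in the original
 * source): EJUSTRETURN means the xfer is still owned by the completion
 * path (the async case); any other return means the xfer has already
 * been released via scsipi_put_xs().  Minimal synchronous caller sketch:
 *
 *	xs = scsipi_get_xs(periph, flags);
 *	... fill in the command ...
 *	error = scsipi_execute_xs(xs);
 *	... `xs' must not be touched here; just propagate `error' ...
 */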
2072
2073 /*
2074 * scsipi_completion_thread:
2075 *
2076 * This is the completion thread. We wait for errors on
2077 * asynchronous xfers, and perform the error handling
2078 * function, restarting the command, if necessary.
2079 */
2080 static void
2081 scsipi_completion_thread(void *arg)
2082 {
2083 struct scsipi_channel *chan = arg;
2084 struct scsipi_xfer *xs;
2085 int s;
2086
2087 if (chan->chan_init_cb)
2088 (*chan->chan_init_cb)(chan, chan->chan_init_cb_arg);
2089
2090 s = splbio();
2091 chan->chan_flags |= SCSIPI_CHAN_TACTIVE;
2092 splx(s);
2093 for (;;) {
2094 s = splbio();
2095 xs = TAILQ_FIRST(&chan->chan_complete);
2096 if (xs == NULL && chan->chan_tflags == 0) {
2097 /* nothing to do; wait */
2098 (void) tsleep(&chan->chan_complete, PRIBIO,
2099 "sccomp", 0);
2100 splx(s);
2101 continue;
2102 }
2103 if (chan->chan_tflags & SCSIPI_CHANT_CALLBACK) {
2104 /* call chan_callback from thread context */
2105 chan->chan_tflags &= ~SCSIPI_CHANT_CALLBACK;
2106 chan->chan_callback(chan, chan->chan_callback_arg);
2107 splx(s);
2108 continue;
2109 }
2110 if (chan->chan_tflags & SCSIPI_CHANT_GROWRES) {
2111 /* attempt to get more openings for this channel */
2112 chan->chan_tflags &= ~SCSIPI_CHANT_GROWRES;
2113 scsipi_adapter_request(chan,
2114 ADAPTER_REQ_GROW_RESOURCES, NULL);
2115 scsipi_channel_thaw(chan, 1);
2116 splx(s);
2117 continue;
2118 }
2119 if (chan->chan_tflags & SCSIPI_CHANT_KICK) {
2120 /* explicitly run the queues for this channel */
2121 chan->chan_tflags &= ~SCSIPI_CHANT_KICK;
2122 scsipi_run_queue(chan);
2123 splx(s);
2124 continue;
2125 }
2126 if (chan->chan_tflags & SCSIPI_CHANT_SHUTDOWN) {
2127 splx(s);
2128 break;
2129 }
2130 if (xs) {
2131 TAILQ_REMOVE(&chan->chan_complete, xs, channel_q);
2132 splx(s);
2133
2134 /*
2135 * Have an xfer with an error; process it.
2136 */
2137 (void) scsipi_complete(xs);
2138
2139 /*
2140 * Kick the queue; keep it running if it was stopped
2141 * for some reason.
2142 */
2143 scsipi_run_queue(chan);
2144 } else {
2145 splx(s);
2146 }
2147 }
2148
2149 chan->chan_thread = NULL;
2150
2151 /* In case parent is waiting for us to exit. */
2152 wakeup(&chan->chan_thread);
2153
2154 kthread_exit(0);
2155 }
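
/*
 * Hedged sketch (editorial, not from the original source): other code
 * hands work to the completion thread above by setting one of the
 * SCSIPI_CHANT_* flags at splbio() and waking the thread, e.g.:
 */
#if 0
static void
example_kick_channel(struct scsipi_channel *chan)
{
	int s;

	s = splbio();
	chan->chan_tflags |= SCSIPI_CHANT_KICK;	/* run queues in thread */
	wakeup(&chan->chan_complete);
	splx(s);
}
#endif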
2156
2157 /*
2158 * scsipi_create_completion_thread:
2159 *
2160 * Callback to actually create the completion thread.
2161 */
2162 void
2163 scsipi_create_completion_thread(void *arg)
2164 {
2165 struct scsipi_channel *chan = arg;
2166 struct scsipi_adapter *adapt = chan->chan_adapter;
2167
2168 if (kthread_create1(scsipi_completion_thread, chan,
2169 &chan->chan_thread, "%s", chan->chan_name)) {
2170 printf("%s: unable to create completion thread for "
2171 "channel %d\n", adapt->adapt_dev->dv_xname,
2172 chan->chan_channel);
2173 panic("scsipi_create_completion_thread");
2174 }
2175 }
2176
2177 /*
2178 * scsipi_thread_call_callback:
2179 *
2180  *	Request that a callback be invoked from the completion thread.
2181 */
2182 int
2183 scsipi_thread_call_callback(struct scsipi_channel *chan,
2184 void (*callback)(struct scsipi_channel *, void *), void *arg)
2185 {
2186 int s;
2187
2188 s = splbio();
2189 if ((chan->chan_flags & SCSIPI_CHAN_TACTIVE) == 0) {
2190 /* kernel thread doesn't exist yet */
2191 splx(s);
2192 return ESRCH;
2193 }
2194 if (chan->chan_tflags & SCSIPI_CHANT_CALLBACK) {
2195 splx(s);
2196 return EBUSY;
2197 }
2198 scsipi_channel_freeze(chan, 1);
2199 chan->chan_callback = callback;
2200 chan->chan_callback_arg = arg;
2201 chan->chan_tflags |= SCSIPI_CHANT_CALLBACK;
2202 wakeup(&chan->chan_complete);
2203 splx(s);
2204 	return (0);
2205 }
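
/*
 * Hedged usage sketch (editorial): an adapter can defer work into the
 * completion thread this way.  foo_reattach() is a hypothetical
 * callback; note that the channel is frozen by the call above, so the
 * callback is expected to scsipi_channel_thaw() it when done.
 */
#if 0
static void foo_reattach(struct scsipi_channel *, void *);

static void
example_defer_work(struct scsipi_channel *chan, void *softc)
{

	if (scsipi_thread_call_callback(chan, foo_reattach, softc) != 0)
		printf("thread not running, or a callback is pending\n");
}
#endif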
2206
2207 /*
2208 * scsipi_async_event:
2209 *
2210 * Handle an asynchronous event from an adapter.
2211 */
2212 void
2213 scsipi_async_event(struct scsipi_channel *chan, scsipi_async_event_t event,
2214 void *arg)
2215 {
2216 int s;
2217
2218 s = splbio();
2219 switch (event) {
2220 case ASYNC_EVENT_MAX_OPENINGS:
2221 scsipi_async_event_max_openings(chan,
2222 (struct scsipi_max_openings *)arg);
2223 break;
2224
2225 case ASYNC_EVENT_XFER_MODE:
2226 scsipi_async_event_xfer_mode(chan,
2227 (struct scsipi_xfer_mode *)arg);
2228 break;
2229 case ASYNC_EVENT_RESET:
2230 scsipi_async_event_channel_reset(chan);
2231 break;
2232 }
2233 splx(s);
2234 }
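
/*
 * Hedged example (editorial): how an adapter might report the outcome
 * of a transfer negotiation.  The field values are illustrative; note
 * that xm_period carries the sync *factor*, not a period in ns.
 */
#if 0
static void
example_report_negotiation(struct scsipi_channel *chan, int target)
{
	struct scsipi_xfer_mode xm;

	xm.xm_target = target;
	xm.xm_mode = PERIPH_CAP_SYNC | PERIPH_CAP_WIDE16;
	xm.xm_period = scsipi_sync_period_to_factor(2500);	/* 25.00ns */
	xm.xm_offset = 15;
	scsipi_async_event(chan, ASYNC_EVENT_XFER_MODE, &xm);
}
#endif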
2235
2236 /*
2237 * scsipi_print_xfer_mode:
2238 *
2239 * Print a periph's capabilities.
2240 */
2241 void
2242 scsipi_print_xfer_mode(struct scsipi_periph *periph)
2243 {
2244 int period, freq, speed, mbs;
2245
2246 if ((periph->periph_flags & PERIPH_MODE_VALID) == 0)
2247 return;
2248
2249 aprint_normal("%s: ", periph->periph_dev->dv_xname);
2250 if (periph->periph_mode & (PERIPH_CAP_SYNC | PERIPH_CAP_DT)) {
2251 period = scsipi_sync_factor_to_period(periph->periph_period);
2252 aprint_normal("sync (%d.%02dns offset %d)",
2253 period / 100, period % 100, periph->periph_offset);
2254 } else
2255 aprint_normal("async");
2256
2257 if (periph->periph_mode & PERIPH_CAP_WIDE32)
2258 aprint_normal(", 32-bit");
2259 else if (periph->periph_mode & (PERIPH_CAP_WIDE16 | PERIPH_CAP_DT))
2260 aprint_normal(", 16-bit");
2261 else
2262 aprint_normal(", 8-bit");
2263
2264 if (periph->periph_mode & (PERIPH_CAP_SYNC | PERIPH_CAP_DT)) {
2265 freq = scsipi_sync_factor_to_freq(periph->periph_period);
2266 speed = freq;
2267 if (periph->periph_mode & PERIPH_CAP_WIDE32)
2268 speed *= 4;
2269 else if (periph->periph_mode &
2270 (PERIPH_CAP_WIDE16 | PERIPH_CAP_DT))
2271 speed *= 2;
2272 mbs = speed / 1000;
2273 if (mbs > 0)
2274 aprint_normal(" (%d.%03dMB/s)", mbs, speed % 1000);
2275 else
2276 aprint_normal(" (%dKB/s)", speed % 1000);
2277 }
2278
2279 aprint_normal(" transfers");
2280
2281 if (periph->periph_mode & PERIPH_CAP_TQING)
2282 aprint_normal(", tagged queueing");
2283
2284 aprint_normal("\n");
2285 }
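
/*
 * Worked example for the arithmetic above (editorial note): a FAST-40
 * (factor 0x0a) 16-bit periph yields freq = 100000000 / 2500 = 40000
 * (kHz), speed = 40000 * 2 = 80000, mbs = 80, so "(80.000MB/s)" is
 * printed.
 */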
2286
2287 /*
2288 * scsipi_async_event_max_openings:
2289 *
2290 * Update the maximum number of outstanding commands a
2291 * device may have.
2292 */
2293 static void
2294 scsipi_async_event_max_openings(struct scsipi_channel *chan,
2295 struct scsipi_max_openings *mo)
2296 {
2297 struct scsipi_periph *periph;
2298 int minlun, maxlun;
2299
2300 if (mo->mo_lun == -1) {
2301 /*
2302 * Wildcarded; apply it to all LUNs.
2303 */
2304 minlun = 0;
2305 maxlun = chan->chan_nluns - 1;
2306 } else
2307 minlun = maxlun = mo->mo_lun;
2308
2309 /* XXX This could really suck with a large LUN space. */
2310 for (; minlun <= maxlun; minlun++) {
2311 periph = scsipi_lookup_periph(chan, mo->mo_target, minlun);
2312 if (periph == NULL)
2313 continue;
2314
2315 if (mo->mo_openings < periph->periph_openings)
2316 periph->periph_openings = mo->mo_openings;
2317 else if (mo->mo_openings > periph->periph_openings &&
2318 (periph->periph_flags & PERIPH_GROW_OPENINGS) != 0)
2319 periph->periph_openings = mo->mo_openings;
2320 }
2321 }
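
/*
 * Hedged example (editorial): an adapter that runs short of tag
 * resources might throttle a target this way.  Values are illustrative;
 * the handler above only raises openings if PERIPH_GROW_OPENINGS is set.
 */
#if 0
static void
example_limit_openings(struct scsipi_channel *chan, int target)
{
	struct scsipi_max_openings mo;

	mo.mo_target = target;
	mo.mo_lun = -1;			/* wildcard: all LUNs */
	mo.mo_openings = 4;		/* clamp to 4 outstanding commands */
	scsipi_async_event(chan, ASYNC_EVENT_MAX_OPENINGS, &mo);
}
#endif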
2322
2323 /*
2324 * scsipi_async_event_xfer_mode:
2325 *
2326 * Update the xfer mode for all periphs sharing the
2327 * specified I_T Nexus.
2328 */
2329 static void
2330 scsipi_async_event_xfer_mode(struct scsipi_channel *chan,
2331 struct scsipi_xfer_mode *xm)
2332 {
2333 struct scsipi_periph *periph;
2334 int lun, announce, mode, period, offset;
2335
2336 for (lun = 0; lun < chan->chan_nluns; lun++) {
2337 periph = scsipi_lookup_periph(chan, xm->xm_target, lun);
2338 if (periph == NULL)
2339 continue;
2340 announce = 0;
2341
2342 /*
2343 * Clamp the xfer mode down to this periph's capabilities.
2344 */
2345 mode = xm->xm_mode & periph->periph_cap;
2346 if (mode & PERIPH_CAP_SYNC) {
2347 period = xm->xm_period;
2348 offset = xm->xm_offset;
2349 } else {
2350 period = 0;
2351 offset = 0;
2352 }
2353
2354 /*
2355 * If we do not have a valid xfer mode yet, or the parameters
2356 * are different, announce them.
2357 */
2358 if ((periph->periph_flags & PERIPH_MODE_VALID) == 0 ||
2359 periph->periph_mode != mode ||
2360 periph->periph_period != period ||
2361 periph->periph_offset != offset)
2362 announce = 1;
2363
2364 periph->periph_mode = mode;
2365 periph->periph_period = period;
2366 periph->periph_offset = offset;
2367 periph->periph_flags |= PERIPH_MODE_VALID;
2368
2369 if (announce)
2370 scsipi_print_xfer_mode(periph);
2371 }
2372 }
2373
2374 /*
2375 * scsipi_set_xfer_mode:
2376 *
2377 * Set the xfer mode for the specified I_T Nexus.
2378 */
2379 void
2380 scsipi_set_xfer_mode(struct scsipi_channel *chan, int target, int immed)
2381 {
2382 struct scsipi_xfer_mode xm;
2383 struct scsipi_periph *itperiph;
2384 int lun, s;
2385
2386 /*
2387 * Go to the minimal xfer mode.
2388 */
2389 xm.xm_target = target;
2390 xm.xm_mode = 0;
2391 xm.xm_period = 0; /* ignored */
2392 xm.xm_offset = 0; /* ignored */
2393
2394 /*
2395 * Find the first LUN we know about on this I_T Nexus.
2396 */
2397 for (itperiph = NULL, lun = 0; lun < chan->chan_nluns; lun++) {
2398 itperiph = scsipi_lookup_periph(chan, target, lun);
2399 if (itperiph != NULL)
2400 break;
2401 }
2402 if (itperiph != NULL) {
2403 xm.xm_mode = itperiph->periph_cap;
2404 /*
2405 * Now issue the request to the adapter.
2406 */
2407 s = splbio();
2408 scsipi_adapter_request(chan, ADAPTER_REQ_SET_XFER_MODE, &xm);
2409 splx(s);
2410 /*
2411 * If we want this to happen immediately, issue a dummy
2412 * command, since most adapters can't really negotiate unless
2413 * they're executing a job.
2414 */
2415 if (immed != 0) {
2416 (void) scsipi_test_unit_ready(itperiph,
2417 XS_CTL_DISCOVERY | XS_CTL_IGNORE_ILLEGAL_REQUEST |
2418 XS_CTL_IGNORE_NOT_READY |
2419 XS_CTL_IGNORE_MEDIA_CHANGE);
2420 }
2421 }
2422 }
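
/*
 * Hedged usage note (editorial): a caller such as an ioctl handler
 * would typically invoke scsipi_set_xfer_mode(chan, target, 1) so that
 * the dummy TEST UNIT READY above forces renegotiation right away.
 */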
2423
2424 /*
2425  * scsipi_async_event_channel_reset:
2426  *
2427  *	Handle a SCSI bus reset.
2428  *	Called at splbio().
2429 */
2430 static void
2431 scsipi_async_event_channel_reset(struct scsipi_channel *chan)
2432 {
2433 struct scsipi_xfer *xs, *xs_next;
2434 struct scsipi_periph *periph;
2435 int target, lun;
2436
2437 /*
2438 	 * The channel has been reset.  Also mark pending REQUEST_SENSE
2439 	 * commands as reset, since their sense data is no longer available.
2440 	 * We can't call scsipi_done() from here, as the command has not
2441 	 * been sent to the adapter yet (doing so would corrupt accounting).
2442 */
2443
2444 for (xs = TAILQ_FIRST(&chan->chan_queue); xs != NULL; xs = xs_next) {
2445 xs_next = TAILQ_NEXT(xs, channel_q);
2446 if (xs->xs_control & XS_CTL_REQSENSE) {
2447 TAILQ_REMOVE(&chan->chan_queue, xs, channel_q);
2448 xs->error = XS_RESET;
2449 if ((xs->xs_control & XS_CTL_ASYNC) != 0)
2450 TAILQ_INSERT_TAIL(&chan->chan_complete, xs,
2451 channel_q);
2452 }
2453 }
2454 wakeup(&chan->chan_complete);
2455 	/* Catch xfers with pending sense that have no REQSENSE xs queued yet */
2456 for (target = 0; target < chan->chan_ntargets; target++) {
2457 if (target == chan->chan_id)
2458 continue;
2459 for (lun = 0; lun < chan->chan_nluns; lun++) {
2460 periph = scsipi_lookup_periph(chan, target, lun);
2461 if (periph) {
2462 xs = periph->periph_xscheck;
2463 if (xs)
2464 xs->error = XS_RESET;
2465 }
2466 }
2467 }
2468 }
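
/*
 * Hedged usage note (editorial): an adapter that observes a bus reset
 * reports it as
 *
 *	scsipi_async_event(chan, ASYNC_EVENT_RESET, NULL);
 *
 * which dispatches to the handler above at splbio().
 */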
2469
2470 /*
2471 * scsipi_target_detach:
2472 *
2473  *	Detach all periphs associated with an I_T Nexus.
2474  *	Must be called from valid thread context.
2475 */
2476 int
2477 scsipi_target_detach(struct scsipi_channel *chan, int target, int lun,
2478 int flags)
2479 {
2480 struct scsipi_periph *periph;
2481 int ctarget, mintarget, maxtarget;
2482 int clun, minlun, maxlun;
2483 int error;
2484
2485 if (target == -1) {
2486 mintarget = 0;
2487 maxtarget = chan->chan_ntargets;
2488 } else {
2489 if (target == chan->chan_id)
2490 return EINVAL;
2491 if (target < 0 || target >= chan->chan_ntargets)
2492 return EINVAL;
2493 mintarget = target;
2494 maxtarget = target + 1;
2495 }
2496
2497 if (lun == -1) {
2498 minlun = 0;
2499 maxlun = chan->chan_nluns;
2500 } else {
2501 if (lun < 0 || lun >= chan->chan_nluns)
2502 return EINVAL;
2503 minlun = lun;
2504 maxlun = lun + 1;
2505 }
2506
2507 for (ctarget = mintarget; ctarget < maxtarget; ctarget++) {
2508 if (ctarget == chan->chan_id)
2509 continue;
2510
2511 for (clun = minlun; clun < maxlun; clun++) {
2512 periph = scsipi_lookup_periph(chan, ctarget, clun);
2513 if (periph == NULL)
2514 continue;
2515 error = config_detach(periph->periph_dev, flags);
2516 if (error)
2517 return (error);
2518 }
2519 }
2520 	return (0);
2521 }
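
/*
 * Hedged usage note (editorial): passing -1 wildcards a dimension, so
 * scsipi_target_detach(chan, -1, -1, DETACH_FORCE) would detach every
 * periph on the channel (flag choice illustrative).
 */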
2522
2523 /*
2524 * scsipi_adapter_addref:
2525 *
2526 * Add a reference to the adapter pointed to by the provided
2527 * link, enabling the adapter if necessary.
2528 */
2529 int
2530 scsipi_adapter_addref(struct scsipi_adapter *adapt)
2531 {
2532 int s, error = 0;
2533
2534 s = splbio();
2535 if (adapt->adapt_refcnt++ == 0 && adapt->adapt_enable != NULL) {
2536 error = (*adapt->adapt_enable)(adapt->adapt_dev, 1);
2537 if (error)
2538 adapt->adapt_refcnt--;
2539 }
2540 splx(s);
2541 return (error);
2542 }
2543
2544 /*
2545 * scsipi_adapter_delref:
2546 *
2547 * Delete a reference to the adapter pointed to by the provided
2548 * link, disabling the adapter if possible.
2549 */
2550 void
2551 scsipi_adapter_delref(struct scsipi_adapter *adapt)
2552 {
2553 int s;
2554
2555 s = splbio();
2556 if (adapt->adapt_refcnt-- == 1 && adapt->adapt_enable != NULL)
2557 (void) (*adapt->adapt_enable)(adapt->adapt_dev, 0);
2558 splx(s);
2559 }
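
/*
 * Hedged example (editorial): open/close paths typically bracket use
 * of a device with these reference-counting calls.
 */
#if 0
static int
example_use_adapter(struct scsipi_periph *periph)
{
	struct scsipi_adapter *adapt =
	    periph->periph_channel->chan_adapter;
	int error;

	if ((error = scsipi_adapter_addref(adapt)) != 0)
		return (error);
	/* ... issue commands to the device ... */
	scsipi_adapter_delref(adapt);
	return (0);
}
#endif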
2560
2561 static struct scsipi_syncparam {
2562 int ss_factor;
2563 int ss_period; /* ns * 100 */
2564 } scsipi_syncparams[] = {
2565 { 0x08, 625 }, /* FAST-160 (Ultra320) */
2566 { 0x09, 1250 }, /* FAST-80 (Ultra160) */
2567 { 0x0a, 2500 }, /* FAST-40 40MHz (Ultra2) */
2568 { 0x0b, 3030 }, /* FAST-40 33MHz (Ultra2) */
2569 { 0x0c, 5000 }, /* FAST-20 (Ultra) */
2570 };
2571 static const int scsipi_nsyncparams =
2572 sizeof(scsipi_syncparams) / sizeof(scsipi_syncparams[0]);
2573
2574 int
2575 scsipi_sync_period_to_factor(int period /* ns * 100 */)
2576 {
2577 int i;
2578
2579 for (i = 0; i < scsipi_nsyncparams; i++) {
2580 if (period <= scsipi_syncparams[i].ss_period)
2581 return (scsipi_syncparams[i].ss_factor);
2582 }
2583
2584 return ((period / 100) / 4);
2585 }
2586
2587 int
2588 scsipi_sync_factor_to_period(int factor)
2589 {
2590 int i;
2591
2592 for (i = 0; i < scsipi_nsyncparams; i++) {
2593 if (factor == scsipi_syncparams[i].ss_factor)
2594 return (scsipi_syncparams[i].ss_period);
2595 }
2596
2597 return ((factor * 4) * 100);
2598 }
2599
2600 int
2601 scsipi_sync_factor_to_freq(int factor)
2602 {
2603 int i;
2604
2605 for (i = 0; i < scsipi_nsyncparams; i++) {
2606 if (factor == scsipi_syncparams[i].ss_factor)
2607 return (100000000 / scsipi_syncparams[i].ss_period);
2608 }
2609
2610 return (10000000 / ((factor * 4) * 10));
2611 }
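
/*
 * Worked examples for the conversions above (editorial note): factor
 * 0x0a maps through the table to 2500 (25.00ns) and 100000000 / 2500 =
 * 40000kHz; a factor outside the table, e.g. 50 (0x32), falls through
 * to (50 * 4) * 100 = 20000 (200.00ns) and 10000000 / 2000 = 5000kHz.
 */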
2612
2613 #ifdef SCSIPI_DEBUG
2614 /*
2615  * Given a scsipi_xfer, dump the request in all its glory.
2616 */
2617 void
2618 show_scsipi_xs(struct scsipi_xfer *xs)
2619 {
2620
2621 printf("xs(%p): ", xs);
2622 printf("xs_control(0x%08x)", xs->xs_control);
2623 printf("xs_status(0x%08x)", xs->xs_status);
2624 printf("periph(%p)", xs->xs_periph);
2625 printf("retr(0x%x)", xs->xs_retries);
2626 printf("timo(0x%x)", xs->timeout);
2627 printf("cmd(%p)", xs->cmd);
2628 printf("len(0x%x)", xs->cmdlen);
2629 printf("data(%p)", xs->data);
2630 printf("len(0x%x)", xs->datalen);
2631 printf("res(0x%x)", xs->resid);
2632 printf("err(0x%x)", xs->error);
2633 printf("bp(%p)", xs->bp);
2634 show_scsipi_cmd(xs);
2635 }
2636
2637 void
2638 show_scsipi_cmd(struct scsipi_xfer *xs)
2639 {
2640 u_char *b = (u_char *) xs->cmd;
2641 int i = 0;
2642
2643 scsipi_printaddr(xs->xs_periph);
2644 printf(" command: ");
2645
2646 if ((xs->xs_control & XS_CTL_RESET) == 0) {
2647 while (i < xs->cmdlen) {
2648 if (i)
2649 printf(",");
2650 printf("0x%x", b[i++]);
2651 }
2652 printf("-[%d bytes]\n", xs->datalen);
2653 if (xs->datalen)
2654 show_mem(xs->data, min(64, xs->datalen));
2655 } else
2656 printf("-RESET-\n");
2657 }
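
/*
 * Hedged illustration (editorial): for a 6-byte TEST UNIT READY with
 * no data phase, show_scsipi_cmd() would emit something like
 *
 *	sd0(ahc0:0:1:0):  command: 0x0,0x0,0x0,0x0,0x0,0x0-[0 bytes]
 *
 * (device names illustrative).
 */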
2658
2659 void
2660 show_mem(u_char *address, int num)
2661 {
2662 int x;
2663
2664 printf("------------------------------");
2665 for (x = 0; x < num; x++) {
2666 if ((x % 16) == 0)
2667 printf("\n%03d: ", x);
2668 printf("%02x ", *address++);
2669 }
2670 printf("\n------------------------------\n");
2671 }
2672 #endif /* SCSIPI_DEBUG */
2673