/*	$NetBSD: scsipi_base.c,v 1.138 2006/10/09 21:29:14 scw Exp $	*/

/*-
 * Copyright (c) 1998, 1999, 2000, 2002, 2003, 2004 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Charles M. Hannum; by Jason R. Thorpe of the Numerical Aerospace
 * Simulation Facility, NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: scsipi_base.c,v 1.138 2006/10/09 21:29:14 scw Exp $");

#include "opt_scsi.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/buf.h>
#include <sys/uio.h>
#include <sys/malloc.h>
#include <sys/pool.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/proc.h>
#include <sys/kthread.h>
#include <sys/hash.h>

#include <uvm/uvm_extern.h>

#include <dev/scsipi/scsi_spc.h>
#include <dev/scsipi/scsipi_all.h>
#include <dev/scsipi/scsipi_disk.h>
#include <dev/scsipi/scsipiconf.h>
#include <dev/scsipi/scsipi_base.h>

#include <dev/scsipi/scsi_all.h>
#include <dev/scsipi/scsi_message.h>

static int	scsipi_complete(struct scsipi_xfer *);
static void	scsipi_request_sense(struct scsipi_xfer *);
static int	scsipi_enqueue(struct scsipi_xfer *);
static void	scsipi_run_queue(struct scsipi_channel *chan);

static void	scsipi_completion_thread(void *);

static void	scsipi_get_tag(struct scsipi_xfer *);
static void	scsipi_put_tag(struct scsipi_xfer *);

static int	scsipi_get_resource(struct scsipi_channel *);
static void	scsipi_put_resource(struct scsipi_channel *);

static void	scsipi_async_event_max_openings(struct scsipi_channel *,
		    struct scsipi_max_openings *);
static void	scsipi_async_event_xfer_mode(struct scsipi_channel *,
		    struct scsipi_xfer_mode *);
static void	scsipi_async_event_channel_reset(struct scsipi_channel *);

static struct pool scsipi_xfer_pool;

/*
 * scsipi_init:
 *
 *	Called when a scsibus or atapibus is attached to the system
 *	to initialize shared data structures.
 */
void
scsipi_init(void)
{
	static int scsipi_init_done;

	if (scsipi_init_done)
		return;
	scsipi_init_done = 1;

	/* Initialize the scsipi_xfer pool. */
	pool_init(&scsipi_xfer_pool, sizeof(struct scsipi_xfer), 0,
	    0, 0, "scxspl", NULL);
	if (pool_prime(&scsipi_xfer_pool,
	    PAGE_SIZE / sizeof(struct scsipi_xfer)) == ENOMEM) {
		printf("WARNING: not enough memory for scsipi_xfer_pool\n");
	}
}

/*
 * scsipi_channel_init:
 *
 *	Initialize a scsipi_channel when it is attached.
 */
int
scsipi_channel_init(struct scsipi_channel *chan)
{
	int i;

	/* Initialize shared data. */
	scsipi_init();

	/* Initialize the queues. */
	TAILQ_INIT(&chan->chan_queue);
	TAILQ_INIT(&chan->chan_complete);

	for (i = 0; i < SCSIPI_CHAN_PERIPH_BUCKETS; i++)
		LIST_INIT(&chan->chan_periphtab[i]);

	/*
	 * Create the asynchronous completion thread.
	 */
	kthread_create(scsipi_create_completion_thread, chan);
	return (0);
}

/*
 * scsipi_channel_shutdown:
 *
 *	Shutdown a scsipi_channel.
 */
void
scsipi_channel_shutdown(struct scsipi_channel *chan)
{

	/*
	 * Shut down the completion thread.
	 */
	chan->chan_tflags |= SCSIPI_CHANT_SHUTDOWN;
	wakeup(&chan->chan_complete);

	/*
	 * Now wait for the thread to exit.
	 */
	while (chan->chan_thread != NULL)
		(void) tsleep(&chan->chan_thread, PRIBIO, "scshut", 0);
}

static uint32_t
scsipi_chan_periph_hash(uint64_t t, uint64_t l)
{
	uint32_t hash;

	hash = hash32_buf(&t, sizeof(t), HASH32_BUF_INIT);
	hash = hash32_buf(&l, sizeof(l), hash);

	return (hash & SCSIPI_CHAN_PERIPH_HASHMASK);
}
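
/*
 * Illustrative sketch only: the bucket for a periph at target 3, LUN 0
 * is computed by hashing both IDs, so scsipi_insert_periph(),
 * scsipi_lookup_periph() and scsipi_remove_periph() below all agree
 * on the same chain:
 *
 *	hash = scsipi_chan_periph_hash(3, 0);
 *	LIST_FOREACH(periph, &chan->chan_periphtab[hash], periph_hash)
 *		...
 */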

/*
 * scsipi_insert_periph:
 *
 *	Insert a periph into the channel.
 */
void
scsipi_insert_periph(struct scsipi_channel *chan, struct scsipi_periph *periph)
{
	uint32_t hash;
	int s;

	hash = scsipi_chan_periph_hash(periph->periph_target,
	    periph->periph_lun);

	s = splbio();
	LIST_INSERT_HEAD(&chan->chan_periphtab[hash], periph, periph_hash);
	splx(s);
}

/*
 * scsipi_remove_periph:
 *
 *	Remove a periph from the channel.
 */
void
scsipi_remove_periph(struct scsipi_channel *chan, struct scsipi_periph *periph)
{
	int s;

	s = splbio();
	LIST_REMOVE(periph, periph_hash);
	splx(s);
}

/*
 * scsipi_lookup_periph:
 *
 *	Lookup a periph on the specified channel.
 */
struct scsipi_periph *
scsipi_lookup_periph(struct scsipi_channel *chan, int target, int lun)
{
	struct scsipi_periph *periph;
	uint32_t hash;
	int s;

	if (target >= chan->chan_ntargets ||
	    lun >= chan->chan_nluns)
		return (NULL);

	hash = scsipi_chan_periph_hash(target, lun);

	s = splbio();
	LIST_FOREACH(periph, &chan->chan_periphtab[hash], periph_hash) {
		if (periph->periph_target == target &&
		    periph->periph_lun == lun)
			break;
	}
	splx(s);

	return (periph);
}
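
/*
 * Usage sketch (illustrative, not from this file): callers resolve a
 * periph before acting on it and must tolerate a NULL result:
 *
 *	periph = scsipi_lookup_periph(chan, target, lun);
 *	if (periph == NULL)
 *		return (ENXIO);		(no such device)
 */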

/*
 * scsipi_get_resource:
 *
 *	Allocate a single xfer `resource' from the channel.
 *
 *	NOTE: Must be called at splbio().
 */
static int
scsipi_get_resource(struct scsipi_channel *chan)
{
	struct scsipi_adapter *adapt = chan->chan_adapter;

	if (chan->chan_flags & SCSIPI_CHAN_OPENINGS) {
		if (chan->chan_openings > 0) {
			chan->chan_openings--;
			return (1);
		}
		return (0);
	}

	if (adapt->adapt_openings > 0) {
		adapt->adapt_openings--;
		return (1);
	}
	return (0);
}

/*
 * scsipi_grow_resources:
 *
 *	Attempt to grow resources for a channel.  If this succeeds,
 *	we allocate one for our caller.
 *
 *	NOTE: Must be called at splbio().
 */
static inline int
scsipi_grow_resources(struct scsipi_channel *chan)
{

	if (chan->chan_flags & SCSIPI_CHAN_CANGROW) {
		if ((chan->chan_flags & SCSIPI_CHAN_TACTIVE) == 0) {
			scsipi_adapter_request(chan,
			    ADAPTER_REQ_GROW_RESOURCES, NULL);
			return (scsipi_get_resource(chan));
		}
		/*
		 * ask the channel thread to do it. It'll have to thaw the
		 * queue
		 */
		scsipi_channel_freeze(chan, 1);
		chan->chan_tflags |= SCSIPI_CHANT_GROWRES;
		wakeup(&chan->chan_complete);
		return (0);
	}

	return (0);
}

/*
 * scsipi_put_resource:
 *
 *	Free a single xfer `resource' to the channel.
 *
 *	NOTE: Must be called at splbio().
 */
static void
scsipi_put_resource(struct scsipi_channel *chan)
{
	struct scsipi_adapter *adapt = chan->chan_adapter;

	if (chan->chan_flags & SCSIPI_CHAN_OPENINGS)
		chan->chan_openings++;
	else
		adapt->adapt_openings++;
}

/*
 * scsipi_get_tag:
 *
 *	Get a tag ID for the specified xfer.
 *
 *	NOTE: Must be called at splbio().
 */
static void
scsipi_get_tag(struct scsipi_xfer *xs)
{
	struct scsipi_periph *periph = xs->xs_periph;
	int bit, tag;
	u_int word;

	bit = 0;	/* XXX gcc */
	for (word = 0; word < PERIPH_NTAGWORDS; word++) {
		bit = ffs(periph->periph_freetags[word]);
		if (bit != 0)
			break;
	}
#ifdef DIAGNOSTIC
	if (word == PERIPH_NTAGWORDS) {
		scsipi_printaddr(periph);
		printf("no free tags\n");
		panic("scsipi_get_tag");
	}
#endif

	bit -= 1;
	periph->periph_freetags[word] &= ~(1 << bit);
	tag = (word << 5) | bit;

	/* XXX Should eventually disallow this completely. */
	if (tag >= periph->periph_openings) {
		scsipi_printaddr(periph);
		printf("WARNING: tag %d greater than available openings %d\n",
		    tag, periph->periph_openings);
	}

	xs->xs_tag_id = tag;
}

/*
 * scsipi_put_tag:
 *
 *	Put the tag ID for the specified xfer back into the pool.
 *
 *	NOTE: Must be called at splbio().
 */
static void
scsipi_put_tag(struct scsipi_xfer *xs)
{
	struct scsipi_periph *periph = xs->xs_periph;
	int word, bit;

	word = xs->xs_tag_id >> 5;
	bit = xs->xs_tag_id & 0x1f;

	periph->periph_freetags[word] |= (1 << bit);
}
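
/*
 * Worked example (illustrative): tag IDs pack 32 tags per
 * periph_freetags[] word as (word << 5) | bit, so tag 37 lives in
 * word 1, bit 5, and scsipi_put_tag() recovers the same pair:
 *
 *	word = 37 >> 5;		== 1
 *	bit  = 37 & 0x1f;	== 5
 */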

/*
 * scsipi_get_xs:
 *
 *	Allocate an xfer descriptor and associate it with the
 *	specified peripheral.  If the peripheral has no more
 *	available command openings, we either block waiting for
 *	one to become available, or fail.
 */
struct scsipi_xfer *
scsipi_get_xs(struct scsipi_periph *periph, int flags)
{
	struct scsipi_xfer *xs;
	int s;

	SC_DEBUG(periph, SCSIPI_DB3, ("scsipi_get_xs\n"));

	KASSERT(!cold);

#ifdef DIAGNOSTIC
	/*
	 * URGENT commands can never be ASYNC.
	 */
	if ((flags & (XS_CTL_URGENT|XS_CTL_ASYNC)) ==
	    (XS_CTL_URGENT|XS_CTL_ASYNC)) {
		scsipi_printaddr(periph);
		printf("URGENT and ASYNC\n");
		panic("scsipi_get_xs");
	}
#endif

	s = splbio();
	/*
	 * Wait for a command opening to become available.  Rules:
	 *
	 *	- All xfers must wait for an available opening.
	 *	  Exception: URGENT xfers can proceed when
	 *	  active == openings, because we use the opening
	 *	  of the command we're recovering for.
	 *	- if the periph has sense pending, only URGENT & REQSENSE
	 *	  xfers may proceed.
	 *
	 *	- If the periph is recovering, only URGENT xfers may
	 *	  proceed.
	 *
	 *	- If the periph is currently executing a recovery
	 *	  command, URGENT commands must block, because only
	 *	  one recovery command can execute at a time.
	 */
	for (;;) {
		if (flags & XS_CTL_URGENT) {
			if (periph->periph_active > periph->periph_openings)
				goto wait_for_opening;
			if (periph->periph_flags & PERIPH_SENSE) {
				if ((flags & XS_CTL_REQSENSE) == 0)
					goto wait_for_opening;
			} else {
				if ((periph->periph_flags &
				    PERIPH_RECOVERY_ACTIVE) != 0)
					goto wait_for_opening;
				periph->periph_flags |= PERIPH_RECOVERY_ACTIVE;
			}
			break;
		}
		if (periph->periph_active >= periph->periph_openings ||
		    (periph->periph_flags & PERIPH_RECOVERING) != 0)
			goto wait_for_opening;
		periph->periph_active++;
		break;

 wait_for_opening:
		if (flags & XS_CTL_NOSLEEP) {
			splx(s);
			return (NULL);
		}
		SC_DEBUG(periph, SCSIPI_DB3, ("sleeping\n"));
		periph->periph_flags |= PERIPH_WAITING;
		(void) tsleep(periph, PRIBIO, "getxs", 0);
	}
	SC_DEBUG(periph, SCSIPI_DB3, ("calling pool_get\n"));
	xs = pool_get(&scsipi_xfer_pool,
	    ((flags & XS_CTL_NOSLEEP) != 0 ? PR_NOWAIT : PR_WAITOK));
	if (xs == NULL) {
		if (flags & XS_CTL_URGENT) {
			if ((flags & XS_CTL_REQSENSE) == 0)
				periph->periph_flags &= ~PERIPH_RECOVERY_ACTIVE;
		} else
			periph->periph_active--;
		scsipi_printaddr(periph);
		printf("unable to allocate %sscsipi_xfer\n",
		    (flags & XS_CTL_URGENT) ? "URGENT " : "");
	}
	splx(s);

	SC_DEBUG(periph, SCSIPI_DB3, ("returning\n"));

	if (xs != NULL) {
		memset(xs, 0, sizeof(*xs));
		callout_init(&xs->xs_callout);
		xs->xs_periph = periph;
		xs->xs_control = flags;
		xs->xs_status = 0;
		s = splbio();
		TAILQ_INSERT_TAIL(&periph->periph_xferq, xs, device_q);
		splx(s);
	}
	return (xs);
}

/*
 * scsipi_put_xs:
 *
 *	Release an xfer descriptor, decreasing the outstanding command
 *	count for the peripheral.  If there is a thread waiting for
 *	an opening, wake it up.  If not, kick any queued I/O the
 *	peripheral may have.
 *
 *	NOTE: Must be called at splbio().
 */
void
scsipi_put_xs(struct scsipi_xfer *xs)
{
	struct scsipi_periph *periph = xs->xs_periph;
	int flags = xs->xs_control;

	SC_DEBUG(periph, SCSIPI_DB3, ("scsipi_free_xs\n"));

	TAILQ_REMOVE(&periph->periph_xferq, xs, device_q);
	pool_put(&scsipi_xfer_pool, xs);

#ifdef DIAGNOSTIC
	if ((periph->periph_flags & PERIPH_RECOVERY_ACTIVE) != 0 &&
	    periph->periph_active == 0) {
		scsipi_printaddr(periph);
		printf("recovery without a command to recover for\n");
		panic("scsipi_put_xs");
	}
#endif

	if (flags & XS_CTL_URGENT) {
		if ((flags & XS_CTL_REQSENSE) == 0)
			periph->periph_flags &= ~PERIPH_RECOVERY_ACTIVE;
	} else
		periph->periph_active--;
	if (periph->periph_active == 0 &&
	    (periph->periph_flags & PERIPH_WAITDRAIN) != 0) {
		periph->periph_flags &= ~PERIPH_WAITDRAIN;
		wakeup(&periph->periph_active);
	}

	if (periph->periph_flags & PERIPH_WAITING) {
		periph->periph_flags &= ~PERIPH_WAITING;
		wakeup(periph);
	} else {
		if (periph->periph_switch->psw_start != NULL &&
		    device_is_active(periph->periph_dev)) {
			SC_DEBUG(periph, SCSIPI_DB2,
			    ("calling private start()\n"));
			(*periph->periph_switch->psw_start)(periph);
		}
	}
}

/*
 * scsipi_channel_freeze:
 *
 *	Freeze a channel's xfer queue.
 */
void
scsipi_channel_freeze(struct scsipi_channel *chan, int count)
{
	int s;

	s = splbio();
	chan->chan_qfreeze += count;
	splx(s);
}

/*
 * scsipi_channel_thaw:
 *
 *	Thaw a channel's xfer queue.
 */
void
scsipi_channel_thaw(struct scsipi_channel *chan, int count)
{
	int s;

	s = splbio();
	chan->chan_qfreeze -= count;
	/*
	 * Don't let the freeze count go negative.
	 *
	 * Presumably the adapter driver could keep track of this,
	 * but it might just be easier to do this here so as to allow
	 * multiple callers, including those outside the adapter driver.
	 */
	if (chan->chan_qfreeze < 0) {
		chan->chan_qfreeze = 0;
	}
	splx(s);
	/*
	 * Kick the channel's queue here.  Note, we may be running in
	 * interrupt context (softclock or HBA's interrupt), so the adapter
	 * driver had better not sleep.
	 */
	if (chan->chan_qfreeze == 0)
		scsipi_run_queue(chan);
}

/*
 * scsipi_channel_timed_thaw:
 *
 *	Thaw a channel after some time has expired.  This will also
 *	run the channel's queue if the freeze count has reached 0.
 */
void
scsipi_channel_timed_thaw(void *arg)
{
	struct scsipi_channel *chan = arg;

	scsipi_channel_thaw(chan, 1);
}
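
/*
 * Sketch (illustrative, not from this file): an adapter that must
 * pause a channel briefly, e.g. while a bus reset settles, pairs a
 * freeze with a callout firing scsipi_channel_timed_thaw().  The
 * callout handle name below is hypothetical; adapters keep their own.
 *
 *	scsipi_channel_freeze(chan, 1);
 *	callout_reset(&sc->sc_thaw_callout, 2 * hz,
 *	    scsipi_channel_timed_thaw, chan);
 */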

/*
 * scsipi_periph_freeze:
 *
 *	Freeze a device's xfer queue.
 */
void
scsipi_periph_freeze(struct scsipi_periph *periph, int count)
{
	int s;

	s = splbio();
	periph->periph_qfreeze += count;
	splx(s);
}

/*
 * scsipi_periph_thaw:
 *
 *	Thaw a device's xfer queue.
 */
void
scsipi_periph_thaw(struct scsipi_periph *periph, int count)
{
	int s;

	s = splbio();
	periph->periph_qfreeze -= count;
#ifdef DIAGNOSTIC
	if (periph->periph_qfreeze < 0) {
		static const char pc[] = "periph freeze count < 0";
		scsipi_printaddr(periph);
		printf("%s\n", pc);
		panic(pc);
	}
#endif
	if (periph->periph_qfreeze == 0 &&
	    (periph->periph_flags & PERIPH_WAITING) != 0)
		wakeup(periph);
	splx(s);
}

/*
 * scsipi_periph_timed_thaw:
 *
 *	Thaw a device after some time has expired.
 */
void
scsipi_periph_timed_thaw(void *arg)
{
	int s;
	struct scsipi_periph *periph = arg;

	callout_stop(&periph->periph_callout);

	s = splbio();
	scsipi_periph_thaw(periph, 1);
	if ((periph->periph_channel->chan_flags & SCSIPI_CHAN_TACTIVE) == 0) {
		/*
		 * Kick the channel's queue here.  Note, we're running in
		 * interrupt context (softclock), so the adapter driver
		 * had better not sleep.
		 */
		scsipi_run_queue(periph->periph_channel);
	} else {
		/*
		 * Tell the completion thread to kick the channel's queue here.
		 */
		periph->periph_channel->chan_tflags |= SCSIPI_CHANT_KICK;
		wakeup(&periph->periph_channel->chan_complete);
	}
	splx(s);
}

/*
 * scsipi_wait_drain:
 *
 *	Wait for a periph's pending xfers to drain.
 */
void
scsipi_wait_drain(struct scsipi_periph *periph)
{
	int s;

	s = splbio();
	while (periph->periph_active != 0) {
		periph->periph_flags |= PERIPH_WAITDRAIN;
		(void) tsleep(&periph->periph_active, PRIBIO, "sxdrn", 0);
	}
	splx(s);
}

/*
 * scsipi_kill_pending:
 *
 *	Kill off all pending xfers for a periph.
 *
 *	NOTE: Must be called at splbio().
 */
void
scsipi_kill_pending(struct scsipi_periph *periph)
{

	(*periph->periph_channel->chan_bustype->bustype_kill_pending)(periph);
	scsipi_wait_drain(periph);
}
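
/*
 * Detach sketch (illustrative, not from this file): a disk driver's
 * detach path kills outstanding I/O at splbio() before tearing the
 * periph down; the softc field name is hypothetical.
 *
 *	s = splbio();
 *	scsipi_kill_pending(sc->sc_periph);
 *	splx(s);
 */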

/*
 * scsipi_print_cdb:
 *	prints a command descriptor block (for debugging purposes, error
 *	messages, SCSIPI_VERBOSE, ...)
 */
void
scsipi_print_cdb(struct scsipi_generic *cmd)
{
	int i, j;

	printf("0x%02x", cmd->opcode);

	switch (CDB_GROUPID(cmd->opcode)) {
	case CDB_GROUPID_0:
		j = CDB_GROUP0;
		break;
	case CDB_GROUPID_1:
		j = CDB_GROUP1;
		break;
	case CDB_GROUPID_2:
		j = CDB_GROUP2;
		break;
	case CDB_GROUPID_3:
		j = CDB_GROUP3;
		break;
	case CDB_GROUPID_4:
		j = CDB_GROUP4;
		break;
	case CDB_GROUPID_5:
		j = CDB_GROUP5;
		break;
	case CDB_GROUPID_6:
		j = CDB_GROUP6;
		break;
	case CDB_GROUPID_7:
		j = CDB_GROUP7;
		break;
	default:
		j = 0;
	}
	if (j == 0)
		j = sizeof (cmd->bytes);
	for (i = 0; i < j-1; i++) /* already done the opcode */
		printf(" %02x", cmd->bytes[i]);
}
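
/*
 * Example output (illustrative): a READ(6) CDB for one block at
 * LBA 0x210 is in group 0, so all six bytes are printed:
 *
 *	0x08 00 02 10 01 00
 */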

/*
 * scsipi_interpret_sense:
 *
 *	Look at the returned sense and act on the error, determining
 *	the unix error number to pass back.  (0 = report no error)
 *
 *	NOTE: If we return ERESTART, we are expected to have
 *	thawed the device!
 *
 *	THIS IS THE DEFAULT ERROR HANDLER FOR SCSI DEVICES.
 */
int
scsipi_interpret_sense(struct scsipi_xfer *xs)
{
	struct scsi_sense_data *sense;
	struct scsipi_periph *periph = xs->xs_periph;
	u_int8_t key;
	int error;
#ifndef	SCSIVERBOSE
	u_int32_t info;
	static const char *error_mes[] = {
		"soft error (corrected)",
		"not ready", "medium error",
		"non-media hardware failure", "illegal request",
		"unit attention", "readonly device",
		"no data found", "vendor unique",
		"copy aborted", "command aborted",
		"search returned equal", "volume overflow",
		"verify miscompare", "unknown error key"
	};
#endif

	sense = &xs->sense.scsi_sense;
#ifdef SCSIPI_DEBUG
	if (periph->periph_flags & SCSIPI_DB1) {
		int count;
		scsipi_printaddr(periph);
		printf(" sense debug information:\n");
		printf("\tcode 0x%x valid %d\n",
			SSD_RCODE(sense->response_code),
			sense->response_code & SSD_RCODE_VALID ? 1 : 0);
		printf("\tseg 0x%x key 0x%x ili 0x%x eom 0x%x fmark 0x%x\n",
			sense->segment,
			SSD_SENSE_KEY(sense->flags),
			sense->flags & SSD_ILI ? 1 : 0,
			sense->flags & SSD_EOM ? 1 : 0,
			sense->flags & SSD_FILEMARK ? 1 : 0);
		printf("\ninfo: 0x%x 0x%x 0x%x 0x%x followed by %d "
			"extra bytes\n",
			sense->info[0],
			sense->info[1],
			sense->info[2],
			sense->info[3],
			sense->extra_len);
		printf("\textra: ");
		for (count = 0; count < SSD_ADD_BYTES_LIM(sense); count++)
			printf("0x%x ", sense->csi[count]);
		printf("\n");
	}
#endif

	/*
	 * If the periph has its own error handler, call it first.
	 * If it returns a legit error value, return that, otherwise
	 * it wants us to continue with normal error processing.
	 */
	if (periph->periph_switch->psw_error != NULL) {
		SC_DEBUG(periph, SCSIPI_DB2,
		    ("calling private err_handler()\n"));
		error = (*periph->periph_switch->psw_error)(xs);
		if (error != EJUSTRETURN)
			return (error);
	}
	/* otherwise use the default */
	switch (SSD_RCODE(sense->response_code)) {

		/*
		 * Old SCSI-1 and SASI devices respond with
		 * codes other than 70.
		 */
	case 0x00:		/* no error (command completed OK) */
		return (0);
	case 0x04:		/* drive not ready after it was selected */
		if ((periph->periph_flags & PERIPH_REMOVABLE) != 0)
			periph->periph_flags &= ~PERIPH_MEDIA_LOADED;
		if ((xs->xs_control & XS_CTL_IGNORE_NOT_READY) != 0)
			return (0);
		/* XXX - display some sort of error here? */
		return (EIO);
	case 0x20:		/* invalid command */
		if ((xs->xs_control &
		    XS_CTL_IGNORE_ILLEGAL_REQUEST) != 0)
			return (0);
		return (EINVAL);
	case 0x25:		/* invalid LUN (Adaptec ACB-4000) */
		return (EACCES);

		/*
		 * If it's code 70, use the extended stuff and
		 * interpret the key
		 */
	case 0x71:		/* delayed error */
		scsipi_printaddr(periph);
		key = SSD_SENSE_KEY(sense->flags);
		printf(" DEFERRED ERROR, key = 0x%x\n", key);
		/* FALLTHROUGH */
	case 0x70:
#ifndef	SCSIVERBOSE
		if ((sense->response_code & SSD_RCODE_VALID) != 0)
			info = _4btol(sense->info);
		else
			info = 0;
#endif
		key = SSD_SENSE_KEY(sense->flags);

		switch (key) {
		case SKEY_NO_SENSE:
		case SKEY_RECOVERED_ERROR:
			if (xs->resid == xs->datalen && xs->datalen) {
				/*
				 * Why is this here?
				 */
				xs->resid = 0;	/* not short read */
			}
			/* FALLTHROUGH */
		case SKEY_EQUAL:
			error = 0;
			break;
		case SKEY_NOT_READY:
			if ((periph->periph_flags & PERIPH_REMOVABLE) != 0)
				periph->periph_flags &= ~PERIPH_MEDIA_LOADED;
			if ((xs->xs_control & XS_CTL_IGNORE_NOT_READY) != 0)
				return (0);
			if (sense->asc == 0x3A) {
				error = ENODEV; /* Medium not present */
				if (xs->xs_control & XS_CTL_SILENT_NODEV)
					return (error);
			} else
				error = EIO;
			if ((xs->xs_control & XS_CTL_SILENT) != 0)
				return (error);
			break;
		case SKEY_ILLEGAL_REQUEST:
			if ((xs->xs_control &
			    XS_CTL_IGNORE_ILLEGAL_REQUEST) != 0)
				return (0);
			/*
			 * Handle the case where a device reports
			 * Logical Unit Not Supported during discovery.
			 */
			if ((xs->xs_control & XS_CTL_DISCOVERY) != 0 &&
			    sense->asc == 0x25 &&
			    sense->ascq == 0x00)
				return (EINVAL);
			if ((xs->xs_control & XS_CTL_SILENT) != 0)
				return (EIO);
			error = EINVAL;
			break;
		case SKEY_UNIT_ATTENTION:
			if (sense->asc == 0x29 &&
			    sense->ascq == 0x00) {
				/* device or bus reset */
				return (ERESTART);
			}
			if ((periph->periph_flags & PERIPH_REMOVABLE) != 0)
				periph->periph_flags &= ~PERIPH_MEDIA_LOADED;
			if ((xs->xs_control &
			    XS_CTL_IGNORE_MEDIA_CHANGE) != 0 ||
			    /* XXX Should reupload any transient state. */
			    (periph->periph_flags &
			     PERIPH_REMOVABLE) == 0) {
				return (ERESTART);
			}
			if ((xs->xs_control & XS_CTL_SILENT) != 0)
				return (EIO);
			error = EIO;
			break;
		case SKEY_DATA_PROTECT:
			error = EROFS;
			break;
		case SKEY_BLANK_CHECK:
			error = 0;
			break;
		case SKEY_ABORTED_COMMAND:
			if (xs->xs_retries != 0) {
				xs->xs_retries--;
				error = ERESTART;
			} else
				error = EIO;
			break;
		case SKEY_VOLUME_OVERFLOW:
			error = ENOSPC;
			break;
		default:
			error = EIO;
			break;
		}

#ifdef SCSIVERBOSE
		if (key && (xs->xs_control & XS_CTL_SILENT) == 0)
			scsipi_print_sense(xs, 0);
#else
		if (key) {
			scsipi_printaddr(periph);
			printf("%s", error_mes[key - 1]);
			if ((sense->response_code & SSD_RCODE_VALID) != 0) {
				switch (key) {
				case SKEY_NOT_READY:
				case SKEY_ILLEGAL_REQUEST:
				case SKEY_UNIT_ATTENTION:
				case SKEY_DATA_PROTECT:
					break;
				case SKEY_BLANK_CHECK:
					printf(", requested size: %d (decimal)",
					    info);
					break;
				case SKEY_ABORTED_COMMAND:
					if (xs->xs_retries)
						printf(", retrying");
					printf(", cmd 0x%x, info 0x%x",
					    xs->cmd->opcode, info);
					break;
				default:
					printf(", info = %d (decimal)", info);
				}
			}
			if (sense->extra_len != 0) {
				int n;
				printf(", data =");
				for (n = 0; n < sense->extra_len; n++)
					printf(" %02x",
					    sense->csi[n]);
			}
			printf("\n");
		}
#endif
		return (error);

	/*
	 * Some other code, just report it
	 */
	default:
#if    defined(SCSIDEBUG) || defined(DEBUG)
	{
		static const char *uc = "undecodable sense error";
		int i;
		u_int8_t *cptr = (u_int8_t *) sense;
		scsipi_printaddr(periph);
		if (xs->cmd == &xs->cmdstore) {
			printf("%s for opcode 0x%x, data=",
			    uc, xs->cmdstore.opcode);
		} else {
			printf("%s, data=", uc);
		}
		for (i = 0; i < sizeof (sense); i++)
			printf(" 0x%02x", *(cptr++) & 0xff);
		printf("\n");
	}
#else
		scsipi_printaddr(periph);
		printf("Sense Error Code 0x%x",
			SSD_RCODE(sense->response_code));
		if ((sense->response_code & SSD_RCODE_VALID) != 0) {
			struct scsi_sense_data_unextended *usense =
			    (struct scsi_sense_data_unextended *)sense;
			printf(" at block no. %d (decimal)",
			    _3btol(usense->block));
		}
		printf("\n");
#endif
		return (EIO);
	}
}

/*
 * scsipi_size:
 *
 *	Find out from the device what its capacity is.
 */
u_int64_t
scsipi_size(struct scsipi_periph *periph, int *secsize, int flags)
{
	union {
		struct scsipi_read_capacity_10 cmd;
		struct scsipi_read_capacity_16 cmd16;
	} cmd;
	union {
		struct scsipi_read_capacity_10_data data;
		struct scsipi_read_capacity_16_data data16;
	} data;

	memset(&cmd, 0, sizeof(cmd));
	cmd.cmd.opcode = READ_CAPACITY_10;

	/*
	 * If the command works, interpret the result as a 4 byte
	 * number of blocks
	 */
	if (scsipi_command(periph, (void *)&cmd.cmd, sizeof(cmd.cmd),
	    (void *)&data.data, sizeof(data.data), SCSIPIRETRIES, 20000, NULL,
	    flags | XS_CTL_DATA_IN | XS_CTL_DATA_ONSTACK | XS_CTL_SILENT) != 0)
		return (0);

	if (_4btol(data.data.addr) != 0xffffffff) {
		if (secsize)
			*secsize = _4btol(data.data.length);
		return (_4btol(data.data.addr) + 1);
	}

	/*
	 * Device is larger than can be reflected by READ CAPACITY (10).
	 * Try READ CAPACITY (16).
	 */

	memset(&cmd, 0, sizeof(cmd));
	cmd.cmd16.opcode = READ_CAPACITY_16;
	cmd.cmd16.byte2 = SRC16_SERVICE_ACTION;
	_lto4b(sizeof(data.data16), cmd.cmd16.len);

	if (scsipi_command(periph, (void *)&cmd.cmd16, sizeof(cmd.cmd16),
	    (void *)&data.data16, sizeof(data.data16), SCSIPIRETRIES, 20000,
	    NULL,
	    flags | XS_CTL_DATA_IN | XS_CTL_DATA_ONSTACK | XS_CTL_SILENT) != 0)
		return (0);

	if (secsize)
		*secsize = _4btol(data.data16.length);
	return (_8btol(data.data16.addr) + 1);
}
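
/*
 * Usage sketch (illustrative, not from this file): a disk driver
 * sizing its medium.  A return of 0 means both READ CAPACITY forms
 * failed and the caller must fall back to another geometry source.
 *
 *	int secsize;
 *	u_int64_t blocks;
 *
 *	blocks = scsipi_size(periph, &secsize, XS_CTL_SILENT);
 *	if (blocks == 0)
 *		... use a driver-specific default ...
 */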

/*
 * scsipi_test_unit_ready:
 *
 *	Issue a `test unit ready' request.
 */
int
scsipi_test_unit_ready(struct scsipi_periph *periph, int flags)
{
	struct scsi_test_unit_ready cmd;
	int retries;

	/* some ATAPI drives don't support TEST UNIT READY. Sigh */
	if (periph->periph_quirks & PQUIRK_NOTUR)
		return (0);

	if (flags & XS_CTL_DISCOVERY)
		retries = 0;
	else
		retries = SCSIPIRETRIES;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = SCSI_TEST_UNIT_READY;

	return (scsipi_command(periph, (void *)&cmd, sizeof(cmd), 0, 0,
	    retries, 10000, NULL, flags));
}
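
/*
 * Sketch (illustrative): waiting for a unit to spin up by retrying
 * TUR once a second; real drivers typically sleep between attempts
 * rather than busy-wait.
 *
 *	for (try = 0; try < 10; try++) {
 *		if (scsipi_test_unit_ready(periph, XS_CTL_SILENT) == 0)
 *			break;
 *		(void) tsleep(&try, PRIBIO, "turwait", hz);
 *	}
 */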

/*
 * scsipi_inquire:
 *
 *	Ask the device about itself.
 */
int
scsipi_inquire(struct scsipi_periph *periph, struct scsipi_inquiry_data *inqbuf,
    int flags)
{
	struct scsipi_inquiry cmd;
	int error;
	int retries;

	if (flags & XS_CTL_DISCOVERY)
		retries = 0;
	else
		retries = SCSIPIRETRIES;

	/*
	 * If we request more data than the device can provide, it SHOULD just
	 * return a short response.  However, some devices error with an
	 * ILLEGAL REQUEST sense code, and yet others have even more special
	 * failure modes (such as the GL641USB flash adapter, which goes loony
	 * and sends corrupted CRCs).  To work around this, and to bring our
	 * behavior more in line with other OSes, we do a shorter inquiry,
	 * covering all the SCSI-2 information, first, and then request more
	 * data iff the "additional length" field indicates there is more.
	 * - mycroft, 2003/10/16
	 */
	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = INQUIRY;
	cmd.length = SCSIPI_INQUIRY_LENGTH_SCSI2;
	error = scsipi_command(periph, (void *)&cmd, sizeof(cmd),
	    (void *)inqbuf, SCSIPI_INQUIRY_LENGTH_SCSI2, retries,
	    10000, NULL, flags | XS_CTL_DATA_IN);
	if (!error &&
	    inqbuf->additional_length > SCSIPI_INQUIRY_LENGTH_SCSI2 - 4) {
#if 0
printf("inquire: addlen=%d, retrying\n", inqbuf->additional_length);
#endif
		cmd.length = SCSIPI_INQUIRY_LENGTH_SCSI3;
		error = scsipi_command(periph, (void *)&cmd, sizeof(cmd),
		    (void *)inqbuf, SCSIPI_INQUIRY_LENGTH_SCSI3, retries,
		    10000, NULL, flags | XS_CTL_DATA_IN);
#if 0
printf("inquire: error=%d\n", error);
#endif
	}

#ifdef SCSI_OLD_NOINQUIRY
	/*
	 * Kludge for the Adaptec ACB-4000 SCSI->MFM translator.
	 * This board doesn't support the INQUIRY command at all.
	 */
	if (error == EINVAL || error == EACCES) {
		/*
		 * Conjure up an INQUIRY response.
		 */
		inqbuf->device = (error == EINVAL ?
		    SID_QUAL_LU_PRESENT :
		    SID_QUAL_LU_NOTPRESENT) | T_DIRECT;
		inqbuf->dev_qual2 = 0;
		inqbuf->version = 0;
		inqbuf->response_format = SID_FORMAT_SCSI1;
		inqbuf->additional_length = SCSIPI_INQUIRY_LENGTH_SCSI2 - 4;
		inqbuf->flags1 = inqbuf->flags2 = inqbuf->flags3 = 0;
		memcpy(inqbuf->vendor, "ADAPTEC ACB-4000            ", 28);
		error = 0;
	}

	/*
	 * Kludge for the Emulex MT-02 SCSI->QIC translator.
	 * This board gives an empty response to an INQUIRY command.
	 */
	else if (error == 0 &&
	    inqbuf->device == (SID_QUAL_LU_PRESENT | T_DIRECT) &&
	    inqbuf->dev_qual2 == 0 &&
	    inqbuf->version == 0 &&
	    inqbuf->response_format == SID_FORMAT_SCSI1) {
		/*
		 * Fill out the INQUIRY response.
		 */
		inqbuf->device = (SID_QUAL_LU_PRESENT | T_SEQUENTIAL);
		inqbuf->dev_qual2 = SID_REMOVABLE;
		inqbuf->additional_length = SCSIPI_INQUIRY_LENGTH_SCSI2 - 4;
		inqbuf->flags1 = inqbuf->flags2 = inqbuf->flags3 = 0;
		memcpy(inqbuf->vendor, "EMULEX  MT-02 QIC           ", 28);
	}
#endif /* SCSI_OLD_NOINQUIRY */

	return error;
}

/*
 * scsipi_prevent:
 *
 *	Prevent or allow the user to remove the media
 */
int
scsipi_prevent(struct scsipi_periph *periph, int type, int flags)
{
	struct scsi_prevent_allow_medium_removal cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = SCSI_PREVENT_ALLOW_MEDIUM_REMOVAL;
	cmd.how = type;

	return (scsipi_command(periph, (void *)&cmd, sizeof(cmd), 0, 0,
	    SCSIPIRETRIES, 5000, NULL, flags));
}
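
/*
 * Sketch (illustrative): a removable-media driver locks the door
 * while the device is open and unlocks it on close; the SPAMR_*
 * constants are assumed from scsi_spc.h.
 *
 *	scsipi_prevent(periph, SPAMR_PREVENT_DT,
 *	    XS_CTL_IGNORE_ILLEGAL_REQUEST | XS_CTL_IGNORE_MEDIA_CHANGE);
 *	...
 *	scsipi_prevent(periph, SPAMR_ALLOW,
 *	    XS_CTL_IGNORE_ILLEGAL_REQUEST | XS_CTL_IGNORE_MEDIA_CHANGE);
 */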

/*
 * scsipi_start:
 *
 *	Send a START UNIT.
 */
int
scsipi_start(struct scsipi_periph *periph, int type, int flags)
{
	struct scsipi_start_stop cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = START_STOP;
	cmd.byte2 = 0x00;
	cmd.how = type;

	return (scsipi_command(periph, (void *)&cmd, sizeof(cmd), 0, 0,
	    SCSIPIRETRIES, (type & SSS_START) ? 60000 : 10000, NULL, flags));
}

/*
 * scsipi_mode_sense, scsipi_mode_sense_big:
 *	get a sense page from a device
 */

int
scsipi_mode_sense(struct scsipi_periph *periph, int byte2, int page,
    struct scsi_mode_parameter_header_6 *data, int len, int flags, int retries,
    int timeout)
{
	struct scsi_mode_sense_6 cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = SCSI_MODE_SENSE_6;
	cmd.byte2 = byte2;
	cmd.page = page;
	cmd.length = len & 0xff;

	return (scsipi_command(periph, (void *)&cmd, sizeof(cmd),
	    (void *)data, len, retries, timeout, NULL, flags | XS_CTL_DATA_IN));
}

int
scsipi_mode_sense_big(struct scsipi_periph *periph, int byte2, int page,
    struct scsi_mode_parameter_header_10 *data, int len, int flags, int retries,
    int timeout)
{
	struct scsi_mode_sense_10 cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = SCSI_MODE_SENSE_10;
	cmd.byte2 = byte2;
	cmd.page = page;
	_lto2b(len, cmd.length);

	return (scsipi_command(periph, (void *)&cmd, sizeof(cmd),
	    (void *)data, len, retries, timeout, NULL, flags | XS_CTL_DATA_IN));
}

int
scsipi_mode_select(struct scsipi_periph *periph, int byte2,
    struct scsi_mode_parameter_header_6 *data, int len, int flags, int retries,
    int timeout)
{
	struct scsi_mode_select_6 cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = SCSI_MODE_SELECT_6;
	cmd.byte2 = byte2;
	cmd.length = len & 0xff;

	return (scsipi_command(periph, (void *)&cmd, sizeof(cmd),
	    (void *)data, len, retries, timeout, NULL, flags | XS_CTL_DATA_OUT));
}

int
scsipi_mode_select_big(struct scsipi_periph *periph, int byte2,
    struct scsi_mode_parameter_header_10 *data, int len, int flags, int retries,
    int timeout)
{
	struct scsi_mode_select_10 cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = SCSI_MODE_SELECT_10;
	cmd.byte2 = byte2;
	_lto2b(len, cmd.length);

	return (scsipi_command(periph, (void *)&cmd, sizeof(cmd),
	    (void *)data, len, retries, timeout, NULL, flags | XS_CTL_DATA_OUT));
}
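
/*
 * Sketch (illustrative): fetching the caching mode page (page 8)
 * with MODE SENSE(6).  The payload layout after the header is
 * device-dependent; the buffer shape here is an assumption for the
 * example.
 *
 *	struct {
 *		struct scsi_mode_parameter_header_6 header;
 *		u_int8_t page[128];
 *	} buf;
 *
 *	error = scsipi_mode_sense(periph, 0, 8, &buf.header,
 *	    sizeof(buf), XS_CTL_DATA_ONSTACK, SCSIPIRETRIES, 10000);
 */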

/*
 * scsipi_done:
 *
 *	This routine is called by an adapter's interrupt handler when
 *	an xfer is completed.
 */
void
scsipi_done(struct scsipi_xfer *xs)
{
	struct scsipi_periph *periph = xs->xs_periph;
	struct scsipi_channel *chan = periph->periph_channel;
	int s, freezecnt;

	SC_DEBUG(periph, SCSIPI_DB2, ("scsipi_done\n"));
#ifdef SCSIPI_DEBUG
	if (periph->periph_dbflags & SCSIPI_DB1)
		show_scsipi_cmd(xs);
#endif

	s = splbio();
	/*
	 * The resource this command was using is now free.
	 */
	if (xs->xs_status & XS_STS_DONE) {
		/* XXX in certain circumstances, such as a device
		 * being detached, an xs that has already been
		 * scsipi_done()'d by the main thread will be done'd
		 * again by scsibusdetach().  Putting the xs on the
		 * chan_complete queue causes list corruption and
		 * everyone dies.  This prevents that, but perhaps
		 * there should be better coordination somewhere such
		 * that this won't ever happen (and can be turned into
		 * a KASSERT()).
		 */
		splx(s);
		goto out;
	}
	scsipi_put_resource(chan);
	xs->xs_periph->periph_sent--;

	/*
	 * If the command was tagged, free the tag.
	 */
	if (XS_CTL_TAGTYPE(xs) != 0)
		scsipi_put_tag(xs);
	else
		periph->periph_flags &= ~PERIPH_UNTAG;

	/* Mark the command as `done'. */
	xs->xs_status |= XS_STS_DONE;

#ifdef DIAGNOSTIC
	if ((xs->xs_control & (XS_CTL_ASYNC|XS_CTL_POLL)) ==
	    (XS_CTL_ASYNC|XS_CTL_POLL))
		panic("scsipi_done: ASYNC and POLL");
#endif

	/*
	 * If the xfer had an error of any sort, freeze the
	 * periph's queue.  Freeze it again if we were requested
	 * to do so in the xfer.
	 */
	freezecnt = 0;
	if (xs->error != XS_NOERROR)
		freezecnt++;
	if (xs->xs_control & XS_CTL_FREEZE_PERIPH)
		freezecnt++;
	if (freezecnt != 0)
		scsipi_periph_freeze(periph, freezecnt);

	/*
	 * record the xfer with a pending sense, in case a SCSI reset is
	 * received before the thread is waked up.
	 */
	if (xs->error == XS_BUSY && xs->status == SCSI_CHECK) {
		periph->periph_flags |= PERIPH_SENSE;
		periph->periph_xscheck = xs;
	}

	/*
	 * If this was an xfer that was not to complete asynchronously,
	 * let the requesting thread perform error checking/handling
	 * in its context.
	 */
	if ((xs->xs_control & XS_CTL_ASYNC) == 0) {
		splx(s);
		/*
		 * If it's a polling job, just return, to unwind the
		 * call graph.  We don't need to restart the queue,
		 * because polling jobs are treated specially, and
		 * are really only used during crash dumps anyway
		 * (XXX or during boot-time autoconfiguration of
		 * ATAPI devices).
		 */
		if (xs->xs_control & XS_CTL_POLL)
			return;
		wakeup(xs);
		goto out;
	}

	/*
	 * Catch the extremely common case of I/O completing
	 * without error; no use in taking a context switch
	 * if we can handle it in interrupt context.
	 */
	if (xs->error == XS_NOERROR) {
		splx(s);
		(void) scsipi_complete(xs);
		goto out;
	}

	/*
	 * There is an error on this xfer.  Put it on the channel's
	 * completion queue, and wake up the completion thread.
	 */
	TAILQ_INSERT_TAIL(&chan->chan_complete, xs, channel_q);
	splx(s);
	wakeup(&chan->chan_complete);

 out:
	/*
	 * If there are more xfers on the channel's queue, attempt to
	 * run them.
	 */
	scsipi_run_queue(chan);
}

/*
 * scsipi_complete:
 *
 *	Completion of a scsipi_xfer.  This is the guts of scsipi_done().
 *
 *	NOTE: This routine MUST be called with valid thread context
 *	except for the case where the following two conditions are
 *	true:
 *
 *		xs->error == XS_NOERROR
 *		XS_CTL_ASYNC is set in xs->xs_control
 *
 *	The semantics of this routine can be tricky, so here is an
 *	explanation:
 *
 *		0		Xfer completed successfully.
 *
 *		ERESTART	Xfer had an error, but was restarted.
 *
 *		anything else	Xfer had an error, return value is Unix
 *				errno.
 *
 *	If the return value is anything but ERESTART:
 *
 *		- If XS_CTL_ASYNC is set, `xs' has been freed back to
 *		  the pool.
 *		- If there is a buf associated with the xfer,
 *		  it has been biodone()'d.
 */
static int
scsipi_complete(struct scsipi_xfer *xs)
{
	struct scsipi_periph *periph = xs->xs_periph;
	struct scsipi_channel *chan = periph->periph_channel;
	int error, s;

#ifdef DIAGNOSTIC
	if ((xs->xs_control & XS_CTL_ASYNC) != 0 && xs->bp == NULL)
		panic("scsipi_complete: XS_CTL_ASYNC but no buf");
#endif
	/*
	 * If command terminated with a CHECK CONDITION, we need to issue a
	 * REQUEST_SENSE command. Once the REQUEST_SENSE has been processed
	 * we'll have the real status.
	 * Must be processed at splbio() to avoid missing a SCSI bus reset
	 * for this command.
	 */
	s = splbio();
	if (xs->error == XS_BUSY && xs->status == SCSI_CHECK) {
		/* request sense for a request sense ? */
		if (xs->xs_control & XS_CTL_REQSENSE) {
			scsipi_printaddr(periph);
			printf("request sense for a request sense ?\n");
			/* XXX maybe we should reset the device ? */
			/* we've been frozen because xs->error != XS_NOERROR */
			scsipi_periph_thaw(periph, 1);
			splx(s);
			if (xs->resid < xs->datalen) {
				printf("we read %d bytes of sense anyway:\n",
				    xs->datalen - xs->resid);
#ifdef SCSIVERBOSE
				scsipi_print_sense_data((void *)xs->data, 0);
#endif
			}
			return EINVAL;
		}
		scsipi_request_sense(xs);
	}
	splx(s);

	/*
	 * If it's a user level request, bypass all usual completion
	 * processing, let the user work it out..
	 */
	if ((xs->xs_control & XS_CTL_USERCMD) != 0) {
		SC_DEBUG(periph, SCSIPI_DB3, ("calling user done()\n"));
		if (xs->error != XS_NOERROR)
			scsipi_periph_thaw(periph, 1);
		scsipi_user_done(xs);
		SC_DEBUG(periph, SCSIPI_DB3, ("returned from user done()\n "));
		return 0;
	}

	switch (xs->error) {
	case XS_NOERROR:
		error = 0;
		break;

	case XS_SENSE:
	case XS_SHORTSENSE:
		error = (*chan->chan_bustype->bustype_interpret_sense)(xs);
		break;

	case XS_RESOURCE_SHORTAGE:
		/*
		 * XXX Should freeze channel's queue.
		 */
		scsipi_printaddr(periph);
		printf("adapter resource shortage\n");
		/* FALLTHROUGH */

	case XS_BUSY:
		if (xs->error == XS_BUSY && xs->status == SCSI_QUEUE_FULL) {
			struct scsipi_max_openings mo;

			/*
			 * We set the openings to active - 1, assuming that
			 * the command that got us here is the first one that
			 * can't fit into the device's queue.  If that's not
			 * the case, I guess we'll find out soon enough.
			 */
			mo.mo_target = periph->periph_target;
			mo.mo_lun = periph->periph_lun;
			if (periph->periph_active < periph->periph_openings)
				mo.mo_openings = periph->periph_active - 1;
			else
				mo.mo_openings = periph->periph_openings - 1;
#ifdef DIAGNOSTIC
			if (mo.mo_openings < 0) {
				scsipi_printaddr(periph);
				printf("QUEUE FULL resulted in < 0 openings\n");
				panic("scsipi_done");
			}
#endif
			if (mo.mo_openings == 0) {
				scsipi_printaddr(periph);
				printf("QUEUE FULL resulted in 0 openings\n");
				mo.mo_openings = 1;
			}
			scsipi_async_event(chan, ASYNC_EVENT_MAX_OPENINGS, &mo);
			error = ERESTART;
		} else if (xs->xs_retries != 0) {
			xs->xs_retries--;
			/*
			 * Wait one second, and try again.
			 */
			if ((xs->xs_control & XS_CTL_POLL) ||
			    (chan->chan_flags & SCSIPI_CHAN_TACTIVE) == 0) {
				delay(1000000);
			} else if (!callout_pending(&periph->periph_callout)) {
				scsipi_periph_freeze(periph, 1);
				callout_reset(&periph->periph_callout,
				    hz, scsipi_periph_timed_thaw, periph);
			}
			error = ERESTART;
		} else
			error = EBUSY;
		break;

	case XS_REQUEUE:
		error = ERESTART;
		break;

	case XS_SELTIMEOUT:
	case XS_TIMEOUT:
		/*
		 * If the device hasn't gone away, honor retry counts.
		 *
		 * Note that if we're in the middle of probing it,
		 * it won't be found because it isn't here yet so
		 * we won't honor the retry count in that case.
		 */
		if (scsipi_lookup_periph(chan, periph->periph_target,
		    periph->periph_lun) && xs->xs_retries != 0) {
			xs->xs_retries--;
			error = ERESTART;
		} else
			error = EIO;
		break;

	case XS_RESET:
		if (xs->xs_control & XS_CTL_REQSENSE) {
			/*
			 * request sense interrupted by reset: signal it
			 * with EINTR return code.
			 */
			error = EINTR;
		} else {
			if (xs->xs_retries != 0) {
				xs->xs_retries--;
				error = ERESTART;
			} else
				error = EIO;
		}
		break;

	case XS_DRIVER_STUFFUP:
		scsipi_printaddr(periph);
		printf("generic HBA error\n");
		error = EIO;
		break;
	default:
		scsipi_printaddr(periph);
		printf("invalid return code from adapter: %d\n", xs->error);
		error = EIO;
		break;
	}

	s = splbio();
	if (error == ERESTART) {
		/*
		 * If we get here, the periph has been thawed and frozen
		 * again if we had to issue recovery commands.  Alternatively,
		 * it may have been frozen again and in a timed thaw.  In
		 * any case, we thaw the periph once we re-enqueue the
		 * command.  Once the periph is fully thawed, it will begin
		 * operation again.
		 */
		xs->error = XS_NOERROR;
		xs->status = SCSI_OK;
		xs->xs_status &= ~XS_STS_DONE;
		xs->xs_requeuecnt++;
		error = scsipi_enqueue(xs);
		if (error == 0) {
			scsipi_periph_thaw(periph, 1);
			splx(s);
			return (ERESTART);
		}
	}

	/*
	 * scsipi_done() freezes the queue if not XS_NOERROR.
	 * Thaw it here.
	 */
	if (xs->error != XS_NOERROR)
		scsipi_periph_thaw(periph, 1);

	if (periph->periph_switch->psw_done)
		periph->periph_switch->psw_done(xs, error);

	if (xs->xs_control & XS_CTL_ASYNC)
		scsipi_put_xs(xs);
	splx(s);

	return (error);
}

/*
 * Issue a request sense for the given scsipi_xfer.  Called when the xfer
 * returns with a CHECK_CONDITION status.  Must be called in valid thread
 * context and at splbio().
 */

static void
scsipi_request_sense(struct scsipi_xfer *xs)
{
	struct scsipi_periph *periph = xs->xs_periph;
	int flags, error;
	struct scsi_request_sense cmd;

	periph->periph_flags |= PERIPH_SENSE;

	/* if command was polling, request sense will too */
	flags = xs->xs_control & XS_CTL_POLL;
	/* Polling commands can't sleep */
	if (flags)
		flags |= XS_CTL_NOSLEEP;

	flags |= XS_CTL_REQSENSE | XS_CTL_URGENT | XS_CTL_DATA_IN |
	    XS_CTL_THAW_PERIPH | XS_CTL_FREEZE_PERIPH;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = SCSI_REQUEST_SENSE;
	cmd.length = sizeof(struct scsi_sense_data);

	error = scsipi_command(periph, (void *)&cmd, sizeof(cmd),
	    (void *)&xs->sense.scsi_sense, sizeof(struct scsi_sense_data),
	    0, 1000, NULL, flags);
	periph->periph_flags &= ~PERIPH_SENSE;
	periph->periph_xscheck = NULL;
	switch (error) {
	case 0:
		/* we have a valid sense */
		xs->error = XS_SENSE;
		return;
	case EINTR:
		/* REQUEST_SENSE interrupted by bus reset. */
		xs->error = XS_RESET;
		return;
	case EIO:
		/* request sense couldn't be performed */
		/*
		 * XXX this isn't quite right but we don't have anything
		 * better for now
		 */
		xs->error = XS_DRIVER_STUFFUP;
		return;
	default:
		/* Notify that request sense failed. */
		xs->error = XS_DRIVER_STUFFUP;
		scsipi_printaddr(periph);
		printf("request sense failed with error %d\n", error);
		return;
	}
}

/*
 * scsipi_enqueue:
 *
 *	Enqueue an xfer on a channel.
 */
static int
scsipi_enqueue(struct scsipi_xfer *xs)
{
	struct scsipi_channel *chan = xs->xs_periph->periph_channel;
	struct scsipi_xfer *qxs;
	int s;

	s = splbio();

	/*
	 * If the xfer is to be polled, and there are already jobs on
	 * the queue, we can't proceed.
	 */
	if ((xs->xs_control & XS_CTL_POLL) != 0 &&
	    TAILQ_FIRST(&chan->chan_queue) != NULL) {
		splx(s);
		xs->error = XS_DRIVER_STUFFUP;
		return (EAGAIN);
	}

	/*
	 * If we have an URGENT xfer, it's an error recovery command
	 * and it should just go on the head of the channel's queue.
	 */
	if (xs->xs_control & XS_CTL_URGENT) {
		TAILQ_INSERT_HEAD(&chan->chan_queue, xs, channel_q);
		goto out;
	}

	/*
	 * If this xfer has already been on the queue before, we
	 * need to reinsert it in the correct order.  That order is:
	 *
	 *	Immediately before the first xfer for this periph
	 *	with a requeuecnt less than xs->xs_requeuecnt.
	 *
	 * Failing that, at the end of the queue.  (We'll end up
	 * there naturally.)
	 */
	if (xs->xs_requeuecnt != 0) {
		for (qxs = TAILQ_FIRST(&chan->chan_queue); qxs != NULL;
		     qxs = TAILQ_NEXT(qxs, channel_q)) {
			if (qxs->xs_periph == xs->xs_periph &&
			    qxs->xs_requeuecnt < xs->xs_requeuecnt)
				break;
		}
		if (qxs != NULL) {
			TAILQ_INSERT_AFTER(&chan->chan_queue, qxs, xs,
			    channel_q);
			goto out;
		}
	}
	TAILQ_INSERT_TAIL(&chan->chan_queue, xs, channel_q);
 out:
	if (xs->xs_control & XS_CTL_THAW_PERIPH)
		scsipi_periph_thaw(xs->xs_periph, 1);
	splx(s);
	return (0);
}

/*
 * scsipi_run_queue:
 *
 *	Start as many xfers as possible running on the channel.
 */
static void
scsipi_run_queue(struct scsipi_channel *chan)
{
	struct scsipi_xfer *xs;
	struct scsipi_periph *periph;
	int s;

	for (;;) {
		s = splbio();

		/*
		 * If the channel is frozen, we can't do any work right
		 * now.
		 */
		if (chan->chan_qfreeze != 0) {
			splx(s);
			return;
		}

		/*
		 * Look for work to do, and make sure we can do it.
		 */
		for (xs = TAILQ_FIRST(&chan->chan_queue); xs != NULL;
		     xs = TAILQ_NEXT(xs, channel_q)) {
			periph = xs->xs_periph;

			if ((periph->periph_sent >= periph->periph_openings) ||
			    periph->periph_qfreeze != 0 ||
			    (periph->periph_flags & PERIPH_UNTAG) != 0)
				continue;

			if ((periph->periph_flags &
			    (PERIPH_RECOVERING | PERIPH_SENSE)) != 0 &&
			    (xs->xs_control & XS_CTL_URGENT) == 0)
				continue;

			/*
			 * We can issue this xfer!
			 */
			goto got_one;
		}

		/*
		 * Can't find any work to do right now.
		 */
		splx(s);
		return;

 got_one:
		/*
		 * Have an xfer to run.  Allocate a resource from
		 * the adapter to run it.  If we can't allocate that
		 * resource, we don't dequeue the xfer.
		 */
		if (scsipi_get_resource(chan) == 0) {
			/*
			 * Adapter is out of resources.  If the adapter
			 * supports it, attempt to grow them.
			 */
			if (scsipi_grow_resources(chan) == 0) {
				/*
				 * Wasn't able to grow resources,
				 * nothing more we can do.
				 */
				if (xs->xs_control & XS_CTL_POLL) {
					scsipi_printaddr(xs->xs_periph);
					printf("polling command but no "
					    "adapter resources");
					/* We'll panic shortly... */
				}
				splx(s);

				/*
				 * XXX: We should be able to note that
				 * XXX: resources are needed here!
				 */
				return;
			}
			/*
			 * scsipi_grow_resources() allocated the resource
			 * for us.
			 */
		}

		/*
		 * We have a resource to run this xfer, do it!
		 */
		TAILQ_REMOVE(&chan->chan_queue, xs, channel_q);

		/*
		 * If the command is to be tagged, allocate a tag ID
		 * for it.
		 */
		if (XS_CTL_TAGTYPE(xs) != 0)
			scsipi_get_tag(xs);
		else
			periph->periph_flags |= PERIPH_UNTAG;
		periph->periph_sent++;
		splx(s);

		scsipi_adapter_request(chan, ADAPTER_REQ_RUN_XFER, xs);
	}
#ifdef DIAGNOSTIC
	panic("scsipi_run_queue: impossible");
#endif
}
1905
1906 /*
1907 * scsipi_execute_xs:
1908 *
1909 * Begin execution of an xfer, waiting for it to complete, if necessary.
1910 */
1911 int
1912 scsipi_execute_xs(struct scsipi_xfer *xs)
1913 {
1914 struct scsipi_periph *periph = xs->xs_periph;
1915 struct scsipi_channel *chan = periph->periph_channel;
1916 int oasync, async, poll, error, s;
1917
1918 KASSERT(!cold);
1919
1920 (chan->chan_bustype->bustype_cmd)(xs);
1921
1922 if (xs->xs_control & XS_CTL_DATA_ONSTACK) {
1923 #if 1
1924 if (xs->xs_control & XS_CTL_ASYNC)
1925 panic("scsipi_execute_xs: on stack and async");
1926 #endif
1927 /*
1928 * If the I/O buffer is allocated on stack, the
1929 * process must NOT be swapped out, as the device will
1930 * be accessing the stack.
1931 */
1932 PHOLD(curlwp);
1933 }
1934
1935 xs->xs_status &= ~XS_STS_DONE;
1936 xs->error = XS_NOERROR;
1937 xs->resid = xs->datalen;
1938 xs->status = SCSI_OK;
1939
1940 #ifdef SCSIPI_DEBUG
1941 if (xs->xs_periph->periph_dbflags & SCSIPI_DB3) {
1942 printf("scsipi_execute_xs: ");
1943 show_scsipi_xs(xs);
1944 printf("\n");
1945 }
1946 #endif
1947
1948 /*
1949 * Deal with command tagging:
1950 *
1951 * - If the device's current operating mode doesn't
1952 * include tagged queueing, clear the tag mask.
1953 *
1954 * - If the device's current operating mode *does*
1955 * include tagged queueing, set the tag_type in
1956 * the xfer to the appropriate byte for the tag
1957 * message.
1958 */
1959 if ((PERIPH_XFER_MODE(periph) & PERIPH_CAP_TQING) == 0 ||
1960 (xs->xs_control & XS_CTL_REQSENSE)) {
1961 xs->xs_control &= ~XS_CTL_TAGMASK;
1962 xs->xs_tag_type = 0;
1963 } else {
1964 /*
1965 * If the request doesn't specify a tag, give Head
1966 * tags to URGENT operations and Ordered tags to
1967 * everything else.
1968 */
1969 if (XS_CTL_TAGTYPE(xs) == 0) {
1970 if (xs->xs_control & XS_CTL_URGENT)
1971 xs->xs_control |= XS_CTL_HEAD_TAG;
1972 else
1973 xs->xs_control |= XS_CTL_ORDERED_TAG;
1974 }
1975
1976 switch (XS_CTL_TAGTYPE(xs)) {
1977 case XS_CTL_ORDERED_TAG:
1978 xs->xs_tag_type = MSG_ORDERED_Q_TAG;
1979 break;
1980
1981 case XS_CTL_SIMPLE_TAG:
1982 xs->xs_tag_type = MSG_SIMPLE_Q_TAG;
1983 break;
1984
1985 case XS_CTL_HEAD_TAG:
1986 xs->xs_tag_type = MSG_HEAD_OF_Q_TAG;
1987 break;
1988
1989 default:
1990 scsipi_printaddr(periph);
1991 printf("invalid tag mask 0x%08x\n",
1992 XS_CTL_TAGTYPE(xs));
1993 panic("scsipi_execute_xs");
1994 }
1995 }
1996
1997 /* If the adaptor wants us to poll, poll. */
1998 if (chan->chan_adapter->adapt_flags & SCSIPI_ADAPT_POLL_ONLY)
1999 xs->xs_control |= XS_CTL_POLL;
2000
2001 /*
2002 * If we don't yet have a completion thread, or we are to poll for
2003 * completion, clear the ASYNC flag.
2004 */
2005 oasync = (xs->xs_control & XS_CTL_ASYNC);
2006 if (chan->chan_thread == NULL || (xs->xs_control & XS_CTL_POLL) != 0)
2007 xs->xs_control &= ~XS_CTL_ASYNC;
2008
2009 async = (xs->xs_control & XS_CTL_ASYNC);
2010 poll = (xs->xs_control & XS_CTL_POLL);
2011
2012 #ifdef DIAGNOSTIC
2013 if (oasync != 0 && xs->bp == NULL)
2014 panic("scsipi_execute_xs: XS_CTL_ASYNC but no buf");
2015 #endif
2016
2017 /*
2018 * Enqueue the transfer. If we're not polling for completion, this
2019 * should ALWAYS return `no error'.
2020 */
2021 error = scsipi_enqueue(xs);
2022 if (error) {
2023 if (poll == 0) {
2024 scsipi_printaddr(periph);
2025 printf("not polling, but enqueue failed with %d\n",
2026 error);
2027 panic("scsipi_execute_xs");
2028 }
2029
2030 scsipi_printaddr(periph);
2031 printf("should have flushed queue?\n");
2032 goto free_xs;
2033 }
2034
2035 restarted:
2036 scsipi_run_queue(chan);
2037
2038 /*
2039 * The xfer is enqueued, and possibly running. If it's to be
2040 * completed asynchronously, just return now.
2041 */
2042 if (async)
2043 return (0);
2044
2045 /*
2046 * Not an asynchronous command; wait for it to complete.
2047 */
2048 s = splbio();
2049 while ((xs->xs_status & XS_STS_DONE) == 0) {
2050 if (poll) {
2051 scsipi_printaddr(periph);
2052 printf("polling command not done\n");
2053 panic("scsipi_execute_xs");
2054 }
2055 (void) tsleep(xs, PRIBIO, "xscmd", 0);
2056 }
2057 splx(s);
2058
2059 /*
2060 * Command is complete. scsipi_done() has awakened us to perform
2061 * the error handling.
2062 */
2063 error = scsipi_complete(xs);
2064 if (error == ERESTART)
2065 goto restarted;
2066
2067 /*
2068 * If it was meant to run async and we cleared aync ourselve,
2069 * don't return an error here. It has already been handled
2070 */
2071 if (oasync)
2072 error = 0;
2073 /*
2074 * Command completed successfully or fatal error occurred. Fall
2075 * into....
2076 */
2077 free_xs:
2078 if (xs->xs_control & XS_CTL_DATA_ONSTACK)
2079 PRELE(curlwp);
2080
2081 s = splbio();
2082 scsipi_put_xs(xs);
2083 splx(s);
2084
2085 /*
2086 * Kick the queue, keep it running in case it stopped for some
2087 * reason.
2088 */
2089 scsipi_run_queue(chan);
2090
2091 return (error);
2092 }
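/*
 * A minimal sketch of the submission contract implemented above
 * (illustrative, not part of this file): a polled caller sets
 * XS_CTL_POLL and receives the final error synchronously, while an
 * XS_CTL_ASYNC caller must have a struct buf attached (see the
 * DIAGNOSTIC check) and is finished off by the completion thread.
 *
 *	xs->xs_control |= XS_CTL_POLL;
 *	error = scsipi_execute_xs(xs);
 *
 * returns only once the command is done, whereas with XS_CTL_ASYNC set
 * the call returns 0 immediately after the xfer is enqueued.
 */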
2093
2094 /*
2095 * scsipi_completion_thread:
2096 *
2097 * This is the completion thread. We wait for errors on
2098 * asynchronous xfers, and perform the error handling
2099 * function, restarting the command, if necessary.
2100 */
2101 static void
2102 scsipi_completion_thread(void *arg)
2103 {
2104 struct scsipi_channel *chan = arg;
2105 struct scsipi_xfer *xs;
2106 int s;
2107
2108 if (chan->chan_init_cb)
2109 (*chan->chan_init_cb)(chan, chan->chan_init_cb_arg);
2110
2111 s = splbio();
2112 chan->chan_flags |= SCSIPI_CHAN_TACTIVE;
2113 splx(s);
2114 for (;;) {
2115 s = splbio();
2116 xs = TAILQ_FIRST(&chan->chan_complete);
2117 if (xs == NULL && chan->chan_tflags == 0) {
2118 /* nothing to do; wait */
2119 (void) tsleep(&chan->chan_complete, PRIBIO,
2120 "sccomp", 0);
2121 splx(s);
2122 continue;
2123 }
2124 if (chan->chan_tflags & SCSIPI_CHANT_CALLBACK) {
2125 /* call chan_callback from thread context */
2126 chan->chan_tflags &= ~SCSIPI_CHANT_CALLBACK;
2127 chan->chan_callback(chan, chan->chan_callback_arg);
2128 splx(s);
2129 continue;
2130 }
2131 if (chan->chan_tflags & SCSIPI_CHANT_GROWRES) {
2132 /* attempt to get more openings for this channel */
2133 chan->chan_tflags &= ~SCSIPI_CHANT_GROWRES;
2134 scsipi_adapter_request(chan,
2135 ADAPTER_REQ_GROW_RESOURCES, NULL);
2136 scsipi_channel_thaw(chan, 1);
2137 splx(s);
2138 if (chan->chan_tflags & SCSIPI_CHANT_GROWRES) {
2139 preempt(1);
2140 }
2141 continue;
2142 }
2143 if (chan->chan_tflags & SCSIPI_CHANT_KICK) {
2144 /* explicitly run the queues for this channel */
2145 chan->chan_tflags &= ~SCSIPI_CHANT_KICK;
2146 scsipi_run_queue(chan);
2147 splx(s);
2148 continue;
2149 }
2150 if (chan->chan_tflags & SCSIPI_CHANT_SHUTDOWN) {
2151 splx(s);
2152 break;
2153 }
2154 if (xs) {
2155 TAILQ_REMOVE(&chan->chan_complete, xs, channel_q);
2156 splx(s);
2157
2158 /*
2159 * Have an xfer with an error; process it.
2160 */
2161 (void) scsipi_complete(xs);
2162
2163 /*
2164 * Kick the queue; keep it running if it was stopped
2165 * for some reason.
2166 */
2167 scsipi_run_queue(chan);
2168 } else {
2169 splx(s);
2170 }
2171 }
2172
2173 chan->chan_thread = NULL;
2174
2175 /* In case parent is waiting for us to exit. */
2176 wakeup(&chan->chan_thread);
2177
2178 kthread_exit(0);
2179 }
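/*
 * Shutdown handshake (illustrative sketch): code tearing down a channel
 * asks the thread to exit and waits for chan_thread to be cleared,
 * matching the SCSIPI_CHANT_SHUTDOWN handling and the final wakeup
 * above:
 *
 *	s = splbio();
 *	chan->chan_tflags |= SCSIPI_CHANT_SHUTDOWN;
 *	wakeup(&chan->chan_complete);
 *	while (chan->chan_thread != NULL)
 *		(void) tsleep(&chan->chan_thread, PRIBIO, "scshut", 0);
 *	splx(s);
 */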
2180
2181 /*
2182 * scsipi_create_completion_thread:
2183 *
2184 * Callback to actually create the completion thread.
2185 */
2186 void
2187 scsipi_create_completion_thread(void *arg)
2188 {
2189 struct scsipi_channel *chan = arg;
2190 struct scsipi_adapter *adapt = chan->chan_adapter;
2191
2192 if (kthread_create1(scsipi_completion_thread, chan,
2193 &chan->chan_thread, "%s", chan->chan_name)) {
2194 printf("%s: unable to create completion thread for "
2195 "channel %d\n", adapt->adapt_dev->dv_xname,
2196 chan->chan_channel);
2197 panic("scsipi_create_completion_thread");
2198 }
2199 }
2200
2201 /*
2202 * scsipi_thread_call_callback:
2203 *
2204  *	Request that a callback be invoked from the completion thread.
2205 */
2206 int
2207 scsipi_thread_call_callback(struct scsipi_channel *chan,
2208 void (*callback)(struct scsipi_channel *, void *), void *arg)
2209 {
2210 int s;
2211
2212 s = splbio();
2213 if ((chan->chan_flags & SCSIPI_CHAN_TACTIVE) == 0) {
2214 /* kernel thread doesn't exist yet */
2215 splx(s);
2216 return ESRCH;
2217 }
2218 if (chan->chan_tflags & SCSIPI_CHANT_CALLBACK) {
2219 splx(s);
2220 return EBUSY;
2221 }
2222 scsipi_channel_freeze(chan, 1);
2223 chan->chan_callback = callback;
2224 chan->chan_callback_arg = arg;
2225 chan->chan_tflags |= SCSIPI_CHANT_CALLBACK;
2226 wakeup(&chan->chan_complete);
2227 splx(s);
2228 return(0);
2229 }
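/*
 * Illustrative use (hypothetical adapter code): defer work that needs
 * thread context.  scsipi_thread_call_callback() freezes the channel
 * by one count, so the callback is expected to thaw it again once the
 * deferred work is done:
 *
 *	static void
 *	xxx_callback(struct scsipi_channel *chan, void *arg)
 *	{
 *		... work that may sleep ...
 *		scsipi_channel_thaw(chan, 1);
 *	}
 *
 *	(void) scsipi_thread_call_callback(chan, xxx_callback, sc);
 */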
2230
2231 /*
2232 * scsipi_async_event:
2233 *
2234 * Handle an asynchronous event from an adapter.
2235 */
2236 void
2237 scsipi_async_event(struct scsipi_channel *chan, scsipi_async_event_t event,
2238 void *arg)
2239 {
2240 int s;
2241
2242 s = splbio();
2243 switch (event) {
2244 case ASYNC_EVENT_MAX_OPENINGS:
2245 scsipi_async_event_max_openings(chan,
2246 (struct scsipi_max_openings *)arg);
2247 break;
2248
2249 case ASYNC_EVENT_XFER_MODE:
2250 scsipi_async_event_xfer_mode(chan,
2251 (struct scsipi_xfer_mode *)arg);
2252 break;
2253 case ASYNC_EVENT_RESET:
2254 scsipi_async_event_channel_reset(chan);
2255 break;
2256 }
2257 splx(s);
2258 }
2259
2260 /*
2261 * scsipi_print_xfer_mode:
2262 *
2263 * Print a periph's capabilities.
2264 */
2265 void
2266 scsipi_print_xfer_mode(struct scsipi_periph *periph)
2267 {
2268 int period, freq, speed, mbs;
2269
2270 if ((periph->periph_flags & PERIPH_MODE_VALID) == 0)
2271 return;
2272
2273 aprint_normal("%s: ", periph->periph_dev->dv_xname);
2274 if (periph->periph_mode & (PERIPH_CAP_SYNC | PERIPH_CAP_DT)) {
2275 period = scsipi_sync_factor_to_period(periph->periph_period);
2276 aprint_normal("sync (%d.%02dns offset %d)",
2277 period / 100, period % 100, periph->periph_offset);
2278 } else
2279 aprint_normal("async");
2280
2281 if (periph->periph_mode & PERIPH_CAP_WIDE32)
2282 aprint_normal(", 32-bit");
2283 else if (periph->periph_mode & (PERIPH_CAP_WIDE16 | PERIPH_CAP_DT))
2284 aprint_normal(", 16-bit");
2285 else
2286 aprint_normal(", 8-bit");
2287
2288 if (periph->periph_mode & (PERIPH_CAP_SYNC | PERIPH_CAP_DT)) {
2289 freq = scsipi_sync_factor_to_freq(periph->periph_period);
2290 speed = freq;
2291 if (periph->periph_mode & PERIPH_CAP_WIDE32)
2292 speed *= 4;
2293 else if (periph->periph_mode &
2294 (PERIPH_CAP_WIDE16 | PERIPH_CAP_DT))
2295 speed *= 2;
2296 mbs = speed / 1000;
2297 if (mbs > 0)
2298 aprint_normal(" (%d.%03dMB/s)", mbs, speed % 1000);
2299 else
2300 aprint_normal(" (%dKB/s)", speed % 1000);
2301 }
2302
2303 aprint_normal(" transfers");
2304
2305 if (periph->periph_mode & PERIPH_CAP_TQING)
2306 aprint_normal(", tagged queueing");
2307
2308 aprint_normal("\n");
2309 }
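/*
 * Worked example of the output above (hypothetical "sd0"): a periph
 * negotiated to sync factor 0x0a (period 2500, i.e. 25.00 ns) with
 * offset 15 and 16-bit wide transfers gives freq = 100000000 / 2500 =
 * 40000, speed = 80000, and so prints:
 *
 *	sd0: sync (25.00ns offset 15), 16-bit (80.000MB/s) transfers,
 *	tagged queueing
 */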
2310
2311 /*
2312 * scsipi_async_event_max_openings:
2313 *
2314 * Update the maximum number of outstanding commands a
2315 * device may have.
2316 */
2317 static void
2318 scsipi_async_event_max_openings(struct scsipi_channel *chan,
2319 struct scsipi_max_openings *mo)
2320 {
2321 struct scsipi_periph *periph;
2322 int minlun, maxlun;
2323
2324 if (mo->mo_lun == -1) {
2325 /*
2326 * Wildcarded; apply it to all LUNs.
2327 */
2328 minlun = 0;
2329 maxlun = chan->chan_nluns - 1;
2330 } else
2331 minlun = maxlun = mo->mo_lun;
2332
2333 /* XXX This could really suck with a large LUN space. */
2334 for (; minlun <= maxlun; minlun++) {
2335 periph = scsipi_lookup_periph(chan, mo->mo_target, minlun);
2336 if (periph == NULL)
2337 continue;
2338
2339 if (mo->mo_openings < periph->periph_openings)
2340 periph->periph_openings = mo->mo_openings;
2341 else if (mo->mo_openings > periph->periph_openings &&
2342 (periph->periph_flags & PERIPH_GROW_OPENINGS) != 0)
2343 periph->periph_openings = mo->mo_openings;
2344 }
2345 }
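/*
 * Adapter-side sketch (illustrative): throttle target 2 down to at
 * most 4 outstanding commands on every LUN; mo_lun == -1 is the
 * wildcard handled above:
 *
 *	struct scsipi_max_openings mo;
 *
 *	mo.mo_target = 2;
 *	mo.mo_lun = -1;
 *	mo.mo_openings = 4;
 *	scsipi_async_event(chan, ASYNC_EVENT_MAX_OPENINGS, &mo);
 */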
2346
2347 /*
2348 * scsipi_async_event_xfer_mode:
2349 *
2350 * Update the xfer mode for all periphs sharing the
2351 * specified I_T Nexus.
2352 */
2353 static void
2354 scsipi_async_event_xfer_mode(struct scsipi_channel *chan,
2355 struct scsipi_xfer_mode *xm)
2356 {
2357 struct scsipi_periph *periph;
2358 int lun, announce, mode, period, offset;
2359
2360 for (lun = 0; lun < chan->chan_nluns; lun++) {
2361 periph = scsipi_lookup_periph(chan, xm->xm_target, lun);
2362 if (periph == NULL)
2363 continue;
2364 announce = 0;
2365
2366 /*
2367 * Clamp the xfer mode down to this periph's capabilities.
2368 */
2369 mode = xm->xm_mode & periph->periph_cap;
2370 if (mode & PERIPH_CAP_SYNC) {
2371 period = xm->xm_period;
2372 offset = xm->xm_offset;
2373 } else {
2374 period = 0;
2375 offset = 0;
2376 }
2377
2378 /*
2379 * If we do not have a valid xfer mode yet, or the parameters
2380 * are different, announce them.
2381 */
2382 if ((periph->periph_flags & PERIPH_MODE_VALID) == 0 ||
2383 periph->periph_mode != mode ||
2384 periph->periph_period != period ||
2385 periph->periph_offset != offset)
2386 announce = 1;
2387
2388 periph->periph_mode = mode;
2389 periph->periph_period = period;
2390 periph->periph_offset = offset;
2391 periph->periph_flags |= PERIPH_MODE_VALID;
2392
2393 if (announce)
2394 scsipi_print_xfer_mode(periph);
2395 }
2396 }
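/*
 * Adapter-side sketch (illustrative): report a freshly negotiated
 * FAST-40, 16-bit, tagged nexus for target 1.  Note that xm_period
 * carries the sync factor (here 0x0a), not the period itself:
 *
 *	struct scsipi_xfer_mode xm;
 *
 *	xm.xm_target = 1;
 *	xm.xm_mode = PERIPH_CAP_SYNC | PERIPH_CAP_WIDE16 | PERIPH_CAP_TQING;
 *	xm.xm_period = 0x0a;
 *	xm.xm_offset = 15;
 *	scsipi_async_event(chan, ASYNC_EVENT_XFER_MODE, &xm);
 */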
2397
2398 /*
2399 * scsipi_set_xfer_mode:
2400 *
2401 * Set the xfer mode for the specified I_T Nexus.
2402 */
2403 void
2404 scsipi_set_xfer_mode(struct scsipi_channel *chan, int target, int immed)
2405 {
2406 struct scsipi_xfer_mode xm;
2407 struct scsipi_periph *itperiph;
2408 int lun, s;
2409
2410 /*
2411 * Go to the minimal xfer mode.
2412 */
2413 xm.xm_target = target;
2414 xm.xm_mode = 0;
2415 xm.xm_period = 0; /* ignored */
2416 xm.xm_offset = 0; /* ignored */
2417
2418 /*
2419 * Find the first LUN we know about on this I_T Nexus.
2420 */
2421 for (itperiph = NULL, lun = 0; lun < chan->chan_nluns; lun++) {
2422 itperiph = scsipi_lookup_periph(chan, target, lun);
2423 if (itperiph != NULL)
2424 break;
2425 }
2426 if (itperiph != NULL) {
2427 xm.xm_mode = itperiph->periph_cap;
2428 /*
2429 * Now issue the request to the adapter.
2430 */
2431 s = splbio();
2432 scsipi_adapter_request(chan, ADAPTER_REQ_SET_XFER_MODE, &xm);
2433 splx(s);
2434 /*
2435 * If we want this to happen immediately, issue a dummy
2436 * command, since most adapters can't really negotiate unless
2437 * they're executing a job.
2438 */
2439 if (immed != 0) {
2440 (void) scsipi_test_unit_ready(itperiph,
2441 XS_CTL_DISCOVERY | XS_CTL_IGNORE_ILLEGAL_REQUEST |
2442 XS_CTL_IGNORE_NOT_READY |
2443 XS_CTL_IGNORE_MEDIA_CHANGE);
2444 }
2445 }
2446 }
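/*
 * For example (illustrative), a driver wanting the renegotiation to
 * take effect right away would call:
 *
 *	scsipi_set_xfer_mode(chan, target, 1);
 */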
2447
2448 /*
2449  * scsipi_async_event_channel_reset:
2450  *
2451  *	Handle a SCSI bus reset.
2452  *	Called at splbio.
2453 */
2454 static void
2455 scsipi_async_event_channel_reset(struct scsipi_channel *chan)
2456 {
2457 struct scsipi_xfer *xs, *xs_next;
2458 struct scsipi_periph *periph;
2459 int target, lun;
2460
2461 /*
2462 	 * Channel has been reset.  Also mark pending REQUEST_SENSE
2463 	 * commands as reset, since their sense data is no longer available.
2464 	 * We can't call scsipi_done() from here, as the command has not been
2465 	 * sent to the adapter yet (that would corrupt the accounting).
2466 */
2467
2468 for (xs = TAILQ_FIRST(&chan->chan_queue); xs != NULL; xs = xs_next) {
2469 xs_next = TAILQ_NEXT(xs, channel_q);
2470 if (xs->xs_control & XS_CTL_REQSENSE) {
2471 TAILQ_REMOVE(&chan->chan_queue, xs, channel_q);
2472 xs->error = XS_RESET;
2473 if ((xs->xs_control & XS_CTL_ASYNC) != 0)
2474 TAILQ_INSERT_TAIL(&chan->chan_complete, xs,
2475 channel_q);
2476 }
2477 }
2478 wakeup(&chan->chan_complete);
2479 /* Catch xs with pending sense which may not have a REQSENSE xs yet */
2480 for (target = 0; target < chan->chan_ntargets; target++) {
2481 if (target == chan->chan_id)
2482 continue;
2483 for (lun = 0; lun < chan->chan_nluns; lun++) {
2484 periph = scsipi_lookup_periph(chan, target, lun);
2485 if (periph) {
2486 xs = periph->periph_xscheck;
2487 if (xs)
2488 xs->error = XS_RESET;
2489 }
2490 }
2491 }
2492 }
2493
2494 /*
2495 * scsipi_target_detach:
2496 *
2497  *	Detach all periphs associated with an I_T nexus.
2498  *	Must be called from valid thread context.
2499 */
2500 int
2501 scsipi_target_detach(struct scsipi_channel *chan, int target, int lun,
2502 int flags)
2503 {
2504 struct scsipi_periph *periph;
2505 int ctarget, mintarget, maxtarget;
2506 int clun, minlun, maxlun;
2507 int error;
2508
2509 if (target == -1) {
2510 mintarget = 0;
2511 maxtarget = chan->chan_ntargets;
2512 } else {
2513 if (target == chan->chan_id)
2514 return EINVAL;
2515 if (target < 0 || target >= chan->chan_ntargets)
2516 return EINVAL;
2517 mintarget = target;
2518 maxtarget = target + 1;
2519 }
2520
2521 if (lun == -1) {
2522 minlun = 0;
2523 maxlun = chan->chan_nluns;
2524 } else {
2525 if (lun < 0 || lun >= chan->chan_nluns)
2526 return EINVAL;
2527 minlun = lun;
2528 maxlun = lun + 1;
2529 }
2530
2531 for (ctarget = mintarget; ctarget < maxtarget; ctarget++) {
2532 if (ctarget == chan->chan_id)
2533 continue;
2534
2535 for (clun = minlun; clun < maxlun; clun++) {
2536 periph = scsipi_lookup_periph(chan, ctarget, clun);
2537 if (periph == NULL)
2538 continue;
2539 error = config_detach(periph->periph_dev, flags);
2540 if (error)
2541 return (error);
2542 }
2543 }
2544 return(0);
2545 }
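/*
 * For example (illustrative), an adapter being unconfigured can use
 * the wildcards to take down everything it ever attached:
 *
 *	error = scsipi_target_detach(chan, -1, -1, DETACH_FORCE);
 */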
2546
2547 /*
2548 * scsipi_adapter_addref:
2549 *
2550  *	Add a reference to the specified adapter, enabling the
2551  *	adapter if necessary.
2552 */
2553 int
2554 scsipi_adapter_addref(struct scsipi_adapter *adapt)
2555 {
2556 int s, error = 0;
2557
2558 s = splbio();
2559 if (adapt->adapt_refcnt++ == 0 && adapt->adapt_enable != NULL) {
2560 error = (*adapt->adapt_enable)(adapt->adapt_dev, 1);
2561 if (error)
2562 adapt->adapt_refcnt--;
2563 }
2564 splx(s);
2565 return (error);
2566 }
2567
2568 /*
2569 * scsipi_adapter_delref:
2570 *
2571  *	Delete a reference to the specified adapter, disabling the
2572  *	adapter if possible.
2573 */
2574 void
2575 scsipi_adapter_delref(struct scsipi_adapter *adapt)
2576 {
2577 int s;
2578
2579 s = splbio();
2580 if (adapt->adapt_refcnt-- == 1 && adapt->adapt_enable != NULL)
2581 (void) (*adapt->adapt_enable)(adapt->adapt_dev, 0);
2582 splx(s);
2583 }
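/*
 * Typical pairing (illustrative): a periph driver holds an adapter
 * reference while its device is open, so the first open enables the
 * adapter and the last close may disable it again:
 *
 *	if ((error = scsipi_adapter_addref(adapt)) != 0)
 *		return (error);
 *	...
 *	scsipi_adapter_delref(adapt);
 */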
2584
2585 static struct scsipi_syncparam {
2586 int ss_factor;
2587 int ss_period; /* ns * 100 */
2588 } scsipi_syncparams[] = {
2589 { 0x08, 625 }, /* FAST-160 (Ultra320) */
2590 { 0x09, 1250 }, /* FAST-80 (Ultra160) */
2591 { 0x0a, 2500 }, /* FAST-40 40MHz (Ultra2) */
2592 { 0x0b, 3030 }, /* FAST-40 33MHz (Ultra2) */
2593 { 0x0c, 5000 }, /* FAST-20 (Ultra) */
2594 };
2595 static const int scsipi_nsyncparams =
2596 sizeof(scsipi_syncparams) / sizeof(scsipi_syncparams[0]);
2597
2598 int
2599 scsipi_sync_period_to_factor(int period /* ns * 100 */)
2600 {
2601 int i;
2602
2603 for (i = 0; i < scsipi_nsyncparams; i++) {
2604 if (period <= scsipi_syncparams[i].ss_period)
2605 return (scsipi_syncparams[i].ss_factor);
2606 }
2607
2608 return ((period / 100) / 4);
2609 }
2610
2611 int
2612 scsipi_sync_factor_to_period(int factor)
2613 {
2614 int i;
2615
2616 for (i = 0; i < scsipi_nsyncparams; i++) {
2617 if (factor == scsipi_syncparams[i].ss_factor)
2618 return (scsipi_syncparams[i].ss_period);
2619 }
2620
2621 return ((factor * 4) * 100);
2622 }
2623
2624 int
2625 scsipi_sync_factor_to_freq(int factor)
2626 {
2627 int i;
2628
2629 for (i = 0; i < scsipi_nsyncparams; i++) {
2630 if (factor == scsipi_syncparams[i].ss_factor)
2631 return (100000000 / scsipi_syncparams[i].ss_period);
2632 }
2633
2634 return (10000000 / ((factor * 4) * 10));
2635 }
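/*
 * Worked examples for the conversions above:
 * scsipi_sync_period_to_factor(2500) hits the FAST-40 table entry and
 * returns 0x0a; an unlisted 200 ns period (20000) falls through to
 * (20000 / 100) / 4 = 0x32, and scsipi_sync_factor_to_period(0x32) =
 * (0x32 * 4) * 100 = 20000 round-trips it.
 * scsipi_sync_factor_to_freq(0x0a) = 100000000 / 2500 = 40000, i.e.
 * frequencies are returned in kHz (here, 40 MHz).
 */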
2636
2637 #ifdef SCSIPI_DEBUG
2638 /*
2639  * Given a scsipi_xfer, dump the request, in all its glory.
2640 */
2641 void
2642 show_scsipi_xs(struct scsipi_xfer *xs)
2643 {
2644
2645 printf("xs(%p): ", xs);
2646 printf("xs_control(0x%08x)", xs->xs_control);
2647 printf("xs_status(0x%08x)", xs->xs_status);
2648 printf("periph(%p)", xs->xs_periph);
2649 printf("retr(0x%x)", xs->xs_retries);
2650 printf("timo(0x%x)", xs->timeout);
2651 printf("cmd(%p)", xs->cmd);
2652 printf("len(0x%x)", xs->cmdlen);
2653 printf("data(%p)", xs->data);
2654 printf("len(0x%x)", xs->datalen);
2655 printf("res(0x%x)", xs->resid);
2656 printf("err(0x%x)", xs->error);
2657 printf("bp(%p)", xs->bp);
2658 show_scsipi_cmd(xs);
2659 }
2660
2661 void
2662 show_scsipi_cmd(struct scsipi_xfer *xs)
2663 {
2664 u_char *b = (u_char *) xs->cmd;
2665 int i = 0;
2666
2667 scsipi_printaddr(xs->xs_periph);
2668 printf(" command: ");
2669
2670 if ((xs->xs_control & XS_CTL_RESET) == 0) {
2671 while (i < xs->cmdlen) {
2672 if (i)
2673 printf(",");
2674 printf("0x%x", b[i++]);
2675 }
2676 printf("-[%d bytes]\n", xs->datalen);
2677 if (xs->datalen)
2678 show_mem(xs->data, min(64, xs->datalen));
2679 } else
2680 printf("-RESET-\n");
2681 }
2682
2683 void
2684 show_mem(u_char *address, int num)
2685 {
2686 int x;
2687
2688 printf("------------------------------");
2689 for (x = 0; x < num; x++) {
2690 if ((x % 16) == 0)
2691 printf("\n%03d: ", x);
2692 printf("%02x ", *address++);
2693 }
2694 printf("\n------------------------------\n");
2695 }
2696 #endif /* SCSIPI_DEBUG */
2697