/*	$NetBSD: scsipi_base.c,v 1.146 2008/04/05 15:47:01 cegger Exp $	*/

/*-
 * Copyright (c) 1998, 1999, 2000, 2002, 2003, 2004 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Charles M. Hannum; by Jason R. Thorpe of the Numerical Aerospace
 * Simulation Facility, NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: scsipi_base.c,v 1.146 2008/04/05 15:47:01 cegger Exp $");

#include "opt_scsi.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/buf.h>
#include <sys/uio.h>
#include <sys/malloc.h>
#include <sys/pool.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/proc.h>
#include <sys/kthread.h>
#include <sys/hash.h>

#include <uvm/uvm_extern.h>

#include <dev/scsipi/scsi_spc.h>
#include <dev/scsipi/scsipi_all.h>
#include <dev/scsipi/scsipi_disk.h>
#include <dev/scsipi/scsipiconf.h>
#include <dev/scsipi/scsipi_base.h>

#include <dev/scsipi/scsi_all.h>
#include <dev/scsipi/scsi_message.h>

static int	scsipi_complete(struct scsipi_xfer *);
static void	scsipi_request_sense(struct scsipi_xfer *);
static int	scsipi_enqueue(struct scsipi_xfer *);
static void	scsipi_run_queue(struct scsipi_channel *chan);

static void	scsipi_completion_thread(void *);

static void	scsipi_get_tag(struct scsipi_xfer *);
static void	scsipi_put_tag(struct scsipi_xfer *);

static int	scsipi_get_resource(struct scsipi_channel *);
static void	scsipi_put_resource(struct scsipi_channel *);

static void	scsipi_async_event_max_openings(struct scsipi_channel *,
		    struct scsipi_max_openings *);
static void	scsipi_async_event_xfer_mode(struct scsipi_channel *,
		    struct scsipi_xfer_mode *);
static void	scsipi_async_event_channel_reset(struct scsipi_channel *);

static struct pool scsipi_xfer_pool;

/*
 * scsipi_init:
 *
 *	Called when a scsibus or atapibus is attached to the system
 *	to initialize shared data structures.
 */
void
scsipi_init(void)
{
	static int scsipi_init_done;

	if (scsipi_init_done)
		return;
	scsipi_init_done = 1;

	/* Initialize the scsipi_xfer pool. */
	pool_init(&scsipi_xfer_pool, sizeof(struct scsipi_xfer), 0,
	    0, 0, "scxspl", NULL, IPL_BIO);
	if (pool_prime(&scsipi_xfer_pool,
	    PAGE_SIZE / sizeof(struct scsipi_xfer)) == ENOMEM) {
		printf("WARNING: not enough memory for scsipi_xfer_pool\n");
	}
}

/*
 * scsipi_channel_init:
 *
 *	Initialize a scsipi_channel when it is attached.
 */
int
scsipi_channel_init(struct scsipi_channel *chan)
{
	struct scsipi_adapter *adapt = chan->chan_adapter;
	int i;

	/* Initialize shared data. */
	scsipi_init();

	/* Initialize the queues. */
	TAILQ_INIT(&chan->chan_queue);
	TAILQ_INIT(&chan->chan_complete);

	for (i = 0; i < SCSIPI_CHAN_PERIPH_BUCKETS; i++)
		LIST_INIT(&chan->chan_periphtab[i]);

	/*
	 * Create the asynchronous completion thread.
	 */
	if (kthread_create(PRI_NONE, 0, NULL, scsipi_completion_thread, chan,
	    &chan->chan_thread, "%s", chan->chan_name)) {
		aprint_error_dev(adapt->adapt_dev,
		    "unable to create completion thread for channel %d\n",
		    chan->chan_channel);
		panic("scsipi_channel_init");
	}

	return (0);
}

/*
 * scsipi_channel_shutdown:
 *
 *	Shut down a scsipi_channel.
 */
void
scsipi_channel_shutdown(struct scsipi_channel *chan)
{

	/*
	 * Shut down the completion thread.
	 */
	chan->chan_tflags |= SCSIPI_CHANT_SHUTDOWN;
	wakeup(&chan->chan_complete);

	/*
	 * Now wait for the thread to exit.
	 */
	while (chan->chan_thread != NULL)
		(void) tsleep(&chan->chan_thread, PRIBIO, "scshut", 0);
}
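
/*
 * scsipi_chan_periph_hash:
 *
 *	Hash a (target, lun) pair into a bucket index for the channel's
 *	periph table.  The target is hashed first, and the intermediate
 *	value seeds the hash of the lun; the result is masked down to
 *	SCSIPI_CHAN_PERIPH_HASHMASK.
 */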
static uint32_t
scsipi_chan_periph_hash(uint64_t t, uint64_t l)
{
	uint32_t hash;

	hash = hash32_buf(&t, sizeof(t), HASH32_BUF_INIT);
	hash = hash32_buf(&l, sizeof(l), hash);

	return (hash & SCSIPI_CHAN_PERIPH_HASHMASK);
}

/*
 * scsipi_insert_periph:
 *
 *	Insert a periph into the channel.
 */
void
scsipi_insert_periph(struct scsipi_channel *chan, struct scsipi_periph *periph)
{
	uint32_t hash;
	int s;

	hash = scsipi_chan_periph_hash(periph->periph_target,
	    periph->periph_lun);

	s = splbio();
	LIST_INSERT_HEAD(&chan->chan_periphtab[hash], periph, periph_hash);
	splx(s);
}

/*
 * scsipi_remove_periph:
 *
 *	Remove a periph from the channel.
 */
void
scsipi_remove_periph(struct scsipi_channel *chan,
    struct scsipi_periph *periph)
{
	int s;

	s = splbio();
	LIST_REMOVE(periph, periph_hash);
	splx(s);
}

/*
 * scsipi_lookup_periph:
 *
 *	Look up a periph on the specified channel.
 */
struct scsipi_periph *
scsipi_lookup_periph(struct scsipi_channel *chan, int target, int lun)
{
	struct scsipi_periph *periph;
	uint32_t hash;
	int s;

	if (target >= chan->chan_ntargets ||
	    lun >= chan->chan_nluns)
		return (NULL);

	hash = scsipi_chan_periph_hash(target, lun);

	s = splbio();
	LIST_FOREACH(periph, &chan->chan_periphtab[hash], periph_hash) {
		if (periph->periph_target == target &&
		    periph->periph_lun == lun)
			break;
	}
	splx(s);

	return (periph);
}

/*
 * scsipi_get_resource:
 *
 *	Allocate a single xfer `resource' from the channel.
 *
 *	NOTE: Must be called at splbio().
 */
static int
scsipi_get_resource(struct scsipi_channel *chan)
{
	struct scsipi_adapter *adapt = chan->chan_adapter;

	if (chan->chan_flags & SCSIPI_CHAN_OPENINGS) {
		if (chan->chan_openings > 0) {
			chan->chan_openings--;
			return (1);
		}
		return (0);
	}

	if (adapt->adapt_openings > 0) {
		adapt->adapt_openings--;
		return (1);
	}
	return (0);
}
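
/*
 * Note: the SCSIPI_CHAN_OPENINGS flag selects whether command openings
 * are accounted per-channel (chan_openings) or shared adapter-wide
 * (adapt_openings); scsipi_put_resource() below returns an opening to
 * whichever counter it was taken from.
 */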

/*
 * scsipi_grow_resources:
 *
 *	Attempt to grow resources for a channel.  If this succeeds,
 *	we allocate one for our caller.
 *
 *	NOTE: Must be called at splbio().
 */
static inline int
scsipi_grow_resources(struct scsipi_channel *chan)
{

	if (chan->chan_flags & SCSIPI_CHAN_CANGROW) {
		if ((chan->chan_flags & SCSIPI_CHAN_TACTIVE) == 0) {
			scsipi_adapter_request(chan,
			    ADAPTER_REQ_GROW_RESOURCES, NULL);
			return (scsipi_get_resource(chan));
		}
		/*
		 * Ask the channel thread to do it.  It'll have to thaw
		 * the queue.
		 */
		scsipi_channel_freeze(chan, 1);
		chan->chan_tflags |= SCSIPI_CHANT_GROWRES;
		wakeup(&chan->chan_complete);
		return (0);
	}

	return (0);
}

/*
 * scsipi_put_resource:
 *
 *	Free a single xfer `resource' to the channel.
 *
 *	NOTE: Must be called at splbio().
 */
static void
scsipi_put_resource(struct scsipi_channel *chan)
{
	struct scsipi_adapter *adapt = chan->chan_adapter;

	if (chan->chan_flags & SCSIPI_CHAN_OPENINGS)
		chan->chan_openings++;
	else
		adapt->adapt_openings++;
}

/*
 * scsipi_get_tag:
 *
 *	Get a tag ID for the specified xfer.
 *
 *	NOTE: Must be called at splbio().
 */
static void
scsipi_get_tag(struct scsipi_xfer *xs)
{
	struct scsipi_periph *periph = xs->xs_periph;
	int bit, tag;
	u_int word;

	bit = 0;	/* XXX gcc */
	for (word = 0; word < PERIPH_NTAGWORDS; word++) {
		bit = ffs(periph->periph_freetags[word]);
		if (bit != 0)
			break;
	}
#ifdef DIAGNOSTIC
	if (word == PERIPH_NTAGWORDS) {
		scsipi_printaddr(periph);
		printf("no free tags\n");
		panic("scsipi_get_tag");
	}
#endif

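	/*
	 * ffs() returns a 1-based bit index, so convert it to 0-based
	 * below before encoding the tag ID as (word * 32 + bit);
	 * scsipi_put_tag() reverses this with a shift and a mask.
	 */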
	bit -= 1;
	periph->periph_freetags[word] &= ~(1 << bit);
	tag = (word << 5) | bit;

	/* XXX Should eventually disallow this completely. */
	if (tag >= periph->periph_openings) {
		scsipi_printaddr(periph);
		printf("WARNING: tag %d greater than available openings %d\n",
		    tag, periph->periph_openings);
	}

	xs->xs_tag_id = tag;
}

/*
 * scsipi_put_tag:
 *
 *	Put the tag ID for the specified xfer back into the pool.
 *
 *	NOTE: Must be called at splbio().
 */
static void
scsipi_put_tag(struct scsipi_xfer *xs)
{
	struct scsipi_periph *periph = xs->xs_periph;
	int word, bit;

	word = xs->xs_tag_id >> 5;
	bit = xs->xs_tag_id & 0x1f;

	periph->periph_freetags[word] |= (1 << bit);
}

/*
 * scsipi_get_xs:
 *
 *	Allocate an xfer descriptor and associate it with the
 *	specified peripheral.  If the peripheral has no more
 *	available command openings, we either block waiting for
 *	one to become available, or fail.
 */
struct scsipi_xfer *
scsipi_get_xs(struct scsipi_periph *periph, int flags)
{
	struct scsipi_xfer *xs;
	int s;

	SC_DEBUG(periph, SCSIPI_DB3, ("scsipi_get_xs\n"));

	KASSERT(!cold);

#ifdef DIAGNOSTIC
	/*
	 * URGENT commands can never be ASYNC.
	 */
	if ((flags & (XS_CTL_URGENT|XS_CTL_ASYNC)) ==
	    (XS_CTL_URGENT|XS_CTL_ASYNC)) {
		scsipi_printaddr(periph);
		printf("URGENT and ASYNC\n");
		panic("scsipi_get_xs");
	}
#endif

	s = splbio();
	/*
	 * Wait for a command opening to become available.  Rules:
	 *
	 *	- All xfers must wait for an available opening.
	 *	  Exception: URGENT xfers can proceed when
	 *	  active == openings, because we use the opening
	 *	  of the command we're recovering for.
	 *
	 *	- If the periph has sense pending, only URGENT & REQSENSE
	 *	  xfers may proceed.
	 *
	 *	- If the periph is recovering, only URGENT xfers may
	 *	  proceed.
	 *
	 *	- If the periph is currently executing a recovery
	 *	  command, URGENT commands must block, because only
	 *	  one recovery command can execute at a time.
	 */
	for (;;) {
		if (flags & XS_CTL_URGENT) {
			if (periph->periph_active > periph->periph_openings)
				goto wait_for_opening;
			if (periph->periph_flags & PERIPH_SENSE) {
				if ((flags & XS_CTL_REQSENSE) == 0)
					goto wait_for_opening;
			} else {
				if ((periph->periph_flags &
				    PERIPH_RECOVERY_ACTIVE) != 0)
					goto wait_for_opening;
				periph->periph_flags |= PERIPH_RECOVERY_ACTIVE;
			}
			break;
		}
		if (periph->periph_active >= periph->periph_openings ||
		    (periph->periph_flags & PERIPH_RECOVERING) != 0)
			goto wait_for_opening;
		periph->periph_active++;
		break;

 wait_for_opening:
		if (flags & XS_CTL_NOSLEEP) {
			splx(s);
			return (NULL);
		}
		SC_DEBUG(periph, SCSIPI_DB3, ("sleeping\n"));
		periph->periph_flags |= PERIPH_WAITING;
		(void) tsleep(periph, PRIBIO, "getxs", 0);
	}
	SC_DEBUG(periph, SCSIPI_DB3, ("calling pool_get\n"));
	xs = pool_get(&scsipi_xfer_pool,
	    ((flags & XS_CTL_NOSLEEP) != 0 ? PR_NOWAIT : PR_WAITOK));
	if (xs == NULL) {
		if (flags & XS_CTL_URGENT) {
			if ((flags & XS_CTL_REQSENSE) == 0)
				periph->periph_flags &= ~PERIPH_RECOVERY_ACTIVE;
		} else
			periph->periph_active--;
		scsipi_printaddr(periph);
		printf("unable to allocate %sscsipi_xfer\n",
		    (flags & XS_CTL_URGENT) ? "URGENT " : "");
	}
	splx(s);

	SC_DEBUG(periph, SCSIPI_DB3, ("returning\n"));

	if (xs != NULL) {
		memset(xs, 0, sizeof(*xs));
		callout_init(&xs->xs_callout, 0);
		xs->xs_periph = periph;
		xs->xs_control = flags;
		xs->xs_status = 0;
		s = splbio();
		TAILQ_INSERT_TAIL(&periph->periph_xferq, xs, device_q);
		splx(s);
	}
	return (xs);
}

/*
 * scsipi_put_xs:
 *
 *	Release an xfer descriptor, decreasing the outstanding command
 *	count for the peripheral.  If there is a thread waiting for
 *	an opening, wake it up.  If not, kick any queued I/O the
 *	peripheral may have.
 *
 *	NOTE: Must be called at splbio().
 */
void
scsipi_put_xs(struct scsipi_xfer *xs)
{
	struct scsipi_periph *periph = xs->xs_periph;
	int flags = xs->xs_control;

	SC_DEBUG(periph, SCSIPI_DB3, ("scsipi_free_xs\n"));

	TAILQ_REMOVE(&periph->periph_xferq, xs, device_q);
	pool_put(&scsipi_xfer_pool, xs);

#ifdef DIAGNOSTIC
	if ((periph->periph_flags & PERIPH_RECOVERY_ACTIVE) != 0 &&
	    periph->periph_active == 0) {
		scsipi_printaddr(periph);
		printf("recovery without a command to recover for\n");
		panic("scsipi_put_xs");
	}
#endif

	if (flags & XS_CTL_URGENT) {
		if ((flags & XS_CTL_REQSENSE) == 0)
			periph->periph_flags &= ~PERIPH_RECOVERY_ACTIVE;
	} else
		periph->periph_active--;
	if (periph->periph_active == 0 &&
	    (periph->periph_flags & PERIPH_WAITDRAIN) != 0) {
		periph->periph_flags &= ~PERIPH_WAITDRAIN;
		wakeup(&periph->periph_active);
	}

	if (periph->periph_flags & PERIPH_WAITING) {
		periph->periph_flags &= ~PERIPH_WAITING;
		wakeup(periph);
	} else {
		if (periph->periph_switch->psw_start != NULL &&
		    device_is_active(periph->periph_dev)) {
			SC_DEBUG(periph, SCSIPI_DB2,
			    ("calling private start()\n"));
			(*periph->periph_switch->psw_start)(periph);
		}
	}
}

/*
 * scsipi_channel_freeze:
 *
 *	Freeze a channel's xfer queue.
 */
void
scsipi_channel_freeze(struct scsipi_channel *chan, int count)
{
	int s;

	s = splbio();
	chan->chan_qfreeze += count;
	splx(s);
}

/*
 * scsipi_channel_thaw:
 *
 *	Thaw a channel's xfer queue.
 */
void
scsipi_channel_thaw(struct scsipi_channel *chan, int count)
{
	int s;

	s = splbio();
	chan->chan_qfreeze -= count;
	/*
	 * Don't let the freeze count go negative.
	 *
	 * Presumably the adapter driver could keep track of this,
	 * but it might just be easier to do this here so as to allow
	 * multiple callers, including those outside the adapter driver.
	 */
	if (chan->chan_qfreeze < 0) {
		chan->chan_qfreeze = 0;
	}
	splx(s);
	/*
	 * Kick the channel's queue here.  Note, we may be running in
	 * interrupt context (softclock or HBA's interrupt), so the adapter
	 * driver had better not sleep.
	 */
	if (chan->chan_qfreeze == 0)
		scsipi_run_queue(chan);
}

/*
 * scsipi_channel_timed_thaw:
 *
 *	Thaw a channel after some time has expired.  This will also
 *	run the channel's queue if the freeze count has reached 0.
 */
void
scsipi_channel_timed_thaw(void *arg)
{
	struct scsipi_channel *chan = arg;

	scsipi_channel_thaw(chan, 1);
}
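
/*
 * A minimal usage sketch (hypothetical HBA driver code; `sc_thaw_ch'
 * is an assumed callout in the driver softc): an adapter that must
 * back off temporarily can freeze the channel and let a callout thaw
 * it later:
 *
 *	scsipi_channel_freeze(chan, 1);
 *	callout_reset(&sc->sc_thaw_ch, hz, scsipi_channel_timed_thaw, chan);
 *
 * scsipi_complete() uses the same pattern with the periph flavor
 * (scsipi_periph_timed_thaw) when retrying a busy device.
 */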

/*
 * scsipi_periph_freeze:
 *
 *	Freeze a device's xfer queue.
 */
void
scsipi_periph_freeze(struct scsipi_periph *periph, int count)
{
	int s;

	s = splbio();
	periph->periph_qfreeze += count;
	splx(s);
}

/*
 * scsipi_periph_thaw:
 *
 *	Thaw a device's xfer queue.
 */
void
scsipi_periph_thaw(struct scsipi_periph *periph, int count)
{
	int s;

	s = splbio();
	periph->periph_qfreeze -= count;
#ifdef DIAGNOSTIC
	if (periph->periph_qfreeze < 0) {
		static const char pc[] = "periph freeze count < 0";
		scsipi_printaddr(periph);
		printf("%s\n", pc);
		panic(pc);
	}
#endif
	if (periph->periph_qfreeze == 0 &&
	    (periph->periph_flags & PERIPH_WAITING) != 0)
		wakeup(periph);
	splx(s);
}

/*
 * scsipi_periph_timed_thaw:
 *
 *	Thaw a device after some time has expired.
 */
void
scsipi_periph_timed_thaw(void *arg)
{
	int s;
	struct scsipi_periph *periph = arg;

	callout_stop(&periph->periph_callout);

	s = splbio();
	scsipi_periph_thaw(periph, 1);
	if ((periph->periph_channel->chan_flags & SCSIPI_CHAN_TACTIVE) == 0) {
		/*
		 * Kick the channel's queue here.  Note, we're running in
		 * interrupt context (softclock), so the adapter driver
		 * had better not sleep.
		 */
		scsipi_run_queue(periph->periph_channel);
	} else {
		/*
		 * Tell the completion thread to kick the channel's queue here.
		 */
		periph->periph_channel->chan_tflags |= SCSIPI_CHANT_KICK;
		wakeup(&periph->periph_channel->chan_complete);
	}
	splx(s);
}

/*
 * scsipi_wait_drain:
 *
 *	Wait for a periph's pending xfers to drain.
 */
void
scsipi_wait_drain(struct scsipi_periph *periph)
{
	int s;

	s = splbio();
	while (periph->periph_active != 0) {
		periph->periph_flags |= PERIPH_WAITDRAIN;
		(void) tsleep(&periph->periph_active, PRIBIO, "sxdrn", 0);
	}
	splx(s);
}

/*
 * scsipi_kill_pending:
 *
 *	Kill off all pending xfers for a periph.
 *
 *	NOTE: Must be called at splbio().
 */
void
scsipi_kill_pending(struct scsipi_periph *periph)
{

	(*periph->periph_channel->chan_bustype->bustype_kill_pending)(periph);
	scsipi_wait_drain(periph);
}
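
/*
 * Usage sketch (hypothetical periph driver detach path, illustrative
 * only): the bus-specific kill routine aborts everything queued, then
 * scsipi_wait_drain() sleeps until the active count reaches zero.
 *
 *	s = splbio();
 *	scsipi_kill_pending(periph);
 *	splx(s);
 */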

/*
 * scsipi_print_cdb:
 *	prints a command descriptor block (for debugging purposes, error
 *	messages, SCSIPI_VERBOSE, ...)
 */
void
scsipi_print_cdb(struct scsipi_generic *cmd)
{
	int i, j;

	printf("0x%02x", cmd->opcode);

	switch (CDB_GROUPID(cmd->opcode)) {
	case CDB_GROUPID_0:
		j = CDB_GROUP0;
		break;
	case CDB_GROUPID_1:
		j = CDB_GROUP1;
		break;
	case CDB_GROUPID_2:
		j = CDB_GROUP2;
		break;
	case CDB_GROUPID_3:
		j = CDB_GROUP3;
		break;
	case CDB_GROUPID_4:
		j = CDB_GROUP4;
		break;
	case CDB_GROUPID_5:
		j = CDB_GROUP5;
		break;
	case CDB_GROUPID_6:
		j = CDB_GROUP6;
		break;
	case CDB_GROUPID_7:
		j = CDB_GROUP7;
		break;
	default:
		j = 0;
	}
	if (j == 0)
		j = sizeof (cmd->bytes);
	for (i = 0; i < j - 1; i++)	/* already done the opcode */
		printf(" %02x", cmd->bytes[i]);
}

/*
 * scsipi_interpret_sense:
 *
 *	Look at the returned sense and act on the error, determining
 *	the unix error number to pass back.  (0 = report no error)
 *
 *	NOTE: If we return ERESTART, we are expected to have
 *	thawed the device!
 *
 *	THIS IS THE DEFAULT ERROR HANDLER FOR SCSI DEVICES.
 */
int
scsipi_interpret_sense(struct scsipi_xfer *xs)
{
	struct scsi_sense_data *sense;
	struct scsipi_periph *periph = xs->xs_periph;
	u_int8_t key;
	int error;
#ifndef SCSIVERBOSE
	u_int32_t info;
	static const char *error_mes[] = {
		"soft error (corrected)",
		"not ready", "medium error",
		"non-media hardware failure", "illegal request",
		"unit attention", "readonly device",
		"no data found", "vendor unique",
		"copy aborted", "command aborted",
		"search returned equal", "volume overflow",
		"verify miscompare", "unknown error key"
	};
#endif

	sense = &xs->sense.scsi_sense;
#ifdef SCSIPI_DEBUG
	if (periph->periph_flags & SCSIPI_DB1) {
		int count;
		scsipi_printaddr(periph);
		printf(" sense debug information:\n");
		printf("\tcode 0x%x valid %d\n",
		    SSD_RCODE(sense->response_code),
		    sense->response_code & SSD_RCODE_VALID ? 1 : 0);
		printf("\tseg 0x%x key 0x%x ili 0x%x eom 0x%x fmark 0x%x\n",
		    sense->segment,
		    SSD_SENSE_KEY(sense->flags),
		    sense->flags & SSD_ILI ? 1 : 0,
		    sense->flags & SSD_EOM ? 1 : 0,
		    sense->flags & SSD_FILEMARK ? 1 : 0);
		printf("\ninfo: 0x%x 0x%x 0x%x 0x%x followed by %d "
		    "extra bytes\n",
		    sense->info[0],
		    sense->info[1],
		    sense->info[2],
		    sense->info[3],
		    sense->extra_len);
		printf("\textra: ");
		for (count = 0; count < SSD_ADD_BYTES_LIM(sense); count++)
			printf("0x%x ", sense->csi[count]);
		printf("\n");
	}
#endif

	/*
	 * If the periph has its own error handler, call it first.
	 * If it returns a legit error value, return that, otherwise
	 * it wants us to continue with normal error processing.
	 */
	if (periph->periph_switch->psw_error != NULL) {
		SC_DEBUG(periph, SCSIPI_DB2,
		    ("calling private err_handler()\n"));
		error = (*periph->periph_switch->psw_error)(xs);
		if (error != EJUSTRETURN)
			return (error);
	}
	/* otherwise use the default */
	switch (SSD_RCODE(sense->response_code)) {

		/*
		 * Old SCSI-1 and SASI devices respond with
		 * codes other than 70.
		 */
	case 0x00:	/* no error (command completed OK) */
		return (0);
	case 0x04:	/* drive not ready after it was selected */
		if ((periph->periph_flags & PERIPH_REMOVABLE) != 0)
			periph->periph_flags &= ~PERIPH_MEDIA_LOADED;
		if ((xs->xs_control & XS_CTL_IGNORE_NOT_READY) != 0)
			return (0);
		/* XXX - display some sort of error here? */
		return (EIO);
	case 0x20:	/* invalid command */
		if ((xs->xs_control &
		    XS_CTL_IGNORE_ILLEGAL_REQUEST) != 0)
			return (0);
		return (EINVAL);
	case 0x25:	/* invalid LUN (Adaptec ACB-4000) */
		return (EACCES);

		/*
		 * If it's code 70, use the extended stuff and
		 * interpret the key
		 */
	case 0x71:	/* delayed error */
		scsipi_printaddr(periph);
		key = SSD_SENSE_KEY(sense->flags);
		printf(" DEFERRED ERROR, key = 0x%x\n", key);
		/* FALLTHROUGH */
	case 0x70:
#ifndef SCSIVERBOSE
		if ((sense->response_code & SSD_RCODE_VALID) != 0)
			info = _4btol(sense->info);
		else
			info = 0;
#endif
		key = SSD_SENSE_KEY(sense->flags);

		switch (key) {
		case SKEY_NO_SENSE:
		case SKEY_RECOVERED_ERROR:
			if (xs->resid == xs->datalen && xs->datalen) {
				/*
				 * Why is this here?
				 */
				xs->resid = 0;	/* not short read */
			}
		case SKEY_EQUAL:
			error = 0;
			break;
		case SKEY_NOT_READY:
			if ((periph->periph_flags & PERIPH_REMOVABLE) != 0)
				periph->periph_flags &= ~PERIPH_MEDIA_LOADED;
			if ((xs->xs_control & XS_CTL_IGNORE_NOT_READY) != 0)
				return (0);
			if (sense->asc == 0x3A) {
				error = ENODEV; /* Medium not present */
				if (xs->xs_control & XS_CTL_SILENT_NODEV)
					return (error);
			} else
				error = EIO;
			if ((xs->xs_control & XS_CTL_SILENT) != 0)
				return (error);
			break;
		case SKEY_ILLEGAL_REQUEST:
			if ((xs->xs_control &
			    XS_CTL_IGNORE_ILLEGAL_REQUEST) != 0)
				return (0);
			/*
			 * Handle the case where a device reports
			 * Logical Unit Not Supported during discovery.
			 */
			if ((xs->xs_control & XS_CTL_DISCOVERY) != 0 &&
			    sense->asc == 0x25 &&
			    sense->ascq == 0x00)
				return (EINVAL);
			if ((xs->xs_control & XS_CTL_SILENT) != 0)
				return (EIO);
			error = EINVAL;
			break;
		case SKEY_UNIT_ATTENTION:
			if (sense->asc == 0x29 &&
			    sense->ascq == 0x00) {
				/* device or bus reset */
				return (ERESTART);
			}
			if ((periph->periph_flags & PERIPH_REMOVABLE) != 0)
				periph->periph_flags &= ~PERIPH_MEDIA_LOADED;
			if ((xs->xs_control &
			    XS_CTL_IGNORE_MEDIA_CHANGE) != 0 ||
			    /* XXX Should reupload any transient state. */
			    (periph->periph_flags &
			     PERIPH_REMOVABLE) == 0) {
				return (ERESTART);
			}
			if ((xs->xs_control & XS_CTL_SILENT) != 0)
				return (EIO);
			error = EIO;
			break;
		case SKEY_DATA_PROTECT:
			error = EROFS;
			break;
		case SKEY_BLANK_CHECK:
			error = 0;
			break;
		case SKEY_ABORTED_COMMAND:
			if (xs->xs_retries != 0) {
				xs->xs_retries--;
				error = ERESTART;
			} else
				error = EIO;
			break;
		case SKEY_VOLUME_OVERFLOW:
			error = ENOSPC;
			break;
		default:
			error = EIO;
			break;
		}

#ifdef SCSIVERBOSE
		if (key && (xs->xs_control & XS_CTL_SILENT) == 0)
			scsipi_print_sense(xs, 0);
#else
		if (key) {
			scsipi_printaddr(periph);
			printf("%s", error_mes[key - 1]);
			if ((sense->response_code & SSD_RCODE_VALID) != 0) {
				switch (key) {
				case SKEY_NOT_READY:
				case SKEY_ILLEGAL_REQUEST:
				case SKEY_UNIT_ATTENTION:
				case SKEY_DATA_PROTECT:
					break;
				case SKEY_BLANK_CHECK:
					printf(", requested size: %d (decimal)",
					    info);
					break;
				case SKEY_ABORTED_COMMAND:
					if (xs->xs_retries)
						printf(", retrying");
					printf(", cmd 0x%x, info 0x%x",
					    xs->cmd->opcode, info);
					break;
				default:
					printf(", info = %d (decimal)", info);
				}
			}
			if (sense->extra_len != 0) {
				int n;
				printf(", data =");
				for (n = 0; n < sense->extra_len; n++)
					printf(" %02x",
					    sense->csi[n]);
			}
			printf("\n");
		}
#endif
		return (error);

	/*
	 * Some other code, just report it
	 */
	default:
#if defined(SCSIDEBUG) || defined(DEBUG)
	{
		static const char *uc = "undecodable sense error";
		int i;
		u_int8_t *cptr = (u_int8_t *) sense;
		scsipi_printaddr(periph);
		if (xs->cmd == &xs->cmdstore) {
			printf("%s for opcode 0x%x, data=",
			    uc, xs->cmdstore.opcode);
		} else {
			printf("%s, data=", uc);
		}
		/* dump the whole sense structure, not sizeof the pointer */
		for (i = 0; i < sizeof (*sense); i++)
			printf(" 0x%02x", *(cptr++) & 0xff);
		printf("\n");
	}
#else
	scsipi_printaddr(periph);
	printf("Sense Error Code 0x%x",
	    SSD_RCODE(sense->response_code));
	if ((sense->response_code & SSD_RCODE_VALID) != 0) {
		struct scsi_sense_data_unextended *usense =
		    (struct scsi_sense_data_unextended *)sense;
		printf(" at block no. %d (decimal)",
		    _3btol(usense->block));
	}
	printf("\n");
#endif
	return (EIO);
	}
}
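
/*
 * Sketch of the psw_error hook used above (a hypothetical periph
 * driver, not an existing one): the handler may claim the error by
 * returning an errno (or 0 for success), or return EJUSTRETURN to
 * fall through to the default interpretation:
 *
 *	static int
 *	mydrv_error(struct scsipi_xfer *xs)
 *	{
 *		struct scsi_sense_data *sense = &xs->sense.scsi_sense;
 *
 *		if (SSD_SENSE_KEY(sense->flags) == SKEY_RECOVERED_ERROR)
 *			return (0);
 *		return (EJUSTRETURN);
 *	}
 */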

/*
 * scsipi_test_unit_ready:
 *
 *	Issue a `test unit ready' request.
 */
int
scsipi_test_unit_ready(struct scsipi_periph *periph, int flags)
{
	struct scsi_test_unit_ready cmd;
	int retries;

	/* some ATAPI drives don't support TEST UNIT READY. Sigh */
	if (periph->periph_quirks & PQUIRK_NOTUR)
		return (0);

	if (flags & XS_CTL_DISCOVERY)
		retries = 0;
	else
		retries = SCSIPIRETRIES;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = SCSI_TEST_UNIT_READY;

	return (scsipi_command(periph, (void *)&cmd, sizeof(cmd), 0, 0,
	    retries, 10000, NULL, flags));
}

/*
 * scsipi_inquire:
 *
 *	Ask the device about itself.
 */
int
scsipi_inquire(struct scsipi_periph *periph, struct scsipi_inquiry_data *inqbuf,
    int flags)
{
	struct scsipi_inquiry cmd;
	int error;
	int retries;

	if (flags & XS_CTL_DISCOVERY)
		retries = 0;
	else
		retries = SCSIPIRETRIES;

	/*
	 * If we request more data than the device can provide, it SHOULD just
	 * return a short response.  However, some devices error with an
	 * ILLEGAL REQUEST sense code, and yet others have even more special
	 * failure modes (such as the GL641USB flash adapter, which goes loony
	 * and sends corrupted CRCs).  To work around this, and to bring our
	 * behavior more in line with other OSes, we do a shorter inquiry,
	 * covering all the SCSI-2 information, first, and then request more
	 * data iff the "additional length" field indicates there is more.
	 * - mycroft, 2003/10/16
	 */
	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = INQUIRY;
	cmd.length = SCSIPI_INQUIRY_LENGTH_SCSI2;
	error = scsipi_command(periph, (void *)&cmd, sizeof(cmd),
	    (void *)inqbuf, SCSIPI_INQUIRY_LENGTH_SCSI2, retries,
	    10000, NULL, flags | XS_CTL_DATA_IN);
	if (!error &&
	    inqbuf->additional_length > SCSIPI_INQUIRY_LENGTH_SCSI2 - 4) {
#if 0
printf("inquire: addlen=%d, retrying\n", inqbuf->additional_length);
#endif
		cmd.length = SCSIPI_INQUIRY_LENGTH_SCSI3;
		error = scsipi_command(periph, (void *)&cmd, sizeof(cmd),
		    (void *)inqbuf, SCSIPI_INQUIRY_LENGTH_SCSI3, retries,
		    10000, NULL, flags | XS_CTL_DATA_IN);
#if 0
printf("inquire: error=%d\n", error);
#endif
	}

#ifdef SCSI_OLD_NOINQUIRY
	/*
	 * Kludge for the Adaptec ACB-4000 SCSI->MFM translator.
	 * This board doesn't support the INQUIRY command at all.
	 */
	if (error == EINVAL || error == EACCES) {
		/*
		 * Conjure up an INQUIRY response.
		 */
		inqbuf->device = (error == EINVAL ?
		    SID_QUAL_LU_PRESENT :
		    SID_QUAL_LU_NOTPRESENT) | T_DIRECT;
		inqbuf->dev_qual2 = 0;
		inqbuf->version = 0;
		inqbuf->response_format = SID_FORMAT_SCSI1;
		inqbuf->additional_length = SCSIPI_INQUIRY_LENGTH_SCSI2 - 4;
		inqbuf->flags1 = inqbuf->flags2 = inqbuf->flags3 = 0;
		memcpy(inqbuf->vendor, "ADAPTEC ACB-4000            ", 28);
		error = 0;
	}

	/*
	 * Kludge for the Emulex MT-02 SCSI->QIC translator.
	 * This board gives an empty response to an INQUIRY command.
	 */
	else if (error == 0 &&
	    inqbuf->device == (SID_QUAL_LU_PRESENT | T_DIRECT) &&
	    inqbuf->dev_qual2 == 0 &&
	    inqbuf->version == 0 &&
	    inqbuf->response_format == SID_FORMAT_SCSI1) {
		/*
		 * Fill out the INQUIRY response.
		 */
		inqbuf->device = (SID_QUAL_LU_PRESENT | T_SEQUENTIAL);
		inqbuf->dev_qual2 = SID_REMOVABLE;
		inqbuf->additional_length = SCSIPI_INQUIRY_LENGTH_SCSI2 - 4;
		inqbuf->flags1 = inqbuf->flags2 = inqbuf->flags3 = 0;
		memcpy(inqbuf->vendor, "EMULEX  MT-02 QIC           ", 28);
	}
#endif /* SCSI_OLD_NOINQUIRY */

	return error;
}
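
/*
 * Typical call (an illustrative sketch, not lifted from a particular
 * driver): discovery code probes a LUN quietly and without retries:
 *
 *	struct scsipi_inquiry_data inqbuf;
 *	int error;
 *
 *	error = scsipi_inquire(periph, &inqbuf,
 *	    XS_CTL_DISCOVERY | XS_CTL_SILENT);
 */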

/*
 * scsipi_prevent:
 *
 *	Prevent or allow the user to remove the media
 */
int
scsipi_prevent(struct scsipi_periph *periph, int type, int flags)
{
	struct scsi_prevent_allow_medium_removal cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = SCSI_PREVENT_ALLOW_MEDIUM_REMOVAL;
	cmd.how = type;

	return (scsipi_command(periph, (void *)&cmd, sizeof(cmd), 0, 0,
	    SCSIPIRETRIES, 5000, NULL, flags));
}

/*
 * scsipi_start:
 *
 *	Send a START UNIT.
 */
int
scsipi_start(struct scsipi_periph *periph, int type, int flags)
{
	struct scsipi_start_stop cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = START_STOP;
	cmd.byte2 = 0x00;
	cmd.how = type;

	return (scsipi_command(periph, (void *)&cmd, sizeof(cmd), 0, 0,
	    SCSIPIRETRIES, (type & SSS_START) ? 60000 : 10000, NULL, flags));
}

/*
 * scsipi_mode_sense, scsipi_mode_sense_big:
 *	get a sense page from a device
 */

int
scsipi_mode_sense(struct scsipi_periph *periph, int byte2, int page,
    struct scsi_mode_parameter_header_6 *data, int len, int flags, int retries,
    int timeout)
{
	struct scsi_mode_sense_6 cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = SCSI_MODE_SENSE_6;
	cmd.byte2 = byte2;
	cmd.page = page;
	cmd.length = len & 0xff;

	return (scsipi_command(periph, (void *)&cmd, sizeof(cmd),
	    (void *)data, len, retries, timeout, NULL, flags | XS_CTL_DATA_IN));
}

int
scsipi_mode_sense_big(struct scsipi_periph *periph, int byte2, int page,
    struct scsi_mode_parameter_header_10 *data, int len, int flags, int retries,
    int timeout)
{
	struct scsi_mode_sense_10 cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = SCSI_MODE_SENSE_10;
	cmd.byte2 = byte2;
	cmd.page = page;
	_lto2b(len, cmd.length);

	return (scsipi_command(periph, (void *)&cmd, sizeof(cmd),
	    (void *)data, len, retries, timeout, NULL, flags | XS_CTL_DATA_IN));
}
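
/*
 * Illustrative call (a sketch only; the buffer layout is the caller's
 * choice, not mandated here): fetch a mode page into a buffer that
 * begins with the 6-byte parameter header:
 *
 *	struct {
 *		struct scsi_mode_parameter_header_6 hdr;
 *		u_int8_t page[128];
 *	} mbuf;
 *
 *	error = scsipi_mode_sense(periph, 0, pgcode, &mbuf.hdr,
 *	    sizeof(mbuf), flags, SCSIPIRETRIES, 10000);
 */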

int
scsipi_mode_select(struct scsipi_periph *periph, int byte2,
    struct scsi_mode_parameter_header_6 *data, int len, int flags, int retries,
    int timeout)
{
	struct scsi_mode_select_6 cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = SCSI_MODE_SELECT_6;
	cmd.byte2 = byte2;
	cmd.length = len & 0xff;

	return (scsipi_command(periph, (void *)&cmd, sizeof(cmd),
	    (void *)data, len, retries, timeout, NULL, flags | XS_CTL_DATA_OUT));
}

int
scsipi_mode_select_big(struct scsipi_periph *periph, int byte2,
    struct scsi_mode_parameter_header_10 *data, int len, int flags, int retries,
    int timeout)
{
	struct scsi_mode_select_10 cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = SCSI_MODE_SELECT_10;
	cmd.byte2 = byte2;
	_lto2b(len, cmd.length);

	return (scsipi_command(periph, (void *)&cmd, sizeof(cmd),
	    (void *)data, len, retries, timeout, NULL, flags | XS_CTL_DATA_OUT));
}

/*
 * scsipi_done:
 *
 *	This routine is called by an adapter's interrupt handler when
 *	an xfer is completed.
 */
void
scsipi_done(struct scsipi_xfer *xs)
{
	struct scsipi_periph *periph = xs->xs_periph;
	struct scsipi_channel *chan = periph->periph_channel;
	int s, freezecnt;

	SC_DEBUG(periph, SCSIPI_DB2, ("scsipi_done\n"));
#ifdef SCSIPI_DEBUG
	if (periph->periph_dbflags & SCSIPI_DB1)
		show_scsipi_cmd(xs);
#endif

	s = splbio();
	/*
	 * The resource this command was using is now free.
	 */
	if (xs->xs_status & XS_STS_DONE) {
		/*
		 * XXX In certain circumstances, such as a device
		 * being detached, an xfer that has already been
		 * scsipi_done()'d by the main thread will be done'd
		 * again by scsibusdetach().  Putting the xfer on the
		 * chan_complete queue causes list corruption and
		 * everyone dies.  This prevents that, but perhaps
		 * there should be better coordination somewhere such
		 * that this won't ever happen (and can be turned into
		 * a KASSERT()).
		 */
		splx(s);
		goto out;
	}
	scsipi_put_resource(chan);
	xs->xs_periph->periph_sent--;

	/*
	 * If the command was tagged, free the tag.
	 */
	if (XS_CTL_TAGTYPE(xs) != 0)
		scsipi_put_tag(xs);
	else
		periph->periph_flags &= ~PERIPH_UNTAG;

	/* Mark the command as `done'. */
	xs->xs_status |= XS_STS_DONE;

#ifdef DIAGNOSTIC
	if ((xs->xs_control & (XS_CTL_ASYNC|XS_CTL_POLL)) ==
	    (XS_CTL_ASYNC|XS_CTL_POLL))
		panic("scsipi_done: ASYNC and POLL");
#endif

	/*
	 * If the xfer had an error of any sort, freeze the
	 * periph's queue.  Freeze it again if we were requested
	 * to do so in the xfer.
	 */
	freezecnt = 0;
	if (xs->error != XS_NOERROR)
		freezecnt++;
	if (xs->xs_control & XS_CTL_FREEZE_PERIPH)
		freezecnt++;
	if (freezecnt != 0)
		scsipi_periph_freeze(periph, freezecnt);

	/*
	 * Record the xfer with a pending sense, in case a SCSI reset is
	 * received before the thread is woken up.
	 */
	if (xs->error == XS_BUSY && xs->status == SCSI_CHECK) {
		periph->periph_flags |= PERIPH_SENSE;
		periph->periph_xscheck = xs;
	}

	/*
	 * If this was an xfer that was not to complete asynchronously,
	 * let the requesting thread perform error checking/handling
	 * in its context.
	 */
	if ((xs->xs_control & XS_CTL_ASYNC) == 0) {
		splx(s);
		/*
		 * If it's a polling job, just return, to unwind the
		 * call graph.  We don't need to restart the queue,
		 * because polling jobs are treated specially, and
		 * are really only used during crash dumps anyway
		 * (XXX or during boot-time autoconfiguration of
		 * ATAPI devices).
		 */
		if (xs->xs_control & XS_CTL_POLL)
			return;
		wakeup(xs);
		goto out;
	}

	/*
	 * Catch the extremely common case of I/O completing
	 * without error; no use in taking a context switch
	 * if we can handle it in interrupt context.
	 */
	if (xs->error == XS_NOERROR) {
		splx(s);
		(void) scsipi_complete(xs);
		goto out;
	}

	/*
	 * There is an error on this xfer.  Put it on the channel's
	 * completion queue, and wake up the completion thread.
	 */
	TAILQ_INSERT_TAIL(&chan->chan_complete, xs, channel_q);
	splx(s);
	wakeup(&chan->chan_complete);

 out:
	/*
	 * If there are more xfers on the channel's queue, attempt to
	 * run them.
	 */
	scsipi_run_queue(chan);
}

/*
 * scsipi_complete:
 *
 *	Completion of a scsipi_xfer.  This is the guts of scsipi_done().
 *
 *	NOTE: This routine MUST be called with valid thread context
 *	except for the case where the following two conditions are
 *	true:
 *
 *		xs->error == XS_NOERROR
 *		XS_CTL_ASYNC is set in xs->xs_control
 *
 *	The semantics of this routine can be tricky, so here is an
 *	explanation:
 *
 *		0		Xfer completed successfully.
 *
 *		ERESTART	Xfer had an error, but was restarted.
 *
 *		anything else	Xfer had an error, return value is Unix
 *				errno.
 *
 *	If the return value is anything but ERESTART:
 *
 *		- If XS_CTL_ASYNC is set, `xs' has been freed back to
 *		  the pool.
 *
 *		- If there is a buf associated with the xfer,
 *		  it has been biodone()'d.
 */
static int
scsipi_complete(struct scsipi_xfer *xs)
{
	struct scsipi_periph *periph = xs->xs_periph;
	struct scsipi_channel *chan = periph->periph_channel;
	int error, s;

#ifdef DIAGNOSTIC
	if ((xs->xs_control & XS_CTL_ASYNC) != 0 && xs->bp == NULL)
		panic("scsipi_complete: XS_CTL_ASYNC but no buf");
#endif
	/*
	 * If command terminated with a CHECK CONDITION, we need to issue a
	 * REQUEST_SENSE command.  Once the REQUEST_SENSE has been processed
	 * we'll have the real status.
	 * Must be processed at splbio() to avoid missing a SCSI bus reset
	 * for this command.
	 */
	s = splbio();
	if (xs->error == XS_BUSY && xs->status == SCSI_CHECK) {
		/* request sense for a request sense ? */
		if (xs->xs_control & XS_CTL_REQSENSE) {
			scsipi_printaddr(periph);
			printf("request sense for a request sense ?\n");
			/* XXX maybe we should reset the device ? */
			/* we've been frozen because xs->error != XS_NOERROR */
			scsipi_periph_thaw(periph, 1);
			splx(s);
			if (xs->resid < xs->datalen) {
				printf("we read %d bytes of sense anyway:\n",
				    xs->datalen - xs->resid);
#ifdef SCSIVERBOSE
				scsipi_print_sense_data((void *)xs->data, 0);
#endif
			}
			return EINVAL;
		}
		scsipi_request_sense(xs);
	}
	splx(s);

	/*
	 * If it's a user level request, bypass all usual completion
	 * processing, let the user work it out.
	 */
	if ((xs->xs_control & XS_CTL_USERCMD) != 0) {
		SC_DEBUG(periph, SCSIPI_DB3, ("calling user done()\n"));
		if (xs->error != XS_NOERROR)
			scsipi_periph_thaw(periph, 1);
		scsipi_user_done(xs);
		SC_DEBUG(periph, SCSIPI_DB3, ("returned from user done()\n "));
		return 0;
	}

	switch (xs->error) {
	case XS_NOERROR:
		error = 0;
		break;

	case XS_SENSE:
	case XS_SHORTSENSE:
		error = (*chan->chan_bustype->bustype_interpret_sense)(xs);
		break;

	case XS_RESOURCE_SHORTAGE:
		/*
		 * XXX Should freeze channel's queue.
		 */
		scsipi_printaddr(periph);
		printf("adapter resource shortage\n");
		/* FALLTHROUGH */

	case XS_BUSY:
		if (xs->error == XS_BUSY && xs->status == SCSI_QUEUE_FULL) {
			struct scsipi_max_openings mo;

			/*
			 * We set the openings to active - 1, assuming that
			 * the command that got us here is the first one that
			 * can't fit into the device's queue.  If that's not
			 * the case, I guess we'll find out soon enough.
			 */
			mo.mo_target = periph->periph_target;
			mo.mo_lun = periph->periph_lun;
			if (periph->periph_active < periph->periph_openings)
				mo.mo_openings = periph->periph_active - 1;
			else
				mo.mo_openings = periph->periph_openings - 1;
#ifdef DIAGNOSTIC
			if (mo.mo_openings < 0) {
				scsipi_printaddr(periph);
				printf("QUEUE FULL resulted in < 0 openings\n");
				panic("scsipi_done");
			}
#endif
			if (mo.mo_openings == 0) {
				scsipi_printaddr(periph);
				printf("QUEUE FULL resulted in 0 openings\n");
				mo.mo_openings = 1;
			}
			scsipi_async_event(chan, ASYNC_EVENT_MAX_OPENINGS, &mo);
			error = ERESTART;
		} else if (xs->xs_retries != 0) {
			xs->xs_retries--;
			/*
			 * Wait one second, and try again.
			 */
			if ((xs->xs_control & XS_CTL_POLL) ||
			    (chan->chan_flags & SCSIPI_CHAN_TACTIVE) == 0) {
				delay(1000000);
			} else if (!callout_pending(&periph->periph_callout)) {
				scsipi_periph_freeze(periph, 1);
				callout_reset(&periph->periph_callout,
				    hz, scsipi_periph_timed_thaw, periph);
			}
			error = ERESTART;
		} else
			error = EBUSY;
		break;

	case XS_REQUEUE:
		error = ERESTART;
		break;

	case XS_SELTIMEOUT:
	case XS_TIMEOUT:
		/*
		 * If the device hasn't gone away, honor retry counts.
		 *
		 * Note that if we're in the middle of probing it,
		 * it won't be found because it isn't here yet so
		 * we won't honor the retry count in that case.
		 */
		if (scsipi_lookup_periph(chan, periph->periph_target,
		    periph->periph_lun) && xs->xs_retries != 0) {
			xs->xs_retries--;
			error = ERESTART;
		} else
			error = EIO;
		break;

	case XS_RESET:
		if (xs->xs_control & XS_CTL_REQSENSE) {
			/*
			 * Request sense interrupted by reset: signal it
			 * with EINTR return code.
			 */
			error = EINTR;
		} else {
			if (xs->xs_retries != 0) {
				xs->xs_retries--;
				error = ERESTART;
			} else
				error = EIO;
		}
		break;

	case XS_DRIVER_STUFFUP:
		scsipi_printaddr(periph);
		printf("generic HBA error\n");
		error = EIO;
		break;

	default:
		scsipi_printaddr(periph);
		printf("invalid return code from adapter: %d\n", xs->error);
		error = EIO;
		break;
	}

	s = splbio();
	if (error == ERESTART) {
		/*
		 * If we get here, the periph has been thawed and frozen
		 * again if we had to issue recovery commands.  Alternatively,
		 * it may have been frozen again and in a timed thaw.  In
		 * any case, we thaw the periph once we re-enqueue the
		 * command.  Once the periph is fully thawed, it will begin
		 * operation again.
		 */
		xs->error = XS_NOERROR;
		xs->status = SCSI_OK;
		xs->xs_status &= ~XS_STS_DONE;
		xs->xs_requeuecnt++;
		error = scsipi_enqueue(xs);
		if (error == 0) {
			scsipi_periph_thaw(periph, 1);
			splx(s);
			return (ERESTART);
		}
	}

	/*
	 * scsipi_done() freezes the queue if not XS_NOERROR.
	 * Thaw it here.
	 */
	if (xs->error != XS_NOERROR)
		scsipi_periph_thaw(periph, 1);

	if (periph->periph_switch->psw_done)
		periph->periph_switch->psw_done(xs, error);

	if (xs->xs_control & XS_CTL_ASYNC)
		scsipi_put_xs(xs);
	splx(s);

	return (error);
}

/*
 * Issue a request sense for the given scsipi_xfer.  Called when the xfer
 * returns with a CHECK_CONDITION status.  Must be called in valid thread
 * context and at splbio().
 */

static void
scsipi_request_sense(struct scsipi_xfer *xs)
{
	struct scsipi_periph *periph = xs->xs_periph;
	int flags, error;
	struct scsi_request_sense cmd;

	periph->periph_flags |= PERIPH_SENSE;

	/* if command was polling, request sense will too */
	flags = xs->xs_control & XS_CTL_POLL;
	/* Polling commands can't sleep */
	if (flags)
		flags |= XS_CTL_NOSLEEP;

	flags |= XS_CTL_REQSENSE | XS_CTL_URGENT | XS_CTL_DATA_IN |
	    XS_CTL_THAW_PERIPH | XS_CTL_FREEZE_PERIPH;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = SCSI_REQUEST_SENSE;
	cmd.length = sizeof(struct scsi_sense_data);

	error = scsipi_command(periph, (void *)&cmd, sizeof(cmd),
	    (void *)&xs->sense.scsi_sense, sizeof(struct scsi_sense_data),
	    0, 1000, NULL, flags);
	periph->periph_flags &= ~PERIPH_SENSE;
	periph->periph_xscheck = NULL;
	switch (error) {
	case 0:
		/* we have a valid sense */
		xs->error = XS_SENSE;
		return;
	case EINTR:
		/* REQUEST_SENSE interrupted by bus reset. */
		xs->error = XS_RESET;
		return;
	case EIO:
		/* request sense couldn't be performed */
		/*
		 * XXX this isn't quite right but we don't have anything
		 * better for now
		 */
		xs->error = XS_DRIVER_STUFFUP;
		return;
	default:
		/* Notify that request sense failed. */
		xs->error = XS_DRIVER_STUFFUP;
		scsipi_printaddr(periph);
		printf("request sense failed with error %d\n", error);
		return;
	}
}

/*
 * scsipi_enqueue:
 *
 *	Enqueue an xfer on a channel.
 */
static int
scsipi_enqueue(struct scsipi_xfer *xs)
{
	struct scsipi_channel *chan = xs->xs_periph->periph_channel;
	struct scsipi_xfer *qxs;
	int s;

	s = splbio();

	/*
	 * If the xfer is to be polled, and there are already jobs on
	 * the queue, we can't proceed.
	 */
	if ((xs->xs_control & XS_CTL_POLL) != 0 &&
	    TAILQ_FIRST(&chan->chan_queue) != NULL) {
		splx(s);
		xs->error = XS_DRIVER_STUFFUP;
		return (EAGAIN);
	}

	/*
	 * If we have an URGENT xfer, it's an error recovery command
	 * and it should just go on the head of the channel's queue.
	 */
	if (xs->xs_control & XS_CTL_URGENT) {
		TAILQ_INSERT_HEAD(&chan->chan_queue, xs, channel_q);
		goto out;
	}

	/*
	 * If this xfer has already been on the queue before, we
	 * need to reinsert it in the correct order.  That order is:
	 *
	 *	Immediately before the first xfer for this periph
	 *	with a requeuecnt less than xs->xs_requeuecnt.
	 *
	 * Failing that, at the end of the queue.  (We'll end up
	 * there naturally.)
	 */
	if (xs->xs_requeuecnt != 0) {
		for (qxs = TAILQ_FIRST(&chan->chan_queue); qxs != NULL;
		     qxs = TAILQ_NEXT(qxs, channel_q)) {
			if (qxs->xs_periph == xs->xs_periph &&
			    qxs->xs_requeuecnt < xs->xs_requeuecnt)
				break;
		}
		if (qxs != NULL) {
			TAILQ_INSERT_AFTER(&chan->chan_queue, qxs, xs,
			    channel_q);
			goto out;
		}
	}
	TAILQ_INSERT_TAIL(&chan->chan_queue, xs, channel_q);
 out:
	if (xs->xs_control & XS_CTL_THAW_PERIPH)
		scsipi_periph_thaw(xs->xs_periph, 1);
	splx(s);
	return (0);
}

/*
 * scsipi_run_queue:
 *
 *	Start as many xfers as possible running on the channel.
 */
static void
scsipi_run_queue(struct scsipi_channel *chan)
{
	struct scsipi_xfer *xs;
	struct scsipi_periph *periph;
	int s;

	for (;;) {
		s = splbio();

		/*
		 * If the channel is frozen, we can't do any work right
		 * now.
		 */
		if (chan->chan_qfreeze != 0) {
			splx(s);
			return;
		}

		/*
		 * Look for work to do, and make sure we can do it.
		 */
		for (xs = TAILQ_FIRST(&chan->chan_queue); xs != NULL;
		     xs = TAILQ_NEXT(xs, channel_q)) {
			periph = xs->xs_periph;

			if ((periph->periph_sent >= periph->periph_openings) ||
			    periph->periph_qfreeze != 0 ||
			    (periph->periph_flags & PERIPH_UNTAG) != 0)
				continue;

			if ((periph->periph_flags &
			    (PERIPH_RECOVERING | PERIPH_SENSE)) != 0 &&
			    (xs->xs_control & XS_CTL_URGENT) == 0)
				continue;

			/*
			 * We can issue this xfer!
			 */
			goto got_one;
		}

		/*
		 * Can't find any work to do right now.
		 */
		splx(s);
		return;

 got_one:
		/*
		 * Have an xfer to run.  Allocate a resource from
		 * the adapter to run it.  If we can't allocate that
		 * resource, we don't dequeue the xfer.
		 */
		if (scsipi_get_resource(chan) == 0) {
			/*
			 * Adapter is out of resources.  If the adapter
			 * supports it, attempt to grow them.
			 */
			if (scsipi_grow_resources(chan) == 0) {
				/*
				 * Wasn't able to grow resources,
				 * nothing more we can do.
				 */
				if (xs->xs_control & XS_CTL_POLL) {
					scsipi_printaddr(xs->xs_periph);
					printf("polling command but no "
					    "adapter resources");
					/* We'll panic shortly... */
				}
				splx(s);

				/*
				 * XXX: We should be able to note that
				 * XXX: resources are needed here!
				 */
				return;
			}
			/*
			 * scsipi_grow_resources() allocated the resource
			 * for us.
			 */
		}

		/*
		 * We have a resource to run this xfer, do it!
		 */
		TAILQ_REMOVE(&chan->chan_queue, xs, channel_q);

		/*
		 * If the command is to be tagged, allocate a tag ID
		 * for it.
		 */
		if (XS_CTL_TAGTYPE(xs) != 0)
			scsipi_get_tag(xs);
		else
			periph->periph_flags |= PERIPH_UNTAG;
		periph->periph_sent++;
		splx(s);

		scsipi_adapter_request(chan, ADAPTER_REQ_RUN_XFER, xs);
	}
#ifdef DIAGNOSTIC
	panic("scsipi_run_queue: impossible");
#endif
}

/*
 * scsipi_execute_xs:
 *
 *	Begin execution of an xfer, waiting for it to complete, if necessary.
 */
int
scsipi_execute_xs(struct scsipi_xfer *xs)
{
	struct scsipi_periph *periph = xs->xs_periph;
	struct scsipi_channel *chan = periph->periph_channel;
	int oasync, async, poll, error, s;

	KASSERT(!cold);

	(chan->chan_bustype->bustype_cmd)(xs);

	if (xs->xs_control & XS_CTL_DATA_ONSTACK) {
#if 1
		if (xs->xs_control & XS_CTL_ASYNC)
			panic("scsipi_execute_xs: on stack and async");
#endif
		/*
		 * If the I/O buffer is allocated on stack, the
		 * process must NOT be swapped out, as the device will
		 * be accessing the stack.
		 */
		uvm_lwp_hold(curlwp);
	}

	xs->xs_status &= ~XS_STS_DONE;
	xs->error = XS_NOERROR;
	xs->resid = xs->datalen;
	xs->status = SCSI_OK;

#ifdef SCSIPI_DEBUG
	if (xs->xs_periph->periph_dbflags & SCSIPI_DB3) {
		printf("scsipi_execute_xs: ");
		show_scsipi_xs(xs);
		printf("\n");
	}
#endif

	/*
	 * Deal with command tagging:
	 *
	 *	- If the device's current operating mode doesn't
	 *	  include tagged queueing, clear the tag mask.
	 *
	 *	- If the device's current operating mode *does*
	 *	  include tagged queueing, set the tag_type in
	 *	  the xfer to the appropriate byte for the tag
	 *	  message.
	 */
	if ((PERIPH_XFER_MODE(periph) & PERIPH_CAP_TQING) == 0 ||
	    (xs->xs_control & XS_CTL_REQSENSE)) {
		xs->xs_control &= ~XS_CTL_TAGMASK;
		xs->xs_tag_type = 0;
	} else {
		/*
		 * If the request doesn't specify a tag, give Head
		 * tags to URGENT operations and Ordered tags to
		 * everything else.
		 */
		if (XS_CTL_TAGTYPE(xs) == 0) {
			if (xs->xs_control & XS_CTL_URGENT)
				xs->xs_control |= XS_CTL_HEAD_TAG;
			else
				xs->xs_control |= XS_CTL_ORDERED_TAG;
		}

		switch (XS_CTL_TAGTYPE(xs)) {
		case XS_CTL_ORDERED_TAG:
			xs->xs_tag_type = MSG_ORDERED_Q_TAG;
			break;

		case XS_CTL_SIMPLE_TAG:
			xs->xs_tag_type = MSG_SIMPLE_Q_TAG;
			break;

		case XS_CTL_HEAD_TAG:
			xs->xs_tag_type = MSG_HEAD_OF_Q_TAG;
			break;

		default:
			scsipi_printaddr(periph);
			printf("invalid tag mask 0x%08x\n",
			    XS_CTL_TAGTYPE(xs));
			panic("scsipi_execute_xs");
		}
	}

	/* If the adapter wants us to poll, poll. */
	if (chan->chan_adapter->adapt_flags & SCSIPI_ADAPT_POLL_ONLY)
		xs->xs_control |= XS_CTL_POLL;

	/*
	 * If we don't yet have a completion thread, or we are to poll for
	 * completion, clear the ASYNC flag.
	 */
	oasync = (xs->xs_control & XS_CTL_ASYNC);
	if (chan->chan_thread == NULL || (xs->xs_control & XS_CTL_POLL) != 0)
		xs->xs_control &= ~XS_CTL_ASYNC;

	async = (xs->xs_control & XS_CTL_ASYNC);
	poll = (xs->xs_control & XS_CTL_POLL);

#ifdef DIAGNOSTIC
	if (oasync != 0 && xs->bp == NULL)
		panic("scsipi_execute_xs: XS_CTL_ASYNC but no buf");
#endif

	/*
	 * Enqueue the transfer.  If we're not polling for completion, this
	 * should ALWAYS return `no error'.
	 */
	error = scsipi_enqueue(xs);
	if (error) {
		if (poll == 0) {
			scsipi_printaddr(periph);
			printf("not polling, but enqueue failed with %d\n",
			    error);
			panic("scsipi_execute_xs");
		}

		scsipi_printaddr(periph);
		printf("should have flushed queue?\n");
		goto free_xs;
	}

 restarted:
	scsipi_run_queue(chan);

	/*
	 * The xfer is enqueued, and possibly running.  If it's to be
	 * completed asynchronously, just return now.
	 */
	if (async)
		return (0);

	/*
	 * Not an asynchronous command; wait for it to complete.
	 */
	s = splbio();
	while ((xs->xs_status & XS_STS_DONE) == 0) {
		if (poll) {
			scsipi_printaddr(periph);
			printf("polling command not done\n");
			panic("scsipi_execute_xs");
		}
		(void) tsleep(xs, PRIBIO, "xscmd", 0);
	}
	splx(s);

	/*
	 * Command is complete.  scsipi_done() has awakened us to perform
	 * the error handling.
	 */
	error = scsipi_complete(xs);
	if (error == ERESTART)
		goto restarted;

	/*
	 * If it was meant to run async and we cleared async ourselves,
	 * don't return an error here.  It has already been handled.
	 */
	if (oasync)
		error = 0;
	/*
	 * Command completed successfully or fatal error occurred.  Fall
	 * into....
	 */
 free_xs:
	if (xs->xs_control & XS_CTL_DATA_ONSTACK)
		uvm_lwp_rele(curlwp);

	s = splbio();
	scsipi_put_xs(xs);
	splx(s);

	/*
	 * Kick the queue, keep it running in case it stopped for some
	 * reason.
	 */
	scsipi_run_queue(chan);

	return (error);
}
2045
2046 /*
2047 * scsipi_completion_thread:
2048 *
2049 * This is the completion thread. We wait for errors on
2050 * asynchronous xfers, and perform the error handling
2051 * function, restarting the command, if necessary.
2052 */
2053 static void
2054 scsipi_completion_thread(void *arg)
2055 {
2056 struct scsipi_channel *chan = arg;
2057 struct scsipi_xfer *xs;
2058 int s;
2059
2060 if (chan->chan_init_cb)
2061 (*chan->chan_init_cb)(chan, chan->chan_init_cb_arg);
2062
2063 s = splbio();
2064 chan->chan_flags |= SCSIPI_CHAN_TACTIVE;
2065 splx(s);
2066 for (;;) {
2067 s = splbio();
2068 xs = TAILQ_FIRST(&chan->chan_complete);
2069 if (xs == NULL && chan->chan_tflags == 0) {
2070 /* nothing to do; wait */
2071 (void) tsleep(&chan->chan_complete, PRIBIO,
2072 "sccomp", 0);
2073 splx(s);
2074 continue;
2075 }
2076 if (chan->chan_tflags & SCSIPI_CHANT_CALLBACK) {
2077 /* call chan_callback from thread context */
2078 chan->chan_tflags &= ~SCSIPI_CHANT_CALLBACK;
2079 chan->chan_callback(chan, chan->chan_callback_arg);
2080 splx(s);
2081 continue;
2082 }
2083 if (chan->chan_tflags & SCSIPI_CHANT_GROWRES) {
2084 /* attempt to get more openings for this channel */
2085 chan->chan_tflags &= ~SCSIPI_CHANT_GROWRES;
2086 scsipi_adapter_request(chan,
2087 ADAPTER_REQ_GROW_RESOURCES, NULL);
2088 scsipi_channel_thaw(chan, 1);
2089 splx(s);
2090 if (chan->chan_tflags & SCSIPI_CHANT_GROWRES)
2091 kpause("scsizzz", FALSE, hz/10, NULL);
2092 continue;
2093 }
2094 if (chan->chan_tflags & SCSIPI_CHANT_KICK) {
2095 /* explicitly run the queues for this channel */
2096 chan->chan_tflags &= ~SCSIPI_CHANT_KICK;
2097 scsipi_run_queue(chan);
2098 splx(s);
2099 continue;
2100 }
2101 if (chan->chan_tflags & SCSIPI_CHANT_SHUTDOWN) {
2102 splx(s);
2103 break;
2104 }
2105 if (xs) {
2106 TAILQ_REMOVE(&chan->chan_complete, xs, channel_q);
2107 splx(s);
2108
2109 /*
2110 * Have an xfer with an error; process it.
2111 */
2112 (void) scsipi_complete(xs);
2113
2114 /*
2115 * Kick the queue; keep it running if it was stopped
2116 * for some reason.
2117 */
2118 scsipi_run_queue(chan);
2119 } else {
2120 splx(s);
2121 }
2122 }
2123
2124 chan->chan_thread = NULL;
2125
2126 /* In case parent is waiting for us to exit. */
2127 wakeup(&chan->chan_thread);
2128
2129 kthread_exit(0);
2130 }
2131 /*
2132 * scsipi_thread_call_callback:
2133 *
2134  *	Request that a callback be invoked from the completion thread.
2135 */
2136 int
2137 scsipi_thread_call_callback(struct scsipi_channel *chan,
2138 void (*callback)(struct scsipi_channel *, void *), void *arg)
2139 {
2140 int s;
2141
2142 s = splbio();
2143 if ((chan->chan_flags & SCSIPI_CHAN_TACTIVE) == 0) {
2144 /* kernel thread doesn't exist yet */
2145 splx(s);
2146 return ESRCH;
2147 }
2148 if (chan->chan_tflags & SCSIPI_CHANT_CALLBACK) {
2149 splx(s);
2150 return EBUSY;
2151 }
2152 scsipi_channel_freeze(chan, 1);
2153 chan->chan_callback = callback;
2154 chan->chan_callback_arg = arg;
2155 chan->chan_tflags |= SCSIPI_CHANT_CALLBACK;
2156 wakeup(&chan->chan_complete);
2157 splx(s);
2158 	return (0);
2159 }
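
/*
 * An illustrative sketch (driver names assumed, not part of this file):
 * a hypothetical adapter driver "mydrv" could defer blocking work from
 * interrupt context to the completion thread.  Since
 * scsipi_thread_call_callback() freezes the channel by one, the
 * callback should thaw it again once the work is done:
 *
 *	static void
 *	mydrv_reset_callback(struct scsipi_channel *chan, void *arg)
 *	{
 *		struct mydrv_softc *sc = arg;
 *
 *		mydrv_reset_hardware(sc);	(blocking work)
 *		scsipi_channel_thaw(chan, 1);
 *	}
 *
 *	if (scsipi_thread_call_callback(&sc->sc_channel,
 *	    mydrv_reset_callback, sc) != 0)
 *		printf("callback already pending or no thread\n");
 */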
2160
2161 /*
2162 * scsipi_async_event:
2163 *
2164 * Handle an asynchronous event from an adapter.
2165 */
2166 void
2167 scsipi_async_event(struct scsipi_channel *chan, scsipi_async_event_t event,
2168 void *arg)
2169 {
2170 int s;
2171
2172 s = splbio();
2173 switch (event) {
2174 case ASYNC_EVENT_MAX_OPENINGS:
2175 scsipi_async_event_max_openings(chan,
2176 (struct scsipi_max_openings *)arg);
2177 break;
2178
2179 case ASYNC_EVENT_XFER_MODE:
2180 scsipi_async_event_xfer_mode(chan,
2181 (struct scsipi_xfer_mode *)arg);
2182 break;
2183 case ASYNC_EVENT_RESET:
2184 scsipi_async_event_channel_reset(chan);
2185 break;
2186 }
2187 splx(s);
2188 }
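
/*
 * A minimal sketch (values assumed) of the adapter-side call reporting
 * a completed negotiation for target 1:
 *
 *	struct scsipi_xfer_mode xm;
 *
 *	xm.xm_target = 1;
 *	xm.xm_mode = PERIPH_CAP_SYNC | PERIPH_CAP_WIDE16;
 *	xm.xm_period = 0x0a;		(sync factor: 25.00ns, 40MHz)
 *	xm.xm_offset = 15;
 *	scsipi_async_event(chan, ASYNC_EVENT_XFER_MODE, &xm);
 */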
2189
2190 /*
2191 * scsipi_print_xfer_mode:
2192 *
2193 * Print a periph's capabilities.
2194 */
2195 void
2196 scsipi_print_xfer_mode(struct scsipi_periph *periph)
2197 {
2198 int period, freq, speed, mbs;
2199
2200 if ((periph->periph_flags & PERIPH_MODE_VALID) == 0)
2201 return;
2202
2203 aprint_normal_dev(periph->periph_dev, "");
2204 if (periph->periph_mode & (PERIPH_CAP_SYNC | PERIPH_CAP_DT)) {
2205 period = scsipi_sync_factor_to_period(periph->periph_period);
2206 aprint_normal("sync (%d.%02dns offset %d)",
2207 period / 100, period % 100, periph->periph_offset);
2208 } else
2209 aprint_normal("async");
2210
2211 if (periph->periph_mode & PERIPH_CAP_WIDE32)
2212 aprint_normal(", 32-bit");
2213 else if (periph->periph_mode & (PERIPH_CAP_WIDE16 | PERIPH_CAP_DT))
2214 aprint_normal(", 16-bit");
2215 else
2216 aprint_normal(", 8-bit");
2217
2218 if (periph->periph_mode & (PERIPH_CAP_SYNC | PERIPH_CAP_DT)) {
2219 freq = scsipi_sync_factor_to_freq(periph->periph_period);
2220 speed = freq;
2221 if (periph->periph_mode & PERIPH_CAP_WIDE32)
2222 speed *= 4;
2223 else if (periph->periph_mode &
2224 (PERIPH_CAP_WIDE16 | PERIPH_CAP_DT))
2225 speed *= 2;
2226 mbs = speed / 1000;
2227 if (mbs > 0)
2228 aprint_normal(" (%d.%03dMB/s)", mbs, speed % 1000);
2229 else
2230 aprint_normal(" (%dKB/s)", speed % 1000);
2231 }
2232
2233 aprint_normal(" transfers");
2234
2235 if (periph->periph_mode & PERIPH_CAP_TQING)
2236 aprint_normal(", tagged queueing");
2237
2238 aprint_normal("\n");
2239 }
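
/*
 * For example, a periph running at sync factor 0x0a (25.00ns, 40MHz)
 * with offset 15, 16-bit wide and tagged queueing would be announced
 * as (device name assumed):
 *
 *	sd0: sync (25.00ns offset 15), 16-bit (80.000MB/s) transfers, tagged queueing
 */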
2240
2241 /*
2242 * scsipi_async_event_max_openings:
2243 *
2244 * Update the maximum number of outstanding commands a
2245 * device may have.
2246 */
2247 static void
2248 scsipi_async_event_max_openings(struct scsipi_channel *chan,
2249 struct scsipi_max_openings *mo)
2250 {
2251 struct scsipi_periph *periph;
2252 int minlun, maxlun;
2253
2254 if (mo->mo_lun == -1) {
2255 /*
2256 * Wildcarded; apply it to all LUNs.
2257 */
2258 minlun = 0;
2259 maxlun = chan->chan_nluns - 1;
2260 } else
2261 minlun = maxlun = mo->mo_lun;
2262
2263 /* XXX This could really suck with a large LUN space. */
2264 for (; minlun <= maxlun; minlun++) {
2265 periph = scsipi_lookup_periph(chan, mo->mo_target, minlun);
2266 if (periph == NULL)
2267 continue;
2268
2269 if (mo->mo_openings < periph->periph_openings)
2270 periph->periph_openings = mo->mo_openings;
2271 else if (mo->mo_openings > periph->periph_openings &&
2272 (periph->periph_flags & PERIPH_GROW_OPENINGS) != 0)
2273 periph->periph_openings = mo->mo_openings;
2274 }
2275 }
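
/*
 * A sketch (values assumed) of the corresponding adapter-side report;
 * a wildcard LUN of -1 applies the new opening count to every LUN on
 * the target:
 *
 *	struct scsipi_max_openings mo;
 *
 *	mo.mo_target = 1;
 *	mo.mo_lun = -1;
 *	mo.mo_openings = 4;
 *	scsipi_async_event(chan, ASYNC_EVENT_MAX_OPENINGS, &mo);
 */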
2276
2277 /*
2278 * scsipi_async_event_xfer_mode:
2279 *
2280 * Update the xfer mode for all periphs sharing the
2281 * specified I_T Nexus.
2282 */
2283 static void
2284 scsipi_async_event_xfer_mode(struct scsipi_channel *chan,
2285 struct scsipi_xfer_mode *xm)
2286 {
2287 struct scsipi_periph *periph;
2288 int lun, announce, mode, period, offset;
2289
2290 for (lun = 0; lun < chan->chan_nluns; lun++) {
2291 periph = scsipi_lookup_periph(chan, xm->xm_target, lun);
2292 if (periph == NULL)
2293 continue;
2294 announce = 0;
2295
2296 /*
2297 * Clamp the xfer mode down to this periph's capabilities.
2298 */
2299 mode = xm->xm_mode & periph->periph_cap;
2300 if (mode & PERIPH_CAP_SYNC) {
2301 period = xm->xm_period;
2302 offset = xm->xm_offset;
2303 } else {
2304 period = 0;
2305 offset = 0;
2306 }
2307
2308 /*
2309 * If we do not have a valid xfer mode yet, or the parameters
2310 * are different, announce them.
2311 */
2312 if ((periph->periph_flags & PERIPH_MODE_VALID) == 0 ||
2313 periph->periph_mode != mode ||
2314 periph->periph_period != period ||
2315 periph->periph_offset != offset)
2316 announce = 1;
2317
2318 periph->periph_mode = mode;
2319 periph->periph_period = period;
2320 periph->periph_offset = offset;
2321 periph->periph_flags |= PERIPH_MODE_VALID;
2322
2323 if (announce)
2324 scsipi_print_xfer_mode(periph);
2325 }
2326 }
2327
2328 /*
2329 * scsipi_set_xfer_mode:
2330 *
2331 * Set the xfer mode for the specified I_T Nexus.
2332 */
2333 void
2334 scsipi_set_xfer_mode(struct scsipi_channel *chan, int target, int immed)
2335 {
2336 struct scsipi_xfer_mode xm;
2337 struct scsipi_periph *itperiph;
2338 int lun, s;
2339
2340 /*
2341 * Go to the minimal xfer mode.
2342 */
2343 xm.xm_target = target;
2344 xm.xm_mode = 0;
2345 xm.xm_period = 0; /* ignored */
2346 xm.xm_offset = 0; /* ignored */
2347
2348 /*
2349 * Find the first LUN we know about on this I_T Nexus.
2350 */
2351 for (itperiph = NULL, lun = 0; lun < chan->chan_nluns; lun++) {
2352 itperiph = scsipi_lookup_periph(chan, target, lun);
2353 if (itperiph != NULL)
2354 break;
2355 }
2356 if (itperiph != NULL) {
2357 xm.xm_mode = itperiph->periph_cap;
2358 /*
2359 * Now issue the request to the adapter.
2360 */
2361 s = splbio();
2362 scsipi_adapter_request(chan, ADAPTER_REQ_SET_XFER_MODE, &xm);
2363 splx(s);
2364 /*
2365 * If we want this to happen immediately, issue a dummy
2366 * command, since most adapters can't really negotiate unless
2367 * they're executing a job.
2368 */
2369 if (immed != 0) {
2370 (void) scsipi_test_unit_ready(itperiph,
2371 XS_CTL_DISCOVERY | XS_CTL_IGNORE_ILLEGAL_REQUEST |
2372 XS_CTL_IGNORE_NOT_READY |
2373 XS_CTL_IGNORE_MEDIA_CHANGE);
2374 }
2375 }
2376 }
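
/*
 * A hypothetical use: renegotiate the best mode for target 2 right
 * away, letting the dummy TEST UNIT READY give the adapter a job to
 * negotiate during:
 *
 *	scsipi_set_xfer_mode(chan, 2, 1);
 */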
2377
2378 /*
2379  * scsipi_async_event_channel_reset:
2380  *
2381  *	Handle a SCSI bus reset.
2382  *	Must be called at splbio.
2383 */
2384 static void
2385 scsipi_async_event_channel_reset(struct scsipi_channel *chan)
2386 {
2387 struct scsipi_xfer *xs, *xs_next;
2388 struct scsipi_periph *periph;
2389 int target, lun;
2390
2391 /*
2392 	 * Channel has been reset.  Also mark pending REQUEST_SENSE commands
2393 	 * as reset, since the sense data is no longer available.  We can't
2394 	 * call scsipi_done() from here, as the command has not been sent
2395 	 * to the adapter yet (this would corrupt accounting).
2396 */
2397
2398 for (xs = TAILQ_FIRST(&chan->chan_queue); xs != NULL; xs = xs_next) {
2399 xs_next = TAILQ_NEXT(xs, channel_q);
2400 if (xs->xs_control & XS_CTL_REQSENSE) {
2401 TAILQ_REMOVE(&chan->chan_queue, xs, channel_q);
2402 xs->error = XS_RESET;
2403 if ((xs->xs_control & XS_CTL_ASYNC) != 0)
2404 TAILQ_INSERT_TAIL(&chan->chan_complete, xs,
2405 channel_q);
2406 }
2407 }
2408 wakeup(&chan->chan_complete);
2409 /* Catch xs with pending sense which may not have a REQSENSE xs yet */
2410 for (target = 0; target < chan->chan_ntargets; target++) {
2411 if (target == chan->chan_id)
2412 continue;
2413 for (lun = 0; lun < chan->chan_nluns; lun++) {
2414 periph = scsipi_lookup_periph(chan, target, lun);
2415 if (periph) {
2416 xs = periph->periph_xscheck;
2417 if (xs)
2418 xs->error = XS_RESET;
2419 }
2420 }
2421 }
2422 }
2423
2424 /*
2425 * scsipi_target_detach:
2426 *
2427  *	Detach all periphs associated with an I_T nexus.
2428  *	Must be called from valid thread context.
2429 */
2430 int
2431 scsipi_target_detach(struct scsipi_channel *chan, int target, int lun,
2432 int flags)
2433 {
2434 struct scsipi_periph *periph;
2435 int ctarget, mintarget, maxtarget;
2436 int clun, minlun, maxlun;
2437 int error;
2438
2439 if (target == -1) {
2440 mintarget = 0;
2441 maxtarget = chan->chan_ntargets;
2442 } else {
2443 if (target == chan->chan_id)
2444 return EINVAL;
2445 if (target < 0 || target >= chan->chan_ntargets)
2446 return EINVAL;
2447 mintarget = target;
2448 maxtarget = target + 1;
2449 }
2450
2451 if (lun == -1) {
2452 minlun = 0;
2453 maxlun = chan->chan_nluns;
2454 } else {
2455 if (lun < 0 || lun >= chan->chan_nluns)
2456 return EINVAL;
2457 minlun = lun;
2458 maxlun = lun + 1;
2459 }
2460
2461 for (ctarget = mintarget; ctarget < maxtarget; ctarget++) {
2462 if (ctarget == chan->chan_id)
2463 continue;
2464
2465 for (clun = minlun; clun < maxlun; clun++) {
2466 periph = scsipi_lookup_periph(chan, ctarget, clun);
2467 if (periph == NULL)
2468 continue;
2469 error = config_detach(periph->periph_dev, flags);
2470 if (error)
2471 return (error);
2472 }
2473 }
2474 	return (0);
2475 }
2476
2477 /*
2478 * scsipi_adapter_addref:
2479 *
2480 * Add a reference to the adapter pointed to by the provided
2481 * link, enabling the adapter if necessary.
2482 */
2483 int
2484 scsipi_adapter_addref(struct scsipi_adapter *adapt)
2485 {
2486 int s, error = 0;
2487
2488 s = splbio();
2489 if (adapt->adapt_refcnt++ == 0 && adapt->adapt_enable != NULL) {
2490 error = (*adapt->adapt_enable)(adapt->adapt_dev, 1);
2491 if (error)
2492 adapt->adapt_refcnt--;
2493 }
2494 splx(s);
2495 return (error);
2496 }
2497
2498 /*
2499 * scsipi_adapter_delref:
2500 *
2501 * Delete a reference to the adapter pointed to by the provided
2502 * link, disabling the adapter if possible.
2503 */
2504 void
2505 scsipi_adapter_delref(struct scsipi_adapter *adapt)
2506 {
2507 int s;
2508
2509 s = splbio();
2510 if (adapt->adapt_refcnt-- == 1 && adapt->adapt_enable != NULL)
2511 (void) (*adapt->adapt_enable)(adapt->adapt_dev, 0);
2512 splx(s);
2513 }
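
/*
 * These two are meant to be used in pairs; a sketch (error handling
 * abbreviated) of a periph driver's open path:
 *
 *	if ((error = scsipi_adapter_addref(adapt)) != 0)
 *		return (error);
 *	... issue commands ...
 *	scsipi_adapter_delref(adapt);
 */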
2514
2515 static struct scsipi_syncparam {
2516 int ss_factor;
2517 int ss_period; /* ns * 100 */
2518 } scsipi_syncparams[] = {
2519 { 0x08, 625 }, /* FAST-160 (Ultra320) */
2520 { 0x09, 1250 }, /* FAST-80 (Ultra160) */
2521 { 0x0a, 2500 }, /* FAST-40 40MHz (Ultra2) */
2522 { 0x0b, 3030 }, /* FAST-40 33MHz (Ultra2) */
2523 { 0x0c, 5000 }, /* FAST-20 (Ultra) */
2524 };
2525 static const int scsipi_nsyncparams =
2526 sizeof(scsipi_syncparams) / sizeof(scsipi_syncparams[0]);
2527
2528 int
2529 scsipi_sync_period_to_factor(int period /* ns * 100 */)
2530 {
2531 int i;
2532
2533 for (i = 0; i < scsipi_nsyncparams; i++) {
2534 if (period <= scsipi_syncparams[i].ss_period)
2535 return (scsipi_syncparams[i].ss_factor);
2536 }
2537
2538 return ((period / 100) / 4);
2539 }
2540
2541 int
2542 scsipi_sync_factor_to_period(int factor)
2543 {
2544 int i;
2545
2546 for (i = 0; i < scsipi_nsyncparams; i++) {
2547 if (factor == scsipi_syncparams[i].ss_factor)
2548 return (scsipi_syncparams[i].ss_period);
2549 }
2550
2551 return ((factor * 4) * 100);
2552 }
2553
2554 int
2555 scsipi_sync_factor_to_freq(int factor)
2556 {
2557 int i;
2558
2559 for (i = 0; i < scsipi_nsyncparams; i++) {
2560 if (factor == scsipi_syncparams[i].ss_factor)
2561 return (100000000 / scsipi_syncparams[i].ss_period);
2562 }
2563
2564 return (10000000 / ((factor * 4) * 10));
2565 }
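
/*
 * Worked example: factor 0x0a is in the table above, so
 * scsipi_sync_factor_to_period(0x0a) returns 2500 (25.00ns) and
 * scsipi_sync_factor_to_freq(0x0a) returns 100000000 / 2500 = 40000
 * (i.e. 40MHz, in kHz).  A factor outside the table, e.g. 0x19
 * (FAST-10), falls through to the classic formula: period =
 * 0x19 * 4 * 100 = 10000 (100ns), freq = 10000000 / (0x19 * 4 * 10)
 * = 10000 (10MHz).
 */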
2566
2567 #ifdef SCSIPI_DEBUG
2568 /*
2569  * Given a scsipi_xfer, dump the request in all its glory.
2570 */
2571 void
2572 show_scsipi_xs(struct scsipi_xfer *xs)
2573 {
2574
2575 printf("xs(%p): ", xs);
2576 printf("xs_control(0x%08x)", xs->xs_control);
2577 printf("xs_status(0x%08x)", xs->xs_status);
2578 printf("periph(%p)", xs->xs_periph);
2579 printf("retr(0x%x)", xs->xs_retries);
2580 printf("timo(0x%x)", xs->timeout);
2581 printf("cmd(%p)", xs->cmd);
2582 printf("len(0x%x)", xs->cmdlen);
2583 printf("data(%p)", xs->data);
2584 printf("len(0x%x)", xs->datalen);
2585 printf("res(0x%x)", xs->resid);
2586 printf("err(0x%x)", xs->error);
2587 printf("bp(%p)", xs->bp);
2588 show_scsipi_cmd(xs);
2589 }
2590
2591 void
2592 show_scsipi_cmd(struct scsipi_xfer *xs)
2593 {
2594 u_char *b = (u_char *) xs->cmd;
2595 int i = 0;
2596
2597 scsipi_printaddr(xs->xs_periph);
2598 printf(" command: ");
2599
2600 if ((xs->xs_control & XS_CTL_RESET) == 0) {
2601 while (i < xs->cmdlen) {
2602 if (i)
2603 printf(",");
2604 printf("0x%x", b[i++]);
2605 }
2606 printf("-[%d bytes]\n", xs->datalen);
2607 if (xs->datalen)
2608 show_mem(xs->data, min(64, xs->datalen));
2609 } else
2610 printf("-RESET-\n");
2611 }
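
/*
 * Sample output (periph address and bytes illustrative): a READ(10)
 * of 8 blocks at LBA 0x1234 would appear as
 *
 *	sd0(ahc0:0:1:0):  command: 0x28,0x0,0x0,0x0,0x12,0x34,0x0,0x0,0x8,0x0-[4096 bytes]
 */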
2612
2613 void
2614 show_mem(u_char *address, int num)
2615 {
2616 int x;
2617
2618 printf("------------------------------");
2619 for (x = 0; x < num; x++) {
2620 if ((x % 16) == 0)
2621 printf("\n%03d: ", x);
2622 printf("%02x ", *address++);
2623 }
2624 printf("\n------------------------------\n");
2625 }
2626 #endif /* SCSIPI_DEBUG */
2627