/*	$NetBSD: scsipi_base.c,v 1.145 2007/07/09 21:01:21 ad Exp $	*/

/*-
 * Copyright (c) 1998, 1999, 2000, 2002, 2003, 2004 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Charles M. Hannum; by Jason R. Thorpe of the Numerical Aerospace
 * Simulation Facility, NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: scsipi_base.c,v 1.145 2007/07/09 21:01:21 ad Exp $");

#include "opt_scsi.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/buf.h>
#include <sys/uio.h>
#include <sys/malloc.h>
#include <sys/pool.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/proc.h>
#include <sys/kthread.h>
#include <sys/hash.h>

#include <uvm/uvm_extern.h>

#include <dev/scsipi/scsi_spc.h>
#include <dev/scsipi/scsipi_all.h>
#include <dev/scsipi/scsipi_disk.h>
#include <dev/scsipi/scsipiconf.h>
#include <dev/scsipi/scsipi_base.h>

#include <dev/scsipi/scsi_all.h>
#include <dev/scsipi/scsi_message.h>

static int	scsipi_complete(struct scsipi_xfer *);
static void	scsipi_request_sense(struct scsipi_xfer *);
static int	scsipi_enqueue(struct scsipi_xfer *);
static void	scsipi_run_queue(struct scsipi_channel *chan);

static void	scsipi_completion_thread(void *);

static void	scsipi_get_tag(struct scsipi_xfer *);
static void	scsipi_put_tag(struct scsipi_xfer *);

static int	scsipi_get_resource(struct scsipi_channel *);
static void	scsipi_put_resource(struct scsipi_channel *);

static void	scsipi_async_event_max_openings(struct scsipi_channel *,
		    struct scsipi_max_openings *);
static void	scsipi_async_event_xfer_mode(struct scsipi_channel *,
		    struct scsipi_xfer_mode *);
static void	scsipi_async_event_channel_reset(struct scsipi_channel *);

static struct pool scsipi_xfer_pool;

/*
 * scsipi_init:
 *
 *	Called when a scsibus or atapibus is attached to the system
 *	to initialize shared data structures.
 */
void
scsipi_init(void)
{
	static int scsipi_init_done;

	if (scsipi_init_done)
		return;
	scsipi_init_done = 1;

	/* Initialize the scsipi_xfer pool. */
	pool_init(&scsipi_xfer_pool, sizeof(struct scsipi_xfer), 0,
	    0, 0, "scxspl", NULL, IPL_BIO);
	if (pool_prime(&scsipi_xfer_pool,
	    PAGE_SIZE / sizeof(struct scsipi_xfer)) == ENOMEM) {
		printf("WARNING: not enough memory for scsipi_xfer_pool\n");
	}
}

/*
 * scsipi_channel_init:
 *
 *	Initialize a scsipi_channel when it is attached.
 */
int
scsipi_channel_init(struct scsipi_channel *chan)
{
	struct scsipi_adapter *adapt = chan->chan_adapter;
	int i;

	/* Initialize shared data. */
	scsipi_init();

	/* Initialize the queues. */
	TAILQ_INIT(&chan->chan_queue);
	TAILQ_INIT(&chan->chan_complete);

	for (i = 0; i < SCSIPI_CHAN_PERIPH_BUCKETS; i++)
		LIST_INIT(&chan->chan_periphtab[i]);

	/*
	 * Create the asynchronous completion thread.
	 */
	if (kthread_create(PRI_NONE, 0, NULL, scsipi_completion_thread, chan,
	    &chan->chan_thread, "%s", chan->chan_name)) {
		printf("%s: unable to create completion thread for "
		    "channel %d\n", adapt->adapt_dev->dv_xname,
		    chan->chan_channel);
		panic("scsipi_channel_init");
	}

	return (0);
}

/*
 * scsipi_channel_shutdown:
 *
 *	Shutdown a scsipi_channel.
 */
void
scsipi_channel_shutdown(struct scsipi_channel *chan)
{

	/*
	 * Shut down the completion thread.
	 */
	chan->chan_tflags |= SCSIPI_CHANT_SHUTDOWN;
	wakeup(&chan->chan_complete);

	/*
	 * Now wait for the thread to exit.
	 */
	while (chan->chan_thread != NULL)
		(void) tsleep(&chan->chan_thread, PRIBIO, "scshut", 0);
}

static uint32_t
scsipi_chan_periph_hash(uint64_t t, uint64_t l)
{
	uint32_t hash;

	hash = hash32_buf(&t, sizeof(t), HASH32_BUF_INIT);
	hash = hash32_buf(&l, sizeof(l), hash);

	return (hash & SCSIPI_CHAN_PERIPH_HASHMASK);
}
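
/*
 * Illustrative sketch (not part of the driver): given a channel and a
 * (target, lun) pair, the periph bucket is found with
 *
 *	uint32_t hash = scsipi_chan_periph_hash(target, lun);
 *	... = &chan->chan_periphtab[hash];
 *
 * Any periph with the same target/lun always hashes to the same bucket,
 * which is what lets scsipi_lookup_periph() below walk one short list
 * instead of every periph on the channel.  This relies on
 * SCSIPI_CHAN_PERIPH_HASHMASK masking the hash down to a valid index
 * into the SCSIPI_CHAN_PERIPH_BUCKETS-entry table.
 */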

/*
 * scsipi_insert_periph:
 *
 *	Insert a periph into the channel.
 */
void
scsipi_insert_periph(struct scsipi_channel *chan, struct scsipi_periph *periph)
{
	uint32_t hash;
	int s;

	hash = scsipi_chan_periph_hash(periph->periph_target,
	    periph->periph_lun);

	s = splbio();
	LIST_INSERT_HEAD(&chan->chan_periphtab[hash], periph, periph_hash);
	splx(s);
}

/*
 * scsipi_remove_periph:
 *
 *	Remove a periph from the channel.
 */
void
scsipi_remove_periph(struct scsipi_channel *chan,
    struct scsipi_periph *periph)
{
	int s;

	s = splbio();
	LIST_REMOVE(periph, periph_hash);
	splx(s);
}

/*
 * scsipi_lookup_periph:
 *
 *	Lookup a periph on the specified channel.
 */
struct scsipi_periph *
scsipi_lookup_periph(struct scsipi_channel *chan, int target, int lun)
{
	struct scsipi_periph *periph;
	uint32_t hash;
	int s;

	if (target >= chan->chan_ntargets ||
	    lun >= chan->chan_nluns)
		return (NULL);

	hash = scsipi_chan_periph_hash(target, lun);

	s = splbio();
	LIST_FOREACH(periph, &chan->chan_periphtab[hash], periph_hash) {
		if (periph->periph_target == target &&
		    periph->periph_lun == lun)
			break;
	}
	splx(s);

	return (periph);
}

/*
 * scsipi_get_resource:
 *
 *	Allocate a single xfer `resource' from the channel.
 *
 *	NOTE: Must be called at splbio().
 */
static int
scsipi_get_resource(struct scsipi_channel *chan)
{
	struct scsipi_adapter *adapt = chan->chan_adapter;

	if (chan->chan_flags & SCSIPI_CHAN_OPENINGS) {
		if (chan->chan_openings > 0) {
			chan->chan_openings--;
			return (1);
		}
		return (0);
	}

	if (adapt->adapt_openings > 0) {
		adapt->adapt_openings--;
		return (1);
	}
	return (0);
}

/*
 * scsipi_grow_resources:
 *
 *	Attempt to grow resources for a channel.  If this succeeds,
 *	we allocate one for our caller.
 *
 *	NOTE: Must be called at splbio().
 */
static inline int
scsipi_grow_resources(struct scsipi_channel *chan)
{

	if (chan->chan_flags & SCSIPI_CHAN_CANGROW) {
		if ((chan->chan_flags & SCSIPI_CHAN_TACTIVE) == 0) {
			scsipi_adapter_request(chan,
			    ADAPTER_REQ_GROW_RESOURCES, NULL);
			return (scsipi_get_resource(chan));
		}
		/*
		 * Ask the channel thread to do it.  It'll have to thaw the
		 * queue.
		 */
		scsipi_channel_freeze(chan, 1);
		chan->chan_tflags |= SCSIPI_CHANT_GROWRES;
		wakeup(&chan->chan_complete);
		return (0);
	}

	return (0);
}

/*
 * scsipi_put_resource:
 *
 *	Free a single xfer `resource' to the channel.
 *
 *	NOTE: Must be called at splbio().
 */
static void
scsipi_put_resource(struct scsipi_channel *chan)
{
	struct scsipi_adapter *adapt = chan->chan_adapter;

	if (chan->chan_flags & SCSIPI_CHAN_OPENINGS)
		chan->chan_openings++;
	else
		adapt->adapt_openings++;
}

/*
 * scsipi_get_tag:
 *
 *	Get a tag ID for the specified xfer.
 *
 *	NOTE: Must be called at splbio().
 */
static void
scsipi_get_tag(struct scsipi_xfer *xs)
{
	struct scsipi_periph *periph = xs->xs_periph;
	int bit, tag;
	u_int word;

	bit = 0;	/* XXX gcc */
	for (word = 0; word < PERIPH_NTAGWORDS; word++) {
		bit = ffs(periph->periph_freetags[word]);
		if (bit != 0)
			break;
	}
#ifdef DIAGNOSTIC
	if (word == PERIPH_NTAGWORDS) {
		scsipi_printaddr(periph);
		printf("no free tags\n");
		panic("scsipi_get_tag");
	}
#endif

	bit -= 1;
	periph->periph_freetags[word] &= ~(1 << bit);
	tag = (word << 5) | bit;

	/* XXX Should eventually disallow this completely. */
	if (tag >= periph->periph_openings) {
		scsipi_printaddr(periph);
		printf("WARNING: tag %d greater than available openings %d\n",
		    tag, periph->periph_openings);
	}

	xs->xs_tag_id = tag;
}
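
/*
 * Worked example of the tag encoding (values are illustrative): the
 * free-tag bitmap packs 32 tags per word, so a tag ID is
 * (word << 5) | bit.  For word == 1 and bit == 3:
 *
 *	tag  = (1 << 5) | 3;	-> 35
 *	word = 35 >> 5;		-> 1
 *	bit  = 35 & 0x1f;	-> 3
 *
 * scsipi_put_tag() below is the exact inverse of scsipi_get_tag()
 * above: it recomputes (word, bit) from the ID and sets the bit back
 * in periph_freetags[].
 */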

/*
 * scsipi_put_tag:
 *
 *	Put the tag ID for the specified xfer back into the pool.
 *
 *	NOTE: Must be called at splbio().
 */
static void
scsipi_put_tag(struct scsipi_xfer *xs)
{
	struct scsipi_periph *periph = xs->xs_periph;
	int word, bit;

	word = xs->xs_tag_id >> 5;
	bit = xs->xs_tag_id & 0x1f;

	periph->periph_freetags[word] |= (1 << bit);
}

/*
 * scsipi_get_xs:
 *
 *	Allocate an xfer descriptor and associate it with the
 *	specified peripheral.  If the peripheral has no more
 *	available command openings, we either block waiting for
 *	one to become available, or fail.
 */
struct scsipi_xfer *
scsipi_get_xs(struct scsipi_periph *periph, int flags)
{
	struct scsipi_xfer *xs;
	int s;

	SC_DEBUG(periph, SCSIPI_DB3, ("scsipi_get_xs\n"));

	KASSERT(!cold);

#ifdef DIAGNOSTIC
	/*
	 * URGENT commands can never be ASYNC.
	 */
	if ((flags & (XS_CTL_URGENT|XS_CTL_ASYNC)) ==
	    (XS_CTL_URGENT|XS_CTL_ASYNC)) {
		scsipi_printaddr(periph);
		printf("URGENT and ASYNC\n");
		panic("scsipi_get_xs");
	}
#endif

	s = splbio();
	/*
	 * Wait for a command opening to become available.  Rules:
	 *
	 *	- All xfers must wait for an available opening.
	 *	  Exception: URGENT xfers can proceed when
	 *	  active == openings, because we use the opening
	 *	  of the command we're recovering for.
	 *
	 *	- If the periph has sense pending, only URGENT & REQSENSE
	 *	  xfers may proceed.
	 *
	 *	- If the periph is recovering, only URGENT xfers may
	 *	  proceed.
	 *
	 *	- If the periph is currently executing a recovery
	 *	  command, URGENT commands must block, because only
	 *	  one recovery command can execute at a time.
	 */
	for (;;) {
		if (flags & XS_CTL_URGENT) {
			if (periph->periph_active > periph->periph_openings)
				goto wait_for_opening;
			if (periph->periph_flags & PERIPH_SENSE) {
				if ((flags & XS_CTL_REQSENSE) == 0)
					goto wait_for_opening;
			} else {
				if ((periph->periph_flags &
				    PERIPH_RECOVERY_ACTIVE) != 0)
					goto wait_for_opening;
				periph->periph_flags |= PERIPH_RECOVERY_ACTIVE;
			}
			break;
		}
		if (periph->periph_active >= periph->periph_openings ||
		    (periph->periph_flags & PERIPH_RECOVERING) != 0)
			goto wait_for_opening;
		periph->periph_active++;
		break;

 wait_for_opening:
		if (flags & XS_CTL_NOSLEEP) {
			splx(s);
			return (NULL);
		}
		SC_DEBUG(periph, SCSIPI_DB3, ("sleeping\n"));
		periph->periph_flags |= PERIPH_WAITING;
		(void) tsleep(periph, PRIBIO, "getxs", 0);
	}
	SC_DEBUG(periph, SCSIPI_DB3, ("calling pool_get\n"));
	xs = pool_get(&scsipi_xfer_pool,
	    ((flags & XS_CTL_NOSLEEP) != 0 ? PR_NOWAIT : PR_WAITOK));
	if (xs == NULL) {
		if (flags & XS_CTL_URGENT) {
			if ((flags & XS_CTL_REQSENSE) == 0)
				periph->periph_flags &= ~PERIPH_RECOVERY_ACTIVE;
		} else
			periph->periph_active--;
		scsipi_printaddr(periph);
		printf("unable to allocate %sscsipi_xfer\n",
		    (flags & XS_CTL_URGENT) ? "URGENT " : "");
	}
	splx(s);

	SC_DEBUG(periph, SCSIPI_DB3, ("returning\n"));

	if (xs != NULL) {
		memset(xs, 0, sizeof(*xs));
		callout_init(&xs->xs_callout, 0);
		xs->xs_periph = periph;
		xs->xs_control = flags;
		xs->xs_status = 0;
		s = splbio();
		TAILQ_INSERT_TAIL(&periph->periph_xferq, xs, device_q);
		splx(s);
	}
	return (xs);
}

/*
 * scsipi_put_xs:
 *
 *	Release an xfer descriptor, decreasing the outstanding command
 *	count for the peripheral.  If there is a thread waiting for
 *	an opening, wake it up.  If not, kick any queued I/O the
 *	peripheral may have.
 *
 *	NOTE: Must be called at splbio().
 */
void
scsipi_put_xs(struct scsipi_xfer *xs)
{
	struct scsipi_periph *periph = xs->xs_periph;
	int flags = xs->xs_control;

	SC_DEBUG(periph, SCSIPI_DB3, ("scsipi_free_xs\n"));

	TAILQ_REMOVE(&periph->periph_xferq, xs, device_q);
	pool_put(&scsipi_xfer_pool, xs);

#ifdef DIAGNOSTIC
	if ((periph->periph_flags & PERIPH_RECOVERY_ACTIVE) != 0 &&
	    periph->periph_active == 0) {
		scsipi_printaddr(periph);
516 printf("recovery without a command to recovery for\n");
		panic("scsipi_put_xs");
	}
#endif

	if (flags & XS_CTL_URGENT) {
		if ((flags & XS_CTL_REQSENSE) == 0)
			periph->periph_flags &= ~PERIPH_RECOVERY_ACTIVE;
	} else
		periph->periph_active--;
	if (periph->periph_active == 0 &&
	    (periph->periph_flags & PERIPH_WAITDRAIN) != 0) {
		periph->periph_flags &= ~PERIPH_WAITDRAIN;
		wakeup(&periph->periph_active);
	}

	if (periph->periph_flags & PERIPH_WAITING) {
		periph->periph_flags &= ~PERIPH_WAITING;
		wakeup(periph);
	} else {
		if (periph->periph_switch->psw_start != NULL &&
		    device_is_active(periph->periph_dev)) {
			SC_DEBUG(periph, SCSIPI_DB2,
			    ("calling private start()\n"));
			(*periph->periph_switch->psw_start)(periph);
		}
	}
}

/*
 * scsipi_channel_freeze:
 *
 *	Freeze a channel's xfer queue.
 */
void
scsipi_channel_freeze(struct scsipi_channel *chan, int count)
{
	int s;

	s = splbio();
	chan->chan_qfreeze += count;
	splx(s);
}

/*
 * scsipi_channel_thaw:
 *
 *	Thaw a channel's xfer queue.
 */
void
scsipi_channel_thaw(struct scsipi_channel *chan, int count)
{
	int s;

	s = splbio();
	chan->chan_qfreeze -= count;
	/*
	 * Don't let the freeze count go negative.
	 *
	 * Presumably the adapter driver could keep track of this,
	 * but it might just be easier to do this here so as to allow
	 * multiple callers, including those outside the adapter driver.
	 */
	if (chan->chan_qfreeze < 0) {
		chan->chan_qfreeze = 0;
	}
	splx(s);
	/*
	 * Kick the channel's queue here.  Note, we may be running in
	 * interrupt context (softclock or HBA's interrupt), so the adapter
	 * driver had better not sleep.
	 */
	if (chan->chan_qfreeze == 0)
		scsipi_run_queue(chan);
}

/*
 * scsipi_channel_timed_thaw:
 *
 *	Thaw a channel after some time has expired.  This will also
 *	run the channel's queue if the freeze count has reached 0.
 */
void
scsipi_channel_timed_thaw(void *arg)
{
	struct scsipi_channel *chan = arg;

	scsipi_channel_thaw(chan, 1);
}
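
/*
 * Typical use (a sketch; sc_callout is a hypothetical member of the
 * caller's softc): an adapter that hits a transient error can freeze
 * the channel and schedule the thaw one second out:
 *
 *	scsipi_channel_freeze(chan, 1);
 *	callout_reset(&sc->sc_callout, hz,
 *	    scsipi_channel_timed_thaw, chan);
 *
 * The callout then fires in softclock context, drops the freeze count,
 * and runs the queue once the count reaches zero.
 */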

/*
 * scsipi_periph_freeze:
 *
 *	Freeze a device's xfer queue.
 */
void
scsipi_periph_freeze(struct scsipi_periph *periph, int count)
{
	int s;

	s = splbio();
	periph->periph_qfreeze += count;
	splx(s);
}

/*
 * scsipi_periph_thaw:
 *
 *	Thaw a device's xfer queue.
 */
void
scsipi_periph_thaw(struct scsipi_periph *periph, int count)
{
	int s;

	s = splbio();
	periph->periph_qfreeze -= count;
#ifdef DIAGNOSTIC
	if (periph->periph_qfreeze < 0) {
		static const char pc[] = "periph freeze count < 0";
		scsipi_printaddr(periph);
		printf("%s\n", pc);
		panic(pc);
	}
#endif
	if (periph->periph_qfreeze == 0 &&
	    (periph->periph_flags & PERIPH_WAITING) != 0)
		wakeup(periph);
	splx(s);
}

/*
 * scsipi_periph_timed_thaw:
 *
 *	Thaw a device after some time has expired.
 */
void
scsipi_periph_timed_thaw(void *arg)
{
	int s;
	struct scsipi_periph *periph = arg;

	callout_stop(&periph->periph_callout);

	s = splbio();
	scsipi_periph_thaw(periph, 1);
	if ((periph->periph_channel->chan_flags & SCSIPI_CHAN_TACTIVE) == 0) {
		/*
		 * Kick the channel's queue here.  Note, we're running in
		 * interrupt context (softclock), so the adapter driver
		 * had better not sleep.
		 */
		scsipi_run_queue(periph->periph_channel);
	} else {
		/*
		 * Tell the completion thread to kick the channel's queue here.
		 */
		periph->periph_channel->chan_tflags |= SCSIPI_CHANT_KICK;
		wakeup(&periph->periph_channel->chan_complete);
	}
	splx(s);
}

/*
 * scsipi_wait_drain:
 *
 *	Wait for a periph's pending xfers to drain.
 */
void
scsipi_wait_drain(struct scsipi_periph *periph)
{
	int s;

	s = splbio();
	while (periph->periph_active != 0) {
		periph->periph_flags |= PERIPH_WAITDRAIN;
		(void) tsleep(&periph->periph_active, PRIBIO, "sxdrn", 0);
	}
	splx(s);
}

/*
 * scsipi_kill_pending:
 *
 *	Kill off all pending xfers for a periph.
 *
 *	NOTE: Must be called at splbio().
 */
void
scsipi_kill_pending(struct scsipi_periph *periph)
{

	(*periph->periph_channel->chan_bustype->bustype_kill_pending)(periph);
	scsipi_wait_drain(periph);
}
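
/*
 * Sketch of a typical caller (a hypothetical detach path): the
 * bus-specific kill hook errors out every queued xfer, and the drain
 * then waits for commands already handed to the adapter:
 *
 *	s = splbio();
 *	scsipi_kill_pending(periph);
 *	splx(s);
 *
 * Note that this can sleep in scsipi_wait_drain(), so it needs thread
 * context despite the splbio() requirement.
 */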

/*
 * scsipi_print_cdb:
 *	prints a command descriptor block (for debug purpose, error messages,
 *	SCSIPI_VERBOSE, ...)
 */
void
scsipi_print_cdb(struct scsipi_generic *cmd)
{
	int i, j;

	printf("0x%02x", cmd->opcode);

	switch (CDB_GROUPID(cmd->opcode)) {
	case CDB_GROUPID_0:
		j = CDB_GROUP0;
		break;
	case CDB_GROUPID_1:
		j = CDB_GROUP1;
		break;
	case CDB_GROUPID_2:
		j = CDB_GROUP2;
		break;
	case CDB_GROUPID_3:
		j = CDB_GROUP3;
		break;
	case CDB_GROUPID_4:
		j = CDB_GROUP4;
		break;
	case CDB_GROUPID_5:
		j = CDB_GROUP5;
		break;
	case CDB_GROUPID_6:
		j = CDB_GROUP6;
		break;
	case CDB_GROUPID_7:
		j = CDB_GROUP7;
		break;
	default:
		j = 0;
	}
	if (j == 0)
		j = sizeof (cmd->bytes);
	for (i = 0; i < j-1; i++)	/* already done the opcode */
		printf(" %02x", cmd->bytes[i]);
}
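
/*
 * Example output (hypothetical CDB bytes): a READ(10) opcode 0x28 is
 * in group 1, so CDB_GROUPID(0x28) selects CDB_GROUP1 and all ten
 * bytes are printed:
 *
 *	0x28 00 00 12 34 00 00 00 08 00
 *
 * An opcode whose group is unknown falls back to dumping the entire
 * cmd->bytes array.
 */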

/*
 * scsipi_interpret_sense:
 *
 *	Look at the returned sense and act on the error, determining
 *	the unix error number to pass back.  (0 = report no error)
 *
 *	NOTE: If we return ERESTART, we are expected to have
 *	thawed the device!
 *
 *	THIS IS THE DEFAULT ERROR HANDLER FOR SCSI DEVICES.
 */
int
scsipi_interpret_sense(struct scsipi_xfer *xs)
{
	struct scsi_sense_data *sense;
	struct scsipi_periph *periph = xs->xs_periph;
	u_int8_t key;
	int error;
#ifndef SCSIVERBOSE
	u_int32_t info;
	static const char *error_mes[] = {
		"soft error (corrected)",
		"not ready", "medium error",
		"non-media hardware failure", "illegal request",
		"unit attention", "readonly device",
		"no data found", "vendor unique",
		"copy aborted", "command aborted",
		"search returned equal", "volume overflow",
		"verify miscompare", "unknown error key"
	};
#endif

	sense = &xs->sense.scsi_sense;
#ifdef SCSIPI_DEBUG
	if (periph->periph_flags & SCSIPI_DB1) {
		int count;
		scsipi_printaddr(periph);
		printf(" sense debug information:\n");
		printf("\tcode 0x%x valid %d\n",
		    SSD_RCODE(sense->response_code),
		    sense->response_code & SSD_RCODE_VALID ? 1 : 0);
		printf("\tseg 0x%x key 0x%x ili 0x%x eom 0x%x fmark 0x%x\n",
		    sense->segment,
		    SSD_SENSE_KEY(sense->flags),
		    sense->flags & SSD_ILI ? 1 : 0,
		    sense->flags & SSD_EOM ? 1 : 0,
		    sense->flags & SSD_FILEMARK ? 1 : 0);
		printf("\ninfo: 0x%x 0x%x 0x%x 0x%x followed by %d "
		    "extra bytes\n",
		    sense->info[0],
		    sense->info[1],
		    sense->info[2],
		    sense->info[3],
		    sense->extra_len);
		printf("\textra: ");
		for (count = 0; count < SSD_ADD_BYTES_LIM(sense); count++)
			printf("0x%x ", sense->csi[count]);
		printf("\n");
	}
#endif

	/*
	 * If the periph has its own error handler, call it first.
	 * If it returns a legit error value, return that, otherwise
	 * it wants us to continue with normal error processing.
	 */
	if (periph->periph_switch->psw_error != NULL) {
		SC_DEBUG(periph, SCSIPI_DB2,
		    ("calling private err_handler()\n"));
		error = (*periph->periph_switch->psw_error)(xs);
		if (error != EJUSTRETURN)
			return (error);
	}
	/* otherwise use the default */
	switch (SSD_RCODE(sense->response_code)) {

		/*
		 * Old SCSI-1 and SASI devices respond with
		 * codes other than 70.
		 */
	case 0x00:	/* no error (command completed OK) */
		return (0);
	case 0x04:	/* drive not ready after it was selected */
		if ((periph->periph_flags & PERIPH_REMOVABLE) != 0)
			periph->periph_flags &= ~PERIPH_MEDIA_LOADED;
		if ((xs->xs_control & XS_CTL_IGNORE_NOT_READY) != 0)
			return (0);
		/* XXX - display some sort of error here? */
		return (EIO);
	case 0x20:	/* invalid command */
		if ((xs->xs_control &
		    XS_CTL_IGNORE_ILLEGAL_REQUEST) != 0)
			return (0);
		return (EINVAL);
	case 0x25:	/* invalid LUN (Adaptec ACB-4000) */
		return (EACCES);

		/*
		 * If it's code 70, use the extended stuff and
		 * interpret the key
		 */
	case 0x71:	/* delayed error */
		scsipi_printaddr(periph);
		key = SSD_SENSE_KEY(sense->flags);
		printf(" DEFERRED ERROR, key = 0x%x\n", key);
		/* FALLTHROUGH */
	case 0x70:
#ifndef SCSIVERBOSE
		if ((sense->response_code & SSD_RCODE_VALID) != 0)
			info = _4btol(sense->info);
		else
			info = 0;
#endif
		key = SSD_SENSE_KEY(sense->flags);

		switch (key) {
		case SKEY_NO_SENSE:
		case SKEY_RECOVERED_ERROR:
			if (xs->resid == xs->datalen && xs->datalen) {
				/*
				 * Why is this here?
				 */
				xs->resid = 0;	/* not short read */
			}
			/* FALLTHROUGH */
		case SKEY_EQUAL:
			error = 0;
			break;
		case SKEY_NOT_READY:
			if ((periph->periph_flags & PERIPH_REMOVABLE) != 0)
				periph->periph_flags &= ~PERIPH_MEDIA_LOADED;
			if ((xs->xs_control & XS_CTL_IGNORE_NOT_READY) != 0)
				return (0);
			if (sense->asc == 0x3A) {
				error = ENODEV; /* Medium not present */
				if (xs->xs_control & XS_CTL_SILENT_NODEV)
					return (error);
			} else
				error = EIO;
			if ((xs->xs_control & XS_CTL_SILENT) != 0)
				return (error);
			break;
		case SKEY_ILLEGAL_REQUEST:
			if ((xs->xs_control &
			    XS_CTL_IGNORE_ILLEGAL_REQUEST) != 0)
				return (0);
			/*
			 * Handle the case where a device reports
			 * Logical Unit Not Supported during discovery.
			 */
			if ((xs->xs_control & XS_CTL_DISCOVERY) != 0 &&
			    sense->asc == 0x25 &&
			    sense->ascq == 0x00)
				return (EINVAL);
			if ((xs->xs_control & XS_CTL_SILENT) != 0)
				return (EIO);
			error = EINVAL;
			break;
		case SKEY_UNIT_ATTENTION:
			if (sense->asc == 0x29 &&
			    sense->ascq == 0x00) {
				/* device or bus reset */
				return (ERESTART);
			}
			if ((periph->periph_flags & PERIPH_REMOVABLE) != 0)
				periph->periph_flags &= ~PERIPH_MEDIA_LOADED;
			if ((xs->xs_control &
			    XS_CTL_IGNORE_MEDIA_CHANGE) != 0 ||
			    /* XXX Should reupload any transient state. */
			    (periph->periph_flags &
			    PERIPH_REMOVABLE) == 0) {
				return (ERESTART);
			}
			if ((xs->xs_control & XS_CTL_SILENT) != 0)
				return (EIO);
			error = EIO;
			break;
		case SKEY_DATA_PROTECT:
			error = EROFS;
			break;
		case SKEY_BLANK_CHECK:
			error = 0;
			break;
		case SKEY_ABORTED_COMMAND:
			if (xs->xs_retries != 0) {
				xs->xs_retries--;
				error = ERESTART;
			} else
				error = EIO;
			break;
		case SKEY_VOLUME_OVERFLOW:
			error = ENOSPC;
			break;
		default:
			error = EIO;
			break;
		}

#ifdef SCSIVERBOSE
		if (key && (xs->xs_control & XS_CTL_SILENT) == 0)
			scsipi_print_sense(xs, 0);
#else
		if (key) {
			scsipi_printaddr(periph);
			printf("%s", error_mes[key - 1]);
			if ((sense->response_code & SSD_RCODE_VALID) != 0) {
				switch (key) {
				case SKEY_NOT_READY:
				case SKEY_ILLEGAL_REQUEST:
				case SKEY_UNIT_ATTENTION:
				case SKEY_DATA_PROTECT:
					break;
				case SKEY_BLANK_CHECK:
					printf(", requested size: %d (decimal)",
					    info);
					break;
				case SKEY_ABORTED_COMMAND:
					if (xs->xs_retries)
						printf(", retrying");
					printf(", cmd 0x%x, info 0x%x",
					    xs->cmd->opcode, info);
					break;
				default:
					printf(", info = %d (decimal)", info);
				}
			}
			if (sense->extra_len != 0) {
				int n;
				printf(", data =");
				for (n = 0; n < sense->extra_len; n++)
					printf(" %02x",
					    sense->csi[n]);
			}
			printf("\n");
		}
#endif
		return (error);

		/*
		 * Some other code, just report it
		 */
	default:
#if defined(SCSIDEBUG) || defined(DEBUG)
	{
		static const char *uc = "undecodable sense error";
		int i;
		u_int8_t *cptr = (u_int8_t *) sense;
		scsipi_printaddr(periph);
		if (xs->cmd == &xs->cmdstore) {
			printf("%s for opcode 0x%x, data=",
			    uc, xs->cmdstore.opcode);
		} else {
			printf("%s, data=", uc);
		}
		for (i = 0; i < sizeof (*sense); i++)
			printf(" 0x%02x", *(cptr++) & 0xff);
		printf("\n");
	}
#else
		scsipi_printaddr(periph);
		printf("Sense Error Code 0x%x",
		    SSD_RCODE(sense->response_code));
		if ((sense->response_code & SSD_RCODE_VALID) != 0) {
			struct scsi_sense_data_unextended *usense =
			    (struct scsi_sense_data_unextended *)sense;
			printf(" at block no. %d (decimal)",
			    _3btol(usense->block));
		}
		printf("\n");
#endif
		return (EIO);
	}
}

/*
 * scsipi_test_unit_ready:
 *
 *	Issue a `test unit ready' request.
 */
int
scsipi_test_unit_ready(struct scsipi_periph *periph, int flags)
{
	struct scsi_test_unit_ready cmd;
	int retries;

	/* some ATAPI drives don't support TEST UNIT READY. Sigh */
	if (periph->periph_quirks & PQUIRK_NOTUR)
		return (0);

	if (flags & XS_CTL_DISCOVERY)
		retries = 0;
	else
		retries = SCSIPIRETRIES;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = SCSI_TEST_UNIT_READY;

	return (scsipi_command(periph, (void *)&cmd, sizeof(cmd), 0, 0,
	    retries, 10000, NULL, flags));
}
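
/*
 * Example caller (a sketch, not from this file): a disk driver probing
 * for media might issue
 *
 *	error = scsipi_test_unit_ready(periph,
 *	    XS_CTL_DISCOVERY | XS_CTL_SILENT);
 *
 * and treat an ENODEV result from the sense interpreter as "no medium
 * present".  The 10000 above is the command timeout in milliseconds.
 */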

/*
 * scsipi_inquire:
 *
 *	Ask the device about itself.
 */
int
scsipi_inquire(struct scsipi_periph *periph, struct scsipi_inquiry_data *inqbuf,
    int flags)
{
	struct scsipi_inquiry cmd;
	int error;
	int retries;

	if (flags & XS_CTL_DISCOVERY)
		retries = 0;
	else
		retries = SCSIPIRETRIES;

	/*
	 * If we request more data than the device can provide, it SHOULD just
	 * return a short response.  However, some devices error with an
	 * ILLEGAL REQUEST sense code, and yet others have even more special
	 * failure modes (such as the GL641USB flash adapter, which goes loony
	 * and sends corrupted CRCs).  To work around this, and to bring our
	 * behavior more in line with other OSes, we do a shorter inquiry,
	 * covering all the SCSI-2 information, first, and then request more
	 * data iff the "additional length" field indicates there is more.
	 * - mycroft, 2003/10/16
	 */
	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = INQUIRY;
	cmd.length = SCSIPI_INQUIRY_LENGTH_SCSI2;
	error = scsipi_command(periph, (void *)&cmd, sizeof(cmd),
	    (void *)inqbuf, SCSIPI_INQUIRY_LENGTH_SCSI2, retries,
	    10000, NULL, flags | XS_CTL_DATA_IN);
	if (!error &&
	    inqbuf->additional_length > SCSIPI_INQUIRY_LENGTH_SCSI2 - 4) {
#if 0
printf("inquire: addlen=%d, retrying\n", inqbuf->additional_length);
#endif
		cmd.length = SCSIPI_INQUIRY_LENGTH_SCSI3;
		error = scsipi_command(periph, (void *)&cmd, sizeof(cmd),
		    (void *)inqbuf, SCSIPI_INQUIRY_LENGTH_SCSI3, retries,
		    10000, NULL, flags | XS_CTL_DATA_IN);
#if 0
printf("inquire: error=%d\n", error);
#endif
	}

#ifdef SCSI_OLD_NOINQUIRY
	/*
	 * Kludge for the Adaptec ACB-4000 SCSI->MFM translator.
	 * This board doesn't support the INQUIRY command at all.
	 */
	if (error == EINVAL || error == EACCES) {
		/*
		 * Conjure up an INQUIRY response.
		 */
		inqbuf->device = (error == EINVAL ?
		    SID_QUAL_LU_PRESENT :
		    SID_QUAL_LU_NOTPRESENT) | T_DIRECT;
		inqbuf->dev_qual2 = 0;
		inqbuf->version = 0;
		inqbuf->response_format = SID_FORMAT_SCSI1;
		inqbuf->additional_length = SCSIPI_INQUIRY_LENGTH_SCSI2 - 4;
		inqbuf->flags1 = inqbuf->flags2 = inqbuf->flags3 = 0;
		memcpy(inqbuf->vendor, "ADAPTEC ACB-4000            ", 28);
		error = 0;
	}

	/*
	 * Kludge for the Emulex MT-02 SCSI->QIC translator.
	 * This board gives an empty response to an INQUIRY command.
	 */
	else if (error == 0 &&
	    inqbuf->device == (SID_QUAL_LU_PRESENT | T_DIRECT) &&
	    inqbuf->dev_qual2 == 0 &&
	    inqbuf->version == 0 &&
	    inqbuf->response_format == SID_FORMAT_SCSI1) {
		/*
		 * Fill out the INQUIRY response.
		 */
		inqbuf->device = (SID_QUAL_LU_PRESENT | T_SEQUENTIAL);
		inqbuf->dev_qual2 = SID_REMOVABLE;
		inqbuf->additional_length = SCSIPI_INQUIRY_LENGTH_SCSI2 - 4;
		inqbuf->flags1 = inqbuf->flags2 = inqbuf->flags3 = 0;
		memcpy(inqbuf->vendor, "EMULEX  MT-02 QIC           ", 28);
	}
#endif /* SCSI_OLD_NOINQUIRY */

	return error;
}
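
/*
 * Worked arithmetic for the two-step probe above: the code treats the
 * response as (additional_length + 4) bytes long, so with the usual
 * 36-byte SCSI-2 request size (assuming that is the value of
 * SCSIPI_INQUIRY_LENGTH_SCSI2), a device reporting additional_length
 * of 32 or less is fully served by the first INQUIRY, while anything
 * larger triggers the second, SCSIPI_INQUIRY_LENGTH_SCSI3-sized one.
 */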

/*
 * scsipi_prevent:
 *
 *	Prevent or allow the user to remove the media
 */
int
scsipi_prevent(struct scsipi_periph *periph, int type, int flags)
{
	struct scsi_prevent_allow_medium_removal cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = SCSI_PREVENT_ALLOW_MEDIUM_REMOVAL;
	cmd.how = type;

	return (scsipi_command(periph, (void *)&cmd, sizeof(cmd), 0, 0,
	    SCSIPIRETRIES, 5000, NULL, flags));
}

/*
 * scsipi_start:
 *
 *	Send a START UNIT.
 */
int
scsipi_start(struct scsipi_periph *periph, int type, int flags)
{
	struct scsipi_start_stop cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = START_STOP;
	cmd.byte2 = 0x00;
	cmd.how = type;

	return (scsipi_command(periph, (void *)&cmd, sizeof(cmd), 0, 0,
	    SCSIPIRETRIES, (type & SSS_START) ? 60000 : 10000, NULL, flags));
}
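
/*
 * Example invocations (a sketch; the SSS_* bits are defined in the
 * scsipi headers): spinning a disk up, or stopping it and ejecting
 * the medium, would look like
 *
 *	error = scsipi_start(periph, SSS_START, 0);
 *	error = scsipi_start(periph, SSS_STOP | SSS_LOEJ, 0);
 *
 * Note the timeout above: starts get 60 seconds, since spin-up can be
 * slow, while stops get 10.
 */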

/*
 * scsipi_mode_sense, scsipi_mode_sense_big:
 *	get a sense page from a device
 */

int
scsipi_mode_sense(struct scsipi_periph *periph, int byte2, int page,
    struct scsi_mode_parameter_header_6 *data, int len, int flags, int retries,
    int timeout)
{
	struct scsi_mode_sense_6 cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = SCSI_MODE_SENSE_6;
	cmd.byte2 = byte2;
	cmd.page = page;
	cmd.length = len & 0xff;

	return (scsipi_command(periph, (void *)&cmd, sizeof(cmd),
	    (void *)data, len, retries, timeout, NULL, flags | XS_CTL_DATA_IN));
}

int
scsipi_mode_sense_big(struct scsipi_periph *periph, int byte2, int page,
    struct scsi_mode_parameter_header_10 *data, int len, int flags, int retries,
    int timeout)
{
	struct scsi_mode_sense_10 cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = SCSI_MODE_SENSE_10;
	cmd.byte2 = byte2;
	cmd.page = page;
	_lto2b(len, cmd.length);

	return (scsipi_command(periph, (void *)&cmd, sizeof(cmd),
	    (void *)data, len, retries, timeout, NULL, flags | XS_CTL_DATA_IN));
}

int
scsipi_mode_select(struct scsipi_periph *periph, int byte2,
    struct scsi_mode_parameter_header_6 *data, int len, int flags, int retries,
    int timeout)
{
	struct scsi_mode_select_6 cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = SCSI_MODE_SELECT_6;
	cmd.byte2 = byte2;
	cmd.length = len & 0xff;

	return (scsipi_command(periph, (void *)&cmd, sizeof(cmd),
	    (void *)data, len, retries, timeout, NULL, flags | XS_CTL_DATA_OUT));
}

int
scsipi_mode_select_big(struct scsipi_periph *periph, int byte2,
    struct scsi_mode_parameter_header_10 *data, int len, int flags, int retries,
    int timeout)
{
	struct scsi_mode_select_10 cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = SCSI_MODE_SELECT_10;
	cmd.byte2 = byte2;
	_lto2b(len, cmd.length);

	return (scsipi_command(periph, (void *)&cmd, sizeof(cmd),
	    (void *)data, len, retries, timeout, NULL, flags | XS_CTL_DATA_OUT));
}
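
/*
 * Example (a sketch): fetching a mode page into a caller-supplied
 * buffer that begins with the 6-byte parameter header, for some page
 * number `page'.  The 6-byte CDB carries a one-byte allocation length,
 * so the buffer must stay at or below 255 bytes:
 *
 *	struct {
 *		struct scsi_mode_parameter_header_6 hdr;
 *		uint8_t page_data[128];
 *	} buf;
 *	int error;
 *
 *	error = scsipi_mode_sense(periph, 0, page, &buf.hdr,
 *	    sizeof(buf), 0, SCSIPIRETRIES, 10000);
 *
 * The _big variants differ only in using the 10-byte CDBs with a
 * two-byte length field, for targets or pages needing more data.
 */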

/*
 * scsipi_done:
 *
 *	This routine is called by an adapter's interrupt handler when
 *	an xfer is completed.
 */
void
scsipi_done(struct scsipi_xfer *xs)
{
	struct scsipi_periph *periph = xs->xs_periph;
	struct scsipi_channel *chan = periph->periph_channel;
	int s, freezecnt;

	SC_DEBUG(periph, SCSIPI_DB2, ("scsipi_done\n"));
#ifdef SCSIPI_DEBUG
	if (periph->periph_dbflags & SCSIPI_DB1)
		show_scsipi_cmd(xs);
#endif

	s = splbio();
	/*
	 * The resource this command was using is now free.
	 */
	if (xs->xs_status & XS_STS_DONE) {
		/* XXX in certain circumstances, such as a device
		 * being detached, a xs that has already been
		 * scsipi_done()'d by the main thread will be done'd
		 * again by scsibusdetach(). Putting the xs on the
		 * chan_complete queue causes list corruption and
		 * everyone dies. This prevents that, but perhaps
		 * there should be better coordination somewhere such
		 * that this won't ever happen (and can be turned into
		 * a KASSERT()).
		 */
		splx(s);
		goto out;
	}
	scsipi_put_resource(chan);
	xs->xs_periph->periph_sent--;

	/*
	 * If the command was tagged, free the tag.
	 */
	if (XS_CTL_TAGTYPE(xs) != 0)
		scsipi_put_tag(xs);
	else
		periph->periph_flags &= ~PERIPH_UNTAG;

	/* Mark the command as `done'. */
	xs->xs_status |= XS_STS_DONE;

#ifdef DIAGNOSTIC
	if ((xs->xs_control & (XS_CTL_ASYNC|XS_CTL_POLL)) ==
	    (XS_CTL_ASYNC|XS_CTL_POLL))
		panic("scsipi_done: ASYNC and POLL");
#endif

	/*
	 * If the xfer had an error of any sort, freeze the
	 * periph's queue.  Freeze it again if we were requested
	 * to do so in the xfer.
	 */
	freezecnt = 0;
	if (xs->error != XS_NOERROR)
		freezecnt++;
	if (xs->xs_control & XS_CTL_FREEZE_PERIPH)
		freezecnt++;
	if (freezecnt != 0)
		scsipi_periph_freeze(periph, freezecnt);

	/*
	 * Record the xfer with a pending sense, in case a SCSI reset is
	 * received before the thread is woken up.
	 */
	if (xs->error == XS_BUSY && xs->status == SCSI_CHECK) {
		periph->periph_flags |= PERIPH_SENSE;
		periph->periph_xscheck = xs;
	}

	/*
	 * If this was an xfer that was not to complete asynchronously,
	 * let the requesting thread perform error checking/handling
	 * in its context.
	 */
	if ((xs->xs_control & XS_CTL_ASYNC) == 0) {
		splx(s);
		/*
		 * If it's a polling job, just return, to unwind the
		 * call graph.  We don't need to restart the queue,
		 * because polling jobs are treated specially, and
		 * are really only used during crash dumps anyway
		 * (XXX or during boot-time autoconfiguration of
		 * ATAPI devices).
		 */
		if (xs->xs_control & XS_CTL_POLL)
			return;
		wakeup(xs);
		goto out;
	}

	/*
	 * Catch the extremely common case of I/O completing
	 * without error; no use in taking a context switch
	 * if we can handle it in interrupt context.
	 */
	if (xs->error == XS_NOERROR) {
		splx(s);
		(void) scsipi_complete(xs);
		goto out;
	}

	/*
	 * There is an error on this xfer.  Put it on the channel's
	 * completion queue, and wake up the completion thread.
	 */
	TAILQ_INSERT_TAIL(&chan->chan_complete, xs, channel_q);
	splx(s);
	wakeup(&chan->chan_complete);

 out:
	/*
	 * If there are more xfers on the channel's queue, attempt to
	 * run them.
	 */
	scsipi_run_queue(chan);
}

/*
 * scsipi_complete:
 *
 *	Completion of a scsipi_xfer.  This is the guts of scsipi_done().
 *
 *	NOTE: This routine MUST be called with valid thread context
 *	except for the case where the following two conditions are
 *	true:
 *
 *		xs->error == XS_NOERROR
 *		XS_CTL_ASYNC is set in xs->xs_control
 *
 *	The semantics of this routine can be tricky, so here is an
 *	explanation:
 *
 *		0		Xfer completed successfully.
 *
 *		ERESTART	Xfer had an error, but was restarted.
 *
 *		anything else	Xfer had an error, return value is Unix
 *				errno.
 *
 *	If the return value is anything but ERESTART:
 *
 *		- If XS_CTL_ASYNC is set, `xs' has been freed back to
 *		  the pool.
 *
 *		- If there is a buf associated with the xfer,
 *		  it has been biodone()'d.
 */
static int
scsipi_complete(struct scsipi_xfer *xs)
{
	struct scsipi_periph *periph = xs->xs_periph;
	struct scsipi_channel *chan = periph->periph_channel;
	int error, s;

#ifdef DIAGNOSTIC
	if ((xs->xs_control & XS_CTL_ASYNC) != 0 && xs->bp == NULL)
		panic("scsipi_complete: XS_CTL_ASYNC but no buf");
#endif
	/*
	 * If command terminated with a CHECK CONDITION, we need to issue a
	 * REQUEST_SENSE command. Once the REQUEST_SENSE has been processed
	 * we'll have the real status.
	 * Must be processed at splbio() to avoid missing a SCSI bus reset
	 * for this command.
	 */
	s = splbio();
	if (xs->error == XS_BUSY && xs->status == SCSI_CHECK) {
		/* request sense for a request sense? */
		if (xs->xs_control & XS_CTL_REQSENSE) {
			scsipi_printaddr(periph);
			printf("request sense for a request sense?\n");
			/* XXX maybe we should reset the device ? */
			/* we've been frozen because xs->error != XS_NOERROR */
			scsipi_periph_thaw(periph, 1);
			splx(s);
			if (xs->resid < xs->datalen) {
				printf("we read %d bytes of sense anyway:\n",
				    xs->datalen - xs->resid);
#ifdef SCSIVERBOSE
				scsipi_print_sense_data((void *)xs->data, 0);
#endif
			}
			return EINVAL;
		}
		scsipi_request_sense(xs);
	}
	splx(s);

	/*
	 * If it's a user level request, bypass all usual completion
	 * processing, let the user work it out..
	 */
	if ((xs->xs_control & XS_CTL_USERCMD) != 0) {
		SC_DEBUG(periph, SCSIPI_DB3, ("calling user done()\n"));
		if (xs->error != XS_NOERROR)
			scsipi_periph_thaw(periph, 1);
		scsipi_user_done(xs);
		SC_DEBUG(periph, SCSIPI_DB3, ("returned from user done()\n "));
		return 0;
	}

	switch (xs->error) {
	case XS_NOERROR:
		error = 0;
		break;

	case XS_SENSE:
	case XS_SHORTSENSE:
		error = (*chan->chan_bustype->bustype_interpret_sense)(xs);
		break;

	case XS_RESOURCE_SHORTAGE:
		/*
		 * XXX Should freeze channel's queue.
		 */
		scsipi_printaddr(periph);
		printf("adapter resource shortage\n");
		/* FALLTHROUGH */

	case XS_BUSY:
		if (xs->error == XS_BUSY && xs->status == SCSI_QUEUE_FULL) {
			struct scsipi_max_openings mo;

			/*
			 * We set the openings to active - 1, assuming that
			 * the command that got us here is the first one that
			 * can't fit into the device's queue.  If that's not
			 * the case, I guess we'll find out soon enough.
			 */
			mo.mo_target = periph->periph_target;
			mo.mo_lun = periph->periph_lun;
			if (periph->periph_active < periph->periph_openings)
				mo.mo_openings = periph->periph_active - 1;
			else
				mo.mo_openings = periph->periph_openings - 1;
#ifdef DIAGNOSTIC
			if (mo.mo_openings < 0) {
				scsipi_printaddr(periph);
				printf("QUEUE FULL resulted in < 0 openings\n");
				panic("scsipi_done");
			}
#endif
			if (mo.mo_openings == 0) {
				scsipi_printaddr(periph);
				printf("QUEUE FULL resulted in 0 openings\n");
				mo.mo_openings = 1;
			}
			scsipi_async_event(chan, ASYNC_EVENT_MAX_OPENINGS, &mo);
			error = ERESTART;
		} else if (xs->xs_retries != 0) {
			xs->xs_retries--;
			/*
			 * Wait one second, and try again.
			 */
			if ((xs->xs_control & XS_CTL_POLL) ||
			    (chan->chan_flags & SCSIPI_CHAN_TACTIVE) == 0) {
				delay(1000000);
			} else if (!callout_pending(&periph->periph_callout)) {
				scsipi_periph_freeze(periph, 1);
				callout_reset(&periph->periph_callout,
				    hz, scsipi_periph_timed_thaw, periph);
			}
			error = ERESTART;
		} else
			error = EBUSY;
		break;

	case XS_REQUEUE:
		error = ERESTART;
		break;

	case XS_SELTIMEOUT:
	case XS_TIMEOUT:
		/*
		 * If the device hasn't gone away, honor retry counts.
		 *
		 * Note that if we're in the middle of probing it,
		 * it won't be found because it isn't here yet so
		 * we won't honor the retry count in that case.
		 */
		if (scsipi_lookup_periph(chan, periph->periph_target,
		    periph->periph_lun) && xs->xs_retries != 0) {
			xs->xs_retries--;
			error = ERESTART;
		} else
			error = EIO;
		break;

	case XS_RESET:
		if (xs->xs_control & XS_CTL_REQSENSE) {
			/*
			 * request sense interrupted by reset: signal it
			 * with EINTR return code.
			 */
			error = EINTR;
		} else {
			if (xs->xs_retries != 0) {
				xs->xs_retries--;
				error = ERESTART;
			} else
				error = EIO;
		}
		break;

	case XS_DRIVER_STUFFUP:
		scsipi_printaddr(periph);
		printf("generic HBA error\n");
		error = EIO;
		break;
	default:
		scsipi_printaddr(periph);
		printf("invalid return code from adapter: %d\n", xs->error);
		error = EIO;
		break;
	}

	s = splbio();
	if (error == ERESTART) {
		/*
		 * If we get here, the periph has been thawed and frozen
		 * again if we had to issue recovery commands.  Alternatively,
		 * it may have been frozen again and in a timed thaw.  In
		 * any case, we thaw the periph once we re-enqueue the
		 * command.  Once the periph is fully thawed, it will begin
		 * operation again.
		 */
		xs->error = XS_NOERROR;
		xs->status = SCSI_OK;
		xs->xs_status &= ~XS_STS_DONE;
		xs->xs_requeuecnt++;
		error = scsipi_enqueue(xs);
		if (error == 0) {
			scsipi_periph_thaw(periph, 1);
			splx(s);
			return (ERESTART);
		}
	}

	/*
	 * scsipi_done() freezes the queue if not XS_NOERROR.
	 * Thaw it here.
	 */
	if (xs->error != XS_NOERROR)
		scsipi_periph_thaw(periph, 1);

	if (periph->periph_switch->psw_done)
		periph->periph_switch->psw_done(xs, error);

	if (xs->xs_control & XS_CTL_ASYNC)
		scsipi_put_xs(xs);
	splx(s);

	return (error);
}

/*
 * Issue a request sense for the given scsipi_xfer.  Called when the xfer
 * returns with a CHECK_CONDITION status.  Must be called in valid thread
 * context and at splbio().
 */

static void
scsipi_request_sense(struct scsipi_xfer *xs)
{
	struct scsipi_periph *periph = xs->xs_periph;
	int flags, error;
	struct scsi_request_sense cmd;

	periph->periph_flags |= PERIPH_SENSE;

	/* if command was polling, request sense will too */
	flags = xs->xs_control & XS_CTL_POLL;
	/* Polling commands can't sleep */
	if (flags)
		flags |= XS_CTL_NOSLEEP;

	flags |= XS_CTL_REQSENSE | XS_CTL_URGENT | XS_CTL_DATA_IN |
	    XS_CTL_THAW_PERIPH | XS_CTL_FREEZE_PERIPH;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = SCSI_REQUEST_SENSE;
	cmd.length = sizeof(struct scsi_sense_data);

	error = scsipi_command(periph, (void *)&cmd, sizeof(cmd),
	    (void *)&xs->sense.scsi_sense, sizeof(struct scsi_sense_data),
	    0, 1000, NULL, flags);
	periph->periph_flags &= ~PERIPH_SENSE;
	periph->periph_xscheck = NULL;
	switch (error) {
	case 0:
		/* we have a valid sense */
		xs->error = XS_SENSE;
		return;
	case EINTR:
		/* REQUEST_SENSE interrupted by bus reset. */
		xs->error = XS_RESET;
		return;
	case EIO:
		/* request sense couldn't be performed */
		/*
		 * XXX this isn't quite right but we don't have anything
		 * better for now
		 */
		xs->error = XS_DRIVER_STUFFUP;
		return;
	default:
		/* Notify that request sense failed. */
		xs->error = XS_DRIVER_STUFFUP;
		scsipi_printaddr(periph);
		printf("request sense failed with error %d\n", error);
		return;
	}
}

/*
 * scsipi_enqueue:
 *
 *	Enqueue an xfer on a channel.
 */
static int
scsipi_enqueue(struct scsipi_xfer *xs)
{
	struct scsipi_channel *chan = xs->xs_periph->periph_channel;
	struct scsipi_xfer *qxs;
	int s;

	s = splbio();

	/*
	 * If the xfer is to be polled, and there are already jobs on
	 * the queue, we can't proceed.
	 */
	if ((xs->xs_control & XS_CTL_POLL) != 0 &&
	    TAILQ_FIRST(&chan->chan_queue) != NULL) {
		splx(s);
		xs->error = XS_DRIVER_STUFFUP;
		return (EAGAIN);
	}

	/*
	 * If we have an URGENT xfer, it's an error recovery command
	 * and it should just go on the head of the channel's queue.
	 */
	if (xs->xs_control & XS_CTL_URGENT) {
		TAILQ_INSERT_HEAD(&chan->chan_queue, xs, channel_q);
		goto out;
	}

	/*
	 * If this xfer has already been on the queue before, we
	 * need to reinsert it in the correct order.  That order is:
	 *
	 *	Immediately before the first xfer for this periph
	 *	with a requeuecnt less than xs->xs_requeuecnt.
	 *
	 *	Failing that, at the end of the queue.  (We'll end up
	 *	there naturally.)
	 */
	if (xs->xs_requeuecnt != 0) {
		for (qxs = TAILQ_FIRST(&chan->chan_queue); qxs != NULL;
		     qxs = TAILQ_NEXT(qxs, channel_q)) {
			if (qxs->xs_periph == xs->xs_periph &&
			    qxs->xs_requeuecnt < xs->xs_requeuecnt)
				break;
		}
		if (qxs != NULL) {
			TAILQ_INSERT_AFTER(&chan->chan_queue, qxs, xs,
			    channel_q);
			goto out;
		}
	}
	TAILQ_INSERT_TAIL(&chan->chan_queue, xs, channel_q);
 out:
	if (xs->xs_control & XS_CTL_THAW_PERIPH)
		scsipi_periph_thaw(xs->xs_periph, 1);
	splx(s);
	return (0);
}

/*
 * scsipi_run_queue:
 *
 *	Start as many xfers as possible running on the channel.
 */
static void
scsipi_run_queue(struct scsipi_channel *chan)
{
	struct scsipi_xfer *xs;
	struct scsipi_periph *periph;
	int s;

	for (;;) {
		s = splbio();

		/*
		 * If the channel is frozen, we can't do any work right
		 * now.
		 */
		if (chan->chan_qfreeze != 0) {
			splx(s);
			return;
		}

		/*
		 * Look for work to do, and make sure we can do it.
		 */
		for (xs = TAILQ_FIRST(&chan->chan_queue); xs != NULL;
		     xs = TAILQ_NEXT(xs, channel_q)) {
			periph = xs->xs_periph;

			if ((periph->periph_sent >= periph->periph_openings) ||
			    periph->periph_qfreeze != 0 ||
			    (periph->periph_flags & PERIPH_UNTAG) != 0)
				continue;

			if ((periph->periph_flags &
			    (PERIPH_RECOVERING | PERIPH_SENSE)) != 0 &&
			    (xs->xs_control & XS_CTL_URGENT) == 0)
				continue;

			/*
			 * We can issue this xfer!
			 */
			goto got_one;
		}

		/*
		 * Can't find any work to do right now.
		 */
		splx(s);
		return;

 got_one:
		/*
		 * Have an xfer to run.  Allocate a resource from
		 * the adapter to run it.  If we can't allocate that
		 * resource, we don't dequeue the xfer.
		 */
		if (scsipi_get_resource(chan) == 0) {
			/*
			 * Adapter is out of resources.  If the adapter
			 * supports it, attempt to grow them.
			 */
			if (scsipi_grow_resources(chan) == 0) {
				/*
				 * Wasn't able to grow resources,
				 * nothing more we can do.
				 */
				if (xs->xs_control & XS_CTL_POLL) {
					scsipi_printaddr(xs->xs_periph);
1818 printf("polling command but no "
1819 "adapter resources");
					/* We'll panic shortly... */
				}
				splx(s);

				/*
				 * XXX: We should be able to note that
				 * XXX: resources are needed here!
				 */
				return;
			}
			/*
			 * scsipi_grow_resources() allocated the resource
			 * for us.
			 */
		}

		/*
		 * We have a resource to run this xfer, do it!
		 */
		TAILQ_REMOVE(&chan->chan_queue, xs, channel_q);

		/*
		 * If the command is to be tagged, allocate a tag ID
		 * for it.
		 */
		if (XS_CTL_TAGTYPE(xs) != 0)
			scsipi_get_tag(xs);
		else
			periph->periph_flags |= PERIPH_UNTAG;
		periph->periph_sent++;
		splx(s);

		scsipi_adapter_request(chan, ADAPTER_REQ_RUN_XFER, xs);
	}
#ifdef DIAGNOSTIC
	panic("scsipi_run_queue: impossible");
#endif
}

/*
 * scsipi_execute_xs:
 *
 *	Begin execution of an xfer, waiting for it to complete, if necessary.
 */
int
scsipi_execute_xs(struct scsipi_xfer *xs)
{
	struct scsipi_periph *periph = xs->xs_periph;
	struct scsipi_channel *chan = periph->periph_channel;
	int oasync, async, poll, error, s;

	KASSERT(!cold);

	(chan->chan_bustype->bustype_cmd)(xs);

	if (xs->xs_control & XS_CTL_DATA_ONSTACK) {
#if 1
		if (xs->xs_control & XS_CTL_ASYNC)
			panic("scsipi_execute_xs: on stack and async");
#endif
		/*
		 * If the I/O buffer is allocated on stack, the
		 * process must NOT be swapped out, as the device will
		 * be accessing the stack.
		 */
		uvm_lwp_hold(curlwp);
	}

	xs->xs_status &= ~XS_STS_DONE;
	xs->error = XS_NOERROR;
	xs->resid = xs->datalen;
	xs->status = SCSI_OK;

#ifdef SCSIPI_DEBUG
	if (xs->xs_periph->periph_dbflags & SCSIPI_DB3) {
		printf("scsipi_execute_xs: ");
		show_scsipi_xs(xs);
		printf("\n");
	}
#endif

	/*
	 * Deal with command tagging:
	 *
	 *	- If the device's current operating mode doesn't
	 *	  include tagged queueing, clear the tag mask.
	 *
	 *	- If the device's current operating mode *does*
	 *	  include tagged queueing, set the tag_type in
	 *	  the xfer to the appropriate byte for the tag
	 *	  message.
	 */
	if ((PERIPH_XFER_MODE(periph) & PERIPH_CAP_TQING) == 0 ||
	    (xs->xs_control & XS_CTL_REQSENSE)) {
		xs->xs_control &= ~XS_CTL_TAGMASK;
		xs->xs_tag_type = 0;
	} else {
		/*
		 * If the request doesn't specify a tag, give Head
		 * tags to URGENT operations and Ordered tags to
		 * everything else.
		 */
		if (XS_CTL_TAGTYPE(xs) == 0) {
			if (xs->xs_control & XS_CTL_URGENT)
				xs->xs_control |= XS_CTL_HEAD_TAG;
			else
				xs->xs_control |= XS_CTL_ORDERED_TAG;
		}

		switch (XS_CTL_TAGTYPE(xs)) {
		case XS_CTL_ORDERED_TAG:
			xs->xs_tag_type = MSG_ORDERED_Q_TAG;
			break;

		case XS_CTL_SIMPLE_TAG:
			xs->xs_tag_type = MSG_SIMPLE_Q_TAG;
			break;

		case XS_CTL_HEAD_TAG:
			xs->xs_tag_type = MSG_HEAD_OF_Q_TAG;
			break;

		default:
			scsipi_printaddr(periph);
			printf("invalid tag mask 0x%08x\n",
			    XS_CTL_TAGTYPE(xs));
			panic("scsipi_execute_xs");
		}
	}

	/* If the adapter wants us to poll, poll. */
	if (chan->chan_adapter->adapt_flags & SCSIPI_ADAPT_POLL_ONLY)
		xs->xs_control |= XS_CTL_POLL;

	/*
	 * If we don't yet have a completion thread, or we are to poll for
	 * completion, clear the ASYNC flag.
	 */
	oasync = (xs->xs_control & XS_CTL_ASYNC);
	if (chan->chan_thread == NULL || (xs->xs_control & XS_CTL_POLL) != 0)
		xs->xs_control &= ~XS_CTL_ASYNC;

	async = (xs->xs_control & XS_CTL_ASYNC);
	poll = (xs->xs_control & XS_CTL_POLL);

#ifdef DIAGNOSTIC
	if (oasync != 0 && xs->bp == NULL)
		panic("scsipi_execute_xs: XS_CTL_ASYNC but no buf");
#endif

	/*
	 * Enqueue the transfer.  If we're not polling for completion, this
	 * should ALWAYS return `no error'.
	 */
	error = scsipi_enqueue(xs);
	if (error) {
		if (poll == 0) {
			scsipi_printaddr(periph);
			printf("not polling, but enqueue failed with %d\n",
			    error);
			panic("scsipi_execute_xs");
		}

		scsipi_printaddr(periph);
		printf("should have flushed queue?\n");
		goto free_xs;
	}

 restarted:
	scsipi_run_queue(chan);

	/*
	 * The xfer is enqueued, and possibly running.  If it's to be
	 * completed asynchronously, just return now.
	 */
	if (async)
		return (0);

	/*
	 * Not an asynchronous command; wait for it to complete.
	 */
	s = splbio();
	while ((xs->xs_status & XS_STS_DONE) == 0) {
		if (poll) {
			scsipi_printaddr(periph);
			printf("polling command not done\n");
			panic("scsipi_execute_xs");
		}
		(void) tsleep(xs, PRIBIO, "xscmd", 0);
	}
	splx(s);

	/*
	 * Command is complete.  scsipi_done() has awakened us to perform
	 * the error handling.
	 */
	error = scsipi_complete(xs);
	if (error == ERESTART)
		goto restarted;

	/*
	 * If it was meant to run async and we cleared async ourselves,
	 * don't return an error here.  It has already been handled.
2023 */
2024 if (oasync)
2025 error = 0;
2026 /*
2027 * Command completed successfully or fatal error occurred. Fall
2028 * into....
2029 */
2030 free_xs:
2031 if (xs->xs_control & XS_CTL_DATA_ONSTACK)
2032 uvm_lwp_rele(curlwp);
2033
2034 s = splbio();
2035 scsipi_put_xs(xs);
2036 splx(s);
2037
2038 /*
2039 * Kick the queue, keep it running in case it stopped for some
2040 * reason.
2041 */
2042 scsipi_run_queue(chan);
2043
2044 return (error);
2045 }
2046
2047 /*
2048 * scsipi_completion_thread:
2049 *
2050 * This is the completion thread. We wait for errors on
2051 * asynchronous xfers, and perform the error handling
2052 * function, restarting the command, if necessary.
2053 */
2054 static void
2055 scsipi_completion_thread(void *arg)
2056 {
2057 struct scsipi_channel *chan = arg;
2058 struct scsipi_xfer *xs;
2059 int s;
2060
2061 if (chan->chan_init_cb)
2062 (*chan->chan_init_cb)(chan, chan->chan_init_cb_arg);
2063
2064 s = splbio();
2065 chan->chan_flags |= SCSIPI_CHAN_TACTIVE;
2066 splx(s);
2067 for (;;) {
2068 s = splbio();
2069 xs = TAILQ_FIRST(&chan->chan_complete);
2070 if (xs == NULL && chan->chan_tflags == 0) {
2071 /* nothing to do; wait */
2072 (void) tsleep(&chan->chan_complete, PRIBIO,
2073 "sccomp", 0);
2074 splx(s);
2075 continue;
2076 }
2077 if (chan->chan_tflags & SCSIPI_CHANT_CALLBACK) {
2078 /* call chan_callback from thread context */
2079 chan->chan_tflags &= ~SCSIPI_CHANT_CALLBACK;
2080 chan->chan_callback(chan, chan->chan_callback_arg);
2081 splx(s);
2082 continue;
2083 }
2084 if (chan->chan_tflags & SCSIPI_CHANT_GROWRES) {
2085 /* attempt to get more openings for this channel */
2086 chan->chan_tflags &= ~SCSIPI_CHANT_GROWRES;
2087 scsipi_adapter_request(chan,
2088 ADAPTER_REQ_GROW_RESOURCES, NULL);
2089 scsipi_channel_thaw(chan, 1);
2090 splx(s);
2091 if (chan->chan_tflags & SCSIPI_CHANT_GROWRES)
2092 kpause("scsizzz", FALSE, hz/10, NULL);
2093 continue;
2094 }
2095 if (chan->chan_tflags & SCSIPI_CHANT_KICK) {
2096 /* explicitly run the queues for this channel */
2097 chan->chan_tflags &= ~SCSIPI_CHANT_KICK;
2098 scsipi_run_queue(chan);
2099 splx(s);
2100 continue;
2101 }
2102 if (chan->chan_tflags & SCSIPI_CHANT_SHUTDOWN) {
2103 splx(s);
2104 break;
2105 }
2106 if (xs) {
2107 TAILQ_REMOVE(&chan->chan_complete, xs, channel_q);
2108 splx(s);
2109
2110 /*
2111 * Have an xfer with an error; process it.
2112 */
2113 (void) scsipi_complete(xs);
2114
2115 /*
2116 * Kick the queue; keep it running if it was stopped
2117 * for some reason.
2118 */
2119 scsipi_run_queue(chan);
2120 } else {
2121 splx(s);
2122 }
2123 }
2124
2125 chan->chan_thread = NULL;
2126
2127 /* In case parent is waiting for us to exit. */
2128 wakeup(&chan->chan_thread);
2129
2130 kthread_exit(0);
2131 }

2132 /*
2133 * scsipi_thread_call_callback:
2134 *
2135 * Request that a callback be run in the completion thread's context.
2136 */
2137 int
2138 scsipi_thread_call_callback(struct scsipi_channel *chan,
2139 void (*callback)(struct scsipi_channel *, void *), void *arg)
2140 {
2141 int s;
2142
2143 s = splbio();
2144 if ((chan->chan_flags & SCSIPI_CHAN_TACTIVE) == 0) {
2145 /* kernel thread doesn't exist yet */
2146 splx(s);
2147 return ESRCH;
2148 }
2149 if (chan->chan_tflags & SCSIPI_CHANT_CALLBACK) {
2150 splx(s);
2151 return EBUSY;
2152 }
2153 scsipi_channel_freeze(chan, 1);
2154 chan->chan_callback = callback;
2155 chan->chan_callback_arg = arg;
2156 chan->chan_tflags |= SCSIPI_CHANT_CALLBACK;
2157 wakeup(&chan->chan_complete);
2158 splx(s);
2159 return (0);
2160 }
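
/*
 * Usage sketch (hypothetical adapter code; "foo_async_done" and "sc"
 * are illustrative names, not defined here): an interrupt handler that
 * needs thread context for cleanup work can defer it like so:
 *
 *	static void
 *	foo_async_done(struct scsipi_channel *chan, void *arg)
 *	{
 *		struct foo_softc *sc = arg;
 *
 *		... thread-context work ...
 *	}
 *
 *	if (scsipi_thread_call_callback(chan, foo_async_done, sc) != 0)
 *		... ESRCH: no thread yet; EBUSY: a callback is pending ...
 *
 * Note that the channel is frozen by one count before the callback is
 * queued; the callback is expected to undo this with
 * scsipi_channel_thaw() once its work is done.
 */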
2161
2162 /*
2163 * scsipi_async_event:
2164 *
2165 * Handle an asynchronous event from an adapter.
2166 */
2167 void
2168 scsipi_async_event(struct scsipi_channel *chan, scsipi_async_event_t event,
2169 void *arg)
2170 {
2171 int s;
2172
2173 s = splbio();
2174 switch (event) {
2175 case ASYNC_EVENT_MAX_OPENINGS:
2176 scsipi_async_event_max_openings(chan,
2177 (struct scsipi_max_openings *)arg);
2178 break;
2179
2180 case ASYNC_EVENT_XFER_MODE:
2181 scsipi_async_event_xfer_mode(chan,
2182 (struct scsipi_xfer_mode *)arg);
2183 break;
2184 case ASYNC_EVENT_RESET:
2185 scsipi_async_event_channel_reset(chan);
2186 break;
2187 }
2188 splx(s);
2189 }
2190
2191 /*
2192 * scsipi_print_xfer_mode:
2193 *
2194 * Print a periph's negotiated transfer mode.
2195 */
2196 void
2197 scsipi_print_xfer_mode(struct scsipi_periph *periph)
2198 {
2199 int period, freq, speed, mbs;
2200
2201 if ((periph->periph_flags & PERIPH_MODE_VALID) == 0)
2202 return;
2203
2204 aprint_normal("%s: ", periph->periph_dev->dv_xname);
2205 if (periph->periph_mode & (PERIPH_CAP_SYNC | PERIPH_CAP_DT)) {
2206 period = scsipi_sync_factor_to_period(periph->periph_period);
2207 aprint_normal("sync (%d.%02dns offset %d)",
2208 period / 100, period % 100, periph->periph_offset);
2209 } else
2210 aprint_normal("async");
2211
2212 if (periph->periph_mode & PERIPH_CAP_WIDE32)
2213 aprint_normal(", 32-bit");
2214 else if (periph->periph_mode & (PERIPH_CAP_WIDE16 | PERIPH_CAP_DT))
2215 aprint_normal(", 16-bit");
2216 else
2217 aprint_normal(", 8-bit");
2218
2219 if (periph->periph_mode & (PERIPH_CAP_SYNC | PERIPH_CAP_DT)) {
2220 freq = scsipi_sync_factor_to_freq(periph->periph_period);
2221 speed = freq;
2222 if (periph->periph_mode & PERIPH_CAP_WIDE32)
2223 speed *= 4;
2224 else if (periph->periph_mode &
2225 (PERIPH_CAP_WIDE16 | PERIPH_CAP_DT))
2226 speed *= 2;
2227 mbs = speed / 1000;
2228 if (mbs > 0)
2229 aprint_normal(" (%d.%03dMB/s)", mbs, speed % 1000);
2230 else
2231 aprint_normal(" (%dKB/s)", speed % 1000);
2232 }
2233
2234 aprint_normal(" transfers");
2235
2236 if (periph->periph_mode & PERIPH_CAP_TQING)
2237 aprint_normal(", tagged queueing");
2238
2239 aprint_normal("\n");
2240 }
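
/*
 * For example, a periph that negotiated sync factor 0x0a (period 2500,
 * i.e. 25.00 ns / 40 MHz), offset 15, wide-16 and tagged queueing would
 * be announced as (device name and offset illustrative):
 *
 *	sd0: sync (25.00ns offset 15), 16-bit (80.000MB/s) transfers, tagged queueing
 */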
2241
2242 /*
2243 * scsipi_async_event_max_openings:
2244 *
2245 * Update the maximum number of outstanding commands a
2246 * device may have.
2247 */
2248 static void
2249 scsipi_async_event_max_openings(struct scsipi_channel *chan,
2250 struct scsipi_max_openings *mo)
2251 {
2252 struct scsipi_periph *periph;
2253 int minlun, maxlun;
2254
2255 if (mo->mo_lun == -1) {
2256 /*
2257 * Wildcarded; apply it to all LUNs.
2258 */
2259 minlun = 0;
2260 maxlun = chan->chan_nluns - 1;
2261 } else
2262 minlun = maxlun = mo->mo_lun;
2263
2264 /* XXX This could really suck with a large LUN space. */
2265 for (; minlun <= maxlun; minlun++) {
2266 periph = scsipi_lookup_periph(chan, mo->mo_target, minlun);
2267 if (periph == NULL)
2268 continue;
2269
2270 if (mo->mo_openings < periph->periph_openings)
2271 periph->periph_openings = mo->mo_openings;
2272 else if (mo->mo_openings > periph->periph_openings &&
2273 (periph->periph_flags & PERIPH_GROW_OPENINGS) != 0)
2274 periph->periph_openings = mo->mo_openings;
2275 }
2276 }
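
/*
 * For example, an adapter that has run out of tags for target 3 might
 * post { mo_target = 3, mo_lun = -1, mo_openings = 4 } (values
 * illustrative), clamping every LUN on that target to at most 4
 * concurrent commands.  Openings are only ever raised again for
 * periphs that have PERIPH_GROW_OPENINGS set.
 */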
2277
2278 /*
2279 * scsipi_async_event_xfer_mode:
2280 *
2281 * Update the xfer mode for all periphs sharing the
2282 * specified I_T Nexus.
2283 */
2284 static void
2285 scsipi_async_event_xfer_mode(struct scsipi_channel *chan,
2286 struct scsipi_xfer_mode *xm)
2287 {
2288 struct scsipi_periph *periph;
2289 int lun, announce, mode, period, offset;
2290
2291 for (lun = 0; lun < chan->chan_nluns; lun++) {
2292 periph = scsipi_lookup_periph(chan, xm->xm_target, lun);
2293 if (periph == NULL)
2294 continue;
2295 announce = 0;
2296
2297 /*
2298 * Clamp the xfer mode down to this periph's capabilities.
2299 */
2300 mode = xm->xm_mode & periph->periph_cap;
2301 if (mode & PERIPH_CAP_SYNC) {
2302 period = xm->xm_period;
2303 offset = xm->xm_offset;
2304 } else {
2305 period = 0;
2306 offset = 0;
2307 }
2308
2309 /*
2310 * If we do not have a valid xfer mode yet, or the parameters
2311 * are different, announce them.
2312 */
2313 if ((periph->periph_flags & PERIPH_MODE_VALID) == 0 ||
2314 periph->periph_mode != mode ||
2315 periph->periph_period != period ||
2316 periph->periph_offset != offset)
2317 announce = 1;
2318
2319 periph->periph_mode = mode;
2320 periph->periph_period = period;
2321 periph->periph_offset = offset;
2322 periph->periph_flags |= PERIPH_MODE_VALID;
2323
2324 if (announce)
2325 scsipi_print_xfer_mode(periph);
2326 }
2327 }
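
/*
 * Example of the clamp above: if the adapter reports
 * PERIPH_CAP_SYNC|PERIPH_CAP_WIDE16 but the device's periph_cap (from
 * INQUIRY) only includes PERIPH_CAP_SYNC, the periph is recorded as
 * sync, 8-bit; the mode is announced only when it differs from what
 * was previously recorded, or when no valid mode existed yet.
 */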
2328
2329 /*
2330 * scsipi_set_xfer_mode:
2331 *
2332 * Set the xfer mode for the specified I_T Nexus.
2333 */
2334 void
2335 scsipi_set_xfer_mode(struct scsipi_channel *chan, int target, int immed)
2336 {
2337 struct scsipi_xfer_mode xm;
2338 struct scsipi_periph *itperiph;
2339 int lun, s;
2340
2341 /*
2342 * Go to the minimal xfer mode.
2343 */
2344 xm.xm_target = target;
2345 xm.xm_mode = 0;
2346 xm.xm_period = 0; /* ignored */
2347 xm.xm_offset = 0; /* ignored */
2348
2349 /*
2350 * Find the first LUN we know about on this I_T Nexus.
2351 */
2352 for (itperiph = NULL, lun = 0; lun < chan->chan_nluns; lun++) {
2353 itperiph = scsipi_lookup_periph(chan, target, lun);
2354 if (itperiph != NULL)
2355 break;
2356 }
2357 if (itperiph != NULL) {
2358 xm.xm_mode = itperiph->periph_cap;
2359 /*
2360 * Now issue the request to the adapter.
2361 */
2362 s = splbio();
2363 scsipi_adapter_request(chan, ADAPTER_REQ_SET_XFER_MODE, &xm);
2364 splx(s);
2365 /*
2366 * If we want this to happen immediately, issue a dummy
2367 * command, since most adapters can't really negotiate unless
2368 * they're executing a job.
2369 */
2370 if (immed != 0) {
2371 (void) scsipi_test_unit_ready(itperiph,
2372 XS_CTL_DISCOVERY | XS_CTL_IGNORE_ILLEGAL_REQUEST |
2373 XS_CTL_IGNORE_NOT_READY |
2374 XS_CTL_IGNORE_MEDIA_CHANGE);
2375 }
2376 }
2377 }
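
/*
 * Typical use (sketch): a front-end that wants to (re)negotiate a
 * target right away can call
 *
 *	scsipi_set_xfer_mode(chan, target, 1);
 *
 * which requests the periph's full capability set and issues a dummy
 * TEST UNIT READY to trigger the negotiation.  With immed == 0, the
 * negotiation instead happens whenever the adapter next executes a
 * command for this target.
 */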
2378
2379 /*
2380 * scsipi_async_event_channel_reset:
2381 *
2382 * Handle a SCSI bus reset.
2383 * Called at splbio().
2384 */
2385 static void
2386 scsipi_async_event_channel_reset(struct scsipi_channel *chan)
2387 {
2388 struct scsipi_xfer *xs, *xs_next;
2389 struct scsipi_periph *periph;
2390 int target, lun;
2391
2392 /*
2393 * The channel has been reset. Also mark pending REQUEST_SENSE
2394 * commands as reset, since their sense data is no longer valid.
2395 * We can't call scsipi_done() from here, as the commands have not
2396 * been sent to the adapter yet (that would corrupt the accounting).
2397 */
2398
2399 for (xs = TAILQ_FIRST(&chan->chan_queue); xs != NULL; xs = xs_next) {
2400 xs_next = TAILQ_NEXT(xs, channel_q);
2401 if (xs->xs_control & XS_CTL_REQSENSE) {
2402 TAILQ_REMOVE(&chan->chan_queue, xs, channel_q);
2403 xs->error = XS_RESET;
2404 if ((xs->xs_control & XS_CTL_ASYNC) != 0)
2405 TAILQ_INSERT_TAIL(&chan->chan_complete, xs,
2406 channel_q);
2407 }
2408 }
2409 wakeup(&chan->chan_complete);
2410 /* Catch xs with pending sense which may not have a REQSENSE xs yet */
2411 for (target = 0; target < chan->chan_ntargets; target++) {
2412 if (target == chan->chan_id)
2413 continue;
2414 for (lun = 0; lun < chan->chan_nluns; lun++) {
2415 periph = scsipi_lookup_periph(chan, target, lun);
2416 if (periph) {
2417 xs = periph->periph_xscheck;
2418 if (xs)
2419 xs->error = XS_RESET;
2420 }
2421 }
2422 }
2423 }
2424
2425 /*
2426 * scsipi_target_detach:
2427 *
2428 * Detach all periphs associated with an I_T nexus.
2429 * Must be called from valid thread context.
2430 */
2431 int
2432 scsipi_target_detach(struct scsipi_channel *chan, int target, int lun,
2433 int flags)
2434 {
2435 struct scsipi_periph *periph;
2436 int ctarget, mintarget, maxtarget;
2437 int clun, minlun, maxlun;
2438 int error;
2439
2440 if (target == -1) {
2441 mintarget = 0;
2442 maxtarget = chan->chan_ntargets;
2443 } else {
2444 if (target == chan->chan_id)
2445 return EINVAL;
2446 if (target < 0 || target >= chan->chan_ntargets)
2447 return EINVAL;
2448 mintarget = target;
2449 maxtarget = target + 1;
2450 }
2451
2452 if (lun == -1) {
2453 minlun = 0;
2454 maxlun = chan->chan_nluns;
2455 } else {
2456 if (lun < 0 || lun >= chan->chan_nluns)
2457 return EINVAL;
2458 minlun = lun;
2459 maxlun = lun + 1;
2460 }
2461
2462 for (ctarget = mintarget; ctarget < maxtarget; ctarget++) {
2463 if (ctarget == chan->chan_id)
2464 continue;
2465
2466 for (clun = minlun; clun < maxlun; clun++) {
2467 periph = scsipi_lookup_periph(chan, ctarget, clun);
2468 if (periph == NULL)
2469 continue;
2470 error = config_detach(periph->periph_dev, flags);
2471 if (error)
2472 return (error);
2473 }
2474 }
2475 return (0);
2476 }
2477
2478 /*
2479 * scsipi_adapter_addref:
2480 *
2481 * Add a reference to the adapter pointed to by the provided
2482 * link, enabling the adapter if necessary.
2483 */
2484 int
2485 scsipi_adapter_addref(struct scsipi_adapter *adapt)
2486 {
2487 int s, error = 0;
2488
2489 s = splbio();
2490 if (adapt->adapt_refcnt++ == 0 && adapt->adapt_enable != NULL) {
2491 error = (*adapt->adapt_enable)(adapt->adapt_dev, 1);
2492 if (error)
2493 adapt->adapt_refcnt--;
2494 }
2495 splx(s);
2496 return (error);
2497 }
2498
2499 /*
2500 * scsipi_adapter_delref:
2501 *
2502 * Delete a reference to the adapter pointed to by the provided
2503 * link, disabling the adapter if possible.
2504 */
2505 void
2506 scsipi_adapter_delref(struct scsipi_adapter *adapt)
2507 {
2508 int s;
2509
2510 s = splbio();
2511 if (adapt->adapt_refcnt-- == 1 && adapt->adapt_enable != NULL)
2512 (void) (*adapt->adapt_enable)(adapt->adapt_dev, 0);
2513 splx(s);
2514 }
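
/*
 * These form a reference-counted enable/disable pair: the first
 * reference enables the adapter through its adapt_enable hook (if
 * any), and dropping the last reference disables it again.  A periph
 * driver would typically bracket device activity (sketch):
 *
 *	if ((error = scsipi_adapter_addref(adapt)) != 0)
 *		return error;
 *	... perform I/O ...
 *	scsipi_adapter_delref(adapt);
 */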
2515
2516 static struct scsipi_syncparam {
2517 int ss_factor;
2518 int ss_period; /* ns * 100 */
2519 } scsipi_syncparams[] = {
2520 { 0x08, 625 }, /* FAST-160 (Ultra320) */
2521 { 0x09, 1250 }, /* FAST-80 (Ultra160) */
2522 { 0x0a, 2500 }, /* FAST-40 40MHz (Ultra2) */
2523 { 0x0b, 3030 }, /* FAST-40 33MHz (Ultra2) */
2524 { 0x0c, 5000 }, /* FAST-20 (Ultra) */
2525 };
2526 static const int scsipi_nsyncparams =
2527 sizeof(scsipi_syncparams) / sizeof(scsipi_syncparams[0]);
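
/*
 * Worked example for the conversions below: factor 0x0a maps to a
 * period of 2500 (25.00 ns), i.e. 100000000 / 2500 = 40000 kHz
 * (FAST-40).  Factors outside the table fall back to the classic
 * "period = factor * 4 ns" encoding: factor 0x19 (25) yields
 * 25 * 4 * 100 = 10000 (100 ns), or 10000 kHz (10 MHz).  Periods are
 * kept in units of ns * 100 and frequencies in kHz, which is what
 * makes the MB/s arithmetic in scsipi_print_xfer_mode() come out
 * right.
 */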
2528
2529 int
2530 scsipi_sync_period_to_factor(int period /* ns * 100 */)
2531 {
2532 int i;
2533
2534 for (i = 0; i < scsipi_nsyncparams; i++) {
2535 if (period <= scsipi_syncparams[i].ss_period)
2536 return (scsipi_syncparams[i].ss_factor);
2537 }
2538
2539 return ((period / 100) / 4);
2540 }
2541
2542 int
2543 scsipi_sync_factor_to_period(int factor)
2544 {
2545 int i;
2546
2547 for (i = 0; i < scsipi_nsyncparams; i++) {
2548 if (factor == scsipi_syncparams[i].ss_factor)
2549 return (scsipi_syncparams[i].ss_period);
2550 }
2551
2552 return ((factor * 4) * 100);
2553 }
2554
2555 int
2556 scsipi_sync_factor_to_freq(int factor)
2557 {
2558 int i;
2559
2560 for (i = 0; i < scsipi_nsyncparams; i++) {
2561 if (factor == scsipi_syncparams[i].ss_factor)
2562 return (100000000 / scsipi_syncparams[i].ss_period);
2563 }
2564
2565 return (10000000 / ((factor * 4) * 10));
2566 }
2567
2568 #ifdef SCSIPI_DEBUG
2569 /*
2570 * Given a scsipi_xfer, dump the request, in all its glory.
2571 */
2572 void
2573 show_scsipi_xs(struct scsipi_xfer *xs)
2574 {
2575
2576 printf("xs(%p): ", xs);
2577 printf("xs_control(0x%08x)", xs->xs_control);
2578 printf("xs_status(0x%08x)", xs->xs_status);
2579 printf("periph(%p)", xs->xs_periph);
2580 printf("retr(0x%x)", xs->xs_retries);
2581 printf("timo(0x%x)", xs->timeout);
2582 printf("cmd(%p)", xs->cmd);
2583 printf("len(0x%x)", xs->cmdlen);
2584 printf("data(%p)", xs->data);
2585 printf("len(0x%x)", xs->datalen);
2586 printf("res(0x%x)", xs->resid);
2587 printf("err(0x%x)", xs->error);
2588 printf("bp(%p)", xs->bp);
2589 show_scsipi_cmd(xs);
2590 }
2591
2592 void
2593 show_scsipi_cmd(struct scsipi_xfer *xs)
2594 {
2595 u_char *b = (u_char *) xs->cmd;
2596 int i = 0;
2597
2598 scsipi_printaddr(xs->xs_periph);
2599 printf(" command: ");
2600
2601 if ((xs->xs_control & XS_CTL_RESET) == 0) {
2602 while (i < xs->cmdlen) {
2603 if (i)
2604 printf(",");
2605 printf("0x%x", b[i++]);
2606 }
2607 printf("-[%d bytes]\n", xs->datalen);
2608 if (xs->datalen)
2609 show_mem(xs->data, min(64, xs->datalen));
2610 } else
2611 printf("-RESET-\n");
2612 }
2613
2614 void
2615 show_mem(u_char *address, int num)
2616 {
2617 int x;
2618
2619 printf("------------------------------");
2620 for (x = 0; x < num; x++) {
2621 if ((x % 16) == 0)
2622 printf("\n%03d: ", x);
2623 printf("%02x ", *address++);
2624 }
2625 printf("\n------------------------------\n");
2626 }
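
/*
 * show_mem() output looks like this for, e.g., a 20-byte buffer
 * (byte values illustrative; offsets are decimal):
 *
 *	------------------------------
 *	000: 3a 00 00 00 12 00 00 00 00 00 00 00 00 00 00 00
 *	016: 00 00 00 00
 *	------------------------------
 */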
2627 #endif /* SCSIPI_DEBUG */
2628