scsipi_base.c revision 1.161 1 /* $NetBSD: scsipi_base.c,v 1.161 2014/10/06 14:42:08 christos Exp $ */
2
3 /*-
4 * Copyright (c) 1998, 1999, 2000, 2002, 2003, 2004 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Charles M. Hannum; by Jason R. Thorpe of the Numerical Aerospace
9 * Simulation Facility, NASA Ames Research Center.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
21 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
22 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
23 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 * POSSIBILITY OF SUCH DAMAGE.
31 */
32
33 #include <sys/cdefs.h>
34 __KERNEL_RCSID(0, "$NetBSD: scsipi_base.c,v 1.161 2014/10/06 14:42:08 christos Exp $");
35
36 #include "opt_scsi.h"
37
38 #include <sys/param.h>
39 #include <sys/systm.h>
40 #include <sys/kernel.h>
41 #include <sys/buf.h>
42 #include <sys/uio.h>
43 #include <sys/malloc.h>
44 #include <sys/pool.h>
45 #include <sys/errno.h>
46 #include <sys/device.h>
47 #include <sys/proc.h>
48 #include <sys/kthread.h>
49 #include <sys/hash.h>
50
51 #include <dev/scsipi/scsi_spc.h>
52 #include <dev/scsipi/scsipi_all.h>
53 #include <dev/scsipi/scsipi_disk.h>
54 #include <dev/scsipi/scsipiconf.h>
55 #include <dev/scsipi/scsipi_base.h>
56
57 #include <dev/scsipi/scsi_all.h>
58 #include <dev/scsipi/scsi_message.h>
59
60 #include <machine/param.h>
61
62 static int scsipi_complete(struct scsipi_xfer *);
63 static void scsipi_request_sense(struct scsipi_xfer *);
64 static int scsipi_enqueue(struct scsipi_xfer *);
65 static void scsipi_run_queue(struct scsipi_channel *chan);
66
67 static void scsipi_completion_thread(void *);
68
69 static void scsipi_get_tag(struct scsipi_xfer *);
70 static void scsipi_put_tag(struct scsipi_xfer *);
71
72 static int scsipi_get_resource(struct scsipi_channel *);
73 static void scsipi_put_resource(struct scsipi_channel *);
74
75 static void scsipi_async_event_max_openings(struct scsipi_channel *,
76 struct scsipi_max_openings *);
77 static void scsipi_async_event_channel_reset(struct scsipi_channel *);
78
79 static struct pool scsipi_xfer_pool;
80
81 /*
82 * scsipi_init:
83 *
84 * Called when a scsibus or atapibus is attached to the system
85 * to initialize shared data structures.
86 */
void
scsipi_init(void)
{
	static int scsipi_init_done;

	/* Idempotent: only the first caller does the work. */
	if (scsipi_init_done)
		return;
	scsipi_init_done = 1;

	/* Initialize the scsipi_xfer pool. */
	pool_init(&scsipi_xfer_pool, sizeof(struct scsipi_xfer), 0,
	    0, 0, "scxspl", NULL, IPL_BIO);
	/* Pre-allocate a page worth of xfers; failure here is non-fatal. */
	if (pool_prime(&scsipi_xfer_pool,
	    PAGE_SIZE / sizeof(struct scsipi_xfer)) == ENOMEM) {
		printf("WARNING: not enough memory for scsipi_xfer_pool\n");
	}
}
104
105 /*
106 * scsipi_channel_init:
107 *
108 * Initialize a scsipi_channel when it is attached.
109 */
int
scsipi_channel_init(struct scsipi_channel *chan)
{
	struct scsipi_adapter *adapt = chan->chan_adapter;
	int i;

	/* Initialize shared data. */
	scsipi_init();

	/* Initialize the queues. */
	TAILQ_INIT(&chan->chan_queue);
	TAILQ_INIT(&chan->chan_complete);

	/* Empty every bucket of the target/lun -> periph hash table. */
	for (i = 0; i < SCSIPI_CHAN_PERIPH_BUCKETS; i++)
		LIST_INIT(&chan->chan_periphtab[i]);

	/*
	 * Create the asynchronous completion thread.
	 */
	if (kthread_create(PRI_NONE, 0, NULL, scsipi_completion_thread, chan,
	    &chan->chan_thread, "%s", chan->chan_name)) {
		/* The midlayer cannot operate without its thread. */
		aprint_error_dev(adapt->adapt_dev, "unable to create completion thread for "
		    "channel %d\n", chan->chan_channel);
		panic("scsipi_channel_init");
	}

	return (0);
}
138
139 /*
140 * scsipi_channel_shutdown:
141 *
142 * Shutdown a scsipi_channel.
143 */
void
scsipi_channel_shutdown(struct scsipi_channel *chan)
{

	/*
	 * Shut down the completion thread.
	 */
	chan->chan_tflags |= SCSIPI_CHANT_SHUTDOWN;
	wakeup(&chan->chan_complete);

	/*
	 * Now wait for the thread to exit.  Presumably the exiting
	 * thread clears chan_thread and wakes us -- see
	 * scsipi_completion_thread (not in view here).
	 */
	while (chan->chan_thread != NULL)
		(void) tsleep(&chan->chan_thread, PRIBIO, "scshut", 0);
}
160
161 static uint32_t
162 scsipi_chan_periph_hash(uint64_t t, uint64_t l)
163 {
164 uint32_t hash;
165
166 hash = hash32_buf(&t, sizeof(t), HASH32_BUF_INIT);
167 hash = hash32_buf(&l, sizeof(l), hash);
168
169 return (hash & SCSIPI_CHAN_PERIPH_HASHMASK);
170 }
171
172 /*
173 * scsipi_insert_periph:
174 *
175 * Insert a periph into the channel.
176 */
177 void
178 scsipi_insert_periph(struct scsipi_channel *chan, struct scsipi_periph *periph)
179 {
180 uint32_t hash;
181 int s;
182
183 hash = scsipi_chan_periph_hash(periph->periph_target,
184 periph->periph_lun);
185
186 s = splbio();
187 LIST_INSERT_HEAD(&chan->chan_periphtab[hash], periph, periph_hash);
188 splx(s);
189 }
190
191 /*
192 * scsipi_remove_periph:
193 *
194 * Remove a periph from the channel.
195 */
196 void
197 scsipi_remove_periph(struct scsipi_channel *chan,
198 struct scsipi_periph *periph)
199 {
200 int s;
201
202 s = splbio();
203 LIST_REMOVE(periph, periph_hash);
204 splx(s);
205 }
206
207 /*
208 * scsipi_lookup_periph:
209 *
210 * Lookup a periph on the specified channel.
211 */
struct scsipi_periph *
scsipi_lookup_periph(struct scsipi_channel *chan, int target, int lun)
{
	struct scsipi_periph *periph;
	uint32_t hash;
	int s;

	KASSERT(cold || KERNEL_LOCKED_P());

	/* Out-of-range addresses cannot match anything. */
	if (target >= chan->chan_ntargets ||
	    lun >= chan->chan_nluns)
		return (NULL);

	hash = scsipi_chan_periph_hash(target, lun);

	s = splbio();
	LIST_FOREACH(periph, &chan->chan_periphtab[hash], periph_hash) {
		if (periph->periph_target == target &&
		    periph->periph_lun == lun)
			break;
	}
	splx(s);

	/* NULL when the loop ran off the end without a match. */
	return (periph);
}
237
238 /*
239 * scsipi_get_resource:
240 *
241 * Allocate a single xfer `resource' from the channel.
242 *
243 * NOTE: Must be called at splbio().
244 */
245 static int
246 scsipi_get_resource(struct scsipi_channel *chan)
247 {
248 struct scsipi_adapter *adapt = chan->chan_adapter;
249
250 if (chan->chan_flags & SCSIPI_CHAN_OPENINGS) {
251 if (chan->chan_openings > 0) {
252 chan->chan_openings--;
253 return (1);
254 }
255 return (0);
256 }
257
258 if (adapt->adapt_openings > 0) {
259 adapt->adapt_openings--;
260 return (1);
261 }
262 return (0);
263 }
264
265 /*
266 * scsipi_grow_resources:
267 *
268 * Attempt to grow resources for a channel. If this succeeds,
269 * we allocate one for our caller.
270 *
271 * NOTE: Must be called at splbio().
272 */
static inline int
scsipi_grow_resources(struct scsipi_channel *chan)
{

	if (chan->chan_flags & SCSIPI_CHAN_CANGROW) {
		if ((chan->chan_flags & SCSIPI_CHAN_TACTIVE) == 0) {
			/* No completion thread yet: grow synchronously. */
			scsipi_adapter_request(chan,
			    ADAPTER_REQ_GROW_RESOURCES, NULL);
			return (scsipi_get_resource(chan));
		}
		/*
		 * ask the channel thread to do it. It'll have to thaw the
		 * queue
		 */
		scsipi_channel_freeze(chan, 1);
		chan->chan_tflags |= SCSIPI_CHANT_GROWRES;
		wakeup(&chan->chan_complete);
		/* Nothing for the caller yet; the thread works async. */
		return (0);
	}

	return (0);
}
295
296 /*
297 * scsipi_put_resource:
298 *
299 * Free a single xfer `resource' to the channel.
300 *
301 * NOTE: Must be called at splbio().
302 */
303 static void
304 scsipi_put_resource(struct scsipi_channel *chan)
305 {
306 struct scsipi_adapter *adapt = chan->chan_adapter;
307
308 if (chan->chan_flags & SCSIPI_CHAN_OPENINGS)
309 chan->chan_openings++;
310 else
311 adapt->adapt_openings++;
312 }
313
314 /*
315 * scsipi_get_tag:
316 *
317 * Get a tag ID for the specified xfer.
318 *
319 * NOTE: Must be called at splbio().
320 */
static void
scsipi_get_tag(struct scsipi_xfer *xs)
{
	struct scsipi_periph *periph = xs->xs_periph;
	int bit, tag;
	u_int word;

	bit = 0;	/* XXX gcc */
	/* Find the first bitmap word with a free tag bit set. */
	for (word = 0; word < PERIPH_NTAGWORDS; word++) {
		bit = ffs(periph->periph_freetags[word]);
		if (bit != 0)
			break;
	}
#ifdef DIAGNOSTIC
	if (word == PERIPH_NTAGWORDS) {
		scsipi_printaddr(periph);
		printf("no free tags\n");
		panic("scsipi_get_tag");
	}
#endif
	/*
	 * NOTE(review): without DIAGNOSTIC, an exhausted bitmap leaves
	 * bit == 0 here, making the shift below undefined -- callers are
	 * presumed never to request more tags than there are openings.
	 */

	bit -= 1;	/* ffs() is 1-based */
	periph->periph_freetags[word] &= ~(1 << bit);
	tag = (word << 5) | bit;	/* 32 tags per bitmap word */

	/* XXX Should eventually disallow this completely. */
	if (tag >= periph->periph_openings) {
		scsipi_printaddr(periph);
		printf("WARNING: tag %d greater than available openings %d\n",
		    tag, periph->periph_openings);
	}

	xs->xs_tag_id = tag;
}
355
356 /*
357 * scsipi_put_tag:
358 *
359 * Put the tag ID for the specified xfer back into the pool.
360 *
361 * NOTE: Must be called at splbio().
362 */
363 static void
364 scsipi_put_tag(struct scsipi_xfer *xs)
365 {
366 struct scsipi_periph *periph = xs->xs_periph;
367 int word, bit;
368
369 word = xs->xs_tag_id >> 5;
370 bit = xs->xs_tag_id & 0x1f;
371
372 periph->periph_freetags[word] |= (1 << bit);
373 }
374
375 /*
376 * scsipi_get_xs:
377 *
378 * Allocate an xfer descriptor and associate it with the
379 * specified peripheral. If the peripheral has no more
380 * available command openings, we either block waiting for
381 * one to become available, or fail.
382 */
struct scsipi_xfer *
scsipi_get_xs(struct scsipi_periph *periph, int flags)
{
	struct scsipi_xfer *xs;
	int s;

	SC_DEBUG(periph, SCSIPI_DB3, ("scsipi_get_xs\n"));

	KASSERT(!cold);

#ifdef DIAGNOSTIC
	/*
	 * URGENT commands can never be ASYNC.
	 */
	if ((flags & (XS_CTL_URGENT|XS_CTL_ASYNC)) ==
	    (XS_CTL_URGENT|XS_CTL_ASYNC)) {
		scsipi_printaddr(periph);
		printf("URGENT and ASYNC\n");
		panic("scsipi_get_xs");
	}
#endif

	s = splbio();
	/*
	 * Wait for a command opening to become available.  Rules:
	 *
	 *	- All xfers must wait for an available opening.
	 *	  Exception: URGENT xfers can proceed when
	 *	  active == openings, because we use the opening
	 *	  of the command we're recovering for.
	 *	- if the periph has sense pending, only URGENT & REQSENSE
	 *	  xfers may proceed.
	 *
	 *	- If the periph is recovering, only URGENT xfers may
	 *	  proceed.
	 *
	 *	- If the periph is currently executing a recovery
	 *	  command, URGENT commands must block, because only
	 *	  one recovery command can execute at a time.
	 */
	for (;;) {
		if (flags & XS_CTL_URGENT) {
			if (periph->periph_active > periph->periph_openings)
				goto wait_for_opening;
			if (periph->periph_flags & PERIPH_SENSE) {
				if ((flags & XS_CTL_REQSENSE) == 0)
					goto wait_for_opening;
			} else {
				if ((periph->periph_flags &
				    PERIPH_RECOVERY_ACTIVE) != 0)
					goto wait_for_opening;
				/* Claim the single recovery slot. */
				periph->periph_flags |= PERIPH_RECOVERY_ACTIVE;
			}
			break;
		}
		if (periph->periph_active >= periph->periph_openings ||
		    (periph->periph_flags & PERIPH_RECOVERING) != 0)
			goto wait_for_opening;
		periph->periph_active++;
		break;

 wait_for_opening:
		if (flags & XS_CTL_NOSLEEP) {
			splx(s);
			return (NULL);
		}
		SC_DEBUG(periph, SCSIPI_DB3, ("sleeping\n"));
		periph->periph_flags |= PERIPH_WAITING;
		/* Woken by scsipi_put_xs() / scsipi_periph_thaw(). */
		(void) tsleep(periph, PRIBIO, "getxs", 0);
	}
	SC_DEBUG(periph, SCSIPI_DB3, ("calling pool_get\n"));
	xs = pool_get(&scsipi_xfer_pool,
	    ((flags & XS_CTL_NOSLEEP) != 0 ? PR_NOWAIT : PR_WAITOK));
	if (xs == NULL) {
		/* Allocation failed: undo the accounting done above. */
		if (flags & XS_CTL_URGENT) {
			if ((flags & XS_CTL_REQSENSE) == 0)
				periph->periph_flags &= ~PERIPH_RECOVERY_ACTIVE;
		} else
			periph->periph_active--;
		scsipi_printaddr(periph);
		printf("unable to allocate %sscsipi_xfer\n",
		    (flags & XS_CTL_URGENT) ? "URGENT " : "");
	}
	splx(s);

	SC_DEBUG(periph, SCSIPI_DB3, ("returning\n"));

	if (xs != NULL) {
		/* Fresh xfer: zero it, bind it to the periph, enqueue. */
		memset(xs, 0, sizeof(*xs));
		callout_init(&xs->xs_callout, 0);
		xs->xs_periph = periph;
		xs->xs_control = flags;
		xs->xs_status = 0;
		s = splbio();
		TAILQ_INSERT_TAIL(&periph->periph_xferq, xs, device_q);
		splx(s);
	}
	return (xs);
}
482
483 /*
484 * scsipi_put_xs:
485 *
486 * Release an xfer descriptor, decreasing the outstanding command
487 * count for the peripheral. If there is a thread waiting for
488 * an opening, wake it up. If not, kick any queued I/O the
489 * peripheral may have.
490 *
491 * NOTE: Must be called at splbio().
492 */
493 void
494 scsipi_put_xs(struct scsipi_xfer *xs)
495 {
496 struct scsipi_periph *periph = xs->xs_periph;
497 int flags = xs->xs_control;
498
499 SC_DEBUG(periph, SCSIPI_DB3, ("scsipi_free_xs\n"));
500
501 TAILQ_REMOVE(&periph->periph_xferq, xs, device_q);
502 callout_destroy(&xs->xs_callout);
503 pool_put(&scsipi_xfer_pool, xs);
504
505 #ifdef DIAGNOSTIC
506 if ((periph->periph_flags & PERIPH_RECOVERY_ACTIVE) != 0 &&
507 periph->periph_active == 0) {
508 scsipi_printaddr(periph);
509 printf("recovery without a command to recovery for\n");
510 panic("scsipi_put_xs");
511 }
512 #endif
513
514 if (flags & XS_CTL_URGENT) {
515 if ((flags & XS_CTL_REQSENSE) == 0)
516 periph->periph_flags &= ~PERIPH_RECOVERY_ACTIVE;
517 } else
518 periph->periph_active--;
519 if (periph->periph_active == 0 &&
520 (periph->periph_flags & PERIPH_WAITDRAIN) != 0) {
521 periph->periph_flags &= ~PERIPH_WAITDRAIN;
522 wakeup(&periph->periph_active);
523 }
524
525 if (periph->periph_flags & PERIPH_WAITING) {
526 periph->periph_flags &= ~PERIPH_WAITING;
527 wakeup(periph);
528 } else {
529 if (periph->periph_switch->psw_start != NULL &&
530 device_is_active(periph->periph_dev)) {
531 SC_DEBUG(periph, SCSIPI_DB2,
532 ("calling private start()\n"));
533 (*periph->periph_switch->psw_start)(periph);
534 }
535 }
536 }
537
538 /*
539 * scsipi_channel_freeze:
540 *
541 * Freeze a channel's xfer queue.
542 */
543 void
544 scsipi_channel_freeze(struct scsipi_channel *chan, int count)
545 {
546 int s;
547
548 s = splbio();
549 chan->chan_qfreeze += count;
550 splx(s);
551 }
552
553 /*
554 * scsipi_channel_thaw:
555 *
556 * Thaw a channel's xfer queue.
557 */
558 void
559 scsipi_channel_thaw(struct scsipi_channel *chan, int count)
560 {
561 int s;
562
563 s = splbio();
564 chan->chan_qfreeze -= count;
565 /*
566 * Don't let the freeze count go negative.
567 *
568 * Presumably the adapter driver could keep track of this,
569 * but it might just be easier to do this here so as to allow
570 * multiple callers, including those outside the adapter driver.
571 */
572 if (chan->chan_qfreeze < 0) {
573 chan->chan_qfreeze = 0;
574 }
575 splx(s);
576 /*
577 * Kick the channel's queue here. Note, we may be running in
578 * interrupt context (softclock or HBA's interrupt), so the adapter
579 * driver had better not sleep.
580 */
581 if (chan->chan_qfreeze == 0)
582 scsipi_run_queue(chan);
583 }
584
585 /*
586 * scsipi_channel_timed_thaw:
587 *
588 * Thaw a channel after some time has expired. This will also
589 * run the channel's queue if the freeze count has reached 0.
590 */
void
scsipi_channel_timed_thaw(void *arg)
{
	/* Callout handler: arg is the channel; thaw by one count. */
	scsipi_channel_thaw(arg, 1);
}
598
599 /*
600 * scsipi_periph_freeze:
601 *
602 * Freeze a device's xfer queue.
603 */
604 void
605 scsipi_periph_freeze(struct scsipi_periph *periph, int count)
606 {
607 int s;
608
609 s = splbio();
610 periph->periph_qfreeze += count;
611 splx(s);
612 }
613
614 /*
615 * scsipi_periph_thaw:
616 *
617 * Thaw a device's xfer queue.
618 */
void
scsipi_periph_thaw(struct scsipi_periph *periph, int count)
{
	int s;

	s = splbio();
	periph->periph_qfreeze -= count;
#ifdef DIAGNOSTIC
	/* Unlike channel thaws, a negative periph freeze count is fatal. */
	if (periph->periph_qfreeze < 0) {
		static const char pc[] = "periph freeze count < 0";
		scsipi_printaddr(periph);
		printf("%s\n", pc);
		panic(pc);
	}
#endif
	/* Fully thawed: wake anyone sleeping in scsipi_get_xs(). */
	if (periph->periph_qfreeze == 0 &&
	    (periph->periph_flags & PERIPH_WAITING) != 0)
		wakeup(periph);
	splx(s);
}
639
640 /*
641 * scsipi_periph_timed_thaw:
642 *
643 * Thaw a device after some time has expired.
644 */
void
scsipi_periph_timed_thaw(void *arg)
{
	int s;
	struct scsipi_periph *periph = arg;

	callout_stop(&periph->periph_callout);

	s = splbio();
	scsipi_periph_thaw(periph, 1);
	if ((periph->periph_channel->chan_flags & SCSIPI_CHAN_TACTIVE) == 0) {
		/*
		 * Kick the channel's queue here.  Note, we're running in
		 * interrupt context (softclock), so the adapter driver
		 * had better not sleep.
		 */
		scsipi_run_queue(periph->periph_channel);
	} else {
		/*
		 * Tell the completion thread to kick the channel's queue here.
		 */
		periph->periph_channel->chan_tflags |= SCSIPI_CHANT_KICK;
		wakeup(&periph->periph_channel->chan_complete);
	}
	splx(s);
}
671
672 /*
673 * scsipi_wait_drain:
674 *
675 * Wait for a periph's pending xfers to drain.
676 */
677 void
678 scsipi_wait_drain(struct scsipi_periph *periph)
679 {
680 int s;
681
682 s = splbio();
683 while (periph->periph_active != 0) {
684 periph->periph_flags |= PERIPH_WAITDRAIN;
685 (void) tsleep(&periph->periph_active, PRIBIO, "sxdrn", 0);
686 }
687 splx(s);
688 }
689
690 /*
691 * scsipi_kill_pending:
692 *
693 * Kill off all pending xfers for a periph.
694 *
695 * NOTE: Must be called at splbio().
696 */
void
scsipi_kill_pending(struct scsipi_periph *periph)
{

	/* Bus-specific hook aborts everything queued for this periph... */
	(*periph->periph_channel->chan_bustype->bustype_kill_pending)(periph);
	/* ...then wait for all of its active xfers to complete. */
	scsipi_wait_drain(periph);
}
704
705 /*
706 * scsipi_print_cdb:
707 * prints a command descriptor block (for debug purpose, error messages,
708 * SCSIVERBOSE, ...)
709 */
710 void
711 scsipi_print_cdb(struct scsipi_generic *cmd)
712 {
713 int i, j;
714
715 printf("0x%02x", cmd->opcode);
716
717 switch (CDB_GROUPID(cmd->opcode)) {
718 case CDB_GROUPID_0:
719 j = CDB_GROUP0;
720 break;
721 case CDB_GROUPID_1:
722 j = CDB_GROUP1;
723 break;
724 case CDB_GROUPID_2:
725 j = CDB_GROUP2;
726 break;
727 case CDB_GROUPID_3:
728 j = CDB_GROUP3;
729 break;
730 case CDB_GROUPID_4:
731 j = CDB_GROUP4;
732 break;
733 case CDB_GROUPID_5:
734 j = CDB_GROUP5;
735 break;
736 case CDB_GROUPID_6:
737 j = CDB_GROUP6;
738 break;
739 case CDB_GROUPID_7:
740 j = CDB_GROUP7;
741 break;
742 default:
743 j = 0;
744 }
745 if (j == 0)
746 j = sizeof (cmd->bytes);
747 for (i = 0; i < j-1; i++) /* already done the opcode */
748 printf(" %02x", cmd->bytes[i]);
749 }
750
751 /*
752 * scsipi_interpret_sense:
753 *
754 * Look at the returned sense and act on the error, determining
755 * the unix error number to pass back. (0 = report no error)
756 *
 * NOTE: If we return ERESTART, we are expected to have
758 * thawed the device!
759 *
760 * THIS IS THE DEFAULT ERROR HANDLER FOR SCSI DEVICES.
761 */
762 int
763 scsipi_interpret_sense(struct scsipi_xfer *xs)
764 {
765 struct scsi_sense_data *sense;
766 struct scsipi_periph *periph = xs->xs_periph;
767 u_int8_t key;
768 int error;
769 u_int32_t info;
770 static const char *error_mes[] = {
771 "soft error (corrected)",
772 "not ready", "medium error",
773 "non-media hardware failure", "illegal request",
774 "unit attention", "readonly device",
775 "no data found", "vendor unique",
776 "copy aborted", "command aborted",
777 "search returned equal", "volume overflow",
778 "verify miscompare", "unknown error key"
779 };
780
781 sense = &xs->sense.scsi_sense;
782 #ifdef SCSIPI_DEBUG
783 if (periph->periph_flags & SCSIPI_DB1) {
784 int count;
785 scsipi_printaddr(periph);
786 printf(" sense debug information:\n");
787 printf("\tcode 0x%x valid %d\n",
788 SSD_RCODE(sense->response_code),
789 sense->response_code & SSD_RCODE_VALID ? 1 : 0);
790 printf("\tseg 0x%x key 0x%x ili 0x%x eom 0x%x fmark 0x%x\n",
791 sense->segment,
792 SSD_SENSE_KEY(sense->flags),
793 sense->flags & SSD_ILI ? 1 : 0,
794 sense->flags & SSD_EOM ? 1 : 0,
795 sense->flags & SSD_FILEMARK ? 1 : 0);
796 printf("\ninfo: 0x%x 0x%x 0x%x 0x%x followed by %d "
797 "extra bytes\n",
798 sense->info[0],
799 sense->info[1],
800 sense->info[2],
801 sense->info[3],
802 sense->extra_len);
803 printf("\textra: ");
804 for (count = 0; count < SSD_ADD_BYTES_LIM(sense); count++)
805 printf("0x%x ", sense->csi[count]);
806 printf("\n");
807 }
808 #endif
809
810 /*
811 * If the periph has it's own error handler, call it first.
812 * If it returns a legit error value, return that, otherwise
813 * it wants us to continue with normal error processing.
814 */
815 if (periph->periph_switch->psw_error != NULL) {
816 SC_DEBUG(periph, SCSIPI_DB2,
817 ("calling private err_handler()\n"));
818 error = (*periph->periph_switch->psw_error)(xs);
819 if (error != EJUSTRETURN)
820 return (error);
821 }
822 /* otherwise use the default */
823 switch (SSD_RCODE(sense->response_code)) {
824
825 /*
826 * Old SCSI-1 and SASI devices respond with
827 * codes other than 70.
828 */
829 case 0x00: /* no error (command completed OK) */
830 return (0);
831 case 0x04: /* drive not ready after it was selected */
832 if ((periph->periph_flags & PERIPH_REMOVABLE) != 0)
833 periph->periph_flags &= ~PERIPH_MEDIA_LOADED;
834 if ((xs->xs_control & XS_CTL_IGNORE_NOT_READY) != 0)
835 return (0);
836 /* XXX - display some sort of error here? */
837 return (EIO);
838 case 0x20: /* invalid command */
839 if ((xs->xs_control &
840 XS_CTL_IGNORE_ILLEGAL_REQUEST) != 0)
841 return (0);
842 return (EINVAL);
843 case 0x25: /* invalid LUN (Adaptec ACB-4000) */
844 return (EACCES);
845
846 /*
847 * If it's code 70, use the extended stuff and
848 * interpret the key
849 */
850 case 0x71: /* delayed error */
851 scsipi_printaddr(periph);
852 key = SSD_SENSE_KEY(sense->flags);
853 printf(" DEFERRED ERROR, key = 0x%x\n", key);
854 /* FALLTHROUGH */
855 case 0x70:
856 if ((sense->response_code & SSD_RCODE_VALID) != 0)
857 info = _4btol(sense->info);
858 else
859 info = 0;
860 key = SSD_SENSE_KEY(sense->flags);
861
862 switch (key) {
863 case SKEY_NO_SENSE:
864 case SKEY_RECOVERED_ERROR:
865 if (xs->resid == xs->datalen && xs->datalen) {
866 /*
867 * Why is this here?
868 */
869 xs->resid = 0; /* not short read */
870 }
871 case SKEY_EQUAL:
872 error = 0;
873 break;
874 case SKEY_NOT_READY:
875 if ((periph->periph_flags & PERIPH_REMOVABLE) != 0)
876 periph->periph_flags &= ~PERIPH_MEDIA_LOADED;
877 if ((xs->xs_control & XS_CTL_IGNORE_NOT_READY) != 0)
878 return (0);
879 if (sense->asc == 0x3A) {
880 error = ENODEV; /* Medium not present */
881 if (xs->xs_control & XS_CTL_SILENT_NODEV)
882 return (error);
883 } else
884 error = EIO;
885 if ((xs->xs_control & XS_CTL_SILENT) != 0)
886 return (error);
887 break;
888 case SKEY_ILLEGAL_REQUEST:
889 if ((xs->xs_control &
890 XS_CTL_IGNORE_ILLEGAL_REQUEST) != 0)
891 return (0);
892 /*
893 * Handle the case where a device reports
894 * Logical Unit Not Supported during discovery.
895 */
896 if ((xs->xs_control & XS_CTL_DISCOVERY) != 0 &&
897 sense->asc == 0x25 &&
898 sense->ascq == 0x00)
899 return (EINVAL);
900 if ((xs->xs_control & XS_CTL_SILENT) != 0)
901 return (EIO);
902 error = EINVAL;
903 break;
904 case SKEY_UNIT_ATTENTION:
905 if (sense->asc == 0x29 &&
906 sense->ascq == 0x00) {
907 /* device or bus reset */
908 return (ERESTART);
909 }
910 if ((periph->periph_flags & PERIPH_REMOVABLE) != 0)
911 periph->periph_flags &= ~PERIPH_MEDIA_LOADED;
912 if ((xs->xs_control &
913 XS_CTL_IGNORE_MEDIA_CHANGE) != 0 ||
914 /* XXX Should reupload any transient state. */
915 (periph->periph_flags &
916 PERIPH_REMOVABLE) == 0) {
917 return (ERESTART);
918 }
919 if ((xs->xs_control & XS_CTL_SILENT) != 0)
920 return (EIO);
921 error = EIO;
922 break;
923 case SKEY_DATA_PROTECT:
924 error = EROFS;
925 break;
926 case SKEY_BLANK_CHECK:
927 error = 0;
928 break;
929 case SKEY_ABORTED_COMMAND:
930 if (xs->xs_retries != 0) {
931 xs->xs_retries--;
932 error = ERESTART;
933 } else
934 error = EIO;
935 break;
936 case SKEY_VOLUME_OVERFLOW:
937 error = ENOSPC;
938 break;
939 default:
940 error = EIO;
941 break;
942 }
943
944 /* Print verbose decode if appropriate and possible */
945 if ((key == 0) ||
946 ((xs->xs_control & XS_CTL_SILENT) != 0) ||
947 (scsipi_print_sense(xs, 0) != 0))
948 return (error);
949
950 /* Print brief(er) sense information */
951 scsipi_printaddr(periph);
952 printf("%s", error_mes[key - 1]);
953 if ((sense->response_code & SSD_RCODE_VALID) != 0) {
954 switch (key) {
955 case SKEY_NOT_READY:
956 case SKEY_ILLEGAL_REQUEST:
957 case SKEY_UNIT_ATTENTION:
958 case SKEY_DATA_PROTECT:
959 break;
960 case SKEY_BLANK_CHECK:
961 printf(", requested size: %d (decimal)",
962 info);
963 break;
964 case SKEY_ABORTED_COMMAND:
965 if (xs->xs_retries)
966 printf(", retrying");
967 printf(", cmd 0x%x, info 0x%x",
968 xs->cmd->opcode, info);
969 break;
970 default:
971 printf(", info = %d (decimal)", info);
972 }
973 }
974 if (sense->extra_len != 0) {
975 int n;
976 printf(", data =");
977 for (n = 0; n < sense->extra_len; n++)
978 printf(" %02x",
979 sense->csi[n]);
980 }
981 printf("\n");
982 return (error);
983
984 /*
985 * Some other code, just report it
986 */
987 default:
988 #if defined(SCSIDEBUG) || defined(DEBUG)
989 {
990 static const char *uc = "undecodable sense error";
991 int i;
992 u_int8_t *cptr = (u_int8_t *) sense;
993 scsipi_printaddr(periph);
994 if (xs->cmd == &xs->cmdstore) {
995 printf("%s for opcode 0x%x, data=",
996 uc, xs->cmdstore.opcode);
997 } else {
998 printf("%s, data=", uc);
999 }
1000 for (i = 0; i < sizeof (sense); i++)
1001 printf(" 0x%02x", *(cptr++) & 0xff);
1002 printf("\n");
1003 }
1004 #else
1005 scsipi_printaddr(periph);
1006 printf("Sense Error Code 0x%x",
1007 SSD_RCODE(sense->response_code));
1008 if ((sense->response_code & SSD_RCODE_VALID) != 0) {
1009 struct scsi_sense_data_unextended *usense =
1010 (struct scsi_sense_data_unextended *)sense;
1011 printf(" at block no. %d (decimal)",
1012 _3btol(usense->block));
1013 }
1014 printf("\n");
1015 #endif
1016 return (EIO);
1017 }
1018 }
1019
1020 /*
1021 * scsipi_test_unit_ready:
1022 *
1023 * Issue a `test unit ready' request.
1024 */
1025 int
1026 scsipi_test_unit_ready(struct scsipi_periph *periph, int flags)
1027 {
1028 struct scsi_test_unit_ready cmd;
1029 int retries;
1030
1031 /* some ATAPI drives don't support TEST UNIT READY. Sigh */
1032 if (periph->periph_quirks & PQUIRK_NOTUR)
1033 return (0);
1034
1035 if (flags & XS_CTL_DISCOVERY)
1036 retries = 0;
1037 else
1038 retries = SCSIPIRETRIES;
1039
1040 memset(&cmd, 0, sizeof(cmd));
1041 cmd.opcode = SCSI_TEST_UNIT_READY;
1042
1043 return (scsipi_command(periph, (void *)&cmd, sizeof(cmd), 0, 0,
1044 retries, 10000, NULL, flags));
1045 }
1046
1047 /*
1048 * scsipi_inquire:
1049 *
1050 * Ask the device about itself.
1051 */
int
scsipi_inquire(struct scsipi_periph *periph, struct scsipi_inquiry_data *inqbuf,
    int flags)
{
	struct scsipi_inquiry cmd;
	int error;
	int retries;

	/* Probes during discovery fail fast instead of retrying. */
	if (flags & XS_CTL_DISCOVERY)
		retries = 0;
	else
		retries = SCSIPIRETRIES;

	/*
	 * If we request more data than the device can provide, it SHOULD just
	 * return a short response.  However, some devices error with an
	 * ILLEGAL REQUEST sense code, and yet others have even more special
	 * failure modes (such as the GL641USB flash adapter, which goes loony
	 * and sends corrupted CRCs).  To work around this, and to bring our
	 * behavior more in line with other OSes, we do a shorter inquiry,
	 * covering all the SCSI-2 information, first, and then request more
	 * data iff the "additional length" field indicates there is more.
	 * - mycroft, 2003/10/16
	 */
	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = INQUIRY;
	cmd.length = SCSIPI_INQUIRY_LENGTH_SCSI2;
	error = scsipi_command(periph, (void *)&cmd, sizeof(cmd),
	    (void *)inqbuf, SCSIPI_INQUIRY_LENGTH_SCSI2, retries,
	    10000, NULL, flags | XS_CTL_DATA_IN);
	if (!error &&
	    inqbuf->additional_length > SCSIPI_INQUIRY_LENGTH_SCSI2 - 4) {
		/* Device claims more data: retry with the SCSI-3 length. */
		if (inqbuf->additional_length <= SCSIPI_INQUIRY_LENGTH_SCSI3 - 4) {
#if 0
printf("inquire: addlen=%d, retrying\n", inqbuf->additional_length);
#endif
			cmd.length = SCSIPI_INQUIRY_LENGTH_SCSI3;
			error = scsipi_command(periph, (void *)&cmd, sizeof(cmd),
			    (void *)inqbuf, SCSIPI_INQUIRY_LENGTH_SCSI3, retries,
			    10000, NULL, flags | XS_CTL_DATA_IN);
#if 0
printf("inquire: error=%d\n", error);
#endif
#if 1
		} else {
printf("inquire: addlen=%d, not retrying\n", inqbuf->additional_length);
#endif
		}
	}

#ifdef SCSI_OLD_NOINQUIRY
	/*
	 * Kludge for the Adaptec ACB-4000 SCSI->MFM translator.
	 * This board doesn't support the INQUIRY command at all.
	 */
	if (error == EINVAL || error == EACCES) {
		/*
		 * Conjure up an INQUIRY response.
		 */
		inqbuf->device = (error == EINVAL ?
		    SID_QUAL_LU_PRESENT :
		    SID_QUAL_LU_NOTPRESENT) | T_DIRECT;
		inqbuf->dev_qual2 = 0;
		inqbuf->version = 0;
		inqbuf->response_format = SID_FORMAT_SCSI1;
		inqbuf->additional_length = SCSIPI_INQUIRY_LENGTH_SCSI2 - 4;
		inqbuf->flags1 = inqbuf->flags2 = inqbuf->flags3 = 0;
		memcpy(inqbuf->vendor, "ADAPTEC ACB-4000 ", 28);
		error = 0;
	}

	/*
	 * Kludge for the Emulex MT-02 SCSI->QIC translator.
	 * This board gives an empty response to an INQUIRY command.
	 */
	else if (error == 0 &&
	    inqbuf->device == (SID_QUAL_LU_PRESENT | T_DIRECT) &&
	    inqbuf->dev_qual2 == 0 &&
	    inqbuf->version == 0 &&
	    inqbuf->response_format == SID_FORMAT_SCSI1) {
		/*
		 * Fill out the INQUIRY response.
		 */
		inqbuf->device = (SID_QUAL_LU_PRESENT | T_SEQUENTIAL);
		inqbuf->dev_qual2 = SID_REMOVABLE;
		inqbuf->additional_length = SCSIPI_INQUIRY_LENGTH_SCSI2 - 4;
		inqbuf->flags1 = inqbuf->flags2 = inqbuf->flags3 = 0;
		memcpy(inqbuf->vendor, "EMULEX MT-02 QIC ", 28);
	}
#endif /* SCSI_OLD_NOINQUIRY */

	return error;
}
1145
1146 /*
1147 * scsipi_prevent:
1148 *
1149 * Prevent or allow the user to remove the media
1150 */
1151 int
1152 scsipi_prevent(struct scsipi_periph *periph, int type, int flags)
1153 {
1154 struct scsi_prevent_allow_medium_removal cmd;
1155
1156 if (periph->periph_quirks & PQUIRK_NODOORLOCK)
1157 return 0;
1158
1159 memset(&cmd, 0, sizeof(cmd));
1160 cmd.opcode = SCSI_PREVENT_ALLOW_MEDIUM_REMOVAL;
1161 cmd.how = type;
1162
1163 return (scsipi_command(periph, (void *)&cmd, sizeof(cmd), 0, 0,
1164 SCSIPIRETRIES, 5000, NULL, flags));
1165 }
1166
1167 /*
1168 * scsipi_start:
1169 *
1170 * Send a START UNIT.
1171 */
1172 int
1173 scsipi_start(struct scsipi_periph *periph, int type, int flags)
1174 {
1175 struct scsipi_start_stop cmd;
1176
1177 memset(&cmd, 0, sizeof(cmd));
1178 cmd.opcode = START_STOP;
1179 cmd.byte2 = 0x00;
1180 cmd.how = type;
1181
1182 return (scsipi_command(periph, (void *)&cmd, sizeof(cmd), 0, 0,
1183 SCSIPIRETRIES, (type & SSS_START) ? 60000 : 10000, NULL, flags));
1184 }
1185
1186 /*
1187 * scsipi_mode_sense, scsipi_mode_sense_big:
1188 * get a sense page from a device
1189 */
1190
1191 int
1192 scsipi_mode_sense(struct scsipi_periph *periph, int byte2, int page,
1193 struct scsi_mode_parameter_header_6 *data, int len, int flags, int retries,
1194 int timeout)
1195 {
1196 struct scsi_mode_sense_6 cmd;
1197
1198 memset(&cmd, 0, sizeof(cmd));
1199 cmd.opcode = SCSI_MODE_SENSE_6;
1200 cmd.byte2 = byte2;
1201 cmd.page = page;
1202 cmd.length = len & 0xff;
1203
1204 return (scsipi_command(periph, (void *)&cmd, sizeof(cmd),
1205 (void *)data, len, retries, timeout, NULL, flags | XS_CTL_DATA_IN));
1206 }
1207
1208 int
1209 scsipi_mode_sense_big(struct scsipi_periph *periph, int byte2, int page,
1210 struct scsi_mode_parameter_header_10 *data, int len, int flags, int retries,
1211 int timeout)
1212 {
1213 struct scsi_mode_sense_10 cmd;
1214
1215 memset(&cmd, 0, sizeof(cmd));
1216 cmd.opcode = SCSI_MODE_SENSE_10;
1217 cmd.byte2 = byte2;
1218 cmd.page = page;
1219 _lto2b(len, cmd.length);
1220
1221 return (scsipi_command(periph, (void *)&cmd, sizeof(cmd),
1222 (void *)data, len, retries, timeout, NULL, flags | XS_CTL_DATA_IN));
1223 }
1224
1225 int
1226 scsipi_mode_select(struct scsipi_periph *periph, int byte2,
1227 struct scsi_mode_parameter_header_6 *data, int len, int flags, int retries,
1228 int timeout)
1229 {
1230 struct scsi_mode_select_6 cmd;
1231
1232 memset(&cmd, 0, sizeof(cmd));
1233 cmd.opcode = SCSI_MODE_SELECT_6;
1234 cmd.byte2 = byte2;
1235 cmd.length = len & 0xff;
1236
1237 return (scsipi_command(periph, (void *)&cmd, sizeof(cmd),
1238 (void *)data, len, retries, timeout, NULL, flags | XS_CTL_DATA_OUT));
1239 }
1240
1241 int
1242 scsipi_mode_select_big(struct scsipi_periph *periph, int byte2,
1243 struct scsi_mode_parameter_header_10 *data, int len, int flags, int retries,
1244 int timeout)
1245 {
1246 struct scsi_mode_select_10 cmd;
1247
1248 memset(&cmd, 0, sizeof(cmd));
1249 cmd.opcode = SCSI_MODE_SELECT_10;
1250 cmd.byte2 = byte2;
1251 _lto2b(len, cmd.length);
1252
1253 return (scsipi_command(periph, (void *)&cmd, sizeof(cmd),
1254 (void *)data, len, retries, timeout, NULL, flags | XS_CTL_DATA_OUT));
1255 }
1256
/*
 * scsipi_done:
 *
 *	This routine is called by an adapter's interrupt handler when
 *	an xfer is completed.  It releases the adapter resource and tag
 *	held by the xfer, marks it done, and then either finishes it
 *	inline (polled, synchronous, or no-error async cases) or queues
 *	it for the channel's completion thread to perform error recovery.
 */
void
scsipi_done(struct scsipi_xfer *xs)
{
	struct scsipi_periph *periph = xs->xs_periph;
	struct scsipi_channel *chan = periph->periph_channel;
	int s, freezecnt;

	KASSERT(cold || KERNEL_LOCKED_P());

	SC_DEBUG(periph, SCSIPI_DB2, ("scsipi_done\n"));
#ifdef SCSIPI_DEBUG
	if (periph->periph_dbflags & SCSIPI_DB1)
		show_scsipi_cmd(xs);
#endif

	s = splbio();
	/*
	 * The resource this command was using is now free.
	 */
	if (xs->xs_status & XS_STS_DONE) {
		/* XXX in certain circumstances, such as a device
		 * being detached, a xs that has already been
		 * scsipi_done()'d by the main thread will be done'd
		 * again by scsibusdetach(). Putting the xs on the
		 * chan_complete queue causes list corruption and
		 * everyone dies. This prevents that, but perhaps
		 * there should be better coordination somewhere such
		 * that this won't ever happen (and can be turned into
		 * a KASSERT()).
		 */
		splx(s);
		goto out;
	}
	scsipi_put_resource(chan);
	xs->xs_periph->periph_sent--;

	/*
	 * If the command was tagged, free the tag.
	 */
	if (XS_CTL_TAGTYPE(xs) != 0)
		scsipi_put_tag(xs);
	else
		periph->periph_flags &= ~PERIPH_UNTAG;

	/* Mark the command as `done'. */
	xs->xs_status |= XS_STS_DONE;

#ifdef DIAGNOSTIC
	/* ASYNC and POLL are mutually exclusive completion modes. */
	if ((xs->xs_control & (XS_CTL_ASYNC|XS_CTL_POLL)) ==
	    (XS_CTL_ASYNC|XS_CTL_POLL))
		panic("scsipi_done: ASYNC and POLL");
#endif

	/*
	 * If the xfer had an error of any sort, freeze the
	 * periph's queue.  Freeze it again if we were requested
	 * to do so in the xfer.  (scsipi_complete() performs the
	 * matching thaw.)
	 */
	freezecnt = 0;
	if (xs->error != XS_NOERROR)
		freezecnt++;
	if (xs->xs_control & XS_CTL_FREEZE_PERIPH)
		freezecnt++;
	if (freezecnt != 0)
		scsipi_periph_freeze(periph, freezecnt);

	/*
	 * record the xfer with a pending sense, in case a SCSI reset is
	 * received before the thread is waked up.
	 */
	if (xs->error == XS_BUSY && xs->status == SCSI_CHECK) {
		periph->periph_flags |= PERIPH_SENSE;
		periph->periph_xscheck = xs;
	}

	/*
	 * If this was an xfer that was not to complete asynchronously,
	 * let the requesting thread perform error checking/handling
	 * in its context.
	 */
	if ((xs->xs_control & XS_CTL_ASYNC) == 0) {
		splx(s);
		/*
		 * If it's a polling job, just return, to unwind the
		 * call graph.  We don't need to restart the queue,
		 * because polling jobs are treated specially, and
		 * are really only used during crash dumps anyway
		 * (XXX or during boot-time autoconfiguration of
		 * ATAPI devices).
		 */
		if (xs->xs_control & XS_CTL_POLL)
			return;
		/* Synchronous submitter sleeps on the xs in scsipi_execute_xs(). */
		wakeup(xs);
		goto out;
	}

	/*
	 * Catch the extremely common case of I/O completing
	 * without error; no use in taking a context switch
	 * if we can handle it in interrupt context.
	 */
	if (xs->error == XS_NOERROR) {
		splx(s);
		(void) scsipi_complete(xs);
		goto out;
	}

	/*
	 * There is an error on this xfer.  Put it on the channel's
	 * completion queue, and wake up the completion thread.
	 */
	TAILQ_INSERT_TAIL(&chan->chan_complete, xs, channel_q);
	splx(s);
	wakeup(&chan->chan_complete);

 out:
	/*
	 * If there are more xfers on the channel's queue, attempt to
	 * run them.
	 */
	scsipi_run_queue(chan);
}
1385
/*
 * scsipi_complete:
 *
 *	Completion of a scsipi_xfer.  This is the guts of scsipi_done().
 *
 *	NOTE: This routine MUST be called with valid thread context
 *	except for the case where the following two conditions are
 *	true:
 *
 *		xs->error == XS_NOERROR
 *		XS_CTL_ASYNC is set in xs->xs_control
 *
 *	The semantics of this routine can be tricky, so here is an
 *	explanation:
 *
 *		0		Xfer completed successfully.
 *
 *		ERESTART	Xfer had an error, but was restarted.
 *
 *		anything else	Xfer had an error, return value is Unix
 *				errno.
 *
 *	If the return value is anything but ERESTART:
 *
 *		- If XS_CTL_ASYNC is set, `xs' has been freed back to
 *		  the pool.
 *		- If there is a buf associated with the xfer,
 *		  it has been biodone()'d.
 */
static int
scsipi_complete(struct scsipi_xfer *xs)
{
	struct scsipi_periph *periph = xs->xs_periph;
	struct scsipi_channel *chan = periph->periph_channel;
	int error, s;

#ifdef DIAGNOSTIC
	if ((xs->xs_control & XS_CTL_ASYNC) != 0 && xs->bp == NULL)
		panic("scsipi_complete: XS_CTL_ASYNC but no buf");
#endif
	/*
	 * If command terminated with a CHECK CONDITION, we need to issue a
	 * REQUEST_SENSE command. Once the REQUEST_SENSE has been processed
	 * we'll have the real status.
	 * Must be processed at splbio() to avoid missing a SCSI bus reset
	 * for this command.
	 */
	s = splbio();
	if (xs->error == XS_BUSY && xs->status == SCSI_CHECK) {
		/* request sense for a request sense ? */
		if (xs->xs_control & XS_CTL_REQSENSE) {
			scsipi_printaddr(periph);
			printf("request sense for a request sense ?\n");
			/* XXX maybe we should reset the device ? */
			/* we've been frozen because xs->error != XS_NOERROR */
			scsipi_periph_thaw(periph, 1);
			splx(s);
			if (xs->resid < xs->datalen) {
				/* Dump whatever partial sense data we got. */
				printf("we read %d bytes of sense anyway:\n",
				    xs->datalen - xs->resid);
				scsipi_print_sense_data((void *)xs->data, 0);
			}
			return EINVAL;
		}
		/* Fetch sense data; on return xs->error reflects the result. */
		scsipi_request_sense(xs);
	}
	splx(s);

	/*
	 * If it's a user level request, bypass all usual completion
	 * processing, let the user work it out..
	 */
	if ((xs->xs_control & XS_CTL_USERCMD) != 0) {
		SC_DEBUG(periph, SCSIPI_DB3, ("calling user done()\n"));
		/* Balance the freeze done by scsipi_done() on error. */
		if (xs->error != XS_NOERROR)
			scsipi_periph_thaw(periph, 1);
		scsipi_user_done(xs);
		SC_DEBUG(periph, SCSIPI_DB3, ("returned from user done()\n "));
		return 0;
	}

	/* Map the transport-level error to a Unix errno / restart decision. */
	switch (xs->error) {
	case XS_NOERROR:
		error = 0;
		break;

	case XS_SENSE:
	case XS_SHORTSENSE:
		/* Bus-specific sense interpretation (scsi vs. atapi). */
		error = (*chan->chan_bustype->bustype_interpret_sense)(xs);
		break;

	case XS_RESOURCE_SHORTAGE:
		/*
		 * XXX Should freeze channel's queue.
		 */
		scsipi_printaddr(periph);
		printf("adapter resource shortage\n");
		/* FALLTHROUGH */

	case XS_BUSY:
		if (xs->error == XS_BUSY && xs->status == SCSI_QUEUE_FULL) {
			struct scsipi_max_openings mo;

			/*
			 * We set the openings to active - 1, assuming that
			 * the command that got us here is the first one that
			 * can't fit into the device's queue.  If that's not
			 * the case, I guess we'll find out soon enough.
			 */
			mo.mo_target = periph->periph_target;
			mo.mo_lun = periph->periph_lun;
			if (periph->periph_active < periph->periph_openings)
				mo.mo_openings = periph->periph_active - 1;
			else
				mo.mo_openings = periph->periph_openings - 1;
#ifdef DIAGNOSTIC
			if (mo.mo_openings < 0) {
				scsipi_printaddr(periph);
				printf("QUEUE FULL resulted in < 0 openings\n");
				panic("scsipi_done");
			}
#endif
			if (mo.mo_openings == 0) {
				scsipi_printaddr(periph);
				printf("QUEUE FULL resulted in 0 openings\n");
				mo.mo_openings = 1;
			}
			scsipi_async_event(chan, ASYNC_EVENT_MAX_OPENINGS, &mo);
			error = ERESTART;
		} else if (xs->xs_retries != 0) {
			xs->xs_retries--;
			/*
			 * Wait one second, and try again.
			 */
			if ((xs->xs_control & XS_CTL_POLL) ||
			    (chan->chan_flags & SCSIPI_CHAN_TACTIVE) == 0) {
				/* XXX: quite extreme */
				kpause("xsbusy", false, hz, NULL);
			} else if (!callout_pending(&periph->periph_callout)) {
				/* Freeze now; the callout thaws in one second. */
				scsipi_periph_freeze(periph, 1);
				callout_reset(&periph->periph_callout,
				    hz, scsipi_periph_timed_thaw, periph);
			}
			error = ERESTART;
		} else
			error = EBUSY;
		break;

	case XS_REQUEUE:
		error = ERESTART;
		break;

	case XS_SELTIMEOUT:
	case XS_TIMEOUT:
		/*
		 * If the device hasn't gone away, honor retry counts.
		 *
		 * Note that if we're in the middle of probing it,
		 * it won't be found because it isn't here yet so
		 * we won't honor the retry count in that case.
		 */
		if (scsipi_lookup_periph(chan, periph->periph_target,
		    periph->periph_lun) && xs->xs_retries != 0) {
			xs->xs_retries--;
			error = ERESTART;
		} else
			error = EIO;
		break;

	case XS_RESET:
		if (xs->xs_control & XS_CTL_REQSENSE) {
			/*
			 * request sense interrupted by reset: signal it
			 * with EINTR return code.
			 */
			error = EINTR;
		} else {
			if (xs->xs_retries != 0) {
				xs->xs_retries--;
				error = ERESTART;
			} else
				error = EIO;
		}
		break;

	case XS_DRIVER_STUFFUP:
		scsipi_printaddr(periph);
		printf("generic HBA error\n");
		error = EIO;
		break;
	default:
		scsipi_printaddr(periph);
		printf("invalid return code from adapter: %d\n", xs->error);
		error = EIO;
		break;
	}

	s = splbio();
	if (error == ERESTART) {
		/*
		 * If we get here, the periph has been thawed and frozen
		 * again if we had to issue recovery commands.  Alternatively,
		 * it may have been frozen again and in a timed thaw.  In
		 * any case, we thaw the periph once we re-enqueue the
		 * command.  Once the periph is fully thawed, it will begin
		 * operation again.
		 */
		xs->error = XS_NOERROR;
		xs->status = SCSI_OK;
		xs->xs_status &= ~XS_STS_DONE;
		xs->xs_requeuecnt++;
		error = scsipi_enqueue(xs);
		if (error == 0) {
			scsipi_periph_thaw(periph, 1);
			splx(s);
			return (ERESTART);
		}
		/* Re-enqueue failed; fall through and fail the xfer. */
	}

	/*
	 * scsipi_done() freezes the queue if not XS_NOERROR.
	 * Thaw it here.
	 */
	if (xs->error != XS_NOERROR)
		scsipi_periph_thaw(periph, 1);

	/* Give the periph driver a chance at final (e.g. biodone) handling. */
	if (periph->periph_switch->psw_done)
		periph->periph_switch->psw_done(xs, error);

	/* Async xfers have no waiter to free them; release here. */
	if (xs->xs_control & XS_CTL_ASYNC)
		scsipi_put_xs(xs);
	splx(s);

	return (error);
}
1621
1622 /*
1623 * Issue a request sense for the given scsipi_xfer. Called when the xfer
1624 * returns with a CHECK_CONDITION status. Must be called in valid thread
1625 * context and at splbio().
1626 */
1627
1628 static void
1629 scsipi_request_sense(struct scsipi_xfer *xs)
1630 {
1631 struct scsipi_periph *periph = xs->xs_periph;
1632 int flags, error;
1633 struct scsi_request_sense cmd;
1634
1635 periph->periph_flags |= PERIPH_SENSE;
1636
1637 /* if command was polling, request sense will too */
1638 flags = xs->xs_control & XS_CTL_POLL;
1639 /* Polling commands can't sleep */
1640 if (flags)
1641 flags |= XS_CTL_NOSLEEP;
1642
1643 flags |= XS_CTL_REQSENSE | XS_CTL_URGENT | XS_CTL_DATA_IN |
1644 XS_CTL_THAW_PERIPH | XS_CTL_FREEZE_PERIPH;
1645
1646 memset(&cmd, 0, sizeof(cmd));
1647 cmd.opcode = SCSI_REQUEST_SENSE;
1648 cmd.length = sizeof(struct scsi_sense_data);
1649
1650 error = scsipi_command(periph, (void *)&cmd, sizeof(cmd),
1651 (void *)&xs->sense.scsi_sense, sizeof(struct scsi_sense_data),
1652 0, 1000, NULL, flags);
1653 periph->periph_flags &= ~PERIPH_SENSE;
1654 periph->periph_xscheck = NULL;
1655 switch (error) {
1656 case 0:
1657 /* we have a valid sense */
1658 xs->error = XS_SENSE;
1659 return;
1660 case EINTR:
1661 /* REQUEST_SENSE interrupted by bus reset. */
1662 xs->error = XS_RESET;
1663 return;
1664 case EIO:
1665 /* request sense coudn't be performed */
1666 /*
1667 * XXX this isn't quite right but we don't have anything
1668 * better for now
1669 */
1670 xs->error = XS_DRIVER_STUFFUP;
1671 return;
1672 default:
1673 /* Notify that request sense failed. */
1674 xs->error = XS_DRIVER_STUFFUP;
1675 scsipi_printaddr(periph);
1676 printf("request sense failed with error %d\n", error);
1677 return;
1678 }
1679 }
1680
/*
 * scsipi_enqueue:
 *
 *	Enqueue an xfer on a channel.  Returns 0 on success, EAGAIN if a
 *	polled xfer cannot be accepted because other work is queued.
 *	Thaws the periph if the xfer carries XS_CTL_THAW_PERIPH.
 */
static int
scsipi_enqueue(struct scsipi_xfer *xs)
{
	struct scsipi_channel *chan = xs->xs_periph->periph_channel;
	struct scsipi_xfer *qxs;
	int s;

	/* splbio protects the channel queue against interrupt-time access. */
	s = splbio();

	/*
	 * If the xfer is to be polled, and there are already jobs on
	 * the queue, we can't proceed.
	 */
	if ((xs->xs_control & XS_CTL_POLL) != 0 &&
	    TAILQ_FIRST(&chan->chan_queue) != NULL) {
		splx(s);
		xs->error = XS_DRIVER_STUFFUP;
		return (EAGAIN);
	}

	/*
	 * If we have an URGENT xfer, it's an error recovery command
	 * and it should just go on the head of the channel's queue.
	 */
	if (xs->xs_control & XS_CTL_URGENT) {
		TAILQ_INSERT_HEAD(&chan->chan_queue, xs, channel_q);
		goto out;
	}

	/*
	 * If this xfer has already been on the queue before, we
	 * need to reinsert it in the correct order.  That order is:
	 *
	 *	Immediately before the first xfer for this periph
	 *	with a requeuecnt less than xs->xs_requeuecnt.
	 *
	 * Failing that, at the end of the queue.  (We'll end up
	 * there naturally.)
	 */
	if (xs->xs_requeuecnt != 0) {
		for (qxs = TAILQ_FIRST(&chan->chan_queue); qxs != NULL;
		     qxs = TAILQ_NEXT(qxs, channel_q)) {
			if (qxs->xs_periph == xs->xs_periph &&
			    qxs->xs_requeuecnt < xs->xs_requeuecnt)
				break;
		}
		if (qxs != NULL) {
			/* NOTE(review): inserts AFTER qxs, though the comment
			 * above says "immediately before" — confirm intent. */
			TAILQ_INSERT_AFTER(&chan->chan_queue, qxs, xs,
			    channel_q);
			goto out;
		}
	}
	TAILQ_INSERT_TAIL(&chan->chan_queue, xs, channel_q);
 out:
	/* Requeue path: balance the freeze taken before the retry. */
	if (xs->xs_control & XS_CTL_THAW_PERIPH)
		scsipi_periph_thaw(xs->xs_periph, 1);
	splx(s);
	return (0);
}
1745
/*
 * scsipi_run_queue:
 *
 *	Start as many xfers as possible running on the channel.
 *	Loops until the channel is frozen, no eligible xfer remains,
 *	or adapter resources are exhausted.
 */
static void
scsipi_run_queue(struct scsipi_channel *chan)
{
	struct scsipi_xfer *xs;
	struct scsipi_periph *periph;
	int s;

	for (;;) {
		s = splbio();

		/*
		 * If the channel is frozen, we can't do any work right
		 * now.
		 */
		if (chan->chan_qfreeze != 0) {
			splx(s);
			return;
		}

		/*
		 * Look for work to do, and make sure we can do it.
		 */
		for (xs = TAILQ_FIRST(&chan->chan_queue); xs != NULL;
		     xs = TAILQ_NEXT(xs, channel_q)) {
			periph = xs->xs_periph;

			/*
			 * Skip periphs that are saturated, frozen, or
			 * already running an untagged command.
			 */
			if ((periph->periph_sent >= periph->periph_openings) ||
			    periph->periph_qfreeze != 0 ||
			    (periph->periph_flags & PERIPH_UNTAG) != 0)
				continue;

			/*
			 * During recovery/sense processing, only URGENT
			 * (error recovery) commands may be issued.
			 */
			if ((periph->periph_flags &
			    (PERIPH_RECOVERING | PERIPH_SENSE)) != 0 &&
			    (xs->xs_control & XS_CTL_URGENT) == 0)
				continue;

			/*
			 * We can issue this xfer!
			 */
			goto got_one;
		}

		/*
		 * Can't find any work to do right now.
		 */
		splx(s);
		return;

 got_one:
		/*
		 * Have an xfer to run.  Allocate a resource from
		 * the adapter to run it.  If we can't allocate that
		 * resource, we don't dequeue the xfer.
		 */
		if (scsipi_get_resource(chan) == 0) {
			/*
			 * Adapter is out of resources.  If the adapter
			 * supports it, attempt to grow them.
			 */
			if (scsipi_grow_resources(chan) == 0) {
				/*
				 * Wasn't able to grow resources,
				 * nothing more we can do.
				 */
				if (xs->xs_control & XS_CTL_POLL) {
					scsipi_printaddr(xs->xs_periph);
					printf("polling command but no "
					    "adapter resources");
					/* We'll panic shortly... */
				}
				splx(s);

				/*
				 * XXX: We should be able to note that
				 * XXX: that resources are needed here!
				 */
				return;
			}
			/*
			 * scsipi_grow_resources() allocated the resource
			 * for us.
			 */
		}

		/*
		 * We have a resource to run this xfer, do it!
		 */
		TAILQ_REMOVE(&chan->chan_queue, xs, channel_q);

		/*
		 * If the command is to be tagged, allocate a tag ID
		 * for it.
		 */
		if (XS_CTL_TAGTYPE(xs) != 0)
			scsipi_get_tag(xs);
		else
			periph->periph_flags |= PERIPH_UNTAG;
		periph->periph_sent++;
		splx(s);

		/* Hand the xfer to the adapter outside of splbio. */
		scsipi_adapter_request(chan, ADAPTER_REQ_RUN_XFER, xs);
	}
#ifdef DIAGNOSTIC
	panic("scsipi_run_queue: impossible");
#endif
}
1857
/*
 * scsipi_execute_xs:
 *
 *	Begin execution of an xfer, waiting for it to complete, if necessary.
 *	Returns 0 for async or successful xfers, otherwise a Unix errno
 *	from scsipi_complete().
 */
int
scsipi_execute_xs(struct scsipi_xfer *xs)
{
	struct scsipi_periph *periph = xs->xs_periph;
	struct scsipi_channel *chan = periph->periph_channel;
	int oasync, async, poll, error, s;

	KASSERT(!cold);
	KASSERT(KERNEL_LOCKED_P());

	/* Let the bus layer (scsi/atapi) massage the CDB first. */
	(chan->chan_bustype->bustype_cmd)(xs);

	/* Reset per-attempt completion state. */
	xs->xs_status &= ~XS_STS_DONE;
	xs->error = XS_NOERROR;
	xs->resid = xs->datalen;
	xs->status = SCSI_OK;

#ifdef SCSIPI_DEBUG
	if (xs->xs_periph->periph_dbflags & SCSIPI_DB3) {
		printf("scsipi_execute_xs: ");
		show_scsipi_xs(xs);
		printf("\n");
	}
#endif

	/*
	 * Deal with command tagging:
	 *
	 *	- If the device's current operating mode doesn't
	 *	  include tagged queueing, clear the tag mask.
	 *
	 *	- If the device's current operating mode *does*
	 *	  include tagged queueing, set the tag_type in
	 *	  the xfer to the appropriate byte for the tag
	 *	  message.
	 */
	if ((PERIPH_XFER_MODE(periph) & PERIPH_CAP_TQING) == 0 ||
	    (xs->xs_control & XS_CTL_REQSENSE)) {
		xs->xs_control &= ~XS_CTL_TAGMASK;
		xs->xs_tag_type = 0;
	} else {
		/*
		 * If the request doesn't specify a tag, give Head
		 * tags to URGENT operations and Ordered tags to
		 * everything else.
		 */
		if (XS_CTL_TAGTYPE(xs) == 0) {
			if (xs->xs_control & XS_CTL_URGENT)
				xs->xs_control |= XS_CTL_HEAD_TAG;
			else
				xs->xs_control |= XS_CTL_ORDERED_TAG;
		}

		switch (XS_CTL_TAGTYPE(xs)) {
		case XS_CTL_ORDERED_TAG:
			xs->xs_tag_type = MSG_ORDERED_Q_TAG;
			break;

		case XS_CTL_SIMPLE_TAG:
			xs->xs_tag_type = MSG_SIMPLE_Q_TAG;
			break;

		case XS_CTL_HEAD_TAG:
			xs->xs_tag_type = MSG_HEAD_OF_Q_TAG;
			break;

		default:
			scsipi_printaddr(periph);
			printf("invalid tag mask 0x%08x\n",
			    XS_CTL_TAGTYPE(xs));
			panic("scsipi_execute_xs");
		}
	}

	/* If the adaptor wants us to poll, poll. */
	if (chan->chan_adapter->adapt_flags & SCSIPI_ADAPT_POLL_ONLY)
		xs->xs_control |= XS_CTL_POLL;

	/*
	 * If we don't yet have a completion thread, or we are to poll for
	 * completion, clear the ASYNC flag.
	 */
	oasync =  (xs->xs_control & XS_CTL_ASYNC);
	if (chan->chan_thread == NULL || (xs->xs_control & XS_CTL_POLL) != 0)
		xs->xs_control &= ~XS_CTL_ASYNC;

	async = (xs->xs_control & XS_CTL_ASYNC);
	poll = (xs->xs_control & XS_CTL_POLL);

#ifdef DIAGNOSTIC
	if (oasync != 0 && xs->bp == NULL)
		panic("scsipi_execute_xs: XS_CTL_ASYNC but no buf");
#endif

	/*
	 * Enqueue the transfer.  If we're not polling for completion, this
	 * should ALWAYS return `no error'.
	 */
	error = scsipi_enqueue(xs);
	if (error) {
		if (poll == 0) {
			scsipi_printaddr(periph);
			printf("not polling, but enqueue failed with %d\n",
			    error);
			panic("scsipi_execute_xs");
		}

		scsipi_printaddr(periph);
		printf("should have flushed queue?\n");
		goto free_xs;
	}

 restarted:
	scsipi_run_queue(chan);

	/*
	 * The xfer is enqueued, and possibly running.  If it's to be
	 * completed asynchronously, just return now.
	 */
	if (async)
		return (0);

	/*
	 * Not an asynchronous command; wait for it to complete.
	 * scsipi_done() wakes us on the xs once XS_STS_DONE is set.
	 */
	s = splbio();
	while ((xs->xs_status & XS_STS_DONE) == 0) {
		if (poll) {
			scsipi_printaddr(periph);
			printf("polling command not done\n");
			panic("scsipi_execute_xs");
		}
		(void) tsleep(xs, PRIBIO, "xscmd", 0);
	}
	splx(s);

	/*
	 * Command is complete.  scsipi_done() has awakened us to perform
	 * the error handling.
	 */
	error = scsipi_complete(xs);
	if (error == ERESTART)
		goto restarted;

	/*
	 * If it was meant to run async and we cleared async ourselves,
	 * don't return an error here.  It has already been handled.
	 */
	if (oasync)
		error = 0;
	/*
	 * Command completed successfully or fatal error occurred.  Fall
	 * into....
	 */
 free_xs:
	s = splbio();
	scsipi_put_xs(xs);
	splx(s);

	/*
	 * Kick the queue, keep it running in case it stopped for some
	 * reason.
	 */
	scsipi_run_queue(chan);

	return (error);
}
2030
/*
 * scsipi_completion_thread:
 *
 *	This is the completion thread.  We wait for errors on
 *	asynchronous xfers, and perform the error handling
 *	function, restarting the command, if necessary.
 *
 *	It also services channel-level requests signalled through
 *	chan_tflags: callbacks, resource growing, queue kicks and
 *	shutdown.
 */
static void
scsipi_completion_thread(void *arg)
{
	struct scsipi_channel *chan = arg;
	struct scsipi_xfer *xs;
	int s;

	/* Run the channel's optional init hook in thread context. */
	if (chan->chan_init_cb)
		(*chan->chan_init_cb)(chan, chan->chan_init_cb_arg);

	s = splbio();
	chan->chan_flags |= SCSIPI_CHAN_TACTIVE;
	splx(s);
	for (;;) {
		s = splbio();
		xs = TAILQ_FIRST(&chan->chan_complete);
		if (xs == NULL && chan->chan_tflags  == 0) {
			/* nothing to do; wait */
			(void) tsleep(&chan->chan_complete, PRIBIO,
			    "sccomp", 0);
			splx(s);
			continue;
		}
		if (chan->chan_tflags & SCSIPI_CHANT_CALLBACK) {
			/* call chan_callback from thread context */
			chan->chan_tflags &= ~SCSIPI_CHANT_CALLBACK;
			chan->chan_callback(chan, chan->chan_callback_arg);
			splx(s);
			continue;
		}
		if (chan->chan_tflags & SCSIPI_CHANT_GROWRES) {
			/* attempt to get more openings for this channel */
			chan->chan_tflags &= ~SCSIPI_CHANT_GROWRES;
			scsipi_adapter_request(chan,
			    ADAPTER_REQ_GROW_RESOURCES, NULL);
			scsipi_channel_thaw(chan, 1);
			splx(s);
			/* If the request was re-posted, back off briefly. */
			if (chan->chan_tflags & SCSIPI_CHANT_GROWRES)
				kpause("scsizzz", FALSE, hz/10, NULL);
			continue;
		}
		if (chan->chan_tflags & SCSIPI_CHANT_KICK) {
			/* explicitly run the queues for this channel */
			chan->chan_tflags &= ~SCSIPI_CHANT_KICK;
			scsipi_run_queue(chan);
			splx(s);
			continue;
		}
		if (chan->chan_tflags & SCSIPI_CHANT_SHUTDOWN) {
			splx(s);
			break;
		}
		if (xs) {
			TAILQ_REMOVE(&chan->chan_complete, xs, channel_q);
			splx(s);

			/*
			 * Have an xfer with an error; process it.
			 */
			(void) scsipi_complete(xs);

			/*
			 * Kick the queue; keep it running if it was stopped
			 * for some reason.
			 */
			scsipi_run_queue(chan);
		} else {
			splx(s);
		}
	}

	chan->chan_thread = NULL;

	/* In case parent is waiting for us to exit. */
	wakeup(&chan->chan_thread);

	kthread_exit(0);
}
2116 /*
2117 * scsipi_thread_call_callback:
2118 *
2119 * request to call a callback from the completion thread
2120 */
2121 int
2122 scsipi_thread_call_callback(struct scsipi_channel *chan,
2123 void (*callback)(struct scsipi_channel *, void *), void *arg)
2124 {
2125 int s;
2126
2127 s = splbio();
2128 if ((chan->chan_flags & SCSIPI_CHAN_TACTIVE) == 0) {
2129 /* kernel thread doesn't exist yet */
2130 splx(s);
2131 return ESRCH;
2132 }
2133 if (chan->chan_tflags & SCSIPI_CHANT_CALLBACK) {
2134 splx(s);
2135 return EBUSY;
2136 }
2137 scsipi_channel_freeze(chan, 1);
2138 chan->chan_callback = callback;
2139 chan->chan_callback_arg = arg;
2140 chan->chan_tflags |= SCSIPI_CHANT_CALLBACK;
2141 wakeup(&chan->chan_complete);
2142 splx(s);
2143 return(0);
2144 }
2145
2146 /*
2147 * scsipi_async_event:
2148 *
2149 * Handle an asynchronous event from an adapter.
2150 */
2151 void
2152 scsipi_async_event(struct scsipi_channel *chan, scsipi_async_event_t event,
2153 void *arg)
2154 {
2155 int s;
2156
2157 s = splbio();
2158 switch (event) {
2159 case ASYNC_EVENT_MAX_OPENINGS:
2160 scsipi_async_event_max_openings(chan,
2161 (struct scsipi_max_openings *)arg);
2162 break;
2163
2164 case ASYNC_EVENT_XFER_MODE:
2165 if (chan->chan_bustype->bustype_async_event_xfer_mode) {
2166 chan->chan_bustype->bustype_async_event_xfer_mode(
2167 chan, arg);
2168 }
2169 break;
2170 case ASYNC_EVENT_RESET:
2171 scsipi_async_event_channel_reset(chan);
2172 break;
2173 }
2174 splx(s);
2175 }
2176
2177 /*
2178 * scsipi_async_event_max_openings:
2179 *
2180 * Update the maximum number of outstanding commands a
2181 * device may have.
2182 */
2183 static void
2184 scsipi_async_event_max_openings(struct scsipi_channel *chan,
2185 struct scsipi_max_openings *mo)
2186 {
2187 struct scsipi_periph *periph;
2188 int minlun, maxlun;
2189
2190 if (mo->mo_lun == -1) {
2191 /*
2192 * Wildcarded; apply it to all LUNs.
2193 */
2194 minlun = 0;
2195 maxlun = chan->chan_nluns - 1;
2196 } else
2197 minlun = maxlun = mo->mo_lun;
2198
2199 /* XXX This could really suck with a large LUN space. */
2200 for (; minlun <= maxlun; minlun++) {
2201 periph = scsipi_lookup_periph(chan, mo->mo_target, minlun);
2202 if (periph == NULL)
2203 continue;
2204
2205 if (mo->mo_openings < periph->periph_openings)
2206 periph->periph_openings = mo->mo_openings;
2207 else if (mo->mo_openings > periph->periph_openings &&
2208 (periph->periph_flags & PERIPH_GROW_OPENINGS) != 0)
2209 periph->periph_openings = mo->mo_openings;
2210 }
2211 }
2212
2213 /*
2214 * scsipi_set_xfer_mode:
2215 *
2216 * Set the xfer mode for the specified I_T Nexus.
2217 */
2218 void
2219 scsipi_set_xfer_mode(struct scsipi_channel *chan, int target, int immed)
2220 {
2221 struct scsipi_xfer_mode xm;
2222 struct scsipi_periph *itperiph;
2223 int lun, s;
2224
2225 /*
2226 * Go to the minimal xfer mode.
2227 */
2228 xm.xm_target = target;
2229 xm.xm_mode = 0;
2230 xm.xm_period = 0; /* ignored */
2231 xm.xm_offset = 0; /* ignored */
2232
2233 /*
2234 * Find the first LUN we know about on this I_T Nexus.
2235 */
2236 for (itperiph = NULL, lun = 0; lun < chan->chan_nluns; lun++) {
2237 itperiph = scsipi_lookup_periph(chan, target, lun);
2238 if (itperiph != NULL)
2239 break;
2240 }
2241 if (itperiph != NULL) {
2242 xm.xm_mode = itperiph->periph_cap;
2243 /*
2244 * Now issue the request to the adapter.
2245 */
2246 s = splbio();
2247 scsipi_adapter_request(chan, ADAPTER_REQ_SET_XFER_MODE, &xm);
2248 splx(s);
2249 /*
2250 * If we want this to happen immediately, issue a dummy
2251 * command, since most adapters can't really negotiate unless
2252 * they're executing a job.
2253 */
2254 if (immed != 0) {
2255 (void) scsipi_test_unit_ready(itperiph,
2256 XS_CTL_DISCOVERY | XS_CTL_IGNORE_ILLEGAL_REQUEST |
2257 XS_CTL_IGNORE_NOT_READY |
2258 XS_CTL_IGNORE_MEDIA_CHANGE);
2259 }
2260 }
2261 }
2262
/*
 * scsipi_channel_reset:
 *
 *	handle scsi bus reset
 *	called at splbio
 */
static void
scsipi_async_event_channel_reset(struct scsipi_channel *chan)
{
	struct scsipi_xfer *xs, *xs_next;
	struct scsipi_periph *periph;
	int target, lun;

	/*
	 * Channel has been reset. Also mark as reset pending REQUEST_SENSE
	 * commands; as the sense is not available any more.
	 * can't call scsipi_done() from here, as the command has not been
	 * sent to the adapter yet (this would corrupt accounting).
	 */

	/* Manual safe-traversal: xs_next is saved before removal. */
	for (xs = TAILQ_FIRST(&chan->chan_queue); xs != NULL; xs = xs_next) {
		xs_next = TAILQ_NEXT(xs, channel_q);
		if (xs->xs_control & XS_CTL_REQSENSE) {
			TAILQ_REMOVE(&chan->chan_queue, xs, channel_q);
			xs->error = XS_RESET;
			/* Async xfers go to the completion thread for
			 * error processing; others are handled by their
			 * waiting/polling submitter. */
			if ((xs->xs_control & XS_CTL_ASYNC) != 0)
				TAILQ_INSERT_TAIL(&chan->chan_complete, xs,
				    channel_q);
		}
	}
	wakeup(&chan->chan_complete);
	/* Catch xs with pending sense which may not have a REQSENSE xs yet */
	for (target = 0; target < chan->chan_ntargets; target++) {
		if (target == chan->chan_id)
			continue;
		for (lun = 0; lun <  chan->chan_nluns; lun++) {
			periph = scsipi_lookup_periph(chan, target, lun);
			if (periph) {
				xs = periph->periph_xscheck;
				if (xs)
					xs->error = XS_RESET;
			}
		}
	}
}
2308
2309 /*
2310 * scsipi_target_detach:
2311 *
2312 * detach all periph associated with a I_T
2313 * must be called from valid thread context
2314 */
2315 int
2316 scsipi_target_detach(struct scsipi_channel *chan, int target, int lun,
2317 int flags)
2318 {
2319 struct scsipi_periph *periph;
2320 int ctarget, mintarget, maxtarget;
2321 int clun, minlun, maxlun;
2322 int error;
2323
2324 if (target == -1) {
2325 mintarget = 0;
2326 maxtarget = chan->chan_ntargets;
2327 } else {
2328 if (target == chan->chan_id)
2329 return EINVAL;
2330 if (target < 0 || target >= chan->chan_ntargets)
2331 return EINVAL;
2332 mintarget = target;
2333 maxtarget = target + 1;
2334 }
2335
2336 if (lun == -1) {
2337 minlun = 0;
2338 maxlun = chan->chan_nluns;
2339 } else {
2340 if (lun < 0 || lun >= chan->chan_nluns)
2341 return EINVAL;
2342 minlun = lun;
2343 maxlun = lun + 1;
2344 }
2345
2346 for (ctarget = mintarget; ctarget < maxtarget; ctarget++) {
2347 if (ctarget == chan->chan_id)
2348 continue;
2349
2350 for (clun = minlun; clun < maxlun; clun++) {
2351 periph = scsipi_lookup_periph(chan, ctarget, clun);
2352 if (periph == NULL)
2353 continue;
2354 error = config_detach(periph->periph_dev, flags);
2355 if (error)
2356 return (error);
2357 }
2358 }
2359 return(0);
2360 }
2361
2362 /*
2363 * scsipi_adapter_addref:
2364 *
2365 * Add a reference to the adapter pointed to by the provided
2366 * link, enabling the adapter if necessary.
2367 */
2368 int
2369 scsipi_adapter_addref(struct scsipi_adapter *adapt)
2370 {
2371 int s, error = 0;
2372
2373 s = splbio();
2374 if (adapt->adapt_refcnt++ == 0 && adapt->adapt_enable != NULL) {
2375 error = (*adapt->adapt_enable)(adapt->adapt_dev, 1);
2376 if (error)
2377 adapt->adapt_refcnt--;
2378 }
2379 splx(s);
2380 return (error);
2381 }
2382
2383 /*
2384 * scsipi_adapter_delref:
2385 *
2386 * Delete a reference to the adapter pointed to by the provided
2387 * link, disabling the adapter if possible.
2388 */
2389 void
2390 scsipi_adapter_delref(struct scsipi_adapter *adapt)
2391 {
2392 int s;
2393
2394 s = splbio();
2395 if (adapt->adapt_refcnt-- == 1 && adapt->adapt_enable != NULL)
2396 (void) (*adapt->adapt_enable)(adapt->adapt_dev, 0);
2397 splx(s);
2398 }
2399
/*
 * Table of the "special" SCSI synchronous transfer period factors
 * (the factor values that do not follow the plain period = factor * 4ns
 * encoding) and their transfer periods in units of ns * 100.
 * Anything not in this table uses the factor * 4ns rule.
 */
static struct scsipi_syncparam {
	int	ss_factor;	/* sync factor as carried in SDTR/PPR */
	int	ss_period;	/* ns * 100 */
} scsipi_syncparams[] = {
	{ 0x08,		 625 },	/* FAST-160 (Ultra320) */
	{ 0x09,		1250 },	/* FAST-80 (Ultra160) */
	{ 0x0a,		2500 },	/* FAST-40 40MHz (Ultra2) */
	{ 0x0b,		3030 },	/* FAST-40 33MHz (Ultra2) */
	{ 0x0c,		5000 },	/* FAST-20 (Ultra) */
};
/* Number of entries in scsipi_syncparams[]. */
static const int scsipi_nsyncparams =
    sizeof(scsipi_syncparams) / sizeof(scsipi_syncparams[0]);
2412
2413 int
2414 scsipi_sync_period_to_factor(int period /* ns * 100 */)
2415 {
2416 int i;
2417
2418 for (i = 0; i < scsipi_nsyncparams; i++) {
2419 if (period <= scsipi_syncparams[i].ss_period)
2420 return (scsipi_syncparams[i].ss_factor);
2421 }
2422
2423 return ((period / 100) / 4);
2424 }
2425
2426 int
2427 scsipi_sync_factor_to_period(int factor)
2428 {
2429 int i;
2430
2431 for (i = 0; i < scsipi_nsyncparams; i++) {
2432 if (factor == scsipi_syncparams[i].ss_factor)
2433 return (scsipi_syncparams[i].ss_period);
2434 }
2435
2436 return ((factor * 4) * 100);
2437 }
2438
2439 int
2440 scsipi_sync_factor_to_freq(int factor)
2441 {
2442 int i;
2443
2444 for (i = 0; i < scsipi_nsyncparams; i++) {
2445 if (factor == scsipi_syncparams[i].ss_factor)
2446 return (100000000 / scsipi_syncparams[i].ss_period);
2447 }
2448
2449 return (10000000 / ((factor * 4) * 10));
2450 }
2451
2452 #ifdef SCSIPI_DEBUG
2453 /*
2454 * Given a scsipi_xfer, dump the request, in all it's glory
2455 */
2456 void
2457 show_scsipi_xs(struct scsipi_xfer *xs)
2458 {
2459
2460 printf("xs(%p): ", xs);
2461 printf("xs_control(0x%08x)", xs->xs_control);
2462 printf("xs_status(0x%08x)", xs->xs_status);
2463 printf("periph(%p)", xs->xs_periph);
2464 printf("retr(0x%x)", xs->xs_retries);
2465 printf("timo(0x%x)", xs->timeout);
2466 printf("cmd(%p)", xs->cmd);
2467 printf("len(0x%x)", xs->cmdlen);
2468 printf("data(%p)", xs->data);
2469 printf("len(0x%x)", xs->datalen);
2470 printf("res(0x%x)", xs->resid);
2471 printf("err(0x%x)", xs->error);
2472 printf("bp(%p)", xs->bp);
2473 show_scsipi_cmd(xs);
2474 }
2475
2476 void
2477 show_scsipi_cmd(struct scsipi_xfer *xs)
2478 {
2479 u_char *b = (u_char *) xs->cmd;
2480 int i = 0;
2481
2482 scsipi_printaddr(xs->xs_periph);
2483 printf(" command: ");
2484
2485 if ((xs->xs_control & XS_CTL_RESET) == 0) {
2486 while (i < xs->cmdlen) {
2487 if (i)
2488 printf(",");
2489 printf("0x%x", b[i++]);
2490 }
2491 printf("-[%d bytes]\n", xs->datalen);
2492 if (xs->datalen)
2493 show_mem(xs->data, min(64, xs->datalen));
2494 } else
2495 printf("-RESET-\n");
2496 }
2497
2498 void
2499 show_mem(u_char *address, int num)
2500 {
2501 int x;
2502
2503 printf("------------------------------");
2504 for (x = 0; x < num; x++) {
2505 if ((x % 16) == 0)
2506 printf("\n%03d: ", x);
2507 printf("%02x ", *address++);
2508 }
2509 printf("\n------------------------------\n");
2510 }
2511 #endif /* SCSIPI_DEBUG */
2512