/*	$NetBSD: scsipi_base.c,v 1.115 2004/09/17 23:43:17 mycroft Exp $	*/

/*-
 * Copyright (c) 1998, 1999, 2000, 2002, 2003, 2004 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Charles M. Hannum; by Jason R. Thorpe of the Numerical Aerospace
 * Simulation Facility, NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: scsipi_base.c,v 1.115 2004/09/17 23:43:17 mycroft Exp $");

#include "opt_scsi.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/buf.h>
#include <sys/uio.h>
#include <sys/malloc.h>
#include <sys/pool.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/proc.h>
#include <sys/kthread.h>
#include <sys/hash.h>

#include <uvm/uvm_extern.h>

#include <dev/scsipi/scsipi_all.h>
#include <dev/scsipi/scsipi_disk.h>
#include <dev/scsipi/scsipiconf.h>
#include <dev/scsipi/scsipi_base.h>

#include <dev/scsipi/scsi_all.h>
#include <dev/scsipi/scsi_message.h>

static int	scsipi_complete(struct scsipi_xfer *);
static void	scsipi_request_sense(struct scsipi_xfer *);
static int	scsipi_enqueue(struct scsipi_xfer *);
static void	scsipi_run_queue(struct scsipi_channel *chan);

static void	scsipi_completion_thread(void *);

static void	scsipi_get_tag(struct scsipi_xfer *);
static void	scsipi_put_tag(struct scsipi_xfer *);

static int	scsipi_get_resource(struct scsipi_channel *);
static void	scsipi_put_resource(struct scsipi_channel *);

static void	scsipi_async_event_max_openings(struct scsipi_channel *,
		    struct scsipi_max_openings *);
static void	scsipi_async_event_xfer_mode(struct scsipi_channel *,
		    struct scsipi_xfer_mode *);
static void	scsipi_async_event_channel_reset(struct scsipi_channel *);

static struct pool scsipi_xfer_pool;

/*
 * scsipi_init:
 *
 *	Called when a scsibus or atapibus is attached to the system
 *	to initialize shared data structures.
 */
void
scsipi_init(void)
{
	static int scsipi_init_done;

	if (scsipi_init_done)
		return;
	scsipi_init_done = 1;

	/* Initialize the scsipi_xfer pool. */
	pool_init(&scsipi_xfer_pool, sizeof(struct scsipi_xfer), 0,
	    0, 0, "scxspl", NULL);
	if (pool_prime(&scsipi_xfer_pool,
	    PAGE_SIZE / sizeof(struct scsipi_xfer)) == ENOMEM) {
		printf("WARNING: not enough memory for scsipi_xfer_pool\n");
	}
}
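
/*
 * Note on the priming above: preallocating one page worth of
 * scsipi_xfers lets early boot-time commands proceed without waiting
 * on the allocator; if priming fails we only warn, since the pool can
 * still grow on demand later.
 */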

/*
 * scsipi_channel_init:
 *
 *	Initialize a scsipi_channel when it is attached.
 */
int
scsipi_channel_init(struct scsipi_channel *chan)
{
	int i;

	/* Initialize shared data. */
	scsipi_init();

	/* Initialize the queues. */
	TAILQ_INIT(&chan->chan_queue);
	TAILQ_INIT(&chan->chan_complete);

	for (i = 0; i < SCSIPI_CHAN_PERIPH_BUCKETS; i++)
		LIST_INIT(&chan->chan_periphtab[i]);

	/*
	 * Create the asynchronous completion thread.
	 */
	kthread_create(scsipi_create_completion_thread, chan);
	return (0);
}

/*
 * scsipi_channel_shutdown:
 *
 *	Shutdown a scsipi_channel.
 */
void
scsipi_channel_shutdown(struct scsipi_channel *chan)
{

	/*
	 * Shut down the completion thread.
	 */
	chan->chan_tflags |= SCSIPI_CHANT_SHUTDOWN;
	wakeup(&chan->chan_complete);

	/*
	 * Now wait for the thread to exit.
	 */
	while (chan->chan_thread != NULL)
		(void) tsleep(&chan->chan_thread, PRIBIO, "scshut", 0);
}

static uint32_t
scsipi_chan_periph_hash(uint64_t t, uint64_t l)
{
	uint32_t hash;

	hash = hash32_buf(&t, sizeof(t), HASH32_BUF_INIT);
	hash = hash32_buf(&l, sizeof(l), hash);

	return (hash & SCSIPI_CHAN_PERIPH_HASHMASK);
}
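
/*
 * How the bucket index above is derived: the target is hashed first
 * and that value seeds the hash of the LUN, so both components
 * contribute to the result; the final mask keeps just enough bits to
 * index chan_periphtab[].
 */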

/*
 * scsipi_insert_periph:
 *
 *	Insert a periph into the channel.
 */
void
scsipi_insert_periph(struct scsipi_channel *chan, struct scsipi_periph *periph)
{
	uint32_t hash;
	int s;

	hash = scsipi_chan_periph_hash(periph->periph_target,
	    periph->periph_lun);

	s = splbio();
	LIST_INSERT_HEAD(&chan->chan_periphtab[hash], periph, periph_hash);
	splx(s);
}

/*
 * scsipi_remove_periph:
 *
 *	Remove a periph from the channel.
 */
void
scsipi_remove_periph(struct scsipi_channel *chan, struct scsipi_periph *periph)
{
	int s;

	s = splbio();
	LIST_REMOVE(periph, periph_hash);
	splx(s);
}

/*
 * scsipi_lookup_periph:
 *
 *	Lookup a periph on the specified channel.
 */
struct scsipi_periph *
scsipi_lookup_periph(struct scsipi_channel *chan, int target, int lun)
{
	struct scsipi_periph *periph;
	uint32_t hash;
	int s;

	if (target >= chan->chan_ntargets ||
	    lun >= chan->chan_nluns)
		return (NULL);

	hash = scsipi_chan_periph_hash(target, lun);

	s = splbio();
	LIST_FOREACH(periph, &chan->chan_periphtab[hash], periph_hash) {
		if (periph->periph_target == target &&
		    periph->periph_lun == lun)
			break;
	}
	splx(s);

	return (periph);
}

/*
 * scsipi_get_resource:
 *
 *	Allocate a single xfer `resource' from the channel.
 *
 *	NOTE: Must be called at splbio().
 */
static int
scsipi_get_resource(struct scsipi_channel *chan)
{
	struct scsipi_adapter *adapt = chan->chan_adapter;

	if (chan->chan_flags & SCSIPI_CHAN_OPENINGS) {
		if (chan->chan_openings > 0) {
			chan->chan_openings--;
			return (1);
		}
		return (0);
	}

	if (adapt->adapt_openings > 0) {
		adapt->adapt_openings--;
		return (1);
	}
	return (0);
}

/*
 * scsipi_grow_resources:
 *
 *	Attempt to grow resources for a channel.  If this succeeds,
 *	we allocate one for our caller.
 *
 *	NOTE: Must be called at splbio().
 */
static __inline int
scsipi_grow_resources(struct scsipi_channel *chan)
{

	if (chan->chan_flags & SCSIPI_CHAN_CANGROW) {
		if ((chan->chan_flags & SCSIPI_CHAN_TACTIVE) == 0) {
			scsipi_adapter_request(chan,
			    ADAPTER_REQ_GROW_RESOURCES, NULL);
			return (scsipi_get_resource(chan));
		}
		/*
		 * Ask the channel thread to do it; it will have to thaw
		 * the queue afterwards.
		 */
		scsipi_channel_freeze(chan, 1);
		chan->chan_tflags |= SCSIPI_CHANT_GROWRES;
		wakeup(&chan->chan_complete);
		return (0);
	}

	return (0);
}

/*
 * scsipi_put_resource:
 *
 *	Free a single xfer `resource' to the channel.
 *
 *	NOTE: Must be called at splbio().
 */
static void
scsipi_put_resource(struct scsipi_channel *chan)
{
	struct scsipi_adapter *adapt = chan->chan_adapter;

	if (chan->chan_flags & SCSIPI_CHAN_OPENINGS)
		chan->chan_openings++;
	else
		adapt->adapt_openings++;
}

/*
 * scsipi_get_tag:
 *
 *	Get a tag ID for the specified xfer.
 *
 *	NOTE: Must be called at splbio().
 */
static void
scsipi_get_tag(struct scsipi_xfer *xs)
{
	struct scsipi_periph *periph = xs->xs_periph;
	int bit, tag;
	u_int word;

	bit = 0;	/* XXX gcc */
	for (word = 0; word < PERIPH_NTAGWORDS; word++) {
		bit = ffs(periph->periph_freetags[word]);
		if (bit != 0)
			break;
	}
#ifdef DIAGNOSTIC
	if (word == PERIPH_NTAGWORDS) {
		scsipi_printaddr(periph);
		printf("no free tags\n");
		panic("scsipi_get_tag");
	}
#endif

	bit -= 1;
	periph->periph_freetags[word] &= ~(1 << bit);
	tag = (word << 5) | bit;

	/* XXX Should eventually disallow this completely. */
	if (tag >= periph->periph_openings) {
		scsipi_printaddr(periph);
		printf("WARNING: tag %d greater than available openings %d\n",
		    tag, periph->periph_openings);
	}

	xs->xs_tag_id = tag;
}
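
/*
 * The free-tag bitmap packs 32 tags per word: tag ID = (word << 5) | bit,
 * i.e. word = id / 32 and bit = id % 32, which is exactly how
 * scsipi_put_tag() below decomposes xs_tag_id when returning a tag.
 */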

/*
 * scsipi_put_tag:
 *
 *	Put the tag ID for the specified xfer back into the pool.
 *
 *	NOTE: Must be called at splbio().
 */
static void
scsipi_put_tag(struct scsipi_xfer *xs)
{
	struct scsipi_periph *periph = xs->xs_periph;
	int word, bit;

	word = xs->xs_tag_id >> 5;
	bit = xs->xs_tag_id & 0x1f;

	periph->periph_freetags[word] |= (1 << bit);
}

/*
 * scsipi_get_xs:
 *
 *	Allocate an xfer descriptor and associate it with the
 *	specified peripheral.  If the peripheral has no more
 *	available command openings, we either block waiting for
 *	one to become available, or fail.
 */
struct scsipi_xfer *
scsipi_get_xs(struct scsipi_periph *periph, int flags)
{
	struct scsipi_xfer *xs;
	int s;

	SC_DEBUG(periph, SCSIPI_DB3, ("scsipi_get_xs\n"));

	/*
	 * If we're cold, make sure we poll.
	 */
	if (cold)
		flags |= XS_CTL_NOSLEEP | XS_CTL_POLL;

#ifdef DIAGNOSTIC
	/*
	 * URGENT commands can never be ASYNC.
	 */
	if ((flags & (XS_CTL_URGENT|XS_CTL_ASYNC)) ==
	    (XS_CTL_URGENT|XS_CTL_ASYNC)) {
		scsipi_printaddr(periph);
		printf("URGENT and ASYNC\n");
		panic("scsipi_get_xs");
	}
#endif

	s = splbio();
	/*
	 * Wait for a command opening to become available.  Rules:
	 *
	 *	- All xfers must wait for an available opening.
	 *	  Exception: URGENT xfers can proceed when
	 *	  active == openings, because we use the opening
	 *	  of the command we're recovering for.
	 *
	 *	- If the periph has sense pending, only URGENT & REQSENSE
	 *	  xfers may proceed.
	 *
	 *	- If the periph is recovering, only URGENT xfers may
	 *	  proceed.
	 *
	 *	- If the periph is currently executing a recovery
	 *	  command, URGENT commands must block, because only
	 *	  one recovery command can execute at a time.
	 */
	for (;;) {
		if (flags & XS_CTL_URGENT) {
			if (periph->periph_active > periph->periph_openings)
				goto wait_for_opening;
			if (periph->periph_flags & PERIPH_SENSE) {
				if ((flags & XS_CTL_REQSENSE) == 0)
					goto wait_for_opening;
			} else {
				if ((periph->periph_flags &
				    PERIPH_RECOVERY_ACTIVE) != 0)
					goto wait_for_opening;
				periph->periph_flags |= PERIPH_RECOVERY_ACTIVE;
			}
			break;
		}
		if (periph->periph_active >= periph->periph_openings ||
		    (periph->periph_flags & PERIPH_RECOVERING) != 0)
			goto wait_for_opening;
		periph->periph_active++;
		break;

 wait_for_opening:
		if (flags & XS_CTL_NOSLEEP) {
			splx(s);
			return (NULL);
		}
		SC_DEBUG(periph, SCSIPI_DB3, ("sleeping\n"));
		periph->periph_flags |= PERIPH_WAITING;
		(void) tsleep(periph, PRIBIO, "getxs", 0);
	}
	SC_DEBUG(periph, SCSIPI_DB3, ("calling pool_get\n"));
	xs = pool_get(&scsipi_xfer_pool,
	    ((flags & XS_CTL_NOSLEEP) != 0 ? PR_NOWAIT : PR_WAITOK));
	if (xs == NULL) {
		if (flags & XS_CTL_URGENT) {
			if ((flags & XS_CTL_REQSENSE) == 0)
				periph->periph_flags &= ~PERIPH_RECOVERY_ACTIVE;
		} else
			periph->periph_active--;
		scsipi_printaddr(periph);
		printf("unable to allocate %sscsipi_xfer\n",
		    (flags & XS_CTL_URGENT) ? "URGENT " : "");
	}
	splx(s);

	SC_DEBUG(periph, SCSIPI_DB3, ("returning\n"));

	if (xs != NULL) {
		memset(xs, 0, sizeof(*xs));
		callout_init(&xs->xs_callout);
		xs->xs_periph = periph;
		xs->xs_control = flags;
		xs->xs_status = 0;
		s = splbio();
		TAILQ_INSERT_TAIL(&periph->periph_xferq, xs, device_q);
		splx(s);
	}
	return (xs);
}
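
/*
 * A minimal usage sketch (hypothetical caller; in practice the
 * scsipi_command() path allocates and frees xfers).  Every xfer
 * obtained here is paired with a scsipi_put_xs() below:
 *
 *	xs = scsipi_get_xs(periph, XS_CTL_NOSLEEP);
 *	if (xs == NULL)
 *		return;			(no openings or no memory)
 *	...fill in cmd, data, timeout; run the command...
 *	s = splbio();
 *	scsipi_put_xs(xs);
 *	splx(s);
 */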

/*
 * scsipi_put_xs:
 *
 *	Release an xfer descriptor, decreasing the outstanding command
 *	count for the peripheral.  If there is a thread waiting for
 *	an opening, wake it up.  If not, kick any queued I/O the
 *	peripheral may have.
 *
 *	NOTE: Must be called at splbio().
 */
void
scsipi_put_xs(struct scsipi_xfer *xs)
{
	struct scsipi_periph *periph = xs->xs_periph;
	int flags = xs->xs_control;

	SC_DEBUG(periph, SCSIPI_DB3, ("scsipi_free_xs\n"));

	TAILQ_REMOVE(&periph->periph_xferq, xs, device_q);
	pool_put(&scsipi_xfer_pool, xs);

#ifdef DIAGNOSTIC
	if ((periph->periph_flags & PERIPH_RECOVERY_ACTIVE) != 0 &&
	    periph->periph_active == 0) {
		scsipi_printaddr(periph);
		printf("recovery without a command to recover for\n");
		panic("scsipi_put_xs");
	}
#endif

	if (flags & XS_CTL_URGENT) {
		if ((flags & XS_CTL_REQSENSE) == 0)
			periph->periph_flags &= ~PERIPH_RECOVERY_ACTIVE;
	} else
		periph->periph_active--;
	if (periph->periph_active == 0 &&
	    (periph->periph_flags & PERIPH_WAITDRAIN) != 0) {
		periph->periph_flags &= ~PERIPH_WAITDRAIN;
		wakeup(&periph->periph_active);
	}

	if (periph->periph_flags & PERIPH_WAITING) {
		periph->periph_flags &= ~PERIPH_WAITING;
		wakeup(periph);
	} else {
		if (periph->periph_switch->psw_start != NULL &&
		    (periph->periph_dev->dv_flags & DVF_ACTIVE)) {
			SC_DEBUG(periph, SCSIPI_DB2,
			    ("calling private start()\n"));
			(*periph->periph_switch->psw_start)(periph);
		}
	}
}

/*
 * scsipi_channel_freeze:
 *
 *	Freeze a channel's xfer queue.
 */
void
scsipi_channel_freeze(struct scsipi_channel *chan, int count)
{
	int s;

	s = splbio();
	chan->chan_qfreeze += count;
	splx(s);
}

/*
 * scsipi_channel_thaw:
 *
 *	Thaw a channel's xfer queue.
 */
void
scsipi_channel_thaw(struct scsipi_channel *chan, int count)
{
	int s;

	s = splbio();
	chan->chan_qfreeze -= count;
	/*
	 * Don't let the freeze count go negative.
	 *
	 * Presumably the adapter driver could keep track of this,
	 * but it might just be easier to do this here so as to allow
	 * multiple callers, including those outside the adapter driver.
	 */
	if (chan->chan_qfreeze < 0) {
		chan->chan_qfreeze = 0;
	}
	splx(s);
	/*
	 * Kick the channel's queue here.  Note, we may be running in
	 * interrupt context (softclock or HBA's interrupt), so the adapter
	 * driver had better not sleep.
	 */
	if (chan->chan_qfreeze == 0)
		scsipi_run_queue(chan);
}

/*
 * scsipi_channel_timed_thaw:
 *
 *	Thaw a channel after some time has expired.  This will also
 *	run the channel's queue if the freeze count has reached 0.
 */
void
scsipi_channel_timed_thaw(void *arg)
{
	struct scsipi_channel *chan = arg;

	scsipi_channel_thaw(chan, 1);
}
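
/*
 * Sketch of the intended use (the callout handle here is the caller's
 * own, not something this file provides): freeze the channel, then arm
 * a callout so the thaw runs from softclock later:
 *
 *	scsipi_channel_freeze(chan, 1);
 *	callout_reset(&sc_callout, hz, scsipi_channel_timed_thaw, chan);
 */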

/*
 * scsipi_periph_freeze:
 *
 *	Freeze a device's xfer queue.
 */
void
scsipi_periph_freeze(struct scsipi_periph *periph, int count)
{
	int s;

	s = splbio();
	periph->periph_qfreeze += count;
	splx(s);
}

/*
 * scsipi_periph_thaw:
 *
 *	Thaw a device's xfer queue.
 */
void
scsipi_periph_thaw(struct scsipi_periph *periph, int count)
{
	int s;

	s = splbio();
	periph->periph_qfreeze -= count;
#ifdef DIAGNOSTIC
	if (periph->periph_qfreeze < 0) {
		static const char pc[] = "periph freeze count < 0";
		scsipi_printaddr(periph);
		printf("%s\n", pc);
		panic(pc);
	}
#endif
	if (periph->periph_qfreeze == 0 &&
	    (periph->periph_flags & PERIPH_WAITING) != 0)
		wakeup(periph);
	splx(s);
}

/*
 * scsipi_periph_timed_thaw:
 *
 *	Thaw a device after some time has expired.
 */
void
scsipi_periph_timed_thaw(void *arg)
{
	int s;
	struct scsipi_periph *periph = arg;

	callout_stop(&periph->periph_callout);

	s = splbio();
	scsipi_periph_thaw(periph, 1);
	if ((periph->periph_channel->chan_flags & SCSIPI_CHAN_TACTIVE) == 0) {
		/*
		 * Kick the channel's queue here.  Note, we're running in
		 * interrupt context (softclock), so the adapter driver
		 * had better not sleep.
		 */
		scsipi_run_queue(periph->periph_channel);
	} else {
		/*
		 * Tell the completion thread to kick the channel's queue here.
		 */
		periph->periph_channel->chan_tflags |= SCSIPI_CHANT_KICK;
		wakeup(&periph->periph_channel->chan_complete);
	}
	splx(s);
}

/*
 * scsipi_wait_drain:
 *
 *	Wait for a periph's pending xfers to drain.
 */
void
scsipi_wait_drain(struct scsipi_periph *periph)
{
	int s;

	s = splbio();
	while (periph->periph_active != 0) {
		periph->periph_flags |= PERIPH_WAITDRAIN;
		(void) tsleep(&periph->periph_active, PRIBIO, "sxdrn", 0);
	}
	splx(s);
}

/*
 * scsipi_kill_pending:
 *
 *	Kill off all pending xfers for a periph.
 *
 *	NOTE: Must be called at splbio().
 */
void
scsipi_kill_pending(struct scsipi_periph *periph)
{

	(*periph->periph_channel->chan_bustype->bustype_kill_pending)(periph);
	scsipi_wait_drain(periph);
}

/*
 * scsipi_print_cdb:
 *
 *	Print a command descriptor block (for debugging purposes, error
 *	messages, SCSIPI_VERBOSE, ...).
 */
void
scsipi_print_cdb(struct scsipi_generic *cmd)
{
	int i, j;

	printf("0x%02x", cmd->opcode);

	switch (CDB_GROUPID(cmd->opcode)) {
	case CDB_GROUPID_0:
		j = CDB_GROUP0;
		break;
	case CDB_GROUPID_1:
		j = CDB_GROUP1;
		break;
	case CDB_GROUPID_2:
		j = CDB_GROUP2;
		break;
	case CDB_GROUPID_3:
		j = CDB_GROUP3;
		break;
	case CDB_GROUPID_4:
		j = CDB_GROUP4;
		break;
	case CDB_GROUPID_5:
		j = CDB_GROUP5;
		break;
	case CDB_GROUPID_6:
		j = CDB_GROUP6;
		break;
	case CDB_GROUPID_7:
		j = CDB_GROUP7;
		break;
	default:
		j = 0;
	}
	if (j == 0)
		j = sizeof (cmd->bytes);
	for (i = 0; i < j-1; i++)	/* already done the opcode */
		printf(" %02x", cmd->bytes[i]);
}
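
/*
 * For example (illustrative bytes only), a READ(10) CDB prints as
 * "0x28 00 00 00 08 00 00 00 10 00": the group ID in the opcode's top
 * three bits (group 1 here) selects the CDB_GROUP1 length of 10, and
 * the nine remaining bytes follow the opcode.
 */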

/*
 * scsipi_interpret_sense:
 *
 *	Look at the returned sense and act on the error, determining
 *	the unix error number to pass back.  (0 = report no error)
 *
 *	NOTE: If we return ERESTART, we are expected to have
 *	thawed the device!
 *
 *	THIS IS THE DEFAULT ERROR HANDLER FOR SCSI DEVICES.
 */
int
scsipi_interpret_sense(struct scsipi_xfer *xs)
{
	struct scsipi_sense_data *sense;
	struct scsipi_periph *periph = xs->xs_periph;
	u_int8_t key;
	int error;
#ifndef	SCSIVERBOSE
	u_int32_t info;
	static char *error_mes[] = {
		"soft error (corrected)",
		"not ready", "medium error",
		"non-media hardware failure", "illegal request",
		"unit attention", "readonly device",
		"no data found", "vendor unique",
		"copy aborted", "command aborted",
		"search returned equal", "volume overflow",
		"verify miscompare", "unknown error key"
	};
#endif

	sense = &xs->sense.scsi_sense;
#ifdef SCSIPI_DEBUG
	if (periph->periph_flags & SCSIPI_DB1) {
		int count;
		scsipi_printaddr(periph);
		printf(" sense debug information:\n");
		printf("\tcode 0x%x valid 0x%x\n",
		    sense->error_code & SSD_ERRCODE,
		    sense->error_code & SSD_ERRCODE_VALID ? 1 : 0);
		printf("\tseg 0x%x key 0x%x ili 0x%x eom 0x%x fmark 0x%x\n",
		    sense->segment,
		    sense->flags & SSD_KEY,
		    sense->flags & SSD_ILI ? 1 : 0,
		    sense->flags & SSD_EOM ? 1 : 0,
		    sense->flags & SSD_FILEMARK ? 1 : 0);
		printf("\ninfo: 0x%x 0x%x 0x%x 0x%x followed by %d "
		    "extra bytes\n",
		    sense->info[0],
		    sense->info[1],
		    sense->info[2],
		    sense->info[3],
		    sense->extra_len);
		printf("\textra: ");
		for (count = 0; count < ADD_BYTES_LIM(sense); count++)
			printf("0x%x ", sense->cmd_spec_info[count]);
		printf("\n");
	}
#endif

	/*
	 * If the periph has its own error handler, call it first.
	 * If it returns a legit error value, return that, otherwise
	 * it wants us to continue with normal error processing.
	 */
	if (periph->periph_switch->psw_error != NULL) {
		SC_DEBUG(periph, SCSIPI_DB2,
		    ("calling private err_handler()\n"));
		error = (*periph->periph_switch->psw_error)(xs);
		if (error != EJUSTRETURN)
			return (error);
	}
	/* otherwise use the default */
	switch (sense->error_code & SSD_ERRCODE) {

		/*
		 * Old SCSI-1 and SASI devices respond with
		 * codes other than 0x70.
		 */
	case 0x00:	/* no error (command completed OK) */
		return (0);
	case 0x04:	/* drive not ready after it was selected */
		if ((periph->periph_flags & PERIPH_REMOVABLE) != 0)
			periph->periph_flags &= ~PERIPH_MEDIA_LOADED;
		if ((xs->xs_control & XS_CTL_IGNORE_NOT_READY) != 0)
			return (0);
		/* XXX - display some sort of error here? */
		return (EIO);
	case 0x20:	/* invalid command */
		if ((xs->xs_control &
		    XS_CTL_IGNORE_ILLEGAL_REQUEST) != 0)
			return (0);
		return (EINVAL);
	case 0x25:	/* invalid LUN (Adaptec ACB-4000) */
		return (EACCES);

		/*
		 * If it's code 0x70, use the extended sense data and
		 * interpret the key.
		 */
	case 0x71:	/* delayed error */
		scsipi_printaddr(periph);
		key = sense->flags & SSD_KEY;
		printf(" DEFERRED ERROR, key = 0x%x\n", key);
		/* FALLTHROUGH */
	case 0x70:
#ifndef	SCSIVERBOSE
		if ((sense->error_code & SSD_ERRCODE_VALID) != 0)
			info = _4btol(sense->info);
		else
			info = 0;
#endif
		key = sense->flags & SSD_KEY;

		switch (key) {
		case SKEY_NO_SENSE:
		case SKEY_RECOVERED_ERROR:
			if (xs->resid == xs->datalen && xs->datalen) {
				/*
				 * Why is this here?
				 */
				xs->resid = 0;	/* not short read */
			}
		case SKEY_EQUAL:
			error = 0;
			break;
		case SKEY_NOT_READY:
			if ((periph->periph_flags & PERIPH_REMOVABLE) != 0)
				periph->periph_flags &= ~PERIPH_MEDIA_LOADED;
			if ((xs->xs_control & XS_CTL_IGNORE_NOT_READY) != 0)
				return (0);
			if (sense->add_sense_code == 0x3A) {
				error = ENODEV; /* Medium not present */
				if (xs->xs_control & XS_CTL_SILENT_NODEV)
					return (error);
			} else
				error = EIO;
			if ((xs->xs_control & XS_CTL_SILENT) != 0)
				return (error);
			break;
		case SKEY_ILLEGAL_REQUEST:
			if ((xs->xs_control &
			    XS_CTL_IGNORE_ILLEGAL_REQUEST) != 0)
				return (0);
			/*
			 * Handle the case where a device reports
			 * Logical Unit Not Supported during discovery.
			 */
			if ((xs->xs_control & XS_CTL_DISCOVERY) != 0 &&
			    sense->add_sense_code == 0x25 &&
			    sense->add_sense_code_qual == 0x00)
				return (EINVAL);
			if ((xs->xs_control & XS_CTL_SILENT) != 0)
				return (EIO);
			error = EINVAL;
			break;
		case SKEY_UNIT_ATTENTION:
			if (sense->add_sense_code == 0x29 &&
			    sense->add_sense_code_qual == 0x00) {
				/* device or bus reset */
				return (ERESTART);
			}
			if ((periph->periph_flags & PERIPH_REMOVABLE) != 0)
				periph->periph_flags &= ~PERIPH_MEDIA_LOADED;
			if ((xs->xs_control &
			    XS_CTL_IGNORE_MEDIA_CHANGE) != 0 ||
			    /* XXX Should reupload any transient state. */
			    (periph->periph_flags &
			     PERIPH_REMOVABLE) == 0) {
				return (ERESTART);
			}
			if ((xs->xs_control & XS_CTL_SILENT) != 0)
				return (EIO);
			error = EIO;
			break;
		case SKEY_WRITE_PROTECT:
			error = EROFS;
			break;
		case SKEY_BLANK_CHECK:
			error = 0;
			break;
		case SKEY_ABORTED_COMMAND:
			if (xs->xs_retries != 0) {
				xs->xs_retries--;
				error = ERESTART;
			} else
				error = EIO;
			break;
		case SKEY_VOLUME_OVERFLOW:
			error = ENOSPC;
			break;
		default:
			error = EIO;
			break;
		}

#ifdef SCSIVERBOSE
		if (key && (xs->xs_control & XS_CTL_SILENT) == 0)
			scsipi_print_sense(xs, 0);
#else
		if (key) {
			scsipi_printaddr(periph);
			printf("%s", error_mes[key - 1]);
			if ((sense->error_code & SSD_ERRCODE_VALID) != 0) {
				switch (key) {
				case SKEY_NOT_READY:
				case SKEY_ILLEGAL_REQUEST:
				case SKEY_UNIT_ATTENTION:
				case SKEY_WRITE_PROTECT:
					break;
				case SKEY_BLANK_CHECK:
					printf(", requested size: %d (decimal)",
					    info);
					break;
				case SKEY_ABORTED_COMMAND:
					if (xs->xs_retries)
						printf(", retrying");
					printf(", cmd 0x%x, info 0x%x",
					    xs->cmd->opcode, info);
					break;
				default:
					printf(", info = %d (decimal)", info);
				}
			}
			if (sense->extra_len != 0) {
				int n;
				printf(", data =");
				for (n = 0; n < sense->extra_len; n++)
					printf(" %02x",
					    sense->cmd_spec_info[n]);
			}
			printf("\n");
		}
#endif
		return (error);

		/*
		 * Some other code, just report it.
		 */
	default:
#if    defined(SCSIDEBUG) || defined(DEBUG)
	{
		static char *uc = "undecodable sense error";
		int i;
		u_int8_t *cptr = (u_int8_t *) sense;
		scsipi_printaddr(periph);
		if (xs->cmd == &xs->cmdstore) {
			printf("%s for opcode 0x%x, data=",
			    uc, xs->cmdstore.opcode);
		} else {
			printf("%s, data=", uc);
		}
		for (i = 0; i < sizeof (*sense); i++)
			printf(" 0x%02x", *(cptr++) & 0xff);
		printf("\n");
	}
#else
		scsipi_printaddr(periph);
		printf("Sense Error Code 0x%x",
		    sense->error_code & SSD_ERRCODE);
		if ((sense->error_code & SSD_ERRCODE_VALID) != 0) {
			struct scsipi_sense_data_unextended *usense =
			    (struct scsipi_sense_data_unextended *)sense;
			printf(" at block no. %d (decimal)",
			    _3btol(usense->block));
		}
		printf("\n");
#endif
		return (EIO);
	}
}

/*
 * scsipi_size:
 *
 *	Find out from the device what its capacity is.
 */
u_int64_t
scsipi_size(struct scsipi_periph *periph, int flags)
{
	struct scsipi_read_cap_data rdcap;
	struct scsipi_read_capacity scsipi_cmd;

	memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
	scsipi_cmd.opcode = READ_CAPACITY;

	/*
	 * If the command works, interpret the result as a 4 byte
	 * number of blocks
	 */
	if (scsipi_command(periph, (struct scsipi_generic *)&scsipi_cmd,
	    sizeof(scsipi_cmd), (u_char *)&rdcap, sizeof(rdcap),
	    SCSIPIRETRIES, 20000, NULL,
	    flags | XS_CTL_DATA_IN | XS_CTL_DATA_ONSTACK | XS_CTL_SILENT) != 0)
		return (0);

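	/*
	 * READ CAPACITY reports the address of the last block; the
	 * block count is therefore that address plus one.
	 */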
	return (_4btol(rdcap.addr) + 1);
}

/*
 * scsipi_test_unit_ready:
 *
 *	Issue a `test unit ready' request.
 */
int
scsipi_test_unit_ready(struct scsipi_periph *periph, int flags)
{
	int retries;
	struct scsipi_test_unit_ready scsipi_cmd;

	/* some ATAPI drives don't support TEST_UNIT_READY. Sigh */
	if (periph->periph_quirks & PQUIRK_NOTUR)
		return (0);

	memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
	scsipi_cmd.opcode = TEST_UNIT_READY;

	if (flags & XS_CTL_DISCOVERY)
		retries = 0;
	else
		retries = SCSIPIRETRIES;

	return (scsipi_command(periph,
	    (struct scsipi_generic *)&scsipi_cmd, sizeof(scsipi_cmd),
	    0, 0, retries, 10000, NULL, flags));
}

/*
 * scsipi_inquire:
 *
 *	Ask the device about itself.
 */
int
scsipi_inquire(struct scsipi_periph *periph, struct scsipi_inquiry_data *inqbuf,
    int flags)
{
	int retries;
	struct scsipi_inquiry scsipi_cmd;
	int error;

	memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
	scsipi_cmd.opcode = INQUIRY;

	if (flags & XS_CTL_DISCOVERY)
		retries = 0;
	else
		retries = SCSIPIRETRIES;

	/*
	 * If we request more data than the device can provide, it SHOULD just
	 * return a short response.  However, some devices error with an
	 * ILLEGAL REQUEST sense code, and yet others have even more special
	 * failure modes (such as the GL641USB flash adapter, which goes loony
	 * and sends corrupted CRCs).  To work around this, and to bring our
	 * behavior more in line with other OSes, we do a shorter inquiry,
	 * covering all the SCSI-2 information, first, and then request more
	 * data iff the "additional length" field indicates there is more.
	 * - mycroft, 2003/10/16
	 */
	scsipi_cmd.length = SCSIPI_INQUIRY_LENGTH_SCSI2;
	error = scsipi_command(periph,
	    (struct scsipi_generic *) &scsipi_cmd, sizeof(scsipi_cmd),
	    (u_char *) inqbuf, SCSIPI_INQUIRY_LENGTH_SCSI2,
	    retries, 10000, NULL, XS_CTL_DATA_IN | flags);
	if (!error &&
	    inqbuf->additional_length > SCSIPI_INQUIRY_LENGTH_SCSI2 - 4) {
		scsipi_cmd.length = SCSIPI_INQUIRY_LENGTH_SCSI3;
		error = scsipi_command(periph,
		    (struct scsipi_generic *) &scsipi_cmd, sizeof(scsipi_cmd),
		    (u_char *) inqbuf, SCSIPI_INQUIRY_LENGTH_SCSI3,
		    retries, 10000, NULL, XS_CTL_DATA_IN | flags);
	}

#ifdef SCSI_OLD_NOINQUIRY
	/*
	 * Kludge for the Adaptec ACB-4000 SCSI->MFM translator.
	 * This board doesn't support the INQUIRY command at all.
	 */
	if (error == EINVAL || error == EACCES) {
		/*
		 * Conjure up an INQUIRY response.
		 */
		inqbuf->device = (error == EINVAL ?
		    SID_QUAL_LU_PRESENT :
		    SID_QUAL_LU_NOTPRESENT) | T_DIRECT;
		inqbuf->dev_qual2 = 0;
		inqbuf->version = 0;
		inqbuf->response_format = SID_FORMAT_SCSI1;
		inqbuf->additional_length = SCSIPI_INQUIRY_LENGTH_SCSI2 - 4;
		inqbuf->flags1 = inqbuf->flags2 = inqbuf->flags3 = 0;
		memcpy(inqbuf->vendor, "ADAPTEC ACB-4000            ", 28);
		error = 0;
	}

	/*
	 * Kludge for the Emulex MT-02 SCSI->QIC translator.
	 * This board gives an empty response to an INQUIRY command.
	 */
	else if (error == 0 &&
	    inqbuf->device == (SID_QUAL_LU_PRESENT | T_DIRECT) &&
	    inqbuf->dev_qual2 == 0 &&
	    inqbuf->version == 0 &&
	    inqbuf->response_format == SID_FORMAT_SCSI1) {
		/*
		 * Fill out the INQUIRY response.
		 */
		inqbuf->device = (SID_QUAL_LU_PRESENT | T_SEQUENTIAL);
		inqbuf->dev_qual2 = SID_REMOVABLE;
		inqbuf->additional_length = SCSIPI_INQUIRY_LENGTH_SCSI2 - 4;
		inqbuf->flags1 = inqbuf->flags2 = inqbuf->flags3 = 0;
		memcpy(inqbuf->vendor, "EMULEX  MT-02 QIC           ", 28);
	}
#endif /* SCSI_OLD_NOINQUIRY */

	return error;
}

/*
 * scsipi_prevent:
 *
 *	Prevent or allow the user to remove the media.
 */
int
scsipi_prevent(struct scsipi_periph *periph, int type, int flags)
{
	struct scsipi_prevent scsipi_cmd;

	memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
	scsipi_cmd.opcode = PREVENT_ALLOW;
	scsipi_cmd.how = type;

	return (scsipi_command(periph,
	    (struct scsipi_generic *) &scsipi_cmd, sizeof(scsipi_cmd),
	    0, 0, SCSIPIRETRIES, 5000, NULL, flags));
}
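
/*
 * Typical usage (a sketch; PR_PREVENT and PR_ALLOW are the `type'
 * values the disk and CD drivers pass in this tree): lock the door
 * with scsipi_prevent(periph, PR_PREVENT, 0) before depending on the
 * media staying put, and release it later with
 * scsipi_prevent(periph, PR_ALLOW, 0).
 */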

/*
 * scsipi_start:
 *
 *	Send a START UNIT.
 */
int
scsipi_start(struct scsipi_periph *periph, int type, int flags)
{
	struct scsipi_start_stop scsipi_cmd;

	memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
	scsipi_cmd.opcode = START_STOP;
	scsipi_cmd.byte2 = 0x00;
	scsipi_cmd.how = type;

	return (scsipi_command(periph,
	    (struct scsipi_generic *) &scsipi_cmd, sizeof(scsipi_cmd),
	    0, 0, SCSIPIRETRIES, (type & SSS_START) ? 60000 : 10000,
	    NULL, flags));
}

/*
 * scsipi_mode_sense, scsipi_mode_sense_big:
 *	get a sense page from a device
 */

int
scsipi_mode_sense(struct scsipi_periph *periph, int byte2, int page,
    struct scsipi_mode_header *data, int len, int flags, int retries,
    int timeout)
{
	struct scsipi_mode_sense scsipi_cmd;
	int error;

	memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
	scsipi_cmd.opcode = MODE_SENSE;
	scsipi_cmd.byte2 = byte2;
	scsipi_cmd.page = page;
	scsipi_cmd.length = len & 0xff;
	error = scsipi_command(periph,
	    (struct scsipi_generic *)&scsipi_cmd, sizeof(scsipi_cmd),
	    (void *)data, len, retries, timeout, NULL,
	    flags | XS_CTL_DATA_IN);
	SC_DEBUG(periph, SCSIPI_DB2,
	    ("scsipi_mode_sense: error=%d\n", error));
	return (error);
}

int
scsipi_mode_sense_big(struct scsipi_periph *periph, int byte2, int page,
    struct scsipi_mode_header_big *data, int len, int flags, int retries,
    int timeout)
{
	struct scsipi_mode_sense_big scsipi_cmd;
	int error;

	memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
	scsipi_cmd.opcode = MODE_SENSE_BIG;
	scsipi_cmd.byte2 = byte2;
	scsipi_cmd.page = page;
	_lto2b(len, scsipi_cmd.length);
	error = scsipi_command(periph,
	    (struct scsipi_generic *)&scsipi_cmd, sizeof(scsipi_cmd),
	    (void *)data, len, retries, timeout, NULL,
	    flags | XS_CTL_DATA_IN);
	SC_DEBUG(periph, SCSIPI_DB2,
	    ("scsipi_mode_sense_big: error=%d\n", error));
	return (error);
}

int
scsipi_mode_select(struct scsipi_periph *periph, int byte2,
    struct scsipi_mode_header *data, int len, int flags, int retries,
    int timeout)
{
	struct scsipi_mode_select scsipi_cmd;
	int error;

	memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
	scsipi_cmd.opcode = MODE_SELECT;
	scsipi_cmd.byte2 = byte2;
	scsipi_cmd.length = len & 0xff;
	error = scsipi_command(periph,
	    (struct scsipi_generic *)&scsipi_cmd, sizeof(scsipi_cmd),
	    (void *)data, len, retries, timeout, NULL,
	    flags | XS_CTL_DATA_OUT);
	SC_DEBUG(periph, SCSIPI_DB2,
	    ("scsipi_mode_select: error=%d\n", error));
	return (error);
}

int
scsipi_mode_select_big(struct scsipi_periph *periph, int byte2,
    struct scsipi_mode_header_big *data, int len, int flags, int retries,
    int timeout)
{
	struct scsipi_mode_select_big scsipi_cmd;
	int error;

	memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
	scsipi_cmd.opcode = MODE_SELECT_BIG;
	scsipi_cmd.byte2 = byte2;
	_lto2b(len, scsipi_cmd.length);
	error = scsipi_command(periph,
	    (struct scsipi_generic *)&scsipi_cmd, sizeof(scsipi_cmd),
	    (void *)data, len, retries, timeout, NULL,
	    flags | XS_CTL_DATA_OUT);
	SC_DEBUG(periph, SCSIPI_DB2,
	    ("scsipi_mode_select_big: error=%d\n", error));
	return (error);
}

/*
 * scsipi_done:
 *
 *	This routine is called by an adapter's interrupt handler when
 *	an xfer is completed.
 */
void
scsipi_done(struct scsipi_xfer *xs)
{
	struct scsipi_periph *periph = xs->xs_periph;
	struct scsipi_channel *chan = periph->periph_channel;
	int s, freezecnt;

	SC_DEBUG(periph, SCSIPI_DB2, ("scsipi_done\n"));
#ifdef SCSIPI_DEBUG
	if (periph->periph_dbflags & SCSIPI_DB1)
		show_scsipi_cmd(xs);
#endif

	s = splbio();
	/*
	 * The resource this command was using is now free.
	 */
	scsipi_put_resource(chan);
	xs->xs_periph->periph_sent--;

	/*
	 * If the command was tagged, free the tag.
	 */
	if (XS_CTL_TAGTYPE(xs) != 0)
		scsipi_put_tag(xs);
	else
		periph->periph_flags &= ~PERIPH_UNTAG;

	/* Mark the command as `done'. */
	xs->xs_status |= XS_STS_DONE;

#ifdef DIAGNOSTIC
	if ((xs->xs_control & (XS_CTL_ASYNC|XS_CTL_POLL)) ==
	    (XS_CTL_ASYNC|XS_CTL_POLL))
		panic("scsipi_done: ASYNC and POLL");
#endif

	/*
	 * If the xfer had an error of any sort, freeze the
	 * periph's queue.  Freeze it again if we were requested
	 * to do so in the xfer.
	 */
	freezecnt = 0;
	if (xs->error != XS_NOERROR)
		freezecnt++;
	if (xs->xs_control & XS_CTL_FREEZE_PERIPH)
		freezecnt++;
	if (freezecnt != 0)
		scsipi_periph_freeze(periph, freezecnt);

	/*
	 * Record the xfer with a pending sense, in case a SCSI reset is
	 * received before the thread is woken up.
	 */
	if (xs->error == XS_BUSY && xs->status == SCSI_CHECK) {
		periph->periph_flags |= PERIPH_SENSE;
		periph->periph_xscheck = xs;
	}

	/*
	 * If this was an xfer that was not to complete asynchronously,
	 * let the requesting thread perform error checking/handling
	 * in its context.
	 */
	if ((xs->xs_control & XS_CTL_ASYNC) == 0) {
		splx(s);
		/*
		 * If it's a polling job, just return, to unwind the
		 * call graph.  We don't need to restart the queue,
		 * because polling jobs are treated specially, and
		 * are really only used during crash dumps anyway
		 * (XXX or during boot-time autoconfiguration of
		 * ATAPI devices).
		 */
		if (xs->xs_control & XS_CTL_POLL)
			return;
		wakeup(xs);
		goto out;
	}

	/*
	 * Catch the extremely common case of I/O completing
	 * without error; no use in taking a context switch
	 * if we can handle it in interrupt context.
	 */
	if (xs->error == XS_NOERROR) {
		splx(s);
		(void) scsipi_complete(xs);
		goto out;
	}

	/*
	 * There is an error on this xfer.  Put it on the channel's
	 * completion queue, and wake up the completion thread.
	 */
	TAILQ_INSERT_TAIL(&chan->chan_complete, xs, channel_q);
	splx(s);
	wakeup(&chan->chan_complete);

 out:
	/*
	 * If there are more xfers on the channel's queue, attempt to
	 * run them.
	 */
	scsipi_run_queue(chan);
}
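
/*
 * To summarize the dispatch above: polled xfers simply return to the
 * caller spinning in scsipi_execute_xs(); synchronous xfers get a
 * wakeup() on the xfer; error-free async xfers are completed right
 * here in interrupt context; and only async xfers with errors are
 * handed to the completion thread.
 */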

/*
 * scsipi_complete:
 *
 *	Completion of a scsipi_xfer.  This is the guts of scsipi_done().
 *
 *	NOTE: This routine MUST be called with valid thread context
 *	except for the case where the following two conditions are
 *	true:
 *
 *		xs->error == XS_NOERROR
 *		XS_CTL_ASYNC is set in xs->xs_control
 *
 *	The semantics of this routine can be tricky, so here is an
 *	explanation:
 *
 *		0		Xfer completed successfully.
 *
 *		ERESTART	Xfer had an error, but was restarted.
 *
 *		anything else	Xfer had an error, return value is Unix
 *				errno.
 *
 *	If the return value is anything but ERESTART:
 *
 *		- If XS_CTL_ASYNC is set, `xs' has been freed back to
 *		  the pool.
 *		- If there is a buf associated with the xfer,
 *		  it has been biodone()'d.
 */
static int
scsipi_complete(struct scsipi_xfer *xs)
{
	struct scsipi_periph *periph = xs->xs_periph;
	struct scsipi_channel *chan = periph->periph_channel;
	int error, s;

#ifdef DIAGNOSTIC
	if ((xs->xs_control & XS_CTL_ASYNC) != 0 && xs->bp == NULL)
		panic("scsipi_complete: XS_CTL_ASYNC but no buf");
#endif
	/*
	 * If command terminated with a CHECK CONDITION, we need to issue a
	 * REQUEST_SENSE command.  Once the REQUEST_SENSE has been processed
	 * we'll have the real status.
	 * Must be processed at splbio() to avoid missing a SCSI bus reset
	 * for this command.
	 */
	s = splbio();
	if (xs->error == XS_BUSY && xs->status == SCSI_CHECK) {
		/* request sense for a request sense? */
		if (xs->xs_control & XS_CTL_REQSENSE) {
			scsipi_printaddr(periph);
			printf("request sense for a request sense?\n");
			/* XXX maybe we should reset the device? */
			/* we've been frozen because xs->error != XS_NOERROR */
			scsipi_periph_thaw(periph, 1);
			splx(s);
			if (xs->resid < xs->datalen) {
				printf("we read %d bytes of sense anyway:\n",
				    xs->datalen - xs->resid);
#ifdef SCSIVERBOSE
				scsipi_print_sense_data((void *)xs->data, 0);
#endif
			}
			return EINVAL;
		}
		scsipi_request_sense(xs);
	}
	splx(s);

	/*
	 * If it's a user level request, bypass all usual completion
	 * processing, let the user work it out..
	 */
	if ((xs->xs_control & XS_CTL_USERCMD) != 0) {
		SC_DEBUG(periph, SCSIPI_DB3, ("calling user done()\n"));
		if (xs->error != XS_NOERROR)
			scsipi_periph_thaw(periph, 1);
		scsipi_user_done(xs);
		SC_DEBUG(periph, SCSIPI_DB3, ("returned from user done()\n "));
		return 0;
	}

	switch (xs->error) {
	case XS_NOERROR:
		error = 0;
		break;

	case XS_SENSE:
	case XS_SHORTSENSE:
		error = (*chan->chan_bustype->bustype_interpret_sense)(xs);
		break;

	case XS_RESOURCE_SHORTAGE:
		/*
		 * XXX Should freeze channel's queue.
		 */
		scsipi_printaddr(periph);
		printf("adapter resource shortage\n");
		/* FALLTHROUGH */

	case XS_BUSY:
		if (xs->error == XS_BUSY && xs->status == SCSI_QUEUE_FULL) {
			struct scsipi_max_openings mo;

			/*
			 * We set the openings to active - 1, assuming that
			 * the command that got us here is the first one that
			 * can't fit into the device's queue.  If that's not
			 * the case, I guess we'll find out soon enough.
			 */
			mo.mo_target = periph->periph_target;
			mo.mo_lun = periph->periph_lun;
			if (periph->periph_active < periph->periph_openings)
				mo.mo_openings = periph->periph_active - 1;
			else
				mo.mo_openings = periph->periph_openings - 1;
#ifdef DIAGNOSTIC
			if (mo.mo_openings < 0) {
				scsipi_printaddr(periph);
				printf("QUEUE FULL resulted in < 0 openings\n");
				panic("scsipi_done");
			}
#endif
			if (mo.mo_openings == 0) {
				scsipi_printaddr(periph);
				printf("QUEUE FULL resulted in 0 openings\n");
				mo.mo_openings = 1;
			}
			scsipi_async_event(chan, ASYNC_EVENT_MAX_OPENINGS, &mo);
			error = ERESTART;
		} else if (xs->xs_retries != 0) {
			xs->xs_retries--;
			/*
			 * Wait one second, and try again.
			 */
			if ((xs->xs_control & XS_CTL_POLL) ||
			    (chan->chan_flags & SCSIPI_CHAN_TACTIVE) == 0) {
				delay(1000000);
			} else if (!callout_pending(&periph->periph_callout)) {
				scsipi_periph_freeze(periph, 1);
				callout_reset(&periph->periph_callout,
				    hz, scsipi_periph_timed_thaw, periph);
			}
			error = ERESTART;
		} else
			error = EBUSY;
		break;

	case XS_REQUEUE:
		error = ERESTART;
		break;

	case XS_SELTIMEOUT:
	case XS_TIMEOUT:
		/*
		 * If the device hasn't gone away, honor retry counts.
		 *
		 * Note that if we're in the middle of probing it,
		 * it won't be found because it isn't here yet so
		 * we won't honor the retry count in that case.
		 */
		if (scsipi_lookup_periph(chan, periph->periph_target,
		    periph->periph_lun) && xs->xs_retries != 0) {
			xs->xs_retries--;
			error = ERESTART;
		} else
			error = EIO;
		break;

	case XS_RESET:
		if (xs->xs_control & XS_CTL_REQSENSE) {
			/*
			 * request sense interrupted by reset: signal it
			 * with EINTR return code.
			 */
			error = EINTR;
		} else {
			if (xs->xs_retries != 0) {
				xs->xs_retries--;
				error = ERESTART;
			} else
				error = EIO;
		}
		break;

	case XS_DRIVER_STUFFUP:
		scsipi_printaddr(periph);
		printf("generic HBA error\n");
		error = EIO;
		break;
	default:
		scsipi_printaddr(periph);
		printf("invalid return code from adapter: %d\n", xs->error);
		error = EIO;
		break;
	}

	s = splbio();
	if (error == ERESTART) {
		/*
		 * If we get here, the periph has been thawed and frozen
		 * again if we had to issue recovery commands.  Alternatively,
		 * it may have been frozen again and in a timed thaw.  In
		 * any case, we thaw the periph once we re-enqueue the
		 * command.  Once the periph is fully thawed, it will begin
		 * operation again.
		 */
		xs->error = XS_NOERROR;
		xs->status = SCSI_OK;
		xs->xs_status &= ~XS_STS_DONE;
		xs->xs_requeuecnt++;
		error = scsipi_enqueue(xs);
		if (error == 0) {
			scsipi_periph_thaw(periph, 1);
			splx(s);
			return (ERESTART);
		}
	}

	/*
	 * scsipi_done() freezes the queue if not XS_NOERROR.
	 * Thaw it here.
	 */
	if (xs->error != XS_NOERROR)
		scsipi_periph_thaw(periph, 1);

	if (periph->periph_switch->psw_done)
		periph->periph_switch->psw_done(xs, error);

	if (xs->xs_control & XS_CTL_ASYNC)
		scsipi_put_xs(xs);
	splx(s);

	return (error);
}

/*
 * Issue a request sense for the given scsipi_xfer.  Called when the xfer
 * returns with a CHECK_CONDITION status.  Must be called in valid thread
 * context and at splbio().
 */

static void
scsipi_request_sense(struct scsipi_xfer *xs)
{
	struct scsipi_periph *periph = xs->xs_periph;
	int flags, error;
	struct scsipi_sense cmd;

	periph->periph_flags |= PERIPH_SENSE;

	/* if command was polling, request sense will too */
	flags = xs->xs_control & XS_CTL_POLL;
	/* Polling commands can't sleep */
	if (flags)
		flags |= XS_CTL_NOSLEEP;

	flags |= XS_CTL_REQSENSE | XS_CTL_URGENT | XS_CTL_DATA_IN |
	    XS_CTL_THAW_PERIPH | XS_CTL_FREEZE_PERIPH;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = REQUEST_SENSE;
	cmd.length = sizeof(struct scsipi_sense_data);

	error = scsipi_command(periph,
	    (struct scsipi_generic *) &cmd, sizeof(cmd),
	    (u_char*)&xs->sense.scsi_sense, sizeof(struct scsipi_sense_data),
	    0, 1000, NULL, flags);
	periph->periph_flags &= ~PERIPH_SENSE;
	periph->periph_xscheck = NULL;
	switch (error) {
	case 0:
		/* we have a valid sense */
		xs->error = XS_SENSE;
		return;
	case EINTR:
		/* REQUEST_SENSE interrupted by bus reset. */
		xs->error = XS_RESET;
		return;
	case EIO:
		/* request sense couldn't be performed */
		/*
		 * XXX this isn't quite right but we don't have anything
		 * better for now
		 */
		xs->error = XS_DRIVER_STUFFUP;
		return;
	default:
		/* Notify that request sense failed. */
		xs->error = XS_DRIVER_STUFFUP;
		scsipi_printaddr(periph);
		printf("request sense failed with error %d\n", error);
		return;
	}
}

/*
 * scsipi_enqueue:
 *
 *	Enqueue an xfer on a channel.
 */
static int
scsipi_enqueue(struct scsipi_xfer *xs)
{
	struct scsipi_channel *chan = xs->xs_periph->periph_channel;
	struct scsipi_xfer *qxs;
	int s;

	s = splbio();

	/*
	 * If the xfer is to be polled, and there are already jobs on
	 * the queue, we can't proceed.
	 */
	if ((xs->xs_control & XS_CTL_POLL) != 0 &&
	    TAILQ_FIRST(&chan->chan_queue) != NULL) {
		splx(s);
		xs->error = XS_DRIVER_STUFFUP;
		return (EAGAIN);
	}

	/*
	 * If we have an URGENT xfer, it's an error recovery command
	 * and it should just go on the head of the channel's queue.
	 */
	if (xs->xs_control & XS_CTL_URGENT) {
		TAILQ_INSERT_HEAD(&chan->chan_queue, xs, channel_q);
		goto out;
	}

	/*
	 * If this xfer has already been on the queue before, we
	 * need to reinsert it in the correct order.  That order is:
	 *
	 *	Immediately before the first xfer for this periph
	 *	with a requeuecnt less than xs->xs_requeuecnt.
	 *
	 * Failing that, at the end of the queue.  (We'll end up
	 * there naturally.)
	 */
	if (xs->xs_requeuecnt != 0) {
		for (qxs = TAILQ_FIRST(&chan->chan_queue); qxs != NULL;
		     qxs = TAILQ_NEXT(qxs, channel_q)) {
			if (qxs->xs_periph == xs->xs_periph &&
			    qxs->xs_requeuecnt < xs->xs_requeuecnt)
				break;
		}
		if (qxs != NULL) {
			TAILQ_INSERT_AFTER(&chan->chan_queue, qxs, xs,
			    channel_q);
			goto out;
		}
	}
	TAILQ_INSERT_TAIL(&chan->chan_queue, xs, channel_q);
 out:
	if (xs->xs_control & XS_CTL_THAW_PERIPH)
		scsipi_periph_thaw(xs->xs_periph, 1);
	splx(s);
	return (0);
}

/*
 * scsipi_run_queue:
 *
 *	Start as many xfers as possible running on the channel.
 */
static void
scsipi_run_queue(struct scsipi_channel *chan)
{
	struct scsipi_xfer *xs;
	struct scsipi_periph *periph;
	int s;

	for (;;) {
		s = splbio();

		/*
		 * If the channel is frozen, we can't do any work right
		 * now.
		 */
		if (chan->chan_qfreeze != 0) {
			splx(s);
			return;
		}

		/*
		 * Look for work to do, and make sure we can do it.
		 */
		for (xs = TAILQ_FIRST(&chan->chan_queue); xs != NULL;
		     xs = TAILQ_NEXT(xs, channel_q)) {
			periph = xs->xs_periph;

			if ((periph->periph_sent >= periph->periph_openings) ||
			    periph->periph_qfreeze != 0 ||
			    (periph->periph_flags & PERIPH_UNTAG) != 0)
				continue;

			if ((periph->periph_flags &
			    (PERIPH_RECOVERING | PERIPH_SENSE)) != 0 &&
			    (xs->xs_control & XS_CTL_URGENT) == 0)
				continue;

			/*
			 * We can issue this xfer!
			 */
			goto got_one;
		}

		/*
		 * Can't find any work to do right now.
		 */
		splx(s);
		return;

 got_one:
		/*
		 * Have an xfer to run.  Allocate a resource from
		 * the adapter to run it.  If we can't allocate that
		 * resource, we don't dequeue the xfer.
		 */
		if (scsipi_get_resource(chan) == 0) {
			/*
			 * Adapter is out of resources.  If the adapter
			 * supports it, attempt to grow them.
			 */
			if (scsipi_grow_resources(chan) == 0) {
				/*
				 * Wasn't able to grow resources,
				 * nothing more we can do.
				 */
				if (xs->xs_control & XS_CTL_POLL) {
					scsipi_printaddr(xs->xs_periph);
					printf("polling command but no "
					    "adapter resources");
					/* We'll panic shortly... */
				}
				splx(s);

				/*
				 * XXX: We should be able to note that
				 * XXX: resources are needed here!
				 */
				return;
			}
			/*
			 * scsipi_grow_resources() allocated the resource
			 * for us.
			 */
		}

		/*
		 * We have a resource to run this xfer, do it!
		 */
		TAILQ_REMOVE(&chan->chan_queue, xs, channel_q);

		/*
		 * If the command is to be tagged, allocate a tag ID
		 * for it.
		 */
		if (XS_CTL_TAGTYPE(xs) != 0)
			scsipi_get_tag(xs);
		else
			periph->periph_flags |= PERIPH_UNTAG;
		periph->periph_sent++;
		splx(s);

		scsipi_adapter_request(chan, ADAPTER_REQ_RUN_XFER, xs);
	}
#ifdef DIAGNOSTIC
	panic("scsipi_run_queue: impossible");
#endif
}

/*
 * scsipi_execute_xs:
 *
 *	Begin execution of an xfer, waiting for it to complete, if necessary.
 */
int
scsipi_execute_xs(struct scsipi_xfer *xs)
{
	struct scsipi_periph *periph = xs->xs_periph;
	struct scsipi_channel *chan = periph->periph_channel;
	int oasync, async, poll, retries, error, s;

	(chan->chan_bustype->bustype_cmd)(xs);

	if (xs->xs_control & XS_CTL_DATA_ONSTACK) {
#if 1
		if (xs->xs_control & XS_CTL_ASYNC)
			panic("scsipi_execute_xs: on stack and async");
#endif
		/*
		 * If the I/O buffer is allocated on stack, the
		 * process must NOT be swapped out, as the device will
		 * be accessing the stack.
		 */
		PHOLD(curlwp);
	}

	xs->xs_status &= ~XS_STS_DONE;
	xs->error = XS_NOERROR;
	xs->resid = xs->datalen;
	xs->status = SCSI_OK;

#ifdef SCSIPI_DEBUG
	if (xs->xs_periph->periph_dbflags & SCSIPI_DB3) {
		printf("scsipi_execute_xs: ");
		show_scsipi_xs(xs);
		printf("\n");
	}
#endif

	/*
	 * Deal with command tagging:
	 *
	 *	- If the device's current operating mode doesn't
	 *	  include tagged queueing, clear the tag mask.
	 *
	 *	- If the device's current operating mode *does*
	 *	  include tagged queueing, set the tag_type in
	 *	  the xfer to the appropriate byte for the tag
	 *	  message.
	 */
	if ((PERIPH_XFER_MODE(periph) & PERIPH_CAP_TQING) == 0 ||
	    (xs->xs_control & XS_CTL_REQSENSE)) {
		xs->xs_control &= ~XS_CTL_TAGMASK;
		xs->xs_tag_type = 0;
	} else {
		/*
		 * If the request doesn't specify a tag, give Head
		 * tags to URGENT operations and Ordered tags to
		 * everything else.
		 */
		if (XS_CTL_TAGTYPE(xs) == 0) {
			if (xs->xs_control & XS_CTL_URGENT)
				xs->xs_control |= XS_CTL_HEAD_TAG;
			else
				xs->xs_control |= XS_CTL_ORDERED_TAG;
		}

		switch (XS_CTL_TAGTYPE(xs)) {
		case XS_CTL_ORDERED_TAG:
			xs->xs_tag_type = MSG_ORDERED_Q_TAG;
			break;

		case XS_CTL_SIMPLE_TAG:
			xs->xs_tag_type = MSG_SIMPLE_Q_TAG;
			break;

		case XS_CTL_HEAD_TAG:
			xs->xs_tag_type = MSG_HEAD_OF_Q_TAG;
			break;

		default:
			scsipi_printaddr(periph);
			printf("invalid tag mask 0x%08x\n",
			    XS_CTL_TAGTYPE(xs));
			panic("scsipi_execute_xs");
		}
	}

	/* If the adapter wants us to poll, poll. */
	if (chan->chan_adapter->adapt_flags & SCSIPI_ADAPT_POLL_ONLY)
		xs->xs_control |= XS_CTL_POLL;

	/*
	 * If we don't yet have a completion thread, or we are to poll for
	 * completion, clear the ASYNC flag.
	 */
	oasync = (xs->xs_control & XS_CTL_ASYNC);
	if (chan->chan_thread == NULL || (xs->xs_control & XS_CTL_POLL) != 0)
		xs->xs_control &= ~XS_CTL_ASYNC;

	async = (xs->xs_control & XS_CTL_ASYNC);
	poll = (xs->xs_control & XS_CTL_POLL);
	retries = xs->xs_retries;	/* for polling commands */

#ifdef DIAGNOSTIC
	if (oasync != 0 && xs->bp == NULL)
		panic("scsipi_execute_xs: XS_CTL_ASYNC but no buf");
#endif

	/*
	 * Enqueue the transfer.  If we're not polling for completion, this
	 * should ALWAYS return `no error'.
	 */
 try_again:
	error = scsipi_enqueue(xs);
	if (error) {
		if (poll == 0) {
			scsipi_printaddr(periph);
			printf("not polling, but enqueue failed with %d\n",
			    error);
			panic("scsipi_execute_xs");
		}

		scsipi_printaddr(periph);
		printf("failed to enqueue polling command");
		if (retries != 0) {
			printf(", retrying...\n");
			delay(1000000);
			retries--;
			goto try_again;
		}
		printf("\n");
		goto free_xs;
	}

 restarted:
	scsipi_run_queue(chan);

	/*
	 * The xfer is enqueued, and possibly running.  If it's to be
	 * completed asynchronously, just return now.
	 */
	if (async)
		return (EJUSTRETURN);

	/*
	 * Not an asynchronous command; wait for it to complete.
	 */
	s = splbio();
	while ((xs->xs_status & XS_STS_DONE) == 0) {
		if (poll) {
			scsipi_printaddr(periph);
			printf("polling command not done\n");
			panic("scsipi_execute_xs");
		}
		(void) tsleep(xs, PRIBIO, "xscmd", 0);
	}
	splx(s);

	/*
	 * Command is complete.  scsipi_done() has awakened us to perform
	 * the error handling.
	 */
	error = scsipi_complete(xs);
	if (error == ERESTART)
		goto restarted;

	/*
	 * If it was meant to run async and we cleared async ourselves,
	 * don't return an error here; it has already been handled.
	 */
	if (oasync)
		error = EJUSTRETURN;
	/*
	 * Command completed successfully or fatal error occurred.  Fall
	 * into....
	 */
 free_xs:
	s = splbio();
	scsipi_put_xs(xs);
	splx(s);

	/*
	 * Kick the queue, keep it running in case it stopped for some
	 * reason.
	 */
	scsipi_run_queue(chan);

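	/*
	 * Note: xs was returned to the pool by scsipi_put_xs() above, so
	 * the read of xs->xs_control below relies on the descriptor not
	 * having been reused yet; latching XS_CTL_DATA_ONSTACK before
	 * the free would be a safer formulation.
	 */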
2076 if (xs->xs_control & XS_CTL_DATA_ONSTACK)
2077 PRELE(curlwp);
2078 return (error);
2079 }
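
/*
 * Usage sketch (hypothetical, for illustration only): callers normally
 * reach scsipi_execute_xs() through helpers such as
 * scsipi_test_unit_ready().  A polled command, e.g. during
 * autoconfiguration when sleeping is not possible, must be completed by
 * the adapter before the queue run returns, or the loop above panics:
 *
 *	error = scsipi_test_unit_ready(periph,
 *	    XS_CTL_POLL | XS_CTL_IGNORE_NOT_READY);
 *
 * With XS_CTL_ASYNC the function instead returns EJUSTRETURN at once,
 * and the completion thread performs the error handling.
 */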
2080
2081 /*
2082 * scsipi_completion_thread:
2083 *
2084 * This is the completion thread. We wait for errors on
2085  *	asynchronous xfers and perform the error handling,
2086  *	restarting the command if necessary.
2087 */
2088 static void
2089 scsipi_completion_thread(void *arg)
2090 {
2091 struct scsipi_channel *chan = arg;
2092 struct scsipi_xfer *xs;
2093 int s;
2094
2095 if (chan->chan_init_cb)
2096 (*chan->chan_init_cb)(chan, chan->chan_init_cb_arg);
2097
2098 s = splbio();
2099 chan->chan_flags |= SCSIPI_CHAN_TACTIVE;
2100 splx(s);
2101 for (;;) {
2102 s = splbio();
2103 xs = TAILQ_FIRST(&chan->chan_complete);
2104 if (xs == NULL && chan->chan_tflags == 0) {
2105 /* nothing to do; wait */
2106 (void) tsleep(&chan->chan_complete, PRIBIO,
2107 "sccomp", 0);
2108 splx(s);
2109 continue;
2110 }
2111 if (chan->chan_tflags & SCSIPI_CHANT_CALLBACK) {
2112 /* call chan_callback from thread context */
2113 chan->chan_tflags &= ~SCSIPI_CHANT_CALLBACK;
2114 chan->chan_callback(chan, chan->chan_callback_arg);
2115 splx(s);
2116 continue;
2117 }
2118 if (chan->chan_tflags & SCSIPI_CHANT_GROWRES) {
2119 /* attempt to get more openings for this channel */
2120 chan->chan_tflags &= ~SCSIPI_CHANT_GROWRES;
2121 scsipi_adapter_request(chan,
2122 ADAPTER_REQ_GROW_RESOURCES, NULL);
2123 scsipi_channel_thaw(chan, 1);
2124 splx(s);
2125 continue;
2126 }
2127 if (chan->chan_tflags & SCSIPI_CHANT_KICK) {
2128 /* explicitly run the queues for this channel */
2129 chan->chan_tflags &= ~SCSIPI_CHANT_KICK;
2130 scsipi_run_queue(chan);
2131 splx(s);
2132 continue;
2133 }
2134 if (chan->chan_tflags & SCSIPI_CHANT_SHUTDOWN) {
2135 splx(s);
2136 break;
2137 }
2138 if (xs) {
2139 TAILQ_REMOVE(&chan->chan_complete, xs, channel_q);
2140 splx(s);
2141
2142 /*
2143 * Have an xfer with an error; process it.
2144 */
2145 (void) scsipi_complete(xs);
2146
2147 /*
2148 * Kick the queue; keep it running if it was stopped
2149 * for some reason.
2150 */
2151 scsipi_run_queue(chan);
2152 } else {
2153 splx(s);
2154 }
2155 }
2156
2157 chan->chan_thread = NULL;
2158
2159 /* In case parent is waiting for us to exit. */
2160 wakeup(&chan->chan_thread);
2161
2162 kthread_exit(0);
2163 }
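
/*
 * Sketch (assumption, mirroring the handshake above): other contexts hand
 * work to this thread by setting a SCSIPI_CHANT_* flag and waking it up.
 * For example, to have the queues re-run from thread context:
 *
 *	s = splbio();
 *	chan->chan_tflags |= SCSIPI_CHANT_KICK;
 *	wakeup(&chan->chan_complete);
 *	splx(s);
 */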
2164
2165 /*
2166 * scsipi_create_completion_thread:
2167 *
2168 * Callback to actually create the completion thread.
2169 */
2170 void
2171 scsipi_create_completion_thread(void *arg)
2172 {
2173 struct scsipi_channel *chan = arg;
2174 struct scsipi_adapter *adapt = chan->chan_adapter;
2175
2176 if (kthread_create1(scsipi_completion_thread, chan,
2177 &chan->chan_thread, "%s", chan->chan_name)) {
2178 printf("%s: unable to create completion thread for "
2179 "channel %d\n", adapt->adapt_dev->dv_xname,
2180 chan->chan_channel);
2181 panic("scsipi_create_completion_thread");
2182 }
2183 }
2184
2185 /*
2186 * scsipi_thread_call_callback:
2187 *
2188  *	Request that a callback be invoked from the completion thread.
2189 */
2190 int
2191 scsipi_thread_call_callback(struct scsipi_channel *chan,
2192 void (*callback)(struct scsipi_channel *, void *), void *arg)
2193 {
2194 int s;
2195
2196 s = splbio();
2197 if ((chan->chan_flags & SCSIPI_CHAN_TACTIVE) == 0) {
2198 /* kernel thread doesn't exist yet */
2199 splx(s);
2200 return ESRCH;
2201 }
2202 if (chan->chan_tflags & SCSIPI_CHANT_CALLBACK) {
2203 splx(s);
2204 return EBUSY;
2205 }
2206 scsipi_channel_freeze(chan, 1);
2207 chan->chan_callback = callback;
2208 chan->chan_callback_arg = arg;
2209 chan->chan_tflags |= SCSIPI_CHANT_CALLBACK;
2210 wakeup(&chan->chan_complete);
2211 splx(s);
2212 	return (0);
2213 }
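
/*
 * Example (hypothetical driver code): an adapter that must sleep while
 * reconfiguring can defer the work to the completion thread.  Both error
 * returns need handling: ESRCH if the thread is not up yet, EBUSY if a
 * callback is already pending.  Note that scsipi_thread_call_callback()
 * freezes the channel and no matching thaw appears above, so the callback
 * is apparently expected to thaw it (an assumption based on this code):
 *
 *	static void
 *	mydrv_reconfig(struct scsipi_channel *chan, void *arg)
 *	{
 *		struct mydrv_softc *sc = arg;   (hypothetical softc)
 *
 *		mydrv_reload_params(sc);        (hypothetical helper)
 *		scsipi_channel_thaw(chan, 1);
 *	}
 *
 *	error = scsipi_thread_call_callback(chan, mydrv_reconfig, sc);
 */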
2214
2215 /*
2216 * scsipi_async_event:
2217 *
2218 * Handle an asynchronous event from an adapter.
2219 */
2220 void
2221 scsipi_async_event(struct scsipi_channel *chan, scsipi_async_event_t event,
2222 void *arg)
2223 {
2224 int s;
2225
2226 s = splbio();
2227 switch (event) {
2228 case ASYNC_EVENT_MAX_OPENINGS:
2229 scsipi_async_event_max_openings(chan,
2230 (struct scsipi_max_openings *)arg);
2231 break;
2232
2233 case ASYNC_EVENT_XFER_MODE:
2234 scsipi_async_event_xfer_mode(chan,
2235 (struct scsipi_xfer_mode *)arg);
2236 break;
2237 case ASYNC_EVENT_RESET:
2238 scsipi_async_event_channel_reset(chan);
2239 break;
2240 }
2241 splx(s);
2242 }
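
/*
 * Example (sketch): an adapter's interrupt handler would report a
 * detected bus reset as
 *
 *	scsipi_async_event(chan, ASYNC_EVENT_RESET, NULL);
 *
 * ASYNC_EVENT_MAX_OPENINGS and ASYNC_EVENT_XFER_MODE instead pass a
 * struct scsipi_max_openings or struct scsipi_xfer_mode via arg.
 */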
2243
2244 /*
2245 * scsipi_print_xfer_mode:
2246 *
2247 * Print a periph's capabilities.
2248 */
2249 void
2250 scsipi_print_xfer_mode(struct scsipi_periph *periph)
2251 {
2252 int period, freq, speed, mbs;
2253
2254 if ((periph->periph_flags & PERIPH_MODE_VALID) == 0)
2255 return;
2256
2257 aprint_normal("%s: ", periph->periph_dev->dv_xname);
2258 if (periph->periph_mode & (PERIPH_CAP_SYNC | PERIPH_CAP_DT)) {
2259 period = scsipi_sync_factor_to_period(periph->periph_period);
2260 aprint_normal("sync (%d.%02dns offset %d)",
2261 period / 100, period % 100, periph->periph_offset);
2262 } else
2263 aprint_normal("async");
2264
2265 if (periph->periph_mode & PERIPH_CAP_WIDE32)
2266 aprint_normal(", 32-bit");
2267 else if (periph->periph_mode & (PERIPH_CAP_WIDE16 | PERIPH_CAP_DT))
2268 aprint_normal(", 16-bit");
2269 else
2270 aprint_normal(", 8-bit");
2271
2272 if (periph->periph_mode & (PERIPH_CAP_SYNC | PERIPH_CAP_DT)) {
2273 freq = scsipi_sync_factor_to_freq(periph->periph_period);
2274 speed = freq;
2275 if (periph->periph_mode & PERIPH_CAP_WIDE32)
2276 speed *= 4;
2277 else if (periph->periph_mode &
2278 (PERIPH_CAP_WIDE16 | PERIPH_CAP_DT))
2279 speed *= 2;
2280 mbs = speed / 1000;
2281 if (mbs > 0)
2282 aprint_normal(" (%d.%03dMB/s)", mbs, speed % 1000);
2283 else
2284 aprint_normal(" (%dKB/s)", speed % 1000);
2285 }
2286
2287 aprint_normal(" transfers");
2288
2289 if (periph->periph_mode & PERIPH_CAP_TQING)
2290 aprint_normal(", tagged queueing");
2291
2292 aprint_normal("\n");
2293 }
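
/*
 * Worked example (hypothetical values): for a periph with sync factor
 * 0x0a (period 2500, i.e. 25.00ns, 40000kHz), offset 31,
 * PERIPH_CAP_WIDE16 and PERIPH_CAP_TQING, the line printed (as a single
 * line) would be:
 *
 *	sd0: sync (25.00ns offset 31), 16-bit (80.000MB/s) transfers,
 *	tagged queueing
 */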
2294
2295 /*
2296 * scsipi_async_event_max_openings:
2297 *
2298 * Update the maximum number of outstanding commands a
2299 * device may have.
2300 */
2301 static void
2302 scsipi_async_event_max_openings(struct scsipi_channel *chan,
2303 struct scsipi_max_openings *mo)
2304 {
2305 struct scsipi_periph *periph;
2306 int minlun, maxlun;
2307
2308 if (mo->mo_lun == -1) {
2309 /*
2310 * Wildcarded; apply it to all LUNs.
2311 */
2312 minlun = 0;
2313 maxlun = chan->chan_nluns - 1;
2314 } else
2315 minlun = maxlun = mo->mo_lun;
2316
2317 /* XXX This could really suck with a large LUN space. */
2318 for (; minlun <= maxlun; minlun++) {
2319 periph = scsipi_lookup_periph(chan, mo->mo_target, minlun);
2320 if (periph == NULL)
2321 continue;
2322
2323 if (mo->mo_openings < periph->periph_openings)
2324 periph->periph_openings = mo->mo_openings;
2325 else if (mo->mo_openings > periph->periph_openings &&
2326 (periph->periph_flags & PERIPH_GROW_OPENINGS) != 0)
2327 periph->periph_openings = mo->mo_openings;
2328 }
2329 }
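
/*
 * Example (sketch): an adapter that runs short of tags for target 2 could
 * throttle every LUN on that target, using the mo_lun == -1 wildcard
 * handled above:
 *
 *	struct scsipi_max_openings mo;
 *
 *	mo.mo_target = 2;
 *	mo.mo_lun = -1;
 *	mo.mo_openings = 4;
 *	scsipi_async_event(chan, ASYNC_EVENT_MAX_OPENINGS, &mo);
 *
 * Note that openings only grow this way if the periph has set
 * PERIPH_GROW_OPENINGS.
 */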
2330
2331 /*
2332 * scsipi_async_event_xfer_mode:
2333 *
2334 * Update the xfer mode for all periphs sharing the
2335 * specified I_T Nexus.
2336 */
2337 static void
2338 scsipi_async_event_xfer_mode(struct scsipi_channel *chan,
2339 struct scsipi_xfer_mode *xm)
2340 {
2341 struct scsipi_periph *periph;
2342 int lun, announce, mode, period, offset;
2343
2344 for (lun = 0; lun < chan->chan_nluns; lun++) {
2345 periph = scsipi_lookup_periph(chan, xm->xm_target, lun);
2346 if (periph == NULL)
2347 continue;
2348 announce = 0;
2349
2350 /*
2351 * Clamp the xfer mode down to this periph's capabilities.
2352 */
2353 mode = xm->xm_mode & periph->periph_cap;
2354 if (mode & PERIPH_CAP_SYNC) {
2355 period = xm->xm_period;
2356 offset = xm->xm_offset;
2357 } else {
2358 period = 0;
2359 offset = 0;
2360 }
2361
2362 /*
2363 * If we do not have a valid xfer mode yet, or the parameters
2364 * are different, announce them.
2365 */
2366 if ((periph->periph_flags & PERIPH_MODE_VALID) == 0 ||
2367 periph->periph_mode != mode ||
2368 periph->periph_period != period ||
2369 periph->periph_offset != offset)
2370 announce = 1;
2371
2372 periph->periph_mode = mode;
2373 periph->periph_period = period;
2374 periph->periph_offset = offset;
2375 periph->periph_flags |= PERIPH_MODE_VALID;
2376
2377 if (announce)
2378 scsipi_print_xfer_mode(periph);
2379 }
2380 }
2381
2382 /*
2383 * scsipi_set_xfer_mode:
2384 *
2385 * Set the xfer mode for the specified I_T Nexus.
2386 */
2387 void
2388 scsipi_set_xfer_mode(struct scsipi_channel *chan, int target, int immed)
2389 {
2390 struct scsipi_xfer_mode xm;
2391 struct scsipi_periph *itperiph;
2392 int lun, s;
2393
2394 /*
2395 * Go to the minimal xfer mode.
2396 */
2397 xm.xm_target = target;
2398 xm.xm_mode = 0;
2399 xm.xm_period = 0; /* ignored */
2400 xm.xm_offset = 0; /* ignored */
2401
2402 /*
2403 * Find the first LUN we know about on this I_T Nexus.
2404 */
2405 for (itperiph = NULL, lun = 0; lun < chan->chan_nluns; lun++) {
2406 itperiph = scsipi_lookup_periph(chan, target, lun);
2407 if (itperiph != NULL)
2408 break;
2409 }
2410 if (itperiph != NULL) {
2411 xm.xm_mode = itperiph->periph_cap;
2412 /*
2413 * Now issue the request to the adapter.
2414 */
2415 s = splbio();
2416 scsipi_adapter_request(chan, ADAPTER_REQ_SET_XFER_MODE, &xm);
2417 splx(s);
2418 /*
2419 * If we want this to happen immediately, issue a dummy
2420 * command, since most adapters can't really negotiate unless
2421 * they're executing a job.
2422 */
2423 if (immed != 0) {
2424 (void) scsipi_test_unit_ready(itperiph,
2425 XS_CTL_DISCOVERY | XS_CTL_IGNORE_ILLEGAL_REQUEST |
2426 XS_CTL_IGNORE_NOT_READY |
2427 XS_CTL_IGNORE_MEDIA_CHANGE);
2428 }
2429 }
2430 }
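
/*
 * Example (sketch): asking the adapter to renegotiate target 3 up to the
 * capabilities of its first attached LUN, with a dummy TEST UNIT READY
 * issued so the negotiation happens immediately:
 *
 *	scsipi_set_xfer_mode(chan, 3, 1);
 */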
2431
2432 /*
2433  * scsipi_async_event_channel_reset:
2434  *
2435  *	Handle a SCSI bus reset.
2436  *	Must be called at splbio().
2437 */
2438 static void
2439 scsipi_async_event_channel_reset(struct scsipi_channel *chan)
2440 {
2441 struct scsipi_xfer *xs, *xs_next;
2442 struct scsipi_periph *periph;
2443 int target, lun;
2444
2445 /*
2446 	 * The channel has been reset.  Also mark pending REQUEST_SENSE
2447 	 * commands with XS_RESET, as the sense data is no longer available.
2448 	 * We can't call scsipi_done() from here, as those commands have not
2449 	 * been sent to the adapter yet (that would corrupt the accounting).
2450 */
2451
2452 for (xs = TAILQ_FIRST(&chan->chan_queue); xs != NULL; xs = xs_next) {
2453 xs_next = TAILQ_NEXT(xs, channel_q);
2454 if (xs->xs_control & XS_CTL_REQSENSE) {
2455 TAILQ_REMOVE(&chan->chan_queue, xs, channel_q);
2456 xs->error = XS_RESET;
2457 if ((xs->xs_control & XS_CTL_ASYNC) != 0)
2458 TAILQ_INSERT_TAIL(&chan->chan_complete, xs,
2459 channel_q);
2460 }
2461 }
2462 wakeup(&chan->chan_complete);
2463 /* Catch xs with pending sense which may not have a REQSENSE xs yet */
2464 for (target = 0; target < chan->chan_ntargets; target++) {
2465 if (target == chan->chan_id)
2466 continue;
2467 for (lun = 0; lun < chan->chan_nluns; lun++) {
2468 periph = scsipi_lookup_periph(chan, target, lun);
2469 if (periph) {
2470 xs = periph->periph_xscheck;
2471 if (xs)
2472 xs->error = XS_RESET;
2473 }
2474 }
2475 }
2476 }
2477
2478 /*
2479 * scsipi_target_detach:
2480 *
2481  *	Detach all periphs associated with an I_T Nexus.
2482  *	Must be called from a valid thread context.
2483 */
2484 int
2485 scsipi_target_detach(struct scsipi_channel *chan, int target, int lun,
2486 int flags)
2487 {
2488 struct scsipi_periph *periph;
2489 int ctarget, mintarget, maxtarget;
2490 int clun, minlun, maxlun;
2491 int error;
2492
2493 if (target == -1) {
2494 mintarget = 0;
2495 maxtarget = chan->chan_ntargets;
2496 } else {
2497 if (target == chan->chan_id)
2498 return EINVAL;
2499 if (target < 0 || target >= chan->chan_ntargets)
2500 return EINVAL;
2501 mintarget = target;
2502 maxtarget = target + 1;
2503 }
2504
2505 if (lun == -1) {
2506 minlun = 0;
2507 maxlun = chan->chan_nluns;
2508 } else {
2509 if (lun < 0 || lun >= chan->chan_nluns)
2510 return EINVAL;
2511 minlun = lun;
2512 maxlun = lun + 1;
2513 }
2514
2515 for (ctarget = mintarget; ctarget < maxtarget; ctarget++) {
2516 if (ctarget == chan->chan_id)
2517 continue;
2518
2519 for (clun = minlun; clun < maxlun; clun++) {
2520 periph = scsipi_lookup_periph(chan, ctarget, clun);
2521 if (periph == NULL)
2522 continue;
2523 error = config_detach(periph->periph_dev, flags);
2524 if (error)
2525 return (error);
2526 }
2527 }
2528 	return (0);
2529 }
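
/*
 * Example (sketch): forcibly detaching every LUN of target 3, e.g. after
 * the device has been unplugged (lun == -1 is the wildcard):
 *
 *	error = scsipi_target_detach(chan, 3, -1, DETACH_FORCE);
 */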
2530
2531 /*
2532 * scsipi_adapter_addref:
2533 *
2534 * Add a reference to the adapter pointed to by the provided
2535 * link, enabling the adapter if necessary.
2536 */
2537 int
2538 scsipi_adapter_addref(struct scsipi_adapter *adapt)
2539 {
2540 int s, error = 0;
2541
2542 s = splbio();
2543 if (adapt->adapt_refcnt++ == 0 && adapt->adapt_enable != NULL) {
2544 error = (*adapt->adapt_enable)(adapt->adapt_dev, 1);
2545 if (error)
2546 adapt->adapt_refcnt--;
2547 }
2548 splx(s);
2549 return (error);
2550 }
2551
2552 /*
2553 * scsipi_adapter_delref:
2554 *
2555 * Delete a reference to the adapter pointed to by the provided
2556 * link, disabling the adapter if possible.
2557 */
2558 void
2559 scsipi_adapter_delref(struct scsipi_adapter *adapt)
2560 {
2561 int s;
2562
2563 s = splbio();
2564 if (adapt->adapt_refcnt-- == 1 && adapt->adapt_enable != NULL)
2565 (void) (*adapt->adapt_enable)(adapt->adapt_dev, 0);
2566 splx(s);
2567 }
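
/*
 * Sketch (assumed usage pattern): periph drivers bracket periods of
 * activity with these two calls, so that an adapter providing an
 * adapt_enable hook is only enabled while a device actually needs it:
 *
 *	if ((error = scsipi_adapter_addref(adapt)) != 0)
 *		return (error);
 *	(perform I/O)
 *	scsipi_adapter_delref(adapt);
 */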
2568
2569 static struct scsipi_syncparam {
2570 int ss_factor;
2571 int ss_period; /* ns * 100 */
2572 } scsipi_syncparams[] = {
2573 { 0x08, 625 }, /* FAST-160 (Ultra320) */
2574 { 0x09, 1250 }, /* FAST-80 (Ultra160) */
2575 { 0x0a, 2500 }, /* FAST-40 40MHz (Ultra2) */
2576 { 0x0b, 3030 }, /* FAST-40 33MHz (Ultra2) */
2577 { 0x0c, 5000 }, /* FAST-20 (Ultra) */
2578 };
2579 static const int scsipi_nsyncparams =
2580 sizeof(scsipi_syncparams) / sizeof(scsipi_syncparams[0]);
2581
2582 int
2583 scsipi_sync_period_to_factor(int period /* ns * 100 */)
2584 {
2585 int i;
2586
2587 for (i = 0; i < scsipi_nsyncparams; i++) {
2588 if (period <= scsipi_syncparams[i].ss_period)
2589 return (scsipi_syncparams[i].ss_factor);
2590 }
2591
2592 return ((period / 100) / 4);
2593 }
2594
2595 int
2596 scsipi_sync_factor_to_period(int factor)
2597 {
2598 int i;
2599
2600 for (i = 0; i < scsipi_nsyncparams; i++) {
2601 if (factor == scsipi_syncparams[i].ss_factor)
2602 return (scsipi_syncparams[i].ss_period);
2603 }
2604
2605 return ((factor * 4) * 100);
2606 }
2607
2608 int
2609 scsipi_sync_factor_to_freq(int factor)
2610 {
2611 int i;
2612
2613 for (i = 0; i < scsipi_nsyncparams; i++) {
2614 if (factor == scsipi_syncparams[i].ss_factor)
2615 return (100000000 / scsipi_syncparams[i].ss_period);
2616 }
2617
2618 return (10000000 / ((factor * 4) * 10));
2619 }
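
/*
 * Worked example: factor 0x0c maps through the table to a period of 5000
 * (50.00ns, FAST-20) and a frequency of 100000000 / 5000 = 20000kHz.
 * Factors outside the table fall back to the classic 4ns-unit encoding:
 * scsipi_sync_factor_to_period(0x19) == 0x19 * 4 * 100 == 10000 (100ns),
 * and scsipi_sync_factor_to_freq(0x19) == 10000000 / (0x19 * 40)
 * == 10000kHz.
 */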
2620
2621 #ifdef SCSIPI_DEBUG
2622 /*
2623  * Given a scsipi_xfer, dump the request in all its glory.
2624 */
2625 void
2626 show_scsipi_xs(struct scsipi_xfer *xs)
2627 {
2628
2629 printf("xs(%p): ", xs);
2630 printf("xs_control(0x%08x)", xs->xs_control);
2631 printf("xs_status(0x%08x)", xs->xs_status);
2632 printf("periph(%p)", xs->xs_periph);
2633 printf("retr(0x%x)", xs->xs_retries);
2634 printf("timo(0x%x)", xs->timeout);
2635 printf("cmd(%p)", xs->cmd);
2636 printf("len(0x%x)", xs->cmdlen);
2637 printf("data(%p)", xs->data);
2638 printf("len(0x%x)", xs->datalen);
2639 printf("res(0x%x)", xs->resid);
2640 printf("err(0x%x)", xs->error);
2641 printf("bp(%p)", xs->bp);
2642 show_scsipi_cmd(xs);
2643 }
2644
2645 void
2646 show_scsipi_cmd(struct scsipi_xfer *xs)
2647 {
2648 u_char *b = (u_char *) xs->cmd;
2649 int i = 0;
2650
2651 scsipi_printaddr(xs->xs_periph);
2652 printf(" command: ");
2653
2654 if ((xs->xs_control & XS_CTL_RESET) == 0) {
2655 while (i < xs->cmdlen) {
2656 if (i)
2657 printf(",");
2658 printf("0x%x", b[i++]);
2659 }
2660 printf("-[%d bytes]\n", xs->datalen);
2661 if (xs->datalen)
2662 show_mem(xs->data, min(64, xs->datalen));
2663 } else
2664 printf("-RESET-\n");
2665 }
2666
2667 void
2668 show_mem(u_char *address, int num)
2669 {
2670 int x;
2671
2672 printf("------------------------------");
2673 for (x = 0; x < num; x++) {
2674 if ((x % 16) == 0)
2675 printf("\n%03d: ", x);
2676 printf("%02x ", *address++);
2677 }
2678 printf("\n------------------------------\n");
2679 }
2680 #endif /* SCSIPI_DEBUG */
2681