/*	$NetBSD: scsipi_base.c,v 1.69 2002/03/16 17:21:19 bouyer Exp $	*/

/*-
 * Copyright (c) 1998, 1999, 2000 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Charles M. Hannum; by Jason R. Thorpe of the Numerical Aerospace
 * Simulation Facility, NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: scsipi_base.c,v 1.69 2002/03/16 17:21:19 bouyer Exp $");

#include "opt_scsi.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/buf.h>
#include <sys/uio.h>
#include <sys/malloc.h>
#include <sys/pool.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/proc.h>
#include <sys/kthread.h>

#include <dev/scsipi/scsipi_all.h>
#include <dev/scsipi/scsipi_disk.h>
#include <dev/scsipi/scsipiconf.h>
#include <dev/scsipi/scsipi_base.h>

#include <dev/scsipi/scsi_all.h>
#include <dev/scsipi/scsi_message.h>

int	scsipi_complete __P((struct scsipi_xfer *));
void	scsipi_request_sense __P((struct scsipi_xfer *));
int	scsipi_enqueue __P((struct scsipi_xfer *));
void	scsipi_run_queue __P((struct scsipi_channel *chan));

void	scsipi_completion_thread __P((void *));

void	scsipi_get_tag __P((struct scsipi_xfer *));
void	scsipi_put_tag __P((struct scsipi_xfer *));

int	scsipi_get_resource __P((struct scsipi_channel *));
void	scsipi_put_resource __P((struct scsipi_channel *));
__inline int scsipi_grow_resources __P((struct scsipi_channel *));

void	scsipi_async_event_max_openings __P((struct scsipi_channel *,
	    struct scsipi_max_openings *));
void	scsipi_async_event_xfer_mode __P((struct scsipi_channel *,
	    struct scsipi_xfer_mode *));
void	scsipi_async_event_channel_reset __P((struct scsipi_channel *));

struct pool scsipi_xfer_pool;

/*
 * scsipi_init:
 *
 *	Called when a scsibus or atapibus is attached to the system
 *	to initialize shared data structures.
 */
void
scsipi_init()
{
	static int scsipi_init_done;

	if (scsipi_init_done)
		return;
	scsipi_init_done = 1;

	/* Initialize the scsipi_xfer pool. */
	pool_init(&scsipi_xfer_pool, sizeof(struct scsipi_xfer), 0,
	    0, 0, "scxspl", NULL);
}

/*
 * scsipi_channel_init:
 *
 *	Initialize a scsipi_channel when it is attached.
 */
int
scsipi_channel_init(chan)
	struct scsipi_channel *chan;
{
	size_t nbytes;
	int i;

	/* Initialize shared data. */
	scsipi_init();

	/* Initialize the queues. */
	TAILQ_INIT(&chan->chan_queue);
	TAILQ_INIT(&chan->chan_complete);

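	/*
	 * Allocate the two-level periph table: chan_periphs is indexed
	 * first by target, then by LUN, each slot holding a pointer to
	 * the attached periph (or NULL).  scsipi_lookup_periph() below
	 * indexes it the same way.
	 */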
	nbytes = chan->chan_ntargets * sizeof(struct scsipi_periph **);
	chan->chan_periphs = malloc(nbytes, M_DEVBUF, M_NOWAIT);
	if (chan->chan_periphs == NULL)
		return (ENOMEM);

	nbytes = chan->chan_nluns * sizeof(struct scsipi_periph *);
	for (i = 0; i < chan->chan_ntargets; i++) {
		chan->chan_periphs[i] = malloc(nbytes, M_DEVBUF,
		    M_NOWAIT|M_ZERO);
		if (chan->chan_periphs[i] == NULL) {
			while (--i >= 0) {
				free(chan->chan_periphs[i], M_DEVBUF);
			}
			return (ENOMEM);
		}
	}

	/*
	 * Create the asynchronous completion thread.
	 */
	kthread_create(scsipi_create_completion_thread, chan);
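	/*
	 * (kthread_create() only records the request here; the thread
	 * itself is created once kernel threads can run, at which point
	 * scsipi_create_completion_thread() below is invoked with the
	 * channel as its argument.)
	 */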
	return (0);
}

/*
 * scsipi_channel_shutdown:
 *
 *	Shutdown a scsipi_channel.
 */
void
scsipi_channel_shutdown(chan)
	struct scsipi_channel *chan;
{

	/*
	 * Shut down the completion thread.
	 */
	chan->chan_tflags |= SCSIPI_CHANT_SHUTDOWN;
	wakeup(&chan->chan_complete);

	/*
	 * Now wait for the thread to exit.
	 */
	while (chan->chan_thread != NULL)
		(void) tsleep(&chan->chan_thread, PRIBIO, "scshut", 0);
}

/*
 * scsipi_insert_periph:
 *
 *	Insert a periph into the channel.
 */
void
scsipi_insert_periph(chan, periph)
	struct scsipi_channel *chan;
	struct scsipi_periph *periph;
{
	int s;

	s = splbio();
	chan->chan_periphs[periph->periph_target][periph->periph_lun] = periph;
	splx(s);
}

/*
 * scsipi_remove_periph:
 *
 *	Remove a periph from the channel.
 */
void
scsipi_remove_periph(chan, periph)
	struct scsipi_channel *chan;
	struct scsipi_periph *periph;
{
	int s;

	s = splbio();
	chan->chan_periphs[periph->periph_target][periph->periph_lun] = NULL;
	splx(s);
}

/*
 * scsipi_lookup_periph:
 *
 *	Lookup a periph on the specified channel.
 */
struct scsipi_periph *
scsipi_lookup_periph(chan, target, lun)
	struct scsipi_channel *chan;
	int target, lun;
{
	struct scsipi_periph *periph;
	int s;

	if (target >= chan->chan_ntargets ||
	    lun >= chan->chan_nluns)
		return (NULL);

	s = splbio();
	periph = chan->chan_periphs[target][lun];
	splx(s);

	return (periph);
}

/*
 * scsipi_get_resource:
 *
 *	Allocate a single xfer `resource' from the channel.
 *
 *	NOTE: Must be called at splbio().
 */
int
scsipi_get_resource(chan)
	struct scsipi_channel *chan;
{
	struct scsipi_adapter *adapt = chan->chan_adapter;

	if (chan->chan_flags & SCSIPI_CHAN_OPENINGS) {
		if (chan->chan_openings > 0) {
			chan->chan_openings--;
			return (1);
		}
		return (0);
	}

	if (adapt->adapt_openings > 0) {
		adapt->adapt_openings--;
		return (1);
	}
	return (0);
}
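
/*
 * Note: with SCSIPI_CHAN_OPENINGS set, command openings are accounted
 * per-channel (chan_openings); otherwise they come from a single pool
 * shared by all channels of the adapter (adapt_openings).
 * scsipi_put_resource() below returns an opening to whichever counter
 * it came from.
 */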

/*
 * scsipi_grow_resources:
 *
 *	Attempt to grow resources for a channel.  If this succeeds,
 *	we allocate one for our caller.
 *
 *	NOTE: Must be called at splbio().
 */
__inline int
scsipi_grow_resources(chan)
	struct scsipi_channel *chan;
{

	if (chan->chan_flags & SCSIPI_CHAN_CANGROW) {
		if ((chan->chan_flags & SCSIPI_CHAN_TACTIVE) == 0) {
			scsipi_adapter_request(chan,
			    ADAPTER_REQ_GROW_RESOURCES, NULL);
			return (scsipi_get_resource(chan));
		}
		/*
		 * Ask the channel thread to do it.  It'll have to thaw
		 * the queue.
		 */
		scsipi_channel_freeze(chan, 1);
		chan->chan_tflags |= SCSIPI_CHANT_GROWRES;
		wakeup(&chan->chan_complete);
		return (0);
	}

	return (0);
}

/*
 * scsipi_put_resource:
 *
 *	Free a single xfer `resource' to the channel.
 *
 *	NOTE: Must be called at splbio().
 */
void
scsipi_put_resource(chan)
	struct scsipi_channel *chan;
{
	struct scsipi_adapter *adapt = chan->chan_adapter;

	if (chan->chan_flags & SCSIPI_CHAN_OPENINGS)
		chan->chan_openings++;
	else
		adapt->adapt_openings++;
}

/*
 * scsipi_get_tag:
 *
 *	Get a tag ID for the specified xfer.
 *
 *	NOTE: Must be called at splbio().
 */
void
scsipi_get_tag(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_periph *periph = xs->xs_periph;
	int word, bit, tag;

	for (word = 0; word < PERIPH_NTAGWORDS; word++) {
		bit = ffs(periph->periph_freetags[word]);
		if (bit != 0)
			break;
	}
#ifdef DIAGNOSTIC
	if (word == PERIPH_NTAGWORDS) {
		scsipi_printaddr(periph);
		printf("no free tags\n");
		panic("scsipi_get_tag");
	}
#endif

	bit -= 1;
	periph->periph_freetags[word] &= ~(1 << bit);
	tag = (word << 5) | bit;
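	/*
	 * Each periph_freetags word tracks 32 tag IDs, so a tag is
	 * encoded as (word * 32) + bit; for example, tag 33 is word 1,
	 * bit 1.  scsipi_put_tag() below inverts this with (tag >> 5)
	 * and (tag & 0x1f).
	 */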

	/* XXX Should eventually disallow this completely. */
	if (tag >= periph->periph_openings) {
		scsipi_printaddr(periph);
		printf("WARNING: tag %d greater than available openings %d\n",
		    tag, periph->periph_openings);
	}

	xs->xs_tag_id = tag;
}

/*
 * scsipi_put_tag:
 *
 *	Put the tag ID for the specified xfer back into the pool.
 *
 *	NOTE: Must be called at splbio().
 */
void
scsipi_put_tag(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_periph *periph = xs->xs_periph;
	int word, bit;

	word = xs->xs_tag_id >> 5;
	bit = xs->xs_tag_id & 0x1f;

	periph->periph_freetags[word] |= (1 << bit);
}

/*
 * scsipi_get_xs:
 *
 *	Allocate an xfer descriptor and associate it with the
 *	specified peripheral.  If the peripheral has no more
 *	available command openings, we either block waiting for
 *	one to become available, or fail.
 */
struct scsipi_xfer *
scsipi_get_xs(periph, flags)
	struct scsipi_periph *periph;
	int flags;
{
	struct scsipi_xfer *xs;
	int s;

	SC_DEBUG(periph, SCSIPI_DB3, ("scsipi_get_xs\n"));

	/*
	 * If we're cold, make sure we poll.
	 */
	if (cold)
		flags |= XS_CTL_NOSLEEP | XS_CTL_POLL;

#ifdef DIAGNOSTIC
	/*
	 * URGENT commands can never be ASYNC.
	 */
	if ((flags & (XS_CTL_URGENT|XS_CTL_ASYNC)) ==
	    (XS_CTL_URGENT|XS_CTL_ASYNC)) {
		scsipi_printaddr(periph);
		printf("URGENT and ASYNC\n");
		panic("scsipi_get_xs");
	}
#endif

	s = splbio();
	/*
	 * Wait for a command opening to become available.  Rules:
	 *
	 *	- All xfers must wait for an available opening.
	 *	  Exception: URGENT xfers can proceed when
	 *	  active == openings, because we use the opening
	 *	  of the command we're recovering for.
	 *	- if the periph has sense pending, only URGENT & REQSENSE
	 *	  xfers may proceed.
	 *
	 *	- If the periph is recovering, only URGENT xfers may
	 *	  proceed.
	 *
	 *	- If the periph is currently executing a recovery
	 *	  command, URGENT commands must block, because only
	 *	  one recovery command can execute at a time.
	 */
	for (;;) {
		if (flags & XS_CTL_URGENT) {
			if (periph->periph_active > periph->periph_openings)
				goto wait_for_opening;
			if (periph->periph_flags & PERIPH_SENSE) {
				if ((flags & XS_CTL_REQSENSE) == 0)
					goto wait_for_opening;
			} else {
				if ((periph->periph_flags &
				    PERIPH_RECOVERY_ACTIVE) != 0)
					goto wait_for_opening;
				periph->periph_flags |= PERIPH_RECOVERY_ACTIVE;
			}
			break;
		}
		if (periph->periph_active >= periph->periph_openings ||
		    (periph->periph_flags & PERIPH_RECOVERING) != 0)
			goto wait_for_opening;
		periph->periph_active++;
		break;

 wait_for_opening:
		if (flags & XS_CTL_NOSLEEP) {
			splx(s);
			return (NULL);
		}
		SC_DEBUG(periph, SCSIPI_DB3, ("sleeping\n"));
		periph->periph_flags |= PERIPH_WAITING;
		(void) tsleep(periph, PRIBIO, "getxs", 0);
	}
	SC_DEBUG(periph, SCSIPI_DB3, ("calling pool_get\n"));
	xs = pool_get(&scsipi_xfer_pool,
	    ((flags & XS_CTL_NOSLEEP) != 0 ? PR_NOWAIT : PR_WAITOK));
	if (xs == NULL) {
		if (flags & XS_CTL_URGENT) {
			if ((flags & XS_CTL_REQSENSE) == 0)
				periph->periph_flags &= ~PERIPH_RECOVERY_ACTIVE;
		} else
			periph->periph_active--;
		scsipi_printaddr(periph);
		printf("unable to allocate %sscsipi_xfer\n",
		    (flags & XS_CTL_URGENT) ? "URGENT " : "");
	}
	splx(s);

	SC_DEBUG(periph, SCSIPI_DB3, ("returning\n"));

	if (xs != NULL) {
		memset(xs, 0, sizeof(*xs));
		callout_init(&xs->xs_callout);
		xs->xs_periph = periph;
		xs->xs_control = flags;
		xs->xs_status = 0;
		s = splbio();
		TAILQ_INSERT_TAIL(&periph->periph_xferq, xs, device_q);
		splx(s);
	}
	return (xs);
}

/*
 * scsipi_put_xs:
 *
 *	Release an xfer descriptor, decreasing the outstanding command
 *	count for the peripheral.  If there is a thread waiting for
 *	an opening, wake it up.  If not, kick any queued I/O the
 *	peripheral may have.
 *
 *	NOTE: Must be called at splbio().
 */
void
scsipi_put_xs(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_periph *periph = xs->xs_periph;
	int flags = xs->xs_control;

	SC_DEBUG(periph, SCSIPI_DB3, ("scsipi_free_xs\n"));

	TAILQ_REMOVE(&periph->periph_xferq, xs, device_q);
	pool_put(&scsipi_xfer_pool, xs);

#ifdef DIAGNOSTIC
	if ((periph->periph_flags & PERIPH_RECOVERY_ACTIVE) != 0 &&
	    periph->periph_active == 0) {
		scsipi_printaddr(periph);
512 printf("recovery without a command to recovery for\n");
		panic("scsipi_put_xs");
	}
#endif

	if (flags & XS_CTL_URGENT) {
		if ((flags & XS_CTL_REQSENSE) == 0)
			periph->periph_flags &= ~PERIPH_RECOVERY_ACTIVE;
	} else
		periph->periph_active--;
	if (periph->periph_active == 0 &&
	    (periph->periph_flags & PERIPH_WAITDRAIN) != 0) {
		periph->periph_flags &= ~PERIPH_WAITDRAIN;
		wakeup(&periph->periph_active);
	}

	if (periph->periph_flags & PERIPH_WAITING) {
		periph->periph_flags &= ~PERIPH_WAITING;
		wakeup(periph);
	} else {
		if (periph->periph_switch->psw_start != NULL) {
			SC_DEBUG(periph, SCSIPI_DB2,
			    ("calling private start()\n"));
			(*periph->periph_switch->psw_start)(periph);
		}
	}
}

/*
 * scsipi_channel_freeze:
 *
 *	Freeze a channel's xfer queue.
 */
void
scsipi_channel_freeze(chan, count)
	struct scsipi_channel *chan;
	int count;
{
	int s;

	s = splbio();
	chan->chan_qfreeze += count;
	splx(s);
}

/*
 * scsipi_channel_thaw:
 *
 *	Thaw a channel's xfer queue.
 */
void
scsipi_channel_thaw(chan, count)
	struct scsipi_channel *chan;
	int count;
{
	int s;

	s = splbio();
	chan->chan_qfreeze -= count;
	/*
	 * Don't let the freeze count go negative.
	 *
	 * Presumably the adapter driver could keep track of this,
	 * but it might just be easier to do this here so as to allow
	 * multiple callers, including those outside the adapter driver.
	 */
	if (chan->chan_qfreeze < 0) {
		chan->chan_qfreeze = 0;
	}
	splx(s);
	/*
	 * Kick the channel's queue here.  Note, we may be running in
	 * interrupt context (softclock or HBA's interrupt), so the adapter
	 * driver had better not sleep.
	 */
	if (chan->chan_qfreeze == 0)
		scsipi_run_queue(chan);
}

/*
 * scsipi_channel_timed_thaw:
 *
 *	Thaw a channel after some time has expired.  This will also
 *	run the channel's queue if the freeze count has reached 0.
 */
void
scsipi_channel_timed_thaw(arg)
	void *arg;
{
	struct scsipi_channel *chan = arg;

	scsipi_channel_thaw(chan, 1);
}

/*
 * scsipi_periph_freeze:
 *
 *	Freeze a device's xfer queue.
 */
void
scsipi_periph_freeze(periph, count)
	struct scsipi_periph *periph;
	int count;
{
	int s;

	s = splbio();
	periph->periph_qfreeze += count;
	splx(s);
}

/*
 * scsipi_periph_thaw:
 *
 *	Thaw a device's xfer queue.
 */
void
scsipi_periph_thaw(periph, count)
	struct scsipi_periph *periph;
	int count;
{
	int s;

	s = splbio();
	periph->periph_qfreeze -= count;
#ifdef DIAGNOSTIC
	if (periph->periph_qfreeze < 0) {
		static const char pc[] = "periph freeze count < 0";
		scsipi_printaddr(periph);
		printf("%s\n", pc);
		panic(pc);
	}
#endif
	if (periph->periph_qfreeze == 0 &&
	    (periph->periph_flags & PERIPH_WAITING) != 0)
		wakeup(periph);
	splx(s);
}

/*
 * scsipi_periph_timed_thaw:
 *
 *	Thaw a device after some time has expired.
 */
void
scsipi_periph_timed_thaw(arg)
	void *arg;
{
	int s;
	struct scsipi_periph *periph = arg;

	callout_stop(&periph->periph_callout);

	s = splbio();
	scsipi_periph_thaw(periph, 1);
	if ((periph->periph_channel->chan_flags & SCSIPI_CHAN_TACTIVE) == 0) {
		/*
		 * Kick the channel's queue here.  Note, we're running in
		 * interrupt context (softclock), so the adapter driver
		 * had better not sleep.
		 */
		scsipi_run_queue(periph->periph_channel);
	} else {
		/*
		 * Tell the completion thread to kick the channel's queue here.
		 */
		periph->periph_channel->chan_tflags |= SCSIPI_CHANT_KICK;
		wakeup(&periph->periph_channel->chan_complete);
	}
	splx(s);
}

/*
 * scsipi_wait_drain:
 *
 *	Wait for a periph's pending xfers to drain.
 */
void
scsipi_wait_drain(periph)
	struct scsipi_periph *periph;
{
	int s;

	s = splbio();
	while (periph->periph_active != 0) {
		periph->periph_flags |= PERIPH_WAITDRAIN;
		(void) tsleep(&periph->periph_active, PRIBIO, "sxdrn", 0);
	}
	splx(s);
}

/*
 * scsipi_kill_pending:
 *
 *	Kill off all pending xfers for a periph.
 *
 *	NOTE: Must be called at splbio().
 */
void
scsipi_kill_pending(periph)
	struct scsipi_periph *periph;
{

	(*periph->periph_channel->chan_bustype->bustype_kill_pending)(periph);
#ifdef DIAGNOSTIC
	if (TAILQ_FIRST(&periph->periph_xferq) != NULL)
		panic("scsipi_kill_pending");
#endif
	scsipi_wait_drain(periph);
}

/*
 * scsipi_interpret_sense:
 *
 *	Look at the returned sense and act on the error, determining
 *	the unix error number to pass back.  (0 = report no error)
 *
 *	NOTE: If we return ERESTART, we are expected to have
 *	thawed the device!
 *
 *	THIS IS THE DEFAULT ERROR HANDLER FOR SCSI DEVICES.
 */
int
scsipi_interpret_sense(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_sense_data *sense;
	struct scsipi_periph *periph = xs->xs_periph;
	u_int8_t key;
	u_int32_t info;
	int error;
#ifndef SCSIVERBOSE
	static char *error_mes[] = {
		"soft error (corrected)",
		"not ready", "medium error",
		"non-media hardware failure", "illegal request",
		"unit attention", "readonly device",
		"no data found", "vendor unique",
		"copy aborted", "command aborted",
		"search returned equal", "volume overflow",
		"verify miscompare", "unknown error key"
	};
#endif

	sense = &xs->sense.scsi_sense;
#ifdef SCSIPI_DEBUG
	if (periph->periph_flags & SCSIPI_DB1) {
		int count;
		scsipi_printaddr(periph);
		printf(" sense debug information:\n");
		printf("\tcode 0x%x valid 0x%x\n",
		    sense->error_code & SSD_ERRCODE,
		    sense->error_code & SSD_ERRCODE_VALID ? 1 : 0);
		printf("\tseg 0x%x key 0x%x ili 0x%x eom 0x%x fmark 0x%x\n",
		    sense->segment,
		    sense->flags & SSD_KEY,
		    sense->flags & SSD_ILI ? 1 : 0,
		    sense->flags & SSD_EOM ? 1 : 0,
		    sense->flags & SSD_FILEMARK ? 1 : 0);
		printf("\ninfo: 0x%x 0x%x 0x%x 0x%x followed by %d "
		    "extra bytes\n",
		    sense->info[0],
		    sense->info[1],
		    sense->info[2],
		    sense->info[3],
		    sense->extra_len);
		printf("\textra: ");
		for (count = 0; count < ADD_BYTES_LIM(sense); count++)
			printf("0x%x ", sense->cmd_spec_info[count]);
		printf("\n");
	}
#endif

	/*
	 * If the periph has its own error handler, call it first.
	 * If it returns a legit error value, return that, otherwise
	 * it wants us to continue with normal error processing.
	 */
	if (periph->periph_switch->psw_error != NULL) {
		SC_DEBUG(periph, SCSIPI_DB2,
		    ("calling private err_handler()\n"));
		error = (*periph->periph_switch->psw_error)(xs);
		if (error != EJUSTRETURN)
			return (error);
	}
	/* otherwise use the default */
	switch (sense->error_code & SSD_ERRCODE) {

	/*
	 * Old SCSI-1 and SASI devices respond with
	 * codes other than 70.
	 */
	case 0x00:		/* no error (command completed OK) */
		return (0);
	case 0x04:		/* drive not ready after it was selected */
		if ((periph->periph_flags & PERIPH_REMOVABLE) != 0)
			periph->periph_flags &= ~PERIPH_MEDIA_LOADED;
		if ((xs->xs_control & XS_CTL_IGNORE_NOT_READY) != 0)
			return (0);
		/* XXX - display some sort of error here? */
		return (EIO);
	case 0x20:		/* invalid command */
		if ((xs->xs_control &
		    XS_CTL_IGNORE_ILLEGAL_REQUEST) != 0)
			return (0);
		return (EINVAL);
	case 0x25:		/* invalid LUN (Adaptec ACB-4000) */
		return (EACCES);

	/*
	 * If it's code 70, use the extended stuff and
	 * interpret the key
	 */
	case 0x71:		/* delayed error */
		scsipi_printaddr(periph);
		key = sense->flags & SSD_KEY;
		printf(" DEFERRED ERROR, key = 0x%x\n", key);
		/* FALLTHROUGH */
	case 0x70:
		if ((sense->error_code & SSD_ERRCODE_VALID) != 0)
			info = _4btol(sense->info);
		else
			info = 0;
		key = sense->flags & SSD_KEY;

		switch (key) {
		case SKEY_NO_SENSE:
		case SKEY_RECOVERED_ERROR:
			if (xs->resid == xs->datalen && xs->datalen) {
				/*
				 * Why is this here?
				 */
				xs->resid = 0;	/* not short read */
			}
		case SKEY_EQUAL:
			error = 0;
			break;
		case SKEY_NOT_READY:
			if ((periph->periph_flags & PERIPH_REMOVABLE) != 0)
				periph->periph_flags &= ~PERIPH_MEDIA_LOADED;
			if ((xs->xs_control & XS_CTL_IGNORE_NOT_READY) != 0)
				return (0);
			if (sense->add_sense_code == 0x3A) {
				error = ENODEV; /* Medium not present */
				if (xs->xs_control & XS_CTL_SILENT_NODEV)
					return (error);
			} else
				error = EIO;
			if ((xs->xs_control & XS_CTL_SILENT) != 0)
				return (error);
			break;
		case SKEY_ILLEGAL_REQUEST:
			if ((xs->xs_control &
			    XS_CTL_IGNORE_ILLEGAL_REQUEST) != 0)
				return (0);
			/*
			 * Handle the case where a device reports
			 * Logical Unit Not Supported during discovery.
			 */
			if ((xs->xs_control & XS_CTL_DISCOVERY) != 0 &&
			    sense->add_sense_code == 0x25 &&
			    sense->add_sense_code_qual == 0x00)
				return (EINVAL);
			if ((xs->xs_control & XS_CTL_SILENT) != 0)
				return (EIO);
			error = EINVAL;
			break;
		case SKEY_UNIT_ATTENTION:
			if (sense->add_sense_code == 0x29 &&
			    sense->add_sense_code_qual == 0x00) {
				/* device or bus reset */
				return (ERESTART);
			}
			if ((periph->periph_flags & PERIPH_REMOVABLE) != 0)
				periph->periph_flags &= ~PERIPH_MEDIA_LOADED;
			if ((xs->xs_control &
			    XS_CTL_IGNORE_MEDIA_CHANGE) != 0 ||
			    /* XXX Should reupload any transient state. */
			    (periph->periph_flags &
			     PERIPH_REMOVABLE) == 0) {
				return (ERESTART);
			}
			if ((xs->xs_control & XS_CTL_SILENT) != 0)
				return (EIO);
			error = EIO;
			break;
		case SKEY_WRITE_PROTECT:
			error = EROFS;
			break;
		case SKEY_BLANK_CHECK:
			error = 0;
			break;
		case SKEY_ABORTED_COMMAND:
			error = ERESTART;
			break;
		case SKEY_VOLUME_OVERFLOW:
			error = ENOSPC;
			break;
		default:
			error = EIO;
			break;
		}

#ifdef SCSIVERBOSE
		if (key && (xs->xs_control & XS_CTL_SILENT) == 0)
			scsipi_print_sense(xs, 0);
#else
		if (key) {
			scsipi_printaddr(periph);
			printf("%s", error_mes[key - 1]);
			if ((sense->error_code & SSD_ERRCODE_VALID) != 0) {
				switch (key) {
				case SKEY_NOT_READY:
				case SKEY_ILLEGAL_REQUEST:
				case SKEY_UNIT_ATTENTION:
				case SKEY_WRITE_PROTECT:
					break;
				case SKEY_BLANK_CHECK:
					printf(", requested size: %d (decimal)",
					    info);
					break;
				case SKEY_ABORTED_COMMAND:
					if (xs->xs_retries)
						printf(", retrying");
					printf(", cmd 0x%x, info 0x%x",
					    xs->cmd->opcode, info);
					break;
				default:
					printf(", info = %d (decimal)", info);
				}
			}
			if (sense->extra_len != 0) {
				int n;
				printf(", data =");
				for (n = 0; n < sense->extra_len; n++)
					printf(" %02x",
					    sense->cmd_spec_info[n]);
			}
			printf("\n");
		}
#endif
		return (error);

	/*
	 * Some other code, just report it
	 */
	default:
#if defined(SCSIDEBUG) || defined(DEBUG)
	{
		static char *uc = "undecodable sense error";
		int i;
		u_int8_t *cptr = (u_int8_t *) sense;
		scsipi_printaddr(periph);
		if (xs->cmd == &xs->cmdstore) {
			printf("%s for opcode 0x%x, data=",
			    uc, xs->cmdstore.opcode);
		} else {
			printf("%s, data=", uc);
		}
		for (i = 0; i < sizeof (*sense); i++)
972 printf(" 0x%02x", *(cptr++) & 0xff);
973 printf("\n");
974 }
975 #else
976 scsipi_printaddr(periph);
977 printf("Sense Error Code 0x%x",
978 sense->error_code & SSD_ERRCODE);
979 if ((sense->error_code & SSD_ERRCODE_VALID) != 0) {
980 struct scsipi_sense_data_unextended *usense =
981 (struct scsipi_sense_data_unextended *)sense;
982 printf(" at block no. %d (decimal)",
983 _3btol(usense->block));
984 }
985 printf("\n");
986 #endif
987 return (EIO);
988 }
989 }
990
991 /*
992 * scsipi_size:
993 *
994 * Find out from the device what its capacity is.
995 */
996 u_long
997 scsipi_size(periph, flags)
998 struct scsipi_periph *periph;
999 int flags;
1000 {
1001 struct scsipi_read_cap_data rdcap;
1002 struct scsipi_read_capacity scsipi_cmd;
1003
1004 memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
1005 scsipi_cmd.opcode = READ_CAPACITY;
1006
1007 /*
1008 * If the command works, interpret the result as a 4 byte
1009 * number of blocks
1010 */
1011 if (scsipi_command(periph, (struct scsipi_generic *)&scsipi_cmd,
1012 sizeof(scsipi_cmd), (u_char *)&rdcap, sizeof(rdcap),
1013 SCSIPIRETRIES, 20000, NULL,
1014 flags | XS_CTL_DATA_IN | XS_CTL_DATA_ONSTACK) != 0) {
1015 scsipi_printaddr(periph);
1016 printf("could not get size\n");
1017 return (0);
1018 }
1019
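	/*
	 * rdcap.addr holds the logical block address of the last block
	 * on the device, so the capacity in blocks is that value plus
	 * one.
	 */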
	return (_4btol(rdcap.addr) + 1);
}

/*
 * scsipi_test_unit_ready:
 *
 *	Issue a `test unit ready' request.
 */
int
scsipi_test_unit_ready(periph, flags)
	struct scsipi_periph *periph;
	int flags;
{
	struct scsipi_test_unit_ready scsipi_cmd;

	/* some ATAPI drives don't support TEST_UNIT_READY. Sigh */
	if (periph->periph_quirks & PQUIRK_NOTUR)
		return (0);

	memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
	scsipi_cmd.opcode = TEST_UNIT_READY;

	return (scsipi_command(periph,
	    (struct scsipi_generic *)&scsipi_cmd, sizeof(scsipi_cmd),
	    0, 0, SCSIPIRETRIES, 10000, NULL, flags));
}

/*
 * scsipi_inquire:
 *
 *	Ask the device about itself.
 */
int
scsipi_inquire(periph, inqbuf, flags)
	struct scsipi_periph *periph;
	struct scsipi_inquiry_data *inqbuf;
	int flags;
{
	struct scsipi_inquiry scsipi_cmd;
	int error;

	memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
	scsipi_cmd.opcode = INQUIRY;
	scsipi_cmd.length = sizeof(struct scsipi_inquiry_data);

	error = scsipi_command(periph,
	    (struct scsipi_generic *) &scsipi_cmd, sizeof(scsipi_cmd),
	    (u_char *) inqbuf, sizeof(struct scsipi_inquiry_data),
	    SCSIPIRETRIES, 10000, NULL, XS_CTL_DATA_IN | flags);

#ifdef SCSI_OLD_NOINQUIRY
	/*
	 * Kludge for the Adaptec ACB-4000 SCSI->MFM translator.
	 * This board doesn't support the INQUIRY command at all.
	 */
	if (error == EINVAL || error == EACCES) {
		/*
		 * Conjure up an INQUIRY response.
		 */
		inqbuf->device = (error == EINVAL ?
		    SID_QUAL_LU_PRESENT :
		    SID_QUAL_LU_NOTPRESENT) | T_DIRECT;
		inqbuf->dev_qual2 = 0;
		inqbuf->version = 0;
		inqbuf->response_format = SID_FORMAT_SCSI1;
		inqbuf->additional_length = 3 + 28;
		inqbuf->flags1 = inqbuf->flags2 = inqbuf->flags3 = 0;
		memcpy(inqbuf->vendor, "ADAPTEC ", sizeof(inqbuf->vendor));
		memcpy(inqbuf->product, "ACB-4000        ",
		    sizeof(inqbuf->product));
		memcpy(inqbuf->revision, "    ", sizeof(inqbuf->revision));
		error = 0;
	}

	/*
	 * Kludge for the Emulex MT-02 SCSI->QIC translator.
	 * This board gives an empty response to an INQUIRY command.
	 */
	else if (error == 0 &&
	    inqbuf->device == (SID_QUAL_LU_PRESENT | T_DIRECT) &&
	    inqbuf->dev_qual2 == 0 &&
	    inqbuf->version == 0 &&
	    inqbuf->response_format == SID_FORMAT_SCSI1) {
		/*
		 * Fill out the INQUIRY response.
		 */
		inqbuf->device = (SID_QUAL_LU_PRESENT | T_SEQUENTIAL);
		inqbuf->dev_qual2 = SID_REMOVABLE;
		inqbuf->additional_length = 3 + 28;
		inqbuf->flags1 = inqbuf->flags2 = inqbuf->flags3 = 0;
		memcpy(inqbuf->vendor, "EMULEX  ", sizeof(inqbuf->vendor));
		memcpy(inqbuf->product, "MT-02 QIC       ",
		    sizeof(inqbuf->product));
		memcpy(inqbuf->revision, "    ", sizeof(inqbuf->revision));
	}
#endif /* SCSI_OLD_NOINQUIRY */

	return error;
}

/*
 * scsipi_prevent:
 *
 *	Prevent or allow the user to remove the media
 */
int
scsipi_prevent(periph, type, flags)
	struct scsipi_periph *periph;
	int type, flags;
{
	struct scsipi_prevent scsipi_cmd;

	if (periph->periph_quirks & PQUIRK_NODOORLOCK)
		return (0);

	memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
	scsipi_cmd.opcode = PREVENT_ALLOW;
	scsipi_cmd.how = type;

	return (scsipi_command(periph,
	    (struct scsipi_generic *) &scsipi_cmd, sizeof(scsipi_cmd),
	    0, 0, SCSIPIRETRIES, 5000, NULL, flags));
}

/*
 * scsipi_start:
 *
 *	Send a START UNIT.
 */
int
scsipi_start(periph, type, flags)
	struct scsipi_periph *periph;
	int type, flags;
{
	struct scsipi_start_stop scsipi_cmd;

	if (periph->periph_quirks & PQUIRK_NOSTARTUNIT)
		return 0;

	memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
	scsipi_cmd.opcode = START_STOP;
	scsipi_cmd.byte2 = 0x00;
	scsipi_cmd.how = type;

	return (scsipi_command(periph,
	    (struct scsipi_generic *) &scsipi_cmd, sizeof(scsipi_cmd),
	    0, 0, SCSIPIRETRIES, (type & SSS_START) ? 60000 : 10000,
	    NULL, flags));
}

/*
 * scsipi_mode_sense, scsipi_mode_sense_big:
 *	get a sense page from a device
 */

int
scsipi_mode_sense(periph, byte2, page, data, len, flags, retries, timeout)
	struct scsipi_periph *periph;
	int byte2, page, len, flags, retries, timeout;
	struct scsipi_mode_header *data;
{
	struct scsipi_mode_sense scsipi_cmd;
	int error;

	memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
	scsipi_cmd.opcode = MODE_SENSE;
	scsipi_cmd.byte2 = byte2;
	scsipi_cmd.page = page;
	if (scsipi_periph_bustype(periph) == SCSIPI_BUSTYPE_ATAPI)
		_lto2b(len, scsipi_cmd.u_len.atapi.length);
	else
		scsipi_cmd.u_len.scsi.length = len & 0xff;
	error = scsipi_command(periph, (struct scsipi_generic *)&scsipi_cmd,
	    sizeof(scsipi_cmd), (void *)data, len, retries, timeout, NULL,
	    flags | XS_CTL_DATA_IN);
	SC_DEBUG(periph, SCSIPI_DB2,
	    ("scsipi_mode_sense: error=%d\n", error));
	return (error);
}
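
/*
 * Example (sketch only, not part of the driver API contract): a periph
 * driver reading the caching mode page (page 8) into a hypothetical
 * on-stack buffer might do:
 *
 *	struct {
 *		struct scsipi_mode_header header;
 *		u_int8_t page[20];
 *	} data;
 *
 *	error = scsipi_mode_sense(periph, 0, 8, &data.header,
 *	    sizeof(data), XS_CTL_DATA_ONSTACK, SCSIPIRETRIES, 10000);
 */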

int
scsipi_mode_sense_big(periph, byte2, page, data, len, flags, retries, timeout)
	struct scsipi_periph *periph;
	int byte2, page, len, flags, retries, timeout;
	struct scsipi_mode_header_big *data;
{
	struct scsipi_mode_sense_big scsipi_cmd;
	int error;

	memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
	scsipi_cmd.opcode = MODE_SENSE_BIG;
	scsipi_cmd.byte2 = byte2;
	scsipi_cmd.page = page;
	_lto2b(len, scsipi_cmd.length);
	error = scsipi_command(periph, (struct scsipi_generic *)&scsipi_cmd,
	    sizeof(scsipi_cmd), (void *)data, len, retries, timeout, NULL,
	    flags | XS_CTL_DATA_IN);
	SC_DEBUG(periph, SCSIPI_DB2,
	    ("scsipi_mode_sense_big: error=%d\n", error));
	return (error);
}

int
scsipi_mode_select(periph, byte2, data, len, flags, retries, timeout)
	struct scsipi_periph *periph;
	int byte2, len, flags, retries, timeout;
	struct scsipi_mode_header *data;
{
	struct scsipi_mode_select scsipi_cmd;
	int error;

	memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
	scsipi_cmd.opcode = MODE_SELECT;
	scsipi_cmd.byte2 = byte2;
	if (scsipi_periph_bustype(periph) == SCSIPI_BUSTYPE_ATAPI)
		_lto2b(len, scsipi_cmd.u_len.atapi.length);
	else
		scsipi_cmd.u_len.scsi.length = len & 0xff;
	error = scsipi_command(periph, (struct scsipi_generic *)&scsipi_cmd,
	    sizeof(scsipi_cmd), (void *)data, len, retries, timeout, NULL,
	    flags | XS_CTL_DATA_OUT);
	SC_DEBUG(periph, SCSIPI_DB2,
	    ("scsipi_mode_select: error=%d\n", error));
	return (error);
}

int
scsipi_mode_select_big(periph, byte2, data, len, flags, retries, timeout)
	struct scsipi_periph *periph;
	int byte2, len, flags, retries, timeout;
	struct scsipi_mode_header_big *data;
{
	struct scsipi_mode_select_big scsipi_cmd;
	int error;

	memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
	scsipi_cmd.opcode = MODE_SELECT_BIG;
	scsipi_cmd.byte2 = byte2;
	_lto2b(len, scsipi_cmd.length);
	error = scsipi_command(periph, (struct scsipi_generic *)&scsipi_cmd,
	    sizeof(scsipi_cmd), (void *)data, len, retries, timeout, NULL,
	    flags | XS_CTL_DATA_OUT);
	SC_DEBUG(periph, SCSIPI_DB2,
1263 ("scsipi_mode_select: error=%d\n", error));
	return (error);
}

/*
 * scsipi_done:
 *
 *	This routine is called by an adapter's interrupt handler when
 *	an xfer is completed.
 */
void
scsipi_done(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_periph *periph = xs->xs_periph;
	struct scsipi_channel *chan = periph->periph_channel;
	int s, freezecnt;

	SC_DEBUG(periph, SCSIPI_DB2, ("scsipi_done\n"));
#ifdef SCSIPI_DEBUG
	if (periph->periph_dbflags & SCSIPI_DB1)
		show_scsipi_cmd(xs);
#endif

	s = splbio();
	/*
	 * The resource this command was using is now free.
	 */
	scsipi_put_resource(chan);
	xs->xs_periph->periph_sent--;

	/*
	 * If the command was tagged, free the tag.
	 */
	if (XS_CTL_TAGTYPE(xs) != 0)
		scsipi_put_tag(xs);
	else
		periph->periph_flags &= ~PERIPH_UNTAG;

	/* Mark the command as `done'. */
	xs->xs_status |= XS_STS_DONE;

#ifdef DIAGNOSTIC
	if ((xs->xs_control & (XS_CTL_ASYNC|XS_CTL_POLL)) ==
	    (XS_CTL_ASYNC|XS_CTL_POLL))
		panic("scsipi_done: ASYNC and POLL");
#endif

	/*
	 * If the xfer had an error of any sort, freeze the
	 * periph's queue.  Freeze it again if we were requested
	 * to do so in the xfer.
	 */
	freezecnt = 0;
	if (xs->error != XS_NOERROR)
		freezecnt++;
	if (xs->xs_control & XS_CTL_FREEZE_PERIPH)
		freezecnt++;
	if (freezecnt != 0)
		scsipi_periph_freeze(periph, freezecnt);

	/*
	 * Record the xfer with a pending sense, in case a SCSI reset is
	 * received before the thread is woken up.
	 */
	if (xs->error == XS_BUSY && xs->status == SCSI_CHECK) {
		periph->periph_flags |= PERIPH_SENSE;
		periph->periph_xscheck = xs;
	}

	/*
	 * If this was an xfer that was not to complete asynchronously,
	 * let the requesting thread perform error checking/handling
	 * in its context.
	 */
	if ((xs->xs_control & XS_CTL_ASYNC) == 0) {
		splx(s);
		/*
		 * If it's a polling job, just return, to unwind the
		 * call graph.  We don't need to restart the queue,
		 * because polling jobs are treated specially, and
		 * are really only used during crash dumps anyway
		 * (XXX or during boot-time autoconfiguration of
		 * ATAPI devices).
		 */
		if (xs->xs_control & XS_CTL_POLL)
			return;
		wakeup(xs);
		goto out;
	}

	/*
	 * Catch the extremely common case of I/O completing
	 * without error; no use in taking a context switch
	 * if we can handle it in interrupt context.
	 */
	if (xs->error == XS_NOERROR) {
		splx(s);
		(void) scsipi_complete(xs);
		goto out;
	}

	/*
	 * There is an error on this xfer.  Put it on the channel's
	 * completion queue, and wake up the completion thread.
	 */
	TAILQ_INSERT_TAIL(&chan->chan_complete, xs, channel_q);
	splx(s);
	wakeup(&chan->chan_complete);

 out:
	/*
	 * If there are more xfers on the channel's queue, attempt to
	 * run them.
	 */
	scsipi_run_queue(chan);
}

/*
 * scsipi_complete:
 *
 *	Completion of a scsipi_xfer.  This is the guts of scsipi_done().
 *
 *	NOTE: This routine MUST be called with valid thread context
 *	except for the case where the following two conditions are
 *	true:
 *
 *		xs->error == XS_NOERROR
 *		XS_CTL_ASYNC is set in xs->xs_control
 *
 *	The semantics of this routine can be tricky, so here is an
 *	explanation:
 *
 *		0		Xfer completed successfully.
 *
 *		ERESTART	Xfer had an error, but was restarted.
 *
 *		anything else	Xfer had an error, return value is Unix
 *				errno.
 *
 *	If the return value is anything but ERESTART:
 *
 *		- If XS_CTL_ASYNC is set, `xs' has been freed back to
 *		  the pool.
 *		- If there is a buf associated with the xfer,
 *		  it has been biodone()'d.
 */
int
scsipi_complete(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_periph *periph = xs->xs_periph;
	struct scsipi_channel *chan = periph->periph_channel;
	struct buf *bp;
	int error, s;

#ifdef DIAGNOSTIC
	if ((xs->xs_control & XS_CTL_ASYNC) != 0 && xs->bp == NULL)
		panic("scsipi_complete: XS_CTL_ASYNC but no buf");
#endif
	/*
	 * If command terminated with a CHECK CONDITION, we need to issue a
	 * REQUEST_SENSE command. Once the REQUEST_SENSE has been processed
	 * we'll have the real status.
	 * Must be processed at splbio() to avoid missing a SCSI bus reset
	 * for this command.
	 */
	s = splbio();
	if (xs->error == XS_BUSY && xs->status == SCSI_CHECK) {
		/* request sense for a request sense ? */
		if (xs->xs_control & XS_CTL_REQSENSE) {
			scsipi_printaddr(periph);
			printf("request sense for a request sense ?\n");
			/* XXX maybe we should reset the device ? */
			/* we've been frozen because xs->error != XS_NOERROR */
			scsipi_periph_thaw(periph, 1);
			splx(s);
			if (xs->resid < xs->datalen) {
				printf("we read %d bytes of sense anyway:\n",
				    xs->datalen - xs->resid);
#ifdef SCSIVERBOSE
				scsipi_print_sense_data((void *)xs->data, 0);
#endif
			}
			return EINVAL;
		}
		scsipi_request_sense(xs);
	}
	splx(s);

	/*
	 * If it's a user level request, bypass all usual completion
	 * processing, let the user work it out..
	 */
	if ((xs->xs_control & XS_CTL_USERCMD) != 0) {
		SC_DEBUG(periph, SCSIPI_DB3, ("calling user done()\n"));
		if (xs->error != XS_NOERROR)
			scsipi_periph_thaw(periph, 1);
		scsipi_user_done(xs);
		SC_DEBUG(periph, SCSIPI_DB3, ("returned from user done()\n "));
		return 0;
	}

	switch (xs->error) {
	case XS_NOERROR:
		error = 0;
		break;

	case XS_SENSE:
	case XS_SHORTSENSE:
		error = (*chan->chan_bustype->bustype_interpret_sense)(xs);
		break;

	case XS_RESOURCE_SHORTAGE:
		/*
		 * XXX Should freeze channel's queue.
		 */
		scsipi_printaddr(periph);
		printf("adapter resource shortage\n");
		/* FALLTHROUGH */

	case XS_BUSY:
		if (xs->error == XS_BUSY && xs->status == SCSI_QUEUE_FULL) {
			struct scsipi_max_openings mo;

			/*
			 * We set the openings to active - 1, assuming that
			 * the command that got us here is the first one that
			 * can't fit into the device's queue.  If that's not
			 * the case, I guess we'll find out soon enough.
			 */
			mo.mo_target = periph->periph_target;
			mo.mo_lun = periph->periph_lun;
			if (periph->periph_active < periph->periph_openings)
				mo.mo_openings = periph->periph_active - 1;
			else
				mo.mo_openings = periph->periph_openings - 1;
#ifdef DIAGNOSTIC
			if (mo.mo_openings < 0) {
				scsipi_printaddr(periph);
				printf("QUEUE FULL resulted in < 0 openings\n");
				panic("scsipi_done");
			}
#endif
			if (mo.mo_openings == 0) {
				scsipi_printaddr(periph);
				printf("QUEUE FULL resulted in 0 openings\n");
				mo.mo_openings = 1;
			}
			scsipi_async_event(chan, ASYNC_EVENT_MAX_OPENINGS, &mo);
			error = ERESTART;
		} else if (xs->xs_retries != 0) {
			xs->xs_retries--;
			/*
			 * Wait one second, and try again.
			 */
			if ((xs->xs_control & XS_CTL_POLL) ||
			    (chan->chan_flags & SCSIPI_CHAN_TACTIVE) == 0) {
				delay(1000000);
			} else {
				scsipi_periph_freeze(periph, 1);
				callout_reset(&periph->periph_callout,
				    hz, scsipi_periph_timed_thaw, periph);
			}
			error = ERESTART;
		} else
			error = EBUSY;
		break;

	case XS_REQUEUE:
		error = ERESTART;
		break;

	case XS_TIMEOUT:
		if (xs->xs_retries != 0) {
			xs->xs_retries--;
			error = ERESTART;
		} else
			error = EIO;
		break;

	case XS_SELTIMEOUT:
		/* XXX Disable device? */
		error = EIO;
		break;

	case XS_RESET:
		if (xs->xs_control & XS_CTL_REQSENSE) {
			/*
			 * request sense interrupted by reset: signal it
			 * with EINTR return code.
			 */
			error = EINTR;
		} else {
			if (xs->xs_retries != 0) {
				xs->xs_retries--;
				error = ERESTART;
			} else
				error = EIO;
		}
		break;

	default:
		scsipi_printaddr(periph);
		printf("invalid return code from adapter: %d\n", xs->error);
		error = EIO;
		break;
	}

	s = splbio();
	if (error == ERESTART) {
		/*
		 * If we get here, the periph has been thawed and frozen
		 * again if we had to issue recovery commands.  Alternatively,
		 * it may have been frozen again and in a timed thaw.  In
		 * any case, we thaw the periph once we re-enqueue the
		 * command.  Once the periph is fully thawed, it will begin
		 * operation again.
		 */
		xs->error = XS_NOERROR;
		xs->status = SCSI_OK;
		xs->xs_status &= ~XS_STS_DONE;
		xs->xs_requeuecnt++;
		error = scsipi_enqueue(xs);
		if (error == 0) {
			scsipi_periph_thaw(periph, 1);
			splx(s);
			return (ERESTART);
		}
	}

	/*
	 * scsipi_done() freezes the queue if not XS_NOERROR.
	 * Thaw it here.
	 */
	if (xs->error != XS_NOERROR)
		scsipi_periph_thaw(periph, 1);

	if (periph->periph_switch->psw_done)
		periph->periph_switch->psw_done(xs);
	if ((bp = xs->bp) != NULL) {
		if (error) {
			bp->b_error = error;
			bp->b_flags |= B_ERROR;
			bp->b_resid = bp->b_bcount;
		} else {
			bp->b_error = 0;
			bp->b_resid = xs->resid;
		}
		biodone(bp);
	}

	if (xs->xs_control & XS_CTL_ASYNC)
		scsipi_put_xs(xs);
	splx(s);

	return (error);
}

/*
 * Issue a request sense for the given scsipi_xfer.  Called when the xfer
 * returns with a CHECK_CONDITION status.  Must be called in valid thread
 * context and at splbio().
 */

void
scsipi_request_sense(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_periph *periph = xs->xs_periph;
	int flags, error;
	struct scsipi_sense cmd;

	periph->periph_flags |= PERIPH_SENSE;

	/* if command was polling, request sense will too */
	flags = xs->xs_control & XS_CTL_POLL;
	/* Polling commands can't sleep */
	if (flags)
		flags |= XS_CTL_NOSLEEP;

	flags |= XS_CTL_REQSENSE | XS_CTL_URGENT | XS_CTL_DATA_IN |
	    XS_CTL_THAW_PERIPH | XS_CTL_FREEZE_PERIPH;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = REQUEST_SENSE;
	cmd.length = sizeof(struct scsipi_sense_data);

	error = scsipi_command(periph,
	    (struct scsipi_generic *) &cmd, sizeof(cmd),
	    (u_char*)&xs->sense.scsi_sense, sizeof(struct scsipi_sense_data),
	    0, 1000, NULL, flags);
	periph->periph_flags &= ~PERIPH_SENSE;
	periph->periph_xscheck = NULL;
	switch(error) {
	case 0:
		/* we have a valid sense */
		xs->error = XS_SENSE;
		return;
	case EINTR:
		/* REQUEST_SENSE interrupted by bus reset. */
		xs->error = XS_RESET;
		return;
	case EIO:
		/* request sense couldn't be performed */
		/*
		 * XXX this isn't quite right but we don't have anything
		 * better for now
		 */
		xs->error = XS_DRIVER_STUFFUP;
		return;
	default:
		/* Notify that request sense failed. */
		xs->error = XS_DRIVER_STUFFUP;
		scsipi_printaddr(periph);
		printf("request sense failed with error %d\n", error);
		return;
	}
}

/*
 * scsipi_enqueue:
 *
 *	Enqueue an xfer on a channel.
 */
int
scsipi_enqueue(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_channel *chan = xs->xs_periph->periph_channel;
	struct scsipi_xfer *qxs;
	int s;

	s = splbio();

	/*
	 * If the xfer is to be polled, and there are already jobs on
	 * the queue, we can't proceed.
	 */
	if ((xs->xs_control & XS_CTL_POLL) != 0 &&
	    TAILQ_FIRST(&chan->chan_queue) != NULL) {
		splx(s);
		xs->error = XS_DRIVER_STUFFUP;
		return (EAGAIN);
	}

	/*
	 * If we have an URGENT xfer, it's an error recovery command
	 * and it should just go on the head of the channel's queue.
	 */
	if (xs->xs_control & XS_CTL_URGENT) {
		TAILQ_INSERT_HEAD(&chan->chan_queue, xs, channel_q);
		goto out;
	}

	/*
	 * If this xfer has already been on the queue before, we
	 * need to reinsert it in the correct order.  That order is:
	 *
	 *	Immediately before the first xfer for this periph
	 *	with a requeuecnt less than xs->xs_requeuecnt.
	 *
	 *	Failing that, at the end of the queue.  (We'll end up
	 *	there naturally.)
	 */
	if (xs->xs_requeuecnt != 0) {
		for (qxs = TAILQ_FIRST(&chan->chan_queue); qxs != NULL;
		     qxs = TAILQ_NEXT(qxs, channel_q)) {
			if (qxs->xs_periph == xs->xs_periph &&
			    qxs->xs_requeuecnt < xs->xs_requeuecnt)
				break;
		}
		if (qxs != NULL) {
			TAILQ_INSERT_AFTER(&chan->chan_queue, qxs, xs,
			    channel_q);
			goto out;
		}
	}
	TAILQ_INSERT_TAIL(&chan->chan_queue, xs, channel_q);
 out:
	if (xs->xs_control & XS_CTL_THAW_PERIPH)
		scsipi_periph_thaw(xs->xs_periph, 1);
	splx(s);
	return (0);
}

/*
 * scsipi_run_queue:
 *
 *	Start as many xfers as possible running on the channel.
 */
void
scsipi_run_queue(chan)
	struct scsipi_channel *chan;
{
	struct scsipi_xfer *xs;
	struct scsipi_periph *periph;
	int s;

	for (;;) {
		s = splbio();

		/*
		 * If the channel is frozen, we can't do any work right
		 * now.
		 */
		if (chan->chan_qfreeze != 0) {
			splx(s);
			return;
		}

		/*
		 * Look for work to do, and make sure we can do it.
		 */
		for (xs = TAILQ_FIRST(&chan->chan_queue); xs != NULL;
		     xs = TAILQ_NEXT(xs, channel_q)) {
			periph = xs->xs_periph;

			if ((periph->periph_sent >= periph->periph_openings) ||
			    periph->periph_qfreeze != 0 ||
			    (periph->periph_flags & PERIPH_UNTAG) != 0)
				continue;

			if ((periph->periph_flags &
			    (PERIPH_RECOVERING | PERIPH_SENSE)) != 0 &&
			    (xs->xs_control & XS_CTL_URGENT) == 0)
				continue;

			/*
			 * We can issue this xfer!
			 */
			goto got_one;
		}

		/*
		 * Can't find any work to do right now.
		 */
		splx(s);
		return;

 got_one:
		/*
		 * Have an xfer to run.  Allocate a resource from
		 * the adapter to run it.  If we can't allocate that
		 * resource, we don't dequeue the xfer.
		 */
		if (scsipi_get_resource(chan) == 0) {
			/*
			 * Adapter is out of resources.  If the adapter
			 * supports it, attempt to grow them.
			 */
			if (scsipi_grow_resources(chan) == 0) {
				/*
				 * Wasn't able to grow resources,
				 * nothing more we can do.
				 */
				if (xs->xs_control & XS_CTL_POLL) {
					scsipi_printaddr(xs->xs_periph);
1822 printf("polling command but no "
1823 "adapter resources");
					/* We'll panic shortly... */
				}
				splx(s);

				/*
				 * XXX: We should be able to note that
				 * XXX: that resources are needed here!
				 */
				return;
			}
			/*
			 * scsipi_grow_resources() allocated the resource
			 * for us.
			 */
		}

		/*
		 * We have a resource to run this xfer, do it!
		 */
		TAILQ_REMOVE(&chan->chan_queue, xs, channel_q);

		/*
		 * If the command is to be tagged, allocate a tag ID
		 * for it.
		 */
		if (XS_CTL_TAGTYPE(xs) != 0)
			scsipi_get_tag(xs);
		else
			periph->periph_flags |= PERIPH_UNTAG;
		periph->periph_sent++;
		splx(s);

		scsipi_adapter_request(chan, ADAPTER_REQ_RUN_XFER, xs);
	}
#ifdef DIAGNOSTIC
	panic("scsipi_run_queue: impossible");
#endif
}

/*
 * scsipi_execute_xs:
 *
 *	Begin execution of an xfer, waiting for it to complete, if necessary.
 */
int
scsipi_execute_xs(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_periph *periph = xs->xs_periph;
	struct scsipi_channel *chan = periph->periph_channel;
	int oasync, async, poll, retries, error, s;

	xs->xs_status &= ~XS_STS_DONE;
	xs->error = XS_NOERROR;
	xs->resid = xs->datalen;
	xs->status = SCSI_OK;

#ifdef SCSIPI_DEBUG
	if (xs->xs_periph->periph_dbflags & SCSIPI_DB3) {
		printf("scsipi_execute_xs: ");
		show_scsipi_xs(xs);
		printf("\n");
	}
#endif

	/*
	 * Deal with command tagging:
	 *
	 *	- If the device's current operating mode doesn't
	 *	  include tagged queueing, clear the tag mask.
	 *
	 *	- If the device's current operating mode *does*
	 *	  include tagged queueing, set the tag_type in
	 *	  the xfer to the appropriate byte for the tag
	 *	  message.
	 */
	if ((PERIPH_XFER_MODE(periph) & PERIPH_CAP_TQING) == 0 ||
	    (xs->xs_control & XS_CTL_REQSENSE)) {
		xs->xs_control &= ~XS_CTL_TAGMASK;
		xs->xs_tag_type = 0;
	} else {
		/*
		 * If the request doesn't specify a tag, give Head
		 * tags to URGENT operations and Ordered tags to
		 * everything else.
		 */
		if (XS_CTL_TAGTYPE(xs) == 0) {
			if (xs->xs_control & XS_CTL_URGENT)
				xs->xs_control |= XS_CTL_HEAD_TAG;
			else
				xs->xs_control |= XS_CTL_ORDERED_TAG;
		}

		switch (XS_CTL_TAGTYPE(xs)) {
		case XS_CTL_ORDERED_TAG:
			xs->xs_tag_type = MSG_ORDERED_Q_TAG;
			break;

		case XS_CTL_SIMPLE_TAG:
			xs->xs_tag_type = MSG_SIMPLE_Q_TAG;
			break;

		case XS_CTL_HEAD_TAG:
			xs->xs_tag_type = MSG_HEAD_OF_Q_TAG;
			break;

		default:
			scsipi_printaddr(periph);
			printf("invalid tag mask 0x%08x\n",
			    XS_CTL_TAGTYPE(xs));
			panic("scsipi_execute_xs");
		}
	}
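	/*
	 * (Simple tags let the device reorder commands freely; ordered
	 * tags complete in the order issued; head-of-queue tags go to
	 * the front of the device's queue, which is why URGENT recovery
	 * commands get them above.)
	 */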

	/* If the adapter wants us to poll, poll. */
	if (chan->chan_adapter->adapt_flags & SCSIPI_ADAPT_POLL_ONLY)
		xs->xs_control |= XS_CTL_POLL;

	/*
	 * If we don't yet have a completion thread, or we are to poll for
	 * completion, clear the ASYNC flag.
	 */
	oasync = (xs->xs_control & XS_CTL_ASYNC);
	if (chan->chan_thread == NULL || (xs->xs_control & XS_CTL_POLL) != 0)
		xs->xs_control &= ~XS_CTL_ASYNC;

	async = (xs->xs_control & XS_CTL_ASYNC);
	poll = (xs->xs_control & XS_CTL_POLL);
	retries = xs->xs_retries;	/* for polling commands */

#ifdef DIAGNOSTIC
	if (oasync != 0 && xs->bp == NULL)
		panic("scsipi_execute_xs: XS_CTL_ASYNC but no buf");
#endif

	/*
	 * Enqueue the transfer.  If we're not polling for completion, this
	 * should ALWAYS return `no error'.
	 */
 try_again:
	error = scsipi_enqueue(xs);
	if (error) {
		if (poll == 0) {
			scsipi_printaddr(periph);
			printf("not polling, but enqueue failed with %d\n",
			    error);
			panic("scsipi_execute_xs");
		}

		scsipi_printaddr(periph);
		printf("failed to enqueue polling command");
		if (retries != 0) {
			printf(", retrying...\n");
			delay(1000000);
			retries--;
			goto try_again;
		}
		printf("\n");
		goto free_xs;
	}

 restarted:
	scsipi_run_queue(chan);

	/*
	 * The xfer is enqueued, and possibly running.  If it's to be
	 * completed asynchronously, just return now.
	 */
	if (async)
		return (EJUSTRETURN);

	/*
	 * Not an asynchronous command; wait for it to complete.
	 */
	s = splbio();
	while ((xs->xs_status & XS_STS_DONE) == 0) {
		if (poll) {
			scsipi_printaddr(periph);
			printf("polling command not done\n");
			panic("scsipi_execute_xs");
		}
		(void) tsleep(xs, PRIBIO, "xscmd", 0);
	}
	splx(s);

	/*
	 * Command is complete.  scsipi_done() has awakened us to perform
	 * the error handling.
	 */
	error = scsipi_complete(xs);
	if (error == ERESTART)
		goto restarted;

	/*
	 * If it was meant to run async and we cleared async ourselves,
	 * don't return an error here; it has already been handled.
2020 */
2021 if (oasync)
2022 error = EJUSTRETURN;
2023 /*
2024 * Command completed successfully or fatal error occurred. Fall
2025 * into....
2026 */
2027 free_xs:
2028 s = splbio();
2029 scsipi_put_xs(xs);
2030 splx(s);
2031
2032 /*
2033 * Kick the queue, keep it running in case it stopped for some
2034 * reason.
2035 */
2036 scsipi_run_queue(chan);
2037
2038 return (error);
2039 }
2040
2041 /*
2042 * scsipi_completion_thread:
2043 *
2044 * This is the completion thread. We wait for errors on
2045 * asynchronous xfers, and perform the error handling
2046 * function, restarting the command, if necessary.
2047 */
2048 void
2049 scsipi_completion_thread(arg)
2050 void *arg;
2051 {
2052 struct scsipi_channel *chan = arg;
2053 struct scsipi_xfer *xs;
2054 int s;
2055
2056 s = splbio();
2057 chan->chan_flags |= SCSIPI_CHAN_TACTIVE;
2058 splx(s);
2059 for (;;) {
2060 s = splbio();
2061 xs = TAILQ_FIRST(&chan->chan_complete);
2062 if (xs == NULL && chan->chan_tflags == 0) {
2063 /* nothing to do; wait */
2064 (void) tsleep(&chan->chan_complete, PRIBIO,
2065 "sccomp", 0);
2066 splx(s);
2067 continue;
2068 }
2069 if (chan->chan_tflags & SCSIPI_CHANT_CALLBACK) {
2070 /* call chan_callback from thread context */
2071 chan->chan_tflags &= ~SCSIPI_CHANT_CALLBACK;
2072 chan->chan_callback(chan, chan->chan_callback_arg);
2073 splx(s);
2074 continue;
2075 }
2076 if (chan->chan_tflags & SCSIPI_CHANT_GROWRES) {
2077 /* attempt to get more openings for this channel */
2078 chan->chan_tflags &= ~SCSIPI_CHANT_GROWRES;
2079 scsipi_adapter_request(chan,
2080 ADAPTER_REQ_GROW_RESOURCES, NULL);
2081 scsipi_channel_thaw(chan, 1);
2082 splx(s);
2083 continue;
2084 }
2085 if (chan->chan_tflags & SCSIPI_CHANT_KICK) {
2086 /* explicitly run the queues for this channel */
2087 chan->chan_tflags &= ~SCSIPI_CHANT_KICK;
2088 scsipi_run_queue(chan);
2089 splx(s);
2090 continue;
2091 }
2092 if (chan->chan_tflags & SCSIPI_CHANT_SHUTDOWN) {
2093 splx(s);
2094 break;
2095 }
2096 if (xs) {
2097 TAILQ_REMOVE(&chan->chan_complete, xs, channel_q);
2098 splx(s);
2099
2100 /*
2101 * Have an xfer with an error; process it.
2102 */
2103 (void) scsipi_complete(xs);
2104
2105 /*
2106 * Kick the queue; keep it running if it was stopped
2107 * for some reason.
2108 */
2109 scsipi_run_queue(chan);
2110 } else {
2111 splx(s);
2112 }
2113 }
2114
2115 chan->chan_thread = NULL;
2116
2117 /* In case parent is waiting for us to exit. */
2118 wakeup(&chan->chan_thread);
2119
2120 kthread_exit(0);
2121 }
2122
2123 /*
2124 * scsipi_create_completion_thread:
2125 *
2126 * Callback to actually create the completion thread.
2127 */
2128 void
2129 scsipi_create_completion_thread(arg)
2130 void *arg;
2131 {
2132 struct scsipi_channel *chan = arg;
2133 struct scsipi_adapter *adapt = chan->chan_adapter;
2134
2135 if (kthread_create1(scsipi_completion_thread, chan,
2136 &chan->chan_thread, "%s:%d", adapt->adapt_dev->dv_xname,
2137 chan->chan_channel)) {
2138 printf("%s: unable to create completion thread for "
2139 "channel %d\n", adapt->adapt_dev->dv_xname,
2140 chan->chan_channel);
2141 panic("scsipi_create_completion_thread");
2142 }
2143 }
2144
/*
 * scsipi_thread_call_callback:
 *
 *	Request that a callback be invoked from the completion
 *	thread's context.
 */
2150 int
2151 scsipi_thread_call_callback(chan, callback, arg)
2152 struct scsipi_channel *chan;
2153 void (*callback) __P((struct scsipi_channel *, void *));
2154 void *arg;
2155 {
2156 int s;
2157
2158 s = splbio();
2159 if ((chan->chan_flags & SCSIPI_CHAN_TACTIVE) == 0) {
2160 /* kernel thread doesn't exist yet */
2161 splx(s);
2162 return ESRCH;
2163 }
2164 if (chan->chan_tflags & SCSIPI_CHANT_CALLBACK) {
2165 splx(s);
2166 return EBUSY;
2167 }
2168 scsipi_channel_freeze(chan, 1);
2169 chan->chan_callback = callback;
2170 chan->chan_callback_arg = arg;
2171 chan->chan_tflags |= SCSIPI_CHANT_CALLBACK;
2172 wakeup(&chan->chan_complete);
2173 splx(s);
	return (0);
2175 }
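
/*
 * Example (a hypothetical sketch; "mydrv_event" and "sc" are
 * illustrative names, not part of this interface): an adapter driver
 * that needs to perform work in thread context could defer it to the
 * completion thread like so:
 *
 *	static void
 *	mydrv_event(struct scsipi_channel *chan, void *arg)
 *	{
 *		struct mydrv_softc *sc = arg;
 *
 *		... work requiring thread context ...
 *		scsipi_channel_thaw(chan, 1);
 *	}
 *
 *	if (scsipi_thread_call_callback(&sc->sc_channel, mydrv_event, sc))
 *		printf("callback not queued\n");
 *
 * Note that the channel is frozen by one count before the callback is
 * queued, and the completion thread does not thaw it, so the callback
 * is expected to call scsipi_channel_thaw() itself.
 */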
2176
2177 /*
2178 * scsipi_async_event:
2179 *
2180 * Handle an asynchronous event from an adapter.
2181 */
2182 void
2183 scsipi_async_event(chan, event, arg)
2184 struct scsipi_channel *chan;
2185 scsipi_async_event_t event;
2186 void *arg;
2187 {
2188 int s;
2189
2190 s = splbio();
2191 switch (event) {
2192 case ASYNC_EVENT_MAX_OPENINGS:
2193 scsipi_async_event_max_openings(chan,
2194 (struct scsipi_max_openings *)arg);
2195 break;
2196
2197 case ASYNC_EVENT_XFER_MODE:
2198 scsipi_async_event_xfer_mode(chan,
2199 (struct scsipi_xfer_mode *)arg);
2200 break;
2201 case ASYNC_EVENT_RESET:
2202 scsipi_async_event_channel_reset(chan);
2203 break;
2204 }
2205 splx(s);
2206 }
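
/*
 * Example (hypothetical values, a sketch only): an adapter that has
 * just completed negotiation with target 2 might report the agreed
 * parameters from its interrupt handler:
 *
 *	struct scsipi_xfer_mode xm;
 *
 *	xm.xm_target = 2;
 *	xm.xm_mode = PERIPH_CAP_SYNC | PERIPH_CAP_WIDE16;
 *	xm.xm_period = 0x0c;	(a sync factor; see scsipi_syncparams below)
 *	xm.xm_offset = 15;
 *	scsipi_async_event(chan, ASYNC_EVENT_XFER_MODE, &xm);
 */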
2207
2208 /*
2209 * scsipi_print_xfer_mode:
2210 *
2211 * Print a periph's capabilities.
2212 */
2213 void
2214 scsipi_print_xfer_mode(periph)
2215 struct scsipi_periph *periph;
2216 {
2217 int period, freq, speed, mbs;
2218
2219 if ((periph->periph_flags & PERIPH_MODE_VALID) == 0)
2220 return;
2221
2222 printf("%s: ", periph->periph_dev->dv_xname);
2223 if (periph->periph_mode & PERIPH_CAP_SYNC) {
2224 period = scsipi_sync_factor_to_period(periph->periph_period);
2225 printf("sync (%d.%dns offset %d)",
2226 period / 10, period % 10, periph->periph_offset);
2227 } else
2228 printf("async");
2229
2230 if (periph->periph_mode & PERIPH_CAP_WIDE32)
2231 printf(", 32-bit");
2232 else if (periph->periph_mode & PERIPH_CAP_WIDE16)
2233 printf(", 16-bit");
2234 else
2235 printf(", 8-bit");
2236
2237 if (periph->periph_mode & PERIPH_CAP_SYNC) {
2238 freq = scsipi_sync_factor_to_freq(periph->periph_period);
2239 speed = freq;
2240 if (periph->periph_mode & PERIPH_CAP_WIDE32)
2241 speed *= 4;
2242 else if (periph->periph_mode & PERIPH_CAP_WIDE16)
2243 speed *= 2;
2244 mbs = speed / 1000;
2245 if (mbs > 0)
2246 printf(" (%d.%03dMB/s)", mbs, speed % 1000);
2247 else
2248 printf(" (%dKB/s)", speed % 1000);
2249 }
2250
2251 printf(" transfers");
2252
2253 if (periph->periph_mode & PERIPH_CAP_TQING)
2254 printf(", tagged queueing");
2255
2256 printf("\n");
2257 }
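
/*
 * Worked example: with sync factor 0x0c (a 50.0ns period, i.e. 20MHz),
 * offset 15, a 16-bit bus, and tagged queueing, the routine above
 * prints (device name illustrative):
 *
 *	sd0: sync (50.0ns offset 15), 16-bit (40.000MB/s) transfers, tagged queueing
 *
 * since the 20000KB/s base rate is doubled for the 16-bit bus.
 */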
2258
2259 /*
2260 * scsipi_async_event_max_openings:
2261 *
2262 * Update the maximum number of outstanding commands a
2263 * device may have.
2264 */
2265 void
2266 scsipi_async_event_max_openings(chan, mo)
2267 struct scsipi_channel *chan;
2268 struct scsipi_max_openings *mo;
2269 {
2270 struct scsipi_periph *periph;
2271 int minlun, maxlun;
2272
2273 if (mo->mo_lun == -1) {
2274 /*
2275 * Wildcarded; apply it to all LUNs.
2276 */
2277 minlun = 0;
2278 maxlun = chan->chan_nluns - 1;
2279 } else
2280 minlun = maxlun = mo->mo_lun;
2281
2282 for (; minlun <= maxlun; minlun++) {
2283 periph = scsipi_lookup_periph(chan, mo->mo_target, minlun);
2284 if (periph == NULL)
2285 continue;
2286
2287 if (mo->mo_openings < periph->periph_openings)
2288 periph->periph_openings = mo->mo_openings;
2289 else if (mo->mo_openings > periph->periph_openings &&
2290 (periph->periph_flags & PERIPH_GROW_OPENINGS) != 0)
2291 periph->periph_openings = mo->mo_openings;
2292 }
2293 }
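
/*
 * Example (hypothetical values): an adapter that wants to throttle
 * target 1, LUN 0 -- for instance in response to a QUEUE FULL status --
 * to four outstanding commands could post:
 *
 *	struct scsipi_max_openings mo;
 *
 *	mo.mo_target = 1;
 *	mo.mo_lun = 0;
 *	mo.mo_openings = 4;
 *	scsipi_async_event(chan, ASYNC_EVENT_MAX_OPENINGS, &mo);
 *
 * As the code above shows, openings are only ever raised again for
 * periphs with PERIPH_GROW_OPENINGS set.
 */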
2294
2295 /*
2296 * scsipi_async_event_xfer_mode:
2297 *
2298 * Update the xfer mode for all periphs sharing the
2299 * specified I_T Nexus.
2300 */
2301 void
2302 scsipi_async_event_xfer_mode(chan, xm)
2303 struct scsipi_channel *chan;
2304 struct scsipi_xfer_mode *xm;
2305 {
2306 struct scsipi_periph *periph;
2307 int lun, announce, mode, period, offset;
2308
2309 for (lun = 0; lun < chan->chan_nluns; lun++) {
2310 periph = scsipi_lookup_periph(chan, xm->xm_target, lun);
2311 if (periph == NULL)
2312 continue;
2313 announce = 0;
2314
2315 /*
2316 * Clamp the xfer mode down to this periph's capabilities.
2317 */
2318 mode = xm->xm_mode & periph->periph_cap;
2319 if (mode & PERIPH_CAP_SYNC) {
2320 period = xm->xm_period;
2321 offset = xm->xm_offset;
2322 } else {
2323 period = 0;
2324 offset = 0;
2325 }
2326
2327 /*
2328 * If we do not have a valid xfer mode yet, or the parameters
2329 * are different, announce them.
2330 */
2331 if ((periph->periph_flags & PERIPH_MODE_VALID) == 0 ||
2332 periph->periph_mode != mode ||
2333 periph->periph_period != period ||
2334 periph->periph_offset != offset)
2335 announce = 1;
2336
2337 periph->periph_mode = mode;
2338 periph->periph_period = period;
2339 periph->periph_offset = offset;
2340 periph->periph_flags |= PERIPH_MODE_VALID;
2341
2342 if (announce)
2343 scsipi_print_xfer_mode(periph);
2344 }
2345 }
2346
2347 /*
2348 * scsipi_set_xfer_mode:
2349 *
2350 * Set the xfer mode for the specified I_T Nexus.
2351 */
2352 void
2353 scsipi_set_xfer_mode(chan, target, immed)
2354 struct scsipi_channel *chan;
2355 int target, immed;
2356 {
2357 struct scsipi_xfer_mode xm;
	struct scsipi_periph *itperiph = NULL;	/* stays NULL if no LUNs */
2359 int lun, s;
2360
2361 /*
2362 * Go to the minimal xfer mode.
2363 */
2364 xm.xm_target = target;
2365 xm.xm_mode = 0;
2366 xm.xm_period = 0; /* ignored */
2367 xm.xm_offset = 0; /* ignored */
2368
2369 /*
2370 * Find the first LUN we know about on this I_T Nexus.
2371 */
2372 for (lun = 0; lun < chan->chan_nluns; lun++) {
2373 itperiph = scsipi_lookup_periph(chan, target, lun);
2374 if (itperiph != NULL)
2375 break;
2376 }
2377 if (itperiph != NULL) {
2378 xm.xm_mode = itperiph->periph_cap;
2379 /*
2380 * Now issue the request to the adapter.
2381 */
2382 s = splbio();
2383 scsipi_adapter_request(chan, ADAPTER_REQ_SET_XFER_MODE, &xm);
2384 splx(s);
2385 /*
2386 * If we want this to happen immediately, issue a dummy
2387 * command, since most adapters can't really negotiate unless
2388 * they're executing a job.
2389 */
2390 if (immed != 0) {
2391 (void) scsipi_test_unit_ready(itperiph,
2392 XS_CTL_DISCOVERY | XS_CTL_IGNORE_ILLEGAL_REQUEST |
2393 XS_CTL_IGNORE_NOT_READY |
2394 XS_CTL_IGNORE_MEDIA_CHANGE);
2395 }
2396 }
2397 }
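
/*
 * Example: to request (re)negotiation up to the first known periph's
 * full capabilities on target 3, and force it to take effect right
 * away via the dummy TEST UNIT READY:
 *
 *	scsipi_set_xfer_mode(chan, 3, 1);
 */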
2398
/*
 * scsipi_async_event_channel_reset:
 *
 *	Handle a SCSI bus reset event.  Called at splbio.
 */
2405 void
2406 scsipi_async_event_channel_reset(chan)
2407 struct scsipi_channel *chan;
2408 {
2409 struct scsipi_xfer *xs, *xs_next;
2410 struct scsipi_periph *periph;
2411 int target, lun;
2412
	/*
	 * Channel has been reset.  Also mark any pending REQUEST_SENSE
	 * commands as reset, as the sense data is no longer available.
	 * We can't call scsipi_done() from here, as the commands have
	 * not been sent to the adapter yet (doing so would corrupt
	 * accounting).
	 */
2419
2420 for (xs = TAILQ_FIRST(&chan->chan_queue); xs != NULL; xs = xs_next) {
2421 xs_next = TAILQ_NEXT(xs, channel_q);
2422 if (xs->xs_control & XS_CTL_REQSENSE) {
2423 TAILQ_REMOVE(&chan->chan_queue, xs, channel_q);
2424 xs->error = XS_RESET;
2425 if ((xs->xs_control & XS_CTL_ASYNC) != 0)
2426 TAILQ_INSERT_TAIL(&chan->chan_complete, xs,
2427 channel_q);
2428 }
2429 }
2430 wakeup(&chan->chan_complete);
	/* Catch xfers with pending sense which may not have a REQSENSE xs yet */
2432 for (target = 0; target < chan->chan_ntargets; target++) {
2433 if (target == chan->chan_id)
2434 continue;
2435 for (lun = 0; lun < chan->chan_nluns; lun++) {
2436 periph = chan->chan_periphs[target][lun];
2437 if (periph) {
2438 xs = periph->periph_xscheck;
2439 if (xs)
2440 xs->error = XS_RESET;
2441 }
2442 }
2443 }
2444 }
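
/*
 * Example: an adapter that observes a bus reset in its interrupt
 * handler reports it with
 *
 *	scsipi_async_event(chan, ASYNC_EVENT_RESET, NULL);
 *
 * which dispatches to the routine above at splbio.
 */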
2445
/*
 * scsipi_target_detach:
 *
 *	Detach all periphs associated with the specified I_T nexus.
 *	Must be called from valid thread context.
 */
2452 int
2453 scsipi_target_detach(chan, target, lun, flags)
2454 struct scsipi_channel *chan;
2455 int target, lun;
2456 int flags;
2457 {
2458 struct scsipi_periph *periph;
2459 int ctarget, mintarget, maxtarget;
2460 int clun, minlun, maxlun;
2461 int error;
2462
2463 if (target == -1) {
2464 mintarget = 0;
2465 maxtarget = chan->chan_ntargets;
2466 } else {
2467 if (target == chan->chan_id)
2468 return EINVAL;
2469 if (target < 0 || target >= chan->chan_ntargets)
2470 return EINVAL;
2471 mintarget = target;
2472 maxtarget = target + 1;
2473 }
2474
2475 if (lun == -1) {
2476 minlun = 0;
2477 maxlun = chan->chan_nluns;
2478 } else {
2479 if (lun < 0 || lun >= chan->chan_nluns)
2480 return EINVAL;
2481 minlun = lun;
2482 maxlun = lun + 1;
2483 }
2484
2485 for (ctarget = mintarget; ctarget < maxtarget; ctarget++) {
2486 if (ctarget == chan->chan_id)
2487 continue;
2488
2489 for (clun = minlun; clun < maxlun; clun++) {
2490 periph = scsipi_lookup_periph(chan, ctarget, clun);
2491 if (periph == NULL)
2492 continue;
2493 error = config_detach(periph->periph_dev, flags);
2494 if (error)
2495 return (error);
2496 scsipi_remove_periph(chan, periph);
2497 free(periph, M_DEVBUF);
2498 }
2499 }
	return (0);
2501 }
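
/*
 * Example: detach every LUN of target 2, forcing the detach
 * (DETACH_FORCE is from <sys/device.h>):
 *
 *	error = scsipi_target_detach(chan, 2, -1, DETACH_FORCE);
 *
 * A target or lun argument of -1 acts as a wildcard.
 */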
2502
/*
 * scsipi_adapter_addref:
 *
 *	Add a reference to the specified adapter, enabling it if
 *	necessary.
 */
2509 int
2510 scsipi_adapter_addref(adapt)
2511 struct scsipi_adapter *adapt;
2512 {
2513 int s, error = 0;
2514
2515 s = splbio();
2516 if (adapt->adapt_refcnt++ == 0 && adapt->adapt_enable != NULL) {
2517 error = (*adapt->adapt_enable)(adapt->adapt_dev, 1);
2518 if (error)
2519 adapt->adapt_refcnt--;
2520 }
2521 splx(s);
2522 return (error);
2523 }
2524
/*
 * scsipi_adapter_delref:
 *
 *	Delete a reference to the specified adapter, disabling it
 *	if possible.
 */
2531 void
2532 scsipi_adapter_delref(adapt)
2533 struct scsipi_adapter *adapt;
2534 {
2535 int s;
2536
2537 s = splbio();
2538 if (adapt->adapt_refcnt-- == 1 && adapt->adapt_enable != NULL)
2539 (void) (*adapt->adapt_enable)(adapt->adapt_dev, 0);
2540 splx(s);
2541 }
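
/*
 * Example: a periph driver's open routine would typically take a
 * reference (enabling the adapter on first use) and its close routine
 * would drop it again:
 *
 *	if ((error = scsipi_adapter_addref(adapt)) != 0)
 *		return (error);
 *	...
 *	scsipi_adapter_delref(adapt);
 */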
2542
2543 struct scsipi_syncparam {
2544 int ss_factor;
2545 int ss_period; /* ns * 10 */
2546 } scsipi_syncparams[] = {
2547 { 0x09, 125 },
2548 { 0x0a, 250 },
2549 { 0x0b, 303 },
2550 { 0x0c, 500 },
2551 };
2552 const int scsipi_nsyncparams =
2553 sizeof(scsipi_syncparams) / sizeof(scsipi_syncparams[0]);
2554
2555 int
2556 scsipi_sync_period_to_factor(period)
2557 int period; /* ns * 10 */
2558 {
2559 int i;
2560
2561 for (i = 0; i < scsipi_nsyncparams; i++) {
2562 if (period <= scsipi_syncparams[i].ss_period)
2563 return (scsipi_syncparams[i].ss_factor);
2564 }
2565
2566 return ((period / 10) / 4);
2567 }
2568
2569 int
2570 scsipi_sync_factor_to_period(factor)
2571 int factor;
2572 {
2573 int i;
2574
2575 for (i = 0; i < scsipi_nsyncparams; i++) {
2576 if (factor == scsipi_syncparams[i].ss_factor)
2577 return (scsipi_syncparams[i].ss_period);
2578 }
2579
2580 return ((factor * 4) * 10);
2581 }
2582
2583 int
2584 scsipi_sync_factor_to_freq(factor)
2585 int factor;
2586 {
2587 int i;
2588
2589 for (i = 0; i < scsipi_nsyncparams; i++) {
2590 if (factor == scsipi_syncparams[i].ss_factor)
2591 return (10000000 / scsipi_syncparams[i].ss_period);
2592 }
2593
2594 return (10000000 / ((factor * 4) * 10));
2595 }
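
/*
 * Worked example of the conversions above (frequencies are in kHz,
 * periods in ns * 10): factor 0x0c is found in the table and maps to
 * 500, i.e. a 50.0ns period, so scsipi_sync_factor_to_freq(0x0c)
 * returns 10000000 / 500 = 20000 (20MHz).  A factor outside the
 * table, say 0x19 (25), falls through to the linear rule:
 * 25 * 4 * 10 = 1000 (100ns), giving 10000000 / 1000 = 10000 (10MHz).
 */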
2596
2597 #ifdef SCSIPI_DEBUG
/*
 * Given a scsipi_xfer, dump the request, in all its glory.
 */
2601 void
2602 show_scsipi_xs(xs)
2603 struct scsipi_xfer *xs;
2604 {
2605
2606 printf("xs(%p): ", xs);
2607 printf("xs_control(0x%08x)", xs->xs_control);
2608 printf("xs_status(0x%08x)", xs->xs_status);
2609 printf("periph(%p)", xs->xs_periph);
2610 printf("retr(0x%x)", xs->xs_retries);
2611 printf("timo(0x%x)", xs->timeout);
2612 printf("cmd(%p)", xs->cmd);
2613 printf("len(0x%x)", xs->cmdlen);
2614 printf("data(%p)", xs->data);
2615 printf("len(0x%x)", xs->datalen);
2616 printf("res(0x%x)", xs->resid);
2617 printf("err(0x%x)", xs->error);
2618 printf("bp(%p)", xs->bp);
2619 show_scsipi_cmd(xs);
2620 }
2621
2622 void
2623 show_scsipi_cmd(xs)
2624 struct scsipi_xfer *xs;
2625 {
2626 u_char *b = (u_char *) xs->cmd;
2627 int i = 0;
2628
2629 scsipi_printaddr(xs->xs_periph);
2630 printf(" command: ");
2631
2632 if ((xs->xs_control & XS_CTL_RESET) == 0) {
2633 while (i < xs->cmdlen) {
2634 if (i)
2635 printf(",");
2636 printf("0x%x", b[i++]);
2637 }
2638 printf("-[%d bytes]\n", xs->datalen);
2639 if (xs->datalen)
2640 show_mem(xs->data, min(64, xs->datalen));
2641 } else
2642 printf("-RESET-\n");
2643 }
2644
2645 void
2646 show_mem(address, num)
2647 u_char *address;
2648 int num;
2649 {
2650 int x;
2651
2652 printf("------------------------------");
2653 for (x = 0; x < num; x++) {
2654 if ((x % 16) == 0)
2655 printf("\n%03d: ", x);
2656 printf("%02x ", *address++);
2657 }
2658 printf("\n------------------------------\n");
2659 }
2660 #endif /* SCSIPI_DEBUG */
2661