scsipi_base.c revision 1.26.2.2 1 /* $NetBSD: scsipi_base.c,v 1.26.2.2 1999/10/19 21:04:27 thorpej Exp $ */
2
3 /*-
4 * Copyright (c) 1998, 1999 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Charles M. Hannum; by Jason R. Thorpe of the Numerical Aerospace
9 * Simulation Facility, NASA Ames Research Center.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution.
19 * 3. All advertising materials mentioning features or use of this software
20 * must display the following acknowledgement:
21 * This product includes software developed by the NetBSD
22 * Foundation, Inc. and its contributors.
23 * 4. Neither the name of The NetBSD Foundation nor the names of its
24 * contributors may be used to endorse or promote products derived
25 * from this software without specific prior written permission.
26 *
27 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
28 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
29 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
30 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
31 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
32 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
33 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
34 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
35 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
36 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
37 * POSSIBILITY OF SUCH DAMAGE.
38 */
39
40 #include "opt_scsi.h"
41
42 #include <sys/types.h>
43 #include <sys/param.h>
44 #include <sys/systm.h>
45 #include <sys/kernel.h>
46 #include <sys/buf.h>
47 #include <sys/uio.h>
48 #include <sys/malloc.h>
49 #include <sys/pool.h>
50 #include <sys/errno.h>
51 #include <sys/device.h>
52 #include <sys/proc.h>
53 #include <sys/kthread.h>
54
55 #include <dev/scsipi/scsipi_all.h>
56 #include <dev/scsipi/scsipi_disk.h>
57 #include <dev/scsipi/scsipiconf.h>
58 #include <dev/scsipi/scsipi_base.h>
59
60 #include <dev/scsipi/scsi_all.h>
61 #include <dev/scsipi/scsi_message.h>
62
63 int scsipi_complete __P((struct scsipi_xfer *));
64 int scsipi_enqueue __P((struct scsipi_xfer *));
65 void scsipi_run_queue __P((struct scsipi_channel *chan));
66
67 void scsipi_completion_thread __P((void *));
68
69 void scsipi_get_tag __P((struct scsipi_xfer *));
70 void scsipi_put_tag __P((struct scsipi_xfer *));
71
72 void scsipi_async_event_max_openings __P((struct scsipi_channel *,
73 struct scsipi_max_openings *));
74 void scsipi_async_event_xfer_mode __P((struct scsipi_channel *,
75 struct scsipi_xfer_mode *));
76
77 struct pool scsipi_xfer_pool;
78
79 /*
80 * scsipi_init:
81 *
82 * Called when a scsibus or atapibus is attached to the system
83 * to initialize shared data structures.
84 */
85 void
86 scsipi_init()
87 {
88 static int scsipi_init_done;
89
90 if (scsipi_init_done)
91 return;
92 scsipi_init_done = 1;
93
94 /* Initialize the scsipi_xfer pool. */
95 pool_init(&scsipi_xfer_pool, sizeof(struct scsipi_xfer), 0,
96 0, 0, "scxspl", 0, NULL, NULL, M_DEVBUF);
97 }
98
99 /*
100 * scsipi_channel_init:
101 *
102 * Initialize a scsipi_channel when it is attached.
103 */
104 void
105 scsipi_channel_init(chan)
106 struct scsipi_channel *chan;
107 {
108 size_t nbytes;
109 int i;
110
111 /* Initialize shared data. */
112 scsipi_init();
113
114 /* Initialize the queues. */
115 TAILQ_INIT(&chan->chan_queue);
116 TAILQ_INIT(&chan->chan_complete);
117
118 nbytes = chan->chan_ntargets * sizeof(struct scsipi_link **);
119 chan->chan_periphs = malloc(nbytes, M_DEVBUF, M_WAITOK);
120
121 nbytes = chan->chan_nluns * sizeof(struct scsipi_periph *);
122 for (i = 0; i < chan->chan_ntargets; i++) {
123 chan->chan_periphs[i] = malloc(nbytes, M_DEVBUF, M_WAITOK);
124 memset(chan->chan_periphs[i], 0, nbytes);
125 }
126
127 /*
128 * Create the asynchronous completion thread.
129 */
130 kthread_create(scsipi_create_completion_thread, chan);
131 }
132
133 /*
134 * scsipi_lookup_periph:
135 *
136 * Lookup a periph on the specified channel.
137 */
138 struct scsipi_periph *
139 scsipi_lookup_periph(chan, target, lun)
140 struct scsipi_channel *chan;
141 int target, lun;
142 {
143 struct scsipi_periph *periph;
144 int s;
145
146 if (target >= chan->chan_ntargets ||
147 lun >= chan->chan_nluns)
148 return (NULL);
149
150 s = splbio();
151 periph = chan->chan_periphs[target][lun];
152 splx(s);
153
154 return (periph);
155 }
156
157 /*
158 * scsipi_get_resource:
159 *
160 * Allocate a single xfer `resource' from the channel.
161 *
162 * NOTE: Must be called at splbio().
163 */
164 int
165 scsipi_get_resource(chan)
166 struct scsipi_channel *chan;
167 {
168 struct scsipi_adapter *adapt = chan->chan_adapter;
169
170 if (chan->chan_flags & SCSIPI_CHAN_OPENINGS) {
171 if (chan->chan_openings > 0) {
172 chan->chan_openings--;
173 return (1);
174 }
175 return (0);
176 }
177
178 if (adapt->adapt_openings > 0) {
179 adapt->adapt_openings--;
180 return (1);
181 }
182 return (0);
183 }
184
185 /*
186 * scsipi_grow_resources:
187 *
188 * Attempt to grow resources for a channel. If this succeeds,
189 * we allocate one for our caller.
190 *
191 * NOTE: Must be called at splbio().
192 */
193 int
194 scsipi_grow_resources(chan)
195 struct scsipi_channel *chan;
196 {
197
198 if (chan->chan_flags & SCSIPI_CHAN_CANGROW) {
199 scsipi_adapter_request(chan, ADAPTER_REQ_GROW_RESOURCES, NULL);
200 return (scsipi_get_resource(chan));
201 }
202
203 return (0);
204 }
205
206 /*
207 * scsipi_put_resource:
208 *
209 * Free a single xfer `resource' to the channel.
210 *
211 * NOTE: Must be called at splbio().
212 */
213 void
214 scsipi_put_resource(chan)
215 struct scsipi_channel *chan;
216 {
217 struct scsipi_adapter *adapt = chan->chan_adapter;
218
219 if (chan->chan_flags & SCSIPI_CHAN_OPENINGS)
220 chan->chan_openings++;
221 else
222 adapt->adapt_openings++;
223 }
224
/*
 * scsipi_get_tag:
 *
 *	Get a tag ID for the specified xfer, by taking the lowest set
 *	bit out of the periph's free-tag bitmap.
 *
 *	NOTE: Must be called at splbio().
 */
void
scsipi_get_tag(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_periph *periph = xs->xs_periph;
	int word, bit, tag;

	/* Find the first bitmap word with a free tag (ffs != 0). */
	for (word = 0; word < PERIPH_NTAGWORDS; word++) {
		bit = ffs(periph->periph_freetags[word]);
		if (bit != 0)
			break;
	}
#ifdef DIAGNOSTIC
	if (word == PERIPH_NTAGWORDS) {
		scsipi_printaddr(periph);
		printf("no free tags\n");
		panic("scsipi_get_tag");
	}
#endif

	/*
	 * NOTE(review): if no free tag exists and DIAGNOSTIC is off,
	 * `bit' is 0 and `word' is PERIPH_NTAGWORDS here, so the code
	 * below shifts by -1 and indexes past the bitmap.  The caller
	 * presumably guarantees openings <= number of tags -- confirm.
	 */
	/* ffs() is 1-based; convert to a 0-based bit index. */
	bit -= 1;
	periph->periph_freetags[word] &= ~(1 << bit);
	tag = (word << 5) | bit;	/* 32 tags per word */

	/* XXX Should eventually disallow this completely. */
	if (tag >= periph->periph_openings) {
		scsipi_printaddr(periph);
		printf("WARNING: tag %d greater than available openings %d\n",
		    tag, periph->periph_openings);
	}

	xs->xs_tag_id = tag;
}
265
266 /*
267 * scsipi_put_tag:
268 *
269 * Put the tag ID for the specified xfer back into the pool.
270 *
271 * NOTE: Must be called at splbio().
272 */
273 void
274 scsipi_put_tag(xs)
275 struct scsipi_xfer *xs;
276 {
277 struct scsipi_periph *periph = xs->xs_periph;
278 int word, bit;
279
280 word = xs->xs_tag_id >> 5;
281 bit = xs->xs_tag_id & 0x1f;
282
283 periph->periph_freetags[word] |= (1 << bit);
284 }
285
/*
 * scsipi_get_xs:
 *
 *	Allocate an xfer descriptor and associate it with the
 *	specified peripherial.  If the peripherial has no more
 *	available command openings, we either block waiting for
 *	one to become available, or fail.
 *
 *	Returns NULL if XS_CTL_NOSLEEP is set (or implied by `cold')
 *	and no opening or pool entry is available.
 */
struct scsipi_xfer *
scsipi_get_xs(periph, flags)
	struct scsipi_periph *periph;
	int flags;
{
	struct scsipi_xfer *xs;
	int s;

	/*
	 * NOTE(review): `sc_link' in the SC_DEBUG() calls below looks
	 * like a stale argument from the pre-periph API; presumably
	 * SC_DEBUG ignores it unless SCSIDEBUG is defined -- confirm.
	 */
	SC_DEBUG(sc_link, SDEV_DB3, ("scsipi_get_xs\n"));

	/*
	 * If we're cold, make sure we poll.
	 */
	if (cold)
		flags |= XS_CTL_NOSLEEP | XS_CTL_POLL;

#ifdef DIAGNOSTIC
	/*
	 * URGENT commands can never be ASYNC.
	 */
	if ((flags & (XS_CTL_URGENT|XS_CTL_ASYNC)) ==
	    (XS_CTL_URGENT|XS_CTL_ASYNC)) {
		scsipi_printaddr(periph);
		printf("URGENT and ASYNC\n");
		panic("scsipi_get_xs");
	}
#endif

	s = splbio();
	/*
	 * Wait for a command opening to become available.  Rules:
	 *
	 *	- All xfers must wait for an available opening.
	 *	  Exception: URGENT xfers can proceed when
	 *	  active == openings, because we use the opening
	 *	  of the command we're recovering for.
	 *
	 *	- If the periph is recovering, only URGENT xfers may
	 *	  proceed.
	 *
	 *	- If the periph is currently executing a recovery
	 *	  command, URGENT commands must block, because only
	 *	  one recovery command can execute at a time.
	 */
	for (;;) {
		if (flags & XS_CTL_URGENT) {
			if (periph->periph_active > periph->periph_openings ||
			    (periph->periph_flags &
			     PERIPH_RECOVERY_ACTIVE) != 0)
				goto wait_for_opening;
			periph->periph_flags |= PERIPH_RECOVERY_ACTIVE;
			break;
		}
		if (periph->periph_active >= periph->periph_openings ||
		    (periph->periph_flags & PERIPH_RECOVERING) != 0)
			goto wait_for_opening;
		/* Claim an opening for this command. */
		periph->periph_active++;
		break;

 wait_for_opening:
		if (flags & XS_CTL_NOSLEEP) {
			splx(s);
			return (NULL);
		}
		SC_DEBUG(sc_link, SDEV_DB3, ("sleeping\n"));
		periph->periph_flags |= PERIPH_WAITING;
		/* Woken by scsipi_put_xs() when an opening frees up. */
		(void) tsleep(periph, PRIBIO, "getxs", 0);
	}
	SC_DEBUG(sc_link, SDEV_DB3, ("calling pool_get\n"));
	xs = pool_get(&scsipi_xfer_pool,
	    ((flags & XS_CTL_NOSLEEP) != 0 ? PR_NOWAIT : PR_WAITOK));
	if (xs == NULL) {
		/* Pool exhausted: give back whatever we claimed above. */
		if (flags & XS_CTL_URGENT)
			periph->periph_flags &= ~PERIPH_RECOVERY_ACTIVE;
		else
			periph->periph_active--;
		scsipi_printaddr(periph);
		printf("unable to allocate %sscsipi_xfer\n",
		    (flags & XS_CTL_URGENT) ? "URGENT " : "");
	}
	splx(s);

	SC_DEBUG(sc_link, SDEV_DB3, ("returning\n"));

	if (xs != NULL) {
		/* Initialize the xfer and queue it on the periph. */
		memset(xs, 0, sizeof(*xs));
		xs->xs_periph = periph;
		xs->xs_control = flags;
		s = splbio();
		TAILQ_INSERT_TAIL(&periph->periph_xferq, xs, device_q);
		splx(s);
	}
	return (xs);
}
388
389 /*
390 * scsipi_put_xs:
391 *
392 * Release an xfer descriptor, decreasing the outstanding command
393 * count for the peripherial. If there is a thread waiting for
394 * an opening, wake it up. If not, kick any queued I/O the
395 * peripherial may have.
396 *
397 * NOTE: Must be called at splbio().
398 */
399 void
400 scsipi_put_xs(xs)
401 struct scsipi_xfer *xs;
402 {
403 struct scsipi_periph *periph = xs->xs_periph;
404 int flags = xs->xs_control;
405
406 SC_DEBUG(sc_link, SDEV_DB3, ("scsipi_free_xs\n"));
407
408 TAILQ_REMOVE(&periph->periph_xferq, xs, device_q);
409 pool_put(&scsipi_xfer_pool, xs);
410
411 #ifdef DIAGNOSTIC
412 if ((periph->periph_flags & PERIPH_RECOVERY_ACTIVE) != 0 &&
413 periph->periph_active == 0) {
414 scsipi_printaddr(periph);
415 printf("recovery without a command to recovery for\n");
416 panic("scsipi_put_xs");
417 }
418 #endif
419
420 if (flags & XS_CTL_URGENT)
421 periph->periph_flags &= ~PERIPH_RECOVERY_ACTIVE;
422 else
423 periph->periph_active--;
424 if (periph->periph_active == 0 &&
425 (periph->periph_flags & PERIPH_WAITDRAIN) != 0) {
426 periph->periph_flags &= ~PERIPH_WAITDRAIN;
427 wakeup(&periph->periph_active);
428 }
429
430 if (periph->periph_flags & PERIPH_WAITING) {
431 periph->periph_flags &= ~PERIPH_WAITING;
432 wakeup(periph);
433 } else {
434 if (periph->periph_switch->psw_start != NULL) {
435 SC_DEBUG(sc_link, SDEV_DB2,
436 ("calling private start()\n"));
437 (*periph->periph_switch->psw_start)(periph);
438 }
439 }
440 }
441
442 /*
443 * scsipi_periph_freeze:
444 *
445 * Freeze a device's xfer queue.
446 */
447 void
448 scsipi_periph_freeze(periph, count)
449 struct scsipi_periph *periph;
450 int count;
451 {
452 int s;
453
454 s = splbio();
455 periph->periph_qfreeze += count;
456 splx(s);
457 }
458
459 /*
460 * scsipi_periph_thaw:
461 *
462 * Thaw a device's xfer queue.
463 */
464 void
465 scsipi_periph_thaw(periph, count)
466 struct scsipi_periph *periph;
467 int count;
468 {
469 int s;
470
471 s = splbio();
472 periph->periph_qfreeze -= count;
473 if (periph->periph_qfreeze == 0 &&
474 (periph->periph_flags & PERIPH_WAITING) != 0)
475 wakeup(periph);
476 splx(s);
477 }
478
/*
 * scsipi_periph_timed_thaw:
 *
 *	Thaw a device after some time has expired.  Registered as a
 *	timeout() callback (see scsipi_complete()); `arg' is the periph.
 */
void
scsipi_periph_timed_thaw(arg)
	void *arg;
{
	struct scsipi_periph *periph = arg;

	/* Undo the single freeze taken when the timeout was set. */
	scsipi_periph_thaw(periph, 1);

	/* XXX XXX XXX */
	scsipi_printaddr(periph);
	printf("timed thaw: should kick channel's queue here.\n");
}
496
497 /*
498 * scsipi_wait_drain:
499 *
500 * Wait for a periph's pending xfers to drain.
501 */
502 void
503 scsipi_wait_drain(periph)
504 struct scsipi_periph *periph;
505 {
506 int s;
507
508 s = splbio();
509 while (periph->periph_active != 0) {
510 periph->periph_flags |= PERIPH_WAITDRAIN;
511 (void) tsleep(&periph->periph_active, PRIBIO, "sxdrn", 0);
512 }
513 splx(s);
514 }
515
/*
 * scsipi_kill_pending:
 *
 *	Kill off all pending xfers for a periph, completing each with
 *	XS_DRIVER_STUFFUP.
 *
 *	NOTE: Must be called at splbio().
 */
void
scsipi_kill_pending(periph)
	struct scsipi_periph *periph;
{
	struct scsipi_xfer *xs;

	/*
	 * NOTE(review): this loop terminates only because the
	 * completion path reached via scsipi_done() eventually removes
	 * the xfer from periph_xferq (scsipi_put_xs() does the
	 * TAILQ_REMOVE); otherwise TAILQ_FIRST() would return the same
	 * xfer forever -- confirm for async xfers.
	 */
	while ((xs = TAILQ_FIRST(&periph->periph_xferq)) != NULL) {
		xs->error = XS_DRIVER_STUFFUP;
		scsipi_done(xs);
	}
}
534
/*
 * scsipi_interpret_sense:
 *
 *	Look at the returned sense and act on the error, determining
 *	the unix error number to pass back.  (0 = report no error)
 *
 *	NOTE: If we return ERESTART, we are expected to have
 *	thawed the device!
 *
 *	THIS IS THE DEFAULT ERROR HANDLER FOR SCSI DEVICES.
 */
int
scsipi_interpret_sense(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_sense_data *sense;
	struct scsipi_periph *periph = xs->xs_periph;
	u_int8_t key;
	u_int32_t info;		/* sense info field, valid iff SSD_ERRCODE_VALID */
	int error;
#ifndef SCSIVERBOSE
	/* Terse messages indexed by (sense key - 1); see use below. */
	static char *error_mes[] = {
		"soft error (corrected)",
		"not ready", "medium error",
		"non-media hardware failure", "illegal request",
		"unit attention", "readonly device",
		"no data found", "vendor unique",
		"copy aborted", "command aborted",
		"search returned equal", "volume overflow",
		"verify miscompare", "unknown error key"
	};
#endif

	sense = &xs->sense.scsi_sense;
#ifdef SCSIDEBUG
	/*
	 * NOTE(review): `sc_link' appears to be a stale reference from
	 * the pre-scsipi_periph API; this block likely does not compile
	 * with SCSIDEBUG defined -- confirm and convert to periph flags.
	 */
	if ((sc_link->flags & SDEV_DB1) != 0) {
		int count;
		printf("code 0x%x valid 0x%x ",
		    sense->error_code & SSD_ERRCODE,
		    sense->error_code & SSD_ERRCODE_VALID ? 1 : 0);
		printf("seg 0x%x key 0x%x ili 0x%x eom 0x%x fmark 0x%x\n",
		    sense->segment,
		    sense->flags & SSD_KEY,
		    sense->flags & SSD_ILI ? 1 : 0,
		    sense->flags & SSD_EOM ? 1 : 0,
		    sense->flags & SSD_FILEMARK ? 1 : 0);
		printf("info: 0x%x 0x%x 0x%x 0x%x followed by %d extra bytes\n",
		    sense->info[0],
		    sense->info[1],
		    sense->info[2],
		    sense->info[3],
		    sense->extra_len);
		printf("extra: ");
		for (count = 0; count < ADD_BYTES_LIM(sense); count++)
			printf("0x%x ", sense->cmd_spec_info[count]);
		printf("\n");
	}
#endif	/* SCSIDEBUG */

	/*
	 * If the periph has its own error handler, call it first.
	 * If it returns a legit error value, return that, otherwise
	 * it wants us to continue with normal error processing.
	 */
	if (periph->periph_switch->psw_error != NULL) {
		SC_DEBUG(sc_link, SDEV_DB2,
		    ("calling private err_handler()\n"));
		error = (*periph->periph_switch->psw_error)(xs);
		if (error != EJUSTRETURN)
			return (error);
	}
	/* otherwise use the default */
	switch (sense->error_code & SSD_ERRCODE) {
	/*
	 * If it's code 70, use the extended stuff and
	 * interpret the key
	 */
	case 0x71:		/* delayed error */
		scsipi_printaddr(periph);
		key = sense->flags & SSD_KEY;
		printf(" DEFERRED ERROR, key = 0x%x\n", key);
		/* FALLTHROUGH */
	case 0x70:
		if ((sense->error_code & SSD_ERRCODE_VALID) != 0)
			info = _4btol(sense->info);
		else
			info = 0;
		key = sense->flags & SSD_KEY;

		switch (key) {
		case SKEY_NO_SENSE:
		case SKEY_RECOVERED_ERROR:
			if (xs->resid == xs->datalen && xs->datalen) {
				/*
				 * Why is this here?
				 */
				xs->resid = 0;	/* not short read */
			}
			/* FALLTHROUGH */
		case SKEY_EQUAL:
			error = 0;
			break;
		case SKEY_NOT_READY:
			if ((periph->periph_flags & PERIPH_REMOVABLE) != 0)
				periph->periph_flags &= ~PERIPH_MEDIA_LOADED;
			if ((xs->xs_control & XS_CTL_IGNORE_NOT_READY) != 0)
				return (0);
			if (sense->add_sense_code == 0x3A &&
			    sense->add_sense_code_qual == 0x00)
				error = ENODEV; /* Medium not present */
			else
				error = EIO;
			if ((xs->xs_control & XS_CTL_SILENT) != 0)
				return (error);
			break;
		case SKEY_ILLEGAL_REQUEST:
			if ((xs->xs_control &
			     XS_CTL_IGNORE_ILLEGAL_REQUEST) != 0)
				return (0);
			/*
			 * Handle the case where a device reports
			 * Logical Unit Not Supported during discovery.
			 */
			if ((xs->xs_control & XS_CTL_DISCOVERY) != 0 &&
			    sense->add_sense_code == 0x25 &&
			    sense->add_sense_code_qual == 0x00)
				return (EINVAL);
			if ((xs->xs_control & XS_CTL_SILENT) != 0)
				return (EIO);
			error = EINVAL;
			break;
		case SKEY_UNIT_ATTENTION:
			if (sense->add_sense_code == 0x29 &&
			    sense->add_sense_code_qual == 0x00) {
				/* device or bus reset */
				return (ERESTART);
			}
			if ((periph->periph_flags & PERIPH_REMOVABLE) != 0)
				periph->periph_flags &= ~PERIPH_MEDIA_LOADED;
			if ((xs->xs_control &
			     XS_CTL_IGNORE_MEDIA_CHANGE) != 0 ||
			    /* XXX Should reupload any transient state. */
			    (periph->periph_flags &
			     PERIPH_REMOVABLE) == 0) {
				return (ERESTART);
			}
			if ((xs->xs_control & XS_CTL_SILENT) != 0)
				return (EIO);
			error = EIO;
			break;
		case SKEY_WRITE_PROTECT:
			error = EROFS;
			break;
		case SKEY_BLANK_CHECK:
			error = 0;
			break;
		case SKEY_ABORTED_COMMAND:
			error = ERESTART;
			break;
		case SKEY_VOLUME_OVERFLOW:
			error = ENOSPC;
			break;
		default:
			error = EIO;
			break;
		}

#ifdef SCSIVERBOSE
		if ((xs->xs_control & XS_CTL_SILENT) == 0)
			scsipi_print_sense(xs, 0);
#else
		if (key) {
			scsipi_printaddr(periph);
			printf("%s", error_mes[key - 1]);
			if ((sense->error_code & SSD_ERRCODE_VALID) != 0) {
				switch (key) {
				case SKEY_NOT_READY:
				case SKEY_ILLEGAL_REQUEST:
				case SKEY_UNIT_ATTENTION:
				case SKEY_WRITE_PROTECT:
					break;
				case SKEY_BLANK_CHECK:
					/* NOTE(review): %d with u_int32_t info */
					printf(", requested size: %d (decimal)",
					    info);
					break;
				case SKEY_ABORTED_COMMAND:
					if (xs->xs_retries)
						printf(", retrying");
					printf(", cmd 0x%x, info 0x%x",
					    xs->cmd->opcode, info);
					break;
				default:
					printf(", info = %d (decimal)", info);
				}
			}
			if (sense->extra_len != 0) {
				int n;
				printf(", data =");
				for (n = 0; n < sense->extra_len; n++)
					printf(" %02x",
					    sense->cmd_spec_info[n]);
			}
			printf("\n");
		}
#endif
		return (error);

	/*
	 * Not code 70, just report it
	 */
	default:
		scsipi_printaddr(periph);
		printf("Sense Error Code 0x%x",
		    sense->error_code & SSD_ERRCODE);
		if ((sense->error_code & SSD_ERRCODE_VALID) != 0) {
			struct scsipi_sense_data_unextended *usense =
			    (struct scsipi_sense_data_unextended *)sense;
			printf(" at block no. %d (decimal)",
			    _3btol(usense->block));
		}
		printf("\n");
		return (EIO);
	}
}
758
759 /*
760 * scsipi_size:
761 *
762 * Find out from the device what its capacity is.
763 */
764 u_long
765 scsipi_size(periph, flags)
766 struct scsipi_periph *periph;
767 int flags;
768 {
769 struct scsipi_read_cap_data rdcap;
770 struct scsipi_read_capacity scsipi_cmd;
771
772 bzero(&scsipi_cmd, sizeof(scsipi_cmd));
773 scsipi_cmd.opcode = READ_CAPACITY;
774
775 /*
776 * If the command works, interpret the result as a 4 byte
777 * number of blocks
778 */
779 if (scsipi_command(periph, (struct scsipi_generic *)&scsipi_cmd,
780 sizeof(scsipi_cmd), (u_char *)&rdcap, sizeof(rdcap),
781 2, 20000, NULL, flags | XS_CTL_DATA_IN) != 0) {
782 scsipi_printaddr(periph);
783 printf("could not get size\n");
784 return (0);
785 }
786
787 return (_4btol(rdcap.addr) + 1);
788 }
789
790 /*
791 * scsipi_test_unit_ready:
792 *
793 * Issue a `test unit ready' request.
794 */
795 int
796 scsipi_test_unit_ready(periph, flags)
797 struct scsipi_periph *periph;
798 int flags;
799 {
800 struct scsipi_test_unit_ready scsipi_cmd;
801
802 /* some ATAPI drives don't support TEST_UNIT_READY. Sigh */
803 if (periph->periph_quirks & PQUIRK_NOTUR)
804 return (0);
805
806 bzero(&scsipi_cmd, sizeof(scsipi_cmd));
807 scsipi_cmd.opcode = TEST_UNIT_READY;
808
809 return (scsipi_command(periph,
810 (struct scsipi_generic *)&scsipi_cmd, sizeof(scsipi_cmd),
811 0, 0, 2, 10000, NULL, flags));
812 }
813
814 /*
815 * scsipi_inquire:
816 *
817 * Ask the device about itself.
818 */
819 int
820 scsipi_inquire(periph, inqbuf, flags)
821 struct scsipi_periph *periph;
822 struct scsipi_inquiry_data *inqbuf;
823 int flags;
824 {
825 struct scsipi_inquiry scsipi_cmd;
826
827 bzero(&scsipi_cmd, sizeof(scsipi_cmd));
828 scsipi_cmd.opcode = INQUIRY;
829 scsipi_cmd.length = sizeof(struct scsipi_inquiry_data);
830
831 return (scsipi_command(periph,
832 (struct scsipi_generic *) &scsipi_cmd, sizeof(scsipi_cmd),
833 (u_char *) inqbuf, sizeof(struct scsipi_inquiry_data),
834 2, 10000, NULL, XS_CTL_DATA_IN | flags));
835 }
836
837 /*
838 * scsipi_prevent:
839 *
840 * Prevent or allow the user to remove the media
841 */
842 int
843 scsipi_prevent(periph, type, flags)
844 struct scsipi_periph *periph;
845 int type, flags;
846 {
847 struct scsipi_prevent scsipi_cmd;
848
849 if (periph->periph_quirks & PQUIRK_NODOORLOCK)
850 return (0);
851
852 bzero(&scsipi_cmd, sizeof(scsipi_cmd));
853 scsipi_cmd.opcode = PREVENT_ALLOW;
854 scsipi_cmd.how = type;
855
856 return (scsipi_command(periph,
857 (struct scsipi_generic *) &scsipi_cmd, sizeof(scsipi_cmd),
858 0, 0, 2, 5000, NULL, flags));
859 }
860
861 /*
862 * scsipi_start:
863 *
864 * Send a START UNIT.
865 */
866 int
867 scsipi_start(periph, type, flags)
868 struct scsipi_periph *periph;
869 int type, flags;
870 {
871 struct scsipi_start_stop scsipi_cmd;
872
873 if (periph->periph_quirks & PQUIRK_NOSTARTUNIT)
874 return 0;
875
876 bzero(&scsipi_cmd, sizeof(scsipi_cmd));
877 scsipi_cmd.opcode = START_STOP;
878 scsipi_cmd.byte2 = 0x00;
879 scsipi_cmd.how = type;
880
881 return (scsipi_command(periph,
882 (struct scsipi_generic *) &scsipi_cmd, sizeof(scsipi_cmd),
883 0, 0, 2, (type & SSS_START) ? 30000 : 10000, NULL, flags));
884 }
885
/*
 * scsipi_done:
 *
 *	This routine is called by an adapter's interrupt handler when
 *	an xfer is completed.  It releases the adapter resource and
 *	tag, then routes the xfer to the appropriate completion path:
 *	user command, polling caller, in-context scsipi_complete(), or
 *	the channel's completion thread (for async xfers with errors).
 */
void
scsipi_done(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_periph *periph = xs->xs_periph;
	struct scsipi_channel *chan = periph->periph_channel;
	int s, freezecnt;

	/*
	 * NOTE(review): `sc_link' in the debug calls below looks stale
	 * (pre-periph API); presumably harmless unless SCSIDEBUG is on.
	 */
	SC_DEBUG(sc_link, SDEV_DB2, ("scsipi_done\n"));
#ifdef SCSIDEBUG
	if ((sc_link->flags & SDEV_DB1) != 0)
		show_scsipi_cmd(xs);
#endif /* SCSIDEBUG */

	s = splbio();
	/*
	 * The resource this command was using is now free.
	 */
	scsipi_put_resource(chan);

	/*
	 * If the command was tagged, free the tag.
	 */
	if (XS_CTL_TAGTYPE(xs) != 0)
		scsipi_put_tag(xs);

	/* Mark the command as `done'. */
	xs->xs_status |= XS_STS_DONE;

	/*
	 * If it's a user level request, bypass all usual completion
	 * processing, let the user work it out..  We take reponsibility
	 * for freeing the xs (and restarting the device's queue) when
	 * the user returns.
	 */
	if ((xs->xs_control & XS_CTL_USERCMD) != 0) {
		splx(s);
		SC_DEBUG(sc_link, SDEV_DB3, ("calling user done()\n"));
		scsipi_user_done(xs);
		SC_DEBUG(sc_link, SDEV_DB3, ("returned from user done()\n "));
		goto out;
	}

#ifdef DIAGNOSTIC
	if ((xs->xs_control & (XS_CTL_ASYNC|XS_CTL_POLL)) ==
	    (XS_CTL_ASYNC|XS_CTL_POLL))
		panic("scsipi_done: ASYNC and POLL");
#endif

	/*
	 * If the xfer had an error of any sort, freeze the
	 * periph's queue.  Freeze it again if we were requested
	 * to do so in the xfer.  (The matching thaw is done in
	 * scsipi_complete().)
	 */
	freezecnt = 0;
	if (xs->error != XS_NOERROR)
		freezecnt++;
	if (xs->xs_control & XS_CTL_FREEZE_PERIPH)
		freezecnt++;
	if (freezecnt != 0)
		scsipi_periph_freeze(periph, freezecnt);

	/*
	 * If this was an xfer that was not to complete asynchrnously,
	 * let the requesting thread perform error checking/handling
	 * in its context.
	 */
	if ((xs->xs_control & XS_CTL_ASYNC) == 0) {
		splx(s);
		/*
		 * If it's a polling job, just return, to unwind the
		 * call graph.  We don't need to restart the queue,
		 * because pollings jobs are treated specially, and
		 * are really only used during crash dumps anyway
		 * (XXX or during boot-time autconfiguration of
		 * ATAPI devices).
		 */
		if (xs->xs_control & XS_CTL_POLL)
			return;
		/* The requesting thread sleeps on the xfer itself. */
		wakeup(xs);
		goto out;
	}

	/*
	 * Catch the extremely common case of I/O completing
	 * without error; no use in taking a context switch
	 * if we can handle it in interrupt context.
	 */
	if (xs->error == XS_NOERROR) {
		splx(s);
		(void) scsipi_complete(xs);
		goto out;
	}

	/*
	 * There is an error on this xfer.  Put it on the channel's
	 * completion queue, and wake up the completion thread.
	 */
	TAILQ_INSERT_TAIL(&chan->chan_complete, xs, channel_q);
	splx(s);
	wakeup(&chan->chan_complete);

 out:
	/*
	 * If there are more xfers on the channel's queue, attempt to
	 * run them.
	 */
	scsipi_run_queue(chan);
}
1001
/*
 * scsipi_complete:
 *
 *	Completion of a scsipi_xfer.  This is the guts of scsipi_done().
 *
 *	NOTE: This routine MUST be called with valid thread context
 *	except for the case where the following two conditions are
 *	true:
 *
 *		xs->error == XS_NOERROR
 *		XS_CTL_ASYNC is set in xs->xs_control
 *
 *	The semantics of this routine can be tricky, so here is an
 *	explanation:
 *
 *		0		Xfer completed successfully.
 *
 *		ERESTART	Xfer had an error, but was restarted.
 *
 *		anything else	Xfer had an error, return value is Unix
 *				errno.
 *
 *	If the return value is anything but ERESTART:
 *
 *		- If XS_CTL_ASYNC is set, `xs' has been freed back to
 *		  the pool.
 *		- If there is a buf associated with the xfer,
 *		  it has been biodone()'d.
 */
int
scsipi_complete(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_periph *periph = xs->xs_periph;
	struct scsipi_channel *chan = periph->periph_channel;
	struct buf *bp;
	int error, s;

#ifdef DIAGNOSTIC
	if ((xs->xs_control & XS_CTL_ASYNC) != 0 && xs->bp == NULL)
		panic("scsipi_complete: XS_CTL_ASYNC but no buf");
#endif

	/* Map the adapter's completion status to an errno / ERESTART. */
	switch (xs->error) {
	case XS_NOERROR:
		error = 0;
		break;

	case XS_SENSE:
	case XS_SHORTSENSE:
		/* Bus-type-specific sense interpretation (e.g. SCSI vs ATAPI). */
		error = (*chan->chan_bustype->bustype_interpret_sense)(xs);
		break;

	case XS_RESOURCE_SHORTAGE:
		/*
		 * XXX Should freeze channel's queue.
		 */
		scsipi_printaddr(periph);
		printf("adapter resource shortage\n");
		/* FALLTHROUGH */

	case XS_BUSY:
		if (xs->error == XS_BUSY && xs->status == SCSI_QUEUE_FULL) {
			struct scsipi_max_openings mo;

			/*
			 * We set the openings to active - 1, assuming that
			 * the command that got us here is the first one that
			 * can't fit into the device's queue.  If that's not
			 * the case, I guess we'll find out soon enough.
			 */
			mo.mo_target = periph->periph_target;
			mo.mo_lun = periph->periph_lun;
			mo.mo_openings = periph->periph_active - 1;
#ifdef DIAGNOSTIC
			if (mo.mo_openings < 0) {
				scsipi_printaddr(periph);
				printf("QUEUE FULL resulted in < 0 openings\n");
				panic("scsipi_done");
			}
#endif
			if (mo.mo_openings == 0) {
				scsipi_printaddr(periph);
				printf("QUEUE FULL resulted in 0 openings\n");
				mo.mo_openings = 1;
			}
			scsipi_async_event(chan, ASYNC_EVENT_MAX_OPENINGS, &mo);
			error = ERESTART;
		} else if (xs->xs_retries != 0) {
			xs->xs_retries--;
			/*
			 * Wait one second, and try again.
			 */
			if (xs->xs_control & XS_CTL_POLL)
				delay(1000000);
			else {
				/* Thawed by scsipi_periph_timed_thaw(). */
				scsipi_periph_freeze(periph, 1);
				timeout(scsipi_periph_timed_thaw, periph, hz);
			}
			error = ERESTART;
		} else
			error = EBUSY;
		break;

	case XS_TIMEOUT:
		if (xs->xs_retries != 0) {
			xs->xs_retries--;
			error = ERESTART;
		} else
			error = EIO;
		break;

	case XS_SELTIMEOUT:
		/* XXX Disable device? */
		error = EIO;
		break;

	case XS_RESET:
		if (xs->xs_retries != 0) {
			xs->xs_retries--;
			error = ERESTART;
		} else
			error = EIO;
		break;

	default:
		scsipi_printaddr(periph);
		printf("invalid return code from adapter: %d\n", xs->error);
		error = EIO;
		break;
	}

	s = splbio();
	if (error == ERESTART) {
		/*
		 * If we get here, the periph has been thawed and frozen
		 * again if we had to issue recovery commands.  Alternatively,
		 * it may have been frozen again and in a timed thaw.  In
		 * any case, we thaw the periph once we re-enqueue the
		 * command.  Once the periph is fully thawed, it will begin
		 * operation again.
		 */
		xs->error = XS_NOERROR;
		xs->status = SCSI_OK;
		xs->xs_status &= ~XS_STS_DONE;
		xs->xs_requeuecnt++;
		error = scsipi_enqueue(xs);
		if (error == 0) {
			scsipi_periph_thaw(periph, 1);
			splx(s);
			return (ERESTART);
		}
		/* Enqueue failed; fall through and fail the xfer. */
	}

	/*
	 * scsipi_done() freezes the queue if not XS_NOERROR.
	 * Thaw it here.
	 */
	if (xs->error != XS_NOERROR)
		scsipi_periph_thaw(periph, 1);

	/* Finish off any buf-based I/O associated with the xfer. */
	if ((bp = xs->bp) != NULL) {
		if (error) {
			bp->b_error = error;
			bp->b_flags |= B_ERROR;
			bp->b_resid = bp->b_bcount;
		} else {
			bp->b_error = 0;
			bp->b_resid = xs->resid;
		}
		biodone(bp);
	}

	/* Async xfers have no thread to free them; do it here. */
	if (xs->xs_control & XS_CTL_ASYNC)
		scsipi_put_xs(xs);
	splx(s);

	return (error);
}
1181
/*
 * scsipi_enqueue:
 *
 *	Enqueue an xfer on a channel.  URGENT xfers go to the head of
 *	the queue; requeued xfers are reinserted to preserve ordering
 *	relative to other xfers for the same periph (see below).
 *
 *	Returns 0 on success, EAGAIN if a polled xfer cannot be
 *	enqueued behind existing work.
 */
int
scsipi_enqueue(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_channel *chan = xs->xs_periph->periph_channel;
	struct scsipi_xfer *qxs;
	int s;

	s = splbio();

	/*
	 * If the xfer is to be polled, and there are already jobs on
	 * the queue, we can't proceed.
	 */
	if ((xs->xs_control & XS_CTL_POLL) != 0 &&
	    TAILQ_FIRST(&chan->chan_queue) != NULL) {
		splx(s);
		xs->error = XS_DRIVER_STUFFUP;
		return (EAGAIN);
	}

	/*
	 * If we have an URGENT xfer, it's an error recovery command
	 * and it should just go on the head of the channel's queue.
	 */
	if (xs->xs_control & XS_CTL_URGENT) {
		TAILQ_INSERT_HEAD(&chan->chan_queue, xs, channel_q);
		goto out;
	}

	/*
	 * If this xfer has already been on the queue before, we
	 * need to reinsert it in the correct order.  That order is:
	 *
	 *	Immediately before the first xfer for this periph
	 *	with a requeuecnt less than xs->xs_requeuecnt.
	 *
	 * Failing that, at the end of the queue.  (We'll end up
	 * there naturally.)
	 */
	if (xs->xs_requeuecnt != 0) {
		for (qxs = TAILQ_FIRST(&chan->chan_queue); qxs != NULL;
		     qxs = TAILQ_NEXT(qxs, channel_q)) {
			if (qxs->xs_periph == xs->xs_periph &&
			    qxs->xs_requeuecnt < xs->xs_requeuecnt)
				break;
		}
		if (qxs != NULL) {
			/*
			 * NOTE(review): inserts AFTER the found xfer,
			 * though the comment above says "immediately
			 * before" -- confirm intended ordering.
			 */
			TAILQ_INSERT_AFTER(&chan->chan_queue, qxs, xs,
			    channel_q);
			goto out;
		}
	}
	TAILQ_INSERT_TAIL(&chan->chan_queue, xs, channel_q);
 out:
	/* Caller asked us to thaw the periph once the xfer is queued. */
	if (xs->xs_control & XS_CTL_THAW_PERIPH)
		scsipi_periph_thaw(xs->xs_periph, 1);
	splx(s);
	return (0);
}
1247
1248 /*
1249 * scsipi_run_queue:
1250 *
1251 * Start as many xfers as possible running on the channel.
1252 */
1253 void
1254 scsipi_run_queue(chan)
1255 struct scsipi_channel *chan;
1256 {
1257 struct scsipi_xfer *xs;
1258 struct scsipi_periph *periph;
1259 int s;
1260
1261 for (;;) {
1262 s = splbio();
1263 /*
1264 * Look for work to do, and make sure we can do it.
1265 */
1266 for (xs = TAILQ_FIRST(&chan->chan_queue); xs != NULL;
1267 xs = TAILQ_NEXT(xs, channel_q)) {
1268 periph = xs->xs_periph;
1269
1270 if ((periph->periph_active > periph->periph_openings) || periph->periph_qfreeze != 0)
1271 continue;
1272
1273 if ((periph->periph_flags & PERIPH_RECOVERING) != 0 &&
1274 (xs->xs_control & XS_CTL_URGENT) == 0)
1275 continue;
1276
1277 /*
1278 * We can issue this xfer!
1279 */
1280 goto got_one;
1281 }
1282
1283 /*
1284 * Can't find any work to do right now.
1285 */
1286 splx(s);
1287 return;
1288
1289 got_one:
1290 /*
1291 * Have an xfer to run. Allocate a resource from
1292 * the adapter to run it. If we can't allocate that
1293 * resource, we don't dequeue the xfer.
1294 */
1295 if (scsipi_get_resource(chan) == 0) {
1296 /*
1297 * Adapter is out of resources. If the adapter
1298 * supports it, attempt to grow them.
1299 */
1300 if (scsipi_grow_resources(chan) == 0) {
1301 /*
1302 * Wasn't able to grow resources,
1303 * nothing more we can do.
1304 */
1305 if (xs->xs_control & XS_CTL_POLL) {
1306 scsipi_printaddr(xs->xs_periph);
1307 printf("polling command but no "
1308 "adapter resources");
1309 /* We'll panic shortly... */
1310 }
1311 splx(s);
1312 return;
1313 }
1314 /*
1315 * scsipi_grow_resources() allocated the resource
1316 * for us.
1317 */
1318 }
1319
1320 /*
1321 * We have a resource to run this xfer, do it!
1322 */
1323 TAILQ_REMOVE(&chan->chan_queue, xs, channel_q);
1324
1325 /*
1326 * If the command is to be tagged, allocate a tag ID
1327 * for it.
1328 */
1329 if (XS_CTL_TAGTYPE(xs) != 0)
1330 scsipi_get_tag(xs);
1331 splx(s);
1332
1333 scsipi_adapter_request(chan, ADAPTER_REQ_RUN_XFER, xs);
1334 }
1335 #ifdef DIAGNOSTIC
1336 panic("scsipi_run_queue: impossible");
1337 #endif
1338 }
1339
1340 /*
1341 * scsipi_execute_xs:
1342 *
1343 * Begin execution of an xfer, waiting for it to complete, if necessary.
1344 */
1345 int
1346 scsipi_execute_xs(xs)
1347 struct scsipi_xfer *xs;
1348 {
1349 struct scsipi_periph *periph = xs->xs_periph;
1350 struct scsipi_channel *chan = periph->periph_channel;
1351 int async, poll, retries, error, s;
1352
1353 xs->xs_status &= ~XS_STS_DONE;
1354 xs->error = XS_NOERROR;
1355 xs->resid = xs->datalen;
1356 xs->status = SCSI_OK;
1357
1358 #ifdef SCSIDEBUG
1359 if (xs->sc_link->flags & SDEV_DB3) {
1360 printf("scsipi_exec_cmd: ");
1361 show_scsipi_xs(xs);
1362 printf("\n");
1363 }
1364 #endif
1365
1366 /*
1367 * Deal with command tagging:
1368 *
1369 * - If the device's current operating mode doesn't
1370 * include tagged queueing, clear the tag mask.
1371 *
1372 * - If the device's current operating mode *does*
1373 * include tagged queueing, set the tag_type in
1374 * the xfer to the appropriate byte for the tag
1375 * message.
1376 */
1377 if ((PERIPH_XFER_MODE(periph) & PERIPH_CAP_TQING) == 0) {
1378 xs->xs_control &= ~XS_CTL_TAGMASK;
1379 xs->xs_tag_type = 0;
1380 } else {
1381 /*
1382 * If the request doesn't specify a tag, give Head
1383 * tags to URGENT operations and Ordered tags to
1384 * everything else.
1385 */
1386 if (XS_CTL_TAGTYPE(xs) == 0) {
1387 if (xs->xs_control & XS_CTL_URGENT)
1388 xs->xs_control |= XS_CTL_HEAD_TAG;
1389 else
1390 xs->xs_control |= XS_CTL_ORDERED_TAG;
1391 }
1392
1393 switch (XS_CTL_TAGTYPE(xs)) {
1394 case XS_CTL_ORDERED_TAG:
1395 xs->xs_tag_type = MSG_ORDERED_Q_TAG;
1396 break;
1397
1398 case XS_CTL_SIMPLE_TAG:
1399 xs->xs_tag_type = MSG_SIMPLE_Q_TAG;
1400 break;
1401
1402 case XS_CTL_HEAD_TAG:
1403 xs->xs_tag_type = MSG_HEAD_OF_Q_TAG;
1404 break;
1405
1406 default:
1407 scsipi_printaddr(periph);
1408 printf("invalid tag mask 0x%08x\n",
1409 XS_CTL_TAGTYPE(xs));
1410 panic("scsipi_execute_xs");
1411 }
1412 }
1413
1414 /*
1415 * If we don't yet have a completion thread, or we are to poll for
1416 * completion, clear the ASYNC flag.
1417 */
1418 if (chan->chan_thread == NULL || (xs->xs_control & XS_CTL_POLL) != 0)
1419 xs->xs_control &= ~XS_CTL_ASYNC;
1420
1421 async = (xs->xs_control & XS_CTL_ASYNC);
1422 poll = (xs->xs_control & XS_CTL_POLL);
1423 retries = xs->xs_retries; /* for polling commands */
1424
1425 #ifdef DIAGNOSTIC
1426 if (async != 0 && xs->bp == NULL)
1427 panic("scsipi_execute_xs: XS_CTL_ASYNC but no buf");
1428 #endif
1429
1430 /*
1431 * Enqueue the transfer. If we're not polling for completion, this
1432 * should ALWAYS return `no error'.
1433 */
1434 try_again:
1435 error = scsipi_enqueue(xs);
1436 if (error) {
1437 if (poll == 0) {
1438 scsipi_printaddr(periph);
1439 printf("not polling, but enqueue failed with %d\n",
1440 error);
1441 panic("scsipi_execute_xs");
1442 }
1443
1444 scsipi_printaddr(periph);
1445 printf("failed to enqueue polling command");
1446 if (retries != 0) {
1447 printf(", retrying...\n");
1448 delay(1000000);
1449 retries--;
1450 goto try_again;
1451 }
1452 printf("\n");
1453 goto free_xs;
1454 }
1455
1456 restarted:
1457 scsipi_run_queue(chan);
1458
1459 /*
1460 * The xfer is enqueued, and possibly running. If it's to be
1461 * completed asynchronously, just return now.
1462 */
1463 if (async)
1464 return (EJUSTRETURN);
1465
1466 /*
1467 * Not an asynchronous command; wait for it to complete.
1468 */
1469 while ((xs->xs_status & XS_STS_DONE) == 0) {
1470 if (poll) {
1471 scsipi_printaddr(periph);
1472 printf("polling command not done\n");
1473 panic("scsipi_execute_xs");
1474 }
1475 (void) tsleep(xs, PRIBIO, "xscmd", 0);
1476 }
1477
1478 /*
1479 * Command is complete. scsipi_done() has awakened us to perform
1480 * the error handling.
1481 */
1482 error = scsipi_complete(xs);
1483 if (error == ERESTART)
1484 goto restarted;
1485
1486 /*
1487 * Command completed successfully or fatal error occurred. Fall
1488 * into....
1489 */
1490 free_xs:
1491 s = splbio();
1492 scsipi_put_xs(xs);
1493 splx(s);
1494
1495 /*
1496 * Kick the queue, keep it running in case it stopped for some
1497 * reason.
1498 */
1499 scsipi_run_queue(chan);
1500
1501 return (error);
1502 }
1503
1504 /*
1505 * scsipi_completion_thread:
1506 *
1507 * This is the completion thread. We wait for errors on
1508 * asynchronous xfers, and perform the error handling
1509 * function, restarting the command, if necessary.
1510 */
1511 void
1512 scsipi_completion_thread(arg)
1513 void *arg;
1514 {
1515 struct scsipi_channel *chan = arg;
1516 struct scsipi_xfer *xs;
1517 int s;
1518
1519 while ((chan->chan_flags & SCSIPI_CHAN_SHUTDOWN) == 0) {
1520 s = splbio();
1521 if ((xs = TAILQ_FIRST(&chan->chan_complete)) == NULL) {
1522 splx(s);
1523 (void) tsleep(&chan->chan_complete, PRIBIO,
1524 "sccomp", 0);
1525 continue;
1526 }
1527 TAILQ_REMOVE(&chan->chan_complete, xs, channel_q);
1528 splx(s);
1529
1530 /*
1531 * Have an xfer with an error; process it.
1532 */
1533 (void) scsipi_complete(xs);
1534
1535 /*
1536 * Kick the queue; keep it running if it was stopped
1537 * for some reason.
1538 */
1539 scsipi_run_queue(chan);
1540 }
1541
1542 chan->chan_thread = NULL;
1543
1544 /* In case parent is waiting for us to exit. */
1545 wakeup(&chan->chan_thread);
1546
1547 kthread_exit(0);
1548 }
1549
1550 /*
1551 * scsipi_create_completion_thread:
1552 *
1553 * Callback to actually create the completion thread.
1554 */
1555 void
1556 scsipi_create_completion_thread(arg)
1557 void *arg;
1558 {
1559 struct scsipi_channel *chan = arg;
1560 struct scsipi_adapter *adapt = chan->chan_adapter;
1561
1562 if (kthread_create1(scsipi_completion_thread, chan,
1563 &chan->chan_thread, "%s:%d", adapt->adapt_dev->dv_xname,
1564 chan->chan_channel)) {
1565 printf("%s: unable to create completion thread for "
1566 "channel %d\n", adapt->adapt_dev->dv_xname,
1567 chan->chan_channel);
1568 panic("scsipi_create_completion_thread");
1569 }
1570 }
1571
1572 /*
1573 * scsipi_async_event:
1574 *
1575 * Handle an asynchronous event from an adapter.
1576 */
1577 void
1578 scsipi_async_event(chan, event, arg)
1579 struct scsipi_channel *chan;
1580 scsipi_async_event_t event;
1581 void *arg;
1582 {
1583 int s;
1584
1585 s = splbio();
1586 switch (event) {
1587 case ASYNC_EVENT_MAX_OPENINGS:
1588 scsipi_async_event_max_openings(chan,
1589 (struct scsipi_max_openings *)arg);
1590 break;
1591
1592 case ASYNC_EVENT_XFER_MODE:
1593 scsipi_async_event_xfer_mode(chan,
1594 (struct scsipi_xfer_mode *)arg);
1595 break;
1596 }
1597 splx(s);
1598 }
1599
1600 /*
1601 * scsipi_print_xfer_mode:
1602 *
1603 * Print a periph's capabilities.
1604 */
1605 void
1606 scsipi_print_xfer_mode(periph)
1607 struct scsipi_periph *periph;
1608 {
1609 int period, freq, speed, mbs;
1610
1611 if ((periph->periph_flags & PERIPH_MODE_VALID) == 0)
1612 return;
1613
1614 printf("%s: ", periph->periph_dev->dv_xname);
1615 if (periph->periph_mode & PERIPH_CAP_SYNC) {
1616 period = scsipi_sync_factor_to_period(periph->periph_period);
1617 printf("Sync (%d.%dns offset %d)",
1618 period / 10, period % 10, periph->periph_offset);
1619 } else
1620 printf("Async");
1621
1622 if (periph->periph_mode & PERIPH_CAP_WIDE32)
1623 printf(", 32-bit");
1624 else if (periph->periph_mode & PERIPH_CAP_WIDE16)
1625 printf(", 16-bit");
1626 else
1627 printf(", 8-bit");
1628
1629 if (periph->periph_mode & PERIPH_CAP_SYNC) {
1630 freq = scsipi_sync_factor_to_freq(periph->periph_period);
1631 speed = freq;
1632 if (periph->periph_mode & PERIPH_CAP_WIDE32)
1633 speed *= 4;
1634 else if (periph->periph_mode & PERIPH_CAP_WIDE16)
1635 speed *= 2;
1636 mbs = speed / 1000;
1637 if (mbs > 0)
1638 printf(" (%d.%03dMB/s)", mbs, speed % 1000);
1639 else
1640 printf(" (%dKB/s)", speed % 1000);
1641 }
1642
1643 printf(" transfers");
1644
1645 if (periph->periph_mode & PERIPH_CAP_TQING)
1646 printf(", tagged queueing");
1647
1648 printf("\n");
1649 }
1650
1651 /*
1652 * scsipi_async_event_max_openings:
1653 *
1654 * Update the maximum number of outstanding commands a
1655 * device may have.
1656 */
1657 void
1658 scsipi_async_event_max_openings(chan, mo)
1659 struct scsipi_channel *chan;
1660 struct scsipi_max_openings *mo;
1661 {
1662 struct scsipi_periph *periph;
1663
1664 periph = scsipi_lookup_periph(chan, mo->mo_target, mo->mo_lun);
1665 if (periph == NULL) {
1666 printf("%s:%d: xfer mode update for non-existent periph at "
1667 "target %d lun %d\n",
1668 chan->chan_adapter->adapt_dev->dv_xname,
1669 chan->chan_channel, mo->mo_target, mo->mo_lun);
1670 return;
1671 }
1672
1673 if (mo->mo_openings < periph->periph_openings)
1674 periph->periph_openings = mo->mo_openings;
1675 else if (mo->mo_openings > periph->periph_openings &&
1676 (periph->periph_flags & PERIPH_GROW_OPENINGS) != 0)
1677 periph->periph_openings = mo->mo_openings;
1678 }
1679
1680 /*
1681 * scsipi_async_event_xfer_mode:
1682 *
1683 * Update the xfer mode for all periphs sharing the
1684 * specified I_T Nexus.
1685 */
1686 void
1687 scsipi_async_event_xfer_mode(chan, xm)
1688 struct scsipi_channel *chan;
1689 struct scsipi_xfer_mode *xm;
1690 {
1691 struct scsipi_periph *periph;
1692 int lun;
1693
1694 for (lun = 0; lun < chan->chan_nluns; lun++) {
1695 periph = scsipi_lookup_periph(chan, xm->xm_target, lun);
1696 if (periph == NULL)
1697 continue;
1698 scsipi_adapter_request(chan, ADAPTER_REQ_GET_XFER_MODE, periph);
1699 periph->periph_mode &= periph->periph_cap;
1700 scsipi_print_xfer_mode(periph);
1701 }
1702 }
1703
1704 /*
1705 * scsipi_adapter_addref:
1706 *
1707 * Add a reference to the adapter pointed to by the provided
1708 * link, enabling the adapter if necessary.
1709 */
1710 int
1711 scsipi_adapter_addref(adapt)
1712 struct scsipi_adapter *adapt;
1713 {
1714 int s, error = 0;
1715
1716 s = splbio();
1717 if (adapt->adapt_refcnt++ == 0 && adapt->adapt_enable != NULL) {
1718 error = (*adapt->adapt_enable)(adapt->adapt_dev, 1);
1719 if (error)
1720 adapt->adapt_refcnt--;
1721 }
1722 splx(s);
1723 return (error);
1724 }
1725
1726 /*
1727 * scsipi_adapter_delref:
1728 *
1729 * Delete a reference to the adapter pointed to by the provided
1730 * link, disabling the adapter if possible.
1731 */
1732 void
1733 scsipi_adapter_delref(adapt)
1734 struct scsipi_adapter *adapt;
1735 {
1736 int s;
1737
1738 s = splbio();
1739 if (adapt->adapt_refcnt-- == 1 && adapt->adapt_enable != NULL)
1740 (void) (*adapt->adapt_enable)(adapt->adapt_dev, 0);
1741 splx(s);
1742 }
1743
/*
 * Table of the "special" SCSI synchronous transfer factors (those
 * that do not follow the factor = period / 4ns rule) and their
 * transfer periods, in units of ns * 10.
 */
struct scsipi_syncparam {
	int	ss_factor;
	int	ss_period;	/* ns * 10 */
} scsipi_syncparams[] = {
	{ 0x0a,		250 },
	{ 0x0b,		303 },
	{ 0x0c,		500 },
};
const int scsipi_nsyncparams =
    sizeof(scsipi_syncparams) / sizeof(scsipi_syncparams[0]);

/*
 * scsipi_sync_period_to_factor:
 *
 *	Convert a transfer period (ns * 10) to a sync factor byte.
 *	Periods at or below a table entry map to that entry's special
 *	factor; slower periods use the standard period/4ns encoding.
 */
int
scsipi_sync_period_to_factor(period)
	int period;		/* ns * 10 */
{
	int i;

	i = 0;
	while (i < scsipi_nsyncparams) {
		if (period <= scsipi_syncparams[i].ss_period)
			return (scsipi_syncparams[i].ss_factor);
		i++;
	}

	return ((period / 10) / 4);
}

/*
 * scsipi_sync_factor_to_period:
 *
 *	Convert a sync factor byte to its transfer period (ns * 10).
 *	Special factors come from the table; others use factor * 4ns.
 */
int
scsipi_sync_factor_to_period(factor)
	int factor;
{
	int i;

	i = 0;
	while (i < scsipi_nsyncparams) {
		if (factor == scsipi_syncparams[i].ss_factor)
			return (scsipi_syncparams[i].ss_period);
		i++;
	}

	return ((factor * 4) * 10);
}

/*
 * scsipi_sync_factor_to_freq:
 *
 *	Convert a sync factor byte to its transfer frequency in kHz
 *	(10^7 divided by the period in ns * 10).
 */
int
scsipi_sync_factor_to_freq(factor)
	int factor;
{
	int i;

	i = 0;
	while (i < scsipi_nsyncparams) {
		if (factor == scsipi_syncparams[i].ss_factor)
			return (10000000 / scsipi_syncparams[i].ss_period);
		i++;
	}

	return (10000000 / ((factor * 4) * 10));
}
1796
1797 #ifdef SCSIDEBUG
1798 /*
1799 * Given a scsipi_xfer, dump the request, in all it's glory
1800 */
1801 void
1802 show_scsipi_xs(xs)
1803 struct scsipi_xfer *xs;
1804 {
1805
1806 printf("xs(%p): ", xs);
1807 printf("xs_control(0x%08x)", xs->xs_control);
1808 printf("xs_status(0x%08x)", xs->xs_status);
1809 printf("sc_link(%p)", xs->sc_link);
1810 printf("retr(0x%x)", xs->xs_retries);
1811 printf("timo(0x%x)", xs->timeout);
1812 printf("cmd(%p)", xs->cmd);
1813 printf("len(0x%x)", xs->cmdlen);
1814 printf("data(%p)", xs->data);
1815 printf("len(0x%x)", xs->datalen);
1816 printf("res(0x%x)", xs->resid);
1817 printf("err(0x%x)", xs->error);
1818 printf("bp(%p)", xs->bp);
1819 show_scsipi_cmd(xs);
1820 }
1821
1822 void
1823 show_scsipi_cmd(xs)
1824 struct scsipi_xfer *xs;
1825 {
1826 u_char *b = (u_char *) xs->cmd;
1827 int i = 0;
1828
1829 (*xs->sc_link->sc_print_addr)(xs->sc_link);
1830 printf("command: ");
1831
1832 if ((xs->xs_control & XS_CTL_RESET) == 0) {
1833 while (i < xs->cmdlen) {
1834 if (i)
1835 printf(",");
1836 printf("0x%x", b[i++]);
1837 }
1838 printf("-[%d bytes]\n", xs->datalen);
1839 if (xs->datalen)
1840 show_mem(xs->data, min(64, xs->datalen));
1841 } else
1842 printf("-RESET-\n");
1843 }
1844
1845 void
1846 show_mem(address, num)
1847 u_char *address;
1848 int num;
1849 {
1850 int x;
1851
1852 printf("------------------------------");
1853 for (x = 0; x < num; x++) {
1854 if ((x % 16) == 0)
1855 printf("\n%03d: ", x);
1856 printf("%02x ", *address++);
1857 }
1858 printf("\n------------------------------\n");
1859 }
1860 #endif /*SCSIDEBUG */
1861