scsipi_base.c revision 1.26.2.1 1 /* $NetBSD: scsipi_base.c,v 1.26.2.1 1999/10/19 17:39:35 thorpej Exp $ */
2
3 /*-
4 * Copyright (c) 1998, 1999 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Charles M. Hannum; by Jason R. Thorpe of the Numerical Aerospace
9 * Simulation Facility, NASA Ames Research Center.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution.
19 * 3. All advertising materials mentioning features or use of this software
20 * must display the following acknowledgement:
21 * This product includes software developed by the NetBSD
22 * Foundation, Inc. and its contributors.
23 * 4. Neither the name of The NetBSD Foundation nor the names of its
24 * contributors may be used to endorse or promote products derived
25 * from this software without specific prior written permission.
26 *
27 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
28 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
29 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
30 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
31 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
32 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
33 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
34 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
35 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
36 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
37 * POSSIBILITY OF SUCH DAMAGE.
38 */
39
40 #include "opt_scsi.h"
41
42 #include <sys/types.h>
43 #include <sys/param.h>
44 #include <sys/systm.h>
45 #include <sys/kernel.h>
46 #include <sys/buf.h>
47 #include <sys/uio.h>
48 #include <sys/malloc.h>
49 #include <sys/pool.h>
50 #include <sys/errno.h>
51 #include <sys/device.h>
52 #include <sys/proc.h>
53 #include <sys/kthread.h>
54
55 #include <dev/scsipi/scsipi_all.h>
56 #include <dev/scsipi/scsipi_disk.h>
57 #include <dev/scsipi/scsipiconf.h>
58 #include <dev/scsipi/scsipi_base.h>
59
60 #include <dev/scsipi/scsi_all.h>
61 #include <dev/scsipi/scsi_message.h>
62
63 int scsipi_complete __P((struct scsipi_xfer *));
64 int scsipi_enqueue __P((struct scsipi_xfer *));
65 void scsipi_run_queue __P((struct scsipi_channel *chan));
66
67 void scsipi_completion_thread __P((void *));
68
69 void scsipi_get_tag __P((struct scsipi_xfer *));
70 void scsipi_put_tag __P((struct scsipi_xfer *));
71
72 void scsipi_async_event_max_openings __P((struct scsipi_channel *,
73 struct scsipi_max_openings *));
74
75 struct pool scsipi_xfer_pool;
76
77 /*
78 * scsipi_init:
79 *
80 * Called when a scsibus or atapibus is attached to the system
81 * to initialize shared data structures.
82 */
83 void
84 scsipi_init()
85 {
86 static int scsipi_init_done;
87
88 if (scsipi_init_done)
89 return;
90 scsipi_init_done = 1;
91
92 /* Initialize the scsipi_xfer pool. */
93 pool_init(&scsipi_xfer_pool, sizeof(struct scsipi_xfer), 0,
94 0, 0, "scxspl", 0, NULL, NULL, M_DEVBUF);
95 }
96
97 /*
98 * scsipi_channel_init:
99 *
100 * Initialize a scsipi_channel when it is attached.
101 */
102 void
103 scsipi_channel_init(chan)
104 struct scsipi_channel *chan;
105 {
106 size_t nbytes;
107 int i;
108
109 /* Initialize shared data. */
110 scsipi_init();
111
112 /* Initialize the queues. */
113 TAILQ_INIT(&chan->chan_queue);
114 TAILQ_INIT(&chan->chan_complete);
115
116 nbytes = chan->chan_ntargets * sizeof(struct scsipi_link **);
117 chan->chan_periphs = malloc(nbytes, M_DEVBUF, M_WAITOK);
118
119 nbytes = chan->chan_nluns * sizeof(struct scsipi_periph *);
120 for (i = 0; i < chan->chan_ntargets; i++) {
121 chan->chan_periphs[i] = malloc(nbytes, M_DEVBUF, M_WAITOK);
122 memset(chan->chan_periphs[i], 0, nbytes);
123 }
124
125 /*
126 * Create the asynchronous completion thread.
127 */
128 kthread_create(scsipi_create_completion_thread, chan);
129 }
130
131 /*
132 * scsipi_lookup_periph:
133 *
134 * Lookup a periph on the specified channel.
135 */
136 struct scsipi_periph *
137 scsipi_lookup_periph(chan, target, lun)
138 struct scsipi_channel *chan;
139 int target, lun;
140 {
141 struct scsipi_periph *periph;
142 int s;
143
144 if (target >= chan->chan_ntargets ||
145 lun >= chan->chan_nluns)
146 return (NULL);
147
148 s = splbio();
149 periph = chan->chan_periphs[target][lun];
150 splx(s);
151
152 return (periph);
153 }
154
155 /*
156 * scsipi_get_resource:
157 *
158 * Allocate a single xfer `resource' from the channel.
159 *
160 * NOTE: Must be called at splbio().
161 */
162 int
163 scsipi_get_resource(chan)
164 struct scsipi_channel *chan;
165 {
166 struct scsipi_adapter *adapt = chan->chan_adapter;
167
168 if (chan->chan_flags & SCSIPI_CHAN_OPENINGS) {
169 if (chan->chan_openings > 0) {
170 chan->chan_openings--;
171 return (1);
172 }
173 return (0);
174 }
175
176 if (adapt->adapt_openings > 0) {
177 adapt->adapt_openings--;
178 return (1);
179 }
180 return (0);
181 }
182
183 /*
184 * scsipi_grow_resources:
185 *
186 * Attempt to grow resources for a channel. If this succeeds,
187 * we allocate one for our caller.
188 *
189 * NOTE: Must be called at splbio().
190 */
191 int
192 scsipi_grow_resources(chan)
193 struct scsipi_channel *chan;
194 {
195
196 if (chan->chan_flags & SCSIPI_CHAN_CANGROW) {
197 scsipi_adapter_request(chan, ADAPTER_REQ_GROW_RESOURCES, NULL);
198 return (scsipi_get_resource(chan));
199 }
200
201 return (0);
202 }
203
204 /*
205 * scsipi_put_resource:
206 *
207 * Free a single xfer `resource' to the channel.
208 *
209 * NOTE: Must be called at splbio().
210 */
211 void
212 scsipi_put_resource(chan)
213 struct scsipi_channel *chan;
214 {
215 struct scsipi_adapter *adapt = chan->chan_adapter;
216
217 if (chan->chan_flags & SCSIPI_CHAN_OPENINGS)
218 chan->chan_openings++;
219 else
220 adapt->adapt_openings++;
221 }
222
223 /*
224 * scsipi_get_tag:
225 *
226 * Get a tag ID for the specified xfer.
227 *
228 * NOTE: Must be called at splbio().
229 */
230 void
231 scsipi_get_tag(xs)
232 struct scsipi_xfer *xs;
233 {
234 struct scsipi_periph *periph = xs->xs_periph;
235 int word, bit, tag;
236
237 for (word = 0; word < PERIPH_NTAGWORDS; word++) {
238 bit = ffs(periph->periph_freetags[word]);
239 if (bit != 0)
240 break;
241 }
242 #ifdef DIAGNOSTIC
243 if (word == PERIPH_NTAGWORDS) {
244 scsipi_printaddr(periph);
245 printf("no free tags\n");
246 panic("scsipi_get_tag");
247 }
248 #endif
249
250 bit -= 1;
251 periph->periph_freetags[word] &= ~(1 << bit);
252 tag = (word << 5) | bit;
253
254 /* XXX Should eventually disallow this completely. */
255 if (tag >= periph->periph_openings) {
256 scsipi_printaddr(periph);
257 printf("WARNING: tag %d greater than available openings %d\n",
258 tag, periph->periph_openings);
259 }
260
261 xs->xs_tag_id = tag;
262 }
263
264 /*
265 * scsipi_put_tag:
266 *
267 * Put the tag ID for the specified xfer back into the pool.
268 *
269 * NOTE: Must be called at splbio().
270 */
271 void
272 scsipi_put_tag(xs)
273 struct scsipi_xfer *xs;
274 {
275 struct scsipi_periph *periph = xs->xs_periph;
276 int word, bit;
277
278 word = xs->xs_tag_id >> 5;
279 bit = xs->xs_tag_id & 0x1f;
280
281 periph->periph_freetags[word] |= (1 << bit);
282 }
283
284 /*
285 * scsipi_get_xs:
286 *
287 * Allocate an xfer descriptor and associate it with the
 *	specified peripheral.  If the peripheral has no more
289 * available command openings, we either block waiting for
290 * one to become available, or fail.
291 */
struct scsipi_xfer *
scsipi_get_xs(periph, flags)
	struct scsipi_periph *periph;
	int flags;
{
	struct scsipi_xfer *xs;
	int s;

	SC_DEBUG(sc_link, SDEV_DB3, ("scsipi_get_xs\n"));

	/*
	 * If we're cold, make sure we poll.
	 */
	if (cold)
		flags |= XS_CTL_NOSLEEP | XS_CTL_POLL;

#ifdef DIAGNOSTIC
	/*
	 * URGENT commands can never be ASYNC.
	 */
	if ((flags & (XS_CTL_URGENT|XS_CTL_ASYNC)) ==
	    (XS_CTL_URGENT|XS_CTL_ASYNC)) {
		scsipi_printaddr(periph);
		printf("URGENT and ASYNC\n");
		panic("scsipi_get_xs");
	}
#endif

	s = splbio();
	/*
	 * Wait for a command opening to become available.  Rules:
	 *
	 *	- All xfers must wait for an available opening.
	 *	  Exception: URGENT xfers can proceed when
	 *	  active == openings, because we use the opening
	 *	  of the command we're recovering for.
	 *
	 *	- If the periph is recovering, only URGENT xfers may
	 *	  proceed.
	 *
	 *	- If the periph is currently executing a recovery
	 *	  command, URGENT commands must block, because only
	 *	  one recovery command can execute at a time.
	 */
	for (;;) {
		if (flags & XS_CTL_URGENT) {
			/*
			 * URGENT: borrow the opening of the command
			 * being recovered; just mark recovery active.
			 */
			if (periph->periph_active > periph->periph_openings ||
			    (periph->periph_flags &
			    PERIPH_RECOVERY_ACTIVE) != 0)
				goto wait_for_opening;
			periph->periph_flags |= PERIPH_RECOVERY_ACTIVE;
			break;
		}
		/* Normal xfer: consume a real opening. */
		if (periph->periph_active >= periph->periph_openings ||
		    (periph->periph_flags & PERIPH_RECOVERING) != 0)
			goto wait_for_opening;
		periph->periph_active++;
		break;

 wait_for_opening:
		if (flags & XS_CTL_NOSLEEP) {
			splx(s);
			return (NULL);
		}
		/*
		 * Sleep until scsipi_put_xs() (or a thaw) wakes us,
		 * then re-test the conditions above from the top.
		 */
		SC_DEBUG(sc_link, SDEV_DB3, ("sleeping\n"));
		periph->periph_flags |= PERIPH_WAITING;
		(void) tsleep(periph, PRIBIO, "getxs", 0);
	}
	SC_DEBUG(sc_link, SDEV_DB3, ("calling pool_get\n"));
	xs = pool_get(&scsipi_xfer_pool,
	    ((flags & XS_CTL_NOSLEEP) != 0 ? PR_NOWAIT : PR_WAITOK));
	if (xs == NULL) {
		/*
		 * Allocation failed: undo the opening/recovery
		 * accounting done above so the counters stay balanced.
		 */
		if (flags & XS_CTL_URGENT)
			periph->periph_flags &= ~PERIPH_RECOVERY_ACTIVE;
		else
			periph->periph_active--;
		scsipi_printaddr(periph);
		printf("unable to allocate %sscsipi_xfer\n",
		    (flags & XS_CTL_URGENT) ? "URGENT " : "");
	}
	splx(s);

	SC_DEBUG(sc_link, SDEV_DB3, ("returning\n"));

	if (xs != NULL) {
		memset(xs, 0, sizeof(*xs));
		xs->xs_periph = periph;
		xs->xs_control = flags;
		s = splbio();
		/* Track the xfer on the periph's pending-xfer queue. */
		TAILQ_INSERT_TAIL(&periph->periph_xferq, xs, device_q);
		splx(s);
	}
	return (xs);
}
386
387 /*
388 * scsipi_put_xs:
389 *
390 * Release an xfer descriptor, decreasing the outstanding command
 *	count for the peripheral.  If there is a thread waiting for
 *	an opening, wake it up.  If not, kick any queued I/O the
 *	peripheral may have.
394 *
395 * NOTE: Must be called at splbio().
396 */
397 void
398 scsipi_put_xs(xs)
399 struct scsipi_xfer *xs;
400 {
401 struct scsipi_periph *periph = xs->xs_periph;
402 int flags = xs->xs_control;
403
404 SC_DEBUG(sc_link, SDEV_DB3, ("scsipi_free_xs\n"));
405
406 TAILQ_REMOVE(&periph->periph_xferq, xs, device_q);
407 pool_put(&scsipi_xfer_pool, xs);
408
409 #ifdef DIAGNOSTIC
410 if ((periph->periph_flags & PERIPH_RECOVERY_ACTIVE) != 0 &&
411 periph->periph_active == 0) {
412 scsipi_printaddr(periph);
413 printf("recovery without a command to recovery for\n");
414 panic("scsipi_put_xs");
415 }
416 #endif
417
418 if (flags & XS_CTL_URGENT)
419 periph->periph_flags &= ~PERIPH_RECOVERY_ACTIVE;
420 else
421 periph->periph_active--;
422 if (periph->periph_active == 0 &&
423 (periph->periph_flags & PERIPH_WAITDRAIN) != 0) {
424 periph->periph_flags &= ~PERIPH_WAITDRAIN;
425 wakeup(&periph->periph_active);
426 }
427
428 if (periph->periph_flags & PERIPH_WAITING) {
429 periph->periph_flags &= ~PERIPH_WAITING;
430 wakeup(periph);
431 } else {
432 if (periph->periph_switch->psw_start != NULL) {
433 SC_DEBUG(sc_link, SDEV_DB2,
434 ("calling private start()\n"));
435 (*periph->periph_switch->psw_start)(periph);
436 }
437 }
438 }
439
440 /*
441 * scsipi_periph_freeze:
442 *
443 * Freeze a device's xfer queue.
444 */
445 void
446 scsipi_periph_freeze(periph, count)
447 struct scsipi_periph *periph;
448 int count;
449 {
450 int s;
451
452 s = splbio();
453 periph->periph_qfreeze += count;
454 splx(s);
455 }
456
457 /*
458 * scsipi_periph_thaw:
459 *
460 * Thaw a device's xfer queue.
461 */
462 void
463 scsipi_periph_thaw(periph, count)
464 struct scsipi_periph *periph;
465 int count;
466 {
467 int s;
468
469 s = splbio();
470 periph->periph_qfreeze -= count;
471 if (periph->periph_qfreeze == 0 &&
472 (periph->periph_flags & PERIPH_WAITING) != 0)
473 wakeup(periph);
474 splx(s);
475 }
476
477 /*
478 * scsipi_periph_timed_thaw:
479 *
480 * Thaw a device after some time has expired.
481 */
void
scsipi_periph_timed_thaw(arg)
	void *arg;
{
	struct scsipi_periph *periph = (struct scsipi_periph *)arg;

	/* Undo the single freeze taken when the timeout was armed. */
	scsipi_periph_thaw(periph, 1);

	/* XXX XXX XXX */
	scsipi_printaddr(periph);
	printf("timed thaw: should kick channel's queue here.\n");
}
494
495 /*
496 * scsipi_wait_drain:
497 *
498 * Wait for a periph's pending xfers to drain.
499 */
500 void
501 scsipi_wait_drain(periph)
502 struct scsipi_periph *periph;
503 {
504 int s;
505
506 s = splbio();
507 while (periph->periph_active != 0) {
508 periph->periph_flags |= PERIPH_WAITDRAIN;
509 (void) tsleep(&periph->periph_active, PRIBIO, "sxdrn", 0);
510 }
511 splx(s);
512 }
513
514 /*
515 * scsipi_kill_pending:
516 *
517 * Kill off all pending xfers for a periph.
518 *
519 * NOTE: Must be called at splbio().
520 */
void
scsipi_kill_pending(periph)
	struct scsipi_periph *periph;
{
	struct scsipi_xfer *xs;

	/*
	 * Fail each pending xfer in turn.  NOTE(review): this loop
	 * terminates only because the completion path reached via
	 * scsipi_done() eventually removes the xfer from
	 * periph_xferq (TAILQ_REMOVE in scsipi_put_xs()); if a
	 * completion path ever left the xfer queued, this would
	 * spin forever -- confirm for USERCMD/POLL cases.
	 */
	while ((xs = TAILQ_FIRST(&periph->periph_xferq)) != NULL) {
		xs->error = XS_DRIVER_STUFFUP;
		scsipi_done(xs);
	}
}
532
533 /*
534 * scsipi_interpret_sense:
535 *
536 * Look at the returned sense and act on the error, determining
537 * the unix error number to pass back. (0 = report no error)
538 *
 *	NOTE: If we return ERESTART, we are expected to have
540 * thawed the device!
541 *
542 * THIS IS THE DEFAULT ERROR HANDLER FOR SCSI DEVICES.
543 */
int
scsipi_interpret_sense(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_sense_data *sense;
	struct scsipi_periph *periph = xs->xs_periph;
	u_int8_t key;
	u_int32_t info;
	int error;
#ifndef SCSIVERBOSE
	/* Indexed by (sense key - 1); key 0 (NO SENSE) prints nothing. */
	static char *error_mes[] = {
		"soft error (corrected)",
		"not ready", "medium error",
		"non-media hardware failure", "illegal request",
		"unit attention", "readonly device",
		"no data found", "vendor unique",
		"copy aborted", "command aborted",
		"search returned equal", "volume overflow",
		"verify miscompare", "unknown error key"
	};
#endif

	sense = &xs->sense.scsi_sense;
#ifdef SCSIDEBUG
	if ((sc_link->flags & SDEV_DB1) != 0) {
		int count;
		printf("code 0x%x valid 0x%x ",
		    sense->error_code & SSD_ERRCODE,
		    sense->error_code & SSD_ERRCODE_VALID ? 1 : 0);
		printf("seg 0x%x key 0x%x ili 0x%x eom 0x%x fmark 0x%x\n",
		    sense->segment,
		    sense->flags & SSD_KEY,
		    sense->flags & SSD_ILI ? 1 : 0,
		    sense->flags & SSD_EOM ? 1 : 0,
		    sense->flags & SSD_FILEMARK ? 1 : 0);
		printf("info: 0x%x 0x%x 0x%x 0x%x followed by %d extra bytes\n",
		    sense->info[0],
		    sense->info[1],
		    sense->info[2],
		    sense->info[3],
		    sense->extra_len);
		printf("extra: ");
		for (count = 0; count < ADD_BYTES_LIM(sense); count++)
			printf("0x%x ", sense->cmd_spec_info[count]);
		printf("\n");
	}
#endif	/* SCSIDEBUG */

	/*
	 * If the periph has its own error handler, call it first.
	 * If it returns a legit error value, return that, otherwise
	 * it wants us to continue with normal error processing.
	 */
	if (periph->periph_switch->psw_error != NULL) {
		SC_DEBUG(sc_link, SDEV_DB2,
		    ("calling private err_handler()\n"));
		error = (*periph->periph_switch->psw_error)(xs);
		if (error != EJUSTRETURN)
			return (error);
	}
	/* otherwise use the default */
	switch (sense->error_code & SSD_ERRCODE) {
		/*
		 * If it's code 70, use the extended stuff and
		 * interpret the key
		 */
	case 0x71:		/* delayed error */
		scsipi_printaddr(periph);
		key = sense->flags & SSD_KEY;
		printf(" DEFERRED ERROR, key = 0x%x\n", key);
		/* FALLTHROUGH */
	case 0x70:
		/* info bytes are only meaningful when the valid bit is set */
		if ((sense->error_code & SSD_ERRCODE_VALID) != 0)
			info = _4btol(sense->info);
		else
			info = 0;
		key = sense->flags & SSD_KEY;

		switch (key) {
		case SKEY_NO_SENSE:
		case SKEY_RECOVERED_ERROR:
			if (xs->resid == xs->datalen && xs->datalen) {
				/*
				 * Why is this here?
				 */
				xs->resid = 0;	/* not short read */
			}
			/* FALLTHROUGH */
		case SKEY_EQUAL:
			error = 0;
			break;
		case SKEY_NOT_READY:
			if ((periph->periph_flags & PERIPH_REMOVABLE) != 0)
				periph->periph_flags &= ~PERIPH_MEDIA_LOADED;
			if ((xs->xs_control & XS_CTL_IGNORE_NOT_READY) != 0)
				return (0);
			/* ASC/ASCQ 3A/00: medium not present */
			if (sense->add_sense_code == 0x3A &&
			    sense->add_sense_code_qual == 0x00)
				error = ENODEV; /* Medium not present */
			else
				error = EIO;
			if ((xs->xs_control & XS_CTL_SILENT) != 0)
				return (error);
			break;
		case SKEY_ILLEGAL_REQUEST:
			if ((xs->xs_control &
			    XS_CTL_IGNORE_ILLEGAL_REQUEST) != 0)
				return (0);
			/*
			 * Handle the case where a device reports
			 * Logical Unit Not Supported during discovery.
			 */
			if ((xs->xs_control & XS_CTL_DISCOVERY) != 0 &&
			    sense->add_sense_code == 0x25 &&
			    sense->add_sense_code_qual == 0x00)
				return (EINVAL);
			if ((xs->xs_control & XS_CTL_SILENT) != 0)
				return (EIO);
			error = EINVAL;
			break;
		case SKEY_UNIT_ATTENTION:
			if (sense->add_sense_code == 0x29 &&
			    sense->add_sense_code_qual == 0x00) {
				/* device or bus reset */
				return (ERESTART);
			}
			if ((periph->periph_flags & PERIPH_REMOVABLE) != 0)
				periph->periph_flags &= ~PERIPH_MEDIA_LOADED;
			if ((xs->xs_control &
			    XS_CTL_IGNORE_MEDIA_CHANGE) != 0 ||
			    /* XXX Should reupload any transient state. */
			    (periph->periph_flags &
			    PERIPH_REMOVABLE) == 0) {
				return (ERESTART);
			}
			if ((xs->xs_control & XS_CTL_SILENT) != 0)
				return (EIO);
			error = EIO;
			break;
		case SKEY_WRITE_PROTECT:
			error = EROFS;
			break;
		case SKEY_BLANK_CHECK:
			error = 0;
			break;
		case SKEY_ABORTED_COMMAND:
			error = ERESTART;
			break;
		case SKEY_VOLUME_OVERFLOW:
			error = ENOSPC;
			break;
		default:
			error = EIO;
			break;
		}

#ifdef SCSIVERBOSE
		if ((xs->xs_control & XS_CTL_SILENT) == 0)
			scsipi_print_sense(xs, 0);
#else
		if (key) {
			scsipi_printaddr(periph);
			printf("%s", error_mes[key - 1]);
			if ((sense->error_code & SSD_ERRCODE_VALID) != 0) {
				switch (key) {
				case SKEY_NOT_READY:
				case SKEY_ILLEGAL_REQUEST:
				case SKEY_UNIT_ATTENTION:
				case SKEY_WRITE_PROTECT:
					break;
				case SKEY_BLANK_CHECK:
					printf(", requested size: %d (decimal)",
					    info);
					break;
				case SKEY_ABORTED_COMMAND:
					if (xs->xs_retries)
						printf(", retrying");
					printf(", cmd 0x%x, info 0x%x",
					    xs->cmd->opcode, info);
					break;
				default:
					printf(", info = %d (decimal)", info);
				}
			}
			if (sense->extra_len != 0) {
				int n;
				printf(", data =");
				for (n = 0; n < sense->extra_len; n++)
					printf(" %02x",
					    sense->cmd_spec_info[n]);
			}
			printf("\n");
		}
#endif
		return (error);

		/*
		 * Not code 70, just report it
		 */
	default:
		scsipi_printaddr(periph);
		printf("Sense Error Code 0x%x",
		    sense->error_code & SSD_ERRCODE);
		if ((sense->error_code & SSD_ERRCODE_VALID) != 0) {
			struct scsipi_sense_data_unextended *usense =
			    (struct scsipi_sense_data_unextended *)sense;
			printf(" at block no. %d (decimal)",
			    _3btol(usense->block));
		}
		printf("\n");
		return (EIO);
	}
}
756
757 /*
758 * scsipi_size:
759 *
760 * Find out from the device what its capacity is.
761 */
762 u_long
763 scsipi_size(periph, flags)
764 struct scsipi_periph *periph;
765 int flags;
766 {
767 struct scsipi_read_cap_data rdcap;
768 struct scsipi_read_capacity scsipi_cmd;
769
770 bzero(&scsipi_cmd, sizeof(scsipi_cmd));
771 scsipi_cmd.opcode = READ_CAPACITY;
772
773 /*
774 * If the command works, interpret the result as a 4 byte
775 * number of blocks
776 */
777 if (scsipi_command(periph, (struct scsipi_generic *)&scsipi_cmd,
778 sizeof(scsipi_cmd), (u_char *)&rdcap, sizeof(rdcap),
779 2, 20000, NULL, flags | XS_CTL_DATA_IN) != 0) {
780 scsipi_printaddr(periph);
781 printf("could not get size\n");
782 return (0);
783 }
784
785 return (_4btol(rdcap.addr) + 1);
786 }
787
788 /*
789 * scsipi_test_unit_ready:
790 *
791 * Issue a `test unit ready' request.
792 */
793 int
794 scsipi_test_unit_ready(periph, flags)
795 struct scsipi_periph *periph;
796 int flags;
797 {
798 struct scsipi_test_unit_ready scsipi_cmd;
799
800 /* some ATAPI drives don't support TEST_UNIT_READY. Sigh */
801 if (periph->periph_quirks & PQUIRK_NOTUR)
802 return (0);
803
804 bzero(&scsipi_cmd, sizeof(scsipi_cmd));
805 scsipi_cmd.opcode = TEST_UNIT_READY;
806
807 return (scsipi_command(periph,
808 (struct scsipi_generic *)&scsipi_cmd, sizeof(scsipi_cmd),
809 0, 0, 2, 10000, NULL, flags));
810 }
811
812 /*
813 * scsipi_inquire:
814 *
815 * Ask the device about itself.
816 */
817 int
818 scsipi_inquire(periph, inqbuf, flags)
819 struct scsipi_periph *periph;
820 struct scsipi_inquiry_data *inqbuf;
821 int flags;
822 {
823 struct scsipi_inquiry scsipi_cmd;
824
825 bzero(&scsipi_cmd, sizeof(scsipi_cmd));
826 scsipi_cmd.opcode = INQUIRY;
827 scsipi_cmd.length = sizeof(struct scsipi_inquiry_data);
828
829 return (scsipi_command(periph,
830 (struct scsipi_generic *) &scsipi_cmd, sizeof(scsipi_cmd),
831 (u_char *) inqbuf, sizeof(struct scsipi_inquiry_data),
832 2, 10000, NULL, XS_CTL_DATA_IN | flags));
833 }
834
835 /*
836 * scsipi_prevent:
837 *
838 * Prevent or allow the user to remove the media
839 */
840 int
841 scsipi_prevent(periph, type, flags)
842 struct scsipi_periph *periph;
843 int type, flags;
844 {
845 struct scsipi_prevent scsipi_cmd;
846
847 if (periph->periph_quirks & PQUIRK_NODOORLOCK)
848 return (0);
849
850 bzero(&scsipi_cmd, sizeof(scsipi_cmd));
851 scsipi_cmd.opcode = PREVENT_ALLOW;
852 scsipi_cmd.how = type;
853
854 return (scsipi_command(periph,
855 (struct scsipi_generic *) &scsipi_cmd, sizeof(scsipi_cmd),
856 0, 0, 2, 5000, NULL, flags));
857 }
858
859 /*
860 * scsipi_start:
861 *
862 * Send a START UNIT.
863 */
864 int
865 scsipi_start(periph, type, flags)
866 struct scsipi_periph *periph;
867 int type, flags;
868 {
869 struct scsipi_start_stop scsipi_cmd;
870
871 if (periph->periph_quirks & PQUIRK_NOSTARTUNIT)
872 return 0;
873
874 bzero(&scsipi_cmd, sizeof(scsipi_cmd));
875 scsipi_cmd.opcode = START_STOP;
876 scsipi_cmd.byte2 = 0x00;
877 scsipi_cmd.how = type;
878
879 return (scsipi_command(periph,
880 (struct scsipi_generic *) &scsipi_cmd, sizeof(scsipi_cmd),
881 0, 0, 2, (type & SSS_START) ? 30000 : 10000, NULL, flags));
882 }
883
884 /*
885 * scsipi_done:
886 *
887 * This routine is called by an adapter's interrupt handler when
888 * an xfer is completed.
889 */
void
scsipi_done(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_periph *periph = xs->xs_periph;
	struct scsipi_channel *chan = periph->periph_channel;
	int s, freezecnt;

	SC_DEBUG(sc_link, SDEV_DB2, ("scsipi_done\n"));
#ifdef SCSIDEBUG
	if ((sc_link->flags & SDEV_DB1) != 0)
		show_scsipi_cmd(xs);
#endif /* SCSIDEBUG */

	s = splbio();
	/*
	 * The resource this command was using is now free.
	 */
	scsipi_put_resource(chan);

	/*
	 * If the command was tagged, free the tag.
	 */
	if (XS_CTL_TAGTYPE(xs) != 0)
		scsipi_put_tag(xs);

	/* Mark the command as `done'. */
	xs->xs_status |= XS_STS_DONE;

	/*
	 * If it's a user level request, bypass all usual completion
	 * processing, let the user work it out..  We take responsibility
	 * for freeing the xs (and restarting the device's queue) when
	 * the user returns.
	 */
	if ((xs->xs_control & XS_CTL_USERCMD) != 0) {
		splx(s);
		SC_DEBUG(sc_link, SDEV_DB3, ("calling user done()\n"));
		scsipi_user_done(xs);
		SC_DEBUG(sc_link, SDEV_DB3, ("returned from user done()\n "));
		goto out;
	}

#ifdef DIAGNOSTIC
	if ((xs->xs_control & (XS_CTL_ASYNC|XS_CTL_POLL)) ==
	    (XS_CTL_ASYNC|XS_CTL_POLL))
		panic("scsipi_done: ASYNC and POLL");
#endif

	/*
	 * If the xfer had an error of any sort, freeze the
	 * periph's queue.  Freeze it again if we were requested
	 * to do so in the xfer.  (The matching thaw happens in
	 * scsipi_complete() / scsipi_enqueue().)
	 */
	freezecnt = 0;
	if (xs->error != XS_NOERROR)
		freezecnt++;
	if (xs->xs_control & XS_CTL_FREEZE_PERIPH)
		freezecnt++;
	if (freezecnt != 0)
		scsipi_periph_freeze(periph, freezecnt);

	/*
	 * If this was an xfer that was not to complete asynchronously,
	 * let the requesting thread perform error checking/handling
	 * in its context.
	 */
	if ((xs->xs_control & XS_CTL_ASYNC) == 0) {
		splx(s);
		/*
		 * If it's a polling job, just return, to unwind the
		 * call graph.  We don't need to restart the queue,
		 * because polling jobs are treated specially, and
		 * are really only used during crash dumps anyway
		 * (XXX or during boot-time autoconfiguration of
		 * ATAPI devices).
		 */
		if (xs->xs_control & XS_CTL_POLL)
			return;
		/* Wake the thread sleeping on this xfer. */
		wakeup(xs);
		goto out;
	}

	/*
	 * Catch the extremely common case of I/O completing
	 * without error; no use in taking a context switch
	 * if we can handle it in interrupt context.
	 */
	if (xs->error == XS_NOERROR) {
		splx(s);
		(void) scsipi_complete(xs);
		goto out;
	}

	/*
	 * There is an error on this xfer.  Put it on the channel's
	 * completion queue, and wake up the completion thread.
	 */
	TAILQ_INSERT_TAIL(&chan->chan_complete, xs, channel_q);
	splx(s);
	wakeup(&chan->chan_complete);

 out:
	/*
	 * If there are more xfers on the channel's queue, attempt to
	 * run them.
	 */
	scsipi_run_queue(chan);
}
999
1000 /*
1001 * scsipi_complete:
1002 *
1003 * Completion of a scsipi_xfer. This is the guts of scsipi_done().
1004 *
1005 * NOTE: This routine MUST be called with valid thread context
1006 * except for the case where the following two conditions are
1007 * true:
1008 *
1009 * xs->error == XS_NOERROR
1010 * XS_CTL_ASYNC is set in xs->xs_control
1011 *
1012 * The semantics of this routine can be tricky, so here is an
1013 * explanation:
1014 *
1015 * 0 Xfer completed successfully.
1016 *
1017 * ERESTART Xfer had an error, but was restarted.
1018 *
1019 * anything else Xfer had an error, return value is Unix
1020 * errno.
1021 *
1022 * If the return value is anything but ERESTART:
1023 *
1024 * - If XS_CTL_ASYNC is set, `xs' has been freed back to
1025 * the pool.
1026 * - If there is a buf associated with the xfer,
1027 * it has been biodone()'d.
1028 */
int
scsipi_complete(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_periph *periph = xs->xs_periph;
	struct scsipi_channel *chan = periph->periph_channel;
	struct buf *bp;
	int error, s;

#ifdef DIAGNOSTIC
	if ((xs->xs_control & XS_CTL_ASYNC) != 0 && xs->bp == NULL)
		panic("scsipi_complete: XS_CTL_ASYNC but no buf");
#endif

	/* Map the adapter's completion status to a Unix errno. */
	switch (xs->error) {
	case XS_NOERROR:
		error = 0;
		break;

	case XS_SENSE:
	case XS_SHORTSENSE:
		/* Defer to the bus-specific sense interpreter. */
		error = (*chan->chan_bustype->bustype_interpret_sense)(xs);
		break;

	case XS_RESOURCE_SHORTAGE:
		/*
		 * XXX Should freeze channel's queue.
		 */
		scsipi_printaddr(periph);
		printf("adapter resource shortage\n");
		/* FALLTHROUGH */

	case XS_BUSY:
		if (xs->error == XS_BUSY && xs->status == SCSI_QUEUE_FULL) {
			struct scsipi_max_openings mo;

			/*
			 * We set the openings to active - 1, assuming that
			 * the command that got us here is the first one that
			 * can't fit into the device's queue.  If that's not
			 * the case, I guess we'll find out soon enough.
			 */
			mo.mo_target = periph->periph_target;
			mo.mo_lun = periph->periph_lun;
			mo.mo_openings = periph->periph_active - 1;
#ifdef DIAGNOSTIC
			if (mo.mo_openings < 0) {
				scsipi_printaddr(periph);
				printf("QUEUE FULL resulted in < 0 openings\n");
				panic("scsipi_done");
			}
#endif
			if (mo.mo_openings == 0) {
				scsipi_printaddr(periph);
				printf("QUEUE FULL resulted in 0 openings\n");
				mo.mo_openings = 1;
			}
			scsipi_async_event(chan, ASYNC_EVENT_MAX_OPENINGS, &mo);
			error = ERESTART;
		} else if (xs->xs_retries != 0) {
			xs->xs_retries--;
			/*
			 * Wait one second, and try again.
			 */
			if (xs->xs_control & XS_CTL_POLL)
				delay(1000000);
			else {
				/* Freeze; the timed thaw re-enables the queue. */
				scsipi_periph_freeze(periph, 1);
				timeout(scsipi_periph_timed_thaw, periph, hz);
			}
			error = ERESTART;
		} else
			error = EBUSY;
		break;

	case XS_TIMEOUT:
		if (xs->xs_retries != 0) {
			xs->xs_retries--;
			error = ERESTART;
		} else
			error = EIO;
		break;

	case XS_SELTIMEOUT:
		/* XXX Disable device? */
		error = EIO;
		break;

	case XS_RESET:
		if (xs->xs_retries != 0) {
			xs->xs_retries--;
			error = ERESTART;
		} else
			error = EIO;
		break;

	default:
		scsipi_printaddr(periph);
		printf("invalid return code from adapter: %d\n", xs->error);
		error = EIO;
		break;
	}

	s = splbio();
	if (error == ERESTART) {
		/*
		 * If we get here, the periph has been thawed and frozen
		 * again if we had to issue recovery commands.  Alternatively,
		 * it may have been frozen again and in a timed thaw.  In
		 * any case, we thaw the periph once we re-enqueue the
		 * command.  Once the periph is fully thawed, it will begin
		 * operation again.
		 */
		xs->error = XS_NOERROR;
		xs->status = SCSI_OK;
		xs->xs_status &= ~XS_STS_DONE;
		xs->xs_requeuecnt++;
		error = scsipi_enqueue(xs);
		if (error == 0) {
			scsipi_periph_thaw(periph, 1);
			splx(s);
			return (ERESTART);
		}
		/* Re-enqueue failed; fall through with the errno. */
	}

	/*
	 * scsipi_done() freezes the queue if not XS_NOERROR.
	 * Thaw it here.
	 */
	if (xs->error != XS_NOERROR)
		scsipi_periph_thaw(periph, 1);

	/* Complete the associated buf, if any. */
	if ((bp = xs->bp) != NULL) {
		if (error) {
			bp->b_error = error;
			bp->b_flags |= B_ERROR;
			bp->b_resid = bp->b_bcount;
		} else {
			bp->b_error = 0;
			bp->b_resid = xs->resid;
		}
		biodone(bp);
	}

	/* ASYNC xfers have no waiting thread; free the xs ourselves. */
	if (xs->xs_control & XS_CTL_ASYNC)
		scsipi_put_xs(xs);
	splx(s);

	return (error);
}
1179
1180 /*
1181 * scsipi_enqueue:
1182 *
1183 * Enqueue an xfer on a channel.
1184 */
int
scsipi_enqueue(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_channel *chan = xs->xs_periph->periph_channel;
	struct scsipi_xfer *qxs;
	int s;

	s = splbio();

	/*
	 * If the xfer is to be polled, and there are already jobs on
	 * the queue, we can't proceed.
	 */
	if ((xs->xs_control & XS_CTL_POLL) != 0 &&
	    TAILQ_FIRST(&chan->chan_queue) != NULL) {
		splx(s);
		xs->error = XS_DRIVER_STUFFUP;
		return (EAGAIN);
	}

	/*
	 * If we have an URGENT xfer, it's an error recovery command
	 * and it should just go on the head of the channel's queue.
	 */
	if (xs->xs_control & XS_CTL_URGENT) {
		TAILQ_INSERT_HEAD(&chan->chan_queue, xs, channel_q);
		goto out;
	}

	/*
	 * If this xfer has already been on the queue before, we
	 * need to reinsert it in the correct order.  That order is:
	 *
	 *	Immediately before the first xfer for this periph
	 *	with a requeuecnt less than xs->xs_requeuecnt.
	 *
	 * Failing that, at the end of the queue.  (We'll end up
	 * there naturally.)
	 */
	if (xs->xs_requeuecnt != 0) {
		for (qxs = TAILQ_FIRST(&chan->chan_queue); qxs != NULL;
		     qxs = TAILQ_NEXT(qxs, channel_q)) {
			if (qxs->xs_periph == xs->xs_periph &&
			    qxs->xs_requeuecnt < xs->xs_requeuecnt)
				break;
		}
		if (qxs != NULL) {
			/* Insert before qxs by linking after its predecessor. */
			TAILQ_INSERT_AFTER(&chan->chan_queue, qxs, xs,
			    channel_q);
			goto out;
		}
	}
	TAILQ_INSERT_TAIL(&chan->chan_queue, xs, channel_q);
 out:
	/* Caller asked us to undo one freeze once the xfer is queued. */
	if (xs->xs_control & XS_CTL_THAW_PERIPH)
		scsipi_periph_thaw(xs->xs_periph, 1);
	splx(s);
	return (0);
}
1245
/*
 * scsipi_run_queue:
 *
 *	Start as many xfers as possible running on the channel.
 *
 *	Loops: each pass scans the channel queue for a runnable xfer,
 *	claims an adapter resource (and, if tagged, a tag ID) for it,
 *	dequeues it, and hands it to the adapter.  Returns when no
 *	runnable work remains or adapter resources are exhausted.
 */
void
scsipi_run_queue(chan)
	struct scsipi_channel *chan;
{
	struct scsipi_xfer *xs;
	struct scsipi_periph *periph;
	int s;

	for (;;) {
		s = splbio();
		/*
		 * Look for work to do, and make sure we can do it.
		 */
		for (xs = TAILQ_FIRST(&chan->chan_queue); xs != NULL;
		     xs = TAILQ_NEXT(xs, channel_q)) {
			periph = xs->xs_periph;

			/*
			 * Skip a periph that is over its opening limit
			 * or whose queue is frozen.
			 */
			if ((periph->periph_active > periph->periph_openings) || periph->periph_qfreeze != 0)
				continue;

			/*
			 * While a periph is recovering from an error,
			 * only URGENT (recovery) xfers may be issued.
			 */
			if ((periph->periph_flags & PERIPH_RECOVERING) != 0 &&
			    (xs->xs_control & XS_CTL_URGENT) == 0)
				continue;

			/*
			 * We can issue this xfer!
			 */
			goto got_one;
		}

		/*
		 * Can't find any work to do right now.
		 */
		splx(s);
		return;

 got_one:
		/*
		 * Have an xfer to run.  Allocate a resource from
		 * the adapter to run it.  If we can't allocate that
		 * resource, we don't dequeue the xfer.
		 */
		if (scsipi_get_resource(chan) == 0) {
			/*
			 * Adapter is out of resources.  If the adapter
			 * supports it, attempt to grow them.
			 */
			if (scsipi_grow_resources(chan) == 0) {
				/*
				 * Wasn't able to grow resources,
				 * nothing more we can do.  The xfer
				 * stays on the queue for a later kick.
				 */
				if (xs->xs_control & XS_CTL_POLL) {
					scsipi_printaddr(xs->xs_periph);
					printf("polling command but no "
					    "adapter resources");
					/* We'll panic shortly... */
				}
				splx(s);
				return;
			}
			/*
			 * scsipi_grow_resources() allocated the resource
			 * for us.
			 */
		}

		/*
		 * We have a resource to run this xfer, do it!
		 */
		TAILQ_REMOVE(&chan->chan_queue, xs, channel_q);

		/*
		 * If the command is to be tagged, allocate a tag ID
		 * for it.
		 */
		if (XS_CTL_TAGTYPE(xs) != 0)
			scsipi_get_tag(xs);
		splx(s);

		/* Hand the xfer to the adapter; it runs from here. */
		scsipi_adapter_request(chan, ADAPTER_REQ_RUN_XFER, xs);
	}
#ifdef DIAGNOSTIC
	panic("scsipi_run_queue: impossible");
#endif
}
1337
/*
 * scsipi_execute_xs:
 *
 *	Begin execution of an xfer, waiting for it to complete, if necessary.
 *
 *	Returns EJUSTRETURN for asynchronous xfers (completion is then
 *	handled by the channel's completion thread); otherwise returns
 *	the final error status of the command (0 on success).
 */
int
scsipi_execute_xs(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_periph *periph = xs->xs_periph;
	struct scsipi_channel *chan = periph->periph_channel;
	int async, poll, retries, error, s;

	/* Reset completion state before (re)issuing the command. */
	xs->xs_status &= ~XS_STS_DONE;
	xs->error = XS_NOERROR;
	xs->resid = xs->datalen;
	xs->status = SCSI_OK;

#ifdef SCSIDEBUG
	/* NOTE(review): debug path still uses the legacy sc_link field. */
	if (xs->sc_link->flags & SDEV_DB3) {
		printf("scsipi_exec_cmd: ");
		show_scsipi_xs(xs);
		printf("\n");
	}
#endif

	/*
	 * Deal with command tagging:
	 *
	 *	- If the device's current operating mode doesn't
	 *	  include tagged queueing, clear the tag mask.
	 *
	 *	- If the device's current operating mode *does*
	 *	  include tagged queueing, set the tag_type in
	 *	  the xfer to the appropriate byte for the tag
	 *	  message.
	 */
	if ((PERIPH_XFER_MODE(periph) & PERIPH_CAP_TQING) == 0) {
		xs->xs_control &= ~XS_CTL_TAGMASK;
		xs->xs_tag_type = 0;
	} else {
		/*
		 * If the request doesn't specify a tag, give Head
		 * tags to URGENT operations and Ordered tags to
		 * everything else.
		 */
		if (XS_CTL_TAGTYPE(xs) == 0) {
			if (xs->xs_control & XS_CTL_URGENT)
				xs->xs_control |= XS_CTL_HEAD_TAG;
			else
				xs->xs_control |= XS_CTL_ORDERED_TAG;
		}

		/* Map the XS_CTL tag bit to the SCSI tag message byte. */
		switch (XS_CTL_TAGTYPE(xs)) {
		case XS_CTL_ORDERED_TAG:
			xs->xs_tag_type = MSG_ORDERED_Q_TAG;
			break;

		case XS_CTL_SIMPLE_TAG:
			xs->xs_tag_type = MSG_SIMPLE_Q_TAG;
			break;

		case XS_CTL_HEAD_TAG:
			xs->xs_tag_type = MSG_HEAD_OF_Q_TAG;
			break;

		default:
			scsipi_printaddr(periph);
			printf("invalid tag mask 0x%08x\n",
			    XS_CTL_TAGTYPE(xs));
			panic("scsipi_execute_xs");
		}
	}

	/*
	 * If we don't yet have a completion thread, or we are to poll for
	 * completion, clear the ASYNC flag.
	 */
	if (chan->chan_thread == NULL || (xs->xs_control & XS_CTL_POLL) != 0)
		xs->xs_control &= ~XS_CTL_ASYNC;

	async = (xs->xs_control & XS_CTL_ASYNC);
	poll = (xs->xs_control & XS_CTL_POLL);
	retries = xs->xs_retries;	/* for polling commands */

#ifdef DIAGNOSTIC
	if (async != 0 && xs->bp == NULL)
		panic("scsipi_execute_xs: XS_CTL_ASYNC but no buf");
#endif

	/*
	 * Enqueue the transfer.  If we're not polling for completion, this
	 * should ALWAYS return `no error'.
	 */
 try_again:
	error = scsipi_enqueue(xs);
	if (error) {
		if (poll == 0) {
			scsipi_printaddr(periph);
			printf("not polling, but enqueue failed with %d\n",
			    error);
			panic("scsipi_execute_xs");
		}

		/*
		 * Polling command could not be enqueued (queue busy);
		 * spin-wait one second per remaining retry.
		 */
		scsipi_printaddr(periph);
		printf("failed to enqueue polling command");
		if (retries != 0) {
			printf(", retrying...\n");
			delay(1000000);
			retries--;
			goto try_again;
		}
		printf("\n");
		goto free_xs;
	}

 restarted:
	scsipi_run_queue(chan);

	/*
	 * The xfer is enqueued, and possibly running.  If it's to be
	 * completed asynchronously, just return now.
	 */
	if (async)
		return (EJUSTRETURN);

	/*
	 * Not an asynchronous command; wait for it to complete.
	 * scsipi_done() sets XS_STS_DONE and wakes us.  A polling
	 * command must already be done by the time the adapter
	 * request returns, so sleeping here would be a driver bug.
	 */
	while ((xs->xs_status & XS_STS_DONE) == 0) {
		if (poll) {
			scsipi_printaddr(periph);
			printf("polling command not done\n");
			panic("scsipi_execute_xs");
		}
		(void) tsleep(xs, PRIBIO, "xscmd", 0);
	}

	/*
	 * Command is complete.  scsipi_done() has awakened us to perform
	 * the error handling.  ERESTART means the error handler
	 * re-enqueued the xfer; go run the queue again and re-wait.
	 */
	error = scsipi_complete(xs);
	if (error == ERESTART)
		goto restarted;

	/*
	 * Command completed successfully or fatal error occurred.  Fall
	 * into....
	 */
 free_xs:
	s = splbio();
	scsipi_put_xs(xs);
	splx(s);

	/*
	 * Kick the queue, keep it running in case it stopped for some
	 * reason.
	 */
	scsipi_run_queue(chan);

	return (error);
}
1501
1502 /*
1503 * scsipi_completion_thread:
1504 *
1505 * This is the completion thread. We wait for errors on
1506 * asynchronous xfers, and perform the error handling
1507 * function, restarting the command, if necessary.
1508 */
1509 void
1510 scsipi_completion_thread(arg)
1511 void *arg;
1512 {
1513 struct scsipi_channel *chan = arg;
1514 struct scsipi_xfer *xs;
1515 int s;
1516
1517 while ((chan->chan_flags & SCSIPI_CHAN_SHUTDOWN) == 0) {
1518 s = splbio();
1519 if ((xs = TAILQ_FIRST(&chan->chan_complete)) == NULL) {
1520 splx(s);
1521 (void) tsleep(&chan->chan_complete, PRIBIO,
1522 "sccomp", 0);
1523 continue;
1524 }
1525 TAILQ_REMOVE(&chan->chan_complete, xs, channel_q);
1526 splx(s);
1527
1528 /*
1529 * Have an xfer with an error; process it.
1530 */
1531 (void) scsipi_complete(xs);
1532
1533 /*
1534 * Kick the queue; keep it running if it was stopped
1535 * for some reason.
1536 */
1537 scsipi_run_queue(chan);
1538 }
1539
1540 chan->chan_thread = NULL;
1541
1542 /* In case parent is waiting for us to exit. */
1543 wakeup(&chan->chan_thread);
1544
1545 kthread_exit(0);
1546 }
1547
1548 /*
1549 * scsipi_create_completion_thread:
1550 *
1551 * Callback to actually create the completion thread.
1552 */
1553 void
1554 scsipi_create_completion_thread(arg)
1555 void *arg;
1556 {
1557 struct scsipi_channel *chan = arg;
1558 struct scsipi_adapter *adapt = chan->chan_adapter;
1559
1560 if (kthread_create1(scsipi_completion_thread, chan,
1561 &chan->chan_thread, "%s:%d", adapt->adapt_dev->dv_xname,
1562 chan->chan_channel)) {
1563 printf("%s: unable to create completion thread for "
1564 "channel %d\n", adapt->adapt_dev->dv_xname,
1565 chan->chan_channel);
1566 panic("scsipi_create_completion_thread");
1567 }
1568 }
1569
1570 /*
1571 * scsipi_async_event:
1572 *
1573 * Handle an asynchronous event from an adapter.
1574 */
1575 void
1576 scsipi_async_event(chan, event, arg)
1577 struct scsipi_channel *chan;
1578 scsipi_async_event_t event;
1579 void *arg;
1580 {
1581 int s;
1582
1583 s = splbio();
1584 switch (event) {
1585 case ASYNC_EVENT_MAX_OPENINGS:
1586 scsipi_async_event_max_openings(chan,
1587 (struct scsipi_max_openings *)arg);
1588 break;
1589 }
1590 splx(s);
1591 }
1592
1593 /*
1594 * scsipi_print_xfer_mode:
1595 *
1596 * Print a periph's capabilities.
1597 */
1598 void
1599 scsipi_print_xfer_mode(periph)
1600 struct scsipi_periph *periph;
1601 {
1602 int period, freq, speed, mbs;
1603
1604 if ((periph->periph_flags & PERIPH_MODE_VALID) == 0)
1605 return;
1606
1607 printf("%s: ", periph->periph_dev->dv_xname);
1608 if (periph->periph_mode & PERIPH_CAP_SYNC) {
1609 period = scsipi_sync_factor_to_period(periph->periph_period);
1610 printf("Sync (%d.%dns offset %d)",
1611 period / 10, period % 10, periph->periph_offset);
1612 } else
1613 printf("Async");
1614
1615 if (periph->periph_mode & PERIPH_CAP_WIDE32)
1616 printf(", 32-bit");
1617 else if (periph->periph_mode & PERIPH_CAP_WIDE16)
1618 printf(", 16-bit");
1619 else
1620 printf(", 8-bit");
1621
1622 if (periph->periph_mode & PERIPH_CAP_SYNC) {
1623 freq = scsipi_sync_factor_to_freq(periph->periph_period);
1624 speed = freq;
1625 if (periph->periph_mode & PERIPH_CAP_WIDE32)
1626 speed *= 4;
1627 else if (periph->periph_mode & PERIPH_CAP_WIDE16)
1628 speed *= 2;
1629 mbs = speed / 1000;
1630 if (mbs > 0)
1631 printf(" (%d.%03dMB/s)", mbs, speed % 1000);
1632 else
1633 printf(" (%dKB/s)", speed % 1000);
1634 }
1635
1636 printf(" transfers");
1637
1638 if (periph->periph_mode & PERIPH_CAP_TQING)
1639 printf(", tagged queueing");
1640
1641 printf("\n");
1642 }
1643
1644 /*
1645 * scsipi_async_event_max_openings:
1646 *
1647 * Update the maximum number of outstanding commands a
1648 * device may have.
1649 */
1650 void
1651 scsipi_async_event_max_openings(chan, mo)
1652 struct scsipi_channel *chan;
1653 struct scsipi_max_openings *mo;
1654 {
1655 struct scsipi_periph *periph;
1656
1657 periph = scsipi_lookup_periph(chan, mo->mo_target, mo->mo_lun);
1658 if (periph == NULL) {
1659 printf("%s:%d: xfer mode update for non-existent periph at "
1660 "target %d lun %d\n",
1661 chan->chan_adapter->adapt_dev->dv_xname,
1662 chan->chan_channel, mo->mo_target, mo->mo_lun);
1663 return;
1664 }
1665
1666 if (mo->mo_openings < periph->periph_openings)
1667 periph->periph_openings = mo->mo_openings;
1668 else if (mo->mo_openings > periph->periph_openings &&
1669 (periph->periph_flags & PERIPH_GROW_OPENINGS) != 0)
1670 periph->periph_openings = mo->mo_openings;
1671 }
1672
1673 /*
1674 * scsipi_adapter_addref:
1675 *
1676 * Add a reference to the adapter pointed to by the provided
1677 * link, enabling the adapter if necessary.
1678 */
1679 int
1680 scsipi_adapter_addref(adapt)
1681 struct scsipi_adapter *adapt;
1682 {
1683 int s, error = 0;
1684
1685 s = splbio();
1686 if (adapt->adapt_refcnt++ == 0 && adapt->adapt_enable != NULL) {
1687 error = (*adapt->adapt_enable)(adapt->adapt_dev, 1);
1688 if (error)
1689 adapt->adapt_refcnt--;
1690 }
1691 splx(s);
1692 return (error);
1693 }
1694
1695 /*
1696 * scsipi_adapter_delref:
1697 *
1698 * Delete a reference to the adapter pointed to by the provided
1699 * link, disabling the adapter if possible.
1700 */
1701 void
1702 scsipi_adapter_delref(adapt)
1703 struct scsipi_adapter *adapt;
1704 {
1705 int s;
1706
1707 s = splbio();
1708 if (adapt->adapt_refcnt-- == 1 && adapt->adapt_enable != NULL)
1709 (void) (*adapt->adapt_enable)(adapt->adapt_dev, 0);
1710 splx(s);
1711 }
1712
/*
 * Table of SCSI synchronous transfer factors whose periods do not
 * follow the simple factor*4ns rule (the "fast" factors).  Periods
 * are stored in units of ns * 10 so tenths of a nanosecond can be
 * represented.
 */
struct scsipi_syncparam {
	int	ss_factor;
	int	ss_period;	/* ns * 10 */
} scsipi_syncparams[] = {
	{ 0x0a,  250 },		/* 25.0 ns, 40 MHz */
	{ 0x0b,  303 },		/* 30.3 ns, 33 MHz */
	{ 0x0c,  500 },		/* 50.0 ns, 20 MHz */
};
const int scsipi_nsyncparams =
    sizeof(scsipi_syncparams) / sizeof(scsipi_syncparams[0]);

/*
 * scsipi_sync_period_to_factor:
 *
 *	Convert a transfer period (ns * 10) to the corresponding sync
 *	factor.  Special "fast" periods come from the table; anything
 *	slower falls back to the standard period/4ns rule.
 */
int
scsipi_sync_period_to_factor(int period)
{
	int idx;

	for (idx = 0; idx < scsipi_nsyncparams; idx++) {
		if (period <= scsipi_syncparams[idx].ss_period)
			return (scsipi_syncparams[idx].ss_factor);
	}

	return ((period / 10) / 4);
}

/*
 * scsipi_sync_factor_to_period:
 *
 *	Convert a sync factor to its transfer period (ns * 10).
 *	Table-driven for the fast factors, factor*4ns otherwise.
 */
int
scsipi_sync_factor_to_period(int factor)
{
	int idx;

	for (idx = 0; idx < scsipi_nsyncparams; idx++) {
		if (scsipi_syncparams[idx].ss_factor == factor)
			return (scsipi_syncparams[idx].ss_period);
	}

	return (factor * 4 * 10);
}

/*
 * scsipi_sync_factor_to_freq:
 *
 *	Convert a sync factor to a transfer frequency in kHz
 *	(10^7 divided by the period in ns * 10).
 */
int
scsipi_sync_factor_to_freq(int factor)
{
	int idx;

	for (idx = 0; idx < scsipi_nsyncparams; idx++) {
		if (scsipi_syncparams[idx].ss_factor == factor)
			return (10000000 / scsipi_syncparams[idx].ss_period);
	}

	return (10000000 / (factor * 4 * 10));
}
1765
1766 #ifdef SCSIDEBUG
1767 /*
1768 * Given a scsipi_xfer, dump the request, in all it's glory
1769 */
1770 void
1771 show_scsipi_xs(xs)
1772 struct scsipi_xfer *xs;
1773 {
1774
1775 printf("xs(%p): ", xs);
1776 printf("xs_control(0x%08x)", xs->xs_control);
1777 printf("xs_status(0x%08x)", xs->xs_status);
1778 printf("sc_link(%p)", xs->sc_link);
1779 printf("retr(0x%x)", xs->xs_retries);
1780 printf("timo(0x%x)", xs->timeout);
1781 printf("cmd(%p)", xs->cmd);
1782 printf("len(0x%x)", xs->cmdlen);
1783 printf("data(%p)", xs->data);
1784 printf("len(0x%x)", xs->datalen);
1785 printf("res(0x%x)", xs->resid);
1786 printf("err(0x%x)", xs->error);
1787 printf("bp(%p)", xs->bp);
1788 show_scsipi_cmd(xs);
1789 }
1790
1791 void
1792 show_scsipi_cmd(xs)
1793 struct scsipi_xfer *xs;
1794 {
1795 u_char *b = (u_char *) xs->cmd;
1796 int i = 0;
1797
1798 (*xs->sc_link->sc_print_addr)(xs->sc_link);
1799 printf("command: ");
1800
1801 if ((xs->xs_control & XS_CTL_RESET) == 0) {
1802 while (i < xs->cmdlen) {
1803 if (i)
1804 printf(",");
1805 printf("0x%x", b[i++]);
1806 }
1807 printf("-[%d bytes]\n", xs->datalen);
1808 if (xs->datalen)
1809 show_mem(xs->data, min(64, xs->datalen));
1810 } else
1811 printf("-RESET-\n");
1812 }
1813
1814 void
1815 show_mem(address, num)
1816 u_char *address;
1817 int num;
1818 {
1819 int x;
1820
1821 printf("------------------------------");
1822 for (x = 0; x < num; x++) {
1823 if ((x % 16) == 0)
1824 printf("\n%03d: ", x);
1825 printf("%02x ", *address++);
1826 }
1827 printf("\n------------------------------\n");
1828 }
1829 #endif /*SCSIDEBUG */
1830