/*	$NetBSD: scsipi_base.c,v 1.26.2.4 1999/10/20 22:50:47 thorpej Exp $	*/

/*-
 * Copyright (c) 1998, 1999 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Charles M. Hannum; by Jason R. Thorpe of the Numerical Aerospace
 * Simulation Facility, NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "opt_scsi.h"

#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/buf.h>
#include <sys/uio.h>
#include <sys/malloc.h>
#include <sys/pool.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/proc.h>
#include <sys/kthread.h>

#include <dev/scsipi/scsipi_all.h>
#include <dev/scsipi/scsipi_disk.h>
#include <dev/scsipi/scsipiconf.h>
#include <dev/scsipi/scsipi_base.h>

#include <dev/scsipi/scsi_all.h>
#include <dev/scsipi/scsi_message.h>
int	scsipi_complete __P((struct scsipi_xfer *));
int	scsipi_enqueue __P((struct scsipi_xfer *));
void	scsipi_run_queue __P((struct scsipi_channel *chan));

void	scsipi_completion_thread __P((void *));
void	scsipi_create_completion_thread __P((void *));

void	scsipi_get_tag __P((struct scsipi_xfer *));
void	scsipi_put_tag __P((struct scsipi_xfer *));

void	scsipi_async_event_max_openings __P((struct scsipi_channel *,
	    struct scsipi_max_openings *));
void	scsipi_async_event_xfer_mode __P((struct scsipi_channel *,
	    struct scsipi_xfer_mode *));

struct pool scsipi_xfer_pool;

/*
 * scsipi_init:
 *
 *	Called when a scsibus or atapibus is attached to the system
 *	to initialize shared data structures.
 */
void
scsipi_init()
{
	static int scsipi_init_done;

	if (scsipi_init_done)
		return;
	scsipi_init_done = 1;

	/* Initialize the scsipi_xfer pool. */
	pool_init(&scsipi_xfer_pool, sizeof(struct scsipi_xfer), 0,
	    0, 0, "scxspl", 0, NULL, NULL, M_DEVBUF);
}

/*
 * scsipi_channel_init:
 *
 *	Initialize a scsipi_channel when it is attached.
 */
void
scsipi_channel_init(chan)
	struct scsipi_channel *chan;
{
	size_t nbytes;
	int i;

	/* Initialize shared data. */
	scsipi_init();

	/* Initialize the queues. */
	TAILQ_INIT(&chan->chan_queue);
	TAILQ_INIT(&chan->chan_complete);

	nbytes = chan->chan_ntargets * sizeof(struct scsipi_periph **);
	chan->chan_periphs = malloc(nbytes, M_DEVBUF, M_WAITOK);

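	/*
	 * chan_periphs is a two-level table indexed by [target][lun];
	 * each per-target row is allocated and zeroed below.
	 */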
	nbytes = chan->chan_nluns * sizeof(struct scsipi_periph *);
	for (i = 0; i < chan->chan_ntargets; i++) {
		chan->chan_periphs[i] = malloc(nbytes, M_DEVBUF, M_WAITOK);
		memset(chan->chan_periphs[i], 0, nbytes);
	}

	/*
	 * Create the asynchronous completion thread.
	 */
	kthread_create(scsipi_create_completion_thread, chan);
}

/*
 * scsipi_lookup_periph:
 *
 *	Lookup a periph on the specified channel.
 */
struct scsipi_periph *
scsipi_lookup_periph(chan, target, lun)
	struct scsipi_channel *chan;
	int target, lun;
{
	struct scsipi_periph *periph;
	int s;

	if (target >= chan->chan_ntargets ||
	    lun >= chan->chan_nluns)
		return (NULL);

	s = splbio();
	periph = chan->chan_periphs[target][lun];
	splx(s);

	return (periph);
}

/*
 * scsipi_get_resource:
 *
 *	Allocate a single xfer `resource' from the channel.
 *
 *	NOTE: Must be called at splbio().
 */
int
scsipi_get_resource(chan)
	struct scsipi_channel *chan;
{
	struct scsipi_adapter *adapt = chan->chan_adapter;

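	/*
	 * SCSIPI_CHAN_OPENINGS means command openings are accounted
	 * per-channel; otherwise they are shared by all channels on
	 * the adapter.
	 */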
	if (chan->chan_flags & SCSIPI_CHAN_OPENINGS) {
		if (chan->chan_openings > 0) {
			chan->chan_openings--;
			return (1);
		}
		return (0);
	}

	if (adapt->adapt_openings > 0) {
		adapt->adapt_openings--;
		return (1);
	}
	return (0);
}

/*
 * scsipi_grow_resources:
 *
 *	Attempt to grow resources for a channel.  If this succeeds,
 *	we allocate one for our caller.
 *
 *	NOTE: Must be called at splbio().
 */
int
scsipi_grow_resources(chan)
	struct scsipi_channel *chan;
{

	if (chan->chan_flags & SCSIPI_CHAN_CANGROW) {
		scsipi_adapter_request(chan, ADAPTER_REQ_GROW_RESOURCES, NULL);
		return (scsipi_get_resource(chan));
	}

	return (0);
}

/*
 * scsipi_put_resource:
 *
 *	Free a single xfer `resource' to the channel.
 *
 *	NOTE: Must be called at splbio().
 */
void
scsipi_put_resource(chan)
	struct scsipi_channel *chan;
{
	struct scsipi_adapter *adapt = chan->chan_adapter;

	if (chan->chan_flags & SCSIPI_CHAN_OPENINGS)
		chan->chan_openings++;
	else
		adapt->adapt_openings++;
}

/*
 * scsipi_get_tag:
 *
 *	Get a tag ID for the specified xfer.
 *
 *	NOTE: Must be called at splbio().
 */
void
scsipi_get_tag(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_periph *periph = xs->xs_periph;
	int word, bit, tag;

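	/*
	 * Scan the free-tag bitmap for a word with a bit still set.
	 * Each word tracks 32 tags; the tag ID is reassembled below
	 * as (word << 5) | bit.
	 */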
	for (word = 0; word < PERIPH_NTAGWORDS; word++) {
		bit = ffs(periph->periph_freetags[word]);
		if (bit != 0)
			break;
	}
#ifdef DIAGNOSTIC
	if (word == PERIPH_NTAGWORDS) {
		scsipi_printaddr(periph);
		printf("no free tags\n");
		panic("scsipi_get_tag");
	}
#endif

	bit -= 1;
	periph->periph_freetags[word] &= ~(1 << bit);
	tag = (word << 5) | bit;

	/* XXX Should eventually disallow this completely. */
	if (tag >= periph->periph_openings) {
		scsipi_printaddr(periph);
		printf("WARNING: tag %d greater than available openings %d\n",
		    tag, periph->periph_openings);
	}

	xs->xs_tag_id = tag;
}

/*
 * scsipi_put_tag:
 *
 *	Put the tag ID for the specified xfer back into the pool.
 *
 *	NOTE: Must be called at splbio().
 */
void
scsipi_put_tag(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_periph *periph = xs->xs_periph;
	int word, bit;

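	/* Undo the (word << 5) | bit encoding used by scsipi_get_tag(). */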
	word = xs->xs_tag_id >> 5;
	bit = xs->xs_tag_id & 0x1f;

	periph->periph_freetags[word] |= (1 << bit);
}

/*
 * scsipi_get_xs:
 *
 *	Allocate an xfer descriptor and associate it with the
 *	specified peripheral.  If the peripheral has no more
 *	available command openings, we either block waiting for
 *	one to become available, or fail.
 */
struct scsipi_xfer *
scsipi_get_xs(periph, flags)
	struct scsipi_periph *periph;
	int flags;
{
	struct scsipi_xfer *xs;
	int s;

	SC_DEBUG(sc_link, SDEV_DB3, ("scsipi_get_xs\n"));

	/*
	 * If we're cold, make sure we poll.
	 */
	if (cold)
		flags |= XS_CTL_NOSLEEP | XS_CTL_POLL;

#ifdef DIAGNOSTIC
	/*
	 * URGENT commands can never be ASYNC.
	 */
	if ((flags & (XS_CTL_URGENT|XS_CTL_ASYNC)) ==
	    (XS_CTL_URGENT|XS_CTL_ASYNC)) {
		scsipi_printaddr(periph);
		printf("URGENT and ASYNC\n");
		panic("scsipi_get_xs");
	}
#endif

	s = splbio();
	/*
	 * Wait for a command opening to become available.  Rules:
	 *
	 *	- All xfers must wait for an available opening.
	 *	  Exception: URGENT xfers can proceed when
	 *	  active == openings, because we use the opening
	 *	  of the command we're recovering for.
	 *
	 *	- If the periph is recovering, only URGENT xfers may
	 *	  proceed.
	 *
	 *	- If the periph is currently executing a recovery
	 *	  command, URGENT commands must block, because only
	 *	  one recovery command can execute at a time.
	 */
	for (;;) {
		if (flags & XS_CTL_URGENT) {
			if (periph->periph_active > periph->periph_openings ||
			    (periph->periph_flags &
			     PERIPH_RECOVERY_ACTIVE) != 0)
				goto wait_for_opening;
			periph->periph_flags |= PERIPH_RECOVERY_ACTIVE;
			break;
		}
		if (periph->periph_active >= periph->periph_openings ||
		    (periph->periph_flags & PERIPH_RECOVERING) != 0)
			goto wait_for_opening;
		periph->periph_active++;
		break;

 wait_for_opening:
		if (flags & XS_CTL_NOSLEEP) {
			splx(s);
			return (NULL);
		}
		SC_DEBUG(sc_link, SDEV_DB3, ("sleeping\n"));
		periph->periph_flags |= PERIPH_WAITING;
		(void) tsleep(periph, PRIBIO, "getxs", 0);
	}
	SC_DEBUG(sc_link, SDEV_DB3, ("calling pool_get\n"));
	xs = pool_get(&scsipi_xfer_pool,
	    ((flags & XS_CTL_NOSLEEP) != 0 ? PR_NOWAIT : PR_WAITOK));
	if (xs == NULL) {
		if (flags & XS_CTL_URGENT)
			periph->periph_flags &= ~PERIPH_RECOVERY_ACTIVE;
		else
			periph->periph_active--;
		scsipi_printaddr(periph);
		printf("unable to allocate %sscsipi_xfer\n",
		    (flags & XS_CTL_URGENT) ? "URGENT " : "");
	}
	splx(s);

	SC_DEBUG(sc_link, SDEV_DB3, ("returning\n"));

	if (xs != NULL) {
		memset(xs, 0, sizeof(*xs));
		xs->xs_periph = periph;
		xs->xs_control = flags;
		s = splbio();
		TAILQ_INSERT_TAIL(&periph->periph_xferq, xs, device_q);
		splx(s);
	}
	return (xs);
}

/*
 * scsipi_put_xs:
 *
 *	Release an xfer descriptor, decreasing the outstanding command
 *	count for the peripheral.  If there is a thread waiting for
 *	an opening, wake it up.  If not, kick any queued I/O the
 *	peripheral may have.
 *
 *	NOTE: Must be called at splbio().
 */
void
scsipi_put_xs(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_periph *periph = xs->xs_periph;
	int flags = xs->xs_control;

	SC_DEBUG(sc_link, SDEV_DB3, ("scsipi_free_xs\n"));

	TAILQ_REMOVE(&periph->periph_xferq, xs, device_q);
	pool_put(&scsipi_xfer_pool, xs);

#ifdef DIAGNOSTIC
	if ((periph->periph_flags & PERIPH_RECOVERY_ACTIVE) != 0 &&
	    periph->periph_active == 0) {
		scsipi_printaddr(periph);
		printf("recovery without a command to recover for\n");
		panic("scsipi_put_xs");
	}
#endif

	if (flags & XS_CTL_URGENT)
		periph->periph_flags &= ~PERIPH_RECOVERY_ACTIVE;
	else
		periph->periph_active--;
	if (periph->periph_active == 0 &&
	    (periph->periph_flags & PERIPH_WAITDRAIN) != 0) {
		periph->periph_flags &= ~PERIPH_WAITDRAIN;
		wakeup(&periph->periph_active);
	}

	if (periph->periph_flags & PERIPH_WAITING) {
		periph->periph_flags &= ~PERIPH_WAITING;
		wakeup(periph);
	} else {
		if (periph->periph_switch->psw_start != NULL) {
			SC_DEBUG(sc_link, SDEV_DB2,
			    ("calling private start()\n"));
			(*periph->periph_switch->psw_start)(periph);
		}
	}
}

/*
 * scsipi_channel_freeze:
 *
 *	Freeze a channel's xfer queue.
 */
void
scsipi_channel_freeze(chan, count)
	struct scsipi_channel *chan;
	int count;
{
	int s;

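	/* scsipi_run_queue() starts no new work while chan_qfreeze != 0. */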
	s = splbio();
	chan->chan_qfreeze += count;
	splx(s);
}

/*
 * scsipi_channel_thaw:
 *
 *	Thaw a channel's xfer queue.
 */
void
scsipi_channel_thaw(chan, count)
	struct scsipi_channel *chan;
	int count;
{
	int s;

	s = splbio();
	chan->chan_qfreeze -= count;
	splx(s);
}

/*
 * scsipi_channel_timed_thaw:
 *
 *	Thaw a channel after some time has expired.
 */
void
scsipi_channel_timed_thaw(arg)
	void *arg;
{
	struct scsipi_channel *chan = arg;

	scsipi_channel_thaw(chan, 1);

	/*
	 * Kick the channel's queue here.  Note, we're running in
	 * interrupt context (softclock), so the adapter driver
	 * had better not sleep.
	 */
	scsipi_run_queue(chan);
}

/*
 * scsipi_periph_freeze:
 *
 *	Freeze a device's xfer queue.
 */
void
scsipi_periph_freeze(periph, count)
	struct scsipi_periph *periph;
	int count;
{
	int s;

	s = splbio();
	periph->periph_qfreeze += count;
	splx(s);
}

/*
 * scsipi_periph_thaw:
 *
 *	Thaw a device's xfer queue.
 */
void
scsipi_periph_thaw(periph, count)
	struct scsipi_periph *periph;
	int count;
{
	int s;

	s = splbio();
	periph->periph_qfreeze -= count;
	if (periph->periph_qfreeze == 0 &&
	    (periph->periph_flags & PERIPH_WAITING) != 0)
		wakeup(periph);
	splx(s);
}

/*
 * scsipi_periph_timed_thaw:
 *
 *	Thaw a device after some time has expired.
 */
void
scsipi_periph_timed_thaw(arg)
	void *arg;
{
	struct scsipi_periph *periph = arg;

	scsipi_periph_thaw(periph, 1);

	/*
	 * Kick the channel's queue here.  Note, we're running in
	 * interrupt context (softclock), so the adapter driver
	 * had better not sleep.
	 */
	scsipi_run_queue(periph->periph_channel);
}

/*
 * scsipi_wait_drain:
 *
 *	Wait for a periph's pending xfers to drain.
 */
void
scsipi_wait_drain(periph)
	struct scsipi_periph *periph;
{
	int s;

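	/* scsipi_put_xs() does the wakeup once periph_active drops to 0. */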
	s = splbio();
	while (periph->periph_active != 0) {
		periph->periph_flags |= PERIPH_WAITDRAIN;
		(void) tsleep(&periph->periph_active, PRIBIO, "sxdrn", 0);
	}
	splx(s);
}

/*
 * scsipi_kill_pending:
 *
 *	Kill off all pending xfers for a periph.
 *
 *	NOTE: Must be called at splbio().
 */
void
scsipi_kill_pending(periph)
	struct scsipi_periph *periph;
{

	(*periph->periph_channel->chan_bustype->bustype_kill_pending)(periph);
#ifdef DIAGNOSTIC
	if (TAILQ_FIRST(&periph->periph_xferq) != NULL)
		panic("scsipi_kill_pending");
#endif
}

/*
 * scsipi_interpret_sense:
 *
 *	Look at the returned sense and act on the error, determining
 *	the unix error number to pass back.  (0 = report no error)
 *
 *	NOTE: If we return ERESTART, we are expected to have
 *	thawed the device!
 *
 *	THIS IS THE DEFAULT ERROR HANDLER FOR SCSI DEVICES.
 */
int
scsipi_interpret_sense(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_sense_data *sense;
	struct scsipi_periph *periph = xs->xs_periph;
	u_int8_t key;
	u_int32_t info;
	int error;
#ifndef SCSIVERBOSE
	static char *error_mes[] = {
		"soft error (corrected)",
		"not ready", "medium error",
		"non-media hardware failure", "illegal request",
		"unit attention", "readonly device",
		"no data found", "vendor unique",
		"copy aborted", "command aborted",
		"search returned equal", "volume overflow",
		"verify miscompare", "unknown error key"
	};
#endif

	sense = &xs->sense.scsi_sense;
#ifdef SCSIDEBUG
	if ((sc_link->flags & SDEV_DB1) != 0) {
		int count;
		printf("code 0x%x valid 0x%x ",
		    sense->error_code & SSD_ERRCODE,
		    sense->error_code & SSD_ERRCODE_VALID ? 1 : 0);
		printf("seg 0x%x key 0x%x ili 0x%x eom 0x%x fmark 0x%x\n",
		    sense->segment,
		    sense->flags & SSD_KEY,
		    sense->flags & SSD_ILI ? 1 : 0,
		    sense->flags & SSD_EOM ? 1 : 0,
		    sense->flags & SSD_FILEMARK ? 1 : 0);
		printf("info: 0x%x 0x%x 0x%x 0x%x followed by %d extra bytes\n",
		    sense->info[0],
		    sense->info[1],
		    sense->info[2],
		    sense->info[3],
		    sense->extra_len);
		printf("extra: ");
		for (count = 0; count < ADD_BYTES_LIM(sense); count++)
			printf("0x%x ", sense->cmd_spec_info[count]);
		printf("\n");
	}
#endif /* SCSIDEBUG */

	/*
	 * If the periph has its own error handler, call it first.
	 * If it returns a legit error value, return that, otherwise
	 * it wants us to continue with normal error processing.
	 */
	if (periph->periph_switch->psw_error != NULL) {
		SC_DEBUG(sc_link, SDEV_DB2,
		    ("calling private err_handler()\n"));
		error = (*periph->periph_switch->psw_error)(xs);
		if (error != EJUSTRETURN)
			return (error);
	}
	/* otherwise use the default */
	switch (sense->error_code & SSD_ERRCODE) {
		/*
		 * If it's code 70, use the extended stuff and
		 * interpret the key
		 */
	case 0x71:		/* delayed error */
		scsipi_printaddr(periph);
		key = sense->flags & SSD_KEY;
		printf(" DEFERRED ERROR, key = 0x%x\n", key);
		/* FALLTHROUGH */
	case 0x70:
		if ((sense->error_code & SSD_ERRCODE_VALID) != 0)
			info = _4btol(sense->info);
		else
			info = 0;
		key = sense->flags & SSD_KEY;

		switch (key) {
		case SKEY_NO_SENSE:
		case SKEY_RECOVERED_ERROR:
			if (xs->resid == xs->datalen && xs->datalen) {
				/*
				 * Why is this here?
				 */
				xs->resid = 0;	/* not short read */
			}
		case SKEY_EQUAL:
			error = 0;
			break;
		case SKEY_NOT_READY:
			if ((periph->periph_flags & PERIPH_REMOVABLE) != 0)
				periph->periph_flags &= ~PERIPH_MEDIA_LOADED;
			if ((xs->xs_control & XS_CTL_IGNORE_NOT_READY) != 0)
				return (0);
			if (sense->add_sense_code == 0x3A &&
			    sense->add_sense_code_qual == 0x00)
				error = ENODEV; /* Medium not present */
			else
				error = EIO;
			if ((xs->xs_control & XS_CTL_SILENT) != 0)
				return (error);
			break;
		case SKEY_ILLEGAL_REQUEST:
			if ((xs->xs_control &
			     XS_CTL_IGNORE_ILLEGAL_REQUEST) != 0)
				return (0);
			/*
			 * Handle the case where a device reports
			 * Logical Unit Not Supported during discovery.
			 */
			if ((xs->xs_control & XS_CTL_DISCOVERY) != 0 &&
			    sense->add_sense_code == 0x25 &&
			    sense->add_sense_code_qual == 0x00)
				return (EINVAL);
			if ((xs->xs_control & XS_CTL_SILENT) != 0)
				return (EIO);
			error = EINVAL;
			break;
		case SKEY_UNIT_ATTENTION:
			if (sense->add_sense_code == 0x29 &&
			    sense->add_sense_code_qual == 0x00) {
				/* device or bus reset */
				return (ERESTART);
			}
			if ((periph->periph_flags & PERIPH_REMOVABLE) != 0)
				periph->periph_flags &= ~PERIPH_MEDIA_LOADED;
			if ((xs->xs_control &
			     XS_CTL_IGNORE_MEDIA_CHANGE) != 0 ||
			    /* XXX Should reupload any transient state. */
			    (periph->periph_flags &
			     PERIPH_REMOVABLE) == 0) {
				return (ERESTART);
			}
			if ((xs->xs_control & XS_CTL_SILENT) != 0)
				return (EIO);
			error = EIO;
			break;
		case SKEY_WRITE_PROTECT:
			error = EROFS;
			break;
		case SKEY_BLANK_CHECK:
			error = 0;
			break;
		case SKEY_ABORTED_COMMAND:
			error = ERESTART;
			break;
		case SKEY_VOLUME_OVERFLOW:
			error = ENOSPC;
			break;
		default:
			error = EIO;
			break;
		}

#ifdef SCSIVERBOSE
		if ((xs->xs_control & XS_CTL_SILENT) == 0)
			scsipi_print_sense(xs, 0);
#else
		if (key) {
			scsipi_printaddr(periph);
			printf("%s", error_mes[key - 1]);
			if ((sense->error_code & SSD_ERRCODE_VALID) != 0) {
				switch (key) {
				case SKEY_NOT_READY:
				case SKEY_ILLEGAL_REQUEST:
				case SKEY_UNIT_ATTENTION:
				case SKEY_WRITE_PROTECT:
					break;
				case SKEY_BLANK_CHECK:
					printf(", requested size: %d (decimal)",
					    info);
					break;
				case SKEY_ABORTED_COMMAND:
					if (xs->xs_retries)
						printf(", retrying");
					printf(", cmd 0x%x, info 0x%x",
					    xs->cmd->opcode, info);
					break;
				default:
					printf(", info = %d (decimal)", info);
				}
			}
			if (sense->extra_len != 0) {
				int n;
				printf(", data =");
				for (n = 0; n < sense->extra_len; n++)
					printf(" %02x",
					    sense->cmd_spec_info[n]);
			}
			printf("\n");
		}
#endif
		return (error);

		/*
		 * Not code 70, just report it
		 */
	default:
		scsipi_printaddr(periph);
		printf("Sense Error Code 0x%x",
		    sense->error_code & SSD_ERRCODE);
		if ((sense->error_code & SSD_ERRCODE_VALID) != 0) {
			struct scsipi_sense_data_unextended *usense =
			    (struct scsipi_sense_data_unextended *)sense;
			printf(" at block no. %d (decimal)",
			    _3btol(usense->block));
		}
		printf("\n");
		return (EIO);
	}
}

/*
 * scsipi_size:
 *
 *	Find out from the device what its capacity is.
 */
u_long
scsipi_size(periph, flags)
	struct scsipi_periph *periph;
	int flags;
{
	struct scsipi_read_cap_data rdcap;
	struct scsipi_read_capacity scsipi_cmd;

	bzero(&scsipi_cmd, sizeof(scsipi_cmd));
	scsipi_cmd.opcode = READ_CAPACITY;

	/*
	 * If the command works, interpret the result as a 4 byte
	 * number of blocks
	 */
	if (scsipi_command(periph, (struct scsipi_generic *)&scsipi_cmd,
	    sizeof(scsipi_cmd), (u_char *)&rdcap, sizeof(rdcap),
	    2, 20000, NULL, flags | XS_CTL_DATA_IN) != 0) {
		scsipi_printaddr(periph);
		printf("could not get size\n");
		return (0);
	}

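	/*
	 * READ CAPACITY returns the address of the last block, hence
	 * the +1 to convert it to a block count.
	 */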
	return (_4btol(rdcap.addr) + 1);
}

/*
 * scsipi_test_unit_ready:
 *
 *	Issue a `test unit ready' request.
 */
int
scsipi_test_unit_ready(periph, flags)
	struct scsipi_periph *periph;
	int flags;
{
	struct scsipi_test_unit_ready scsipi_cmd;

	/* some ATAPI drives don't support TEST_UNIT_READY. Sigh */
	if (periph->periph_quirks & PQUIRK_NOTUR)
		return (0);

	bzero(&scsipi_cmd, sizeof(scsipi_cmd));
	scsipi_cmd.opcode = TEST_UNIT_READY;

	return (scsipi_command(periph,
	    (struct scsipi_generic *)&scsipi_cmd, sizeof(scsipi_cmd),
	    0, 0, 2, 10000, NULL, flags));
}

/*
 * scsipi_inquire:
 *
 *	Ask the device about itself.
 */
int
scsipi_inquire(periph, inqbuf, flags)
	struct scsipi_periph *periph;
	struct scsipi_inquiry_data *inqbuf;
	int flags;
{
	struct scsipi_inquiry scsipi_cmd;

	bzero(&scsipi_cmd, sizeof(scsipi_cmd));
	scsipi_cmd.opcode = INQUIRY;
	scsipi_cmd.length = sizeof(struct scsipi_inquiry_data);

	return (scsipi_command(periph,
	    (struct scsipi_generic *) &scsipi_cmd, sizeof(scsipi_cmd),
	    (u_char *) inqbuf, sizeof(struct scsipi_inquiry_data),
	    2, 10000, NULL, XS_CTL_DATA_IN | flags));
}

/*
 * scsipi_prevent:
 *
 *	Prevent or allow the user to remove the media
 */
int
scsipi_prevent(periph, type, flags)
	struct scsipi_periph *periph;
	int type, flags;
{
	struct scsipi_prevent scsipi_cmd;

	if (periph->periph_quirks & PQUIRK_NODOORLOCK)
		return (0);

	bzero(&scsipi_cmd, sizeof(scsipi_cmd));
	scsipi_cmd.opcode = PREVENT_ALLOW;
	scsipi_cmd.how = type;

	return (scsipi_command(periph,
	    (struct scsipi_generic *) &scsipi_cmd, sizeof(scsipi_cmd),
	    0, 0, 2, 5000, NULL, flags));
}

/*
 * scsipi_start:
 *
 *	Send a START UNIT.
 */
int
scsipi_start(periph, type, flags)
	struct scsipi_periph *periph;
	int type, flags;
{
	struct scsipi_start_stop scsipi_cmd;

	if (periph->periph_quirks & PQUIRK_NOSTARTUNIT)
		return 0;

	bzero(&scsipi_cmd, sizeof(scsipi_cmd));
	scsipi_cmd.opcode = START_STOP;
	scsipi_cmd.byte2 = 0x00;
	scsipi_cmd.how = type;

	return (scsipi_command(periph,
	    (struct scsipi_generic *) &scsipi_cmd, sizeof(scsipi_cmd),
	    0, 0, 2, (type & SSS_START) ? 60000 : 10000, NULL, flags));
}

/*
 * scsipi_done:
 *
 *	This routine is called by an adapter's interrupt handler when
 *	an xfer is completed.
 */
void
scsipi_done(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_periph *periph = xs->xs_periph;
	struct scsipi_channel *chan = periph->periph_channel;
	int s, freezecnt;

	SC_DEBUG(sc_link, SDEV_DB2, ("scsipi_done\n"));
#ifdef SCSIDEBUG
	if ((sc_link->flags & SDEV_DB1) != 0)
		show_scsipi_cmd(xs);
#endif /* SCSIDEBUG */

	s = splbio();
	/*
	 * The resource this command was using is now free.
	 */
	scsipi_put_resource(chan);

	/*
	 * If the command was tagged, free the tag.
	 */
	if (XS_CTL_TAGTYPE(xs) != 0)
		scsipi_put_tag(xs);

	/* Mark the command as `done'. */
	xs->xs_status |= XS_STS_DONE;

	/*
	 * If it's a user level request, bypass all usual completion
	 * processing, let the user work it out.  We take responsibility
	 * for freeing the xs (and restarting the device's queue) when
	 * the user returns.
	 */
	if ((xs->xs_control & XS_CTL_USERCMD) != 0) {
		splx(s);
		SC_DEBUG(sc_link, SDEV_DB3, ("calling user done()\n"));
		scsipi_user_done(xs);
		SC_DEBUG(sc_link, SDEV_DB3, ("returned from user done()\n "));
		goto out;
	}

#ifdef DIAGNOSTIC
	if ((xs->xs_control & (XS_CTL_ASYNC|XS_CTL_POLL)) ==
	    (XS_CTL_ASYNC|XS_CTL_POLL))
		panic("scsipi_done: ASYNC and POLL");
#endif

	/*
	 * If the xfer had an error of any sort, freeze the
	 * periph's queue.  Freeze it again if we were requested
	 * to do so in the xfer.
	 */
	freezecnt = 0;
	if (xs->error != XS_NOERROR)
		freezecnt++;
	if (xs->xs_control & XS_CTL_FREEZE_PERIPH)
		freezecnt++;
	if (freezecnt != 0)
		scsipi_periph_freeze(periph, freezecnt);

	/*
	 * If this was an xfer that was not to complete asynchronously,
	 * let the requesting thread perform error checking/handling
	 * in its context.
	 */
	if ((xs->xs_control & XS_CTL_ASYNC) == 0) {
		splx(s);
		/*
		 * If it's a polling job, just return, to unwind the
		 * call graph.  We don't need to restart the queue,
		 * because polling jobs are treated specially, and
		 * are really only used during crash dumps anyway
		 * (XXX or during boot-time autoconfiguration of
		 * ATAPI devices).
		 */
		if (xs->xs_control & XS_CTL_POLL)
			return;
		wakeup(xs);
		goto out;
	}

	/*
	 * Catch the extremely common case of I/O completing
	 * without error; no use in taking a context switch
	 * if we can handle it in interrupt context.
	 */
	if (xs->error == XS_NOERROR) {
		splx(s);
		(void) scsipi_complete(xs);
		goto out;
	}

	/*
	 * There is an error on this xfer.  Put it on the channel's
	 * completion queue, and wake up the completion thread.
	 */
	TAILQ_INSERT_TAIL(&chan->chan_complete, xs, channel_q);
	splx(s);
	wakeup(&chan->chan_complete);

 out:
	/*
	 * If there are more xfers on the channel's queue, attempt to
	 * run them.
	 */
	scsipi_run_queue(chan);
}

/*
 * scsipi_complete:
 *
 *	Completion of a scsipi_xfer.  This is the guts of scsipi_done().
 *
 *	NOTE: This routine MUST be called with valid thread context
 *	except for the case where the following two conditions are
 *	true:
 *
 *		xs->error == XS_NOERROR
 *		XS_CTL_ASYNC is set in xs->xs_control
 *
 *	The semantics of this routine can be tricky, so here is an
 *	explanation:
 *
 *		0		Xfer completed successfully.
 *
 *		ERESTART	Xfer had an error, but was restarted.
 *
 *		anything else	Xfer had an error, return value is Unix
 *				errno.
 *
 *	If the return value is anything but ERESTART:
 *
 *		- If XS_CTL_ASYNC is set, `xs' has been freed back to
 *		  the pool.
 *		- If there is a buf associated with the xfer,
 *		  it has been biodone()'d.
 */
int
scsipi_complete(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_periph *periph = xs->xs_periph;
	struct scsipi_channel *chan = periph->periph_channel;
	struct buf *bp;
	int error, s;

#ifdef DIAGNOSTIC
	if ((xs->xs_control & XS_CTL_ASYNC) != 0 && xs->bp == NULL)
		panic("scsipi_complete: XS_CTL_ASYNC but no buf");
#endif

	switch (xs->error) {
	case XS_NOERROR:
		error = 0;
		break;

	case XS_SENSE:
	case XS_SHORTSENSE:
		error = (*chan->chan_bustype->bustype_interpret_sense)(xs);
		break;

	case XS_RESOURCE_SHORTAGE:
		/*
		 * XXX Should freeze channel's queue.
		 */
		scsipi_printaddr(periph);
		printf("adapter resource shortage\n");
		/* FALLTHROUGH */

	case XS_BUSY:
		if (xs->error == XS_BUSY && xs->status == SCSI_QUEUE_FULL) {
			struct scsipi_max_openings mo;

			/*
			 * We set the openings to active - 1, assuming that
			 * the command that got us here is the first one that
			 * can't fit into the device's queue.  If that's not
			 * the case, I guess we'll find out soon enough.
			 */
			mo.mo_target = periph->periph_target;
			mo.mo_lun = periph->periph_lun;
			mo.mo_openings = periph->periph_active - 1;
#ifdef DIAGNOSTIC
			if (mo.mo_openings < 0) {
				scsipi_printaddr(periph);
				printf("QUEUE FULL resulted in < 0 openings\n");
				panic("scsipi_done");
			}
#endif
			if (mo.mo_openings == 0) {
				scsipi_printaddr(periph);
				printf("QUEUE FULL resulted in 0 openings\n");
				mo.mo_openings = 1;
			}
			scsipi_async_event(chan, ASYNC_EVENT_MAX_OPENINGS, &mo);
			error = ERESTART;
		} else if (xs->xs_retries != 0) {
			xs->xs_retries--;
			/*
			 * Wait one second, and try again.
			 */
			if (xs->xs_control & XS_CTL_POLL)
				delay(1000000);
			else {
				scsipi_periph_freeze(periph, 1);
				timeout(scsipi_periph_timed_thaw, periph, hz);
			}
			error = ERESTART;
		} else
			error = EBUSY;
		break;

	case XS_TIMEOUT:
		if (xs->xs_retries != 0) {
			xs->xs_retries--;
			error = ERESTART;
		} else
			error = EIO;
		break;

	case XS_SELTIMEOUT:
		/* XXX Disable device? */
		error = EIO;
		break;

	case XS_RESET:
		if (xs->xs_retries != 0) {
			xs->xs_retries--;
			error = ERESTART;
		} else
			error = EIO;
		break;

	default:
		scsipi_printaddr(periph);
		printf("invalid return code from adapter: %d\n", xs->error);
		error = EIO;
		break;
	}

	s = splbio();
	if (error == ERESTART) {
		/*
		 * If we get here, the periph has been thawed and frozen
		 * again if we had to issue recovery commands.  Alternatively,
		 * it may have been frozen again and in a timed thaw.  In
		 * any case, we thaw the periph once we re-enqueue the
		 * command.  Once the periph is fully thawed, it will begin
		 * operation again.
		 */
		xs->error = XS_NOERROR;
		xs->status = SCSI_OK;
		xs->xs_status &= ~XS_STS_DONE;
		xs->xs_requeuecnt++;
		error = scsipi_enqueue(xs);
		if (error == 0) {
			scsipi_periph_thaw(periph, 1);
			splx(s);
			return (ERESTART);
		}
	}

	/*
	 * scsipi_done() freezes the queue if not XS_NOERROR.
	 * Thaw it here.
	 */
	if (xs->error != XS_NOERROR)
		scsipi_periph_thaw(periph, 1);

	if ((bp = xs->bp) != NULL) {
		if (error) {
			bp->b_error = error;
			bp->b_flags |= B_ERROR;
			bp->b_resid = bp->b_bcount;
		} else {
			bp->b_error = 0;
			bp->b_resid = xs->resid;
		}
		biodone(bp);
	}

	if (xs->xs_control & XS_CTL_ASYNC)
		scsipi_put_xs(xs);
	splx(s);

	return (error);
}

/*
 * scsipi_enqueue:
 *
 *	Enqueue an xfer on a channel.
 */
int
scsipi_enqueue(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_channel *chan = xs->xs_periph->periph_channel;
	struct scsipi_xfer *qxs;
	int s;

	s = splbio();

	/*
	 * If the xfer is to be polled, and there are already jobs on
	 * the queue, we can't proceed.
	 */
	if ((xs->xs_control & XS_CTL_POLL) != 0 &&
	    TAILQ_FIRST(&chan->chan_queue) != NULL) {
		splx(s);
		xs->error = XS_DRIVER_STUFFUP;
		return (EAGAIN);
	}

	/*
	 * If we have an URGENT xfer, it's an error recovery command
	 * and it should just go on the head of the channel's queue.
	 */
	if (xs->xs_control & XS_CTL_URGENT) {
		TAILQ_INSERT_HEAD(&chan->chan_queue, xs, channel_q);
		goto out;
	}

	/*
	 * If this xfer has already been on the queue before, we
	 * need to reinsert it in the correct order.  That order is:
	 *
	 *	Immediately before the first xfer for this periph
	 *	with a requeuecnt less than xs->xs_requeuecnt.
	 *
	 * Failing that, at the end of the queue.  (We'll end up
	 * there naturally.)
	 */
	if (xs->xs_requeuecnt != 0) {
		for (qxs = TAILQ_FIRST(&chan->chan_queue); qxs != NULL;
		     qxs = TAILQ_NEXT(qxs, channel_q)) {
			if (qxs->xs_periph == xs->xs_periph &&
			    qxs->xs_requeuecnt < xs->xs_requeuecnt)
				break;
		}
		if (qxs != NULL) {
			TAILQ_INSERT_AFTER(&chan->chan_queue, qxs, xs,
			    channel_q);
			goto out;
		}
	}
	TAILQ_INSERT_TAIL(&chan->chan_queue, xs, channel_q);
 out:
	if (xs->xs_control & XS_CTL_THAW_PERIPH)
		scsipi_periph_thaw(xs->xs_periph, 1);
	splx(s);
	return (0);
}

/*
 * scsipi_run_queue:
 *
 *	Start as many xfers as possible running on the channel.
 */
void
scsipi_run_queue(chan)
	struct scsipi_channel *chan;
{
	struct scsipi_xfer *xs;
	struct scsipi_periph *periph;
	int s;

	for (;;) {
		s = splbio();

		/*
		 * If the channel is frozen, we can't do any work right
		 * now.
		 */
		if (chan->chan_qfreeze != 0) {
			splx(s);
			return;
		}

		/*
		 * Look for work to do, and make sure we can do it.
		 */
		for (xs = TAILQ_FIRST(&chan->chan_queue); xs != NULL;
		     xs = TAILQ_NEXT(xs, channel_q)) {
			periph = xs->xs_periph;

			if (periph->periph_active > periph->periph_openings ||
			    periph->periph_qfreeze != 0)
				continue;

			if ((periph->periph_flags & PERIPH_RECOVERING) != 0 &&
			    (xs->xs_control & XS_CTL_URGENT) == 0)
				continue;

			/*
			 * We can issue this xfer!
			 */
			goto got_one;
		}

		/*
		 * Can't find any work to do right now.
		 */
		splx(s);
		return;

 got_one:
		/*
		 * Have an xfer to run.  Allocate a resource from
		 * the adapter to run it.  If we can't allocate that
		 * resource, we don't dequeue the xfer.
		 */
		if (scsipi_get_resource(chan) == 0) {
			/*
			 * Adapter is out of resources.  If the adapter
			 * supports it, attempt to grow them.
			 */
			if (scsipi_grow_resources(chan) == 0) {
				/*
				 * Wasn't able to grow resources,
				 * nothing more we can do.
				 */
				if (xs->xs_control & XS_CTL_POLL) {
					scsipi_printaddr(xs->xs_periph);
					printf("polling command but no "
					    "adapter resources");
					/* We'll panic shortly... */
				}
				splx(s);
				return;
			}
			/*
			 * scsipi_grow_resources() allocated the resource
			 * for us.
			 */
		}

		/*
		 * We have a resource to run this xfer, do it!
		 */
		TAILQ_REMOVE(&chan->chan_queue, xs, channel_q);

		/*
		 * If the command is to be tagged, allocate a tag ID
		 * for it.
		 */
		if (XS_CTL_TAGTYPE(xs) != 0)
			scsipi_get_tag(xs);
		splx(s);

		scsipi_adapter_request(chan, ADAPTER_REQ_RUN_XFER, xs);
	}
#ifdef DIAGNOSTIC
	panic("scsipi_run_queue: impossible");
#endif
}

/*
 * scsipi_execute_xs:
 *
 *	Begin execution of an xfer, waiting for it to complete, if necessary.
 */
int
scsipi_execute_xs(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_periph *periph = xs->xs_periph;
	struct scsipi_channel *chan = periph->periph_channel;
	int async, poll, retries, error, s;

	xs->xs_status &= ~XS_STS_DONE;
	xs->error = XS_NOERROR;
	xs->resid = xs->datalen;
	xs->status = SCSI_OK;

#ifdef SCSIDEBUG
	if (xs->sc_link->flags & SDEV_DB3) {
		printf("scsipi_exec_cmd: ");
		show_scsipi_xs(xs);
		printf("\n");
	}
#endif

	/*
	 * Deal with command tagging:
	 *
	 *	- If the device's current operating mode doesn't
	 *	  include tagged queueing, clear the tag mask.
	 *
	 *	- If the device's current operating mode *does*
	 *	  include tagged queueing, set the tag_type in
	 *	  the xfer to the appropriate byte for the tag
	 *	  message.
	 */
	if ((PERIPH_XFER_MODE(periph) & PERIPH_CAP_TQING) == 0) {
		xs->xs_control &= ~XS_CTL_TAGMASK;
		xs->xs_tag_type = 0;
	} else {
		/*
		 * If the request doesn't specify a tag, give Head
		 * tags to URGENT operations and Ordered tags to
		 * everything else.
		 */
		if (XS_CTL_TAGTYPE(xs) == 0) {
			if (xs->xs_control & XS_CTL_URGENT)
				xs->xs_control |= XS_CTL_HEAD_TAG;
			else
				xs->xs_control |= XS_CTL_ORDERED_TAG;
		}

		switch (XS_CTL_TAGTYPE(xs)) {
		case XS_CTL_ORDERED_TAG:
			xs->xs_tag_type = MSG_ORDERED_Q_TAG;
			break;

		case XS_CTL_SIMPLE_TAG:
			xs->xs_tag_type = MSG_SIMPLE_Q_TAG;
			break;

		case XS_CTL_HEAD_TAG:
			xs->xs_tag_type = MSG_HEAD_OF_Q_TAG;
			break;

		default:
			scsipi_printaddr(periph);
			printf("invalid tag mask 0x%08x\n",
			    XS_CTL_TAGTYPE(xs));
			panic("scsipi_execute_xs");
		}
	}

	/*
	 * If we don't yet have a completion thread, or we are to poll for
	 * completion, clear the ASYNC flag.
	 */
	if (chan->chan_thread == NULL || (xs->xs_control & XS_CTL_POLL) != 0)
		xs->xs_control &= ~XS_CTL_ASYNC;

	async = (xs->xs_control & XS_CTL_ASYNC);
	poll = (xs->xs_control & XS_CTL_POLL);
	retries = xs->xs_retries;	/* for polling commands */

#ifdef DIAGNOSTIC
	if (async != 0 && xs->bp == NULL)
		panic("scsipi_execute_xs: XS_CTL_ASYNC but no buf");
#endif

	/*
	 * Enqueue the transfer.  If we're not polling for completion, this
	 * should ALWAYS return `no error'.
	 */
 try_again:
	error = scsipi_enqueue(xs);
	if (error) {
		if (poll == 0) {
			scsipi_printaddr(periph);
			printf("not polling, but enqueue failed with %d\n",
			    error);
			panic("scsipi_execute_xs");
		}

		scsipi_printaddr(periph);
		printf("failed to enqueue polling command");
		if (retries != 0) {
			printf(", retrying...\n");
			delay(1000000);
			retries--;
			goto try_again;
		}
		printf("\n");
		goto free_xs;
	}

 restarted:
	scsipi_run_queue(chan);

	/*
	 * The xfer is enqueued, and possibly running.  If it's to be
	 * completed asynchronously, just return now.
	 */
	if (async)
		return (EJUSTRETURN);

	/*
	 * Not an asynchronous command; wait for it to complete.
	 */
	while ((xs->xs_status & XS_STS_DONE) == 0) {
		if (poll) {
			scsipi_printaddr(periph);
			printf("polling command not done\n");
			panic("scsipi_execute_xs");
		}
		(void) tsleep(xs, PRIBIO, "xscmd", 0);
	}

	/*
	 * Command is complete.  scsipi_done() has awakened us to perform
	 * the error handling.
	 */
	error = scsipi_complete(xs);
	if (error == ERESTART)
		goto restarted;

	/*
	 * Command completed successfully or fatal error occurred.  Fall
	 * into....
	 */
 free_xs:
	s = splbio();
	scsipi_put_xs(xs);
	splx(s);

	/*
	 * Kick the queue, keep it running in case it stopped for some
	 * reason.
	 */
	scsipi_run_queue(chan);

	return (error);
}

/*
 * scsipi_completion_thread:
 *
 *	This is the completion thread.  We wait for errors on
 *	asynchronous xfers, and perform the error handling
 *	function, restarting the command, if necessary.
 */
void
scsipi_completion_thread(arg)
	void *arg;
{
	struct scsipi_channel *chan = arg;
	struct scsipi_xfer *xs;
	int s;

	while ((chan->chan_flags & SCSIPI_CHAN_SHUTDOWN) == 0) {
		s = splbio();
		if ((xs = TAILQ_FIRST(&chan->chan_complete)) == NULL) {
			splx(s);
			(void) tsleep(&chan->chan_complete, PRIBIO,
			    "sccomp", 0);
			continue;
		}
		TAILQ_REMOVE(&chan->chan_complete, xs, channel_q);
		splx(s);

		/*
		 * Have an xfer with an error; process it.
		 */
		(void) scsipi_complete(xs);

		/*
		 * Kick the queue; keep it running if it was stopped
		 * for some reason.
		 */
		scsipi_run_queue(chan);
	}

	chan->chan_thread = NULL;

	/* In case parent is waiting for us to exit. */
	wakeup(&chan->chan_thread);

	kthread_exit(0);
}

/*
 * scsipi_create_completion_thread:
 *
 *	Callback to actually create the completion thread.
 */
void
scsipi_create_completion_thread(arg)
	void *arg;
{
	struct scsipi_channel *chan = arg;
	struct scsipi_adapter *adapt = chan->chan_adapter;

	if (kthread_create1(scsipi_completion_thread, chan,
	    &chan->chan_thread, "%s:%d", adapt->adapt_dev->dv_xname,
	    chan->chan_channel)) {
		printf("%s: unable to create completion thread for "
		    "channel %d\n", adapt->adapt_dev->dv_xname,
		    chan->chan_channel);
		panic("scsipi_create_completion_thread");
	}
}

/*
 * scsipi_async_event:
 *
 *	Handle an asynchronous event from an adapter.
 */
void
scsipi_async_event(chan, event, arg)
	struct scsipi_channel *chan;
	scsipi_async_event_t event;
	void *arg;
{
	int s;

	s = splbio();
	switch (event) {
	case ASYNC_EVENT_MAX_OPENINGS:
		scsipi_async_event_max_openings(chan,
		    (struct scsipi_max_openings *)arg);
		break;

	case ASYNC_EVENT_XFER_MODE:
		scsipi_async_event_xfer_mode(chan,
		    (struct scsipi_xfer_mode *)arg);
		break;
	}
	splx(s);
}

/*
 * scsipi_print_xfer_mode:
 *
 *	Print a periph's capabilities.
 */
void
scsipi_print_xfer_mode(periph)
	struct scsipi_periph *periph;
{
	int period, freq, speed, mbs;

	if ((periph->periph_flags & PERIPH_MODE_VALID) == 0)
		return;

	printf("%s: ", periph->periph_dev->dv_xname);
	if (periph->periph_mode & PERIPH_CAP_SYNC) {
		period = scsipi_sync_factor_to_period(periph->periph_period);
		printf("Sync (%d.%dns offset %d)",
		    period / 10, period % 10, periph->periph_offset);
	} else
		printf("Async");

	if (periph->periph_mode & PERIPH_CAP_WIDE32)
		printf(", 32-bit");
	else if (periph->periph_mode & PERIPH_CAP_WIDE16)
		printf(", 16-bit");
	else
		printf(", 8-bit");

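	/*
	 * scsipi_sync_factor_to_freq() returns kHz; wide transfers move
	 * 2 (16-bit) or 4 (32-bit) bytes per cycle, so `speed' ends up
	 * in KB/s.
	 */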
	if (periph->periph_mode & PERIPH_CAP_SYNC) {
		freq = scsipi_sync_factor_to_freq(periph->periph_period);
		speed = freq;
		if (periph->periph_mode & PERIPH_CAP_WIDE32)
			speed *= 4;
		else if (periph->periph_mode & PERIPH_CAP_WIDE16)
			speed *= 2;
		mbs = speed / 1000;
		if (mbs > 0)
			printf(" (%d.%03dMB/s)", mbs, speed % 1000);
		else
			printf(" (%dKB/s)", speed % 1000);
	}

	printf(" transfers");

	if (periph->periph_mode & PERIPH_CAP_TQING)
		printf(", tagged queueing");

	printf("\n");
}

/*
 * scsipi_async_event_max_openings:
 *
 *	Update the maximum number of outstanding commands a
 *	device may have.
 */
void
scsipi_async_event_max_openings(chan, mo)
	struct scsipi_channel *chan;
	struct scsipi_max_openings *mo;
{
	struct scsipi_periph *periph;

	periph = scsipi_lookup_periph(chan, mo->mo_target, mo->mo_lun);
	if (periph == NULL) {
		printf("%s:%d: max openings update for non-existent periph at "
		    "target %d lun %d\n",
		    chan->chan_adapter->adapt_dev->dv_xname,
		    chan->chan_channel, mo->mo_target, mo->mo_lun);
		return;
	}

	if (mo->mo_openings < periph->periph_openings)
		periph->periph_openings = mo->mo_openings;
	else if (mo->mo_openings > periph->periph_openings &&
	    (periph->periph_flags & PERIPH_GROW_OPENINGS) != 0)
		periph->periph_openings = mo->mo_openings;
}

/*
 * scsipi_async_event_xfer_mode:
 *
 *	Update the xfer mode for all periphs sharing the
 *	specified I_T Nexus.
 */
void
scsipi_async_event_xfer_mode(chan, xm)
	struct scsipi_channel *chan;
	struct scsipi_xfer_mode *xm;
{
	struct scsipi_periph *periph;
	int lun;

	for (lun = 0; lun < chan->chan_nluns; lun++) {
		periph = scsipi_lookup_periph(chan, xm->xm_target, lun);
		if (periph == NULL)
			continue;
		scsipi_adapter_request(chan, ADAPTER_REQ_GET_XFER_MODE, periph);
		periph->periph_mode &= periph->periph_cap;
		scsipi_print_xfer_mode(periph);
	}
}

/*
 * scsipi_adapter_addref:
 *
 *	Add a reference to the adapter pointed to by the provided
 *	link, enabling the adapter if necessary.
 */
int
scsipi_adapter_addref(adapt)
	struct scsipi_adapter *adapt;
{
	int s, error = 0;

	s = splbio();
	if (adapt->adapt_refcnt++ == 0 && adapt->adapt_enable != NULL) {
		error = (*adapt->adapt_enable)(adapt->adapt_dev, 1);
		if (error)
			adapt->adapt_refcnt--;
	}
	splx(s);
	return (error);
}

/*
 * scsipi_adapter_delref:
 *
 *	Delete a reference to the adapter pointed to by the provided
 *	link, disabling the adapter if possible.
 */
void
scsipi_adapter_delref(adapt)
	struct scsipi_adapter *adapt;
{
	int s;

	s = splbio();
	if (adapt->adapt_refcnt-- == 1 && adapt->adapt_enable != NULL)
		(void) (*adapt->adapt_enable)(adapt->adapt_dev, 0);
	splx(s);
}

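/*
 * A periph driver would typically bracket device activation with
 * these calls, e.g. (sketch):
 *
 *	if ((error = scsipi_adapter_addref(adapt)) != 0)
 *		return (error);
 *	...use the device...
 *	scsipi_adapter_delref(adapt);
 */
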
struct scsipi_syncparam {
	int	ss_factor;
	int	ss_period;	/* ns * 10 */
} scsipi_syncparams[] = {
	{ 0x0a,		250 },
	{ 0x0b,		303 },
	{ 0x0c,		500 },
};
const int scsipi_nsyncparams =
    sizeof(scsipi_syncparams) / sizeof(scsipi_syncparams[0]);
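
/*
 * Example: sync factor 0x0c maps to an ss_period of 500 (50.0ns),
 * which scsipi_sync_factor_to_freq() converts to 20000kHz (20MHz).
 * Factors not found in the table fall back to the standard
 * period = factor * 4ns conversion.
 */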

int
scsipi_sync_period_to_factor(period)
	int period;	/* ns * 10 */
{
	int i;

	for (i = 0; i < scsipi_nsyncparams; i++) {
		if (period <= scsipi_syncparams[i].ss_period)
			return (scsipi_syncparams[i].ss_factor);
	}

	return ((period / 10) / 4);
}

int
scsipi_sync_factor_to_period(factor)
	int factor;
{
	int i;

	for (i = 0; i < scsipi_nsyncparams; i++) {
		if (factor == scsipi_syncparams[i].ss_factor)
			return (scsipi_syncparams[i].ss_period);
	}

	return ((factor * 4) * 10);
}

int
scsipi_sync_factor_to_freq(factor)
	int factor;
{
	int i;

	for (i = 0; i < scsipi_nsyncparams; i++) {
		if (factor == scsipi_syncparams[i].ss_factor)
			return (10000000 / scsipi_syncparams[i].ss_period);
	}

	return (10000000 / ((factor * 4) * 10));
}

#ifdef SCSIDEBUG
/*
 * Given a scsipi_xfer, dump the request, in all its glory
 */
void
show_scsipi_xs(xs)
	struct scsipi_xfer *xs;
{

	printf("xs(%p): ", xs);
	printf("xs_control(0x%08x)", xs->xs_control);
	printf("xs_status(0x%08x)", xs->xs_status);
	printf("sc_link(%p)", xs->sc_link);
	printf("retr(0x%x)", xs->xs_retries);
	printf("timo(0x%x)", xs->timeout);
	printf("cmd(%p)", xs->cmd);
	printf("len(0x%x)", xs->cmdlen);
	printf("data(%p)", xs->data);
	printf("len(0x%x)", xs->datalen);
	printf("res(0x%x)", xs->resid);
	printf("err(0x%x)", xs->error);
	printf("bp(%p)", xs->bp);
	show_scsipi_cmd(xs);
}

void
show_scsipi_cmd(xs)
	struct scsipi_xfer *xs;
{
	u_char *b = (u_char *) xs->cmd;
	int i = 0;

	(*xs->sc_link->sc_print_addr)(xs->sc_link);
	printf("command: ");

	if ((xs->xs_control & XS_CTL_RESET) == 0) {
		while (i < xs->cmdlen) {
			if (i)
				printf(",");
			printf("0x%x", b[i++]);
		}
		printf("-[%d bytes]\n", xs->datalen);
		if (xs->datalen)
			show_mem(xs->data, min(64, xs->datalen));
	} else
		printf("-RESET-\n");
}

void
show_mem(address, num)
	u_char *address;
	int num;
{
	int x;

	printf("------------------------------");
	for (x = 0; x < num; x++) {
		if ((x % 16) == 0)
			printf("\n%03d: ", x);
		printf("%02x ", *address++);
	}
	printf("\n------------------------------\n");
}
#endif /* SCSIDEBUG */