scsipi_base.c revision 1.26.2.7 1 /* $NetBSD: scsipi_base.c,v 1.26.2.7 2000/02/04 23:01:54 thorpej Exp $ */
2
3 /*-
4 * Copyright (c) 1998, 1999, 2000 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Charles M. Hannum; by Jason R. Thorpe of the Numerical Aerospace
9 * Simulation Facility, NASA Ames Research Center.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution.
19 * 3. All advertising materials mentioning features or use of this software
20 * must display the following acknowledgement:
21 * This product includes software developed by the NetBSD
22 * Foundation, Inc. and its contributors.
23 * 4. Neither the name of The NetBSD Foundation nor the names of its
24 * contributors may be used to endorse or promote products derived
25 * from this software without specific prior written permission.
26 *
27 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
28 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
29 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
30 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
31 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
32 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
33 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
34 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
35 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
36 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
37 * POSSIBILITY OF SUCH DAMAGE.
38 */
39
40 #include "opt_scsi.h"
41
42 #include <sys/types.h>
43 #include <sys/param.h>
44 #include <sys/systm.h>
45 #include <sys/kernel.h>
46 #include <sys/buf.h>
47 #include <sys/uio.h>
48 #include <sys/malloc.h>
49 #include <sys/pool.h>
50 #include <sys/errno.h>
51 #include <sys/device.h>
52 #include <sys/proc.h>
53 #include <sys/kthread.h>
54
55 #include <dev/scsipi/scsipi_all.h>
56 #include <dev/scsipi/scsipi_disk.h>
57 #include <dev/scsipi/scsipiconf.h>
58 #include <dev/scsipi/scsipi_base.h>
59
60 #include <dev/scsipi/scsi_all.h>
61 #include <dev/scsipi/scsi_message.h>
62
63 int scsipi_complete __P((struct scsipi_xfer *));
64 int scsipi_enqueue __P((struct scsipi_xfer *));
65 void scsipi_run_queue __P((struct scsipi_channel *chan));
66
67 void scsipi_completion_thread __P((void *));
68
69 void scsipi_get_tag __P((struct scsipi_xfer *));
70 void scsipi_put_tag __P((struct scsipi_xfer *));
71
72 int scsipi_get_resource __P((struct scsipi_channel *));
73 void scsipi_put_resource __P((struct scsipi_channel *));
74 __inline int scsipi_grow_resources __P((struct scsipi_channel *));
75
76 void scsipi_async_event_max_openings __P((struct scsipi_channel *,
77 struct scsipi_max_openings *));
78 void scsipi_async_event_xfer_mode __P((struct scsipi_channel *,
79 struct scsipi_xfer_mode *));
80
81 struct pool scsipi_xfer_pool;
82
83 /*
84 * scsipi_init:
85 *
86 * Called when a scsibus or atapibus is attached to the system
87 * to initialize shared data structures.
88 */
89 void
90 scsipi_init()
91 {
92 static int scsipi_init_done;
93
94 if (scsipi_init_done)
95 return;
96 scsipi_init_done = 1;
97
98 /* Initialize the scsipi_xfer pool. */
99 pool_init(&scsipi_xfer_pool, sizeof(struct scsipi_xfer), 0,
100 0, 0, "scxspl", 0, NULL, NULL, M_DEVBUF);
101 }
102
103 /*
104 * scsipi_channel_init:
105 *
106 * Initialize a scsipi_channel when it is attached.
107 */
108 void
109 scsipi_channel_init(chan)
110 struct scsipi_channel *chan;
111 {
112 size_t nbytes;
113 int i;
114
115 /* Initialize shared data. */
116 scsipi_init();
117
118 /* Initialize the queues. */
119 TAILQ_INIT(&chan->chan_queue);
120 TAILQ_INIT(&chan->chan_complete);
121
122 nbytes = chan->chan_ntargets * sizeof(struct scsipi_link **);
123 chan->chan_periphs = malloc(nbytes, M_DEVBUF, M_WAITOK);
124
125 nbytes = chan->chan_nluns * sizeof(struct scsipi_periph *);
126 for (i = 0; i < chan->chan_ntargets; i++) {
127 chan->chan_periphs[i] = malloc(nbytes, M_DEVBUF, M_WAITOK);
128 memset(chan->chan_periphs[i], 0, nbytes);
129 }
130
131 /*
132 * Create the asynchronous completion thread.
133 */
134 kthread_create(scsipi_create_completion_thread, chan);
135 }
136
137 /*
138 * scsipi_channel_shutdown:
139 *
140 * Shutdown a scsipi_channel.
141 */
142 void
143 scsipi_channel_shutdown(chan)
144 struct scsipi_channel *chan;
145 {
146 
147 /*
148 * Shut down the completion thread.
149 */
	/*
	 * The completion thread sleeps on &chan->chan_complete; set the
	 * shutdown flag before waking it so it sees the flag and exits.
	 */
150 chan->chan_flags |= SCSIPI_CHAN_SHUTDOWN;
151 wakeup(&chan->chan_complete);
152 
153 /*
154 * Now wait for the thread to exit.
155 */
	/* The thread clears chan_thread and wakes us when it is gone. */
156 while (chan->chan_thread != NULL)
157 (void) tsleep(&chan->chan_thread, PRIBIO, "scshut", 0);
158 }
159
160 /*
161 * scsipi_insert_periph:
162 *
163 * Insert a periph into the channel.
164 */
165 void
166 scsipi_insert_periph(chan, periph)
167 struct scsipi_channel *chan;
168 struct scsipi_periph *periph;
169 {
170 int s;
171
172 s = splbio();
173 chan->chan_periphs[periph->periph_target][periph->periph_lun] = periph;
174 splx(s);
175 }
176
177 /*
178 * scsipi_remove_periph:
179 *
180 * Remove a periph from the channel.
181 */
182 void
183 scsipi_remove_periph(chan, periph)
184 struct scsipi_channel *chan;
185 struct scsipi_periph *periph;
186 {
187 int s;
188
189 s = splbio();
190 chan->chan_periphs[periph->periph_target][periph->periph_lun] = NULL;
191 splx(s);
192 }
193
194 /*
195 * scsipi_lookup_periph:
196 *
197 * Lookup a periph on the specified channel.
198 */
199 struct scsipi_periph *
200 scsipi_lookup_periph(chan, target, lun)
201 struct scsipi_channel *chan;
202 int target, lun;
203 {
204 struct scsipi_periph *periph;
205 int s;
206
207 if (target >= chan->chan_ntargets ||
208 lun >= chan->chan_nluns)
209 return (NULL);
210
211 s = splbio();
212 periph = chan->chan_periphs[target][lun];
213 splx(s);
214
215 return (periph);
216 }
217
218 /*
219 * scsipi_get_resource:
220 *
221 * Allocate a single xfer `resource' from the channel.
222 *
223 * NOTE: Must be called at splbio().
224 */
225 int
226 scsipi_get_resource(chan)
227 struct scsipi_channel *chan;
228 {
229 struct scsipi_adapter *adapt = chan->chan_adapter;
230
231 if (chan->chan_flags & SCSIPI_CHAN_OPENINGS) {
232 if (chan->chan_openings > 0) {
233 chan->chan_openings--;
234 return (1);
235 }
236 return (0);
237 }
238
239 if (adapt->adapt_openings > 0) {
240 adapt->adapt_openings--;
241 return (1);
242 }
243 return (0);
244 }
245
246 /*
247 * scsipi_grow_resources:
248 *
249 * Attempt to grow resources for a channel. If this succeeds,
250 * we allocate one for our caller.
251 *
252 * NOTE: Must be called at splbio().
253 */
254 __inline int
255 scsipi_grow_resources(chan)
256 struct scsipi_channel *chan;
257 {
258
259 if (chan->chan_flags & SCSIPI_CHAN_CANGROW) {
260 scsipi_adapter_request(chan, ADAPTER_REQ_GROW_RESOURCES, NULL);
261 return (scsipi_get_resource(chan));
262 }
263
264 return (0);
265 }
266
267 /*
268 * scsipi_put_resource:
269 *
270 * Free a single xfer `resource' to the channel.
271 *
272 * NOTE: Must be called at splbio().
273 */
274 void
275 scsipi_put_resource(chan)
276 struct scsipi_channel *chan;
277 {
278 struct scsipi_adapter *adapt = chan->chan_adapter;
279
280 if (chan->chan_flags & SCSIPI_CHAN_OPENINGS)
281 chan->chan_openings++;
282 else
283 adapt->adapt_openings++;
284 }
285
286 /*
287 * scsipi_get_tag:
288 *
289 * Get a tag ID for the specified xfer.
290 *
291 * NOTE: Must be called at splbio().
292 */
293 void
294 scsipi_get_tag(xs)
295 struct scsipi_xfer *xs;
296 {
297 struct scsipi_periph *periph = xs->xs_periph;
298 int word, bit, tag;
299 
	/* Find the first word with a free tag; ffs() is 1-based. */
300 for (word = 0; word < PERIPH_NTAGWORDS; word++) {
301 bit = ffs(periph->periph_freetags[word]);
302 if (bit != 0)
303 break;
304 }
305 #ifdef DIAGNOSTIC
306 if (word == PERIPH_NTAGWORDS) {
307 scsipi_printaddr(periph);
308 printf("no free tags\n");
309 panic("scsipi_get_tag");
310 }
311 #endif
	/*
	 * NOTE(review): without DIAGNOSTIC, exhausting the bitmap would
	 * fall through with word == PERIPH_NTAGWORDS and bit == 0,
	 * indexing past periph_freetags below — verify callers can
	 * never request more tags than openings.
	 */
312 
	/* Convert 1-based ffs() result to a bit index and claim it. */
313 bit -= 1;
314 periph->periph_freetags[word] &= ~(1 << bit);
	/* Tag ID encodes 32 tags per word. */
315 tag = (word << 5) | bit;
316 
317 /* XXX Should eventually disallow this completely. */
318 if (tag >= periph->periph_openings) {
319 scsipi_printaddr(periph);
320 printf("WARNING: tag %d greater than available openings %d\n",
321 tag, periph->periph_openings);
322 }
323 
324 xs->xs_tag_id = tag;
325 }
326
327 /*
328 * scsipi_put_tag:
329 *
330 * Put the tag ID for the specified xfer back into the pool.
331 *
332 * NOTE: Must be called at splbio().
333 */
334 void
335 scsipi_put_tag(xs)
336 struct scsipi_xfer *xs;
337 {
338 struct scsipi_periph *periph = xs->xs_periph;
339 int word, bit;
340
341 word = xs->xs_tag_id >> 5;
342 bit = xs->xs_tag_id & 0x1f;
343
344 periph->periph_freetags[word] |= (1 << bit);
345 }
346
347 /*
348 * scsipi_get_xs:
349 *
350 * Allocate an xfer descriptor and associate it with the
351 * specified peripheral. If the peripheral has no more
352 * available command openings, we either block waiting for
353 * one to become available, or fail.
354 */
355 struct scsipi_xfer *
356 scsipi_get_xs(periph, flags)
357 struct scsipi_periph *periph;
358 int flags;
359 {
360 struct scsipi_xfer *xs;
361 int s;
362 
363 SC_DEBUG(periph, SCSIPI_DB3, ("scsipi_get_xs\n"));
364 
365 /*
366 * If we're cold, make sure we poll.
367 */
	/* Before scheduler start, sleeping is impossible. */
368 if (cold)
369 flags |= XS_CTL_NOSLEEP | XS_CTL_POLL;
370 
371 #ifdef DIAGNOSTIC
372 /*
373 * URGENT commands can never be ASYNC.
374 */
375 if ((flags & (XS_CTL_URGENT|XS_CTL_ASYNC)) ==
376 (XS_CTL_URGENT|XS_CTL_ASYNC)) {
377 scsipi_printaddr(periph);
378 printf("URGENT and ASYNC\n");
379 panic("scsipi_get_xs");
380 }
381 #endif
382 
383 s = splbio();
384 /*
385 * Wait for a command opening to become available. Rules:
386 *
387 * - All xfers must wait for an available opening.
388 * Exception: URGENT xfers can proceed when
389 * active == openings, because we use the opening
390 * of the command we're recovering for.
391 *
392 * - If the periph is recovering, only URGENT xfers may
393 * proceed.
394 *
395 * - If the periph is currently executing a recovery
396 * command, URGENT commands must block, because only
397 * one recovery command can execute at a time.
398 */
399 for (;;) {
400 if (flags & XS_CTL_URGENT) {
401 if (periph->periph_active > periph->periph_openings ||
402 (periph->periph_flags &
403 PERIPH_RECOVERY_ACTIVE) != 0)
404 goto wait_for_opening;
405 periph->periph_flags |= PERIPH_RECOVERY_ACTIVE;
406 break;
407 }
408 if (periph->periph_active >= periph->periph_openings ||
409 (periph->periph_flags & PERIPH_RECOVERING) != 0)
410 goto wait_for_opening;
	/* Normal xfer: consume one opening. */
411 periph->periph_active++;
412 break;
413 
414 wait_for_opening:
415 if (flags & XS_CTL_NOSLEEP) {
416 splx(s);
417 return (NULL);
418 }
419 SC_DEBUG(periph, SCSIPI_DB3, ("sleeping\n"));
	/* scsipi_put_xs() wakes us when an opening frees up. */
420 periph->periph_flags |= PERIPH_WAITING;
421 (void) tsleep(periph, PRIBIO, "getxs", 0);
422 }
423 SC_DEBUG(periph, SCSIPI_DB3, ("calling pool_get\n"));
424 xs = pool_get(&scsipi_xfer_pool,
425 ((flags & XS_CTL_NOSLEEP) != 0 ? PR_NOWAIT : PR_WAITOK));
426 if (xs == NULL) {
	/* Allocation failed: release whatever we claimed above. */
427 if (flags & XS_CTL_URGENT)
428 periph->periph_flags &= ~PERIPH_RECOVERY_ACTIVE;
429 else
430 periph->periph_active--;
431 scsipi_printaddr(periph);
432 printf("unable to allocate %sscsipi_xfer\n",
433 (flags & XS_CTL_URGENT) ? "URGENT " : "");
434 }
435 splx(s);
436 
437 SC_DEBUG(periph, SCSIPI_DB3, ("returning\n"));
438 
439 if (xs != NULL) {
440 memset(xs, 0, sizeof(*xs));
441 xs->xs_periph = periph;
442 xs->xs_control = flags;
	/* Queue insertion races with interrupt-side removal. */
443 s = splbio();
444 TAILQ_INSERT_TAIL(&periph->periph_xferq, xs, device_q);
445 splx(s);
446 }
447 return (xs);
448 }
449
450 /*
451 * scsipi_put_xs:
452 *
453 * Release an xfer descriptor, decreasing the outstanding command
454 * count for the peripheral. If there is a thread waiting for
455 * an opening, wake it up. If not, kick any queued I/O the
456 * peripheral may have.
457 *
458 * NOTE: Must be called at splbio().
459 */
460 void
461 scsipi_put_xs(xs)
462 struct scsipi_xfer *xs;
463 {
464 struct scsipi_periph *periph = xs->xs_periph;
465 int flags = xs->xs_control;
466 
467 SC_DEBUG(periph, SCSIPI_DB3, ("scsipi_free_xs\n"));
468 
469 TAILQ_REMOVE(&periph->periph_xferq, xs, device_q);
470 pool_put(&scsipi_xfer_pool, xs);
471 
472 #ifdef DIAGNOSTIC
473 if ((periph->periph_flags & PERIPH_RECOVERY_ACTIVE) != 0 &&
474 periph->periph_active == 0) {
475 scsipi_printaddr(periph);
476 printf("recovery without a command to recovery for\n");
477 panic("scsipi_put_xs");
478 }
479 #endif
480 
	/*
	 * Undo what scsipi_get_xs() claimed: URGENT xfers held the
	 * recovery-active flag rather than an opening.
	 */
481 if (flags & XS_CTL_URGENT)
482 periph->periph_flags &= ~PERIPH_RECOVERY_ACTIVE;
483 else
484 periph->periph_active--;
	/* Wake scsipi_wait_drain() when the last xfer completes. */
485 if (periph->periph_active == 0 &&
486 (periph->periph_flags & PERIPH_WAITDRAIN) != 0) {
487 periph->periph_flags &= ~PERIPH_WAITDRAIN;
488 wakeup(&periph->periph_active);
489 }
490 
	/*
	 * Hand the freed opening to a sleeping scsipi_get_xs() if one
	 * exists, otherwise give the periph driver a chance to queue
	 * more work.
	 */
491 if (periph->periph_flags & PERIPH_WAITING) {
492 periph->periph_flags &= ~PERIPH_WAITING;
493 wakeup(periph);
494 } else {
495 if (periph->periph_switch->psw_start != NULL) {
496 SC_DEBUG(periph, SCSIPI_DB2,
497 ("calling private start()\n"));
498 (*periph->periph_switch->psw_start)(periph);
499 }
500 }
501 }
502
503 /*
504 * scsipi_channel_freeze:
505 *
506 * Freeze a channel's xfer queue.
507 */
508 void
509 scsipi_channel_freeze(chan, count)
510 struct scsipi_channel *chan;
511 int count;
512 {
513 int s;
514
515 s = splbio();
516 chan->chan_qfreeze += count;
517 splx(s);
518 }
519
520 /*
521 * scsipi_channel_thaw:
522 *
523 * Thaw a channel's xfer queue.
524 */
525 void
526 scsipi_channel_thaw(chan, count)
527 struct scsipi_channel *chan;
528 int count;
529 {
530 int s;
531
532 s = splbio();
533 chan->chan_qfreeze -= count;
534 splx(s);
535 }
536
537 /*
538 * scsipi_channel_timed_thaw:
539 *
540 * Thaw a channel after some time has expired.
541 */
542 void
543 scsipi_channel_timed_thaw(arg)
544 void *arg;
545 {
	/* timeout(9) callback; arg is the channel to thaw by one. */
546 struct scsipi_channel *chan = arg;
547 
548 scsipi_channel_thaw(chan, 1);
549 
550 /*
551 * Kick the channel's queue here. Note, we're running in
552 * interrupt context (softclock), so the adapter driver
553 * had better not sleep.
554 */
555 scsipi_run_queue(chan);
556 }
557
558 /*
559 * scsipi_periph_freeze:
560 *
561 * Freeze a device's xfer queue.
562 */
563 void
564 scsipi_periph_freeze(periph, count)
565 struct scsipi_periph *periph;
566 int count;
567 {
568 int s;
569
570 s = splbio();
571 periph->periph_qfreeze += count;
572 splx(s);
573 }
574
575 /*
576 * scsipi_periph_thaw:
577 *
578 * Thaw a device's xfer queue.
579 */
580 void
581 scsipi_periph_thaw(periph, count)
582 struct scsipi_periph *periph;
583 int count;
584 {
585 int s;
586 
587 s = splbio();
588 periph->periph_qfreeze -= count;
	/*
	 * If the periph is now fully thawed and a thread is sleeping
	 * in scsipi_get_xs() for an opening, let it retry.
	 */
589 if (periph->periph_qfreeze == 0 &&
590 (periph->periph_flags & PERIPH_WAITING) != 0)
591 wakeup(periph);
592 splx(s);
593 }
594
595 /*
596 * scsipi_periph_timed_thaw:
597 *
598 * Thaw a device after some time has expired.
599 */
600 void
601 scsipi_periph_timed_thaw(arg)
602 void *arg;
603 {
	/* timeout(9) callback; arg is the periph to thaw by one. */
604 struct scsipi_periph *periph = arg;
605 
606 scsipi_periph_thaw(periph, 1);
607 
608 /*
609 * Kick the channel's queue here. Note, we're running in
610 * interrupt context (softclock), so the adapter driver
611 * had better not sleep.
612 */
613 scsipi_run_queue(periph->periph_channel);
614 }
615
616 /*
617 * scsipi_wait_drain:
618 *
619 * Wait for a periph's pending xfers to drain.
620 */
621 void
622 scsipi_wait_drain(periph)
623 struct scsipi_periph *periph;
624 {
625 int s;
626 
627 s = splbio();
	/*
	 * scsipi_put_xs() clears PERIPH_WAITDRAIN and wakes
	 * &periph->periph_active when the last xfer completes.
	 */
628 while (periph->periph_active != 0) {
629 periph->periph_flags |= PERIPH_WAITDRAIN;
630 (void) tsleep(&periph->periph_active, PRIBIO, "sxdrn", 0);
631 }
632 splx(s);
633 }
634
635 /*
636 * scsipi_kill_pending:
637 *
638 * Kill off all pending xfers for a periph.
639 *
640 * NOTE: Must be called at splbio().
641 */
642 void
643 scsipi_kill_pending(periph)
644 struct scsipi_periph *periph;
645 {
646 
	/* Delegate to the bus-specific hook (SCSI vs. ATAPI). */
647 (*periph->periph_channel->chan_bustype->bustype_kill_pending)(periph);
648 #ifdef DIAGNOSTIC
	/* The hook is required to have completed every pending xfer. */
649 if (TAILQ_FIRST(&periph->periph_xferq) != NULL)
650 panic("scsipi_kill_pending");
651 #endif
652 }
653
654 /*
655 * scsipi_interpret_sense:
656 *
657 * Look at the returned sense and act on the error, determining
658 * the unix error number to pass back. (0 = report no error)
659 *
660 * NOTE: If we return ERESTART, we are expected to have
661 * thawed the device!
662 *
663 * THIS IS THE DEFAULT ERROR HANDLER FOR SCSI DEVICES.
664 */
665 int
666 scsipi_interpret_sense(xs)
667 struct scsipi_xfer *xs;
668 {
669 struct scsipi_sense_data *sense;
670 struct scsipi_periph *periph = xs->xs_periph;
671 u_int8_t key;
672 u_int32_t info;
673 int error;
674 #ifndef SCSIVERBOSE
	/* One message per sense key, indexed by (key - 1). */
675 static char *error_mes[] = {
676 "soft error (corrected)",
677 "not ready", "medium error",
678 "non-media hardware failure", "illegal request",
679 "unit attention", "readonly device",
680 "no data found", "vendor unique",
681 "copy aborted", "command aborted",
682 "search returned equal", "volume overflow",
683 "verify miscompare", "unknown error key"
684 };
685 #endif
686 
687 sense = &xs->sense.scsi_sense;
688 #ifdef SCSIPI_DEBUG
689 if (periph->periph_flags & SCSIPI_DB1) {
690 int count;
691 scsipi_printaddr(periph);
692 printf(" sense debug information:\n");
693 printf("\tcode 0x%x valid 0x%x\n",
694 sense->error_code & SSD_ERRCODE,
695 sense->error_code & SSD_ERRCODE_VALID ? 1 : 0);
696 printf("\tseg 0x%x key 0x%x ili 0x%x eom 0x%x fmark 0x%x\n",
697 sense->segment,
698 sense->flags & SSD_KEY,
699 sense->flags & SSD_ILI ? 1 : 0,
700 sense->flags & SSD_EOM ? 1 : 0,
701 sense->flags & SSD_FILEMARK ? 1 : 0);
702 printf("\ninfo: 0x%x 0x%x 0x%x 0x%x followed by %d "
703 "extra bytes\n",
704 sense->info[0],
705 sense->info[1],
706 sense->info[2],
707 sense->info[3],
708 sense->extra_len);
709 printf("\textra: ");
710 for (count = 0; count < ADD_BYTES_LIM(sense); count++)
711 printf("0x%x ", sense->cmd_spec_info[count]);
712 printf("\n");
713 }
714 #endif
715 
716 /*
717 * If the periph has its own error handler, call it first.
718 * If it returns a legit error value, return that, otherwise
719 * it wants us to continue with normal error processing.
720 */
721 if (periph->periph_switch->psw_error != NULL) {
722 SC_DEBUG(periph, SCSIPI_DB2,
723 ("calling private err_handler()\n"));
724 error = (*periph->periph_switch->psw_error)(xs);
725 if (error != EJUSTRETURN)
726 return (error);
727 }
728 /* otherwise use the default */
729 switch (sense->error_code & SSD_ERRCODE) {
730 /*
731 * If it's code 70, use the extended stuff and
732 * interpret the key
733 */
734 case 0x71: /* delayed error */
735 scsipi_printaddr(periph);
736 key = sense->flags & SSD_KEY;
737 printf(" DEFERRED ERROR, key = 0x%x\n", key);
738 /* FALLTHROUGH */
739 case 0x70:
	/* Extract the information field if the device marked it valid. */
740 if ((sense->error_code & SSD_ERRCODE_VALID) != 0)
741 info = _4btol(sense->info);
742 else
743 info = 0;
744 key = sense->flags & SSD_KEY;
745 
	/* Map each sense key to an errno / retry decision. */
746 switch (key) {
747 case SKEY_NO_SENSE:
748 case SKEY_RECOVERED_ERROR:
749 if (xs->resid == xs->datalen && xs->datalen) {
750 /*
751 * Why is this here?
752 */
753 xs->resid = 0; /* not short read */
754 }
	/* FALLTHROUGH */
755 case SKEY_EQUAL:
756 error = 0;
757 break;
758 case SKEY_NOT_READY:
759 if ((periph->periph_flags & PERIPH_REMOVABLE) != 0)
760 periph->periph_flags &= ~PERIPH_MEDIA_LOADED;
761 if ((xs->xs_control & XS_CTL_IGNORE_NOT_READY) != 0)
762 return (0);
	/* ASC/ASCQ 0x3A/0x00: medium not present. */
763 if (sense->add_sense_code == 0x3A &&
764 sense->add_sense_code_qual == 0x00)
765 error = ENODEV; /* Medium not present */
766 else
767 error = EIO;
768 if ((xs->xs_control & XS_CTL_SILENT) != 0)
769 return (error);
770 break;
771 case SKEY_ILLEGAL_REQUEST:
772 if ((xs->xs_control &
773 XS_CTL_IGNORE_ILLEGAL_REQUEST) != 0)
774 return (0);
775 /*
776 * Handle the case where a device reports
777 * Logical Unit Not Supported during discovery.
778 */
779 if ((xs->xs_control & XS_CTL_DISCOVERY) != 0 &&
780 sense->add_sense_code == 0x25 &&
781 sense->add_sense_code_qual == 0x00)
782 return (EINVAL);
783 if ((xs->xs_control & XS_CTL_SILENT) != 0)
784 return (EIO);
785 error = EINVAL;
786 break;
787 case SKEY_UNIT_ATTENTION:
788 if (sense->add_sense_code == 0x29 &&
789 sense->add_sense_code_qual == 0x00) {
790 /* device or bus reset */
791 return (ERESTART);
792 }
793 if ((periph->periph_flags & PERIPH_REMOVABLE) != 0)
794 periph->periph_flags &= ~PERIPH_MEDIA_LOADED;
795 if ((xs->xs_control &
796 XS_CTL_IGNORE_MEDIA_CHANGE) != 0 ||
797 /* XXX Should reupload any transient state. */
798 (periph->periph_flags &
799 PERIPH_REMOVABLE) == 0) {
800 return (ERESTART);
801 }
802 if ((xs->xs_control & XS_CTL_SILENT) != 0)
803 return (EIO);
804 error = EIO;
805 break;
806 case SKEY_WRITE_PROTECT:
807 error = EROFS;
808 break;
809 case SKEY_BLANK_CHECK:
810 error = 0;
811 break;
812 case SKEY_ABORTED_COMMAND:
813 error = ERESTART;
814 break;
815 case SKEY_VOLUME_OVERFLOW:
816 error = ENOSPC;
817 break;
818 default:
819 error = EIO;
820 break;
821 }
822 
823 #ifdef SCSIVERBOSE
824 if ((xs->xs_control & XS_CTL_SILENT) == 0)
825 scsipi_print_sense(xs, 0);
826 #else
827 if (key) {
828 scsipi_printaddr(periph);
829 printf("%s", error_mes[key - 1]);
830 if ((sense->error_code & SSD_ERRCODE_VALID) != 0) {
831 switch (key) {
832 case SKEY_NOT_READY:
833 case SKEY_ILLEGAL_REQUEST:
834 case SKEY_UNIT_ATTENTION:
835 case SKEY_WRITE_PROTECT:
836 break;
837 case SKEY_BLANK_CHECK:
	/* NOTE(review): info is u_int32_t but printed with %d. */
838 printf(", requested size: %d (decimal)",
839 info);
840 break;
841 case SKEY_ABORTED_COMMAND:
842 if (xs->xs_retries)
843 printf(", retrying");
844 printf(", cmd 0x%x, info 0x%x",
845 xs->cmd->opcode, info);
846 break;
847 default:
848 printf(", info = %d (decimal)", info);
849 }
850 }
851 if (sense->extra_len != 0) {
852 int n;
853 printf(", data =");
854 for (n = 0; n < sense->extra_len; n++)
855 printf(" %02x",
856 sense->cmd_spec_info[n]);
857 }
858 printf("\n");
859 }
860 #endif
861 return (error);
862 
863 /*
864 * Not code 70, just report it
865 */
866 default:
867 scsipi_printaddr(periph);
868 printf("Sense Error Code 0x%x",
869 sense->error_code & SSD_ERRCODE);
870 if ((sense->error_code & SSD_ERRCODE_VALID) != 0) {
871 struct scsipi_sense_data_unextended *usense =
872 (struct scsipi_sense_data_unextended *)sense;
873 printf(" at block no. %d (decimal)",
874 _3btol(usense->block));
875 }
876 printf("\n");
877 return (EIO);
878 }
879 }
880
881 /*
882 * scsipi_size:
883 *
884 * Find out from the device what its capacity is.
885 */
886 u_long
887 scsipi_size(periph, flags)
888 struct scsipi_periph *periph;
889 int flags;
890 {
891 struct scsipi_read_cap_data rdcap;
892 struct scsipi_read_capacity scsipi_cmd;
893
894 bzero(&scsipi_cmd, sizeof(scsipi_cmd));
895 scsipi_cmd.opcode = READ_CAPACITY;
896
897 /*
898 * If the command works, interpret the result as a 4 byte
899 * number of blocks
900 */
901 if (scsipi_command(periph, (struct scsipi_generic *)&scsipi_cmd,
902 sizeof(scsipi_cmd), (u_char *)&rdcap, sizeof(rdcap),
903 2, 20000, NULL, flags | XS_CTL_DATA_IN) != 0) {
904 scsipi_printaddr(periph);
905 printf("could not get size\n");
906 return (0);
907 }
908
909 return (_4btol(rdcap.addr) + 1);
910 }
911
912 /*
913 * scsipi_test_unit_ready:
914 *
915 * Issue a `test unit ready' request.
916 */
917 int
918 scsipi_test_unit_ready(periph, flags)
919 struct scsipi_periph *periph;
920 int flags;
921 {
922 struct scsipi_test_unit_ready scsipi_cmd;
923
924 /* some ATAPI drives don't support TEST_UNIT_READY. Sigh */
925 if (periph->periph_quirks & PQUIRK_NOTUR)
926 return (0);
927
928 bzero(&scsipi_cmd, sizeof(scsipi_cmd));
929 scsipi_cmd.opcode = TEST_UNIT_READY;
930
931 return (scsipi_command(periph,
932 (struct scsipi_generic *)&scsipi_cmd, sizeof(scsipi_cmd),
933 0, 0, 2, 10000, NULL, flags));
934 }
935
936 /*
937 * scsipi_inquire:
938 *
939 * Ask the device about itself.
940 */
941 int
942 scsipi_inquire(periph, inqbuf, flags)
943 struct scsipi_periph *periph;
944 struct scsipi_inquiry_data *inqbuf;
945 int flags;
946 {
947 struct scsipi_inquiry scsipi_cmd;
948
949 bzero(&scsipi_cmd, sizeof(scsipi_cmd));
950 scsipi_cmd.opcode = INQUIRY;
951 scsipi_cmd.length = sizeof(struct scsipi_inquiry_data);
952
953 return (scsipi_command(periph,
954 (struct scsipi_generic *) &scsipi_cmd, sizeof(scsipi_cmd),
955 (u_char *) inqbuf, sizeof(struct scsipi_inquiry_data),
956 2, 10000, NULL, XS_CTL_DATA_IN | flags));
957 }
958
959 /*
960 * scsipi_prevent:
961 *
962 * Prevent or allow the user to remove the media
963 */
964 int
965 scsipi_prevent(periph, type, flags)
966 struct scsipi_periph *periph;
967 int type, flags;
968 {
969 struct scsipi_prevent scsipi_cmd;
970
971 if (periph->periph_quirks & PQUIRK_NODOORLOCK)
972 return (0);
973
974 bzero(&scsipi_cmd, sizeof(scsipi_cmd));
975 scsipi_cmd.opcode = PREVENT_ALLOW;
976 scsipi_cmd.how = type;
977
978 return (scsipi_command(periph,
979 (struct scsipi_generic *) &scsipi_cmd, sizeof(scsipi_cmd),
980 0, 0, 2, 5000, NULL, flags));
981 }
982
983 /*
984 * scsipi_start:
985 *
986 * Send a START UNIT.
987 */
988 int
989 scsipi_start(periph, type, flags)
990 struct scsipi_periph *periph;
991 int type, flags;
992 {
993 struct scsipi_start_stop scsipi_cmd;
994
995 if (periph->periph_quirks & PQUIRK_NOSTARTUNIT)
996 return 0;
997
998 bzero(&scsipi_cmd, sizeof(scsipi_cmd));
999 scsipi_cmd.opcode = START_STOP;
1000 scsipi_cmd.byte2 = 0x00;
1001 scsipi_cmd.how = type;
1002
1003 return (scsipi_command(periph,
1004 (struct scsipi_generic *) &scsipi_cmd, sizeof(scsipi_cmd),
1005 0, 0, 2, (type & SSS_START) ? 60000 : 10000, NULL, flags));
1006 }
1007
1008 /*
1009 * scsipi_done:
1010 *
1011 * This routine is called by an adapter's interrupt handler when
1012 * an xfer is completed.
1013 */
1014 void
1015 scsipi_done(xs)
1016 struct scsipi_xfer *xs;
1017 {
1018 struct scsipi_periph *periph = xs->xs_periph;
1019 struct scsipi_channel *chan = periph->periph_channel;
1020 int s, freezecnt;
1021 
1022 SC_DEBUG(periph, SCSIPI_DB2, ("scsipi_done\n"));
1023 #ifdef SCSIPI_DEBUG
1024 if (periph->periph_dbflags & SCSIPI_DB1)
1025 show_scsipi_cmd(xs);
1026 #endif
1027 
1028 s = splbio();
1029 /*
1030 * The resource this command was using is now free.
1031 */
1032 scsipi_put_resource(chan);
1033 
1034 /*
1035 * If the command was tagged, free the tag.
1036 */
1037 if (XS_CTL_TAGTYPE(xs) != 0)
1038 scsipi_put_tag(xs);
1039 
1040 /* Mark the command as `done'. */
1041 xs->xs_status |= XS_STS_DONE;
1042 
1043 /*
1044 * If it's a user level request, bypass all usual completion
1045 * processing, let the user work it out.. We take responsibility
1046 * for freeing the xs (and restarting the device's queue) when
1047 * the user returns.
1048 */
1049 if ((xs->xs_control & XS_CTL_USERCMD) != 0) {
1050 splx(s);
1051 SC_DEBUG(periph, SCSIPI_DB3, ("calling user done()\n"));
1052 scsipi_user_done(xs);
1053 SC_DEBUG(periph, SCSIPI_DB3, ("returned from user done()\n "));
1054 goto out;
1055 }
1056 
1057 #ifdef DIAGNOSTIC
1058 if ((xs->xs_control & (XS_CTL_ASYNC|XS_CTL_POLL)) ==
1059 (XS_CTL_ASYNC|XS_CTL_POLL))
1060 panic("scsipi_done: ASYNC and POLL");
1061 #endif
1062 
1063 /*
1064 * If the xfer had an error of any sort, freeze the
1065 * periph's queue. Freeze it again if we were requested
1066 * to do so in the xfer.
1067 */
	/* scsipi_complete() is responsible for the matching thaw(s). */
1068 freezecnt = 0;
1069 if (xs->error != XS_NOERROR)
1070 freezecnt++;
1071 if (xs->xs_control & XS_CTL_FREEZE_PERIPH)
1072 freezecnt++;
1073 if (freezecnt != 0)
1074 scsipi_periph_freeze(periph, freezecnt);
1075 
1076 /*
1077 * If this was an xfer that was not to complete asynchronously,
1078 * let the requesting thread perform error checking/handling
1079 * in its context.
1080 */
1081 if ((xs->xs_control & XS_CTL_ASYNC) == 0) {
1082 splx(s);
1083 /*
1084 * If it's a polling job, just return, to unwind the
1085 * call graph. We don't need to restart the queue,
1086 * because polling jobs are treated specially, and
1087 * are really only used during crash dumps anyway
1088 * (XXX or during boot-time autoconfiguration of
1089 * ATAPI devices).
1090 */
1091 if (xs->xs_control & XS_CTL_POLL)
1092 return;
	/* The issuing thread sleeps on the xs itself. */
1093 wakeup(xs);
1094 goto out;
1095 }
1096 
1097 /*
1098 * Catch the extremely common case of I/O completing
1099 * without error; no use in taking a context switch
1100 * if we can handle it in interrupt context.
1101 */
1102 if (xs->error == XS_NOERROR) {
1103 splx(s);
1104 (void) scsipi_complete(xs);
1105 goto out;
1106 }
1107 
1108 /*
1109 * There is an error on this xfer. Put it on the channel's
1110 * completion queue, and wake up the completion thread.
1111 */
1112 TAILQ_INSERT_TAIL(&chan->chan_complete, xs, channel_q);
1113 splx(s);
1114 wakeup(&chan->chan_complete);
1115 
1116 out:
1117 /*
1118 * If there are more xfers on the channel's queue, attempt to
1119 * run them.
1120 */
1121 scsipi_run_queue(chan);
1122 }
1123
1124 /*
1125 * scsipi_complete:
1126 *
1127 * Completion of a scsipi_xfer. This is the guts of scsipi_done().
1128 *
1129 * NOTE: This routine MUST be called with valid thread context
1130 * except for the case where the following two conditions are
1131 * true:
1132 *
1133 * xs->error == XS_NOERROR
1134 * XS_CTL_ASYNC is set in xs->xs_control
1135 *
1136 * The semantics of this routine can be tricky, so here is an
1137 * explanation:
1138 *
1139 * 0 Xfer completed successfully.
1140 *
1141 * ERESTART Xfer had an error, but was restarted.
1142 *
1143 * anything else Xfer had an error, return value is Unix
1144 * errno.
1145 *
1146 * If the return value is anything but ERESTART:
1147 *
1148 * - If XS_CTL_ASYNC is set, `xs' has been freed back to
1149 * the pool.
1150 * - If there is a buf associated with the xfer,
1151 * it has been biodone()'d.
1152 */
1153 int
1154 scsipi_complete(xs)
1155 struct scsipi_xfer *xs;
1156 {
1157 struct scsipi_periph *periph = xs->xs_periph;
1158 struct scsipi_channel *chan = periph->periph_channel;
1159 struct buf *bp;
1160 int error, s;
1161 
1162 #ifdef DIAGNOSTIC
1163 if ((xs->xs_control & XS_CTL_ASYNC) != 0 && xs->bp == NULL)
1164 panic("scsipi_complete: XS_CTL_ASYNC but no buf");
1165 #endif
1166 
	/* Translate the adapter's completion status into an errno. */
1167 switch (xs->error) {
1168 case XS_NOERROR:
1169 error = 0;
1170 break;
1171 
1172 case XS_SENSE:
1173 case XS_SHORTSENSE:
	/* Bus-specific sense interpretation (SCSI vs. ATAPI). */
1174 error = (*chan->chan_bustype->bustype_interpret_sense)(xs);
1175 break;
1176 
1177 case XS_RESOURCE_SHORTAGE:
1178 /*
1179 * XXX Should freeze channel's queue.
1180 */
1181 scsipi_printaddr(periph);
1182 printf("adapter resource shortage\n");
1183 /* FALLTHROUGH */
1184 
1185 case XS_BUSY:
1186 if (xs->error == XS_BUSY && xs->status == SCSI_QUEUE_FULL) {
1187 struct scsipi_max_openings mo;
1188 
1189 /*
1190 * We set the openings to active - 1, assuming that
1191 * the command that got us here is the first one that
1192 * can't fit into the device's queue. If that's not
1193 * the case, I guess we'll find out soon enough.
1194 */
1195 mo.mo_target = periph->periph_target;
1196 mo.mo_lun = periph->periph_lun;
1197 mo.mo_openings = periph->periph_active - 1;
1198 #ifdef DIAGNOSTIC
1199 if (mo.mo_openings < 0) {
1200 scsipi_printaddr(periph);
1201 printf("QUEUE FULL resulted in < 0 openings\n");
1202 panic("scsipi_done");
1203 }
1204 #endif
1205 if (mo.mo_openings == 0) {
1206 scsipi_printaddr(periph);
1207 printf("QUEUE FULL resulted in 0 openings\n");
1208 mo.mo_openings = 1;
1209 }
1210 scsipi_async_event(chan, ASYNC_EVENT_MAX_OPENINGS, &mo);
1211 error = ERESTART;
1212 } else if (xs->xs_retries != 0) {
1213 xs->xs_retries--;
1214 /*
1215 * Wait one second, and try again.
1216 */
1217 if (xs->xs_control & XS_CTL_POLL)
1218 delay(1000000);
1219 else {
	/* Freeze for one second; the timed thaw re-kicks the queue. */
1220 scsipi_periph_freeze(periph, 1);
1221 timeout(scsipi_periph_timed_thaw, periph, hz);
1222 }
1223 error = ERESTART;
1224 } else
1225 error = EBUSY;
1226 break;
1227 
1228 case XS_TIMEOUT:
1229 if (xs->xs_retries != 0) {
1230 xs->xs_retries--;
1231 error = ERESTART;
1232 } else
1233 error = EIO;
1234 break;
1235 
1236 case XS_SELTIMEOUT:
1237 /* XXX Disable device? */
1238 error = EIO;
1239 break;
1240 
1241 case XS_RESET:
1242 if (xs->xs_retries != 0) {
1243 xs->xs_retries--;
1244 error = ERESTART;
1245 } else
1246 error = EIO;
1247 break;
1248 
1249 default:
1250 scsipi_printaddr(periph);
1251 printf("invalid return code from adapter: %d\n", xs->error);
1252 error = EIO;
1253 break;
1254 }
1255 
1256 s = splbio();
1257 if (error == ERESTART) {
1258 /*
1259 * If we get here, the periph has been thawed and frozen
1260 * again if we had to issue recovery commands. Alternatively,
1261 * it may have been frozen again and in a timed thaw. In
1262 * any case, we thaw the periph once we re-enqueue the
1263 * command. Once the periph is fully thawed, it will begin
1264 * operation again.
1265 */
	/* Reset the xfer's state so it can be issued afresh. */
1266 xs->error = XS_NOERROR;
1267 xs->status = SCSI_OK;
1268 xs->xs_status &= ~XS_STS_DONE;
1269 xs->xs_requeuecnt++;
1270 error = scsipi_enqueue(xs);
1271 if (error == 0) {
1272 scsipi_periph_thaw(periph, 1);
1273 splx(s);
1274 return (ERESTART);
1275 }
1276 }
1277 
1278 /*
1279 * scsipi_done() freezes the queue if not XS_NOERROR.
1280 * Thaw it here.
1281 */
1282 if (xs->error != XS_NOERROR)
1283 scsipi_periph_thaw(periph, 1);
1284 
	/* Finish off any associated buffer I/O. */
1285 if ((bp = xs->bp) != NULL) {
1286 if (error) {
1287 bp->b_error = error;
1288 bp->b_flags |= B_ERROR;
1289 bp->b_resid = bp->b_bcount;
1290 } else {
1291 bp->b_error = 0;
1292 bp->b_resid = xs->resid;
1293 }
1294 biodone(bp);
1295 }
1296 
	/* ASYNC xfers have no waiting thread; free the xs ourselves. */
1297 if (xs->xs_control & XS_CTL_ASYNC)
1298 scsipi_put_xs(xs);
1299 splx(s);
1300 
1301 return (error);
1302 }
1303
1304 /*
1305 * scsipi_enqueue:
1306 *
1307 * Enqueue an xfer on a channel.
1308 */
1309 int
1310 scsipi_enqueue(xs)
1311 struct scsipi_xfer *xs;
1312 {
1313 struct scsipi_channel *chan = xs->xs_periph->periph_channel;
1314 struct scsipi_xfer *qxs;
1315 int s;
1316
1317 s = splbio();
1318
1319 /*
1320 * If the xfer is to be polled, and there are already jobs on
1321 * the queue, we can't proceed.
1322 */
1323 if ((xs->xs_control & XS_CTL_POLL) != 0 &&
1324 TAILQ_FIRST(&chan->chan_queue) != NULL) {
1325 splx(s);
1326 xs->error = XS_DRIVER_STUFFUP;
1327 return (EAGAIN);
1328 }
1329
1330 /*
1331 * If we have an URGENT xfer, it's an error recovery command
1332 * and it should just go on the head of the channel's queue.
1333 */
1334 if (xs->xs_control & XS_CTL_URGENT) {
1335 TAILQ_INSERT_HEAD(&chan->chan_queue, xs, channel_q);
1336 goto out;
1337 }
1338
1339 /*
1340 * If this xfer has already been on the queue before, we
1341 * need to reinsert it in the correct order. That order is:
1342 *
1343 * Immediately before the first xfer for this periph
1344 * with a requeuecnt less than xs->xs_requeuecnt.
1345 *
1346 * Failing that, at the end of the queue. (We'll end up
1347 * there naturally.)
1348 */
1349 if (xs->xs_requeuecnt != 0) {
1350 for (qxs = TAILQ_FIRST(&chan->chan_queue); qxs != NULL;
1351 qxs = TAILQ_NEXT(qxs, channel_q)) {
1352 if (qxs->xs_periph == xs->xs_periph &&
1353 qxs->xs_requeuecnt < xs->xs_requeuecnt)
1354 break;
1355 }
1356 if (qxs != NULL) {
1357 TAILQ_INSERT_AFTER(&chan->chan_queue, qxs, xs,
1358 channel_q);
1359 goto out;
1360 }
1361 }
1362 TAILQ_INSERT_TAIL(&chan->chan_queue, xs, channel_q);
1363 out:
1364 if (xs->xs_control & XS_CTL_THAW_PERIPH)
1365 scsipi_periph_thaw(xs->xs_periph, 1);
1366 splx(s);
1367 return (0);
1368 }
1369
1370 /*
1371 * scsipi_run_queue:
1372 *
1373 * Start as many xfers as possible running on the channel.
1374 */
1375 void
1376 scsipi_run_queue(chan)
1377 struct scsipi_channel *chan;
1378 {
1379 struct scsipi_xfer *xs;
1380 struct scsipi_periph *periph;
1381 int s;
1382
1383 for (;;) {
1384 s = splbio();
1385
1386 /*
1387 * If the channel is frozen, we can't do any work right
1388 * now.
1389 */
1390 if (chan->chan_qfreeze != 0) {
1391 splx(s);
1392 return;
1393 }
1394
1395 /*
1396 * Look for work to do, and make sure we can do it.
1397 */
1398 for (xs = TAILQ_FIRST(&chan->chan_queue); xs != NULL;
1399 xs = TAILQ_NEXT(xs, channel_q)) {
1400 periph = xs->xs_periph;
1401
1402 if ((periph->periph_active > periph->periph_openings) || periph->periph_qfreeze != 0)
1403 continue;
1404
1405 if ((periph->periph_flags & PERIPH_RECOVERING) != 0 &&
1406 (xs->xs_control & XS_CTL_URGENT) == 0)
1407 continue;
1408
1409 /*
1410 * We can issue this xfer!
1411 */
1412 goto got_one;
1413 }
1414
1415 /*
1416 * Can't find any work to do right now.
1417 */
1418 splx(s);
1419 return;
1420
1421 got_one:
1422 /*
1423 * Have an xfer to run. Allocate a resource from
1424 * the adapter to run it. If we can't allocate that
1425 * resource, we don't dequeue the xfer.
1426 */
1427 if (scsipi_get_resource(chan) == 0) {
1428 /*
1429 * Adapter is out of resources. If the adapter
1430 * supports it, attempt to grow them.
1431 */
1432 if (scsipi_grow_resources(chan) == 0) {
1433 /*
1434 * Wasn't able to grow resources,
1435 * nothing more we can do.
1436 */
1437 if (xs->xs_control & XS_CTL_POLL) {
1438 scsipi_printaddr(xs->xs_periph);
1439 printf("polling command but no "
1440 "adapter resources");
1441 /* We'll panic shortly... */
1442 }
1443 splx(s);
1444 return;
1445 }
1446 /*
1447 * scsipi_grow_resources() allocated the resource
1448 * for us.
1449 */
1450 }
1451
1452 /*
1453 * We have a resource to run this xfer, do it!
1454 */
1455 TAILQ_REMOVE(&chan->chan_queue, xs, channel_q);
1456
1457 /*
1458 * If the command is to be tagged, allocate a tag ID
1459 * for it.
1460 */
1461 if (XS_CTL_TAGTYPE(xs) != 0)
1462 scsipi_get_tag(xs);
1463 splx(s);
1464
1465 scsipi_adapter_request(chan, ADAPTER_REQ_RUN_XFER, xs);
1466 }
1467 #ifdef DIAGNOSTIC
1468 panic("scsipi_run_queue: impossible");
1469 #endif
1470 }
1471
1472 /*
1473 * scsipi_execute_xs:
1474 *
1475 * Begin execution of an xfer, waiting for it to complete, if necessary.
1476 */
1477 int
1478 scsipi_execute_xs(xs)
1479 struct scsipi_xfer *xs;
1480 {
1481 struct scsipi_periph *periph = xs->xs_periph;
1482 struct scsipi_channel *chan = periph->periph_channel;
1483 int async, poll, retries, error, s;
1484
1485 xs->xs_status &= ~XS_STS_DONE;
1486 xs->error = XS_NOERROR;
1487 xs->resid = xs->datalen;
1488 xs->status = SCSI_OK;
1489
1490 #ifdef SCSIPI_DEBUG
1491 if (xs->xs_periph->periph_dbflags & SCSIPI_DB3) {
1492 printf("scsipi_execute_xs: ");
1493 show_scsipi_xs(xs);
1494 printf("\n");
1495 }
1496 #endif
1497
1498 /*
1499 * Deal with command tagging:
1500 *
1501 * - If the device's current operating mode doesn't
1502 * include tagged queueing, clear the tag mask.
1503 *
1504 * - If the device's current operating mode *does*
1505 * include tagged queueing, set the tag_type in
1506 * the xfer to the appropriate byte for the tag
1507 * message.
1508 */
1509 if ((PERIPH_XFER_MODE(periph) & PERIPH_CAP_TQING) == 0) {
1510 xs->xs_control &= ~XS_CTL_TAGMASK;
1511 xs->xs_tag_type = 0;
1512 } else {
1513 /*
1514 * If the request doesn't specify a tag, give Head
1515 * tags to URGENT operations and Ordered tags to
1516 * everything else.
1517 */
1518 if (XS_CTL_TAGTYPE(xs) == 0) {
1519 if (xs->xs_control & XS_CTL_URGENT)
1520 xs->xs_control |= XS_CTL_HEAD_TAG;
1521 else
1522 xs->xs_control |= XS_CTL_ORDERED_TAG;
1523 }
1524
1525 switch (XS_CTL_TAGTYPE(xs)) {
1526 case XS_CTL_ORDERED_TAG:
1527 xs->xs_tag_type = MSG_ORDERED_Q_TAG;
1528 break;
1529
1530 case XS_CTL_SIMPLE_TAG:
1531 xs->xs_tag_type = MSG_SIMPLE_Q_TAG;
1532 break;
1533
1534 case XS_CTL_HEAD_TAG:
1535 xs->xs_tag_type = MSG_HEAD_OF_Q_TAG;
1536 break;
1537
1538 default:
1539 scsipi_printaddr(periph);
1540 printf("invalid tag mask 0x%08x\n",
1541 XS_CTL_TAGTYPE(xs));
1542 panic("scsipi_execute_xs");
1543 }
1544 }
1545
1546 /*
1547 * If we don't yet have a completion thread, or we are to poll for
1548 * completion, clear the ASYNC flag.
1549 */
1550 if (chan->chan_thread == NULL || (xs->xs_control & XS_CTL_POLL) != 0)
1551 xs->xs_control &= ~XS_CTL_ASYNC;
1552
1553 async = (xs->xs_control & XS_CTL_ASYNC);
1554 poll = (xs->xs_control & XS_CTL_POLL);
1555 retries = xs->xs_retries; /* for polling commands */
1556
1557 #ifdef DIAGNOSTIC
1558 if (async != 0 && xs->bp == NULL)
1559 panic("scsipi_execute_xs: XS_CTL_ASYNC but no buf");
1560 #endif
1561
1562 /*
1563 * Enqueue the transfer. If we're not polling for completion, this
1564 * should ALWAYS return `no error'.
1565 */
1566 try_again:
1567 error = scsipi_enqueue(xs);
1568 if (error) {
1569 if (poll == 0) {
1570 scsipi_printaddr(periph);
1571 printf("not polling, but enqueue failed with %d\n",
1572 error);
1573 panic("scsipi_execute_xs");
1574 }
1575
1576 scsipi_printaddr(periph);
1577 printf("failed to enqueue polling command");
1578 if (retries != 0) {
1579 printf(", retrying...\n");
1580 delay(1000000);
1581 retries--;
1582 goto try_again;
1583 }
1584 printf("\n");
1585 goto free_xs;
1586 }
1587
1588 restarted:
1589 scsipi_run_queue(chan);
1590
1591 /*
1592 * The xfer is enqueued, and possibly running. If it's to be
1593 * completed asynchronously, just return now.
1594 */
1595 if (async)
1596 return (EJUSTRETURN);
1597
1598 /*
1599 * Not an asynchronous command; wait for it to complete.
1600 */
1601 while ((xs->xs_status & XS_STS_DONE) == 0) {
1602 if (poll) {
1603 scsipi_printaddr(periph);
1604 printf("polling command not done\n");
1605 panic("scsipi_execute_xs");
1606 }
1607 (void) tsleep(xs, PRIBIO, "xscmd", 0);
1608 }
1609
1610 /*
1611 * Command is complete. scsipi_done() has awakened us to perform
1612 * the error handling.
1613 */
1614 error = scsipi_complete(xs);
1615 if (error == ERESTART)
1616 goto restarted;
1617
1618 /*
1619 * Command completed successfully or fatal error occurred. Fall
1620 * into....
1621 */
1622 free_xs:
1623 s = splbio();
1624 scsipi_put_xs(xs);
1625 splx(s);
1626
1627 /*
1628 * Kick the queue, keep it running in case it stopped for some
1629 * reason.
1630 */
1631 scsipi_run_queue(chan);
1632
1633 return (error);
1634 }
1635
1636 /*
1637 * scsipi_completion_thread:
1638 *
1639 * This is the completion thread. We wait for errors on
1640 * asynchronous xfers, and perform the error handling
1641 * function, restarting the command, if necessary.
1642 */
1643 void
1644 scsipi_completion_thread(arg)
1645 void *arg;
1646 {
1647 struct scsipi_channel *chan = arg;
1648 struct scsipi_xfer *xs;
1649 int s;
1650
1651 for (;;) {
1652 s = splbio();
1653 xs = TAILQ_FIRST(&chan->chan_complete);
1654 if (xs == NULL &&
1655 (chan->chan_flags & SCSIPI_CHAN_SHUTDOWN) == 0) {
1656 splx(s);
1657 (void) tsleep(&chan->chan_complete, PRIBIO,
1658 "sccomp", 0);
1659 continue;
1660 }
1661 if (chan->chan_flags & SCSIPI_CHAN_SHUTDOWN) {
1662 splx(s);
1663 break;
1664 }
1665 TAILQ_REMOVE(&chan->chan_complete, xs, channel_q);
1666 splx(s);
1667
1668 /*
1669 * Have an xfer with an error; process it.
1670 */
1671 (void) scsipi_complete(xs);
1672
1673 /*
1674 * Kick the queue; keep it running if it was stopped
1675 * for some reason.
1676 */
1677 scsipi_run_queue(chan);
1678 }
1679
1680 chan->chan_thread = NULL;
1681
1682 /* In case parent is waiting for us to exit. */
1683 wakeup(&chan->chan_thread);
1684
1685 kthread_exit(0);
1686 }
1687
1688 /*
1689 * scsipi_create_completion_thread:
1690 *
1691 * Callback to actually create the completion thread.
1692 */
1693 void
1694 scsipi_create_completion_thread(arg)
1695 void *arg;
1696 {
1697 struct scsipi_channel *chan = arg;
1698 struct scsipi_adapter *adapt = chan->chan_adapter;
1699
1700 if (kthread_create1(scsipi_completion_thread, chan,
1701 &chan->chan_thread, "%s:%d", adapt->adapt_dev->dv_xname,
1702 chan->chan_channel)) {
1703 printf("%s: unable to create completion thread for "
1704 "channel %d\n", adapt->adapt_dev->dv_xname,
1705 chan->chan_channel);
1706 panic("scsipi_create_completion_thread");
1707 }
1708 }
1709
1710 /*
1711 * scsipi_async_event:
1712 *
1713 * Handle an asynchronous event from an adapter.
1714 */
1715 void
1716 scsipi_async_event(chan, event, arg)
1717 struct scsipi_channel *chan;
1718 scsipi_async_event_t event;
1719 void *arg;
1720 {
1721 int s;
1722
1723 s = splbio();
1724 switch (event) {
1725 case ASYNC_EVENT_MAX_OPENINGS:
1726 scsipi_async_event_max_openings(chan,
1727 (struct scsipi_max_openings *)arg);
1728 break;
1729
1730 case ASYNC_EVENT_XFER_MODE:
1731 scsipi_async_event_xfer_mode(chan,
1732 (struct scsipi_xfer_mode *)arg);
1733 break;
1734 }
1735 splx(s);
1736 }
1737
1738 /*
1739 * scsipi_print_xfer_mode:
1740 *
1741 * Print a periph's capabilities.
1742 */
1743 void
1744 scsipi_print_xfer_mode(periph)
1745 struct scsipi_periph *periph;
1746 {
1747 int period, freq, speed, mbs;
1748
1749 if ((periph->periph_flags & PERIPH_MODE_VALID) == 0)
1750 return;
1751
1752 printf("%s: ", periph->periph_dev->dv_xname);
1753 if (periph->periph_mode & PERIPH_CAP_SYNC) {
1754 period = scsipi_sync_factor_to_period(periph->periph_period);
1755 printf("Sync (%d.%dns offset %d)",
1756 period / 10, period % 10, periph->periph_offset);
1757 } else
1758 printf("Async");
1759
1760 if (periph->periph_mode & PERIPH_CAP_WIDE32)
1761 printf(", 32-bit");
1762 else if (periph->periph_mode & PERIPH_CAP_WIDE16)
1763 printf(", 16-bit");
1764 else
1765 printf(", 8-bit");
1766
1767 if (periph->periph_mode & PERIPH_CAP_SYNC) {
1768 freq = scsipi_sync_factor_to_freq(periph->periph_period);
1769 speed = freq;
1770 if (periph->periph_mode & PERIPH_CAP_WIDE32)
1771 speed *= 4;
1772 else if (periph->periph_mode & PERIPH_CAP_WIDE16)
1773 speed *= 2;
1774 mbs = speed / 1000;
1775 if (mbs > 0)
1776 printf(" (%d.%03dMB/s)", mbs, speed % 1000);
1777 else
1778 printf(" (%dKB/s)", speed % 1000);
1779 }
1780
1781 printf(" transfers");
1782
1783 if (periph->periph_mode & PERIPH_CAP_TQING)
1784 printf(", tagged queueing");
1785
1786 printf("\n");
1787 }
1788
1789 /*
1790 * scsipi_async_event_max_openings:
1791 *
1792 * Update the maximum number of outstanding commands a
1793 * device may have.
1794 */
1795 void
1796 scsipi_async_event_max_openings(chan, mo)
1797 struct scsipi_channel *chan;
1798 struct scsipi_max_openings *mo;
1799 {
1800 struct scsipi_periph *periph;
1801 int minlun, maxlun;
1802
1803 if (mo->mo_lun == -1) {
1804 /*
1805 * Wildcarded; apply it to all LUNs.
1806 */
1807 minlun = 0;
1808 maxlun = chan->chan_nluns - 1;
1809 } else
1810 minlun = maxlun = mo->mo_lun;
1811
1812 for (; minlun <= maxlun; minlun++) {
1813 periph = scsipi_lookup_periph(chan, mo->mo_target, minlun);
1814 if (periph == NULL)
1815 continue;
1816
1817 if (mo->mo_openings < periph->periph_openings)
1818 periph->periph_openings = mo->mo_openings;
1819 else if (mo->mo_openings > periph->periph_openings &&
1820 (periph->periph_flags & PERIPH_GROW_OPENINGS) != 0)
1821 periph->periph_openings = mo->mo_openings;
1822 }
1823 }
1824
1825 /*
1826 * scsipi_async_event_xfer_mode:
1827 *
1828 * Update the xfer mode for all periphs sharing the
1829 * specified I_T Nexus.
1830 */
1831 void
1832 scsipi_async_event_xfer_mode(chan, xm)
1833 struct scsipi_channel *chan;
1834 struct scsipi_xfer_mode *xm;
1835 {
1836 struct scsipi_periph *periph;
1837 int lun, announce, mode, period, offset;
1838
1839 for (lun = 0; lun < chan->chan_nluns; lun++) {
1840 periph = scsipi_lookup_periph(chan, xm->xm_target, lun);
1841 if (periph == NULL)
1842 continue;
1843 announce = 0;
1844
1845 /*
1846 * Clamp the xfer mode down to this periph's capabilities.
1847 */
1848 mode = xm->xm_mode & periph->periph_cap;
1849 if (mode & PERIPH_CAP_SYNC) {
1850 period = xm->xm_period;
1851 offset = xm->xm_offset;
1852 } else {
1853 period = 0;
1854 offset = 0;
1855 }
1856
1857 /*
1858 * If we do not have a valid xfer mode yet, or the parameters
1859 * are different, announce them.
1860 */
1861 if ((periph->periph_flags & PERIPH_MODE_VALID) == 0 ||
1862 periph->periph_mode != mode ||
1863 periph->periph_period != period ||
1864 periph->periph_offset != offset)
1865 announce = 1;
1866
1867 periph->periph_mode = mode;
1868 periph->periph_period = period;
1869 periph->periph_offset = offset;
1870 periph->periph_flags |= PERIPH_MODE_VALID;
1871
1872 if (announce)
1873 scsipi_print_xfer_mode(periph);
1874 }
1875 }
1876
1877 /*
1878 * scsipi_set_xfer_mode:
1879 *
1880 * Set the xfer mode for the specified I_T Nexus.
1881 */
1882 void
1883 scsipi_set_xfer_mode(chan, target, immed)
1884 struct scsipi_channel *chan;
1885 int target, immed;
1886 {
1887 struct scsipi_xfer_mode xm;
1888 struct scsipi_periph *itperiph;
1889 int lun, s;
1890
1891 /*
1892 * Go to the minimal xfer mode.
1893 */
1894 xm.xm_target = target;
1895 xm.xm_mode = 0;
1896 xm.xm_period = 0; /* ignored */
1897 xm.xm_offset = 0; /* ignored */
1898
1899 /*
1900 * Find the first LUN we know about on this I_T Nexus.
1901 */
1902 for (lun = 0; lun < chan->chan_nluns; lun++) {
1903 itperiph = scsipi_lookup_periph(chan, target, lun);
1904 if (itperiph != NULL)
1905 break;
1906 }
1907 if (itperiph != NULL)
1908 xm.xm_mode = itperiph->periph_cap;
1909
1910 /*
1911 * Now issue the request to the adapter.
1912 */
1913 s = splbio();
1914 scsipi_adapter_request(chan, ADAPTER_REQ_SET_XFER_MODE, &xm);
1915 splx(s);
1916
1917 /*
1918 * If we want this to happen immediately, issue a dummy command,
1919 * since most adapters can't really negotiate unless they're
1920 * executing a job.
1921 */
1922 if (immed != 0 && itperiph != NULL) {
1923 (void) scsipi_test_unit_ready(itperiph,
1924 XS_CTL_DISCOVERY | XS_CTL_IGNORE_ILLEGAL_REQUEST |
1925 XS_CTL_IGNORE_NOT_READY |
1926 XS_CTL_IGNORE_MEDIA_CHANGE);
1927 }
1928 }
1929
1930 /*
1931 * scsipi_adapter_addref:
1932 *
1933 * Add a reference to the adapter pointed to by the provided
1934 * link, enabling the adapter if necessary.
1935 */
1936 int
1937 scsipi_adapter_addref(adapt)
1938 struct scsipi_adapter *adapt;
1939 {
1940 int s, error = 0;
1941
1942 s = splbio();
1943 if (adapt->adapt_refcnt++ == 0 && adapt->adapt_enable != NULL) {
1944 error = (*adapt->adapt_enable)(adapt->adapt_dev, 1);
1945 if (error)
1946 adapt->adapt_refcnt--;
1947 }
1948 splx(s);
1949 return (error);
1950 }
1951
1952 /*
1953 * scsipi_adapter_delref:
1954 *
1955 * Delete a reference to the adapter pointed to by the provided
1956 * link, disabling the adapter if possible.
1957 */
1958 void
1959 scsipi_adapter_delref(adapt)
1960 struct scsipi_adapter *adapt;
1961 {
1962 int s;
1963
1964 s = splbio();
1965 if (adapt->adapt_refcnt-- == 1 && adapt->adapt_enable != NULL)
1966 (void) (*adapt->adapt_enable)(adapt->adapt_dev, 0);
1967 splx(s);
1968 }
1969
/*
 * Table of sync factors whose periods do not follow the simple
 * factor * 4ns rule.  Periods are kept in units of ns * 10 so a
 * fractional period like 30.3ns can be represented exactly.
 */
struct scsipi_syncparam {
	int	ss_factor;
	int	ss_period;	/* ns * 10 */
} scsipi_syncparams[] = {
	{ 0x0a, 250 },		/* 25.0 ns */
	{ 0x0b, 303 },		/* 30.3 ns */
	{ 0x0c, 500 },		/* 50.0 ns */
};
const int scsipi_nsyncparams =
    sizeof(scsipi_syncparams) / sizeof(scsipi_syncparams[0]);

/*
 * scsipi_sync_period_to_factor:
 *
 *	Convert a sync period (ns * 10) to a sync factor byte.
 *	A period at or below a table entry's maps to that entry's
 *	factor; slower periods use the period / 4ns rule.
 */
int
scsipi_sync_period_to_factor(period)
	int period;		/* ns * 10 */
{
	int idx;

	for (idx = 0; idx < scsipi_nsyncparams; idx++)
		if (period <= scsipi_syncparams[idx].ss_period)
			return (scsipi_syncparams[idx].ss_factor);

	return ((period / 10) / 4);
}

/*
 * scsipi_sync_factor_to_period:
 *
 *	Convert a sync factor byte to a period in ns * 10.
 */
int
scsipi_sync_factor_to_period(factor)
	int factor;
{
	int idx;

	for (idx = 0; idx < scsipi_nsyncparams; idx++)
		if (factor == scsipi_syncparams[idx].ss_factor)
			return (scsipi_syncparams[idx].ss_period);

	return ((factor * 4) * 10);
}

/*
 * scsipi_sync_factor_to_freq:
 *
 *	Convert a sync factor byte to a frequency in kHz
 *	(10000000 / (ns * 10) == 1000000 / ns).
 */
int
scsipi_sync_factor_to_freq(factor)
	int factor;
{
	int idx;

	for (idx = 0; idx < scsipi_nsyncparams; idx++)
		if (factor == scsipi_syncparams[idx].ss_factor)
			return (10000000 / scsipi_syncparams[idx].ss_period);

	return (10000000 / ((factor * 4) * 10));
}
2022
2023 #ifdef SCSIPI_DEBUG
2024 /*
2025 * Given a scsipi_xfer, dump the request, in all it's glory
2026 */
2027 void
2028 show_scsipi_xs(xs)
2029 struct scsipi_xfer *xs;
2030 {
2031
2032 printf("xs(%p): ", xs);
2033 printf("xs_control(0x%08x)", xs->xs_control);
2034 printf("xs_status(0x%08x)", xs->xs_status);
2035 printf("periph(%p)", xs->xs_periph);
2036 printf("retr(0x%x)", xs->xs_retries);
2037 printf("timo(0x%x)", xs->timeout);
2038 printf("cmd(%p)", xs->cmd);
2039 printf("len(0x%x)", xs->cmdlen);
2040 printf("data(%p)", xs->data);
2041 printf("len(0x%x)", xs->datalen);
2042 printf("res(0x%x)", xs->resid);
2043 printf("err(0x%x)", xs->error);
2044 printf("bp(%p)", xs->bp);
2045 show_scsipi_cmd(xs);
2046 }
2047
2048 void
2049 show_scsipi_cmd(xs)
2050 struct scsipi_xfer *xs;
2051 {
2052 u_char *b = (u_char *) xs->cmd;
2053 int i = 0;
2054
2055 scsipi_printaddr(xs->xs_periph);
2056 printf(" command: ");
2057
2058 if ((xs->xs_control & XS_CTL_RESET) == 0) {
2059 while (i < xs->cmdlen) {
2060 if (i)
2061 printf(",");
2062 printf("0x%x", b[i++]);
2063 }
2064 printf("-[%d bytes]\n", xs->datalen);
2065 if (xs->datalen)
2066 show_mem(xs->data, min(64, xs->datalen));
2067 } else
2068 printf("-RESET-\n");
2069 }
2070
2071 void
2072 show_mem(address, num)
2073 u_char *address;
2074 int num;
2075 {
2076 int x;
2077
2078 printf("------------------------------");
2079 for (x = 0; x < num; x++) {
2080 if ((x % 16) == 0)
2081 printf("\n%03d: ", x);
2082 printf("%02x ", *address++);
2083 }
2084 printf("\n------------------------------\n");
2085 }
2086 #endif /* SCSIPI_DEBUG */
2087