/*	$NetBSD: scsipi_base.c,v 1.26.2.8 2000/11/20 09:59:26 bouyer Exp $	*/

/*-
 * Copyright (c) 1998, 1999, 2000 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Charles M. Hannum; by Jason R. Thorpe of the Numerical Aerospace
 * Simulation Facility, NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "opt_scsi.h"

#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/buf.h>
#include <sys/uio.h>
#include <sys/malloc.h>
#include <sys/pool.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/proc.h>
#include <sys/kthread.h>

#include <dev/scsipi/scsipi_all.h>
#include <dev/scsipi/scsipi_disk.h>
#include <dev/scsipi/scsipiconf.h>
#include <dev/scsipi/scsipi_base.h>

#include <dev/scsipi/scsi_all.h>
#include <dev/scsipi/scsi_message.h>

int	scsipi_complete __P((struct scsipi_xfer *));
int	scsipi_enqueue __P((struct scsipi_xfer *));
void	scsipi_run_queue __P((struct scsipi_channel *chan));

void	scsipi_completion_thread __P((void *));

void	scsipi_get_tag __P((struct scsipi_xfer *));
void	scsipi_put_tag __P((struct scsipi_xfer *));

int	scsipi_get_resource __P((struct scsipi_channel *));
void	scsipi_put_resource __P((struct scsipi_channel *));
__inline int scsipi_grow_resources __P((struct scsipi_channel *));

void	scsipi_async_event_max_openings __P((struct scsipi_channel *,
	    struct scsipi_max_openings *));
void	scsipi_async_event_xfer_mode __P((struct scsipi_channel *,
	    struct scsipi_xfer_mode *));

struct pool scsipi_xfer_pool;

/*
 * scsipi_init:
 *
 *	Called when a scsibus or atapibus is attached to the system
 *	to initialize shared data structures.
 */
void
scsipi_init()
{
	static int scsipi_init_done;

	if (scsipi_init_done)
		return;
	scsipi_init_done = 1;

	/* Initialize the scsipi_xfer pool. */
	pool_init(&scsipi_xfer_pool, sizeof(struct scsipi_xfer), 0,
	    0, 0, "scxspl", 0, NULL, NULL, M_DEVBUF);
}

/*
 * scsipi_channel_init:
 *
 *	Initialize a scsipi_channel when it is attached.
 */
void
scsipi_channel_init(chan)
	struct scsipi_channel *chan;
{
	size_t nbytes;
	int i;

	/* Initialize shared data. */
	scsipi_init();

	/* Initialize the queues. */
	TAILQ_INIT(&chan->chan_queue);
	TAILQ_INIT(&chan->chan_complete);

	nbytes = chan->chan_ntargets * sizeof(struct scsipi_periph **);
	chan->chan_periphs = malloc(nbytes, M_DEVBUF, M_WAITOK);

	nbytes = chan->chan_nluns * sizeof(struct scsipi_periph *);
	for (i = 0; i < chan->chan_ntargets; i++) {
		chan->chan_periphs[i] = malloc(nbytes, M_DEVBUF, M_WAITOK);
		memset(chan->chan_periphs[i], 0, nbytes);
	}

	/*
	 * Create the asynchronous completion thread.
	 */
	kthread_create(scsipi_create_completion_thread, chan);
}
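
/*
 * Layout sketch: chan_periphs behaves as a two-dimensional table indexed
 * by [target][lun]; for example, a periph attached at target 2, LUN 0
 * lives at chan->chan_periphs[2][0].  The insert/remove/lookup helpers
 * below wrap this indexing with splbio() protection and bounds checks.
 */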

/*
 * scsipi_channel_shutdown:
 *
 *	Shutdown a scsipi_channel.
 */
void
scsipi_channel_shutdown(chan)
	struct scsipi_channel *chan;
{

	/*
	 * Shut down the completion thread.
	 */
	chan->chan_flags |= SCSIPI_CHAN_SHUTDOWN;
	wakeup(&chan->chan_complete);

	/*
	 * Now wait for the thread to exit.
	 */
	while (chan->chan_thread != NULL)
		(void) tsleep(&chan->chan_thread, PRIBIO, "scshut", 0);
}

/*
 * scsipi_insert_periph:
 *
 *	Insert a periph into the channel.
 */
void
scsipi_insert_periph(chan, periph)
	struct scsipi_channel *chan;
	struct scsipi_periph *periph;
{
	int s;

	s = splbio();
	chan->chan_periphs[periph->periph_target][periph->periph_lun] = periph;
	splx(s);
}

/*
 * scsipi_remove_periph:
 *
 *	Remove a periph from the channel.
 */
void
scsipi_remove_periph(chan, periph)
	struct scsipi_channel *chan;
	struct scsipi_periph *periph;
{
	int s;

	s = splbio();
	chan->chan_periphs[periph->periph_target][periph->periph_lun] = NULL;
	splx(s);
}

/*
 * scsipi_lookup_periph:
 *
 *	Lookup a periph on the specified channel.
 */
struct scsipi_periph *
scsipi_lookup_periph(chan, target, lun)
	struct scsipi_channel *chan;
	int target, lun;
{
	struct scsipi_periph *periph;
	int s;

	if (target >= chan->chan_ntargets ||
	    lun >= chan->chan_nluns)
		return (NULL);

	s = splbio();
	periph = chan->chan_periphs[target][lun];
	splx(s);

	return (periph);
}
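
/*
 * Usage sketch: callers probe an I_T_L nexus without tracking attach
 * state themselves:
 *
 *	periph = scsipi_lookup_periph(chan, target, lun);
 *	if (periph == NULL)
 *		continue;		(nothing attached there)
 *
 * See scsipi_async_event_max_openings() below for a real caller.
 */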

/*
 * scsipi_get_resource:
 *
 *	Allocate a single xfer `resource' from the channel.
 *
 *	NOTE: Must be called at splbio().
 */
int
scsipi_get_resource(chan)
	struct scsipi_channel *chan;
{
	struct scsipi_adapter *adapt = chan->chan_adapter;

	if (chan->chan_flags & SCSIPI_CHAN_OPENINGS) {
		if (chan->chan_openings > 0) {
			chan->chan_openings--;
			return (1);
		}
		return (0);
	}

	if (adapt->adapt_openings > 0) {
		adapt->adapt_openings--;
		return (1);
	}
	return (0);
}

/*
 * scsipi_grow_resources:
 *
 *	Attempt to grow resources for a channel.  If this succeeds,
 *	we allocate one for our caller.
 *
 *	NOTE: Must be called at splbio().
 */
__inline int
scsipi_grow_resources(chan)
	struct scsipi_channel *chan;
{

	if (chan->chan_flags & SCSIPI_CHAN_CANGROW) {
		scsipi_adapter_request(chan, ADAPTER_REQ_GROW_RESOURCES, NULL);
		return (scsipi_get_resource(chan));
	}

	return (0);
}

/*
 * scsipi_put_resource:
 *
 *	Free a single xfer `resource' to the channel.
 *
 *	NOTE: Must be called at splbio().
 */
void
scsipi_put_resource(chan)
	struct scsipi_channel *chan;
{
	struct scsipi_adapter *adapt = chan->chan_adapter;

	if (chan->chan_flags & SCSIPI_CHAN_OPENINGS)
		chan->chan_openings++;
	else
		adapt->adapt_openings++;
}

/*
 * scsipi_get_tag:
 *
 *	Get a tag ID for the specified xfer.
 *
 *	NOTE: Must be called at splbio().
 */
void
scsipi_get_tag(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_periph *periph = xs->xs_periph;
	int word, bit, tag;

	for (word = 0; word < PERIPH_NTAGWORDS; word++) {
		bit = ffs(periph->periph_freetags[word]);
		if (bit != 0)
			break;
	}
#ifdef DIAGNOSTIC
	if (word == PERIPH_NTAGWORDS) {
		scsipi_printaddr(periph);
		printf("no free tags\n");
		panic("scsipi_get_tag");
	}
#endif

	bit -= 1;
	periph->periph_freetags[word] &= ~(1 << bit);
	tag = (word << 5) | bit;

	/* XXX Should eventually disallow this completely. */
	if (tag >= periph->periph_openings) {
		scsipi_printaddr(periph);
		printf("WARNING: tag %d greater than available openings %d\n",
		    tag, periph->periph_openings);
	}

	xs->xs_tag_id = tag;
}

/*
 * scsipi_put_tag:
 *
 *	Put the tag ID for the specified xfer back into the pool.
 *
 *	NOTE: Must be called at splbio().
 */
void
scsipi_put_tag(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_periph *periph = xs->xs_periph;
	int word, bit;

	word = xs->xs_tag_id >> 5;
	bit = xs->xs_tag_id & 0x1f;

	periph->periph_freetags[word] |= (1 << bit);
}
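
/*
 * Tag encoding, worked example: tags are drawn from a bitmap of 32-bit
 * words, so tag = (word << 5) | bit and, inversely, word = tag >> 5,
 * bit = tag & 0x1f.  Tag 37 therefore maps to word 1, bit 5.  ffs()
 * returns a 1-based bit index (0 meaning "no bit set"), hence the
 * `bit -= 1' above.
 */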

/*
 * scsipi_get_xs:
 *
 *	Allocate an xfer descriptor and associate it with the
 *	specified peripheral.  If the peripheral has no more
 *	available command openings, we either block waiting for
 *	one to become available, or fail.
 */
struct scsipi_xfer *
scsipi_get_xs(periph, flags)
	struct scsipi_periph *periph;
	int flags;
{
	struct scsipi_xfer *xs;
	int s;

	SC_DEBUG(periph, SCSIPI_DB3, ("scsipi_get_xs\n"));

	/*
	 * If we're cold, make sure we poll.
	 */
	if (cold)
		flags |= XS_CTL_NOSLEEP | XS_CTL_POLL;

#ifdef DIAGNOSTIC
	/*
	 * URGENT commands can never be ASYNC.
	 */
	if ((flags & (XS_CTL_URGENT|XS_CTL_ASYNC)) ==
	    (XS_CTL_URGENT|XS_CTL_ASYNC)) {
		scsipi_printaddr(periph);
		printf("URGENT and ASYNC\n");
		panic("scsipi_get_xs");
	}
#endif

	s = splbio();
	/*
	 * Wait for a command opening to become available.  Rules:
	 *
	 *	- All xfers must wait for an available opening.
	 *	  Exception: URGENT xfers can proceed when
	 *	  active == openings, because we use the opening
	 *	  of the command we're recovering for.
	 *
	 *	- If the periph is recovering, only URGENT xfers may
	 *	  proceed.
	 *
	 *	- If the periph is currently executing a recovery
	 *	  command, URGENT commands must block, because only
	 *	  one recovery command can execute at a time.
	 */
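	/*
	 * Worked example: with periph_openings == 4 and four normal
	 * commands outstanding (active == 4), a fifth normal xfer waits.
	 * An URGENT (recovery) xfer may still proceed, reusing the
	 * opening of the failed command it is recovering for; a second
	 * URGENT xfer would block until PERIPH_RECOVERY_ACTIVE clears.
	 */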
	for (;;) {
		if (flags & XS_CTL_URGENT) {
			if (periph->periph_active > periph->periph_openings ||
			    (periph->periph_flags &
			     PERIPH_RECOVERY_ACTIVE) != 0)
				goto wait_for_opening;
			periph->periph_flags |= PERIPH_RECOVERY_ACTIVE;
			break;
		}
		if (periph->periph_active >= periph->periph_openings ||
		    (periph->periph_flags & PERIPH_RECOVERING) != 0)
			goto wait_for_opening;
		periph->periph_active++;
		break;

 wait_for_opening:
		if (flags & XS_CTL_NOSLEEP) {
			splx(s);
			return (NULL);
		}
		SC_DEBUG(periph, SCSIPI_DB3, ("sleeping\n"));
		periph->periph_flags |= PERIPH_WAITING;
		(void) tsleep(periph, PRIBIO, "getxs", 0);
	}
	SC_DEBUG(periph, SCSIPI_DB3, ("calling pool_get\n"));
	xs = pool_get(&scsipi_xfer_pool,
	    ((flags & XS_CTL_NOSLEEP) != 0 ? PR_NOWAIT : PR_WAITOK));
	if (xs == NULL) {
		if (flags & XS_CTL_URGENT)
			periph->periph_flags &= ~PERIPH_RECOVERY_ACTIVE;
		else
			periph->periph_active--;
		scsipi_printaddr(periph);
		printf("unable to allocate %sscsipi_xfer\n",
		    (flags & XS_CTL_URGENT) ? "URGENT " : "");
	}
	splx(s);

	SC_DEBUG(periph, SCSIPI_DB3, ("returning\n"));

	if (xs != NULL) {
		memset(xs, 0, sizeof(*xs));
		callout_init(&xs->xs_callout);
		xs->xs_periph = periph;
		xs->xs_control = flags;
		xs->xs_status = 0;
		s = splbio();
		TAILQ_INSERT_TAIL(&periph->periph_xferq, xs, device_q);
		splx(s);
	}
	return (xs);
}

/*
 * scsipi_put_xs:
 *
 *	Release an xfer descriptor, decreasing the outstanding command
 *	count for the peripheral.  If there is a thread waiting for
 *	an opening, wake it up.  If not, kick any queued I/O the
 *	peripheral may have.
 *
 *	NOTE: Must be called at splbio().
 */
void
scsipi_put_xs(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_periph *periph = xs->xs_periph;
	int flags = xs->xs_control;

	SC_DEBUG(periph, SCSIPI_DB3, ("scsipi_free_xs\n"));

	TAILQ_REMOVE(&periph->periph_xferq, xs, device_q);
	pool_put(&scsipi_xfer_pool, xs);

#ifdef DIAGNOSTIC
	if ((periph->periph_flags & PERIPH_RECOVERY_ACTIVE) != 0 &&
	    periph->periph_active == 0) {
		scsipi_printaddr(periph);
		printf("recovery without a command to recover for\n");
		panic("scsipi_put_xs");
	}
#endif

	if (flags & XS_CTL_URGENT)
		periph->periph_flags &= ~PERIPH_RECOVERY_ACTIVE;
	else
		periph->periph_active--;
	if (periph->periph_active == 0 &&
	    (periph->periph_flags & PERIPH_WAITDRAIN) != 0) {
		periph->periph_flags &= ~PERIPH_WAITDRAIN;
		wakeup(&periph->periph_active);
	}

	if (periph->periph_flags & PERIPH_WAITING) {
		periph->periph_flags &= ~PERIPH_WAITING;
		wakeup(periph);
	} else {
		if (periph->periph_switch->psw_start != NULL) {
			SC_DEBUG(periph, SCSIPI_DB2,
			    ("calling private start()\n"));
			(*periph->periph_switch->psw_start)(periph);
		}
	}
}

/*
 * scsipi_channel_freeze:
 *
 *	Freeze a channel's xfer queue.
 */
void
scsipi_channel_freeze(chan, count)
	struct scsipi_channel *chan;
	int count;
{
	int s;

	s = splbio();
	chan->chan_qfreeze += count;
	splx(s);
}

/*
 * scsipi_channel_thaw:
 *
 *	Thaw a channel's xfer queue.
 */
void
scsipi_channel_thaw(chan, count)
	struct scsipi_channel *chan;
	int count;
{
	int s;

	s = splbio();
	chan->chan_qfreeze -= count;
	splx(s);
}

/*
 * scsipi_channel_timed_thaw:
 *
 *	Thaw a channel after some time has expired.
 */
void
scsipi_channel_timed_thaw(arg)
	void *arg;
{
	struct scsipi_channel *chan = arg;

	scsipi_channel_thaw(chan, 1);

	/*
	 * Kick the channel's queue here.  Note, we're running in
	 * interrupt context (softclock), so the adapter driver
	 * had better not sleep.
	 */
	scsipi_run_queue(chan);
}

/*
 * scsipi_periph_freeze:
 *
 *	Freeze a device's xfer queue.
 */
void
scsipi_periph_freeze(periph, count)
	struct scsipi_periph *periph;
	int count;
{
	int s;

	s = splbio();
	periph->periph_qfreeze += count;
	splx(s);
}

/*
 * scsipi_periph_thaw:
 *
 *	Thaw a device's xfer queue.
 */
void
scsipi_periph_thaw(periph, count)
	struct scsipi_periph *periph;
	int count;
{
	int s;

	s = splbio();
	periph->periph_qfreeze -= count;
	if (periph->periph_qfreeze == 0 &&
	    (periph->periph_flags & PERIPH_WAITING) != 0)
		wakeup(periph);
	splx(s);
}

/*
 * scsipi_periph_timed_thaw:
 *
 *	Thaw a device after some time has expired.
 */
void
scsipi_periph_timed_thaw(arg)
	void *arg;
{
	struct scsipi_periph *periph = arg;

	callout_stop(&periph->periph_callout);
	scsipi_periph_thaw(periph, 1);

	/*
	 * Kick the channel's queue here.  Note, we're running in
	 * interrupt context (softclock), so the adapter driver
	 * had better not sleep.
	 */
	scsipi_run_queue(periph->periph_channel);
}
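
/*
 * Usage sketch: a periph is typically frozen while an error is pending
 * and thawed either directly or on a timer.  The retry path in
 * scsipi_complete() below arms the timed thaw like this:
 *
 *	scsipi_periph_freeze(periph, 1);
 *	callout_reset(&periph->periph_callout,
 *	    hz, scsipi_periph_timed_thaw, periph);
 *
 * i.e. the queue stays frozen for roughly one second and then restarts.
 */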

/*
 * scsipi_wait_drain:
 *
 *	Wait for a periph's pending xfers to drain.
 */
void
scsipi_wait_drain(periph)
	struct scsipi_periph *periph;
{
	int s;

	s = splbio();
	while (periph->periph_active != 0) {
		periph->periph_flags |= PERIPH_WAITDRAIN;
		(void) tsleep(&periph->periph_active, PRIBIO, "sxdrn", 0);
	}
	splx(s);
}

/*
 * scsipi_kill_pending:
 *
 *	Kill off all pending xfers for a periph.
 *
 *	NOTE: Must be called at splbio().
 */
void
scsipi_kill_pending(periph)
	struct scsipi_periph *periph;
{

	(*periph->periph_channel->chan_bustype->bustype_kill_pending)(periph);
#ifdef DIAGNOSTIC
	if (TAILQ_FIRST(&periph->periph_xferq) != NULL)
		panic("scsipi_kill_pending");
#endif
	scsipi_wait_drain(periph);
}

/*
 * scsipi_interpret_sense:
 *
 *	Look at the returned sense and act on the error, determining
 *	the unix error number to pass back.  (0 = report no error)
 *
 *	NOTE: If we return ERESTART, we are expected to have
 *	thawed the device!
 *
 *	THIS IS THE DEFAULT ERROR HANDLER FOR SCSI DEVICES.
 */
int
scsipi_interpret_sense(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_sense_data *sense;
	struct scsipi_periph *periph = xs->xs_periph;
	u_int8_t key;
	u_int32_t info;
	int error;
#ifndef SCSIVERBOSE
	static char *error_mes[] = {
		"soft error (corrected)",
		"not ready", "medium error",
		"non-media hardware failure", "illegal request",
		"unit attention", "readonly device",
		"no data found", "vendor unique",
		"copy aborted", "command aborted",
		"search returned equal", "volume overflow",
		"verify miscompare", "unknown error key"
	};
#endif

	sense = &xs->sense.scsi_sense;
#ifdef SCSIPI_DEBUG
	if (periph->periph_flags & SCSIPI_DB1) {
		int count;
		scsipi_printaddr(periph);
		printf(" sense debug information:\n");
		printf("\tcode 0x%x valid 0x%x\n",
		    sense->error_code & SSD_ERRCODE,
		    sense->error_code & SSD_ERRCODE_VALID ? 1 : 0);
		printf("\tseg 0x%x key 0x%x ili 0x%x eom 0x%x fmark 0x%x\n",
		    sense->segment,
		    sense->flags & SSD_KEY,
		    sense->flags & SSD_ILI ? 1 : 0,
		    sense->flags & SSD_EOM ? 1 : 0,
		    sense->flags & SSD_FILEMARK ? 1 : 0);
		printf("\ninfo: 0x%x 0x%x 0x%x 0x%x followed by %d "
		    "extra bytes\n",
		    sense->info[0],
		    sense->info[1],
		    sense->info[2],
		    sense->info[3],
		    sense->extra_len);
		printf("\textra: ");
		for (count = 0; count < ADD_BYTES_LIM(sense); count++)
			printf("0x%x ", sense->cmd_spec_info[count]);
		printf("\n");
	}
#endif

	/*
	 * If the periph has its own error handler, call it first.
	 * If it returns a legit error value, return that, otherwise
	 * it wants us to continue with normal error processing.
	 */
	if (periph->periph_switch->psw_error != NULL) {
		SC_DEBUG(periph, SCSIPI_DB2,
		    ("calling private err_handler()\n"));
		error = (*periph->periph_switch->psw_error)(xs);
		if (error != EJUSTRETURN)
			return (error);
	}
	/* otherwise use the default */
	switch (sense->error_code & SSD_ERRCODE) {
		/*
		 * If it's code 70, use the extended stuff and
		 * interpret the key
		 */
	case 0x71:		/* delayed error */
		scsipi_printaddr(periph);
		key = sense->flags & SSD_KEY;
		printf(" DEFERRED ERROR, key = 0x%x\n", key);
		/* FALLTHROUGH */
	case 0x70:
		if ((sense->error_code & SSD_ERRCODE_VALID) != 0)
			info = _4btol(sense->info);
		else
			info = 0;
		key = sense->flags & SSD_KEY;

		switch (key) {
		case SKEY_NO_SENSE:
		case SKEY_RECOVERED_ERROR:
			if (xs->resid == xs->datalen && xs->datalen) {
				/*
				 * Why is this here?
				 */
				xs->resid = 0;	/* not short read */
			}
		case SKEY_EQUAL:
			error = 0;
			break;
		case SKEY_NOT_READY:
			if ((periph->periph_flags & PERIPH_REMOVABLE) != 0)
				periph->periph_flags &= ~PERIPH_MEDIA_LOADED;
			if ((xs->xs_control & XS_CTL_IGNORE_NOT_READY) != 0)
				return (0);
			if (sense->add_sense_code == 0x3A &&
			    sense->add_sense_code_qual == 0x00)
				error = ENODEV;	/* Medium not present */
			else
				error = EIO;
			if ((xs->xs_control & XS_CTL_SILENT) != 0)
				return (error);
			break;
		case SKEY_ILLEGAL_REQUEST:
			if ((xs->xs_control &
			     XS_CTL_IGNORE_ILLEGAL_REQUEST) != 0)
				return (0);
			/*
			 * Handle the case where a device reports
			 * Logical Unit Not Supported during discovery.
			 */
			if ((xs->xs_control & XS_CTL_DISCOVERY) != 0 &&
			    sense->add_sense_code == 0x25 &&
			    sense->add_sense_code_qual == 0x00)
				return (EINVAL);
			if ((xs->xs_control & XS_CTL_SILENT) != 0)
				return (EIO);
			error = EINVAL;
			break;
		case SKEY_UNIT_ATTENTION:
			if (sense->add_sense_code == 0x29 &&
			    sense->add_sense_code_qual == 0x00) {
				/* device or bus reset */
				return (ERESTART);
			}
			if ((periph->periph_flags & PERIPH_REMOVABLE) != 0)
				periph->periph_flags &= ~PERIPH_MEDIA_LOADED;
			if ((xs->xs_control &
			     XS_CTL_IGNORE_MEDIA_CHANGE) != 0 ||
			    /* XXX Should reupload any transient state. */
			    (periph->periph_flags &
			     PERIPH_REMOVABLE) == 0) {
				return (ERESTART);
			}
			if ((xs->xs_control & XS_CTL_SILENT) != 0)
				return (EIO);
			error = EIO;
			break;
		case SKEY_WRITE_PROTECT:
			error = EROFS;
			break;
		case SKEY_BLANK_CHECK:
			error = 0;
			break;
		case SKEY_ABORTED_COMMAND:
			error = ERESTART;
			break;
		case SKEY_VOLUME_OVERFLOW:
			error = ENOSPC;
			break;
		default:
			error = EIO;
			break;
		}

#ifdef SCSIVERBOSE
		if (key && (xs->xs_control & XS_CTL_SILENT) == 0)
			scsipi_print_sense(xs, 0);
#else
		if (key) {
			scsipi_printaddr(periph);
			printf("%s", error_mes[key - 1]);
			if ((sense->error_code & SSD_ERRCODE_VALID) != 0) {
				switch (key) {
				case SKEY_NOT_READY:
				case SKEY_ILLEGAL_REQUEST:
				case SKEY_UNIT_ATTENTION:
				case SKEY_WRITE_PROTECT:
					break;
				case SKEY_BLANK_CHECK:
					printf(", requested size: %d (decimal)",
					    info);
					break;
				case SKEY_ABORTED_COMMAND:
					if (xs->xs_retries)
						printf(", retrying");
					printf(", cmd 0x%x, info 0x%x",
					    xs->cmd->opcode, info);
					break;
				default:
					printf(", info = %d (decimal)", info);
				}
			}
			if (sense->extra_len != 0) {
				int n;
				printf(", data =");
				for (n = 0; n < sense->extra_len; n++)
					printf(" %02x",
					    sense->cmd_spec_info[n]);
			}
			printf("\n");
		}
#endif
		return (error);

	/*
	 * Not code 70, just report it
	 */
	default:
#if defined(SCSIDEBUG) || defined(DEBUG)
	{
		static char *uc = "undecodable sense error";
		int i;
		u_int8_t *cptr = (u_int8_t *) sense;
		scsipi_printaddr(periph);
		if (xs->cmd == &xs->cmdstore) {
			printf("%s for opcode 0x%x, data=",
			    uc, xs->cmdstore.opcode);
		} else {
			printf("%s, data=", uc);
		}
		for (i = 0; i < sizeof (*sense); i++)
			printf(" 0x%02x", *(cptr++) & 0xff);
		printf("\n");
	}
#else

		scsipi_printaddr(periph);
		printf("Sense Error Code 0x%x",
		    sense->error_code & SSD_ERRCODE);
		if ((sense->error_code & SSD_ERRCODE_VALID) != 0) {
			struct scsipi_sense_data_unextended *usense =
			    (struct scsipi_sense_data_unextended *)sense;
			printf(" at block no. %d (decimal)",
			    _3btol(usense->block));
		}
		printf("\n");
#endif
		return (EIO);
	}
}

/*
 * scsipi_size:
 *
 *	Find out from the device what its capacity is.
 */
u_long
scsipi_size(periph, flags)
	struct scsipi_periph *periph;
	int flags;
{
	struct scsipi_read_cap_data rdcap;
	struct scsipi_read_capacity scsipi_cmd;

	bzero(&scsipi_cmd, sizeof(scsipi_cmd));
	scsipi_cmd.opcode = READ_CAPACITY;

	/*
	 * If the command works, interpret the result as a 4 byte
	 * number of blocks
	 */
	if (scsipi_command(periph, (struct scsipi_generic *)&scsipi_cmd,
	    sizeof(scsipi_cmd), (u_char *)&rdcap, sizeof(rdcap),
	    SCSIPIRETRIES, 20000, NULL,
	    flags | XS_CTL_DATA_IN | XS_CTL_DATA_ONSTACK) != 0) {
		scsipi_printaddr(periph);
		printf("could not get size\n");
		return (0);
	}

	return (_4btol(rdcap.addr) + 1);
}
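
/*
 * Note: READ CAPACITY returns the address of the last logical block,
 * so the capacity in blocks is that value plus one (hence the
 * `_4btol(rdcap.addr) + 1' above).
 */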

/*
 * scsipi_test_unit_ready:
 *
 *	Issue a `test unit ready' request.
 */
int
scsipi_test_unit_ready(periph, flags)
	struct scsipi_periph *periph;
	int flags;
{
	struct scsipi_test_unit_ready scsipi_cmd;

	/* some ATAPI drives don't support TEST_UNIT_READY. Sigh */
	if (periph->periph_quirks & PQUIRK_NOTUR)
		return (0);

	bzero(&scsipi_cmd, sizeof(scsipi_cmd));
	scsipi_cmd.opcode = TEST_UNIT_READY;

	return (scsipi_command(periph,
	    (struct scsipi_generic *)&scsipi_cmd, sizeof(scsipi_cmd),
	    0, 0, SCSIPIRETRIES, 10000, NULL, flags));
}

/*
 * scsipi_inquire:
 *
 *	Ask the device about itself.
 */
int
scsipi_inquire(periph, inqbuf, flags)
	struct scsipi_periph *periph;
	struct scsipi_inquiry_data *inqbuf;
	int flags;
{
	struct scsipi_inquiry scsipi_cmd;

	bzero(&scsipi_cmd, sizeof(scsipi_cmd));
	scsipi_cmd.opcode = INQUIRY;
	scsipi_cmd.length = sizeof(struct scsipi_inquiry_data);

	return (scsipi_command(periph,
	    (struct scsipi_generic *) &scsipi_cmd, sizeof(scsipi_cmd),
	    (u_char *) inqbuf, sizeof(struct scsipi_inquiry_data),
	    SCSIPIRETRIES, 10000, NULL, XS_CTL_DATA_IN | flags));
}

/*
 * scsipi_prevent:
 *
 *	Prevent or allow the user to remove the media
 */
int
scsipi_prevent(periph, type, flags)
	struct scsipi_periph *periph;
	int type, flags;
{
	struct scsipi_prevent scsipi_cmd;

	if (periph->periph_quirks & PQUIRK_NODOORLOCK)
		return (0);

	bzero(&scsipi_cmd, sizeof(scsipi_cmd));
	scsipi_cmd.opcode = PREVENT_ALLOW;
	scsipi_cmd.how = type;

	return (scsipi_command(periph,
	    (struct scsipi_generic *) &scsipi_cmd, sizeof(scsipi_cmd),
	    0, 0, SCSIPIRETRIES, 5000, NULL, flags));
}

/*
 * scsipi_start:
 *
 *	Send a START UNIT.
 */
int
scsipi_start(periph, type, flags)
	struct scsipi_periph *periph;
	int type, flags;
{
	struct scsipi_start_stop scsipi_cmd;

	if (periph->periph_quirks & PQUIRK_NOSTARTUNIT)
		return 0;

	bzero(&scsipi_cmd, sizeof(scsipi_cmd));
	scsipi_cmd.opcode = START_STOP;
	scsipi_cmd.byte2 = 0x00;
	scsipi_cmd.how = type;

	return (scsipi_command(periph,
	    (struct scsipi_generic *) &scsipi_cmd, sizeof(scsipi_cmd),
	    0, 0, SCSIPIRETRIES, (type & SSS_START) ? 60000 : 10000,
	    NULL, flags));
}

/*
 * scsipi_done:
 *
 *	This routine is called by an adapter's interrupt handler when
 *	an xfer is completed.
 */
void
scsipi_done(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_periph *periph = xs->xs_periph;
	struct scsipi_channel *chan = periph->periph_channel;
	int s, freezecnt;

	SC_DEBUG(periph, SCSIPI_DB2, ("scsipi_done\n"));
#ifdef SCSIPI_DEBUG
	if (periph->periph_dbflags & SCSIPI_DB1)
		show_scsipi_cmd(xs);
#endif

	s = splbio();
	/*
	 * The resource this command was using is now free.
	 */
	scsipi_put_resource(chan);

	/*
	 * If the command was tagged, free the tag.
	 */
	if (XS_CTL_TAGTYPE(xs) != 0)
		scsipi_put_tag(xs);

	/* Mark the command as `done'. */
	xs->xs_status |= XS_STS_DONE;

	/*
	 * If it's a user level request, bypass all usual completion
	 * processing, let the user work it out.  We take responsibility
	 * for freeing the xs (and restarting the device's queue) when
	 * the user returns.
	 */
	if ((xs->xs_control & XS_CTL_USERCMD) != 0) {
		splx(s);
		SC_DEBUG(periph, SCSIPI_DB3, ("calling user done()\n"));
		scsipi_user_done(xs);
		SC_DEBUG(periph, SCSIPI_DB3, ("returned from user done()\n"));
		goto out;
	}

#ifdef DIAGNOSTIC
	if ((xs->xs_control & (XS_CTL_ASYNC|XS_CTL_POLL)) ==
	    (XS_CTL_ASYNC|XS_CTL_POLL))
		panic("scsipi_done: ASYNC and POLL");
#endif

	/*
	 * If the xfer had an error of any sort, freeze the
	 * periph's queue.  Freeze it again if we were requested
	 * to do so in the xfer.
	 */
	freezecnt = 0;
	if (xs->error != XS_NOERROR)
		freezecnt++;
	if (xs->xs_control & XS_CTL_FREEZE_PERIPH)
		freezecnt++;
	if (freezecnt != 0)
		scsipi_periph_freeze(periph, freezecnt);

	/*
	 * If this was an xfer that was not to complete asynchronously,
	 * let the requesting thread perform error checking/handling
	 * in its context.
	 */
	if ((xs->xs_control & XS_CTL_ASYNC) == 0) {
		splx(s);
		/*
		 * If it's a polling job, just return, to unwind the
		 * call graph.  We don't need to restart the queue,
		 * because polling jobs are treated specially, and
		 * are really only used during crash dumps anyway
		 * (XXX or during boot-time autoconfiguration of
		 * ATAPI devices).
		 */
		if (xs->xs_control & XS_CTL_POLL)
			return;
		wakeup(xs);
		goto out;
	}

	/*
	 * Catch the extremely common case of I/O completing
	 * without error; no use in taking a context switch
	 * if we can handle it in interrupt context.
	 */
	if (xs->error == XS_NOERROR) {
		splx(s);
		(void) scsipi_complete(xs);
		goto out;
	}

	/*
	 * There is an error on this xfer.  Put it on the channel's
	 * completion queue, and wake up the completion thread.
	 */
	TAILQ_INSERT_TAIL(&chan->chan_complete, xs, channel_q);
	splx(s);
	wakeup(&chan->chan_complete);

 out:
	/*
	 * If there are more xfers on the channel's queue, attempt to
	 * run them.
	 */
	scsipi_run_queue(chan);
}

/*
 * scsipi_complete:
 *
 *	Completion of a scsipi_xfer.  This is the guts of scsipi_done().
 *
 *	NOTE: This routine MUST be called with valid thread context
 *	except for the case where the following two conditions are
 *	true:
 *
 *		xs->error == XS_NOERROR
 *		XS_CTL_ASYNC is set in xs->xs_control
 *
 *	The semantics of this routine can be tricky, so here is an
 *	explanation:
 *
 *		0		Xfer completed successfully.
 *
 *		ERESTART	Xfer had an error, but was restarted.
 *
 *		anything else	Xfer had an error, return value is Unix
 *				errno.
 *
 *	If the return value is anything but ERESTART:
 *
 *		- If XS_CTL_ASYNC is set, `xs' has been freed back to
 *		  the pool.
 *		- If there is a buf associated with the xfer,
 *		  it has been biodone()'d.
 */
int
scsipi_complete(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_periph *periph = xs->xs_periph;
	struct scsipi_channel *chan = periph->periph_channel;
	struct buf *bp;
	int error, s;

#ifdef DIAGNOSTIC
	if ((xs->xs_control & XS_CTL_ASYNC) != 0 && xs->bp == NULL)
		panic("scsipi_complete: XS_CTL_ASYNC but no buf");
#endif

	switch (xs->error) {
	case XS_NOERROR:
		error = 0;
		break;

	case XS_SENSE:
	case XS_SHORTSENSE:
		error = (*chan->chan_bustype->bustype_interpret_sense)(xs);
		break;

	case XS_RESOURCE_SHORTAGE:
		/*
		 * XXX Should freeze channel's queue.
		 */
		scsipi_printaddr(periph);
		printf("adapter resource shortage\n");
		/* FALLTHROUGH */

	case XS_BUSY:
		if (xs->error == XS_BUSY && xs->status == SCSI_QUEUE_FULL) {
			struct scsipi_max_openings mo;

			/*
			 * We set the openings to active - 1, assuming that
			 * the command that got us here is the first one that
			 * can't fit into the device's queue.  If that's not
			 * the case, I guess we'll find out soon enough.
			 */
			mo.mo_target = periph->periph_target;
			mo.mo_lun = periph->periph_lun;
			mo.mo_openings = periph->periph_active - 1;
#ifdef DIAGNOSTIC
			if (mo.mo_openings < 0) {
				scsipi_printaddr(periph);
				printf("QUEUE FULL resulted in < 0 openings\n");
				panic("scsipi_done");
			}
#endif
			if (mo.mo_openings == 0) {
				scsipi_printaddr(periph);
				printf("QUEUE FULL resulted in 0 openings\n");
				mo.mo_openings = 1;
			}
			scsipi_async_event(chan, ASYNC_EVENT_MAX_OPENINGS, &mo);
			error = ERESTART;
		} else if (xs->xs_retries != 0) {
			xs->xs_retries--;
			/*
			 * Wait one second, and try again.
			 */
			if (xs->xs_control & XS_CTL_POLL)
				delay(1000000);
			else {
				scsipi_periph_freeze(periph, 1);
				callout_reset(&periph->periph_callout,
				    hz, scsipi_periph_timed_thaw, periph);
			}
			error = ERESTART;
		} else
			error = EBUSY;
		break;

	case XS_TIMEOUT:
		if (xs->xs_retries != 0) {
			xs->xs_retries--;
			error = ERESTART;
		} else
			error = EIO;
		break;

	case XS_SELTIMEOUT:
		/* XXX Disable device? */
		error = EIO;
		break;

	case XS_RESET:
		if (xs->xs_retries != 0) {
			xs->xs_retries--;
			error = ERESTART;
		} else
			error = EIO;
		break;

	default:
		scsipi_printaddr(periph);
		printf("invalid return code from adapter: %d\n", xs->error);
		error = EIO;
		break;
	}

	s = splbio();
	if (error == ERESTART) {
		/*
		 * If we get here, the periph has been thawed and frozen
		 * again if we had to issue recovery commands.  Alternatively,
		 * it may have been frozen again and in a timed thaw.  In
		 * any case, we thaw the periph once we re-enqueue the
		 * command.  Once the periph is fully thawed, it will begin
		 * operation again.
		 */
		xs->error = XS_NOERROR;
		xs->status = SCSI_OK;
		xs->xs_status &= ~XS_STS_DONE;
		xs->xs_requeuecnt++;
		error = scsipi_enqueue(xs);
		if (error == 0) {
			scsipi_periph_thaw(periph, 1);
			splx(s);
			return (ERESTART);
		}
	}

	/*
	 * scsipi_done() freezes the queue if not XS_NOERROR.
	 * Thaw it here.
	 */
	if (xs->error != XS_NOERROR)
		scsipi_periph_thaw(periph, 1);

	if ((bp = xs->bp) != NULL) {
		if (error) {
			bp->b_error = error;
			bp->b_flags |= B_ERROR;
			bp->b_resid = bp->b_bcount;
		} else {
			bp->b_error = 0;
			bp->b_resid = xs->resid;
		}
		biodone(bp);
	}

	if (xs->xs_control & XS_CTL_ASYNC)
		scsipi_put_xs(xs);
	splx(s);

	return (error);
}
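
/*
 * Caller pattern: synchronous submitters loop on the ERESTART return,
 * as scsipi_execute_xs() does below:
 *
 *	error = scsipi_complete(xs);
 *	if (error == ERESTART)
 *		goto restarted;
 *
 * i.e. a restarted xfer is simply waited on again; only a final 0 or
 * errno value is passed back to the caller.
 */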

/*
 * scsipi_enqueue:
 *
 *	Enqueue an xfer on a channel.
 */
int
scsipi_enqueue(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_channel *chan = xs->xs_periph->periph_channel;
	struct scsipi_xfer *qxs;
	int s;

	s = splbio();

	/*
	 * If the xfer is to be polled, and there are already jobs on
	 * the queue, we can't proceed.
	 */
	if ((xs->xs_control & XS_CTL_POLL) != 0 &&
	    TAILQ_FIRST(&chan->chan_queue) != NULL) {
		splx(s);
		xs->error = XS_DRIVER_STUFFUP;
		return (EAGAIN);
	}

	/*
	 * If we have an URGENT xfer, it's an error recovery command
	 * and it should just go on the head of the channel's queue.
	 */
	if (xs->xs_control & XS_CTL_URGENT) {
		TAILQ_INSERT_HEAD(&chan->chan_queue, xs, channel_q);
		goto out;
	}

	/*
	 * If this xfer has already been on the queue before, we
	 * need to reinsert it in the correct order.  That order is:
	 *
	 *	Immediately before the first xfer for this periph
	 *	with a requeuecnt less than xs->xs_requeuecnt.
	 *
	 *	Failing that, at the end of the queue.  (We'll end up
	 *	there naturally.)
	 */
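	/*
	 * Rationale: xs_requeuecnt counts how often an xfer has been
	 * through the queue, so ordering re-queued xfers by descending
	 * requeuecnt keeps older (more-retried) work from being starved
	 * by newer submissions for the same periph.
	 */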
	if (xs->xs_requeuecnt != 0) {
		for (qxs = TAILQ_FIRST(&chan->chan_queue); qxs != NULL;
		     qxs = TAILQ_NEXT(qxs, channel_q)) {
			if (qxs->xs_periph == xs->xs_periph &&
			    qxs->xs_requeuecnt < xs->xs_requeuecnt)
				break;
		}
		if (qxs != NULL) {
			TAILQ_INSERT_AFTER(&chan->chan_queue, qxs, xs,
			    channel_q);
			goto out;
		}
	}
	TAILQ_INSERT_TAIL(&chan->chan_queue, xs, channel_q);
 out:
	if (xs->xs_control & XS_CTL_THAW_PERIPH)
		scsipi_periph_thaw(xs->xs_periph, 1);
	splx(s);
	return (0);
}

/*
 * scsipi_run_queue:
 *
 *	Start as many xfers as possible running on the channel.
 */
void
scsipi_run_queue(chan)
	struct scsipi_channel *chan;
{
	struct scsipi_xfer *xs;
	struct scsipi_periph *periph;
	int s;

	for (;;) {
		s = splbio();

		/*
		 * If the channel is frozen, we can't do any work right
		 * now.
		 */
		if (chan->chan_qfreeze != 0) {
			splx(s);
			return;
		}

		/*
		 * Look for work to do, and make sure we can do it.
		 */
		for (xs = TAILQ_FIRST(&chan->chan_queue); xs != NULL;
		     xs = TAILQ_NEXT(xs, channel_q)) {
			periph = xs->xs_periph;

			if (periph->periph_active > periph->periph_openings ||
			    periph->periph_qfreeze != 0)
				continue;

			if ((periph->periph_flags & PERIPH_RECOVERING) != 0 &&
			    (xs->xs_control & XS_CTL_URGENT) == 0)
				continue;

			/*
			 * We can issue this xfer!
			 */
			goto got_one;
		}

		/*
		 * Can't find any work to do right now.
		 */
		splx(s);
		return;

 got_one:
		/*
		 * Have an xfer to run.  Allocate a resource from
		 * the adapter to run it.  If we can't allocate that
		 * resource, we don't dequeue the xfer.
		 */
		if (scsipi_get_resource(chan) == 0) {
			/*
			 * Adapter is out of resources.  If the adapter
			 * supports it, attempt to grow them.
			 */
			if (scsipi_grow_resources(chan) == 0) {
				/*
				 * Wasn't able to grow resources,
				 * nothing more we can do.
				 */
				if (xs->xs_control & XS_CTL_POLL) {
					scsipi_printaddr(xs->xs_periph);
					printf("polling command but no "
					    "adapter resources");
					/* We'll panic shortly... */
				}
				splx(s);
				return;
			}
			/*
			 * scsipi_grow_resources() allocated the resource
			 * for us.
			 */
		}

		/*
		 * We have a resource to run this xfer, do it!
		 */
		TAILQ_REMOVE(&chan->chan_queue, xs, channel_q);

		/*
		 * If the command is to be tagged, allocate a tag ID
		 * for it.
		 */
		if (XS_CTL_TAGTYPE(xs) != 0)
			scsipi_get_tag(xs);
		splx(s);

		scsipi_adapter_request(chan, ADAPTER_REQ_RUN_XFER, xs);
	}
#ifdef DIAGNOSTIC
	panic("scsipi_run_queue: impossible");
#endif
}

/*
 * scsipi_execute_xs:
 *
 *	Begin execution of an xfer, waiting for it to complete, if necessary.
 */
int
scsipi_execute_xs(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_periph *periph = xs->xs_periph;
	struct scsipi_channel *chan = periph->periph_channel;
	int async, poll, retries, error, s;

	xs->xs_status &= ~XS_STS_DONE;
	xs->error = XS_NOERROR;
	xs->resid = xs->datalen;
	xs->status = SCSI_OK;

#ifdef SCSIPI_DEBUG
	if (xs->xs_periph->periph_dbflags & SCSIPI_DB3) {
		printf("scsipi_execute_xs: ");
		show_scsipi_xs(xs);
		printf("\n");
	}
#endif

	/*
	 * Deal with command tagging:
	 *
	 *	- If the device's current operating mode doesn't
	 *	  include tagged queueing, clear the tag mask.
	 *
	 *	- If the device's current operating mode *does*
	 *	  include tagged queueing, set the tag_type in
	 *	  the xfer to the appropriate byte for the tag
	 *	  message.
	 */
	if ((PERIPH_XFER_MODE(periph) & PERIPH_CAP_TQING) == 0) {
		xs->xs_control &= ~XS_CTL_TAGMASK;
		xs->xs_tag_type = 0;
	} else {
		/*
		 * If the request doesn't specify a tag, give Head
		 * tags to URGENT operations and Ordered tags to
		 * everything else.
		 */
		if (XS_CTL_TAGTYPE(xs) == 0) {
			if (xs->xs_control & XS_CTL_URGENT)
				xs->xs_control |= XS_CTL_HEAD_TAG;
			else
				xs->xs_control |= XS_CTL_ORDERED_TAG;
		}

		switch (XS_CTL_TAGTYPE(xs)) {
		case XS_CTL_ORDERED_TAG:
			xs->xs_tag_type = MSG_ORDERED_Q_TAG;
			break;

		case XS_CTL_SIMPLE_TAG:
			xs->xs_tag_type = MSG_SIMPLE_Q_TAG;
			break;

		case XS_CTL_HEAD_TAG:
			xs->xs_tag_type = MSG_HEAD_OF_Q_TAG;
			break;

		default:
			scsipi_printaddr(periph);
			printf("invalid tag mask 0x%08x\n",
			    XS_CTL_TAGTYPE(xs));
			panic("scsipi_execute_xs");
		}
	}

	/*
	 * If we don't yet have a completion thread, or we are to poll for
	 * completion, clear the ASYNC flag.
	 */
	if (chan->chan_thread == NULL || (xs->xs_control & XS_CTL_POLL) != 0)
		xs->xs_control &= ~XS_CTL_ASYNC;

	async = (xs->xs_control & XS_CTL_ASYNC);
	poll = (xs->xs_control & XS_CTL_POLL);
	retries = xs->xs_retries;		/* for polling commands */

#ifdef DIAGNOSTIC
	if (async != 0 && xs->bp == NULL)
		panic("scsipi_execute_xs: XS_CTL_ASYNC but no buf");
#endif

	/*
	 * Enqueue the transfer.  If we're not polling for completion, this
	 * should ALWAYS return `no error'.
	 */
 try_again:
	error = scsipi_enqueue(xs);
	if (error) {
		if (poll == 0) {
			scsipi_printaddr(periph);
			printf("not polling, but enqueue failed with %d\n",
			    error);
			panic("scsipi_execute_xs");
		}

		scsipi_printaddr(periph);
		printf("failed to enqueue polling command");
		if (retries != 0) {
			printf(", retrying...\n");
			delay(1000000);
			retries--;
			goto try_again;
		}
		printf("\n");
		goto free_xs;
	}

 restarted:
	scsipi_run_queue(chan);

	/*
	 * The xfer is enqueued, and possibly running.  If it's to be
	 * completed asynchronously, just return now.
	 */
	if (async)
		return (EJUSTRETURN);

	/*
	 * Not an asynchronous command; wait for it to complete.
	 */
	while ((xs->xs_status & XS_STS_DONE) == 0) {
		if (poll) {
			scsipi_printaddr(periph);
			printf("polling command not done\n");
			panic("scsipi_execute_xs");
		}
		(void) tsleep(xs, PRIBIO, "xscmd", 0);
	}

	/*
	 * Command is complete.  scsipi_done() has awakened us to perform
	 * the error handling.
	 */
	error = scsipi_complete(xs);
	if (error == ERESTART)
		goto restarted;

	/*
	 * Command completed successfully or fatal error occurred.  Fall
	 * into....
	 */
 free_xs:
	s = splbio();
	scsipi_put_xs(xs);
	splx(s);

	/*
	 * Kick the queue, keep it running in case it stopped for some
	 * reason.
	 */
	scsipi_run_queue(chan);

	return (error);
}

/*
 * scsipi_completion_thread:
 *
 *	This is the completion thread.  We wait for errors on
 *	asynchronous xfers, and perform the error handling
 *	function, restarting the command, if necessary.
 */
void
scsipi_completion_thread(arg)
	void *arg;
{
	struct scsipi_channel *chan = arg;
	struct scsipi_xfer *xs;
	int s;

	for (;;) {
		s = splbio();
		xs = TAILQ_FIRST(&chan->chan_complete);
		if (xs == NULL &&
		    (chan->chan_flags & SCSIPI_CHAN_SHUTDOWN) == 0) {
			splx(s);
			(void) tsleep(&chan->chan_complete, PRIBIO,
			    "sccomp", 0);
			continue;
		}
		if (chan->chan_flags & SCSIPI_CHAN_SHUTDOWN) {
			splx(s);
			break;
		}
		TAILQ_REMOVE(&chan->chan_complete, xs, channel_q);
		splx(s);

		/*
		 * Have an xfer with an error; process it.
		 */
		(void) scsipi_complete(xs);

		/*
		 * Kick the queue; keep it running if it was stopped
		 * for some reason.
		 */
		scsipi_run_queue(chan);
	}

	chan->chan_thread = NULL;

	/* In case parent is waiting for us to exit. */
	wakeup(&chan->chan_thread);

	kthread_exit(0);
}

/*
 * scsipi_create_completion_thread:
 *
 *	Callback to actually create the completion thread.
 */
void
scsipi_create_completion_thread(arg)
	void *arg;
{
	struct scsipi_channel *chan = arg;
	struct scsipi_adapter *adapt = chan->chan_adapter;

	if (kthread_create1(scsipi_completion_thread, chan,
	    &chan->chan_thread, "%s:%d", adapt->adapt_dev->dv_xname,
	    chan->chan_channel)) {
		printf("%s: unable to create completion thread for "
		    "channel %d\n", adapt->adapt_dev->dv_xname,
		    chan->chan_channel);
		panic("scsipi_create_completion_thread");
	}
}

/*
 * scsipi_async_event:
 *
 *	Handle an asynchronous event from an adapter.
 */
void
scsipi_async_event(chan, event, arg)
	struct scsipi_channel *chan;
	scsipi_async_event_t event;
	void *arg;
{
	int s;

	s = splbio();
	switch (event) {
	case ASYNC_EVENT_MAX_OPENINGS:
		scsipi_async_event_max_openings(chan,
		    (struct scsipi_max_openings *)arg);
		break;

	case ASYNC_EVENT_XFER_MODE:
		scsipi_async_event_xfer_mode(chan,
		    (struct scsipi_xfer_mode *)arg);
		break;
	}
	splx(s);
}

/*
 * scsipi_print_xfer_mode:
 *
 *	Print a periph's capabilities.
 */
void
scsipi_print_xfer_mode(periph)
	struct scsipi_periph *periph;
{
	int period, freq, speed, mbs;

	if ((periph->periph_flags & PERIPH_MODE_VALID) == 0)
		return;

	printf("%s: ", periph->periph_dev->dv_xname);
	if (periph->periph_mode & PERIPH_CAP_SYNC) {
		period = scsipi_sync_factor_to_period(periph->periph_period);
		printf("Sync (%d.%dns offset %d)",
		    period / 10, period % 10, periph->periph_offset);
	} else
		printf("Async");

	if (periph->periph_mode & PERIPH_CAP_WIDE32)
		printf(", 32-bit");
	else if (periph->periph_mode & PERIPH_CAP_WIDE16)
		printf(", 16-bit");
	else
		printf(", 8-bit");

	if (periph->periph_mode & PERIPH_CAP_SYNC) {
		freq = scsipi_sync_factor_to_freq(periph->periph_period);
		speed = freq;
		if (periph->periph_mode & PERIPH_CAP_WIDE32)
			speed *= 4;
		else if (periph->periph_mode & PERIPH_CAP_WIDE16)
			speed *= 2;
		mbs = speed / 1000;
		if (mbs > 0)
			printf(" (%d.%03dMB/s)", mbs, speed % 1000);
		else
			printf(" (%dKB/s)", speed % 1000);
	}

	printf(" transfers");

	if (periph->periph_mode & PERIPH_CAP_TQING)
		printf(", tagged queueing");

	printf("\n");
}

/*
 * scsipi_async_event_max_openings:
 *
 *	Update the maximum number of outstanding commands a
 *	device may have.
 */
void
scsipi_async_event_max_openings(chan, mo)
	struct scsipi_channel *chan;
	struct scsipi_max_openings *mo;
{
	struct scsipi_periph *periph;
	int minlun, maxlun;

	if (mo->mo_lun == -1) {
		/*
		 * Wildcarded; apply it to all LUNs.
		 */
		minlun = 0;
		maxlun = chan->chan_nluns - 1;
	} else
		minlun = maxlun = mo->mo_lun;

	for (; minlun <= maxlun; minlun++) {
		periph = scsipi_lookup_periph(chan, mo->mo_target, minlun);
		if (periph == NULL)
			continue;

		if (mo->mo_openings < periph->periph_openings)
			periph->periph_openings = mo->mo_openings;
		else if (mo->mo_openings > periph->periph_openings &&
		    (periph->periph_flags & PERIPH_GROW_OPENINGS) != 0)
			periph->periph_openings = mo->mo_openings;
	}
}

/*
 * scsipi_async_event_xfer_mode:
 *
 *	Update the xfer mode for all periphs sharing the
 *	specified I_T Nexus.
 */
void
scsipi_async_event_xfer_mode(chan, xm)
	struct scsipi_channel *chan;
	struct scsipi_xfer_mode *xm;
{
	struct scsipi_periph *periph;
	int lun, announce, mode, period, offset;

	for (lun = 0; lun < chan->chan_nluns; lun++) {
		periph = scsipi_lookup_periph(chan, xm->xm_target, lun);
		if (periph == NULL)
			continue;
		announce = 0;

		/*
		 * Clamp the xfer mode down to this periph's capabilities.
		 */
		mode = xm->xm_mode & periph->periph_cap;
		if (mode & PERIPH_CAP_SYNC) {
			period = xm->xm_period;
			offset = xm->xm_offset;
		} else {
			period = 0;
			offset = 0;
		}

		/*
		 * If we do not have a valid xfer mode yet, or the parameters
		 * are different, announce them.
		 */
		if ((periph->periph_flags & PERIPH_MODE_VALID) == 0 ||
		    periph->periph_mode != mode ||
		    periph->periph_period != period ||
		    periph->periph_offset != offset)
			announce = 1;

		periph->periph_mode = mode;
		periph->periph_period = period;
		periph->periph_offset = offset;
		periph->periph_flags |= PERIPH_MODE_VALID;

		if (announce)
			scsipi_print_xfer_mode(periph);
	}
}

/*
 * scsipi_set_xfer_mode:
 *
 *	Set the xfer mode for the specified I_T Nexus.
 */
void
scsipi_set_xfer_mode(chan, target, immed)
	struct scsipi_channel *chan;
	int target, immed;
{
	struct scsipi_xfer_mode xm;
	struct scsipi_periph *itperiph;
	int lun, s;

	/*
	 * Go to the minimal xfer mode.
	 */
	xm.xm_target = target;
	xm.xm_mode = 0;
	xm.xm_period = 0;			/* ignored */
	xm.xm_offset = 0;			/* ignored */

	/*
	 * Find the first LUN we know about on this I_T Nexus.
	 */
	for (lun = 0; lun < chan->chan_nluns; lun++) {
		itperiph = scsipi_lookup_periph(chan, target, lun);
		if (itperiph != NULL)
			break;
	}
	if (itperiph != NULL)
		xm.xm_mode = itperiph->periph_cap;

	/*
	 * Now issue the request to the adapter.
	 */
	s = splbio();
	scsipi_adapter_request(chan, ADAPTER_REQ_SET_XFER_MODE, &xm);
	splx(s);

	/*
	 * If we want this to happen immediately, issue a dummy command,
	 * since most adapters can't really negotiate unless they're
	 * executing a job.
	 */
	if (immed != 0 && itperiph != NULL) {
		(void) scsipi_test_unit_ready(itperiph,
		    XS_CTL_DISCOVERY | XS_CTL_IGNORE_ILLEGAL_REQUEST |
		    XS_CTL_IGNORE_NOT_READY |
		    XS_CTL_IGNORE_MEDIA_CHANGE);
	}
}

/*
 * scsipi_adapter_addref:
 *
 *	Add a reference to the adapter pointed to by the provided
 *	link, enabling the adapter if necessary.
 */
int
scsipi_adapter_addref(adapt)
	struct scsipi_adapter *adapt;
{
	int s, error = 0;

	s = splbio();
	if (adapt->adapt_refcnt++ == 0 && adapt->adapt_enable != NULL) {
		error = (*adapt->adapt_enable)(adapt->adapt_dev, 1);
		if (error)
			adapt->adapt_refcnt--;
	}
	splx(s);
	return (error);
}

/*
 * scsipi_adapter_delref:
 *
 *	Delete a reference to the adapter pointed to by the provided
 *	link, disabling the adapter if possible.
 */
void
scsipi_adapter_delref(adapt)
	struct scsipi_adapter *adapt;
{
	int s;

	s = splbio();
	if (adapt->adapt_refcnt-- == 1 && adapt->adapt_enable != NULL)
		(void) (*adapt->adapt_enable)(adapt->adapt_dev, 0);
	splx(s);
}
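
/*
 * Usage sketch (illustrative, assuming the usual open/close pairing):
 * a periph driver enables the adapter on first open and disables it
 * on last close, e.g.
 *
 *	if ((error = scsipi_adapter_addref(adapt)) != 0)
 *		return (error);
 *	...
 *	scsipi_adapter_delref(adapt);
 *
 * so that power or other resources are only held while a device on
 * the adapter is actually in use.
 */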

struct scsipi_syncparam {
	int	ss_factor;
	int	ss_period;	/* ns * 10 */
} scsipi_syncparams[] = {
	{ 0x0a,		250 },
	{ 0x0b,		303 },
	{ 0x0c,		500 },
};
const int scsipi_nsyncparams =
    sizeof(scsipi_syncparams) / sizeof(scsipi_syncparams[0]);

int
scsipi_sync_period_to_factor(period)
	int period;	/* ns * 10 */
{
	int i;

	for (i = 0; i < scsipi_nsyncparams; i++) {
		if (period <= scsipi_syncparams[i].ss_period)
			return (scsipi_syncparams[i].ss_factor);
	}

	return ((period / 10) / 4);
}

int
scsipi_sync_factor_to_period(factor)
	int factor;
{
	int i;

	for (i = 0; i < scsipi_nsyncparams; i++) {
		if (factor == scsipi_syncparams[i].ss_factor)
			return (scsipi_syncparams[i].ss_period);
	}

	return ((factor * 4) * 10);
}

int
scsipi_sync_factor_to_freq(factor)
	int factor;
{
	int i;

	for (i = 0; i < scsipi_nsyncparams; i++) {
		if (factor == scsipi_syncparams[i].ss_factor)
			return (10000000 / scsipi_syncparams[i].ss_period);
	}

	return (10000000 / ((factor * 4) * 10));
}
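
/*
 * Worked example: the table entry { 0x0a, 250 } describes fast-40
 * timing, i.e. a 25.0ns period (periods are stored as ns * 10), so
 * scsipi_sync_factor_to_freq(0x0a) = 10000000 / 250 = 40000, a
 * frequency in kHz.  For factors not in the table the standard
 * formula applies: period(ns) = factor * 4, e.g. factor 0x19 (25)
 * gives a 100ns period, or 10000 kHz.
 */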

#ifdef SCSIPI_DEBUG
/*
 * Given a scsipi_xfer, dump the request, in all its glory
 */
void
show_scsipi_xs(xs)
	struct scsipi_xfer *xs;
{

	printf("xs(%p): ", xs);
	printf("xs_control(0x%08x)", xs->xs_control);
	printf("xs_status(0x%08x)", xs->xs_status);
	printf("periph(%p)", xs->xs_periph);
	printf("retr(0x%x)", xs->xs_retries);
	printf("timo(0x%x)", xs->timeout);
	printf("cmd(%p)", xs->cmd);
	printf("len(0x%x)", xs->cmdlen);
	printf("data(%p)", xs->data);
	printf("len(0x%x)", xs->datalen);
	printf("res(0x%x)", xs->resid);
	printf("err(0x%x)", xs->error);
	printf("bp(%p)", xs->bp);
	show_scsipi_cmd(xs);
}

void
show_scsipi_cmd(xs)
	struct scsipi_xfer *xs;
{
	u_char *b = (u_char *) xs->cmd;
	int i = 0;

	scsipi_printaddr(xs->xs_periph);
	printf(" command: ");

	if ((xs->xs_control & XS_CTL_RESET) == 0) {
		while (i < xs->cmdlen) {
			if (i)
				printf(",");
			printf("0x%x", b[i++]);
		}
		printf("-[%d bytes]\n", xs->datalen);
		if (xs->datalen)
			show_mem(xs->data, min(64, xs->datalen));
	} else
		printf("-RESET-\n");
}

void
show_mem(address, num)
	u_char *address;
	int num;
{
	int x;

	printf("------------------------------");
	for (x = 0; x < num; x++) {
		if ((x % 16) == 0)
			printf("\n%03d: ", x);
		printf("%02x ", *address++);
	}
	printf("\n------------------------------\n");
}
#endif /* SCSIPI_DEBUG */