1 /* $NetBSD: scsipi_base.c,v 1.44 2001/05/23 15:50:32 bouyer Exp $ */
2
3 /*-
4 * Copyright (c) 1998, 1999, 2000 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Charles M. Hannum; by Jason R. Thorpe of the Numerical Aerospace
9 * Simulation Facility, NASA Ames Research Center.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution.
19 * 3. All advertising materials mentioning features or use of this software
20 * must display the following acknowledgement:
21 * This product includes software developed by the NetBSD
22 * Foundation, Inc. and its contributors.
23 * 4. Neither the name of The NetBSD Foundation nor the names of its
24 * contributors may be used to endorse or promote products derived
25 * from this software without specific prior written permission.
26 *
27 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
28 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
29 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
30 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
31 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
32 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
33 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
34 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
35 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
36 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
37 * POSSIBILITY OF SUCH DAMAGE.
38 */
39
40 #include "opt_scsi.h"
41
42 #include <sys/types.h>
43 #include <sys/param.h>
44 #include <sys/systm.h>
45 #include <sys/kernel.h>
46 #include <sys/buf.h>
47 #include <sys/uio.h>
48 #include <sys/malloc.h>
49 #include <sys/pool.h>
50 #include <sys/errno.h>
51 #include <sys/device.h>
52 #include <sys/proc.h>
53 #include <sys/kthread.h>
54
55 #include <dev/scsipi/scsipi_all.h>
56 #include <dev/scsipi/scsipi_disk.h>
57 #include <dev/scsipi/scsipiconf.h>
58 #include <dev/scsipi/scsipi_base.h>
59
60 #include <dev/scsipi/scsi_all.h>
61 #include <dev/scsipi/scsi_message.h>
62
63 int scsipi_complete __P((struct scsipi_xfer *));
64 void scsipi_request_sense __P((struct scsipi_xfer *));
65 int scsipi_enqueue __P((struct scsipi_xfer *));
66 void scsipi_run_queue __P((struct scsipi_channel *chan));
67
68 void scsipi_completion_thread __P((void *));
69
70 void scsipi_get_tag __P((struct scsipi_xfer *));
71 void scsipi_put_tag __P((struct scsipi_xfer *));
72
73 int scsipi_get_resource __P((struct scsipi_channel *));
74 void scsipi_put_resource __P((struct scsipi_channel *));
75 __inline int scsipi_grow_resources __P((struct scsipi_channel *));
76
77 void scsipi_async_event_max_openings __P((struct scsipi_channel *,
78 struct scsipi_max_openings *));
79 void scsipi_async_event_xfer_mode __P((struct scsipi_channel *,
80 struct scsipi_xfer_mode *));
81 void scsipi_async_event_channel_reset __P((struct scsipi_channel *));
82
83 struct pool scsipi_xfer_pool;
84
85 /*
86 * scsipi_init:
87 *
88 * Called when a scsibus or atapibus is attached to the system
89 * to initialize shared data structures.
90 */
91 void
92 scsipi_init()
93 {
94 static int scsipi_init_done;
95
96 if (scsipi_init_done)
97 return;
98 scsipi_init_done = 1;
99
100 /* Initialize the scsipi_xfer pool. */
101 pool_init(&scsipi_xfer_pool, sizeof(struct scsipi_xfer), 0,
102 0, 0, "scxspl", 0, NULL, NULL, M_DEVBUF);
103 }
104
105 /*
106 * scsipi_channel_init:
107 *
108 * Initialize a scsipi_channel when it is attached.
109 */
110 int
111 scsipi_channel_init(chan)
112 struct scsipi_channel *chan;
113 {
114 size_t nbytes;
115 int i;
116
117 /* Initialize shared data. */
118 scsipi_init();
119
120 /* Initialize the queues. */
121 TAILQ_INIT(&chan->chan_queue);
122 TAILQ_INIT(&chan->chan_complete);
123
124 nbytes = chan->chan_ntargets * sizeof(struct scsipi_periph **);
125 chan->chan_periphs = malloc(nbytes, M_DEVBUF, M_NOWAIT);
126 if (chan->chan_periphs == NULL)
127 return (ENOMEM);
128
129
130 nbytes = chan->chan_nluns * sizeof(struct scsipi_periph *);
131 for (i = 0; i < chan->chan_ntargets; i++) {
132 chan->chan_periphs[i] = malloc(nbytes, M_DEVBUF, M_NOWAIT);
133 if (chan->chan_periphs[i] == NULL) {
134 while (--i >= 0)
135 free(chan->chan_periphs[i], M_DEVBUF);
136 free(chan->chan_periphs, M_DEVBUF); /* don't leak the target array */
137 return (ENOMEM);
138 }
139 memset(chan->chan_periphs[i], 0, nbytes);
140 }
141
142 /*
143 * Create the asynchronous completion thread.
144 */
145 kthread_create(scsipi_create_completion_thread, chan);
146 return (0);
147 }
148
149 /*
150 * scsipi_channel_shutdown:
151 *
152 * Shutdown a scsipi_channel.
153 */
154 void
155 scsipi_channel_shutdown(chan)
156 struct scsipi_channel *chan;
157 {
158
159 /*
160 * Shut down the completion thread.
161 */
162 chan->chan_flags |= SCSIPI_CHAN_SHUTDOWN;
163 wakeup(&chan->chan_complete);
164
165 /*
166 * Now wait for the thread to exit.
167 */
168 while (chan->chan_thread != NULL)
169 (void) tsleep(&chan->chan_thread, PRIBIO, "scshut", 0);
170 }
171
172 /*
173 * scsipi_insert_periph:
174 *
175 * Insert a periph into the channel.
176 */
177 void
178 scsipi_insert_periph(chan, periph)
179 struct scsipi_channel *chan;
180 struct scsipi_periph *periph;
181 {
182 int s;
183
184 s = splbio();
185 chan->chan_periphs[periph->periph_target][periph->periph_lun] = periph;
186 splx(s);
187 }
188
189 /*
190 * scsipi_remove_periph:
191 *
192 * Remove a periph from the channel.
193 */
194 void
195 scsipi_remove_periph(chan, periph)
196 struct scsipi_channel *chan;
197 struct scsipi_periph *periph;
198 {
199 int s;
200
201 s = splbio();
202 chan->chan_periphs[periph->periph_target][periph->periph_lun] = NULL;
203 splx(s);
204 }
205
206 /*
207 * scsipi_lookup_periph:
208 *
209 * Lookup a periph on the specified channel.
210 */
211 struct scsipi_periph *
212 scsipi_lookup_periph(chan, target, lun)
213 struct scsipi_channel *chan;
214 int target, lun;
215 {
216 struct scsipi_periph *periph;
217 int s;
218
219 if (target >= chan->chan_ntargets ||
220 lun >= chan->chan_nluns)
221 return (NULL);
222
223 s = splbio();
224 periph = chan->chan_periphs[target][lun];
225 splx(s);
226
227 return (periph);
228 }
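
/*
 * Usage note (an illustrative sketch, not code from this file): callers
 * must tolerate a NULL return, since the target/lun slot may be empty
 * or out of range:
 *
 *	periph = scsipi_lookup_periph(chan, target, lun);
 *	if (periph == NULL)
 *		continue;	(or fail with ENXIO, as appropriate)
 */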
229
230 /*
231 * scsipi_get_resource:
232 *
233 * Allocate a single xfer `resource' from the channel.
234 *
235 * NOTE: Must be called at splbio().
236 */
237 int
238 scsipi_get_resource(chan)
239 struct scsipi_channel *chan;
240 {
241 struct scsipi_adapter *adapt = chan->chan_adapter;
242
243 if (chan->chan_flags & SCSIPI_CHAN_OPENINGS) {
244 if (chan->chan_openings > 0) {
245 chan->chan_openings--;
246 return (1);
247 }
248 return (0);
249 }
250
251 if (adapt->adapt_openings > 0) {
252 adapt->adapt_openings--;
253 return (1);
254 }
255 return (0);
256 }
257
258 /*
259 * scsipi_grow_resources:
260 *
261 * Attempt to grow resources for a channel. If this succeeds,
262 * we allocate one for our caller.
263 *
264 * NOTE: Must be called at splbio().
265 */
266 __inline int
267 scsipi_grow_resources(chan)
268 struct scsipi_channel *chan;
269 {
270
271 if (chan->chan_flags & SCSIPI_CHAN_CANGROW) {
272 scsipi_adapter_request(chan, ADAPTER_REQ_GROW_RESOURCES, NULL);
273 return (scsipi_get_resource(chan));
274 }
275
276 return (0);
277 }
278
279 /*
280 * scsipi_put_resource:
281 *
282 * Free a single xfer `resource' to the channel.
283 *
284 * NOTE: Must be called at splbio().
285 */
286 void
287 scsipi_put_resource(chan)
288 struct scsipi_channel *chan;
289 {
290 struct scsipi_adapter *adapt = chan->chan_adapter;
291
292 if (chan->chan_flags & SCSIPI_CHAN_OPENINGS)
293 chan->chan_openings++;
294 else
295 adapt->adapt_openings++;
296 }
297
298 /*
299 * scsipi_get_tag:
300 *
301 * Get a tag ID for the specified xfer.
302 *
303 * NOTE: Must be called at splbio().
304 */
305 void
306 scsipi_get_tag(xs)
307 struct scsipi_xfer *xs;
308 {
309 struct scsipi_periph *periph = xs->xs_periph;
310 int word, bit, tag;
311
312 for (word = 0; word < PERIPH_NTAGWORDS; word++) {
313 bit = ffs(periph->periph_freetags[word]);
314 if (bit != 0)
315 break;
316 }
317 #ifdef DIAGNOSTIC
318 if (word == PERIPH_NTAGWORDS) {
319 scsipi_printaddr(periph);
320 printf("no free tags\n");
321 panic("scsipi_get_tag");
322 }
323 #endif
324
325 bit -= 1;
326 periph->periph_freetags[word] &= ~(1 << bit);
327 tag = (word << 5) | bit;
328
329 /* XXX Should eventually disallow this completely. */
330 if (tag >= periph->periph_openings) {
331 scsipi_printaddr(periph);
332 printf("WARNING: tag %d greater than available openings %d\n",
333 tag, periph->periph_openings);
334 }
335
336 xs->xs_tag_id = tag;
337 }
338
339 /*
340 * scsipi_put_tag:
341 *
342 * Put the tag ID for the specified xfer back into the pool.
343 *
344 * NOTE: Must be called at splbio().
345 */
346 void
347 scsipi_put_tag(xs)
348 struct scsipi_xfer *xs;
349 {
350 struct scsipi_periph *periph = xs->xs_periph;
351 int word, bit;
352
353 word = xs->xs_tag_id >> 5;
354 bit = xs->xs_tag_id & 0x1f;
355
356 periph->periph_freetags[word] |= (1 << bit);
357 }
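
/*
 * Worked example of the tag encoding used above: tags are packed 32 to
 * a freetags word, with tag = (word << 5) | bit.  Tag 37 therefore
 * lives in word 1 (37 >> 5) at bit 5 (37 & 0x1f); scsipi_get_tag()
 * clears that bit to allocate the tag and scsipi_put_tag() sets it
 * again to free it.
 */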
358
359 /*
360 * scsipi_get_xs:
361 *
362 * Allocate an xfer descriptor and associate it with the
363 * specified peripheral. If the peripheral has no more
364 * available command openings, we either block waiting for
365 * one to become available, or fail.
366 */
367 struct scsipi_xfer *
368 scsipi_get_xs(periph, flags)
369 struct scsipi_periph *periph;
370 int flags;
371 {
372 struct scsipi_xfer *xs;
373 int s;
374
375 SC_DEBUG(periph, SCSIPI_DB3, ("scsipi_get_xs\n"));
376
377 /*
378 * If we're cold, make sure we poll.
379 */
380 if (cold)
381 flags |= XS_CTL_NOSLEEP | XS_CTL_POLL;
382
383 #ifdef DIAGNOSTIC
384 /*
385 * URGENT commands can never be ASYNC.
386 */
387 if ((flags & (XS_CTL_URGENT|XS_CTL_ASYNC)) ==
388 (XS_CTL_URGENT|XS_CTL_ASYNC)) {
389 scsipi_printaddr(periph);
390 printf("URGENT and ASYNC\n");
391 panic("scsipi_get_xs");
392 }
393 #endif
394
395 s = splbio();
396 /*
397 * Wait for a command opening to become available. Rules:
398 *
399 * - All xfers must wait for an available opening.
400 * Exception: URGENT xfers can proceed when
401 * active == openings, because we use the opening
402 * of the command we're recovering for.
403 * - If the periph has sense pending, only URGENT & REQSENSE
404 * xfers may proceed.
405 *
406 * - If the periph is recovering, only URGENT xfers may
407 * proceed.
408 *
409 * - If the periph is currently executing a recovery
410 * command, URGENT commands must block, because only
411 * one recovery command can execute at a time.
412 */
413 for (;;) {
414 if (flags & XS_CTL_URGENT) {
415 if (periph->periph_active > periph->periph_openings)
416 goto wait_for_opening;
417 if (periph->periph_flags & PERIPH_SENSE) {
418 if ((flags & XS_CTL_REQSENSE) == 0)
419 goto wait_for_opening;
420 } else {
421 if ((periph->periph_flags &
422 PERIPH_RECOVERY_ACTIVE) != 0)
423 goto wait_for_opening;
424 periph->periph_flags |= PERIPH_RECOVERY_ACTIVE;
425 }
426 break;
427 }
428 if (periph->periph_active >= periph->periph_openings ||
429 (periph->periph_flags & PERIPH_RECOVERING) != 0)
430 goto wait_for_opening;
431 periph->periph_active++;
432 break;
433
434 wait_for_opening:
435 if (flags & XS_CTL_NOSLEEP) {
436 splx(s);
437 return (NULL);
438 }
439 SC_DEBUG(periph, SCSIPI_DB3, ("sleeping\n"));
440 periph->periph_flags |= PERIPH_WAITING;
441 (void) tsleep(periph, PRIBIO, "getxs", 0);
442 }
443 SC_DEBUG(periph, SCSIPI_DB3, ("calling pool_get\n"));
444 xs = pool_get(&scsipi_xfer_pool,
445 ((flags & XS_CTL_NOSLEEP) != 0 ? PR_NOWAIT : PR_WAITOK));
446 if (xs == NULL) {
447 if (flags & XS_CTL_URGENT) {
448 if ((flags & XS_CTL_REQSENSE) == 0)
449 periph->periph_flags &= ~PERIPH_RECOVERY_ACTIVE;
450 } else
451 periph->periph_active--;
452 scsipi_printaddr(periph);
453 printf("unable to allocate %sscsipi_xfer\n",
454 (flags & XS_CTL_URGENT) ? "URGENT " : "");
455 }
456 splx(s);
457
458 SC_DEBUG(periph, SCSIPI_DB3, ("returning\n"));
459
460 if (xs != NULL) {
461 memset(xs, 0, sizeof(*xs)); /* zero first; xs_callout lives in *xs */
462 callout_init(&xs->xs_callout);
463 xs->xs_periph = periph;
464 xs->xs_control = flags;
465 xs->xs_status = 0;
466 s = splbio();
467 TAILQ_INSERT_TAIL(&periph->periph_xferq, xs, device_q);
468 splx(s);
469 }
470 return (xs);
471 }
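
/*
 * Typical call pattern (a hedged sketch, not code from this driver):
 * a periph driver that must not sleep allocates an xfer like so and
 * backs off gracefully on failure:
 *
 *	xs = scsipi_get_xs(periph, XS_CTL_NOSLEEP);
 *	if (xs == NULL)
 *		return;		(requeue the work for later)
 */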
472
473 /*
474 * scsipi_put_xs:
475 *
476 * Release an xfer descriptor, decreasing the outstanding command
477 * count for the peripheral. If there is a thread waiting for
478 * an opening, wake it up. If not, kick any queued I/O the
479 * peripheral may have.
480 *
481 * NOTE: Must be called at splbio().
482 */
483 void
484 scsipi_put_xs(xs)
485 struct scsipi_xfer *xs;
486 {
487 struct scsipi_periph *periph = xs->xs_periph;
488 int flags = xs->xs_control;
489
490 SC_DEBUG(periph, SCSIPI_DB3, ("scsipi_free_xs\n"));
491
492 TAILQ_REMOVE(&periph->periph_xferq, xs, device_q);
493 pool_put(&scsipi_xfer_pool, xs);
494
495 #ifdef DIAGNOSTIC
496 if ((periph->periph_flags & PERIPH_RECOVERY_ACTIVE) != 0 &&
497 periph->periph_active == 0) {
498 scsipi_printaddr(periph);
499 printf("recovery without a command to recovery for\n");
500 panic("scsipi_put_xs");
501 }
502 #endif
503
504 if (flags & XS_CTL_URGENT) {
505 if ((flags & XS_CTL_REQSENSE) == 0)
506 periph->periph_flags &= ~PERIPH_RECOVERY_ACTIVE;
507 } else
508 periph->periph_active--;
509 if (periph->periph_active == 0 &&
510 (periph->periph_flags & PERIPH_WAITDRAIN) != 0) {
511 periph->periph_flags &= ~PERIPH_WAITDRAIN;
512 wakeup(&periph->periph_active);
513 }
514
515 if (periph->periph_flags & PERIPH_WAITING) {
516 periph->periph_flags &= ~PERIPH_WAITING;
517 wakeup(periph);
518 } else {
519 if (periph->periph_switch->psw_start != NULL) {
520 SC_DEBUG(periph, SCSIPI_DB2,
521 ("calling private start()\n"));
522 (*periph->periph_switch->psw_start)(periph);
523 }
524 }
525 }
526
527 /*
528 * scsipi_channel_freeze:
529 *
530 * Freeze a channel's xfer queue.
531 */
532 void
533 scsipi_channel_freeze(chan, count)
534 struct scsipi_channel *chan;
535 int count;
536 {
537 int s;
538
539 s = splbio();
540 chan->chan_qfreeze += count;
541 splx(s);
542 }
543
544 /*
545 * scsipi_channel_thaw:
546 *
547 * Thaw a channel's xfer queue.
548 */
549 void
550 scsipi_channel_thaw(chan, count)
551 struct scsipi_channel *chan;
552 int count;
553 {
554 int s;
555
556 s = splbio();
557 chan->chan_qfreeze -= count;
558 /*
559 * Don't let the freeze count go negative.
560 *
561 * Presumably the adapter driver could keep track of this,
562 * but it might just be easier to do this here so as to allow
563 * multiple callers, including those outside the adapter driver.
564 */
565 if (chan->chan_qfreeze < 0) {
566 chan->chan_qfreeze = 0;
567 }
568 splx(s);
569 /*
570 * Kick the channel's queue here. Note, we may be running in
571 * interrupt context (softclock or HBA's interrupt), so the adapter
572 * driver had better not sleep.
573 */
574 if (chan->chan_qfreeze == 0)
575 scsipi_run_queue(chan);
576 }
577
578 /*
579 * scsipi_channel_timed_thaw:
580 *
581 * Thaw a channel after some time has expired. This will also
582 * run the channel's queue if the freeze count has reached 0.
583 */
584 void
585 scsipi_channel_timed_thaw(arg)
586 void *arg;
587 {
588 struct scsipi_channel *chan = arg;
589
590 scsipi_channel_thaw(chan, 1);
591 }
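
/*
 * Illustrative pattern (an assumption, mirroring the periph-level code
 * in scsipi_complete() below): an adapter that must back off can
 * freeze the channel and arrange for this callback to thaw it later:
 *
 *	scsipi_channel_freeze(chan, 1);
 *	callout_reset(&sc->sc_callout, hz, scsipi_channel_timed_thaw,
 *	    chan);
 *
 * where sc_callout is a hypothetical callout in the adapter's softc.
 */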
592
593 /*
594 * scsipi_periph_freeze:
595 *
596 * Freeze a device's xfer queue.
597 */
598 void
599 scsipi_periph_freeze(periph, count)
600 struct scsipi_periph *periph;
601 int count;
602 {
603 int s;
604
605 s = splbio();
606 periph->periph_qfreeze += count;
607 splx(s);
608 }
609
610 /*
611 * scsipi_periph_thaw:
612 *
613 * Thaw a device's xfer queue.
614 */
615 void
616 scsipi_periph_thaw(periph, count)
617 struct scsipi_periph *periph;
618 int count;
619 {
620 int s;
621
622 s = splbio();
623 periph->periph_qfreeze -= count;
624 if (periph->periph_qfreeze == 0 &&
625 (periph->periph_flags & PERIPH_WAITING) != 0)
626 wakeup(periph);
627 splx(s);
628 }
629
630 /*
631 * scsipi_periph_timed_thaw:
632 *
633 * Thaw a device after some time has expired.
634 */
635 void
636 scsipi_periph_timed_thaw(arg)
637 void *arg;
638 {
639 struct scsipi_periph *periph = arg;
640
641 callout_stop(&periph->periph_callout);
642 scsipi_periph_thaw(periph, 1);
643
644 /*
645 * Kick the channel's queue here. Note, we're running in
646 * interrupt context (softclock), so the adapter driver
647 * had better not sleep.
648 */
649 scsipi_run_queue(periph->periph_channel);
650 }
651
652 /*
653 * scsipi_wait_drain:
654 *
655 * Wait for a periph's pending xfers to drain.
656 */
657 void
658 scsipi_wait_drain(periph)
659 struct scsipi_periph *periph;
660 {
661 int s;
662
663 s = splbio();
664 while (periph->periph_active != 0) {
665 periph->periph_flags |= PERIPH_WAITDRAIN;
666 (void) tsleep(&periph->periph_active, PRIBIO, "sxdrn", 0);
667 }
668 splx(s);
669 }
670
671 /*
672 * scsipi_kill_pending:
673 *
674 * Kill off all pending xfers for a periph.
675 *
676 * NOTE: Must be called at splbio().
677 */
678 void
679 scsipi_kill_pending(periph)
680 struct scsipi_periph *periph;
681 {
682
683 (*periph->periph_channel->chan_bustype->bustype_kill_pending)(periph);
684 #ifdef DIAGNOSTIC
685 if (TAILQ_FIRST(&periph->periph_xferq) != NULL)
686 panic("scsipi_kill_pending");
687 #endif
688 scsipi_wait_drain(periph);
689 }
690
691 /*
692 * scsipi_interpret_sense:
693 *
694 * Look at the returned sense and act on the error, determining
695 * the unix error number to pass back. (0 = report no error)
696 *
697 * NOTE: If we return ERESTART, we are expected to have
698 * thawed the device!
699 *
700 * THIS IS THE DEFAULT ERROR HANDLER FOR SCSI DEVICES.
701 */
702 int
703 scsipi_interpret_sense(xs)
704 struct scsipi_xfer *xs;
705 {
706 struct scsipi_sense_data *sense;
707 struct scsipi_periph *periph = xs->xs_periph;
708 u_int8_t key;
709 u_int32_t info;
710 int error;
711 #ifndef SCSIVERBOSE
712 static char *error_mes[] = {
713 "soft error (corrected)",
714 "not ready", "medium error",
715 "non-media hardware failure", "illegal request",
716 "unit attention", "readonly device",
717 "no data found", "vendor unique",
718 "copy aborted", "command aborted",
719 "search returned equal", "volume overflow",
720 "verify miscompare", "unknown error key"
721 };
722 #endif
723
724 sense = &xs->sense.scsi_sense;
725 #ifdef SCSIPI_DEBUG
726 if (periph->periph_flags & SCSIPI_DB1) {
727 int count;
728 scsipi_printaddr(periph);
729 printf(" sense debug information:\n");
730 printf("\tcode 0x%x valid 0x%x\n",
731 sense->error_code & SSD_ERRCODE,
732 sense->error_code & SSD_ERRCODE_VALID ? 1 : 0);
733 printf("\tseg 0x%x key 0x%x ili 0x%x eom 0x%x fmark 0x%x\n",
734 sense->segment,
735 sense->flags & SSD_KEY,
736 sense->flags & SSD_ILI ? 1 : 0,
737 sense->flags & SSD_EOM ? 1 : 0,
738 sense->flags & SSD_FILEMARK ? 1 : 0);
739 printf("\ninfo: 0x%x 0x%x 0x%x 0x%x followed by %d "
740 "extra bytes\n",
741 sense->info[0],
742 sense->info[1],
743 sense->info[2],
744 sense->info[3],
745 sense->extra_len);
746 printf("\textra: ");
747 for (count = 0; count < ADD_BYTES_LIM(sense); count++)
748 printf("0x%x ", sense->cmd_spec_info[count]);
749 printf("\n");
750 }
751 #endif
752
753 /*
754 * If the periph has its own error handler, call it first.
755 * If it returns a legit error value, return that, otherwise
756 * it wants us to continue with normal error processing.
757 */
758 if (periph->periph_switch->psw_error != NULL) {
759 SC_DEBUG(periph, SCSIPI_DB2,
760 ("calling private err_handler()\n"));
761 error = (*periph->periph_switch->psw_error)(xs);
762 if (error != EJUSTRETURN)
763 return (error);
764 }
765 /* otherwise use the default */
766 switch (sense->error_code & SSD_ERRCODE) {
767 /*
768 * If it's code 70, use the extended stuff and
769 * interpret the key
770 */
771 case 0x71: /* delayed error */
772 scsipi_printaddr(periph);
773 key = sense->flags & SSD_KEY;
774 printf(" DEFERRED ERROR, key = 0x%x\n", key);
775 /* FALLTHROUGH */
776 case 0x70:
777 if ((sense->error_code & SSD_ERRCODE_VALID) != 0)
778 info = _4btol(sense->info);
779 else
780 info = 0;
781 key = sense->flags & SSD_KEY;
782
783 switch (key) {
784 case SKEY_NO_SENSE:
785 case SKEY_RECOVERED_ERROR:
786 if (xs->resid == xs->datalen && xs->datalen) {
787 /*
788 * Why is this here?
789 */
790 xs->resid = 0; /* not short read */
791 }
792 case SKEY_EQUAL:
793 error = 0;
794 break;
795 case SKEY_NOT_READY:
796 if ((periph->periph_flags & PERIPH_REMOVABLE) != 0)
797 periph->periph_flags &= ~PERIPH_MEDIA_LOADED;
798 if ((xs->xs_control & XS_CTL_IGNORE_NOT_READY) != 0)
799 return (0);
800 if (sense->add_sense_code == 0x3A &&
801 sense->add_sense_code_qual == 0x00)
802 error = ENODEV; /* Medium not present */
803 else
804 error = EIO;
805 if ((xs->xs_control & XS_CTL_SILENT) != 0)
806 return (error);
807 break;
808 case SKEY_ILLEGAL_REQUEST:
809 if ((xs->xs_control &
810 XS_CTL_IGNORE_ILLEGAL_REQUEST) != 0)
811 return (0);
812 /*
813 * Handle the case where a device reports
814 * Logical Unit Not Supported during discovery.
815 */
816 if ((xs->xs_control & XS_CTL_DISCOVERY) != 0 &&
817 sense->add_sense_code == 0x25 &&
818 sense->add_sense_code_qual == 0x00)
819 return (EINVAL);
820 if ((xs->xs_control & XS_CTL_SILENT) != 0)
821 return (EIO);
822 error = EINVAL;
823 break;
824 case SKEY_UNIT_ATTENTION:
825 if (sense->add_sense_code == 0x29 &&
826 sense->add_sense_code_qual == 0x00) {
827 /* device or bus reset */
828 return (ERESTART);
829 }
830 if ((periph->periph_flags & PERIPH_REMOVABLE) != 0)
831 periph->periph_flags &= ~PERIPH_MEDIA_LOADED;
832 if ((xs->xs_control &
833 XS_CTL_IGNORE_MEDIA_CHANGE) != 0 ||
834 /* XXX Should reupload any transient state. */
835 (periph->periph_flags &
836 PERIPH_REMOVABLE) == 0) {
837 return (ERESTART);
838 }
839 if ((xs->xs_control & XS_CTL_SILENT) != 0)
840 return (EIO);
841 error = EIO;
842 break;
843 case SKEY_WRITE_PROTECT:
844 error = EROFS;
845 break;
846 case SKEY_BLANK_CHECK:
847 error = 0;
848 break;
849 case SKEY_ABORTED_COMMAND:
850 error = ERESTART;
851 break;
852 case SKEY_VOLUME_OVERFLOW:
853 error = ENOSPC;
854 break;
855 default:
856 error = EIO;
857 break;
858 }
859
860 #ifdef SCSIVERBOSE
861 if (key && (xs->xs_control & XS_CTL_SILENT) == 0)
862 scsipi_print_sense(xs, 0);
863 #else
864 if (key) {
865 scsipi_printaddr(periph);
866 printf("%s", error_mes[key - 1]);
867 if ((sense->error_code & SSD_ERRCODE_VALID) != 0) {
868 switch (key) {
869 case SKEY_NOT_READY:
870 case SKEY_ILLEGAL_REQUEST:
871 case SKEY_UNIT_ATTENTION:
872 case SKEY_WRITE_PROTECT:
873 break;
874 case SKEY_BLANK_CHECK:
875 printf(", requested size: %d (decimal)",
876 info);
877 break;
878 case SKEY_ABORTED_COMMAND:
879 if (xs->xs_retries)
880 printf(", retrying");
881 printf(", cmd 0x%x, info 0x%x",
882 xs->cmd->opcode, info);
883 break;
884 default:
885 printf(", info = %d (decimal)", info);
886 }
887 }
888 if (sense->extra_len != 0) {
889 int n;
890 printf(", data =");
891 for (n = 0; n < sense->extra_len; n++)
892 printf(" %02x",
893 sense->cmd_spec_info[n]);
894 }
895 printf("\n");
896 }
897 #endif
898 return (error);
899
900 /*
901 * Not code 70, just report it
902 */
903 default:
904 #if defined(SCSIDEBUG) || defined(DEBUG)
905 {
906 static char *uc = "undecodable sense error";
907 int i;
908 u_int8_t *cptr = (u_int8_t *) sense;
909 scsipi_printaddr(periph);
910 if (xs->cmd == &xs->cmdstore) {
911 printf("%s for opcode 0x%x, data=",
912 uc, xs->cmdstore.opcode);
913 } else {
914 printf("%s, data=", uc);
915 }
916 for (i = 0; i < sizeof (*sense); i++)
917 printf(" 0x%02x", *(cptr++) & 0xff);
918 printf("\n");
919 }
920 #else
921
922 scsipi_printaddr(periph);
923 printf("Sense Error Code 0x%x",
924 sense->error_code & SSD_ERRCODE);
925 if ((sense->error_code & SSD_ERRCODE_VALID) != 0) {
926 struct scsipi_sense_data_unextended *usense =
927 (struct scsipi_sense_data_unextended *)sense;
928 printf(" at block no. %d (decimal)",
929 _3btol(usense->block));
930 }
931 printf("\n");
932 #endif
933 return (EIO);
934 }
935 }
936
937 /*
938 * scsipi_size:
939 *
940 * Find out from the device what its capacity is.
941 */
942 u_long
943 scsipi_size(periph, flags)
944 struct scsipi_periph *periph;
945 int flags;
946 {
947 struct scsipi_read_cap_data rdcap;
948 struct scsipi_read_capacity scsipi_cmd;
949
950 bzero(&scsipi_cmd, sizeof(scsipi_cmd));
951 scsipi_cmd.opcode = READ_CAPACITY;
952
953 /*
954 * If the command works, interpret the result as a 4 byte
955 * number of blocks
956 */
957 if (scsipi_command(periph, (struct scsipi_generic *)&scsipi_cmd,
958 sizeof(scsipi_cmd), (u_char *)&rdcap, sizeof(rdcap),
959 SCSIPIRETRIES, 20000, NULL,
960 flags | XS_CTL_DATA_IN | XS_CTL_DATA_ONSTACK) != 0) {
961 scsipi_printaddr(periph);
962 printf("could not get size\n");
963 return (0);
964 }
965
966 return (_4btol(rdcap.addr) + 1);
967 }
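
/*
 * Example use (sketch only; the error handling is hypothetical): a
 * disk driver would record the block count at attach time and treat a
 * return of 0 as "capacity unknown":
 *
 *	sectors = scsipi_size(periph, XS_CTL_SILENT);
 *	if (sectors == 0)
 *		printf("drive offline or capacity unknown\n");
 */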
968
969 /*
970 * scsipi_test_unit_ready:
971 *
972 * Issue a `test unit ready' request.
973 */
974 int
975 scsipi_test_unit_ready(periph, flags)
976 struct scsipi_periph *periph;
977 int flags;
978 {
979 struct scsipi_test_unit_ready scsipi_cmd;
980
981 /* some ATAPI drives don't support TEST_UNIT_READY. Sigh */
982 if (periph->periph_quirks & PQUIRK_NOTUR)
983 return (0);
984
985 bzero(&scsipi_cmd, sizeof(scsipi_cmd));
986 scsipi_cmd.opcode = TEST_UNIT_READY;
987
988 return (scsipi_command(periph,
989 (struct scsipi_generic *)&scsipi_cmd, sizeof(scsipi_cmd),
990 0, 0, SCSIPIRETRIES, 10000, NULL, flags));
991 }
992
993 /*
994 * scsipi_inquire:
995 *
996 * Ask the device about itself.
997 */
998 int
999 scsipi_inquire(periph, inqbuf, flags)
1000 struct scsipi_periph *periph;
1001 struct scsipi_inquiry_data *inqbuf;
1002 int flags;
1003 {
1004 struct scsipi_inquiry scsipi_cmd;
1005
1006 bzero(&scsipi_cmd, sizeof(scsipi_cmd));
1007 scsipi_cmd.opcode = INQUIRY;
1008 scsipi_cmd.length = sizeof(struct scsipi_inquiry_data);
1009
1010 return (scsipi_command(periph,
1011 (struct scsipi_generic *) &scsipi_cmd, sizeof(scsipi_cmd),
1012 (u_char *) inqbuf, sizeof(struct scsipi_inquiry_data),
1013 SCSIPIRETRIES, 10000, NULL, XS_CTL_DATA_IN | flags));
1014 }
1015
1016 /*
1017 * scsipi_prevent:
1018 *
1019 * Prevent or allow the user to remove the media
1020 */
1021 int
1022 scsipi_prevent(periph, type, flags)
1023 struct scsipi_periph *periph;
1024 int type, flags;
1025 {
1026 struct scsipi_prevent scsipi_cmd;
1027
1028 if (periph->periph_quirks & PQUIRK_NODOORLOCK)
1029 return (0);
1030
1031 bzero(&scsipi_cmd, sizeof(scsipi_cmd));
1032 scsipi_cmd.opcode = PREVENT_ALLOW;
1033 scsipi_cmd.how = type;
1034
1035 return (scsipi_command(periph,
1036 (struct scsipi_generic *) &scsipi_cmd, sizeof(scsipi_cmd),
1037 0, 0, SCSIPIRETRIES, 5000, NULL, flags));
1038 }
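
/*
 * Example (hedged sketch): the disk drivers lock the door for the
 * duration of an open and unlock it again on close, ignoring errors
 * from devices that cannot comply; PR_PREVENT/PR_ALLOW are assumed to
 * be the prevent/allow values from scsipi_all.h:
 *
 *	(void) scsipi_prevent(periph, PR_PREVENT,
 *	    XS_CTL_IGNORE_ILLEGAL_REQUEST | XS_CTL_IGNORE_MEDIA_CHANGE);
 */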
1039
1040 /*
1041 * scsipi_start:
1042 *
1043 * Send a START UNIT.
1044 */
1045 int
1046 scsipi_start(periph, type, flags)
1047 struct scsipi_periph *periph;
1048 int type, flags;
1049 {
1050 struct scsipi_start_stop scsipi_cmd;
1051
1052 if (periph->periph_quirks & PQUIRK_NOSTARTUNIT)
1053 return 0;
1054
1055 bzero(&scsipi_cmd, sizeof(scsipi_cmd));
1056 scsipi_cmd.opcode = START_STOP;
1057 scsipi_cmd.byte2 = 0x00;
1058 scsipi_cmd.how = type;
1059
1060 return (scsipi_command(periph,
1061 (struct scsipi_generic *) &scsipi_cmd, sizeof(scsipi_cmd),
1062 0, 0, SCSIPIRETRIES, (type & SSS_START) ? 60000 : 10000,
1063 NULL, flags));
1064 }
1065
1066 /*
1067 * scsipi_mode_sense, scsipi_mode_sense_big:
1068 * get a sense page from a device
1069 */
1070
1071 int
1072 scsipi_mode_sense(periph, byte2, page, data, len, flags, retries, timeout)
1073 struct scsipi_periph *periph;
1074 int byte2, page, len, flags, retries, timeout;
1075 struct scsipi_mode_header *data;
1076 {
1077 struct scsipi_mode_sense scsipi_cmd;
1078 int error;
1079
1080 bzero(&scsipi_cmd, sizeof(scsipi_cmd));
1081 scsipi_cmd.opcode = MODE_SENSE;
1082 scsipi_cmd.byte2 = byte2;
1083 scsipi_cmd.page = page;
1084 if (scsipi_periph_bustype(periph) == SCSIPI_BUSTYPE_ATAPI)
1085 _lto2b(len, scsipi_cmd.u_len.atapi.length);
1086 else
1087 scsipi_cmd.u_len.scsi.length = len & 0xff;
1088 error = scsipi_command(periph, (struct scsipi_generic *)&scsipi_cmd,
1089 sizeof(scsipi_cmd), (void *)data, len, retries, timeout, NULL,
1090 flags | XS_CTL_DATA_IN);
1091 SC_DEBUG(periph, SCSIPI_DB2,
1092 ("scsipi_mode_sense: error=%d\n", error));
1093 return (error);
1094 }
1095
1096 int
1097 scsipi_mode_sense_big(periph, byte2, page, data, len, flags, retries, timeout)
1098 struct scsipi_periph *periph;
1099 int byte2, page, len, flags, retries, timeout;
1100 struct scsipi_mode_header_big *data;
1101 {
1102 struct scsipi_mode_sense_big scsipi_cmd;
1103 int error;
1104
1105 bzero(&scsipi_cmd, sizeof(scsipi_cmd));
1106 scsipi_cmd.opcode = MODE_SENSE_BIG;
1107 scsipi_cmd.byte2 = byte2;
1108 scsipi_cmd.page = page;
1109 _lto2b(len, scsipi_cmd.length);
1110 error = scsipi_command(periph, (struct scsipi_generic *)&scsipi_cmd,
1111 sizeof(scsipi_cmd), (void *)data, len, retries, timeout, NULL,
1112 flags | XS_CTL_DATA_IN);
1113 SC_DEBUG(periph, SCSIPI_DB2,
1114 ("scsipi_mode_sense_big: error=%d\n", error));
1115 return (error);
1116 }
1117
1118 int
1119 scsipi_mode_select(periph, byte2, data, len, flags, retries, timeout)
1120 struct scsipi_periph *periph;
1121 int byte2, len, flags, retries, timeout;
1122 struct scsipi_mode_header *data;
1123 {
1124 struct scsipi_mode_select scsipi_cmd;
1125 int error;
1126
1127 bzero(&scsipi_cmd, sizeof(scsipi_cmd));
1128 scsipi_cmd.opcode = MODE_SELECT;
1129 scsipi_cmd.byte2 = byte2;
1130 if (scsipi_periph_bustype(periph) == SCSIPI_BUSTYPE_ATAPI)
1131 _lto2b(len, scsipi_cmd.u_len.atapi.length);
1132 else
1133 scsipi_cmd.u_len.scsi.length = len & 0xff;
1134 error = scsipi_command(periph, (struct scsipi_generic *)&scsipi_cmd,
1135 sizeof(scsipi_cmd), (void *)data, len, retries, timeout, NULL,
1136 flags | XS_CTL_DATA_OUT);
1137 SC_DEBUG(periph, SCSIPI_DB2,
1138 ("scsipi_mode_select: error=%d\n", error));
1139 return (error);
1140 }
1141
1142 int
1143 scsipi_mode_select_big(periph, byte2, data, len, flags, retries, timeout)
1144 struct scsipi_periph *periph;
1145 int byte2, len, flags, retries, timeout;
1146 struct scsipi_mode_header_big *data;
1147 {
1148 struct scsipi_mode_select_big scsipi_cmd;
1149 int error;
1150
1151 bzero(&scsipi_cmd, sizeof(scsipi_cmd));
1152 scsipi_cmd.opcode = MODE_SELECT_BIG;
1153 scsipi_cmd.byte2 = byte2;
1154 _lto2b(len, scsipi_cmd.length);
1155 error = scsipi_command(periph, (struct scsipi_generic *)&scsipi_cmd,
1156 sizeof(scsipi_cmd), (void *)data, len, retries, timeout, NULL,
1157 flags | XS_CTL_DATA_OUT);
1158 SC_DEBUG(periph, SCSIPI_DB2,
1159 ("scsipi_mode_select: error=%d\n", error));
1160 return (error);
1161 }
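
/*
 * Example (illustrative sketch): fetching a single mode page.  The
 * returned data begins with the 4-byte SCSI mode header (8 bytes for
 * the "big" variant), followed by any block descriptors and the page
 * itself; `pg' below is a hypothetical structure laid out that way:
 *
 *	error = scsipi_mode_sense(periph, 0, page, &pg.header,
 *	    sizeof(pg), flags, SCSIPIRETRIES, 10000);
 */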
1162
1163 /*
1164 * scsipi_done:
1165 *
1166 * This routine is called by an adapter's interrupt handler when
1167 * an xfer is completed.
1168 */
1169 void
1170 scsipi_done(xs)
1171 struct scsipi_xfer *xs;
1172 {
1173 struct scsipi_periph *periph = xs->xs_periph;
1174 struct scsipi_channel *chan = periph->periph_channel;
1175 int s, freezecnt;
1176
1177 SC_DEBUG(periph, SCSIPI_DB2, ("scsipi_done\n"));
1178 #ifdef SCSIPI_DEBUG
1179 if (periph->periph_dbflags & SCSIPI_DB1)
1180 show_scsipi_cmd(xs);
1181 #endif
1182
1183 s = splbio();
1184 /*
1185 * The resource this command was using is now free.
1186 */
1187 scsipi_put_resource(chan);
1188 xs->xs_periph->periph_sent--;
1189
1190 /*
1191 * If the command was tagged, free the tag.
1192 */
1193 if (XS_CTL_TAGTYPE(xs) != 0)
1194 scsipi_put_tag(xs);
1195 else
1196 periph->periph_flags &= ~PERIPH_UNTAG;
1197
1198 /* Mark the command as `done'. */
1199 xs->xs_status |= XS_STS_DONE;
1200
1201 #ifdef DIAGNOSTIC
1202 if ((xs->xs_control & (XS_CTL_ASYNC|XS_CTL_POLL)) ==
1203 (XS_CTL_ASYNC|XS_CTL_POLL))
1204 panic("scsipi_done: ASYNC and POLL");
1205 #endif
1206
1207 /*
1208 * If the xfer had an error of any sort, freeze the
1209 * periph's queue. Freeze it again if we were requested
1210 * to do so in the xfer.
1211 */
1212 freezecnt = 0;
1213 if (xs->error != XS_NOERROR)
1214 freezecnt++;
1215 if (xs->xs_control & XS_CTL_FREEZE_PERIPH)
1216 freezecnt++;
1217 if (freezecnt != 0)
1218 scsipi_periph_freeze(periph, freezecnt);
1219
1220 /*
1221 * Record the xfer with a pending sense, in case a SCSI reset is
1222 * received before the thread is woken up.
1223 */
1224 if (xs->error == XS_BUSY && xs->status == SCSI_CHECK) {
1225 periph->periph_flags |= PERIPH_SENSE;
1226 periph->periph_xscheck = xs;
1227 }
1228
1229 /*
1230 * If this was an xfer that was not to complete asynchronously,
1231 * let the requesting thread perform error checking/handling
1232 * in its context.
1233 */
1234 if ((xs->xs_control & XS_CTL_ASYNC) == 0) {
1235 splx(s);
1236 /*
1237 * If it's a polling job, just return, to unwind the
1238 * call graph. We don't need to restart the queue,
1239 * because polling jobs are treated specially, and
1240 * are really only used during crash dumps anyway
1241 * (XXX or during boot-time autoconfiguration of
1242 * ATAPI devices).
1243 */
1244 if (xs->xs_control & XS_CTL_POLL)
1245 return;
1246 wakeup(xs);
1247 goto out;
1248 }
1249
1250 /*
1251 * Catch the extremely common case of I/O completing
1252 * without error; no use in taking a context switch
1253 * if we can handle it in interrupt context.
1254 */
1255 if (xs->error == XS_NOERROR) {
1256 splx(s);
1257 (void) scsipi_complete(xs);
1258 goto out;
1259 }
1260
1261 /*
1262 * There is an error on this xfer. Put it on the channel's
1263 * completion queue, and wake up the completion thread.
1264 */
1265 TAILQ_INSERT_TAIL(&chan->chan_complete, xs, channel_q);
1266 splx(s);
1267 wakeup(&chan->chan_complete);
1268
1269 out:
1270 /*
1271 * If there are more xfers on the channel's queue, attempt to
1272 * run them.
1273 */
1274 scsipi_run_queue(chan);
1275 }
1276
1277 /*
1278 * scsipi_complete:
1279 *
1280 * Completion of a scsipi_xfer. This is the guts of scsipi_done().
1281 *
1282 * NOTE: This routine MUST be called with valid thread context
1283 * except for the case where the following two conditions are
1284 * true:
1285 *
1286 * xs->error == XS_NOERROR
1287 * XS_CTL_ASYNC is set in xs->xs_control
1288 *
1289 * The semantics of this routine can be tricky, so here is an
1290 * explanation:
1291 *
1292 * 0 Xfer completed successfully.
1293 *
1294 * ERESTART Xfer had an error, but was restarted.
1295 *
1296 * anything else Xfer had an error, return value is Unix
1297 * errno.
1298 *
1299 * If the return value is anything but ERESTART:
1300 *
1301 * - If XS_CTL_ASYNC is set, `xs' has been freed back to
1302 * the pool.
1303 * - If there is a buf associated with the xfer,
1304 * it has been biodone()'d.
1305 */
1306 int
1307 scsipi_complete(xs)
1308 struct scsipi_xfer *xs;
1309 {
1310 struct scsipi_periph *periph = xs->xs_periph;
1311 struct scsipi_channel *chan = periph->periph_channel;
1312 struct buf *bp;
1313 int error, s;
1314
1315 #ifdef DIAGNOSTIC
1316 if ((xs->xs_control & XS_CTL_ASYNC) != 0 && xs->bp == NULL)
1317 panic("scsipi_complete: XS_CTL_ASYNC but no buf");
1318 #endif
1319 /*
1320 * If command terminated with a CHECK CONDITION, we need to issue a
1321 * REQUEST_SENSE command. Once the REQUEST_SENSE has been processed
1322 * we'll have the real status.
1323 * Must be processed at splbio() to avoid missing a SCSI bus reset
1324 * for this command.
1325 */
1326 s = splbio();
1327 if (xs->error == XS_BUSY && xs->status == SCSI_CHECK) {
1328 /* request sense for a request sense ? */
1329 if (xs->xs_control & XS_CTL_REQSENSE) {
1330 scsipi_printaddr(periph);
1331 /* XXX maybe we should reset the device ? */
1332 /* we've been frozen because xs->error != XS_NOERROR */
1333 scsipi_periph_thaw(periph, 1);
1334 splx(s);
1335 return EINVAL;
1336 }
1337 scsipi_request_sense(xs);
1338 }
1339 splx(s);
1340 /*
1341 * If it's a user level request, bypass all usual completion
1342 * processing, let the user work it out..
1343 */
1344 if ((xs->xs_control & XS_CTL_USERCMD) != 0) {
1345 SC_DEBUG(periph, SCSIPI_DB3, ("calling user done()\n"));
1346 if (xs->error != XS_NOERROR)
1347 scsipi_periph_thaw(periph, 1);
1348 scsipi_user_done(xs);
1349 SC_DEBUG(periph, SCSIPI_DB3, ("returned from user done()\n"));
1350 return 0;
1351 }
1352
1353
1354 switch (xs->error) {
1355 case XS_NOERROR:
1356 error = 0;
1357 break;
1358
1359 case XS_SENSE:
1360 case XS_SHORTSENSE:
1361 error = (*chan->chan_bustype->bustype_interpret_sense)(xs);
1362 break;
1363
1364 case XS_RESOURCE_SHORTAGE:
1365 /*
1366 * XXX Should freeze channel's queue.
1367 */
1368 scsipi_printaddr(periph);
1369 printf("adapter resource shortage\n");
1370 /* FALLTHROUGH */
1371
1372 case XS_BUSY:
1373 if (xs->error == XS_BUSY && xs->status == SCSI_QUEUE_FULL) {
1374 struct scsipi_max_openings mo;
1375
1376 /*
1377 * We set the openings to active - 1, assuming that
1378 * the command that got us here is the first one that
1379 * can't fit into the device's queue. If that's not
1380 * the case, I guess we'll find out soon enough.
1381 */
1382 mo.mo_target = periph->periph_target;
1383 mo.mo_lun = periph->periph_lun;
1384 if (periph->periph_active < periph->periph_openings)
1385 mo.mo_openings = periph->periph_active - 1;
1386 else
1387 mo.mo_openings = periph->periph_openings - 1;
1388 #ifdef DIAGNOSTIC
1389 if (mo.mo_openings < 0) {
1390 scsipi_printaddr(periph);
1391 printf("QUEUE FULL resulted in < 0 openings\n");
1392 panic("scsipi_done");
1393 }
1394 #endif
1395 if (mo.mo_openings == 0) {
1396 scsipi_printaddr(periph);
1397 printf("QUEUE FULL resulted in 0 openings\n");
1398 mo.mo_openings = 1;
1399 }
1400 scsipi_async_event(chan, ASYNC_EVENT_MAX_OPENINGS, &mo);
1401 error = ERESTART;
1402 } else if (xs->xs_retries != 0) {
1403 xs->xs_retries--;
1404 /*
1405 * Wait one second, and try again.
1406 */
1407 if (xs->xs_control & XS_CTL_POLL)
1408 delay(1000000);
1409 else {
1410 scsipi_periph_freeze(periph, 1);
1411 callout_reset(&periph->periph_callout,
1412 hz, scsipi_periph_timed_thaw, periph);
1413 }
1414 error = ERESTART;
1415 } else
1416 error = EBUSY;
1417 break;
1418
1419 case XS_REQUEUE:
1420 error = ERESTART;
1421 break;
1422
1423 case XS_TIMEOUT:
1424 if (xs->xs_retries != 0) {
1425 xs->xs_retries--;
1426 error = ERESTART;
1427 } else
1428 error = EIO;
1429 break;
1430
1431 case XS_SELTIMEOUT:
1432 /* XXX Disable device? */
1433 error = EIO;
1434 break;
1435
1436 case XS_RESET:
1437 if (xs->xs_control & XS_CTL_REQSENSE) {
1438 /*
1439 * request sense interrupted by reset: signal it
1440 * with EINTR return code.
1441 */
1442 error = EINTR;
1443 } else {
1444 if (xs->xs_retries != 0) {
1445 xs->xs_retries--;
1446 error = ERESTART;
1447 } else
1448 error = EIO;
1449 }
1450 break;
1451
1452 default:
1453 scsipi_printaddr(periph);
1454 printf("invalid return code from adapter: %d\n", xs->error);
1455 error = EIO;
1456 break;
1457 }
1458
1459 s = splbio();
1460 if (error == ERESTART) {
1461 /*
1462 * If we get here, the periph has been thawed and frozen
1463 * again if we had to issue recovery commands. Alternatively,
1464 * it may have been frozen again and in a timed thaw. In
1465 * any case, we thaw the periph once we re-enqueue the
1466 * command. Once the periph is fully thawed, it will begin
1467 * operation again.
1468 */
1469 xs->error = XS_NOERROR;
1470 xs->status = SCSI_OK;
1471 xs->xs_status &= ~XS_STS_DONE;
1472 xs->xs_requeuecnt++;
1473 error = scsipi_enqueue(xs);
1474 if (error == 0) {
1475 scsipi_periph_thaw(periph, 1);
1476 splx(s);
1477 return (ERESTART);
1478 }
1479 }
1480
1481 /*
1482 * scsipi_done() freezes the queue if not XS_NOERROR.
1483 * Thaw it here.
1484 */
1485 if (xs->error != XS_NOERROR)
1486 scsipi_periph_thaw(periph, 1);
1487
1488
1489 if (periph->periph_switch->psw_done)
1490 periph->periph_switch->psw_done(xs);
1491 if ((bp = xs->bp) != NULL) {
1492 if (error) {
1493 bp->b_error = error;
1494 bp->b_flags |= B_ERROR;
1495 bp->b_resid = bp->b_bcount;
1496 } else {
1497 bp->b_error = 0;
1498 bp->b_resid = xs->resid;
1499 }
1500 biodone(bp);
1501 }
1502
1503 if (xs->xs_control & XS_CTL_ASYNC)
1504 scsipi_put_xs(xs);
1505 splx(s);
1506
1507 return (error);
1508 }
1509
1510 /*
1511 * Issue a request sense for the given scsipi_xfer. Called when the xfer
1512 * returns with a CHECK_CONDITION status. Must be called in valid thread
1513 * context and at splbio().
1514 */
1515
1516 void
1517 scsipi_request_sense(xs)
1518 struct scsipi_xfer *xs;
1519 {
1520 struct scsipi_periph *periph = xs->xs_periph;
1521 int flags, error;
1522 struct scsipi_sense cmd;
1523
1524 periph->periph_flags |= PERIPH_SENSE;
1525
1526 /* if command was polling, request sense will too */
1527 flags = xs->xs_control & XS_CTL_POLL;
1528 /* Polling commands can't sleep */
1529 if (flags)
1530 flags |= XS_CTL_NOSLEEP;
1531
1532 flags |= XS_CTL_REQSENSE | XS_CTL_URGENT | XS_CTL_DATA_IN |
1533 XS_CTL_THAW_PERIPH | XS_CTL_FREEZE_PERIPH;
1534
1535 bzero(&cmd, sizeof(cmd));
1536 cmd.opcode = REQUEST_SENSE;
1537 cmd.length = sizeof(struct scsipi_sense_data);
1538
1539 error = scsipi_command(periph,
1540 (struct scsipi_generic *) &cmd, sizeof(cmd),
1541 (u_char*)&xs->sense.scsi_sense, sizeof(struct scsipi_sense_data),
1542 0, 1000, NULL, flags);
1543 periph->periph_flags &= ~PERIPH_SENSE;
1544 periph->periph_xscheck = NULL;
1545 switch(error) {
1546 case 0:
1547 /* we have a valid sense */
1548 xs->error = XS_SENSE;
1549 return;
1550 case EINTR:
1551 /* REQUEST_SENSE interrupted by bus reset. */
1552 xs->error = XS_RESET;
1553 return;
1554 case EIO:
1555 /* request sense couldn't be performed */
1556 /*
1557 * XXX this isn't quite right but we don't have anything
1558 * better for now
1559 */
1560 xs->error = XS_DRIVER_STUFFUP;
1561 return;
1562 default:
1563 /* Notify that request sense failed. */
1564 xs->error = XS_DRIVER_STUFFUP;
1565 scsipi_printaddr(periph);
1566 printf("request sense failed with error %d\n", error);
1567 return;
1568 }
1569 }
1570
1571 /*
1572 * scsipi_enqueue:
1573 *
1574 * Enqueue an xfer on a channel.
1575 */
1576 int
1577 scsipi_enqueue(xs)
1578 struct scsipi_xfer *xs;
1579 {
1580 struct scsipi_channel *chan = xs->xs_periph->periph_channel;
1581 struct scsipi_xfer *qxs;
1582 int s;
1583
1584 s = splbio();
1585
1586 /*
1587 * If the xfer is to be polled, and there are already jobs on
1588 * the queue, we can't proceed.
1589 */
1590 if ((xs->xs_control & XS_CTL_POLL) != 0 &&
1591 TAILQ_FIRST(&chan->chan_queue) != NULL) {
1592 splx(s);
1593 xs->error = XS_DRIVER_STUFFUP;
1594 return (EAGAIN);
1595 }
1596
1597 /*
1598 * If we have an URGENT xfer, it's an error recovery command
1599 * and it should just go on the head of the channel's queue.
1600 */
1601 if (xs->xs_control & XS_CTL_URGENT) {
1602 TAILQ_INSERT_HEAD(&chan->chan_queue, xs, channel_q);
1603 goto out;
1604 }
1605
1606 /*
1607 * If this xfer has already been on the queue before, we
1608 * need to reinsert it in the correct order. That order is:
1609 *
1610 * Immediately before the first xfer for this periph
1611 * with a requeuecnt less than xs->xs_requeuecnt.
1612 *
1613 * Failing that, at the end of the queue. (We'll end up
1614 * there naturally.)
1615 */
1616 if (xs->xs_requeuecnt != 0) {
1617 for (qxs = TAILQ_FIRST(&chan->chan_queue); qxs != NULL;
1618 qxs = TAILQ_NEXT(qxs, channel_q)) {
1619 if (qxs->xs_periph == xs->xs_periph &&
1620 qxs->xs_requeuecnt < xs->xs_requeuecnt)
1621 break;
1622 }
1623 if (qxs != NULL) {
1624 TAILQ_INSERT_AFTER(&chan->chan_queue, qxs, xs,
1625 channel_q);
1626 goto out;
1627 }
1628 }
1629 TAILQ_INSERT_TAIL(&chan->chan_queue, xs, channel_q);
1630 out:
1631 if (xs->xs_control & XS_CTL_THAW_PERIPH)
1632 scsipi_periph_thaw(xs->xs_periph, 1);
1633 splx(s);
1634 return (0);
1635 }
1636
1637 /*
1638 * scsipi_run_queue:
1639 *
1640 * Start as many xfers as possible running on the channel.
1641 */
1642 void
1643 scsipi_run_queue(chan)
1644 struct scsipi_channel *chan;
1645 {
1646 struct scsipi_xfer *xs;
1647 struct scsipi_periph *periph;
1648 int s;
1649
1650 for (;;) {
1651 s = splbio();
1652
1653 /*
1654 * If the channel is frozen, we can't do any work right
1655 * now.
1656 */
1657 if (chan->chan_qfreeze != 0) {
1658 splx(s);
1659 return;
1660 }
1661
1662 /*
1663 * Look for work to do, and make sure we can do it.
1664 */
1665 for (xs = TAILQ_FIRST(&chan->chan_queue); xs != NULL;
1666 xs = TAILQ_NEXT(xs, channel_q)) {
1667 periph = xs->xs_periph;
1668
1669 if ((periph->periph_sent >= periph->periph_openings) ||
1670 periph->periph_qfreeze != 0 ||
1671 (periph->periph_flags & PERIPH_UNTAG) != 0)
1672 continue;
1673
1674 if ((periph->periph_flags &
1675 (PERIPH_RECOVERING | PERIPH_SENSE)) != 0 &&
1676 (xs->xs_control & XS_CTL_URGENT) == 0)
1677 continue;
1678
1679 /*
1680 * We can issue this xfer!
1681 */
1682 goto got_one;
1683 }
1684
1685 /*
1686 * Can't find any work to do right now.
1687 */
1688 splx(s);
1689 return;
1690
1691 got_one:
1692 /*
1693 * Have an xfer to run. Allocate a resource from
1694 * the adapter to run it. If we can't allocate that
1695 * resource, we don't dequeue the xfer.
1696 */
1697 if (scsipi_get_resource(chan) == 0) {
1698 /*
1699 * Adapter is out of resources. If the adapter
1700 * supports it, attempt to grow them.
1701 */
1702 if (scsipi_grow_resources(chan) == 0) {
1703 /*
1704 * Wasn't able to grow resources,
1705 * nothing more we can do.
1706 */
1707 if (xs->xs_control & XS_CTL_POLL) {
1708 scsipi_printaddr(xs->xs_periph);
1709 printf("polling command but no "
1710 "adapter resources");
1711 /* We'll panic shortly... */
1712 }
1713 splx(s);
1714
1715 /*
1716 * XXX: We should be able to note that
1717 * XXX: resources are needed here!
1718 */
1719 return;
1720 }
1721 /*
1722 * scsipi_grow_resources() allocated the resource
1723 * for us.
1724 */
1725 }
1726
1727 /*
1728 * We have a resource to run this xfer, do it!
1729 */
1730 TAILQ_REMOVE(&chan->chan_queue, xs, channel_q);
1731
1732 /*
1733 * If the command is to be tagged, allocate a tag ID
1734 * for it.
1735 */
1736 if (XS_CTL_TAGTYPE(xs) != 0)
1737 scsipi_get_tag(xs);
1738 else
1739 periph->periph_flags |= PERIPH_UNTAG;
1740 periph->periph_sent++;
1741 splx(s);
1742
1743 scsipi_adapter_request(chan, ADAPTER_REQ_RUN_XFER, xs);
1744 }
1745 #ifdef DIAGNOSTIC
1746 panic("scsipi_run_queue: impossible");
1747 #endif
1748 }
1749
1750 /*
1751 * scsipi_execute_xs:
1752 *
1753 * Begin execution of an xfer, waiting for it to complete, if necessary.
1754 */
1755 int
1756 scsipi_execute_xs(xs)
1757 struct scsipi_xfer *xs;
1758 {
1759 struct scsipi_periph *periph = xs->xs_periph;
1760 struct scsipi_channel *chan = periph->periph_channel;
1761 int async, poll, retries, error, s;
1762
1763 xs->xs_status &= ~XS_STS_DONE;
1764 xs->error = XS_NOERROR;
1765 xs->resid = xs->datalen;
1766 xs->status = SCSI_OK;
1767
1768 #ifdef SCSIPI_DEBUG
1769 if (xs->xs_periph->periph_dbflags & SCSIPI_DB3) {
1770 printf("scsipi_execute_xs: ");
1771 show_scsipi_xs(xs);
1772 printf("\n");
1773 }
1774 #endif
1775
1776 /*
1777 * Deal with command tagging:
1778 *
1779 * - If the device's current operating mode doesn't
1780 * include tagged queueing, clear the tag mask.
1781 *
1782 * - If the device's current operating mode *does*
1783 * include tagged queueing, set the tag_type in
1784 * the xfer to the appropriate byte for the tag
1785 * message.
1786 */
1787 if ((PERIPH_XFER_MODE(periph) & PERIPH_CAP_TQING) == 0 ||
1788 (xs->xs_control & XS_CTL_REQSENSE)) {
1789 xs->xs_control &= ~XS_CTL_TAGMASK;
1790 xs->xs_tag_type = 0;
1791 } else {
1792 /*
1793 * If the request doesn't specify a tag, give Head
1794 * tags to URGENT operations and Ordered tags to
1795 * everything else.
1796 */
1797 if (XS_CTL_TAGTYPE(xs) == 0) {
1798 if (xs->xs_control & XS_CTL_URGENT)
1799 xs->xs_control |= XS_CTL_HEAD_TAG;
1800 else
1801 xs->xs_control |= XS_CTL_ORDERED_TAG;
1802 }
1803
1804 switch (XS_CTL_TAGTYPE(xs)) {
1805 case XS_CTL_ORDERED_TAG:
1806 xs->xs_tag_type = MSG_ORDERED_Q_TAG;
1807 break;
1808
1809 case XS_CTL_SIMPLE_TAG:
1810 xs->xs_tag_type = MSG_SIMPLE_Q_TAG;
1811 break;
1812
1813 case XS_CTL_HEAD_TAG:
1814 xs->xs_tag_type = MSG_HEAD_OF_Q_TAG;
1815 break;
1816
1817 default:
1818 scsipi_printaddr(periph);
1819 printf("invalid tag mask 0x%08x\n",
1820 XS_CTL_TAGTYPE(xs));
1821 panic("scsipi_execute_xs");
1822 }
1823 }
1824
1825 /*
1826 * If we don't yet have a completion thread, or we are to poll for
1827 * completion, clear the ASYNC flag.
1828 */
1829 if (chan->chan_thread == NULL || (xs->xs_control & XS_CTL_POLL) != 0)
1830 xs->xs_control &= ~XS_CTL_ASYNC;
1831
1832 async = (xs->xs_control & XS_CTL_ASYNC);
1833 poll = (xs->xs_control & XS_CTL_POLL);
1834 retries = xs->xs_retries; /* for polling commands */
1835
1836 #ifdef DIAGNOSTIC
1837 if (async != 0 && xs->bp == NULL)
1838 panic("scsipi_execute_xs: XS_CTL_ASYNC but no buf");
1839 #endif
1840
1841 /*
1842 * Enqueue the transfer. If we're not polling for completion, this
1843 * should ALWAYS return `no error'.
1844 */
1845 try_again:
1846 error = scsipi_enqueue(xs);
1847 if (error) {
1848 if (poll == 0) {
1849 scsipi_printaddr(periph);
1850 printf("not polling, but enqueue failed with %d\n",
1851 error);
1852 panic("scsipi_execute_xs");
1853 }
1854
1855 scsipi_printaddr(periph);
1856 printf("failed to enqueue polling command");
1857 if (retries != 0) {
1858 printf(", retrying...\n");
1859 delay(1000000);
1860 retries--;
1861 goto try_again;
1862 }
1863 printf("\n");
1864 goto free_xs;
1865 }
1866
1867 restarted:
1868 scsipi_run_queue(chan);
1869
1870 /*
1871 * The xfer is enqueued, and possibly running. If it's to be
1872 * completed asynchronously, just return now.
1873 */
1874 if (async)
1875 return (EJUSTRETURN);
1876
1877 /*
1878 * Not an asynchronous command; wait for it to complete.
1879 */
1880 s = splbio();
1881 while ((xs->xs_status & XS_STS_DONE) == 0) {
1882 if (poll) {
1883 scsipi_printaddr(periph);
1884 printf("polling command not done\n");
1885 panic("scsipi_execute_xs");
1886 }
1887 (void) tsleep(xs, PRIBIO, "xscmd", 0);
1888 }
1889 splx(s);
1890
1891 /*
1892 * Command is complete. scsipi_done() has awakened us to perform
1893 * the error handling.
1894 */
1895 error = scsipi_complete(xs);
1896 if (error == ERESTART)
1897 goto restarted;
1898
1899 /*
1900 * Command completed successfully or fatal error occurred. Fall
1901 * into....
1902 */
1903 free_xs:
1904 s = splbio();
1905 scsipi_put_xs(xs);
1906 splx(s);
1907
1908 /*
1909 * Kick the queue, keep it running in case it stopped for some
1910 * reason.
1911 */
1912 scsipi_run_queue(chan);
1913
1914 return (error);
1915 }
1916
1917 /*
1918 * scsipi_completion_thread:
1919 *
1920 * This is the completion thread. We wait for errors on
1921 * asynchronous xfers, and perform the error handling
1922 * function, restarting the command, if necessary.
1923 */
1924 void
1925 scsipi_completion_thread(arg)
1926 void *arg;
1927 {
1928 struct scsipi_channel *chan = arg;
1929 struct scsipi_xfer *xs;
1930 int s;
1931
1932 for (;;) {
1933 s = splbio();
1934 xs = TAILQ_FIRST(&chan->chan_complete);
1935 if (xs == NULL &&
1936 (chan->chan_flags & SCSIPI_CHAN_SHUTDOWN) == 0) {
1937 (void) tsleep(&chan->chan_complete, PRIBIO,
1938 "sccomp", 0);
1939 splx(s);
1940 continue;
1941 }
1942 if (chan->chan_flags & SCSIPI_CHAN_SHUTDOWN) {
1943 splx(s);
1944 break;
1945 }
1946 TAILQ_REMOVE(&chan->chan_complete, xs, channel_q);
1947 splx(s);
1948
1949 /*
1950 * Have an xfer with an error; process it.
1951 */
1952 (void) scsipi_complete(xs);
1953
1954 /*
1955 * Kick the queue; keep it running if it was stopped
1956 * for some reason.
1957 */
1958 scsipi_run_queue(chan);
1959 }
1960
1961 chan->chan_thread = NULL;
1962
1963 /* In case parent is waiting for us to exit. */
1964 wakeup(&chan->chan_thread);
1965
1966 kthread_exit(0);
1967 }
1968
1969 /*
1970 * scsipi_create_completion_thread:
1971 *
1972 * Callback to actually create the completion thread.
1973 */
1974 void
1975 scsipi_create_completion_thread(arg)
1976 void *arg;
1977 {
1978 struct scsipi_channel *chan = arg;
1979 struct scsipi_adapter *adapt = chan->chan_adapter;
1980
1981 if (kthread_create1(scsipi_completion_thread, chan,
1982 &chan->chan_thread, "%s:%d", adapt->adapt_dev->dv_xname,
1983 chan->chan_channel)) {
1984 printf("%s: unable to create completion thread for "
1985 "channel %d\n", adapt->adapt_dev->dv_xname,
1986 chan->chan_channel);
1987 panic("scsipi_create_completion_thread");
1988 }
1989 }
1990
1991 /*
1992 * scsipi_async_event:
1993 *
1994 * Handle an asynchronous event from an adapter.
1995 */
1996 void
1997 scsipi_async_event(chan, event, arg)
1998 struct scsipi_channel *chan;
1999 scsipi_async_event_t event;
2000 void *arg;
2001 {
2002 int s;
2003
2004 s = splbio();
2005 switch (event) {
2006 case ASYNC_EVENT_MAX_OPENINGS:
2007 scsipi_async_event_max_openings(chan,
2008 (struct scsipi_max_openings *)arg);
2009 break;
2010
2011 case ASYNC_EVENT_XFER_MODE:
2012 scsipi_async_event_xfer_mode(chan,
2013 (struct scsipi_xfer_mode *)arg);
2014 break;
2015 case ASYNC_EVENT_RESET:
2016 scsipi_async_event_channel_reset(chan);
2017 break;
2018 }
2019 splx(s);
2020 }
2021
2022 /*
2023 * scsipi_print_xfer_mode:
2024 *
2025 * Print a periph's capabilities.
2026 */
2027 void
2028 scsipi_print_xfer_mode(periph)
2029 struct scsipi_periph *periph;
2030 {
2031 int period, freq, speed, mbs;
2032
2033 if ((periph->periph_flags & PERIPH_MODE_VALID) == 0)
2034 return;
2035
2036 printf("%s: ", periph->periph_dev->dv_xname);
2037 if (periph->periph_mode & PERIPH_CAP_SYNC) {
2038 period = scsipi_sync_factor_to_period(periph->periph_period);
2039 printf("sync (%d.%dns offset %d)",
2040 period / 10, period % 10, periph->periph_offset);
2041 } else
2042 printf("async");
2043
2044 if (periph->periph_mode & PERIPH_CAP_WIDE32)
2045 printf(", 32-bit");
2046 else if (periph->periph_mode & PERIPH_CAP_WIDE16)
2047 printf(", 16-bit");
2048 else
2049 printf(", 8-bit");
2050
2051 if (periph->periph_mode & PERIPH_CAP_SYNC) {
2052 freq = scsipi_sync_factor_to_freq(periph->periph_period);
2053 speed = freq;
2054 if (periph->periph_mode & PERIPH_CAP_WIDE32)
2055 speed *= 4;
2056 else if (periph->periph_mode & PERIPH_CAP_WIDE16)
2057 speed *= 2;
2058 mbs = speed / 1000;
2059 if (mbs > 0)
2060 printf(" (%d.%03dMB/s)", mbs, speed % 1000);
2061 else
2062 printf(" (%dKB/s)", speed % 1000);
2063 }
2064
2065 printf(" transfers");
2066
2067 if (periph->periph_mode & PERIPH_CAP_TQING)
2068 printf(", tagged queueing");
2069
2070 printf("\n");
2071 }
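
/*
 * For illustration: a target negotiated to sync factor 0x0a (25.0 ns,
 * 40000 kHz) at offset 15, 16 bits wide, with tagged queueing, would
 * be announced by the routine above as (on one line; the device name
 * is just an example):
 *
 *	sd0: sync (25.0ns offset 15), 16-bit (80.000MB/s) transfers,
 *	tagged queueing
 */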
2072
2073 /*
2074 * scsipi_async_event_max_openings:
2075 *
2076 * Update the maximum number of outstanding commands a
2077 * device may have.
2078 */
2079 void
2080 scsipi_async_event_max_openings(chan, mo)
2081 struct scsipi_channel *chan;
2082 struct scsipi_max_openings *mo;
2083 {
2084 struct scsipi_periph *periph;
2085 int minlun, maxlun;
2086
2087 if (mo->mo_lun == -1) {
2088 /*
2089 * Wildcarded; apply it to all LUNs.
2090 */
2091 minlun = 0;
2092 maxlun = chan->chan_nluns - 1;
2093 } else
2094 minlun = maxlun = mo->mo_lun;
2095
2096 for (; minlun <= maxlun; minlun++) {
2097 periph = scsipi_lookup_periph(chan, mo->mo_target, minlun);
2098 if (periph == NULL)
2099 continue;
2100
2101 if (mo->mo_openings < periph->periph_openings)
2102 periph->periph_openings = mo->mo_openings;
2103 else if (mo->mo_openings > periph->periph_openings &&
2104 (periph->periph_flags & PERIPH_GROW_OPENINGS) != 0)
2105 periph->periph_openings = mo->mo_openings;
2106 }
2107 }
2108
2109 /*
2110 * scsipi_async_event_xfer_mode:
2111 *
2112 * Update the xfer mode for all periphs sharing the
2113 * specified I_T Nexus.
2114 */
2115 void
2116 scsipi_async_event_xfer_mode(chan, xm)
2117 struct scsipi_channel *chan;
2118 struct scsipi_xfer_mode *xm;
2119 {
2120 struct scsipi_periph *periph;
2121 int lun, announce, mode, period, offset;
2122
2123 for (lun = 0; lun < chan->chan_nluns; lun++) {
2124 periph = scsipi_lookup_periph(chan, xm->xm_target, lun);
2125 if (periph == NULL)
2126 continue;
2127 announce = 0;
2128
2129 /*
2130 * Clamp the xfer mode down to this periph's capabilities.
2131 */
2132 mode = xm->xm_mode & periph->periph_cap;
2133 if (mode & PERIPH_CAP_SYNC) {
2134 period = xm->xm_period;
2135 offset = xm->xm_offset;
2136 } else {
2137 period = 0;
2138 offset = 0;
2139 }
2140
2141 /*
2142 * If we do not have a valid xfer mode yet, or the parameters
2143 * are different, announce them.
2144 */
2145 if ((periph->periph_flags & PERIPH_MODE_VALID) == 0 ||
2146 periph->periph_mode != mode ||
2147 periph->periph_period != period ||
2148 periph->periph_offset != offset)
2149 announce = 1;
2150
2151 periph->periph_mode = mode;
2152 periph->periph_period = period;
2153 periph->periph_offset = offset;
2154 periph->periph_flags |= PERIPH_MODE_VALID;
2155
2156 if (announce)
2157 scsipi_print_xfer_mode(periph);
2158 }
2159 }
2160
2161 /*
2162 * scsipi_set_xfer_mode:
2163 *
2164 * Set the xfer mode for the specified I_T Nexus.
2165 */
2166 void
2167 scsipi_set_xfer_mode(chan, target, immed)
2168 struct scsipi_channel *chan;
2169 int target, immed;
2170 {
2171 struct scsipi_xfer_mode xm;
2172 struct scsipi_periph *itperiph;
2173 int lun, s;
2174
2175 /*
2176 * Go to the minimal xfer mode.
2177 */
2178 xm.xm_target = target;
2179 xm.xm_mode = 0;
2180 xm.xm_period = 0; /* ignored */
2181 xm.xm_offset = 0; /* ignored */
2182
2183 /*
2184 * Find the first LUN we know about on this I_T Nexus.
2185 */
2186 for (lun = 0; lun < chan->chan_nluns; lun++) {
2187 itperiph = scsipi_lookup_periph(chan, target, lun);
2188 if (itperiph != NULL)
2189 break;
2190 }
2191 if (itperiph != NULL)
2192 xm.xm_mode = itperiph->periph_cap;
2193
2194 /*
2195 * Now issue the request to the adapter.
2196 */
2197 s = splbio();
2198 scsipi_adapter_request(chan, ADAPTER_REQ_SET_XFER_MODE, &xm);
2199 splx(s);
2200
2201 /*
2202 * If we want this to happen immediately, issue a dummy command,
2203 * since most adapters can't really negotiate unless they're
2204 * executing a job.
2205 */
2206 if (immed != 0 && itperiph != NULL) {
2207 (void) scsipi_test_unit_ready(itperiph,
2208 XS_CTL_DISCOVERY | XS_CTL_IGNORE_ILLEGAL_REQUEST |
2209 XS_CTL_IGNORE_NOT_READY |
2210 XS_CTL_IGNORE_MEDIA_CHANGE);
2211 }
2212 }
2213
2214 /*
2215 * scsipi_async_event_channel_reset:
2216 *
2217 * Handle a SCSI bus reset.
2218 * Called at splbio().
2219 */
2220 void
2221 scsipi_async_event_channel_reset(chan)
2222 struct scsipi_channel *chan;
2223 {
2224 struct scsipi_xfer *xs, *xs_next;
2225 struct scsipi_periph *periph;
2226 int target, lun;
2227
2228 /*
2229 * Channel has been reset. Also mark pending REQUEST_SENSE commands
2230 * as reset, since their sense data is no longer available. We
2231 * can't call scsipi_done() from here, as the command has not been
2232 * sent to the adapter yet (that would corrupt the accounting).
2233 */
2234
2235 for (xs = TAILQ_FIRST(&chan->chan_queue); xs != NULL; xs = xs_next) {
2236 xs_next = TAILQ_NEXT(xs, channel_q);
2237 if (xs->xs_control & XS_CTL_REQSENSE) {
2238 TAILQ_REMOVE(&chan->chan_queue, xs, channel_q);
2239 xs->error = XS_RESET;
2240 if ((xs->xs_control & XS_CTL_ASYNC) != 0)
2241 TAILQ_INSERT_TAIL(&chan->chan_complete, xs,
2242 channel_q);
2243 }
2244 }
2245 wakeup(&chan->chan_complete);
2246 /* Catch xs with pending sense which may not have a REQSENSE xs yet */
2247 for (target = 0; target < chan->chan_ntargets; target++) {
2248 if (target == chan->chan_id)
2249 continue;
2250 for (lun = 0; lun < chan->chan_nluns; lun++) {
2251 periph = chan->chan_periphs[target][lun];
2252 if (periph) {
2253 xs = periph->periph_xscheck;
2254 if (xs)
2255 xs->error = XS_RESET;
2256 }
2257 }
2258 }
2259 }
2260
2261
2262 /*
2263 * scsipi_adapter_addref:
2264 *
2265 * Add a reference to the adapter pointed to by the provided
2266 * link, enabling the adapter if necessary.
2267 */
2268 int
2269 scsipi_adapter_addref(adapt)
2270 struct scsipi_adapter *adapt;
2271 {
2272 int s, error = 0;
2273
2274 s = splbio();
2275 if (adapt->adapt_refcnt++ == 0 && adapt->adapt_enable != NULL) {
2276 error = (*adapt->adapt_enable)(adapt->adapt_dev, 1);
2277 if (error)
2278 adapt->adapt_refcnt--;
2279 }
2280 splx(s);
2281 return (error);
2282 }
2283
2284 /*
2285 * scsipi_adapter_delref:
2286 *
2287 * Delete a reference to the adapter pointed to by the provided
2288 * link, disabling the adapter if possible.
2289 */
2290 void
2291 scsipi_adapter_delref(adapt)
2292 struct scsipi_adapter *adapt;
2293 {
2294 int s;
2295
2296 s = splbio();
2297 if (adapt->adapt_refcnt-- == 1 && adapt->adapt_enable != NULL)
2298 (void) (*adapt->adapt_enable)(adapt->adapt_dev, 0);
2299 splx(s);
2300 }
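
/*
 * Sketch of the expected reference discipline (illustrative only): a
 * periph driver brackets device usage so the adapter can be powered
 * down again once the last reference is dropped:
 *
 *	if ((error = scsipi_adapter_addref(adapt)) != 0)
 *		return (error);
 *	... use the device ...
 *	scsipi_adapter_delref(adapt);
 */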
2301
2302 struct scsipi_syncparam {
2303 int ss_factor;
2304 int ss_period; /* ns * 10 */
2305 } scsipi_syncparams[] = {
2306 { 0x0a, 250 },
2307 { 0x0b, 303 },
2308 { 0x0c, 500 },
2309 };
2310 const int scsipi_nsyncparams =
2311 sizeof(scsipi_syncparams) / sizeof(scsipi_syncparams[0]);
2312
2313 int
2314 scsipi_sync_period_to_factor(period)
2315 int period; /* ns * 10 */
2316 {
2317 int i;
2318
2319 for (i = 0; i < scsipi_nsyncparams; i++) {
2320 if (period <= scsipi_syncparams[i].ss_period)
2321 return (scsipi_syncparams[i].ss_factor);
2322 }
2323
2324 return ((period / 10) / 4);
2325 }
2326
2327 int
2328 scsipi_sync_factor_to_period(factor)
2329 int factor;
2330 {
2331 int i;
2332
2333 for (i = 0; i < scsipi_nsyncparams; i++) {
2334 if (factor == scsipi_syncparams[i].ss_factor)
2335 return (scsipi_syncparams[i].ss_period);
2336 }
2337
2338 return ((factor * 4) * 10);
2339 }
2340
2341 int
2342 scsipi_sync_factor_to_freq(factor)
2343 int factor;
2344 {
2345 int i;
2346
2347 for (i = 0; i < scsipi_nsyncparams; i++) {
2348 if (factor == scsipi_syncparams[i].ss_factor)
2349 return (10000000 / scsipi_syncparams[i].ss_period);
2350 }
2351
2352 return (10000000 / ((factor * 4) * 10));
2353 }
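
/*
 * Worked example of the conversions above: factor 0x0b is in the
 * table with a period of 303 (30.3 ns), so
 * scsipi_sync_factor_to_freq(0x0b) returns 10000000 / 303 = 33003,
 * i.e. ~33 MHz in kHz units.  A factor outside the table, say 0x19
 * (25), falls through to the generic rule: period = 25 * 4 * 10 =
 * 1000 (100.0 ns), giving 10000 kHz.
 */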
2354
2355 #ifdef SCSIPI_DEBUG
2356 /*
2357 * Given a scsipi_xfer, dump the request, in all its glory
2358 */
2359 void
2360 show_scsipi_xs(xs)
2361 struct scsipi_xfer *xs;
2362 {
2363
2364 printf("xs(%p): ", xs);
2365 printf("xs_control(0x%08x)", xs->xs_control);
2366 printf("xs_status(0x%08x)", xs->xs_status);
2367 printf("periph(%p)", xs->xs_periph);
2368 printf("retr(0x%x)", xs->xs_retries);
2369 printf("timo(0x%x)", xs->timeout);
2370 printf("cmd(%p)", xs->cmd);
2371 printf("len(0x%x)", xs->cmdlen);
2372 printf("data(%p)", xs->data);
2373 printf("len(0x%x)", xs->datalen);
2374 printf("res(0x%x)", xs->resid);
2375 printf("err(0x%x)", xs->error);
2376 printf("bp(%p)", xs->bp);
2377 show_scsipi_cmd(xs);
2378 }
2379
2380 void
2381 show_scsipi_cmd(xs)
2382 struct scsipi_xfer *xs;
2383 {
2384 u_char *b = (u_char *) xs->cmd;
2385 int i = 0;
2386
2387 scsipi_printaddr(xs->xs_periph);
2388 printf(" command: ");
2389
2390 if ((xs->xs_control & XS_CTL_RESET) == 0) {
2391 while (i < xs->cmdlen) {
2392 if (i)
2393 printf(",");
2394 printf("0x%x", b[i++]);
2395 }
2396 printf("-[%d bytes]\n", xs->datalen);
2397 if (xs->datalen)
2398 show_mem(xs->data, min(64, xs->datalen));
2399 } else
2400 printf("-RESET-\n");
2401 }
2402
2403 void
2404 show_mem(address, num)
2405 u_char *address;
2406 int num;
2407 {
2408 int x;
2409
2410 printf("------------------------------");
2411 for (x = 0; x < num; x++) {
2412 if ((x % 16) == 0)
2413 printf("\n%03d: ", x);
2414 printf("%02x ", *address++);
2415 }
2416 printf("\n------------------------------\n");
2417 }
2418 #endif /* SCSIPI_DEBUG */
2419