/*	$NetBSD: scsipi_base.c,v 1.53 2001/08/20 07:47:01 ad Exp $	*/

/*-
 * Copyright (c) 1998, 1999, 2000 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Charles M. Hannum; by Jason R. Thorpe of the Numerical Aerospace
 * Simulation Facility, NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "opt_scsi.h"

#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/buf.h>
#include <sys/uio.h>
#include <sys/malloc.h>
#include <sys/pool.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/proc.h>
#include <sys/kthread.h>

#include <dev/scsipi/scsipi_all.h>
#include <dev/scsipi/scsipi_disk.h>
#include <dev/scsipi/scsipiconf.h>
#include <dev/scsipi/scsipi_base.h>

#include <dev/scsipi/scsi_all.h>
#include <dev/scsipi/scsi_message.h>
int	scsipi_complete __P((struct scsipi_xfer *));
void	scsipi_request_sense __P((struct scsipi_xfer *));
int	scsipi_enqueue __P((struct scsipi_xfer *));
void	scsipi_run_queue __P((struct scsipi_channel *chan));

void	scsipi_completion_thread __P((void *));

void	scsipi_get_tag __P((struct scsipi_xfer *));
void	scsipi_put_tag __P((struct scsipi_xfer *));

int	scsipi_get_resource __P((struct scsipi_channel *));
void	scsipi_put_resource __P((struct scsipi_channel *));
__inline int	scsipi_grow_resources __P((struct scsipi_channel *));

void	scsipi_async_event_max_openings __P((struct scsipi_channel *,
	    struct scsipi_max_openings *));
void	scsipi_async_event_xfer_mode __P((struct scsipi_channel *,
	    struct scsipi_xfer_mode *));
void	scsipi_async_event_channel_reset __P((struct scsipi_channel *));

struct pool scsipi_xfer_pool;

/*
 * scsipi_init:
 *
 *	Called when a scsibus or atapibus is attached to the system
 *	to initialize shared data structures.
 */
void
scsipi_init()
{
	static int scsipi_init_done;

	if (scsipi_init_done)
		return;
	scsipi_init_done = 1;

	/* Initialize the scsipi_xfer pool. */
	pool_init(&scsipi_xfer_pool, sizeof(struct scsipi_xfer), 0,
	    0, 0, "scxspl", 0, NULL, NULL, M_DEVBUF);
}

/*
 * scsipi_channel_init:
 *
 *	Initialize a scsipi_channel when it is attached.
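 *
 *	chan_periphs is set up as a two-level table indexed by
 *	[target][lun]: an array of chan_ntargets pointers, each
 *	pointing to an array of chan_nluns periph pointers.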
 */
int
scsipi_channel_init(chan)
	struct scsipi_channel *chan;
{
	size_t nbytes;
	int i;

	/* Initialize shared data. */
	scsipi_init();

	/* Initialize the queues. */
	TAILQ_INIT(&chan->chan_queue);
	TAILQ_INIT(&chan->chan_complete);

	nbytes = chan->chan_ntargets * sizeof(struct scsipi_periph **);
	chan->chan_periphs = malloc(nbytes, M_DEVBUF, M_NOWAIT);
	if (chan->chan_periphs == NULL)
		return (ENOMEM);

	nbytes = chan->chan_nluns * sizeof(struct scsipi_periph *);
	for (i = 0; i < chan->chan_ntargets; i++) {
		chan->chan_periphs[i] = malloc(nbytes, M_DEVBUF, M_NOWAIT);
		if (chan->chan_periphs[i] == NULL) {
			while (--i >= 0) {
				free(chan->chan_periphs[i], M_DEVBUF);
			}
			/* Don't leak the target array itself. */
			free(chan->chan_periphs, M_DEVBUF);
			return (ENOMEM);
		}
		memset(chan->chan_periphs[i], 0, nbytes);
	}

	/*
	 * Create the asynchronous completion thread.
	 */
	kthread_create(scsipi_create_completion_thread, chan);
	return (0);
}

/*
 * scsipi_channel_shutdown:
 *
 *	Shutdown a scsipi_channel.
 */
void
scsipi_channel_shutdown(chan)
	struct scsipi_channel *chan;
{

	/*
	 * Shut down the completion thread.
	 */
	chan->chan_flags |= SCSIPI_CHAN_SHUTDOWN;
	wakeup(&chan->chan_complete);

	/*
	 * Now wait for the thread to exit.
	 */
	while (chan->chan_thread != NULL)
		(void) tsleep(&chan->chan_thread, PRIBIO, "scshut", 0);
}

/*
 * scsipi_insert_periph:
 *
 *	Insert a periph into the channel.
 */
void
scsipi_insert_periph(chan, periph)
	struct scsipi_channel *chan;
	struct scsipi_periph *periph;
{
	int s;

	s = splbio();
	chan->chan_periphs[periph->periph_target][periph->periph_lun] = periph;
	splx(s);
}

/*
 * scsipi_remove_periph:
 *
 *	Remove a periph from the channel.
 */
void
scsipi_remove_periph(chan, periph)
	struct scsipi_channel *chan;
	struct scsipi_periph *periph;
{
	int s;

	s = splbio();
	chan->chan_periphs[periph->periph_target][periph->periph_lun] = NULL;
	splx(s);
}

/*
 * scsipi_lookup_periph:
 *
 *	Lookup a periph on the specified channel.
 */
struct scsipi_periph *
scsipi_lookup_periph(chan, target, lun)
	struct scsipi_channel *chan;
	int target, lun;
{
	struct scsipi_periph *periph;
	int s;

	if (target >= chan->chan_ntargets ||
	    lun >= chan->chan_nluns)
		return (NULL);

	s = splbio();
	periph = chan->chan_periphs[target][lun];
	splx(s);

	return (periph);
}

/*
 * scsipi_get_resource:
 *
 *	Allocate a single xfer `resource' from the channel.
 *
 *	NOTE: Must be called at splbio().
 */
int
scsipi_get_resource(chan)
	struct scsipi_channel *chan;
{
	struct scsipi_adapter *adapt = chan->chan_adapter;
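
	/*
	 * Openings are accounted per-channel when the adapter sets
	 * SCSIPI_CHAN_OPENINGS; otherwise a single pool of
	 * adapt_openings is shared by all of the adapter's channels.
	 */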
	if (chan->chan_flags & SCSIPI_CHAN_OPENINGS) {
		if (chan->chan_openings > 0) {
			chan->chan_openings--;
			return (1);
		}
		return (0);
	}

	if (adapt->adapt_openings > 0) {
		adapt->adapt_openings--;
		return (1);
	}
	return (0);
}

/*
 * scsipi_grow_resources:
 *
 *	Attempt to grow resources for a channel.  If this succeeds,
 *	we allocate one for our caller.
 *
 *	NOTE: Must be called at splbio().
 */
__inline int
scsipi_grow_resources(chan)
	struct scsipi_channel *chan;
{

	if (chan->chan_flags & SCSIPI_CHAN_CANGROW) {
		scsipi_adapter_request(chan, ADAPTER_REQ_GROW_RESOURCES, NULL);
		return (scsipi_get_resource(chan));
	}

	return (0);
}

/*
 * scsipi_put_resource:
 *
 *	Free a single xfer `resource' to the channel.
 *
 *	NOTE: Must be called at splbio().
 */
void
scsipi_put_resource(chan)
	struct scsipi_channel *chan;
{
	struct scsipi_adapter *adapt = chan->chan_adapter;

	if (chan->chan_flags & SCSIPI_CHAN_OPENINGS)
		chan->chan_openings++;
	else
		adapt->adapt_openings++;
}

/*
 * scsipi_get_tag:
 *
 *	Get a tag ID for the specified xfer.
 *
 *	NOTE: Must be called at splbio().
 */
void
scsipi_get_tag(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_periph *periph = xs->xs_periph;
	int word, bit, tag;
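
	/*
	 * Free tags are tracked in a bitmap of PERIPH_NTAGWORDS
	 * 32-bit words.  ffs() returns the 1-based index of the
	 * lowest set (i.e. free) bit, so e.g. a free tag in word 1,
	 * bit 3 yields tag ID (1 << 5) | 3 == 35.
	 */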
	for (word = 0; word < PERIPH_NTAGWORDS; word++) {
		bit = ffs(periph->periph_freetags[word]);
		if (bit != 0)
			break;
	}
#ifdef DIAGNOSTIC
	if (word == PERIPH_NTAGWORDS) {
		scsipi_printaddr(periph);
		printf("no free tags\n");
		panic("scsipi_get_tag");
	}
#endif

	bit -= 1;
	periph->periph_freetags[word] &= ~(1 << bit);
	tag = (word << 5) | bit;

	/* XXX Should eventually disallow this completely. */
	if (tag >= periph->periph_openings) {
		scsipi_printaddr(periph);
		printf("WARNING: tag %d greater than available openings %d\n",
		    tag, periph->periph_openings);
	}

	xs->xs_tag_id = tag;
}

/*
 * scsipi_put_tag:
 *
 *	Put the tag ID for the specified xfer back into the pool.
 *
 *	NOTE: Must be called at splbio().
 */
void
scsipi_put_tag(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_periph *periph = xs->xs_periph;
	int word, bit;
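
	/*
	 * Recover the word/bit position from the tag ID; this is the
	 * inverse of the (word << 5) | bit encoding done in
	 * scsipi_get_tag().
	 */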
	word = xs->xs_tag_id >> 5;
	bit = xs->xs_tag_id & 0x1f;

	periph->periph_freetags[word] |= (1 << bit);
}

/*
 * scsipi_get_xs:
 *
 *	Allocate an xfer descriptor and associate it with the
 *	specified peripheral.  If the peripheral has no more
 *	available command openings, we either block waiting for
 *	one to become available, or fail.
 */
struct scsipi_xfer *
scsipi_get_xs(periph, flags)
	struct scsipi_periph *periph;
	int flags;
{
	struct scsipi_xfer *xs;
	int s;

	SC_DEBUG(periph, SCSIPI_DB3, ("scsipi_get_xs\n"));

	/*
	 * If we're cold, make sure we poll.
	 */
	if (cold)
		flags |= XS_CTL_NOSLEEP | XS_CTL_POLL;

#ifdef DIAGNOSTIC
	/*
	 * URGENT commands can never be ASYNC.
	 */
	if ((flags & (XS_CTL_URGENT|XS_CTL_ASYNC)) ==
	    (XS_CTL_URGENT|XS_CTL_ASYNC)) {
		scsipi_printaddr(periph);
		printf("URGENT and ASYNC\n");
		panic("scsipi_get_xs");
	}
#endif

	s = splbio();
	/*
	 * Wait for a command opening to become available.  Rules:
	 *
	 *	- All xfers must wait for an available opening.
	 *	  Exception: URGENT xfers can proceed when
	 *	  active == openings, because we use the opening
	 *	  of the command we're recovering for.
	 *
	 *	- If the periph has sense pending, only URGENT & REQSENSE
	 *	  xfers may proceed.
	 *
	 *	- If the periph is recovering, only URGENT xfers may
	 *	  proceed.
	 *
	 *	- If the periph is currently executing a recovery
	 *	  command, URGENT commands must block, because only
	 *	  one recovery command can execute at a time.
	 */
	for (;;) {
		if (flags & XS_CTL_URGENT) {
			if (periph->periph_active > periph->periph_openings)
				goto wait_for_opening;
			if (periph->periph_flags & PERIPH_SENSE) {
				if ((flags & XS_CTL_REQSENSE) == 0)
					goto wait_for_opening;
			} else {
				if ((periph->periph_flags &
				    PERIPH_RECOVERY_ACTIVE) != 0)
					goto wait_for_opening;
				periph->periph_flags |= PERIPH_RECOVERY_ACTIVE;
			}
			break;
		}
		if (periph->periph_active >= periph->periph_openings ||
		    (periph->periph_flags & PERIPH_RECOVERING) != 0)
			goto wait_for_opening;
		periph->periph_active++;
		break;

 wait_for_opening:
		if (flags & XS_CTL_NOSLEEP) {
			splx(s);
			return (NULL);
		}
		SC_DEBUG(periph, SCSIPI_DB3, ("sleeping\n"));
		periph->periph_flags |= PERIPH_WAITING;
		(void) tsleep(periph, PRIBIO, "getxs", 0);
	}
	SC_DEBUG(periph, SCSIPI_DB3, ("calling pool_get\n"));
	xs = pool_get(&scsipi_xfer_pool,
	    ((flags & XS_CTL_NOSLEEP) != 0 ? PR_NOWAIT : PR_WAITOK));
	if (xs == NULL) {
		if (flags & XS_CTL_URGENT) {
			if ((flags & XS_CTL_REQSENSE) == 0)
				periph->periph_flags &= ~PERIPH_RECOVERY_ACTIVE;
		} else
			periph->periph_active--;
		scsipi_printaddr(periph);
		printf("unable to allocate %sscsipi_xfer\n",
		    (flags & XS_CTL_URGENT) ? "URGENT " : "");
	}
	splx(s);

	SC_DEBUG(periph, SCSIPI_DB3, ("returning\n"));

	if (xs != NULL) {
		/*
		 * Zero the xfer before initializing the callout, so
		 * that the memset doesn't clobber the callout state.
		 */
		memset(xs, 0, sizeof(*xs));
		callout_init(&xs->xs_callout);
		xs->xs_periph = periph;
		xs->xs_control = flags;
		xs->xs_status = 0;
		s = splbio();
		TAILQ_INSERT_TAIL(&periph->periph_xferq, xs, device_q);
		splx(s);
	}
	return (xs);
}

/*
 * scsipi_put_xs:
 *
 *	Release an xfer descriptor, decreasing the outstanding command
 *	count for the peripheral.  If there is a thread waiting for
 *	an opening, wake it up.  If not, kick any queued I/O the
 *	peripheral may have.
 *
 *	NOTE: Must be called at splbio().
 */
void
scsipi_put_xs(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_periph *periph = xs->xs_periph;
	int flags = xs->xs_control;

	SC_DEBUG(periph, SCSIPI_DB3, ("scsipi_free_xs\n"));

	TAILQ_REMOVE(&periph->periph_xferq, xs, device_q);
	pool_put(&scsipi_xfer_pool, xs);

#ifdef DIAGNOSTIC
	if ((periph->periph_flags & PERIPH_RECOVERY_ACTIVE) != 0 &&
	    periph->periph_active == 0) {
		scsipi_printaddr(periph);
		printf("recovery without a command to recover for\n");
		panic("scsipi_put_xs");
	}
#endif

	if (flags & XS_CTL_URGENT) {
		if ((flags & XS_CTL_REQSENSE) == 0)
			periph->periph_flags &= ~PERIPH_RECOVERY_ACTIVE;
	} else
		periph->periph_active--;
	if (periph->periph_active == 0 &&
	    (periph->periph_flags & PERIPH_WAITDRAIN) != 0) {
		periph->periph_flags &= ~PERIPH_WAITDRAIN;
		wakeup(&periph->periph_active);
	}

	if (periph->periph_flags & PERIPH_WAITING) {
		periph->periph_flags &= ~PERIPH_WAITING;
		wakeup(periph);
	} else {
		if (periph->periph_switch->psw_start != NULL) {
			SC_DEBUG(periph, SCSIPI_DB2,
			    ("calling private start()\n"));
			(*periph->periph_switch->psw_start)(periph);
		}
	}
}

/*
 * scsipi_channel_freeze:
 *
 *	Freeze a channel's xfer queue.
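 *
 *	Freezes nest: each freeze adds to chan_qfreeze, and the queue
 *	is run again only when scsipi_channel_thaw() brings the count
 *	back down to zero.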
 */
void
scsipi_channel_freeze(chan, count)
	struct scsipi_channel *chan;
	int count;
{
	int s;

	s = splbio();
	chan->chan_qfreeze += count;
	splx(s);
}

/*
 * scsipi_channel_thaw:
 *
 *	Thaw a channel's xfer queue.
 */
void
scsipi_channel_thaw(chan, count)
	struct scsipi_channel *chan;
	int count;
{
	int s;

	s = splbio();
	chan->chan_qfreeze -= count;
	/*
	 * Don't let the freeze count go negative.
	 *
	 * Presumably the adapter driver could keep track of this,
	 * but it might just be easier to do this here so as to allow
	 * multiple callers, including those outside the adapter driver.
	 */
	if (chan->chan_qfreeze < 0) {
		chan->chan_qfreeze = 0;
	}
	splx(s);
	/*
	 * Kick the channel's queue here.  Note, we may be running in
	 * interrupt context (softclock or HBA's interrupt), so the adapter
	 * driver had better not sleep.
	 */
	if (chan->chan_qfreeze == 0)
		scsipi_run_queue(chan);
}

/*
 * scsipi_channel_timed_thaw:
 *
 *	Thaw a channel after some time has expired.  This will also
 *	run the channel's queue if the freeze count has reached 0.
 */
void
scsipi_channel_timed_thaw(arg)
	void *arg;
{
	struct scsipi_channel *chan = arg;

	scsipi_channel_thaw(chan, 1);
}

/*
 * scsipi_periph_freeze:
 *
 *	Freeze a device's xfer queue.
 */
void
scsipi_periph_freeze(periph, count)
	struct scsipi_periph *periph;
	int count;
{
	int s;

	s = splbio();
	periph->periph_qfreeze += count;
	splx(s);
}

/*
 * scsipi_periph_thaw:
 *
 *	Thaw a device's xfer queue.
 */
void
scsipi_periph_thaw(periph, count)
	struct scsipi_periph *periph;
	int count;
{
	int s;

	s = splbio();
	periph->periph_qfreeze -= count;
	if (periph->periph_qfreeze == 0 &&
	    (periph->periph_flags & PERIPH_WAITING) != 0)
		wakeup(periph);
	splx(s);
}

/*
 * scsipi_periph_timed_thaw:
 *
 *	Thaw a device after some time has expired.
 */
void
scsipi_periph_timed_thaw(arg)
	void *arg;
{
	struct scsipi_periph *periph = arg;

	callout_stop(&periph->periph_callout);
	scsipi_periph_thaw(periph, 1);

	/*
	 * Kick the channel's queue here.  Note, we're running in
	 * interrupt context (softclock), so the adapter driver
	 * had better not sleep.
	 */
	scsipi_run_queue(periph->periph_channel);
}

/*
 * scsipi_wait_drain:
 *
 *	Wait for a periph's pending xfers to drain.
 */
void
scsipi_wait_drain(periph)
	struct scsipi_periph *periph;
{
	int s;

	s = splbio();
	while (periph->periph_active != 0) {
		periph->periph_flags |= PERIPH_WAITDRAIN;
		(void) tsleep(&periph->periph_active, PRIBIO, "sxdrn", 0);
	}
	splx(s);
}

/*
 * scsipi_kill_pending:
 *
 *	Kill off all pending xfers for a periph.
 *
 *	NOTE: Must be called at splbio().
 */
void
scsipi_kill_pending(periph)
	struct scsipi_periph *periph;
{

	(*periph->periph_channel->chan_bustype->bustype_kill_pending)(periph);
#ifdef DIAGNOSTIC
	if (TAILQ_FIRST(&periph->periph_xferq) != NULL)
		panic("scsipi_kill_pending");
#endif
	scsipi_wait_drain(periph);
}

/*
 * scsipi_interpret_sense:
 *
 *	Look at the returned sense and act on the error, determining
 *	the unix error number to pass back.  (0 = report no error)
 *
 *	NOTE: If we return ERESTART, we are expected to have
 *	thawed the device!
 *
 *	THIS IS THE DEFAULT ERROR HANDLER FOR SCSI DEVICES.
 */
int
scsipi_interpret_sense(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_sense_data *sense;
	struct scsipi_periph *periph = xs->xs_periph;
	u_int8_t key;
	u_int32_t info;
	int error;
#ifndef SCSIVERBOSE
	static char *error_mes[] = {
		"soft error (corrected)",
		"not ready", "medium error",
		"non-media hardware failure", "illegal request",
		"unit attention", "readonly device",
		"no data found", "vendor unique",
		"copy aborted", "command aborted",
		"search returned equal", "volume overflow",
		"verify miscompare", "unknown error key"
	};
#endif

	sense = &xs->sense.scsi_sense;
#ifdef SCSIPI_DEBUG
	if (periph->periph_flags & SCSIPI_DB1) {
		int count;
		scsipi_printaddr(periph);
		printf(" sense debug information:\n");
		printf("\tcode 0x%x valid 0x%x\n",
		    sense->error_code & SSD_ERRCODE,
		    sense->error_code & SSD_ERRCODE_VALID ? 1 : 0);
		printf("\tseg 0x%x key 0x%x ili 0x%x eom 0x%x fmark 0x%x\n",
		    sense->segment,
		    sense->flags & SSD_KEY,
		    sense->flags & SSD_ILI ? 1 : 0,
		    sense->flags & SSD_EOM ? 1 : 0,
		    sense->flags & SSD_FILEMARK ? 1 : 0);
		printf("\ninfo: 0x%x 0x%x 0x%x 0x%x followed by %d "
		    "extra bytes\n",
		    sense->info[0],
		    sense->info[1],
		    sense->info[2],
		    sense->info[3],
		    sense->extra_len);
		printf("\textra: ");
		for (count = 0; count < ADD_BYTES_LIM(sense); count++)
			printf("0x%x ", sense->cmd_spec_info[count]);
		printf("\n");
	}
#endif

	/*
	 * If the periph has its own error handler, call it first.
	 * If it returns a legit error value, return that, otherwise
	 * it wants us to continue with normal error processing.
	 */
	if (periph->periph_switch->psw_error != NULL) {
		SC_DEBUG(periph, SCSIPI_DB2,
		    ("calling private err_handler()\n"));
		error = (*periph->periph_switch->psw_error)(xs);
		if (error != EJUSTRETURN)
			return (error);
	}
	/* otherwise use the default */
	switch (sense->error_code & SSD_ERRCODE) {
		/*
		 * If it's code 70, use the extended stuff and
		 * interpret the key
		 */
	case 0x71:		/* delayed error */
		scsipi_printaddr(periph);
		key = sense->flags & SSD_KEY;
		printf(" DEFERRED ERROR, key = 0x%x\n", key);
		/* FALLTHROUGH */
	case 0x70:
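		/*
		 * For fixed-format sense (codes 0x70/0x71), the
		 * information field is a 4-byte big-endian value,
		 * typically the logical block address associated with
		 * the error; _4btol() assembles it into host order.
		 */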
		if ((sense->error_code & SSD_ERRCODE_VALID) != 0)
			info = _4btol(sense->info);
		else
			info = 0;
		key = sense->flags & SSD_KEY;

		switch (key) {
		case SKEY_NO_SENSE:
		case SKEY_RECOVERED_ERROR:
			if (xs->resid == xs->datalen && xs->datalen) {
				/*
				 * Why is this here?
				 */
				xs->resid = 0;	/* not short read */
			}
			/* FALLTHROUGH */
		case SKEY_EQUAL:
			error = 0;
			break;
		case SKEY_NOT_READY:
			if ((periph->periph_flags & PERIPH_REMOVABLE) != 0)
				periph->periph_flags &= ~PERIPH_MEDIA_LOADED;
			if ((xs->xs_control & XS_CTL_IGNORE_NOT_READY) != 0)
				return (0);
			if (sense->add_sense_code == 0x3A) {
				error = ENODEV; /* Medium not present */
				if (xs->xs_control & XS_CTL_SILENT_NODEV)
					return (error);
			} else
				error = EIO;
			if ((xs->xs_control & XS_CTL_SILENT) != 0)
				return (error);
			break;
		case SKEY_ILLEGAL_REQUEST:
			if ((xs->xs_control &
			    XS_CTL_IGNORE_ILLEGAL_REQUEST) != 0)
				return (0);
			/*
			 * Handle the case where a device reports
			 * Logical Unit Not Supported during discovery.
			 */
			if ((xs->xs_control & XS_CTL_DISCOVERY) != 0 &&
			    sense->add_sense_code == 0x25 &&
			    sense->add_sense_code_qual == 0x00)
				return (EINVAL);
			if ((xs->xs_control & XS_CTL_SILENT) != 0)
				return (EIO);
			error = EINVAL;
			break;
		case SKEY_UNIT_ATTENTION:
			if (sense->add_sense_code == 0x29 &&
			    sense->add_sense_code_qual == 0x00) {
				/* device or bus reset */
				return (ERESTART);
			}
			if ((periph->periph_flags & PERIPH_REMOVABLE) != 0)
				periph->periph_flags &= ~PERIPH_MEDIA_LOADED;
			if ((xs->xs_control &
			    XS_CTL_IGNORE_MEDIA_CHANGE) != 0 ||
			    /* XXX Should reupload any transient state. */
			    (periph->periph_flags &
			    PERIPH_REMOVABLE) == 0) {
				return (ERESTART);
			}
			if ((xs->xs_control & XS_CTL_SILENT) != 0)
				return (EIO);
			error = EIO;
			break;
		case SKEY_WRITE_PROTECT:
			error = EROFS;
			break;
		case SKEY_BLANK_CHECK:
			error = 0;
			break;
		case SKEY_ABORTED_COMMAND:
			error = ERESTART;
			break;
		case SKEY_VOLUME_OVERFLOW:
			error = ENOSPC;
			break;
		default:
			error = EIO;
			break;
		}

#ifdef SCSIVERBOSE
		if (key && (xs->xs_control & XS_CTL_SILENT) == 0)
			scsipi_print_sense(xs, 0);
#else
		if (key) {
			scsipi_printaddr(periph);
			printf("%s", error_mes[key - 1]);
			if ((sense->error_code & SSD_ERRCODE_VALID) != 0) {
				switch (key) {
				case SKEY_NOT_READY:
				case SKEY_ILLEGAL_REQUEST:
				case SKEY_UNIT_ATTENTION:
				case SKEY_WRITE_PROTECT:
					break;
				case SKEY_BLANK_CHECK:
					printf(", requested size: %d (decimal)",
					    info);
					break;
				case SKEY_ABORTED_COMMAND:
					if (xs->xs_retries)
						printf(", retrying");
					printf(", cmd 0x%x, info 0x%x",
					    xs->cmd->opcode, info);
					break;
				default:
					printf(", info = %d (decimal)", info);
				}
			}
			if (sense->extra_len != 0) {
				int n;
				printf(", data =");
				for (n = 0; n < sense->extra_len; n++)
					printf(" %02x",
					    sense->cmd_spec_info[n]);
			}
			printf("\n");
		}
#endif
		return (error);

	/*
	 * Not code 70, just report it
	 */
	default:
#if defined(SCSIDEBUG) || defined(DEBUG)
	{
		static char *uc = "undecodable sense error";
		int i;
		u_int8_t *cptr = (u_int8_t *) sense;
		scsipi_printaddr(periph);
		if (xs->cmd == &xs->cmdstore) {
			printf("%s for opcode 0x%x, data=",
			    uc, xs->cmdstore.opcode);
		} else {
			printf("%s, data=", uc);
		}
		for (i = 0; i < sizeof (*sense); i++)
			printf(" 0x%02x", *(cptr++) & 0xff);
		printf("\n");
	}
#else

		scsipi_printaddr(periph);
		printf("Sense Error Code 0x%x",
		    sense->error_code & SSD_ERRCODE);
		if ((sense->error_code & SSD_ERRCODE_VALID) != 0) {
			struct scsipi_sense_data_unextended *usense =
			    (struct scsipi_sense_data_unextended *)sense;
			printf(" at block no. %d (decimal)",
			    _3btol(usense->block));
		}
		printf("\n");
#endif
		return (EIO);
	}
}

/*
 * scsipi_size:
 *
 *	Find out from the device what its capacity is.
 */
u_long
scsipi_size(periph, flags)
	struct scsipi_periph *periph;
	int flags;
{
	struct scsipi_read_cap_data rdcap;
	struct scsipi_read_capacity scsipi_cmd;

	memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
	scsipi_cmd.opcode = READ_CAPACITY;

	/*
	 * If the command works, interpret the result as a 4 byte
	 * number of blocks
	 */
	if (scsipi_command(periph, (struct scsipi_generic *)&scsipi_cmd,
	    sizeof(scsipi_cmd), (u_char *)&rdcap, sizeof(rdcap),
	    SCSIPIRETRIES, 20000, NULL,
	    flags | XS_CTL_DATA_IN | XS_CTL_DATA_ONSTACK) != 0) {
		scsipi_printaddr(periph);
		printf("could not get size\n");
		return (0);
	}
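
	/*
	 * READ CAPACITY returns the address of the last block;
	 * add one to convert it to a block count.
	 */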
	return (_4btol(rdcap.addr) + 1);
}

/*
 * scsipi_test_unit_ready:
 *
 *	Issue a `test unit ready' request.
 */
int
scsipi_test_unit_ready(periph, flags)
	struct scsipi_periph *periph;
	int flags;
{
	struct scsipi_test_unit_ready scsipi_cmd;

	/* some ATAPI drives don't support TEST_UNIT_READY. Sigh */
	if (periph->periph_quirks & PQUIRK_NOTUR)
		return (0);

	memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
	scsipi_cmd.opcode = TEST_UNIT_READY;

	return (scsipi_command(periph,
	    (struct scsipi_generic *)&scsipi_cmd, sizeof(scsipi_cmd),
	    0, 0, SCSIPIRETRIES, 10000, NULL, flags));
}

/*
 * scsipi_inquire:
 *
 *	Ask the device about itself.
 */
int
scsipi_inquire(periph, inqbuf, flags)
	struct scsipi_periph *periph;
	struct scsipi_inquiry_data *inqbuf;
	int flags;
{
	struct scsipi_inquiry scsipi_cmd;

	memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
	scsipi_cmd.opcode = INQUIRY;
	scsipi_cmd.length = sizeof(struct scsipi_inquiry_data);

	return (scsipi_command(periph,
	    (struct scsipi_generic *) &scsipi_cmd, sizeof(scsipi_cmd),
	    (u_char *) inqbuf, sizeof(struct scsipi_inquiry_data),
	    SCSIPIRETRIES, 10000, NULL, XS_CTL_DATA_IN | flags));
}

/*
 * scsipi_prevent:
 *
 *	Prevent or allow the user to remove the media
 */
int
scsipi_prevent(periph, type, flags)
	struct scsipi_periph *periph;
	int type, flags;
{
	struct scsipi_prevent scsipi_cmd;

	if (periph->periph_quirks & PQUIRK_NODOORLOCK)
		return (0);

	memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
	scsipi_cmd.opcode = PREVENT_ALLOW;
	scsipi_cmd.how = type;

	return (scsipi_command(periph,
	    (struct scsipi_generic *) &scsipi_cmd, sizeof(scsipi_cmd),
	    0, 0, SCSIPIRETRIES, 5000, NULL, flags));
}

/*
 * scsipi_start:
 *
 *	Send a START UNIT.
 */
int
scsipi_start(periph, type, flags)
	struct scsipi_periph *periph;
	int type, flags;
{
	struct scsipi_start_stop scsipi_cmd;

	if (periph->periph_quirks & PQUIRK_NOSTARTUNIT)
		return 0;

	memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
	scsipi_cmd.opcode = START_STOP;
	scsipi_cmd.byte2 = 0x00;
	scsipi_cmd.how = type;

	return (scsipi_command(periph,
	    (struct scsipi_generic *) &scsipi_cmd, sizeof(scsipi_cmd),
	    0, 0, SCSIPIRETRIES, (type & SSS_START) ? 60000 : 10000,
	    NULL, flags));
}

/*
 * scsipi_mode_sense, scsipi_mode_sense_big:
 *	get a sense page from a device
 */

int
scsipi_mode_sense(periph, byte2, page, data, len, flags, retries, timeout)
	struct scsipi_periph *periph;
	int byte2, page, len, flags, retries, timeout;
	struct scsipi_mode_header *data;
{
	struct scsipi_mode_sense scsipi_cmd;
	int error;

	memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
	scsipi_cmd.opcode = MODE_SENSE;
	scsipi_cmd.byte2 = byte2;
	scsipi_cmd.page = page;
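	/*
	 * ATAPI uses the 10-byte form of MODE SENSE, which carries a
	 * 2-byte big-endian allocation length; the 6-byte SCSI form
	 * has only a single length byte.
	 */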
	if (scsipi_periph_bustype(periph) == SCSIPI_BUSTYPE_ATAPI)
		_lto2b(len, scsipi_cmd.u_len.atapi.length);
	else
		scsipi_cmd.u_len.scsi.length = len & 0xff;
	error = scsipi_command(periph, (struct scsipi_generic *)&scsipi_cmd,
	    sizeof(scsipi_cmd), (void *)data, len, retries, timeout, NULL,
	    flags | XS_CTL_DATA_IN);
	SC_DEBUG(periph, SCSIPI_DB2,
	    ("scsipi_mode_sense: error=%d\n", error));
	return (error);
}

int
scsipi_mode_sense_big(periph, byte2, page, data, len, flags, retries, timeout)
	struct scsipi_periph *periph;
	int byte2, page, len, flags, retries, timeout;
	struct scsipi_mode_header_big *data;
{
	struct scsipi_mode_sense_big scsipi_cmd;
	int error;

	memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
	scsipi_cmd.opcode = MODE_SENSE_BIG;
	scsipi_cmd.byte2 = byte2;
	scsipi_cmd.page = page;
	_lto2b(len, scsipi_cmd.length);
	error = scsipi_command(periph, (struct scsipi_generic *)&scsipi_cmd,
	    sizeof(scsipi_cmd), (void *)data, len, retries, timeout, NULL,
	    flags | XS_CTL_DATA_IN);
	SC_DEBUG(periph, SCSIPI_DB2,
	    ("scsipi_mode_sense_big: error=%d\n", error));
	return (error);
}

int
scsipi_mode_select(periph, byte2, data, len, flags, retries, timeout)
	struct scsipi_periph *periph;
	int byte2, len, flags, retries, timeout;
	struct scsipi_mode_header *data;
{
	struct scsipi_mode_select scsipi_cmd;
	int error;

	memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
	scsipi_cmd.opcode = MODE_SELECT;
	scsipi_cmd.byte2 = byte2;
	if (scsipi_periph_bustype(periph) == SCSIPI_BUSTYPE_ATAPI)
		_lto2b(len, scsipi_cmd.u_len.atapi.length);
	else
		scsipi_cmd.u_len.scsi.length = len & 0xff;
	error = scsipi_command(periph, (struct scsipi_generic *)&scsipi_cmd,
	    sizeof(scsipi_cmd), (void *)data, len, retries, timeout, NULL,
	    flags | XS_CTL_DATA_OUT);
	SC_DEBUG(periph, SCSIPI_DB2,
	    ("scsipi_mode_select: error=%d\n", error));
	return (error);
}

int
scsipi_mode_select_big(periph, byte2, data, len, flags, retries, timeout)
	struct scsipi_periph *periph;
	int byte2, len, flags, retries, timeout;
	struct scsipi_mode_header_big *data;
{
	struct scsipi_mode_select_big scsipi_cmd;
	int error;

	memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
	scsipi_cmd.opcode = MODE_SELECT_BIG;
	scsipi_cmd.byte2 = byte2;
	_lto2b(len, scsipi_cmd.length);
	error = scsipi_command(periph, (struct scsipi_generic *)&scsipi_cmd,
	    sizeof(scsipi_cmd), (void *)data, len, retries, timeout, NULL,
	    flags | XS_CTL_DATA_OUT);
	SC_DEBUG(periph, SCSIPI_DB2,
	    ("scsipi_mode_select_big: error=%d\n", error));
	return (error);
}

/*
 * scsipi_done:
 *
 *	This routine is called by an adapter's interrupt handler when
 *	an xfer is completed.
 */
void
scsipi_done(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_periph *periph = xs->xs_periph;
	struct scsipi_channel *chan = periph->periph_channel;
	int s, freezecnt;

	SC_DEBUG(periph, SCSIPI_DB2, ("scsipi_done\n"));
#ifdef SCSIPI_DEBUG
	if (periph->periph_dbflags & SCSIPI_DB1)
		show_scsipi_cmd(xs);
#endif

	s = splbio();
	/*
	 * The resource this command was using is now free.
	 */
	scsipi_put_resource(chan);
	xs->xs_periph->periph_sent--;

	/*
	 * If the command was tagged, free the tag.
	 */
	if (XS_CTL_TAGTYPE(xs) != 0)
		scsipi_put_tag(xs);
	else
		periph->periph_flags &= ~PERIPH_UNTAG;

	/* Mark the command as `done'. */
	xs->xs_status |= XS_STS_DONE;

#ifdef DIAGNOSTIC
	if ((xs->xs_control & (XS_CTL_ASYNC|XS_CTL_POLL)) ==
	    (XS_CTL_ASYNC|XS_CTL_POLL))
		panic("scsipi_done: ASYNC and POLL");
#endif

	/*
	 * If the xfer had an error of any sort, freeze the
	 * periph's queue.  Freeze it again if we were requested
	 * to do so in the xfer.
	 */
	freezecnt = 0;
	if (xs->error != XS_NOERROR)
		freezecnt++;
	if (xs->xs_control & XS_CTL_FREEZE_PERIPH)
		freezecnt++;
	if (freezecnt != 0)
		scsipi_periph_freeze(periph, freezecnt);

	/*
	 * Record the xfer with a pending sense, in case a SCSI reset is
	 * received before the thread is woken up.
	 */
	if (xs->error == XS_BUSY && xs->status == SCSI_CHECK) {
		periph->periph_flags |= PERIPH_SENSE;
		periph->periph_xscheck = xs;
	}

	/*
	 * If this was an xfer that was not to complete asynchronously,
	 * let the requesting thread perform error checking/handling
	 * in its context.
	 */
	if ((xs->xs_control & XS_CTL_ASYNC) == 0) {
		splx(s);
		/*
		 * If it's a polling job, just return, to unwind the
		 * call graph.  We don't need to restart the queue,
		 * because polling jobs are treated specially, and
		 * are really only used during crash dumps anyway
		 * (XXX or during boot-time autoconfiguration of
		 * ATAPI devices).
		 */
		if (xs->xs_control & XS_CTL_POLL)
			return;
		wakeup(xs);
		goto out;
	}

	/*
	 * Catch the extremely common case of I/O completing
	 * without error; no use in taking a context switch
	 * if we can handle it in interrupt context.
	 */
	if (xs->error == XS_NOERROR) {
		splx(s);
		(void) scsipi_complete(xs);
		goto out;
	}

	/*
	 * There is an error on this xfer.  Put it on the channel's
	 * completion queue, and wake up the completion thread.
	 */
	TAILQ_INSERT_TAIL(&chan->chan_complete, xs, channel_q);
	splx(s);
	wakeup(&chan->chan_complete);

 out:
	/*
	 * If there are more xfers on the channel's queue, attempt to
	 * run them.
	 */
	scsipi_run_queue(chan);
}

/*
 * scsipi_complete:
 *
 *	Completion of a scsipi_xfer.  This is the guts of scsipi_done().
 *
 *	NOTE: This routine MUST be called with valid thread context
 *	except for the case where the following two conditions are
 *	true:
 *
 *		xs->error == XS_NOERROR
 *		XS_CTL_ASYNC is set in xs->xs_control
 *
 *	The semantics of this routine can be tricky, so here is an
 *	explanation:
 *
 *		0		Xfer completed successfully.
 *
 *		ERESTART	Xfer had an error, but was restarted.
 *
 *		anything else	Xfer had an error, return value is Unix
 *				errno.
 *
 *	If the return value is anything but ERESTART:
 *
 *		- If XS_CTL_ASYNC is set, `xs' has been freed back to
 *		  the pool.
 *		- If there is a buf associated with the xfer,
 *		  it has been biodone()'d.
 */
int
scsipi_complete(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_periph *periph = xs->xs_periph;
	struct scsipi_channel *chan = periph->periph_channel;
	struct buf *bp;
	int error, s;

#ifdef DIAGNOSTIC
	if ((xs->xs_control & XS_CTL_ASYNC) != 0 && xs->bp == NULL)
		panic("scsipi_complete: XS_CTL_ASYNC but no buf");
#endif
	/*
	 * If command terminated with a CHECK CONDITION, we need to issue a
	 * REQUEST_SENSE command.  Once the REQUEST_SENSE has been processed
	 * we'll have the real status.
	 * Must be processed at splbio() to avoid missing a SCSI bus reset
	 * for this command.
	 */
	s = splbio();
	if (xs->error == XS_BUSY && xs->status == SCSI_CHECK) {
		/* request sense for a request sense? */
		if (xs->xs_control & XS_CTL_REQSENSE) {
			scsipi_printaddr(periph);
			printf("request sense for a request sense?\n");
			/* XXX maybe we should reset the device? */
			/* we've been frozen because xs->error != XS_NOERROR */
			scsipi_periph_thaw(periph, 1);
			splx(s);
			if (xs->resid < xs->datalen) {
				printf("we read %d bytes of sense anyway:\n",
				    xs->datalen - xs->resid);
#ifdef SCSIVERBOSE
				scsipi_print_sense_data((void *)xs->data, 0);
#endif
			}
			return EINVAL;
		}
		scsipi_request_sense(xs);
	}
	splx(s);
	/*
	 * If it's a user level request, bypass all usual completion
	 * processing, let the user work it out..
	 */
	if ((xs->xs_control & XS_CTL_USERCMD) != 0) {
		SC_DEBUG(periph, SCSIPI_DB3, ("calling user done()\n"));
		if (xs->error != XS_NOERROR)
			scsipi_periph_thaw(periph, 1);
		scsipi_user_done(xs);
		SC_DEBUG(periph, SCSIPI_DB3, ("returned from user done()\n"));
		return 0;
	}

	switch (xs->error) {
	case XS_NOERROR:
		error = 0;
		break;

	case XS_SENSE:
	case XS_SHORTSENSE:
		error = (*chan->chan_bustype->bustype_interpret_sense)(xs);
		break;

	case XS_RESOURCE_SHORTAGE:
		/*
		 * XXX Should freeze channel's queue.
		 */
		scsipi_printaddr(periph);
		printf("adapter resource shortage\n");
		/* FALLTHROUGH */

	case XS_BUSY:
		if (xs->error == XS_BUSY && xs->status == SCSI_QUEUE_FULL) {
			struct scsipi_max_openings mo;

			/*
			 * We set the openings to active - 1, assuming that
			 * the command that got us here is the first one that
			 * can't fit into the device's queue.  If that's not
			 * the case, I guess we'll find out soon enough.
			 */
			mo.mo_target = periph->periph_target;
			mo.mo_lun = periph->periph_lun;
			if (periph->periph_active < periph->periph_openings)
				mo.mo_openings = periph->periph_active - 1;
			else
				mo.mo_openings = periph->periph_openings - 1;
#ifdef DIAGNOSTIC
			if (mo.mo_openings < 0) {
				scsipi_printaddr(periph);
				printf("QUEUE FULL resulted in < 0 openings\n");
				panic("scsipi_done");
			}
#endif
			if (mo.mo_openings == 0) {
				scsipi_printaddr(periph);
				printf("QUEUE FULL resulted in 0 openings\n");
				mo.mo_openings = 1;
			}
			scsipi_async_event(chan, ASYNC_EVENT_MAX_OPENINGS, &mo);
			error = ERESTART;
		} else if (xs->xs_retries != 0) {
			xs->xs_retries--;
			/*
			 * Wait one second, and try again.
			 */
			if (xs->xs_control & XS_CTL_POLL)
				delay(1000000);
			else {
				scsipi_periph_freeze(periph, 1);
				callout_reset(&periph->periph_callout,
				    hz, scsipi_periph_timed_thaw, periph);
			}
			error = ERESTART;
		} else
			error = EBUSY;
		break;

	case XS_REQUEUE:
		error = ERESTART;
		break;

	case XS_TIMEOUT:
		if (xs->xs_retries != 0) {
			xs->xs_retries--;
			error = ERESTART;
		} else
			error = EIO;
		break;

	case XS_SELTIMEOUT:
		/* XXX Disable device? */
		error = EIO;
		break;

	case XS_RESET:
		if (xs->xs_control & XS_CTL_REQSENSE) {
			/*
			 * request sense interrupted by reset: signal it
			 * with EINTR return code.
			 */
			error = EINTR;
		} else {
			if (xs->xs_retries != 0) {
				xs->xs_retries--;
				error = ERESTART;
			} else
				error = EIO;
		}
		break;

	default:
		scsipi_printaddr(periph);
		printf("invalid return code from adapter: %d\n", xs->error);
		error = EIO;
		break;
	}

	s = splbio();
	if (error == ERESTART) {
		/*
		 * If we get here, the periph has been thawed and frozen
		 * again if we had to issue recovery commands.  Alternatively,
		 * it may have been frozen again and in a timed thaw.  In
		 * any case, we thaw the periph once we re-enqueue the
		 * command.  Once the periph is fully thawed, it will begin
		 * operation again.
		 */
		xs->error = XS_NOERROR;
		xs->status = SCSI_OK;
		xs->xs_status &= ~XS_STS_DONE;
		xs->xs_requeuecnt++;
		error = scsipi_enqueue(xs);
		if (error == 0) {
			scsipi_periph_thaw(periph, 1);
			splx(s);
			return (ERESTART);
		}
	}

	/*
	 * scsipi_done() freezes the queue if not XS_NOERROR.
	 * Thaw it here.
	 */
	if (xs->error != XS_NOERROR)
		scsipi_periph_thaw(periph, 1);

	if (periph->periph_switch->psw_done)
		periph->periph_switch->psw_done(xs);
	if ((bp = xs->bp) != NULL) {
		if (error) {
			bp->b_error = error;
			bp->b_flags |= B_ERROR;
			bp->b_resid = bp->b_bcount;
		} else {
			bp->b_error = 0;
			bp->b_resid = xs->resid;
		}
		biodone(bp);
	}

	if (xs->xs_control & XS_CTL_ASYNC)
		scsipi_put_xs(xs);
	splx(s);

	return (error);
}

/*
 * Issue a request sense for the given scsipi_xfer.  Called when the xfer
 * returns with a CHECK_CONDITION status.  Must be called in valid thread
 * context and at splbio().
 */

void
scsipi_request_sense(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_periph *periph = xs->xs_periph;
	int flags, error;
	struct scsipi_sense cmd;

	periph->periph_flags |= PERIPH_SENSE;

	/* if command was polling, request sense will too */
	flags = xs->xs_control & XS_CTL_POLL;
	/* Polling commands can't sleep */
	if (flags)
		flags |= XS_CTL_NOSLEEP;
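
	/*
	 * THAW_PERIPH undoes, at enqueue time, the freeze done by
	 * scsipi_done() for the CHECK CONDITION; FREEZE_PERIPH
	 * refreezes the queue when the sense completes, until the
	 * original command's error handling is finished.
	 */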
	flags |= XS_CTL_REQSENSE | XS_CTL_URGENT | XS_CTL_DATA_IN |
	    XS_CTL_THAW_PERIPH | XS_CTL_FREEZE_PERIPH;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = REQUEST_SENSE;
	cmd.length = sizeof(struct scsipi_sense_data);

	error = scsipi_command(periph,
	    (struct scsipi_generic *) &cmd, sizeof(cmd),
	    (u_char *)&xs->sense.scsi_sense, sizeof(struct scsipi_sense_data),
	    0, 1000, NULL, flags);
	periph->periph_flags &= ~PERIPH_SENSE;
	periph->periph_xscheck = NULL;
	switch (error) {
	case 0:
		/* we have a valid sense */
		xs->error = XS_SENSE;
		return;
	case EINTR:
		/* REQUEST_SENSE interrupted by bus reset. */
		xs->error = XS_RESET;
		return;
	case EIO:
		/* request sense couldn't be performed */
		/*
		 * XXX this isn't quite right but we don't have anything
		 * better for now
		 */
		xs->error = XS_DRIVER_STUFFUP;
		return;
	default:
		/* Notify that request sense failed. */
		xs->error = XS_DRIVER_STUFFUP;
		scsipi_printaddr(periph);
		printf("request sense failed with error %d\n", error);
		return;
	}
}

/*
 * scsipi_enqueue:
 *
 *	Enqueue an xfer on a channel.
 */
int
scsipi_enqueue(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_channel *chan = xs->xs_periph->periph_channel;
	struct scsipi_xfer *qxs;
	int s;

	s = splbio();

	/*
	 * If the xfer is to be polled, and there are already jobs on
	 * the queue, we can't proceed.
	 */
	if ((xs->xs_control & XS_CTL_POLL) != 0 &&
	    TAILQ_FIRST(&chan->chan_queue) != NULL) {
		splx(s);
		xs->error = XS_DRIVER_STUFFUP;
		return (EAGAIN);
	}

	/*
	 * If we have an URGENT xfer, it's an error recovery command
	 * and it should just go on the head of the channel's queue.
	 */
	if (xs->xs_control & XS_CTL_URGENT) {
		TAILQ_INSERT_HEAD(&chan->chan_queue, xs, channel_q);
		goto out;
	}

	/*
	 * If this xfer has already been on the queue before, we
	 * need to reinsert it in the correct order.  That order is:
	 *
	 *	Immediately before the first xfer for this periph
	 *	with a requeuecnt less than xs->xs_requeuecnt.
	 *
	 * Failing that, at the end of the queue.  (We'll end up
	 * there naturally.)
	 */
	if (xs->xs_requeuecnt != 0) {
		for (qxs = TAILQ_FIRST(&chan->chan_queue); qxs != NULL;
		     qxs = TAILQ_NEXT(qxs, channel_q)) {
			if (qxs->xs_periph == xs->xs_periph &&
			    qxs->xs_requeuecnt < xs->xs_requeuecnt)
				break;
		}
		if (qxs != NULL) {
			TAILQ_INSERT_AFTER(&chan->chan_queue, qxs, xs,
			    channel_q);
			goto out;
		}
	}
	TAILQ_INSERT_TAIL(&chan->chan_queue, xs, channel_q);
 out:
	if (xs->xs_control & XS_CTL_THAW_PERIPH)
		scsipi_periph_thaw(xs->xs_periph, 1);
	splx(s);
	return (0);
}

/*
 * scsipi_run_queue:
 *
 *	Start as many xfers as possible running on the channel.
 */
void
scsipi_run_queue(chan)
	struct scsipi_channel *chan;
{
	struct scsipi_xfer *xs;
	struct scsipi_periph *periph;
	int s;

	for (;;) {
		s = splbio();

		/*
		 * If the channel is frozen, we can't do any work right
		 * now.
		 */
		if (chan->chan_qfreeze != 0) {
			splx(s);
			return;
		}

		/*
		 * Look for work to do, and make sure we can do it.
		 */
		for (xs = TAILQ_FIRST(&chan->chan_queue); xs != NULL;
		     xs = TAILQ_NEXT(xs, channel_q)) {
			periph = xs->xs_periph;
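
			/*
			 * Skip this periph if it already has its full
			 * count of commands in flight, if its queue is
			 * frozen, or if it has an untagged command
			 * outstanding (only one untagged command may
			 * be active per periph at a time).
			 */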
			if ((periph->periph_sent >= periph->periph_openings) ||
			    periph->periph_qfreeze != 0 ||
			    (periph->periph_flags & PERIPH_UNTAG) != 0)
				continue;

			if ((periph->periph_flags &
			    (PERIPH_RECOVERING | PERIPH_SENSE)) != 0 &&
			    (xs->xs_control & XS_CTL_URGENT) == 0)
				continue;

			/*
			 * We can issue this xfer!
			 */
			goto got_one;
		}

		/*
		 * Can't find any work to do right now.
		 */
		splx(s);
		return;

 got_one:
		/*
		 * Have an xfer to run.  Allocate a resource from
		 * the adapter to run it.  If we can't allocate that
		 * resource, we don't dequeue the xfer.
		 */
		if (scsipi_get_resource(chan) == 0) {
			/*
			 * Adapter is out of resources.  If the adapter
			 * supports it, attempt to grow them.
			 */
			if (scsipi_grow_resources(chan) == 0) {
				/*
				 * Wasn't able to grow resources,
				 * nothing more we can do.
				 */
				if (xs->xs_control & XS_CTL_POLL) {
					scsipi_printaddr(xs->xs_periph);
					printf("polling command but no "
					    "adapter resources\n");
					/* We'll panic shortly... */
				}
				splx(s);

				/*
				 * XXX: We should be able to note that
				 * XXX: resources are needed here!
				 */
				return;
			}
			/*
			 * scsipi_grow_resources() allocated the resource
			 * for us.
			 */
		}

		/*
		 * We have a resource to run this xfer, do it!
		 */
		TAILQ_REMOVE(&chan->chan_queue, xs, channel_q);

		/*
		 * If the command is to be tagged, allocate a tag ID
		 * for it.
		 */
		if (XS_CTL_TAGTYPE(xs) != 0)
			scsipi_get_tag(xs);
		else
			periph->periph_flags |= PERIPH_UNTAG;
		periph->periph_sent++;
		splx(s);

		scsipi_adapter_request(chan, ADAPTER_REQ_RUN_XFER, xs);
	}
#ifdef DIAGNOSTIC
	panic("scsipi_run_queue: impossible");
#endif
}

/*
 * scsipi_execute_xs:
 *
 *	Begin execution of an xfer, waiting for it to complete, if necessary.
 */
int
scsipi_execute_xs(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_periph *periph = xs->xs_periph;
	struct scsipi_channel *chan = periph->periph_channel;
	int async, poll, retries, error, s;

	xs->xs_status &= ~XS_STS_DONE;
	xs->error = XS_NOERROR;
	xs->resid = xs->datalen;
	xs->status = SCSI_OK;

#ifdef SCSIPI_DEBUG
	if (xs->xs_periph->periph_dbflags & SCSIPI_DB3) {
		printf("scsipi_execute_xs: ");
		show_scsipi_xs(xs);
		printf("\n");
	}
#endif

	/*
	 * Deal with command tagging:
	 *
	 *	- If the device's current operating mode doesn't
	 *	  include tagged queueing, clear the tag mask.
	 *
	 *	- If the device's current operating mode *does*
	 *	  include tagged queueing, set the tag_type in
	 *	  the xfer to the appropriate byte for the tag
	 *	  message.
	 */
	if ((PERIPH_XFER_MODE(periph) & PERIPH_CAP_TQING) == 0 ||
	    (xs->xs_control & XS_CTL_REQSENSE)) {
		xs->xs_control &= ~XS_CTL_TAGMASK;
		xs->xs_tag_type = 0;
	} else {
		/*
		 * If the request doesn't specify a tag, give Head
		 * tags to URGENT operations and Ordered tags to
		 * everything else.
		 */
		if (XS_CTL_TAGTYPE(xs) == 0) {
			if (xs->xs_control & XS_CTL_URGENT)
				xs->xs_control |= XS_CTL_HEAD_TAG;
			else
				xs->xs_control |= XS_CTL_ORDERED_TAG;
		}

		switch (XS_CTL_TAGTYPE(xs)) {
		case XS_CTL_ORDERED_TAG:
			xs->xs_tag_type = MSG_ORDERED_Q_TAG;
			break;

		case XS_CTL_SIMPLE_TAG:
			xs->xs_tag_type = MSG_SIMPLE_Q_TAG;
			break;

		case XS_CTL_HEAD_TAG:
			xs->xs_tag_type = MSG_HEAD_OF_Q_TAG;
			break;

		default:
			scsipi_printaddr(periph);
			printf("invalid tag mask 0x%08x\n",
			    XS_CTL_TAGTYPE(xs));
			panic("scsipi_execute_xs");
		}
	}

	/* If the adapter wants us to poll, poll. */
	if (chan->chan_adapter->adapt_flags & SCSIPI_ADAPT_POLL_ONLY)
		xs->xs_control |= XS_CTL_POLL;

	/*
	 * If we don't yet have a completion thread, or we are to poll for
	 * completion, clear the ASYNC flag.
	 */
	if (chan->chan_thread == NULL || (xs->xs_control & XS_CTL_POLL) != 0)
		xs->xs_control &= ~XS_CTL_ASYNC;

	async = (xs->xs_control & XS_CTL_ASYNC);
	poll = (xs->xs_control & XS_CTL_POLL);
	retries = xs->xs_retries;	/* for polling commands */

#ifdef DIAGNOSTIC
	if (async != 0 && xs->bp == NULL)
		panic("scsipi_execute_xs: XS_CTL_ASYNC but no buf");
#endif

	/*
	 * Enqueue the transfer.  If we're not polling for completion, this
	 * should ALWAYS return `no error'.
	 */
 try_again:
	error = scsipi_enqueue(xs);
	if (error) {
		if (poll == 0) {
			scsipi_printaddr(periph);
			printf("not polling, but enqueue failed with %d\n",
			    error);
			panic("scsipi_execute_xs");
		}

		scsipi_printaddr(periph);
		printf("failed to enqueue polling command");
		if (retries != 0) {
			printf(", retrying...\n");
			delay(1000000);
			retries--;
			goto try_again;
		}
		printf("\n");
		goto free_xs;
	}

 restarted:
	scsipi_run_queue(chan);

	/*
	 * The xfer is enqueued, and possibly running.  If it's to be
	 * completed asynchronously, just return now.
	 */
	if (async)
		return (EJUSTRETURN);

	/*
	 * Not an asynchronous command; wait for it to complete.
	 */
	s = splbio();
	while ((xs->xs_status & XS_STS_DONE) == 0) {
		if (poll) {
			scsipi_printaddr(periph);
			printf("polling command not done\n");
			panic("scsipi_execute_xs");
		}
		(void) tsleep(xs, PRIBIO, "xscmd", 0);
	}
	splx(s);

	/*
	 * Command is complete.  scsipi_done() has awakened us to perform
	 * the error handling.
	 */
	error = scsipi_complete(xs);
	if (error == ERESTART)
		goto restarted;

	/*
	 * Command completed successfully or fatal error occurred.  Fall
	 * into....
	 */
 free_xs:
	s = splbio();
	scsipi_put_xs(xs);
	splx(s);

	/*
	 * Kick the queue, keep it running in case it stopped for some
	 * reason.
	 */
	scsipi_run_queue(chan);

	return (error);
}

/*
 * scsipi_completion_thread:
 *
 *	This is the completion thread.  We wait for errors on
 *	asynchronous xfers, and perform the error handling
 *	function, restarting the command, if necessary.
 */
void
scsipi_completion_thread(arg)
	void *arg;
{
	struct scsipi_channel *chan = arg;
	struct scsipi_xfer *xs;
	int s;

	for (;;) {
		s = splbio();
		xs = TAILQ_FIRST(&chan->chan_complete);
		if (xs == NULL &&
		    (chan->chan_flags &
		    (SCSIPI_CHAN_SHUTDOWN | SCSIPI_CHAN_CALLBACK)) == 0) {
			(void) tsleep(&chan->chan_complete, PRIBIO,
			    "sccomp", 0);
			splx(s);
			continue;
		}
		if (chan->chan_flags & SCSIPI_CHAN_CALLBACK) {
			/* call chan_callback from thread context */
			chan->chan_flags &= ~SCSIPI_CHAN_CALLBACK;
			chan->chan_callback(chan, chan->chan_callback_arg);
			splx(s);
			continue;
		}
		if (chan->chan_flags & SCSIPI_CHAN_SHUTDOWN) {
			splx(s);
			break;
		}
		if (xs) {
			TAILQ_REMOVE(&chan->chan_complete, xs, channel_q);
			splx(s);

			/*
			 * Have an xfer with an error; process it.
			 */
			(void) scsipi_complete(xs);

			/*
			 * Kick the queue; keep it running if it was stopped
			 * for some reason.
			 */
			scsipi_run_queue(chan);
		} else {
			splx(s);
		}
	}

	chan->chan_thread = NULL;

	/* In case parent is waiting for us to exit. */
	wakeup(&chan->chan_thread);

	kthread_exit(0);
}

/*
 * scsipi_create_completion_thread:
 *
 *	Callback to actually create the completion thread.
 */
void
scsipi_create_completion_thread(arg)
	void *arg;
{
	struct scsipi_channel *chan = arg;
	struct scsipi_adapter *adapt = chan->chan_adapter;

	if (kthread_create1(scsipi_completion_thread, chan,
	    &chan->chan_thread, "%s:%d", adapt->adapt_dev->dv_xname,
	    chan->chan_channel)) {
		printf("%s: unable to create completion thread for "
		    "channel %d\n", adapt->adapt_dev->dv_xname,
		    chan->chan_channel);
		panic("scsipi_create_completion_thread");
	}
}

/*
 * scsipi_thread_call_callback:
 *
 *	request to call a callback from the completion thread
 */
int
scsipi_thread_call_callback(chan, callback, arg)
	struct scsipi_channel *chan;
	void (*callback) __P((struct scsipi_channel *, void *));
	void *arg;
{
	int s;

	s = splbio();
	if (chan->chan_flags & SCSIPI_CHAN_CALLBACK) {
		splx(s);
		return EBUSY;
	}
	scsipi_channel_freeze(chan, 1);
	chan->chan_callback = callback;
	chan->chan_callback_arg = arg;
	chan->chan_flags |= SCSIPI_CHAN_CALLBACK;
	wakeup(&chan->chan_complete);
	splx(s);
	return (0);
}

/*
 * scsipi_async_event:
 *
 *	Handle an asynchronous event from an adapter.
 */
void
scsipi_async_event(chan, event, arg)
	struct scsipi_channel *chan;
	scsipi_async_event_t event;
	void *arg;
{
	int s;

	s = splbio();
	switch (event) {
	case ASYNC_EVENT_MAX_OPENINGS:
		scsipi_async_event_max_openings(chan,
		    (struct scsipi_max_openings *)arg);
		break;

	case ASYNC_EVENT_XFER_MODE:
		scsipi_async_event_xfer_mode(chan,
		    (struct scsipi_xfer_mode *)arg);
		break;

	case ASYNC_EVENT_RESET:
		scsipi_async_event_channel_reset(chan);
		break;
	}
	splx(s);
}

/*
 * scsipi_print_xfer_mode:
 *
 *	Print a periph's capabilities.
 */
void
scsipi_print_xfer_mode(periph)
	struct scsipi_periph *periph;
{
	int period, freq, speed, mbs;

	if ((periph->periph_flags & PERIPH_MODE_VALID) == 0)
		return;

	printf("%s: ", periph->periph_dev->dv_xname);
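	/*
	 * scsipi_sync_factor_to_period() yields the period in tenths
	 * of nanoseconds, hence the %d.%d formatting below.
	 */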
2089 if (periph->periph_mode & PERIPH_CAP_SYNC) {
2090 period = scsipi_sync_factor_to_period(periph->periph_period);
2091 printf("sync (%d.%dns offset %d)",
2092 period / 10, period % 10, periph->periph_offset);
2093 } else
2094 printf("async");
2095
2096 if (periph->periph_mode & PERIPH_CAP_WIDE32)
2097 printf(", 32-bit");
2098 else if (periph->periph_mode & PERIPH_CAP_WIDE16)
2099 printf(", 16-bit");
2100 else
2101 printf(", 8-bit");
2102
2103 if (periph->periph_mode & PERIPH_CAP_SYNC) {
2104 freq = scsipi_sync_factor_to_freq(periph->periph_period);
2105 speed = freq;
2106 if (periph->periph_mode & PERIPH_CAP_WIDE32)
2107 speed *= 4;
2108 else if (periph->periph_mode & PERIPH_CAP_WIDE16)
2109 speed *= 2;
2110 mbs = speed / 1000;
2111 if (mbs > 0)
2112 printf(" (%d.%03dMB/s)", mbs, speed % 1000);
2113 else
2114 printf(" (%dKB/s)", speed % 1000);
2115 }
2116
2117 printf(" transfers");
2118
2119 if (periph->periph_mode & PERIPH_CAP_TQING)
2120 printf(", tagged queueing");
2121
2122 printf("\n");
2123 }

/*
 * scsipi_async_event_max_openings:
 *
 *	Update the maximum number of outstanding commands a
 *	device may have.
 */
void
scsipi_async_event_max_openings(chan, mo)
	struct scsipi_channel *chan;
	struct scsipi_max_openings *mo;
{
	struct scsipi_periph *periph;
	int minlun, maxlun;

	if (mo->mo_lun == -1) {
		/*
		 * Wildcarded; apply it to all LUNs.
		 */
		minlun = 0;
		maxlun = chan->chan_nluns - 1;
	} else
		minlun = maxlun = mo->mo_lun;

	for (; minlun <= maxlun; minlun++) {
		periph = scsipi_lookup_periph(chan, mo->mo_target, minlun);
		if (periph == NULL)
			continue;

		if (mo->mo_openings < periph->periph_openings)
			periph->periph_openings = mo->mo_openings;
		else if (mo->mo_openings > periph->periph_openings &&
		    (periph->periph_flags & PERIPH_GROW_OPENINGS) != 0)
			periph->periph_openings = mo->mo_openings;
	}
}
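/*
 * Illustrative only: an adapter that wants to clamp every LUN of
 * target 2 to 4 openings could post the event like this (field names
 * as used above):
 *
 *	struct scsipi_max_openings mo;
 *
 *	mo.mo_target = 2;
 *	mo.mo_lun = -1;			// wildcard: all LUNs
 *	mo.mo_openings = 4;
 *	scsipi_async_event(chan, ASYNC_EVENT_MAX_OPENINGS, &mo);
 */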

/*
 * scsipi_async_event_xfer_mode:
 *
 *	Update the xfer mode for all periphs sharing the
 *	specified I_T Nexus.
 */
void
scsipi_async_event_xfer_mode(chan, xm)
	struct scsipi_channel *chan;
	struct scsipi_xfer_mode *xm;
{
	struct scsipi_periph *periph;
	int lun, announce, mode, period, offset;

	for (lun = 0; lun < chan->chan_nluns; lun++) {
		periph = scsipi_lookup_periph(chan, xm->xm_target, lun);
		if (periph == NULL)
			continue;
		announce = 0;

		/*
		 * Clamp the xfer mode down to this periph's capabilities.
		 */
		mode = xm->xm_mode & periph->periph_cap;
		if (mode & PERIPH_CAP_SYNC) {
			period = xm->xm_period;
			offset = xm->xm_offset;
		} else {
			period = 0;
			offset = 0;
		}

		/*
		 * If we do not have a valid xfer mode yet, or the parameters
		 * are different, announce them.
		 */
		if ((periph->periph_flags & PERIPH_MODE_VALID) == 0 ||
		    periph->periph_mode != mode ||
		    periph->periph_period != period ||
		    periph->periph_offset != offset)
			announce = 1;

		periph->periph_mode = mode;
		periph->periph_period = period;
		periph->periph_offset = offset;
		periph->periph_flags |= PERIPH_MODE_VALID;

		if (announce)
			scsipi_print_xfer_mode(periph);
	}
}
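/*
 * Example of the clamp above (illustrative): if the adapter reports
 * xm_mode = PERIPH_CAP_SYNC | PERIPH_CAP_WIDE16 | PERIPH_CAP_TQING but
 * the periph's periph_cap lacks PERIPH_CAP_WIDE16, the periph is
 * recorded as PERIPH_CAP_SYNC | PERIPH_CAP_TQING; the wide bit is
 * silently dropped.
 */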

/*
 * scsipi_set_xfer_mode:
 *
 *	Set the xfer mode for the specified I_T Nexus.
 */
void
scsipi_set_xfer_mode(chan, target, immed)
	struct scsipi_channel *chan;
	int target, immed;
{
	struct scsipi_xfer_mode xm;
	struct scsipi_periph *itperiph;
	int lun, s;

	/*
	 * Go to the minimal xfer mode.
	 */
	xm.xm_target = target;
	xm.xm_mode = 0;
	xm.xm_period = 0;			/* ignored */
	xm.xm_offset = 0;			/* ignored */

	/*
	 * Find the first LUN we know about on this I_T Nexus.
	 */
	for (itperiph = NULL, lun = 0; lun < chan->chan_nluns; lun++) {
		itperiph = scsipi_lookup_periph(chan, target, lun);
		if (itperiph != NULL)
			break;
	}
	if (itperiph != NULL) {
		xm.xm_mode = itperiph->periph_cap;
		/*
		 * Now issue the request to the adapter.
		 */
		s = splbio();
		scsipi_adapter_request(chan, ADAPTER_REQ_SET_XFER_MODE, &xm);
		splx(s);
		/*
		 * If we want this to happen immediately, issue a dummy
		 * command, since most adapters can't really negotiate unless
		 * they're executing a job.
		 */
		if (immed != 0) {
			(void) scsipi_test_unit_ready(itperiph,
			    XS_CTL_DISCOVERY | XS_CTL_IGNORE_ILLEGAL_REQUEST |
			    XS_CTL_IGNORE_NOT_READY |
			    XS_CTL_IGNORE_MEDIA_CHANGE);
		}
	}
}
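/*
 * Illustrative usage (not from this file): after (re)configuring a
 * target, a driver could force an immediate renegotiation with:
 *
 *	scsipi_set_xfer_mode(chan, target, 1);
 *
 * With immed == 0 the request is issued to the adapter, but per the
 * comment above it typically takes effect only with the next command.
 */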

/*
 * scsipi_async_event_channel_reset:
 *
 *	Handle a SCSI bus reset event.  Called at splbio().
 */
void
scsipi_async_event_channel_reset(chan)
	struct scsipi_channel *chan;
{
	struct scsipi_xfer *xs, *xs_next;
	struct scsipi_periph *periph;
	int target, lun;

	/*
	 * Channel has been reset.  Also mark pending REQUEST_SENSE commands
	 * as reset, since the sense data is no longer available.  We can't
	 * call scsipi_done() from here, as these commands have not been
	 * sent to the adapter yet (doing so would corrupt the accounting).
	 */

	for (xs = TAILQ_FIRST(&chan->chan_queue); xs != NULL; xs = xs_next) {
		xs_next = TAILQ_NEXT(xs, channel_q);
		if (xs->xs_control & XS_CTL_REQSENSE) {
			TAILQ_REMOVE(&chan->chan_queue, xs, channel_q);
			xs->error = XS_RESET;
			if ((xs->xs_control & XS_CTL_ASYNC) != 0)
				TAILQ_INSERT_TAIL(&chan->chan_complete, xs,
				    channel_q);
		}
	}
	wakeup(&chan->chan_complete);
	/* Catch xfers with pending sense which may not have a REQSENSE xs yet. */
	for (target = 0; target < chan->chan_ntargets; target++) {
		if (target == chan->chan_id)
			continue;
		for (lun = 0; lun < chan->chan_nluns; lun++) {
			periph = chan->chan_periphs[target][lun];
			if (periph) {
				xs = periph->periph_xscheck;
				if (xs)
					xs->error = XS_RESET;
			}
		}
	}
}

/*
 * scsipi_target_detach:
 *
 *	Detach all periphs associated with the specified I_T Nexus;
 *	a target or lun of -1 acts as a wildcard.  Must be called
 *	from valid thread context.
 */
int
scsipi_target_detach(chan, target, lun, flags)
	struct scsipi_channel *chan;
	int target, lun;
	int flags;
{
	struct scsipi_periph *periph;
	int ctarget, mintarget, maxtarget;
	int clun, minlun, maxlun;
	int error;

	if (target == -1) {
		mintarget = 0;
		maxtarget = chan->chan_ntargets;
	} else {
		if (target == chan->chan_id)
			return (EINVAL);
		if (target < 0 || target >= chan->chan_ntargets)
			return (EINVAL);
		mintarget = target;
		maxtarget = target + 1;
	}

	if (lun == -1) {
		minlun = 0;
		maxlun = chan->chan_nluns;
	} else {
		if (lun < 0 || lun >= chan->chan_nluns)
			return (EINVAL);
		minlun = lun;
		maxlun = lun + 1;
	}

	for (ctarget = mintarget; ctarget < maxtarget; ctarget++) {
		if (ctarget == chan->chan_id)
			continue;

		for (clun = minlun; clun < maxlun; clun++) {
			periph = scsipi_lookup_periph(chan, ctarget, clun);
			if (periph == NULL)
				continue;
			error = config_detach(periph->periph_dev, flags);
			if (error)
				return (error);
			scsipi_remove_periph(chan, periph);
			free(periph, M_DEVBUF);
		}
	}
	return (0);
}
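/*
 * Illustrative usage (not from this file): detach every LUN of target 3,
 * stopping at the first device that refuses:
 *
 *	error = scsipi_target_detach(chan, 3, -1, 0);
 *	if (error)
 *		return (error);
 */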

/*
 * scsipi_adapter_addref:
 *
 *	Add a reference to the specified adapter, enabling the
 *	adapter if necessary.
 */
int
scsipi_adapter_addref(adapt)
	struct scsipi_adapter *adapt;
{
	int s, error = 0;

	s = splbio();
	if (adapt->adapt_refcnt++ == 0 && adapt->adapt_enable != NULL) {
		error = (*adapt->adapt_enable)(adapt->adapt_dev, 1);
		if (error)
			adapt->adapt_refcnt--;
	}
	splx(s);
	return (error);
}

/*
 * scsipi_adapter_delref:
 *
 *	Delete a reference to the specified adapter, disabling the
 *	adapter if possible.
 */
void
scsipi_adapter_delref(adapt)
	struct scsipi_adapter *adapt;
{
	int s;

	s = splbio();
	if (adapt->adapt_refcnt-- == 1 && adapt->adapt_enable != NULL)
		(void) (*adapt->adapt_enable)(adapt->adapt_dev, 0);
	splx(s);
}
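/*
 * Illustrative pairing (not from this file): callers bracket periods
 * of activity so the adapter is enabled only while referenced:
 *
 *	if ((error = scsipi_adapter_addref(adapt)) != 0)
 *		return (error);
 *	// ... issue commands ...
 *	scsipi_adapter_delref(adapt);
 */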

struct scsipi_syncparam {
	int	ss_factor;
	int	ss_period;	/* ns * 10 */
} scsipi_syncparams[] = {
	{ 0x0a, 250 },
	{ 0x0b, 303 },
	{ 0x0c, 500 },
};
const int scsipi_nsyncparams =
    sizeof(scsipi_syncparams) / sizeof(scsipi_syncparams[0]);

int
scsipi_sync_period_to_factor(period)
	int period;	/* ns * 10 */
{
	int i;

	for (i = 0; i < scsipi_nsyncparams; i++) {
		if (period <= scsipi_syncparams[i].ss_period)
			return (scsipi_syncparams[i].ss_factor);
	}

	return ((period / 10) / 4);
}

int
scsipi_sync_factor_to_period(factor)
	int factor;
{
	int i;

	for (i = 0; i < scsipi_nsyncparams; i++) {
		if (factor == scsipi_syncparams[i].ss_factor)
			return (scsipi_syncparams[i].ss_period);
	}

	return ((factor * 4) * 10);
}

int
scsipi_sync_factor_to_freq(factor)
	int factor;
{
	int i;

	for (i = 0; i < scsipi_nsyncparams; i++) {
		if (factor == scsipi_syncparams[i].ss_factor)
			return (10000000 / scsipi_syncparams[i].ss_period);
	}

	return (10000000 / ((factor * 4) * 10));
}
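/*
 * Worked examples (illustrative):
 *
 *	scsipi_sync_period_to_factor(250) == 0x0a	(25.0 ns, table hit)
 *	scsipi_sync_factor_to_period(0x0c) == 500	(50.0 ns)
 *	scsipi_sync_factor_to_freq(0x0c) == 20000	(kHz, i.e. 20 MHz)
 *
 * Factors outside the table fall back to the linear rule of 4 ns per
 * factor unit, e.g. factor 25 gives a period of 1000 (100.0 ns, 10 MHz).
 */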

#ifdef SCSIPI_DEBUG
/*
 * Given a scsipi_xfer, dump the request, in all its glory.
 */
void
show_scsipi_xs(xs)
	struct scsipi_xfer *xs;
{

	printf("xs(%p): ", xs);
	printf("xs_control(0x%08x)", xs->xs_control);
	printf("xs_status(0x%08x)", xs->xs_status);
	printf("periph(%p)", xs->xs_periph);
	printf("retr(0x%x)", xs->xs_retries);
	printf("timo(0x%x)", xs->timeout);
	printf("cmd(%p)", xs->cmd);
	printf("len(0x%x)", xs->cmdlen);
	printf("data(%p)", xs->data);
	printf("len(0x%x)", xs->datalen);
	printf("res(0x%x)", xs->resid);
	printf("err(0x%x)", xs->error);
	printf("bp(%p)", xs->bp);
	show_scsipi_cmd(xs);
}

void
show_scsipi_cmd(xs)
	struct scsipi_xfer *xs;
{
	u_char *b = (u_char *) xs->cmd;
	int i = 0;

	scsipi_printaddr(xs->xs_periph);
	printf(" command: ");

	if ((xs->xs_control & XS_CTL_RESET) == 0) {
		while (i < xs->cmdlen) {
			if (i)
				printf(",");
			printf("0x%x", b[i++]);
		}
		printf("-[%d bytes]\n", xs->datalen);
		if (xs->datalen)
			show_mem(xs->data, min(64, xs->datalen));
	} else
		printf("-RESET-\n");
}

void
show_mem(address, num)
	u_char *address;
	int num;
{
	int x;

	printf("------------------------------");
	for (x = 0; x < num; x++) {
		if ((x % 16) == 0)
			printf("\n%03d: ", x);
		printf("%02x ", *address++);
	}
	printf("\n------------------------------\n");
}
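/*
 * Illustrative output: show_mem(buf, 20) on a zero-filled buffer prints
 * 16 bytes per row, each row prefixed with its decimal offset:
 *
 *	------------------------------
 *	000: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
 *	016: 00 00 00 00
 *	------------------------------
 */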
#endif /* SCSIPI_DEBUG */