/*	$NetBSD: scsipi_base.c,v 1.56 2001/09/18 20:20:26 mjacob Exp $	*/

/*-
 * Copyright (c) 1998, 1999, 2000 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Charles M. Hannum; by Jason R. Thorpe of the Numerical Aerospace
 * Simulation Facility, NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "opt_scsi.h"

#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/buf.h>
#include <sys/uio.h>
#include <sys/malloc.h>
#include <sys/pool.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/proc.h>
#include <sys/kthread.h>

#include <dev/scsipi/scsipi_all.h>
#include <dev/scsipi/scsipi_disk.h>
#include <dev/scsipi/scsipiconf.h>
#include <dev/scsipi/scsipi_base.h>

#include <dev/scsipi/scsi_all.h>
#include <dev/scsipi/scsi_message.h>

int	scsipi_complete __P((struct scsipi_xfer *));
void	scsipi_request_sense __P((struct scsipi_xfer *));
int	scsipi_enqueue __P((struct scsipi_xfer *));
void	scsipi_run_queue __P((struct scsipi_channel *chan));

void	scsipi_completion_thread __P((void *));

void	scsipi_get_tag __P((struct scsipi_xfer *));
void	scsipi_put_tag __P((struct scsipi_xfer *));

int	scsipi_get_resource __P((struct scsipi_channel *));
void	scsipi_put_resource __P((struct scsipi_channel *));
__inline int scsipi_grow_resources __P((struct scsipi_channel *));

void	scsipi_async_event_max_openings __P((struct scsipi_channel *,
	    struct scsipi_max_openings *));
void	scsipi_async_event_xfer_mode __P((struct scsipi_channel *,
	    struct scsipi_xfer_mode *));
void	scsipi_async_event_channel_reset __P((struct scsipi_channel *));

struct pool scsipi_xfer_pool;

/*
 * scsipi_init:
 *
 *	Called when a scsibus or atapibus is attached to the system
 *	to initialize shared data structures.
 */
void
scsipi_init()
{
	static int scsipi_init_done;

	if (scsipi_init_done)
		return;
	scsipi_init_done = 1;

	/* Initialize the scsipi_xfer pool. */
	pool_init(&scsipi_xfer_pool, sizeof(struct scsipi_xfer), 0,
	    0, 0, "scxspl", 0, NULL, NULL, M_DEVBUF);
}

/*
 * scsipi_channel_init:
 *
 *	Initialize a scsipi_channel when it is attached.
 */
int
scsipi_channel_init(chan)
	struct scsipi_channel *chan;
{
	size_t nbytes;
	int i;

	/* Initialize shared data. */
	scsipi_init();

	/* Initialize the queues. */
	TAILQ_INIT(&chan->chan_queue);
	TAILQ_INIT(&chan->chan_complete);

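	/*
	 * chan_periphs[] is a two-level table: one row per target,
	 * each row holding a periph pointer per LUN, so a lookup
	 * is chan_periphs[target][lun].
	 */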
	nbytes = chan->chan_ntargets * sizeof(struct scsipi_periph **);
	chan->chan_periphs = malloc(nbytes, M_DEVBUF, M_NOWAIT);
	if (chan->chan_periphs == NULL)
		return (ENOMEM);

	nbytes = chan->chan_nluns * sizeof(struct scsipi_periph *);
	for (i = 0; i < chan->chan_ntargets; i++) {
		chan->chan_periphs[i] = malloc(nbytes, M_DEVBUF, M_NOWAIT);
		if (chan->chan_periphs[i] == NULL) {
			while (--i >= 0) {
				free(chan->chan_periphs[i], M_DEVBUF);
			}
			/* Don't leak the target table itself. */
			free(chan->chan_periphs, M_DEVBUF);
			return (ENOMEM);
		}
		memset(chan->chan_periphs[i], 0, nbytes);
	}

	/*
	 * Create the asynchronous completion thread.
	 */
	kthread_create(scsipi_create_completion_thread, chan);
	return (0);
}

/*
 * scsipi_channel_shutdown:
 *
 *	Shutdown a scsipi_channel.
 */
void
scsipi_channel_shutdown(chan)
	struct scsipi_channel *chan;
{

	/*
	 * Shut down the completion thread.
	 */
	chan->chan_flags |= SCSIPI_CHAN_SHUTDOWN;
	wakeup(&chan->chan_complete);

	/*
	 * Now wait for the thread to exit.
	 */
	while (chan->chan_thread != NULL)
		(void) tsleep(&chan->chan_thread, PRIBIO, "scshut", 0);
}

/*
 * scsipi_insert_periph:
 *
 *	Insert a periph into the channel.
 */
void
scsipi_insert_periph(chan, periph)
	struct scsipi_channel *chan;
	struct scsipi_periph *periph;
{
	int s;

	s = splbio();
	chan->chan_periphs[periph->periph_target][periph->periph_lun] = periph;
	splx(s);
}

/*
 * scsipi_remove_periph:
 *
 *	Remove a periph from the channel.
 */
void
scsipi_remove_periph(chan, periph)
	struct scsipi_channel *chan;
	struct scsipi_periph *periph;
{
	int s;

	s = splbio();
	chan->chan_periphs[periph->periph_target][periph->periph_lun] = NULL;
	splx(s);
}

/*
 * scsipi_lookup_periph:
 *
 *	Lookup a periph on the specified channel.
 */
struct scsipi_periph *
scsipi_lookup_periph(chan, target, lun)
	struct scsipi_channel *chan;
	int target, lun;
{
	struct scsipi_periph *periph;
	int s;

	if (target >= chan->chan_ntargets ||
	    lun >= chan->chan_nluns)
		return (NULL);

	s = splbio();
	periph = chan->chan_periphs[target][lun];
	splx(s);

	return (periph);
}

/*
 * scsipi_get_resource:
 *
 *	Allocate a single xfer `resource' from the channel.
 *
 *	NOTE: Must be called at splbio().
 */
int
scsipi_get_resource(chan)
	struct scsipi_channel *chan;
{
	struct scsipi_adapter *adapt = chan->chan_adapter;

	if (chan->chan_flags & SCSIPI_CHAN_OPENINGS) {
		if (chan->chan_openings > 0) {
			chan->chan_openings--;
			return (1);
		}
		return (0);
	}

	if (adapt->adapt_openings > 0) {
		adapt->adapt_openings--;
		return (1);
	}
	return (0);
}

/*
 * scsipi_grow_resources:
 *
 *	Attempt to grow resources for a channel.  If this succeeds,
 *	we allocate one for our caller.
 *
 *	NOTE: Must be called at splbio().
 */
__inline int
scsipi_grow_resources(chan)
	struct scsipi_channel *chan;
{

	if (chan->chan_flags & SCSIPI_CHAN_CANGROW) {
		scsipi_adapter_request(chan, ADAPTER_REQ_GROW_RESOURCES, NULL);
		return (scsipi_get_resource(chan));
	}

	return (0);
}

/*
 * scsipi_put_resource:
 *
 *	Free a single xfer `resource' to the channel.
 *
 *	NOTE: Must be called at splbio().
 */
void
scsipi_put_resource(chan)
	struct scsipi_channel *chan;
{
	struct scsipi_adapter *adapt = chan->chan_adapter;

	if (chan->chan_flags & SCSIPI_CHAN_OPENINGS)
		chan->chan_openings++;
	else
		adapt->adapt_openings++;
}

/*
 * scsipi_get_tag:
 *
 *	Get a tag ID for the specified xfer.
 *
 *	NOTE: Must be called at splbio().
 */
void
scsipi_get_tag(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_periph *periph = xs->xs_periph;
	int word, bit, tag;

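	/*
	 * The free-tag map is an array of PERIPH_NTAGWORDS 32-bit
	 * words; bit (tag % 32) of word (tag / 32) is set while that
	 * tag ID is free.  ffs() returns the 1-based index of the
	 * lowest set bit, so `(word << 5) | (bit - 1)' below recovers
	 * the tag ID.
	 */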
	for (word = 0; word < PERIPH_NTAGWORDS; word++) {
		bit = ffs(periph->periph_freetags[word]);
		if (bit != 0)
			break;
	}
#ifdef DIAGNOSTIC
	if (word == PERIPH_NTAGWORDS) {
		scsipi_printaddr(periph);
		printf("no free tags\n");
		panic("scsipi_get_tag");
	}
#endif

	bit -= 1;
	periph->periph_freetags[word] &= ~(1 << bit);
	tag = (word << 5) | bit;

	/* XXX Should eventually disallow this completely. */
	if (tag >= periph->periph_openings) {
		scsipi_printaddr(periph);
		printf("WARNING: tag %d greater than available openings %d\n",
		    tag, periph->periph_openings);
	}

	xs->xs_tag_id = tag;
}

/*
 * scsipi_put_tag:
 *
 *	Put the tag ID for the specified xfer back into the pool.
 *
 *	NOTE: Must be called at splbio().
 */
void
scsipi_put_tag(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_periph *periph = xs->xs_periph;
	int word, bit;

	word = xs->xs_tag_id >> 5;
	bit = xs->xs_tag_id & 0x1f;

	periph->periph_freetags[word] |= (1 << bit);
}

/*
 * scsipi_get_xs:
 *
 *	Allocate an xfer descriptor and associate it with the
 *	specified peripheral.  If the peripheral has no more
 *	available command openings, we either block waiting for
 *	one to become available, or fail.
 */
struct scsipi_xfer *
scsipi_get_xs(periph, flags)
	struct scsipi_periph *periph;
	int flags;
{
	struct scsipi_xfer *xs;
	int s;

	SC_DEBUG(periph, SCSIPI_DB3, ("scsipi_get_xs\n"));

	/*
	 * If we're cold, make sure we poll.
	 */
	if (cold)
		flags |= XS_CTL_NOSLEEP | XS_CTL_POLL;

#ifdef DIAGNOSTIC
	/*
	 * URGENT commands can never be ASYNC.
	 */
	if ((flags & (XS_CTL_URGENT|XS_CTL_ASYNC)) ==
	    (XS_CTL_URGENT|XS_CTL_ASYNC)) {
		scsipi_printaddr(periph);
		printf("URGENT and ASYNC\n");
		panic("scsipi_get_xs");
	}
#endif

	s = splbio();
	/*
	 * Wait for a command opening to become available.  Rules:
	 *
	 *	- All xfers must wait for an available opening.
	 *	  Exception: URGENT xfers can proceed when
	 *	  active == openings, because we use the opening
	 *	  of the command we're recovering for.
	 *	- if the periph has sense pending, only URGENT & REQSENSE
	 *	  xfers may proceed.
	 *
	 *	- If the periph is recovering, only URGENT xfers may
	 *	  proceed.
	 *
	 *	- If the periph is currently executing a recovery
	 *	  command, URGENT commands must block, because only
	 *	  one recovery command can execute at a time.
	 */
	for (;;) {
		if (flags & XS_CTL_URGENT) {
			if (periph->periph_active > periph->periph_openings)
				goto wait_for_opening;
			if (periph->periph_flags & PERIPH_SENSE) {
				if ((flags & XS_CTL_REQSENSE) == 0)
					goto wait_for_opening;
			} else {
				if ((periph->periph_flags &
				    PERIPH_RECOVERY_ACTIVE) != 0)
					goto wait_for_opening;
				periph->periph_flags |= PERIPH_RECOVERY_ACTIVE;
			}
			break;
		}
		if (periph->periph_active >= periph->periph_openings ||
		    (periph->periph_flags & PERIPH_RECOVERING) != 0)
			goto wait_for_opening;
		periph->periph_active++;
		break;

 wait_for_opening:
		if (flags & XS_CTL_NOSLEEP) {
			splx(s);
			return (NULL);
		}
		SC_DEBUG(periph, SCSIPI_DB3, ("sleeping\n"));
		periph->periph_flags |= PERIPH_WAITING;
		(void) tsleep(periph, PRIBIO, "getxs", 0);
	}
	SC_DEBUG(periph, SCSIPI_DB3, ("calling pool_get\n"));
	xs = pool_get(&scsipi_xfer_pool,
	    ((flags & XS_CTL_NOSLEEP) != 0 ? PR_NOWAIT : PR_WAITOK));
	if (xs == NULL) {
		if (flags & XS_CTL_URGENT) {
			if ((flags & XS_CTL_REQSENSE) == 0)
				periph->periph_flags &= ~PERIPH_RECOVERY_ACTIVE;
		} else
			periph->periph_active--;
		scsipi_printaddr(periph);
		printf("unable to allocate %sscsipi_xfer\n",
		    (flags & XS_CTL_URGENT) ? "URGENT " : "");
	}
	splx(s);

	SC_DEBUG(periph, SCSIPI_DB3, ("returning\n"));

	if (xs != NULL) {
		/* Zero the xfer first, so we don't wipe the callout. */
		memset(xs, 0, sizeof(*xs));
		callout_init(&xs->xs_callout);
		xs->xs_periph = periph;
		xs->xs_control = flags;
		xs->xs_status = 0;
		s = splbio();
		TAILQ_INSERT_TAIL(&periph->periph_xferq, xs, device_q);
		splx(s);
	}
	return (xs);
}

/*
 * scsipi_put_xs:
 *
 *	Release an xfer descriptor, decreasing the outstanding command
 *	count for the peripheral.  If there is a thread waiting for
 *	an opening, wake it up.  If not, kick any queued I/O the
 *	peripheral may have.
 *
 *	NOTE: Must be called at splbio().
 */
void
scsipi_put_xs(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_periph *periph = xs->xs_periph;
	int flags = xs->xs_control;

	SC_DEBUG(periph, SCSIPI_DB3, ("scsipi_free_xs\n"));

	TAILQ_REMOVE(&periph->periph_xferq, xs, device_q);
	pool_put(&scsipi_xfer_pool, xs);

#ifdef DIAGNOSTIC
	if ((periph->periph_flags & PERIPH_RECOVERY_ACTIVE) != 0 &&
	    periph->periph_active == 0) {
		scsipi_printaddr(periph);
		printf("recovery without a command to recover for\n");
		panic("scsipi_put_xs");
	}
#endif

	if (flags & XS_CTL_URGENT) {
		if ((flags & XS_CTL_REQSENSE) == 0)
			periph->periph_flags &= ~PERIPH_RECOVERY_ACTIVE;
	} else
		periph->periph_active--;
	if (periph->periph_active == 0 &&
	    (periph->periph_flags & PERIPH_WAITDRAIN) != 0) {
		periph->periph_flags &= ~PERIPH_WAITDRAIN;
		wakeup(&periph->periph_active);
	}

	if (periph->periph_flags & PERIPH_WAITING) {
		periph->periph_flags &= ~PERIPH_WAITING;
		wakeup(periph);
	} else {
		if (periph->periph_switch->psw_start != NULL) {
			SC_DEBUG(periph, SCSIPI_DB2,
			    ("calling private start()\n"));
			(*periph->periph_switch->psw_start)(periph);
		}
	}
}

/*
 * scsipi_channel_freeze:
 *
 *	Freeze a channel's xfer queue.
 */
void
scsipi_channel_freeze(chan, count)
	struct scsipi_channel *chan;
	int count;
{
	int s;

	s = splbio();
	chan->chan_qfreeze += count;
	splx(s);
}

/*
 * scsipi_channel_thaw:
 *
 *	Thaw a channel's xfer queue.
 */
void
scsipi_channel_thaw(chan, count)
	struct scsipi_channel *chan;
	int count;
{
	int s;

	s = splbio();
	chan->chan_qfreeze -= count;
	/*
	 * Don't let the freeze count go negative.
	 *
	 * Presumably the adapter driver could keep track of this,
	 * but it might just be easier to do this here so as to allow
	 * multiple callers, including those outside the adapter driver.
	 */
	if (chan->chan_qfreeze < 0) {
		chan->chan_qfreeze = 0;
	}
	splx(s);
	/*
	 * Kick the channel's queue here.  Note, we may be running in
	 * interrupt context (softclock or HBA's interrupt), so the adapter
	 * driver had better not sleep.
	 */
	if (chan->chan_qfreeze == 0)
		scsipi_run_queue(chan);
}

/*
 * scsipi_channel_timed_thaw:
 *
 *	Thaw a channel after some time has expired.  This will also
 *	run the channel's queue if the freeze count has reached 0.
 */
void
scsipi_channel_timed_thaw(arg)
	void *arg;
{
	struct scsipi_channel *chan = arg;

	scsipi_channel_thaw(chan, 1);
}

/*
 * scsipi_periph_freeze:
 *
 *	Freeze a device's xfer queue.
 */
void
scsipi_periph_freeze(periph, count)
	struct scsipi_periph *periph;
	int count;
{
	int s;

	s = splbio();
	periph->periph_qfreeze += count;
	splx(s);
}

/*
 * scsipi_periph_thaw:
 *
 *	Thaw a device's xfer queue.
 */
void
scsipi_periph_thaw(periph, count)
	struct scsipi_periph *periph;
	int count;
{
	int s;

	s = splbio();
	periph->periph_qfreeze -= count;
#ifdef DIAGNOSTIC
	if (periph->periph_qfreeze < 0) {
		static const char pc[] = "periph freeze count < 0";
		scsipi_printaddr(periph);
		printf("%s\n", pc);
		panic(pc);
	}
#endif
	if (periph->periph_qfreeze == 0 &&
	    (periph->periph_flags & PERIPH_WAITING) != 0)
		wakeup(periph);
	splx(s);
}

/*
 * scsipi_periph_timed_thaw:
 *
 *	Thaw a device after some time has expired.
 */
void
scsipi_periph_timed_thaw(arg)
	void *arg;
{
	int s;
	struct scsipi_periph *periph = arg;

	callout_stop(&periph->periph_callout);

	s = splbio();
	scsipi_periph_thaw(periph, 1);

	/*
	 * Tell the completion thread to kick the channel's queue here.
	 */
	periph->periph_channel->chan_flags |= SCSIPI_CHAN_KICK;
	wakeup(&periph->periph_channel->chan_complete);
	splx(s);
}

/*
 * scsipi_wait_drain:
 *
 *	Wait for a periph's pending xfers to drain.
 */
void
scsipi_wait_drain(periph)
	struct scsipi_periph *periph;
{
	int s;

	s = splbio();
	while (periph->periph_active != 0) {
		periph->periph_flags |= PERIPH_WAITDRAIN;
		(void) tsleep(&periph->periph_active, PRIBIO, "sxdrn", 0);
	}
	splx(s);
}

/*
 * scsipi_kill_pending:
 *
 *	Kill off all pending xfers for a periph.
 *
 *	NOTE: Must be called at splbio().
 */
void
scsipi_kill_pending(periph)
	struct scsipi_periph *periph;
{

	(*periph->periph_channel->chan_bustype->bustype_kill_pending)(periph);
#ifdef DIAGNOSTIC
	if (TAILQ_FIRST(&periph->periph_xferq) != NULL)
		panic("scsipi_kill_pending");
#endif
	scsipi_wait_drain(periph);
}

/*
 * scsipi_interpret_sense:
 *
 *	Look at the returned sense and act on the error, determining
 *	the unix error number to pass back.  (0 = report no error)
 *
 *	NOTE: If we return ERESTART, we are expected to have
 *	thawed the device!
 *
 *	THIS IS THE DEFAULT ERROR HANDLER FOR SCSI DEVICES.
 */
int
scsipi_interpret_sense(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_sense_data *sense;
	struct scsipi_periph *periph = xs->xs_periph;
	u_int8_t key;
	u_int32_t info;
	int error;
#ifndef SCSIVERBOSE
	static char *error_mes[] = {
		"soft error (corrected)",
		"not ready", "medium error",
		"non-media hardware failure", "illegal request",
		"unit attention", "readonly device",
		"no data found", "vendor unique",
		"copy aborted", "command aborted",
		"search returned equal", "volume overflow",
		"verify miscompare", "unknown error key"
	};
#endif

	sense = &xs->sense.scsi_sense;
#ifdef SCSIPI_DEBUG
	if (periph->periph_dbflags & SCSIPI_DB1) {
		int count;
		scsipi_printaddr(periph);
		printf(" sense debug information:\n");
		printf("\tcode 0x%x valid 0x%x\n",
		    sense->error_code & SSD_ERRCODE,
		    sense->error_code & SSD_ERRCODE_VALID ? 1 : 0);
		printf("\tseg 0x%x key 0x%x ili 0x%x eom 0x%x fmark 0x%x\n",
		    sense->segment,
		    sense->flags & SSD_KEY,
		    sense->flags & SSD_ILI ? 1 : 0,
		    sense->flags & SSD_EOM ? 1 : 0,
		    sense->flags & SSD_FILEMARK ? 1 : 0);
		printf("\ninfo: 0x%x 0x%x 0x%x 0x%x followed by %d "
		    "extra bytes\n",
		    sense->info[0],
		    sense->info[1],
		    sense->info[2],
		    sense->info[3],
		    sense->extra_len);
		printf("\textra: ");
		for (count = 0; count < ADD_BYTES_LIM(sense); count++)
			printf("0x%x ", sense->cmd_spec_info[count]);
		printf("\n");
	}
#endif

	/*
	 * If the periph has its own error handler, call it first.
	 * If it returns a legit error value, return that, otherwise
	 * it wants us to continue with normal error processing.
	 */
	if (periph->periph_switch->psw_error != NULL) {
		SC_DEBUG(periph, SCSIPI_DB2,
		    ("calling private err_handler()\n"));
		error = (*periph->periph_switch->psw_error)(xs);
		if (error != EJUSTRETURN)
			return (error);
	}
	/* otherwise use the default */
	switch (sense->error_code & SSD_ERRCODE) {
	/*
	 * If the error code is 0x70 or 0x71 (current or deferred
	 * error), use the extended sense data and interpret the key.
	 */
	case 0x71:		/* delayed error */
		scsipi_printaddr(periph);
		key = sense->flags & SSD_KEY;
		printf(" DEFERRED ERROR, key = 0x%x\n", key);
		/* FALLTHROUGH */
	case 0x70:
		if ((sense->error_code & SSD_ERRCODE_VALID) != 0)
			info = _4btol(sense->info);
		else
			info = 0;
		key = sense->flags & SSD_KEY;

		switch (key) {
		case SKEY_NO_SENSE:
		case SKEY_RECOVERED_ERROR:
			if (xs->resid == xs->datalen && xs->datalen) {
				/*
				 * Why is this here?
				 */
				xs->resid = 0;	/* not short read */
			}
			/* FALLTHROUGH */
		case SKEY_EQUAL:
			error = 0;
			break;
		case SKEY_NOT_READY:
			if ((periph->periph_flags & PERIPH_REMOVABLE) != 0)
				periph->periph_flags &= ~PERIPH_MEDIA_LOADED;
			if ((xs->xs_control & XS_CTL_IGNORE_NOT_READY) != 0)
				return (0);
			if (sense->add_sense_code == 0x3A) {
				error = ENODEV; /* Medium not present */
				if (xs->xs_control & XS_CTL_SILENT_NODEV)
					return (error);
			} else
				error = EIO;
			if ((xs->xs_control & XS_CTL_SILENT) != 0)
				return (error);
			break;
		case SKEY_ILLEGAL_REQUEST:
			if ((xs->xs_control &
			    XS_CTL_IGNORE_ILLEGAL_REQUEST) != 0)
				return (0);
			/*
			 * Handle the case where a device reports
			 * Logical Unit Not Supported during discovery.
			 */
			if ((xs->xs_control & XS_CTL_DISCOVERY) != 0 &&
			    sense->add_sense_code == 0x25 &&
			    sense->add_sense_code_qual == 0x00)
				return (EINVAL);
			if ((xs->xs_control & XS_CTL_SILENT) != 0)
				return (EIO);
			error = EINVAL;
			break;
		case SKEY_UNIT_ATTENTION:
			if (sense->add_sense_code == 0x29 &&
			    sense->add_sense_code_qual == 0x00) {
				/* device or bus reset */
				return (ERESTART);
			}
			if ((periph->periph_flags & PERIPH_REMOVABLE) != 0)
				periph->periph_flags &= ~PERIPH_MEDIA_LOADED;
			if ((xs->xs_control &
			    XS_CTL_IGNORE_MEDIA_CHANGE) != 0 ||
			    /* XXX Should reupload any transient state. */
			    (periph->periph_flags &
			     PERIPH_REMOVABLE) == 0) {
				return (ERESTART);
			}
			if ((xs->xs_control & XS_CTL_SILENT) != 0)
				return (EIO);
			error = EIO;
			break;
		case SKEY_WRITE_PROTECT:
			error = EROFS;
			break;
		case SKEY_BLANK_CHECK:
			error = 0;
			break;
		case SKEY_ABORTED_COMMAND:
			error = ERESTART;
			break;
		case SKEY_VOLUME_OVERFLOW:
			error = ENOSPC;
			break;
		default:
			error = EIO;
			break;
		}

#ifdef SCSIVERBOSE
		if (key && (xs->xs_control & XS_CTL_SILENT) == 0)
			scsipi_print_sense(xs, 0);
#else
		if (key) {
			scsipi_printaddr(periph);
			printf("%s", error_mes[key - 1]);
			if ((sense->error_code & SSD_ERRCODE_VALID) != 0) {
				switch (key) {
				case SKEY_NOT_READY:
				case SKEY_ILLEGAL_REQUEST:
				case SKEY_UNIT_ATTENTION:
				case SKEY_WRITE_PROTECT:
					break;
				case SKEY_BLANK_CHECK:
					printf(", requested size: %d (decimal)",
					    info);
					break;
				case SKEY_ABORTED_COMMAND:
					if (xs->xs_retries)
						printf(", retrying");
					printf(", cmd 0x%x, info 0x%x",
					    xs->cmd->opcode, info);
					break;
				default:
					printf(", info = %d (decimal)", info);
				}
			}
			if (sense->extra_len != 0) {
				int n;
				printf(", data =");
				for (n = 0; n < sense->extra_len; n++)
					printf(" %02x",
					    sense->cmd_spec_info[n]);
			}
			printf("\n");
		}
#endif
		return (error);

	/*
	 * Not code 70, just report it
	 */
	default:
#if defined(SCSIDEBUG) || defined(DEBUG)
	{
		static char *uc = "undecodable sense error";
		int i;
		u_int8_t *cptr = (u_int8_t *) sense;
		scsipi_printaddr(periph);
		if (xs->cmd == &xs->cmdstore) {
			printf("%s for opcode 0x%x, data=",
			    uc, xs->cmdstore.opcode);
		} else {
			printf("%s, data=", uc);
		}
		/* Dump the whole structure, not just a pointer's worth. */
		for (i = 0; i < sizeof (*sense); i++)
			printf(" 0x%02x", *(cptr++) & 0xff);
		printf("\n");
	}
#else
		scsipi_printaddr(periph);
		printf("Sense Error Code 0x%x",
		    sense->error_code & SSD_ERRCODE);
		if ((sense->error_code & SSD_ERRCODE_VALID) != 0) {
			struct scsipi_sense_data_unextended *usense =
			    (struct scsipi_sense_data_unextended *)sense;
			printf(" at block no. %d (decimal)",
			    _3btol(usense->block));
		}
		printf("\n");
#endif
		return (EIO);
	}
}

/*
 * scsipi_size:
 *
 *	Find out from the device what its capacity is.
 */
u_long
scsipi_size(periph, flags)
	struct scsipi_periph *periph;
	int flags;
{
	struct scsipi_read_cap_data rdcap;
	struct scsipi_read_capacity scsipi_cmd;

	memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
	scsipi_cmd.opcode = READ_CAPACITY;

	/*
	 * If the command works, interpret the result as a 4 byte
	 * number of blocks
	 */
	if (scsipi_command(periph, (struct scsipi_generic *)&scsipi_cmd,
	    sizeof(scsipi_cmd), (u_char *)&rdcap, sizeof(rdcap),
	    SCSIPIRETRIES, 20000, NULL,
	    flags | XS_CTL_DATA_IN | XS_CTL_DATA_ONSTACK) != 0) {
		scsipi_printaddr(periph);
		printf("could not get size\n");
		return (0);
	}

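	/*
	 * READ CAPACITY returns the address of the last block, so the
	 * total number of blocks is that address plus one.
	 */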
	return (_4btol(rdcap.addr) + 1);
}

/*
 * scsipi_test_unit_ready:
 *
 *	Issue a `test unit ready' request.
 */
int
scsipi_test_unit_ready(periph, flags)
	struct scsipi_periph *periph;
	int flags;
{
	struct scsipi_test_unit_ready scsipi_cmd;

	/* some ATAPI drives don't support TEST_UNIT_READY. Sigh */
	if (periph->periph_quirks & PQUIRK_NOTUR)
		return (0);

	memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
	scsipi_cmd.opcode = TEST_UNIT_READY;

	return (scsipi_command(periph,
	    (struct scsipi_generic *)&scsipi_cmd, sizeof(scsipi_cmd),
	    0, 0, SCSIPIRETRIES, 10000, NULL, flags));
}

/*
 * scsipi_inquire:
 *
 *	Ask the device about itself.
 */
int
scsipi_inquire(periph, inqbuf, flags)
	struct scsipi_periph *periph;
	struct scsipi_inquiry_data *inqbuf;
	int flags;
{
	struct scsipi_inquiry scsipi_cmd;

	memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
	scsipi_cmd.opcode = INQUIRY;
	scsipi_cmd.length = sizeof(struct scsipi_inquiry_data);

	return (scsipi_command(periph,
	    (struct scsipi_generic *) &scsipi_cmd, sizeof(scsipi_cmd),
	    (u_char *) inqbuf, sizeof(struct scsipi_inquiry_data),
	    SCSIPIRETRIES, 10000, NULL, XS_CTL_DATA_IN | flags));
}

/*
 * scsipi_prevent:
 *
 *	Prevent or allow the user to remove the media
 */
int
scsipi_prevent(periph, type, flags)
	struct scsipi_periph *periph;
	int type, flags;
{
	struct scsipi_prevent scsipi_cmd;

	if (periph->periph_quirks & PQUIRK_NODOORLOCK)
		return (0);

	memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
	scsipi_cmd.opcode = PREVENT_ALLOW;
	scsipi_cmd.how = type;

	return (scsipi_command(periph,
	    (struct scsipi_generic *) &scsipi_cmd, sizeof(scsipi_cmd),
	    0, 0, SCSIPIRETRIES, 5000, NULL, flags));
}

/*
 * scsipi_start:
 *
 *	Send a START UNIT.
 */
int
scsipi_start(periph, type, flags)
	struct scsipi_periph *periph;
	int type, flags;
{
	struct scsipi_start_stop scsipi_cmd;

	if (periph->periph_quirks & PQUIRK_NOSTARTUNIT)
		return 0;

	memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
	scsipi_cmd.opcode = START_STOP;
	scsipi_cmd.byte2 = 0x00;
	scsipi_cmd.how = type;

	return (scsipi_command(periph,
	    (struct scsipi_generic *) &scsipi_cmd, sizeof(scsipi_cmd),
	    0, 0, SCSIPIRETRIES, (type & SSS_START) ? 60000 : 10000,
	    NULL, flags));
}

/*
 * scsipi_mode_sense, scsipi_mode_sense_big:
 *	get a sense page from a device
 */

int
scsipi_mode_sense(periph, byte2, page, data, len, flags, retries, timeout)
	struct scsipi_periph *periph;
	int byte2, page, len, flags, retries, timeout;
	struct scsipi_mode_header *data;
{
	struct scsipi_mode_sense scsipi_cmd;
	int error;

	memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
	scsipi_cmd.opcode = MODE_SENSE;
	scsipi_cmd.byte2 = byte2;
	scsipi_cmd.page = page;
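	/*
	 * The 6-byte SCSI MODE SENSE CDB carries a one-byte allocation
	 * length, while the ATAPI (10-byte) form uses a two-byte
	 * big-endian field, hence the _lto2b() in the ATAPI case.
	 */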
	if (scsipi_periph_bustype(periph) == SCSIPI_BUSTYPE_ATAPI)
		_lto2b(len, scsipi_cmd.u_len.atapi.length);
	else
		scsipi_cmd.u_len.scsi.length = len & 0xff;
	error = scsipi_command(periph, (struct scsipi_generic *)&scsipi_cmd,
	    sizeof(scsipi_cmd), (void *)data, len, retries, timeout, NULL,
	    flags | XS_CTL_DATA_IN);
	SC_DEBUG(periph, SCSIPI_DB2,
	    ("scsipi_mode_sense: error=%d\n", error));
	return (error);
}

int
scsipi_mode_sense_big(periph, byte2, page, data, len, flags, retries, timeout)
	struct scsipi_periph *periph;
	int byte2, page, len, flags, retries, timeout;
	struct scsipi_mode_header_big *data;
{
	struct scsipi_mode_sense_big scsipi_cmd;
	int error;

	memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
	scsipi_cmd.opcode = MODE_SENSE_BIG;
	scsipi_cmd.byte2 = byte2;
	scsipi_cmd.page = page;
	_lto2b(len, scsipi_cmd.length);
	error = scsipi_command(periph, (struct scsipi_generic *)&scsipi_cmd,
	    sizeof(scsipi_cmd), (void *)data, len, retries, timeout, NULL,
	    flags | XS_CTL_DATA_IN);
	SC_DEBUG(periph, SCSIPI_DB2,
	    ("scsipi_mode_sense_big: error=%d\n", error));
	return (error);
}

int
scsipi_mode_select(periph, byte2, data, len, flags, retries, timeout)
	struct scsipi_periph *periph;
	int byte2, len, flags, retries, timeout;
	struct scsipi_mode_header *data;
{
	struct scsipi_mode_select scsipi_cmd;
	int error;

	memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
	scsipi_cmd.opcode = MODE_SELECT;
	scsipi_cmd.byte2 = byte2;
	if (scsipi_periph_bustype(periph) == SCSIPI_BUSTYPE_ATAPI)
		_lto2b(len, scsipi_cmd.u_len.atapi.length);
	else
		scsipi_cmd.u_len.scsi.length = len & 0xff;
	error = scsipi_command(periph, (struct scsipi_generic *)&scsipi_cmd,
	    sizeof(scsipi_cmd), (void *)data, len, retries, timeout, NULL,
	    flags | XS_CTL_DATA_OUT);
	SC_DEBUG(periph, SCSIPI_DB2,
	    ("scsipi_mode_select: error=%d\n", error));
	return (error);
}

int
scsipi_mode_select_big(periph, byte2, data, len, flags, retries, timeout)
	struct scsipi_periph *periph;
	int byte2, len, flags, retries, timeout;
	struct scsipi_mode_header_big *data;
{
	struct scsipi_mode_select_big scsipi_cmd;
	int error;

	memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
	scsipi_cmd.opcode = MODE_SELECT_BIG;
	scsipi_cmd.byte2 = byte2;
	_lto2b(len, scsipi_cmd.length);
	error = scsipi_command(periph, (struct scsipi_generic *)&scsipi_cmd,
	    sizeof(scsipi_cmd), (void *)data, len, retries, timeout, NULL,
	    flags | XS_CTL_DATA_OUT);
	SC_DEBUG(periph, SCSIPI_DB2,
	    ("scsipi_mode_select_big: error=%d\n", error));
	return (error);
}

/*
 * scsipi_done:
 *
 *	This routine is called by an adapter's interrupt handler when
 *	an xfer is completed.
 */
void
scsipi_done(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_periph *periph = xs->xs_periph;
	struct scsipi_channel *chan = periph->periph_channel;
	int s, freezecnt;

	SC_DEBUG(periph, SCSIPI_DB2, ("scsipi_done\n"));
#ifdef SCSIPI_DEBUG
	if (periph->periph_dbflags & SCSIPI_DB1)
		show_scsipi_cmd(xs);
#endif

	s = splbio();
	/*
	 * The resource this command was using is now free.
	 */
	scsipi_put_resource(chan);
	xs->xs_periph->periph_sent--;

	/*
	 * If the command was tagged, free the tag.
	 */
	if (XS_CTL_TAGTYPE(xs) != 0)
		scsipi_put_tag(xs);
	else
		periph->periph_flags &= ~PERIPH_UNTAG;

	/* Mark the command as `done'. */
	xs->xs_status |= XS_STS_DONE;

#ifdef DIAGNOSTIC
	if ((xs->xs_control & (XS_CTL_ASYNC|XS_CTL_POLL)) ==
	    (XS_CTL_ASYNC|XS_CTL_POLL))
		panic("scsipi_done: ASYNC and POLL");
#endif

	/*
	 * If the xfer had an error of any sort, freeze the
	 * periph's queue.  Freeze it again if we were requested
	 * to do so in the xfer.
	 */
	freezecnt = 0;
	if (xs->error != XS_NOERROR)
		freezecnt++;
	if (xs->xs_control & XS_CTL_FREEZE_PERIPH)
		freezecnt++;
	if (freezecnt != 0)
		scsipi_periph_freeze(periph, freezecnt);

	/*
	 * Record the xfer with a pending sense, in case a SCSI reset is
	 * received before the thread is woken up.
	 */
	if (xs->error == XS_BUSY && xs->status == SCSI_CHECK) {
		periph->periph_flags |= PERIPH_SENSE;
		periph->periph_xscheck = xs;
	}

	/*
	 * If this was an xfer that was not to complete asynchronously,
	 * let the requesting thread perform error checking/handling
	 * in its context.
	 */
	if ((xs->xs_control & XS_CTL_ASYNC) == 0) {
		splx(s);
		/*
		 * If it's a polling job, just return, to unwind the
		 * call graph.  We don't need to restart the queue,
		 * because polling jobs are treated specially, and
		 * are really only used during crash dumps anyway
		 * (XXX or during boot-time autoconfiguration of
		 * ATAPI devices).
		 */
		if (xs->xs_control & XS_CTL_POLL)
			return;
		wakeup(xs);
		goto out;
	}

	/*
	 * Catch the extremely common case of I/O completing
	 * without error; no use in taking a context switch
	 * if we can handle it in interrupt context.
	 */
	if (xs->error == XS_NOERROR) {
		splx(s);
		(void) scsipi_complete(xs);
		goto out;
	}

	/*
	 * There is an error on this xfer.  Put it on the channel's
	 * completion queue, and wake up the completion thread.
	 */
	TAILQ_INSERT_TAIL(&chan->chan_complete, xs, channel_q);
	splx(s);
	wakeup(&chan->chan_complete);

 out:
	/*
	 * If there are more xfers on the channel's queue, attempt to
	 * run them.
	 */
	scsipi_run_queue(chan);
}

/*
 * scsipi_complete:
 *
 *	Completion of a scsipi_xfer.  This is the guts of scsipi_done().
 *
 *	NOTE: This routine MUST be called with valid thread context
 *	except for the case where the following two conditions are
 *	true:
 *
 *		xs->error == XS_NOERROR
 *		XS_CTL_ASYNC is set in xs->xs_control
 *
 *	The semantics of this routine can be tricky, so here is an
 *	explanation:
 *
 *		0		Xfer completed successfully.
 *
 *		ERESTART	Xfer had an error, but was restarted.
 *
 *		anything else	Xfer had an error, return value is Unix
 *				errno.
 *
 *	If the return value is anything but ERESTART:
 *
 *		- If XS_CTL_ASYNC is set, `xs' has been freed back to
 *		  the pool.
 *		- If there is a buf associated with the xfer,
 *		  it has been biodone()'d.
 */
int
scsipi_complete(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_periph *periph = xs->xs_periph;
	struct scsipi_channel *chan = periph->periph_channel;
	struct buf *bp;
	int error, s;

#ifdef DIAGNOSTIC
	if ((xs->xs_control & XS_CTL_ASYNC) != 0 && xs->bp == NULL)
		panic("scsipi_complete: XS_CTL_ASYNC but no buf");
#endif
	/*
	 * If command terminated with a CHECK CONDITION, we need to issue a
	 * REQUEST_SENSE command. Once the REQUEST_SENSE has been processed
	 * we'll have the real status.
	 * Must be processed at splbio() to avoid missing a SCSI bus reset
	 * for this command.
	 */
	s = splbio();
	if (xs->error == XS_BUSY && xs->status == SCSI_CHECK) {
		/* request sense for a request sense? */
		if (xs->xs_control & XS_CTL_REQSENSE) {
			scsipi_printaddr(periph);
			printf("request sense for a request sense?\n");
			/* XXX maybe we should reset the device ? */
			/* we've been frozen because xs->error != XS_NOERROR */
			scsipi_periph_thaw(periph, 1);
			splx(s);
			if (xs->resid < xs->datalen) {
				printf("we read %d bytes of sense anyway:\n",
				    xs->datalen - xs->resid);
#ifdef SCSIVERBOSE
				scsipi_print_sense_data((void *)xs->data, 0);
#endif
			}
			return EINVAL;
		}
		scsipi_request_sense(xs);
	}
	splx(s);

	/*
	 * If it's a user level request, bypass all usual completion
	 * processing, let the user work it out..
	 */
	if ((xs->xs_control & XS_CTL_USERCMD) != 0) {
		SC_DEBUG(periph, SCSIPI_DB3, ("calling user done()\n"));
		if (xs->error != XS_NOERROR)
			scsipi_periph_thaw(periph, 1);
		scsipi_user_done(xs);
		SC_DEBUG(periph, SCSIPI_DB3, ("returned from user done()\n"));
		return 0;
	}

	switch (xs->error) {
	case XS_NOERROR:
		error = 0;
		break;

	case XS_SENSE:
	case XS_SHORTSENSE:
		error = (*chan->chan_bustype->bustype_interpret_sense)(xs);
		break;

	case XS_RESOURCE_SHORTAGE:
		/*
		 * XXX Should freeze channel's queue.
		 */
		scsipi_printaddr(periph);
		printf("adapter resource shortage\n");
		/* FALLTHROUGH */

	case XS_BUSY:
		if (xs->error == XS_BUSY && xs->status == SCSI_QUEUE_FULL) {
			struct scsipi_max_openings mo;

			/*
			 * We set the openings to active - 1, assuming that
			 * the command that got us here is the first one that
			 * can't fit into the device's queue.  If that's not
			 * the case, I guess we'll find out soon enough.
			 */
			mo.mo_target = periph->periph_target;
			mo.mo_lun = periph->periph_lun;
			if (periph->periph_active < periph->periph_openings)
				mo.mo_openings = periph->periph_active - 1;
			else
				mo.mo_openings = periph->periph_openings - 1;
#ifdef DIAGNOSTIC
			if (mo.mo_openings < 0) {
				scsipi_printaddr(periph);
				printf("QUEUE FULL resulted in < 0 openings\n");
				panic("scsipi_done");
			}
#endif
			if (mo.mo_openings == 0) {
				scsipi_printaddr(periph);
				printf("QUEUE FULL resulted in 0 openings\n");
				mo.mo_openings = 1;
			}
			scsipi_async_event(chan, ASYNC_EVENT_MAX_OPENINGS, &mo);
			error = ERESTART;
		} else if (xs->xs_retries != 0) {
			xs->xs_retries--;
			/*
			 * Wait one second, and try again.
			 */
			if ((xs->xs_control & XS_CTL_POLL) ||
			    (chan->chan_flags & SCSIPI_CHAN_TACTIVE) == 0) {
				delay(1000000);
			} else {
				scsipi_periph_freeze(periph, 1);
				callout_reset(&periph->periph_callout,
				    hz, scsipi_periph_timed_thaw, periph);
			}
			error = ERESTART;
		} else
			error = EBUSY;
		break;

	case XS_REQUEUE:
		error = ERESTART;
		break;

	case XS_TIMEOUT:
		if (xs->xs_retries != 0) {
			xs->xs_retries--;
			error = ERESTART;
		} else
			error = EIO;
		break;

	case XS_SELTIMEOUT:
		/* XXX Disable device? */
		error = EIO;
		break;

	case XS_RESET:
		if (xs->xs_control & XS_CTL_REQSENSE) {
			/*
			 * request sense interrupted by reset: signal it
			 * with EINTR return code.
			 */
			error = EINTR;
		} else {
			if (xs->xs_retries != 0) {
				xs->xs_retries--;
				error = ERESTART;
			} else
				error = EIO;
		}
		break;

	default:
		scsipi_printaddr(periph);
		printf("invalid return code from adapter: %d\n", xs->error);
		error = EIO;
		break;
	}

	s = splbio();
	if (error == ERESTART) {
		/*
		 * If we get here, the periph has been thawed and frozen
		 * again if we had to issue recovery commands.  Alternatively,
		 * it may have been frozen again and in a timed thaw.  In
		 * any case, we thaw the periph once we re-enqueue the
		 * command.  Once the periph is fully thawed, it will begin
		 * operation again.
		 */
		xs->error = XS_NOERROR;
		xs->status = SCSI_OK;
		xs->xs_status &= ~XS_STS_DONE;
		xs->xs_requeuecnt++;
		error = scsipi_enqueue(xs);
		if (error == 0) {
			scsipi_periph_thaw(periph, 1);
			splx(s);
			return (ERESTART);
		}
	}

	/*
	 * scsipi_done() freezes the queue if not XS_NOERROR.
	 * Thaw it here.
	 */
	if (xs->error != XS_NOERROR)
		scsipi_periph_thaw(periph, 1);

	if (periph->periph_switch->psw_done)
		periph->periph_switch->psw_done(xs);
	if ((bp = xs->bp) != NULL) {
		if (error) {
			bp->b_error = error;
			bp->b_flags |= B_ERROR;
			bp->b_resid = bp->b_bcount;
		} else {
			bp->b_error = 0;
			bp->b_resid = xs->resid;
		}
		biodone(bp);
	}

	if (xs->xs_control & XS_CTL_ASYNC)
		scsipi_put_xs(xs);
	splx(s);

	return (error);
}

/*
 * Issue a request sense for the given scsipi_xfer.  Called when the xfer
 * returns with a CHECK_CONDITION status.  Must be called in valid thread
 * context and at splbio().
 */

void
scsipi_request_sense(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_periph *periph = xs->xs_periph;
	int flags, error;
	struct scsipi_sense cmd;

	periph->periph_flags |= PERIPH_SENSE;

	/* if command was polling, request sense will too */
	flags = xs->xs_control & XS_CTL_POLL;
	/* Polling commands can't sleep */
	if (flags)
		flags |= XS_CTL_NOSLEEP;

	flags |= XS_CTL_REQSENSE | XS_CTL_URGENT | XS_CTL_DATA_IN |
	    XS_CTL_THAW_PERIPH | XS_CTL_FREEZE_PERIPH;

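	/*
	 * XS_CTL_THAW_PERIPH thaws the periph's queue by one when this
	 * command is enqueued (so it can be issued at all), and
	 * XS_CTL_FREEZE_PERIPH freezes it again by one when the command
	 * completes, keeping the device quiesced across the recovery.
	 */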
	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = REQUEST_SENSE;
	cmd.length = sizeof(struct scsipi_sense_data);

	error = scsipi_command(periph,
	    (struct scsipi_generic *) &cmd, sizeof(cmd),
	    (u_char*)&xs->sense.scsi_sense, sizeof(struct scsipi_sense_data),
	    0, 1000, NULL, flags);
	periph->periph_flags &= ~PERIPH_SENSE;
	periph->periph_xscheck = NULL;
	switch(error) {
	case 0:
		/* we have a valid sense */
		xs->error = XS_SENSE;
		return;
	case EINTR:
		/* REQUEST_SENSE interrupted by bus reset. */
		xs->error = XS_RESET;
		return;
	case EIO:
		/* request sense couldn't be performed */
		/*
		 * XXX this isn't quite right but we don't have anything
		 * better for now
		 */
		xs->error = XS_DRIVER_STUFFUP;
		return;
	default:
		/* Notify that request sense failed. */
		xs->error = XS_DRIVER_STUFFUP;
		scsipi_printaddr(periph);
		printf("request sense failed with error %d\n", error);
		return;
	}
}

/*
 * scsipi_enqueue:
 *
 *	Enqueue an xfer on a channel.
 */
int
scsipi_enqueue(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_channel *chan = xs->xs_periph->periph_channel;
	struct scsipi_xfer *qxs;
	int s;

	s = splbio();

	/*
	 * If the xfer is to be polled, and there are already jobs on
	 * the queue, we can't proceed.
	 */
	if ((xs->xs_control & XS_CTL_POLL) != 0 &&
	    TAILQ_FIRST(&chan->chan_queue) != NULL) {
		splx(s);
		xs->error = XS_DRIVER_STUFFUP;
		return (EAGAIN);
	}

	/*
	 * If we have an URGENT xfer, it's an error recovery command
	 * and it should just go on the head of the channel's queue.
	 */
	if (xs->xs_control & XS_CTL_URGENT) {
		TAILQ_INSERT_HEAD(&chan->chan_queue, xs, channel_q);
		goto out;
	}

	/*
	 * If this xfer has already been on the queue before, we
	 * need to reinsert it in the correct order.  That order is:
	 *
	 *	Immediately before the first xfer for this periph
	 *	with a requeuecnt less than xs->xs_requeuecnt.
	 *
	 * Failing that, at the end of the queue.  (We'll end up
	 * there naturally.)
	 */
	if (xs->xs_requeuecnt != 0) {
		for (qxs = TAILQ_FIRST(&chan->chan_queue); qxs != NULL;
		     qxs = TAILQ_NEXT(qxs, channel_q)) {
			if (qxs->xs_periph == xs->xs_periph &&
			    qxs->xs_requeuecnt < xs->xs_requeuecnt)
				break;
		}
		if (qxs != NULL) {
			TAILQ_INSERT_AFTER(&chan->chan_queue, qxs, xs,
			    channel_q);
			goto out;
		}
	}
	TAILQ_INSERT_TAIL(&chan->chan_queue, xs, channel_q);
 out:
	if (xs->xs_control & XS_CTL_THAW_PERIPH)
		scsipi_periph_thaw(xs->xs_periph, 1);
	splx(s);
	return (0);
}

/*
 * scsipi_run_queue:
 *
 *	Start as many xfers as possible running on the channel.
 */
void
scsipi_run_queue(chan)
	struct scsipi_channel *chan;
{
	struct scsipi_xfer *xs;
	struct scsipi_periph *periph;
	int s;

	for (;;) {
		s = splbio();

		/*
		 * If the channel is frozen, we can't do any work right
		 * now.
		 */
		if (chan->chan_qfreeze != 0) {
			splx(s);
			return;
		}

		/*
		 * Look for work to do, and make sure we can do it.
		 */
		for (xs = TAILQ_FIRST(&chan->chan_queue); xs != NULL;
		     xs = TAILQ_NEXT(xs, channel_q)) {
			periph = xs->xs_periph;

			if ((periph->periph_sent >= periph->periph_openings) ||
			    periph->periph_qfreeze != 0 ||
			    (periph->periph_flags & PERIPH_UNTAG) != 0)
				continue;

			if ((periph->periph_flags &
			    (PERIPH_RECOVERING | PERIPH_SENSE)) != 0 &&
			    (xs->xs_control & XS_CTL_URGENT) == 0)
				continue;

			/*
			 * We can issue this xfer!
			 */
			goto got_one;
		}

		/*
		 * Can't find any work to do right now.
		 */
		splx(s);
		return;

 got_one:
		/*
		 * Have an xfer to run.  Allocate a resource from
		 * the adapter to run it.  If we can't allocate that
		 * resource, we don't dequeue the xfer.
		 */
		if (scsipi_get_resource(chan) == 0) {
			/*
			 * Adapter is out of resources.  If the adapter
			 * supports it, attempt to grow them.
			 */
			if (scsipi_grow_resources(chan) == 0) {
				/*
				 * Wasn't able to grow resources,
				 * nothing more we can do.
				 */
				if (xs->xs_control & XS_CTL_POLL) {
					scsipi_printaddr(xs->xs_periph);
					printf("polling command but no "
					    "adapter resources\n");
					/* We'll panic shortly... */
				}
				splx(s);

				/*
				 * XXX: We should be able to note that
				 * XXX: that resources are needed here!
				 */
				return;
			}
			/*
			 * scsipi_grow_resources() allocated the resource
			 * for us.
			 */
		}

		/*
		 * We have a resource to run this xfer, do it!
		 */
		TAILQ_REMOVE(&chan->chan_queue, xs, channel_q);

		/*
		 * If the command is to be tagged, allocate a tag ID
		 * for it.
		 */
		if (XS_CTL_TAGTYPE(xs) != 0)
			scsipi_get_tag(xs);
		else
			periph->periph_flags |= PERIPH_UNTAG;
		periph->periph_sent++;
		splx(s);

		scsipi_adapter_request(chan, ADAPTER_REQ_RUN_XFER, xs);
	}
#ifdef DIAGNOSTIC
	panic("scsipi_run_queue: impossible");
#endif
}

/*
 * scsipi_execute_xs:
 *
 *	Begin execution of an xfer, waiting for it to complete, if necessary.
 */
int
scsipi_execute_xs(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_periph *periph = xs->xs_periph;
	struct scsipi_channel *chan = periph->periph_channel;
	int async, poll, retries, error, s;

	xs->xs_status &= ~XS_STS_DONE;
	xs->error = XS_NOERROR;
	xs->resid = xs->datalen;
	xs->status = SCSI_OK;

#ifdef SCSIPI_DEBUG
	if (xs->xs_periph->periph_dbflags & SCSIPI_DB3) {
		printf("scsipi_execute_xs: ");
		show_scsipi_xs(xs);
		printf("\n");
	}
#endif

	/*
	 * Deal with command tagging:
	 *
	 *	- If the device's current operating mode doesn't
	 *	  include tagged queueing, clear the tag mask.
	 *
	 *	- If the device's current operating mode *does*
	 *	  include tagged queueing, set the tag_type in
	 *	  the xfer to the appropriate byte for the tag
	 *	  message.
	 */
	if ((PERIPH_XFER_MODE(periph) & PERIPH_CAP_TQING) == 0 ||
	    (xs->xs_control & XS_CTL_REQSENSE)) {
		xs->xs_control &= ~XS_CTL_TAGMASK;
		xs->xs_tag_type = 0;
	} else {
		/*
		 * If the request doesn't specify a tag, give Head
		 * tags to URGENT operations and Ordered tags to
		 * everything else.
		 */
		if (XS_CTL_TAGTYPE(xs) == 0) {
			if (xs->xs_control & XS_CTL_URGENT)
				xs->xs_control |= XS_CTL_HEAD_TAG;
			else
				xs->xs_control |= XS_CTL_ORDERED_TAG;
		}

		switch (XS_CTL_TAGTYPE(xs)) {
		case XS_CTL_ORDERED_TAG:
			xs->xs_tag_type = MSG_ORDERED_Q_TAG;
			break;

		case XS_CTL_SIMPLE_TAG:
			xs->xs_tag_type = MSG_SIMPLE_Q_TAG;
			break;

		case XS_CTL_HEAD_TAG:
			xs->xs_tag_type = MSG_HEAD_OF_Q_TAG;
			break;

		default:
			scsipi_printaddr(periph);
			printf("invalid tag mask 0x%08x\n",
			    XS_CTL_TAGTYPE(xs));
			panic("scsipi_execute_xs");
		}
	}

	/* If the adaptor wants us to poll, poll. */
	if (chan->chan_adapter->adapt_flags & SCSIPI_ADAPT_POLL_ONLY)
		xs->xs_control |= XS_CTL_POLL;

	/*
	 * If we don't yet have a completion thread, or we are to poll for
	 * completion, clear the ASYNC flag.
	 */
	if (chan->chan_thread == NULL || (xs->xs_control & XS_CTL_POLL) != 0)
		xs->xs_control &= ~XS_CTL_ASYNC;

	async = (xs->xs_control & XS_CTL_ASYNC);
	poll = (xs->xs_control & XS_CTL_POLL);
	retries = xs->xs_retries;		/* for polling commands */

#ifdef DIAGNOSTIC
	if (async != 0 && xs->bp == NULL)
		panic("scsipi_execute_xs: XS_CTL_ASYNC but no buf");
#endif

	/*
	 * Enqueue the transfer.  If we're not polling for completion, this
	 * should ALWAYS return `no error'.
	 */
 try_again:
	error = scsipi_enqueue(xs);
	if (error) {
		if (poll == 0) {
			scsipi_printaddr(periph);
			printf("not polling, but enqueue failed with %d\n",
			    error);
			panic("scsipi_execute_xs");
		}

		scsipi_printaddr(periph);
		printf("failed to enqueue polling command");
		if (retries != 0) {
			printf(", retrying...\n");
			delay(1000000);
			retries--;
			goto try_again;
		}
		printf("\n");
		goto free_xs;
	}

 restarted:
	scsipi_run_queue(chan);

	/*
	 * The xfer is enqueued, and possibly running.  If it's to be
	 * completed asynchronously, just return now.
	 */
	if (async)
		return (EJUSTRETURN);

	/*
	 * Not an asynchronous command; wait for it to complete.
	 */
	s = splbio();
	while ((xs->xs_status & XS_STS_DONE) == 0) {
		if (poll) {
			scsipi_printaddr(periph);
			printf("polling command not done\n");
			panic("scsipi_execute_xs");
		}
		(void) tsleep(xs, PRIBIO, "xscmd", 0);
	}
	splx(s);

	/*
	 * Command is complete.  scsipi_done() has awakened us to perform
	 * the error handling.
	 */
	error = scsipi_complete(xs);
	if (error == ERESTART)
		goto restarted;

	/*
	 * Command completed successfully or fatal error occurred.  Fall
	 * into....
	 */
 free_xs:
	s = splbio();
	scsipi_put_xs(xs);
	splx(s);

	/*
	 * Kick the queue, keep it running in case it stopped for some
	 * reason.
	 */
	scsipi_run_queue(chan);

	return (error);
}

/*
 * scsipi_completion_thread:
 *
 *	This is the completion thread.  We wait for errors on
 *	asynchronous xfers, and perform the error handling
 *	function, restarting the command, if necessary.
 */
void
scsipi_completion_thread(arg)
	void *arg;
{
	struct scsipi_channel *chan = arg;
	struct scsipi_xfer *xs;
	int s;

	s = splbio();
	chan->chan_flags |= SCSIPI_CHAN_TACTIVE;
	splx(s);
	for (;;) {
		s = splbio();
		xs = TAILQ_FIRST(&chan->chan_complete);
		if (xs == NULL &&
		    (chan->chan_flags &
		    (SCSIPI_CHAN_SHUTDOWN | SCSIPI_CHAN_CALLBACK |
		    SCSIPI_CHAN_KICK)) == 0) {
			(void) tsleep(&chan->chan_complete, PRIBIO,
			    "sccomp", 0);
			splx(s);
			continue;
		}
		if (chan->chan_flags & SCSIPI_CHAN_CALLBACK) {
			/* call chan_callback from thread context */
			chan->chan_flags &= ~SCSIPI_CHAN_CALLBACK;
			chan->chan_callback(chan, chan->chan_callback_arg);
			splx(s);
			continue;
		}
		if (chan->chan_flags & SCSIPI_CHAN_KICK) {
			/* explicitly run the queues for this channel */
			chan->chan_flags &= ~SCSIPI_CHAN_KICK;
			scsipi_run_queue(chan);
			splx(s);
			continue;
		}
		if (chan->chan_flags & SCSIPI_CHAN_SHUTDOWN) {
			splx(s);
			break;
		}
		if (xs) {
			TAILQ_REMOVE(&chan->chan_complete, xs, channel_q);
			splx(s);

			/*
			 * Have an xfer with an error; process it.
			 */
			(void) scsipi_complete(xs);

			/*
			 * Kick the queue; keep it running if it was stopped
			 * for some reason.
			 */
			scsipi_run_queue(chan);
		} else {
			splx(s);
		}
	}

	chan->chan_thread = NULL;

	/* In case parent is waiting for us to exit. */
	wakeup(&chan->chan_thread);

	kthread_exit(0);
}

/*
 * scsipi_create_completion_thread:
 *
 *	Callback to actually create the completion thread.
 */
void
scsipi_create_completion_thread(arg)
	void *arg;
{
	struct scsipi_channel *chan = arg;
	struct scsipi_adapter *adapt = chan->chan_adapter;

	if (kthread_create1(scsipi_completion_thread, chan,
	    &chan->chan_thread, "%s:%d", adapt->adapt_dev->dv_xname,
	    chan->chan_channel)) {
		printf("%s: unable to create completion thread for "
		    "channel %d\n", adapt->adapt_dev->dv_xname,
		    chan->chan_channel);
		panic("scsipi_create_completion_thread");
	}
}

/*
 * scsipi_thread_call_callback:
 *
 *	request to call a callback from the completion thread
 */
int
scsipi_thread_call_callback(chan, callback, arg)
	struct scsipi_channel *chan;
	void (*callback) __P((struct scsipi_channel *, void *));
	void *arg;
{
	int s;

	s = splbio();
	if (chan->chan_flags & SCSIPI_CHAN_CALLBACK) {
		splx(s);
		return EBUSY;
	}
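	/*
	 * Freeze the channel so no new xfers start while the callback
	 * is pending; the callback (or its caller) is presumably
	 * responsible for thawing the channel again when it is done.
	 */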
	scsipi_channel_freeze(chan, 1);
	chan->chan_callback = callback;
	chan->chan_callback_arg = arg;
	chan->chan_flags |= SCSIPI_CHAN_CALLBACK;
	wakeup(&chan->chan_complete);
	splx(s);
	return (0);
}

/*
 * scsipi_async_event:
 *
 *	Handle an asynchronous event from an adapter.
 */
void
scsipi_async_event(chan, event, arg)
	struct scsipi_channel *chan;
	scsipi_async_event_t event;
	void *arg;
{
	int s;

	s = splbio();
	switch (event) {
	case ASYNC_EVENT_MAX_OPENINGS:
		scsipi_async_event_max_openings(chan,
		    (struct scsipi_max_openings *)arg);
		break;

	case ASYNC_EVENT_XFER_MODE:
		scsipi_async_event_xfer_mode(chan,
		    (struct scsipi_xfer_mode *)arg);
		break;
	case ASYNC_EVENT_RESET:
		scsipi_async_event_channel_reset(chan);
		break;
	}
	splx(s);
}

/*
 * scsipi_print_xfer_mode:
 *
 *	Print a periph's capabilities.
 */
void
scsipi_print_xfer_mode(periph)
	struct scsipi_periph *periph;
{
	int period, freq, speed, mbs;

	if ((periph->periph_flags & PERIPH_MODE_VALID) == 0)
		return;

	printf("%s: ", periph->periph_dev->dv_xname);
	if (periph->periph_mode & PERIPH_CAP_SYNC) {
		period = scsipi_sync_factor_to_period(periph->periph_period);
		printf("sync (%d.%dns offset %d)",
		    period / 10, period % 10, periph->periph_offset);
	} else
		printf("async");

	if (periph->periph_mode & PERIPH_CAP_WIDE32)
		printf(", 32-bit");
	else if (periph->periph_mode & PERIPH_CAP_WIDE16)
		printf(", 16-bit");
	else
		printf(", 8-bit");

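	/*
	 * scsipi_sync_factor_to_freq() gives the sync rate in kHz; at
	 * one byte per transfer that is also kbytes/sec, and wide modes
	 * move 2 or 4 bytes per clock, hence the multipliers below.
	 */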
2125 if (periph->periph_mode & PERIPH_CAP_SYNC) {
2126 freq = scsipi_sync_factor_to_freq(periph->periph_period);
2127 speed = freq;
2128 if (periph->periph_mode & PERIPH_CAP_WIDE32)
2129 speed *= 4;
2130 else if (periph->periph_mode & PERIPH_CAP_WIDE16)
2131 speed *= 2;
2132 mbs = speed / 1000;
2133 if (mbs > 0)
2134 printf(" (%d.%03dMB/s)", mbs, speed % 1000);
2135 else
2136 printf(" (%dKB/s)", speed % 1000);
2137 }
2138
2139 printf(" transfers");
2140
2141 if (periph->periph_mode & PERIPH_CAP_TQING)
2142 printf(", tagged queueing");
2143
2144 printf("\n");
2145 }
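/*
 * For example (illustrative output only): a disk that negotiated
 * 25.0ns synchronous transfers at offset 15 on a wide (16-bit) bus
 * with tagged queueing would be announced as:
 *
 *	sd0: sync (25.0ns offset 15), 16-bit (80.000MB/s) transfers,
 *	tagged queueing
 */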
2146
2147 /*
2148 * scsipi_async_event_max_openings:
2149 *
2150 * Update the maximum number of outstanding commands a
2151 * device may have.
2152 */
2153 void
2154 scsipi_async_event_max_openings(chan, mo)
2155 struct scsipi_channel *chan;
2156 struct scsipi_max_openings *mo;
2157 {
2158 struct scsipi_periph *periph;
2159 int minlun, maxlun;
2160
2161 if (mo->mo_lun == -1) {
2162 /*
2163 * Wildcarded; apply it to all LUNs.
2164 */
2165 minlun = 0;
2166 maxlun = chan->chan_nluns - 1;
2167 } else
2168 minlun = maxlun = mo->mo_lun;
2169
2170 for (; minlun <= maxlun; minlun++) {
2171 periph = scsipi_lookup_periph(chan, mo->mo_target, minlun);
2172 if (periph == NULL)
2173 continue;
2174
2175 if (mo->mo_openings < periph->periph_openings)
2176 periph->periph_openings = mo->mo_openings;
2177 else if (mo->mo_openings > periph->periph_openings &&
2178 (periph->periph_flags & PERIPH_GROW_OPENINGS) != 0)
2179 periph->periph_openings = mo->mo_openings;
2180 }
2181 }
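/*
 * Usage sketch (hypothetical): an adapter that runs out of command
 * slots for a device can throttle it down to, say, 4 openings:
 *
 *	struct scsipi_max_openings mo;
 *
 *	mo.mo_target = periph->periph_target;
 *	mo.mo_lun = periph->periph_lun;
 *	mo.mo_openings = 4;
 *	scsipi_async_event(chan, ASYNC_EVENT_MAX_OPENINGS, &mo);
 */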
2182
2183 /*
2184 * scsipi_async_event_xfer_mode:
2185 *
2186 * Update the xfer mode for all periphs sharing the
2187 * specified I_T Nexus.
2188 */
2189 void
2190 scsipi_async_event_xfer_mode(chan, xm)
2191 struct scsipi_channel *chan;
2192 struct scsipi_xfer_mode *xm;
2193 {
2194 struct scsipi_periph *periph;
2195 int lun, announce, mode, period, offset;
2196
2197 for (lun = 0; lun < chan->chan_nluns; lun++) {
2198 periph = scsipi_lookup_periph(chan, xm->xm_target, lun);
2199 if (periph == NULL)
2200 continue;
2201 announce = 0;
2202
2203 /*
2204 * Clamp the xfer mode down to this periph's capabilities.
2205 */
2206 mode = xm->xm_mode & periph->periph_cap;
2207 if (mode & PERIPH_CAP_SYNC) {
2208 period = xm->xm_period;
2209 offset = xm->xm_offset;
2210 } else {
2211 period = 0;
2212 offset = 0;
2213 }
2214
2215 /*
2216 * If we do not have a valid xfer mode yet, or the parameters
2217 * are different, announce them.
2218 */
2219 if ((periph->periph_flags & PERIPH_MODE_VALID) == 0 ||
2220 periph->periph_mode != mode ||
2221 periph->periph_period != period ||
2222 periph->periph_offset != offset)
2223 announce = 1;
2224
2225 periph->periph_mode = mode;
2226 periph->periph_period = period;
2227 periph->periph_offset = offset;
2228 periph->periph_flags |= PERIPH_MODE_VALID;
2229
2230 if (announce)
2231 scsipi_print_xfer_mode(periph);
2232 }
2233 }
2234
2235 /*
2236 * scsipi_set_xfer_mode:
2237 *
2238 * Set the xfer mode for the specified I_T Nexus.
2239 */
2240 void
2241 scsipi_set_xfer_mode(chan, target, immed)
2242 struct scsipi_channel *chan;
2243 int target, immed;
2244 {
2245 struct scsipi_xfer_mode xm;
2246 struct scsipi_periph *itperiph;
2247 int lun, s;
2248
2249 /*
2250 * Go to the minimal xfer mode.
2251 */
2252 xm.xm_target = target;
2253 xm.xm_mode = 0;
2254 xm.xm_period = 0; /* ignored */
2255 xm.xm_offset = 0; /* ignored */
2256
2257 /*
2258 * Find the first LUN we know about on this I_T Nexus.
2259 */
2260 for (lun = 0; lun < chan->chan_nluns; lun++) {
2261 itperiph = scsipi_lookup_periph(chan, target, lun);
2262 if (itperiph != NULL)
2263 break;
2264 }
2265 if (itperiph != NULL) {
2266 xm.xm_mode = itperiph->periph_cap;
2267 /*
2268 * Now issue the request to the adapter.
2269 */
2270 s = splbio();
2271 scsipi_adapter_request(chan, ADAPTER_REQ_SET_XFER_MODE, &xm);
2272 splx(s);
2273 /*
2274 * If we want this to happen immediately, issue a dummy
2275 * command, since most adapters can't really negotiate unless
2276 * they're executing a job.
2277 */
2278 if (immed != 0) {
2279 (void) scsipi_test_unit_ready(itperiph,
2280 XS_CTL_DISCOVERY | XS_CTL_IGNORE_ILLEGAL_REQUEST |
2281 XS_CTL_IGNORE_NOT_READY |
2282 XS_CTL_IGNORE_MEDIA_CHANGE);
2283 }
2284 }
2285 }
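/*
 * Usage sketch (illustrative): to renegotiate target 3 immediately,
 * e.g. after its capabilities have changed:
 *
 *	scsipi_set_xfer_mode(chan, 3, 1);
 *
 * With immed == 0 the request is still handed to the adapter, but
 * negotiation typically happens on the next real command instead.
 */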
2286
2287 /*
2288 * scsipi_async_event_channel_reset:
2289 *
2290 * Handle a SCSI bus reset.
2291 * Must be called at splbio().
2292 */
2293 void
2294 scsipi_async_event_channel_reset(chan)
2295 struct scsipi_channel *chan;
2296 {
2297 struct scsipi_xfer *xs, *xs_next;
2298 struct scsipi_periph *periph;
2299 int target, lun;
2300
2301 /*
2302 * The channel has been reset. Also mark pending REQUEST_SENSE
2303 * commands as reset, since their sense data is no longer valid.
2304 * We can't call scsipi_done() from here, as the commands have not
2305 * been sent to the adapter yet (doing so would corrupt accounting).
2306 */
2307
2308 for (xs = TAILQ_FIRST(&chan->chan_queue); xs != NULL; xs = xs_next) {
2309 xs_next = TAILQ_NEXT(xs, channel_q);
2310 if (xs->xs_control & XS_CTL_REQSENSE) {
2311 TAILQ_REMOVE(&chan->chan_queue, xs, channel_q);
2312 xs->error = XS_RESET;
2313 if ((xs->xs_control & XS_CTL_ASYNC) != 0)
2314 TAILQ_INSERT_TAIL(&chan->chan_complete, xs,
2315 channel_q);
2316 }
2317 }
2318 wakeup(&chan->chan_complete);
2319 /* Catch xfers with pending sense that have no REQUEST_SENSE xfer queued yet. */
2320 for (target = 0; target < chan->chan_ntargets; target++) {
2321 if (target == chan->chan_id)
2322 continue;
2323 for (lun = 0; lun < chan->chan_nluns; lun++) {
2324 periph = chan->chan_periphs[target][lun];
2325 if (periph) {
2326 xs = periph->periph_xscheck;
2327 if (xs)
2328 xs->error = XS_RESET;
2329 }
2330 }
2331 }
2332 }
2333
2334 /*
2335 * scsipi_target_detach:
2336 *
2337 * Detach all periphs associated with the specified I_T Nexus.
2338 * Must be called from valid thread context.
2339 */
2340 int
2341 scsipi_target_detach(chan, target, lun, flags)
2342 struct scsipi_channel *chan;
2343 int target, lun;
2344 int flags;
2345 {
2346 struct scsipi_periph *periph;
2347 int ctarget, mintarget, maxtarget;
2348 int clun, minlun, maxlun;
2349 int error;
2350
2351 if (target == -1) {
2352 mintarget = 0;
2353 maxtarget = chan->chan_ntargets;
2354 } else {
2355 if (target == chan->chan_id)
2356 return EINVAL;
2357 if (target < 0 || target >= chan->chan_ntargets)
2358 return EINVAL;
2359 mintarget = target;
2360 maxtarget = target + 1;
2361 }
2362
2363 if (lun == -1) {
2364 minlun = 0;
2365 maxlun = chan->chan_nluns;
2366 } else {
2367 if (lun < 0 || lun >= chan->chan_nluns)
2368 return EINVAL;
2369 minlun = lun;
2370 maxlun = lun + 1;
2371 }
2372
2373 for (ctarget = mintarget; ctarget < maxtarget; ctarget++) {
2374 if (ctarget == chan->chan_id)
2375 continue;
2376
2377 for (clun = minlun; clun < maxlun; clun++) {
2378 periph = scsipi_lookup_periph(chan, ctarget, clun);
2379 if (periph == NULL)
2380 continue;
2381 error = config_detach(periph->periph_dev, flags);
2382 if (error)
2383 return (error);
2384 scsipi_remove_periph(chan, periph);
2385 free(periph, M_DEVBUF);
2386 }
2387 }
2388 return (0);
2389 }
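/*
 * For example (illustrative): forcibly detach every LUN of target 2:
 *
 *	error = scsipi_target_detach(chan, 2, -1, DETACH_FORCE);
 */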
2390
2391 /*
2392 * scsipi_adapter_addref:
2393 *
2394 * Add a reference to the specified adapter, enabling the
2395 * adapter if necessary.
2396 */
2397 int
2398 scsipi_adapter_addref(adapt)
2399 struct scsipi_adapter *adapt;
2400 {
2401 int s, error = 0;
2402
2403 s = splbio();
2404 if (adapt->adapt_refcnt++ == 0 && adapt->adapt_enable != NULL) {
2405 error = (*adapt->adapt_enable)(adapt->adapt_dev, 1);
2406 if (error)
2407 adapt->adapt_refcnt--;
2408 }
2409 splx(s);
2410 return (error);
2411 }
2412
2413 /*
2414 * scsipi_adapter_delref:
2415 *
2416 * Delete a reference to the specified adapter, disabling the
2417 * adapter if possible.
2418 */
2419 void
2420 scsipi_adapter_delref(adapt)
2421 struct scsipi_adapter *adapt;
2422 {
2423 int s;
2424
2425 s = splbio();
2426 if (adapt->adapt_refcnt-- == 1 && adapt->adapt_enable != NULL)
2427 (void) (*adapt->adapt_enable)(adapt->adapt_dev, 0);
2428 splx(s);
2429 }
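/*
 * Usage sketch (illustrative): callers bracket the period during
 * which the adapter must remain enabled, e.g. around an open device:
 *
 *	if ((error = scsipi_adapter_addref(adapt)) != 0)
 *		return (error);
 *	... use the adapter ...
 *	scsipi_adapter_delref(adapt);
 */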
2430
2431 struct scsipi_syncparam {
2432 int ss_factor;
2433 int ss_period; /* ns * 10 */
2434 } scsipi_syncparams[] = {
2435 { 0x09, 125 }, /* 12.5ns, 80 MHz */
2436 { 0x0a, 250 }, /* 25.0ns, 40 MHz */
2437 { 0x0b, 303 }, /* 30.3ns, 33 MHz */
2438 { 0x0c, 500 }, /* 50.0ns, 20 MHz */
2439 };
2440 const int scsipi_nsyncparams =
2441 sizeof(scsipi_syncparams) / sizeof(scsipi_syncparams[0]);
2442
2443 int
2444 scsipi_sync_period_to_factor(period)
2445 int period; /* ns * 10 */
2446 {
2447 int i;
2448
2449 for (i = 0; i < scsipi_nsyncparams; i++) {
2450 if (period <= scsipi_syncparams[i].ss_period)
2451 return (scsipi_syncparams[i].ss_factor);
2452 }
2453
2454 return ((period / 10) / 4);
2455 }
2456
2457 int
2458 scsipi_sync_factor_to_period(factor)
2459 int factor;
2460 {
2461 int i;
2462
2463 for (i = 0; i < scsipi_nsyncparams; i++) {
2464 if (factor == scsipi_syncparams[i].ss_factor)
2465 return (scsipi_syncparams[i].ss_period);
2466 }
2467
2468 return ((factor * 4) * 10);
2469 }
2470
2471 int
2472 scsipi_sync_factor_to_freq(factor)
2473 int factor;
2474 {
2475 int i;
2476
2477 for (i = 0; i < scsipi_nsyncparams; i++) {
2478 if (factor == scsipi_syncparams[i].ss_factor)
2479 return (10000000 / scsipi_syncparams[i].ss_period);
2480 }
2481
2482 return (10000000 / ((factor * 4) * 10));
2483 }
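/*
 * Worked example: factor 0x0c is in the table with ss_period 500,
 * i.e. 50.0ns, so scsipi_sync_factor_to_freq(0x0c) returns
 * 10000000 / 500 = 20000 (kHz, i.e. 20 MHz).  A factor outside the
 * table, e.g. 0x32 (50), falls through to the generic formulas:
 * period = (50 * 4) * 10 = 2000 (200.0ns), and frequency =
 * 10000000 / 2000 = 5000 kHz (5 MHz).
 */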
2484
2485 #ifdef SCSIPI_DEBUG
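/*
 * Note: the helpers below are compiled in only when the kernel
 * configuration includes "options SCSIPI_DEBUG".
 */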
2486 /*
2487 * Given a scsipi_xfer, dump the request, in all its glory.
2488 */
2489 void
2490 show_scsipi_xs(xs)
2491 struct scsipi_xfer *xs;
2492 {
2493
2494 printf("xs(%p): ", xs);
2495 printf("xs_control(0x%08x)", xs->xs_control);
2496 printf("xs_status(0x%08x)", xs->xs_status);
2497 printf("periph(%p)", xs->xs_periph);
2498 printf("retr(0x%x)", xs->xs_retries);
2499 printf("timo(0x%x)", xs->timeout);
2500 printf("cmd(%p)", xs->cmd);
2501 printf("len(0x%x)", xs->cmdlen);
2502 printf("data(%p)", xs->data);
2503 printf("len(0x%x)", xs->datalen);
2504 printf("res(0x%x)", xs->resid);
2505 printf("err(0x%x)", xs->error);
2506 printf("bp(%p)", xs->bp);
2507 show_scsipi_cmd(xs);
2508 }
2509
2510 void
2511 show_scsipi_cmd(xs)
2512 struct scsipi_xfer *xs;
2513 {
2514 u_char *b = (u_char *) xs->cmd;
2515 int i = 0;
2516
2517 scsipi_printaddr(xs->xs_periph);
2518 printf(" command: ");
2519
2520 if ((xs->xs_control & XS_CTL_RESET) == 0) {
2521 while (i < xs->cmdlen) {
2522 if (i)
2523 printf(",");
2524 printf("0x%x", b[i++]);
2525 }
2526 printf("-[%d bytes]\n", xs->datalen);
2527 if (xs->datalen)
2528 show_mem(xs->data, min(64, xs->datalen));
2529 } else
2530 printf("-RESET-\n");
2531 }
2532
2533 void
2534 show_mem(address, num)
2535 u_char *address;
2536 int num;
2537 {
2538 int x;
2539
2540 printf("------------------------------");
2541 for (x = 0; x < num; x++) {
2542 if ((x % 16) == 0)
2543 printf("\n%03d: ", x);
2544 printf("%02x ", *address++);
2545 }
2546 printf("\n------------------------------\n");
2547 }
2548 #endif /* SCSIPI_DEBUG */
2549