/* $NetBSD: scsipi_base.c,v 1.120 2004/09/18 18:49:50 mycroft Exp $ */

/*-
 * Copyright (c) 1998, 1999, 2000, 2002, 2003, 2004 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Charles M. Hannum; by Jason R. Thorpe of the Numerical Aerospace
 * Simulation Facility, NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: scsipi_base.c,v 1.120 2004/09/18 18:49:50 mycroft Exp $");

#include "opt_scsi.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/buf.h>
#include <sys/uio.h>
#include <sys/malloc.h>
#include <sys/pool.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/proc.h>
#include <sys/kthread.h>
#include <sys/hash.h>

#include <uvm/uvm_extern.h>

#include <dev/scsipi/scsipi_all.h>
#include <dev/scsipi/scsipi_disk.h>
#include <dev/scsipi/scsipiconf.h>
#include <dev/scsipi/scsipi_base.h>

#include <dev/scsipi/scsi_all.h>
#include <dev/scsipi/scsi_message.h>

static int      scsipi_complete(struct scsipi_xfer *);
static void     scsipi_request_sense(struct scsipi_xfer *);
static int      scsipi_enqueue(struct scsipi_xfer *);
static void     scsipi_run_queue(struct scsipi_channel *chan);

static void     scsipi_completion_thread(void *);

static void     scsipi_get_tag(struct scsipi_xfer *);
static void     scsipi_put_tag(struct scsipi_xfer *);

static int      scsipi_get_resource(struct scsipi_channel *);
static void     scsipi_put_resource(struct scsipi_channel *);

static void     scsipi_async_event_max_openings(struct scsipi_channel *,
                    struct scsipi_max_openings *);
static void     scsipi_async_event_xfer_mode(struct scsipi_channel *,
                    struct scsipi_xfer_mode *);
static void     scsipi_async_event_channel_reset(struct scsipi_channel *);

static struct pool scsipi_xfer_pool;

/*
 * scsipi_init:
 *
 *      Called when a scsibus or atapibus is attached to the system
 *      to initialize shared data structures.
 */
void
scsipi_init(void)
{
        static int scsipi_init_done;

        if (scsipi_init_done)
                return;
        scsipi_init_done = 1;

        /* Initialize the scsipi_xfer pool. */
        pool_init(&scsipi_xfer_pool, sizeof(struct scsipi_xfer), 0,
            0, 0, "scxspl", NULL);
        if (pool_prime(&scsipi_xfer_pool,
            PAGE_SIZE / sizeof(struct scsipi_xfer)) == ENOMEM) {
                printf("WARNING: not enough memory for scsipi_xfer_pool\n");
        }
}
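
/*
 * Sizing note (an illustrative sketch, not from the original source):
 * the pool_prime() call above pre-allocates one page's worth of
 * descriptors,
 *
 *      PAGE_SIZE / sizeof(struct scsipi_xfer)
 *      e.g. 4096 / ~200 == about 20 xfers
 *
 * so early commands need not wait on the allocator.  The exact count
 * depends on the platform page size and the real structure size.
 */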

/*
 * scsipi_channel_init:
 *
 *      Initialize a scsipi_channel when it is attached.
 */
int
scsipi_channel_init(struct scsipi_channel *chan)
{
        int i;

        /* Initialize shared data. */
        scsipi_init();

        /* Initialize the queues. */
        TAILQ_INIT(&chan->chan_queue);
        TAILQ_INIT(&chan->chan_complete);

        for (i = 0; i < SCSIPI_CHAN_PERIPH_BUCKETS; i++)
                LIST_INIT(&chan->chan_periphtab[i]);

        /*
         * Create the asynchronous completion thread.
         */
        kthread_create(scsipi_create_completion_thread, chan);
        return (0);
}

/*
 * scsipi_channel_shutdown:
 *
 *      Shutdown a scsipi_channel.
 */
void
scsipi_channel_shutdown(struct scsipi_channel *chan)
{

        /*
         * Shut down the completion thread.
         */
        chan->chan_tflags |= SCSIPI_CHANT_SHUTDOWN;
        wakeup(&chan->chan_complete);

        /*
         * Now wait for the thread to exit.
         */
        while (chan->chan_thread != NULL)
                (void) tsleep(&chan->chan_thread, PRIBIO, "scshut", 0);
}

static uint32_t
scsipi_chan_periph_hash(uint64_t t, uint64_t l)
{
        uint32_t hash;

        hash = hash32_buf(&t, sizeof(t), HASH32_BUF_INIT);
        hash = hash32_buf(&l, sizeof(l), hash);

        return (hash & SCSIPI_CHAN_PERIPH_HASHMASK);
}
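
/*
 * Bucket sketch (illustrative only): the insert and lookup routines
 * below must hash identically for a lookup to find an inserted periph:
 *
 *      hash = scsipi_chan_periph_hash(target, lun);
 *      LIST_INSERT_HEAD(&chan->chan_periphtab[hash], periph, periph_hash);
 *
 * The final mask keeps the index within SCSIPI_CHAN_PERIPH_BUCKETS.
 */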

/*
 * scsipi_insert_periph:
 *
 *      Insert a periph into the channel.
 */
void
scsipi_insert_periph(struct scsipi_channel *chan, struct scsipi_periph *periph)
{
        uint32_t hash;
        int s;

        hash = scsipi_chan_periph_hash(periph->periph_target,
            periph->periph_lun);

        s = splbio();
        LIST_INSERT_HEAD(&chan->chan_periphtab[hash], periph, periph_hash);
        splx(s);
}

/*
 * scsipi_remove_periph:
 *
 *      Remove a periph from the channel.
 */
void
scsipi_remove_periph(struct scsipi_channel *chan, struct scsipi_periph *periph)
{
        int s;

        s = splbio();
        LIST_REMOVE(periph, periph_hash);
        splx(s);
}

/*
 * scsipi_lookup_periph:
 *
 *      Lookup a periph on the specified channel.
 */
struct scsipi_periph *
scsipi_lookup_periph(struct scsipi_channel *chan, int target, int lun)
{
        struct scsipi_periph *periph;
        uint32_t hash;
        int s;

        if (target >= chan->chan_ntargets ||
            lun >= chan->chan_nluns)
                return (NULL);

        hash = scsipi_chan_periph_hash(target, lun);

        s = splbio();
        LIST_FOREACH(periph, &chan->chan_periphtab[hash], periph_hash) {
                if (periph->periph_target == target &&
                    periph->periph_lun == lun)
                        break;
        }
        splx(s);

        return (periph);
}
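
/*
 * Example caller (a hedged sketch, not code from this driver): fetching
 * the periph for target 2, LUN 0 on a channel:
 *
 *      struct scsipi_periph *p;
 *
 *      p = scsipi_lookup_periph(chan, 2, 0);
 *      if (p == NULL)
 *              return;         (nothing attached at that address)
 */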

/*
 * scsipi_get_resource:
 *
 *      Allocate a single xfer `resource' from the channel.
 *
 *      NOTE: Must be called at splbio().
 */
static int
scsipi_get_resource(struct scsipi_channel *chan)
{
        struct scsipi_adapter *adapt = chan->chan_adapter;

        if (chan->chan_flags & SCSIPI_CHAN_OPENINGS) {
                if (chan->chan_openings > 0) {
                        chan->chan_openings--;
                        return (1);
                }
                return (0);
        }

        if (adapt->adapt_openings > 0) {
                adapt->adapt_openings--;
                return (1);
        }
        return (0);
}

/*
 * scsipi_grow_resources:
 *
 *      Attempt to grow resources for a channel.  If this succeeds,
 *      we allocate one for our caller.
 *
 *      NOTE: Must be called at splbio().
 */
static __inline int
scsipi_grow_resources(struct scsipi_channel *chan)
{

        if (chan->chan_flags & SCSIPI_CHAN_CANGROW) {
                if ((chan->chan_flags & SCSIPI_CHAN_TACTIVE) == 0) {
                        scsipi_adapter_request(chan,
                            ADAPTER_REQ_GROW_RESOURCES, NULL);
                        return (scsipi_get_resource(chan));
                }
                /*
                 * Ask the channel thread to do it.  It'll have to thaw
                 * the queue.
                 */
                scsipi_channel_freeze(chan, 1);
                chan->chan_tflags |= SCSIPI_CHANT_GROWRES;
                wakeup(&chan->chan_complete);
                return (0);
        }

        return (0);
}

/*
 * scsipi_put_resource:
 *
 *      Free a single xfer `resource' to the channel.
 *
 *      NOTE: Must be called at splbio().
 */
static void
scsipi_put_resource(struct scsipi_channel *chan)
{
        struct scsipi_adapter *adapt = chan->chan_adapter;

        if (chan->chan_flags & SCSIPI_CHAN_OPENINGS)
                chan->chan_openings++;
        else
                adapt->adapt_openings++;
}

/*
 * scsipi_get_tag:
 *
 *      Get a tag ID for the specified xfer.
 *
 *      NOTE: Must be called at splbio().
 */
static void
scsipi_get_tag(struct scsipi_xfer *xs)
{
        struct scsipi_periph *periph = xs->xs_periph;
        int bit, tag;
        u_int word;

        bit = 0;        /* XXX gcc */
        for (word = 0; word < PERIPH_NTAGWORDS; word++) {
                bit = ffs(periph->periph_freetags[word]);
                if (bit != 0)
                        break;
        }
#ifdef DIAGNOSTIC
        if (word == PERIPH_NTAGWORDS) {
                scsipi_printaddr(periph);
                printf("no free tags\n");
                panic("scsipi_get_tag");
        }
#endif

        bit -= 1;
        periph->periph_freetags[word] &= ~(1 << bit);
        tag = (word << 5) | bit;

        /* XXX Should eventually disallow this completely. */
        if (tag >= periph->periph_openings) {
                scsipi_printaddr(periph);
                printf("WARNING: tag %d greater than available openings %d\n",
                    tag, periph->periph_openings);
        }

        xs->xs_tag_id = tag;
}
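
/*
 * Tag encoding sketch (illustrative): free tags live in a bitmap of
 * 32-bit words, so e.g. tag 37 maps to word 1, bit 5:
 *
 *      word = 37 >> 5;                 == 1
 *      bit  = 37 & 0x1f;               == 5
 *      tag  = (word << 5) | bit;       == 37 again
 *
 * scsipi_put_tag() below simply inverts this mapping.
 */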

/*
 * scsipi_put_tag:
 *
 *      Put the tag ID for the specified xfer back into the pool.
 *
 *      NOTE: Must be called at splbio().
 */
static void
scsipi_put_tag(struct scsipi_xfer *xs)
{
        struct scsipi_periph *periph = xs->xs_periph;
        int word, bit;

        word = xs->xs_tag_id >> 5;
        bit = xs->xs_tag_id & 0x1f;

        periph->periph_freetags[word] |= (1 << bit);
}

/*
 * scsipi_get_xs:
 *
 *      Allocate an xfer descriptor and associate it with the
 *      specified peripheral.  If the peripheral has no more
 *      available command openings, we either block waiting for
 *      one to become available, or fail.
 */
struct scsipi_xfer *
scsipi_get_xs(struct scsipi_periph *periph, int flags)
{
        struct scsipi_xfer *xs;
        int s;

        SC_DEBUG(periph, SCSIPI_DB3, ("scsipi_get_xs\n"));

        KASSERT(!cold);

#ifdef DIAGNOSTIC
        /*
         * URGENT commands can never be ASYNC.
         */
        if ((flags & (XS_CTL_URGENT|XS_CTL_ASYNC)) ==
            (XS_CTL_URGENT|XS_CTL_ASYNC)) {
                scsipi_printaddr(periph);
                printf("URGENT and ASYNC\n");
                panic("scsipi_get_xs");
        }
#endif

        s = splbio();
        /*
         * Wait for a command opening to become available.  Rules:
         *
         * - All xfers must wait for an available opening.
         *   Exception: URGENT xfers can proceed when
         *   active == openings, because we use the opening
         *   of the command we're recovering for.
         *
         * - If the periph has sense pending, only URGENT & REQSENSE
         *   xfers may proceed.
         *
         * - If the periph is recovering, only URGENT xfers may
         *   proceed.
         *
         * - If the periph is currently executing a recovery
         *   command, URGENT commands must block, because only
         *   one recovery command can execute at a time.
         */
        for (;;) {
                if (flags & XS_CTL_URGENT) {
                        if (periph->periph_active > periph->periph_openings)
                                goto wait_for_opening;
                        if (periph->periph_flags & PERIPH_SENSE) {
                                if ((flags & XS_CTL_REQSENSE) == 0)
                                        goto wait_for_opening;
                        } else {
                                if ((periph->periph_flags &
                                    PERIPH_RECOVERY_ACTIVE) != 0)
                                        goto wait_for_opening;
                                periph->periph_flags |= PERIPH_RECOVERY_ACTIVE;
                        }
                        break;
                }
                if (periph->periph_active >= periph->periph_openings ||
                    (periph->periph_flags & PERIPH_RECOVERING) != 0)
                        goto wait_for_opening;
                periph->periph_active++;
                break;

 wait_for_opening:
                if (flags & XS_CTL_NOSLEEP) {
                        splx(s);
                        return (NULL);
                }
                SC_DEBUG(periph, SCSIPI_DB3, ("sleeping\n"));
                periph->periph_flags |= PERIPH_WAITING;
                (void) tsleep(periph, PRIBIO, "getxs", 0);
        }
        SC_DEBUG(periph, SCSIPI_DB3, ("calling pool_get\n"));
        xs = pool_get(&scsipi_xfer_pool,
            ((flags & XS_CTL_NOSLEEP) != 0 ? PR_NOWAIT : PR_WAITOK));
        if (xs == NULL) {
                if (flags & XS_CTL_URGENT) {
                        if ((flags & XS_CTL_REQSENSE) == 0)
                                periph->periph_flags &= ~PERIPH_RECOVERY_ACTIVE;
                } else
                        periph->periph_active--;
                scsipi_printaddr(periph);
                printf("unable to allocate %sscsipi_xfer\n",
                    (flags & XS_CTL_URGENT) ? "URGENT " : "");
        }
        splx(s);

        SC_DEBUG(periph, SCSIPI_DB3, ("returning\n"));

        if (xs != NULL) {
                memset(xs, 0, sizeof(*xs));
                callout_init(&xs->xs_callout);
                xs->xs_periph = periph;
                xs->xs_control = flags;
                xs->xs_status = 0;
                s = splbio();
                TAILQ_INSERT_TAIL(&periph->periph_xferq, xs, device_q);
                splx(s);
        }
        return (xs);
}
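
/*
 * Usage sketch (hedged, not from this file): callers pair this with
 * scsipi_put_xs(), e.g. a driver that must not sleep:
 *
 *      xs = scsipi_get_xs(periph, XS_CTL_NOSLEEP);
 *      if (xs == NULL)
 *              return (ENOMEM);        (no opening, or pool exhausted)
 *      ... fill in xs->cmd and data, then scsipi_execute_xs(xs) ...
 */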

/*
 * scsipi_put_xs:
 *
 *      Release an xfer descriptor, decreasing the outstanding command
 *      count for the peripheral.  If there is a thread waiting for
 *      an opening, wake it up.  If not, kick any queued I/O the
 *      peripheral may have.
 *
 *      NOTE: Must be called at splbio().
 */
void
scsipi_put_xs(struct scsipi_xfer *xs)
{
        struct scsipi_periph *periph = xs->xs_periph;
        int flags = xs->xs_control;

        SC_DEBUG(periph, SCSIPI_DB3, ("scsipi_free_xs\n"));

        TAILQ_REMOVE(&periph->periph_xferq, xs, device_q);
        pool_put(&scsipi_xfer_pool, xs);

#ifdef DIAGNOSTIC
        if ((periph->periph_flags & PERIPH_RECOVERY_ACTIVE) != 0 &&
            periph->periph_active == 0) {
                scsipi_printaddr(periph);
                printf("recovery without a command to recover for\n");
                panic("scsipi_put_xs");
        }
#endif

        if (flags & XS_CTL_URGENT) {
                if ((flags & XS_CTL_REQSENSE) == 0)
                        periph->periph_flags &= ~PERIPH_RECOVERY_ACTIVE;
        } else
                periph->periph_active--;
        if (periph->periph_active == 0 &&
            (periph->periph_flags & PERIPH_WAITDRAIN) != 0) {
                periph->periph_flags &= ~PERIPH_WAITDRAIN;
                wakeup(&periph->periph_active);
        }

        if (periph->periph_flags & PERIPH_WAITING) {
                periph->periph_flags &= ~PERIPH_WAITING;
                wakeup(periph);
        } else {
                if (periph->periph_switch->psw_start != NULL &&
                    (periph->periph_dev->dv_flags & DVF_ACTIVE)) {
                        SC_DEBUG(periph, SCSIPI_DB2,
                            ("calling private start()\n"));
                        (*periph->periph_switch->psw_start)(periph);
                }
        }
}

/*
 * scsipi_channel_freeze:
 *
 *      Freeze a channel's xfer queue.
 */
void
scsipi_channel_freeze(struct scsipi_channel *chan, int count)
{
        int s;

        s = splbio();
        chan->chan_qfreeze += count;
        splx(s);
}

/*
 * scsipi_channel_thaw:
 *
 *      Thaw a channel's xfer queue.
 */
void
scsipi_channel_thaw(struct scsipi_channel *chan, int count)
{
        int s;

        s = splbio();
        chan->chan_qfreeze -= count;
        /*
         * Don't let the freeze count go negative.
         *
         * Presumably the adapter driver could keep track of this,
         * but it might just be easier to do this here so as to allow
         * multiple callers, including those outside the adapter driver.
         */
        if (chan->chan_qfreeze < 0) {
                chan->chan_qfreeze = 0;
        }
        splx(s);
        /*
         * Kick the channel's queue here.  Note, we may be running in
         * interrupt context (softclock or HBA's interrupt), so the adapter
         * driver had better not sleep.
         */
        if (chan->chan_qfreeze == 0)
                scsipi_run_queue(chan);
}

/*
 * scsipi_channel_timed_thaw:
 *
 *      Thaw a channel after some time has expired.  This will also
 *      run the channel's queue if the freeze count has reached 0.
 */
void
scsipi_channel_timed_thaw(void *arg)
{
        struct scsipi_channel *chan = arg;

        scsipi_channel_thaw(chan, 1);
}
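
/*
 * Typical use (a sketch; `sc_callout' is a hypothetical callout owned
 * by an adapter): back off for a second, then thaw and rerun the queue,
 * mirroring the periph-level pattern used in scsipi_complete() below:
 *
 *      scsipi_channel_freeze(chan, 1);
 *      callout_reset(&sc->sc_callout, hz, scsipi_channel_timed_thaw, chan);
 */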

/*
 * scsipi_periph_freeze:
 *
 *      Freeze a device's xfer queue.
 */
void
scsipi_periph_freeze(struct scsipi_periph *periph, int count)
{
        int s;

        s = splbio();
        periph->periph_qfreeze += count;
        splx(s);
}

/*
 * scsipi_periph_thaw:
 *
 *      Thaw a device's xfer queue.
 */
void
scsipi_periph_thaw(struct scsipi_periph *periph, int count)
{
        int s;

        s = splbio();
        periph->periph_qfreeze -= count;
#ifdef DIAGNOSTIC
        if (periph->periph_qfreeze < 0) {
                static const char pc[] = "periph freeze count < 0";
                scsipi_printaddr(periph);
                printf("%s\n", pc);
                panic(pc);
        }
#endif
        if (periph->periph_qfreeze == 0 &&
            (periph->periph_flags & PERIPH_WAITING) != 0)
                wakeup(periph);
        splx(s);
}

/*
 * scsipi_periph_timed_thaw:
 *
 *      Thaw a device after some time has expired.
 */
void
scsipi_periph_timed_thaw(void *arg)
{
        int s;
        struct scsipi_periph *periph = arg;

        callout_stop(&periph->periph_callout);

        s = splbio();
        scsipi_periph_thaw(periph, 1);
        if ((periph->periph_channel->chan_flags & SCSIPI_CHAN_TACTIVE) == 0) {
                /*
                 * Kick the channel's queue here.  Note, we're running in
                 * interrupt context (softclock), so the adapter driver
                 * had better not sleep.
                 */
                scsipi_run_queue(periph->periph_channel);
        } else {
                /*
                 * Tell the completion thread to kick the channel's queue here.
                 */
                periph->periph_channel->chan_tflags |= SCSIPI_CHANT_KICK;
                wakeup(&periph->periph_channel->chan_complete);
        }
        splx(s);
}

/*
 * scsipi_wait_drain:
 *
 *      Wait for a periph's pending xfers to drain.
 */
void
scsipi_wait_drain(struct scsipi_periph *periph)
{
        int s;

        s = splbio();
        while (periph->periph_active != 0) {
                periph->periph_flags |= PERIPH_WAITDRAIN;
                (void) tsleep(&periph->periph_active, PRIBIO, "sxdrn", 0);
        }
        splx(s);
}

/*
 * scsipi_kill_pending:
 *
 *      Kill off all pending xfers for a periph.
 *
 *      NOTE: Must be called at splbio().
 */
void
scsipi_kill_pending(struct scsipi_periph *periph)
{

        (*periph->periph_channel->chan_bustype->bustype_kill_pending)(periph);
        scsipi_wait_drain(periph);
}

/*
 * scsipi_print_cdb:
 *      prints a command descriptor block (for debug purposes, error
 *      messages, SCSIPI_VERBOSE, ...)
 */
void
scsipi_print_cdb(struct scsipi_generic *cmd)
{
        int i, j;

        printf("0x%02x", cmd->opcode);

        switch (CDB_GROUPID(cmd->opcode)) {
        case CDB_GROUPID_0:
                j = CDB_GROUP0;
                break;
        case CDB_GROUPID_1:
                j = CDB_GROUP1;
                break;
        case CDB_GROUPID_2:
                j = CDB_GROUP2;
                break;
        case CDB_GROUPID_3:
                j = CDB_GROUP3;
                break;
        case CDB_GROUPID_4:
                j = CDB_GROUP4;
                break;
        case CDB_GROUPID_5:
                j = CDB_GROUP5;
                break;
        case CDB_GROUPID_6:
                j = CDB_GROUP6;
                break;
        case CDB_GROUPID_7:
                j = CDB_GROUP7;
                break;
        default:
                j = 0;
        }
        if (j == 0)
                j = sizeof (cmd->bytes);
        for (i = 0; i < j - 1; i++)     /* already done the opcode */
                printf(" %02x", cmd->bytes[i]);
}

/*
 * scsipi_interpret_sense:
 *
 *      Look at the returned sense and act on the error, determining
 *      the unix error number to pass back.  (0 = report no error)
 *
 *      NOTE: If we return ERESTART, we are expected to have
 *      thawed the device!
 *
 *      THIS IS THE DEFAULT ERROR HANDLER FOR SCSI DEVICES.
 */
int
scsipi_interpret_sense(struct scsipi_xfer *xs)
{
        struct scsipi_sense_data *sense;
        struct scsipi_periph *periph = xs->xs_periph;
        u_int8_t key;
        int error;
#ifndef SCSIVERBOSE
        u_int32_t info;
        static char *error_mes[] = {
                "soft error (corrected)",
                "not ready", "medium error",
                "non-media hardware failure", "illegal request",
                "unit attention", "readonly device",
                "no data found", "vendor unique",
                "copy aborted", "command aborted",
                "search returned equal", "volume overflow",
                "verify miscompare", "unknown error key"
        };
#endif

        sense = &xs->sense.scsi_sense;
#ifdef SCSIPI_DEBUG
        if (periph->periph_dbflags & SCSIPI_DB1) {
                int count;
                scsipi_printaddr(periph);
                printf(" sense debug information:\n");
                printf("\tcode 0x%x valid 0x%x\n",
                    sense->error_code & SSD_ERRCODE,
                    sense->error_code & SSD_ERRCODE_VALID ? 1 : 0);
                printf("\tseg 0x%x key 0x%x ili 0x%x eom 0x%x fmark 0x%x\n",
                    sense->segment,
                    sense->flags & SSD_KEY,
                    sense->flags & SSD_ILI ? 1 : 0,
                    sense->flags & SSD_EOM ? 1 : 0,
                    sense->flags & SSD_FILEMARK ? 1 : 0);
                printf("\ninfo: 0x%x 0x%x 0x%x 0x%x followed by %d "
                    "extra bytes\n",
                    sense->info[0],
                    sense->info[1],
                    sense->info[2],
                    sense->info[3],
                    sense->extra_len);
                printf("\textra: ");
                for (count = 0; count < ADD_BYTES_LIM(sense); count++)
                        printf("0x%x ", sense->cmd_spec_info[count]);
                printf("\n");
        }
#endif

        /*
         * If the periph has its own error handler, call it first.
         * If it returns a legit error value, return that, otherwise
         * it wants us to continue with normal error processing.
         */
        if (periph->periph_switch->psw_error != NULL) {
                SC_DEBUG(periph, SCSIPI_DB2,
                    ("calling private err_handler()\n"));
                error = (*periph->periph_switch->psw_error)(xs);
                if (error != EJUSTRETURN)
                        return (error);
        }
        /* otherwise use the default */
        switch (sense->error_code & SSD_ERRCODE) {

                /*
                 * Old SCSI-1 and SASI devices respond with
                 * codes other than 70.
                 */
        case 0x00:      /* no error (command completed OK) */
                return (0);
        case 0x04:      /* drive not ready after it was selected */
                if ((periph->periph_flags & PERIPH_REMOVABLE) != 0)
                        periph->periph_flags &= ~PERIPH_MEDIA_LOADED;
                if ((xs->xs_control & XS_CTL_IGNORE_NOT_READY) != 0)
                        return (0);
                /* XXX - display some sort of error here? */
                return (EIO);
        case 0x20:      /* invalid command */
                if ((xs->xs_control &
                    XS_CTL_IGNORE_ILLEGAL_REQUEST) != 0)
                        return (0);
                return (EINVAL);
        case 0x25:      /* invalid LUN (Adaptec ACB-4000) */
                return (EACCES);

                /*
                 * If it's code 70, use the extended stuff and
                 * interpret the key
                 */
        case 0x71:      /* delayed error */
                scsipi_printaddr(periph);
                key = sense->flags & SSD_KEY;
                printf(" DEFERRED ERROR, key = 0x%x\n", key);
                /* FALLTHROUGH */
        case 0x70:
#ifndef SCSIVERBOSE
                if ((sense->error_code & SSD_ERRCODE_VALID) != 0)
                        info = _4btol(sense->info);
                else
                        info = 0;
#endif
                key = sense->flags & SSD_KEY;

                switch (key) {
                case SKEY_NO_SENSE:
                case SKEY_RECOVERED_ERROR:
                        if (xs->resid == xs->datalen && xs->datalen) {
                                /*
                                 * Why is this here?
                                 */
                                xs->resid = 0;  /* not short read */
                        }
                        /* FALLTHROUGH */
                case SKEY_EQUAL:
                        error = 0;
                        break;
                case SKEY_NOT_READY:
                        if ((periph->periph_flags & PERIPH_REMOVABLE) != 0)
                                periph->periph_flags &= ~PERIPH_MEDIA_LOADED;
                        if ((xs->xs_control & XS_CTL_IGNORE_NOT_READY) != 0)
                                return (0);
                        if (sense->add_sense_code == 0x3A) {
                                error = ENODEV; /* Medium not present */
                                if (xs->xs_control & XS_CTL_SILENT_NODEV)
                                        return (error);
                        } else
                                error = EIO;
                        if ((xs->xs_control & XS_CTL_SILENT) != 0)
                                return (error);
                        break;
                case SKEY_ILLEGAL_REQUEST:
                        if ((xs->xs_control &
                            XS_CTL_IGNORE_ILLEGAL_REQUEST) != 0)
                                return (0);
                        /*
                         * Handle the case where a device reports
                         * Logical Unit Not Supported during discovery.
                         */
                        if ((xs->xs_control & XS_CTL_DISCOVERY) != 0 &&
                            sense->add_sense_code == 0x25 &&
                            sense->add_sense_code_qual == 0x00)
                                return (EINVAL);
                        if ((xs->xs_control & XS_CTL_SILENT) != 0)
                                return (EIO);
                        error = EINVAL;
                        break;
                case SKEY_UNIT_ATTENTION:
                        if (sense->add_sense_code == 0x29 &&
                            sense->add_sense_code_qual == 0x00) {
                                /* device or bus reset */
                                return (ERESTART);
                        }
                        if ((periph->periph_flags & PERIPH_REMOVABLE) != 0)
                                periph->periph_flags &= ~PERIPH_MEDIA_LOADED;
                        if ((xs->xs_control &
                            XS_CTL_IGNORE_MEDIA_CHANGE) != 0 ||
                            /* XXX Should reupload any transient state. */
                            (periph->periph_flags &
                            PERIPH_REMOVABLE) == 0) {
                                return (ERESTART);
                        }
                        if ((xs->xs_control & XS_CTL_SILENT) != 0)
                                return (EIO);
                        error = EIO;
                        break;
                case SKEY_WRITE_PROTECT:
                        error = EROFS;
                        break;
                case SKEY_BLANK_CHECK:
                        error = 0;
                        break;
                case SKEY_ABORTED_COMMAND:
                        if (xs->xs_retries != 0) {
                                xs->xs_retries--;
                                error = ERESTART;
                        } else
                                error = EIO;
                        break;
                case SKEY_VOLUME_OVERFLOW:
                        error = ENOSPC;
                        break;
                default:
                        error = EIO;
                        break;
                }

#ifdef SCSIVERBOSE
                if (key && (xs->xs_control & XS_CTL_SILENT) == 0)
                        scsipi_print_sense(xs, 0);
#else
                if (key) {
                        scsipi_printaddr(periph);
                        printf("%s", error_mes[key - 1]);
                        if ((sense->error_code & SSD_ERRCODE_VALID) != 0) {
                                switch (key) {
                                case SKEY_NOT_READY:
                                case SKEY_ILLEGAL_REQUEST:
                                case SKEY_UNIT_ATTENTION:
                                case SKEY_WRITE_PROTECT:
                                        break;
                                case SKEY_BLANK_CHECK:
                                        printf(", requested size: %d (decimal)",
                                            info);
                                        break;
                                case SKEY_ABORTED_COMMAND:
                                        if (xs->xs_retries)
                                                printf(", retrying");
                                        printf(", cmd 0x%x, info 0x%x",
                                            xs->cmd->opcode, info);
                                        break;
                                default:
                                        printf(", info = %d (decimal)", info);
                                }
                        }
                        if (sense->extra_len != 0) {
                                int n;
                                printf(", data =");
                                for (n = 0; n < sense->extra_len; n++)
                                        printf(" %02x",
                                            sense->cmd_spec_info[n]);
                        }
                        printf("\n");
                }
#endif
                return (error);

        /*
         * Some other code, just report it
         */
        default:
#if defined(SCSIDEBUG) || defined(DEBUG)
        {
                static char *uc = "undecodable sense error";
                int i;
                u_int8_t *cptr = (u_int8_t *) sense;
                scsipi_printaddr(periph);
                if (xs->cmd == &xs->cmdstore) {
                        printf("%s for opcode 0x%x, data=",
                            uc, xs->cmdstore.opcode);
                } else {
                        printf("%s, data=", uc);
                }
                for (i = 0; i < sizeof (*sense); i++)
                        printf(" 0x%02x", *(cptr++) & 0xff);
                printf("\n");
        }
#else
                scsipi_printaddr(periph);
                printf("Sense Error Code 0x%x",
                    sense->error_code & SSD_ERRCODE);
                if ((sense->error_code & SSD_ERRCODE_VALID) != 0) {
                        struct scsipi_sense_data_unextended *usense =
                            (struct scsipi_sense_data_unextended *)sense;
                        printf(" at block no. %d (decimal)",
                            _3btol(usense->block));
                }
                printf("\n");
#endif
                return (EIO);
        }
}

/*
 * scsipi_size:
 *
 *      Find out from the device what its capacity is.
 */
u_int64_t
scsipi_size(struct scsipi_periph *periph, int flags)
{
        struct scsipi_read_capacity cmd;
        struct scsipi_read_cap_data data;

        memset(&cmd, 0, sizeof(cmd));
        cmd.opcode = READ_CAPACITY;

        /*
         * If the command works, interpret the result as a 4 byte
         * number of blocks
         */
        if (scsipi_command(periph, (void *)&cmd, sizeof(cmd),
            (void *)&data, sizeof(data), SCSIPIRETRIES, 20000, NULL,
            flags | XS_CTL_DATA_IN | XS_CTL_DATA_ONSTACK | XS_CTL_SILENT) != 0)
                return (0);

        return (_4btol(data.addr) + 1);
}
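
/*
 * Interpretation sketch (illustrative): the return value is a block
 * count (last LBA + 1), not bytes; 0 means the command failed.  A
 * caller that knows its sector size converts explicitly:
 *
 *      u_int64_t blks = scsipi_size(periph, 0);
 *      u_int64_t bytes = blks * 512;   (assuming 512-byte sectors)
 */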

/*
 * scsipi_test_unit_ready:
 *
 *      Issue a `test unit ready' request.
 */
int
scsipi_test_unit_ready(struct scsipi_periph *periph, int flags)
{
        struct scsipi_test_unit_ready cmd;
        int retries;

        /* some ATAPI drives don't support TEST_UNIT_READY. Sigh */
        if (periph->periph_quirks & PQUIRK_NOTUR)
                return (0);

        if (flags & XS_CTL_DISCOVERY)
                retries = 0;
        else
                retries = SCSIPIRETRIES;

        memset(&cmd, 0, sizeof(cmd));
        cmd.opcode = TEST_UNIT_READY;

        return (scsipi_command(periph, (void *)&cmd, sizeof(cmd), 0, 0,
            retries, 10000, NULL, flags));
}

/*
 * scsipi_inquire:
 *
 *      Ask the device about itself.
 */
int
scsipi_inquire(struct scsipi_periph *periph, struct scsipi_inquiry_data *inqbuf,
    int flags)
{
        struct scsipi_inquiry cmd;
        int error;
        int retries;

        if (flags & XS_CTL_DISCOVERY)
                retries = 0;
        else
                retries = SCSIPIRETRIES;

        /*
         * If we request more data than the device can provide, it SHOULD just
         * return a short response.  However, some devices error with an
         * ILLEGAL REQUEST sense code, and yet others have even more special
         * failure modes (such as the GL641USB flash adapter, which goes loony
         * and sends corrupted CRCs).  To work around this, and to bring our
         * behavior more in line with other OSes, we do a shorter inquiry,
         * covering all the SCSI-2 information, first, and then request more
         * data iff the "additional length" field indicates there is more.
         * - mycroft, 2003/10/16
         */
        memset(&cmd, 0, sizeof(cmd));
        cmd.opcode = INQUIRY;
        cmd.length = SCSIPI_INQUIRY_LENGTH_SCSI2;
        error = scsipi_command(periph, (void *)&cmd, sizeof(cmd),
            (void *)inqbuf, SCSIPI_INQUIRY_LENGTH_SCSI2, retries,
            10000, NULL, flags | XS_CTL_DATA_IN);
        if (!error &&
            inqbuf->additional_length > SCSIPI_INQUIRY_LENGTH_SCSI2 - 4) {
#if 0
printf("inquire: addlen=%d, retrying\n", inqbuf->additional_length);
#endif
                cmd.length = SCSIPI_INQUIRY_LENGTH_SCSI3;
                error = scsipi_command(periph, (void *)&cmd, sizeof(cmd),
                    (void *)inqbuf, SCSIPI_INQUIRY_LENGTH_SCSI3, retries,
                    10000, NULL, flags | XS_CTL_DATA_IN);
#if 0
printf("inquire: error=%d\n", error);
#endif
        }

#ifdef SCSI_OLD_NOINQUIRY
        /*
         * Kludge for the Adaptec ACB-4000 SCSI->MFM translator.
         * This board doesn't support the INQUIRY command at all.
         */
        if (error == EINVAL || error == EACCES) {
                /*
                 * Conjure up an INQUIRY response.
                 */
                inqbuf->device = (error == EINVAL ?
                    SID_QUAL_LU_PRESENT :
                    SID_QUAL_LU_NOTPRESENT) | T_DIRECT;
                inqbuf->dev_qual2 = 0;
                inqbuf->version = 0;
                inqbuf->response_format = SID_FORMAT_SCSI1;
                inqbuf->additional_length = SCSIPI_INQUIRY_LENGTH_SCSI2 - 4;
                inqbuf->flags1 = inqbuf->flags2 = inqbuf->flags3 = 0;
                memcpy(inqbuf->vendor, "ADAPTEC ACB-4000            ", 28);
                error = 0;
        }

        /*
         * Kludge for the Emulex MT-02 SCSI->QIC translator.
         * This board gives an empty response to an INQUIRY command.
         */
        else if (error == 0 &&
            inqbuf->device == (SID_QUAL_LU_PRESENT | T_DIRECT) &&
            inqbuf->dev_qual2 == 0 &&
            inqbuf->version == 0 &&
            inqbuf->response_format == SID_FORMAT_SCSI1) {
                /*
                 * Fill out the INQUIRY response.
                 */
                inqbuf->device = (SID_QUAL_LU_PRESENT | T_SEQUENTIAL);
                inqbuf->dev_qual2 = SID_REMOVABLE;
                inqbuf->additional_length = SCSIPI_INQUIRY_LENGTH_SCSI2 - 4;
                inqbuf->flags1 = inqbuf->flags2 = inqbuf->flags3 = 0;
                memcpy(inqbuf->vendor, "EMULEX  MT-02 QIC           ", 28);
        }
#endif /* SCSI_OLD_NOINQUIRY */

        return error;
}

/*
 * scsipi_prevent:
 *
 *      Prevent or allow the user to remove the media.
 */
int
scsipi_prevent(struct scsipi_periph *periph, int type, int flags)
{
        struct scsipi_prevent cmd;

        memset(&cmd, 0, sizeof(cmd));
        cmd.opcode = PREVENT_ALLOW;
        cmd.how = type;

        return (scsipi_command(periph, (void *)&cmd, sizeof(cmd), 0, 0,
            SCSIPIRETRIES, 5000, NULL, flags));
}

/*
 * scsipi_start:
 *
 *      Send a START UNIT.
 */
int
scsipi_start(struct scsipi_periph *periph, int type, int flags)
{
        struct scsipi_start_stop cmd;

        memset(&cmd, 0, sizeof(cmd));
        cmd.opcode = START_STOP;
        cmd.byte2 = 0x00;
        cmd.how = type;

        return (scsipi_command(periph, (void *)&cmd, sizeof(cmd), 0, 0,
            SCSIPIRETRIES, (type & SSS_START) ? 60000 : 10000, NULL, flags));
}
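
/*
 * Example (hedged sketch): spinning a unit up before use:
 *
 *      error = scsipi_start(periph, SSS_START, 0);
 *
 * The longer 60 second timeout above covers spin-up; a stop request
 * gets the usual 10 seconds.
 */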

/*
 * scsipi_mode_sense, scsipi_mode_sense_big:
 *      get a mode page from a device
 */

int
scsipi_mode_sense(struct scsipi_periph *periph, int byte2, int page,
    struct scsipi_mode_header *data, int len, int flags, int retries,
    int timeout)
{
        struct scsipi_mode_sense cmd;

        memset(&cmd, 0, sizeof(cmd));
        cmd.opcode = MODE_SENSE;
        cmd.byte2 = byte2;
        cmd.page = page;
        cmd.length = len & 0xff;

        return (scsipi_command(periph, (void *)&cmd, sizeof(cmd),
            (void *)data, len, retries, timeout, NULL, flags | XS_CTL_DATA_IN));
}

int
scsipi_mode_sense_big(struct scsipi_periph *periph, int byte2, int page,
    struct scsipi_mode_header_big *data, int len, int flags, int retries,
    int timeout)
{
        struct scsipi_mode_sense_big cmd;

        memset(&cmd, 0, sizeof(cmd));
        cmd.opcode = MODE_SENSE_BIG;
        cmd.byte2 = byte2;
        cmd.page = page;
        _lto2b(len, cmd.length);

        return (scsipi_command(periph, (void *)&cmd, sizeof(cmd),
            (void *)data, len, retries, timeout, NULL, flags | XS_CTL_DATA_IN));
}

int
scsipi_mode_select(struct scsipi_periph *periph, int byte2,
    struct scsipi_mode_header *data, int len, int flags, int retries,
    int timeout)
{
        struct scsipi_mode_select cmd;

        memset(&cmd, 0, sizeof(cmd));
        cmd.opcode = MODE_SELECT;
        cmd.byte2 = byte2;
        cmd.length = len & 0xff;

        return (scsipi_command(periph, (void *)&cmd, sizeof(cmd),
            (void *)data, len, retries, timeout, NULL, flags | XS_CTL_DATA_OUT));
}

int
scsipi_mode_select_big(struct scsipi_periph *periph, int byte2,
    struct scsipi_mode_header_big *data, int len, int flags, int retries,
    int timeout)
{
        struct scsipi_mode_select_big cmd;

        memset(&cmd, 0, sizeof(cmd));
        cmd.opcode = MODE_SELECT_BIG;
        cmd.byte2 = byte2;
        _lto2b(len, cmd.length);

        return (scsipi_command(periph, (void *)&cmd, sizeof(cmd),
            (void *)data, len, retries, timeout, NULL, flags | XS_CTL_DATA_OUT));
}
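
/*
 * Usage sketch (illustrative; the buffer layout is an assumption, not
 * from this file): reading mode page 8 (caching) with default retries:
 *
 *      struct {
 *              struct scsipi_mode_header header;
 *              u_int8_t page[20];
 *      } buf;
 *
 *      error = scsipi_mode_sense(periph, 0, 8, &buf.header,
 *          sizeof(buf), 0, SCSIPIRETRIES, 10000);
 */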

/*
 * scsipi_done:
 *
 *      This routine is called by an adapter's interrupt handler when
 *      an xfer is completed.
 */
void
scsipi_done(struct scsipi_xfer *xs)
{
        struct scsipi_periph *periph = xs->xs_periph;
        struct scsipi_channel *chan = periph->periph_channel;
        int s, freezecnt;

        SC_DEBUG(periph, SCSIPI_DB2, ("scsipi_done\n"));
#ifdef SCSIPI_DEBUG
        if (periph->periph_dbflags & SCSIPI_DB1)
                show_scsipi_cmd(xs);
#endif

        s = splbio();
        /*
         * The resource this command was using is now free.
         */
        scsipi_put_resource(chan);
        xs->xs_periph->periph_sent--;

        /*
         * If the command was tagged, free the tag.
         */
        if (XS_CTL_TAGTYPE(xs) != 0)
                scsipi_put_tag(xs);
        else
                periph->periph_flags &= ~PERIPH_UNTAG;

        /* Mark the command as `done'. */
        xs->xs_status |= XS_STS_DONE;

#ifdef DIAGNOSTIC
        if ((xs->xs_control & (XS_CTL_ASYNC|XS_CTL_POLL)) ==
            (XS_CTL_ASYNC|XS_CTL_POLL))
                panic("scsipi_done: ASYNC and POLL");
#endif

        /*
         * If the xfer had an error of any sort, freeze the
         * periph's queue.  Freeze it again if we were requested
         * to do so in the xfer.
         */
        freezecnt = 0;
        if (xs->error != XS_NOERROR)
                freezecnt++;
        if (xs->xs_control & XS_CTL_FREEZE_PERIPH)
                freezecnt++;
        if (freezecnt != 0)
                scsipi_periph_freeze(periph, freezecnt);

        /*
         * Record the xfer with a pending sense, in case a SCSI reset is
         * received before the thread is woken up.
         */
        if (xs->error == XS_BUSY && xs->status == SCSI_CHECK) {
                periph->periph_flags |= PERIPH_SENSE;
                periph->periph_xscheck = xs;
        }

        /*
         * If this was an xfer that was not to complete asynchronously,
         * let the requesting thread perform error checking/handling
         * in its context.
         */
        if ((xs->xs_control & XS_CTL_ASYNC) == 0) {
                splx(s);
                /*
                 * If it's a polling job, just return, to unwind the
                 * call graph.  We don't need to restart the queue,
                 * because polling jobs are treated specially, and
                 * are really only used during crash dumps anyway
                 * (XXX or during boot-time autoconfiguration of
                 * ATAPI devices).
                 */
                if (xs->xs_control & XS_CTL_POLL)
                        return;
                wakeup(xs);
                goto out;
        }

        /*
         * Catch the extremely common case of I/O completing
         * without error; no use in taking a context switch
         * if we can handle it in interrupt context.
         */
        if (xs->error == XS_NOERROR) {
                splx(s);
                (void) scsipi_complete(xs);
                goto out;
        }

        /*
         * There is an error on this xfer.  Put it on the channel's
         * completion queue, and wake up the completion thread.
         */
        TAILQ_INSERT_TAIL(&chan->chan_complete, xs, channel_q);
        splx(s);
        wakeup(&chan->chan_complete);

 out:
        /*
         * If there are more xfers on the channel's queue, attempt to
         * run them.
         */
        scsipi_run_queue(chan);
}

/*
 * scsipi_complete:
 *
 *      Completion of a scsipi_xfer.  This is the guts of scsipi_done().
 *
 *      NOTE: This routine MUST be called with valid thread context
 *      except for the case where the following two conditions are
 *      true:
 *
 *              xs->error == XS_NOERROR
 *              XS_CTL_ASYNC is set in xs->xs_control
 *
 *      The semantics of this routine can be tricky, so here is an
 *      explanation:
 *
 *              0               Xfer completed successfully.
 *
 *              ERESTART        Xfer had an error, but was restarted.
 *
 *              anything else   Xfer had an error, return value is Unix
 *                              errno.
 *
 *      If the return value is anything but ERESTART:
 *
 *              - If XS_CTL_ASYNC is set, `xs' has been freed back to
 *                the pool.
 *
 *              - If there is a buf associated with the xfer,
 *                it has been biodone()'d.
 */
static int
scsipi_complete(struct scsipi_xfer *xs)
{
        struct scsipi_periph *periph = xs->xs_periph;
        struct scsipi_channel *chan = periph->periph_channel;
        int error, s;

#ifdef DIAGNOSTIC
        if ((xs->xs_control & XS_CTL_ASYNC) != 0 && xs->bp == NULL)
                panic("scsipi_complete: XS_CTL_ASYNC but no buf");
#endif
        /*
         * If command terminated with a CHECK CONDITION, we need to issue a
         * REQUEST_SENSE command.  Once the REQUEST_SENSE has been processed
         * we'll have the real status.
         * Must be processed at splbio() to avoid missing a SCSI bus reset
         * for this command.
         */
        s = splbio();
        if (xs->error == XS_BUSY && xs->status == SCSI_CHECK) {
                /* request sense for a request sense? */
                if (xs->xs_control & XS_CTL_REQSENSE) {
                        scsipi_printaddr(periph);
                        printf("request sense for a request sense?\n");
                        /* XXX maybe we should reset the device? */
                        /* we've been frozen because xs->error != XS_NOERROR */
                        scsipi_periph_thaw(periph, 1);
                        splx(s);
                        if (xs->resid < xs->datalen) {
                                printf("we read %d bytes of sense anyway:\n",
                                    xs->datalen - xs->resid);
#ifdef SCSIVERBOSE
                                scsipi_print_sense_data((void *)xs->data, 0);
#endif
                        }
                        return EINVAL;
                }
                scsipi_request_sense(xs);
        }
        splx(s);

        /*
         * If it's a user-level request, bypass all usual completion
         * processing and let the user work it out.
         */
        if ((xs->xs_control & XS_CTL_USERCMD) != 0) {
                SC_DEBUG(periph, SCSIPI_DB3, ("calling user done()\n"));
                if (xs->error != XS_NOERROR)
                        scsipi_periph_thaw(periph, 1);
                scsipi_user_done(xs);
                SC_DEBUG(periph, SCSIPI_DB3, ("returned from user done()\n"));
                return 0;
        }

        switch (xs->error) {
        case XS_NOERROR:
                error = 0;
                break;

        case XS_SENSE:
        case XS_SHORTSENSE:
                error = (*chan->chan_bustype->bustype_interpret_sense)(xs);
                break;

        case XS_RESOURCE_SHORTAGE:
                /*
                 * XXX Should freeze channel's queue.
                 */
                scsipi_printaddr(periph);
                printf("adapter resource shortage\n");
                /* FALLTHROUGH */

        case XS_BUSY:
                if (xs->error == XS_BUSY && xs->status == SCSI_QUEUE_FULL) {
                        struct scsipi_max_openings mo;

                        /*
                         * We set the openings to active - 1, assuming that
                         * the command that got us here is the first one that
                         * can't fit into the device's queue.  If that's not
                         * the case, I guess we'll find out soon enough.
                         */
                        mo.mo_target = periph->periph_target;
                        mo.mo_lun = periph->periph_lun;
                        if (periph->periph_active < periph->periph_openings)
                                mo.mo_openings = periph->periph_active - 1;
                        else
                                mo.mo_openings = periph->periph_openings - 1;
#ifdef DIAGNOSTIC
                        if (mo.mo_openings < 0) {
                                scsipi_printaddr(periph);
                                printf("QUEUE FULL resulted in < 0 openings\n");
                                panic("scsipi_done");
                        }
#endif
                        if (mo.mo_openings == 0) {
                                scsipi_printaddr(periph);
                                printf("QUEUE FULL resulted in 0 openings\n");
                                mo.mo_openings = 1;
                        }
                        scsipi_async_event(chan, ASYNC_EVENT_MAX_OPENINGS, &mo);
                        error = ERESTART;
                } else if (xs->xs_retries != 0) {
                        xs->xs_retries--;
                        /*
                         * Wait one second, and try again.
                         */
                        if ((xs->xs_control & XS_CTL_POLL) ||
                            (chan->chan_flags & SCSIPI_CHAN_TACTIVE) == 0) {
                                delay(1000000);
                        } else if (!callout_pending(&periph->periph_callout)) {
                                scsipi_periph_freeze(periph, 1);
                                callout_reset(&periph->periph_callout,
                                    hz, scsipi_periph_timed_thaw, periph);
                        }
                        error = ERESTART;
                } else
                        error = EBUSY;
                break;

        case XS_REQUEUE:
                error = ERESTART;
                break;

        case XS_SELTIMEOUT:
        case XS_TIMEOUT:
                /*
                 * If the device hasn't gone away, honor retry counts.
                 *
                 * Note that if we're in the middle of probing it,
                 * it won't be found because it isn't here yet so
                 * we won't honor the retry count in that case.
                 */
                if (scsipi_lookup_periph(chan, periph->periph_target,
                    periph->periph_lun) && xs->xs_retries != 0) {
                        xs->xs_retries--;
                        error = ERESTART;
                } else
                        error = EIO;
                break;

        case XS_RESET:
                if (xs->xs_control & XS_CTL_REQSENSE) {
                        /*
                         * request sense interrupted by reset: signal it
                         * with EINTR return code.
                         */
                        error = EINTR;
                } else {
                        if (xs->xs_retries != 0) {
                                xs->xs_retries--;
                                error = ERESTART;
                        } else
                                error = EIO;
                }
                break;

        case XS_DRIVER_STUFFUP:
                scsipi_printaddr(periph);
                printf("generic HBA error\n");
                error = EIO;
                break;
        default:
                scsipi_printaddr(periph);
                printf("invalid return code from adapter: %d\n", xs->error);
                error = EIO;
                break;
        }

        s = splbio();
        if (error == ERESTART) {
                /*
                 * If we get here, the periph has been thawed and frozen
                 * again if we had to issue recovery commands.  Alternatively,
                 * it may have been frozen again and in a timed thaw.  In
                 * any case, we thaw the periph once we re-enqueue the
                 * command.  Once the periph is fully thawed, it will begin
                 * operation again.
                 */
                xs->error = XS_NOERROR;
                xs->status = SCSI_OK;
                xs->xs_status &= ~XS_STS_DONE;
                xs->xs_requeuecnt++;
                error = scsipi_enqueue(xs);
                if (error == 0) {
                        scsipi_periph_thaw(periph, 1);
                        splx(s);
                        return (ERESTART);
                }
        }

        /*
         * scsipi_done() freezes the queue if not XS_NOERROR.
         * Thaw it here.
         */
        if (xs->error != XS_NOERROR)
                scsipi_periph_thaw(periph, 1);

        if (periph->periph_switch->psw_done)
                periph->periph_switch->psw_done(xs, error);

        if (xs->xs_control & XS_CTL_ASYNC)
                scsipi_put_xs(xs);
        splx(s);

        return (error);
}

/*
 * Issue a request sense for the given scsipi_xfer.  Called when the xfer
 * returns with a CHECK_CONDITION status.  Must be called in valid thread
 * context and at splbio().
 */

static void
scsipi_request_sense(struct scsipi_xfer *xs)
{
        struct scsipi_periph *periph = xs->xs_periph;
        int flags, error;
        struct scsipi_sense cmd;

        periph->periph_flags |= PERIPH_SENSE;

        /* if command was polling, request sense will too */
        flags = xs->xs_control & XS_CTL_POLL;
        /* Polling commands can't sleep */
        if (flags)
                flags |= XS_CTL_NOSLEEP;

        flags |= XS_CTL_REQSENSE | XS_CTL_URGENT | XS_CTL_DATA_IN |
            XS_CTL_THAW_PERIPH | XS_CTL_FREEZE_PERIPH;

        memset(&cmd, 0, sizeof(cmd));
        cmd.opcode = REQUEST_SENSE;
        cmd.length = sizeof(struct scsipi_sense_data);

        error = scsipi_command(periph, (void *)&cmd, sizeof(cmd),
            (void *)&xs->sense.scsi_sense, sizeof(struct scsipi_sense_data),
            0, 1000, NULL, flags);
        periph->periph_flags &= ~PERIPH_SENSE;
        periph->periph_xscheck = NULL;
        switch (error) {
        case 0:
                /* we have a valid sense */
                xs->error = XS_SENSE;
                return;
        case EINTR:
                /* REQUEST_SENSE interrupted by bus reset. */
                xs->error = XS_RESET;
                return;
        case EIO:
                /* request sense couldn't be performed */
                /*
                 * XXX this isn't quite right but we don't have anything
                 * better for now
                 */
                xs->error = XS_DRIVER_STUFFUP;
                return;
        default:
                /* Notify that request sense failed. */
                xs->error = XS_DRIVER_STUFFUP;
                scsipi_printaddr(periph);
                printf("request sense failed with error %d\n", error);
                return;
        }
}

/*
 * scsipi_enqueue:
 *
 *      Enqueue an xfer on a channel.
 */
static int
scsipi_enqueue(struct scsipi_xfer *xs)
{
        struct scsipi_channel *chan = xs->xs_periph->periph_channel;
        struct scsipi_xfer *qxs;
        int s;

        s = splbio();

        /*
         * If the xfer is to be polled, and there are already jobs on
         * the queue, we can't proceed.
         */
        if ((xs->xs_control & XS_CTL_POLL) != 0 &&
            TAILQ_FIRST(&chan->chan_queue) != NULL) {
                splx(s);
                xs->error = XS_DRIVER_STUFFUP;
                return (EAGAIN);
        }

        /*
         * If we have an URGENT xfer, it's an error recovery command
         * and it should just go on the head of the channel's queue.
         */
        if (xs->xs_control & XS_CTL_URGENT) {
                TAILQ_INSERT_HEAD(&chan->chan_queue, xs, channel_q);
                goto out;
        }

        /*
         * If this xfer has already been on the queue before, we
         * need to reinsert it in the correct order.  That order is:
         *
         *      Immediately before the first xfer for this periph
         *      with a requeuecnt less than xs->xs_requeuecnt.
         *
         * Failing that, at the end of the queue.  (We'll end up
         * there naturally.)
         */
        if (xs->xs_requeuecnt != 0) {
                for (qxs = TAILQ_FIRST(&chan->chan_queue); qxs != NULL;
                     qxs = TAILQ_NEXT(qxs, channel_q)) {
                        if (qxs->xs_periph == xs->xs_periph &&
                            qxs->xs_requeuecnt < xs->xs_requeuecnt)
                                break;
                }
                if (qxs != NULL) {
                        TAILQ_INSERT_AFTER(&chan->chan_queue, qxs, xs,
                            channel_q);
                        goto out;
                }
        }
        TAILQ_INSERT_TAIL(&chan->chan_queue, xs, channel_q);
 out:
        if (xs->xs_control & XS_CTL_THAW_PERIPH)
                scsipi_periph_thaw(xs->xs_periph, 1);
        splx(s);
        return (0);
}

/*
 * scsipi_run_queue:
 *
 *      Start as many xfers as possible running on the channel.
 */
static void
scsipi_run_queue(struct scsipi_channel *chan)
{
        struct scsipi_xfer *xs;
        struct scsipi_periph *periph;
        int s;

        for (;;) {
                s = splbio();

                /*
                 * If the channel is frozen, we can't do any work right
                 * now.
                 */
                if (chan->chan_qfreeze != 0) {
                        splx(s);
                        return;
                }

                /*
                 * Look for work to do, and make sure we can do it.
                 */
                for (xs = TAILQ_FIRST(&chan->chan_queue); xs != NULL;
                     xs = TAILQ_NEXT(xs, channel_q)) {
                        periph = xs->xs_periph;

                        if ((periph->periph_sent >= periph->periph_openings) ||
                            periph->periph_qfreeze != 0 ||
                            (periph->periph_flags & PERIPH_UNTAG) != 0)
                                continue;

                        if ((periph->periph_flags &
                            (PERIPH_RECOVERING | PERIPH_SENSE)) != 0 &&
                            (xs->xs_control & XS_CTL_URGENT) == 0)
                                continue;

                        /*
                         * We can issue this xfer!
                         */
                        goto got_one;
                }

                /*
                 * Can't find any work to do right now.
                 */
                splx(s);
                return;

 got_one:
                /*
                 * Have an xfer to run.  Allocate a resource from
                 * the adapter to run it.  If we can't allocate that
                 * resource, we don't dequeue the xfer.
                 */
                if (scsipi_get_resource(chan) == 0) {
                        /*
                         * Adapter is out of resources.  If the adapter
                         * supports it, attempt to grow them.
                         */
                        if (scsipi_grow_resources(chan) == 0) {
                                /*
                                 * Wasn't able to grow resources,
                                 * nothing more we can do.
                                 */
                                if (xs->xs_control & XS_CTL_POLL) {
                                        scsipi_printaddr(xs->xs_periph);
                                        printf("polling command but no "
                                            "adapter resources");
                                        /* We'll panic shortly... */
                                }
                                splx(s);

                                /*
                                 * XXX: We should be able to note that
                                 * XXX: resources are needed here!
                                 */
                                return;
                        }
                        /*
                         * scsipi_grow_resources() allocated the resource
                         * for us.
                         */
                }

                /*
                 * We have a resource to run this xfer, do it!
                 */
                TAILQ_REMOVE(&chan->chan_queue, xs, channel_q);

                /*
                 * If the command is to be tagged, allocate a tag ID
                 * for it.
                 */
                if (XS_CTL_TAGTYPE(xs) != 0)
                        scsipi_get_tag(xs);
                else
                        periph->periph_flags |= PERIPH_UNTAG;
                periph->periph_sent++;
                splx(s);

                scsipi_adapter_request(chan, ADAPTER_REQ_RUN_XFER, xs);
        }
#ifdef DIAGNOSTIC
        panic("scsipi_run_queue: impossible");
#endif
}

/*
 * scsipi_execute_xs:
 *
 *      Begin execution of an xfer, waiting for it to complete, if necessary.
 */
int
scsipi_execute_xs(struct scsipi_xfer *xs)
{
        struct scsipi_periph *periph = xs->xs_periph;
        struct scsipi_channel *chan = periph->periph_channel;
        int oasync, async, poll, error, s;

        KASSERT(!cold);

        (chan->chan_bustype->bustype_cmd)(xs);

        if (xs->xs_control & XS_CTL_DATA_ONSTACK) {
#if 1
                if (xs->xs_control & XS_CTL_ASYNC)
                        panic("scsipi_execute_xs: on stack and async");
#endif
                /*
                 * If the I/O buffer is allocated on stack, the
                 * process must NOT be swapped out, as the device will
                 * be accessing the stack.
                 */
                PHOLD(curlwp);
        }

        xs->xs_status &= ~XS_STS_DONE;
        xs->error = XS_NOERROR;
        xs->resid = xs->datalen;
        xs->status = SCSI_OK;

#ifdef SCSIPI_DEBUG
        if (xs->xs_periph->periph_dbflags & SCSIPI_DB3) {
                printf("scsipi_execute_xs: ");
                show_scsipi_xs(xs);
                printf("\n");
        }
#endif

        /*
         * Deal with command tagging:
         *
         *      - If the device's current operating mode doesn't
         *        include tagged queueing, clear the tag mask.
         *
         *      - If the device's current operating mode *does*
         *        include tagged queueing, set the tag_type in
         *        the xfer to the appropriate byte for the tag
         *        message.
         */
        if ((PERIPH_XFER_MODE(periph) & PERIPH_CAP_TQING) == 0 ||
            (xs->xs_control & XS_CTL_REQSENSE)) {
                xs->xs_control &= ~XS_CTL_TAGMASK;
                xs->xs_tag_type = 0;
        } else {
                /*
                 * If the request doesn't specify a tag, give Head
                 * tags to URGENT operations and Ordered tags to
                 * everything else.
                 */
                if (XS_CTL_TAGTYPE(xs) == 0) {
                        if (xs->xs_control & XS_CTL_URGENT)
                                xs->xs_control |= XS_CTL_HEAD_TAG;
                        else
                                xs->xs_control |= XS_CTL_ORDERED_TAG;
                }

                switch (XS_CTL_TAGTYPE(xs)) {
                case XS_CTL_ORDERED_TAG:
                        xs->xs_tag_type = MSG_ORDERED_Q_TAG;
                        break;

                case XS_CTL_SIMPLE_TAG:
                        xs->xs_tag_type = MSG_SIMPLE_Q_TAG;
                        break;

                case XS_CTL_HEAD_TAG:
                        xs->xs_tag_type = MSG_HEAD_OF_Q_TAG;
                        break;

                default:
                        scsipi_printaddr(periph);
                        printf("invalid tag mask 0x%08x\n",
                            XS_CTL_TAGTYPE(xs));
                        panic("scsipi_execute_xs");
                }
        }

        /* If the adapter wants us to poll, poll. */
        if (chan->chan_adapter->adapt_flags & SCSIPI_ADAPT_POLL_ONLY)
                xs->xs_control |= XS_CTL_POLL;

        /*
         * If we don't yet have a completion thread, or we are to poll for
         * completion, clear the ASYNC flag.
         */
        oasync = (xs->xs_control & XS_CTL_ASYNC);
        if (chan->chan_thread == NULL || (xs->xs_control & XS_CTL_POLL) != 0)
                xs->xs_control &= ~XS_CTL_ASYNC;

        async = (xs->xs_control & XS_CTL_ASYNC);
        poll = (xs->xs_control & XS_CTL_POLL);

#ifdef DIAGNOSTIC
        if (oasync != 0 && xs->bp == NULL)
                panic("scsipi_execute_xs: XS_CTL_ASYNC but no buf");
#endif

        /*
         * Enqueue the transfer.  If we're not polling for completion, this
         * should ALWAYS return `no error'.
         */
        error = scsipi_enqueue(xs);
        if (error) {
                if (poll == 0) {
                        scsipi_printaddr(periph);
                        printf("not polling, but enqueue failed with %d\n",
                            error);
                        panic("scsipi_execute_xs");
                }

                scsipi_printaddr(periph);
                printf("should have flushed queue?\n");
                goto free_xs;
        }

 restarted:
        scsipi_run_queue(chan);

        /*
         * The xfer is enqueued, and possibly running.  If it's to be
         * completed asynchronously, just return now.
         */
        if (async)
                return (0);

        /*
         * Not an asynchronous command; wait for it to complete.
         */
        s = splbio();
        while ((xs->xs_status & XS_STS_DONE) == 0) {
                if (poll) {
                        scsipi_printaddr(periph);
                        printf("polling command not done\n");
                        panic("scsipi_execute_xs");
                }
                (void) tsleep(xs, PRIBIO, "xscmd", 0);
        }
        splx(s);

        /*
         * Command is complete.  scsipi_done() has awakened us to perform
         * the error handling.
         */
        error = scsipi_complete(xs);
        if (error == ERESTART)
                goto restarted;

        /*
         * If it was meant to run async and we cleared async ourselves,
         * don't return an error here.  It has already been handled.
         */
        if (oasync)
                error = 0;
        /*
         * Command completed successfully or fatal error occurred.  Fall
         * into....
         */
2032 free_xs:
2033 s = splbio();
2034 scsipi_put_xs(xs);
2035 splx(s);
2036
2037 /*
2038 * Kick the queue, keep it running in case it stopped for some
2039 * reason.
2040 */
2041 scsipi_run_queue(chan);
2042
2043 if (xs->xs_control & XS_CTL_DATA_ONSTACK)
2044 PRELE(curlwp);
2045 return (error);
2046 }
2047
2048 /*
2049 * scsipi_completion_thread:
2050 *
2051 * This is the completion thread. We wait for errors on
2052 * asynchronous xfers, and perform the error handling
2053 * function, restarting the command, if necessary.
2054 */
2055 static void
2056 scsipi_completion_thread(void *arg)
2057 {
2058 struct scsipi_channel *chan = arg;
2059 struct scsipi_xfer *xs;
2060 int s;
2061
2062 if (chan->chan_init_cb)
2063 (*chan->chan_init_cb)(chan, chan->chan_init_cb_arg);
2064
2065 s = splbio();
2066 chan->chan_flags |= SCSIPI_CHAN_TACTIVE;
2067 splx(s);
2068 for (;;) {
2069 s = splbio();
2070 xs = TAILQ_FIRST(&chan->chan_complete);
2071 if (xs == NULL && chan->chan_tflags == 0) {
2072 /* nothing to do; wait */
2073 (void) tsleep(&chan->chan_complete, PRIBIO,
2074 "sccomp", 0);
2075 splx(s);
2076 continue;
2077 }
2078 if (chan->chan_tflags & SCSIPI_CHANT_CALLBACK) {
2079 /* call chan_callback from thread context */
2080 chan->chan_tflags &= ~SCSIPI_CHANT_CALLBACK;
2081 chan->chan_callback(chan, chan->chan_callback_arg);
2082 splx(s);
2083 continue;
2084 }
2085 if (chan->chan_tflags & SCSIPI_CHANT_GROWRES) {
2086 /* attempt to get more openings for this channel */
2087 chan->chan_tflags &= ~SCSIPI_CHANT_GROWRES;
2088 scsipi_adapter_request(chan,
2089 ADAPTER_REQ_GROW_RESOURCES, NULL);
2090 scsipi_channel_thaw(chan, 1);
2091 splx(s);
2092 continue;
2093 }
2094 if (chan->chan_tflags & SCSIPI_CHANT_KICK) {
2095 /* explicitly run the queues for this channel */
2096 chan->chan_tflags &= ~SCSIPI_CHANT_KICK;
2097 scsipi_run_queue(chan);
2098 splx(s);
2099 continue;
2100 }
2101 if (chan->chan_tflags & SCSIPI_CHANT_SHUTDOWN) {
2102 splx(s);
2103 break;
2104 }
2105 if (xs) {
2106 TAILQ_REMOVE(&chan->chan_complete, xs, channel_q);
2107 splx(s);
2108
2109 /*
2110 * Have an xfer with an error; process it.
2111 */
2112 (void) scsipi_complete(xs);
2113
2114 /*
2115 * Kick the queue; keep it running if it was stopped
2116 * for some reason.
2117 */
2118 scsipi_run_queue(chan);
2119 } else {
2120 splx(s);
2121 }
2122 }
2123
2124 chan->chan_thread = NULL;
2125
2126 /* In case parent is waiting for us to exit. */
2127 wakeup(&chan->chan_thread);
2128
2129 kthread_exit(0);
2130 }
2131
2132 /*
2133 * scsipi_create_completion_thread:
2134 *
2135 * Callback to actually create the completion thread.
2136 */
2137 void
2138 scsipi_create_completion_thread(void *arg)
2139 {
2140 struct scsipi_channel *chan = arg;
2141 struct scsipi_adapter *adapt = chan->chan_adapter;
2142
2143 if (kthread_create1(scsipi_completion_thread, chan,
2144 &chan->chan_thread, "%s", chan->chan_name)) {
2145 printf("%s: unable to create completion thread for "
2146 "channel %d\n", adapt->adapt_dev->dv_xname,
2147 chan->chan_channel);
2148 panic("scsipi_create_completion_thread");
2149 }
2150 }
2151
2152 /*
2153 * scsipi_thread_call_callback:
2154 *
2155  *	Request that a callback be run from the completion thread.
2156 */
2157 int
2158 scsipi_thread_call_callback(struct scsipi_channel *chan,
2159 void (*callback)(struct scsipi_channel *, void *), void *arg)
2160 {
2161 int s;
2162
2163 s = splbio();
2164 if ((chan->chan_flags & SCSIPI_CHAN_TACTIVE) == 0) {
2165 /* kernel thread doesn't exist yet */
2166 splx(s);
2167 return ESRCH;
2168 }
2169 if (chan->chan_tflags & SCSIPI_CHANT_CALLBACK) {
2170 splx(s);
2171 return EBUSY;
2172 }
2173 scsipi_channel_freeze(chan, 1);
2174 chan->chan_callback = callback;
2175 chan->chan_callback_arg = arg;
2176 chan->chan_tflags |= SCSIPI_CHANT_CALLBACK;
2177 wakeup(&chan->chan_complete);
2178 splx(s);
2179 	return (0);
2180 }
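
/*
 * Illustrative sketch (not compiled, hypothetical adapter code): an
 * adapter that must do non-trivial recovery can defer the work from
 * interrupt context to the completion thread with
 * scsipi_thread_call_callback().  Note that the function freezes the
 * channel by one count before queueing the callback, so the callback
 * is expected to thaw it once recovery is complete.
 */
#if 0
static void
example_recover_cb(struct scsipi_channel *chan, void *arg)
{

	/* ... reset the hardware, restart negotiations, etc. ... */
	scsipi_channel_thaw(chan, 1);	/* undo the freeze taken for us */
}

static void
example_intr_defer(struct scsipi_channel *chan)
{

	if (scsipi_thread_call_callback(chan, example_recover_cb, NULL))
		printf("recovery callback already pending\n");
}
#endif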
2181
2182 /*
2183 * scsipi_async_event:
2184 *
2185 * Handle an asynchronous event from an adapter.
2186 */
2187 void
2188 scsipi_async_event(struct scsipi_channel *chan, scsipi_async_event_t event,
2189 void *arg)
2190 {
2191 int s;
2192
2193 s = splbio();
2194 switch (event) {
2195 case ASYNC_EVENT_MAX_OPENINGS:
2196 scsipi_async_event_max_openings(chan,
2197 (struct scsipi_max_openings *)arg);
2198 break;
2199
2200 case ASYNC_EVENT_XFER_MODE:
2201 scsipi_async_event_xfer_mode(chan,
2202 (struct scsipi_xfer_mode *)arg);
2203 break;
2204 case ASYNC_EVENT_RESET:
2205 scsipi_async_event_channel_reset(chan);
2206 break;
2207 }
2208 splx(s);
2209 }
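
/*
 * Illustrative sketch (not compiled, hypothetical adapter code): after
 * completing a negotiation, an adapter reports the agreed parameters by
 * posting an ASYNC_EVENT_XFER_MODE event.  The xm_* fields match the
 * ones consumed by scsipi_async_event_xfer_mode() below; xm_period is
 * the sync factor (see the scsipi_syncparams table further down), and
 * the offset value here is only an example.
 */
#if 0
static void
example_report_negotiation(struct scsipi_channel *chan, int target)
{
	struct scsipi_xfer_mode xm;

	xm.xm_target = target;
	xm.xm_mode = PERIPH_CAP_SYNC | PERIPH_CAP_WIDE16;
	xm.xm_period = 0x0a;		/* sync factor: FAST-40 */
	xm.xm_offset = 31;

	scsipi_async_event(chan, ASYNC_EVENT_XFER_MODE, &xm);
}
#endif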
2210
2211 /*
2212 * scsipi_print_xfer_mode:
2213 *
2214 * Print a periph's capabilities.
2215 */
2216 void
2217 scsipi_print_xfer_mode(struct scsipi_periph *periph)
2218 {
2219 int period, freq, speed, mbs;
2220
2221 if ((periph->periph_flags & PERIPH_MODE_VALID) == 0)
2222 return;
2223
2224 aprint_normal("%s: ", periph->periph_dev->dv_xname);
2225 if (periph->periph_mode & (PERIPH_CAP_SYNC | PERIPH_CAP_DT)) {
2226 period = scsipi_sync_factor_to_period(periph->periph_period);
2227 aprint_normal("sync (%d.%02dns offset %d)",
2228 period / 100, period % 100, periph->periph_offset);
2229 } else
2230 aprint_normal("async");
2231
2232 if (periph->periph_mode & PERIPH_CAP_WIDE32)
2233 aprint_normal(", 32-bit");
2234 else if (periph->periph_mode & (PERIPH_CAP_WIDE16 | PERIPH_CAP_DT))
2235 aprint_normal(", 16-bit");
2236 else
2237 aprint_normal(", 8-bit");
2238
2239 if (periph->periph_mode & (PERIPH_CAP_SYNC | PERIPH_CAP_DT)) {
2240 freq = scsipi_sync_factor_to_freq(periph->periph_period);
2241 speed = freq;
2242 if (periph->periph_mode & PERIPH_CAP_WIDE32)
2243 speed *= 4;
2244 else if (periph->periph_mode &
2245 (PERIPH_CAP_WIDE16 | PERIPH_CAP_DT))
2246 speed *= 2;
2247 mbs = speed / 1000;
2248 if (mbs > 0)
2249 aprint_normal(" (%d.%03dMB/s)", mbs, speed % 1000);
2250 else
2251 aprint_normal(" (%dKB/s)", speed % 1000);
2252 }
2253
2254 aprint_normal(" transfers");
2255
2256 if (periph->periph_mode & PERIPH_CAP_TQING)
2257 aprint_normal(", tagged queueing");
2258
2259 aprint_normal("\n");
2260 }
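
/*
 * Worked example of the arithmetic above: a periph at sync factor 0x0a
 * (FAST-40, ss_period 2500 = 25.00ns) on a 16-bit bus prints as follows:
 * scsipi_sync_factor_to_freq() returns 100000000 / 2500 = 40000 (kHz),
 * the 16-bit width doubles that to 80000, so mbs = 80 and the line reads
 * "sync (25.00ns offset N), 16-bit (80.000MB/s) transfers".
 */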
2261
2262 /*
2263 * scsipi_async_event_max_openings:
2264 *
2265 * Update the maximum number of outstanding commands a
2266 * device may have.
2267 */
2268 static void
2269 scsipi_async_event_max_openings(struct scsipi_channel *chan,
2270 struct scsipi_max_openings *mo)
2271 {
2272 struct scsipi_periph *periph;
2273 int minlun, maxlun;
2274
2275 if (mo->mo_lun == -1) {
2276 /*
2277 * Wildcarded; apply it to all LUNs.
2278 */
2279 minlun = 0;
2280 maxlun = chan->chan_nluns - 1;
2281 } else
2282 minlun = maxlun = mo->mo_lun;
2283
2284 /* XXX This could really suck with a large LUN space. */
2285 for (; minlun <= maxlun; minlun++) {
2286 periph = scsipi_lookup_periph(chan, mo->mo_target, minlun);
2287 if (periph == NULL)
2288 continue;
2289
2290 if (mo->mo_openings < periph->periph_openings)
2291 periph->periph_openings = mo->mo_openings;
2292 else if (mo->mo_openings > periph->periph_openings &&
2293 (periph->periph_flags & PERIPH_GROW_OPENINGS) != 0)
2294 periph->periph_openings = mo->mo_openings;
2295 }
2296 }
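
/*
 * Illustrative sketch (not compiled, hypothetical adapter code): an
 * adapter that gets a QUEUE FULL status can throttle the device by
 * posting an ASYNC_EVENT_MAX_OPENINGS event with a reduced count.
 * Passing -1 for mo_lun applies the change to every LUN on the
 * target, as handled above.
 */
#if 0
static void
example_throttle_target(struct scsipi_channel *chan, int target,
    int openings)
{
	struct scsipi_max_openings mo;

	mo.mo_target = target;
	mo.mo_lun = -1;			/* wildcard: all LUNs */
	mo.mo_openings = openings;	/* new ceiling for the device */

	scsipi_async_event(chan, ASYNC_EVENT_MAX_OPENINGS, &mo);
}
#endif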
2297
2298 /*
2299 * scsipi_async_event_xfer_mode:
2300 *
2301 * Update the xfer mode for all periphs sharing the
2302 * specified I_T Nexus.
2303 */
2304 static void
2305 scsipi_async_event_xfer_mode(struct scsipi_channel *chan,
2306 struct scsipi_xfer_mode *xm)
2307 {
2308 struct scsipi_periph *periph;
2309 int lun, announce, mode, period, offset;
2310
2311 for (lun = 0; lun < chan->chan_nluns; lun++) {
2312 periph = scsipi_lookup_periph(chan, xm->xm_target, lun);
2313 if (periph == NULL)
2314 continue;
2315 announce = 0;
2316
2317 /*
2318 * Clamp the xfer mode down to this periph's capabilities.
2319 */
2320 mode = xm->xm_mode & periph->periph_cap;
2321 if (mode & PERIPH_CAP_SYNC) {
2322 period = xm->xm_period;
2323 offset = xm->xm_offset;
2324 } else {
2325 period = 0;
2326 offset = 0;
2327 }
2328
2329 /*
2330 * If we do not have a valid xfer mode yet, or the parameters
2331 * are different, announce them.
2332 */
2333 if ((periph->periph_flags & PERIPH_MODE_VALID) == 0 ||
2334 periph->periph_mode != mode ||
2335 periph->periph_period != period ||
2336 periph->periph_offset != offset)
2337 announce = 1;
2338
2339 periph->periph_mode = mode;
2340 periph->periph_period = period;
2341 periph->periph_offset = offset;
2342 periph->periph_flags |= PERIPH_MODE_VALID;
2343
2344 if (announce)
2345 scsipi_print_xfer_mode(periph);
2346 }
2347 }
2348
2349 /*
2350 * scsipi_set_xfer_mode:
2351 *
2352 * Set the xfer mode for the specified I_T Nexus.
2353 */
2354 void
2355 scsipi_set_xfer_mode(struct scsipi_channel *chan, int target, int immed)
2356 {
2357 struct scsipi_xfer_mode xm;
2358 struct scsipi_periph *itperiph;
2359 int lun, s;
2360
2361 /*
2362 * Go to the minimal xfer mode.
2363 */
2364 xm.xm_target = target;
2365 xm.xm_mode = 0;
2366 xm.xm_period = 0; /* ignored */
2367 xm.xm_offset = 0; /* ignored */
2368
2369 /*
2370 * Find the first LUN we know about on this I_T Nexus.
2371 */
2372 for (itperiph = NULL, lun = 0; lun < chan->chan_nluns; lun++) {
2373 itperiph = scsipi_lookup_periph(chan, target, lun);
2374 if (itperiph != NULL)
2375 break;
2376 }
2377 if (itperiph != NULL) {
2378 xm.xm_mode = itperiph->periph_cap;
2379 /*
2380 * Now issue the request to the adapter.
2381 */
2382 s = splbio();
2383 scsipi_adapter_request(chan, ADAPTER_REQ_SET_XFER_MODE, &xm);
2384 splx(s);
2385 /*
2386 * If we want this to happen immediately, issue a dummy
2387 * command, since most adapters can't really negotiate unless
2388 * they're executing a job.
2389 */
2390 if (immed != 0) {
2391 (void) scsipi_test_unit_ready(itperiph,
2392 XS_CTL_DISCOVERY | XS_CTL_IGNORE_ILLEGAL_REQUEST |
2393 XS_CTL_IGNORE_NOT_READY |
2394 XS_CTL_IGNORE_MEDIA_CHANGE);
2395 }
2396 }
2397 }
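
/*
 * Illustrative usage: a driver that has just enabled or disabled
 * sync/wide support for a target would call, for example,
 * scsipi_set_xfer_mode(chan, target, 1), so that the TEST UNIT READY
 * issued above forces the renegotiation right away rather than on the
 * next regular command.
 */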
2398
2399 /*
2400  * scsipi_async_event_channel_reset:
2401  *
2402  *	Handle a SCSI bus reset.
2403  *	Called at splbio().
2404 */
2405 static void
2406 scsipi_async_event_channel_reset(struct scsipi_channel *chan)
2407 {
2408 struct scsipi_xfer *xs, *xs_next;
2409 struct scsipi_periph *periph;
2410 int target, lun;
2411
2412 /*
2413 	 * The channel has been reset.  Also mark pending REQUEST_SENSE
2414 	 * commands with XS_RESET, since the sense data is no longer
2415 	 * available.  We can't call scsipi_done() from here, as the command
2416 	 * has not been sent to the adapter yet (doing so would corrupt the
	 * accounting).
2417 */
2418
2419 for (xs = TAILQ_FIRST(&chan->chan_queue); xs != NULL; xs = xs_next) {
2420 xs_next = TAILQ_NEXT(xs, channel_q);
2421 if (xs->xs_control & XS_CTL_REQSENSE) {
2422 TAILQ_REMOVE(&chan->chan_queue, xs, channel_q);
2423 xs->error = XS_RESET;
2424 if ((xs->xs_control & XS_CTL_ASYNC) != 0)
2425 TAILQ_INSERT_TAIL(&chan->chan_complete, xs,
2426 channel_q);
2427 }
2428 }
2429 wakeup(&chan->chan_complete);
2430 /* Catch xs with pending sense which may not have a REQSENSE xs yet */
2431 for (target = 0; target < chan->chan_ntargets; target++) {
2432 if (target == chan->chan_id)
2433 continue;
2434 for (lun = 0; lun < chan->chan_nluns; lun++) {
2435 periph = scsipi_lookup_periph(chan, target, lun);
2436 if (periph) {
2437 xs = periph->periph_xscheck;
2438 if (xs)
2439 xs->error = XS_RESET;
2440 }
2441 }
2442 }
2443 }
2444
2445 /*
2446 * scsipi_target_detach:
2447 *
2448  *	Detach all periphs associated with an I_T nexus.
2449  *	Must be called from a valid thread context.
2450 */
2451 int
2452 scsipi_target_detach(struct scsipi_channel *chan, int target, int lun,
2453 int flags)
2454 {
2455 struct scsipi_periph *periph;
2456 int ctarget, mintarget, maxtarget;
2457 int clun, minlun, maxlun;
2458 int error;
2459
2460 if (target == -1) {
2461 mintarget = 0;
2462 maxtarget = chan->chan_ntargets;
2463 } else {
2464 if (target == chan->chan_id)
2465 return EINVAL;
2466 if (target < 0 || target >= chan->chan_ntargets)
2467 return EINVAL;
2468 mintarget = target;
2469 maxtarget = target + 1;
2470 }
2471
2472 if (lun == -1) {
2473 minlun = 0;
2474 maxlun = chan->chan_nluns;
2475 } else {
2476 if (lun < 0 || lun >= chan->chan_nluns)
2477 return EINVAL;
2478 minlun = lun;
2479 maxlun = lun + 1;
2480 }
2481
2482 for (ctarget = mintarget; ctarget < maxtarget; ctarget++) {
2483 if (ctarget == chan->chan_id)
2484 continue;
2485
2486 for (clun = minlun; clun < maxlun; clun++) {
2487 periph = scsipi_lookup_periph(chan, ctarget, clun);
2488 if (periph == NULL)
2489 continue;
2490 error = config_detach(periph->periph_dev, flags);
2491 if (error)
2492 return (error);
2493 }
2494 }
2495 	return (0);
2496 }
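
/*
 * Illustrative usage: both the target and LUN arguments accept -1 as a
 * wildcard, so detaching every periph on target 3, and then every periph
 * on the whole channel, would look like this (DETACH_FORCE is the usual
 * autoconf flag from <sys/device.h>):
 *
 *	error = scsipi_target_detach(chan, 3, -1, DETACH_FORCE);
 *	error = scsipi_target_detach(chan, -1, -1, DETACH_FORCE);
 */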
2497
2498 /*
2499 * scsipi_adapter_addref:
2500 *
2501 * Add a reference to the adapter pointed to by the provided
2502 * link, enabling the adapter if necessary.
2503 */
2504 int
2505 scsipi_adapter_addref(struct scsipi_adapter *adapt)
2506 {
2507 int s, error = 0;
2508
2509 s = splbio();
2510 if (adapt->adapt_refcnt++ == 0 && adapt->adapt_enable != NULL) {
2511 error = (*adapt->adapt_enable)(adapt->adapt_dev, 1);
2512 if (error)
2513 adapt->adapt_refcnt--;
2514 }
2515 splx(s);
2516 return (error);
2517 }
2518
2519 /*
2520 * scsipi_adapter_delref:
2521 *
2522 * Delete a reference to the adapter pointed to by the provided
2523 * link, disabling the adapter if possible.
2524 */
2525 void
2526 scsipi_adapter_delref(struct scsipi_adapter *adapt)
2527 {
2528 int s;
2529
2530 s = splbio();
2531 if (adapt->adapt_refcnt-- == 1 && adapt->adapt_enable != NULL)
2532 (void) (*adapt->adapt_enable)(adapt->adapt_dev, 0);
2533 splx(s);
2534 }
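
/*
 * Illustrative sketch (not compiled, hypothetical periph driver code):
 * the reference count above gates the adapter's (*adapt_enable)() hook,
 * so a periph driver typically brackets device use with addref/delref
 * in its open and close routines:
 */
#if 0
static int
example_open(struct scsipi_periph *periph)
{
	int error;

	/* First open enables the adapter via adapt_enable(..., 1). */
	error = scsipi_adapter_addref(periph->periph_channel->chan_adapter);
	if (error)
		return (error);
	/* ... per-device open work ... */
	return (0);
}

static void
example_close(struct scsipi_periph *periph)
{

	/* Last close disables the adapter via adapt_enable(..., 0). */
	scsipi_adapter_delref(periph->periph_channel->chan_adapter);
}
#endif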
2535
2536 static struct scsipi_syncparam {
2537 int ss_factor;
2538 int ss_period; /* ns * 100 */
2539 } scsipi_syncparams[] = {
2540 { 0x08, 625 }, /* FAST-160 (Ultra320) */
2541 { 0x09, 1250 }, /* FAST-80 (Ultra160) */
2542 { 0x0a, 2500 }, /* FAST-40 40MHz (Ultra2) */
2543 { 0x0b, 3030 }, /* FAST-40 33MHz (Ultra2) */
2544 { 0x0c, 5000 }, /* FAST-20 (Ultra) */
2545 };
2546 static const int scsipi_nsyncparams =
2547 sizeof(scsipi_syncparams) / sizeof(scsipi_syncparams[0]);
2548
2549 int
2550 scsipi_sync_period_to_factor(int period /* ns * 100 */)
2551 {
2552 int i;
2553
2554 for (i = 0; i < scsipi_nsyncparams; i++) {
2555 if (period <= scsipi_syncparams[i].ss_period)
2556 return (scsipi_syncparams[i].ss_factor);
2557 }
2558
2559 return ((period / 100) / 4);
2560 }
2561
2562 int
2563 scsipi_sync_factor_to_period(int factor)
2564 {
2565 int i;
2566
2567 for (i = 0; i < scsipi_nsyncparams; i++) {
2568 if (factor == scsipi_syncparams[i].ss_factor)
2569 return (scsipi_syncparams[i].ss_period);
2570 }
2571
2572 return ((factor * 4) * 100);
2573 }
2574
2575 int
2576 scsipi_sync_factor_to_freq(int factor)
2577 {
2578 int i;
2579
2580 for (i = 0; i < scsipi_nsyncparams; i++) {
2581 if (factor == scsipi_syncparams[i].ss_factor)
2582 return (100000000 / scsipi_syncparams[i].ss_period);
2583 }
2584
2585 return (10000000 / ((factor * 4) * 10));
2586 }
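
/*
 * Worked example of the conversions above: factor 0x0c is in the table,
 * so scsipi_sync_factor_to_period(0x0c) returns 5000 (ns * 100, i.e.
 * 50ns) and scsipi_sync_factor_to_freq(0x0c) returns 100000000 / 5000 =
 * 20000 (kHz) -- FAST-20.  A factor outside the table, say 50, falls
 * through to the generic formulas: period 50 * 4 * 100 = 20000 (200ns),
 * frequency 10000000 / (50 * 4 * 10) = 5000 (kHz).
 */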
2587
2588 #ifdef SCSIPI_DEBUG
2589 /*
2590  * Given a scsipi_xfer, dump the request, in all its glory.
2591 */
2592 void
2593 show_scsipi_xs(struct scsipi_xfer *xs)
2594 {
2595
2596 printf("xs(%p): ", xs);
2597 printf("xs_control(0x%08x)", xs->xs_control);
2598 printf("xs_status(0x%08x)", xs->xs_status);
2599 printf("periph(%p)", xs->xs_periph);
2600 printf("retr(0x%x)", xs->xs_retries);
2601 printf("timo(0x%x)", xs->timeout);
2602 printf("cmd(%p)", xs->cmd);
2603 printf("len(0x%x)", xs->cmdlen);
2604 printf("data(%p)", xs->data);
2605 printf("len(0x%x)", xs->datalen);
2606 printf("res(0x%x)", xs->resid);
2607 printf("err(0x%x)", xs->error);
2608 printf("bp(%p)", xs->bp);
2609 show_scsipi_cmd(xs);
2610 }
2611
2612 void
2613 show_scsipi_cmd(struct scsipi_xfer *xs)
2614 {
2615 u_char *b = (u_char *) xs->cmd;
2616 int i = 0;
2617
2618 scsipi_printaddr(xs->xs_periph);
2619 printf(" command: ");
2620
2621 if ((xs->xs_control & XS_CTL_RESET) == 0) {
2622 while (i < xs->cmdlen) {
2623 if (i)
2624 printf(",");
2625 printf("0x%x", b[i++]);
2626 }
2627 printf("-[%d bytes]\n", xs->datalen);
2628 if (xs->datalen)
2629 show_mem(xs->data, min(64, xs->datalen));
2630 } else
2631 printf("-RESET-\n");
2632 }
2633
2634 void
2635 show_mem(u_char *address, int num)
2636 {
2637 int x;
2638
2639 printf("------------------------------");
2640 for (x = 0; x < num; x++) {
2641 if ((x % 16) == 0)
2642 printf("\n%03d: ", x);
2643 printf("%02x ", *address++);
2644 }
2645 printf("\n------------------------------\n");
2646 }
2647 #endif /* SCSIPI_DEBUG */
2648