/*	$NetBSD: scsipi_base.c,v 1.173 2016/12/18 15:27:34 skrll Exp $	*/

/*-
 * Copyright (c) 1998, 1999, 2000, 2002, 2003, 2004 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Charles M. Hannum; by Jason R. Thorpe of the Numerical Aerospace
 * Simulation Facility, NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: scsipi_base.c,v 1.173 2016/12/18 15:27:34 skrll Exp $");

#ifdef _KERNEL_OPT
#include "opt_scsi.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/buf.h>
#include <sys/uio.h>
#include <sys/malloc.h>
#include <sys/pool.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/proc.h>
#include <sys/kthread.h>
#include <sys/hash.h>
#include <sys/atomic.h>

#include <dev/scsipi/scsi_spc.h>
#include <dev/scsipi/scsipi_all.h>
#include <dev/scsipi/scsipi_disk.h>
#include <dev/scsipi/scsipiconf.h>
#include <dev/scsipi/scsipi_base.h>

#include <dev/scsipi/scsi_all.h>
#include <dev/scsipi/scsi_message.h>

#include <machine/param.h>

static int	scsipi_complete(struct scsipi_xfer *);
static void	scsipi_request_sense(struct scsipi_xfer *);
static int	scsipi_enqueue(struct scsipi_xfer *);
static void	scsipi_run_queue(struct scsipi_channel *chan);

static void	scsipi_completion_thread(void *);

static void	scsipi_get_tag(struct scsipi_xfer *);
static void	scsipi_put_tag(struct scsipi_xfer *);

static int	scsipi_get_resource(struct scsipi_channel *);
static void	scsipi_put_resource(struct scsipi_channel *);

static void	scsipi_async_event_max_openings(struct scsipi_channel *,
		    struct scsipi_max_openings *);
static void	scsipi_async_event_channel_reset(struct scsipi_channel *);

static void	scsipi_channel_freeze_locked(struct scsipi_channel *, int);

static void	scsipi_adapter_lock(struct scsipi_adapter *adapt);
static void	scsipi_adapter_unlock(struct scsipi_adapter *adapt);

static struct pool scsipi_xfer_pool;

int scsipi_xs_count = 0;

/*
 * scsipi_init:
 *
 *	Called when a scsibus or atapibus is attached to the system
 *	to initialize shared data structures.
 */
void
scsipi_init(void)
{
	static int scsipi_init_done;

	if (scsipi_init_done)
		return;
	scsipi_init_done = 1;

	/* Initialize the scsipi_xfer pool. */
	pool_init(&scsipi_xfer_pool, sizeof(struct scsipi_xfer), 0,
	    0, 0, "scxspl", NULL, IPL_BIO);
	if (pool_prime(&scsipi_xfer_pool,
	    PAGE_SIZE / sizeof(struct scsipi_xfer)) == ENOMEM) {
		printf("WARNING: not enough memory for scsipi_xfer_pool\n");
	}

	scsipi_ioctl_init();
}

/*
 * scsipi_channel_init:
 *
 *	Initialize a scsipi_channel when it is attached.
 */
int
scsipi_channel_init(struct scsipi_channel *chan)
{
	struct scsipi_adapter *adapt = chan->chan_adapter;
	int i;

	/* Initialize shared data. */
	scsipi_init();

	/* Initialize the queues. */
	TAILQ_INIT(&chan->chan_queue);
	TAILQ_INIT(&chan->chan_complete);

	for (i = 0; i < SCSIPI_CHAN_PERIPH_BUCKETS; i++)
		LIST_INIT(&chan->chan_periphtab[i]);

	/*
	 * Create the asynchronous completion thread.
	 */
	if (kthread_create(PRI_NONE, 0, NULL, scsipi_completion_thread, chan,
	    &chan->chan_thread, "%s", chan->chan_name)) {
		aprint_error_dev(adapt->adapt_dev, "unable to create completion thread for "
		    "channel %d\n", chan->chan_channel);
		panic("scsipi_channel_init");
	}

	return (0);
}
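
/*
 * Illustrative sketch (not part of this file): an adapter driver's attach
 * routine typically fills in the scsipi_channel before handing it to
 * scsipi_channel_init().  The softc and its field names below are
 * hypothetical; only the chan_* fields and the call itself come from
 * this API.
 *
 *	sc->sc_channel.chan_adapter = &sc->sc_adapter;
 *	sc->sc_channel.chan_bustype = &scsi_bustype;	(for example)
 *	sc->sc_channel.chan_channel = 0;
 *	sc->sc_channel.chan_ntargets = 8;
 *	sc->sc_channel.chan_nluns = 8;
 *	if (scsipi_channel_init(&sc->sc_channel) != 0)
 *		aprint_error_dev(sc->sc_dev, "channel init failed\n");
 */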

/*
 * scsipi_channel_shutdown:
 *
 *	Shutdown a scsipi_channel.
 */
void
scsipi_channel_shutdown(struct scsipi_channel *chan)
{

	mutex_enter(chan_mtx(chan));
	/*
	 * Shut down the completion thread.
	 */
	chan->chan_tflags |= SCSIPI_CHANT_SHUTDOWN;
	cv_broadcast(chan_cv_complete(chan));

	/*
	 * Now wait for the thread to exit.
	 */
	while (chan->chan_thread != NULL)
		cv_wait(chan_cv_thread(chan), chan_mtx(chan));
	mutex_exit(chan_mtx(chan));
}

static uint32_t
scsipi_chan_periph_hash(uint64_t t, uint64_t l)
{
	uint32_t hash;

	hash = hash32_buf(&t, sizeof(t), HASH32_BUF_INIT);
	hash = hash32_buf(&l, sizeof(l), hash);

	return (hash & SCSIPI_CHAN_PERIPH_HASHMASK);
}

/*
 * scsipi_insert_periph:
 *
 *	Insert a periph into the channel.
 */
void
scsipi_insert_periph(struct scsipi_channel *chan, struct scsipi_periph *periph)
{
	uint32_t hash;

	hash = scsipi_chan_periph_hash(periph->periph_target,
	    periph->periph_lun);

	mutex_enter(chan_mtx(chan));
	LIST_INSERT_HEAD(&chan->chan_periphtab[hash], periph, periph_hash);
	mutex_exit(chan_mtx(chan));
}

/*
 * scsipi_remove_periph:
 *
 *	Remove a periph from the channel.
 */
void
scsipi_remove_periph(struct scsipi_channel *chan,
    struct scsipi_periph *periph)
{

	LIST_REMOVE(periph, periph_hash);
}

/*
 * scsipi_lookup_periph:
 *
 *	Lookup a periph on the specified channel.
 */
static struct scsipi_periph *
scsipi_lookup_periph_internal(struct scsipi_channel *chan, int target, int lun, bool lock)
{
	struct scsipi_periph *periph;
	uint32_t hash;

	if (target >= chan->chan_ntargets ||
	    lun >= chan->chan_nluns)
		return (NULL);

	hash = scsipi_chan_periph_hash(target, lun);

	if (lock)
		mutex_enter(chan_mtx(chan));
	LIST_FOREACH(periph, &chan->chan_periphtab[hash], periph_hash) {
		if (periph->periph_target == target &&
		    periph->periph_lun == lun)
			break;
	}
	if (lock)
		mutex_exit(chan_mtx(chan));

	return (periph);
}

struct scsipi_periph *
scsipi_lookup_periph_locked(struct scsipi_channel *chan, int target, int lun)
{
	return scsipi_lookup_periph_internal(chan, target, lun, false);
}

struct scsipi_periph *
scsipi_lookup_periph(struct scsipi_channel *chan, int target, int lun)
{
	return scsipi_lookup_periph_internal(chan, target, lun, true);
}
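
/*
 * Usage sketch: callers that already hold the channel lock (an async
 * event handler, for instance) use the _locked variant; everything else
 * goes through scsipi_lookup_periph().  Variable names are illustrative:
 *
 *	struct scsipi_periph *periph;
 *
 *	periph = scsipi_lookup_periph(chan, target, lun);
 *	if (periph == NULL)
 *		return;		(no device at that target/lun)
 */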

/*
 * scsipi_get_resource:
 *
 *	Allocate a single xfer `resource' from the channel.
 *
 *	NOTE: Must be called with channel lock held
 */
static int
scsipi_get_resource(struct scsipi_channel *chan)
{
	struct scsipi_adapter *adapt = chan->chan_adapter;

	if (chan->chan_flags & SCSIPI_CHAN_OPENINGS) {
		if (chan->chan_openings > 0) {
			chan->chan_openings--;
			return (1);
		}
		return (0);
	}

	if (adapt->adapt_openings > 0) {
		adapt->adapt_openings--;
		return (1);
	}
	return (0);
}

/*
 * scsipi_grow_resources:
 *
 *	Attempt to grow resources for a channel.  If this succeeds,
 *	we allocate one for our caller.
 *
 *	NOTE: Must be called with channel lock held
 */
static inline int
scsipi_grow_resources(struct scsipi_channel *chan)
{

	if (chan->chan_flags & SCSIPI_CHAN_CANGROW) {
		if ((chan->chan_flags & SCSIPI_CHAN_TACTIVE) == 0) {
			mutex_exit(chan_mtx(chan));
			scsipi_adapter_request(chan,
			    ADAPTER_REQ_GROW_RESOURCES, NULL);
			mutex_enter(chan_mtx(chan));
			return (scsipi_get_resource(chan));
		}
		/*
		 * ask the channel thread to do it. It'll have to thaw the
		 * queue
		 */
		scsipi_channel_freeze_locked(chan, 1);
		chan->chan_tflags |= SCSIPI_CHANT_GROWRES;
		cv_broadcast(chan_cv_complete(chan));
		return (0);
	}

	return (0);
}

/*
 * scsipi_put_resource:
 *
 *	Free a single xfer `resource' to the channel.
 *
 *	NOTE: Must be called with channel lock held
 */
static void
scsipi_put_resource(struct scsipi_channel *chan)
{
	struct scsipi_adapter *adapt = chan->chan_adapter;

	if (chan->chan_flags & SCSIPI_CHAN_OPENINGS)
		chan->chan_openings++;
	else
		adapt->adapt_openings++;
}

/*
 * scsipi_get_tag:
 *
 *	Get a tag ID for the specified xfer.
 *
 *	NOTE: Must be called with channel lock held
 */
static void
scsipi_get_tag(struct scsipi_xfer *xs)
{
	struct scsipi_periph *periph = xs->xs_periph;
	int bit, tag;
	u_int word;

	bit = 0;	/* XXX gcc */
	for (word = 0; word < PERIPH_NTAGWORDS; word++) {
		bit = ffs(periph->periph_freetags[word]);
		if (bit != 0)
			break;
	}
#ifdef DIAGNOSTIC
	if (word == PERIPH_NTAGWORDS) {
		scsipi_printaddr(periph);
		printf("no free tags\n");
		panic("scsipi_get_tag");
	}
#endif

	bit -= 1;
	periph->periph_freetags[word] &= ~(1 << bit);
	tag = (word << 5) | bit;

	/* XXX Should eventually disallow this completely. */
	if (tag >= periph->periph_openings) {
		scsipi_printaddr(periph);
		printf("WARNING: tag %d greater than available openings %d\n",
		    tag, periph->periph_openings);
	}

	xs->xs_tag_id = tag;
}
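
/*
 * Worked example of the encoding above: periph_freetags[] is an array of
 * 32-bit words, so tag = (word << 5) | bit.  If word 1 has its lowest set
 * bit at position 5, ffs() returns 6, bit becomes 5, and the tag ID is
 * (1 << 5) | 5 == 37.  scsipi_put_tag() below inverts this:
 * word = 37 >> 5 == 1 and bit = 37 & 0x1f == 5.
 */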

/*
 * scsipi_put_tag:
 *
 *	Put the tag ID for the specified xfer back into the pool.
 *
 *	NOTE: Must be called with channel lock held
 */
static void
scsipi_put_tag(struct scsipi_xfer *xs)
{
	struct scsipi_periph *periph = xs->xs_periph;
	int word, bit;

	word = xs->xs_tag_id >> 5;
	bit = xs->xs_tag_id & 0x1f;

	periph->periph_freetags[word] |= (1 << bit);
}

/*
 * scsipi_get_xs:
 *
 *	Allocate an xfer descriptor and associate it with the
 *	specified peripheral.  If the peripheral has no more
 *	available command openings, we either block waiting for
 *	one to become available, or fail.
 *
 *	When this routine is called with the channel lock held
 *	the flags must include XS_CTL_NOSLEEP.
 */
struct scsipi_xfer *
scsipi_get_xs(struct scsipi_periph *periph, int flags)
{
	struct scsipi_xfer *xs;
	bool lock = (flags & XS_CTL_NOSLEEP) == 0;

	SC_DEBUG(periph, SCSIPI_DB3, ("scsipi_get_xs\n"));

	KASSERT(!cold);

#ifdef DIAGNOSTIC
	/*
	 * URGENT commands can never be ASYNC.
	 */
	if ((flags & (XS_CTL_URGENT|XS_CTL_ASYNC)) ==
	    (XS_CTL_URGENT|XS_CTL_ASYNC)) {
		scsipi_printaddr(periph);
		printf("URGENT and ASYNC\n");
		panic("scsipi_get_xs");
	}
#endif

	/*
	 * Wait for a command opening to become available.  Rules:
	 *
	 *	- All xfers must wait for an available opening.
	 *	  Exception: URGENT xfers can proceed when
	 *	  active == openings, because we use the opening
	 *	  of the command we're recovering for.
	 *	- if the periph has sense pending, only URGENT & REQSENSE
	 *	  xfers may proceed.
	 *
	 *	- If the periph is recovering, only URGENT xfers may
	 *	  proceed.
	 *
	 *	- If the periph is currently executing a recovery
	 *	  command, URGENT commands must block, because only
	 *	  one recovery command can execute at a time.
	 */
	if (lock)
		mutex_enter(chan_mtx(periph->periph_channel));
	for (;;) {
		if (flags & XS_CTL_URGENT) {
			if (periph->periph_active > periph->periph_openings)
				goto wait_for_opening;
			if (periph->periph_flags & PERIPH_SENSE) {
				if ((flags & XS_CTL_REQSENSE) == 0)
					goto wait_for_opening;
			} else {
				if ((periph->periph_flags &
				    PERIPH_RECOVERY_ACTIVE) != 0)
					goto wait_for_opening;
				periph->periph_flags |= PERIPH_RECOVERY_ACTIVE;
			}
			break;
		}
		if (periph->periph_active >= periph->periph_openings ||
		    (periph->periph_flags & PERIPH_RECOVERING) != 0)
			goto wait_for_opening;
		periph->periph_active++;
		break;

 wait_for_opening:
		if (flags & XS_CTL_NOSLEEP) {
			KASSERT(!lock);
			return (NULL);
		}
		KASSERT(lock);
		SC_DEBUG(periph, SCSIPI_DB3, ("sleeping\n"));
		periph->periph_flags |= PERIPH_WAITING;
		cv_wait(periph_cv_periph(periph),
		    chan_mtx(periph->periph_channel));
	}
	if (lock)
		mutex_exit(chan_mtx(periph->periph_channel));

	SC_DEBUG(periph, SCSIPI_DB3, ("calling pool_get\n"));
	xs = pool_get(&scsipi_xfer_pool,
	    ((flags & XS_CTL_NOSLEEP) != 0 ? PR_NOWAIT : PR_WAITOK));
	if (xs == NULL) {
		if (lock)
			mutex_enter(chan_mtx(periph->periph_channel));
		if (flags & XS_CTL_URGENT) {
			if ((flags & XS_CTL_REQSENSE) == 0)
				periph->periph_flags &= ~PERIPH_RECOVERY_ACTIVE;
		} else
			periph->periph_active--;
		if (lock)
			mutex_exit(chan_mtx(periph->periph_channel));
		scsipi_printaddr(periph);
		printf("unable to allocate %sscsipi_xfer\n",
		    (flags & XS_CTL_URGENT) ? "URGENT " : "");
	}

	SC_DEBUG(periph, SCSIPI_DB3, ("returning\n"));

	if (xs != NULL) {
		memset(xs, 0, sizeof(*xs));
		callout_init(&xs->xs_callout, 0);
		xs->xs_periph = periph;
		xs->xs_control = flags;
		xs->xs_status = 0;
		if ((flags & XS_CTL_NOSLEEP) == 0)
			mutex_enter(chan_mtx(periph->periph_channel));
		TAILQ_INSERT_TAIL(&periph->periph_xferq, xs, device_q);
		if ((flags & XS_CTL_NOSLEEP) == 0)
			mutex_exit(chan_mtx(periph->periph_channel));
	}
	return (xs);
}
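
/*
 * Sketch of the allocate/release pairing (illustrative only; normal
 * callers go through scsipi_command()/scsipi_execute_xs() instead of
 * driving the xfer by hand):
 *
 *	struct scsipi_xfer *xs;
 *
 *	xs = scsipi_get_xs(periph, XS_CTL_NOSLEEP);
 *	if (xs == NULL)
 *		return;		(no opening, or pool exhausted)
 *	... fill in xs->cmd, xs->cmdlen, xs->data, xs->datalen ...
 *	... issue the xfer; scsipi_done() fires on completion ...
 *	mutex_enter(chan_mtx(periph->periph_channel));
 *	scsipi_put_xs(xs);	(channel lock must be held)
 *	mutex_exit(chan_mtx(periph->periph_channel));
 */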

/*
 * scsipi_put_xs:
 *
 *	Release an xfer descriptor, decreasing the outstanding command
 *	count for the peripheral.  If there is a thread waiting for
 *	an opening, wake it up.  If not, kick any queued I/O the
 *	peripheral may have.
 *
 *	NOTE: Must be called with channel lock held
 */
void
scsipi_put_xs(struct scsipi_xfer *xs)
{
	struct scsipi_periph *periph = xs->xs_periph;
	int flags = xs->xs_control;

	SC_DEBUG(periph, SCSIPI_DB3, ("scsipi_free_xs\n"));

	TAILQ_REMOVE(&periph->periph_xferq, xs, device_q);
	callout_destroy(&xs->xs_callout);
	pool_put(&scsipi_xfer_pool, xs);

#ifdef DIAGNOSTIC
	if ((periph->periph_flags & PERIPH_RECOVERY_ACTIVE) != 0 &&
	    periph->periph_active == 0) {
		scsipi_printaddr(periph);
		printf("recovery without a command to recover for\n");
		panic("scsipi_put_xs");
	}
#endif

	if (flags & XS_CTL_URGENT) {
		if ((flags & XS_CTL_REQSENSE) == 0)
			periph->periph_flags &= ~PERIPH_RECOVERY_ACTIVE;
	} else
		periph->periph_active--;
	if (periph->periph_active == 0 &&
	    (periph->periph_flags & PERIPH_WAITDRAIN) != 0) {
		periph->periph_flags &= ~PERIPH_WAITDRAIN;
		cv_broadcast(periph_cv_active(periph));
	}

	if (periph->periph_flags & PERIPH_WAITING) {
		periph->periph_flags &= ~PERIPH_WAITING;
		cv_broadcast(periph_cv_periph(periph));
	} else {
		if (periph->periph_switch->psw_start != NULL &&
		    device_is_active(periph->periph_dev)) {
			SC_DEBUG(periph, SCSIPI_DB2,
			    ("calling private start()\n"));
			(*periph->periph_switch->psw_start)(periph);
		}
	}
}

/*
 * scsipi_channel_freeze:
 *
 *	Freeze a channel's xfer queue.
 */
void
scsipi_channel_freeze(struct scsipi_channel *chan, int count)
{
	bool lock = chan_running(chan) > 0;

	if (lock)
		mutex_enter(chan_mtx(chan));
	chan->chan_qfreeze += count;
	if (lock)
		mutex_exit(chan_mtx(chan));
}

static void
scsipi_channel_freeze_locked(struct scsipi_channel *chan, int count)
{

	chan->chan_qfreeze += count;
}

/*
 * scsipi_channel_thaw:
 *
 *	Thaw a channel's xfer queue.
 */
void
scsipi_channel_thaw(struct scsipi_channel *chan, int count)
{
	bool lock = chan_running(chan) > 0;

	if (lock)
		mutex_enter(chan_mtx(chan));
	chan->chan_qfreeze -= count;
	/*
	 * Don't let the freeze count go negative.
	 *
	 * Presumably the adapter driver could keep track of this,
	 * but it might just be easier to do this here so as to allow
	 * multiple callers, including those outside the adapter driver.
	 */
	if (chan->chan_qfreeze < 0) {
		chan->chan_qfreeze = 0;
	}
	if (lock)
		mutex_exit(chan_mtx(chan));

	/*
	 * until the channel is running
	 */
	if (!lock)
		return;

	/*
	 * Kick the channel's queue here.  Note, we may be running in
	 * interrupt context (softclock or HBA's interrupt), so the adapter
	 * driver had better not sleep.
	 */
	if (chan->chan_qfreeze == 0)
		scsipi_run_queue(chan);
}

/*
 * scsipi_channel_timed_thaw:
 *
 *	Thaw a channel after some time has expired.  This will also
 *	run the channel's queue if the freeze count has reached 0.
 */
void
scsipi_channel_timed_thaw(void *arg)
{
	struct scsipi_channel *chan = arg;

	scsipi_channel_thaw(chan, 1);
}
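
/*
 * Usage sketch: pair a freeze with a callout that fires this handler,
 * mirroring what scsipi_complete() does for periphs with
 * scsipi_periph_timed_thaw().  The callout (sc_thaw_ch) is a hypothetical
 * member of an adapter softc:
 *
 *	scsipi_channel_freeze(chan, 1);
 *	callout_reset(&sc->sc_thaw_ch, hz, scsipi_channel_timed_thaw, chan);
 */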

/*
 * scsipi_periph_freeze:
 *
 *	Freeze a device's xfer queue.
 */
void
scsipi_periph_freeze_locked(struct scsipi_periph *periph, int count)
{

	periph->periph_qfreeze += count;
}

/*
 * scsipi_periph_thaw:
 *
 *	Thaw a device's xfer queue.
 */
void
scsipi_periph_thaw_locked(struct scsipi_periph *periph, int count)
{

	periph->periph_qfreeze -= count;
#ifdef DIAGNOSTIC
	if (periph->periph_qfreeze < 0) {
		static const char pc[] = "periph freeze count < 0";
		scsipi_printaddr(periph);
		printf("%s\n", pc);
		panic(pc);
	}
#endif
	if (periph->periph_qfreeze == 0 &&
	    (periph->periph_flags & PERIPH_WAITING) != 0)
		cv_broadcast(periph_cv_periph(periph));
}

void
scsipi_periph_freeze(struct scsipi_periph *periph, int count)
{

	mutex_enter(chan_mtx(periph->periph_channel));
	scsipi_periph_freeze_locked(periph, count);
	mutex_exit(chan_mtx(periph->periph_channel));
}

void
scsipi_periph_thaw(struct scsipi_periph *periph, int count)
{

	mutex_enter(chan_mtx(periph->periph_channel));
	scsipi_periph_thaw_locked(periph, count);
	mutex_exit(chan_mtx(periph->periph_channel));
}

/*
 * scsipi_periph_timed_thaw:
 *
 *	Thaw a device after some time has expired.
 */
void
scsipi_periph_timed_thaw(void *arg)
{
	struct scsipi_periph *periph = arg;
	struct scsipi_channel *chan = periph->periph_channel;

	callout_stop(&periph->periph_callout);

	mutex_enter(chan_mtx(chan));
	scsipi_periph_thaw_locked(periph, 1);
	if ((periph->periph_channel->chan_flags & SCSIPI_CHAN_TACTIVE) == 0) {
		/*
		 * Kick the channel's queue here.  Note, we're running in
		 * interrupt context (softclock), so the adapter driver
		 * had better not sleep.
		 */
		mutex_exit(chan_mtx(chan));
		scsipi_run_queue(periph->periph_channel);
	} else {
		/*
		 * Tell the completion thread to kick the channel's queue here.
		 */
		periph->periph_channel->chan_tflags |= SCSIPI_CHANT_KICK;
		cv_broadcast(chan_cv_complete(chan));
		mutex_exit(chan_mtx(chan));
	}
}

/*
 * scsipi_wait_drain:
 *
 *	Wait for a periph's pending xfers to drain.
 */
void
scsipi_wait_drain(struct scsipi_periph *periph)
{
	struct scsipi_channel *chan = periph->periph_channel;

	mutex_enter(chan_mtx(chan));
	while (periph->periph_active != 0) {
		periph->periph_flags |= PERIPH_WAITDRAIN;
		cv_wait(periph_cv_active(periph), chan_mtx(chan));
	}
	mutex_exit(chan_mtx(chan));
}
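
/*
 * Usage sketch: a periph driver's detach path typically quiesces the
 * device by draining outstanding xfers before freeing its state.  The
 * driver and softc below are hypothetical:
 *
 *	static int
 *	xx_detach(device_t self, int flags)
 *	{
 *		struct xx_softc *sc = device_private(self);
 *
 *		scsipi_wait_drain(sc->sc_periph);
 *		... tear down driver resources ...
 *		return 0;
 *	}
 */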

/*
 * scsipi_kill_pending:
 *
 *	Kill off all pending xfers for a periph.
 *
 *	NOTE: Must be called with channel lock held
 */
void
scsipi_kill_pending(struct scsipi_periph *periph)
{
	struct scsipi_channel *chan = periph->periph_channel;

	(*chan->chan_bustype->bustype_kill_pending)(periph);
	while (periph->periph_active != 0) {
		periph->periph_flags |= PERIPH_WAITDRAIN;
		cv_wait(periph_cv_active(periph), chan_mtx(chan));
	}
}

/*
 * scsipi_print_cdb:
 *	prints a command descriptor block (for debug purpose, error messages,
 *	SCSIVERBOSE, ...)
 */
void
scsipi_print_cdb(struct scsipi_generic *cmd)
{
	int i, j;

	printf("0x%02x", cmd->opcode);

	switch (CDB_GROUPID(cmd->opcode)) {
	case CDB_GROUPID_0:
		j = CDB_GROUP0;
		break;
	case CDB_GROUPID_1:
		j = CDB_GROUP1;
		break;
	case CDB_GROUPID_2:
		j = CDB_GROUP2;
		break;
	case CDB_GROUPID_3:
		j = CDB_GROUP3;
		break;
	case CDB_GROUPID_4:
		j = CDB_GROUP4;
		break;
	case CDB_GROUPID_5:
		j = CDB_GROUP5;
		break;
	case CDB_GROUPID_6:
		j = CDB_GROUP6;
		break;
	case CDB_GROUPID_7:
		j = CDB_GROUP7;
		break;
	default:
		j = 0;
	}
	if (j == 0)
		j = sizeof (cmd->bytes);
	for (i = 0; i < j-1; i++)	/* already done the opcode */
		printf(" %02x", cmd->bytes[i]);
}
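
/*
 * Example output: a 6-byte TEST UNIT READY CDB is in group 0, so j is
 * CDB_GROUP0 (6) and the function prints the opcode plus the remaining
 * five bytes:
 *
 *	0x00 00 00 00 00 00
 */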

/*
 * scsipi_interpret_sense:
 *
 *	Look at the returned sense and act on the error, determining
 *	the unix error number to pass back.  (0 = report no error)
 *
 *	NOTE: If we return ERESTART, we are expected to have
 *	thawed the device!
 *
 *	THIS IS THE DEFAULT ERROR HANDLER FOR SCSI DEVICES.
 */
int
scsipi_interpret_sense(struct scsipi_xfer *xs)
{
	struct scsi_sense_data *sense;
	struct scsipi_periph *periph = xs->xs_periph;
	u_int8_t key;
	int error;
	u_int32_t info;
	static const char *error_mes[] = {
		"soft error (corrected)",
		"not ready", "medium error",
		"non-media hardware failure", "illegal request",
		"unit attention", "readonly device",
		"no data found", "vendor unique",
		"copy aborted", "command aborted",
		"search returned equal", "volume overflow",
		"verify miscompare", "unknown error key"
	};

	sense = &xs->sense.scsi_sense;
#ifdef SCSIPI_DEBUG
	if (periph->periph_flags & SCSIPI_DB1) {
		int count;
		scsipi_printaddr(periph);
		printf(" sense debug information:\n");
		printf("\tcode 0x%x valid %d\n",
			SSD_RCODE(sense->response_code),
			sense->response_code & SSD_RCODE_VALID ? 1 : 0);
		printf("\tseg 0x%x key 0x%x ili 0x%x eom 0x%x fmark 0x%x\n",
			sense->segment,
			SSD_SENSE_KEY(sense->flags),
			sense->flags & SSD_ILI ? 1 : 0,
			sense->flags & SSD_EOM ? 1 : 0,
			sense->flags & SSD_FILEMARK ? 1 : 0);
		printf("\ninfo: 0x%x 0x%x 0x%x 0x%x followed by %d "
			"extra bytes\n",
			sense->info[0],
			sense->info[1],
			sense->info[2],
			sense->info[3],
			sense->extra_len);
		printf("\textra: ");
		for (count = 0; count < SSD_ADD_BYTES_LIM(sense); count++)
			printf("0x%x ", sense->csi[count]);
		printf("\n");
	}
#endif

	/*
	 * If the periph has its own error handler, call it first.
	 * If it returns a legit error value, return that, otherwise
	 * it wants us to continue with normal error processing.
	 */
	if (periph->periph_switch->psw_error != NULL) {
		SC_DEBUG(periph, SCSIPI_DB2,
		    ("calling private err_handler()\n"));
		error = (*periph->periph_switch->psw_error)(xs);
		if (error != EJUSTRETURN)
			return (error);
	}
	/* otherwise use the default */
	switch (SSD_RCODE(sense->response_code)) {

		/*
		 * Old SCSI-1 and SASI devices respond with
		 * codes other than 70.
		 */
	case 0x00:		/* no error (command completed OK) */
		return (0);
	case 0x04:		/* drive not ready after it was selected */
		if ((periph->periph_flags & PERIPH_REMOVABLE) != 0)
			periph->periph_flags &= ~PERIPH_MEDIA_LOADED;
		if ((xs->xs_control & XS_CTL_IGNORE_NOT_READY) != 0)
			return (0);
		/* XXX - display some sort of error here? */
		return (EIO);
	case 0x20:		/* invalid command */
		if ((xs->xs_control &
		    XS_CTL_IGNORE_ILLEGAL_REQUEST) != 0)
			return (0);
		return (EINVAL);
	case 0x25:		/* invalid LUN (Adaptec ACB-4000) */
		return (EACCES);

		/*
		 * If it's code 70, use the extended stuff and
		 * interpret the key
		 */
	case 0x71:		/* delayed error */
		scsipi_printaddr(periph);
		key = SSD_SENSE_KEY(sense->flags);
		printf(" DEFERRED ERROR, key = 0x%x\n", key);
		/* FALLTHROUGH */
	case 0x70:
		if ((sense->response_code & SSD_RCODE_VALID) != 0)
			info = _4btol(sense->info);
		else
			info = 0;
		key = SSD_SENSE_KEY(sense->flags);

		switch (key) {
		case SKEY_NO_SENSE:
		case SKEY_RECOVERED_ERROR:
			if (xs->resid == xs->datalen && xs->datalen) {
				/*
				 * Why is this here?
				 */
				xs->resid = 0;	/* not short read */
			}
			/* FALLTHROUGH */
		case SKEY_EQUAL:
			error = 0;
			break;
		case SKEY_NOT_READY:
			if ((periph->periph_flags & PERIPH_REMOVABLE) != 0)
				periph->periph_flags &= ~PERIPH_MEDIA_LOADED;
			if ((xs->xs_control & XS_CTL_IGNORE_NOT_READY) != 0)
				return (0);
			if (sense->asc == 0x3A) {
				error = ENODEV; /* Medium not present */
				if (xs->xs_control & XS_CTL_SILENT_NODEV)
					return (error);
			} else
				error = EIO;
			if ((xs->xs_control & XS_CTL_SILENT) != 0)
				return (error);
			break;
		case SKEY_ILLEGAL_REQUEST:
			if ((xs->xs_control &
			    XS_CTL_IGNORE_ILLEGAL_REQUEST) != 0)
				return (0);
			/*
			 * Handle the case where a device reports
			 * Logical Unit Not Supported during discovery.
			 */
			if ((xs->xs_control & XS_CTL_DISCOVERY) != 0 &&
			    sense->asc == 0x25 &&
			    sense->ascq == 0x00)
				return (EINVAL);
			if ((xs->xs_control & XS_CTL_SILENT) != 0)
				return (EIO);
			error = EINVAL;
			break;
		case SKEY_UNIT_ATTENTION:
			if (sense->asc == 0x29 &&
			    sense->ascq == 0x00) {
				/* device or bus reset */
				return (ERESTART);
			}
			if ((periph->periph_flags & PERIPH_REMOVABLE) != 0)
				periph->periph_flags &= ~PERIPH_MEDIA_LOADED;
			if ((xs->xs_control &
			    XS_CTL_IGNORE_MEDIA_CHANGE) != 0 ||
				/* XXX Should reupload any transient state. */
				(periph->periph_flags &
				 PERIPH_REMOVABLE) == 0) {
				return (ERESTART);
			}
			if ((xs->xs_control & XS_CTL_SILENT) != 0)
				return (EIO);
			error = EIO;
			break;
		case SKEY_DATA_PROTECT:
			error = EROFS;
			break;
		case SKEY_BLANK_CHECK:
			error = 0;
			break;
		case SKEY_ABORTED_COMMAND:
			if (xs->xs_retries != 0) {
				xs->xs_retries--;
				error = ERESTART;
			} else
				error = EIO;
			break;
		case SKEY_VOLUME_OVERFLOW:
			error = ENOSPC;
			break;
		default:
			error = EIO;
			break;
		}

		/* Print verbose decode if appropriate and possible */
		if ((key == 0) ||
		    ((xs->xs_control & XS_CTL_SILENT) != 0) ||
		    (scsipi_print_sense(xs, 0) != 0))
			return (error);

		/* Print brief(er) sense information */
		scsipi_printaddr(periph);
		printf("%s", error_mes[key - 1]);
		if ((sense->response_code & SSD_RCODE_VALID) != 0) {
			switch (key) {
			case SKEY_NOT_READY:
			case SKEY_ILLEGAL_REQUEST:
			case SKEY_UNIT_ATTENTION:
			case SKEY_DATA_PROTECT:
				break;
			case SKEY_BLANK_CHECK:
				printf(", requested size: %d (decimal)",
				    info);
				break;
			case SKEY_ABORTED_COMMAND:
				if (xs->xs_retries)
					printf(", retrying");
				printf(", cmd 0x%x, info 0x%x",
				    xs->cmd->opcode, info);
				break;
			default:
				printf(", info = %d (decimal)", info);
			}
		}
		if (sense->extra_len != 0) {
			int n;
			printf(", data =");
			for (n = 0; n < sense->extra_len; n++)
				printf(" %02x",
				    sense->csi[n]);
		}
		printf("\n");
		return (error);

	/*
	 * Some other code, just report it
	 */
	default:
#if defined(SCSIDEBUG) || defined(DEBUG)
	{
		static const char *uc = "undecodable sense error";
		int i;
		u_int8_t *cptr = (u_int8_t *) sense;
		scsipi_printaddr(periph);
		if (xs->cmd == &xs->cmdstore) {
			printf("%s for opcode 0x%x, data=",
			    uc, xs->cmdstore.opcode);
		} else {
			printf("%s, data=", uc);
		}
		for (i = 0; i < sizeof (sense); i++)
			printf(" 0x%02x", *(cptr++) & 0xff);
		printf("\n");
	}
#else
		scsipi_printaddr(periph);
		printf("Sense Error Code 0x%x",
			SSD_RCODE(sense->response_code));
		if ((sense->response_code & SSD_RCODE_VALID) != 0) {
			struct scsi_sense_data_unextended *usense =
			    (struct scsi_sense_data_unextended *)sense;
			printf(" at block no. %d (decimal)",
			    _3btol(usense->block));
		}
		printf("\n");
#endif
		return (EIO);
	}
}

/*
 * scsipi_test_unit_ready:
 *
 *	Issue a `test unit ready' request.
 */
int
scsipi_test_unit_ready(struct scsipi_periph *periph, int flags)
{
	struct scsi_test_unit_ready cmd;
	int retries;

	/* some ATAPI drives don't support TEST UNIT READY. Sigh */
	if (periph->periph_quirks & PQUIRK_NOTUR)
		return (0);

	if (flags & XS_CTL_DISCOVERY)
		retries = 0;
	else
		retries = SCSIPIRETRIES;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = SCSI_TEST_UNIT_READY;

	return (scsipi_command(periph, (void *)&cmd, sizeof(cmd), 0, 0,
	    retries, 10000, NULL, flags));
}
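
/*
 * Usage sketch (hypothetical caller): a driver checking whether a unit is
 * ready might issue a quiet TEST UNIT READY:
 *
 *	int error;
 *
 *	error = scsipi_test_unit_ready(periph, XS_CTL_SILENT);
 *	if (error == 0)
 *		... the unit reports ready ...
 */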

static const struct scsipi_inquiry3_pattern {
	const char vendor[8];
	const char product[16];
	const char revision[4];
} scsipi_inquiry3_quirk[] = {
	{ "ES-6600 ", "", "" },
};

static int
scsipi_inquiry3_ok(const struct scsipi_inquiry_data *ib)
{
	for (size_t i = 0; i < __arraycount(scsipi_inquiry3_quirk); i++) {
		const struct scsipi_inquiry3_pattern *q =
		    &scsipi_inquiry3_quirk[i];
#define MATCH(field) \
    (q->field[0] ? memcmp(ib->field, q->field, sizeof(ib->field)) == 0 : 1)
		if (MATCH(vendor) && MATCH(product) && MATCH(revision))
			return 0;
	}
	return 1;
}

/*
 * scsipi_inquire:
 *
 *	Ask the device about itself.
 */
int
scsipi_inquire(struct scsipi_periph *periph, struct scsipi_inquiry_data *inqbuf,
    int flags)
{
	struct scsipi_inquiry cmd;
	int error;
	int retries;

	if (flags & XS_CTL_DISCOVERY)
		retries = 0;
	else
		retries = SCSIPIRETRIES;

	/*
	 * If we request more data than the device can provide, it SHOULD just
	 * return a short response.  However, some devices error with an
	 * ILLEGAL REQUEST sense code, and yet others have even more special
	 * failure modes (such as the GL641USB flash adapter, which goes loony
	 * and sends corrupted CRCs).  To work around this, and to bring our
	 * behavior more in line with other OSes, we do a shorter inquiry,
	 * covering all the SCSI-2 information, first, and then request more
	 * data iff the "additional length" field indicates there is more.
	 * - mycroft, 2003/10/16
	 */
	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = INQUIRY;
	cmd.length = SCSIPI_INQUIRY_LENGTH_SCSI2;
	error = scsipi_command(periph, (void *)&cmd, sizeof(cmd),
	    (void *)inqbuf, SCSIPI_INQUIRY_LENGTH_SCSI2, retries,
	    10000, NULL, flags | XS_CTL_DATA_IN);
	if (!error &&
	    inqbuf->additional_length > SCSIPI_INQUIRY_LENGTH_SCSI2 - 4) {
		if (scsipi_inquiry3_ok(inqbuf)) {
#if 0
printf("inquire: addlen=%d, retrying\n", inqbuf->additional_length);
#endif
			cmd.length = SCSIPI_INQUIRY_LENGTH_SCSI3;
			error = scsipi_command(periph, (void *)&cmd, sizeof(cmd),
			    (void *)inqbuf, SCSIPI_INQUIRY_LENGTH_SCSI3, retries,
			    10000, NULL, flags | XS_CTL_DATA_IN);
#if 0
printf("inquire: error=%d\n", error);
#endif
		}
	}

#ifdef SCSI_OLD_NOINQUIRY
	/*
	 * Kludge for the Adaptec ACB-4000 SCSI->MFM translator.
	 * This board doesn't support the INQUIRY command at all.
	 */
	if (error == EINVAL || error == EACCES) {
		/*
		 * Conjure up an INQUIRY response.
		 */
		inqbuf->device = (error == EINVAL ?
			 SID_QUAL_LU_PRESENT :
			 SID_QUAL_LU_NOTPRESENT) | T_DIRECT;
		inqbuf->dev_qual2 = 0;
		inqbuf->version = 0;
		inqbuf->response_format = SID_FORMAT_SCSI1;
		inqbuf->additional_length = SCSIPI_INQUIRY_LENGTH_SCSI2 - 4;
		inqbuf->flags1 = inqbuf->flags2 = inqbuf->flags3 = 0;
		memcpy(inqbuf->vendor, "ADAPTEC ACB-4000            ", 28);
		error = 0;
	}

	/*
	 * Kludge for the Emulex MT-02 SCSI->QIC translator.
	 * This board gives an empty response to an INQUIRY command.
	 */
	else if (error == 0 &&
	    inqbuf->device == (SID_QUAL_LU_PRESENT | T_DIRECT) &&
	    inqbuf->dev_qual2 == 0 &&
	    inqbuf->version == 0 &&
	    inqbuf->response_format == SID_FORMAT_SCSI1) {
		/*
		 * Fill out the INQUIRY response.
		 */
		inqbuf->device = (SID_QUAL_LU_PRESENT | T_SEQUENTIAL);
		inqbuf->dev_qual2 = SID_REMOVABLE;
		inqbuf->additional_length = SCSIPI_INQUIRY_LENGTH_SCSI2 - 4;
		inqbuf->flags1 = inqbuf->flags2 = inqbuf->flags3 = 0;
		memcpy(inqbuf->vendor, "EMULEX  MT-02 QIC           ", 28);
	}
#endif /* SCSI_OLD_NOINQUIRY */

	return error;
}
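
/*
 * Usage sketch: probe code calls scsipi_inquire() with XS_CTL_DISCOVERY,
 * which (as above) disables retries so a missing LUN fails quickly.
 * Buffer handling is illustrative:
 *
 *	struct scsipi_inquiry_data inqbuf;
 *
 *	memset(&inqbuf, 0, sizeof(inqbuf));
 *	if (scsipi_inquire(periph, &inqbuf,
 *	    XS_CTL_DISCOVERY | XS_CTL_SILENT) == 0)
 *		... inqbuf.vendor/product/revision identify the device ...
 */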

/*
 * scsipi_prevent:
 *
 *	Prevent or allow the user to remove the media
 */
int
scsipi_prevent(struct scsipi_periph *periph, int type, int flags)
{
	struct scsi_prevent_allow_medium_removal cmd;

	if (periph->periph_quirks & PQUIRK_NODOORLOCK)
		return 0;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = SCSI_PREVENT_ALLOW_MEDIUM_REMOVAL;
	cmd.how = type;

	return (scsipi_command(periph, (void *)&cmd, sizeof(cmd), 0, 0,
	    SCSIPIRETRIES, 5000, NULL, flags));
}

/*
 * scsipi_start:
 *
 *	Send a START UNIT.
 */
int
scsipi_start(struct scsipi_periph *periph, int type, int flags)
{
	struct scsipi_start_stop cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = START_STOP;
	cmd.byte2 = 0x00;
	cmd.how = type;

	return (scsipi_command(periph, (void *)&cmd, sizeof(cmd), 0, 0,
	    SCSIPIRETRIES, (type & SSS_START) ? 60000 : 10000, NULL, flags));
}
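
/*
 * Usage sketch: SSS_START spins the unit up, and a `type' of zero stops
 * it (the START bit is clear); note the longer timeout chosen above when
 * starting.  Whether a driver ignores the result, as here, is
 * illustrative:
 *
 *	(void)scsipi_start(periph, SSS_START, XS_CTL_SILENT);
 */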

/*
 * scsipi_mode_sense, scsipi_mode_sense_big:
 *	get a sense page from a device
 */

int
scsipi_mode_sense(struct scsipi_periph *periph, int byte2, int page,
    struct scsi_mode_parameter_header_6 *data, int len, int flags, int retries,
    int timeout)
{
	struct scsi_mode_sense_6 cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = SCSI_MODE_SENSE_6;
	cmd.byte2 = byte2;
	cmd.page = page;
	cmd.length = len & 0xff;

	return (scsipi_command(periph, (void *)&cmd, sizeof(cmd),
	    (void *)data, len, retries, timeout, NULL, flags | XS_CTL_DATA_IN));
}

int
scsipi_mode_sense_big(struct scsipi_periph *periph, int byte2, int page,
    struct scsi_mode_parameter_header_10 *data, int len, int flags, int retries,
    int timeout)
{
	struct scsi_mode_sense_10 cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = SCSI_MODE_SENSE_10;
	cmd.byte2 = byte2;
	cmd.page = page;
	_lto2b(len, cmd.length);

	return (scsipi_command(periph, (void *)&cmd, sizeof(cmd),
	    (void *)data, len, retries, timeout, NULL, flags | XS_CTL_DATA_IN));
}

int
scsipi_mode_select(struct scsipi_periph *periph, int byte2,
    struct scsi_mode_parameter_header_6 *data, int len, int flags, int retries,
    int timeout)
{
	struct scsi_mode_select_6 cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = SCSI_MODE_SELECT_6;
	cmd.byte2 = byte2;
	cmd.length = len & 0xff;

	return (scsipi_command(periph, (void *)&cmd, sizeof(cmd),
	    (void *)data, len, retries, timeout, NULL, flags | XS_CTL_DATA_OUT));
}

int
scsipi_mode_select_big(struct scsipi_periph *periph, int byte2,
    struct scsi_mode_parameter_header_10 *data, int len, int flags, int retries,
    int timeout)
{
	struct scsi_mode_select_10 cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = SCSI_MODE_SELECT_10;
	cmd.byte2 = byte2;
	_lto2b(len, cmd.length);

	return (scsipi_command(periph, (void *)&cmd, sizeof(cmd),
	    (void *)data, len, retries, timeout, NULL, flags | XS_CTL_DATA_OUT));
}
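
/*
 * Usage sketch for the helpers above: read a mode page into a buffer
 * that has room for the header plus the page.  The page code (0x08,
 * caching) and buffer layout are illustrative:
 *
 *	struct {
 *		struct scsi_mode_parameter_header_6 header;
 *		uint8_t page[0x20];
 *	} buf;
 *	int error;
 *
 *	memset(&buf, 0, sizeof(buf));
 *	error = scsipi_mode_sense(periph, 0, 0x08, &buf.header,
 *	    sizeof(buf), XS_CTL_SILENT, SCSIPIRETRIES, 10000);
 */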

/*
 * scsipi_done:
 *
 *	This routine is called by an adapter's interrupt handler when
 *	an xfer is completed.
 */
void
scsipi_done(struct scsipi_xfer *xs)
{
	struct scsipi_periph *periph = xs->xs_periph;
	struct scsipi_channel *chan = periph->periph_channel;
	int freezecnt;

	SC_DEBUG(periph, SCSIPI_DB2, ("scsipi_done\n"));
#ifdef SCSIPI_DEBUG
	if (periph->periph_dbflags & SCSIPI_DB1)
		show_scsipi_cmd(xs);
#endif

	mutex_enter(chan_mtx(chan));
	/*
	 * The resource this command was using is now free.
	 */
	if (xs->xs_status & XS_STS_DONE) {
		/* XXX in certain circumstances, such as a device
		 * being detached, a xs that has already been
		 * scsipi_done()'d by the main thread will be done'd
		 * again by scsibusdetach(). Putting the xs on the
		 * chan_complete queue causes list corruption and
		 * everyone dies. This prevents that, but perhaps
		 * there should be better coordination somewhere such
		 * that this won't ever happen (and can be turned into
		 * a KASSERT()).
		 */
		mutex_exit(chan_mtx(chan));
		goto out;
	}
	scsipi_put_resource(chan);
	xs->xs_periph->periph_sent--;

	/*
	 * If the command was tagged, free the tag.
	 */
	if (XS_CTL_TAGTYPE(xs) != 0)
		scsipi_put_tag(xs);
	else
		periph->periph_flags &= ~PERIPH_UNTAG;

	/* Mark the command as `done'. */
	xs->xs_status |= XS_STS_DONE;

#ifdef DIAGNOSTIC
	if ((xs->xs_control & (XS_CTL_ASYNC|XS_CTL_POLL)) ==
	    (XS_CTL_ASYNC|XS_CTL_POLL))
		panic("scsipi_done: ASYNC and POLL");
#endif

	/*
	 * If the xfer had an error of any sort, freeze the
	 * periph's queue.  Freeze it again if we were requested
	 * to do so in the xfer.
	 */
	freezecnt = 0;
	if (xs->error != XS_NOERROR)
		freezecnt++;
	if (xs->xs_control & XS_CTL_FREEZE_PERIPH)
		freezecnt++;
	if (freezecnt != 0)
		scsipi_periph_freeze_locked(periph, freezecnt);

	/*
	 * record the xfer with a pending sense, in case a SCSI reset is
	 * received before the thread is waked up.
	 */
	if (xs->error == XS_BUSY && xs->status == SCSI_CHECK) {
		periph->periph_flags |= PERIPH_SENSE;
		periph->periph_xscheck = xs;
	}

	/*
	 * If this was an xfer that was not to complete asynchronously,
	 * let the requesting thread perform error checking/handling
	 * in its context.
	 */
	if ((xs->xs_control & XS_CTL_ASYNC) == 0) {
		/*
		 * If it's a polling job, just return, to unwind the
		 * call graph.  We don't need to restart the queue,
		 * because polling jobs are treated specially, and
		 * are really only used during crash dumps anyway
		 * (XXX or during boot-time autoconfiguration of
		 * ATAPI devices).
		 */
		if (xs->xs_control & XS_CTL_POLL) {
			mutex_exit(chan_mtx(chan));
			return;
		}
		cv_broadcast(xs_cv(xs));
		mutex_exit(chan_mtx(chan));
		goto out;
	}

	/*
	 * Catch the extremely common case of I/O completing
	 * without error; no use in taking a context switch
	 * if we can handle it in interrupt context.
	 */
	if (xs->error == XS_NOERROR) {
		mutex_exit(chan_mtx(chan));
		(void) scsipi_complete(xs);
		goto out;
	}

	/*
	 * There is an error on this xfer.  Put it on the channel's
	 * completion queue, and wake up the completion thread.
	 */
	TAILQ_INSERT_TAIL(&chan->chan_complete, xs, channel_q);
	cv_broadcast(chan_cv_complete(chan));
	mutex_exit(chan_mtx(chan));

 out:
	/*
	 * If there are more xfers on the channel's queue, attempt to
	 * run them.
	 */
	scsipi_run_queue(chan);
}

/*
 * scsipi_complete:
 *
 *	Completion of a scsipi_xfer.  This is the guts of scsipi_done().
 *
 *	NOTE: This routine MUST be called with valid thread context
 *	except for the case where the following two conditions are
 *	true:
 *
 *		xs->error == XS_NOERROR
 *		XS_CTL_ASYNC is set in xs->xs_control
 *
 *	The semantics of this routine can be tricky, so here is an
 *	explanation:
 *
 *		0		Xfer completed successfully.
 *
 *		ERESTART	Xfer had an error, but was restarted.
 *
 *		anything else	Xfer had an error, return value is Unix
 *				errno.
 *
 *	If the return value is anything but ERESTART:
 *
 *		- If XS_CTL_ASYNC is set, `xs' has been freed back to
 *		  the pool.
 *		- If there is a buf associated with the xfer,
 *		  it has been biodone()'d.
 */
static int
scsipi_complete(struct scsipi_xfer *xs)
{
	struct scsipi_periph *periph = xs->xs_periph;
	struct scsipi_channel *chan = periph->periph_channel;
	int error;

#ifdef DIAGNOSTIC
	if ((xs->xs_control & XS_CTL_ASYNC) != 0 && xs->bp == NULL)
		panic("scsipi_complete: XS_CTL_ASYNC but no buf");
#endif
	/*
	 * If command terminated with a CHECK CONDITION, we need to issue a
	 * REQUEST_SENSE command. Once the REQUEST_SENSE has been processed
	 * we'll have the real status.
	 * Must be processed with channel lock held to avoid missing
	 * a SCSI bus reset for this command.
	 */
	mutex_enter(chan_mtx(chan));
	if (xs->error == XS_BUSY && xs->status == SCSI_CHECK) {
		/* request sense for a request sense ? */
		if (xs->xs_control & XS_CTL_REQSENSE) {
			scsipi_printaddr(periph);
			printf("request sense for a request sense ?\n");
			/* XXX maybe we should reset the device ? */
			/* we've been frozen because xs->error != XS_NOERROR */
			scsipi_periph_thaw_locked(periph, 1);
			mutex_exit(chan_mtx(chan));
			if (xs->resid < xs->datalen) {
				printf("we read %d bytes of sense anyway:\n",
				    xs->datalen - xs->resid);
				scsipi_print_sense_data((void *)xs->data, 0);
			}
			return EINVAL;
		}
		mutex_exit(chan_mtx(chan)); // XXX allows other commands to queue or run
		scsipi_request_sense(xs);
	} else
		mutex_exit(chan_mtx(chan));

	/*
	 * If it's a user level request, bypass all usual completion
	 * processing, let the user work it out..
	 */
	if ((xs->xs_control & XS_CTL_USERCMD) != 0) {
		SC_DEBUG(periph, SCSIPI_DB3, ("calling user done()\n"));
		mutex_enter(chan_mtx(chan));
		if (xs->error != XS_NOERROR)
			scsipi_periph_thaw_locked(periph, 1);
		mutex_exit(chan_mtx(chan));
		scsipi_user_done(xs);
		SC_DEBUG(periph, SCSIPI_DB3, ("returned from user done()\n "));
		return 0;
	}

	switch (xs->error) {
	case XS_NOERROR:
		error = 0;
		break;

	case XS_SENSE:
	case XS_SHORTSENSE:
		error = (*chan->chan_bustype->bustype_interpret_sense)(xs);
		break;

	case XS_RESOURCE_SHORTAGE:
		/*
		 * XXX Should freeze channel's queue.
		 */
		scsipi_printaddr(periph);
		printf("adapter resource shortage\n");
		/* FALLTHROUGH */

	case XS_BUSY:
		if (xs->error == XS_BUSY && xs->status == SCSI_QUEUE_FULL) {
			struct scsipi_max_openings mo;

			/*
			 * We set the openings to active - 1, assuming that
			 * the command that got us here is the first one that
			 * can't fit into the device's queue.  If that's not
			 * the case, I guess we'll find out soon enough.
			 */
			mo.mo_target = periph->periph_target;
			mo.mo_lun = periph->periph_lun;
			if (periph->periph_active < periph->periph_openings)
				mo.mo_openings = periph->periph_active - 1;
			else
				mo.mo_openings = periph->periph_openings - 1;
#ifdef DIAGNOSTIC
			if (mo.mo_openings < 0) {
				scsipi_printaddr(periph);
				printf("QUEUE FULL resulted in < 0 openings\n");
				panic("scsipi_done");
			}
#endif
			if (mo.mo_openings == 0) {
				scsipi_printaddr(periph);
				printf("QUEUE FULL resulted in 0 openings\n");
				mo.mo_openings = 1;
			}
			scsipi_async_event(chan, ASYNC_EVENT_MAX_OPENINGS, &mo);
			error = ERESTART;
		} else if (xs->xs_retries != 0) {
			xs->xs_retries--;
			/*
			 * Wait one second, and try again.
			 */
			mutex_enter(chan_mtx(chan));
			if ((xs->xs_control & XS_CTL_POLL) ||
			    (chan->chan_flags & SCSIPI_CHAN_TACTIVE) == 0) {
				/* XXX: quite extreme */
				kpause("xsbusy", false, hz, chan_mtx(chan));
			} else if (!callout_pending(&periph->periph_callout)) {
				scsipi_periph_freeze_locked(periph, 1);
				callout_reset(&periph->periph_callout,
				    hz, scsipi_periph_timed_thaw, periph);
			}
			mutex_exit(chan_mtx(chan));
			error = ERESTART;
		} else
			error = EBUSY;
		break;

	case XS_REQUEUE:
		error = ERESTART;
		break;

	case XS_SELTIMEOUT:
	case XS_TIMEOUT:
		/*
		 * If the device hasn't gone away, honor retry counts.
		 *
		 * Note that if we're in the middle of probing it,
		 * it won't be found because it isn't here yet so
		 * we won't honor the retry count in that case.
		 */
		if (scsipi_lookup_periph(chan, periph->periph_target,
		    periph->periph_lun) && xs->xs_retries != 0) {
			xs->xs_retries--;
			error = ERESTART;
		} else
			error = EIO;
		break;

	case XS_RESET:
		if (xs->xs_control & XS_CTL_REQSENSE) {
			/*
			 * request sense interrupted by reset: signal it
			 * with EINTR return code.
			 */
			error = EINTR;
		} else {
			if (xs->xs_retries != 0) {
				xs->xs_retries--;
				error = ERESTART;
			} else
				error = EIO;
		}
		break;

	case XS_DRIVER_STUFFUP:
		scsipi_printaddr(periph);
		printf("generic HBA error\n");
		error = EIO;
		break;
	default:
		scsipi_printaddr(periph);
		printf("invalid return code from adapter: %d\n", xs->error);
		error = EIO;
		break;
	}

	mutex_enter(chan_mtx(chan));
	if (error == ERESTART) {
		/*
		 * If we get here, the periph has been thawed and frozen
		 * again if we had to issue recovery commands.  Alternatively,
		 * it may have been frozen again and in a timed thaw.  In
		 * any case, we thaw the periph once we re-enqueue the
		 * command.  Once the periph is fully thawed, it will begin
		 * operation again.
		 */
		xs->error = XS_NOERROR;
		xs->status = SCSI_OK;
		xs->xs_status &= ~XS_STS_DONE;
		xs->xs_requeuecnt++;
		error = scsipi_enqueue(xs);
		if (error == 0) {
			scsipi_periph_thaw_locked(periph, 1);
			mutex_exit(chan_mtx(chan));
			return (ERESTART);
		}
	}

	/*
	 * scsipi_done() freezes the queue if not XS_NOERROR.
	 * Thaw it here.
	 */
	if (xs->error != XS_NOERROR)
		scsipi_periph_thaw_locked(periph, 1);
	mutex_exit(chan_mtx(chan));

	if (periph->periph_switch->psw_done)
		periph->periph_switch->psw_done(xs, error);

	mutex_enter(chan_mtx(chan));
	if (xs->xs_control & XS_CTL_ASYNC)
		scsipi_put_xs(xs);
	mutex_exit(chan_mtx(chan));

	return (error);
}

/*
 * Issue a request sense for the given scsipi_xfer. Called when the xfer
 * returns with a CHECK_CONDITION status. Must be called in valid thread
 * context and with channel lock held.
 */

static void
scsipi_request_sense(struct scsipi_xfer *xs)
{
	struct scsipi_periph *periph = xs->xs_periph;
	int flags, error;
	struct scsi_request_sense cmd;

	periph->periph_flags |= PERIPH_SENSE;

	/* if command was polling, request sense will too */
	flags = xs->xs_control & XS_CTL_POLL;
	/* Polling commands can't sleep */
	if (flags)
		flags |= XS_CTL_NOSLEEP;

	flags |= XS_CTL_REQSENSE | XS_CTL_URGENT | XS_CTL_DATA_IN |
	    XS_CTL_THAW_PERIPH | XS_CTL_FREEZE_PERIPH;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = SCSI_REQUEST_SENSE;
	cmd.length = sizeof(struct scsi_sense_data);

	error = scsipi_command(periph, (void *)&cmd, sizeof(cmd),
	    (void *)&xs->sense.scsi_sense, sizeof(struct scsi_sense_data),
	    0, 1000, NULL, flags);
	periph->periph_flags &= ~PERIPH_SENSE;
	periph->periph_xscheck = NULL;
	switch (error) {
	case 0:
		/* we have a valid sense */
		xs->error = XS_SENSE;
		return;
	case EINTR:
		/* REQUEST_SENSE interrupted by bus reset. */
		xs->error = XS_RESET;
		return;
	case EIO:
		/* request sense couldn't be performed */
		/*
		 * XXX this isn't quite right but we don't have anything
		 * better for now
		 */
		xs->error = XS_DRIVER_STUFFUP;
		return;
	default:
		/* Notify that request sense failed. */
		xs->error = XS_DRIVER_STUFFUP;
		scsipi_printaddr(periph);
		printf("request sense failed with error %d\n", error);
		return;
	}
}

/*
 * scsipi_enqueue:
 *
 *	Enqueue an xfer on a channel.
 */
static int
scsipi_enqueue(struct scsipi_xfer *xs)
{
	struct scsipi_channel *chan = xs->xs_periph->periph_channel;
	struct scsipi_xfer *qxs;

	/*
	 * If the xfer is to be polled, and there are already jobs on
	 * the queue, we can't proceed.
	 */
	if ((xs->xs_control & XS_CTL_POLL) != 0 &&
	    TAILQ_FIRST(&chan->chan_queue) != NULL) {
		xs->error = XS_DRIVER_STUFFUP;
		return (EAGAIN);
	}

	/*
	 * If we have an URGENT xfer, it's an error recovery command
	 * and it should just go on the head of the channel's queue.
	 */
	if (xs->xs_control & XS_CTL_URGENT) {
		TAILQ_INSERT_HEAD(&chan->chan_queue, xs, channel_q);
		goto out;
	}

	/*
	 * If this xfer has already been on the queue before, we
	 * need to reinsert it in the correct order.  That order is:
	 *
	 *	Immediately before the first xfer for this periph
	 *	with a requeuecnt less than xs->xs_requeuecnt.
	 *
	 * Failing that, at the end of the queue.  (We'll end up
	 * there naturally.)
	 */
	if (xs->xs_requeuecnt != 0) {
		for (qxs = TAILQ_FIRST(&chan->chan_queue); qxs != NULL;
		     qxs = TAILQ_NEXT(qxs, channel_q)) {
			if (qxs->xs_periph == xs->xs_periph &&
			    qxs->xs_requeuecnt < xs->xs_requeuecnt)
				break;
		}
		if (qxs != NULL) {
			TAILQ_INSERT_AFTER(&chan->chan_queue, qxs, xs,
			    channel_q);
			goto out;
		}
	}
	TAILQ_INSERT_TAIL(&chan->chan_queue, xs, channel_q);
 out:
	if (xs->xs_control & XS_CTL_THAW_PERIPH)
		scsipi_periph_thaw_locked(xs->xs_periph, 1);
	return (0);
}

/*
 * scsipi_run_queue:
 *
 *	Start as many xfers as possible running on the channel.
 */
static void
scsipi_run_queue(struct scsipi_channel *chan)
{
	struct scsipi_xfer *xs;
	struct scsipi_periph *periph;

	for (;;) {
		mutex_enter(chan_mtx(chan));

		/*
		 * If the channel is frozen, we can't do any work right
		 * now.
		 */
		if (chan->chan_qfreeze != 0) {
			mutex_exit(chan_mtx(chan));
			return;
		}

		/*
		 * Look for work to do, and make sure we can do it.
		 */
		for (xs = TAILQ_FIRST(&chan->chan_queue); xs != NULL;
		     xs = TAILQ_NEXT(xs, channel_q)) {
			periph = xs->xs_periph;

			if ((periph->periph_sent >= periph->periph_openings) ||
			    periph->periph_qfreeze != 0 ||
			    (periph->periph_flags & PERIPH_UNTAG) != 0)
				continue;

			if ((periph->periph_flags &
			    (PERIPH_RECOVERING | PERIPH_SENSE)) != 0 &&
			    (xs->xs_control & XS_CTL_URGENT) == 0)
				continue;

			/*
			 * We can issue this xfer!
			 */
			goto got_one;
		}

		/*
		 * Can't find any work to do right now.
		 */
		mutex_exit(chan_mtx(chan));
		return;

 got_one:
		/*
		 * Have an xfer to run.  Allocate a resource from
		 * the adapter to run it.  If we can't allocate that
		 * resource, we don't dequeue the xfer.
		 */
		if (scsipi_get_resource(chan) == 0) {
			/*
			 * Adapter is out of resources.  If the adapter
			 * supports it, attempt to grow them.
			 */
			if (scsipi_grow_resources(chan) == 0) {
				/*
				 * Wasn't able to grow resources,
				 * nothing more we can do.
				 */
				if (xs->xs_control & XS_CTL_POLL) {
					scsipi_printaddr(xs->xs_periph);
					printf("polling command but no "
					    "adapter resources");
					/* We'll panic shortly... */
				}
				mutex_exit(chan_mtx(chan));
				/*
				 * XXX: We should be able to note that
				 * XXX: resources are needed here!
				 */
1917 */
1918 return;
1919 }
1920 /*
1921 * scsipi_grow_resources() allocated the resource
1922 * for us.
1923 */
1924 }
1925
1926 /*
1927 * We have a resource to run this xfer, do it!
1928 */
1929 TAILQ_REMOVE(&chan->chan_queue, xs, channel_q);
1930
1931 /*
1932 * If the command is to be tagged, allocate a tag ID
1933 * for it.
1934 */
1935 if (XS_CTL_TAGTYPE(xs) != 0)
1936 scsipi_get_tag(xs);
1937 else
1938 periph->periph_flags |= PERIPH_UNTAG;
1939 periph->periph_sent++;
1940 mutex_exit(chan_mtx(chan));
1941
1942 scsipi_adapter_request(chan, ADAPTER_REQ_RUN_XFER, xs);
1943 }
1944 #ifdef DIAGNOSTIC
1945 panic("scsipi_run_queue: impossible");
1946 #endif
1947 }
1948
1949 /*
1950 * scsipi_execute_xs:
1951 *
1952 * Begin execution of an xfer, waiting for it to complete, if necessary.
1953 */
1954 int
1955 scsipi_execute_xs(struct scsipi_xfer *xs)
1956 {
1957 struct scsipi_periph *periph = xs->xs_periph;
1958 struct scsipi_channel *chan = periph->periph_channel;
1959 int oasync, async, poll, error;
1960
1961 KASSERT(!cold);
1962
1963 (chan->chan_bustype->bustype_cmd)(xs);
1964
1965 xs->xs_status &= ~XS_STS_DONE;
1966 xs->error = XS_NOERROR;
1967 xs->resid = xs->datalen;
1968 xs->status = SCSI_OK;
1969
1970 #ifdef SCSIPI_DEBUG
1971 if (xs->xs_periph->periph_dbflags & SCSIPI_DB3) {
1972 printf("scsipi_execute_xs: ");
1973 show_scsipi_xs(xs);
1974 printf("\n");
1975 }
1976 #endif
1977
1978 /*
1979 * Deal with command tagging:
1980 *
1981 * - If the device's current operating mode doesn't
1982 * include tagged queueing, clear the tag mask.
1983 *
1984 * - If the device's current operating mode *does*
1985 * include tagged queueing, set the tag_type in
1986 * the xfer to the appropriate byte for the tag
1987 * message.
1988 */
1989 if ((PERIPH_XFER_MODE(periph) & PERIPH_CAP_TQING) == 0 ||
1990 (xs->xs_control & XS_CTL_REQSENSE)) {
1991 xs->xs_control &= ~XS_CTL_TAGMASK;
1992 xs->xs_tag_type = 0;
1993 } else {
1994 /*
1995 * If the request doesn't specify a tag, give Head
1996 * tags to URGENT operations and Simple tags to
1997 * everything else.
1998 */
1999 if (XS_CTL_TAGTYPE(xs) == 0) {
2000 if (xs->xs_control & XS_CTL_URGENT)
2001 xs->xs_control |= XS_CTL_HEAD_TAG;
2002 else
2003 xs->xs_control |= XS_CTL_SIMPLE_TAG;
2004 }
2005
2006 switch (XS_CTL_TAGTYPE(xs)) {
2007 case XS_CTL_ORDERED_TAG:
2008 xs->xs_tag_type = MSG_ORDERED_Q_TAG;
2009 break;
2010
2011 case XS_CTL_SIMPLE_TAG:
2012 xs->xs_tag_type = MSG_SIMPLE_Q_TAG;
2013 break;
2014
2015 case XS_CTL_HEAD_TAG:
2016 xs->xs_tag_type = MSG_HEAD_OF_Q_TAG;
2017 break;
2018
2019 default:
2020 scsipi_printaddr(periph);
2021 printf("invalid tag mask 0x%08x\n",
2022 XS_CTL_TAGTYPE(xs));
2023 panic("scsipi_execute_xs");
2024 }
2025 }
2026
2027 /* If the adaptor wants us to poll, poll. */
2028 if (chan->chan_adapter->adapt_flags & SCSIPI_ADAPT_POLL_ONLY)
2029 xs->xs_control |= XS_CTL_POLL;
2030
2031 /*
2032 * If we don't yet have a completion thread, or we are to poll for
2033 * completion, clear the ASYNC flag.
2034 */
2035 oasync = (xs->xs_control & XS_CTL_ASYNC);
2036 if (chan->chan_thread == NULL || (xs->xs_control & XS_CTL_POLL) != 0)
2037 xs->xs_control &= ~XS_CTL_ASYNC;
2038
2039 async = (xs->xs_control & XS_CTL_ASYNC);
2040 poll = (xs->xs_control & XS_CTL_POLL);
2041
2042 #ifdef DIAGNOSTIC
2043 if (oasync != 0 && xs->bp == NULL)
2044 panic("scsipi_execute_xs: XS_CTL_ASYNC but no buf");
2045 #endif
2046
2047 /*
2048 * Enqueue the transfer. If we're not polling for completion, this
2049 * should ALWAYS return `no error'.
2050 */
2051 error = scsipi_enqueue(xs);
2052 if (error) {
2053 if (poll == 0) {
2054 scsipi_printaddr(periph);
2055 printf("not polling, but enqueue failed with %d\n",
2056 error);
2057 panic("scsipi_execute_xs");
2058 }
2059
2060 scsipi_printaddr(periph);
2061 printf("should have flushed queue?\n");
2062 goto free_xs;
2063 }
2064
2065 mutex_exit(chan_mtx(chan));
2066 restarted:
2067 scsipi_run_queue(chan);
2068 mutex_enter(chan_mtx(chan));
2069
2070 /*
2071 * The xfer is enqueued, and possibly running. If it's to be
2072 * completed asynchronously, just return now.
2073 */
2074 if (async)
2075 return (0);
2076
2077 /*
2078 * Not an asynchronous command; wait for it to complete.
2079 */
2080 while ((xs->xs_status & XS_STS_DONE) == 0) {
2081 if (poll) {
2082 scsipi_printaddr(periph);
2083 printf("polling command not done\n");
2084 panic("scsipi_execute_xs");
2085 }
2086 cv_wait(xs_cv(xs), chan_mtx(chan));
2087 }
2088
2089 /*
2090 * Command is complete. scsipi_done() has awakened us to perform
2091 * the error handling.
2092 */
2093 mutex_exit(chan_mtx(chan));
2094 error = scsipi_complete(xs);
2095 if (error == ERESTART)
2096 goto restarted;
2097
2098 /*
2099 * If it was meant to run async and we cleared async ourselves,
2100 * don't return an error here; it has already been handled.
2101 */
2102 if (oasync)
2103 error = 0;
2104 /*
2105 * Command completed successfully or a fatal error occurred.
2106 * Fall through to release the xfer.
2107 */
2108 mutex_enter(chan_mtx(chan));
2109 free_xs:
2110 scsipi_put_xs(xs);
2111 mutex_exit(chan_mtx(chan));
2112
2113 /*
2114 * Kick the queue, keep it running in case it stopped for some
2115 * reason.
2116 */
2117 scsipi_run_queue(chan);
2118
2119 mutex_enter(chan_mtx(chan));
2120 return (error);
2121 }
2122
2123 /*
2124 * scsipi_completion_thread:
2125 *
2126 * This is the completion thread. We wait for errors on
2127 * asynchronous xfers, perform the error handling, and
2128 * restart the command if necessary.
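*
* The thread exits when SCSIPI_CHANT_SHUTDOWN is set; the detach
* path (see scsipi_channel_shutdown()) is then expected to sleep
* on chan_cv_thread() until chan_thread goes NULL.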
2129 */
2130 static void
2131 scsipi_completion_thread(void *arg)
2132 {
2133 struct scsipi_channel *chan = arg;
2134 struct scsipi_xfer *xs;
2135
2136 if (chan->chan_init_cb)
2137 (*chan->chan_init_cb)(chan, chan->chan_init_cb_arg);
2138
2139 mutex_enter(chan_mtx(chan));
2140 chan->chan_flags |= SCSIPI_CHAN_TACTIVE;
2141 for (;;) {
2142 xs = TAILQ_FIRST(&chan->chan_complete);
2143 if (xs == NULL && chan->chan_tflags == 0) {
2144 /* nothing to do; wait */
2145 cv_wait(chan_cv_complete(chan), chan_mtx(chan));
2146 continue;
2147 }
2148 if (chan->chan_tflags & SCSIPI_CHANT_CALLBACK) {
2149 /* call chan_callback from thread context */
2150 chan->chan_tflags &= ~SCSIPI_CHANT_CALLBACK;
2151 chan->chan_callback(chan, chan->chan_callback_arg);
2152 continue;
2153 }
2154 if (chan->chan_tflags & SCSIPI_CHANT_GROWRES) {
2155 /* attempt to get more openings for this channel */
2156 chan->chan_tflags &= ~SCSIPI_CHANT_GROWRES;
2157 mutex_exit(chan_mtx(chan));
2158 scsipi_adapter_request(chan,
2159 ADAPTER_REQ_GROW_RESOURCES, NULL);
2160 scsipi_channel_thaw(chan, 1);
2161 if (chan->chan_tflags & SCSIPI_CHANT_GROWRES)
2162 kpause("scsizzz", FALSE, hz/10, NULL);
2163 mutex_enter(chan_mtx(chan));
2164 continue;
2165 }
2166 if (chan->chan_tflags & SCSIPI_CHANT_KICK) {
2167 /* explicitly run the queues for this channel */
2168 chan->chan_tflags &= ~SCSIPI_CHANT_KICK;
2169 mutex_exit(chan_mtx(chan));
2170 scsipi_run_queue(chan);
2171 mutex_enter(chan_mtx(chan));
2172 continue;
2173 }
2174 if (chan->chan_tflags & SCSIPI_CHANT_SHUTDOWN) {
2175 break;
2176 }
2177 if (xs) {
2178 TAILQ_REMOVE(&chan->chan_complete, xs, channel_q);
2179 mutex_exit(chan_mtx(chan));
2180
2181 /*
2182 * Have an xfer with an error; process it.
2183 */
2184 (void) scsipi_complete(xs);
2185
2186 /*
2187 * Kick the queue; keep it running if it was stopped
2188 * for some reason.
2189 */
2190 scsipi_run_queue(chan);
2191 mutex_enter(chan_mtx(chan));
2192 }
2193 }
2194
2195 chan->chan_thread = NULL;
2196
2197 /* In case parent is waiting for us to exit. */
2198 cv_broadcast(chan_cv_thread(chan));
2199 mutex_exit(chan_mtx(chan));
2200
2201 kthread_exit(0);
2202 }

2203 /*
2204 * scsipi_thread_call_callback:
2205 *
2206 * Request that the completion thread call the given callback.
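*
* Example (a sketch; foo_cb is hypothetical): an adapter wanting
* work done in thread context might use
*
*	if (scsipi_thread_call_callback(chan, foo_cb, sc) == ESRCH)
*		... no thread yet, do the work inline ...
*
* The channel is frozen by one count before the callback is
* scheduled; the callback is expected to thaw it when done.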
2207 */
2208 int
2209 scsipi_thread_call_callback(struct scsipi_channel *chan,
2210 void (*callback)(struct scsipi_channel *, void *), void *arg)
2211 {
2212
2213 mutex_enter(chan_mtx(chan));
2214 if ((chan->chan_flags & SCSIPI_CHAN_TACTIVE) == 0) {
2215 /* kernel thread doesn't exist yet */
2216 mutex_exit(chan_mtx(chan));
2217 return ESRCH;
2218 }
2219 if (chan->chan_tflags & SCSIPI_CHANT_CALLBACK) {
2220 mutex_exit(chan_mtx(chan));
2221 return EBUSY;
2222 }
2223 scsipi_channel_freeze(chan, 1);
2224 chan->chan_callback = callback;
2225 chan->chan_callback_arg = arg;
2226 chan->chan_tflags |= SCSIPI_CHANT_CALLBACK;
2227 cv_broadcast(chan_cv_complete(chan));
2228 mutex_exit(chan_mtx(chan));
2229 return (0);
2230 }
2231
2232 /*
2233 * scsipi_async_event:
2234 *
2235 * Handle an asynchronous event from an adapter.
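*
* Example (a sketch): an adapter's interrupt handler that detects
* a bus reset would report it with
*
*	scsipi_async_event(chan, ASYNC_EVENT_RESET, NULL);
*
* The channel lock is taken here, so the handlers below run locked.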
2236 */
2237 void
2238 scsipi_async_event(struct scsipi_channel *chan, scsipi_async_event_t event,
2239 void *arg)
2240 {
2241
2242 mutex_enter(chan_mtx(chan));
2243 switch (event) {
2244 case ASYNC_EVENT_MAX_OPENINGS:
2245 scsipi_async_event_max_openings(chan,
2246 (struct scsipi_max_openings *)arg);
2247 break;
2248
2249 case ASYNC_EVENT_XFER_MODE:
2250 if (chan->chan_bustype->bustype_async_event_xfer_mode) {
2251 chan->chan_bustype->bustype_async_event_xfer_mode(
2252 chan, arg);
2253 }
2254 break;
2255 case ASYNC_EVENT_RESET:
2256 scsipi_async_event_channel_reset(chan);
2257 break;
2258 }
2259 mutex_exit(chan_mtx(chan));
2260 }
2261
2262 /*
2263 * scsipi_async_event_max_openings:
2264 *
2265 * Update the maximum number of outstanding commands a
2266 * device may have.
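*
* Example (a sketch): an adapter seeing QUEUE FULL from target 3,
* LUN 0 with four commands still outstanding could clamp it with
*
*	struct scsipi_max_openings mo;
*
*	mo.mo_target = 3;
*	mo.mo_lun = 0;
*	mo.mo_openings = 4;
*	scsipi_async_event(chan, ASYNC_EVENT_MAX_OPENINGS, &mo);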
2267 */
2268 static void
2269 scsipi_async_event_max_openings(struct scsipi_channel *chan,
2270 struct scsipi_max_openings *mo)
2271 {
2272 struct scsipi_periph *periph;
2273 int minlun, maxlun;
2274
2275 if (mo->mo_lun == -1) {
2276 /*
2277 * Wildcarded; apply it to all LUNs.
2278 */
2279 minlun = 0;
2280 maxlun = chan->chan_nluns - 1;
2281 } else
2282 minlun = maxlun = mo->mo_lun;
2283
2284 /* XXX This could really suck with a large LUN space. */
2285 for (; minlun <= maxlun; minlun++) {
2286 periph = scsipi_lookup_periph_locked(chan, mo->mo_target, minlun);
2287 if (periph == NULL)
2288 continue;
2289
2290 if (mo->mo_openings < periph->periph_openings)
2291 periph->periph_openings = mo->mo_openings;
2292 else if (mo->mo_openings > periph->periph_openings &&
2293 (periph->periph_flags & PERIPH_GROW_OPENINGS) != 0)
2294 periph->periph_openings = mo->mo_openings;
2295 }
2296 }
2297
2298 /*
2299 * scsipi_set_xfer_mode:
2300 *
2301 * Set the xfer mode for the specified I_T Nexus.
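*
* Example (a sketch): a driver that wants negotiation redone for
* every target right away might loop
*
*	for (target = 0; target < chan->chan_ntargets; target++)
*		scsipi_set_xfer_mode(chan, target, 1);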
2302 */
2303 void
2304 scsipi_set_xfer_mode(struct scsipi_channel *chan, int target, int immed)
2305 {
2306 struct scsipi_xfer_mode xm;
2307 struct scsipi_periph *itperiph;
2308 int lun;
2309
2310 /*
2311 * Go to the minimal xfer mode.
2312 */
2313 xm.xm_target = target;
2314 xm.xm_mode = 0;
2315 xm.xm_period = 0; /* ignored */
2316 xm.xm_offset = 0; /* ignored */
2317
2318 /*
2319 * Find the first LUN we know about on this I_T Nexus.
2320 */
2321 for (itperiph = NULL, lun = 0; lun < chan->chan_nluns; lun++) {
2322 itperiph = scsipi_lookup_periph(chan, target, lun);
2323 if (itperiph != NULL)
2324 break;
2325 }
2326 if (itperiph != NULL) {
2327 xm.xm_mode = itperiph->periph_cap;
2328 /*
2329 * Now issue the request to the adapter.
2330 */
2331 scsipi_adapter_request(chan, ADAPTER_REQ_SET_XFER_MODE, &xm);
2332 /*
2333 * If we want this to happen immediately, issue a dummy
2334 * command, since most adapters can't really negotiate unless
2335 * they're executing a job.
2336 */
2337 if (immed != 0) {
2338 (void) scsipi_test_unit_ready(itperiph,
2339 XS_CTL_DISCOVERY | XS_CTL_IGNORE_ILLEGAL_REQUEST |
2340 XS_CTL_IGNORE_NOT_READY |
2341 XS_CTL_IGNORE_MEDIA_CHANGE);
2342 }
2343 }
2344 }
2345
2346 /*
2347 * scsipi_async_event_channel_reset:
2348 *
2349 * Handle a SCSI bus reset.
2350 * Called with the channel lock held.
2351 */
2352 static void
2353 scsipi_async_event_channel_reset(struct scsipi_channel *chan)
2354 {
2355 struct scsipi_xfer *xs, *xs_next;
2356 struct scsipi_periph *periph;
2357 int target, lun;
2358
2359 /*
2360 * Channel has been reset. Also mark pending REQUEST_SENSE
2361 * commands as reset, since the sense is no longer available.
2362 * We can't call scsipi_done() from here, as the command has not
2363 * been sent to the adapter yet (that would corrupt the accounting).
2364 */
2365
2366 for (xs = TAILQ_FIRST(&chan->chan_queue); xs != NULL; xs = xs_next) {
2367 xs_next = TAILQ_NEXT(xs, channel_q);
2368 if (xs->xs_control & XS_CTL_REQSENSE) {
2369 TAILQ_REMOVE(&chan->chan_queue, xs, channel_q);
2370 xs->error = XS_RESET;
2371 if ((xs->xs_control & XS_CTL_ASYNC) != 0)
2372 TAILQ_INSERT_TAIL(&chan->chan_complete, xs,
2373 channel_q);
2374 }
2375 }
2376 cv_broadcast(chan_cv_complete(chan));
2377 /* Catch xfers with pending sense that have no REQSENSE xfer yet */
2378 for (target = 0; target < chan->chan_ntargets; target++) {
2379 if (target == chan->chan_id)
2380 continue;
2381 for (lun = 0; lun < chan->chan_nluns; lun++) {
2382 periph = scsipi_lookup_periph_locked(chan, target, lun);
2383 if (periph) {
2384 xs = periph->periph_xscheck;
2385 if (xs)
2386 xs->error = XS_RESET;
2387 }
2388 }
2389 }
2390 }
2391
2392 /*
2393 * scsipi_target_detach:
2394 *
2395 * Detach all peripherals associated with an I_T Nexus.
2396 * Must be called from valid thread context.
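*
* Example (a sketch): force-detach every LUN of target 3, or pass
* -1 for both target and lun to sweep the whole channel:
*
*	error = scsipi_target_detach(chan, 3, -1, DETACH_FORCE);
*
* The channel's own ID is always skipped.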
2397 */
2398 int
2399 scsipi_target_detach(struct scsipi_channel *chan, int target, int lun,
2400 int flags)
2401 {
2402 struct scsipi_periph *periph;
2403 int ctarget, mintarget, maxtarget;
2404 int clun, minlun, maxlun;
2405 int error;
2406
2407 if (target == -1) {
2408 mintarget = 0;
2409 maxtarget = chan->chan_ntargets;
2410 } else {
2411 if (target == chan->chan_id)
2412 return EINVAL;
2413 if (target < 0 || target >= chan->chan_ntargets)
2414 return EINVAL;
2415 mintarget = target;
2416 maxtarget = target + 1;
2417 }
2418
2419 if (lun == -1) {
2420 minlun = 0;
2421 maxlun = chan->chan_nluns;
2422 } else {
2423 if (lun < 0 || lun >= chan->chan_nluns)
2424 return EINVAL;
2425 minlun = lun;
2426 maxlun = lun + 1;
2427 }
2428
2429 for (ctarget = mintarget; ctarget < maxtarget; ctarget++) {
2430 if (ctarget == chan->chan_id)
2431 continue;
2432
2433 for (clun = minlun; clun < maxlun; clun++) {
2434 periph = scsipi_lookup_periph(chan, ctarget, clun);
2435 if (periph == NULL)
2436 continue;
2437 error = config_detach(periph->periph_dev, flags);
2438 if (error)
2439 return (error);
2440 }
2441 }
2442 return (0);
2443 }
2444
2445 /*
2446 * scsipi_adapter_addref:
2447 *
2448 * Add a reference to the given adapter, enabling the
2449 * adapter if necessary.
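*
* Example (a sketch): open paths typically bracket use of the
* hardware with a reference:
*
*	if ((error = scsipi_adapter_addref(adapt)) != 0)
*		return error;
*	... issue commands ...
*	scsipi_adapter_delref(adapt);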
2450 */
2451 int
2452 scsipi_adapter_addref(struct scsipi_adapter *adapt)
2453 {
2454 int error = 0;
2455
2456 if (atomic_inc_uint_nv(&adapt->adapt_refcnt) == 1
2457 && adapt->adapt_enable != NULL) {
2458 scsipi_adapter_lock(adapt);
2459 error = scsipi_adapter_enable(adapt, 1);
2460 scsipi_adapter_unlock(adapt);
2461 if (error)
2462 atomic_dec_uint(&adapt->adapt_refcnt);
2463 }
2464 return (error);
2465 }
2466
2467 /*
2468 * scsipi_adapter_delref:
2469 *
2470 * Drop a reference to the given adapter, disabling the
2471 * adapter if possible.
2472 */
2473 void
2474 scsipi_adapter_delref(struct scsipi_adapter *adapt)
2475 {
2476
2477 if (atomic_dec_uint_nv(&adapt->adapt_refcnt) == 0
2478 && adapt->adapt_enable != NULL) {
2479 scsipi_adapter_lock(adapt);
2480 (void) scsipi_adapter_enable(adapt, 0);
2481 scsipi_adapter_unlock(adapt);
2482 }
2483 }
2484
2485 static struct scsipi_syncparam {
2486 int ss_factor;
2487 int ss_period; /* ns * 100 */
2488 } scsipi_syncparams[] = {
2489 { 0x08, 625 }, /* FAST-160 (Ultra320) */
2490 { 0x09, 1250 }, /* FAST-80 (Ultra160) */
2491 { 0x0a, 2500 }, /* FAST-40 40MHz (Ultra2) */
2492 { 0x0b, 3030 }, /* FAST-40 33MHz (Ultra2) */
2493 { 0x0c, 5000 }, /* FAST-20 (Ultra) */
2494 };
2495 static const int scsipi_nsyncparams =
2496 sizeof(scsipi_syncparams) / sizeof(scsipi_syncparams[0]);
2497
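/*
 * scsipi_sync_period_to_factor, scsipi_sync_factor_to_period,
 * scsipi_sync_factor_to_freq:
 *
 *	Convert between SCSI synchronous transfer factors, periods
 *	(in units of ns * 100) and frequencies (in kHz, going by the
 *	arithmetic below).  Factors at FAST-20 and above come from the
 *	table; slower speeds use the classic factor = period(ns) / 4
 *	rule.
 */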
2498 int
2499 scsipi_sync_period_to_factor(int period /* ns * 100 */)
2500 {
2501 int i;
2502
2503 for (i = 0; i < scsipi_nsyncparams; i++) {
2504 if (period <= scsipi_syncparams[i].ss_period)
2505 return (scsipi_syncparams[i].ss_factor);
2506 }
2507
2508 return ((period / 100) / 4);
2509 }
2510
2511 int
2512 scsipi_sync_factor_to_period(int factor)
2513 {
2514 int i;
2515
2516 for (i = 0; i < scsipi_nsyncparams; i++) {
2517 if (factor == scsipi_syncparams[i].ss_factor)
2518 return (scsipi_syncparams[i].ss_period);
2519 }
2520
2521 return ((factor * 4) * 100);
2522 }
2523
2524 int
2525 scsipi_sync_factor_to_freq(int factor)
2526 {
2527 int i;
2528
2529 for (i = 0; i < scsipi_nsyncparams; i++) {
2530 if (factor == scsipi_syncparams[i].ss_factor)
2531 return (100000000 / scsipi_syncparams[i].ss_period);
2532 }
2533
2534 return (10000000 / ((factor * 4) * 10));
2535 }
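
/*
 * Worked example: factor 0x0c (FAST-20) hits the table, giving
 * 100000000 / 5000 = 20000 (20 MHz); a non-table factor such as
 * 50 falls through to 10000000 / (50 * 4 * 10) = 5000 (5 MHz),
 * matching the 200ns period scsipi_sync_factor_to_period() computes.
 */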
2536
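/*
 * scsipi_adapter_lock, scsipi_adapter_unlock:
 *
 *	Adapters not marked SCSIPI_ADAPT_MPSAFE are serialized by
 *	taking the kernel lock around their entry points.
 */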
2537 static inline void
2538 scsipi_adapter_lock(struct scsipi_adapter *adapt)
2539 {
2540
2541 if ((adapt->adapt_flags & SCSIPI_ADAPT_MPSAFE) == 0)
2542 KERNEL_LOCK(1, NULL);
2543 }
2544
2545 static inline void
2546 scsipi_adapter_unlock(struct scsipi_adapter *adapt)
2547 {
2548
2549 if ((adapt->adapt_flags & SCSIPI_ADAPT_MPSAFE) == 0)
2550 KERNEL_UNLOCK_ONE(NULL);
2551 }
2552
2553 void
2554 scsipi_adapter_minphys(struct scsipi_channel *chan, struct buf *bp)
2555 {
2556 struct scsipi_adapter *adapt = chan->chan_adapter;
2557
2558 scsipi_adapter_lock(adapt);
2559 (adapt->adapt_minphys)(bp);
2560 scsipi_adapter_unlock(adapt);
2561 }
2562
2563 void
2564 scsipi_adapter_request(struct scsipi_channel *chan,
2565 scsipi_adapter_req_t req, void *arg)
2567 {
2568 struct scsipi_adapter *adapt = chan->chan_adapter;
2569
2570 scsipi_adapter_lock(adapt);
2571 (adapt->adapt_request)(chan, req, arg);
2572 scsipi_adapter_unlock(adapt);
2573 }
2574
2575 int
2576 scsipi_adapter_ioctl(struct scsipi_channel *chan, u_long cmd,
2577 void *data, int flag, struct proc *p)
2578 {
2579 struct scsipi_adapter *adapt = chan->chan_adapter;
2580 int error;
2581
2582 if (adapt->adapt_ioctl == NULL)
2583 return ENOTTY;
2584
2585 scsipi_adapter_lock(adapt);
2586 error = (adapt->adapt_ioctl)(chan, cmd, data, flag, p);
2587 scsipi_adapter_unlock(adapt);
2588 return error;
2589 }
2590
2591 int
2592 scsipi_adapter_enable(struct scsipi_adapter *adapt, int enable)
2593 {
2594 int error;
2595
2596 scsipi_adapter_lock(adapt);
2597 error = (adapt->adapt_enable)(adapt->adapt_dev, enable);
2598 scsipi_adapter_unlock(adapt);
2599 return error;
2600 }
2601
2602 #ifdef SCSIPI_DEBUG
2603 /*
2604 * Given a scsipi_xfer, dump the request, in all its glory
2605 */
2606 void
2607 show_scsipi_xs(struct scsipi_xfer *xs)
2608 {
2609
2610 printf("xs(%p): ", xs);
2611 printf("xs_control(0x%08x)", xs->xs_control);
2612 printf("xs_status(0x%08x)", xs->xs_status);
2613 printf("periph(%p)", xs->xs_periph);
2614 printf("retr(0x%x)", xs->xs_retries);
2615 printf("timo(0x%x)", xs->timeout);
2616 printf("cmd(%p)", xs->cmd);
2617 printf("len(0x%x)", xs->cmdlen);
2618 printf("data(%p)", xs->data);
2619 printf("len(0x%x)", xs->datalen);
2620 printf("res(0x%x)", xs->resid);
2621 printf("err(0x%x)", xs->error);
2622 printf("bp(%p)", xs->bp);
2623 show_scsipi_cmd(xs);
2624 }
2625
2626 void
2627 show_scsipi_cmd(struct scsipi_xfer *xs)
2628 {
2629 u_char *b = (u_char *) xs->cmd;
2630 int i = 0;
2631
2632 scsipi_printaddr(xs->xs_periph);
2633 printf(" command: ");
2634
2635 if ((xs->xs_control & XS_CTL_RESET) == 0) {
2636 while (i < xs->cmdlen) {
2637 if (i)
2638 printf(",");
2639 printf("0x%x", b[i++]);
2640 }
2641 printf("-[%d bytes]\n", xs->datalen);
2642 if (xs->datalen)
2643 show_mem(xs->data, min(64, xs->datalen));
2644 } else
2645 printf("-RESET-\n");
2646 }
2647
2648 void
2649 show_mem(u_char *address, int num)
2650 {
2651 int x;
2652
2653 printf("------------------------------");
2654 for (x = 0; x < num; x++) {
2655 if ((x % 16) == 0)
2656 printf("\n%03d: ", x);
2657 printf("%02x ", *address++);
2658 }
2659 printf("\n------------------------------\n");
2660 }
2661 #endif /* SCSIPI_DEBUG */
2662