/*	$NetBSD: scsipi_base.c,v 1.191 2024/10/28 14:36:43 nat Exp $	*/

/*-
 * Copyright (c) 1998, 1999, 2000, 2002, 2003, 2004 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Charles M. Hannum; by Jason R. Thorpe of the Numerical Aerospace
 * Simulation Facility, NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: scsipi_base.c,v 1.191 2024/10/28 14:36:43 nat Exp $");

#ifdef _KERNEL_OPT
#include "opt_scsi.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/buf.h>
#include <sys/uio.h>
#include <sys/malloc.h>
#include <sys/pool.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/proc.h>
#include <sys/kthread.h>
#include <sys/hash.h>
#include <sys/atomic.h>

#include <dev/scsipi/scsi_sdt.h>
#include <dev/scsipi/scsi_spc.h>
#include <dev/scsipi/scsipi_all.h>
#include <dev/scsipi/scsipi_disk.h>
#include <dev/scsipi/scsipiconf.h>
#include <dev/scsipi/scsipi_base.h>

#include <dev/scsipi/scsi_all.h>
#include <dev/scsipi/scsi_message.h>

#include <machine/param.h>

SDT_PROVIDER_DEFINE(scsi);

SDT_PROBE_DEFINE3(scsi, base, tag, get,
    "struct scsipi_xfer *"/*xs*/, "uint8_t"/*tag*/, "uint8_t"/*type*/);
SDT_PROBE_DEFINE3(scsi, base, tag, put,
    "struct scsipi_xfer *"/*xs*/, "uint8_t"/*tag*/, "uint8_t"/*type*/);

SDT_PROBE_DEFINE3(scsi, base, adapter, request__start,
    "struct scsipi_channel *"/*chan*/,
    "scsipi_adapter_req_t"/*req*/,
    "void *"/*arg*/);
SDT_PROBE_DEFINE3(scsi, base, adapter, request__done,
    "struct scsipi_channel *"/*chan*/,
    "scsipi_adapter_req_t"/*req*/,
    "void *"/*arg*/);

SDT_PROBE_DEFINE1(scsi, base, queue, batch__start,
    "struct scsipi_channel *"/*chan*/);
SDT_PROBE_DEFINE2(scsi, base, queue, run,
    "struct scsipi_channel *"/*chan*/,
    "struct scsipi_xfer *"/*xs*/);
SDT_PROBE_DEFINE1(scsi, base, queue, batch__done,
    "struct scsipi_channel *"/*chan*/);

SDT_PROBE_DEFINE1(scsi, base, xfer, execute, "struct scsipi_xfer *"/*xs*/);
SDT_PROBE_DEFINE1(scsi, base, xfer, enqueue, "struct scsipi_xfer *"/*xs*/);
SDT_PROBE_DEFINE1(scsi, base, xfer, done, "struct scsipi_xfer *"/*xs*/);
SDT_PROBE_DEFINE1(scsi, base, xfer, redone, "struct scsipi_xfer *"/*xs*/);
SDT_PROBE_DEFINE1(scsi, base, xfer, complete, "struct scsipi_xfer *"/*xs*/);
SDT_PROBE_DEFINE1(scsi, base, xfer, restart, "struct scsipi_xfer *"/*xs*/);
SDT_PROBE_DEFINE1(scsi, base, xfer, free, "struct scsipi_xfer *"/*xs*/);

static int	scsipi_complete(struct scsipi_xfer *);
static struct scsipi_channel *
		scsipi_done_internal(struct scsipi_xfer *, bool);
static void	scsipi_request_sense(struct scsipi_xfer *);
static int	scsipi_enqueue(struct scsipi_xfer *);
static void	scsipi_run_queue(struct scsipi_channel *chan);

static void	scsipi_completion_thread(void *);

static void	scsipi_get_tag(struct scsipi_xfer *);
static void	scsipi_put_tag(struct scsipi_xfer *);

static int	scsipi_get_resource(struct scsipi_channel *);
static void	scsipi_put_resource(struct scsipi_channel *);

static void	scsipi_async_event_max_openings(struct scsipi_channel *,
		    struct scsipi_max_openings *);
static void	scsipi_async_event_channel_reset(struct scsipi_channel *);

static void	scsipi_channel_freeze_locked(struct scsipi_channel *, int);

static void	scsipi_adapter_lock(struct scsipi_adapter *adapt);
static void	scsipi_adapter_unlock(struct scsipi_adapter *adapt);

static void	scsipi_update_timeouts(struct scsipi_xfer *xs);

static struct pool scsipi_xfer_pool;

int scsipi_xs_count = 0;

/*
 * scsipi_init:
 *
 *	Called when a scsibus or atapibus is attached to the system
 *	to initialize shared data structures.
 */
void
scsipi_init(void)
{
	static int scsipi_init_done;

	if (scsipi_init_done)
		return;
	scsipi_init_done = 1;

	/* Initialize the scsipi_xfer pool. */
	pool_init(&scsipi_xfer_pool, sizeof(struct scsipi_xfer), 0,
	    0, 0, "scxspl", NULL, IPL_BIO);
	pool_prime(&scsipi_xfer_pool, 1);

	scsipi_ioctl_init();
}

/*
 * scsipi_channel_init:
 *
 *	Initialize a scsipi_channel when it is attached.
 */
int
scsipi_channel_init(struct scsipi_channel *chan)
{
	struct scsipi_adapter *adapt = chan->chan_adapter;
	int i;

	/* Initialize shared data. */
	scsipi_init();

	/* Initialize the queues. */
	TAILQ_INIT(&chan->chan_queue);
	TAILQ_INIT(&chan->chan_complete);

	for (i = 0; i < SCSIPI_CHAN_PERIPH_BUCKETS; i++)
		LIST_INIT(&chan->chan_periphtab[i]);

	/*
	 * Create the asynchronous completion thread.
	 */
	if (kthread_create(PRI_NONE, 0, NULL, scsipi_completion_thread, chan,
	    &chan->chan_thread, "%s", chan->chan_name)) {
		aprint_error_dev(adapt->adapt_dev,
		    "unable to create completion thread for channel %d\n",
		    chan->chan_channel);
		panic("scsipi_channel_init");
	}

	return 0;
}

/*
 * scsipi_channel_shutdown:
 *
 *	Shutdown a scsipi_channel.
 */
void
scsipi_channel_shutdown(struct scsipi_channel *chan)
{

	mutex_enter(chan_mtx(chan));
	/*
	 * Shut down the completion thread.
	 */
	chan->chan_tflags |= SCSIPI_CHANT_SHUTDOWN;
	cv_broadcast(chan_cv_complete(chan));

	/*
	 * Now wait for the thread to exit.
	 */
	while (chan->chan_thread != NULL)
		cv_wait(chan_cv_thread(chan), chan_mtx(chan));
	mutex_exit(chan_mtx(chan));
}

static uint32_t
scsipi_chan_periph_hash(uint64_t t, uint64_t l)
{
	uint32_t hash;

	hash = hash32_buf(&t, sizeof(t), HASH32_BUF_INIT);
	hash = hash32_buf(&l, sizeof(l), hash);

	return hash & SCSIPI_CHAN_PERIPH_HASHMASK;
}
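
/*
 * Example of the bucket computation above: the periph at target 1,
 * LUN 0 lands in
 *
 *	chan->chan_periphtab[scsipi_chan_periph_hash(1, 0)]
 *
 * Both IDs are widened to 64 bits, folded through hash32_buf() and
 * masked with SCSIPI_CHAN_PERIPH_HASHMASK.  Lookups recompute the
 * same hash and then compare target/lun exactly, so hash collisions
 * only cost a slightly longer list walk.
 */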

/*
 * scsipi_insert_periph:
 *
 *	Insert a periph into the channel.
 */
void
scsipi_insert_periph(struct scsipi_channel *chan, struct scsipi_periph *periph)
{
	uint32_t hash;

	hash = scsipi_chan_periph_hash(periph->periph_target,
	    periph->periph_lun);

	mutex_enter(chan_mtx(chan));
	LIST_INSERT_HEAD(&chan->chan_periphtab[hash], periph, periph_hash);
	mutex_exit(chan_mtx(chan));
}

/*
 * scsipi_remove_periph:
 *
 *	Remove a periph from the channel.
 */
void
scsipi_remove_periph(struct scsipi_channel *chan,
    struct scsipi_periph *periph)
{

	LIST_REMOVE(periph, periph_hash);
}

/*
 * scsipi_lookup_periph:
 *
 *	Lookup a periph on the specified channel.
 */
static struct scsipi_periph *
scsipi_lookup_periph_internal(struct scsipi_channel *chan, int target, int lun,
    bool lock)
{
	struct scsipi_periph *periph;
	uint32_t hash;

	if (target >= chan->chan_ntargets ||
	    lun >= chan->chan_nluns)
		return NULL;

	hash = scsipi_chan_periph_hash(target, lun);

	if (lock)
		mutex_enter(chan_mtx(chan));
	LIST_FOREACH(periph, &chan->chan_periphtab[hash], periph_hash) {
		if (periph->periph_target == target &&
		    periph->periph_lun == lun)
			break;
	}
	if (lock)
		mutex_exit(chan_mtx(chan));

	return periph;
}

struct scsipi_periph *
scsipi_lookup_periph_locked(struct scsipi_channel *chan, int target, int lun)
{
	return scsipi_lookup_periph_internal(chan, target, lun, false);
}

struct scsipi_periph *
scsipi_lookup_periph(struct scsipi_channel *chan, int target, int lun)
{
	return scsipi_lookup_periph_internal(chan, target, lun, true);
}

/*
 * scsipi_get_resource:
 *
 *	Allocate a single xfer `resource' from the channel.
 *
 *	NOTE: Must be called with channel lock held
 */
static int
scsipi_get_resource(struct scsipi_channel *chan)
{
	struct scsipi_adapter *adapt = chan->chan_adapter;

	if (chan->chan_flags & SCSIPI_CHAN_OPENINGS) {
		if (chan->chan_openings > 0) {
			chan->chan_openings--;
			return 1;
		}
		return 0;
	}

	if (adapt->adapt_openings > 0) {
		adapt->adapt_openings--;
		return 1;
	}
	return 0;
}
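
/*
 * The xfer `resource' accounting above has two modes: if the adapter
 * set SCSIPI_CHAN_OPENINGS, each channel draws from its own
 * chan_openings pool; otherwise all channels share the adapter-wide
 * adapt_openings count.  A sketch of the expected caller pattern
 * (channel lock held; illustrative, not a verbatim excerpt):
 *
 *	if (scsipi_get_resource(chan) == 0 &&
 *	    scsipi_grow_resources(chan) == 0)
 *		return;				// try again later
 *	...issue the xfer...
 *	scsipi_put_resource(chan);		// on completion
 */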

/*
 * scsipi_grow_resources:
 *
 *	Attempt to grow resources for a channel.  If this succeeds,
 *	we allocate one for our caller.
 *
 *	NOTE: Must be called with channel lock held
 */
static inline int
scsipi_grow_resources(struct scsipi_channel *chan)
{

	if (chan->chan_flags & SCSIPI_CHAN_CANGROW) {
		if ((chan->chan_flags & SCSIPI_CHAN_TACTIVE) == 0) {
			mutex_exit(chan_mtx(chan));
			scsipi_adapter_request(chan,
			    ADAPTER_REQ_GROW_RESOURCES, NULL);
			mutex_enter(chan_mtx(chan));
			return scsipi_get_resource(chan);
		}
		/*
		 * Ask the channel thread to do it.  It'll have to thaw
		 * the queue.
		 */
		scsipi_channel_freeze_locked(chan, 1);
		chan->chan_tflags |= SCSIPI_CHANT_GROWRES;
		cv_broadcast(chan_cv_complete(chan));
		return 0;
	}

	return 0;
}

/*
 * scsipi_put_resource:
 *
 *	Free a single xfer `resource' to the channel.
 *
 *	NOTE: Must be called with channel lock held
 */
static void
scsipi_put_resource(struct scsipi_channel *chan)
{
	struct scsipi_adapter *adapt = chan->chan_adapter;

	if (chan->chan_flags & SCSIPI_CHAN_OPENINGS)
		chan->chan_openings++;
	else
		adapt->adapt_openings++;
}

/*
 * scsipi_get_tag:
 *
 *	Get a tag ID for the specified xfer.
 *
 *	NOTE: Must be called with channel lock held
 */
static void
scsipi_get_tag(struct scsipi_xfer *xs)
{
	struct scsipi_periph *periph = xs->xs_periph;
	int bit, tag;
	u_int word;

	KASSERT(mutex_owned(chan_mtx(periph->periph_channel)));

	bit = 0;	/* XXX gcc */
	for (word = 0; word < PERIPH_NTAGWORDS; word++) {
		bit = ffs(periph->periph_freetags[word]);
		if (bit != 0)
			break;
	}
#ifdef DIAGNOSTIC
	if (word == PERIPH_NTAGWORDS) {
		scsipi_printaddr(periph);
		printf("no free tags\n");
		panic("scsipi_get_tag");
	}
#endif

	bit -= 1;
	periph->periph_freetags[word] &= ~(1U << bit);
	tag = (word << 5) | bit;

	/* XXX Should eventually disallow this completely. */
	if (tag >= periph->periph_openings) {
		scsipi_printaddr(periph);
		printf("WARNING: tag %d greater than available openings %d\n",
		    tag, periph->periph_openings);
	}

	xs->xs_tag_id = tag;
	SDT_PROBE3(scsi, base, tag, get,
	    xs, xs->xs_tag_id, xs->xs_tag_type);
}

/*
 * scsipi_put_tag:
 *
 *	Put the tag ID for the specified xfer back into the pool.
 *
 *	NOTE: Must be called with channel lock held
 */
static void
scsipi_put_tag(struct scsipi_xfer *xs)
{
	struct scsipi_periph *periph = xs->xs_periph;
	int word, bit;

	KASSERT(mutex_owned(chan_mtx(periph->periph_channel)));

	SDT_PROBE3(scsi, base, tag, put,
	    xs, xs->xs_tag_id, xs->xs_tag_type);

	word = xs->xs_tag_id >> 5;
	bit = xs->xs_tag_id & 0x1f;

	periph->periph_freetags[word] |= (1U << bit);
}
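
/*
 * Tag IDs are drawn from the periph_freetags[] bitmap: each 32-bit
 * word holds 32 tags, and a tag decomposes as
 *
 *	tag  = (word << 5) | bit		(allocate, above)
 *	word = tag >> 5;  bit = tag & 0x1f;	(release)
 *
 * e.g. tag 37 lives in word 1, bit 5.  ffs() returns a 1-based bit
 * index (0 when no bit is set), hence the `bit -= 1' in
 * scsipi_get_tag().
 */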

/*
 * scsipi_get_xs:
 *
 *	Allocate an xfer descriptor and associate it with the
 *	specified peripheral.  If the peripheral has no more
 *	available command openings, we either block waiting for
 *	one to become available, or fail.
 *
 *	When this routine is called with the channel lock held
 *	the flags must include XS_CTL_NOSLEEP.
 */
struct scsipi_xfer *
scsipi_get_xs(struct scsipi_periph *periph, int flags)
{
	struct scsipi_xfer *xs;
	bool lock = (flags & XS_CTL_NOSLEEP) == 0;

	SC_DEBUG(periph, SCSIPI_DB3, ("scsipi_get_xs\n"));

	KASSERT(!cold);

#ifdef DIAGNOSTIC
	/*
	 * URGENT commands can never be ASYNC.
	 */
	if ((flags & (XS_CTL_URGENT|XS_CTL_ASYNC)) ==
	    (XS_CTL_URGENT|XS_CTL_ASYNC)) {
		scsipi_printaddr(periph);
		printf("URGENT and ASYNC\n");
		panic("scsipi_get_xs");
	}
#endif

	/*
	 * Wait for a command opening to become available.  Rules:
	 *
	 *	- All xfers must wait for an available opening.
	 *	  Exception: URGENT xfers can proceed when
	 *	  active == openings, because we use the opening
	 *	  of the command we're recovering for.
	 *
	 *	- If the periph has sense pending, only URGENT & REQSENSE
	 *	  xfers may proceed.
	 *
	 *	- If the periph is recovering, only URGENT xfers may
	 *	  proceed.
	 *
	 *	- If the periph is currently executing a recovery
	 *	  command, URGENT commands must block, because only
	 *	  one recovery command can execute at a time.
	 */
	if (lock)
		mutex_enter(chan_mtx(periph->periph_channel));
	for (;;) {
		if (flags & XS_CTL_URGENT) {
			if (periph->periph_active > periph->periph_openings)
				goto wait_for_opening;
			if (periph->periph_flags & PERIPH_SENSE) {
				if ((flags & XS_CTL_REQSENSE) == 0)
					goto wait_for_opening;
			} else {
				if ((periph->periph_flags &
				    PERIPH_RECOVERY_ACTIVE) != 0)
					goto wait_for_opening;
				periph->periph_flags |= PERIPH_RECOVERY_ACTIVE;
			}
			break;
		}
		if (periph->periph_active >= periph->periph_openings ||
		    (periph->periph_flags & PERIPH_RECOVERING) != 0)
			goto wait_for_opening;
		periph->periph_active++;
		KASSERT(mutex_owned(chan_mtx(periph->periph_channel)));
		break;

 wait_for_opening:
		if (flags & XS_CTL_NOSLEEP) {
			KASSERT(!lock);
			return NULL;
		}
		KASSERT(lock);
		SC_DEBUG(periph, SCSIPI_DB3, ("sleeping\n"));
		periph->periph_flags |= PERIPH_WAITING;
		cv_wait(periph_cv_periph(periph),
		    chan_mtx(periph->periph_channel));
	}
	if (lock)
		mutex_exit(chan_mtx(periph->periph_channel));

	SC_DEBUG(periph, SCSIPI_DB3, ("calling pool_get\n"));
	xs = pool_get(&scsipi_xfer_pool,
	    ((flags & XS_CTL_NOSLEEP) != 0 ? PR_NOWAIT : PR_WAITOK));
	if (xs == NULL) {
		if (lock)
			mutex_enter(chan_mtx(periph->periph_channel));
		if (flags & XS_CTL_URGENT) {
			if ((flags & XS_CTL_REQSENSE) == 0)
				periph->periph_flags &= ~PERIPH_RECOVERY_ACTIVE;
		} else
			periph->periph_active--;
		if (lock)
			mutex_exit(chan_mtx(periph->periph_channel));
		scsipi_printaddr(periph);
		printf("unable to allocate %sscsipi_xfer\n",
		    (flags & XS_CTL_URGENT) ? "URGENT " : "");
	}

	SC_DEBUG(periph, SCSIPI_DB3, ("returning\n"));

	if (xs != NULL) {
		memset(xs, 0, sizeof(*xs));
		callout_init(&xs->xs_callout, 0);
		xs->xs_periph = periph;
		xs->xs_control = flags;
		xs->xs_status = 0;
		if ((flags & XS_CTL_NOSLEEP) == 0)
			mutex_enter(chan_mtx(periph->periph_channel));
		TAILQ_INSERT_TAIL(&periph->periph_xferq, xs, device_q);
		KASSERT(mutex_owned(chan_mtx(periph->periph_channel)));
		if ((flags & XS_CTL_NOSLEEP) == 0)
			mutex_exit(chan_mtx(periph->periph_channel));
	}
	return xs;
}
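
/*
 * Typical scsipi_get_xs() call patterns (illustrative only):
 *
 *	// thread context: may sleep for an opening and in pool_get()
 *	xs = scsipi_get_xs(periph, XS_CTL_DATA_IN);
 *
 *	// channel lock held (e.g. interrupt context): must not sleep
 *	xs = scsipi_get_xs(periph, XS_CTL_NOSLEEP);
 *	if (xs == NULL)
 *		...defer or fail; no opening or pool exhausted...
 *
 * Most consumers go through scsipi_command() rather than calling
 * this directly.
 */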

/*
 * scsipi_put_xs:
 *
 *	Release an xfer descriptor, decreasing the outstanding command
 *	count for the peripheral.  If there is a thread waiting for
 *	an opening, wake it up.  If not, kick any queued I/O the
 *	peripheral may have.
 *
 *	NOTE: Must be called with channel lock held
 */
void
scsipi_put_xs(struct scsipi_xfer *xs)
{
	struct scsipi_periph *periph = xs->xs_periph;
	int flags = xs->xs_control;

	SDT_PROBE1(scsi, base, xfer, free, xs);
	SC_DEBUG(periph, SCSIPI_DB3, ("scsipi_free_xs\n"));
	KASSERT(mutex_owned(chan_mtx(periph->periph_channel)));

	TAILQ_REMOVE(&periph->periph_xferq, xs, device_q);
	callout_destroy(&xs->xs_callout);
	pool_put(&scsipi_xfer_pool, xs);

#ifdef DIAGNOSTIC
	if ((periph->periph_flags & PERIPH_RECOVERY_ACTIVE) != 0 &&
	    periph->periph_active == 0) {
		scsipi_printaddr(periph);
		printf("recovery without a command to recover for\n");
		panic("scsipi_put_xs");
	}
#endif

	if (flags & XS_CTL_URGENT) {
		if ((flags & XS_CTL_REQSENSE) == 0)
			periph->periph_flags &= ~PERIPH_RECOVERY_ACTIVE;
	} else
		periph->periph_active--;
	if (periph->periph_active == 0 &&
	    (periph->periph_flags & PERIPH_WAITDRAIN) != 0) {
		periph->periph_flags &= ~PERIPH_WAITDRAIN;
		cv_broadcast(periph_cv_active(periph));
	}

	if (periph->periph_flags & PERIPH_WAITING) {
		periph->periph_flags &= ~PERIPH_WAITING;
		cv_broadcast(periph_cv_periph(periph));
	} else {
		if (periph->periph_switch->psw_start != NULL &&
		    device_is_active(periph->periph_dev)) {
			SC_DEBUG(periph, SCSIPI_DB2,
			    ("calling private start()\n"));
			(*periph->periph_switch->psw_start)(periph);
		}
	}
}

/*
 * scsipi_channel_freeze:
 *
 *	Freeze a channel's xfer queue.
 */
void
scsipi_channel_freeze(struct scsipi_channel *chan, int count)
{
	bool lock = chan_running(chan) > 0;

	if (lock)
		mutex_enter(chan_mtx(chan));
	chan->chan_qfreeze += count;
	if (lock)
		mutex_exit(chan_mtx(chan));
}

static void
scsipi_channel_freeze_locked(struct scsipi_channel *chan, int count)
{

	chan->chan_qfreeze += count;
}

/*
 * scsipi_channel_thaw:
 *
 *	Thaw a channel's xfer queue.
 */
void
scsipi_channel_thaw(struct scsipi_channel *chan, int count)
{
	bool lock = chan_running(chan) > 0;

	if (lock)
		mutex_enter(chan_mtx(chan));
	chan->chan_qfreeze -= count;
	/*
	 * Don't let the freeze count go negative.
	 *
	 * Presumably the adapter driver could keep track of this,
	 * but it might just be easier to do this here so as to allow
	 * multiple callers, including those outside the adapter driver.
	 */
	if (chan->chan_qfreeze < 0) {
		chan->chan_qfreeze = 0;
	}
	if (lock)
		mutex_exit(chan_mtx(chan));

	/*
	 * Don't kick the queue until the channel is running.
	 */
	if (!lock)
		return;

	/*
	 * Kick the channel's queue here.  Note, we may be running in
	 * interrupt context (softclock or HBA's interrupt), so the adapter
	 * driver had better not sleep.
	 */
	if (chan->chan_qfreeze == 0)
		scsipi_run_queue(chan);
}
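
/*
 * Note that freeze/thaw is a counter, not a flag: the queue only runs
 * again once every scsipi_channel_freeze() has been matched by a thaw
 * and chan_qfreeze drops back to 0.  A delayed thaw can be scheduled
 * with scsipi_channel_timed_thaw() below, e.g. (sc_callout here is a
 * hypothetical adapter callout):
 *
 *	scsipi_channel_freeze(chan, 1);
 *	callout_reset(&sc->sc_callout, hz,
 *	    scsipi_channel_timed_thaw, chan);
 */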

/*
 * scsipi_channel_timed_thaw:
 *
 *	Thaw a channel after some time has expired.  This will also
 *	run the channel's queue if the freeze count has reached 0.
 */
void
scsipi_channel_timed_thaw(void *arg)
{
	struct scsipi_channel *chan = arg;

	scsipi_channel_thaw(chan, 1);
}

/*
 * scsipi_periph_freeze:
 *
 *	Freeze a device's xfer queue.
 */
void
scsipi_periph_freeze_locked(struct scsipi_periph *periph, int count)
{

	periph->periph_qfreeze += count;
}

/*
 * scsipi_periph_thaw:
 *
 *	Thaw a device's xfer queue.
 */
void
scsipi_periph_thaw_locked(struct scsipi_periph *periph, int count)
{

	periph->periph_qfreeze -= count;
#ifdef DIAGNOSTIC
	if (periph->periph_qfreeze < 0) {
		static const char pc[] = "periph freeze count < 0";
		scsipi_printaddr(periph);
		printf("%s\n", pc);
		panic(pc);
	}
#endif
	if (periph->periph_qfreeze == 0 &&
	    (periph->periph_flags & PERIPH_WAITING) != 0)
		cv_broadcast(periph_cv_periph(periph));
}

void
scsipi_periph_freeze(struct scsipi_periph *periph, int count)
{

	mutex_enter(chan_mtx(periph->periph_channel));
	scsipi_periph_freeze_locked(periph, count);
	mutex_exit(chan_mtx(periph->periph_channel));
}

void
scsipi_periph_thaw(struct scsipi_periph *periph, int count)
{

	mutex_enter(chan_mtx(periph->periph_channel));
	scsipi_periph_thaw_locked(periph, count);
	mutex_exit(chan_mtx(periph->periph_channel));
}

/*
 * scsipi_periph_timed_thaw:
 *
 *	Thaw a device after some time has expired.
 */
void
scsipi_periph_timed_thaw(void *arg)
{
	struct scsipi_periph *periph = arg;
	struct scsipi_channel *chan = periph->periph_channel;

	callout_stop(&periph->periph_callout);

	mutex_enter(chan_mtx(chan));
	scsipi_periph_thaw_locked(periph, 1);
	if ((periph->periph_channel->chan_flags & SCSIPI_CHAN_TACTIVE) == 0) {
		/*
		 * Kick the channel's queue here.  Note, we're running in
		 * interrupt context (softclock), so the adapter driver
		 * had better not sleep.
		 */
		mutex_exit(chan_mtx(chan));
		scsipi_run_queue(periph->periph_channel);
	} else {
		/*
		 * Tell the completion thread to kick the channel's queue here.
		 */
		periph->periph_channel->chan_tflags |= SCSIPI_CHANT_KICK;
		cv_broadcast(chan_cv_complete(chan));
		mutex_exit(chan_mtx(chan));
	}
}

/*
 * scsipi_wait_drain:
 *
 *	Wait for a periph's pending xfers to drain.
 */
void
scsipi_wait_drain(struct scsipi_periph *periph)
{
	struct scsipi_channel *chan = periph->periph_channel;

	mutex_enter(chan_mtx(chan));
	while (periph->periph_active != 0) {
		periph->periph_flags |= PERIPH_WAITDRAIN;
		cv_wait(periph_cv_active(periph), chan_mtx(chan));
	}
	mutex_exit(chan_mtx(chan));
}

/*
 * scsipi_kill_pending:
 *
 *	Kill off all pending xfers for a periph.
 *
 *	NOTE: Must be called with channel lock held
 */
void
scsipi_kill_pending(struct scsipi_periph *periph)
{
	struct scsipi_channel *chan = periph->periph_channel;

	(*chan->chan_bustype->bustype_kill_pending)(periph);
	while (periph->periph_active != 0) {
		periph->periph_flags |= PERIPH_WAITDRAIN;
		cv_wait(periph_cv_active(periph), chan_mtx(chan));
	}
}

/*
 * scsipi_print_cdb:
 *	prints a command descriptor block (for debugging purposes, error
 *	messages, SCSIVERBOSE, ...)
 */
void
scsipi_print_cdb(struct scsipi_generic *cmd)
{
	int i, j;

	printf("0x%02x", cmd->opcode);

	switch (CDB_GROUPID(cmd->opcode)) {
	case CDB_GROUPID_0:
		j = CDB_GROUP0;
		break;
	case CDB_GROUPID_1:
		j = CDB_GROUP1;
		break;
	case CDB_GROUPID_2:
		j = CDB_GROUP2;
		break;
	case CDB_GROUPID_3:
		j = CDB_GROUP3;
		break;
	case CDB_GROUPID_4:
		j = CDB_GROUP4;
		break;
	case CDB_GROUPID_5:
		j = CDB_GROUP5;
		break;
	case CDB_GROUPID_6:
		j = CDB_GROUP6;
		break;
	case CDB_GROUPID_7:
		j = CDB_GROUP7;
		break;
	default:
		j = 0;
	}
	if (j == 0)
		j = sizeof (cmd->bytes);
	for (i = 0; i < j - 1; i++)	/* already done the opcode */
		printf(" %02x", cmd->bytes[i]);
}
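
/*
 * For example, a READ(10) CDB (opcode 0x28, group 1, so CDB_GROUP1 ==
 * 10 bytes) prints roughly as
 *
 *	0x28 00 00 00 08 00 00 00 10 00
 *
 * Opcodes in an unknown group fall back to dumping all of cmd->bytes.
 */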

/*
 * scsipi_interpret_sense:
 *
 *	Look at the returned sense and act on the error, determining
 *	the unix error number to pass back.  (0 = report no error)
 *
 *	NOTE: If we return ERESTART, we are expected to have
 *	thawed the device!
 *
 *	THIS IS THE DEFAULT ERROR HANDLER FOR SCSI DEVICES.
 */
int
scsipi_interpret_sense(struct scsipi_xfer *xs)
{
	struct scsi_sense_data *sense;
	struct scsipi_periph *periph = xs->xs_periph;
	u_int8_t key;
	int error;
	u_int32_t info;
	static const char *error_mes[] = {
		"soft error (corrected)",
		"not ready", "medium error",
		"non-media hardware failure", "illegal request",
		"unit attention", "readonly device",
		"no data found", "vendor unique",
		"copy aborted", "command aborted",
		"search returned equal", "volume overflow",
		"verify miscompare", "unknown error key"
	};

	sense = &xs->sense.scsi_sense;
#ifdef SCSIPI_DEBUG
	if (periph->periph_dbflags & SCSIPI_DB1) {
		int count, len;
		scsipi_printaddr(periph);
		printf(" sense debug information:\n");
		printf("\tcode 0x%x valid %d\n",
		    SSD_RCODE(sense->response_code),
		    sense->response_code & SSD_RCODE_VALID ? 1 : 0);
		printf("\tseg 0x%x key 0x%x ili 0x%x eom 0x%x fmark 0x%x\n",
		    sense->segment,
		    SSD_SENSE_KEY(sense->flags),
		    sense->flags & SSD_ILI ? 1 : 0,
		    sense->flags & SSD_EOM ? 1 : 0,
		    sense->flags & SSD_FILEMARK ? 1 : 0);
		printf("\tinfo: 0x%x 0x%x 0x%x 0x%x followed by %d "
		    "extra bytes\n",
		    sense->info[0],
		    sense->info[1],
		    sense->info[2],
		    sense->info[3],
		    sense->extra_len);
		len = SSD_ADD_BYTES_LIM(sense);
		printf("\textra (up to %d bytes): ", len);
		for (count = 0; count < len; count++)
			printf("0x%x ", sense->csi[count]);
		printf("\n");
	}
#endif

	/*
	 * If the periph has its own error handler, call it first.
	 * If it returns a legit error value, return that, otherwise
	 * it wants us to continue with normal error processing.
	 */
	if (periph->periph_switch->psw_error != NULL) {
		SC_DEBUG(periph, SCSIPI_DB2,
		    ("calling private err_handler()\n"));
		error = (*periph->periph_switch->psw_error)(xs);
		if (error != EJUSTRETURN)
			return error;
	}
	/* otherwise use the default */
	switch (SSD_RCODE(sense->response_code)) {

	/*
	 * Old SCSI-1 and SASI devices respond with
	 * codes other than 70.
	 */
	case 0x00:	/* no error (command completed OK) */
		return 0;
	case 0x04:	/* drive not ready after it was selected */
		if ((periph->periph_flags & PERIPH_REMOVABLE) != 0)
			periph->periph_flags &= ~PERIPH_MEDIA_LOADED;
		if ((xs->xs_control & XS_CTL_IGNORE_NOT_READY) != 0)
			return 0;
		/* XXX - display some sort of error here? */
		return EIO;
	case 0x20:	/* invalid command */
		if ((xs->xs_control &
		    XS_CTL_IGNORE_ILLEGAL_REQUEST) != 0)
			return 0;
		return EINVAL;
	case 0x25:	/* invalid LUN (Adaptec ACB-4000) */
		return EACCES;

	/*
	 * If it's code 70, use the extended stuff and
	 * interpret the key
	 */
	case 0x71:	/* delayed error */
		scsipi_printaddr(periph);
		key = SSD_SENSE_KEY(sense->flags);
		printf(" DEFERRED ERROR, key = 0x%x\n", key);
		/* FALLTHROUGH */
	case 0x70:
		if ((sense->response_code & SSD_RCODE_VALID) != 0)
			info = _4btol(sense->info);
		else
			info = 0;
		key = SSD_SENSE_KEY(sense->flags);

		switch (key) {
		case SKEY_NO_SENSE:
		case SKEY_RECOVERED_ERROR:
			if (xs->resid == xs->datalen && xs->datalen) {
				/*
				 * Why is this here?
				 */
				xs->resid = 0;	/* not short read */
			}
			error = 0;
			break;
		case SKEY_EQUAL:
			error = 0;
			break;
		case SKEY_NOT_READY:
			if ((periph->periph_flags & PERIPH_REMOVABLE) != 0)
				periph->periph_flags &= ~PERIPH_MEDIA_LOADED;
			if ((xs->xs_control & XS_CTL_IGNORE_NOT_READY) != 0)
				return 0;
			if (sense->asc == 0x3A) {
				error = ENODEV; /* Medium not present */
				if (xs->xs_control & XS_CTL_SILENT_NODEV)
					return error;
			} else
				error = EIO;
			if ((xs->xs_control & XS_CTL_SILENT) != 0)
				return error;
			break;
		case SKEY_ILLEGAL_REQUEST:
			if ((xs->xs_control &
			    XS_CTL_IGNORE_ILLEGAL_REQUEST) != 0)
				return 0;
			/*
			 * Handle the case where a device reports
			 * Logical Unit Not Supported during discovery.
			 */
			if ((xs->xs_control & XS_CTL_DISCOVERY) != 0 &&
			    sense->asc == 0x25 &&
			    sense->ascq == 0x00)
				return EINVAL;
			if ((xs->xs_control & XS_CTL_SILENT) != 0)
				return EIO;
			error = EINVAL;
			break;
		case SKEY_UNIT_ATTENTION:
			if (sense->asc == 0x29 &&
			    sense->ascq == 0x00) {
				/* device or bus reset */
				return ERESTART;
			}
			if ((periph->periph_flags & PERIPH_REMOVABLE) != 0)
				periph->periph_flags &= ~PERIPH_MEDIA_LOADED;
			if ((xs->xs_control &
			    XS_CTL_IGNORE_MEDIA_CHANGE) != 0 ||
			    /* XXX Should reupload any transient state. */
			    (periph->periph_flags &
			     PERIPH_REMOVABLE) == 0) {
				return ERESTART;
			}
			if ((xs->xs_control & XS_CTL_SILENT) != 0)
				return EIO;
			error = EIO;
			break;
		case SKEY_DATA_PROTECT:
			error = EROFS;
			break;
		case SKEY_BLANK_CHECK:
			error = 0;
			break;
		case SKEY_ABORTED_COMMAND:
			if (xs->xs_retries != 0) {
				xs->xs_retries--;
				error = ERESTART;
			} else
				error = EIO;
			break;
		case SKEY_VOLUME_OVERFLOW:
			error = ENOSPC;
			break;
		default:
			error = EIO;
			break;
		}

		/* Print verbose decode if appropriate and possible */
		if ((key == 0) ||
		    ((xs->xs_control & XS_CTL_SILENT) != 0) ||
		    (scsipi_print_sense(xs, 0) != 0))
			return error;

		/* Print brief(er) sense information */
		scsipi_printaddr(periph);
		printf("%s", error_mes[key - 1]);
		if ((sense->response_code & SSD_RCODE_VALID) != 0) {
			switch (key) {
			case SKEY_NOT_READY:
			case SKEY_ILLEGAL_REQUEST:
			case SKEY_UNIT_ATTENTION:
			case SKEY_DATA_PROTECT:
				break;
			case SKEY_BLANK_CHECK:
				printf(", requested size: %d (decimal)",
				    info);
				break;
			case SKEY_ABORTED_COMMAND:
				if (xs->xs_retries)
					printf(", retrying");
				printf(", cmd 0x%x, info 0x%x",
				    xs->cmd->opcode, info);
				break;
			default:
				printf(", info = %d (decimal)", info);
			}
		}
		if (sense->extra_len != 0) {
			int n;
			printf(", data =");
			for (n = 0; n < sense->extra_len; n++)
				printf(" %02x",
				    sense->csi[n]);
		}
		printf("\n");
		return error;

	/*
	 * Some other code, just report it
	 */
	default:
#if defined(SCSIDEBUG) || defined(DEBUG)
	{
		static const char *uc = "undecodable sense error";
		int i;
		u_int8_t *cptr = (u_int8_t *) sense;
		scsipi_printaddr(periph);
		if (xs->cmd == &xs->cmdstore) {
			printf("%s for opcode 0x%x, data=",
			    uc, xs->cmdstore.opcode);
		} else {
			printf("%s, data=", uc);
		}
		for (i = 0; i < sizeof (*sense); i++)
			printf(" 0x%02x", *(cptr++) & 0xff);
		printf("\n");
	}
#else
		scsipi_printaddr(periph);
		printf("Sense Error Code 0x%x",
		    SSD_RCODE(sense->response_code));
		if ((sense->response_code & SSD_RCODE_VALID) != 0) {
			struct scsi_sense_data_unextended *usense =
			    (struct scsi_sense_data_unextended *)sense;
			printf(" at block no. %d (decimal)",
			    _3btol(usense->block));
		}
		printf("\n");
#endif
		return EIO;
	}
}

/*
 * scsipi_test_unit_ready:
 *
 *	Issue a `test unit ready' request.
 */
int
scsipi_test_unit_ready(struct scsipi_periph *periph, int flags)
{
	struct scsi_test_unit_ready cmd;
	int retries;

	/* some ATAPI drives don't support TEST UNIT READY. Sigh */
	if (periph->periph_quirks & PQUIRK_NOTUR)
		return 0;

	if (flags & XS_CTL_DISCOVERY)
		retries = 0;
	else
		retries = SCSIPIRETRIES;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = SCSI_TEST_UNIT_READY;

	return scsipi_command(periph, (void *)&cmd, sizeof(cmd), 0, 0,
	    retries, 10000, NULL, flags);
}
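
/*
 * All scsipi_command() calls in this file follow the same shape:
 *
 *	scsipi_command(periph, cmd, cmdlen, data, datalen,
 *	    retries, timeout_ms, bp, flags);
 *
 * so the TEST UNIT READY above sends a 6-byte CDB with no data phase,
 * up to `retries' retries and a 10 second (10000 ms) timeout.
 */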

static const struct scsipi_inquiry3_pattern {
	const char vendor[8];
	const char product[16];
	const char revision[4];
} scsipi_inquiry3_quirk[] = {
	{ "ES-6600 ", "", "" },
};

static int
scsipi_inquiry3_ok(const struct scsipi_inquiry_data *ib)
{
	for (size_t i = 0; i < __arraycount(scsipi_inquiry3_quirk); i++) {
		const struct scsipi_inquiry3_pattern *q =
		    &scsipi_inquiry3_quirk[i];
#define MATCH(field) \
    (q->field[0] ? memcmp(ib->field, q->field, sizeof(ib->field)) == 0 : 1)
		if (MATCH(vendor) && MATCH(product) && MATCH(revision))
			return 0;
	}
	return 1;
}

/*
 * scsipi_inquire:
 *
 *	Ask the device about itself.
 */
int
scsipi_inquire(struct scsipi_periph *periph, struct scsipi_inquiry_data *inqbuf,
    int flags)
{
	struct scsipi_inquiry cmd;
	int error;
	int retries;

	if (flags & XS_CTL_DISCOVERY)
		retries = 0;
	else
		retries = SCSIPIRETRIES;

	/*
	 * If we request more data than the device can provide, it SHOULD just
	 * return a short response.  However, some devices error with an
	 * ILLEGAL REQUEST sense code, and yet others have even more special
	 * failure modes (such as the GL641USB flash adapter, which goes loony
	 * and sends corrupted CRCs).  To work around this, and to bring our
	 * behavior more in line with other OSes, we do a shorter inquiry,
	 * covering all the SCSI-2 information, first, and then request more
	 * data iff the "additional length" field indicates there is more.
	 * - mycroft, 2003/10/16
	 */
	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = INQUIRY;
	cmd.length = SCSIPI_INQUIRY_LENGTH_SCSI2;
	error = scsipi_command(periph, (void *)&cmd, sizeof(cmd),
	    (void *)inqbuf, SCSIPI_INQUIRY_LENGTH_SCSI2, retries,
	    10000, NULL, flags | XS_CTL_DATA_IN);
	if (!error &&
	    inqbuf->additional_length > SCSIPI_INQUIRY_LENGTH_SCSI2 - 4) {
		if (scsipi_inquiry3_ok(inqbuf)) {
#if 0
printf("inquire: addlen=%d, retrying\n", inqbuf->additional_length);
#endif
			cmd.length = SCSIPI_INQUIRY_LENGTH_SCSI3;
			error = scsipi_command(periph, (void *)&cmd,
			    sizeof(cmd), (void *)inqbuf,
			    SCSIPI_INQUIRY_LENGTH_SCSI3, retries,
			    10000, NULL, flags | XS_CTL_DATA_IN);
#if 0
printf("inquire: error=%d\n", error);
#endif
		}
	}

#ifdef SCSI_OLD_NOINQUIRY
	/*
	 * Kludge for the Adaptec ACB-4000 SCSI->MFM translator.
	 * This board doesn't support the INQUIRY command at all.
	 */
	if (error == EINVAL || error == EACCES) {
		/*
		 * Conjure up an INQUIRY response.
		 */
		inqbuf->device = (error == EINVAL ?
		    SID_QUAL_LU_PRESENT :
		    SID_QUAL_LU_NOTPRESENT) | T_DIRECT;
		inqbuf->dev_qual2 = 0;
		inqbuf->version = 0;
		inqbuf->response_format = SID_FORMAT_SCSI1;
		inqbuf->additional_length = SCSIPI_INQUIRY_LENGTH_SCSI2 - 4;
		inqbuf->flags1 = inqbuf->flags2 = inqbuf->flags3 = 0;
		memcpy(inqbuf->vendor, "ADAPTEC ACB-4000            ", 28);
		error = 0;
	}

	/*
	 * Kludge for the Emulex MT-02 SCSI->QIC translator.
	 * This board gives an empty response to an INQUIRY command.
	 */
	else if (error == 0 &&
	    inqbuf->device == (SID_QUAL_LU_PRESENT | T_DIRECT) &&
	    inqbuf->dev_qual2 == 0 &&
	    inqbuf->version == 0 &&
	    inqbuf->response_format == SID_FORMAT_SCSI1) {
		/*
		 * Fill out the INQUIRY response.
		 */
		inqbuf->device = (SID_QUAL_LU_PRESENT | T_SEQUENTIAL);
		inqbuf->dev_qual2 = SID_REMOVABLE;
		inqbuf->additional_length = SCSIPI_INQUIRY_LENGTH_SCSI2 - 4;
		inqbuf->flags1 = inqbuf->flags2 = inqbuf->flags3 = 0;
		memcpy(inqbuf->vendor, "EMULEX  MT-02 QIC           ", 28);
	}
#endif /* SCSI_OLD_NOINQUIRY */

	return error;
}

/*
 * scsipi_prevent:
 *
 *	Prevent or allow the user to remove the media
 */
int
scsipi_prevent(struct scsipi_periph *periph, int type, int flags)
{
	struct scsi_prevent_allow_medium_removal cmd;

	if (periph->periph_quirks & PQUIRK_NODOORLOCK)
		return 0;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = SCSI_PREVENT_ALLOW_MEDIUM_REMOVAL;
	cmd.how = type;

	return (scsipi_command(periph, (void *)&cmd, sizeof(cmd), 0, 0,
	    SCSIPIRETRIES, 5000, NULL, flags));
}

/*
 * scsipi_start:
 *
 *	Send a START UNIT.
 */
int
scsipi_start(struct scsipi_periph *periph, int type, int flags)
{
	struct scsipi_start_stop cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = START_STOP;
	cmd.byte2 = 0x00;
	cmd.how = type;

	return scsipi_command(periph, (void *)&cmd, sizeof(cmd), 0, 0,
	    SCSIPIRETRIES, (type & SSS_START) ? 60000 : 10000, NULL, flags);
}

/*
 * scsipi_mode_sense, scsipi_mode_sense_big:
 *	get a sense page from a device
 */

int
scsipi_mode_sense(struct scsipi_periph *periph, int byte2, int page,
    struct scsi_mode_parameter_header_6 *data, int len, int flags, int retries,
    int timeout)
{
	struct scsi_mode_sense_6 cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = SCSI_MODE_SENSE_6;
	cmd.byte2 = byte2;
	cmd.page = page;
	cmd.length = len & 0xff;

	return scsipi_command(periph, (void *)&cmd, sizeof(cmd),
	    (void *)data, len, retries, timeout, NULL, flags | XS_CTL_DATA_IN);
}

int
scsipi_mode_sense_big(struct scsipi_periph *periph, int byte2, int page,
    struct scsi_mode_parameter_header_10 *data, int len, int flags, int retries,
    int timeout)
{
	struct scsi_mode_sense_10 cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = SCSI_MODE_SENSE_10;
	cmd.byte2 = byte2;
	cmd.page = page;
	_lto2b(len, cmd.length);

	return scsipi_command(periph, (void *)&cmd, sizeof(cmd),
	    (void *)data, len, retries, timeout, NULL, flags | XS_CTL_DATA_IN);
}

int
scsipi_mode_select(struct scsipi_periph *periph, int byte2,
    struct scsi_mode_parameter_header_6 *data, int len, int flags, int retries,
    int timeout)
{
	struct scsi_mode_select_6 cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = SCSI_MODE_SELECT_6;
	cmd.byte2 = byte2;
	cmd.length = len & 0xff;

	return scsipi_command(periph, (void *)&cmd, sizeof(cmd),
	    (void *)data, len, retries, timeout, NULL, flags | XS_CTL_DATA_OUT);
}

int
scsipi_mode_select_big(struct scsipi_periph *periph, int byte2,
    struct scsi_mode_parameter_header_10 *data, int len, int flags, int retries,
    int timeout)
{
	struct scsi_mode_select_10 cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = SCSI_MODE_SELECT_10;
	cmd.byte2 = byte2;
	_lto2b(len, cmd.length);

	return scsipi_command(periph, (void *)&cmd, sizeof(cmd),
	    (void *)data, len, retries, timeout, NULL, flags | XS_CTL_DATA_OUT);
}
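
/*
 * Note how the allocation length is encoded: the 6-byte MODE commands
 * carry a single length byte (hence `len & 0xff'), while the 10-byte
 * variants store a 2-byte big-endian length via _lto2b().  E.g.
 * len = 0x1234 yields cmd.length[0] == 0x12, cmd.length[1] == 0x34
 * in the 10-byte CDB.
 */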

/*
 * scsipi_get_opcodeinfo:
 *
 *	Query the device for supported commands and their timeouts,
 *	building a timeout lookup table if timeout information is
 *	available.
 */
void
scsipi_get_opcodeinfo(struct scsipi_periph *periph)
{
	u_int8_t *data;
	int len = 16*1024;
	int rc;
	int retries;
	struct scsi_repsuppopcode cmd;

	/* refrain from asking for supported opcodes */
	if (periph->periph_quirks & PQUIRK_NOREPSUPPOPC ||
	    periph->periph_type == T_PROCESSOR || /* spec. */
	    periph->periph_type == T_CDROM) /* spec. */
		return;

	scsipi_free_opcodeinfo(periph);

	/*
	 * query REPORT SUPPORTED OPERATION CODES
	 * if OK
	 *   enumerate all codes
	 *     if timeout exists insert maximum into opcode table
	 */
	data = malloc(len, M_DEVBUF, M_WAITOK|M_ZERO);

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = SCSI_MAINTENANCE_IN;
	cmd.svcaction = RSOC_REPORT_SUPPORTED_OPCODES;
	cmd.repoption = RSOC_RCTD|RSOC_ALL;
	_lto4b(len, cmd.alloclen);

	/* loop to skip any UNIT ATTENTIONS at this point */
	retries = 3;
	do {
		rc = scsipi_command(periph, (void *)&cmd, sizeof(cmd),
		    (void *)data, len, 0, 60000, NULL,
		    XS_CTL_DATA_IN|XS_CTL_SILENT);
#ifdef SCSIPI_DEBUG
		if (rc != 0) {
			SC_DEBUG(periph, SCSIPI_DB3,
			    ("SCSI_MAINTENANCE_IN"
			    "[RSOC_REPORT_SUPPORTED_OPCODES] command"
			    " failed: rc=%d, retries=%d\n",
			    rc, retries));
		}
#endif
	} while (rc == EIO && retries-- > 0);

	if (rc == 0) {
		int count;
		int dlen = _4btol(data);
		u_int8_t *c = data + 4;

		SC_DEBUG(periph, SCSIPI_DB3,
		    ("supported opcode timeout-values loaded\n"));
		SC_DEBUG(periph, SCSIPI_DB3,
		    ("CMD LEN SA spec nom. time cmd timeout\n"));

		struct scsipi_opcodes *tot =
		    malloc(sizeof(struct scsipi_opcodes),
		    M_DEVBUF, M_WAITOK|M_ZERO);

		count = 0;
		while (tot != NULL &&
		    dlen >= (int)sizeof(struct scsi_repsupopcode_all_commands_descriptor)) {
			struct scsi_repsupopcode_all_commands_descriptor *acd
			    = (struct scsi_repsupopcode_all_commands_descriptor *)c;
#ifdef SCSIPI_DEBUG
			int cdblen = _2btol((const u_int8_t *)&acd->cdblen);
#endif
			dlen -= sizeof(struct scsi_repsupopcode_all_commands_descriptor);
			c += sizeof(struct scsi_repsupopcode_all_commands_descriptor);
			SC_DEBUG(periph, SCSIPI_DB3,
			    ("0x%02x(%2d) ", acd->opcode, cdblen));

			tot->opcode_info[acd->opcode].ti_flags = SCSIPI_TI_VALID;

			if (acd->flags & RSOC_ACD_SERVACTV) {
				SC_DEBUGN(periph, SCSIPI_DB3,
				    ("0x%02x%02x ",
				    acd->serviceaction[0],
				    acd->serviceaction[1]));
			} else {
				SC_DEBUGN(periph, SCSIPI_DB3, (" "));
			}

			if (acd->flags & RSOC_ACD_CTDP
			    && dlen >= (int)sizeof(struct scsi_repsupopcode_timeouts_descriptor)) {
				struct scsi_repsupopcode_timeouts_descriptor *td
				    = (struct scsi_repsupopcode_timeouts_descriptor *)c;
				long nomto = _4btol(td->nom_process_timeout);
				long cmdto = _4btol(td->cmd_process_timeout);
				long t = (cmdto > nomto) ? cmdto : nomto;

				dlen -= sizeof(struct scsi_repsupopcode_timeouts_descriptor);
				c += sizeof(struct scsi_repsupopcode_timeouts_descriptor);

				SC_DEBUGN(periph, SCSIPI_DB3,
				    ("0x%02x %10ld %10ld",
				    td->cmd_specific,
				    nomto, cmdto));

				if (t > tot->opcode_info[acd->opcode].ti_timeout) {
					tot->opcode_info[acd->opcode].ti_timeout = t;
					++count;
				}
			}
			SC_DEBUGN(periph, SCSIPI_DB3, ("\n"));
		}

		if (count > 0) {
			periph->periph_opcs = tot;
		} else {
			free(tot, M_DEVBUF);
			SC_DEBUG(periph, SCSIPI_DB3,
			    ("no usable timeout values available\n"));
		}
	} else {
		SC_DEBUG(periph, SCSIPI_DB3,
		    ("SCSI_MAINTENANCE_IN"
		    "[RSOC_REPORT_SUPPORTED_OPCODES] failed error=%d"
		    " - no device-provided timeout "
		    "values available\n", rc));
	}

	free(data, M_DEVBUF);
}
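
/*
 * Rough layout of the RSOC_RCTD|RSOC_ALL response parsed above:
 *
 *	bytes 0-3: total length of the descriptor list (_4btol(data))
 *	then, per supported command: an all-commands descriptor
 *	(opcode, service action, flags, CDB length), followed, when
 *	RSOC_ACD_CTDP is set, by a timeouts descriptor carrying the
 *	nominal and recommended command processing timeouts in seconds.
 *
 * For each opcode the table keeps the larger of the two timeouts;
 * scsipi_update_timeouts() below converts it to milliseconds.
 */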

/*
 * scsipi_update_timeouts:
 *	Override timeout value if device/config provided
 *	timeouts are available.
 */
static void
scsipi_update_timeouts(struct scsipi_xfer *xs)
{
	struct scsipi_opcodes *opcs;
	u_int8_t cmd;
	int timeout;
	struct scsipi_opinfo *oi;

	if (xs->timeout <= 0) {
		return;
	}

	opcs = xs->xs_periph->periph_opcs;

	if (opcs == NULL) {
		return;
	}

	cmd = xs->cmd->opcode;
	oi = &opcs->opcode_info[cmd];

	timeout = 1000 * (int)oi->ti_timeout;

	if (timeout > xs->timeout && timeout < 86400000) {
		/*
		 * pick up device configured timeouts if they
		 * are longer than the requested ones but less
		 * than a day
		 */
#ifdef SCSIPI_DEBUG
		if ((oi->ti_flags & SCSIPI_TI_LOGGED) == 0) {
			SC_DEBUG(xs->xs_periph, SCSIPI_DB3,
			    ("Overriding command 0x%02x "
			    "timeout of %d with %d ms\n",
			    cmd, xs->timeout, timeout));
			oi->ti_flags |= SCSIPI_TI_LOGGED;
		}
#endif
		xs->timeout = timeout;
	}
}

/*
 * scsipi_free_opcodeinfo:
 *
 *	free the opcode information table
 */
void
scsipi_free_opcodeinfo(struct scsipi_periph *periph)
{
	if (periph->periph_opcs != NULL) {
		free(periph->periph_opcs, M_DEVBUF);
	}

	periph->periph_opcs = NULL;
}

/*
 * scsipi_done:
 *
 *	This routine is called by an adapter's interrupt handler when
 *	an xfer is completed.
 */
void
scsipi_done(struct scsipi_xfer *xs)
{
	struct scsipi_channel *chan;

	/*
	 * If there are more xfers on the channel's queue, attempt to
	 * run them.
	 */
	if ((chan = scsipi_done_internal(xs, true)) != NULL)
		scsipi_run_queue(chan);
}

/*
 * Just like scsipi_done(), but no recursion.  Useful if aborting the current
 * transfer.
 */
void
scsipi_done_once(struct scsipi_xfer *xs)
{
	(void)scsipi_done_internal(xs, false);
}

static struct scsipi_channel *
scsipi_done_internal(struct scsipi_xfer *xs, bool more)
{
	struct scsipi_periph *periph = xs->xs_periph;
	struct scsipi_channel *chan = periph->periph_channel;
	int freezecnt;

	SC_DEBUG(periph, SCSIPI_DB2, ("scsipi_done\n"));
#ifdef SCSIPI_DEBUG
	if (periph->periph_dbflags & SCSIPI_DB1)
		show_scsipi_cmd(xs);
#endif

	mutex_enter(chan_mtx(chan));
	SDT_PROBE1(scsi, base, xfer, done, xs);
	/*
	 * The resource this command was using is now free.
	 */
	if (xs->xs_status & XS_STS_DONE) {
		/*
		 * XXX in certain circumstances, such as a device
		 * being detached, an xs that has already been
		 * scsipi_done()'d by the main thread will be done'd
		 * again by scsibusdetach().  Putting the xs on the
		 * chan_complete queue causes list corruption and
		 * everyone dies.  This prevents that, but perhaps
		 * there should be better coordination somewhere such
		 * that this won't ever happen (and can be turned into
		 * a KASSERT()).
		 */
		SDT_PROBE1(scsi, base, xfer, redone, xs);
		mutex_exit(chan_mtx(chan));
		goto out;
	}
	scsipi_put_resource(chan);
	xs->xs_periph->periph_sent--;

	/*
	 * If the command was tagged, free the tag.
	 */
	if (XS_CTL_TAGTYPE(xs) != 0)
		scsipi_put_tag(xs);
	else
		periph->periph_flags &= ~PERIPH_UNTAG;

	/* Mark the command as `done'. */
	xs->xs_status |= XS_STS_DONE;

#ifdef DIAGNOSTIC
	if ((xs->xs_control & (XS_CTL_ASYNC|XS_CTL_POLL)) ==
	    (XS_CTL_ASYNC|XS_CTL_POLL))
		panic("scsipi_done: ASYNC and POLL");
#endif

	/*
	 * If the xfer had an error of any sort, freeze the
	 * periph's queue.  Freeze it again if we were requested
	 * to do so in the xfer.
	 */
	freezecnt = 0;
	if (xs->error != XS_NOERROR)
		freezecnt++;
	if (xs->xs_control & XS_CTL_FREEZE_PERIPH)
		freezecnt++;
	if (freezecnt != 0)
		scsipi_periph_freeze_locked(periph, freezecnt);

	/*
	 * record the xfer with a pending sense, in case a SCSI reset is
	 * received before the thread is woken up.
	 */
	if (xs->error == XS_BUSY && xs->status == SCSI_CHECK) {
		periph->periph_flags |= PERIPH_SENSE;
		periph->periph_xscheck = xs;
	}

	/*
	 * If this was an xfer that was not to complete asynchronously,
	 * let the requesting thread perform error checking/handling
	 * in its context.
	 */
	if ((xs->xs_control & XS_CTL_ASYNC) == 0) {
		/*
		 * If it's a polling job, just return, to unwind the
		 * call graph.  We don't need to restart the queue,
		 * because polling jobs are treated specially, and
		 * are really only used during crash dumps anyway
		 * (XXX or during boot-time autoconfiguration of
		 * ATAPI devices).
		 */
		if (xs->xs_control & XS_CTL_POLL) {
			mutex_exit(chan_mtx(chan));
			return NULL;
		}
		cv_broadcast(xs_cv(xs));
		mutex_exit(chan_mtx(chan));
		goto out;
	}

	/*
	 * Catch the extremely common case of I/O completing
	 * without error; no use in taking a context switch
	 * if we can handle it in interrupt context.
	 */
	if (xs->error == XS_NOERROR && more == true) {
		mutex_exit(chan_mtx(chan));
		(void) scsipi_complete(xs);
		goto out;
	}

	/*
	 * There is an error on this xfer.  Put it on the channel's
	 * completion queue, and wake up the completion thread.
	 */
	TAILQ_INSERT_TAIL(&chan->chan_complete, xs, channel_q);
	cv_broadcast(chan_cv_complete(chan));
	mutex_exit(chan_mtx(chan));

 out:
	return chan;
}

/*
 * scsipi_complete:
 *
 *	Completion of a scsipi_xfer.  This is the guts of scsipi_done().
 *
 *	NOTE: This routine MUST be called with valid thread context
 *	except for the case where the following two conditions are
 *	true:
 *
 *		xs->error == XS_NOERROR
 *		XS_CTL_ASYNC is set in xs->xs_control
 *
 *	The semantics of this routine can be tricky, so here is an
 *	explanation:
 *
 *		0		Xfer completed successfully.
 *
 *		ERESTART	Xfer had an error, but was restarted.
 *
 *		anything else	Xfer had an error, return value is Unix
 *				errno.
 *
 *	If the return value is anything but ERESTART:
 *
 *		- If XS_CTL_ASYNC is set, `xs' has been freed back to
 *		  the pool.
 *		- If there is a buf associated with the xfer,
 *		  it has been biodone()'d.
 */
static int
scsipi_complete(struct scsipi_xfer *xs)
{
	struct scsipi_periph *periph = xs->xs_periph;
	struct scsipi_channel *chan = periph->periph_channel;
	int error;

	SDT_PROBE1(scsi, base, xfer, complete, xs);

#ifdef DIAGNOSTIC
	if ((xs->xs_control & XS_CTL_ASYNC) != 0 && xs->bp == NULL)
		panic("scsipi_complete: XS_CTL_ASYNC but no buf");
#endif
	/*
	 * If command terminated with a CHECK CONDITION, we need to issue a
	 * REQUEST_SENSE command.  Once the REQUEST_SENSE has been processed
	 * we'll have the real status.
	 * Must be processed with channel lock held to avoid missing
	 * a SCSI bus reset for this command.
	 */
	mutex_enter(chan_mtx(chan));
	if (xs->error == XS_BUSY && xs->status == SCSI_CHECK) {
		/* request sense for a request sense? */
		if (xs->xs_control & XS_CTL_REQSENSE) {
			scsipi_printaddr(periph);
			printf("request sense for a request sense?\n");
			/* XXX maybe we should reset the device? */
			/* we've been frozen because xs->error != XS_NOERROR */
			scsipi_periph_thaw_locked(periph, 1);
			mutex_exit(chan_mtx(chan));
			if (xs->resid < xs->datalen) {
				printf("we read %d bytes of sense anyway:\n",
				    xs->datalen - xs->resid);
				scsipi_print_sense_data((void *)xs->data, 0);
			}
			return EINVAL;
		}
		/* XXX allows other commands to queue or run */
		mutex_exit(chan_mtx(chan));
		scsipi_request_sense(xs);
	} else
		mutex_exit(chan_mtx(chan));

	/*
	 * If it's a user level request, bypass all usual completion
	 * processing, let the user work it out..
	 */
	if ((xs->xs_control & XS_CTL_USERCMD) != 0) {
		SC_DEBUG(periph, SCSIPI_DB3, ("calling user done()\n"));
		mutex_enter(chan_mtx(chan));
		if (xs->error != XS_NOERROR)
			scsipi_periph_thaw_locked(periph, 1);
		mutex_exit(chan_mtx(chan));
		scsipi_user_done(xs);
		SC_DEBUG(periph, SCSIPI_DB3, ("returned from user done()\n"));
		return 0;
	}

	switch (xs->error) {
	case XS_NOERROR:
		error = 0;
		break;

	case XS_SENSE:
	case XS_SHORTSENSE:
		error = (*chan->chan_bustype->bustype_interpret_sense)(xs);
		break;

	case XS_RESOURCE_SHORTAGE:
		/*
		 * XXX Should freeze channel's queue.
		 */
		scsipi_printaddr(periph);
		printf("adapter resource shortage\n");
		/* FALLTHROUGH */

	case XS_BUSY:
		if (xs->error == XS_BUSY && xs->status == SCSI_QUEUE_FULL) {
			struct scsipi_max_openings mo;

			/*
			 * We set the openings to active - 1, assuming that
			 * the command that got us here is the first one that
			 * can't fit into the device's queue.  If that's not
			 * the case, I guess we'll find out soon enough.
			 */
			mo.mo_target = periph->periph_target;
			mo.mo_lun = periph->periph_lun;
			if (periph->periph_active < periph->periph_openings)
				mo.mo_openings = periph->periph_active - 1;
			else
				mo.mo_openings = periph->periph_openings - 1;
#ifdef DIAGNOSTIC
			if (mo.mo_openings < 0) {
				scsipi_printaddr(periph);
				printf("QUEUE FULL resulted in < 0 openings\n");
				panic("scsipi_done");
			}
#endif
			if (mo.mo_openings == 0) {
				scsipi_printaddr(periph);
				printf("QUEUE FULL resulted in 0 openings\n");
				mo.mo_openings = 1;
			}
			scsipi_async_event(chan, ASYNC_EVENT_MAX_OPENINGS, &mo);
			error = ERESTART;
		} else if (xs->xs_retries != 0) {
			xs->xs_retries--;
			/*
			 * Wait one second, and try again.
			 */
			mutex_enter(chan_mtx(chan));
			if ((xs->xs_control & XS_CTL_POLL) ||
			    (chan->chan_flags & SCSIPI_CHAN_TACTIVE) == 0) {
				/* XXX: quite extreme */
				kpause("xsbusy", false, hz, chan_mtx(chan));
			} else if (!callout_pending(&periph->periph_callout)) {
				scsipi_periph_freeze_locked(periph, 1);
				callout_reset(&periph->periph_callout,
				    hz, scsipi_periph_timed_thaw, periph);
			}
			mutex_exit(chan_mtx(chan));
			error = ERESTART;
		} else
			error = EBUSY;
		break;

	case XS_REQUEUE:
		error = ERESTART;
		break;

	case XS_SELTIMEOUT:
	case XS_TIMEOUT:
		/*
		 * If the device hasn't gone away, honor retry counts.
		 *
		 * Note that if we're in the middle of probing it,
		 * it won't be found because it isn't here yet so
		 * we won't honor the retry count in that case.
		 */
		if (scsipi_lookup_periph(chan, periph->periph_target,
		    periph->periph_lun) && xs->xs_retries != 0) {
			xs->xs_retries--;
			error = ERESTART;
		} else
			error = EIO;
		break;

	case XS_RESET:
		if (xs->xs_control & XS_CTL_REQSENSE) {
			/*
			 * request sense interrupted by reset: signal it
			 * with EINTR return code.
			 */
			error = EINTR;
		} else {
			if (xs->xs_retries != 0) {
				xs->xs_retries--;
				error = ERESTART;
			} else
				error = EIO;
		}
		break;

	case XS_DRIVER_STUFFUP:
		scsipi_printaddr(periph);
		printf("generic HBA error\n");
		error = EIO;
		break;
	default:
		scsipi_printaddr(periph);
		printf("invalid return code from adapter: %d\n", xs->error);
		error = EIO;
		break;
	}

	mutex_enter(chan_mtx(chan));
	if (error == ERESTART) {
		SDT_PROBE1(scsi, base, xfer, restart, xs);
		/*
		 * If we get here, the periph has been thawed and frozen
		 * again if we had to issue recovery commands.  Alternatively,
		 * it may have been frozen again and in a timed thaw.  In
		 * any case, we thaw the periph once we re-enqueue the
		 * command.  Once the periph is fully thawed, it will begin
		 * operation again.
		 */
		xs->error = XS_NOERROR;
		xs->status = SCSI_OK;
		xs->xs_status &= ~XS_STS_DONE;
		xs->xs_requeuecnt++;
		error = scsipi_enqueue(xs);
		if (error == 0) {
			scsipi_periph_thaw_locked(periph, 1);
			mutex_exit(chan_mtx(chan));
			return ERESTART;
		}
	}

	/*
	 * scsipi_done() freezes the queue if not XS_NOERROR.
	 * Thaw it here.
	 */
	if (xs->error != XS_NOERROR)
		scsipi_periph_thaw_locked(periph, 1);
	mutex_exit(chan_mtx(chan));

	if (periph->periph_switch->psw_done)
		periph->periph_switch->psw_done(xs, error);

	mutex_enter(chan_mtx(chan));
	if (xs->xs_control & XS_CTL_ASYNC)
		scsipi_put_xs(xs);
	mutex_exit(chan_mtx(chan));

	return error;
}

/*
 * Issue a request sense for the given scsipi_xfer. Called when the xfer
 * returns with a CHECK_CONDITION status. Must be called in valid thread
 * context.
 */

static void
scsipi_request_sense(struct scsipi_xfer *xs)
{
	struct scsipi_periph *periph = xs->xs_periph;
	int flags, error;
	struct scsi_request_sense cmd;

	periph->periph_flags |= PERIPH_SENSE;

	/* if command was polling, request sense will too */
	flags = xs->xs_control & XS_CTL_POLL;
	/* Polling commands can't sleep */
	if (flags)
		flags |= XS_CTL_NOSLEEP;

	flags |= XS_CTL_REQSENSE | XS_CTL_URGENT | XS_CTL_DATA_IN |
	    XS_CTL_THAW_PERIPH | XS_CTL_FREEZE_PERIPH;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = SCSI_REQUEST_SENSE;
	cmd.length = sizeof(struct scsi_sense_data);

	error = scsipi_command(periph, (void *)&cmd, sizeof(cmd),
	    (void *)&xs->sense.scsi_sense, sizeof(struct scsi_sense_data),
	    0, 1000, NULL, flags);
	periph->periph_flags &= ~PERIPH_SENSE;
	periph->periph_xscheck = NULL;
	switch (error) {
	case 0:
		/* we have a valid sense */
		xs->error = XS_SENSE;
		return;
	case EINTR:
		/* REQUEST_SENSE interrupted by bus reset. */
		xs->error = XS_RESET;
		return;
	case EIO:
		/* request sense couldn't be performed */
		/*
		 * XXX this isn't quite right but we don't have anything
		 * better for now
		 */
		xs->error = XS_DRIVER_STUFFUP;
		return;
	default:
		/* Notify that request sense failed. */
		xs->error = XS_DRIVER_STUFFUP;
		scsipi_printaddr(periph);
		printf("request sense failed with error %d\n", error);
		return;
	}
}

/*
 * scsipi_enqueue:
 *
 *	Enqueue an xfer on a channel.
 */
static int
scsipi_enqueue(struct scsipi_xfer *xs)
{
	struct scsipi_channel *chan = xs->xs_periph->periph_channel;
	struct scsipi_xfer *qxs;

	SDT_PROBE1(scsi, base, xfer, enqueue, xs);

	/*
	 * If the xfer is to be polled, and there are already jobs on
	 * the queue, we can't proceed.
	 */
	KASSERT(mutex_owned(chan_mtx(chan)));
	if ((xs->xs_control & XS_CTL_POLL) != 0 &&
	    TAILQ_FIRST(&chan->chan_queue) != NULL) {
		xs->error = XS_DRIVER_STUFFUP;
		return EAGAIN;
	}

	/*
	 * If we have an URGENT xfer, it's an error recovery command
	 * and it should just go on the head of the channel's queue.
	 */
	if (xs->xs_control & XS_CTL_URGENT) {
		TAILQ_INSERT_HEAD(&chan->chan_queue, xs, channel_q);
		goto out;
	}

	/*
	 * If this xfer has already been on the queue before, we
	 * need to reinsert it in the correct order. That order is:
	 *
	 *	Immediately before the first xfer for this periph
	 *	with a requeuecnt less than xs->xs_requeuecnt.
	 *
	 * Failing that, at the end of the queue. (We'll end up
	 * there naturally.)
	 */
	if (xs->xs_requeuecnt != 0) {
		for (qxs = TAILQ_FIRST(&chan->chan_queue); qxs != NULL;
		     qxs = TAILQ_NEXT(qxs, channel_q)) {
			if (qxs->xs_periph == xs->xs_periph &&
			    qxs->xs_requeuecnt < xs->xs_requeuecnt)
				break;
		}
		if (qxs != NULL) {
			TAILQ_INSERT_AFTER(&chan->chan_queue, qxs, xs,
			    channel_q);
			goto out;
		}
	}
	TAILQ_INSERT_TAIL(&chan->chan_queue, xs, channel_q);
 out:
	if (xs->xs_control & XS_CTL_THAW_PERIPH)
		scsipi_periph_thaw_locked(xs->xs_periph, 1);
	return 0;
}

/*
 * scsipi_run_queue:
 *
 *	Start as many xfers as possible running on the channel.
 */
static void
scsipi_run_queue(struct scsipi_channel *chan)
{
	struct scsipi_xfer *xs;
	struct scsipi_periph *periph;

	SDT_PROBE1(scsi, base, queue, batch__start, chan);
	for (;;) {
		mutex_enter(chan_mtx(chan));

		/*
		 * If the channel is frozen, we can't do any work right
		 * now.
		 */
		if (chan->chan_qfreeze != 0) {
			mutex_exit(chan_mtx(chan));
			break;
		}

		/*
		 * Look for work to do, and make sure we can do it.
		 */
		for (xs = TAILQ_FIRST(&chan->chan_queue); xs != NULL;
		     xs = TAILQ_NEXT(xs, channel_q)) {
			periph = xs->xs_periph;

			if ((periph->periph_sent >= periph->periph_openings) ||
			    periph->periph_qfreeze != 0 ||
			    (periph->periph_flags & PERIPH_UNTAG) != 0)
				continue;

			if ((periph->periph_flags &
			    (PERIPH_RECOVERING | PERIPH_SENSE)) != 0 &&
			    (xs->xs_control & XS_CTL_URGENT) == 0)
				continue;

			/*
			 * We can issue this xfer!
			 */
			goto got_one;
		}

		/*
		 * Can't find any work to do right now.
		 */
		mutex_exit(chan_mtx(chan));
		break;

 got_one:
		/*
		 * Have an xfer to run. Allocate a resource from
		 * the adapter to run it. If we can't allocate that
		 * resource, we don't dequeue the xfer.
		 */
		if (scsipi_get_resource(chan) == 0) {
			/*
			 * Adapter is out of resources. If the adapter
			 * supports it, attempt to grow them.
			 */
			if (scsipi_grow_resources(chan) == 0) {
				/*
				 * Wasn't able to grow resources,
				 * nothing more we can do.
				 */
				if (xs->xs_control & XS_CTL_POLL) {
					scsipi_printaddr(xs->xs_periph);
					printf("polling command but no "
					    "adapter resources\n");
					/* We'll panic shortly... */
				}
				mutex_exit(chan_mtx(chan));

				/*
				 * XXX: We should be able to note that
				 * XXX: resources are needed here!
				 */
				break;
			}
			/*
			 * scsipi_grow_resources() allocated the resource
			 * for us.
			 */
		}

		/*
		 * We have a resource to run this xfer, do it!
		 */
		TAILQ_REMOVE(&chan->chan_queue, xs, channel_q);

		/*
		 * If the command is to be tagged, allocate a tag ID
		 * for it.
		 */
		if (XS_CTL_TAGTYPE(xs) != 0)
			scsipi_get_tag(xs);
		else
			periph->periph_flags |= PERIPH_UNTAG;
		periph->periph_sent++;
		mutex_exit(chan_mtx(chan));

		SDT_PROBE2(scsi, base, queue, run, chan, xs);
		scsipi_adapter_request(chan, ADAPTER_REQ_RUN_XFER, xs);
	}
	SDT_PROBE1(scsi, base, queue, batch__done, chan);
}

/*
 * scsipi_execute_xs:
 *
 *	Begin execution of an xfer, waiting for it to complete, if necessary.
 */
int
scsipi_execute_xs(struct scsipi_xfer *xs)
{
	struct scsipi_periph *periph = xs->xs_periph;
	struct scsipi_channel *chan = periph->periph_channel;
	int oasync, async, poll, error;

	KASSERT(!cold);

	scsipi_update_timeouts(xs);

	(chan->chan_bustype->bustype_cmd)(xs);

	xs->xs_status &= ~XS_STS_DONE;
	xs->error = XS_NOERROR;
	xs->resid = xs->datalen;
	xs->status = SCSI_OK;
	SDT_PROBE1(scsi, base, xfer, execute, xs);

#ifdef SCSIPI_DEBUG
	if (xs->xs_periph->periph_dbflags & SCSIPI_DB3) {
		printf("scsipi_execute_xs: ");
		show_scsipi_xs(xs);
		printf("\n");
	}
#endif

	/*
	 * Deal with command tagging:
	 *
	 *	- If the device's current operating mode doesn't
	 *	  include tagged queueing, clear the tag mask.
	 *
	 *	- If the device's current operating mode *does*
	 *	  include tagged queueing, set the tag_type in
	 *	  the xfer to the appropriate byte for the tag
	 *	  message.
	 */
	if ((PERIPH_XFER_MODE(periph) & PERIPH_CAP_TQING) == 0 ||
	    (xs->xs_control & XS_CTL_REQSENSE)) {
		xs->xs_control &= ~XS_CTL_TAGMASK;
		xs->xs_tag_type = 0;
	} else {
		/*
		 * If the request doesn't specify a tag, give Head
		 * tags to URGENT operations and Simple tags to
		 * everything else.
		 */
		if (XS_CTL_TAGTYPE(xs) == 0) {
			if (xs->xs_control & XS_CTL_URGENT)
				xs->xs_control |= XS_CTL_HEAD_TAG;
			else
				xs->xs_control |= XS_CTL_SIMPLE_TAG;
		}

		switch (XS_CTL_TAGTYPE(xs)) {
		case XS_CTL_ORDERED_TAG:
			xs->xs_tag_type = MSG_ORDERED_Q_TAG;
			break;

		case XS_CTL_SIMPLE_TAG:
			xs->xs_tag_type = MSG_SIMPLE_Q_TAG;
			break;

		case XS_CTL_HEAD_TAG:
			xs->xs_tag_type = MSG_HEAD_OF_Q_TAG;
			break;

		default:
			scsipi_printaddr(periph);
			printf("invalid tag mask 0x%08x\n",
			    XS_CTL_TAGTYPE(xs));
			panic("scsipi_execute_xs");
		}
	}

	/* If the adapter wants us to poll, poll. */
	if (chan->chan_adapter->adapt_flags & SCSIPI_ADAPT_POLL_ONLY)
		xs->xs_control |= XS_CTL_POLL;

	/*
	 * If we don't yet have a completion thread, or we are to poll for
	 * completion, clear the ASYNC flag.
	 */
	oasync = (xs->xs_control & XS_CTL_ASYNC);
	if (chan->chan_thread == NULL || (xs->xs_control & XS_CTL_POLL) != 0)
		xs->xs_control &= ~XS_CTL_ASYNC;

	async = (xs->xs_control & XS_CTL_ASYNC);
	poll = (xs->xs_control & XS_CTL_POLL);

#ifdef DIAGNOSTIC
	if (oasync != 0 && xs->bp == NULL)
		panic("scsipi_execute_xs: XS_CTL_ASYNC but no buf");
#endif

	/*
	 * Enqueue the transfer. If we're not polling for completion, this
	 * should ALWAYS return `no error'.
	 */
	error = scsipi_enqueue(xs);
	if (error) {
		if (poll == 0) {
			scsipi_printaddr(periph);
			printf("not polling, but enqueue failed with %d\n",
			    error);
			panic("scsipi_execute_xs");
		}

		scsipi_printaddr(periph);
		printf("should have flushed queue?\n");
		goto free_xs;
	}

	mutex_exit(chan_mtx(chan));
 restarted:
	scsipi_run_queue(chan);
	mutex_enter(chan_mtx(chan));

	/*
	 * The xfer is enqueued, and possibly running. If it's to be
	 * completed asynchronously, just return now.
	 */
	if (async)
		return 0;

	/*
	 * Not an asynchronous command; wait for it to complete.
	 */
	while ((xs->xs_status & XS_STS_DONE) == 0) {
		if (poll) {
			scsipi_printaddr(periph);
			printf("polling command not done\n");
			panic("scsipi_execute_xs");
		}
		cv_wait(xs_cv(xs), chan_mtx(chan));
	}

	/*
	 * Command is complete. scsipi_done() has awakened us to perform
	 * the error handling.
	 */
	mutex_exit(chan_mtx(chan));
	error = scsipi_complete(xs);
	if (error == ERESTART)
		goto restarted;

	/*
	 * If it was meant to run async and we cleared async ourselves,
	 * don't return an error here; it has already been handled.
	 */
	if (oasync)
		error = 0;
	/*
	 * Command completed successfully or fatal error occurred. Fall
	 * into....
	 */
	mutex_enter(chan_mtx(chan));
 free_xs:
	scsipi_put_xs(xs);
	mutex_exit(chan_mtx(chan));

	/*
	 * Kick the queue, keep it running in case it stopped for some
	 * reason.
	 */
	scsipi_run_queue(chan);

	mutex_enter(chan_mtx(chan));
	return error;
}
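
/*
 * Example (illustrative sketch, not part of the code above):
 * scsipi_execute_xs() is normally reached through scsipi_command().
 * A polled TEST UNIT READY issued by a periph driver might look like
 * this ("periph" being the caller's periph handle):
 *
 *	struct scsi_test_unit_ready cmd;
 *	int error;
 *
 *	memset(&cmd, 0, sizeof(cmd));
 *	cmd.opcode = SCSI_TEST_UNIT_READY;
 *	error = scsipi_command(periph, (void *)&cmd, sizeof(cmd),
 *	    NULL, 0, SCSIPIRETRIES, 10000, NULL, XS_CTL_POLL);
 */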

/*
 * scsipi_completion_thread:
 *
 *	This is the completion thread. We wait for errors on
 *	asynchronous xfers, and perform the error handling
 *	function, restarting the command, if necessary.
 */
static void
scsipi_completion_thread(void *arg)
{
	struct scsipi_channel *chan = arg;
	struct scsipi_xfer *xs;

	if (chan->chan_init_cb)
		(*chan->chan_init_cb)(chan, chan->chan_init_cb_arg);

	mutex_enter(chan_mtx(chan));
	chan->chan_flags |= SCSIPI_CHAN_TACTIVE;
	for (;;) {
		xs = TAILQ_FIRST(&chan->chan_complete);
		if (xs == NULL && chan->chan_tflags == 0) {
			/* nothing to do; wait */
			cv_wait(chan_cv_complete(chan), chan_mtx(chan));
			continue;
		}
		if (chan->chan_tflags & SCSIPI_CHANT_CALLBACK) {
			/* call chan_callback from thread context */
			chan->chan_tflags &= ~SCSIPI_CHANT_CALLBACK;
			chan->chan_callback(chan, chan->chan_callback_arg);
			continue;
		}
		if (chan->chan_tflags & SCSIPI_CHANT_GROWRES) {
			/* attempt to get more openings for this channel */
			chan->chan_tflags &= ~SCSIPI_CHANT_GROWRES;
			mutex_exit(chan_mtx(chan));
			scsipi_adapter_request(chan,
			    ADAPTER_REQ_GROW_RESOURCES, NULL);
			scsipi_channel_thaw(chan, 1);
			if (chan->chan_tflags & SCSIPI_CHANT_GROWRES)
				kpause("scsizzz", false, hz/10, NULL);
			mutex_enter(chan_mtx(chan));
			continue;
		}
		if (chan->chan_tflags & SCSIPI_CHANT_KICK) {
			/* explicitly run the queues for this channel */
			chan->chan_tflags &= ~SCSIPI_CHANT_KICK;
			mutex_exit(chan_mtx(chan));
			scsipi_run_queue(chan);
			mutex_enter(chan_mtx(chan));
			continue;
		}
		if (chan->chan_tflags & SCSIPI_CHANT_SHUTDOWN) {
			break;
		}
		if (xs) {
			TAILQ_REMOVE(&chan->chan_complete, xs, channel_q);
			mutex_exit(chan_mtx(chan));

			/*
			 * Have an xfer with an error; process it.
			 */
			(void) scsipi_complete(xs);

			/*
			 * Kick the queue; keep it running if it was stopped
			 * for some reason.
			 */
			scsipi_run_queue(chan);
			mutex_enter(chan_mtx(chan));
		}
	}

	chan->chan_thread = NULL;

	/* In case parent is waiting for us to exit. */
	cv_broadcast(chan_cv_thread(chan));
	mutex_exit(chan_mtx(chan));

	kthread_exit(0);
}

/*
 * scsipi_thread_call_callback:
 *
 *	Request to call a callback from the completion thread.
 */
int
scsipi_thread_call_callback(struct scsipi_channel *chan,
    void (*callback)(struct scsipi_channel *, void *), void *arg)
{

	mutex_enter(chan_mtx(chan));
	if ((chan->chan_flags & SCSIPI_CHAN_TACTIVE) == 0) {
		/* kernel thread doesn't exist yet */
		mutex_exit(chan_mtx(chan));
		return ESRCH;
	}
	if (chan->chan_tflags & SCSIPI_CHANT_CALLBACK) {
		mutex_exit(chan_mtx(chan));
		return EBUSY;
	}
	scsipi_channel_freeze(chan, 1);
	chan->chan_callback = callback;
	chan->chan_callback_arg = arg;
	chan->chan_tflags |= SCSIPI_CHANT_CALLBACK;
	cv_broadcast(chan_cv_complete(chan));
	mutex_exit(chan_mtx(chan));
	return 0;
}
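
/*
 * Example (illustrative sketch; "mydrv_cb" and "sc" are hypothetical
 * driver names): a driver that must reconfigure from thread context
 * could request a callback like this:
 *
 *	static void
 *	mydrv_cb(struct scsipi_channel *chan, void *arg)
 *	{
 *		struct mydrv_softc *sc = arg;
 *
 *		... runs in the completion thread ...
 *	}
 *
 *	error = scsipi_thread_call_callback(chan, mydrv_cb, sc);
 *	if (error == EBUSY)
 *		... another callback is already pending; try again later ...
 */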

/*
 * scsipi_async_event:
 *
 *	Handle an asynchronous event from an adapter.
 */
void
scsipi_async_event(struct scsipi_channel *chan, scsipi_async_event_t event,
    void *arg)
{
	bool lock = chan_running(chan) > 0;

	if (lock)
		mutex_enter(chan_mtx(chan));
	switch (event) {
	case ASYNC_EVENT_MAX_OPENINGS:
		scsipi_async_event_max_openings(chan,
		    (struct scsipi_max_openings *)arg);
		break;

	case ASYNC_EVENT_XFER_MODE:
		if (chan->chan_bustype->bustype_async_event_xfer_mode) {
			chan->chan_bustype->bustype_async_event_xfer_mode(
			    chan, arg);
		}
		break;

	case ASYNC_EVENT_RESET:
		scsipi_async_event_channel_reset(chan);
		break;
	}
	if (lock)
		mutex_exit(chan_mtx(chan));
}

/*
 * scsipi_async_event_max_openings:
 *
 *	Update the maximum number of outstanding commands a
 *	device may have.
 */
static void
scsipi_async_event_max_openings(struct scsipi_channel *chan,
    struct scsipi_max_openings *mo)
{
	struct scsipi_periph *periph;
	int minlun, maxlun;

	if (mo->mo_lun == -1) {
		/*
		 * Wildcarded; apply it to all LUNs.
		 */
		minlun = 0;
		maxlun = chan->chan_nluns - 1;
	} else
		minlun = maxlun = mo->mo_lun;

	/* XXX This could really suck with a large LUN space. */
	for (; minlun <= maxlun; minlun++) {
		periph = scsipi_lookup_periph_locked(chan, mo->mo_target,
		    minlun);
		if (periph == NULL)
			continue;

		if (mo->mo_openings < periph->periph_openings)
			periph->periph_openings = mo->mo_openings;
		else if (mo->mo_openings > periph->periph_openings &&
		    (periph->periph_flags & PERIPH_GROW_OPENINGS) != 0)
			periph->periph_openings = mo->mo_openings;
	}
}
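
/*
 * Example (illustrative sketch): an adapter that concludes a target can
 * sustain only four outstanding commands could report that for every
 * LUN on the target (mo_lun == -1 is the wildcard handled above):
 *
 *	struct scsipi_max_openings mo;
 *
 *	mo.mo_target = target;
 *	mo.mo_lun = -1;
 *	mo.mo_openings = 4;
 *	scsipi_async_event(chan, ASYNC_EVENT_MAX_OPENINGS, &mo);
 */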

/*
 * scsipi_set_xfer_mode:
 *
 *	Set the xfer mode for the specified I_T Nexus.
 */
void
scsipi_set_xfer_mode(struct scsipi_channel *chan, int target, int immed)
{
	struct scsipi_xfer_mode xm;
	struct scsipi_periph *itperiph;
	int lun;

	/*
	 * Go to the minimal xfer mode.
	 */
	xm.xm_target = target;
	xm.xm_mode = 0;
	xm.xm_period = 0;			/* ignored */
	xm.xm_offset = 0;			/* ignored */

	/*
	 * Find the first LUN we know about on this I_T Nexus.
	 */
	for (itperiph = NULL, lun = 0; lun < chan->chan_nluns; lun++) {
		itperiph = scsipi_lookup_periph(chan, target, lun);
		if (itperiph != NULL)
			break;
	}
	if (itperiph != NULL) {
		xm.xm_mode = itperiph->periph_cap;
		/*
		 * Now issue the request to the adapter.
		 */
		scsipi_adapter_request(chan, ADAPTER_REQ_SET_XFER_MODE, &xm);
		/*
		 * If we want this to happen immediately, issue a dummy
		 * command, since most adapters can't really negotiate unless
		 * they're executing a job.
		 */
		if (immed != 0) {
			(void) scsipi_test_unit_ready(itperiph,
			    XS_CTL_DISCOVERY | XS_CTL_IGNORE_ILLEGAL_REQUEST |
			    XS_CTL_IGNORE_NOT_READY |
			    XS_CTL_IGNORE_MEDIA_CHANGE);
		}
	}
}
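
/*
 * Example (illustrative sketch): after repeated transfer errors, a bus
 * driver could drop the nexus back to the minimal mode and renegotiate
 * immediately by way of the dummy command described above:
 *
 *	scsipi_set_xfer_mode(chan, periph->periph_target, 1);
 */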

/*
 * scsipi_async_event_channel_reset:
 *
 *	Handle a SCSI bus reset.  Called with the channel lock held.
 */
static void
scsipi_async_event_channel_reset(struct scsipi_channel *chan)
{
	struct scsipi_xfer *xs, *xs_next;
	struct scsipi_periph *periph;
	int target, lun;

	/*
	 * Channel has been reset. Also mark pending REQUEST_SENSE commands
	 * as reset, since the sense data is no longer available. We can't
	 * call scsipi_done() from here, as the command has not been sent
	 * to the adapter yet (this would corrupt accounting).
	 */

	for (xs = TAILQ_FIRST(&chan->chan_queue); xs != NULL; xs = xs_next) {
		xs_next = TAILQ_NEXT(xs, channel_q);
		if (xs->xs_control & XS_CTL_REQSENSE) {
			TAILQ_REMOVE(&chan->chan_queue, xs, channel_q);
			xs->error = XS_RESET;
			if ((xs->xs_control & XS_CTL_ASYNC) != 0)
				TAILQ_INSERT_TAIL(&chan->chan_complete, xs,
				    channel_q);
		}
	}
	cv_broadcast(chan_cv_complete(chan));
	/* Catch xs with pending sense which may not have a REQSENSE xs yet */
	for (target = 0; target < chan->chan_ntargets; target++) {
		if (target == chan->chan_id)
			continue;
		for (lun = 0; lun < chan->chan_nluns; lun++) {
			periph = scsipi_lookup_periph_locked(chan, target,
			    lun);
			if (periph) {
				xs = periph->periph_xscheck;
				if (xs)
					xs->error = XS_RESET;
			}
		}
	}
}

/*
 * scsipi_target_detach:
 *
 *	Detach all periphs associated with an I_T nexus.
 *	Must be called from valid thread context.
 */
int
scsipi_target_detach(struct scsipi_channel *chan, int target, int lun,
    int flags)
{
	struct scsipi_periph *periph;
	device_t tdev;
	int ctarget, mintarget, maxtarget;
	int clun, minlun, maxlun;
	int error = 0;

	if (target == -1) {
		mintarget = 0;
		maxtarget = chan->chan_ntargets;
	} else {
		if (target == chan->chan_id)
			return EINVAL;
		if (target < 0 || target >= chan->chan_ntargets)
			return EINVAL;
		mintarget = target;
		maxtarget = target + 1;
	}

	if (lun == -1) {
		minlun = 0;
		maxlun = chan->chan_nluns;
	} else {
		if (lun < 0 || lun >= chan->chan_nluns)
			return EINVAL;
		minlun = lun;
		maxlun = lun + 1;
	}

	/* for config_detach */
	KERNEL_LOCK(1, curlwp);

	mutex_enter(chan_mtx(chan));
	for (ctarget = mintarget; ctarget < maxtarget; ctarget++) {
		if (ctarget == chan->chan_id)
			continue;

		for (clun = minlun; clun < maxlun; clun++) {
			periph = scsipi_lookup_periph_locked(chan, ctarget,
			    clun);
			if (periph == NULL)
				continue;
			tdev = periph->periph_dev;
			mutex_exit(chan_mtx(chan));
			error = config_detach(tdev, flags);
			if (error)
				goto out;
			mutex_enter(chan_mtx(chan));
			KASSERT(scsipi_lookup_periph_locked(chan, ctarget,
			    clun) == NULL);
		}
	}
	mutex_exit(chan_mtx(chan));

 out:
	KERNEL_UNLOCK_ONE(curlwp);

	return error;
}
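
/*
 * Example (illustrative sketch): a host adapter being unconfigured can
 * detach every periph on its channel by wildcarding both the target
 * and the LUN (DETACH_FORCE being a config_detach(9) flag):
 *
 *	error = scsipi_target_detach(chan, -1, -1, DETACH_FORCE);
 */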

/*
 * scsipi_adapter_addref:
 *
 *	Add a reference to the adapter pointed to by the provided
 *	link, enabling the adapter if necessary.
 */
int
scsipi_adapter_addref(struct scsipi_adapter *adapt)
{
	int error = 0;

	if (atomic_inc_uint_nv(&adapt->adapt_refcnt) == 1
	    && adapt->adapt_enable != NULL) {
		scsipi_adapter_lock(adapt);
		error = scsipi_adapter_enable(adapt, 1);
		scsipi_adapter_unlock(adapt);
		if (error)
			atomic_dec_uint(&adapt->adapt_refcnt);
	}
	return error;
}

/*
 * scsipi_adapter_delref:
 *
 *	Delete a reference to the adapter pointed to by the provided
 *	link, disabling the adapter if possible.
 */
void
scsipi_adapter_delref(struct scsipi_adapter *adapt)
{

	membar_release();
	if (atomic_dec_uint_nv(&adapt->adapt_refcnt) == 0
	    && adapt->adapt_enable != NULL) {
		membar_acquire();
		scsipi_adapter_lock(adapt);
		(void) scsipi_adapter_enable(adapt, 0);
		scsipi_adapter_unlock(adapt);
	}
}
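
/*
 * Example (illustrative sketch): callers bracket periods of activity so
 * that an adapter with an adapt_enable hook is only powered up while it
 * is actually needed:
 *
 *	if ((error = scsipi_adapter_addref(adapt)) != 0)
 *		return error;
 *	... issue commands ...
 *	scsipi_adapter_delref(adapt);
 */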

static struct scsipi_syncparam {
	int	ss_factor;
	int	ss_period;	/* ns * 100 */
} scsipi_syncparams[] = {
	{ 0x08,	  625 },	/* FAST-160 (Ultra320) */
	{ 0x09,	 1250 },	/* FAST-80 (Ultra160) */
	{ 0x0a,	 2500 },	/* FAST-40 40MHz (Ultra2) */
	{ 0x0b,	 3030 },	/* FAST-40 33MHz (Ultra2) */
	{ 0x0c,	 5000 },	/* FAST-20 (Ultra) */
};
static const int scsipi_nsyncparams =
    sizeof(scsipi_syncparams) / sizeof(scsipi_syncparams[0]);

int
scsipi_sync_period_to_factor(int period /* ns * 100 */)
{
	int i;

	for (i = 0; i < scsipi_nsyncparams; i++) {
		if (period <= scsipi_syncparams[i].ss_period)
			return scsipi_syncparams[i].ss_factor;
	}

	return (period / 100) / 4;
}

int
scsipi_sync_factor_to_period(int factor)
{
	int i;

	for (i = 0; i < scsipi_nsyncparams; i++) {
		if (factor == scsipi_syncparams[i].ss_factor)
			return scsipi_syncparams[i].ss_period;
	}

	return (factor * 4) * 100;
}

int
scsipi_sync_factor_to_freq(int factor)
{
	int i;

	for (i = 0; i < scsipi_nsyncparams; i++) {
		if (factor == scsipi_syncparams[i].ss_factor)
			return 100000000 / scsipi_syncparams[i].ss_period;
	}

	return 10000000 / ((factor * 4) * 10);
}
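
/*
 * Worked example: factor 0x0c is in the table above, so
 * scsipi_sync_factor_to_period(0x0c) returns 5000 (ns * 100, i.e.
 * 50 ns), and scsipi_sync_factor_to_freq(0x0c) returns
 * 100000000 / 5000 = 20000 kHz (FAST-20).  A factor outside the table,
 * e.g. 0x19 (25), falls through to the generic formulas:
 * period = (25 * 4) * 100 = 10000 (100.00 ns), and
 * frequency = 10000000 / ((25 * 4) * 10) = 10000 kHz.
 */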

static inline void
scsipi_adapter_lock(struct scsipi_adapter *adapt)
{

	if ((adapt->adapt_flags & SCSIPI_ADAPT_MPSAFE) == 0)
		KERNEL_LOCK(1, NULL);
}

static inline void
scsipi_adapter_unlock(struct scsipi_adapter *adapt)
{

	if ((adapt->adapt_flags & SCSIPI_ADAPT_MPSAFE) == 0)
		KERNEL_UNLOCK_ONE(NULL);
}

void
scsipi_adapter_minphys(struct scsipi_channel *chan, struct buf *bp)
{
	struct scsipi_adapter *adapt = chan->chan_adapter;

	scsipi_adapter_lock(adapt);
	(adapt->adapt_minphys)(bp);
	scsipi_adapter_unlock(adapt);
}

void
scsipi_adapter_request(struct scsipi_channel *chan,
    scsipi_adapter_req_t req, void *arg)
{
	struct scsipi_adapter *adapt = chan->chan_adapter;

	scsipi_adapter_lock(adapt);
	SDT_PROBE3(scsi, base, adapter, request__start, chan, req, arg);
	(adapt->adapt_request)(chan, req, arg);
	SDT_PROBE3(scsi, base, adapter, request__done, chan, req, arg);
	scsipi_adapter_unlock(adapt);
}

int
scsipi_adapter_ioctl(struct scsipi_channel *chan, u_long cmd,
    void *data, int flag, struct proc *p)
{
	struct scsipi_adapter *adapt = chan->chan_adapter;
	int error;

	if (adapt->adapt_ioctl == NULL)
		return ENOTTY;

	scsipi_adapter_lock(adapt);
	error = (adapt->adapt_ioctl)(chan, cmd, data, flag, p);
	scsipi_adapter_unlock(adapt);
	return error;
}

int
scsipi_adapter_enable(struct scsipi_adapter *adapt, int enable)
{
	int error;

	scsipi_adapter_lock(adapt);
	error = (adapt->adapt_enable)(adapt->adapt_dev, enable);
	scsipi_adapter_unlock(adapt);
	return error;
}

#ifdef SCSIPI_DEBUG
/*
 * Given a scsipi_xfer, dump the request, in all its glory.
 */
void
show_scsipi_xs(struct scsipi_xfer *xs)
{

	printf("xs(%p): ", xs);
	printf("xs_control(0x%08x)", xs->xs_control);
	printf("xs_status(0x%08x)", xs->xs_status);
	printf("periph(%p)", xs->xs_periph);
	printf("retr(0x%x)", xs->xs_retries);
	printf("timo(0x%x)", xs->timeout);
	printf("cmd(%p)", xs->cmd);
	printf("len(0x%x)", xs->cmdlen);
	printf("data(%p)", xs->data);
	printf("len(0x%x)", xs->datalen);
	printf("res(0x%x)", xs->resid);
	printf("err(0x%x)", xs->error);
	printf("bp(%p)", xs->bp);
	show_scsipi_cmd(xs);
}

void
show_scsipi_cmd(struct scsipi_xfer *xs)
{
	u_char *b = (u_char *)xs->cmd;
	int i = 0;

	scsipi_printaddr(xs->xs_periph);
	printf(" command: ");

	if ((xs->xs_control & XS_CTL_RESET) == 0) {
		while (i < xs->cmdlen) {
			if (i)
				printf(",");
			printf("0x%x", b[i++]);
		}
		printf("-[%d bytes]\n", xs->datalen);
		if (xs->datalen)
			show_mem(xs->data, uimin(64, xs->datalen));
	} else
		printf("-RESET-\n");
}

void
show_mem(u_char *address, int num)
{
	int x;

	printf("------------------------------");
	for (x = 0; x < num; x++) {
		if ((x % 16) == 0)
			printf("\n%03d: ", x);
		printf("%02x ", *address++);
	}
	printf("\n------------------------------\n");
}
#endif /* SCSIPI_DEBUG */