1 1.38.4.9 nathanw /* $NetBSD: scsipi_base.c,v 1.38.4.10 2002/04/17 00:06:12 nathanw Exp $ */
2 1.2 bouyer
3 1.8 mycroft /*-
4 1.38.4.1 nathanw * Copyright (c) 1998, 1999, 2000 The NetBSD Foundation, Inc.
5 1.8 mycroft * All rights reserved.
6 1.8 mycroft *
7 1.8 mycroft * This code is derived from software contributed to The NetBSD Foundation
8 1.38.4.1 nathanw * by Charles M. Hannum; by Jason R. Thorpe of the Numerical Aerospace
9 1.38.4.1 nathanw * Simulation Facility, NASA Ames Research Center.
10 1.2 bouyer *
11 1.2 bouyer * Redistribution and use in source and binary forms, with or without
12 1.2 bouyer * modification, are permitted provided that the following conditions
13 1.2 bouyer * are met:
14 1.2 bouyer * 1. Redistributions of source code must retain the above copyright
15 1.2 bouyer * notice, this list of conditions and the following disclaimer.
16 1.2 bouyer * 2. Redistributions in binary form must reproduce the above copyright
17 1.2 bouyer * notice, this list of conditions and the following disclaimer in the
18 1.2 bouyer * documentation and/or other materials provided with the distribution.
19 1.2 bouyer * 3. All advertising materials mentioning features or use of this software
20 1.2 bouyer * must display the following acknowledgement:
21 1.8 mycroft * This product includes software developed by the NetBSD
22 1.8 mycroft * Foundation, Inc. and its contributors.
23 1.8 mycroft * 4. Neither the name of The NetBSD Foundation nor the names of its
24 1.8 mycroft * contributors may be used to endorse or promote products derived
25 1.8 mycroft * from this software without specific prior written permission.
26 1.2 bouyer *
27 1.8 mycroft * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
28 1.8 mycroft * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
29 1.8 mycroft * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
30 1.8 mycroft * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
31 1.8 mycroft * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
32 1.8 mycroft * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
33 1.8 mycroft * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
34 1.8 mycroft * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
35 1.8 mycroft * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
36 1.8 mycroft * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
37 1.8 mycroft * POSSIBILITY OF SUCH DAMAGE.
38 1.2 bouyer */
39 1.38.4.6 nathanw
40 1.38.4.6 nathanw #include <sys/cdefs.h>
41 1.38.4.9 nathanw __KERNEL_RCSID(0, "$NetBSD: scsipi_base.c,v 1.38.4.10 2002/04/17 00:06:12 nathanw Exp $");
42 1.2 bouyer
43 1.13 bouyer #include "opt_scsi.h"
44 1.13 bouyer
45 1.2 bouyer #include <sys/param.h>
46 1.2 bouyer #include <sys/systm.h>
47 1.2 bouyer #include <sys/kernel.h>
48 1.2 bouyer #include <sys/buf.h>
49 1.2 bouyer #include <sys/uio.h>
50 1.2 bouyer #include <sys/malloc.h>
51 1.6 thorpej #include <sys/pool.h>
52 1.2 bouyer #include <sys/errno.h>
53 1.2 bouyer #include <sys/device.h>
54 1.2 bouyer #include <sys/proc.h>
55 1.38.4.1 nathanw #include <sys/kthread.h>
56 1.2 bouyer
57 1.2 bouyer #include <dev/scsipi/scsipi_all.h>
58 1.2 bouyer #include <dev/scsipi/scsipi_disk.h>
59 1.2 bouyer #include <dev/scsipi/scsipiconf.h>
60 1.2 bouyer #include <dev/scsipi/scsipi_base.h>
61 1.2 bouyer
62 1.38.4.1 nathanw #include <dev/scsipi/scsi_all.h>
63 1.38.4.1 nathanw #include <dev/scsipi/scsi_message.h>
64 1.38.4.1 nathanw
65 1.38.4.1 nathanw int scsipi_complete __P((struct scsipi_xfer *));
66 1.38.4.1 nathanw void scsipi_request_sense __P((struct scsipi_xfer *));
67 1.38.4.1 nathanw int scsipi_enqueue __P((struct scsipi_xfer *));
68 1.38.4.1 nathanw void scsipi_run_queue __P((struct scsipi_channel *chan));
69 1.38.4.1 nathanw
70 1.38.4.1 nathanw void scsipi_completion_thread __P((void *));
71 1.38.4.1 nathanw
72 1.38.4.1 nathanw void scsipi_get_tag __P((struct scsipi_xfer *));
73 1.38.4.1 nathanw void scsipi_put_tag __P((struct scsipi_xfer *));
74 1.38.4.1 nathanw
75 1.38.4.1 nathanw int scsipi_get_resource __P((struct scsipi_channel *));
76 1.38.4.1 nathanw void scsipi_put_resource __P((struct scsipi_channel *));
77 1.38.4.1 nathanw __inline int scsipi_grow_resources __P((struct scsipi_channel *));
78 1.38.4.1 nathanw
79 1.38.4.1 nathanw void scsipi_async_event_max_openings __P((struct scsipi_channel *,
80 1.38.4.1 nathanw struct scsipi_max_openings *));
81 1.38.4.1 nathanw void scsipi_async_event_xfer_mode __P((struct scsipi_channel *,
82 1.38.4.1 nathanw struct scsipi_xfer_mode *));
83 1.38.4.1 nathanw void scsipi_async_event_channel_reset __P((struct scsipi_channel *));
84 1.6 thorpej
85 1.38.4.1 nathanw struct pool scsipi_xfer_pool;
86 1.2 bouyer
87 1.2 bouyer /*
88 1.38.4.1 nathanw * scsipi_init:
89 1.38.4.1 nathanw *
90 1.38.4.1 nathanw * Called when a scsibus or atapibus is attached to the system
91 1.38.4.1 nathanw * to initialize shared data structures.
92 1.6 thorpej */
93 1.6 thorpej void
94 1.6 thorpej scsipi_init()
95 1.6 thorpej {
96 1.6 thorpej static int scsipi_init_done;
97 1.6 thorpej
98 1.6 thorpej if (scsipi_init_done)
99 1.6 thorpej return;
100 1.6 thorpej scsipi_init_done = 1;
101 1.6 thorpej
102 1.6 thorpej /* Initialize the scsipi_xfer pool. */
103 1.6 thorpej pool_init(&scsipi_xfer_pool, sizeof(struct scsipi_xfer), 0,
104 1.38.4.9 nathanw 0, 0, "scxspl", NULL);
105 1.6 thorpej }
106 1.6 thorpej
107 1.6 thorpej /*
108 1.38.4.1 nathanw * scsipi_channel_init:
109 1.38.4.1 nathanw *
110 1.38.4.1 nathanw * Initialize a scsipi_channel when it is attached.
111 1.38.4.1 nathanw */
112 1.38.4.1 nathanw int
113 1.38.4.1 nathanw scsipi_channel_init(chan)
114 1.38.4.1 nathanw struct scsipi_channel *chan;
115 1.38.4.1 nathanw {
116 1.38.4.1 nathanw size_t nbytes;
117 1.38.4.1 nathanw int i;
118 1.38.4.1 nathanw
119 1.38.4.1 nathanw /* Initialize shared data. */
120 1.38.4.1 nathanw scsipi_init();
121 1.38.4.1 nathanw
122 1.38.4.1 nathanw /* Initialize the queues. */
123 1.38.4.1 nathanw TAILQ_INIT(&chan->chan_queue);
124 1.38.4.1 nathanw TAILQ_INIT(&chan->chan_complete);
125 1.38.4.1 nathanw
126 1.38.4.1 nathanw nbytes = chan->chan_ntargets * sizeof(struct scsipi_periph **);
127 1.38.4.1 nathanw chan->chan_periphs = malloc(nbytes, M_DEVBUF, M_NOWAIT);
128 1.38.4.1 nathanw if (chan->chan_periphs == NULL)
129 1.38.4.1 nathanw return (ENOMEM);
130 1.38.4.1 nathanw
131 1.38.4.1 nathanw
132 1.38.4.1 nathanw nbytes = chan->chan_nluns * sizeof(struct scsipi_periph *);
133 1.38.4.1 nathanw for (i = 0; i < chan->chan_ntargets; i++) {
134 1.38.4.8 nathanw chan->chan_periphs[i] = malloc(nbytes, M_DEVBUF,
135 1.38.4.8 nathanw M_NOWAIT|M_ZERO);
136 1.38.4.1 nathanw if (chan->chan_periphs[i] == NULL) {
137 1.38.4.1 nathanw while (--i >= 0) {
138 1.38.4.1 nathanw free(chan->chan_periphs[i], M_DEVBUF);
139 1.38.4.1 nathanw }
140 1.38.4.1 nathanw return (ENOMEM);
141 1.38.4.1 nathanw }
142 1.38.4.1 nathanw }
143 1.38.4.1 nathanw
144 1.38.4.1 nathanw /*
145 1.38.4.1 nathanw * Create the asynchronous completion thread.
146 1.38.4.1 nathanw */
147 1.38.4.1 nathanw kthread_create(scsipi_create_completion_thread, chan);
148 1.38.4.1 nathanw return (0);
149 1.38.4.1 nathanw }
150 1.38.4.1 nathanw
151 1.38.4.1 nathanw /*
152 1.38.4.1 nathanw * scsipi_channel_shutdown:
153 1.38.4.1 nathanw *
154 1.38.4.1 nathanw * Shutdown a scsipi_channel.
155 1.38.4.1 nathanw */
156 1.38.4.1 nathanw void
157 1.38.4.1 nathanw scsipi_channel_shutdown(chan)
158 1.38.4.1 nathanw struct scsipi_channel *chan;
159 1.38.4.1 nathanw {
160 1.38.4.1 nathanw
161 1.38.4.1 nathanw /*
162 1.38.4.1 nathanw * Shut down the completion thread.
163 1.38.4.1 nathanw */
164 1.38.4.5 nathanw chan->chan_tflags |= SCSIPI_CHANT_SHUTDOWN;
165 1.38.4.1 nathanw wakeup(&chan->chan_complete);
166 1.38.4.1 nathanw
167 1.38.4.1 nathanw /*
168 1.38.4.1 nathanw * Now wait for the thread to exit.
169 1.38.4.1 nathanw */
170 1.38.4.1 nathanw while (chan->chan_thread != NULL)
171 1.38.4.1 nathanw (void) tsleep(&chan->chan_thread, PRIBIO, "scshut", 0);
172 1.38.4.1 nathanw }
173 1.38.4.1 nathanw
174 1.38.4.1 nathanw /*
175 1.38.4.1 nathanw * scsipi_insert_periph:
176 1.38.4.1 nathanw *
177 1.38.4.1 nathanw * Insert a periph into the channel.
178 1.38.4.1 nathanw */
179 1.38.4.1 nathanw void
180 1.38.4.1 nathanw scsipi_insert_periph(chan, periph)
181 1.38.4.1 nathanw struct scsipi_channel *chan;
182 1.38.4.1 nathanw struct scsipi_periph *periph;
183 1.38.4.1 nathanw {
184 1.38.4.1 nathanw int s;
185 1.38.4.1 nathanw
186 1.38.4.1 nathanw s = splbio();
187 1.38.4.1 nathanw chan->chan_periphs[periph->periph_target][periph->periph_lun] = periph;
188 1.38.4.1 nathanw splx(s);
189 1.38.4.1 nathanw }
190 1.38.4.1 nathanw
191 1.38.4.1 nathanw /*
192 1.38.4.1 nathanw * scsipi_remove_periph:
193 1.38.4.1 nathanw *
194 1.38.4.1 nathanw * Remove a periph from the channel.
195 1.38.4.1 nathanw */
196 1.38.4.1 nathanw void
197 1.38.4.1 nathanw scsipi_remove_periph(chan, periph)
198 1.38.4.1 nathanw struct scsipi_channel *chan;
199 1.38.4.1 nathanw struct scsipi_periph *periph;
200 1.38.4.1 nathanw {
201 1.38.4.1 nathanw int s;
202 1.38.4.1 nathanw
203 1.38.4.1 nathanw s = splbio();
204 1.38.4.1 nathanw chan->chan_periphs[periph->periph_target][periph->periph_lun] = NULL;
205 1.38.4.1 nathanw splx(s);
206 1.38.4.1 nathanw }
207 1.38.4.1 nathanw
208 1.38.4.1 nathanw /*
209 1.38.4.1 nathanw * scsipi_lookup_periph:
210 1.38.4.1 nathanw *
211 1.38.4.1 nathanw * Lookup a periph on the specified channel.
212 1.38.4.1 nathanw */
213 1.38.4.1 nathanw struct scsipi_periph *
214 1.38.4.1 nathanw scsipi_lookup_periph(chan, target, lun)
215 1.38.4.1 nathanw struct scsipi_channel *chan;
216 1.38.4.1 nathanw int target, lun;
217 1.38.4.1 nathanw {
218 1.38.4.1 nathanw struct scsipi_periph *periph;
219 1.38.4.1 nathanw int s;
220 1.38.4.1 nathanw
221 1.38.4.1 nathanw if (target >= chan->chan_ntargets ||
222 1.38.4.1 nathanw lun >= chan->chan_nluns)
223 1.38.4.1 nathanw return (NULL);
224 1.38.4.1 nathanw
225 1.38.4.1 nathanw s = splbio();
226 1.38.4.1 nathanw periph = chan->chan_periphs[target][lun];
227 1.38.4.1 nathanw splx(s);
228 1.38.4.1 nathanw
229 1.38.4.1 nathanw return (periph);
230 1.38.4.1 nathanw }
231 1.38.4.1 nathanw
232 1.38.4.1 nathanw /*
233 1.38.4.1 nathanw * scsipi_get_resource:
234 1.38.4.1 nathanw *
235 1.38.4.1 nathanw * Allocate a single xfer `resource' from the channel.
236 1.38.4.1 nathanw *
237 1.38.4.1 nathanw * NOTE: Must be called at splbio().
238 1.38.4.1 nathanw */
239 1.38.4.1 nathanw int
240 1.38.4.1 nathanw scsipi_get_resource(chan)
241 1.38.4.1 nathanw struct scsipi_channel *chan;
242 1.38.4.1 nathanw {
243 1.38.4.1 nathanw struct scsipi_adapter *adapt = chan->chan_adapter;
244 1.38.4.1 nathanw
245 1.38.4.1 nathanw if (chan->chan_flags & SCSIPI_CHAN_OPENINGS) {
246 1.38.4.1 nathanw if (chan->chan_openings > 0) {
247 1.38.4.1 nathanw chan->chan_openings--;
248 1.38.4.1 nathanw return (1);
249 1.38.4.1 nathanw }
250 1.38.4.1 nathanw return (0);
251 1.38.4.1 nathanw }
252 1.38.4.1 nathanw
253 1.38.4.1 nathanw if (adapt->adapt_openings > 0) {
254 1.38.4.1 nathanw adapt->adapt_openings--;
255 1.38.4.1 nathanw return (1);
256 1.38.4.1 nathanw }
257 1.38.4.1 nathanw return (0);
258 1.38.4.1 nathanw }
259 1.38.4.1 nathanw
260 1.38.4.1 nathanw /*
261 1.38.4.1 nathanw * scsipi_grow_resources:
262 1.38.4.1 nathanw *
263 1.38.4.1 nathanw * Attempt to grow resources for a channel. If this succeeds,
264 1.38.4.1 nathanw * we allocate one for our caller.
265 1.38.4.1 nathanw *
266 1.38.4.1 nathanw * NOTE: Must be called at splbio().
267 1.38.4.1 nathanw */
268 1.38.4.1 nathanw __inline int
269 1.38.4.1 nathanw scsipi_grow_resources(chan)
270 1.38.4.1 nathanw struct scsipi_channel *chan;
271 1.38.4.1 nathanw {
272 1.38.4.1 nathanw
273 1.38.4.1 nathanw if (chan->chan_flags & SCSIPI_CHAN_CANGROW) {
274 1.38.4.5 nathanw if ((chan->chan_flags & SCSIPI_CHAN_TACTIVE) == 0) {
275 1.38.4.5 nathanw scsipi_adapter_request(chan,
276 1.38.4.5 nathanw ADAPTER_REQ_GROW_RESOURCES, NULL);
277 1.38.4.5 nathanw return (scsipi_get_resource(chan));
278 1.38.4.5 nathanw }
279 1.38.4.5 nathanw /*
280 1.38.4.5 nathanw * Ask the channel thread to do it. It'll have to thaw the
281 1.38.4.5 nathanw * queue.
282 1.38.4.5 nathanw */
283 1.38.4.5 nathanw scsipi_channel_freeze(chan, 1);
284 1.38.4.5 nathanw chan->chan_tflags |= SCSIPI_CHANT_GROWRES;
285 1.38.4.5 nathanw wakeup(&chan->chan_complete);
286 1.38.4.5 nathanw return (0);
287 1.38.4.1 nathanw }
288 1.38.4.1 nathanw
289 1.38.4.1 nathanw return (0);
290 1.38.4.1 nathanw }
291 1.38.4.1 nathanw
292 1.38.4.1 nathanw /*
293 1.38.4.1 nathanw * scsipi_put_resource:
294 1.38.4.1 nathanw *
295 1.38.4.1 nathanw * Free a single xfer `resource' to the channel.
296 1.38.4.1 nathanw *
297 1.38.4.1 nathanw * NOTE: Must be called at splbio().
298 1.38.4.1 nathanw */
299 1.38.4.1 nathanw void
300 1.38.4.1 nathanw scsipi_put_resource(chan)
301 1.38.4.1 nathanw struct scsipi_channel *chan;
302 1.38.4.1 nathanw {
303 1.38.4.1 nathanw struct scsipi_adapter *adapt = chan->chan_adapter;
304 1.38.4.1 nathanw
305 1.38.4.1 nathanw if (chan->chan_flags & SCSIPI_CHAN_OPENINGS)
306 1.38.4.1 nathanw chan->chan_openings++;
307 1.38.4.1 nathanw else
308 1.38.4.1 nathanw adapt->adapt_openings++;
309 1.38.4.1 nathanw }
310 1.38.4.1 nathanw
311 1.38.4.1 nathanw /*
312 1.38.4.1 nathanw * scsipi_get_tag:
313 1.38.4.1 nathanw *
314 1.38.4.1 nathanw * Get a tag ID for the specified xfer.
315 1.38.4.1 nathanw *
316 1.38.4.1 nathanw * NOTE: Must be called at splbio().
317 1.38.4.1 nathanw */
318 1.38.4.1 nathanw void
319 1.38.4.1 nathanw scsipi_get_tag(xs)
320 1.38.4.1 nathanw struct scsipi_xfer *xs;
321 1.38.4.1 nathanw {
322 1.38.4.1 nathanw struct scsipi_periph *periph = xs->xs_periph;
323 1.38.4.1 nathanw int word, bit, tag;
324 1.38.4.1 nathanw
325 1.38.4.1 nathanw for (word = 0; word < PERIPH_NTAGWORDS; word++) {
326 1.38.4.1 nathanw bit = ffs(periph->periph_freetags[word]);
327 1.38.4.1 nathanw if (bit != 0)
328 1.38.4.1 nathanw break;
329 1.38.4.1 nathanw }
330 1.38.4.1 nathanw #ifdef DIAGNOSTIC
331 1.38.4.1 nathanw if (word == PERIPH_NTAGWORDS) {
332 1.38.4.1 nathanw scsipi_printaddr(periph);
333 1.38.4.1 nathanw printf("no free tags\n");
334 1.38.4.1 nathanw panic("scsipi_get_tag");
335 1.38.4.1 nathanw }
336 1.38.4.1 nathanw #endif
337 1.38.4.1 nathanw
338 1.38.4.1 nathanw bit -= 1;
339 1.38.4.1 nathanw periph->periph_freetags[word] &= ~(1 << bit);
340 1.38.4.1 nathanw tag = (word << 5) | bit;
341 1.38.4.1 nathanw
342 1.38.4.1 nathanw /* XXX Should eventually disallow this completely. */
343 1.38.4.1 nathanw if (tag >= periph->periph_openings) {
344 1.38.4.1 nathanw scsipi_printaddr(periph);
345 1.38.4.1 nathanw printf("WARNING: tag %d greater than available openings %d\n",
346 1.38.4.1 nathanw tag, periph->periph_openings);
347 1.38.4.1 nathanw }
348 1.38.4.1 nathanw
349 1.38.4.1 nathanw xs->xs_tag_id = tag;
350 1.38.4.1 nathanw }
351 1.38.4.1 nathanw
352 1.38.4.1 nathanw /*
353 1.38.4.1 nathanw * scsipi_put_tag:
354 1.38.4.1 nathanw *
355 1.38.4.1 nathanw * Put the tag ID for the specified xfer back into the pool.
356 1.38.4.1 nathanw *
357 1.38.4.1 nathanw * NOTE: Must be called at splbio().
358 1.2 bouyer */
359 1.38.4.1 nathanw void
360 1.38.4.1 nathanw scsipi_put_tag(xs)
361 1.38.4.1 nathanw struct scsipi_xfer *xs;
362 1.38.4.1 nathanw {
363 1.38.4.1 nathanw struct scsipi_periph *periph = xs->xs_periph;
364 1.38.4.1 nathanw int word, bit;
365 1.38.4.1 nathanw
366 1.38.4.1 nathanw word = xs->xs_tag_id >> 5;
367 1.38.4.1 nathanw bit = xs->xs_tag_id & 0x1f;
368 1.38.4.1 nathanw
369 1.38.4.1 nathanw periph->periph_freetags[word] |= (1 << bit);
370 1.38.4.1 nathanw }
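/*
 * Worked example of the tag encoding used above: tags live in the
 * periph_freetags[] bitmap, 32 tags per word.  Tag ID 37, for
 * instance, decodes to word = 37 >> 5 = 1 and bit = 37 & 0x1f = 5,
 * i.e. bit 5 of periph_freetags[1].  scsipi_get_tag() clears that
 * bit while the xfer is outstanding, and scsipi_put_tag() sets it
 * again here when the tag is returned to the pool.
 */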
371 1.2 bouyer
372 1.38.4.1 nathanw /*
373 1.38.4.1 nathanw * scsipi_get_xs:
374 1.38.4.1 nathanw *
375 1.38.4.1 nathanw * Allocate an xfer descriptor and associate it with the
376 1.38.4.1 nathanw * specified peripheral. If the peripheral has no more
377 1.38.4.1 nathanw * available command openings, we either block waiting for
378 1.38.4.1 nathanw * one to become available, or fail.
379 1.38.4.1 nathanw */
380 1.2 bouyer struct scsipi_xfer *
381 1.38.4.1 nathanw scsipi_get_xs(periph, flags)
382 1.38.4.1 nathanw struct scsipi_periph *periph;
383 1.38.4.1 nathanw int flags;
384 1.2 bouyer {
385 1.2 bouyer struct scsipi_xfer *xs;
386 1.2 bouyer int s;
387 1.2 bouyer
388 1.38.4.1 nathanw SC_DEBUG(periph, SCSIPI_DB3, ("scsipi_get_xs\n"));
389 1.6 thorpej
390 1.24 thorpej /*
391 1.24 thorpej * If we're cold, make sure we poll.
392 1.24 thorpej */
393 1.24 thorpej if (cold)
394 1.24 thorpej flags |= XS_CTL_NOSLEEP | XS_CTL_POLL;
395 1.24 thorpej
396 1.38.4.1 nathanw #ifdef DIAGNOSTIC
397 1.38.4.1 nathanw /*
398 1.38.4.1 nathanw * URGENT commands can never be ASYNC.
399 1.38.4.1 nathanw */
400 1.38.4.1 nathanw if ((flags & (XS_CTL_URGENT|XS_CTL_ASYNC)) ==
401 1.38.4.1 nathanw (XS_CTL_URGENT|XS_CTL_ASYNC)) {
402 1.38.4.1 nathanw scsipi_printaddr(periph);
403 1.38.4.1 nathanw printf("URGENT and ASYNC\n");
404 1.38.4.1 nathanw panic("scsipi_get_xs");
405 1.38.4.1 nathanw }
406 1.38.4.1 nathanw #endif
407 1.38.4.1 nathanw
408 1.2 bouyer s = splbio();
409 1.38.4.1 nathanw /*
410 1.38.4.1 nathanw * Wait for a command opening to become available. Rules:
411 1.38.4.1 nathanw *
412 1.38.4.1 nathanw * - All xfers must wait for an available opening.
413 1.38.4.1 nathanw * Exception: URGENT xfers can proceed when
414 1.38.4.1 nathanw * active == openings, because we use the opening
415 1.38.4.1 nathanw * of the command we're recovering for.
416 1.38.4.1 nathanw * - If the periph has sense pending, only URGENT & REQSENSE
417 1.38.4.1 nathanw * xfers may proceed.
418 1.38.4.1 nathanw *
419 1.38.4.1 nathanw * - If the periph is recovering, only URGENT xfers may
420 1.38.4.1 nathanw * proceed.
421 1.38.4.1 nathanw *
422 1.38.4.1 nathanw * - If the periph is currently executing a recovery
423 1.38.4.1 nathanw * command, URGENT commands must block, because only
424 1.38.4.1 nathanw * one recovery command can execute at a time.
425 1.38.4.1 nathanw */
426 1.38.4.1 nathanw for (;;) {
427 1.38.4.1 nathanw if (flags & XS_CTL_URGENT) {
428 1.38.4.1 nathanw if (periph->periph_active > periph->periph_openings)
429 1.38.4.1 nathanw goto wait_for_opening;
430 1.38.4.1 nathanw if (periph->periph_flags & PERIPH_SENSE) {
431 1.38.4.1 nathanw if ((flags & XS_CTL_REQSENSE) == 0)
432 1.38.4.1 nathanw goto wait_for_opening;
433 1.38.4.1 nathanw } else {
434 1.38.4.1 nathanw if ((periph->periph_flags &
435 1.38.4.1 nathanw PERIPH_RECOVERY_ACTIVE) != 0)
436 1.38.4.1 nathanw goto wait_for_opening;
437 1.38.4.1 nathanw periph->periph_flags |= PERIPH_RECOVERY_ACTIVE;
438 1.38.4.1 nathanw }
439 1.38.4.1 nathanw break;
440 1.38.4.1 nathanw }
441 1.38.4.1 nathanw if (periph->periph_active >= periph->periph_openings ||
442 1.38.4.1 nathanw (periph->periph_flags & PERIPH_RECOVERING) != 0)
443 1.38.4.1 nathanw goto wait_for_opening;
444 1.38.4.1 nathanw periph->periph_active++;
445 1.38.4.1 nathanw break;
446 1.38.4.1 nathanw
447 1.38.4.1 nathanw wait_for_opening:
448 1.38.4.1 nathanw if (flags & XS_CTL_NOSLEEP) {
449 1.2 bouyer splx(s);
450 1.38.4.1 nathanw return (NULL);
451 1.2 bouyer }
452 1.38.4.1 nathanw SC_DEBUG(periph, SCSIPI_DB3, ("sleeping\n"));
453 1.38.4.1 nathanw periph->periph_flags |= PERIPH_WAITING;
454 1.38.4.1 nathanw (void) tsleep(periph, PRIBIO, "getxs", 0);
455 1.2 bouyer }
456 1.38.4.1 nathanw SC_DEBUG(periph, SCSIPI_DB3, ("calling pool_get\n"));
457 1.6 thorpej xs = pool_get(&scsipi_xfer_pool,
458 1.24 thorpej ((flags & XS_CTL_NOSLEEP) != 0 ? PR_NOWAIT : PR_WAITOK));
459 1.38.4.1 nathanw if (xs == NULL) {
460 1.38.4.1 nathanw if (flags & XS_CTL_URGENT) {
461 1.38.4.1 nathanw if ((flags & XS_CTL_REQSENSE) == 0)
462 1.38.4.1 nathanw periph->periph_flags &= ~PERIPH_RECOVERY_ACTIVE;
463 1.38.4.1 nathanw } else
464 1.38.4.1 nathanw periph->periph_active--;
465 1.38.4.1 nathanw scsipi_printaddr(periph);
466 1.38.4.1 nathanw printf("unable to allocate %sscsipi_xfer\n",
467 1.38.4.1 nathanw (flags & XS_CTL_URGENT) ? "URGENT " : "");
468 1.2 bouyer }
469 1.6 thorpej splx(s);
470 1.2 bouyer
471 1.38.4.1 nathanw SC_DEBUG(periph, SCSIPI_DB3, ("returning\n"));
472 1.6 thorpej
473 1.7 scottr if (xs != NULL) {
474 1.30 thorpej callout_init(&xs->xs_callout);
475 1.38.4.1 nathanw memset(xs, 0, sizeof(*xs));
476 1.38.4.1 nathanw xs->xs_periph = periph;
477 1.24 thorpej xs->xs_control = flags;
478 1.37 fvdl xs->xs_status = 0;
479 1.38.4.1 nathanw s = splbio();
480 1.38.4.1 nathanw TAILQ_INSERT_TAIL(&periph->periph_xferq, xs, device_q);
481 1.38.4.1 nathanw splx(s);
482 1.7 scottr }
483 1.3 enami return (xs);
484 1.2 bouyer }
485 1.2 bouyer
486 1.2 bouyer /*
487 1.38.4.1 nathanw * scsipi_put_xs:
488 1.38.4.1 nathanw *
489 1.38.4.1 nathanw * Release an xfer descriptor, decreasing the outstanding command
490 1.38.4.1 nathanw * count for the peripheral. If there is a thread waiting for
491 1.38.4.1 nathanw * an opening, wake it up. If not, kick any queued I/O the
492 1.38.4.1 nathanw * peripheral may have.
493 1.6 thorpej *
494 1.38.4.1 nathanw * NOTE: Must be called at splbio().
495 1.2 bouyer */
496 1.3 enami void
497 1.38.4.1 nathanw scsipi_put_xs(xs)
498 1.2 bouyer struct scsipi_xfer *xs;
499 1.2 bouyer {
500 1.38.4.1 nathanw struct scsipi_periph *periph = xs->xs_periph;
501 1.38.4.1 nathanw int flags = xs->xs_control;
502 1.2 bouyer
503 1.38.4.1 nathanw SC_DEBUG(periph, SCSIPI_DB3, ("scsipi_free_xs\n"));
504 1.38.4.1 nathanw
505 1.38.4.1 nathanw TAILQ_REMOVE(&periph->periph_xferq, xs, device_q);
506 1.6 thorpej pool_put(&scsipi_xfer_pool, xs);
507 1.2 bouyer
508 1.38.4.1 nathanw #ifdef DIAGNOSTIC
509 1.38.4.1 nathanw if ((periph->periph_flags & PERIPH_RECOVERY_ACTIVE) != 0 &&
510 1.38.4.1 nathanw periph->periph_active == 0) {
511 1.38.4.1 nathanw scsipi_printaddr(periph);
512 1.38.4.1 nathanw printf("recovery without a command to recover for\n");
513 1.38.4.1 nathanw panic("scsipi_put_xs");
514 1.38.4.1 nathanw }
515 1.38.4.1 nathanw #endif
516 1.38.4.1 nathanw
517 1.38.4.1 nathanw if (flags & XS_CTL_URGENT) {
518 1.38.4.1 nathanw if ((flags & XS_CTL_REQSENSE) == 0)
519 1.38.4.1 nathanw periph->periph_flags &= ~PERIPH_RECOVERY_ACTIVE;
520 1.38.4.1 nathanw } else
521 1.38.4.1 nathanw periph->periph_active--;
522 1.38.4.1 nathanw if (periph->periph_active == 0 &&
523 1.38.4.1 nathanw (periph->periph_flags & PERIPH_WAITDRAIN) != 0) {
524 1.38.4.1 nathanw periph->periph_flags &= ~PERIPH_WAITDRAIN;
525 1.38.4.1 nathanw wakeup(&periph->periph_active);
526 1.38.4.1 nathanw }
527 1.38.4.1 nathanw
528 1.38.4.1 nathanw if (periph->periph_flags & PERIPH_WAITING) {
529 1.38.4.1 nathanw periph->periph_flags &= ~PERIPH_WAITING;
530 1.38.4.1 nathanw wakeup(periph);
531 1.2 bouyer } else {
532 1.38.4.1 nathanw if (periph->periph_switch->psw_start != NULL) {
533 1.38.4.1 nathanw SC_DEBUG(periph, SCSIPI_DB2,
534 1.3 enami ("calling private start()\n"));
535 1.38.4.1 nathanw (*periph->periph_switch->psw_start)(periph);
536 1.2 bouyer }
537 1.2 bouyer }
538 1.15 thorpej }
539 1.15 thorpej
540 1.15 thorpej /*
541 1.38.4.1 nathanw * scsipi_channel_freeze:
542 1.38.4.1 nathanw *
543 1.38.4.1 nathanw * Freeze a channel's xfer queue.
544 1.38.4.1 nathanw */
545 1.38.4.1 nathanw void
546 1.38.4.1 nathanw scsipi_channel_freeze(chan, count)
547 1.38.4.1 nathanw struct scsipi_channel *chan;
548 1.38.4.1 nathanw int count;
549 1.38.4.1 nathanw {
550 1.38.4.1 nathanw int s;
551 1.38.4.1 nathanw
552 1.38.4.1 nathanw s = splbio();
553 1.38.4.1 nathanw chan->chan_qfreeze += count;
554 1.38.4.1 nathanw splx(s);
555 1.38.4.1 nathanw }
556 1.38.4.1 nathanw
557 1.38.4.1 nathanw /*
558 1.38.4.1 nathanw * scsipi_channel_thaw:
559 1.38.4.1 nathanw *
560 1.38.4.1 nathanw * Thaw a channel's xfer queue.
561 1.38.4.1 nathanw */
562 1.38.4.1 nathanw void
563 1.38.4.1 nathanw scsipi_channel_thaw(chan, count)
564 1.38.4.1 nathanw struct scsipi_channel *chan;
565 1.38.4.1 nathanw int count;
566 1.38.4.1 nathanw {
567 1.38.4.1 nathanw int s;
568 1.38.4.1 nathanw
569 1.38.4.1 nathanw s = splbio();
570 1.38.4.1 nathanw chan->chan_qfreeze -= count;
571 1.38.4.1 nathanw /*
572 1.38.4.1 nathanw * Don't let the freeze count go negative.
573 1.38.4.1 nathanw *
574 1.38.4.1 nathanw * Presumably the adapter driver could keep track of this,
575 1.38.4.1 nathanw * but it might just be easier to do this here so as to allow
576 1.38.4.1 nathanw * multiple callers, including those outside the adapter driver.
577 1.38.4.1 nathanw */
578 1.38.4.1 nathanw if (chan->chan_qfreeze < 0) {
579 1.38.4.1 nathanw chan->chan_qfreeze = 0;
580 1.38.4.1 nathanw }
581 1.38.4.1 nathanw splx(s);
582 1.38.4.1 nathanw /*
583 1.38.4.1 nathanw * Kick the channel's queue here. Note, we may be running in
584 1.38.4.1 nathanw * interrupt context (softclock or HBA's interrupt), so the adapter
585 1.38.4.1 nathanw * driver had better not sleep.
586 1.38.4.1 nathanw */
587 1.38.4.1 nathanw if (chan->chan_qfreeze == 0)
588 1.38.4.1 nathanw scsipi_run_queue(chan);
589 1.38.4.1 nathanw }
590 1.38.4.1 nathanw
591 1.38.4.1 nathanw /*
592 1.38.4.1 nathanw * scsipi_channel_timed_thaw:
593 1.38.4.1 nathanw *
594 1.38.4.1 nathanw * Thaw a channel after some time has expired. This will also
595 1.38.4.1 nathanw * run the channel's queue if the freeze count has reached 0.
596 1.38.4.1 nathanw */
597 1.38.4.1 nathanw void
598 1.38.4.1 nathanw scsipi_channel_timed_thaw(arg)
599 1.38.4.1 nathanw void *arg;
600 1.38.4.1 nathanw {
601 1.38.4.1 nathanw struct scsipi_channel *chan = arg;
602 1.38.4.1 nathanw
603 1.38.4.1 nathanw scsipi_channel_thaw(chan, 1);
604 1.38.4.1 nathanw }
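/*
 * Sketch of the intended use (hypothetical adapter code; `sc' and its
 * sc_thaw_ch callout are assumed names, not symbols from this file):
 * an HBA that temporarily runs out of resources can freeze the channel
 * and schedule a delayed thaw from its own callout, e.g.:
 *
 *	scsipi_channel_freeze(chan, 1);
 *	callout_reset(&sc->sc_thaw_ch, hz, scsipi_channel_timed_thaw, chan);
 *
 * When the callout fires, scsipi_channel_timed_thaw() drops the freeze
 * count by one and, once it reaches zero, restarts the channel's queue.
 */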
605 1.38.4.1 nathanw
606 1.38.4.1 nathanw /*
607 1.38.4.1 nathanw * scsipi_periph_freeze:
608 1.38.4.1 nathanw *
609 1.38.4.1 nathanw * Freeze a device's xfer queue.
610 1.38.4.1 nathanw */
611 1.38.4.1 nathanw void
612 1.38.4.1 nathanw scsipi_periph_freeze(periph, count)
613 1.38.4.1 nathanw struct scsipi_periph *periph;
614 1.38.4.1 nathanw int count;
615 1.38.4.1 nathanw {
616 1.38.4.1 nathanw int s;
617 1.38.4.1 nathanw
618 1.38.4.1 nathanw s = splbio();
619 1.38.4.1 nathanw periph->periph_qfreeze += count;
620 1.38.4.1 nathanw splx(s);
621 1.38.4.1 nathanw }
622 1.38.4.1 nathanw
623 1.38.4.1 nathanw /*
624 1.38.4.1 nathanw * scsipi_periph_thaw:
625 1.38.4.1 nathanw *
626 1.38.4.1 nathanw * Thaw a device's xfer queue.
627 1.38.4.1 nathanw */
628 1.38.4.1 nathanw void
629 1.38.4.1 nathanw scsipi_periph_thaw(periph, count)
630 1.38.4.1 nathanw struct scsipi_periph *periph;
631 1.38.4.1 nathanw int count;
632 1.38.4.1 nathanw {
633 1.38.4.1 nathanw int s;
634 1.38.4.1 nathanw
635 1.38.4.1 nathanw s = splbio();
636 1.38.4.1 nathanw periph->periph_qfreeze -= count;
637 1.38.4.4 nathanw #ifdef DIAGNOSTIC
638 1.38.4.4 nathanw if (periph->periph_qfreeze < 0) {
639 1.38.4.4 nathanw static const char pc[] = "periph freeze count < 0";
640 1.38.4.4 nathanw scsipi_printaddr(periph);
641 1.38.4.4 nathanw printf("%s\n", pc);
642 1.38.4.4 nathanw panic(pc);
643 1.38.4.4 nathanw }
644 1.38.4.4 nathanw #endif
645 1.38.4.1 nathanw if (periph->periph_qfreeze == 0 &&
646 1.38.4.1 nathanw (periph->periph_flags & PERIPH_WAITING) != 0)
647 1.38.4.1 nathanw wakeup(periph);
648 1.38.4.1 nathanw splx(s);
649 1.38.4.1 nathanw }
650 1.38.4.1 nathanw
651 1.38.4.1 nathanw /*
652 1.38.4.1 nathanw * scsipi_periph_timed_thaw:
653 1.38.4.1 nathanw *
654 1.38.4.1 nathanw * Thaw a device after some time has expired.
655 1.38.4.1 nathanw */
656 1.38.4.1 nathanw void
657 1.38.4.1 nathanw scsipi_periph_timed_thaw(arg)
658 1.38.4.1 nathanw void *arg;
659 1.38.4.1 nathanw {
660 1.38.4.4 nathanw int s;
661 1.38.4.1 nathanw struct scsipi_periph *periph = arg;
662 1.38.4.1 nathanw
663 1.38.4.1 nathanw callout_stop(&periph->periph_callout);
664 1.38.4.1 nathanw
665 1.38.4.4 nathanw s = splbio();
666 1.38.4.4 nathanw scsipi_periph_thaw(periph, 1);
667 1.38.4.4 nathanw if ((periph->periph_channel->chan_flags & SCSIPI_CHAN_TACTIVE) == 0) {
668 1.38.4.4 nathanw /*
669 1.38.4.4 nathanw * Kick the channel's queue here. Note, we're running in
670 1.38.4.4 nathanw * interrupt context (softclock), so the adapter driver
671 1.38.4.4 nathanw * had better not sleep.
672 1.38.4.4 nathanw */
673 1.38.4.4 nathanw scsipi_run_queue(periph->periph_channel);
674 1.38.4.4 nathanw } else {
675 1.38.4.4 nathanw /*
676 1.38.4.4 nathanw * Tell the completion thread to kick the channel's queue here.
677 1.38.4.4 nathanw */
678 1.38.4.5 nathanw periph->periph_channel->chan_tflags |= SCSIPI_CHANT_KICK;
679 1.38.4.4 nathanw wakeup(&periph->periph_channel->chan_complete);
680 1.38.4.4 nathanw }
681 1.38.4.4 nathanw splx(s);
682 1.38.4.1 nathanw }
683 1.38.4.1 nathanw
684 1.38.4.1 nathanw /*
685 1.38.4.1 nathanw * scsipi_wait_drain:
686 1.38.4.1 nathanw *
687 1.38.4.1 nathanw * Wait for a periph's pending xfers to drain.
688 1.15 thorpej */
689 1.15 thorpej void
690 1.38.4.1 nathanw scsipi_wait_drain(periph)
691 1.38.4.1 nathanw struct scsipi_periph *periph;
692 1.15 thorpej {
693 1.15 thorpej int s;
694 1.15 thorpej
695 1.15 thorpej s = splbio();
696 1.38.4.1 nathanw while (periph->periph_active != 0) {
697 1.38.4.1 nathanw periph->periph_flags |= PERIPH_WAITDRAIN;
698 1.38.4.1 nathanw (void) tsleep(&periph->periph_active, PRIBIO, "sxdrn", 0);
699 1.15 thorpej }
700 1.15 thorpej splx(s);
701 1.23 thorpej }
702 1.23 thorpej
703 1.23 thorpej /*
704 1.38.4.1 nathanw * scsipi_kill_pending:
705 1.23 thorpej *
706 1.38.4.1 nathanw * Kill off all pending xfers for a periph.
707 1.38.4.1 nathanw *
708 1.38.4.1 nathanw * NOTE: Must be called at splbio().
709 1.23 thorpej */
710 1.23 thorpej void
711 1.38.4.1 nathanw scsipi_kill_pending(periph)
712 1.38.4.1 nathanw struct scsipi_periph *periph;
713 1.23 thorpej {
714 1.23 thorpej
715 1.38.4.1 nathanw (*periph->periph_channel->chan_bustype->bustype_kill_pending)(periph);
716 1.38.4.1 nathanw #ifdef DIAGNOSTIC
717 1.38.4.1 nathanw if (TAILQ_FIRST(&periph->periph_xferq) != NULL)
718 1.38.4.1 nathanw panic("scsipi_kill_pending");
719 1.38.4.1 nathanw #endif
720 1.38.4.1 nathanw scsipi_wait_drain(periph);
721 1.2 bouyer }
722 1.2 bouyer
723 1.2 bouyer /*
724 1.38.4.1 nathanw * scsipi_interpret_sense:
725 1.38.4.1 nathanw *
726 1.38.4.1 nathanw * Look at the returned sense and act on the error, determining
727 1.38.4.1 nathanw * the unix error number to pass back. (0 = report no error)
728 1.13 bouyer *
729 1.38.4.1 nathanw * NOTE: If we return ERESTART, we are expected to have
730 1.38.4.1 nathanw * thawed the device!
731 1.38.4.1 nathanw *
732 1.38.4.1 nathanw * THIS IS THE DEFAULT ERROR HANDLER FOR SCSI DEVICES.
733 1.13 bouyer */
734 1.13 bouyer int
735 1.13 bouyer scsipi_interpret_sense(xs)
736 1.13 bouyer struct scsipi_xfer *xs;
737 1.13 bouyer {
738 1.13 bouyer struct scsipi_sense_data *sense;
739 1.38.4.1 nathanw struct scsipi_periph *periph = xs->xs_periph;
740 1.13 bouyer u_int8_t key;
741 1.13 bouyer u_int32_t info;
742 1.13 bouyer int error;
743 1.13 bouyer #ifndef SCSIVERBOSE
744 1.13 bouyer static char *error_mes[] = {
745 1.13 bouyer "soft error (corrected)",
746 1.13 bouyer "not ready", "medium error",
747 1.13 bouyer "non-media hardware failure", "illegal request",
748 1.13 bouyer "unit attention", "readonly device",
749 1.13 bouyer "no data found", "vendor unique",
750 1.13 bouyer "copy aborted", "command aborted",
751 1.13 bouyer "search returned equal", "volume overflow",
752 1.13 bouyer "verify miscompare", "unknown error key"
753 1.13 bouyer };
754 1.13 bouyer #endif
755 1.13 bouyer
756 1.13 bouyer sense = &xs->sense.scsi_sense;
757 1.38.4.1 nathanw #ifdef SCSIPI_DEBUG
758 1.38.4.1 nathanw if (periph->periph_flags & SCSIPI_DB1) {
759 1.13 bouyer int count;
760 1.38.4.1 nathanw scsipi_printaddr(periph);
761 1.38.4.1 nathanw printf(" sense debug information:\n");
762 1.38.4.1 nathanw printf("\tcode 0x%x valid 0x%x\n",
763 1.13 bouyer sense->error_code & SSD_ERRCODE,
764 1.13 bouyer sense->error_code & SSD_ERRCODE_VALID ? 1 : 0);
765 1.38.4.1 nathanw printf("\tseg 0x%x key 0x%x ili 0x%x eom 0x%x fmark 0x%x\n",
766 1.13 bouyer sense->segment,
767 1.13 bouyer sense->flags & SSD_KEY,
768 1.13 bouyer sense->flags & SSD_ILI ? 1 : 0,
769 1.13 bouyer sense->flags & SSD_EOM ? 1 : 0,
770 1.13 bouyer sense->flags & SSD_FILEMARK ? 1 : 0);
771 1.38.4.1 nathanw printf("\ninfo: 0x%x 0x%x 0x%x 0x%x followed by %d "
772 1.38.4.1 nathanw "extra bytes\n",
773 1.13 bouyer sense->info[0],
774 1.13 bouyer sense->info[1],
775 1.13 bouyer sense->info[2],
776 1.13 bouyer sense->info[3],
777 1.13 bouyer sense->extra_len);
778 1.38.4.1 nathanw printf("\textra: ");
779 1.13 bouyer for (count = 0; count < ADD_BYTES_LIM(sense); count++)
780 1.13 bouyer printf("0x%x ", sense->cmd_spec_info[count]);
781 1.13 bouyer printf("\n");
782 1.13 bouyer }
783 1.38.4.1 nathanw #endif
784 1.38.4.1 nathanw
785 1.13 bouyer /*
786 1.38.4.1 nathanw * If the periph has its own error handler, call it first.
787 1.13 bouyer * If it returns a legit error value, return that, otherwise
788 1.13 bouyer * it wants us to continue with normal error processing.
789 1.13 bouyer */
790 1.38.4.1 nathanw if (periph->periph_switch->psw_error != NULL) {
791 1.38.4.1 nathanw SC_DEBUG(periph, SCSIPI_DB2,
792 1.13 bouyer ("calling private err_handler()\n"));
793 1.38.4.1 nathanw error = (*periph->periph_switch->psw_error)(xs);
794 1.38.4.1 nathanw if (error != EJUSTRETURN)
795 1.38.4.1 nathanw return (error);
796 1.13 bouyer }
797 1.13 bouyer /* otherwise use the default */
798 1.13 bouyer switch (sense->error_code & SSD_ERRCODE) {
799 1.38.4.7 nathanw
800 1.38.4.7 nathanw /*
801 1.38.4.7 nathanw * Old SCSI-1 and SASI devices respond with
802 1.38.4.7 nathanw * codes other than 70.
803 1.38.4.7 nathanw */
804 1.38.4.7 nathanw case 0x00: /* no error (command completed OK) */
805 1.38.4.7 nathanw return (0);
806 1.38.4.7 nathanw case 0x04: /* drive not ready after it was selected */
807 1.38.4.7 nathanw if ((periph->periph_flags & PERIPH_REMOVABLE) != 0)
808 1.38.4.7 nathanw periph->periph_flags &= ~PERIPH_MEDIA_LOADED;
809 1.38.4.7 nathanw if ((xs->xs_control & XS_CTL_IGNORE_NOT_READY) != 0)
810 1.38.4.7 nathanw return (0);
811 1.38.4.7 nathanw /* XXX - display some sort of error here? */
812 1.38.4.7 nathanw return (EIO);
813 1.38.4.7 nathanw case 0x20: /* invalid command */
814 1.38.4.7 nathanw if ((xs->xs_control &
815 1.38.4.7 nathanw XS_CTL_IGNORE_ILLEGAL_REQUEST) != 0)
816 1.38.4.7 nathanw return (0);
817 1.38.4.7 nathanw return (EINVAL);
818 1.38.4.7 nathanw case 0x25: /* invalid LUN (Adaptec ACB-4000) */
819 1.38.4.7 nathanw return (EACCES);
820 1.38.4.7 nathanw
821 1.13 bouyer /*
822 1.13 bouyer * If it's code 70, use the extended stuff and
823 1.13 bouyer * interpret the key
824 1.13 bouyer */
825 1.13 bouyer case 0x71: /* delayed error */
826 1.38.4.1 nathanw scsipi_printaddr(periph);
827 1.13 bouyer key = sense->flags & SSD_KEY;
828 1.13 bouyer printf(" DEFERRED ERROR, key = 0x%x\n", key);
829 1.13 bouyer /* FALLTHROUGH */
830 1.13 bouyer case 0x70:
831 1.13 bouyer if ((sense->error_code & SSD_ERRCODE_VALID) != 0)
832 1.13 bouyer info = _4btol(sense->info);
833 1.13 bouyer else
834 1.13 bouyer info = 0;
835 1.13 bouyer key = sense->flags & SSD_KEY;
836 1.13 bouyer
837 1.13 bouyer switch (key) {
838 1.13 bouyer case SKEY_NO_SENSE:
839 1.13 bouyer case SKEY_RECOVERED_ERROR:
840 1.13 bouyer if (xs->resid == xs->datalen && xs->datalen) {
841 1.13 bouyer /*
842 1.13 bouyer * Why is this here?
843 1.13 bouyer */
844 1.13 bouyer xs->resid = 0; /* not short read */
845 1.13 bouyer }
846 1.13 bouyer case SKEY_EQUAL:
847 1.13 bouyer error = 0;
848 1.13 bouyer break;
849 1.13 bouyer case SKEY_NOT_READY:
850 1.38.4.1 nathanw if ((periph->periph_flags & PERIPH_REMOVABLE) != 0)
851 1.38.4.1 nathanw periph->periph_flags &= ~PERIPH_MEDIA_LOADED;
852 1.24 thorpej if ((xs->xs_control & XS_CTL_IGNORE_NOT_READY) != 0)
853 1.13 bouyer return (0);
854 1.38.4.2 nathanw if (sense->add_sense_code == 0x3A) {
855 1.19 bouyer error = ENODEV; /* Medium not present */
856 1.38.4.2 nathanw if (xs->xs_control & XS_CTL_SILENT_NODEV)
857 1.38.4.2 nathanw return (error);
858 1.38.4.2 nathanw } else
859 1.19 bouyer error = EIO;
860 1.24 thorpej if ((xs->xs_control & XS_CTL_SILENT) != 0)
861 1.19 bouyer return (error);
862 1.13 bouyer break;
863 1.13 bouyer case SKEY_ILLEGAL_REQUEST:
864 1.24 thorpej if ((xs->xs_control &
865 1.24 thorpej XS_CTL_IGNORE_ILLEGAL_REQUEST) != 0)
866 1.13 bouyer return (0);
867 1.24 thorpej /*
868 1.24 thorpej * Handle the case where a device reports
869 1.24 thorpej * Logical Unit Not Supported during discovery.
870 1.24 thorpej */
871 1.24 thorpej if ((xs->xs_control & XS_CTL_DISCOVERY) != 0 &&
872 1.24 thorpej sense->add_sense_code == 0x25 &&
873 1.24 thorpej sense->add_sense_code_qual == 0x00)
874 1.24 thorpej return (EINVAL);
875 1.24 thorpej if ((xs->xs_control & XS_CTL_SILENT) != 0)
876 1.13 bouyer return (EIO);
877 1.13 bouyer error = EINVAL;
878 1.13 bouyer break;
879 1.13 bouyer case SKEY_UNIT_ATTENTION:
880 1.20 bouyer if (sense->add_sense_code == 0x29 &&
881 1.38.4.1 nathanw sense->add_sense_code_qual == 0x00) {
882 1.38.4.1 nathanw /* device or bus reset */
883 1.38.4.1 nathanw return (ERESTART);
884 1.38.4.1 nathanw }
885 1.38.4.1 nathanw if ((periph->periph_flags & PERIPH_REMOVABLE) != 0)
886 1.38.4.1 nathanw periph->periph_flags &= ~PERIPH_MEDIA_LOADED;
887 1.24 thorpej if ((xs->xs_control &
888 1.24 thorpej XS_CTL_IGNORE_MEDIA_CHANGE) != 0 ||
889 1.13 bouyer /* XXX Should reupload any transient state. */
890 1.38.4.1 nathanw (periph->periph_flags &
891 1.38.4.1 nathanw PERIPH_REMOVABLE) == 0) {
892 1.13 bouyer return (ERESTART);
893 1.38.4.1 nathanw }
894 1.24 thorpej if ((xs->xs_control & XS_CTL_SILENT) != 0)
895 1.13 bouyer return (EIO);
896 1.13 bouyer error = EIO;
897 1.13 bouyer break;
898 1.13 bouyer case SKEY_WRITE_PROTECT:
899 1.13 bouyer error = EROFS;
900 1.13 bouyer break;
901 1.13 bouyer case SKEY_BLANK_CHECK:
902 1.13 bouyer error = 0;
903 1.13 bouyer break;
904 1.13 bouyer case SKEY_ABORTED_COMMAND:
905 1.13 bouyer error = ERESTART;
906 1.13 bouyer break;
907 1.13 bouyer case SKEY_VOLUME_OVERFLOW:
908 1.13 bouyer error = ENOSPC;
909 1.13 bouyer break;
910 1.13 bouyer default:
911 1.13 bouyer error = EIO;
912 1.13 bouyer break;
913 1.13 bouyer }
914 1.13 bouyer
915 1.13 bouyer #ifdef SCSIVERBOSE
916 1.32 augustss if (key && (xs->xs_control & XS_CTL_SILENT) == 0)
917 1.13 bouyer scsipi_print_sense(xs, 0);
918 1.13 bouyer #else
919 1.13 bouyer if (key) {
920 1.38.4.1 nathanw scsipi_printaddr(periph);
921 1.13 bouyer printf("%s", error_mes[key - 1]);
922 1.13 bouyer if ((sense->error_code & SSD_ERRCODE_VALID) != 0) {
923 1.13 bouyer switch (key) {
924 1.13 bouyer case SKEY_NOT_READY:
925 1.13 bouyer case SKEY_ILLEGAL_REQUEST:
926 1.13 bouyer case SKEY_UNIT_ATTENTION:
927 1.13 bouyer case SKEY_WRITE_PROTECT:
928 1.13 bouyer break;
929 1.13 bouyer case SKEY_BLANK_CHECK:
930 1.13 bouyer printf(", requested size: %d (decimal)",
931 1.13 bouyer info);
932 1.13 bouyer break;
933 1.13 bouyer case SKEY_ABORTED_COMMAND:
934 1.38.4.1 nathanw if (xs->xs_retries)
935 1.13 bouyer printf(", retrying");
936 1.13 bouyer printf(", cmd 0x%x, info 0x%x",
937 1.13 bouyer xs->cmd->opcode, info);
938 1.13 bouyer break;
939 1.13 bouyer default:
940 1.13 bouyer printf(", info = %d (decimal)", info);
941 1.13 bouyer }
942 1.13 bouyer }
943 1.13 bouyer if (sense->extra_len != 0) {
944 1.13 bouyer int n;
945 1.13 bouyer printf(", data =");
946 1.13 bouyer for (n = 0; n < sense->extra_len; n++)
947 1.13 bouyer printf(" %02x",
948 1.13 bouyer sense->cmd_spec_info[n]);
949 1.13 bouyer }
950 1.13 bouyer printf("\n");
951 1.13 bouyer }
952 1.13 bouyer #endif
953 1.13 bouyer return (error);
954 1.13 bouyer
955 1.13 bouyer /*
956 1.38.4.7 nathanw * Some other error code; just report it
957 1.13 bouyer */
958 1.13 bouyer default:
959 1.38.4.1 nathanw #if defined(SCSIDEBUG) || defined(DEBUG)
960 1.28 mjacob {
961 1.28 mjacob static char *uc = "undecodable sense error";
962 1.28 mjacob int i;
963 1.28 mjacob u_int8_t *cptr = (u_int8_t *) sense;
964 1.38.4.1 nathanw scsipi_printaddr(periph);
965 1.28 mjacob if (xs->cmd == &xs->cmdstore) {
966 1.28 mjacob printf("%s for opcode 0x%x, data=",
967 1.28 mjacob uc, xs->cmdstore.opcode);
968 1.28 mjacob } else {
969 1.28 mjacob printf("%s, data=", uc);
970 1.28 mjacob }
971 1.28 mjacob for (i = 0; i < sizeof (*sense); i++)
972 1.28 mjacob printf(" 0x%02x", *(cptr++) & 0xff);
973 1.28 mjacob printf("\n");
974 1.28 mjacob }
975 1.28 mjacob #else
976 1.38.4.1 nathanw scsipi_printaddr(periph);
977 1.17 mjacob printf("Sense Error Code 0x%x",
978 1.17 mjacob sense->error_code & SSD_ERRCODE);
979 1.13 bouyer if ((sense->error_code & SSD_ERRCODE_VALID) != 0) {
980 1.13 bouyer struct scsipi_sense_data_unextended *usense =
981 1.13 bouyer (struct scsipi_sense_data_unextended *)sense;
982 1.13 bouyer printf(" at block no. %d (decimal)",
983 1.13 bouyer _3btol(usense->block));
984 1.13 bouyer }
985 1.13 bouyer printf("\n");
986 1.28 mjacob #endif
987 1.13 bouyer return (EIO);
988 1.13 bouyer }
989 1.13 bouyer }
990 1.13 bouyer
991 1.13 bouyer /*
992 1.38.4.1 nathanw * scsipi_size:
993 1.38.4.1 nathanw *
994 1.38.4.1 nathanw * Find out from the device what its capacity is.
995 1.2 bouyer */
996 1.2 bouyer u_long
997 1.38.4.1 nathanw scsipi_size(periph, flags)
998 1.38.4.1 nathanw struct scsipi_periph *periph;
999 1.2 bouyer int flags;
1000 1.2 bouyer {
1001 1.2 bouyer struct scsipi_read_cap_data rdcap;
1002 1.2 bouyer struct scsipi_read_capacity scsipi_cmd;
1003 1.2 bouyer
1004 1.38.4.2 nathanw memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
1005 1.2 bouyer scsipi_cmd.opcode = READ_CAPACITY;
1006 1.2 bouyer
1007 1.2 bouyer /*
1008 1.2 bouyer * If the command works, interpret the result as a 4 byte
1009 1.2 bouyer * number of blocks
1010 1.2 bouyer */
1011 1.38.4.1 nathanw if (scsipi_command(periph, (struct scsipi_generic *)&scsipi_cmd,
1012 1.3 enami sizeof(scsipi_cmd), (u_char *)&rdcap, sizeof(rdcap),
1013 1.38 enami SCSIPIRETRIES, 20000, NULL,
1014 1.38 enami flags | XS_CTL_DATA_IN | XS_CTL_DATA_ONSTACK) != 0) {
1015 1.38.4.1 nathanw scsipi_printaddr(periph);
1016 1.2 bouyer printf("could not get size\n");
1017 1.3 enami return (0);
1018 1.2 bouyer }
1019 1.2 bouyer
1020 1.3 enami return (_4btol(rdcap.addr) + 1);
1021 1.2 bouyer }
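/*
 * Worked example: READ CAPACITY reports the address of the *last*
 * block, which is why one is added above.  A returned address of
 * 0x003fffff therefore means 0x00400000 (4194304) blocks, i.e. 2GB
 * with 512-byte blocks.
 */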
1022 1.2 bouyer
1023 1.2 bouyer /*
1024 1.38.4.1 nathanw * scsipi_test_unit_ready:
1025 1.38.4.1 nathanw *
1026 1.38.4.1 nathanw * Issue a `test unit ready' request.
1027 1.2 bouyer */
1028 1.3 enami int
1029 1.38.4.1 nathanw scsipi_test_unit_ready(periph, flags)
1030 1.38.4.1 nathanw struct scsipi_periph *periph;
1031 1.2 bouyer int flags;
1032 1.2 bouyer {
1033 1.2 bouyer struct scsipi_test_unit_ready scsipi_cmd;
1034 1.2 bouyer
1035 1.2 bouyer /* some ATAPI drives don't support TEST_UNIT_READY. Sigh */
1036 1.38.4.1 nathanw if (periph->periph_quirks & PQUIRK_NOTUR)
1037 1.3 enami return (0);
1038 1.2 bouyer
1039 1.38.4.2 nathanw memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
1040 1.2 bouyer scsipi_cmd.opcode = TEST_UNIT_READY;
1041 1.2 bouyer
1042 1.38.4.1 nathanw return (scsipi_command(periph,
1043 1.3 enami (struct scsipi_generic *)&scsipi_cmd, sizeof(scsipi_cmd),
1044 1.29 bouyer 0, 0, SCSIPIRETRIES, 10000, NULL, flags));
1045 1.2 bouyer }
1046 1.2 bouyer
1047 1.2 bouyer /*
1048 1.38.4.1 nathanw * scsipi_inquire:
1049 1.38.4.1 nathanw *
1050 1.38.4.1 nathanw * Ask the device about itself.
1051 1.2 bouyer */
1052 1.3 enami int
1053 1.38.4.1 nathanw scsipi_inquire(periph, inqbuf, flags)
1054 1.38.4.1 nathanw struct scsipi_periph *periph;
1055 1.2 bouyer struct scsipi_inquiry_data *inqbuf;
1056 1.2 bouyer int flags;
1057 1.2 bouyer {
1058 1.2 bouyer struct scsipi_inquiry scsipi_cmd;
1059 1.38.4.7 nathanw int error;
1060 1.2 bouyer
1061 1.38.4.2 nathanw memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
1062 1.2 bouyer scsipi_cmd.opcode = INQUIRY;
1063 1.2 bouyer scsipi_cmd.length = sizeof(struct scsipi_inquiry_data);
1064 1.2 bouyer
1065 1.38.4.7 nathanw error = scsipi_command(periph,
1066 1.3 enami (struct scsipi_generic *) &scsipi_cmd, sizeof(scsipi_cmd),
1067 1.3 enami (u_char *) inqbuf, sizeof(struct scsipi_inquiry_data),
1068 1.38.4.7 nathanw SCSIPIRETRIES, 10000, NULL, XS_CTL_DATA_IN | flags);
1069 1.38.4.7 nathanw
1070 1.38.4.7 nathanw #ifdef SCSI_OLD_NOINQUIRY
1071 1.38.4.7 nathanw /*
1072 1.38.4.7 nathanw * Kludge for the Adaptec ACB-4000 SCSI->MFM translator.
1073 1.38.4.7 nathanw * This board doesn't support the INQUIRY command at all.
1074 1.38.4.7 nathanw */
1075 1.38.4.7 nathanw if (error == EINVAL || error == EACCES) {
1076 1.38.4.7 nathanw /*
1077 1.38.4.7 nathanw * Conjure up an INQUIRY response.
1078 1.38.4.7 nathanw */
1079 1.38.4.7 nathanw inqbuf->device = (error == EINVAL ?
1080 1.38.4.7 nathanw SID_QUAL_LU_PRESENT :
1081 1.38.4.7 nathanw SID_QUAL_LU_NOTPRESENT) | T_DIRECT;
1082 1.38.4.7 nathanw inqbuf->dev_qual2 = 0;
1083 1.38.4.7 nathanw inqbuf->version = 0;
1084 1.38.4.7 nathanw inqbuf->response_format = SID_FORMAT_SCSI1;
1085 1.38.4.7 nathanw inqbuf->additional_length = 3 + 28;
1086 1.38.4.7 nathanw inqbuf->flags1 = inqbuf->flags2 = inqbuf->flags3 = 0;
1087 1.38.4.7 nathanw memcpy(inqbuf->vendor, "ADAPTEC ", sizeof(inqbuf->vendor));
1088 1.38.4.7 nathanw memcpy(inqbuf->product, "ACB-4000 ",
1089 1.38.4.7 nathanw sizeof(inqbuf->product));
1090 1.38.4.7 nathanw memcpy(inqbuf->revision, " ", sizeof(inqbuf->revision));
1091 1.38.4.7 nathanw error = 0;
1092 1.38.4.7 nathanw }
1093 1.38.4.7 nathanw
1094 1.38.4.7 nathanw /*
1095 1.38.4.7 nathanw * Kludge for the Emulex MT-02 SCSI->QIC translator.
1096 1.38.4.7 nathanw * This board gives an empty response to an INQUIRY command.
1097 1.38.4.7 nathanw */
1098 1.38.4.7 nathanw else if (error == 0 &&
1099 1.38.4.7 nathanw inqbuf->device == (SID_QUAL_LU_PRESENT | T_DIRECT) &&
1100 1.38.4.7 nathanw inqbuf->dev_qual2 == 0 &&
1101 1.38.4.7 nathanw inqbuf->version == 0 &&
1102 1.38.4.7 nathanw inqbuf->response_format == SID_FORMAT_SCSI1) {
1103 1.38.4.7 nathanw /*
1104 1.38.4.7 nathanw * Fill out the INQUIRY response.
1105 1.38.4.7 nathanw */
1106 1.38.4.7 nathanw inqbuf->device = (SID_QUAL_LU_PRESENT | T_SEQUENTIAL);
1107 1.38.4.7 nathanw inqbuf->dev_qual2 = SID_REMOVABLE;
1108 1.38.4.7 nathanw inqbuf->additional_length = 3 + 28;
1109 1.38.4.7 nathanw inqbuf->flags1 = inqbuf->flags2 = inqbuf->flags3 = 0;
1110 1.38.4.7 nathanw memcpy(inqbuf->vendor, "EMULEX ", sizeof(inqbuf->vendor));
1111 1.38.4.7 nathanw memcpy(inqbuf->product, "MT-02 QIC ",
1112 1.38.4.7 nathanw sizeof(inqbuf->product));
1113 1.38.4.7 nathanw memcpy(inqbuf->revision, " ", sizeof(inqbuf->revision));
1114 1.38.4.7 nathanw }
1115 1.38.4.7 nathanw #endif /* SCSI_OLD_NOINQUIRY */
1116 1.38.4.7 nathanw
1117 1.38.4.7 nathanw return error;
1118 1.2 bouyer }
1119 1.2 bouyer
1120 1.2 bouyer /*
1121 1.38.4.1 nathanw * scsipi_prevent:
1122 1.38.4.1 nathanw *
1123 1.38.4.1 nathanw * Prevent or allow the user to remove the media
1124 1.2 bouyer */
1125 1.3 enami int
1126 1.38.4.1 nathanw scsipi_prevent(periph, type, flags)
1127 1.38.4.1 nathanw struct scsipi_periph *periph;
1128 1.2 bouyer int type, flags;
1129 1.2 bouyer {
1130 1.2 bouyer struct scsipi_prevent scsipi_cmd;
1131 1.2 bouyer
1132 1.38.4.1 nathanw if (periph->periph_quirks & PQUIRK_NODOORLOCK)
1133 1.3 enami return (0);
1134 1.2 bouyer
1135 1.38.4.2 nathanw memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
1136 1.2 bouyer scsipi_cmd.opcode = PREVENT_ALLOW;
1137 1.2 bouyer scsipi_cmd.how = type;
1138 1.38.4.1 nathanw
1139 1.38.4.1 nathanw return (scsipi_command(periph,
1140 1.3 enami (struct scsipi_generic *) &scsipi_cmd, sizeof(scsipi_cmd),
1141 1.29 bouyer 0, 0, SCSIPIRETRIES, 5000, NULL, flags));
1142 1.2 bouyer }
1143 1.2 bouyer
1144 1.2 bouyer /*
1145 1.38.4.1 nathanw * scsipi_start:
1146 1.38.4.1 nathanw *
1147 1.38.4.1 nathanw * Send a START UNIT.
1148 1.2 bouyer */
1149 1.3 enami int
1150 1.38.4.1 nathanw scsipi_start(periph, type, flags)
1151 1.38.4.1 nathanw struct scsipi_periph *periph;
1152 1.2 bouyer int type, flags;
1153 1.2 bouyer {
1154 1.2 bouyer struct scsipi_start_stop scsipi_cmd;
1155 1.18 bouyer
1156 1.38.4.1 nathanw if (periph->periph_quirks & PQUIRK_NOSTARTUNIT)
1157 1.18 bouyer return 0;
1158 1.2 bouyer
1159 1.38.4.2 nathanw memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
1160 1.2 bouyer scsipi_cmd.opcode = START_STOP;
1161 1.2 bouyer scsipi_cmd.byte2 = 0x00;
1162 1.2 bouyer scsipi_cmd.how = type;
1163 1.38.4.1 nathanw
1164 1.38.4.1 nathanw return (scsipi_command(periph,
1165 1.3 enami (struct scsipi_generic *) &scsipi_cmd, sizeof(scsipi_cmd),
1166 1.29 bouyer 0, 0, SCSIPIRETRIES, (type & SSS_START) ? 60000 : 10000,
1167 1.29 bouyer NULL, flags));
1168 1.2 bouyer }
1169 1.2 bouyer
1170 1.2 bouyer /*
1171 1.38.4.1 nathanw * scsipi_mode_sense, scsipi_mode_sense_big:
1172 1.38.4.1 nathanw * get a mode page from a device
1173 1.2 bouyer */
1174 1.2 bouyer
1175 1.38.4.1 nathanw int
1176 1.38.4.1 nathanw scsipi_mode_sense(periph, byte2, page, data, len, flags, retries, timeout)
1177 1.38.4.1 nathanw struct scsipi_periph *periph;
1178 1.38.4.1 nathanw int byte2, page, len, flags, retries, timeout;
1179 1.38.4.1 nathanw struct scsipi_mode_header *data;
1180 1.38.4.1 nathanw {
1181 1.38.4.1 nathanw struct scsipi_mode_sense scsipi_cmd;
1182 1.38.4.1 nathanw int error;
1183 1.38.4.1 nathanw
1184 1.38.4.2 nathanw memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
1185 1.38.4.1 nathanw scsipi_cmd.opcode = MODE_SENSE;
1186 1.38.4.1 nathanw scsipi_cmd.byte2 = byte2;
1187 1.38.4.1 nathanw scsipi_cmd.page = page;
1188 1.38.4.1 nathanw if (scsipi_periph_bustype(periph) == SCSIPI_BUSTYPE_ATAPI)
1189 1.38.4.1 nathanw _lto2b(len, scsipi_cmd.u_len.atapi.length);
1190 1.38.4.1 nathanw else
1191 1.38.4.1 nathanw scsipi_cmd.u_len.scsi.length = len & 0xff;
1192 1.38.4.1 nathanw error = scsipi_command(periph, (struct scsipi_generic *)&scsipi_cmd,
1193 1.38.4.1 nathanw sizeof(scsipi_cmd), (void *)data, len, retries, timeout, NULL,
1194 1.38.4.1 nathanw flags | XS_CTL_DATA_IN);
1195 1.38.4.1 nathanw SC_DEBUG(periph, SCSIPI_DB2,
1196 1.38.4.1 nathanw ("scsipi_mode_sense: error=%d\n", error));
1197 1.38.4.1 nathanw return (error);
1198 1.38.4.1 nathanw }
1199 1.38.4.1 nathanw
1200 1.38.4.1 nathanw int
1201 1.38.4.1 nathanw scsipi_mode_sense_big(periph, byte2, page, data, len, flags, retries, timeout)
1202 1.38.4.1 nathanw struct scsipi_periph *periph;
1203 1.38.4.1 nathanw int byte2, page, len, flags, retries, timeout;
1204 1.38.4.1 nathanw struct scsipi_mode_header_big *data;
1205 1.38.4.1 nathanw {
1206 1.38.4.1 nathanw struct scsipi_mode_sense_big scsipi_cmd;
1207 1.38.4.1 nathanw int error;
1208 1.38.4.1 nathanw
1209 1.38.4.2 nathanw memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
1210 1.38.4.1 nathanw scsipi_cmd.opcode = MODE_SENSE_BIG;
1211 1.38.4.1 nathanw scsipi_cmd.byte2 = byte2;
1212 1.38.4.1 nathanw scsipi_cmd.page = page;
1213 1.38.4.1 nathanw _lto2b(len, scsipi_cmd.length);
1214 1.38.4.1 nathanw error = scsipi_command(periph, (struct scsipi_generic *)&scsipi_cmd,
1215 1.38.4.1 nathanw sizeof(scsipi_cmd), (void *)data, len, retries, timeout, NULL,
1216 1.38.4.1 nathanw flags | XS_CTL_DATA_IN);
1217 1.38.4.1 nathanw SC_DEBUG(periph, SCSIPI_DB2,
1218 1.38.4.1 nathanw ("scsipi_mode_sense_big: error=%d\n", error));
1219 1.38.4.1 nathanw return (error);
1220 1.38.4.1 nathanw }
1221 1.38.4.1 nathanw
1222 1.38.4.1 nathanw int
1223 1.38.4.1 nathanw scsipi_mode_select(periph, byte2, data, len, flags, retries, timeout)
1224 1.38.4.1 nathanw struct scsipi_periph *periph;
1225 1.38.4.1 nathanw int byte2, len, flags, retries, timeout;
1226 1.38.4.1 nathanw struct scsipi_mode_header *data;
1227 1.38.4.1 nathanw {
1228 1.38.4.1 nathanw struct scsipi_mode_select scsipi_cmd;
1229 1.38.4.1 nathanw int error;
1230 1.38.4.1 nathanw
1231 1.38.4.2 nathanw memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
1232 1.38.4.1 nathanw scsipi_cmd.opcode = MODE_SELECT;
1233 1.38.4.1 nathanw scsipi_cmd.byte2 = byte2;
1234 1.38.4.1 nathanw if (scsipi_periph_bustype(periph) == SCSIPI_BUSTYPE_ATAPI)
1235 1.38.4.1 nathanw _lto2b(len, scsipi_cmd.u_len.atapi.length);
1236 1.38.4.1 nathanw else
1237 1.38.4.1 nathanw scsipi_cmd.u_len.scsi.length = len & 0xff;
1238 1.38.4.1 nathanw error = scsipi_command(periph, (struct scsipi_generic *)&scsipi_cmd,
1239 1.38.4.1 nathanw sizeof(scsipi_cmd), (void *)data, len, retries, timeout, NULL,
1240 1.38.4.1 nathanw flags | XS_CTL_DATA_OUT);
1241 1.38.4.1 nathanw SC_DEBUG(periph, SCSIPI_DB2,
1242 1.38.4.1 nathanw ("scsipi_mode_select: error=%d\n", error));
1243 1.38.4.1 nathanw return (error);
1244 1.38.4.1 nathanw }
1245 1.38.4.1 nathanw
1246 1.38.4.1 nathanw int
1247 1.38.4.1 nathanw scsipi_mode_select_big(periph, byte2, data, len, flags, retries, timeout)
1248 1.38.4.1 nathanw struct scsipi_periph *periph;
1249 1.38.4.1 nathanw int byte2, len, flags, retries, timeout;
1250 1.38.4.1 nathanw struct scsipi_mode_header_big *data;
1251 1.38.4.1 nathanw {
1252 1.38.4.1 nathanw struct scsipi_mode_select_big scsipi_cmd;
1253 1.38.4.1 nathanw int error;
1254 1.38.4.1 nathanw
1255 1.38.4.2 nathanw memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
1256 1.38.4.1 nathanw scsipi_cmd.opcode = MODE_SELECT_BIG;
1257 1.38.4.1 nathanw scsipi_cmd.byte2 = byte2;
1258 1.38.4.1 nathanw _lto2b(len, scsipi_cmd.length);
1259 1.38.4.1 nathanw error = scsipi_command(periph, (struct scsipi_generic *)&scsipi_cmd,
1260 1.38.4.1 nathanw sizeof(scsipi_cmd), (void *)data, len, retries, timeout, NULL,
1261 1.38.4.1 nathanw flags | XS_CTL_DATA_OUT);
1262 1.38.4.1 nathanw SC_DEBUG(periph, SCSIPI_DB2,
1263 1.38.4.1 nathanw ("scsipi_mode_select_big: error=%d\n", error));
1264 1.38.4.1 nathanw return (error);
1265 1.38.4.1 nathanw }
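/*
 * Illustrative caller sketch (hypothetical; `mpage' is an assumed
 * driver-private structure beginning with a struct scsipi_mode_header,
 * not something defined in this file).  Fetching mode page 8 (caching)
 * from a periph driver might look like:
 *
 *	error = scsipi_mode_sense(periph, 0, 8, &mpage.header,
 *	    sizeof(mpage), XS_CTL_DATA_ONSTACK, SCSIPIRETRIES, 10000);
 *
 * and a modified page would be written back with scsipi_mode_select()
 * using the same header and length.
 */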
1266 1.38.4.1 nathanw
1267 1.38.4.1 nathanw /*
1268 1.38.4.1 nathanw * scsipi_done:
1269 1.38.4.1 nathanw *
1270 1.38.4.1 nathanw * This routine is called by an adapter's interrupt handler when
1271 1.38.4.1 nathanw * an xfer is completed.
1272 1.38.4.1 nathanw */
1273 1.38.4.1 nathanw void
1274 1.38.4.1 nathanw scsipi_done(xs)
1275 1.38.4.1 nathanw struct scsipi_xfer *xs;
1276 1.38.4.1 nathanw {
1277 1.38.4.1 nathanw struct scsipi_periph *periph = xs->xs_periph;
1278 1.38.4.1 nathanw struct scsipi_channel *chan = periph->periph_channel;
1279 1.38.4.1 nathanw int s, freezecnt;
1280 1.38.4.1 nathanw
1281 1.38.4.1 nathanw SC_DEBUG(periph, SCSIPI_DB2, ("scsipi_done\n"));
1282 1.38.4.1 nathanw #ifdef SCSIPI_DEBUG
1283 1.38.4.1 nathanw if (periph->periph_dbflags & SCSIPI_DB1)
1284 1.2 bouyer show_scsipi_cmd(xs);
1285 1.38.4.1 nathanw #endif
1286 1.2 bouyer
1287 1.38.4.1 nathanw s = splbio();
1288 1.2 bouyer /*
1289 1.38.4.1 nathanw * The resource this command was using is now free.
1290 1.3 enami */
1291 1.38.4.1 nathanw scsipi_put_resource(chan);
1292 1.38.4.1 nathanw xs->xs_periph->periph_sent--;
1293 1.2 bouyer
1294 1.38.4.1 nathanw /*
1295 1.38.4.1 nathanw * If the command was tagged, free the tag.
1296 1.38.4.1 nathanw */
1297 1.38.4.1 nathanw if (XS_CTL_TAGTYPE(xs) != 0)
1298 1.38.4.1 nathanw scsipi_put_tag(xs);
1299 1.38.4.1 nathanw else
1300 1.38.4.1 nathanw periph->periph_flags &= ~PERIPH_UNTAG;
1301 1.2 bouyer
1302 1.38.4.1 nathanw /* Mark the command as `done'. */
1303 1.38.4.1 nathanw xs->xs_status |= XS_STS_DONE;
1304 1.38.4.1 nathanw
1305 1.38.4.1 nathanw #ifdef DIAGNOSTIC
1306 1.38.4.1 nathanw if ((xs->xs_control & (XS_CTL_ASYNC|XS_CTL_POLL)) ==
1307 1.38.4.1 nathanw (XS_CTL_ASYNC|XS_CTL_POLL))
1308 1.38.4.1 nathanw panic("scsipi_done: ASYNC and POLL");
1309 1.38.4.1 nathanw #endif
1310 1.2 bouyer
1311 1.2 bouyer /*
1312 1.38.4.1 nathanw * If the xfer had an error of any sort, freeze the
1313 1.38.4.1 nathanw * periph's queue. Freeze it again if we were requested
1314 1.38.4.1 nathanw * to do so in the xfer.
1315 1.2 bouyer */
1316 1.38.4.1 nathanw freezecnt = 0;
1317 1.38.4.1 nathanw if (xs->error != XS_NOERROR)
1318 1.38.4.1 nathanw freezecnt++;
1319 1.38.4.1 nathanw if (xs->xs_control & XS_CTL_FREEZE_PERIPH)
1320 1.38.4.1 nathanw freezecnt++;
1321 1.38.4.1 nathanw if (freezecnt != 0)
1322 1.38.4.1 nathanw scsipi_periph_freeze(periph, freezecnt);
1323 1.2 bouyer
1324 1.38.4.1 nathanw /*
1325 1.38.4.1 nathanw * Record the xfer with a pending sense, in case a SCSI reset is
1326 1.38.4.1 nathanw * received before the thread is woken up.
1327 1.38.4.1 nathanw */
1328 1.38.4.1 nathanw if (xs->error == XS_BUSY && xs->status == SCSI_CHECK) {
1329 1.38.4.1 nathanw periph->periph_flags |= PERIPH_SENSE;
1330 1.38.4.1 nathanw periph->periph_xscheck = xs;
1331 1.20 bouyer }
1332 1.2 bouyer
1333 1.38.4.1 nathanw /*
1334 1.38.4.4 nathanw * If this was an xfer that was not to complete asynchronously,
1335 1.38.4.1 nathanw * let the requesting thread perform error checking/handling
1336 1.38.4.1 nathanw * in its context.
1337 1.38.4.1 nathanw */
1338 1.38.4.1 nathanw if ((xs->xs_control & XS_CTL_ASYNC) == 0) {
1339 1.38.4.1 nathanw splx(s);
1340 1.2 bouyer /*
1341 1.38.4.1 nathanw * If it's a polling job, just return, to unwind the
1342 1.38.4.1 nathanw * call graph. We don't need to restart the queue,
1343 1.38.4.1 nathanw 		 * because polling jobs are treated specially, and
1344 1.38.4.1 nathanw 		 * are really only used during crash dumps anyway
1345 1.38.4.1 nathanw 		 * (XXX or during boot-time autoconfiguration of
1346 1.38.4.1 nathanw * ATAPI devices).
1347 1.2 bouyer */
1348 1.38.4.1 nathanw if (xs->xs_control & XS_CTL_POLL)
1349 1.38.4.1 nathanw return;
1350 1.38.4.1 nathanw wakeup(xs);
1351 1.38.4.1 nathanw goto out;
1352 1.2 bouyer }
1353 1.38.4.1 nathanw
1354 1.9 scottr /*
1355 1.38.4.1 nathanw * Catch the extremely common case of I/O completing
1356 1.38.4.1 nathanw * without error; no use in taking a context switch
1357 1.38.4.1 nathanw * if we can handle it in interrupt context.
1358 1.9 scottr */
1359 1.38.4.1 nathanw if (xs->error == XS_NOERROR) {
1360 1.22 pk splx(s);
1361 1.38.4.1 nathanw (void) scsipi_complete(xs);
1362 1.38.4.1 nathanw goto out;
1363 1.22 pk }
1364 1.2 bouyer
1365 1.2 bouyer /*
1366 1.38.4.1 nathanw * There is an error on this xfer. Put it on the channel's
1367 1.38.4.1 nathanw * completion queue, and wake up the completion thread.
1368 1.38.4.1 nathanw */
1369 1.38.4.1 nathanw TAILQ_INSERT_TAIL(&chan->chan_complete, xs, channel_q);
1370 1.38.4.1 nathanw splx(s);
1371 1.38.4.1 nathanw wakeup(&chan->chan_complete);
1372 1.2 bouyer
1373 1.38.4.1 nathanw out:
1374 1.38.4.1 nathanw /*
1375 1.38.4.1 nathanw * If there are more xfers on the channel's queue, attempt to
1376 1.38.4.1 nathanw * run them.
1377 1.38.4.1 nathanw */
1378 1.38.4.1 nathanw scsipi_run_queue(chan);
1379 1.2 bouyer }
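
/*
 * Illustrative sketch only (kept under #if 0, never compiled): one way an
 * adapter's interrupt handler might fill in the completion fields before
 * handing the xfer back to scsipi_done() above.  Real adapters differ
 * (some perform autosense themselves); "example_adapter_done" is a
 * made-up name.  The XS_BUSY mappings mirror the checks made in
 * scsipi_done() above and scsipi_complete() below.
 */
#if 0
static void
example_adapter_done(xs, status, resid)
	struct scsipi_xfer *xs;
	int status, resid;
{

	xs->status = status;
	xs->resid = resid;
	switch (status) {
	case SCSI_OK:
		xs->error = XS_NOERROR;
		break;
	case SCSI_CHECK:
	case SCSI_QUEUE_FULL:
		/*
		 * XS_BUSY plus the SCSI status byte is what the mid-layer
		 * keys off of to issue a REQUEST SENSE or to shrink the
		 * device's openings.
		 */
		xs->error = XS_BUSY;
		break;
	default:
		xs->error = XS_DRIVER_STUFFUP;
		break;
	}
	scsipi_done(xs);
}
#endif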
1380 1.2 bouyer
1381 1.38.4.1 nathanw /*
1382 1.38.4.1 nathanw * scsipi_complete:
1383 1.38.4.1 nathanw *
1384 1.38.4.1 nathanw * Completion of a scsipi_xfer. This is the guts of scsipi_done().
1385 1.38.4.1 nathanw *
1386 1.38.4.1 nathanw * NOTE: This routine MUST be called with valid thread context
1387 1.38.4.1 nathanw * except for the case where the following two conditions are
1388 1.38.4.1 nathanw * true:
1389 1.38.4.1 nathanw *
1390 1.38.4.1 nathanw * xs->error == XS_NOERROR
1391 1.38.4.1 nathanw * XS_CTL_ASYNC is set in xs->xs_control
1392 1.38.4.1 nathanw *
1393 1.38.4.1 nathanw * The semantics of this routine can be tricky, so here is an
1394 1.38.4.1 nathanw * explanation:
1395 1.38.4.1 nathanw *
1396 1.38.4.1 nathanw * 0 Xfer completed successfully.
1397 1.38.4.1 nathanw *
1398 1.38.4.1 nathanw * ERESTART Xfer had an error, but was restarted.
1399 1.38.4.1 nathanw *
1400 1.38.4.1 nathanw * anything else Xfer had an error, return value is Unix
1401 1.38.4.1 nathanw * errno.
1402 1.38.4.1 nathanw *
1403 1.38.4.1 nathanw * If the return value is anything but ERESTART:
1404 1.38.4.1 nathanw *
1405 1.38.4.1 nathanw * - If XS_CTL_ASYNC is set, `xs' has been freed back to
1406 1.38.4.1 nathanw * the pool.
1407 1.38.4.1 nathanw * - If there is a buf associated with the xfer,
1408 1.38.4.1 nathanw * it has been biodone()'d.
1409 1.38.4.1 nathanw */
1410 1.3 enami int
1411 1.38.4.1 nathanw scsipi_complete(xs)
1412 1.2 bouyer struct scsipi_xfer *xs;
1413 1.2 bouyer {
1414 1.38.4.1 nathanw struct scsipi_periph *periph = xs->xs_periph;
1415 1.38.4.1 nathanw struct scsipi_channel *chan = periph->periph_channel;
1416 1.38.4.1 nathanw struct buf *bp;
1417 1.38.4.1 nathanw int error, s;
1418 1.2 bouyer
1419 1.38.4.1 nathanw #ifdef DIAGNOSTIC
1420 1.38.4.1 nathanw if ((xs->xs_control & XS_CTL_ASYNC) != 0 && xs->bp == NULL)
1421 1.38.4.1 nathanw panic("scsipi_complete: XS_CTL_ASYNC but no buf");
1422 1.38.4.1 nathanw #endif
1423 1.2 bouyer /*
1424 1.38.4.1 nathanw * If command terminated with a CHECK CONDITION, we need to issue a
1425 1.38.4.1 nathanw * REQUEST_SENSE command. Once the REQUEST_SENSE has been processed
1426 1.38.4.1 nathanw * we'll have the real status.
1427 1.38.4.1 nathanw * Must be processed at splbio() to avoid missing a SCSI bus reset
1428 1.38.4.1 nathanw * for this command.
1429 1.38.4.1 nathanw */
1430 1.38.4.1 nathanw s = splbio();
1431 1.38.4.1 nathanw if (xs->error == XS_BUSY && xs->status == SCSI_CHECK) {
1432 1.38.4.1 nathanw 			/* request sense for a request sense? */
1433 1.38.4.1 nathanw if (xs->xs_control & XS_CTL_REQSENSE) {
1434 1.38.4.1 nathanw scsipi_printaddr(periph);
1435 1.38.4.2 nathanw 			printf("request sense for a request sense?\n");
1436 1.38.4.1 nathanw 			/* XXX maybe we should reset the device? */
1437 1.38.4.1 nathanw /* we've been frozen because xs->error != XS_NOERROR */
1438 1.38.4.1 nathanw scsipi_periph_thaw(periph, 1);
1439 1.38.4.1 nathanw splx(s);
1440 1.38.4.2 nathanw if (xs->resid < xs->datalen) {
1441 1.38.4.2 nathanw printf("we read %d bytes of sense anyway:\n",
1442 1.38.4.2 nathanw xs->datalen - xs->resid);
1443 1.38.4.2 nathanw #ifdef SCSIVERBOSE
1444 1.38.4.2 nathanw scsipi_print_sense_data((void *)xs->data, 0);
1445 1.38.4.2 nathanw #endif
1446 1.38.4.2 nathanw }
1447 1.38.4.1 nathanw return EINVAL;
1448 1.38.4.1 nathanw }
1449 1.38.4.1 nathanw scsipi_request_sense(xs);
1450 1.38.4.1 nathanw }
1451 1.38.4.1 nathanw splx(s);
1452 1.38.4.2 nathanw
1453 1.38.4.1 nathanw /*
1454 1.38.4.1 nathanw * If it's a user level request, bypass all usual completion
1455 1.38.4.1 nathanw 	 * processing, let the user work it out.
1456 1.2 bouyer */
1457 1.38.4.1 nathanw if ((xs->xs_control & XS_CTL_USERCMD) != 0) {
1458 1.38.4.1 nathanw SC_DEBUG(periph, SCSIPI_DB3, ("calling user done()\n"));
1459 1.38.4.1 nathanw if (xs->error != XS_NOERROR)
1460 1.38.4.1 nathanw scsipi_periph_thaw(periph, 1);
1461 1.38.4.1 nathanw scsipi_user_done(xs);
1462 1.38.4.1 nathanw 		SC_DEBUG(periph, SCSIPI_DB3, ("returned from user done()\n"));
1463 1.38.4.1 nathanw return 0;
1464 1.38.4.1 nathanw }
1465 1.38.4.1 nathanw
1466 1.2 bouyer switch (xs->error) {
1467 1.38.4.1 nathanw case XS_NOERROR:
1468 1.2 bouyer error = 0;
1469 1.2 bouyer break;
1470 1.2 bouyer
1471 1.2 bouyer case XS_SENSE:
1472 1.13 bouyer case XS_SHORTSENSE:
1473 1.38.4.1 nathanw error = (*chan->chan_bustype->bustype_interpret_sense)(xs);
1474 1.2 bouyer break;
1475 1.2 bouyer
1476 1.38.4.1 nathanw case XS_RESOURCE_SHORTAGE:
1477 1.38.4.1 nathanw /*
1478 1.38.4.1 nathanw * XXX Should freeze channel's queue.
1479 1.38.4.1 nathanw */
1480 1.38.4.1 nathanw scsipi_printaddr(periph);
1481 1.38.4.1 nathanw printf("adapter resource shortage\n");
1482 1.38.4.1 nathanw /* FALLTHROUGH */
1483 1.38.4.1 nathanw
1484 1.2 bouyer case XS_BUSY:
1485 1.38.4.1 nathanw if (xs->error == XS_BUSY && xs->status == SCSI_QUEUE_FULL) {
1486 1.38.4.1 nathanw struct scsipi_max_openings mo;
1487 1.38.4.1 nathanw
1488 1.38.4.1 nathanw /*
1489 1.38.4.1 nathanw * We set the openings to active - 1, assuming that
1490 1.38.4.1 nathanw * the command that got us here is the first one that
1491 1.38.4.1 nathanw * can't fit into the device's queue. If that's not
1492 1.38.4.1 nathanw * the case, I guess we'll find out soon enough.
1493 1.38.4.1 nathanw */
1494 1.38.4.1 nathanw mo.mo_target = periph->periph_target;
1495 1.38.4.1 nathanw mo.mo_lun = periph->periph_lun;
1496 1.38.4.1 nathanw if (periph->periph_active < periph->periph_openings)
1497 1.38.4.1 nathanw mo.mo_openings = periph->periph_active - 1;
1498 1.2 bouyer else
1499 1.38.4.1 nathanw mo.mo_openings = periph->periph_openings - 1;
1500 1.38.4.1 nathanw #ifdef DIAGNOSTIC
1501 1.38.4.1 nathanw if (mo.mo_openings < 0) {
1502 1.38.4.1 nathanw scsipi_printaddr(periph);
1503 1.38.4.1 nathanw printf("QUEUE FULL resulted in < 0 openings\n");
1504 1.38.4.1 nathanw panic("scsipi_done");
1505 1.38.4.1 nathanw }
1506 1.2 bouyer #endif
1507 1.38.4.1 nathanw if (mo.mo_openings == 0) {
1508 1.38.4.1 nathanw scsipi_printaddr(periph);
1509 1.38.4.1 nathanw printf("QUEUE FULL resulted in 0 openings\n");
1510 1.38.4.1 nathanw mo.mo_openings = 1;
1511 1.38.4.1 nathanw }
1512 1.38.4.1 nathanw scsipi_async_event(chan, ASYNC_EVENT_MAX_OPENINGS, &mo);
1513 1.38.4.1 nathanw error = ERESTART;
1514 1.38.4.1 nathanw } else if (xs->xs_retries != 0) {
1515 1.38.4.1 nathanw xs->xs_retries--;
1516 1.38.4.1 nathanw /*
1517 1.38.4.1 nathanw * Wait one second, and try again.
1518 1.38.4.1 nathanw */
1519 1.38.4.4 nathanw if ((xs->xs_control & XS_CTL_POLL) ||
1520 1.38.4.4 nathanw (chan->chan_flags & SCSIPI_CHAN_TACTIVE) == 0) {
1521 1.38.4.1 nathanw delay(1000000);
1522 1.38.4.4 nathanw } else {
1523 1.38.4.1 nathanw scsipi_periph_freeze(periph, 1);
1524 1.38.4.1 nathanw callout_reset(&periph->periph_callout,
1525 1.38.4.1 nathanw hz, scsipi_periph_timed_thaw, periph);
1526 1.38.4.1 nathanw }
1527 1.38.4.1 nathanw error = ERESTART;
1528 1.38.4.1 nathanw } else
1529 1.38.4.1 nathanw error = EBUSY;
1530 1.38.4.1 nathanw break;
1531 1.38.4.1 nathanw
1532 1.38.4.1 nathanw case XS_REQUEUE:
1533 1.38.4.1 nathanw error = ERESTART;
1534 1.38.4.1 nathanw break;
1535 1.38.4.1 nathanw
1536 1.2 bouyer case XS_TIMEOUT:
1537 1.38.4.1 nathanw if (xs->xs_retries != 0) {
1538 1.38.4.1 nathanw xs->xs_retries--;
1539 1.38.4.1 nathanw error = ERESTART;
1540 1.38.4.1 nathanw } else
1541 1.38.4.1 nathanw error = EIO;
1542 1.2 bouyer break;
1543 1.2 bouyer
1544 1.2 bouyer case XS_SELTIMEOUT:
1545 1.2 bouyer /* XXX Disable device? */
1546 1.12 thorpej error = EIO;
1547 1.12 thorpej break;
1548 1.12 thorpej
1549 1.12 thorpej case XS_RESET:
1550 1.38.4.1 nathanw if (xs->xs_control & XS_CTL_REQSENSE) {
1551 1.38.4.1 nathanw /*
1552 1.38.4.1 nathanw * request sense interrupted by reset: signal it
1553 1.38.4.1 nathanw * with EINTR return code.
1554 1.38.4.1 nathanw */
1555 1.38.4.1 nathanw error = EINTR;
1556 1.38.4.1 nathanw } else {
1557 1.38.4.1 nathanw if (xs->xs_retries != 0) {
1558 1.38.4.1 nathanw xs->xs_retries--;
1559 1.38.4.1 nathanw error = ERESTART;
1560 1.38.4.1 nathanw } else
1561 1.38.4.1 nathanw error = EIO;
1562 1.12 thorpej }
1563 1.2 bouyer break;
1564 1.2 bouyer
1565 1.2 bouyer default:
1566 1.38.4.1 nathanw scsipi_printaddr(periph);
1567 1.38.4.1 nathanw printf("invalid return code from adapter: %d\n", xs->error);
1568 1.2 bouyer error = EIO;
1569 1.2 bouyer break;
1570 1.2 bouyer }
1571 1.2 bouyer
1572 1.38.4.1 nathanw s = splbio();
1573 1.38.4.1 nathanw if (error == ERESTART) {
1574 1.38.4.1 nathanw /*
1575 1.38.4.1 nathanw * If we get here, the periph has been thawed and frozen
1576 1.38.4.1 nathanw * again if we had to issue recovery commands. Alternatively,
1577 1.38.4.1 nathanw * it may have been frozen again and in a timed thaw. In
1578 1.38.4.1 nathanw * any case, we thaw the periph once we re-enqueue the
1579 1.38.4.1 nathanw * command. Once the periph is fully thawed, it will begin
1580 1.38.4.1 nathanw * operation again.
1581 1.38.4.1 nathanw */
1582 1.38.4.1 nathanw xs->error = XS_NOERROR;
1583 1.38.4.1 nathanw xs->status = SCSI_OK;
1584 1.38.4.1 nathanw xs->xs_status &= ~XS_STS_DONE;
1585 1.38.4.1 nathanw xs->xs_requeuecnt++;
1586 1.38.4.1 nathanw error = scsipi_enqueue(xs);
1587 1.38.4.1 nathanw if (error == 0) {
1588 1.38.4.1 nathanw scsipi_periph_thaw(periph, 1);
1589 1.38.4.1 nathanw splx(s);
1590 1.38.4.1 nathanw return (ERESTART);
1591 1.38.4.1 nathanw }
1592 1.38.4.1 nathanw }
1593 1.38.4.1 nathanw
1594 1.38.4.1 nathanw /*
1595 1.38.4.1 nathanw * scsipi_done() freezes the queue if not XS_NOERROR.
1596 1.38.4.1 nathanw * Thaw it here.
1597 1.38.4.1 nathanw */
1598 1.38.4.1 nathanw if (xs->error != XS_NOERROR)
1599 1.38.4.1 nathanw scsipi_periph_thaw(periph, 1);
1600 1.38.4.1 nathanw
1601 1.38.4.10 nathanw /*
1602 1.38.4.10 nathanw * Set buffer fields in case the periph
1603 1.38.4.10 nathanw * switch done func uses them
1604 1.38.4.10 nathanw */
1605 1.38.4.1 nathanw if ((bp = xs->bp) != NULL) {
1606 1.38.4.1 nathanw if (error) {
1607 1.38.4.1 nathanw bp->b_error = error;
1608 1.38.4.1 nathanw bp->b_flags |= B_ERROR;
1609 1.38.4.1 nathanw bp->b_resid = bp->b_bcount;
1610 1.38.4.1 nathanw } else {
1611 1.38.4.1 nathanw bp->b_error = 0;
1612 1.38.4.1 nathanw bp->b_resid = xs->resid;
1613 1.38.4.8 nathanw }
1614 1.38.4.1 nathanw }
1615 1.38.4.1 nathanw
1616 1.38.4.10 nathanw if (periph->periph_switch->psw_done)
1617 1.38.4.10 nathanw periph->periph_switch->psw_done(xs);
1618 1.38.4.10 nathanw
1619 1.38.4.10 nathanw if (bp)
1620 1.38.4.10 nathanw biodone(bp);
1621 1.38.4.10 nathanw
1622 1.38.4.1 nathanw if (xs->xs_control & XS_CTL_ASYNC)
1623 1.38.4.1 nathanw scsipi_put_xs(xs);
1624 1.38.4.1 nathanw splx(s);
1625 1.38.4.1 nathanw
1626 1.3 enami return (error);
1627 1.2 bouyer }
1628 1.2 bouyer
1629 1.14 thorpej /*
1630 1.38.4.1 nathanw * Issue a request sense for the given scsipi_xfer. Called when the xfer
1631 1.38.4.1 nathanw * returns with a CHECK_CONDITION status. Must be called in valid thread
1632 1.38.4.1 nathanw * context and at splbio().
1633 1.38.4.1 nathanw */
1634 1.38.4.1 nathanw
1635 1.38.4.1 nathanw void
1636 1.38.4.1 nathanw scsipi_request_sense(xs)
1637 1.38.4.1 nathanw struct scsipi_xfer *xs;
1638 1.38.4.1 nathanw {
1639 1.38.4.1 nathanw struct scsipi_periph *periph = xs->xs_periph;
1640 1.38.4.1 nathanw int flags, error;
1641 1.38.4.1 nathanw struct scsipi_sense cmd;
1642 1.38.4.1 nathanw
1643 1.38.4.1 nathanw periph->periph_flags |= PERIPH_SENSE;
1644 1.38.4.1 nathanw
1645 1.38.4.1 nathanw /* if command was polling, request sense will too */
1646 1.38.4.1 nathanw flags = xs->xs_control & XS_CTL_POLL;
1647 1.38.4.1 nathanw /* Polling commands can't sleep */
1648 1.38.4.1 nathanw if (flags)
1649 1.38.4.1 nathanw flags |= XS_CTL_NOSLEEP;
1650 1.38.4.1 nathanw
1651 1.38.4.1 nathanw flags |= XS_CTL_REQSENSE | XS_CTL_URGENT | XS_CTL_DATA_IN |
1652 1.38.4.1 nathanw XS_CTL_THAW_PERIPH | XS_CTL_FREEZE_PERIPH;
1653 1.38.4.1 nathanw
1654 1.38.4.2 nathanw memset(&cmd, 0, sizeof(cmd));
1655 1.38.4.1 nathanw cmd.opcode = REQUEST_SENSE;
1656 1.38.4.1 nathanw cmd.length = sizeof(struct scsipi_sense_data);
1657 1.38.4.1 nathanw
1658 1.38.4.1 nathanw error = scsipi_command(periph,
1659 1.38.4.1 nathanw (struct scsipi_generic *) &cmd, sizeof(cmd),
1660 1.38.4.1 nathanw (u_char*)&xs->sense.scsi_sense, sizeof(struct scsipi_sense_data),
1661 1.38.4.1 nathanw 0, 1000, NULL, flags);
1662 1.38.4.1 nathanw periph->periph_flags &= ~PERIPH_SENSE;
1663 1.38.4.1 nathanw periph->periph_xscheck = NULL;
1664 1.38.4.1 nathanw switch(error) {
1665 1.38.4.1 nathanw case 0:
1666 1.38.4.1 nathanw /* we have a valid sense */
1667 1.38.4.1 nathanw xs->error = XS_SENSE;
1668 1.38.4.1 nathanw return;
1669 1.38.4.1 nathanw case EINTR:
1670 1.38.4.1 nathanw /* REQUEST_SENSE interrupted by bus reset. */
1671 1.38.4.1 nathanw xs->error = XS_RESET;
1672 1.38.4.1 nathanw return;
1673 1.38.4.1 nathanw case EIO:
1674 1.38.4.1 nathanw 		/* request sense couldn't be performed */
1675 1.38.4.1 nathanw /*
1676 1.38.4.1 nathanw 		 * XXX this isn't quite right, but we don't have anything
1677 1.38.4.1 nathanw 		 * better for now
1678 1.38.4.1 nathanw */
1679 1.38.4.1 nathanw xs->error = XS_DRIVER_STUFFUP;
1680 1.38.4.1 nathanw return;
1681 1.38.4.1 nathanw default:
1682 1.38.4.1 nathanw /* Notify that request sense failed. */
1683 1.38.4.1 nathanw xs->error = XS_DRIVER_STUFFUP;
1684 1.38.4.1 nathanw scsipi_printaddr(periph);
1685 1.38.4.1 nathanw printf("request sense failed with error %d\n", error);
1686 1.38.4.1 nathanw return;
1687 1.38.4.1 nathanw }
1688 1.38.4.1 nathanw }
1689 1.38.4.1 nathanw
1690 1.38.4.1 nathanw /*
1691 1.38.4.1 nathanw * scsipi_enqueue:
1692 1.38.4.1 nathanw *
1693 1.38.4.1 nathanw * Enqueue an xfer on a channel.
1694 1.14 thorpej */
1695 1.14 thorpej int
1696 1.38.4.1 nathanw scsipi_enqueue(xs)
1697 1.38.4.1 nathanw struct scsipi_xfer *xs;
1698 1.14 thorpej {
1699 1.38.4.1 nathanw struct scsipi_channel *chan = xs->xs_periph->periph_channel;
1700 1.38.4.1 nathanw struct scsipi_xfer *qxs;
1701 1.38.4.1 nathanw int s;
1702 1.14 thorpej
1703 1.14 thorpej s = splbio();
1704 1.38.4.1 nathanw
1705 1.38.4.1 nathanw /*
1706 1.38.4.1 nathanw * If the xfer is to be polled, and there are already jobs on
1707 1.38.4.1 nathanw * the queue, we can't proceed.
1708 1.38.4.1 nathanw */
1709 1.38.4.1 nathanw if ((xs->xs_control & XS_CTL_POLL) != 0 &&
1710 1.38.4.1 nathanw TAILQ_FIRST(&chan->chan_queue) != NULL) {
1711 1.38.4.1 nathanw splx(s);
1712 1.38.4.1 nathanw xs->error = XS_DRIVER_STUFFUP;
1713 1.38.4.1 nathanw return (EAGAIN);
1714 1.38.4.1 nathanw }
1715 1.38.4.1 nathanw
1716 1.38.4.1 nathanw /*
1717 1.38.4.1 nathanw * If we have an URGENT xfer, it's an error recovery command
1718 1.38.4.1 nathanw * and it should just go on the head of the channel's queue.
1719 1.38.4.1 nathanw */
1720 1.38.4.1 nathanw if (xs->xs_control & XS_CTL_URGENT) {
1721 1.38.4.1 nathanw TAILQ_INSERT_HEAD(&chan->chan_queue, xs, channel_q);
1722 1.38.4.1 nathanw goto out;
1723 1.38.4.1 nathanw }
1724 1.38.4.1 nathanw
1725 1.38.4.1 nathanw /*
1726 1.38.4.1 nathanw * If this xfer has already been on the queue before, we
1727 1.38.4.1 nathanw * need to reinsert it in the correct order. That order is:
1728 1.38.4.1 nathanw *
1729 1.38.4.1 nathanw * Immediately before the first xfer for this periph
1730 1.38.4.1 nathanw * with a requeuecnt less than xs->xs_requeuecnt.
1731 1.38.4.1 nathanw *
1732 1.38.4.1 nathanw * Failing that, at the end of the queue. (We'll end up
1733 1.38.4.1 nathanw * there naturally.)
1734 1.38.4.1 nathanw */
1735 1.38.4.1 nathanw if (xs->xs_requeuecnt != 0) {
1736 1.38.4.1 nathanw for (qxs = TAILQ_FIRST(&chan->chan_queue); qxs != NULL;
1737 1.38.4.1 nathanw qxs = TAILQ_NEXT(qxs, channel_q)) {
1738 1.38.4.1 nathanw if (qxs->xs_periph == xs->xs_periph &&
1739 1.38.4.1 nathanw qxs->xs_requeuecnt < xs->xs_requeuecnt)
1740 1.38.4.1 nathanw break;
1741 1.38.4.1 nathanw }
1742 1.38.4.1 nathanw if (qxs != NULL) {
1743 1.38.4.1 nathanw TAILQ_INSERT_AFTER(&chan->chan_queue, qxs, xs,
1744 1.38.4.1 nathanw channel_q);
1745 1.38.4.1 nathanw goto out;
1746 1.38.4.1 nathanw }
1747 1.14 thorpej }
1748 1.38.4.1 nathanw TAILQ_INSERT_TAIL(&chan->chan_queue, xs, channel_q);
1749 1.38.4.1 nathanw out:
1750 1.38.4.1 nathanw if (xs->xs_control & XS_CTL_THAW_PERIPH)
1751 1.38.4.1 nathanw scsipi_periph_thaw(xs->xs_periph, 1);
1752 1.14 thorpej splx(s);
1753 1.38.4.1 nathanw return (0);
1754 1.14 thorpej }
1755 1.14 thorpej
1756 1.14 thorpej /*
1757 1.38.4.1 nathanw * scsipi_run_queue:
1758 1.38.4.1 nathanw *
1759 1.38.4.1 nathanw * Start as many xfers as possible running on the channel.
1760 1.14 thorpej */
1761 1.14 thorpej void
1762 1.38.4.1 nathanw scsipi_run_queue(chan)
1763 1.38.4.1 nathanw struct scsipi_channel *chan;
1764 1.14 thorpej {
1765 1.38.4.1 nathanw struct scsipi_xfer *xs;
1766 1.38.4.1 nathanw struct scsipi_periph *periph;
1767 1.14 thorpej int s;
1768 1.14 thorpej
1769 1.38.4.1 nathanw for (;;) {
1770 1.38.4.1 nathanw s = splbio();
1771 1.38.4.1 nathanw
1772 1.38.4.1 nathanw /*
1773 1.38.4.1 nathanw * If the channel is frozen, we can't do any work right
1774 1.38.4.1 nathanw * now.
1775 1.38.4.1 nathanw */
1776 1.38.4.1 nathanw if (chan->chan_qfreeze != 0) {
1777 1.38.4.1 nathanw splx(s);
1778 1.38.4.1 nathanw return;
1779 1.38.4.1 nathanw }
1780 1.38.4.1 nathanw
1781 1.38.4.1 nathanw /*
1782 1.38.4.1 nathanw * Look for work to do, and make sure we can do it.
1783 1.38.4.1 nathanw */
1784 1.38.4.1 nathanw for (xs = TAILQ_FIRST(&chan->chan_queue); xs != NULL;
1785 1.38.4.1 nathanw xs = TAILQ_NEXT(xs, channel_q)) {
1786 1.38.4.1 nathanw periph = xs->xs_periph;
1787 1.38.4.1 nathanw
1788 1.38.4.1 nathanw if ((periph->periph_sent >= periph->periph_openings) ||
1789 1.38.4.1 nathanw periph->periph_qfreeze != 0 ||
1790 1.38.4.1 nathanw (periph->periph_flags & PERIPH_UNTAG) != 0)
1791 1.38.4.1 nathanw continue;
1792 1.38.4.1 nathanw
1793 1.38.4.1 nathanw if ((periph->periph_flags &
1794 1.38.4.1 nathanw (PERIPH_RECOVERING | PERIPH_SENSE)) != 0 &&
1795 1.38.4.1 nathanw (xs->xs_control & XS_CTL_URGENT) == 0)
1796 1.38.4.1 nathanw continue;
1797 1.38.4.1 nathanw
1798 1.38.4.1 nathanw /*
1799 1.38.4.1 nathanw * We can issue this xfer!
1800 1.38.4.1 nathanw */
1801 1.38.4.1 nathanw goto got_one;
1802 1.38.4.1 nathanw }
1803 1.38.4.1 nathanw
1804 1.38.4.1 nathanw /*
1805 1.38.4.1 nathanw * Can't find any work to do right now.
1806 1.38.4.1 nathanw */
1807 1.38.4.1 nathanw splx(s);
1808 1.38.4.1 nathanw return;
1809 1.38.4.1 nathanw
1810 1.38.4.1 nathanw got_one:
1811 1.38.4.1 nathanw /*
1812 1.38.4.1 nathanw * Have an xfer to run. Allocate a resource from
1813 1.38.4.1 nathanw * the adapter to run it. If we can't allocate that
1814 1.38.4.1 nathanw * resource, we don't dequeue the xfer.
1815 1.38.4.1 nathanw */
1816 1.38.4.1 nathanw if (scsipi_get_resource(chan) == 0) {
1817 1.38.4.1 nathanw /*
1818 1.38.4.1 nathanw * Adapter is out of resources. If the adapter
1819 1.38.4.1 nathanw * supports it, attempt to grow them.
1820 1.38.4.1 nathanw */
1821 1.38.4.1 nathanw if (scsipi_grow_resources(chan) == 0) {
1822 1.38.4.1 nathanw /*
1823 1.38.4.1 nathanw * Wasn't able to grow resources,
1824 1.38.4.1 nathanw * nothing more we can do.
1825 1.38.4.1 nathanw */
1826 1.38.4.1 nathanw if (xs->xs_control & XS_CTL_POLL) {
1827 1.38.4.1 nathanw scsipi_printaddr(xs->xs_periph);
1828 1.38.4.1 nathanw printf("polling command but no "
1829 1.38.4.1 nathanw "adapter resources");
1830 1.38.4.1 nathanw /* We'll panic shortly... */
1831 1.38.4.1 nathanw }
1832 1.38.4.1 nathanw splx(s);
1833 1.38.4.1 nathanw
1834 1.38.4.1 nathanw /*
1835 1.38.4.1 nathanw * XXX: We should be able to note that
1836 1.38.4.1 nathanw 				 * XXX: resources are needed here!
1837 1.38.4.1 nathanw */
1838 1.38.4.1 nathanw return;
1839 1.38.4.1 nathanw }
1840 1.38.4.1 nathanw /*
1841 1.38.4.1 nathanw * scsipi_grow_resources() allocated the resource
1842 1.38.4.1 nathanw * for us.
1843 1.38.4.1 nathanw */
1844 1.38.4.1 nathanw }
1845 1.38.4.1 nathanw
1846 1.38.4.1 nathanw /*
1847 1.38.4.1 nathanw * We have a resource to run this xfer, do it!
1848 1.38.4.1 nathanw */
1849 1.38.4.1 nathanw TAILQ_REMOVE(&chan->chan_queue, xs, channel_q);
1850 1.38.4.1 nathanw
1851 1.38.4.1 nathanw /*
1852 1.38.4.1 nathanw * If the command is to be tagged, allocate a tag ID
1853 1.38.4.1 nathanw * for it.
1854 1.38.4.1 nathanw */
1855 1.38.4.1 nathanw if (XS_CTL_TAGTYPE(xs) != 0)
1856 1.38.4.1 nathanw scsipi_get_tag(xs);
1857 1.38.4.1 nathanw else
1858 1.38.4.1 nathanw periph->periph_flags |= PERIPH_UNTAG;
1859 1.38.4.1 nathanw periph->periph_sent++;
1860 1.38.4.1 nathanw splx(s);
1861 1.38.4.1 nathanw
1862 1.38.4.1 nathanw scsipi_adapter_request(chan, ADAPTER_REQ_RUN_XFER, xs);
1863 1.38.4.1 nathanw }
1864 1.38.4.1 nathanw #ifdef DIAGNOSTIC
1865 1.38.4.1 nathanw panic("scsipi_run_queue: impossible");
1866 1.38.4.1 nathanw #endif
1867 1.38.4.1 nathanw }
1868 1.38.4.1 nathanw
1869 1.38.4.1 nathanw /*
1870 1.38.4.1 nathanw * scsipi_execute_xs:
1871 1.38.4.1 nathanw *
1872 1.38.4.1 nathanw * Begin execution of an xfer, waiting for it to complete, if necessary.
1873 1.38.4.1 nathanw */
1874 1.38.4.1 nathanw int
1875 1.38.4.1 nathanw scsipi_execute_xs(xs)
1876 1.38.4.1 nathanw struct scsipi_xfer *xs;
1877 1.38.4.1 nathanw {
1878 1.38.4.1 nathanw struct scsipi_periph *periph = xs->xs_periph;
1879 1.38.4.1 nathanw struct scsipi_channel *chan = periph->periph_channel;
1880 1.38.4.9 nathanw int oasync, async, poll, retries, error, s;
1881 1.38.4.1 nathanw
1882 1.38.4.1 nathanw xs->xs_status &= ~XS_STS_DONE;
1883 1.38.4.1 nathanw xs->error = XS_NOERROR;
1884 1.38.4.1 nathanw xs->resid = xs->datalen;
1885 1.38.4.1 nathanw xs->status = SCSI_OK;
1886 1.38.4.1 nathanw
1887 1.38.4.1 nathanw #ifdef SCSIPI_DEBUG
1888 1.38.4.1 nathanw if (xs->xs_periph->periph_dbflags & SCSIPI_DB3) {
1889 1.38.4.1 nathanw printf("scsipi_execute_xs: ");
1890 1.38.4.1 nathanw show_scsipi_xs(xs);
1891 1.38.4.1 nathanw printf("\n");
1892 1.38.4.1 nathanw }
1893 1.38.4.1 nathanw #endif
1894 1.38.4.1 nathanw
1895 1.38.4.1 nathanw /*
1896 1.38.4.1 nathanw * Deal with command tagging:
1897 1.38.4.1 nathanw *
1898 1.38.4.1 nathanw * - If the device's current operating mode doesn't
1899 1.38.4.1 nathanw * include tagged queueing, clear the tag mask.
1900 1.38.4.1 nathanw *
1901 1.38.4.1 nathanw * - If the device's current operating mode *does*
1902 1.38.4.1 nathanw * include tagged queueing, set the tag_type in
1903 1.38.4.1 nathanw * the xfer to the appropriate byte for the tag
1904 1.38.4.1 nathanw * message.
1905 1.38.4.1 nathanw */
1906 1.38.4.1 nathanw if ((PERIPH_XFER_MODE(periph) & PERIPH_CAP_TQING) == 0 ||
1907 1.38.4.1 nathanw (xs->xs_control & XS_CTL_REQSENSE)) {
1908 1.38.4.1 nathanw xs->xs_control &= ~XS_CTL_TAGMASK;
1909 1.38.4.1 nathanw xs->xs_tag_type = 0;
1910 1.38.4.1 nathanw } else {
1911 1.38.4.1 nathanw /*
1912 1.38.4.1 nathanw * If the request doesn't specify a tag, give Head
1913 1.38.4.1 nathanw * tags to URGENT operations and Ordered tags to
1914 1.38.4.1 nathanw * everything else.
1915 1.38.4.1 nathanw */
1916 1.38.4.1 nathanw if (XS_CTL_TAGTYPE(xs) == 0) {
1917 1.38.4.1 nathanw if (xs->xs_control & XS_CTL_URGENT)
1918 1.38.4.1 nathanw xs->xs_control |= XS_CTL_HEAD_TAG;
1919 1.38.4.1 nathanw else
1920 1.38.4.1 nathanw xs->xs_control |= XS_CTL_ORDERED_TAG;
1921 1.38.4.1 nathanw }
1922 1.38.4.1 nathanw
1923 1.38.4.1 nathanw switch (XS_CTL_TAGTYPE(xs)) {
1924 1.38.4.1 nathanw case XS_CTL_ORDERED_TAG:
1925 1.38.4.1 nathanw xs->xs_tag_type = MSG_ORDERED_Q_TAG;
1926 1.38.4.1 nathanw break;
1927 1.38.4.1 nathanw
1928 1.38.4.1 nathanw case XS_CTL_SIMPLE_TAG:
1929 1.38.4.1 nathanw xs->xs_tag_type = MSG_SIMPLE_Q_TAG;
1930 1.38.4.1 nathanw break;
1931 1.38.4.1 nathanw
1932 1.38.4.1 nathanw case XS_CTL_HEAD_TAG:
1933 1.38.4.1 nathanw xs->xs_tag_type = MSG_HEAD_OF_Q_TAG;
1934 1.38.4.1 nathanw break;
1935 1.38.4.1 nathanw
1936 1.38.4.1 nathanw default:
1937 1.38.4.1 nathanw scsipi_printaddr(periph);
1938 1.38.4.1 nathanw printf("invalid tag mask 0x%08x\n",
1939 1.38.4.1 nathanw XS_CTL_TAGTYPE(xs));
1940 1.38.4.1 nathanw panic("scsipi_execute_xs");
1941 1.38.4.1 nathanw }
1942 1.38.4.1 nathanw }
1943 1.38.4.1 nathanw
1944 1.38.4.1 nathanw 	/* If the adapter wants us to poll, poll. */
1945 1.38.4.1 nathanw if (chan->chan_adapter->adapt_flags & SCSIPI_ADAPT_POLL_ONLY)
1946 1.38.4.1 nathanw xs->xs_control |= XS_CTL_POLL;
1947 1.38.4.1 nathanw
1948 1.38.4.1 nathanw /*
1949 1.38.4.1 nathanw * If we don't yet have a completion thread, or we are to poll for
1950 1.38.4.1 nathanw * completion, clear the ASYNC flag.
1951 1.38.4.1 nathanw */
1952 1.38.4.9 nathanw oasync = (xs->xs_control & XS_CTL_ASYNC);
1953 1.38.4.1 nathanw if (chan->chan_thread == NULL || (xs->xs_control & XS_CTL_POLL) != 0)
1954 1.38.4.1 nathanw xs->xs_control &= ~XS_CTL_ASYNC;
1955 1.38.4.1 nathanw
1956 1.38.4.1 nathanw async = (xs->xs_control & XS_CTL_ASYNC);
1957 1.38.4.1 nathanw poll = (xs->xs_control & XS_CTL_POLL);
1958 1.38.4.1 nathanw retries = xs->xs_retries; /* for polling commands */
1959 1.38.4.1 nathanw
1960 1.38.4.1 nathanw #ifdef DIAGNOSTIC
1961 1.38.4.9 nathanw if (oasync != 0 && xs->bp == NULL)
1962 1.38.4.1 nathanw panic("scsipi_execute_xs: XS_CTL_ASYNC but no buf");
1963 1.38.4.1 nathanw #endif
1964 1.38.4.1 nathanw
1965 1.38.4.1 nathanw /*
1966 1.38.4.1 nathanw * Enqueue the transfer. If we're not polling for completion, this
1967 1.38.4.1 nathanw * should ALWAYS return `no error'.
1968 1.38.4.1 nathanw */
1969 1.38.4.1 nathanw try_again:
1970 1.38.4.1 nathanw error = scsipi_enqueue(xs);
1971 1.38.4.1 nathanw if (error) {
1972 1.38.4.1 nathanw if (poll == 0) {
1973 1.38.4.1 nathanw scsipi_printaddr(periph);
1974 1.38.4.1 nathanw printf("not polling, but enqueue failed with %d\n",
1975 1.38.4.1 nathanw error);
1976 1.38.4.1 nathanw panic("scsipi_execute_xs");
1977 1.38.4.1 nathanw }
1978 1.38.4.1 nathanw
1979 1.38.4.1 nathanw scsipi_printaddr(periph);
1980 1.38.4.1 nathanw printf("failed to enqueue polling command");
1981 1.38.4.1 nathanw if (retries != 0) {
1982 1.38.4.1 nathanw printf(", retrying...\n");
1983 1.38.4.1 nathanw delay(1000000);
1984 1.38.4.1 nathanw retries--;
1985 1.38.4.1 nathanw goto try_again;
1986 1.38.4.1 nathanw }
1987 1.38.4.1 nathanw printf("\n");
1988 1.38.4.1 nathanw goto free_xs;
1989 1.38.4.1 nathanw }
1990 1.38.4.1 nathanw
1991 1.38.4.1 nathanw restarted:
1992 1.38.4.1 nathanw scsipi_run_queue(chan);
1993 1.38.4.1 nathanw
1994 1.38.4.1 nathanw /*
1995 1.38.4.1 nathanw * The xfer is enqueued, and possibly running. If it's to be
1996 1.38.4.1 nathanw * completed asynchronously, just return now.
1997 1.38.4.1 nathanw */
1998 1.38.4.1 nathanw if (async)
1999 1.38.4.1 nathanw return (EJUSTRETURN);
2000 1.38.4.1 nathanw
2001 1.38.4.1 nathanw /*
2002 1.38.4.1 nathanw * Not an asynchronous command; wait for it to complete.
2003 1.38.4.1 nathanw */
2004 1.38.4.1 nathanw s = splbio();
2005 1.38.4.1 nathanw while ((xs->xs_status & XS_STS_DONE) == 0) {
2006 1.38.4.1 nathanw if (poll) {
2007 1.38.4.1 nathanw scsipi_printaddr(periph);
2008 1.38.4.1 nathanw printf("polling command not done\n");
2009 1.38.4.1 nathanw panic("scsipi_execute_xs");
2010 1.38.4.1 nathanw }
2011 1.38.4.1 nathanw (void) tsleep(xs, PRIBIO, "xscmd", 0);
2012 1.38.4.1 nathanw }
2013 1.38.4.1 nathanw splx(s);
2014 1.38.4.1 nathanw
2015 1.38.4.1 nathanw /*
2016 1.38.4.1 nathanw * Command is complete. scsipi_done() has awakened us to perform
2017 1.38.4.1 nathanw * the error handling.
2018 1.38.4.1 nathanw */
2019 1.38.4.1 nathanw error = scsipi_complete(xs);
2020 1.38.4.1 nathanw if (error == ERESTART)
2021 1.38.4.1 nathanw goto restarted;
2022 1.38.4.1 nathanw
2023 1.38.4.9 nathanw /*
2024 1.38.4.9 nathanw 	 * If it was meant to run async and we cleared async ourselves,
2025 1.38.4.9 nathanw 	 * don't return an error here.  It has already been handled.
2026 1.38.4.9 nathanw */
2027 1.38.4.9 nathanw if (oasync)
2028 1.38.4.9 nathanw error = EJUSTRETURN;
2029 1.38.4.1 nathanw /*
2030 1.38.4.1 nathanw * Command completed successfully or fatal error occurred. Fall
2031 1.38.4.1 nathanw * into....
2032 1.38.4.1 nathanw */
2033 1.38.4.1 nathanw free_xs:
2034 1.38.4.1 nathanw s = splbio();
2035 1.38.4.1 nathanw scsipi_put_xs(xs);
2036 1.38.4.1 nathanw splx(s);
2037 1.38.4.1 nathanw
2038 1.38.4.1 nathanw /*
2039 1.38.4.1 nathanw * Kick the queue, keep it running in case it stopped for some
2040 1.38.4.1 nathanw * reason.
2041 1.38.4.1 nathanw */
2042 1.38.4.1 nathanw scsipi_run_queue(chan);
2043 1.38.4.1 nathanw
2044 1.38.4.1 nathanw return (error);
2045 1.38.4.1 nathanw }
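
/*
 * Illustrative sketch only (kept under #if 0, never compiled): the two
 * completion disciplines handled by scsipi_execute_xs() above, reached
 * via a helper such as scsipi_test_unit_ready().  With XS_CTL_POLL the
 * adapter is polled and we never sleep; without it the caller tsleep()s
 * on the xfer until scsipi_done() wakes it.  "example_probe" is a
 * made-up name.
 */
#if 0
static int
example_probe(periph, can_sleep)
	struct scsipi_periph *periph;
	int can_sleep;
{

	if (can_sleep) {
		/* Sleeps in scsipi_execute_xs() until XS_STS_DONE is set. */
		return (scsipi_test_unit_ready(periph, 0));
	}
	/* Polled: usable from contexts that must not sleep. */
	return (scsipi_test_unit_ready(periph,
	    XS_CTL_POLL | XS_CTL_NOSLEEP));
}
#endif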
2046 1.38.4.1 nathanw
2047 1.38.4.1 nathanw /*
2048 1.38.4.1 nathanw * scsipi_completion_thread:
2049 1.38.4.1 nathanw *
2050 1.38.4.1 nathanw * This is the completion thread. We wait for errors on
2051 1.38.4.1 nathanw * asynchronous xfers, and perform the error handling
2052 1.38.4.1 nathanw * function, restarting the command, if necessary.
2053 1.38.4.1 nathanw */
2054 1.38.4.1 nathanw void
2055 1.38.4.1 nathanw scsipi_completion_thread(arg)
2056 1.38.4.1 nathanw void *arg;
2057 1.38.4.1 nathanw {
2058 1.38.4.1 nathanw struct scsipi_channel *chan = arg;
2059 1.38.4.1 nathanw struct scsipi_xfer *xs;
2060 1.38.4.1 nathanw int s;
2061 1.38.4.1 nathanw
2062 1.38.4.4 nathanw s = splbio();
2063 1.38.4.4 nathanw chan->chan_flags |= SCSIPI_CHAN_TACTIVE;
2064 1.38.4.4 nathanw splx(s);
2065 1.38.4.1 nathanw for (;;) {
2066 1.38.4.1 nathanw s = splbio();
2067 1.38.4.1 nathanw xs = TAILQ_FIRST(&chan->chan_complete);
2068 1.38.4.5 nathanw if (xs == NULL && chan->chan_tflags == 0) {
2069 1.38.4.5 nathanw /* nothing to do; wait */
2070 1.38.4.1 nathanw (void) tsleep(&chan->chan_complete, PRIBIO,
2071 1.38.4.1 nathanw "sccomp", 0);
2072 1.38.4.1 nathanw splx(s);
2073 1.38.4.1 nathanw continue;
2074 1.38.4.1 nathanw }
2075 1.38.4.5 nathanw if (chan->chan_tflags & SCSIPI_CHANT_CALLBACK) {
2076 1.38.4.2 nathanw /* call chan_callback from thread context */
2077 1.38.4.5 nathanw chan->chan_tflags &= ~SCSIPI_CHANT_CALLBACK;
2078 1.38.4.2 nathanw chan->chan_callback(chan, chan->chan_callback_arg);
2079 1.38.4.4 nathanw splx(s);
2080 1.38.4.4 nathanw continue;
2081 1.38.4.4 nathanw }
2082 1.38.4.5 nathanw if (chan->chan_tflags & SCSIPI_CHANT_GROWRES) {
2083 1.38.4.5 nathanw /* attempt to get more openings for this channel */
2084 1.38.4.5 nathanw chan->chan_tflags &= ~SCSIPI_CHANT_GROWRES;
2085 1.38.4.5 nathanw scsipi_adapter_request(chan,
2086 1.38.4.5 nathanw ADAPTER_REQ_GROW_RESOURCES, NULL);
2087 1.38.4.5 nathanw scsipi_channel_thaw(chan, 1);
2088 1.38.4.5 nathanw splx(s);
2089 1.38.4.5 nathanw continue;
2090 1.38.4.5 nathanw }
2091 1.38.4.5 nathanw if (chan->chan_tflags & SCSIPI_CHANT_KICK) {
2092 1.38.4.4 nathanw /* explicitly run the queues for this channel */
2093 1.38.4.5 nathanw chan->chan_tflags &= ~SCSIPI_CHANT_KICK;
2094 1.38.4.4 nathanw scsipi_run_queue(chan);
2095 1.38.4.2 nathanw splx(s);
2096 1.38.4.2 nathanw continue;
2097 1.38.4.2 nathanw }
2098 1.38.4.5 nathanw if (chan->chan_tflags & SCSIPI_CHANT_SHUTDOWN) {
2099 1.38.4.1 nathanw splx(s);
2100 1.38.4.1 nathanw break;
2101 1.38.4.1 nathanw }
2102 1.38.4.2 nathanw if (xs) {
2103 1.38.4.2 nathanw TAILQ_REMOVE(&chan->chan_complete, xs, channel_q);
2104 1.38.4.2 nathanw splx(s);
2105 1.38.4.1 nathanw
2106 1.38.4.2 nathanw /*
2107 1.38.4.2 nathanw * Have an xfer with an error; process it.
2108 1.38.4.2 nathanw */
2109 1.38.4.2 nathanw (void) scsipi_complete(xs);
2110 1.38.4.1 nathanw
2111 1.38.4.2 nathanw /*
2112 1.38.4.2 nathanw * Kick the queue; keep it running if it was stopped
2113 1.38.4.2 nathanw * for some reason.
2114 1.38.4.2 nathanw */
2115 1.38.4.2 nathanw scsipi_run_queue(chan);
2116 1.38.4.2 nathanw } else {
2117 1.38.4.2 nathanw splx(s);
2118 1.38.4.2 nathanw }
2119 1.38.4.1 nathanw }
2120 1.38.4.1 nathanw
2121 1.38.4.1 nathanw chan->chan_thread = NULL;
2122 1.38.4.1 nathanw
2123 1.38.4.1 nathanw /* In case parent is waiting for us to exit. */
2124 1.38.4.1 nathanw wakeup(&chan->chan_thread);
2125 1.38.4.1 nathanw
2126 1.38.4.1 nathanw kthread_exit(0);
2127 1.38.4.1 nathanw }
2128 1.38.4.1 nathanw
2129 1.38.4.1 nathanw /*
2130 1.38.4.1 nathanw * scsipi_create_completion_thread:
2131 1.38.4.1 nathanw *
2132 1.38.4.1 nathanw * Callback to actually create the completion thread.
2133 1.38.4.1 nathanw */
2134 1.38.4.1 nathanw void
2135 1.38.4.1 nathanw scsipi_create_completion_thread(arg)
2136 1.38.4.1 nathanw void *arg;
2137 1.38.4.1 nathanw {
2138 1.38.4.1 nathanw struct scsipi_channel *chan = arg;
2139 1.38.4.1 nathanw struct scsipi_adapter *adapt = chan->chan_adapter;
2140 1.38.4.1 nathanw
2141 1.38.4.1 nathanw if (kthread_create1(scsipi_completion_thread, chan,
2142 1.38.4.10 nathanw &chan->chan_thread, "%s", chan->chan_name)) {
2143 1.38.4.1 nathanw printf("%s: unable to create completion thread for "
2144 1.38.4.1 nathanw "channel %d\n", adapt->adapt_dev->dv_xname,
2145 1.38.4.1 nathanw chan->chan_channel);
2146 1.38.4.1 nathanw panic("scsipi_create_completion_thread");
2147 1.38.4.1 nathanw }
2148 1.38.4.1 nathanw }
2149 1.38.4.1 nathanw
2150 1.38.4.1 nathanw /*
2151 1.38.4.2 nathanw * scsipi_thread_call_callback:
2152 1.38.4.2 nathanw *
2153 1.38.4.2 nathanw  *	Request that a callback be run from the completion thread.
2154 1.38.4.2 nathanw */
2155 1.38.4.2 nathanw int
2156 1.38.4.2 nathanw scsipi_thread_call_callback(chan, callback, arg)
2157 1.38.4.2 nathanw struct scsipi_channel *chan;
2158 1.38.4.2 nathanw void (*callback) __P((struct scsipi_channel *, void *));
2159 1.38.4.2 nathanw void *arg;
2160 1.38.4.2 nathanw {
2161 1.38.4.2 nathanw int s;
2162 1.38.4.2 nathanw
2163 1.38.4.2 nathanw s = splbio();
2164 1.38.4.5 nathanw if ((chan->chan_flags & SCSIPI_CHAN_TACTIVE) == 0) {
2165 1.38.4.5 nathanw /* kernel thread doesn't exist yet */
2166 1.38.4.5 nathanw splx(s);
2167 1.38.4.5 nathanw return ESRCH;
2168 1.38.4.5 nathanw }
2169 1.38.4.5 nathanw if (chan->chan_tflags & SCSIPI_CHANT_CALLBACK) {
2170 1.38.4.2 nathanw splx(s);
2171 1.38.4.2 nathanw return EBUSY;
2172 1.38.4.2 nathanw }
2173 1.38.4.2 nathanw scsipi_channel_freeze(chan, 1);
2174 1.38.4.2 nathanw chan->chan_callback = callback;
2175 1.38.4.2 nathanw chan->chan_callback_arg = arg;
2176 1.38.4.5 nathanw chan->chan_tflags |= SCSIPI_CHANT_CALLBACK;
2177 1.38.4.2 nathanw wakeup(&chan->chan_complete);
2178 1.38.4.2 nathanw splx(s);
2179 1.38.4.2 nathanw return(0);
2180 1.38.4.2 nathanw }
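
/*
 * Illustrative sketch only (kept under #if 0, never compiled): an adapter
 * deferring recovery work to the channel's completion thread via the
 * function above.  scsipi_thread_call_callback() freezes the channel and
 * the SCSIPI_CHANT_CALLBACK handler in scsipi_completion_thread() does
 * not thaw it, so the callback is presumably expected to do so itself.
 * "example_recover" and "example_defer_recovery" are made-up names.
 */
#if 0
static void
example_recover(chan, arg)
	struct scsipi_channel *chan;
	void *arg;
{

	/* ...heavy recovery work, done in thread context... */

	/* Undo the freeze taken by scsipi_thread_call_callback(). */
	scsipi_channel_thaw(chan, 1);
}

static int
example_defer_recovery(chan, softc)
	struct scsipi_channel *chan;
	void *softc;
{

	/*
	 * May be called from interrupt context; returns EBUSY if a
	 * callback is already pending, ESRCH if there is no thread yet.
	 */
	return (scsipi_thread_call_callback(chan, example_recover, softc));
}
#endif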
2181 1.38.4.2 nathanw
2182 1.38.4.2 nathanw /*
2183 1.38.4.1 nathanw * scsipi_async_event:
2184 1.38.4.1 nathanw *
2185 1.38.4.1 nathanw * Handle an asynchronous event from an adapter.
2186 1.38.4.1 nathanw */
2187 1.38.4.1 nathanw void
2188 1.38.4.1 nathanw scsipi_async_event(chan, event, arg)
2189 1.38.4.1 nathanw struct scsipi_channel *chan;
2190 1.38.4.1 nathanw scsipi_async_event_t event;
2191 1.38.4.1 nathanw void *arg;
2192 1.38.4.1 nathanw {
2193 1.38.4.1 nathanw int s;
2194 1.38.4.1 nathanw
2195 1.38.4.1 nathanw s = splbio();
2196 1.38.4.1 nathanw switch (event) {
2197 1.38.4.1 nathanw case ASYNC_EVENT_MAX_OPENINGS:
2198 1.38.4.1 nathanw scsipi_async_event_max_openings(chan,
2199 1.38.4.1 nathanw (struct scsipi_max_openings *)arg);
2200 1.38.4.1 nathanw break;
2201 1.38.4.1 nathanw
2202 1.38.4.1 nathanw case ASYNC_EVENT_XFER_MODE:
2203 1.38.4.1 nathanw scsipi_async_event_xfer_mode(chan,
2204 1.38.4.1 nathanw (struct scsipi_xfer_mode *)arg);
2205 1.38.4.1 nathanw break;
2206 1.38.4.1 nathanw case ASYNC_EVENT_RESET:
2207 1.38.4.1 nathanw scsipi_async_event_channel_reset(chan);
2208 1.38.4.1 nathanw break;
2209 1.38.4.1 nathanw }
2210 1.38.4.1 nathanw splx(s);
2211 1.38.4.1 nathanw }
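
/*
 * Illustrative sketch only (kept under #if 0, never compiled): how an
 * adapter would report a freshly negotiated transfer agreement through
 * scsipi_async_event() above.  The fields are those consumed by
 * scsipi_async_event_xfer_mode() below; the sample values assume a sync
 * factor of 0x0c (a 50.0 ns period per the scsipi_syncparams table
 * further down) and an offset of 16.  "example_report_xfer_mode" is a
 * made-up name.
 */
#if 0
static void
example_report_xfer_mode(chan, target)
	struct scsipi_channel *chan;
	int target;
{
	struct scsipi_xfer_mode xm;

	xm.xm_target = target;
	xm.xm_mode = PERIPH_CAP_SYNC | PERIPH_CAP_WIDE16 | PERIPH_CAP_TQING;
	xm.xm_period = 0x0c;		/* sync factor */
	xm.xm_offset = 16;

	/* scsipi_async_event() raises splbio() itself. */
	scsipi_async_event(chan, ASYNC_EVENT_XFER_MODE, &xm);
}
#endif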
2212 1.38.4.1 nathanw
2213 1.38.4.1 nathanw /*
2214 1.38.4.1 nathanw * scsipi_print_xfer_mode:
2215 1.38.4.1 nathanw *
2216 1.38.4.1 nathanw * Print a periph's capabilities.
2217 1.38.4.1 nathanw */
2218 1.38.4.1 nathanw void
2219 1.38.4.1 nathanw scsipi_print_xfer_mode(periph)
2220 1.38.4.1 nathanw struct scsipi_periph *periph;
2221 1.38.4.1 nathanw {
2222 1.38.4.1 nathanw int period, freq, speed, mbs;
2223 1.38.4.1 nathanw
2224 1.38.4.1 nathanw if ((periph->periph_flags & PERIPH_MODE_VALID) == 0)
2225 1.38.4.1 nathanw return;
2226 1.38.4.1 nathanw
2227 1.38.4.1 nathanw printf("%s: ", periph->periph_dev->dv_xname);
2228 1.38.4.1 nathanw if (periph->periph_mode & PERIPH_CAP_SYNC) {
2229 1.38.4.1 nathanw period = scsipi_sync_factor_to_period(periph->periph_period);
2230 1.38.4.1 nathanw printf("sync (%d.%dns offset %d)",
2231 1.38.4.1 nathanw period / 10, period % 10, periph->periph_offset);
2232 1.38.4.1 nathanw } else
2233 1.38.4.1 nathanw printf("async");
2234 1.38.4.1 nathanw
2235 1.38.4.1 nathanw if (periph->periph_mode & PERIPH_CAP_WIDE32)
2236 1.38.4.1 nathanw printf(", 32-bit");
2237 1.38.4.1 nathanw else if (periph->periph_mode & PERIPH_CAP_WIDE16)
2238 1.38.4.1 nathanw printf(", 16-bit");
2239 1.38.4.1 nathanw else
2240 1.38.4.1 nathanw printf(", 8-bit");
2241 1.38.4.1 nathanw
2242 1.38.4.1 nathanw if (periph->periph_mode & PERIPH_CAP_SYNC) {
2243 1.38.4.1 nathanw freq = scsipi_sync_factor_to_freq(periph->periph_period);
2244 1.38.4.1 nathanw speed = freq;
2245 1.38.4.1 nathanw if (periph->periph_mode & PERIPH_CAP_WIDE32)
2246 1.38.4.1 nathanw speed *= 4;
2247 1.38.4.1 nathanw else if (periph->periph_mode & PERIPH_CAP_WIDE16)
2248 1.38.4.1 nathanw speed *= 2;
2249 1.38.4.1 nathanw mbs = speed / 1000;
2250 1.38.4.1 nathanw if (mbs > 0)
2251 1.38.4.1 nathanw printf(" (%d.%03dMB/s)", mbs, speed % 1000);
2252 1.38.4.1 nathanw else
2253 1.38.4.1 nathanw printf(" (%dKB/s)", speed % 1000);
2254 1.38.4.1 nathanw }
2255 1.38.4.1 nathanw
2256 1.38.4.1 nathanw printf(" transfers");
2257 1.38.4.1 nathanw
2258 1.38.4.1 nathanw if (periph->periph_mode & PERIPH_CAP_TQING)
2259 1.38.4.1 nathanw printf(", tagged queueing");
2260 1.38.4.1 nathanw
2261 1.38.4.1 nathanw printf("\n");
2262 1.38.4.1 nathanw }
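
/*
 * Worked example of the output produced above: for a hypothetical sd0
 * with PERIPH_CAP_SYNC | PERIPH_CAP_WIDE16 | PERIPH_CAP_TQING, a sync
 * factor of 0x0c (period 500, i.e. 50.0 ns) and offset 16, the factor
 * converts to a frequency of 20000 (kHz), is doubled for the 16-bit bus
 * to 40000, and the routine prints:
 *
 *	sd0: sync (50.0ns offset 16), 16-bit (40.000MB/s) transfers,
 *	tagged queueing
 */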
2263 1.38.4.1 nathanw
2264 1.38.4.1 nathanw /*
2265 1.38.4.1 nathanw * scsipi_async_event_max_openings:
2266 1.38.4.1 nathanw *
2267 1.38.4.1 nathanw * Update the maximum number of outstanding commands a
2268 1.38.4.1 nathanw * device may have.
2269 1.38.4.1 nathanw */
2270 1.38.4.1 nathanw void
2271 1.38.4.1 nathanw scsipi_async_event_max_openings(chan, mo)
2272 1.38.4.1 nathanw struct scsipi_channel *chan;
2273 1.38.4.1 nathanw struct scsipi_max_openings *mo;
2274 1.38.4.1 nathanw {
2275 1.38.4.1 nathanw struct scsipi_periph *periph;
2276 1.38.4.1 nathanw int minlun, maxlun;
2277 1.38.4.1 nathanw
2278 1.38.4.1 nathanw if (mo->mo_lun == -1) {
2279 1.38.4.1 nathanw /*
2280 1.38.4.1 nathanw * Wildcarded; apply it to all LUNs.
2281 1.38.4.1 nathanw */
2282 1.38.4.1 nathanw minlun = 0;
2283 1.38.4.1 nathanw maxlun = chan->chan_nluns - 1;
2284 1.38.4.1 nathanw } else
2285 1.38.4.1 nathanw minlun = maxlun = mo->mo_lun;
2286 1.38.4.1 nathanw
2287 1.38.4.1 nathanw for (; minlun <= maxlun; minlun++) {
2288 1.38.4.1 nathanw periph = scsipi_lookup_periph(chan, mo->mo_target, minlun);
2289 1.38.4.1 nathanw if (periph == NULL)
2290 1.38.4.1 nathanw continue;
2291 1.38.4.1 nathanw
2292 1.38.4.1 nathanw if (mo->mo_openings < periph->periph_openings)
2293 1.38.4.1 nathanw periph->periph_openings = mo->mo_openings;
2294 1.38.4.1 nathanw else if (mo->mo_openings > periph->periph_openings &&
2295 1.38.4.1 nathanw (periph->periph_flags & PERIPH_GROW_OPENINGS) != 0)
2296 1.38.4.1 nathanw periph->periph_openings = mo->mo_openings;
2297 1.38.4.1 nathanw }
2298 1.38.4.1 nathanw }
2299 1.38.4.1 nathanw
2300 1.38.4.1 nathanw /*
2301 1.38.4.1 nathanw * scsipi_async_event_xfer_mode:
2302 1.38.4.1 nathanw *
2303 1.38.4.1 nathanw * Update the xfer mode for all periphs sharing the
2304 1.38.4.1 nathanw * specified I_T Nexus.
2305 1.38.4.1 nathanw */
2306 1.38.4.1 nathanw void
2307 1.38.4.1 nathanw scsipi_async_event_xfer_mode(chan, xm)
2308 1.38.4.1 nathanw struct scsipi_channel *chan;
2309 1.38.4.1 nathanw struct scsipi_xfer_mode *xm;
2310 1.38.4.1 nathanw {
2311 1.38.4.1 nathanw struct scsipi_periph *periph;
2312 1.38.4.1 nathanw int lun, announce, mode, period, offset;
2313 1.38.4.1 nathanw
2314 1.38.4.1 nathanw for (lun = 0; lun < chan->chan_nluns; lun++) {
2315 1.38.4.1 nathanw periph = scsipi_lookup_periph(chan, xm->xm_target, lun);
2316 1.38.4.1 nathanw if (periph == NULL)
2317 1.38.4.1 nathanw continue;
2318 1.38.4.1 nathanw announce = 0;
2319 1.38.4.1 nathanw
2320 1.38.4.1 nathanw /*
2321 1.38.4.1 nathanw * Clamp the xfer mode down to this periph's capabilities.
2322 1.38.4.1 nathanw */
2323 1.38.4.1 nathanw mode = xm->xm_mode & periph->periph_cap;
2324 1.38.4.1 nathanw if (mode & PERIPH_CAP_SYNC) {
2325 1.38.4.1 nathanw period = xm->xm_period;
2326 1.38.4.1 nathanw offset = xm->xm_offset;
2327 1.38.4.1 nathanw } else {
2328 1.38.4.1 nathanw period = 0;
2329 1.38.4.1 nathanw offset = 0;
2330 1.38.4.1 nathanw }
2331 1.38.4.1 nathanw
2332 1.38.4.1 nathanw /*
2333 1.38.4.1 nathanw * If we do not have a valid xfer mode yet, or the parameters
2334 1.38.4.1 nathanw * are different, announce them.
2335 1.38.4.1 nathanw */
2336 1.38.4.1 nathanw if ((periph->periph_flags & PERIPH_MODE_VALID) == 0 ||
2337 1.38.4.1 nathanw periph->periph_mode != mode ||
2338 1.38.4.1 nathanw periph->periph_period != period ||
2339 1.38.4.1 nathanw periph->periph_offset != offset)
2340 1.38.4.1 nathanw announce = 1;
2341 1.38.4.1 nathanw
2342 1.38.4.1 nathanw periph->periph_mode = mode;
2343 1.38.4.1 nathanw periph->periph_period = period;
2344 1.38.4.1 nathanw periph->periph_offset = offset;
2345 1.38.4.1 nathanw periph->periph_flags |= PERIPH_MODE_VALID;
2346 1.38.4.1 nathanw
2347 1.38.4.1 nathanw if (announce)
2348 1.38.4.1 nathanw scsipi_print_xfer_mode(periph);
2349 1.38.4.1 nathanw }
2350 1.38.4.1 nathanw }
2351 1.38.4.1 nathanw
2352 1.38.4.1 nathanw /*
2353 1.38.4.1 nathanw * scsipi_set_xfer_mode:
2354 1.38.4.1 nathanw *
2355 1.38.4.1 nathanw * Set the xfer mode for the specified I_T Nexus.
2356 1.38.4.1 nathanw */
2357 1.38.4.1 nathanw void
2358 1.38.4.1 nathanw scsipi_set_xfer_mode(chan, target, immed)
2359 1.38.4.1 nathanw struct scsipi_channel *chan;
2360 1.38.4.1 nathanw int target, immed;
2361 1.38.4.1 nathanw {
2362 1.38.4.1 nathanw struct scsipi_xfer_mode xm;
2363 1.38.4.1 nathanw struct scsipi_periph *itperiph;
2364 1.38.4.1 nathanw int lun, s;
2365 1.38.4.1 nathanw
2366 1.38.4.1 nathanw /*
2367 1.38.4.1 nathanw * Go to the minimal xfer mode.
2368 1.38.4.1 nathanw */
2369 1.38.4.1 nathanw xm.xm_target = target;
2370 1.38.4.1 nathanw xm.xm_mode = 0;
2371 1.38.4.1 nathanw xm.xm_period = 0; /* ignored */
2372 1.38.4.1 nathanw xm.xm_offset = 0; /* ignored */
2373 1.38.4.1 nathanw
2374 1.38.4.1 nathanw /*
2375 1.38.4.1 nathanw * Find the first LUN we know about on this I_T Nexus.
2376 1.38.4.1 nathanw */
2377 1.38.4.1 nathanw for (lun = 0; lun < chan->chan_nluns; lun++) {
2378 1.38.4.1 nathanw itperiph = scsipi_lookup_periph(chan, target, lun);
2379 1.38.4.1 nathanw if (itperiph != NULL)
2380 1.38.4.1 nathanw break;
2381 1.38.4.1 nathanw }
2382 1.38.4.2 nathanw if (itperiph != NULL) {
2383 1.38.4.1 nathanw xm.xm_mode = itperiph->periph_cap;
2384 1.38.4.2 nathanw /*
2385 1.38.4.2 nathanw * Now issue the request to the adapter.
2386 1.38.4.2 nathanw */
2387 1.38.4.2 nathanw s = splbio();
2388 1.38.4.2 nathanw scsipi_adapter_request(chan, ADAPTER_REQ_SET_XFER_MODE, &xm);
2389 1.38.4.2 nathanw splx(s);
2390 1.38.4.2 nathanw /*
2391 1.38.4.2 nathanw * If we want this to happen immediately, issue a dummy
2392 1.38.4.2 nathanw * command, since most adapters can't really negotiate unless
2393 1.38.4.2 nathanw * they're executing a job.
2394 1.38.4.2 nathanw */
2395 1.38.4.2 nathanw if (immed != 0) {
2396 1.38.4.2 nathanw (void) scsipi_test_unit_ready(itperiph,
2397 1.38.4.2 nathanw XS_CTL_DISCOVERY | XS_CTL_IGNORE_ILLEGAL_REQUEST |
2398 1.38.4.2 nathanw XS_CTL_IGNORE_NOT_READY |
2399 1.38.4.2 nathanw XS_CTL_IGNORE_MEDIA_CHANGE);
2400 1.38.4.2 nathanw }
2401 1.38.4.1 nathanw }
2402 1.38.4.1 nathanw }
2403 1.38.4.1 nathanw
2404 1.38.4.1 nathanw /*
2405 1.38.4.1 nathanw  * scsipi_async_event_channel_reset:
2406 1.38.4.1 nathanw  *
2407 1.38.4.1 nathanw  *	Handle a SCSI bus reset.
2408 1.38.4.1 nathanw  *	Called at splbio().
2409 1.38.4.1 nathanw */
2410 1.38.4.1 nathanw void
2411 1.38.4.1 nathanw scsipi_async_event_channel_reset(chan)
2412 1.38.4.1 nathanw struct scsipi_channel *chan;
2413 1.38.4.1 nathanw {
2414 1.38.4.1 nathanw struct scsipi_xfer *xs, *xs_next;
2415 1.38.4.1 nathanw struct scsipi_periph *periph;
2416 1.38.4.1 nathanw int target, lun;
2417 1.38.4.1 nathanw
2418 1.38.4.1 nathanw /*
2419 1.38.4.1 nathanw 	 * The channel has been reset.  Also mark pending REQUEST_SENSE
2420 1.38.4.1 nathanw 	 * commands as reset, since their sense data is no longer available.
2421 1.38.4.1 nathanw 	 * We can't call scsipi_done() from here, as those commands have not
2422 1.38.4.1 nathanw 	 * been sent to the adapter yet (that would corrupt the accounting).
2423 1.38.4.1 nathanw */
2424 1.38.4.1 nathanw
2425 1.38.4.1 nathanw for (xs = TAILQ_FIRST(&chan->chan_queue); xs != NULL; xs = xs_next) {
2426 1.38.4.1 nathanw xs_next = TAILQ_NEXT(xs, channel_q);
2427 1.38.4.1 nathanw if (xs->xs_control & XS_CTL_REQSENSE) {
2428 1.38.4.1 nathanw TAILQ_REMOVE(&chan->chan_queue, xs, channel_q);
2429 1.38.4.1 nathanw xs->error = XS_RESET;
2430 1.38.4.1 nathanw if ((xs->xs_control & XS_CTL_ASYNC) != 0)
2431 1.38.4.1 nathanw TAILQ_INSERT_TAIL(&chan->chan_complete, xs,
2432 1.38.4.1 nathanw channel_q);
2433 1.38.4.1 nathanw }
2434 1.38.4.1 nathanw }
2435 1.38.4.1 nathanw wakeup(&chan->chan_complete);
2436 1.38.4.1 nathanw /* Catch xs with pending sense which may not have a REQSENSE xs yet */
2437 1.38.4.1 nathanw for (target = 0; target < chan->chan_ntargets; target++) {
2438 1.38.4.1 nathanw if (target == chan->chan_id)
2439 1.38.4.1 nathanw continue;
2440 1.38.4.1 nathanw for (lun = 0; lun < chan->chan_nluns; lun++) {
2441 1.38.4.1 nathanw periph = chan->chan_periphs[target][lun];
2442 1.38.4.1 nathanw if (periph) {
2443 1.38.4.1 nathanw xs = periph->periph_xscheck;
2444 1.38.4.1 nathanw if (xs)
2445 1.38.4.1 nathanw xs->error = XS_RESET;
2446 1.38.4.1 nathanw }
2447 1.38.4.1 nathanw }
2448 1.38.4.1 nathanw }
2449 1.38.4.1 nathanw }
2450 1.38.4.1 nathanw
2451 1.38.4.2 nathanw /*
2452 1.38.4.2 nathanw * scsipi_target_detach:
2453 1.38.4.2 nathanw *
2454 1.38.4.2 nathanw  *	Detach all periphs associated with an I_T Nexus.
2455 1.38.4.2 nathanw  *	Must be called from valid thread context.
2456 1.38.4.2 nathanw */
2457 1.38.4.2 nathanw int
2458 1.38.4.2 nathanw scsipi_target_detach(chan, target, lun, flags)
2459 1.38.4.2 nathanw struct scsipi_channel *chan;
2460 1.38.4.2 nathanw int target, lun;
2461 1.38.4.2 nathanw int flags;
2462 1.38.4.2 nathanw {
2463 1.38.4.2 nathanw struct scsipi_periph *periph;
2464 1.38.4.2 nathanw int ctarget, mintarget, maxtarget;
2465 1.38.4.2 nathanw int clun, minlun, maxlun;
2466 1.38.4.2 nathanw int error;
2467 1.38.4.2 nathanw
2468 1.38.4.2 nathanw if (target == -1) {
2469 1.38.4.2 nathanw mintarget = 0;
2470 1.38.4.2 nathanw maxtarget = chan->chan_ntargets;
2471 1.38.4.2 nathanw } else {
2472 1.38.4.2 nathanw if (target == chan->chan_id)
2473 1.38.4.2 nathanw return EINVAL;
2474 1.38.4.2 nathanw if (target < 0 || target >= chan->chan_ntargets)
2475 1.38.4.2 nathanw return EINVAL;
2476 1.38.4.2 nathanw mintarget = target;
2477 1.38.4.2 nathanw maxtarget = target + 1;
2478 1.38.4.2 nathanw }
2479 1.38.4.2 nathanw
2480 1.38.4.2 nathanw if (lun == -1) {
2481 1.38.4.2 nathanw minlun = 0;
2482 1.38.4.2 nathanw maxlun = chan->chan_nluns;
2483 1.38.4.2 nathanw } else {
2484 1.38.4.2 nathanw if (lun < 0 || lun >= chan->chan_nluns)
2485 1.38.4.2 nathanw return EINVAL;
2486 1.38.4.2 nathanw minlun = lun;
2487 1.38.4.2 nathanw maxlun = lun + 1;
2488 1.38.4.2 nathanw }
2489 1.38.4.2 nathanw
2490 1.38.4.2 nathanw for (ctarget = mintarget; ctarget < maxtarget; ctarget++) {
2491 1.38.4.2 nathanw if (ctarget == chan->chan_id)
2492 1.38.4.2 nathanw continue;
2493 1.38.4.2 nathanw
2494 1.38.4.2 nathanw for (clun = minlun; clun < maxlun; clun++) {
2495 1.38.4.2 nathanw periph = scsipi_lookup_periph(chan, ctarget, clun);
2496 1.38.4.2 nathanw if (periph == NULL)
2497 1.38.4.2 nathanw continue;
2498 1.38.4.2 nathanw error = config_detach(periph->periph_dev, flags);
2499 1.38.4.2 nathanw if (error)
2500 1.38.4.2 nathanw return (error);
2501 1.38.4.2 nathanw scsipi_remove_periph(chan, periph);
2502 1.38.4.2 nathanw free(periph, M_DEVBUF);
2503 1.38.4.2 nathanw }
2504 1.38.4.2 nathanw }
2505 1.38.4.2 nathanw return(0);
2506 1.38.4.2 nathanw }
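
/*
 * Illustrative sketch only (kept under #if 0, never compiled): detaching
 * every periph behind a channel, e.g. from a controller's own detach
 * routine.  Passing -1 for both target and lun wildcards the whole
 * channel, as handled above.  "example_detach_channel" is a made-up
 * name, and DETACH_FORCE is assumed to be the usual autoconf detach
 * flag accepted by config_detach().
 */
#if 0
static int
example_detach_channel(chan)
	struct scsipi_channel *chan;
{

	return (scsipi_target_detach(chan, -1, -1, DETACH_FORCE));
}
#endif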
2507 1.38.4.1 nathanw
2508 1.38.4.1 nathanw /*
2509 1.38.4.1 nathanw * scsipi_adapter_addref:
2510 1.38.4.1 nathanw *
2511 1.38.4.1 nathanw * Add a reference to the adapter pointed to by the provided
2512 1.38.4.1 nathanw * link, enabling the adapter if necessary.
2513 1.38.4.1 nathanw */
2514 1.38.4.1 nathanw int
2515 1.38.4.1 nathanw scsipi_adapter_addref(adapt)
2516 1.38.4.1 nathanw struct scsipi_adapter *adapt;
2517 1.38.4.1 nathanw {
2518 1.38.4.1 nathanw int s, error = 0;
2519 1.38.4.1 nathanw
2520 1.38.4.1 nathanw s = splbio();
2521 1.38.4.1 nathanw if (adapt->adapt_refcnt++ == 0 && adapt->adapt_enable != NULL) {
2522 1.38.4.1 nathanw error = (*adapt->adapt_enable)(adapt->adapt_dev, 1);
2523 1.38.4.1 nathanw if (error)
2524 1.38.4.1 nathanw adapt->adapt_refcnt--;
2525 1.38.4.1 nathanw }
2526 1.38.4.1 nathanw splx(s);
2527 1.38.4.1 nathanw return (error);
2528 1.38.4.1 nathanw }
2529 1.38.4.1 nathanw
2530 1.38.4.1 nathanw /*
2531 1.38.4.1 nathanw * scsipi_adapter_delref:
2532 1.38.4.1 nathanw *
2533 1.38.4.1 nathanw * Delete a reference to the adapter pointed to by the provided
2534 1.38.4.1 nathanw * link, disabling the adapter if possible.
2535 1.38.4.1 nathanw */
2536 1.38.4.1 nathanw void
2537 1.38.4.1 nathanw scsipi_adapter_delref(adapt)
2538 1.38.4.1 nathanw struct scsipi_adapter *adapt;
2539 1.38.4.1 nathanw {
2540 1.38.4.1 nathanw int s;
2541 1.38.4.1 nathanw
2542 1.38.4.1 nathanw s = splbio();
2543 1.38.4.1 nathanw if (adapt->adapt_refcnt-- == 1 && adapt->adapt_enable != NULL)
2544 1.38.4.1 nathanw (void) (*adapt->adapt_enable)(adapt->adapt_dev, 0);
2545 1.38.4.1 nathanw splx(s);
2546 1.38.4.1 nathanw }
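
/*
 * Illustrative sketch only (kept under #if 0, never compiled): a periph
 * driver bracketing an open with the reference-counting routines above,
 * so that an adapter with an adapt_enable hook is enabled only while it
 * is actually in use.  "example_periph_open" is a made-up name.
 */
#if 0
static int
example_periph_open(periph)
	struct scsipi_periph *periph;
{
	struct scsipi_adapter *adapt = periph->periph_channel->chan_adapter;
	int error;

	if ((error = scsipi_adapter_addref(adapt)) != 0)
		return (error);
	error = scsipi_test_unit_ready(periph, XS_CTL_IGNORE_MEDIA_CHANGE);
	if (error)
		scsipi_adapter_delref(adapt);
	return (error);
}
#endif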
2547 1.38.4.1 nathanw
2548 1.38.4.1 nathanw struct scsipi_syncparam {
2549 1.38.4.1 nathanw int ss_factor;
2550 1.38.4.1 nathanw int ss_period; /* ns * 10 */
2551 1.38.4.1 nathanw } scsipi_syncparams[] = {
2552 1.38.4.3 nathanw { 0x09, 125 },
2553 1.38.4.1 nathanw { 0x0a, 250 },
2554 1.38.4.1 nathanw { 0x0b, 303 },
2555 1.38.4.1 nathanw { 0x0c, 500 },
2556 1.38.4.1 nathanw };
2557 1.38.4.1 nathanw const int scsipi_nsyncparams =
2558 1.38.4.1 nathanw sizeof(scsipi_syncparams) / sizeof(scsipi_syncparams[0]);
2559 1.38.4.1 nathanw
2560 1.38.4.1 nathanw int
2561 1.38.4.1 nathanw scsipi_sync_period_to_factor(period)
2562 1.38.4.1 nathanw int period; /* ns * 10 */
2563 1.38.4.1 nathanw {
2564 1.38.4.1 nathanw int i;
2565 1.38.4.1 nathanw
2566 1.38.4.1 nathanw for (i = 0; i < scsipi_nsyncparams; i++) {
2567 1.38.4.1 nathanw if (period <= scsipi_syncparams[i].ss_period)
2568 1.38.4.1 nathanw return (scsipi_syncparams[i].ss_factor);
2569 1.38.4.1 nathanw }
2570 1.38.4.1 nathanw
2571 1.38.4.1 nathanw return ((period / 10) / 4);
2572 1.38.4.1 nathanw }
2573 1.38.4.1 nathanw
2574 1.38.4.1 nathanw int
2575 1.38.4.1 nathanw scsipi_sync_factor_to_period(factor)
2576 1.38.4.1 nathanw int factor;
2577 1.38.4.1 nathanw {
2578 1.38.4.1 nathanw int i;
2579 1.38.4.1 nathanw
2580 1.38.4.1 nathanw for (i = 0; i < scsipi_nsyncparams; i++) {
2581 1.38.4.1 nathanw if (factor == scsipi_syncparams[i].ss_factor)
2582 1.38.4.1 nathanw return (scsipi_syncparams[i].ss_period);
2583 1.38.4.1 nathanw }
2584 1.38.4.1 nathanw
2585 1.38.4.1 nathanw return ((factor * 4) * 10);
2586 1.38.4.1 nathanw }
2587 1.38.4.1 nathanw
2588 1.38.4.1 nathanw int
2589 1.38.4.1 nathanw scsipi_sync_factor_to_freq(factor)
2590 1.38.4.1 nathanw int factor;
2591 1.38.4.1 nathanw {
2592 1.38.4.1 nathanw int i;
2593 1.38.4.1 nathanw
2594 1.38.4.1 nathanw for (i = 0; i < scsipi_nsyncparams; i++) {
2595 1.38.4.1 nathanw if (factor == scsipi_syncparams[i].ss_factor)
2596 1.38.4.1 nathanw return (10000000 / scsipi_syncparams[i].ss_period);
2597 1.38.4.1 nathanw }
2598 1.38.4.1 nathanw
2599 1.38.4.1 nathanw return (10000000 / ((factor * 4) * 10));
2600 1.14 thorpej }
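
/*
 * Worked example of the three conversions above, using the
 * scsipi_syncparams table: a sync factor of 0x0c maps to ss_period 500
 * (i.e. 50.0 ns), and scsipi_sync_factor_to_freq() yields
 * 10000000 / 500 = 20000, i.e. 20 MHz expressed in kHz
 * (scsipi_print_xfer_mode() above scales this by the bus width to get
 * MB/s).  For a factor outside the table, say 0x19, the generic
 * formulas give (0x19 * 4) * 10 = 1000 (100 ns) and
 * 10000000 / 1000 = 10000 (10 MHz).
 */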
2601 1.14 thorpej
2602 1.38.4.1 nathanw #ifdef SCSIPI_DEBUG
2603 1.2 bouyer /*
2604 1.2 bouyer  * Given a scsipi_xfer, dump the request, in all its glory
2605 1.2 bouyer */
2606 1.2 bouyer void
2607 1.2 bouyer show_scsipi_xs(xs)
2608 1.2 bouyer struct scsipi_xfer *xs;
2609 1.2 bouyer {
2610 1.3 enami
2611 1.2 bouyer printf("xs(%p): ", xs);
2612 1.24 thorpej printf("xs_control(0x%08x)", xs->xs_control);
2613 1.24 thorpej printf("xs_status(0x%08x)", xs->xs_status);
2614 1.38.4.1 nathanw printf("periph(%p)", xs->xs_periph);
2615 1.38.4.1 nathanw printf("retr(0x%x)", xs->xs_retries);
2616 1.2 bouyer printf("timo(0x%x)", xs->timeout);
2617 1.2 bouyer printf("cmd(%p)", xs->cmd);
2618 1.2 bouyer printf("len(0x%x)", xs->cmdlen);
2619 1.2 bouyer printf("data(%p)", xs->data);
2620 1.2 bouyer printf("len(0x%x)", xs->datalen);
2621 1.2 bouyer printf("res(0x%x)", xs->resid);
2622 1.2 bouyer printf("err(0x%x)", xs->error);
2623 1.2 bouyer printf("bp(%p)", xs->bp);
2624 1.2 bouyer show_scsipi_cmd(xs);
2625 1.2 bouyer }
2626 1.2 bouyer
2627 1.2 bouyer void
2628 1.2 bouyer show_scsipi_cmd(xs)
2629 1.2 bouyer struct scsipi_xfer *xs;
2630 1.2 bouyer {
2631 1.2 bouyer u_char *b = (u_char *) xs->cmd;
2632 1.3 enami int i = 0;
2633 1.2 bouyer
2634 1.38.4.1 nathanw scsipi_printaddr(xs->xs_periph);
2635 1.38.4.1 nathanw printf(" command: ");
2636 1.2 bouyer
2637 1.24 thorpej if ((xs->xs_control & XS_CTL_RESET) == 0) {
2638 1.2 bouyer while (i < xs->cmdlen) {
2639 1.2 bouyer if (i)
2640 1.2 bouyer printf(",");
2641 1.2 bouyer printf("0x%x", b[i++]);
2642 1.2 bouyer }
2643 1.2 bouyer printf("-[%d bytes]\n", xs->datalen);
2644 1.2 bouyer if (xs->datalen)
2645 1.2 bouyer show_mem(xs->data, min(64, xs->datalen));
2646 1.2 bouyer } else
2647 1.2 bouyer printf("-RESET-\n");
2648 1.2 bouyer }
2649 1.2 bouyer
2650 1.2 bouyer void
2651 1.2 bouyer show_mem(address, num)
2652 1.2 bouyer u_char *address;
2653 1.2 bouyer int num;
2654 1.2 bouyer {
2655 1.2 bouyer int x;
2656 1.2 bouyer
2657 1.2 bouyer printf("------------------------------");
2658 1.2 bouyer for (x = 0; x < num; x++) {
2659 1.2 bouyer if ((x % 16) == 0)
2660 1.2 bouyer printf("\n%03d: ", x);
2661 1.2 bouyer printf("%02x ", *address++);
2662 1.2 bouyer }
2663 1.2 bouyer printf("\n------------------------------\n");
2664 1.2 bouyer }
2665 1.38.4.1 nathanw #endif /* SCSIPI_DEBUG */
2666