scsipi_base.c revision 1.38.4.3
1 1.38.4.3 nathanw /* $NetBSD: scsipi_base.c,v 1.38.4.3 2001/09/21 22:36:14 nathanw Exp $ */
2 1.2 bouyer
3 1.8 mycroft /*-
4 1.38.4.1 nathanw * Copyright (c) 1998, 1999, 2000 The NetBSD Foundation, Inc.
5 1.8 mycroft * All rights reserved.
6 1.8 mycroft *
7 1.8 mycroft * This code is derived from software contributed to The NetBSD Foundation
8 1.38.4.1 nathanw * by Charles M. Hannum; by Jason R. Thorpe of the Numerical Aerospace
9 1.38.4.1 nathanw * Simulation Facility, NASA Ames Research Center.
10 1.2 bouyer *
11 1.2 bouyer * Redistribution and use in source and binary forms, with or without
12 1.2 bouyer * modification, are permitted provided that the following conditions
13 1.2 bouyer * are met:
14 1.2 bouyer * 1. Redistributions of source code must retain the above copyright
15 1.2 bouyer * notice, this list of conditions and the following disclaimer.
16 1.2 bouyer * 2. Redistributions in binary form must reproduce the above copyright
17 1.2 bouyer * notice, this list of conditions and the following disclaimer in the
18 1.2 bouyer * documentation and/or other materials provided with the distribution.
19 1.2 bouyer * 3. All advertising materials mentioning features or use of this software
20 1.2 bouyer * must display the following acknowledgement:
21 1.8 mycroft * This product includes software developed by the NetBSD
22 1.8 mycroft * Foundation, Inc. and its contributors.
23 1.8 mycroft * 4. Neither the name of The NetBSD Foundation nor the names of its
24 1.8 mycroft * contributors may be used to endorse or promote products derived
25 1.8 mycroft * from this software without specific prior written permission.
26 1.2 bouyer *
27 1.8 mycroft * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
28 1.8 mycroft * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
29 1.8 mycroft * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
30 1.8 mycroft * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
31 1.8 mycroft * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
32 1.8 mycroft * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
33 1.8 mycroft * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
34 1.8 mycroft * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
35 1.8 mycroft * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
36 1.8 mycroft * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
37 1.8 mycroft * POSSIBILITY OF SUCH DAMAGE.
38 1.2 bouyer */
39 1.2 bouyer
40 1.13 bouyer #include "opt_scsi.h"
41 1.13 bouyer
42 1.2 bouyer #include <sys/types.h>
43 1.2 bouyer #include <sys/param.h>
44 1.2 bouyer #include <sys/systm.h>
45 1.2 bouyer #include <sys/kernel.h>
46 1.2 bouyer #include <sys/buf.h>
47 1.2 bouyer #include <sys/uio.h>
48 1.2 bouyer #include <sys/malloc.h>
49 1.6 thorpej #include <sys/pool.h>
50 1.2 bouyer #include <sys/errno.h>
51 1.2 bouyer #include <sys/device.h>
52 1.2 bouyer #include <sys/proc.h>
53 1.38.4.1 nathanw #include <sys/kthread.h>
54 1.2 bouyer
55 1.2 bouyer #include <dev/scsipi/scsipi_all.h>
56 1.2 bouyer #include <dev/scsipi/scsipi_disk.h>
57 1.2 bouyer #include <dev/scsipi/scsipiconf.h>
58 1.2 bouyer #include <dev/scsipi/scsipi_base.h>
59 1.2 bouyer
60 1.38.4.1 nathanw #include <dev/scsipi/scsi_all.h>
61 1.38.4.1 nathanw #include <dev/scsipi/scsi_message.h>
62 1.38.4.1 nathanw
63 1.38.4.1 nathanw int scsipi_complete __P((struct scsipi_xfer *));
64 1.38.4.1 nathanw void scsipi_request_sense __P((struct scsipi_xfer *));
65 1.38.4.1 nathanw int scsipi_enqueue __P((struct scsipi_xfer *));
66 1.38.4.1 nathanw void scsipi_run_queue __P((struct scsipi_channel *chan));
67 1.38.4.1 nathanw
68 1.38.4.1 nathanw void scsipi_completion_thread __P((void *));
69 1.38.4.1 nathanw
70 1.38.4.1 nathanw void scsipi_get_tag __P((struct scsipi_xfer *));
71 1.38.4.1 nathanw void scsipi_put_tag __P((struct scsipi_xfer *));
72 1.38.4.1 nathanw
73 1.38.4.1 nathanw int scsipi_get_resource __P((struct scsipi_channel *));
74 1.38.4.1 nathanw void scsipi_put_resource __P((struct scsipi_channel *));
75 1.38.4.1 nathanw __inline int scsipi_grow_resources __P((struct scsipi_channel *));
76 1.38.4.1 nathanw
77 1.38.4.1 nathanw void scsipi_async_event_max_openings __P((struct scsipi_channel *,
78 1.38.4.1 nathanw struct scsipi_max_openings *));
79 1.38.4.1 nathanw void scsipi_async_event_xfer_mode __P((struct scsipi_channel *,
80 1.38.4.1 nathanw struct scsipi_xfer_mode *));
81 1.38.4.1 nathanw void scsipi_async_event_channel_reset __P((struct scsipi_channel *));
82 1.6 thorpej
83 1.38.4.1 nathanw struct pool scsipi_xfer_pool;
84 1.2 bouyer
85 1.2 bouyer /*
86 1.38.4.1 nathanw * scsipi_init:
87 1.38.4.1 nathanw *
88 1.38.4.1 nathanw * Called when a scsibus or atapibus is attached to the system
89 1.38.4.1 nathanw * to initialize shared data structures.
90 1.6 thorpej */
91 1.6 thorpej void
92 1.6 thorpej scsipi_init()
93 1.6 thorpej {
94 1.6 thorpej static int scsipi_init_done;
95 1.6 thorpej
96 1.6 thorpej if (scsipi_init_done)
97 1.6 thorpej return;
98 1.6 thorpej scsipi_init_done = 1;
99 1.6 thorpej
100 1.6 thorpej /* Initialize the scsipi_xfer pool. */
101 1.6 thorpej pool_init(&scsipi_xfer_pool, sizeof(struct scsipi_xfer), 0,
102 1.6 thorpej 0, 0, "scxspl", 0, NULL, NULL, M_DEVBUF);
103 1.6 thorpej }
104 1.6 thorpej
105 1.6 thorpej /*
106 1.38.4.1 nathanw * scsipi_channel_init:
107 1.38.4.1 nathanw *
108 1.38.4.1 nathanw * Initialize a scsipi_channel when it is attached.
109 1.38.4.1 nathanw */
110 1.38.4.1 nathanw int
111 1.38.4.1 nathanw scsipi_channel_init(chan)
112 1.38.4.1 nathanw struct scsipi_channel *chan;
113 1.38.4.1 nathanw {
114 1.38.4.1 nathanw size_t nbytes;
115 1.38.4.1 nathanw int i;
116 1.38.4.1 nathanw
117 1.38.4.1 nathanw /* Initialize shared data. */
118 1.38.4.1 nathanw scsipi_init();
119 1.38.4.1 nathanw
120 1.38.4.1 nathanw /* Initialize the queues. */
121 1.38.4.1 nathanw TAILQ_INIT(&chan->chan_queue);
122 1.38.4.1 nathanw TAILQ_INIT(&chan->chan_complete);
123 1.38.4.1 nathanw
124 1.38.4.1 nathanw nbytes = chan->chan_ntargets * sizeof(struct scsipi_periph **);
125 1.38.4.1 nathanw chan->chan_periphs = malloc(nbytes, M_DEVBUF, M_NOWAIT);
126 1.38.4.1 nathanw if (chan->chan_periphs == NULL)
127 1.38.4.1 nathanw return (ENOMEM);
128 1.38.4.1 nathanw
129 1.38.4.1 nathanw
130 1.38.4.1 nathanw nbytes = chan->chan_nluns * sizeof(struct scsipi_periph *);
131 1.38.4.1 nathanw for (i = 0; i < chan->chan_ntargets; i++) {
132 1.38.4.1 nathanw chan->chan_periphs[i] = malloc(nbytes, M_DEVBUF, M_NOWAIT);
133 1.38.4.1 nathanw if (chan->chan_periphs[i] == NULL) {
134 1.38.4.1 nathanw while (--i >= 0) {
135 1.38.4.1 nathanw free(chan->chan_periphs[i], M_DEVBUF);
136 1.38.4.1 nathanw }
137 1.38.4.1 nathanw return (ENOMEM);
138 1.38.4.1 nathanw }
139 1.38.4.1 nathanw memset(chan->chan_periphs[i], 0, nbytes);
140 1.38.4.1 nathanw }
141 1.38.4.1 nathanw
142 1.38.4.1 nathanw /*
143 1.38.4.1 nathanw * Create the asynchronous completion thread.
144 1.38.4.1 nathanw */
145 1.38.4.1 nathanw kthread_create(scsipi_create_completion_thread, chan);
146 1.38.4.1 nathanw return (0);
147 1.38.4.1 nathanw }
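/*
 * Illustrative sketch (not part of this file): an HBA front-end is
 * expected to fill in the channel's geometry and adapter linkage
 * before calling scsipi_channel_init().  The `sc' softc and its
 * members are hypothetical; only chan_adapter, chan_ntargets and
 * chan_nluns are taken from this file.
 */
#if 0
	struct scsipi_channel *chan = &sc->sc_channel;

	memset(chan, 0, sizeof(*chan));
	chan->chan_adapter = &sc->sc_adapter;	/* shared adapter state */
	chan->chan_ntargets = 16;		/* e.g. a wide SCSI bus */
	chan->chan_nluns = 8;

	if (scsipi_channel_init(chan) != 0)
		printf("%s: unable to init channel\n", sc->sc_dev.dv_xname);
#endif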
148 1.38.4.1 nathanw
149 1.38.4.1 nathanw /*
150 1.38.4.1 nathanw * scsipi_channel_shutdown:
151 1.38.4.1 nathanw *
152 1.38.4.1 nathanw * Shutdown a scsipi_channel.
153 1.38.4.1 nathanw */
154 1.38.4.1 nathanw void
155 1.38.4.1 nathanw scsipi_channel_shutdown(chan)
156 1.38.4.1 nathanw struct scsipi_channel *chan;
157 1.38.4.1 nathanw {
158 1.38.4.1 nathanw
159 1.38.4.1 nathanw /*
160 1.38.4.1 nathanw * Shut down the completion thread.
161 1.38.4.1 nathanw */
162 1.38.4.1 nathanw chan->chan_flags |= SCSIPI_CHAN_SHUTDOWN;
163 1.38.4.1 nathanw wakeup(&chan->chan_complete);
164 1.38.4.1 nathanw
165 1.38.4.1 nathanw /*
166 1.38.4.1 nathanw * Now wait for the thread to exit.
167 1.38.4.1 nathanw */
168 1.38.4.1 nathanw while (chan->chan_thread != NULL)
169 1.38.4.1 nathanw (void) tsleep(&chan->chan_thread, PRIBIO, "scshut", 0);
170 1.38.4.1 nathanw }
171 1.38.4.1 nathanw
172 1.38.4.1 nathanw /*
173 1.38.4.1 nathanw * scsipi_insert_periph:
174 1.38.4.1 nathanw *
175 1.38.4.1 nathanw * Insert a periph into the channel.
176 1.38.4.1 nathanw */
177 1.38.4.1 nathanw void
178 1.38.4.1 nathanw scsipi_insert_periph(chan, periph)
179 1.38.4.1 nathanw struct scsipi_channel *chan;
180 1.38.4.1 nathanw struct scsipi_periph *periph;
181 1.38.4.1 nathanw {
182 1.38.4.1 nathanw int s;
183 1.38.4.1 nathanw
184 1.38.4.1 nathanw s = splbio();
185 1.38.4.1 nathanw chan->chan_periphs[periph->periph_target][periph->periph_lun] = periph;
186 1.38.4.1 nathanw splx(s);
187 1.38.4.1 nathanw }
188 1.38.4.1 nathanw
189 1.38.4.1 nathanw /*
190 1.38.4.1 nathanw * scsipi_remove_periph:
191 1.38.4.1 nathanw *
192 1.38.4.1 nathanw * Remove a periph from the channel.
193 1.38.4.1 nathanw */
194 1.38.4.1 nathanw void
195 1.38.4.1 nathanw scsipi_remove_periph(chan, periph)
196 1.38.4.1 nathanw struct scsipi_channel *chan;
197 1.38.4.1 nathanw struct scsipi_periph *periph;
198 1.38.4.1 nathanw {
199 1.38.4.1 nathanw int s;
200 1.38.4.1 nathanw
201 1.38.4.1 nathanw s = splbio();
202 1.38.4.1 nathanw chan->chan_periphs[periph->periph_target][periph->periph_lun] = NULL;
203 1.38.4.1 nathanw splx(s);
204 1.38.4.1 nathanw }
205 1.38.4.1 nathanw
206 1.38.4.1 nathanw /*
207 1.38.4.1 nathanw * scsipi_lookup_periph:
208 1.38.4.1 nathanw *
209 1.38.4.1 nathanw * Lookup a periph on the specified channel.
210 1.38.4.1 nathanw */
211 1.38.4.1 nathanw struct scsipi_periph *
212 1.38.4.1 nathanw scsipi_lookup_periph(chan, target, lun)
213 1.38.4.1 nathanw struct scsipi_channel *chan;
214 1.38.4.1 nathanw int target, lun;
215 1.38.4.1 nathanw {
216 1.38.4.1 nathanw struct scsipi_periph *periph;
217 1.38.4.1 nathanw int s;
218 1.38.4.1 nathanw
219 1.38.4.1 nathanw if (target >= chan->chan_ntargets ||
220 1.38.4.1 nathanw lun >= chan->chan_nluns)
221 1.38.4.1 nathanw return (NULL);
222 1.38.4.1 nathanw
223 1.38.4.1 nathanw s = splbio();
224 1.38.4.1 nathanw periph = chan->chan_periphs[target][lun];
225 1.38.4.1 nathanw splx(s);
226 1.38.4.1 nathanw
227 1.38.4.1 nathanw return (periph);
228 1.38.4.1 nathanw }
229 1.38.4.1 nathanw
230 1.38.4.1 nathanw /*
231 1.38.4.1 nathanw * scsipi_get_resource:
232 1.38.4.1 nathanw *
233 1.38.4.1 nathanw * Allocate a single xfer `resource' from the channel.
234 1.38.4.1 nathanw *
235 1.38.4.1 nathanw * NOTE: Must be called at splbio().
236 1.38.4.1 nathanw */
237 1.38.4.1 nathanw int
238 1.38.4.1 nathanw scsipi_get_resource(chan)
239 1.38.4.1 nathanw struct scsipi_channel *chan;
240 1.38.4.1 nathanw {
241 1.38.4.1 nathanw struct scsipi_adapter *adapt = chan->chan_adapter;
242 1.38.4.1 nathanw
243 1.38.4.1 nathanw if (chan->chan_flags & SCSIPI_CHAN_OPENINGS) {
244 1.38.4.1 nathanw if (chan->chan_openings > 0) {
245 1.38.4.1 nathanw chan->chan_openings--;
246 1.38.4.1 nathanw return (1);
247 1.38.4.1 nathanw }
248 1.38.4.1 nathanw return (0);
249 1.38.4.1 nathanw }
250 1.38.4.1 nathanw
251 1.38.4.1 nathanw if (adapt->adapt_openings > 0) {
252 1.38.4.1 nathanw adapt->adapt_openings--;
253 1.38.4.1 nathanw return (1);
254 1.38.4.1 nathanw }
255 1.38.4.1 nathanw return (0);
256 1.38.4.1 nathanw }
257 1.38.4.1 nathanw
258 1.38.4.1 nathanw /*
259 1.38.4.1 nathanw * scsipi_grow_resources:
260 1.38.4.1 nathanw *
261 1.38.4.1 nathanw * Attempt to grow resources for a channel. If this succeeds,
262 1.38.4.1 nathanw * we allocate one for our caller.
263 1.38.4.1 nathanw *
264 1.38.4.1 nathanw * NOTE: Must be called at splbio().
265 1.38.4.1 nathanw */
266 1.38.4.1 nathanw __inline int
267 1.38.4.1 nathanw scsipi_grow_resources(chan)
268 1.38.4.1 nathanw struct scsipi_channel *chan;
269 1.38.4.1 nathanw {
270 1.38.4.1 nathanw
271 1.38.4.1 nathanw if (chan->chan_flags & SCSIPI_CHAN_CANGROW) {
272 1.38.4.1 nathanw scsipi_adapter_request(chan, ADAPTER_REQ_GROW_RESOURCES, NULL);
273 1.38.4.1 nathanw return (scsipi_get_resource(chan));
274 1.38.4.1 nathanw }
275 1.38.4.1 nathanw
276 1.38.4.1 nathanw return (0);
277 1.38.4.1 nathanw }
278 1.38.4.1 nathanw
279 1.38.4.1 nathanw /*
280 1.38.4.1 nathanw * scsipi_put_resource:
281 1.38.4.1 nathanw *
282 1.38.4.1 nathanw * Free a single xfer `resource' to the channel.
283 1.38.4.1 nathanw *
284 1.38.4.1 nathanw * NOTE: Must be called at splbio().
285 1.38.4.1 nathanw */
286 1.38.4.1 nathanw void
287 1.38.4.1 nathanw scsipi_put_resource(chan)
288 1.38.4.1 nathanw struct scsipi_channel *chan;
289 1.38.4.1 nathanw {
290 1.38.4.1 nathanw struct scsipi_adapter *adapt = chan->chan_adapter;
291 1.38.4.1 nathanw
292 1.38.4.1 nathanw if (chan->chan_flags & SCSIPI_CHAN_OPENINGS)
293 1.38.4.1 nathanw chan->chan_openings++;
294 1.38.4.1 nathanw else
295 1.38.4.1 nathanw adapt->adapt_openings++;
296 1.38.4.1 nathanw }
297 1.38.4.1 nathanw
298 1.38.4.1 nathanw /*
299 1.38.4.1 nathanw * scsipi_get_tag:
300 1.38.4.1 nathanw *
301 1.38.4.1 nathanw * Get a tag ID for the specified xfer.
302 1.38.4.1 nathanw *
303 1.38.4.1 nathanw * NOTE: Must be called at splbio().
304 1.38.4.1 nathanw */
305 1.38.4.1 nathanw void
306 1.38.4.1 nathanw scsipi_get_tag(xs)
307 1.38.4.1 nathanw struct scsipi_xfer *xs;
308 1.38.4.1 nathanw {
309 1.38.4.1 nathanw struct scsipi_periph *periph = xs->xs_periph;
310 1.38.4.1 nathanw int word, bit, tag;
311 1.38.4.1 nathanw
312 1.38.4.1 nathanw for (word = 0; word < PERIPH_NTAGWORDS; word++) {
313 1.38.4.1 nathanw bit = ffs(periph->periph_freetags[word]);
314 1.38.4.1 nathanw if (bit != 0)
315 1.38.4.1 nathanw break;
316 1.38.4.1 nathanw }
317 1.38.4.1 nathanw #ifdef DIAGNOSTIC
318 1.38.4.1 nathanw if (word == PERIPH_NTAGWORDS) {
319 1.38.4.1 nathanw scsipi_printaddr(periph);
320 1.38.4.1 nathanw printf("no free tags\n");
321 1.38.4.1 nathanw panic("scsipi_get_tag");
322 1.38.4.1 nathanw }
323 1.38.4.1 nathanw #endif
324 1.38.4.1 nathanw
325 1.38.4.1 nathanw bit -= 1;
326 1.38.4.1 nathanw periph->periph_freetags[word] &= ~(1 << bit);
327 1.38.4.1 nathanw tag = (word << 5) | bit;
328 1.38.4.1 nathanw
329 1.38.4.1 nathanw /* XXX Should eventually disallow this completely. */
330 1.38.4.1 nathanw if (tag >= periph->periph_openings) {
331 1.38.4.1 nathanw scsipi_printaddr(periph);
332 1.38.4.1 nathanw printf("WARNING: tag %d greater than available openings %d\n",
333 1.38.4.1 nathanw tag, periph->periph_openings);
334 1.38.4.1 nathanw }
335 1.38.4.1 nathanw
336 1.38.4.1 nathanw xs->xs_tag_id = tag;
337 1.38.4.1 nathanw }
338 1.38.4.1 nathanw
339 1.38.4.1 nathanw /*
340 1.38.4.1 nathanw * scsipi_put_tag:
341 1.38.4.1 nathanw *
342 1.38.4.1 nathanw * Put the tag ID for the specified xfer back into the pool.
343 1.38.4.1 nathanw *
344 1.38.4.1 nathanw * NOTE: Must be called at splbio().
345 1.2 bouyer */
346 1.38.4.1 nathanw void
347 1.38.4.1 nathanw scsipi_put_tag(xs)
348 1.38.4.1 nathanw struct scsipi_xfer *xs;
349 1.38.4.1 nathanw {
350 1.38.4.1 nathanw struct scsipi_periph *periph = xs->xs_periph;
351 1.38.4.1 nathanw int word, bit;
352 1.38.4.1 nathanw
353 1.38.4.1 nathanw word = xs->xs_tag_id >> 5;
354 1.38.4.1 nathanw bit = xs->xs_tag_id & 0x1f;
355 1.38.4.1 nathanw
356 1.38.4.1 nathanw periph->periph_freetags[word] |= (1 << bit);
357 1.38.4.1 nathanw }
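/*
 * Illustrative sketch (not part of this file): tag IDs are simply
 * indices into the periph_freetags[] bitmap, 32 tags per word, so a
 * tag decomposes as word = tag >> 5, bit = tag & 0x1f.  For example,
 * tag 37 lives in word 1, bit 5.
 */
#if 0
	int tag = 37;
	int word = tag >> 5;		/* 1 */
	int bit = tag & 0x1f;		/* 5 */

	/* scsipi_put_tag() marks it free again: */
	periph->periph_freetags[word] |= (1 << bit);
#endif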
358 1.2 bouyer
359 1.38.4.1 nathanw /*
360 1.38.4.1 nathanw * scsipi_get_xs:
361 1.38.4.1 nathanw *
362 1.38.4.1 nathanw * Allocate an xfer descriptor and associate it with the
363 1.38.4.1 nathanw * specified peripheral. If the peripheral has no more
364 1.38.4.1 nathanw * available command openings, we either block waiting for
365 1.38.4.1 nathanw * one to become available, or fail.
366 1.38.4.1 nathanw */
367 1.2 bouyer struct scsipi_xfer *
368 1.38.4.1 nathanw scsipi_get_xs(periph, flags)
369 1.38.4.1 nathanw struct scsipi_periph *periph;
370 1.38.4.1 nathanw int flags;
371 1.2 bouyer {
372 1.2 bouyer struct scsipi_xfer *xs;
373 1.2 bouyer int s;
374 1.2 bouyer
375 1.38.4.1 nathanw SC_DEBUG(periph, SCSIPI_DB3, ("scsipi_get_xs\n"));
376 1.6 thorpej
377 1.24 thorpej /*
378 1.24 thorpej * If we're cold, make sure we poll.
379 1.24 thorpej */
380 1.24 thorpej if (cold)
381 1.24 thorpej flags |= XS_CTL_NOSLEEP | XS_CTL_POLL;
382 1.24 thorpej
383 1.38.4.1 nathanw #ifdef DIAGNOSTIC
384 1.38.4.1 nathanw /*
385 1.38.4.1 nathanw * URGENT commands can never be ASYNC.
386 1.38.4.1 nathanw */
387 1.38.4.1 nathanw if ((flags & (XS_CTL_URGENT|XS_CTL_ASYNC)) ==
388 1.38.4.1 nathanw (XS_CTL_URGENT|XS_CTL_ASYNC)) {
389 1.38.4.1 nathanw scsipi_printaddr(periph);
390 1.38.4.1 nathanw printf("URGENT and ASYNC\n");
391 1.38.4.1 nathanw panic("scsipi_get_xs");
392 1.38.4.1 nathanw }
393 1.38.4.1 nathanw #endif
394 1.38.4.1 nathanw
395 1.2 bouyer s = splbio();
396 1.38.4.1 nathanw /*
397 1.38.4.1 nathanw * Wait for a command opening to become available. Rules:
398 1.38.4.1 nathanw *
399 1.38.4.1 nathanw * - All xfers must wait for an available opening.
400 1.38.4.1 nathanw * Exception: URGENT xfers can proceed when
401 1.38.4.1 nathanw * active == openings, because we use the opening
402 1.38.4.1 nathanw * of the command we're recovering for.
403 1.38.4.1 nathanw * - if the periph has sense pending, only URGENT & REQSENSE
404 1.38.4.1 nathanw * xfers may proceed.
405 1.38.4.1 nathanw *
406 1.38.4.1 nathanw * - If the periph is recovering, only URGENT xfers may
407 1.38.4.1 nathanw * proceed.
408 1.38.4.1 nathanw *
409 1.38.4.1 nathanw * - If the periph is currently executing a recovery
410 1.38.4.1 nathanw * command, URGENT commands must block, because only
411 1.38.4.1 nathanw * one recovery command can execute at a time.
412 1.38.4.1 nathanw */
413 1.38.4.1 nathanw for (;;) {
414 1.38.4.1 nathanw if (flags & XS_CTL_URGENT) {
415 1.38.4.1 nathanw if (periph->periph_active > periph->periph_openings)
416 1.38.4.1 nathanw goto wait_for_opening;
417 1.38.4.1 nathanw if (periph->periph_flags & PERIPH_SENSE) {
418 1.38.4.1 nathanw if ((flags & XS_CTL_REQSENSE) == 0)
419 1.38.4.1 nathanw goto wait_for_opening;
420 1.38.4.1 nathanw } else {
421 1.38.4.1 nathanw if ((periph->periph_flags &
422 1.38.4.1 nathanw PERIPH_RECOVERY_ACTIVE) != 0)
423 1.38.4.1 nathanw goto wait_for_opening;
424 1.38.4.1 nathanw periph->periph_flags |= PERIPH_RECOVERY_ACTIVE;
425 1.38.4.1 nathanw }
426 1.38.4.1 nathanw break;
427 1.38.4.1 nathanw }
428 1.38.4.1 nathanw if (periph->periph_active >= periph->periph_openings ||
429 1.38.4.1 nathanw (periph->periph_flags & PERIPH_RECOVERING) != 0)
430 1.38.4.1 nathanw goto wait_for_opening;
431 1.38.4.1 nathanw periph->periph_active++;
432 1.38.4.1 nathanw break;
433 1.38.4.1 nathanw
434 1.38.4.1 nathanw wait_for_opening:
435 1.38.4.1 nathanw if (flags & XS_CTL_NOSLEEP) {
436 1.2 bouyer splx(s);
437 1.38.4.1 nathanw return (NULL);
438 1.2 bouyer }
439 1.38.4.1 nathanw SC_DEBUG(periph, SCSIPI_DB3, ("sleeping\n"));
440 1.38.4.1 nathanw periph->periph_flags |= PERIPH_WAITING;
441 1.38.4.1 nathanw (void) tsleep(periph, PRIBIO, "getxs", 0);
442 1.2 bouyer }
443 1.38.4.1 nathanw SC_DEBUG(periph, SCSIPI_DB3, ("calling pool_get\n"));
444 1.6 thorpej xs = pool_get(&scsipi_xfer_pool,
445 1.24 thorpej ((flags & XS_CTL_NOSLEEP) != 0 ? PR_NOWAIT : PR_WAITOK));
446 1.38.4.1 nathanw if (xs == NULL) {
447 1.38.4.1 nathanw if (flags & XS_CTL_URGENT) {
448 1.38.4.1 nathanw if ((flags & XS_CTL_REQSENSE) == 0)
449 1.38.4.1 nathanw periph->periph_flags &= ~PERIPH_RECOVERY_ACTIVE;
450 1.38.4.1 nathanw } else
451 1.38.4.1 nathanw periph->periph_active--;
452 1.38.4.1 nathanw scsipi_printaddr(periph);
453 1.38.4.1 nathanw printf("unable to allocate %sscsipi_xfer\n",
454 1.38.4.1 nathanw (flags & XS_CTL_URGENT) ? "URGENT " : "");
455 1.2 bouyer }
456 1.6 thorpej splx(s);
457 1.2 bouyer
458 1.38.4.1 nathanw SC_DEBUG(periph, SCSIPI_DB3, ("returning\n"));
459 1.6 thorpej
460 1.7 scottr if (xs != NULL) {
461 1.30 thorpej callout_init(&xs->xs_callout);
462 1.38.4.1 nathanw memset(xs, 0, sizeof(*xs));
463 1.38.4.1 nathanw xs->xs_periph = periph;
464 1.24 thorpej xs->xs_control = flags;
465 1.37 fvdl xs->xs_status = 0;
466 1.38.4.1 nathanw s = splbio();
467 1.38.4.1 nathanw TAILQ_INSERT_TAIL(&periph->periph_xferq, xs, device_q);
468 1.38.4.1 nathanw splx(s);
469 1.7 scottr }
470 1.3 enami return (xs);
471 1.2 bouyer }
472 1.2 bouyer
473 1.2 bouyer /*
474 1.38.4.1 nathanw * scsipi_put_xs:
475 1.38.4.1 nathanw *
476 1.38.4.1 nathanw * Release an xfer descriptor, decreasing the outstanding command
477 1.38.4.1 nathanw * count for the peripheral. If there is a thread waiting for
478 1.38.4.1 nathanw * an opening, wake it up. If not, kick any queued I/O the
479 1.38.4.1 nathanw * peripheral may have.
480 1.6 thorpej *
481 1.38.4.1 nathanw * NOTE: Must be called at splbio().
482 1.2 bouyer */
483 1.3 enami void
484 1.38.4.1 nathanw scsipi_put_xs(xs)
485 1.2 bouyer struct scsipi_xfer *xs;
486 1.2 bouyer {
487 1.38.4.1 nathanw struct scsipi_periph *periph = xs->xs_periph;
488 1.38.4.1 nathanw int flags = xs->xs_control;
489 1.2 bouyer
490 1.38.4.1 nathanw SC_DEBUG(periph, SCSIPI_DB3, ("scsipi_free_xs\n"));
491 1.38.4.1 nathanw
492 1.38.4.1 nathanw TAILQ_REMOVE(&periph->periph_xferq, xs, device_q);
493 1.6 thorpej pool_put(&scsipi_xfer_pool, xs);
494 1.2 bouyer
495 1.38.4.1 nathanw #ifdef DIAGNOSTIC
496 1.38.4.1 nathanw if ((periph->periph_flags & PERIPH_RECOVERY_ACTIVE) != 0 &&
497 1.38.4.1 nathanw periph->periph_active == 0) {
498 1.38.4.1 nathanw scsipi_printaddr(periph);
499 1.38.4.1 nathanw printf("recovery without a command to recover for\n");
500 1.38.4.1 nathanw panic("scsipi_put_xs");
501 1.38.4.1 nathanw }
502 1.38.4.1 nathanw #endif
503 1.38.4.1 nathanw
504 1.38.4.1 nathanw if (flags & XS_CTL_URGENT) {
505 1.38.4.1 nathanw if ((flags & XS_CTL_REQSENSE) == 0)
506 1.38.4.1 nathanw periph->periph_flags &= ~PERIPH_RECOVERY_ACTIVE;
507 1.38.4.1 nathanw } else
508 1.38.4.1 nathanw periph->periph_active--;
509 1.38.4.1 nathanw if (periph->periph_active == 0 &&
510 1.38.4.1 nathanw (periph->periph_flags & PERIPH_WAITDRAIN) != 0) {
511 1.38.4.1 nathanw periph->periph_flags &= ~PERIPH_WAITDRAIN;
512 1.38.4.1 nathanw wakeup(&periph->periph_active);
513 1.38.4.1 nathanw }
514 1.38.4.1 nathanw
515 1.38.4.1 nathanw if (periph->periph_flags & PERIPH_WAITING) {
516 1.38.4.1 nathanw periph->periph_flags &= ~PERIPH_WAITING;
517 1.38.4.1 nathanw wakeup(periph);
518 1.2 bouyer } else {
519 1.38.4.1 nathanw if (periph->periph_switch->psw_start != NULL) {
520 1.38.4.1 nathanw SC_DEBUG(periph, SCSIPI_DB2,
521 1.3 enami ("calling private start()\n"));
522 1.38.4.1 nathanw (*periph->periph_switch->psw_start)(periph);
523 1.2 bouyer }
524 1.2 bouyer }
525 1.15 thorpej }
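/*
 * Illustrative sketch of the xfer accounting (not part of this file):
 * callers normally go through scsipi_command(), but the underlying
 * contract is a scsipi_get_xs()/scsipi_put_xs() pair, with the
 * release done at splbio().
 */
#if 0
	struct scsipi_xfer *xs;
	int s;

	xs = scsipi_get_xs(periph, XS_CTL_NOSLEEP);
	if (xs == NULL)
		return (ENOMEM);	/* no opening or no pool entry */

	/* ... fill in xs->cmd, xs->data, etc. and hand it to the HBA ... */

	s = splbio();
	scsipi_put_xs(xs);		/* releases the opening, kicks the queue */
	splx(s);
#endif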
526 1.15 thorpej
527 1.15 thorpej /*
528 1.38.4.1 nathanw * scsipi_channel_freeze:
529 1.38.4.1 nathanw *
530 1.38.4.1 nathanw * Freeze a channel's xfer queue.
531 1.38.4.1 nathanw */
532 1.38.4.1 nathanw void
533 1.38.4.1 nathanw scsipi_channel_freeze(chan, count)
534 1.38.4.1 nathanw struct scsipi_channel *chan;
535 1.38.4.1 nathanw int count;
536 1.38.4.1 nathanw {
537 1.38.4.1 nathanw int s;
538 1.38.4.1 nathanw
539 1.38.4.1 nathanw s = splbio();
540 1.38.4.1 nathanw chan->chan_qfreeze += count;
541 1.38.4.1 nathanw splx(s);
542 1.38.4.1 nathanw }
543 1.38.4.1 nathanw
544 1.38.4.1 nathanw /*
545 1.38.4.1 nathanw * scsipi_channel_thaw:
546 1.38.4.1 nathanw *
547 1.38.4.1 nathanw * Thaw a channel's xfer queue.
548 1.38.4.1 nathanw */
549 1.38.4.1 nathanw void
550 1.38.4.1 nathanw scsipi_channel_thaw(chan, count)
551 1.38.4.1 nathanw struct scsipi_channel *chan;
552 1.38.4.1 nathanw int count;
553 1.38.4.1 nathanw {
554 1.38.4.1 nathanw int s;
555 1.38.4.1 nathanw
556 1.38.4.1 nathanw s = splbio();
557 1.38.4.1 nathanw chan->chan_qfreeze -= count;
558 1.38.4.1 nathanw /*
559 1.38.4.1 nathanw * Don't let the freeze count go negative.
560 1.38.4.1 nathanw *
561 1.38.4.1 nathanw * Presumably the adapter driver could keep track of this,
562 1.38.4.1 nathanw * but it might just be easier to do this here so as to allow
563 1.38.4.1 nathanw * multiple callers, including those outside the adapter driver.
564 1.38.4.1 nathanw */
565 1.38.4.1 nathanw if (chan->chan_qfreeze < 0) {
566 1.38.4.1 nathanw chan->chan_qfreeze = 0;
567 1.38.4.1 nathanw }
568 1.38.4.1 nathanw splx(s);
569 1.38.4.1 nathanw /*
570 1.38.4.1 nathanw * Kick the channel's queue here. Note, we may be running in
571 1.38.4.1 nathanw * interrupt context (softclock or HBA's interrupt), so the adapter
572 1.38.4.1 nathanw * driver had better not sleep.
573 1.38.4.1 nathanw */
574 1.38.4.1 nathanw if (chan->chan_qfreeze == 0)
575 1.38.4.1 nathanw scsipi_run_queue(chan);
576 1.38.4.1 nathanw }
577 1.38.4.1 nathanw
578 1.38.4.1 nathanw /*
579 1.38.4.1 nathanw * scsipi_channel_timed_thaw:
580 1.38.4.1 nathanw *
581 1.38.4.1 nathanw * Thaw a channel after some time has expired. This will also
582 1.38.4.1 nathanw * run the channel's queue if the freeze count has reached 0.
583 1.38.4.1 nathanw */
584 1.38.4.1 nathanw void
585 1.38.4.1 nathanw scsipi_channel_timed_thaw(arg)
586 1.38.4.1 nathanw void *arg;
587 1.38.4.1 nathanw {
588 1.38.4.1 nathanw struct scsipi_channel *chan = arg;
589 1.38.4.1 nathanw
590 1.38.4.1 nathanw scsipi_channel_thaw(chan, 1);
591 1.38.4.1 nathanw }
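/*
 * Illustrative sketch (not part of this file): on a transient adapter
 * condition a driver can freeze the channel and arrange a thaw one
 * second later via scsipi_channel_timed_thaw().  The `sc_callout'
 * member is hypothetical; the pattern mirrors the periph case, which
 * uses callout_reset() with scsipi_periph_timed_thaw() later in this
 * file.
 */
#if 0
	scsipi_channel_freeze(chan, 1);
	callout_reset(&sc->sc_callout, hz, scsipi_channel_timed_thaw, chan);
#endif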
592 1.38.4.1 nathanw
593 1.38.4.1 nathanw /*
594 1.38.4.1 nathanw * scsipi_periph_freeze:
595 1.38.4.1 nathanw *
596 1.38.4.1 nathanw * Freeze a device's xfer queue.
597 1.38.4.1 nathanw */
598 1.38.4.1 nathanw void
599 1.38.4.1 nathanw scsipi_periph_freeze(periph, count)
600 1.38.4.1 nathanw struct scsipi_periph *periph;
601 1.38.4.1 nathanw int count;
602 1.38.4.1 nathanw {
603 1.38.4.1 nathanw int s;
604 1.38.4.1 nathanw
605 1.38.4.1 nathanw s = splbio();
606 1.38.4.1 nathanw periph->periph_qfreeze += count;
607 1.38.4.1 nathanw splx(s);
608 1.38.4.1 nathanw }
609 1.38.4.1 nathanw
610 1.38.4.1 nathanw /*
611 1.38.4.1 nathanw * scsipi_periph_thaw:
612 1.38.4.1 nathanw *
613 1.38.4.1 nathanw * Thaw a device's xfer queue.
614 1.38.4.1 nathanw */
615 1.38.4.1 nathanw void
616 1.38.4.1 nathanw scsipi_periph_thaw(periph, count)
617 1.38.4.1 nathanw struct scsipi_periph *periph;
618 1.38.4.1 nathanw int count;
619 1.38.4.1 nathanw {
620 1.38.4.1 nathanw int s;
621 1.38.4.1 nathanw
622 1.38.4.1 nathanw s = splbio();
623 1.38.4.1 nathanw periph->periph_qfreeze -= count;
624 1.38.4.1 nathanw if (periph->periph_qfreeze == 0 &&
625 1.38.4.1 nathanw (periph->periph_flags & PERIPH_WAITING) != 0)
626 1.38.4.1 nathanw wakeup(periph);
627 1.38.4.1 nathanw splx(s);
628 1.38.4.1 nathanw }
629 1.38.4.1 nathanw
630 1.38.4.1 nathanw /*
631 1.38.4.1 nathanw * scsipi_periph_timed_thaw:
632 1.38.4.1 nathanw *
633 1.38.4.1 nathanw * Thaw a device after some time has expired.
634 1.38.4.1 nathanw */
635 1.38.4.1 nathanw void
636 1.38.4.1 nathanw scsipi_periph_timed_thaw(arg)
637 1.38.4.1 nathanw void *arg;
638 1.38.4.1 nathanw {
639 1.38.4.1 nathanw struct scsipi_periph *periph = arg;
640 1.38.4.1 nathanw
641 1.38.4.1 nathanw callout_stop(&periph->periph_callout);
642 1.38.4.1 nathanw scsipi_periph_thaw(periph, 1);
643 1.38.4.1 nathanw
644 1.38.4.1 nathanw /*
645 1.38.4.1 nathanw * Kick the channel's queue here. Note, we're running in
646 1.38.4.1 nathanw * interrupt context (softclock), so the adapter driver
647 1.38.4.1 nathanw * had better not sleep.
648 1.38.4.1 nathanw */
649 1.38.4.1 nathanw scsipi_run_queue(periph->periph_channel);
650 1.38.4.1 nathanw }
651 1.38.4.1 nathanw
652 1.38.4.1 nathanw /*
653 1.38.4.1 nathanw * scsipi_wait_drain:
654 1.38.4.1 nathanw *
655 1.38.4.1 nathanw * Wait for a periph's pending xfers to drain.
656 1.15 thorpej */
657 1.15 thorpej void
658 1.38.4.1 nathanw scsipi_wait_drain(periph)
659 1.38.4.1 nathanw struct scsipi_periph *periph;
660 1.15 thorpej {
661 1.15 thorpej int s;
662 1.15 thorpej
663 1.15 thorpej s = splbio();
664 1.38.4.1 nathanw while (periph->periph_active != 0) {
665 1.38.4.1 nathanw periph->periph_flags |= PERIPH_WAITDRAIN;
666 1.38.4.1 nathanw (void) tsleep(&periph->periph_active, PRIBIO, "sxdrn", 0);
667 1.15 thorpej }
668 1.15 thorpej splx(s);
669 1.23 thorpej }
670 1.23 thorpej
671 1.23 thorpej /*
672 1.38.4.1 nathanw * scsipi_kill_pending:
673 1.23 thorpej *
674 1.38.4.1 nathanw * Kill off all pending xfers for a periph.
675 1.38.4.1 nathanw *
676 1.38.4.1 nathanw * NOTE: Must be called at splbio().
677 1.23 thorpej */
678 1.23 thorpej void
679 1.38.4.1 nathanw scsipi_kill_pending(periph)
680 1.38.4.1 nathanw struct scsipi_periph *periph;
681 1.23 thorpej {
682 1.23 thorpej
683 1.38.4.1 nathanw (*periph->periph_channel->chan_bustype->bustype_kill_pending)(periph);
684 1.38.4.1 nathanw #ifdef DIAGNOSTIC
685 1.38.4.1 nathanw if (TAILQ_FIRST(&periph->periph_xferq) != NULL)
686 1.38.4.1 nathanw panic("scsipi_kill_pending");
687 1.38.4.1 nathanw #endif
688 1.38.4.1 nathanw scsipi_wait_drain(periph);
689 1.2 bouyer }
690 1.2 bouyer
691 1.2 bouyer /*
692 1.38.4.1 nathanw * scsipi_interpret_sense:
693 1.38.4.1 nathanw *
694 1.38.4.1 nathanw * Look at the returned sense and act on the error, determining
695 1.38.4.1 nathanw * the unix error number to pass back. (0 = report no error)
696 1.13 bouyer *
697 1.38.4.1 nathanw * NOTE: If we return ERESTART, we are expected to have
698 1.38.4.1 nathanw * thawed the device!
699 1.38.4.1 nathanw *
700 1.38.4.1 nathanw * THIS IS THE DEFAULT ERROR HANDLER FOR SCSI DEVICES.
701 1.13 bouyer */
702 1.13 bouyer int
703 1.13 bouyer scsipi_interpret_sense(xs)
704 1.13 bouyer struct scsipi_xfer *xs;
705 1.13 bouyer {
706 1.13 bouyer struct scsipi_sense_data *sense;
707 1.38.4.1 nathanw struct scsipi_periph *periph = xs->xs_periph;
708 1.13 bouyer u_int8_t key;
709 1.13 bouyer u_int32_t info;
710 1.13 bouyer int error;
711 1.13 bouyer #ifndef SCSIVERBOSE
712 1.13 bouyer static char *error_mes[] = {
713 1.13 bouyer "soft error (corrected)",
714 1.13 bouyer "not ready", "medium error",
715 1.13 bouyer "non-media hardware failure", "illegal request",
716 1.13 bouyer "unit attention", "readonly device",
717 1.13 bouyer "no data found", "vendor unique",
718 1.13 bouyer "copy aborted", "command aborted",
719 1.13 bouyer "search returned equal", "volume overflow",
720 1.13 bouyer "verify miscompare", "unknown error key"
721 1.13 bouyer };
722 1.13 bouyer #endif
723 1.13 bouyer
724 1.13 bouyer sense = &xs->sense.scsi_sense;
725 1.38.4.1 nathanw #ifdef SCSIPI_DEBUG
726 1.38.4.1 nathanw if (periph->periph_flags & SCSIPI_DB1) {
727 1.13 bouyer int count;
728 1.38.4.1 nathanw scsipi_printaddr(periph);
729 1.38.4.1 nathanw printf(" sense debug information:\n");
730 1.38.4.1 nathanw printf("\tcode 0x%x valid 0x%x\n",
731 1.13 bouyer sense->error_code & SSD_ERRCODE,
732 1.13 bouyer sense->error_code & SSD_ERRCODE_VALID ? 1 : 0);
733 1.38.4.1 nathanw printf("\tseg 0x%x key 0x%x ili 0x%x eom 0x%x fmark 0x%x\n",
734 1.13 bouyer sense->segment,
735 1.13 bouyer sense->flags & SSD_KEY,
736 1.13 bouyer sense->flags & SSD_ILI ? 1 : 0,
737 1.13 bouyer sense->flags & SSD_EOM ? 1 : 0,
738 1.13 bouyer sense->flags & SSD_FILEMARK ? 1 : 0);
739 1.38.4.1 nathanw printf("\ninfo: 0x%x 0x%x 0x%x 0x%x followed by %d "
740 1.38.4.1 nathanw "extra bytes\n",
741 1.13 bouyer sense->info[0],
742 1.13 bouyer sense->info[1],
743 1.13 bouyer sense->info[2],
744 1.13 bouyer sense->info[3],
745 1.13 bouyer sense->extra_len);
746 1.38.4.1 nathanw printf("\textra: ");
747 1.13 bouyer for (count = 0; count < ADD_BYTES_LIM(sense); count++)
748 1.13 bouyer printf("0x%x ", sense->cmd_spec_info[count]);
749 1.13 bouyer printf("\n");
750 1.13 bouyer }
751 1.38.4.1 nathanw #endif
752 1.38.4.1 nathanw
753 1.13 bouyer /*
754 1.38.4.1 nathanw * If the periph has its own error handler, call it first.
755 1.13 bouyer * If it returns a legit error value, return that, otherwise
756 1.13 bouyer * it wants us to continue with normal error processing.
757 1.13 bouyer */
758 1.38.4.1 nathanw if (periph->periph_switch->psw_error != NULL) {
759 1.38.4.1 nathanw SC_DEBUG(periph, SCSIPI_DB2,
760 1.13 bouyer ("calling private err_handler()\n"));
761 1.38.4.1 nathanw error = (*periph->periph_switch->psw_error)(xs);
762 1.38.4.1 nathanw if (error != EJUSTRETURN)
763 1.38.4.1 nathanw return (error);
764 1.13 bouyer }
765 1.13 bouyer /* otherwise use the default */
766 1.13 bouyer switch (sense->error_code & SSD_ERRCODE) {
767 1.13 bouyer /*
768 1.13 bouyer * If it's code 70, use the extended stuff and
769 1.13 bouyer * interpret the key
770 1.13 bouyer */
771 1.13 bouyer case 0x71: /* delayed error */
772 1.38.4.1 nathanw scsipi_printaddr(periph);
773 1.13 bouyer key = sense->flags & SSD_KEY;
774 1.13 bouyer printf(" DEFERRED ERROR, key = 0x%x\n", key);
775 1.13 bouyer /* FALLTHROUGH */
776 1.13 bouyer case 0x70:
777 1.13 bouyer if ((sense->error_code & SSD_ERRCODE_VALID) != 0)
778 1.13 bouyer info = _4btol(sense->info);
779 1.13 bouyer else
780 1.13 bouyer info = 0;
781 1.13 bouyer key = sense->flags & SSD_KEY;
782 1.13 bouyer
783 1.13 bouyer switch (key) {
784 1.13 bouyer case SKEY_NO_SENSE:
785 1.13 bouyer case SKEY_RECOVERED_ERROR:
786 1.13 bouyer if (xs->resid == xs->datalen && xs->datalen) {
787 1.13 bouyer /*
788 1.13 bouyer * Why is this here?
789 1.13 bouyer */
790 1.13 bouyer xs->resid = 0; /* not short read */
791 1.13 bouyer }
792 1.13 bouyer case SKEY_EQUAL:
793 1.13 bouyer error = 0;
794 1.13 bouyer break;
795 1.13 bouyer case SKEY_NOT_READY:
796 1.38.4.1 nathanw if ((periph->periph_flags & PERIPH_REMOVABLE) != 0)
797 1.38.4.1 nathanw periph->periph_flags &= ~PERIPH_MEDIA_LOADED;
798 1.24 thorpej if ((xs->xs_control & XS_CTL_IGNORE_NOT_READY) != 0)
799 1.13 bouyer return (0);
800 1.38.4.2 nathanw if (sense->add_sense_code == 0x3A) {
801 1.19 bouyer error = ENODEV; /* Medium not present */
802 1.38.4.2 nathanw if (xs->xs_control & XS_CTL_SILENT_NODEV)
803 1.38.4.2 nathanw return (error);
804 1.38.4.2 nathanw } else
805 1.19 bouyer error = EIO;
806 1.24 thorpej if ((xs->xs_control & XS_CTL_SILENT) != 0)
807 1.19 bouyer return (error);
808 1.13 bouyer break;
809 1.13 bouyer case SKEY_ILLEGAL_REQUEST:
810 1.24 thorpej if ((xs->xs_control &
811 1.24 thorpej XS_CTL_IGNORE_ILLEGAL_REQUEST) != 0)
812 1.13 bouyer return (0);
813 1.24 thorpej /*
814 1.24 thorpej * Handle the case where a device reports
815 1.24 thorpej * Logical Unit Not Supported during discovery.
816 1.24 thorpej */
817 1.24 thorpej if ((xs->xs_control & XS_CTL_DISCOVERY) != 0 &&
818 1.24 thorpej sense->add_sense_code == 0x25 &&
819 1.24 thorpej sense->add_sense_code_qual == 0x00)
820 1.24 thorpej return (EINVAL);
821 1.24 thorpej if ((xs->xs_control & XS_CTL_SILENT) != 0)
822 1.13 bouyer return (EIO);
823 1.13 bouyer error = EINVAL;
824 1.13 bouyer break;
825 1.13 bouyer case SKEY_UNIT_ATTENTION:
826 1.20 bouyer if (sense->add_sense_code == 0x29 &&
827 1.38.4.1 nathanw sense->add_sense_code_qual == 0x00) {
828 1.38.4.1 nathanw /* device or bus reset */
829 1.38.4.1 nathanw return (ERESTART);
830 1.38.4.1 nathanw }
831 1.38.4.1 nathanw if ((periph->periph_flags & PERIPH_REMOVABLE) != 0)
832 1.38.4.1 nathanw periph->periph_flags &= ~PERIPH_MEDIA_LOADED;
833 1.24 thorpej if ((xs->xs_control &
834 1.24 thorpej XS_CTL_IGNORE_MEDIA_CHANGE) != 0 ||
835 1.13 bouyer /* XXX Should reupload any transient state. */
836 1.38.4.1 nathanw (periph->periph_flags &
837 1.38.4.1 nathanw PERIPH_REMOVABLE) == 0) {
838 1.13 bouyer return (ERESTART);
839 1.38.4.1 nathanw }
840 1.24 thorpej if ((xs->xs_control & XS_CTL_SILENT) != 0)
841 1.13 bouyer return (EIO);
842 1.13 bouyer error = EIO;
843 1.13 bouyer break;
844 1.13 bouyer case SKEY_WRITE_PROTECT:
845 1.13 bouyer error = EROFS;
846 1.13 bouyer break;
847 1.13 bouyer case SKEY_BLANK_CHECK:
848 1.13 bouyer error = 0;
849 1.13 bouyer break;
850 1.13 bouyer case SKEY_ABORTED_COMMAND:
851 1.13 bouyer error = ERESTART;
852 1.13 bouyer break;
853 1.13 bouyer case SKEY_VOLUME_OVERFLOW:
854 1.13 bouyer error = ENOSPC;
855 1.13 bouyer break;
856 1.13 bouyer default:
857 1.13 bouyer error = EIO;
858 1.13 bouyer break;
859 1.13 bouyer }
860 1.13 bouyer
861 1.13 bouyer #ifdef SCSIVERBOSE
862 1.32 augustss if (key && (xs->xs_control & XS_CTL_SILENT) == 0)
863 1.13 bouyer scsipi_print_sense(xs, 0);
864 1.13 bouyer #else
865 1.13 bouyer if (key) {
866 1.38.4.1 nathanw scsipi_printaddr(periph);
867 1.13 bouyer printf("%s", error_mes[key - 1]);
868 1.13 bouyer if ((sense->error_code & SSD_ERRCODE_VALID) != 0) {
869 1.13 bouyer switch (key) {
870 1.13 bouyer case SKEY_NOT_READY:
871 1.13 bouyer case SKEY_ILLEGAL_REQUEST:
872 1.13 bouyer case SKEY_UNIT_ATTENTION:
873 1.13 bouyer case SKEY_WRITE_PROTECT:
874 1.13 bouyer break;
875 1.13 bouyer case SKEY_BLANK_CHECK:
876 1.13 bouyer printf(", requested size: %d (decimal)",
877 1.13 bouyer info);
878 1.13 bouyer break;
879 1.13 bouyer case SKEY_ABORTED_COMMAND:
880 1.38.4.1 nathanw if (xs->xs_retries)
881 1.13 bouyer printf(", retrying");
882 1.13 bouyer printf(", cmd 0x%x, info 0x%x",
883 1.13 bouyer xs->cmd->opcode, info);
884 1.13 bouyer break;
885 1.13 bouyer default:
886 1.13 bouyer printf(", info = %d (decimal)", info);
887 1.13 bouyer }
888 1.13 bouyer }
889 1.13 bouyer if (sense->extra_len != 0) {
890 1.13 bouyer int n;
891 1.13 bouyer printf(", data =");
892 1.13 bouyer for (n = 0; n < sense->extra_len; n++)
893 1.13 bouyer printf(" %02x",
894 1.13 bouyer sense->cmd_spec_info[n]);
895 1.13 bouyer }
896 1.13 bouyer printf("\n");
897 1.13 bouyer }
898 1.13 bouyer #endif
899 1.13 bouyer return (error);
900 1.13 bouyer
901 1.13 bouyer /*
902 1.13 bouyer * Not code 70, just report it
903 1.13 bouyer */
904 1.13 bouyer default:
905 1.38.4.1 nathanw #if defined(SCSIDEBUG) || defined(DEBUG)
906 1.28 mjacob {
907 1.28 mjacob static char *uc = "undecodable sense error";
908 1.28 mjacob int i;
909 1.28 mjacob u_int8_t *cptr = (u_int8_t *) sense;
910 1.38.4.1 nathanw scsipi_printaddr(periph);
911 1.28 mjacob if (xs->cmd == &xs->cmdstore) {
912 1.28 mjacob printf("%s for opcode 0x%x, data=",
913 1.28 mjacob uc, xs->cmdstore.opcode);
914 1.28 mjacob } else {
915 1.28 mjacob printf("%s, data=", uc);
916 1.28 mjacob }
917 1.28 mjacob for (i = 0; i < sizeof (*sense); i++)
918 1.28 mjacob printf(" 0x%02x", *(cptr++) & 0xff);
919 1.28 mjacob printf("\n");
920 1.28 mjacob }
921 1.28 mjacob #else
922 1.38.4.1 nathanw
923 1.38.4.1 nathanw scsipi_printaddr(periph);
924 1.17 mjacob printf("Sense Error Code 0x%x",
925 1.17 mjacob sense->error_code & SSD_ERRCODE);
926 1.13 bouyer if ((sense->error_code & SSD_ERRCODE_VALID) != 0) {
927 1.13 bouyer struct scsipi_sense_data_unextended *usense =
928 1.13 bouyer (struct scsipi_sense_data_unextended *)sense;
929 1.13 bouyer printf(" at block no. %d (decimal)",
930 1.13 bouyer _3btol(usense->block));
931 1.13 bouyer }
932 1.13 bouyer printf("\n");
933 1.28 mjacob #endif
934 1.13 bouyer return (EIO);
935 1.13 bouyer }
936 1.13 bouyer }
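/*
 * Illustrative sketch (not part of this file): a periph driver can
 * override sense handling via the psw_error hook in its periph
 * switch.  Returning EJUSTRETURN asks scsipi_interpret_sense() to
 * continue with the default processing above.  The handler below is
 * a hypothetical example only.
 */
#if 0
static int
xx_error_handler(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_sense_data *sense = &xs->sense.scsi_sense;

	/* Quietly ignore UNIT ATTENTION for this (hypothetical) device. */
	if ((sense->error_code & SSD_ERRCODE) == 0x70 &&
	    (sense->flags & SSD_KEY) == SKEY_UNIT_ATTENTION)
		return (0);

	return (EJUSTRETURN);		/* fall back to default handling */
}
#endif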
937 1.13 bouyer
938 1.13 bouyer /*
939 1.38.4.1 nathanw * scsipi_size:
940 1.38.4.1 nathanw *
941 1.38.4.1 nathanw * Find out from the device what its capacity is.
942 1.2 bouyer */
943 1.2 bouyer u_long
944 1.38.4.1 nathanw scsipi_size(periph, flags)
945 1.38.4.1 nathanw struct scsipi_periph *periph;
946 1.2 bouyer int flags;
947 1.2 bouyer {
948 1.2 bouyer struct scsipi_read_cap_data rdcap;
949 1.2 bouyer struct scsipi_read_capacity scsipi_cmd;
950 1.2 bouyer
951 1.38.4.2 nathanw memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
952 1.2 bouyer scsipi_cmd.opcode = READ_CAPACITY;
953 1.2 bouyer
954 1.2 bouyer /*
955 1.2 bouyer * If the command works, interpret the result as a 4 byte
956 1.2 bouyer * number of blocks
957 1.2 bouyer */
958 1.38.4.1 nathanw if (scsipi_command(periph, (struct scsipi_generic *)&scsipi_cmd,
959 1.3 enami sizeof(scsipi_cmd), (u_char *)&rdcap, sizeof(rdcap),
960 1.38 enami SCSIPIRETRIES, 20000, NULL,
961 1.38 enami flags | XS_CTL_DATA_IN | XS_CTL_DATA_ONSTACK) != 0) {
962 1.38.4.1 nathanw scsipi_printaddr(periph);
963 1.2 bouyer printf("could not get size\n");
964 1.3 enami return (0);
965 1.2 bouyer }
966 1.2 bouyer
967 1.3 enami return (_4btol(rdcap.addr) + 1);
968 1.2 bouyer }
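/*
 * Illustrative sketch (not part of this file): a disk driver would
 * typically use this from its "get parameters" path.  The variable
 * names and the ENXIO return are hypothetical.
 */
#if 0
	u_long nblks;

	nblks = scsipi_size(periph, XS_CTL_SILENT);
	if (nblks == 0)
		return (ENXIO);		/* capacity unknown */
#endif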
969 1.2 bouyer
970 1.2 bouyer /*
971 1.38.4.1 nathanw * scsipi_test_unit_ready:
972 1.38.4.1 nathanw *
973 1.38.4.1 nathanw * Issue a `test unit ready' request.
974 1.2 bouyer */
975 1.3 enami int
976 1.38.4.1 nathanw scsipi_test_unit_ready(periph, flags)
977 1.38.4.1 nathanw struct scsipi_periph *periph;
978 1.2 bouyer int flags;
979 1.2 bouyer {
980 1.2 bouyer struct scsipi_test_unit_ready scsipi_cmd;
981 1.2 bouyer
982 1.2 bouyer /* some ATAPI drives don't support TEST_UNIT_READY. Sigh */
983 1.38.4.1 nathanw if (periph->periph_quirks & PQUIRK_NOTUR)
984 1.3 enami return (0);
985 1.2 bouyer
986 1.38.4.2 nathanw memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
987 1.2 bouyer scsipi_cmd.opcode = TEST_UNIT_READY;
988 1.2 bouyer
989 1.38.4.1 nathanw return (scsipi_command(periph,
990 1.3 enami (struct scsipi_generic *)&scsipi_cmd, sizeof(scsipi_cmd),
991 1.29 bouyer 0, 0, SCSIPIRETRIES, 10000, NULL, flags));
992 1.2 bouyer }
993 1.2 bouyer
994 1.2 bouyer /*
995 1.38.4.1 nathanw * scsipi_inquire:
996 1.38.4.1 nathanw *
997 1.38.4.1 nathanw * Ask the device about itself.
998 1.2 bouyer */
999 1.3 enami int
1000 1.38.4.1 nathanw scsipi_inquire(periph, inqbuf, flags)
1001 1.38.4.1 nathanw struct scsipi_periph *periph;
1002 1.2 bouyer struct scsipi_inquiry_data *inqbuf;
1003 1.2 bouyer int flags;
1004 1.2 bouyer {
1005 1.2 bouyer struct scsipi_inquiry scsipi_cmd;
1006 1.2 bouyer
1007 1.38.4.2 nathanw memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
1008 1.2 bouyer scsipi_cmd.opcode = INQUIRY;
1009 1.2 bouyer scsipi_cmd.length = sizeof(struct scsipi_inquiry_data);
1010 1.2 bouyer
1011 1.38.4.1 nathanw return (scsipi_command(periph,
1012 1.3 enami (struct scsipi_generic *) &scsipi_cmd, sizeof(scsipi_cmd),
1013 1.3 enami (u_char *) inqbuf, sizeof(struct scsipi_inquiry_data),
1014 1.29 bouyer SCSIPIRETRIES, 10000, NULL, XS_CTL_DATA_IN | flags));
1015 1.2 bouyer }
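/*
 * Illustrative sketch (not part of this file): a typical probe first
 * checks the unit with a TEST UNIT READY and then asks it to
 * identify itself.  Variable names are hypothetical; the flags used
 * are the ones handled by scsipi_interpret_sense() above.
 */
#if 0
	struct scsipi_inquiry_data inqbuf;
	int error;

	error = scsipi_test_unit_ready(periph,
	    XS_CTL_DISCOVERY | XS_CTL_SILENT);
	if (error == 0)
		error = scsipi_inquire(periph, &inqbuf,
		    XS_CTL_DISCOVERY | XS_CTL_SILENT);
#endif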
1016 1.2 bouyer
1017 1.2 bouyer /*
1018 1.38.4.1 nathanw * scsipi_prevent:
1019 1.38.4.1 nathanw *
1020 1.38.4.1 nathanw * Prevent or allow the user to remove the media
1021 1.2 bouyer */
1022 1.3 enami int
1023 1.38.4.1 nathanw scsipi_prevent(periph, type, flags)
1024 1.38.4.1 nathanw struct scsipi_periph *periph;
1025 1.2 bouyer int type, flags;
1026 1.2 bouyer {
1027 1.2 bouyer struct scsipi_prevent scsipi_cmd;
1028 1.2 bouyer
1029 1.38.4.1 nathanw if (periph->periph_quirks & PQUIRK_NODOORLOCK)
1030 1.3 enami return (0);
1031 1.2 bouyer
1032 1.38.4.2 nathanw memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
1033 1.2 bouyer scsipi_cmd.opcode = PREVENT_ALLOW;
1034 1.2 bouyer scsipi_cmd.how = type;
1035 1.38.4.1 nathanw
1036 1.38.4.1 nathanw return (scsipi_command(periph,
1037 1.3 enami (struct scsipi_generic *) &scsipi_cmd, sizeof(scsipi_cmd),
1038 1.29 bouyer 0, 0, SCSIPIRETRIES, 5000, NULL, flags));
1039 1.2 bouyer }
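/*
 * Illustrative sketch (not part of this file): removable-media
 * drivers lock the door while the device is open and unlock it again
 * on close.  PR_PREVENT/PR_ALLOW are assumed to be the usual
 * PREVENT ALLOW MEDIUM REMOVAL arguments from scsipi_all.h.
 */
#if 0
	/* on first open */
	scsipi_prevent(periph, PR_PREVENT, XS_CTL_IGNORE_ILLEGAL_REQUEST);

	/* on last close */
	scsipi_prevent(periph, PR_ALLOW, XS_CTL_IGNORE_ILLEGAL_REQUEST);
#endif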
1040 1.2 bouyer
1041 1.2 bouyer /*
1042 1.38.4.1 nathanw * scsipi_start:
1043 1.38.4.1 nathanw *
1044 1.38.4.1 nathanw * Send a START UNIT.
1045 1.2 bouyer */
1046 1.3 enami int
1047 1.38.4.1 nathanw scsipi_start(periph, type, flags)
1048 1.38.4.1 nathanw struct scsipi_periph *periph;
1049 1.2 bouyer int type, flags;
1050 1.2 bouyer {
1051 1.2 bouyer struct scsipi_start_stop scsipi_cmd;
1052 1.18 bouyer
1053 1.38.4.1 nathanw if (periph->periph_quirks & PQUIRK_NOSTARTUNIT)
1054 1.18 bouyer return 0;
1055 1.2 bouyer
1056 1.38.4.2 nathanw memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
1057 1.2 bouyer scsipi_cmd.opcode = START_STOP;
1058 1.2 bouyer scsipi_cmd.byte2 = 0x00;
1059 1.2 bouyer scsipi_cmd.how = type;
1060 1.38.4.1 nathanw
1061 1.38.4.1 nathanw return (scsipi_command(periph,
1062 1.3 enami (struct scsipi_generic *) &scsipi_cmd, sizeof(scsipi_cmd),
1063 1.29 bouyer 0, 0, SCSIPIRETRIES, (type & SSS_START) ? 60000 : 10000,
1064 1.29 bouyer NULL, flags));
1065 1.2 bouyer }
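/*
 * Illustrative sketch (not part of this file): spinning a disk up
 * before use.  SSS_START is the same bit used by the timeout logic
 * above; XS_CTL_SILENT just suppresses error chatter.
 */
#if 0
	(void) scsipi_start(periph, SSS_START, XS_CTL_SILENT);
#endif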
1066 1.2 bouyer
1067 1.2 bouyer /*
1068 1.38.4.1 nathanw * scsipi_mode_sense, scsipi_mode_sense_big:
1069 1.38.4.1 nathanw * get a sense page from a device
1070 1.2 bouyer */
1071 1.2 bouyer
1072 1.38.4.1 nathanw int
1073 1.38.4.1 nathanw scsipi_mode_sense(periph, byte2, page, data, len, flags, retries, timeout)
1074 1.38.4.1 nathanw struct scsipi_periph *periph;
1075 1.38.4.1 nathanw int byte2, page, len, flags, retries, timeout;
1076 1.38.4.1 nathanw struct scsipi_mode_header *data;
1077 1.38.4.1 nathanw {
1078 1.38.4.1 nathanw struct scsipi_mode_sense scsipi_cmd;
1079 1.38.4.1 nathanw int error;
1080 1.38.4.1 nathanw
1081 1.38.4.2 nathanw memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
1082 1.38.4.1 nathanw scsipi_cmd.opcode = MODE_SENSE;
1083 1.38.4.1 nathanw scsipi_cmd.byte2 = byte2;
1084 1.38.4.1 nathanw scsipi_cmd.page = page;
1085 1.38.4.1 nathanw if (scsipi_periph_bustype(periph) == SCSIPI_BUSTYPE_ATAPI)
1086 1.38.4.1 nathanw _lto2b(len, scsipi_cmd.u_len.atapi.length);
1087 1.38.4.1 nathanw else
1088 1.38.4.1 nathanw scsipi_cmd.u_len.scsi.length = len & 0xff;
1089 1.38.4.1 nathanw error = scsipi_command(periph, (struct scsipi_generic *)&scsipi_cmd,
1090 1.38.4.1 nathanw sizeof(scsipi_cmd), (void *)data, len, retries, timeout, NULL,
1091 1.38.4.1 nathanw flags | XS_CTL_DATA_IN);
1092 1.38.4.1 nathanw SC_DEBUG(periph, SCSIPI_DB2,
1093 1.38.4.1 nathanw ("scsipi_mode_sense: error=%d\n", error));
1094 1.38.4.1 nathanw return (error);
1095 1.38.4.1 nathanw }
1096 1.38.4.1 nathanw
1097 1.38.4.1 nathanw int
1098 1.38.4.1 nathanw scsipi_mode_sense_big(periph, byte2, page, data, len, flags, retries, timeout)
1099 1.38.4.1 nathanw struct scsipi_periph *periph;
1100 1.38.4.1 nathanw int byte2, page, len, flags, retries, timeout;
1101 1.38.4.1 nathanw struct scsipi_mode_header_big *data;
1102 1.38.4.1 nathanw {
1103 1.38.4.1 nathanw struct scsipi_mode_sense_big scsipi_cmd;
1104 1.38.4.1 nathanw int error;
1105 1.38.4.1 nathanw
1106 1.38.4.2 nathanw memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
1107 1.38.4.1 nathanw scsipi_cmd.opcode = MODE_SENSE_BIG;
1108 1.38.4.1 nathanw scsipi_cmd.byte2 = byte2;
1109 1.38.4.1 nathanw scsipi_cmd.page = page;
1110 1.38.4.1 nathanw _lto2b(len, scsipi_cmd.length);
1111 1.38.4.1 nathanw error = scsipi_command(periph, (struct scsipi_generic *)&scsipi_cmd,
1112 1.38.4.1 nathanw sizeof(scsipi_cmd), (void *)data, len, retries, timeout, NULL,
1113 1.38.4.1 nathanw flags | XS_CTL_DATA_IN);
1114 1.38.4.1 nathanw SC_DEBUG(periph, SCSIPI_DB2,
1115 1.38.4.1 nathanw ("scsipi_mode_sense_big: error=%d\n", error));
1116 1.38.4.1 nathanw return (error);
1117 1.38.4.1 nathanw }
1118 1.38.4.1 nathanw
1119 1.38.4.1 nathanw int
1120 1.38.4.1 nathanw scsipi_mode_select(periph, byte2, data, len, flags, retries, timeout)
1121 1.38.4.1 nathanw struct scsipi_periph *periph;
1122 1.38.4.1 nathanw int byte2, len, flags, retries, timeout;
1123 1.38.4.1 nathanw struct scsipi_mode_header *data;
1124 1.38.4.1 nathanw {
1125 1.38.4.1 nathanw struct scsipi_mode_select scsipi_cmd;
1126 1.38.4.1 nathanw int error;
1127 1.38.4.1 nathanw
1128 1.38.4.2 nathanw memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
1129 1.38.4.1 nathanw scsipi_cmd.opcode = MODE_SELECT;
1130 1.38.4.1 nathanw scsipi_cmd.byte2 = byte2;
1131 1.38.4.1 nathanw if (scsipi_periph_bustype(periph) == SCSIPI_BUSTYPE_ATAPI)
1132 1.38.4.1 nathanw _lto2b(len, scsipi_cmd.u_len.atapi.length);
1133 1.38.4.1 nathanw else
1134 1.38.4.1 nathanw scsipi_cmd.u_len.scsi.length = len & 0xff;
1135 1.38.4.1 nathanw error = scsipi_command(periph, (struct scsipi_generic *)&scsipi_cmd,
1136 1.38.4.1 nathanw sizeof(scsipi_cmd), (void *)data, len, retries, timeout, NULL,
1137 1.38.4.1 nathanw flags | XS_CTL_DATA_OUT);
1138 1.38.4.1 nathanw SC_DEBUG(periph, SCSIPI_DB2,
1139 1.38.4.1 nathanw ("scsipi_mode_select: error=%d\n", error));
1140 1.38.4.1 nathanw return (error);
1141 1.38.4.1 nathanw }
1142 1.38.4.1 nathanw
1143 1.38.4.1 nathanw int
1144 1.38.4.1 nathanw scsipi_mode_select_big(periph, byte2, data, len, flags, retries, timeout)
1145 1.38.4.1 nathanw struct scsipi_periph *periph;
1146 1.38.4.1 nathanw int byte2, len, flags, retries, timeout;
1147 1.38.4.1 nathanw struct scsipi_mode_header_big *data;
1148 1.38.4.1 nathanw {
1149 1.38.4.1 nathanw struct scsipi_mode_select_big scsipi_cmd;
1150 1.38.4.1 nathanw int error;
1151 1.38.4.1 nathanw
1152 1.38.4.2 nathanw memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
1153 1.38.4.1 nathanw scsipi_cmd.opcode = MODE_SELECT_BIG;
1154 1.38.4.1 nathanw scsipi_cmd.byte2 = byte2;
1155 1.38.4.1 nathanw _lto2b(len, scsipi_cmd.length);
1156 1.38.4.1 nathanw error = scsipi_command(periph, (struct scsipi_generic *)&scsipi_cmd,
1157 1.38.4.1 nathanw sizeof(scsipi_cmd), (void *)data, len, retries, timeout, NULL,
1158 1.38.4.1 nathanw flags | XS_CTL_DATA_OUT);
1159 1.38.4.1 nathanw SC_DEBUG(periph, SCSIPI_DB2,
1160 1.38.4.1 nathanw ("scsipi_mode_select: error=%d\n", error));
1161 1.38.4.1 nathanw return (error);
1162 1.38.4.1 nathanw }
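/*
 * Illustrative sketch (not part of this file): fetching a mode page
 * into a caller-supplied buffer.  The page code (0x08, caching) and
 * the buffer layout are examples only; the header must come first,
 * since scsipi_mode_sense() takes a struct scsipi_mode_header.
 */
#if 0
	struct {
		struct scsipi_mode_header header;
		u_int8_t page_data[0x20];
	} buf;
	int error;

	error = scsipi_mode_sense(periph, 0, 0x08,
	    &buf.header, sizeof(buf), XS_CTL_SILENT,
	    SCSIPIRETRIES, 10000);
#endif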
1163 1.38.4.1 nathanw
1164 1.38.4.1 nathanw /*
1165 1.38.4.1 nathanw * scsipi_done:
1166 1.38.4.1 nathanw *
1167 1.38.4.1 nathanw * This routine is called by an adapter's interrupt handler when
1168 1.38.4.1 nathanw * an xfer is completed.
1169 1.38.4.1 nathanw */
1170 1.38.4.1 nathanw void
1171 1.38.4.1 nathanw scsipi_done(xs)
1172 1.38.4.1 nathanw struct scsipi_xfer *xs;
1173 1.38.4.1 nathanw {
1174 1.38.4.1 nathanw struct scsipi_periph *periph = xs->xs_periph;
1175 1.38.4.1 nathanw struct scsipi_channel *chan = periph->periph_channel;
1176 1.38.4.1 nathanw int s, freezecnt;
1177 1.38.4.1 nathanw
1178 1.38.4.1 nathanw SC_DEBUG(periph, SCSIPI_DB2, ("scsipi_done\n"));
1179 1.38.4.1 nathanw #ifdef SCSIPI_DEBUG
1180 1.38.4.1 nathanw if (periph->periph_dbflags & SCSIPI_DB1)
1181 1.2 bouyer show_scsipi_cmd(xs);
1182 1.38.4.1 nathanw #endif
1183 1.2 bouyer
1184 1.38.4.1 nathanw s = splbio();
1185 1.2 bouyer /*
1186 1.38.4.1 nathanw * The resource this command was using is now free.
1187 1.3 enami */
1188 1.38.4.1 nathanw scsipi_put_resource(chan);
1189 1.38.4.1 nathanw xs->xs_periph->periph_sent--;
1190 1.2 bouyer
1191 1.38.4.1 nathanw /*
1192 1.38.4.1 nathanw * If the command was tagged, free the tag.
1193 1.38.4.1 nathanw */
1194 1.38.4.1 nathanw if (XS_CTL_TAGTYPE(xs) != 0)
1195 1.38.4.1 nathanw scsipi_put_tag(xs);
1196 1.38.4.1 nathanw else
1197 1.38.4.1 nathanw periph->periph_flags &= ~PERIPH_UNTAG;
1198 1.2 bouyer
1199 1.38.4.1 nathanw /* Mark the command as `done'. */
1200 1.38.4.1 nathanw xs->xs_status |= XS_STS_DONE;
1201 1.38.4.1 nathanw
1202 1.38.4.1 nathanw #ifdef DIAGNOSTIC
1203 1.38.4.1 nathanw if ((xs->xs_control & (XS_CTL_ASYNC|XS_CTL_POLL)) ==
1204 1.38.4.1 nathanw (XS_CTL_ASYNC|XS_CTL_POLL))
1205 1.38.4.1 nathanw panic("scsipi_done: ASYNC and POLL");
1206 1.38.4.1 nathanw #endif
1207 1.2 bouyer
1208 1.2 bouyer /*
1209 1.38.4.1 nathanw * If the xfer had an error of any sort, freeze the
1210 1.38.4.1 nathanw * periph's queue. Freeze it again if we were requested
1211 1.38.4.1 nathanw * to do so in the xfer.
1212 1.2 bouyer */
1213 1.38.4.1 nathanw freezecnt = 0;
1214 1.38.4.1 nathanw if (xs->error != XS_NOERROR)
1215 1.38.4.1 nathanw freezecnt++;
1216 1.38.4.1 nathanw if (xs->xs_control & XS_CTL_FREEZE_PERIPH)
1217 1.38.4.1 nathanw freezecnt++;
1218 1.38.4.1 nathanw if (freezecnt != 0)
1219 1.38.4.1 nathanw scsipi_periph_freeze(periph, freezecnt);
1220 1.2 bouyer
1221 1.38.4.1 nathanw /*
1222 1.38.4.1 nathanw * Record the xfer with a pending sense, in case a SCSI reset is
1223 1.38.4.1 nathanw * received before the thread is woken up.
1224 1.38.4.1 nathanw */
1225 1.38.4.1 nathanw if (xs->error == XS_BUSY && xs->status == SCSI_CHECK) {
1226 1.38.4.1 nathanw periph->periph_flags |= PERIPH_SENSE;
1227 1.38.4.1 nathanw periph->periph_xscheck = xs;
1228 1.20 bouyer }
1229 1.2 bouyer
1230 1.38.4.1 nathanw /*
1231 1.38.4.1 nathanw * If this was an xfer that was not to complete asynchronously,
1232 1.38.4.1 nathanw * let the requesting thread perform error checking/handling
1233 1.38.4.1 nathanw * in its context.
1234 1.38.4.1 nathanw */
1235 1.38.4.1 nathanw if ((xs->xs_control & XS_CTL_ASYNC) == 0) {
1236 1.38.4.1 nathanw splx(s);
1237 1.2 bouyer /*
1238 1.38.4.1 nathanw * If it's a polling job, just return, to unwind the
1239 1.38.4.1 nathanw * call graph. We don't need to restart the queue,
1240 1.38.4.1 nathanw * because polling jobs are treated specially, and
1241 1.38.4.1 nathanw * are really only used during crash dumps anyway
1242 1.38.4.1 nathanw * (XXX or during boot-time autoconfiguration of
1243 1.38.4.1 nathanw * ATAPI devices).
1244 1.2 bouyer */
1245 1.38.4.1 nathanw if (xs->xs_control & XS_CTL_POLL)
1246 1.38.4.1 nathanw return;
1247 1.38.4.1 nathanw wakeup(xs);
1248 1.38.4.1 nathanw goto out;
1249 1.2 bouyer }
1250 1.38.4.1 nathanw
1251 1.9 scottr /*
1252 1.38.4.1 nathanw * Catch the extremely common case of I/O completing
1253 1.38.4.1 nathanw * without error; no use in taking a context switch
1254 1.38.4.1 nathanw * if we can handle it in interrupt context.
1255 1.9 scottr */
1256 1.38.4.1 nathanw if (xs->error == XS_NOERROR) {
1257 1.22 pk splx(s);
1258 1.38.4.1 nathanw (void) scsipi_complete(xs);
1259 1.38.4.1 nathanw goto out;
1260 1.22 pk }
1261 1.2 bouyer
1262 1.2 bouyer /*
1263 1.38.4.1 nathanw * There is an error on this xfer. Put it on the channel's
1264 1.38.4.1 nathanw * completion queue, and wake up the completion thread.
1265 1.38.4.1 nathanw */
1266 1.38.4.1 nathanw TAILQ_INSERT_TAIL(&chan->chan_complete, xs, channel_q);
1267 1.38.4.1 nathanw splx(s);
1268 1.38.4.1 nathanw wakeup(&chan->chan_complete);
1269 1.2 bouyer
1270 1.38.4.1 nathanw out:
1271 1.38.4.1 nathanw /*
1272 1.38.4.1 nathanw * If there are more xfers on the channel's queue, attempt to
1273 1.38.4.1 nathanw * run them.
1274 1.38.4.1 nathanw */
1275 1.38.4.1 nathanw scsipi_run_queue(chan);
1276 1.2 bouyer }
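/*
 * Illustrative sketch (not part of this file): the tail of an HBA
 * interrupt handler fills in the completion status before handing
 * the xfer back.  xs->status, xs->error, xs->resid and scsipi_done()
 * are taken from this file; SCSI_OK is assumed from scsi_all.h.
 */
#if 0
	xs->status = SCSI_OK;
	xs->resid = 0;			/* all data transferred */
	xs->error = XS_NOERROR;
	scsipi_done(xs);
#endif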
1277 1.2 bouyer
1278 1.38.4.1 nathanw /*
1279 1.38.4.1 nathanw * scsipi_complete:
1280 1.38.4.1 nathanw *
1281 1.38.4.1 nathanw * Completion of a scsipi_xfer. This is the guts of scsipi_done().
1282 1.38.4.1 nathanw *
1283 1.38.4.1 nathanw * NOTE: This routine MUST be called with valid thread context
1284 1.38.4.1 nathanw * except for the case where the following two conditions are
1285 1.38.4.1 nathanw * true:
1286 1.38.4.1 nathanw *
1287 1.38.4.1 nathanw * xs->error == XS_NOERROR
1288 1.38.4.1 nathanw * XS_CTL_ASYNC is set in xs->xs_control
1289 1.38.4.1 nathanw *
1290 1.38.4.1 nathanw * The semantics of this routine can be tricky, so here is an
1291 1.38.4.1 nathanw * explanation:
1292 1.38.4.1 nathanw *
1293 1.38.4.1 nathanw * 0 Xfer completed successfully.
1294 1.38.4.1 nathanw *
1295 1.38.4.1 nathanw * ERESTART Xfer had an error, but was restarted.
1296 1.38.4.1 nathanw *
1297 1.38.4.1 nathanw * anything else Xfer had an error, return value is Unix
1298 1.38.4.1 nathanw * errno.
1299 1.38.4.1 nathanw *
1300 1.38.4.1 nathanw * If the return value is anything but ERESTART:
1301 1.38.4.1 nathanw *
1302 1.38.4.1 nathanw * - If XS_CTL_ASYNC is set, `xs' has been freed back to
1303 1.38.4.1 nathanw * the pool.
1304 1.38.4.1 nathanw * - If there is a buf associated with the xfer,
1305 1.38.4.1 nathanw * it has been biodone()'d.
1306 1.38.4.1 nathanw */
1307 1.3 enami int
1308 1.38.4.1 nathanw scsipi_complete(xs)
1309 1.2 bouyer struct scsipi_xfer *xs;
1310 1.2 bouyer {
1311 1.38.4.1 nathanw struct scsipi_periph *periph = xs->xs_periph;
1312 1.38.4.1 nathanw struct scsipi_channel *chan = periph->periph_channel;
1313 1.38.4.1 nathanw struct buf *bp;
1314 1.38.4.1 nathanw int error, s;
1315 1.2 bouyer
1316 1.38.4.1 nathanw #ifdef DIAGNOSTIC
1317 1.38.4.1 nathanw if ((xs->xs_control & XS_CTL_ASYNC) != 0 && xs->bp == NULL)
1318 1.38.4.1 nathanw panic("scsipi_complete: XS_CTL_ASYNC but no buf");
1319 1.38.4.1 nathanw #endif
1320 1.2 bouyer /*
1321 1.38.4.1 nathanw * If command terminated with a CHECK CONDITION, we need to issue a
1322 1.38.4.1 nathanw * REQUEST_SENSE command. Once the REQUEST_SENSE has been processed
1323 1.38.4.1 nathanw * we'll have the real status.
1324 1.38.4.1 nathanw * Must be processed at splbio() to avoid missing a SCSI bus reset
1325 1.38.4.1 nathanw * for this command.
1326 1.38.4.1 nathanw */
1327 1.38.4.1 nathanw s = splbio();
1328 1.38.4.1 nathanw if (xs->error == XS_BUSY && xs->status == SCSI_CHECK) {
1329 1.38.4.1 nathanw /* request sense for a request sense ? */
1330 1.38.4.1 nathanw if (xs->xs_control & XS_CTL_REQSENSE) {
1331 1.38.4.1 nathanw scsipi_printaddr(periph);
1332 1.38.4.2 nathanw printf("request sense for a request sense ?\n");
1333 1.38.4.1 nathanw /* XXX maybe we should reset the device ? */
1334 1.38.4.1 nathanw /* we've been frozen because xs->error != XS_NOERROR */
1335 1.38.4.1 nathanw scsipi_periph_thaw(periph, 1);
1336 1.38.4.1 nathanw splx(s);
1337 1.38.4.2 nathanw if (xs->resid < xs->datalen) {
1338 1.38.4.2 nathanw printf("we read %d bytes of sense anyway:\n",
1339 1.38.4.2 nathanw xs->datalen - xs->resid);
1340 1.38.4.2 nathanw #ifdef SCSIVERBOSE
1341 1.38.4.2 nathanw scsipi_print_sense_data((void *)xs->data, 0);
1342 1.38.4.2 nathanw #endif
1343 1.38.4.2 nathanw }
1344 1.38.4.1 nathanw return EINVAL;
1345 1.38.4.1 nathanw }
1346 1.38.4.1 nathanw scsipi_request_sense(xs);
1347 1.38.4.1 nathanw }
1348 1.38.4.1 nathanw splx(s);
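	/*
	 * If a REQUEST_SENSE was issued above, scsipi_request_sense()
	 * has already run it to completion and rewritten xs->error
	 * (XS_SENSE, XS_RESET or XS_DRIVER_STUFFUP), so the error
	 * switch below operates on the final status.
	 */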
1349 1.38.4.2 nathanw
1350 1.38.4.1 nathanw /*
1351 1.38.4.1 nathanw * If it's a user level request, bypass all usual completion
1352 1.38.4.1 nathanw 	 * processing; let the user work it out.
1353 1.2 bouyer */
1354 1.38.4.1 nathanw if ((xs->xs_control & XS_CTL_USERCMD) != 0) {
1355 1.38.4.1 nathanw SC_DEBUG(periph, SCSIPI_DB3, ("calling user done()\n"));
1356 1.38.4.1 nathanw if (xs->error != XS_NOERROR)
1357 1.38.4.1 nathanw scsipi_periph_thaw(periph, 1);
1358 1.38.4.1 nathanw scsipi_user_done(xs);
1359 1.38.4.1 nathanw 		SC_DEBUG(periph, SCSIPI_DB3, ("returned from user done()\n"));
1360 1.38.4.1 nathanw return 0;
1361 1.38.4.1 nathanw }
1362 1.38.4.1 nathanw
1363 1.2 bouyer switch (xs->error) {
1364 1.38.4.1 nathanw case XS_NOERROR:
1365 1.2 bouyer error = 0;
1366 1.2 bouyer break;
1367 1.2 bouyer
1368 1.2 bouyer case XS_SENSE:
1369 1.13 bouyer case XS_SHORTSENSE:
1370 1.38.4.1 nathanw error = (*chan->chan_bustype->bustype_interpret_sense)(xs);
1371 1.2 bouyer break;
1372 1.2 bouyer
1373 1.38.4.1 nathanw case XS_RESOURCE_SHORTAGE:
1374 1.38.4.1 nathanw /*
1375 1.38.4.1 nathanw * XXX Should freeze channel's queue.
1376 1.38.4.1 nathanw */
1377 1.38.4.1 nathanw scsipi_printaddr(periph);
1378 1.38.4.1 nathanw printf("adapter resource shortage\n");
1379 1.38.4.1 nathanw /* FALLTHROUGH */
1380 1.38.4.1 nathanw
1381 1.2 bouyer case XS_BUSY:
1382 1.38.4.1 nathanw if (xs->error == XS_BUSY && xs->status == SCSI_QUEUE_FULL) {
1383 1.38.4.1 nathanw struct scsipi_max_openings mo;
1384 1.38.4.1 nathanw
1385 1.38.4.1 nathanw /*
1386 1.38.4.1 nathanw * We set the openings to active - 1, assuming that
1387 1.38.4.1 nathanw * the command that got us here is the first one that
1388 1.38.4.1 nathanw * can't fit into the device's queue. If that's not
1389 1.38.4.1 nathanw * the case, I guess we'll find out soon enough.
1390 1.38.4.1 nathanw */
1391 1.38.4.1 nathanw mo.mo_target = periph->periph_target;
1392 1.38.4.1 nathanw mo.mo_lun = periph->periph_lun;
1393 1.38.4.1 nathanw if (periph->periph_active < periph->periph_openings)
1394 1.38.4.1 nathanw mo.mo_openings = periph->periph_active - 1;
1395 1.2 bouyer else
1396 1.38.4.1 nathanw mo.mo_openings = periph->periph_openings - 1;
1397 1.38.4.1 nathanw #ifdef DIAGNOSTIC
1398 1.38.4.1 nathanw if (mo.mo_openings < 0) {
1399 1.38.4.1 nathanw scsipi_printaddr(periph);
1400 1.38.4.1 nathanw printf("QUEUE FULL resulted in < 0 openings\n");
1401 1.38.4.1 nathanw panic("scsipi_done");
1402 1.38.4.1 nathanw }
1403 1.2 bouyer #endif
1404 1.38.4.1 nathanw if (mo.mo_openings == 0) {
1405 1.38.4.1 nathanw scsipi_printaddr(periph);
1406 1.38.4.1 nathanw printf("QUEUE FULL resulted in 0 openings\n");
1407 1.38.4.1 nathanw mo.mo_openings = 1;
1408 1.38.4.1 nathanw }
1409 1.38.4.1 nathanw scsipi_async_event(chan, ASYNC_EVENT_MAX_OPENINGS, &mo);
1410 1.38.4.1 nathanw error = ERESTART;
1411 1.38.4.1 nathanw } else if (xs->xs_retries != 0) {
1412 1.38.4.1 nathanw xs->xs_retries--;
1413 1.38.4.1 nathanw /*
1414 1.38.4.1 nathanw * Wait one second, and try again.
1415 1.38.4.1 nathanw */
1416 1.38.4.1 nathanw if (xs->xs_control & XS_CTL_POLL)
1417 1.38.4.1 nathanw delay(1000000);
1418 1.38.4.1 nathanw else {
1419 1.38.4.1 nathanw scsipi_periph_freeze(periph, 1);
1420 1.38.4.1 nathanw callout_reset(&periph->periph_callout,
1421 1.38.4.1 nathanw hz, scsipi_periph_timed_thaw, periph);
1422 1.38.4.1 nathanw }
1423 1.38.4.1 nathanw error = ERESTART;
1424 1.38.4.1 nathanw } else
1425 1.38.4.1 nathanw error = EBUSY;
1426 1.38.4.1 nathanw break;
1427 1.38.4.1 nathanw
1428 1.38.4.1 nathanw case XS_REQUEUE:
1429 1.38.4.1 nathanw error = ERESTART;
1430 1.38.4.1 nathanw break;
1431 1.38.4.1 nathanw
1432 1.2 bouyer case XS_TIMEOUT:
1433 1.38.4.1 nathanw if (xs->xs_retries != 0) {
1434 1.38.4.1 nathanw xs->xs_retries--;
1435 1.38.4.1 nathanw error = ERESTART;
1436 1.38.4.1 nathanw } else
1437 1.38.4.1 nathanw error = EIO;
1438 1.2 bouyer break;
1439 1.2 bouyer
1440 1.2 bouyer case XS_SELTIMEOUT:
1441 1.2 bouyer /* XXX Disable device? */
1442 1.12 thorpej error = EIO;
1443 1.12 thorpej break;
1444 1.12 thorpej
1445 1.12 thorpej case XS_RESET:
1446 1.38.4.1 nathanw if (xs->xs_control & XS_CTL_REQSENSE) {
1447 1.38.4.1 nathanw /*
1448 1.38.4.1 nathanw 			 * The request sense was interrupted by a reset;
1449 1.38.4.1 nathanw 			 * signal it with an EINTR return code.
1450 1.38.4.1 nathanw */
1451 1.38.4.1 nathanw error = EINTR;
1452 1.38.4.1 nathanw } else {
1453 1.38.4.1 nathanw if (xs->xs_retries != 0) {
1454 1.38.4.1 nathanw xs->xs_retries--;
1455 1.38.4.1 nathanw error = ERESTART;
1456 1.38.4.1 nathanw } else
1457 1.38.4.1 nathanw error = EIO;
1458 1.12 thorpej }
1459 1.2 bouyer break;
1460 1.2 bouyer
1461 1.2 bouyer default:
1462 1.38.4.1 nathanw scsipi_printaddr(periph);
1463 1.38.4.1 nathanw printf("invalid return code from adapter: %d\n", xs->error);
1464 1.2 bouyer error = EIO;
1465 1.2 bouyer break;
1466 1.2 bouyer }
1467 1.2 bouyer
1468 1.38.4.1 nathanw s = splbio();
1469 1.38.4.1 nathanw if (error == ERESTART) {
1470 1.38.4.1 nathanw /*
1471 1.38.4.1 nathanw * If we get here, the periph has been thawed and frozen
1472 1.38.4.1 nathanw * again if we had to issue recovery commands. Alternatively,
1473 1.38.4.1 nathanw * it may have been frozen again and in a timed thaw. In
1474 1.38.4.1 nathanw * any case, we thaw the periph once we re-enqueue the
1475 1.38.4.1 nathanw * command. Once the periph is fully thawed, it will begin
1476 1.38.4.1 nathanw * operation again.
1477 1.38.4.1 nathanw */
1478 1.38.4.1 nathanw xs->error = XS_NOERROR;
1479 1.38.4.1 nathanw xs->status = SCSI_OK;
1480 1.38.4.1 nathanw xs->xs_status &= ~XS_STS_DONE;
1481 1.38.4.1 nathanw xs->xs_requeuecnt++;
1482 1.38.4.1 nathanw error = scsipi_enqueue(xs);
1483 1.38.4.1 nathanw if (error == 0) {
1484 1.38.4.1 nathanw scsipi_periph_thaw(periph, 1);
1485 1.38.4.1 nathanw splx(s);
1486 1.38.4.1 nathanw return (ERESTART);
1487 1.38.4.1 nathanw }
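		/*
		 * scsipi_enqueue() failed; this can only happen for a
		 * polled xfer when other work is already queued.  Fall
		 * through and finish the xfer with the error it returned.
		 */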
1488 1.38.4.1 nathanw }
1489 1.38.4.1 nathanw
1490 1.38.4.1 nathanw /*
1491 1.38.4.1 nathanw 	 * scsipi_done() froze the periph because xs->error was not
1492 1.38.4.1 nathanw 	 * XS_NOERROR; thaw it here.
1493 1.38.4.1 nathanw */
1494 1.38.4.1 nathanw if (xs->error != XS_NOERROR)
1495 1.38.4.1 nathanw scsipi_periph_thaw(periph, 1);
1496 1.38.4.1 nathanw
1497 1.38.4.1 nathanw
1498 1.38.4.1 nathanw if (periph->periph_switch->psw_done)
1499 1.38.4.1 nathanw periph->periph_switch->psw_done(xs);
1500 1.38.4.1 nathanw if ((bp = xs->bp) != NULL) {
1501 1.38.4.1 nathanw if (error) {
1502 1.38.4.1 nathanw bp->b_error = error;
1503 1.38.4.1 nathanw bp->b_flags |= B_ERROR;
1504 1.38.4.1 nathanw bp->b_resid = bp->b_bcount;
1505 1.38.4.1 nathanw } else {
1506 1.38.4.1 nathanw bp->b_error = 0;
1507 1.38.4.1 nathanw bp->b_resid = xs->resid;
1508 1.38.4.1 nathanw }
1509 1.38.4.1 nathanw biodone(bp);
1510 1.38.4.1 nathanw }
1511 1.38.4.1 nathanw
1512 1.38.4.1 nathanw if (xs->xs_control & XS_CTL_ASYNC)
1513 1.38.4.1 nathanw scsipi_put_xs(xs);
1514 1.38.4.1 nathanw splx(s);
1515 1.38.4.1 nathanw
1516 1.3 enami return (error);
1517 1.2 bouyer }
1518 1.2 bouyer
1519 1.14 thorpej /*
1520 1.38.4.1 nathanw * Issue a request sense for the given scsipi_xfer. Called when the xfer
1521 1.38.4.1 nathanw * returns with a CHECK_CONDITION status. Must be called in valid thread
1522 1.38.4.1 nathanw * context and at splbio().
1523 1.38.4.1 nathanw */
1524 1.38.4.1 nathanw
1525 1.38.4.1 nathanw void
1526 1.38.4.1 nathanw scsipi_request_sense(xs)
1527 1.38.4.1 nathanw struct scsipi_xfer *xs;
1528 1.38.4.1 nathanw {
1529 1.38.4.1 nathanw struct scsipi_periph *periph = xs->xs_periph;
1530 1.38.4.1 nathanw int flags, error;
1531 1.38.4.1 nathanw struct scsipi_sense cmd;
1532 1.38.4.1 nathanw
1533 1.38.4.1 nathanw periph->periph_flags |= PERIPH_SENSE;
1534 1.38.4.1 nathanw
1535 1.38.4.1 nathanw 	/* If the original command was polled, poll the request sense too. */
1536 1.38.4.1 nathanw flags = xs->xs_control & XS_CTL_POLL;
1537 1.38.4.1 nathanw /* Polling commands can't sleep */
1538 1.38.4.1 nathanw if (flags)
1539 1.38.4.1 nathanw flags |= XS_CTL_NOSLEEP;
1540 1.38.4.1 nathanw
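	/*
	 * XS_CTL_THAW_PERIPH lets scsipi_enqueue() thaw the periph (it
	 * was frozen because the original command had an error) so that
	 * the sense command can run; XS_CTL_FREEZE_PERIPH asks for the
	 * periph to be frozen again when the sense xfer completes,
	 * keeping the freeze count balanced for scsipi_complete().
	 */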
1541 1.38.4.1 nathanw flags |= XS_CTL_REQSENSE | XS_CTL_URGENT | XS_CTL_DATA_IN |
1542 1.38.4.1 nathanw XS_CTL_THAW_PERIPH | XS_CTL_FREEZE_PERIPH;
1543 1.38.4.1 nathanw
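	/*
	 * Build a 6-byte REQUEST SENSE CDB.  The length field is the
	 * allocation length, i.e. the maximum number of sense bytes
	 * the target may return.
	 */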
1544 1.38.4.2 nathanw memset(&cmd, 0, sizeof(cmd));
1545 1.38.4.1 nathanw cmd.opcode = REQUEST_SENSE;
1546 1.38.4.1 nathanw cmd.length = sizeof(struct scsipi_sense_data);
1547 1.38.4.1 nathanw
1548 1.38.4.1 nathanw error = scsipi_command(periph,
1549 1.38.4.1 nathanw (struct scsipi_generic *) &cmd, sizeof(cmd),
1550 1.38.4.1 nathanw (u_char*)&xs->sense.scsi_sense, sizeof(struct scsipi_sense_data),
1551 1.38.4.1 nathanw 0, 1000, NULL, flags);
1552 1.38.4.1 nathanw periph->periph_flags &= ~PERIPH_SENSE;
1553 1.38.4.1 nathanw periph->periph_xscheck = NULL;
1554 1.38.4.1 nathanw switch(error) {
1555 1.38.4.1 nathanw case 0:
1556 1.38.4.1 nathanw /* we have a valid sense */
1557 1.38.4.1 nathanw xs->error = XS_SENSE;
1558 1.38.4.1 nathanw return;
1559 1.38.4.1 nathanw case EINTR:
1560 1.38.4.1 nathanw /* REQUEST_SENSE interrupted by bus reset. */
1561 1.38.4.1 nathanw xs->error = XS_RESET;
1562 1.38.4.1 nathanw return;
1563 1.38.4.1 nathanw case EIO:
1564 1.38.4.1 nathanw 		/* request sense couldn't be performed */
1565 1.38.4.1 nathanw 		/*
1566 1.38.4.1 nathanw 		 * XXX this isn't quite right but we don't have anything
1567 1.38.4.1 nathanw * better for now
1568 1.38.4.1 nathanw */
1569 1.38.4.1 nathanw xs->error = XS_DRIVER_STUFFUP;
1570 1.38.4.1 nathanw return;
1571 1.38.4.1 nathanw default:
1572 1.38.4.1 nathanw /* Notify that request sense failed. */
1573 1.38.4.1 nathanw xs->error = XS_DRIVER_STUFFUP;
1574 1.38.4.1 nathanw scsipi_printaddr(periph);
1575 1.38.4.1 nathanw printf("request sense failed with error %d\n", error);
1576 1.38.4.1 nathanw return;
1577 1.38.4.1 nathanw }
1578 1.38.4.1 nathanw }
1579 1.38.4.1 nathanw
1580 1.38.4.1 nathanw /*
1581 1.38.4.1 nathanw * scsipi_enqueue:
1582 1.38.4.1 nathanw *
1583 1.38.4.1 nathanw * Enqueue an xfer on a channel.
1584 1.14 thorpej */
1585 1.14 thorpej int
1586 1.38.4.1 nathanw scsipi_enqueue(xs)
1587 1.38.4.1 nathanw struct scsipi_xfer *xs;
1588 1.14 thorpej {
1589 1.38.4.1 nathanw struct scsipi_channel *chan = xs->xs_periph->periph_channel;
1590 1.38.4.1 nathanw struct scsipi_xfer *qxs;
1591 1.38.4.1 nathanw int s;
1592 1.14 thorpej
1593 1.14 thorpej s = splbio();
1594 1.38.4.1 nathanw
1595 1.38.4.1 nathanw /*
1596 1.38.4.1 nathanw * If the xfer is to be polled, and there are already jobs on
1597 1.38.4.1 nathanw * the queue, we can't proceed.
1598 1.38.4.1 nathanw */
1599 1.38.4.1 nathanw if ((xs->xs_control & XS_CTL_POLL) != 0 &&
1600 1.38.4.1 nathanw TAILQ_FIRST(&chan->chan_queue) != NULL) {
1601 1.38.4.1 nathanw splx(s);
1602 1.38.4.1 nathanw xs->error = XS_DRIVER_STUFFUP;
1603 1.38.4.1 nathanw return (EAGAIN);
1604 1.38.4.1 nathanw }
1605 1.38.4.1 nathanw
1606 1.38.4.1 nathanw /*
1607 1.38.4.1 nathanw * If we have an URGENT xfer, it's an error recovery command
1608 1.38.4.1 nathanw * and it should just go on the head of the channel's queue.
1609 1.38.4.1 nathanw */
1610 1.38.4.1 nathanw if (xs->xs_control & XS_CTL_URGENT) {
1611 1.38.4.1 nathanw TAILQ_INSERT_HEAD(&chan->chan_queue, xs, channel_q);
1612 1.38.4.1 nathanw goto out;
1613 1.38.4.1 nathanw }
1614 1.38.4.1 nathanw
1615 1.38.4.1 nathanw /*
1616 1.38.4.1 nathanw * If this xfer has already been on the queue before, we
1617 1.38.4.1 nathanw * need to reinsert it in the correct order. That order is:
1618 1.38.4.1 nathanw *
1619 1.38.4.1 nathanw * Immediately before the first xfer for this periph
1620 1.38.4.1 nathanw * with a requeuecnt less than xs->xs_requeuecnt.
1621 1.38.4.1 nathanw *
1622 1.38.4.1 nathanw * Failing that, at the end of the queue. (We'll end up
1623 1.38.4.1 nathanw * there naturally.)
1624 1.38.4.1 nathanw */
1625 1.38.4.1 nathanw if (xs->xs_requeuecnt != 0) {
1626 1.38.4.1 nathanw for (qxs = TAILQ_FIRST(&chan->chan_queue); qxs != NULL;
1627 1.38.4.1 nathanw qxs = TAILQ_NEXT(qxs, channel_q)) {
1628 1.38.4.1 nathanw if (qxs->xs_periph == xs->xs_periph &&
1629 1.38.4.1 nathanw qxs->xs_requeuecnt < xs->xs_requeuecnt)
1630 1.38.4.1 nathanw break;
1631 1.38.4.1 nathanw }
1632 1.38.4.1 nathanw if (qxs != NULL) {
1633 1.38.4.1 nathanw TAILQ_INSERT_AFTER(&chan->chan_queue, qxs, xs,
1634 1.38.4.1 nathanw channel_q);
1635 1.38.4.1 nathanw goto out;
1636 1.38.4.1 nathanw }
1637 1.14 thorpej }
1638 1.38.4.1 nathanw TAILQ_INSERT_TAIL(&chan->chan_queue, xs, channel_q);
1639 1.38.4.1 nathanw out:
1640 1.38.4.1 nathanw if (xs->xs_control & XS_CTL_THAW_PERIPH)
1641 1.38.4.1 nathanw scsipi_periph_thaw(xs->xs_periph, 1);
1642 1.14 thorpej splx(s);
1643 1.38.4.1 nathanw return (0);
1644 1.14 thorpej }
1645 1.14 thorpej
1646 1.14 thorpej /*
1647 1.38.4.1 nathanw * scsipi_run_queue:
1648 1.38.4.1 nathanw *
1649 1.38.4.1 nathanw * Start as many xfers as possible running on the channel.
1650 1.14 thorpej */
1651 1.14 thorpej void
1652 1.38.4.1 nathanw scsipi_run_queue(chan)
1653 1.38.4.1 nathanw struct scsipi_channel *chan;
1654 1.14 thorpej {
1655 1.38.4.1 nathanw struct scsipi_xfer *xs;
1656 1.38.4.1 nathanw struct scsipi_periph *periph;
1657 1.14 thorpej int s;
1658 1.14 thorpej
1659 1.38.4.1 nathanw for (;;) {
1660 1.38.4.1 nathanw s = splbio();
1661 1.38.4.1 nathanw
1662 1.38.4.1 nathanw /*
1663 1.38.4.1 nathanw * If the channel is frozen, we can't do any work right
1664 1.38.4.1 nathanw * now.
1665 1.38.4.1 nathanw */
1666 1.38.4.1 nathanw if (chan->chan_qfreeze != 0) {
1667 1.38.4.1 nathanw splx(s);
1668 1.38.4.1 nathanw return;
1669 1.38.4.1 nathanw }
1670 1.38.4.1 nathanw
1671 1.38.4.1 nathanw /*
1672 1.38.4.1 nathanw * Look for work to do, and make sure we can do it.
1673 1.38.4.1 nathanw */
1674 1.38.4.1 nathanw for (xs = TAILQ_FIRST(&chan->chan_queue); xs != NULL;
1675 1.38.4.1 nathanw xs = TAILQ_NEXT(xs, channel_q)) {
1676 1.38.4.1 nathanw periph = xs->xs_periph;
1677 1.38.4.1 nathanw
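			/*
			 * Skip this xfer if the periph already has all of
			 * its openings in use, if its queue is frozen, or
			 * if an untagged command is outstanding on it.
			 */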
1678 1.38.4.1 nathanw if ((periph->periph_sent >= periph->periph_openings) ||
1679 1.38.4.1 nathanw periph->periph_qfreeze != 0 ||
1680 1.38.4.1 nathanw (periph->periph_flags & PERIPH_UNTAG) != 0)
1681 1.38.4.1 nathanw continue;
1682 1.38.4.1 nathanw
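			/*
			 * While the periph is recovering or waiting for
			 * sense data, only URGENT (recovery) xfers may be
			 * started on it.
			 */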
1683 1.38.4.1 nathanw if ((periph->periph_flags &
1684 1.38.4.1 nathanw (PERIPH_RECOVERING | PERIPH_SENSE)) != 0 &&
1685 1.38.4.1 nathanw (xs->xs_control & XS_CTL_URGENT) == 0)
1686 1.38.4.1 nathanw continue;
1687 1.38.4.1 nathanw
1688 1.38.4.1 nathanw /*
1689 1.38.4.1 nathanw * We can issue this xfer!
1690 1.38.4.1 nathanw */
1691 1.38.4.1 nathanw goto got_one;
1692 1.38.4.1 nathanw }
1693 1.38.4.1 nathanw
1694 1.38.4.1 nathanw /*
1695 1.38.4.1 nathanw * Can't find any work to do right now.
1696 1.38.4.1 nathanw */
1697 1.38.4.1 nathanw splx(s);
1698 1.38.4.1 nathanw return;
1699 1.38.4.1 nathanw
1700 1.38.4.1 nathanw got_one:
1701 1.38.4.1 nathanw /*
1702 1.38.4.1 nathanw * Have an xfer to run. Allocate a resource from
1703 1.38.4.1 nathanw * the adapter to run it. If we can't allocate that
1704 1.38.4.1 nathanw * resource, we don't dequeue the xfer.
1705 1.38.4.1 nathanw */
1706 1.38.4.1 nathanw if (scsipi_get_resource(chan) == 0) {
1707 1.38.4.1 nathanw /*
1708 1.38.4.1 nathanw * Adapter is out of resources. If the adapter
1709 1.38.4.1 nathanw * supports it, attempt to grow them.
1710 1.38.4.1 nathanw */
1711 1.38.4.1 nathanw if (scsipi_grow_resources(chan) == 0) {
1712 1.38.4.1 nathanw /*
1713 1.38.4.1 nathanw * Wasn't able to grow resources,
1714 1.38.4.1 nathanw * nothing more we can do.
1715 1.38.4.1 nathanw */
1716 1.38.4.1 nathanw if (xs->xs_control & XS_CTL_POLL) {
1717 1.38.4.1 nathanw scsipi_printaddr(xs->xs_periph);
1718 1.38.4.1 nathanw printf("polling command but no "
1719 1.38.4.1 nathanw "adapter resources");
1720 1.38.4.1 nathanw /* We'll panic shortly... */
1721 1.38.4.1 nathanw }
1722 1.38.4.1 nathanw splx(s);
1723 1.38.4.1 nathanw
1724 1.38.4.1 nathanw /*
1725 1.38.4.1 nathanw 				 * XXX: We should be able to note that
1726 1.38.4.1 nathanw 				 * XXX: resources are needed here!
1727 1.38.4.1 nathanw */
1728 1.38.4.1 nathanw return;
1729 1.38.4.1 nathanw }
1730 1.38.4.1 nathanw /*
1731 1.38.4.1 nathanw * scsipi_grow_resources() allocated the resource
1732 1.38.4.1 nathanw * for us.
1733 1.38.4.1 nathanw */
1734 1.38.4.1 nathanw }
1735 1.38.4.1 nathanw
1736 1.38.4.1 nathanw /*
1737 1.38.4.1 nathanw * We have a resource to run this xfer, do it!
1738 1.38.4.1 nathanw */
1739 1.38.4.1 nathanw TAILQ_REMOVE(&chan->chan_queue, xs, channel_q);
1740 1.38.4.1 nathanw
1741 1.38.4.1 nathanw /*
1742 1.38.4.1 nathanw * If the command is to be tagged, allocate a tag ID
1743 1.38.4.1 nathanw * for it.
1744 1.38.4.1 nathanw */
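		/*
		 * An untagged command marks the periph PERIPH_UNTAG, which
		 * the queue scan above uses to hold off further commands
		 * to this periph while the untagged one is outstanding.
		 */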
1745 1.38.4.1 nathanw if (XS_CTL_TAGTYPE(xs) != 0)
1746 1.38.4.1 nathanw scsipi_get_tag(xs);
1747 1.38.4.1 nathanw else
1748 1.38.4.1 nathanw periph->periph_flags |= PERIPH_UNTAG;
1749 1.38.4.1 nathanw periph->periph_sent++;
1750 1.38.4.1 nathanw splx(s);
1751 1.38.4.1 nathanw
1752 1.38.4.1 nathanw scsipi_adapter_request(chan, ADAPTER_REQ_RUN_XFER, xs);
1753 1.38.4.1 nathanw }
1754 1.38.4.1 nathanw #ifdef DIAGNOSTIC
1755 1.38.4.1 nathanw panic("scsipi_run_queue: impossible");
1756 1.38.4.1 nathanw #endif
1757 1.38.4.1 nathanw }
1758 1.38.4.1 nathanw
1759 1.38.4.1 nathanw /*
1760 1.38.4.1 nathanw * scsipi_execute_xs:
1761 1.38.4.1 nathanw *
1762 1.38.4.1 nathanw * Begin execution of an xfer, waiting for it to complete, if necessary.
1763 1.38.4.1 nathanw */
1764 1.38.4.1 nathanw int
1765 1.38.4.1 nathanw scsipi_execute_xs(xs)
1766 1.38.4.1 nathanw struct scsipi_xfer *xs;
1767 1.38.4.1 nathanw {
1768 1.38.4.1 nathanw struct scsipi_periph *periph = xs->xs_periph;
1769 1.38.4.1 nathanw struct scsipi_channel *chan = periph->periph_channel;
1770 1.38.4.1 nathanw int async, poll, retries, error, s;
1771 1.38.4.1 nathanw
1772 1.38.4.1 nathanw xs->xs_status &= ~XS_STS_DONE;
1773 1.38.4.1 nathanw xs->error = XS_NOERROR;
1774 1.38.4.1 nathanw xs->resid = xs->datalen;
1775 1.38.4.1 nathanw xs->status = SCSI_OK;
1776 1.38.4.1 nathanw
1777 1.38.4.1 nathanw #ifdef SCSIPI_DEBUG
1778 1.38.4.1 nathanw if (xs->xs_periph->periph_dbflags & SCSIPI_DB3) {
1779 1.38.4.1 nathanw printf("scsipi_execute_xs: ");
1780 1.38.4.1 nathanw show_scsipi_xs(xs);
1781 1.38.4.1 nathanw printf("\n");
1782 1.38.4.1 nathanw }
1783 1.38.4.1 nathanw #endif
1784 1.38.4.1 nathanw
1785 1.38.4.1 nathanw /*
1786 1.38.4.1 nathanw * Deal with command tagging:
1787 1.38.4.1 nathanw *
1788 1.38.4.1 nathanw * - If the device's current operating mode doesn't
1789 1.38.4.1 nathanw * include tagged queueing, clear the tag mask.
1790 1.38.4.1 nathanw *
1791 1.38.4.1 nathanw * - If the device's current operating mode *does*
1792 1.38.4.1 nathanw * include tagged queueing, set the tag_type in
1793 1.38.4.1 nathanw * the xfer to the appropriate byte for the tag
1794 1.38.4.1 nathanw * message.
1795 1.38.4.1 nathanw */
1796 1.38.4.1 nathanw if ((PERIPH_XFER_MODE(periph) & PERIPH_CAP_TQING) == 0 ||
1797 1.38.4.1 nathanw (xs->xs_control & XS_CTL_REQSENSE)) {
1798 1.38.4.1 nathanw xs->xs_control &= ~XS_CTL_TAGMASK;
1799 1.38.4.1 nathanw xs->xs_tag_type = 0;
1800 1.38.4.1 nathanw } else {
1801 1.38.4.1 nathanw /*
1802 1.38.4.1 nathanw * If the request doesn't specify a tag, give Head
1803 1.38.4.1 nathanw * tags to URGENT operations and Ordered tags to
1804 1.38.4.1 nathanw * everything else.
1805 1.38.4.1 nathanw */
1806 1.38.4.1 nathanw if (XS_CTL_TAGTYPE(xs) == 0) {
1807 1.38.4.1 nathanw if (xs->xs_control & XS_CTL_URGENT)
1808 1.38.4.1 nathanw xs->xs_control |= XS_CTL_HEAD_TAG;
1809 1.38.4.1 nathanw else
1810 1.38.4.1 nathanw xs->xs_control |= XS_CTL_ORDERED_TAG;
1811 1.38.4.1 nathanw }
1812 1.38.4.1 nathanw
1813 1.38.4.1 nathanw switch (XS_CTL_TAGTYPE(xs)) {
1814 1.38.4.1 nathanw case XS_CTL_ORDERED_TAG:
1815 1.38.4.1 nathanw xs->xs_tag_type = MSG_ORDERED_Q_TAG;
1816 1.38.4.1 nathanw break;
1817 1.38.4.1 nathanw
1818 1.38.4.1 nathanw case XS_CTL_SIMPLE_TAG:
1819 1.38.4.1 nathanw xs->xs_tag_type = MSG_SIMPLE_Q_TAG;
1820 1.38.4.1 nathanw break;
1821 1.38.4.1 nathanw
1822 1.38.4.1 nathanw case XS_CTL_HEAD_TAG:
1823 1.38.4.1 nathanw xs->xs_tag_type = MSG_HEAD_OF_Q_TAG;
1824 1.38.4.1 nathanw break;
1825 1.38.4.1 nathanw
1826 1.38.4.1 nathanw default:
1827 1.38.4.1 nathanw scsipi_printaddr(periph);
1828 1.38.4.1 nathanw printf("invalid tag mask 0x%08x\n",
1829 1.38.4.1 nathanw XS_CTL_TAGTYPE(xs));
1830 1.38.4.1 nathanw panic("scsipi_execute_xs");
1831 1.38.4.1 nathanw }
1832 1.38.4.1 nathanw }
1833 1.38.4.1 nathanw
1834 1.38.4.1 nathanw 	/* If the adapter wants us to poll, poll. */
1835 1.38.4.1 nathanw if (chan->chan_adapter->adapt_flags & SCSIPI_ADAPT_POLL_ONLY)
1836 1.38.4.1 nathanw xs->xs_control |= XS_CTL_POLL;
1837 1.38.4.1 nathanw
1838 1.38.4.1 nathanw /*
1839 1.38.4.1 nathanw * If we don't yet have a completion thread, or we are to poll for
1840 1.38.4.1 nathanw * completion, clear the ASYNC flag.
1841 1.38.4.1 nathanw */
1842 1.38.4.1 nathanw if (chan->chan_thread == NULL || (xs->xs_control & XS_CTL_POLL) != 0)
1843 1.38.4.1 nathanw xs->xs_control &= ~XS_CTL_ASYNC;
1844 1.38.4.1 nathanw
1845 1.38.4.1 nathanw async = (xs->xs_control & XS_CTL_ASYNC);
1846 1.38.4.1 nathanw poll = (xs->xs_control & XS_CTL_POLL);
1847 1.38.4.1 nathanw retries = xs->xs_retries; /* for polling commands */
1848 1.38.4.1 nathanw
1849 1.38.4.1 nathanw #ifdef DIAGNOSTIC
1850 1.38.4.1 nathanw if (async != 0 && xs->bp == NULL)
1851 1.38.4.1 nathanw panic("scsipi_execute_xs: XS_CTL_ASYNC but no buf");
1852 1.38.4.1 nathanw #endif
1853 1.38.4.1 nathanw
1854 1.38.4.1 nathanw /*
1855 1.38.4.1 nathanw * Enqueue the transfer. If we're not polling for completion, this
1856 1.38.4.1 nathanw * should ALWAYS return `no error'.
1857 1.38.4.1 nathanw */
1858 1.38.4.1 nathanw try_again:
1859 1.38.4.1 nathanw error = scsipi_enqueue(xs);
1860 1.38.4.1 nathanw if (error) {
1861 1.38.4.1 nathanw if (poll == 0) {
1862 1.38.4.1 nathanw scsipi_printaddr(periph);
1863 1.38.4.1 nathanw printf("not polling, but enqueue failed with %d\n",
1864 1.38.4.1 nathanw error);
1865 1.38.4.1 nathanw panic("scsipi_execute_xs");
1866 1.38.4.1 nathanw }
1867 1.38.4.1 nathanw
1868 1.38.4.1 nathanw scsipi_printaddr(periph);
1869 1.38.4.1 nathanw printf("failed to enqueue polling command");
1870 1.38.4.1 nathanw if (retries != 0) {
1871 1.38.4.1 nathanw printf(", retrying...\n");
1872 1.38.4.1 nathanw delay(1000000);
1873 1.38.4.1 nathanw retries--;
1874 1.38.4.1 nathanw goto try_again;
1875 1.38.4.1 nathanw }
1876 1.38.4.1 nathanw printf("\n");
1877 1.38.4.1 nathanw goto free_xs;
1878 1.38.4.1 nathanw }
1879 1.38.4.1 nathanw
1880 1.38.4.1 nathanw restarted:
1881 1.38.4.1 nathanw scsipi_run_queue(chan);
1882 1.38.4.1 nathanw
1883 1.38.4.1 nathanw /*
1884 1.38.4.1 nathanw * The xfer is enqueued, and possibly running. If it's to be
1885 1.38.4.1 nathanw * completed asynchronously, just return now.
1886 1.38.4.1 nathanw */
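	/*
	 * EJUSTRETURN tells the caller that the xfer has been handed
	 * off; it will be completed (and, since XS_CTL_ASYNC is set,
	 * freed) later via scsipi_done() and scsipi_complete().
	 */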
1887 1.38.4.1 nathanw if (async)
1888 1.38.4.1 nathanw return (EJUSTRETURN);
1889 1.38.4.1 nathanw
1890 1.38.4.1 nathanw /*
1891 1.38.4.1 nathanw * Not an asynchronous command; wait for it to complete.
1892 1.38.4.1 nathanw */
1893 1.38.4.1 nathanw s = splbio();
1894 1.38.4.1 nathanw while ((xs->xs_status & XS_STS_DONE) == 0) {
1895 1.38.4.1 nathanw if (poll) {
1896 1.38.4.1 nathanw scsipi_printaddr(periph);
1897 1.38.4.1 nathanw printf("polling command not done\n");
1898 1.38.4.1 nathanw panic("scsipi_execute_xs");
1899 1.38.4.1 nathanw }
1900 1.38.4.1 nathanw (void) tsleep(xs, PRIBIO, "xscmd", 0);
1901 1.38.4.1 nathanw }
1902 1.38.4.1 nathanw splx(s);
1903 1.38.4.1 nathanw
1904 1.38.4.1 nathanw /*
1905 1.38.4.1 nathanw * Command is complete. scsipi_done() has awakened us to perform
1906 1.38.4.1 nathanw * the error handling.
1907 1.38.4.1 nathanw */
1908 1.38.4.1 nathanw error = scsipi_complete(xs);
1909 1.38.4.1 nathanw if (error == ERESTART)
1910 1.38.4.1 nathanw goto restarted;
1911 1.38.4.1 nathanw
1912 1.38.4.1 nathanw /*
1913 1.38.4.1 nathanw * Command completed successfully or fatal error occurred. Fall
1914 1.38.4.1 nathanw * into....
1915 1.38.4.1 nathanw */
1916 1.38.4.1 nathanw free_xs:
1917 1.38.4.1 nathanw s = splbio();
1918 1.38.4.1 nathanw scsipi_put_xs(xs);
1919 1.38.4.1 nathanw splx(s);
1920 1.38.4.1 nathanw
1921 1.38.4.1 nathanw /*
1922 1.38.4.1 nathanw * Kick the queue, keep it running in case it stopped for some
1923 1.38.4.1 nathanw * reason.
1924 1.38.4.1 nathanw */
1925 1.38.4.1 nathanw scsipi_run_queue(chan);
1926 1.38.4.1 nathanw
1927 1.38.4.1 nathanw return (error);
1928 1.38.4.1 nathanw }
1929 1.38.4.1 nathanw
1930 1.38.4.1 nathanw /*
1931 1.38.4.1 nathanw * scsipi_completion_thread:
1932 1.38.4.1 nathanw *
1933 1.38.4.1 nathanw * This is the completion thread. We wait for errors on
1934 1.38.4.1 nathanw * asynchronous xfers, and perform the error handling
1935 1.38.4.1 nathanw * function, restarting the command, if necessary.
1936 1.38.4.1 nathanw */
1937 1.38.4.1 nathanw void
1938 1.38.4.1 nathanw scsipi_completion_thread(arg)
1939 1.38.4.1 nathanw void *arg;
1940 1.38.4.1 nathanw {
1941 1.38.4.1 nathanw struct scsipi_channel *chan = arg;
1942 1.38.4.1 nathanw struct scsipi_xfer *xs;
1943 1.38.4.1 nathanw int s;
1944 1.38.4.1 nathanw
1945 1.38.4.1 nathanw for (;;) {
1946 1.38.4.1 nathanw s = splbio();
1947 1.38.4.1 nathanw xs = TAILQ_FIRST(&chan->chan_complete);
1948 1.38.4.1 nathanw if (xs == NULL &&
1949 1.38.4.2 nathanw (chan->chan_flags &
1950 1.38.4.2 nathanw (SCSIPI_CHAN_SHUTDOWN | SCSIPI_CHAN_CALLBACK)) == 0) {
1951 1.38.4.1 nathanw (void) tsleep(&chan->chan_complete, PRIBIO,
1952 1.38.4.1 nathanw "sccomp", 0);
1953 1.38.4.1 nathanw splx(s);
1954 1.38.4.1 nathanw continue;
1955 1.38.4.1 nathanw }
1956 1.38.4.2 nathanw if (chan->chan_flags & SCSIPI_CHAN_CALLBACK) {
1957 1.38.4.2 nathanw /* call chan_callback from thread context */
1958 1.38.4.2 nathanw chan->chan_flags &= ~SCSIPI_CHAN_CALLBACK;
1959 1.38.4.2 nathanw chan->chan_callback(chan, chan->chan_callback_arg);
1960 1.38.4.2 nathanw splx(s);
1961 1.38.4.2 nathanw continue;
1962 1.38.4.2 nathanw }
1963 1.38.4.1 nathanw if (chan->chan_flags & SCSIPI_CHAN_SHUTDOWN) {
1964 1.38.4.1 nathanw splx(s);
1965 1.38.4.1 nathanw break;
1966 1.38.4.1 nathanw }
1967 1.38.4.2 nathanw if (xs) {
1968 1.38.4.2 nathanw TAILQ_REMOVE(&chan->chan_complete, xs, channel_q);
1969 1.38.4.2 nathanw splx(s);
1970 1.38.4.1 nathanw
1971 1.38.4.2 nathanw /*
1972 1.38.4.2 nathanw * Have an xfer with an error; process it.
1973 1.38.4.2 nathanw */
1974 1.38.4.2 nathanw (void) scsipi_complete(xs);
1975 1.38.4.1 nathanw
1976 1.38.4.2 nathanw /*
1977 1.38.4.2 nathanw * Kick the queue; keep it running if it was stopped
1978 1.38.4.2 nathanw * for some reason.
1979 1.38.4.2 nathanw */
1980 1.38.4.2 nathanw scsipi_run_queue(chan);
1981 1.38.4.2 nathanw } else {
1982 1.38.4.2 nathanw splx(s);
1983 1.38.4.2 nathanw }
1984 1.38.4.1 nathanw }
1985 1.38.4.1 nathanw
1986 1.38.4.1 nathanw chan->chan_thread = NULL;
1987 1.38.4.1 nathanw
1988 1.38.4.1 nathanw /* In case parent is waiting for us to exit. */
1989 1.38.4.1 nathanw wakeup(&chan->chan_thread);
1990 1.38.4.1 nathanw
1991 1.38.4.1 nathanw kthread_exit(0);
1992 1.38.4.1 nathanw }
1993 1.38.4.1 nathanw
1994 1.38.4.1 nathanw /*
1995 1.38.4.1 nathanw * scsipi_create_completion_thread:
1996 1.38.4.1 nathanw *
1997 1.38.4.1 nathanw * Callback to actually create the completion thread.
1998 1.38.4.1 nathanw */
1999 1.38.4.1 nathanw void
2000 1.38.4.1 nathanw scsipi_create_completion_thread(arg)
2001 1.38.4.1 nathanw void *arg;
2002 1.38.4.1 nathanw {
2003 1.38.4.1 nathanw struct scsipi_channel *chan = arg;
2004 1.38.4.1 nathanw struct scsipi_adapter *adapt = chan->chan_adapter;
2005 1.38.4.1 nathanw
2006 1.38.4.1 nathanw if (kthread_create1(scsipi_completion_thread, chan,
2007 1.38.4.1 nathanw &chan->chan_thread, "%s:%d", adapt->adapt_dev->dv_xname,
2008 1.38.4.1 nathanw chan->chan_channel)) {
2009 1.38.4.1 nathanw printf("%s: unable to create completion thread for "
2010 1.38.4.1 nathanw "channel %d\n", adapt->adapt_dev->dv_xname,
2011 1.38.4.1 nathanw chan->chan_channel);
2012 1.38.4.1 nathanw panic("scsipi_create_completion_thread");
2013 1.38.4.1 nathanw }
2014 1.38.4.1 nathanw }
2015 1.38.4.1 nathanw
2016 1.38.4.1 nathanw /*
2017 1.38.4.2 nathanw * scsipi_thread_call_callback:
2018 1.38.4.2 nathanw *
2019 1.38.4.2 nathanw  *	Request that a callback be called from the completion thread.
2020 1.38.4.2 nathanw */
2021 1.38.4.2 nathanw int
2022 1.38.4.2 nathanw scsipi_thread_call_callback(chan, callback, arg)
2023 1.38.4.2 nathanw struct scsipi_channel *chan;
2024 1.38.4.2 nathanw void (*callback) __P((struct scsipi_channel *, void *));
2025 1.38.4.2 nathanw void *arg;
2026 1.38.4.2 nathanw {
2027 1.38.4.2 nathanw int s;
2028 1.38.4.2 nathanw
2029 1.38.4.2 nathanw s = splbio();
2030 1.38.4.2 nathanw if (chan->chan_flags & SCSIPI_CHAN_CALLBACK) {
2031 1.38.4.2 nathanw splx(s);
2032 1.38.4.2 nathanw return EBUSY;
2033 1.38.4.2 nathanw }
2034 1.38.4.2 nathanw scsipi_channel_freeze(chan, 1);
2035 1.38.4.2 nathanw chan->chan_callback = callback;
2036 1.38.4.2 nathanw chan->chan_callback_arg = arg;
2037 1.38.4.2 nathanw chan->chan_flags |= SCSIPI_CHAN_CALLBACK;
2038 1.38.4.2 nathanw wakeup(&chan->chan_complete);
2039 1.38.4.2 nathanw splx(s);
2040 1.38.4.2 nathanw return(0);
2041 1.38.4.2 nathanw }
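
/*
 * Example (hypothetical adapter code; the names xxx_rescan_cb and sc are
 * illustrative only): defer work that needs thread context, such as
 * rescanning the bus after an event, to the completion thread:
 *
 *	if (scsipi_thread_call_callback(chan, xxx_rescan_cb, sc) == EBUSY)
 *		... another callback is already pending; try again later ...
 *
 * xxx_rescan_cb() then runs from the completion thread with the channel
 * frozen (this routine froze it before posting the callback).
 */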
2042 1.38.4.2 nathanw
2043 1.38.4.2 nathanw /*
2044 1.38.4.1 nathanw * scsipi_async_event:
2045 1.38.4.1 nathanw *
2046 1.38.4.1 nathanw * Handle an asynchronous event from an adapter.
2047 1.38.4.1 nathanw */
2048 1.38.4.1 nathanw void
2049 1.38.4.1 nathanw scsipi_async_event(chan, event, arg)
2050 1.38.4.1 nathanw struct scsipi_channel *chan;
2051 1.38.4.1 nathanw scsipi_async_event_t event;
2052 1.38.4.1 nathanw void *arg;
2053 1.38.4.1 nathanw {
2054 1.38.4.1 nathanw int s;
2055 1.38.4.1 nathanw
2056 1.38.4.1 nathanw s = splbio();
2057 1.38.4.1 nathanw switch (event) {
2058 1.38.4.1 nathanw case ASYNC_EVENT_MAX_OPENINGS:
2059 1.38.4.1 nathanw scsipi_async_event_max_openings(chan,
2060 1.38.4.1 nathanw (struct scsipi_max_openings *)arg);
2061 1.38.4.1 nathanw break;
2062 1.38.4.1 nathanw
2063 1.38.4.1 nathanw case ASYNC_EVENT_XFER_MODE:
2064 1.38.4.1 nathanw scsipi_async_event_xfer_mode(chan,
2065 1.38.4.1 nathanw (struct scsipi_xfer_mode *)arg);
2066 1.38.4.1 nathanw break;
2067 1.38.4.1 nathanw case ASYNC_EVENT_RESET:
2068 1.38.4.1 nathanw scsipi_async_event_channel_reset(chan);
2069 1.38.4.1 nathanw break;
2070 1.38.4.1 nathanw }
2071 1.38.4.1 nathanw splx(s);
2072 1.38.4.1 nathanw }
2073 1.38.4.1 nathanw
2074 1.38.4.1 nathanw /*
2075 1.38.4.1 nathanw * scsipi_print_xfer_mode:
2076 1.38.4.1 nathanw *
2077 1.38.4.1 nathanw * Print a periph's capabilities.
2078 1.38.4.1 nathanw */
2079 1.38.4.1 nathanw void
2080 1.38.4.1 nathanw scsipi_print_xfer_mode(periph)
2081 1.38.4.1 nathanw struct scsipi_periph *periph;
2082 1.38.4.1 nathanw {
2083 1.38.4.1 nathanw int period, freq, speed, mbs;
2084 1.38.4.1 nathanw
2085 1.38.4.1 nathanw if ((periph->periph_flags & PERIPH_MODE_VALID) == 0)
2086 1.38.4.1 nathanw return;
2087 1.38.4.1 nathanw
2088 1.38.4.1 nathanw printf("%s: ", periph->periph_dev->dv_xname);
2089 1.38.4.1 nathanw if (periph->periph_mode & PERIPH_CAP_SYNC) {
2090 1.38.4.1 nathanw period = scsipi_sync_factor_to_period(periph->periph_period);
2091 1.38.4.1 nathanw printf("sync (%d.%dns offset %d)",
2092 1.38.4.1 nathanw period / 10, period % 10, periph->periph_offset);
2093 1.38.4.1 nathanw } else
2094 1.38.4.1 nathanw printf("async");
2095 1.38.4.1 nathanw
2096 1.38.4.1 nathanw if (periph->periph_mode & PERIPH_CAP_WIDE32)
2097 1.38.4.1 nathanw printf(", 32-bit");
2098 1.38.4.1 nathanw else if (periph->periph_mode & PERIPH_CAP_WIDE16)
2099 1.38.4.1 nathanw printf(", 16-bit");
2100 1.38.4.1 nathanw else
2101 1.38.4.1 nathanw printf(", 8-bit");
2102 1.38.4.1 nathanw
2103 1.38.4.1 nathanw if (periph->periph_mode & PERIPH_CAP_SYNC) {
2104 1.38.4.1 nathanw freq = scsipi_sync_factor_to_freq(periph->periph_period);
2105 1.38.4.1 nathanw speed = freq;
2106 1.38.4.1 nathanw if (periph->periph_mode & PERIPH_CAP_WIDE32)
2107 1.38.4.1 nathanw speed *= 4;
2108 1.38.4.1 nathanw else if (periph->periph_mode & PERIPH_CAP_WIDE16)
2109 1.38.4.1 nathanw speed *= 2;
2110 1.38.4.1 nathanw mbs = speed / 1000;
2111 1.38.4.1 nathanw if (mbs > 0)
2112 1.38.4.1 nathanw printf(" (%d.%03dMB/s)", mbs, speed % 1000);
2113 1.38.4.1 nathanw else
2114 1.38.4.1 nathanw printf(" (%dKB/s)", speed % 1000);
2115 1.38.4.1 nathanw }
2116 1.38.4.1 nathanw
2117 1.38.4.1 nathanw printf(" transfers");
2118 1.38.4.1 nathanw
2119 1.38.4.1 nathanw if (periph->periph_mode & PERIPH_CAP_TQING)
2120 1.38.4.1 nathanw printf(", tagged queueing");
2121 1.38.4.1 nathanw
2122 1.38.4.1 nathanw printf("\n");
2123 1.38.4.1 nathanw }
2124 1.38.4.1 nathanw
2125 1.38.4.1 nathanw /*
2126 1.38.4.1 nathanw * scsipi_async_event_max_openings:
2127 1.38.4.1 nathanw *
2128 1.38.4.1 nathanw * Update the maximum number of outstanding commands a
2129 1.38.4.1 nathanw * device may have.
2130 1.38.4.1 nathanw */
2131 1.38.4.1 nathanw void
2132 1.38.4.1 nathanw scsipi_async_event_max_openings(chan, mo)
2133 1.38.4.1 nathanw struct scsipi_channel *chan;
2134 1.38.4.1 nathanw struct scsipi_max_openings *mo;
2135 1.38.4.1 nathanw {
2136 1.38.4.1 nathanw struct scsipi_periph *periph;
2137 1.38.4.1 nathanw int minlun, maxlun;
2138 1.38.4.1 nathanw
2139 1.38.4.1 nathanw if (mo->mo_lun == -1) {
2140 1.38.4.1 nathanw /*
2141 1.38.4.1 nathanw * Wildcarded; apply it to all LUNs.
2142 1.38.4.1 nathanw */
2143 1.38.4.1 nathanw minlun = 0;
2144 1.38.4.1 nathanw maxlun = chan->chan_nluns - 1;
2145 1.38.4.1 nathanw } else
2146 1.38.4.1 nathanw minlun = maxlun = mo->mo_lun;
2147 1.38.4.1 nathanw
2148 1.38.4.1 nathanw for (; minlun <= maxlun; minlun++) {
2149 1.38.4.1 nathanw periph = scsipi_lookup_periph(chan, mo->mo_target, minlun);
2150 1.38.4.1 nathanw if (periph == NULL)
2151 1.38.4.1 nathanw continue;
2152 1.38.4.1 nathanw
2153 1.38.4.1 nathanw if (mo->mo_openings < periph->periph_openings)
2154 1.38.4.1 nathanw periph->periph_openings = mo->mo_openings;
2155 1.38.4.1 nathanw else if (mo->mo_openings > periph->periph_openings &&
2156 1.38.4.1 nathanw (periph->periph_flags & PERIPH_GROW_OPENINGS) != 0)
2157 1.38.4.1 nathanw periph->periph_openings = mo->mo_openings;
2158 1.38.4.1 nathanw }
2159 1.38.4.1 nathanw }
2160 1.38.4.1 nathanw
2161 1.38.4.1 nathanw /*
2162 1.38.4.1 nathanw * scsipi_async_event_xfer_mode:
2163 1.38.4.1 nathanw *
2164 1.38.4.1 nathanw * Update the xfer mode for all periphs sharing the
2165 1.38.4.1 nathanw * specified I_T Nexus.
2166 1.38.4.1 nathanw */
2167 1.38.4.1 nathanw void
2168 1.38.4.1 nathanw scsipi_async_event_xfer_mode(chan, xm)
2169 1.38.4.1 nathanw struct scsipi_channel *chan;
2170 1.38.4.1 nathanw struct scsipi_xfer_mode *xm;
2171 1.38.4.1 nathanw {
2172 1.38.4.1 nathanw struct scsipi_periph *periph;
2173 1.38.4.1 nathanw int lun, announce, mode, period, offset;
2174 1.38.4.1 nathanw
2175 1.38.4.1 nathanw for (lun = 0; lun < chan->chan_nluns; lun++) {
2176 1.38.4.1 nathanw periph = scsipi_lookup_periph(chan, xm->xm_target, lun);
2177 1.38.4.1 nathanw if (periph == NULL)
2178 1.38.4.1 nathanw continue;
2179 1.38.4.1 nathanw announce = 0;
2180 1.38.4.1 nathanw
2181 1.38.4.1 nathanw /*
2182 1.38.4.1 nathanw * Clamp the xfer mode down to this periph's capabilities.
2183 1.38.4.1 nathanw */
2184 1.38.4.1 nathanw mode = xm->xm_mode & periph->periph_cap;
2185 1.38.4.1 nathanw if (mode & PERIPH_CAP_SYNC) {
2186 1.38.4.1 nathanw period = xm->xm_period;
2187 1.38.4.1 nathanw offset = xm->xm_offset;
2188 1.38.4.1 nathanw } else {
2189 1.38.4.1 nathanw period = 0;
2190 1.38.4.1 nathanw offset = 0;
2191 1.38.4.1 nathanw }
2192 1.38.4.1 nathanw
2193 1.38.4.1 nathanw /*
2194 1.38.4.1 nathanw * If we do not have a valid xfer mode yet, or the parameters
2195 1.38.4.1 nathanw * are different, announce them.
2196 1.38.4.1 nathanw */
2197 1.38.4.1 nathanw if ((periph->periph_flags & PERIPH_MODE_VALID) == 0 ||
2198 1.38.4.1 nathanw periph->periph_mode != mode ||
2199 1.38.4.1 nathanw periph->periph_period != period ||
2200 1.38.4.1 nathanw periph->periph_offset != offset)
2201 1.38.4.1 nathanw announce = 1;
2202 1.38.4.1 nathanw
2203 1.38.4.1 nathanw periph->periph_mode = mode;
2204 1.38.4.1 nathanw periph->periph_period = period;
2205 1.38.4.1 nathanw periph->periph_offset = offset;
2206 1.38.4.1 nathanw periph->periph_flags |= PERIPH_MODE_VALID;
2207 1.38.4.1 nathanw
2208 1.38.4.1 nathanw if (announce)
2209 1.38.4.1 nathanw scsipi_print_xfer_mode(periph);
2210 1.38.4.1 nathanw }
2211 1.38.4.1 nathanw }
2212 1.38.4.1 nathanw
2213 1.38.4.1 nathanw /*
2214 1.38.4.1 nathanw * scsipi_set_xfer_mode:
2215 1.38.4.1 nathanw *
2216 1.38.4.1 nathanw * Set the xfer mode for the specified I_T Nexus.
2217 1.38.4.1 nathanw */
2218 1.38.4.1 nathanw void
2219 1.38.4.1 nathanw scsipi_set_xfer_mode(chan, target, immed)
2220 1.38.4.1 nathanw struct scsipi_channel *chan;
2221 1.38.4.1 nathanw int target, immed;
2222 1.38.4.1 nathanw {
2223 1.38.4.1 nathanw struct scsipi_xfer_mode xm;
2224 1.38.4.1 nathanw 	struct scsipi_periph *itperiph = NULL;
2225 1.38.4.1 nathanw int lun, s;
2226 1.38.4.1 nathanw
2227 1.38.4.1 nathanw /*
2228 1.38.4.1 nathanw * Go to the minimal xfer mode.
2229 1.38.4.1 nathanw */
2230 1.38.4.1 nathanw xm.xm_target = target;
2231 1.38.4.1 nathanw xm.xm_mode = 0;
2232 1.38.4.1 nathanw xm.xm_period = 0; /* ignored */
2233 1.38.4.1 nathanw xm.xm_offset = 0; /* ignored */
2234 1.38.4.1 nathanw
2235 1.38.4.1 nathanw /*
2236 1.38.4.1 nathanw * Find the first LUN we know about on this I_T Nexus.
2237 1.38.4.1 nathanw */
2238 1.38.4.1 nathanw for (lun = 0; lun < chan->chan_nluns; lun++) {
2239 1.38.4.1 nathanw itperiph = scsipi_lookup_periph(chan, target, lun);
2240 1.38.4.1 nathanw if (itperiph != NULL)
2241 1.38.4.1 nathanw break;
2242 1.38.4.1 nathanw }
2243 1.38.4.2 nathanw if (itperiph != NULL) {
2244 1.38.4.1 nathanw xm.xm_mode = itperiph->periph_cap;
2245 1.38.4.2 nathanw /*
2246 1.38.4.2 nathanw * Now issue the request to the adapter.
2247 1.38.4.2 nathanw */
2248 1.38.4.2 nathanw s = splbio();
2249 1.38.4.2 nathanw scsipi_adapter_request(chan, ADAPTER_REQ_SET_XFER_MODE, &xm);
2250 1.38.4.2 nathanw splx(s);
2251 1.38.4.2 nathanw /*
2252 1.38.4.2 nathanw * If we want this to happen immediately, issue a dummy
2253 1.38.4.2 nathanw * command, since most adapters can't really negotiate unless
2254 1.38.4.2 nathanw * they're executing a job.
2255 1.38.4.2 nathanw */
2256 1.38.4.2 nathanw if (immed != 0) {
2257 1.38.4.2 nathanw (void) scsipi_test_unit_ready(itperiph,
2258 1.38.4.2 nathanw XS_CTL_DISCOVERY | XS_CTL_IGNORE_ILLEGAL_REQUEST |
2259 1.38.4.2 nathanw XS_CTL_IGNORE_NOT_READY |
2260 1.38.4.2 nathanw XS_CTL_IGNORE_MEDIA_CHANGE);
2261 1.38.4.2 nathanw }
2262 1.38.4.1 nathanw }
2263 1.38.4.1 nathanw }
2264 1.38.4.1 nathanw
2265 1.38.4.1 nathanw /*
2266 1.38.4.1 nathanw  * scsipi_async_event_channel_reset:
2267 1.38.4.1 nathanw  *
2268 1.38.4.1 nathanw  *	Handle a SCSI bus reset.
2269 1.38.4.1 nathanw  *	Must be called at splbio().
2270 1.38.4.1 nathanw */
2271 1.38.4.1 nathanw void
2272 1.38.4.1 nathanw scsipi_async_event_channel_reset(chan)
2273 1.38.4.1 nathanw struct scsipi_channel *chan;
2274 1.38.4.1 nathanw {
2275 1.38.4.1 nathanw struct scsipi_xfer *xs, *xs_next;
2276 1.38.4.1 nathanw struct scsipi_periph *periph;
2277 1.38.4.1 nathanw int target, lun;
2278 1.38.4.1 nathanw
2279 1.38.4.1 nathanw /*
2280 1.38.4.1 nathanw 	 * The channel has been reset.  Also mark pending REQUEST_SENSE
2281 1.38.4.1 nathanw 	 * commands as reset, since their sense data is no longer available.
2282 1.38.4.1 nathanw 	 * We can't call scsipi_done() from here, as the command has not been
2283 1.38.4.1 nathanw 	 * sent to the adapter yet (that would corrupt the accounting).
2284 1.38.4.1 nathanw */
2285 1.38.4.1 nathanw
2286 1.38.4.1 nathanw for (xs = TAILQ_FIRST(&chan->chan_queue); xs != NULL; xs = xs_next) {
2287 1.38.4.1 nathanw xs_next = TAILQ_NEXT(xs, channel_q);
2288 1.38.4.1 nathanw if (xs->xs_control & XS_CTL_REQSENSE) {
2289 1.38.4.1 nathanw TAILQ_REMOVE(&chan->chan_queue, xs, channel_q);
2290 1.38.4.1 nathanw xs->error = XS_RESET;
2291 1.38.4.1 nathanw if ((xs->xs_control & XS_CTL_ASYNC) != 0)
2292 1.38.4.1 nathanw TAILQ_INSERT_TAIL(&chan->chan_complete, xs,
2293 1.38.4.1 nathanw channel_q);
2294 1.38.4.1 nathanw }
2295 1.38.4.1 nathanw }
2296 1.38.4.1 nathanw wakeup(&chan->chan_complete);
2297 1.38.4.1 nathanw /* Catch xs with pending sense which may not have a REQSENSE xs yet */
2298 1.38.4.1 nathanw for (target = 0; target < chan->chan_ntargets; target++) {
2299 1.38.4.1 nathanw if (target == chan->chan_id)
2300 1.38.4.1 nathanw continue;
2301 1.38.4.1 nathanw for (lun = 0; lun < chan->chan_nluns; lun++) {
2302 1.38.4.1 nathanw periph = chan->chan_periphs[target][lun];
2303 1.38.4.1 nathanw if (periph) {
2304 1.38.4.1 nathanw xs = periph->periph_xscheck;
2305 1.38.4.1 nathanw if (xs)
2306 1.38.4.1 nathanw xs->error = XS_RESET;
2307 1.38.4.1 nathanw }
2308 1.38.4.1 nathanw }
2309 1.38.4.1 nathanw }
2310 1.38.4.1 nathanw }
2311 1.38.4.1 nathanw
2312 1.38.4.2 nathanw /*
2313 1.38.4.2 nathanw * scsipi_target_detach:
2314 1.38.4.2 nathanw *
2315 1.38.4.2 nathanw  *	Detach all periphs associated with an I_T nexus.
2316 1.38.4.2 nathanw  *	Must be called from valid thread context.
2317 1.38.4.2 nathanw */
2318 1.38.4.2 nathanw int
2319 1.38.4.2 nathanw scsipi_target_detach(chan, target, lun, flags)
2320 1.38.4.2 nathanw struct scsipi_channel *chan;
2321 1.38.4.2 nathanw int target, lun;
2322 1.38.4.2 nathanw int flags;
2323 1.38.4.2 nathanw {
2324 1.38.4.2 nathanw struct scsipi_periph *periph;
2325 1.38.4.2 nathanw int ctarget, mintarget, maxtarget;
2326 1.38.4.2 nathanw int clun, minlun, maxlun;
2327 1.38.4.2 nathanw int error;
2328 1.38.4.2 nathanw
2329 1.38.4.2 nathanw if (target == -1) {
2330 1.38.4.2 nathanw mintarget = 0;
2331 1.38.4.2 nathanw maxtarget = chan->chan_ntargets;
2332 1.38.4.2 nathanw } else {
2333 1.38.4.2 nathanw if (target == chan->chan_id)
2334 1.38.4.2 nathanw return EINVAL;
2335 1.38.4.2 nathanw if (target < 0 || target >= chan->chan_ntargets)
2336 1.38.4.2 nathanw return EINVAL;
2337 1.38.4.2 nathanw mintarget = target;
2338 1.38.4.2 nathanw maxtarget = target + 1;
2339 1.38.4.2 nathanw }
2340 1.38.4.2 nathanw
2341 1.38.4.2 nathanw if (lun == -1) {
2342 1.38.4.2 nathanw minlun = 0;
2343 1.38.4.2 nathanw maxlun = chan->chan_nluns;
2344 1.38.4.2 nathanw } else {
2345 1.38.4.2 nathanw if (lun < 0 || lun >= chan->chan_nluns)
2346 1.38.4.2 nathanw return EINVAL;
2347 1.38.4.2 nathanw minlun = lun;
2348 1.38.4.2 nathanw maxlun = lun + 1;
2349 1.38.4.2 nathanw }
2350 1.38.4.2 nathanw
2351 1.38.4.2 nathanw for (ctarget = mintarget; ctarget < maxtarget; ctarget++) {
2352 1.38.4.2 nathanw if (ctarget == chan->chan_id)
2353 1.38.4.2 nathanw continue;
2354 1.38.4.2 nathanw
2355 1.38.4.2 nathanw for (clun = minlun; clun < maxlun; clun++) {
2356 1.38.4.2 nathanw periph = scsipi_lookup_periph(chan, ctarget, clun);
2357 1.38.4.2 nathanw if (periph == NULL)
2358 1.38.4.2 nathanw continue;
2359 1.38.4.2 nathanw error = config_detach(periph->periph_dev, flags);
2360 1.38.4.2 nathanw if (error)
2361 1.38.4.2 nathanw return (error);
2362 1.38.4.2 nathanw scsipi_remove_periph(chan, periph);
2363 1.38.4.2 nathanw free(periph, M_DEVBUF);
2364 1.38.4.2 nathanw }
2365 1.38.4.2 nathanw }
2366 1.38.4.2 nathanw return(0);
2367 1.38.4.2 nathanw }
2368 1.38.4.1 nathanw
2369 1.38.4.1 nathanw /*
2370 1.38.4.1 nathanw * scsipi_adapter_addref:
2371 1.38.4.1 nathanw *
2372 1.38.4.1 nathanw * Add a reference to the adapter pointed to by the provided
2373 1.38.4.1 nathanw * link, enabling the adapter if necessary.
2374 1.38.4.1 nathanw */
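/*
 * Typically called from a periph driver's open path, paired with
 * scsipi_adapter_delref() on close, so that adapters which supply an
 * adapt_enable hook are enabled only while at least one periph is
 * actually in use.
 */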
2375 1.38.4.1 nathanw int
2376 1.38.4.1 nathanw scsipi_adapter_addref(adapt)
2377 1.38.4.1 nathanw struct scsipi_adapter *adapt;
2378 1.38.4.1 nathanw {
2379 1.38.4.1 nathanw int s, error = 0;
2380 1.38.4.1 nathanw
2381 1.38.4.1 nathanw s = splbio();
2382 1.38.4.1 nathanw if (adapt->adapt_refcnt++ == 0 && adapt->adapt_enable != NULL) {
2383 1.38.4.1 nathanw error = (*adapt->adapt_enable)(adapt->adapt_dev, 1);
2384 1.38.4.1 nathanw if (error)
2385 1.38.4.1 nathanw adapt->adapt_refcnt--;
2386 1.38.4.1 nathanw }
2387 1.38.4.1 nathanw splx(s);
2388 1.38.4.1 nathanw return (error);
2389 1.38.4.1 nathanw }
2390 1.38.4.1 nathanw
2391 1.38.4.1 nathanw /*
2392 1.38.4.1 nathanw * scsipi_adapter_delref:
2393 1.38.4.1 nathanw *
2394 1.38.4.1 nathanw * Delete a reference to the adapter pointed to by the provided
2395 1.38.4.1 nathanw * link, disabling the adapter if possible.
2396 1.38.4.1 nathanw */
2397 1.38.4.1 nathanw void
2398 1.38.4.1 nathanw scsipi_adapter_delref(adapt)
2399 1.38.4.1 nathanw struct scsipi_adapter *adapt;
2400 1.38.4.1 nathanw {
2401 1.38.4.1 nathanw int s;
2402 1.38.4.1 nathanw
2403 1.38.4.1 nathanw s = splbio();
2404 1.38.4.1 nathanw if (adapt->adapt_refcnt-- == 1 && adapt->adapt_enable != NULL)
2405 1.38.4.1 nathanw (void) (*adapt->adapt_enable)(adapt->adapt_dev, 0);
2406 1.38.4.1 nathanw splx(s);
2407 1.38.4.1 nathanw }
2408 1.38.4.1 nathanw
2409 1.38.4.1 nathanw struct scsipi_syncparam {
2410 1.38.4.1 nathanw int ss_factor;
2411 1.38.4.1 nathanw int ss_period; /* ns * 10 */
2412 1.38.4.1 nathanw } scsipi_syncparams[] = {
2413 1.38.4.3 nathanw { 0x09, 125 },
2414 1.38.4.1 nathanw { 0x0a, 250 },
2415 1.38.4.1 nathanw { 0x0b, 303 },
2416 1.38.4.1 nathanw { 0x0c, 500 },
2417 1.38.4.1 nathanw };
2418 1.38.4.1 nathanw const int scsipi_nsyncparams =
2419 1.38.4.1 nathanw sizeof(scsipi_syncparams) / sizeof(scsipi_syncparams[0]);
2420 1.38.4.1 nathanw
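/*
 * The table above lists the "fast" transfer period factors defined by
 * the SCSI parallel interface spec: 0x09 = 12.5ns, 0x0a = 25ns,
 * 0x0b = 30.3ns and 0x0c = 50ns (ss_period is in units of 0.1ns).
 * Factors of 0x0d and above simply encode period = factor * 4ns,
 * which is what the fall-back computations below implement.
 */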
2421 1.38.4.1 nathanw int
2422 1.38.4.1 nathanw scsipi_sync_period_to_factor(period)
2423 1.38.4.1 nathanw int period; /* ns * 10 */
2424 1.38.4.1 nathanw {
2425 1.38.4.1 nathanw int i;
2426 1.38.4.1 nathanw
2427 1.38.4.1 nathanw for (i = 0; i < scsipi_nsyncparams; i++) {
2428 1.38.4.1 nathanw if (period <= scsipi_syncparams[i].ss_period)
2429 1.38.4.1 nathanw return (scsipi_syncparams[i].ss_factor);
2430 1.38.4.1 nathanw }
2431 1.38.4.1 nathanw
2432 1.38.4.1 nathanw return ((period / 10) / 4);
2433 1.38.4.1 nathanw }
2434 1.38.4.1 nathanw
2435 1.38.4.1 nathanw int
2436 1.38.4.1 nathanw scsipi_sync_factor_to_period(factor)
2437 1.38.4.1 nathanw int factor;
2438 1.38.4.1 nathanw {
2439 1.38.4.1 nathanw int i;
2440 1.38.4.1 nathanw
2441 1.38.4.1 nathanw for (i = 0; i < scsipi_nsyncparams; i++) {
2442 1.38.4.1 nathanw if (factor == scsipi_syncparams[i].ss_factor)
2443 1.38.4.1 nathanw return (scsipi_syncparams[i].ss_period);
2444 1.38.4.1 nathanw }
2445 1.38.4.1 nathanw
2446 1.38.4.1 nathanw return ((factor * 4) * 10);
2447 1.38.4.1 nathanw }
2448 1.38.4.1 nathanw
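/*
 * scsipi_sync_factor_to_freq() returns the synchronous transfer
 * frequency in kHz; e.g. factor 0x0c (a 50ns period) yields 20000,
 * i.e. 20MHz.  scsipi_print_xfer_mode() above relies on this unit
 * when it converts the result to KB/s and MB/s.
 */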
2449 1.38.4.1 nathanw int
2450 1.38.4.1 nathanw scsipi_sync_factor_to_freq(factor)
2451 1.38.4.1 nathanw int factor;
2452 1.38.4.1 nathanw {
2453 1.38.4.1 nathanw int i;
2454 1.38.4.1 nathanw
2455 1.38.4.1 nathanw for (i = 0; i < scsipi_nsyncparams; i++) {
2456 1.38.4.1 nathanw if (factor == scsipi_syncparams[i].ss_factor)
2457 1.38.4.1 nathanw return (10000000 / scsipi_syncparams[i].ss_period);
2458 1.38.4.1 nathanw }
2459 1.38.4.1 nathanw
2460 1.38.4.1 nathanw return (10000000 / ((factor * 4) * 10));
2461 1.14 thorpej }
2462 1.14 thorpej
2463 1.38.4.1 nathanw #ifdef SCSIPI_DEBUG
2464 1.2 bouyer /*
2465 1.2 bouyer  * Given a scsipi_xfer, dump the request, in all its glory
2466 1.2 bouyer */
2467 1.2 bouyer void
2468 1.2 bouyer show_scsipi_xs(xs)
2469 1.2 bouyer struct scsipi_xfer *xs;
2470 1.2 bouyer {
2471 1.3 enami
2472 1.2 bouyer printf("xs(%p): ", xs);
2473 1.24 thorpej printf("xs_control(0x%08x)", xs->xs_control);
2474 1.24 thorpej printf("xs_status(0x%08x)", xs->xs_status);
2475 1.38.4.1 nathanw printf("periph(%p)", xs->xs_periph);
2476 1.38.4.1 nathanw printf("retr(0x%x)", xs->xs_retries);
2477 1.2 bouyer printf("timo(0x%x)", xs->timeout);
2478 1.2 bouyer printf("cmd(%p)", xs->cmd);
2479 1.2 bouyer printf("len(0x%x)", xs->cmdlen);
2480 1.2 bouyer printf("data(%p)", xs->data);
2481 1.2 bouyer printf("len(0x%x)", xs->datalen);
2482 1.2 bouyer printf("res(0x%x)", xs->resid);
2483 1.2 bouyer printf("err(0x%x)", xs->error);
2484 1.2 bouyer printf("bp(%p)", xs->bp);
2485 1.2 bouyer show_scsipi_cmd(xs);
2486 1.2 bouyer }
2487 1.2 bouyer
2488 1.2 bouyer void
2489 1.2 bouyer show_scsipi_cmd(xs)
2490 1.2 bouyer struct scsipi_xfer *xs;
2491 1.2 bouyer {
2492 1.2 bouyer u_char *b = (u_char *) xs->cmd;
2493 1.3 enami int i = 0;
2494 1.2 bouyer
2495 1.38.4.1 nathanw scsipi_printaddr(xs->xs_periph);
2496 1.38.4.1 nathanw printf(" command: ");
2497 1.2 bouyer
2498 1.24 thorpej if ((xs->xs_control & XS_CTL_RESET) == 0) {
2499 1.2 bouyer while (i < xs->cmdlen) {
2500 1.2 bouyer if (i)
2501 1.2 bouyer printf(",");
2502 1.2 bouyer printf("0x%x", b[i++]);
2503 1.2 bouyer }
2504 1.2 bouyer printf("-[%d bytes]\n", xs->datalen);
2505 1.2 bouyer if (xs->datalen)
2506 1.2 bouyer show_mem(xs->data, min(64, xs->datalen));
2507 1.2 bouyer } else
2508 1.2 bouyer printf("-RESET-\n");
2509 1.2 bouyer }
2510 1.2 bouyer
2511 1.2 bouyer void
2512 1.2 bouyer show_mem(address, num)
2513 1.2 bouyer u_char *address;
2514 1.2 bouyer int num;
2515 1.2 bouyer {
2516 1.2 bouyer int x;
2517 1.2 bouyer
2518 1.2 bouyer printf("------------------------------");
2519 1.2 bouyer for (x = 0; x < num; x++) {
2520 1.2 bouyer if ((x % 16) == 0)
2521 1.2 bouyer printf("\n%03d: ", x);
2522 1.2 bouyer printf("%02x ", *address++);
2523 1.2 bouyer }
2524 1.2 bouyer printf("\n------------------------------\n");
2525 1.2 bouyer }
2526 1.38.4.1 nathanw #endif /* SCSIPI_DEBUG */
2527