/*	$NetBSD: scsipi_base.c,v 1.38.4.6 2001/11/14 19:16:03 nathanw Exp $	*/

/*-
 * Copyright (c) 1998, 1999, 2000 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Charles M. Hannum; by Jason R. Thorpe of the Numerical Aerospace
 * Simulation Facility, NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: scsipi_base.c,v 1.38.4.6 2001/11/14 19:16:03 nathanw Exp $");

#include "opt_scsi.h"

#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/buf.h>
#include <sys/uio.h>
#include <sys/malloc.h>
#include <sys/pool.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/proc.h>
#include <sys/kthread.h>

#include <dev/scsipi/scsipi_all.h>
#include <dev/scsipi/scsipi_disk.h>
#include <dev/scsipi/scsipiconf.h>
#include <dev/scsipi/scsipi_base.h>

#include <dev/scsipi/scsi_all.h>
#include <dev/scsipi/scsi_message.h>

int	scsipi_complete __P((struct scsipi_xfer *));
void	scsipi_request_sense __P((struct scsipi_xfer *));
int	scsipi_enqueue __P((struct scsipi_xfer *));
void	scsipi_run_queue __P((struct scsipi_channel *chan));

void	scsipi_completion_thread __P((void *));

void	scsipi_get_tag __P((struct scsipi_xfer *));
void	scsipi_put_tag __P((struct scsipi_xfer *));

int	scsipi_get_resource __P((struct scsipi_channel *));
void	scsipi_put_resource __P((struct scsipi_channel *));
__inline int scsipi_grow_resources __P((struct scsipi_channel *));

void	scsipi_async_event_max_openings __P((struct scsipi_channel *,
	    struct scsipi_max_openings *));
void	scsipi_async_event_xfer_mode __P((struct scsipi_channel *,
	    struct scsipi_xfer_mode *));
void	scsipi_async_event_channel_reset __P((struct scsipi_channel *));

struct pool scsipi_xfer_pool;

/*
 * scsipi_init:
 *
 *	Called when a scsibus or atapibus is attached to the system
 *	to initialize shared data structures.
 */
void
scsipi_init()
{
	static int scsipi_init_done;

	if (scsipi_init_done)
		return;
	scsipi_init_done = 1;

	/* Initialize the scsipi_xfer pool. */
	pool_init(&scsipi_xfer_pool, sizeof(struct scsipi_xfer), 0,
	    0, 0, "scxspl", 0, NULL, NULL, M_DEVBUF);
}

/*
 * scsipi_channel_init:
 *
 *	Initialize a scsipi_channel when it is attached.
 */
int
scsipi_channel_init(chan)
	struct scsipi_channel *chan;
{
	size_t nbytes;
	int i;

	/* Initialize shared data. */
	scsipi_init();

	/* Initialize the queues. */
	TAILQ_INIT(&chan->chan_queue);
	TAILQ_INIT(&chan->chan_complete);

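	/*
	 * Allocate the table of attached peripherals: one array of
	 * per-LUN pointers for each possible target on the channel.
	 */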
	nbytes = chan->chan_ntargets * sizeof(struct scsipi_periph **);
	chan->chan_periphs = malloc(nbytes, M_DEVBUF, M_NOWAIT);
	if (chan->chan_periphs == NULL)
		return (ENOMEM);


	nbytes = chan->chan_nluns * sizeof(struct scsipi_periph *);
	for (i = 0; i < chan->chan_ntargets; i++) {
		chan->chan_periphs[i] = malloc(nbytes, M_DEVBUF, M_NOWAIT);
		if (chan->chan_periphs[i] == NULL) {
			while (--i >= 0) {
				free(chan->chan_periphs[i], M_DEVBUF);
			}
			return (ENOMEM);
		}
		memset(chan->chan_periphs[i], 0, nbytes);
	}

	/*
	 * Create the asynchronous completion thread.
	 */
	kthread_create(scsipi_create_completion_thread, chan);
	return (0);
}

/*
 * scsipi_channel_shutdown:
 *
 *	Shutdown a scsipi_channel.
 */
void
scsipi_channel_shutdown(chan)
	struct scsipi_channel *chan;
{

	/*
	 * Shut down the completion thread.
	 */
	chan->chan_tflags |= SCSIPI_CHANT_SHUTDOWN;
	wakeup(&chan->chan_complete);

	/*
	 * Now wait for the thread to exit.
	 */
	while (chan->chan_thread != NULL)
		(void) tsleep(&chan->chan_thread, PRIBIO, "scshut", 0);
}

/*
 * scsipi_insert_periph:
 *
 *	Insert a periph into the channel.
 */
void
scsipi_insert_periph(chan, periph)
	struct scsipi_channel *chan;
	struct scsipi_periph *periph;
{
	int s;

	s = splbio();
	chan->chan_periphs[periph->periph_target][periph->periph_lun] = periph;
	splx(s);
}

/*
 * scsipi_remove_periph:
 *
 *	Remove a periph from the channel.
 */
void
scsipi_remove_periph(chan, periph)
	struct scsipi_channel *chan;
	struct scsipi_periph *periph;
{
	int s;

	s = splbio();
	chan->chan_periphs[periph->periph_target][periph->periph_lun] = NULL;
	splx(s);
}

/*
 * scsipi_lookup_periph:
 *
 *	Lookup a periph on the specified channel.
 */
struct scsipi_periph *
scsipi_lookup_periph(chan, target, lun)
	struct scsipi_channel *chan;
	int target, lun;
{
	struct scsipi_periph *periph;
	int s;

	if (target >= chan->chan_ntargets ||
	    lun >= chan->chan_nluns)
		return (NULL);

	s = splbio();
	periph = chan->chan_periphs[target][lun];
	splx(s);

	return (periph);
}

/*
 * scsipi_get_resource:
 *
 *	Allocate a single xfer `resource' from the channel.
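 *	(i.e. one command opening, taken from the channel itself when
 *	SCSIPI_CHAN_OPENINGS is set, otherwise from the adapter)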
 *
 *	NOTE: Must be called at splbio().
 */
int
scsipi_get_resource(chan)
	struct scsipi_channel *chan;
{
	struct scsipi_adapter *adapt = chan->chan_adapter;

	if (chan->chan_flags & SCSIPI_CHAN_OPENINGS) {
		if (chan->chan_openings > 0) {
			chan->chan_openings--;
			return (1);
		}
		return (0);
	}

	if (adapt->adapt_openings > 0) {
		adapt->adapt_openings--;
		return (1);
	}
	return (0);
}

/*
 * scsipi_grow_resources:
 *
 *	Attempt to grow resources for a channel.  If this succeeds,
 *	we allocate one for our caller.
 *
 *	NOTE: Must be called at splbio().
 */
__inline int
scsipi_grow_resources(chan)
	struct scsipi_channel *chan;
{

	if (chan->chan_flags & SCSIPI_CHAN_CANGROW) {
		if ((chan->chan_flags & SCSIPI_CHAN_TACTIVE) == 0) {
			scsipi_adapter_request(chan,
			    ADAPTER_REQ_GROW_RESOURCES, NULL);
			return (scsipi_get_resource(chan));
		}
		/*
		 * ask the channel thread to do it. It'll have to thaw the
		 * queue
		 */
		scsipi_channel_freeze(chan, 1);
		chan->chan_tflags |= SCSIPI_CHANT_GROWRES;
		wakeup(&chan->chan_complete);
		return (0);
	}

	return (0);
}

/*
 * scsipi_put_resource:
 *
 *	Free a single xfer `resource' to the channel.
 *
 *	NOTE: Must be called at splbio().
 */
void
scsipi_put_resource(chan)
	struct scsipi_channel *chan;
{
	struct scsipi_adapter *adapt = chan->chan_adapter;

	if (chan->chan_flags & SCSIPI_CHAN_OPENINGS)
		chan->chan_openings++;
	else
		adapt->adapt_openings++;
}

/*
 * scsipi_get_tag:
 *
 *	Get a tag ID for the specified xfer.
 *
 *	NOTE: Must be called at splbio().
 */
void
scsipi_get_tag(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_periph *periph = xs->xs_periph;
	int word, bit, tag;

	for (word = 0; word < PERIPH_NTAGWORDS; word++) {
		bit = ffs(periph->periph_freetags[word]);
		if (bit != 0)
			break;
	}
#ifdef DIAGNOSTIC
	if (word == PERIPH_NTAGWORDS) {
		scsipi_printaddr(periph);
		printf("no free tags\n");
		panic("scsipi_get_tag");
	}
#endif

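	/*
	 * Each word of periph_freetags covers 32 tags, so the tag ID is
	 * (word index * 32) + bit index within that word.
	 */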
	bit -= 1;
	periph->periph_freetags[word] &= ~(1 << bit);
	tag = (word << 5) | bit;

	/* XXX Should eventually disallow this completely. */
	if (tag >= periph->periph_openings) {
		scsipi_printaddr(periph);
		printf("WARNING: tag %d greater than available openings %d\n",
		    tag, periph->periph_openings);
	}

	xs->xs_tag_id = tag;
}

/*
 * scsipi_put_tag:
 *
 *	Put the tag ID for the specified xfer back into the pool.
 *
 *	NOTE: Must be called at splbio().
 */
void
scsipi_put_tag(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_periph *periph = xs->xs_periph;
	int word, bit;

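	/* Reverse the encoding used by scsipi_get_tag() (32 tags per word). */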
	word = xs->xs_tag_id >> 5;
	bit = xs->xs_tag_id & 0x1f;

	periph->periph_freetags[word] |= (1 << bit);
}

/*
 * scsipi_get_xs:
 *
 *	Allocate an xfer descriptor and associate it with the
 *	specified peripheral.  If the peripheral has no more
 *	available command openings, we either block waiting for
 *	one to become available, or fail.
 */
struct scsipi_xfer *
scsipi_get_xs(periph, flags)
	struct scsipi_periph *periph;
	int flags;
{
	struct scsipi_xfer *xs;
	int s;

	SC_DEBUG(periph, SCSIPI_DB3, ("scsipi_get_xs\n"));

	/*
	 * If we're cold, make sure we poll.
	 */
	if (cold)
		flags |= XS_CTL_NOSLEEP | XS_CTL_POLL;

#ifdef DIAGNOSTIC
	/*
	 * URGENT commands can never be ASYNC.
	 */
	if ((flags & (XS_CTL_URGENT|XS_CTL_ASYNC)) ==
	    (XS_CTL_URGENT|XS_CTL_ASYNC)) {
		scsipi_printaddr(periph);
		printf("URGENT and ASYNC\n");
		panic("scsipi_get_xs");
	}
#endif

	s = splbio();
	/*
	 * Wait for a command opening to become available.  Rules:
	 *
	 *	- All xfers must wait for an available opening.
	 *	  Exception: URGENT xfers can proceed when
	 *	  active == openings, because we use the opening
	 *	  of the command we're recovering for.
	 *	- If the periph has sense pending, only URGENT & REQSENSE
	 *	  xfers may proceed.
	 *
	 *	- If the periph is recovering, only URGENT xfers may
	 *	  proceed.
	 *
	 *	- If the periph is currently executing a recovery
	 *	  command, URGENT commands must block, because only
	 *	  one recovery command can execute at a time.
	 */
	for (;;) {
		if (flags & XS_CTL_URGENT) {
			if (periph->periph_active > periph->periph_openings)
				goto wait_for_opening;
			if (periph->periph_flags & PERIPH_SENSE) {
				if ((flags & XS_CTL_REQSENSE) == 0)
					goto wait_for_opening;
			} else {
				if ((periph->periph_flags &
				    PERIPH_RECOVERY_ACTIVE) != 0)
					goto wait_for_opening;
				periph->periph_flags |= PERIPH_RECOVERY_ACTIVE;
			}
			break;
		}
		if (periph->periph_active >= periph->periph_openings ||
		    (periph->periph_flags & PERIPH_RECOVERING) != 0)
			goto wait_for_opening;
		periph->periph_active++;
		break;

 wait_for_opening:
		if (flags & XS_CTL_NOSLEEP) {
			splx(s);
			return (NULL);
		}
		SC_DEBUG(periph, SCSIPI_DB3, ("sleeping\n"));
		periph->periph_flags |= PERIPH_WAITING;
		(void) tsleep(periph, PRIBIO, "getxs", 0);
	}
	SC_DEBUG(periph, SCSIPI_DB3, ("calling pool_get\n"));
	xs = pool_get(&scsipi_xfer_pool,
	    ((flags & XS_CTL_NOSLEEP) != 0 ? PR_NOWAIT : PR_WAITOK));
	if (xs == NULL) {
		if (flags & XS_CTL_URGENT) {
			if ((flags & XS_CTL_REQSENSE) == 0)
				periph->periph_flags &= ~PERIPH_RECOVERY_ACTIVE;
		} else
			periph->periph_active--;
		scsipi_printaddr(periph);
		printf("unable to allocate %sscsipi_xfer\n",
		    (flags & XS_CTL_URGENT) ? "URGENT " : "");
	}
	splx(s);

	SC_DEBUG(periph, SCSIPI_DB3, ("returning\n"));

	if (xs != NULL) {
		callout_init(&xs->xs_callout);
		memset(xs, 0, sizeof(*xs));
		xs->xs_periph = periph;
		xs->xs_control = flags;
		xs->xs_status = 0;
		s = splbio();
		TAILQ_INSERT_TAIL(&periph->periph_xferq, xs, device_q);
		splx(s);
	}
	return (xs);
}

/*
 * scsipi_put_xs:
 *
 *	Release an xfer descriptor, decreasing the outstanding command
 *	count for the peripheral.  If there is a thread waiting for
 *	an opening, wake it up.  If not, kick any queued I/O the
 *	peripheral may have.
 *
 *	NOTE: Must be called at splbio().
 */
void
scsipi_put_xs(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_periph *periph = xs->xs_periph;
	int flags = xs->xs_control;

	SC_DEBUG(periph, SCSIPI_DB3, ("scsipi_free_xs\n"));

	TAILQ_REMOVE(&periph->periph_xferq, xs, device_q);
	pool_put(&scsipi_xfer_pool, xs);

#ifdef DIAGNOSTIC
	if ((periph->periph_flags & PERIPH_RECOVERY_ACTIVE) != 0 &&
	    periph->periph_active == 0) {
		scsipi_printaddr(periph);
		printf("recovery without a command to recover for\n");
		panic("scsipi_put_xs");
	}
#endif

	if (flags & XS_CTL_URGENT) {
		if ((flags & XS_CTL_REQSENSE) == 0)
			periph->periph_flags &= ~PERIPH_RECOVERY_ACTIVE;
	} else
		periph->periph_active--;
	if (periph->periph_active == 0 &&
	    (periph->periph_flags & PERIPH_WAITDRAIN) != 0) {
		periph->periph_flags &= ~PERIPH_WAITDRAIN;
		wakeup(&periph->periph_active);
	}

	if (periph->periph_flags & PERIPH_WAITING) {
		periph->periph_flags &= ~PERIPH_WAITING;
		wakeup(periph);
	} else {
		if (periph->periph_switch->psw_start != NULL) {
			SC_DEBUG(periph, SCSIPI_DB2,
			    ("calling private start()\n"));
			(*periph->periph_switch->psw_start)(periph);
		}
	}
}

/*
 * scsipi_channel_freeze:
 *
 *	Freeze a channel's xfer queue.
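 *	The freeze count nests, so each freeze must eventually be matched
 *	by a thaw before the queue will run again.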
 */
void
scsipi_channel_freeze(chan, count)
	struct scsipi_channel *chan;
	int count;
{
	int s;

	s = splbio();
	chan->chan_qfreeze += count;
	splx(s);
}

/*
 * scsipi_channel_thaw:
 *
 *	Thaw a channel's xfer queue.
 */
void
scsipi_channel_thaw(chan, count)
	struct scsipi_channel *chan;
	int count;
{
	int s;

	s = splbio();
	chan->chan_qfreeze -= count;
	/*
	 * Don't let the freeze count go negative.
	 *
	 * Presumably the adapter driver could keep track of this,
	 * but it might just be easier to do this here so as to allow
	 * multiple callers, including those outside the adapter driver.
	 */
	if (chan->chan_qfreeze < 0) {
		chan->chan_qfreeze = 0;
	}
	splx(s);
	/*
	 * Kick the channel's queue here.  Note, we may be running in
	 * interrupt context (softclock or HBA's interrupt), so the adapter
	 * driver had better not sleep.
	 */
	if (chan->chan_qfreeze == 0)
		scsipi_run_queue(chan);
}

/*
 * scsipi_channel_timed_thaw:
 *
 *	Thaw a channel after some time has expired.  This will also
 *	run the channel's queue if the freeze count has reached 0.
 */
void
scsipi_channel_timed_thaw(arg)
	void *arg;
{
	struct scsipi_channel *chan = arg;

	scsipi_channel_thaw(chan, 1);
}

/*
 * scsipi_periph_freeze:
 *
 *	Freeze a device's xfer queue.
 */
void
scsipi_periph_freeze(periph, count)
	struct scsipi_periph *periph;
	int count;
{
	int s;

	s = splbio();
	periph->periph_qfreeze += count;
	splx(s);
}

/*
 * scsipi_periph_thaw:
 *
 *	Thaw a device's xfer queue.
 */
void
scsipi_periph_thaw(periph, count)
	struct scsipi_periph *periph;
	int count;
{
	int s;

	s = splbio();
	periph->periph_qfreeze -= count;
#ifdef DIAGNOSTIC
	if (periph->periph_qfreeze < 0) {
		static const char pc[] = "periph freeze count < 0";
		scsipi_printaddr(periph);
		printf("%s\n", pc);
		panic(pc);
	}
#endif
	if (periph->periph_qfreeze == 0 &&
	    (periph->periph_flags & PERIPH_WAITING) != 0)
		wakeup(periph);
	splx(s);
}

/*
 * scsipi_periph_timed_thaw:
 *
 *	Thaw a device after some time has expired.
 */
void
scsipi_periph_timed_thaw(arg)
	void *arg;
{
	int s;
	struct scsipi_periph *periph = arg;

	callout_stop(&periph->periph_callout);

	s = splbio();
	scsipi_periph_thaw(periph, 1);
	if ((periph->periph_channel->chan_flags & SCSIPI_CHAN_TACTIVE) == 0) {
		/*
		 * Kick the channel's queue here.  Note, we're running in
		 * interrupt context (softclock), so the adapter driver
		 * had better not sleep.
		 */
		scsipi_run_queue(periph->periph_channel);
	} else {
		/*
		 * Tell the completion thread to kick the channel's queue here.
		 */
		periph->periph_channel->chan_tflags |= SCSIPI_CHANT_KICK;
		wakeup(&periph->periph_channel->chan_complete);
	}
	splx(s);
}

/*
 * scsipi_wait_drain:
 *
 *	Wait for a periph's pending xfers to drain.
 */
void
scsipi_wait_drain(periph)
	struct scsipi_periph *periph;
{
	int s;

	s = splbio();
	while (periph->periph_active != 0) {
		periph->periph_flags |= PERIPH_WAITDRAIN;
		(void) tsleep(&periph->periph_active, PRIBIO, "sxdrn", 0);
	}
	splx(s);
}

/*
 * scsipi_kill_pending:
 *
 *	Kill off all pending xfers for a periph.
 *
 *	NOTE: Must be called at splbio().
 */
void
scsipi_kill_pending(periph)
	struct scsipi_periph *periph;
{

	(*periph->periph_channel->chan_bustype->bustype_kill_pending)(periph);
#ifdef DIAGNOSTIC
	if (TAILQ_FIRST(&periph->periph_xferq) != NULL)
		panic("scsipi_kill_pending");
#endif
	scsipi_wait_drain(periph);
}

/*
 * scsipi_interpret_sense:
 *
 *	Look at the returned sense and act on the error, determining
 *	the unix error number to pass back.  (0 = report no error)
 *
 *	NOTE: If we return ERESTART, we are expected to have
 *	thawed the device!
 *
 *	THIS IS THE DEFAULT ERROR HANDLER FOR SCSI DEVICES.
 */
int
scsipi_interpret_sense(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_sense_data *sense;
	struct scsipi_periph *periph = xs->xs_periph;
	u_int8_t key;
	u_int32_t info;
	int error;
#ifndef SCSIVERBOSE
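	/* One message per sense key, indexed below by (key - 1). */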
	static char *error_mes[] = {
		"soft error (corrected)",
		"not ready", "medium error",
		"non-media hardware failure", "illegal request",
		"unit attention", "readonly device",
		"no data found", "vendor unique",
		"copy aborted", "command aborted",
		"search returned equal", "volume overflow",
		"verify miscompare", "unknown error key"
	};
#endif

	sense = &xs->sense.scsi_sense;
#ifdef SCSIPI_DEBUG
	if (periph->periph_flags & SCSIPI_DB1) {
		int count;
		scsipi_printaddr(periph);
		printf(" sense debug information:\n");
		printf("\tcode 0x%x valid 0x%x\n",
		    sense->error_code & SSD_ERRCODE,
		    sense->error_code & SSD_ERRCODE_VALID ? 1 : 0);
		printf("\tseg 0x%x key 0x%x ili 0x%x eom 0x%x fmark 0x%x\n",
		    sense->segment,
		    sense->flags & SSD_KEY,
		    sense->flags & SSD_ILI ? 1 : 0,
		    sense->flags & SSD_EOM ? 1 : 0,
		    sense->flags & SSD_FILEMARK ? 1 : 0);
		printf("\ninfo: 0x%x 0x%x 0x%x 0x%x followed by %d "
		    "extra bytes\n",
		    sense->info[0],
		    sense->info[1],
		    sense->info[2],
		    sense->info[3],
		    sense->extra_len);
		printf("\textra: ");
		for (count = 0; count < ADD_BYTES_LIM(sense); count++)
			printf("0x%x ", sense->cmd_spec_info[count]);
		printf("\n");
	}
#endif

	/*
	 * If the periph has its own error handler, call it first.
	 * If it returns a legit error value, return that, otherwise
	 * it wants us to continue with normal error processing.
	 */
	if (periph->periph_switch->psw_error != NULL) {
		SC_DEBUG(periph, SCSIPI_DB2,
		    ("calling private err_handler()\n"));
		error = (*periph->periph_switch->psw_error)(xs);
		if (error != EJUSTRETURN)
			return (error);
	}
	/* otherwise use the default */
	switch (sense->error_code & SSD_ERRCODE) {
	/*
	 * If it's code 70, use the extended stuff and
	 * interpret the key
	 */
	case 0x71:		/* delayed error */
		scsipi_printaddr(periph);
		key = sense->flags & SSD_KEY;
		printf(" DEFERRED ERROR, key = 0x%x\n", key);
		/* FALLTHROUGH */
	case 0x70:
		if ((sense->error_code & SSD_ERRCODE_VALID) != 0)
			info = _4btol(sense->info);
		else
			info = 0;
		key = sense->flags & SSD_KEY;

		switch (key) {
		case SKEY_NO_SENSE:
		case SKEY_RECOVERED_ERROR:
			if (xs->resid == xs->datalen && xs->datalen) {
				/*
				 * Why is this here?
				 */
				xs->resid = 0;	/* not short read */
			}
		case SKEY_EQUAL:
			error = 0;
			break;
		case SKEY_NOT_READY:
			if ((periph->periph_flags & PERIPH_REMOVABLE) != 0)
				periph->periph_flags &= ~PERIPH_MEDIA_LOADED;
			if ((xs->xs_control & XS_CTL_IGNORE_NOT_READY) != 0)
				return (0);
			if (sense->add_sense_code == 0x3A) {
				error = ENODEV; /* Medium not present */
				if (xs->xs_control & XS_CTL_SILENT_NODEV)
					return (error);
			} else
				error = EIO;
			if ((xs->xs_control & XS_CTL_SILENT) != 0)
				return (error);
			break;
		case SKEY_ILLEGAL_REQUEST:
			if ((xs->xs_control &
			    XS_CTL_IGNORE_ILLEGAL_REQUEST) != 0)
				return (0);
			/*
			 * Handle the case where a device reports
			 * Logical Unit Not Supported during discovery.
			 */
			if ((xs->xs_control & XS_CTL_DISCOVERY) != 0 &&
			    sense->add_sense_code == 0x25 &&
			    sense->add_sense_code_qual == 0x00)
				return (EINVAL);
			if ((xs->xs_control & XS_CTL_SILENT) != 0)
				return (EIO);
			error = EINVAL;
			break;
		case SKEY_UNIT_ATTENTION:
			if (sense->add_sense_code == 0x29 &&
			    sense->add_sense_code_qual == 0x00) {
				/* device or bus reset */
				return (ERESTART);
			}
			if ((periph->periph_flags & PERIPH_REMOVABLE) != 0)
				periph->periph_flags &= ~PERIPH_MEDIA_LOADED;
			if ((xs->xs_control &
			    XS_CTL_IGNORE_MEDIA_CHANGE) != 0 ||
			    /* XXX Should reupload any transient state. */
			    (periph->periph_flags &
			     PERIPH_REMOVABLE) == 0) {
				return (ERESTART);
			}
			if ((xs->xs_control & XS_CTL_SILENT) != 0)
				return (EIO);
			error = EIO;
			break;
		case SKEY_WRITE_PROTECT:
			error = EROFS;
			break;
		case SKEY_BLANK_CHECK:
			error = 0;
			break;
		case SKEY_ABORTED_COMMAND:
			error = ERESTART;
			break;
		case SKEY_VOLUME_OVERFLOW:
			error = ENOSPC;
			break;
		default:
			error = EIO;
			break;
		}

#ifdef SCSIVERBOSE
		if (key && (xs->xs_control & XS_CTL_SILENT) == 0)
			scsipi_print_sense(xs, 0);
#else
		if (key) {
			scsipi_printaddr(periph);
			printf("%s", error_mes[key - 1]);
			if ((sense->error_code & SSD_ERRCODE_VALID) != 0) {
				switch (key) {
				case SKEY_NOT_READY:
				case SKEY_ILLEGAL_REQUEST:
				case SKEY_UNIT_ATTENTION:
				case SKEY_WRITE_PROTECT:
					break;
				case SKEY_BLANK_CHECK:
					printf(", requested size: %d (decimal)",
					    info);
					break;
				case SKEY_ABORTED_COMMAND:
					if (xs->xs_retries)
						printf(", retrying");
					printf(", cmd 0x%x, info 0x%x",
					    xs->cmd->opcode, info);
					break;
				default:
					printf(", info = %d (decimal)", info);
				}
			}
			if (sense->extra_len != 0) {
				int n;
				printf(", data =");
				for (n = 0; n < sense->extra_len; n++)
					printf(" %02x",
					    sense->cmd_spec_info[n]);
			}
			printf("\n");
		}
#endif
		return (error);

	/*
	 * Not code 70, just report it
	 */
	default:
#if defined(SCSIDEBUG) || defined(DEBUG)
	{
		static char *uc = "undecodable sense error";
		int i;
		u_int8_t *cptr = (u_int8_t *) sense;
		scsipi_printaddr(periph);
		if (xs->cmd == &xs->cmdstore) {
			printf("%s for opcode 0x%x, data=",
			    uc, xs->cmdstore.opcode);
		} else {
			printf("%s, data=", uc);
		}
		for (i = 0; i < sizeof (sense); i++)
			printf(" 0x%02x", *(cptr++) & 0xff);
		printf("\n");
	}
#else
		scsipi_printaddr(periph);
		printf("Sense Error Code 0x%x",
		    sense->error_code & SSD_ERRCODE);
		if ((sense->error_code & SSD_ERRCODE_VALID) != 0) {
			struct scsipi_sense_data_unextended *usense =
			    (struct scsipi_sense_data_unextended *)sense;
			printf(" at block no. %d (decimal)",
			    _3btol(usense->block));
		}
		printf("\n");
#endif
		return (EIO);
	}
}

/*
 * scsipi_size:
 *
 *	Find out from the device what its capacity is.
 */
u_long
scsipi_size(periph, flags)
	struct scsipi_periph *periph;
	int flags;
{
	struct scsipi_read_cap_data rdcap;
	struct scsipi_read_capacity scsipi_cmd;

	memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
	scsipi_cmd.opcode = READ_CAPACITY;

	/*
	 * If the command works, interpret the result as a 4 byte
	 * number of blocks.  (The device reports the address of its
	 * last block, hence the "+ 1" below.)
	 */
	if (scsipi_command(periph, (struct scsipi_generic *)&scsipi_cmd,
	    sizeof(scsipi_cmd), (u_char *)&rdcap, sizeof(rdcap),
	    SCSIPIRETRIES, 20000, NULL,
	    flags | XS_CTL_DATA_IN | XS_CTL_DATA_ONSTACK) != 0) {
		scsipi_printaddr(periph);
		printf("could not get size\n");
		return (0);
	}

	return (_4btol(rdcap.addr) + 1);
}

/*
 * scsipi_test_unit_ready:
 *
 *	Issue a `test unit ready' request.
 */
int
scsipi_test_unit_ready(periph, flags)
	struct scsipi_periph *periph;
	int flags;
{
	struct scsipi_test_unit_ready scsipi_cmd;

	/* some ATAPI drives don't support TEST_UNIT_READY. Sigh */
	if (periph->periph_quirks & PQUIRK_NOTUR)
		return (0);

	memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
	scsipi_cmd.opcode = TEST_UNIT_READY;

	return (scsipi_command(periph,
	    (struct scsipi_generic *)&scsipi_cmd, sizeof(scsipi_cmd),
	    0, 0, SCSIPIRETRIES, 10000, NULL, flags));
}

/*
 * scsipi_inquire:
 *
 *	Ask the device about itself.
 */
int
scsipi_inquire(periph, inqbuf, flags)
	struct scsipi_periph *periph;
	struct scsipi_inquiry_data *inqbuf;
	int flags;
{
	struct scsipi_inquiry scsipi_cmd;

	memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
	scsipi_cmd.opcode = INQUIRY;
	scsipi_cmd.length = sizeof(struct scsipi_inquiry_data);

	return (scsipi_command(periph,
	    (struct scsipi_generic *) &scsipi_cmd, sizeof(scsipi_cmd),
	    (u_char *) inqbuf, sizeof(struct scsipi_inquiry_data),
	    SCSIPIRETRIES, 10000, NULL, XS_CTL_DATA_IN | flags));
}

/*
 * scsipi_prevent:
 *
 *	Prevent or allow the user to remove the media
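 *	(`type' is passed through unchanged as the PREVENT ALLOW `how' byte.)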
 */
int
scsipi_prevent(periph, type, flags)
	struct scsipi_periph *periph;
	int type, flags;
{
	struct scsipi_prevent scsipi_cmd;

	if (periph->periph_quirks & PQUIRK_NODOORLOCK)
		return (0);

	memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
	scsipi_cmd.opcode = PREVENT_ALLOW;
	scsipi_cmd.how = type;

	return (scsipi_command(periph,
	    (struct scsipi_generic *) &scsipi_cmd, sizeof(scsipi_cmd),
	    0, 0, SCSIPIRETRIES, 5000, NULL, flags));
}

/*
 * scsipi_start:
 *
 *	Send a START UNIT.
 */
int
scsipi_start(periph, type, flags)
	struct scsipi_periph *periph;
	int type, flags;
{
	struct scsipi_start_stop scsipi_cmd;

	if (periph->periph_quirks & PQUIRK_NOSTARTUNIT)
		return 0;

	memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
	scsipi_cmd.opcode = START_STOP;
	scsipi_cmd.byte2 = 0x00;
	scsipi_cmd.how = type;

	return (scsipi_command(periph,
	    (struct scsipi_generic *) &scsipi_cmd, sizeof(scsipi_cmd),
	    0, 0, SCSIPIRETRIES, (type & SSS_START) ? 60000 : 10000,
	    NULL, flags));
}

/*
 * scsipi_mode_sense, scsipi_mode_sense_big:
 *	get a mode page from a device
 */

int
scsipi_mode_sense(periph, byte2, page, data, len, flags, retries, timeout)
	struct scsipi_periph *periph;
	int byte2, page, len, flags, retries, timeout;
	struct scsipi_mode_header *data;
{
	struct scsipi_mode_sense scsipi_cmd;
	int error;

	memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
	scsipi_cmd.opcode = MODE_SENSE;
	scsipi_cmd.byte2 = byte2;
	scsipi_cmd.page = page;
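	/*
	 * ATAPI uses the 10-byte MODE SENSE with a 2-byte allocation
	 * length; plain SCSI uses the 6-byte form with a 1-byte length.
	 */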
	if (scsipi_periph_bustype(periph) == SCSIPI_BUSTYPE_ATAPI)
		_lto2b(len, scsipi_cmd.u_len.atapi.length);
	else
		scsipi_cmd.u_len.scsi.length = len & 0xff;
	error = scsipi_command(periph, (struct scsipi_generic *)&scsipi_cmd,
	    sizeof(scsipi_cmd), (void *)data, len, retries, timeout, NULL,
	    flags | XS_CTL_DATA_IN);
	SC_DEBUG(periph, SCSIPI_DB2,
	    ("scsipi_mode_sense: error=%d\n", error));
	return (error);
}
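
/*
 * Example (illustrative sketch only, not part of the original driver):
 * a caller wanting the current values of the caching mode page (page 8)
 * might wrap the mode header and page data in its own structure and do
 *
 *	struct {
 *		struct scsipi_mode_header header;
 *		u_int8_t page_data[20];
 *	} data;
 *
 *	error = scsipi_mode_sense(periph, 0, 8, &data.header,
 *	    sizeof(data), XS_CTL_DATA_ONSTACK, SCSIPIRETRIES, 10000);
 *
 * The page number, buffer layout and timeout here are hypothetical.
 */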

int
scsipi_mode_sense_big(periph, byte2, page, data, len, flags, retries, timeout)
	struct scsipi_periph *periph;
	int byte2, page, len, flags, retries, timeout;
	struct scsipi_mode_header_big *data;
{
	struct scsipi_mode_sense_big scsipi_cmd;
	int error;

	memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
	scsipi_cmd.opcode = MODE_SENSE_BIG;
	scsipi_cmd.byte2 = byte2;
	scsipi_cmd.page = page;
	_lto2b(len, scsipi_cmd.length);
	error = scsipi_command(periph, (struct scsipi_generic *)&scsipi_cmd,
	    sizeof(scsipi_cmd), (void *)data, len, retries, timeout, NULL,
	    flags | XS_CTL_DATA_IN);
	SC_DEBUG(periph, SCSIPI_DB2,
	    ("scsipi_mode_sense_big: error=%d\n", error));
	return (error);
}

int
scsipi_mode_select(periph, byte2, data, len, flags, retries, timeout)
	struct scsipi_periph *periph;
	int byte2, len, flags, retries, timeout;
	struct scsipi_mode_header *data;
{
	struct scsipi_mode_select scsipi_cmd;
	int error;

	memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
	scsipi_cmd.opcode = MODE_SELECT;
	scsipi_cmd.byte2 = byte2;
	if (scsipi_periph_bustype(periph) == SCSIPI_BUSTYPE_ATAPI)
		_lto2b(len, scsipi_cmd.u_len.atapi.length);
	else
		scsipi_cmd.u_len.scsi.length = len & 0xff;
	error = scsipi_command(periph, (struct scsipi_generic *)&scsipi_cmd,
	    sizeof(scsipi_cmd), (void *)data, len, retries, timeout, NULL,
	    flags | XS_CTL_DATA_OUT);
	SC_DEBUG(periph, SCSIPI_DB2,
	    ("scsipi_mode_select: error=%d\n", error));
	return (error);
}

int
scsipi_mode_select_big(periph, byte2, data, len, flags, retries, timeout)
	struct scsipi_periph *periph;
	int byte2, len, flags, retries, timeout;
	struct scsipi_mode_header_big *data;
{
	struct scsipi_mode_select_big scsipi_cmd;
	int error;

	memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
	scsipi_cmd.opcode = MODE_SELECT_BIG;
	scsipi_cmd.byte2 = byte2;
	_lto2b(len, scsipi_cmd.length);
	error = scsipi_command(periph, (struct scsipi_generic *)&scsipi_cmd,
	    sizeof(scsipi_cmd), (void *)data, len, retries, timeout, NULL,
	    flags | XS_CTL_DATA_OUT);
	SC_DEBUG(periph, SCSIPI_DB2,
	    ("scsipi_mode_select_big: error=%d\n", error));
	return (error);
}

/*
 * scsipi_done:
 *
 *	This routine is called by an adapter's interrupt handler when
 *	an xfer is completed.
 */
void
scsipi_done(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_periph *periph = xs->xs_periph;
	struct scsipi_channel *chan = periph->periph_channel;
	int s, freezecnt;

	SC_DEBUG(periph, SCSIPI_DB2, ("scsipi_done\n"));
#ifdef SCSIPI_DEBUG
	if (periph->periph_dbflags & SCSIPI_DB1)
		show_scsipi_cmd(xs);
#endif

	s = splbio();
	/*
	 * The resource this command was using is now free.
	 */
	scsipi_put_resource(chan);
	xs->xs_periph->periph_sent--;

	/*
	 * If the command was tagged, free the tag.
	 */
	if (XS_CTL_TAGTYPE(xs) != 0)
		scsipi_put_tag(xs);
	else
		periph->periph_flags &= ~PERIPH_UNTAG;

	/* Mark the command as `done'. */
	xs->xs_status |= XS_STS_DONE;

#ifdef DIAGNOSTIC
	if ((xs->xs_control & (XS_CTL_ASYNC|XS_CTL_POLL)) ==
	    (XS_CTL_ASYNC|XS_CTL_POLL))
		panic("scsipi_done: ASYNC and POLL");
#endif

	/*
	 * If the xfer had an error of any sort, freeze the
	 * periph's queue.  Freeze it again if we were requested
	 * to do so in the xfer.
	 */
	freezecnt = 0;
	if (xs->error != XS_NOERROR)
		freezecnt++;
	if (xs->xs_control & XS_CTL_FREEZE_PERIPH)
		freezecnt++;
	if (freezecnt != 0)
		scsipi_periph_freeze(periph, freezecnt);

	/*
	 * record the xfer with a pending sense, in case a SCSI reset is
	 * received before the thread is woken up.
	 */
	if (xs->error == XS_BUSY && xs->status == SCSI_CHECK) {
		periph->periph_flags |= PERIPH_SENSE;
		periph->periph_xscheck = xs;
	}

	/*
	 * If this was an xfer that was not to complete asynchronously,
	 * let the requesting thread perform error checking/handling
	 * in its context.
	 */
	if ((xs->xs_control & XS_CTL_ASYNC) == 0) {
		splx(s);
		/*
		 * If it's a polling job, just return, to unwind the
		 * call graph.  We don't need to restart the queue,
		 * because polling jobs are treated specially, and
		 * are really only used during crash dumps anyway
		 * (XXX or during boot-time autoconfiguration of
		 * ATAPI devices).
		 */
		if (xs->xs_control & XS_CTL_POLL)
			return;
		wakeup(xs);
		goto out;
	}

	/*
	 * Catch the extremely common case of I/O completing
	 * without error; no use in taking a context switch
	 * if we can handle it in interrupt context.
	 */
	if (xs->error == XS_NOERROR) {
		splx(s);
		(void) scsipi_complete(xs);
		goto out;
	}

	/*
	 * There is an error on this xfer.  Put it on the channel's
	 * completion queue, and wake up the completion thread.
	 */
	TAILQ_INSERT_TAIL(&chan->chan_complete, xs, channel_q);
	splx(s);
	wakeup(&chan->chan_complete);

 out:
	/*
	 * If there are more xfers on the channel's queue, attempt to
	 * run them.
	 */
	scsipi_run_queue(chan);
}

/*
 * scsipi_complete:
 *
 *	Completion of a scsipi_xfer.  This is the guts of scsipi_done().
 *
 *	NOTE: This routine MUST be called with valid thread context
 *	except for the case where the following two conditions are
 *	true:
 *
 *		xs->error == XS_NOERROR
 *		XS_CTL_ASYNC is set in xs->xs_control
 *
 *	The semantics of this routine can be tricky, so here is an
 *	explanation:
 *
 *		0		Xfer completed successfully.
 *
 *		ERESTART	Xfer had an error, but was restarted.
 *
 *		anything else	Xfer had an error, return value is Unix
 *				errno.
 *
 *	If the return value is anything but ERESTART:
 *
 *		- If XS_CTL_ASYNC is set, `xs' has been freed back to
 *		  the pool.
 *		- If there is a buf associated with the xfer,
 *		  it has been biodone()'d.
 */
int
scsipi_complete(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_periph *periph = xs->xs_periph;
	struct scsipi_channel *chan = periph->periph_channel;
	struct buf *bp;
	int error, s;

#ifdef DIAGNOSTIC
	if ((xs->xs_control & XS_CTL_ASYNC) != 0 && xs->bp == NULL)
		panic("scsipi_complete: XS_CTL_ASYNC but no buf");
#endif
	/*
1353 1.38.4.1 nathanw * If command terminated with a CHECK CONDITION, we need to issue a
1354 1.38.4.1 nathanw * REQUEST_SENSE command. Once the REQUEST_SENSE has been processed
1355 1.38.4.1 nathanw * we'll have the real status.
1356 1.38.4.1 nathanw * Must be processed at splbio() to avoid missing a SCSI bus reset
1357 1.38.4.1 nathanw * for this command.
1358 1.38.4.1 nathanw */
1359 1.38.4.1 nathanw s = splbio();
1360 1.38.4.1 nathanw if (xs->error == XS_BUSY && xs->status == SCSI_CHECK) {
1361 1.38.4.1 nathanw /* request sense for a request sense? */
1362 1.38.4.1 nathanw if (xs->xs_control & XS_CTL_REQSENSE) {
1363 1.38.4.1 nathanw scsipi_printaddr(periph);
1364 1.38.4.2 nathanw printf("request sense for a request sense?\n");
1365 1.38.4.1 nathanw /* XXX maybe we should reset the device ? */
1366 1.38.4.1 nathanw /* we've been frozen because xs->error != XS_NOERROR */
1367 1.38.4.1 nathanw scsipi_periph_thaw(periph, 1);
1368 1.38.4.1 nathanw splx(s);
1369 1.38.4.2 nathanw if (xs->resid < xs->datalen) {
1370 1.38.4.2 nathanw printf("we read %d bytes of sense anyway:\n",
1371 1.38.4.2 nathanw xs->datalen - xs->resid);
1372 1.38.4.2 nathanw #ifdef SCSIVERBOSE
1373 1.38.4.2 nathanw scsipi_print_sense_data((void *)xs->data, 0);
1374 1.38.4.2 nathanw #endif
1375 1.38.4.2 nathanw }
1376 1.38.4.1 nathanw return EINVAL;
1377 1.38.4.1 nathanw }
1378 1.38.4.1 nathanw scsipi_request_sense(xs);
1379 1.38.4.1 nathanw }
1380 1.38.4.1 nathanw splx(s);
1381 1.38.4.2 nathanw
1382 1.38.4.1 nathanw /*
1383 1.38.4.1 nathanw * If it's a user level request, bypass all usual completion
1384 1.38.4.1 nathanw * processing; let the user work it out.
1385 1.2 bouyer */
1386 1.38.4.1 nathanw if ((xs->xs_control & XS_CTL_USERCMD) != 0) {
1387 1.38.4.1 nathanw SC_DEBUG(periph, SCSIPI_DB3, ("calling user done()\n"));
1388 1.38.4.1 nathanw if (xs->error != XS_NOERROR)
1389 1.38.4.1 nathanw scsipi_periph_thaw(periph, 1);
1390 1.38.4.1 nathanw scsipi_user_done(xs);
1391 1.38.4.1 nathanw SC_DEBUG(periph, SCSIPI_DB3, ("returned from user done()\n"));
1392 1.38.4.1 nathanw return 0;
1393 1.38.4.1 nathanw }
1394 1.38.4.1 nathanw
1395 1.2 bouyer switch (xs->error) {
1396 1.38.4.1 nathanw case XS_NOERROR:
1397 1.2 bouyer error = 0;
1398 1.2 bouyer break;
1399 1.2 bouyer
1400 1.2 bouyer case XS_SENSE:
1401 1.13 bouyer case XS_SHORTSENSE:
1402 1.38.4.1 nathanw error = (*chan->chan_bustype->bustype_interpret_sense)(xs);
1403 1.2 bouyer break;
1404 1.2 bouyer
1405 1.38.4.1 nathanw case XS_RESOURCE_SHORTAGE:
1406 1.38.4.1 nathanw /*
1407 1.38.4.1 nathanw * XXX Should freeze channel's queue.
1408 1.38.4.1 nathanw */
1409 1.38.4.1 nathanw scsipi_printaddr(periph);
1410 1.38.4.1 nathanw printf("adapter resource shortage\n");
1411 1.38.4.1 nathanw /* FALLTHROUGH */
1412 1.38.4.1 nathanw
1413 1.2 bouyer case XS_BUSY:
1414 1.38.4.1 nathanw if (xs->error == XS_BUSY && xs->status == SCSI_QUEUE_FULL) {
1415 1.38.4.1 nathanw struct scsipi_max_openings mo;
1416 1.38.4.1 nathanw
1417 1.38.4.1 nathanw /*
1418 1.38.4.1 nathanw * We set the openings to active - 1, assuming that
1419 1.38.4.1 nathanw * the command that got us here is the first one that
1420 1.38.4.1 nathanw * can't fit into the device's queue. If that's not
1421 1.38.4.1 nathanw * the case, I guess we'll find out soon enough.
1422 1.38.4.1 nathanw */
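/*
 * Example: with periph_openings == 8 and periph_active == 5 when
 * the QUEUE FULL arrives, mo_openings becomes 4 (periph_active - 1).
 */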
1423 1.38.4.1 nathanw mo.mo_target = periph->periph_target;
1424 1.38.4.1 nathanw mo.mo_lun = periph->periph_lun;
1425 1.38.4.1 nathanw if (periph->periph_active < periph->periph_openings)
1426 1.38.4.1 nathanw mo.mo_openings = periph->periph_active - 1;
1427 1.2 bouyer else
1428 1.38.4.1 nathanw mo.mo_openings = periph->periph_openings - 1;
1429 1.38.4.1 nathanw #ifdef DIAGNOSTIC
1430 1.38.4.1 nathanw if (mo.mo_openings < 0) {
1431 1.38.4.1 nathanw scsipi_printaddr(periph);
1432 1.38.4.1 nathanw printf("QUEUE FULL resulted in < 0 openings\n");
1433 1.38.4.1 nathanw panic("scsipi_done");
1434 1.38.4.1 nathanw }
1435 1.2 bouyer #endif
1436 1.38.4.1 nathanw if (mo.mo_openings == 0) {
1437 1.38.4.1 nathanw scsipi_printaddr(periph);
1438 1.38.4.1 nathanw printf("QUEUE FULL resulted in 0 openings\n");
1439 1.38.4.1 nathanw mo.mo_openings = 1;
1440 1.38.4.1 nathanw }
1441 1.38.4.1 nathanw scsipi_async_event(chan, ASYNC_EVENT_MAX_OPENINGS, &mo);
1442 1.38.4.1 nathanw error = ERESTART;
1443 1.38.4.1 nathanw } else if (xs->xs_retries != 0) {
1444 1.38.4.1 nathanw xs->xs_retries--;
1445 1.38.4.1 nathanw /*
1446 1.38.4.1 nathanw * Wait one second, and try again.
1447 1.38.4.1 nathanw */
1448 1.38.4.4 nathanw if ((xs->xs_control & XS_CTL_POLL) ||
1449 1.38.4.4 nathanw (chan->chan_flags & SCSIPI_CHAN_TACTIVE) == 0) {
1450 1.38.4.1 nathanw delay(1000000);
1451 1.38.4.4 nathanw } else {
1452 1.38.4.1 nathanw scsipi_periph_freeze(periph, 1);
1453 1.38.4.1 nathanw callout_reset(&periph->periph_callout,
1454 1.38.4.1 nathanw hz, scsipi_periph_timed_thaw, periph);
1455 1.38.4.1 nathanw }
1456 1.38.4.1 nathanw error = ERESTART;
1457 1.38.4.1 nathanw } else
1458 1.38.4.1 nathanw error = EBUSY;
1459 1.38.4.1 nathanw break;
1460 1.38.4.1 nathanw
1461 1.38.4.1 nathanw case XS_REQUEUE:
1462 1.38.4.1 nathanw error = ERESTART;
1463 1.38.4.1 nathanw break;
1464 1.38.4.1 nathanw
1465 1.2 bouyer case XS_TIMEOUT:
1466 1.38.4.1 nathanw if (xs->xs_retries != 0) {
1467 1.38.4.1 nathanw xs->xs_retries--;
1468 1.38.4.1 nathanw error = ERESTART;
1469 1.38.4.1 nathanw } else
1470 1.38.4.1 nathanw error = EIO;
1471 1.2 bouyer break;
1472 1.2 bouyer
1473 1.2 bouyer case XS_SELTIMEOUT:
1474 1.2 bouyer /* XXX Disable device? */
1475 1.12 thorpej error = EIO;
1476 1.12 thorpej break;
1477 1.12 thorpej
1478 1.12 thorpej case XS_RESET:
1479 1.38.4.1 nathanw if (xs->xs_control & XS_CTL_REQSENSE) {
1480 1.38.4.1 nathanw /*
1481 1.38.4.1 nathanw * request sense interrupted by reset: signal it
1482 1.38.4.1 nathanw * with EINTR return code.
1483 1.38.4.1 nathanw */
1484 1.38.4.1 nathanw error = EINTR;
1485 1.38.4.1 nathanw } else {
1486 1.38.4.1 nathanw if (xs->xs_retries != 0) {
1487 1.38.4.1 nathanw xs->xs_retries--;
1488 1.38.4.1 nathanw error = ERESTART;
1489 1.38.4.1 nathanw } else
1490 1.38.4.1 nathanw error = EIO;
1491 1.12 thorpej }
1492 1.2 bouyer break;
1493 1.2 bouyer
1494 1.2 bouyer default:
1495 1.38.4.1 nathanw scsipi_printaddr(periph);
1496 1.38.4.1 nathanw printf("invalid return code from adapter: %d\n", xs->error);
1497 1.2 bouyer error = EIO;
1498 1.2 bouyer break;
1499 1.2 bouyer }
1500 1.2 bouyer
1501 1.38.4.1 nathanw s = splbio();
1502 1.38.4.1 nathanw if (error == ERESTART) {
1503 1.38.4.1 nathanw /*
1504 1.38.4.1 nathanw * If we get here, the periph has been thawed and frozen
1505 1.38.4.1 nathanw * again if we had to issue recovery commands. Alternatively,
1506 1.38.4.1 nathanw * it may have been frozen again and be in a timed thaw. In
1507 1.38.4.1 nathanw * any case, we thaw the periph once we re-enqueue the
1508 1.38.4.1 nathanw * command. Once the periph is fully thawed, it will begin
1509 1.38.4.1 nathanw * operation again.
1510 1.38.4.1 nathanw */
1511 1.38.4.1 nathanw xs->error = XS_NOERROR;
1512 1.38.4.1 nathanw xs->status = SCSI_OK;
1513 1.38.4.1 nathanw xs->xs_status &= ~XS_STS_DONE;
1514 1.38.4.1 nathanw xs->xs_requeuecnt++;
1515 1.38.4.1 nathanw error = scsipi_enqueue(xs);
1516 1.38.4.1 nathanw if (error == 0) {
1517 1.38.4.1 nathanw scsipi_periph_thaw(periph, 1);
1518 1.38.4.1 nathanw splx(s);
1519 1.38.4.1 nathanw return (ERESTART);
1520 1.38.4.1 nathanw }
1521 1.38.4.1 nathanw }
1522 1.38.4.1 nathanw
1523 1.38.4.1 nathanw /*
1524 1.38.4.1 nathanw * scsipi_done() freezes the queue if not XS_NOERROR.
1525 1.38.4.1 nathanw * Thaw it here.
1526 1.38.4.1 nathanw */
1527 1.38.4.1 nathanw if (xs->error != XS_NOERROR)
1528 1.38.4.1 nathanw scsipi_periph_thaw(periph, 1);
1529 1.38.4.1 nathanw
1530 1.38.4.1 nathanw
1531 1.38.4.1 nathanw if (periph->periph_switch->psw_done)
1532 1.38.4.1 nathanw periph->periph_switch->psw_done(xs);
1533 1.38.4.1 nathanw if ((bp = xs->bp) != NULL) {
1534 1.38.4.1 nathanw if (error) {
1535 1.38.4.1 nathanw bp->b_error = error;
1536 1.38.4.1 nathanw bp->b_flags |= B_ERROR;
1537 1.38.4.1 nathanw bp->b_resid = bp->b_bcount;
1538 1.38.4.1 nathanw } else {
1539 1.38.4.1 nathanw bp->b_error = 0;
1540 1.38.4.1 nathanw bp->b_resid = xs->resid;
1541 1.38.4.4 nathanw }
1542 1.38.4.1 nathanw biodone(bp);
1543 1.38.4.1 nathanw }
1544 1.38.4.1 nathanw
1545 1.38.4.1 nathanw if (xs->xs_control & XS_CTL_ASYNC)
1546 1.38.4.1 nathanw scsipi_put_xs(xs);
1547 1.38.4.1 nathanw splx(s);
1548 1.38.4.1 nathanw
1549 1.3 enami return (error);
1550 1.2 bouyer }
1551 1.2 bouyer
1552 1.14 thorpej /*
1553 1.38.4.1 nathanw * Issue a request sense for the given scsipi_xfer. Called when the xfer
1554 1.38.4.1 nathanw * returns with a CHECK_CONDITION status. Must be called in valid thread
1555 1.38.4.1 nathanw * context and at splbio().
1556 1.38.4.1 nathanw */
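/*
 * Note: the sense command is issued with XS_CTL_REQSENSE so that
 * scsipi_complete() can detect (and refuse) a nested request sense,
 * and with XS_CTL_URGENT so that scsipi_enqueue() places it at the
 * head of the channel's queue, ahead of normal I/O.
 */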
1557 1.38.4.1 nathanw
1558 1.38.4.1 nathanw void
1559 1.38.4.1 nathanw scsipi_request_sense(xs)
1560 1.38.4.1 nathanw struct scsipi_xfer *xs;
1561 1.38.4.1 nathanw {
1562 1.38.4.1 nathanw struct scsipi_periph *periph = xs->xs_periph;
1563 1.38.4.1 nathanw int flags, error;
1564 1.38.4.1 nathanw struct scsipi_sense cmd;
1565 1.38.4.1 nathanw
1566 1.38.4.1 nathanw periph->periph_flags |= PERIPH_SENSE;
1567 1.38.4.1 nathanw
1568 1.38.4.1 nathanw /* if command was polling, request sense will too */
1569 1.38.4.1 nathanw flags = xs->xs_control & XS_CTL_POLL;
1570 1.38.4.1 nathanw /* Polling commands can't sleep */
1571 1.38.4.1 nathanw if (flags)
1572 1.38.4.1 nathanw flags |= XS_CTL_NOSLEEP;
1573 1.38.4.1 nathanw
1574 1.38.4.1 nathanw flags |= XS_CTL_REQSENSE | XS_CTL_URGENT | XS_CTL_DATA_IN |
1575 1.38.4.1 nathanw XS_CTL_THAW_PERIPH | XS_CTL_FREEZE_PERIPH;
1576 1.38.4.1 nathanw
1577 1.38.4.2 nathanw memset(&cmd, 0, sizeof(cmd));
1578 1.38.4.1 nathanw cmd.opcode = REQUEST_SENSE;
1579 1.38.4.1 nathanw cmd.length = sizeof(struct scsipi_sense_data);
1580 1.38.4.1 nathanw
1581 1.38.4.1 nathanw error = scsipi_command(periph,
1582 1.38.4.1 nathanw (struct scsipi_generic *) &cmd, sizeof(cmd),
1583 1.38.4.1 nathanw (u_char*)&xs->sense.scsi_sense, sizeof(struct scsipi_sense_data),
1584 1.38.4.1 nathanw 0, 1000, NULL, flags);
1585 1.38.4.1 nathanw periph->periph_flags &= ~PERIPH_SENSE;
1586 1.38.4.1 nathanw periph->periph_xscheck = NULL;
1587 1.38.4.1 nathanw switch(error) {
1588 1.38.4.1 nathanw case 0:
1589 1.38.4.1 nathanw /* we have a valid sense */
1590 1.38.4.1 nathanw xs->error = XS_SENSE;
1591 1.38.4.1 nathanw return;
1592 1.38.4.1 nathanw case EINTR:
1593 1.38.4.1 nathanw /* REQUEST_SENSE interrupted by bus reset. */
1594 1.38.4.1 nathanw xs->error = XS_RESET;
1595 1.38.4.1 nathanw return;
1596 1.38.4.1 nathanw case EIO:
1597 1.38.4.1 nathanw /* request sense couldn't be performed */
1598 1.38.4.1 nathanw /*
1599 1.38.4.1 nathanw * XXX this isn't quite right but we don't have anything
1600 1.38.4.1 nathanw * better for now
1601 1.38.4.1 nathanw */
1602 1.38.4.1 nathanw xs->error = XS_DRIVER_STUFFUP;
1603 1.38.4.1 nathanw return;
1604 1.38.4.1 nathanw default:
1605 1.38.4.1 nathanw /* Notify that request sense failed. */
1606 1.38.4.1 nathanw xs->error = XS_DRIVER_STUFFUP;
1607 1.38.4.1 nathanw scsipi_printaddr(periph);
1608 1.38.4.1 nathanw printf("request sense failed with error %d\n", error);
1609 1.38.4.1 nathanw return;
1610 1.38.4.1 nathanw }
1611 1.38.4.1 nathanw }
1612 1.38.4.1 nathanw
1613 1.38.4.1 nathanw /*
1614 1.38.4.1 nathanw * scsipi_enqueue:
1615 1.38.4.1 nathanw *
1616 1.38.4.1 nathanw * Enqueue an xfer on a channel.
1617 1.14 thorpej */
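/*
 * Returns 0 on success, or EAGAIN (with xs->error set to
 * XS_DRIVER_STUFFUP) if a polled xfer cannot be accepted because
 * other jobs are already queued on the channel.
 */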
1618 1.14 thorpej int
1619 1.38.4.1 nathanw scsipi_enqueue(xs)
1620 1.38.4.1 nathanw struct scsipi_xfer *xs;
1621 1.14 thorpej {
1622 1.38.4.1 nathanw struct scsipi_channel *chan = xs->xs_periph->periph_channel;
1623 1.38.4.1 nathanw struct scsipi_xfer *qxs;
1624 1.38.4.1 nathanw int s;
1625 1.14 thorpej
1626 1.14 thorpej s = splbio();
1627 1.38.4.1 nathanw
1628 1.38.4.1 nathanw /*
1629 1.38.4.1 nathanw * If the xfer is to be polled, and there are already jobs on
1630 1.38.4.1 nathanw * the queue, we can't proceed.
1631 1.38.4.1 nathanw */
1632 1.38.4.1 nathanw if ((xs->xs_control & XS_CTL_POLL) != 0 &&
1633 1.38.4.1 nathanw TAILQ_FIRST(&chan->chan_queue) != NULL) {
1634 1.38.4.1 nathanw splx(s);
1635 1.38.4.1 nathanw xs->error = XS_DRIVER_STUFFUP;
1636 1.38.4.1 nathanw return (EAGAIN);
1637 1.38.4.1 nathanw }
1638 1.38.4.1 nathanw
1639 1.38.4.1 nathanw /*
1640 1.38.4.1 nathanw * If we have an URGENT xfer, it's an error recovery command
1641 1.38.4.1 nathanw * and it should just go on the head of the channel's queue.
1642 1.38.4.1 nathanw */
1643 1.38.4.1 nathanw if (xs->xs_control & XS_CTL_URGENT) {
1644 1.38.4.1 nathanw TAILQ_INSERT_HEAD(&chan->chan_queue, xs, channel_q);
1645 1.38.4.1 nathanw goto out;
1646 1.38.4.1 nathanw }
1647 1.38.4.1 nathanw
1648 1.38.4.1 nathanw /*
1649 1.38.4.1 nathanw * If this xfer has already been on the queue before, we
1650 1.38.4.1 nathanw * need to reinsert it in the correct order. That order is:
1651 1.38.4.1 nathanw *
1652 1.38.4.1 nathanw * Immediately before the first xfer for this periph
1653 1.38.4.1 nathanw * with a requeuecnt less than xs->xs_requeuecnt.
1654 1.38.4.1 nathanw *
1655 1.38.4.1 nathanw * Failing that, at the end of the queue. (We'll end up
1656 1.38.4.1 nathanw * there naturally.)
1657 1.38.4.1 nathanw */
1658 1.38.4.1 nathanw if (xs->xs_requeuecnt != 0) {
1659 1.38.4.1 nathanw for (qxs = TAILQ_FIRST(&chan->chan_queue); qxs != NULL;
1660 1.38.4.1 nathanw qxs = TAILQ_NEXT(qxs, channel_q)) {
1661 1.38.4.1 nathanw if (qxs->xs_periph == xs->xs_periph &&
1662 1.38.4.1 nathanw qxs->xs_requeuecnt < xs->xs_requeuecnt)
1663 1.38.4.1 nathanw break;
1664 1.38.4.1 nathanw }
1665 1.38.4.1 nathanw if (qxs != NULL) {
1666 1.38.4.1 nathanw TAILQ_INSERT_AFTER(&chan->chan_queue, qxs, xs,
1667 1.38.4.1 nathanw channel_q);
1668 1.38.4.1 nathanw goto out;
1669 1.38.4.1 nathanw }
1670 1.14 thorpej }
1671 1.38.4.1 nathanw TAILQ_INSERT_TAIL(&chan->chan_queue, xs, channel_q);
1672 1.38.4.1 nathanw out:
1673 1.38.4.1 nathanw if (xs->xs_control & XS_CTL_THAW_PERIPH)
1674 1.38.4.1 nathanw scsipi_periph_thaw(xs->xs_periph, 1);
1675 1.14 thorpej splx(s);
1676 1.38.4.1 nathanw return (0);
1677 1.14 thorpej }
1678 1.14 thorpej
1679 1.14 thorpej /*
1680 1.38.4.1 nathanw * scsipi_run_queue:
1681 1.38.4.1 nathanw *
1682 1.38.4.1 nathanw * Start as many xfers as possible running on the channel.
1683 1.14 thorpej */
1684 1.14 thorpej void
1685 1.38.4.1 nathanw scsipi_run_queue(chan)
1686 1.38.4.1 nathanw struct scsipi_channel *chan;
1687 1.14 thorpej {
1688 1.38.4.1 nathanw struct scsipi_xfer *xs;
1689 1.38.4.1 nathanw struct scsipi_periph *periph;
1690 1.14 thorpej int s;
1691 1.14 thorpej
1692 1.38.4.1 nathanw for (;;) {
1693 1.38.4.1 nathanw s = splbio();
1694 1.38.4.1 nathanw
1695 1.38.4.1 nathanw /*
1696 1.38.4.1 nathanw * If the channel is frozen, we can't do any work right
1697 1.38.4.1 nathanw * now.
1698 1.38.4.1 nathanw */
1699 1.38.4.1 nathanw if (chan->chan_qfreeze != 0) {
1700 1.38.4.1 nathanw splx(s);
1701 1.38.4.1 nathanw return;
1702 1.38.4.1 nathanw }
1703 1.38.4.1 nathanw
1704 1.38.4.1 nathanw /*
1705 1.38.4.1 nathanw * Look for work to do, and make sure we can do it.
1706 1.38.4.1 nathanw */
1707 1.38.4.1 nathanw for (xs = TAILQ_FIRST(&chan->chan_queue); xs != NULL;
1708 1.38.4.1 nathanw xs = TAILQ_NEXT(xs, channel_q)) {
1709 1.38.4.1 nathanw periph = xs->xs_periph;
1710 1.38.4.1 nathanw
1711 1.38.4.1 nathanw if ((periph->periph_sent >= periph->periph_openings) ||
1712 1.38.4.1 nathanw periph->periph_qfreeze != 0 ||
1713 1.38.4.1 nathanw (periph->periph_flags & PERIPH_UNTAG) != 0)
1714 1.38.4.1 nathanw continue;
1715 1.38.4.1 nathanw
1716 1.38.4.1 nathanw if ((periph->periph_flags &
1717 1.38.4.1 nathanw (PERIPH_RECOVERING | PERIPH_SENSE)) != 0 &&
1718 1.38.4.1 nathanw (xs->xs_control & XS_CTL_URGENT) == 0)
1719 1.38.4.1 nathanw continue;
1720 1.38.4.1 nathanw
1721 1.38.4.1 nathanw /*
1722 1.38.4.1 nathanw * We can issue this xfer!
1723 1.38.4.1 nathanw */
1724 1.38.4.1 nathanw goto got_one;
1725 1.38.4.1 nathanw }
1726 1.38.4.1 nathanw
1727 1.38.4.1 nathanw /*
1728 1.38.4.1 nathanw * Can't find any work to do right now.
1729 1.38.4.1 nathanw */
1730 1.38.4.1 nathanw splx(s);
1731 1.38.4.1 nathanw return;
1732 1.38.4.1 nathanw
1733 1.38.4.1 nathanw got_one:
1734 1.38.4.1 nathanw /*
1735 1.38.4.1 nathanw * Have an xfer to run. Allocate a resource from
1736 1.38.4.1 nathanw * the adapter to run it. If we can't allocate that
1737 1.38.4.1 nathanw * resource, we don't dequeue the xfer.
1738 1.38.4.1 nathanw */
1739 1.38.4.1 nathanw if (scsipi_get_resource(chan) == 0) {
1740 1.38.4.1 nathanw /*
1741 1.38.4.1 nathanw * Adapter is out of resources. If the adapter
1742 1.38.4.1 nathanw * supports it, attempt to grow them.
1743 1.38.4.1 nathanw */
1744 1.38.4.1 nathanw if (scsipi_grow_resources(chan) == 0) {
1745 1.38.4.1 nathanw /*
1746 1.38.4.1 nathanw * Wasn't able to grow resources,
1747 1.38.4.1 nathanw * nothing more we can do.
1748 1.38.4.1 nathanw */
1749 1.38.4.1 nathanw if (xs->xs_control & XS_CTL_POLL) {
1750 1.38.4.1 nathanw scsipi_printaddr(xs->xs_periph);
1751 1.38.4.1 nathanw printf("polling command but no "
1752 1.38.4.1 nathanw "adapter resources\n");
1753 1.38.4.1 nathanw /* We'll panic shortly... */
1754 1.38.4.1 nathanw }
1755 1.38.4.1 nathanw splx(s);
1756 1.38.4.1 nathanw
1757 1.38.4.1 nathanw /*
1758 1.38.4.1 nathanw * XXX: We should be able to note that
1759 1.38.4.1 nathanw * XXX: resources are needed here!
1760 1.38.4.1 nathanw */
1761 1.38.4.1 nathanw return;
1762 1.38.4.1 nathanw }
1763 1.38.4.1 nathanw /*
1764 1.38.4.1 nathanw * scsipi_grow_resources() allocated the resource
1765 1.38.4.1 nathanw * for us.
1766 1.38.4.1 nathanw */
1767 1.38.4.1 nathanw }
1768 1.38.4.1 nathanw
1769 1.38.4.1 nathanw /*
1770 1.38.4.1 nathanw * We have a resource to run this xfer, do it!
1771 1.38.4.1 nathanw */
1772 1.38.4.1 nathanw TAILQ_REMOVE(&chan->chan_queue, xs, channel_q);
1773 1.38.4.1 nathanw
1774 1.38.4.1 nathanw /*
1775 1.38.4.1 nathanw * If the command is to be tagged, allocate a tag ID
1776 1.38.4.1 nathanw * for it.
1777 1.38.4.1 nathanw */
1778 1.38.4.1 nathanw if (XS_CTL_TAGTYPE(xs) != 0)
1779 1.38.4.1 nathanw scsipi_get_tag(xs);
1780 1.38.4.1 nathanw else
1781 1.38.4.1 nathanw periph->periph_flags |= PERIPH_UNTAG;
1782 1.38.4.1 nathanw periph->periph_sent++;
1783 1.38.4.1 nathanw splx(s);
1784 1.38.4.1 nathanw
1785 1.38.4.1 nathanw scsipi_adapter_request(chan, ADAPTER_REQ_RUN_XFER, xs);
1786 1.38.4.1 nathanw }
1787 1.38.4.1 nathanw #ifdef DIAGNOSTIC
1788 1.38.4.1 nathanw panic("scsipi_run_queue: impossible");
1789 1.38.4.1 nathanw #endif
1790 1.38.4.1 nathanw }
1791 1.38.4.1 nathanw
1792 1.38.4.1 nathanw /*
1793 1.38.4.1 nathanw * scsipi_execute_xs:
1794 1.38.4.1 nathanw *
1795 1.38.4.1 nathanw * Begin execution of an xfer, waiting for it to complete, if necessary.
1796 1.38.4.1 nathanw */
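/*
 * For asynchronous xfers this returns EJUSTRETURN once the xfer is
 * enqueued; completion (and freeing of the xfer) happens later via
 * scsipi_done()/scsipi_complete().  For synchronous xfers the xfer
 * is freed here and the completion status is returned.
 */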
1797 1.38.4.1 nathanw int
1798 1.38.4.1 nathanw scsipi_execute_xs(xs)
1799 1.38.4.1 nathanw struct scsipi_xfer *xs;
1800 1.38.4.1 nathanw {
1801 1.38.4.1 nathanw struct scsipi_periph *periph = xs->xs_periph;
1802 1.38.4.1 nathanw struct scsipi_channel *chan = periph->periph_channel;
1803 1.38.4.1 nathanw int async, poll, retries, error, s;
1804 1.38.4.1 nathanw
1805 1.38.4.1 nathanw xs->xs_status &= ~XS_STS_DONE;
1806 1.38.4.1 nathanw xs->error = XS_NOERROR;
1807 1.38.4.1 nathanw xs->resid = xs->datalen;
1808 1.38.4.1 nathanw xs->status = SCSI_OK;
1809 1.38.4.1 nathanw
1810 1.38.4.1 nathanw #ifdef SCSIPI_DEBUG
1811 1.38.4.1 nathanw if (xs->xs_periph->periph_dbflags & SCSIPI_DB3) {
1812 1.38.4.1 nathanw printf("scsipi_execute_xs: ");
1813 1.38.4.1 nathanw show_scsipi_xs(xs);
1814 1.38.4.1 nathanw printf("\n");
1815 1.38.4.1 nathanw }
1816 1.38.4.1 nathanw #endif
1817 1.38.4.1 nathanw
1818 1.38.4.1 nathanw /*
1819 1.38.4.1 nathanw * Deal with command tagging:
1820 1.38.4.1 nathanw *
1821 1.38.4.1 nathanw * - If the device's current operating mode doesn't
1822 1.38.4.1 nathanw * include tagged queueing, clear the tag mask.
1823 1.38.4.1 nathanw *
1824 1.38.4.1 nathanw * - If the device's current operating mode *does*
1825 1.38.4.1 nathanw * include tagged queueing, set the tag_type in
1826 1.38.4.1 nathanw * the xfer to the appropriate byte for the tag
1827 1.38.4.1 nathanw * message.
1828 1.38.4.1 nathanw */
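/*
 * For example, an XS_CTL_URGENT recovery command with no explicit
 * tag goes out with MSG_HEAD_OF_Q_TAG, while ordinary I/O defaults
 * to MSG_ORDERED_Q_TAG unless the caller requested XS_CTL_SIMPLE_TAG.
 */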
1829 1.38.4.1 nathanw if ((PERIPH_XFER_MODE(periph) & PERIPH_CAP_TQING) == 0 ||
1830 1.38.4.1 nathanw (xs->xs_control & XS_CTL_REQSENSE)) {
1831 1.38.4.1 nathanw xs->xs_control &= ~XS_CTL_TAGMASK;
1832 1.38.4.1 nathanw xs->xs_tag_type = 0;
1833 1.38.4.1 nathanw } else {
1834 1.38.4.1 nathanw /*
1835 1.38.4.1 nathanw * If the request doesn't specify a tag, give Head
1836 1.38.4.1 nathanw * tags to URGENT operations and Ordered tags to
1837 1.38.4.1 nathanw * everything else.
1838 1.38.4.1 nathanw */
1839 1.38.4.1 nathanw if (XS_CTL_TAGTYPE(xs) == 0) {
1840 1.38.4.1 nathanw if (xs->xs_control & XS_CTL_URGENT)
1841 1.38.4.1 nathanw xs->xs_control |= XS_CTL_HEAD_TAG;
1842 1.38.4.1 nathanw else
1843 1.38.4.1 nathanw xs->xs_control |= XS_CTL_ORDERED_TAG;
1844 1.38.4.1 nathanw }
1845 1.38.4.1 nathanw
1846 1.38.4.1 nathanw switch (XS_CTL_TAGTYPE(xs)) {
1847 1.38.4.1 nathanw case XS_CTL_ORDERED_TAG:
1848 1.38.4.1 nathanw xs->xs_tag_type = MSG_ORDERED_Q_TAG;
1849 1.38.4.1 nathanw break;
1850 1.38.4.1 nathanw
1851 1.38.4.1 nathanw case XS_CTL_SIMPLE_TAG:
1852 1.38.4.1 nathanw xs->xs_tag_type = MSG_SIMPLE_Q_TAG;
1853 1.38.4.1 nathanw break;
1854 1.38.4.1 nathanw
1855 1.38.4.1 nathanw case XS_CTL_HEAD_TAG:
1856 1.38.4.1 nathanw xs->xs_tag_type = MSG_HEAD_OF_Q_TAG;
1857 1.38.4.1 nathanw break;
1858 1.38.4.1 nathanw
1859 1.38.4.1 nathanw default:
1860 1.38.4.1 nathanw scsipi_printaddr(periph);
1861 1.38.4.1 nathanw printf("invalid tag mask 0x%08x\n",
1862 1.38.4.1 nathanw XS_CTL_TAGTYPE(xs));
1863 1.38.4.1 nathanw panic("scsipi_execute_xs");
1864 1.38.4.1 nathanw }
1865 1.38.4.1 nathanw }
1866 1.38.4.1 nathanw
1867 1.38.4.1 nathanw /* If the adapter wants us to poll, poll. */
1868 1.38.4.1 nathanw if (chan->chan_adapter->adapt_flags & SCSIPI_ADAPT_POLL_ONLY)
1869 1.38.4.1 nathanw xs->xs_control |= XS_CTL_POLL;
1870 1.38.4.1 nathanw
1871 1.38.4.1 nathanw /*
1872 1.38.4.1 nathanw * If we don't yet have a completion thread, or we are to poll for
1873 1.38.4.1 nathanw * completion, clear the ASYNC flag.
1874 1.38.4.1 nathanw */
1875 1.38.4.1 nathanw if (chan->chan_thread == NULL || (xs->xs_control & XS_CTL_POLL) != 0)
1876 1.38.4.1 nathanw xs->xs_control &= ~XS_CTL_ASYNC;
1877 1.38.4.1 nathanw
1878 1.38.4.1 nathanw async = (xs->xs_control & XS_CTL_ASYNC);
1879 1.38.4.1 nathanw poll = (xs->xs_control & XS_CTL_POLL);
1880 1.38.4.1 nathanw retries = xs->xs_retries; /* for polling commands */
1881 1.38.4.1 nathanw
1882 1.38.4.1 nathanw #ifdef DIAGNOSTIC
1883 1.38.4.1 nathanw if (async != 0 && xs->bp == NULL)
1884 1.38.4.1 nathanw panic("scsipi_execute_xs: XS_CTL_ASYNC but no buf");
1885 1.38.4.1 nathanw #endif
1886 1.38.4.1 nathanw
1887 1.38.4.1 nathanw /*
1888 1.38.4.1 nathanw * Enqueue the transfer. If we're not polling for completion, this
1889 1.38.4.1 nathanw * should ALWAYS return `no error'.
1890 1.38.4.1 nathanw */
1891 1.38.4.1 nathanw try_again:
1892 1.38.4.1 nathanw error = scsipi_enqueue(xs);
1893 1.38.4.1 nathanw if (error) {
1894 1.38.4.1 nathanw if (poll == 0) {
1895 1.38.4.1 nathanw scsipi_printaddr(periph);
1896 1.38.4.1 nathanw printf("not polling, but enqueue failed with %d\n",
1897 1.38.4.1 nathanw error);
1898 1.38.4.1 nathanw panic("scsipi_execute_xs");
1899 1.38.4.1 nathanw }
1900 1.38.4.1 nathanw
1901 1.38.4.1 nathanw scsipi_printaddr(periph);
1902 1.38.4.1 nathanw printf("failed to enqueue polling command");
1903 1.38.4.1 nathanw if (retries != 0) {
1904 1.38.4.1 nathanw printf(", retrying...\n");
1905 1.38.4.1 nathanw delay(1000000);
1906 1.38.4.1 nathanw retries--;
1907 1.38.4.1 nathanw goto try_again;
1908 1.38.4.1 nathanw }
1909 1.38.4.1 nathanw printf("\n");
1910 1.38.4.1 nathanw goto free_xs;
1911 1.38.4.1 nathanw }
1912 1.38.4.1 nathanw
1913 1.38.4.1 nathanw restarted:
1914 1.38.4.1 nathanw scsipi_run_queue(chan);
1915 1.38.4.1 nathanw
1916 1.38.4.1 nathanw /*
1917 1.38.4.1 nathanw * The xfer is enqueued, and possibly running. If it's to be
1918 1.38.4.1 nathanw * completed asynchronously, just return now.
1919 1.38.4.1 nathanw */
1920 1.38.4.1 nathanw if (async)
1921 1.38.4.1 nathanw return (EJUSTRETURN);
1922 1.38.4.1 nathanw
1923 1.38.4.1 nathanw /*
1924 1.38.4.1 nathanw * Not an asynchronous command; wait for it to complete.
1925 1.38.4.1 nathanw */
1926 1.38.4.1 nathanw s = splbio();
1927 1.38.4.1 nathanw while ((xs->xs_status & XS_STS_DONE) == 0) {
1928 1.38.4.1 nathanw if (poll) {
1929 1.38.4.1 nathanw scsipi_printaddr(periph);
1930 1.38.4.1 nathanw printf("polling command not done\n");
1931 1.38.4.1 nathanw panic("scsipi_execute_xs");
1932 1.38.4.1 nathanw }
1933 1.38.4.1 nathanw (void) tsleep(xs, PRIBIO, "xscmd", 0);
1934 1.38.4.1 nathanw }
1935 1.38.4.1 nathanw splx(s);
1936 1.38.4.1 nathanw
1937 1.38.4.1 nathanw /*
1938 1.38.4.1 nathanw * Command is complete. scsipi_done() has awakened us to perform
1939 1.38.4.1 nathanw * the error handling.
1940 1.38.4.1 nathanw */
1941 1.38.4.1 nathanw error = scsipi_complete(xs);
1942 1.38.4.1 nathanw if (error == ERESTART)
1943 1.38.4.1 nathanw goto restarted;
1944 1.38.4.1 nathanw
1945 1.38.4.1 nathanw /*
1946 1.38.4.1 nathanw * Command completed successfully or fatal error occurred. Fall
1947 1.38.4.1 nathanw * into....
1948 1.38.4.1 nathanw */
1949 1.38.4.1 nathanw free_xs:
1950 1.38.4.1 nathanw s = splbio();
1951 1.38.4.1 nathanw scsipi_put_xs(xs);
1952 1.38.4.1 nathanw splx(s);
1953 1.38.4.1 nathanw
1954 1.38.4.1 nathanw /*
1955 1.38.4.1 nathanw * Kick the queue, keep it running in case it stopped for some
1956 1.38.4.1 nathanw * reason.
1957 1.38.4.1 nathanw */
1958 1.38.4.1 nathanw scsipi_run_queue(chan);
1959 1.38.4.1 nathanw
1960 1.38.4.1 nathanw return (error);
1961 1.38.4.1 nathanw }
1962 1.38.4.1 nathanw
1963 1.38.4.1 nathanw /*
1964 1.38.4.1 nathanw * scsipi_completion_thread:
1965 1.38.4.1 nathanw *
1966 1.38.4.1 nathanw * This is the completion thread. We wait for errors on
1967 1.38.4.1 nathanw * asynchronous xfers, and perform the error handling
1968 1.38.4.1 nathanw * function, restarting the command, if necessary.
1969 1.38.4.1 nathanw */
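/*
 * Besides completed xfers, the thread also acts on the channel's
 * chan_tflags work bits: SCSIPI_CHANT_CALLBACK (run a driver
 * callback in thread context), SCSIPI_CHANT_GROWRES (ask the
 * adapter for more openings), SCSIPI_CHANT_KICK (run the queue),
 * and SCSIPI_CHANT_SHUTDOWN (exit).
 */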
1970 1.38.4.1 nathanw void
1971 1.38.4.1 nathanw scsipi_completion_thread(arg)
1972 1.38.4.1 nathanw void *arg;
1973 1.38.4.1 nathanw {
1974 1.38.4.1 nathanw struct scsipi_channel *chan = arg;
1975 1.38.4.1 nathanw struct scsipi_xfer *xs;
1976 1.38.4.1 nathanw int s;
1977 1.38.4.1 nathanw
1978 1.38.4.4 nathanw s = splbio();
1979 1.38.4.4 nathanw chan->chan_flags |= SCSIPI_CHAN_TACTIVE;
1980 1.38.4.4 nathanw splx(s);
1981 1.38.4.1 nathanw for (;;) {
1982 1.38.4.1 nathanw s = splbio();
1983 1.38.4.1 nathanw xs = TAILQ_FIRST(&chan->chan_complete);
1984 1.38.4.5 nathanw if (xs == NULL && chan->chan_tflags == 0) {
1985 1.38.4.5 nathanw /* nothing to do; wait */
1986 1.38.4.1 nathanw (void) tsleep(&chan->chan_complete, PRIBIO,
1987 1.38.4.1 nathanw "sccomp", 0);
1988 1.38.4.1 nathanw splx(s);
1989 1.38.4.1 nathanw continue;
1990 1.38.4.1 nathanw }
1991 1.38.4.5 nathanw if (chan->chan_tflags & SCSIPI_CHANT_CALLBACK) {
1992 1.38.4.2 nathanw /* call chan_callback from thread context */
1993 1.38.4.5 nathanw chan->chan_tflags &= ~SCSIPI_CHANT_CALLBACK;
1994 1.38.4.2 nathanw chan->chan_callback(chan, chan->chan_callback_arg);
1995 1.38.4.4 nathanw splx(s);
1996 1.38.4.4 nathanw continue;
1997 1.38.4.4 nathanw }
1998 1.38.4.5 nathanw if (chan->chan_tflags & SCSIPI_CHANT_GROWRES) {
1999 1.38.4.5 nathanw /* attempt to get more openings for this channel */
2000 1.38.4.5 nathanw chan->chan_tflags &= ~SCSIPI_CHANT_GROWRES;
2001 1.38.4.5 nathanw scsipi_adapter_request(chan,
2002 1.38.4.5 nathanw ADAPTER_REQ_GROW_RESOURCES, NULL);
2003 1.38.4.5 nathanw scsipi_channel_thaw(chan, 1);
2004 1.38.4.5 nathanw splx(s);
2005 1.38.4.5 nathanw continue;
2006 1.38.4.5 nathanw }
2007 1.38.4.5 nathanw if (chan->chan_tflags & SCSIPI_CHANT_KICK) {
2008 1.38.4.4 nathanw /* explicitly run the queues for this channel */
2009 1.38.4.5 nathanw chan->chan_tflags &= ~SCSIPI_CHANT_KICK;
2010 1.38.4.4 nathanw scsipi_run_queue(chan);
2011 1.38.4.2 nathanw splx(s);
2012 1.38.4.2 nathanw continue;
2013 1.38.4.2 nathanw }
2014 1.38.4.5 nathanw if (chan->chan_tflags & SCSIPI_CHANT_SHUTDOWN) {
2015 1.38.4.1 nathanw splx(s);
2016 1.38.4.1 nathanw break;
2017 1.38.4.1 nathanw }
2018 1.38.4.2 nathanw if (xs) {
2019 1.38.4.2 nathanw TAILQ_REMOVE(&chan->chan_complete, xs, channel_q);
2020 1.38.4.2 nathanw splx(s);
2021 1.38.4.1 nathanw
2022 1.38.4.2 nathanw /*
2023 1.38.4.2 nathanw * Have an xfer with an error; process it.
2024 1.38.4.2 nathanw */
2025 1.38.4.2 nathanw (void) scsipi_complete(xs);
2026 1.38.4.1 nathanw
2027 1.38.4.2 nathanw /*
2028 1.38.4.2 nathanw * Kick the queue; keep it running if it was stopped
2029 1.38.4.2 nathanw * for some reason.
2030 1.38.4.2 nathanw */
2031 1.38.4.2 nathanw scsipi_run_queue(chan);
2032 1.38.4.2 nathanw } else {
2033 1.38.4.2 nathanw splx(s);
2034 1.38.4.2 nathanw }
2035 1.38.4.1 nathanw }
2036 1.38.4.1 nathanw
2037 1.38.4.1 nathanw chan->chan_thread = NULL;
2038 1.38.4.1 nathanw
2039 1.38.4.1 nathanw /* In case parent is waiting for us to exit. */
2040 1.38.4.1 nathanw wakeup(&chan->chan_thread);
2041 1.38.4.1 nathanw
2042 1.38.4.1 nathanw kthread_exit(0);
2043 1.38.4.1 nathanw }
2044 1.38.4.1 nathanw
2045 1.38.4.1 nathanw /*
2046 1.38.4.1 nathanw * scsipi_create_completion_thread:
2047 1.38.4.1 nathanw *
2048 1.38.4.1 nathanw * Callback to actually create the completion thread.
2049 1.38.4.1 nathanw */
2050 1.38.4.1 nathanw void
2051 1.38.4.1 nathanw scsipi_create_completion_thread(arg)
2052 1.38.4.1 nathanw void *arg;
2053 1.38.4.1 nathanw {
2054 1.38.4.1 nathanw struct scsipi_channel *chan = arg;
2055 1.38.4.1 nathanw struct scsipi_adapter *adapt = chan->chan_adapter;
2056 1.38.4.1 nathanw
2057 1.38.4.1 nathanw if (kthread_create1(scsipi_completion_thread, chan,
2058 1.38.4.1 nathanw &chan->chan_thread, "%s:%d", adapt->adapt_dev->dv_xname,
2059 1.38.4.1 nathanw chan->chan_channel)) {
2060 1.38.4.1 nathanw printf("%s: unable to create completion thread for "
2061 1.38.4.1 nathanw "channel %d\n", adapt->adapt_dev->dv_xname,
2062 1.38.4.1 nathanw chan->chan_channel);
2063 1.38.4.1 nathanw panic("scsipi_create_completion_thread");
2064 1.38.4.1 nathanw }
2065 1.38.4.1 nathanw }
2066 1.38.4.1 nathanw
2067 1.38.4.1 nathanw /*
2068 1.38.4.2 nathanw * scsipi_thread_call_callback:
2069 1.38.4.2 nathanw *
2070 1.38.4.2 nathanw * Request that a callback be invoked from the completion thread.
2071 1.38.4.2 nathanw */
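/*
 * Sketch of intended use by an adapter or bus driver (the callback
 * name is hypothetical, and it is assumed here that the callback is
 * responsible for thawing the channel frozen below):
 *
 *	error = scsipi_thread_call_callback(chan, mydrv_callback, sc);
 *
 * Returns ESRCH if the completion thread is not running yet, EBUSY
 * if another callback is already pending, 0 otherwise.
 */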
2072 1.38.4.2 nathanw int
2073 1.38.4.2 nathanw scsipi_thread_call_callback(chan, callback, arg)
2074 1.38.4.2 nathanw struct scsipi_channel *chan;
2075 1.38.4.2 nathanw void (*callback) __P((struct scsipi_channel *, void *));
2076 1.38.4.2 nathanw void *arg;
2077 1.38.4.2 nathanw {
2078 1.38.4.2 nathanw int s;
2079 1.38.4.2 nathanw
2080 1.38.4.2 nathanw s = splbio();
2081 1.38.4.5 nathanw if ((chan->chan_flags & SCSIPI_CHAN_TACTIVE) == 0) {
2082 1.38.4.5 nathanw /* kernel thread doesn't exist yet */
2083 1.38.4.5 nathanw splx(s);
2084 1.38.4.5 nathanw return ESRCH;
2085 1.38.4.5 nathanw }
2086 1.38.4.5 nathanw if (chan->chan_tflags & SCSIPI_CHANT_CALLBACK) {
2087 1.38.4.2 nathanw splx(s);
2088 1.38.4.2 nathanw return EBUSY;
2089 1.38.4.2 nathanw }
2090 1.38.4.2 nathanw scsipi_channel_freeze(chan, 1);
2091 1.38.4.2 nathanw chan->chan_callback = callback;
2092 1.38.4.2 nathanw chan->chan_callback_arg = arg;
2093 1.38.4.5 nathanw chan->chan_tflags |= SCSIPI_CHANT_CALLBACK;
2094 1.38.4.2 nathanw wakeup(&chan->chan_complete);
2095 1.38.4.2 nathanw splx(s);
2096 1.38.4.2 nathanw return(0);
2097 1.38.4.2 nathanw }
2098 1.38.4.2 nathanw
2099 1.38.4.2 nathanw /*
2100 1.38.4.1 nathanw * scsipi_async_event:
2101 1.38.4.1 nathanw *
2102 1.38.4.1 nathanw * Handle an asynchronous event from an adapter.
2103 1.38.4.1 nathanw */
2104 1.38.4.1 nathanw void
2105 1.38.4.1 nathanw scsipi_async_event(chan, event, arg)
2106 1.38.4.1 nathanw struct scsipi_channel *chan;
2107 1.38.4.1 nathanw scsipi_async_event_t event;
2108 1.38.4.1 nathanw void *arg;
2109 1.38.4.1 nathanw {
2110 1.38.4.1 nathanw int s;
2111 1.38.4.1 nathanw
2112 1.38.4.1 nathanw s = splbio();
2113 1.38.4.1 nathanw switch (event) {
2114 1.38.4.1 nathanw case ASYNC_EVENT_MAX_OPENINGS:
2115 1.38.4.1 nathanw scsipi_async_event_max_openings(chan,
2116 1.38.4.1 nathanw (struct scsipi_max_openings *)arg);
2117 1.38.4.1 nathanw break;
2118 1.38.4.1 nathanw
2119 1.38.4.1 nathanw case ASYNC_EVENT_XFER_MODE:
2120 1.38.4.1 nathanw scsipi_async_event_xfer_mode(chan,
2121 1.38.4.1 nathanw (struct scsipi_xfer_mode *)arg);
2122 1.38.4.1 nathanw break;
2123 1.38.4.1 nathanw case ASYNC_EVENT_RESET:
2124 1.38.4.1 nathanw scsipi_async_event_channel_reset(chan);
2125 1.38.4.1 nathanw break;
2126 1.38.4.1 nathanw }
2127 1.38.4.1 nathanw splx(s);
2128 1.38.4.1 nathanw }
2129 1.38.4.1 nathanw
2130 1.38.4.1 nathanw /*
2131 1.38.4.1 nathanw * scsipi_print_xfer_mode:
2132 1.38.4.1 nathanw *
2133 1.38.4.1 nathanw * Print a periph's capabilities.
2134 1.38.4.1 nathanw */
2135 1.38.4.1 nathanw void
2136 1.38.4.1 nathanw scsipi_print_xfer_mode(periph)
2137 1.38.4.1 nathanw struct scsipi_periph *periph;
2138 1.38.4.1 nathanw {
2139 1.38.4.1 nathanw int period, freq, speed, mbs;
2140 1.38.4.1 nathanw
2141 1.38.4.1 nathanw if ((periph->periph_flags & PERIPH_MODE_VALID) == 0)
2142 1.38.4.1 nathanw return;
2143 1.38.4.1 nathanw
2144 1.38.4.1 nathanw printf("%s: ", periph->periph_dev->dv_xname);
2145 1.38.4.1 nathanw if (periph->periph_mode & PERIPH_CAP_SYNC) {
2146 1.38.4.1 nathanw period = scsipi_sync_factor_to_period(periph->periph_period);
2147 1.38.4.1 nathanw printf("sync (%d.%dns offset %d)",
2148 1.38.4.1 nathanw period / 10, period % 10, periph->periph_offset);
2149 1.38.4.1 nathanw } else
2150 1.38.4.1 nathanw printf("async");
2151 1.38.4.1 nathanw
2152 1.38.4.1 nathanw if (periph->periph_mode & PERIPH_CAP_WIDE32)
2153 1.38.4.1 nathanw printf(", 32-bit");
2154 1.38.4.1 nathanw else if (periph->periph_mode & PERIPH_CAP_WIDE16)
2155 1.38.4.1 nathanw printf(", 16-bit");
2156 1.38.4.1 nathanw else
2157 1.38.4.1 nathanw printf(", 8-bit");
2158 1.38.4.1 nathanw
2159 1.38.4.1 nathanw if (periph->periph_mode & PERIPH_CAP_SYNC) {
2160 1.38.4.1 nathanw freq = scsipi_sync_factor_to_freq(periph->periph_period);
2161 1.38.4.1 nathanw speed = freq;
2162 1.38.4.1 nathanw if (periph->periph_mode & PERIPH_CAP_WIDE32)
2163 1.38.4.1 nathanw speed *= 4;
2164 1.38.4.1 nathanw else if (periph->periph_mode & PERIPH_CAP_WIDE16)
2165 1.38.4.1 nathanw speed *= 2;
2166 1.38.4.1 nathanw mbs = speed / 1000;
2167 1.38.4.1 nathanw if (mbs > 0)
2168 1.38.4.1 nathanw printf(" (%d.%03dMB/s)", mbs, speed % 1000);
2169 1.38.4.1 nathanw else
2170 1.38.4.1 nathanw printf(" (%dKB/s)", speed % 1000);
2171 1.38.4.1 nathanw }
2172 1.38.4.1 nathanw
2173 1.38.4.1 nathanw printf(" transfers");
2174 1.38.4.1 nathanw
2175 1.38.4.1 nathanw if (periph->periph_mode & PERIPH_CAP_TQING)
2176 1.38.4.1 nathanw printf(", tagged queueing");
2177 1.38.4.1 nathanw
2178 1.38.4.1 nathanw printf("\n");
2179 1.38.4.1 nathanw }
2180 1.38.4.1 nathanw
2181 1.38.4.1 nathanw /*
2182 1.38.4.1 nathanw * scsipi_async_event_max_openings:
2183 1.38.4.1 nathanw *
2184 1.38.4.1 nathanw * Update the maximum number of outstanding commands a
2185 1.38.4.1 nathanw * device may have.
2186 1.38.4.1 nathanw */
2187 1.38.4.1 nathanw void
2188 1.38.4.1 nathanw scsipi_async_event_max_openings(chan, mo)
2189 1.38.4.1 nathanw struct scsipi_channel *chan;
2190 1.38.4.1 nathanw struct scsipi_max_openings *mo;
2191 1.38.4.1 nathanw {
2192 1.38.4.1 nathanw struct scsipi_periph *periph;
2193 1.38.4.1 nathanw int minlun, maxlun;
2194 1.38.4.1 nathanw
2195 1.38.4.1 nathanw if (mo->mo_lun == -1) {
2196 1.38.4.1 nathanw /*
2197 1.38.4.1 nathanw * Wildcarded; apply it to all LUNs.
2198 1.38.4.1 nathanw */
2199 1.38.4.1 nathanw minlun = 0;
2200 1.38.4.1 nathanw maxlun = chan->chan_nluns - 1;
2201 1.38.4.1 nathanw } else
2202 1.38.4.1 nathanw minlun = maxlun = mo->mo_lun;
2203 1.38.4.1 nathanw
2204 1.38.4.1 nathanw for (; minlun <= maxlun; minlun++) {
2205 1.38.4.1 nathanw periph = scsipi_lookup_periph(chan, mo->mo_target, minlun);
2206 1.38.4.1 nathanw if (periph == NULL)
2207 1.38.4.1 nathanw continue;
2208 1.38.4.1 nathanw
2209 1.38.4.1 nathanw if (mo->mo_openings < periph->periph_openings)
2210 1.38.4.1 nathanw periph->periph_openings = mo->mo_openings;
2211 1.38.4.1 nathanw else if (mo->mo_openings > periph->periph_openings &&
2212 1.38.4.1 nathanw (periph->periph_flags & PERIPH_GROW_OPENINGS) != 0)
2213 1.38.4.1 nathanw periph->periph_openings = mo->mo_openings;
2214 1.38.4.1 nathanw }
2215 1.38.4.1 nathanw }
2216 1.38.4.1 nathanw
2217 1.38.4.1 nathanw /*
2218 1.38.4.1 nathanw * scsipi_async_event_xfer_mode:
2219 1.38.4.1 nathanw *
2220 1.38.4.1 nathanw * Update the xfer mode for all periphs sharing the
2221 1.38.4.1 nathanw * specified I_T Nexus.
2222 1.38.4.1 nathanw */
2223 1.38.4.1 nathanw void
2224 1.38.4.1 nathanw scsipi_async_event_xfer_mode(chan, xm)
2225 1.38.4.1 nathanw struct scsipi_channel *chan;
2226 1.38.4.1 nathanw struct scsipi_xfer_mode *xm;
2227 1.38.4.1 nathanw {
2228 1.38.4.1 nathanw struct scsipi_periph *periph;
2229 1.38.4.1 nathanw int lun, announce, mode, period, offset;
2230 1.38.4.1 nathanw
2231 1.38.4.1 nathanw for (lun = 0; lun < chan->chan_nluns; lun++) {
2232 1.38.4.1 nathanw periph = scsipi_lookup_periph(chan, xm->xm_target, lun);
2233 1.38.4.1 nathanw if (periph == NULL)
2234 1.38.4.1 nathanw continue;
2235 1.38.4.1 nathanw announce = 0;
2236 1.38.4.1 nathanw
2237 1.38.4.1 nathanw /*
2238 1.38.4.1 nathanw * Clamp the xfer mode down to this periph's capabilities.
2239 1.38.4.1 nathanw */
2240 1.38.4.1 nathanw mode = xm->xm_mode & periph->periph_cap;
2241 1.38.4.1 nathanw if (mode & PERIPH_CAP_SYNC) {
2242 1.38.4.1 nathanw period = xm->xm_period;
2243 1.38.4.1 nathanw offset = xm->xm_offset;
2244 1.38.4.1 nathanw } else {
2245 1.38.4.1 nathanw period = 0;
2246 1.38.4.1 nathanw offset = 0;
2247 1.38.4.1 nathanw }
2248 1.38.4.1 nathanw
2249 1.38.4.1 nathanw /*
2250 1.38.4.1 nathanw * If we do not have a valid xfer mode yet, or the parameters
2251 1.38.4.1 nathanw * are different, announce them.
2252 1.38.4.1 nathanw */
2253 1.38.4.1 nathanw if ((periph->periph_flags & PERIPH_MODE_VALID) == 0 ||
2254 1.38.4.1 nathanw periph->periph_mode != mode ||
2255 1.38.4.1 nathanw periph->periph_period != period ||
2256 1.38.4.1 nathanw periph->periph_offset != offset)
2257 1.38.4.1 nathanw announce = 1;
2258 1.38.4.1 nathanw
2259 1.38.4.1 nathanw periph->periph_mode = mode;
2260 1.38.4.1 nathanw periph->periph_period = period;
2261 1.38.4.1 nathanw periph->periph_offset = offset;
2262 1.38.4.1 nathanw periph->periph_flags |= PERIPH_MODE_VALID;
2263 1.38.4.1 nathanw
2264 1.38.4.1 nathanw if (announce)
2265 1.38.4.1 nathanw scsipi_print_xfer_mode(periph);
2266 1.38.4.1 nathanw }
2267 1.38.4.1 nathanw }
2268 1.38.4.1 nathanw
2269 1.38.4.1 nathanw /*
2270 1.38.4.1 nathanw * scsipi_set_xfer_mode:
2271 1.38.4.1 nathanw *
2272 1.38.4.1 nathanw * Set the xfer mode for the specified I_T Nexus.
2273 1.38.4.1 nathanw */
2274 1.38.4.1 nathanw void
2275 1.38.4.1 nathanw scsipi_set_xfer_mode(chan, target, immed)
2276 1.38.4.1 nathanw struct scsipi_channel *chan;
2277 1.38.4.1 nathanw int target, immed;
2278 1.38.4.1 nathanw {
2279 1.38.4.1 nathanw struct scsipi_xfer_mode xm;
2280 1.38.4.1 nathanw struct scsipi_periph *itperiph;
2281 1.38.4.1 nathanw int lun, s;
2282 1.38.4.1 nathanw
2283 1.38.4.1 nathanw /*
2284 1.38.4.1 nathanw * Go to the minimal xfer mode.
2285 1.38.4.1 nathanw */
2286 1.38.4.1 nathanw xm.xm_target = target;
2287 1.38.4.1 nathanw xm.xm_mode = 0;
2288 1.38.4.1 nathanw xm.xm_period = 0; /* ignored */
2289 1.38.4.1 nathanw xm.xm_offset = 0; /* ignored */
2290 1.38.4.1 nathanw
2291 1.38.4.1 nathanw /*
2292 1.38.4.1 nathanw * Find the first LUN we know about on this I_T Nexus.
2293 1.38.4.1 nathanw */
2294 1.38.4.1 nathanw for (lun = 0; lun < chan->chan_nluns; lun++) {
2295 1.38.4.1 nathanw itperiph = scsipi_lookup_periph(chan, target, lun);
2296 1.38.4.1 nathanw if (itperiph != NULL)
2297 1.38.4.1 nathanw break;
2298 1.38.4.1 nathanw }
2299 1.38.4.2 nathanw if (itperiph != NULL) {
2300 1.38.4.1 nathanw xm.xm_mode = itperiph->periph_cap;
2301 1.38.4.2 nathanw /*
2302 1.38.4.2 nathanw * Now issue the request to the adapter.
2303 1.38.4.2 nathanw */
2304 1.38.4.2 nathanw s = splbio();
2305 1.38.4.2 nathanw scsipi_adapter_request(chan, ADAPTER_REQ_SET_XFER_MODE, &xm);
2306 1.38.4.2 nathanw splx(s);
2307 1.38.4.2 nathanw /*
2308 1.38.4.2 nathanw * If we want this to happen immediately, issue a dummy
2309 1.38.4.2 nathanw * command, since most adapters can't really negotiate unless
2310 1.38.4.2 nathanw * they're executing a job.
2311 1.38.4.2 nathanw */
2312 1.38.4.2 nathanw if (immed != 0) {
2313 1.38.4.2 nathanw (void) scsipi_test_unit_ready(itperiph,
2314 1.38.4.2 nathanw XS_CTL_DISCOVERY | XS_CTL_IGNORE_ILLEGAL_REQUEST |
2315 1.38.4.2 nathanw XS_CTL_IGNORE_NOT_READY |
2316 1.38.4.2 nathanw XS_CTL_IGNORE_MEDIA_CHANGE);
2317 1.38.4.2 nathanw }
2318 1.38.4.1 nathanw }
2319 1.38.4.1 nathanw }
2320 1.38.4.1 nathanw
2321 1.38.4.1 nathanw /*
2322 1.38.4.1 nathanw * scsipi_channel_reset:
2323 1.38.4.1 nathanw *
2324 1.38.4.1 nathanw * Handle a SCSI bus reset.
2325 1.38.4.1 nathanw * Called at splbio().
2326 1.38.4.1 nathanw */
2327 1.38.4.1 nathanw void
2328 1.38.4.1 nathanw scsipi_async_event_channel_reset(chan)
2329 1.38.4.1 nathanw struct scsipi_channel *chan;
2330 1.38.4.1 nathanw {
2331 1.38.4.1 nathanw struct scsipi_xfer *xs, *xs_next;
2332 1.38.4.1 nathanw struct scsipi_periph *periph;
2333 1.38.4.1 nathanw int target, lun;
2334 1.38.4.1 nathanw
2335 1.38.4.1 nathanw /*
2336 1.38.4.1 nathanw * Channel has been reset. Also mark as reset any pending REQUEST_SENSE
2337 1.38.4.1 nathanw * commands, since their sense data is no longer available. We
2338 1.38.4.1 nathanw * can't call scsipi_done() from here, as the command has not been
2339 1.38.4.1 nathanw * sent to the adapter yet (this would corrupt accounting).
2340 1.38.4.1 nathanw */
2341 1.38.4.1 nathanw
2342 1.38.4.1 nathanw for (xs = TAILQ_FIRST(&chan->chan_queue); xs != NULL; xs = xs_next) {
2343 1.38.4.1 nathanw xs_next = TAILQ_NEXT(xs, channel_q);
2344 1.38.4.1 nathanw if (xs->xs_control & XS_CTL_REQSENSE) {
2345 1.38.4.1 nathanw TAILQ_REMOVE(&chan->chan_queue, xs, channel_q);
2346 1.38.4.1 nathanw xs->error = XS_RESET;
2347 1.38.4.1 nathanw if ((xs->xs_control & XS_CTL_ASYNC) != 0)
2348 1.38.4.1 nathanw TAILQ_INSERT_TAIL(&chan->chan_complete, xs,
2349 1.38.4.1 nathanw channel_q);
2350 1.38.4.1 nathanw }
2351 1.38.4.1 nathanw }
2352 1.38.4.1 nathanw wakeup(&chan->chan_complete);
2353 1.38.4.1 nathanw /* Catch xs with pending sense which may not have a REQSENSE xs yet */
2354 1.38.4.1 nathanw for (target = 0; target < chan->chan_ntargets; target++) {
2355 1.38.4.1 nathanw if (target == chan->chan_id)
2356 1.38.4.1 nathanw continue;
2357 1.38.4.1 nathanw for (lun = 0; lun < chan->chan_nluns; lun++) {
2358 1.38.4.1 nathanw periph = chan->chan_periphs[target][lun];
2359 1.38.4.1 nathanw if (periph) {
2360 1.38.4.1 nathanw xs = periph->periph_xscheck;
2361 1.38.4.1 nathanw if (xs)
2362 1.38.4.1 nathanw xs->error = XS_RESET;
2363 1.38.4.1 nathanw }
2364 1.38.4.1 nathanw }
2365 1.38.4.1 nathanw }
2366 1.38.4.1 nathanw }
2367 1.38.4.1 nathanw
2368 1.38.4.2 nathanw /*
2369 1.38.4.2 nathanw * scsipi_target_detach:
2370 1.38.4.2 nathanw *
2371 1.38.4.2 nathanw * Detach all periphs associated with an I_T nexus.
2372 1.38.4.2 nathanw * Must be called from valid thread context.
2373 1.38.4.2 nathanw */
2374 1.38.4.2 nathanw int
2375 1.38.4.2 nathanw scsipi_target_detach(chan, target, lun, flags)
2376 1.38.4.2 nathanw struct scsipi_channel *chan;
2377 1.38.4.2 nathanw int target, lun;
2378 1.38.4.2 nathanw int flags;
2379 1.38.4.2 nathanw {
2380 1.38.4.2 nathanw struct scsipi_periph *periph;
2381 1.38.4.2 nathanw int ctarget, mintarget, maxtarget;
2382 1.38.4.2 nathanw int clun, minlun, maxlun;
2383 1.38.4.2 nathanw int error;
2384 1.38.4.2 nathanw
2385 1.38.4.2 nathanw if (target == -1) {
2386 1.38.4.2 nathanw mintarget = 0;
2387 1.38.4.2 nathanw maxtarget = chan->chan_ntargets;
2388 1.38.4.2 nathanw } else {
2389 1.38.4.2 nathanw if (target == chan->chan_id)
2390 1.38.4.2 nathanw return EINVAL;
2391 1.38.4.2 nathanw if (target < 0 || target >= chan->chan_ntargets)
2392 1.38.4.2 nathanw return EINVAL;
2393 1.38.4.2 nathanw mintarget = target;
2394 1.38.4.2 nathanw maxtarget = target + 1;
2395 1.38.4.2 nathanw }
2396 1.38.4.2 nathanw
2397 1.38.4.2 nathanw if (lun == -1) {
2398 1.38.4.2 nathanw minlun = 0;
2399 1.38.4.2 nathanw maxlun = chan->chan_nluns;
2400 1.38.4.2 nathanw } else {
2401 1.38.4.2 nathanw if (lun < 0 || lun >= chan->chan_nluns)
2402 1.38.4.2 nathanw return EINVAL;
2403 1.38.4.2 nathanw minlun = lun;
2404 1.38.4.2 nathanw maxlun = lun + 1;
2405 1.38.4.2 nathanw }
2406 1.38.4.2 nathanw
2407 1.38.4.2 nathanw for (ctarget = mintarget; ctarget < maxtarget; ctarget++) {
2408 1.38.4.2 nathanw if (ctarget == chan->chan_id)
2409 1.38.4.2 nathanw continue;
2410 1.38.4.2 nathanw
2411 1.38.4.2 nathanw for (clun = minlun; clun < maxlun; clun++) {
2412 1.38.4.2 nathanw periph = scsipi_lookup_periph(chan, ctarget, clun);
2413 1.38.4.2 nathanw if (periph == NULL)
2414 1.38.4.2 nathanw continue;
2415 1.38.4.2 nathanw error = config_detach(periph->periph_dev, flags);
2416 1.38.4.2 nathanw if (error)
2417 1.38.4.2 nathanw return (error);
2418 1.38.4.2 nathanw scsipi_remove_periph(chan, periph);
2419 1.38.4.2 nathanw free(periph, M_DEVBUF);
2420 1.38.4.2 nathanw }
2421 1.38.4.2 nathanw }
2422 1.38.4.2 nathanw return(0);
2423 1.38.4.2 nathanw }
2424 1.38.4.1 nathanw
2425 1.38.4.1 nathanw /*
2426 1.38.4.1 nathanw * scsipi_adapter_addref:
2427 1.38.4.1 nathanw *
2428 1.38.4.1 nathanw * Add a reference to the adapter pointed to by the provided
2429 1.38.4.1 nathanw * link, enabling the adapter if necessary.
2430 1.38.4.1 nathanw */
2431 1.38.4.1 nathanw int
2432 1.38.4.1 nathanw scsipi_adapter_addref(adapt)
2433 1.38.4.1 nathanw struct scsipi_adapter *adapt;
2434 1.38.4.1 nathanw {
2435 1.38.4.1 nathanw int s, error = 0;
2436 1.38.4.1 nathanw
2437 1.38.4.1 nathanw s = splbio();
2438 1.38.4.1 nathanw if (adapt->adapt_refcnt++ == 0 && adapt->adapt_enable != NULL) {
2439 1.38.4.1 nathanw error = (*adapt->adapt_enable)(adapt->adapt_dev, 1);
2440 1.38.4.1 nathanw if (error)
2441 1.38.4.1 nathanw adapt->adapt_refcnt--;
2442 1.38.4.1 nathanw }
2443 1.38.4.1 nathanw splx(s);
2444 1.38.4.1 nathanw return (error);
2445 1.38.4.1 nathanw }
2446 1.38.4.1 nathanw
2447 1.38.4.1 nathanw /*
2448 1.38.4.1 nathanw * scsipi_adapter_delref:
2449 1.38.4.1 nathanw *
2450 1.38.4.1 nathanw * Delete a reference to the adapter pointed to by the provided
2451 1.38.4.1 nathanw * link, disabling the adapter if possible.
2452 1.38.4.1 nathanw */
2453 1.38.4.1 nathanw void
2454 1.38.4.1 nathanw scsipi_adapter_delref(adapt)
2455 1.38.4.1 nathanw struct scsipi_adapter *adapt;
2456 1.38.4.1 nathanw {
2457 1.38.4.1 nathanw int s;
2458 1.38.4.1 nathanw
2459 1.38.4.1 nathanw s = splbio();
2460 1.38.4.1 nathanw if (adapt->adapt_refcnt-- == 1 && adapt->adapt_enable != NULL)
2461 1.38.4.1 nathanw (void) (*adapt->adapt_enable)(adapt->adapt_dev, 0);
2462 1.38.4.1 nathanw splx(s);
2463 1.38.4.1 nathanw }
2464 1.38.4.1 nathanw
2465 1.38.4.1 nathanw struct scsipi_syncparam {
2466 1.38.4.1 nathanw int ss_factor;
2467 1.38.4.1 nathanw int ss_period; /* ns * 10 */
2468 1.38.4.1 nathanw } scsipi_syncparams[] = {
2469 1.38.4.3 nathanw { 0x09, 125 },
2470 1.38.4.1 nathanw { 0x0a, 250 },
2471 1.38.4.1 nathanw { 0x0b, 303 },
2472 1.38.4.1 nathanw { 0x0c, 500 },
2473 1.38.4.1 nathanw };
2474 1.38.4.1 nathanw const int scsipi_nsyncparams =
2475 1.38.4.1 nathanw sizeof(scsipi_syncparams) / sizeof(scsipi_syncparams[0]);
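/*
 * Example derived from the table above: factor 0x0c corresponds to a
 * period of 500 (i.e. 50.0 ns), so scsipi_sync_factor_to_freq()
 * returns 10000000 / 500 = 20000, i.e. 20 MHz expressed in kHz.
 */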
2476 1.38.4.1 nathanw
2477 1.38.4.1 nathanw int
2478 1.38.4.1 nathanw scsipi_sync_period_to_factor(period)
2479 1.38.4.1 nathanw int period; /* ns * 10 */
2480 1.38.4.1 nathanw {
2481 1.38.4.1 nathanw int i;
2482 1.38.4.1 nathanw
2483 1.38.4.1 nathanw for (i = 0; i < scsipi_nsyncparams; i++) {
2484 1.38.4.1 nathanw if (period <= scsipi_syncparams[i].ss_period)
2485 1.38.4.1 nathanw return (scsipi_syncparams[i].ss_factor);
2486 1.38.4.1 nathanw }
2487 1.38.4.1 nathanw
2488 1.38.4.1 nathanw return ((period / 10) / 4);
2489 1.38.4.1 nathanw }
2490 1.38.4.1 nathanw
2491 1.38.4.1 nathanw int
2492 1.38.4.1 nathanw scsipi_sync_factor_to_period(factor)
2493 1.38.4.1 nathanw int factor;
2494 1.38.4.1 nathanw {
2495 1.38.4.1 nathanw int i;
2496 1.38.4.1 nathanw
2497 1.38.4.1 nathanw for (i = 0; i < scsipi_nsyncparams; i++) {
2498 1.38.4.1 nathanw if (factor == scsipi_syncparams[i].ss_factor)
2499 1.38.4.1 nathanw return (scsipi_syncparams[i].ss_period);
2500 1.38.4.1 nathanw }
2501 1.38.4.1 nathanw
2502 1.38.4.1 nathanw return ((factor * 4) * 10);
2503 1.38.4.1 nathanw }
2504 1.38.4.1 nathanw
2505 1.38.4.1 nathanw int
2506 1.38.4.1 nathanw scsipi_sync_factor_to_freq(factor)
2507 1.38.4.1 nathanw int factor;
2508 1.38.4.1 nathanw {
2509 1.38.4.1 nathanw int i;
2510 1.38.4.1 nathanw
2511 1.38.4.1 nathanw for (i = 0; i < scsipi_nsyncparams; i++) {
2512 1.38.4.1 nathanw if (factor == scsipi_syncparams[i].ss_factor)
2513 1.38.4.1 nathanw return (10000000 / scsipi_syncparams[i].ss_period);
2514 1.38.4.1 nathanw }
2515 1.38.4.1 nathanw
2516 1.38.4.1 nathanw return (10000000 / ((factor * 4) * 10));
2517 1.14 thorpej }
2518 1.14 thorpej
2519 1.38.4.1 nathanw #ifdef SCSIPI_DEBUG
2520 1.2 bouyer /*
2521 1.2 bouyer * Given a scsipi_xfer, dump the request, in all its glory
2522 1.2 bouyer */
2523 1.2 bouyer void
2524 1.2 bouyer show_scsipi_xs(xs)
2525 1.2 bouyer struct scsipi_xfer *xs;
2526 1.2 bouyer {
2527 1.3 enami
2528 1.2 bouyer printf("xs(%p): ", xs);
2529 1.24 thorpej printf("xs_control(0x%08x)", xs->xs_control);
2530 1.24 thorpej printf("xs_status(0x%08x)", xs->xs_status);
2531 1.38.4.1 nathanw printf("periph(%p)", xs->xs_periph);
2532 1.38.4.1 nathanw printf("retr(0x%x)", xs->xs_retries);
2533 1.2 bouyer printf("timo(0x%x)", xs->timeout);
2534 1.2 bouyer printf("cmd(%p)", xs->cmd);
2535 1.2 bouyer printf("len(0x%x)", xs->cmdlen);
2536 1.2 bouyer printf("data(%p)", xs->data);
2537 1.2 bouyer printf("len(0x%x)", xs->datalen);
2538 1.2 bouyer printf("res(0x%x)", xs->resid);
2539 1.2 bouyer printf("err(0x%x)", xs->error);
2540 1.2 bouyer printf("bp(%p)", xs->bp);
2541 1.2 bouyer show_scsipi_cmd(xs);
2542 1.2 bouyer }
2543 1.2 bouyer
2544 1.2 bouyer void
2545 1.2 bouyer show_scsipi_cmd(xs)
2546 1.2 bouyer struct scsipi_xfer *xs;
2547 1.2 bouyer {
2548 1.2 bouyer u_char *b = (u_char *) xs->cmd;
2549 1.3 enami int i = 0;
2550 1.2 bouyer
2551 1.38.4.1 nathanw scsipi_printaddr(xs->xs_periph);
2552 1.38.4.1 nathanw printf(" command: ");
2553 1.2 bouyer
2554 1.24 thorpej if ((xs->xs_control & XS_CTL_RESET) == 0) {
2555 1.2 bouyer while (i < xs->cmdlen) {
2556 1.2 bouyer if (i)
2557 1.2 bouyer printf(",");
2558 1.2 bouyer printf("0x%x", b[i++]);
2559 1.2 bouyer }
2560 1.2 bouyer printf("-[%d bytes]\n", xs->datalen);
2561 1.2 bouyer if (xs->datalen)
2562 1.2 bouyer show_mem(xs->data, min(64, xs->datalen));
2563 1.2 bouyer } else
2564 1.2 bouyer printf("-RESET-\n");
2565 1.2 bouyer }
2566 1.2 bouyer
2567 1.2 bouyer void
2568 1.2 bouyer show_mem(address, num)
2569 1.2 bouyer u_char *address;
2570 1.2 bouyer int num;
2571 1.2 bouyer {
2572 1.2 bouyer int x;
2573 1.2 bouyer
2574 1.2 bouyer printf("------------------------------");
2575 1.2 bouyer for (x = 0; x < num; x++) {
2576 1.2 bouyer if ((x % 16) == 0)
2577 1.2 bouyer printf("\n%03d: ", x);
2578 1.2 bouyer printf("%02x ", *address++);
2579 1.2 bouyer }
2580 1.2 bouyer printf("\n------------------------------\n");
2581 1.2 bouyer }
2582 1.38.4.1 nathanw #endif /* SCSIPI_DEBUG */
2583