/* $NetBSD: iopsp.c,v 1.31 2008/04/06 20:26:21 cegger Exp $ */

/*-
 * Copyright (c) 2000, 2001, 2007 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Raw SCSI device support for I2O. IOPs present SCSI devices individually;
 * we group them by controlling port.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: iopsp.c,v 1.31 2008/04/06 20:26:21 cegger Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/endian.h>
#include <sys/malloc.h>
#include <sys/scsiio.h>

#include <sys/bswap.h>
#include <sys/bus.h>

#include <dev/scsipi/scsi_all.h>
#include <dev/scsipi/scsi_disk.h>
#include <dev/scsipi/scsipi_all.h>
#include <dev/scsipi/scsiconf.h>
#include <dev/scsipi/scsi_message.h>

#include <dev/i2o/i2o.h>
#include <dev/i2o/iopio.h>
#include <dev/i2o/iopvar.h>
#include <dev/i2o/iopspvar.h>

static void iopsp_adjqparam(struct device *, int);
static void iopsp_attach(struct device *, struct device *, void *);
static void iopsp_intr(struct device *, struct iop_msg *, void *);
static int iopsp_ioctl(struct scsipi_channel *, u_long,
    void *, int, struct proc *);
static int iopsp_match(struct device *, struct cfdata *, void *);
static int iopsp_rescan(struct iopsp_softc *);
static int iopsp_reconfig(struct device *);
static void iopsp_scsipi_request(struct scsipi_channel *,
    scsipi_adapter_req_t, void *);

CFATTACH_DECL(iopsp, sizeof(struct iopsp_softc),
    iopsp_match, iopsp_attach, NULL, NULL);

/*
 * Match a supported device.
 */
static int
iopsp_match(struct device *parent, struct cfdata *match, void *aux)
{
        struct iop_attach_args *ia;
        struct {
                struct i2o_param_op_results pr;
                struct i2o_param_read_results prr;
                struct i2o_param_hba_ctlr_info ci;
        } __attribute__ ((__packed__)) param;

        ia = aux;

        if (ia->ia_class != I2O_CLASS_BUS_ADAPTER_PORT)
                return (0);

        if (iop_field_get_all((struct iop_softc *)parent, ia->ia_tid,
            I2O_PARAM_HBA_CTLR_INFO, &param, sizeof(param), NULL) != 0)
                return (0);

        return (param.ci.bustype == I2O_HBA_BUS_SCSI ||
            param.ci.bustype == I2O_HBA_BUS_FCA);
}

/*
 * Attach a supported device.
 */
static void
iopsp_attach(struct device *parent, struct device *self, void *aux)
{
        struct iop_attach_args *ia;
        struct iopsp_softc *sc;
        struct iop_softc *iop;
        struct {
                struct i2o_param_op_results pr;
                struct i2o_param_read_results prr;
                union {
                        struct i2o_param_hba_ctlr_info ci;
                        struct i2o_param_hba_scsi_ctlr_info sci;
                        struct i2o_param_hba_scsi_port_info spi;
                } p;
        } __attribute__ ((__packed__)) param;
        int fc, rv;
        int size;

        ia = (struct iop_attach_args *)aux;
        sc = device_private(self);
        iop = device_private(parent);

        /* Register us as an initiator. */
        sc->sc_ii.ii_dv = self;
        sc->sc_ii.ii_intr = iopsp_intr;
        sc->sc_ii.ii_flags = 0;
        sc->sc_ii.ii_tid = ia->ia_tid;
        sc->sc_ii.ii_reconfig = iopsp_reconfig;
        sc->sc_ii.ii_adjqparam = iopsp_adjqparam;
        iop_initiator_register(iop, &sc->sc_ii);

        rv = iop_field_get_all(iop, ia->ia_tid, I2O_PARAM_HBA_CTLR_INFO,
            &param, sizeof(param), NULL);
        if (rv != 0)
                goto bad;

        fc = (param.p.ci.bustype == I2O_HBA_BUS_FCA);

        /*
         * Say what the device is. If we can find out what the controlling
         * device is, say what that is too.
         */
        aprint_normal(": SCSI port");
        iop_print_ident(iop, ia->ia_tid);
        aprint_normal("\n");

        rv = iop_field_get_all(iop, ia->ia_tid, I2O_PARAM_HBA_SCSI_CTLR_INFO,
            &param, sizeof(param), NULL);
        if (rv != 0)
                goto bad;

        aprint_normal_dev(&sc->sc_dv, "");
        if (fc)
                aprint_normal("FC");
        else
                aprint_normal("%d-bit", param.p.sci.maxdatawidth);
        aprint_normal(", max sync rate %dMHz, initiator ID %d\n",
            (u_int32_t)le64toh(param.p.sci.maxsyncrate) / 1000,
            le32toh(param.p.sci.initiatorid));

        sc->sc_openings = 1;

        sc->sc_adapter.adapt_dev = &sc->sc_dv;
        sc->sc_adapter.adapt_nchannels = 1;
        sc->sc_adapter.adapt_openings = 1;
        sc->sc_adapter.adapt_max_periph = 1;
        sc->sc_adapter.adapt_ioctl = iopsp_ioctl;
        sc->sc_adapter.adapt_minphys = minphys;
        sc->sc_adapter.adapt_request = iopsp_scsipi_request;

        memset(&sc->sc_channel, 0, sizeof(sc->sc_channel));
        sc->sc_channel.chan_adapter = &sc->sc_adapter;
        sc->sc_channel.chan_bustype = &scsi_bustype;
        sc->sc_channel.chan_channel = 0;
        sc->sc_channel.chan_ntargets = fc ?
            IOPSP_MAX_FC_TARGET : param.p.sci.maxdatawidth;
        sc->sc_channel.chan_nluns = IOPSP_MAX_LUN;
        sc->sc_channel.chan_id = le32toh(param.p.sci.initiatorid);
        sc->sc_channel.chan_flags = SCSIPI_CHAN_NOSETTLE;
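        /*
         * SCSIPI_CHAN_NOSETTLE: scsipi need not wait for a bus settle
         * delay before probing, presumably because the IOP's DDM has
         * already scanned and settled the bus.
         */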

        /*
         * Allocate the target map. Currently used for informational
         * purposes only.
         */
        size = sc->sc_channel.chan_ntargets * sizeof(struct iopsp_target);
        sc->sc_targetmap = malloc(size, M_DEVBUF, M_NOWAIT|M_ZERO);
        if (sc->sc_targetmap == NULL) {
                aprint_error_dev(&sc->sc_dv, "can't allocate target map\n");
                goto bad;
        }

        /* Build the two maps, and attach to scsipi. */
        if (iopsp_reconfig(self) != 0) {
                aprint_error_dev(&sc->sc_dv, "configure failed\n");
                goto bad;
        }
        config_found(self, &sc->sc_channel, scsiprint);
        return;

 bad:
        iop_initiator_unregister(iop, &sc->sc_ii);
}

/*
 * Scan the LCT to determine which devices we control, and enter them into
 * the maps.
 */
static int
iopsp_reconfig(struct device *dv)
{
        struct iopsp_softc *sc;
        struct iop_softc *iop;
        struct i2o_lct_entry *le;
        struct scsipi_channel *sc_chan;
        struct {
                struct i2o_param_op_results pr;
                struct i2o_param_read_results prr;
                struct i2o_param_scsi_device_info sdi;
        } __attribute__ ((__packed__)) param;
        u_int tid, nent, i, targ, lun, size, rv, bptid;
        u_short *tidmap;
        void *tofree;
        struct iopsp_target *it;
        int syncrate;

        sc = (struct iopsp_softc *)dv;
        iop = (struct iop_softc *)device_parent(&sc->sc_dv);
        sc_chan = &sc->sc_channel;

        KASSERT(mutex_owned(&iop->sc_conflock));

        /* Anything to do? */
        if (iop->sc_chgind == sc->sc_chgind)
                return (0);

        /*
         * Allocate memory for the target/LUN -> TID map. Use zero to
         * denote absent targets (zero is the TID of the I2O executive,
         * and we never address that here).
         */
        size = sc_chan->chan_ntargets * (IOPSP_MAX_LUN) * sizeof(u_short);
        if ((tidmap = malloc(size, M_DEVBUF, M_WAITOK|M_ZERO)) == NULL)
                return (ENOMEM);

        for (i = 0; i < sc_chan->chan_ntargets; i++)
                sc->sc_targetmap[i].it_flags &= ~IT_PRESENT;

        /*
         * A quick hack to handle Intel's stacked bus port arrangement.
         */
        bptid = sc->sc_ii.ii_tid;
        nent = iop->sc_nlctent;
        for (le = iop->sc_lct->entry; nent != 0; nent--, le++)
                if ((le16toh(le->classid) & 4095) ==
                    I2O_CLASS_BUS_ADAPTER_PORT &&
                    (le32toh(le->usertid) & 4095) == bptid) {
                        bptid = le16toh(le->localtid) & 4095;
                        break;
                }

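        /*
         * If a stacked port was found above, bptid now names it; otherwise
         * it is still our own TID.  Peripheral LCT entries are matched
         * against this value below.
         */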
        nent = iop->sc_nlctent;
        for (i = 0, le = iop->sc_lct->entry; i < nent; i++, le++) {
                if ((le16toh(le->classid) & 4095) != I2O_CLASS_SCSI_PERIPHERAL)
                        continue;
                if (((le32toh(le->usertid) >> 12) & 4095) != bptid)
                        continue;
                tid = le16toh(le->localtid) & 4095;

                rv = iop_field_get_all(iop, tid, I2O_PARAM_SCSI_DEVICE_INFO,
                    &param, sizeof(param), NULL);
                if (rv != 0)
                        continue;
                targ = le32toh(param.sdi.identifier);
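                /*
                 * luninfo[] is the LUN structure reported by the DDM; byte 1
                 * is taken as the LUN number, matching the LUN field position
                 * used by SCSI peripheral-style LUN addressing.
                 */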
                lun = param.sdi.luninfo[1];
#if defined(DIAGNOSTIC) || defined(I2ODEBUG)
                if (targ >= sc_chan->chan_ntargets ||
                    lun >= sc_chan->chan_nluns) {
                        aprint_error_dev(&sc->sc_dv, "target %d,%d (tid %d): "
                            "bad target/LUN\n", targ, lun, tid);
                        continue;
                }
#endif

                /*
                 * If we've already described this target, and nothing has
                 * changed, then don't describe it again.
                 */
                it = &sc->sc_targetmap[targ];
                it->it_flags |= IT_PRESENT;
                syncrate = ((int)le64toh(param.sdi.negsyncrate) + 500) / 1000;
                if (it->it_width != param.sdi.negdatawidth ||
                    it->it_offset != param.sdi.negoffset ||
                    it->it_syncrate != syncrate) {
                        it->it_width = param.sdi.negdatawidth;
                        it->it_offset = param.sdi.negoffset;
                        it->it_syncrate = syncrate;

                        aprint_verbose_dev(&sc->sc_dv, "target %d (tid %d): %d-bit, ",
                            targ, tid, it->it_width);
                        if (it->it_syncrate == 0)
                                aprint_verbose("asynchronous\n");
                        else
                                aprint_verbose("synchronous at %dMHz, "
                                    "offset 0x%x\n", it->it_syncrate,
                                    it->it_offset);
                }

                /* Ignore the device if it's in use by somebody else. */
                if ((le32toh(le->usertid) & 4095) != I2O_TID_NONE) {
                        if (sc->sc_tidmap == NULL ||
                            IOPSP_TIDMAP(sc->sc_tidmap, targ, lun) !=
                            IOPSP_TID_INUSE) {
                                aprint_verbose_dev(&sc->sc_dv, "target %d,%d (tid %d): "
                                    "in use by tid %d\n",
                                    targ, lun, tid,
                                    le32toh(le->usertid) & 4095);
                        }
                        IOPSP_TIDMAP(tidmap, targ, lun) = IOPSP_TID_INUSE;
                } else
                        IOPSP_TIDMAP(tidmap, targ, lun) = (u_short)tid;
        }

        for (i = 0; i < sc_chan->chan_ntargets; i++)
                if ((sc->sc_targetmap[i].it_flags & IT_PRESENT) == 0)
                        sc->sc_targetmap[i].it_width = 0;

        /* Swap in the new map and return. */
        mutex_spin_enter(&iop->sc_intrlock);
        tofree = sc->sc_tidmap;
        sc->sc_tidmap = tidmap;
        mutex_spin_exit(&iop->sc_intrlock);

        if (tofree != NULL)
                free(tofree, M_DEVBUF);
        sc->sc_chgind = iop->sc_chgind;
        return (0);
}

/*
 * Re-scan the bus; to be called from a higher level (e.g. scsipi).
 */
static int
iopsp_rescan(struct iopsp_softc *sc)
{
        struct iop_softc *iop;
        struct iop_msg *im;
        struct i2o_hba_bus_scan mf;
        int rv;

        iop = (struct iop_softc *)device_parent(&sc->sc_dv);

        mutex_enter(&iop->sc_conflock);
        im = iop_msg_alloc(iop, IM_WAIT);

        mf.msgflags = I2O_MSGFLAGS(i2o_hba_bus_scan);
        mf.msgfunc = I2O_MSGFUNC(sc->sc_ii.ii_tid, I2O_HBA_BUS_SCAN);
        mf.msgictx = sc->sc_ii.ii_ictx;
        mf.msgtctx = im->im_tctx;

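        /* A full bus scan can take a while; allow it up to five minutes. */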
        rv = iop_msg_post(iop, im, &mf, 5*60*1000);
        iop_msg_free(iop, im);
        if (rv != 0)
                aprint_error_dev(&sc->sc_dv, "bus rescan failed (error %d)\n",
                    rv);

        if ((rv = iop_lct_get(iop)) == 0)
                rv = iopsp_reconfig(&sc->sc_dv);

        mutex_exit(&iop->sc_conflock);
        return (rv);
}

/*
 * Start a SCSI command.
 */
static void
iopsp_scsipi_request(struct scsipi_channel *chan, scsipi_adapter_req_t req,
    void *arg)
{
        struct scsipi_xfer *xs;
        struct scsipi_periph *periph;
        struct iopsp_softc *sc;
        struct iop_msg *im;
        struct iop_softc *iop;
        struct i2o_scsi_scb_exec *mf;
        int error, flags, tid;
        u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

        sc = (void *)chan->chan_adapter->adapt_dev;
        iop = (struct iop_softc *)device_parent(&sc->sc_dv);

        switch (req) {
        case ADAPTER_REQ_RUN_XFER:
                xs = arg;
                periph = xs->xs_periph;
                flags = xs->xs_control;

                SC_DEBUG(periph, SCSIPI_DB2, ("iopsp_scsi_request run_xfer\n"));

                tid = IOPSP_TIDMAP(sc->sc_tidmap, periph->periph_target,
                    periph->periph_lun);
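                /*
                 * No TID is mapped at this target/LUN, or the device is
                 * owned by another initiator: report a selection timeout
                 * so that scsipi treats the address as empty.
                 */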
                if (tid == IOPSP_TID_ABSENT || tid == IOPSP_TID_INUSE) {
                        xs->error = XS_SELTIMEOUT;
                        scsipi_done(xs);
                        return;
                }

                /* Need to reset the target? */
                if ((flags & XS_CTL_RESET) != 0) {
                        if (iop_simple_cmd(iop, tid, I2O_SCSI_DEVICE_RESET,
                            sc->sc_ii.ii_ictx, 1, 30*1000) != 0) {
                                aprint_error_dev(&sc->sc_dv, "reset failed\n");
                                xs->error = XS_DRIVER_STUFFUP;
                        } else
                                xs->error = XS_NOERROR;

                        scsipi_done(xs);
                        return;
                }

#if defined(I2ODEBUG) || defined(SCSIDEBUG)
                if (xs->cmdlen > sizeof(mf->cdb))
                        panic("%s: CDB too large", device_xname(&sc->sc_dv));
#endif

                im = iop_msg_alloc(iop, IM_POLL_INTR |
                    IM_NOSTATUS | ((flags & XS_CTL_POLL) != 0 ? IM_POLL : 0));
                im->im_dvcontext = xs;

                mf = (struct i2o_scsi_scb_exec *)mb;
                mf->msgflags = I2O_MSGFLAGS(i2o_scsi_scb_exec);
                mf->msgfunc = I2O_MSGFUNC(tid, I2O_SCSI_SCB_EXEC);
                mf->msgictx = sc->sc_ii.ii_ictx;
                mf->msgtctx = im->im_tctx;
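                /*
                 * The CDB length is carried in the low bits of the SCB flags
                 * word; sense data is returned inline in the reply frame and
                 * copied out in iopsp_intr().
                 */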
                mf->flags = xs->cmdlen | I2O_SCB_FLAG_ENABLE_DISCONNECT |
                    I2O_SCB_FLAG_SENSE_DATA_IN_MESSAGE;
                mf->datalen = xs->datalen;
                memcpy(mf->cdb, xs->cmd, xs->cmdlen);

                switch (xs->xs_tag_type) {
                case MSG_ORDERED_Q_TAG:
                        mf->flags |= I2O_SCB_FLAG_ORDERED_QUEUE_TAG;
                        break;
                case MSG_SIMPLE_Q_TAG:
                        mf->flags |= I2O_SCB_FLAG_SIMPLE_QUEUE_TAG;
                        break;
                case MSG_HEAD_OF_Q_TAG:
                        mf->flags |= I2O_SCB_FLAG_HEAD_QUEUE_TAG;
                        break;
                default:
                        break;
                }

                if (xs->datalen != 0) {
                        error = iop_msg_map_bio(iop, im, mb, xs->data,
                            xs->datalen, (flags & XS_CTL_DATA_OUT) == 0);
                        if (error) {
                                xs->error = XS_DRIVER_STUFFUP;
                                iop_msg_free(iop, im);
                                scsipi_done(xs);
                                return;
                        }
                        if ((flags & XS_CTL_DATA_IN) == 0)
                                mf->flags |= I2O_SCB_FLAG_XFER_TO_DEVICE;
                        else
                                mf->flags |= I2O_SCB_FLAG_XFER_FROM_DEVICE;
                }

                if (iop_msg_post(iop, im, mb, xs->timeout)) {
                        if (xs->datalen != 0)
                                iop_msg_unmap(iop, im);
                        iop_msg_free(iop, im);
                        xs->error = XS_DRIVER_STUFFUP;
                        scsipi_done(xs);
                }
                break;

        case ADAPTER_REQ_GROW_RESOURCES:
                /*
                 * Not supported.
                 */
                break;

        case ADAPTER_REQ_SET_XFER_MODE:
                /*
                 * The DDM takes care of this, and we can't modify its
                 * behaviour.
                 */
                break;
        }
}

#ifdef notyet
/*
 * Abort the specified I2O_SCSI_SCB_EXEC message and its associated SCB.
 */
static int
iopsp_scsi_abort(struct iopsp_softc *sc, int atid, struct iop_msg *aim)
{
        struct iop_msg *im;
        struct i2o_scsi_scb_abort mf;
        struct iop_softc *iop;
        int rv, s;

        iop = (struct iop_softc *)device_parent(&sc->sc_dv);
        im = iop_msg_alloc(iop, IM_POLL);

        mf.msgflags = I2O_MSGFLAGS(i2o_scsi_scb_abort);
        mf.msgfunc = I2O_MSGFUNC(atid, I2O_SCSI_SCB_ABORT);
        mf.msgictx = sc->sc_ii.ii_ictx;
        mf.msgtctx = im->im_tctx;
        mf.tctxabort = aim->im_tctx;

        rv = iop_msg_post(iop, im, &mf, 30000);
        iop_msg_free(iop, im);

        return (rv);
}
#endif

/*
 * We have a message which has been processed and replied to by the IOP -
 * deal with it.
 */
static void
iopsp_intr(struct device *dv, struct iop_msg *im, void *reply)
{
        struct scsipi_xfer *xs;
        struct iopsp_softc *sc;
        struct i2o_scsi_reply *rb;
        struct iop_softc *iop;
        u_int sl;

        sc = (struct iopsp_softc *)dv;
        xs = (struct scsipi_xfer *)im->im_dvcontext;
        iop = (struct iop_softc *)device_parent(dv);
        rb = reply;

        SC_DEBUG(xs->xs_periph, SCSIPI_DB2, ("iopsp_intr\n"));

        if ((rb->msgflags & I2O_MSGFLAGS_FAIL) != 0) {
                xs->error = XS_DRIVER_STUFFUP;
                xs->resid = xs->datalen;
        } else {
                if (rb->hbastatus != I2O_SCSI_DSC_SUCCESS) {
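                        /*
                         * Map the I2O HBA status onto the closest scsipi
                         * transfer error code.
                         */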
                        switch (rb->hbastatus) {
                        case I2O_SCSI_DSC_ADAPTER_BUSY:
                        case I2O_SCSI_DSC_SCSI_BUS_RESET:
                        case I2O_SCSI_DSC_BUS_BUSY:
                                xs->error = XS_BUSY;
                                break;
                        case I2O_SCSI_DSC_SELECTION_TIMEOUT:
                                xs->error = XS_SELTIMEOUT;
                                break;
                        case I2O_SCSI_DSC_COMMAND_TIMEOUT:
                        case I2O_SCSI_DSC_DEVICE_NOT_PRESENT:
                        case I2O_SCSI_DSC_LUN_INVALID:
                        case I2O_SCSI_DSC_SCSI_TID_INVALID:
                                xs->error = XS_TIMEOUT;
                                break;
                        default:
                                xs->error = XS_DRIVER_STUFFUP;
                                break;
                        }
                        aprint_error_dev(&sc->sc_dv, "HBA status 0x%02x\n",
                            rb->hbastatus);
                } else if (rb->scsistatus != SCSI_OK) {
                        switch (rb->scsistatus) {
                        case SCSI_CHECK:
                                xs->error = XS_SENSE;
                                sl = le32toh(rb->senselen);
                                if (sl > sizeof(xs->sense.scsi_sense))
                                        sl = sizeof(xs->sense.scsi_sense);
                                memcpy(&xs->sense.scsi_sense, rb->sense, sl);
                                break;
                        case SCSI_QUEUE_FULL:
                        case SCSI_BUSY:
                                xs->error = XS_BUSY;
                                break;
                        default:
                                xs->error = XS_DRIVER_STUFFUP;
                                break;
                        }
                } else
                        xs->error = XS_NOERROR;

                xs->resid = xs->datalen - le32toh(rb->datalen);
                xs->status = rb->scsistatus;
        }

        /* Free the message wrapper and pass the news to scsipi. */
        if (xs->datalen != 0)
                iop_msg_unmap(iop, im);
        iop_msg_free(iop, im);

        scsipi_done(xs);
}

/*
 * ioctl hook; used here only to initiate low-level rescans.
 */
static int
iopsp_ioctl(struct scsipi_channel *chan, u_long cmd, void *data,
    int flag, struct proc *p)
{
        int rv;

        switch (cmd) {
        case SCBUSIOLLSCAN:
                /*
                 * If it's boot time, the bus will have been scanned and the
                 * maps built. Locking would stop re-configuration, but we
                 * want to fake success.
                 */
                if (curlwp != &lwp0)
                        rv = iopsp_rescan(
                            (struct iopsp_softc *)chan->chan_adapter->adapt_dev);
                else
                        rv = 0;
                break;

        default:
                rv = ENOTTY;
                break;
        }

        return (rv);
}

/*
 * The number of openings available to us has changed, so inform scsipi.
 */
static void
iopsp_adjqparam(struct device *dv, int mpi)
{
        struct iopsp_softc *sc;
        struct iop_softc *iop;

        sc = device_private(dv);
        iop = device_private(device_parent(dv));

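        /*
         * Apply the difference between the new message count and the
         * count previously advertised to scsipi.
         */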
        mutex_spin_enter(&iop->sc_intrlock);
        sc->sc_adapter.adapt_openings += mpi - sc->sc_openings;
        sc->sc_openings = mpi;
        mutex_spin_exit(&iop->sc_intrlock);
}