/*	$NetBSD: mly.c,v 1.36 2007/10/19 12:00:52 ad Exp $	*/

/*-
 * Copyright (c) 2001 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran, Thor Lancelot Simon, and Eric Haszlakiewicz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c) 2000, 2001 Michael Smith
 * Copyright (c) 2000 BSDi
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from FreeBSD: mly.c,v 1.8 2001/07/14 00:12:22 msmith Exp
 */
67
68 /*
69 * Driver for the Mylex AcceleRAID and eXtremeRAID family with v6 firmware.
70 *
71 * TODO:
72 *
73 * o Make mly->mly_btl a hash, then MLY_BTL_RESCAN becomes a SIMPLEQ.
74 * o Handle FC and multiple LUNs.
75 * o Fix mmbox usage.
76 * o Fix transfer speed fudge.
77 */
78
79 #include <sys/cdefs.h>
80 __KERNEL_RCSID(0, "$NetBSD: mly.c,v 1.36 2007/10/19 12:00:52 ad Exp $");
81
82 #include <sys/param.h>
83 #include <sys/systm.h>
84 #include <sys/device.h>
85 #include <sys/kernel.h>
86 #include <sys/queue.h>
87 #include <sys/buf.h>
88 #include <sys/endian.h>
89 #include <sys/conf.h>
90 #include <sys/malloc.h>
91 #include <sys/ioctl.h>
92 #include <sys/scsiio.h>
93 #include <sys/kthread.h>
94 #include <sys/kauth.h>
95
96 #include <uvm/uvm_extern.h>
97
98 #include <sys/bus.h>
99
100 #include <dev/scsipi/scsi_all.h>
101 #include <dev/scsipi/scsipi_all.h>
102 #include <dev/scsipi/scsiconf.h>
103
104 #include <dev/pci/pcireg.h>
105 #include <dev/pci/pcivar.h>
106 #include <dev/pci/pcidevs.h>
107
108 #include <dev/pci/mlyreg.h>
109 #include <dev/pci/mlyio.h>
110 #include <dev/pci/mlyvar.h>
111 #include <dev/pci/mly_tables.h>

static void	mly_attach(struct device *, struct device *, void *);
static int	mly_match(struct device *, struct cfdata *, void *);
static const	struct mly_ident *mly_find_ident(struct pci_attach_args *);
static int	mly_fwhandshake(struct mly_softc *);
static int	mly_flush(struct mly_softc *);
static int	mly_intr(void *);
static void	mly_shutdown(void *);

static int	mly_alloc_ccbs(struct mly_softc *);
static void	mly_check_event(struct mly_softc *);
static void	mly_complete_event(struct mly_softc *, struct mly_ccb *);
static void	mly_complete_rescan(struct mly_softc *, struct mly_ccb *);
static int	mly_dmamem_alloc(struct mly_softc *, int, bus_dmamap_t *,
				 void **, bus_addr_t *, bus_dma_segment_t *);
static void	mly_dmamem_free(struct mly_softc *, int, bus_dmamap_t,
				void *, bus_dma_segment_t *);
static int	mly_enable_mmbox(struct mly_softc *);
static void	mly_fetch_event(struct mly_softc *);
static int	mly_get_controllerinfo(struct mly_softc *);
static int	mly_get_eventstatus(struct mly_softc *);
static int	mly_ioctl(struct mly_softc *, struct mly_cmd_ioctl *,
			  void **, size_t, void *, size_t *);
static void	mly_padstr(char *, const char *, int);
static void	mly_process_event(struct mly_softc *, struct mly_event *);
static void	mly_release_ccbs(struct mly_softc *);
static int	mly_scan_btl(struct mly_softc *, int, int);
static void	mly_scan_channel(struct mly_softc *, int);
static void	mly_thread(void *);

static int	mly_ccb_alloc(struct mly_softc *, struct mly_ccb **);
static void	mly_ccb_complete(struct mly_softc *, struct mly_ccb *);
static void	mly_ccb_enqueue(struct mly_softc *, struct mly_ccb *);
static void	mly_ccb_free(struct mly_softc *, struct mly_ccb *);
static int	mly_ccb_map(struct mly_softc *, struct mly_ccb *);
static int	mly_ccb_poll(struct mly_softc *, struct mly_ccb *, int);
static int	mly_ccb_submit(struct mly_softc *, struct mly_ccb *);
static void	mly_ccb_unmap(struct mly_softc *, struct mly_ccb *);
static int	mly_ccb_wait(struct mly_softc *, struct mly_ccb *, int);

static void	mly_get_xfer_mode(struct mly_softc *, int,
				  struct scsipi_xfer_mode *);
static void	mly_scsipi_complete(struct mly_softc *, struct mly_ccb *);
static int	mly_scsipi_ioctl(struct scsipi_channel *, u_long, void *,
				 int, struct proc *);
static void	mly_scsipi_minphys(struct buf *);
static void	mly_scsipi_request(struct scsipi_channel *,
				   scsipi_adapter_req_t, void *);

static int	mly_user_command(struct mly_softc *, struct mly_user_command *);
static int	mly_user_health(struct mly_softc *, struct mly_user_health *);

extern struct	cfdriver mly_cd;

CFATTACH_DECL(mly, sizeof(struct mly_softc),
    mly_match, mly_attach, NULL, NULL);

dev_type_open(mlyopen);
dev_type_close(mlyclose);
dev_type_ioctl(mlyioctl);

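/*
 * Control device for the userland management interface; commands and
 * health queries arrive through mlyioctl() and are dispatched to
 * mly_user_command() and mly_user_health().
 */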
const struct cdevsw mly_cdevsw = {
	mlyopen, mlyclose, noread, nowrite, mlyioctl,
	nostop, notty, nopoll, nommap, nokqfilter, D_OTHER,
};

static struct mly_ident {
	u_short	vendor;
	u_short	product;
	u_short	subvendor;
	u_short	subproduct;
	int	hwif;
	const char	*desc;
} const mly_ident[] = {
	{
		PCI_VENDOR_MYLEX,
		PCI_PRODUCT_MYLEX_EXTREMERAID,
		PCI_VENDOR_MYLEX,
		0x0040,
		MLY_HWIF_STRONGARM,
		"eXtremeRAID 2000"
	},
	{
		PCI_VENDOR_MYLEX,
		PCI_PRODUCT_MYLEX_EXTREMERAID,
		PCI_VENDOR_MYLEX,
		0x0030,
		MLY_HWIF_STRONGARM,
		"eXtremeRAID 3000"
	},
	{
		PCI_VENDOR_MYLEX,
		PCI_PRODUCT_MYLEX_ACCELERAID,
		PCI_VENDOR_MYLEX,
		0x0050,
		MLY_HWIF_I960RX,
		"AcceleRAID 352"
	},
	{
		PCI_VENDOR_MYLEX,
		PCI_PRODUCT_MYLEX_ACCELERAID,
		PCI_VENDOR_MYLEX,
		0x0052,
		MLY_HWIF_I960RX,
		"AcceleRAID 170"
	},
	{
		PCI_VENDOR_MYLEX,
		PCI_PRODUCT_MYLEX_ACCELERAID,
		PCI_VENDOR_MYLEX,
		0x0054,
		MLY_HWIF_I960RX,
		"AcceleRAID 160"
	},
};

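/* Cookie for the shutdown hook; shared by all instances of the driver. */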
static void	*mly_sdh;

/*
 * Try to find a `mly_ident' entry corresponding to this board.
 */
static const struct mly_ident *
mly_find_ident(struct pci_attach_args *pa)
{
	const struct mly_ident *mpi, *maxmpi;
	pcireg_t reg;

	mpi = mly_ident;
	maxmpi = mpi + sizeof(mly_ident) / sizeof(mly_ident[0]);

	if (PCI_CLASS(pa->pa_class) == PCI_CLASS_I2O)
		return (NULL);

	for (; mpi < maxmpi; mpi++) {
		if (PCI_VENDOR(pa->pa_id) != mpi->vendor ||
		    PCI_PRODUCT(pa->pa_id) != mpi->product)
			continue;

		if (mpi->subvendor == 0x0000)
			return (mpi);

		reg = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);

		if (PCI_VENDOR(reg) == mpi->subvendor &&
		    PCI_PRODUCT(reg) == mpi->subproduct)
			return (mpi);
	}

	return (NULL);
}

/*
 * Match a supported board.
 */
static int
mly_match(struct device *parent, struct cfdata *cfdata, void *aux)
{

	return (mly_find_ident(aux) != NULL);
}

/*
 * Attach a supported board.
 */
static void
mly_attach(struct device *parent, struct device *self, void *aux)
{
	struct pci_attach_args *pa;
	struct mly_softc *mly;
	struct mly_ioctl_getcontrollerinfo *mi;
	const struct mly_ident *ident;
	pci_chipset_tag_t pc;
	pci_intr_handle_t ih;
	bus_space_handle_t memh, ioh;
	bus_space_tag_t memt, iot;
	pcireg_t reg;
	const char *intrstr;
	int ior, memr, i, rv, state;
	struct scsipi_adapter *adapt;
	struct scsipi_channel *chan;

	mly = (struct mly_softc *)self;
	pa = aux;
	pc = pa->pa_pc;
	ident = mly_find_ident(pa);
	state = 0;

	mly->mly_dmat = pa->pa_dmat;
	mly->mly_hwif = ident->hwif;

	printf(": Mylex %s\n", ident->desc);

	/*
	 * Map the PCI register window.
	 */
	memr = -1;
	ior = -1;

	for (i = 0x10; i <= 0x14; i += 4) {
		reg = pci_conf_read(pa->pa_pc, pa->pa_tag, i);

		if (PCI_MAPREG_TYPE(reg) == PCI_MAPREG_TYPE_IO) {
			if (ior == -1 && PCI_MAPREG_IO_SIZE(reg) != 0)
				ior = i;
		} else {
			if (memr == -1 && PCI_MAPREG_MEM_SIZE(reg) != 0)
				memr = i;
		}
	}

	if (memr != -1)
		if (pci_mapreg_map(pa, memr, PCI_MAPREG_TYPE_MEM, 0,
		    &memt, &memh, NULL, NULL))
			memr = -1;
	if (ior != -1)
		if (pci_mapreg_map(pa, ior, PCI_MAPREG_TYPE_IO, 0,
		    &iot, &ioh, NULL, NULL))
			ior = -1;

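	/*
	 * Prefer the memory-space window if it mapped successfully;
	 * otherwise fall back to I/O space.
	 */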
	if (memr != -1) {
		mly->mly_iot = memt;
		mly->mly_ioh = memh;
	} else if (ior != -1) {
		mly->mly_iot = iot;
		mly->mly_ioh = ioh;
	} else {
		printf("%s: can't map i/o or memory space\n", self->dv_xname);
		return;
	}

	/*
	 * Enable the device.
	 */
	reg = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG,
	    reg | PCI_COMMAND_MASTER_ENABLE);

	/*
	 * Map and establish the interrupt.
	 */
	if (pci_intr_map(pa, &ih)) {
		printf("%s: can't map interrupt\n", self->dv_xname);
		return;
	}
	intrstr = pci_intr_string(pc, ih);
	mly->mly_ih = pci_intr_establish(pc, ih, IPL_BIO, mly_intr, mly);
	if (mly->mly_ih == NULL) {
		printf("%s: can't establish interrupt", self->dv_xname);
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		return;
	}

	if (intrstr != NULL)
		printf("%s: interrupting at %s\n", mly->mly_dv.dv_xname,
		    intrstr);

	/*
	 * Take care of interface-specific tasks.
	 */
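	/*
	 * The i960RX and StrongARM front ends use different register
	 * offsets, and the sense of the doorbell bits is inverted
	 * between the two (mly_doorbell_true).
	 */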
	switch (mly->mly_hwif) {
	case MLY_HWIF_I960RX:
		mly->mly_doorbell_true = 0x00;
		mly->mly_cmd_mailbox = MLY_I960RX_COMMAND_MAILBOX;
		mly->mly_status_mailbox = MLY_I960RX_STATUS_MAILBOX;
		mly->mly_idbr = MLY_I960RX_IDBR;
		mly->mly_odbr = MLY_I960RX_ODBR;
		mly->mly_error_status = MLY_I960RX_ERROR_STATUS;
		mly->mly_interrupt_status = MLY_I960RX_INTERRUPT_STATUS;
		mly->mly_interrupt_mask = MLY_I960RX_INTERRUPT_MASK;
		break;

	case MLY_HWIF_STRONGARM:
		mly->mly_doorbell_true = 0xff;
		mly->mly_cmd_mailbox = MLY_STRONGARM_COMMAND_MAILBOX;
		mly->mly_status_mailbox = MLY_STRONGARM_STATUS_MAILBOX;
		mly->mly_idbr = MLY_STRONGARM_IDBR;
		mly->mly_odbr = MLY_STRONGARM_ODBR;
		mly->mly_error_status = MLY_STRONGARM_ERROR_STATUS;
		mly->mly_interrupt_status = MLY_STRONGARM_INTERRUPT_STATUS;
		mly->mly_interrupt_mask = MLY_STRONGARM_INTERRUPT_MASK;
		break;
	}

	/*
	 * Allocate and map the scatter/gather lists.
	 */
	rv = mly_dmamem_alloc(mly, MLY_SGL_SIZE * MLY_MAX_CCBS,
	    &mly->mly_sg_dmamap, (void **)&mly->mly_sg,
	    &mly->mly_sg_busaddr, &mly->mly_sg_seg);
	if (rv) {
		printf("%s: unable to allocate S/G maps\n",
		    mly->mly_dv.dv_xname);
		goto bad;
	}
	state++;

	/*
	 * Allocate and map the memory mailbox.
	 */
	rv = mly_dmamem_alloc(mly, sizeof(struct mly_mmbox),
	    &mly->mly_mmbox_dmamap, (void **)&mly->mly_mmbox,
	    &mly->mly_mmbox_busaddr, &mly->mly_mmbox_seg);
	if (rv) {
		printf("%s: unable to allocate mailboxes\n",
		    mly->mly_dv.dv_xname);
		goto bad;
	}
	state++;

	/*
	 * Initialise per-controller queues.
	 */
	SLIST_INIT(&mly->mly_ccb_free);
	SIMPLEQ_INIT(&mly->mly_ccb_queue);

	/*
	 * Disable interrupts before we start talking to the controller.
	 */
	mly_outb(mly, mly->mly_interrupt_mask, MLY_INTERRUPT_MASK_DISABLE);

	/*
	 * Wait for the controller to come ready, handshaking with the
	 * firmware if required.  This is typically only necessary on
	 * platforms where the controller BIOS does not run.
	 */
	if (mly_fwhandshake(mly)) {
		printf("%s: unable to bring controller online\n",
		    mly->mly_dv.dv_xname);
		goto bad;
	}

	/*
	 * Allocate initial command buffers, obtain controller feature
	 * information, and then reallocate command buffers, since we'll
	 * know how many we want.
	 */
	if (mly_alloc_ccbs(mly)) {
		printf("%s: unable to allocate CCBs\n",
		    mly->mly_dv.dv_xname);
		goto bad;
	}
	state++;
	if (mly_get_controllerinfo(mly)) {
		printf("%s: unable to retrieve controller info\n",
		    mly->mly_dv.dv_xname);
		goto bad;
	}
	mly_release_ccbs(mly);
	if (mly_alloc_ccbs(mly)) {
		printf("%s: unable to allocate CCBs\n",
		    mly->mly_dv.dv_xname);
		state--;
		goto bad;
	}

	/*
	 * Get the current event counter for health purposes, populate the
	 * initial health status buffer.
	 */
	if (mly_get_eventstatus(mly)) {
		printf("%s: unable to retrieve event status\n",
		    mly->mly_dv.dv_xname);
		goto bad;
	}

	/*
	 * Enable memory-mailbox mode.
	 */
	if (mly_enable_mmbox(mly)) {
		printf("%s: unable to enable memory mailbox\n",
		    mly->mly_dv.dv_xname);
		goto bad;
	}

	/*
	 * Print a little information about the controller.
	 */
	mi = mly->mly_controllerinfo;

	printf("%s: %d physical channel%s, firmware %d.%02d-%d-%02d "
	    "(%02d%02d%02d%02d), %dMB RAM\n", mly->mly_dv.dv_xname,
	    mi->physical_channels_present,
	    (mi->physical_channels_present) > 1 ? "s" : "",
	    mi->fw_major, mi->fw_minor, mi->fw_turn, mi->fw_build,
	    mi->fw_century, mi->fw_year, mi->fw_month, mi->fw_day,
	    le16toh(mi->memory_size));

	/*
	 * Register our `shutdownhook'.
	 */
	if (mly_sdh == NULL)
		mly_sdh = shutdownhook_establish(mly_shutdown, NULL);

	/*
	 * Clear any previous BTL information.  For each bus that scsipi
	 * wants to scan, we'll receive the SCBUSIOLLSCAN ioctl and retrieve
	 * all BTL info at that point.
	 */
	memset(&mly->mly_btl, 0, sizeof(mly->mly_btl));

	mly->mly_nchans = mly->mly_controllerinfo->physical_channels_present +
	    mly->mly_controllerinfo->virtual_channels_present;

	/*
	 * Attach to scsipi.
	 */
	adapt = &mly->mly_adapt;
	memset(adapt, 0, sizeof(*adapt));
	adapt->adapt_dev = &mly->mly_dv;
	adapt->adapt_nchannels = mly->mly_nchans;
	adapt->adapt_openings = mly->mly_ncmds - MLY_CCBS_RESV;
	adapt->adapt_max_periph = mly->mly_ncmds - MLY_CCBS_RESV;
	adapt->adapt_request = mly_scsipi_request;
	adapt->adapt_minphys = mly_scsipi_minphys;
	adapt->adapt_ioctl = mly_scsipi_ioctl;

	for (i = 0; i < mly->mly_nchans; i++) {
		chan = &mly->mly_chans[i];
		memset(chan, 0, sizeof(*chan));
		chan->chan_adapter = adapt;
		chan->chan_bustype = &scsi_bustype;
		chan->chan_channel = i;
		chan->chan_ntargets = MLY_MAX_TARGETS;
		chan->chan_nluns = MLY_MAX_LUNS;
		chan->chan_id = mly->mly_controllerparam->initiator_id;
		chan->chan_flags = SCSIPI_CHAN_NOSETTLE;
		config_found(&mly->mly_dv, chan, scsiprint);
	}

	/*
	 * Now enable interrupts...
	 */
	mly_outb(mly, mly->mly_interrupt_mask, MLY_INTERRUPT_MASK_ENABLE);

	/*
	 * Finally, create our monitoring thread.
	 */
	mly->mly_state |= MLY_STATE_INITOK;
	rv = kthread_create(PRI_NONE, 0, NULL, mly_thread, mly,
	    &mly->mly_thread, "%s", mly->mly_dv.dv_xname);
	if (rv != 0)
		printf("%s: unable to create thread (%d)\n",
		    mly->mly_dv.dv_xname, rv);
	return;

 bad:
	if (state > 2)
		mly_release_ccbs(mly);
	if (state > 1)
		mly_dmamem_free(mly, sizeof(struct mly_mmbox),
		    mly->mly_mmbox_dmamap, (void *)mly->mly_mmbox,
		    &mly->mly_mmbox_seg);
	if (state > 0)
		mly_dmamem_free(mly, MLY_SGL_SIZE * MLY_MAX_CCBS,
		    mly->mly_sg_dmamap, (void *)mly->mly_sg,
		    &mly->mly_sg_seg);
}

/*
 * Scan all possible devices on the specified channel.
 */
static void
mly_scan_channel(struct mly_softc *mly, int bus)
{
	int s, target;

	for (target = 0; target < MLY_MAX_TARGETS; target++) {
		s = splbio();
		if (!mly_scan_btl(mly, bus, target)) {
			tsleep(&mly->mly_btl[bus][target], PRIBIO, "mlyscan",
			    0);
		}
		splx(s);
	}
}

/*
 * Shut down all configured `mly' devices.
 */
static void
mly_shutdown(void *cookie)
{
	struct mly_softc *mly;
	int i;

	for (i = 0; i < mly_cd.cd_ndevs; i++) {
		if ((mly = device_lookup(&mly_cd, i)) == NULL)
			continue;

		if (mly_flush(mly))
			printf("%s: unable to flush cache\n",
			    mly->mly_dv.dv_xname);
	}
}

/*
 * Fill in the mly_controllerinfo and mly_controllerparam fields in the
 * softc.
 */
static int
mly_get_controllerinfo(struct mly_softc *mly)
{
	struct mly_cmd_ioctl mci;
	int rv;

	/*
	 * Build the getcontrollerinfo ioctl and send it.
	 */
	memset(&mci, 0, sizeof(mci));
	mci.sub_ioctl = MDACIOCTL_GETCONTROLLERINFO;
	rv = mly_ioctl(mly, &mci, (void **)&mly->mly_controllerinfo,
	    sizeof(*mly->mly_controllerinfo), NULL, NULL);
	if (rv != 0)
		return (rv);

	/*
	 * Build the getcontrollerparameter ioctl and send it.
	 */
	memset(&mci, 0, sizeof(mci));
	mci.sub_ioctl = MDACIOCTL_GETCONTROLLERPARAMETER;
	rv = mly_ioctl(mly, &mci, (void **)&mly->mly_controllerparam,
	    sizeof(*mly->mly_controllerparam), NULL, NULL);

	return (rv);
}

/*
 * Rescan a device, possibly as a consequence of getting an event which
 * suggests that it may have changed.  Must be called with interrupts
 * blocked.
 */
static int
mly_scan_btl(struct mly_softc *mly, int bus, int target)
{
	struct mly_ccb *mc;
	struct mly_cmd_ioctl *mci;
	int rv;

	if (target == mly->mly_controllerparam->initiator_id) {
		mly->mly_btl[bus][target].mb_flags = MLY_BTL_PROTECTED;
		return (EIO);
	}

	/* Don't re-scan if a scan is already in progress. */
	if ((mly->mly_btl[bus][target].mb_flags & MLY_BTL_SCANNING) != 0)
		return (EBUSY);

	/* Get a command. */
	if ((rv = mly_ccb_alloc(mly, &mc)) != 0)
		return (rv);

	/* Set up the data buffer. */
	mc->mc_data = malloc(sizeof(union mly_devinfo),
	    M_DEVBUF, M_NOWAIT|M_ZERO);

	mc->mc_flags |= MLY_CCB_DATAIN;
	mc->mc_complete = mly_complete_rescan;

	/*
	 * Build the ioctl.
	 */
	mci = (struct mly_cmd_ioctl *)&mc->mc_packet->ioctl;
	mci->opcode = MDACMD_IOCTL;
	mci->timeout = 30 | MLY_TIMEOUT_SECONDS;
	memset(&mci->param, 0, sizeof(mci->param));

	if (MLY_BUS_IS_VIRTUAL(mly, bus)) {
		mc->mc_length = sizeof(struct mly_ioctl_getlogdevinfovalid);
		mci->data_size = htole32(mc->mc_length);
		mci->sub_ioctl = MDACIOCTL_GETLOGDEVINFOVALID;
		_lto3l(MLY_LOGADDR(0, MLY_LOGDEV_ID(mly, bus, target)),
		    mci->addr);
	} else {
		mc->mc_length = sizeof(struct mly_ioctl_getphysdevinfovalid);
		mci->data_size = htole32(mc->mc_length);
		mci->sub_ioctl = MDACIOCTL_GETPHYSDEVINFOVALID;
		_lto3l(MLY_PHYADDR(0, bus, target, 0), mci->addr);
	}

	/*
	 * Dispatch the command.
	 */
	if ((rv = mly_ccb_map(mly, mc)) != 0) {
		free(mc->mc_data, M_DEVBUF);
		mly_ccb_free(mly, mc);
		return (rv);
	}

	mly->mly_btl[bus][target].mb_flags |= MLY_BTL_SCANNING;
	mly_ccb_enqueue(mly, mc);
	return (0);
}

/*
 * Handle the completion of a rescan operation.
 */
static void
mly_complete_rescan(struct mly_softc *mly, struct mly_ccb *mc)
{
	struct mly_ioctl_getlogdevinfovalid *ldi;
	struct mly_ioctl_getphysdevinfovalid *pdi;
	struct mly_cmd_ioctl *mci;
	struct mly_btl btl, *btlp;
	struct scsipi_xfer_mode xm;
	int bus, target, rescan;
	u_int tmp;

	mly_ccb_unmap(mly, mc);

	/*
	 * Recover the bus and target from the command.  We need these even
	 * in the case where we don't have a useful response.
	 */
	mci = (struct mly_cmd_ioctl *)&mc->mc_packet->ioctl;
	tmp = _3ltol(mci->addr);
	rescan = 0;

	if (mci->sub_ioctl == MDACIOCTL_GETLOGDEVINFOVALID) {
		bus = MLY_LOGDEV_BUS(mly, MLY_LOGADDR_DEV(tmp));
		target = MLY_LOGDEV_TARGET(mly, MLY_LOGADDR_DEV(tmp));
	} else {
		bus = MLY_PHYADDR_CHANNEL(tmp);
		target = MLY_PHYADDR_TARGET(tmp);
	}

	btlp = &mly->mly_btl[bus][target];

	/* The default result is 'no device'. */
	memset(&btl, 0, sizeof(btl));
	btl.mb_flags = MLY_BTL_PROTECTED;

	/* If the rescan completed OK, we have possibly-new BTL data. */
	if (mc->mc_status != 0)
		goto out;

	if (mc->mc_length == sizeof(*ldi)) {
		ldi = (struct mly_ioctl_getlogdevinfovalid *)mc->mc_data;
		tmp = le32toh(ldi->logical_device_number);

		if (MLY_LOGDEV_BUS(mly, tmp) != bus ||
		    MLY_LOGDEV_TARGET(mly, tmp) != target) {
#ifdef MLYDEBUG
			printf("%s: WARNING: BTL rescan (logical) for %d:%d "
			    "returned data for %d:%d instead\n",
			    mly->mly_dv.dv_xname, bus, target,
			    MLY_LOGDEV_BUS(mly, tmp),
			    MLY_LOGDEV_TARGET(mly, tmp));
#endif
			goto out;
		}

		btl.mb_flags = MLY_BTL_LOGICAL | MLY_BTL_TQING;
		btl.mb_type = ldi->raid_level;
		btl.mb_state = ldi->state;
	} else if (mc->mc_length == sizeof(*pdi)) {
		pdi = (struct mly_ioctl_getphysdevinfovalid *)mc->mc_data;

		if (pdi->channel != bus || pdi->target != target) {
#ifdef MLYDEBUG
			printf("%s: WARNING: BTL rescan (physical) for %d:%d "
			    "returned data for %d:%d instead\n",
			    mly->mly_dv.dv_xname,
			    bus, target, pdi->channel, pdi->target);
#endif
			goto out;
		}

		btl.mb_flags = MLY_BTL_PHYSICAL;
		btl.mb_type = MLY_DEVICE_TYPE_PHYSICAL;
		btl.mb_state = pdi->state;
		btl.mb_speed = pdi->speed;
		btl.mb_width = pdi->width;

		if (pdi->state != MLY_DEVICE_STATE_UNCONFIGURED)
			btl.mb_flags |= MLY_BTL_PROTECTED;
		if (pdi->command_tags != 0)
			btl.mb_flags |= MLY_BTL_TQING;
	} else {
		printf("%s: BTL rescan result invalid\n", mly->mly_dv.dv_xname);
		goto out;
	}

	/* Decide whether we need to rescan the device. */
	if (btl.mb_flags != btlp->mb_flags ||
	    btl.mb_speed != btlp->mb_speed ||
	    btl.mb_width != btlp->mb_width)
		rescan = 1;

 out:
	*btlp = btl;

	if (rescan && (btl.mb_flags & MLY_BTL_PROTECTED) == 0) {
		xm.xm_target = target;
		mly_get_xfer_mode(mly, bus, &xm);
		/* XXX SCSI mid-layer rescan goes here. */
	}

	/* Wake anybody waiting on the device to be rescanned. */
	wakeup(btlp);

	free(mc->mc_data, M_DEVBUF);
	mly_ccb_free(mly, mc);
}

/*
 * Get the current health status and set the 'next event' counter to suit.
 */
static int
mly_get_eventstatus(struct mly_softc *mly)
{
	struct mly_cmd_ioctl mci;
	struct mly_health_status *mh;
	int rv;

	/* Build the gethealthstatus ioctl and send it. */
	memset(&mci, 0, sizeof(mci));
	mh = NULL;
	mci.sub_ioctl = MDACIOCTL_GETHEALTHSTATUS;

	rv = mly_ioctl(mly, &mci, (void *)&mh, sizeof(*mh), NULL, NULL);
	if (rv)
		return (rv);

	/* Get the event counter. */
	mly->mly_event_change = le32toh(mh->change_counter);
	mly->mly_event_waiting = le32toh(mh->next_event);
	mly->mly_event_counter = le32toh(mh->next_event);

	/* Save the health status into the memory mailbox */
	memcpy(&mly->mly_mmbox->mmm_health.status, mh, sizeof(*mh));

	bus_dmamap_sync(mly->mly_dmat, mly->mly_mmbox_dmamap,
	    offsetof(struct mly_mmbox, mmm_health),
	    sizeof(mly->mly_mmbox->mmm_health),
	    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);

	free(mh, M_DEVBUF);
	return (0);
}

/*
 * Enable memory mailbox mode.
 */
static int
mly_enable_mmbox(struct mly_softc *mly)
{
	struct mly_cmd_ioctl mci;
	u_int8_t *sp;
	u_int64_t tmp;
	int rv;

	/* Build the ioctl and send it. */
	memset(&mci, 0, sizeof(mci));
	mci.sub_ioctl = MDACIOCTL_SETMEMORYMAILBOX;

	/* Set buffer addresses. */
	tmp = mly->mly_mmbox_busaddr + offsetof(struct mly_mmbox, mmm_command);
	mci.param.setmemorymailbox.command_mailbox_physaddr = htole64(tmp);

	tmp = mly->mly_mmbox_busaddr + offsetof(struct mly_mmbox, mmm_status);
	mci.param.setmemorymailbox.status_mailbox_physaddr = htole64(tmp);

	tmp = mly->mly_mmbox_busaddr + offsetof(struct mly_mmbox, mmm_health);
	mci.param.setmemorymailbox.health_buffer_physaddr = htole64(tmp);

	/* Set buffer sizes - abuse of data_size field is revolting. */
	sp = (u_int8_t *)&mci.data_size;
	sp[0] = (sizeof(union mly_cmd_packet) * MLY_MMBOX_COMMANDS) >> 10;
	sp[1] = (sizeof(union mly_status_packet) * MLY_MMBOX_STATUS) >> 10;
	mci.param.setmemorymailbox.health_buffer_size =
	    sizeof(union mly_health_region) >> 10;
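	/*
	 * (The controller expects the sizes above in units of 1kB,
	 * hence the >> 10.)
	 */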

	rv = mly_ioctl(mly, &mci, NULL, 0, NULL, NULL);
	if (rv)
		return (rv);

	mly->mly_state |= MLY_STATE_MMBOX_ACTIVE;
	return (0);
}

/*
 * Flush all pending I/O from the controller.
 */
static int
mly_flush(struct mly_softc *mly)
{
	struct mly_cmd_ioctl mci;

	/* Build the ioctl */
	memset(&mci, 0, sizeof(mci));
	mci.sub_ioctl = MDACIOCTL_FLUSHDEVICEDATA;
	mci.param.deviceoperation.operation_device =
	    MLY_OPDEVICE_PHYSICAL_CONTROLLER;

	/* Pass it off to the controller */
	return (mly_ioctl(mly, &mci, NULL, 0, NULL, NULL));
}

/*
 * Perform an ioctl command.
 *
 * If (data) is not NULL, the command requires data transfer to the
 * controller.  If (*data) is NULL the command requires data transfer from
 * the controller, and we will allocate a buffer for it.
 */
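/*
 * (For example, mly_get_controllerinfo() above passes a pointer to a
 * NULL pointer, so a buffer is allocated here and handed back to the
 * caller.)
 */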
static int
mly_ioctl(struct mly_softc *mly, struct mly_cmd_ioctl *ioctl, void **data,
	  size_t datasize, void *sense_buffer,
	  size_t *sense_length)
{
	struct mly_ccb *mc;
	struct mly_cmd_ioctl *mci;
	u_int8_t status;
	int rv;

	mc = NULL;
	if ((rv = mly_ccb_alloc(mly, &mc)) != 0)
		goto bad;

	/*
	 * Copy the ioctl structure, but save some important fields and then
	 * fixup.
	 */
	mci = &mc->mc_packet->ioctl;
	ioctl->sense_buffer_address = htole64(mci->sense_buffer_address);
	ioctl->maximum_sense_size = mci->maximum_sense_size;
	*mci = *ioctl;
	mci->opcode = MDACMD_IOCTL;
	mci->timeout = 30 | MLY_TIMEOUT_SECONDS;

	/* Handle the data buffer. */
	if (data != NULL) {
		if (*data == NULL) {
			/* Allocate data buffer */
			mc->mc_data = malloc(datasize, M_DEVBUF, M_NOWAIT);
			mc->mc_flags |= MLY_CCB_DATAIN;
		} else {
			mc->mc_data = *data;
			mc->mc_flags |= MLY_CCB_DATAOUT;
		}
		mc->mc_length = datasize;
		mc->mc_packet->generic.data_size = htole32(datasize);
	}

	/* Run the command. */
	if (datasize > 0)
		if ((rv = mly_ccb_map(mly, mc)) != 0)
			goto bad;
	rv = mly_ccb_poll(mly, mc, 30000);
	if (datasize > 0)
		mly_ccb_unmap(mly, mc);
	if (rv != 0)
		goto bad;

	/* Clean up and return any data. */
	status = mc->mc_status;

	if (status != 0)
		printf("mly_ioctl: command status %d\n", status);

	if (mc->mc_sense > 0 && sense_buffer != NULL) {
		memcpy(sense_buffer, mc->mc_packet, mc->mc_sense);
		*sense_length = mc->mc_sense;
		goto bad;
	}

	/* Should we return a data pointer? */
	if (data != NULL && *data == NULL)
		*data = mc->mc_data;

	/* Command completed OK. */
	rv = (status != 0 ? EIO : 0);

 bad:
	if (mc != NULL) {
		/* Do we need to free a data buffer we allocated? */
		if (rv != 0 && mc->mc_data != NULL &&
		    (data == NULL || *data == NULL))
			free(mc->mc_data, M_DEVBUF);
		mly_ccb_free(mly, mc);
	}

	return (rv);
}

/*
 * Check for event(s) outstanding in the controller.
 */
static void
mly_check_event(struct mly_softc *mly)
{

	bus_dmamap_sync(mly->mly_dmat, mly->mly_mmbox_dmamap,
	    offsetof(struct mly_mmbox, mmm_health),
	    sizeof(mly->mly_mmbox->mmm_health),
	    BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);

	/*
	 * The controller may have updated the health status information, so
	 * check for it here.  Note that the counters are all in host
	 * memory, so this check is very cheap.  Also note that we depend on
	 * this check being made regularly: it runs from the monitoring
	 * thread and again each time an event fetch completes.
	 */
	if (le32toh(mly->mly_mmbox->mmm_health.status.change_counter) !=
	    mly->mly_event_change) {
		mly->mly_event_change =
		    le32toh(mly->mly_mmbox->mmm_health.status.change_counter);
		mly->mly_event_waiting =
		    le32toh(mly->mly_mmbox->mmm_health.status.next_event);

		/* Wake up anyone that might be interested in this. */
		wakeup(&mly->mly_event_change);
	}

	bus_dmamap_sync(mly->mly_dmat, mly->mly_mmbox_dmamap,
	    offsetof(struct mly_mmbox, mmm_health),
	    sizeof(mly->mly_mmbox->mmm_health),
	    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);

	if (mly->mly_event_counter != mly->mly_event_waiting)
		mly_fetch_event(mly);
}

/*
 * Fetch one event from the controller.  If we fail due to resource
 * starvation, we'll be retried the next time a command completes.
 */
static void
mly_fetch_event(struct mly_softc *mly)
{
	struct mly_ccb *mc;
	struct mly_cmd_ioctl *mci;
	int s;
	u_int32_t event;

	/* Get a command. */
	if (mly_ccb_alloc(mly, &mc))
		return;

	/* Set up the data buffer. */
	mc->mc_data = malloc(sizeof(struct mly_event), M_DEVBUF,
	    M_NOWAIT|M_ZERO);

	mc->mc_length = sizeof(struct mly_event);
	mc->mc_flags |= MLY_CCB_DATAIN;
	mc->mc_complete = mly_complete_event;

	/*
	 * Get an event number to fetch.  It's possible that we've raced
	 * with another context for the last event, in which case there will
	 * be no more events.
	 */
	s = splbio();
	if (mly->mly_event_counter == mly->mly_event_waiting) {
		splx(s);
		free(mc->mc_data, M_DEVBUF);
		mly_ccb_free(mly, mc);
		return;
	}
	event = mly->mly_event_counter++;
	splx(s);

	/*
	 * Build the ioctl.
	 *
	 * At this point we are committed to sending this request, as it
	 * will be the only one constructed for this particular event
	 * number.
	 */
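	/*
	 * The 32-bit event sequence number is split across the request:
	 * the low 16 bits travel in the getevent parameter, while the
	 * upper two bytes are packed into the address field.
	 */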
	mci = (struct mly_cmd_ioctl *)&mc->mc_packet->ioctl;
	mci->opcode = MDACMD_IOCTL;
	mci->data_size = htole32(sizeof(struct mly_event));
	_lto3l(MLY_PHYADDR(0, 0, (event >> 16) & 0xff, (event >> 24) & 0xff),
	    mci->addr);
	mci->timeout = 30 | MLY_TIMEOUT_SECONDS;
	mci->sub_ioctl = MDACIOCTL_GETEVENT;
	mci->param.getevent.sequence_number_low = htole16(event & 0xffff);

	/*
	 * Submit the command.
	 */
	if (mly_ccb_map(mly, mc) != 0)
		goto bad;
	mly_ccb_enqueue(mly, mc);
	return;

 bad:
	printf("%s: couldn't fetch event %u\n", mly->mly_dv.dv_xname, event);
	free(mc->mc_data, M_DEVBUF);
	mly_ccb_free(mly, mc);
}

/*
 * Handle the completion of an event poll.
 */
static void
mly_complete_event(struct mly_softc *mly, struct mly_ccb *mc)
{
	struct mly_event *me;

	me = (struct mly_event *)mc->mc_data;
	mly_ccb_unmap(mly, mc);
	mly_ccb_free(mly, mc);

	/* If the event was successfully fetched, process it. */
	if (mc->mc_status == SCSI_OK)
		mly_process_event(mly, me);
	else
		printf("%s: unable to fetch event; status = 0x%x\n",
		    mly->mly_dv.dv_xname, mc->mc_status);

	free(me, M_DEVBUF);

	/* Check for another event. */
	mly_check_event(mly);
}

/*
 * Process a controller event.  Called with interrupts blocked (i.e., at
 * interrupt time).
 */
static void
mly_process_event(struct mly_softc *mly, struct mly_event *me)
{
	struct scsi_sense_data *ssd;
	int bus, target, event, class, action;
	const char *fp, *tp;

	ssd = (struct scsi_sense_data *)&me->sense[0];

	/*
	 * Errors can be reported using vendor-unique sense data.  In this
	 * case, the event code will be 0x1c (Request sense data present),
	 * the sense key will be 0x09 (vendor specific), the MSB of the ASC
	 * will be set, and the actual event code will be a 16-bit value
	 * comprised of the ASCQ (low byte) and low seven bits of the ASC
	 * (low seven bits of the high byte).
	 */
	if (le32toh(me->code) == 0x1c &&
	    SSD_SENSE_KEY(ssd->flags) == SKEY_VENDOR_SPECIFIC &&
	    (ssd->asc & 0x80) != 0) {
		event = ((int)(ssd->asc & ~0x80) << 8) +
		    ssd->ascq;
	} else
		event = le32toh(me->code);

	/* Look up event, get codes. */
	fp = mly_describe_code(mly_table_event, event);

	/* Quiet event? */
	class = fp[0];
#ifdef notyet
	if (isupper(class) && bootverbose)
		class = tolower(class);
#endif

	/* Get action code, text string. */
	action = fp[1];
	tp = fp + 3;

	/*
	 * Print some information about the event.
	 *
	 * This code uses a table derived from the corresponding portion of
	 * the Linux driver, and thus the parser is very similar.
	 */
	switch (class) {
	case 'p':
		/*
		 * Error on physical drive.
		 */
		printf("%s: physical device %d:%d %s\n", mly->mly_dv.dv_xname,
		    me->channel, me->target, tp);
		if (action == 'r')
			mly->mly_btl[me->channel][me->target].mb_flags |=
			    MLY_BTL_RESCAN;
		break;

	case 'l':
	case 'm':
		/*
		 * Error on logical unit, or message about logical unit.
		 */
		bus = MLY_LOGDEV_BUS(mly, me->lun);
		target = MLY_LOGDEV_TARGET(mly, me->lun);
		printf("%s: logical device %d:%d %s\n", mly->mly_dv.dv_xname,
		    bus, target, tp);
		if (action == 'r')
			mly->mly_btl[bus][target].mb_flags |= MLY_BTL_RESCAN;
		break;

	case 's':
		/*
		 * Report of sense data.
		 */
		if ((SSD_SENSE_KEY(ssd->flags) == SKEY_NO_SENSE ||
		     SSD_SENSE_KEY(ssd->flags) == SKEY_NOT_READY) &&
		    ssd->asc == 0x04 &&
		    (ssd->ascq == 0x01 ||
		     ssd->ascq == 0x02)) {
			/* Ignore NO_SENSE or NOT_READY in one case */
			break;
		}

		/*
		 * XXX Should translate this if SCSIVERBOSE.
		 */
		printf("%s: physical device %d:%d %s\n", mly->mly_dv.dv_xname,
		    me->channel, me->target, tp);
		printf("%s: sense key %d asc %02x ascq %02x\n",
		    mly->mly_dv.dv_xname, SSD_SENSE_KEY(ssd->flags),
		    ssd->asc, ssd->ascq);
		printf("%s: info %x%x%x%x csi %x%x%x%x\n",
		    mly->mly_dv.dv_xname, ssd->info[0], ssd->info[1],
		    ssd->info[2], ssd->info[3], ssd->csi[0],
		    ssd->csi[1], ssd->csi[2],
		    ssd->csi[3]);
		if (action == 'r')
			mly->mly_btl[me->channel][me->target].mb_flags |=
			    MLY_BTL_RESCAN;
		break;

	case 'e':
		printf("%s: ", mly->mly_dv.dv_xname);
		printf(tp, me->target, me->lun);
		break;

	case 'c':
		printf("%s: controller %s\n", mly->mly_dv.dv_xname, tp);
		break;

	case '?':
		printf("%s: %s - %d\n", mly->mly_dv.dv_xname, tp, event);
		break;

	default:
		/* Probably a 'noisy' event being ignored. */
		break;
	}
}

/*
 * Perform periodic activities.
 */
static void
mly_thread(void *cookie)
{
	struct mly_softc *mly;
	struct mly_btl *btl;
	int s, bus, target, done;

	mly = (struct mly_softc *)cookie;

	for (;;) {
		/* Check for new events. */
		mly_check_event(mly);

		/* Re-scan up to 1 device. */
		s = splbio();
		done = 0;
		for (bus = 0; bus < mly->mly_nchans && !done; bus++) {
			for (target = 0; target < MLY_MAX_TARGETS; target++) {
				/* Perform device rescan? */
				btl = &mly->mly_btl[bus][target];
				if ((btl->mb_flags & MLY_BTL_RESCAN) != 0) {
					btl->mb_flags ^= MLY_BTL_RESCAN;
					mly_scan_btl(mly, bus, target);
					done = 1;
					break;
				}
			}
		}
		splx(s);

		/* Sleep for N seconds. */
		tsleep(mly_thread, PWAIT, "mlyzzz",
		    hz * MLY_PERIODIC_INTERVAL);
	}
}

/*
 * Submit a command to the controller and poll on completion.  Return
 * non-zero on timeout.
 */
static int
mly_ccb_poll(struct mly_softc *mly, struct mly_ccb *mc, int timo)
{
	int rv;

	if ((rv = mly_ccb_submit(mly, mc)) != 0)
		return (rv);

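	/* timo is given in milliseconds; poll in 100us steps. */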
	for (timo *= 10; timo != 0; timo--) {
		if ((mc->mc_flags & MLY_CCB_COMPLETE) != 0)
			break;
		mly_intr(mly);
		DELAY(100);
	}

	return (timo == 0);
}

/*
 * Submit a command to the controller and sleep on completion.  Return
 * non-zero on timeout.
 */
static int
mly_ccb_wait(struct mly_softc *mly, struct mly_ccb *mc, int timo)
{
	int rv, s;

	mly_ccb_enqueue(mly, mc);

	s = splbio();
	if ((mc->mc_flags & MLY_CCB_COMPLETE) != 0) {
		splx(s);
		return (0);
	}
	rv = tsleep(mc, PRIBIO, "mlywccb", timo * hz / 1000);
	splx(s);

	return (rv);
}

/*
 * If a CCB is specified, enqueue it.  Pull CCBs off the software queue in
 * the order that they were enqueued and try to submit their command blocks
 * to the controller for execution.
 */
void
mly_ccb_enqueue(struct mly_softc *mly, struct mly_ccb *mc)
{
	int s;

	s = splbio();

	if (mc != NULL)
		SIMPLEQ_INSERT_TAIL(&mly->mly_ccb_queue, mc, mc_link.simpleq);

	while ((mc = SIMPLEQ_FIRST(&mly->mly_ccb_queue)) != NULL) {
		if (mly_ccb_submit(mly, mc))
			break;
		SIMPLEQ_REMOVE_HEAD(&mly->mly_ccb_queue, mc_link.simpleq);
	}

	splx(s);
}

/*
 * Deliver a command to the controller.
 */
static int
mly_ccb_submit(struct mly_softc *mly, struct mly_ccb *mc)
{
	union mly_cmd_packet *pkt;
	int s, off;

	mc->mc_packet->generic.command_id = htole16(mc->mc_slot);

	bus_dmamap_sync(mly->mly_dmat, mly->mly_pkt_dmamap,
	    mc->mc_packetphys - mly->mly_pkt_busaddr,
	    sizeof(union mly_cmd_packet),
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	s = splbio();

	/*
	 * Do we have to use the hardware mailbox?
	 */
	if ((mly->mly_state & MLY_STATE_MMBOX_ACTIVE) == 0) {
		/*
		 * Check to see if the controller is ready for us.
		 */
		if (mly_idbr_true(mly, MLY_HM_CMDSENT)) {
			splx(s);
			return (EBUSY);
		}

		/*
		 * It's ready, send the command.
		 */
		mly_outl(mly, mly->mly_cmd_mailbox,
		    (u_int64_t)mc->mc_packetphys & 0xffffffff);
		mly_outl(mly, mly->mly_cmd_mailbox + 4,
		    (u_int64_t)mc->mc_packetphys >> 32);
		mly_outb(mly, mly->mly_idbr, MLY_HM_CMDSENT);
	} else {
		pkt = &mly->mly_mmbox->mmm_command[mly->mly_mmbox_cmd_idx];
		off = (char *)pkt - (char *)mly->mly_mmbox;

		bus_dmamap_sync(mly->mly_dmat, mly->mly_mmbox_dmamap,
		    off, sizeof(mly->mly_mmbox->mmm_command[0]),
		    BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);

		/* Check to see if the next index is free yet. */
		if (pkt->mmbox.flag != 0) {
			splx(s);
			return (EBUSY);
		}

		/* Copy in new command */
		memcpy(pkt->mmbox.data, mc->mc_packet->mmbox.data,
		    sizeof(pkt->mmbox.data));

		/*
		 * Copy the flag byte last: the controller watches this
		 * field, so the command body above must be fully in
		 * place before the flag is set.
		 */
		pkt->mmbox.flag = mc->mc_packet->mmbox.flag;

		bus_dmamap_sync(mly->mly_dmat, mly->mly_mmbox_dmamap,
		    off, sizeof(mly->mly_mmbox->mmm_command[0]),
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);

		/* Signal controller and update index. */
		mly_outb(mly, mly->mly_idbr, MLY_AM_CMDSENT);
		mly->mly_mmbox_cmd_idx =
		    (mly->mly_mmbox_cmd_idx + 1) % MLY_MMBOX_COMMANDS;
	}

	splx(s);
	return (0);
}

/*
 * Pick up completed commands from the controller and handle accordingly.
 */
int
mly_intr(void *cookie)
{
	struct mly_ccb *mc;
	union mly_status_packet *sp;
	u_int16_t slot;
	int forus, off;
	struct mly_softc *mly;

	mly = cookie;
	forus = 0;

	/*
	 * Pick up hardware-mailbox commands.
	 */
	if (mly_odbr_true(mly, MLY_HM_STSREADY)) {
		slot = mly_inw(mly, mly->mly_status_mailbox);

		if (slot < MLY_SLOT_MAX) {
			mc = mly->mly_ccbs + (slot - MLY_SLOT_START);
			mc->mc_status =
			    mly_inb(mly, mly->mly_status_mailbox + 2);
			mc->mc_sense =
			    mly_inb(mly, mly->mly_status_mailbox + 3);
			mc->mc_resid =
			    mly_inl(mly, mly->mly_status_mailbox + 4);

			mly_ccb_complete(mly, mc);
		} else {
			/* Slot 0xffff may mean "extremely bogus command". */
			printf("%s: got HM completion for illegal slot %u\n",
			    mly->mly_dv.dv_xname, slot);
		}

		/* Unconditionally acknowledge status. */
		mly_outb(mly, mly->mly_odbr, MLY_HM_STSREADY);
		mly_outb(mly, mly->mly_idbr, MLY_HM_STSACK);
		forus = 1;
	}

	/*
	 * Pick up memory-mailbox commands.
	 */
	if (mly_odbr_true(mly, MLY_AM_STSREADY)) {
		for (;;) {
			sp = &mly->mly_mmbox->mmm_status[mly->mly_mmbox_sts_idx];
			off = (char *)sp - (char *)mly->mly_mmbox;

			bus_dmamap_sync(mly->mly_dmat, mly->mly_mmbox_dmamap,
			    off, sizeof(mly->mly_mmbox->mmm_command[0]),
			    BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);

			/* Check for more status. */
			if (sp->mmbox.flag == 0)
				break;

			/* Get slot number. */
			slot = le16toh(sp->status.command_id);
			if (slot < MLY_SLOT_MAX) {
				mc = mly->mly_ccbs + (slot - MLY_SLOT_START);
				mc->mc_status = sp->status.status;
				mc->mc_sense = sp->status.sense_length;
				mc->mc_resid = le32toh(sp->status.residue);
				mly_ccb_complete(mly, mc);
			} else {
				/*
				 * Slot 0xffff may mean "extremely bogus
				 * command".
				 */
				printf("%s: got AM completion for illegal "
				    "slot %u at %d\n", mly->mly_dv.dv_xname,
				    slot, mly->mly_mmbox_sts_idx);
			}

			/* Clear and move to next index. */
			sp->mmbox.flag = 0;
			mly->mly_mmbox_sts_idx =
			    (mly->mly_mmbox_sts_idx + 1) % MLY_MMBOX_STATUS;
		}

		/* Acknowledge that we have collected status value(s). */
		mly_outb(mly, mly->mly_odbr, MLY_AM_STSREADY);
		forus = 1;
	}

	/*
	 * Run the queue.
	 */
	if (forus && ! SIMPLEQ_EMPTY(&mly->mly_ccb_queue))
		mly_ccb_enqueue(mly, NULL);

	return (forus);
}

/*
 * Process completed commands
 */
static void
mly_ccb_complete(struct mly_softc *mly, struct mly_ccb *mc)
{
	void (*complete)(struct mly_softc *, struct mly_ccb *);

	bus_dmamap_sync(mly->mly_dmat, mly->mly_pkt_dmamap,
	    mc->mc_packetphys - mly->mly_pkt_busaddr,
	    sizeof(union mly_cmd_packet),
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	complete = mc->mc_complete;
	mc->mc_flags |= MLY_CCB_COMPLETE;

	/*
	 * Call completion handler or wake up sleeping consumer.
	 */
	if (complete != NULL)
		(*complete)(mly, mc);
	else
		wakeup(mc);
}

/*
 * Allocate a command.
 */
int
mly_ccb_alloc(struct mly_softc *mly, struct mly_ccb **mcp)
{
	struct mly_ccb *mc;
	int s;

	s = splbio();
	mc = SLIST_FIRST(&mly->mly_ccb_free);
	if (mc != NULL)
		SLIST_REMOVE_HEAD(&mly->mly_ccb_free, mc_link.slist);
	splx(s);

	*mcp = mc;
	return (mc == NULL ? EAGAIN : 0);
}

/*
 * Release a command back to the freelist.
 */
void
mly_ccb_free(struct mly_softc *mly, struct mly_ccb *mc)
{
	int s;

	/*
	 * Reset parts of the command that could cause confusion if a
	 * later consumer of this CCB doesn't fill them in itself.
	 */
	mc->mc_data = NULL;
	mc->mc_flags = 0;
	mc->mc_complete = NULL;
	mc->mc_private = NULL;
	mc->mc_packet->generic.command_control = 0;

	/*
	 * By default, we set up to overwrite the command packet with sense
	 * information.
	 */
	mc->mc_packet->generic.sense_buffer_address =
	    htole64(mc->mc_packetphys);
	mc->mc_packet->generic.maximum_sense_size =
	    sizeof(union mly_cmd_packet);

	s = splbio();
	SLIST_INSERT_HEAD(&mly->mly_ccb_free, mc, mc_link.slist);
	splx(s);
}

/*
 * Allocate and initialize command and packet structures.
 *
 * If the controller supports fewer than MLY_MAX_CCBS commands, limit our
 * allocation to that number.  If we don't yet know how many commands the
 * controller supports, allocate a very small set (suitable for initialization
 * purposes only).
 */
static int
mly_alloc_ccbs(struct mly_softc *mly)
{
	struct mly_ccb *mc;
	int i, rv;

	if (mly->mly_controllerinfo == NULL)
		mly->mly_ncmds = MLY_CCBS_RESV;
	else {
		i = le16toh(mly->mly_controllerinfo->maximum_parallel_commands);
		mly->mly_ncmds = min(MLY_MAX_CCBS, i);
	}

	/*
	 * Allocate enough space for all the command packets in one chunk
	 * and map them permanently into controller-visible space.
	 */
	rv = mly_dmamem_alloc(mly,
	    mly->mly_ncmds * sizeof(union mly_cmd_packet),
	    &mly->mly_pkt_dmamap, (void **)&mly->mly_pkt,
	    &mly->mly_pkt_busaddr, &mly->mly_pkt_seg);
	if (rv)
		return (rv);

	mly->mly_ccbs = malloc(sizeof(struct mly_ccb) * mly->mly_ncmds,
	    M_DEVBUF, M_NOWAIT|M_ZERO);

	for (i = 0; i < mly->mly_ncmds; i++) {
		mc = mly->mly_ccbs + i;
		mc->mc_slot = MLY_SLOT_START + i;
		mc->mc_packet = mly->mly_pkt + i;
		mc->mc_packetphys = mly->mly_pkt_busaddr +
		    (i * sizeof(union mly_cmd_packet));

		rv = bus_dmamap_create(mly->mly_dmat, MLY_MAX_XFER,
		    MLY_MAX_SEGS, MLY_MAX_XFER, 0,
		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
		    &mc->mc_datamap);
		if (rv) {
			mly_release_ccbs(mly);
			return (rv);
		}

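		/*
		 * Release the new CCB straight onto the free list; as a
		 * side effect this initializes its default sense-buffer
		 * fields.
		 */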
		mly_ccb_free(mly, mc);
	}

	return (0);
}

/*
 * Free all the storage held by commands.
 *
 * Must be called with all commands on the free list.
 */
static void
mly_release_ccbs(struct mly_softc *mly)
{
	struct mly_ccb *mc;

	/* Throw away command buffer DMA maps. */
	while (mly_ccb_alloc(mly, &mc) == 0)
		bus_dmamap_destroy(mly->mly_dmat, mc->mc_datamap);

	/* Release CCB storage. */
	free(mly->mly_ccbs, M_DEVBUF);

	/* Release the packet storage. */
	mly_dmamem_free(mly, mly->mly_ncmds * sizeof(union mly_cmd_packet),
	    mly->mly_pkt_dmamap, (void *)mly->mly_pkt, &mly->mly_pkt_seg);
}

/*
 * Map a command into controller-visible space.
 */
static int
mly_ccb_map(struct mly_softc *mly, struct mly_ccb *mc)
{
	struct mly_cmd_generic *gen;
	struct mly_sg_entry *sg;
	bus_dma_segment_t *ds;
	int flg, nseg, rv;

#ifdef DIAGNOSTIC
	/* Don't map more than once. */
	if ((mc->mc_flags & MLY_CCB_MAPPED) != 0)
		panic("mly_ccb_map: already mapped");
	mc->mc_flags |= MLY_CCB_MAPPED;

	/* Does the command have a data buffer? */
	if (mc->mc_data == NULL)
		panic("mly_ccb_map: no data buffer");
#endif

	rv = bus_dmamap_load(mly->mly_dmat, mc->mc_datamap, mc->mc_data,
	    mc->mc_length, NULL, BUS_DMA_NOWAIT | BUS_DMA_STREAMING |
	    ((mc->mc_flags & MLY_CCB_DATAIN) != 0 ?
	    BUS_DMA_READ : BUS_DMA_WRITE));
	if (rv != 0)
		return (rv);

	gen = &mc->mc_packet->generic;

	/*
	 * Can we use the transfer structure directly?
	 */
	if ((nseg = mc->mc_datamap->dm_nsegs) <= 2) {
		mc->mc_sgoff = -1;
		sg = &gen->transfer.direct.sg[0];
	} else {
		mc->mc_sgoff = (mc->mc_slot - MLY_SLOT_START) *
		    MLY_MAX_SEGS;
		sg = mly->mly_sg + mc->mc_sgoff;
		gen->command_control |= MLY_CMDCTL_EXTENDED_SG_TABLE;
		gen->transfer.indirect.entries[0] = htole16(nseg);
		gen->transfer.indirect.table_physaddr[0] =
		    htole64(mly->mly_sg_busaddr +
		    (mc->mc_sgoff * sizeof(struct mly_sg_entry)));
	}

	/*
	 * Fill the S/G table.
	 */
	for (ds = mc->mc_datamap->dm_segs; nseg != 0; nseg--, sg++, ds++) {
		sg->physaddr = htole64(ds->ds_addr);
		sg->length = htole64(ds->ds_len);
	}

	/*
	 * Sync up the data map.
	 */
	if ((mc->mc_flags & MLY_CCB_DATAIN) != 0)
		flg = BUS_DMASYNC_PREREAD;
	else /* if ((mc->mc_flags & MLY_CCB_DATAOUT) != 0) */ {
		gen->command_control |= MLY_CMDCTL_DATA_DIRECTION;
		flg = BUS_DMASYNC_PREWRITE;
	}

	bus_dmamap_sync(mly->mly_dmat, mc->mc_datamap, 0, mc->mc_length, flg);

	/*
	 * Sync up the chained S/G table, if we're using one.
	 */
	if (mc->mc_sgoff == -1)
		return (0);

	bus_dmamap_sync(mly->mly_dmat, mly->mly_sg_dmamap, mc->mc_sgoff,
	    MLY_SGL_SIZE, BUS_DMASYNC_PREWRITE);

	return (0);
}

/*
 * Unmap a command from controller-visible space.
 */
static void
mly_ccb_unmap(struct mly_softc *mly, struct mly_ccb *mc)
{
	int flg;

#ifdef DIAGNOSTIC
	if ((mc->mc_flags & MLY_CCB_MAPPED) == 0)
		panic("mly_ccb_unmap: not mapped");
	mc->mc_flags &= ~MLY_CCB_MAPPED;
#endif

	if ((mc->mc_flags & MLY_CCB_DATAIN) != 0)
		flg = BUS_DMASYNC_POSTREAD;
	else /* if ((mc->mc_flags & MLY_CCB_DATAOUT) != 0) */
		flg = BUS_DMASYNC_POSTWRITE;

	bus_dmamap_sync(mly->mly_dmat, mc->mc_datamap, 0, mc->mc_length, flg);
	bus_dmamap_unload(mly->mly_dmat, mc->mc_datamap);

	if (mc->mc_sgoff == -1)
		return;

	bus_dmamap_sync(mly->mly_dmat, mly->mly_sg_dmamap, mc->mc_sgoff,
	    MLY_SGL_SIZE, BUS_DMASYNC_POSTWRITE);
}

/*
 * Adjust the size of each I/O before it passes to the SCSI layer.
 */
static void
mly_scsipi_minphys(struct buf *bp)
{

	if (bp->b_bcount > MLY_MAX_XFER)
		bp->b_bcount = MLY_MAX_XFER;
	minphys(bp);
}

/*
 * Start a SCSI command.
 */
static void
mly_scsipi_request(struct scsipi_channel *chan, scsipi_adapter_req_t req,
		   void *arg)
{
	struct mly_ccb *mc;
	struct mly_cmd_scsi_small *ss;
	struct scsipi_xfer *xs;
	struct scsipi_periph *periph;
	struct mly_softc *mly;
	struct mly_btl *btl;
	int s, tmp;

	mly = (void *)chan->chan_adapter->adapt_dev;

	switch (req) {
	case ADAPTER_REQ_RUN_XFER:
		xs = arg;
		periph = xs->xs_periph;
		btl = &mly->mly_btl[chan->chan_channel][periph->periph_target];
		s = splbio();
		tmp = btl->mb_flags;
		splx(s);

		/*
		 * Check for I/O attempt to a protected or non-existent
		 * device.
		 */
		if ((tmp & MLY_BTL_PROTECTED) != 0) {
			xs->error = XS_SELTIMEOUT;
			scsipi_done(xs);
			break;
		}

#ifdef DIAGNOSTIC
		/* XXX Increase if/when we support large SCSI commands. */
		if (xs->cmdlen > MLY_CMD_SCSI_SMALL_CDB) {
			printf("%s: cmd too large\n", mly->mly_dv.dv_xname);
			xs->error = XS_DRIVER_STUFFUP;
			scsipi_done(xs);
			break;
		}
#endif

		if (mly_ccb_alloc(mly, &mc)) {
			xs->error = XS_RESOURCE_SHORTAGE;
			scsipi_done(xs);
			break;
		}

		/* Build the command. */
		mc->mc_data = xs->data;
		mc->mc_length = xs->datalen;
		mc->mc_complete = mly_scsipi_complete;
		mc->mc_private = xs;

		/* Build the packet for the controller. */
		ss = &mc->mc_packet->scsi_small;
		ss->opcode = MDACMD_SCSI;
#ifdef notdef
		/*
		 * XXX FreeBSD does this, but it doesn't fix anything,
		 * XXX and appears potentially harmful.
		 */
		ss->command_control |= MLY_CMDCTL_DISABLE_DISCONNECT;
#endif

		ss->data_size = htole32(xs->datalen);
		_lto3l(MLY_PHYADDR(0, chan->chan_channel,
		    periph->periph_target, periph->periph_lun), ss->addr);

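		/*
		 * Convert the timeout from milliseconds to the
		 * controller's value-plus-units encoding, choosing the
		 * smallest unit that will hold the value.
		 */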
		if (xs->timeout < 60 * 1000)
			ss->timeout = xs->timeout / 1000 |
			    MLY_TIMEOUT_SECONDS;
		else if (xs->timeout < 60 * 60 * 1000)
			ss->timeout = xs->timeout / (60 * 1000) |
			    MLY_TIMEOUT_MINUTES;
		else
			ss->timeout = xs->timeout / (60 * 60 * 1000) |
			    MLY_TIMEOUT_HOURS;

		ss->maximum_sense_size = sizeof(xs->sense);
		ss->cdb_length = xs->cmdlen;
		memcpy(ss->cdb, xs->cmd, xs->cmdlen);

		if (mc->mc_length != 0) {
			if ((xs->xs_control & XS_CTL_DATA_OUT) != 0)
				mc->mc_flags |= MLY_CCB_DATAOUT;
			else /* if ((xs->xs_control & XS_CTL_DATA_IN) != 0) */
				mc->mc_flags |= MLY_CCB_DATAIN;

			if (mly_ccb_map(mly, mc) != 0) {
				xs->error = XS_DRIVER_STUFFUP;
				mly_ccb_free(mly, mc);
				scsipi_done(xs);
				break;
			}
		}

		/*
		 * Give the command to the controller.
		 */
		if ((xs->xs_control & XS_CTL_POLL) != 0) {
			if (mly_ccb_poll(mly, mc, xs->timeout + 5000)) {
				xs->error = XS_REQUEUE;
				if (mc->mc_length != 0)
					mly_ccb_unmap(mly, mc);
				mly_ccb_free(mly, mc);
				scsipi_done(xs);
			}
		} else
			mly_ccb_enqueue(mly, mc);

		break;

	case ADAPTER_REQ_GROW_RESOURCES:
		/*
		 * Not supported.
		 */
		break;

	case ADAPTER_REQ_SET_XFER_MODE:
		/*
		 * We can't change the transfer mode, but at least let
		 * scsipi know what the adapter has negotiated.
		 */
		mly_get_xfer_mode(mly, chan->chan_channel, arg);
		break;
	}
}

/*
 * Handle completion of a SCSI command.
 */
static void
mly_scsipi_complete(struct mly_softc *mly, struct mly_ccb *mc)
{
	struct scsipi_xfer *xs;
	struct scsipi_channel *chan;
	struct scsipi_inquiry_data *inq;
	struct mly_btl *btl;
	int target, sl, s;
	const char *p;

	xs = mc->mc_private;
	xs->status = mc->mc_status;

	/*
	 * XXX The `resid' value as returned by the controller appears to be
	 * bogus, so we always set it to zero.  Is it perhaps the transfer
	 * count?
	 */
	xs->resid = 0;	/* mc->mc_resid; */

	if (mc->mc_length != 0)
		mly_ccb_unmap(mly, mc);

	switch (mc->mc_status) {
	case SCSI_OK:
		/*
		 * In order to report logical device type and status, we
		 * overwrite the result of the INQUIRY command to logical
		 * devices.
		 */
		if (xs->cmd->opcode == INQUIRY) {
			chan = xs->xs_periph->periph_channel;
			target = xs->xs_periph->periph_target;
			btl = &mly->mly_btl[chan->chan_channel][target];

			s = splbio();
			if ((btl->mb_flags & MLY_BTL_LOGICAL) != 0) {
				inq = (struct scsipi_inquiry_data *)xs->data;
				mly_padstr(inq->vendor, "MYLEX", 8);
				p = mly_describe_code(mly_table_device_type,
				    btl->mb_type);
				mly_padstr(inq->product, p, 16);
				p = mly_describe_code(mly_table_device_state,
				    btl->mb_state);
				mly_padstr(inq->revision, p, 4);
			}
			splx(s);
		}

		xs->error = XS_NOERROR;
		break;

	case SCSI_CHECK:
		sl = mc->mc_sense;
		if (sl > sizeof(xs->sense.scsi_sense))
			sl = sizeof(xs->sense.scsi_sense);
		memcpy(&xs->sense.scsi_sense, mc->mc_packet, sl);
		xs->error = XS_SENSE;
		break;

	case SCSI_BUSY:
	case SCSI_QUEUE_FULL:
		xs->error = XS_BUSY;
		break;

	default:
		printf("%s: unknown SCSI status 0x%x\n",
		    mly->mly_dv.dv_xname, xs->status);
		xs->error = XS_DRIVER_STUFFUP;
		break;
	}

	mly_ccb_free(mly, mc);
	scsipi_done(xs);
}

/*
 * Notify scsipi about a target's transfer mode.
 */
static void
mly_get_xfer_mode(struct mly_softc *mly, int bus, struct scsipi_xfer_mode *xm)
{
	struct mly_btl *btl;
	int s;

	btl = &mly->mly_btl[bus][xm->xm_target];
	xm->xm_mode = 0;

	s = splbio();

	if ((btl->mb_flags & MLY_BTL_PHYSICAL) != 0) {
		if (btl->mb_speed == 0) {
			xm->xm_period = 0;
			xm->xm_offset = 0;
		} else {
			xm->xm_period = 12;			/* XXX */
			xm->xm_offset = 8;			/* XXX */
			xm->xm_mode |= PERIPH_CAP_SYNC;		/* XXX */
		}

2045 		switch (btl->mb_width) {
2046 		case 32:
2047 			xm->xm_mode |= PERIPH_CAP_WIDE32;
2048 			break;
2049 		case 16:
2050 			xm->xm_mode |= PERIPH_CAP_WIDE16;
2051 			break;
2052 		default:
2053 			/* Narrow bus; keep any sync capability set above. */
2054 			break;
2055 		}
2056 } else /* ((btl->mb_flags & MLY_BTL_LOGICAL) != 0) */ {
2057 xm->xm_mode = PERIPH_CAP_WIDE16 | PERIPH_CAP_SYNC;
2058 xm->xm_period = 12;
2059 xm->xm_offset = 8;
2060 }
2061
2062 if ((btl->mb_flags & MLY_BTL_TQING) != 0)
2063 xm->xm_mode |= PERIPH_CAP_TQING;
2064
2065 splx(s);
2066
2067 scsipi_async_event(&mly->mly_chans[bus], ASYNC_EVENT_XFER_MODE, xm);
2068 }
2069
2070 /*
2071 * ioctl hook; used here only to initiate low-level rescans.
2072 */
2073 static int
2074 mly_scsipi_ioctl(struct scsipi_channel *chan, u_long cmd, void *data,
2075 int flag, struct proc *p)
2076 {
2077 struct mly_softc *mly;
2078 int rv;
2079
2080 mly = (struct mly_softc *)chan->chan_adapter->adapt_dev;
2081
2082 switch (cmd) {
2083 case SCBUSIOLLSCAN:
2084 mly_scan_channel(mly, chan->chan_channel);
2085 rv = 0;
2086 break;
2087 default:
2088 rv = ENOTTY;
2089 break;
2090 }
2091
2092 return (rv);
2093 }
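
#ifdef notdef
/*
 * Illustrative only: a userland program could trigger the low-level
 * rescan above with something like the following.  The device path is
 * hypothetical; scsictl(8) normally issues this ioctl for you.
 */
#include <sys/scsiio.h>
#include <sys/ioctl.h>
#include <fcntl.h>
#include <unistd.h>

static int
rescan_bus(const char *path)	/* e.g. "/dev/scsibus0" */
{
	int fd, rv;

	if ((fd = open(path, O_RDWR)) == -1)
		return (-1);
	rv = ioctl(fd, SCBUSIOLLSCAN);	/* takes no argument */
	close(fd);
	return (rv);
}
#endif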
2094
2095 /*
2096 * Handshake with the firmware while the card is being initialized.
2097 */
2098 static int
2099 mly_fwhandshake(struct mly_softc *mly)
2100 {
2101 u_int8_t error, param0, param1;
2102 int spinup;
2103
2104 spinup = 0;
2105
2106 /* Set HM_STSACK and let the firmware initialize. */
2107 mly_outb(mly, mly->mly_idbr, MLY_HM_STSACK);
2108 DELAY(1000); /* too short? */
2109
2110 /* If HM_STSACK is still true, the controller is initializing. */
2111 if (!mly_idbr_true(mly, MLY_HM_STSACK))
2112 return (0);
2113
2114 printf("%s: controller initialization started\n",
2115 mly->mly_dv.dv_xname);
2116
2117 /*
2118 * Spin waiting for initialization to finish, or for a message to be
2119 * delivered.
2120 */
2121 while (mly_idbr_true(mly, MLY_HM_STSACK)) {
2122 /* Check for a message */
2123 if (!mly_error_valid(mly))
2124 continue;
2125
2126 error = mly_inb(mly, mly->mly_error_status) & ~MLY_MSG_EMPTY;
2127 param0 = mly_inb(mly, mly->mly_cmd_mailbox);
2128 param1 = mly_inb(mly, mly->mly_cmd_mailbox + 1);
2129
2130 switch (error) {
2131 case MLY_MSG_SPINUP:
2132 if (!spinup) {
2133 printf("%s: drive spinup in progress\n",
2134 mly->mly_dv.dv_xname);
2135 spinup = 1;
2136 }
2137 break;
2138
2139 case MLY_MSG_RACE_RECOVERY_FAIL:
2140 printf("%s: mirror race recovery failed - \n",
2141 mly->mly_dv.dv_xname);
2142 printf("%s: one or more drives offline\n",
2143 mly->mly_dv.dv_xname);
2144 break;
2145
2146 case MLY_MSG_RACE_IN_PROGRESS:
2147 printf("%s: mirror race recovery in progress\n",
2148 mly->mly_dv.dv_xname);
2149 break;
2150
2151 case MLY_MSG_RACE_ON_CRITICAL:
2152 printf("%s: mirror race recovery on critical drive\n",
2153 mly->mly_dv.dv_xname);
2154 break;
2155
2156 case MLY_MSG_PARITY_ERROR:
2157 printf("%s: FATAL MEMORY PARITY ERROR\n",
2158 mly->mly_dv.dv_xname);
2159 return (ENXIO);
2160
2161 default:
2162 printf("%s: unknown initialization code 0x%x\n",
2163 mly->mly_dv.dv_xname, error);
2164 break;
2165 }
2166 }
2167
2168 return (0);
2169 }
2170
2171 /*
2172  * Space-fill a character string; writes exactly `len' bytes, no NUL.
2173  */
2174 static void
2175 mly_padstr(char *dst, const char *src, int len)
2176 {
2177
2178 while (len-- > 0) {
2179 if (*src != '\0')
2180 *dst++ = *src++;
2181 else
2182 *dst++ = ' ';
2183 }
2184 }
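
#ifdef notdef
	/*
	 * Illustrative fragment: padding "MYLEX" into an 8-byte INQUIRY
	 * vendor field (as mly_scsipi_complete() does above) yields
	 * "MYLEX   " - three trailing spaces, no terminating NUL.
	 */
	char vendor[8];

	mly_padstr(vendor, "MYLEX", sizeof(vendor));
#endif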
2185
2186 /*
2187 * Allocate DMA safe memory.
2188 */
2189 static int
2190 mly_dmamem_alloc(struct mly_softc *mly, int size, bus_dmamap_t *dmamap,
2191 void **kva, bus_addr_t *paddr, bus_dma_segment_t *seg)
2192 {
2193 int rseg, rv, state;
2194
2195 state = 0;
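/*
 * `state' records how many allocation steps have succeeded, so that
 * the error path below unwinds exactly those steps.
 */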
2196
2197 if ((rv = bus_dmamem_alloc(mly->mly_dmat, size, PAGE_SIZE, 0,
2198 seg, 1, &rseg, BUS_DMA_NOWAIT)) != 0) {
2199 printf("%s: dmamem_alloc = %d\n", mly->mly_dv.dv_xname, rv);
2200 goto bad;
2201 }
2202
2203 state++;
2204
2205 if ((rv = bus_dmamem_map(mly->mly_dmat, seg, 1, size, kva,
2206 BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) != 0) {
2207 printf("%s: dmamem_map = %d\n", mly->mly_dv.dv_xname, rv);
2208 goto bad;
2209 }
2210
2211 state++;
2212
2213 if ((rv = bus_dmamap_create(mly->mly_dmat, size, size, 1, 0,
2214 BUS_DMA_NOWAIT, dmamap)) != 0) {
2215 printf("%s: dmamap_create = %d\n", mly->mly_dv.dv_xname, rv);
2216 goto bad;
2217 }
2218
2219 state++;
2220
2221 if ((rv = bus_dmamap_load(mly->mly_dmat, *dmamap, *kva, size,
2222 NULL, BUS_DMA_NOWAIT)) != 0) {
2223 printf("%s: dmamap_load = %d\n", mly->mly_dv.dv_xname, rv);
2224 goto bad;
2225 }
2226
2227 *paddr = (*dmamap)->dm_segs[0].ds_addr;
2228 memset(*kva, 0, size);
2229 return (0);
2230
2231 bad:
2232 if (state > 2)
2233 bus_dmamap_destroy(mly->mly_dmat, *dmamap);
2234 if (state > 1)
2235 bus_dmamem_unmap(mly->mly_dmat, *kva, size);
2236 if (state > 0)
2237 bus_dmamem_free(mly->mly_dmat, seg, 1);
2238
2239 return (rv);
2240 }
2241
2242 /*
2243 * Free DMA safe memory.
2244 */
2245 static void
2246 mly_dmamem_free(struct mly_softc *mly, int size, bus_dmamap_t dmamap,
2247 void *kva, bus_dma_segment_t *seg)
2248 {
2249
2250 bus_dmamap_unload(mly->mly_dmat, dmamap);
2251 bus_dmamap_destroy(mly->mly_dmat, dmamap);
2252 bus_dmamem_unmap(mly->mly_dmat, kva, size);
2253 bus_dmamem_free(mly->mly_dmat, seg, 1);
2254 }
2255
2256
2257 /*
2258 * Accept an open operation on the control device.
2259 */
2260 int
2261 mlyopen(dev_t dev, int flag, int mode, struct lwp *l)
2262 {
2263 struct mly_softc *mly;
2264
2265 if ((mly = device_lookup(&mly_cd, minor(dev))) == NULL)
2266 return (ENXIO);
2267 if ((mly->mly_state & MLY_STATE_INITOK) == 0)
2268 return (ENXIO);
2269 if ((mly->mly_state & MLY_STATE_OPEN) != 0)
2270 return (EBUSY);
2271
2272 mly->mly_state |= MLY_STATE_OPEN;
2273 return (0);
2274 }
2275
2276 /*
2277 * Accept the last close on the control device.
2278 */
2279 int
2280 mlyclose(dev_t dev, int flag, int mode,
2281 struct lwp *l)
2282 {
2283 struct mly_softc *mly;
2284
2285 mly = device_lookup(&mly_cd, minor(dev));
2286 mly->mly_state &= ~MLY_STATE_OPEN;
2287 return (0);
2288 }
2289
2290 /*
2291 * Handle control operations.
2292 */
2293 int
2294 mlyioctl(dev_t dev, u_long cmd, void *data, int flag,
2295 struct lwp *l)
2296 {
2297 struct mly_softc *mly;
2298 int rv;
2299
2300 mly = device_lookup(&mly_cd, minor(dev));
2301
2302 switch (cmd) {
2303 case MLYIO_COMMAND:
2304 rv = kauth_authorize_device_passthru(l->l_cred, dev,
2305 KAUTH_REQ_DEVICE_RAWIO_PASSTHRU_ALL, data);
2306 if (rv)
2307 break;
2308
2309 rv = mly_user_command(mly, (void *)data);
2310 break;
2311 case MLYIO_HEALTH:
2312 rv = mly_user_health(mly, (void *)data);
2313 break;
2314 default:
2315 rv = ENOTTY;
2316 break;
2317 }
2318
2319 return (rv);
2320 }
2321
2322 /*
2323 * Execute a command passed in from userspace.
2324 *
2325 * The control structure contains the actual command for the controller, as
2326 * well as the user-space data pointer and data size, and an optional sense
2327 * buffer size/pointer. On completion, the data size is adjusted to the
2328 * command residual, and the sense buffer size to the size of the returned
2329 * sense data.
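*
* The sign of DataTransferLength selects the direction: a positive
* value requests data from the controller into the user buffer; a
* negative value sends abs(DataTransferLength) bytes of user data to it.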
2330 */
2331 static int
2332 mly_user_command(struct mly_softc *mly, struct mly_user_command *uc)
2333 {
2334 struct mly_ccb *mc;
2335 int rv, mapped;
2336
2337 if ((rv = mly_ccb_alloc(mly, &mc)) != 0)
2338 return (rv);
2339
2340 mapped = 0;
2341 mc->mc_data = NULL;
2342
2343 /*
2344 * Handle data size/direction.
2345 */
2346 if ((mc->mc_length = abs(uc->DataTransferLength)) != 0) {
2347 if (mc->mc_length > MAXPHYS) {
2348 rv = EINVAL;
2349 goto out;
2350 }
2351
2352 mc->mc_data = malloc(mc->mc_length, M_DEVBUF, M_WAITOK);
2353 if (mc->mc_data == NULL) {
2354 rv = ENOMEM;
2355 goto out;
2356 }
2357
2358 if (uc->DataTransferLength > 0) {
2359 mc->mc_flags |= MLY_CCB_DATAIN;
2360 memset(mc->mc_data, 0, mc->mc_length);
2361 }
2362
2363 if (uc->DataTransferLength < 0) {
2364 mc->mc_flags |= MLY_CCB_DATAOUT;
2365 rv = copyin(uc->DataTransferBuffer, mc->mc_data,
2366 mc->mc_length);
2367 if (rv != 0)
2368 goto out;
2369 }
2370
2371 if ((rv = mly_ccb_map(mly, mc)) != 0)
2372 goto out;
2373 mapped = 1;
2374 }
2375
2376 /* Copy in the command and execute it. */
2377 memcpy(mc->mc_packet, &uc->CommandMailbox, sizeof(uc->CommandMailbox));
2378
2379 if ((rv = mly_ccb_wait(mly, mc, 60000)) != 0)
2380 goto out;
2381
2382 /* Return the data to userspace. */
2383 if (uc->DataTransferLength > 0) {
2384 rv = copyout(mc->mc_data, uc->DataTransferBuffer,
2385 mc->mc_length);
2386 if (rv != 0)
2387 goto out;
2388 }
2389
2390 /* Return the sense buffer to userspace. */
2391 if (uc->RequestSenseLength > 0 && mc->mc_sense > 0) {
2392 rv = copyout(mc->mc_packet, uc->RequestSenseBuffer,
2393 min(uc->RequestSenseLength, mc->mc_sense));
2394 if (rv != 0)
2395 goto out;
2396 }
2397
2398 /* Return command results to userspace (caller will copy out). */
2399 uc->DataTransferLength = mc->mc_resid;
2400 uc->RequestSenseLength = min(uc->RequestSenseLength, mc->mc_sense);
2401 uc->CommandStatus = mc->mc_status;
2402 rv = 0;
2403
2404 out:
2405 if (mapped)
2406 mly_ccb_unmap(mly, mc);
2407 if (mc->mc_data != NULL)
2408 free(mc->mc_data, M_DEVBUF);
2409 mly_ccb_free(mly, mc);
2410
2411 return (rv);
2412 }
2413
2414 /*
2415 * Return health status to userspace. If the health change index in the
2416 * user structure does not match that currently exported by the controller,
2417 * we return the current status immediately. Otherwise, we block until
2418 * either interrupted or new status is delivered.
2419 */
2420 static int
2421 mly_user_health(struct mly_softc *mly, struct mly_user_health *uh)
2422 {
2423 struct mly_health_status mh;
2424 int rv, s;
2425
2426 /* Fetch the current health status from userspace. */
2427 rv = copyin(uh->HealthStatusBuffer, &mh, sizeof(mh));
2428 if (rv != 0)
2429 return (rv);
2430
2431 	/* Sleep until the health change counter advances (or a signal). */
2432 s = splbio();
2433 if (mly->mly_event_change == mh.change_counter)
2434 rv = tsleep(&mly->mly_event_change, PRIBIO | PCATCH,
2435 "mlyhealth", 0);
2436 splx(s);
2437
2438 if (rv == 0) {
2439 /*
2440 * Copy the controller's health status buffer out (there is
2441 * a race here if it changes again).
2442 */
2443 		rv = copyout(&mly->mly_mmbox->mmm_health.status,
2444 		    uh->HealthStatusBuffer, sizeof(mh)); /* struct, not pointer, size */
2445 }
2446
2447 return (rv);
2448 }
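
#ifdef notdef
/*
 * Illustrative only: a userland monitor could block on health changes
 * with MLYIO_HEALTH like this.  `fd' is an open descriptor for the
 * controller's control device; setup and error handling are omitted.
 */
	struct mly_user_health uh;
	struct mly_health_status hs;

	memset(&hs, 0, sizeof(hs));
	uh.HealthStatusBuffer = &hs;

	for (;;) {
		/* Blocks while hs.change_counter is still current. */
		if (ioctl(fd, MLYIO_HEALTH, &uh) != 0)
			break;
		/* `hs' now holds the controller's latest health status. */
	}
#endif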
2449