/*	$NetBSD: mly.c,v 1.37.2.1 2008/05/18 12:34:20 yamt Exp $	*/

/*-
 * Copyright (c) 2001 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran, Thor Lancelot Simon, and Eric Haszlakiewicz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c) 2000, 2001 Michael Smith
 * Copyright (c) 2000 BSDi
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from FreeBSD: mly.c,v 1.8 2001/07/14 00:12:22 msmith Exp
 */

/*
 * Driver for the Mylex AcceleRAID and eXtremeRAID family with v6 firmware.
 *
 * TODO:
 *
 * o Make mly->mly_btl a hash, then MLY_BTL_RESCAN becomes a SIMPLEQ.
 * o Handle FC and multiple LUNs.
 * o Fix mmbox usage.
 * o Fix transfer speed fudge.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: mly.c,v 1.37.2.1 2008/05/18 12:34:20 yamt Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/kernel.h>
#include <sys/queue.h>
#include <sys/buf.h>
#include <sys/endian.h>
#include <sys/conf.h>
#include <sys/malloc.h>
#include <sys/ioctl.h>
#include <sys/scsiio.h>
#include <sys/kthread.h>
#include <sys/kauth.h>

#include <uvm/uvm_extern.h>

#include <sys/bus.h>

#include <dev/scsipi/scsi_all.h>
#include <dev/scsipi/scsipi_all.h>
#include <dev/scsipi/scsiconf.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/mlyreg.h>
#include <dev/pci/mlyio.h>
#include <dev/pci/mlyvar.h>
#include <dev/pci/mly_tables.h>

static void	mly_attach(struct device *, struct device *, void *);
static int	mly_match(struct device *, struct cfdata *, void *);
static const struct mly_ident *mly_find_ident(struct pci_attach_args *);
static int	mly_fwhandshake(struct mly_softc *);
static int	mly_flush(struct mly_softc *);
static int	mly_intr(void *);
static void	mly_shutdown(void *);

static int	mly_alloc_ccbs(struct mly_softc *);
static void	mly_check_event(struct mly_softc *);
static void	mly_complete_event(struct mly_softc *, struct mly_ccb *);
static void	mly_complete_rescan(struct mly_softc *, struct mly_ccb *);
static int	mly_dmamem_alloc(struct mly_softc *, int, bus_dmamap_t *,
		    void **, bus_addr_t *, bus_dma_segment_t *);
static void	mly_dmamem_free(struct mly_softc *, int, bus_dmamap_t,
		    void *, bus_dma_segment_t *);
static int	mly_enable_mmbox(struct mly_softc *);
static void	mly_fetch_event(struct mly_softc *);
static int	mly_get_controllerinfo(struct mly_softc *);
static int	mly_get_eventstatus(struct mly_softc *);
static int	mly_ioctl(struct mly_softc *, struct mly_cmd_ioctl *,
		    void **, size_t, void *, size_t *);
static void	mly_padstr(char *, const char *, int);
static void	mly_process_event(struct mly_softc *, struct mly_event *);
static void	mly_release_ccbs(struct mly_softc *);
static int	mly_scan_btl(struct mly_softc *, int, int);
static void	mly_scan_channel(struct mly_softc *, int);
static void	mly_thread(void *);

static int	mly_ccb_alloc(struct mly_softc *, struct mly_ccb **);
static void	mly_ccb_complete(struct mly_softc *, struct mly_ccb *);
static void	mly_ccb_enqueue(struct mly_softc *, struct mly_ccb *);
static void	mly_ccb_free(struct mly_softc *, struct mly_ccb *);
static int	mly_ccb_map(struct mly_softc *, struct mly_ccb *);
static int	mly_ccb_poll(struct mly_softc *, struct mly_ccb *, int);
static int	mly_ccb_submit(struct mly_softc *, struct mly_ccb *);
static void	mly_ccb_unmap(struct mly_softc *, struct mly_ccb *);
static int	mly_ccb_wait(struct mly_softc *, struct mly_ccb *, int);

static void	mly_get_xfer_mode(struct mly_softc *, int,
		    struct scsipi_xfer_mode *);
static void	mly_scsipi_complete(struct mly_softc *, struct mly_ccb *);
static int	mly_scsipi_ioctl(struct scsipi_channel *, u_long, void *,
		    int, struct proc *);
static void	mly_scsipi_minphys(struct buf *);
static void	mly_scsipi_request(struct scsipi_channel *,
		    scsipi_adapter_req_t, void *);

static int	mly_user_command(struct mly_softc *, struct mly_user_command *);
static int	mly_user_health(struct mly_softc *, struct mly_user_health *);

extern struct cfdriver mly_cd;

CFATTACH_DECL(mly, sizeof(struct mly_softc),
    mly_match, mly_attach, NULL, NULL);

dev_type_open(mlyopen);
dev_type_close(mlyclose);
dev_type_ioctl(mlyioctl);

const struct cdevsw mly_cdevsw = {
	mlyopen, mlyclose, noread, nowrite, mlyioctl,
	nostop, notty, nopoll, nommap, nokqfilter, D_OTHER,
};

static struct mly_ident {
	u_short	vendor;
	u_short	product;
	u_short	subvendor;
	u_short	subproduct;
	int	hwif;
	const char	*desc;
} const mly_ident[] = {
	{
		PCI_VENDOR_MYLEX,
		PCI_PRODUCT_MYLEX_EXTREMERAID,
		PCI_VENDOR_MYLEX,
		0x0040,
		MLY_HWIF_STRONGARM,
		"eXtremeRAID 2000"
	},
	{
		PCI_VENDOR_MYLEX,
		PCI_PRODUCT_MYLEX_EXTREMERAID,
		PCI_VENDOR_MYLEX,
		0x0030,
		MLY_HWIF_STRONGARM,
		"eXtremeRAID 3000"
	},
	{
		PCI_VENDOR_MYLEX,
		PCI_PRODUCT_MYLEX_ACCELERAID,
		PCI_VENDOR_MYLEX,
		0x0050,
		MLY_HWIF_I960RX,
		"AcceleRAID 352"
	},
	{
		PCI_VENDOR_MYLEX,
		PCI_PRODUCT_MYLEX_ACCELERAID,
		PCI_VENDOR_MYLEX,
		0x0052,
		MLY_HWIF_I960RX,
		"AcceleRAID 170"
	},
	{
		PCI_VENDOR_MYLEX,
		PCI_PRODUCT_MYLEX_ACCELERAID,
		PCI_VENDOR_MYLEX,
		0x0054,
		MLY_HWIF_I960RX,
		"AcceleRAID 160"
	},
};

static void	*mly_sdh;

/*
 * Try to find a `mly_ident' entry corresponding to this board.
 */
static const struct mly_ident *
mly_find_ident(struct pci_attach_args *pa)
{
	const struct mly_ident *mpi, *maxmpi;
	pcireg_t reg;

	mpi = mly_ident;
	maxmpi = mpi + sizeof(mly_ident) / sizeof(mly_ident[0]);

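	/* Boards operating in I2O mode are left to the I2O subsystem. */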
	if (PCI_CLASS(pa->pa_class) == PCI_CLASS_I2O)
		return (NULL);

	for (; mpi < maxmpi; mpi++) {
		if (PCI_VENDOR(pa->pa_id) != mpi->vendor ||
		    PCI_PRODUCT(pa->pa_id) != mpi->product)
			continue;

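		/* A subvendor ID of zero matches any subsystem ID. */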
		if (mpi->subvendor == 0x0000)
			return (mpi);

		reg = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);

		if (PCI_VENDOR(reg) == mpi->subvendor &&
		    PCI_PRODUCT(reg) == mpi->subproduct)
			return (mpi);
	}

	return (NULL);
}

/*
 * Match a supported board.
 */
static int
mly_match(struct device *parent, struct cfdata *cfdata, void *aux)
{

	return (mly_find_ident(aux) != NULL);
}

/*
 * Attach a supported board.
 */
static void
mly_attach(struct device *parent, struct device *self, void *aux)
{
	struct pci_attach_args *pa;
	struct mly_softc *mly;
	struct mly_ioctl_getcontrollerinfo *mi;
	const struct mly_ident *ident;
	pci_chipset_tag_t pc;
	pci_intr_handle_t ih;
	bus_space_handle_t memh, ioh;
	bus_space_tag_t memt, iot;
	pcireg_t reg;
	const char *intrstr;
	int ior, memr, i, rv, state;
	struct scsipi_adapter *adapt;
	struct scsipi_channel *chan;

	mly = (struct mly_softc *)self;
	pa = aux;
	pc = pa->pa_pc;
	ident = mly_find_ident(pa);
	state = 0;

	mly->mly_dmat = pa->pa_dmat;
	mly->mly_hwif = ident->hwif;

	printf(": Mylex %s\n", ident->desc);

	/*
	 * Map the PCI register window.
	 */
	memr = -1;
	ior = -1;

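	/* Scan the first two BARs; memory space is preferred over i/o. */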
	for (i = 0x10; i <= 0x14; i += 4) {
		reg = pci_conf_read(pa->pa_pc, pa->pa_tag, i);

		if (PCI_MAPREG_TYPE(reg) == PCI_MAPREG_TYPE_IO) {
			if (ior == -1 && PCI_MAPREG_IO_SIZE(reg) != 0)
				ior = i;
		} else {
			if (memr == -1 && PCI_MAPREG_MEM_SIZE(reg) != 0)
				memr = i;
		}
	}

	if (memr != -1)
		if (pci_mapreg_map(pa, memr, PCI_MAPREG_TYPE_MEM, 0,
		    &memt, &memh, NULL, NULL))
			memr = -1;
	if (ior != -1)
		if (pci_mapreg_map(pa, ior, PCI_MAPREG_TYPE_IO, 0,
		    &iot, &ioh, NULL, NULL))
			ior = -1;

	if (memr != -1) {
		mly->mly_iot = memt;
		mly->mly_ioh = memh;
	} else if (ior != -1) {
		mly->mly_iot = iot;
		mly->mly_ioh = ioh;
	} else {
		aprint_error_dev(self, "can't map i/o or memory space\n");
		return;
	}

	/*
	 * Enable the device.
	 */
	reg = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG,
	    reg | PCI_COMMAND_MASTER_ENABLE);

	/*
	 * Map and establish the interrupt.
	 */
	if (pci_intr_map(pa, &ih)) {
		aprint_error_dev(self, "can't map interrupt\n");
		return;
	}
	intrstr = pci_intr_string(pc, ih);
	mly->mly_ih = pci_intr_establish(pc, ih, IPL_BIO, mly_intr, mly);
	if (mly->mly_ih == NULL) {
		aprint_error_dev(self, "can't establish interrupt");
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		return;
	}

	if (intrstr != NULL)
		printf("%s: interrupting at %s\n", device_xname(&mly->mly_dv),
		    intrstr);

	/*
	 * Take care of interface-specific tasks.
	 */
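	/*
	 * Note that mly_doorbell_true records the doorbell polarity: the
	 * StrongARM interface apparently uses the inverted sense (0xff)
	 * relative to the i960RX (0x00).
	 */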
	switch (mly->mly_hwif) {
	case MLY_HWIF_I960RX:
		mly->mly_doorbell_true = 0x00;
		mly->mly_cmd_mailbox = MLY_I960RX_COMMAND_MAILBOX;
		mly->mly_status_mailbox = MLY_I960RX_STATUS_MAILBOX;
		mly->mly_idbr = MLY_I960RX_IDBR;
		mly->mly_odbr = MLY_I960RX_ODBR;
		mly->mly_error_status = MLY_I960RX_ERROR_STATUS;
		mly->mly_interrupt_status = MLY_I960RX_INTERRUPT_STATUS;
		mly->mly_interrupt_mask = MLY_I960RX_INTERRUPT_MASK;
		break;

	case MLY_HWIF_STRONGARM:
		mly->mly_doorbell_true = 0xff;
		mly->mly_cmd_mailbox = MLY_STRONGARM_COMMAND_MAILBOX;
		mly->mly_status_mailbox = MLY_STRONGARM_STATUS_MAILBOX;
		mly->mly_idbr = MLY_STRONGARM_IDBR;
		mly->mly_odbr = MLY_STRONGARM_ODBR;
		mly->mly_error_status = MLY_STRONGARM_ERROR_STATUS;
		mly->mly_interrupt_status = MLY_STRONGARM_INTERRUPT_STATUS;
		mly->mly_interrupt_mask = MLY_STRONGARM_INTERRUPT_MASK;
		break;
	}

	/*
	 * Allocate and map the scatter/gather lists.
	 */
	rv = mly_dmamem_alloc(mly, MLY_SGL_SIZE * MLY_MAX_CCBS,
	    &mly->mly_sg_dmamap, (void **)&mly->mly_sg,
	    &mly->mly_sg_busaddr, &mly->mly_sg_seg);
	if (rv) {
		printf("%s: unable to allocate S/G maps\n",
		    device_xname(&mly->mly_dv));
		goto bad;
	}
	state++;

	/*
	 * Allocate and map the memory mailbox.
	 */
	rv = mly_dmamem_alloc(mly, sizeof(struct mly_mmbox),
	    &mly->mly_mmbox_dmamap, (void **)&mly->mly_mmbox,
	    &mly->mly_mmbox_busaddr, &mly->mly_mmbox_seg);
	if (rv) {
		aprint_error_dev(&mly->mly_dv, "unable to allocate mailboxes\n");
		goto bad;
	}
	state++;

	/*
	 * Initialise per-controller queues.
	 */
	SLIST_INIT(&mly->mly_ccb_free);
	SIMPLEQ_INIT(&mly->mly_ccb_queue);

	/*
	 * Disable interrupts before we start talking to the controller.
	 */
	mly_outb(mly, mly->mly_interrupt_mask, MLY_INTERRUPT_MASK_DISABLE);

	/*
	 * Wait for the controller to come ready, handshaking with the
	 * firmware if required.  This is typically only necessary on
	 * platforms where the controller BIOS does not run.
	 */
	if (mly_fwhandshake(mly)) {
		aprint_error_dev(&mly->mly_dv, "unable to bring controller online\n");
		goto bad;
	}

	/*
	 * Allocate initial command buffers, obtain controller feature
	 * information, and then reallocate command buffers, since we'll
	 * know how many we want.
	 */
	if (mly_alloc_ccbs(mly)) {
		aprint_error_dev(&mly->mly_dv, "unable to allocate CCBs\n");
		goto bad;
	}
	state++;
	if (mly_get_controllerinfo(mly)) {
		aprint_error_dev(&mly->mly_dv, "unable to retrieve controller info\n");
		goto bad;
	}
	mly_release_ccbs(mly);
	if (mly_alloc_ccbs(mly)) {
		aprint_error_dev(&mly->mly_dv, "unable to allocate CCBs\n");
		state--;
		goto bad;
	}

	/*
	 * Get the current event counter for health purposes, populate the
	 * initial health status buffer.
	 */
	if (mly_get_eventstatus(mly)) {
		aprint_error_dev(&mly->mly_dv, "unable to retrieve event status\n");
		goto bad;
	}

	/*
	 * Enable memory-mailbox mode.
	 */
	if (mly_enable_mmbox(mly)) {
		aprint_error_dev(&mly->mly_dv, "unable to enable memory mailbox\n");
		goto bad;
	}

	/*
	 * Print a little information about the controller.
	 */
	mi = mly->mly_controllerinfo;

	printf("%s: %d physical channel%s, firmware %d.%02d-%d-%02d "
	    "(%02d%02d%02d%02d), %dMB RAM\n", device_xname(&mly->mly_dv),
	    mi->physical_channels_present,
	    (mi->physical_channels_present) > 1 ? "s" : "",
	    mi->fw_major, mi->fw_minor, mi->fw_turn, mi->fw_build,
	    mi->fw_century, mi->fw_year, mi->fw_month, mi->fw_day,
	    le16toh(mi->memory_size));

	/*
	 * Register our `shutdownhook'.
	 */
	if (mly_sdh == NULL)
		shutdownhook_establish(mly_shutdown, NULL);

	/*
	 * Clear any previous BTL information.  For each bus that scsipi
	 * wants to scan, we'll receive the SCBUSIOLLSCAN ioctl and retrieve
	 * all BTL info at that point.
	 */
	memset(&mly->mly_btl, 0, sizeof(mly->mly_btl));

	mly->mly_nchans = mly->mly_controllerinfo->physical_channels_present +
	    mly->mly_controllerinfo->virtual_channels_present;

	/*
	 * Attach to scsipi.
	 */
	adapt = &mly->mly_adapt;
	memset(adapt, 0, sizeof(*adapt));
	adapt->adapt_dev = &mly->mly_dv;
	adapt->adapt_nchannels = mly->mly_nchans;
	adapt->adapt_openings = mly->mly_ncmds - MLY_CCBS_RESV;
	adapt->adapt_max_periph = mly->mly_ncmds - MLY_CCBS_RESV;
	adapt->adapt_request = mly_scsipi_request;
	adapt->adapt_minphys = mly_scsipi_minphys;
	adapt->adapt_ioctl = mly_scsipi_ioctl;

	for (i = 0; i < mly->mly_nchans; i++) {
		chan = &mly->mly_chans[i];
		memset(chan, 0, sizeof(*chan));
		chan->chan_adapter = adapt;
		chan->chan_bustype = &scsi_bustype;
		chan->chan_channel = i;
		chan->chan_ntargets = MLY_MAX_TARGETS;
		chan->chan_nluns = MLY_MAX_LUNS;
		chan->chan_id = mly->mly_controllerparam->initiator_id;
		chan->chan_flags = SCSIPI_CHAN_NOSETTLE;
		config_found(&mly->mly_dv, chan, scsiprint);
	}

	/*
	 * Now enable interrupts...
	 */
	mly_outb(mly, mly->mly_interrupt_mask, MLY_INTERRUPT_MASK_ENABLE);

	/*
	 * Finally, create our monitoring thread.
	 */
	mly->mly_state |= MLY_STATE_INITOK;
	rv = kthread_create(PRI_NONE, 0, NULL, mly_thread, mly,
	    &mly->mly_thread, "%s", device_xname(&mly->mly_dv));
	if (rv != 0)
		aprint_error_dev(&mly->mly_dv, "unable to create thread (%d)\n",
		    rv);
	return;

 bad:
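	/* state records progress: 1 = S/G lists, 2 = +mailboxes, 3 = +CCBs. */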
	if (state > 2)
		mly_release_ccbs(mly);
	if (state > 1)
		mly_dmamem_free(mly, sizeof(struct mly_mmbox),
		    mly->mly_mmbox_dmamap, (void *)mly->mly_mmbox,
		    &mly->mly_mmbox_seg);
	if (state > 0)
		mly_dmamem_free(mly, MLY_SGL_SIZE * MLY_MAX_CCBS,
		    mly->mly_sg_dmamap, (void *)mly->mly_sg,
		    &mly->mly_sg_seg);
}

/*
 * Scan all possible devices on the specified channel.
 */
static void
mly_scan_channel(struct mly_softc *mly, int bus)
{
	int s, target;

	for (target = 0; target < MLY_MAX_TARGETS; target++) {
		s = splbio();
		if (!mly_scan_btl(mly, bus, target)) {
			tsleep(&mly->mly_btl[bus][target], PRIBIO, "mlyscan",
			    0);
		}
		splx(s);
	}
}

/*
 * Shut down all configured `mly' devices.
 */
static void
mly_shutdown(void *cookie)
{
	struct mly_softc *mly;
	int i;

	for (i = 0; i < mly_cd.cd_ndevs; i++) {
		if ((mly = device_lookup(&mly_cd, i)) == NULL)
			continue;

		if (mly_flush(mly))
			aprint_error_dev(&mly->mly_dv, "unable to flush cache\n");
	}
}

/*
 * Fill in the mly_controllerinfo and mly_controllerparam fields in the
 * softc.
 */
static int
mly_get_controllerinfo(struct mly_softc *mly)
{
	struct mly_cmd_ioctl mci;
	int rv;

	/*
	 * Build the getcontrollerinfo ioctl and send it.
	 */
	memset(&mci, 0, sizeof(mci));
	mci.sub_ioctl = MDACIOCTL_GETCONTROLLERINFO;
	rv = mly_ioctl(mly, &mci, (void **)&mly->mly_controllerinfo,
	    sizeof(*mly->mly_controllerinfo), NULL, NULL);
	if (rv != 0)
		return (rv);

	/*
	 * Build the getcontrollerparameter ioctl and send it.
	 */
	memset(&mci, 0, sizeof(mci));
	mci.sub_ioctl = MDACIOCTL_GETCONTROLLERPARAMETER;
	rv = mly_ioctl(mly, &mci, (void **)&mly->mly_controllerparam,
	    sizeof(*mly->mly_controllerparam), NULL, NULL);

	return (rv);
}

/*
 * Rescan a device, possibly as a consequence of getting an event which
 * suggests that it may have changed.  Must be called with interrupts
 * blocked.
 */
static int
mly_scan_btl(struct mly_softc *mly, int bus, int target)
{
	struct mly_ccb *mc;
	struct mly_cmd_ioctl *mci;
	int rv;

	if (target == mly->mly_controllerparam->initiator_id) {
		mly->mly_btl[bus][target].mb_flags = MLY_BTL_PROTECTED;
		return (EIO);
	}

	/* Don't re-scan if a scan is already in progress. */
	if ((mly->mly_btl[bus][target].mb_flags & MLY_BTL_SCANNING) != 0)
		return (EBUSY);

	/* Get a command. */
	if ((rv = mly_ccb_alloc(mly, &mc)) != 0)
		return (rv);

	/* Set up the data buffer. */
	mc->mc_data = malloc(sizeof(union mly_devinfo),
	    M_DEVBUF, M_NOWAIT|M_ZERO);

	mc->mc_flags |= MLY_CCB_DATAIN;
	mc->mc_complete = mly_complete_rescan;

	/*
	 * Build the ioctl.
	 */
	mci = (struct mly_cmd_ioctl *)&mc->mc_packet->ioctl;
	mci->opcode = MDACMD_IOCTL;
	mci->timeout = 30 | MLY_TIMEOUT_SECONDS;
	memset(&mci->param, 0, sizeof(mci->param));

	if (MLY_BUS_IS_VIRTUAL(mly, bus)) {
		mc->mc_length = sizeof(struct mly_ioctl_getlogdevinfovalid);
		mci->data_size = htole32(mc->mc_length);
		mci->sub_ioctl = MDACIOCTL_GETLOGDEVINFOVALID;
		_lto3l(MLY_LOGADDR(0, MLY_LOGDEV_ID(mly, bus, target)),
		    mci->addr);
	} else {
		mc->mc_length = sizeof(struct mly_ioctl_getphysdevinfovalid);
		mci->data_size = htole32(mc->mc_length);
		mci->sub_ioctl = MDACIOCTL_GETPHYSDEVINFOVALID;
		_lto3l(MLY_PHYADDR(0, bus, target, 0), mci->addr);
	}

	/*
	 * Dispatch the command.
	 */
	if ((rv = mly_ccb_map(mly, mc)) != 0) {
		free(mc->mc_data, M_DEVBUF);
		mly_ccb_free(mly, mc);
		return (rv);
	}

	mly->mly_btl[bus][target].mb_flags |= MLY_BTL_SCANNING;
	mly_ccb_enqueue(mly, mc);
	return (0);
}

/*
 * Handle the completion of a rescan operation.
 */
static void
mly_complete_rescan(struct mly_softc *mly, struct mly_ccb *mc)
{
	struct mly_ioctl_getlogdevinfovalid *ldi;
	struct mly_ioctl_getphysdevinfovalid *pdi;
	struct mly_cmd_ioctl *mci;
	struct mly_btl btl, *btlp;
	struct scsipi_xfer_mode xm;
	int bus, target, rescan;
	u_int tmp;

	mly_ccb_unmap(mly, mc);

	/*
	 * Recover the bus and target from the command.  We need these even
	 * in the case where we don't have a useful response.
	 */
	mci = (struct mly_cmd_ioctl *)&mc->mc_packet->ioctl;
	tmp = _3ltol(mci->addr);
	rescan = 0;

	if (mci->sub_ioctl == MDACIOCTL_GETLOGDEVINFOVALID) {
		bus = MLY_LOGDEV_BUS(mly, MLY_LOGADDR_DEV(tmp));
		target = MLY_LOGDEV_TARGET(mly, MLY_LOGADDR_DEV(tmp));
	} else {
		bus = MLY_PHYADDR_CHANNEL(tmp);
		target = MLY_PHYADDR_TARGET(tmp);
	}

	btlp = &mly->mly_btl[bus][target];

	/* The default result is 'no device'. */
	memset(&btl, 0, sizeof(btl));
	btl.mb_flags = MLY_BTL_PROTECTED;

	/* If the rescan completed OK, we have possibly-new BTL data. */
	if (mc->mc_status != 0)
		goto out;

	if (mc->mc_length == sizeof(*ldi)) {
		ldi = (struct mly_ioctl_getlogdevinfovalid *)mc->mc_data;
		tmp = le32toh(ldi->logical_device_number);

		if (MLY_LOGDEV_BUS(mly, tmp) != bus ||
		    MLY_LOGDEV_TARGET(mly, tmp) != target) {
#ifdef MLYDEBUG
			printf("%s: WARNING: BTL rescan (logical) for %d:%d "
			    "returned data for %d:%d instead\n",
			    device_xname(&mly->mly_dv), bus, target,
			    MLY_LOGDEV_BUS(mly, tmp),
			    MLY_LOGDEV_TARGET(mly, tmp));
#endif
			goto out;
		}

		btl.mb_flags = MLY_BTL_LOGICAL | MLY_BTL_TQING;
		btl.mb_type = ldi->raid_level;
		btl.mb_state = ldi->state;
	} else if (mc->mc_length == sizeof(*pdi)) {
		pdi = (struct mly_ioctl_getphysdevinfovalid *)mc->mc_data;

		if (pdi->channel != bus || pdi->target != target) {
#ifdef MLYDEBUG
			printf("%s: WARNING: BTL rescan (physical) for %d:%d "
			    "returned data for %d:%d instead\n",
			    device_xname(&mly->mly_dv),
			    bus, target, pdi->channel, pdi->target);
#endif
			goto out;
		}

		btl.mb_flags = MLY_BTL_PHYSICAL;
		btl.mb_type = MLY_DEVICE_TYPE_PHYSICAL;
		btl.mb_state = pdi->state;
		btl.mb_speed = pdi->speed;
		btl.mb_width = pdi->width;

		if (pdi->state != MLY_DEVICE_STATE_UNCONFIGURED)
			btl.mb_flags |= MLY_BTL_PROTECTED;
		if (pdi->command_tags != 0)
			btl.mb_flags |= MLY_BTL_TQING;
	} else {
		printf("%s: BTL rescan result invalid\n", device_xname(&mly->mly_dv));
		goto out;
	}

	/* Decide whether we need to rescan the device. */
	if (btl.mb_flags != btlp->mb_flags ||
	    btl.mb_speed != btlp->mb_speed ||
	    btl.mb_width != btlp->mb_width)
		rescan = 1;

 out:
	*btlp = btl;

	if (rescan && (btl.mb_flags & MLY_BTL_PROTECTED) == 0) {
		xm.xm_target = target;
		mly_get_xfer_mode(mly, bus, &xm);
		/* XXX SCSI mid-layer rescan goes here. */
	}

	/* Wake anybody waiting on the device to be rescanned. */
	wakeup(btlp);

	free(mc->mc_data, M_DEVBUF);
	mly_ccb_free(mly, mc);
}

/*
 * Get the current health status and set the 'next event' counter to suit.
 */
static int
mly_get_eventstatus(struct mly_softc *mly)
{
	struct mly_cmd_ioctl mci;
	struct mly_health_status *mh;
	int rv;

	/* Build the gethealthstatus ioctl and send it. */
	memset(&mci, 0, sizeof(mci));
	mh = NULL;
	mci.sub_ioctl = MDACIOCTL_GETHEALTHSTATUS;

	rv = mly_ioctl(mly, &mci, (void *)&mh, sizeof(*mh), NULL, NULL);
	if (rv)
		return (rv);

	/* Get the event counter. */
	mly->mly_event_change = le32toh(mh->change_counter);
	mly->mly_event_waiting = le32toh(mh->next_event);
	mly->mly_event_counter = le32toh(mh->next_event);

	/* Save the health status into the memory mailbox */
	memcpy(&mly->mly_mmbox->mmm_health.status, mh, sizeof(*mh));

	bus_dmamap_sync(mly->mly_dmat, mly->mly_mmbox_dmamap,
	    offsetof(struct mly_mmbox, mmm_health),
	    sizeof(mly->mly_mmbox->mmm_health),
	    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);

	free(mh, M_DEVBUF);
	return (0);
}

/*
 * Enable memory mailbox mode.
 */
static int
mly_enable_mmbox(struct mly_softc *mly)
{
	struct mly_cmd_ioctl mci;
	u_int8_t *sp;
	u_int64_t tmp;
	int rv;

	/* Build the ioctl and send it. */
	memset(&mci, 0, sizeof(mci));
	mci.sub_ioctl = MDACIOCTL_SETMEMORYMAILBOX;

	/* Set buffer addresses. */
	tmp = mly->mly_mmbox_busaddr + offsetof(struct mly_mmbox, mmm_command);
	mci.param.setmemorymailbox.command_mailbox_physaddr = htole64(tmp);

	tmp = mly->mly_mmbox_busaddr + offsetof(struct mly_mmbox, mmm_status);
	mci.param.setmemorymailbox.status_mailbox_physaddr = htole64(tmp);

	tmp = mly->mly_mmbox_busaddr + offsetof(struct mly_mmbox, mmm_health);
	mci.param.setmemorymailbox.health_buffer_physaddr = htole64(tmp);

	/* Set buffer sizes - abuse of data_size field is revolting. */
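	/* The sizes appear to be passed in units of 1kB, hence the >> 10. */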
	sp = (u_int8_t *)&mci.data_size;
	sp[0] = (sizeof(union mly_cmd_packet) * MLY_MMBOX_COMMANDS) >> 10;
	sp[1] = (sizeof(union mly_status_packet) * MLY_MMBOX_STATUS) >> 10;
	mci.param.setmemorymailbox.health_buffer_size =
	    sizeof(union mly_health_region) >> 10;

	rv = mly_ioctl(mly, &mci, NULL, 0, NULL, NULL);
	if (rv)
		return (rv);

	mly->mly_state |= MLY_STATE_MMBOX_ACTIVE;
	return (0);
}

/*
 * Flush all pending I/O from the controller.
 */
static int
mly_flush(struct mly_softc *mly)
{
	struct mly_cmd_ioctl mci;

	/* Build the ioctl */
	memset(&mci, 0, sizeof(mci));
	mci.sub_ioctl = MDACIOCTL_FLUSHDEVICEDATA;
	mci.param.deviceoperation.operation_device =
	    MLY_OPDEVICE_PHYSICAL_CONTROLLER;

	/* Pass it off to the controller */
	return (mly_ioctl(mly, &mci, NULL, 0, NULL, NULL));
}

/*
 * Perform an ioctl command.
 *
 * If (data) is not NULL, the command requires data transfer to the
 * controller.  If (*data) is NULL the command requires data transfer from
 * the controller, and we will allocate a buffer for it.
 */
static int
mly_ioctl(struct mly_softc *mly, struct mly_cmd_ioctl *ioctl, void **data,
	  size_t datasize, void *sense_buffer, size_t *sense_length)
{
	struct mly_ccb *mc;
	struct mly_cmd_ioctl *mci;
	u_int8_t status;
	int rv;

	mc = NULL;
	if ((rv = mly_ccb_alloc(mly, &mc)) != 0)
		goto bad;

	/*
	 * Copy the ioctl structure, but save some important fields and then
	 * fixup.
	 */
	mci = &mc->mc_packet->ioctl;
	ioctl->sense_buffer_address = htole64(mci->sense_buffer_address);
	ioctl->maximum_sense_size = mci->maximum_sense_size;
	*mci = *ioctl;
	mci->opcode = MDACMD_IOCTL;
	mci->timeout = 30 | MLY_TIMEOUT_SECONDS;

	/* Handle the data buffer. */
	if (data != NULL) {
		if (*data == NULL) {
			/* Allocate data buffer */
			mc->mc_data = malloc(datasize, M_DEVBUF, M_NOWAIT);
			mc->mc_flags |= MLY_CCB_DATAIN;
		} else {
			mc->mc_data = *data;
			mc->mc_flags |= MLY_CCB_DATAOUT;
		}
		mc->mc_length = datasize;
		mc->mc_packet->generic.data_size = htole32(datasize);
	}

	/* Run the command. */
	if (datasize > 0)
		if ((rv = mly_ccb_map(mly, mc)) != 0)
			goto bad;
	rv = mly_ccb_poll(mly, mc, 30000);
	if (datasize > 0)
		mly_ccb_unmap(mly, mc);
	if (rv != 0)
		goto bad;

	/* Clean up and return any data. */
	status = mc->mc_status;

	if (status != 0)
		printf("mly_ioctl: command status %d\n", status);

	if (mc->mc_sense > 0 && sense_buffer != NULL) {
		memcpy(sense_buffer, mc->mc_packet, mc->mc_sense);
		*sense_length = mc->mc_sense;
		goto bad;
	}

	/* Should we return a data pointer? */
	if (data != NULL && *data == NULL)
		*data = mc->mc_data;

	/* Command completed OK. */
	rv = (status != 0 ? EIO : 0);

 bad:
	if (mc != NULL) {
		/* Do we need to free a data buffer we allocated? */
		if (rv != 0 && mc->mc_data != NULL &&
		    (data == NULL || *data == NULL))
			free(mc->mc_data, M_DEVBUF);
		mly_ccb_free(mly, mc);
	}

	return (rv);
}

/*
 * Check for event(s) outstanding in the controller.
 */
static void
mly_check_event(struct mly_softc *mly)
{

	bus_dmamap_sync(mly->mly_dmat, mly->mly_mmbox_dmamap,
	    offsetof(struct mly_mmbox, mmm_health),
	    sizeof(mly->mly_mmbox->mmm_health),
	    BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);

	/*
	 * The controller may have updated the health status information, so
	 * check for it here.  Note that the counters are all in host
	 * memory, so this check is very cheap.  Also note that we depend on
	 * this check also being made on command completion.
	 */
	if (le32toh(mly->mly_mmbox->mmm_health.status.change_counter) !=
	    mly->mly_event_change) {
		mly->mly_event_change =
		    le32toh(mly->mly_mmbox->mmm_health.status.change_counter);
		mly->mly_event_waiting =
		    le32toh(mly->mly_mmbox->mmm_health.status.next_event);

		/* Wake up anyone that might be interested in this. */
		wakeup(&mly->mly_event_change);
	}

	bus_dmamap_sync(mly->mly_dmat, mly->mly_mmbox_dmamap,
	    offsetof(struct mly_mmbox, mmm_health),
	    sizeof(mly->mly_mmbox->mmm_health),
	    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);

	if (mly->mly_event_counter != mly->mly_event_waiting)
		mly_fetch_event(mly);
}

/*
 * Fetch one event from the controller.  If we fail due to resource
 * starvation, we'll be retried the next time a command completes.
 */
static void
mly_fetch_event(struct mly_softc *mly)
{
	struct mly_ccb *mc;
	struct mly_cmd_ioctl *mci;
	int s;
	u_int32_t event;

	/* Get a command. */
	if (mly_ccb_alloc(mly, &mc))
		return;

	/* Set up the data buffer. */
	mc->mc_data = malloc(sizeof(struct mly_event), M_DEVBUF,
	    M_NOWAIT|M_ZERO);

	mc->mc_length = sizeof(struct mly_event);
	mc->mc_flags |= MLY_CCB_DATAIN;
	mc->mc_complete = mly_complete_event;

	/*
	 * Get an event number to fetch.  It's possible that we've raced
	 * with another context for the last event, in which case there will
	 * be no more events.
	 */
	s = splbio();
	if (mly->mly_event_counter == mly->mly_event_waiting) {
		splx(s);
		free(mc->mc_data, M_DEVBUF);
		mly_ccb_free(mly, mc);
		return;
	}
	event = mly->mly_event_counter++;
	splx(s);

	/*
	 * Build the ioctl.
	 *
	 * At this point we are committed to sending this request, as it
	 * will be the only one constructed for this particular event
	 * number.
	 */
	mci = (struct mly_cmd_ioctl *)&mc->mc_packet->ioctl;
	mci->opcode = MDACMD_IOCTL;
	mci->data_size = htole32(sizeof(struct mly_event));
	_lto3l(MLY_PHYADDR(0, 0, (event >> 16) & 0xff, (event >> 24) & 0xff),
	    mci->addr);
	mci->timeout = 30 | MLY_TIMEOUT_SECONDS;
	mci->sub_ioctl = MDACIOCTL_GETEVENT;
	mci->param.getevent.sequence_number_low = htole16(event & 0xffff);
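	/*
	 * Only the low 16 bits of the sequence number are passed here; the
	 * upper two bytes travel in the target and LUN fields of the
	 * address built above.
	 */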

	/*
	 * Submit the command.
	 */
	if (mly_ccb_map(mly, mc) != 0)
		goto bad;
	mly_ccb_enqueue(mly, mc);
	return;

 bad:
	printf("%s: couldn't fetch event %u\n", device_xname(&mly->mly_dv), event);
	free(mc->mc_data, M_DEVBUF);
	mly_ccb_free(mly, mc);
}

/*
 * Handle the completion of an event poll.
 */
static void
mly_complete_event(struct mly_softc *mly, struct mly_ccb *mc)
{
	struct mly_event *me;

	me = (struct mly_event *)mc->mc_data;
	mly_ccb_unmap(mly, mc);
	mly_ccb_free(mly, mc);

	/* If the event was successfully fetched, process it. */
	if (mc->mc_status == SCSI_OK)
		mly_process_event(mly, me);
	else
		aprint_error_dev(&mly->mly_dv, "unable to fetch event; status = 0x%x\n",
		    mc->mc_status);

	free(me, M_DEVBUF);

	/* Check for another event. */
	mly_check_event(mly);
}

/*
 * Process a controller event.  Called with interrupts blocked (i.e., at
 * interrupt time).
 */
static void
mly_process_event(struct mly_softc *mly, struct mly_event *me)
{
	struct scsi_sense_data *ssd;
	int bus, target, event, class, action;
	const char *fp, *tp;

	ssd = (struct scsi_sense_data *)&me->sense[0];

	/*
	 * Errors can be reported using vendor-unique sense data.  In this
	 * case, the event code will be 0x1c (Request sense data present),
	 * the sense key will be 0x09 (vendor specific), the MSB of the ASC
	 * will be set, and the actual event code will be a 16-bit value
	 * comprised of the ASCQ (low byte) and low seven bits of the ASC
	 * (low seven bits of the high byte).
	 */
	if (le32toh(me->code) == 0x1c &&
	    SSD_SENSE_KEY(ssd->flags) == SKEY_VENDOR_SPECIFIC &&
	    (ssd->asc & 0x80) != 0) {
		event = ((int)(ssd->asc & ~0x80) << 8) +
		    ssd->ascq;
	} else
		event = le32toh(me->code);

	/* Look up event, get codes. */
	fp = mly_describe_code(mly_table_event, event);

	/* Quiet event? */
	class = fp[0];
#ifdef notyet
	if (isupper(class) && bootverbose)
		class = tolower(class);
#endif

	/* Get action code, text string. */
	action = fp[1];
	tp = fp + 3;

	/*
	 * Print some information about the event.
	 *
	 * This code uses a table derived from the corresponding portion of
	 * the Linux driver, and thus the parser is very similar.
	 */
	switch (class) {
	case 'p':
		/*
		 * Error on physical drive.
		 */
		printf("%s: physical device %d:%d %s\n", device_xname(&mly->mly_dv),
		    me->channel, me->target, tp);
		if (action == 'r')
			mly->mly_btl[me->channel][me->target].mb_flags |=
			    MLY_BTL_RESCAN;
		break;

	case 'l':
	case 'm':
		/*
		 * Error on logical unit, or message about logical unit.
		 */
		bus = MLY_LOGDEV_BUS(mly, me->lun);
		target = MLY_LOGDEV_TARGET(mly, me->lun);
		printf("%s: logical device %d:%d %s\n", device_xname(&mly->mly_dv),
		    bus, target, tp);
		if (action == 'r')
			mly->mly_btl[bus][target].mb_flags |= MLY_BTL_RESCAN;
		break;

	case 's':
		/*
		 * Report of sense data.
		 */
		if ((SSD_SENSE_KEY(ssd->flags) == SKEY_NO_SENSE ||
		     SSD_SENSE_KEY(ssd->flags) == SKEY_NOT_READY) &&
		    ssd->asc == 0x04 &&
		    (ssd->ascq == 0x01 ||
		     ssd->ascq == 0x02)) {
			/* Ignore NO_SENSE or NOT_READY in one case */
			break;
		}

		/*
		 * XXX Should translate this if SCSIVERBOSE.
		 */
		printf("%s: physical device %d:%d %s\n", device_xname(&mly->mly_dv),
		    me->channel, me->target, tp);
		printf("%s: sense key %d asc %02x ascq %02x\n",
		    device_xname(&mly->mly_dv), SSD_SENSE_KEY(ssd->flags),
		    ssd->asc, ssd->ascq);
		printf("%s: info %x%x%x%x csi %x%x%x%x\n",
		    device_xname(&mly->mly_dv), ssd->info[0], ssd->info[1],
		    ssd->info[2], ssd->info[3], ssd->csi[0],
		    ssd->csi[1], ssd->csi[2],
		    ssd->csi[3]);
		if (action == 'r')
			mly->mly_btl[me->channel][me->target].mb_flags |=
			    MLY_BTL_RESCAN;
		break;

	case 'e':
		printf("%s: ", device_xname(&mly->mly_dv));
		printf(tp, me->target, me->lun);
		break;

	case 'c':
		printf("%s: controller %s\n", device_xname(&mly->mly_dv), tp);
		break;

	case '?':
		printf("%s: %s - %d\n", device_xname(&mly->mly_dv), tp, event);
		break;

	default:
		/* Probably a 'noisy' event being ignored. */
		break;
	}
}

/*
 * Perform periodic activities.
 */
static void
mly_thread(void *cookie)
{
	struct mly_softc *mly;
	struct mly_btl *btl;
	int s, bus, target, done;

	mly = (struct mly_softc *)cookie;

	for (;;) {
		/* Check for new events. */
		mly_check_event(mly);

		/* Re-scan up to 1 device. */
		s = splbio();
		done = 0;
		for (bus = 0; bus < mly->mly_nchans && !done; bus++) {
			for (target = 0; target < MLY_MAX_TARGETS; target++) {
				/* Perform device rescan? */
				btl = &mly->mly_btl[bus][target];
				if ((btl->mb_flags & MLY_BTL_RESCAN) != 0) {
					btl->mb_flags ^= MLY_BTL_RESCAN;
					mly_scan_btl(mly, bus, target);
					done = 1;
					break;
				}
			}
		}
		splx(s);

		/* Sleep for N seconds. */
		tsleep(mly_thread, PWAIT, "mlyzzz",
		    hz * MLY_PERIODIC_INTERVAL);
	}
}

/*
 * Submit a command to the controller and poll on completion.  Return
 * non-zero on timeout.
 */
static int
mly_ccb_poll(struct mly_softc *mly, struct mly_ccb *mc, int timo)
{
	int rv;

	if ((rv = mly_ccb_submit(mly, mc)) != 0)
		return (rv);

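	/* timo is in milliseconds; poll in 100us steps. */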
	for (timo *= 10; timo != 0; timo--) {
		if ((mc->mc_flags & MLY_CCB_COMPLETE) != 0)
			break;
		mly_intr(mly);
		DELAY(100);
	}

	return (timo == 0);
}

/*
 * Submit a command to the controller and sleep on completion.  Return
 * non-zero on timeout.
 */
static int
mly_ccb_wait(struct mly_softc *mly, struct mly_ccb *mc, int timo)
{
	int rv, s;

	mly_ccb_enqueue(mly, mc);

	s = splbio();
	if ((mc->mc_flags & MLY_CCB_COMPLETE) != 0) {
		splx(s);
		return (0);
	}
	rv = tsleep(mc, PRIBIO, "mlywccb", timo * hz / 1000);
	splx(s);

	return (rv);
}

/*
 * If a CCB is specified, enqueue it.  Pull CCBs off the software queue in
 * the order that they were enqueued and try to submit their command blocks
 * to the controller for execution.
 */
void
mly_ccb_enqueue(struct mly_softc *mly, struct mly_ccb *mc)
{
	int s;

	s = splbio();

	if (mc != NULL)
		SIMPLEQ_INSERT_TAIL(&mly->mly_ccb_queue, mc, mc_link.simpleq);

	while ((mc = SIMPLEQ_FIRST(&mly->mly_ccb_queue)) != NULL) {
		if (mly_ccb_submit(mly, mc))
			break;
		SIMPLEQ_REMOVE_HEAD(&mly->mly_ccb_queue, mc_link.simpleq);
	}

	splx(s);
}

/*
 * Deliver a command to the controller.
 */
static int
mly_ccb_submit(struct mly_softc *mly, struct mly_ccb *mc)
{
	union mly_cmd_packet *pkt;
	int s, off;

	mc->mc_packet->generic.command_id = htole16(mc->mc_slot);

	bus_dmamap_sync(mly->mly_dmat, mly->mly_pkt_dmamap,
	    mc->mc_packetphys - mly->mly_pkt_busaddr,
	    sizeof(union mly_cmd_packet),
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	s = splbio();

	/*
	 * Do we have to use the hardware mailbox?
	 */
	if ((mly->mly_state & MLY_STATE_MMBOX_ACTIVE) == 0) {
		/*
		 * Check to see if the controller is ready for us.
		 */
		if (mly_idbr_true(mly, MLY_HM_CMDSENT)) {
			splx(s);
			return (EBUSY);
		}

		/*
		 * It's ready, send the command.
		 */
		mly_outl(mly, mly->mly_cmd_mailbox,
		    (u_int64_t)mc->mc_packetphys & 0xffffffff);
		mly_outl(mly, mly->mly_cmd_mailbox + 4,
		    (u_int64_t)mc->mc_packetphys >> 32);
		mly_outb(mly, mly->mly_idbr, MLY_HM_CMDSENT);
	} else {
		pkt = &mly->mly_mmbox->mmm_command[mly->mly_mmbox_cmd_idx];
		off = (char *)pkt - (char *)mly->mly_mmbox;

		bus_dmamap_sync(mly->mly_dmat, mly->mly_mmbox_dmamap,
		    off, sizeof(mly->mly_mmbox->mmm_command[0]),
		    BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);

		/* Check to see if the next index is free yet. */
		if (pkt->mmbox.flag != 0) {
			splx(s);
			return (EBUSY);
		}

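		/*
		 * The flag byte doubles as the ownership marker, which is
		 * why the command body is copied in first and the flag set
		 * only afterwards.
		 */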
		/* Copy in new command */
		memcpy(pkt->mmbox.data, mc->mc_packet->mmbox.data,
		    sizeof(pkt->mmbox.data));

		/* Copy flag last. */
		pkt->mmbox.flag = mc->mc_packet->mmbox.flag;

		bus_dmamap_sync(mly->mly_dmat, mly->mly_mmbox_dmamap,
		    off, sizeof(mly->mly_mmbox->mmm_command[0]),
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);

		/* Signal controller and update index. */
		mly_outb(mly, mly->mly_idbr, MLY_AM_CMDSENT);
		mly->mly_mmbox_cmd_idx =
		    (mly->mly_mmbox_cmd_idx + 1) % MLY_MMBOX_COMMANDS;
	}

	splx(s);
	return (0);
}

/*
 * Pick up completed commands from the controller and handle accordingly.
 */
int
mly_intr(void *cookie)
{
	struct mly_ccb *mc;
	union mly_status_packet *sp;
	u_int16_t slot;
	int forus, off;
	struct mly_softc *mly;

	mly = cookie;
	forus = 0;

	/*
	 * Pick up hardware-mailbox commands.
	 */
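	/*
	 * The hardware status mailbox is laid out as a 16-bit command slot,
	 * a status byte, a sense length byte and a 32-bit residue count.
	 */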
	if (mly_odbr_true(mly, MLY_HM_STSREADY)) {
		slot = mly_inw(mly, mly->mly_status_mailbox);

		if (slot < MLY_SLOT_MAX) {
			mc = mly->mly_ccbs + (slot - MLY_SLOT_START);
			mc->mc_status =
			    mly_inb(mly, mly->mly_status_mailbox + 2);
			mc->mc_sense =
			    mly_inb(mly, mly->mly_status_mailbox + 3);
			mc->mc_resid =
			    mly_inl(mly, mly->mly_status_mailbox + 4);

			mly_ccb_complete(mly, mc);
		} else {
			/* Slot 0xffff may mean "extremely bogus command". */
			printf("%s: got HM completion for illegal slot %u\n",
			    device_xname(&mly->mly_dv), slot);
		}

		/* Unconditionally acknowledge status. */
		mly_outb(mly, mly->mly_odbr, MLY_HM_STSREADY);
		mly_outb(mly, mly->mly_idbr, MLY_HM_STSACK);
		forus = 1;
	}

	/*
	 * Pick up memory-mailbox commands.
	 */
	if (mly_odbr_true(mly, MLY_AM_STSREADY)) {
		for (;;) {
			sp = &mly->mly_mmbox->mmm_status[mly->mly_mmbox_sts_idx];
			off = (char *)sp - (char *)mly->mly_mmbox;

			bus_dmamap_sync(mly->mly_dmat, mly->mly_mmbox_dmamap,
			    off, sizeof(mly->mly_mmbox->mmm_command[0]),
			    BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);

			/* Check for more status. */
			if (sp->mmbox.flag == 0)
				break;

			/* Get slot number. */
			slot = le16toh(sp->status.command_id);
			if (slot < MLY_SLOT_MAX) {
				mc = mly->mly_ccbs + (slot - MLY_SLOT_START);
				mc->mc_status = sp->status.status;
				mc->mc_sense = sp->status.sense_length;
				mc->mc_resid = le32toh(sp->status.residue);
				mly_ccb_complete(mly, mc);
			} else {
				/*
				 * Slot 0xffff may mean "extremely bogus
				 * command".
				 */
				printf("%s: got AM completion for illegal "
				    "slot %u at %d\n", device_xname(&mly->mly_dv),
				    slot, mly->mly_mmbox_sts_idx);
			}

			/* Clear and move to next index. */
			sp->mmbox.flag = 0;
			mly->mly_mmbox_sts_idx =
			    (mly->mly_mmbox_sts_idx + 1) % MLY_MMBOX_STATUS;
		}

		/* Acknowledge that we have collected status value(s). */
		mly_outb(mly, mly->mly_odbr, MLY_AM_STSREADY);
		forus = 1;
	}

	/*
	 * Run the queue.
	 */
	if (forus && ! SIMPLEQ_EMPTY(&mly->mly_ccb_queue))
		mly_ccb_enqueue(mly, NULL);

	return (forus);
}

/*
 * Process completed commands
 */
static void
mly_ccb_complete(struct mly_softc *mly, struct mly_ccb *mc)
{
	void (*complete)(struct mly_softc *, struct mly_ccb *);

	bus_dmamap_sync(mly->mly_dmat, mly->mly_pkt_dmamap,
	    mc->mc_packetphys - mly->mly_pkt_busaddr,
	    sizeof(union mly_cmd_packet),
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	complete = mc->mc_complete;
	mc->mc_flags |= MLY_CCB_COMPLETE;

	/*
	 * Call completion handler or wake up sleeping consumer.
	 */
	if (complete != NULL)
		(*complete)(mly, mc);
	else
		wakeup(mc);
}

/*
 * Allocate a command.
 */
int
mly_ccb_alloc(struct mly_softc *mly, struct mly_ccb **mcp)
{
	struct mly_ccb *mc;
	int s;

	s = splbio();
	mc = SLIST_FIRST(&mly->mly_ccb_free);
	if (mc != NULL)
		SLIST_REMOVE_HEAD(&mly->mly_ccb_free, mc_link.slist);
	splx(s);

	*mcp = mc;
	return (mc == NULL ? EAGAIN : 0);
}

/*
 * Release a command back to the freelist.
 */
void
mly_ccb_free(struct mly_softc *mly, struct mly_ccb *mc)
{
	int s;

	/*
	 * Reset the parts of the command that could cause confusion if a
	 * later consumer fails to fill them in when this CCB is allocated
	 * again.
	 */
	mc->mc_data = NULL;
	mc->mc_flags = 0;
	mc->mc_complete = NULL;
	mc->mc_private = NULL;
	mc->mc_packet->generic.command_control = 0;

	/*
	 * By default, we set up to overwrite the command packet with sense
	 * information.
	 */
	mc->mc_packet->generic.sense_buffer_address =
	    htole64(mc->mc_packetphys);
	mc->mc_packet->generic.maximum_sense_size =
	    sizeof(union mly_cmd_packet);

	s = splbio();
	SLIST_INSERT_HEAD(&mly->mly_ccb_free, mc, mc_link.slist);
	splx(s);
}

/*
 * Allocate and initialize command and packet structures.
 *
 * If the controller supports fewer than MLY_MAX_CCBS commands, limit our
 * allocation to that number.  If we don't yet know how many commands the
 * controller supports, allocate a very small set (suitable for
 * initialization purposes only).
 */
static int
mly_alloc_ccbs(struct mly_softc *mly)
{
	struct mly_ccb *mc;
	int i, rv;

	if (mly->mly_controllerinfo == NULL)
		mly->mly_ncmds = MLY_CCBS_RESV;
	else {
		i = le16toh(mly->mly_controllerinfo->maximum_parallel_commands);
		mly->mly_ncmds = min(MLY_MAX_CCBS, i);
	}

	/*
	 * Allocate enough space for all the command packets in one chunk
	 * and map them permanently into controller-visible space.
	 */
	rv = mly_dmamem_alloc(mly,
	    mly->mly_ncmds * sizeof(union mly_cmd_packet),
	    &mly->mly_pkt_dmamap, (void **)&mly->mly_pkt,
	    &mly->mly_pkt_busaddr, &mly->mly_pkt_seg);
	if (rv)
		return (rv);

	mly->mly_ccbs = malloc(sizeof(struct mly_ccb) * mly->mly_ncmds,
	    M_DEVBUF, M_NOWAIT|M_ZERO);

	for (i = 0; i < mly->mly_ncmds; i++) {
		mc = mly->mly_ccbs + i;
		mc->mc_slot = MLY_SLOT_START + i;
		mc->mc_packet = mly->mly_pkt + i;
		mc->mc_packetphys = mly->mly_pkt_busaddr +
		    (i * sizeof(union mly_cmd_packet));

		rv = bus_dmamap_create(mly->mly_dmat, MLY_MAX_XFER,
		    MLY_MAX_SEGS, MLY_MAX_XFER, 0,
		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
		    &mc->mc_datamap);
		if (rv) {
			mly_release_ccbs(mly);
			return (rv);
		}

		mly_ccb_free(mly, mc);
	}

	return (0);
}

/*
 * Free all the storage held by commands.
 *
 * Must be called with all commands on the free list.
 */
static void
mly_release_ccbs(struct mly_softc *mly)
{
	struct mly_ccb *mc;

	/* Throw away command buffer DMA maps. */
	while (mly_ccb_alloc(mly, &mc) == 0)
		bus_dmamap_destroy(mly->mly_dmat, mc->mc_datamap);

	/* Release CCB storage. */
	free(mly->mly_ccbs, M_DEVBUF);

	/* Release the packet storage. */
	mly_dmamem_free(mly, mly->mly_ncmds * sizeof(union mly_cmd_packet),
	    mly->mly_pkt_dmamap, (void *)mly->mly_pkt, &mly->mly_pkt_seg);
}

/*
 * Map a command into controller-visible space.
 */
static int
mly_ccb_map(struct mly_softc *mly, struct mly_ccb *mc)
{
	struct mly_cmd_generic *gen;
	struct mly_sg_entry *sg;
	bus_dma_segment_t *ds;
	int flg, nseg, rv;

#ifdef DIAGNOSTIC
	/* Don't map more than once. */
	if ((mc->mc_flags & MLY_CCB_MAPPED) != 0)
		panic("mly_ccb_map: already mapped");
	mc->mc_flags |= MLY_CCB_MAPPED;

	/* Does the command have a data buffer? */
	if (mc->mc_data == NULL)
		panic("mly_ccb_map: no data buffer");
#endif

	rv = bus_dmamap_load(mly->mly_dmat, mc->mc_datamap, mc->mc_data,
	    mc->mc_length, NULL, BUS_DMA_NOWAIT | BUS_DMA_STREAMING |
	    ((mc->mc_flags & MLY_CCB_DATAIN) != 0 ?
	    BUS_DMA_READ : BUS_DMA_WRITE));
	if (rv != 0)
		return (rv);

	gen = &mc->mc_packet->generic;

	/*
	 * Can we use the transfer structure directly?
	 */
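	/* Up to two segments fit inline; otherwise chain to the S/G table. */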
	if ((nseg = mc->mc_datamap->dm_nsegs) <= 2) {
		mc->mc_sgoff = -1;
		sg = &gen->transfer.direct.sg[0];
	} else {
		mc->mc_sgoff = (mc->mc_slot - MLY_SLOT_START) *
		    MLY_MAX_SEGS;
		sg = mly->mly_sg + mc->mc_sgoff;
		gen->command_control |= MLY_CMDCTL_EXTENDED_SG_TABLE;
		gen->transfer.indirect.entries[0] = htole16(nseg);
		gen->transfer.indirect.table_physaddr[0] =
		    htole64(mly->mly_sg_busaddr +
		    (mc->mc_sgoff * sizeof(struct mly_sg_entry)));
	}

	/*
	 * Fill the S/G table.
	 */
	for (ds = mc->mc_datamap->dm_segs; nseg != 0; nseg--, sg++, ds++) {
		sg->physaddr = htole64(ds->ds_addr);
		sg->length = htole64(ds->ds_len);
	}

	/*
	 * Sync up the data map.
	 */
	if ((mc->mc_flags & MLY_CCB_DATAIN) != 0)
		flg = BUS_DMASYNC_PREREAD;
	else /* if ((mc->mc_flags & MLY_CCB_DATAOUT) != 0) */ {
		gen->command_control |= MLY_CMDCTL_DATA_DIRECTION;
		flg = BUS_DMASYNC_PREWRITE;
	}

	bus_dmamap_sync(mly->mly_dmat, mc->mc_datamap, 0, mc->mc_length, flg);

	/*
	 * Sync up the chained S/G table, if we're using one.
	 */
	if (mc->mc_sgoff == -1)
		return (0);

	bus_dmamap_sync(mly->mly_dmat, mly->mly_sg_dmamap, mc->mc_sgoff,
	    MLY_SGL_SIZE, BUS_DMASYNC_PREWRITE);

	return (0);
}

/*
 * Unmap a command from controller-visible space.
 */
static void
mly_ccb_unmap(struct mly_softc *mly, struct mly_ccb *mc)
{
	int flg;

#ifdef DIAGNOSTIC
	if ((mc->mc_flags & MLY_CCB_MAPPED) == 0)
		panic("mly_ccb_unmap: not mapped");
	mc->mc_flags &= ~MLY_CCB_MAPPED;
#endif

	if ((mc->mc_flags & MLY_CCB_DATAIN) != 0)
		flg = BUS_DMASYNC_POSTREAD;
	else /* if ((mc->mc_flags & MLY_CCB_DATAOUT) != 0) */
		flg = BUS_DMASYNC_POSTWRITE;

	bus_dmamap_sync(mly->mly_dmat, mc->mc_datamap, 0, mc->mc_length, flg);
	bus_dmamap_unload(mly->mly_dmat, mc->mc_datamap);

	if (mc->mc_sgoff == -1)
		return;

	bus_dmamap_sync(mly->mly_dmat, mly->mly_sg_dmamap, mc->mc_sgoff,
	    MLY_SGL_SIZE, BUS_DMASYNC_POSTWRITE);
}

/*
 * Adjust the size of each I/O before it passes to the SCSI layer.
 */
static void
mly_scsipi_minphys(struct buf *bp)
{

	if (bp->b_bcount > MLY_MAX_XFER)
		bp->b_bcount = MLY_MAX_XFER;
	minphys(bp);
}

/*
 * Start a SCSI command.
 */
static void
mly_scsipi_request(struct scsipi_channel *chan, scsipi_adapter_req_t req,
    void *arg)
{
	struct mly_ccb *mc;
	struct mly_cmd_scsi_small *ss;
	struct scsipi_xfer *xs;
	struct scsipi_periph *periph;
	struct mly_softc *mly;
	struct mly_btl *btl;
	int s, tmp;

	mly = (void *)chan->chan_adapter->adapt_dev;

	switch (req) {
	case ADAPTER_REQ_RUN_XFER:
		xs = arg;
		periph = xs->xs_periph;
		btl = &mly->mly_btl[chan->chan_channel][periph->periph_target];
		s = splbio();
		tmp = btl->mb_flags;
		splx(s);

		/*
		 * Check for I/O attempt to a protected or non-existent
		 * device.
		 */
		if ((tmp & MLY_BTL_PROTECTED) != 0) {
			xs->error = XS_SELTIMEOUT;
			scsipi_done(xs);
			break;
		}

#ifdef DIAGNOSTIC
		/* XXX Increase if/when we support large SCSI commands. */
		if (xs->cmdlen > MLY_CMD_SCSI_SMALL_CDB) {
			printf("%s: cmd too large\n", device_xname(&mly->mly_dv));
			xs->error = XS_DRIVER_STUFFUP;
			scsipi_done(xs);
			break;
		}
#endif

		if (mly_ccb_alloc(mly, &mc)) {
			xs->error = XS_RESOURCE_SHORTAGE;
			scsipi_done(xs);
			break;
		}

		/* Build the command. */
		mc->mc_data = xs->data;
		mc->mc_length = xs->datalen;
		mc->mc_complete = mly_scsipi_complete;
		mc->mc_private = xs;

		/* Build the packet for the controller. */
		ss = &mc->mc_packet->scsi_small;
		ss->opcode = MDACMD_SCSI;
#ifdef notdef
		/*
		 * XXX FreeBSD does this, but it doesn't fix anything,
		 * XXX and appears potentially harmful.
		 */
		ss->command_control |= MLY_CMDCTL_DISABLE_DISCONNECT;
#endif

		ss->data_size = htole32(xs->datalen);
		_lto3l(MLY_PHYADDR(0, chan->chan_channel,
		    periph->periph_target, periph->periph_lun), ss->addr);

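		/*
		 * The timeout byte encodes a value together with a units
		 * flag (seconds, minutes or hours).
		 */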
1867 if (xs->timeout < 60 * 1000)
1868 ss->timeout = xs->timeout / 1000 |
1869 MLY_TIMEOUT_SECONDS;
1870 else if (xs->timeout < 60 * 60 * 1000)
1871 ss->timeout = xs->timeout / (60 * 1000) |
1872 MLY_TIMEOUT_MINUTES;
1873 else
1874 ss->timeout = xs->timeout / (60 * 60 * 1000) |
1875 MLY_TIMEOUT_HOURS;
1876
1877 ss->maximum_sense_size = sizeof(xs->sense);
1878 ss->cdb_length = xs->cmdlen;
1879 memcpy(ss->cdb, xs->cmd, xs->cmdlen);
1880
1881 if (mc->mc_length != 0) {
1882 if ((xs->xs_control & XS_CTL_DATA_OUT) != 0)
1883 mc->mc_flags |= MLY_CCB_DATAOUT;
1884 else /* if ((xs->xs_control & XS_CTL_DATA_IN) != 0) */
1885 mc->mc_flags |= MLY_CCB_DATAIN;
1886
1887 if (mly_ccb_map(mly, mc) != 0) {
1888 xs->error = XS_DRIVER_STUFFUP;
1889 mly_ccb_free(mly, mc);
1890 scsipi_done(xs);
1891 break;
1892 }
1893 }
1894
1895 /*
1896 * Give the command to the controller.
1897 */
1898 if ((xs->xs_control & XS_CTL_POLL) != 0) {
1899 if (mly_ccb_poll(mly, mc, xs->timeout + 5000)) {
1900 xs->error = XS_REQUEUE;
1901 if (mc->mc_length != 0)
1902 mly_ccb_unmap(mly, mc);
1903 mly_ccb_free(mly, mc);
1904 scsipi_done(xs);
1905 }
1906 } else
1907 mly_ccb_enqueue(mly, mc);
1908
1909 break;
1910
1911 case ADAPTER_REQ_GROW_RESOURCES:
1912 /*
1913 * Not supported.
1914 */
1915 break;
1916
1917 case ADAPTER_REQ_SET_XFER_MODE:
1918 /*
1919 * We can't change the transfer mode, but at least let
1920 * scsipi know what the adapter has negotiated.
1921 */
1922 mly_get_xfer_mode(mly, chan->chan_channel, arg);
1923 break;
1924 }
1925 }
1926
1927 /*
1928 * Handle completion of a SCSI command.
1929 */
1930 static void
1931 mly_scsipi_complete(struct mly_softc *mly, struct mly_ccb *mc)
1932 {
1933 struct scsipi_xfer *xs;
1934 struct scsipi_channel *chan;
1935 struct scsipi_inquiry_data *inq;
1936 struct mly_btl *btl;
1937 int target, sl, s;
1938 const char *p;
1939
1940 xs = mc->mc_private;
1941 xs->status = mc->mc_status;
1942
1943 /*
1944 * XXX The `resid' value as returned by the controller appears to be
1945 * bogus, so we always set it to zero. Is it perhaps the transfer
1946 * count?
1947 */
1948 xs->resid = 0; /* mc->mc_resid; */
1949
1950 if (mc->mc_length != 0)
1951 mly_ccb_unmap(mly, mc);
1952
1953 switch (mc->mc_status) {
1954 case SCSI_OK:
1955 /*
1956 * In order to report logical device type and status, we
1957 * overwrite the result of the INQUIRY command to logical
1958 * devices.
		 */
		if (xs->cmd->opcode == INQUIRY) {
			chan = xs->xs_periph->periph_channel;
			target = xs->xs_periph->periph_target;
			btl = &mly->mly_btl[chan->chan_channel][target];

			s = splbio();
			if ((btl->mb_flags & MLY_BTL_LOGICAL) != 0) {
				inq = (struct scsipi_inquiry_data *)xs->data;
				mly_padstr(inq->vendor, "MYLEX", 8);
				p = mly_describe_code(mly_table_device_type,
				    btl->mb_type);
				mly_padstr(inq->product, p, 16);
				p = mly_describe_code(mly_table_device_state,
				    btl->mb_state);
				mly_padstr(inq->revision, p, 4);
			}
			splx(s);
		}

		xs->error = XS_NOERROR;
		break;

	case SCSI_CHECK:
		sl = mc->mc_sense;
		if (sl > sizeof(xs->sense.scsi_sense))
			sl = sizeof(xs->sense.scsi_sense);
		memcpy(&xs->sense.scsi_sense, mc->mc_packet, sl);
		xs->error = XS_SENSE;
		break;

	case SCSI_BUSY:
	case SCSI_QUEUE_FULL:
		xs->error = XS_BUSY;
		break;

	default:
		printf("%s: unknown SCSI status 0x%x\n",
		    device_xname(&mly->mly_dv), xs->status);
		xs->error = XS_DRIVER_STUFFUP;
		break;
	}

	mly_ccb_free(mly, mc);
	scsipi_done(xs);
}

/*
 * Notify scsipi about a target's transfer mode.
 */
static void
mly_get_xfer_mode(struct mly_softc *mly, int bus, struct scsipi_xfer_mode *xm)
{
	struct mly_btl *btl;
	int s;

	btl = &mly->mly_btl[bus][xm->xm_target];
	xm->xm_mode = 0;

	s = splbio();

	if ((btl->mb_flags & MLY_BTL_PHYSICAL) != 0) {
		if (btl->mb_speed == 0) {
			xm->xm_period = 0;
			xm->xm_offset = 0;
		} else {
			xm->xm_period = 12;		/* XXX */
			xm->xm_offset = 8;		/* XXX */
			xm->xm_mode |= PERIPH_CAP_SYNC;	/* XXX */
		}

		/* OR in the width so PERIPH_CAP_SYNC set above survives. */
		switch (btl->mb_width) {
		case 32:
			xm->xm_mode |= PERIPH_CAP_WIDE32;
			break;
		case 16:
			xm->xm_mode |= PERIPH_CAP_WIDE16;
			break;
		default:
			break;
		}
	} else /* ((btl->mb_flags & MLY_BTL_LOGICAL) != 0) */ {
		xm->xm_mode = PERIPH_CAP_WIDE16 | PERIPH_CAP_SYNC;
		xm->xm_period = 12;
		xm->xm_offset = 8;
	}

	if ((btl->mb_flags & MLY_BTL_TQING) != 0)
		xm->xm_mode |= PERIPH_CAP_TQING;

	splx(s);

	scsipi_async_event(&mly->mly_chans[bus], ASYNC_EVENT_XFER_MODE, xm);
}

/*
 * ioctl hook; used here only to initiate low-level rescans.
 */
static int
mly_scsipi_ioctl(struct scsipi_channel *chan, u_long cmd, void *data,
    int flag, struct proc *p)
{
	struct mly_softc *mly;
	int rv;

	mly = (struct mly_softc *)chan->chan_adapter->adapt_dev;

	switch (cmd) {
	case SCBUSIOLLSCAN:
		mly_scan_channel(mly, chan->chan_channel);
		rv = 0;
		break;
	default:
		rv = ENOTTY;
		break;
	}

	return (rv);
}
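
/*
 * A hypothetical userland sketch of triggering such a rescan through the
 * scsibus device node (the device path is illustrative; SCBUSIOLLSCAN is
 * assumed to come from <sys/scsiio.h>):
 *
 *	int fd;
 *
 *	if ((fd = open("/dev/scsibus0", O_RDWR, 0)) != -1) {
 *		(void)ioctl(fd, SCBUSIOLLSCAN, NULL);
 *		close(fd);
 *	}
 */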

/*
 * Handshake with the firmware while the card is being initialized.
 */
static int
mly_fwhandshake(struct mly_softc *mly)
{
	u_int8_t error, param0, param1;
	int spinup;

	spinup = 0;

	/* Set HM_STSACK and let the firmware initialize. */
	mly_outb(mly, mly->mly_idbr, MLY_HM_STSACK);
	DELAY(1000);	/* too short? */

	/* If HM_STSACK is still true, the controller is initializing. */
	if (!mly_idbr_true(mly, MLY_HM_STSACK))
		return (0);

	printf("%s: controller initialization started\n",
	    device_xname(&mly->mly_dv));

	/*
	 * Spin waiting for initialization to finish, or for a message to
	 * be delivered.
	 */
	while (mly_idbr_true(mly, MLY_HM_STSACK)) {
		/* Check for a message. */
		if (!mly_error_valid(mly))
			continue;

		error = mly_inb(mly, mly->mly_error_status) & ~MLY_MSG_EMPTY;
		param0 = mly_inb(mly, mly->mly_cmd_mailbox);
		param1 = mly_inb(mly, mly->mly_cmd_mailbox + 1);

		switch (error) {
		case MLY_MSG_SPINUP:
			if (!spinup) {
				printf("%s: drive spinup in progress\n",
				    device_xname(&mly->mly_dv));
				spinup = 1;
			}
			break;

		case MLY_MSG_RACE_RECOVERY_FAIL:
			printf("%s: mirror race recovery failed; "
			    "one or more drives offline\n",
			    device_xname(&mly->mly_dv));
			break;

		case MLY_MSG_RACE_IN_PROGRESS:
			printf("%s: mirror race recovery in progress\n",
			    device_xname(&mly->mly_dv));
			break;

		case MLY_MSG_RACE_ON_CRITICAL:
			printf("%s: mirror race recovery on critical drive\n",
			    device_xname(&mly->mly_dv));
			break;

		case MLY_MSG_PARITY_ERROR:
			printf("%s: FATAL MEMORY PARITY ERROR\n",
			    device_xname(&mly->mly_dv));
			return (ENXIO);

		default:
			printf("%s: unknown initialization code 0x%x\n",
			    device_xname(&mly->mly_dv), error);
			break;
		}
	}

	return (0);
}

/*
 * Space-fill a character string.
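 * (For example, the call mly_padstr(inq->vendor, "MYLEX", 8) above yields
 * the fixed-width, unterminated 8-byte field "MYLEX   ".)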
 */
static void
mly_padstr(char *dst, const char *src, int len)
{

	while (len-- > 0) {
		if (*src != '\0')
			*dst++ = *src++;
		else
			*dst++ = ' ';
	}
}

/*
 * Allocate DMA safe memory.
 */
static int
mly_dmamem_alloc(struct mly_softc *mly, int size, bus_dmamap_t *dmamap,
    void **kva, bus_addr_t *paddr, bus_dma_segment_t *seg)
{
	int rseg, rv, state;

	state = 0;

	if ((rv = bus_dmamem_alloc(mly->mly_dmat, size, PAGE_SIZE, 0,
	    seg, 1, &rseg, BUS_DMA_NOWAIT)) != 0) {
		aprint_error_dev(&mly->mly_dv, "dmamem_alloc = %d\n", rv);
		goto bad;
	}

	state++;

	if ((rv = bus_dmamem_map(mly->mly_dmat, seg, 1, size, kva,
	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) != 0) {
		aprint_error_dev(&mly->mly_dv, "dmamem_map = %d\n", rv);
		goto bad;
	}

	state++;

	if ((rv = bus_dmamap_create(mly->mly_dmat, size, size, 1, 0,
	    BUS_DMA_NOWAIT, dmamap)) != 0) {
		aprint_error_dev(&mly->mly_dv, "dmamap_create = %d\n", rv);
		goto bad;
	}

	state++;

	if ((rv = bus_dmamap_load(mly->mly_dmat, *dmamap, *kva, size,
	    NULL, BUS_DMA_NOWAIT)) != 0) {
		aprint_error_dev(&mly->mly_dv, "dmamap_load = %d\n", rv);
		goto bad;
	}

	*paddr = (*dmamap)->dm_segs[0].ds_addr;
	memset(*kva, 0, size);
	return (0);

 bad:
	if (state > 2)
		bus_dmamap_destroy(mly->mly_dmat, *dmamap);
	if (state > 1)
		bus_dmamem_unmap(mly->mly_dmat, *kva, size);
	if (state > 0)
		bus_dmamem_free(mly->mly_dmat, seg, 1);

	return (rv);
}
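
/*
 * A hypothetical caller sketch, showing the intended pairing with
 * mly_dmamem_free() below (the names and the one-page size are
 * illustrative):
 *
 *	bus_dma_segment_t seg;
 *	bus_dmamap_t dmamap;
 *	bus_addr_t pa;
 *	void *kva;
 *
 *	if (mly_dmamem_alloc(mly, PAGE_SIZE, &dmamap, &kva, &pa,
 *	    &seg) == 0) {
 *		... use kva (kernel mapping) and pa (bus address) ...
 *		mly_dmamem_free(mly, PAGE_SIZE, dmamap, kva, &seg);
 *	}
 */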

/*
 * Free DMA safe memory.
 */
static void
mly_dmamem_free(struct mly_softc *mly, int size, bus_dmamap_t dmamap,
    void *kva, bus_dma_segment_t *seg)
{

	bus_dmamap_unload(mly->mly_dmat, dmamap);
	bus_dmamap_destroy(mly->mly_dmat, dmamap);
	bus_dmamem_unmap(mly->mly_dmat, kva, size);
	bus_dmamem_free(mly->mly_dmat, seg, 1);
}

/*
 * Accept an open operation on the control device.
 */
int
mlyopen(dev_t dev, int flag, int mode, struct lwp *l)
{
	struct mly_softc *mly;

	if ((mly = device_lookup(&mly_cd, minor(dev))) == NULL)
		return (ENXIO);
	if ((mly->mly_state & MLY_STATE_INITOK) == 0)
		return (ENXIO);
	if ((mly->mly_state & MLY_STATE_OPEN) != 0)
		return (EBUSY);

	mly->mly_state |= MLY_STATE_OPEN;
	return (0);
}

/*
 * Accept the last close on the control device.
 */
int
mlyclose(dev_t dev, int flag, int mode, struct lwp *l)
{
	struct mly_softc *mly;

	mly = device_lookup(&mly_cd, minor(dev));
	mly->mly_state &= ~MLY_STATE_OPEN;
	return (0);
}

/*
 * Handle control operations.
 */
int
mlyioctl(dev_t dev, u_long cmd, void *data, int flag, struct lwp *l)
{
	struct mly_softc *mly;
	int rv;

	mly = device_lookup(&mly_cd, minor(dev));

	switch (cmd) {
	case MLYIO_COMMAND:
		rv = kauth_authorize_device_passthru(l->l_cred, dev,
		    KAUTH_REQ_DEVICE_RAWIO_PASSTHRU_ALL, data);
		if (rv)
			break;

		rv = mly_user_command(mly, (void *)data);
		break;
	case MLYIO_HEALTH:
		rv = mly_user_health(mly, (void *)data);
		break;
	default:
		rv = ENOTTY;
		break;
	}

	return (rv);
}

/*
 * Execute a command passed in from userspace.
 *
 * The control structure contains the actual command for the controller, as
 * well as the user-space data pointer and data size, and an optional sense
 * buffer size/pointer.  On completion, the data size is adjusted to the
 * command residual, and the sense buffer size to the size of the returned
 * sense data.
 */
static int
mly_user_command(struct mly_softc *mly, struct mly_user_command *uc)
{
	struct mly_ccb *mc;
	int rv, mapped;

	if ((rv = mly_ccb_alloc(mly, &mc)) != 0)
		return (rv);

	mapped = 0;
	mc->mc_data = NULL;

	/*
	 * Handle data size/direction.
	 */
	if ((mc->mc_length = abs(uc->DataTransferLength)) != 0) {
		if (mc->mc_length > MAXPHYS) {
			rv = EINVAL;
			goto out;
		}

		mc->mc_data = malloc(mc->mc_length, M_DEVBUF, M_WAITOK);
		if (mc->mc_data == NULL) {
			rv = ENOMEM;
			goto out;
		}

		if (uc->DataTransferLength > 0) {
			mc->mc_flags |= MLY_CCB_DATAIN;
			memset(mc->mc_data, 0, mc->mc_length);
		}

		if (uc->DataTransferLength < 0) {
			mc->mc_flags |= MLY_CCB_DATAOUT;
			rv = copyin(uc->DataTransferBuffer, mc->mc_data,
			    mc->mc_length);
			if (rv != 0)
				goto out;
		}

		if ((rv = mly_ccb_map(mly, mc)) != 0)
			goto out;
		mapped = 1;
	}

	/* Copy in the command and execute it. */
	memcpy(mc->mc_packet, &uc->CommandMailbox, sizeof(uc->CommandMailbox));

	if ((rv = mly_ccb_wait(mly, mc, 60000)) != 0)
		goto out;

	/* Return the data to userspace. */
	if (uc->DataTransferLength > 0) {
		rv = copyout(mc->mc_data, uc->DataTransferBuffer,
		    mc->mc_length);
		if (rv != 0)
			goto out;
	}

	/* Return the sense buffer to userspace. */
	if (uc->RequestSenseLength > 0 && mc->mc_sense > 0) {
		rv = copyout(mc->mc_packet, uc->RequestSenseBuffer,
		    min(uc->RequestSenseLength, mc->mc_sense));
		if (rv != 0)
			goto out;
	}

	/* Return command results to userspace (caller will copy out). */
	uc->DataTransferLength = mc->mc_resid;
	uc->RequestSenseLength = min(uc->RequestSenseLength, mc->mc_sense);
	uc->CommandStatus = mc->mc_status;
	rv = 0;

 out:
	if (mapped)
		mly_ccb_unmap(mly, mc);
	if (mc->mc_data != NULL)
		free(mc->mc_data, M_DEVBUF);
	mly_ccb_free(mly, mc);

	return (rv);
}
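
/*
 * A hypothetical userland sketch of issuing a pass-through command with
 * MLYIO_COMMAND (the field names match struct mly_user_command as used
 * above; the mailbox contents and buffer size are illustrative only):
 *
 *	struct mly_user_command uc;
 *	u_int8_t buf[512];
 *
 *	memset(&uc, 0, sizeof(uc));
 *	... fill in uc.CommandMailbox ...
 *	uc.DataTransferBuffer = buf;
 *	uc.DataTransferLength = sizeof(buf);	(a negative length would
 *						 mean a transfer to the
 *						 controller instead)
 *	if (ioctl(fd, MLYIO_COMMAND, &uc) == 0)
 *		... uc.CommandStatus, the residual in uc.DataTransferLength
 *		    and uc.RequestSenseLength are now valid ...
 */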

/*
 * Return health status to userspace.  If the health change index in the
 * user structure does not match that currently exported by the controller,
 * we return the current status immediately.  Otherwise, we block until
 * either interrupted or new status is delivered.
 */
static int
mly_user_health(struct mly_softc *mly, struct mly_user_health *uh)
{
	struct mly_health_status mh;
	int rv, s;

	/* Fetch the current health status from userspace. */
	rv = copyin(uh->HealthStatusBuffer, &mh, sizeof(mh));
	if (rv != 0)
		return (rv);

	/* Sleep until a new status is delivered. */
	s = splbio();
	if (mly->mly_event_change == mh.change_counter)
		rv = tsleep(&mly->mly_event_change, PRIBIO | PCATCH,
		    "mlyhealth", 0);
	splx(s);

	if (rv == 0) {
		/*
		 * Copy the controller's health status buffer out (there is
		 * a race here if it changes again).
		 */
		rv = copyout(&mly->mly_mmbox->mmm_health.status,
		    uh->HealthStatusBuffer, sizeof(mh));
	}

	return (rv);
}
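
/*
 * A hypothetical userland polling loop for MLYIO_HEALTH: because the
 * controller's change counter is compared with the one copied in, reusing
 * the same buffer makes each call block until new status is published:
 *
 *	struct mly_user_health uh;
 *	struct mly_health_status hs;
 *
 *	memset(&hs, 0, sizeof(hs));
 *	uh.HealthStatusBuffer = &hs;
 *	while (ioctl(fd, MLYIO_HEALTH, &uh) == 0)
 *		... hs has been updated in place; react to it ...
 */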