/*	$NetBSD: mly.c,v 1.48.2.1 2014/08/10 06:54:54 tls Exp $	*/

/*-
 * Copyright (c) 2001 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran, Thor Lancelot Simon, and Eric Haszlakiewicz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c) 2000, 2001 Michael Smith
 * Copyright (c) 2000 BSDi
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from FreeBSD: mly.c,v 1.8 2001/07/14 00:12:22 msmith Exp
 */

/*
 * Driver for the Mylex AcceleRAID and eXtremeRAID family with v6 firmware.
 *
 * TODO:
 *
 * o Make mly->mly_btl a hash, then MLY_BTL_RESCAN becomes a SIMPLEQ.
 * o Handle FC and multiple LUNs.
 * o Fix mmbox usage.
 * o Fix transfer speed fudge.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: mly.c,v 1.48.2.1 2014/08/10 06:54:54 tls Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/kernel.h>
#include <sys/queue.h>
#include <sys/buf.h>
#include <sys/endian.h>
#include <sys/conf.h>
#include <sys/malloc.h>
#include <sys/ioctl.h>
#include <sys/scsiio.h>
#include <sys/kthread.h>
#include <sys/kauth.h>

#include <sys/bus.h>

#include <dev/scsipi/scsi_all.h>
#include <dev/scsipi/scsipi_all.h>
#include <dev/scsipi/scsiconf.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/mlyreg.h>
#include <dev/pci/mlyio.h>
#include <dev/pci/mlyvar.h>
#include <dev/pci/mly_tables.h>

static void mly_attach(device_t, device_t, void *);
static int mly_match(device_t, cfdata_t, void *);
static const struct mly_ident *mly_find_ident(struct pci_attach_args *);
static int mly_fwhandshake(struct mly_softc *);
static int mly_flush(struct mly_softc *);
static int mly_intr(void *);
static void mly_shutdown(void *);

static int mly_alloc_ccbs(struct mly_softc *);
static void mly_check_event(struct mly_softc *);
static void mly_complete_event(struct mly_softc *, struct mly_ccb *);
static void mly_complete_rescan(struct mly_softc *, struct mly_ccb *);
static int mly_dmamem_alloc(struct mly_softc *, int, bus_dmamap_t *,
    void **, bus_addr_t *, bus_dma_segment_t *);
static void mly_dmamem_free(struct mly_softc *, int, bus_dmamap_t,
    void *, bus_dma_segment_t *);
static int mly_enable_mmbox(struct mly_softc *);
static void mly_fetch_event(struct mly_softc *);
static int mly_get_controllerinfo(struct mly_softc *);
static int mly_get_eventstatus(struct mly_softc *);
static int mly_ioctl(struct mly_softc *, struct mly_cmd_ioctl *,
    void **, size_t, void *, size_t *);
static void mly_padstr(char *, const char *, int);
static void mly_process_event(struct mly_softc *, struct mly_event *);
static void mly_release_ccbs(struct mly_softc *);
static int mly_scan_btl(struct mly_softc *, int, int);
static void mly_scan_channel(struct mly_softc *, int);
static void mly_thread(void *);

static int mly_ccb_alloc(struct mly_softc *, struct mly_ccb **);
static void mly_ccb_complete(struct mly_softc *, struct mly_ccb *);
static void mly_ccb_enqueue(struct mly_softc *, struct mly_ccb *);
static void mly_ccb_free(struct mly_softc *, struct mly_ccb *);
static int mly_ccb_map(struct mly_softc *, struct mly_ccb *);
static int mly_ccb_poll(struct mly_softc *, struct mly_ccb *, int);
static int mly_ccb_submit(struct mly_softc *, struct mly_ccb *);
static void mly_ccb_unmap(struct mly_softc *, struct mly_ccb *);
static int mly_ccb_wait(struct mly_softc *, struct mly_ccb *, int);

static void mly_get_xfer_mode(struct mly_softc *, int,
    struct scsipi_xfer_mode *);
static void mly_scsipi_complete(struct mly_softc *, struct mly_ccb *);
static int mly_scsipi_ioctl(struct scsipi_channel *, u_long, void *,
    int, struct proc *);
static void mly_scsipi_minphys(struct buf *);
static void mly_scsipi_request(struct scsipi_channel *,
    scsipi_adapter_req_t, void *);

static int mly_user_command(struct mly_softc *, struct mly_user_command *);
static int mly_user_health(struct mly_softc *, struct mly_user_health *);

extern struct cfdriver mly_cd;

CFATTACH_DECL_NEW(mly, sizeof(struct mly_softc),
    mly_match, mly_attach, NULL, NULL);

dev_type_open(mlyopen);
dev_type_close(mlyclose);
dev_type_ioctl(mlyioctl);

const struct cdevsw mly_cdevsw = {
	.d_open = mlyopen,
	.d_close = mlyclose,
	.d_read = noread,
	.d_write = nowrite,
	.d_ioctl = mlyioctl,
	.d_stop = nostop,
	.d_tty = notty,
	.d_poll = nopoll,
	.d_mmap = nommap,
	.d_kqfilter = nokqfilter,
	.d_discard = nodiscard,
	.d_flag = D_OTHER
};

static struct mly_ident {
	u_short vendor;
	u_short product;
	u_short subvendor;
	u_short subproduct;
	int hwif;
	const char *desc;
} const mly_ident[] = {
	{
		PCI_VENDOR_MYLEX,
		PCI_PRODUCT_MYLEX_EXTREMERAID,
		PCI_VENDOR_MYLEX,
		0x0040,
		MLY_HWIF_STRONGARM,
		"eXtremeRAID 2000"
	},
	{
		PCI_VENDOR_MYLEX,
		PCI_PRODUCT_MYLEX_EXTREMERAID,
		PCI_VENDOR_MYLEX,
		0x0030,
		MLY_HWIF_STRONGARM,
		"eXtremeRAID 3000"
	},
	{
		PCI_VENDOR_MYLEX,
		PCI_PRODUCT_MYLEX_ACCELERAID,
		PCI_VENDOR_MYLEX,
		0x0050,
		MLY_HWIF_I960RX,
		"AcceleRAID 352"
	},
	{
		PCI_VENDOR_MYLEX,
		PCI_PRODUCT_MYLEX_ACCELERAID,
		PCI_VENDOR_MYLEX,
		0x0052,
		MLY_HWIF_I960RX,
		"AcceleRAID 170"
	},
	{
		PCI_VENDOR_MYLEX,
		PCI_PRODUCT_MYLEX_ACCELERAID,
		PCI_VENDOR_MYLEX,
		0x0054,
		MLY_HWIF_I960RX,
		"AcceleRAID 160"
	},
};

static void *mly_sdh;

/*
 * Try to find a `mly_ident' entry corresponding to this board.
 */
static const struct mly_ident *
mly_find_ident(struct pci_attach_args *pa)
{
	const struct mly_ident *mpi, *maxmpi;
	pcireg_t reg;

	mpi = mly_ident;
	maxmpi = mpi + sizeof(mly_ident) / sizeof(mly_ident[0]);

	if (PCI_CLASS(pa->pa_class) == PCI_CLASS_I2O)
		return (NULL);

	for (; mpi < maxmpi; mpi++) {
		if (PCI_VENDOR(pa->pa_id) != mpi->vendor ||
		    PCI_PRODUCT(pa->pa_id) != mpi->product)
			continue;

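		/* A subvendor ID of zero acts as a wildcard match. */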
		if (mpi->subvendor == 0x0000)
			return (mpi);

		reg = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);

		if (PCI_VENDOR(reg) == mpi->subvendor &&
		    PCI_PRODUCT(reg) == mpi->subproduct)
			return (mpi);
	}

	return (NULL);
}

/*
 * Match a supported board.
 */
static int
mly_match(device_t parent, cfdata_t cfdata, void *aux)
{

	return (mly_find_ident(aux) != NULL);
}

/*
 * Attach a supported board.
 */
static void
mly_attach(device_t parent, device_t self, void *aux)
{
	struct pci_attach_args *pa;
	struct mly_softc *mly;
	struct mly_ioctl_getcontrollerinfo *mi;
	const struct mly_ident *ident;
	pci_chipset_tag_t pc;
	pci_intr_handle_t ih;
	bus_space_handle_t memh, ioh;
	bus_space_tag_t memt, iot;
	pcireg_t reg;
	const char *intrstr;
	int ior, memr, i, rv, state;
	struct scsipi_adapter *adapt;
	struct scsipi_channel *chan;
	char intrbuf[PCI_INTRSTR_LEN];

	mly = device_private(self);
	mly->mly_dv = self;
	pa = aux;
	pc = pa->pa_pc;
	ident = mly_find_ident(pa);
	state = 0;

	mly->mly_dmat = pa->pa_dmat;
	mly->mly_hwif = ident->hwif;

	printf(": Mylex %s\n", ident->desc);

	/*
	 * Map the PCI register window.
	 */
	memr = -1;
	ior = -1;

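	/*
	 * Probe the first two BARs for I/O and memory windows; memory
	 * space is preferred below if both map successfully.
	 */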
	for (i = 0x10; i <= 0x14; i += 4) {
		reg = pci_conf_read(pa->pa_pc, pa->pa_tag, i);

		if (PCI_MAPREG_TYPE(reg) == PCI_MAPREG_TYPE_IO) {
			if (ior == -1 && PCI_MAPREG_IO_SIZE(reg) != 0)
				ior = i;
		} else {
			if (memr == -1 && PCI_MAPREG_MEM_SIZE(reg) != 0)
				memr = i;
		}
	}

	if (memr != -1)
		if (pci_mapreg_map(pa, memr, PCI_MAPREG_TYPE_MEM, 0,
		    &memt, &memh, NULL, NULL))
			memr = -1;
	if (ior != -1)
		if (pci_mapreg_map(pa, ior, PCI_MAPREG_TYPE_IO, 0,
		    &iot, &ioh, NULL, NULL))
			ior = -1;

	if (memr != -1) {
		mly->mly_iot = memt;
		mly->mly_ioh = memh;
	} else if (ior != -1) {
		mly->mly_iot = iot;
		mly->mly_ioh = ioh;
	} else {
		aprint_error_dev(self, "can't map i/o or memory space\n");
		return;
	}

	/*
	 * Enable the device.
	 */
	reg = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG,
	    reg | PCI_COMMAND_MASTER_ENABLE);

	/*
	 * Map and establish the interrupt.
	 */
	if (pci_intr_map(pa, &ih)) {
		aprint_error_dev(self, "can't map interrupt\n");
		return;
	}
	intrstr = pci_intr_string(pc, ih, intrbuf, sizeof(intrbuf));
	mly->mly_ih = pci_intr_establish(pc, ih, IPL_BIO, mly_intr, mly);
	if (mly->mly_ih == NULL) {
		aprint_error_dev(self, "can't establish interrupt");
		if (intrstr != NULL)
			aprint_error(" at %s", intrstr);
		aprint_error("\n");
		return;
	}

	if (intrstr != NULL)
		aprint_normal_dev(self, "interrupting at %s\n", intrstr);

	/*
	 * Take care of interface-specific tasks.
	 */
	switch (mly->mly_hwif) {
	case MLY_HWIF_I960RX:
		mly->mly_doorbell_true = 0x00;
		mly->mly_cmd_mailbox = MLY_I960RX_COMMAND_MAILBOX;
		mly->mly_status_mailbox = MLY_I960RX_STATUS_MAILBOX;
		mly->mly_idbr = MLY_I960RX_IDBR;
		mly->mly_odbr = MLY_I960RX_ODBR;
		mly->mly_error_status = MLY_I960RX_ERROR_STATUS;
		mly->mly_interrupt_status = MLY_I960RX_INTERRUPT_STATUS;
		mly->mly_interrupt_mask = MLY_I960RX_INTERRUPT_MASK;
		break;

	case MLY_HWIF_STRONGARM:
		mly->mly_doorbell_true = 0xff;
		mly->mly_cmd_mailbox = MLY_STRONGARM_COMMAND_MAILBOX;
		mly->mly_status_mailbox = MLY_STRONGARM_STATUS_MAILBOX;
		mly->mly_idbr = MLY_STRONGARM_IDBR;
		mly->mly_odbr = MLY_STRONGARM_ODBR;
		mly->mly_error_status = MLY_STRONGARM_ERROR_STATUS;
		mly->mly_interrupt_status = MLY_STRONGARM_INTERRUPT_STATUS;
		mly->mly_interrupt_mask = MLY_STRONGARM_INTERRUPT_MASK;
		break;
	}

	/*
	 * Allocate and map the scatter/gather lists.
	 */
	rv = mly_dmamem_alloc(mly, MLY_SGL_SIZE * MLY_MAX_CCBS,
	    &mly->mly_sg_dmamap, (void **)&mly->mly_sg,
	    &mly->mly_sg_busaddr, &mly->mly_sg_seg);
	if (rv) {
		printf("%s: unable to allocate S/G maps\n",
		    device_xname(self));
		goto bad;
	}
	state++;

	/*
	 * Allocate and map the memory mailbox.
	 */
	rv = mly_dmamem_alloc(mly, sizeof(struct mly_mmbox),
	    &mly->mly_mmbox_dmamap, (void **)&mly->mly_mmbox,
	    &mly->mly_mmbox_busaddr, &mly->mly_mmbox_seg);
	if (rv) {
		aprint_error_dev(self, "unable to allocate mailboxes\n");
		goto bad;
	}
	state++;

	/*
	 * Initialise per-controller queues.
	 */
	SLIST_INIT(&mly->mly_ccb_free);
	SIMPLEQ_INIT(&mly->mly_ccb_queue);

	/*
	 * Disable interrupts before we start talking to the controller.
	 */
	mly_outb(mly, mly->mly_interrupt_mask, MLY_INTERRUPT_MASK_DISABLE);

	/*
	 * Wait for the controller to come ready, handshaking with the
	 * firmware if required.  This is typically only necessary on
	 * platforms where the controller BIOS does not run.
	 */
	if (mly_fwhandshake(mly)) {
		aprint_error_dev(self, "unable to bring controller online\n");
		goto bad;
	}

	/*
	 * Allocate initial command buffers, obtain controller feature
	 * information, and then reallocate command buffers, since we'll
	 * know how many we want.
	 */
	if (mly_alloc_ccbs(mly)) {
		aprint_error_dev(self, "unable to allocate CCBs\n");
		goto bad;
	}
	state++;
	if (mly_get_controllerinfo(mly)) {
		aprint_error_dev(self, "unable to retrieve controller info\n");
		goto bad;
	}
	mly_release_ccbs(mly);
	if (mly_alloc_ccbs(mly)) {
		aprint_error_dev(self, "unable to allocate CCBs\n");
		state--;
		goto bad;
	}

	/*
	 * Get the current event counter for health purposes, populate the
	 * initial health status buffer.
	 */
	if (mly_get_eventstatus(mly)) {
		aprint_error_dev(self, "unable to retrieve event status\n");
		goto bad;
	}

	/*
	 * Enable memory-mailbox mode.
	 */
	if (mly_enable_mmbox(mly)) {
		aprint_error_dev(self, "unable to enable memory mailbox\n");
		goto bad;
	}

	/*
	 * Print a little information about the controller.
	 */
	mi = mly->mly_controllerinfo;

	printf("%s: %d physical channel%s, firmware %d.%02d-%d-%02d "
	    "(%02d%02d%02d%02d), %dMB RAM\n", device_xname(self),
	    mi->physical_channels_present,
	    (mi->physical_channels_present) > 1 ? "s" : "",
	    mi->fw_major, mi->fw_minor, mi->fw_turn, mi->fw_build,
	    mi->fw_century, mi->fw_year, mi->fw_month, mi->fw_day,
	    le16toh(mi->memory_size));

	/*
	 * Register our `shutdownhook'.
	 */
	if (mly_sdh == NULL)
		mly_sdh = shutdownhook_establish(mly_shutdown, NULL);

	/*
	 * Clear any previous BTL information.  For each bus that scsipi
	 * wants to scan, we'll receive the SCBUSIOLLSCAN ioctl and retrieve
	 * all BTL info at that point.
	 */
	memset(&mly->mly_btl, 0, sizeof(mly->mly_btl));

	mly->mly_nchans = mly->mly_controllerinfo->physical_channels_present +
	    mly->mly_controllerinfo->virtual_channels_present;

	/*
	 * Attach to scsipi.
	 */
	adapt = &mly->mly_adapt;
	memset(adapt, 0, sizeof(*adapt));
	adapt->adapt_dev = self;
	adapt->adapt_nchannels = mly->mly_nchans;
	adapt->adapt_openings = mly->mly_ncmds - MLY_CCBS_RESV;
	adapt->adapt_max_periph = mly->mly_ncmds - MLY_CCBS_RESV;
	adapt->adapt_request = mly_scsipi_request;
	adapt->adapt_minphys = mly_scsipi_minphys;
	adapt->adapt_ioctl = mly_scsipi_ioctl;

	for (i = 0; i < mly->mly_nchans; i++) {
		chan = &mly->mly_chans[i];
		memset(chan, 0, sizeof(*chan));
		chan->chan_adapter = adapt;
		chan->chan_bustype = &scsi_bustype;
		chan->chan_channel = i;
		chan->chan_ntargets = MLY_MAX_TARGETS;
		chan->chan_nluns = MLY_MAX_LUNS;
		chan->chan_id = mly->mly_controllerparam->initiator_id;
		chan->chan_flags = SCSIPI_CHAN_NOSETTLE;
		config_found(self, chan, scsiprint);
	}

	/*
	 * Now enable interrupts...
	 */
	mly_outb(mly, mly->mly_interrupt_mask, MLY_INTERRUPT_MASK_ENABLE);

	/*
	 * Finally, create our monitoring thread.
	 */
	mly->mly_state |= MLY_STATE_INITOK;
	rv = kthread_create(PRI_NONE, 0, NULL, mly_thread, mly,
	    &mly->mly_thread, "%s", device_xname(self));
	if (rv != 0)
		aprint_error_dev(self, "unable to create thread (%d)\n", rv);
	return;

 bad:
	if (state > 2)
		mly_release_ccbs(mly);
	if (state > 1)
		mly_dmamem_free(mly, sizeof(struct mly_mmbox),
		    mly->mly_mmbox_dmamap, (void *)mly->mly_mmbox,
		    &mly->mly_mmbox_seg);
	if (state > 0)
		mly_dmamem_free(mly, MLY_SGL_SIZE * MLY_MAX_CCBS,
		    mly->mly_sg_dmamap, (void *)mly->mly_sg,
		    &mly->mly_sg_seg);
}

/*
 * Scan all possible devices on the specified channel.
 */
static void
mly_scan_channel(struct mly_softc *mly, int bus)
{
	int s, target;

	for (target = 0; target < MLY_MAX_TARGETS; target++) {
		s = splbio();
		if (!mly_scan_btl(mly, bus, target)) {
			tsleep(&mly->mly_btl[bus][target], PRIBIO, "mlyscan",
			    0);
		}
		splx(s);
	}
}

/*
 * Shut down all configured `mly' devices.
 */
static void
mly_shutdown(void *cookie)
{
	struct mly_softc *mly;
	int i;

	for (i = 0; i < mly_cd.cd_ndevs; i++) {
		if ((mly = device_lookup_private(&mly_cd, i)) == NULL)
			continue;

		if (mly_flush(mly))
			aprint_error_dev(mly->mly_dv,
			    "unable to flush cache\n");
	}
}

/*
 * Fill in the mly_controllerinfo and mly_controllerparam fields in the
 * softc.
 */
static int
mly_get_controllerinfo(struct mly_softc *mly)
{
	struct mly_cmd_ioctl mci;
	int rv;

	/*
	 * Build the getcontrollerinfo ioctl and send it.
	 */
	memset(&mci, 0, sizeof(mci));
	mci.sub_ioctl = MDACIOCTL_GETCONTROLLERINFO;
	rv = mly_ioctl(mly, &mci, (void **)&mly->mly_controllerinfo,
	    sizeof(*mly->mly_controllerinfo), NULL, NULL);
	if (rv != 0)
		return (rv);

	/*
	 * Build the getcontrollerparameter ioctl and send it.
	 */
	memset(&mci, 0, sizeof(mci));
	mci.sub_ioctl = MDACIOCTL_GETCONTROLLERPARAMETER;
	rv = mly_ioctl(mly, &mci, (void **)&mly->mly_controllerparam,
	    sizeof(*mly->mly_controllerparam), NULL, NULL);

	return (rv);
}

/*
 * Rescan a device, possibly as a consequence of getting an event which
 * suggests that it may have changed.  Must be called with interrupts
 * blocked.
 */
static int
mly_scan_btl(struct mly_softc *mly, int bus, int target)
{
	struct mly_ccb *mc;
	struct mly_cmd_ioctl *mci;
	int rv;

	if (target == mly->mly_controllerparam->initiator_id) {
		mly->mly_btl[bus][target].mb_flags = MLY_BTL_PROTECTED;
		return (EIO);
	}

	/* Don't re-scan if a scan is already in progress. */
	if ((mly->mly_btl[bus][target].mb_flags & MLY_BTL_SCANNING) != 0)
		return (EBUSY);

	/* Get a command. */
	if ((rv = mly_ccb_alloc(mly, &mc)) != 0)
		return (rv);

	/* Set up the data buffer. */
	mc->mc_data = malloc(sizeof(union mly_devinfo),
	    M_DEVBUF, M_NOWAIT|M_ZERO);
	if (mc->mc_data == NULL) {
		mly_ccb_free(mly, mc);
		return (ENOMEM);
	}

	mc->mc_flags |= MLY_CCB_DATAIN;
	mc->mc_complete = mly_complete_rescan;

	/*
	 * Build the ioctl.
	 */
	mci = (struct mly_cmd_ioctl *)&mc->mc_packet->ioctl;
	mci->opcode = MDACMD_IOCTL;
	mci->timeout = 30 | MLY_TIMEOUT_SECONDS;
	memset(&mci->param, 0, sizeof(mci->param));

	if (MLY_BUS_IS_VIRTUAL(mly, bus)) {
		mc->mc_length = sizeof(struct mly_ioctl_getlogdevinfovalid);
		mci->data_size = htole32(mc->mc_length);
		mci->sub_ioctl = MDACIOCTL_GETLOGDEVINFOVALID;
		_lto3l(MLY_LOGADDR(0, MLY_LOGDEV_ID(mly, bus, target)),
		    mci->addr);
	} else {
		mc->mc_length = sizeof(struct mly_ioctl_getphysdevinfovalid);
		mci->data_size = htole32(mc->mc_length);
		mci->sub_ioctl = MDACIOCTL_GETPHYSDEVINFOVALID;
		_lto3l(MLY_PHYADDR(0, bus, target, 0), mci->addr);
	}

	/*
	 * Dispatch the command.
	 */
	if ((rv = mly_ccb_map(mly, mc)) != 0) {
		free(mc->mc_data, M_DEVBUF);
		mly_ccb_free(mly, mc);
		return (rv);
	}

	mly->mly_btl[bus][target].mb_flags |= MLY_BTL_SCANNING;
	mly_ccb_enqueue(mly, mc);
	return (0);
}

/*
 * Handle the completion of a rescan operation.
 */
static void
mly_complete_rescan(struct mly_softc *mly, struct mly_ccb *mc)
{
	struct mly_ioctl_getlogdevinfovalid *ldi;
	struct mly_ioctl_getphysdevinfovalid *pdi;
	struct mly_cmd_ioctl *mci;
	struct mly_btl btl, *btlp;
	struct scsipi_xfer_mode xm;
	int bus, target, rescan;
	u_int tmp;

	mly_ccb_unmap(mly, mc);

	/*
	 * Recover the bus and target from the command.  We need these even
	 * in the case where we don't have a useful response.
	 */
	mci = (struct mly_cmd_ioctl *)&mc->mc_packet->ioctl;
	tmp = _3ltol(mci->addr);
	rescan = 0;

	if (mci->sub_ioctl == MDACIOCTL_GETLOGDEVINFOVALID) {
		bus = MLY_LOGDEV_BUS(mly, MLY_LOGADDR_DEV(tmp));
		target = MLY_LOGDEV_TARGET(mly, MLY_LOGADDR_DEV(tmp));
	} else {
		bus = MLY_PHYADDR_CHANNEL(tmp);
		target = MLY_PHYADDR_TARGET(tmp);
	}

	btlp = &mly->mly_btl[bus][target];

	/* The default result is 'no device'. */
	memset(&btl, 0, sizeof(btl));
	btl.mb_flags = MLY_BTL_PROTECTED;

	/* If the rescan completed OK, we have possibly-new BTL data. */
	if (mc->mc_status != 0)
		goto out;

	if (mc->mc_length == sizeof(*ldi)) {
		ldi = (struct mly_ioctl_getlogdevinfovalid *)mc->mc_data;
		tmp = le32toh(ldi->logical_device_number);

		if (MLY_LOGDEV_BUS(mly, tmp) != bus ||
		    MLY_LOGDEV_TARGET(mly, tmp) != target) {
#ifdef MLYDEBUG
			printf("%s: WARNING: BTL rescan (logical) for %d:%d "
			    "returned data for %d:%d instead\n",
			    device_xname(mly->mly_dv), bus, target,
			    MLY_LOGDEV_BUS(mly, tmp),
			    MLY_LOGDEV_TARGET(mly, tmp));
#endif
			goto out;
		}

		btl.mb_flags = MLY_BTL_LOGICAL | MLY_BTL_TQING;
		btl.mb_type = ldi->raid_level;
		btl.mb_state = ldi->state;
	} else if (mc->mc_length == sizeof(*pdi)) {
		pdi = (struct mly_ioctl_getphysdevinfovalid *)mc->mc_data;

		if (pdi->channel != bus || pdi->target != target) {
#ifdef MLYDEBUG
			printf("%s: WARNING: BTL rescan (physical) for %d:%d "
769 " returned data for %d:%d instead\n",
			    device_xname(mly->mly_dv),
			    bus, target, pdi->channel, pdi->target);
#endif
			goto out;
		}

		btl.mb_flags = MLY_BTL_PHYSICAL;
		btl.mb_type = MLY_DEVICE_TYPE_PHYSICAL;
		btl.mb_state = pdi->state;
		btl.mb_speed = pdi->speed;
		btl.mb_width = pdi->width;

		if (pdi->state != MLY_DEVICE_STATE_UNCONFIGURED)
			btl.mb_flags |= MLY_BTL_PROTECTED;
		if (pdi->command_tags != 0)
			btl.mb_flags |= MLY_BTL_TQING;
	} else {
		printf("%s: BTL rescan result invalid\n",
		    device_xname(mly->mly_dv));
		goto out;
	}

	/* Decide whether we need to rescan the device. */
	if (btl.mb_flags != btlp->mb_flags ||
	    btl.mb_speed != btlp->mb_speed ||
	    btl.mb_width != btlp->mb_width)
		rescan = 1;

 out:
	*btlp = btl;

	if (rescan && (btl.mb_flags & MLY_BTL_PROTECTED) == 0) {
		xm.xm_target = target;
		mly_get_xfer_mode(mly, bus, &xm);
		/* XXX SCSI mid-layer rescan goes here. */
	}

	/* Wake anybody waiting on the device to be rescanned. */
	wakeup(btlp);

	free(mc->mc_data, M_DEVBUF);
	mly_ccb_free(mly, mc);
}

/*
 * Get the current health status and set the 'next event' counter to suit.
 */
static int
mly_get_eventstatus(struct mly_softc *mly)
{
	struct mly_cmd_ioctl mci;
	struct mly_health_status *mh;
	int rv;

	/* Build the gethealthstatus ioctl and send it. */
	memset(&mci, 0, sizeof(mci));
	mh = NULL;
	mci.sub_ioctl = MDACIOCTL_GETHEALTHSTATUS;

	rv = mly_ioctl(mly, &mci, (void *)&mh, sizeof(*mh), NULL, NULL);
	if (rv)
		return (rv);

	/* Get the event counter. */
	mly->mly_event_change = le32toh(mh->change_counter);
	mly->mly_event_waiting = le32toh(mh->next_event);
	mly->mly_event_counter = le32toh(mh->next_event);

	/* Save the health status into the memory mailbox. */
	memcpy(&mly->mly_mmbox->mmm_health.status, mh, sizeof(*mh));

	bus_dmamap_sync(mly->mly_dmat, mly->mly_mmbox_dmamap,
	    offsetof(struct mly_mmbox, mmm_health),
	    sizeof(mly->mly_mmbox->mmm_health),
	    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);

	free(mh, M_DEVBUF);
	return (0);
}

/*
 * Enable memory mailbox mode.
 */
static int
mly_enable_mmbox(struct mly_softc *mly)
{
	struct mly_cmd_ioctl mci;
	u_int8_t *sp;
	u_int64_t tmp;
	int rv;

	/* Build the ioctl and send it. */
	memset(&mci, 0, sizeof(mci));
	mci.sub_ioctl = MDACIOCTL_SETMEMORYMAILBOX;

	/* Set buffer addresses. */
	tmp = mly->mly_mmbox_busaddr + offsetof(struct mly_mmbox, mmm_command);
	mci.param.setmemorymailbox.command_mailbox_physaddr = htole64(tmp);

	tmp = mly->mly_mmbox_busaddr + offsetof(struct mly_mmbox, mmm_status);
	mci.param.setmemorymailbox.status_mailbox_physaddr = htole64(tmp);

	tmp = mly->mly_mmbox_busaddr + offsetof(struct mly_mmbox, mmm_health);
	mci.param.setmemorymailbox.health_buffer_physaddr = htole64(tmp);

	/* Set buffer sizes - abuse of data_size field is revolting. */
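	/*
	 * The first two bytes of the 32-bit data_size field carry the
	 * command and status mailbox sizes respectively, and all three
	 * sizes are expressed in kilobytes (hence the '>> 10' below).
	 */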
	sp = (u_int8_t *)&mci.data_size;
	sp[0] = (sizeof(union mly_cmd_packet) * MLY_MMBOX_COMMANDS) >> 10;
	sp[1] = (sizeof(union mly_status_packet) * MLY_MMBOX_STATUS) >> 10;
	mci.param.setmemorymailbox.health_buffer_size =
	    sizeof(union mly_health_region) >> 10;

	rv = mly_ioctl(mly, &mci, NULL, 0, NULL, NULL);
	if (rv)
		return (rv);

	mly->mly_state |= MLY_STATE_MMBOX_ACTIVE;
	return (0);
}

/*
 * Flush all pending I/O from the controller.
 */
static int
mly_flush(struct mly_softc *mly)
{
	struct mly_cmd_ioctl mci;

	/* Build the ioctl. */
	memset(&mci, 0, sizeof(mci));
	mci.sub_ioctl = MDACIOCTL_FLUSHDEVICEDATA;
	mci.param.deviceoperation.operation_device =
	    MLY_OPDEVICE_PHYSICAL_CONTROLLER;

	/* Pass it off to the controller. */
	return (mly_ioctl(mly, &mci, NULL, 0, NULL, NULL));
}

/*
 * Perform an ioctl command.
 *
 * If (data) is not NULL, the command requires data transfer to the
 * controller.  If (*data) is NULL the command requires data transfer from
 * the controller, and we will allocate a buffer for it.
 */
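/*
 * For example, mly_get_controllerinfo() above passes the address of a
 * NULL pointer, so the response buffer is allocated here and handed
 * back to the caller through *data.
 */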
static int
mly_ioctl(struct mly_softc *mly, struct mly_cmd_ioctl *ioctl, void **data,
    size_t datasize, void *sense_buffer, size_t *sense_length)
{
	struct mly_ccb *mc;
	struct mly_cmd_ioctl *mci;
	u_int8_t status;
	int rv;

	mc = NULL;
	if ((rv = mly_ccb_alloc(mly, &mc)) != 0)
		goto bad;

	/*
	 * Copy the ioctl structure, but save some important fields and then
	 * fixup.
	 */
	mci = &mc->mc_packet->ioctl;
	ioctl->sense_buffer_address = htole64(mci->sense_buffer_address);
	ioctl->maximum_sense_size = mci->maximum_sense_size;
	*mci = *ioctl;
	mci->opcode = MDACMD_IOCTL;
	mci->timeout = 30 | MLY_TIMEOUT_SECONDS;

	/* Handle the data buffer. */
	if (data != NULL) {
		if (*data == NULL) {
			/* Allocate data buffer. */
			mc->mc_data = malloc(datasize, M_DEVBUF, M_NOWAIT);
			if (mc->mc_data == NULL) {
				rv = ENOMEM;
				goto bad;
			}
			mc->mc_flags |= MLY_CCB_DATAIN;
		} else {
			mc->mc_data = *data;
			mc->mc_flags |= MLY_CCB_DATAOUT;
		}
		mc->mc_length = datasize;
		mc->mc_packet->generic.data_size = htole32(datasize);
	}

	/* Run the command. */
	if (datasize > 0)
		if ((rv = mly_ccb_map(mly, mc)) != 0)
			goto bad;
	rv = mly_ccb_poll(mly, mc, 30000);
	if (datasize > 0)
		mly_ccb_unmap(mly, mc);
	if (rv != 0)
		goto bad;

	/* Clean up and return any data. */
	status = mc->mc_status;

	if (status != 0)
		printf("mly_ioctl: command status %d\n", status);

	if (mc->mc_sense > 0 && sense_buffer != NULL) {
		memcpy(sense_buffer, mc->mc_packet, mc->mc_sense);
		*sense_length = mc->mc_sense;
		goto bad;
	}

	/* Should we return a data pointer? */
	if (data != NULL && *data == NULL)
		*data = mc->mc_data;

	/* Command completed OK. */
	rv = (status != 0 ? EIO : 0);

 bad:
	if (mc != NULL) {
		/* Do we need to free a data buffer we allocated? */
		if (rv != 0 && mc->mc_data != NULL &&
		    (data == NULL || *data == NULL))
			free(mc->mc_data, M_DEVBUF);
		mly_ccb_free(mly, mc);
	}

	return (rv);
}

/*
 * Check for event(s) outstanding in the controller.
 */
static void
mly_check_event(struct mly_softc *mly)
{

	bus_dmamap_sync(mly->mly_dmat, mly->mly_mmbox_dmamap,
	    offsetof(struct mly_mmbox, mmm_health),
	    sizeof(mly->mly_mmbox->mmm_health),
	    BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);

	/*
	 * The controller may have updated the health status information,
	 * so check for it here.  Note that the counters are all in host
	 * memory, so this check is very cheap.  Also note that we depend
	 * on this check being repeated: the monitoring thread runs it
	 * periodically, and it is run again each time an event fetch
	 * completes.
	 */
	if (le32toh(mly->mly_mmbox->mmm_health.status.change_counter) !=
	    mly->mly_event_change) {
		mly->mly_event_change =
		    le32toh(mly->mly_mmbox->mmm_health.status.change_counter);
		mly->mly_event_waiting =
		    le32toh(mly->mly_mmbox->mmm_health.status.next_event);

		/* Wake up anyone that might be interested in this. */
		wakeup(&mly->mly_event_change);
	}

	bus_dmamap_sync(mly->mly_dmat, mly->mly_mmbox_dmamap,
	    offsetof(struct mly_mmbox, mmm_health),
	    sizeof(mly->mly_mmbox->mmm_health),
	    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);

	if (mly->mly_event_counter != mly->mly_event_waiting)
		mly_fetch_event(mly);
}

/*
 * Fetch one event from the controller.  If we fail due to resource
 * starvation, we'll be retried the next time a command completes.
 */
static void
mly_fetch_event(struct mly_softc *mly)
{
	struct mly_ccb *mc;
	struct mly_cmd_ioctl *mci;
	int s;
	u_int32_t event;

	/* Get a command. */
	if (mly_ccb_alloc(mly, &mc))
		return;

	/* Set up the data buffer. */
	mc->mc_data = malloc(sizeof(struct mly_event), M_DEVBUF,
	    M_NOWAIT|M_ZERO);
	if (mc->mc_data == NULL) {
		mly_ccb_free(mly, mc);
		return;
	}

	mc->mc_length = sizeof(struct mly_event);
	mc->mc_flags |= MLY_CCB_DATAIN;
	mc->mc_complete = mly_complete_event;

	/*
	 * Get an event number to fetch.  It's possible that we've raced
	 * with another context for the last event, in which case there will
	 * be no more events.
	 */
	s = splbio();
	if (mly->mly_event_counter == mly->mly_event_waiting) {
		splx(s);
		free(mc->mc_data, M_DEVBUF);
		mly_ccb_free(mly, mc);
		return;
	}
	event = mly->mly_event_counter++;
	splx(s);

	/*
	 * Build the ioctl.
	 *
	 * At this point we are committed to sending this request, as it
	 * will be the only one constructed for this particular event
	 * number.
	 */
	mci = (struct mly_cmd_ioctl *)&mc->mc_packet->ioctl;
	mci->opcode = MDACMD_IOCTL;
	mci->data_size = htole32(sizeof(struct mly_event));
	_lto3l(MLY_PHYADDR(0, 0, (event >> 16) & 0xff, (event >> 24) & 0xff),
	    mci->addr);
	mci->timeout = 30 | MLY_TIMEOUT_SECONDS;
	mci->sub_ioctl = MDACIOCTL_GETEVENT;
	mci->param.getevent.sequence_number_low = htole16(event & 0xffff);
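
	/*
	 * Note how the 32-bit event number is encoded: the low 16 bits
	 * travel in sequence_number_low, while the upper two bytes are
	 * packed into the MLY_PHYADDR-encoded address above.
	 */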

	/*
	 * Submit the command.
	 */
	if (mly_ccb_map(mly, mc) != 0)
		goto bad;
	mly_ccb_enqueue(mly, mc);
	return;

 bad:
	printf("%s: couldn't fetch event %u\n",
	    device_xname(mly->mly_dv), event);
	free(mc->mc_data, M_DEVBUF);
	mly_ccb_free(mly, mc);
}

/*
 * Handle the completion of an event poll.
 */
static void
mly_complete_event(struct mly_softc *mly, struct mly_ccb *mc)
{
	struct mly_event *me;

	me = (struct mly_event *)mc->mc_data;
	mly_ccb_unmap(mly, mc);
	mly_ccb_free(mly, mc);

	/* If the event was successfully fetched, process it. */
	if (mc->mc_status == SCSI_OK)
		mly_process_event(mly, me);
	else
		aprint_error_dev(mly->mly_dv,
		    "unable to fetch event; status = 0x%x\n", mc->mc_status);

	free(me, M_DEVBUF);

	/* Check for another event. */
	mly_check_event(mly);
}

/*
 * Process a controller event.  Called with interrupts blocked (i.e., at
 * interrupt time).
 */
static void
mly_process_event(struct mly_softc *mly, struct mly_event *me)
{
	struct scsi_sense_data *ssd;
	int bus, target, event, class, action;
	const char *fp, *tp;

	ssd = (struct scsi_sense_data *)&me->sense[0];

	/*
	 * Errors can be reported using vendor-unique sense data.  In this
	 * case, the event code will be 0x1c (Request sense data present),
	 * the sense key will be 0x09 (vendor specific), the MSB of the ASC
	 * will be set, and the actual event code will be a 16-bit value
	 * comprised of the ASCQ (low byte) and low seven bits of the ASC
1146 */
	if (le32toh(me->code) == 0x1c &&
	    SSD_SENSE_KEY(ssd->flags) == SKEY_VENDOR_SPECIFIC &&
	    (ssd->asc & 0x80) != 0) {
		event = ((int)(ssd->asc & ~0x80) << 8) + ssd->ascq;
	} else
		event = le32toh(me->code);

	/* Look up event, get codes. */
	fp = mly_describe_code(mly_table_event, event);

	/* Quiet event? */
	class = fp[0];
#ifdef notyet
	if (isupper(class) && bootverbose)
		class = tolower(class);
#endif

	/* Get action code, text string. */
	action = fp[1];
	tp = fp + 3;

	/*
	 * Print some information about the event.
	 *
	 * This code uses a table derived from the corresponding portion of
	 * the Linux driver, and thus the parser is very similar.
	 */
	switch (class) {
	case 'p':
		/*
		 * Error on physical drive.
		 */
		printf("%s: physical device %d:%d %s\n",
		    device_xname(mly->mly_dv), me->channel, me->target, tp);
		if (action == 'r')
			mly->mly_btl[me->channel][me->target].mb_flags |=
			    MLY_BTL_RESCAN;
		break;

	case 'l':
	case 'm':
		/*
		 * Error on logical unit, or message about logical unit.
		 */
		bus = MLY_LOGDEV_BUS(mly, me->lun);
		target = MLY_LOGDEV_TARGET(mly, me->lun);
		printf("%s: logical device %d:%d %s\n",
		    device_xname(mly->mly_dv), bus, target, tp);
		if (action == 'r')
			mly->mly_btl[bus][target].mb_flags |= MLY_BTL_RESCAN;
		break;

	case 's':
		/*
		 * Report of sense data.
		 */
		if ((SSD_SENSE_KEY(ssd->flags) == SKEY_NO_SENSE ||
		    SSD_SENSE_KEY(ssd->flags) == SKEY_NOT_READY) &&
		    ssd->asc == 0x04 &&
		    (ssd->ascq == 0x01 || ssd->ascq == 0x02)) {
			/* Ignore NO_SENSE or NOT_READY in one case. */
			break;
		}

		/*
		 * XXX Should translate this if SCSIVERBOSE.
		 */
		printf("%s: physical device %d:%d %s\n",
		    device_xname(mly->mly_dv), me->channel, me->target, tp);
		printf("%s: sense key %d asc %02x ascq %02x\n",
		    device_xname(mly->mly_dv), SSD_SENSE_KEY(ssd->flags),
		    ssd->asc, ssd->ascq);
		printf("%s: info %x%x%x%x csi %x%x%x%x\n",
		    device_xname(mly->mly_dv), ssd->info[0], ssd->info[1],
		    ssd->info[2], ssd->info[3], ssd->csi[0],
		    ssd->csi[1], ssd->csi[2], ssd->csi[3]);
		if (action == 'r')
			mly->mly_btl[me->channel][me->target].mb_flags |=
			    MLY_BTL_RESCAN;
		break;

	case 'e':
		printf("%s: ", device_xname(mly->mly_dv));
		printf(tp, me->target, me->lun);
		break;

	case 'c':
		printf("%s: controller %s\n", device_xname(mly->mly_dv), tp);
		break;

	case '?':
		printf("%s: %s - %d\n", device_xname(mly->mly_dv), tp, event);
		break;

	default:
		/* Probably a 'noisy' event being ignored. */
		break;
	}
}

/*
 * Perform periodic activities.
 */
static void
mly_thread(void *cookie)
{
	struct mly_softc *mly;
	struct mly_btl *btl;
	int s, bus, target, done;

	mly = (struct mly_softc *)cookie;

	for (;;) {
		/* Check for new events. */
		mly_check_event(mly);

		/* Re-scan up to 1 device. */
		s = splbio();
		done = 0;
		for (bus = 0; bus < mly->mly_nchans && !done; bus++) {
			for (target = 0; target < MLY_MAX_TARGETS; target++) {
				/* Perform device rescan? */
				btl = &mly->mly_btl[bus][target];
				if ((btl->mb_flags & MLY_BTL_RESCAN) != 0) {
					btl->mb_flags ^= MLY_BTL_RESCAN;
					mly_scan_btl(mly, bus, target);
					done = 1;
					break;
				}
			}
		}
		splx(s);

		/* Sleep for N seconds. */
		tsleep(mly_thread, PWAIT, "mlyzzz",
		    hz * MLY_PERIODIC_INTERVAL);
	}
}

/*
 * Submit a command to the controller and poll on completion.  Return
 * non-zero on timeout.
 */
static int
mly_ccb_poll(struct mly_softc *mly, struct mly_ccb *mc, int timo)
{
	int rv;

	if ((rv = mly_ccb_submit(mly, mc)) != 0)
		return (rv);

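	/* `timo' is in milliseconds; poll in 100us steps. */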
	for (timo *= 10; timo != 0; timo--) {
		if ((mc->mc_flags & MLY_CCB_COMPLETE) != 0)
			break;
		mly_intr(mly);
		DELAY(100);
	}

	return (timo == 0);
}

/*
 * Submit a command to the controller and sleep on completion.  Return
 * non-zero on timeout.
 */
static int
mly_ccb_wait(struct mly_softc *mly, struct mly_ccb *mc, int timo)
{
	int rv, s;

	mly_ccb_enqueue(mly, mc);

	s = splbio();
	if ((mc->mc_flags & MLY_CCB_COMPLETE) != 0) {
		splx(s);
		return (0);
	}
	rv = tsleep(mc, PRIBIO, "mlywccb", timo * hz / 1000);
	splx(s);

	return (rv);
}

/*
 * If a CCB is specified, enqueue it.  Pull CCBs off the software queue in
 * the order that they were enqueued and try to submit their command blocks
 * to the controller for execution.
 */
void
mly_ccb_enqueue(struct mly_softc *mly, struct mly_ccb *mc)
{
	int s;

	s = splbio();

	if (mc != NULL)
		SIMPLEQ_INSERT_TAIL(&mly->mly_ccb_queue, mc, mc_link.simpleq);

	while ((mc = SIMPLEQ_FIRST(&mly->mly_ccb_queue)) != NULL) {
		if (mly_ccb_submit(mly, mc))
			break;
		SIMPLEQ_REMOVE_HEAD(&mly->mly_ccb_queue, mc_link.simpleq);
	}

	splx(s);
}

/*
 * Deliver a command to the controller.
 */
static int
mly_ccb_submit(struct mly_softc *mly, struct mly_ccb *mc)
{
	union mly_cmd_packet *pkt;
	int s, off;

	mc->mc_packet->generic.command_id = htole16(mc->mc_slot);

	bus_dmamap_sync(mly->mly_dmat, mly->mly_pkt_dmamap,
	    mc->mc_packetphys - mly->mly_pkt_busaddr,
	    sizeof(union mly_cmd_packet),
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	s = splbio();

	/*
	 * Do we have to use the hardware mailbox?
	 */
	if ((mly->mly_state & MLY_STATE_MMBOX_ACTIVE) == 0) {
		/*
		 * Check to see if the controller is ready for us.
		 */
		if (mly_idbr_true(mly, MLY_HM_CMDSENT)) {
			splx(s);
			return (EBUSY);
		}

		/*
		 * It's ready, send the command.
		 */
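		/*
		 * The 64-bit packet address is written as two 32-bit
		 * halves, low half first.
		 */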
		mly_outl(mly, mly->mly_cmd_mailbox,
		    (u_int64_t)mc->mc_packetphys & 0xffffffff);
		mly_outl(mly, mly->mly_cmd_mailbox + 4,
		    (u_int64_t)mc->mc_packetphys >> 32);
		mly_outb(mly, mly->mly_idbr, MLY_HM_CMDSENT);
	} else {
		pkt = &mly->mly_mmbox->mmm_command[mly->mly_mmbox_cmd_idx];
		off = (char *)pkt - (char *)mly->mly_mmbox;

		bus_dmamap_sync(mly->mly_dmat, mly->mly_mmbox_dmamap,
		    off, sizeof(mly->mly_mmbox->mmm_command[0]),
		    BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);

		/* Check to see if the next index is free yet. */
		if (pkt->mmbox.flag != 0) {
			splx(s);
			return (EBUSY);
		}

		/* Copy in new command. */
		memcpy(pkt->mmbox.data, mc->mc_packet->mmbox.data,
		    sizeof(pkt->mmbox.data));

		/* Copy flag last. */
		pkt->mmbox.flag = mc->mc_packet->mmbox.flag;

		bus_dmamap_sync(mly->mly_dmat, mly->mly_mmbox_dmamap,
		    off, sizeof(mly->mly_mmbox->mmm_command[0]),
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);

		/* Signal controller and update index. */
		mly_outb(mly, mly->mly_idbr, MLY_AM_CMDSENT);
		mly->mly_mmbox_cmd_idx =
		    (mly->mly_mmbox_cmd_idx + 1) % MLY_MMBOX_COMMANDS;
	}

	splx(s);
	return (0);
}

/*
 * Pick up completed commands from the controller and handle accordingly.
 */
int
mly_intr(void *cookie)
{
	struct mly_ccb *mc;
	union mly_status_packet *sp;
	u_int16_t slot;
	int forus, off;
	struct mly_softc *mly;

	mly = cookie;
	forus = 0;

	/*
	 * Pick up hardware-mailbox commands.
	 */
	if (mly_odbr_true(mly, MLY_HM_STSREADY)) {
		slot = mly_inw(mly, mly->mly_status_mailbox);

		if (slot < MLY_SLOT_MAX) {
			mc = mly->mly_ccbs + (slot - MLY_SLOT_START);
			mc->mc_status =
			    mly_inb(mly, mly->mly_status_mailbox + 2);
			mc->mc_sense =
			    mly_inb(mly, mly->mly_status_mailbox + 3);
			mc->mc_resid =
			    mly_inl(mly, mly->mly_status_mailbox + 4);

			mly_ccb_complete(mly, mc);
		} else {
			/* Slot 0xffff may mean "extremely bogus command". */
			printf("%s: got HM completion for illegal slot %u\n",
			    device_xname(mly->mly_dv), slot);
		}

		/* Unconditionally acknowledge status. */
		mly_outb(mly, mly->mly_odbr, MLY_HM_STSREADY);
		mly_outb(mly, mly->mly_idbr, MLY_HM_STSACK);
		forus = 1;
	}

	/*
	 * Pick up memory-mailbox commands.
	 */
	if (mly_odbr_true(mly, MLY_AM_STSREADY)) {
		for (;;) {
			sp = &mly->mly_mmbox->mmm_status[
			    mly->mly_mmbox_sts_idx];
			off = (char *)sp - (char *)mly->mly_mmbox;

			bus_dmamap_sync(mly->mly_dmat, mly->mly_mmbox_dmamap,
			    off, sizeof(mly->mly_mmbox->mmm_status[0]),
			    BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);

			/* Check for more status. */
			if (sp->mmbox.flag == 0)
				break;

			/* Get slot number. */
			slot = le16toh(sp->status.command_id);
			if (slot < MLY_SLOT_MAX) {
				mc = mly->mly_ccbs + (slot - MLY_SLOT_START);
				mc->mc_status = sp->status.status;
				mc->mc_sense = sp->status.sense_length;
				mc->mc_resid = le32toh(sp->status.residue);
				mly_ccb_complete(mly, mc);
			} else {
				/*
				 * Slot 0xffff may mean "extremely bogus
				 * command".
				 */
				printf("%s: got AM completion for illegal "
				    "slot %u at %d\n",
				    device_xname(mly->mly_dv),
				    slot, mly->mly_mmbox_sts_idx);
			}

			/* Clear and move to next index. */
			sp->mmbox.flag = 0;
			mly->mly_mmbox_sts_idx =
			    (mly->mly_mmbox_sts_idx + 1) % MLY_MMBOX_STATUS;
		}

		/* Acknowledge that we have collected status value(s). */
		mly_outb(mly, mly->mly_odbr, MLY_AM_STSREADY);
		forus = 1;
	}

	/*
	 * Run the queue.
	 */
	if (forus && !SIMPLEQ_EMPTY(&mly->mly_ccb_queue))
		mly_ccb_enqueue(mly, NULL);

	return (forus);
}

/*
 * Process completed commands.
 */
static void
mly_ccb_complete(struct mly_softc *mly, struct mly_ccb *mc)
{
	void (*complete)(struct mly_softc *, struct mly_ccb *);

	bus_dmamap_sync(mly->mly_dmat, mly->mly_pkt_dmamap,
	    mc->mc_packetphys - mly->mly_pkt_busaddr,
	    sizeof(union mly_cmd_packet),
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	complete = mc->mc_complete;
	mc->mc_flags |= MLY_CCB_COMPLETE;

	/*
	 * Call completion handler or wake up sleeping consumer.
	 */
	if (complete != NULL)
		(*complete)(mly, mc);
	else
		wakeup(mc);
}

/*
 * Allocate a command.
 */
int
mly_ccb_alloc(struct mly_softc *mly, struct mly_ccb **mcp)
{
	struct mly_ccb *mc;
	int s;

	s = splbio();
	mc = SLIST_FIRST(&mly->mly_ccb_free);
	if (mc != NULL)
		SLIST_REMOVE_HEAD(&mly->mly_ccb_free, mc_link.slist);
	splx(s);

	*mcp = mc;
	return (mc == NULL ? EAGAIN : 0);
}

/*
 * Release a command back to the freelist.
 */
void
mly_ccb_free(struct mly_softc *mly, struct mly_ccb *mc)
{
	int s;

	/*
	 * Fill in parts of the command that may cause confusion if a
	 * consumer doesn't initialize them when the CCB is later
	 * reallocated.
	 */
	mc->mc_data = NULL;
	mc->mc_flags = 0;
	mc->mc_complete = NULL;
	mc->mc_private = NULL;
	mc->mc_packet->generic.command_control = 0;

	/*
	 * By default, we set up to overwrite the command packet with sense
	 * information.
	 */
	mc->mc_packet->generic.sense_buffer_address =
	    htole64(mc->mc_packetphys);
	mc->mc_packet->generic.maximum_sense_size =
	    sizeof(union mly_cmd_packet);

	s = splbio();
	SLIST_INSERT_HEAD(&mly->mly_ccb_free, mc, mc_link.slist);
	splx(s);
}

/*
 * Allocate and initialize command and packet structures.
 *
 * If the controller supports fewer than MLY_MAX_CCBS commands, limit our
 * allocation to that number.  If we don't yet know how many commands the
 * controller supports, allocate a very small set (suitable for
 * initialization purposes only).
 */
static int
mly_alloc_ccbs(struct mly_softc *mly)
{
	struct mly_ccb *mc;
	int i, rv;

	if (mly->mly_controllerinfo == NULL)
		mly->mly_ncmds = MLY_CCBS_RESV;
	else {
		i = le16toh(mly->mly_controllerinfo->maximum_parallel_commands);
		mly->mly_ncmds = min(MLY_MAX_CCBS, i);
	}

	/*
	 * Allocate enough space for all the command packets in one chunk
	 * and map them permanently into controller-visible space.
	 */
	rv = mly_dmamem_alloc(mly,
	    mly->mly_ncmds * sizeof(union mly_cmd_packet),
	    &mly->mly_pkt_dmamap, (void **)&mly->mly_pkt,
	    &mly->mly_pkt_busaddr, &mly->mly_pkt_seg);
	if (rv)
		return (rv);

	mly->mly_ccbs = malloc(sizeof(struct mly_ccb) * mly->mly_ncmds,
	    M_DEVBUF, M_NOWAIT|M_ZERO);
	if (mly->mly_ccbs == NULL) {
		mly_dmamem_free(mly,
		    mly->mly_ncmds * sizeof(union mly_cmd_packet),
		    mly->mly_pkt_dmamap, (void *)mly->mly_pkt,
		    &mly->mly_pkt_seg);
		return (ENOMEM);
	}

	for (i = 0; i < mly->mly_ncmds; i++) {
		mc = mly->mly_ccbs + i;
		mc->mc_slot = MLY_SLOT_START + i;
		mc->mc_packet = mly->mly_pkt + i;
		mc->mc_packetphys = mly->mly_pkt_busaddr +
		    (i * sizeof(union mly_cmd_packet));

		rv = bus_dmamap_create(mly->mly_dmat, MLY_MAX_XFER,
		    MLY_MAX_SEGS, MLY_MAX_XFER, 0,
		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
		    &mc->mc_datamap);
		if (rv) {
			mly_release_ccbs(mly);
			return (rv);
		}

		mly_ccb_free(mly, mc);
	}

	return (0);
}

/*
 * Free all the storage held by commands.
 *
 * Must be called with all commands on the free list.
 */
static void
mly_release_ccbs(struct mly_softc *mly)
{
	struct mly_ccb *mc;

	/* Throw away command buffer DMA maps. */
	while (mly_ccb_alloc(mly, &mc) == 0)
		bus_dmamap_destroy(mly->mly_dmat, mc->mc_datamap);

	/* Release CCB storage. */
	free(mly->mly_ccbs, M_DEVBUF);

	/* Release the packet storage. */
	mly_dmamem_free(mly, mly->mly_ncmds * sizeof(union mly_cmd_packet),
	    mly->mly_pkt_dmamap, (void *)mly->mly_pkt, &mly->mly_pkt_seg);
}

/*
 * Map a command into controller-visible space.
 */
static int
mly_ccb_map(struct mly_softc *mly, struct mly_ccb *mc)
{
	struct mly_cmd_generic *gen;
	struct mly_sg_entry *sg;
	bus_dma_segment_t *ds;
	int flg, nseg, rv;

#ifdef DIAGNOSTIC
	/* Don't map more than once. */
	if ((mc->mc_flags & MLY_CCB_MAPPED) != 0)
		panic("mly_ccb_map: already mapped");
	mc->mc_flags |= MLY_CCB_MAPPED;

	/* Does the command have a data buffer? */
	if (mc->mc_data == NULL)
		panic("mly_ccb_map: no data buffer");
#endif

	rv = bus_dmamap_load(mly->mly_dmat, mc->mc_datamap, mc->mc_data,
	    mc->mc_length, NULL, BUS_DMA_NOWAIT | BUS_DMA_STREAMING |
	    ((mc->mc_flags & MLY_CCB_DATAIN) != 0 ?
	    BUS_DMA_READ : BUS_DMA_WRITE));
	if (rv != 0)
		return (rv);

	gen = &mc->mc_packet->generic;

	/*
	 * Can we use the transfer structure directly?
	 */
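	/*
	 * Up to two segments fit in the command packet itself; larger
	 * transfers use a chained S/G table that already lives in
	 * controller-visible memory.
	 */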
	if ((nseg = mc->mc_datamap->dm_nsegs) <= 2) {
		mc->mc_sgoff = -1;
		sg = &gen->transfer.direct.sg[0];
	} else {
		mc->mc_sgoff = (mc->mc_slot - MLY_SLOT_START) *
		    MLY_MAX_SEGS;
		sg = mly->mly_sg + mc->mc_sgoff;
		gen->command_control |= MLY_CMDCTL_EXTENDED_SG_TABLE;
		gen->transfer.indirect.entries[0] = htole16(nseg);
		gen->transfer.indirect.table_physaddr[0] =
		    htole64(mly->mly_sg_busaddr +
		    (mc->mc_sgoff * sizeof(struct mly_sg_entry)));
	}

	/*
	 * Fill the S/G table.
	 */
	for (ds = mc->mc_datamap->dm_segs; nseg != 0; nseg--, sg++, ds++) {
		sg->physaddr = htole64(ds->ds_addr);
		sg->length = htole64(ds->ds_len);
	}

	/*
	 * Sync up the data map.
	 */
	if ((mc->mc_flags & MLY_CCB_DATAIN) != 0)
		flg = BUS_DMASYNC_PREREAD;
	else /* if ((mc->mc_flags & MLY_CCB_DATAOUT) != 0) */ {
		gen->command_control |= MLY_CMDCTL_DATA_DIRECTION;
		flg = BUS_DMASYNC_PREWRITE;
	}

	bus_dmamap_sync(mly->mly_dmat, mc->mc_datamap, 0, mc->mc_length, flg);

	/*
	 * Sync up the chained S/G table, if we're using one.
	 */
	if (mc->mc_sgoff == -1)
		return (0);

	bus_dmamap_sync(mly->mly_dmat, mly->mly_sg_dmamap, mc->mc_sgoff,
	    MLY_SGL_SIZE, BUS_DMASYNC_PREWRITE);

	return (0);
}

/*
 * Unmap a command from controller-visible space.
 */
static void
mly_ccb_unmap(struct mly_softc *mly, struct mly_ccb *mc)
{
	int flg;

#ifdef DIAGNOSTIC
	if ((mc->mc_flags & MLY_CCB_MAPPED) == 0)
		panic("mly_ccb_unmap: not mapped");
	mc->mc_flags &= ~MLY_CCB_MAPPED;
#endif

	if ((mc->mc_flags & MLY_CCB_DATAIN) != 0)
		flg = BUS_DMASYNC_POSTREAD;
	else /* if ((mc->mc_flags & MLY_CCB_DATAOUT) != 0) */
		flg = BUS_DMASYNC_POSTWRITE;

	bus_dmamap_sync(mly->mly_dmat, mc->mc_datamap, 0, mc->mc_length, flg);
	bus_dmamap_unload(mly->mly_dmat, mc->mc_datamap);

	if (mc->mc_sgoff == -1)
		return;

	bus_dmamap_sync(mly->mly_dmat, mly->mly_sg_dmamap, mc->mc_sgoff,
	    MLY_SGL_SIZE, BUS_DMASYNC_POSTWRITE);
}

/*
 * Adjust the size of each I/O before it passes to the SCSI layer.
 */
static void
mly_scsipi_minphys(struct buf *bp)
{

	if (bp->b_bcount > MLY_MAX_XFER)
		bp->b_bcount = MLY_MAX_XFER;
	minphys(bp);
}

/*
 * Start a SCSI command.
 */
static void
mly_scsipi_request(struct scsipi_channel *chan, scsipi_adapter_req_t req,
    void *arg)
{
	struct mly_ccb *mc;
	struct mly_cmd_scsi_small *ss;
	struct scsipi_xfer *xs;
	struct scsipi_periph *periph;
	struct mly_softc *mly;
	struct mly_btl *btl;
	int s, tmp;

	mly = device_private(chan->chan_adapter->adapt_dev);

	switch (req) {
	case ADAPTER_REQ_RUN_XFER:
		xs = arg;
		periph = xs->xs_periph;
		btl = &mly->mly_btl[chan->chan_channel][periph->periph_target];
		s = splbio();
		tmp = btl->mb_flags;
		splx(s);

		/*
		 * Check for I/O attempt to a protected or non-existent
		 * device.
		 */
		if ((tmp & MLY_BTL_PROTECTED) != 0) {
			xs->error = XS_SELTIMEOUT;
			scsipi_done(xs);
			break;
		}

#ifdef DIAGNOSTIC
		/* XXX Increase if/when we support large SCSI commands. */
		if (xs->cmdlen > MLY_CMD_SCSI_SMALL_CDB) {
			printf("%s: cmd too large\n",
			    device_xname(mly->mly_dv));
			xs->error = XS_DRIVER_STUFFUP;
			scsipi_done(xs);
			break;
		}
#endif

		if (mly_ccb_alloc(mly, &mc)) {
			xs->error = XS_RESOURCE_SHORTAGE;
			scsipi_done(xs);
			break;
		}

		/* Build the command. */
		mc->mc_data = xs->data;
		mc->mc_length = xs->datalen;
		mc->mc_complete = mly_scsipi_complete;
		mc->mc_private = xs;

		/* Build the packet for the controller. */
		ss = &mc->mc_packet->scsi_small;
		ss->opcode = MDACMD_SCSI;
#ifdef notdef
		/*
		 * XXX FreeBSD does this, but it doesn't fix anything,
		 * XXX and appears potentially harmful.
		 */
		ss->command_control |= MLY_CMDCTL_DISABLE_DISCONNECT;
#endif

		ss->data_size = htole32(xs->datalen);
		_lto3l(MLY_PHYADDR(0, chan->chan_channel,
		    periph->periph_target, periph->periph_lun), ss->addr);

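		/*
		 * Encode the timeout in the coarsest unit that fits:
		 * seconds, minutes, or hours.
		 */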
1876 if (xs->timeout < 60 * 1000)
1877 ss->timeout = xs->timeout / 1000 |
1878 MLY_TIMEOUT_SECONDS;
1879 else if (xs->timeout < 60 * 60 * 1000)
1880 ss->timeout = xs->timeout / (60 * 1000) |
1881 MLY_TIMEOUT_MINUTES;
1882 else
1883 ss->timeout = xs->timeout / (60 * 60 * 1000) |
1884 MLY_TIMEOUT_HOURS;

		ss->maximum_sense_size = sizeof(xs->sense);
		ss->cdb_length = xs->cmdlen;
		memcpy(ss->cdb, xs->cmd, xs->cmdlen);

		if (mc->mc_length != 0) {
			if ((xs->xs_control & XS_CTL_DATA_OUT) != 0)
				mc->mc_flags |= MLY_CCB_DATAOUT;
			else /* if ((xs->xs_control & XS_CTL_DATA_IN) != 0) */
				mc->mc_flags |= MLY_CCB_DATAIN;

			if (mly_ccb_map(mly, mc) != 0) {
				xs->error = XS_DRIVER_STUFFUP;
				mly_ccb_free(mly, mc);
				scsipi_done(xs);
				break;
			}
		}

		/*
		 * Give the command to the controller.
		 */
		if ((xs->xs_control & XS_CTL_POLL) != 0) {
			if (mly_ccb_poll(mly, mc, xs->timeout + 5000)) {
				xs->error = XS_REQUEUE;
				if (mc->mc_length != 0)
					mly_ccb_unmap(mly, mc);
				mly_ccb_free(mly, mc);
				scsipi_done(xs);
			}
		} else
			mly_ccb_enqueue(mly, mc);

		break;

	case ADAPTER_REQ_GROW_RESOURCES:
		/*
		 * Not supported.
		 */
		break;

	case ADAPTER_REQ_SET_XFER_MODE:
		/*
		 * We can't change the transfer mode, but at least let
		 * scsipi know what the adapter has negotiated.
		 */
		mly_get_xfer_mode(mly, chan->chan_channel, arg);
		break;
	}
}

/*
 * Handle completion of a SCSI command.
 */
static void
mly_scsipi_complete(struct mly_softc *mly, struct mly_ccb *mc)
{
	struct scsipi_xfer *xs;
	struct scsipi_channel *chan;
	struct scsipi_inquiry_data *inq;
	struct mly_btl *btl;
	int target, sl, s;
	const char *p;

	xs = mc->mc_private;
	xs->status = mc->mc_status;

	/*
	 * XXX The `resid' value as returned by the controller appears to be
	 * bogus, so we always set it to zero.  Is it perhaps the transfer
	 * count?
	 */
	xs->resid = 0;	/* mc->mc_resid; */

	if (mc->mc_length != 0)
		mly_ccb_unmap(mly, mc);

	switch (mc->mc_status) {
	case SCSI_OK:
		/*
		 * In order to report logical device type and status, we
		 * overwrite the result of the INQUIRY command to logical
		 * devices.
		 */
		if (xs->cmd->opcode == INQUIRY) {
			chan = xs->xs_periph->periph_channel;
			target = xs->xs_periph->periph_target;
			btl = &mly->mly_btl[chan->chan_channel][target];

			s = splbio();
			if ((btl->mb_flags & MLY_BTL_LOGICAL) != 0) {
				inq = (struct scsipi_inquiry_data *)xs->data;
				mly_padstr(inq->vendor, "MYLEX", 8);
				p = mly_describe_code(mly_table_device_type,
				    btl->mb_type);
				mly_padstr(inq->product, p, 16);
				p = mly_describe_code(mly_table_device_state,
				    btl->mb_state);
				mly_padstr(inq->revision, p, 4);
			}
			splx(s);
		}

		xs->error = XS_NOERROR;
		break;

	case SCSI_CHECK:
		sl = mc->mc_sense;
		if (sl > sizeof(xs->sense.scsi_sense))
			sl = sizeof(xs->sense.scsi_sense);
		memcpy(&xs->sense.scsi_sense, mc->mc_packet, sl);
		xs->error = XS_SENSE;
		break;

	case SCSI_BUSY:
	case SCSI_QUEUE_FULL:
		xs->error = XS_BUSY;
		break;

	default:
		printf("%s: unknown SCSI status 0x%x\n",
		    device_xname(mly->mly_dv), xs->status);
		xs->error = XS_DRIVER_STUFFUP;
		break;
	}

	mly_ccb_free(mly, mc);
	scsipi_done(xs);
}

/*
 * Notify scsipi about a target's transfer mode.
 */
static void
mly_get_xfer_mode(struct mly_softc *mly, int bus, struct scsipi_xfer_mode *xm)
{
	struct mly_btl *btl;
	int s;

	btl = &mly->mly_btl[bus][xm->xm_target];
	xm->xm_mode = 0;

	s = splbio();

	if ((btl->mb_flags & MLY_BTL_PHYSICAL) != 0) {
		if (btl->mb_speed == 0) {
			xm->xm_period = 0;
			xm->xm_offset = 0;
		} else {
			xm->xm_period = 12;		/* XXX */
			xm->xm_offset = 8;		/* XXX */
			xm->xm_mode |= PERIPH_CAP_SYNC;	/* XXX */
		}

		/* OR in the width so PERIPH_CAP_SYNC set above survives. */
		switch (btl->mb_width) {
		case 32:
			xm->xm_mode |= PERIPH_CAP_WIDE32;
			break;
		case 16:
			xm->xm_mode |= PERIPH_CAP_WIDE16;
			break;
		default:
			break;
		}
	} else /* ((btl->mb_flags & MLY_BTL_LOGICAL) != 0) */ {
		xm->xm_mode = PERIPH_CAP_WIDE16 | PERIPH_CAP_SYNC;
		xm->xm_period = 12;
		xm->xm_offset = 8;
	}

	if ((btl->mb_flags & MLY_BTL_TQING) != 0)
		xm->xm_mode |= PERIPH_CAP_TQING;

	splx(s);

	scsipi_async_event(&mly->mly_chans[bus], ASYNC_EVENT_XFER_MODE, xm);
}

/*
 * ioctl hook; used here only to initiate low-level rescans.
 */
static int
mly_scsipi_ioctl(struct scsipi_channel *chan, u_long cmd, void *data,
    int flag, struct proc *p)
{
	struct mly_softc *mly;
	int rv;

	mly = device_private(chan->chan_adapter->adapt_dev);

	switch (cmd) {
	case SCBUSIOLLSCAN:
		mly_scan_channel(mly, chan->chan_channel);
		rv = 0;
		break;
	default:
		rv = ENOTTY;
		break;
	}

	return (rv);
}

/*
 * Handshake with the firmware while the card is being initialized.
 */
static int
mly_fwhandshake(struct mly_softc *mly)
{
	u_int8_t error;
	int spinup;

	spinup = 0;

	/* Set HM_STSACK and let the firmware initialize. */
	mly_outb(mly, mly->mly_idbr, MLY_HM_STSACK);
	DELAY(1000);	/* too short? */

	/* If HM_STSACK is still true, the controller is initializing. */
	if (!mly_idbr_true(mly, MLY_HM_STSACK))
		return (0);

	printf("%s: controller initialization started\n",
	    device_xname(mly->mly_dv));

	/*
	 * Spin waiting for initialization to finish, or for a message to be
	 * delivered.
	 */
	while (mly_idbr_true(mly, MLY_HM_STSACK)) {
		/* Check for a message. */
		if (!mly_error_valid(mly))
			continue;

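		/*
		 * Fetch the message code from the error/status register
		 * and drain the two command mailbox bytes posted with
		 * it; their contents are not used here.
		 */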
		error = mly_inb(mly, mly->mly_error_status) & ~MLY_MSG_EMPTY;
		(void)mly_inb(mly, mly->mly_cmd_mailbox);
		(void)mly_inb(mly, mly->mly_cmd_mailbox + 1);

		switch (error) {
		case MLY_MSG_SPINUP:
			if (!spinup) {
				printf("%s: drive spinup in progress\n",
				    device_xname(mly->mly_dv));
				spinup = 1;
			}
			break;

		case MLY_MSG_RACE_RECOVERY_FAIL:
			printf("%s: mirror race recovery failed; "
			    "one or more drives offline\n",
			    device_xname(mly->mly_dv));
			break;

		case MLY_MSG_RACE_IN_PROGRESS:
			printf("%s: mirror race recovery in progress\n",
			    device_xname(mly->mly_dv));
			break;

		case MLY_MSG_RACE_ON_CRITICAL:
			printf("%s: mirror race recovery on critical drive\n",
			    device_xname(mly->mly_dv));
			break;

		case MLY_MSG_PARITY_ERROR:
			printf("%s: FATAL MEMORY PARITY ERROR\n",
			    device_xname(mly->mly_dv));
			return (ENXIO);

		default:
			printf("%s: unknown initialization code 0x%x\n",
			    device_xname(mly->mly_dv), error);
			break;
		}
	}

	return (0);
}

/*
 * Space-fill a fixed-length character field.  The destination is padded
 * with spaces and is not NUL-terminated.
 */
static void
mly_padstr(char *dst, const char *src, int len)
{

	while (len-- > 0) {
		if (*src != '\0')
			*dst++ = *src++;
		else
			*dst++ = ' ';
	}
}
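
/*
 * For example, the INQUIRY fixups above use mly_padstr(inq->vendor,
 * "MYLEX", 8) to produce the 8-byte field "MYLEX   ": space padded,
 * with no NUL terminator, as fixed-width SCSI INQUIRY fields expect.
 */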

/*
 * Allocate DMA safe memory.
 */
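/*
 * The allocation below follows the usual four-step bus_dma(9) sequence:
 * bus_dmamem_alloc for raw pages, bus_dmamem_map into kernel VA,
 * bus_dmamap_create for the map, and bus_dmamap_load to obtain the bus
 * address.  `state' counts completed steps so that the error path can
 * unwind exactly as far as it got.
 */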
static int
mly_dmamem_alloc(struct mly_softc *mly, int size, bus_dmamap_t *dmamap,
    void **kva, bus_addr_t *paddr, bus_dma_segment_t *seg)
{
	int rseg, rv, state;

	state = 0;

	if ((rv = bus_dmamem_alloc(mly->mly_dmat, size, PAGE_SIZE, 0,
	    seg, 1, &rseg, BUS_DMA_NOWAIT)) != 0) {
		aprint_error_dev(mly->mly_dv, "dmamem_alloc = %d\n", rv);
		goto bad;
	}

	state++;

	if ((rv = bus_dmamem_map(mly->mly_dmat, seg, 1, size, kva,
	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) != 0) {
		aprint_error_dev(mly->mly_dv, "dmamem_map = %d\n", rv);
		goto bad;
	}

	state++;

	if ((rv = bus_dmamap_create(mly->mly_dmat, size, size, 1, 0,
	    BUS_DMA_NOWAIT, dmamap)) != 0) {
		aprint_error_dev(mly->mly_dv, "dmamap_create = %d\n", rv);
		goto bad;
	}

	state++;

	if ((rv = bus_dmamap_load(mly->mly_dmat, *dmamap, *kva, size,
	    NULL, BUS_DMA_NOWAIT)) != 0) {
		aprint_error_dev(mly->mly_dv, "dmamap_load = %d\n", rv);
		goto bad;
	}

	*paddr = (*dmamap)->dm_segs[0].ds_addr;
	memset(*kva, 0, size);
	return (0);

 bad:
	if (state > 2)
		bus_dmamap_destroy(mly->mly_dmat, *dmamap);
	if (state > 1)
		bus_dmamem_unmap(mly->mly_dmat, *kva, size);
	if (state > 0)
		bus_dmamem_free(mly->mly_dmat, seg, 1);

	return (rv);
}

/*
 * Free DMA safe memory.
 */
static void
mly_dmamem_free(struct mly_softc *mly, int size, bus_dmamap_t dmamap,
    void *kva, bus_dma_segment_t *seg)
{

	bus_dmamap_unload(mly->mly_dmat, dmamap);
	bus_dmamap_destroy(mly->mly_dmat, dmamap);
	bus_dmamem_unmap(mly->mly_dmat, kva, size);
	bus_dmamem_free(mly->mly_dmat, seg, 1);
}

/*
 * Accept an open operation on the control device.
 */
int
mlyopen(dev_t dev, int flag, int mode, struct lwp *l)
{
	struct mly_softc *mly;

	if ((mly = device_lookup_private(&mly_cd, minor(dev))) == NULL)
		return (ENXIO);
	if ((mly->mly_state & MLY_STATE_INITOK) == 0)
		return (ENXIO);
	if ((mly->mly_state & MLY_STATE_OPEN) != 0)
		return (EBUSY);

	mly->mly_state |= MLY_STATE_OPEN;
	return (0);
}

/*
 * Accept the last close on the control device.
 */
int
mlyclose(dev_t dev, int flag, int mode, struct lwp *l)
{
	struct mly_softc *mly;

	mly = device_lookup_private(&mly_cd, minor(dev));
	mly->mly_state &= ~MLY_STATE_OPEN;
	return (0);
}

/*
 * Handle control operations.
 */
int
mlyioctl(dev_t dev, u_long cmd, void *data, int flag, struct lwp *l)
{
	struct mly_softc *mly;
	int rv;

	mly = device_lookup_private(&mly_cd, minor(dev));

	switch (cmd) {
	case MLYIO_COMMAND:
		rv = kauth_authorize_device_passthru(l->l_cred, dev,
		    KAUTH_REQ_DEVICE_RAWIO_PASSTHRU_ALL, data);
		if (rv)
			break;

		rv = mly_user_command(mly, (void *)data);
		break;
	case MLYIO_HEALTH:
		rv = mly_user_health(mly, (void *)data);
		break;
	default:
		rv = ENOTTY;
		break;
	}

	return (rv);
}

/*
 * Execute a command passed in from userspace.
 *
 * The control structure contains the actual command for the controller, as
 * well as the user-space data pointer and data size, and an optional sense
 * buffer size/pointer.  On completion, the data size is adjusted to the
 * command residual, and the sense buffer size to the size of the returned
 * sense data.
 */
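/*
 * Illustrative userland use of MLYIO_COMMAND; a sketch only, with a
 * hypothetical device node and no error handling.  The fields are those
 * consumed below: a negative DataTransferLength marks a host-to-controller
 * transfer, a positive one a controller-to-host transfer, and on return
 * DataTransferLength holds the residual:
 *
 *	struct mly_user_command uc;
 *	int fd = open("/dev/mly0", O_RDWR);
 *
 *	memset(&uc, 0, sizeof(uc));
 *	(fill in uc.CommandMailbox with the controller command here)
 *	uc.DataTransferBuffer = buf;
 *	uc.DataTransferLength = -buflen;
 *	uc.RequestSenseBuffer = sense;
 *	uc.RequestSenseLength = sizeof(sense);
 *	if (ioctl(fd, MLYIO_COMMAND, &uc) == 0)
 *		printf("status 0x%x, resid %d\n", uc.CommandStatus,
 *		    uc.DataTransferLength);
 */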
static int
mly_user_command(struct mly_softc *mly, struct mly_user_command *uc)
{
	struct mly_ccb *mc;
	int rv, mapped;

	if ((rv = mly_ccb_alloc(mly, &mc)) != 0)
		return (rv);

	mapped = 0;
	mc->mc_data = NULL;

	/*
	 * Handle data size/direction.
	 */
	if ((mc->mc_length = abs(uc->DataTransferLength)) != 0) {
		if (mc->mc_length > MAXPHYS) {
			rv = EINVAL;
			goto out;
		}

		mc->mc_data = malloc(mc->mc_length, M_DEVBUF, M_WAITOK);
		if (mc->mc_data == NULL) {
			rv = ENOMEM;
			goto out;
		}

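		/*
		 * The sign of DataTransferLength encodes the direction:
		 * positive asks the controller to return data (data-in,
		 * so the buffer is zeroed first), negative supplies data
		 * to the controller (data-out, copied in from userspace).
		 */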
		if (uc->DataTransferLength > 0) {
			mc->mc_flags |= MLY_CCB_DATAIN;
			memset(mc->mc_data, 0, mc->mc_length);
		}

		if (uc->DataTransferLength < 0) {
			mc->mc_flags |= MLY_CCB_DATAOUT;
			rv = copyin(uc->DataTransferBuffer, mc->mc_data,
			    mc->mc_length);
			if (rv != 0)
				goto out;
		}

		if ((rv = mly_ccb_map(mly, mc)) != 0)
			goto out;
		mapped = 1;
	}

	/* Copy in the command and execute it. */
	memcpy(mc->mc_packet, &uc->CommandMailbox, sizeof(uc->CommandMailbox));

	if ((rv = mly_ccb_wait(mly, mc, 60000)) != 0)
		goto out;

	/* Return the data to userspace. */
	if (uc->DataTransferLength > 0) {
		rv = copyout(mc->mc_data, uc->DataTransferBuffer,
		    mc->mc_length);
		if (rv != 0)
			goto out;
	}

	/* Return the sense buffer to userspace. */
	if (uc->RequestSenseLength > 0 && mc->mc_sense > 0) {
		rv = copyout(mc->mc_packet, uc->RequestSenseBuffer,
		    min(uc->RequestSenseLength, mc->mc_sense));
		if (rv != 0)
			goto out;
	}

	/* Return command results to userspace (caller will copy out). */
	uc->DataTransferLength = mc->mc_resid;
	uc->RequestSenseLength = min(uc->RequestSenseLength, mc->mc_sense);
	uc->CommandStatus = mc->mc_status;
	rv = 0;

 out:
	if (mapped)
		mly_ccb_unmap(mly, mc);
	if (mc->mc_data != NULL)
		free(mc->mc_data, M_DEVBUF);
	mly_ccb_free(mly, mc);

	return (rv);
}

/*
 * Return health status to userspace.  If the health change index in the
 * user structure does not match that currently exported by the controller,
 * we return the current status immediately.  Otherwise, we block until
 * either interrupted or new status is delivered.
 */
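/*
 * Illustrative userland polling loop; a sketch only, with a hypothetical
 * descriptor and no error handling.  The previously returned status is
 * handed back each time, so the ioctl blocks until the controller's
 * change counter moves on:
 *
 *	struct mly_user_health uh;
 *	struct mly_health_status hs;
 *
 *	memset(&hs, 0, sizeof(hs));
 *	uh.HealthStatusBuffer = &hs;
 *	while (ioctl(fd, MLYIO_HEALTH, &uh) == 0) {
 *		(hs now holds the updated health status)
 *	}
 */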
static int
mly_user_health(struct mly_softc *mly, struct mly_user_health *uh)
{
	struct mly_health_status mh;
	int rv, s;

	/* Fetch the current health status from userspace. */
	rv = copyin(uh->HealthStatusBuffer, &mh, sizeof(mh));
	if (rv != 0)
		return (rv);

	/* Sleep until a status update arrives, if nothing has changed. */
	s = splbio();
	if (mly->mly_event_change == mh.change_counter)
		rv = tsleep(&mly->mly_event_change, PRIBIO | PCATCH,
		    "mlyhealth", 0);
	splx(s);

	if (rv == 0) {
		/*
		 * Copy the controller's health status buffer out (there is
		 * a race here if it changes again).
		 */
		rv = copyout(&mly->mly_mmbox->mmm_health.status,
		    uh->HealthStatusBuffer, sizeof(mh));
	}

	return (rv);
}