amr.c revision 1.46.16.2 1 /* $NetBSD: amr.c,v 1.46.16.2 2008/06/02 13:23:36 mjf Exp $ */
2
3 /*-
4 * Copyright (c) 2002, 2003 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Andrew Doran.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE.
30 */
31
32 /*-
33 * Copyright (c) 1999,2000 Michael Smith
34 * Copyright (c) 2000 BSDi
35 * All rights reserved.
36 *
37 * Redistribution and use in source and binary forms, with or without
38 * modification, are permitted provided that the following conditions
39 * are met:
40 * 1. Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * 2. Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in the
44 * documentation and/or other materials provided with the distribution.
45 *
46 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
47 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
48 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
49 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
50 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
51 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
52 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
53 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
54 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
55 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
56 * SUCH DAMAGE.
57 *
58 * from FreeBSD: amr_pci.c,v 1.5 2000/08/30 07:52:40 msmith Exp
59 * from FreeBSD: amr.c,v 1.16 2000/08/30 07:52:40 msmith Exp
60 */
61
62 /*
63 * Driver for AMI RAID controllers.
64 */
65
66 #include <sys/cdefs.h>
67 __KERNEL_RCSID(0, "$NetBSD: amr.c,v 1.46.16.2 2008/06/02 13:23:36 mjf Exp $");
68
69 #include <sys/param.h>
70 #include <sys/systm.h>
71 #include <sys/kernel.h>
72 #include <sys/device.h>
73 #include <sys/queue.h>
74 #include <sys/proc.h>
75 #include <sys/buf.h>
76 #include <sys/malloc.h>
77 #include <sys/conf.h>
78 #include <sys/kthread.h>
79 #include <sys/kauth.h>
80
81 #include <uvm/uvm_extern.h>
82
83 #include <machine/endian.h>
84 #include <sys/bus.h>
85
86 #include <dev/pci/pcidevs.h>
87 #include <dev/pci/pcivar.h>
88 #include <dev/pci/amrreg.h>
89 #include <dev/pci/amrvar.h>
90 #include <dev/pci/amrio.h>
91
92 #include "locators.h"
93
94 static void amr_attach(struct device *, struct device *, void *);
95 static void amr_ccb_dump(struct amr_softc *, struct amr_ccb *);
96 static void *amr_enquire(struct amr_softc *, u_int8_t, u_int8_t, u_int8_t,
97 void *);
98 static int amr_init(struct amr_softc *, const char *,
99 struct pci_attach_args *pa);
100 static int amr_intr(void *);
101 static int amr_match(struct device *, struct cfdata *, void *);
102 static int amr_print(void *, const char *);
103 static void amr_shutdown(void *);
104 static void amr_teardown(struct amr_softc *);
105 static void amr_thread(void *);
106
107 static int amr_quartz_get_work(struct amr_softc *,
108 struct amr_mailbox_resp *);
109 static int amr_quartz_submit(struct amr_softc *, struct amr_ccb *);
110 static int amr_std_get_work(struct amr_softc *, struct amr_mailbox_resp *);
111 static int amr_std_submit(struct amr_softc *, struct amr_ccb *);
112
113 static dev_type_open(amropen);
114 static dev_type_close(amrclose);
115 static dev_type_ioctl(amrioctl);
116
117 CFATTACH_DECL(amr, sizeof(struct amr_softc),
118 amr_match, amr_attach, NULL, NULL);
119
120 const struct cdevsw amr_cdevsw = {
121 amropen, amrclose, noread, nowrite, amrioctl,
122 nostop, notty, nopoll, nommap, nokqfilter, D_OTHER
123 };
124
125 extern struct cfdriver amr_cd;
126
127 #define AT_QUARTZ 0x01 /* `Quartz' chipset */
128 #define AT_SIG 0x02 /* Check for signature */
129
130 static struct amr_pci_type {
131 u_short apt_vendor;
132 u_short apt_product;
133 u_short apt_flags;
134 } const amr_pci_type[] = {
135 { PCI_VENDOR_AMI, PCI_PRODUCT_AMI_MEGARAID, 0 },
136 { PCI_VENDOR_AMI, PCI_PRODUCT_AMI_MEGARAID2, 0 },
137 { PCI_VENDOR_AMI, PCI_PRODUCT_AMI_MEGARAID3, AT_QUARTZ },
138 { PCI_VENDOR_SYMBIOS, PCI_PRODUCT_AMI_MEGARAID3, AT_QUARTZ },
139 { PCI_VENDOR_INTEL, PCI_PRODUCT_AMI_MEGARAID3, AT_QUARTZ | AT_SIG },
140 { PCI_VENDOR_INTEL, PCI_PRODUCT_SYMBIOS_MEGARAID_320X, AT_QUARTZ },
141 { PCI_VENDOR_INTEL, PCI_PRODUCT_SYMBIOS_MEGARAID_320E, AT_QUARTZ },
142 { PCI_VENDOR_SYMBIOS, PCI_PRODUCT_SYMBIOS_MEGARAID_300X, AT_QUARTZ },
143 { PCI_VENDOR_DELL, PCI_PRODUCT_DELL_PERC_4DI, AT_QUARTZ },
144 { PCI_VENDOR_DELL, PCI_PRODUCT_DELL_PERC_4DI_2, AT_QUARTZ },
145 { PCI_VENDOR_DELL, PCI_PRODUCT_DELL_PERC_4ESI, AT_QUARTZ },
146 { PCI_VENDOR_SYMBIOS, PCI_PRODUCT_SYMBIOS_PERC_4SC, AT_QUARTZ },
147 { PCI_VENDOR_SYMBIOS, PCI_PRODUCT_SYMBIOS_MEGARAID_320X, AT_QUARTZ },
148 { PCI_VENDOR_SYMBIOS, PCI_PRODUCT_SYMBIOS_MEGARAID_320E, AT_QUARTZ },
149 { PCI_VENDOR_SYMBIOS, PCI_PRODUCT_SYMBIOS_MEGARAID_300X, AT_QUARTZ },
150 };
151
152 static struct amr_typestr {
153 const char *at_str;
154 int at_sig;
155 } const amr_typestr[] = {
156 { "Series 431", AMR_SIG_431 },
157 { "Series 438", AMR_SIG_438 },
158 { "Series 466", AMR_SIG_466 },
159 { "Series 467", AMR_SIG_467 },
160 { "Series 490", AMR_SIG_490 },
161 { "Series 762", AMR_SIG_762 },
162 { "HP NetRAID (T5)", AMR_SIG_T5 },
163 { "HP NetRAID (T7)", AMR_SIG_T7 },
164 };
165
166 static struct {
167 const char *ds_descr;
168 int ds_happy;
169 } const amr_dstate[] = {
170 { "offline", 0 },
171 { "degraded", 1 },
172 { "optimal", 1 },
173 { "online", 1 },
174 { "failed", 0 },
175 { "rebuilding", 1 },
176 { "hotspare", 0 },
177 };
178
179 static void *amr_sdh;
180
181 static int amr_max_segs;
182 int amr_max_xfer;
183
184 static inline u_int8_t
185 amr_inb(struct amr_softc *amr, int off)
186 {
187
188 bus_space_barrier(amr->amr_iot, amr->amr_ioh, off, 1,
189 BUS_SPACE_BARRIER_WRITE | BUS_SPACE_BARRIER_READ);
190 return (bus_space_read_1(amr->amr_iot, amr->amr_ioh, off));
191 }
192
193 static inline u_int32_t
194 amr_inl(struct amr_softc *amr, int off)
195 {
196
197 bus_space_barrier(amr->amr_iot, amr->amr_ioh, off, 4,
198 BUS_SPACE_BARRIER_WRITE | BUS_SPACE_BARRIER_READ);
199 return (bus_space_read_4(amr->amr_iot, amr->amr_ioh, off));
200 }
201
202 static inline void
203 amr_outb(struct amr_softc *amr, int off, u_int8_t val)
204 {
205
206 bus_space_write_1(amr->amr_iot, amr->amr_ioh, off, val);
207 bus_space_barrier(amr->amr_iot, amr->amr_ioh, off, 1,
208 BUS_SPACE_BARRIER_WRITE);
209 }
210
211 static inline void
212 amr_outl(struct amr_softc *amr, int off, u_int32_t val)
213 {
214
215 bus_space_write_4(amr->amr_iot, amr->amr_ioh, off, val);
216 bus_space_barrier(amr->amr_iot, amr->amr_ioh, off, 4,
217 BUS_SPACE_BARRIER_WRITE);
218 }
219
220 /*
221 * Match a supported device.
222 */
223 static int
224 amr_match(struct device *parent, struct cfdata *match,
225 void *aux)
226 {
227 struct pci_attach_args *pa;
228 pcireg_t s;
229 int i;
230
231 pa = (struct pci_attach_args *)aux;
232
233 /*
234 * Don't match the device if it's operating in I2O mode. In this
235 * case it should be handled by the `iop' driver.
236 */
237 if (PCI_CLASS(pa->pa_class) == PCI_CLASS_I2O)
238 return (0);
239
240 for (i = 0; i < sizeof(amr_pci_type) / sizeof(amr_pci_type[0]); i++)
241 if (PCI_VENDOR(pa->pa_id) == amr_pci_type[i].apt_vendor &&
242 PCI_PRODUCT(pa->pa_id) == amr_pci_type[i].apt_product)
243 break;
244
245 if (i == sizeof(amr_pci_type) / sizeof(amr_pci_type[0]))
246 return (0);
247
248 if ((amr_pci_type[i].apt_flags & AT_SIG) == 0)
249 return (1);
250
251 s = pci_conf_read(pa->pa_pc, pa->pa_tag, AMR_QUARTZ_SIG_REG) & 0xffff;
252 return (s == AMR_QUARTZ_SIG0 || s == AMR_QUARTZ_SIG1);
253 }
254
255 /*
256 * Attach a supported device.
257 */
258 static void
259 amr_attach(struct device *parent, struct device *self, void *aux)
260 {
261 struct pci_attach_args *pa;
262 struct amr_attach_args amra;
263 const struct amr_pci_type *apt;
264 struct amr_softc *amr;
265 pci_chipset_tag_t pc;
266 pci_intr_handle_t ih;
267 const char *intrstr;
268 pcireg_t reg;
269 int rseg, i, j, size, rv, memreg, ioreg;
270 struct amr_ccb *ac;
271 int locs[AMRCF_NLOCS];
272 int unit, major;
273
274 aprint_naive(": RAID controller\n");
275
276 amr = (struct amr_softc *)self;
277 pa = (struct pci_attach_args *)aux;
278 pc = pa->pa_pc;
279
280 for (i = 0; i < sizeof(amr_pci_type) / sizeof(amr_pci_type[0]); i++)
281 if (PCI_VENDOR(pa->pa_id) == amr_pci_type[i].apt_vendor &&
282 PCI_PRODUCT(pa->pa_id) == amr_pci_type[i].apt_product)
283 break;
284 apt = amr_pci_type + i;
285
286 memreg = ioreg = 0;
287 for (i = 0x10; i <= 0x14; i += 4) {
288 reg = pci_conf_read(pc, pa->pa_tag, i);
289 switch (PCI_MAPREG_TYPE(reg)) {
290 case PCI_MAPREG_TYPE_MEM:
291 if (PCI_MAPREG_MEM_SIZE(reg) != 0)
292 memreg = i;
293 break;
294 case PCI_MAPREG_TYPE_IO:
295 if (PCI_MAPREG_IO_SIZE(reg) != 0)
296 ioreg = i;
297 break;
298
299 }
300 }
301
302 if (memreg && pci_mapreg_map(pa, memreg, PCI_MAPREG_TYPE_MEM, 0,
303 &amr->amr_iot, &amr->amr_ioh, NULL, &amr->amr_ios) == 0)
304 ;
305 else if (ioreg && pci_mapreg_map(pa, ioreg, PCI_MAPREG_TYPE_IO, 0,
306 &amr->amr_iot, &amr->amr_ioh, NULL, &amr->amr_ios) == 0)
307 ;
308 else {
309 aprint_error("can't map control registers\n");
310 amr_teardown(amr);
311 return;
312 }
313
314 amr->amr_flags |= AMRF_PCI_REGS;
315 amr->amr_dmat = pa->pa_dmat;
316 amr->amr_pc = pa->pa_pc;
317
318 /* Enable the device. */
319 reg = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
320 pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG,
321 reg | PCI_COMMAND_MASTER_ENABLE);
322
323 /* Map and establish the interrupt. */
324 if (pci_intr_map(pa, &ih)) {
325 aprint_error("can't map interrupt\n");
326 amr_teardown(amr);
327 return;
328 }
329 intrstr = pci_intr_string(pc, ih);
330 amr->amr_ih = pci_intr_establish(pc, ih, IPL_BIO, amr_intr, amr);
331 if (amr->amr_ih == NULL) {
332 aprint_error("can't establish interrupt");
333 if (intrstr != NULL)
334 aprint_normal(" at %s", intrstr);
335 aprint_normal("\n");
336 amr_teardown(amr);
337 return;
338 }
339 amr->amr_flags |= AMRF_PCI_INTR;
340
341 /*
342 * Allocate space for the mailbox and S/G lists. Some controllers
343 * don't like S/G lists to be located below 0x2000, so we allocate
344 * enough slop to enable us to compensate.
345 *
346 * The standard mailbox structure needs to be aligned on a 16-byte
347 * boundary. The 64-bit mailbox has one extra field, 4 bytes in
348 * size, which precedes the standard mailbox.
349 */
350 size = AMR_SGL_SIZE * AMR_MAX_CMDS + 0x2000;
351 amr->amr_dmasize = size;
352
353 if ((rv = bus_dmamem_alloc(amr->amr_dmat, size, PAGE_SIZE, 0,
354 &amr->amr_dmaseg, 1, &rseg, BUS_DMA_NOWAIT)) != 0) {
355 aprint_error_dev(&amr->amr_dv, "unable to allocate buffer, rv = %d\n",
356 rv);
357 amr_teardown(amr);
358 return;
359 }
360 amr->amr_flags |= AMRF_DMA_ALLOC;
361
362 if ((rv = bus_dmamem_map(amr->amr_dmat, &amr->amr_dmaseg, rseg, size,
363 (void **)&amr->amr_mbox,
364 BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) != 0) {
365 aprint_error_dev(&amr->amr_dv, "unable to map buffer, rv = %d\n",
366 rv);
367 amr_teardown(amr);
368 return;
369 }
370 amr->amr_flags |= AMRF_DMA_MAP;
371
372 if ((rv = bus_dmamap_create(amr->amr_dmat, size, 1, size, 0,
373 BUS_DMA_NOWAIT, &amr->amr_dmamap)) != 0) {
374 aprint_error_dev(&amr->amr_dv, "unable to create buffer DMA map, rv = %d\n",
375 rv);
376 amr_teardown(amr);
377 return;
378 }
379 amr->amr_flags |= AMRF_DMA_CREATE;
380
381 if ((rv = bus_dmamap_load(amr->amr_dmat, amr->amr_dmamap,
382 amr->amr_mbox, size, NULL, BUS_DMA_NOWAIT)) != 0) {
383 aprint_error_dev(&amr->amr_dv, "unable to load buffer DMA map, rv = %d\n",
384 rv);
385 amr_teardown(amr);
386 return;
387 }
388 amr->amr_flags |= AMRF_DMA_LOAD;
389
390 memset(amr->amr_mbox, 0, size);
391
392 amr->amr_mbox_paddr = amr->amr_dmamap->dm_segs[0].ds_addr;
393 amr->amr_sgls_paddr = (amr->amr_mbox_paddr + 0x1fff) & ~0x1fff;
394 amr->amr_sgls = (struct amr_sgentry *)((char *)amr->amr_mbox +
395 amr->amr_sgls_paddr - amr->amr_dmamap->dm_segs[0].ds_addr);
396
397 /*
398 * Allocate and initalise the command control blocks.
399 */
400 ac = malloc(sizeof(*ac) * AMR_MAX_CMDS, M_DEVBUF, M_NOWAIT | M_ZERO);
401 amr->amr_ccbs = ac;
402 SLIST_INIT(&amr->amr_ccb_freelist);
403 TAILQ_INIT(&amr->amr_ccb_active);
404 amr->amr_flags |= AMRF_CCBS;
405
406 if (amr_max_xfer == 0) {
407 amr_max_xfer = min(((AMR_MAX_SEGS - 1) * PAGE_SIZE), MAXPHYS);
408 amr_max_segs = (amr_max_xfer + (PAGE_SIZE * 2) - 1) / PAGE_SIZE;
409 }
410
411 for (i = 0; i < AMR_MAX_CMDS; i++, ac++) {
412 rv = bus_dmamap_create(amr->amr_dmat, amr_max_xfer,
413 amr_max_segs, amr_max_xfer, 0,
414 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &ac->ac_xfer_map);
415 if (rv != 0)
416 break;
417
418 ac->ac_ident = i;
419 amr_ccb_free(amr, ac);
420 }
421 if (i != AMR_MAX_CMDS) {
422 aprint_error_dev(&amr->amr_dv, "memory exhausted\n");
423 amr_teardown(amr);
424 return;
425 }
426
427 /*
428 * Take care of model-specific tasks.
429 */
430 if ((apt->apt_flags & AT_QUARTZ) != 0) {
431 amr->amr_submit = amr_quartz_submit;
432 amr->amr_get_work = amr_quartz_get_work;
433 } else {
434 amr->amr_submit = amr_std_submit;
435 amr->amr_get_work = amr_std_get_work;
436
437 /* Notify the controller of the mailbox location. */
438 amr_outl(amr, AMR_SREG_MBOX, (u_int32_t)amr->amr_mbox_paddr + 16);
439 amr_outb(amr, AMR_SREG_MBOX_ENABLE, AMR_SMBOX_ENABLE_ADDR);
440
441 /* Clear outstanding interrupts and enable interrupts. */
442 amr_outb(amr, AMR_SREG_CMD, AMR_SCMD_ACKINTR);
443 amr_outb(amr, AMR_SREG_TOGL,
444 amr_inb(amr, AMR_SREG_TOGL) | AMR_STOGL_ENABLE);
445 }
446
447 /*
448 * Retrieve parameters, and tell the world about us.
449 */
450 amr->amr_enqbuf = malloc(AMR_ENQUIRY_BUFSIZE, M_DEVBUF, M_NOWAIT);
451 amr->amr_flags |= AMRF_ENQBUF;
452 amr->amr_maxqueuecnt = i;
453 aprint_normal(": AMI RAID ");
454 if (amr_init(amr, intrstr, pa) != 0) {
455 amr_teardown(amr);
456 return;
457 }
458
459 /*
460 * Cap the maximum number of outstanding commands. AMI's Linux
461 * driver doesn't trust the controller's reported value, and lockups
462 * have been seen when we do.
463 */
464 amr->amr_maxqueuecnt = min(amr->amr_maxqueuecnt, AMR_MAX_CMDS);
465 if (amr->amr_maxqueuecnt > i)
466 amr->amr_maxqueuecnt = i;
467
468 /* Set our `shutdownhook' before we start any device activity. */
469 if (amr_sdh == NULL)
470 amr_sdh = shutdownhook_establish(amr_shutdown, NULL);
471
472 /* Attach sub-devices. */
473 for (j = 0; j < amr->amr_numdrives; j++) {
474 if (amr->amr_drive[j].al_size == 0)
475 continue;
476 amra.amra_unit = j;
477
478 locs[AMRCF_UNIT] = j;
479
480 amr->amr_drive[j].al_dv = config_found_sm_loc(&amr->amr_dv,
481 "amr", locs, &amra, amr_print, config_stdsubmatch);
482 }
483
484 SIMPLEQ_INIT(&amr->amr_ccb_queue);
485
486 /* XXX This doesn't work for newer boards yet. */
487 if ((apt->apt_flags & AT_QUARTZ) == 0) {
488 rv = kthread_create(PRI_NONE, 0, NULL, amr_thread, amr,
489 &amr->amr_thread, "%s", device_xname(&amr->amr_dv));
490 if (rv != 0)
491 aprint_error_dev(&amr->amr_dv, "unable to create thread (%d)",
492 rv);
493 else
494 amr->amr_flags |= AMRF_THREAD;
495 }
496
497 major = cdevsw_lookup_major(&amr_cdevsw);
498 unit = device_unit(self);
499 device_register_name(makedev(major, unit), self, true,
500 DEV_DISK, device_xname(self));
501 }
502
503 /*
504 * Free up resources.
505 */
506 static void
507 amr_teardown(struct amr_softc *amr)
508 {
509 struct amr_ccb *ac;
510 int fl;
511
512 fl = amr->amr_flags;
513
514 if ((fl & AMRF_THREAD) != 0) {
515 amr->amr_flags |= AMRF_THREAD_EXIT;
516 wakeup(amr_thread);
517 while ((amr->amr_flags & AMRF_THREAD_EXIT) != 0)
518 tsleep(&amr->amr_flags, PWAIT, "amrexit", 0);
519 }
520 if ((fl & AMRF_CCBS) != 0) {
521 SLIST_FOREACH(ac, &amr->amr_ccb_freelist, ac_chain.slist) {
522 bus_dmamap_destroy(amr->amr_dmat, ac->ac_xfer_map);
523 }
524 free(amr->amr_ccbs, M_DEVBUF);
525 }
526 if ((fl & AMRF_ENQBUF) != 0)
527 free(amr->amr_enqbuf, M_DEVBUF);
528 if ((fl & AMRF_DMA_LOAD) != 0)
529 bus_dmamap_unload(amr->amr_dmat, amr->amr_dmamap);
530 if ((fl & AMRF_DMA_MAP) != 0)
531 bus_dmamem_unmap(amr->amr_dmat, (void *)amr->amr_mbox,
532 amr->amr_dmasize);
533 if ((fl & AMRF_DMA_ALLOC) != 0)
534 bus_dmamem_free(amr->amr_dmat, &amr->amr_dmaseg, 1);
535 if ((fl & AMRF_DMA_CREATE) != 0)
536 bus_dmamap_destroy(amr->amr_dmat, amr->amr_dmamap);
537 if ((fl & AMRF_PCI_INTR) != 0)
538 pci_intr_disestablish(amr->amr_pc, amr->amr_ih);
539 if ((fl & AMRF_PCI_REGS) != 0)
540 bus_space_unmap(amr->amr_iot, amr->amr_ioh, amr->amr_ios);
541 }
542
543 /*
544 * Print autoconfiguration message for a sub-device.
545 */
546 static int
547 amr_print(void *aux, const char *pnp)
548 {
549 struct amr_attach_args *amra;
550
551 amra = (struct amr_attach_args *)aux;
552
553 if (pnp != NULL)
554 aprint_normal("block device at %s", pnp);
555 aprint_normal(" unit %d", amra->amra_unit);
556 return (UNCONF);
557 }
558
559 /*
560 * Retrieve operational parameters and describe the controller.
561 */
562 static int
563 amr_init(struct amr_softc *amr, const char *intrstr,
564 struct pci_attach_args *pa)
565 {
566 struct amr_adapter_info *aa;
567 struct amr_prodinfo *ap;
568 struct amr_enquiry *ae;
569 struct amr_enquiry3 *aex;
570 const char *prodstr;
571 u_int i, sig, ishp;
572 char sbuf[64];
573
574 /*
575 * Try to get 40LD product info, which tells us what the card is
576 * labelled as.
577 */
578 ap = amr_enquire(amr, AMR_CMD_CONFIG, AMR_CONFIG_PRODUCT_INFO, 0,
579 amr->amr_enqbuf);
580 if (ap != NULL) {
581 aprint_normal("<%.80s>\n", ap->ap_product);
582 if (intrstr != NULL)
583 aprint_normal_dev(&amr->amr_dv, "interrupting at %s\n",
584 intrstr);
585 aprint_normal_dev(&amr->amr_dv, "firmware %.16s, BIOS %.16s, %dMB RAM\n",
586 ap->ap_firmware, ap->ap_bios,
587 le16toh(ap->ap_memsize));
588
589 amr->amr_maxqueuecnt = ap->ap_maxio;
590
591 /*
592 * Fetch and record state of logical drives.
593 */
594 aex = amr_enquire(amr, AMR_CMD_CONFIG, AMR_CONFIG_ENQ3,
595 AMR_CONFIG_ENQ3_SOLICITED_FULL, amr->amr_enqbuf);
596 if (aex == NULL) {
597 aprint_error_dev(&amr->amr_dv, "ENQUIRY3 failed\n");
598 return (-1);
599 }
600
601 if (aex->ae_numldrives > __arraycount(aex->ae_drivestate)) {
602 aprint_error_dev(&amr->amr_dv, "Inquiry returned more drives (%d)"
603 " than the array can handle (%zu)\n",
604 aex->ae_numldrives,
605 __arraycount(aex->ae_drivestate));
606 aex->ae_numldrives = __arraycount(aex->ae_drivestate);
607 }
608 if (aex->ae_numldrives > AMR_MAX_UNITS) {
609 aprint_error_dev(&amr->amr_dv,
610 "adjust AMR_MAX_UNITS to %d (currently %d)"
611 "\n", AMR_MAX_UNITS,
612 amr->amr_numdrives);
613 amr->amr_numdrives = AMR_MAX_UNITS;
614 } else
615 amr->amr_numdrives = aex->ae_numldrives;
616
617 for (i = 0; i < amr->amr_numdrives; i++) {
618 amr->amr_drive[i].al_size =
619 le32toh(aex->ae_drivesize[i]);
620 amr->amr_drive[i].al_state = aex->ae_drivestate[i];
621 amr->amr_drive[i].al_properties = aex->ae_driveprop[i];
622 }
623
624 return (0);
625 }
626
627 /*
628 * Try 8LD extended ENQUIRY to get the controller signature. Once
629 * found, search for a product description.
630 */
631 ae = amr_enquire(amr, AMR_CMD_EXT_ENQUIRY2, 0, 0, amr->amr_enqbuf);
632 if (ae != NULL) {
633 i = 0;
634 sig = le32toh(ae->ae_signature);
635
636 while (i < sizeof(amr_typestr) / sizeof(amr_typestr[0])) {
637 if (amr_typestr[i].at_sig == sig)
638 break;
639 i++;
640 }
641 if (i == sizeof(amr_typestr) / sizeof(amr_typestr[0])) {
642 snprintf(sbuf, sizeof(sbuf),
643 "unknown ENQUIRY2 sig (0x%08x)", sig);
644 prodstr = sbuf;
645 } else
646 prodstr = amr_typestr[i].at_str;
647 } else {
648 ae = amr_enquire(amr, AMR_CMD_ENQUIRY, 0, 0, amr->amr_enqbuf);
649 if (ae == NULL) {
650 aprint_error_dev(&amr->amr_dv, "unsupported controller\n");
651 return (-1);
652 }
653
654 switch (PCI_PRODUCT(pa->pa_id)) {
655 case PCI_PRODUCT_AMI_MEGARAID:
656 prodstr = "Series 428";
657 break;
658 case PCI_PRODUCT_AMI_MEGARAID2:
659 prodstr = "Series 434";
660 break;
661 default:
662 snprintf(sbuf, sizeof(sbuf), "unknown PCI dev (0x%04x)",
663 PCI_PRODUCT(pa->pa_id));
664 prodstr = sbuf;
665 break;
666 }
667 }
668
669 /*
670 * HP NetRaid controllers have a special encoding of the firmware
671 * and BIOS versions. The AMI version seems to have it as strings
672 * whereas the HP version does it with a leading uppercase character
673 * and two binary numbers.
674 */
675 aa = &ae->ae_adapter;
676
677 if (aa->aa_firmware[2] >= 'A' && aa->aa_firmware[2] <= 'Z' &&
678 aa->aa_firmware[1] < ' ' && aa->aa_firmware[0] < ' ' &&
679 aa->aa_bios[2] >= 'A' && aa->aa_bios[2] <= 'Z' &&
680 aa->aa_bios[1] < ' ' && aa->aa_bios[0] < ' ') {
681 if (le32toh(ae->ae_signature) == AMR_SIG_438) {
682 /* The AMI 438 is a NetRaid 3si in HP-land. */
683 prodstr = "HP NetRaid 3si";
684 }
685 ishp = 1;
686 } else
687 ishp = 0;
688
689 aprint_normal("<%s>\n", prodstr);
690 if (intrstr != NULL)
691 aprint_normal_dev(&amr->amr_dv, "interrupting at %s\n",
692 intrstr);
693
694 if (ishp)
695 aprint_normal_dev(&amr->amr_dv, "firmware <%c.%02d.%02d>, BIOS <%c.%02d.%02d>"
696 ", %dMB RAM\n", aa->aa_firmware[2],
697 aa->aa_firmware[1], aa->aa_firmware[0], aa->aa_bios[2],
698 aa->aa_bios[1], aa->aa_bios[0], aa->aa_memorysize);
699 else
700 aprint_normal_dev(&amr->amr_dv, "firmware <%.4s>, BIOS <%.4s>, %dMB RAM\n",
701 aa->aa_firmware, aa->aa_bios,
702 aa->aa_memorysize);
703
704 amr->amr_maxqueuecnt = aa->aa_maxio;
705
706 /*
707 * Record state of logical drives.
708 */
709 if (ae->ae_ldrv.al_numdrives > __arraycount(ae->ae_ldrv.al_size)) {
710 aprint_error_dev(&amr->amr_dv, "Inquiry returned more drives (%d)"
711 " than the array can handle (%zu)\n",
712 ae->ae_ldrv.al_numdrives,
713 __arraycount(ae->ae_ldrv.al_size));
714 ae->ae_ldrv.al_numdrives = __arraycount(ae->ae_ldrv.al_size);
715 }
716 if (ae->ae_ldrv.al_numdrives > AMR_MAX_UNITS) {
717 aprint_error_dev(&amr->amr_dv, "adjust AMR_MAX_UNITS to %d (currently %d)\n",
718 ae->ae_ldrv.al_numdrives,
719 AMR_MAX_UNITS);
720 amr->amr_numdrives = AMR_MAX_UNITS;
721 } else
722 amr->amr_numdrives = ae->ae_ldrv.al_numdrives;
723
724 for (i = 0; i < amr->amr_numdrives; i++) {
725 amr->amr_drive[i].al_size = le32toh(ae->ae_ldrv.al_size[i]);
726 amr->amr_drive[i].al_state = ae->ae_ldrv.al_state[i];
727 amr->amr_drive[i].al_properties = ae->ae_ldrv.al_properties[i];
728 }
729
730 return (0);
731 }
732
733 /*
734 * Flush the internal cache on each configured controller. Called at
735 * shutdown time.
736 */
737 static void
738 amr_shutdown(void *cookie)
739 {
740 extern struct cfdriver amr_cd;
741 struct amr_softc *amr;
742 struct amr_ccb *ac;
743 int i, rv, s;
744
745 for (i = 0; i < amr_cd.cd_ndevs; i++) {
746 if ((amr = device_lookup(&amr_cd, i)) == NULL)
747 continue;
748
749 if ((rv = amr_ccb_alloc(amr, &ac)) == 0) {
750 ac->ac_cmd.mb_command = AMR_CMD_FLUSH;
751 s = splbio();
752 rv = amr_ccb_poll(amr, ac, 30000);
753 splx(s);
754 amr_ccb_free(amr, ac);
755 }
756 if (rv != 0)
757 aprint_error_dev(&amr->amr_dv, "unable to flush cache (%d)\n", rv);
758 }
759 }
760
761 /*
762 * Interrupt service routine.
763 */
764 static int
765 amr_intr(void *cookie)
766 {
767 struct amr_softc *amr;
768 struct amr_ccb *ac;
769 struct amr_mailbox_resp mbox;
770 u_int i, forus, idx;
771
772 amr = cookie;
773 forus = 0;
774
775 while ((*amr->amr_get_work)(amr, &mbox) == 0) {
776 /* Iterate over completed commands in this result. */
777 for (i = 0; i < mbox.mb_nstatus; i++) {
778 idx = mbox.mb_completed[i] - 1;
779 ac = amr->amr_ccbs + idx;
780
781 if (idx >= amr->amr_maxqueuecnt) {
782 printf("%s: bad status (bogus ID: %u=%u)\n",
783 device_xname(&amr->amr_dv), i, idx);
784 continue;
785 }
786
787 if ((ac->ac_flags & AC_ACTIVE) == 0) {
788 printf("%s: bad status (not active; 0x04%x)\n",
789 device_xname(&amr->amr_dv), ac->ac_flags);
790 continue;
791 }
792
793 ac->ac_status = mbox.mb_status;
794 ac->ac_flags = (ac->ac_flags & ~AC_ACTIVE) |
795 AC_COMPLETE;
796 TAILQ_REMOVE(&amr->amr_ccb_active, ac, ac_chain.tailq);
797
798 if ((ac->ac_flags & AC_MOAN) != 0)
799 printf("%s: ccb %d completed\n",
800 device_xname(&amr->amr_dv), ac->ac_ident);
801
802 /* Pass notification to upper layers. */
803 if (ac->ac_handler != NULL)
804 (*ac->ac_handler)(ac);
805 else
806 wakeup(ac);
807 }
808 forus = 1;
809 }
810
811 if (forus)
812 amr_ccb_enqueue(amr, NULL);
813
814 return (forus);
815 }
816
817 /*
818 * Watchdog thread.
819 */
820 static void
821 amr_thread(void *cookie)
822 {
823 struct amr_softc *amr;
824 struct amr_ccb *ac;
825 struct amr_logdrive *al;
826 struct amr_enquiry *ae;
827 int rv, i, s;
828
829 amr = cookie;
830 ae = amr->amr_enqbuf;
831
832 for (;;) {
833 tsleep(amr_thread, PWAIT, "amrwdog", AMR_WDOG_TICKS);
834
835 if ((amr->amr_flags & AMRF_THREAD_EXIT) != 0) {
836 amr->amr_flags ^= AMRF_THREAD_EXIT;
837 wakeup(&amr->amr_flags);
838 kthread_exit(0);
839 }
840
841 s = splbio();
842 amr_intr(cookie);
843 ac = TAILQ_FIRST(&amr->amr_ccb_active);
844 while (ac != NULL) {
845 if (ac->ac_start_time + AMR_TIMEOUT > time_uptime)
846 break;
847 if ((ac->ac_flags & AC_MOAN) == 0) {
848 printf("%s: ccb %d timed out; mailbox:\n",
849 device_xname(&amr->amr_dv), ac->ac_ident);
850 amr_ccb_dump(amr, ac);
851 ac->ac_flags |= AC_MOAN;
852 }
853 ac = TAILQ_NEXT(ac, ac_chain.tailq);
854 }
855 splx(s);
856
857 if ((rv = amr_ccb_alloc(amr, &ac)) != 0) {
858 printf("%s: ccb_alloc failed (%d)\n",
859 device_xname(&amr->amr_dv), rv);
860 continue;
861 }
862
863 ac->ac_cmd.mb_command = AMR_CMD_ENQUIRY;
864
865 rv = amr_ccb_map(amr, ac, amr->amr_enqbuf,
866 AMR_ENQUIRY_BUFSIZE, AC_XFER_IN);
867 if (rv != 0) {
868 aprint_error_dev(&amr->amr_dv, "ccb_map failed (%d)\n",
869 rv);
870 amr_ccb_free(amr, ac);
871 continue;
872 }
873
874 rv = amr_ccb_wait(amr, ac);
875 amr_ccb_unmap(amr, ac);
876 if (rv != 0) {
877 aprint_error_dev(&amr->amr_dv, "enquiry failed (st=%d)\n",
878 ac->ac_status);
879 continue;
880 }
881 amr_ccb_free(amr, ac);
882
883 al = amr->amr_drive;
884 for (i = 0; i < __arraycount(ae->ae_ldrv.al_state); i++, al++) {
885 if (al->al_dv == NULL)
886 continue;
887 if (al->al_state == ae->ae_ldrv.al_state[i])
888 continue;
889
890 printf("%s: state changed: %s -> %s\n",
891 device_xname(al->al_dv),
892 amr_drive_state(al->al_state, NULL),
893 amr_drive_state(ae->ae_ldrv.al_state[i], NULL));
894
895 al->al_state = ae->ae_ldrv.al_state[i];
896 }
897 }
898 }
899
900 /*
901 * Return a text description of a logical drive's current state.
902 */
903 const char *
904 amr_drive_state(int state, int *happy)
905 {
906 const char *str;
907
908 state = AMR_DRV_CURSTATE(state);
909 if (state >= sizeof(amr_dstate) / sizeof(amr_dstate[0])) {
910 if (happy)
911 *happy = 1;
912 str = "status unknown";
913 } else {
914 if (happy)
915 *happy = amr_dstate[state].ds_happy;
916 str = amr_dstate[state].ds_descr;
917 }
918
919 return (str);
920 }
921
922 /*
923 * Run a generic enquiry-style command.
924 */
925 static void *
926 amr_enquire(struct amr_softc *amr, u_int8_t cmd, u_int8_t cmdsub,
927 u_int8_t cmdqual, void *sbuf)
928 {
929 struct amr_ccb *ac;
930 u_int8_t *mb;
931 int rv;
932
933 if (amr_ccb_alloc(amr, &ac) != 0)
934 return (NULL);
935
936 /* Build the command proper. */
937 mb = (u_int8_t *)&ac->ac_cmd;
938 mb[0] = cmd;
939 mb[2] = cmdsub;
940 mb[3] = cmdqual;
941
942 rv = amr_ccb_map(amr, ac, sbuf, AMR_ENQUIRY_BUFSIZE, AC_XFER_IN);
943 if (rv == 0) {
944 rv = amr_ccb_poll(amr, ac, 2000);
945 amr_ccb_unmap(amr, ac);
946 }
947 amr_ccb_free(amr, ac);
948
949 return (rv ? NULL : sbuf);
950 }
951
952 /*
953 * Allocate and initialise a CCB.
954 */
955 int
956 amr_ccb_alloc(struct amr_softc *amr, struct amr_ccb **acp)
957 {
958 int s;
959
960 s = splbio();
961 if ((*acp = SLIST_FIRST(&amr->amr_ccb_freelist)) == NULL) {
962 splx(s);
963 return (EAGAIN);
964 }
965 SLIST_REMOVE_HEAD(&amr->amr_ccb_freelist, ac_chain.slist);
966 splx(s);
967
968 return (0);
969 }
970
971 /*
972 * Free a CCB.
973 */
974 void
975 amr_ccb_free(struct amr_softc *amr, struct amr_ccb *ac)
976 {
977 int s;
978
979 memset(&ac->ac_cmd, 0, sizeof(ac->ac_cmd));
980 ac->ac_cmd.mb_ident = ac->ac_ident + 1;
981 ac->ac_cmd.mb_busy = 1;
982 ac->ac_handler = NULL;
983 ac->ac_flags = 0;
984
985 s = splbio();
986 SLIST_INSERT_HEAD(&amr->amr_ccb_freelist, ac, ac_chain.slist);
987 splx(s);
988 }
989
990 /*
991 * If a CCB is specified, enqueue it. Pull CCBs off the software queue in
992 * the order that they were enqueued and try to submit their command blocks
993 * to the controller for execution.
994 */
995 void
996 amr_ccb_enqueue(struct amr_softc *amr, struct amr_ccb *ac)
997 {
998 int s;
999
1000 s = splbio();
1001
1002 if (ac != NULL)
1003 SIMPLEQ_INSERT_TAIL(&amr->amr_ccb_queue, ac, ac_chain.simpleq);
1004
1005 while ((ac = SIMPLEQ_FIRST(&amr->amr_ccb_queue)) != NULL) {
1006 if ((*amr->amr_submit)(amr, ac) != 0)
1007 break;
1008 SIMPLEQ_REMOVE_HEAD(&amr->amr_ccb_queue, ac_chain.simpleq);
1009 TAILQ_INSERT_TAIL(&amr->amr_ccb_active, ac, ac_chain.tailq);
1010 }
1011
1012 splx(s);
1013 }
1014
1015 /*
1016 * Map the specified CCB's data buffer onto the bus, and fill the
1017 * scatter-gather list.
1018 */
1019 int
1020 amr_ccb_map(struct amr_softc *amr, struct amr_ccb *ac, void *data, int size,
1021 int tflag)
1022 {
1023 struct amr_sgentry *sge;
1024 struct amr_mailbox_cmd *mb;
1025 int nsegs, i, rv, sgloff;
1026 bus_dmamap_t xfer;
1027 int dmaflag = 0;
1028
1029 xfer = ac->ac_xfer_map;
1030
1031 rv = bus_dmamap_load(amr->amr_dmat, xfer, data, size, NULL,
1032 BUS_DMA_NOWAIT);
1033 if (rv != 0)
1034 return (rv);
1035
1036 mb = &ac->ac_cmd;
1037 ac->ac_xfer_size = size;
1038 ac->ac_flags |= (tflag & (AC_XFER_OUT | AC_XFER_IN));
1039 sgloff = AMR_SGL_SIZE * ac->ac_ident;
1040
1041 if (tflag & AC_XFER_OUT)
1042 dmaflag |= BUS_DMASYNC_PREWRITE;
1043 if (tflag & AC_XFER_IN)
1044 dmaflag |= BUS_DMASYNC_PREREAD;
1045
1046 /* We don't need to use a scatter/gather list for just 1 segment. */
1047 nsegs = xfer->dm_nsegs;
1048 if (nsegs == 1) {
1049 mb->mb_nsgelem = 0;
1050 mb->mb_physaddr = htole32(xfer->dm_segs[0].ds_addr);
1051 ac->ac_flags |= AC_NOSGL;
1052 } else {
1053 mb->mb_nsgelem = nsegs;
1054 mb->mb_physaddr = htole32(amr->amr_sgls_paddr + sgloff);
1055
1056 sge = (struct amr_sgentry *)((char *)amr->amr_sgls + sgloff);
1057 for (i = 0; i < nsegs; i++, sge++) {
1058 sge->sge_addr = htole32(xfer->dm_segs[i].ds_addr);
1059 sge->sge_count = htole32(xfer->dm_segs[i].ds_len);
1060 }
1061 }
1062
1063 bus_dmamap_sync(amr->amr_dmat, xfer, 0, ac->ac_xfer_size, dmaflag);
1064
1065 if ((ac->ac_flags & AC_NOSGL) == 0)
1066 bus_dmamap_sync(amr->amr_dmat, amr->amr_dmamap, sgloff,
1067 AMR_SGL_SIZE, BUS_DMASYNC_PREWRITE);
1068
1069 return (0);
1070 }
1071
1072 /*
1073 * Unmap the specified CCB's data buffer.
1074 */
1075 void
1076 amr_ccb_unmap(struct amr_softc *amr, struct amr_ccb *ac)
1077 {
1078 int dmaflag = 0;
1079
1080 if (ac->ac_flags & AC_XFER_IN)
1081 dmaflag |= BUS_DMASYNC_POSTREAD;
1082 if (ac->ac_flags & AC_XFER_OUT)
1083 dmaflag |= BUS_DMASYNC_POSTWRITE;
1084
1085 if ((ac->ac_flags & AC_NOSGL) == 0)
1086 bus_dmamap_sync(amr->amr_dmat, amr->amr_dmamap,
1087 AMR_SGL_SIZE * ac->ac_ident, AMR_SGL_SIZE,
1088 BUS_DMASYNC_POSTWRITE);
1089 bus_dmamap_sync(amr->amr_dmat, ac->ac_xfer_map, 0, ac->ac_xfer_size,
1090 dmaflag);
1091 bus_dmamap_unload(amr->amr_dmat, ac->ac_xfer_map);
1092 }
1093
1094 /*
1095 * Submit a command to the controller and poll on completion. Return
1096 * non-zero on timeout or error. Must be called with interrupts blocked.
1097 */
1098 int
1099 amr_ccb_poll(struct amr_softc *amr, struct amr_ccb *ac, int timo)
1100 {
1101 int rv;
1102
1103 if ((rv = (*amr->amr_submit)(amr, ac)) != 0)
1104 return (rv);
1105 TAILQ_INSERT_TAIL(&amr->amr_ccb_active, ac, ac_chain.tailq);
1106
1107 for (timo *= 10; timo != 0; timo--) {
1108 amr_intr(amr);
1109 if ((ac->ac_flags & AC_COMPLETE) != 0)
1110 break;
1111 DELAY(100);
1112 }
1113
1114 return (timo == 0 || ac->ac_status != 0 ? EIO : 0);
1115 }
1116
1117 /*
1118 * Submit a command to the controller and sleep on completion. Return
1119 * non-zero on error.
1120 */
1121 int
1122 amr_ccb_wait(struct amr_softc *amr, struct amr_ccb *ac)
1123 {
1124 int s;
1125
1126 s = splbio();
1127 amr_ccb_enqueue(amr, ac);
1128 tsleep(ac, PRIBIO, "amrcmd", 0);
1129 splx(s);
1130
1131 return (ac->ac_status != 0 ? EIO : 0);
1132 }
1133
#if 0
/*
 * Wait for the mailbox to become available.
 *
 * Polls the busy flag in the shared mailbox in 100us steps (up to
 * 10000 iterations), re-syncing the DMA map before each read so a
 * fresh value is observed.  Returns 0 when the mailbox is free,
 * EAGAIN on timeout.
 *
 * NOTE(review): compiled out (#if 0); the submit routines perform
 * their own busy checks instead.
 */
static int
amr_mbox_wait(struct amr_softc *amr)
{
	int timo;

	for (timo = 10000; timo != 0; timo--) {
		bus_dmamap_sync(amr->amr_dmat, amr->amr_dmamap, 0,
		    sizeof(struct amr_mailbox), BUS_DMASYNC_POSTREAD);
		if (amr->amr_mbox->mb_cmd.mb_busy == 0)
			break;
		DELAY(100);
	}

	if (timo == 0)
		printf("%s: controller wedged\n", device_xname(&amr->amr_dv));

	return (timo != 0 ? 0 : EAGAIN);
}
#endif
1157
1158 /*
1159 * Tell the controller that the mailbox contains a valid command. Must be
1160 * called with interrupts blocked.
1161 */
1162 static int
1163 amr_quartz_submit(struct amr_softc *amr, struct amr_ccb *ac)
1164 {
1165 u_int32_t v;
1166
1167 amr->amr_mbox->mb_poll = 0;
1168 amr->amr_mbox->mb_ack = 0;
1169 bus_dmamap_sync(amr->amr_dmat, amr->amr_dmamap, 0,
1170 sizeof(struct amr_mailbox), BUS_DMASYNC_PREWRITE);
1171 bus_dmamap_sync(amr->amr_dmat, amr->amr_dmamap, 0,
1172 sizeof(struct amr_mailbox), BUS_DMASYNC_POSTREAD);
1173 if (amr->amr_mbox->mb_cmd.mb_busy != 0)
1174 return (EAGAIN);
1175
1176 v = amr_inl(amr, AMR_QREG_IDB);
1177 if ((v & AMR_QIDB_SUBMIT) != 0) {
1178 amr->amr_mbox->mb_cmd.mb_busy = 0;
1179 bus_dmamap_sync(amr->amr_dmat, amr->amr_dmamap, 0,
1180 sizeof(struct amr_mailbox), BUS_DMASYNC_PREWRITE);
1181 bus_dmamap_sync(amr->amr_dmat, amr->amr_dmamap, 0,
1182 sizeof(struct amr_mailbox), BUS_DMASYNC_PREREAD);
1183 return (EAGAIN);
1184 }
1185
1186 amr->amr_mbox->mb_segment = 0;
1187 memcpy(&amr->amr_mbox->mb_cmd, &ac->ac_cmd, sizeof(ac->ac_cmd));
1188 bus_dmamap_sync(amr->amr_dmat, amr->amr_dmamap, 0,
1189 sizeof(struct amr_mailbox), BUS_DMASYNC_PREWRITE);
1190
1191 ac->ac_start_time = time_uptime;
1192 ac->ac_flags |= AC_ACTIVE;
1193 amr_outl(amr, AMR_QREG_IDB,
1194 (amr->amr_mbox_paddr + 16) | AMR_QIDB_SUBMIT);
1195 return (0);
1196 }
1197
/*
 * Post a command from the mailbox to the controller ("standard",
 * register-based variant).  Must be called with interrupts blocked.
 * Returns EAGAIN if the mailbox is still busy (caller retries later),
 * 0 once posted.
 */
static int
amr_std_submit(struct amr_softc *amr, struct amr_ccb *ac)
{

	amr->amr_mbox->mb_poll = 0;
	amr->amr_mbox->mb_ack = 0;
	/* Flush our writes, then pull a fresh view of the busy flag. */
	bus_dmamap_sync(amr->amr_dmat, amr->amr_dmamap, 0,
	    sizeof(struct amr_mailbox), BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(amr->amr_dmat, amr->amr_dmamap, 0,
	    sizeof(struct amr_mailbox), BUS_DMASYNC_POSTREAD);
	if (amr->amr_mbox->mb_cmd.mb_busy != 0)
		return (EAGAIN);

	/* Also check the controller's own mailbox-busy register. */
	if ((amr_inb(amr, AMR_SREG_MBOX_BUSY) & AMR_SMBOX_BUSY_FLAG) != 0) {
		amr->amr_mbox->mb_cmd.mb_busy = 0;
		bus_dmamap_sync(amr->amr_dmat, amr->amr_dmamap, 0,
		    sizeof(struct amr_mailbox), BUS_DMASYNC_PREWRITE);
		bus_dmamap_sync(amr->amr_dmat, amr->amr_dmamap, 0,
		    sizeof(struct amr_mailbox), BUS_DMASYNC_PREREAD);
		return (EAGAIN);
	}

	/* Copy the command into the shared mailbox and flush it out. */
	amr->amr_mbox->mb_segment = 0;
	memcpy(&amr->amr_mbox->mb_cmd, &ac->ac_cmd, sizeof(ac->ac_cmd));
	bus_dmamap_sync(amr->amr_dmat, amr->amr_dmamap, 0,
	    sizeof(struct amr_mailbox), BUS_DMASYNC_PREWRITE);

	/* Timestamp for the watchdog, then tell the controller to go. */
	ac->ac_start_time = time_uptime;
	ac->ac_flags |= AC_ACTIVE;
	amr_outb(amr, AMR_SREG_CMD, AMR_SCMD_POST);
	return (0);
}
1230
1231 /*
1232 * Claim any work that the controller has completed; acknowledge completion,
1233 * save details of the completion in (mbsave). Must be called with
1234 * interrupts blocked.
1235 */
1236 static int
1237 amr_quartz_get_work(struct amr_softc *amr, struct amr_mailbox_resp *mbsave)
1238 {
1239
1240 /* Work waiting for us? */
1241 if (amr_inl(amr, AMR_QREG_ODB) != AMR_QODB_READY)
1242 return (-1);
1243
1244 bus_dmamap_sync(amr->amr_dmat, amr->amr_dmamap, 0,
1245 sizeof(struct amr_mailbox), BUS_DMASYNC_POSTREAD);
1246
1247 /* Save the mailbox, which contains a list of completed commands. */
1248 memcpy(mbsave, &amr->amr_mbox->mb_resp, sizeof(*mbsave));
1249
1250 bus_dmamap_sync(amr->amr_dmat, amr->amr_dmamap, 0,
1251 sizeof(struct amr_mailbox), BUS_DMASYNC_PREREAD);
1252
1253 /* Ack the interrupt and mailbox transfer. */
1254 amr_outl(amr, AMR_QREG_ODB, AMR_QODB_READY);
1255 amr_outl(amr, AMR_QREG_IDB, (amr->amr_mbox_paddr+16) | AMR_QIDB_ACK);
1256
1257 /*
1258 * This waits for the controller to notice that we've taken the
1259 * command from it. It's very inefficient, and we shouldn't do it,
1260 * but if we remove this code, we stop completing commands under
1261 * load.
1262 *
1263 * Peter J says we shouldn't do this. The documentation says we
1264 * should. Who is right?
1265 */
1266 while ((amr_inl(amr, AMR_QREG_IDB) & AMR_QIDB_ACK) != 0)
1267 DELAY(10);
1268
1269 return (0);
1270 }
1271
/*
 * Claim completed work from a "standard" (register-based) controller,
 * saving the completion details in (mbsave).  Must be called with
 * interrupts blocked.  Returns 0 when a completion was claimed, -1 if
 * no interrupt was pending.
 */
static int
amr_std_get_work(struct amr_softc *amr, struct amr_mailbox_resp *mbsave)
{
	u_int8_t istat;

	/* Check for valid interrupt status. */
	if (((istat = amr_inb(amr, AMR_SREG_INTR)) & AMR_SINTR_VALID) == 0)
		return (-1);

	/* Ack the interrupt. */
	amr_outb(amr, AMR_SREG_INTR, istat);

	/* Pull a fresh view of the mailbox before reading it. */
	bus_dmamap_sync(amr->amr_dmat, amr->amr_dmamap, 0,
	    sizeof(struct amr_mailbox), BUS_DMASYNC_POSTREAD);

	/* Save mailbox, which contains a list of completed commands. */
	memcpy(mbsave, &amr->amr_mbox->mb_resp, sizeof(*mbsave));

	bus_dmamap_sync(amr->amr_dmat, amr->amr_dmamap, 0,
	    sizeof(struct amr_mailbox), BUS_DMASYNC_PREREAD);

	/* Ack mailbox transfer. */
	amr_outb(amr, AMR_SREG_CMD, AMR_SCMD_ACKINTR);

	return (0);
}
1298
1299 static void
1300 amr_ccb_dump(struct amr_softc *amr, struct amr_ccb *ac)
1301 {
1302 int i;
1303
1304 printf("%s: ", device_xname(&amr->amr_dv));
1305 for (i = 0; i < 4; i++)
1306 printf("%08x ", ((u_int32_t *)&ac->ac_cmd)[i]);
1307 printf("\n");
1308 }
1309
1310 static int
1311 amropen(dev_t dev, int flag, int mode, struct lwp *l)
1312 {
1313 struct amr_softc *amr;
1314
1315 if ((amr = device_lookup(&amr_cd, minor(dev))) == NULL)
1316 return (ENXIO);
1317 if ((amr->amr_flags & AMRF_OPEN) != 0)
1318 return (EBUSY);
1319
1320 amr->amr_flags |= AMRF_OPEN;
1321 return (0);
1322 }
1323
1324 static int
1325 amrclose(dev_t dev, int flag, int mode, struct lwp *l)
1326 {
1327 struct amr_softc *amr;
1328
1329 amr = device_lookup(&amr_cd, minor(dev));
1330 amr->amr_flags &= ~AMRF_OPEN;
1331 return (0);
1332 }
1333
/*
 * Handle userland control requests (FreeBSD-compatible ioctl interface).
 *
 * AMR_IO_VERSION returns the interface version number.  AMR_IO_COMMAND
 * (after a device-passthru authorization check) copies the user's data
 * buffer into a kernel bounce buffer, issues the command to the
 * controller via a CCB, and copies the result back out.
 */
static int
amrioctl(dev_t dev, u_long cmd, void *data, int flag,
    struct lwp *l)
{
	struct amr_softc *amr;
	struct amr_user_ioctl *au;
	struct amr_ccb *ac;
	struct amr_mailbox_ioctl *mbi;
	unsigned long au_length;
	uint8_t *au_cmd;
	int error;
	void *dp = NULL, *au_buffer;

	amr = device_lookup(&amr_cd, minor(dev));

	/* This should be compatible with the FreeBSD interface */

	switch (cmd) {
	case AMR_IO_VERSION:
		*(int *)data = AMR_IO_VERSION_NUMBER;
		return 0;
	case AMR_IO_COMMAND:
		/* Raw controller access requires passthru privilege. */
		error = kauth_authorize_device_passthru(l->l_cred, dev,
		    KAUTH_REQ_DEVICE_RAWIO_PASSTHRU_ALL, data);
		if (error)
			return (error);

		au = (struct amr_user_ioctl *)data;
		au_cmd = au->au_cmd;
		au_buffer = au->au_buffer;
		au_length = au->au_length;
		break;
	default:
		return ENOTTY;
	}

	/* SCSI passthrough commands are not supported by this driver. */
	if (au_cmd[0] == AMR_CMD_PASS) {
		/* not yet */
		return EOPNOTSUPP;
	}

	/* Reject zero-length, over-MAXPHYS, and opcode 0x06 requests. */
	if (au_length <= 0 || au_length > MAXPHYS || au_cmd[0] == 0x06)
		return (EINVAL);

	/*
	 * allocate kernel memory for data, doing I/O directly to user
	 * buffer isn't that easy.
	 */
	dp = malloc(au_length, M_DEVBUF, M_WAITOK|M_ZERO);
	if (dp == NULL)
		return ENOMEM;
	if ((error = copyin(au_buffer, dp, au_length)) != 0)
		goto out;

	/* direct command to controller */
	/* No CCB free: sleep (interruptibly) and retry each second. */
	while (amr_ccb_alloc(amr, &ac) != 0) {
		error = tsleep(NULL, PRIBIO | PCATCH, "armmbx", hz);
		if (error == EINTR)
			goto out;
	}

	/* Build the mailbox command from the user-supplied bytes. */
	mbi = (struct amr_mailbox_ioctl *)&ac->ac_cmd;
	mbi->mb_command = au_cmd[0];
	mbi->mb_channel = au_cmd[1];
	mbi->mb_param = au_cmd[2];
	mbi->mb_pad[0] = au_cmd[3];
	mbi->mb_drive = au_cmd[4];
	/* Map bidirectionally: the command may read and/or write. */
	error = amr_ccb_map(amr, ac, dp, (int)au_length,
	    AC_XFER_IN | AC_XFER_OUT);
	if (error == 0) {
		error = amr_ccb_wait(amr, ac);
		amr_ccb_unmap(amr, ac);
		if (error == 0)
			error = copyout(dp, au_buffer, au_length);

	}
	amr_ccb_free(amr, ac);
 out:
	free(dp, M_DEVBUF);
	return (error);
}
1415