amr.c revision 1.30 1 /* $NetBSD: amr.c,v 1.30 2005/12/11 12:22:48 christos Exp $ */
2
3 /*-
4 * Copyright (c) 2002, 2003 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Andrew Doran.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 3. All advertising materials mentioning features or use of this software
19 * must display the following acknowledgement:
20 * This product includes software developed by the NetBSD
21 * Foundation, Inc. and its contributors.
22 * 4. Neither the name of The NetBSD Foundation nor the names of its
23 * contributors may be used to endorse or promote products derived
24 * from this software without specific prior written permission.
25 *
26 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
27 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
28 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
29 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
30 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
31 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
32 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
33 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
34 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
35 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
36 * POSSIBILITY OF SUCH DAMAGE.
37 */
38
39 /*-
40 * Copyright (c) 1999,2000 Michael Smith
41 * Copyright (c) 2000 BSDi
42 * All rights reserved.
43 *
44 * Redistribution and use in source and binary forms, with or without
45 * modification, are permitted provided that the following conditions
46 * are met:
47 * 1. Redistributions of source code must retain the above copyright
48 * notice, this list of conditions and the following disclaimer.
49 * 2. Redistributions in binary form must reproduce the above copyright
50 * notice, this list of conditions and the following disclaimer in the
51 * documentation and/or other materials provided with the distribution.
52 *
53 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
54 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
55 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
56 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
57 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
58 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
59 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
60 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
61 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
62 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
63 * SUCH DAMAGE.
64 *
65 * from FreeBSD: amr_pci.c,v 1.5 2000/08/30 07:52:40 msmith Exp
66 * from FreeBSD: amr.c,v 1.16 2000/08/30 07:52:40 msmith Exp
67 */
68
69 /*
70 * Driver for AMI RAID controllers.
71 */
72
73 #include <sys/cdefs.h>
74 __KERNEL_RCSID(0, "$NetBSD: amr.c,v 1.30 2005/12/11 12:22:48 christos Exp $");
75
76 #include <sys/param.h>
77 #include <sys/systm.h>
78 #include <sys/kernel.h>
79 #include <sys/device.h>
80 #include <sys/queue.h>
81 #include <sys/proc.h>
82 #include <sys/buf.h>
83 #include <sys/malloc.h>
84 #include <sys/kthread.h>
85
86 #include <uvm/uvm_extern.h>
87
88 #include <machine/endian.h>
89 #include <machine/bus.h>
90
91 #include <dev/pci/pcidevs.h>
92 #include <dev/pci/pcivar.h>
93 #include <dev/pci/amrreg.h>
94 #include <dev/pci/amrvar.h>
95
96 #include "locators.h"
97
98 static void amr_attach(struct device *, struct device *, void *);
99 static void amr_ccb_dump(struct amr_softc *, struct amr_ccb *);
100 static void *amr_enquire(struct amr_softc *, u_int8_t, u_int8_t, u_int8_t,
101 void *);
102 static int amr_init(struct amr_softc *, const char *,
103 struct pci_attach_args *pa);
104 static int amr_intr(void *);
105 static int amr_match(struct device *, struct cfdata *, void *);
106 static int amr_print(void *, const char *);
107 static void amr_shutdown(void *);
108 static void amr_teardown(struct amr_softc *);
109 static void amr_thread(void *);
110 static void amr_thread_create(void *);
111
112 static int amr_quartz_get_work(struct amr_softc *,
113 struct amr_mailbox_resp *);
114 static int amr_quartz_submit(struct amr_softc *, struct amr_ccb *);
115 static int amr_std_get_work(struct amr_softc *, struct amr_mailbox_resp *);
116 static int amr_std_submit(struct amr_softc *, struct amr_ccb *);
117
118 CFATTACH_DECL(amr, sizeof(struct amr_softc),
119 amr_match, amr_attach, NULL, NULL);
120
#define AT_QUARTZ	0x01	/* `Quartz' chipset */
#define AT_SIG		0x02	/* Check for signature */

/*
 * Table of PCI vendor/product IDs claimed by this driver, scanned by
 * amr_match() and amr_attach().  AT_QUARTZ selects the newer `Quartz'
 * register interface; AT_SIG marks IDs that are ambiguous on their own
 * and additionally require the Quartz signature register to match.
 */
struct amr_pci_type {
	u_short	apt_vendor;	/* PCI vendor ID */
	u_short	apt_product;	/* PCI product ID */
	u_short	apt_flags;	/* AT_* flags */
} static const amr_pci_type[] = {
	{ PCI_VENDOR_AMI,     PCI_PRODUCT_AMI_MEGARAID,  0 },
	{ PCI_VENDOR_AMI,     PCI_PRODUCT_AMI_MEGARAID2, 0 },
	{ PCI_VENDOR_AMI,     PCI_PRODUCT_AMI_MEGARAID3, AT_QUARTZ },
	{ PCI_VENDOR_SYMBIOS, PCI_PRODUCT_AMI_MEGARAID3, AT_QUARTZ },
	{ PCI_VENDOR_INTEL,   PCI_PRODUCT_AMI_MEGARAID3, AT_QUARTZ | AT_SIG },
	{ PCI_VENDOR_DELL,    PCI_PRODUCT_DELL_PERC_4DI,   AT_QUARTZ },
	{ PCI_VENDOR_DELL,    PCI_PRODUCT_DELL_PERC_4DI_2, AT_QUARTZ },
	{ PCI_VENDOR_DELL,    PCI_PRODUCT_DELL_PERC_4ESI,  AT_QUARTZ },
	{ PCI_VENDOR_SYMBIOS, PCI_PRODUCT_SYMBIOS_PERC_4SC, AT_QUARTZ },
};
139
/*
 * Map 8LD extended ENQUIRY2 controller signatures to human-readable
 * product names; consulted by amr_init() when 40LD product info is
 * unavailable.
 */
struct amr_typestr {
	const char	*at_str;	/* product description */
	int		at_sig;		/* AMR_SIG_* signature value */
} static const amr_typestr[] = {
	{ "Series 431",      AMR_SIG_431 },
	{ "Series 438",      AMR_SIG_438 },
	{ "Series 466",      AMR_SIG_466 },
	{ "Series 467",      AMR_SIG_467 },
	{ "Series 490",      AMR_SIG_490 },
	{ "Series 762",      AMR_SIG_762 },
	{ "HP NetRAID (T5)", AMR_SIG_T5 },
	{ "HP NetRAID (T7)", AMR_SIG_T7 },
};
153
/*
 * Logical drive state descriptions, indexed by the value that
 * amr_drive_state() derives via AMR_DRV_CURSTATE().  ds_happy is
 * non-zero for states in which the drive is considered usable.
 */
struct {
	const char	*ds_descr;	/* human-readable state name */
	int		ds_happy;	/* non-zero if drive is usable */
} static const amr_dstate[] = {
	{ "offline",    0 },
	{ "degraded",   1 },
	{ "optimal",    1 },
	{ "online",     1 },
	{ "failed",     0 },
	{ "rebuilding", 1 },
	{ "hotspare",   0 },
};

/* Shutdown hook for cache flushing; shared by all amr instances. */
static void *amr_sdh;

/* Transfer limits, computed once at first attach (see amr_attach()). */
static int amr_max_segs;
int amr_max_xfer;
171
172 static inline u_int8_t
173 amr_inb(struct amr_softc *amr, int off)
174 {
175
176 bus_space_barrier(amr->amr_iot, amr->amr_ioh, off, 1,
177 BUS_SPACE_BARRIER_WRITE | BUS_SPACE_BARRIER_READ);
178 return (bus_space_read_1(amr->amr_iot, amr->amr_ioh, off));
179 }
180
181 static inline u_int32_t
182 amr_inl(struct amr_softc *amr, int off)
183 {
184
185 bus_space_barrier(amr->amr_iot, amr->amr_ioh, off, 4,
186 BUS_SPACE_BARRIER_WRITE | BUS_SPACE_BARRIER_READ);
187 return (bus_space_read_4(amr->amr_iot, amr->amr_ioh, off));
188 }
189
190 static inline void
191 amr_outb(struct amr_softc *amr, int off, u_int8_t val)
192 {
193
194 bus_space_write_1(amr->amr_iot, amr->amr_ioh, off, val);
195 bus_space_barrier(amr->amr_iot, amr->amr_ioh, off, 1,
196 BUS_SPACE_BARRIER_WRITE);
197 }
198
199 static inline void
200 amr_outl(struct amr_softc *amr, int off, u_int32_t val)
201 {
202
203 bus_space_write_4(amr->amr_iot, amr->amr_ioh, off, val);
204 bus_space_barrier(amr->amr_iot, amr->amr_ioh, off, 4,
205 BUS_SPACE_BARRIER_WRITE);
206 }
207
208 /*
209 * Match a supported device.
210 */
211 static int
212 amr_match(struct device *parent, struct cfdata *match, void *aux)
213 {
214 struct pci_attach_args *pa;
215 pcireg_t s;
216 int i;
217
218 pa = (struct pci_attach_args *)aux;
219
220 /*
221 * Don't match the device if it's operating in I2O mode. In this
222 * case it should be handled by the `iop' driver.
223 */
224 if (PCI_CLASS(pa->pa_class) == PCI_CLASS_I2O)
225 return (0);
226
227 for (i = 0; i < sizeof(amr_pci_type) / sizeof(amr_pci_type[0]); i++)
228 if (PCI_VENDOR(pa->pa_id) == amr_pci_type[i].apt_vendor &&
229 PCI_PRODUCT(pa->pa_id) == amr_pci_type[i].apt_product)
230 break;
231
232 if (i == sizeof(amr_pci_type) / sizeof(amr_pci_type[0]))
233 return (0);
234
235 if ((amr_pci_type[i].apt_flags & AT_SIG) == 0)
236 return (1);
237
238 s = pci_conf_read(pa->pa_pc, pa->pa_tag, AMR_QUARTZ_SIG_REG) & 0xffff;
239 return (s == AMR_QUARTZ_SIG0 || s == AMR_QUARTZ_SIG1);
240 }
241
242 /*
243 * Attach a supported device.
244 */
245 static void
246 amr_attach(struct device *parent, struct device *self, void *aux)
247 {
248 struct pci_attach_args *pa;
249 struct amr_attach_args amra;
250 const struct amr_pci_type *apt;
251 struct amr_softc *amr;
252 pci_chipset_tag_t pc;
253 pci_intr_handle_t ih;
254 const char *intrstr;
255 pcireg_t reg;
256 int rseg, i, j, size, rv, memreg, ioreg;
257 struct amr_ccb *ac;
258 int locs[AMRCF_NLOCS];
259
260 aprint_naive(": RAID controller\n");
261
262 amr = (struct amr_softc *)self;
263 pa = (struct pci_attach_args *)aux;
264 pc = pa->pa_pc;
265
266 for (i = 0; i < sizeof(amr_pci_type) / sizeof(amr_pci_type[0]); i++)
267 if (PCI_VENDOR(pa->pa_id) == amr_pci_type[i].apt_vendor &&
268 PCI_PRODUCT(pa->pa_id) == amr_pci_type[i].apt_product)
269 break;
270 apt = amr_pci_type + i;
271
272 memreg = ioreg = 0;
273 for (i = 0x10; i <= 0x14; i += 4) {
274 reg = pci_conf_read(pc, pa->pa_tag, i);
275 switch (PCI_MAPREG_TYPE(reg)) {
276 case PCI_MAPREG_TYPE_MEM:
277 if (PCI_MAPREG_MEM_SIZE(reg) != 0)
278 memreg = i;
279 break;
280 case PCI_MAPREG_TYPE_IO:
281 if (PCI_MAPREG_IO_SIZE(reg) != 0)
282 ioreg = i;
283 break;
284
285 }
286 }
287
288 if (memreg && pci_mapreg_map(pa, memreg, PCI_MAPREG_TYPE_MEM, 0,
289 &amr->amr_iot, &amr->amr_ioh, NULL, &amr->amr_ios) == 0)
290 ;
291 else if (ioreg && pci_mapreg_map(pa, ioreg, PCI_MAPREG_TYPE_IO, 0,
292 &amr->amr_iot, &amr->amr_ioh, NULL, &amr->amr_ios) == 0)
293 ;
294 else {
295 aprint_error("can't map control registers\n");
296 amr_teardown(amr);
297 return;
298 }
299
300 amr->amr_flags |= AMRF_PCI_REGS;
301 amr->amr_dmat = pa->pa_dmat;
302 amr->amr_pc = pa->pa_pc;
303
304 /* Enable the device. */
305 reg = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
306 pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG,
307 reg | PCI_COMMAND_MASTER_ENABLE);
308
309 /* Map and establish the interrupt. */
310 if (pci_intr_map(pa, &ih)) {
311 aprint_error("can't map interrupt\n");
312 amr_teardown(amr);
313 return;
314 }
315 intrstr = pci_intr_string(pc, ih);
316 amr->amr_ih = pci_intr_establish(pc, ih, IPL_BIO, amr_intr, amr);
317 if (amr->amr_ih == NULL) {
318 aprint_error("can't establish interrupt");
319 if (intrstr != NULL)
320 aprint_normal(" at %s", intrstr);
321 aprint_normal("\n");
322 amr_teardown(amr);
323 return;
324 }
325 amr->amr_flags |= AMRF_PCI_INTR;
326
327 /*
328 * Allocate space for the mailbox and S/G lists. Some controllers
329 * don't like S/G lists to be located below 0x2000, so we allocate
330 * enough slop to enable us to compensate.
331 *
332 * The standard mailbox structure needs to be aligned on a 16-byte
333 * boundary. The 64-bit mailbox has one extra field, 4 bytes in
334 * size, which preceeds the standard mailbox.
335 */
336 size = AMR_SGL_SIZE * AMR_MAX_CMDS + 0x2000;
337 amr->amr_dmasize = size;
338
339 if ((rv = bus_dmamem_alloc(amr->amr_dmat, size, PAGE_SIZE, 0,
340 &amr->amr_dmaseg, 1, &rseg, BUS_DMA_NOWAIT)) != 0) {
341 aprint_error("%s: unable to allocate buffer, rv = %d\n",
342 amr->amr_dv.dv_xname, rv);
343 amr_teardown(amr);
344 return;
345 }
346 amr->amr_flags |= AMRF_DMA_ALLOC;
347
348 if ((rv = bus_dmamem_map(amr->amr_dmat, &amr->amr_dmaseg, rseg, size,
349 (caddr_t *)&amr->amr_mbox,
350 BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) != 0) {
351 aprint_error("%s: unable to map buffer, rv = %d\n",
352 amr->amr_dv.dv_xname, rv);
353 amr_teardown(amr);
354 return;
355 }
356 amr->amr_flags |= AMRF_DMA_MAP;
357
358 if ((rv = bus_dmamap_create(amr->amr_dmat, size, 1, size, 0,
359 BUS_DMA_NOWAIT, &amr->amr_dmamap)) != 0) {
360 aprint_error("%s: unable to create buffer DMA map, rv = %d\n",
361 amr->amr_dv.dv_xname, rv);
362 amr_teardown(amr);
363 return;
364 }
365 amr->amr_flags |= AMRF_DMA_CREATE;
366
367 if ((rv = bus_dmamap_load(amr->amr_dmat, amr->amr_dmamap,
368 amr->amr_mbox, size, NULL, BUS_DMA_NOWAIT)) != 0) {
369 aprint_error("%s: unable to load buffer DMA map, rv = %d\n",
370 amr->amr_dv.dv_xname, rv);
371 amr_teardown(amr);
372 return;
373 }
374 amr->amr_flags |= AMRF_DMA_LOAD;
375
376 memset(amr->amr_mbox, 0, size);
377
378 amr->amr_mbox_paddr = amr->amr_dmamap->dm_segs[0].ds_addr;
379 amr->amr_sgls_paddr = (amr->amr_mbox_paddr + 0x1fff) & ~0x1fff;
380 amr->amr_sgls = (struct amr_sgentry *)((caddr_t)amr->amr_mbox +
381 amr->amr_sgls_paddr - amr->amr_dmamap->dm_segs[0].ds_addr);
382
383 /*
384 * Allocate and initalise the command control blocks.
385 */
386 ac = malloc(sizeof(*ac) * AMR_MAX_CMDS, M_DEVBUF, M_NOWAIT | M_ZERO);
387 amr->amr_ccbs = ac;
388 SLIST_INIT(&amr->amr_ccb_freelist);
389 TAILQ_INIT(&amr->amr_ccb_active);
390 amr->amr_flags |= AMRF_CCBS;
391
392 if (amr_max_xfer == 0) {
393 amr_max_xfer = min(((AMR_MAX_SEGS - 1) * PAGE_SIZE), MAXPHYS);
394 amr_max_segs = (amr_max_xfer + (PAGE_SIZE * 2) - 1) / PAGE_SIZE;
395 }
396
397 for (i = 0; i < AMR_MAX_CMDS; i++, ac++) {
398 rv = bus_dmamap_create(amr->amr_dmat, amr_max_xfer,
399 amr_max_segs, amr_max_xfer, 0,
400 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &ac->ac_xfer_map);
401 if (rv != 0)
402 break;
403
404 ac->ac_ident = i;
405 amr_ccb_free(amr, ac);
406 }
407 if (i != AMR_MAX_CMDS) {
408 aprint_error("%s: memory exhausted\n", amr->amr_dv.dv_xname);
409 amr_teardown(amr);
410 return;
411 }
412
413 /*
414 * Take care of model-specific tasks.
415 */
416 if ((apt->apt_flags & AT_QUARTZ) != 0) {
417 amr->amr_submit = amr_quartz_submit;
418 amr->amr_get_work = amr_quartz_get_work;
419 } else {
420 amr->amr_submit = amr_std_submit;
421 amr->amr_get_work = amr_std_get_work;
422
423 /* Notify the controller of the mailbox location. */
424 amr_outl(amr, AMR_SREG_MBOX, (u_int32_t)amr->amr_mbox_paddr + 16);
425 amr_outb(amr, AMR_SREG_MBOX_ENABLE, AMR_SMBOX_ENABLE_ADDR);
426
427 /* Clear outstanding interrupts and enable interrupts. */
428 amr_outb(amr, AMR_SREG_CMD, AMR_SCMD_ACKINTR);
429 amr_outb(amr, AMR_SREG_TOGL,
430 amr_inb(amr, AMR_SREG_TOGL) | AMR_STOGL_ENABLE);
431 }
432
433 /*
434 * Retrieve parameters, and tell the world about us.
435 */
436 amr->amr_enqbuf = malloc(AMR_ENQUIRY_BUFSIZE, M_DEVBUF, M_NOWAIT);
437 amr->amr_flags |= AMRF_ENQBUF;
438 amr->amr_maxqueuecnt = i;
439 aprint_normal(": AMI RAID ");
440 if (amr_init(amr, intrstr, pa) != 0) {
441 amr_teardown(amr);
442 return;
443 }
444
445 /*
446 * Cap the maximum number of outstanding commands. AMI's Linux
447 * driver doesn't trust the controller's reported value, and lockups
448 * have been seen when we do.
449 */
450 amr->amr_maxqueuecnt = min(amr->amr_maxqueuecnt, AMR_MAX_CMDS);
451 if (amr->amr_maxqueuecnt > i)
452 amr->amr_maxqueuecnt = i;
453
454 /* Set our `shutdownhook' before we start any device activity. */
455 if (amr_sdh == NULL)
456 amr_sdh = shutdownhook_establish(amr_shutdown, NULL);
457
458 /* Attach sub-devices. */
459 for (j = 0; j < amr->amr_numdrives; j++) {
460 if (amr->amr_drive[j].al_size == 0)
461 continue;
462 amra.amra_unit = j;
463
464 locs[AMRCF_UNIT] = j;
465
466 amr->amr_drive[j].al_dv = config_found_sm_loc(&amr->amr_dv,
467 "amr", locs, &amra, amr_print, config_stdsubmatch);
468 }
469
470 SIMPLEQ_INIT(&amr->amr_ccb_queue);
471
472 /* XXX This doesn't work for newer boards yet. */
473 if ((apt->apt_flags & AT_QUARTZ) == 0)
474 kthread_create(amr_thread_create, amr);
475 }
476
/*
 * Free up resources.  Called on attach failure at any stage (and via
 * the thread-exit path): the AMRF_* bits in amr_flags record exactly
 * which resources were acquired, so only those are released.
 */
static void
amr_teardown(struct amr_softc *amr)
{
	struct amr_ccb *ac;
	int fl;

	fl = amr->amr_flags;

	if ((fl & AMRF_THREAD) != 0) {
		/* Ask the watchdog thread to exit and wait until it does. */
		amr->amr_flags |= AMRF_THREAD_EXIT;
		wakeup(amr_thread);
		while ((amr->amr_flags & AMRF_THREAD_EXIT) != 0)
			tsleep(&amr->amr_flags, PWAIT, "amrexit", 0);
	}
	if ((fl & AMRF_CCBS) != 0) {
		/*
		 * NOTE(review): only CCBs on the free list have their
		 * transfer maps destroyed -- assumes no CCB is active at
		 * teardown time; confirm for non-attach-failure callers.
		 */
		SLIST_FOREACH(ac, &amr->amr_ccb_freelist, ac_chain.slist) {
			bus_dmamap_destroy(amr->amr_dmat, ac->ac_xfer_map);
		}
		free(amr->amr_ccbs, M_DEVBUF);
	}
	if ((fl & AMRF_ENQBUF) != 0)
		free(amr->amr_enqbuf, M_DEVBUF);
	if ((fl & AMRF_DMA_LOAD) != 0)
		bus_dmamap_unload(amr->amr_dmat, amr->amr_dmamap);
	if ((fl & AMRF_DMA_MAP) != 0)
		bus_dmamem_unmap(amr->amr_dmat, (caddr_t)amr->amr_mbox,
		    amr->amr_dmasize);
	if ((fl & AMRF_DMA_ALLOC) != 0)
		bus_dmamem_free(amr->amr_dmat, &amr->amr_dmaseg, 1);
	if ((fl & AMRF_DMA_CREATE) != 0)
		bus_dmamap_destroy(amr->amr_dmat, amr->amr_dmamap);
	if ((fl & AMRF_PCI_INTR) != 0)
		pci_intr_disestablish(amr->amr_pc, amr->amr_ih);
	if ((fl & AMRF_PCI_REGS) != 0)
		bus_space_unmap(amr->amr_iot, amr->amr_ioh, amr->amr_ios);
}
516
517 /*
518 * Print autoconfiguration message for a sub-device.
519 */
520 static int
521 amr_print(void *aux, const char *pnp)
522 {
523 struct amr_attach_args *amra;
524
525 amra = (struct amr_attach_args *)aux;
526
527 if (pnp != NULL)
528 aprint_normal("block device at %s", pnp);
529 aprint_normal(" unit %d", amra->amra_unit);
530 return (UNCONF);
531 }
532
533 /*
534 * Retrieve operational parameters and describe the controller.
535 */
536 static int
537 amr_init(struct amr_softc *amr, const char *intrstr,
538 struct pci_attach_args *pa)
539 {
540 struct amr_adapter_info *aa;
541 struct amr_prodinfo *ap;
542 struct amr_enquiry *ae;
543 struct amr_enquiry3 *aex;
544 const char *prodstr;
545 u_int i, sig, ishp;
546 char sbuf[64];
547
548 /*
549 * Try to get 40LD product info, which tells us what the card is
550 * labelled as.
551 */
552 ap = amr_enquire(amr, AMR_CMD_CONFIG, AMR_CONFIG_PRODUCT_INFO, 0,
553 amr->amr_enqbuf);
554 if (ap != NULL) {
555 aprint_normal("<%.80s>\n", ap->ap_product);
556 if (intrstr != NULL)
557 aprint_normal("%s: interrupting at %s\n",
558 amr->amr_dv.dv_xname, intrstr);
559 aprint_normal("%s: firmware %.16s, BIOS %.16s, %dMB RAM\n",
560 amr->amr_dv.dv_xname, ap->ap_firmware, ap->ap_bios,
561 le16toh(ap->ap_memsize));
562
563 amr->amr_maxqueuecnt = ap->ap_maxio;
564
565 /*
566 * Fetch and record state of logical drives.
567 */
568 aex = amr_enquire(amr, AMR_CMD_CONFIG, AMR_CONFIG_ENQ3,
569 AMR_CONFIG_ENQ3_SOLICITED_FULL, amr->amr_enqbuf);
570 if (aex == NULL) {
571 aprint_error("%s ENQUIRY3 failed\n",
572 amr->amr_dv.dv_xname);
573 return (-1);
574 }
575
576 if (aex->ae_numldrives > AMR_MAX_UNITS) {
577 aprint_error(
578 "%s: adjust AMR_MAX_UNITS to %d (currently %d)"
579 "\n", amr->amr_dv.dv_xname, AMR_MAX_UNITS,
580 amr->amr_numdrives);
581 amr->amr_numdrives = AMR_MAX_UNITS;
582 } else
583 amr->amr_numdrives = aex->ae_numldrives;
584
585 for (i = 0; i < amr->amr_numdrives; i++) {
586 amr->amr_drive[i].al_size =
587 le32toh(aex->ae_drivesize[i]);
588 amr->amr_drive[i].al_state = aex->ae_drivestate[i];
589 amr->amr_drive[i].al_properties = aex->ae_driveprop[i];
590 }
591
592 return (0);
593 }
594
595 /*
596 * Try 8LD extended ENQUIRY to get the controller signature. Once
597 * found, search for a product description.
598 */
599 ae = amr_enquire(amr, AMR_CMD_EXT_ENQUIRY2, 0, 0, amr->amr_enqbuf);
600 if (ae != NULL) {
601 i = 0;
602 sig = le32toh(ae->ae_signature);
603
604 while (i < sizeof(amr_typestr) / sizeof(amr_typestr[0])) {
605 if (amr_typestr[i].at_sig == sig)
606 break;
607 i++;
608 }
609 if (i == sizeof(amr_typestr) / sizeof(amr_typestr[0])) {
610 snprintf(sbuf, sizeof(sbuf),
611 "unknown ENQUIRY2 sig (0x%08x)", sig);
612 prodstr = sbuf;
613 } else
614 prodstr = amr_typestr[i].at_str;
615 } else {
616 ae = amr_enquire(amr, AMR_CMD_ENQUIRY, 0, 0, amr->amr_enqbuf);
617 if (ae == NULL) {
618 aprint_error("%s: unsupported controller\n",
619 amr->amr_dv.dv_xname);
620 return (-1);
621 }
622
623 switch (PCI_PRODUCT(pa->pa_id)) {
624 case PCI_PRODUCT_AMI_MEGARAID:
625 prodstr = "Series 428";
626 break;
627 case PCI_PRODUCT_AMI_MEGARAID2:
628 prodstr = "Series 434";
629 break;
630 default:
631 snprintf(sbuf, sizeof(sbuf), "unknown PCI dev (0x%04x)",
632 PCI_PRODUCT(pa->pa_id));
633 prodstr = sbuf;
634 break;
635 }
636 }
637
638 /*
639 * HP NetRaid controllers have a special encoding of the firmware
640 * and BIOS versions. The AMI version seems to have it as strings
641 * whereas the HP version does it with a leading uppercase character
642 * and two binary numbers.
643 */
644 aa = &ae->ae_adapter;
645
646 if (aa->aa_firmware[2] >= 'A' && aa->aa_firmware[2] <= 'Z' &&
647 aa->aa_firmware[1] < ' ' && aa->aa_firmware[0] < ' ' &&
648 aa->aa_bios[2] >= 'A' && aa->aa_bios[2] <= 'Z' &&
649 aa->aa_bios[1] < ' ' && aa->aa_bios[0] < ' ') {
650 if (le32toh(ae->ae_signature) == AMR_SIG_438) {
651 /* The AMI 438 is a NetRaid 3si in HP-land. */
652 prodstr = "HP NetRaid 3si";
653 }
654 ishp = 1;
655 } else
656 ishp = 0;
657
658 aprint_normal("<%s>\n", prodstr);
659 if (intrstr != NULL)
660 aprint_normal("%s: interrupting at %s\n", amr->amr_dv.dv_xname,
661 intrstr);
662
663 if (ishp)
664 aprint_normal("%s: firmware <%c.%02d.%02d>, BIOS <%c.%02d.%02d>"
665 ", %dMB RAM\n", amr->amr_dv.dv_xname, aa->aa_firmware[2],
666 aa->aa_firmware[1], aa->aa_firmware[0], aa->aa_bios[2],
667 aa->aa_bios[1], aa->aa_bios[0], aa->aa_memorysize);
668 else
669 aprint_normal("%s: firmware <%.4s>, BIOS <%.4s>, %dMB RAM\n",
670 amr->amr_dv.dv_xname, aa->aa_firmware, aa->aa_bios,
671 aa->aa_memorysize);
672
673 amr->amr_maxqueuecnt = aa->aa_maxio;
674
675 /*
676 * Record state of logical drives.
677 */
678 if (ae->ae_ldrv.al_numdrives > AMR_MAX_UNITS) {
679 aprint_error("%s: adjust AMR_MAX_UNITS to %d (currently %d)\n",
680 amr->amr_dv.dv_xname, ae->ae_ldrv.al_numdrives,
681 AMR_MAX_UNITS);
682 amr->amr_numdrives = AMR_MAX_UNITS;
683 } else
684 amr->amr_numdrives = ae->ae_ldrv.al_numdrives;
685
686 for (i = 0; i < AMR_MAX_UNITS; i++) {
687 amr->amr_drive[i].al_size = le32toh(ae->ae_ldrv.al_size[i]);
688 amr->amr_drive[i].al_state = ae->ae_ldrv.al_state[i];
689 amr->amr_drive[i].al_properties = ae->ae_ldrv.al_properties[i];
690 }
691
692 return (0);
693 }
694
695 /*
696 * Flush the internal cache on each configured controller. Called at
697 * shutdown time.
698 */
699 static void
700 amr_shutdown(void *cookie)
701 {
702 extern struct cfdriver amr_cd;
703 struct amr_softc *amr;
704 struct amr_ccb *ac;
705 int i, rv, s;
706
707 for (i = 0; i < amr_cd.cd_ndevs; i++) {
708 if ((amr = device_lookup(&amr_cd, i)) == NULL)
709 continue;
710
711 if ((rv = amr_ccb_alloc(amr, &ac)) == 0) {
712 ac->ac_cmd.mb_command = AMR_CMD_FLUSH;
713 s = splbio();
714 rv = amr_ccb_poll(amr, ac, 30000);
715 splx(s);
716 amr_ccb_free(amr, ac);
717 }
718 if (rv != 0)
719 printf("%s: unable to flush cache (%d)\n",
720 amr->amr_dv.dv_xname, rv);
721 }
722 }
723
724 /*
725 * Interrupt service routine.
726 */
727 static int
728 amr_intr(void *cookie)
729 {
730 struct amr_softc *amr;
731 struct amr_ccb *ac;
732 struct amr_mailbox_resp mbox;
733 u_int i, forus, idx;
734
735 amr = cookie;
736 forus = 0;
737
738 while ((*amr->amr_get_work)(amr, &mbox) == 0) {
739 /* Iterate over completed commands in this result. */
740 for (i = 0; i < mbox.mb_nstatus; i++) {
741 idx = mbox.mb_completed[i] - 1;
742 ac = amr->amr_ccbs + idx;
743
744 if (idx >= amr->amr_maxqueuecnt) {
745 printf("%s: bad status (bogus ID: %u=%u)\n",
746 amr->amr_dv.dv_xname, i, idx);
747 continue;
748 }
749
750 if ((ac->ac_flags & AC_ACTIVE) == 0) {
751 printf("%s: bad status (not active; 0x04%x)\n",
752 amr->amr_dv.dv_xname, ac->ac_flags);
753 continue;
754 }
755
756 ac->ac_status = mbox.mb_status;
757 ac->ac_flags = (ac->ac_flags & ~AC_ACTIVE) |
758 AC_COMPLETE;
759 TAILQ_REMOVE(&amr->amr_ccb_active, ac, ac_chain.tailq);
760
761 if ((ac->ac_flags & AC_MOAN) != 0)
762 printf("%s: ccb %d completed\n",
763 amr->amr_dv.dv_xname, ac->ac_ident);
764
765 /* Pass notification to upper layers. */
766 if (ac->ac_handler != NULL)
767 (*ac->ac_handler)(ac);
768 else
769 wakeup(ac);
770 }
771 forus = 1;
772 }
773
774 if (forus)
775 amr_ccb_enqueue(amr, NULL);
776
777 return (forus);
778 }
779
780 /*
781 * Create the watchdog thread.
782 */
783 static void
784 amr_thread_create(void *cookie)
785 {
786 struct amr_softc *amr;
787 int rv;
788
789 amr = cookie;
790
791 if ((amr->amr_flags & AMRF_THREAD_EXIT) != 0) {
792 amr->amr_flags ^= AMRF_THREAD_EXIT;
793 wakeup(&amr->amr_flags);
794 return;
795 }
796
797 rv = kthread_create1(amr_thread, amr, &amr->amr_thread, "%s",
798 amr->amr_dv.dv_xname);
799 if (rv != 0)
800 aprint_error("%s: unable to create thread (%d)",
801 amr->amr_dv.dv_xname, rv);
802 else
803 amr->amr_flags |= AMRF_THREAD;
804 }
805
806 /*
807 * Watchdog thread.
808 */
809 static void
810 amr_thread(void *cookie)
811 {
812 struct amr_softc *amr;
813 struct amr_ccb *ac;
814 struct amr_logdrive *al;
815 struct amr_enquiry *ae;
816 time_t curtime;
817 int rv, i, s;
818
819 amr = cookie;
820 ae = amr->amr_enqbuf;
821
822 for (;;) {
823 tsleep(amr_thread, PWAIT, "amrwdog", AMR_WDOG_TICKS);
824
825 if ((amr->amr_flags & AMRF_THREAD_EXIT) != 0) {
826 amr->amr_flags ^= AMRF_THREAD_EXIT;
827 wakeup(&amr->amr_flags);
828 kthread_exit(0);
829 }
830
831 s = splbio();
832 amr_intr(cookie);
833 curtime = (time_t)mono_time.tv_sec;
834 ac = TAILQ_FIRST(&amr->amr_ccb_active);
835 while (ac != NULL) {
836 if (ac->ac_start_time + AMR_TIMEOUT > curtime)
837 break;
838 if ((ac->ac_flags & AC_MOAN) == 0) {
839 printf("%s: ccb %d timed out; mailbox:\n",
840 amr->amr_dv.dv_xname, ac->ac_ident);
841 amr_ccb_dump(amr, ac);
842 ac->ac_flags |= AC_MOAN;
843 }
844 ac = TAILQ_NEXT(ac, ac_chain.tailq);
845 }
846 splx(s);
847
848 if ((rv = amr_ccb_alloc(amr, &ac)) != 0) {
849 printf("%s: ccb_alloc failed (%d)\n",
850 amr->amr_dv.dv_xname, rv);
851 continue;
852 }
853
854 ac->ac_cmd.mb_command = AMR_CMD_ENQUIRY;
855
856 rv = amr_ccb_map(amr, ac, amr->amr_enqbuf,
857 AMR_ENQUIRY_BUFSIZE, 0);
858 if (rv != 0) {
859 printf("%s: ccb_map failed (%d)\n",
860 amr->amr_dv.dv_xname, rv);
861 amr_ccb_free(amr, ac);
862 continue;
863 }
864
865 rv = amr_ccb_wait(amr, ac);
866 amr_ccb_unmap(amr, ac);
867 if (rv != 0) {
868 printf("%s: enquiry failed (st=%d)\n",
869 amr->amr_dv.dv_xname, ac->ac_status);
870 continue;
871 }
872 amr_ccb_free(amr, ac);
873
874 al = amr->amr_drive;
875 for (i = 0; i < AMR_MAX_UNITS; i++, al++) {
876 if (al->al_dv == NULL)
877 continue;
878 if (al->al_state == ae->ae_ldrv.al_state[i])
879 continue;
880
881 printf("%s: state changed: %s -> %s\n",
882 al->al_dv->dv_xname,
883 amr_drive_state(al->al_state, NULL),
884 amr_drive_state(ae->ae_ldrv.al_state[i], NULL));
885
886 al->al_state = ae->ae_ldrv.al_state[i];
887 }
888 }
889 }
890
891 /*
892 * Return a text description of a logical drive's current state.
893 */
894 const char *
895 amr_drive_state(int state, int *happy)
896 {
897 const char *str;
898
899 state = AMR_DRV_CURSTATE(state);
900 if (state >= sizeof(amr_dstate) / sizeof(amr_dstate[0])) {
901 if (happy)
902 *happy = 1;
903 str = "status unknown";
904 } else {
905 if (happy)
906 *happy = amr_dstate[state].ds_happy;
907 str = amr_dstate[state].ds_descr;
908 }
909
910 return (str);
911 }
912
913 /*
914 * Run a generic enquiry-style command.
915 */
916 static void *
917 amr_enquire(struct amr_softc *amr, u_int8_t cmd, u_int8_t cmdsub,
918 u_int8_t cmdqual, void *sbuf)
919 {
920 struct amr_ccb *ac;
921 u_int8_t *mb;
922 int rv;
923
924 if (amr_ccb_alloc(amr, &ac) != 0)
925 return (NULL);
926
927 /* Build the command proper. */
928 mb = (u_int8_t *)&ac->ac_cmd;
929 mb[0] = cmd;
930 mb[2] = cmdsub;
931 mb[3] = cmdqual;
932
933 rv = amr_ccb_map(amr, ac, sbuf, AMR_ENQUIRY_BUFSIZE, 0);
934 if (rv == 0) {
935 rv = amr_ccb_poll(amr, ac, 2000);
936 amr_ccb_unmap(amr, ac);
937 }
938 amr_ccb_free(amr, ac);
939
940 return (rv ? NULL : sbuf);
941 }
942
943 /*
944 * Allocate and initialise a CCB.
945 */
946 int
947 amr_ccb_alloc(struct amr_softc *amr, struct amr_ccb **acp)
948 {
949 int s;
950
951 s = splbio();
952 if ((*acp = SLIST_FIRST(&amr->amr_ccb_freelist)) == NULL) {
953 splx(s);
954 return (EAGAIN);
955 }
956 SLIST_REMOVE_HEAD(&amr->amr_ccb_freelist, ac_chain.slist);
957 splx(s);
958
959 return (0);
960 }
961
962 /*
963 * Free a CCB.
964 */
965 void
966 amr_ccb_free(struct amr_softc *amr, struct amr_ccb *ac)
967 {
968 int s;
969
970 memset(&ac->ac_cmd, 0, sizeof(ac->ac_cmd));
971 ac->ac_cmd.mb_ident = ac->ac_ident + 1;
972 ac->ac_cmd.mb_busy = 1;
973 ac->ac_handler = NULL;
974 ac->ac_flags = 0;
975
976 s = splbio();
977 SLIST_INSERT_HEAD(&amr->amr_ccb_freelist, ac, ac_chain.slist);
978 splx(s);
979 }
980
981 /*
982 * If a CCB is specified, enqueue it. Pull CCBs off the software queue in
983 * the order that they were enqueued and try to submit their command blocks
984 * to the controller for execution.
985 */
986 void
987 amr_ccb_enqueue(struct amr_softc *amr, struct amr_ccb *ac)
988 {
989 int s;
990
991 s = splbio();
992
993 if (ac != NULL)
994 SIMPLEQ_INSERT_TAIL(&amr->amr_ccb_queue, ac, ac_chain.simpleq);
995
996 while ((ac = SIMPLEQ_FIRST(&amr->amr_ccb_queue)) != NULL) {
997 if ((*amr->amr_submit)(amr, ac) != 0)
998 break;
999 SIMPLEQ_REMOVE_HEAD(&amr->amr_ccb_queue, ac_chain.simpleq);
1000 TAILQ_INSERT_TAIL(&amr->amr_ccb_active, ac, ac_chain.tailq);
1001 }
1002
1003 splx(s);
1004 }
1005
/*
 * Map the specified CCB's data buffer onto the bus, and fill the
 * scatter-gather list.
 *
 * `out' selects the transfer direction: non-zero for host-to-device
 * (write), zero for device-to-host (read).  Returns 0 on success or
 * the bus_dmamap_load() error code.
 */
int
amr_ccb_map(struct amr_softc *amr, struct amr_ccb *ac, void *data, int size,
	    int out)
{
	struct amr_sgentry *sge;
	struct amr_mailbox_cmd *mb;
	int nsegs, i, rv, sgloff;
	bus_dmamap_t xfer;

	xfer = ac->ac_xfer_map;

	rv = bus_dmamap_load(amr->amr_dmat, xfer, data, size, NULL,
	    BUS_DMA_NOWAIT);
	if (rv != 0)
		return (rv);

	mb = &ac->ac_cmd;
	ac->ac_xfer_size = size;
	ac->ac_flags |= (out ? AC_XFER_OUT : AC_XFER_IN);
	/* Each CCB owns a fixed slot in the shared S/G list area. */
	sgloff = AMR_SGL_SIZE * ac->ac_ident;

	/* We don't need to use a scatter/gather list for just 1 segment. */
	nsegs = xfer->dm_nsegs;
	if (nsegs == 1) {
		mb->mb_nsgelem = 0;
		mb->mb_physaddr = htole32(xfer->dm_segs[0].ds_addr);
		ac->ac_flags |= AC_NOSGL;
	} else {
		mb->mb_nsgelem = nsegs;
		mb->mb_physaddr = htole32(amr->amr_sgls_paddr + sgloff);

		/* Fill this CCB's S/G slot; entries are little-endian. */
		sge = (struct amr_sgentry *)((caddr_t)amr->amr_sgls + sgloff);
		for (i = 0; i < nsegs; i++, sge++) {
			sge->sge_addr = htole32(xfer->dm_segs[i].ds_addr);
			sge->sge_count = htole32(xfer->dm_segs[i].ds_len);
		}
	}

	/* Make the data buffer visible to the device before submission. */
	bus_dmamap_sync(amr->amr_dmat, xfer, 0, ac->ac_xfer_size,
	    out ? BUS_DMASYNC_PREWRITE : BUS_DMASYNC_PREREAD);

	/* Likewise the S/G list, when one is in use. */
	if ((ac->ac_flags & AC_NOSGL) == 0)
		bus_dmamap_sync(amr->amr_dmat, amr->amr_dmamap, sgloff,
		    AMR_SGL_SIZE, BUS_DMASYNC_PREWRITE);

	return (0);
}
1057
/*
 * Unmap the specified CCB's data buffer: perform the post-transfer
 * DMA syncs (mirroring the PRE syncs in amr_ccb_map()) and unload the
 * transfer map.
 */
void
amr_ccb_unmap(struct amr_softc *amr, struct amr_ccb *ac)
{

	/* Sync this CCB's S/G slot, if one was used. */
	if ((ac->ac_flags & AC_NOSGL) == 0)
		bus_dmamap_sync(amr->amr_dmat, amr->amr_dmamap,
		    AMR_SGL_SIZE * ac->ac_ident, AMR_SGL_SIZE,
		    BUS_DMASYNC_POSTWRITE);
	bus_dmamap_sync(amr->amr_dmat, ac->ac_xfer_map, 0, ac->ac_xfer_size,
	    (ac->ac_flags & AC_XFER_IN) != 0 ?
	    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(amr->amr_dmat, ac->ac_xfer_map);
}
1074
1075 /*
1076 * Submit a command to the controller and poll on completion. Return
1077 * non-zero on timeout or error. Must be called with interrupts blocked.
1078 */
1079 int
1080 amr_ccb_poll(struct amr_softc *amr, struct amr_ccb *ac, int timo)
1081 {
1082 int rv;
1083
1084 if ((rv = (*amr->amr_submit)(amr, ac)) != 0)
1085 return (rv);
1086 TAILQ_INSERT_TAIL(&amr->amr_ccb_active, ac, ac_chain.tailq);
1087
1088 for (timo *= 10; timo != 0; timo--) {
1089 amr_intr(amr);
1090 if ((ac->ac_flags & AC_COMPLETE) != 0)
1091 break;
1092 DELAY(100);
1093 }
1094
1095 return (timo == 0 || ac->ac_status != 0 ? EIO : 0);
1096 }
1097
1098 /*
1099 * Submit a command to the controller and sleep on completion. Return
1100 * non-zero on error.
1101 */
1102 int
1103 amr_ccb_wait(struct amr_softc *amr, struct amr_ccb *ac)
1104 {
1105 int s;
1106
1107 s = splbio();
1108 amr_ccb_enqueue(amr, ac);
1109 tsleep(ac, PRIBIO, "amrcmd", 0);
1110 splx(s);
1111
1112 return (ac->ac_status != 0 ? EIO : 0);
1113 }
1114
1115 #if 0
1116 /*
1117 * Wait for the mailbox to become available.
1118 */
1119 static int
1120 amr_mbox_wait(struct amr_softc *amr)
1121 {
1122 int timo;
1123
1124 for (timo = 10000; timo != 0; timo--) {
1125 bus_dmamap_sync(amr->amr_dmat, amr->amr_dmamap, 0,
1126 sizeof(struct amr_mailbox), BUS_DMASYNC_POSTREAD);
1127 if (amr->amr_mbox->mb_cmd.mb_busy == 0)
1128 break;
1129 DELAY(100);
1130 }
1131
1132 if (timo == 0)
1133 printf("%s: controller wedged\n", amr->amr_dv.dv_xname);
1134
1135 return (timo != 0 ? 0 : EAGAIN);
1136 }
1137 #endif
1138
1139 /*
1140 * Tell the controller that the mailbox contains a valid command. Must be
1141 * called with interrupts blocked.
1142 */
static int
amr_quartz_submit(struct amr_softc *amr, struct amr_ccb *ac)
{
	u_int32_t v;

	/* Clear the handshake bytes, then flush and re-read the mailbox. */
	amr->amr_mbox->mb_poll = 0;
	amr->amr_mbox->mb_ack = 0;
	bus_dmamap_sync(amr->amr_dmat, amr->amr_dmamap, 0,
	    sizeof(struct amr_mailbox), BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(amr->amr_dmat, amr->amr_dmamap, 0,
	    sizeof(struct amr_mailbox), BUS_DMASYNC_POSTREAD);
	/* Mailbox still busy with a previous command; caller must retry. */
	if (amr->amr_mbox->mb_cmd.mb_busy != 0)
		return (EAGAIN);

	/* A prior submission is still pending in the inbound doorbell. */
	v = amr_inl(amr, AMR_QREG_IDB);
	if ((v & AMR_QIDB_SUBMIT) != 0) {
		/* Clear the busy flag and re-sync the mailbox before bailing. */
		amr->amr_mbox->mb_cmd.mb_busy = 0;
		bus_dmamap_sync(amr->amr_dmat, amr->amr_dmamap, 0,
		    sizeof(struct amr_mailbox), BUS_DMASYNC_PREWRITE);
		bus_dmamap_sync(amr->amr_dmat, amr->amr_dmamap, 0,
		    sizeof(struct amr_mailbox), BUS_DMASYNC_PREREAD);
		return (EAGAIN);
	}

	/* Copy the command into the mailbox and flush it to memory. */
	amr->amr_mbox->mb_segment = 0;
	memcpy(&amr->amr_mbox->mb_cmd, &ac->ac_cmd, sizeof(ac->ac_cmd));
	bus_dmamap_sync(amr->amr_dmat, amr->amr_dmamap, 0,
	    sizeof(struct amr_mailbox), BUS_DMASYNC_PREWRITE);

	/* Record submission time and mark the CCB active. */
	ac->ac_start_time = (time_t)mono_time.tv_sec;
	ac->ac_flags |= AC_ACTIVE;
	/*
	 * Ring the doorbell with the mailbox physical address (+16; the
	 * same offset is used for the ack in amr_quartz_get_work()).
	 */
	amr_outl(amr, AMR_QREG_IDB,
	    (amr->amr_mbox_paddr + 16) | AMR_QIDB_SUBMIT);
	return (0);
}
1178
static int
amr_std_submit(struct amr_softc *amr, struct amr_ccb *ac)
{

	/* Clear the handshake bytes, then flush and re-read the mailbox. */
	amr->amr_mbox->mb_poll = 0;
	amr->amr_mbox->mb_ack = 0;
	bus_dmamap_sync(amr->amr_dmat, amr->amr_dmamap, 0,
	    sizeof(struct amr_mailbox), BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(amr->amr_dmat, amr->amr_dmamap, 0,
	    sizeof(struct amr_mailbox), BUS_DMASYNC_POSTREAD);
	/* Mailbox still busy with a previous command; caller must retry. */
	if (amr->amr_mbox->mb_cmd.mb_busy != 0)
		return (EAGAIN);

	/* The controller itself still reports the mailbox busy. */
	if ((amr_inb(amr, AMR_SREG_MBOX_BUSY) & AMR_SMBOX_BUSY_FLAG) != 0) {
		/* Clear the busy flag and re-sync the mailbox before bailing. */
		amr->amr_mbox->mb_cmd.mb_busy = 0;
		bus_dmamap_sync(amr->amr_dmat, amr->amr_dmamap, 0,
		    sizeof(struct amr_mailbox), BUS_DMASYNC_PREWRITE);
		bus_dmamap_sync(amr->amr_dmat, amr->amr_dmamap, 0,
		    sizeof(struct amr_mailbox), BUS_DMASYNC_PREREAD);
		return (EAGAIN);
	}

	/* Copy the command into the mailbox and flush it to memory. */
	amr->amr_mbox->mb_segment = 0;
	memcpy(&amr->amr_mbox->mb_cmd, &ac->ac_cmd, sizeof(ac->ac_cmd));
	bus_dmamap_sync(amr->amr_dmat, amr->amr_dmamap, 0,
	    sizeof(struct amr_mailbox), BUS_DMASYNC_PREWRITE);

	/* Record submission time, mark active, and post the command. */
	ac->ac_start_time = (time_t)mono_time.tv_sec;
	ac->ac_flags |= AC_ACTIVE;
	amr_outb(amr, AMR_SREG_CMD, AMR_SCMD_POST);
	return (0);
}
1211
1212 /*
1213 * Claim any work that the controller has completed; acknowledge completion,
1214 * save details of the completion in (mbsave). Must be called with
1215 * interrupts blocked.
1216 */
static int
amr_quartz_get_work(struct amr_softc *amr, struct amr_mailbox_resp *mbsave)
{

	/* Work waiting for us? */
	if (amr_inl(amr, AMR_QREG_ODB) != AMR_QODB_READY)
		return (-1);

	/* Pull the controller's updates to the mailbox into host view. */
	bus_dmamap_sync(amr->amr_dmat, amr->amr_dmamap, 0,
	    sizeof(struct amr_mailbox), BUS_DMASYNC_POSTREAD);

	/* Save the mailbox, which contains a list of completed commands. */
	memcpy(mbsave, &amr->amr_mbox->mb_resp, sizeof(*mbsave));

	/* Re-arm the mailbox for the controller's next response. */
	bus_dmamap_sync(amr->amr_dmat, amr->amr_dmamap, 0,
	    sizeof(struct amr_mailbox), BUS_DMASYNC_PREREAD);

	/* Ack the interrupt and mailbox transfer. */
	amr_outl(amr, AMR_QREG_ODB, AMR_QODB_READY);
	amr_outl(amr, AMR_QREG_IDB, (amr->amr_mbox_paddr+16) | AMR_QIDB_ACK);

	/*
	 * This waits for the controller to notice that we've taken the
	 * command from it.  It's very inefficient, and we shouldn't do it,
	 * but if we remove this code, we stop completing commands under
	 * load.
	 *
	 * Peter J says we shouldn't do this.  The documentation says we
	 * should.  Who is right?
	 */
	while ((amr_inl(amr, AMR_QREG_IDB) & AMR_QIDB_ACK) != 0)
		DELAY(10);

	return (0);
}
1252
static int
amr_std_get_work(struct amr_softc *amr, struct amr_mailbox_resp *mbsave)
{
	u_int8_t istat;

	/* Check for valid interrupt status. */
	if (((istat = amr_inb(amr, AMR_SREG_INTR)) & AMR_SINTR_VALID) == 0)
		return (-1);

	/* Ack the interrupt by writing the status value back. */
	amr_outb(amr, AMR_SREG_INTR, istat);

	/* Pull the controller's updates to the mailbox into host view. */
	bus_dmamap_sync(amr->amr_dmat, amr->amr_dmamap, 0,
	    sizeof(struct amr_mailbox), BUS_DMASYNC_POSTREAD);

	/* Save mailbox, which contains a list of completed commands. */
	memcpy(mbsave, &amr->amr_mbox->mb_resp, sizeof(*mbsave));

	/* Re-arm the mailbox for the controller's next response. */
	bus_dmamap_sync(amr->amr_dmat, amr->amr_dmamap, 0,
	    sizeof(struct amr_mailbox), BUS_DMASYNC_PREREAD);

	/* Ack mailbox transfer. */
	amr_outb(amr, AMR_SREG_CMD, AMR_SCMD_ACKINTR);

	return (0);
}
1279
1280 static void
1281 amr_ccb_dump(struct amr_softc *amr, struct amr_ccb *ac)
1282 {
1283 int i;
1284
1285 printf("%s: ", amr->amr_dv.dv_xname);
1286 for (i = 0; i < 4; i++)
1287 printf("%08x ", ((u_int32_t *)&ac->ac_cmd)[i]);
1288 printf("\n");
1289 }
1290