amr.c revision 1.18 1 /* $NetBSD: amr.c,v 1.18 2003/10/29 02:27:32 mycroft Exp $ */
2
3 /*-
4 * Copyright (c) 2002, 2003 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Andrew Doran.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 3. All advertising materials mentioning features or use of this software
19 * must display the following acknowledgement:
20 * This product includes software developed by the NetBSD
21 * Foundation, Inc. and its contributors.
22 * 4. Neither the name of The NetBSD Foundation nor the names of its
23 * contributors may be used to endorse or promote products derived
24 * from this software without specific prior written permission.
25 *
26 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
27 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
28 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
29 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
30 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
31 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
32 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
33 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
34 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
35 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
36 * POSSIBILITY OF SUCH DAMAGE.
37 */
38
39 /*-
40 * Copyright (c) 1999,2000 Michael Smith
41 * Copyright (c) 2000 BSDi
42 * All rights reserved.
43 *
44 * Redistribution and use in source and binary forms, with or without
45 * modification, are permitted provided that the following conditions
46 * are met:
47 * 1. Redistributions of source code must retain the above copyright
48 * notice, this list of conditions and the following disclaimer.
49 * 2. Redistributions in binary form must reproduce the above copyright
50 * notice, this list of conditions and the following disclaimer in the
51 * documentation and/or other materials provided with the distribution.
52 *
53 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
54 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
55 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
56 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
57 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
58 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
59 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
60 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
61 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
62 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
63 * SUCH DAMAGE.
64 *
65 * from FreeBSD: amr_pci.c,v 1.5 2000/08/30 07:52:40 msmith Exp
66 * from FreeBSD: amr.c,v 1.16 2000/08/30 07:52:40 msmith Exp
67 */
68
69 /*
70 * Driver for AMI RAID controllers.
71 */
72
73 #include <sys/cdefs.h>
74 __KERNEL_RCSID(0, "$NetBSD: amr.c,v 1.18 2003/10/29 02:27:32 mycroft Exp $");
75
76 #include <sys/param.h>
77 #include <sys/systm.h>
78 #include <sys/kernel.h>
79 #include <sys/device.h>
80 #include <sys/queue.h>
81 #include <sys/proc.h>
82 #include <sys/buf.h>
83 #include <sys/malloc.h>
84 #include <sys/kthread.h>
85
86 #include <uvm/uvm_extern.h>
87
88 #include <machine/endian.h>
89 #include <machine/bus.h>
90
91 #include <dev/pci/pcidevs.h>
92 #include <dev/pci/pcivar.h>
93 #include <dev/pci/amrreg.h>
94 #include <dev/pci/amrvar.h>
95
/*
 * Autoconfiguration glue and driver entry points.
 */
96 void amr_attach(struct device *, struct device *, void *);
97 void amr_ccb_dump(struct amr_softc *, struct amr_ccb *);
98 void *amr_enquire(struct amr_softc *, u_int8_t, u_int8_t, u_int8_t, void *);
99 int amr_init(struct amr_softc *, const char *,
100 struct pci_attach_args *pa);
101 int amr_intr(void *);
102 int amr_match(struct device *, struct cfdata *, void *);
103 int amr_print(void *, const char *);
104 void amr_shutdown(void *);
105 int amr_submatch(struct device *, struct cfdata *, void *);
106 void amr_teardown(struct amr_softc *);
107 void amr_thread(void *);
108 void amr_thread_create(void *);
109
/*
 * Board-variant mailbox operations; amr_attach() selects the quartz or
 * standard flavour via the amr_submit/amr_get_work function pointers.
 */
110 int amr_mbox_wait(struct amr_softc *);
111 int amr_quartz_get_work(struct amr_softc *, struct amr_mailbox_resp *);
112 int amr_quartz_submit(struct amr_softc *, struct amr_ccb *);
113 int amr_std_get_work(struct amr_softc *, struct amr_mailbox_resp *);
114 int amr_std_submit(struct amr_softc *, struct amr_ccb *);
115
/* Register access helpers that insert bus_space barriers. */
116 static inline u_int8_t amr_inb(struct amr_softc *, int);
117 static inline u_int32_t amr_inl(struct amr_softc *, int);
118 static inline void amr_outb(struct amr_softc *, int, u_int8_t);
119 static inline void amr_outl(struct amr_softc *, int, u_int32_t);
120
121 CFATTACH_DECL(amr, sizeof(struct amr_softc),
122 amr_match, amr_attach, NULL, NULL);
123
124 #define AT_QUARTZ 0x01 /* `Quartz' chipset */
125 #define AT_SIG 0x02 /* Check for signature */
126
/*
 * Supported PCI vendor/product pairs, consulted by amr_match() and again
 * by amr_attach() to pick up the per-board flags.
 */
127 struct amr_pci_type {
128 u_short apt_vendor;
129 u_short apt_product;
130 u_short apt_flags;
131 } const amr_pci_type[] = {
132 { PCI_VENDOR_AMI, PCI_PRODUCT_AMI_MEGARAID, 0 },
133 { PCI_VENDOR_AMI, PCI_PRODUCT_AMI_MEGARAID2, 0 },
134 { PCI_VENDOR_AMI, PCI_PRODUCT_AMI_MEGARAID3, AT_QUARTZ },
135 { PCI_VENDOR_INTEL, PCI_PRODUCT_AMI_MEGARAID3, AT_QUARTZ | AT_SIG },
136 { PCI_VENDOR_DELL, PCI_PRODUCT_DELL_PERC_4DI, AT_QUARTZ },
137 { PCI_VENDOR_DELL, PCI_PRODUCT_DELL_PERC_4DI_2, AT_QUARTZ },
138 };
139
/* Map 8LD ENQUIRY2 signatures to board names; used by amr_init(). */
140 struct amr_typestr {
141 const char *at_str;
142 int at_sig;
143 } const amr_typestr[] = {
144 { "Series 431", AMR_SIG_431 },
145 { "Series 438", AMR_SIG_438 },
146 { "Series 466", AMR_SIG_466 },
147 { "Series 467", AMR_SIG_467 },
148 { "Series 490", AMR_SIG_490 },
149 { "Series 762", AMR_SIG_762 },
150 { "HP NetRAID (T5)", AMR_SIG_T5 },
151 { "HP NetRAID (T7)", AMR_SIG_T7 },
152 };
153
/*
 * Logical drive state descriptions, indexed by the state code; ds_happy
 * is non-zero for states in which the drive is usable.  Consumed by
 * amr_drive_state().
 */
154 struct {
155 const char *ds_descr;
156 int ds_happy;
157 } const amr_dstate[] = {
158 { "offline", 0 },
159 { "degraded", 1 },
160 { "optimal", 1 },
161 { "online", 1 },
162 { "failed", 0 },
163 { "rebuilding", 1 },
164 { "hotspare", 0 },
165 };
166
/* Shared across all instances: one shutdown hook, global transfer limits. */
167 void *amr_sdh;
168 int amr_max_segs;
169 int amr_max_xfer;
170
171 static inline u_int8_t
172 amr_inb(struct amr_softc *amr, int off)
173 {
174
175 bus_space_barrier(amr->amr_iot, amr->amr_ioh, off, 1,
176 BUS_SPACE_BARRIER_WRITE | BUS_SPACE_BARRIER_READ);
177 return (bus_space_read_1(amr->amr_iot, amr->amr_ioh, off));
178 }
179
180 static inline u_int32_t
181 amr_inl(struct amr_softc *amr, int off)
182 {
183
184 bus_space_barrier(amr->amr_iot, amr->amr_ioh, off, 4,
185 BUS_SPACE_BARRIER_WRITE | BUS_SPACE_BARRIER_READ);
186 return (bus_space_read_4(amr->amr_iot, amr->amr_ioh, off));
187 }
188
189 static inline void
190 amr_outb(struct amr_softc *amr, int off, u_int8_t val)
191 {
192
193 bus_space_write_1(amr->amr_iot, amr->amr_ioh, off, val);
194 bus_space_barrier(amr->amr_iot, amr->amr_ioh, off, 1,
195 BUS_SPACE_BARRIER_WRITE);
196 }
197
198 static inline void
199 amr_outl(struct amr_softc *amr, int off, u_int32_t val)
200 {
201
202 bus_space_write_4(amr->amr_iot, amr->amr_ioh, off, val);
203 bus_space_barrier(amr->amr_iot, amr->amr_ioh, off, 4,
204 BUS_SPACE_BARRIER_WRITE);
205 }
206
207 /*
208 * Match a supported device.
209 */
210 int
211 amr_match(struct device *parent, struct cfdata *match, void *aux)
212 {
213 struct pci_attach_args *pa;
214 pcireg_t s;
215 int i;
216
217 pa = (struct pci_attach_args *)aux;
218
219 /*
220 * Don't match the device if it's operating in I2O mode. In this
221 * case it should be handled by the `iop' driver.
222 */
223 if (PCI_CLASS(pa->pa_class) == PCI_CLASS_I2O)
224 return (0);
225
226 for (i = 0; i < sizeof(amr_pci_type) / sizeof(amr_pci_type[0]); i++)
227 if (PCI_VENDOR(pa->pa_id) == amr_pci_type[i].apt_vendor &&
228 PCI_PRODUCT(pa->pa_id) == amr_pci_type[i].apt_product)
229 break;
230
231 if (i == sizeof(amr_pci_type) / sizeof(amr_pci_type[0]))
232 return (0);
233
234 if ((amr_pci_type[i].apt_flags & AT_SIG) == 0)
235 return (1);
236
237 s = pci_conf_read(pa->pa_pc, pa->pa_tag, AMR_QUARTZ_SIG_REG) & 0xffff;
238 return (s == AMR_QUARTZ_SIG0 || s == AMR_QUARTZ_SIG1);
239 }
240
241 /*
242 * Attach a supported device.
243 */
244 void
245 amr_attach(struct device *parent, struct device *self, void *aux)
246 {
247 struct pci_attach_args *pa;
248 struct amr_attach_args amra;
249 const struct amr_pci_type *apt;
250 struct amr_softc *amr;
251 pci_chipset_tag_t pc;
252 pci_intr_handle_t ih;
253 const char *intrstr;
254 pcireg_t reg;
255 int rseg, i, j, size, rv, memreg, ioreg;
256 struct amr_ccb *ac;
257
258 aprint_naive(": RAID controller\n");
259
260 amr = (struct amr_softc *)self;
261 pa = (struct pci_attach_args *)aux;
262 pc = pa->pa_pc;
263
264 for (i = 0; i < sizeof(amr_pci_type) / sizeof(amr_pci_type[0]); i++)
265 if (PCI_VENDOR(pa->pa_id) == amr_pci_type[i].apt_vendor &&
266 PCI_PRODUCT(pa->pa_id) == amr_pci_type[i].apt_product)
267 break;
268 apt = amr_pci_type + i;
269
270 memreg = ioreg = 0;
271 for (i = 0x10; i <= 0x14; i += 4) {
272 reg = pci_conf_read(pc, pa->pa_tag, i);
273 switch (PCI_MAPREG_TYPE(reg)) {
274 case PCI_MAPREG_TYPE_MEM:
275 memreg = i;
276 break;
277 case PCI_MAPREG_TYPE_IO:
278 ioreg = i;
279 break;
280
281 }
282 }
283
284 if (memreg && pci_mapreg_map(pa, memreg, PCI_MAPREG_TYPE_MEM, 0,
285 &amr->amr_iot, &amr->amr_ioh, NULL, &amr->amr_ios) == 0)
286 ;
287 else if (ioreg && pci_mapreg_map(pa, ioreg, PCI_MAPREG_TYPE_IO, 0,
288 &amr->amr_iot, &amr->amr_ioh, NULL, &amr->amr_ios) == 0)
289 ;
290 else {
291 aprint_error("can't map control registers\n");
292 amr_teardown(amr);
293 return;
294 }
295
296 amr->amr_flags |= AMRF_PCI_REGS;
297 amr->amr_dmat = pa->pa_dmat;
298 amr->amr_pc = pa->pa_pc;
299
300 /* Enable the device. */
301 reg = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
302 pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG,
303 reg | PCI_COMMAND_MASTER_ENABLE);
304
305 /* Map and establish the interrupt. */
306 if (pci_intr_map(pa, &ih)) {
307 aprint_error("can't map interrupt\n");
308 amr_teardown(amr);
309 return;
310 }
311 intrstr = pci_intr_string(pc, ih);
312 amr->amr_ih = pci_intr_establish(pc, ih, IPL_BIO, amr_intr, amr);
313 if (amr->amr_ih == NULL) {
314 aprint_error("can't establish interrupt");
315 if (intrstr != NULL)
316 aprint_normal(" at %s", intrstr);
317 aprint_normal("\n");
318 amr_teardown(amr);
319 return;
320 }
321 amr->amr_flags |= AMRF_PCI_INTR;
322
323 /*
324 * Allocate space for the mailbox and S/G lists. Some controllers
325 * don't like S/G lists to be located below 0x2000, so we allocate
326 * enough slop to enable us to compensate.
327 *
328 * The standard mailbox structure needs to be aligned on a 16-byte
329 * boundary. The 64-bit mailbox has one extra field, 4 bytes in
330 * size, which preceeds the standard mailbox.
331 */
332 size = AMR_SGL_SIZE * AMR_MAX_CMDS + 0x2000;
333 amr->amr_dmasize = size;
334
335 if ((rv = bus_dmamem_alloc(amr->amr_dmat, size, PAGE_SIZE, 0,
336 &amr->amr_dmaseg, 1, &rseg, BUS_DMA_NOWAIT)) != 0) {
337 aprint_error("%s: unable to allocate buffer, rv = %d\n",
338 amr->amr_dv.dv_xname, rv);
339 amr_teardown(amr);
340 return;
341 }
342 amr->amr_flags |= AMRF_DMA_ALLOC;
343
344 if ((rv = bus_dmamem_map(amr->amr_dmat, &amr->amr_dmaseg, rseg, size,
345 (caddr_t *)&amr->amr_mbox,
346 BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) != 0) {
347 aprint_error("%s: unable to map buffer, rv = %d\n",
348 amr->amr_dv.dv_xname, rv);
349 amr_teardown(amr);
350 return;
351 }
352 amr->amr_flags |= AMRF_DMA_MAP;
353
354 if ((rv = bus_dmamap_create(amr->amr_dmat, size, 1, size, 0,
355 BUS_DMA_NOWAIT, &amr->amr_dmamap)) != 0) {
356 aprint_error("%s: unable to create buffer DMA map, rv = %d\n",
357 amr->amr_dv.dv_xname, rv);
358 amr_teardown(amr);
359 return;
360 }
361 amr->amr_flags |= AMRF_DMA_CREATE;
362
363 if ((rv = bus_dmamap_load(amr->amr_dmat, amr->amr_dmamap,
364 amr->amr_mbox, size, NULL, BUS_DMA_NOWAIT)) != 0) {
365 aprint_error("%s: unable to load buffer DMA map, rv = %d\n",
366 amr->amr_dv.dv_xname, rv);
367 amr_teardown(amr);
368 return;
369 }
370 amr->amr_flags |= AMRF_DMA_LOAD;
371
372 memset(amr->amr_mbox, 0, size);
373
374 amr->amr_mbox_paddr = amr->amr_dmamap->dm_segs[0].ds_addr;
375 amr->amr_sgls_paddr = (amr->amr_mbox_paddr + 0x1fff) & ~0x1fff;
376 amr->amr_sgls = (struct amr_sgentry *)((caddr_t)amr->amr_mbox +
377 amr->amr_sgls_paddr - amr->amr_dmamap->dm_segs[0].ds_addr);
378
379 /*
380 * Allocate and initalise the command control blocks.
381 */
382 ac = malloc(sizeof(*ac) * AMR_MAX_CMDS, M_DEVBUF, M_NOWAIT | M_ZERO);
383 amr->amr_ccbs = ac;
384 SLIST_INIT(&amr->amr_ccb_freelist);
385 TAILQ_INIT(&amr->amr_ccb_active);
386 amr->amr_flags |= AMRF_CCBS;
387
388 if (amr_max_xfer == 0) {
389 amr_max_xfer = min(((AMR_MAX_SEGS - 1) * PAGE_SIZE), MAXPHYS);
390 amr_max_segs = (amr_max_xfer + (PAGE_SIZE * 2) - 1) / PAGE_SIZE;
391 }
392
393 for (i = 0; i < AMR_MAX_CMDS; i++, ac++) {
394 rv = bus_dmamap_create(amr->amr_dmat, amr_max_xfer,
395 amr_max_segs, amr_max_xfer, 0,
396 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &ac->ac_xfer_map);
397 if (rv != 0)
398 break;
399
400 ac->ac_ident = i;
401 amr_ccb_free(amr, ac);
402 }
403 if (i != AMR_MAX_CMDS) {
404 aprint_error("%s: memory exhausted\n", amr->amr_dv.dv_xname);
405 amr_teardown(amr);
406 return;
407 }
408
409 /*
410 * Take care of model-specific tasks.
411 */
412 if ((apt->apt_flags & AT_QUARTZ) != 0) {
413 amr->amr_submit = amr_quartz_submit;
414 amr->amr_get_work = amr_quartz_get_work;
415 } else {
416 amr->amr_submit = amr_std_submit;
417 amr->amr_get_work = amr_std_get_work;
418
419 /* Notify the controller of the mailbox location. */
420 amr_outl(amr, AMR_SREG_MBOX, (u_int32_t)amr->amr_mbox_paddr + 16);
421 amr_outb(amr, AMR_SREG_MBOX_ENABLE, AMR_SMBOX_ENABLE_ADDR);
422
423 /* Clear outstanding interrupts and enable interrupts. */
424 amr_outb(amr, AMR_SREG_CMD, AMR_SCMD_ACKINTR);
425 amr_outb(amr, AMR_SREG_TOGL,
426 amr_inb(amr, AMR_SREG_TOGL) | AMR_STOGL_ENABLE);
427 }
428
429 /*
430 * Retrieve parameters, and tell the world about us.
431 */
432 amr->amr_enqbuf = malloc(AMR_ENQUIRY_BUFSIZE, M_DEVBUF, M_NOWAIT);
433 amr->amr_flags |= AMRF_ENQBUF;
434 amr->amr_maxqueuecnt = i;
435 aprint_normal(": AMI RAID ");
436 if (amr_init(amr, intrstr, pa) != 0) {
437 amr_teardown(amr);
438 return;
439 }
440
441 /*
442 * Cap the maximum number of outstanding commands. AMI's Linux
443 * driver doesn't trust the controller's reported value, and lockups
444 * have been seen when we do.
445 */
446 amr->amr_maxqueuecnt = min(amr->amr_maxqueuecnt, AMR_MAX_CMDS);
447 if (amr->amr_maxqueuecnt > i)
448 amr->amr_maxqueuecnt = i;
449
450 /* Set our `shutdownhook' before we start any device activity. */
451 if (amr_sdh == NULL)
452 amr_sdh = shutdownhook_establish(amr_shutdown, NULL);
453
454 /* Attach sub-devices. */
455 for (j = 0; j < amr->amr_numdrives; j++) {
456 if (amr->amr_drive[j].al_size == 0)
457 continue;
458 amra.amra_unit = j;
459 amr->amr_drive[j].al_dv = config_found_sm(&amr->amr_dv, &amra,
460 amr_print, amr_submatch);
461 }
462
463 SIMPLEQ_INIT(&amr->amr_ccb_queue);
464
465 /* XXX This doesn't work for newer boards yet. */
466 if ((apt->apt_flags & AT_QUARTZ) == 0)
467 kthread_create(amr_thread_create, amr);
468 }
469
470 /*
471 * Free up resources.
472 */
473 void
474 amr_teardown(struct amr_softc *amr)
475 {
476 struct amr_ccb *ac;
477 int fl;
478
479 fl = amr->amr_flags;
480
481 if ((fl & AMRF_THREAD) != 0) {
482 amr->amr_flags |= AMRF_THREAD_EXIT;
483 wakeup(amr_thread);
484 while ((amr->amr_flags & AMRF_THREAD_EXIT) != 0)
485 tsleep(&amr->amr_flags, PWAIT, "amrexit", 0);
486 }
487 if ((fl & AMRF_CCBS) != 0) {
488 SLIST_FOREACH(ac, &amr->amr_ccb_freelist, ac_chain.slist) {
489 bus_dmamap_destroy(amr->amr_dmat, ac->ac_xfer_map);
490 }
491 free(amr->amr_ccbs, M_DEVBUF);
492 }
493 if ((fl & AMRF_ENQBUF) != 0)
494 free(amr->amr_enqbuf, M_DEVBUF);
495 if ((fl & AMRF_DMA_LOAD) != 0)
496 bus_dmamap_unload(amr->amr_dmat, amr->amr_dmamap);
497 if ((fl & AMRF_DMA_MAP) != 0)
498 bus_dmamem_unmap(amr->amr_dmat, (caddr_t)amr->amr_mbox,
499 amr->amr_dmasize);
500 if ((fl & AMRF_DMA_ALLOC) != 0)
501 bus_dmamem_free(amr->amr_dmat, &amr->amr_dmaseg, 1);
502 if ((fl & AMRF_DMA_CREATE) != 0)
503 bus_dmamap_destroy(amr->amr_dmat, amr->amr_dmamap);
504 if ((fl & AMRF_PCI_INTR) != 0)
505 pci_intr_disestablish(amr->amr_pc, amr->amr_ih);
506 if ((fl & AMRF_PCI_REGS) != 0)
507 bus_space_unmap(amr->amr_iot, amr->amr_ioh, amr->amr_ios);
508 }
509
510 /*
511 * Print autoconfiguration message for a sub-device.
512 */
513 int
514 amr_print(void *aux, const char *pnp)
515 {
516 struct amr_attach_args *amra;
517
518 amra = (struct amr_attach_args *)aux;
519
520 if (pnp != NULL)
521 aprint_normal("block device at %s", pnp);
522 aprint_normal(" unit %d", amra->amra_unit);
523 return (UNCONF);
524 }
525
526 /*
527 * Match a sub-device.
528 */
529 int
530 amr_submatch(struct device *parent, struct cfdata *cf, void *aux)
531 {
532 struct amr_attach_args *amra;
533
534 amra = (struct amr_attach_args *)aux;
535
536 if (cf->amracf_unit != AMRCF_UNIT_DEFAULT &&
537 cf->amracf_unit != amra->amra_unit)
538 return (0);
539
540 return (config_match(parent, cf, aux));
541 }
542
543 /*
544 * Retrieve operational parameters and describe the controller.
545 */
546 int
547 amr_init(struct amr_softc *amr, const char *intrstr,
548 struct pci_attach_args *pa)
549 {
550 struct amr_adapter_info *aa;
551 struct amr_prodinfo *ap;
552 struct amr_enquiry *ae;
553 struct amr_enquiry3 *aex;
554 const char *prodstr;
555 u_int i, sig, ishp;
556 char buf[64];
557
558 /*
559 * Try to get 40LD product info, which tells us what the card is
560 * labelled as.
561 */
562 ap = amr_enquire(amr, AMR_CMD_CONFIG, AMR_CONFIG_PRODUCT_INFO, 0,
563 amr->amr_enqbuf);
564 if (ap != NULL) {
565 aprint_normal("<%.80s>\n", ap->ap_product);
566 if (intrstr != NULL)
567 aprint_normal("%s: interrupting at %s\n",
568 amr->amr_dv.dv_xname, intrstr);
569 aprint_normal("%s: firmware %.16s, BIOS %.16s, %dMB RAM\n",
570 amr->amr_dv.dv_xname, ap->ap_firmware, ap->ap_bios,
571 le16toh(ap->ap_memsize));
572
573 amr->amr_maxqueuecnt = ap->ap_maxio;
574
575 /*
576 * Fetch and record state of logical drives.
577 */
578 aex = amr_enquire(amr, AMR_CMD_CONFIG, AMR_CONFIG_ENQ3,
579 AMR_CONFIG_ENQ3_SOLICITED_FULL, amr->amr_enqbuf);
580 if (aex == NULL) {
581 aprint_error("%s ENQUIRY3 failed\n",
582 amr->amr_dv.dv_xname);
583 return (-1);
584 }
585
586 if (aex->ae_numldrives > AMR_MAX_UNITS) {
587 aprint_error(
588 "%s: adjust AMR_MAX_UNITS to %d (currently %d)"
589 "\n", amr->amr_dv.dv_xname, AMR_MAX_UNITS,
590 amr->amr_numdrives);
591 amr->amr_numdrives = AMR_MAX_UNITS;
592 } else
593 amr->amr_numdrives = aex->ae_numldrives;
594
595 for (i = 0; i < amr->amr_numdrives; i++) {
596 amr->amr_drive[i].al_size =
597 le32toh(aex->ae_drivesize[i]);
598 amr->amr_drive[i].al_state = aex->ae_drivestate[i];
599 amr->amr_drive[i].al_properties = aex->ae_driveprop[i];
600 }
601
602 return (0);
603 }
604
605 /*
606 * Try 8LD extended ENQUIRY to get the controller signature. Once
607 * found, search for a product description.
608 */
609 ae = amr_enquire(amr, AMR_CMD_EXT_ENQUIRY2, 0, 0, amr->amr_enqbuf);
610 if (ae != NULL) {
611 i = 0;
612 sig = le32toh(ae->ae_signature);
613
614 while (i < sizeof(amr_typestr) / sizeof(amr_typestr[0])) {
615 if (amr_typestr[i].at_sig == sig)
616 break;
617 i++;
618 }
619 if (i == sizeof(amr_typestr) / sizeof(amr_typestr[0])) {
620 sprintf(buf, "unknown ENQUIRY2 sig (0x%08x)", sig);
621 prodstr = buf;
622 } else
623 prodstr = amr_typestr[i].at_str;
624 } else {
625 ae = amr_enquire(amr, AMR_CMD_ENQUIRY, 0, 0, amr->amr_enqbuf);
626 if (ae == NULL) {
627 aprint_error("%s: unsupported controller\n",
628 amr->amr_dv.dv_xname);
629 return (-1);
630 }
631
632 switch (PCI_PRODUCT(pa->pa_id)) {
633 case PCI_PRODUCT_AMI_MEGARAID:
634 prodstr = "Series 428";
635 break;
636 case PCI_PRODUCT_AMI_MEGARAID2:
637 prodstr = "Series 434";
638 break;
639 default:
640 sprintf(buf, "unknown PCI dev (0x%04x)",
641 PCI_PRODUCT(pa->pa_id));
642 prodstr = buf;
643 break;
644 }
645 }
646
647 /*
648 * HP NetRaid controllers have a special encoding of the firmware
649 * and BIOS versions. The AMI version seems to have it as strings
650 * whereas the HP version does it with a leading uppercase character
651 * and two binary numbers.
652 */
653 aa = &ae->ae_adapter;
654
655 if (aa->aa_firmware[2] >= 'A' && aa->aa_firmware[2] <= 'Z' &&
656 aa->aa_firmware[1] < ' ' && aa->aa_firmware[0] < ' ' &&
657 aa->aa_bios[2] >= 'A' && aa->aa_bios[2] <= 'Z' &&
658 aa->aa_bios[1] < ' ' && aa->aa_bios[0] < ' ') {
659 if (le32toh(ae->ae_signature) == AMR_SIG_438) {
660 /* The AMI 438 is a NetRaid 3si in HP-land. */
661 prodstr = "HP NetRaid 3si";
662 }
663 ishp = 1;
664 } else
665 ishp = 0;
666
667 aprint_normal("<%s>\n", prodstr);
668 if (intrstr != NULL)
669 aprint_normal("%s: interrupting at %s\n", amr->amr_dv.dv_xname,
670 intrstr);
671
672 if (ishp)
673 aprint_normal("%s: firmware <%c.%02d.%02d>, BIOS <%c.%02d.%02d>"
674 ", %dMB RAM\n", amr->amr_dv.dv_xname, aa->aa_firmware[2],
675 aa->aa_firmware[1], aa->aa_firmware[0], aa->aa_bios[2],
676 aa->aa_bios[1], aa->aa_bios[0], aa->aa_memorysize);
677 else
678 aprint_normal("%s: firmware <%.4s>, BIOS <%.4s>, %dMB RAM\n",
679 amr->amr_dv.dv_xname, aa->aa_firmware, aa->aa_bios,
680 aa->aa_memorysize);
681
682 amr->amr_maxqueuecnt = aa->aa_maxio;
683
684 /*
685 * Record state of logical drives.
686 */
687 if (ae->ae_ldrv.al_numdrives > AMR_MAX_UNITS) {
688 aprint_error("%s: adjust AMR_MAX_UNITS to %d (currently %d)\n",
689 amr->amr_dv.dv_xname, ae->ae_ldrv.al_numdrives,
690 AMR_MAX_UNITS);
691 amr->amr_numdrives = AMR_MAX_UNITS;
692 } else
693 amr->amr_numdrives = ae->ae_ldrv.al_numdrives;
694
695 for (i = 0; i < AMR_MAX_UNITS; i++) {
696 amr->amr_drive[i].al_size = le32toh(ae->ae_ldrv.al_size[i]);
697 amr->amr_drive[i].al_state = ae->ae_ldrv.al_state[i];
698 amr->amr_drive[i].al_properties = ae->ae_ldrv.al_properties[i];
699 }
700
701 return (0);
702 }
703
704 /*
705 * Flush the internal cache on each configured controller. Called at
706 * shutdown time.
707 */
708 void
709 amr_shutdown(void *cookie)
710 {
711 extern struct cfdriver amr_cd;
712 struct amr_softc *amr;
713 struct amr_ccb *ac;
714 int i, rv, s;
715
716 for (i = 0; i < amr_cd.cd_ndevs; i++) {
717 if ((amr = device_lookup(&amr_cd, i)) == NULL)
718 continue;
719
720 if ((rv = amr_ccb_alloc(amr, &ac)) == 0) {
721 ac->ac_cmd.mb_command = AMR_CMD_FLUSH;
722 s = splbio();
723 rv = amr_ccb_poll(amr, ac, 30000);
724 splx(s);
725 amr_ccb_free(amr, ac);
726 }
727 if (rv != 0)
728 printf("%s: unable to flush cache (%d)\n",
729 amr->amr_dv.dv_xname, rv);
730 }
731 }
732
733 /*
734 * Interrupt service routine.
735 */
736 int
737 amr_intr(void *cookie)
738 {
739 struct amr_softc *amr;
740 struct amr_ccb *ac;
741 struct amr_mailbox_resp mbox;
742 u_int i, forus, idx;
743
744 amr = cookie;
745 forus = 0;
746
747 while ((*amr->amr_get_work)(amr, &mbox) == 0) {
748 /* Iterate over completed commands in this result. */
749 for (i = 0; i < mbox.mb_nstatus; i++) {
750 idx = mbox.mb_completed[i] - 1;
751 ac = amr->amr_ccbs + idx;
752
753 if (idx >= amr->amr_maxqueuecnt) {
754 printf("%s: bad status (bogus ID: %u=%u)\n",
755 amr->amr_dv.dv_xname, i, idx);
756 continue;
757 }
758
759 if ((ac->ac_flags & AC_ACTIVE) == 0) {
760 printf("%s: bad status (not active; 0x04%x)\n",
761 amr->amr_dv.dv_xname, ac->ac_flags);
762 continue;
763 }
764
765 ac->ac_status = mbox.mb_status;
766 ac->ac_flags = (ac->ac_flags & ~AC_ACTIVE) |
767 AC_COMPLETE;
768 TAILQ_REMOVE(&amr->amr_ccb_active, ac, ac_chain.tailq);
769
770 if ((ac->ac_flags & AC_MOAN) != 0)
771 printf("%s: ccb %d completed\n",
772 amr->amr_dv.dv_xname, ac->ac_ident);
773
774 /* Pass notification to upper layers. */
775 if (ac->ac_handler != NULL)
776 (*ac->ac_handler)(ac);
777 else
778 wakeup(ac);
779 }
780 forus = 1;
781 }
782
783 if (forus)
784 amr_ccb_enqueue(amr, NULL);
785
786 return (forus);
787 }
788
789 /*
790 * Create the watchdog thread.
791 */
792 void
793 amr_thread_create(void *cookie)
794 {
795 struct amr_softc *amr;
796 int rv;
797
798 amr = cookie;
799
800 if ((amr->amr_flags & AMRF_THREAD_EXIT) != 0) {
801 amr->amr_flags ^= AMRF_THREAD_EXIT;
802 wakeup(&amr->amr_flags);
803 return;
804 }
805
806 rv = kthread_create1(amr_thread, amr, &amr->amr_thread, "%s",
807 amr->amr_dv.dv_xname);
808 if (rv != 0)
809 aprint_error("%s: unable to create thread (%d)",
810 amr->amr_dv.dv_xname, rv);
811 else
812 amr->amr_flags |= AMRF_THREAD;
813 }
814
815 /*
816 * Watchdog thread.
817 */
818 void
819 amr_thread(void *cookie)
820 {
821 struct amr_softc *amr;
822 struct amr_ccb *ac;
823 struct amr_logdrive *al;
824 struct amr_enquiry *ae;
825 time_t curtime;
826 int rv, i, s;
827
828 amr = cookie;
829 ae = amr->amr_enqbuf;
830
831 for (;;) {
832 tsleep(amr_thread, PWAIT, "amrwdog", AMR_WDOG_TICKS);
833
834 if ((amr->amr_flags & AMRF_THREAD_EXIT) != 0) {
835 amr->amr_flags ^= AMRF_THREAD_EXIT;
836 wakeup(&amr->amr_flags);
837 kthread_exit(0);
838 }
839
840 s = splbio();
841 amr_intr(cookie);
842 curtime = (time_t)mono_time.tv_sec;
843 ac = TAILQ_FIRST(&amr->amr_ccb_active);
844 while (ac != NULL) {
845 if (ac->ac_start_time + AMR_TIMEOUT > curtime)
846 break;
847 if ((ac->ac_flags & AC_MOAN) == 0) {
848 printf("%s: ccb %d timed out; mailbox:\n",
849 amr->amr_dv.dv_xname, ac->ac_ident);
850 amr_ccb_dump(amr, ac);
851 ac->ac_flags |= AC_MOAN;
852 }
853 ac = TAILQ_NEXT(ac, ac_chain.tailq);
854 }
855 splx(s);
856
857 if ((rv = amr_ccb_alloc(amr, &ac)) != 0) {
858 printf("%s: ccb_alloc failed (%d)\n",
859 amr->amr_dv.dv_xname, rv);
860 continue;
861 }
862
863 ac->ac_cmd.mb_command = AMR_CMD_ENQUIRY;
864
865 rv = amr_ccb_map(amr, ac, amr->amr_enqbuf,
866 AMR_ENQUIRY_BUFSIZE, 0);
867 if (rv != 0) {
868 printf("%s: ccb_map failed (%d)\n",
869 amr->amr_dv.dv_xname, rv);
870 amr_ccb_free(amr, ac);
871 continue;
872 }
873
874 rv = amr_ccb_wait(amr, ac);
875 amr_ccb_unmap(amr, ac);
876 if (rv != 0) {
877 printf("%s: enquiry failed (st=%d)\n",
878 amr->amr_dv.dv_xname, ac->ac_status);
879 continue;
880 }
881 amr_ccb_free(amr, ac);
882
883 al = amr->amr_drive;
884 for (i = 0; i < AMR_MAX_UNITS; i++, al++) {
885 if (al->al_dv == NULL)
886 continue;
887 if (al->al_state == ae->ae_ldrv.al_state[i])
888 continue;
889
890 printf("%s: state changed: %s -> %s\n",
891 al->al_dv->dv_xname,
892 amr_drive_state(al->al_state, NULL),
893 amr_drive_state(ae->ae_ldrv.al_state[i], NULL));
894
895 al->al_state = ae->ae_ldrv.al_state[i];
896 }
897 }
898 }
899
900 /*
901 * Return a text description of a logical drive's current state.
902 */
903 const char *
904 amr_drive_state(int state, int *happy)
905 {
906 const char *str;
907
908 state = AMR_DRV_CURSTATE(state);
909 if (state >= sizeof(amr_dstate) / sizeof(amr_dstate[0])) {
910 if (happy)
911 *happy = 1;
912 str = "status unknown";
913 } else {
914 if (happy)
915 *happy = amr_dstate[state].ds_happy;
916 str = amr_dstate[state].ds_descr;
917 }
918
919 return (str);
920 }
921
922 /*
923 * Run a generic enquiry-style command.
924 */
925 void *
926 amr_enquire(struct amr_softc *amr, u_int8_t cmd, u_int8_t cmdsub,
927 u_int8_t cmdqual, void *buf)
928 {
929 struct amr_ccb *ac;
930 u_int8_t *mb;
931 int rv;
932
933 if (amr_ccb_alloc(amr, &ac) != 0)
934 return (NULL);
935
936 /* Build the command proper. */
937 mb = (u_int8_t *)&ac->ac_cmd;
938 mb[0] = cmd;
939 mb[2] = cmdsub;
940 mb[3] = cmdqual;
941
942 rv = amr_ccb_map(amr, ac, buf, AMR_ENQUIRY_BUFSIZE, 0);
943 if (rv == 0) {
944 rv = amr_ccb_poll(amr, ac, 2000);
945 amr_ccb_unmap(amr, ac);
946 }
947 amr_ccb_free(amr, ac);
948
949 return (rv ? NULL : buf);
950 }
951
952 /*
953 * Allocate and initialise a CCB.
954 */
955 int
956 amr_ccb_alloc(struct amr_softc *amr, struct amr_ccb **acp)
957 {
958 int s;
959
960 s = splbio();
961 if ((*acp = SLIST_FIRST(&amr->amr_ccb_freelist)) == NULL) {
962 splx(s);
963 return (EAGAIN);
964 }
965 SLIST_REMOVE_HEAD(&amr->amr_ccb_freelist, ac_chain.slist);
966 splx(s);
967
968 return (0);
969 }
970
971 /*
972 * Free a CCB.
973 */
974 void
975 amr_ccb_free(struct amr_softc *amr, struct amr_ccb *ac)
976 {
977 int s;
978
979 memset(&ac->ac_cmd, 0, sizeof(ac->ac_cmd));
980 ac->ac_cmd.mb_ident = ac->ac_ident + 1;
981 ac->ac_cmd.mb_busy = 1;
982 ac->ac_handler = NULL;
983 ac->ac_flags = 0;
984
985 s = splbio();
986 SLIST_INSERT_HEAD(&amr->amr_ccb_freelist, ac, ac_chain.slist);
987 splx(s);
988 }
989
990 /*
991 * If a CCB is specified, enqueue it. Pull CCBs off the software queue in
992 * the order that they were enqueued and try to submit their command blocks
993 * to the controller for execution.
994 */
995 void
996 amr_ccb_enqueue(struct amr_softc *amr, struct amr_ccb *ac)
997 {
998 int s;
999
1000 s = splbio();
1001
1002 if (ac != NULL)
1003 SIMPLEQ_INSERT_TAIL(&amr->amr_ccb_queue, ac, ac_chain.simpleq);
1004
1005 while ((ac = SIMPLEQ_FIRST(&amr->amr_ccb_queue)) != NULL) {
1006 if ((*amr->amr_submit)(amr, ac) != 0)
1007 break;
1008 SIMPLEQ_REMOVE_HEAD(&amr->amr_ccb_queue, ac_chain.simpleq);
1009 TAILQ_INSERT_TAIL(&amr->amr_ccb_active, ac, ac_chain.tailq);
1010 }
1011
1012 splx(s);
1013 }
1014
1015 /*
1016 * Map the specified CCB's data buffer onto the bus, and fill the
1017 * scatter-gather list.
1018 */
1019 int
1020 amr_ccb_map(struct amr_softc *amr, struct amr_ccb *ac, void *data, int size,
1021 int out)
1022 {
1023 struct amr_sgentry *sge;
1024 struct amr_mailbox_cmd *mb;
1025 int nsegs, i, rv, sgloff;
1026 bus_dmamap_t xfer;
1027
1028 xfer = ac->ac_xfer_map;
1029
1030 rv = bus_dmamap_load(amr->amr_dmat, xfer, data, size, NULL,
1031 BUS_DMA_NOWAIT);
1032 if (rv != 0)
1033 return (rv);
1034
1035 mb = &ac->ac_cmd;
1036 ac->ac_xfer_size = size;
1037 ac->ac_flags |= (out ? AC_XFER_OUT : AC_XFER_IN);
1038 sgloff = AMR_SGL_SIZE * ac->ac_ident;
1039
1040 /* We don't need to use a scatter/gather list for just 1 segment. */
1041 nsegs = xfer->dm_nsegs;
1042 if (nsegs == 1) {
1043 mb->mb_nsgelem = 0;
1044 mb->mb_physaddr = htole32(xfer->dm_segs[0].ds_addr);
1045 ac->ac_flags |= AC_NOSGL;
1046 } else {
1047 mb->mb_nsgelem = nsegs;
1048 mb->mb_physaddr = htole32(amr->amr_sgls_paddr + sgloff);
1049
1050 sge = (struct amr_sgentry *)((caddr_t)amr->amr_sgls + sgloff);
1051 for (i = 0; i < nsegs; i++, sge++) {
1052 sge->sge_addr = htole32(xfer->dm_segs[i].ds_addr);
1053 sge->sge_count = htole32(xfer->dm_segs[i].ds_len);
1054 }
1055 }
1056
1057 bus_dmamap_sync(amr->amr_dmat, xfer, 0, ac->ac_xfer_size,
1058 out ? BUS_DMASYNC_PREWRITE : BUS_DMASYNC_PREREAD);
1059
1060 if ((ac->ac_flags & AC_NOSGL) == 0)
1061 bus_dmamap_sync(amr->amr_dmat, amr->amr_dmamap, sgloff,
1062 AMR_SGL_SIZE, BUS_DMASYNC_PREWRITE);
1063
1064 return (0);
1065 }
1066
1067 /*
1068 * Unmap the specified CCB's data buffer.
1069 */
1070 void
1071 amr_ccb_unmap(struct amr_softc *amr, struct amr_ccb *ac)
1072 {
1073
1074 if ((ac->ac_flags & AC_NOSGL) == 0)
1075 bus_dmamap_sync(amr->amr_dmat, amr->amr_dmamap,
1076 AMR_SGL_SIZE * ac->ac_ident, AMR_SGL_SIZE,
1077 BUS_DMASYNC_POSTWRITE);
1078 bus_dmamap_sync(amr->amr_dmat, ac->ac_xfer_map, 0, ac->ac_xfer_size,
1079 (ac->ac_flags & AC_XFER_IN) != 0 ?
1080 BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
1081 bus_dmamap_unload(amr->amr_dmat, ac->ac_xfer_map);
1082 }
1083
1084 /*
1085 * Submit a command to the controller and poll on completion. Return
1086 * non-zero on timeout or error. Must be called with interrupts blocked.
1087 */
1088 int
1089 amr_ccb_poll(struct amr_softc *amr, struct amr_ccb *ac, int timo)
1090 {
1091 int rv;
1092
1093 if ((rv = (*amr->amr_submit)(amr, ac)) != 0)
1094 return (rv);
1095 TAILQ_INSERT_TAIL(&amr->amr_ccb_active, ac, ac_chain.tailq);
1096
1097 for (timo *= 10; timo != 0; timo--) {
1098 amr_intr(amr);
1099 if ((ac->ac_flags & AC_COMPLETE) != 0)
1100 break;
1101 DELAY(100);
1102 }
1103
1104 return (timo == 0 || ac->ac_status != 0 ? EIO : 0);
1105 }
1106
1107 /*
1108 * Submit a command to the controller and sleep on completion. Return
1109 * non-zero on error.
1110 */
1111 int
1112 amr_ccb_wait(struct amr_softc *amr, struct amr_ccb *ac)
1113 {
1114 int s;
1115
1116 s = splbio();
1117 amr_ccb_enqueue(amr, ac);
1118 tsleep(ac, PRIBIO, "amrcmd", 0);
1119 splx(s);
1120
1121 return (ac->ac_status != 0 ? EIO : 0);
1122 }
1123
1124 /*
1125 * Wait for the mailbox to become available.
1126 */
1127 int
1128 amr_mbox_wait(struct amr_softc *amr)
1129 {
1130 int timo;
1131
1132 for (timo = 10000; timo != 0; timo--) {
1133 bus_dmamap_sync(amr->amr_dmat, amr->amr_dmamap, 0,
1134 sizeof(struct amr_mailbox), BUS_DMASYNC_POSTREAD);
1135 if (amr->amr_mbox->mb_cmd.mb_busy == 0)
1136 break;
1137 DELAY(100);
1138 }
1139
1140 if (timo == 0)
1141 printf("%s: controller wedged\n", amr->amr_dv.dv_xname);
1142
1143 return (timo != 0 ? 0 : EAGAIN);
1144 }
1145
1146 /*
1147 * Tell the controller that the mailbox contains a valid command. Must be
1148 * called with interrupts blocked.
1149 */
int
amr_quartz_submit(struct amr_softc *amr, struct amr_ccb *ac)
{
	u_int32_t v;

	/* Clear the poll/ack bytes before offering the mailbox. */
	amr->amr_mbox->mb_poll = 0;
	amr->amr_mbox->mb_ack = 0;
	/*
	 * Flush our writes, then pull the busy flag so the test below
	 * sees the controller's current view of the mailbox.
	 */
	bus_dmamap_sync(amr->amr_dmat, amr->amr_dmamap, 0,
	    sizeof(struct amr_mailbox), BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(amr->amr_dmat, amr->amr_dmamap, 0,
	    sizeof(struct amr_mailbox), BUS_DMASYNC_POSTREAD);
	if (amr->amr_mbox->mb_cmd.mb_busy != 0)
		return (EAGAIN);

	/* A previous submission is still pending in the inbound doorbell. */
	v = amr_inl(amr, AMR_QREG_IDB);
	if ((v & AMR_QIDB_SUBMIT) != 0) {
		/* Back out: mark the mailbox free again and re-sync it. */
		amr->amr_mbox->mb_cmd.mb_busy = 0;
		bus_dmamap_sync(amr->amr_dmat, amr->amr_dmamap, 0,
		    sizeof(struct amr_mailbox), BUS_DMASYNC_PREWRITE);
		bus_dmamap_sync(amr->amr_dmat, amr->amr_dmamap, 0,
		    sizeof(struct amr_mailbox), BUS_DMASYNC_PREREAD);
		return (EAGAIN);
	}

	/* Copy the command into the mailbox and flush it to memory. */
	amr->amr_mbox->mb_segment = 0;
	memcpy(&amr->amr_mbox->mb_cmd, &ac->ac_cmd, sizeof(ac->ac_cmd));
	bus_dmamap_sync(amr->amr_dmat, amr->amr_dmamap, 0,
	    sizeof(struct amr_mailbox), BUS_DMASYNC_PREWRITE);

	/* Timestamp the submission and ring the inbound doorbell. */
	ac->ac_start_time = (time_t)mono_time.tv_sec;
	ac->ac_flags |= AC_ACTIVE;
	/*
	 * NOTE(review): paddr+16 presumably addresses the mailbox proper
	 * within the DMA area (16-byte alignment pad) — confirm against
	 * the mailbox allocation code.
	 */
	amr_outl(amr, AMR_QREG_IDB,
	    (amr->amr_mbox_paddr + 16) | AMR_QIDB_SUBMIT);
	return (0);
}
1185
int
amr_std_submit(struct amr_softc *amr, struct amr_ccb *ac)
{

	/* Clear the poll/ack bytes before offering the mailbox. */
	amr->amr_mbox->mb_poll = 0;
	amr->amr_mbox->mb_ack = 0;
	/*
	 * Flush our writes, then pull the busy flag so the test below
	 * sees the controller's current view of the mailbox.
	 */
	bus_dmamap_sync(amr->amr_dmat, amr->amr_dmamap, 0,
	    sizeof(struct amr_mailbox), BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(amr->amr_dmat, amr->amr_dmamap, 0,
	    sizeof(struct amr_mailbox), BUS_DMASYNC_POSTREAD);
	if (amr->amr_mbox->mb_cmd.mb_busy != 0)
		return (EAGAIN);

	/* Hardware mailbox-busy flag still set: back out and retry later. */
	if ((amr_inb(amr, AMR_SREG_MBOX_BUSY) & AMR_SMBOX_BUSY_FLAG) != 0) {
		amr->amr_mbox->mb_cmd.mb_busy = 0;
		bus_dmamap_sync(amr->amr_dmat, amr->amr_dmamap, 0,
		    sizeof(struct amr_mailbox), BUS_DMASYNC_PREWRITE);
		bus_dmamap_sync(amr->amr_dmat, amr->amr_dmamap, 0,
		    sizeof(struct amr_mailbox), BUS_DMASYNC_PREREAD);
		return (EAGAIN);
	}

	/* Copy the command into the mailbox and flush it to memory. */
	amr->amr_mbox->mb_segment = 0;
	memcpy(&amr->amr_mbox->mb_cmd, &ac->ac_cmd, sizeof(ac->ac_cmd));
	bus_dmamap_sync(amr->amr_dmat, amr->amr_dmamap, 0,
	    sizeof(struct amr_mailbox), BUS_DMASYNC_PREWRITE);

	/* Timestamp the submission and tell the controller to post it. */
	ac->ac_start_time = (time_t)mono_time.tv_sec;
	ac->ac_flags |= AC_ACTIVE;
	amr_outb(amr, AMR_SREG_CMD, AMR_SCMD_POST);
	return (0);
}
1218
1219 /*
1220 * Claim any work that the controller has completed; acknowledge completion,
1221 * save details of the completion in (mbsave). Must be called with
1222 * interrupts blocked.
1223 */
int
amr_quartz_get_work(struct amr_softc *amr, struct amr_mailbox_resp *mbsave)
{

	/* Work waiting for us? */
	if (amr_inl(amr, AMR_QREG_ODB) != AMR_QODB_READY)
		return (-1);

	/* Pull the controller's updates to the mailbox into view. */
	bus_dmamap_sync(amr->amr_dmat, amr->amr_dmamap, 0,
	    sizeof(struct amr_mailbox), BUS_DMASYNC_POSTREAD);

	/* Save the mailbox, which contains a list of completed commands. */
	memcpy(mbsave, &amr->amr_mbox->mb_resp, sizeof(*mbsave));

	/* Re-arm the map for the controller's next response. */
	bus_dmamap_sync(amr->amr_dmat, amr->amr_dmamap, 0,
	    sizeof(struct amr_mailbox), BUS_DMASYNC_PREREAD);

	/* Ack the interrupt and mailbox transfer. */
	amr_outl(amr, AMR_QREG_ODB, AMR_QODB_READY);
	amr_outl(amr, AMR_QREG_IDB, (amr->amr_mbox_paddr+16) | AMR_QIDB_ACK);

	/*
	 * This waits for the controller to notice that we've taken the
	 * command from it.  It's very inefficient, and we shouldn't do it,
	 * but if we remove this code, we stop completing commands under
	 * load.
	 *
	 * Peter J says we shouldn't do this.  The documentation says we
	 * should.  Who is right?
	 */
	while ((amr_inl(amr, AMR_QREG_IDB) & AMR_QIDB_ACK) != 0)
		DELAY(10);

	return (0);
}
1259
int
amr_std_get_work(struct amr_softc *amr, struct amr_mailbox_resp *mbsave)
{
	u_int8_t istat;

	/* Check for valid interrupt status. */
	if (((istat = amr_inb(amr, AMR_SREG_INTR)) & AMR_SINTR_VALID) == 0)
		return (-1);

	/* Ack the interrupt by writing the status value back. */
	amr_outb(amr, AMR_SREG_INTR, istat);

	/* Pull the controller's updates to the mailbox into view. */
	bus_dmamap_sync(amr->amr_dmat, amr->amr_dmamap, 0,
	    sizeof(struct amr_mailbox), BUS_DMASYNC_POSTREAD);

	/* Save mailbox, which contains a list of completed commands. */
	memcpy(mbsave, &amr->amr_mbox->mb_resp, sizeof(*mbsave));

	/* Re-arm the map for the controller's next response. */
	bus_dmamap_sync(amr->amr_dmat, amr->amr_dmamap, 0,
	    sizeof(struct amr_mailbox), BUS_DMASYNC_PREREAD);

	/* Ack mailbox transfer. */
	amr_outb(amr, AMR_SREG_CMD, AMR_SCMD_ACKINTR);

	return (0);
}
1286
1287 void
1288 amr_ccb_dump(struct amr_softc *amr, struct amr_ccb *ac)
1289 {
1290 int i;
1291
1292 printf("%s: ", amr->amr_dv.dv_xname);
1293 for (i = 0; i < 4; i++)
1294 printf("%08x ", ((u_int32_t *)&ac->ac_cmd)[i]);
1295 printf("\n");
1296 }
1297