1 /* $NetBSD: twa.c,v 1.12 2006/09/03 07:02:54 christos Exp $ */
2 /* $wasabi: twa.c,v 1.27 2006/07/28 18:17:21 wrstuden Exp $ */
3
4 /*-
5 * Copyright (c) 2004 The NetBSD Foundation, Inc.
6 * All rights reserved.
7 *
8 * This code is derived from software contributed to The NetBSD Foundation
9 * by Jordan Rhody of Wasabi Systems, Inc.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution.
19 * 3. All advertising materials mentioning features or use of this software
20 * must display the following acknowledgement:
21 * This product includes software developed by the NetBSD
22 * Foundation, Inc. and its contributors.
23 * 4. Neither the name of The NetBSD Foundation nor the names of its
24 * contributors may be used to endorse or promote products derived
25 * from this software without specific prior written permission.
26 *
27 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
28 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
29 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
30 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
31 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
32 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
33 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
34 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
35 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
36 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
37 * POSSIBILITY OF SUCH DAMAGE.
38 */
39
40 /*-
41 * Copyright (c) 2003-04 3ware, Inc.
42 * Copyright (c) 2000 Michael Smith
43 * Copyright (c) 2000 BSDi
44 * All rights reserved.
45 *
46 * Redistribution and use in source and binary forms, with or without
47 * modification, are permitted provided that the following conditions
48 * are met:
49 * 1. Redistributions of source code must retain the above copyright
50 * notice, this list of conditions and the following disclaimer.
51 * 2. Redistributions in binary form must reproduce the above copyright
52 * notice, this list of conditions and the following disclaimer in the
53 * documentation and/or other materials provided with the distribution.
54 *
55 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
56 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
57 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
58 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
59 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
60 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
61 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
62 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
63 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
64 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
65 * SUCH DAMAGE.
66 *
67 * $FreeBSD: src/sys/dev/twa/twa.c,v 1.2 2004/04/02 15:09:57 des Exp $
68 */
69
70 /*
71 * 3ware driver for 9000 series storage controllers.
72 *
73 * Author: Vinod Kashyap
74 */
75
76 #include <sys/cdefs.h>
77 __KERNEL_RCSID(0, "$NetBSD: twa.c,v 1.12 2006/09/03 07:02:54 christos Exp $");
78
79 #include <sys/param.h>
80 #include <sys/systm.h>
81 #include <sys/kernel.h>
82 #include <sys/device.h>
83 #include <sys/queue.h>
84 #include <sys/proc.h>
85 #include <sys/bswap.h>
86 #include <sys/buf.h>
87 #include <sys/bufq.h>
88 #include <sys/endian.h>
89 #include <sys/malloc.h>
90 #include <sys/conf.h>
91 #include <sys/disk.h>
92 #include <sys/syslog.h>
93
94 #include <uvm/uvm_extern.h>
95
96 #include <machine/bus.h>
97
98 #include <dev/pci/pcireg.h>
99 #include <dev/pci/pcivar.h>
100 #include <dev/pci/pcidevs.h>
101 #include <dev/pci/twareg.h>
102 #include <dev/pci/twavar.h>
103 #include <dev/pci/twaio.h>
104
105 #include <dev/scsipi/scsipi_all.h>
106 #include <dev/scsipi/scsipi_disk.h>
107 #include <dev/scsipi/scsipiconf.h>
108 #include <dev/scsipi/scsi_spc.h>
109
110 #include <dev/ldvar.h>
111
112 #include "locators.h"
113
114 #define PCI_CBIO 0x10
115
116 static int twa_fetch_aen(struct twa_softc *);
117 static void twa_aen_callback(struct twa_request *);
118 static int twa_find_aen(struct twa_softc *sc, uint16_t);
119 static uint16_t twa_enqueue_aen(struct twa_softc *sc,
120 struct twa_command_header *);
121
122 static void twa_attach(struct device *, struct device *, void *);
123 static void twa_shutdown(void *);
124 static int twa_init_connection(struct twa_softc *, uint16_t, uint32_t,
125 uint16_t, uint16_t, uint16_t, uint16_t, uint16_t *,
126 uint16_t *, uint16_t *, uint16_t *, uint32_t *);
127 static int twa_intr(void *);
128 static int twa_match(struct device *, struct cfdata *, void *);
129 static int twa_reset(struct twa_softc *);
130
131 static int twa_print(void *, const char *);
132 static int twa_soft_reset(struct twa_softc *);
133
134 static int twa_check_ctlr_state(struct twa_softc *, uint32_t);
135 static int twa_get_param(struct twa_softc *, int, int, size_t,
136 void (* callback)(struct twa_request *),
137 struct twa_param_9k **);
138 static int twa_set_param(struct twa_softc *, int, int, int, void *,
139 void (* callback)(struct twa_request *));
140 static void twa_describe_controller(struct twa_softc *);
141 static int twa_wait_status(struct twa_softc *, uint32_t, uint32_t);
142 static int twa_done(struct twa_softc *);
143 #if 0
144 static int twa_flash_firmware(struct twa_softc *sc);
145 static int twa_hard_reset(struct twa_softc *sc);
146 #endif
147
148 extern struct cfdriver twa_cd;
149 extern uint32_t twa_fw_img_size;
150 extern uint8_t twa_fw_img[];
151
152 CFATTACH_DECL(twa, sizeof(struct twa_softc),
153 twa_match, twa_attach, NULL, NULL);
154
155 /* AEN messages. */
156 static const struct twa_message twa_aen_table[] = {
157 {0x0000, "AEN queue empty"},
158 {0x0001, "Controller reset occurred"},
159 {0x0002, "Degraded unit detected"},
160 {0x0003, "Controller error occurred"},
161 {0x0004, "Background rebuild failed"},
162 {0x0005, "Background rebuild done"},
163 {0x0006, "Incomplete unit detected"},
164 {0x0007, "Background initialize done"},
165 {0x0008, "Unclean shutdown detected"},
166 {0x0009, "Drive timeout detected"},
167 {0x000A, "Drive error detected"},
168 {0x000B, "Rebuild started"},
169 {0x000C, "Background initialize started"},
170 {0x000D, "Entire logical unit was deleted"},
171 {0x000E, "Background initialize failed"},
172 {0x000F, "SMART attribute exceeded threshold"},
173 {0x0010, "Power supply reported AC under range"},
174 {0x0011, "Power supply reported DC out of range"},
175 {0x0012, "Power supply reported a malfunction"},
176 {0x0013, "Power supply predicted malfunction"},
177 {0x0014, "Battery charge is below threshold"},
178 {0x0015, "Fan speed is below threshold"},
179 {0x0016, "Temperature sensor is above threshold"},
180 {0x0017, "Power supply was removed"},
181 {0x0018, "Power supply was inserted"},
182 {0x0019, "Drive was removed from a bay"},
183 {0x001A, "Drive was inserted into a bay"},
184 {0x001B, "Drive bay cover door was opened"},
185 {0x001C, "Drive bay cover door was closed"},
186 {0x001D, "Product case was opened"},
187 {0x0020, "Prepare for shutdown (power-off)"},
188 {0x0021, "Downgrade UDMA mode to lower speed"},
189 {0x0022, "Upgrade UDMA mode to higher speed"},
190 {0x0023, "Sector repair completed"},
191 {0x0024, "Sbuf memory test failed"},
192 {0x0025, "Error flushing cached write data to disk"},
193 {0x0026, "Drive reported data ECC error"},
194 {0x0027, "DCB has checksum error"},
195 {0x0028, "DCB version is unsupported"},
196 {0x0029, "Background verify started"},
197 {0x002A, "Background verify failed"},
198 {0x002B, "Background verify done"},
199 {0x002C, "Bad sector overwritten during rebuild"},
200 {0x002E, "Replace failed because replacement drive too small"},
201 {0x002F, "Verify failed because array was never initialized"},
202 {0x0030, "Unsupported ATA drive"},
203 {0x0031, "Synchronize host/controller time"},
204 {0x0032, "Spare capacity is inadequate for some units"},
205 {0x0033, "Background migration started"},
206 {0x0034, "Background migration failed"},
207 {0x0035, "Background migration done"},
208 {0x0036, "Verify detected and fixed data/parity mismatch"},
209 {0x0037, "SO-DIMM incompatible"},
210 {0x0038, "SO-DIMM not detected"},
211 {0x0039, "Corrected Sbuf ECC error"},
212 {0x003A, "Drive power on reset detected"},
213 {0x003B, "Background rebuild paused"},
214 {0x003C, "Background initialize paused"},
215 {0x003D, "Background verify paused"},
216 {0x003E, "Background migration paused"},
217 {0x003F, "Corrupt flash file system detected"},
218 {0x0040, "Flash file system repaired"},
219 {0x0041, "Unit number assignments were lost"},
220 {0x0042, "Error during read of primary DCB"},
221 {0x0043, "Latent error found in backup DCB"},
222 {0x0044, "Battery voltage is normal"},
223 {0x0045, "Battery voltage is low"},
224 {0x0046, "Battery voltage is high"},
225 {0x0047, "Battery voltage is too low"},
226 {0x0048, "Battery voltage is too high"},
227 {0x0049, "Battery temperature is normal"},
228 {0x004A, "Battery temperature is low"},
229 {0x004B, "Battery temperature is high"},
230 {0x004C, "Battery temperature is too low"},
231 {0x004D, "Battery temperature is too high"},
232 {0x004E, "Battery capacity test started"},
233 {0x004F, "Cache synchronization skipped"},
234 {0x0050, "Battery capacity test completed"},
235 {0x0051, "Battery health check started"},
236 {0x0052, "Battery health check completed"},
237 {0x0053, "Need to do a capacity test"},
238 {0x0054, "Charge termination voltage is at high level"},
239 {0x0055, "Battery charging started"},
240 {0x0056, "Battery charging completed"},
241 {0x0057, "Battery charging fault"},
242 {0x0058, "Battery capacity is below warning level"},
243 {0x0059, "Battery capacity is below error level"},
244 {0x005A, "Battery is present"},
245 {0x005B, "Battery is not present"},
246 {0x005C, "Battery is weak"},
247 {0x005D, "Battery health check failed"},
248 {0x005E, "Cache synchronized after power fail"},
249 {0x005F, "Cache synchronization failed; some data lost"},
250 {0x0060, "Bad cache meta data checksum"},
251 {0x0061, "Bad cache meta data signature"},
252 {0x0062, "Cache meta data restore failed"},
253 {0x0063, "BBU not found after power fail"},
254 {0x00FC, "Recovered/finished array membership update"},
255 {0x00FD, "Handler lockup"},
256 {0x00FE, "Retrying PCI transfer"},
257 {0x00FF, "AEN queue is full"},
258 {0xFFFFFFFF, (char *)NULL}
259 };
260
261 /* AEN severity table. */
262 static const char *twa_aen_severity_table[] = {
263 "None",
264 "ERROR",
265 "WARNING",
266 "INFO",
267 "DEBUG",
268 (char *)NULL
269 };
270
271 /* Error messages. */
272 static const struct twa_message twa_error_table[] = {
273 {0x0100, "SGL entry contains zero data"},
274 {0x0101, "Invalid command opcode"},
275 {0x0102, "SGL entry has unaligned address"},
276 {0x0103, "SGL size does not match command"},
277 {0x0104, "SGL entry has illegal length"},
278 {0x0105, "Command packet is not aligned"},
279 {0x0106, "Invalid request ID"},
280 {0x0107, "Duplicate request ID"},
281 {0x0108, "ID not locked"},
282 {0x0109, "LBA out of range"},
283 {0x010A, "Logical unit not supported"},
284 {0x010B, "Parameter table does not exist"},
285 {0x010C, "Parameter index does not exist"},
286 {0x010D, "Invalid field in CDB"},
287 {0x010E, "Specified port has invalid drive"},
288 {0x010F, "Parameter item size mismatch"},
289 {0x0110, "Failed memory allocation"},
290 {0x0111, "Memory request too large"},
291 {0x0112, "Out of memory segments"},
292 {0x0113, "Invalid address to deallocate"},
293 {0x0114, "Out of memory"},
294 {0x0115, "Out of heap"},
295 {0x0120, "Double degrade"},
296 {0x0121, "Drive not degraded"},
297 {0x0122, "Reconstruct error"},
298 {0x0123, "Replace not accepted"},
299 {0x0124, "Replace drive capacity too small"},
300 {0x0125, "Sector count not allowed"},
301 {0x0126, "No spares left"},
302 {0x0127, "Reconstruct error"},
303 {0x0128, "Unit is offline"},
304 {0x0129, "Cannot update status to DCB"},
305 {0x0130, "Invalid stripe handle"},
306 {0x0131, "Handle that was not locked"},
307 {0x0132, "Handle that was not empty"},
308 {0x0133, "Handle has different owner"},
309 {0x0140, "IPR has parent"},
310 {0x0150, "Illegal Pbuf address alignment"},
311 {0x0151, "Illegal Pbuf transfer length"},
312 {0x0152, "Illegal Sbuf address alignment"},
313 {0x0153, "Illegal Sbuf transfer length"},
314 {0x0160, "Command packet too large"},
315 {0x0161, "SGL exceeds maximum length"},
316 {0x0162, "SGL has too many entries"},
317 {0x0170, "Insufficient resources for rebuilder"},
318 {0x0171, "Verify error (data != parity)"},
319 {0x0180, "Requested segment not in directory of this DCB"},
320 {0x0181, "DCB segment has unsupported version"},
321 {0x0182, "DCB segment has checksum error"},
322 {0x0183, "DCB support (settings) segment invalid"},
323 {0x0184, "DCB UDB (unit descriptor block) segment invalid"},
324 {0x0185, "DCB GUID (globally unique identifier) segment invalid"},
325 {0x01A0, "Could not clear Sbuf"},
326 {0x01C0, "Flash identify failed"},
327 {0x01C1, "Flash out of bounds"},
328 {0x01C2, "Flash verify error"},
329 {0x01C3, "Flash file object not found"},
330 {0x01C4, "Flash file already present"},
331 {0x01C5, "Flash file system full"},
332 {0x01C6, "Flash file not present"},
333 {0x01C7, "Flash file size error"},
334 {0x01C8, "Bad flash file checksum"},
335 {0x01CA, "Corrupt flash file system detected"},
336 {0x01D0, "Invalid field in parameter list"},
337 {0x01D1, "Parameter list length error"},
338 {0x01D2, "Parameter item is not changeable"},
339 {0x01D3, "Parameter item is not saveable"},
340 {0x0200, "UDMA CRC error"},
341 {0x0201, "Internal CRC error"},
342 {0x0202, "Data ECC error"},
343 {0x0203, "ADP level 1 error"},
344 {0x0204, "Port timeout"},
345 {0x0205, "Drive power on reset"},
346 {0x0206, "ADP level 2 error"},
347 {0x0207, "Soft reset failed"},
348 {0x0208, "Drive not ready"},
349 {0x0209, "Unclassified port error"},
350 {0x020A, "Drive aborted command"},
351 {0x0210, "Internal CRC error"},
352 {0x0211, "Host PCI bus abort"},
353 {0x0212, "Host PCI parity error"},
354 {0x0213, "Port handler error"},
355 {0x0214, "Token interrupt count error"},
356 {0x0215, "Timeout waiting for PCI transfer"},
357 {0x0216, "Corrected buffer ECC"},
358 {0x0217, "Uncorrected buffer ECC"},
359 {0x0230, "Unsupported command during flash recovery"},
360 {0x0231, "Next image buffer expected"},
361 {0x0232, "Binary image architecture incompatible"},
362 {0x0233, "Binary image has no signature"},
363 {0x0234, "Binary image has bad checksum"},
364 {0x0235, "Image downloaded overflowed buffer"},
365 {0x0240, "I2C device not found"},
366 {0x0241, "I2C transaction aborted"},
367 {0x0242, "SO-DIMM parameter(s) incompatible using defaults"},
368 {0x0243, "SO-DIMM unsupported"},
369 {0x0248, "SPI transfer status error"},
370 {0x0249, "SPI transfer timeout error"},
371 {0x0250, "Invalid unit descriptor size in CreateUnit"},
372 {0x0251, "Unit descriptor size exceeds data buffer in CreateUnit"},
373 {0x0252, "Invalid value in CreateUnit descriptor"},
374 {0x0253, "Inadequate disk space to support descriptor in CreateUnit"},
375 {0x0254, "Unable to create data channel for this unit descriptor"},
376 {0x0255, "CreateUnit descriptor specifies a drive already in use"},
377 {0x0256, "Unable to write configuration to all disks during CreateUnit"},
378 {0x0257, "CreateUnit does not support this descriptor version"},
379 {0x0258, "Invalid subunit for RAID 0 or 5 in CreateUnit"},
380 {0x0259, "Too many descriptors in CreateUnit"},
381 {0x025A, "Invalid configuration specified in CreateUnit descriptor"},
382 {0x025B, "Invalid LBA offset specified in CreateUnit descriptor"},
383 {0x025C, "Invalid stripelet size specified in CreateUnit descriptor"},
384 {0x0260, "SMART attribute exceeded threshold"},
385 {0xFFFFFFFF, (char *)NULL}
386 };
387
388 struct twa_pci_identity {
389 uint32_t vendor_id;
390 uint32_t product_id;
391 const char *name;
392 };
393
394 static const struct twa_pci_identity pci_twa_products[] = {
395 { PCI_VENDOR_3WARE,
396 PCI_PRODUCT_3WARE_9000,
397 "3ware 9000 series",
398 },
399 { PCI_VENDOR_3WARE,
400 PCI_PRODUCT_3WARE_9550,
401 "3ware 9550SX series",
402 },
403 { 0,
404 0,
405 NULL,
406 },
407 };
408
409
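/*
 * Register access helpers: 32-bit writes/reads of controller registers,
 * with the appropriate bus-space barriers.
 */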
410 static inline void
411 twa_outl(struct twa_softc *sc, int off, uint32_t val)
412 {
413
414 bus_space_write_4(sc->twa_bus_iot, sc->twa_bus_ioh, off, val);
415 bus_space_barrier(sc->twa_bus_iot, sc->twa_bus_ioh, off, 4,
416 BUS_SPACE_BARRIER_WRITE);
417 }
418
419 static inline uint32_t twa_inl(struct twa_softc *sc, int off)
420 {
421
422 bus_space_barrier(sc->twa_bus_iot, sc->twa_bus_ioh, off, 4,
423 BUS_SPACE_BARRIER_WRITE | BUS_SPACE_BARRIER_READ);
424 return (bus_space_read_4(sc->twa_bus_iot, sc->twa_bus_ioh, off));
425 }
426
427 void
428 twa_request_wait_handler(struct twa_request *tr)
429 {
430
431 wakeup(tr);
432 }
433
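/*
 * Autoconfiguration match: accept any 3ware controller listed in
 * pci_twa_products.
 */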
434 static int
435 twa_match(struct device *parent, struct cfdata *cfdata, void *aux)
436 {
437 int i;
438 struct pci_attach_args *pa = aux;
439 const struct twa_pci_identity *entry = 0;
440
441 if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_3WARE) {
442 for (i = 0; (pci_twa_products[i].product_id); i++) {
443 entry = &pci_twa_products[i];
444 if (entry->product_id == PCI_PRODUCT(pa->pa_id)) {
445 aprint_normal("%s: (rev. 0x%02x)\n",
446 entry->name, PCI_REVISION(pa->pa_class));
447 return (1);
448 }
449 }
450 }
451 return (0);
452 }
453
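/*
 * Look up the text for an AEN/error code in a message table. Falls
 * through to the table's NULL sentinel entry if the code is unknown.
 */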
454 static const char *
455 twa_find_msg_string(const struct twa_message *table, uint16_t code)
456 {
457 int i;
458
459 for (i = 0; table[i].message != NULL; i++)
460 if (table[i].code == code)
461 return(table[i].message);
462
463 return(table[i].message);
464 }
465
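/*
 * Return a request to the free queue and wake up anyone waiting for a
 * free request. The dedicated AEN request is only marked not busy.
 */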
466 void
467 twa_release_request(struct twa_request *tr)
468 {
469 int s;
470 struct twa_softc *sc;
471
472 sc = tr->tr_sc;
473
474 if ((tr->tr_flags & TWA_CMD_AEN) == 0) {
475 s = splbio();
476 TAILQ_INSERT_TAIL(&tr->tr_sc->twa_free, tr, tr_link);
477 splx(s);
478 if (__predict_false((tr->tr_sc->twa_sc_flags &
479 TWA_STATE_REQUEST_WAIT) != 0)) {
480 tr->tr_sc->twa_sc_flags &= ~TWA_STATE_REQUEST_WAIT;
481 wakeup(&sc->twa_free);
482 }
483 } else
484 tr->tr_flags &= ~TWA_CMD_AEN_BUSY;
485 }
486
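/*
 * Undo the DMA mapping of a request: sync and unload the data map,
 * copy bounce-buffer data back on a successful read, and release any
 * alignment buffer that was allocated by twa_map_request().
 */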
487 static void
488 twa_unmap_request(struct twa_request *tr)
489 {
490 struct twa_softc *sc = tr->tr_sc;
491 uint8_t cmd_status;
492 int s;
493
494 /* If the command involved data, unmap that too. */
495 if (tr->tr_data != NULL) {
496 if (tr->tr_cmd_pkt_type & TWA_CMD_PKT_TYPE_9K)
497 cmd_status = tr->tr_command->command.cmd_pkt_9k.status;
498 else
499 cmd_status =
500 tr->tr_command->command.cmd_pkt_7k.generic.status;
501
502 if (tr->tr_flags & TWA_CMD_DATA_OUT) {
503 bus_dmamap_sync(tr->tr_sc->twa_dma_tag, tr->tr_dma_map,
504 0, tr->tr_length, BUS_DMASYNC_POSTREAD);
505 /*
506 * If we are using a bounce buffer, and we are reading
507 * data, copy the real data in.
508 */
509 if (tr->tr_flags & TWA_CMD_DATA_COPY_NEEDED)
510 if (cmd_status == 0)
511 memcpy(tr->tr_real_data, tr->tr_data,
512 tr->tr_real_length);
513 }
514 if (tr->tr_flags & TWA_CMD_DATA_IN)
515 bus_dmamap_sync(tr->tr_sc->twa_dma_tag, tr->tr_dma_map,
516 0, tr->tr_length, BUS_DMASYNC_POSTWRITE);
517
518 bus_dmamap_unload(sc->twa_dma_tag, tr->tr_dma_map);
519 }
520
521 /* Free alignment buffer if it was used. */
522 if (tr->tr_flags & TWA_CMD_DATA_COPY_NEEDED) {
523 s = splvm();
524 uvm_km_free(kmem_map, (vaddr_t)tr->tr_data,
525 tr->tr_length, UVM_KMF_WIRED);
526 splx(s);
527 tr->tr_data = tr->tr_real_data;
528 tr->tr_length = tr->tr_real_length;
529 }
530 }
531
532 /*
533 * Function name: twa_wait_request
534 * Description: Sends down a firmware cmd, and waits for the completion,
535 * but NOT in a tight loop.
536 *
537 * Input: tr -- ptr to request pkt
538 * timeout -- max # of seconds to wait before giving up
539 * Output: None
540 * Return value: 0 -- success
541 * non-zero-- failure
542 */
543 static int
544 twa_wait_request(struct twa_request *tr, uint32_t timeout)
545 {
546 time_t end_time;
547 struct timeval t1;
548 int s, rv;
549
550 tr->tr_flags |= TWA_CMD_SLEEP_ON_REQUEST;
551 tr->tr_callback = twa_request_wait_handler;
552 tr->tr_status = TWA_CMD_BUSY;
553
554 rv = twa_map_request(tr);
555
556 if (rv != 0)
557 return (rv);
558
559 microtime(&t1);
560 end_time = t1.tv_usec +
561 (timeout * 1000 * 100);
562
563 while (tr->tr_status != TWA_CMD_COMPLETE) {
564 rv = tr->tr_error;
565 if (rv != 0)
566 return(rv);
567 if ((rv = tsleep(tr, PRIBIO, "twawait", timeout * hz)) == 0)
568 break;
569
570 if (rv == EWOULDBLOCK) {
571 /*
572 * We will reset the controller only if the request has
573 * already been submitted, so as to not lose the
574 * request packet. If a busy request timed out, the
575 * reset will take care of freeing resources. If a
576 * pending request timed out, we will free resources
577 * for that request, right here. So, the caller is
578 * expected to NOT cleanup when ETIMEDOUT is returned.
579 */
580 if (tr->tr_status == TWA_CMD_BUSY)
581 twa_reset(tr->tr_sc);
582 else {
583 /* Request was never submitted. Clean up. */
584 s = splbio();
585 TAILQ_REMOVE(&tr->tr_sc->twa_pending, tr,
586 tr_link);
587 splx(s);
588
589 twa_unmap_request(tr);
590 if (tr->tr_data)
591 free(tr->tr_data, M_DEVBUF);
592
593 twa_release_request(tr);
594 }
595 return(ETIMEDOUT);
596 }
597 /*
598 * Either the request got completed, or we were woken up by a
599 * signal. Calculate the new timeout, in case it was the
600 * latter.
601 */
602 microtime(&t1);
603
604 timeout = (end_time - t1.tv_usec) / (1000 * 100);
605 }
606 return(rv);
607 }
608
609 /*
610 * Function name: twa_immediate_request
611 * Description: Sends down a firmware cmd, and waits for the completion
612 * in a tight loop.
613 *
614 * Input: tr -- ptr to request pkt
615 * timeout -- max # of seconds to wait before giving up
616 * Output: None
617 * Return value: 0 -- success
618 * non-zero-- failure
619 */
620 static int
621 twa_immediate_request(struct twa_request *tr, uint32_t timeout)
622 {
623 struct timeval t1;
624 int s = 0, rv = 0;
625
626 rv = twa_map_request(tr);
627
628 if (rv != 0)
629 return(rv);
630
631 timeout = (timeout * 10000 * 10);
632
633 microtime(&t1);
634
635 timeout += t1.tv_usec;
636
637 do {
638 rv = tr->tr_error;
639 if (rv != 0)
640 return(rv);
641 s = splbio();
642 twa_done(tr->tr_sc);
643 splx(s);
644 if (tr->tr_status == TWA_CMD_COMPLETE)
645 return(rv);
646 microtime(&t1);
647 } while (t1.tv_usec <= timeout);
648
649 /*
650 * We will reset the controller only if the request has
651 * already been submitted, so as to not lose the
652 * request packet. If a busy request timed out, the
653 * reset will take care of freeing resources. If a
654 * pending request timed out, we will free resources
655 * for that request, right here. So, the caller is
656 * expected to NOT cleanup when ETIMEDOUT is returned.
657 */
658 rv = ETIMEDOUT;
659
660 if (tr->tr_status == TWA_CMD_BUSY)
661 twa_reset(tr->tr_sc);
662 else {
663 /* Request was never submitted. Clean up. */
664 s = splbio();
665 TAILQ_REMOVE(&tr->tr_sc->twa_pending, tr, tr_link);
666 splx(s);
667 twa_unmap_request(tr);
668 if (tr->tr_data)
669 free(tr->tr_data, M_DEVBUF);
670
671 twa_release_request(tr);
672 }
673 return (rv);
674 }
675
676 static int
677 twa_inquiry(struct twa_request *tr, int lunid)
678 {
679 int error;
680 struct twa_command_9k *tr_9k_cmd;
681
682 if (tr->tr_data == NULL)
683 return (ENOMEM);
684
685 memset(tr->tr_data, 0, TWA_SECTOR_SIZE);
686
687 tr->tr_length = TWA_SECTOR_SIZE;
688 tr->tr_cmd_pkt_type = TWA_CMD_PKT_TYPE_9K;
689 tr->tr_flags |= TWA_CMD_DATA_IN;
690
691 tr_9k_cmd = &tr->tr_command->command.cmd_pkt_9k;
692
693 tr_9k_cmd->command.opcode = TWA_OP_EXECUTE_SCSI_COMMAND;
694 tr_9k_cmd->unit = lunid;
695 tr_9k_cmd->request_id = tr->tr_request_id;
696 tr_9k_cmd->status = 0;
697 tr_9k_cmd->sgl_offset = 16;
698 tr_9k_cmd->sgl_entries = 1;
699 /* create the CDB here */
700 tr_9k_cmd->cdb[0] = INQUIRY;
701 tr_9k_cmd->cdb[1] = ((lunid << 5) & 0x0e);
702 tr_9k_cmd->cdb[4] = 255;
703
704 /* XXX Pre-set the inquiry data to "LU not present"; the 9000
705 * series does not seem to indicate NOTPRESENT on its own -
706 * needs more investigation.
707 */
708 ((struct scsipi_inquiry_data *)tr->tr_data)->device =
709 SID_QUAL_LU_NOTPRESENT;
710
711 error = twa_immediate_request(tr, TWA_REQUEST_TIMEOUT_PERIOD);
712
713 if (error != 0)
714 return (error);
715
716 if (((struct scsipi_inquiry_data *)tr->tr_data)->device ==
717 SID_QUAL_LU_NOTPRESENT)
718 error = 1;
719
720 return (error);
721 }
722
723 static int
724 twa_print_inquiry_data(struct twa_softc *sc, struct scsipi_inquiry_data *scsipi)
725 {
726
727 printf("%s: %s\n", sc->twa_dv.dv_xname, scsipi->vendor);
728
729 return (1);
730 }
731
732
733 static uint64_t
734 twa_read_capacity(struct twa_request *tr, int lunid)
735 {
736 int error;
737 struct twa_command_9k *tr_9k_cmd;
738 uint64_t array_size = 0LL;
739
740 if (tr->tr_data == NULL)
741 return (ENOMEM);
742
743 memset(tr->tr_data, 0, TWA_SECTOR_SIZE);
744
745 tr->tr_length = TWA_SECTOR_SIZE;
746 tr->tr_cmd_pkt_type = TWA_CMD_PKT_TYPE_9K;
747 tr->tr_flags |= TWA_CMD_DATA_OUT;
748
749 tr_9k_cmd = &tr->tr_command->command.cmd_pkt_9k;
750
751 tr_9k_cmd->command.opcode = TWA_OP_EXECUTE_SCSI_COMMAND;
752 tr_9k_cmd->unit = lunid;
753 tr_9k_cmd->request_id = tr->tr_request_id;
754 tr_9k_cmd->status = 0;
755 tr_9k_cmd->sgl_offset = 16;
756 tr_9k_cmd->sgl_entries = 1;
757 /* create the CDB here */
758 tr_9k_cmd->cdb[0] = READ_CAPACITY_16;
759 tr_9k_cmd->cdb[1] = ((lunid << 5) & 0x0e) | SRC16_SERVICE_ACTION;
760
761 error = twa_immediate_request(tr, TWA_REQUEST_TIMEOUT_PERIOD);
762
763 if (error == 0) {
764 #if BYTE_ORDER == BIG_ENDIAN
765 array_size = bswap64(_8btol(
766 ((struct scsipi_read_capacity_16_data *)tr->tr_data)->addr) + 1);
767 #else
768 array_size = _8btol(((struct scsipi_read_capacity_16_data *)
769 tr->tr_data)->addr) + 1;
770 #endif
771 }
772 return (array_size);
773 }
774
775 static int
776 twa_request_sense(struct twa_request *tr, int lunid)
777 {
778 int error = 1;
779 struct twa_command_9k *tr_9k_cmd;
780
781 if (tr->tr_data == NULL)
782 return (error);
783
784 memset(tr->tr_data, 0, TWA_SECTOR_SIZE);
785
786 tr->tr_length = TWA_SECTOR_SIZE;
787 tr->tr_cmd_pkt_type = TWA_CMD_PKT_TYPE_9K;
788 tr->tr_flags |= TWA_CMD_DATA_OUT;
789
790 tr_9k_cmd = &tr->tr_command->command.cmd_pkt_9k;
791
792 tr_9k_cmd->command.opcode = TWA_OP_EXECUTE_SCSI_COMMAND;
793 tr_9k_cmd->unit = lunid;
794 tr_9k_cmd->request_id = tr->tr_request_id;
795 tr_9k_cmd->status = 0;
796 tr_9k_cmd->sgl_offset = 16;
797 tr_9k_cmd->sgl_entries = 1;
798 /* create the CDB here */
799 tr_9k_cmd->cdb[0] = SCSI_REQUEST_SENSE;
800 tr_9k_cmd->cdb[1] = ((lunid << 5) & 0x0e);
801 tr_9k_cmd->cdb[4] = 255;
802
803 /* XXX AEN notification is called in interrupt context, so just
804 * queue the request and return from the interrupt as quickly
805 * as possible.
806 */
807 if ((tr->tr_flags & TWA_CMD_AEN) != 0)
808 error = twa_map_request(tr);
809 else
810 error = twa_immediate_request(tr, TWA_REQUEST_TIMEOUT_PERIOD);
811
812 return (error);
813 }
814
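/*
 * Allocate the request array and DMA-safe command packet memory,
 * create a data DMA map for each request, and put all requests except
 * the dedicated AEN request (id 0) on the free queue.
 */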
815 static int
816 twa_alloc_req_pkts(struct twa_softc *sc, int num_reqs)
817 {
818 struct twa_request *tr;
819 struct twa_command_packet *tc;
820 bus_dma_segment_t seg;
821 size_t max_segs, max_xfer;
822 int i, rv, rseg, size;
823
824 if ((sc->twa_req_buf = malloc(num_reqs * sizeof(struct twa_request),
825 M_DEVBUF, M_NOWAIT)) == NULL)
826 return(ENOMEM);
827
828 size = num_reqs * sizeof(struct twa_command_packet);
829
830 /* Allocate memory for cmd pkts. */
831 if ((rv = bus_dmamem_alloc(sc->twa_dma_tag,
832 size, PAGE_SIZE, 0, &seg,
833 1, &rseg, BUS_DMA_NOWAIT)) != 0){
834 aprint_error("%s: unable to allocate "
835 "command packets, rv = %d\n",
836 sc->twa_dv.dv_xname, rv);
837 return (ENOMEM);
838 }
839
840 if ((rv = bus_dmamem_map(sc->twa_dma_tag,
841 &seg, rseg, size, (caddr_t *)&sc->twa_cmds,
842 BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) != 0) {
843 aprint_error("%s: unable to map commands, rv = %d\n",
844 sc->twa_dv.dv_xname, rv);
845 return (1);
846 }
847
848 if ((rv = bus_dmamap_create(sc->twa_dma_tag,
849 size, num_reqs, size,
850 0, BUS_DMA_NOWAIT, &sc->twa_cmd_map)) != 0) {
851 aprint_error("%s: unable to create command DMA map, "
852 "rv = %d\n", sc->twa_dv.dv_xname, rv);
853 return (ENOMEM);
854 }
855
856 if ((rv = bus_dmamap_load(sc->twa_dma_tag, sc->twa_cmd_map,
857 sc->twa_cmds, size, NULL,
858 BUS_DMA_NOWAIT)) != 0) {
859 aprint_error("%s: unable to load command DMA map, "
860 "rv = %d\n", sc->twa_dv.dv_xname, rv);
861 return (1);
862 }
863
864 if ((uintptr_t)sc->twa_cmds % TWA_ALIGNMENT) {
865 aprint_error("%s: DMA map memory not aligned on %d boundary\n",
866 sc->twa_dv.dv_xname, TWA_ALIGNMENT);
867
868 return (1);
869 }
870 tc = sc->twa_cmd_pkt_buf = (struct twa_command_packet *)sc->twa_cmds;
871 sc->twa_cmd_pkt_phys = sc->twa_cmd_map->dm_segs[0].ds_addr;
872
873 memset(sc->twa_req_buf, 0, num_reqs * sizeof(struct twa_request));
874 memset(sc->twa_cmd_pkt_buf, 0,
875 num_reqs * sizeof(struct twa_command_packet));
876
877 sc->sc_twa_request = sc->twa_req_buf;
878 max_segs = twa_get_maxsegs();
879 max_xfer = twa_get_maxxfer(max_segs);
880
881 for (i = 0; i < num_reqs; i++, tc++) {
882 tr = &(sc->twa_req_buf[i]);
883 tr->tr_command = tc;
884 tr->tr_cmd_phys = sc->twa_cmd_pkt_phys +
885 (i * sizeof(struct twa_command_packet));
886 tr->tr_request_id = i;
887 tr->tr_sc = sc;
888
889 /*
890 * Create a map for data buffers. maxsize (256 * 1024) used in
891 * bus_dma_tag_create above should suffice for the bounce page needs
892 * for data buffers, since the max I/O size we support is 128KB.
893 * If we supported I/O's bigger than 256KB, we would have to
894 * create a second dma_tag, with the appropriate maxsize.
895 */
896 if ((rv = bus_dmamap_create(sc->twa_dma_tag,
897 max_xfer, max_segs, 1, 0, BUS_DMA_NOWAIT,
898 &tr->tr_dma_map)) != 0) {
899 aprint_error("%s: unable to create command "
900 "DMA map, rv = %d\n",
901 sc->twa_dv.dv_xname, rv);
902 return (ENOMEM);
903 }
904 /* Insert request into the free queue. */
905 if (i != 0) {
906 sc->twa_lookup[i] = tr;
907 twa_release_request(tr);
908 } else
909 tr->tr_flags |= TWA_CMD_AEN;
910 }
911 return(0);
912 }
913
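/*
 * Distribute half of the controller's queue depth evenly among the
 * attached units and notify each unit through its openings callback.
 */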
914 static void
915 twa_recompute_openings(struct twa_softc *sc)
916 {
917 struct twa_drive *td;
918 int unit;
919 int openings;
920
921 if (sc->sc_nunits != 0)
922 openings = ((TWA_Q_LENGTH / 2) / sc->sc_nunits);
923 else
924 openings = 0;
925 if (openings == sc->sc_openings)
926 return;
927 sc->sc_openings = openings;
928
929 #ifdef TWA_DEBUG
930 printf("%s: %d array%s, %d openings per array\n",
931 sc->twa_dv.dv_xname, sc->sc_nunits,
932 sc->sc_nunits == 1 ? "" : "s", sc->sc_openings);
933 #endif
934 for (unit = 0; unit < TWA_MAX_UNITS; unit++) {
935 td = &sc->sc_units[unit];
936 if (td->td_dev != NULL)
937 (*td->td_callbacks->tcb_openings)(td->td_dev,
938 sc->sc_openings);
939 }
940 }
941
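/*
 * Probe each possible unit with an INQUIRY; attach newly discovered
 * units and detach units that have disappeared.
 */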
942 static int
943 twa_request_bus_scan(struct twa_softc *sc)
944 {
945 struct twa_drive *td;
946 struct twa_request *tr;
947 struct twa_attach_args twaa;
948 int locs[TWACF_NLOCS];
949 int s, unit;
950
951 s = splbio();
952 for (unit = 0; unit < TWA_MAX_UNITS; unit++) {
953
954 if ((tr = twa_get_request(sc, 0)) == NULL) {
955 splx(s);
956 return (EIO);
957 }
958
959 tr->tr_cmd_pkt_type |= TWA_CMD_PKT_TYPE_INTERNAL;
960
961 tr->tr_data = malloc(TWA_SECTOR_SIZE, M_DEVBUF, M_NOWAIT);
962
963 if (tr->tr_data == NULL) {
964 twa_release_request(tr);
965 splx(s);
966 return (ENOMEM);
967 }
968 td = &sc->sc_units[unit];
969
970 if (twa_inquiry(tr, unit) == 0) {
971 if (td->td_dev == NULL) {
972 twa_print_inquiry_data(sc,
973 ((struct scsipi_inquiry_data *)tr->tr_data));
974
975 sc->sc_nunits++;
976
977 sc->sc_units[unit].td_size =
978 twa_read_capacity(tr, unit);
979
980 twaa.twaa_unit = unit;
981
982 twa_recompute_openings(sc);
983
984 locs[TWACF_UNIT] = unit;
985
986 sc->sc_units[unit].td_dev =
987 config_found_sm_loc(&sc->twa_dv, "twa",
988 locs, &twaa, twa_print, config_stdsubmatch);
989 }
990 } else {
991 if (td->td_dev != NULL) {
992 sc->sc_nunits--;
993
994 (void) config_detach(td->td_dev, DETACH_FORCE);
995 td->td_dev = NULL;
996 td->td_size = 0;
997
998 twa_recompute_openings(sc);
999 }
1000 }
1001 free(tr->tr_data, M_DEVBUF);
1002
1003 twa_release_request(tr);
1004 }
1005 splx(s);
1006
1007 return (0);
1008 }
1009
1010
1011 #ifdef DIAGNOSTIC
1012 static inline void
1013 twa_check_busy_q(struct twa_request *tr)
1014 {
1015 struct twa_request *rq;
1016 struct twa_softc *sc = tr->tr_sc;
1017
1018 TAILQ_FOREACH(rq, &sc->twa_busy, tr_link) {
1019 if (tr->tr_request_id == rq->tr_request_id) {
1020 panic("cannot submit same request more than once");
1021 } else if (tr->bp == rq->bp && tr->bp != 0) {
1022 /* XXX A check for 0 for the buf ptr is needed to
1023 * guard against ioctl requests with a buf ptr of
1024 * 0 and also aen notifications. Looking for
1025 * external cmds only.
1026 */
1027 panic("cannot submit same buf more than once");
1028 } else {
1029 /* Empty else statement */
1030 }
1031 }
1032 }
1033 #endif
1034
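/*
 * Post a command to the controller. If the command queue is full, put
 * the request on the pending queue, unmask the command interrupt and
 * return EBUSY.
 */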
1035 static int
1036 twa_start(struct twa_request *tr)
1037 {
1038 struct twa_softc *sc = tr->tr_sc;
1039 uint32_t status_reg;
1040 int s;
1041 int error;
1042
1043 s = splbio();
1044 /* Check to see if we can post a command. */
1045 status_reg = twa_inl(sc, TWA_STATUS_REGISTER_OFFSET);
1046 if ((error = twa_check_ctlr_state(sc, status_reg)))
1047 goto out;
1048
1049 if (status_reg & TWA_STATUS_COMMAND_QUEUE_FULL) {
1050 if (tr->tr_status != TWA_CMD_PENDING) {
1051 tr->tr_status = TWA_CMD_PENDING;
1052 TAILQ_INSERT_TAIL(&tr->tr_sc->twa_pending,
1053 tr, tr_link);
1054 }
1055 twa_outl(sc, TWA_CONTROL_REGISTER_OFFSET,
1056 TWA_CONTROL_UNMASK_COMMAND_INTERRUPT);
1057 error = EBUSY;
1058 } else {
1059 bus_dmamap_sync(sc->twa_dma_tag, sc->twa_cmd_map,
1060 (caddr_t)tr->tr_command - sc->twa_cmds,
1061 sizeof(struct twa_command_packet),
1062 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
1063
1064 /* Cmd queue is not full. Post the command. */
1065 TWA_WRITE_COMMAND_QUEUE(sc, tr->tr_cmd_phys +
1066 sizeof(struct twa_command_header));
1067
1068 /* Mark the request as currently being processed. */
1069 tr->tr_status = TWA_CMD_BUSY;
1070
1071 #ifdef DIAGNOSTIC
1072 twa_check_busy_q(tr);
1073 #endif
1074
1075 /* Move the request into the busy queue. */
1076 TAILQ_INSERT_TAIL(&tr->tr_sc->twa_busy, tr, tr_link);
1077 }
1078 out:
1079 splx(s);
1080 return(error);
1081 }
1082
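/* Drain the controller's response queue, discarding the entries. */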
1083 static int
1084 twa_drain_response_queue(struct twa_softc *sc)
1085 {
1086 union twa_response_queue rq;
1087 uint32_t status_reg;
1088
1089 for (;;) {
1090 status_reg = twa_inl(sc, TWA_STATUS_REGISTER_OFFSET);
1091 if (twa_check_ctlr_state(sc, status_reg))
1092 return(1);
1093 if (status_reg & TWA_STATUS_RESPONSE_QUEUE_EMPTY)
1094 return(0); /* no more response queue entries */
1095 rq = (union twa_response_queue)twa_inl(sc,
1096 TWA_RESPONSE_QUEUE_OFFSET);
1097 }
1098 }
1099
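/*
 * Drain the busy queue: free internal/ioctl requests outright and
 * complete external (SCSI) requests with an EIO status.
 */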
1100 static void
1101 twa_drain_busy_queue(struct twa_softc *sc)
1102 {
1103 struct twa_request *tr;
1104
1105 /* Walk the busy queue. */
1106
1107 while ((tr = TAILQ_FIRST(&sc->twa_busy)) != NULL) {
1108 TAILQ_REMOVE(&sc->twa_busy, tr, tr_link);
1109
1110 twa_unmap_request(tr);
1111 if ((tr->tr_cmd_pkt_type & TWA_CMD_PKT_TYPE_INTERNAL) ||
1112 (tr->tr_cmd_pkt_type & TWA_CMD_PKT_TYPE_IOCTL)) {
1113 /* It's an internal/ioctl request. Simply free it. */
1114 if (tr->tr_data)
1115 free(tr->tr_data, M_DEVBUF);
1116 twa_release_request(tr);
1117 } else {
1118 /* It's a SCSI request. Complete it. */
1119 tr->tr_command->command.cmd_pkt_9k.status = EIO;
1120 if (tr->tr_callback)
1121 tr->tr_callback(tr);
1122 }
1123 }
1124 }
1125
1126 static int
1127 twa_drain_pending_queue(struct twa_softc *sc)
1128 {
1129 struct twa_request *tr;
1130 int s, error = 0;
1131
1132 /*
1133 * Pull requests off the pending queue, and submit them.
1134 */
1135 s = splbio();
1136 while ((tr = TAILQ_FIRST(&sc->twa_pending)) != NULL) {
1137 TAILQ_REMOVE(&sc->twa_pending, tr, tr_link);
1138
1139 if ((error = twa_start(tr))) {
1140 if (error == EBUSY) {
1141 tr->tr_status = TWA_CMD_PENDING;
1142
1143 /* queue at the head */
1144 TAILQ_INSERT_HEAD(&tr->tr_sc->twa_pending,
1145 tr, tr_link);
1146 error = 0;
1147 break;
1148 } else {
1149 if (tr->tr_flags & TWA_CMD_SLEEP_ON_REQUEST) {
1150 tr->tr_error = error;
1151 tr->tr_callback(tr);
1152 error = EIO;
1153 }
1154 }
1155 }
1156 }
1157 splx(s);
1158
1159 return(error);
1160 }
1161
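/*
 * Repeatedly issue REQUEST SENSE to pull queued AENs out of the
 * controller until it reports that its AEN queue is empty.
 */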
1162 static int
1163 twa_drain_aen_queue(struct twa_softc *sc)
1164 {
1165 int s, error = 0;
1166 struct twa_request *tr;
1167 struct twa_command_header *cmd_hdr;
1168 struct timeval t1;
1169 uint32_t timeout;
1170
1171 for (;;) {
1172 if ((tr = twa_get_request(sc, 0)) == NULL) {
1173 error = EIO;
1174 break;
1175 }
1176 tr->tr_cmd_pkt_type |= TWA_CMD_PKT_TYPE_INTERNAL;
1177 tr->tr_callback = NULL;
1178
1179 tr->tr_data = malloc(TWA_SECTOR_SIZE, M_DEVBUF, M_NOWAIT);
1180
1181 if (tr->tr_data == NULL) {
1182 error = 1;
1183 goto out;
1184 }
1185
1186 if (twa_request_sense(tr, 0) != 0) {
1187 error = 1;
1188 break;
1189 }
1190
1191 timeout = (1000/*ms*/ * 100/*us*/ * TWA_REQUEST_TIMEOUT_PERIOD);
1192
1193 microtime(&t1);
1194
1195 timeout += t1.tv_usec;
1196
1197 do {
1198 s = splbio();
1199 twa_done(tr->tr_sc);
1200 splx(s);
1201 if (tr->tr_status != TWA_CMD_BUSY)
1202 break;
1203 microtime(&t1);
1204 } while (t1.tv_usec <= timeout);
1205
1206 if (tr->tr_status != TWA_CMD_COMPLETE) {
1207 error = ETIMEDOUT;
1208 break;
1209 }
1210
1211 if ((error = tr->tr_command->command.cmd_pkt_9k.status))
1212 break;
1213
1214 cmd_hdr = (struct twa_command_header *)(tr->tr_data);
1215 if ((cmd_hdr->status_block.error) /* aen_code */
1216 == TWA_AEN_QUEUE_EMPTY)
1217 break;
1218 (void)twa_enqueue_aen(sc, cmd_hdr);
1219
1220 free(tr->tr_data, M_DEVBUF);
1221 twa_release_request(tr);
1222 }
1223 out:
1224 if (tr) {
1225 if (tr->tr_data)
1226 free(tr->tr_data, M_DEVBUF);
1227
1228 twa_release_request(tr);
1229 }
1230 return(error);
1231 }
1232
1233
1234 #ifdef DIAGNOSTIC
1235 static void
1236 twa_check_response_q(struct twa_request *tr, int clear)
1237 {
1238 int j;
1239 static int i = 0;
1240 static struct twa_request *req = 0;
1241 static struct buf *hist[255];
1242
1243
1244 if (clear) {
1245 i = 0;
1246 for (j = 0; j < 255; j++)
1247 hist[j] = 0;
1248 return;
1249 }
1250
1251 if (req == 0)
1252 req = tr;
1253
1254 if ((tr->tr_cmd_pkt_type & TWA_CMD_PKT_TYPE_EXTERNAL) != 0) {
1255 if (req->tr_request_id == tr->tr_request_id)
1256 panic("req id: %d on controller queue twice",
1257 tr->tr_request_id);
1258
1259 for (j = 0; j < i; j++)
1260 if (tr->bp == hist[j])
1261 panic("req id: %d buf found twice",
1262 tr->tr_request_id);
1263 }
1264 req = tr;
1265
1266 hist[i++] = req->bp;
1267 }
1268 #endif
1269
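/*
 * Process the response queue: for each completed request id, unmap the
 * request, mark it complete and invoke its callback; then try to
 * resubmit anything on the pending queue.
 */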
1270 static int
1271 twa_done(struct twa_softc *sc)
1272 {
1273 union twa_response_queue rq;
1274 struct twa_request *tr;
1275 int rv = 0;
1276 uint32_t status_reg;
1277
1278 for (;;) {
1279 status_reg = twa_inl(sc, TWA_STATUS_REGISTER_OFFSET);
1280 if ((rv = twa_check_ctlr_state(sc, status_reg)))
1281 break;
1282 if (status_reg & TWA_STATUS_RESPONSE_QUEUE_EMPTY)
1283 break;
1284 /* Response queue is not empty. */
1285 rq = (union twa_response_queue)twa_inl(sc,
1286 TWA_RESPONSE_QUEUE_OFFSET);
1287 tr = sc->sc_twa_request + rq.u.response_id;
1288 #ifdef DIAGNOSTIC
1289 twa_check_response_q(tr, 0);
1290 #endif
1291 /* Unmap the command packet, and any associated data buffer. */
1292 twa_unmap_request(tr);
1293
1294 tr->tr_status = TWA_CMD_COMPLETE;
1295 TAILQ_REMOVE(&tr->tr_sc->twa_busy, tr, tr_link);
1296
1297 if (tr->tr_callback)
1298 tr->tr_callback(tr);
1299 }
1300 (void)twa_drain_pending_queue(sc);
1301
1302 #ifdef DIAGNOSTIC
1303 twa_check_response_q(NULL, 1);
1304 #endif
1305 return(rv);
1306 }
1307
1308 /*
1309 * Function name: twa_init_ctlr
1310 * Description: Establishes a logical connection with the controller.
1311 * If bundled with firmware, determines whether or not
1312 * to flash firmware, based on arch_id, fw SRL (Spec.
1313 * Revision Level), branch & build #'s. Also determines
1314 * whether or not the driver is compatible with the
1315 * firmware on the controller, before proceeding to work
1316 * with it.
1317 *
1318 * Input: sc -- ptr to per ctlr structure
1319 * Output: None
1320 * Return value: 0 -- success
1321 * non-zero-- failure
1322 */
1323 static int
1324 twa_init_ctlr(struct twa_softc *sc)
1325 {
1326 uint16_t fw_on_ctlr_srl = 0;
1327 uint16_t fw_on_ctlr_arch_id = 0;
1328 uint16_t fw_on_ctlr_branch = 0;
1329 uint16_t fw_on_ctlr_build = 0;
1330 uint32_t init_connect_result = 0;
1331 int error = 0;
1332 #if 0
1333 int8_t fw_flashed = FALSE;
1334 int8_t fw_flash_failed = FALSE;
1335 #endif
1336
1337 /* Wait for the controller to become ready. */
1338 if (twa_wait_status(sc, TWA_STATUS_MICROCONTROLLER_READY,
1339 TWA_REQUEST_TIMEOUT_PERIOD)) {
1340 return(ENXIO);
1341 }
1342 /* Drain the response queue. */
1343 if (twa_drain_response_queue(sc))
1344 return(1);
1345
1346 /* Establish a logical connection with the controller. */
1347 if ((error = twa_init_connection(sc, TWA_INIT_MESSAGE_CREDITS,
1348 TWA_EXTENDED_INIT_CONNECT, TWA_CURRENT_FW_SRL,
1349 TWA_9000_ARCH_ID, TWA_CURRENT_FW_BRANCH,
1350 TWA_CURRENT_FW_BUILD, &fw_on_ctlr_srl,
1351 &fw_on_ctlr_arch_id, &fw_on_ctlr_branch,
1352 &fw_on_ctlr_build, &init_connect_result))) {
1353 return(error);
1354 }
1355 #if 0
1356 if ((init_connect_result & TWA_BUNDLED_FW_SAFE_TO_FLASH) &&
1357 (init_connect_result & TWA_CTLR_FW_RECOMMENDS_FLASH)) {
1358 /*
1359 * The bundled firmware is safe to flash, and the firmware
1360 * on the controller recommends a flash. So, flash!
1361 */
1362 printf("%s: flashing bundled firmware...\n",
1363 sc->twa_dv.dv_xname);
1364
1365 if ((error = twa_flash_firmware(sc))) {
1366 fw_flash_failed = TRUE;
1367
1368 printf("%s: unable to flash bundled firmware.\n",
1369 sc->twa_dv.dv_xname);
1370 } else {
1371 printf("%s: successfully flashed bundled firmware.\n",
1372 sc->twa_dv.dv_xname);
1373 fw_flashed = TRUE;
1374 }
1375 }
1376 if (fw_flashed) {
1377 /* The firmware was flashed. Have the new image loaded */
1378 error = twa_hard_reset(sc);
1379 if (error == 0)
1380 error = twa_init_ctlr(sc);
1381 /*
1382 * If hard reset of controller failed, we need to return.
1383 * Otherwise, the above recursive call to twa_init_ctlr will
1384 * have completed the rest of the initialization (starting
1385 * from twa_drain_aen_queue below). Don't do it again.
1386 * Just return.
1387 */
1388 return(error);
1389 } else {
1390 /*
1391 * Either we are not bundled with a firmware image, or
1392 * the bundled firmware is not safe to flash,
1393 * or flash failed for some reason. See if we can at
1394 * least work with the firmware on the controller in the
1395 * current mode.
1396 */
1397 if (init_connect_result & TWA_CTLR_FW_COMPATIBLE) {
1398 /* Yes, we can. Make note of the operating mode. */
1399 sc->working_srl = TWA_CURRENT_FW_SRL;
1400 sc->working_branch = TWA_CURRENT_FW_BRANCH;
1401 sc->working_build = TWA_CURRENT_FW_BUILD;
1402 } else {
1403 /*
1404 * No, we can't. See if we can at least work with
1405 * it in the base mode. We should never come here
1406 * if firmware has just been flashed.
1407 */
1408 printf("%s: Driver/Firmware mismatch. Negotiating "
1409 "for base level.\n", sc->twa_dv.dv_xname);
1410 if ((error = twa_init_connection(sc,
1411 TWA_INIT_MESSAGE_CREDITS,
1412 TWA_EXTENDED_INIT_CONNECT, TWA_BASE_FW_SRL,
1413 TWA_9000_ARCH_ID, TWA_BASE_FW_BRANCH,
1414 TWA_BASE_FW_BUILD, &fw_on_ctlr_srl,
1415 &fw_on_ctlr_arch_id, &fw_on_ctlr_branch,
1416 &fw_on_ctlr_build, &init_connect_result))) {
1417 printf("%s: can't initialize connection in "
1418 "base mode.\n", sc->twa_dv.dv_xname);
1419 return(error);
1420 }
1421 if (!(init_connect_result & TWA_CTLR_FW_COMPATIBLE)) {
1422 /*
1423 * The firmware on the controller is not even
1424 * compatible with our base mode. We cannot
1425 * work with it. Bail...
1426 */
1427 printf("Incompatible firmware on controller\n");
1428 #ifdef TWA_FLASH_FIRMWARE
1429 if (fw_flash_failed)
1430 printf("...and could not flash bundled "
1431 "firmware.\n");
1432 else
1433 printf("...and bundled firmware not "
1434 "safe to flash.\n");
1435 #endif /* TWA_FLASH_FIRMWARE */
1436 return(1);
1437 }
1438 /*
1439 * We can work with this firmware, but only in
1440 * base mode.
1441 */
1442 sc->working_srl = TWA_BASE_FW_SRL;
1443 sc->working_branch = TWA_BASE_FW_BRANCH;
1444 sc->working_build = TWA_BASE_FW_BUILD;
1445 sc->twa_operating_mode = TWA_BASE_MODE;
1446 }
1447 }
1448 #endif
1449 twa_drain_aen_queue(sc);
1450
1451 /* Set controller state to initialized. */
1452 sc->twa_state &= ~TWA_STATE_SHUTDOWN;
1453 return(0);
1454 }
1455
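/*
 * One-time software setup: initialize the request queues, allocate
 * request packets and the AEN queue, initialize the controller
 * (retrying once after a soft reset), scan the bus and enable
 * interrupts.
 */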
1456 static int
1457 twa_setup(struct twa_softc *sc)
1458 {
1459 struct tw_cl_event_packet *aen_queue;
1460 uint32_t i = 0;
1461 int error = 0;
1462
1463 /* Initialize request queues. */
1464 TAILQ_INIT(&sc->twa_free);
1465 TAILQ_INIT(&sc->twa_busy);
1466 TAILQ_INIT(&sc->twa_pending);
1467
1468 sc->sc_nunits = 0;
1469 sc->twa_sc_flags = 0;
1470
1471 if (twa_alloc_req_pkts(sc, TWA_Q_LENGTH)) {
1472
1473 return(ENOMEM);
1474 }
1475
1476 /* Allocate memory for the AEN queue. */
1477 if ((aen_queue = malloc(sizeof(struct tw_cl_event_packet) *
1478 TWA_Q_LENGTH, M_DEVBUF, M_WAITOK)) == NULL) {
1479 /*
1480 * Strictly speaking, this need not be fatal; we would only be
1481 * unable to support AENs. But then we would have to check, time
1482 * and again, whether AENs can be supported. So just return an
1483 * error instead.
1484 */
1485 return (ENOMEM);
1486 }
1487 /* Initialize the aen queue. */
1488 memset(aen_queue, 0, sizeof(struct tw_cl_event_packet) * TWA_Q_LENGTH);
1489
1490 for (i = 0; i < TWA_Q_LENGTH; i++)
1491 sc->twa_aen_queue[i] = &(aen_queue[i]);
1492
1493 twa_outl(sc, TWA_CONTROL_REGISTER_OFFSET,
1494 TWA_CONTROL_DISABLE_INTERRUPTS);
1495
1496 /* Initialize the controller. */
1497 if ((error = twa_init_ctlr(sc))) {
1498 /* Soft reset the controller, and try one more time. */
1499
1500 printf("%s: controller initialization failed. "
1501 "Retrying initialization\n", sc->twa_dv.dv_xname);
1502
1503 if ((error = twa_soft_reset(sc)) == 0)
1504 error = twa_init_ctlr(sc);
1505 }
1506
1507 twa_describe_controller(sc);
1508
1509 error = twa_request_bus_scan(sc);
1510
1511 twa_outl(sc, TWA_CONTROL_REGISTER_OFFSET,
1512 TWA_CONTROL_CLEAR_ATTENTION_INTERRUPT |
1513 TWA_CONTROL_UNMASK_RESPONSE_INTERRUPT |
1514 TWA_CONTROL_ENABLE_INTERRUPTS);
1515
1516 return (error);
1517 }
1518
1519 void *twa_sdh;
1520
1521 static void
1522 twa_attach(struct device *parent, struct device *self, void *aux)
1523 {
1524 struct pci_attach_args *pa;
1525 struct twa_softc *sc;
1526 pci_chipset_tag_t pc;
1527 pcireg_t csr;
1528 pci_intr_handle_t ih;
1529 const char *intrstr;
1530
1531 sc = (struct twa_softc *)self;
1532
1533 pa = aux;
1534 pc = pa->pa_pc;
1535 sc->pc = pa->pa_pc;
1536 sc->tag = pa->pa_tag;
1537 sc->twa_dma_tag = pa->pa_dmat;
1538
1539 aprint_naive(": RAID controller\n");
1540 aprint_normal(": 3ware Apache\n");
1541
1542 if (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_3WARE_9000) {
1543 if (pci_mapreg_map(pa, PCI_MAPREG_START, PCI_MAPREG_TYPE_IO, 0,
1544 &sc->twa_bus_iot, &sc->twa_bus_ioh, NULL, NULL)) {
1545 aprint_error("%s: can't map i/o space\n",
1546 sc->twa_dv.dv_xname);
1547 return;
1548 }
1549 } else if (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_3WARE_9550) {
1550 if (pci_mapreg_map(pa, PCI_MAPREG_START + 0x08,
1551 PCI_MAPREG_MEM_TYPE_64BIT, 0, &sc->twa_bus_iot,
1552 &sc->twa_bus_ioh, NULL, NULL)) {
1553 aprint_error("%s: can't map mem space\n",
1554 sc->twa_dv.dv_xname);
1555 return;
1556 }
1557 } else {
1558 aprint_error("%s: product id 0x%02x not recognized\n",
1559 sc->twa_dv.dv_xname, PCI_PRODUCT(pa->pa_id));
1560 return;
1561 }
1562 /* Enable the device. */
1563 csr = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
1564
1565 pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG,
1566 csr | PCI_COMMAND_MASTER_ENABLE);
1567
1568 /* Map and establish the interrupt. */
1569 if (pci_intr_map(pa, &ih)) {
1570 aprint_error("%s: can't map interrupt\n", sc->twa_dv.dv_xname);
1571 return;
1572 }
1573 intrstr = pci_intr_string(pc, ih);
1574
1575 sc->twa_ih = pci_intr_establish(pc, ih, IPL_BIO, twa_intr, sc);
1576 if (sc->twa_ih == NULL) {
1577 aprint_error("%s: can't establish interrupt%s%s\n",
1578 sc->twa_dv.dv_xname,
1579 (intrstr) ? " at " : "",
1580 (intrstr) ? intrstr : "");
1581 return;
1582 }
1583
1584 if (intrstr != NULL)
1585 aprint_normal("%s: interrupting at %s\n",
1586 sc->twa_dv.dv_xname, intrstr);
1587
1588 twa_setup(sc);
1589
1590 if (twa_sdh == NULL)
1591 twa_sdh = shutdownhook_establish(twa_shutdown, NULL);
1592
1593 return;
1594 }
1595
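/*
 * Shutdown hook: detach the attached units, disable interrupts and
 * send a shutdown init-connection message to each controller.
 */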
1596 static void
1597 twa_shutdown(void *arg)
1598 {
1599 extern struct cfdriver twa_cd;
1600 struct twa_softc *sc;
1601 int i, rv, unit;
1602
1603 for (i = 0; i < twa_cd.cd_ndevs; i++) {
1604 if ((sc = device_lookup(&twa_cd, i)) == NULL)
1605 continue;
1606
1607 for (unit = 0; unit < TWA_MAX_UNITS; unit++)
1608 if (sc->sc_units[unit].td_dev != NULL)
1609 (void) config_detach(sc->sc_units[unit].td_dev,
1610 DETACH_FORCE | DETACH_QUIET);
1611
1612 twa_outl(sc, TWA_CONTROL_REGISTER_OFFSET,
1613 TWA_CONTROL_DISABLE_INTERRUPTS);
1614
1615 /* Let the controller know that we are going down. */
1616 rv = twa_init_connection(sc, TWA_SHUTDOWN_MESSAGE_CREDITS,
1617 0, 0, 0, 0, 0,
1618 NULL, NULL, NULL, NULL, NULL);
1619 }
1620 }
1621
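/* Record the callbacks registered by an attached unit's driver. */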
1622 void
1623 twa_register_callbacks(struct twa_softc *sc, int unit,
1624 const struct twa_callbacks *tcb)
1625 {
1626
1627 sc->sc_units[unit].td_callbacks = tcb;
1628 }
1629
1630 /*
1631 * Print autoconfiguration message for a sub-device
1632 */
1633 static int
1634 twa_print(void *aux, const char *pnp)
1635 {
1636 struct twa_attach_args *twaa;
1637
1638 twaa = aux;
1639
1640 if (pnp != NULL)
1641 aprint_normal("block device at %s\n", pnp);
1642 aprint_normal(" unit %d\n", twaa->twaa_unit);
1643 return (UNCONF);
1644 }
1645
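/* Copy a bus_dma segment list into a command packet's scatter/gather list. */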
1646 static void
1647 twa_fillin_sgl(struct twa_sg *sgl, bus_dma_segment_t *segs, int nsegments)
1648 {
1649 int i;
1650 for (i = 0; i < nsegments; i++) {
1651 sgl[i].address = segs[i].ds_addr;
1652 sgl[i].length = (uint32_t)(segs[i].ds_len);
1653 }
1654 }
1655
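/*
 * Hand a mapped request to twa_start(). EBUSY just means the request
 * was queued as pending, so it is not treated as an error.
 */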
1656 static int
1657 twa_submit_io(struct twa_request *tr)
1658 {
1659 int error;
1660
1661 if ((error = twa_start(tr))) {
1662 if (error == EBUSY)
1663 error = 0; /* request is in the pending queue */
1664 else {
1665 tr->tr_error = error;
1666 }
1667 }
1668 return(error);
1669 }
1670
1671 /*
1672 * Function name: twa_setup_data_dmamap
1673 * Description: Callback of bus_dmamap_load for the buffer associated
1674 * with data. Updates the cmd pkt (size/sgl_entries
1675 * fields, as applicable) to reflect the number of sg
1676 * elements.
1677 *
1678 * Input: arg -- ptr to request pkt
1679 * segs -- ptr to a list of segment descriptors
1680 * nsegments--# of segments
1681 * error -- 0 if no errors encountered before callback,
1682 * non-zero if errors were encountered
1683 * Output: None
1684 * Return value: None
1685 */
1686 static int
1687 twa_setup_data_dmamap(void *arg, bus_dma_segment_t *segs, int nsegments,
1688 int error)
1689 {
1690 struct twa_request *tr = (struct twa_request *)arg;
1691 struct twa_command_packet *cmdpkt = tr->tr_command;
1692 struct twa_command_9k *cmd9k;
1693 union twa_command_7k *cmd7k;
1694 uint8_t sgl_offset;
1695
1696 if (error == EFBIG) {
1697 tr->tr_error = error;
1698 goto out;
1699 }
1700
1701 if (tr->tr_cmd_pkt_type & TWA_CMD_PKT_TYPE_9K) {
1702 cmd9k = &(cmdpkt->command.cmd_pkt_9k);
1703 twa_fillin_sgl(&(cmd9k->sg_list[0]), segs, nsegments);
1704 cmd9k->sgl_entries += nsegments - 1;
1705 } else {
1706 /* It's a 7000 command packet. */
1707 cmd7k = &(cmdpkt->command.cmd_pkt_7k);
1708 if ((sgl_offset = cmdpkt->command.cmd_pkt_7k.generic.sgl_offset))
1709 twa_fillin_sgl((struct twa_sg *)
1710 (((uint32_t *)cmd7k) + sgl_offset),
1711 segs, nsegments);
1712 /* Modify the size field, based on sg address size. */
1713 cmd7k->generic.size +=
1714 ((TWA_64BIT_ADDRESSES ? 3 : 2) * nsegments);
1715 }
1716
1717 if (tr->tr_flags & TWA_CMD_DATA_IN)
1718 bus_dmamap_sync(tr->tr_sc->twa_dma_tag, tr->tr_dma_map, 0,
1719 tr->tr_length, BUS_DMASYNC_PREREAD);
1720 if (tr->tr_flags & TWA_CMD_DATA_OUT) {
1721 /*
1722 * If we're using an alignment buffer, and we're
1723 * writing data, copy the real data out.
1724 */
1725 if (tr->tr_flags & TWA_CMD_DATA_COPY_NEEDED)
1726 memcpy(tr->tr_data, tr->tr_real_data,
1727 tr->tr_real_length);
1728 bus_dmamap_sync(tr->tr_sc->twa_dma_tag, tr->tr_dma_map, 0,
1729 tr->tr_length, BUS_DMASYNC_PREWRITE);
1730 }
1731 error = twa_submit_io(tr);
1732
1733 out:
1734 if (error) {
1735 twa_unmap_request(tr);
1736 /*
1737 * If the caller had been returned EINPROGRESS, and he has
1738 * registered a callback for handling completion, the callback
1739 * will never get called because we were unable to submit the
1740 * request. So, free up the request right here.
1741 */
1742 if ((tr->tr_flags & TWA_CMD_IN_PROGRESS) && (tr->tr_callback))
1743 twa_release_request(tr);
1744 }
1745 return (error);
1746 }
1747
1748 /*
1749 * Function name: twa_map_request
1750 * Description: Maps a cmd pkt and data associated with it, into
1751 * DMA'able memory.
1752 *
1753 * Input: tr -- ptr to request pkt
1754 * Output: None
1755 * Return value: 0 -- success
1756 * non-zero-- failure
1757 */
1758 int
1759 twa_map_request(struct twa_request *tr)
1760 {
1761 struct twa_softc *sc = tr->tr_sc;
1762 int s, rv, error = 0;
1763
1764 /* If the command involves data, map that too. */
1765 if (tr->tr_data != NULL) {
1766
1767 if (((u_long)tr->tr_data & (511)) != 0) {
1768 tr->tr_flags |= TWA_CMD_DATA_COPY_NEEDED;
1769 tr->tr_real_data = tr->tr_data;
1770 tr->tr_real_length = tr->tr_length;
1771 s = splvm();
1772 tr->tr_data = (void *)uvm_km_alloc(kmem_map,
1773 tr->tr_length, 512, UVM_KMF_NOWAIT|UVM_KMF_WIRED);
1774 splx(s);
1775
1776 if (tr->tr_data == NULL) {
1777 tr->tr_data = tr->tr_real_data;
1778 tr->tr_length = tr->tr_real_length;
1779 return(ENOMEM);
1780 }
1781 if ((tr->tr_flags & TWA_CMD_DATA_IN) != 0)
1782 memcpy(tr->tr_data, tr->tr_real_data,
1783 tr->tr_length);
1784 }
1785
1786 /*
1787 * Map the data buffer into bus space and build the S/G list.
1788 */
1789 rv = bus_dmamap_load(sc->twa_dma_tag, tr->tr_dma_map,
1790 tr->tr_data, tr->tr_length, NULL, BUS_DMA_NOWAIT |
1791 BUS_DMA_STREAMING | ((tr->tr_flags & TWA_CMD_DATA_OUT) ?
1792 BUS_DMA_READ : BUS_DMA_WRITE));
1793
1794 if (rv != 0) {
1795 if ((tr->tr_flags & TWA_CMD_DATA_COPY_NEEDED) != 0) {
1796 s = splvm();
1797 uvm_km_free(kmem_map, (vaddr_t)tr->tr_data,
1798 tr->tr_length, UVM_KMF_WIRED);
1799 splx(s);
1800 }
1801 return (rv);
1802 }
1803
1804 if ((rv = twa_setup_data_dmamap(tr,
1805 tr->tr_dma_map->dm_segs,
1806 tr->tr_dma_map->dm_nsegs, error))) {
1807
1808 if (tr->tr_flags & TWA_CMD_DATA_COPY_NEEDED) {
1809 s = splvm();
1810 uvm_km_free(kmem_map, (vaddr_t)tr->tr_data,
1811 tr->tr_length, UVM_KMF_WIRED);
1812 splx(s);
1813 tr->tr_data = tr->tr_real_data;
1814 tr->tr_length = tr->tr_real_length;
1815 }
1816 } else
1817 error = tr->tr_error;
1818
1819 } else
1820 if ((rv = twa_submit_io(tr)))
1821 twa_unmap_request(tr);
1822
1823 return (rv);
1824 }
1825
1826 #if 0
1827 /*
1828 * Function name: twa_flash_firmware
1829 * Description: Flashes bundled firmware image onto controller.
1830 *
1831 * Input: sc -- ptr to per ctlr structure
1832 * Output: None
1833 * Return value: 0 -- success
1834 * non-zero-- failure
1835 */
1836 static int
1837 twa_flash_firmware(struct twa_softc *sc)
1838 {
1839 struct twa_request *tr;
1840 struct twa_command_download_firmware *cmd;
1841 uint32_t count;
1842 uint32_t fw_img_chunk_size;
1843 uint32_t this_chunk_size = 0;
1844 uint32_t remaining_img_size = 0;
1845 int s, error = 0;
1846 int i;
1847
1848 if ((tr = twa_get_request(sc, 0)) == NULL) {
1849 /* No free request packets available. Can't proceed. */
1850 error = EIO;
1851 goto out;
1852 }
1853
1854 count = (twa_fw_img_size / 65536);
1855
1856 count += ((twa_fw_img_size % 65536) != 0) ? 1 : 0;
1857
1858 tr->tr_cmd_pkt_type |= TWA_CMD_PKT_TYPE_INTERNAL;
1859 /* Allocate sufficient memory to hold a chunk of the firmware image. */
1860 fw_img_chunk_size = ((twa_fw_img_size / count) + 511) & ~511;
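	/*
	 * Worked example with a hypothetical 200000-byte image:
	 * count = 4 (three full 64KB chunks plus a remainder), so
	 * fw_img_chunk_size = (200000 / 4 + 511) & ~511 = 50176, i.e.
	 * the per-chunk allocation rounded up to a 512-byte multiple.
	 */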
1861
1862 s = splvm();
1863 tr->tr_data = (void *)uvm_km_alloc(kmem_map, fw_img_chunk_size, 512,
1864 UVM_KMF_WIRED);
1865 splx(s);
1866
1867 if (tr->tr_data == NULL) {
1868 error = ENOMEM;
1869 goto out;
1870 }
1871
1872 remaining_img_size = twa_fw_img_size;
1873 cmd = &(tr->tr_command->command.cmd_pkt_7k.download_fw);
1874
1875 for (i = 0; i < count; i++) {
1876 /* Build a cmd pkt for downloading firmware. */
1877 memset(tr->tr_command, 0, sizeof(struct twa_command_packet));
1878
1879 tr->tr_command->cmd_hdr.header_desc.size_header = 128;
1880
1881 cmd->opcode = TWA_OP_DOWNLOAD_FIRMWARE;
1882 cmd->sgl_offset = 2; /* offset in dwords, to the beginning
1883 of sg list */
1884 cmd->size = 2; /* this field will be updated at data
1885 map time */
1886 cmd->request_id = tr->tr_request_id;
1887 cmd->unit = 0;
1888 cmd->status = 0;
1889 cmd->flags = 0;
1890 cmd->param = 8; /* prom image */
1891
1892 if (i != (count - 1))
1893 this_chunk_size = fw_img_chunk_size;
1894 else /* last chunk */
1895 this_chunk_size = remaining_img_size;
1896
1897 remaining_img_size -= this_chunk_size;
1898
1899 		memset(tr->tr_data, 0, fw_img_chunk_size);
1900
1901 memcpy(tr->tr_data, twa_fw_img + (i * fw_img_chunk_size),
1902 this_chunk_size);
1903 /*
1904 		 * The rounding below affects only the last chunk.
1905 */
1906 tr->tr_length = (this_chunk_size + 511) & ~511;
1907
1908 tr->tr_flags |= TWA_CMD_DATA_OUT;
1909
1910 error = twa_immediate_request(tr, TWA_REQUEST_TIMEOUT_PERIOD);
1911
1912 if (error) {
1913 if (error == ETIMEDOUT)
1914 /* clean-up done by twa_immediate_request */
1915 return(error);
1916 break;
1917 }
1918 error = cmd->status;
1919
1920 if (i != (count - 1)) {
1921
1922 			/*
1923 			 * XXX The FreeBSD code does not check for the no-error
1924 			 * case, but in practice error has been observed to be 0.
1925 			 */
1926 if ((error =
1927 tr->tr_command->cmd_hdr.status_block.error) == 0) {
1928 continue;
1929 } else if ((error =
1930 tr->tr_command->cmd_hdr.status_block.error) ==
1931 TWA_ERROR_MORE_DATA) {
1932 continue;
1933 } else {
1934 twa_hard_reset(sc);
1935 break;
1936 }
1937 } else /* last chunk */
1938 if (error) {
1939 printf("%s: firmware flash request failed. "
1940 "error = 0x%x\n", sc->twa_dv.dv_xname,
1941 error);
1942 twa_hard_reset(sc);
1943 }
1944 }
1945
1946 if (tr->tr_data) {
1947 s = splvm();
1948 uvm_km_free(kmem_map, (vaddr_t)tr->tr_data,
1949 fw_img_chunk_size, UVM_KMF_WIRED);
1950 splx(s);
1951 }
1952 out:
1953 if (tr)
1954 twa_release_request(tr);
1955 return(error);
1956 }
1957
1958 /*
1959 * Function name: twa_hard_reset
1960 * Description: Hard reset the controller.
1961 *
1962 * Input: sc -- ptr to per ctlr structure
1963 * Output: None
1964 * Return value: 0 -- success
1965 * non-zero-- failure
1966 */
1967 static int
1968 twa_hard_reset(struct twa_softc *sc)
1969 {
1970 struct twa_request *tr;
1971 struct twa_command_reset_firmware *cmd;
1972 int error;
1973
1974 if ((tr = twa_get_request(sc, 0)) == NULL)
1975 return(EIO);
1976 tr->tr_cmd_pkt_type |= TWA_CMD_PKT_TYPE_INTERNAL;
1977 /* Build a cmd pkt for sending down the hard reset command. */
1978 tr->tr_command->cmd_hdr.header_desc.size_header = 128;
1979
1980 cmd = &(tr->tr_command->command.cmd_pkt_7k.reset_fw);
1981 cmd->opcode = TWA_OP_RESET_FIRMWARE;
1982 cmd->size = 2; /* this field will be updated at data map time */
1983 cmd->request_id = tr->tr_request_id;
1984 cmd->unit = 0;
1985 cmd->status = 0;
1986 cmd->flags = 0;
1987 cmd->param = 0; /* don't reload FPGA logic */
1988
1989 tr->tr_data = NULL;
1990 tr->tr_length = 0;
1991
1992 error = twa_immediate_request(tr, TWA_REQUEST_TIMEOUT_PERIOD);
1993 if (error) {
1994 printf("%s: hard reset request could not be posted. "
1995 "error = 0x%x\n", sc->twa_dv.dv_xname, error);
1996 if (error == ETIMEDOUT)
1997 /* clean-up done by twa_immediate_request */
1998 return(error);
1999 goto out;
2000 }
2001 if ((error = cmd->status)) {
2002 printf("%s: hard reset request failed. error = 0x%x\n",
2003 sc->twa_dv.dv_xname, error);
2004 }
2005
2006 out:
2007 if (tr)
2008 twa_release_request(tr);
2009 return(error);
2010 }
2011 #endif
2012
2013 /*
2014 * Function name: twa_intr
2015 * Description: Interrupt handler. Determines the kind of interrupt,
2016 * and calls the appropriate handler.
2017 *
2018 * Input: sc -- ptr to per ctlr structure
2019 * Output: None
2020  *	Return value:	1 -- interrupt was meant for us; 0 -- it was not
2021 */
2022
2023 static int
2024 twa_intr(void *arg)
2025 {
2026 int caught, s, rv;
2027 struct twa_softc *sc;
2028 uint32_t status_reg;
2029 sc = (struct twa_softc *)arg;
2030
2031 caught = 0;
2032 /* Collect current interrupt status. */
2033 status_reg = twa_inl(sc, TWA_STATUS_REGISTER_OFFSET);
2034 if (twa_check_ctlr_state(sc, status_reg)) {
2035 caught = 1;
2036 goto bail;
2037 }
2038 /* Dispatch based on the kind of interrupt. */
2039 if (status_reg & TWA_STATUS_HOST_INTERRUPT) {
2040 twa_outl(sc, TWA_CONTROL_REGISTER_OFFSET,
2041 TWA_CONTROL_CLEAR_HOST_INTERRUPT);
2042 caught = 1;
2043 }
2044 if ((status_reg & TWA_STATUS_ATTENTION_INTERRUPT) != 0) {
2045 twa_outl(sc, TWA_CONTROL_REGISTER_OFFSET,
2046 TWA_CONTROL_CLEAR_ATTENTION_INTERRUPT);
2047 rv = twa_fetch_aen(sc);
2048 #ifdef DIAGNOSTIC
2049 if (rv != 0)
2050 printf("%s: unable to retrieve AEN (%d)\n",
2051 sc->twa_dv.dv_xname, rv);
2052 #endif
2053 caught = 1;
2054 }
2055 if (status_reg & TWA_STATUS_COMMAND_INTERRUPT) {
2056 /* Start any requests that might be in the pending queue. */
2057 twa_outl(sc, TWA_CONTROL_REGISTER_OFFSET,
2058 TWA_CONTROL_MASK_COMMAND_INTERRUPT);
2059 (void)twa_drain_pending_queue(sc);
2060 caught = 1;
2061 }
2062 if (status_reg & TWA_STATUS_RESPONSE_INTERRUPT) {
2063 s = splbio();
2064 twa_done(sc);
2065 splx(s);
2066 caught = 1;
2067 }
2068 bail:
2069 return (caught);
2070 }
2071
2072 /*
2073 * Accept an open operation on the control device.
2074 */
2075 static int
2076 twaopen(dev_t dev, int flag, int mode, struct lwp *l)
2077 {
2078 struct twa_softc *twa;
2079
2080 if ((twa = device_lookup(&twa_cd, minor(dev))) == NULL)
2081 return (ENXIO);
2082 if ((twa->twa_sc_flags & TWA_STATE_OPEN) != 0)
2083 return (EBUSY);
2084
2085 twa->twa_sc_flags |= TWA_STATE_OPEN;
2086
2087 return (0);
2088 }
2089
2090 /*
2091 * Accept the last close on the control device.
2092 */
2093 static int
2094 twaclose(dev_t dev, int flag, int mode, struct lwp *l)
2095 {
2096 struct twa_softc *twa;
2097
2098 twa = device_lookup(&twa_cd, minor(dev));
2099 twa->twa_sc_flags &= ~TWA_STATE_OPEN;
2100 return (0);
2101 }
2102
2103 /*
2104 * Function name: twaioctl
2105 * Description: ioctl handler.
2106 *
2107  *	Input:		dev	-- control device
2108  *			cmd	-- ioctl cmd
2109  *			data	-- ptr to buffer in kernel memory, which is
2110  *				   a copy of the input buffer in user-space
2111  *	Output:		data	-- ptr to buffer in kernel memory, which will
2112  *				   be copied back to the output buffer in user-space
2113 * Return value: 0 -- success
2114 * non-zero-- failure
2115 */
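/*
 * Rough sketch (not part of the driver) of how a user-space management
 * tool might use the firmware pass-through ioctl.  The surrounding
 * user-land code and the ctl_fd variable are hypothetical; the structure
 * fields and ioctl name are the ones handled below:
 *
 *	struct twa_ioctl_9k ubuf;
 *
 *	memset(&ubuf, 0, sizeof(ubuf));
 *	ubuf.twa_drvr_pkt.buffer_length = len;	// size of payload
 *	ubuf.pdata = payload;			// user-space payload buffer
 *	// fill in ubuf.twa_cmd_pkt with the firmware command packet
 *	if (ioctl(ctl_fd, TW_OSL_IOCTL_FIRMWARE_PASS_THROUGH, &ubuf) == -1)
 *		err(1, "firmware pass-through");
 */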
2116 static int
2117 twaioctl(dev_t dev, u_long cmd, caddr_t data, int flag, struct lwp *l)
2118 {
2119 struct twa_softc *sc;
2120 struct twa_ioctl_9k *user_buf = (struct twa_ioctl_9k *)data;
2121 struct tw_cl_event_packet event_buf;
2122 struct twa_request *tr = 0;
2123 int32_t event_index = 0;
2124 int32_t start_index;
2125 int s, error = 0;
2126
2127 sc = device_lookup(&twa_cd, minor(dev));
2128
2129 switch (cmd) {
2130 case TW_OSL_IOCTL_FIRMWARE_PASS_THROUGH:
2131 {
2132 struct twa_command_packet *cmdpkt;
2133 uint32_t data_buf_size_adjusted;
2134
2135 /* Get a request packet */
2136 tr = twa_get_request_wait(sc, 0);
2137 KASSERT(tr != NULL);
2138 /*
2139 * Make sure that the data buffer sent to firmware is a
2140 * 512 byte multiple in size.
2141 */
2142 data_buf_size_adjusted =
2143 (user_buf->twa_drvr_pkt.buffer_length + 511) & ~511;
2144
2145 if ((tr->tr_length = data_buf_size_adjusted)) {
2146 if ((tr->tr_data = malloc(data_buf_size_adjusted,
2147 M_DEVBUF, M_WAITOK)) == NULL) {
2148 error = ENOMEM;
2149 goto fw_passthru_done;
2150 }
2151 /* Copy the payload. */
2152 if ((error = copyin((void *) (user_buf->pdata),
2153 (void *) (tr->tr_data),
2154 user_buf->twa_drvr_pkt.buffer_length)) != 0) {
2155 goto fw_passthru_done;
2156 }
2157 tr->tr_flags |= TWA_CMD_DATA_IN | TWA_CMD_DATA_OUT;
2158 }
2159 tr->tr_cmd_pkt_type |= TWA_CMD_PKT_TYPE_IOCTL;
2160 cmdpkt = tr->tr_command;
2161
2162 /* Copy the command packet. */
2163 memcpy(cmdpkt, &(user_buf->twa_cmd_pkt),
2164 sizeof(struct twa_command_packet));
2165 cmdpkt->command.cmd_pkt_7k.generic.request_id =
2166 tr->tr_request_id;
2167
2168 /* Send down the request, and wait for it to complete. */
2169 if ((error = twa_wait_request(tr, TWA_REQUEST_TIMEOUT_PERIOD))) {
2170 if (error == ETIMEDOUT)
2171 break; /* clean-up done by twa_wait_request */
2172 goto fw_passthru_done;
2173 }
2174
2175 /* Copy the command packet back into user space. */
2176 memcpy(&user_buf->twa_cmd_pkt, cmdpkt,
2177 sizeof(struct twa_command_packet));
2178
2179 /* If there was a payload, copy it back too. */
2180 if (tr->tr_length)
2181 error = copyout(tr->tr_data, user_buf->pdata,
2182 user_buf->twa_drvr_pkt.buffer_length);
2183 fw_passthru_done:
2184 /* Free resources. */
2185 if (tr->tr_data)
2186 free(tr->tr_data, M_DEVBUF);
2187
2188 if (tr)
2189 twa_release_request(tr);
2190 break;
2191 }
2192
2193 case TW_OSL_IOCTL_SCAN_BUS:
2194 twa_request_bus_scan(sc);
2195 break;
2196
2197 case TW_CL_IOCTL_GET_FIRST_EVENT:
2198 if (sc->twa_aen_queue_wrapped) {
2199 if (sc->twa_aen_queue_overflow) {
2200 /*
2201 * The aen queue has wrapped, even before some
2202 * events have been retrieved. Let the caller
2203 * know that he missed out on some AEN's.
2204 */
2205 user_buf->twa_drvr_pkt.status =
2206 TWA_ERROR_AEN_OVERFLOW;
2207 sc->twa_aen_queue_overflow = FALSE;
2208 } else
2209 user_buf->twa_drvr_pkt.status = 0;
2210 event_index = sc->twa_aen_head;
2211 } else {
2212 if (sc->twa_aen_head == sc->twa_aen_tail) {
2213 user_buf->twa_drvr_pkt.status =
2214 TWA_ERROR_AEN_NO_EVENTS;
2215 break;
2216 }
2217 user_buf->twa_drvr_pkt.status = 0;
2218 event_index = sc->twa_aen_tail; /* = 0 */
2219 }
2220 if ((error = copyout(sc->twa_aen_queue[event_index],
2221 		    user_buf->pdata, sizeof(struct tw_cl_event_packet))) == 0)
2222 (sc->twa_aen_queue[event_index])->retrieved =
2223 TWA_AEN_RETRIEVED;
2224 break;
2225
2226 case TW_CL_IOCTL_GET_LAST_EVENT:
2227 if (sc->twa_aen_queue_wrapped) {
2228 if (sc->twa_aen_queue_overflow) {
2229 /*
2230 * The aen queue has wrapped, even before some
2231 * events have been retrieved. Let the caller
2232 * know that he missed out on some AEN's.
2233 */
2234 user_buf->twa_drvr_pkt.status =
2235 TWA_ERROR_AEN_OVERFLOW;
2236 sc->twa_aen_queue_overflow = FALSE;
2237 } else
2238 user_buf->twa_drvr_pkt.status = 0;
2239 } else {
2240 if (sc->twa_aen_head == sc->twa_aen_tail) {
2241 user_buf->twa_drvr_pkt.status =
2242 TWA_ERROR_AEN_NO_EVENTS;
2243 break;
2244 }
2245 user_buf->twa_drvr_pkt.status = 0;
2246 }
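		/*
		 * The newest event is the one just behind the head pointer;
		 * TWA_Q_LENGTH is added before the modulo so the index stays
		 * non-negative when the head is at slot 0.
		 */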
2247 event_index =
2248 (sc->twa_aen_head - 1 + TWA_Q_LENGTH) % TWA_Q_LENGTH;
2249 if ((error = copyout(sc->twa_aen_queue[event_index],
2250 		    user_buf->pdata, sizeof(struct tw_cl_event_packet))) == 0)
2251 (sc->twa_aen_queue[event_index])->retrieved =
2252 TWA_AEN_RETRIEVED;
2253 break;
2254
2255 case TW_CL_IOCTL_GET_NEXT_EVENT:
2256 user_buf->twa_drvr_pkt.status = 0;
2257 if (sc->twa_aen_queue_wrapped) {
2258
2259 if (sc->twa_aen_queue_overflow) {
2260 /*
2261 * The aen queue has wrapped, even before some
2262 * events have been retrieved. Let the caller
2263 * know that he missed out on some AEN's.
2264 */
2265 user_buf->twa_drvr_pkt.status =
2266 TWA_ERROR_AEN_OVERFLOW;
2267 sc->twa_aen_queue_overflow = FALSE;
2268 }
2269 start_index = sc->twa_aen_head;
2270 } else {
2271 if (sc->twa_aen_head == sc->twa_aen_tail) {
2272 user_buf->twa_drvr_pkt.status =
2273 TWA_ERROR_AEN_NO_EVENTS;
2274 break;
2275 }
2276 start_index = sc->twa_aen_tail; /* = 0 */
2277 }
2278 error = copyin(user_buf->pdata, &event_buf,
2279 sizeof(struct tw_cl_event_packet));
2280
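		/*
		 * The caller passes in the sequence id of the last event it
		 * saw; the event after it sits (caller_seq - oldest_seq + 1)
		 * slots past the oldest entry, modulo the queue length.
		 */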
2281 event_index = (start_index + event_buf.sequence_id -
2282 (sc->twa_aen_queue[start_index])->sequence_id + 1)
2283 % TWA_Q_LENGTH;
2284
2285 if (!((sc->twa_aen_queue[event_index])->sequence_id >
2286 event_buf.sequence_id)) {
2287 if (user_buf->twa_drvr_pkt.status ==
2288 TWA_ERROR_AEN_OVERFLOW)
2289 /* so we report the overflow next time */
2290 sc->twa_aen_queue_overflow = TRUE;
2291 user_buf->twa_drvr_pkt.status = TWA_ERROR_AEN_NO_EVENTS;
2292 break;
2293 }
2294 if ((error = copyout(sc->twa_aen_queue[event_index],
2295 		    user_buf->pdata, sizeof(struct tw_cl_event_packet))) == 0)
2296 (sc->twa_aen_queue[event_index])->retrieved =
2297 TWA_AEN_RETRIEVED;
2298 break;
2299
2300 case TW_CL_IOCTL_GET_PREVIOUS_EVENT:
2301 user_buf->twa_drvr_pkt.status = 0;
2302 if (sc->twa_aen_queue_wrapped) {
2303 if (sc->twa_aen_queue_overflow) {
2304 /*
2305 * The aen queue has wrapped, even before some
2306 * events have been retrieved. Let the caller
2307 * know that he missed out on some AEN's.
2308 */
2309 user_buf->twa_drvr_pkt.status =
2310 TWA_ERROR_AEN_OVERFLOW;
2311 sc->twa_aen_queue_overflow = FALSE;
2312 }
2313 start_index = sc->twa_aen_head;
2314 } else {
2315 if (sc->twa_aen_head == sc->twa_aen_tail) {
2316 user_buf->twa_drvr_pkt.status =
2317 TWA_ERROR_AEN_NO_EVENTS;
2318 break;
2319 }
2320 start_index = sc->twa_aen_tail; /* = 0 */
2321 }
2322 if ((error = copyin(user_buf->pdata, &event_buf,
2323 sizeof(struct tw_cl_event_packet))) != 0)
			break;
2325 event_index = (start_index + event_buf.sequence_id -
2326 (sc->twa_aen_queue[start_index])->sequence_id - 1)
2327 % TWA_Q_LENGTH;
2328 if (!((sc->twa_aen_queue[event_index])->sequence_id <
2329 event_buf.sequence_id)) {
2330 if (user_buf->twa_drvr_pkt.status ==
2331 TWA_ERROR_AEN_OVERFLOW)
2332 /* so we report the overflow next time */
2333 sc->twa_aen_queue_overflow = TRUE;
2334 user_buf->twa_drvr_pkt.status =
2335 TWA_ERROR_AEN_NO_EVENTS;
2336 break;
2337 }
2338 if ((error = copyout(sc->twa_aen_queue [event_index],
2339 user_buf->pdata, sizeof(struct tw_cl_event_packet))) != 0)
2340 aprint_error("%s: get_previous: Could not copyout to "
2341 "event_buf. error = %x\n", sc->twa_dv.dv_xname,
2342 error);
2343 (sc->twa_aen_queue[event_index])->retrieved = TWA_AEN_RETRIEVED;
2344 break;
2345
2346 case TW_CL_IOCTL_GET_LOCK:
2347 {
2348 struct tw_cl_lock_packet twa_lock;
2349
2350 copyin(user_buf->pdata, &twa_lock,
2351 sizeof(struct tw_cl_lock_packet));
2352 s = splbio();
2353 if ((sc->twa_ioctl_lock.lock == TWA_LOCK_FREE) ||
2354 (twa_lock.force_flag) ||
2355 (time_second >= sc->twa_ioctl_lock.timeout)) {
2356
2357 sc->twa_ioctl_lock.lock = TWA_LOCK_HELD;
2358 sc->twa_ioctl_lock.timeout = time_second +
2359 (twa_lock.timeout_msec / 1000);
2360 twa_lock.time_remaining_msec = twa_lock.timeout_msec;
2361 user_buf->twa_drvr_pkt.status = 0;
2362 } else {
2363 twa_lock.time_remaining_msec =
2364 (sc->twa_ioctl_lock.timeout - time_second) *
2365 1000;
2366 user_buf->twa_drvr_pkt.status =
2367 TWA_ERROR_IOCTL_LOCK_ALREADY_HELD;
2368 }
2369 splx(s);
2370 copyout(&twa_lock, user_buf->pdata,
2371 sizeof(struct tw_cl_lock_packet));
2372 break;
2373 }
2374
2375 case TW_CL_IOCTL_RELEASE_LOCK:
2376 s = splbio();
2377 if (sc->twa_ioctl_lock.lock == TWA_LOCK_FREE) {
2378 user_buf->twa_drvr_pkt.status =
2379 TWA_ERROR_IOCTL_LOCK_NOT_HELD;
2380 } else {
2381 sc->twa_ioctl_lock.lock = TWA_LOCK_FREE;
2382 user_buf->twa_drvr_pkt.status = 0;
2383 }
2384 splx(s);
2385 break;
2386
2387 case TW_CL_IOCTL_GET_COMPATIBILITY_INFO:
2388 {
2389 struct tw_cl_compatibility_packet comp_pkt;
2390
2391 memcpy(comp_pkt.driver_version, TWA_DRIVER_VERSION_STRING,
2392 sizeof(TWA_DRIVER_VERSION_STRING));
2393 comp_pkt.working_srl = sc->working_srl;
2394 comp_pkt.working_branch = sc->working_branch;
2395 comp_pkt.working_build = sc->working_build;
2396 user_buf->twa_drvr_pkt.status = 0;
2397
2398 /* Copy compatibility information to user space. */
2399 copyout(&comp_pkt, user_buf->pdata,
2400 min(sizeof(struct tw_cl_compatibility_packet),
2401 user_buf->twa_drvr_pkt.buffer_length));
2402 break;
2403 }
2404
2405 case TWA_IOCTL_GET_UNITNAME: /* WASABI EXTENSION */
2406 {
2407 struct twa_unitname *tn;
2408 struct twa_drive *tdr;
2409
2410 tn = (struct twa_unitname *)data;
2411 /* XXX mutex */
2412 if (tn->tn_unit < 0 || tn->tn_unit >= TWA_MAX_UNITS)
2413 return (EINVAL);
2414 tdr = &sc->sc_units[tn->tn_unit];
2415 if (tdr->td_dev == NULL)
2416 tn->tn_name[0] = '\0';
2417 else
2418 strlcpy(tn->tn_name, tdr->td_dev->dv_xname,
2419 sizeof(tn->tn_name));
2420 return (0);
2421 }
2422
2423 default:
2424 /* Unknown opcode. */
2425 error = ENOTTY;
2426 }
2427
2428 return(error);
2429 }
2430
2431 const struct cdevsw twa_cdevsw = {
2432 twaopen, twaclose, noread, nowrite, twaioctl,
2433 nostop, notty, nopoll, nommap, nokqfilter, D_OTHER,
2434 };
2435
2436 /*
2437 * Function name: twa_get_param
2438 * Description: Get a firmware parameter.
2439 *
2440 * Input: sc -- ptr to per ctlr structure
2441 * table_id -- parameter table #
2442 * param_id -- index of the parameter in the table
2443 * param_size -- size of the parameter in bytes
2444 * callback -- ptr to function, if any, to be called
2445 * back on completion; NULL if no callback.
2446  *	Output:		param	-- ptr to the retrieved parameter structure
2447  *	Return value:	0	-- success
2448  *			non-zero-- failure
2449 */
2450 static int
2451 twa_get_param(struct twa_softc *sc, int table_id, int param_id,
2452 size_t param_size, void (* callback)(struct twa_request *tr),
2453 struct twa_param_9k **param)
2454 {
2455 int rv = 0;
2456 struct twa_request *tr;
2457 union twa_command_7k *cmd;
2458
2459 /* Get a request packet. */
2460 if ((tr = twa_get_request(sc, 0)) == NULL) {
2461 rv = EAGAIN;
2462 goto out;
2463 }
2464
2465 tr->tr_cmd_pkt_type |= TWA_CMD_PKT_TYPE_INTERNAL;
2466
2467 /* Allocate memory to read data into. */
2468 if ((*param = (struct twa_param_9k *)
2469 malloc(TWA_SECTOR_SIZE, M_DEVBUF, M_NOWAIT)) == NULL) {
2470 rv = ENOMEM;
2471 goto out;
2472 }
2473
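	/*
	 * struct twa_param_9k ends in a one-byte data placeholder (an
	 * assumption from its use here), hence the "- 1 + param_size"
	 * when sizing the region to clear.
	 */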
2474 memset(*param, 0, sizeof(struct twa_param_9k) - 1 + param_size);
2475 tr->tr_data = *param;
2476 tr->tr_length = TWA_SECTOR_SIZE;
2477 tr->tr_flags = TWA_CMD_DATA_IN | TWA_CMD_DATA_OUT;
2478
2479 /* Build the cmd pkt. */
2480 cmd = &(tr->tr_command->command.cmd_pkt_7k);
2481
2482 tr->tr_command->cmd_hdr.header_desc.size_header = 128;
2483
2484 cmd->param.opcode = TWA_OP_GET_PARAM;
2485 cmd->param.sgl_offset = 2;
2486 cmd->param.size = 2;
2487 cmd->param.request_id = tr->tr_request_id;
2488 cmd->param.unit = 0;
2489 cmd->param.param_count = 1;
2490
2491 /* Specify which parameter we need. */
2492 (*param)->table_id = table_id | TWA_9K_PARAM_DESCRIPTOR;
2493 (*param)->parameter_id = param_id;
2494 (*param)->parameter_size_bytes = param_size;
2495
2496 /* Submit the command. */
2497 if (callback == NULL) {
2498 /* There's no call back; wait till the command completes. */
2499 rv = twa_immediate_request(tr, TWA_REQUEST_TIMEOUT_PERIOD);
2500
2501 if (rv != 0)
2502 goto out;
2503
2504 if ((rv = cmd->param.status) != 0) {
2505 /* twa_drain_complete_queue will have done the unmapping */
2506 goto out;
2507 }
2508 twa_release_request(tr);
2509 return (rv);
2510 } else {
2511 /* There's a call back. Simply submit the command. */
2512 tr->tr_callback = callback;
2513 rv = twa_map_request(tr);
2514 return (rv);
2515 }
2516 out:
2517 if (tr)
2518 twa_release_request(tr);
2519 return(rv);
2520 }
2521
2522 /*
2523 * Function name: twa_set_param
2524 * Description: Set a firmware parameter.
2525 *
2526 * Input: sc -- ptr to per ctlr structure
2527 * table_id -- parameter table #
2528 * param_id -- index of the parameter in the table
2529 * param_size -- size of the parameter in bytes
2530 * callback -- ptr to function, if any, to be called
2531 * back on completion; NULL if no callback.
2532 * Output: None
2533 * Return value: 0 -- success
2534 * non-zero-- failure
2535 */
2536 static int
2537 twa_set_param(struct twa_softc *sc, int table_id, int param_id, int param_size,
2538 void *data, void (* callback)(struct twa_request *tr))
2539 {
2540 struct twa_request *tr;
2541 union twa_command_7k *cmd;
2542 struct twa_param_9k *param = NULL;
2543 int error = ENOMEM;
2544
2545 tr = twa_get_request(sc, 0);
2546 if (tr == NULL)
2547 return (EAGAIN);
2548
2549 tr->tr_cmd_pkt_type |= TWA_CMD_PKT_TYPE_INTERNAL;
2550
2551 /* Allocate memory to send data using. */
2552 if ((param = (struct twa_param_9k *)
2553 malloc(TWA_SECTOR_SIZE, M_DEVBUF, M_NOWAIT)) == NULL)
2554 goto out;
2555 memset(param, 0, sizeof(struct twa_param_9k) - 1 + param_size);
2556 tr->tr_data = param;
2557 tr->tr_length = TWA_SECTOR_SIZE;
2558 tr->tr_flags = TWA_CMD_DATA_IN | TWA_CMD_DATA_OUT;
2559
2560 /* Build the cmd pkt. */
2561 cmd = &(tr->tr_command->command.cmd_pkt_7k);
2562
2563 tr->tr_command->cmd_hdr.header_desc.size_header = 128;
2564
2565 cmd->param.opcode = TWA_OP_SET_PARAM;
2566 cmd->param.sgl_offset = 2;
2567 cmd->param.size = 2;
2568 cmd->param.request_id = tr->tr_request_id;
2569 cmd->param.unit = 0;
2570 cmd->param.param_count = 1;
2571
2572 /* Specify which parameter we want to set. */
2573 param->table_id = table_id | TWA_9K_PARAM_DESCRIPTOR;
2574 param->parameter_id = param_id;
2575 param->parameter_size_bytes = param_size;
2576 memcpy(param->data, data, param_size);
2577
2578 /* Submit the command. */
2579 if (callback == NULL) {
2580 /* There's no call back; wait till the command completes. */
2581 error = twa_immediate_request(tr, TWA_REQUEST_TIMEOUT_PERIOD);
2582 if (error == ETIMEDOUT)
2583 /* clean-up done by twa_immediate_request */
2584 return(error);
2585 if (error)
2586 goto out;
2587 if ((error = cmd->param.status)) {
2588 /*
2589 * twa_drain_complete_queue will have done the
2590 * unmapping.
2591 */
2592 goto out;
2593 }
2594 free(param, M_DEVBUF);
2595 twa_release_request(tr);
2596 return(error);
2597 } else {
2598 /* There's a call back. Simply submit the command. */
2599 tr->tr_callback = callback;
2600 if ((error = twa_map_request(tr)))
2601 goto out;
2602
2603 return (0);
2604 }
2605 out:
2606 if (param)
2607 free(param, M_DEVBUF);
2608 if (tr)
2609 twa_release_request(tr);
2610 return(error);
2611 }
2612
2613 /*
2614 * Function name: twa_init_connection
2615 * Description: Send init_connection cmd to firmware
2616 *
2617 * Input: sc -- ptr to per ctlr structure
2618 * message_credits -- max # of requests that we might send
2619 * down simultaneously. This will be
2620 * typically set to 256 at init-time or
2621 * after a reset, and to 1 at shutdown-time
2622 * set_features -- indicates if we intend to use 64-bit
2623 * sg, also indicates if we want to do a
2624 * basic or an extended init_connection;
2625 *
2626 * Note: The following input/output parameters are valid, only in case of an
2627 * extended init_connection:
2628 *
2629 * current_fw_srl -- srl of fw we are bundled
2630 * with, if any; 0 otherwise
2631 * current_fw_arch_id -- arch_id of fw we are bundled
2632 * with, if any; 0 otherwise
2633 * current_fw_branch -- branch # of fw we are bundled
2634 * with, if any; 0 otherwise
2635 * current_fw_build -- build # of fw we are bundled
2636 * with, if any; 0 otherwise
2637 * Output: fw_on_ctlr_srl -- srl of fw on ctlr
2638 * fw_on_ctlr_arch_id -- arch_id of fw on ctlr
2639 * fw_on_ctlr_branch -- branch # of fw on ctlr
2640 * fw_on_ctlr_build -- build # of fw on ctlr
2641 * init_connect_result -- result bitmap of fw response
2642 * Return value: 0 -- success
2643 * non-zero-- failure
2644 */
2645 static int
2646 twa_init_connection(struct twa_softc *sc, uint16_t message_credits,
2647 uint32_t set_features, uint16_t current_fw_srl,
2648 uint16_t current_fw_arch_id, uint16_t current_fw_branch,
2649 uint16_t current_fw_build, uint16_t *fw_on_ctlr_srl,
2650 uint16_t *fw_on_ctlr_arch_id, uint16_t *fw_on_ctlr_branch,
2651 uint16_t *fw_on_ctlr_build, uint32_t *init_connect_result)
2652 {
2653 struct twa_request *tr;
2654 struct twa_command_init_connect *init_connect;
2655 int error = 1;
2656
2657 /* Get a request packet. */
2658 if ((tr = twa_get_request(sc, 0)) == NULL)
2659 goto out;
2660 tr->tr_cmd_pkt_type |= TWA_CMD_PKT_TYPE_INTERNAL;
2661 /* Build the cmd pkt. */
2662 init_connect = &(tr->tr_command->command.cmd_pkt_7k.init_connect);
2663
2664 tr->tr_command->cmd_hdr.header_desc.size_header = 128;
2665
2666 init_connect->opcode = TWA_OP_INIT_CONNECTION;
2667 init_connect->request_id = tr->tr_request_id;
2668 init_connect->message_credits = message_credits;
2669 init_connect->features = set_features;
2670 if (TWA_64BIT_ADDRESSES) {
2671 printf("64 bit addressing supported for scatter/gather list\n");
2672 init_connect->features |= TWA_64BIT_SG_ADDRESSES;
2673 }
2674 if (set_features & TWA_EXTENDED_INIT_CONNECT) {
2675 /*
2676 * Fill in the extra fields needed for
2677 * an extended init_connect.
2678 */
2679 init_connect->size = 6;
2680 init_connect->fw_srl = current_fw_srl;
2681 init_connect->fw_arch_id = current_fw_arch_id;
2682 init_connect->fw_branch = current_fw_branch;
2683 } else
2684 init_connect->size = 3;
2685
2686 /* Submit the command, and wait for it to complete. */
2687 error = twa_immediate_request(tr, TWA_REQUEST_TIMEOUT_PERIOD);
2688 if (error == ETIMEDOUT)
2689 return(error); /* clean-up done by twa_immediate_request */
2690 if (error)
2691 goto out;
2692 if ((error = init_connect->status)) {
2693 /* twa_drain_complete_queue will have done the unmapping */
2694 goto out;
2695 }
2696 if (set_features & TWA_EXTENDED_INIT_CONNECT) {
2697 *fw_on_ctlr_srl = init_connect->fw_srl;
2698 *fw_on_ctlr_arch_id = init_connect->fw_arch_id;
2699 *fw_on_ctlr_branch = init_connect->fw_branch;
2700 *fw_on_ctlr_build = init_connect->fw_build;
2701 *init_connect_result = init_connect->result;
2702 }
2703 twa_release_request(tr);
2704 return(error);
2705
2706 out:
2707 if (tr)
2708 twa_release_request(tr);
2709 return(error);
2710 }
2711
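/*
 * Function name:	twa_reset
 * Description:		Soft-resets the controller, re-establishes the
 *			logical connection with the firmware, and drains
 *			the busy queue.
 *
 *	Input:		sc	-- ptr to per ctlr structure
 *	Output:		None
 *	Return value:	0	-- success
 *			non-zero-- failure
 */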
2712 static int
2713 twa_reset(struct twa_softc *sc)
2714 {
2715 int s;
2716 int error = 0;
2717
2718 /*
2719 * Disable interrupts from the controller, and mask any
2720 * accidental entry into our interrupt handler.
2721 */
2722 twa_outl(sc, TWA_CONTROL_REGISTER_OFFSET,
2723 TWA_CONTROL_DISABLE_INTERRUPTS);
2724
2725 s = splbio();
2726
2727 /* Soft reset the controller. */
2728 if ((error = twa_soft_reset(sc)))
2729 goto out;
2730
2731 /* Re-establish logical connection with the controller. */
2732 if ((error = twa_init_connection(sc, TWA_INIT_MESSAGE_CREDITS,
2733 0, 0, 0, 0, 0,
2734 NULL, NULL, NULL, NULL, NULL))) {
2735 goto out;
2736 }
2737 /*
2738 * Complete all requests in the complete queue; error back all requests
2739 * in the busy queue. Any internal requests will be simply freed.
2740 * Re-submit any requests in the pending queue.
2741 */
2742 twa_drain_busy_queue(sc);
2743
2744 out:
2745 splx(s);
2746 /*
2747 * Enable interrupts, and also clear attention and response interrupts.
2748 */
2749 twa_outl(sc, TWA_CONTROL_REGISTER_OFFSET,
2750 TWA_CONTROL_CLEAR_ATTENTION_INTERRUPT |
2751 TWA_CONTROL_UNMASK_RESPONSE_INTERRUPT |
2752 TWA_CONTROL_ENABLE_INTERRUPTS);
2753 return(error);
2754 }
2755
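/*
 * Function name:	twa_soft_reset
 * Description:		Issues a soft reset to the controller, waits for it
 *			to report readiness, then drains the response and
 *			AEN queues.
 *
 *	Input:		sc	-- ptr to per ctlr structure
 *	Output:		None
 *	Return value:	0	-- success
 *			non-zero-- failure
 */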
2756 static int
2757 twa_soft_reset(struct twa_softc *sc)
2758 {
2759 uint32_t status_reg;
2760
2761 twa_outl(sc, TWA_CONTROL_REGISTER_OFFSET,
2762 TWA_CONTROL_ISSUE_SOFT_RESET |
2763 TWA_CONTROL_CLEAR_HOST_INTERRUPT |
2764 TWA_CONTROL_CLEAR_ATTENTION_INTERRUPT |
2765 TWA_CONTROL_MASK_COMMAND_INTERRUPT |
2766 TWA_CONTROL_MASK_RESPONSE_INTERRUPT |
2767 TWA_CONTROL_DISABLE_INTERRUPTS);
2768
2769 if (twa_wait_status(sc, TWA_STATUS_MICROCONTROLLER_READY |
2770 TWA_STATUS_ATTENTION_INTERRUPT, 30)) {
2771 aprint_error("%s: no attention interrupt after reset.\n",
2772 sc->twa_dv.dv_xname);
2773 return(1);
2774 }
2775 twa_outl(sc, TWA_CONTROL_REGISTER_OFFSET,
2776 TWA_CONTROL_CLEAR_ATTENTION_INTERRUPT);
2777
2778 if (twa_drain_response_queue(sc)) {
2779 aprint_error("%s: cannot drain response queue.\n",
2780 sc->twa_dv.dv_xname);
2781 return(1);
2782 }
2783 if (twa_drain_aen_queue(sc)) {
2784 aprint_error("%s: cannot drain AEN queue.\n",
2785 sc->twa_dv.dv_xname);
2786 return(1);
2787 }
2788 if (twa_find_aen(sc, TWA_AEN_SOFT_RESET)) {
2789 aprint_error("%s: reset not reported by controller.\n",
2790 sc->twa_dv.dv_xname);
2791 return(1);
2792 }
2793 status_reg = twa_inl(sc, TWA_STATUS_REGISTER_OFFSET);
2794 if (TWA_STATUS_ERRORS(status_reg) ||
2795 twa_check_ctlr_state(sc, status_reg)) {
2796 aprint_error("%s: controller errors detected.\n",
2797 sc->twa_dv.dv_xname);
2798 return(1);
2799 }
2800 return(0);
2801 }
2802
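/*
 * Function name:	twa_wait_status
 * Description:		Polls the status register until the requested bits
 *			are set, or the timeout expires.
 *
 *	Input:		sc	-- ptr to per ctlr structure
 *			status	-- status bit(s) to wait for
 *			timeout	-- time to wait, in seconds
 *	Output:		None
 *	Return value:	0	-- success
 *			non-zero-- failure (timed out)
 */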
2803 static int
2804 twa_wait_status(struct twa_softc *sc, uint32_t status, uint32_t timeout)
2805 {
2806 	uint32_t	status_reg;
2807 	uint32_t	polls;
2808 
2809 	/* Poll the status register every 100ms, for up to 'timeout' seconds. */
2810 	for (polls = 0; polls < timeout * 10; polls++) {
2811 		status_reg = twa_inl(sc, TWA_STATUS_REGISTER_OFFSET);
2812 		/* got the required bit(s)? */
2813 		if ((status_reg & status) == status)
2814 			return(0);
2815 		DELAY(100000);
2816 	}
2824
2825 return(1);
2826 }
2827
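/*
 * Function name:	twa_fetch_aen
 * Description:		Posts a request sense command to the controller to
 *			retrieve a pending AEN; completion is handled by
 *			twa_aen_callback.
 *
 *	Input:		sc	-- ptr to per ctlr structure
 *	Output:		None
 *	Return value:	0	-- success
 *			non-zero-- failure
 */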
2828 static int
2829 twa_fetch_aen(struct twa_softc *sc)
2830 {
2831 struct twa_request *tr;
2832 int s, error = 0;
2833
2834 s = splbio();
2835
2836 if ((tr = twa_get_request(sc, TWA_CMD_AEN)) == NULL) {
2837 splx(s);
2838 return(EIO);
2839 }
2840 tr->tr_cmd_pkt_type |= TWA_CMD_PKT_TYPE_INTERNAL;
2841 tr->tr_callback = twa_aen_callback;
2842 tr->tr_data = malloc(TWA_SECTOR_SIZE, M_DEVBUF, M_NOWAIT);
2843 if (twa_request_sense(tr, 0) != 0) {
2844 if (tr->tr_data)
2845 free(tr->tr_data, M_DEVBUF);
2846 twa_release_request(tr);
2847 error = 1;
2848 }
2849 splx(s);
2850
2851 return(error);
2852 }
2853
2854 /*
2855 * Function name: twa_aen_callback
2856 * Description: Callback for requests to fetch AEN's.
2857 *
2858 * Input: tr -- ptr to completed request pkt
2859 * Output: None
2860 * Return value: None
2861 */
2862 static void
2863 twa_aen_callback(struct twa_request *tr)
2864 {
2865 int i;
2866 int fetch_more_aens = 0;
2867 struct twa_softc *sc = tr->tr_sc;
2868 struct twa_command_header *cmd_hdr =
2869 (struct twa_command_header *)(tr->tr_data);
2870 struct twa_command_9k *cmd =
2871 &(tr->tr_command->command.cmd_pkt_9k);
2872
2873 if (! cmd->status) {
2874 if ((tr->tr_cmd_pkt_type & TWA_CMD_PKT_TYPE_9K) &&
2875 (cmd->cdb[0] == 0x3 /* REQUEST_SENSE */))
2876 if (twa_enqueue_aen(sc, cmd_hdr)
2877 != TWA_AEN_QUEUE_EMPTY)
2878 fetch_more_aens = 1;
2879 } else {
2880 cmd_hdr->err_specific_desc[sizeof(cmd_hdr->err_specific_desc) - 1] = '\0';
2881 for (i = 0; i < 18; i++)
2882 printf("%x\t", tr->tr_command->cmd_hdr.sense_data[i]);
2883
2884 		printf("\n");
2885
2886 for (i = 0; i < 128; i++)
2887 printf("%x\t", ((int8_t *)(tr->tr_data))[i]);
2888 }
2889 if (tr->tr_data)
2890 free(tr->tr_data, M_DEVBUF);
2891 twa_release_request(tr);
2892
2893 if (fetch_more_aens)
2894 twa_fetch_aen(sc);
2895 }
2896
2897 /*
2898 * Function name: twa_enqueue_aen
2899 * Description: Queues AEN's to be supplied to user-space tools on request.
2900 *
2901 * Input: sc -- ptr to per ctlr structure
2902 * cmd_hdr -- ptr to hdr of fw cmd pkt, from where the AEN
2903 * details can be retrieved.
2904 * Output: None
2905 * Return value: None
2906 */
2907 static uint16_t
2908 twa_enqueue_aen(struct twa_softc *sc, struct twa_command_header *cmd_hdr)
2909 {
2910 int rv, s;
2911 struct tw_cl_event_packet *event;
2912 uint16_t aen_code;
2913 	uint32_t		sync_time;
2914
2915 s = splbio();
2916 aen_code = cmd_hdr->status_block.error;
2917
2918 switch (aen_code) {
2919 case TWA_AEN_SYNC_TIME_WITH_HOST:
2920
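		/*
		 * The firmware evidently keeps its scheduler time as seconds
		 * since last Sunday 00:00.  The Unix epoch fell on a
		 * Thursday, so shifting time_second back by three days
		 * before the weekly (604800 s) modulo yields that value.
		 */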
2921 sync_time = (time_second - (3 * 86400)) % 604800;
2922 rv = twa_set_param(sc, TWA_PARAM_TIME_TABLE,
2923 TWA_PARAM_TIME_SchedulerTime, 4,
2924 &sync_time, twa_aen_callback);
2925 #ifdef DIAGNOSTIC
2926 if (rv != 0)
2927 printf("%s: unable to sync time with ctlr\n",
2928 sc->twa_dv.dv_xname);
2929 #endif
2930 break;
2931
2932 case TWA_AEN_QUEUE_EMPTY:
2933 break;
2934
2935 default:
2936 /* Queue the event. */
2937 event = sc->twa_aen_queue[sc->twa_aen_head];
2938 if (event->retrieved == TWA_AEN_NOT_RETRIEVED)
2939 sc->twa_aen_queue_overflow = TRUE;
2940 event->severity =
2941 cmd_hdr->status_block.substatus_block.severity;
2942 event->time_stamp_sec = time_second;
2943 event->aen_code = aen_code;
2944 event->retrieved = TWA_AEN_NOT_RETRIEVED;
2945 event->sequence_id = ++(sc->twa_current_sequence_id);
2946 cmd_hdr->err_specific_desc[sizeof(cmd_hdr->err_specific_desc) - 1] = '\0';
2947 event->parameter_len = strlen(cmd_hdr->err_specific_desc);
2948 memcpy(event->parameter_data, cmd_hdr->err_specific_desc,
2949 event->parameter_len);
2950
2951 if (event->severity < TWA_AEN_SEVERITY_DEBUG) {
2952 printf("%s: AEN 0x%04X: %s: %s: %s\n",
2953 sc->twa_dv.dv_xname,
2954 aen_code,
2955 twa_aen_severity_table[event->severity],
2956 twa_find_msg_string(twa_aen_table, aen_code),
2957 event->parameter_data);
2958 }
2959
2960 if ((sc->twa_aen_head + 1) == TWA_Q_LENGTH)
2961 sc->twa_aen_queue_wrapped = TRUE;
2962 sc->twa_aen_head = (sc->twa_aen_head + 1) % TWA_Q_LENGTH;
2963 break;
2964 } /* switch */
2965 splx(s);
2966
2967 return (aen_code);
2968 }
2969
2970 /*
2971 * Function name: twa_find_aen
2972 * Description: Reports whether a given AEN ever occurred.
2973 *
2974 * Input: sc -- ptr to per ctlr structure
2975 * aen_code-- AEN to look for
2976 * Output: None
2977 * Return value: 0 -- success
2978 * non-zero-- failure
2979 */
2980 static int
2981 twa_find_aen(struct twa_softc *sc, uint16_t aen_code)
2982 {
2983 uint32_t last_index;
2984 int s;
2985 int i;
2986
2987 s = splbio();
2988
2989 if (sc->twa_aen_queue_wrapped)
2990 last_index = sc->twa_aen_head;
2991 else
2992 last_index = 0;
2993
2994 i = sc->twa_aen_head;
2995 do {
2996 i = (i + TWA_Q_LENGTH - 1) % TWA_Q_LENGTH;
2997 if ((sc->twa_aen_queue[i])->aen_code == aen_code) {
2998 splx(s);
2999 return(0);
3000 }
3001 } while (i != last_index);
3002
3003 splx(s);
3004 return(1);
3005 }
3006
3007 static inline void
3008 twa_request_init(struct twa_request *tr, int flags)
3009 {
3010 tr->tr_data = NULL;
3011 tr->tr_real_data = NULL;
3012 tr->tr_length = 0;
3013 tr->tr_real_length = 0;
3014 tr->tr_status = TWA_CMD_SETUP;/* command is in setup phase */
3015 tr->tr_flags = flags;
3016 tr->tr_error = 0;
3017 tr->tr_callback = NULL;
3018 tr->tr_cmd_pkt_type = 0;
3019 tr->bp = 0;
3020
3021 /*
3022 * Look at the status field in the command packet to see how
3023 * it completed the last time it was used, and zero out only
3024 * the portions that might have changed. Note that we don't
3025 * care to zero out the sglist.
3026 */
3027 if (tr->tr_command->command.cmd_pkt_9k.status)
3028 memset(tr->tr_command, 0,
3029 sizeof(struct twa_command_header) + 28);
3030 else
3031 memset(&(tr->tr_command->command), 0, 28);
3032 }
3033
3034 struct twa_request *
3035 twa_get_request_wait(struct twa_softc *sc, int flags)
3036 {
3037 struct twa_request *tr;
3038 int s;
3039
3040 KASSERT((flags & TWA_CMD_AEN) == 0);
3041
3042 s = splbio();
3043 while ((tr = TAILQ_FIRST(&sc->twa_free)) == NULL) {
3044 sc->twa_sc_flags |= TWA_STATE_REQUEST_WAIT;
3045 (void) tsleep(&sc->twa_free, PRIBIO, "twaccb", hz);
3046 }
3047 TAILQ_REMOVE(&sc->twa_free, tr, tr_link);
3048
3049 splx(s);
3050
3051 twa_request_init(tr, flags);
3052
3053 return(tr);
3054 }
3055
3056 struct twa_request *
3057 twa_get_request(struct twa_softc *sc, int flags)
3058 {
3059 int s;
3060 struct twa_request *tr;
3061
3062 /* Get a free request packet. */
3063 s = splbio();
3064 if (__predict_false((flags & TWA_CMD_AEN) != 0)) {
3065
3066 if ((sc->sc_twa_request->tr_flags & TWA_CMD_AEN_BUSY) == 0) {
3067 tr = sc->sc_twa_request;
3068 flags |= TWA_CMD_AEN_BUSY;
3069 } else {
3070 splx(s);
3071 return (NULL);
3072 }
3073 } else {
3074 if (__predict_false((tr =
3075 TAILQ_FIRST(&sc->twa_free)) == NULL)) {
3076 splx(s);
3077 return (NULL);
3078 }
3079 TAILQ_REMOVE(&sc->twa_free, tr, tr_link);
3080 }
3081 splx(s);
3082
3083 twa_request_init(tr, flags);
3084
3085 return(tr);
3086 }
3087
3088 /*
3089 * Print some information about the controller
3090 */
3091 static void
3092 twa_describe_controller(struct twa_softc *sc)
3093 {
3094 struct twa_param_9k *p[10];
3095 int i, rv = 0;
3096 uint32_t dsize;
3097 uint8_t ports;
3098
3099 	memset(p, 0, sizeof(p));
3100
3101 /* Get the port count. */
3102 rv |= twa_get_param(sc, TWA_PARAM_CONTROLLER,
3103 TWA_PARAM_CONTROLLER_PortCount, 1, NULL, &p[0]);
3104
3105 /* get version strings */
3106 rv |= twa_get_param(sc, TWA_PARAM_VERSION, TWA_PARAM_VERSION_FW,
3107 16, NULL, &p[1]);
3108 rv |= twa_get_param(sc, TWA_PARAM_VERSION, TWA_PARAM_VERSION_BIOS,
3109 16, NULL, &p[2]);
3110 rv |= twa_get_param(sc, TWA_PARAM_VERSION, TWA_PARAM_VERSION_Mon,
3111 16, NULL, &p[3]);
3112 rv |= twa_get_param(sc, TWA_PARAM_VERSION, TWA_PARAM_VERSION_PCBA,
3113 8, NULL, &p[4]);
3114 rv |= twa_get_param(sc, TWA_PARAM_VERSION, TWA_PARAM_VERSION_ATA,
3115 8, NULL, &p[5]);
3116 rv |= twa_get_param(sc, TWA_PARAM_VERSION, TWA_PARAM_VERSION_PCI,
3117 8, NULL, &p[6]);
3118 rv |= twa_get_param(sc, TWA_PARAM_DRIVESUMMARY, TWA_PARAM_DRIVESTATUS,
3119 16, NULL, &p[7]);
3120
3121 if (rv) {
3122 /* some error occurred */
3123 aprint_error("%s: failed to fetch version information\n",
3124 sc->twa_dv.dv_xname);
3125 goto bail;
3126 }
3127
3128 ports = *(uint8_t *)(p[0]->data);
3129
3130 aprint_normal("%s: %d ports, Firmware %.16s, BIOS %.16s\n",
3131 sc->twa_dv.dv_xname, ports,
3132 p[1]->data, p[2]->data);
3133
3134 aprint_verbose("%s: Monitor %.16s, PCB %.8s, Achip %.8s, Pchip %.8s\n",
3135 sc->twa_dv.dv_xname,
3136 p[3]->data, p[4]->data,
3137 p[5]->data, p[6]->data);
3138
3139 for (i = 0; i < ports; i++) {
3140
3141 if ((*((char *)(p[7]->data + i)) & TWA_DRIVE_DETECTED) == 0)
3142 continue;
3143
3144 rv = twa_get_param(sc, TWA_PARAM_DRIVE_TABLE + i,
3145 TWA_PARAM_DRIVEMODELINDEX,
3146 TWA_PARAM_DRIVEMODEL_LENGTH, NULL, &p[8]);
3147
3148 if (rv != 0) {
3149 aprint_error("%s: unable to get drive model for port"
3150 " %d\n", sc->twa_dv.dv_xname, i);
3151 continue;
3152 }
3153
3154 rv = twa_get_param(sc, TWA_PARAM_DRIVE_TABLE + i,
3155 TWA_PARAM_DRIVESIZEINDEX,
3156 TWA_PARAM_DRIVESIZE_LENGTH, NULL, &p[9]);
3157
3158 if (rv != 0) {
3159 aprint_error("%s: unable to get drive size"
3160 " for port %d\n", sc->twa_dv.dv_xname,
3161 i);
3162 free(p[8], M_DEVBUF);
3163 continue;
3164 }
3165
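		/* The drive size is reported in 512-byte sectors (2048 per MB). */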
3166 dsize = *(uint32_t *)(p[9]->data);
3167
3168 aprint_verbose("%s: port %d: %.40s %d MB\n",
3169 sc->twa_dv.dv_xname, i, p[8]->data, dsize / 2048);
3170
3171 if (p[8])
3172 free(p[8], M_DEVBUF);
3173 if (p[9])
3174 free(p[9], M_DEVBUF);
3175 }
3176 bail:
3177 if (p[0])
3178 free(p[0], M_DEVBUF);
3179 if (p[1])
3180 free(p[1], M_DEVBUF);
3181 if (p[2])
3182 free(p[2], M_DEVBUF);
3183 if (p[3])
3184 free(p[3], M_DEVBUF);
3185 if (p[4])
3186 free(p[4], M_DEVBUF);
3187 if (p[5])
3188 free(p[5], M_DEVBUF);
3189 if (p[6])
3190 free(p[6], M_DEVBUF);
3191 }
3192
3193 /*
3194 * Function name: twa_check_ctlr_state
3195 * Description: Makes sure that the fw status register reports a
3196 * proper status.
3197 *
3198 * Input: sc -- ptr to per ctlr structure
3199 * status_reg -- value in the status register
3200 * Output: None
3201 * Return value: 0 -- no errors
3202 * non-zero-- errors
3203 */
3204 static int
3205 twa_check_ctlr_state(struct twa_softc *sc, uint32_t status_reg)
3206 {
3207 int result = 0;
3208 struct timeval t1;
3209 static time_t last_warning[2] = {0, 0};
3210
3211 /* Check if the 'micro-controller ready' bit is not set. */
3212 if ((status_reg & TWA_STATUS_EXPECTED_BITS) !=
3213 TWA_STATUS_EXPECTED_BITS) {
3214
3215 microtime(&t1);
3216
3217 last_warning[0] += (5 * 1000 * 100);
3218
3219 if (t1.tv_usec > last_warning[0]) {
3220 microtime(&t1);
3221 last_warning[0] = t1.tv_usec;
3222 }
3223 result = 1;
3224 }
3225
3226 /* Check if any error bits are set. */
3227 if ((status_reg & TWA_STATUS_UNEXPECTED_BITS) != 0) {
3228
3229 microtime(&t1);
3230 last_warning[1] += (5 * 1000 * 100);
3231 if (t1.tv_usec > last_warning[1]) {
3232 microtime(&t1);
3233 last_warning[1] = t1.tv_usec;
3234 }
3235 if (status_reg & TWA_STATUS_PCI_PARITY_ERROR_INTERRUPT) {
3236 aprint_error("%s: clearing PCI parity error "
3237 "re-seat/move/replace card.\n",
3238 sc->twa_dv.dv_xname);
3239 twa_outl(sc, TWA_CONTROL_REGISTER_OFFSET,
3240 TWA_CONTROL_CLEAR_PARITY_ERROR);
3241 pci_conf_write(sc->pc, sc->tag,
3242 PCI_COMMAND_STATUS_REG,
3243 TWA_PCI_CONFIG_CLEAR_PARITY_ERROR);
3244 result = 1;
3245 }
3246 if (status_reg & TWA_STATUS_PCI_ABORT_INTERRUPT) {
3247 aprint_error("%s: clearing PCI abort\n",
3248 sc->twa_dv.dv_xname);
3249 twa_outl(sc, TWA_CONTROL_REGISTER_OFFSET,
3250 TWA_CONTROL_CLEAR_PCI_ABORT);
3251 pci_conf_write(sc->pc, sc->tag,
3252 PCI_COMMAND_STATUS_REG,
3253 TWA_PCI_CONFIG_CLEAR_PCI_ABORT);
3254 result = 1;
3255 }
3256 if (status_reg & TWA_STATUS_QUEUE_ERROR_INTERRUPT) {
3257 aprint_error("%s: clearing controller queue error\n",
3258 sc->twa_dv.dv_xname);
3259 twa_outl(sc, TWA_CONTROL_REGISTER_OFFSET,
3260 TWA_CONTROL_CLEAR_PCI_ABORT);
3261 result = 1;
3262 }
3263 if (status_reg & TWA_STATUS_SBUF_WRITE_ERROR) {
3264 aprint_error("%s: clearing SBUF write error\n",
3265 sc->twa_dv.dv_xname);
3266 twa_outl(sc, TWA_CONTROL_REGISTER_OFFSET,
3267 TWA_CONTROL_CLEAR_SBUF_WRITE_ERROR);
3268 result = 1;
3269 }
3270 if (status_reg & TWA_STATUS_MICROCONTROLLER_ERROR) {
3271 aprint_error("%s: micro-controller error\n",
3272 sc->twa_dv.dv_xname);
3273 result = 1;
3274 }
3275 }
3276 return(result);
3277 }
3278