1 /* $wasabi: twa.c,v 1.24 2006/04/27 17:12:39 wrstuden Exp $ */
2 /*
3 * Copyright (c) 2004-2006 Wasabi Systems, Inc.
4 * All rights reserved.
5 *
6 * Your Wasabi Systems License Agreement specifies the terms and
7 * conditions for use and redistribution.
8 */
9
10 /*-
11 * Copyright (c) 2004 The NetBSD Foundation, Inc.
12 * All rights reserved.
13 *
14 * This code is derived from software contributed to The NetBSD Foundation
15 * by Jordan Rhody of Wasabi Systems, Inc.
16 *
17 * Redistribution and use in source and binary forms, with or without
18 * modification, are permitted provided that the following conditions
19 * are met:
20 * 1. Redistributions of source code must retain the above copyright
21 * notice, this list of conditions and the following disclaimer.
22 * 2. Redistributions in binary form must reproduce the above copyright
23 * notice, this list of conditions and the following disclaimer in the
24 * documentation and/or other materials provided with the distribution.
25 * 3. All advertising materials mentioning features or use of this software
26 * must display the following acknowledgement:
27 * This product includes software developed by the NetBSD
28 * Foundation, Inc. and its contributors.
29 * 4. Neither the name of The NetBSD Foundation nor the names of its
30 * contributors may be used to endorse or promote products derived
31 * from this software without specific prior written permission.
32 *
33 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
34 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
35 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
36 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
37 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
38 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
39 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
40 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
41 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
42 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
43 * POSSIBILITY OF SUCH DAMAGE.
44 */
45
46 /*-
47 * Copyright (c) 2003-04 3ware, Inc.
48 * Copyright (c) 2000 Michael Smith
49 * Copyright (c) 2000 BSDi
50 * All rights reserved.
51 *
52 * Redistribution and use in source and binary forms, with or without
53 * modification, are permitted provided that the following conditions
54 * are met:
55 * 1. Redistributions of source code must retain the above copyright
56 * notice, this list of conditions and the following disclaimer.
57 * 2. Redistributions in binary form must reproduce the above copyright
58 * notice, this list of conditions and the following disclaimer in the
59 * documentation and/or other materials provided with the distribution.
60 *
61 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
62 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
63 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
64 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
65 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
66 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
67 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
68 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
69 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
70 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
71 * SUCH DAMAGE.
72 *
73 * $FreeBSD: src/sys/dev/twa/twa.c,v 1.2 2004/04/02 15:09:57 des Exp $
74 */
75
76 /*
77 * 3ware driver for 9000 series storage controllers.
78 *
79 * Author: Vinod Kashyap
80 */
81
82 #include <sys/cdefs.h>
83 __KERNEL_RCSID(0, "$wasabi: twa.c,v 1.24 2006/04/27 17:12:39 wrstuden Exp $");
84
85 #include <sys/param.h>
86 #include <sys/systm.h>
87 #include <sys/kernel.h>
88 #include <sys/device.h>
89 #include <sys/queue.h>
90 #include <sys/proc.h>
91 #include <sys/buf.h>
92 #include <sys/bufq.h>
93 #include <sys/endian.h>
94 #include <sys/malloc.h>
95 #include <sys/conf.h>
96 #include <sys/disk.h>
97 #include <sys/syslog.h>
98
99 #include <uvm/uvm_extern.h>
100
101 #include <machine/bswap.h>
102 #include <machine/bus.h>
103
104 #include <dev/pci/pcireg.h>
105 #include <dev/pci/pcivar.h>
106 #include <dev/pci/pcidevs.h>
107 #include <dev/pci/twareg.h>
108 #include <dev/pci/twavar.h>
109 #include <dev/pci/twaio.h>
110
111 #include <dev/scsipi/scsipi_all.h>
112 #include <dev/scsipi/scsipi_disk.h>
113 #include <dev/scsipi/scsipiconf.h>
114 #include <dev/scsipi/scsi_spc.h>
115
116 #include <dev/ldvar.h>
117
118 #include "locators.h"
119
120 #define PCI_CBIO 0x10
121
122 static int twa_fetch_aen(struct twa_softc *);
123 static void twa_aen_callback(struct twa_request *);
124 static int twa_find_aen(struct twa_softc *sc, u_int16_t);
125 static uint16_t twa_enqueue_aen(struct twa_softc *sc,
126 struct twa_command_header *);
127
128 static void twa_attach(struct device *, struct device *, void *);
129 static void twa_shutdown(void *);
130 static int twa_init_connection(struct twa_softc *, u_int16_t, u_int32_t,
131 u_int16_t, u_int16_t, u_int16_t, u_int16_t, u_int16_t *,
132 u_int16_t *, u_int16_t *, u_int16_t *, u_int32_t *);
133 static int twa_intr(void *);
134 static int twa_match(struct device *, struct cfdata *, void *);
135 static int twa_reset(struct twa_softc *);
136
137 static int twa_print(void *, const char *);
138 static int twa_submatch(struct device *, struct cfdata *,
139 const int *, void *);
140 static int twa_soft_reset(struct twa_softc *);
141
142 static int twa_check_ctlr_state(struct twa_softc *, u_int32_t);
143 static int twa_get_param(struct twa_softc *, int, int, size_t,
144 void (* callback)(struct twa_request *),
145 struct twa_param_9k **);
146 static int twa_set_param(struct twa_softc *, int, int, int, void *,
147 void (* callback)(struct twa_request *));
148 static void twa_describe_controller(struct twa_softc *);
149 static int twa_wait_status(struct twa_softc *, u_int32_t, u_int32_t);
150 static int twa_done(struct twa_softc *);
151 #if 0
152 static int twa_flash_firmware(struct twa_softc *sc);
153 static int twa_hard_reset(struct twa_softc *sc);
154 #endif
155
156 dev_type_open(twaopen);
157 dev_type_close(twaclose);
158 dev_type_ioctl(twaioctl);
159
160 const struct cdevsw twa_cdevsw = {
161 twaopen, twaclose, noread, nowrite, twaioctl,
162 nostop, notty, nopoll, nommap,
163 };
164
165 extern struct cfdriver twa_cd;
166 extern uint32_t twa_fw_img_size;
167 extern uint8_t twa_fw_img[];
168
169 CFATTACH_DECL(twa, sizeof(struct twa_softc),
170 twa_match, twa_attach, NULL, NULL);
171
172 /* AEN messages. */
173 static const struct twa_message twa_aen_table[] = {
174 {0x0000, "AEN queue empty"},
175 {0x0001, "Controller reset occurred"},
176 {0x0002, "Degraded unit detected"},
177 {0x0003, "Controller error occurred"},
178 {0x0004, "Background rebuild failed"},
179 {0x0005, "Background rebuild done"},
180 {0x0006, "Incomplete unit detected"},
181 {0x0007, "Background initialize done"},
182 {0x0008, "Unclean shutdown detected"},
183 {0x0009, "Drive timeout detected"},
184 {0x000A, "Drive error detected"},
185 {0x000B, "Rebuild started"},
186 {0x000C, "Background initialize started"},
187 {0x000D, "Entire logical unit was deleted"},
188 {0x000E, "Background initialize failed"},
189 {0x000F, "SMART attribute exceeded threshold"},
190 {0x0010, "Power supply reported AC under range"},
191 {0x0011, "Power supply reported DC out of range"},
192 {0x0012, "Power supply reported a malfunction"},
193 {0x0013, "Power supply predicted malfunction"},
194 {0x0014, "Battery charge is below threshold"},
195 {0x0015, "Fan speed is below threshold"},
196 {0x0016, "Temperature sensor is above threshold"},
197 {0x0017, "Power supply was removed"},
198 {0x0018, "Power supply was inserted"},
199 {0x0019, "Drive was removed from a bay"},
200 {0x001A, "Drive was inserted into a bay"},
201 {0x001B, "Drive bay cover door was opened"},
202 {0x001C, "Drive bay cover door was closed"},
203 {0x001D, "Product case was opened"},
204 {0x0020, "Prepare for shutdown (power-off)"},
205 {0x0021, "Downgrade UDMA mode to lower speed"},
206 {0x0022, "Upgrade UDMA mode to higher speed"},
207 {0x0023, "Sector repair completed"},
208 {0x0024, "Sbuf memory test failed"},
209 {0x0025, "Error flushing cached write data to disk"},
210 {0x0026, "Drive reported data ECC error"},
211 {0x0027, "DCB has checksum error"},
212 {0x0028, "DCB version is unsupported"},
213 {0x0029, "Background verify started"},
214 {0x002A, "Background verify failed"},
215 {0x002B, "Background verify done"},
216 {0x002C, "Bad sector overwritten during rebuild"},
217 {0x002E, "Replace failed because replacement drive too small"},
218 {0x002F, "Verify failed because array was never initialized"},
219 {0x0030, "Unsupported ATA drive"},
220 {0x0031, "Synchronize host/controller time"},
221 {0x0032, "Spare capacity is inadequate for some units"},
222 {0x0033, "Background migration started"},
223 {0x0034, "Background migration failed"},
224 {0x0035, "Background migration done"},
225 {0x0036, "Verify detected and fixed data/parity mismatch"},
226 {0x0037, "SO-DIMM incompatible"},
227 {0x0038, "SO-DIMM not detected"},
228 {0x0039, "Corrected Sbuf ECC error"},
229 {0x003A, "Drive power on reset detected"},
230 {0x003B, "Background rebuild paused"},
231 {0x003C, "Background initialize paused"},
232 {0x003D, "Background verify paused"},
233 {0x003E, "Background migration paused"},
234 {0x003F, "Corrupt flash file system detected"},
235 {0x0040, "Flash file system repaired"},
236 {0x0041, "Unit number assignments were lost"},
237 {0x0042, "Error during read of primary DCB"},
238 {0x0043, "Latent error found in backup DCB"},
239 {0x0044, "Battery voltage is normal"},
240 {0x0045, "Battery voltage is low"},
241 {0x0046, "Battery voltage is high"},
242 {0x0047, "Battery voltage is too low"},
243 {0x0048, "Battery voltage is too high"},
244 {0x0049, "Battery temperature is normal"},
245 {0x004A, "Battery temperature is low"},
246 {0x004B, "Battery temperature is high"},
247 {0x004C, "Battery temperature is too low"},
248 {0x004D, "Battery temperature is too high"},
249 {0x004E, "Battery capacity test started"},
250 {0x004F, "Cache synchronization skipped"},
251 {0x0050, "Battery capacity test completed"},
252 {0x0051, "Battery health check started"},
253 {0x0052, "Battery health check completed"},
254 {0x0053, "Need to do a capacity test"},
255 {0x0054, "Charge termination voltage is at high level"},
256 {0x0055, "Battery charging started"},
257 {0x0056, "Battery charging completed"},
258 {0x0057, "Battery charging fault"},
259 {0x0058, "Battery capacity is below warning level"},
260 {0x0059, "Battery capacity is below error level"},
261 {0x005A, "Battery is present"},
262 {0x005B, "Battery is not present"},
263 {0x005C, "Battery is weak"},
264 {0x005D, "Battery health check failed"},
265 {0x005E, "Cache synchronized after power fail"},
266 {0x005F, "Cache synchronization failed; some data lost"},
267 {0x0060, "Bad cache meta data checksum"},
268 {0x0061, "Bad cache meta data signature"},
269 {0x0062, "Cache meta data restore failed"},
270 {0x0063, "BBU not found after power fail"},
271 {0x00FC, "Recovered/finished array membership update"},
272 {0x00FD, "Handler lockup"},
273 {0x00FE, "Retrying PCI transfer"},
274 {0x00FF, "AEN queue is full"},
275 {0xFFFFFFFF, (char *)NULL}
276 };
277
278 /* AEN severity table. */
279 static const char *twa_aen_severity_table[] = {
280 "None",
281 "ERROR",
282 "WARNING",
283 "INFO",
284 "DEBUG",
285 (char *)NULL
286 };
287
288 /* Error messages. */
289 static const struct twa_message twa_error_table[] = {
290 {0x0100, "SGL entry contains zero data"},
291 {0x0101, "Invalid command opcode"},
292 {0x0102, "SGL entry has unaligned address"},
293 {0x0103, "SGL size does not match command"},
294 {0x0104, "SGL entry has illegal length"},
295 {0x0105, "Command packet is not aligned"},
296 {0x0106, "Invalid request ID"},
297 {0x0107, "Duplicate request ID"},
298 {0x0108, "ID not locked"},
299 {0x0109, "LBA out of range"},
300 {0x010A, "Logical unit not supported"},
301 {0x010B, "Parameter table does not exist"},
302 {0x010C, "Parameter index does not exist"},
303 {0x010D, "Invalid field in CDB"},
304 {0x010E, "Specified port has invalid drive"},
305 {0x010F, "Parameter item size mismatch"},
306 {0x0110, "Failed memory allocation"},
307 {0x0111, "Memory request too large"},
308 {0x0112, "Out of memory segments"},
309 {0x0113, "Invalid address to deallocate"},
310 {0x0114, "Out of memory"},
311 {0x0115, "Out of heap"},
312 {0x0120, "Double degrade"},
313 {0x0121, "Drive not degraded"},
314 {0x0122, "Reconstruct error"},
315 {0x0123, "Replace not accepted"},
316 {0x0124, "Replace drive capacity too small"},
317 {0x0125, "Sector count not allowed"},
318 {0x0126, "No spares left"},
319 {0x0127, "Reconstruct error"},
320 {0x0128, "Unit is offline"},
321 {0x0129, "Cannot update status to DCB"},
322 {0x0130, "Invalid stripe handle"},
323 {0x0131, "Handle that was not locked"},
324 {0x0132, "Handle that was not empty"},
325 {0x0133, "Handle has different owner"},
326 {0x0140, "IPR has parent"},
327 {0x0150, "Illegal Pbuf address alignment"},
328 {0x0151, "Illegal Pbuf transfer length"},
329 {0x0152, "Illegal Sbuf address alignment"},
330 {0x0153, "Illegal Sbuf transfer length"},
331 {0x0160, "Command packet too large"},
332 {0x0161, "SGL exceeds maximum length"},
333 {0x0162, "SGL has too many entries"},
334 {0x0170, "Insufficient resources for rebuilder"},
335 {0x0171, "Verify error (data != parity)"},
336 {0x0180, "Requested segment not in directory of this DCB"},
337 {0x0181, "DCB segment has unsupported version"},
338 {0x0182, "DCB segment has checksum error"},
339 {0x0183, "DCB support (settings) segment invalid"},
340 {0x0184, "DCB UDB (unit descriptor block) segment invalid"},
341 {0x0185, "DCB GUID (globally unique identifier) segment invalid"},
342 {0x01A0, "Could not clear Sbuf"},
343 {0x01C0, "Flash identify failed"},
344 {0x01C1, "Flash out of bounds"},
345 {0x01C2, "Flash verify error"},
346 {0x01C3, "Flash file object not found"},
347 {0x01C4, "Flash file already present"},
348 {0x01C5, "Flash file system full"},
349 {0x01C6, "Flash file not present"},
350 {0x01C7, "Flash file size error"},
351 {0x01C8, "Bad flash file checksum"},
352 {0x01CA, "Corrupt flash file system detected"},
353 {0x01D0, "Invalid field in parameter list"},
354 {0x01D1, "Parameter list length error"},
355 {0x01D2, "Parameter item is not changeable"},
356 {0x01D3, "Parameter item is not saveable"},
357 {0x0200, "UDMA CRC error"},
358 {0x0201, "Internal CRC error"},
359 {0x0202, "Data ECC error"},
360 {0x0203, "ADP level 1 error"},
361 {0x0204, "Port timeout"},
362 {0x0205, "Drive power on reset"},
363 {0x0206, "ADP level 2 error"},
364 {0x0207, "Soft reset failed"},
365 {0x0208, "Drive not ready"},
366 {0x0209, "Unclassified port error"},
367 {0x020A, "Drive aborted command"},
368 {0x0210, "Internal CRC error"},
369 {0x0211, "Host PCI bus abort"},
370 {0x0212, "Host PCI parity error"},
371 {0x0213, "Port handler error"},
372 {0x0214, "Token interrupt count error"},
373 {0x0215, "Timeout waiting for PCI transfer"},
374 {0x0216, "Corrected buffer ECC"},
375 {0x0217, "Uncorrected buffer ECC"},
376 {0x0230, "Unsupported command during flash recovery"},
377 {0x0231, "Next image buffer expected"},
378 {0x0232, "Binary image architecture incompatible"},
379 {0x0233, "Binary image has no signature"},
380 {0x0234, "Binary image has bad checksum"},
381 {0x0235, "Image downloaded overflowed buffer"},
382 {0x0240, "I2C device not found"},
383 {0x0241, "I2C transaction aborted"},
384 {0x0242, "SO-DIMM parameter(s) incompatible using defaults"},
385 {0x0243, "SO-DIMM unsupported"},
386 {0x0248, "SPI transfer status error"},
387 {0x0249, "SPI transfer timeout error"},
388 {0x0250, "Invalid unit descriptor size in CreateUnit"},
389 {0x0251, "Unit descriptor size exceeds data buffer in CreateUnit"},
390 {0x0252, "Invalid value in CreateUnit descriptor"},
391 {0x0253, "Inadequate disk space to support descriptor in CreateUnit"},
392 {0x0254, "Unable to create data channel for this unit descriptor"},
393 {0x0255, "CreateUnit descriptor specifies a drive already in use"},
394 {0x0256, "Unable to write configuration to all disks during CreateUnit"},
395 {0x0257, "CreateUnit does not support this descriptor version"},
396 {0x0258, "Invalid subunit for RAID 0 or 5 in CreateUnit"},
397 {0x0259, "Too many descriptors in CreateUnit"},
398 {0x025A, "Invalid configuration specified in CreateUnit descriptor"},
399 {0x025B, "Invalid LBA offset specified in CreateUnit descriptor"},
400 {0x025C, "Invalid stripelet size specified in CreateUnit descriptor"},
401 {0x0260, "SMART attribute exceeded threshold"},
402 {0xFFFFFFFF, (char *)NULL}
403 };
404
405 struct twa_pci_identity {
406 uint32_t vendor_id;
407 uint32_t product_id;
408 const char *name;
409 };
410
411 static const struct twa_pci_identity pci_twa_products[] = {
412 { PCI_VENDOR_3WARE,
413 PCI_PRODUCT_3WARE_9000,
414 "3ware 9000 series",
415 },
416 { PCI_VENDOR_3WARE,
417 PCI_PRODUCT_3WARE_9550,
418 "3ware 9550SX series",
419 },
420 { 0,
421 0,
422 NULL,
423 },
424 };
425
426
427 static inline void
428 twa_outl(struct twa_softc *sc, int off, u_int32_t val)
429 {
430 bus_space_write_4(sc->twa_bus_iot, sc->twa_bus_ioh, off, val);
431 bus_space_barrier(sc->twa_bus_iot, sc->twa_bus_ioh, off, 4,
432 BUS_SPACE_BARRIER_WRITE);
433 }
434
435
436 static inline u_int32_t twa_inl(struct twa_softc *sc, int off)
437 {
438 bus_space_barrier(sc->twa_bus_iot, sc->twa_bus_ioh, off, 4,
439 BUS_SPACE_BARRIER_WRITE | BUS_SPACE_BARRIER_READ);
440 return (bus_space_read_4(sc->twa_bus_iot, sc->twa_bus_ioh, off));
441 }
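
/*
 * Usage sketch (illustrative only, not part of the original driver): the
 * two accessors above are the only way controller registers are touched,
 * so a typical status poll elsewhere in this file has the shape:
 *
 *	u_int32_t status_reg;
 *
 *	status_reg = twa_inl(sc, TWA_STATUS_REGISTER_OFFSET);
 *	if ((status_reg & TWA_STATUS_RESPONSE_QUEUE_EMPTY) == 0) {
 *		... read the next entry with
 *		    twa_inl(sc, TWA_RESPONSE_QUEUE_OFFSET) ...
 *	}
 *
 * The barriers in twa_inl()/twa_outl() keep such register accesses
 * ordered with respect to one another.
 */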
442
443 void
444 twa_request_wait_handler(struct twa_request *tr)
445 {
446 wakeup(tr);
447 }
448
449
450 static int
451 twa_match(struct device *parent, struct cfdata *cfdata, void *aux)
452 {
453 int i;
454 struct pci_attach_args *pa = aux;
455 const struct twa_pci_identity *entry = 0;
456
457 if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_3WARE) {
458 for (i = 0; (pci_twa_products[i].product_id); i++) {
459 entry = &pci_twa_products[i];
460 if (entry->product_id == PCI_PRODUCT(pa->pa_id)) {
461 aprint_normal("%s: (rev. 0x%02x)\n",
462 entry->name, PCI_REVISION(pa->pa_class));
463 return (1);
464 }
465 }
466 }
467 return (0);
468 }
469
470
471 static const char *
472 twa_find_msg_string(const struct twa_message *table, u_int16_t code)
473 {
474 int i;
475
476 for (i = 0; table[i].message != NULL; i++)
477 if (table[i].code == code)
478 return(table[i].message);
479
480 return(table[i].message);
481 }
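
/*
 * Usage sketch (illustrative only, not part of the original driver):
 * translating an AEN code into readable text with twa_find_msg_string()
 * and the tables above.  The function name and the aen_code argument are
 * hypothetical; real callers obtain the code from a command header.
 */
#if 0
static void
twa_report_aen_example(struct twa_softc *sc, u_int16_t aen_code)
{
	const char *msg;

	msg = twa_find_msg_string(twa_aen_table, aen_code);
	if (msg == NULL)		/* table is NULL-terminated */
		msg = "Unknown AEN";
	printf("%s: AEN 0x%04x: %s\n", sc->twa_dv.dv_xname, aen_code, msg);
}
#endif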
482
483
484 void
485 twa_release_request(struct twa_request *tr)
486 {
487 int s;
488 struct twa_softc *sc;
489
490 sc = tr->tr_sc;
491
492 if ((tr->tr_flags & TWA_CMD_AEN) == 0) {
493 s = splbio();
494 TAILQ_INSERT_TAIL(&tr->tr_sc->twa_free, tr, tr_link);
495 splx(s);
496 if (__predict_false((tr->tr_sc->twa_sc_flags &
497 TWA_STATE_REQUEST_WAIT) != 0)) {
498 tr->tr_sc->twa_sc_flags &= ~TWA_STATE_REQUEST_WAIT;
499 wakeup(&sc->twa_free);
500 }
501 } else
502 tr->tr_flags &= ~TWA_CMD_AEN_BUSY;
503 }
504
505
506 static void
507 twa_unmap_request(struct twa_request *tr)
508 {
509 struct twa_softc *sc = tr->tr_sc;
510 u_int8_t cmd_status;
511
512 /* If the command involved data, unmap that too. */
513 if (tr->tr_data != NULL) {
514 if (tr->tr_cmd_pkt_type & TWA_CMD_PKT_TYPE_9K)
515 cmd_status = tr->tr_command->command.cmd_pkt_9k.status;
516 else
517 cmd_status =
518 tr->tr_command->command.cmd_pkt_7k.generic.status;
519
520 if (tr->tr_flags & TWA_CMD_DATA_OUT) {
521 bus_dmamap_sync(tr->tr_sc->twa_dma_tag, tr->tr_dma_map,
522 0, tr->tr_length, BUS_DMASYNC_POSTREAD);
523 /*
524 * If we are using a bounce buffer and data was read from the
525 * controller, copy it back into the caller's buffer.
526 */
527 if (tr->tr_flags & TWA_CMD_DATA_COPY_NEEDED)
528 if (cmd_status == 0)
529 memcpy(tr->tr_real_data, tr->tr_data,
530 tr->tr_real_length);
531 }
532 if (tr->tr_flags & TWA_CMD_DATA_IN)
533 bus_dmamap_sync(tr->tr_sc->twa_dma_tag, tr->tr_dma_map,
534 0, tr->tr_length, BUS_DMASYNC_POSTWRITE);
535
536 bus_dmamap_unload(sc->twa_dma_tag, tr->tr_dma_map);
537 }
538
539 /* Free alignment buffer if it was used. */
540 if (tr->tr_flags & TWA_CMD_DATA_COPY_NEEDED) {
541 uvm_km_free(kmem_map, (vaddr_t)tr->tr_data,
542 tr->tr_length, UVM_KMF_WIRED);
543 tr->tr_data = tr->tr_real_data;
544 tr->tr_length = tr->tr_real_length;
545 }
546 }
547
548
549 /*
550 * Function name: twa_wait_request
551 * Description: Sends down a firmware cmd, and waits for the completion,
552 * but NOT in a tight loop.
553 *
554 * Input: tr -- ptr to request pkt
555 * timeout -- max # of seconds to wait before giving up
556 * Output: None
557 * Return value: 0 -- success
558 * non-zero-- failure
559 */
560 static int
561 twa_wait_request(struct twa_request *tr, u_int32_t timeout)
562 {
563 time_t end_time;
564 struct timeval t1;
565 int s, error;
566
567 tr->tr_flags |= TWA_CMD_SLEEP_ON_REQUEST;
568 tr->tr_callback = twa_request_wait_handler;
569 tr->tr_status = TWA_CMD_BUSY;
570
571 if ((error = twa_map_request(tr)))
572 return (error);
573
574 microtime(&t1);
575 end_time = t1.tv_usec +
576 (timeout * 1000 * 100);
577
578 while (tr->tr_status != TWA_CMD_COMPLETE) {
579 if ((error = tr->tr_error))
580 return(error);
581 if ((error = tsleep(tr, PRIBIO, "twawait", timeout * hz)) == 0)
582 {
583 error = (tr->tr_status != TWA_CMD_COMPLETE);
584 break;
585 }
586 if (error == EWOULDBLOCK) {
587 /*
588 * We will reset the controller only if the request has
589 * already been submitted, so as to not lose the
590 * request packet. If a busy request timed out, the
591 * reset will take care of freeing resources. If a
592 * pending request timed out, we will free resources
593 * for that request, right here. So, the caller is
594 * expected to NOT cleanup when ETIMEDOUT is returned.
595 */
596 if (tr->tr_status != TWA_CMD_PENDING &&
597 tr->tr_status != TWA_CMD_COMPLETE)
598 twa_reset(tr->tr_sc);
599 else {
600 /* Request was never submitted. Clean up. */
601 s = splbio();
602 TAILQ_REMOVE(&tr->tr_sc->twa_pending, tr, tr_link);
603 splx(s);
604
605 twa_unmap_request(tr);
606 if (tr->tr_data)
607 free(tr->tr_data, M_DEVBUF);
608
609 twa_release_request(tr);
610 }
611 return(ETIMEDOUT);
612 }
613 /*
614 * Either the request got completed, or we were woken up by a
615 * signal. Calculate the new timeout, in case it was the latter.
616 */
617 microtime(&t1);
618
619 timeout = (end_time - t1.tv_usec) / (1000 * 100);
620 }
621 twa_unmap_request(tr);
622 return(error);
623 }
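
/*
 * Usage sketch (illustrative only, not part of the original driver): the
 * usual shape of a synchronous internal command built on twa_get_request()
 * and twa_wait_request().  The function name is hypothetical and the
 * command-packet setup is elided; note that on ETIMEDOUT the request has
 * already been cleaned up, so the caller must not free it again.
 */
#if 0
static int
twa_sync_cmd_example(struct twa_softc *sc)
{
	struct twa_request *tr;
	int error;

	if ((tr = twa_get_request(sc, 0)) == NULL)
		return (EIO);
	tr->tr_cmd_pkt_type |= TWA_CMD_PKT_TYPE_INTERNAL;
	if ((tr->tr_data = malloc(TWA_SECTOR_SIZE, M_DEVBUF,
	    M_NOWAIT)) == NULL) {
		twa_release_request(tr);
		return (ENOMEM);
	}
	tr->tr_length = TWA_SECTOR_SIZE;
	/* ... fill in tr->tr_command here ... */

	error = twa_wait_request(tr, TWA_REQUEST_TIMEOUT_PERIOD);
	if (error != ETIMEDOUT) {
		free(tr->tr_data, M_DEVBUF);
		twa_release_request(tr);
	}
	return (error);
}
#endif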
624
625
626 /*
627 * Function name: twa_immediate_request
628 * Description: Sends down a firmware cmd, and waits for the completion
629 * in a tight loop.
630 *
631 * Input: tr -- ptr to request pkt
632 * timeout -- max # of seconds to wait before giving up
633 * Output: None
634 * Return value: 0 -- success
635 * non-zero-- failure
636 */
637 static int
638 twa_immediate_request(struct twa_request *tr, u_int32_t timeout)
639 {
640 struct timeval t1;
641 int s = 0, error = 0;
642
643 if ((error = twa_map_request(tr))) {
644 return(error);
645 }
646
647 timeout = (timeout * 10000 * 10);
648
649 microtime(&t1);
650
651 timeout += t1.tv_usec;
652
653 do {
654 if ((error = tr->tr_error))
655 return(error);
656 twa_done(tr->tr_sc);
657 if ((tr->tr_status != TWA_CMD_BUSY) &&
658 (tr->tr_status != TWA_CMD_PENDING)) {
659 twa_unmap_request(tr);
660 return(tr->tr_status != TWA_CMD_COMPLETE);
661 }
662 microtime(&t1);
663 } while (t1.tv_usec <= timeout);
664
665 /*
666 * We will reset the controller only if the request has
667 * already been submitted, so as to not lose the
668 * request packet. If a busy request timed out, the
669 * reset will take care of freeing resources. If a
670 * pending request timed out, we will free resources
671 * for that request, right here. So, the caller is
672 * expected to NOT cleanup when ETIMEDOUT is returned.
673 */
674 if (tr->tr_status != TWA_CMD_PENDING)
675 twa_reset(tr->tr_sc);
676 else {
677 /* Request was never submitted. Clean up. */
678 s = splbio();
679 TAILQ_REMOVE(&tr->tr_sc->twa_pending, tr, tr_link);
680 splx(s);
681 twa_unmap_request(tr);
682 if (tr->tr_data)
683 free(tr->tr_data, M_DEVBUF);
684
685 twa_release_request(tr);
686 }
687 return(ETIMEDOUT);
688 }
689
690
691 static int
692 twa_inquiry(struct twa_request *tr, int lunid)
693 {
694 int error;
695 struct twa_command_9k *tr_9k_cmd;
696
697 if (tr->tr_data == NULL)
698 return (ENOMEM);
699
700 memset(tr->tr_data, 0, TWA_SECTOR_SIZE);
701
702 tr->tr_length = TWA_SECTOR_SIZE;
703 tr->tr_cmd_pkt_type = TWA_CMD_PKT_TYPE_9K;
704 tr->tr_flags |= TWA_CMD_DATA_IN;
705
706 tr_9k_cmd = &tr->tr_command->command.cmd_pkt_9k;
707
708 tr_9k_cmd->command.opcode = TWA_OP_EXECUTE_SCSI_COMMAND;
709 tr_9k_cmd->unit = lunid;
710 tr_9k_cmd->request_id = tr->tr_request_id;
711 tr_9k_cmd->status = 0;
712 tr_9k_cmd->sgl_offset = 16;
713 tr_9k_cmd->sgl_entries = 1;
714 /* create the CDB here */
715 tr_9k_cmd->cdb[0] = INQUIRY;
716 tr_9k_cmd->cdb[1] = ((lunid << 5) & 0x0e);
717 tr_9k_cmd->cdb[4] = 255;
718
719 /* XXX Preset the inquiry data to "LUN not present" before issuing
720 * the command: the 9000 series does not seem to report
721 * SID_QUAL_LU_NOTPRESENT on its own - needs more investigation.
722 */
723 ((struct scsipi_inquiry_data *)tr->tr_data)->device =
724 SID_QUAL_LU_NOTPRESENT;
725
726 error = twa_immediate_request(tr, TWA_REQUEST_TIMEOUT_PERIOD);
727
728 if (((struct scsipi_inquiry_data *)tr->tr_data)->device ==
729 SID_QUAL_LU_NOTPRESENT)
730 error = 1;
731
732 return (error);
733 }
734
735 static int
736 twa_print_inquiry_data(struct twa_softc *sc,
737 struct scsipi_inquiry_data *scsipi)
738 {
739 printf("%s: %s\n", sc->twa_dv.dv_xname, scsipi->vendor);
740
741 return (1);
742 }
743
744
745 static uint64_t
746 twa_read_capacity(struct twa_request *tr, int lunid)
747 {
748 int error;
749 struct twa_command_9k *tr_9k_cmd;
750 uint64_t array_size = 0LL;
751
752 if (tr->tr_data == NULL)
753 return (ENOMEM);
754
755 memset(tr->tr_data, 0, TWA_SECTOR_SIZE);
756
757 tr->tr_length = TWA_SECTOR_SIZE;
758 tr->tr_cmd_pkt_type = TWA_CMD_PKT_TYPE_9K;
759 tr->tr_flags |= TWA_CMD_DATA_OUT;
760
761 tr_9k_cmd = &tr->tr_command->command.cmd_pkt_9k;
762
763 tr_9k_cmd->command.opcode = TWA_OP_EXECUTE_SCSI_COMMAND;
764 tr_9k_cmd->unit = lunid;
765 tr_9k_cmd->request_id = tr->tr_request_id;
766 tr_9k_cmd->status = 0;
767 tr_9k_cmd->sgl_offset = 16;
768 tr_9k_cmd->sgl_entries = 1;
769 /* create the CDB here */
770 tr_9k_cmd->cdb[0] = READ_CAPACITY_16;
771 tr_9k_cmd->cdb[1] = ((lunid << 5) & 0x0e) | SRC16_SERVICE_ACTION;
772
773 error = twa_immediate_request(tr, TWA_REQUEST_TIMEOUT_PERIOD);
774 #if BYTE_ORDER == BIG_ENDIAN
775 array_size = bswap64(_8btol(((struct scsipi_read_capacity_16_data *)
776 tr->tr_data)->addr) + 1);
777 #else
778 array_size = _8btol(((struct scsipi_read_capacity_16_data *)
779 tr->tr_data)->addr) + 1;
780 #endif
781 return (array_size);
782 }
783
784 static int
785 twa_request_sense(struct twa_request *tr, int lunid)
786 {
787 int error = 1;
788 struct twa_command_9k *tr_9k_cmd;
789
790 if (tr->tr_data == NULL)
791 return (error);
792
793 memset(tr->tr_data, 0, TWA_SECTOR_SIZE);
794
795 tr->tr_length = TWA_SECTOR_SIZE;
796 tr->tr_cmd_pkt_type = TWA_CMD_PKT_TYPE_9K;
797 tr->tr_flags |= TWA_CMD_DATA_OUT;
798
799 tr_9k_cmd = &tr->tr_command->command.cmd_pkt_9k;
800
801 tr_9k_cmd->command.opcode = TWA_OP_EXECUTE_SCSI_COMMAND;
802 tr_9k_cmd->unit = lunid;
803 tr_9k_cmd->request_id = tr->tr_request_id;
804 tr_9k_cmd->status = 0;
805 tr_9k_cmd->sgl_offset = 16;
806 tr_9k_cmd->sgl_entries = 1;
807 /* create the CDB here */
808 tr_9k_cmd->cdb[0] = SCSI_REQUEST_SENSE;
809 tr_9k_cmd->cdb[1] = ((lunid << 5) & 0x0e);
810 tr_9k_cmd->cdb[4] = 255;
811
812 /* XXX AEN notification may arrive in interrupt context, so just
813 * map and queue the request instead of polling for completion,
814 * and return from the interrupt as quickly as possible.
815 */
816 if ((tr->tr_flags & TWA_CMD_AEN) != 0)
817 error = twa_map_request(tr);
818 else
819 error = twa_immediate_request(tr, TWA_REQUEST_TIMEOUT_PERIOD);
820
821 return (error);
822 }
823
824
825
826 static int
827 twa_alloc_req_pkts(struct twa_softc *sc, int num_reqs)
828 {
829 struct twa_request *tr;
830 struct twa_command_packet *tc;
831 bus_dma_segment_t seg;
832 size_t max_segs, max_xfer;
833 int i, rv, rseg, size;
834
835 if ((sc->twa_req_buf = malloc(num_reqs * sizeof(struct twa_request),
836 M_DEVBUF, M_NOWAIT)) == NULL)
837 return(ENOMEM);
838
839 size = num_reqs * sizeof(struct twa_command_packet);
840
841 /* Allocate memory for cmd pkts. */
842 if ((rv = bus_dmamem_alloc(sc->twa_dma_tag,
843 size, PAGE_SIZE, 0, &seg,
844 1, &rseg, BUS_DMA_NOWAIT)) != 0){
845 aprint_error("%s: unable to allocate "
846 "command packets, rv = %d\n",
847 sc->twa_dv.dv_xname, rv);
848 return (ENOMEM);
849 }
850
851 if ((rv = bus_dmamem_map(sc->twa_dma_tag,
852 &seg, rseg, size, (caddr_t *)&sc->twa_cmds,
853 BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) != 0) {
854 aprint_error("%s: unable to map commands, rv = %d\n",
855 sc->twa_dv.dv_xname, rv);
856 return (1);
857 }
858
859 if ((rv = bus_dmamap_create(sc->twa_dma_tag,
860 size, num_reqs, size,
861 0, BUS_DMA_NOWAIT, &sc->twa_cmd_map)) != 0) {
862 aprint_error("%s: unable to create command DMA map, "
863 "rv = %d\n", sc->twa_dv.dv_xname, rv);
864 return (ENOMEM);
865 }
866
867 if ((rv = bus_dmamap_load(sc->twa_dma_tag, sc->twa_cmd_map,
868 sc->twa_cmds, size, NULL,
869 BUS_DMA_NOWAIT)) != 0) {
870 aprint_error("%s: unable to load command DMA map, "
871 "rv = %d\n", sc->twa_dv.dv_xname, rv);
872 return (1);
873 }
874
875 if ((uintptr_t)sc->twa_cmds % TWA_ALIGNMENT) {
876 aprint_error("%s: DMA map memory not aligned on %d boundary\n",
877 sc->twa_dv.dv_xname, TWA_ALIGNMENT);
878
879 return (1);
880 }
881 tc = sc->twa_cmd_pkt_buf = (struct twa_command_packet *)sc->twa_cmds;
882 sc->twa_cmd_pkt_phys = sc->twa_cmd_map->dm_segs[0].ds_addr;
883
884 memset(sc->twa_req_buf, 0, num_reqs * sizeof(struct twa_request));
885 memset(sc->twa_cmd_pkt_buf, 0,
886 num_reqs * sizeof(struct twa_command_packet));
887
888 sc->sc_twa_request = sc->twa_req_buf;
889 max_segs = twa_get_maxsegs();
890 max_xfer = twa_get_maxxfer(max_segs);
891
892 for (i = 0; i < num_reqs; i++, tc++) {
893 tr = &(sc->twa_req_buf[i]);
894 tr->tr_command = tc;
895 tr->tr_cmd_phys = sc->twa_cmd_pkt_phys +
896 (i * sizeof(struct twa_command_packet));
897 tr->tr_request_id = i;
898 tr->tr_sc = sc;
899
900 /*
901 * Create a map for data buffers. The maxsize (256 * 1024) used
902 * when the DMA tag was set up should suffice for the bounce-page
903 * needs of data buffers, since the max I/O size we support is
904 * 128KB. If we supported I/Os bigger than 256KB, we would have
905 * to create a second dma_tag, with the appropriate maxsize.
906 */
907 if ((rv = bus_dmamap_create(sc->twa_dma_tag,
908 max_xfer, max_segs, 1, 0, BUS_DMA_NOWAIT,
909 &tr->tr_dma_map)) != 0) {
910 aprint_error("%s: unable to create command "
911 "DMA map, rv = %d\n",
912 sc->twa_dv.dv_xname, rv);
913 return (ENOMEM);
914 }
915 /* Insert request into the free queue. */
916 if (i != 0) {
917 sc->twa_lookup[i] = tr;
918 twa_release_request(tr);
919 } else
920 tr->tr_flags |= TWA_CMD_AEN;
921 }
922 return(0);
923 }
924
925
926 static void
927 twa_recompute_openings(struct twa_softc *sc)
928 {
929 struct twa_drive *td;
930 int unit;
931 int openings;
932
933 if (sc->sc_nunits != 0)
934 openings = ((TWA_Q_LENGTH / 2) / sc->sc_nunits);
935 else
936 openings = 0;
937 if (openings == sc->sc_openings)
938 return;
939 sc->sc_openings = openings;
940
941 #ifdef TWA_DEBUG
942 printf("%s: %d array%s, %d openings per array\n",
943 sc->twa_dv.dv_xname, sc->sc_nunits,
944 sc->sc_nunits == 1 ? "" : "s", sc->sc_openings);
945 #endif
946 for (unit = 0; unit < TWA_MAX_UNITS; unit++) {
947 td = &sc->sc_units[unit];
948 if (td->td_dev != NULL)
949 (*td->td_callbacks->tcb_openings)(td->td_dev,
950 sc->sc_openings);
951 }
952 }
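
/*
 * Worked example (numbers are illustrative, not taken from this file):
 * if TWA_Q_LENGTH were 256 and four units were attached, each unit would
 * be given (256 / 2) / 4 = 32 openings.  Only half of the request queue
 * is distributed this way, leaving headroom (e.g. for internal and ioctl
 * requests).
 */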
953
954
955 static int
956 twa_request_bus_scan(struct twa_softc *sc)
957 {
958 struct twa_drive *td;
959 struct twa_request *tr;
960 struct twa_attach_args twaa;
961 int s, unit;
962
963 s = splbio();
964 for (unit = 0; unit < TWA_MAX_UNITS; unit++) {
965
966 if ((tr = twa_get_request(sc, 0)) == NULL) {
967 splx(s);
968 return (EIO);
969 }
970
971 tr->tr_cmd_pkt_type |= TWA_CMD_PKT_TYPE_INTERNAL;
972
973 tr->tr_data = malloc(TWA_SECTOR_SIZE, M_DEVBUF, M_NOWAIT);
974
975 if (tr->tr_data == NULL) {
976 twa_release_request(tr);
977 splx(s);
978 return (ENOMEM);
979 }
980 td = &sc->sc_units[unit];
981
982 if (twa_inquiry(tr, unit) == 0) {
983
984 if (td->td_dev == NULL) {
985 twa_print_inquiry_data(sc,
986 ((struct scsipi_inquiry_data *)tr->tr_data));
987
988 sc->sc_nunits++;
989
990 sc->sc_units[unit].td_size =
991 twa_read_capacity(tr, unit);
992
993 twaa.twaa_unit = unit;
994
995 twa_recompute_openings(sc);
996
997 sc->sc_units[unit].td_dev =
998 config_found_sm_loc(&sc->twa_dv, "twa",
999 NULL, &twaa, twa_print,
1000 twa_submatch);
1001 }
1002 } else {
1003 if (td->td_dev != NULL) {
1004
1005 sc->sc_nunits--;
1006
1007 (void) config_detach(td->td_dev, DETACH_FORCE);
1008 td->td_dev = NULL;
1009 td->td_size = 0;
1010
1011 twa_recompute_openings(sc);
1012 }
1013 }
1014 free(tr->tr_data, M_DEVBUF);
1015
1016 twa_release_request(tr);
1017 }
1018 splx(s);
1019
1020 return (0);
1021 }
1022
1023
1024 static int
1025 twa_start(struct twa_request *tr)
1026 {
1027 struct twa_softc *sc = tr->tr_sc;
1028 u_int32_t status_reg;
1029 int s;
1030 int error;
1031
1032 s = splbio();
1033 /* Check to see if we can post a command. */
1034 status_reg = twa_inl(sc, TWA_STATUS_REGISTER_OFFSET);
1035 if ((error = twa_check_ctlr_state(sc, status_reg)))
1036 goto out;
1037
1038 if (status_reg & TWA_STATUS_COMMAND_QUEUE_FULL) {
1039 if (tr->tr_status != TWA_CMD_PENDING) {
1040 tr->tr_status = TWA_CMD_PENDING;
1041 TAILQ_INSERT_TAIL(&tr->tr_sc->twa_pending,
1042 tr, tr_link);
1043 }
1044 twa_outl(sc, TWA_CONTROL_REGISTER_OFFSET,
1045 TWA_CONTROL_UNMASK_COMMAND_INTERRUPT);
1046 error = EBUSY;
1047 } else {
1048 bus_dmamap_sync(sc->twa_dma_tag, sc->twa_cmd_map,
1049 (caddr_t)tr->tr_command - sc->twa_cmds,
1050 sizeof(struct twa_command_packet),
1051 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
1052
1053 /* Cmd queue is not full. Post the command. */
1054 TWA_WRITE_COMMAND_QUEUE(sc, tr->tr_cmd_phys +
1055 sizeof(struct twa_command_header));
1056
1057 /* Mark the request as currently being processed. */
1058 tr->tr_status = TWA_CMD_BUSY;
1059 /* Move the request into the busy queue. */
1060 TAILQ_INSERT_TAIL(&tr->tr_sc->twa_busy, tr, tr_link);
1061 }
1062 out:
1063 splx(s);
1064 return(error);
1065 }
1066
1067
1068 static int
1069 twa_drain_response_queue(struct twa_softc *sc)
1070 {
1071 union twa_response_queue rq;
1072 u_int32_t status_reg;
1073
1074 for (;;) {
1075 status_reg = twa_inl(sc, TWA_STATUS_REGISTER_OFFSET);
1076 if (twa_check_ctlr_state(sc, status_reg))
1077 return(1);
1078 if (status_reg & TWA_STATUS_RESPONSE_QUEUE_EMPTY)
1079 return(0); /* no more response queue entries */
1080 rq = (union twa_response_queue)twa_inl(sc, TWA_RESPONSE_QUEUE_OFFSET);
1081 }
1082 }
1083
1084
1085 static void
1086 twa_drain_busy_queue(struct twa_softc *sc)
1087 {
1088 struct twa_request *tr;
1089
1090 /* Walk the busy queue. */
1091
1092 while ((tr = TAILQ_FIRST(&sc->twa_busy)) != NULL) {
1093 TAILQ_REMOVE(&sc->twa_busy, tr, tr_link);
1094
1095 twa_unmap_request(tr);
1096 if ((tr->tr_cmd_pkt_type & TWA_CMD_PKT_TYPE_INTERNAL) ||
1097 (tr->tr_cmd_pkt_type & TWA_CMD_PKT_TYPE_IOCTL)) {
1098 /* It's an internal/ioctl request. Simply free it. */
1099 if (tr->tr_data)
1100 free(tr->tr_data, M_DEVBUF);
1101 twa_release_request(tr);
1102 } else {
1103 /* It's a SCSI request. Complete it. */
1104 tr->tr_command->command.cmd_pkt_9k.status = EIO;
1105 if (tr->tr_callback)
1106 tr->tr_callback(tr);
1107 }
1108 }
1109 }
1110
1111
1112 static int
1113 twa_drain_pending_queue(struct twa_softc *sc)
1114 {
1115 struct twa_request *tr;
1116 int s, error = 0;
1117
1118 /*
1119 * Pull requests off the pending queue, and submit them.
1120 */
1121 s = splbio();
1122 while ((tr = TAILQ_FIRST(&sc->twa_pending)) != NULL) {
1123 TAILQ_REMOVE(&sc->twa_pending, tr, tr_link);
1124
1125 if ((error = twa_start(tr))) {
1126 if (error == EBUSY) {
1127 tr->tr_status = TWA_CMD_PENDING;
1128
1129 /* queue at the head */
1130 TAILQ_INSERT_HEAD(&tr->tr_sc->twa_pending,
1131 tr, tr_link);
1132 error = 0;
1133 break;
1134 } else {
1135 if (tr->tr_flags & TWA_CMD_SLEEP_ON_REQUEST) {
1136 tr->tr_error = error;
1137 tr->tr_callback(tr);
1138 error = EIO;
1139 }
1140 }
1141 }
1142 }
1143 splx(s);
1144
1145 return(error);
1146 }
1147
1148
1149 static int
1150 twa_drain_aen_queue(struct twa_softc *sc)
1151 {
1152 int error = 0;
1153 struct twa_request *tr;
1154 struct twa_command_header *cmd_hdr;
1155 struct timeval t1;
1156 u_int32_t timeout;
1157
1158 for (;;) {
1159 if ((tr = twa_get_request(sc, 0)) == NULL) {
1160 error = EIO;
1161 break;
1162 }
1163 tr->tr_cmd_pkt_type |= TWA_CMD_PKT_TYPE_INTERNAL;
1164 tr->tr_callback = NULL;
1165
1166 tr->tr_data = malloc(TWA_SECTOR_SIZE, M_DEVBUF, M_NOWAIT);
1167
1168 if (tr->tr_data == NULL) {
1169 error = 1;
1170 goto out;
1171 }
1172
1173 if (twa_request_sense(tr, 0) != 0) {
1174 error = 1;
1175 break;
1176 }
1177
1178 timeout = (1000/*ms*/ * 100/*us*/ * TWA_REQUEST_TIMEOUT_PERIOD);
1179
1180 microtime(&t1);
1181
1182 timeout += t1.tv_usec;
1183
1184 do {
1185 twa_done(tr->tr_sc);
1186 if (tr->tr_status != TWA_CMD_BUSY)
1187 break;
1188 microtime(&t1);
1189 } while (t1.tv_usec <= timeout);
1190
1191 if (tr->tr_status != TWA_CMD_COMPLETE) {
1192 error = ETIMEDOUT;
1193 break;
1194 }
1195
1196 if ((error = tr->tr_command->command.cmd_pkt_9k.status))
1197 break;
1198
1199 cmd_hdr = (struct twa_command_header *)(tr->tr_data);
1200 if ((cmd_hdr->status_block.error) /* aen_code */
1201 == TWA_AEN_QUEUE_EMPTY)
1202 break;
1203 (void)twa_enqueue_aen(sc, cmd_hdr);
1204
1205 free(tr->tr_data, M_DEVBUF);
1206 twa_release_request(tr);
1207 }
1208 out:
1209 if (tr) {
1210 if (tr->tr_data)
1211 free(tr->tr_data, M_DEVBUF);
1212
1213 twa_release_request(tr);
1214 }
1215 return(error);
1216 }
1217
1218
1219 static int
1220 twa_done(struct twa_softc *sc)
1221 {
1222 union twa_response_queue rq;
1223 struct twa_request *tr;
1224 int s, error = 0;
1225 u_int32_t status_reg;
1226
1227 for (;;) {
1228 status_reg = twa_inl(sc, TWA_STATUS_REGISTER_OFFSET);
1229 if ((error = twa_check_ctlr_state(sc, status_reg)))
1230 break;
1231 if (status_reg & TWA_STATUS_RESPONSE_QUEUE_EMPTY)
1232 break;
1233 /* Response queue is not empty. */
1234 rq = (union twa_response_queue)twa_inl(sc,
1235 TWA_RESPONSE_QUEUE_OFFSET);
1236 tr = sc->sc_twa_request + rq.u.response_id;
1237
1238 /* Unmap the command packet, and any associated data buffer. */
1239 twa_unmap_request(tr);
1240
1241 s = splbio();
1242 tr->tr_status = TWA_CMD_COMPLETE;
1243 TAILQ_REMOVE(&tr->tr_sc->twa_busy, tr, tr_link);
1244 splx(s);
1245
1246 if (tr->tr_callback)
1247 tr->tr_callback(tr);
1248 }
1249 (void)twa_drain_pending_queue(sc);
1250
1251 return(error);
1252 }
1253
1254 /*
1255 * Function name: twa_init_ctlr
1256 * Description: Establishes a logical connection with the controller.
1257 * If bundled with firmware, determines whether or not
1258 * to flash firmware, based on arch_id, fw SRL (Spec.
1259 * Revision Level), branch & build #'s. Also determines
1260 * whether or not the driver is compatible with the
1261 * firmware on the controller, before proceeding to work
1262 * with it.
1263 *
1264 * Input: sc -- ptr to per ctlr structure
1265 * Output: None
1266 * Return value: 0 -- success
1267 * non-zero-- failure
1268 */
1269 static int
1270 twa_init_ctlr(struct twa_softc *sc)
1271 {
1272 u_int16_t fw_on_ctlr_srl = 0;
1273 u_int16_t fw_on_ctlr_arch_id = 0;
1274 u_int16_t fw_on_ctlr_branch = 0;
1275 u_int16_t fw_on_ctlr_build = 0;
1276 u_int32_t init_connect_result = 0;
1277 int error = 0;
1278 #if 0
1279 int8_t fw_flashed = FALSE;
1280 int8_t fw_flash_failed = FALSE;
1281 #endif
1282
1283 /* Wait for the controller to become ready. */
1284 if (twa_wait_status(sc, TWA_STATUS_MICROCONTROLLER_READY,
1285 TWA_REQUEST_TIMEOUT_PERIOD)) {
1286 return(ENXIO);
1287 }
1288 /* Drain the response queue. */
1289 if (twa_drain_response_queue(sc))
1290 return(1);
1291
1292 /* Establish a logical connection with the controller. */
1293 if ((error = twa_init_connection(sc, TWA_INIT_MESSAGE_CREDITS,
1294 TWA_EXTENDED_INIT_CONNECT, TWA_CURRENT_FW_SRL,
1295 TWA_9000_ARCH_ID, TWA_CURRENT_FW_BRANCH,
1296 TWA_CURRENT_FW_BUILD, &fw_on_ctlr_srl,
1297 &fw_on_ctlr_arch_id, &fw_on_ctlr_branch,
1298 &fw_on_ctlr_build, &init_connect_result))) {
1299 return(error);
1300 }
1301 #if 0
1302 if ((init_connect_result & TWA_BUNDLED_FW_SAFE_TO_FLASH) &&
1303 (init_connect_result & TWA_CTLR_FW_RECOMMENDS_FLASH)) {
1304 /*
1305 * The bundled firmware is safe to flash, and the firmware
1306 * on the controller recommends a flash. So, flash!
1307 */
1308 printf("%s: flashing bundled firmware...\n", sc->twa_dv.dv_xname);
1309
1310 if ((error = twa_flash_firmware(sc))) {
1311 fw_flash_failed = TRUE;
1312
1313 printf("%s: unable to flash bundled firmware.\n", sc->twa_dv.dv_xname);
1314 } else {
1315 printf("%s: successfully flashed bundled firmware.\n",
1316 sc->twa_dv.dv_xname);
1317 fw_flashed = TRUE;
1318 }
1319 }
1320 if (fw_flashed) {
1321 /* The firmware was flashed. Have the new image loaded */
1322 error = twa_hard_reset(sc);
1323 if (error == 0)
1324 error = twa_init_ctlr(sc);
1325 /*
1326 * If hard reset of controller failed, we need to return.
1327 * Otherwise, the above recursive call to twa_init_ctlr will
1328 * have completed the rest of the initialization (starting
1329 * from twa_drain_aen_queue below). Don't do it again.
1330 * Just return.
1331 */
1332 return(error);
1333 } else {
1334 /*
1335 * Either we are not bundled with a firmware image, or
1336 * the bundled firmware is not safe to flash,
1337 * or flash failed for some reason. See if we can at
1338 * least work with the firmware on the controller in the
1339 * current mode.
1340 */
1341 if (init_connect_result & TWA_CTLR_FW_COMPATIBLE) {
1342 /* Yes, we can. Make note of the operating mode. */
1343 sc->working_srl = TWA_CURRENT_FW_SRL;
1344 sc->working_branch = TWA_CURRENT_FW_BRANCH;
1345 sc->working_build = TWA_CURRENT_FW_BUILD;
1346 } else {
1347 /*
1348 * No, we can't. See if we can at least work with
1349 * it in the base mode. We should never come here
1350 * if firmware has just been flashed.
1351 */
1352 printf("%s: Driver/Firmware mismatch. Negotiating for base level.\n",
1353 sc->twa_dv.dv_xname);
1354 if ((error = twa_init_connection(sc, TWA_INIT_MESSAGE_CREDITS,
1355 TWA_EXTENDED_INIT_CONNECT, TWA_BASE_FW_SRL,
1356 TWA_9000_ARCH_ID, TWA_BASE_FW_BRANCH,
1357 TWA_BASE_FW_BUILD, &fw_on_ctlr_srl,
1358 &fw_on_ctlr_arch_id, &fw_on_ctlr_branch,
1359 &fw_on_ctlr_build, &init_connect_result))) {
1360 printf("%s: can't initialize connection in base mode.\n",
1361 sc->twa_dv.dv_xname);
1362 return(error);
1363 }
1364 if (!(init_connect_result & TWA_CTLR_FW_COMPATIBLE)) {
1365 /*
1366 * The firmware on the controller is not even
1367 * compatible with our base mode. We cannot
1368 * work with it. Bail...
1369 */
1370 printf("Incompatible firmware on controller\n");
1371 #ifdef TWA_FLASH_FIRMWARE
1372 if (fw_flash_failed)
1373 printf("...and could not flash bundled firmware.\n");
1374 else
1375 printf("...and bundled firmware not safe to flash.\n");
1376 #endif /* TWA_FLASH_FIRMWARE */
1377 return(1);
1378 }
1379 /* We can work with this firmware, but only in base mode. */
1380 sc->working_srl = TWA_BASE_FW_SRL;
1381 sc->working_branch = TWA_BASE_FW_BRANCH;
1382 sc->working_build = TWA_BASE_FW_BUILD;
1383 sc->twa_operating_mode = TWA_BASE_MODE;
1384 }
1385 }
1386 #endif
1387 twa_drain_aen_queue(sc);
1388
1389 /* Set controller state to initialized. */
1390 sc->twa_state &= ~TWA_STATE_SHUTDOWN;
1391 return(0);
1392 }
1393
1394
1395 static int
1396 twa_setup(struct twa_softc *sc)
1397 {
1398 struct tw_cl_event_packet *aen_queue;
1399 uint32_t i = 0;
1400 int error = 0;
1401
1402 /* Initialize request queues. */
1403 TAILQ_INIT(&sc->twa_free);
1404 TAILQ_INIT(&sc->twa_busy);
1405 TAILQ_INIT(&sc->twa_pending);
1406
1407 sc->sc_nunits = 0;
1408 sc->twa_sc_flags = 0;
1409
1410 if (twa_alloc_req_pkts(sc, TWA_Q_LENGTH)) {
1411
1412 return(ENOMEM);
1413 }
1414
1415 /* Allocate memory for the AEN queue. */
1416 if ((aen_queue = malloc(sizeof(struct tw_cl_event_packet) * TWA_Q_LENGTH,
1417 M_DEVBUF, M_WAITOK)) == NULL) {
1418 /*
1419 * Strictly speaking, this need not be fatal - we would only lose
1420 * AEN support. But continuing would mean checking again and
1421 * again whether AENs can be supported, so just give up and
1422 * return an error.
1423 */
1424 return (ENOMEM);
1425 }
1426 /* Initialize the aen queue. */
1427 memset(aen_queue, 0, sizeof(struct tw_cl_event_packet) * TWA_Q_LENGTH);
1428
1429 for (i = 0; i < TWA_Q_LENGTH; i++)
1430 sc->twa_aen_queue[i] = &(aen_queue[i]);
1431
1432 twa_outl(sc, TWA_CONTROL_REGISTER_OFFSET,
1433 TWA_CONTROL_DISABLE_INTERRUPTS);
1434
1435 /* Initialize the controller. */
1436 if ((error = twa_init_ctlr(sc))) {
1437 /* Soft reset the controller, and try one more time. */
1438
1439 printf("%s: controller initialization failed. Retrying initialization\n",
1440 sc->twa_dv.dv_xname);
1441
1442 if ((error = twa_soft_reset(sc)) == 0)
1443 error = twa_init_ctlr(sc);
1444 }
1445
1446 twa_describe_controller(sc);
1447
1448 error = twa_request_bus_scan(sc);
1449
1450 twa_outl(sc, TWA_CONTROL_REGISTER_OFFSET,
1451 TWA_CONTROL_CLEAR_ATTENTION_INTERRUPT |
1452 TWA_CONTROL_UNMASK_RESPONSE_INTERRUPT |
1453 TWA_CONTROL_ENABLE_INTERRUPTS);
1454
1455 return (error);
1456 }
1457
1458 void *twa_sdh;
1459
1460 static void
1461 twa_attach(struct device *parent, struct device *self, void *aux)
1462 {
1463 struct pci_attach_args *pa;
1464 struct twa_softc *sc;
1465 pci_chipset_tag_t pc;
1466 pcireg_t csr;
1467 pci_intr_handle_t ih;
1468 const char *intrstr;
1469
1470 sc = (struct twa_softc *)self;
1471
1472 pa = aux;
1473 pc = pa->pa_pc;
1474 sc->pc = pa->pa_pc;
1475 sc->tag = pa->pa_tag;
1476 sc->twa_dma_tag = pa->pa_dmat;
1477
1478 aprint_naive(": RAID controller\n");
1479 aprint_normal(": 3ware Apache\n");
1480
1481 if (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_3WARE_9000) {
1482 if (pci_mapreg_map(pa, PCI_MAPREG_START, PCI_MAPREG_TYPE_IO, 0,
1483 &sc->twa_bus_iot, &sc->twa_bus_ioh, NULL, NULL)) {
1484 aprint_error("%s: can't map i/o space\n",
1485 sc->twa_dv.dv_xname);
1486 return;
1487 }
1488 } else if (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_3WARE_9550) {
1489 if (pci_mapreg_map(pa, PCI_MAPREG_START + 0x08,
1490 PCI_MAPREG_MEM_TYPE_64BIT, 0, &sc->twa_bus_iot,
1491 &sc->twa_bus_ioh, NULL, NULL)) {
1492 aprint_error("%s: can't map mem space\n",
1493 sc->twa_dv.dv_xname);
1494 return;
1495 }
1496 } else {
1497 aprint_error("%s: product id 0x%02x not recognized\n",
1498 sc->twa_dv.dv_xname, PCI_PRODUCT(pa->pa_id));
1499 return;
1500 }
1501 /* Enable the device. */
1502 csr = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
1503
1504 pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG,
1505 csr | PCI_COMMAND_MASTER_ENABLE);
1506
1507 /* Map and establish the interrupt. */
1508 if (pci_intr_map(pa, &ih)) {
1509 aprint_error("%s: can't map interrupt\n", sc->twa_dv.dv_xname);
1510 return;
1511 }
1512 intrstr = pci_intr_string(pc, ih);
1513
1514 sc->twa_ih = pci_intr_establish(pc, ih, IPL_BIO, twa_intr, sc);
1515 if (sc->twa_ih == NULL) {
1516 aprint_error("%s: can't establish interrupt%s%s\n",
1517 sc->twa_dv.dv_xname,
1518 (intrstr) ? " at " : "",
1519 (intrstr) ? intrstr : "");
1520 return;
1521 }
1522
1523 if (intrstr != NULL)
1524 aprint_normal("%s: interrupting at %s\n",
1525 sc->twa_dv.dv_xname, intrstr);
1526
1527 twa_setup(sc);
1528
1529 if (twa_sdh == NULL)
1530 twa_sdh = shutdownhook_establish(twa_shutdown, NULL);
1531
1532 return;
1533 }
1534
1535
1536 static void
1537 twa_shutdown(void *arg)
1538 {
1539 extern struct cfdriver twa_cd;
1540 struct twa_softc *sc;
1541 int i, rv, unit;
1542
1543 for (i = 0; i < twa_cd.cd_ndevs; i++) {
1544 if ((sc = device_lookup(&twa_cd, i)) == NULL)
1545 continue;
1546
1547 for (unit = 0; unit < TWA_MAX_UNITS; unit++)
1548 if (sc->sc_units[unit].td_dev != NULL)
1549 (void) config_detach(sc->sc_units[unit].td_dev,
1550 DETACH_FORCE | DETACH_QUIET);
1551
1552 twa_outl(sc, TWA_CONTROL_REGISTER_OFFSET,
1553 TWA_CONTROL_DISABLE_INTERRUPTS);
1554
1555 /* Let the controller know that we are going down. */
1556 rv = twa_init_connection(sc, TWA_SHUTDOWN_MESSAGE_CREDITS,
1557 0, 0, 0, 0, 0,
1558 NULL, NULL, NULL, NULL, NULL);
1559 }
1560 }
1561
1562
1563 void
1564 twa_register_callbacks(struct twa_softc *sc, int unit,
1565 const struct twa_callbacks *tcb)
1566 {
1567
1568 sc->sc_units[unit].td_callbacks = tcb;
1569 }
1570
1571
1572 static int
1573 twa_submatch(struct device *parent, struct cfdata *cf,
1574 const int *ldesc, void *aux)
1575 {
1576 struct twa_attach_args *twaa;
1577
1578 twaa = aux;
1579
1580 if (cf->twaacf_unit != TWACF_UNIT_DEFAULT &&
1581 cf->twaacf_unit != twaa->twaa_unit)
1582 return (0);
1583
1584 return (config_match(parent, cf, aux));
1585 }
1586
1587
1588 /*
1589 * Print autoconfiguration message for a sub-device
1590 */
1591 static int
1592 twa_print(void *aux, const char *pnp)
1593 {
1594 struct twa_attach_args *twaa;
1595
1596 twaa = aux;
1597
1598 if (pnp != NULL)
1599 aprint_normal("block device at %s\n", pnp);
1600 aprint_normal(" unit %d\n", twaa->twaa_unit);
1601 return (UNCONF);
1602 }
1603
1604
1605 static void
1606 twa_fillin_sgl(struct twa_sg *sgl, bus_dma_segment_t *segs, int nsegments)
1607 {
1608 int i;
1609 for (i = 0; i < nsegments; i++) {
1610 sgl[i].address = segs[i].ds_addr;
1611 sgl[i].length = (u_int32_t)(segs[i].ds_len);
1612 }
1613 }
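
/*
 * Illustrative example (addresses are made up): if bus_dmamap_load()
 * split a transfer into two segments,
 *
 *	segs[0].ds_addr = 0x00100000, segs[0].ds_len = 4096
 *	segs[1].ds_addr = 0x00204000, segs[1].ds_len = 2048
 *
 * twa_fillin_sgl() copies each (address, length) pair straight into the
 * command packet's scatter/gather list, so sgl[0] and sgl[1] end up
 * describing the same two regions for the controller.
 */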
1614
1615
1616 static int
1617 twa_submit_io(struct twa_request *tr)
1618 {
1619 int error;
1620
1621 if ((error = twa_start(tr))) {
1622 if (error == EBUSY)
1623 error = 0; /* request is in the pending queue */
1624 else {
1625 tr->tr_error = error;
1626 }
1627 }
1628 return(error);
1629 }
1630
1631
1632 /*
1633 * Function name: twa_setup_data_dmamap
1634 * Description: Called after bus_dmamap_load() of the data buffer.
1635 * Updates the cmd pkt (size/sgl_entries fields, as
1636 * applicable) to reflect the number of sg elements,
1637 * and submits the I/O.
1638 *
1639 * Input: arg -- ptr to request pkt
1640 * segs -- ptr to a list of segment descriptors
1641 * nsegments--# of segments
1642 * error -- 0 if no errors encountered before callback,
1643 * non-zero if errors were encountered
1644 * Output: None
1645 * Return value: None
1646 */
1647 static int
1648 twa_setup_data_dmamap(void *arg, bus_dma_segment_t *segs,
1649 int nsegments, int error)
1650 {
1651 struct twa_request *tr = (struct twa_request *)arg;
1652 struct twa_command_packet *cmdpkt = tr->tr_command;
1653 struct twa_command_9k *cmd9k;
1654 union twa_command_7k *cmd7k;
1655 u_int8_t sgl_offset;
1656
1657 if (error == EFBIG) {
1658 tr->tr_error = error;
1659 goto out;
1660 }
1661
1662 if (tr->tr_cmd_pkt_type & TWA_CMD_PKT_TYPE_9K) {
1663 cmd9k = &(cmdpkt->command.cmd_pkt_9k);
1664 twa_fillin_sgl(&(cmd9k->sg_list[0]), segs, nsegments);
1665 cmd9k->sgl_entries += nsegments - 1;
1666 } else {
1667 /* It's a 7000 command packet. */
1668 cmd7k = &(cmdpkt->command.cmd_pkt_7k);
1669 if ((sgl_offset = cmdpkt->command.cmd_pkt_7k.generic.sgl_offset))
1670 twa_fillin_sgl((struct twa_sg *)
1671 (((u_int32_t *)cmd7k) + sgl_offset),
1672 segs, nsegments);
1673 /* Modify the size field, based on sg address size. */
1674 cmd7k->generic.size +=
1675 ((TWA_64BIT_ADDRESSES ? 3 : 2) * nsegments);
1676 }
1677
1678 if (tr->tr_flags & TWA_CMD_DATA_IN)
1679 bus_dmamap_sync(tr->tr_sc->twa_dma_tag, tr->tr_dma_map, 0,
1680 tr->tr_length, BUS_DMASYNC_PREREAD);
1681 if (tr->tr_flags & TWA_CMD_DATA_OUT) {
1682 /*
1683 * If we're using an alignment buffer, and we're
1684 * writing data, copy the real data out.
1685 */
1686 if (tr->tr_flags & TWA_CMD_DATA_COPY_NEEDED)
1687 memcpy(tr->tr_data, tr->tr_real_data,
1688 tr->tr_real_length);
1689 bus_dmamap_sync(tr->tr_sc->twa_dma_tag, tr->tr_dma_map, 0,
1690 tr->tr_length, BUS_DMASYNC_PREWRITE);
1691 }
1692 error = twa_submit_io(tr);
1693
1694 out:
1695 if (error) {
1696 twa_unmap_request(tr);
1697 /*
1698 * If the caller had been returned EINPROGRESS, and he has
1699 * registered a callback for handling completion, the callback
1700 * will never get called because we were unable to submit the
1701 * request. So, free up the request right here.
1702 */
1703 if ((tr->tr_flags & TWA_CMD_IN_PROGRESS) && (tr->tr_callback))
1704 twa_release_request(tr);
1705 }
1706 return (error);
1707 }
1708
1709
1710 /*
1711 * Function name: twa_map_request
1712 * Description: Maps a cmd pkt and data associated with it, into
1713 * DMA'able memory.
1714 *
1715 * Input: tr -- ptr to request pkt
1716 * Output: None
1717 * Return value: 0 -- success
1718 * non-zero-- failure
1719 */
1720 int
1721 twa_map_request(struct twa_request *tr)
1722 {
1723 struct twa_softc *sc = tr->tr_sc;
1724 int s, rv, error = 0;
1725
1726 /* If the command involves data, map that too. */
1727 if (tr->tr_data != NULL) {
1728
1729 if (((u_long)tr->tr_data & (511)) != 0) {
1730 tr->tr_flags |= TWA_CMD_DATA_COPY_NEEDED;
1731 tr->tr_real_data = tr->tr_data;
1732 tr->tr_real_length = tr->tr_length;
1733 s = splvm();
1734 tr->tr_data = (void *)uvm_km_alloc(kmem_map,
1735 tr->tr_length, 512, UVM_KMF_NOWAIT|UVM_KMF_WIRED);
1736 splx(s);
1737
1738 if (tr->tr_data == NULL) {
1739 tr->tr_data = tr->tr_real_data;
1740 tr->tr_length = tr->tr_real_length;
1741 return(ENOMEM);
1742 }
1743 if ((tr->tr_flags & TWA_CMD_DATA_IN) != 0)
1744 memcpy(tr->tr_data, tr->tr_real_data,
1745 tr->tr_length);
1746 }
1747
1748 /*
1749 * Map the data buffer into bus space and build the S/G list.
1750 */
1751 rv = bus_dmamap_load(sc->twa_dma_tag, tr->tr_dma_map,
1752 tr->tr_data, tr->tr_length, NULL, BUS_DMA_NOWAIT |
1753 BUS_DMA_STREAMING | ((tr->tr_flags & TWA_CMD_DATA_OUT) ?
1754 BUS_DMA_READ : BUS_DMA_WRITE));
1755
1756 if (rv != 0) {
1757 if ((tr->tr_flags & TWA_CMD_DATA_COPY_NEEDED) != 0) {
1758 s = splvm();
1759 uvm_km_free(kmem_map, (vaddr_t)tr->tr_data,
1760 tr->tr_length, UVM_KMF_WIRED);
1761 splx(s);
1762 }
1763 return (rv);
1764 }
1765
1766 if ((rv = twa_setup_data_dmamap(tr,
1767 tr->tr_dma_map->dm_segs,
1768 tr->tr_dma_map->dm_nsegs, error))) {
1769
1770 if (tr->tr_flags & TWA_CMD_DATA_COPY_NEEDED) {
1771 uvm_km_free(kmem_map, (vaddr_t)tr->tr_data,
1772 tr->tr_length, UVM_KMF_WIRED);
1773 tr->tr_data = tr->tr_real_data;
1774 tr->tr_length = tr->tr_real_length;
1775 }
1776 } else
1777 error = tr->tr_error;
1778
1779 } else
1780 if ((rv = twa_submit_io(tr)))
1781 twa_unmap_request(tr);
1782
1783 return (rv);
1784 }
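
/*
 * Illustrative note (not from the original driver): the alignment test
 * above, ((u_long)tr->tr_data & 511), decides whether a bounce buffer is
 * needed.  For example, a buffer at 0x1000 is 512-byte aligned and is
 * mapped directly, while one at 0x1004 is not, so a 512-byte-aligned copy
 * is allocated with uvm_km_alloc(), TWA_CMD_DATA_COPY_NEEDED is set, and
 * twa_unmap_request() later copies the data back and frees the copy.
 */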
1785
1786 #if 0
1787 /*
1788 * Function name: twa_flash_firmware
1789 * Description: Flashes bundled firmware image onto controller.
1790 *
1791 * Input: sc -- ptr to per ctlr structure
1792 * Output: None
1793 * Return value: 0 -- success
1794 * non-zero-- failure
1795 */
1796 static int
1797 twa_flash_firmware(struct twa_softc *sc)
1798 {
1799 struct twa_request *tr;
1800 struct twa_command_download_firmware *cmd;
1801 uint32_t count;
1802 uint32_t fw_img_chunk_size;
1803 uint32_t this_chunk_size = 0;
1804 uint32_t remaining_img_size = 0;
1805 int s, error = 0;
1806 int i;
1807
1808 if ((tr = twa_get_request(sc, 0)) == NULL) {
1809 /* No free request packets available. Can't proceed. */
1810 error = EIO;
1811 goto out;
1812 }
1813
1814 count = (twa_fw_img_size / 65536);
1815
1816 count += ((twa_fw_img_size % 65536) != 0) ? 1 : 0;
1817
1818 tr->tr_cmd_pkt_type |= TWA_CMD_PKT_TYPE_INTERNAL;
1819 /* Allocate sufficient memory to hold a chunk of the firmware image. */
1820 fw_img_chunk_size = ((twa_fw_img_size / count) + 511) & ~511;
1821
1822 s = splvm();
1823 tr->tr_data = (void *)uvm_km_alloc(kmem_map, fw_img_chunk_size, 512,
1824 UVM_KMF_WIRED);
1825 splx(s);
1826
1827 if (tr->tr_data == NULL) {
1828 error = ENOMEM;
1829 goto out;
1830 }
1831
1832 remaining_img_size = twa_fw_img_size;
1833 cmd = &(tr->tr_command->command.cmd_pkt_7k.download_fw);
1834
1835 for (i = 0; i < count; i++) {
1836 /* Build a cmd pkt for downloading firmware. */
1837 memset(tr->tr_command, 0, sizeof(struct twa_command_packet));
1838
1839 tr->tr_command->cmd_hdr.header_desc.size_header = 128;
1840
1841 cmd->opcode = TWA_OP_DOWNLOAD_FIRMWARE;
1842 cmd->sgl_offset = 2;/* offset in dwords, to the beginning of sg list */
1843 cmd->size = 2; /* this field will be updated at data map time */
1844 cmd->request_id = tr->tr_request_id;
1845 cmd->unit = 0;
1846 cmd->status = 0;
1847 cmd->flags = 0;
1848 cmd->param = 8; /* prom image */
1849
1850 if (i != (count - 1))
1851 this_chunk_size = fw_img_chunk_size;
1852 else /* last chunk */
1853 this_chunk_size = remaining_img_size;
1854
1855 remaining_img_size -= this_chunk_size;
1856
1857 memset(tr->tr_data, 0, fw_img_chunk_size);
1858
1859 memcpy(tr->tr_data, twa_fw_img + (i * fw_img_chunk_size),
1860 this_chunk_size);
1861 /*
1862 * The next line affects only the last chunk.
1863 */
1864 tr->tr_length = (this_chunk_size + 511) & ~511;
1865
1866 tr->tr_flags |= TWA_CMD_DATA_OUT;
1867
1868 error = twa_immediate_request(tr, TWA_REQUEST_TIMEOUT_PERIOD);
1869
1870 if (error) {
1871 if (error == ETIMEDOUT)
1872 return(error); /* clean-up done by twa_immediate_request */
1873 break;
1874 }
1875 error = cmd->status;
1876
1877 if (i != (count - 1)) {
1878
1879 /* XXX FreeBSD code doesn't check for the no-error condition,
1880 * but based on observation, error seems to be 0 in that case.
1881 */
1882 error = tr->tr_command->cmd_hdr.status_block.error;
1883 if ((error == 0) ||
1884 (error == TWA_ERROR_MORE_DATA)) {
1885 continue;
1886 } else {
1887 twa_hard_reset(sc);
1888 break;
1889 }
1891 } else /* last chunk */
1892 if (error) {
1893 printf("%s: firmware flash request failed. error = 0x%x\n",
1894 sc->twa_dv.dv_xname, error);
1895 twa_hard_reset(sc);
1896 }
1897 } /* for */
1898
1899 if (tr->tr_data) {
1900 s = splvm();
1901 uvm_km_free(kmem_map, (vaddr_t)tr->tr_data,
1902 fw_img_chunk_size, UVM_KMF_WIRED);
1903 splx(s);
1904 }
1905 out:
1906 if (tr)
1907 twa_release_request(tr);
1908 return(error);
1909 }
1910
1911 /*
1912 * Function name: twa_hard_reset
1913 * Description: Hard reset the controller.
1914 *
1915 * Input: sc -- ptr to per ctlr structure
1916 * Output: None
1917 * Return value: 0 -- success
1918 * non-zero-- failure
1919 */
1920 static int
1921 twa_hard_reset(struct twa_softc *sc)
1922 {
1923 struct twa_request *tr;
1924 struct twa_command_reset_firmware *cmd;
1925 int error;
1926
1927 if ((tr = twa_get_request(sc, 0)) == NULL)
1928 return(EIO);
1929 tr->tr_cmd_pkt_type |= TWA_CMD_PKT_TYPE_INTERNAL;
1930 /* Build a cmd pkt for sending down the hard reset command. */
1931 tr->tr_command->cmd_hdr.header_desc.size_header = 128;
1932
1933 cmd = &(tr->tr_command->command.cmd_pkt_7k.reset_fw);
1934 cmd->opcode = TWA_OP_RESET_FIRMWARE;
1935 cmd->size = 2; /* this field will be updated at data map time */
1936 cmd->request_id = tr->tr_request_id;
1937 cmd->unit = 0;
1938 cmd->status = 0;
1939 cmd->flags = 0;
1940 cmd->param = 0; /* don't reload FPGA logic */
1941
1942 tr->tr_data = NULL;
1943 tr->tr_length = 0;
1944
1945 error = twa_immediate_request(tr, TWA_REQUEST_TIMEOUT_PERIOD);
1946 if (error) {
1947 printf("%s: hard reset request could not "
1948 "be posted. error = 0x%x\n", sc->twa_dv.dv_xname, error);
1949 if (error == ETIMEDOUT)
1950 return(error); /* clean-up done by twa_immediate_request */
1951 goto out;
1952 }
1953 if ((error = cmd->status)) {
1954 printf("%s: hard reset request failed. error = 0x%x\n",
1955 sc->twa_dv.dv_xname, error);
1956 }
1957
1958 out:
1959 if (tr)
1960 twa_release_request(tr);
1961 return(error);
1962 }
1963 #endif
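/*
 * Illustrative sketch, not part of the driver: the disabled
 * twa_flash_firmware() above splits the bundled image into roughly 64KB
 * chunks and rounds each transfer up to a multiple of 512 bytes, i.e.
 * approximately
 *
 *	count = (twa_fw_img_size + 65535) / 65536;
 *	fw_img_chunk_size = ((twa_fw_img_size / count) + 511) & ~511;
 *
 * so every chunk except possibly the last is fw_img_chunk_size bytes,
 * and the final chunk is whatever remains, with its transfer length
 * padded up to the next 512-byte boundary.
 */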
1964
1965 /*
1966 * Function name: twa_intr
1967 * Description: Interrupt handler. Determines the kind of interrupt,
1968 * and calls the appropriate handler.
1969 *
1970 * Input: sc -- ptr to per ctlr structure
1971 * Output: None
1972 * Return value: 1 -- interrupt was claimed; 0 -- interrupt was not ours
1973 */
1974
1975 static int
1976 twa_intr(void *arg)
1977 {
1978 int caught, rv;
1979 struct twa_softc *sc;
1980 u_int32_t status_reg;
1981 sc = (struct twa_softc *)arg;
1982
1983 caught = 0;
1984 /* Collect current interrupt status. */
1985 status_reg = twa_inl(sc, TWA_STATUS_REGISTER_OFFSET);
1986 if (twa_check_ctlr_state(sc, status_reg)) {
1987 caught = 1;
1988 goto bail;
1989 }
1990 /* Dispatch based on the kind of interrupt. */
1991 if (status_reg & TWA_STATUS_HOST_INTERRUPT) {
1992 twa_outl(sc, TWA_CONTROL_REGISTER_OFFSET,
1993 TWA_CONTROL_CLEAR_HOST_INTERRUPT);
1994 caught = 1;
1995 }
1996 if ((status_reg & TWA_STATUS_ATTENTION_INTERRUPT) != 0) {
1997 twa_outl(sc, TWA_CONTROL_REGISTER_OFFSET,
1998 TWA_CONTROL_CLEAR_ATTENTION_INTERRUPT);
1999 rv = twa_fetch_aen(sc);
2000 #ifdef DIAGNOSTIC
2001 if (rv != 0)
2002 printf("%s: unable to retrieve AEN (%d)\n",
2003 sc->twa_dv.dv_xname, rv);
2004 #endif
2005 caught = 1;
2006 }
2007 if (status_reg & TWA_STATUS_COMMAND_INTERRUPT) {
2008 /* Start any requests that might be in the pending queue. */
2009 twa_outl(sc, TWA_CONTROL_REGISTER_OFFSET,
2010 TWA_CONTROL_MASK_COMMAND_INTERRUPT);
2011 (void)twa_drain_pending_queue(sc);
2012 caught = 1;
2013 }
2014 if (status_reg & TWA_STATUS_RESPONSE_INTERRUPT) {
2015 twa_done(sc);
2016 caught = 1;
2017 }
2018 bail:
2019 return (caught);
2020 }
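/*
 * Illustrative sketch, not part of this file's attach path: a handler of
 * this shape is normally registered from the PCI attachment with
 * something like
 *
 *	sc->twa_ih = pci_intr_establish(pa->pa_pc, ih, IPL_BIO, twa_intr, sc);
 *
 * where 'pa', 'ih' and 'twa_ih' stand in for whatever the attach code
 * actually uses.  The 0/1 return value of twa_intr() tells the dispatcher
 * whether the interrupt was claimed, which matters on shared lines.
 */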
2021
2022
2023 /*
2024 * Accept an open operation on the control device.
2025 */
2026 int
2027 twaopen(dev_t dev, int flag, int mode, struct lwp *l)
2028 {
2029 struct twa_softc *twa;
2030
2031 if ((twa = device_lookup(&twa_cd, minor(dev))) == NULL)
2032 return (ENXIO);
2033 if ((twa->twa_sc_flags & TWA_STATE_OPEN) != 0)
2034 return (EBUSY);
2035
2036 twa->twa_sc_flags |= TWA_STATE_OPEN;
2037
2038 return (0);
2039 }
2040
2041
2042 /*
2043 * Accept the last close on the control device.
2044 */
2045 int
2046 twaclose(dev_t dev, int flag, int mode, struct lwp *l)
2047 {
2048 struct twa_softc *twa;
2049
2050 twa = device_lookup(&twa_cd, minor(dev));
2051 twa->twa_sc_flags &= ~TWA_STATE_OPEN;
2052 return (0);
2053 }
2054
2055
2056 /*
2057 * Function name: twaioctl
2058 * Description: ioctl handler.
2059 *
2060 * Input: dev -- device # of the control device
2061 * cmd -- ioctl cmd
2062 * data -- ptr to buffer in kernel memory, which is
2063 * a copy of the input buffer in user-space
2064 * Output: data -- ptr to buffer in kernel memory, which will
2065 * be copied back to the output buffer in user-space
2066 * Return value: 0 -- success
2067 * non-zero-- failure
2068 */
2069 int
2070 twaioctl(dev_t dev, u_long cmd, caddr_t data, int flag, struct lwp *l)
2071 {
2072 struct twa_softc *sc;
2073 struct twa_ioctl_9k *user_buf = (struct twa_ioctl_9k *)data;
2074 struct tw_cl_event_packet event_buf;
2075 struct twa_request *tr = 0;
2076 int32_t event_index = 0;
2077 int32_t start_index;
2078 int s, error = 0;
2079
2080 sc = device_lookup(&twa_cd, minor(dev));
2081
2082 switch (cmd) {
2083 case TW_OSL_IOCTL_FIRMWARE_PASS_THROUGH:
2084 {
2085 struct twa_command_packet *cmdpkt;
2086 u_int32_t data_buf_size_adjusted;
2087
2088 /* Get a request packet */
2089 tr = twa_get_request_wait(sc, 0);
2090 KASSERT(tr != NULL);
2091 /*
2092 * Make sure that the data buffer sent to firmware is a
2093 * 512 byte multiple in size.
2094 */
2095 data_buf_size_adjusted =
2096 (user_buf->twa_drvr_pkt.buffer_length + 511) & ~511;
2097
2098 if ((tr->tr_length = data_buf_size_adjusted)) {
2099 if ((tr->tr_data = malloc(data_buf_size_adjusted,
2100 M_DEVBUF, M_WAITOK)) == NULL) {
2101 error = ENOMEM;
2102 goto fw_passthru_done;
2103 }
2104 /* Copy the payload. */
2105 if ((error = copyin((void *) (user_buf->pdata),
2106 (void *) (tr->tr_data),
2107 user_buf->twa_drvr_pkt.buffer_length)) != 0) {
2108 goto fw_passthru_done;
2109 }
2110 tr->tr_flags |= TWA_CMD_DATA_IN | TWA_CMD_DATA_OUT;
2111 }
2112 tr->tr_cmd_pkt_type |= TWA_CMD_PKT_TYPE_IOCTL;
2113 cmdpkt = tr->tr_command;
2114
2115 /* Copy the command packet. */
2116 memcpy(cmdpkt, &(user_buf->twa_cmd_pkt),
2117 sizeof(struct twa_command_packet));
2118 cmdpkt->command.cmd_pkt_7k.generic.request_id =
2119 tr->tr_request_id;
2120
2121 /* Send down the request, and wait for it to complete. */
2122 if ((error = twa_wait_request(tr, TWA_REQUEST_TIMEOUT_PERIOD))) {
2123 if (error == ETIMEDOUT)
2124 break; /* clean-up done by twa_wait_request */
2125 goto fw_passthru_done;
2126 }
2127
2128 /* Copy the command packet back into user space. */
2129 memcpy(&user_buf->twa_cmd_pkt, cmdpkt,
2130 sizeof(struct twa_command_packet));
2131
2132 /* If there was a payload, copy it back too. */
2133 if (tr->tr_length)
2134 error = copyout(tr->tr_data, user_buf->pdata,
2135 user_buf->twa_drvr_pkt.buffer_length);
2136 fw_passthru_done:
2137 /* Free resources. */
2138 if (tr->tr_data)
2139 free(tr->tr_data, M_DEVBUF);
2140
2141 if (tr)
2142 twa_release_request(tr);
2143 break;
2144 }
2145
2146 case TW_OSL_IOCTL_SCAN_BUS:
2147 twa_request_bus_scan(sc);
2148 break;
2149
2150 case TW_CL_IOCTL_GET_FIRST_EVENT:
2151 if (sc->twa_aen_queue_wrapped) {
2152 if (sc->twa_aen_queue_overflow) {
2153 /*
2154 * The aen queue has wrapped, even before some
2155 * events have been retrieved. Let the caller
2156 * know that he missed out on some AEN's.
2157 */
2158 user_buf->twa_drvr_pkt.status =
2159 TWA_ERROR_AEN_OVERFLOW;
2160 sc->twa_aen_queue_overflow = FALSE;
2161 } else
2162 user_buf->twa_drvr_pkt.status = 0;
2163 event_index = sc->twa_aen_head;
2164 } else {
2165 if (sc->twa_aen_head == sc->twa_aen_tail) {
2166 user_buf->twa_drvr_pkt.status =
2167 TWA_ERROR_AEN_NO_EVENTS;
2168 break;
2169 }
2170 user_buf->twa_drvr_pkt.status = 0;
2171 event_index = sc->twa_aen_tail; /* = 0 */
2172 }
2173 if ((error = copyout(sc->twa_aen_queue[event_index],
2174 user_buf->pdata, sizeof(struct tw_cl_event_packet))) == 0)
2175 (sc->twa_aen_queue[event_index])->retrieved =
2176 TWA_AEN_RETRIEVED;
2177 break;
2178
2179
2180 case TW_CL_IOCTL_GET_LAST_EVENT:
2181
2182 if (sc->twa_aen_queue_wrapped) {
2183 if (sc->twa_aen_queue_overflow) {
2184 /*
2185 * The aen queue has wrapped, even before some
2186 * events have been retrieved. Let the caller
2187 * know that he missed out on some AEN's.
2188 */
2189 user_buf->twa_drvr_pkt.status =
2190 TWA_ERROR_AEN_OVERFLOW;
2191 sc->twa_aen_queue_overflow = FALSE;
2192 } else
2193 user_buf->twa_drvr_pkt.status = 0;
2194 } else {
2195 if (sc->twa_aen_head == sc->twa_aen_tail) {
2196 user_buf->twa_drvr_pkt.status =
2197 TWA_ERROR_AEN_NO_EVENTS;
2198 break;
2199 }
2200 user_buf->twa_drvr_pkt.status = 0;
2201 }
2202 event_index = (sc->twa_aen_head - 1 + TWA_Q_LENGTH) % TWA_Q_LENGTH;
2203 if ((error = copyout(sc->twa_aen_queue[event_index], user_buf->pdata,
2204 sizeof(struct tw_cl_event_packet))) == 0)
2205 (sc->twa_aen_queue[event_index])->retrieved =
2206 TWA_AEN_RETRIEVED;
2208 break;
2209
2210
2211 case TW_CL_IOCTL_GET_NEXT_EVENT:
2212
2213 user_buf->twa_drvr_pkt.status = 0;
2214 if (sc->twa_aen_queue_wrapped) {
2215
2216 if (sc->twa_aen_queue_overflow) {
2217 /*
2218 * The aen queue has wrapped, even before some
2219 * events have been retrieved. Let the caller
2220 * know that he missed out on some AEN's.
2221 */
2222 user_buf->twa_drvr_pkt.status =
2223 TWA_ERROR_AEN_OVERFLOW;
2224 sc->twa_aen_queue_overflow = FALSE;
2225 }
2226 start_index = sc->twa_aen_head;
2227 } else {
2228 if (sc->twa_aen_head == sc->twa_aen_tail) {
2229 user_buf->twa_drvr_pkt.status =
2230 TWA_ERROR_AEN_NO_EVENTS;
2231 break;
2232 }
2233 start_index = sc->twa_aen_tail; /* = 0 */
2234 }
2235 if ((error = copyin(user_buf->pdata, &event_buf,
2236 sizeof(struct tw_cl_event_packet))) != 0)
2237 break;
2238 event_index = (start_index + event_buf.sequence_id -
2239 (sc->twa_aen_queue[start_index])->sequence_id + 1)
2240 % TWA_Q_LENGTH;
2241
2242 if (! ((sc->twa_aen_queue[event_index])->sequence_id >
2243 event_buf.sequence_id)) {
2244 if (user_buf->twa_drvr_pkt.status == TWA_ERROR_AEN_OVERFLOW)
2245 sc->twa_aen_queue_overflow = TRUE; /* so we report the overflow next time */
2246 user_buf->twa_drvr_pkt.status =
2247 TWA_ERROR_AEN_NO_EVENTS;
2248 break;
2249 }
2250 if ((error = copyout(sc->twa_aen_queue[event_index], user_buf->pdata,
2251 sizeof(struct tw_cl_event_packet))) == 0)
2252 (sc->twa_aen_queue[event_index])->retrieved =
2253 TWA_AEN_RETRIEVED;
2255 break;
2256
2257
2258 case TW_CL_IOCTL_GET_PREVIOUS_EVENT:
2259
2260 user_buf->twa_drvr_pkt.status = 0;
2261 if (sc->twa_aen_queue_wrapped) {
2262 if (sc->twa_aen_queue_overflow) {
2263 /*
2264 * The aen queue has wrapped, even before some
2265 * events have been retrieved. Let the caller
2266 * know that he missed out on some AEN's.
2267 */
2268 user_buf->twa_drvr_pkt.status =
2269 TWA_ERROR_AEN_OVERFLOW;
2270 sc->twa_aen_queue_overflow = FALSE;
2271 }
2272 start_index = sc->twa_aen_head;
2273 } else {
2274 if (sc->twa_aen_head == sc->twa_aen_tail) {
2275 user_buf->twa_drvr_pkt.status =
2276 TWA_ERROR_AEN_NO_EVENTS;
2277 break;
2278 }
2279 start_index = sc->twa_aen_tail; /* = 0 */
2280 }
2281 if ((error = copyin(user_buf->pdata, &event_buf,
2282 sizeof(struct tw_cl_event_packet))) != 0)
2283 break;
2284 event_index = (start_index + event_buf.sequence_id -
2285 (sc->twa_aen_queue[start_index])->sequence_id - 1) % TWA_Q_LENGTH;
2286 if (! ((sc->twa_aen_queue[event_index])->sequence_id <
2287 event_buf.sequence_id)) {
2288 if (user_buf->twa_drvr_pkt.status == TWA_ERROR_AEN_OVERFLOW)
2289 sc->twa_aen_queue_overflow = TRUE; /* so we report the overflow next time */
2290 user_buf->twa_drvr_pkt.status =
2291 TWA_ERROR_AEN_NO_EVENTS;
2292 break;
2293 }
2294 if ((error = copyout(sc->twa_aen_queue [event_index], user_buf->pdata,
2295 sizeof(struct tw_cl_event_packet))) != 0)
2296 aprint_error("%s: get_previous: Could not copyout to "
2297 "event_buf. error = %x\n", sc->twa_dv.dv_xname, error);
2298 (sc->twa_aen_queue[event_index])->retrieved = TWA_AEN_RETRIEVED;
2299 break;
2300
2301 case TW_CL_IOCTL_GET_LOCK:
2302 {
2303 struct tw_cl_lock_packet twa_lock;
2304
2305 copyin(user_buf->pdata, &twa_lock,
2306 sizeof(struct tw_cl_lock_packet));
2307 s = splbio();
2308 if ((sc->twa_ioctl_lock.lock == TWA_LOCK_FREE) ||
2309 (twa_lock.force_flag) ||
2310 (time.tv_sec >= sc->twa_ioctl_lock.timeout)) {
2311
2312 sc->twa_ioctl_lock.lock = TWA_LOCK_HELD;
2313 sc->twa_ioctl_lock.timeout = time.tv_sec +
2314 (twa_lock.timeout_msec / 1000);
2315 twa_lock.time_remaining_msec = twa_lock.timeout_msec;
2316 user_buf->twa_drvr_pkt.status = 0;
2317 } else {
2318 twa_lock.time_remaining_msec =
2319 (sc->twa_ioctl_lock.timeout - time.tv_sec) *
2320 1000;
2321 user_buf->twa_drvr_pkt.status =
2322 TWA_ERROR_IOCTL_LOCK_ALREADY_HELD;
2323 }
2324 splx(s);
2325 copyout(&twa_lock, user_buf->pdata,
2326 sizeof(struct tw_cl_lock_packet));
2327 break;
2328 }
2329
2330 case TW_CL_IOCTL_RELEASE_LOCK:
2331 s = splbio();
2332 if (sc->twa_ioctl_lock.lock == TWA_LOCK_FREE) {
2333 user_buf->twa_drvr_pkt.status =
2334 TWA_ERROR_IOCTL_LOCK_NOT_HELD;
2335 } else {
2336 sc->twa_ioctl_lock.lock = TWA_LOCK_FREE;
2337 user_buf->twa_drvr_pkt.status = 0;
2338 }
2339 splx(s);
2340 break;
2341
2342 case TW_CL_IOCTL_GET_COMPATIBILITY_INFO:
2343 {
2344 struct tw_cl_compatibility_packet comp_pkt;
2345
2346 memcpy(comp_pkt.driver_version, TWA_DRIVER_VERSION_STRING,
2347 sizeof(TWA_DRIVER_VERSION_STRING));
2348 comp_pkt.working_srl = sc->working_srl;
2349 comp_pkt.working_branch = sc->working_branch;
2350 comp_pkt.working_build = sc->working_build;
2351 user_buf->twa_drvr_pkt.status = 0;
2352
2353 /* Copy compatibility information to user space. */
2354 copyout(&comp_pkt, user_buf->pdata,
2355 min(sizeof(struct tw_cl_compatibility_packet),
2356 user_buf->twa_drvr_pkt.buffer_length));
2357 break;
2358 }
2359
2360 case TWA_IOCTL_GET_UNITNAME: /* WASABI EXTENSION */
2361 {
2362 struct twa_unitname *tn;
2363 struct twa_drive *tdr;
2364
2365 tn = (struct twa_unitname *)data;
2366 /* XXX mutex */
2367 if (tn->tn_unit < 0 || tn->tn_unit >= TWA_MAX_UNITS)
2368 return (EINVAL);
2369 tdr = &sc->sc_units[tn->tn_unit];
2370 if (tdr->td_dev == NULL)
2371 tn->tn_name[0] = '\0';
2372 else
2373 strlcpy(tn->tn_name, tdr->td_dev->dv_xname,
2374 sizeof(tn->tn_name));
2375 return (0);
2376 }
2377
2378 default:
2379 /* Unknown opcode. */
2380 error = ENOTTY;
2381 }
2382
2383 return(error);
2384 }
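/*
 * Illustrative note, not part of the driver: the GET_NEXT/GET_PREVIOUS
 * event ioctls above locate an AEN purely by sequence-id arithmetic.
 * Since twa_aen_queue[] holds consecutive events starting at start_index,
 * the slot for a wanted sequence id is
 *
 *	index = (start_index + wanted_seq
 *	    - (sc->twa_aen_queue[start_index])->sequence_id) % TWA_Q_LENGTH;
 *
 * with +1 or -1 folded in for "next" or "previous" ('wanted_seq' is a
 * hypothetical name).  The sequence-id comparison that follows is what
 * rejects requests falling outside the window still held in the queue.
 */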
2385
2386
2387 /*
2388 * Function name: twa_get_param
2389 * Description: Get a firmware parameter.
2390 *
2391 * Input: sc -- ptr to per ctlr structure
2392 * table_id -- parameter table #
2393 * param_id -- index of the parameter in the table
2394 * param_size -- size of the parameter in bytes
2395 * callback -- ptr to function, if any, to be called
2396 * back on completion; NULL if no callback.
2397 * Output: param -- ptr to ptr, filled in with the retrieved parameter
2398 * Return value: 0 -- success
2399 * non-zero-- failure
2400 */
2401 static int
2402 twa_get_param(struct twa_softc *sc, int table_id, int param_id,
2403 size_t param_size, void (* callback)(struct twa_request *tr),
2404 struct twa_param_9k **param)
2405 {
2406 int rv = 0;
2407 struct twa_request *tr;
2408 union twa_command_7k *cmd;
2409
2410 /* Get a request packet. */
2411 if ((tr = twa_get_request(sc, 0)) == NULL) {
2412 rv = EAGAIN;
2413 goto out;
2414 }
2415
2416 tr->tr_cmd_pkt_type |= TWA_CMD_PKT_TYPE_INTERNAL;
2417
2418 /* Allocate memory to read data into. */
2419 if ((*param = (struct twa_param_9k *)
2420 malloc(TWA_SECTOR_SIZE, M_DEVBUF, M_NOWAIT)) == NULL) {
2421 rv = ENOMEM;
2422 goto out;
2423 }
2424
2425 memset(*param, 0, sizeof(struct twa_param_9k) - 1 + param_size);
2426 tr->tr_data = *param;
2427 tr->tr_length = TWA_SECTOR_SIZE;
2428 tr->tr_flags = TWA_CMD_DATA_IN | TWA_CMD_DATA_OUT;
2429
2430 /* Build the cmd pkt. */
2431 cmd = &(tr->tr_command->command.cmd_pkt_7k);
2432
2433 tr->tr_command->cmd_hdr.header_desc.size_header = 128;
2434
2435 cmd->param.opcode = TWA_OP_GET_PARAM;
2436 cmd->param.sgl_offset = 2;
2437 cmd->param.size = 2;
2438 cmd->param.request_id = tr->tr_request_id;
2439 cmd->param.unit = 0;
2440 cmd->param.param_count = 1;
2441
2442 /* Specify which parameter we need. */
2443 (*param)->table_id = table_id | TWA_9K_PARAM_DESCRIPTOR;
2444 (*param)->parameter_id = param_id;
2445 (*param)->parameter_size_bytes = param_size;
2446
2447 /* Submit the command. */
2448 if (callback == NULL) {
2449 /* There's no call back; wait till the command completes. */
2450 rv = twa_immediate_request(tr, TWA_REQUEST_TIMEOUT_PERIOD);
2451
2452 if (rv != 0)
2453 goto out;
2454
2455 if ((rv = cmd->param.status) != 0) {
2456 /* twa_drain_complete_queue will have done the unmapping */
2457 goto out;
2458 }
2459 twa_release_request(tr);
2460 return (rv);
2461 } else {
2462 /* There's a call back. Simply submit the command. */
2463 tr->tr_callback = callback;
2464 rv = twa_map_request(tr);
2465 return (rv);
2466 }
2467 out:
2468 if (tr)
2469 twa_release_request(tr);
2470 return(rv);
2471 }
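/*
 * Illustrative sketch, not part of the driver: a typical synchronous use
 * of twa_get_param() is the one in twa_describe_controller() below,
 * e.g. reading the one-byte port count:
 *
 *	struct twa_param_9k *p = NULL;
 *	u_int8_t ports;
 *
 *	if (twa_get_param(sc, TWA_PARAM_CONTROLLER,
 *	    TWA_PARAM_CONTROLLER_PortCount, 1, NULL, &p) == 0) {
 *		ports = *(u_int8_t *)(p->data);
 *		free(p, M_DEVBUF);
 *	}
 *
 * The caller owns (and must free) the returned parameter buffer.
 */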
2472
2473
2474 /*
2475 * Function name: twa_set_param
2476 * Description: Set a firmware parameter.
2477 *
2478 * Input: sc -- ptr to per ctlr structure
2479 * table_id -- parameter table #
2480 * param_id -- index of the parameter in the table
2481 * param_size -- size of the parameter in bytes
2482 * callback -- ptr to function, if any, to be called
2483 * back on completion; NULL if no callback.
2484 * Output: None
2485 * Return value: 0 -- success
2486 * non-zero-- failure
2487 */
2488 static int
2489 twa_set_param(struct twa_softc *sc, int table_id,
2490 int param_id, int param_size, void *data,
2491 void (* callback)(struct twa_request *tr))
2492 {
2493 struct twa_request *tr;
2494 union twa_command_7k *cmd;
2495 struct twa_param_9k *param = NULL;
2496 int error = ENOMEM;
2497
2498 tr = twa_get_request(sc, 0);
2499 if (tr == NULL)
2500 return (EAGAIN);
2501
2502 tr->tr_cmd_pkt_type |= TWA_CMD_PKT_TYPE_INTERNAL;
2503
2504 /* Allocate memory to send data using. */
2505 if ((param = (struct twa_param_9k *)
2506 malloc(TWA_SECTOR_SIZE, M_DEVBUF, M_NOWAIT)) == NULL)
2507 goto out;
2508 memset(param, 0, sizeof(struct twa_param_9k) - 1 + param_size);
2509 tr->tr_data = param;
2510 tr->tr_length = TWA_SECTOR_SIZE;
2511 tr->tr_flags = TWA_CMD_DATA_IN | TWA_CMD_DATA_OUT;
2512
2513 /* Build the cmd pkt. */
2514 cmd = &(tr->tr_command->command.cmd_pkt_7k);
2515
2516 tr->tr_command->cmd_hdr.header_desc.size_header = 128;
2517
2518 cmd->param.opcode = TWA_OP_SET_PARAM;
2519 cmd->param.sgl_offset = 2;
2520 cmd->param.size = 2;
2521 cmd->param.request_id = tr->tr_request_id;
2522 cmd->param.unit = 0;
2523 cmd->param.param_count = 1;
2524
2525 /* Specify which parameter we want to set. */
2526 param->table_id = table_id | TWA_9K_PARAM_DESCRIPTOR;
2527 param->parameter_id = param_id;
2528 param->parameter_size_bytes = param_size;
2529 memcpy(param->data, data, param_size);
2530
2531 /* Submit the command. */
2532 if (callback == NULL) {
2533 /* There's no call back; wait till the command completes. */
2534 error = twa_immediate_request(tr, TWA_REQUEST_TIMEOUT_PERIOD);
2535 if (error == ETIMEDOUT)
2536 return(error); /* clean-up done by twa_immediate_request */
2537 if (error)
2538 goto out;
2539 if ((error = cmd->param.status)) {
2540 goto out; /* twa_drain_complete_queue will have done the unmapping */
2541 }
2542 free(param, M_DEVBUF);
2543 twa_release_request(tr);
2544 return(error);
2545 } else {
2546 /* There's a call back. Simply submit the command. */
2547 tr->tr_callback = callback;
2548 if ((error = twa_map_request(tr)))
2549 goto out;
2550
2551 return (0);
2552 }
2553 out:
2554 if (param)
2555 free(param, M_DEVBUF);
2556 if (tr)
2557 twa_release_request(tr);
2558 return(error);
2559 }
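/*
 * Illustrative sketch, not part of the driver: the asynchronous form of
 * twa_set_param() is used by twa_enqueue_aen() below to push the host
 * time to the controller, roughly
 *
 *	sync_time = (time.tv_sec - (3 * 86400)) % 604800;
 *	twa_set_param(sc, TWA_PARAM_TIME_TABLE, TWA_PARAM_TIME_SchedulerTime,
 *	    4, &sync_time, twa_aen_callback);
 *
 * i.e. seconds since the most recent Sunday midnight passed as a 4-byte
 * parameter; with a non-NULL callback the call returns as soon as the
 * command has been mapped and queued.
 */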
2560
2561
2562 /*
2563 * Function name: twa_init_connection
2564 * Description: Send init_connection cmd to firmware
2565 *
2566 * Input: sc -- ptr to per ctlr structure
2567 * message_credits -- max # of requests that we might send
2568 * down simultaneously. This will be
2569 * typically set to 256 at init-time or
2570 * after a reset, and to 1 at shutdown-time
2571 * set_features -- indicates if we intend to use 64-bit
2572 * sg, also indicates if we want to do a
2573 * basic or an extended init_connection;
2574 *
2575 * Note: The following input/output parameters are valid, only in case of an
2576 * extended init_connection:
2577 *
2578 * current_fw_srl -- srl of fw we are bundled
2579 * with, if any; 0 otherwise
2580 * current_fw_arch_id -- arch_id of fw we are bundled
2581 * with, if any; 0 otherwise
2582 * current_fw_branch -- branch # of fw we are bundled
2583 * with, if any; 0 otherwise
2584 * current_fw_build -- build # of fw we are bundled
2585 * with, if any; 0 otherwise
2586 * Output: fw_on_ctlr_srl -- srl of fw on ctlr
2587 * fw_on_ctlr_arch_id -- arch_id of fw on ctlr
2588 * fw_on_ctlr_branch -- branch # of fw on ctlr
2589 * fw_on_ctlr_build -- build # of fw on ctlr
2590 * init_connect_result -- result bitmap of fw response
2591 * Return value: 0 -- success
2592 * non-zero-- failure
2593 */
2594 static int
2595 twa_init_connection(struct twa_softc *sc, u_int16_t message_credits,
2596 u_int32_t set_features, u_int16_t current_fw_srl,
2597 u_int16_t current_fw_arch_id, u_int16_t current_fw_branch,
2598 u_int16_t current_fw_build, u_int16_t *fw_on_ctlr_srl,
2599 u_int16_t *fw_on_ctlr_arch_id, u_int16_t *fw_on_ctlr_branch,
2600 u_int16_t *fw_on_ctlr_build, u_int32_t *init_connect_result)
2601 {
2602 struct twa_request *tr;
2603 struct twa_command_init_connect *init_connect;
2604 int error = 1;
2605
2606 /* Get a request packet. */
2607 if ((tr = twa_get_request(sc, 0)) == NULL)
2608 goto out;
2609 tr->tr_cmd_pkt_type |= TWA_CMD_PKT_TYPE_INTERNAL;
2610 /* Build the cmd pkt. */
2611 init_connect = &(tr->tr_command->command.cmd_pkt_7k.init_connect);
2612
2613 tr->tr_command->cmd_hdr.header_desc.size_header = 128;
2614
2615 init_connect->opcode = TWA_OP_INIT_CONNECTION;
2616 init_connect->request_id = tr->tr_request_id;
2617 init_connect->message_credits = message_credits;
2618 init_connect->features = set_features;
2619 if (TWA_64BIT_ADDRESSES) {
2620 printf("64 bit addressing supported for scatter/gather list\n");
2621 init_connect->features |= TWA_64BIT_SG_ADDRESSES;
2622 }
2623 if (set_features & TWA_EXTENDED_INIT_CONNECT) {
2624 /*
2625 * Fill in the extra fields needed for
2626 * an extended init_connect.
2627 */
2628 init_connect->size = 6;
2629 init_connect->fw_srl = current_fw_srl;
2630 init_connect->fw_arch_id = current_fw_arch_id;
2631 init_connect->fw_branch = current_fw_branch;
2632 init_connect->fw_build = current_fw_build;
} else
2633 init_connect->size = 3;
2634
2635 /* Submit the command, and wait for it to complete. */
2636 error = twa_immediate_request(tr, TWA_REQUEST_TIMEOUT_PERIOD);
2637 if (error == ETIMEDOUT)
2638 return(error); /* clean-up done by twa_immediate_request */
2639 if (error)
2640 goto out;
2641 if ((error = init_connect->status)) {
2642 goto out; /* twa_drain_complete_queue will have done the unmapping */
2643 }
2644 if (set_features & TWA_EXTENDED_INIT_CONNECT) {
2645 *fw_on_ctlr_srl = init_connect->fw_srl;
2646 *fw_on_ctlr_arch_id = init_connect->fw_arch_id;
2647 *fw_on_ctlr_branch = init_connect->fw_branch;
2648 *fw_on_ctlr_build = init_connect->fw_build;
2649 *init_connect_result = init_connect->result;
2650 }
2651 twa_release_request(tr);
2652 return(error);
2653
2654 out:
2655 if (tr)
2656 twa_release_request(tr);
2657 return(error);
2658 }
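/*
 * Illustrative sketch, not part of the driver: twa_reset() below uses the
 * basic (non-extended) form of this call to re-establish the logical
 * connection after a soft reset,
 *
 *	twa_init_connection(sc, TWA_INIT_MESSAGE_CREDITS, 0, 0, 0, 0, 0,
 *	    NULL, NULL, NULL, NULL, NULL);
 *
 * i.e. no TWA_EXTENDED_INIT_CONNECT feature bit, so none of the firmware
 * SRL/branch/build in/out parameters are used and they may all be passed
 * as 0 or NULL.
 */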
2659
2660
2661 static int
2662 twa_reset(struct twa_softc *sc)
2663 {
2664 int s;
2665 int error = 0;
2666
2667 /*
2668 * Disable interrupts from the controller, and mask any
2669 * accidental entry into our interrupt handler.
2670 */
2671 twa_outl(sc, TWA_CONTROL_REGISTER_OFFSET,
2672 TWA_CONTROL_DISABLE_INTERRUPTS);
2673
2674 s = splbio();
2675
2676 /* Soft reset the controller. */
2677 if ((error = twa_soft_reset(sc)))
2678 goto out;
2679
2680 /* Re-establish logical connection with the controller. */
2681 if ((error = twa_init_connection(sc, TWA_INIT_MESSAGE_CREDITS,
2682 0, 0, 0, 0, 0,
2683 NULL, NULL, NULL, NULL, NULL))) {
2684 goto out;
2685 }
2686 /*
2687 * Complete all requests in the complete queue; error back all requests
2688 * in the busy queue. Any internal requests will be simply freed.
2689 * Re-submit any requests in the pending queue.
2690 */
2691 twa_drain_busy_queue(sc);
2692
2693 out:
2694 splx(s);
2695 /*
2696 * Enable interrupts, and also clear attention and response interrupts.
2697 */
2698 twa_outl(sc, TWA_CONTROL_REGISTER_OFFSET,
2699 TWA_CONTROL_CLEAR_ATTENTION_INTERRUPT |
2700 TWA_CONTROL_UNMASK_RESPONSE_INTERRUPT |
2701 TWA_CONTROL_ENABLE_INTERRUPTS);
2702 return(error);
2703 }
2704
2705
2706 static int
2707 twa_soft_reset(struct twa_softc *sc)
2708 {
2709 u_int32_t status_reg;
2710
2711 twa_outl(sc, TWA_CONTROL_REGISTER_OFFSET,
2712 TWA_CONTROL_ISSUE_SOFT_RESET |
2713 TWA_CONTROL_CLEAR_HOST_INTERRUPT |
2714 TWA_CONTROL_CLEAR_ATTENTION_INTERRUPT |
2715 TWA_CONTROL_MASK_COMMAND_INTERRUPT |
2716 TWA_CONTROL_MASK_RESPONSE_INTERRUPT |
2717 TWA_CONTROL_DISABLE_INTERRUPTS);
2718
2719 if (twa_wait_status(sc, TWA_STATUS_MICROCONTROLLER_READY |
2720 TWA_STATUS_ATTENTION_INTERRUPT, 30)) {
2721 aprint_error("%s: no attention interrupt after reset.\n",
2722 sc->twa_dv.dv_xname);
2723 return(1);
2724 }
2725 twa_outl(sc, TWA_CONTROL_REGISTER_OFFSET,
2726 TWA_CONTROL_CLEAR_ATTENTION_INTERRUPT);
2727
2728 if (twa_drain_response_queue(sc)) {
2729 aprint_error("%s: cannot drain response queue.\n",sc->twa_dv.dv_xname);
2730 return(1);
2731 }
2732 if (twa_drain_aen_queue(sc)) {
2733 aprint_error("%s: cannot drain AEN queue.\n", sc->twa_dv.dv_xname);
2734 return(1);
2735 }
2736 if (twa_find_aen(sc, TWA_AEN_SOFT_RESET)) {
2737 aprint_error("%s: reset not reported by controller.\n",
2738 sc->twa_dv.dv_xname);
2739 return(1);
2740 }
2741 status_reg = twa_inl(sc, TWA_STATUS_REGISTER_OFFSET);
2742 if (TWA_STATUS_ERRORS(status_reg) ||
2743 twa_check_ctlr_state(sc, status_reg)) {
2744 aprint_error("%s: controller errors detected.\n", sc->twa_dv.dv_xname);
2745 return(1);
2746 }
2747 return(0);
2748 }
2749
2750
2751 static int
2752 twa_wait_status(struct twa_softc *sc, u_int32_t status, u_int32_t timeout)
2753 {
2754 u_int32_t status_reg;
2755 
2756 /* Poll in 100ms steps until 'timeout' (in seconds) expires. */
2757 for (timeout *= 10; timeout != 0; timeout--) {
2758 status_reg = twa_inl(sc, TWA_STATUS_REGISTER_OFFSET);
2759 if ((status_reg & status) == status)/* got the required bit(s)? */
2760 return(0);
2761 DELAY(100000);
2762 }
2763 
2764 return(1);
2765 }
2774
2775
2776 static int
2777 twa_fetch_aen(struct twa_softc *sc)
2778 {
2779 struct twa_request *tr;
2780 int s, error = 0;
2781
2782 if ((tr = twa_get_request(sc, TWA_CMD_AEN)) == NULL)
2783 return(EIO);
2784 
2785 s = splbio();
2786 tr->tr_cmd_pkt_type |= TWA_CMD_PKT_TYPE_INTERNAL;
2787 tr->tr_callback = twa_aen_callback;
2788 tr->tr_data = malloc(TWA_SECTOR_SIZE, M_DEVBUF, M_NOWAIT);
2789 if (twa_request_sense(tr, 0) != 0) {
2790 if (tr->tr_data)
2791 free(tr->tr_data, M_DEVBUF);
2792 twa_release_request(tr);
2793 error = 1;
2794 }
2795 splx(s);
2796
2797 return(error);
2798 }
2799
2800
2801
2802 /*
2803 * Function name: twa_aen_callback
2804 * Description: Callback for requests to fetch AEN's.
2805 *
2806 * Input: tr -- ptr to completed request pkt
2807 * Output: None
2808 * Return value: None
2809 */
2810 static void
2811 twa_aen_callback(struct twa_request *tr)
2812 {
2813 int i;
2814 int fetch_more_aens = 0;
2815 struct twa_softc *sc = tr->tr_sc;
2816 struct twa_command_header *cmd_hdr =
2817 (struct twa_command_header *)(tr->tr_data);
2818 struct twa_command_9k *cmd =
2819 &(tr->tr_command->command.cmd_pkt_9k);
2820
2821 if (! cmd->status) {
2822 if ((tr->tr_cmd_pkt_type & TWA_CMD_PKT_TYPE_9K) &&
2823 (cmd->cdb[0] == 0x3 /* REQUEST_SENSE */))
2824 if (twa_enqueue_aen(sc, cmd_hdr)
2825 != TWA_AEN_QUEUE_EMPTY)
2826 fetch_more_aens = 1;
2827 } else {
2828 cmd_hdr->err_specific_desc[sizeof(cmd_hdr->err_specific_desc) - 1] = '\0';
2829 for (i = 0; i < 18; i++)
2830 printf("%x\t", tr->tr_command->cmd_hdr.sense_data[i]);
2831
2832 printf("\n"); /* print new line */
2833
2834 for (i = 0; i < 128; i++)
2835 printf("%x\t", ((int8_t *)(tr->tr_data))[i]);
2836 }
2837 if (tr->tr_data)
2838 free(tr->tr_data, M_DEVBUF);
2839 twa_release_request(tr);
2840
2841 if (fetch_more_aens)
2842 twa_fetch_aen(sc);
2843 }
2844
2845
2846 /*
2847 * Function name: twa_enqueue_aen
2848 * Description: Queues AEN's to be supplied to user-space tools on request.
2849 *
2850 * Input: sc -- ptr to per ctlr structure
2851 * cmd_hdr -- ptr to hdr of fw cmd pkt, from where the AEN
2852 * details can be retrieved.
2853 * Output: None
2854 * Return value: AEN code of the event that was processed
2855 */
2856 static uint16_t
2857 twa_enqueue_aen(struct twa_softc *sc, struct twa_command_header *cmd_hdr)
2858 {
2859 int rv, s;
2860 struct tw_cl_event_packet *event;
2861 uint16_t aen_code;
2862 unsigned long sync_time;
2863
2864 s = splbio();
2865 aen_code = cmd_hdr->status_block.error;
2866
2867 switch (aen_code) {
2868 case TWA_AEN_SYNC_TIME_WITH_HOST:
2869
2870 sync_time = (time.tv_sec - (3 * 86400)) % 604800;
2871 rv = twa_set_param(sc, TWA_PARAM_TIME_TABLE,
2872 TWA_PARAM_TIME_SchedulerTime, 4,
2873 &sync_time, twa_aen_callback);
2874 #ifdef DIAGNOSTIC
2875 if (rv != 0)
2876 printf("%s: unable to sync time with ctlr\n",
2877 sc->twa_dv.dv_xname);
2878 #endif
2879 break;
2880
2881 case TWA_AEN_QUEUE_EMPTY:
2882 break;
2883
2884 default:
2885 /* Queue the event. */
2886 event = sc->twa_aen_queue[sc->twa_aen_head];
2887 if (event->retrieved == TWA_AEN_NOT_RETRIEVED)
2888 sc->twa_aen_queue_overflow = TRUE;
2889 event->severity =
2890 cmd_hdr->status_block.substatus_block.severity;
2891 event->time_stamp_sec = time.tv_sec;
2892 event->aen_code = aen_code;
2893 event->retrieved = TWA_AEN_NOT_RETRIEVED;
2894 event->sequence_id = ++(sc->twa_current_sequence_id);
2895 cmd_hdr->err_specific_desc[sizeof(cmd_hdr->err_specific_desc) - 1] = '\0';
2896 event->parameter_len = strlen(cmd_hdr->err_specific_desc);
2897 memcpy(event->parameter_data, cmd_hdr->err_specific_desc,
2898 event->parameter_len);
2899
2900 if (event->severity < TWA_AEN_SEVERITY_DEBUG) {
2901 printf("%s: AEN 0x%04X: %s: %s: %s\n",
2902 sc->twa_dv.dv_xname,
2903 aen_code,
2904 twa_aen_severity_table[event->severity],
2905 twa_find_msg_string(twa_aen_table, aen_code),
2906 event->parameter_data);
2907 }
2908
2909 if ((sc->twa_aen_head + 1) == TWA_Q_LENGTH)
2910 sc->twa_aen_queue_wrapped = TRUE;
2911 sc->twa_aen_head = (sc->twa_aen_head + 1) % TWA_Q_LENGTH;
2912 break;
2913 } /* switch */
2914 splx(s);
2915
2916 return (aen_code);
2917 }
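/*
 * Illustrative note, not part of the driver: the AEN bookkeeping written
 * here and read back by the TW_CL_IOCTL_GET_*_EVENT cases in twaioctl()
 * is: twa_aen_head is the circular write index, twa_aen_tail stays at 0,
 * twa_aen_queue_wrapped records that the head has gone around at least
 * once (so the oldest entry then lives at twa_aen_head rather than at
 * twa_aen_tail), and twa_aen_queue_overflow is raised when a slot still
 * marked TWA_AEN_NOT_RETRIEVED is overwritten, i.e. an event was lost
 * before any reader fetched it.
 */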
2918
2919
2920
2921 /*
2922 * Function name: twa_find_aen
2923 * Description: Reports whether a given AEN ever occurred.
2924 *
2925 * Input: sc -- ptr to per ctlr structure
2926 * aen_code-- AEN to look for
2927 * Output: None
2928 * Return value: 0 -- the AEN was found in the queue
2929 * non-zero-- the AEN was not found
2930 */
2931 static int
2932 twa_find_aen(struct twa_softc *sc, u_int16_t aen_code)
2933 {
2934 u_int32_t last_index;
2935 int s;
2936 int i;
2937
2938 s = splbio();
2939
2940 if (sc->twa_aen_queue_wrapped)
2941 last_index = sc->twa_aen_head;
2942 else
2943 last_index = 0;
2944
2945 i = sc->twa_aen_head;
2946 do {
2947 i = (i + TWA_Q_LENGTH - 1) % TWA_Q_LENGTH;
2948 if ((sc->twa_aen_queue[i])->aen_code == aen_code) {
2949 splx(s);
2950 return(0);
2951 }
2952 } while (i != last_index);
2953
2954 splx(s);
2955 return(1);
2956 }
2957
2958 static void __inline
2959 twa_request_init(struct twa_request *tr, int flags)
2960 {
2961 tr->tr_data = NULL;
2962 tr->tr_real_data = NULL;
2963 tr->tr_length = 0;
2964 tr->tr_real_length = 0;
2965 tr->tr_status = TWA_CMD_SETUP;/* command is in setup phase */
2966 tr->tr_flags = flags;
2967 tr->tr_error = 0;
2968 tr->tr_callback = NULL;
2969 tr->tr_cmd_pkt_type = 0;
2970
2971 /*
2972 * Look at the status field in the command packet to see how
2973 * it completed the last time it was used, and zero out only
2974 * the portions that might have changed. Note that we don't
2975 * care to zero out the sglist.
2976 */
2977 if (tr->tr_command->command.cmd_pkt_9k.status)
2978 memset(tr->tr_command, 0,
2979 sizeof(struct twa_command_header) + 28);
2980 else
2981 memset(&(tr->tr_command->command), 0, 28);
2982 }
2983
2984 struct twa_request *
2985 twa_get_request_wait(struct twa_softc *sc, int flags)
2986 {
2987 struct twa_request *tr;
2988 int s;
2989
2990 KASSERT((flags & TWA_CMD_AEN) == 0);
2991
2992 s = splbio();
2993 while ((tr = TAILQ_FIRST(&sc->twa_free)) == NULL) {
2994 sc->twa_sc_flags |= TWA_STATE_REQUEST_WAIT;
2995 (void) tsleep(&sc->twa_free, PRIBIO, "twaccb", hz);
2996 }
2997 TAILQ_REMOVE(&sc->twa_free, tr, tr_link);
2998
2999 splx(s);
3000
3001 twa_request_init(tr, flags);
3002
3003 return(tr);
3004 }
3005
3006
3007 struct twa_request *
3008 twa_get_request(struct twa_softc *sc, int flags)
3009 {
3010 int s;
3011 struct twa_request *tr;
3012
3013 /* Get a free request packet. */
3014 s = splbio();
3015 if (__predict_false((flags & TWA_CMD_AEN) != 0)) {
3016
3017 if ((sc->sc_twa_request->tr_flags & TWA_CMD_AEN_BUSY) == 0) {
3018 tr = sc->sc_twa_request;
3019 flags |= TWA_CMD_AEN_BUSY;
3020 } else {
3021 splx(s);
3022 return (NULL);
3023 }
3024 } else {
3025 if (__predict_false((tr =
3026 TAILQ_FIRST(&sc->twa_free)) == NULL)) {
3027 splx(s);
3028 return (NULL);
3029 }
3030 TAILQ_REMOVE(&sc->twa_free, tr, tr_link);
3031 }
3032 splx(s);
3033
3034 twa_request_init(tr, flags);
3035
3036 return(tr);
3037 }
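/*
 * Illustrative sketch, not part of the driver: the usual life cycle of a
 * request packet obtained from these routines is
 *
 *	tr = twa_get_request(sc, 0);	(or twa_get_request_wait())
 *	if (tr == NULL)
 *		return (EAGAIN);
 *	tr->tr_cmd_pkt_type |= TWA_CMD_PKT_TYPE_INTERNAL;
 *	... fill in tr->tr_command, tr->tr_data, tr->tr_length ...
 *	error = twa_map_request(tr);
 *
 * twa_map_request() maps any data buffer and hands the command to the
 * controller; once the command completes (or fails), the packet is given
 * back with twa_release_request(tr).  This is the pattern followed by
 * twa_get_param(), twa_set_param() and twa_init_connection() above.
 */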
3038
3039
3040 /*
3041 * Print some information about the controller
3042 */
3043 static void
3044 twa_describe_controller(struct twa_softc *sc)
3045 {
3046 struct twa_param_9k *p[10];
3047 int i, rv = 0;
3048 uint32_t dsize;
3049 uint8_t ports;
3050
3051 memset(p, 0, sizeof(p));
3052
3053 /* Get the port count. */
3054 rv |= twa_get_param(sc, TWA_PARAM_CONTROLLER,
3055 TWA_PARAM_CONTROLLER_PortCount, 1, NULL, &p[0]);
3056
3057 /* get version strings */
3058 rv |= twa_get_param(sc, TWA_PARAM_VERSION, TWA_PARAM_VERSION_FW,
3059 16, NULL, &p[1]);
3060 rv |= twa_get_param(sc, TWA_PARAM_VERSION, TWA_PARAM_VERSION_BIOS,
3061 16, NULL, &p[2]);
3062 rv |= twa_get_param(sc, TWA_PARAM_VERSION, TWA_PARAM_VERSION_Mon,
3063 16, NULL, &p[3]);
3064 rv |= twa_get_param(sc, TWA_PARAM_VERSION, TWA_PARAM_VERSION_PCBA,
3065 8, NULL, &p[4]);
3066 rv |= twa_get_param(sc, TWA_PARAM_VERSION, TWA_PARAM_VERSION_ATA,
3067 8, NULL, &p[5]);
3068 rv |= twa_get_param(sc, TWA_PARAM_VERSION, TWA_PARAM_VERSION_PCI,
3069 8, NULL, &p[6]);
3070 rv |= twa_get_param(sc, TWA_PARAM_DRIVESUMMARY, TWA_PARAM_DRIVESTATUS,
3071 16, NULL, &p[7]);
3072
3073 if (rv) {
3074 /* some error occurred */
3075 aprint_error("%s: failed to fetch version information\n",
3076 sc->twa_dv.dv_xname);
3077 goto bail;
3078 }
3079
3080 ports = *(u_int8_t *)(p[0]->data);
3081
3082 aprint_normal("%s: %d ports, Firmware %.16s, BIOS %.16s\n",
3083 sc->twa_dv.dv_xname, ports,
3084 p[1]->data, p[2]->data);
3085
3086 aprint_verbose("%s: Monitor %.16s, PCB %.8s, Achip %.8s, Pchip %.8s\n",
3087 sc->twa_dv.dv_xname,
3088 p[3]->data, p[4]->data,
3089 p[5]->data, p[6]->data);
3090
3091 for (i = 0; i < ports; i++) {
3092
3093 if ((*((char *)(p[7]->data + i)) & TWA_DRIVE_DETECTED) == 0)
3094 continue;
3095
3096 rv = twa_get_param(sc, TWA_PARAM_DRIVE_TABLE,
3097 TWA_PARAM_DRIVEMODELINDEX,
3098 TWA_PARAM_DRIVEMODEL_LENGTH, NULL, &p[8]);
3099
3100 if (rv != 0) {
3101 aprint_error("%s: unable to get drive model for port"
3102 " %d\n", sc->twa_dv.dv_xname, i);
3103 continue;
3104 }
3105
3106 rv = twa_get_param(sc, TWA_PARAM_DRIVE_TABLE,
3107 TWA_PARAM_DRIVESIZEINDEX,
3108 TWA_PARAM_DRIVESIZE_LENGTH, NULL, &p[9]);
3109
3110 if (rv != 0) {
3111 aprint_error("%s: unable to get drive size"
3112 " for port %d\n", sc->twa_dv.dv_xname,
3113 i);
3114 free(p[8], M_DEVBUF);
3115 continue;
3116 }
3117
3118 dsize = *(uint32_t *)(p[9]->data);
3119
3120 aprint_verbose("%s: port %d: %.40s %d MB\n",
3121 sc->twa_dv.dv_xname, i, p[8]->data, dsize / 2048);
3122
3123 if (p[8])
3124 free(p[8], M_DEVBUF);
3125 if (p[9])
3126 free(p[9], M_DEVBUF);
3127 }
3128 bail:
3129 if (p[0])
3130 free(p[0], M_DEVBUF);
3131 if (p[1])
3132 free(p[1], M_DEVBUF);
3133 if (p[2])
3134 free(p[2], M_DEVBUF);
3135 if (p[3])
3136 free(p[3], M_DEVBUF);
3137 if (p[4])
3138 free(p[4], M_DEVBUF);
3139 if (p[5])
3140 free(p[5], M_DEVBUF);
3141 if (p[6])
3142 free(p[6], M_DEVBUF);
3143 }
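/*
 * Illustrative note, not part of the driver: TWA_PARAM_DRIVESIZE appears
 * to be reported in 512-byte blocks, which is why the loop above converts
 * it with
 *
 *	megabytes = dsize / 2048;
 *
 * (2048 blocks of 512 bytes per megabyte).
 */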
3144
3145
3146
3147 /*
3148 * Function name: twa_check_ctlr_state
3149 * Description: Makes sure that the fw status register reports a
3150 * proper status.
3151 *
3152 * Input: sc -- ptr to per ctlr structure
3153 * status_reg -- value in the status register
3154 * Output: None
3155 * Return value: 0 -- no errors
3156 * non-zero-- errors
3157 */
3158 static int
3159 twa_check_ctlr_state(struct twa_softc *sc, u_int32_t status_reg)
3160 {
3161 int result = 0;
3162 struct timeval t1;
3163 static time_t last_warning[2] = {0, 0};
3164
3165 /* Check if the 'micro-controller ready' bit is not set. */
3166 if ((status_reg & TWA_STATUS_EXPECTED_BITS) !=
3167 TWA_STATUS_EXPECTED_BITS) {
3168
3169 microtime(&t1);
3170
3171 last_warning[0] += (5 * 1000 * 100);
3172
3173 if (t1.tv_usec > last_warning[0]) {
3174 microtime(&t1);
3175 last_warning[0] = t1.tv_usec;
3176 }
3177 result = 1;
3178 }
3179
3180 /* Check if any error bits are set. */
3181 if ((status_reg & TWA_STATUS_UNEXPECTED_BITS) != 0) {
3182
3183 microtime(&t1);
3184 last_warning[1] += (5 * 1000 * 100);
3185 if (t1.tv_usec > last_warning[1]) {
3186 microtime(&t1);
3187 last_warning[1] = t1.tv_usec;
3188 }
3189 if (status_reg & TWA_STATUS_PCI_PARITY_ERROR_INTERRUPT) {
3190 aprint_error("%s: clearing PCI parity error "
3191 "re-seat/move/replace card.\n",
3192 sc->twa_dv.dv_xname);
3193 twa_outl(sc, TWA_CONTROL_REGISTER_OFFSET,
3194 TWA_CONTROL_CLEAR_PARITY_ERROR);
3195 pci_conf_write(sc->pc, sc->tag,
3196 PCI_COMMAND_STATUS_REG,
3197 TWA_PCI_CONFIG_CLEAR_PARITY_ERROR);
3198 result = 1;
3199 }
3200 if (status_reg & TWA_STATUS_PCI_ABORT_INTERRUPT) {
3201 aprint_error("%s: clearing PCI abort\n",
3202 sc->twa_dv.dv_xname);
3203 twa_outl(sc, TWA_CONTROL_REGISTER_OFFSET,
3204 TWA_CONTROL_CLEAR_PCI_ABORT);
3205 pci_conf_write(sc->pc, sc->tag,
3206 PCI_COMMAND_STATUS_REG,
3207 TWA_PCI_CONFIG_CLEAR_PCI_ABORT);
3208 result = 1;
3209 }
3210 if (status_reg & TWA_STATUS_QUEUE_ERROR_INTERRUPT) {
3211 aprint_error("%s: clearing controller queue error\n",
3212 sc->twa_dv.dv_xname);
3213 twa_outl(sc, TWA_CONTROL_REGISTER_OFFSET,
3214 TWA_CONTROL_CLEAR_PCI_ABORT);
3215 result = 1;
3216 }
3217 if (status_reg & TWA_STATUS_SBUF_WRITE_ERROR) {
3218 aprint_error("%s: clearing SBUF write error\n",
3219 sc->twa_dv.dv_xname);
3220 twa_outl(sc, TWA_CONTROL_REGISTER_OFFSET,
3221 TWA_CONTROL_CLEAR_SBUF_WRITE_ERROR);
3222 result = 1;
3223 }
3224 if (status_reg & TWA_STATUS_MICROCONTROLLER_ERROR) {
3225 aprint_error("%s: micro-controller error\n",
3226 sc->twa_dv.dv_xname);
3227 result = 1;
3228 }
3229 }
3230 return(result);
3231 }
3232
3233
3234