1 /* $wasabi: twa.c,v 1.25 2006/05/01 15:16:59 simonb Exp $ */
2 /*
3 * Copyright (c) 2004-2006 Wasabi Systems, Inc.
4 * All rights reserved.
5 *
6 * Your Wasabi Systems License Agreement specifies the terms and
7 * conditions for use and redistribution.
8 */
9
10 /*-
11 * Copyright (c) 2004 The NetBSD Foundation, Inc.
12 * All rights reserved.
13 *
14 * This code is derived from software contributed to The NetBSD Foundation
15 * by Jordan Rhody of Wasabi Systems, Inc.
16 *
17 * Redistribution and use in source and binary forms, with or without
18 * modification, are permitted provided that the following conditions
19 * are met:
20 * 1. Redistributions of source code must retain the above copyright
21 * notice, this list of conditions and the following disclaimer.
22 * 2. Redistributions in binary form must reproduce the above copyright
23 * notice, this list of conditions and the following disclaimer in the
24 * documentation and/or other materials provided with the distribution.
25 * 3. All advertising materials mentioning features or use of this software
26 * must display the following acknowledgement:
27 * This product includes software developed by the NetBSD
28 * Foundation, Inc. and its contributors.
29 * 4. Neither the name of The NetBSD Foundation nor the names of its
30 * contributors may be used to endorse or promote products derived
31 * from this software without specific prior written permission.
32 *
33 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
34 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
35 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
36 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
37 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
38 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
39 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
40 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
41 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
42 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
43 * POSSIBILITY OF SUCH DAMAGE.
44 */
45
46 /*-
47 * Copyright (c) 2003-04 3ware, Inc.
48 * Copyright (c) 2000 Michael Smith
49 * Copyright (c) 2000 BSDi
50 * All rights reserved.
51 *
52 * Redistribution and use in source and binary forms, with or without
53 * modification, are permitted provided that the following conditions
54 * are met:
55 * 1. Redistributions of source code must retain the above copyright
56 * notice, this list of conditions and the following disclaimer.
57 * 2. Redistributions in binary form must reproduce the above copyright
58 * notice, this list of conditions and the following disclaimer in the
59 * documentation and/or other materials provided with the distribution.
60 *
61 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
62 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
63 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
64 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
65 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
66 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
67 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
68 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
69 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
70 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
71 * SUCH DAMAGE.
72 *
73 * $FreeBSD: src/sys/dev/twa/twa.c,v 1.2 2004/04/02 15:09:57 des Exp $
74 */
75
76 /*
77 * 3ware driver for 9000 series storage controllers.
78 *
79 * Author: Vinod Kashyap
80 */
81
82 #include <sys/cdefs.h>
83 __KERNEL_RCSID(0, "$wasabi: twa.c,v 1.25 2006/05/01 15:16:59 simonb Exp $");
84
85 #include <sys/param.h>
86 #include <sys/systm.h>
87 #include <sys/kernel.h>
88 #include <sys/device.h>
89 #include <sys/queue.h>
90 #include <sys/proc.h>
91 #include <sys/bswap.h>
92 #include <sys/buf.h>
93 #include <sys/bufq.h>
94 #include <sys/endian.h>
95 #include <sys/malloc.h>
96 #include <sys/conf.h>
97 #include <sys/disk.h>
98 #include <sys/syslog.h>
99
100 #include <uvm/uvm_extern.h>
101
102 #include <machine/bus.h>
103
104 #include <dev/pci/pcireg.h>
105 #include <dev/pci/pcivar.h>
106 #include <dev/pci/pcidevs.h>
107 #include <dev/pci/twareg.h>
108 #include <dev/pci/twavar.h>
109 #include <dev/pci/twaio.h>
110
111 #include <dev/scsipi/scsipi_all.h>
112 #include <dev/scsipi/scsipi_disk.h>
113 #include <dev/scsipi/scsipiconf.h>
114 #include <dev/scsipi/scsi_spc.h>
115
116 #include <dev/ldvar.h>
117
118 #include "locators.h"
119
120 #define PCI_CBIO 0x10
121
122 static int twa_fetch_aen(struct twa_softc *);
123 static void twa_aen_callback(struct twa_request *);
124 static int twa_find_aen(struct twa_softc *sc, u_int16_t);
125 static uint16_t twa_enqueue_aen(struct twa_softc *sc,
126 struct twa_command_header *);
127
128 static void twa_attach(struct device *, struct device *, void *);
129 static void twa_shutdown(void *);
130 static int twa_init_connection(struct twa_softc *, u_int16_t, u_int32_t,
131 u_int16_t, u_int16_t, u_int16_t, u_int16_t, u_int16_t *,
132 u_int16_t *, u_int16_t *, u_int16_t *, u_int32_t *);
133 static int twa_intr(void *);
134 static int twa_match(struct device *, struct cfdata *, void *);
135 static int twa_reset(struct twa_softc *);
136
137 static int twa_print(void *, const char *);
138 static int twa_soft_reset(struct twa_softc *);
139
140 static int twa_check_ctlr_state(struct twa_softc *, u_int32_t);
141 static int twa_get_param(struct twa_softc *, int, int, size_t,
142 void (* callback)(struct twa_request *),
143 struct twa_param_9k **);
144 static int twa_set_param(struct twa_softc *, int, int, int, void *,
145 void (* callback)(struct twa_request *));
146 static void twa_describe_controller(struct twa_softc *);
147 static int twa_wait_status(struct twa_softc *, u_int32_t, u_int32_t);
148 static int twa_done(struct twa_softc *);
149 #if 0
150 static int twa_flash_firmware(struct twa_softc *sc);
151 static int twa_hard_reset(struct twa_softc *sc);
152 #endif
153
154 extern struct cfdriver twa_cd;
155 extern uint32_t twa_fw_img_size;
156 extern uint8_t twa_fw_img[];
157
158 CFATTACH_DECL(twa, sizeof(struct twa_softc),
159 twa_match, twa_attach, NULL, NULL);
160
161 /* AEN messages. */
162 static const struct twa_message twa_aen_table[] = {
163 {0x0000, "AEN queue empty"},
164 {0x0001, "Controller reset occurred"},
165 {0x0002, "Degraded unit detected"},
166 {0x0003, "Controller error occurred"},
167 {0x0004, "Background rebuild failed"},
168 {0x0005, "Background rebuild done"},
169 {0x0006, "Incomplete unit detected"},
170 {0x0007, "Background initialize done"},
171 {0x0008, "Unclean shutdown detected"},
172 {0x0009, "Drive timeout detected"},
173 {0x000A, "Drive error detected"},
174 {0x000B, "Rebuild started"},
175 {0x000C, "Background initialize started"},
176 {0x000D, "Entire logical unit was deleted"},
177 {0x000E, "Background initialize failed"},
178 {0x000F, "SMART attribute exceeded threshold"},
179 {0x0010, "Power supply reported AC under range"},
180 {0x0011, "Power supply reported DC out of range"},
181 {0x0012, "Power supply reported a malfunction"},
182 {0x0013, "Power supply predicted malfunction"},
183 {0x0014, "Battery charge is below threshold"},
184 {0x0015, "Fan speed is below threshold"},
185 {0x0016, "Temperature sensor is above threshold"},
186 {0x0017, "Power supply was removed"},
187 {0x0018, "Power supply was inserted"},
188 {0x0019, "Drive was removed from a bay"},
189 {0x001A, "Drive was inserted into a bay"},
190 {0x001B, "Drive bay cover door was opened"},
191 {0x001C, "Drive bay cover door was closed"},
192 {0x001D, "Product case was opened"},
193 {0x0020, "Prepare for shutdown (power-off)"},
194 {0x0021, "Downgrade UDMA mode to lower speed"},
195 {0x0022, "Upgrade UDMA mode to higher speed"},
196 {0x0023, "Sector repair completed"},
197 {0x0024, "Sbuf memory test failed"},
198 {0x0025, "Error flushing cached write data to disk"},
199 {0x0026, "Drive reported data ECC error"},
200 {0x0027, "DCB has checksum error"},
201 {0x0028, "DCB version is unsupported"},
202 {0x0029, "Background verify started"},
203 {0x002A, "Background verify failed"},
204 {0x002B, "Background verify done"},
205 {0x002C, "Bad sector overwritten during rebuild"},
206 {0x002E, "Replace failed because replacement drive too small"},
207 {0x002F, "Verify failed because array was never initialized"},
208 {0x0030, "Unsupported ATA drive"},
209 {0x0031, "Synchronize host/controller time"},
210 {0x0032, "Spare capacity is inadequate for some units"},
211 {0x0033, "Background migration started"},
212 {0x0034, "Background migration failed"},
213 {0x0035, "Background migration done"},
214 {0x0036, "Verify detected and fixed data/parity mismatch"},
215 {0x0037, "SO-DIMM incompatible"},
216 {0x0038, "SO-DIMM not detected"},
217 {0x0039, "Corrected Sbuf ECC error"},
218 {0x003A, "Drive power on reset detected"},
219 {0x003B, "Background rebuild paused"},
220 {0x003C, "Background initialize paused"},
221 {0x003D, "Background verify paused"},
222 {0x003E, "Background migration paused"},
223 {0x003F, "Corrupt flash file system detected"},
224 {0x0040, "Flash file system repaired"},
225 {0x0041, "Unit number assignments were lost"},
226 {0x0042, "Error during read of primary DCB"},
227 {0x0043, "Latent error found in backup DCB"},
228 {0x0044, "Battery voltage is normal"},
229 {0x0045, "Battery voltage is low"},
230 {0x0046, "Battery voltage is high"},
231 {0x0047, "Battery voltage is too low"},
232 {0x0048, "Battery voltage is too high"},
233 {0x0049, "Battery temperature is normal"},
234 {0x004A, "Battery temperature is low"},
235 {0x004B, "Battery temperature is high"},
236 {0x004C, "Battery temperature is too low"},
237 {0x004D, "Battery temperature is too high"},
238 {0x004E, "Battery capacity test started"},
239 {0x004F, "Cache synchronization skipped"},
240 {0x0050, "Battery capacity test completed"},
241 {0x0051, "Battery health check started"},
242 {0x0052, "Battery health check completed"},
243 {0x0053, "Need to do a capacity test"},
244 {0x0054, "Charge termination voltage is at high level"},
245 {0x0055, "Battery charging started"},
246 {0x0056, "Battery charging completed"},
247 {0x0057, "Battery charging fault"},
248 {0x0058, "Battery capacity is below warning level"},
249 {0x0059, "Battery capacity is below error level"},
250 {0x005A, "Battery is present"},
251 {0x005B, "Battery is not present"},
252 {0x005C, "Battery is weak"},
253 {0x005D, "Battery health check failed"},
254 {0x005E, "Cache synchronized after power fail"},
255 {0x005F, "Cache synchronization failed; some data lost"},
256 {0x0060, "Bad cache meta data checksum"},
257 {0x0061, "Bad cache meta data signature"},
258 {0x0062, "Cache meta data restore failed"},
259 {0x0063, "BBU not found after power fail"},
260 {0x00FC, "Recovered/finished array membership update"},
261 {0x00FD, "Handler lockup"},
262 {0x00FE, "Retrying PCI transfer"},
263 {0x00FF, "AEN queue is full"},
264 {0xFFFFFFFF, (char *)NULL}
265 };
266
267 /* AEN severity table. */
268 static const char *twa_aen_severity_table[] = {
269 "None",
270 "ERROR",
271 "WARNING",
272 "INFO",
273 "DEBUG",
274 (char *)NULL
275 };
276
277 /* Error messages. */
278 static const struct twa_message twa_error_table[] = {
279 {0x0100, "SGL entry contains zero data"},
280 {0x0101, "Invalid command opcode"},
281 {0x0102, "SGL entry has unaligned address"},
282 {0x0103, "SGL size does not match command"},
283 {0x0104, "SGL entry has illegal length"},
284 {0x0105, "Command packet is not aligned"},
285 {0x0106, "Invalid request ID"},
286 {0x0107, "Duplicate request ID"},
287 {0x0108, "ID not locked"},
288 {0x0109, "LBA out of range"},
289 {0x010A, "Logical unit not supported"},
290 {0x010B, "Parameter table does not exist"},
291 {0x010C, "Parameter index does not exist"},
292 {0x010D, "Invalid field in CDB"},
293 {0x010E, "Specified port has invalid drive"},
294 {0x010F, "Parameter item size mismatch"},
295 {0x0110, "Failed memory allocation"},
296 {0x0111, "Memory request too large"},
297 {0x0112, "Out of memory segments"},
298 {0x0113, "Invalid address to deallocate"},
299 {0x0114, "Out of memory"},
300 {0x0115, "Out of heap"},
301 {0x0120, "Double degrade"},
302 {0x0121, "Drive not degraded"},
303 {0x0122, "Reconstruct error"},
304 {0x0123, "Replace not accepted"},
305 {0x0124, "Replace drive capacity too small"},
306 {0x0125, "Sector count not allowed"},
307 {0x0126, "No spares left"},
308 {0x0127, "Reconstruct error"},
309 {0x0128, "Unit is offline"},
310 {0x0129, "Cannot update status to DCB"},
311 {0x0130, "Invalid stripe handle"},
312 {0x0131, "Handle that was not locked"},
313 {0x0132, "Handle that was not empty"},
314 {0x0133, "Handle has different owner"},
315 {0x0140, "IPR has parent"},
316 {0x0150, "Illegal Pbuf address alignment"},
317 {0x0151, "Illegal Pbuf transfer length"},
318 {0x0152, "Illegal Sbuf address alignment"},
319 {0x0153, "Illegal Sbuf transfer length"},
320 {0x0160, "Command packet too large"},
321 {0x0161, "SGL exceeds maximum length"},
322 {0x0162, "SGL has too many entries"},
323 {0x0170, "Insufficient resources for rebuilder"},
324 {0x0171, "Verify error (data != parity)"},
325 {0x0180, "Requested segment not in directory of this DCB"},
326 {0x0181, "DCB segment has unsupported version"},
327 {0x0182, "DCB segment has checksum error"},
328 {0x0183, "DCB support (settings) segment invalid"},
329 {0x0184, "DCB UDB (unit descriptor block) segment invalid"},
330 {0x0185, "DCB GUID (globally unique identifier) segment invalid"},
331 {0x01A0, "Could not clear Sbuf"},
332 {0x01C0, "Flash identify failed"},
333 {0x01C1, "Flash out of bounds"},
334 {0x01C2, "Flash verify error"},
335 {0x01C3, "Flash file object not found"},
336 {0x01C4, "Flash file already present"},
337 {0x01C5, "Flash file system full"},
338 {0x01C6, "Flash file not present"},
339 {0x01C7, "Flash file size error"},
340 {0x01C8, "Bad flash file checksum"},
341 {0x01CA, "Corrupt flash file system detected"},
342 {0x01D0, "Invalid field in parameter list"},
343 {0x01D1, "Parameter list length error"},
344 {0x01D2, "Parameter item is not changeable"},
345 {0x01D3, "Parameter item is not saveable"},
346 {0x0200, "UDMA CRC error"},
347 {0x0201, "Internal CRC error"},
348 {0x0202, "Data ECC error"},
349 {0x0203, "ADP level 1 error"},
350 {0x0204, "Port timeout"},
351 {0x0205, "Drive power on reset"},
352 {0x0206, "ADP level 2 error"},
353 {0x0207, "Soft reset failed"},
354 {0x0208, "Drive not ready"},
355 {0x0209, "Unclassified port error"},
356 {0x020A, "Drive aborted command"},
357 {0x0210, "Internal CRC error"},
358 {0x0211, "Host PCI bus abort"},
359 {0x0212, "Host PCI parity error"},
360 {0x0213, "Port handler error"},
361 {0x0214, "Token interrupt count error"},
362 {0x0215, "Timeout waiting for PCI transfer"},
363 {0x0216, "Corrected buffer ECC"},
364 {0x0217, "Uncorrected buffer ECC"},
365 {0x0230, "Unsupported command during flash recovery"},
366 {0x0231, "Next image buffer expected"},
367 {0x0232, "Binary image architecture incompatible"},
368 {0x0233, "Binary image has no signature"},
369 {0x0234, "Binary image has bad checksum"},
370 {0x0235, "Image downloaded overflowed buffer"},
371 {0x0240, "I2C device not found"},
372 {0x0241, "I2C transaction aborted"},
373 {0x0242, "SO-DIMM parameter(s) incompatible, using defaults"},
374 {0x0243, "SO-DIMM unsupported"},
375 {0x0248, "SPI transfer status error"},
376 {0x0249, "SPI transfer timeout error"},
377 {0x0250, "Invalid unit descriptor size in CreateUnit"},
378 {0x0251, "Unit descriptor size exceeds data buffer in CreateUnit"},
379 {0x0252, "Invalid value in CreateUnit descriptor"},
380 {0x0253, "Inadequate disk space to support descriptor in CreateUnit"},
381 {0x0254, "Unable to create data channel for this unit descriptor"},
382 {0x0255, "CreateUnit descriptor specifies a drive already in use"},
383 {0x0256, "Unable to write configuration to all disks during CreateUnit"},
384 {0x0257, "CreateUnit does not support this descriptor version"},
385 {0x0258, "Invalid subunit for RAID 0 or 5 in CreateUnit"},
386 {0x0259, "Too many descriptors in CreateUnit"},
387 {0x025A, "Invalid configuration specified in CreateUnit descriptor"},
388 {0x025B, "Invalid LBA offset specified in CreateUnit descriptor"},
389 {0x025C, "Invalid stripelet size specified in CreateUnit descriptor"},
390 {0x0260, "SMART attribute exceeded threshold"},
391 {0xFFFFFFFF, (char *)NULL}
392 };
393
394 struct twa_pci_identity {
395 uint32_t vendor_id;
396 uint32_t product_id;
397 const char *name;
398 };
399
400 static const struct twa_pci_identity pci_twa_products[] = {
401 { PCI_VENDOR_3WARE,
402 PCI_PRODUCT_3WARE_9000,
403 "3ware 9000 series",
404 },
405 { PCI_VENDOR_3WARE,
406 PCI_PRODUCT_3WARE_9550,
407 "3ware 9550SX series",
408 },
409 { 0,
410 0,
411 NULL,
412 },
413 };
414
415
416 static inline void
417 twa_outl(struct twa_softc *sc, int off, u_int32_t val)
418 {
419 bus_space_write_4(sc->twa_bus_iot, sc->twa_bus_ioh, off, val);
420 bus_space_barrier(sc->twa_bus_iot, sc->twa_bus_ioh, off, 4,
421 BUS_SPACE_BARRIER_WRITE);
422 }
423
424
425 static inline u_int32_t twa_inl(struct twa_softc *sc, int off)
426 {
427 bus_space_barrier(sc->twa_bus_iot, sc->twa_bus_ioh, off, 4,
428 BUS_SPACE_BARRIER_WRITE | BUS_SPACE_BARRIER_READ);
429 return (bus_space_read_4(sc->twa_bus_iot, sc->twa_bus_ioh, off));
430 }
431
432 void
433 twa_request_wait_handler(struct twa_request *tr)
434 {
435 wakeup(tr);
436 }
437
438
439 static int
440 twa_match(struct device *parent, struct cfdata *cfdata, void *aux)
441 {
442 int i;
443 struct pci_attach_args *pa = aux;
444 const struct twa_pci_identity *entry = 0;
445
446 if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_3WARE) {
447 for (i = 0; (pci_twa_products[i].product_id); i++) {
448 entry = &pci_twa_products[i];
449 if (entry->product_id == PCI_PRODUCT(pa->pa_id)) {
450 aprint_normal("%s: (rev. 0x%02x)\n",
451 entry->name, PCI_REVISION(pa->pa_class));
452 return (1);
453 }
454 }
455 }
456 return (0);
457 }
458
459
460 static const char *
461 twa_find_msg_string(const struct twa_message *table, u_int16_t code)
462 {
463 int i;
464
465 for (i = 0; table[i].message != NULL; i++)
466 if (table[i].code == code)
467 return(table[i].message);
468
469 return(table[i].message); /* not found; the terminator's message is NULL */
470 }
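
/*
 * Illustrative sketch (not compiled into the driver): how an AEN code can be
 * turned into a log line using twa_find_msg_string() and the tables above.
 * The severity index is assumed to have been extracted from the command
 * header elsewhere (see twa_enqueue_aen()); the function name and the bounds
 * check below are assumptions for this example only.
 */
#if 0
static void
twa_log_aen_example(struct twa_softc *sc, u_int16_t aen_code, int severity)
{
	const char *msg = twa_find_msg_string(twa_aen_table, aen_code);

	if (msg == NULL)
		msg = "Unknown AEN";
	if (severity < 0 || severity > 4)
		severity = 0;	/* "None" */
	printf("%s: AEN 0x%04x [%s]: %s\n", sc->twa_dv.dv_xname,
	    aen_code, twa_aen_severity_table[severity], msg);
}
#endif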
471
472
473 void
474 twa_release_request(struct twa_request *tr)
475 {
476 int s;
477 struct twa_softc *sc;
478
479 sc = tr->tr_sc;
480
481 if ((tr->tr_flags & TWA_CMD_AEN) == 0) {
482 s = splbio();
483 TAILQ_INSERT_TAIL(&tr->tr_sc->twa_free, tr, tr_link);
484 splx(s);
485 if (__predict_false((tr->tr_sc->twa_sc_flags &
486 TWA_STATE_REQUEST_WAIT) != 0)) {
487 tr->tr_sc->twa_sc_flags &= ~TWA_STATE_REQUEST_WAIT;
488 wakeup(&sc->twa_free);
489 }
490 } else
491 tr->tr_flags &= ~TWA_CMD_AEN_BUSY;
492 }
493
494
495 static void
496 twa_unmap_request(struct twa_request *tr)
497 {
498 struct twa_softc *sc = tr->tr_sc;
499 u_int8_t cmd_status;
500
501 /* If the command involved data, unmap that too. */
502 if (tr->tr_data != NULL) {
503 if (tr->tr_cmd_pkt_type & TWA_CMD_PKT_TYPE_9K)
504 cmd_status = tr->tr_command->command.cmd_pkt_9k.status;
505 else
506 cmd_status =
507 tr->tr_command->command.cmd_pkt_7k.generic.status;
508
509 if (tr->tr_flags & TWA_CMD_DATA_OUT) {
510 bus_dmamap_sync(tr->tr_sc->twa_dma_tag, tr->tr_dma_map,
511 0, tr->tr_length, BUS_DMASYNC_POSTREAD);
512 /*
513 * If we are using a bounce buffer, and we are reading
514 * data, copy the real data in.
515 */
516 if (tr->tr_flags & TWA_CMD_DATA_COPY_NEEDED)
517 if (cmd_status == 0)
518 memcpy(tr->tr_real_data, tr->tr_data,
519 tr->tr_real_length);
520 }
521 if (tr->tr_flags & TWA_CMD_DATA_IN)
522 bus_dmamap_sync(tr->tr_sc->twa_dma_tag, tr->tr_dma_map,
523 0, tr->tr_length, BUS_DMASYNC_POSTWRITE);
524
525 bus_dmamap_unload(sc->twa_dma_tag, tr->tr_dma_map);
526 }
527
528 /* Free alignment buffer if it was used. */
529 if (tr->tr_flags & TWA_CMD_DATA_COPY_NEEDED) {
530 uvm_km_free(kmem_map, (vaddr_t)tr->tr_data,
531 tr->tr_length, UVM_KMF_WIRED);
532 tr->tr_data = tr->tr_real_data;
533 tr->tr_length = tr->tr_real_length;
534 }
535 }
536
537
538 /*
539 * Function name: twa_wait_request
540 * Description: Sends down a firmware cmd, and waits for the completion,
541 * but NOT in a tight loop.
542 *
543 * Input: tr -- ptr to request pkt
544 * timeout -- max # of seconds to wait before giving up
545 * Output: None
546 * Return value: 0 -- success
547 * non-zero-- failure
548 */
549 static int
550 twa_wait_request(struct twa_request *tr, u_int32_t timeout)
551 {
552 time_t end_time;
553 struct timeval t1;
554 int s, error;
555
556 tr->tr_flags |= TWA_CMD_SLEEP_ON_REQUEST;
557 tr->tr_callback = twa_request_wait_handler;
558 tr->tr_status = TWA_CMD_BUSY;
559
560 if ((error = twa_map_request(tr)))
561 return (error);
562
563 microtime(&t1);
564 end_time = t1.tv_usec +
565 (timeout * 1000 * 100);
566
567 while (tr->tr_status != TWA_CMD_COMPLETE) {
568 if ((error = tr->tr_error))
569 return(error);
570 if ((error = tsleep(tr, PRIBIO, "twawait", timeout * hz)) == 0)
571 {
572 error = (tr->tr_status != TWA_CMD_COMPLETE);
573 break;
574 }
575 if (error == EWOULDBLOCK) {
576 /*
577 * We will reset the controller only if the request has
578 * already been submitted, so as to not lose the
579 * request packet. If a busy request timed out, the
580 * reset will take care of freeing resources. If a
581 * pending request timed out, we will free resources
582 * for that request, right here. So, the caller is
583 * expected to NOT cleanup when ETIMEDOUT is returned.
584 */
585 if (tr->tr_status != TWA_CMD_PENDING &&
586 tr->tr_status != TWA_CMD_COMPLETE)
587 twa_reset(tr->tr_sc);
588 else {
589 /* Request was never submitted. Clean up. */
590 s = splbio();
591 TAILQ_REMOVE(&tr->tr_sc->twa_pending, tr, tr_link);
592 splx(s);
593
594 twa_unmap_request(tr);
595 if (tr->tr_data)
596 free(tr->tr_data, M_DEVBUF);
597
598 twa_release_request(tr);
599 }
600 return(ETIMEDOUT);
601 }
602 /*
603 * Either the request got completed, or we were woken up by a
604 * signal. Calculate the new timeout, in case it was the latter.
605 */
606 microtime(&t1);
607
608 timeout = (end_time - t1.tv_usec) / (1000 * 100);
609 }
610 twa_unmap_request(tr);
611 return(error);
612 }
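
/*
 * Illustrative sketch (not compiled into the driver): a typical
 * internal-command caller of twa_wait_request(). Note the contract described
 * in the comment above: when ETIMEDOUT is returned, the request and its data
 * buffer have already been dealt with (either freed here or handed to the
 * reset path), so the caller must not clean them up again. The function name,
 * buffer size and flags are assumptions for this example only.
 */
#if 0
static int
twa_example_internal_cmd(struct twa_softc *sc)
{
	struct twa_request *tr;
	int error;

	if ((tr = twa_get_request(sc, 0)) == NULL)
		return (EIO);
	tr->tr_cmd_pkt_type |= TWA_CMD_PKT_TYPE_INTERNAL;
	if ((tr->tr_data = malloc(TWA_SECTOR_SIZE, M_DEVBUF, M_NOWAIT)) == NULL) {
		twa_release_request(tr);
		return (ENOMEM);
	}
	tr->tr_length = TWA_SECTOR_SIZE;
	tr->tr_flags |= TWA_CMD_DATA_IN;
	/* ... fill in the 7K/9K command packet for the request here ... */

	error = twa_wait_request(tr, TWA_REQUEST_TIMEOUT_PERIOD);
	if (error == ETIMEDOUT)
		return (error);		/* resources already cleaned up */
	free(tr->tr_data, M_DEVBUF);
	twa_release_request(tr);
	return (error);
}
#endif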
613
614
615 /*
616 * Function name: twa_immediate_request
617 * Description: Sends down a firmware cmd, and waits for the completion
618 * in a tight loop.
619 *
620 * Input: tr -- ptr to request pkt
621 * timeout -- max # of seconds to wait before giving up
622 * Output: None
623 * Return value: 0 -- success
624 * non-zero-- failure
625 */
626 static int
627 twa_immediate_request(struct twa_request *tr, u_int32_t timeout)
628 {
629 struct timeval t1;
630 int s = 0, error = 0;
631
632 if ((error = twa_map_request(tr))) {
633 return(error);
634 }
635
636 timeout = (timeout * 10000 * 10);
637
638 microtime(&t1);
639
640 timeout += t1.tv_usec;
641
642 do {
643 if ((error = tr->tr_error))
644 return(error);
645 twa_done(tr->tr_sc);
646 if ((tr->tr_status != TWA_CMD_BUSY) &&
647 (tr->tr_status != TWA_CMD_PENDING)) {
648 twa_unmap_request(tr);
649 return(tr->tr_status != TWA_CMD_COMPLETE);
650 }
651 microtime(&t1);
652 } while (t1.tv_usec <= timeout);
653
654 /*
655 * We will reset the controller only if the request has
656 * already been submitted, so as to not lose the
657 * request packet. If a busy request timed out, the
658 * reset will take care of freeing resources. If a
659 * pending request timed out, we will free resources
660 * for that request, right here. So, the caller is
661 * expected to NOT cleanup when ETIMEDOUT is returned.
662 */
663 if (tr->tr_status != TWA_CMD_PENDING)
664 twa_reset(tr->tr_sc);
665 else {
666 /* Request was never submitted. Clean up. */
667 s = splbio();
668 TAILQ_REMOVE(&tr->tr_sc->twa_pending, tr, tr_link);
669 splx(s);
670 twa_unmap_request(tr);
671 if (tr->tr_data)
672 free(tr->tr_data, M_DEVBUF);
673
674 twa_release_request(tr);
675 }
676 return(ETIMEDOUT);
677 }
678
679
680 static int
681 twa_inquiry(struct twa_request *tr, int lunid)
682 {
683 int error;
684 struct twa_command_9k *tr_9k_cmd;
685
686 if (tr->tr_data == NULL)
687 return (ENOMEM);
688
689 memset(tr->tr_data, 0, TWA_SECTOR_SIZE);
690
691 tr->tr_length = TWA_SECTOR_SIZE;
692 tr->tr_cmd_pkt_type = TWA_CMD_PKT_TYPE_9K;
693 tr->tr_flags |= TWA_CMD_DATA_IN;
694
695 tr_9k_cmd = &tr->tr_command->command.cmd_pkt_9k;
696
697 tr_9k_cmd->command.opcode = TWA_OP_EXECUTE_SCSI_COMMAND;
698 tr_9k_cmd->unit = lunid;
699 tr_9k_cmd->request_id = tr->tr_request_id;
700 tr_9k_cmd->status = 0;
701 tr_9k_cmd->sgl_offset = 16;
702 tr_9k_cmd->sgl_entries = 1;
703 /* create the CDB here */
704 tr_9k_cmd->cdb[0] = INQUIRY;
705 tr_9k_cmd->cdb[1] = ((lunid << 5) & 0x0e);
706 tr_9k_cmd->cdb[4] = 255;
707
708 /* XXX Pre-set the peripheral qualifier to "LU not present";
709 * the 9000 series does not seem to report NOTPRESENT itself,
710 * so this still needs more investigation.
711 */
712 ((struct scsipi_inquiry_data *)tr->tr_data)->device =
713 SID_QUAL_LU_NOTPRESENT;
714
715 error = twa_immediate_request(tr, TWA_REQUEST_TIMEOUT_PERIOD);
716
717 if (((struct scsipi_inquiry_data *)tr->tr_data)->device ==
718 SID_QUAL_LU_NOTPRESENT)
719 error = 1;
720
721 return (error);
722 }
723
724 static int
725 twa_print_inquiry_data(struct twa_softc *sc,
726 struct scsipi_inquiry_data *scsipi)
727 {
728 printf("%s: %s\n", sc->twa_dv.dv_xname, scsipi->vendor);
729
730 return (1);
731 }
732
733
734 static uint64_t
735 twa_read_capacity(struct twa_request *tr, int lunid)
736 {
737 int error;
738 struct twa_command_9k *tr_9k_cmd;
739 uint64_t array_size = 0LL;
740
741 if (tr->tr_data == NULL)
742 return (0); /* no data buffer; report no capacity rather than an errno */
743
744 memset(tr->tr_data, 0, TWA_SECTOR_SIZE);
745
746 tr->tr_length = TWA_SECTOR_SIZE;
747 tr->tr_cmd_pkt_type = TWA_CMD_PKT_TYPE_9K;
748 tr->tr_flags |= TWA_CMD_DATA_OUT;
749
750 tr_9k_cmd = &tr->tr_command->command.cmd_pkt_9k;
751
752 tr_9k_cmd->command.opcode = TWA_OP_EXECUTE_SCSI_COMMAND;
753 tr_9k_cmd->unit = lunid;
754 tr_9k_cmd->request_id = tr->tr_request_id;
755 tr_9k_cmd->status = 0;
756 tr_9k_cmd->sgl_offset = 16;
757 tr_9k_cmd->sgl_entries = 1;
758 /* create the CDB here */
759 tr_9k_cmd->cdb[0] = READ_CAPACITY_16;
760 tr_9k_cmd->cdb[1] = ((lunid << 5) & 0x0e) | SRC16_SERVICE_ACTION;
761
762 error = twa_immediate_request(tr, TWA_REQUEST_TIMEOUT_PERIOD);
763 #if BYTE_ORDER == BIG_ENDIAN
764 array_size = bswap64(_8btol(((struct scsipi_read_capacity_16_data *)
765 tr->tr_data)->addr) + 1);
766 #else
767 array_size = _8btol(((struct scsipi_read_capacity_16_data *)
768 tr->tr_data)->addr) + 1;
769 #endif
770 return (array_size);
771 }
772
773 static int
774 twa_request_sense(struct twa_request *tr, int lunid)
775 {
776 int error = 1;
777 struct twa_command_9k *tr_9k_cmd;
778
779 if (tr->tr_data == NULL)
780 return (error);
781
782 memset(tr->tr_data, 0, TWA_SECTOR_SIZE);
783
784 tr->tr_length = TWA_SECTOR_SIZE;
785 tr->tr_cmd_pkt_type = TWA_CMD_PKT_TYPE_9K;
786 tr->tr_flags |= TWA_CMD_DATA_OUT;
787
788 tr_9k_cmd = &tr->tr_command->command.cmd_pkt_9k;
789
790 tr_9k_cmd->command.opcode = TWA_OP_EXECUTE_SCSI_COMMAND;
791 tr_9k_cmd->unit = lunid;
792 tr_9k_cmd->request_id = tr->tr_request_id;
793 tr_9k_cmd->status = 0;
794 tr_9k_cmd->sgl_offset = 16;
795 tr_9k_cmd->sgl_entries = 1;
796 /* create the CDB here */
797 tr_9k_cmd->cdb[0] = SCSI_REQUEST_SENSE;
798 tr_9k_cmd->cdb[1] = ((lunid << 5) & 0x0e);
799 tr_9k_cmd->cdb[4] = 255;
800
801 /* XXX AEN notification is called in interrupt context,
802 * so just queue the request and return from the
803 * interrupt as quickly as possible.
804 */
805 if ((tr->tr_flags & TWA_CMD_AEN) != 0)
806 error = twa_map_request(tr);
807 else
808 error = twa_immediate_request(tr, TWA_REQUEST_TIMEOUT_PERIOD);
809
810 return (error);
811 }
812
813
814
815 static int
816 twa_alloc_req_pkts(struct twa_softc *sc, int num_reqs)
817 {
818 struct twa_request *tr;
819 struct twa_command_packet *tc;
820 bus_dma_segment_t seg;
821 size_t max_segs, max_xfer;
822 int i, rv, rseg, size;
823
824 if ((sc->twa_req_buf = malloc(num_reqs * sizeof(struct twa_request),
825 M_DEVBUF, M_NOWAIT)) == NULL)
826 return(ENOMEM);
827
828 size = num_reqs * sizeof(struct twa_command_packet);
829
830 /* Allocate memory for cmd pkts. */
831 if ((rv = bus_dmamem_alloc(sc->twa_dma_tag,
832 size, PAGE_SIZE, 0, &seg,
833 1, &rseg, BUS_DMA_NOWAIT)) != 0){
834 aprint_error("%s: unable to allocate "
835 "command packets, rv = %d\n",
836 sc->twa_dv.dv_xname, rv);
837 return (ENOMEM);
838 }
839
840 if ((rv = bus_dmamem_map(sc->twa_dma_tag,
841 &seg, rseg, size, (caddr_t *)&sc->twa_cmds,
842 BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) != 0) {
843 aprint_error("%s: unable to map commands, rv = %d\n",
844 sc->twa_dv.dv_xname, rv);
845 return (1);
846 }
847
848 if ((rv = bus_dmamap_create(sc->twa_dma_tag,
849 size, num_reqs, size,
850 0, BUS_DMA_NOWAIT, &sc->twa_cmd_map)) != 0) {
851 aprint_error("%s: unable to create command DMA map, "
852 "rv = %d\n", sc->twa_dv.dv_xname, rv);
853 return (ENOMEM);
854 }
855
856 if ((rv = bus_dmamap_load(sc->twa_dma_tag, sc->twa_cmd_map,
857 sc->twa_cmds, size, NULL,
858 BUS_DMA_NOWAIT)) != 0) {
859 aprint_error("%s: unable to load command DMA map, "
860 "rv = %d\n", sc->twa_dv.dv_xname, rv);
861 return (1);
862 }
863
864 if ((uintptr_t)sc->twa_cmds % TWA_ALIGNMENT) {
865 aprint_error("%s: DMA map memory not aligned on %d boundary\n",
866 sc->twa_dv.dv_xname, TWA_ALIGNMENT);
867
868 return (1);
869 }
870 tc = sc->twa_cmd_pkt_buf = (struct twa_command_packet *)sc->twa_cmds;
871 sc->twa_cmd_pkt_phys = sc->twa_cmd_map->dm_segs[0].ds_addr;
872
873 memset(sc->twa_req_buf, 0, num_reqs * sizeof(struct twa_request));
874 memset(sc->twa_cmd_pkt_buf, 0,
875 num_reqs * sizeof(struct twa_command_packet));
876
877 sc->sc_twa_request = sc->twa_req_buf;
878 max_segs = twa_get_maxsegs();
879 max_xfer = twa_get_maxxfer(max_segs);
880
881 for (i = 0; i < num_reqs; i++, tc++) {
882 tr = &(sc->twa_req_buf[i]);
883 tr->tr_command = tc;
884 tr->tr_cmd_phys = sc->twa_cmd_pkt_phys +
885 (i * sizeof(struct twa_command_packet));
886 tr->tr_request_id = i;
887 tr->tr_sc = sc;
888
889 /*
890 * Create a map for data buffers. The maximum transfer size
891 * (max_xfer) should cover the bounce-page needs for data
892 * buffers, since the largest I/O we support is 128KB. If we
893 * supported I/Os larger than 256KB, we would have to create
894 * a second DMA map with an appropriately larger maximum size.
895 */
896 if ((rv = bus_dmamap_create(sc->twa_dma_tag,
897 max_xfer, max_segs, 1, 0, BUS_DMA_NOWAIT,
898 &tr->tr_dma_map)) != 0) {
899 aprint_error("%s: unable to create command "
900 "DMA map, rv = %d\n",
901 sc->twa_dv.dv_xname, rv);
902 return (ENOMEM);
903 }
904 /* Insert request into the free queue. */
905 if (i != 0) {
906 sc->twa_lookup[i] = tr;
907 twa_release_request(tr);
908 } else
909 tr->tr_flags |= TWA_CMD_AEN;
910 }
911 return(0);
912 }
913
914
915 static void
916 twa_recompute_openings(struct twa_softc *sc)
917 {
918 struct twa_drive *td;
919 int unit;
920 int openings;
921
922 if (sc->sc_nunits != 0)
923 openings = ((TWA_Q_LENGTH / 2) / sc->sc_nunits);
924 else
925 openings = 0;
926 if (openings == sc->sc_openings)
927 return;
928 sc->sc_openings = openings;
929
930 #ifdef TWA_DEBUG
931 printf("%s: %d array%s, %d openings per array\n",
932 sc->twa_dv.dv_xname, sc->sc_nunits,
933 sc->sc_nunits == 1 ? "" : "s", sc->sc_openings);
934 #endif
935 for (unit = 0; unit < TWA_MAX_UNITS; unit++) {
936 td = &sc->sc_units[unit];
937 if (td->td_dev != NULL)
938 (*td->td_callbacks->tcb_openings)(td->td_dev,
939 sc->sc_openings);
940 }
941 }
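
/*
 * Worked example for the calculation above (TWA_Q_LENGTH's real value lives
 * in twavar.h; 256 is assumed here purely for illustration): half of the
 * request slots are spread evenly across the configured units, so with 4
 * units each unit gets (256 / 2) / 4 == 32 openings, and a single unit
 * would get 128.
 */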
942
943
944 static int
945 twa_request_bus_scan(struct twa_softc *sc)
946 {
947 struct twa_drive *td;
948 struct twa_request *tr;
949 struct twa_attach_args twaa;
950 int locs[TWACF_NLOCS];
951 int s, unit;
952
953 s = splbio();
954 for (unit = 0; unit < TWA_MAX_UNITS; unit++) {
955
956 if ((tr = twa_get_request(sc, 0)) == NULL) {
957 splx(s);
958 return (EIO);
959 }
960
961 tr->tr_cmd_pkt_type |= TWA_CMD_PKT_TYPE_INTERNAL;
962
963 tr->tr_data = malloc(TWA_SECTOR_SIZE, M_DEVBUF, M_NOWAIT);
964
965 if (tr->tr_data == NULL) {
966 twa_release_request(tr);
967 splx(s);
968 return (ENOMEM);
969 }
970 td = &sc->sc_units[unit];
971
972 if (twa_inquiry(tr, unit) == 0) {
973
974 if (td->td_dev == NULL) {
975 twa_print_inquiry_data(sc,
976 ((struct scsipi_inquiry_data *)tr->tr_data));
977
978 sc->sc_nunits++;
979
980 sc->sc_units[unit].td_size =
981 twa_read_capacity(tr, unit);
982
983 twaa.twaa_unit = unit;
984
985 twa_recompute_openings(sc);
986
987 locs[TWACF_UNIT] = unit;
988
989 sc->sc_units[unit].td_dev =
990 config_found_sm_loc(&sc->twa_dv, "twa", locs,
991 &twaa, twa_print, config_stdsubmatch);
992 }
993 } else {
994 if (td->td_dev != NULL) {
995
996 sc->sc_nunits--;
997
998 (void) config_detach(td->td_dev, DETACH_FORCE);
999 td->td_dev = NULL;
1000 td->td_size = 0;
1001
1002 twa_recompute_openings(sc);
1003 }
1004 }
1005 free(tr->tr_data, M_DEVBUF);
1006
1007 twa_release_request(tr);
1008 }
1009 splx(s);
1010
1011 return (0);
1012 }
1013
1014
1015 static int
1016 twa_start(struct twa_request *tr)
1017 {
1018 struct twa_softc *sc = tr->tr_sc;
1019 u_int32_t status_reg;
1020 int s;
1021 int error;
1022
1023 s = splbio();
1024 /* Check to see if we can post a command. */
1025 status_reg = twa_inl(sc, TWA_STATUS_REGISTER_OFFSET);
1026 if ((error = twa_check_ctlr_state(sc, status_reg)))
1027 goto out;
1028
1029 if (status_reg & TWA_STATUS_COMMAND_QUEUE_FULL) {
1030 if (tr->tr_status != TWA_CMD_PENDING) {
1031 tr->tr_status = TWA_CMD_PENDING;
1032 TAILQ_INSERT_TAIL(&tr->tr_sc->twa_pending,
1033 tr, tr_link);
1034 }
1035 twa_outl(sc, TWA_CONTROL_REGISTER_OFFSET,
1036 TWA_CONTROL_UNMASK_COMMAND_INTERRUPT);
1037 error = EBUSY;
1038 } else {
1039 bus_dmamap_sync(sc->twa_dma_tag, sc->twa_cmd_map,
1040 (caddr_t)tr->tr_command - sc->twa_cmds,
1041 sizeof(struct twa_command_packet),
1042 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
1043
1044 /* Cmd queue is not full. Post the command. */
1045 TWA_WRITE_COMMAND_QUEUE(sc, tr->tr_cmd_phys +
1046 sizeof(struct twa_command_header));
1047
1048 /* Mark the request as currently being processed. */
1049 tr->tr_status = TWA_CMD_BUSY;
1050 /* Move the request into the busy queue. */
1051 TAILQ_INSERT_TAIL(&tr->tr_sc->twa_busy, tr, tr_link);
1052 }
1053 out:
1054 splx(s);
1055 return(error);
1056 }
1057
1058
1059 static int
1060 twa_drain_response_queue(struct twa_softc *sc)
1061 {
1062 union twa_response_queue rq;
1063 u_int32_t status_reg;
1064
1065 for (;;) {
1066 status_reg = twa_inl(sc, TWA_STATUS_REGISTER_OFFSET);
1067 if (twa_check_ctlr_state(sc, status_reg))
1068 return(1);
1069 if (status_reg & TWA_STATUS_RESPONSE_QUEUE_EMPTY)
1070 return(0); /* no more response queue entries */
1071 rq = (union twa_response_queue)twa_inl(sc, TWA_RESPONSE_QUEUE_OFFSET);
1072 }
1073 }
1074
1075
1076 static void
1077 twa_drain_busy_queue(struct twa_softc *sc)
1078 {
1079 struct twa_request *tr;
1080
1081 /* Walk the busy queue. */
1082
1083 while ((tr = TAILQ_FIRST(&sc->twa_busy)) != NULL) {
1084 TAILQ_REMOVE(&sc->twa_busy, tr, tr_link);
1085
1086 twa_unmap_request(tr);
1087 if ((tr->tr_cmd_pkt_type & TWA_CMD_PKT_TYPE_INTERNAL) ||
1088 (tr->tr_cmd_pkt_type & TWA_CMD_PKT_TYPE_IOCTL)) {
1089 /* It's an internal/ioctl request. Simply free it. */
1090 if (tr->tr_data)
1091 free(tr->tr_data, M_DEVBUF);
1092 twa_release_request(tr);
1093 } else {
1094 /* It's a SCSI request. Complete it. */
1095 tr->tr_command->command.cmd_pkt_9k.status = EIO;
1096 if (tr->tr_callback)
1097 tr->tr_callback(tr);
1098 }
1099 }
1100 }
1101
1102
1103 static int
1104 twa_drain_pending_queue(struct twa_softc *sc)
1105 {
1106 struct twa_request *tr;
1107 int s, error = 0;
1108
1109 /*
1110 * Pull requests off the pending queue, and submit them.
1111 */
1112 s = splbio();
1113 while ((tr = TAILQ_FIRST(&sc->twa_pending)) != NULL) {
1114 TAILQ_REMOVE(&sc->twa_pending, tr, tr_link);
1115
1116 if ((error = twa_start(tr))) {
1117 if (error == EBUSY) {
1118 tr->tr_status = TWA_CMD_PENDING;
1119
1120 /* queue at the head */
1121 TAILQ_INSERT_HEAD(&tr->tr_sc->twa_pending,
1122 tr, tr_link);
1123 error = 0;
1124 break;
1125 } else {
1126 if (tr->tr_flags & TWA_CMD_SLEEP_ON_REQUEST) {
1127 tr->tr_error = error;
1128 tr->tr_callback(tr);
1129 error = EIO;
1130 }
1131 }
1132 }
1133 }
1134 splx(s);
1135
1136 return(error);
1137 }
1138
1139
1140 static int
1141 twa_drain_aen_queue(struct twa_softc *sc)
1142 {
1143 int error = 0;
1144 struct twa_request *tr;
1145 struct twa_command_header *cmd_hdr;
1146 struct timeval t1;
1147 u_int32_t timeout;
1148
1149 for (;;) {
1150 if ((tr = twa_get_request(sc, 0)) == NULL) {
1151 error = EIO;
1152 break;
1153 }
1154 tr->tr_cmd_pkt_type |= TWA_CMD_PKT_TYPE_INTERNAL;
1155 tr->tr_callback = NULL;
1156
1157 tr->tr_data = malloc(TWA_SECTOR_SIZE, M_DEVBUF, M_NOWAIT);
1158
1159 if (tr->tr_data == NULL) {
1160 error = 1;
1161 goto out;
1162 }
1163
1164 if (twa_request_sense(tr, 0) != 0) {
1165 error = 1;
1166 break;
1167 }
1168
1169 timeout = (1000/*ms*/ * 100/*us*/ * TWA_REQUEST_TIMEOUT_PERIOD);
1170
1171 microtime(&t1);
1172
1173 timeout += t1.tv_usec;
1174
1175 do {
1176 twa_done(tr->tr_sc);
1177 if (tr->tr_status != TWA_CMD_BUSY)
1178 break;
1179 microtime(&t1);
1180 } while (t1.tv_usec <= timeout);
1181
1182 if (tr->tr_status != TWA_CMD_COMPLETE) {
1183 error = ETIMEDOUT;
1184 break;
1185 }
1186
1187 if ((error = tr->tr_command->command.cmd_pkt_9k.status))
1188 break;
1189
1190 cmd_hdr = (struct twa_command_header *)(tr->tr_data);
1191 if ((cmd_hdr->status_block.error) /* aen_code */
1192 == TWA_AEN_QUEUE_EMPTY)
1193 break;
1194 (void)twa_enqueue_aen(sc, cmd_hdr);
1195
1196 free(tr->tr_data, M_DEVBUF);
1197 twa_release_request(tr);
1198 }
1199 out:
1200 if (tr) {
1201 if (tr->tr_data)
1202 free(tr->tr_data, M_DEVBUF);
1203
1204 twa_release_request(tr);
1205 }
1206 return(error);
1207 }
1208
1209
1210 static int
1211 twa_done(struct twa_softc *sc)
1212 {
1213 union twa_response_queue rq;
1214 struct twa_request *tr;
1215 int s, error = 0;
1216 u_int32_t status_reg;
1217
1218 for (;;) {
1219 status_reg = twa_inl(sc, TWA_STATUS_REGISTER_OFFSET);
1220 if ((error = twa_check_ctlr_state(sc, status_reg)))
1221 break;
1222 if (status_reg & TWA_STATUS_RESPONSE_QUEUE_EMPTY)
1223 break;
1224 /* Response queue is not empty. */
1225 rq = (union twa_response_queue)twa_inl(sc,
1226 TWA_RESPONSE_QUEUE_OFFSET);
1227 tr = sc->sc_twa_request + rq.u.response_id;
1228
1229 /* Unmap the command packet, and any associated data buffer. */
1230 twa_unmap_request(tr);
1231
1232 s = splbio();
1233 tr->tr_status = TWA_CMD_COMPLETE;
1234 TAILQ_REMOVE(&tr->tr_sc->twa_busy, tr, tr_link);
1235 splx(s);
1236
1237 if (tr->tr_callback)
1238 tr->tr_callback(tr);
1239 }
1240 (void)twa_drain_pending_queue(sc);
1241
1242 return(error);
1243 }
1244
1245 /*
1246 * Function name: twa_init_ctlr
1247 * Description: Establishes a logical connection with the controller.
1248 * If bundled with firmware, determines whether or not
1249 * to flash firmware, based on arch_id, fw SRL (Spec.
1250 * Revision Level), branch & build #'s. Also determines
1251 * whether or not the driver is compatible with the
1252 * firmware on the controller, before proceeding to work
1253 * with it.
1254 *
1255 * Input: sc -- ptr to per ctlr structure
1256 * Output: None
1257 * Return value: 0 -- success
1258 * non-zero-- failure
1259 */
1260 static int
1261 twa_init_ctlr(struct twa_softc *sc)
1262 {
1263 u_int16_t fw_on_ctlr_srl = 0;
1264 u_int16_t fw_on_ctlr_arch_id = 0;
1265 u_int16_t fw_on_ctlr_branch = 0;
1266 u_int16_t fw_on_ctlr_build = 0;
1267 u_int32_t init_connect_result = 0;
1268 int error = 0;
1269 #if 0
1270 int8_t fw_flashed = FALSE;
1271 int8_t fw_flash_failed = FALSE;
1272 #endif
1273
1274 /* Wait for the controller to become ready. */
1275 if (twa_wait_status(sc, TWA_STATUS_MICROCONTROLLER_READY,
1276 TWA_REQUEST_TIMEOUT_PERIOD)) {
1277 return(ENXIO);
1278 }
1279 /* Drain the response queue. */
1280 if (twa_drain_response_queue(sc))
1281 return(1);
1282
1283 /* Establish a logical connection with the controller. */
1284 if ((error = twa_init_connection(sc, TWA_INIT_MESSAGE_CREDITS,
1285 TWA_EXTENDED_INIT_CONNECT, TWA_CURRENT_FW_SRL,
1286 TWA_9000_ARCH_ID, TWA_CURRENT_FW_BRANCH,
1287 TWA_CURRENT_FW_BUILD, &fw_on_ctlr_srl,
1288 &fw_on_ctlr_arch_id, &fw_on_ctlr_branch,
1289 &fw_on_ctlr_build, &init_connect_result))) {
1290 return(error);
1291 }
1292 #if 0
1293 if ((init_connect_result & TWA_BUNDLED_FW_SAFE_TO_FLASH) &&
1294 (init_connect_result & TWA_CTLR_FW_RECOMMENDS_FLASH)) {
1295 /*
1296 * The bundled firmware is safe to flash, and the firmware
1297 * on the controller recommends a flash. So, flash!
1298 */
1299 printf("%s: flashing bundled firmware...\n", sc->twa_dv.dv_xname);
1300
1301 if ((error = twa_flash_firmware(sc))) {
1302 fw_flash_failed = TRUE;
1303
1304 printf("%s: unable to flash bundled firmware.\n", sc->twa_dv.dv_xname);
1305 } else {
1306 printf("%s: successfully flashed bundled firmware.\n",
1307 sc->twa_dv.dv_xname);
1308 fw_flashed = TRUE;
1309 }
1310 }
1311 if (fw_flashed) {
1312 /* The firmware was flashed. Have the new image loaded */
1313 error = twa_hard_reset(sc);
1314 if (error == 0)
1315 error = twa_init_ctlr(sc);
1316 /*
1317 * If hard reset of controller failed, we need to return.
1318 * Otherwise, the above recursive call to twa_init_ctlr will
1319 * have completed the rest of the initialization (starting
1320 * from twa_drain_aen_queue below). Don't do it again.
1321 * Just return.
1322 */
1323 return(error);
1324 } else {
1325 /*
1326 * Either we are not bundled with a firmware image, or
1327 * the bundled firmware is not safe to flash,
1328 * or flash failed for some reason. See if we can at
1329 * least work with the firmware on the controller in the
1330 * current mode.
1331 */
1332 if (init_connect_result & TWA_CTLR_FW_COMPATIBLE) {
1333 /* Yes, we can. Make note of the operating mode. */
1334 sc->working_srl = TWA_CURRENT_FW_SRL;
1335 sc->working_branch = TWA_CURRENT_FW_BRANCH;
1336 sc->working_build = TWA_CURRENT_FW_BUILD;
1337 } else {
1338 /*
1339 * No, we can't. See if we can at least work with
1340 * it in the base mode. We should never come here
1341 * if firmware has just been flashed.
1342 */
1343 printf("%s: Driver/Firmware mismatch. Negotiating for base level.\n",
1344 sc->twa_dv.dv_xname);
1345 if ((error = twa_init_connection(sc, TWA_INIT_MESSAGE_CREDITS,
1346 TWA_EXTENDED_INIT_CONNECT, TWA_BASE_FW_SRL,
1347 TWA_9000_ARCH_ID, TWA_BASE_FW_BRANCH,
1348 TWA_BASE_FW_BUILD, &fw_on_ctlr_srl,
1349 &fw_on_ctlr_arch_id, &fw_on_ctlr_branch,
1350 &fw_on_ctlr_build, &init_connect_result))) {
1351 printf("%s: can't initialize connection in base mode.\n",
1352 sc->twa_dv.dv_xname);
1353 return(error);
1354 }
1355 if (!(init_connect_result & TWA_CTLR_FW_COMPATIBLE)) {
1356 /*
1357 * The firmware on the controller is not even
1358 * compatible with our base mode. We cannot
1359 * work with it. Bail...
1360 */
1361 printf("Incompatible firmware on controller\n");
1362 #ifdef TWA_FLASH_FIRMWARE
1363 if (fw_flash_failed)
1364 printf("...and could not flash bundled firmware.\n");
1365 else
1366 printf("...and bundled firmware not safe to flash.\n");
1367 #endif /* TWA_FLASH_FIRMWARE */
1368 return(1);
1369 }
1370 /* We can work with this firmware, but only in base mode. */
1371 sc->working_srl = TWA_BASE_FW_SRL;
1372 sc->working_branch = TWA_BASE_FW_BRANCH;
1373 sc->working_build = TWA_BASE_FW_BUILD;
1374 sc->twa_operating_mode = TWA_BASE_MODE;
1375 }
1376 }
1377 #endif
1378 twa_drain_aen_queue(sc);
1379
1380 /* Set controller state to initialized. */
1381 sc->twa_state &= ~TWA_STATE_SHUTDOWN;
1382 return(0);
1383 }
1384
1385
1386 static int
1387 twa_setup(struct twa_softc *sc)
1388 {
1389 struct tw_cl_event_packet *aen_queue;
1390 uint32_t i = 0;
1391 int error = 0;
1392
1393 /* Initialize request queues. */
1394 TAILQ_INIT(&sc->twa_free);
1395 TAILQ_INIT(&sc->twa_busy);
1396 TAILQ_INIT(&sc->twa_pending);
1397
1398 sc->sc_nunits = 0;
1399 sc->twa_sc_flags = 0;
1400
1401 if (twa_alloc_req_pkts(sc, TWA_Q_LENGTH)) {
1402
1403 return(ENOMEM);
1404 }
1405
1406 /* Allocate memory for the AEN queue. */
1407 if ((aen_queue = malloc(sizeof(struct tw_cl_event_packet) * TWA_Q_LENGTH,
1408 M_DEVBUF, M_WAITOK)) == NULL) {
1409 /*
1410 * Strictly speaking, this need not be a fatal error; we would
1411 * merely be unable to support AENs. But we would then have to
1412 * keep checking whether AEN support had become possible, so
1413 * just return an error instead.
1414 */
1415 return (ENOMEM);
1416 }
1417 /* Initialize the aen queue. */
1418 memset(aen_queue, 0, sizeof(struct tw_cl_event_packet) * TWA_Q_LENGTH);
1419
1420 for (i = 0; i < TWA_Q_LENGTH; i++)
1421 sc->twa_aen_queue[i] = &(aen_queue[i]);
1422
1423 twa_outl(sc, TWA_CONTROL_REGISTER_OFFSET,
1424 TWA_CONTROL_DISABLE_INTERRUPTS);
1425
1426 /* Initialize the controller. */
1427 if ((error = twa_init_ctlr(sc))) {
1428 /* Soft reset the controller, and try one more time. */
1429
1430 printf("%s: controller initialization failed. Retrying initialization\n",
1431 sc->twa_dv.dv_xname);
1432
1433 if ((error = twa_soft_reset(sc)) == 0)
1434 error = twa_init_ctlr(sc);
1435 }
1436
1437 twa_describe_controller(sc);
1438
1439 error = twa_request_bus_scan(sc);
1440
1441 twa_outl(sc, TWA_CONTROL_REGISTER_OFFSET,
1442 TWA_CONTROL_CLEAR_ATTENTION_INTERRUPT |
1443 TWA_CONTROL_UNMASK_RESPONSE_INTERRUPT |
1444 TWA_CONTROL_ENABLE_INTERRUPTS);
1445
1446 return (error);
1447 }
1448
1449 void *twa_sdh;
1450
1451 static void
1452 twa_attach(struct device *parent, struct device *self, void *aux)
1453 {
1454 struct pci_attach_args *pa;
1455 struct twa_softc *sc;
1456 pci_chipset_tag_t pc;
1457 pcireg_t csr;
1458 pci_intr_handle_t ih;
1459 const char *intrstr;
1460
1461 sc = (struct twa_softc *)self;
1462
1463 pa = aux;
1464 pc = pa->pa_pc;
1465 sc->pc = pa->pa_pc;
1466 sc->tag = pa->pa_tag;
1467 sc->twa_dma_tag = pa->pa_dmat;
1468
1469 aprint_naive(": RAID controller\n");
1470 aprint_normal(": 3ware Apache\n");
1471
1472 if (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_3WARE_9000) {
1473 if (pci_mapreg_map(pa, PCI_MAPREG_START, PCI_MAPREG_TYPE_IO, 0,
1474 &sc->twa_bus_iot, &sc->twa_bus_ioh, NULL, NULL)) {
1475 aprint_error("%s: can't map i/o space\n",
1476 sc->twa_dv.dv_xname);
1477 return;
1478 }
1479 } else if (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_3WARE_9550) {
1480 if (pci_mapreg_map(pa, PCI_MAPREG_START + 0x08,
1481 PCI_MAPREG_MEM_TYPE_64BIT, 0, &sc->twa_bus_iot,
1482 &sc->twa_bus_ioh, NULL, NULL)) {
1483 aprint_error("%s: can't map mem space\n",
1484 sc->twa_dv.dv_xname);
1485 return;
1486 }
1487 } else {
1488 aprint_error("%s: product id 0x%02x not recognized\n",
1489 sc->twa_dv.dv_xname, PCI_PRODUCT(pa->pa_id));
1490 return;
1491 }
1492 /* Enable the device. */
1493 csr = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
1494
1495 pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG,
1496 csr | PCI_COMMAND_MASTER_ENABLE);
1497
1498 /* Map and establish the interrupt. */
1499 if (pci_intr_map(pa, &ih)) {
1500 aprint_error("%s: can't map interrupt\n", sc->twa_dv.dv_xname);
1501 return;
1502 }
1503 intrstr = pci_intr_string(pc, ih);
1504
1505 sc->twa_ih = pci_intr_establish(pc, ih, IPL_BIO, twa_intr, sc);
1506 if (sc->twa_ih == NULL) {
1507 aprint_error("%s: can't establish interrupt%s%s\n",
1508 sc->twa_dv.dv_xname,
1509 (intrstr) ? " at " : "",
1510 (intrstr) ? intrstr : "");
1511 return;
1512 }
1513
1514 if (intrstr != NULL)
1515 aprint_normal("%s: interrupting at %s\n",
1516 sc->twa_dv.dv_xname, intrstr);
1517
1518 twa_setup(sc);
1519
1520 if (twa_sdh == NULL)
1521 twa_sdh = shutdownhook_establish(twa_shutdown, NULL);
1522
1523 return;
1524 }
1525
1526
1527 static void
1528 twa_shutdown(void *arg)
1529 {
1530 extern struct cfdriver twa_cd;
1531 struct twa_softc *sc;
1532 int i, rv, unit;
1533
1534 for (i = 0; i < twa_cd.cd_ndevs; i++) {
1535 if ((sc = device_lookup(&twa_cd, i)) == NULL)
1536 continue;
1537
1538 for (unit = 0; unit < TWA_MAX_UNITS; unit++)
1539 if (sc->sc_units[unit].td_dev != NULL)
1540 (void) config_detach(sc->sc_units[unit].td_dev,
1541 DETACH_FORCE | DETACH_QUIET);
1542
1543 twa_outl(sc, TWA_CONTROL_REGISTER_OFFSET,
1544 TWA_CONTROL_DISABLE_INTERRUPTS);
1545
1546 /* Let the controller know that we are going down. */
1547 rv = twa_init_connection(sc, TWA_SHUTDOWN_MESSAGE_CREDITS,
1548 0, 0, 0, 0, 0,
1549 NULL, NULL, NULL, NULL, NULL);
1550 }
1551 }
1552
1553
1554 void
1555 twa_register_callbacks(struct twa_softc *sc, int unit,
1556 const struct twa_callbacks *tcb)
1557 {
1558
1559 sc->sc_units[unit].td_callbacks = tcb;
1560 }
1561
1562
1563 /*
1564 * Print autoconfiguration message for a sub-device
1565 */
1566 static int
1567 twa_print(void *aux, const char *pnp)
1568 {
1569 struct twa_attach_args *twaa;
1570
1571 twaa = aux;
1572
1573 if (pnp != NULL)
1574 aprint_normal("block device at %s", pnp);
1575 aprint_normal(" unit %d\n", twaa->twaa_unit);
1576 return (UNCONF);
1577 }
1578
1579
1580 static void
1581 twa_fillin_sgl(struct twa_sg *sgl, bus_dma_segment_t *segs, int nsegments)
1582 {
1583 int i;
1584 for (i = 0; i < nsegments; i++) {
1585 sgl[i].address = segs[i].ds_addr;
1586 sgl[i].length = (u_int32_t)(segs[i].ds_len);
1587 }
1588 }
1589
1590
1591 static int
1592 twa_submit_io(struct twa_request *tr)
1593 {
1594 int error;
1595
1596 if ((error = twa_start(tr))) {
1597 if (error == EBUSY)
1598 error = 0; /* request is in the pending queue */
1599 else {
1600 tr->tr_error = error;
1601 }
1602 }
1603 return(error);
1604 }
1605
1606
1607 /*
1608 * Function name: twa_setup_data_dmamap
1609 * Description: Callback of bus_dmamap_load for the buffer associated
1610 * with data. Updates the cmd pkt (size/sgl_entries
1611 * fields, as applicable) to reflect the number of sg
1612 * elements.
1613 *
1614 * Input: arg -- ptr to request pkt
1615 * segs -- ptr to a list of segment descriptors
1616 * nsegments--# of segments
1617 * error -- 0 if no errors encountered before callback,
1618 * non-zero if errors were encountered
1619 * Output: None
1620 * Return value: None
1621 */
1622 static int
1623 twa_setup_data_dmamap(void *arg, bus_dma_segment_t *segs,
1624 int nsegments, int error)
1625 {
1626 struct twa_request *tr = (struct twa_request *)arg;
1627 struct twa_command_packet *cmdpkt = tr->tr_command;
1628 struct twa_command_9k *cmd9k;
1629 union twa_command_7k *cmd7k;
1630 u_int8_t sgl_offset;
1631
1632 if (error == EFBIG) {
1633 tr->tr_error = error;
1634 goto out;
1635 }
1636
1637 if (tr->tr_cmd_pkt_type & TWA_CMD_PKT_TYPE_9K) {
1638 cmd9k = &(cmdpkt->command.cmd_pkt_9k);
1639 twa_fillin_sgl(&(cmd9k->sg_list[0]), segs, nsegments);
1640 cmd9k->sgl_entries += nsegments - 1;
1641 } else {
1642 /* It's a 7000 command packet. */
1643 cmd7k = &(cmdpkt->command.cmd_pkt_7k);
1644 if ((sgl_offset = cmdpkt->command.cmd_pkt_7k.generic.sgl_offset))
1645 twa_fillin_sgl((struct twa_sg *)
1646 (((u_int32_t *)cmd7k) + sgl_offset),
1647 segs, nsegments);
1648 /* Adjust the size field: each SG entry is 2 dwords with 32-bit addresses, 3 with 64-bit. */
1649 cmd7k->generic.size +=
1650 ((TWA_64BIT_ADDRESSES ? 3 : 2) * nsegments);
1651 }
1652
1653 if (tr->tr_flags & TWA_CMD_DATA_IN)
1654 bus_dmamap_sync(tr->tr_sc->twa_dma_tag, tr->tr_dma_map, 0,
1655 tr->tr_length, BUS_DMASYNC_PREREAD);
1656 if (tr->tr_flags & TWA_CMD_DATA_OUT) {
1657 /*
1658 * If we're using an alignment buffer, and we're
1659 * writing data, copy the real data out.
1660 */
1661 if (tr->tr_flags & TWA_CMD_DATA_COPY_NEEDED)
1662 memcpy(tr->tr_data, tr->tr_real_data,
1663 tr->tr_real_length);
1664 bus_dmamap_sync(tr->tr_sc->twa_dma_tag, tr->tr_dma_map, 0,
1665 tr->tr_length, BUS_DMASYNC_PREWRITE);
1666 }
1667 error = twa_submit_io(tr);
1668
1669 out:
1670 if (error) {
1671 twa_unmap_request(tr);
1672 /*
1673 * If the caller had been returned EINPROGRESS, and he has
1674 * registered a callback for handling completion, the callback
1675 * will never get called because we were unable to submit the
1676 * request. So, free up the request right here.
1677 */
1678 if ((tr->tr_flags & TWA_CMD_IN_PROGRESS) && (tr->tr_callback))
1679 twa_release_request(tr);
1680 }
1681 return (error);
1682 }
1683
1684
1685 /*
1686 * Function name: twa_map_request
1687 * Description: Maps a cmd pkt and data associated with it, into
1688 * DMA'able memory.
1689 *
1690 * Input: tr -- ptr to request pkt
1691 * Output: None
1692 * Return value: 0 -- success
1693 * non-zero-- failure
1694 */
1695 int
1696 twa_map_request(struct twa_request *tr)
1697 {
1698 struct twa_softc *sc = tr->tr_sc;
1699 int s, rv, error = 0;
1700
1701 /* If the command involves data, map that too. */
1702 if (tr->tr_data != NULL) {
1703
1704 if (((u_long)tr->tr_data & (511)) != 0) {
1705 tr->tr_flags |= TWA_CMD_DATA_COPY_NEEDED;
1706 tr->tr_real_data = tr->tr_data;
1707 tr->tr_real_length = tr->tr_length;
1708 s = splvm();
1709 tr->tr_data = (void *)uvm_km_alloc(kmem_map,
1710 tr->tr_length, 512, UVM_KMF_NOWAIT|UVM_KMF_WIRED);
1711 splx(s);
1712
1713 if (tr->tr_data == NULL) {
1714 tr->tr_data = tr->tr_real_data;
1715 tr->tr_length = tr->tr_real_length;
1716 return(ENOMEM);
1717 }
1718 if ((tr->tr_flags & TWA_CMD_DATA_IN) != 0)
1719 memcpy(tr->tr_data, tr->tr_real_data,
1720 tr->tr_length);
1721 }
1722
1723 /*
1724 * Map the data buffer into bus space and build the S/G list.
1725 */
1726 rv = bus_dmamap_load(sc->twa_dma_tag, tr->tr_dma_map,
1727 tr->tr_data, tr->tr_length, NULL, BUS_DMA_NOWAIT |
1728 BUS_DMA_STREAMING | ((tr->tr_flags & TWA_CMD_DATA_OUT) ?
1729 BUS_DMA_READ : BUS_DMA_WRITE));
1730
1731 if (rv != 0) {
1732 if ((tr->tr_flags & TWA_CMD_DATA_COPY_NEEDED) != 0) {
1733 s = splvm();
1734 uvm_km_free(kmem_map, (vaddr_t)tr->tr_data,
1735 tr->tr_length, UVM_KMF_WIRED);
1736 splx(s);
1737 }
1738 return (rv);
1739 }
1740
1741 if ((rv = twa_setup_data_dmamap(tr,
1742 tr->tr_dma_map->dm_segs,
1743 tr->tr_dma_map->dm_nsegs, error))) {
1744
1745 if (tr->tr_flags & TWA_CMD_DATA_COPY_NEEDED) {
1746 uvm_km_free(kmem_map, (vaddr_t)tr->tr_data,
1747 tr->tr_length, UVM_KMF_WIRED);
1748 tr->tr_data = tr->tr_real_data;
1749 tr->tr_length = tr->tr_real_length;
1750 }
1751 } else
1752 error = tr->tr_error;
1753
1754 } else
1755 if ((rv = twa_submit_io(tr)))
1756 twa_unmap_request(tr);
1757
1758 return (rv);
1759 }
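
/*
 * Illustrative sketch (not compiled into the driver): the alignment rule that
 * drives the bounce-buffer path in twa_map_request() above. Any data pointer
 * that is not 512-byte aligned is copied into a freshly allocated, 512-byte
 * aligned buffer before the DMA map is loaded. The helper name below is an
 * assumption for this example only; the driver performs the check inline.
 */
#if 0
static int
twa_example_needs_bounce(const void *data)
{
	/* Non-zero when the buffer does not start on a 512-byte boundary. */
	return (((u_long)data & 511) != 0);
}
#endif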
1760
1761 #if 0
1762 /*
1763 * Function name: twa_flash_firmware
1764 * Description: Flashes bundled firmware image onto controller.
1765 *
1766 * Input: sc -- ptr to per ctlr structure
1767 * Output: None
1768 * Return value: 0 -- success
1769 * non-zero-- failure
1770 */
1771 static int
1772 twa_flash_firmware(struct twa_softc *sc)
1773 {
1774 struct twa_request *tr;
1775 struct twa_command_download_firmware *cmd;
1776 uint32_t count;
1777 uint32_t fw_img_chunk_size;
1778 uint32_t this_chunk_size = 0;
1779 uint32_t remaining_img_size = 0;
1780 int s, error = 0;
1781 int i;
1782
1783 if ((tr = twa_get_request(sc, 0)) == NULL) {
1784 /* No free request packets available. Can't proceed. */
1785 error = EIO;
1786 goto out;
1787 }
1788
1789 count = (twa_fw_img_size / 65536);
1790
1791 count += ((twa_fw_img_size % 65536) != 0) ? 1 : 0;
1792
1793 tr->tr_cmd_pkt_type |= TWA_CMD_PKT_TYPE_INTERNAL;
1794 /* Allocate sufficient memory to hold a chunk of the firmware image. */
1795 fw_img_chunk_size = ((twa_fw_img_size / count) + 511) & ~511;
1796
1797 s = splvm();
1798 tr->tr_data = (void *)uvm_km_alloc(kmem_map, fw_img_chunk_size, 512,
1799 UVM_KMF_WIRED);
1800 splx(s);
1801
1802 if (tr->tr_data == NULL) {
1803 error = ENOMEM;
1804 goto out;
1805 }
1806
1807 remaining_img_size = twa_fw_img_size;
1808 cmd = &(tr->tr_command->command.cmd_pkt_7k.download_fw);
1809
1810 for (i = 0; i < count; i++) {
1811 /* Build a cmd pkt for downloading firmware. */
1812 memset(tr->tr_command, 0, sizeof(struct twa_command_packet));
1813
1814 tr->tr_command->cmd_hdr.header_desc.size_header = 128;
1815
1816 cmd->opcode = TWA_OP_DOWNLOAD_FIRMWARE;
1817 cmd->sgl_offset = 2;/* offset in dwords, to the beginning of sg list */
1818 cmd->size = 2; /* this field will be updated at data map time */
1819 cmd->request_id = tr->tr_request_id;
1820 cmd->unit = 0;
1821 cmd->status = 0;
1822 cmd->flags = 0;
1823 cmd->param = 8; /* prom image */
1824
1825 if (i != (count - 1))
1826 this_chunk_size = fw_img_chunk_size;
1827 else /* last chunk */
1828 this_chunk_size = remaining_img_size;
1829
1830 remaining_img_size -= this_chunk_size;
1831
1832 memset(tr->tr_data, 0, fw_img_chunk_size);
1833
1834 memcpy(tr->tr_data, twa_fw_img + (i * fw_img_chunk_size),
1835 this_chunk_size);
1836 /*
1837 		 * Rounding up to a 512-byte multiple affects only the last chunk.
1838 */
1839 tr->tr_length = (this_chunk_size + 511) & ~511;
1840
1841 tr->tr_flags |= TWA_CMD_DATA_OUT;
1842
1843 error = twa_immediate_request(tr, TWA_REQUEST_TIMEOUT_PERIOD);
1844
1845 if (error) {
1846 if (error == ETIMEDOUT)
1847 return(error); /* clean-up done by twa_immediate_request */
1848 break;
1849 }
1850 error = cmd->status;
1851
1852 if (i != (count - 1)) {
1853
1854 			/* XXX The FreeBSD code does not check for the no-error
1855 			 * case, but in practice the error field here has been
1856 			 * observed to be 0. */
1857 if ((error = tr->tr_command->cmd_hdr.status_block.error) == 0) {
1858 continue;
1859 } else if ((error = tr->tr_command->cmd_hdr.status_block.error) ==
1860 TWA_ERROR_MORE_DATA) {
1861 continue;
1862 } else {
1863 twa_hard_reset(sc);
1864 break;
1865 }
1866 } else /* last chunk */
1867 if (error) {
1868 printf("%s: firmware flash request failed. error = 0x%x\n",
1869 sc->twa_dv.dv_xname, error);
1870 twa_hard_reset(sc);
1871 }
1872 } /* for */
1873
1874 if (tr->tr_data) {
1875 s = splvm();
1876 uvm_km_free(kmem_map, (vaddr_t)tr->tr_data,
1877 fw_img_chunk_size, UVM_KMF_WIRED);
1878 splx(s);
1879 }
1880 out:
1881 if (tr)
1882 twa_release_request(tr);
1883 return(error);
1884 }
1885
1886 /*
1887 * Function name: twa_hard_reset
1888 * Description: Hard reset the controller.
1889 *
1890 * Input: sc -- ptr to per ctlr structure
1891 * Output: None
1892 * Return value: 0 -- success
1893 * non-zero-- failure
1894 */
1895 static int
1896 twa_hard_reset(struct twa_softc *sc)
1897 {
1898 struct twa_request *tr;
1899 struct twa_command_reset_firmware *cmd;
1900 int error;
1901
1902 if ((tr = twa_get_request(sc, 0)) == NULL)
1903 return(EIO);
1904 tr->tr_cmd_pkt_type |= TWA_CMD_PKT_TYPE_INTERNAL;
1905 /* Build a cmd pkt for sending down the hard reset command. */
1906 tr->tr_command->cmd_hdr.header_desc.size_header = 128;
1907
1908 cmd = &(tr->tr_command->command.cmd_pkt_7k.reset_fw);
1909 cmd->opcode = TWA_OP_RESET_FIRMWARE;
1910 cmd->size = 2; /* this field will be updated at data map time */
1911 cmd->request_id = tr->tr_request_id;
1912 cmd->unit = 0;
1913 cmd->status = 0;
1914 cmd->flags = 0;
1915 cmd->param = 0; /* don't reload FPGA logic */
1916
1917 tr->tr_data = NULL;
1918 tr->tr_length = 0;
1919
1920 error = twa_immediate_request(tr, TWA_REQUEST_TIMEOUT_PERIOD);
1921 if (error) {
1922 printf("%s: hard reset request could not "
1923 			"be posted. error = 0x%x\n", sc->twa_dv.dv_xname, error);
1924 if (error == ETIMEDOUT)
1925 return(error); /* clean-up done by twa_immediate_request */
1926 goto out;
1927 }
1928 if ((error = cmd->status)) {
1929 printf("%s: hard reset request failed. error = 0x%x\n",
1930 sc->twa_dv.dv_xname, error);
1931 }
1932
1933 out:
1934 if (tr)
1935 twa_release_request(tr);
1936 return(error);
1937 }
1938 #endif
1939
1940 /*
1941 * Function name: twa_intr
1942 * Description: Interrupt handler. Determines the kind of interrupt,
1943 * and calls the appropriate handler.
1944 *
1945 * Input: sc -- ptr to per ctlr structure
1946 * Output: None
1947  * Return value:	1 -- interrupt was ours; 0 -- not ours
1948 */
1949
1950 static int
1951 twa_intr(void *arg)
1952 {
1953 int caught, rv;
1954 struct twa_softc *sc;
1955 u_int32_t status_reg;
1956 sc = (struct twa_softc *)arg;
1957
1958 caught = 0;
1959 /* Collect current interrupt status. */
1960 status_reg = twa_inl(sc, TWA_STATUS_REGISTER_OFFSET);
1961 if (twa_check_ctlr_state(sc, status_reg)) {
1962 caught = 1;
1963 goto bail;
1964 }
1965 /* Dispatch based on the kind of interrupt. */
1966 if (status_reg & TWA_STATUS_HOST_INTERRUPT) {
1967 twa_outl(sc, TWA_CONTROL_REGISTER_OFFSET,
1968 TWA_CONTROL_CLEAR_HOST_INTERRUPT);
1969 caught = 1;
1970 }
1971 if ((status_reg & TWA_STATUS_ATTENTION_INTERRUPT) != 0) {
1972 twa_outl(sc, TWA_CONTROL_REGISTER_OFFSET,
1973 TWA_CONTROL_CLEAR_ATTENTION_INTERRUPT);
1974 rv = twa_fetch_aen(sc);
1975 #ifdef DIAGNOSTIC
1976 if (rv != 0)
1977 printf("%s: unable to retrieve AEN (%d)\n",
1978 sc->twa_dv.dv_xname, rv);
1979 #endif
1980 caught = 1;
1981 }
1982 if (status_reg & TWA_STATUS_COMMAND_INTERRUPT) {
1983 /* Start any requests that might be in the pending queue. */
1984 twa_outl(sc, TWA_CONTROL_REGISTER_OFFSET,
1985 TWA_CONTROL_MASK_COMMAND_INTERRUPT);
1986 (void)twa_drain_pending_queue(sc);
1987 caught = 1;
1988 }
1989 if (status_reg & TWA_STATUS_RESPONSE_INTERRUPT) {
1990 twa_done(sc);
1991 caught = 1;
1992 }
1993 bail:
1994 return (caught);
1995 }
1996
1997
1998 /*
1999 * Accept an open operation on the control device.
2000 */
2001 static int
2002 twaopen(dev_t dev, int flag, int mode, struct lwp *l)
2003 {
2004 struct twa_softc *twa;
2005
2006 if ((twa = device_lookup(&twa_cd, minor(dev))) == NULL)
2007 return (ENXIO);
2008 if ((twa->twa_sc_flags & TWA_STATE_OPEN) != 0)
2009 return (EBUSY);
2010
2011 twa->twa_sc_flags |= TWA_STATE_OPEN;
2012
2013 return (0);
2014 }
2015
2016
2017 /*
2018 * Accept the last close on the control device.
2019 */
2020 static int
2021 twaclose(dev_t dev, int flag, int mode, struct lwp *l)
2022 {
2023 struct twa_softc *twa;
2024
2025 twa = device_lookup(&twa_cd, minor(dev));
2026 twa->twa_sc_flags &= ~TWA_STATE_OPEN;
2027 return (0);
2028 }
2029
2030
2031 /*
2032 * Function name: twaioctl
2033 * Description: ioctl handler.
2034 *
2035 * Input: sc -- ptr to per ctlr structure
2036 * cmd -- ioctl cmd
2037 * buf -- ptr to buffer in kernel memory, which is
2038 * a copy of the input buffer in user-space
2039 * Output: buf -- ptr to buffer in kernel memory, which will
2040  *			be copied to the output buffer in user-space
2041 * Return value: 0 -- success
2042 * non-zero-- failure
2043 */
2044 static int
2045 twaioctl(dev_t dev, u_long cmd, caddr_t data, int flag, struct lwp *l)
2046 {
2047 struct twa_softc *sc;
2048 struct twa_ioctl_9k *user_buf = (struct twa_ioctl_9k *)data;
2049 struct tw_cl_event_packet event_buf;
2050 struct twa_request *tr = 0;
2051 int32_t event_index = 0;
2052 int32_t start_index;
2053 int s, error = 0;
2054
2055 sc = device_lookup(&twa_cd, minor(dev));
2056
2057 switch (cmd) {
2058 case TW_OSL_IOCTL_FIRMWARE_PASS_THROUGH:
2059 {
2060 struct twa_command_packet *cmdpkt;
2061 u_int32_t data_buf_size_adjusted;
2062
2063 /* Get a request packet */
2064 tr = twa_get_request_wait(sc, 0);
2065 KASSERT(tr != NULL);
2066 /*
2067 * Make sure that the data buffer sent to firmware is a
2068 * 512 byte multiple in size.
2069 */
2070 data_buf_size_adjusted =
2071 (user_buf->twa_drvr_pkt.buffer_length + 511) & ~511;
2072
2073 if ((tr->tr_length = data_buf_size_adjusted)) {
2074 if ((tr->tr_data = malloc(data_buf_size_adjusted,
2075 M_DEVBUF, M_WAITOK)) == NULL) {
2076 error = ENOMEM;
2077 goto fw_passthru_done;
2078 }
2079 /* Copy the payload. */
2080 if ((error = copyin((void *) (user_buf->pdata),
2081 (void *) (tr->tr_data),
2082 user_buf->twa_drvr_pkt.buffer_length)) != 0) {
2083 goto fw_passthru_done;
2084 }
2085 tr->tr_flags |= TWA_CMD_DATA_IN | TWA_CMD_DATA_OUT;
2086 }
2087 tr->tr_cmd_pkt_type |= TWA_CMD_PKT_TYPE_IOCTL;
2088 cmdpkt = tr->tr_command;
2089
2090 /* Copy the command packet. */
2091 memcpy(cmdpkt, &(user_buf->twa_cmd_pkt),
2092 sizeof(struct twa_command_packet));
2093 cmdpkt->command.cmd_pkt_7k.generic.request_id =
2094 tr->tr_request_id;
2095
2096 /* Send down the request, and wait for it to complete. */
2097 if ((error = twa_wait_request(tr, TWA_REQUEST_TIMEOUT_PERIOD))) {
2098 if (error == ETIMEDOUT)
2099 break; /* clean-up done by twa_wait_request */
2100 goto fw_passthru_done;
2101 }
2102
2103 /* Copy the command packet back into user space. */
2104 memcpy(&user_buf->twa_cmd_pkt, cmdpkt,
2105 sizeof(struct twa_command_packet));
2106
2107 /* If there was a payload, copy it back too. */
2108 if (tr->tr_length)
2109 error = copyout(tr->tr_data, user_buf->pdata,
2110 user_buf->twa_drvr_pkt.buffer_length);
2111 fw_passthru_done:
2112 /* Free resources. */
2113 if (tr->tr_data)
2114 free(tr->tr_data, M_DEVBUF);
2115
2116 if (tr)
2117 twa_release_request(tr);
2118 break;
2119 }
2120
2121 case TW_OSL_IOCTL_SCAN_BUS:
2122 twa_request_bus_scan(sc);
2123 break;
2124
2125 case TW_CL_IOCTL_GET_FIRST_EVENT:
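		/*
		 * twa_aen_head is the next slot twa_enqueue_aen() will write;
		 * once the ring has wrapped it therefore also indexes the
		 * oldest surviving event.  Before the ring wraps, the oldest
		 * event is at twa_aen_tail (0).
		 */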
2126 if (sc->twa_aen_queue_wrapped) {
2127 if (sc->twa_aen_queue_overflow) {
2128 /*
2129 * The aen queue has wrapped, even before some
2130 * events have been retrieved. Let the caller
2131 * know that he missed out on some AEN's.
2132 */
2133 user_buf->twa_drvr_pkt.status =
2134 TWA_ERROR_AEN_OVERFLOW;
2135 sc->twa_aen_queue_overflow = FALSE;
2136 } else
2137 user_buf->twa_drvr_pkt.status = 0;
2138 event_index = sc->twa_aen_head;
2139 } else {
2140 if (sc->twa_aen_head == sc->twa_aen_tail) {
2141 user_buf->twa_drvr_pkt.status =
2142 TWA_ERROR_AEN_NO_EVENTS;
2143 break;
2144 }
2145 user_buf->twa_drvr_pkt.status = 0;
2146 event_index = sc->twa_aen_tail; /* = 0 */
2147 }
2148 if ((error = copyout(sc->twa_aen_queue[event_index],
2149 		    user_buf->pdata, sizeof(struct tw_cl_event_packet))) == 0)
2150 (sc->twa_aen_queue[event_index])->retrieved =
2151 TWA_AEN_RETRIEVED;
2152 break;
2153
2154
2155 case TW_CL_IOCTL_GET_LAST_EVENT:
2156
2157 if (sc->twa_aen_queue_wrapped) {
2158 if (sc->twa_aen_queue_overflow) {
2159 /*
2160 * The aen queue has wrapped, even before some
2161 * events have been retrieved. Let the caller
2162 * know that he missed out on some AEN's.
2163 */
2164 user_buf->twa_drvr_pkt.status =
2165 TWA_ERROR_AEN_OVERFLOW;
2166 sc->twa_aen_queue_overflow = FALSE;
2167 } else
2168 user_buf->twa_drvr_pkt.status = 0;
2169 } else {
2170 if (sc->twa_aen_head == sc->twa_aen_tail) {
2171 user_buf->twa_drvr_pkt.status =
2172 TWA_ERROR_AEN_NO_EVENTS;
2173 break;
2174 }
2175 user_buf->twa_drvr_pkt.status = 0;
2176 }
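		/*
		 * The most recently queued event sits one slot behind
		 * twa_aen_head; adding TWA_Q_LENGTH before the modulo keeps
		 * the index from going negative when the head is at slot 0.
		 */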
2177 event_index = (sc->twa_aen_head - 1 + TWA_Q_LENGTH) % TWA_Q_LENGTH;
2178 if ((error = copyout(sc->twa_aen_queue[event_index], user_buf->pdata,
2179 		    sizeof(struct tw_cl_event_packet))) == 0)
2180
2181 (sc->twa_aen_queue[event_index])->retrieved =
2182 TWA_AEN_RETRIEVED;
2183 break;
2184
2185
2186 case TW_CL_IOCTL_GET_NEXT_EVENT:
2187
2188 user_buf->twa_drvr_pkt.status = 0;
2189 if (sc->twa_aen_queue_wrapped) {
2190
2191 if (sc->twa_aen_queue_overflow) {
2192 /*
2193 * The aen queue has wrapped, even before some
2194 * events have been retrieved. Let the caller
2195 * know that he missed out on some AEN's.
2196 */
2197 user_buf->twa_drvr_pkt.status =
2198 TWA_ERROR_AEN_OVERFLOW;
2199 sc->twa_aen_queue_overflow = FALSE;
2200 }
2201 start_index = sc->twa_aen_head;
2202 } else {
2203 if (sc->twa_aen_head == sc->twa_aen_tail) {
2204 user_buf->twa_drvr_pkt.status =
2205 TWA_ERROR_AEN_NO_EVENTS;
2206 break;
2207 }
2208 start_index = sc->twa_aen_tail; /* = 0 */
2209 }
2210 error = copyin(user_buf->pdata, &event_buf,
2211 sizeof(struct tw_cl_event_packet));
2212
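		/*
		 * The caller passes the sequence_id of the last event it has
		 * seen.  The slot at start_index holds the oldest event still
		 * in the ring, so the event with (sequence_id + 1) lies that
		 * many sequence numbers further along, modulo the ring size.
		 */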
2213 event_index = (start_index + event_buf.sequence_id -
2214 (sc->twa_aen_queue[start_index])->sequence_id + 1)
2215 % TWA_Q_LENGTH;
2216
2217 if (! ((sc->twa_aen_queue[event_index])->sequence_id >
2218 event_buf.sequence_id)) {
2219 if (user_buf->twa_drvr_pkt.status == TWA_ERROR_AEN_OVERFLOW)
2220 sc->twa_aen_queue_overflow = TRUE; /* so we report the overflow next time */
2221 user_buf->twa_drvr_pkt.status =
2222 TWA_ERROR_AEN_NO_EVENTS;
2223 break;
2224 }
2225 if ((error = copyout(sc->twa_aen_queue[event_index], user_buf->pdata,
2226 		    sizeof(struct tw_cl_event_packet))) == 0)
2227
2228 (sc->twa_aen_queue[event_index])->retrieved =
2229 TWA_AEN_RETRIEVED;
2230 break;
2231
2232
2233 case TW_CL_IOCTL_GET_PREVIOUS_EVENT:
2234
2235 user_buf->twa_drvr_pkt.status = 0;
2236 if (sc->twa_aen_queue_wrapped) {
2237 if (sc->twa_aen_queue_overflow) {
2238 /*
2239 * The aen queue has wrapped, even before some
2240 * events have been retrieved. Let the caller
2241 * know that he missed out on some AEN's.
2242 */
2243 user_buf->twa_drvr_pkt.status =
2244 TWA_ERROR_AEN_OVERFLOW;
2245 sc->twa_aen_queue_overflow = FALSE;
2246 }
2247 start_index = sc->twa_aen_head;
2248 } else {
2249 if (sc->twa_aen_head == sc->twa_aen_tail) {
2250 user_buf->twa_drvr_pkt.status =
2251 TWA_ERROR_AEN_NO_EVENTS;
2252 break;
2253 }
2254 start_index = sc->twa_aen_tail; /* = 0 */
2255 }
2256 		if ((error = copyin(user_buf->pdata, &event_buf,
2257 		    sizeof(struct tw_cl_event_packet))) != 0)
2258 			break;
2259 		event_index = (start_index + event_buf.sequence_id -
2260 		    (sc->twa_aen_queue[start_index])->sequence_id - 1) % TWA_Q_LENGTH;
2261 if (! ((sc->twa_aen_queue[event_index])->sequence_id <
2262 event_buf.sequence_id)) {
2263 if (user_buf->twa_drvr_pkt.status == TWA_ERROR_AEN_OVERFLOW)
2264 sc->twa_aen_queue_overflow = TRUE; /* so we report the overflow next time */
2265 user_buf->twa_drvr_pkt.status =
2266 TWA_ERROR_AEN_NO_EVENTS;
2267 break;
2268 }
2269 if ((error = copyout(sc->twa_aen_queue [event_index], user_buf->pdata,
2270 sizeof(struct tw_cl_event_packet))) != 0)
2271 aprint_error("%s: get_previous: Could not copyout to "
2272 "event_buf. error = %x\n", sc->twa_dv.dv_xname, error);
2273 (sc->twa_aen_queue[event_index])->retrieved = TWA_AEN_RETRIEVED;
2274 break;
2275
2276 case TW_CL_IOCTL_GET_LOCK:
2277 {
2278 struct tw_cl_lock_packet twa_lock;
2279
2280 		if ((error = copyin(user_buf->pdata, &twa_lock,
2281 			sizeof(struct tw_cl_lock_packet))) != 0)
			break;
2282 s = splbio();
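		/*
		 * Grant the lock if it is currently free, if the caller asks
		 * to force it, or if the previous holder's timeout (kept as
		 * an absolute tv_sec value) has already expired.
		 */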
2283 if ((sc->twa_ioctl_lock.lock == TWA_LOCK_FREE) ||
2284 (twa_lock.force_flag) ||
2285 (time.tv_sec >= sc->twa_ioctl_lock.timeout)) {
2286
2287 sc->twa_ioctl_lock.lock = TWA_LOCK_HELD;
2288 sc->twa_ioctl_lock.timeout = time.tv_sec +
2289 (twa_lock.timeout_msec / 1000);
2290 twa_lock.time_remaining_msec = twa_lock.timeout_msec;
2291 user_buf->twa_drvr_pkt.status = 0;
2292 } else {
2293 twa_lock.time_remaining_msec =
2294 (sc->twa_ioctl_lock.timeout - time.tv_sec) *
2295 1000;
2296 user_buf->twa_drvr_pkt.status =
2297 TWA_ERROR_IOCTL_LOCK_ALREADY_HELD;
2298 }
2299 splx(s);
2300 copyout(&twa_lock, user_buf->pdata,
2301 sizeof(struct tw_cl_lock_packet));
2302 break;
2303 }
2304
2305 case TW_CL_IOCTL_RELEASE_LOCK:
2306 s = splbio();
2307 if (sc->twa_ioctl_lock.lock == TWA_LOCK_FREE) {
2308 user_buf->twa_drvr_pkt.status =
2309 TWA_ERROR_IOCTL_LOCK_NOT_HELD;
2310 } else {
2311 sc->twa_ioctl_lock.lock = TWA_LOCK_FREE;
2312 user_buf->twa_drvr_pkt.status = 0;
2313 }
2314 splx(s);
2315 break;
2316
2317 case TW_CL_IOCTL_GET_COMPATIBILITY_INFO:
2318 {
2319 struct tw_cl_compatibility_packet comp_pkt;
2320
2321 memcpy(comp_pkt.driver_version, TWA_DRIVER_VERSION_STRING,
2322 sizeof(TWA_DRIVER_VERSION_STRING));
2323 comp_pkt.working_srl = sc->working_srl;
2324 comp_pkt.working_branch = sc->working_branch;
2325 comp_pkt.working_build = sc->working_build;
2326 user_buf->twa_drvr_pkt.status = 0;
2327
2328 /* Copy compatibility information to user space. */
2329 copyout(&comp_pkt, user_buf->pdata,
2330 min(sizeof(struct tw_cl_compatibility_packet),
2331 user_buf->twa_drvr_pkt.buffer_length));
2332 break;
2333 }
2334
2335 case TWA_IOCTL_GET_UNITNAME: /* WASABI EXTENSION */
2336 {
2337 struct twa_unitname *tn;
2338 struct twa_drive *tdr;
2339
2340 tn = (struct twa_unitname *)data;
2341 /* XXX mutex */
2342 if (tn->tn_unit < 0 || tn->tn_unit >= TWA_MAX_UNITS)
2343 return (EINVAL);
2344 tdr = &sc->sc_units[tn->tn_unit];
2345 if (tdr->td_dev == NULL)
2346 tn->tn_name[0] = '\0';
2347 else
2348 strlcpy(tn->tn_name, tdr->td_dev->dv_xname,
2349 sizeof(tn->tn_name));
2350 return (0);
2351 }
2352
2353 default:
2354 /* Unknown opcode. */
2355 error = ENOTTY;
2356 }
2357
2358 return(error);
2359 }
2360
2361
2362 const struct cdevsw twa_cdevsw = {
2363 twaopen, twaclose, noread, nowrite, twaioctl,
2364 nostop, notty, nopoll, nommap,
2365 };
2366
2367
2368 /*
2369 * Function name: twa_get_param
2370 * Description: Get a firmware parameter.
2371 *
2372 * Input: sc -- ptr to per ctlr structure
2373 * table_id -- parameter table #
2374 * param_id -- index of the parameter in the table
2375 * param_size -- size of the parameter in bytes
2376 * callback -- ptr to function, if any, to be called
2377 * back on completion; NULL if no callback.
2378  * Output:		param	-- ptr to buffer holding the parameter read
2379  * Return value:	0	-- success
2380  *			non-zero-- failure
2381 */
2382 static int
2383 twa_get_param(struct twa_softc *sc, int table_id, int param_id,
2384 size_t param_size, void (* callback)(struct twa_request *tr),
2385 struct twa_param_9k **param)
2386 {
2387 int rv = 0;
2388 struct twa_request *tr;
2389 union twa_command_7k *cmd;
2390
2391 /* Get a request packet. */
2392 if ((tr = twa_get_request(sc, 0)) == NULL) {
2393 rv = EAGAIN;
2394 goto out;
2395 }
2396
2397 tr->tr_cmd_pkt_type |= TWA_CMD_PKT_TYPE_INTERNAL;
2398
2399 /* Allocate memory to read data into. */
2400 if ((*param = (struct twa_param_9k *)
2401 malloc(TWA_SECTOR_SIZE, M_DEVBUF, M_NOWAIT)) == NULL) {
2402 rv = ENOMEM;
2403 goto out;
2404 }
2405
2406 memset(*param, 0, sizeof(struct twa_param_9k) - 1 + param_size);
2407 tr->tr_data = *param;
2408 tr->tr_length = TWA_SECTOR_SIZE;
2409 tr->tr_flags = TWA_CMD_DATA_IN | TWA_CMD_DATA_OUT;
2410
2411 /* Build the cmd pkt. */
2412 cmd = &(tr->tr_command->command.cmd_pkt_7k);
2413
2414 tr->tr_command->cmd_hdr.header_desc.size_header = 128;
2415
2416 cmd->param.opcode = TWA_OP_GET_PARAM;
2417 cmd->param.sgl_offset = 2;
2418 cmd->param.size = 2;
2419 cmd->param.request_id = tr->tr_request_id;
2420 cmd->param.unit = 0;
2421 cmd->param.param_count = 1;
2422
2423 /* Specify which parameter we need. */
2424 (*param)->table_id = table_id | TWA_9K_PARAM_DESCRIPTOR;
2425 (*param)->parameter_id = param_id;
2426 (*param)->parameter_size_bytes = param_size;
2427
2428 /* Submit the command. */
2429 if (callback == NULL) {
2430 /* There's no call back; wait till the command completes. */
2431 rv = twa_immediate_request(tr, TWA_REQUEST_TIMEOUT_PERIOD);
2432
2433 if (rv != 0)
2434 goto out;
2435
2436 if ((rv = cmd->param.status) != 0) {
2437 /* twa_drain_complete_queue will have done the unmapping */
2438 goto out;
2439 }
2440 twa_release_request(tr);
2441 return (rv);
2442 } else {
2443 /* There's a call back. Simply submit the command. */
2444 tr->tr_callback = callback;
2445 rv = twa_map_request(tr);
2446 return (rv);
2447 }
2448 out:
2449 if (tr)
2450 twa_release_request(tr);
2451 return(rv);
2452 }
2453
2454
2455 /*
2456 * Function name: twa_set_param
2457 * Description: Set a firmware parameter.
2458 *
2459 * Input: sc -- ptr to per ctlr structure
2460 * table_id -- parameter table #
2461 * param_id -- index of the parameter in the table
2462 * param_size -- size of the parameter in bytes
2463 * callback -- ptr to function, if any, to be called
2464 * back on completion; NULL if no callback.
2465 * Output: None
2466 * Return value: 0 -- success
2467 * non-zero-- failure
2468 */
2469 static int
2470 twa_set_param(struct twa_softc *sc, int table_id,
2471 int param_id, int param_size, void *data,
2472 void (* callback)(struct twa_request *tr))
2473 {
2474 struct twa_request *tr;
2475 union twa_command_7k *cmd;
2476 struct twa_param_9k *param = NULL;
2477 int error = ENOMEM;
2478
2479 tr = twa_get_request(sc, 0);
2480 if (tr == NULL)
2481 return (EAGAIN);
2482
2483 tr->tr_cmd_pkt_type |= TWA_CMD_PKT_TYPE_INTERNAL;
2484
2485 /* Allocate memory to send data using. */
2486 if ((param = (struct twa_param_9k *)
2487 malloc(TWA_SECTOR_SIZE, M_DEVBUF, M_NOWAIT)) == NULL)
2488 goto out;
2489 memset(param, 0, sizeof(struct twa_param_9k) - 1 + param_size);
2490 tr->tr_data = param;
2491 tr->tr_length = TWA_SECTOR_SIZE;
2492 tr->tr_flags = TWA_CMD_DATA_IN | TWA_CMD_DATA_OUT;
2493
2494 /* Build the cmd pkt. */
2495 cmd = &(tr->tr_command->command.cmd_pkt_7k);
2496
2497 tr->tr_command->cmd_hdr.header_desc.size_header = 128;
2498
2499 cmd->param.opcode = TWA_OP_SET_PARAM;
2500 cmd->param.sgl_offset = 2;
2501 cmd->param.size = 2;
2502 cmd->param.request_id = tr->tr_request_id;
2503 cmd->param.unit = 0;
2504 cmd->param.param_count = 1;
2505
2506 /* Specify which parameter we want to set. */
2507 param->table_id = table_id | TWA_9K_PARAM_DESCRIPTOR;
2508 param->parameter_id = param_id;
2509 param->parameter_size_bytes = param_size;
2510 memcpy(param->data, data, param_size);
2511
2512 /* Submit the command. */
2513 if (callback == NULL) {
2514 /* There's no call back; wait till the command completes. */
2515 error = twa_immediate_request(tr, TWA_REQUEST_TIMEOUT_PERIOD);
2516 if (error == ETIMEDOUT)
2517 return(error); /* clean-up done by twa_immediate_request */
2518 if (error)
2519 goto out;
2520 if ((error = cmd->param.status)) {
2521 goto out; /* twa_drain_complete_queue will have done the unmapping */
2522 }
2523 free(param, M_DEVBUF);
2524 twa_release_request(tr);
2525 return(error);
2526 } else {
2527 /* There's a call back. Simply submit the command. */
2528 tr->tr_callback = callback;
2529 if ((error = twa_map_request(tr)))
2530 goto out;
2531
2532 return (0);
2533 }
2534 out:
2535 if (param)
2536 free(param, M_DEVBUF);
2537 if (tr)
2538 twa_release_request(tr);
2539 return(error);
2540 }
2541
2542
2543 /*
2544 * Function name: twa_init_connection
2545 * Description: Send init_connection cmd to firmware
2546 *
2547 * Input: sc -- ptr to per ctlr structure
2548 * message_credits -- max # of requests that we might send
2549 * down simultaneously. This will be
2550 * typically set to 256 at init-time or
2551 * after a reset, and to 1 at shutdown-time
2552 * set_features -- indicates if we intend to use 64-bit
2553 * sg, also indicates if we want to do a
2554 * basic or an extended init_connection;
2555 *
2556 * Note: The following input/output parameters are valid, only in case of an
2557 * extended init_connection:
2558 *
2559 * current_fw_srl -- srl of fw we are bundled
2560 * with, if any; 0 otherwise
2561 * current_fw_arch_id -- arch_id of fw we are bundled
2562 * with, if any; 0 otherwise
2563 * current_fw_branch -- branch # of fw we are bundled
2564 * with, if any; 0 otherwise
2565 * current_fw_build -- build # of fw we are bundled
2566 * with, if any; 0 otherwise
2567 * Output: fw_on_ctlr_srl -- srl of fw on ctlr
2568 * fw_on_ctlr_arch_id -- arch_id of fw on ctlr
2569 * fw_on_ctlr_branch -- branch # of fw on ctlr
2570 * fw_on_ctlr_build -- build # of fw on ctlr
2571 * init_connect_result -- result bitmap of fw response
2572 * Return value: 0 -- success
2573 * non-zero-- failure
2574 */
2575 static int
2576 twa_init_connection(struct twa_softc *sc, u_int16_t message_credits,
2577 u_int32_t set_features, u_int16_t current_fw_srl,
2578 u_int16_t current_fw_arch_id, u_int16_t current_fw_branch,
2579 u_int16_t current_fw_build, u_int16_t *fw_on_ctlr_srl,
2580 u_int16_t *fw_on_ctlr_arch_id, u_int16_t *fw_on_ctlr_branch,
2581 u_int16_t *fw_on_ctlr_build, u_int32_t *init_connect_result)
2582 {
2583 struct twa_request *tr;
2584 struct twa_command_init_connect *init_connect;
2585 int error = 1;
2586
2587 /* Get a request packet. */
2588 if ((tr = twa_get_request(sc, 0)) == NULL)
2589 goto out;
2590 tr->tr_cmd_pkt_type |= TWA_CMD_PKT_TYPE_INTERNAL;
2591 /* Build the cmd pkt. */
2592 init_connect = &(tr->tr_command->command.cmd_pkt_7k.init_connect);
2593
2594 tr->tr_command->cmd_hdr.header_desc.size_header = 128;
2595
2596 init_connect->opcode = TWA_OP_INIT_CONNECTION;
2597 init_connect->request_id = tr->tr_request_id;
2598 init_connect->message_credits = message_credits;
2599 init_connect->features = set_features;
2600 if (TWA_64BIT_ADDRESSES) {
2601 printf("64 bit addressing supported for scatter/gather list\n");
2602 init_connect->features |= TWA_64BIT_SG_ADDRESSES;
2603 }
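	/*
	 * The basic init_connection packet uses size 3; the extended form
	 * uses size 6 and additionally carries the firmware-compatibility
	 * (SRL/arch/branch/build) fields, so the controller can report the
	 * firmware it is running.
	 */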
2604 if (set_features & TWA_EXTENDED_INIT_CONNECT) {
2605 /*
2606 * Fill in the extra fields needed for
2607 * an extended init_connect.
2608 */
2609 init_connect->size = 6;
2610 init_connect->fw_srl = current_fw_srl;
2611 init_connect->fw_arch_id = current_fw_arch_id;
2612 		init_connect->fw_branch = current_fw_branch;
		init_connect->fw_build = current_fw_build;
2613 } else
2614 init_connect->size = 3;
2615
2616 /* Submit the command, and wait for it to complete. */
2617 error = twa_immediate_request(tr, TWA_REQUEST_TIMEOUT_PERIOD);
2618 if (error == ETIMEDOUT)
2619 return(error); /* clean-up done by twa_immediate_request */
2620 if (error)
2621 goto out;
2622 if ((error = init_connect->status)) {
2623 goto out; /* twa_drain_complete_queue will have done the unmapping */
2624 }
2625 if (set_features & TWA_EXTENDED_INIT_CONNECT) {
2626 *fw_on_ctlr_srl = init_connect->fw_srl;
2627 *fw_on_ctlr_arch_id = init_connect->fw_arch_id;
2628 *fw_on_ctlr_branch = init_connect->fw_branch;
2629 *fw_on_ctlr_build = init_connect->fw_build;
2630 *init_connect_result = init_connect->result;
2631 }
2632 twa_release_request(tr);
2633 return(error);
2634
2635 out:
2636 if (tr)
2637 twa_release_request(tr);
2638 return(error);
2639 }
2640
2641
2642 static int
2643 twa_reset(struct twa_softc *sc)
2644 {
2645 int s;
2646 int error = 0;
2647
2648 /*
2649 * Disable interrupts from the controller, and mask any
2650 * accidental entry into our interrupt handler.
2651 */
2652 twa_outl(sc, TWA_CONTROL_REGISTER_OFFSET,
2653 TWA_CONTROL_DISABLE_INTERRUPTS);
2654
2655 s = splbio();
2656
2657 /* Soft reset the controller. */
2658 if ((error = twa_soft_reset(sc)))
2659 goto out;
2660
2661 /* Re-establish logical connection with the controller. */
2662 if ((error = twa_init_connection(sc, TWA_INIT_MESSAGE_CREDITS,
2663 0, 0, 0, 0, 0,
2664 NULL, NULL, NULL, NULL, NULL))) {
2665 goto out;
2666 }
2667 /*
2668 * Complete all requests in the complete queue; error back all requests
2669 * in the busy queue. Any internal requests will be simply freed.
2670 * Re-submit any requests in the pending queue.
2671 */
2672 twa_drain_busy_queue(sc);
2673
2674 out:
2675 splx(s);
2676 /*
2677 * Enable interrupts, and also clear attention and response interrupts.
2678 */
2679 twa_outl(sc, TWA_CONTROL_REGISTER_OFFSET,
2680 TWA_CONTROL_CLEAR_ATTENTION_INTERRUPT |
2681 TWA_CONTROL_UNMASK_RESPONSE_INTERRUPT |
2682 TWA_CONTROL_ENABLE_INTERRUPTS);
2683 return(error);
2684 }
2685
2686
2687 static int
2688 twa_soft_reset(struct twa_softc *sc)
2689 {
2690 u_int32_t status_reg;
2691
2692 twa_outl(sc, TWA_CONTROL_REGISTER_OFFSET,
2693 TWA_CONTROL_ISSUE_SOFT_RESET |
2694 TWA_CONTROL_CLEAR_HOST_INTERRUPT |
2695 TWA_CONTROL_CLEAR_ATTENTION_INTERRUPT |
2696 TWA_CONTROL_MASK_COMMAND_INTERRUPT |
2697 TWA_CONTROL_MASK_RESPONSE_INTERRUPT |
2698 TWA_CONTROL_DISABLE_INTERRUPTS);
2699
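	/*
	 * Wait up to 30 seconds for the microcontroller to come back up and
	 * assert the attention interrupt that signals the soft reset has
	 * completed.
	 */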
2700 if (twa_wait_status(sc, TWA_STATUS_MICROCONTROLLER_READY |
2701 TWA_STATUS_ATTENTION_INTERRUPT, 30)) {
2702 aprint_error("%s: no attention interrupt after reset.\n",
2703 sc->twa_dv.dv_xname);
2704 return(1);
2705 }
2706 twa_outl(sc, TWA_CONTROL_REGISTER_OFFSET,
2707 TWA_CONTROL_CLEAR_ATTENTION_INTERRUPT);
2708
2709 if (twa_drain_response_queue(sc)) {
2710 aprint_error("%s: cannot drain response queue.\n",sc->twa_dv.dv_xname);
2711 return(1);
2712 }
2713 if (twa_drain_aen_queue(sc)) {
2714 aprint_error("%s: cannot drain AEN queue.\n", sc->twa_dv.dv_xname);
2715 return(1);
2716 }
2717 if (twa_find_aen(sc, TWA_AEN_SOFT_RESET)) {
2718 aprint_error("%s: reset not reported by controller.\n",
2719 sc->twa_dv.dv_xname);
2720 return(1);
2721 }
2722 status_reg = twa_inl(sc, TWA_STATUS_REGISTER_OFFSET);
2723 if (TWA_STATUS_ERRORS(status_reg) ||
2724 twa_check_ctlr_state(sc, status_reg)) {
2725 aprint_error("%s: controller errors detected.\n", sc->twa_dv.dv_xname);
2726 return(1);
2727 }
2728 return(0);
2729 }
2730
2731
2732 static int
2733 twa_wait_status(struct twa_softc *sc, u_int32_t status, u_int32_t timeout)
2734 {
2735 struct timeval t1;
2736 time_t end_time;
2737 u_int32_t status_reg;
2738
2739 	/* 'timeout' is given in seconds. */
2740 
2741 	microtime(&t1);
2742 
2743 	end_time = t1.tv_sec + timeout;
2744 
2745 	do {
2746 		status_reg = twa_inl(sc, TWA_STATUS_REGISTER_OFFSET);
2747 		if ((status_reg & status) == status)/* got the required bit(s)? */
2748 			return(0);
2749 		DELAY(100000);	/* poll every 100ms */
2750 		microtime(&t1);
2751 	} while (t1.tv_sec <= end_time);
2752
2753 return(1);
2754 }
2755
2756
2757 static int
2758 twa_fetch_aen(struct twa_softc *sc)
2759 {
2760 struct twa_request *tr;
2761 int s, error = 0;
2762
2763 s = splbio();
2764
2765 	if ((tr = twa_get_request(sc, TWA_CMD_AEN)) == NULL) {
		splx(s);
2766 		return(EIO);
	}
2767 tr->tr_cmd_pkt_type |= TWA_CMD_PKT_TYPE_INTERNAL;
2768 tr->tr_callback = twa_aen_callback;
2769 tr->tr_data = malloc(TWA_SECTOR_SIZE, M_DEVBUF, M_NOWAIT);
2770 	if (tr->tr_data == NULL || twa_request_sense(tr, 0) != 0) {
2771 if (tr->tr_data)
2772 free(tr->tr_data, M_DEVBUF);
2773 twa_release_request(tr);
2774 error = 1;
2775 }
2776 splx(s);
2777
2778 return(error);
2779 }
2780
2781
2782
2783 /*
2784 * Function name: twa_aen_callback
2785 * Description: Callback for requests to fetch AEN's.
2786 *
2787 * Input: tr -- ptr to completed request pkt
2788 * Output: None
2789 * Return value: None
2790 */
2791 static void
2792 twa_aen_callback(struct twa_request *tr)
2793 {
2794 int i;
2795 int fetch_more_aens = 0;
2796 struct twa_softc *sc = tr->tr_sc;
2797 struct twa_command_header *cmd_hdr =
2798 (struct twa_command_header *)(tr->tr_data);
2799 struct twa_command_9k *cmd =
2800 &(tr->tr_command->command.cmd_pkt_9k);
2801
2802 if (! cmd->status) {
2803 if ((tr->tr_cmd_pkt_type & TWA_CMD_PKT_TYPE_9K) &&
2804 (cmd->cdb[0] == 0x3 /* REQUEST_SENSE */))
2805 if (twa_enqueue_aen(sc, cmd_hdr)
2806 != TWA_AEN_QUEUE_EMPTY)
2807 fetch_more_aens = 1;
2808 } else {
2809 cmd_hdr->err_specific_desc[sizeof(cmd_hdr->err_specific_desc) - 1] = '\0';
2810 for (i = 0; i < 18; i++)
2811 printf("%x\t", tr->tr_command->cmd_hdr.sense_data[i]);
2812
2813 		printf("\n");	/* print new line */
2814
2815 for (i = 0; i < 128; i++)
2816 printf("%x\t", ((int8_t *)(tr->tr_data))[i]);
2817 }
2818 if (tr->tr_data)
2819 free(tr->tr_data, M_DEVBUF);
2820 twa_release_request(tr);
2821
2822 if (fetch_more_aens)
2823 twa_fetch_aen(sc);
2824 }
2825
2826
2827 /*
2828 * Function name: twa_enqueue_aen
2829 * Description: Queues AEN's to be supplied to user-space tools on request.
2830 *
2831 * Input: sc -- ptr to per ctlr structure
2832 * cmd_hdr -- ptr to hdr of fw cmd pkt, from where the AEN
2833 * details can be retrieved.
2834 * Output: None
2835  * Return value:	AEN code of the event just processed
2836 */
2837 static uint16_t
2838 twa_enqueue_aen(struct twa_softc *sc, struct twa_command_header *cmd_hdr)
2839 {
2840 int rv, s;
2841 struct tw_cl_event_packet *event;
2842 uint16_t aen_code;
2843 	uint32_t sync_time;
2844
2845 s = splbio();
2846 aen_code = cmd_hdr->status_block.error;
2847
2848 switch (aen_code) {
2849 case TWA_AEN_SYNC_TIME_WITH_HOST:
2850
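		/*
		 * Express the host time as seconds since the most recent
		 * Sunday 00:00 UTC: the Unix epoch (1970-01-01) fell on a
		 * Thursday, so back up 3 days before taking the remainder
		 * modulo one week (604800 seconds).  This appears to be the
		 * granularity the scheduler-time parameter expects.
		 */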
2851 sync_time = (time.tv_sec - (3 * 86400)) % 604800;
2852 rv = twa_set_param(sc, TWA_PARAM_TIME_TABLE,
2853 TWA_PARAM_TIME_SchedulerTime, 4,
2854 &sync_time, twa_aen_callback);
2855 #ifdef DIAGNOSTIC
2856 if (rv != 0)
2857 printf("%s: unable to sync time with ctlr\n",
2858 sc->twa_dv.dv_xname);
2859 #endif
2860 break;
2861
2862 case TWA_AEN_QUEUE_EMPTY:
2863 break;
2864
2865 default:
2866 /* Queue the event. */
2867 event = sc->twa_aen_queue[sc->twa_aen_head];
2868 if (event->retrieved == TWA_AEN_NOT_RETRIEVED)
2869 sc->twa_aen_queue_overflow = TRUE;
2870 event->severity =
2871 cmd_hdr->status_block.substatus_block.severity;
2872 event->time_stamp_sec = time.tv_sec;
2873 event->aen_code = aen_code;
2874 event->retrieved = TWA_AEN_NOT_RETRIEVED;
2875 event->sequence_id = ++(sc->twa_current_sequence_id);
2876 cmd_hdr->err_specific_desc[sizeof(cmd_hdr->err_specific_desc) - 1] = '\0';
2877 event->parameter_len = strlen(cmd_hdr->err_specific_desc);
2878 memcpy(event->parameter_data, cmd_hdr->err_specific_desc,
2879 event->parameter_len);
2880
2881 if (event->severity < TWA_AEN_SEVERITY_DEBUG) {
2882 printf("%s: AEN 0x%04X: %s: %s: %s\n",
2883 sc->twa_dv.dv_xname,
2884 aen_code,
2885 twa_aen_severity_table[event->severity],
2886 twa_find_msg_string(twa_aen_table, aen_code),
2887 event->parameter_data);
2888 }
2889
2890 if ((sc->twa_aen_head + 1) == TWA_Q_LENGTH)
2891 sc->twa_aen_queue_wrapped = TRUE;
2892 sc->twa_aen_head = (sc->twa_aen_head + 1) % TWA_Q_LENGTH;
2893 break;
2894 } /* switch */
2895 splx(s);
2896
2897 return (aen_code);
2898 }
2899
2900
2901
2902 /*
2903 * Function name: twa_find_aen
2904 * Description: Reports whether a given AEN ever occurred.
2905 *
2906 * Input: sc -- ptr to per ctlr structure
2907 * aen_code-- AEN to look for
2908 * Output: None
2909  * Return value:	0	-- AEN found in the queue
2910  *			non-zero-- AEN not found
2911 */
2912 static int
2913 twa_find_aen(struct twa_softc *sc, u_int16_t aen_code)
2914 {
2915 u_int32_t last_index;
2916 int s;
2917 int i;
2918
2919 s = splbio();
2920
2921 if (sc->twa_aen_queue_wrapped)
2922 last_index = sc->twa_aen_head;
2923 else
2924 last_index = 0;
2925
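	/*
	 * Walk the ring backwards from the most recently queued entry;
	 * adding TWA_Q_LENGTH - 1 before the modulo steps back one slot
	 * without letting the index go negative.
	 */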
2926 i = sc->twa_aen_head;
2927 do {
2928 i = (i + TWA_Q_LENGTH - 1) % TWA_Q_LENGTH;
2929 if ((sc->twa_aen_queue[i])->aen_code == aen_code) {
2930 splx(s);
2931 return(0);
2932 }
2933 } while (i != last_index);
2934
2935 splx(s);
2936 return(1);
2937 }
2938
2939 static inline void
2940 twa_request_init(struct twa_request *tr, int flags)
2941 {
2942 tr->tr_data = NULL;
2943 tr->tr_real_data = NULL;
2944 tr->tr_length = 0;
2945 tr->tr_real_length = 0;
2946 tr->tr_status = TWA_CMD_SETUP;/* command is in setup phase */
2947 tr->tr_flags = flags;
2948 tr->tr_error = 0;
2949 tr->tr_callback = NULL;
2950 tr->tr_cmd_pkt_type = 0;
2951
2952 /*
2953 * Look at the status field in the command packet to see how
2954 * it completed the last time it was used, and zero out only
2955 * the portions that might have changed. Note that we don't
2956 * care to zero out the sglist.
2957 */
2958 if (tr->tr_command->command.cmd_pkt_9k.status)
2959 memset(tr->tr_command, 0,
2960 sizeof(struct twa_command_header) + 28);
2961 else
2962 memset(&(tr->tr_command->command), 0, 28);
2963 }
2964
2965 struct twa_request *
2966 twa_get_request_wait(struct twa_softc *sc, int flags)
2967 {
2968 struct twa_request *tr;
2969 int s;
2970
2971 KASSERT((flags & TWA_CMD_AEN) == 0);
2972
2973 s = splbio();
2974 while ((tr = TAILQ_FIRST(&sc->twa_free)) == NULL) {
2975 sc->twa_sc_flags |= TWA_STATE_REQUEST_WAIT;
2976 (void) tsleep(&sc->twa_free, PRIBIO, "twaccb", hz);
2977 }
2978 TAILQ_REMOVE(&sc->twa_free, tr, tr_link);
2979
2980 splx(s);
2981
2982 twa_request_init(tr, flags);
2983
2984 return(tr);
2985 }
2986
2987
2988 struct twa_request *
2989 twa_get_request(struct twa_softc *sc, int flags)
2990 {
2991 int s;
2992 struct twa_request *tr;
2993
2994 /* Get a free request packet. */
2995 s = splbio();
2996 if (__predict_false((flags & TWA_CMD_AEN) != 0)) {
2997
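		/*
		 * AEN fetches use the dedicated request sc->sc_twa_request,
		 * guarded by TWA_CMD_AEN_BUSY, so an AEN can still be
		 * retrieved even when the free list is empty.
		 */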
2998 if ((sc->sc_twa_request->tr_flags & TWA_CMD_AEN_BUSY) == 0) {
2999 tr = sc->sc_twa_request;
3000 flags |= TWA_CMD_AEN_BUSY;
3001 } else {
3002 splx(s);
3003 return (NULL);
3004 }
3005 } else {
3006 if (__predict_false((tr =
3007 TAILQ_FIRST(&sc->twa_free)) == NULL)) {
3008 splx(s);
3009 return (NULL);
3010 }
3011 TAILQ_REMOVE(&sc->twa_free, tr, tr_link);
3012 }
3013 splx(s);
3014
3015 twa_request_init(tr, flags);
3016
3017 return(tr);
3018 }
3019
3020
3021 /*
3022 * Print some information about the controller
3023 */
3024 static void
3025 twa_describe_controller(struct twa_softc *sc)
3026 {
3027 struct twa_param_9k *p[10];
3028 int i, rv = 0;
3029 uint32_t dsize;
3030 uint8_t ports;
3031
3032 	memset(p, 0, sizeof(p));
3033
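	/*
	 * p[0]..p[7] hold controller-wide parameters; p[8] and p[9] are
	 * re-fetched for each detected drive port below.
	 */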
3034 /* Get the port count. */
3035 rv |= twa_get_param(sc, TWA_PARAM_CONTROLLER,
3036 TWA_PARAM_CONTROLLER_PortCount, 1, NULL, &p[0]);
3037
3038 /* get version strings */
3039 rv |= twa_get_param(sc, TWA_PARAM_VERSION, TWA_PARAM_VERSION_FW,
3040 16, NULL, &p[1]);
3041 rv |= twa_get_param(sc, TWA_PARAM_VERSION, TWA_PARAM_VERSION_BIOS,
3042 16, NULL, &p[2]);
3043 rv |= twa_get_param(sc, TWA_PARAM_VERSION, TWA_PARAM_VERSION_Mon,
3044 16, NULL, &p[3]);
3045 rv |= twa_get_param(sc, TWA_PARAM_VERSION, TWA_PARAM_VERSION_PCBA,
3046 8, NULL, &p[4]);
3047 rv |= twa_get_param(sc, TWA_PARAM_VERSION, TWA_PARAM_VERSION_ATA,
3048 8, NULL, &p[5]);
3049 rv |= twa_get_param(sc, TWA_PARAM_VERSION, TWA_PARAM_VERSION_PCI,
3050 8, NULL, &p[6]);
3051 rv |= twa_get_param(sc, TWA_PARAM_DRIVESUMMARY, TWA_PARAM_DRIVESTATUS,
3052 16, NULL, &p[7]);
3053
3054 if (rv) {
3055 /* some error occurred */
3056 aprint_error("%s: failed to fetch version information\n",
3057 sc->twa_dv.dv_xname);
3058 goto bail;
3059 }
3060
3061 ports = *(u_int8_t *)(p[0]->data);
3062
3063 aprint_normal("%s: %d ports, Firmware %.16s, BIOS %.16s\n",
3064 sc->twa_dv.dv_xname, ports,
3065 p[1]->data, p[2]->data);
3066
3067 aprint_verbose("%s: Monitor %.16s, PCB %.8s, Achip %.8s, Pchip %.8s\n",
3068 sc->twa_dv.dv_xname,
3069 p[3]->data, p[4]->data,
3070 p[5]->data, p[6]->data);
3071
3072 for (i = 0; i < ports; i++) {
3073
3074 if ((*((char *)(p[7]->data + i)) & TWA_DRIVE_DETECTED) == 0)
3075 continue;
3076
3077 rv = twa_get_param(sc, TWA_PARAM_DRIVE_TABLE,
3078 TWA_PARAM_DRIVEMODELINDEX,
3079 TWA_PARAM_DRIVEMODEL_LENGTH, NULL, &p[8]);
3080
3081 if (rv != 0) {
3082 aprint_error("%s: unable to get drive model for port"
3083 " %d\n", sc->twa_dv.dv_xname, i);
3084 continue;
3085 }
3086
3087 rv = twa_get_param(sc, TWA_PARAM_DRIVE_TABLE,
3088 TWA_PARAM_DRIVESIZEINDEX,
3089 TWA_PARAM_DRIVESIZE_LENGTH, NULL, &p[9]);
3090
3091 if (rv != 0) {
3092 aprint_error("%s: unable to get drive size"
3093 " for port %d\n", sc->twa_dv.dv_xname,
3094 i);
3095 free(p[8], M_DEVBUF);
3096 continue;
3097 }
3098
3099 dsize = *(uint32_t *)(p[9]->data);
3100
3101 aprint_verbose("%s: port %d: %.40s %d MB\n",
3102 sc->twa_dv.dv_xname, i, p[8]->data, dsize / 2048);
3103
3104 if (p[8])
3105 free(p[8], M_DEVBUF);
3106 if (p[9])
3107 free(p[9], M_DEVBUF);
3108 }
3109 bail:
3110 if (p[0])
3111 free(p[0], M_DEVBUF);
3112 if (p[1])
3113 free(p[1], M_DEVBUF);
3114 if (p[2])
3115 free(p[2], M_DEVBUF);
3116 if (p[3])
3117 free(p[3], M_DEVBUF);
3118 if (p[4])
3119 free(p[4], M_DEVBUF);
3120 if (p[5])
3121 free(p[5], M_DEVBUF);
3122 	if (p[6])
3123 		free(p[6], M_DEVBUF);
	if (p[7])
		free(p[7], M_DEVBUF);
3124 }
3125
3126
3127
3128 /*
3129 * Function name: twa_check_ctlr_state
3130 * Description: Makes sure that the fw status register reports a
3131 * proper status.
3132 *
3133 * Input: sc -- ptr to per ctlr structure
3134 * status_reg -- value in the status register
3135 * Output: None
3136 * Return value: 0 -- no errors
3137 * non-zero-- errors
3138 */
3139 static int
3140 twa_check_ctlr_state(struct twa_softc *sc, u_int32_t status_reg)
3141 {
3142 int result = 0;
3143 struct timeval t1;
3144 static time_t last_warning[2] = {0, 0};
3145
3146 /* Check if the 'micro-controller ready' bit is not set. */
3147 if ((status_reg & TWA_STATUS_EXPECTED_BITS) !=
3148 TWA_STATUS_EXPECTED_BITS) {
3149
3150 microtime(&t1);
3151
3152 last_warning[0] += (5 * 1000 * 100);
3153
3154 if (t1.tv_usec > last_warning[0]) {
3155 microtime(&t1);
3156 last_warning[0] = t1.tv_usec;
3157 }
3158 result = 1;
3159 }
3160
3161 /* Check if any error bits are set. */
3162 if ((status_reg & TWA_STATUS_UNEXPECTED_BITS) != 0) {
3163
3164 microtime(&t1);
3165 last_warning[1] += (5 * 1000 * 100);
3166 if (t1.tv_usec > last_warning[1]) {
3167 microtime(&t1);
3168 last_warning[1] = t1.tv_usec;
3169 }
3170 if (status_reg & TWA_STATUS_PCI_PARITY_ERROR_INTERRUPT) {
3171 aprint_error("%s: clearing PCI parity error "
3172 "re-seat/move/replace card.\n",
3173 sc->twa_dv.dv_xname);
3174 twa_outl(sc, TWA_CONTROL_REGISTER_OFFSET,
3175 TWA_CONTROL_CLEAR_PARITY_ERROR);
3176 pci_conf_write(sc->pc, sc->tag,
3177 PCI_COMMAND_STATUS_REG,
3178 TWA_PCI_CONFIG_CLEAR_PARITY_ERROR);
3179 result = 1;
3180 }
3181 if (status_reg & TWA_STATUS_PCI_ABORT_INTERRUPT) {
3182 aprint_error("%s: clearing PCI abort\n",
3183 sc->twa_dv.dv_xname);
3184 twa_outl(sc, TWA_CONTROL_REGISTER_OFFSET,
3185 TWA_CONTROL_CLEAR_PCI_ABORT);
3186 pci_conf_write(sc->pc, sc->tag,
3187 PCI_COMMAND_STATUS_REG,
3188 TWA_PCI_CONFIG_CLEAR_PCI_ABORT);
3189 result = 1;
3190 }
3191 if (status_reg & TWA_STATUS_QUEUE_ERROR_INTERRUPT) {
3192 aprint_error("%s: clearing controller queue error\n",
3193 sc->twa_dv.dv_xname);
3194 			twa_outl(sc, TWA_CONTROL_REGISTER_OFFSET,
3195 			    TWA_CONTROL_CLEAR_QUEUE_ERROR);
3196 result = 1;
3197 }
3198 if (status_reg & TWA_STATUS_SBUF_WRITE_ERROR) {
3199 aprint_error("%s: clearing SBUF write error\n",
3200 sc->twa_dv.dv_xname);
3201 twa_outl(sc, TWA_CONTROL_REGISTER_OFFSET,
3202 TWA_CONTROL_CLEAR_SBUF_WRITE_ERROR);
3203 result = 1;
3204 }
3205 if (status_reg & TWA_STATUS_MICROCONTROLLER_ERROR) {
3206 aprint_error("%s: micro-controller error\n",
3207 sc->twa_dv.dv_xname);
3208 result = 1;
3209 }
3210 }
3211 return(result);
3212 }
3213
3214
3215