ciss.c revision 1.46 1 /* $NetBSD: ciss.c,v 1.46 2020/07/14 10:44:34 jdolecek Exp $ */
2 /* $OpenBSD: ciss.c,v 1.68 2013/05/30 16:15:02 deraadt Exp $ */
3
4 /*
5 * Copyright (c) 2005,2006 Michael Shalayeff
6 * All rights reserved.
7 *
8 * Permission to use, copy, modify, and distribute this software for any
9 * purpose with or without fee is hereby granted, provided that the above
10 * copyright notice and this permission notice appear in all copies.
11 *
12 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
13 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
14 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
15 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
16 * WHATSOEVER RESULTING FROM LOSS OF MIND, USE, DATA OR PROFITS, WHETHER IN
17 * AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
18 * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19 */
20
21 #include <sys/cdefs.h>
22 __KERNEL_RCSID(0, "$NetBSD: ciss.c,v 1.46 2020/07/14 10:44:34 jdolecek Exp $");
23
24 #include "bio.h"
25
26 /* #define CISS_DEBUG */
27
28 #include <sys/param.h>
29 #include <sys/systm.h>
30 #include <sys/buf.h>
31 #include <sys/ioctl.h>
32 #include <sys/device.h>
33 #include <sys/kernel.h>
34 #include <sys/malloc.h>
35 #include <sys/proc.h>
36
37 #include <sys/bus.h>
38
39 #include <dev/scsipi/scsi_all.h>
40 #include <dev/scsipi/scsi_disk.h>
41 #include <dev/scsipi/scsiconf.h>
42 #include <dev/scsipi/scsipi_all.h>
43
44 #include <dev/ic/cissreg.h>
45 #include <dev/ic/cissvar.h>
46
47 #if NBIO > 0
48 #include <dev/biovar.h>
49 #endif /* NBIO > 0 */
50
51 #ifdef CISS_DEBUG
52 #define CISS_DPRINTF(m,a) if (ciss_debug & (m)) printf a
53 #define CISS_D_CMD 0x0001
54 #define CISS_D_INTR 0x0002
55 #define CISS_D_MISC 0x0004
56 #define CISS_D_DMA 0x0008
57 #define CISS_D_IOCTL 0x0010
58 #define CISS_D_ERR 0x0020
59 int ciss_debug = 0
60 | CISS_D_CMD
61 | CISS_D_INTR
62 | CISS_D_MISC
63 | CISS_D_DMA
64 | CISS_D_IOCTL
65 | CISS_D_ERR
66 ;
67 #else
68 #define CISS_DPRINTF(m,a) /* m, a */
69 #endif
70
71 static void ciss_scsi_cmd(struct scsipi_channel *chan,
72 scsipi_adapter_req_t req, void *arg);
73 static int ciss_scsi_ioctl(struct scsipi_channel *chan, u_long cmd,
74 void *addr, int flag, struct proc *p);
75 static void cissminphys(struct buf *bp);
76
77 #if 0
78 static void ciss_scsi_raw_cmd(struct scsipi_channel *chan,
79 scsipi_adapter_req_t req, void *arg);
80 #endif
81
82 static int ciss_sync(struct ciss_softc *sc);
83 static void ciss_heartbeat(void *v);
84 static void ciss_shutdown(void *v);
85
86 static struct ciss_ccb *ciss_get_ccb(struct ciss_softc *sc);
87 static void ciss_put_ccb(struct ciss_ccb *ccb);
88 static int ciss_cmd(struct ciss_ccb *ccb, int flags, int wait);
89 static int ciss_done(struct ciss_ccb *ccb);
90 static int ciss_error(struct ciss_ccb *ccb);
91 struct ciss_ld *ciss_pdscan(struct ciss_softc *sc, int ld);
92 static int ciss_inq(struct ciss_softc *sc, struct ciss_inquiry *inq);
93 int ciss_ldid(struct ciss_softc *, int, struct ciss_ldid *);
94 int ciss_ldstat(struct ciss_softc *, int, struct ciss_ldstat *);
95 static int ciss_ldmap(struct ciss_softc *sc);
96 int ciss_pdid(struct ciss_softc *, u_int8_t, struct ciss_pdid *, int);
97
98 #if NBIO > 0
99 int ciss_ioctl(device_t, u_long, void *);
100 int ciss_ioctl_vol(struct ciss_softc *, struct bioc_vol *);
101 int ciss_blink(struct ciss_softc *, int, int, int, struct ciss_blink *);
102 int ciss_create_sensors(struct ciss_softc *);
103 void ciss_sensor_refresh(struct sysmon_envsys *, envsys_data_t *);
104 #endif /* NBIO > 0 */
105
106 static struct ciss_ccb *
107 ciss_get_ccb(struct ciss_softc *sc)
108 {
109 struct ciss_ccb *ccb;
110
111 mutex_enter(&sc->sc_mutex);
112 if ((ccb = TAILQ_LAST(&sc->sc_free_ccb, ciss_queue_head))) {
113 TAILQ_REMOVE(&sc->sc_free_ccb, ccb, ccb_link);
114 ccb->ccb_state = CISS_CCB_READY;
115 }
116 mutex_exit(&sc->sc_mutex);
117 return ccb;
118 }
119
120 static void
121 ciss_put_ccb(struct ciss_ccb *ccb)
122 {
123 struct ciss_softc *sc = ccb->ccb_sc;
124
125 ccb->ccb_state = CISS_CCB_FREE;
126 mutex_enter(&sc->sc_mutex);
127 TAILQ_INSERT_TAIL(&sc->sc_free_ccb, ccb, ccb_link);
128 mutex_exit(&sc->sc_mutex);
129 }
130
/*
 * Switch the controller to "performant" transport mode: read the
 * transport configuration from the config BAR, allocate and DMA-map
 * the reply ring (one 64bit slot per outstanding command), preload
 * the SG fetch table, and write the configuration back so the
 * controller picks it up together with the general configuration.
 * Returns 0 on success, -1 on any allocation/mapping failure
 * (each failure path unwinds the resources acquired before it).
 */
static int
ciss_init_perf(struct ciss_softc *sc)
{
	struct ciss_perf_config *pc = &sc->perfcfg;
	int error, total, rseg;

	/* The performant-mode command limit overrides the generic one. */
	if (sc->cfg.max_perfomant_mode_cmds)
		sc->maxcmd = sc->cfg.max_perfomant_mode_cmds;

	bus_space_read_region_4(sc->sc_iot, sc->cfg_ioh,
	    sc->cfgoff + sc->cfg.troff,
	    (u_int32_t *)pc, sizeof(*pc) / 4);

	/* One 64bit reply descriptor per command. */
	total = sizeof(uint64_t) * sc->maxcmd;

	if ((error = bus_dmamem_alloc(sc->sc_dmat, total, PAGE_SIZE, 0,
	    sc->replyseg, 1, &rseg, BUS_DMA_WAITOK))) {
		aprint_error(": cannot allocate perf area (%d)\n", error);
		return -1;
	}

	if ((error = bus_dmamem_map(sc->sc_dmat, sc->replyseg, rseg, total,
	    (void **)&sc->perf_reply, BUS_DMA_WAITOK))) {
		aprint_error(": cannot map perf area (%d)\n", error);
		bus_dmamem_free(sc->sc_dmat, sc->replyseg, 1);
		return -1;
	}

	if ((error = bus_dmamap_create(sc->sc_dmat, total, 1,
	    total, 0, BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &sc->replymap))) {
		aprint_error(": cannot create perf dmamap (%d)\n", error);
		bus_dmamem_unmap(sc->sc_dmat, sc->perf_reply, total);
		sc->perf_reply = NULL;
		bus_dmamem_free(sc->sc_dmat, sc->replyseg, 1);
		return -1;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat, sc->replymap, sc->perf_reply,
	    total, NULL, BUS_DMA_WAITOK))) {
		aprint_error(": cannot load perf dmamap (%d)\n", error);
		bus_dmamap_destroy(sc->sc_dmat, sc->replymap);
		bus_dmamem_unmap(sc->sc_dmat, sc->perf_reply, total);
		sc->perf_reply = NULL;
		bus_dmamem_free(sc->sc_dmat, sc->replyseg, 1);
		return -1;
	}

	memset(sc->perf_reply, 0, total);

	/* The host owns ring slots whose cycle bit matches perf_cycle. */
	sc->perf_cycle = 0x1;
	sc->perf_rqidx = 0;

	/*
	 * Preload the fetch table with common command sizes. This allows the
	 * hardware to not waste bus cycles for typical i/o commands, but also
	 * not tax the driver to be too exact in choosing sizes. The table
	 * is optimized for page-aligned i/o's, but since most i/o comes
	 * from the various pagers, it's a reasonable assumption to make.
	 */
#define CISS_FETCH_COUNT(x) \
    (sizeof(struct ciss_cmd) + sizeof(struct ciss_sg_entry) * (x - 1) + 15) / 16

	pc->fetch_count[CISS_SG_FETCH_NONE] = CISS_FETCH_COUNT(0);
	pc->fetch_count[CISS_SG_FETCH_1] = CISS_FETCH_COUNT(1);
	pc->fetch_count[CISS_SG_FETCH_2] = CISS_FETCH_COUNT(2);
	pc->fetch_count[CISS_SG_FETCH_4] = CISS_FETCH_COUNT(4);
	pc->fetch_count[CISS_SG_FETCH_8] = CISS_FETCH_COUNT(8);
	pc->fetch_count[CISS_SG_FETCH_16] = CISS_FETCH_COUNT(16);
	pc->fetch_count[CISS_SG_FETCH_32] = CISS_FETCH_COUNT(32);
	pc->fetch_count[CISS_SG_FETCH_MAX] = (sc->ccblen + 15) / 16;

	pc->rq_size = sc->maxcmd;
	pc->rq_count = 1;	/* Hardcode for a single queue */
	pc->rq_bank_hi = 0;
	pc->rq_bank_lo = 0;
	pc->rq[0].rq_addr_hi = 0x0;
	pc->rq[0].rq_addr_lo = sc->replymap->dm_segs[0].ds_addr;

	/*
	 * Write back the changed configuration. It will be picked up
	 * by controller together with general configuration later on.
	 */
	bus_space_write_region_4(sc->sc_iot, sc->cfg_ioh,
	    sc->cfgoff + sc->cfg.troff,
	    (u_int32_t *)pc, sizeof(*pc) / 4);
	bus_space_barrier(sc->sc_iot, sc->cfg_ioh,
	    sc->cfgoff + sc->cfg.troff, sizeof(*pc),
	    BUS_SPACE_BARRIER_READ|BUS_SPACE_BARRIER_WRITE);

	return 0;
}
222
/*
 * Controller attach: validate the config table, select the transport
 * method (performant when supported, otherwise simple), commit the
 * configuration via the inbound doorbell, allocate and DMA-map the
 * CCB pool and a scratch page, query the adapter, map the logical
 * drives and finally register with scsipi (and bio/envsys).
 * Returns 0 on success, -1 on failure.
 *
 * NOTE(review): several mid/late error paths do not free everything
 * allocated earlier (see the existing TODO below) -- confirm before
 * relying on attach failure being leak-free.
 */
int
ciss_attach(struct ciss_softc *sc)
{
	struct ciss_ccb *ccb;
	struct ciss_cmd *cmd;
	struct ciss_inquiry *inq;
	bus_dma_segment_t seg[1];
	int error, i, total, rseg, maxfer;
	paddr_t pa;

	if (sc->cfg.signature != CISS_SIGNATURE) {
		aprint_error(": bad sign 0x%08x\n", sc->cfg.signature);
		return -1;
	}

	if (!(sc->cfg.methods & (CISS_METH_SIMPL|CISS_METH_PERF))) {
		aprint_error(": no supported method 0x%08x\n", sc->cfg.methods);
		return -1;
	}

	if (!sc->cfg.maxsg)
		sc->cfg.maxsg = MAXPHYS / PAGE_SIZE + 1;

	sc->maxcmd = sc->cfg.maxcmd;
	sc->maxsg = sc->cfg.maxsg;
	if (sc->maxsg > MAXPHYS / PAGE_SIZE + 1)
		sc->maxsg = MAXPHYS / PAGE_SIZE + 1;
	/* Round the CCB size (header + inline SG list) up to a power of 2. */
	i = sizeof(struct ciss_ccb) +
	    sizeof(ccb->ccb_cmd.sgl[0]) * (sc->maxsg - 1);
	for (sc->ccblen = 0x10; sc->ccblen < i; sc->ccblen <<= 1);

	sc->cfg.paddr_lim = 0;			/* 32bit addrs */
	sc->cfg.int_delay = 0;			/* disable coalescing */
	sc->cfg.int_count = 0;
	strlcpy(sc->cfg.hostname, "HUMPPA", sizeof(sc->cfg.hostname));
	sc->cfg.driverf |= CISS_DRV_PRF;	/* enable prefetch */
	if (CISS_PERF_SUPPORTED(sc)) {
		sc->cfg.rmethod = CISS_METH_PERF | CISS_METH_SHORT_TAG;
		if (ciss_init_perf(sc) != 0) {
			/* Don't try to fallback, just bail out */
			return -1;
		}
	} else {
		sc->cfg.rmethod = CISS_METH_SIMPL;
	}

	/* Commit the configuration and ring the inbound doorbell. */
	bus_space_write_region_4(sc->sc_iot, sc->cfg_ioh, sc->cfgoff,
	    (u_int32_t *)&sc->cfg, sizeof(sc->cfg) / 4);
	bus_space_barrier(sc->sc_iot, sc->cfg_ioh, sc->cfgoff, sizeof(sc->cfg),
	    BUS_SPACE_BARRIER_READ|BUS_SPACE_BARRIER_WRITE);

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, CISS_IDB, CISS_IDB_CFG);
	bus_space_barrier(sc->sc_iot, sc->sc_ioh, CISS_IDB, 4,
	    BUS_SPACE_BARRIER_WRITE);
	/* Poll up to ~1s for the controller to acknowledge the config. */
	for (i = 1000; i--; DELAY(1000)) {
		/* XXX maybe IDB is really 64bit? - hp dl380 needs this */
		(void)bus_space_read_4(sc->sc_iot, sc->sc_ioh, CISS_IDB + 4);
		if (!(bus_space_read_4(sc->sc_iot, sc->sc_ioh, CISS_IDB) & CISS_IDB_CFG))
			break;
		bus_space_barrier(sc->sc_iot, sc->sc_ioh, CISS_IDB, 4,
		    BUS_SPACE_BARRIER_READ);
	}

	if (bus_space_read_4(sc->sc_iot, sc->sc_ioh, CISS_IDB) & CISS_IDB_CFG) {
		aprint_error(": cannot set config\n");
		return -1;
	}

	/* Re-read the config table to learn the activated method. */
	bus_space_read_region_4(sc->sc_iot, sc->cfg_ioh, sc->cfgoff,
	    (u_int32_t *)&sc->cfg, sizeof(sc->cfg) / 4);

	if (!(sc->cfg.amethod & (CISS_METH_SIMPL|CISS_METH_PERF))) {
		aprint_error(": cannot set method 0x%08x\n", sc->cfg.amethod);
		return -1;
	}

	/* i'm ready for you and i hope you're ready for me */
	for (i = 30000; i--; DELAY(1000)) {
		if (bus_space_read_4(sc->sc_iot, sc->cfg_ioh, sc->cfgoff +
		    offsetof(struct ciss_config, amethod)) & CISS_METH_READY)
			break;
		bus_space_barrier(sc->sc_iot, sc->cfg_ioh, sc->cfgoff +
		    offsetof(struct ciss_config, amethod), 4,
		    BUS_SPACE_BARRIER_READ);
	}

	if (!(bus_space_read_4(sc->sc_iot, sc->cfg_ioh, sc->cfgoff +
	    offsetof(struct ciss_config, amethod)) & CISS_METH_READY)) {
		aprint_error(": she never came ready for me 0x%08x\n",
		    sc->cfg.amethod);
		return -1;
	}

	mutex_init(&sc->sc_mutex, MUTEX_DEFAULT, IPL_VM);
	mutex_init(&sc->sc_mutex_scratch, MUTEX_DEFAULT, IPL_VM);
	cv_init(&sc->sc_condvar, "ciss_cmd");

	/* One contiguous DMA-safe chunk holding all CCBs. */
	total = sc->ccblen * sc->maxcmd;
	if ((error = bus_dmamem_alloc(sc->sc_dmat, total, PAGE_SIZE, 0,
	    sc->cmdseg, 1, &rseg, BUS_DMA_NOWAIT))) {
		aprint_error(": cannot allocate CCBs (%d)\n", error);
		return -1;
	}

	if ((error = bus_dmamem_map(sc->sc_dmat, sc->cmdseg, rseg, total,
	    (void **)&sc->ccbs, BUS_DMA_NOWAIT))) {
		aprint_error(": cannot map CCBs (%d)\n", error);
		return -1;
	}
	memset(sc->ccbs, 0, total);

	if ((error = bus_dmamap_create(sc->sc_dmat, total, 1,
	    total, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &sc->cmdmap))) {
		aprint_error(": cannot create CCBs dmamap (%d)\n", error);
		bus_dmamem_free(sc->sc_dmat, sc->cmdseg, 1);
		return -1;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat, sc->cmdmap, sc->ccbs, total,
	    NULL, BUS_DMA_NOWAIT))) {
		aprint_error(": cannot load CCBs dmamap (%d)\n", error);
		bus_dmamem_free(sc->sc_dmat, sc->cmdseg, 1);
		bus_dmamap_destroy(sc->sc_dmat, sc->cmdmap);
		return -1;
	}

	TAILQ_INIT(&sc->sc_free_ccb);

	/*
	 * Carve the chunk into per-command CCBs.  The command tag is the
	 * CCB index shifted left by two (low bits carry status), and each
	 * CCB knows the bus address of its command and error blocks.
	 */
	maxfer = sc->maxsg * PAGE_SIZE;
	for (i = 0; total > 0 && i < sc->maxcmd; i++, total -= sc->ccblen) {
		ccb = (struct ciss_ccb *) ((char *)sc->ccbs + i * sc->ccblen);
		cmd = &ccb->ccb_cmd;
		pa = sc->cmdseg[0].ds_addr + i * sc->ccblen;

		ccb->ccb_sc = sc;
		ccb->ccb_cmdpa = pa + offsetof(struct ciss_ccb, ccb_cmd);
		ccb->ccb_state = CISS_CCB_FREE;

		cmd->id = htole32(i << 2);
		cmd->id_hi = htole32(0);
		cmd->sgin = sc->maxsg;
		cmd->sglen = htole16((u_int16_t)cmd->sgin);
		cmd->err_len = htole32(sizeof(ccb->ccb_err));
		pa += offsetof(struct ciss_ccb, ccb_err);
		cmd->err_pa = htole64((u_int64_t)pa);

		if ((error = bus_dmamap_create(sc->sc_dmat, maxfer, sc->maxsg,
		    maxfer, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
		    &ccb->ccb_dmamap)))
			break;

		TAILQ_INSERT_TAIL(&sc->sc_free_ccb, ccb, ccb_link);
	}

	/* Partial CCB setup is tolerated; total failure is not. */
	if (i < sc->maxcmd) {
		aprint_error(": cannot create ccb#%d dmamap (%d)\n", i, error);
		if (i == 0) {
			/* TODO leaking cmd's dmamaps and shitz */
			bus_dmamem_free(sc->sc_dmat, sc->cmdseg, 1);
			bus_dmamap_destroy(sc->sc_dmat, sc->cmdmap);
			return -1;
		}
	}

	/* Scratch page shared by internal commands, guarded by
	 * sc_mutex_scratch. */
	if ((error = bus_dmamem_alloc(sc->sc_dmat, PAGE_SIZE, PAGE_SIZE, 0,
	    seg, 1, &rseg, BUS_DMA_NOWAIT))) {
		aprint_error(": cannot allocate scratch buffer (%d)\n", error);
		return -1;
	}

	if ((error = bus_dmamem_map(sc->sc_dmat, seg, rseg, PAGE_SIZE,
	    (void **)&sc->scratch, BUS_DMA_NOWAIT))) {
		aprint_error(": cannot map scratch buffer (%d)\n", error);
		return -1;
	}
	memset(sc->scratch, 0, PAGE_SIZE);
	sc->sc_waitflag = XS_CTL_NOSLEEP;	/* can't sleep yet */

	mutex_enter(&sc->sc_mutex_scratch);	/* is this really needed? */
	inq = sc->scratch;
	if (ciss_inq(sc, inq)) {
		aprint_error(": adapter inquiry failed\n");
		mutex_exit(&sc->sc_mutex_scratch);
		bus_dmamem_free(sc->sc_dmat, sc->cmdseg, 1);
		bus_dmamap_destroy(sc->sc_dmat, sc->cmdmap);
		return -1;
	}

	if (!(inq->flags & CISS_INQ_BIGMAP)) {
		aprint_error(": big map is not supported, flags=0x%x\n",
		    inq->flags);
		mutex_exit(&sc->sc_mutex_scratch);
		bus_dmamem_free(sc->sc_dmat, sc->cmdseg, 1);
		bus_dmamap_destroy(sc->sc_dmat, sc->cmdmap);
		return -1;
	}

	sc->maxunits = inq->numld;
	sc->nbus = inq->nscsi_bus;
	sc->ndrives = inq->buswidth ? inq->buswidth : 256;
	aprint_normal(": %d LD%s, HW rev %d, FW %4.4s/%4.4s",
	    inq->numld, inq->numld == 1? "" : "s",
	    inq->hw_rev, inq->fw_running, inq->fw_stored);

	if (sc->cfg.methods & CISS_METH_FIFO64)
		aprint_normal(", 64bit fifo");
	else if (sc->cfg.methods & CISS_METH_FIFO64_RRO)
		aprint_normal(", 64bit fifo rro");
	aprint_normal(", method %s %#x",
	    CISS_IS_PERF(sc) ? "perf" : "simple",
	    sc->cfg.amethod);
	aprint_normal("\n");

	mutex_exit(&sc->sc_mutex_scratch);

	/* Periodic heartbeat check every 3 seconds. */
	callout_init(&sc->sc_hb, 0);
	callout_setfunc(&sc->sc_hb, ciss_heartbeat, sc);
	callout_schedule(&sc->sc_hb, hz * 3);

	/* map LDs */
	if (ciss_ldmap(sc)) {
		aprint_error_dev(sc->sc_dev, "adapter LD map failed\n");
		bus_dmamem_free(sc->sc_dmat, sc->cmdseg, 1);
		bus_dmamap_destroy(sc->sc_dmat, sc->cmdmap);
		return -1;
	}

	sc->sc_lds = malloc(sc->maxunits * sizeof(*sc->sc_lds),
	    M_DEVBUF, M_WAITOK | M_ZERO);

	/* Flush-on-shutdown hook so the write cache hits the disks. */
	sc->sc_flush = CISS_FLUSH_ENABLE;
	if (!(sc->sc_sh = shutdownhook_establish(ciss_shutdown, sc))) {
		aprint_error_dev(sc->sc_dev,
		    "unable to establish shutdown hook\n");
		bus_dmamem_free(sc->sc_dmat, sc->cmdseg, 1);
		bus_dmamap_destroy(sc->sc_dmat, sc->cmdmap);
		return -1;
	}

	/* One scsipi channel; each logical drive appears as a target. */
	sc->sc_channel.chan_adapter = &sc->sc_adapter;
	sc->sc_channel.chan_bustype = &scsi_bustype;
	sc->sc_channel.chan_channel = 0;
	sc->sc_channel.chan_ntargets = sc->maxunits;
	sc->sc_channel.chan_nluns = 1;	/* ciss doesn't really have SCSI luns */
	sc->sc_channel.chan_openings = sc->maxcmd;
#if NBIO > 0
	/* XXX Reserve some ccb's for sensor and bioctl. */
	if (sc->sc_channel.chan_openings > 2)
		sc->sc_channel.chan_openings -= 2;
#endif
	sc->sc_channel.chan_flags = 0;
	sc->sc_channel.chan_id = sc->maxunits;

	sc->sc_adapter.adapt_dev = sc->sc_dev;
	sc->sc_adapter.adapt_openings = sc->sc_channel.chan_openings;
	sc->sc_adapter.adapt_max_periph = uimin(sc->sc_adapter.adapt_openings, 256);
	sc->sc_adapter.adapt_request = ciss_scsi_cmd;
	sc->sc_adapter.adapt_minphys = cissminphys;
	sc->sc_adapter.adapt_ioctl = ciss_scsi_ioctl;
	sc->sc_adapter.adapt_nchannels = 1;
	config_found(sc->sc_dev, &sc->sc_channel, scsiprint);

#if 0
	sc->sc_link_raw.adapter_softc = sc;
	sc->sc_link.openings = sc->sc_channel.chan_openings;
	sc->sc_link_raw.adapter = &ciss_raw_switch;
	sc->sc_link_raw.adapter_target = sc->ndrives;
	sc->sc_link_raw.adapter_buswidth = sc->ndrives;
	config_found(sc->sc_dev, &sc->sc_channel, scsiprint);
#endif

#if NBIO > 0
	/* now map all the physdevs into their lds */
	/* XXX currently we assign all of them into ld0 */
	for (i = 0; i < sc->maxunits && i < 1; i++)
		if (!(sc->sc_lds[i] = ciss_pdscan(sc, i))) {
			sc->sc_waitflag = 0;	/* we can sleep now */
			return 0;
		}

	if (bio_register(sc->sc_dev, ciss_ioctl) != 0)
		aprint_error_dev(sc->sc_dev, "controller registration failed");
	else
		sc->sc_ioctl = ciss_ioctl;
	if (ciss_create_sensors(sc) != 0)
		aprint_error_dev(sc->sc_dev, "unable to create sensors");
#endif
	sc->sc_waitflag = 0;	/* we can sleep now */

	return 0;
}
514
/*
 * Shutdown hook (registered in ciss_attach): switch the flush mode to
 * "disable" and push the controller's write cache out to the disks
 * via ciss_sync() before the machine goes down.
 */
static void
ciss_shutdown(void *v)
{
	struct ciss_softc *sc = v;

	sc->sc_flush = CISS_FLUSH_DISABLE;
	/* timeout_del(&sc->sc_hb); */
	ciss_sync(sc);
}
524
/*
 * scsipi minphys hook.  The per-adapter clamp (maxsg pages) is not
 * implemented because the softc is not reachable from here; only the
 * generic minphys() limit is applied.
 */
static void
cissminphys(struct buf *bp)
{
#if 0	/* TODO */
#define CISS_MAXFER (PAGE_SIZE * (sc->maxsg + 1))
	if (bp->b_bcount > CISS_MAXFER)
		bp->b_bcount = CISS_MAXFER;
#endif
	minphys(bp);
}
535
536 static void
537 ciss_enqueue(struct ciss_softc *sc, ciss_queue_head *q, uint32_t id)
538 {
539 struct ciss_ccb *ccb;
540
541 KASSERT(mutex_owned(&sc->sc_mutex));
542
543 KASSERT((id >> 2) <= sc->maxcmd);
544 ccb = (struct ciss_ccb *) ((char *)sc->ccbs + (id >> 2) * sc->ccblen);
545 ccb->ccb_cmd.id = htole32(id);
546 ccb->ccb_cmd.id_hi = htole32(0);
547 TAILQ_INSERT_TAIL(q, ccb, ccb_link);
548 }
549
/*
 * Drain the simple-mode outbound FIFO into the caller's local queue.
 * An all-ones read means the FIFO is empty.  The register access
 * order per variant is hardware-mandated:
 *  - FIFO64:     reading HI first; all-ones HI means empty, then LO
 *                carries the tag.
 *  - FIFO64_RRO: reading LO first (reverse read order); HI must still
 *                be read (and discarded) to complete the pop.
 *  - 32bit:      a single OUTQ read returns the tag or all-ones.
 * Caller holds sc_mutex (required by ciss_enqueue()).
 */
static void
ciss_completed_simple(struct ciss_softc *sc, ciss_queue_head *q)
{
	uint32_t id;

	KASSERT(mutex_owned(&sc->sc_mutex));

	for (;;) {
		if (sc->cfg.methods & CISS_METH_FIFO64) {
			if (bus_space_read_4(sc->sc_iot, sc->sc_ioh,
			    CISS_OUTQ64_HI) == 0xffffffff) {
				CISS_DPRINTF(CISS_D_CMD, ("Q"));
				break;
			}
			id = bus_space_read_4(sc->sc_iot, sc->sc_ioh,
			    CISS_OUTQ64_LO);
		} else if (sc->cfg.methods & CISS_METH_FIFO64_RRO) {
			id = bus_space_read_4(sc->sc_iot, sc->sc_ioh,
			    CISS_OUTQ64_LO);
			if (id == 0xffffffff) {
				CISS_DPRINTF(CISS_D_CMD, ("Q"));
				break;
			}
			(void)bus_space_read_4(sc->sc_iot, sc->sc_ioh,
			    CISS_OUTQ64_HI);
		} else {
			id = bus_space_read_4(sc->sc_iot, sc->sc_ioh,
			    CISS_OUTQ);
			if (id == 0xffffffff) {
				CISS_DPRINTF(CISS_D_CMD, ("Q"));
				break;
			}
		}

		CISS_DPRINTF(CISS_D_CMD, ("got=0x%x ", id));
		ciss_enqueue(sc, q, id);
	}
}
588
589 static void
590 ciss_completed_perf(struct ciss_softc *sc, ciss_queue_head *q)
591 {
592 uint32_t id;
593
594 KASSERT(mutex_owned(&sc->sc_mutex));
595
596 for (;;) {
597 id = sc->perf_reply[sc->perf_rqidx];
598 if ((id & CISS_CYCLE_MASK) != sc->perf_cycle)
599 break;
600
601 if (++sc->perf_rqidx == sc->maxcmd) {
602 sc->perf_rqidx = 0;
603 sc->perf_cycle ^= 1;
604 }
605
606 CISS_DPRINTF(CISS_D_CMD, ("got=0x%x ", id));
607 ciss_enqueue(sc, q, id);
608 }
609 }
610
/*
 * Busy-wait for a specific CCB to complete, completing (via
 * ciss_done()) every other command that finishes in the meantime.
 * Returns 0 once "ccb" is seen, ETIMEDOUT otherwise.
 *
 * NOTE(review): with "ms /= 10" and DELAY(10) (10 microseconds) per
 * iteration, the total busy-wait is roughly "ms" MICROseconds even
 * though callers pass milliseconds (e.g. 60000) -- looks like a unit
 * mismatch; confirm against the intended timeout semantics.
 */
static int
ciss_poll(struct ciss_softc *sc, struct ciss_ccb *ccb, int ms)
{
	ciss_queue_head q;
	struct ciss_ccb *ccb1;

	TAILQ_INIT(&q);
	ms /= 10;

	while (ms-- > 0) {
		DELAY(10);
		/* Harvest completions under the mutex... */
		mutex_enter(&sc->sc_mutex);
		if (CISS_IS_PERF(sc))
			ciss_completed_perf(sc, &q);
		else
			ciss_completed_simple(sc, &q);
		mutex_exit(&sc->sc_mutex);

		/* ...then run completion handlers without it. */
		while (!TAILQ_EMPTY(&q)) {
			ccb1 = TAILQ_FIRST(&q);
			TAILQ_REMOVE(&q, ccb1, ccb_link);

			KASSERT(ccb1->ccb_state == CISS_CCB_ONQ);
			ciss_done(ccb1);
			if (ccb1 == ccb) {
				/* Ours must be the last one harvested. */
				KASSERT(TAILQ_EMPTY(&q));
				return 0;
			}
		}
	}

	return ETIMEDOUT;
}
644
/*
 * Sleep until the given CCB completes or "ms" milliseconds elapse.
 * The interrupt handler moves completed CCBs to state CISS_CCB_ONQ
 * and broadcasts sc_condvar; each wakeup re-checks the CCB and
 * shortens the remaining timeout.  Returns 0 on completion,
 * EWOULDBLOCK on timeout.
 *
 * NOTE(review): ccb_state is read here after dropping sc_mutex --
 * presumably safe because only the waiter consumes the ONQ state,
 * but confirm against the interrupt path's locking.
 */
static int
ciss_wait(struct ciss_softc *sc, struct ciss_ccb *ccb, int ms)
{
	int tohz, etick;

	tohz = mstohz(ms);
	if (tohz == 0)
		tohz = 1;	/* at least one tick */
	etick = getticks() + tohz;	/* absolute deadline in ticks */

	for (;;) {
		CISS_DPRINTF(CISS_D_CMD, ("cv_timedwait(%d) ", tohz));
		mutex_enter(&sc->sc_mutex);
		if (cv_timedwait(&sc->sc_condvar, &sc->sc_mutex, tohz)
		    == EWOULDBLOCK) {
			mutex_exit(&sc->sc_mutex);
			return EWOULDBLOCK;
		}
		mutex_exit(&sc->sc_mutex);
		if (ccb->ccb_state == CISS_CCB_ONQ) {
			ciss_done(ccb);
			return 0;
		}
		/* Woken for some other command: shrink the timeout. */
		tohz = etick - getticks();
		if (tohz <= 0)
			return EWOULDBLOCK;
		CISS_DPRINTF(CISS_D_CMD, ("T"));
	}
}
674
/*
 * Submit a command and optionally wait for completion.
 * The wait arg reuses the XS_CTL_POLL|XS_CTL_NOSLEEP flags: XS_CTL_POLL
 * requests waiting for completion, and the absence of XS_CTL_NOSLEEP
 * allows sleeping instead of busy-wait polling.
 */
static int
ciss_cmd(struct ciss_ccb *ccb, int flags, int wait)
{
	struct ciss_softc *sc = ccb->ccb_sc;
	struct ciss_cmd *cmd = &ccb->ccb_cmd;
	bus_dmamap_t dmap = ccb->ccb_dmamap;
	u_int64_t addr;
	int i, error = 0;
	/* pollsleep: wait for completion, but sleep rather than spin. */
	const bool pollsleep = ((wait & (XS_CTL_POLL|XS_CTL_NOSLEEP)) ==
	    XS_CTL_POLL);

	if (ccb->ccb_state != CISS_CCB_READY) {
		printf("%s: ccb %d not ready state=0x%x\n", device_xname(sc->sc_dev),
		    cmd->id, ccb->ccb_state);
		return (EINVAL);
	}

	if (ccb->ccb_data) {
		bus_dma_segment_t *sgd;

		/* Map the data buffer and build the in-command SG list. */
		if ((error = bus_dmamap_load(sc->sc_dmat, dmap, ccb->ccb_data,
		    ccb->ccb_len, NULL, flags))) {
			if (error == EFBIG)
				printf("more than %d dma segs\n", sc->maxsg);
			else
				printf("error %d loading dma map\n", error);
			ciss_put_ccb(ccb);
			return (error);
		}
		cmd->sgin = dmap->dm_nsegs;

		sgd = dmap->dm_segs;
		CISS_DPRINTF(CISS_D_DMA, ("data=%p/%zu<%#" PRIxPADDR "/%zu",
		    ccb->ccb_data, ccb->ccb_len, sgd->ds_addr, sgd->ds_len));

		for (i = 0; i < dmap->dm_nsegs; sgd++, i++) {
			cmd->sgl[i].addr_lo = htole32(sgd->ds_addr);
			cmd->sgl[i].addr_hi =
			    htole32((u_int64_t)sgd->ds_addr >> 32);
			cmd->sgl[i].len = htole32(sgd->ds_len);
			cmd->sgl[i].flags = htole32(0);
			if (i) {
				CISS_DPRINTF(CISS_D_DMA,
				    (",%#" PRIxPADDR "/%zu", sgd->ds_addr,
				    sgd->ds_len));
			}
		}

		CISS_DPRINTF(CISS_D_DMA, ("> "));

		bus_dmamap_sync(sc->sc_dmat, dmap, 0, dmap->dm_mapsize,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/*
		 * Classify the SG count into the performant-mode "pull
		 * count" bucket; must match the fetch table preloaded
		 * in ciss_init_perf().
		 */
		if (dmap->dm_nsegs == 0)
			ccb->ccb_sg_tag = CISS_SG_FETCH_NONE;
		else if (dmap->dm_nsegs == 1)
			ccb->ccb_sg_tag = CISS_SG_FETCH_1;
		else if (dmap->dm_nsegs == 2)
			ccb->ccb_sg_tag = CISS_SG_FETCH_2;
		else if (dmap->dm_nsegs <= 4)
			ccb->ccb_sg_tag = CISS_SG_FETCH_4;
		else if (dmap->dm_nsegs <= 8)
			ccb->ccb_sg_tag = CISS_SG_FETCH_8;
		else if (dmap->dm_nsegs <= 16)
			ccb->ccb_sg_tag = CISS_SG_FETCH_16;
		else if (dmap->dm_nsegs <= 32)
			ccb->ccb_sg_tag = CISS_SG_FETCH_32;
		else
			ccb->ccb_sg_tag = CISS_SG_FETCH_MAX;
	} else {
		/* No data phase. */
		ccb->ccb_sg_tag = CISS_SG_FETCH_NONE;
		cmd->sgin = 0;
	}
	cmd->sglen = htole16((u_int16_t)cmd->sgin);
	memset(&ccb->ccb_err, 0, sizeof(ccb->ccb_err));

	bus_dmamap_sync(sc->sc_dmat, sc->cmdmap, 0, sc->cmdmap->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

#ifndef CISS_NO_INTERRUPT_HACK
	/* Busy-poll mode: mask completion interrupts for the duration. */
	if ((wait & (XS_CTL_POLL|XS_CTL_NOSLEEP)) == (XS_CTL_POLL|XS_CTL_NOSLEEP))
		bus_space_write_4(sc->sc_iot, sc->sc_ioh, CISS_IMR,
		    bus_space_read_4(sc->sc_iot, sc->sc_ioh, CISS_IMR) | sc->iem);
#endif

	/* Sleepers are completed by the interrupt path (ONQ);
	 * busy-pollers harvest the FIFO themselves (POLL). */
	if (!pollsleep)
		ccb->ccb_state = CISS_CCB_ONQ;
	else
		ccb->ccb_state = CISS_CCB_POLL;
	CISS_DPRINTF(CISS_D_CMD, ("submit=0x%x ", cmd->id));

	addr = (u_int64_t)ccb->ccb_cmdpa;
	if (CISS_IS_PERF(sc)) {
		KASSERT((addr & 0xf) == 0);
		/*
		 * The bits in addr in performant mean:
		 * - performant mode bit (bit 0)
		 * - pull count (bits 1-3)
		 * There is no support for ioaccel mode
		 */
		addr |= 1 | (ccb->ccb_sg_tag << 1);
	}
	if (sc->cfg.methods & (CISS_METH_FIFO64|CISS_METH_FIFO64_RRO)) {
		/*
		 * Write the upper 32bits immediately before the lower
		 * 32bits and set bit 63 to indicate 64bit FIFO mode.
		 */
		bus_space_write_4(sc->sc_iot, sc->sc_ioh, CISS_INQ64_HI,
		    (addr >> 32) | 0x80000000);
		bus_space_write_4(sc->sc_iot, sc->sc_ioh, CISS_INQ64_LO,
		    addr & 0x00000000ffffffffULL);
	} else
		bus_space_write_4(sc->sc_iot, sc->sc_ioh, CISS_INQ,
		    (uint32_t)addr);

	if (wait & XS_CTL_POLL) {
		int ms;
		CISS_DPRINTF(CISS_D_CMD, ("waiting "));

		/* Default to 60s for internal commands with no xfer. */
		ms = ccb->ccb_xs ? ccb->ccb_xs->timeout : 60000;
		if (pollsleep)
			error = ciss_wait(sc, ccb, ms);
		else
			error = ciss_poll(sc, ccb, ms);

		/* if never got a chance to be done above... */
		if (ccb->ccb_state != CISS_CCB_FREE) {
			KASSERT(error);
			ccb->ccb_err.cmd_stat = CISS_ERR_TMO;
			error = ciss_done(ccb);
		}

		CISS_DPRINTF(CISS_D_CMD, ("done %d:%d",
		    ccb->ccb_err.cmd_stat, ccb->ccb_err.scsi_stat));
	}

#ifndef CISS_NO_INTERRUPT_HACK
	/* Re-enable completion interrupts after busy polling. */
	if ((wait & (XS_CTL_POLL|XS_CTL_NOSLEEP)) == (XS_CTL_POLL|XS_CTL_NOSLEEP))
		bus_space_write_4(sc->sc_iot, sc->sc_ioh, CISS_IMR,
		    bus_space_read_4(sc->sc_iot, sc->sc_ioh, CISS_IMR) & ~sc->iem);
#endif

	return (error);
}
825
/*
 * Finish a completed CCB: decode errors (if the tag has CISS_CMD_ERR
 * set), tear down the data DMA mapping, return the CCB to the free
 * list and, when the command came from scsipi, hand the xfer back via
 * scsipi_done().  Returns 0 on success, non-zero on error.
 */
static int
ciss_done(struct ciss_ccb *ccb)
{
	struct ciss_softc *sc = ccb->ccb_sc;
	struct scsipi_xfer *xs = ccb->ccb_xs;
	struct ciss_cmd *cmd;
	int error = 0;

	CISS_DPRINTF(CISS_D_CMD, ("ciss_done(%p) ", ccb));

	if (ccb->ccb_state != CISS_CCB_ONQ) {
		printf("%s: unqueued ccb %p ready, state=0x%x\n",
		    device_xname(sc->sc_dev), ccb, ccb->ccb_state);
		return 1;
	}

	ccb->ccb_state = CISS_CCB_READY;

	/* Low tag bits carry the completion status. */
	if (ccb->ccb_cmd.id & CISS_CMD_ERR)
		error = ciss_error(ccb);

	cmd = &ccb->ccb_cmd;
	if (ccb->ccb_data) {
		bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap, 0,
		    ccb->ccb_dmamap->dm_mapsize, (cmd->flags & CISS_CDB_IN) ?
		    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, ccb->ccb_dmamap);
		ccb->ccb_xs = NULL;
		ccb->ccb_data = NULL;
	}

	ciss_put_ccb(ccb);

	if (xs) {
		xs->resid = 0;
		CISS_DPRINTF(CISS_D_CMD, ("scsipi_done(%p) ", xs));
		/*
		 * INQUIRY fixup: if the device claims command queueing
		 * but reports ANSI version 0, bump the version to 2 --
		 * presumably so upper layers trust the CmdQue bit;
		 * confirm against scsipi's version checks.
		 */
		if (xs->cmd->opcode == INQUIRY) {
			struct scsipi_inquiry_data *inq;
			inq = (struct scsipi_inquiry_data *)xs->data;
			if ((inq->version & SID_ANSII) == 0 &&
			    (inq->flags3 & SID_CmdQue) != 0) {
				inq->version |= 2;
			}
		}
		scsipi_done(xs);
	}

	return error;
}
875
876 static int
877 ciss_error(struct ciss_ccb *ccb)
878 {
879 struct ciss_softc *sc = ccb->ccb_sc;
880 struct ciss_error *err = &ccb->ccb_err;
881 struct scsipi_xfer *xs = ccb->ccb_xs;
882 int rv;
883
884 switch ((rv = le16toh(err->cmd_stat))) {
885 case CISS_ERR_OK:
886 rv = 0;
887 break;
888
889 case CISS_ERR_INVCMD:
890 if (xs == NULL ||
891 xs->cmd->opcode != SCSI_SYNCHRONIZE_CACHE_10)
892 printf("%s: invalid cmd 0x%x: 0x%x is not valid @ 0x%x[%d]\n",
893 device_xname(sc->sc_dev), ccb->ccb_cmd.id,
894 err->err_info, err->err_type[3], err->err_type[2]);
895 if (xs) {
896 memset(&xs->sense, 0, sizeof(xs->sense));
897 xs->sense.scsi_sense.response_code =
898 SSD_RCODE_CURRENT | SSD_RCODE_VALID;
899 xs->sense.scsi_sense.flags = SKEY_ILLEGAL_REQUEST;
900 xs->sense.scsi_sense.asc = 0x24; /* ill field */
901 xs->sense.scsi_sense.ascq = 0x0;
902 xs->error = XS_SENSE;
903 }
904 rv = EIO;
905 break;
906
907 case CISS_ERR_TMO:
908 xs->error = XS_TIMEOUT;
909 rv = ETIMEDOUT;
910 break;
911
912 case CISS_ERR_UNRUN:
913 /* Underrun */
914 xs->resid = le32toh(err->resid);
915 CISS_DPRINTF(CISS_D_CMD, (" underrun resid=0x%x ",
916 xs->resid));
917 rv = EIO;
918 break;
919 default:
920 if (xs) {
921 CISS_DPRINTF(CISS_D_CMD, ("scsi_stat=%x ", err->scsi_stat));
922 switch (err->scsi_stat) {
923 case SCSI_CHECK:
924 xs->error = XS_SENSE;
925 memcpy(&xs->sense, &err->sense[0],
926 sizeof(xs->sense));
927 CISS_DPRINTF(CISS_D_CMD, (" sense=%02x %02x %02x %02x ",
928 err->sense[0], err->sense[1], err->sense[2], err->sense[3]));
929 rv = EIO;
930 break;
931
932 case XS_BUSY:
933 xs->error = XS_BUSY;
934 rv = EBUSY;
935 break;
936
937 default:
938 CISS_DPRINTF(CISS_D_ERR, ("%s: "
939 "cmd_stat=%x scsi_stat=0x%x resid=0x%x\n",
940 device_xname(sc->sc_dev), rv, err->scsi_stat,
941 le32toh(err->resid)));
942 printf("ciss driver stuffup in %s:%d: %s()\n",
943 __FILE__, __LINE__, __func__);
944 xs->error = XS_DRIVER_STUFFUP;
945 rv = EIO;
946 break;
947 }
948 xs->resid = le32toh(err->resid);
949 } else
950 rv = EIO;
951 }
952 ccb->ccb_cmd.id &= htole32(~3);
953
954 return rv;
955 }
956
957 static int
958 ciss_inq(struct ciss_softc *sc, struct ciss_inquiry *inq)
959 {
960 struct ciss_ccb *ccb;
961 struct ciss_cmd *cmd;
962
963 ccb = ciss_get_ccb(sc);
964 ccb->ccb_len = sizeof(*inq);
965 ccb->ccb_data = inq;
966 ccb->ccb_xs = NULL;
967 cmd = &ccb->ccb_cmd;
968 cmd->tgt = htole32(CISS_CMD_MODE_PERIPH);
969 cmd->tgt2 = 0;
970 cmd->cdblen = 10;
971 cmd->flags = CISS_CDB_CMD | CISS_CDB_SIMPL | CISS_CDB_IN;
972 cmd->tmo = htole16(0);
973 memset(&cmd->cdb[0], 0, sizeof(cmd->cdb));
974 cmd->cdb[0] = CISS_CMD_CTRL_GET;
975 cmd->cdb[6] = CISS_CMS_CTRL_CTRL;
976 cmd->cdb[7] = sizeof(*inq) >> 8; /* biiiig endian */
977 cmd->cdb[8] = sizeof(*inq) & 0xff;
978
979 return ciss_cmd(ccb, BUS_DMA_NOWAIT, XS_CTL_POLL|XS_CTL_NOSLEEP);
980 }
981
982 static int
983 ciss_ldmap(struct ciss_softc *sc)
984 {
985 struct ciss_ccb *ccb;
986 struct ciss_cmd *cmd;
987 struct ciss_ldmap *lmap;
988 int total, rv;
989
990 mutex_enter(&sc->sc_mutex_scratch);
991 lmap = sc->scratch;
992 lmap->size = htobe32(sc->maxunits * sizeof(lmap->map));
993 total = sizeof(*lmap) + (sc->maxunits - 1) * sizeof(lmap->map);
994
995 ccb = ciss_get_ccb(sc);
996 ccb->ccb_len = total;
997 ccb->ccb_data = lmap;
998 ccb->ccb_xs = NULL;
999 cmd = &ccb->ccb_cmd;
1000 cmd->tgt = CISS_CMD_MODE_PERIPH;
1001 cmd->tgt2 = 0;
1002 cmd->cdblen = 12;
1003 cmd->flags = CISS_CDB_CMD | CISS_CDB_SIMPL | CISS_CDB_IN;
1004 cmd->tmo = htole16(30);
1005 memset(&cmd->cdb[0], 0, sizeof(cmd->cdb));
1006 cmd->cdb[0] = CISS_CMD_LDMAP;
1007 cmd->cdb[8] = total >> 8; /* biiiig endian */
1008 cmd->cdb[9] = total & 0xff;
1009
1010 rv = ciss_cmd(ccb, BUS_DMA_NOWAIT, XS_CTL_POLL|XS_CTL_NOSLEEP);
1011
1012 if (rv) {
1013 mutex_exit(&sc->sc_mutex_scratch);
1014 return rv;
1015 }
1016
1017 CISS_DPRINTF(CISS_D_MISC, ("lmap %x:%x\n",
1018 lmap->map[0].tgt, lmap->map[0].tgt2));
1019
1020 mutex_exit(&sc->sc_mutex_scratch);
1021 return 0;
1022 }
1023
1024 static int
1025 ciss_sync(struct ciss_softc *sc)
1026 {
1027 struct ciss_ccb *ccb;
1028 struct ciss_cmd *cmd;
1029 struct ciss_flush *flush;
1030 int rv;
1031
1032 mutex_enter(&sc->sc_mutex_scratch);
1033 flush = sc->scratch;
1034 memset(flush, 0, sizeof(*flush));
1035 flush->flush = sc->sc_flush;
1036
1037 ccb = ciss_get_ccb(sc);
1038 ccb->ccb_len = sizeof(*flush);
1039 ccb->ccb_data = flush;
1040 ccb->ccb_xs = NULL;
1041 cmd = &ccb->ccb_cmd;
1042 cmd->tgt = CISS_CMD_MODE_PERIPH;
1043 cmd->tgt2 = 0;
1044 cmd->cdblen = 10;
1045 cmd->flags = CISS_CDB_CMD | CISS_CDB_SIMPL | CISS_CDB_OUT;
1046 cmd->tmo = 0;
1047 memset(&cmd->cdb[0], 0, sizeof(cmd->cdb));
1048 cmd->cdb[0] = CISS_CMD_CTRL_SET;
1049 cmd->cdb[6] = CISS_CMS_CTRL_FLUSH;
1050 cmd->cdb[7] = sizeof(*flush) >> 8; /* biiiig endian */
1051 cmd->cdb[8] = sizeof(*flush) & 0xff;
1052
1053 rv = ciss_cmd(ccb, BUS_DMA_NOWAIT, XS_CTL_POLL|XS_CTL_NOSLEEP);
1054 mutex_exit(&sc->sc_mutex_scratch);
1055
1056 return rv;
1057 }
1058
1059 int
1060 ciss_ldid(struct ciss_softc *sc, int target, struct ciss_ldid *id)
1061 {
1062 struct ciss_ccb *ccb;
1063 struct ciss_cmd *cmd;
1064
1065 ccb = ciss_get_ccb(sc);
1066 if (ccb == NULL)
1067 return ENOMEM;
1068 ccb->ccb_len = sizeof(*id);
1069 ccb->ccb_data = id;
1070 ccb->ccb_xs = NULL;
1071 cmd = &ccb->ccb_cmd;
1072 cmd->tgt = htole32(CISS_CMD_MODE_PERIPH);
1073 cmd->tgt2 = 0;
1074 cmd->cdblen = 10;
1075 cmd->flags = CISS_CDB_CMD | CISS_CDB_SIMPL | CISS_CDB_IN;
1076 cmd->tmo = htole16(0);
1077 memset(&cmd->cdb[0], 0, sizeof(cmd->cdb));
1078 cmd->cdb[0] = CISS_CMD_CTRL_GET;
1079 cmd->cdb[1] = target;
1080 cmd->cdb[6] = CISS_CMS_CTRL_LDIDEXT;
1081 cmd->cdb[7] = sizeof(*id) >> 8; /* biiiig endian */
1082 cmd->cdb[8] = sizeof(*id) & 0xff;
1083
1084 return ciss_cmd(ccb, BUS_DMA_NOWAIT, XS_CTL_POLL | sc->sc_waitflag);
1085 }
1086
1087 int
1088 ciss_ldstat(struct ciss_softc *sc, int target, struct ciss_ldstat *stat)
1089 {
1090 struct ciss_ccb *ccb;
1091 struct ciss_cmd *cmd;
1092
1093 ccb = ciss_get_ccb(sc);
1094 if (ccb == NULL)
1095 return ENOMEM;
1096 ccb->ccb_len = sizeof(*stat);
1097 ccb->ccb_data = stat;
1098 ccb->ccb_xs = NULL;
1099 cmd = &ccb->ccb_cmd;
1100 cmd->tgt = htole32(CISS_CMD_MODE_PERIPH);
1101 cmd->tgt2 = 0;
1102 cmd->cdblen = 10;
1103 cmd->flags = CISS_CDB_CMD | CISS_CDB_SIMPL | CISS_CDB_IN;
1104 cmd->tmo = htole16(0);
1105 memset(&cmd->cdb[0], 0, sizeof(cmd->cdb));
1106 cmd->cdb[0] = CISS_CMD_CTRL_GET;
1107 cmd->cdb[1] = target;
1108 cmd->cdb[6] = CISS_CMS_CTRL_LDSTAT;
1109 cmd->cdb[7] = sizeof(*stat) >> 8; /* biiiig endian */
1110 cmd->cdb[8] = sizeof(*stat) & 0xff;
1111
1112 return ciss_cmd(ccb, BUS_DMA_NOWAIT, XS_CTL_POLL | sc->sc_waitflag);
1113 }
1114
1115 int
1116 ciss_pdid(struct ciss_softc *sc, u_int8_t drv, struct ciss_pdid *id, int wait)
1117 {
1118 struct ciss_ccb *ccb;
1119 struct ciss_cmd *cmd;
1120
1121 ccb = ciss_get_ccb(sc);
1122 if (ccb == NULL)
1123 return ENOMEM;
1124 ccb->ccb_len = sizeof(*id);
1125 ccb->ccb_data = id;
1126 ccb->ccb_xs = NULL;
1127 cmd = &ccb->ccb_cmd;
1128 cmd->tgt = htole32(CISS_CMD_MODE_PERIPH);
1129 cmd->tgt2 = 0;
1130 cmd->cdblen = 10;
1131 cmd->flags = CISS_CDB_CMD | CISS_CDB_SIMPL | CISS_CDB_IN;
1132 cmd->tmo = htole16(0);
1133 memset(&cmd->cdb[0], 0, sizeof(cmd->cdb));
1134 cmd->cdb[0] = CISS_CMD_CTRL_GET;
1135 cmd->cdb[2] = drv;
1136 cmd->cdb[6] = CISS_CMS_CTRL_PDID;
1137 cmd->cdb[7] = sizeof(*id) >> 8; /* biiiig endian */
1138 cmd->cdb[8] = sizeof(*id) & 0xff;
1139
1140 return ciss_cmd(ccb, BUS_DMA_NOWAIT, wait);
1141 }
1142
1143
1144 struct ciss_ld *
1145 ciss_pdscan(struct ciss_softc *sc, int ld)
1146 {
1147 struct ciss_pdid *pdid;
1148 struct ciss_ld *ldp;
1149 u_int8_t drv, buf[128];
1150 int i, j, k = 0;
1151
1152 mutex_enter(&sc->sc_mutex_scratch);
1153 pdid = sc->scratch;
1154 if (sc->ndrives == 256) {
1155 for (i = 0; i < CISS_BIGBIT; i++)
1156 if (!ciss_pdid(sc, i, pdid,
1157 XS_CTL_POLL|XS_CTL_NOSLEEP) &&
1158 (pdid->present & CISS_PD_PRESENT))
1159 buf[k++] = i;
1160 } else
1161 for (i = 0; i < sc->nbus; i++)
1162 for (j = 0; j < sc->ndrives; j++) {
1163 drv = CISS_BIGBIT + i * sc->ndrives + j;
1164 if (!ciss_pdid(sc, drv, pdid,
1165 XS_CTL_POLL|XS_CTL_NOSLEEP))
1166 buf[k++] = drv;
1167 }
1168 mutex_exit(&sc->sc_mutex_scratch);
1169
1170 if (!k)
1171 return NULL;
1172
1173 ldp = malloc(sizeof(*ldp) + (k-1), M_DEVBUF, M_WAITOK);
1174 memset(&ldp->bling, 0, sizeof(ldp->bling));
1175 ldp->ndrives = k;
1176 ldp->xname[0] = 0;
1177 memcpy(ldp->tgts, buf, k);
1178 return ldp;
1179 }
1180
1181 static void
1182 ciss_scsi_cmd(struct scsipi_channel *chan, scsipi_adapter_req_t req,
1183 void *arg)
1184 {
1185 struct scsipi_xfer *xs;
1186 struct scsipi_xfer_mode *xm;
1187 struct ciss_softc *sc = device_private(chan->chan_adapter->adapt_dev);
1188 u_int8_t target;
1189 struct ciss_ccb *ccb;
1190 struct ciss_cmd *cmd;
1191
1192 CISS_DPRINTF(CISS_D_CMD, ("ciss_scsi_cmd "));
1193
1194 switch (req)
1195 {
1196 case ADAPTER_REQ_RUN_XFER:
1197 xs = (struct scsipi_xfer *) arg;
1198 target = xs->xs_periph->periph_target;
1199 CISS_DPRINTF(CISS_D_CMD, ("targ=%d ", target));
1200 if (xs->cmdlen > CISS_MAX_CDB) {
1201 CISS_DPRINTF(CISS_D_CMD, ("CDB too big %p ", xs));
1202 memset(&xs->sense, 0, sizeof(xs->sense));
1203 xs->error = XS_SENSE;
1204 printf("ciss driver stuffup in %s:%d: %s()\n",
1205 __FILE__, __LINE__, __func__);
1206 scsipi_done(xs);
1207 break;
1208 }
1209
1210 xs->error = XS_NOERROR;
1211
1212 /* XXX emulate SYNCHRONIZE_CACHE ??? */
1213
1214 ccb = ciss_get_ccb(sc);
1215 cmd = &ccb->ccb_cmd;
1216 ccb->ccb_len = xs->datalen;
1217 ccb->ccb_data = xs->data;
1218 ccb->ccb_xs = xs;
1219 cmd->tgt = CISS_CMD_MODE_LD | target;
1220 cmd->tgt2 = 0;
1221 cmd->cdblen = xs->cmdlen;
1222 cmd->flags = CISS_CDB_CMD | CISS_CDB_SIMPL;
1223 if (xs->xs_control & XS_CTL_DATA_IN)
1224 cmd->flags |= CISS_CDB_IN;
1225 else if (xs->xs_control & XS_CTL_DATA_OUT)
1226 cmd->flags |= CISS_CDB_OUT;
1227 cmd->tmo = htole16(xs->timeout < 1000? 1 : xs->timeout / 1000);
1228 memcpy(&cmd->cdb[0], xs->cmd, xs->cmdlen);
1229 CISS_DPRINTF(CISS_D_CMD, ("cmd=%02x %02x %02x %02x %02x %02x ",
1230 cmd->cdb[0], cmd->cdb[1], cmd->cdb[2],
1231 cmd->cdb[3], cmd->cdb[4], cmd->cdb[5]));
1232
1233 if (ciss_cmd(ccb, BUS_DMA_WAITOK,
1234 xs->xs_control & (XS_CTL_POLL|XS_CTL_NOSLEEP))) {
1235 printf("ciss driver stuffup in %s:%d: %s()\n",
1236 __FILE__, __LINE__, __func__);
1237 xs->error = XS_DRIVER_STUFFUP;
1238 scsipi_done(xs);
1239 return;
1240 }
1241
1242 break;
1243 case ADAPTER_REQ_GROW_RESOURCES:
1244 /*
1245 * Not supported.
1246 */
1247 break;
1248 case ADAPTER_REQ_SET_XFER_MODE:
1249 /*
1250 * We can't change the transfer mode, but at least let
1251 * scsipi know what the adapter has negociated.
1252 */
1253 xm = (struct scsipi_xfer_mode *)arg;
1254 xm->xm_mode |= PERIPH_CAP_TQING;
1255 scsipi_async_event(chan, ASYNC_EVENT_XFER_MODE, xm);
1256 break;
1257 default:
1258 printf("%s: %d %d unsupported\n", __func__, __LINE__, req);
1259 }
1260 }
1261
1262 static void
1263 ciss_completed_process(struct ciss_softc *sc, ciss_queue_head *q)
1264 {
1265 struct ciss_ccb *ccb;
1266
1267 while (!TAILQ_EMPTY(q)) {
1268 ccb = TAILQ_FIRST(q);
1269 TAILQ_REMOVE(q, ccb, ccb_link);
1270
1271 if (ccb->ccb_state == CISS_CCB_POLL) {
1272 ccb->ccb_state = CISS_CCB_ONQ;
1273 mutex_enter(&sc->sc_mutex);
1274 cv_broadcast(&sc->sc_condvar);
1275 mutex_exit(&sc->sc_mutex);
1276 } else
1277 ciss_done(ccb);
1278 }
1279 }
1280
1281 int
1282 ciss_intr_simple_intx(void *v)
1283 {
1284 struct ciss_softc *sc = v;
1285 ciss_queue_head q;
1286 int hit = 0;
1287
1288 CISS_DPRINTF(CISS_D_INTR, ("intr "));
1289
1290 /* XXX shouldn't be necessary, intr triggers only if enabled */
1291 if (!(bus_space_read_4(sc->sc_iot, sc->sc_ioh, CISS_ISR) & sc->iem))
1292 return 0;
1293
1294 TAILQ_INIT(&q);
1295 mutex_enter(&sc->sc_mutex);
1296 ciss_completed_simple(sc, &q);
1297 mutex_exit(&sc->sc_mutex);
1298
1299 hit = (!TAILQ_EMPTY(&q));
1300 ciss_completed_process(sc, &q);
1301
1302 KASSERT(TAILQ_EMPTY(&q));
1303 CISS_DPRINTF(CISS_D_INTR, ("exit\n"));
1304
1305 return hit;
1306 }
1307
/*
 * INTx interrupt handler for performant mode: acknowledge the interrupt
 * at the hardware, then defer to the MSI handler for completion
 * processing.
 */
int
ciss_intr_perf_intx(void *v)
{
	struct ciss_softc *sc = v;

	CISS_DPRINTF(CISS_D_INTR, ("intr "));

	/* Clear the interrupt and flush the bridges. Docs say that the flush
	 * needs to be done twice, which doesn't seem right.
	 */
	bus_space_read_4(sc->sc_iot, sc->sc_ioh, CISS_OSR);
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, CISS_ODC, CISS_ODC_CLEAR);

	return ciss_intr_perf_msi(sc);
}
1323
/*
 * MSI interrupt handler for performant mode.  Collects completions
 * under sc_mutex and processes them outside it.  Always claims the
 * interrupt (returns 1).
 */
int
ciss_intr_perf_msi(void *v)
{
	struct ciss_softc *sc = v;
	ciss_queue_head q;

	CISS_DPRINTF(CISS_D_INTR, ("intr "));

	TAILQ_INIT(&q);
	mutex_enter(&sc->sc_mutex);
	ciss_completed_perf(sc, &q);
	mutex_exit(&sc->sc_mutex);

	ciss_completed_process(sc, &q);

	KASSERT(TAILQ_EMPTY(&q));
	CISS_DPRINTF(CISS_D_INTR, ("exit"));

	return 1;
}
1344
/*
 * Periodic (every 3 seconds) watchdog on the controller's heartbeat
 * counter in the config space.  If the counter fails to advance for 11
 * consecutive checks (33 seconds) the controller is declared dead.
 */
static void
ciss_heartbeat(void *v)
{
	struct ciss_softc *sc = v;
	u_int32_t hb;

	hb = bus_space_read_4(sc->sc_iot, sc->cfg_ioh,
	    sc->cfgoff + offsetof(struct ciss_config, heartbeat));
	if (hb == sc->heartbeat) {
		/* Counter unchanged since last check: count a miss. */
		sc->fibrillation++;
		CISS_DPRINTF(CISS_D_ERR, ("%s: fibrillation #%d (value=%d)\n",
		    device_xname(sc->sc_dev), sc->fibrillation, hb));
		if (sc->fibrillation >= 11) {
			/* No heartbeat for 33 seconds */
			panic("%s: dead", device_xname(sc->sc_dev));	/* XXX reset! */
		}
	} else {
		sc->heartbeat = hb;
		if (sc->fibrillation) {
			CISS_DPRINTF(CISS_D_ERR, ("%s: "
			    "fibrillation ended (value=%d)\n",
			    device_xname(sc->sc_dev), hb));
		}
		sc->fibrillation = 0;
	}

	/* Re-arm ourselves for the next check. */
	callout_schedule(&sc->sc_hb, hz * 3);
}
1373
/*
 * scsipi channel ioctl hook: forward everything to the bio(4) ioctl
 * handler when BIO support is compiled in, otherwise reject.
 */
static int
ciss_scsi_ioctl(struct scsipi_channel *chan, u_long cmd,
    void *addr, int flag, struct proc *p)
{
#if NBIO > 0
	return ciss_ioctl(chan->chan_adapter->adapt_dev, cmd, addr);
#else
	return ENOTTY;
#endif
}
1384
1385 #if NBIO > 0
/* CISS logical-drive type code (ldid->type) -> RAID level for bioctl. */
const int ciss_level[] = { 0, 4, 1, 5, 51, 7 };
/* CISS logical-drive status code (ldstat->stat) -> bioctl volume state. */
const int ciss_stat[] = { BIOC_SVONLINE, BIOC_SVOFFLINE, BIOC_SVOFFLINE,
    BIOC_SVDEGRADED, BIOC_SVREBUILD, BIOC_SVREBUILD, BIOC_SVDEGRADED,
    BIOC_SVDEGRADED, BIOC_SVINVALID, BIOC_SVINVALID, BIOC_SVBUILDING,
    BIOC_SVOFFLINE, BIOC_SVBUILDING };
1391
1392 int
1393 ciss_ioctl(device_t dev, u_long cmd, void *addr)
1394 {
1395 struct ciss_softc *sc = device_private(dev);
1396 struct bioc_inq *bi;
1397 struct bioc_disk *bd;
1398 struct bioc_blink *bb;
1399 struct ciss_ldstat *ldstat;
1400 struct ciss_pdid *pdid;
1401 struct ciss_blink *blink;
1402 struct ciss_ld *ldp;
1403 u_int8_t drv;
1404 int ld, pd, error = 0;
1405
1406 switch (cmd) {
1407 case BIOCINQ:
1408 bi = (struct bioc_inq *)addr;
1409 strlcpy(bi->bi_dev, device_xname(sc->sc_dev), sizeof(bi->bi_dev));
1410 bi->bi_novol = sc->maxunits;
1411 bi->bi_nodisk = sc->sc_lds[0]->ndrives;
1412 break;
1413
1414 case BIOCVOL:
1415 error = ciss_ioctl_vol(sc, (struct bioc_vol *)addr);
1416 break;
1417
1418 case BIOCDISK_NOVOL:
1419 /*
1420 * XXX since we don't know how to associate physical drives with logical drives
1421 * yet, BIOCDISK_NOVOL is equivalent to BIOCDISK to the volume that we've
1422 * associated all physical drives to.
1423 * Maybe assoicate all physical drives to all logical volumes, but only return
1424 * physical drives on one logical volume. Which one? Either 1st volume that
1425 * is degraded, rebuilding, or failed?
1426 */
1427 bd = (struct bioc_disk *)addr;
1428 bd->bd_volid = 0;
1429 bd->bd_disknovol = true;
1430 /* FALLTHROUGH */
1431 case BIOCDISK:
1432 bd = (struct bioc_disk *)addr;
1433 if (bd->bd_volid < 0 || bd->bd_volid > sc->maxunits) {
1434 error = EINVAL;
1435 break;
1436 }
1437 ldp = sc->sc_lds[0];
1438 if (!ldp || (pd = bd->bd_diskid) < 0 || pd > ldp->ndrives) {
1439 error = EINVAL;
1440 break;
1441 }
1442 ldstat = sc->scratch;
1443 if ((error = ciss_ldstat(sc, bd->bd_volid, ldstat))) {
1444 break;
1445 }
1446 bd->bd_status = -1;
1447 if (ldstat->stat == CISS_LD_REBLD &&
1448 ldstat->bigrebuild == ldp->tgts[pd])
1449 bd->bd_status = BIOC_SDREBUILD;
1450 if (ciss_bitset(ldp->tgts[pd] & (~CISS_BIGBIT),
1451 ldstat->bigfailed)) {
1452 bd->bd_status = BIOC_SDFAILED;
1453 bd->bd_size = 0;
1454 bd->bd_channel = (ldp->tgts[pd] & (~CISS_BIGBIT)) /
1455 sc->ndrives;
1456 bd->bd_target = ldp->tgts[pd] % sc->ndrives;
1457 bd->bd_lun = 0;
1458 bd->bd_vendor[0] = '\0';
1459 bd->bd_serial[0] = '\0';
1460 bd->bd_procdev[0] = '\0';
1461 } else {
1462 pdid = sc->scratch;
1463 if ((error = ciss_pdid(sc, ldp->tgts[pd], pdid,
1464 XS_CTL_POLL))) {
1465 bd->bd_status = BIOC_SDFAILED;
1466 bd->bd_size = 0;
1467 bd->bd_channel = (ldp->tgts[pd] & (~CISS_BIGBIT)) /
1468 sc->ndrives;
1469 bd->bd_target = ldp->tgts[pd] % sc->ndrives;
1470 bd->bd_lun = 0;
1471 bd->bd_vendor[0] = '\0';
1472 bd->bd_serial[0] = '\0';
1473 bd->bd_procdev[0] = '\0';
1474 error = 0;
1475 break;
1476 }
1477 if (bd->bd_status < 0) {
1478 if (pdid->config & CISS_PD_SPARE)
1479 bd->bd_status = BIOC_SDHOTSPARE;
1480 else if (pdid->present & CISS_PD_PRESENT)
1481 bd->bd_status = BIOC_SDONLINE;
1482 else
1483 bd->bd_status = BIOC_SDINVALID;
1484 }
1485 bd->bd_size = (u_int64_t)le32toh(pdid->nblocks) *
1486 le16toh(pdid->blksz);
1487 bd->bd_channel = pdid->bus;
1488 bd->bd_target = pdid->target;
1489 bd->bd_lun = 0;
1490 strlcpy(bd->bd_vendor, pdid->model,
1491 sizeof(bd->bd_vendor));
1492 strlcpy(bd->bd_serial, pdid->serial,
1493 sizeof(bd->bd_serial));
1494 bd->bd_procdev[0] = '\0';
1495 }
1496 break;
1497
1498 case BIOCBLINK:
1499 bb = (struct bioc_blink *)addr;
1500 blink = sc->scratch;
1501 error = EINVAL;
1502 /* XXX workaround completely dumb scsi addressing */
1503 for (ld = 0; ld < sc->maxunits; ld++) {
1504 ldp = sc->sc_lds[ld];
1505 if (!ldp)
1506 continue;
1507 if (sc->ndrives == 256)
1508 drv = bb->bb_target;
1509 else
1510 drv = CISS_BIGBIT +
1511 bb->bb_channel * sc->ndrives +
1512 bb->bb_target;
1513 for (pd = 0; pd < ldp->ndrives; pd++)
1514 if (ldp->tgts[pd] == drv)
1515 error = ciss_blink(sc, ld, pd,
1516 bb->bb_status, blink);
1517 }
1518 break;
1519
1520 default:
1521 error = EINVAL;
1522 }
1523
1524 return (error);
1525 }
1526
1527 int
1528 ciss_ioctl_vol(struct ciss_softc *sc, struct bioc_vol *bv)
1529 {
1530 struct ciss_ldid *ldid;
1531 struct ciss_ld *ldp;
1532 struct ciss_ldstat *ldstat;
1533 struct ciss_pdid *pdid;
1534 int error = 0;
1535 u_int blks;
1536
1537 if (bv->bv_volid < 0 || bv->bv_volid > sc->maxunits) {
1538 return EINVAL;
1539 }
1540 ldp = sc->sc_lds[bv->bv_volid];
1541 ldid = sc->scratch;
1542 if ((error = ciss_ldid(sc, bv->bv_volid, ldid))) {
1543 return error;
1544 }
1545 bv->bv_status = BIOC_SVINVALID;
1546 blks = (u_int)le16toh(ldid->nblocks[1]) << 16 |
1547 le16toh(ldid->nblocks[0]);
1548 bv->bv_size = blks * (u_quad_t)le16toh(ldid->blksize);
1549 bv->bv_level = ciss_level[ldid->type];
1550 /*
1551 * XXX Should only return bv_nodisk for logigal volume that we've associated
1552 * the physical drives to: either the 1st degraded, rebuilding, or failed
1553 * volume else volume 0?
1554 */
1555 if (ldp) {
1556 bv->bv_nodisk = ldp->ndrives;
1557 strlcpy(bv->bv_dev, ldp->xname, sizeof(bv->bv_dev));
1558 }
1559 strlcpy(bv->bv_vendor, "CISS", sizeof(bv->bv_vendor));
1560 ldstat = sc->scratch;
1561 memset(ldstat, 0, sizeof(*ldstat));
1562 if ((error = ciss_ldstat(sc, bv->bv_volid, ldstat))) {
1563 return error;
1564 }
1565 bv->bv_percent = -1;
1566 bv->bv_seconds = 0;
1567 if (ldstat->stat < sizeof(ciss_stat)/sizeof(ciss_stat[0]))
1568 bv->bv_status = ciss_stat[ldstat->stat];
1569 if (bv->bv_status == BIOC_SVREBUILD ||
1570 bv->bv_status == BIOC_SVBUILDING) {
1571 u_int64_t prog;
1572
1573 ldp = sc->sc_lds[0];
1574 if (ldp) {
1575 bv->bv_nodisk = ldp->ndrives;
1576 strlcpy(bv->bv_dev, ldp->xname, sizeof(bv->bv_dev));
1577 }
1578 /*
1579 * XXX ldstat->prog is blocks remaining on physical drive being rebuilt
1580 * blks is only correct for a RAID1 set; RAID5 needs to determine the
1581 * size of the physical device - which we don't yet know.
1582 * ldstat->bigrebuild has physical device target, so could be used with
1583 * pdid to get size. Another way is to save pd information in sc so it's
1584 * easy to reference.
1585 */
1586 prog = (u_int64_t)((ldstat->prog[3] << 24) |
1587 (ldstat->prog[2] << 16) | (ldstat->prog[1] << 8) |
1588 ldstat->prog[0]);
1589 pdid = sc->scratch;
1590 if (!ciss_pdid(sc, ldstat->bigrebuild, pdid, XS_CTL_POLL)) {
1591 blks = le32toh(pdid->nblocks);
1592 bv->bv_percent = (blks - prog) * 1000ULL / blks;
1593 }
1594 }
1595 return 0;
1596 }
1597
1598 int
1599 ciss_blink(struct ciss_softc *sc, int ld, int pd, int stat,
1600 struct ciss_blink *blink)
1601 {
1602 struct ciss_ccb *ccb;
1603 struct ciss_cmd *cmd;
1604 struct ciss_ld *ldp;
1605
1606 if (ld > sc->maxunits)
1607 return EINVAL;
1608
1609 ldp = sc->sc_lds[ld];
1610 if (!ldp || pd > ldp->ndrives)
1611 return EINVAL;
1612
1613 ldp->bling.pdtab[ldp->tgts[pd]] = stat == BIOC_SBUNBLINK? 0 :
1614 CISS_BLINK_ALL;
1615 memcpy(blink, &ldp->bling, sizeof(*blink));
1616
1617 ccb = ciss_get_ccb(sc);
1618 if (ccb == NULL)
1619 return ENOMEM;
1620 ccb->ccb_len = sizeof(*blink);
1621 ccb->ccb_data = blink;
1622 ccb->ccb_xs = NULL;
1623 cmd = &ccb->ccb_cmd;
1624 cmd->tgt = htole32(CISS_CMD_MODE_PERIPH);
1625 cmd->tgt2 = 0;
1626 cmd->cdblen = 10;
1627 cmd->flags = CISS_CDB_CMD | CISS_CDB_SIMPL | CISS_CDB_OUT;
1628 cmd->tmo = htole16(0);
1629 memset(&cmd->cdb[0], 0, sizeof(cmd->cdb));
1630 cmd->cdb[0] = CISS_CMD_CTRL_SET;
1631 cmd->cdb[6] = CISS_CMS_CTRL_PDBLINK;
1632 cmd->cdb[7] = sizeof(*blink) >> 8; /* biiiig endian */
1633 cmd->cdb[8] = sizeof(*blink) & 0xff;
1634
1635 return ciss_cmd(ccb, BUS_DMA_NOWAIT, XS_CTL_POLL);
1636 }
1637
1638 int
1639 ciss_create_sensors(struct ciss_softc *sc)
1640 {
1641 int i;
1642 int nsensors = sc->maxunits;
1643
1644 if (nsensors == 0) {
1645 return 0;
1646 }
1647
1648 sc->sc_sme = sysmon_envsys_create();
1649 sc->sc_sensor = malloc(sizeof(envsys_data_t) * nsensors,
1650 M_DEVBUF, M_WAITOK | M_ZERO);
1651
1652 for (i = 0; i < nsensors; i++) {
1653 sc->sc_sensor[i].units = ENVSYS_DRIVE;
1654 sc->sc_sensor[i].state = ENVSYS_SINVALID;
1655 sc->sc_sensor[i].value_cur = ENVSYS_DRIVE_EMPTY;
1656 /* Enable monitoring for drive state changes */
1657 sc->sc_sensor[i].flags |= ENVSYS_FMONSTCHANGED;
1658 /* logical drives */
1659 snprintf(sc->sc_sensor[i].desc,
1660 sizeof(sc->sc_sensor[i].desc), "%s:%d",
1661 device_xname(sc->sc_dev), i);
1662 if (sysmon_envsys_sensor_attach(sc->sc_sme,
1663 &sc->sc_sensor[i]))
1664 goto out;
1665 }
1666
1667 sc->sc_sme->sme_name = device_xname(sc->sc_dev);
1668 sc->sc_sme->sme_cookie = sc;
1669 sc->sc_sme->sme_refresh = ciss_sensor_refresh;
1670 if (sysmon_envsys_register(sc->sc_sme)) {
1671 printf("%s: unable to register with sysmon\n",
1672 device_xname(sc->sc_dev));
1673 return(1);
1674 }
1675 return (0);
1676
1677 out:
1678 free(sc->sc_sensor, M_DEVBUF);
1679 sysmon_envsys_destroy(sc->sc_sme);
1680 return EINVAL;
1681 }
1682
1683 void
1684 ciss_sensor_refresh(struct sysmon_envsys *sme, envsys_data_t *edata)
1685 {
1686 struct ciss_softc *sc = sme->sme_cookie;
1687 struct bioc_vol bv;
1688
1689 if (edata->sensor >= sc->maxunits)
1690 return;
1691
1692 memset(&bv, 0, sizeof(bv));
1693 bv.bv_volid = edata->sensor;
1694 if (ciss_ioctl_vol(sc, &bv))
1695 bv.bv_status = BIOC_SVINVALID;
1696
1697 bio_vol_to_envsys(edata, &bv);
1698 }
1699 #endif /* NBIO > 0 */
1700