Lines Matching defs:icp
1 /* $NetBSD: icp.c,v 1.37 2022/04/10 09:50:45 andvar Exp $ */
65 * from both ICP-Vortex and Öko.neT. I want to thank them for their support.
70 * Support for the ICP-Vortex management tools added by
79 __KERNEL_RCSID(0, "$NetBSD: icp.c,v 1.37 2022/04/10 09:50:45 andvar Exp $");
108 void icp_ccb_submit(struct icp_softc *icp, struct icp_ccb *ic);
126 icp_init(struct icp_softc *icp, const char *intrstr)
138 aprint_normal_dev(icp->icp_dv, "interrupting at %s\n",
141 SIMPLEQ_INIT(&icp->icp_ccb_queue);
142 SIMPLEQ_INIT(&icp->icp_ccb_freelist);
143 SIMPLEQ_INIT(&icp->icp_ucmd_queue);
144 callout_init(&icp->icp_wdog_callout, 0);
149 if (bus_dmamap_create(icp->icp_dmat, ICP_SCRATCH_SIZE, 1,
151 &icp->icp_scr_dmamap) != 0) {
152 aprint_error_dev(icp->icp_dv, "cannot create scratch dmamap\n");
157 if (bus_dmamem_alloc(icp->icp_dmat, ICP_SCRATCH_SIZE, PAGE_SIZE, 0,
158 icp->icp_scr_seg, 1, &nsegs, BUS_DMA_NOWAIT) != 0) {
159 aprint_error_dev(icp->icp_dv, "cannot alloc scratch dmamem\n");
164 if (bus_dmamem_map(icp->icp_dmat, icp->icp_scr_seg, nsegs,
165 ICP_SCRATCH_SIZE, &icp->icp_scr, 0)) {
166 aprint_error_dev(icp->icp_dv, "cannot map scratch dmamem\n");
171 if (bus_dmamap_load(icp->icp_dmat, icp->icp_scr_dmamap, icp->icp_scr,
173 aprint_error_dev(icp->icp_dv, "cannot load scratch dmamap\n");
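The fragments around lines 149-173 follow the standard bus_dma(9) recipe for a one-segment scratch area: create a map, allocate DMA-safe memory, map it into kernel virtual address space, then load the map so its bus address is known. A minimal sketch of that recipe under illustrative names (demo_softc, DEMO_SCRATCH_SIZE), not the driver's own, with the usual unwinding on failure:

/*
 * Sketch only: single-segment scratch-area setup with bus_dma(9).
 * All names here are illustrative, not taken from icp.c.
 */
#include <sys/param.h>
#include <sys/errno.h>
#include <sys/bus.h>

#define DEMO_SCRATCH_SIZE	PAGE_SIZE	/* assumed size */

struct demo_softc {
	bus_dma_tag_t		sc_dmat;
	bus_dmamap_t		sc_map;
	bus_dma_segment_t	sc_seg;
	void			*sc_kva;
};

static int
demo_scratch_init(struct demo_softc *sc)
{
	int nsegs;

	/* 1. A map that may describe at most one physical segment. */
	if (bus_dmamap_create(sc->sc_dmat, DEMO_SCRATCH_SIZE, 1,
	    DEMO_SCRATCH_SIZE, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
	    &sc->sc_map) != 0)
		return ENOMEM;

	/* 2. DMA-safe memory, page aligned. */
	if (bus_dmamem_alloc(sc->sc_dmat, DEMO_SCRATCH_SIZE, PAGE_SIZE, 0,
	    &sc->sc_seg, 1, &nsegs, BUS_DMA_NOWAIT) != 0)
		goto bad_map;

	/* 3. Map it into kernel virtual address space. */
	if (bus_dmamem_map(sc->sc_dmat, &sc->sc_seg, nsegs,
	    DEMO_SCRATCH_SIZE, &sc->sc_kva, 0) != 0)
		goto bad_mem;

	/* 4. Load the map so sc_seg.ds_addr is usable as a bus address. */
	if (bus_dmamap_load(sc->sc_dmat, sc->sc_map, sc->sc_kva,
	    DEMO_SCRATCH_SIZE, NULL, BUS_DMA_NOWAIT) != 0)
		goto bad_kva;

	return 0;

 bad_kva:
	bus_dmamem_unmap(sc->sc_dmat, sc->sc_kva, DEMO_SCRATCH_SIZE);
 bad_mem:
	bus_dmamem_free(sc->sc_dmat, &sc->sc_seg, nsegs);
 bad_map:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_map);
	return ENOMEM;
}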
182 icp->icp_ccbs = ic;
191 rv = bus_dmamap_create(icp->icp_dmat, ICP_MAX_XFER,
197 icp->icp_nccbs++;
198 icp_ccb_free(icp, ic);
201 if (icp->icp_nccbs != ICP_NCCBS)
202 aprint_error_dev(icp->icp_dv, "%d/%d CCBs usable\n",
203 icp->icp_nccbs, ICP_NCCBS);
209 if (!icp_cmd(icp, ICP_SCREENSERVICE, ICP_INIT, 0, 0, 0)) {
210 aprint_error_dev(icp->icp_dv, "screen service init error %d\n",
211 icp->icp_status);
215 if (!icp_cmd(icp, ICP_CACHESERVICE, ICP_INIT, ICP_LINUX_OS, 0, 0)) {
216 aprint_error_dev(icp->icp_dv, "cache service init error %d\n",
217 icp->icp_status);
221 icp_cmd(icp, ICP_CACHESERVICE, ICP_UNFREEZE_IO, 0, 0, 0);
223 if (!icp_cmd(icp, ICP_CACHESERVICE, ICP_MOUNT, 0xffff, 1, 0)) {
224 aprint_error_dev(icp->icp_dv, "cache service mount error %d\n",
225 icp->icp_status);
229 if (!icp_cmd(icp, ICP_CACHESERVICE, ICP_INIT, ICP_LINUX_OS, 0, 0)) {
230 aprint_error_dev(icp->icp_dv, "cache service post-mount init error %d\n",
231 icp->icp_status);
234 cdev_cnt = (u_int16_t)icp->icp_info;
235 icp->icp_fw_vers = icp->icp_service;
237 if (!icp_cmd(icp, ICP_SCSIRAWSERVICE, ICP_INIT, 0, 0, 0)) {
238 aprint_error_dev(icp->icp_dv, "raw service init error %d\n",
239 icp->icp_status);
247 if (icp_cmd(icp, ICP_SCSIRAWSERVICE, ICP_SET_FEAT, ICP_SCATTER_GATHER,
249 if (icp_cmd(icp, ICP_SCSIRAWSERVICE, ICP_GET_FEAT, 0, 0, 0))
250 feat = icp->icp_info;
254 aprint_normal_dev(icp->icp_dv,
258 icp->icp_features |= ICP_FEAT_RAWSERVICE;
264 if (icp_cmd(icp, ICP_CACHESERVICE, ICP_SET_FEAT, 0,
266 if (icp_cmd(icp, ICP_CACHESERVICE, ICP_GET_FEAT, 0, 0, 0))
267 feat = icp->icp_info;
271 aprint_normal_dev(icp->icp_dv,
275 icp->icp_features |= ICP_FEAT_CACHESERVICE;
280 if (!icp_cmd(icp, ICP_CACHESERVICE, ICP_IOCTL, ICP_BOARD_INFO,
282 aprint_error_dev(icp->icp_dv, "unable to retrieve board info\n");
285 memcpy(&binfo, icp->icp_scr, sizeof(binfo));
287 aprint_normal_dev(icp->icp_dv,
296 if (icp->icp_features & ICP_FEAT_CACHESERVICE) {
298 if (!icp_cmd(icp, ICP_CACHESERVICE, ICP_INFO, j, 0,
302 icp->icp_cdr[j].cd_size = icp->icp_info;
303 if (icp->icp_cdr[j].cd_size != 0)
304 icp->icp_ndevs++;
306 if (icp_cmd(icp, ICP_CACHESERVICE, ICP_DEVTYPE, j, 0,
308 icp->icp_cdr[j].cd_type = icp->icp_info;
312 if (icp->icp_features & ICP_FEAT_RAWSERVICE) {
313 icp->icp_nchan = binfo.bi_chan_count;
314 icp->icp_ndevs += icp->icp_nchan;
317 icp_recompute_openings(icp);
322 if (icp->icp_features & ICP_FEAT_RAWSERVICE) {
327 iv = (struct icp_ioc_version *)icp->icp_scr;
334 if (icp_cmd(icp, ICP_CACHESERVICE, ICP_IOCTL,
339 icp->icp_bus_id[j] = ri->ri_procid;
344 gc = (struct icp_getch *)icp->icp_scr;
347 if (!icp_cmd(icp, ICP_CACHESERVICE, ICP_IOCTL,
351 aprint_error_dev(icp->icp_dv,
355 icp->icp_bus_id[j] = gc->gc_scsiid;
360 if (icp->icp_bus_id[j] > ICP_MAXID_FC)
361 icp->icp_bus_id[j] = ICP_MAXID_FC;
367 icp->icp_children[icpa.icpa_unit] =
368 config_found(icp->icp_dv, &icpa, icp_print,
377 if (icp->icp_features & ICP_FEAT_CACHESERVICE) {
379 if (icp->icp_cdr[j].cd_size == 0)
386 icp->icp_children[icpa.icpa_unit] =
387 config_found(icp->icp_dv, &icpa, icp_print,
396 icp_watchdog(icp);
409 bus_dmamap_destroy(icp->icp_dmat,
410 icp->icp_ccbs[j].ic_xfer_map);
412 free(icp->icp_ccbs, M_DEVBUF);
414 bus_dmamap_unload(icp->icp_dmat, icp->icp_scr_dmamap);
416 bus_dmamem_unmap(icp->icp_dmat, icp->icp_scr,
419 bus_dmamem_free(icp->icp_dmat, icp->icp_scr_seg, nsegs);
420 bus_dmamap_destroy(icp->icp_dmat, icp->icp_scr_dmamap);
426 icp_register_servicecb(struct icp_softc *icp, int unit,
430 icp->icp_servicecb[unit] = cb;
434 icp_rescan(struct icp_softc *icp, int unit)
446 KASSERT(icp->icp_qfreeze != 0);
447 KASSERT(icp->icp_running == 0);
450 if (!icp_cmd(icp, ICP_CACHESERVICE, ICP_INFO, unit, 0, 0)) {
453 device_xname(icp->icp_dv), unit, icp->icp_status);
457 if ((newsize = icp->icp_info) == 0) {
460 device_xname(icp->icp_dv), unit);
467 if (icp->icp_cdr[unit].cd_size != 0)
468 icp->icp_ndevs--;
469 icp->icp_cdr[unit].cd_size = 0;
470 if (icp->icp_children[unit] != NULL) {
471 (void) config_detach(icp->icp_children[unit],
473 icp->icp_children[unit] = NULL;
478 if (icp_cmd(icp, ICP_CACHESERVICE, ICP_DEVTYPE, unit, 0, 0))
479 newtype = icp->icp_info;
483 device_xname(icp->icp_dv), unit);
490 device_xname(icp->icp_dv), unit, icp->icp_cdr[unit].cd_size,
491 icp->icp_cdr[unit].cd_type, newsize, newtype);
498 if (icp->icp_children[unit] == NULL ||
499 newsize != icp->icp_cdr[unit].cd_size ||
500 newtype != icp->icp_cdr[unit].cd_type) {
501 if (icp->icp_cdr[unit].cd_size == 0)
502 icp->icp_ndevs++;
503 icp->icp_cdr[unit].cd_size = newsize;
504 icp->icp_cdr[unit].cd_type = newtype;
505 if (icp->icp_children[unit] != NULL)
506 (void) config_detach(icp->icp_children[unit],
513 icp->icp_children[unit] =
514 config_found(icp->icp_dv, &icpa, icp_print,
519 icp_recompute_openings(icp);
523 icp_rescan_all(struct icp_softc *icp)
532 if (!icp_cmd(icp, ICP_CACHESERVICE, ICP_INIT, ICP_LINUX_OS, 0, 0)) {
534 device_xname(icp->icp_dv));
537 cdev_cnt = (u_int16_t) icp->icp_info;
541 icp_rescan(icp, unit);
545 if (icp->icp_cdr[unit].cd_size != 0) {
548 device_xname(icp->icp_dv), unit, cdev_cnt);
550 icp->icp_ndevs--;
551 icp->icp_cdr[unit].cd_size = 0;
552 if (icp->icp_children[unit] != NULL) {
553 (void) config_detach(icp->icp_children[unit],
555 icp->icp_children[unit] = NULL;
560 icp);
564 icp_recompute_openings(struct icp_softc *icp)
568 if (icp->icp_ndevs != 0)
570 (icp->icp_nccbs - ICP_NCCB_RESERVE) / icp->icp_ndevs;
573 if (openings == icp->icp_openings)
575 icp->icp_openings = openings;
579 device_xname(icp->icp_dv), icp->icp_ndevs,
580 icp->icp_ndevs == 1 ? "" : "s", icp->icp_openings);
584 if (icp->icp_children[unit] != NULL)
585 (*icp->icp_servicecb[unit]->iscb_openings)(
586 icp->icp_children[unit], icp->icp_openings);
593 struct icp_softc *icp;
596 icp = cookie;
599 icp_intr(icp);
600 if (ICP_HAS_WORK(icp))
601 icp_ccb_enqueue(icp, NULL);
604 callout_reset(&icp->icp_wdog_callout, hz * ICP_WATCHDOG_FREQ,
605 icp_watchdog, icp);
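Lines 593-605 are the periodic watchdog: it polls the controller through icp_intr(), restarts any queued work, and re-arms itself with callout_reset(). A minimal sketch of that self-rearming callout(9) pattern; the names and the poll interval below are assumptions, not the driver's values:

/*
 * Sketch only: a self-rearming callout(9) watchdog of the kind the
 * icp_watchdog() fragments suggest.  Names and interval are illustrative.
 */
#include <sys/callout.h>
#include <sys/kernel.h>		/* hz */

#define DEMO_WDOG_FREQ	5	/* seconds between polls (assumed) */

struct demo_softc {
	struct callout sc_wdog;
};

static void
demo_watchdog(void *arg)
{
	struct demo_softc *sc = arg;

	/* ... poll the hardware and restart any queued work ... */

	/* Re-arm for the next interval. */
	callout_reset(&sc->sc_wdog, hz * DEMO_WDOG_FREQ,
	    demo_watchdog, sc);
}

static void
demo_watchdog_start(struct demo_softc *sc)
{
	callout_init(&sc->sc_wdog, 0);
	demo_watchdog(sc);	/* first poll arms the callout */
}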
629 icp_async_event(struct icp_softc *icp, int service)
633 if (icp->icp_status == ICP_S_MSG_REQUEST) {
637 if ((icp->icp_fw_vers & 0xff) >= 0x1a) {
638 icp->icp_evt.size = 0;
639 icp->icp_evt.eu.async.ionode =
640 device_unit(icp->icp_dv);
641 icp->icp_evt.eu.async.status = icp->icp_status;
646 printf("%s: %s\n", device_xname(icp->icp_dv),
647 icp->icp_evt.event_string);
649 icp->icp_evt.size = sizeof(icp->icp_evt.eu.async);
650 icp->icp_evt.eu.async.ionode =
651 device_unit(icp->icp_dv);
652 icp->icp_evt.eu.async.service = service;
653 icp->icp_evt.eu.async.status = icp->icp_status;
654 icp->icp_evt.eu.async.info = icp->icp_info;
656 *(u_int32_t *) icp->icp_evt.eu.async.scsi_coord =
657 icp->icp_info2;
659 icp_store_event(icp, GDT_ES_ASYNC, service, &icp->icp_evt);
668 struct icp_softc *icp;
672 icp = cookie;
674 ctx.istatus = (*icp->icp_get_status)(icp);
676 icp->icp_status = ICP_S_NO_STATUS;
680 (*icp->icp_intr)(icp, &ctx);
682 icp->icp_status = ctx.cmd_status;
683 icp->icp_service = ctx.service;
684 icp->icp_info = ctx.info;
685 icp->icp_info2 = ctx.info2;
689 icp_async_event(icp, ctx.service);
693 aprint_error_dev(icp->icp_dv, "uninitialized or unknown service (%d/%d)\n",
695 icp->icp_evt.size = sizeof(icp->icp_evt.eu.driver);
696 icp->icp_evt.eu.driver.ionode = device_unit(icp->icp_dv);
697 icp_store_event(icp, GDT_ES_DRIVER, 4, &icp->icp_evt);
701 if ((ctx.istatus - 2) > icp->icp_nccbs)
704 ic = &icp->icp_ccbs[ctx.istatus - 2];
705 ic->ic_status = icp->icp_status;
708 /* XXX ICP's "iir" driver just sends an event here. */
716 KDASSERT(icp->icp_running != 0);
717 if (--icp->icp_running == 0 &&
718 (icp->icp_flags & ICP_F_WAIT_FREEZE) != 0) {
719 icp->icp_flags &= ~ICP_F_WAIT_FREEZE;
720 wakeup(&icp->icp_qfreeze);
723 switch (icp->icp_status) {
726 printf("%s: ICP_S_BSY received\n", device_xname(icp->icp_dv));
729 SIMPLEQ_INSERT_HEAD(&icp->icp_ucmd_queue, ic, ic_chain);
731 SIMPLEQ_INSERT_HEAD(&icp->icp_ccb_queue, ic, ic_chain);
742 if (ICP_HAS_WORK(icp))
743 icp_ccb_enqueue(icp, NULL);
759 struct icp_softc *icp = device_private(ic->ic_dv);
763 ucmd->status = icp->icp_status;
764 ucmd->info = icp->icp_info;
767 bus_dmamap_sync(icp->icp_dmat,
768 icp->icp_scr_dmamap,
772 (char *)icp->icp_scr + ICP_SCRATCH_UCMD, iu->iu_cnt);
775 icp->icp_ucmd_ccb = NULL;
785 icp_cmd(struct icp_softc *icp, u_int8_t service, u_int16_t opcode,
797 ic = icp_ccb_alloc_wait(icp);
809 htole32(icp->icp_scr_seg[0].ds_addr);
811 bus_dmamap_sync(icp->icp_dmat,
812 icp->icp_scr_dmamap, 0, arg3,
833 rv = icp_ccb_poll(icp, ic, 10000);
838 bus_dmamap_sync(icp->icp_dmat,
839 icp->icp_scr_dmamap, 0, arg3,
846 icp_ccb_free(icp, ic);
849 return (icp->icp_status == ICP_S_OK);
853 icp_ucmd(struct icp_softc *icp, gdt_ucmd_t *ucmd)
864 aprint_error_dev(icp->icp_dv, "scratch buffer too small (%d/%d)\n",
872 aprint_error_dev(icp->icp_dv, "scratch buffer too small (%d/%d)\n",
881 aprint_error_dev(icp->icp_dv, "scratch buffer too small (%d/%d)\n",
890 ic = icp_ccb_alloc_wait(icp);
905 htole32(icp->icp_scr_seg[0].ds_addr +
919 htole32(icp->icp_scr_seg[0].ds_addr +
939 htole32(icp->icp_scr_seg[0].ds_addr +
943 htole32(icp->icp_scr_seg[0].ds_addr + ICP_SCRATCH_UCMD);
955 if (__predict_false((error = icp_ccb_wait_user(icp, ic, 30000)) != 0))
956 aprint_error_dev(icp->icp_dv, "error %d waiting for ucmd to complete\n",
960 icp_ccb_free(icp, ic);
966 icp_ccb_alloc(struct icp_softc *icp)
973 SIMPLEQ_FIRST(&icp->icp_ccb_freelist)) == NULL)) {
977 SIMPLEQ_REMOVE_HEAD(&icp->icp_ccb_freelist, ic_chain);
985 icp_ccb_alloc_wait(struct icp_softc *icp)
991 while ((ic = SIMPLEQ_FIRST(&icp->icp_ccb_freelist)) == NULL) {
992 icp->icp_flags |= ICP_F_WAIT_CCB;
993 (void) tsleep(&icp->icp_ccb_freelist, PRIBIO, "icpccb", 0);
995 SIMPLEQ_REMOVE_HEAD(&icp->icp_ccb_freelist, ic_chain);
1003 icp_ccb_free(struct icp_softc *icp, struct icp_ccb *ic)
1010 SIMPLEQ_INSERT_HEAD(&icp->icp_ccb_freelist, ic, ic_chain);
1011 if (__predict_false((icp->icp_flags & ICP_F_WAIT_CCB) != 0)) {
1012 icp->icp_flags &= ~ICP_F_WAIT_CCB;
1013 wakeup(&icp->icp_ccb_freelist);
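icp_ccb_alloc_wait() and icp_ccb_free() (lines 985-1013) pair a SIMPLEQ freelist with a tsleep(9)/wakeup(9) handshake keyed on the freelist head and the ICP_F_WAIT_CCB flag. A stripped-down sketch of the same pattern under illustrative names, with interrupt protection elided:

/*
 * Sketch only: SIMPLEQ CCB freelist with a sleep/wakeup handshake.
 * Names are illustrative; splbio()-style protection is elided.
 */
#include <sys/param.h>
#include <sys/queue.h>
#include <sys/proc.h>
#include <sys/systm.h>

struct demo_ccb {
	SIMPLEQ_ENTRY(demo_ccb) dc_chain;
};

struct demo_softc {
	SIMPLEQ_HEAD(, demo_ccb) sc_ccb_freelist;
	int sc_flags;
#define	DEMO_F_WAIT_CCB	0x01
};

static struct demo_ccb *
demo_ccb_alloc_wait(struct demo_softc *sc)
{
	struct demo_ccb *dc;

	/* Sleep on the freelist head until a CCB is returned to it. */
	while ((dc = SIMPLEQ_FIRST(&sc->sc_ccb_freelist)) == NULL) {
		sc->sc_flags |= DEMO_F_WAIT_CCB;
		(void)tsleep(&sc->sc_ccb_freelist, PRIBIO, "democcb", 0);
	}
	SIMPLEQ_REMOVE_HEAD(&sc->sc_ccb_freelist, dc_chain);

	return dc;
}

static void
demo_ccb_free(struct demo_softc *sc, struct demo_ccb *dc)
{

	SIMPLEQ_INSERT_HEAD(&sc->sc_ccb_freelist, dc, dc_chain);
	/* Wake any thread stalled in demo_ccb_alloc_wait(). */
	if ((sc->sc_flags & DEMO_F_WAIT_CCB) != 0) {
		sc->sc_flags &= ~DEMO_F_WAIT_CCB;
		wakeup(&sc->sc_ccb_freelist);
	}
}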
1019 icp_ccb_enqueue(struct icp_softc *icp, struct icp_ccb *ic)
1027 SIMPLEQ_INSERT_TAIL(&icp->icp_ucmd_queue, ic, ic_chain);
1029 SIMPLEQ_INSERT_TAIL(&icp->icp_ccb_queue, ic, ic_chain);
1032 for (; icp->icp_qfreeze == 0;) {
1034 SIMPLEQ_FIRST(&icp->icp_ucmd_queue)) != NULL)) {
1043 if (icp->icp_ucmd_ccb != NULL)
1045 if ((*icp->icp_test_busy)(icp))
1047 icp->icp_ucmd_ccb = ic;
1050 memcpy((char *)icp->icp_scr + ICP_SCRATCH_UCMD,
1052 bus_dmamap_sync(icp->icp_dmat,
1053 icp->icp_scr_dmamap,
1059 SIMPLEQ_FIRST(&icp->icp_ccb_queue)) != NULL)) {
1060 if ((*icp->icp_test_busy)(icp))
1066 icp_ccb_submit(icp, ic);
1068 SIMPLEQ_REMOVE_HEAD(&icp->icp_ucmd_queue, ic_chain);
1070 SIMPLEQ_REMOVE_HEAD(&icp->icp_ccb_queue, ic_chain);
1077 icp_ccb_map(struct icp_softc *icp, struct icp_ccb *ic, void *data, int size,
1086 rv = bus_dmamap_load(icp->icp_dmat, xfer, data, size, NULL,
1111 bus_dmamap_sync(icp->icp_dmat, xfer, 0, ic->ic_xfer_size, i);
1116 icp_ccb_unmap(struct icp_softc *icp, struct icp_ccb *ic)
1125 bus_dmamap_sync(icp->icp_dmat, ic->ic_xfer_map, 0, ic->ic_xfer_size, i);
1126 bus_dmamap_unload(icp->icp_dmat, ic->ic_xfer_map);
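icp_ccb_map() and icp_ccb_unmap() (lines 1077-1126) wrap the per-transfer bus_dmamap_load()/bus_dmamap_sync()/bus_dmamap_unload() lifecycle; the real routines also build the controller's scatter/gather list from the map's segments. A minimal sketch of that lifecycle for a single transfer, under illustrative names:

/*
 * Sketch only: per-transfer DMA map lifecycle (illustrative names).
 * isread is true for a device-to-memory transfer.
 */
#include <sys/param.h>
#include <sys/bus.h>

static int
demo_xfer_map(bus_dma_tag_t dmat, bus_dmamap_t map, void *data,
    size_t size, bool isread)
{
	int rv;

	rv = bus_dmamap_load(dmat, map, data, size, NULL,
	    BUS_DMA_NOWAIT | BUS_DMA_STREAMING |
	    (isread ? BUS_DMA_READ : BUS_DMA_WRITE));
	if (rv != 0)
		return rv;

	/* Flush/invalidate before handing the buffer to the device. */
	bus_dmamap_sync(dmat, map, 0, size,
	    isread ? BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);

	/*
	 * map->dm_segs[0 .. map->dm_nsegs - 1] now hold the bus
	 * addresses a controller scatter/gather list is built from.
	 */
	return 0;
}

static void
demo_xfer_unmap(bus_dma_tag_t dmat, bus_dmamap_t map, size_t size,
    bool isread)
{
	/* Make the device's writes visible to the CPU, then unload. */
	bus_dmamap_sync(dmat, map, 0, size,
	    isread ? BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(dmat, map);
}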
1130 icp_ccb_poll(struct icp_softc *icp, struct icp_ccb *ic, int timo)
1137 if (!(*icp->icp_test_busy)(icp))
1142 printf("%s: submit: busy\n", device_xname(icp->icp_dv));
1146 icp_ccb_submit(icp, ic);
1151 icp_intr(icp);
1170 device_xname(icp->icp_dv), ic->ic_status);
1176 aprint_error_dev(icp->icp_dv, "command timed out\n");
1180 while ((*icp->icp_test_busy)(icp) != 0)
1189 icp_ccb_wait(struct icp_softc *icp, struct icp_ccb *ic, int timo)
1196 icp_ccb_enqueue(icp, ic);
1206 aprint_error_dev(icp->icp_dv, "command failed; status=%x\n",
1215 icp_ccb_wait_user(struct icp_softc *icp, struct icp_ccb *ic, int timo)
1219 ic->ic_dv = icp->icp_dv;
1224 icp_ccb_enqueue(icp, ic);
1237 icp_ccb_submit(struct icp_softc *icp, struct icp_ccb *ic)
1242 (*icp->icp_set_sema0)(icp);
1248 icp->icp_running++;
1250 (*icp->icp_copy_cmd)(icp, ic);
1251 (*icp->icp_release_event)(icp, ic);
1255 icp_freeze(struct icp_softc *icp)
1260 if (icp->icp_qfreeze++ == 0) {
1261 while (icp->icp_running != 0) {
1262 icp->icp_flags |= ICP_F_WAIT_FREEZE;
1263 error = tsleep(&icp->icp_qfreeze, PRIBIO|PCATCH,
1265 if (error != 0 && --icp->icp_qfreeze == 0 &&
1266 ICP_HAS_WORK(icp)) {
1267 icp_ccb_enqueue(icp, NULL);
1278 icp)
1283 KDASSERT(icp->icp_qfreeze != 0);
1284 if (--icp->icp_qfreeze == 0 && ICP_HAS_WORK(icp))
1285 icp_ccb_enqueue(icp, NULL);
1295 icp_store_event(struct icp_softc *icp, u_int16_t source, u_int16_t idx,
1336 icp_read_event(struct icp_softc *icp, int handle, gdt_evt_str *estr)
1372 icp_readapp_event(struct icp_softc *icp, u_int8_t application,
1405 icp_clear_events(struct icp_softc *icp)