1 /* $NetBSD: sbp.c,v 1.39 2019/11/10 21:16:35 chs Exp $ */
2 /*-
3 * Copyright (c) 2003 Hidetoshi Shimokawa
4 * Copyright (c) 1998-2002 Katsushi Kobayashi and Hidetoshi Shimokawa
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. All advertising materials mentioning features or use of this software
16 * must display the acknowledgement as bellow:
17 *
18 * This product includes software developed by K. Kobayashi and H. Shimokawa
19 *
20 * 4. The name of the author may not be used to endorse or promote products
21 * derived from this software without specific prior written permission.
22 *
23 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
24 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
25 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
26 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
27 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
28 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
29 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
30 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
31 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
32 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
33 * POSSIBILITY OF SUCH DAMAGE.
34 *
35 * $FreeBSD: src/sys/dev/firewire/sbp.c,v 1.100 2009/02/18 18:41:34 sbruno Exp $
36 *
37 */
38
39 #include <sys/cdefs.h>
40 __KERNEL_RCSID(0, "$NetBSD: sbp.c,v 1.39 2019/11/10 21:16:35 chs Exp $");
41
42
43 #include <sys/param.h>
44 #include <sys/device.h>
45 #include <sys/errno.h>
46 #include <sys/buf.h>
47 #include <sys/callout.h>
48 #include <sys/condvar.h>
49 #include <sys/kernel.h>
50 #include <sys/kthread.h>
51 #include <sys/malloc.h>
52 #include <sys/mutex.h>
53 #include <sys/proc.h>
54 #include <sys/sysctl.h>
55
56 #include <sys/bus.h>
57
58 #include <dev/scsipi/scsi_spc.h>
59 #include <dev/scsipi/scsi_all.h>
60 #include <dev/scsipi/scsipi_all.h>
61 #include <dev/scsipi/scsiconf.h>
62 #include <dev/scsipi/scsipiconf.h>
63
64 #include <dev/ieee1394/firewire.h>
65 #include <dev/ieee1394/firewirereg.h>
66 #include <dev/ieee1394/fwdma.h>
67 #include <dev/ieee1394/iec13213.h>
68 #include <dev/ieee1394/sbp.h>
69
70 #include "locators.h"
71
72
73 #define SBP_FWDEV_ALIVE(fwdev) (((fwdev)->status == FWDEVATTACHED) \
74 && crom_has_specver((fwdev)->csrrom, CSRVAL_ANSIT10, CSRVAL_T10SBP2))
75
76 #define SBP_NUM_TARGETS 8 /* MAX 64 */
77 #define SBP_NUM_LUNS 64
78 #define SBP_MAXPHYS MIN(MAXPHYS, (512*1024) /* 512KB */)
79 #define SBP_DMA_SIZE PAGE_SIZE
80 #define SBP_LOGIN_SIZE sizeof(struct sbp_login_res)
81 #define SBP_QUEUE_LEN ((SBP_DMA_SIZE - SBP_LOGIN_SIZE) / sizeof(struct sbp_ocb))
82 #define SBP_NUM_OCB (SBP_QUEUE_LEN * SBP_NUM_TARGETS)
83
84 /*
85 * STATUS FIFO addressing
86 * bit
87 * -----------------------
88 * 0- 1( 2): 0 (alignment)
89 * 2- 9( 8): lun
90 * 10-23(14): unit
91 * 32-47(16): SBP_BIND_HI
92 * 48-63(16): bus_id, node_id
93 */
94 #define SBP_BIND_HI 0x1
95 #define SBP_DEV2ADDR(u, l) \
96 (((uint64_t)SBP_BIND_HI << 32) |\
97 (((u) & 0x3fff) << 10) |\
98 (((l) & 0xff) << 2))
99 #define SBP_ADDR2UNIT(a) (((a) >> 10) & 0x3fff)
100 #define SBP_ADDR2LUN(a) (((a) >> 2) & 0xff)
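/*
 * Example (illustrative values only): SBP_DEV2ADDR(2, 5) is
 * (0x1ULL << 32) | (2 << 10) | (5 << 2) = 0x100000814, and
 * SBP_ADDR2UNIT()/SBP_ADDR2LUN() recover 2 and 5 from the low bits.
 */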
101 #define SBP_INITIATOR 7
102
103 static const char *orb_fun_name[] = {
104 ORB_FUN_NAMES
105 };
106
107 static int debug = 0;
108 static int auto_login = 1;
109 static int max_speed = -1;
110 static int sbp_cold = 1;
111 static int ex_login = 1;
112 static int login_delay = 1000; /* msec */
113 static int scan_delay = 500; /* msec */
114 static int use_doorbell = 0;
115 static int sbp_tags = 0;
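/*
 * The tunables above are exported below as hw.sbp.* sysctl nodes
 * (auto_login, max_speed, exclusive_login, login_delay, scan_delay,
 * use_doorbell, tags, sbp_debug), so for example
 * "sysctl -w hw.sbp.sbp_debug=1" raises the debug level at run time.
 */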
116
117 static int sysctl_sbp_verify(SYSCTLFN_PROTO, int lower, int upper);
118 static int sysctl_sbp_verify_max_speed(SYSCTLFN_PROTO);
119 static int sysctl_sbp_verify_tags(SYSCTLFN_PROTO);
120
121 /*
122 * Setup sysctl(3) MIB, hw.sbp.*
123 *
124 * TBD: make CTLFLAG_PERMANENT conditional on whether this is built as a module
125 */
126 SYSCTL_SETUP(sysctl_sbp, "sysctl sbp(4) subtree setup")
127 {
128 int rc, sbp_node_num;
129 const struct sysctlnode *node;
130
131 if ((rc = sysctl_createv(clog, 0, NULL, &node,
132 CTLFLAG_PERMANENT, CTLTYPE_NODE, "sbp",
133 SYSCTL_DESCR("sbp controls"), NULL, 0, NULL,
134 0, CTL_HW, CTL_CREATE, CTL_EOL)) != 0)
135 goto err;
136 sbp_node_num = node->sysctl_num;
137
138 /* sbp auto login flag */
139 if ((rc = sysctl_createv(clog, 0, NULL, &node,
140 CTLFLAG_PERMANENT | CTLFLAG_READWRITE, CTLTYPE_INT,
141 "auto_login", SYSCTL_DESCR("SBP perform login automatically"),
142 NULL, 0, &auto_login,
143 0, CTL_HW, sbp_node_num, CTL_CREATE, CTL_EOL)) != 0)
144 goto err;
145
146 /* sbp max speed */
147 if ((rc = sysctl_createv(clog, 0, NULL, &node,
148 CTLFLAG_PERMANENT | CTLFLAG_READWRITE, CTLTYPE_INT,
149 "max_speed", SYSCTL_DESCR("SBP transfer max speed"),
150 sysctl_sbp_verify_max_speed, 0, &max_speed,
151 0, CTL_HW, sbp_node_num, CTL_CREATE, CTL_EOL)) != 0)
152 goto err;
153
154 /* sbp exclusive login flag */
155 if ((rc = sysctl_createv(clog, 0, NULL, &node,
156 CTLFLAG_PERMANENT | CTLFLAG_READWRITE, CTLTYPE_INT,
157 "exclusive_login", SYSCTL_DESCR("SBP enable exclusive login"),
158 NULL, 0, &ex_login,
159 0, CTL_HW, sbp_node_num, CTL_CREATE, CTL_EOL)) != 0)
160 goto err;
161
162 /* sbp login delay */
163 if ((rc = sysctl_createv(clog, 0, NULL, &node,
164 CTLFLAG_PERMANENT | CTLFLAG_READWRITE, CTLTYPE_INT,
165 "login_delay", SYSCTL_DESCR("SBP login delay in msec"),
166 NULL, 0, &login_delay,
167 0, CTL_HW, sbp_node_num, CTL_CREATE, CTL_EOL)) != 0)
168 goto err;
169
170 /* sbp scan delay */
171 if ((rc = sysctl_createv(clog, 0, NULL, &node,
172 CTLFLAG_PERMANENT | CTLFLAG_READWRITE, CTLTYPE_INT,
173 "scan_delay", SYSCTL_DESCR("SBP scan delay in msec"),
174 NULL, 0, &scan_delay,
175 0, CTL_HW, sbp_node_num, CTL_CREATE, CTL_EOL)) != 0)
176 goto err;
177
178 /* sbp use doorbell flag */
179 if ((rc = sysctl_createv(clog, 0, NULL, &node,
180 CTLFLAG_PERMANENT | CTLFLAG_READWRITE, CTLTYPE_INT,
181 "use_doorbell", SYSCTL_DESCR("SBP use doorbell request"),
182 NULL, 0, &use_doorbell,
183 0, CTL_HW, sbp_node_num, CTL_CREATE, CTL_EOL)) != 0)
184 goto err;
185
186 /* sbp force tagged queuing */
187 if ((rc = sysctl_createv(clog, 0, NULL, &node,
188 CTLFLAG_PERMANENT | CTLFLAG_READWRITE, CTLTYPE_INT,
189 "tags", SYSCTL_DESCR("SBP tagged queuing support"),
190 sysctl_sbp_verify_tags, 0, &sbp_tags,
191 0, CTL_HW, sbp_node_num, CTL_CREATE, CTL_EOL)) != 0)
192 goto err;
193
194 /* sbp driver debug flag */
195 if ((rc = sysctl_createv(clog, 0, NULL, &node,
196 CTLFLAG_PERMANENT | CTLFLAG_READWRITE, CTLTYPE_INT,
197 "sbp_debug", SYSCTL_DESCR("SBP debug flag"),
198 NULL, 0, &debug,
199 0, CTL_HW, sbp_node_num, CTL_CREATE, CTL_EOL)) != 0)
200 goto err;
201
202 return;
203
204 err:
205 aprint_error("%s: sysctl_createv failed (rc = %d)\n", __func__, rc);
206 }
207
208 static int
209 sysctl_sbp_verify(SYSCTLFN_ARGS, int lower, int upper)
210 {
211 int error, t;
212 struct sysctlnode node;
213
214 node = *rnode;
215 t = *(int*)rnode->sysctl_data;
216 node.sysctl_data = &t;
217 error = sysctl_lookup(SYSCTLFN_CALL(&node));
218 if (error || newp == NULL)
219 return error;
220
221 if (t < lower || t > upper)
222 return EINVAL;
223
224 *(int*)rnode->sysctl_data = t;
225
226 return 0;
227 }
228
229 static int
230 sysctl_sbp_verify_max_speed(SYSCTLFN_ARGS)
231 {
232
233 return sysctl_sbp_verify(SYSCTLFN_CALL(rnode), 0, FWSPD_S400);
234 }
235
236 static int
237 sysctl_sbp_verify_tags(SYSCTLFN_ARGS)
238 {
239
240 return sysctl_sbp_verify(SYSCTLFN_CALL(rnode), -1, 1);
241 }
242
243 #define NEED_RESPONSE 0
244
245 #define SBP_SEG_MAX rounddown(0xffff, PAGE_SIZE)
246 #ifdef __sparc64__ /* iommu */
247 #define SBP_IND_MAX howmany(SBP_MAXPHYS, SBP_SEG_MAX)
248 #else
249 #define SBP_IND_MAX howmany(SBP_MAXPHYS, PAGE_SIZE)
250 #endif
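/*
 * For example, with 4KB pages and a 64KB MAXPHYS, SBP_MAXPHYS is 64KB and
 * SBP_IND_MAX is 16 page-table (indirect) entries per OCB; on sparc64 the
 * IOMMU permits the larger SBP_SEG_MAX segments, so fewer entries suffice.
 */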
251 struct sbp_ocb {
252 uint32_t orb[8];
253 #define IND_PTR_OFFSET (sizeof(uint32_t) * 8)
254 struct ind_ptr ind_ptr[SBP_IND_MAX];
255 struct scsipi_xfer *xs;
256 struct sbp_dev *sdev;
257 uint16_t index;
258 uint16_t flags; /* XXX should be removed */
259 bus_dmamap_t dmamap;
260 bus_addr_t bus_addr;
261 STAILQ_ENTRY(sbp_ocb) ocb;
262 };
263
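/*
 * Sync only the device-visible prefix of OCB i (the ORB itself plus its
 * indirect page table) within the shared per-LUN DMA buffer; the host-only
 * members that follow are not synced.
 */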
264 #define SBP_ORB_DMA_SYNC(dma, i, op) \
265 bus_dmamap_sync((dma).dma_tag, (dma).dma_map, \
266 sizeof(struct sbp_ocb) * (i), \
267 sizeof(ocb->orb) + sizeof(ocb->ind_ptr), (op));
268
269 #define OCB_ACT_MGM 0
270 #define OCB_ACT_CMD 1
271 #define OCB_MATCH(o,s) ((o)->bus_addr == ntohl((s)->orb_lo))
272
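/*
 * Per-LUN state.  The fwdma area (SBP_DMA_SIZE bytes) holds SBP_QUEUE_LEN
 * struct sbp_ocb entries followed by the login response block; the ocb and
 * login pointers below point into that area (see sbp_alloc_lun()).
 */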
273 struct sbp_dev{
274 #define SBP_DEV_RESET 0 /* accept login */
275 #define SBP_DEV_LOGIN 1 /* to login */
276 #if 0
277 #define SBP_DEV_RECONN 2 /* to reconnect */
278 #endif
279 #define SBP_DEV_TOATTACH 3 /* to attach */
280 #define SBP_DEV_PROBE 4 /* scan lun */
281 #define SBP_DEV_ATTACHED 5 /* in operation */
282 #define SBP_DEV_DEAD 6 /* unavailable unit */
283 #define SBP_DEV_RETRY 7 /* unavailable unit */
284 uint8_t status:4,
285 timeout:4;
286 uint8_t type;
287 uint16_t lun_id;
288 uint16_t freeze;
289 #define ORB_LINK_DEAD (1 << 0)
290 #define VALID_LUN (1 << 1)
291 #define ORB_POINTER_ACTIVE (1 << 2)
292 #define ORB_POINTER_NEED (1 << 3)
293 #define ORB_DOORBELL_ACTIVE (1 << 4)
294 #define ORB_DOORBELL_NEED (1 << 5)
295 #define ORB_SHORTAGE (1 << 6)
296 uint16_t flags;
297 struct scsipi_periph *periph;
298 struct sbp_target *target;
299 struct fwdma_alloc dma;
300 struct sbp_login_res *login;
301 struct callout login_callout;
302 struct sbp_ocb *ocb;
303 STAILQ_HEAD(, sbp_ocb) ocbs;
304 STAILQ_HEAD(, sbp_ocb) free_ocbs;
305 struct sbp_ocb *last_ocb;
306 char vendor[32];
307 char product[32];
308 char revision[10];
309 char bustgtlun[32];
310 };
311
312 struct sbp_target {
313 int target_id;
314 int num_lun;
315 struct sbp_dev **luns;
316 struct sbp_softc *sbp;
317 struct fw_device *fwdev;
318 uint32_t mgm_hi, mgm_lo;
319 struct sbp_ocb *mgm_ocb_cur;
320 STAILQ_HEAD(, sbp_ocb) mgm_ocb_queue;
321 struct callout mgm_ocb_timeout;
322 STAILQ_HEAD(, fw_xfer) xferlist;
323 int n_xfer;
324 };
325
326 struct sbp_softc {
327 struct firewire_dev_comm sc_fd;
328 struct scsipi_adapter sc_adapter;
329 struct scsipi_channel sc_channel;
330 device_t sc_bus;
331 struct lwp *sc_lwp;
332 struct sbp_target sc_target;
333 struct fw_bind sc_fwb;
334 bus_dma_tag_t sc_dmat;
335 struct timeval sc_last_busreset;
336 int sc_flags;
337 kmutex_t sc_mtx;
338 kcondvar_t sc_cv;
339 };
340
341 MALLOC_DEFINE(M_SBP, "sbp", "SBP-II/IEEE1394");
342 MALLOC_DECLARE(M_SBP);
343
344
345 static int sbpmatch(device_t, cfdata_t, void *);
346 static void sbpattach(device_t, device_t, void *);
347 static int sbpdetach(device_t, int);
348
349 static void sbp_scsipi_request(struct scsipi_channel *, scsipi_adapter_req_t,
350 void *);
351 static void sbp_minphys(struct buf *);
352
353 static void sbp_show_sdev_info(struct sbp_dev *);
354 static void sbp_alloc_lun(struct sbp_target *);
355 static struct sbp_target *sbp_alloc_target(struct sbp_softc *,
356 struct fw_device *);
357 static void sbp_probe_lun(struct sbp_dev *);
358 static void sbp_login_callout(void *);
359 static void sbp_login(struct sbp_dev *);
360 static void sbp_probe_target(void *);
361 static void sbp_post_busreset(void *);
362 static void sbp_post_explore(void *);
363 #if NEED_RESPONSE
364 static void sbp_loginres_callback(struct fw_xfer *);
365 #endif
366 static inline void sbp_xfer_free(struct fw_xfer *);
367 static void sbp_reset_start_callback(struct fw_xfer *);
368 static void sbp_reset_start(struct sbp_dev *);
369 static void sbp_mgm_callback(struct fw_xfer *);
370 static void sbp_scsipi_scan_target(void *);
371 static inline void sbp_scan_dev(struct sbp_dev *);
372 static void sbp_do_attach(struct fw_xfer *);
373 static void sbp_agent_reset_callback(struct fw_xfer *);
374 static void sbp_agent_reset(struct sbp_dev *);
375 static void sbp_busy_timeout_callback(struct fw_xfer *);
376 static void sbp_busy_timeout(struct sbp_dev *);
377 static void sbp_orb_pointer_callback(struct fw_xfer *);
378 static void sbp_orb_pointer(struct sbp_dev *, struct sbp_ocb *);
379 static void sbp_doorbell_callback(struct fw_xfer *);
380 static void sbp_doorbell(struct sbp_dev *);
381 static struct fw_xfer *sbp_write_cmd(struct sbp_dev *, int, int);
382 static void sbp_mgm_orb(struct sbp_dev *, int, struct sbp_ocb *);
383 static void sbp_print_scsi_cmd(struct sbp_ocb *);
384 static void sbp_scsi_status(struct sbp_status *, struct sbp_ocb *);
385 static void sbp_fix_inq_data(struct sbp_ocb *);
386 static void sbp_recv(struct fw_xfer *);
387 static int sbp_logout_all(struct sbp_softc *);
388 static void sbp_free_sdev(struct sbp_dev *);
389 static void sbp_free_target(struct sbp_target *);
390 static void sbp_scsipi_detach_sdev(struct sbp_dev *);
391 static void sbp_scsipi_detach_target(struct sbp_target *);
392 static void sbp_target_reset(struct sbp_dev *, int);
393 static void sbp_mgm_timeout(void *);
394 static void sbp_timeout(void *);
395 static void sbp_action1(struct sbp_softc *, struct scsipi_xfer *);
396 static void sbp_execute_ocb(struct sbp_ocb *, bus_dma_segment_t *, int);
397 static struct sbp_ocb *sbp_dequeue_ocb(struct sbp_dev *, struct sbp_status *);
398 static struct sbp_ocb *sbp_enqueue_ocb(struct sbp_dev *, struct sbp_ocb *);
399 static struct sbp_ocb *sbp_get_ocb(struct sbp_dev *);
400 static void sbp_free_ocb(struct sbp_dev *, struct sbp_ocb *);
401 static void sbp_abort_ocb(struct sbp_ocb *, int);
402 static void sbp_abort_all_ocbs(struct sbp_dev *, int);
403
404
405 static const char *orb_status0[] = {
406 /* 0 */ "No additional information to report",
407 /* 1 */ "Request type not supported",
408 /* 2 */ "Speed not supported",
409 /* 3 */ "Page size not supported",
410 /* 4 */ "Access denied",
411 /* 5 */ "Logical unit not supported",
412 /* 6 */ "Maximum payload too small",
413 /* 7 */ "Reserved for future standardization",
414 /* 8 */ "Resources unavailable",
415 /* 9 */ "Function rejected",
416 /* A */ "Login ID not recognized",
417 /* B */ "Dummy ORB completed",
418 /* C */ "Request aborted",
419 /* FF */ "Unspecified error"
420 #define MAX_ORB_STATUS0 0xd
421 };
422
423 static const char *orb_status1_object[] = {
424 /* 0 */ "Operation request block (ORB)",
425 /* 1 */ "Data buffer",
426 /* 2 */ "Page table",
427 /* 3 */ "Unable to specify"
428 };
429
430 static const char *orb_status1_serial_bus_error[] = {
431 /* 0 */ "Missing acknowledge",
432 /* 1 */ "Reserved; not to be used",
433 /* 2 */ "Time-out error",
434 /* 3 */ "Reserved; not to be used",
435 /* 4 */ "Busy retry limit exceeded(X)",
436 /* 5 */ "Busy retry limit exceeded(A)",
437 /* 6 */ "Busy retry limit exceeded(B)",
438 /* 7 */ "Reserved for future standardization",
439 /* 8 */ "Reserved for future standardization",
440 /* 9 */ "Reserved for future standardization",
441 /* A */ "Reserved for future standardization",
442 /* B */ "Tardy retry limit exceeded",
443 /* C */ "Conflict error",
444 /* D */ "Data error",
445 /* E */ "Type error",
446 /* F */ "Address error"
447 };
448
449
450 CFATTACH_DECL_NEW(sbp, sizeof(struct sbp_softc),
451 sbpmatch, sbpattach, sbpdetach, NULL);
452
453
454 int
455 sbpmatch(device_t parent, cfdata_t cf, void *aux)
456 {
457 struct fw_attach_args *fwa = aux;
458
459 if (strcmp(fwa->name, "sbp") == 0)
460 return 1;
461 return 0;
462 }
463
464 static void
465 sbpattach(device_t parent, device_t self, void *aux)
466 {
467 struct sbp_softc *sc = device_private(self);
468 struct fw_attach_args *fwa = (struct fw_attach_args *)aux;
469 struct firewire_comm *fc;
470 struct scsipi_adapter *sc_adapter = &sc->sc_adapter;
471 struct scsipi_channel *sc_channel = &sc->sc_channel;
472 struct sbp_target *target = &sc->sc_target;
473 int dv_unit;
474
475 aprint_naive("\n");
476 aprint_normal(": SBP-2/SCSI over IEEE1394\n");
477
478 sc->sc_fd.dev = self;
479
480 if (cold)
481 sbp_cold++;
482 sc->sc_fd.fc = fc = fwa->fc;
483 mutex_init(&sc->sc_mtx, MUTEX_DEFAULT, IPL_VM);
484 cv_init(&sc->sc_cv, "sbp");
485
486 if (max_speed < 0)
487 max_speed = fc->speed;
488
489 sc->sc_dmat = fc->dmat;
490
491 sc->sc_target.fwdev = NULL;
492 sc->sc_target.luns = NULL;
493
494 /* Initialize mutexes and lists before we can error out
495 * to prevent crashes on detach
496 */
497 mutex_init(&sc->sc_fwb.fwb_mtx, MUTEX_DEFAULT, IPL_VM);
498 STAILQ_INIT(&sc->sc_fwb.xferlist);
499
500 if (sbp_alloc_target(sc, fwa->fwdev) == NULL)
501 return;
502
503 sc_adapter->adapt_dev = sc->sc_fd.dev;
504 sc_adapter->adapt_nchannels = 1;
505 sc_adapter->adapt_max_periph = 1;
506 sc_adapter->adapt_request = sbp_scsipi_request;
507 sc_adapter->adapt_minphys = sbp_minphys;
508 sc_adapter->adapt_openings = 8;
509
510 sc_channel->chan_adapter = sc_adapter;
511 sc_channel->chan_bustype = &scsi_bustype;
512 sc_channel->chan_defquirks = PQUIRK_ONLYBIG;
513 sc_channel->chan_channel = 0;
514 sc_channel->chan_flags = SCSIPI_CHAN_CANGROW | SCSIPI_CHAN_NOSETTLE;
515
516 sc_channel->chan_ntargets = 1;
517 sc_channel->chan_nluns = target->num_lun; /* We set nluns 0 now */
518 sc_channel->chan_id = 1;
519
520 sc->sc_bus = config_found(sc->sc_fd.dev, sc_channel, scsiprint);
521 if (sc->sc_bus == NULL) {
522 aprint_error_dev(self, "attach failed\n");
523 return;
524 }
525
526 /* We reserve 16-bit space (4 bytes X 64 units X 256 LUNs) */
527 dv_unit = device_unit(sc->sc_fd.dev);
528 sc->sc_fwb.start = SBP_DEV2ADDR(dv_unit, 0);
529 sc->sc_fwb.end = SBP_DEV2ADDR(dv_unit, -1);
530 /* pre-allocate xfer */
531 fw_xferlist_add(&sc->sc_fwb.xferlist, M_SBP,
532 /*send*/ 0, /*recv*/ SBP_RECV_LEN, SBP_NUM_OCB / 2,
533 fc, (void *)sc, sbp_recv);
534 fw_bindadd(fc, &sc->sc_fwb);
535
536 sc->sc_fd.post_busreset = sbp_post_busreset;
537 sc->sc_fd.post_explore = sbp_post_explore;
538
539 if (fc->status != FWBUSNOTREADY) {
540 sbp_post_busreset((void *)sc);
541 sbp_post_explore((void *)sc);
542 }
543 }
544
545 static int
546 sbpdetach(device_t self, int flags)
547 {
548 struct sbp_softc *sc = device_private(self);
549 struct firewire_comm *fc = sc->sc_fd.fc;
550
551 sbp_scsipi_detach_target(&sc->sc_target);
552
553 if (sc->sc_target.fwdev && SBP_FWDEV_ALIVE(sc->sc_target.fwdev)) {
554 sbp_logout_all(sc);
555
556 /* XXX wait for logout completion */
557 mutex_enter(&sc->sc_mtx);
558 cv_timedwait_sig(&sc->sc_cv, &sc->sc_mtx, hz/2);
559 mutex_exit(&sc->sc_mtx);
560 }
561
562 sbp_free_target(&sc->sc_target);
563
564 fw_bindremove(fc, &sc->sc_fwb);
565 fw_xferlist_remove(&sc->sc_fwb.xferlist);
566 mutex_destroy(&sc->sc_fwb.fwb_mtx);
567
568 mutex_destroy(&sc->sc_mtx);
569 cv_destroy(&sc->sc_cv);
570
571 return 0;
572 }
573
574
575 static void
576 sbp_scsipi_request(struct scsipi_channel *channel, scsipi_adapter_req_t req,
577 void *arg)
578 {
579 struct sbp_softc *sc = device_private(channel->chan_adapter->adapt_dev);
580 struct scsipi_xfer *xs = arg;
581 int i;
582
583 SBP_DEBUG(1)
584 printf("Called sbp_scsipi_request\n");
585 END_DEBUG
586
587 switch (req) {
588 case ADAPTER_REQ_RUN_XFER:
589 SBP_DEBUG(1)
590 printf("Got req_run_xfer\n");
591 printf("xs control: 0x%08x, timeout: %d\n",
592 xs->xs_control, xs->timeout);
593 printf("opcode: 0x%02x\n", (int)xs->cmd->opcode);
594 for (i = 0; i < 15; i++)
595 printf("0x%02x ",(int)xs->cmd->bytes[i]);
596 printf("\n");
597 END_DEBUG
598 if (xs->xs_control & XS_CTL_RESET) {
599 SBP_DEBUG(1)
600 printf("XS_CTL_RESET not supported\n");
601 END_DEBUG
602 break;
603 }
604 #define SBPSCSI_SBP2_MAX_CDB 12
605 if (xs->cmdlen > SBPSCSI_SBP2_MAX_CDB) {
606 SBP_DEBUG(0)
607 printf(
608 "sbp doesn't support cdb's larger than %d bytes\n",
609 SBPSCSI_SBP2_MAX_CDB);
610 END_DEBUG
611 xs->error = XS_DRIVER_STUFFUP;
612 scsipi_done(xs);
613 return;
614 }
615 sbp_action1(sc, xs);
616
617 break;
618 case ADAPTER_REQ_GROW_RESOURCES:
619 SBP_DEBUG(1)
620 printf("Got req_grow_resources\n");
621 END_DEBUG
622 break;
623 case ADAPTER_REQ_SET_XFER_MODE:
624 SBP_DEBUG(1)
625 printf("Got set xfer mode\n");
626 END_DEBUG
627 break;
628 default:
629 panic("Unknown request: %d\n", (int)req);
630 }
631 }
632
633 static void
634 sbp_minphys(struct buf *bp)
635 {
636
637 minphys(bp);
638 }
639
640
641 /*
642 * Display device characteristics on the console
643 */
644 static void
645 sbp_show_sdev_info(struct sbp_dev *sdev)
646 {
647 struct fw_device *fwdev = sdev->target->fwdev;
648 struct sbp_softc *sc = sdev->target->sbp;
649
650 aprint_normal_dev(sc->sc_fd.dev,
651 "ordered:%d type:%d EUI:%08x%08x node:%d speed:%d maxrec:%d\n",
652 (sdev->type & 0x40) >> 6,
653 (sdev->type & 0x1f),
654 fwdev->eui.hi,
655 fwdev->eui.lo,
656 fwdev->dst,
657 fwdev->speed,
658 fwdev->maxrec);
659 aprint_normal_dev(sc->sc_fd.dev, "%s '%s' '%s' '%s'\n",
660 sdev->bustgtlun, sdev->vendor, sdev->product, sdev->revision);
661 }
662
663 static void
664 sbp_alloc_lun(struct sbp_target *target)
665 {
666 struct crom_context cc;
667 struct csrreg *reg;
668 struct sbp_dev *sdev, **newluns;
669 struct sbp_softc *sc;
670 int maxlun, lun, i;
671
672 sc = target->sbp;
673 crom_init_context(&cc, target->fwdev->csrrom);
674 /* XXX should parse appropriate unit directories only */
675 maxlun = -1;
676 while (cc.depth >= 0) {
677 reg = crom_search_key(&cc, CROM_LUN);
678 if (reg == NULL)
679 break;
680 lun = reg->val & 0xffff;
681 SBP_DEBUG(0)
682 printf("target %d lun %d found\n", target->target_id, lun);
683 END_DEBUG
684 if (maxlun < lun)
685 maxlun = lun;
686 crom_next(&cc);
687 }
688 if (maxlun < 0)
689 aprint_normal_dev(sc->sc_fd.dev, "%d: no LUN found\n",
690 target->target_id);
691
692 maxlun++;
693 if (maxlun >= SBP_NUM_LUNS)
694 maxlun = SBP_NUM_LUNS;
695
696 /* Invalidate stale devices */
697 for (lun = 0; lun < target->num_lun; lun++) {
698 sdev = target->luns[lun];
699 if (sdev == NULL)
700 continue;
701 sdev->flags &= ~VALID_LUN;
702 if (lun >= maxlun) {
703 /* lost device */
704 sbp_scsipi_detach_sdev(sdev);
705 sbp_free_sdev(sdev);
706 target->luns[lun] = NULL;
707 }
708 }
709
710 /* Reallocate */
711 if (maxlun != target->num_lun) {
712 newluns = (struct sbp_dev **) realloc(target->luns,
713 sizeof(struct sbp_dev *) * maxlun,
714 M_SBP, M_WAITOK | M_ZERO);
715
716 /*
717 * We must zero the extended region in case
718 * realloc() does not allocate a new buffer.
719 */
720 if (maxlun > target->num_lun) {
721 const int sbp_dev_p_sz = sizeof(struct sbp_dev *);
722
723 memset(&newluns[target->num_lun], 0,
724 sbp_dev_p_sz * (maxlun - target->num_lun));
725 }
726
727 target->luns = newluns;
728 target->num_lun = maxlun;
729 }
730
731 crom_init_context(&cc, target->fwdev->csrrom);
732 while (cc.depth >= 0) {
733 int new = 0;
734
735 reg = crom_search_key(&cc, CROM_LUN);
736 if (reg == NULL)
737 break;
738 lun = reg->val & 0xffff;
739 if (lun >= SBP_NUM_LUNS) {
740 aprint_error_dev(sc->sc_fd.dev, "too large lun %d\n",
741 lun);
742 goto next;
743 }
744
745 sdev = target->luns[lun];
746 if (sdev == NULL) {
747 sdev = malloc(sizeof(struct sbp_dev),
748 M_SBP, M_WAITOK | M_ZERO);
749 target->luns[lun] = sdev;
750 sdev->lun_id = lun;
751 sdev->target = target;
752 STAILQ_INIT(&sdev->ocbs);
753 callout_init(&sdev->login_callout, CALLOUT_MPSAFE);
754 callout_setfunc(&sdev->login_callout,
755 sbp_login_callout, sdev);
756 sdev->status = SBP_DEV_RESET;
757 new = 1;
758 snprintf(sdev->bustgtlun, 32, "%s:%d:%d",
759 device_xname(sc->sc_fd.dev),
760 sdev->target->target_id,
761 sdev->lun_id);
762 if (!sc->sc_lwp)
763 if (kthread_create(
764 PRI_NONE, KTHREAD_MPSAFE, NULL,
765 sbp_scsipi_scan_target, &sc->sc_target,
766 &sc->sc_lwp,
767 "sbp%d_attach", device_unit(sc->sc_fd.dev)))
768 aprint_error_dev(sc->sc_fd.dev,
769 "unable to create thread");
770 }
771 sdev->flags |= VALID_LUN;
772 sdev->type = (reg->val & 0xff0000) >> 16;
773
774 if (new == 0)
775 goto next;
776
777 fwdma_alloc_setup(sc->sc_fd.dev, sc->sc_dmat, SBP_DMA_SIZE,
778 &sdev->dma, sizeof(uint32_t), BUS_DMA_NOWAIT);
779 if (sdev->dma.v_addr == NULL) {
780 free(sdev, M_SBP);
781 target->luns[lun] = NULL;
782 goto next;
783 }
784 sdev->ocb = (struct sbp_ocb *)sdev->dma.v_addr;
785 sdev->login = (struct sbp_login_res *)&sdev->ocb[SBP_QUEUE_LEN];
786 memset((char *)sdev->ocb, 0,
787 sizeof(struct sbp_ocb) * SBP_QUEUE_LEN);
788
789 STAILQ_INIT(&sdev->free_ocbs);
790 for (i = 0; i < SBP_QUEUE_LEN; i++) {
791 struct sbp_ocb *ocb = &sdev->ocb[i];
792
793 ocb->index = i;
794 ocb->bus_addr =
795 sdev->dma.bus_addr + sizeof(struct sbp_ocb) * i;
796 if (bus_dmamap_create(sc->sc_dmat, 0x100000,
797 SBP_IND_MAX, SBP_SEG_MAX, 0, 0, &ocb->dmamap)) {
798 aprint_error_dev(sc->sc_fd.dev,
799 "cannot create dmamap %d\n", i);
800 /* XXX */
801 goto next;
802 }
803 sbp_free_ocb(sdev, ocb); /* into free queue */
804 }
805 next:
806 crom_next(&cc);
807 }
808
809 for (lun = 0; lun < target->num_lun; lun++) {
810 sdev = target->luns[lun];
811 if (sdev != NULL && (sdev->flags & VALID_LUN) == 0) {
812 sbp_scsipi_detach_sdev(sdev);
813 sbp_free_sdev(sdev);
814 target->luns[lun] = NULL;
815 }
816 }
817 }
818
819 static struct sbp_target *
820 sbp_alloc_target(struct sbp_softc *sc, struct fw_device *fwdev)
821 {
822 struct sbp_target *target;
823 struct crom_context cc;
824 struct csrreg *reg;
825
826 SBP_DEBUG(1)
827 printf("sbp_alloc_target\n");
828 END_DEBUG
829 /* new target */
830 target = &sc->sc_target;
831 target->sbp = sc;
832 target->fwdev = fwdev;
833 target->target_id = 0;
834 target->mgm_ocb_cur = NULL;
835 SBP_DEBUG(1)
836 printf("target: mgm_port: %x\n", target->mgm_lo);
837 END_DEBUG
838 STAILQ_INIT(&target->xferlist);
839 target->n_xfer = 0;
840 STAILQ_INIT(&target->mgm_ocb_queue);
841 callout_init(&target->mgm_ocb_timeout, CALLOUT_MPSAFE);
842
843 target->luns = NULL;
844 target->num_lun = 0;
845
846 /* XXX we may want to reload mgm port after each bus reset */
847 /* XXX there might be multiple management agents */
848 crom_init_context(&cc, target->fwdev->csrrom);
849 reg = crom_search_key(&cc, CROM_MGM);
850 if (reg == NULL || reg->val == 0) {
851 aprint_error_dev(sc->sc_fd.dev, "NULL management address\n");
852 target->fwdev = NULL;
853 return NULL;
854 }
855
856 target->mgm_hi = 0xffff;
857 target->mgm_lo = 0xf0000000 | (reg->val << 2);
858
859 return target;
860 }
861
862 static void
863 sbp_probe_lun(struct sbp_dev *sdev)
864 {
865 struct fw_device *fwdev;
866 struct crom_context c, *cc = &c;
867 struct csrreg *reg;
868
869 memset(sdev->vendor, 0, sizeof(sdev->vendor));
870 memset(sdev->product, 0, sizeof(sdev->product));
871
872 fwdev = sdev->target->fwdev;
873 crom_init_context(cc, fwdev->csrrom);
874 /* get vendor string */
875 crom_search_key(cc, CSRKEY_VENDOR);
876 crom_next(cc);
877 crom_parse_text(cc, sdev->vendor, sizeof(sdev->vendor));
878 /* skip to the unit directory for SBP-2 */
879 while ((reg = crom_search_key(cc, CSRKEY_VER)) != NULL) {
880 if (reg->val == CSRVAL_T10SBP2)
881 break;
882 crom_next(cc);
883 }
884 /* get firmware revision */
885 reg = crom_search_key(cc, CSRKEY_FIRM_VER);
886 if (reg != NULL)
887 snprintf(sdev->revision, sizeof(sdev->revision), "%06x",
888 reg->val);
889 /* get product string */
890 crom_search_key(cc, CSRKEY_MODEL);
891 crom_next(cc);
892 crom_parse_text(cc, sdev->product, sizeof(sdev->product));
893 }
894
895 static void
896 sbp_login_callout(void *arg)
897 {
898 struct sbp_dev *sdev = (struct sbp_dev *)arg;
899
900 sbp_mgm_orb(sdev, ORB_FUN_LGI, NULL);
901 }
902
903 static void
904 sbp_login(struct sbp_dev *sdev)
905 {
906 struct sbp_softc *sc = sdev->target->sbp;
907 struct timeval delta;
908 struct timeval t;
909 int ticks = 0;
910
911 microtime(&delta);
912 timersub(&delta, &sc->sc_last_busreset, &delta);
913 t.tv_sec = login_delay / 1000;
914 t.tv_usec = (login_delay % 1000) * 1000;
915 timersub(&t, &delta, &t);
916 if (t.tv_sec >= 0 && t.tv_usec > 0)
917 ticks = (t.tv_sec * 1000 + t.tv_usec / 1000) * hz / 1000;
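	/*
	 * ticks is whatever portion of login_delay has not yet elapsed
	 * since the last bus reset, converted to clock ticks; if the delay
	 * has already passed, ticks stays 0 and the login ORB is issued on
	 * the next callout tick.
	 */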
918 SBP_DEBUG(0)
919 printf("%s: sec = %lld usec = %ld ticks = %d\n", __func__,
920 (long long)t.tv_sec, (long)t.tv_usec, ticks);
921 END_DEBUG
922 callout_schedule(&sdev->login_callout, ticks);
923 }
924
925 static void
926 sbp_probe_target(void *arg)
927 {
928 struct sbp_target *target = (struct sbp_target *)arg;
929 struct sbp_dev *sdev;
930 int i;
931
932 SBP_DEBUG(1)
933 printf("%s %d\n", __func__, target->target_id);
934 END_DEBUG
935
936 sbp_alloc_lun(target);
937
938 /* XXX untimeout mgm_ocb and dequeue */
939 for (i = 0; i < target->num_lun; i++) {
940 sdev = target->luns[i];
941 if (sdev == NULL || sdev->status == SBP_DEV_DEAD)
942 continue;
943
944 if (sdev->periph != NULL) {
945 scsipi_periph_freeze(sdev->periph, 1);
946 sdev->freeze++;
947 }
948 sbp_probe_lun(sdev);
949 sbp_show_sdev_info(sdev);
950
951 sbp_abort_all_ocbs(sdev, XS_RESET);
952 switch (sdev->status) {
953 case SBP_DEV_RESET:
954 /* new or revived target */
955 if (auto_login)
956 sbp_login(sdev);
957 break;
958 case SBP_DEV_TOATTACH:
959 case SBP_DEV_PROBE:
960 case SBP_DEV_ATTACHED:
961 case SBP_DEV_RETRY:
962 default:
963 sbp_mgm_orb(sdev, ORB_FUN_RCN, NULL);
964 break;
965 }
966 }
967 }
968
969 static void
970 sbp_post_busreset(void *arg)
971 {
972 struct sbp_softc *sc = (struct sbp_softc *)arg;
973 struct sbp_target *target = &sc->sc_target;
974 struct fw_device *fwdev = target->fwdev;
975 int alive;
976
977 alive = SBP_FWDEV_ALIVE(fwdev);
978 SBP_DEBUG(0)
979 printf("sbp_post_busreset\n");
980 if (!alive)
981 printf("not alive\n");
982 END_DEBUG
983 microtime(&sc->sc_last_busreset);
984
985 if (!alive)
986 return;
987
988 scsipi_channel_freeze(&sc->sc_channel, 1);
989 }
990
991 static void
992 sbp_post_explore(void *arg)
993 {
994 struct sbp_softc *sc = (struct sbp_softc *)arg;
995 struct sbp_target *target = &sc->sc_target;
996 struct fw_device *fwdev = target->fwdev;
997 int alive;
998
999 alive = SBP_FWDEV_ALIVE(fwdev);
1000 SBP_DEBUG(0)
1001 printf("sbp_post_explore (sbp_cold=%d)\n", sbp_cold);
1002 if (!alive)
1003 printf("not alive\n");
1004 END_DEBUG
1005 if (!alive)
1006 return;
1007
1008 if (!firewire_phydma_enable)
1009 return;
1010
1011 if (sbp_cold > 0)
1012 sbp_cold--;
1013
1014 SBP_DEBUG(0)
1015 printf("sbp_post_explore: EUI:%08x%08x ", fwdev->eui.hi, fwdev->eui.lo);
1016 END_DEBUG
1017 sbp_probe_target((void *)target);
1018 if (target->num_lun == 0)
1019 sbp_free_target(target);
1020
1021 scsipi_channel_thaw(&sc->sc_channel, 1);
1022 }
1023
1024 #if NEED_RESPONSE
1025 static void
1026 sbp_loginres_callback(struct fw_xfer *xfer)
1027 {
1028 struct sbp_dev *sdev = (struct sbp_dev *)xfer->sc;
1029 struct sbp_softc *sc = sdev->target->sbp;
1030
1031 SBP_DEBUG(1)
1032 printf("sbp_loginres_callback\n");
1033 END_DEBUG
1034 /* recycle */
1035 mutex_enter(&sc->sc_fwb.fwb_mtx);
1036 STAILQ_INSERT_TAIL(&sc->sc_fwb.xferlist, xfer, link);
1037 mutex_exit(&sc->sc_fwb.fwb_mtx);
1038 return;
1039 }
1040 #endif
1041
1042 static inline void
1043 sbp_xfer_free(struct fw_xfer *xfer)
1044 {
1045 struct sbp_dev *sdev = (struct sbp_dev *)xfer->sc;
1046 struct sbp_softc *sc = sdev->target->sbp;
1047
1048 fw_xfer_unload(xfer);
1049 mutex_enter(&sc->sc_mtx);
1050 STAILQ_INSERT_TAIL(&sdev->target->xferlist, xfer, link);
1051 mutex_exit(&sc->sc_mtx);
1052 }
1053
1054 static void
1055 sbp_reset_start_callback(struct fw_xfer *xfer)
1056 {
1057 struct sbp_dev *tsdev, *sdev = (struct sbp_dev *)xfer->sc;
1058 struct sbp_target *target = sdev->target;
1059 int i;
1060
1061 if (xfer->resp != 0)
1062 aprint_error("%s: sbp_reset_start failed: resp=%d\n",
1063 sdev->bustgtlun, xfer->resp);
1064
1065 for (i = 0; i < target->num_lun; i++) {
1066 tsdev = target->luns[i];
1067 if (tsdev != NULL && tsdev->status == SBP_DEV_LOGIN)
1068 sbp_login(tsdev);
1069 }
1070 }
1071
1072 static void
1073 sbp_reset_start(struct sbp_dev *sdev)
1074 {
1075 struct fw_xfer *xfer;
1076 struct fw_pkt *fp;
1077
1078 SBP_DEBUG(0)
1079 printf("%s: sbp_reset_start: %s\n",
1080 device_xname(sdev->target->sbp->sc_fd.dev), sdev->bustgtlun);
1081 END_DEBUG
1082
1083 xfer = sbp_write_cmd(sdev, FWTCODE_WREQQ, 0);
1084 if (xfer == NULL)
1085 return;
1086 xfer->hand = sbp_reset_start_callback;
1087 fp = &xfer->send.hdr;
1088 fp->mode.wreqq.dest_hi = 0xffff;
1089 fp->mode.wreqq.dest_lo = 0xf0000000 | RESET_START;
1090 fp->mode.wreqq.data = htonl(0xf);
1091 if (fw_asyreq(xfer->fc, -1, xfer) != 0)
1092 sbp_xfer_free(xfer);
1093 }
1094
1095 static void
1096 sbp_mgm_callback(struct fw_xfer *xfer)
1097 {
1098 struct sbp_dev *sdev;
1099
1100 sdev = (struct sbp_dev *)xfer->sc;
1101
1102 SBP_DEBUG(1)
1103 printf("%s: sbp_mgm_callback: %s\n",
1104 device_xname(sdev->target->sbp->sc_fd.dev), sdev->bustgtlun);
1105 END_DEBUG
1106 sbp_xfer_free(xfer);
1107 return;
1108 }
1109
1110 static void
1111 sbp_scsipi_scan_target(void *arg)
1112 {
1113 struct sbp_target *target = (struct sbp_target *)arg;
1114 struct sbp_softc *sc = target->sbp;
1115 struct sbp_dev *sdev;
1116 struct scsipi_channel *chan = &sc->sc_channel;
1117 struct scsibus_softc *sc_bus = device_private(sc->sc_bus);
1118 int lun, yet;
1119
1120 do {
1121 mutex_enter(&sc->sc_mtx);
1122 cv_wait_sig(&sc->sc_cv, &sc->sc_mtx);
1123 mutex_exit(&sc->sc_mtx);
1124 yet = 0;
1125
1126 for (lun = 0; lun < target->num_lun; lun++) {
1127 sdev = target->luns[lun];
1128 if (sdev == NULL)
1129 continue;
1130 if (sdev->status != SBP_DEV_PROBE) {
1131 yet++;
1132 continue;
1133 }
1134
1135 if (sdev->periph == NULL) {
1136 if (chan->chan_nluns < target->num_lun)
1137 chan->chan_nluns = target->num_lun;
1138
1139 scsi_probe_bus(sc_bus, target->target_id,
1140 sdev->lun_id);
1141 sdev->periph = scsipi_lookup_periph(chan,
1142 target->target_id, lun);
1143 }
1144 sdev->status = SBP_DEV_ATTACHED;
1145 }
1146 } while (yet > 0);
1147
1148 sc->sc_lwp = NULL;
1149 kthread_exit(0);
1150
1151 /* NOTREACHED */
1152 }
1153
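/*
 * Mark the LUN for probing and wake the scan thread
 * (sbp_scsipi_scan_target), which attaches a scsipi periph for it via
 * scsi_probe_bus().
 */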
1154 static inline void
1155 sbp_scan_dev(struct sbp_dev *sdev)
1156 {
1157 struct sbp_softc *sc = sdev->target->sbp;
1158
1159 sdev->status = SBP_DEV_PROBE;
1160 mutex_enter(&sc->sc_mtx);
1161 cv_signal(&sdev->target->sbp->sc_cv);
1162 mutex_exit(&sc->sc_mtx);
1163 }
1164
1165
1166 static void
1167 sbp_do_attach(struct fw_xfer *xfer)
1168 {
1169 struct sbp_dev *sdev;
1170 struct sbp_target *target;
1171 struct sbp_softc *sc;
1172
1173 sdev = (struct sbp_dev *)xfer->sc;
1174 target = sdev->target;
1175 sc = target->sbp;
1176
1177 SBP_DEBUG(0)
1178 printf("%s:%s:%s\n", device_xname(sc->sc_fd.dev), __func__,
1179 sdev->bustgtlun);
1180 END_DEBUG
1181 sbp_xfer_free(xfer);
1182
1183 sbp_scan_dev(sdev);
1184 return;
1185 }
1186
1187 static void
1188 sbp_agent_reset_callback(struct fw_xfer *xfer)
1189 {
1190 struct sbp_dev *sdev = (struct sbp_dev *)xfer->sc;
1191 struct sbp_softc *sc = sdev->target->sbp;
1192
1193 SBP_DEBUG(1)
1194 printf("%s:%s:%s\n", device_xname(sc->sc_fd.dev), __func__,
1195 sdev->bustgtlun);
1196 END_DEBUG
1197 if (xfer->resp != 0)
1198 aprint_error_dev(sc->sc_fd.dev, "%s:%s: resp=%d\n", __func__,
1199 sdev->bustgtlun, xfer->resp);
1200
1201 sbp_xfer_free(xfer);
1202 if (sdev->periph != NULL) {
1203 scsipi_periph_thaw(sdev->periph, sdev->freeze);
1204 scsipi_channel_thaw(&sc->sc_channel, 0);
1205 sdev->freeze = 0;
1206 }
1207 }
1208
1209 static void
1210 sbp_agent_reset(struct sbp_dev *sdev)
1211 {
1212 struct fw_xfer *xfer;
1213 struct fw_pkt *fp;
1214
1215 SBP_DEBUG(0)
1216 printf("%s:%s:%s\n", device_xname(sdev->target->sbp->sc_fd.dev),
1217 __func__, sdev->bustgtlun);
1218 END_DEBUG
1219 xfer = sbp_write_cmd(sdev, FWTCODE_WREQQ, 0x04);
1220 if (xfer == NULL)
1221 return;
1222 if (sdev->status == SBP_DEV_ATTACHED || sdev->status == SBP_DEV_PROBE)
1223 xfer->hand = sbp_agent_reset_callback;
1224 else
1225 xfer->hand = sbp_do_attach;
1226 fp = &xfer->send.hdr;
1227 fp->mode.wreqq.data = htonl(0xf);
1228 if (fw_asyreq(xfer->fc, -1, xfer) != 0)
1229 sbp_xfer_free(xfer);
1230 sbp_abort_all_ocbs(sdev, XS_RESET);
1231 }
1232
1233 static void
1234 sbp_busy_timeout_callback(struct fw_xfer *xfer)
1235 {
1236 struct sbp_dev *sdev = (struct sbp_dev *)xfer->sc;
1237
1238 SBP_DEBUG(1)
1239 printf("%s:%s:%s\n", device_xname(sdev->target->sbp->sc_fd.dev),
1240 __func__, sdev->bustgtlun);
1241 END_DEBUG
1242 sbp_xfer_free(xfer);
1243 sbp_agent_reset(sdev);
1244 }
1245
1246 static void
1247 sbp_busy_timeout(struct sbp_dev *sdev)
1248 {
1249 struct fw_pkt *fp;
1250 struct fw_xfer *xfer;
1251
1252 SBP_DEBUG(0)
1253 printf("%s:%s:%s\n", device_xname(sdev->target->sbp->sc_fd.dev),
1254 __func__, sdev->bustgtlun);
1255 END_DEBUG
1256 xfer = sbp_write_cmd(sdev, FWTCODE_WREQQ, 0);
1257 if (xfer == NULL)
1258 return;
1259 xfer->hand = sbp_busy_timeout_callback;
1260 fp = &xfer->send.hdr;
1261 fp->mode.wreqq.dest_hi = 0xffff;
1262 fp->mode.wreqq.dest_lo = 0xf0000000 | BUSY_TIMEOUT;
1263 fp->mode.wreqq.data = htonl((1 << (13+12)) | 0xf);
1264 if (fw_asyreq(xfer->fc, -1, xfer) != 0)
1265 sbp_xfer_free(xfer);
1266 }
1267
1268 static void
1269 sbp_orb_pointer_callback(struct fw_xfer *xfer)
1270 {
1271 struct sbp_dev *sdev = (struct sbp_dev *)xfer->sc;
1272 struct sbp_softc *sc = sdev->target->sbp;
1273
1274 SBP_DEBUG(1)
1275 printf("%s:%s:%s\n", device_xname(sc->sc_fd.dev), __func__,
1276 sdev->bustgtlun);
1277 END_DEBUG
1278 if (xfer->resp != 0)
1279 aprint_error_dev(sc->sc_fd.dev, "%s:%s: xfer->resp = %d\n",
1280 __func__, sdev->bustgtlun, xfer->resp);
1281 sbp_xfer_free(xfer);
1282 sdev->flags &= ~ORB_POINTER_ACTIVE;
1283
1284 if ((sdev->flags & ORB_POINTER_NEED) != 0) {
1285 struct sbp_ocb *ocb;
1286
1287 sdev->flags &= ~ORB_POINTER_NEED;
1288 ocb = STAILQ_FIRST(&sdev->ocbs);
1289 if (ocb != NULL)
1290 sbp_orb_pointer(sdev, ocb);
1291 }
1292 return;
1293 }
1294
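/*
 * Write the next ORB's address to the target's ORB_POINTER register
 * (command block agent offset 0x08) as a block write of our node ID and
 * the ORB bus address.  While a write is in flight ORB_POINTER_ACTIVE is
 * set and further requests are coalesced through ORB_POINTER_NEED (see
 * the callback above).
 */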
1295 static void
1296 sbp_orb_pointer(struct sbp_dev *sdev, struct sbp_ocb *ocb)
1297 {
1298 struct sbp_softc *sc = sdev->target->sbp;
1299 struct fw_xfer *xfer;
1300 struct fw_pkt *fp;
1301
1302 SBP_DEBUG(1)
1303 printf("%s:%s:%s: 0x%08x\n", device_xname(sc->sc_fd.dev), __func__,
1304 sdev->bustgtlun, (uint32_t)ocb->bus_addr);
1305 END_DEBUG
1306
1307 if ((sdev->flags & ORB_POINTER_ACTIVE) != 0) {
1308 SBP_DEBUG(0)
1309 printf("%s: orb pointer active\n", __func__);
1310 END_DEBUG
1311 sdev->flags |= ORB_POINTER_NEED;
1312 return;
1313 }
1314
1315 sdev->flags |= ORB_POINTER_ACTIVE;
1316 xfer = sbp_write_cmd(sdev, FWTCODE_WREQB, 0x08);
1317 if (xfer == NULL)
1318 return;
1319 xfer->hand = sbp_orb_pointer_callback;
1320
1321 fp = &xfer->send.hdr;
1322 fp->mode.wreqb.len = 8;
1323 fp->mode.wreqb.extcode = 0;
1324 xfer->send.payload[0] =
1325 htonl(((sc->sc_fd.fc->nodeid | FWLOCALBUS) << 16));
1326 xfer->send.payload[1] = htonl((uint32_t)ocb->bus_addr);
1327
1328 if (fw_asyreq(xfer->fc, -1, xfer) != 0) {
1329 sbp_xfer_free(xfer);
1330 ocb->xs->error = XS_DRIVER_STUFFUP;
1331 scsipi_done(ocb->xs);
1332 }
1333 }
1334
1335 static void
1336 sbp_doorbell_callback(struct fw_xfer *xfer)
1337 {
1338 struct sbp_dev *sdev = (struct sbp_dev *)xfer->sc;
1339 struct sbp_softc *sc = sdev->target->sbp;
1340
1341 SBP_DEBUG(1)
1342 printf("%s:%s:%s\n", device_xname(sc->sc_fd.dev), __func__,
1343 sdev->bustgtlun);
1344 END_DEBUG
1345 if (xfer->resp != 0) {
1346 aprint_error_dev(sc->sc_fd.dev, "%s: xfer->resp = %d\n",
1347 __func__, xfer->resp);
1348 }
1349 sbp_xfer_free(xfer);
1350 sdev->flags &= ~ORB_DOORBELL_ACTIVE;
1351 if ((sdev->flags & ORB_DOORBELL_NEED) != 0) {
1352 sdev->flags &= ~ORB_DOORBELL_NEED;
1353 sbp_doorbell(sdev);
1354 }
1355 return;
1356 }
1357
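/*
 * Ring the target's DOORBELL register (command block agent offset 0x10) so
 * it re-fetches the linked ORB list; this path is used when the doorbell
 * mode (hw.sbp.use_doorbell) is enabled.  ORB_DOORBELL_ACTIVE and
 * ORB_DOORBELL_NEED coalesce requests just as the ORB pointer path does.
 */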
1358 static void
1359 sbp_doorbell(struct sbp_dev *sdev)
1360 {
1361 struct fw_xfer *xfer;
1362 struct fw_pkt *fp;
1363
1364 SBP_DEBUG(1)
1365 printf("%s:%s:%s\n", device_xname(sdev->target->sbp->sc_fd.dev),
1366 __func__, sdev->bustgtlun);
1367 END_DEBUG
1368
1369 if ((sdev->flags & ORB_DOORBELL_ACTIVE) != 0) {
1370 sdev->flags |= ORB_DOORBELL_NEED;
1371 return;
1372 }
1373 sdev->flags |= ORB_DOORBELL_ACTIVE;
1374 xfer = sbp_write_cmd(sdev, FWTCODE_WREQQ, 0x10);
1375 if (xfer == NULL)
1376 return;
1377 xfer->hand = sbp_doorbell_callback;
1378 fp = &xfer->send.hdr;
1379 fp->mode.wreqq.data = htonl(0xf);
1380 if (fw_asyreq(xfer->fc, -1, xfer) != 0)
1381 sbp_xfer_free(xfer);
1382 }
1383
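/*
 * Obtain a fw_xfer for a register write to this device: reuse one from the
 * target's cached xferlist or allocate a new one (up to a small cap), and
 * pre-fill a write-request header addressed at the login command block
 * agent plus 'offset' (callers may override the destination afterwards).
 */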
1384 static struct fw_xfer *
1385 sbp_write_cmd(struct sbp_dev *sdev, int tcode, int offset)
1386 {
1387 struct sbp_softc *sc;
1388 struct fw_xfer *xfer;
1389 struct fw_pkt *fp;
1390 struct sbp_target *target;
1391 int new = 0;
1392
1393 target = sdev->target;
1394 sc = target->sbp;
1395 mutex_enter(&sc->sc_mtx);
1396 xfer = STAILQ_FIRST(&target->xferlist);
1397 if (xfer == NULL) {
1398 if (target->n_xfer > 5 /* XXX */) {
1399 aprint_error_dev(sc->sc_fd.dev,
1400 "no more xfer for this target\n");
1401 mutex_exit(&sc->sc_mtx);
1402 return NULL;
1403 }
1404 xfer = fw_xfer_alloc_buf(M_SBP, 8, 0);
1405 if (xfer == NULL) {
1406 aprint_error_dev(sc->sc_fd.dev,
1407 "fw_xfer_alloc_buf failed\n");
1408 mutex_exit(&sc->sc_mtx);
1409 return NULL;
1410 }
1411 target->n_xfer++;
1412 SBP_DEBUG(0)
1413 printf("sbp: alloc %d xfer\n", target->n_xfer);
1414 END_DEBUG
1415 new = 1;
1416 } else
1417 STAILQ_REMOVE_HEAD(&target->xferlist, link);
1418 mutex_exit(&sc->sc_mtx);
1419
1420 microtime(&xfer->tv);
1421
1422 if (new) {
1423 xfer->recv.pay_len = 0;
1424 xfer->send.spd = uimin(target->fwdev->speed, max_speed);
1425 xfer->fc = target->sbp->sc_fd.fc;
1426 }
1427
1428 if (tcode == FWTCODE_WREQB)
1429 xfer->send.pay_len = 8;
1430 else
1431 xfer->send.pay_len = 0;
1432
1433 xfer->sc = (void *)sdev;
1434 fp = &xfer->send.hdr;
1435 fp->mode.wreqq.dest_hi = sdev->login->cmd_hi;
1436 fp->mode.wreqq.dest_lo = sdev->login->cmd_lo + offset;
1437 fp->mode.wreqq.tlrt = 0;
1438 fp->mode.wreqq.tcode = tcode;
1439 fp->mode.wreqq.pri = 0;
1440 fp->mode.wreqq.dst = FWLOCALBUS | target->fwdev->dst;
1441
1442 return xfer;
1443 }
1444
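/*
 * Build a management ORB (login, logout, reconnect, ...) in the per-LUN
 * DMA area and post its address to the target's MANAGEMENT_AGENT register
 * (mgm_hi/mgm_lo) with a block write.  Only one management ORB is
 * outstanding at a time (mgm_ocb_cur); the rest wait on mgm_ocb_queue and
 * are kicked off with ORB_FUN_RUNQUEUE from the status callback.
 */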
1445 static void
1446 sbp_mgm_orb(struct sbp_dev *sdev, int func, struct sbp_ocb *aocb)
1447 {
1448 struct fw_xfer *xfer;
1449 struct fw_pkt *fp;
1450 struct sbp_ocb *ocb;
1451 struct sbp_target *target;
1452 int nid, dv_unit;
1453
1454 target = sdev->target;
1455 nid = target->sbp->sc_fd.fc->nodeid | FWLOCALBUS;
1456 dv_unit = device_unit(target->sbp->sc_fd.dev);
1457
1458 mutex_enter(&target->sbp->sc_mtx);
1459 if (func == ORB_FUN_RUNQUEUE) {
1460 ocb = STAILQ_FIRST(&target->mgm_ocb_queue);
1461 if (target->mgm_ocb_cur != NULL || ocb == NULL) {
1462 mutex_exit(&target->sbp->sc_mtx);
1463 return;
1464 }
1465 STAILQ_REMOVE_HEAD(&target->mgm_ocb_queue, ocb);
1466 mutex_exit(&target->sbp->sc_mtx);
1467 goto start;
1468 }
1469 if ((ocb = sbp_get_ocb(sdev)) == NULL) {
1470 mutex_exit(&target->sbp->sc_mtx);
1471 /* XXX */
1472 return;
1473 }
1474 mutex_exit(&target->sbp->sc_mtx);
1475 ocb->flags = OCB_ACT_MGM;
1476 ocb->sdev = sdev;
1477
1478 memset(ocb->orb, 0, sizeof(ocb->orb));
1479 ocb->orb[6] = htonl((nid << 16) | SBP_BIND_HI);
1480 ocb->orb[7] = htonl(SBP_DEV2ADDR(dv_unit, sdev->lun_id));
1481
1482 SBP_DEBUG(0)
1483 printf("%s:%s:%s: %s\n", device_xname(sdev->target->sbp->sc_fd.dev),
1484 __func__, sdev->bustgtlun, orb_fun_name[(func>>16)&0xf]);
1485 END_DEBUG
1486 switch (func) {
1487 case ORB_FUN_LGI:
1488 {
1489 const off_t sbp_login_off =
1490 sizeof(struct sbp_ocb) * SBP_QUEUE_LEN;
1491
1492 ocb->orb[0] = ocb->orb[1] = 0; /* password */
1493 ocb->orb[2] = htonl(nid << 16);
1494 ocb->orb[3] = htonl(sdev->dma.bus_addr + sbp_login_off);
1495 ocb->orb[4] = htonl(ORB_NOTIFY | sdev->lun_id);
1496 if (ex_login)
1497 ocb->orb[4] |= htonl(ORB_EXV);
1498 ocb->orb[5] = htonl(SBP_LOGIN_SIZE);
1499 bus_dmamap_sync(sdev->dma.dma_tag, sdev->dma.dma_map,
1500 sbp_login_off, SBP_LOGIN_SIZE, BUS_DMASYNC_PREREAD);
1501 break;
1502 }
1503
1504 case ORB_FUN_ATA:
1505 ocb->orb[0] = htonl((0 << 16) | 0);
1506 ocb->orb[1] = htonl(aocb->bus_addr & 0xffffffff);
1507 /* fall through */
1508 case ORB_FUN_RCN:
1509 case ORB_FUN_LGO:
1510 case ORB_FUN_LUR:
1511 case ORB_FUN_RST:
1512 case ORB_FUN_ATS:
1513 ocb->orb[4] = htonl(ORB_NOTIFY | func | sdev->login->id);
1514 break;
1515 }
1516
1517 if (target->mgm_ocb_cur != NULL) {
1518 /* there is a standing ORB */
1519 mutex_enter(&target->sbp->sc_mtx);
1520 STAILQ_INSERT_TAIL(&sdev->target->mgm_ocb_queue, ocb, ocb);
1521 mutex_exit(&target->sbp->sc_mtx);
1522 return;
1523 }
1524 start:
1525 target->mgm_ocb_cur = ocb;
1526
1527 callout_reset(&target->mgm_ocb_timeout, 5 * hz, sbp_mgm_timeout, ocb);
1528 xfer = sbp_write_cmd(sdev, FWTCODE_WREQB, 0);
1529 if (xfer == NULL)
1530 return;
1531 xfer->hand = sbp_mgm_callback;
1532
1533 fp = &xfer->send.hdr;
1534 fp->mode.wreqb.dest_hi = sdev->target->mgm_hi;
1535 fp->mode.wreqb.dest_lo = sdev->target->mgm_lo;
1536 fp->mode.wreqb.len = 8;
1537 fp->mode.wreqb.extcode = 0;
1538 xfer->send.payload[0] = htonl(nid << 16);
1539 xfer->send.payload[1] = htonl(ocb->bus_addr & 0xffffffff);
1540
1541 /* cache writeback & invalidate (required for the ORB_FUN_LGI function) */
1542 /* XXX when aborting an OCB, should we sync the POST operation? */
1543 SBP_ORB_DMA_SYNC(sdev->dma, ocb->index, BUS_DMASYNC_PREWRITE);
1544 if (fw_asyreq(xfer->fc, -1, xfer) != 0)
1545 sbp_xfer_free(xfer);
1546 }
1547
1548 static void
1549 sbp_print_scsi_cmd(struct sbp_ocb *ocb)
1550 {
1551 struct scsipi_xfer *xs = ocb->xs;
1552
1553 printf("%s:%d:%d:"
1554 " cmd: %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x,"
1555 " flags: 0x%02x, %db cmd/%db data\n",
1556 device_xname(ocb->sdev->target->sbp->sc_fd.dev),
1557 xs->xs_periph->periph_target,
1558 xs->xs_periph->periph_lun,
1559 xs->cmd->opcode,
1560 xs->cmd->bytes[0], xs->cmd->bytes[1],
1561 xs->cmd->bytes[2], xs->cmd->bytes[3],
1562 xs->cmd->bytes[4], xs->cmd->bytes[5],
1563 xs->cmd->bytes[6], xs->cmd->bytes[7],
1564 xs->cmd->bytes[8],
1565 xs->xs_control & (XS_CTL_DATA_IN | XS_CTL_DATA_OUT),
1566 xs->cmdlen, xs->datalen);
1567 }
1568
1569 static void
1570 sbp_scsi_status(struct sbp_status *sbp_status, struct sbp_ocb *ocb)
1571 {
1572 struct sbp_cmd_status *sbp_cmd_status;
1573 struct scsi_sense_data *sense = &ocb->xs->sense.scsi_sense;
1574
1575 sbp_cmd_status = (struct sbp_cmd_status *)sbp_status->data;
1576
1577 SBP_DEBUG(0)
1578 sbp_print_scsi_cmd(ocb);
1579 /* XXX need to decode status */
1580 printf("%s:"
1581 " SCSI status %x sfmt %x valid %x key %x code %x qlfr %x len %d\n",
1582 ocb->sdev->bustgtlun,
1583 sbp_cmd_status->status,
1584 sbp_cmd_status->sfmt,
1585 sbp_cmd_status->valid,
1586 sbp_cmd_status->s_key,
1587 sbp_cmd_status->s_code,
1588 sbp_cmd_status->s_qlfr,
1589 sbp_status->len);
1590 END_DEBUG
1591
1592 switch (sbp_cmd_status->status) {
1593 case SCSI_CHECK:
1594 case SCSI_BUSY:
1595 case SCSI_TERMINATED:
1596 if (sbp_cmd_status->sfmt == SBP_SFMT_CURR)
1597 sense->response_code = SSD_RCODE_CURRENT;
1598 else
1599 sense->response_code = SSD_RCODE_DEFERRED;
1600 if (sbp_cmd_status->valid)
1601 sense->response_code |= SSD_RCODE_VALID;
1602 sense->flags = sbp_cmd_status->s_key;
1603 if (sbp_cmd_status->mark)
1604 sense->flags |= SSD_FILEMARK;
1605 if (sbp_cmd_status->eom)
1606 sense->flags |= SSD_EOM;
1607 if (sbp_cmd_status->ill_len)
1608 sense->flags |= SSD_ILI;
1609
1610 memcpy(sense->info, &sbp_cmd_status->info, 4);
1611
1612 if (sbp_status->len <= 1)
1613 /* XXX not a SCSI status; shouldn't happen */
1614 sense->extra_len = 0;
1615 else if (sbp_status->len <= 4)
1616 /* add_sense_code(_qual), info, cmd_spec_info */
1617 sense->extra_len = 6;
1618 else
1619 /* fru, sense_key_spec */
1620 sense->extra_len = 10;
1621
1622 memcpy(sense->csi, &sbp_cmd_status->cdb, 4);
1623
1624 sense->asc = sbp_cmd_status->s_code;
1625 sense->ascq = sbp_cmd_status->s_qlfr;
1626 sense->fru = sbp_cmd_status->fru;
1627
1628 memcpy(sense->sks.sks_bytes, sbp_cmd_status->s_keydep, 3);
1629 ocb->xs->error = XS_SENSE;
1630 ocb->xs->xs_status = sbp_cmd_status->status;
1631 /*
1632 {
1633 uint8_t j, *tmp;
1634 tmp = sense;
1635 for (j = 0; j < 32; j += 8)
1636 aprint_normal(
1637 "sense %02x%02x %02x%02x %02x%02x %02x%02x\n",
1638 tmp[j], tmp[j+1], tmp[j+2], tmp[j+3],
1639 tmp[j+4], tmp[j+5], tmp[j+6], tmp[j+7]);
1640
1641 }
1642 */
1643 break;
1644 default:
1645 aprint_error_dev(ocb->sdev->target->sbp->sc_fd.dev,
1646 "%s:%s: unknown scsi status 0x%x\n",
1647 __func__, ocb->sdev->bustgtlun, sbp_cmd_status->status);
1648 }
1649 }
1650
1651 static void
1652 sbp_fix_inq_data(struct sbp_ocb *ocb)
1653 {
1654 struct scsipi_xfer *xs = ocb->xs;
1655 struct sbp_dev *sdev;
1656 struct scsipi_inquiry_data *inq =
1657 (struct scsipi_inquiry_data *)xs->data;
1658
1659 sdev = ocb->sdev;
1660
1661 #if 0
1662 /*
1663 * NetBSD always assumes 0 for the EVPD bit and 'Page Code'.
1664 */
1665 #define SI_EVPD 0x01
1666 if (xs->cmd->bytes[0] & SI_EVPD)
1667 return;
1668 #endif
1669 SBP_DEBUG(1)
1670 printf("%s:%s:%s\n", device_xname(sdev->target->sbp->sc_fd.dev),
1671 __func__, sdev->bustgtlun);
1672 END_DEBUG
1673 switch (inq->device & SID_TYPE) {
1674 case T_DIRECT:
1675 #if 0
1676 /*
1677 * XXX Convert Direct Access device to RBC.
1678 * I've never seen FireWire DA devices which support READ_6.
1679 */
1680 if ((inq->device & SID_TYPE) == T_DIRECT)
1681 inq->device |= T_SIMPLE_DIRECT; /* T_DIRECT == 0 */
1682 #endif
1683 /* FALLTHROUGH */
1684
1685 case T_SIMPLE_DIRECT:
1686 /*
1687 * Override vendor/product/revision information.
1688 * Some devices sometimes return strange strings.
1689 */
1690 #if 1
1691 memcpy(inq->vendor, sdev->vendor, sizeof(inq->vendor));
1692 memcpy(inq->product, sdev->product, sizeof(inq->product));
1693 memcpy(inq->revision + 2, sdev->revision,
1694 sizeof(inq->revision));
1695 #endif
1696 break;
1697 }
1698 /*
1699 * Force tagged queuing on or off.
1700 * XXX CAM also checks SCP_QUEUE_DQUE flag in the control mode page.
1701 */
1702 if (sbp_tags > 0)
1703 inq->flags3 |= SID_CmdQue;
1704 else if (sbp_tags < 0)
1705 inq->flags3 &= ~SID_CmdQue;
1706
1707 }
1708
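/*
 * Status FIFO handler, called for block writes to the address range bound
 * in sbpattach().  Decode the LUN from the destination address, match the
 * status block against the current management ORB or a queued command OCB
 * (OCB_MATCH on orb_lo), complete it, and recycle the fw_xfer onto the
 * bind's xferlist.
 */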
1709 static void
1710 sbp_recv(struct fw_xfer *xfer)
1711 {
1712 struct fw_pkt *rfp;
1713 #if NEED_RESPONSE
1714 struct fw_pkt *sfp;
1715 #endif
1716 struct sbp_softc *sc;
1717 struct sbp_dev *sdev;
1718 struct sbp_ocb *ocb;
1719 struct sbp_login_res *login_res = NULL;
1720 struct sbp_status *sbp_status;
1721 struct sbp_target *target;
1722 int orb_fun, status_valid0, status_valid, l, reset_agent = 0;
1723 uint32_t addr;
1724 /*
1725 uint32_t *ld;
1726 ld = xfer->recv.buf;
1727 printf("sbp %x %d %d %08x %08x %08x %08x\n",
1728 xfer->resp, xfer->recv.len, xfer->recv.off, ntohl(ld[0]), ntohl(ld[1]), ntohl(ld[2]), ntohl(ld[3]));
1729 printf("sbp %08x %08x %08x %08x\n", ntohl(ld[4]), ntohl(ld[5]), ntohl(ld[6]), ntohl(ld[7]));
1730 printf("sbp %08x %08x %08x %08x\n", ntohl(ld[8]), ntohl(ld[9]), ntohl(ld[10]), ntohl(ld[11]));
1731 */
1732
1733 sc = (struct sbp_softc *)xfer->sc;
1734 if (xfer->resp != 0) {
1735 aprint_error_dev(sc->sc_fd.dev,
1736 "sbp_recv: xfer->resp = %d\n", xfer->resp);
1737 goto done0;
1738 }
1739 if (xfer->recv.payload == NULL) {
1740 aprint_error_dev(sc->sc_fd.dev,
1741 "sbp_recv: xfer->recv.payload == NULL\n");
1742 goto done0;
1743 }
1744 rfp = &xfer->recv.hdr;
1745 if (rfp->mode.wreqb.tcode != FWTCODE_WREQB) {
1746 aprint_error_dev(sc->sc_fd.dev,
1747 "sbp_recv: tcode = %d\n", rfp->mode.wreqb.tcode);
1748 goto done0;
1749 }
1750 sbp_status = (struct sbp_status *)xfer->recv.payload;
1751 addr = rfp->mode.wreqb.dest_lo;
1752 SBP_DEBUG(2)
1753 printf("received address 0x%x\n", addr);
1754 END_DEBUG
1755 target = &sc->sc_target;
1756 l = SBP_ADDR2LUN(addr);
1757 if (l >= target->num_lun || target->luns[l] == NULL) {
1758 aprint_error_dev(sc->sc_fd.dev,
1759 "sbp_recv1: invalid lun %d (target=%d)\n",
1760 l, target->target_id);
1761 goto done0;
1762 }
1763 sdev = target->luns[l];
1764
1765 ocb = NULL;
1766 switch (sbp_status->src) {
1767 case SRC_NEXT_EXISTS:
1768 case SRC_NO_NEXT:
1769 /* check mgm_ocb_cur first */
1770 ocb = target->mgm_ocb_cur;
1771 if (ocb != NULL)
1772 if (OCB_MATCH(ocb, sbp_status)) {
1773 callout_stop(&target->mgm_ocb_timeout);
1774 target->mgm_ocb_cur = NULL;
1775 break;
1776 }
1777 ocb = sbp_dequeue_ocb(sdev, sbp_status);
1778 if (ocb == NULL)
1779 aprint_error_dev(sc->sc_fd.dev,
1780 "%s:%s: No ocb(%x) on the queue\n", __func__,
1781 sdev->bustgtlun, ntohl(sbp_status->orb_lo));
1782 break;
1783 case SRC_UNSOL:
1784 /* unsolicited */
1785 aprint_error_dev(sc->sc_fd.dev,
1786 "%s:%s: unsolicit status received\n",
1787 __func__, sdev->bustgtlun);
1788 break;
1789 default:
1790 aprint_error_dev(sc->sc_fd.dev,
1791 "%s:%s: unknown sbp_status->src\n",
1792 __func__, sdev->bustgtlun);
1793 }
1794
1795 status_valid0 = (sbp_status->src < 2
1796 && sbp_status->resp == SBP_REQ_CMP
1797 && sbp_status->dead == 0);
1798 status_valid = (status_valid0 && sbp_status->status == 0);
1799
1800 if (!status_valid0 || debug > 2) {
1801 int status;
1802 SBP_DEBUG(0)
1803 printf("%s:%s:%s: ORB status src:%x resp:%x dead:%x"
1804 " len:%x stat:%x orb:%x%08x\n",
1805 device_xname(sc->sc_fd.dev), __func__, sdev->bustgtlun,
1806 sbp_status->src, sbp_status->resp, sbp_status->dead,
1807 sbp_status->len, sbp_status->status,
1808 ntohs(sbp_status->orb_hi), ntohl(sbp_status->orb_lo));
1809 END_DEBUG
1810 printf("%s:%s\n", device_xname(sc->sc_fd.dev), sdev->bustgtlun);
1811 status = sbp_status->status;
1812 switch (sbp_status->resp) {
1813 case SBP_REQ_CMP:
1814 if (status > MAX_ORB_STATUS0)
1815 printf("%s\n", orb_status0[MAX_ORB_STATUS0]);
1816 else
1817 printf("%s\n", orb_status0[status]);
1818 break;
1819 case SBP_TRANS_FAIL:
1820 printf("Obj: %s, Error: %s\n",
1821 orb_status1_object[(status>>6) & 3],
1822 orb_status1_serial_bus_error[status & 0xf]);
1823 break;
1824 case SBP_ILLE_REQ:
1825 printf("Illegal request\n");
1826 break;
1827 case SBP_VEND_DEP:
1828 printf("Vendor dependent\n");
1829 break;
1830 default:
1831 printf("unknown respose code %d\n", sbp_status->resp);
1832 }
1833 }
1834
1835 /* we have to reset the fetch agent if it's dead */
1836 if (sbp_status->dead) {
1837 if (sdev->periph != NULL) {
1838 scsipi_periph_freeze(sdev->periph, 1);
1839 sdev->freeze++;
1840 }
1841 reset_agent = 1;
1842 }
1843
1844 if (ocb == NULL)
1845 goto done;
1846
1847 switch (ntohl(ocb->orb[4]) & ORB_FMT_MSK) {
1848 case ORB_FMT_NOP:
1849 break;
1850 case ORB_FMT_VED:
1851 break;
1852 case ORB_FMT_STD:
1853 switch (ocb->flags) {
1854 case OCB_ACT_MGM:
1855 orb_fun = ntohl(ocb->orb[4]) & ORB_FUN_MSK;
1856 reset_agent = 0;
1857 switch (orb_fun) {
1858 case ORB_FUN_LGI:
1859 {
1860 const struct fwdma_alloc *dma = &sdev->dma;
1861 const off_t sbp_login_off =
1862 sizeof(struct sbp_ocb) * SBP_QUEUE_LEN;
1863
1864 bus_dmamap_sync(dma->dma_tag, dma->dma_map,
1865 sbp_login_off, SBP_LOGIN_SIZE,
1866 BUS_DMASYNC_POSTREAD);
1867 login_res = sdev->login;
1868 login_res->len = ntohs(login_res->len);
1869 login_res->id = ntohs(login_res->id);
1870 login_res->cmd_hi = ntohs(login_res->cmd_hi);
1871 login_res->cmd_lo = ntohl(login_res->cmd_lo);
1872 if (status_valid) {
1873 SBP_DEBUG(0)
1874 printf("%s:%s:%s: login:"
1875 " len %d, ID %d, cmd %08x%08x,"
1876 " recon_hold %d\n",
1877 device_xname(sc->sc_fd.dev),
1878 __func__, sdev->bustgtlun,
1879 login_res->len, login_res->id,
1880 login_res->cmd_hi,
1881 login_res->cmd_lo,
1882 ntohs(login_res->recon_hold));
1883 END_DEBUG
1884 sbp_busy_timeout(sdev);
1885 } else {
1886 /* forgot logout? */
1887 aprint_error_dev(sc->sc_fd.dev,
1888 "%s:%s: login failed\n",
1889 __func__, sdev->bustgtlun);
1890 sdev->status = SBP_DEV_RESET;
1891 }
1892 break;
1893 }
1894 case ORB_FUN_RCN:
1895 login_res = sdev->login;
1896 if (status_valid) {
1897 SBP_DEBUG(0)
1898 printf("%s:%s:%s: reconnect:"
1899 " len %d, ID %d, cmd %08x%08x\n",
1900 device_xname(sc->sc_fd.dev),
1901 __func__, sdev->bustgtlun,
1902 login_res->len, login_res->id,
1903 login_res->cmd_hi,
1904 login_res->cmd_lo);
1905 END_DEBUG
1906 sbp_agent_reset(sdev);
1907 } else {
1908 /* reconnection hold time exceeded? */
1909 SBP_DEBUG(0)
1910 aprint_error_dev(sc->sc_fd.dev,
1911 "%s:%s: reconnect failed\n",
1912 __func__, sdev->bustgtlun);
1913 END_DEBUG
1914 sbp_login(sdev);
1915 }
1916 break;
1917 case ORB_FUN_LGO:
1918 sdev->status = SBP_DEV_RESET;
1919 break;
1920 case ORB_FUN_RST:
1921 sbp_busy_timeout(sdev);
1922 break;
1923 case ORB_FUN_LUR:
1924 case ORB_FUN_ATA:
1925 case ORB_FUN_ATS:
1926 sbp_agent_reset(sdev);
1927 break;
1928 default:
1929 aprint_error_dev(sc->sc_fd.dev,
1930 "%s:%s: unknown function %d\n",
1931 __func__, sdev->bustgtlun, orb_fun);
1932 break;
1933 }
1934 sbp_mgm_orb(sdev, ORB_FUN_RUNQUEUE, NULL);
1935 break;
1936 case OCB_ACT_CMD:
1937 sdev->timeout = 0;
1938 if (ocb->xs != NULL) {
1939 struct scsipi_xfer *xs = ocb->xs;
1940
1941 if (sbp_status->len > 1)
1942 sbp_scsi_status(sbp_status, ocb);
1943 else if (sbp_status->resp != SBP_REQ_CMP)
1944 xs->error = XS_DRIVER_STUFFUP;
1945 else {
1946 xs->error = XS_NOERROR;
1947 xs->resid = 0;
1948 }
1950 /* fix up inq data */
1951 if (xs->cmd->opcode == INQUIRY)
1952 sbp_fix_inq_data(ocb);
1953 scsipi_done(xs);
1954 }
1955 break;
1956 default:
1957 break;
1958 }
1959 }
1960
1961 if (!use_doorbell)
1962 sbp_free_ocb(sdev, ocb);
1963 done:
1964 if (reset_agent)
1965 sbp_agent_reset(sdev);
1966
1967 done0:
1968 xfer->recv.pay_len = SBP_RECV_LEN;
1969 /* The received packet is usually small enough to be stored within
1970 * the buffer. In that case, the controller returns ack_complete and
1971 * no response is necessary.
1972 *
1973 * XXX fwohci.c and firewire.c should pass the event code (such as
1974 * ack_complete or ack_pending) up to the upper driver.
1975 */
1976 #if NEED_RESPONSE
1977 xfer->send.off = 0;
1978 sfp = (struct fw_pkt *)xfer->send.buf;
1979 sfp->mode.wres.dst = rfp->mode.wreqb.src;
1980 xfer->dst = sfp->mode.wres.dst;
1981 xfer->spd = uimin(sdev->target->fwdev->speed, max_speed);
1982 xfer->hand = sbp_loginres_callback;
1983
1984 sfp->mode.wres.tlrt = rfp->mode.wreqb.tlrt;
1985 sfp->mode.wres.tcode = FWTCODE_WRES;
1986 sfp->mode.wres.rtcode = 0;
1987 sfp->mode.wres.pri = 0;
1988
1989 if (fw_asyreq(xfer->fc, -1, xfer) != 0) {
1990 aprint_error_dev(sc->sc_fd.dev, "mgm_orb failed\n");
1991 mutex_enter(&sc->sc_fwb.fwb_mtx);
1992 STAILQ_INSERT_TAIL(&sc->sc_fwb.xferlist, xfer, link);
1993 mutex_exit(&sc->sc_fwb.fwb_mtx);
1994 }
1995 #else
1996 /* recycle */
1997 mutex_enter(&sc->sc_fwb.fwb_mtx);
1998 STAILQ_INSERT_TAIL(&sc->sc_fwb.xferlist, xfer, link);
1999 mutex_exit(&sc->sc_fwb.fwb_mtx);
2000 #endif
2001
2002 return;
2003
2004 }
2005
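/*
 * Send a LOGOUT management ORB to every LUN that is attached or in the
 * process of attaching, and cancel any pending login callouts.
 */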
2006 static int
2007 sbp_logout_all(struct sbp_softc *sbp)
2008 {
2009 struct sbp_target *target;
2010 struct sbp_dev *sdev;
2011 int i;
2012
2013 SBP_DEBUG(0)
2014 printf("sbp_logout_all\n");
2015 END_DEBUG
2016 target = &sbp->sc_target;
2017 if (target->luns != NULL) {
2018 for (i = 0; i < target->num_lun; i++) {
2019 sdev = target->luns[i];
2020 if (sdev == NULL)
2021 continue;
2022 callout_stop(&sdev->login_callout);
2023 if (sdev->status >= SBP_DEV_TOATTACH &&
2024 sdev->status <= SBP_DEV_ATTACHED)
2025 sbp_mgm_orb(sdev, ORB_FUN_LGO, NULL);
2026 }
2027 }
2028
2029 return 0;
2030 }
2031
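/*
 * Release the per-LUN resources: the OCB DMA maps and the DMA area
 * holding the OCBs and login response, then the sbp_dev itself.
 */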
2032 static void
2033 sbp_free_sdev(struct sbp_dev *sdev)
2034 {
2035 struct sbp_softc *sc;
2036 int i;
2037 
2038 if (sdev == NULL)
2039 return;
sc = sdev->target->sbp;
2040 for (i = 0; i < SBP_QUEUE_LEN; i++)
2041 bus_dmamap_destroy(sc->sc_dmat, sdev->ocb[i].dmamap);
2042 fwdma_free(sdev->dma.dma_tag, sdev->dma.dma_map, sdev->dma.v_addr);
2043 free(sdev, M_SBP);
2044 }
2045
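/*
 * Tear down a target: stop the management ORB timeout, free every LUN
 * and the pending transfer list, and forget the associated FireWire
 * device.
 */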
2046 static void
2047 sbp_free_target(struct sbp_target *target)
2048 {
2049 struct fw_xfer *xfer, *next;
2050 int i;
2051
2052 if (target->luns == NULL)
2053 return;
2054 callout_stop(&target->mgm_ocb_timeout);
2055 for (i = 0; i < target->num_lun; i++)
2056 sbp_free_sdev(target->luns[i]);
2057
2058 for (xfer = STAILQ_FIRST(&target->xferlist);
2059 xfer != NULL; xfer = next) {
2060 next = STAILQ_NEXT(xfer, link);
2061 fw_xfer_free_buf(xfer);
2062 }
2063 STAILQ_INIT(&target->xferlist);
2064 free(target->luns, M_SBP);
2065 target->num_lun = 0;
2066 target->luns = NULL;
2067 target->fwdev = NULL;
2068 }
2069
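/*
 * Detach the scsipi periph bound to this LUN (unless the device is
 * already dead or being reset) and abort any outstanding OCBs.
 */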
2070 static void
2071 sbp_scsipi_detach_sdev(struct sbp_dev *sdev)
2072 {
2073 struct sbp_target *target;
2074 struct sbp_softc *sbp;
2075
2076 if (sdev == NULL)
2077 return;
2078
2079 target = sdev->target;
2080 if (target == NULL)
2081 return;
2082
2083 sbp = target->sbp;
2084
2085 if (sdev->status == SBP_DEV_DEAD)
2086 return;
2087 if (sdev->status == SBP_DEV_RESET)
2088 return;
2089 if (sdev->periph != NULL) {
2090 scsipi_periph_thaw(sdev->periph, sdev->freeze);
2091 scsipi_channel_thaw(&sbp->sc_channel, 0); /* XXXX */
2092 sdev->freeze = 0;
2093 if (scsipi_target_detach(&sbp->sc_channel,
2094 target->target_id, sdev->lun_id, DETACH_FORCE) != 0) {
2095 aprint_error_dev(sbp->sc_fd.dev, "detach failed\n");
2096 }
2097 sdev->periph = NULL;
2098 }
2099 sbp_abort_all_ocbs(sdev, XS_DRIVER_STUFFUP);
2100 }
2101
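/*
 * Detach every LUN of the target from scsipi and then detach the
 * child bus instance itself.
 */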
2102 static void
2103 sbp_scsipi_detach_target(struct sbp_target *target)
2104 {
2105 struct sbp_softc *sbp = target->sbp;
2106 int i;
2107
2108 if (target->luns != NULL) {
2109 SBP_DEBUG(0)
2110 printf("sbp_scsipi_detach_target %d\n", target->target_id);
2111 END_DEBUG
2112 for (i = 0; i < target->num_lun; i++)
2113 sbp_scsipi_detach_sdev(target->luns[i]);
2114 if (config_detach(sbp->sc_bus, DETACH_FORCE) != 0)
2115 aprint_error_dev(sbp->sc_fd.dev, "%d detach failed\n",
2116 target->target_id);
2117 sbp->sc_bus = NULL;
2118 }
2119 }
2120
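/*
 * Reset all LUNs of the target: freeze their periphs, abort their
 * outstanding OCBs, and then either send a TARGET RESET management ORB
 * (method 1) or restart from a reset and fresh login (method 2).
 */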
2121 static void
2122 sbp_target_reset(struct sbp_dev *sdev, int method)
2123 {
2124 struct sbp_target *target = sdev->target;
2125 struct sbp_dev *tsdev;
2126 int i;
2127
2128 for (i = 0; i < target->num_lun; i++) {
2129 tsdev = target->luns[i];
2130 if (tsdev == NULL)
2131 continue;
2132 if (tsdev->status == SBP_DEV_DEAD)
2133 continue;
2134 if (tsdev->status == SBP_DEV_RESET)
2135 continue;
2136 if (tsdev->periph != NULL) {
2137 scsipi_periph_freeze(tsdev->periph, 1);
2138 tsdev->freeze++;
2139 }
2140 sbp_abort_all_ocbs(tsdev, XS_TIMEOUT);
2141 if (method == 2)
2142 tsdev->status = SBP_DEV_LOGIN;
2143 }
2144 switch (method) {
2145 case 1:
2146 aprint_error("target reset\n");
2147 sbp_mgm_orb(sdev, ORB_FUN_RST, NULL);
2148 break;
2149 case 2:
2150 aprint_error("reset start\n");
2151 sbp_reset_start(sdev);
2152 break;
2153 }
2154 }
2155
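/*
 * A management ORB timed out: drop it and restart the device from a
 * reset so a fresh login can be attempted.
 */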
2156 static void
2157 sbp_mgm_timeout(void *arg)
2158 {
2159 struct sbp_ocb *ocb = (struct sbp_ocb *)arg;
2160 struct sbp_dev *sdev = ocb->sdev;
2161 struct sbp_target *target = sdev->target;
2162
2163 aprint_error_dev(sdev->target->sbp->sc_fd.dev,
2164 "%s:%s: request timeout(mgm orb:0x%08x) ... ",
2165 __func__, sdev->bustgtlun, (uint32_t)ocb->bus_addr);
2166 target->mgm_ocb_cur = NULL;
2167 sbp_free_ocb(sdev, ocb);
2168 #if 0
2169 /* XXX */
2170 aprint_error("run next request\n");
2171 sbp_mgm_orb(sdev, ORB_FUN_RUNQUEUE, NULL);
2172 #endif
2173 aprint_error_dev(sdev->target->sbp->sc_fd.dev,
2174 "%s:%s: reset start\n", __func__, sdev->bustgtlun);
2175 sbp_reset_start(sdev);
2176 }
2177
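/*
 * A command ORB timed out. Escalate on successive timeouts: first an
 * agent reset, then a target reset, then a reset start, and finally
 * give up.
 */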
2178 static void
2179 sbp_timeout(void *arg)
2180 {
2181 struct sbp_ocb *ocb = (struct sbp_ocb *)arg;
2182 struct sbp_dev *sdev = ocb->sdev;
2183
2184 aprint_error_dev(sdev->target->sbp->sc_fd.dev,
2185 "%s:%s: request timeout(cmd orb:0x%08x) ... ",
2186 __func__, sdev->bustgtlun, (uint32_t)ocb->bus_addr);
2187
2188 sdev->timeout++;
2189 switch (sdev->timeout) {
2190 case 1:
2191 aprint_error("agent reset\n");
2192 if (sdev->periph != NULL) {
2193 scsipi_periph_freeze(sdev->periph, 1);
2194 sdev->freeze++;
2195 }
2196 sbp_abort_all_ocbs(sdev, XS_TIMEOUT);
2197 sbp_agent_reset(sdev);
2198 break;
2199 case 2:
2200 case 3:
2201 sbp_target_reset(sdev, sdev->timeout - 1);
2202 break;
2203 default:
2204 aprint_error("\n");
2205 #if 0
2206 /* XXX give up */
2207 sbp_scsipi_detach_target(target);
2208 if (target->luns != NULL)
2209 free(target->luns, M_SBP);
2210 target->num_lun = 0;
2211 target->luns = NULL;
2212 target->fwdev = NULL;
2213 #endif
2214 }
2215 }
2216
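/*
 * Translate a scsipi transfer into a command ORB: map the LUN to its
 * sbp_dev, grab a free OCB, build the ORB (loading the data buffer via
 * bus_dma when present), and hand it to sbp_execute_ocb().
 */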
2217 static void
2218 sbp_action1(struct sbp_softc *sc, struct scsipi_xfer *xs)
2219 {
2220 struct sbp_target *target = &sc->sc_target;
2221 struct sbp_dev *sdev = NULL;
2222 struct sbp_ocb *ocb;
2223 int speed, flag, error;
2224 void *cdb;
2225
2226 /* target:lun -> sdev mapping */
2227 if (target->fwdev != NULL &&
2228 xs->xs_periph->periph_lun < target->num_lun) {
2229 sdev = target->luns[xs->xs_periph->periph_lun];
2230 if (sdev != NULL && sdev->status != SBP_DEV_ATTACHED &&
2231 sdev->status != SBP_DEV_PROBE)
2232 sdev = NULL;
2233 }
2234
2235 if (sdev == NULL) {
2236 SBP_DEBUG(1)
2237 printf("%s:%d:%d: Invalid target (target needed)\n",
2238 sc ? device_xname(sc->sc_fd.dev) : "???",
2239 xs->xs_periph->periph_target,
2240 xs->xs_periph->periph_lun);
2241 END_DEBUG
2242
2243 xs->error = XS_DRIVER_STUFFUP;
2244 scsipi_done(xs);
2245 return;
2246 }
2247
2248 SBP_DEBUG(2)
2249 printf("%s:%d:%d:"
2250 " cmd: %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x,"
2251 " flags: 0x%02x, %db cmd/%db data\n",
2252 device_xname(sc->sc_fd.dev),
2253 xs->xs_periph->periph_target,
2254 xs->xs_periph->periph_lun,
2255 xs->cmd->opcode,
2256 xs->cmd->bytes[0], xs->cmd->bytes[1],
2257 xs->cmd->bytes[2], xs->cmd->bytes[3],
2258 xs->cmd->bytes[4], xs->cmd->bytes[5],
2259 xs->cmd->bytes[6], xs->cmd->bytes[7],
2260 xs->cmd->bytes[8],
2261 xs->xs_control & (XS_CTL_DATA_IN | XS_CTL_DATA_OUT),
2262 xs->cmdlen, xs->datalen);
2263 END_DEBUG
2264 mutex_enter(&sc->sc_mtx);
2265 ocb = sbp_get_ocb(sdev);
2266 mutex_exit(&sc->sc_mtx);
2267 if (ocb == NULL) {
2268 xs->error = XS_REQUEUE;
2269 if (sdev->freeze == 0) {
2270 scsipi_periph_freeze(sdev->periph, 1);
2271 sdev->freeze++;
2272 }
2273 scsipi_done(xs);
2274 return;
2275 }
2276
2277 ocb->flags = OCB_ACT_CMD;
2278 ocb->sdev = sdev;
2279 ocb->xs = xs;
2280 ocb->orb[0] = htonl(1U << 31);
2281 ocb->orb[1] = 0;
2282 ocb->orb[2] = htonl(((sc->sc_fd.fc->nodeid | FWLOCALBUS) << 16));
2283 ocb->orb[3] = htonl(ocb->bus_addr + IND_PTR_OFFSET);
2284 speed = uimin(target->fwdev->speed, max_speed);
2285 ocb->orb[4] =
2286 htonl(ORB_NOTIFY | ORB_CMD_SPD(speed) | ORB_CMD_MAXP(speed + 7));
2287 if ((xs->xs_control & (XS_CTL_DATA_IN | XS_CTL_DATA_OUT)) ==
2288 XS_CTL_DATA_IN) {
2289 ocb->orb[4] |= htonl(ORB_CMD_IN);
2290 flag = BUS_DMA_READ;
2291 } else
2292 flag = BUS_DMA_WRITE;
2293
2294 cdb = xs->cmd;
2295 memcpy((void *)&ocb->orb[5], cdb, xs->cmdlen);
2296 /*
2297 printf("ORB %08x %08x %08x %08x\n", ntohl(ocb->orb[0]), ntohl(ocb->orb[1]), ntohl(ocb->orb[2]), ntohl(ocb->orb[3]));
2298 printf("ORB %08x %08x %08x %08x\n", ntohl(ocb->orb[4]), ntohl(ocb->orb[5]), ntohl(ocb->orb[6]), ntohl(ocb->orb[7]));
2299 */
2300 if (xs->datalen > 0) {
2301 error = bus_dmamap_load(sc->sc_dmat, ocb->dmamap,
2302 xs->data, xs->datalen, NULL, BUS_DMA_NOWAIT | flag);
2303 if (error) {
2304 aprint_error_dev(sc->sc_fd.dev,
2305 "DMA map load error %d\n", error);
2306 xs->error = XS_DRIVER_STUFFUP;
2307 scsipi_done(xs);
2308 } else
2309 sbp_execute_ocb(ocb, ocb->dmamap->dm_segs,
2310 ocb->dmamap->dm_nsegs);
2311 } else
2312 sbp_execute_ocb(ocb, NULL, 0);
2313
2314 return;
2315 }
2316
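/*
 * Fill in the ORB data descriptor: a direct pointer for a single
 * segment or a page table for multiple segments, then sync the DMA
 * maps, queue the ORB, and kick the fetch agent (doorbell or ORB
 * pointer write) as needed.
 */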
2317 static void
2318 sbp_execute_ocb(struct sbp_ocb *ocb, bus_dma_segment_t *segments, int seg)
2319 {
2320 struct sbp_ocb *prev;
2321 bus_dma_segment_t *s;
2322 int i;
2323
2324 SBP_DEBUG(2)
2325 printf("sbp_execute_ocb: seg %d", seg);
2326 for (i = 0; i < seg; i++)
2327 printf(", %jx:%jd", (uintmax_t)segments[i].ds_addr,
2328 (uintmax_t)segments[i].ds_len);
2329 printf("\n");
2330 END_DEBUG
2331
2332 if (seg == 1) {
2333 /* direct pointer */
2334 s = segments;
2335 if (s->ds_len > SBP_SEG_MAX)
2336 panic("ds_len > SBP_SEG_MAX, fix busdma code");
2337 ocb->orb[3] = htonl(s->ds_addr);
2338 ocb->orb[4] |= htonl(s->ds_len);
2339 } else if (seg > 1) {
2340 /* page table */
2341 for (i = 0; i < seg; i++) {
2342 s = &segments[i];
2343 SBP_DEBUG(0)
2344 /* XXX LSI Logic "< 16 byte" bug might be hit */
2345 if (s->ds_len < 16)
2346 printf("sbp_execute_ocb: warning, "
2347 "segment length(%jd) is less than 16."
2348 "(seg=%d/%d)\n",
2349 (uintmax_t)s->ds_len, i + 1, seg);
2350 END_DEBUG
2351 if (s->ds_len > SBP_SEG_MAX)
2352 panic("ds_len > SBP_SEG_MAX, fix busdma code");
2353 ocb->ind_ptr[i].hi = htonl(s->ds_len << 16);
2354 ocb->ind_ptr[i].lo = htonl(s->ds_addr);
2355 }
2356 ocb->orb[4] |= htonl(ORB_CMD_PTBL | seg);
2357 }
2358
2359 if (seg > 0) {
2360 struct sbp_softc *sc = ocb->sdev->target->sbp;
2361 const int flag = (ntohl(ocb->orb[4]) & ORB_CMD_IN) ?
2362 BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE;
2363
2364 bus_dmamap_sync(sc->sc_dmat, ocb->dmamap,
2365 0, ocb->dmamap->dm_mapsize, flag);
2366 }
2367 prev = sbp_enqueue_ocb(ocb->sdev, ocb);
2368 SBP_ORB_DMA_SYNC(ocb->sdev->dma, ocb->index, BUS_DMASYNC_PREWRITE);
2369 if (use_doorbell) {
2370 if (prev == NULL) {
2371 if (ocb->sdev->last_ocb != NULL)
2372 sbp_doorbell(ocb->sdev);
2373 else
2374 sbp_orb_pointer(ocb->sdev, ocb);
2375 }
2376 } else
2377 if (prev == NULL || (ocb->sdev->flags & ORB_LINK_DEAD) != 0) {
2378 ocb->sdev->flags &= ~ORB_LINK_DEAD;
2379 sbp_orb_pointer(ocb->sdev, ocb);
2380 }
2381 }
2382
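/*
 * Match the received status block against the outstanding OCBs,
 * remove the matching OCB from the queue, sync and unload its data
 * DMA map, and restart the fetch agent if it stopped at the end of
 * the ORB list.
 */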
2383 static struct sbp_ocb *
2384 sbp_dequeue_ocb(struct sbp_dev *sdev, struct sbp_status *sbp_status)
2385 {
2386 struct sbp_softc *sc = sdev->target->sbp;
2387 struct sbp_ocb *ocb;
2388 struct sbp_ocb *next;
2389 int order = 0;
2390
2391 SBP_DEBUG(1)
2392 printf("%s:%s:%s: 0x%08x src %d\n", device_xname(sc->sc_fd.dev),
2393 __func__, sdev->bustgtlun, ntohl(sbp_status->orb_lo),
2394 sbp_status->src);
2395 END_DEBUG
2396 mutex_enter(&sc->sc_mtx);
2397 for (ocb = STAILQ_FIRST(&sdev->ocbs); ocb != NULL; ocb = next) {
2398 next = STAILQ_NEXT(ocb, ocb);
2399 if (OCB_MATCH(ocb, sbp_status)) {
2400 /* found */
2401 SBP_ORB_DMA_SYNC(sdev->dma, ocb->index,
2402 BUS_DMASYNC_POSTWRITE);
2403 STAILQ_REMOVE(&sdev->ocbs, ocb, sbp_ocb, ocb);
2404 if (ocb->xs != NULL)
2405 callout_stop(&ocb->xs->xs_callout);
2406 if (ntohl(ocb->orb[4]) & 0xffff) {
2407 const int flag =
2408 (ntohl(ocb->orb[4]) & ORB_CMD_IN) ?
2409 BUS_DMASYNC_POSTREAD :
2410 BUS_DMASYNC_POSTWRITE;
2411
2412 bus_dmamap_sync(sc->sc_dmat, ocb->dmamap,
2413 0, ocb->dmamap->dm_mapsize, flag);
2414 bus_dmamap_unload(sc->sc_dmat, ocb->dmamap);
2415
2416 }
2417 if (!use_doorbell) {
2418 if (sbp_status->src == SRC_NO_NEXT) {
2419 if (next != NULL)
2420 sbp_orb_pointer(sdev, next);
2421 else if (order > 0)
2422 /*
2423 * Unordered execution: the agent stopped,
2424 * so the pointer for the next ORB must
2425 * be sent explicitly (mark the link dead).
2426 */
2427 sdev->flags |= ORB_LINK_DEAD;
2428 }
2429 }
2430 break;
2431 } else
2432 order++;
2433 }
2434 mutex_exit(&sc->sc_mtx);
2435
2436 if (ocb && use_doorbell) {
2437 /*
2438 * XXX this is not correct for unordered
2439 * execution.
2440 */
2441 if (sdev->last_ocb != NULL)
2442 sbp_free_ocb(sdev, sdev->last_ocb);
2443 sdev->last_ocb = ocb;
2444 if (next != NULL &&
2445 sbp_status->src == SRC_NO_NEXT)
2446 sbp_doorbell(sdev);
2447 }
2448
2449 SBP_DEBUG(0)
2450 if (ocb && order > 0)
2451 printf("%s:%s:%s: unordered execution order:%d\n",
2452 device_xname(sc->sc_fd.dev), __func__, sdev->bustgtlun,
2453 order);
2454 END_DEBUG
2455 return ocb;
2456 }
2457
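/*
 * Append the OCB to the device's outstanding list, start its timeout,
 * and link it into the previous ORB (or the last completed one in
 * doorbell mode) so the fetch agent can reach it. Returns the previous
 * tail, or NULL if the list was empty.
 */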
2458 static struct sbp_ocb *
2459 sbp_enqueue_ocb(struct sbp_dev *sdev, struct sbp_ocb *ocb)
2460 {
2461 struct sbp_softc *sc = sdev->target->sbp;
2462 struct sbp_ocb *tocb, *prev, *prev2;
2463
2464 SBP_DEBUG(1)
2465 printf("%s:%s:%s: 0x%08jx\n", device_xname(sc->sc_fd.dev),
2466 __func__, sdev->bustgtlun, (uintmax_t)ocb->bus_addr);
2467 END_DEBUG
2468 mutex_enter(&sc->sc_mtx);
2469 prev = NULL;
2470 STAILQ_FOREACH(tocb, &sdev->ocbs, ocb)
2471 prev = tocb;
2472 prev2 = prev;
2473 STAILQ_INSERT_TAIL(&sdev->ocbs, ocb, ocb);
2474 mutex_exit(&sc->sc_mtx);
2475
2476 callout_reset(&ocb->xs->xs_callout, mstohz(ocb->xs->timeout),
2477 sbp_timeout, ocb);
2478
2479 if (use_doorbell && prev == NULL)
2480 prev2 = sdev->last_ocb;
2481
2482 if (prev2 != NULL) {
2483 SBP_DEBUG(2)
2484 printf("linking chain 0x%jx -> 0x%jx\n",
2485 (uintmax_t)prev2->bus_addr, (uintmax_t)ocb->bus_addr);
2486 END_DEBUG
2487 /*
2488 * Suppress compiler reordering so that orb[1] is
2489 * written before orb[0].
2490 * XXX An explicit memory barrier may be needed on
2491 * architectures other than i386/amd64.
2492 */
2493 *(volatile uint32_t *)&prev2->orb[1] = htonl(ocb->bus_addr);
2494 *(volatile uint32_t *)&prev2->orb[0] = 0;
2495 }
2496
2497 return prev;
2498 }
2499
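/*
 * Take an OCB from the per-LUN free list; flag an ORB shortage if the
 * list is empty. Called with sc_mtx held.
 */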
2500 static struct sbp_ocb *
2501 sbp_get_ocb(struct sbp_dev *sdev)
2502 {
2503 struct sbp_softc *sc = sdev->target->sbp;
2504 struct sbp_ocb *ocb;
2505
2506 KASSERT(mutex_owned(&sc->sc_mtx));
2507
2508 ocb = STAILQ_FIRST(&sdev->free_ocbs);
2509 if (ocb == NULL) {
2510 sdev->flags |= ORB_SHORTAGE;
2511 aprint_error_dev(sc->sc_fd.dev,
2512 "ocb shortage!!!\n");
2513 return NULL;
2514 }
2515 STAILQ_REMOVE_HEAD(&sdev->free_ocbs, ocb);
2516 ocb->xs = NULL;
2517 return ocb;
2518 }
2519
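/*
 * Return an OCB to the free list and, if we previously ran short of
 * OCBs, thaw the periph and channel that were frozen because of it.
 */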
2520 static void
2521 sbp_free_ocb(struct sbp_dev *sdev, struct sbp_ocb *ocb)
2522 {
2523 struct sbp_softc *sc = sdev->target->sbp;
2524 int count;
2525
2526 ocb->flags = 0;
2527 ocb->xs = NULL;
2528
2529 mutex_enter(&sc->sc_mtx);
2530 STAILQ_INSERT_TAIL(&sdev->free_ocbs, ocb, ocb);
2531 mutex_exit(&sc->sc_mtx);
2532 if (sdev->flags & ORB_SHORTAGE) {
2533 sdev->flags &= ~ORB_SHORTAGE;
2534 count = sdev->freeze;
2535 sdev->freeze = 0;
2536 if (sdev->periph)
2537 scsipi_periph_thaw(sdev->periph, count);
2538 scsipi_channel_thaw(&sc->sc_channel, 0);
2539 }
2540 }
2541
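/*
 * Abort a single OCB: unload its data DMA map, complete its scsipi
 * transfer with the given error status, and put the OCB back on the
 * free list.
 */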
2542 static void
2543 sbp_abort_ocb(struct sbp_ocb *ocb, int status)
2544 {
2545 struct sbp_softc *sc;
2546 struct sbp_dev *sdev;
2547
2548 sdev = ocb->sdev;
2549 sc = sdev->target->sbp;
2550 SBP_DEBUG(0)
2551 printf("%s:%s:%s: sbp_abort_ocb 0x%jx\n", device_xname(sc->sc_fd.dev),
2552 __func__, sdev->bustgtlun, (uintmax_t)ocb->bus_addr);
2553 END_DEBUG
2554 SBP_DEBUG(1)
2555 if (ocb->xs != NULL)
2556 sbp_print_scsi_cmd(ocb);
2557 END_DEBUG
2558 if (ntohl(ocb->orb[4]) & 0xffff) {
2559 const int flag = (ntohl(ocb->orb[4]) & ORB_CMD_IN) ?
2560 BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE;
2561
2562 bus_dmamap_sync(sc->sc_dmat, ocb->dmamap,
2563 0, ocb->dmamap->dm_mapsize, flag);
2564 bus_dmamap_unload(sc->sc_dmat, ocb->dmamap);
2565 }
2566 if (ocb->xs != NULL) {
2567 callout_stop(&ocb->xs->xs_callout);
2568 ocb->xs->error = status;
2569 scsipi_done(ocb->xs);
2570 }
2571 sbp_free_ocb(sdev, ocb);
2572 }
2573
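/*
 * Move every outstanding OCB off the device queue and abort each one
 * with the given status; also release the cached last_ocb used in
 * doorbell mode.
 */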
2574 static void
2575 sbp_abort_all_ocbs(struct sbp_dev *sdev, int status)
2576 {
2577 struct sbp_softc *sc = sdev->target->sbp;
2578 struct sbp_ocb *ocb, *next;
2579 STAILQ_HEAD(, sbp_ocb) temp;
2580
2581 mutex_enter(&sc->sc_mtx);
2582 STAILQ_INIT(&temp);
2583 STAILQ_CONCAT(&temp, &sdev->ocbs);
2584 STAILQ_INIT(&sdev->ocbs);
2585 mutex_exit(&sc->sc_mtx);
2586
2587 for (ocb = STAILQ_FIRST(&temp); ocb != NULL; ocb = next) {
2588 next = STAILQ_NEXT(ocb, ocb);
2589 sbp_abort_ocb(ocb, status);
2590 }
2591 if (sdev->last_ocb != NULL) {
2592 sbp_free_ocb(sdev, sdev->last_ocb);
2593 sdev->last_ocb = NULL;
2594 }
2595 }
2596