/*	$NetBSD: sbp.c,v 1.31 2010/05/10 12:17:33 kiyohara Exp $	*/
2 /*-
3 * Copyright (c) 2003 Hidetoshi Shimokawa
4 * Copyright (c) 1998-2002 Katsushi Kobayashi and Hidetoshi Shimokawa
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. All advertising materials mentioning features or use of this software
16 * must display the acknowledgement as bellow:
17 *
18 * This product includes software developed by K. Kobayashi and H. Shimokawa
19 *
20 * 4. The name of the author may not be used to endorse or promote products
21 * derived from this software without specific prior written permission.
22 *
23 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
24 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
25 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
26 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
27 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
28 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
29 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
30 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
31 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
32 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
33 * POSSIBILITY OF SUCH DAMAGE.
34 *
35 * $FreeBSD: src/sys/dev/firewire/sbp.c,v 1.100 2009/02/18 18:41:34 sbruno Exp $
36 *
37 */
38
39 #include <sys/cdefs.h>
40 __KERNEL_RCSID(0, "$NetBSD: sbp.c,v 1.31 2010/05/10 12:17:33 kiyohara Exp $");
41
42
43 #include <sys/param.h>
44 #include <sys/device.h>
45 #include <sys/errno.h>
46 #include <sys/buf.h>
47 #include <sys/callout.h>
48 #include <sys/condvar.h>
49 #include <sys/kernel.h>
50 #include <sys/kmem.h>
51 #include <sys/kthread.h>
52 #include <sys/mutex.h>
53 #include <sys/proc.h>
54 #include <sys/sysctl.h>
55
56 #include <sys/bus.h>
57
58 #include <dev/scsipi/scsi_spc.h>
59 #include <dev/scsipi/scsi_all.h>
60 #include <dev/scsipi/scsipi_all.h>
61 #include <dev/scsipi/scsiconf.h>
62 #include <dev/scsipi/scsipiconf.h>
63
64 #include <dev/ieee1394/firewire.h>
65 #include <dev/ieee1394/firewirereg.h>
66 #include <dev/ieee1394/fwdma.h>
67 #include <dev/ieee1394/iec13213.h>
68 #include <dev/ieee1394/sbp.h>
69
70 #include "locators.h"
71
72
73 #define SBP_FWDEV_ALIVE(fwdev) (((fwdev)->status == FWDEVATTACHED) \
74 && crom_has_specver((fwdev)->csrrom, CSRVAL_ANSIT10, CSRVAL_T10SBP2))
75
76 #define SBP_NUM_TARGETS 8 /* MAX 64 */
77 #define SBP_NUM_LUNS 64
78 #define SBP_MAXPHYS MIN(MAXPHYS, (512*1024) /* 512KB */)
79 #define SBP_DMA_SIZE PAGE_SIZE
80 #define SBP_LOGIN_SIZE sizeof(struct sbp_login_res)
81 #define SBP_QUEUE_LEN ((SBP_DMA_SIZE - SBP_LOGIN_SIZE) / sizeof(struct sbp_ocb))
82 #define SBP_NUM_OCB (SBP_QUEUE_LEN * SBP_NUM_TARGETS)
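/*
 * Per-LUN DMA layout: each LUN gets one SBP_DMA_SIZE block that holds
 * SBP_QUEUE_LEN struct sbp_ocb entries followed by the sbp_login_res
 * written back by the login ORB (see sbp_alloc_lun() and the
 * ORB_FUN_LGI case in sbp_mgm_orb()).
 */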
83
84 /*
85 * STATUS FIFO addressing
86 * bit
87 * -----------------------
 *   0- 1( 2): 0 (alignment)
 *   2- 9( 8): lun
 *  10-23(14): unit
 *  24-31( 8): 0 (reserved)
 *  32-47(16): SBP_BIND_HI
 *  48-63(16): bus_id, node_id
93 */
94 #define SBP_BIND_HI 0x1
95 #define SBP_DEV2ADDR(u, l) \
96 (((uint64_t)SBP_BIND_HI << 32) |\
97 (((u) & 0x3fff) << 10) |\
98 (((l) & 0xff) << 2))
99 #define SBP_ADDR2UNIT(a) (((a) >> 10) & 0x3fff)
100 #define SBP_ADDR2LUN(a) (((a) >> 2) & 0xff)
101 #define SBP_INITIATOR 7
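/*
 * Illustrative example: SBP_DEV2ADDR(2, 5) == 0x100000814, i.e.
 * SBP_BIND_HI in bits 32-47, unit 2 in bits 10-23 and lun 5 in
 * bits 2-9; SBP_ADDR2UNIT()/SBP_ADDR2LUN() recover the unit and lun
 * from the destination offset of an incoming status write.
 */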
102
103 static const char *orb_fun_name[] = {
104 ORB_FUN_NAMES
105 };
106
107 static int debug = 0;
108 static int auto_login = 1;
109 static int max_speed = -1;
110 static int sbp_cold = 1;
111 static int ex_login = 1;
112 static int login_delay = 1000; /* msec */
113 static int scan_delay = 500; /* msec */
114 static int use_doorbell = 0;
115 static int sbp_tags = 0;
116
117 static int sysctl_sbp_verify(SYSCTLFN_PROTO, int lower, int upper);
118 static int sysctl_sbp_verify_max_speed(SYSCTLFN_PROTO);
119 static int sysctl_sbp_verify_tags(SYSCTLFN_PROTO);
120
121 /*
122 * Setup sysctl(3) MIB, hw.sbp.*
123 *
124 * TBD condition CTLFLAG_PERMANENT on being a module or not
125 */
126 SYSCTL_SETUP(sysctl_sbp, "sysctl sbp(4) subtree setup")
127 {
128 int rc, sbp_node_num;
129 const struct sysctlnode *node;
130
131 if ((rc = sysctl_createv(clog, 0, NULL, NULL,
132 CTLFLAG_PERMANENT, CTLTYPE_NODE, "hw", NULL,
133 NULL, 0, NULL, 0, CTL_HW, CTL_EOL)) != 0)
134 goto err;
135
136 if ((rc = sysctl_createv(clog, 0, NULL, &node,
137 CTLFLAG_PERMANENT, CTLTYPE_NODE, "sbp",
138 SYSCTL_DESCR("sbp controls"), NULL, 0, NULL,
139 0, CTL_HW, CTL_CREATE, CTL_EOL)) != 0)
140 goto err;
141 sbp_node_num = node->sysctl_num;
142
143 /* sbp auto login flag */
144 if ((rc = sysctl_createv(clog, 0, NULL, &node,
145 CTLFLAG_PERMANENT | CTLFLAG_READWRITE, CTLTYPE_INT,
146 "auto_login", SYSCTL_DESCR("SBP perform login automatically"),
147 NULL, 0, &auto_login,
148 0, CTL_HW, sbp_node_num, CTL_CREATE, CTL_EOL)) != 0)
149 goto err;
150
151 /* sbp max speed */
152 if ((rc = sysctl_createv(clog, 0, NULL, &node,
153 CTLFLAG_PERMANENT | CTLFLAG_READWRITE, CTLTYPE_INT,
154 "max_speed", SYSCTL_DESCR("SBP transfer max speed"),
155 sysctl_sbp_verify_max_speed, 0, &max_speed,
156 0, CTL_HW, sbp_node_num, CTL_CREATE, CTL_EOL)) != 0)
157 goto err;
158
159 /* sbp exclusive login flag */
160 if ((rc = sysctl_createv(clog, 0, NULL, &node,
161 CTLFLAG_PERMANENT | CTLFLAG_READWRITE, CTLTYPE_INT,
162 "exclusive_login", SYSCTL_DESCR("SBP enable exclusive login"),
163 NULL, 0, &ex_login,
164 0, CTL_HW, sbp_node_num, CTL_CREATE, CTL_EOL)) != 0)
165 goto err;
166
167 /* sbp login delay */
168 if ((rc = sysctl_createv(clog, 0, NULL, &node,
169 CTLFLAG_PERMANENT | CTLFLAG_READWRITE, CTLTYPE_INT,
170 "login_delay", SYSCTL_DESCR("SBP login delay in msec"),
171 NULL, 0, &login_delay,
172 0, CTL_HW, sbp_node_num, CTL_CREATE, CTL_EOL)) != 0)
173 goto err;
174
175 /* sbp scan delay */
176 if ((rc = sysctl_createv(clog, 0, NULL, &node,
177 CTLFLAG_PERMANENT | CTLFLAG_READWRITE, CTLTYPE_INT,
178 "scan_delay", SYSCTL_DESCR("SBP scan delay in msec"),
179 NULL, 0, &scan_delay,
180 0, CTL_HW, sbp_node_num, CTL_CREATE, CTL_EOL)) != 0)
181 goto err;
182
183 /* sbp use doorbell flag */
184 if ((rc = sysctl_createv(clog, 0, NULL, &node,
185 CTLFLAG_PERMANENT | CTLFLAG_READWRITE, CTLTYPE_INT,
186 "use_doorbell", SYSCTL_DESCR("SBP use doorbell request"),
187 NULL, 0, &use_doorbell,
188 0, CTL_HW, sbp_node_num, CTL_CREATE, CTL_EOL)) != 0)
189 goto err;
190
191 /* sbp force tagged queuing */
192 if ((rc = sysctl_createv(clog, 0, NULL, &node,
193 CTLFLAG_PERMANENT | CTLFLAG_READWRITE, CTLTYPE_INT,
194 "tags", SYSCTL_DESCR("SBP tagged queuing support"),
195 sysctl_sbp_verify_tags, 0, &sbp_tags,
196 0, CTL_HW, sbp_node_num, CTL_CREATE, CTL_EOL)) != 0)
197 goto err;
198
199 /* sbp driver debug flag */
200 if ((rc = sysctl_createv(clog, 0, NULL, &node,
201 CTLFLAG_PERMANENT | CTLFLAG_READWRITE, CTLTYPE_INT,
202 "sbp_debug", SYSCTL_DESCR("SBP debug flag"),
203 NULL, 0, &debug,
204 0, CTL_HW, sbp_node_num, CTL_CREATE, CTL_EOL)) != 0)
205 goto err;
206
207 return;
208
209 err:
210 aprint_error("%s: sysctl_createv failed (rc = %d)\n", __func__, rc);
211 }
212
213 static int
214 sysctl_sbp_verify(SYSCTLFN_ARGS, int lower, int upper)
215 {
216 int error, t;
217 struct sysctlnode node;
218
219 node = *rnode;
220 t = *(int*)rnode->sysctl_data;
221 node.sysctl_data = &t;
222 error = sysctl_lookup(SYSCTLFN_CALL(&node));
223 if (error || newp == NULL)
224 return error;
225
226 if (t < lower || t > upper)
227 return EINVAL;
228
229 *(int*)rnode->sysctl_data = t;
230
231 return 0;
232 }
233
234 static int
235 sysctl_sbp_verify_max_speed(SYSCTLFN_ARGS)
236 {
237
238 return sysctl_sbp_verify(SYSCTLFN_CALL(rnode), 0, FWSPD_S400);
239 }
240
241 static int
242 sysctl_sbp_verify_tags(SYSCTLFN_ARGS)
243 {
244
245 return sysctl_sbp_verify(SYSCTLFN_CALL(rnode), -1, 1);
246 }
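/*
 * The verify wrappers above copy the value through a local so that an
 * out-of-range write from sysctl(8) never reaches the backing variable:
 * max_speed is limited to 0..FWSPD_S400 and tags to -1..1 (force off /
 * leave the device default / force on, see sbp_fix_inq_data()).
 */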
247
248 #define NEED_RESPONSE 0
249
250 #define SBP_SEG_MAX rounddown(0xffff, PAGE_SIZE)
251 #ifdef __sparc64__ /* iommu */
252 #define SBP_IND_MAX howmany(SBP_MAXPHYS, SBP_SEG_MAX)
253 #else
254 #define SBP_IND_MAX howmany(SBP_MAXPHYS, PAGE_SIZE)
255 #endif
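/*
 * Each command ORB (struct sbp_ocb below) carries an indirect page
 * table of at most SBP_IND_MAX entries, each describing a segment of
 * at most SBP_SEG_MAX bytes; the sparc64 #ifdef above assumes the
 * IOMMU coalesces DVMA segments up to SBP_SEG_MAX, so fewer entries
 * are needed to cover SBP_MAXPHYS.
 */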
256 struct sbp_ocb {
257 uint32_t orb[8];
258 #define IND_PTR_OFFSET (sizeof(uint32_t) * 8)
259 struct ind_ptr ind_ptr[SBP_IND_MAX];
260 struct scsipi_xfer *xs;
261 struct sbp_dev *sdev;
262 uint16_t index;
263 uint16_t flags; /* XXX should be removed */
264 bus_dmamap_t dmamap;
265 bus_addr_t bus_addr;
266 STAILQ_ENTRY(sbp_ocb) ocb;
267 };
268
269 #define SBP_ORB_DMA_SYNC(dma, i, op) \
270 bus_dmamap_sync((dma).dma_tag, (dma).dma_map, \
271 sizeof(struct sbp_ocb) * (i), \
272 sizeof(ocb->orb) + sizeof(ocb->ind_ptr), (op));
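/*
 * Sync only the ORB and indirect page table of OCB number (i) within
 * the per-LUN DMA block; the remaining sbp_ocb fields are CPU-private
 * and never read by the device.
 */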
273
274 #define OCB_ACT_MGM 0
275 #define OCB_ACT_CMD 1
276 #define OCB_MATCH(o,s) ((o)->bus_addr == ntohl((s)->orb_lo))
277
278 struct sbp_dev{
279 #define SBP_DEV_RESET 0 /* accept login */
280 #define SBP_DEV_LOGIN 1 /* to login */
281 #if 0
282 #define SBP_DEV_RECONN 2 /* to reconnect */
283 #endif
284 #define SBP_DEV_TOATTACH 3 /* to attach */
285 #define SBP_DEV_PROBE 4 /* scan lun */
286 #define SBP_DEV_ATTACHED 5 /* in operation */
287 #define SBP_DEV_DEAD 6 /* unavailable unit */
288 #define SBP_DEV_RETRY 7 /* unavailable unit */
289 uint8_t status:4,
290 timeout:4;
291 uint8_t type;
292 uint16_t lun_id;
293 uint16_t freeze;
294 #define ORB_LINK_DEAD (1 << 0)
295 #define VALID_LUN (1 << 1)
296 #define ORB_POINTER_ACTIVE (1 << 2)
297 #define ORB_POINTER_NEED (1 << 3)
298 #define ORB_DOORBELL_ACTIVE (1 << 4)
299 #define ORB_DOORBELL_NEED (1 << 5)
300 #define ORB_SHORTAGE (1 << 6)
301 uint16_t flags;
302 struct scsipi_periph *periph;
303 struct sbp_target *target;
304 struct fwdma_alloc dma;
305 struct sbp_login_res *login;
306 struct callout login_callout;
307 struct sbp_ocb *ocb;
308 STAILQ_HEAD(, sbp_ocb) ocbs;
309 STAILQ_HEAD(, sbp_ocb) free_ocbs;
310 struct sbp_ocb *last_ocb;
311 char vendor[32];
312 char product[32];
313 char revision[10];
314 char bustgtlun[32];
315 };
316
317 struct sbp_target {
318 int target_id;
319 int num_lun;
320 struct sbp_dev **luns;
321 struct sbp_softc *sbp;
322 struct fw_device *fwdev;
323 uint32_t mgm_hi, mgm_lo;
324 struct sbp_ocb *mgm_ocb_cur;
325 STAILQ_HEAD(, sbp_ocb) mgm_ocb_queue;
326 struct callout mgm_ocb_timeout;
327 STAILQ_HEAD(, fw_xfer) xferlist;
328 int n_xfer;
329 };
330
331 struct sbp_softc {
332 struct firewire_dev_comm sc_fd;
333 struct scsipi_adapter sc_adapter;
334 struct scsipi_channel sc_channel;
335 device_t sc_bus;
336 struct lwp *sc_lwp;
337 struct sbp_target sc_target;
338 struct fw_bind sc_fwb;
339 bus_dma_tag_t sc_dmat;
340 struct timeval sc_last_busreset;
341 int sc_flags;
342 kmutex_t sc_mtx;
343 kcondvar_t sc_cv;
344 };
345
346 MALLOC_DEFINE(M_SBP, "sbp", "SBP-II/IEEE1394");
347
348
349 static int sbpmatch(device_t, cfdata_t, void *);
350 static void sbpattach(device_t, device_t, void *);
351 static int sbpdetach(device_t, int);
352
353 static void sbp_scsipi_request(struct scsipi_channel *, scsipi_adapter_req_t,
354 void *);
355 static void sbp_minphys(struct buf *);
356
357 static void sbp_show_sdev_info(struct sbp_dev *);
358 static void sbp_alloc_lun(struct sbp_target *);
359 static struct sbp_target *sbp_alloc_target(struct sbp_softc *,
360 struct fw_device *);
361 static void sbp_probe_lun(struct sbp_dev *);
362 static void sbp_login_callout(void *);
363 static void sbp_login(struct sbp_dev *);
364 static void sbp_probe_target(void *);
365 static void sbp_post_busreset(void *);
366 static void sbp_post_explore(void *);
367 #if NEED_RESPONSE
368 static void sbp_loginres_callback(struct fw_xfer *);
369 #endif
370 static inline void sbp_xfer_free(struct fw_xfer *);
371 static void sbp_reset_start_callback(struct fw_xfer *);
372 static void sbp_reset_start(struct sbp_dev *);
373 static void sbp_mgm_callback(struct fw_xfer *);
374 static void sbp_scsipi_scan_target(void *);
375 static inline void sbp_scan_dev(struct sbp_dev *);
376 static void sbp_do_attach(struct fw_xfer *);
377 static void sbp_agent_reset_callback(struct fw_xfer *);
378 static void sbp_agent_reset(struct sbp_dev *);
379 static void sbp_busy_timeout_callback(struct fw_xfer *);
380 static void sbp_busy_timeout(struct sbp_dev *);
381 static void sbp_orb_pointer_callback(struct fw_xfer *);
382 static void sbp_orb_pointer(struct sbp_dev *, struct sbp_ocb *);
383 static void sbp_doorbell_callback(struct fw_xfer *);
384 static void sbp_doorbell(struct sbp_dev *);
385 static struct fw_xfer *sbp_write_cmd(struct sbp_dev *, int, int);
386 static void sbp_mgm_orb(struct sbp_dev *, int, struct sbp_ocb *);
387 static void sbp_print_scsi_cmd(struct sbp_ocb *);
388 static void sbp_scsi_status(struct sbp_status *, struct sbp_ocb *);
389 static void sbp_fix_inq_data(struct sbp_ocb *);
390 static void sbp_recv(struct fw_xfer *);
391 static int sbp_logout_all(struct sbp_softc *);
392 static void sbp_free_sdev(struct sbp_dev *);
393 static void sbp_free_target(struct sbp_target *);
394 static void sbp_scsipi_detach_sdev(struct sbp_dev *);
395 static void sbp_scsipi_detach_target(struct sbp_target *);
396 static void sbp_target_reset(struct sbp_dev *, int);
397 static void sbp_mgm_timeout(void *);
398 static void sbp_timeout(void *);
399 static void sbp_action1(struct sbp_softc *, struct scsipi_xfer *);
400 static void sbp_execute_ocb(struct sbp_ocb *, bus_dma_segment_t *, int);
401 static struct sbp_ocb *sbp_dequeue_ocb(struct sbp_dev *, struct sbp_status *);
402 static struct sbp_ocb *sbp_enqueue_ocb(struct sbp_dev *, struct sbp_ocb *);
403 static struct sbp_ocb *sbp_get_ocb(struct sbp_dev *);
404 static void sbp_free_ocb(struct sbp_dev *, struct sbp_ocb *);
405 static void sbp_abort_ocb(struct sbp_ocb *, int);
406 static void sbp_abort_all_ocbs(struct sbp_dev *, int);
407
408
409 static const char *orb_status0[] = {
410 /* 0 */ "No additional information to report",
411 /* 1 */ "Request type not supported",
412 /* 2 */ "Speed not supported",
413 /* 3 */ "Page size not supported",
414 /* 4 */ "Access denied",
415 /* 5 */ "Logical unit not supported",
416 /* 6 */ "Maximum payload too small",
417 /* 7 */ "Reserved for future standardization",
418 /* 8 */ "Resources unavailable",
419 /* 9 */ "Function rejected",
420 /* A */ "Login ID not recognized",
421 /* B */ "Dummy ORB completed",
422 /* C */ "Request aborted",
423 /* FF */ "Unspecified error"
424 #define MAX_ORB_STATUS0 0xd
425 };
426
427 static const char *orb_status1_object[] = {
428 /* 0 */ "Operation request block (ORB)",
429 /* 1 */ "Data buffer",
430 /* 2 */ "Page table",
431 /* 3 */ "Unable to specify"
432 };
433
434 static const char *orb_status1_serial_bus_error[] = {
435 /* 0 */ "Missing acknowledge",
436 /* 1 */ "Reserved; not to be used",
437 /* 2 */ "Time-out error",
438 /* 3 */ "Reserved; not to be used",
439 /* 4 */ "Busy retry limit exceeded(X)",
440 /* 5 */ "Busy retry limit exceeded(A)",
441 /* 6 */ "Busy retry limit exceeded(B)",
442 /* 7 */ "Reserved for future standardization",
443 /* 8 */ "Reserved for future standardization",
444 /* 9 */ "Reserved for future standardization",
445 /* A */ "Reserved for future standardization",
446 /* B */ "Tardy retry limit exceeded",
447 /* C */ "Conflict error",
448 /* D */ "Data error",
449 /* E */ "Type error",
450 /* F */ "Address error"
451 };
452
453
454 CFATTACH_DECL_NEW(sbp, sizeof(struct sbp_softc),
455 sbpmatch, sbpattach, sbpdetach, NULL);
456
457
458 int
459 sbpmatch(device_t parent, cfdata_t cf, void *aux)
460 {
461 struct fw_attach_args *fwa = aux;
462
463 if (strcmp(fwa->name, "sbp") == 0)
464 return 1;
465 return 0;
466 }
467
468 static void
469 sbpattach(device_t parent, device_t self, void *aux)
470 {
471 struct sbp_softc *sc = device_private(self);
472 struct fw_attach_args *fwa = (struct fw_attach_args *)aux;
473 struct firewire_comm *fc;
474 struct scsipi_adapter *sc_adapter = &sc->sc_adapter;
475 struct scsipi_channel *sc_channel = &sc->sc_channel;
476 struct sbp_target *target = &sc->sc_target;
477 int dv_unit;
478
479 aprint_naive("\n");
480 aprint_normal(": SBP-2/SCSI over IEEE1394\n");
481
482 sc->sc_fd.dev = self;
483
484 if (cold)
485 sbp_cold++;
486 sc->sc_fd.fc = fc = fwa->fc;
487 mutex_init(&sc->sc_mtx, MUTEX_DEFAULT, IPL_VM);
488 cv_init(&sc->sc_cv, "sbp");
489
490 if (max_speed < 0)
491 max_speed = fc->speed;
492
493 sc->sc_dmat = fc->dmat;
494
495 sc->sc_target.fwdev = NULL;
496 sc->sc_target.luns = NULL;
497
498 if (sbp_alloc_target(sc, fwa->fwdev) == NULL)
499 return;
500
501 sc_adapter->adapt_dev = sc->sc_fd.dev;
502 sc_adapter->adapt_nchannels = 1;
503 sc_adapter->adapt_max_periph = 1;
504 sc_adapter->adapt_request = sbp_scsipi_request;
505 sc_adapter->adapt_minphys = sbp_minphys;
506 sc_adapter->adapt_openings = 8;
507
508 sc_channel->chan_adapter = sc_adapter;
509 sc_channel->chan_bustype = &scsi_bustype;
510 sc_channel->chan_defquirks = PQUIRK_ONLYBIG;
511 sc_channel->chan_channel = 0;
512 sc_channel->chan_flags = SCSIPI_CHAN_CANGROW | SCSIPI_CHAN_NOSETTLE;
513
514 sc_channel->chan_ntargets = 1;
	/* still 0 here; grown later in sbp_scsipi_scan_target() */
	sc_channel->chan_nluns = target->num_lun;
516 sc_channel->chan_id = 1;
517
518 sc->sc_bus = config_found(sc->sc_fd.dev, sc_channel, scsiprint);
519 if (sc->sc_bus == NULL) {
520 aprint_error_dev(self, "attach failed\n");
521 return;
522 }
523
	/* Reserve a 16-bit address space (4 bytes x 64 units x 256 LUNs) */
525 dv_unit = device_unit(sc->sc_fd.dev);
526 sc->sc_fwb.start = SBP_DEV2ADDR(dv_unit, 0);
527 sc->sc_fwb.end = SBP_DEV2ADDR(dv_unit, -1);
528 mutex_init(&sc->sc_fwb.fwb_mtx, MUTEX_DEFAULT, IPL_VM);
529 /* pre-allocate xfer */
530 STAILQ_INIT(&sc->sc_fwb.xferlist);
531 fw_xferlist_add(&sc->sc_fwb.xferlist, M_SBP,
532 /*send*/ 0, /*recv*/ SBP_RECV_LEN, SBP_NUM_OCB / 2,
533 fc, (void *)sc, sbp_recv);
534 fw_bindadd(fc, &sc->sc_fwb);
535
536 sc->sc_fd.post_busreset = sbp_post_busreset;
537 sc->sc_fd.post_explore = sbp_post_explore;
538
539 if (fc->status != FWBUSNOTREADY) {
540 sbp_post_busreset((void *)sc);
541 sbp_post_explore((void *)sc);
542 }
543 }
544
545 static int
546 sbpdetach(device_t self, int flags)
547 {
548 struct sbp_softc *sc = device_private(self);
549 struct firewire_comm *fc = sc->sc_fd.fc;
550
551 sbp_scsipi_detach_target(&sc->sc_target);
552
553 if (SBP_FWDEV_ALIVE(sc->sc_target.fwdev)) {
554 sbp_logout_all(sc);
555
556 /* XXX wait for logout completion */
557 mutex_enter(&sc->sc_mtx);
558 cv_timedwait_sig(&sc->sc_cv, &sc->sc_mtx, hz/2);
559 mutex_exit(&sc->sc_mtx);
560 }
561
562 sbp_free_target(&sc->sc_target);
563
564 fw_bindremove(fc, &sc->sc_fwb);
565 fw_xferlist_remove(&sc->sc_fwb.xferlist);
566 mutex_destroy(&sc->sc_fwb.fwb_mtx);
567
568 mutex_destroy(&sc->sc_mtx);
569
570 return 0;
571 }
572
573
574 static void
575 sbp_scsipi_request(struct scsipi_channel *channel, scsipi_adapter_req_t req,
576 void *arg)
577 {
578 struct sbp_softc *sc = device_private(channel->chan_adapter->adapt_dev);
579 struct scsipi_xfer *xs = arg;
580 int i;
581
582 SBP_DEBUG(1)
583 printf("Called sbp_scsipi_request\n");
584 END_DEBUG
585
586 switch (req) {
587 case ADAPTER_REQ_RUN_XFER:
588 SBP_DEBUG(1)
589 printf("Got req_run_xfer\n");
590 printf("xs control: 0x%08x, timeout: %d\n",
591 xs->xs_control, xs->timeout);
592 printf("opcode: 0x%02x\n", (int)xs->cmd->opcode);
593 for (i = 0; i < 15; i++)
594 printf("0x%02x ",(int)xs->cmd->bytes[i]);
595 printf("\n");
596 END_DEBUG
597 if (xs->xs_control & XS_CTL_RESET) {
598 SBP_DEBUG(1)
			printf("XS_CTL_RESET not supported\n");
600 END_DEBUG
601 break;
602 }
603 #define SBPSCSI_SBP2_MAX_CDB 12
604 if (xs->cmdlen > SBPSCSI_SBP2_MAX_CDB) {
605 SBP_DEBUG(0)
606 printf(
			"sbp doesn't support CDBs larger than %d bytes\n",
608 SBPSCSI_SBP2_MAX_CDB);
609 END_DEBUG
610 xs->error = XS_DRIVER_STUFFUP;
611 scsipi_done(xs);
612 return;
613 }
614 sbp_action1(sc, xs);
615
616 break;
617 case ADAPTER_REQ_GROW_RESOURCES:
618 SBP_DEBUG(1)
619 printf("Got req_grow_resources\n");
620 END_DEBUG
621 break;
622 case ADAPTER_REQ_SET_XFER_MODE:
623 SBP_DEBUG(1)
624 printf("Got set xfer mode\n");
625 END_DEBUG
626 break;
627 default:
628 panic("Unknown request: %d\n", (int)req);
629 }
630 }
631
632 static void
633 sbp_minphys(struct buf *bp)
634 {
635
636 minphys(bp);
637 }
638
639
640 /*
641 * Display device characteristics on the console
642 */
643 static void
644 sbp_show_sdev_info(struct sbp_dev *sdev)
645 {
646 struct fw_device *fwdev = sdev->target->fwdev;
647 struct sbp_softc *sc = sdev->target->sbp;
648
649 aprint_normal_dev(sc->sc_fd.dev,
650 "ordered:%d type:%d EUI:%08x%08x node:%d speed:%d maxrec:%d\n",
651 (sdev->type & 0x40) >> 6,
652 (sdev->type & 0x1f),
653 fwdev->eui.hi,
654 fwdev->eui.lo,
655 fwdev->dst,
656 fwdev->speed,
657 fwdev->maxrec);
658 aprint_normal_dev(sc->sc_fd.dev, "%s '%s' '%s' '%s'\n",
659 sdev->bustgtlun, sdev->vendor, sdev->product, sdev->revision);
660 }
661
662 static void
663 sbp_alloc_lun(struct sbp_target *target)
664 {
665 struct crom_context cc;
666 struct csrreg *reg;
667 struct sbp_dev *sdev, **newluns;
668 struct sbp_softc *sc;
669 int maxlun, lun, i;
670
671 sc = target->sbp;
672 crom_init_context(&cc, target->fwdev->csrrom);
	/* XXX should parse appropriate unit directories only */
674 maxlun = -1;
675 while (cc.depth >= 0) {
676 reg = crom_search_key(&cc, CROM_LUN);
677 if (reg == NULL)
678 break;
679 lun = reg->val & 0xffff;
680 SBP_DEBUG(0)
681 printf("target %d lun %d found\n", target->target_id, lun);
682 END_DEBUG
683 if (maxlun < lun)
684 maxlun = lun;
685 crom_next(&cc);
686 }
687 if (maxlun < 0)
688 aprint_normal_dev(sc->sc_fd.dev, "%d: no LUN found\n",
689 target->target_id);
690
691 maxlun++;
692 if (maxlun >= SBP_NUM_LUNS)
693 maxlun = SBP_NUM_LUNS;
694
	/* Invalidate stale devices */
696 for (lun = 0; lun < target->num_lun; lun++) {
697 sdev = target->luns[lun];
698 if (sdev == NULL)
699 continue;
700 sdev->flags &= ~VALID_LUN;
701 if (lun >= maxlun) {
702 /* lost device */
703 sbp_scsipi_detach_sdev(sdev);
704 sbp_free_sdev(sdev);
705 target->luns[lun] = NULL;
706 }
707 }
708
709 /* Reallocate */
710 if (maxlun != target->num_lun) {
711 const int sbp_dev_p_sz = sizeof(struct sbp_dev *);
712
713 newluns = kmem_alloc(sbp_dev_p_sz * maxlun, KM_NOSLEEP);
714 if (newluns != NULL)
			memcpy(newluns, target->luns,
716 sizeof(struct sbp_dev *) * target->num_lun);
717 else {
718 aprint_error_dev(sc->sc_fd.dev, "kmem alloc failed\n");
719 newluns = target->luns;
720 maxlun = target->num_lun;
721 }
722
		/*
		 * Zero the extended region; unlike realloc(),
		 * kmem_alloc() gives us an uninitialized buffer.
		 */
727 if (maxlun > target->num_lun)
728 memset(&newluns[target->num_lun], 0,
729 sbp_dev_p_sz * (maxlun - target->num_lun));
730
731 target->luns = newluns;
732 target->num_lun = maxlun;
733 }
734
735 crom_init_context(&cc, target->fwdev->csrrom);
736 while (cc.depth >= 0) {
737 int new = 0;
738
739 reg = crom_search_key(&cc, CROM_LUN);
740 if (reg == NULL)
741 break;
742 lun = reg->val & 0xffff;
743 if (lun >= SBP_NUM_LUNS) {
744 aprint_error_dev(sc->sc_fd.dev, "too large lun %d\n",
745 lun);
746 goto next;
747 }
748
749 sdev = target->luns[lun];
750 if (sdev == NULL) {
751 sdev = kmem_zalloc(sizeof(struct sbp_dev), KM_NOSLEEP);
752 if (sdev == NULL) {
753 aprint_error_dev(sc->sc_fd.dev,
754 "kmem alloc failed\n");
755 goto next;
756 }
757 target->luns[lun] = sdev;
758 sdev->lun_id = lun;
759 sdev->target = target;
760 STAILQ_INIT(&sdev->ocbs);
761 callout_init(&sdev->login_callout, CALLOUT_MPSAFE);
762 callout_setfunc(&sdev->login_callout,
763 sbp_login_callout, sdev);
764 sdev->status = SBP_DEV_RESET;
765 new = 1;
766 snprintf(sdev->bustgtlun, 32, "%s:%d:%d",
767 device_xname(sc->sc_fd.dev),
768 sdev->target->target_id,
769 sdev->lun_id);
770 if (!sc->sc_lwp)
771 if (kthread_create(
772 PRI_NONE, KTHREAD_MPSAFE, NULL,
773 sbp_scsipi_scan_target, &sc->sc_target,
774 &sc->sc_lwp,
775 "sbp%d_attach", device_unit(sc->sc_fd.dev)))
776 aprint_error_dev(sc->sc_fd.dev,
777 "unable to create thread");
778 }
779 sdev->flags |= VALID_LUN;
780 sdev->type = (reg->val & 0xff0000) >> 16;
781
782 if (new == 0)
783 goto next;
784
785 fwdma_alloc_setup(sc->sc_fd.dev, sc->sc_dmat, SBP_DMA_SIZE,
786 &sdev->dma, sizeof(uint32_t), BUS_DMA_NOWAIT);
787 if (sdev->dma.v_addr == NULL) {
788 kmem_free(sdev, sizeof(struct sbp_dev));
789 target->luns[lun] = NULL;
790 goto next;
791 }
792 sdev->ocb = (struct sbp_ocb *)sdev->dma.v_addr;
793 sdev->login = (struct sbp_login_res *)&sdev->ocb[SBP_QUEUE_LEN];
794 memset((char *)sdev->ocb, 0,
795 sizeof(struct sbp_ocb) * SBP_QUEUE_LEN);
796
797 STAILQ_INIT(&sdev->free_ocbs);
798 for (i = 0; i < SBP_QUEUE_LEN; i++) {
799 struct sbp_ocb *ocb = &sdev->ocb[i];
800
801 ocb->index = i;
802 ocb->bus_addr =
803 sdev->dma.bus_addr + sizeof(struct sbp_ocb) * i;
804 if (bus_dmamap_create(sc->sc_dmat, 0x100000,
805 SBP_IND_MAX, SBP_SEG_MAX, 0, 0, &ocb->dmamap)) {
806 aprint_error_dev(sc->sc_fd.dev,
807 "cannot create dmamap %d\n", i);
808 /* XXX */
809 goto next;
810 }
811 sbp_free_ocb(sdev, ocb); /* into free queue */
812 }
813 next:
814 crom_next(&cc);
815 }
816
817 for (lun = 0; lun < target->num_lun; lun++) {
818 sdev = target->luns[lun];
819 if (sdev != NULL && (sdev->flags & VALID_LUN) == 0) {
820 sbp_scsipi_detach_sdev(sdev);
821 sbp_free_sdev(sdev);
822 target->luns[lun] = NULL;
823 }
824 }
825 }
826
827 static struct sbp_target *
828 sbp_alloc_target(struct sbp_softc *sc, struct fw_device *fwdev)
829 {
830 struct sbp_target *target;
831 struct crom_context cc;
832 struct csrreg *reg;
833
834 SBP_DEBUG(1)
835 printf("sbp_alloc_target\n");
836 END_DEBUG
837 /* new target */
838 target = &sc->sc_target;
839 target->sbp = sc;
840 target->fwdev = fwdev;
841 target->target_id = 0;
842 /* XXX we may want to reload mgm port after each bus reset */
843 /* XXX there might be multiple management agents */
844 crom_init_context(&cc, target->fwdev->csrrom);
845 reg = crom_search_key(&cc, CROM_MGM);
846 if (reg == NULL || reg->val == 0) {
847 aprint_error_dev(sc->sc_fd.dev, "NULL management address\n");
848 target->fwdev = NULL;
849 return NULL;
850 }
851 target->mgm_hi = 0xffff;
852 target->mgm_lo = 0xf0000000 | (reg->val << 2);
853 target->mgm_ocb_cur = NULL;
854 SBP_DEBUG(1)
855 printf("target: mgm_port: %x\n", target->mgm_lo);
856 END_DEBUG
857 STAILQ_INIT(&target->xferlist);
858 target->n_xfer = 0;
859 STAILQ_INIT(&target->mgm_ocb_queue);
860 callout_init(&target->mgm_ocb_timeout, CALLOUT_MPSAFE);
861
862 target->luns = NULL;
863 target->num_lun = 0;
864 return target;
865 }
866
867 static void
868 sbp_probe_lun(struct sbp_dev *sdev)
869 {
870 struct fw_device *fwdev;
871 struct crom_context c, *cc = &c;
872 struct csrreg *reg;
873
874 memset(sdev->vendor, 0, sizeof(sdev->vendor));
875 memset(sdev->product, 0, sizeof(sdev->product));
876
877 fwdev = sdev->target->fwdev;
878 crom_init_context(cc, fwdev->csrrom);
879 /* get vendor string */
880 crom_search_key(cc, CSRKEY_VENDOR);
881 crom_next(cc);
882 crom_parse_text(cc, sdev->vendor, sizeof(sdev->vendor));
883 /* skip to the unit directory for SBP-2 */
884 while ((reg = crom_search_key(cc, CSRKEY_VER)) != NULL) {
885 if (reg->val == CSRVAL_T10SBP2)
886 break;
887 crom_next(cc);
888 }
889 /* get firmware revision */
890 reg = crom_search_key(cc, CSRKEY_FIRM_VER);
891 if (reg != NULL)
892 snprintf(sdev->revision, sizeof(sdev->revision), "%06x",
893 reg->val);
894 /* get product string */
895 crom_search_key(cc, CSRKEY_MODEL);
896 crom_next(cc);
897 crom_parse_text(cc, sdev->product, sizeof(sdev->product));
898 }
899
900 static void
901 sbp_login_callout(void *arg)
902 {
903 struct sbp_dev *sdev = (struct sbp_dev *)arg;
904
905 sbp_mgm_orb(sdev, ORB_FUN_LGI, NULL);
906 }
907
908 static void
909 sbp_login(struct sbp_dev *sdev)
910 {
911 struct sbp_softc *sc = sdev->target->sbp;
912 struct timeval delta;
913 struct timeval t;
914 int ticks = 0;
915
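	/*
	 * Schedule the login ORB so that at least login_delay ms have
	 * passed since the last bus reset; if that much time has already
	 * elapsed, ticks stays 0 and the callout fires immediately.
	 */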
916 microtime(&delta);
917 timersub(&delta, &sc->sc_last_busreset, &delta);
918 t.tv_sec = login_delay / 1000;
919 t.tv_usec = (login_delay % 1000) * 1000;
920 timersub(&t, &delta, &t);
921 if (t.tv_sec >= 0 && t.tv_usec > 0)
922 ticks = (t.tv_sec * 1000 + t.tv_usec / 1000) * hz / 1000;
923 SBP_DEBUG(0)
924 printf("%s: sec = %lld usec = %ld ticks = %d\n", __func__,
925 (long long)t.tv_sec, (long)t.tv_usec, ticks);
926 END_DEBUG
927 callout_schedule(&sdev->login_callout, ticks);
928 }
929
930 static void
931 sbp_probe_target(void *arg)
932 {
933 struct sbp_target *target = (struct sbp_target *)arg;
934 struct sbp_dev *sdev;
935 int i;
936
937 SBP_DEBUG(1)
938 printf("%s %d\n", __func__, target->target_id);
939 END_DEBUG
940
941 sbp_alloc_lun(target);
942
943 /* XXX untimeout mgm_ocb and dequeue */
944 for (i = 0; i < target->num_lun; i++) {
945 sdev = target->luns[i];
946 if (sdev == NULL || sdev->status == SBP_DEV_DEAD)
947 continue;
948
949 if (sdev->periph != NULL) {
950 scsipi_periph_freeze(sdev->periph, 1);
951 sdev->freeze++;
952 }
953 sbp_probe_lun(sdev);
954 sbp_show_sdev_info(sdev);
955
956 sbp_abort_all_ocbs(sdev, XS_RESET);
957 switch (sdev->status) {
958 case SBP_DEV_RESET:
959 /* new or revived target */
960 if (auto_login)
961 sbp_login(sdev);
962 break;
963 case SBP_DEV_TOATTACH:
964 case SBP_DEV_PROBE:
965 case SBP_DEV_ATTACHED:
966 case SBP_DEV_RETRY:
967 default:
968 sbp_mgm_orb(sdev, ORB_FUN_RCN, NULL);
969 break;
970 }
971 }
972 }
973
974 static void
975 sbp_post_busreset(void *arg)
976 {
977 struct sbp_softc *sc = (struct sbp_softc *)arg;
978 struct sbp_target *target = &sc->sc_target;
979 struct fw_device *fwdev = target->fwdev;
980 int alive;
981
982 alive = SBP_FWDEV_ALIVE(fwdev);
983 SBP_DEBUG(0)
984 printf("sbp_post_busreset\n");
985 if (!alive)
986 printf("not alive\n");
987 END_DEBUG
988 microtime(&sc->sc_last_busreset);
989
990 if (!alive)
991 return;
992
993 scsipi_channel_freeze(&sc->sc_channel, 1);
994 }
995
996 static void
997 sbp_post_explore(void *arg)
998 {
999 struct sbp_softc *sc = (struct sbp_softc *)arg;
1000 struct sbp_target *target = &sc->sc_target;
1001 struct fw_device *fwdev = target->fwdev;
1002 int alive;
1003
1004 alive = SBP_FWDEV_ALIVE(fwdev);
1005 SBP_DEBUG(0)
1006 printf("sbp_post_explore (sbp_cold=%d)\n", sbp_cold);
1007 if (!alive)
1008 printf("not alive\n");
1009 END_DEBUG
1010 if (!alive)
1011 return;
1012
1013 if (!firewire_phydma_enable)
1014 return;
1015
1016 if (sbp_cold > 0)
1017 sbp_cold--;
1018
1019 SBP_DEBUG(0)
1020 printf("sbp_post_explore: EUI:%08x%08x ", fwdev->eui.hi, fwdev->eui.lo);
1021 END_DEBUG
1022 sbp_probe_target((void *)target);
1023 if (target->num_lun == 0)
1024 sbp_free_target(target);
1025
1026 scsipi_channel_thaw(&sc->sc_channel, 1);
1027 }
1028
1029 #if NEED_RESPONSE
1030 static void
1031 sbp_loginres_callback(struct fw_xfer *xfer)
1032 {
1033 struct sbp_dev *sdev = (struct sbp_dev *)xfer->sc;
1034 struct sbp_softc *sc = sdev->target->sbp;
1035
1036 SBP_DEBUG(1)
1037 printf("sbp_loginres_callback\n");
1038 END_DEBUG
1039 /* recycle */
1040 mutex_enter(&sc->sc_fwb.fwb_mtx);
1041 STAILQ_INSERT_TAIL(&sc->sc_fwb.xferlist, xfer, link);
1042 mutex_exit(&sc->sc_fwb.fwb_mtx);
1043 return;
1044 }
1045 #endif
1046
1047 static inline void
1048 sbp_xfer_free(struct fw_xfer *xfer)
1049 {
1050 struct sbp_dev *sdev = (struct sbp_dev *)xfer->sc;
1051 struct sbp_softc *sc = sdev->target->sbp;
1052
1053 fw_xfer_unload(xfer);
1054 mutex_enter(&sc->sc_mtx);
1055 STAILQ_INSERT_TAIL(&sdev->target->xferlist, xfer, link);
1056 mutex_exit(&sc->sc_mtx);
1057 }
1058
1059 static void
1060 sbp_reset_start_callback(struct fw_xfer *xfer)
1061 {
1062 struct sbp_dev *tsdev, *sdev = (struct sbp_dev *)xfer->sc;
1063 struct sbp_target *target = sdev->target;
1064 int i;
1065
1066 if (xfer->resp != 0)
1067 aprint_error("%s: sbp_reset_start failed: resp=%d\n",
1068 sdev->bustgtlun, xfer->resp);
1069
1070 for (i = 0; i < target->num_lun; i++) {
1071 tsdev = target->luns[i];
1072 if (tsdev != NULL && tsdev->status == SBP_DEV_LOGIN)
1073 sbp_login(tsdev);
1074 }
1075 }
1076
1077 static void
1078 sbp_reset_start(struct sbp_dev *sdev)
1079 {
1080 struct fw_xfer *xfer;
1081 struct fw_pkt *fp;
1082
1083 SBP_DEBUG(0)
1084 printf("%s: sbp_reset_start: %s\n",
1085 device_xname(sdev->target->sbp->sc_fd.dev), sdev->bustgtlun);
1086 END_DEBUG
1087
1088 xfer = sbp_write_cmd(sdev, FWTCODE_WREQQ, 0);
1089 if (xfer == NULL)
1090 return;
1091 xfer->hand = sbp_reset_start_callback;
1092 fp = &xfer->send.hdr;
1093 fp->mode.wreqq.dest_hi = 0xffff;
1094 fp->mode.wreqq.dest_lo = 0xf0000000 | RESET_START;
1095 fp->mode.wreqq.data = htonl(0xf);
1096 if (fw_asyreq(xfer->fc, -1, xfer) != 0)
1097 sbp_xfer_free(xfer);
1098 }
1099
1100 static void
1101 sbp_mgm_callback(struct fw_xfer *xfer)
1102 {
1103 struct sbp_dev *sdev;
1104 int resp;
1105
1106 sdev = (struct sbp_dev *)xfer->sc;
1107
1108 SBP_DEBUG(1)
1109 printf("%s: sbp_mgm_callback: %s\n",
1110 device_xname(sdev->target->sbp->sc_fd.dev), sdev->bustgtlun);
1111 END_DEBUG
1112 resp = xfer->resp;
1113 sbp_xfer_free(xfer);
1114 return;
1115 }
1116
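/*
 * LUN-scan kthread: woken via sc_cv by sbp_scan_dev(), it probes every
 * LUN that is in SBP_DEV_PROBE state through scsipi, records the
 * resulting periph and marks the LUN attached.
 */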
1117 static void
1118 sbp_scsipi_scan_target(void *arg)
1119 {
1120 struct sbp_target *target = (struct sbp_target *)arg;
1121 struct sbp_softc *sc = target->sbp;
1122 struct sbp_dev *sdev;
1123 struct scsipi_channel *chan = &sc->sc_channel;
1124 struct scsibus_softc *sc_bus = device_private(sc->sc_bus);
1125 int lun, yet;
1126
1127 do {
1128 mutex_enter(&sc->sc_mtx);
1129 cv_wait_sig(&sc->sc_cv, &sc->sc_mtx);
1130 mutex_exit(&sc->sc_mtx);
1131 yet = 0;
1132
1133 for (lun = 0; lun < target->num_lun; lun++) {
1134 sdev = target->luns[lun];
1135 if (sdev == NULL)
1136 continue;
1137 if (sdev->status != SBP_DEV_PROBE) {
1138 yet++;
1139 continue;
1140 }
1141
1142 if (sdev->periph == NULL) {
1143 if (chan->chan_nluns < target->num_lun)
1144 chan->chan_nluns = target->num_lun;
1145
1146 scsi_probe_bus(sc_bus, target->target_id,
1147 sdev->lun_id);
1148 sdev->periph = scsipi_lookup_periph(chan,
1149 target->target_id, lun);
1150 }
1151 sdev->status = SBP_DEV_ATTACHED;
1152 }
1153 } while (yet > 0);
1154
1155 sc->sc_lwp = NULL;
1156 kthread_exit(0);
1157
1158 /* NOTREACHED */
1159 }
1160
1161 static inline void
1162 sbp_scan_dev(struct sbp_dev *sdev)
1163 {
1164 struct sbp_softc *sc = sdev->target->sbp;
1165
1166 sdev->status = SBP_DEV_PROBE;
1167 mutex_enter(&sc->sc_mtx);
1168 cv_signal(&sdev->target->sbp->sc_cv);
1169 mutex_exit(&sc->sc_mtx);
1170 }
1171
1172
1173 static void
1174 sbp_do_attach(struct fw_xfer *xfer)
1175 {
1176 struct sbp_dev *sdev;
1177 struct sbp_target *target;
1178 struct sbp_softc *sc;
1179
1180 sdev = (struct sbp_dev *)xfer->sc;
1181 target = sdev->target;
1182 sc = target->sbp;
1183
1184 SBP_DEBUG(0)
1185 printf("%s:%s:%s\n", device_xname(sc->sc_fd.dev), __func__,
1186 sdev->bustgtlun);
1187 END_DEBUG
1188 sbp_xfer_free(xfer);
1189
1190 sbp_scan_dev(sdev);
1191 return;
1192 }
1193
1194 static void
1195 sbp_agent_reset_callback(struct fw_xfer *xfer)
1196 {
1197 struct sbp_dev *sdev = (struct sbp_dev *)xfer->sc;
1198 struct sbp_softc *sc = sdev->target->sbp;
1199
1200 SBP_DEBUG(1)
1201 printf("%s:%s:%s\n", device_xname(sc->sc_fd.dev), __func__,
1202 sdev->bustgtlun);
1203 END_DEBUG
1204 if (xfer->resp != 0)
1205 aprint_error_dev(sc->sc_fd.dev, "%s:%s: resp=%d\n", __func__,
1206 sdev->bustgtlun, xfer->resp);
1207
1208 sbp_xfer_free(xfer);
1209 if (sdev->periph != NULL) {
1210 scsipi_periph_thaw(sdev->periph, sdev->freeze);
1211 scsipi_channel_thaw(&sc->sc_channel, 0);
1212 sdev->freeze = 0;
1213 }
1214 }
1215
1216 static void
1217 sbp_agent_reset(struct sbp_dev *sdev)
1218 {
1219 struct fw_xfer *xfer;
1220 struct fw_pkt *fp;
1221
1222 SBP_DEBUG(0)
1223 printf("%s:%s:%s\n", device_xname(sdev->target->sbp->sc_fd.dev),
1224 __func__, sdev->bustgtlun);
1225 END_DEBUG
1226 xfer = sbp_write_cmd(sdev, FWTCODE_WREQQ, 0x04);
1227 if (xfer == NULL)
1228 return;
1229 if (sdev->status == SBP_DEV_ATTACHED || sdev->status == SBP_DEV_PROBE)
1230 xfer->hand = sbp_agent_reset_callback;
1231 else
1232 xfer->hand = sbp_do_attach;
1233 fp = &xfer->send.hdr;
1234 fp->mode.wreqq.data = htonl(0xf);
1235 if (fw_asyreq(xfer->fc, -1, xfer) != 0)
1236 sbp_xfer_free(xfer);
1237 sbp_abort_all_ocbs(sdev, XS_RESET);
1238 }
1239
1240 static void
1241 sbp_busy_timeout_callback(struct fw_xfer *xfer)
1242 {
1243 struct sbp_dev *sdev = (struct sbp_dev *)xfer->sc;
1244
1245 SBP_DEBUG(1)
1246 printf("%s:%s:%s\n", device_xname(sdev->target->sbp->sc_fd.dev),
1247 __func__, sdev->bustgtlun);
1248 END_DEBUG
1249 sbp_xfer_free(xfer);
1250 sbp_agent_reset(sdev);
1251 }
1252
1253 static void
1254 sbp_busy_timeout(struct sbp_dev *sdev)
1255 {
1256 struct fw_pkt *fp;
1257 struct fw_xfer *xfer;
1258
1259 SBP_DEBUG(0)
1260 printf("%s:%s:%s\n", device_xname(sdev->target->sbp->sc_fd.dev),
1261 __func__, sdev->bustgtlun);
1262 END_DEBUG
1263 xfer = sbp_write_cmd(sdev, FWTCODE_WREQQ, 0);
1264 if (xfer == NULL)
1265 return;
1266 xfer->hand = sbp_busy_timeout_callback;
1267 fp = &xfer->send.hdr;
1268 fp->mode.wreqq.dest_hi = 0xffff;
1269 fp->mode.wreqq.dest_lo = 0xf0000000 | BUSY_TIMEOUT;
1270 fp->mode.wreqq.data = htonl((1 << (13+12)) | 0xf);
1271 if (fw_asyreq(xfer->fc, -1, xfer) != 0)
1272 sbp_xfer_free(xfer);
1273 }
1274
1275 static void
1276 sbp_orb_pointer_callback(struct fw_xfer *xfer)
1277 {
1278 struct sbp_dev *sdev = (struct sbp_dev *)xfer->sc;
1279 struct sbp_softc *sc = sdev->target->sbp;
1280
1281 SBP_DEBUG(1)
1282 printf("%s:%s:%s\n", device_xname(sc->sc_fd.dev), __func__,
1283 sdev->bustgtlun);
1284 END_DEBUG
1285 if (xfer->resp != 0)
1286 aprint_error_dev(sc->sc_fd.dev, "%s:%s: xfer->resp = %d\n",
1287 __func__, sdev->bustgtlun, xfer->resp);
1288 sbp_xfer_free(xfer);
1289 sdev->flags &= ~ORB_POINTER_ACTIVE;
1290
1291 if ((sdev->flags & ORB_POINTER_NEED) != 0) {
1292 struct sbp_ocb *ocb;
1293
1294 sdev->flags &= ~ORB_POINTER_NEED;
1295 ocb = STAILQ_FIRST(&sdev->ocbs);
1296 if (ocb != NULL)
1297 sbp_orb_pointer(sdev, ocb);
1298 }
1299 return;
1300 }
1301
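/*
 * Write the address of the next command ORB into the fetch agent's
 * ORB_POINTER register (offset 0x08).  Only one such write is kept in
 * flight: if one is already active, ORB_POINTER_NEED is set and the
 * callback above re-issues the write for the head of the OCB queue.
 */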
1302 static void
1303 sbp_orb_pointer(struct sbp_dev *sdev, struct sbp_ocb *ocb)
1304 {
1305 struct sbp_softc *sc = sdev->target->sbp;
1306 struct fw_xfer *xfer;
1307 struct fw_pkt *fp;
1308
1309 SBP_DEBUG(1)
1310 printf("%s:%s:%s: 0x%08x\n", device_xname(sc->sc_fd.dev), __func__,
1311 sdev->bustgtlun, (uint32_t)ocb->bus_addr);
1312 END_DEBUG
1313
1314 if ((sdev->flags & ORB_POINTER_ACTIVE) != 0) {
1315 SBP_DEBUG(0)
1316 printf("%s: orb pointer active\n", __func__);
1317 END_DEBUG
1318 sdev->flags |= ORB_POINTER_NEED;
1319 return;
1320 }
1321
1322 sdev->flags |= ORB_POINTER_ACTIVE;
1323 xfer = sbp_write_cmd(sdev, FWTCODE_WREQB, 0x08);
1324 if (xfer == NULL)
1325 return;
1326 xfer->hand = sbp_orb_pointer_callback;
1327
1328 fp = &xfer->send.hdr;
1329 fp->mode.wreqb.len = 8;
1330 fp->mode.wreqb.extcode = 0;
1331 xfer->send.payload[0] =
1332 htonl(((sc->sc_fd.fc->nodeid | FWLOCALBUS) << 16));
1333 xfer->send.payload[1] = htonl((uint32_t)ocb->bus_addr);
1334
1335 if (fw_asyreq(xfer->fc, -1, xfer) != 0) {
1336 sbp_xfer_free(xfer);
1337 ocb->xs->error = XS_DRIVER_STUFFUP;
1338 scsipi_done(ocb->xs);
1339 }
1340 }
1341
1342 static void
1343 sbp_doorbell_callback(struct fw_xfer *xfer)
1344 {
1345 struct sbp_dev *sdev = (struct sbp_dev *)xfer->sc;
1346 struct sbp_softc *sc = sdev->target->sbp;
1347
1348 SBP_DEBUG(1)
1349 printf("%s:%s:%s\n", device_xname(sc->sc_fd.dev), __func__,
1350 sdev->bustgtlun);
1351 END_DEBUG
1352 if (xfer->resp != 0) {
1353 aprint_error_dev(sc->sc_fd.dev, "%s: xfer->resp = %d\n",
1354 __func__, xfer->resp);
1355 }
1356 sbp_xfer_free(xfer);
1357 sdev->flags &= ~ORB_DOORBELL_ACTIVE;
1358 if ((sdev->flags & ORB_DOORBELL_NEED) != 0) {
1359 sdev->flags &= ~ORB_DOORBELL_NEED;
1360 sbp_doorbell(sdev);
1361 }
1362 return;
1363 }
1364
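/*
 * Ring the fetch agent's DOORBELL register (offset 0x10) so it
 * re-fetches the linked ORB list; the same ACTIVE/NEED handshake as
 * for the ORB pointer keeps only one doorbell write outstanding.
 */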
1365 static void
1366 sbp_doorbell(struct sbp_dev *sdev)
1367 {
1368 struct fw_xfer *xfer;
1369 struct fw_pkt *fp;
1370
1371 SBP_DEBUG(1)
1372 printf("%s:%s:%s\n", device_xname(sdev->target->sbp->sc_fd.dev),
1373 __func__, sdev->bustgtlun);
1374 END_DEBUG
1375
1376 if ((sdev->flags & ORB_DOORBELL_ACTIVE) != 0) {
1377 sdev->flags |= ORB_DOORBELL_NEED;
1378 return;
1379 }
1380 sdev->flags |= ORB_DOORBELL_ACTIVE;
1381 xfer = sbp_write_cmd(sdev, FWTCODE_WREQQ, 0x10);
1382 if (xfer == NULL)
1383 return;
1384 xfer->hand = sbp_doorbell_callback;
1385 fp = &xfer->send.hdr;
1386 fp->mode.wreqq.data = htonl(0xf);
1387 if (fw_asyreq(xfer->fc, -1, xfer) != 0)
1388 sbp_xfer_free(xfer);
1389 }
1390
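/*
 * Get a cached fw_xfer for this target (allocating a new one while
 * fewer than the per-target cap exist) and pre-fill a write request
 * header addressed at the logged-in LUN's command block agent
 * register, cmd_hi:cmd_lo + offset.
 */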
1391 static struct fw_xfer *
1392 sbp_write_cmd(struct sbp_dev *sdev, int tcode, int offset)
1393 {
1394 struct sbp_softc *sc;
1395 struct fw_xfer *xfer;
1396 struct fw_pkt *fp;
1397 struct sbp_target *target;
1398 int new = 0;
1399
1400 target = sdev->target;
1401 sc = target->sbp;
1402 mutex_enter(&sc->sc_mtx);
1403 xfer = STAILQ_FIRST(&target->xferlist);
1404 if (xfer == NULL) {
1405 if (target->n_xfer > 5 /* XXX */) {
1406 aprint_error_dev(sc->sc_fd.dev,
1407 "no more xfer for this target\n");
1408 mutex_exit(&sc->sc_mtx);
1409 return NULL;
1410 }
1411 xfer = fw_xfer_alloc_buf(M_SBP, 8, 0);
1412 if (xfer == NULL) {
1413 aprint_error_dev(sc->sc_fd.dev,
1414 "fw_xfer_alloc_buf failed\n");
1415 mutex_exit(&sc->sc_mtx);
1416 return NULL;
1417 }
1418 target->n_xfer++;
1419 SBP_DEBUG(0)
1420 printf("sbp: alloc %d xfer\n", target->n_xfer);
1421 END_DEBUG
1422 new = 1;
1423 } else
1424 STAILQ_REMOVE_HEAD(&target->xferlist, link);
1425 mutex_exit(&sc->sc_mtx);
1426
1427 microtime(&xfer->tv);
1428
1429 if (new) {
1430 xfer->recv.pay_len = 0;
1431 xfer->send.spd = min(target->fwdev->speed, max_speed);
1432 xfer->fc = target->sbp->sc_fd.fc;
1433 }
1434
1435 if (tcode == FWTCODE_WREQB)
1436 xfer->send.pay_len = 8;
1437 else
1438 xfer->send.pay_len = 0;
1439
1440 xfer->sc = (void *)sdev;
1441 fp = &xfer->send.hdr;
1442 fp->mode.wreqq.dest_hi = sdev->login->cmd_hi;
1443 fp->mode.wreqq.dest_lo = sdev->login->cmd_lo + offset;
1444 fp->mode.wreqq.tlrt = 0;
1445 fp->mode.wreqq.tcode = tcode;
1446 fp->mode.wreqq.pri = 0;
1447 fp->mode.wreqq.dst = FWLOCALBUS | target->fwdev->dst;
1448
1449 return xfer;
1450 }
1451
1452 static void
1453 sbp_mgm_orb(struct sbp_dev *sdev, int func, struct sbp_ocb *aocb)
1454 {
1455 struct fw_xfer *xfer;
1456 struct fw_pkt *fp;
1457 struct sbp_ocb *ocb;
1458 struct sbp_target *target;
1459 int nid, dv_unit;
1460
1461 target = sdev->target;
1462 nid = target->sbp->sc_fd.fc->nodeid | FWLOCALBUS;
1463 dv_unit = device_unit(target->sbp->sc_fd.dev);
1464
1465 mutex_enter(&target->sbp->sc_mtx);
1466 if (func == ORB_FUN_RUNQUEUE) {
1467 ocb = STAILQ_FIRST(&target->mgm_ocb_queue);
1468 if (target->mgm_ocb_cur != NULL || ocb == NULL) {
1469 mutex_exit(&target->sbp->sc_mtx);
1470 return;
1471 }
1472 STAILQ_REMOVE_HEAD(&target->mgm_ocb_queue, ocb);
1473 mutex_exit(&target->sbp->sc_mtx);
1474 goto start;
1475 }
1476 if ((ocb = sbp_get_ocb(sdev)) == NULL) {
1477 mutex_exit(&target->sbp->sc_mtx);
1478 /* XXX */
1479 return;
1480 }
1481 mutex_exit(&target->sbp->sc_mtx);
1482 ocb->flags = OCB_ACT_MGM;
1483 ocb->sdev = sdev;
1484
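	/*
	 * The management ORB carries the 64-bit status FIFO address in
	 * orb[6]/orb[7]: our node ID and SBP_BIND_HI in the upper 32 bits
	 * and the SBP_DEV2ADDR() offset registered with fw_bindadd() in
	 * the lower 32 bits, so the target's status write ends up in
	 * sbp_recv().
	 */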
1485 memset(ocb->orb, 0, sizeof(ocb->orb));
1486 ocb->orb[6] = htonl((nid << 16) | SBP_BIND_HI);
1487 ocb->orb[7] = htonl(SBP_DEV2ADDR(dv_unit, sdev->lun_id));
1488
1489 SBP_DEBUG(0)
1490 printf("%s:%s:%s: %s\n", device_xname(sdev->target->sbp->sc_fd.dev),
1491 __func__, sdev->bustgtlun, orb_fun_name[(func>>16)&0xf]);
1492 END_DEBUG
1493 switch (func) {
1494 case ORB_FUN_LGI:
1495 {
1496 const off_t sbp_login_off =
1497 sizeof(struct sbp_ocb) * SBP_QUEUE_LEN;
1498
1499 ocb->orb[0] = ocb->orb[1] = 0; /* password */
1500 ocb->orb[2] = htonl(nid << 16);
1501 ocb->orb[3] = htonl(sdev->dma.bus_addr + sbp_login_off);
1502 ocb->orb[4] = htonl(ORB_NOTIFY | sdev->lun_id);
1503 if (ex_login)
1504 ocb->orb[4] |= htonl(ORB_EXV);
1505 ocb->orb[5] = htonl(SBP_LOGIN_SIZE);
1506 bus_dmamap_sync(sdev->dma.dma_tag, sdev->dma.dma_map,
1507 sbp_login_off, SBP_LOGIN_SIZE, BUS_DMASYNC_PREREAD);
1508 break;
1509 }
1510
1511 case ORB_FUN_ATA:
1512 ocb->orb[0] = htonl((0 << 16) | 0);
1513 ocb->orb[1] = htonl(aocb->bus_addr & 0xffffffff);
1514 /* fall through */
1515 case ORB_FUN_RCN:
1516 case ORB_FUN_LGO:
1517 case ORB_FUN_LUR:
1518 case ORB_FUN_RST:
1519 case ORB_FUN_ATS:
1520 ocb->orb[4] = htonl(ORB_NOTIFY | func | sdev->login->id);
1521 break;
1522 }
1523
1524 if (target->mgm_ocb_cur != NULL) {
1525 /* there is a standing ORB */
1526 mutex_enter(&target->sbp->sc_mtx);
1527 STAILQ_INSERT_TAIL(&sdev->target->mgm_ocb_queue, ocb, ocb);
1528 mutex_exit(&target->sbp->sc_mtx);
1529 return;
1530 }
1531 start:
1532 target->mgm_ocb_cur = ocb;
1533
1534 callout_reset(&target->mgm_ocb_timeout, 5 * hz, sbp_mgm_timeout, ocb);
1535 xfer = sbp_write_cmd(sdev, FWTCODE_WREQB, 0);
1536 if (xfer == NULL)
1537 return;
1538 xfer->hand = sbp_mgm_callback;
1539
1540 fp = &xfer->send.hdr;
1541 fp->mode.wreqb.dest_hi = sdev->target->mgm_hi;
1542 fp->mode.wreqb.dest_lo = sdev->target->mgm_lo;
1543 fp->mode.wreqb.len = 8;
1544 fp->mode.wreqb.extcode = 0;
1545 xfer->send.payload[0] = htonl(nid << 16);
1546 xfer->send.payload[1] = htonl(ocb->bus_addr & 0xffffffff);
1547
	/* cache writeback & invalidate (required for ORB_FUN_LGI) */
	/* XXX on abort_ocb, should we also do the POST sync? */
1550 SBP_ORB_DMA_SYNC(sdev->dma, ocb->index, BUS_DMASYNC_PREWRITE);
1551 if (fw_asyreq(xfer->fc, -1, xfer) != 0)
1552 sbp_xfer_free(xfer);
1553 }
1554
1555 static void
1556 sbp_print_scsi_cmd(struct sbp_ocb *ocb)
1557 {
1558 struct scsipi_xfer *xs = ocb->xs;
1559
1560 printf("%s:%d:%d:"
1561 " cmd: %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x,"
1562 " flags: 0x%02x, %db cmd/%db data\n",
1563 device_xname(ocb->sdev->target->sbp->sc_fd.dev),
1564 xs->xs_periph->periph_target,
1565 xs->xs_periph->periph_lun,
1566 xs->cmd->opcode,
1567 xs->cmd->bytes[0], xs->cmd->bytes[1],
1568 xs->cmd->bytes[2], xs->cmd->bytes[3],
1569 xs->cmd->bytes[4], xs->cmd->bytes[5],
1570 xs->cmd->bytes[6], xs->cmd->bytes[7],
1571 xs->cmd->bytes[8],
1572 xs->xs_control & (XS_CTL_DATA_IN | XS_CTL_DATA_OUT),
1573 xs->cmdlen, xs->datalen);
1574 }
1575
1576 static void
1577 sbp_scsi_status(struct sbp_status *sbp_status, struct sbp_ocb *ocb)
1578 {
1579 struct sbp_cmd_status *sbp_cmd_status;
1580 struct scsi_sense_data *sense = &ocb->xs->sense.scsi_sense;
1581
1582 sbp_cmd_status = (struct sbp_cmd_status *)sbp_status->data;
1583
1584 SBP_DEBUG(0)
1585 sbp_print_scsi_cmd(ocb);
	/* XXX need to decode status */
1587 printf("%s:"
1588 " SCSI status %x sfmt %x valid %x key %x code %x qlfr %x len %d\n",
1589 ocb->sdev->bustgtlun,
1590 sbp_cmd_status->status,
1591 sbp_cmd_status->sfmt,
1592 sbp_cmd_status->valid,
1593 sbp_cmd_status->s_key,
1594 sbp_cmd_status->s_code,
1595 sbp_cmd_status->s_qlfr,
1596 sbp_status->len);
1597 END_DEBUG
1598
1599 switch (sbp_cmd_status->status) {
1600 case SCSI_CHECK:
1601 case SCSI_BUSY:
1602 case SCSI_TERMINATED:
1603 if (sbp_cmd_status->sfmt == SBP_SFMT_CURR)
1604 sense->response_code = SSD_RCODE_CURRENT;
1605 else
1606 sense->response_code = SSD_RCODE_DEFERRED;
1607 if (sbp_cmd_status->valid)
1608 sense->response_code |= SSD_RCODE_VALID;
1609 sense->flags = sbp_cmd_status->s_key;
1610 if (sbp_cmd_status->mark)
1611 sense->flags |= SSD_FILEMARK;
1612 if (sbp_cmd_status->eom)
1613 sense->flags |= SSD_EOM;
1614 if (sbp_cmd_status->ill_len)
1615 sense->flags |= SSD_ILI;
1616
1617 memcpy(sense->info, &sbp_cmd_status->info, 4);
1618
1619 if (sbp_status->len <= 1)
			/* XXX not a SCSI status; shouldn't happen */
1621 sense->extra_len = 0;
1622 else if (sbp_status->len <= 4)
1623 /* add_sense_code(_qual), info, cmd_spec_info */
1624 sense->extra_len = 6;
1625 else
1626 /* fru, sense_key_spec */
1627 sense->extra_len = 10;
1628
1629 memcpy(sense->csi, &sbp_cmd_status->cdb, 4);
1630
1631 sense->asc = sbp_cmd_status->s_code;
1632 sense->ascq = sbp_cmd_status->s_qlfr;
1633 sense->fru = sbp_cmd_status->fru;
1634
1635 memcpy(sense->sks.sks_bytes, sbp_cmd_status->s_keydep, 3);
1636 ocb->xs->error = XS_SENSE;
1637 ocb->xs->xs_status = sbp_cmd_status->status;
1638 /*
1639 {
1640 uint8_t j, *tmp;
1641 tmp = sense;
1642 for (j = 0; j < 32; j += 8)
1643 aprint_normal(
1644 "sense %02x%02x %02x%02x %02x%02x %02x%02x\n",
1645 tmp[j], tmp[j+1], tmp[j+2], tmp[j+3],
1646 tmp[j+4], tmp[j+5], tmp[j+6], tmp[j+7]);
1647
1648 }
1649 */
1650 break;
1651 default:
1652 aprint_error_dev(ocb->sdev->target->sbp->sc_fd.dev,
1653 "%s:%s: unknown scsi status 0x%x\n",
1654 __func__, ocb->sdev->bustgtlun, sbp_cmd_status->status);
1655 }
1656 }
1657
1658 static void
1659 sbp_fix_inq_data(struct sbp_ocb *ocb)
1660 {
1661 struct scsipi_xfer *xs = ocb->xs;
1662 struct sbp_dev *sdev;
1663 struct scsipi_inquiry_data *inq =
1664 (struct scsipi_inquiry_data *)xs->data;
1665
1666 sdev = ocb->sdev;
1667
1668 #if 0
1669 /*
	 * NetBSD always assumes 0 for the EVPD bit and 'Page Code'.
1671 */
1672 #define SI_EVPD 0x01
1673 if (xs->cmd->bytes[0] & SI_EVPD)
1674 return;
1675 #endif
1676 SBP_DEBUG(1)
1677 printf("%s:%s:%s\n", device_xname(sdev->target->sbp->sc_fd.dev),
1678 __func__, sdev->bustgtlun);
1679 END_DEBUG
1680 switch (inq->device & SID_TYPE) {
1681 case T_DIRECT:
1682 #if 0
1683 /*
1684 * XXX Convert Direct Access device to RBC.
1685 * I've never seen FireWire DA devices which support READ_6.
1686 */
1687 if ((inq->device & SID_TYPE) == T_DIRECT)
1688 inq->device |= T_SIMPLE_DIRECT; /* T_DIRECT == 0 */
1689 #endif
1690 /* FALLTHROUGH */
1691
1692 case T_SIMPLE_DIRECT:
1693 /*
1694 * Override vendor/product/revision information.
1695 * Some devices sometimes return strange strings.
1696 */
1697 #if 1
1698 memcpy(inq->vendor, sdev->vendor, sizeof(inq->vendor));
1699 memcpy(inq->product, sdev->product, sizeof(inq->product));
1700 memcpy(inq->revision + 2, sdev->revision,
1701 sizeof(inq->revision));
1702 #endif
1703 break;
1704 }
1705 /*
1706 * Force to enable/disable tagged queuing.
1707 * XXX CAM also checks SCP_QUEUE_DQUE flag in the control mode page.
1708 */
1709 if (sbp_tags > 0)
1710 inq->flags3 |= SID_CmdQue;
1711 else if (sbp_tags < 0)
1712 inq->flags3 &= ~SID_CmdQue;
1713
1714 }
1715
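/*
 * Status FIFO write handler: the block write from the target is decoded
 * as a struct sbp_status, matched against the pending management OCB or
 * a queued command OCB by ORB address, and the login response or SCSI
 * status is processed; a dead fetch agent triggers an agent reset
 * before the xfer is recycled.
 */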
1716 static void
1717 sbp_recv(struct fw_xfer *xfer)
1718 {
1719 struct fw_pkt *rfp;
1720 #if NEED_RESPONSE
1721 struct fw_pkt *sfp;
1722 #endif
1723 struct sbp_softc *sc;
1724 struct sbp_dev *sdev;
1725 struct sbp_ocb *ocb;
1726 struct sbp_login_res *login_res = NULL;
1727 struct sbp_status *sbp_status;
1728 struct sbp_target *target;
1729 int orb_fun, status_valid0, status_valid, l, reset_agent = 0;
1730 uint32_t addr;
1731 /*
1732 uint32_t *ld;
1733 ld = xfer->recv.buf;
1734 printf("sbp %x %d %d %08x %08x %08x %08x\n",
1735 xfer->resp, xfer->recv.len, xfer->recv.off, ntohl(ld[0]), ntohl(ld[1]), ntohl(ld[2]), ntohl(ld[3]));
1736 printf("sbp %08x %08x %08x %08x\n", ntohl(ld[4]), ntohl(ld[5]), ntohl(ld[6]), ntohl(ld[7]));
1737 printf("sbp %08x %08x %08x %08x\n", ntohl(ld[8]), ntohl(ld[9]), ntohl(ld[10]), ntohl(ld[11]));
1738 */
1739
1740 sc = (struct sbp_softc *)xfer->sc;
1741 if (xfer->resp != 0) {
1742 aprint_error_dev(sc->sc_fd.dev,
1743 "sbp_recv: xfer->resp = %d\n", xfer->resp);
1744 goto done0;
1745 }
1746 if (xfer->recv.payload == NULL) {
1747 aprint_error_dev(sc->sc_fd.dev,
1748 "sbp_recv: xfer->recv.payload == NULL\n");
1749 goto done0;
1750 }
1751 rfp = &xfer->recv.hdr;
1752 if (rfp->mode.wreqb.tcode != FWTCODE_WREQB) {
1753 aprint_error_dev(sc->sc_fd.dev,
1754 "sbp_recv: tcode = %d\n", rfp->mode.wreqb.tcode);
1755 goto done0;
1756 }
1757 sbp_status = (struct sbp_status *)xfer->recv.payload;
1758 addr = rfp->mode.wreqb.dest_lo;
1759 SBP_DEBUG(2)
1760 printf("received address 0x%x\n", addr);
1761 END_DEBUG
1762 target = &sc->sc_target;
1763 l = SBP_ADDR2LUN(addr);
1764 if (l >= target->num_lun || target->luns[l] == NULL) {
1765 aprint_error_dev(sc->sc_fd.dev,
		    "sbp_recv: invalid lun %d (target=%d)\n",
1767 l, target->target_id);
1768 goto done0;
1769 }
1770 sdev = target->luns[l];
1771
1772 ocb = NULL;
1773 switch (sbp_status->src) {
1774 case SRC_NEXT_EXISTS:
1775 case SRC_NO_NEXT:
1776 /* check mgm_ocb_cur first */
1777 ocb = target->mgm_ocb_cur;
1778 if (ocb != NULL)
1779 if (OCB_MATCH(ocb, sbp_status)) {
1780 callout_stop(&target->mgm_ocb_timeout);
1781 target->mgm_ocb_cur = NULL;
1782 break;
1783 }
1784 ocb = sbp_dequeue_ocb(sdev, sbp_status);
1785 if (ocb == NULL)
1786 aprint_error_dev(sc->sc_fd.dev,
1787 "%s:%s: No ocb(%x) on the queue\n", __func__,
1788 sdev->bustgtlun, ntohl(sbp_status->orb_lo));
1789 break;
1790 case SRC_UNSOL:
		/* unsolicited */
		aprint_error_dev(sc->sc_fd.dev,
		    "%s:%s: unsolicited status received\n",
1794 __func__, sdev->bustgtlun);
1795 break;
1796 default:
1797 aprint_error_dev(sc->sc_fd.dev,
1798 "%s:%s: unknown sbp_status->src\n",
1799 __func__, sdev->bustgtlun);
1800 }
1801
1802 status_valid0 = (sbp_status->src < 2
1803 && sbp_status->resp == SBP_REQ_CMP
1804 && sbp_status->dead == 0);
1805 status_valid = (status_valid0 && sbp_status->status == 0);
1806
1807 if (!status_valid0 || debug > 2) {
1808 int status;
1809 SBP_DEBUG(0)
1810 printf("%s:%s:%s: ORB status src:%x resp:%x dead:%x"
1811 " len:%x stat:%x orb:%x%08x\n",
1812 device_xname(sc->sc_fd.dev), __func__, sdev->bustgtlun,
1813 sbp_status->src, sbp_status->resp, sbp_status->dead,
1814 sbp_status->len, sbp_status->status,
1815 ntohs(sbp_status->orb_hi), ntohl(sbp_status->orb_lo));
1816 END_DEBUG
1817 printf("%s:%s\n", device_xname(sc->sc_fd.dev), sdev->bustgtlun);
1818 status = sbp_status->status;
1819 switch (sbp_status->resp) {
1820 case SBP_REQ_CMP:
1821 if (status > MAX_ORB_STATUS0)
1822 printf("%s\n", orb_status0[MAX_ORB_STATUS0]);
1823 else
1824 printf("%s\n", orb_status0[status]);
1825 break;
1826 case SBP_TRANS_FAIL:
1827 printf("Obj: %s, Error: %s\n",
1828 orb_status1_object[(status>>6) & 3],
1829 orb_status1_serial_bus_error[status & 0xf]);
1830 break;
1831 case SBP_ILLE_REQ:
1832 printf("Illegal request\n");
1833 break;
1834 case SBP_VEND_DEP:
1835 printf("Vendor dependent\n");
1836 break;
1837 default:
			printf("unknown response code %d\n", sbp_status->resp);
1839 }
1840 }
1841
1842 /* we have to reset the fetch agent if it's dead */
1843 if (sbp_status->dead) {
1844 if (sdev->periph != NULL) {
1845 scsipi_periph_freeze(sdev->periph, 1);
1846 sdev->freeze++;
1847 }
1848 reset_agent = 1;
1849 }
1850
1851 if (ocb == NULL)
1852 goto done;
1853
1854 switch (ntohl(ocb->orb[4]) & ORB_FMT_MSK) {
1855 case ORB_FMT_NOP:
1856 break;
1857 case ORB_FMT_VED:
1858 break;
1859 case ORB_FMT_STD:
1860 switch (ocb->flags) {
1861 case OCB_ACT_MGM:
1862 orb_fun = ntohl(ocb->orb[4]) & ORB_FUN_MSK;
1863 reset_agent = 0;
1864 switch (orb_fun) {
1865 case ORB_FUN_LGI:
1866 {
1867 const struct fwdma_alloc *dma = &sdev->dma;
1868 const off_t sbp_login_off =
1869 sizeof(struct sbp_ocb) * SBP_QUEUE_LEN;
1870
1871 bus_dmamap_sync(dma->dma_tag, dma->dma_map,
1872 sbp_login_off, SBP_LOGIN_SIZE,
1873 BUS_DMASYNC_POSTREAD);
1874 login_res = sdev->login;
1875 login_res->len = ntohs(login_res->len);
1876 login_res->id = ntohs(login_res->id);
1877 login_res->cmd_hi = ntohs(login_res->cmd_hi);
1878 login_res->cmd_lo = ntohl(login_res->cmd_lo);
1879 if (status_valid) {
1880 SBP_DEBUG(0)
1881 printf("%s:%s:%s: login:"
1882 " len %d, ID %d, cmd %08x%08x,"
1883 " recon_hold %d\n",
1884 device_xname(sc->sc_fd.dev),
1885 __func__, sdev->bustgtlun,
1886 login_res->len, login_res->id,
1887 login_res->cmd_hi,
1888 login_res->cmd_lo,
1889 ntohs(login_res->recon_hold));
1890 END_DEBUG
1891 sbp_busy_timeout(sdev);
1892 } else {
1893 /* forgot logout? */
1894 aprint_error_dev(sc->sc_fd.dev,
1895 "%s:%s: login failed\n",
1896 __func__, sdev->bustgtlun);
1897 sdev->status = SBP_DEV_RESET;
1898 }
1899 break;
1900 }
1901 case ORB_FUN_RCN:
1902 login_res = sdev->login;
1903 if (status_valid) {
1904 SBP_DEBUG(0)
1905 printf("%s:%s:%s: reconnect:"
1906 " len %d, ID %d, cmd %08x%08x\n",
1907 device_xname(sc->sc_fd.dev),
1908 __func__, sdev->bustgtlun,
1909 login_res->len, login_res->id,
1910 login_res->cmd_hi,
1911 login_res->cmd_lo);
1912 END_DEBUG
1913 sbp_agent_reset(sdev);
1914 } else {
				/* reconnection hold time exceeded? */
1916 SBP_DEBUG(0)
1917 aprint_error_dev(sc->sc_fd.dev,
1918 "%s:%s: reconnect failed\n",
1919 __func__, sdev->bustgtlun);
1920 END_DEBUG
1921 sbp_login(sdev);
1922 }
1923 break;
1924 case ORB_FUN_LGO:
1925 sdev->status = SBP_DEV_RESET;
1926 break;
1927 case ORB_FUN_RST:
1928 sbp_busy_timeout(sdev);
1929 break;
1930 case ORB_FUN_LUR:
1931 case ORB_FUN_ATA:
1932 case ORB_FUN_ATS:
1933 sbp_agent_reset(sdev);
1934 break;
1935 default:
1936 aprint_error_dev(sc->sc_fd.dev,
1937 "%s:%s: unknown function %d\n",
1938 __func__, sdev->bustgtlun, orb_fun);
1939 break;
1940 }
1941 sbp_mgm_orb(sdev, ORB_FUN_RUNQUEUE, NULL);
1942 break;
1943 case OCB_ACT_CMD:
1944 sdev->timeout = 0;
1945 if (ocb->xs != NULL) {
1946 struct scsipi_xfer *xs = ocb->xs;
1947
1948 if (sbp_status->len > 1)
1949 sbp_scsi_status(sbp_status, ocb);
1950 else
1951 if (sbp_status->resp != SBP_REQ_CMP)
1952 xs->error = XS_DRIVER_STUFFUP;
1953 else {
1954 xs->error = XS_NOERROR;
1955 xs->resid = 0;
1956 }
1957 /* fix up inq data */
1958 if (xs->cmd->opcode == INQUIRY)
1959 sbp_fix_inq_data(ocb);
1960 scsipi_done(xs);
1961 }
1962 break;
1963 default:
1964 break;
1965 }
1966 }
1967
1968 if (!use_doorbell)
1969 sbp_free_ocb(sdev, ocb);
1970 done:
1971 if (reset_agent)
1972 sbp_agent_reset(sdev);
1973
1974 done0:
1975 xfer->recv.pay_len = SBP_RECV_LEN;
1976 /* The received packet is usually small enough to be stored within
1977 * the buffer. In that case the controller returns ack_complete and
1978 * no response is necessary.
1979 *
1980 * XXX fwohci.c and firewire.c should pass the event code (such as
1981 * ack_complete or ack_pending) up to the upper-layer driver.
1982 */
1983 #if NEED_RESPONSE
1984 xfer->send.off = 0;
1985 sfp = (struct fw_pkt *)xfer->send.buf;
1986 sfp->mode.wres.dst = rfp->mode.wreqb.src;
1987 xfer->dst = sfp->mode.wres.dst;
1988 xfer->spd = min(sdev->target->fwdev->speed, max_speed);
1989 xfer->hand = sbp_loginres_callback;
1990
1991 sfp->mode.wres.tlrt = rfp->mode.wreqb.tlrt;
1992 sfp->mode.wres.tcode = FWTCODE_WRES;
1993 sfp->mode.wres.rtcode = 0;
1994 sfp->mode.wres.pri = 0;
1995
1996 if (fw_asyreq(xfer->fc, -1, xfer) != 0) {
1997 aprint_error_dev(sc->sc_fd.dev, "write response failed\n");
1998 mutex_enter(&sc->sc_fwb.fwb_mtx);
1999 STAILQ_INSERT_TAIL(&sc->sc_fwb.xferlist, xfer, link);
2000 mutex_exit(&sc->sc_fwb.fwb_mtx);
2001 }
2002 #else
2003 /* recycle */
2004 mutex_enter(&sc->sc_fwb.fwb_mtx);
2005 STAILQ_INSERT_TAIL(&sc->sc_fwb.xferlist, xfer, link);
2006 mutex_exit(&sc->sc_fwb.fwb_mtx);
2007 #endif
2008
2009 return;
2010
2011 }
2012
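/*
 * Walk every LUN of the target and queue a LOGOUT management ORB for
 * each device that is logged in or in the process of attaching; any
 * pending login callout is cancelled first.
 */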
2013 static int
2014 sbp_logout_all(struct sbp_softc *sbp)
2015 {
2016 struct sbp_target *target;
2017 struct sbp_dev *sdev;
2018 int i;
2019
2020 SBP_DEBUG(0)
2021 printf("sbp_logout_all\n");
2022 END_DEBUG
2023 target = &sbp->sc_target;
2024 if (target->luns != NULL)
2025 for (i = 0; i < target->num_lun; i++) {
2026 sdev = target->luns[i];
2027 if (sdev == NULL)
2028 continue;
2029 callout_stop(&sdev->login_callout);
2030 if (sdev->status >= SBP_DEV_TOATTACH &&
2031 sdev->status <= SBP_DEV_ATTACHED)
2032 sbp_mgm_orb(sdev, ORB_FUN_LGO, NULL);
2033 }
2034
2035 return 0;
2036 }
2037
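/*
 * Release the resources of one logical unit: the per-OCB DMA maps,
 * the DMA memory backing the OCB queue and login response, and the
 * sbp_dev structure itself.
 */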
2038 static void
2039 sbp_free_sdev(struct sbp_dev *sdev)
2040 {
2041 struct sbp_softc *sc;
2042 int i;
2043
2044 if (sdev == NULL)
2045 return;
2046 sc = sdev->target->sbp;
2047 for (i = 0; i < SBP_QUEUE_LEN; i++)
2048 bus_dmamap_destroy(sc->sc_dmat, sdev->ocb[i].dmamap);
2049 fwdma_free(sdev->dma.dma_tag, sdev->dma.dma_map, sdev->dma.v_addr);
2050 kmem_free(sdev, sizeof(struct sbp_dev));
2051 }
2052
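/*
 * Tear down a target: stop the management ORB timeout, free each LUN
 * via sbp_free_sdev(), release the queued firewire transfers and the
 * LUN array, and mark the target slot unused.
 */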
2053 static void
2054 sbp_free_target(struct sbp_target *target)
2055 {
2056 struct fw_xfer *xfer, *next;
2057 int i;
2058
2059 if (target->luns == NULL)
2060 return;
2061 callout_stop(&target->mgm_ocb_timeout);
2062 for (i = 0; i < target->num_lun; i++)
2063 sbp_free_sdev(target->luns[i]);
2064
2065 for (xfer = STAILQ_FIRST(&target->xferlist);
2066 xfer != NULL; xfer = next) {
2067 next = STAILQ_NEXT(xfer, link);
2068 fw_xfer_free_buf(xfer);
2069 }
2070 STAILQ_INIT(&target->xferlist);
2071 kmem_free(target->luns, sizeof(struct sbp_dev *) * target->num_lun);
2072 target->num_lun = 0;
2073 target->luns = NULL;
2074 target->fwdev = NULL;
2075 }
2076
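/*
 * Detach the scsipi periph of one logical unit, handing back any
 * freeze counts this driver holds, and abort its outstanding OCBs.
 * Devices that are already dead or being reset are left alone.
 */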
2077 static void
2078 sbp_scsipi_detach_sdev(struct sbp_dev *sdev)
2079 {
2080 struct sbp_target *target;
2081 struct sbp_softc *sbp;
2082
2083 if (sdev == NULL)
2084 return;
2085
2086 target = sdev->target;
2087 if (target == NULL)
2088 return;
2089
2090 sbp = target->sbp;
2091
2092 if (sdev->status == SBP_DEV_DEAD)
2093 return;
2094 if (sdev->status == SBP_DEV_RESET)
2095 return;
2096 if (sdev->periph != NULL) {
2097 scsipi_periph_thaw(sdev->periph, sdev->freeze);
2098 scsipi_channel_thaw(&sbp->sc_channel, 0); /* XXXX */
2099 sdev->freeze = 0;
2100 if (scsipi_target_detach(&sbp->sc_channel,
2101 target->target_id, sdev->lun_id, DETACH_FORCE) != 0) {
2102 aprint_error_dev(sbp->sc_fd.dev, "detach failed\n");
2103 }
2104 sdev->periph = NULL;
2105 }
2106 sbp_abort_all_ocbs(sdev, XS_DRIVER_STUFFUP);
2107 }
2108
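/*
 * Detach every LUN of the target from scsipi and then detach the
 * child scsibus instance.
 */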
2109 static void
2110 sbp_scsipi_detach_target(struct sbp_target *target)
2111 {
2112 struct sbp_softc *sbp = target->sbp;
2113 int i;
2114
2115 if (target->luns != NULL) {
2116 SBP_DEBUG(0)
2117 printf("sbp_detach_target %d\n", target->target_id);
2118 END_DEBUG
2119 for (i = 0; i < target->num_lun; i++)
2120 sbp_scsipi_detach_sdev(target->luns[i]);
2121 if (config_detach(sbp->sc_bus, DETACH_FORCE) != 0)
2122 aprint_error_dev(sbp->sc_fd.dev, "%d detach failed\n",
2123 target->target_id);
2124 sbp->sc_bus = NULL;
2125 }
2126 }
2127
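/*
 * Recover a misbehaving target.  All LUNs are frozen and their
 * outstanding OCBs aborted; method 1 then issues a TARGET RESET
 * management ORB, method 2 restarts the login sequence.
 */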
2128 static void
2129 sbp_target_reset(struct sbp_dev *sdev, int method)
2130 {
2131 struct sbp_softc *sc;
2132 struct sbp_target *target = sdev->target;
2133 struct sbp_dev *tsdev;
2134 int i;
2135
2136 sc = target->sbp;
2137 for (i = 0; i < target->num_lun; i++) {
2138 tsdev = target->luns[i];
2139 if (tsdev == NULL)
2140 continue;
2141 if (tsdev->status == SBP_DEV_DEAD)
2142 continue;
2143 if (tsdev->status == SBP_DEV_RESET)
2144 continue;
2145 if (tsdev->periph != NULL) {
2146 scsipi_periph_freeze(tsdev->periph, 1);
2147 tsdev->freeze++;
2148 }
2149 sbp_abort_all_ocbs(tsdev, XS_TIMEOUT);
2150 if (method == 2)
2151 tsdev->status = SBP_DEV_LOGIN;
2152 }
2153 switch (method) {
2154 case 1:
2155 aprint_error("target reset\n");
2156 sbp_mgm_orb(sdev, ORB_FUN_RST, NULL);
2157 break;
2158 case 2:
2159 aprint_error("reset start\n");
2160 sbp_reset_start(sdev);
2161 break;
2162 }
2163 }
2164
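/*
 * A management ORB did not complete in time: drop it and kick off
 * the reset sequence for the device.
 */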
2165 static void
2166 sbp_mgm_timeout(void *arg)
2167 {
2168 struct sbp_ocb *ocb = (struct sbp_ocb *)arg;
2169 struct sbp_dev *sdev = ocb->sdev;
2170 struct sbp_target *target = sdev->target;
2171
2172 aprint_error_dev(sdev->target->sbp->sc_fd.dev,
2173 "%s:%s: request timeout(mgm orb:0x%08x) ... ",
2174 __func__, sdev->bustgtlun, (uint32_t)ocb->bus_addr);
2175 target->mgm_ocb_cur = NULL;
2176 sbp_free_ocb(sdev, ocb);
2177 #if 0
2178 /* XXX */
2179 aprint_error("run next request\n");
2180 sbp_mgm_orb(sdev, ORB_FUN_RUNQUEUE, NULL);
2181 #endif
2182 aprint_error_dev(sdev->target->sbp->sc_fd.dev,
2183 "%s:%s: reset start\n", __func__, sdev->bustgtlun);
2184 sbp_reset_start(sdev);
2185 }
2186
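/*
 * A command ORB timed out.  The response escalates with repeated
 * timeouts: agent reset first, then target reset, then a full reset
 * start; beyond that we only report the timeout.
 */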
2187 static void
2188 sbp_timeout(void *arg)
2189 {
2190 struct sbp_ocb *ocb = (struct sbp_ocb *)arg;
2191 struct sbp_dev *sdev = ocb->sdev;
2192
2193 aprint_error_dev(sdev->target->sbp->sc_fd.dev,
2194 "%s:%s: request timeout(cmd orb:0x%08x) ... ",
2195 __func__, sdev->bustgtlun, (uint32_t)ocb->bus_addr);
2196
2197 sdev->timeout++;
2198 switch (sdev->timeout) {
2199 case 1:
2200 aprint_error("agent reset\n");
2201 if (sdev->periph != NULL) {
2202 scsipi_periph_freeze(sdev->periph, 1);
2203 sdev->freeze++;
2204 }
2205 sbp_abort_all_ocbs(sdev, XS_TIMEOUT);
2206 sbp_agent_reset(sdev);
2207 break;
2208 case 2:
2209 case 3:
2210 sbp_target_reset(sdev, sdev->timeout - 1);
2211 break;
2212 default:
2213 aprint_error("\n");
2214 #if 0
2215 /* XXX give up */
2216 sbp_scsipi_detach_target(target);
2217 if (target->luns != NULL)
2218 kmem_free(target->luns,
2219 sizeof(struct sbp_dev *) * target->num_lun);
2220 target->num_lun = 0;
2221 target->luns = NULL;
2222 target->fwdev = NULL;
2223 #endif
2224 }
2225 }
2226
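/*
 * Translate a scsipi transfer into a command block ORB: look up the
 * sbp_dev for the addressed LUN, grab a free OCB, fill in the ORB
 * (notify bit, speed, transfer direction, CDB), load the data DMA
 * map and pass the OCB to sbp_execute_ocb().
 */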
2227 static void
2228 sbp_action1(struct sbp_softc *sc, struct scsipi_xfer *xs)
2229 {
2230 struct sbp_target *target = &sc->sc_target;
2231 struct sbp_dev *sdev = NULL;
2232 struct sbp_ocb *ocb;
2233 int speed, flag, error;
2234 void *cdb;
2235
2236 /* target:lun -> sdev mapping */
2237 if (target->fwdev != NULL &&
2238 xs->xs_periph->periph_lun < target->num_lun) {
2239 sdev = target->luns[xs->xs_periph->periph_lun];
2240 if (sdev != NULL && sdev->status != SBP_DEV_ATTACHED &&
2241 sdev->status != SBP_DEV_PROBE)
2242 sdev = NULL;
2243 }
2244
2245 if (sdev == NULL) {
2246 SBP_DEBUG(1)
2247 printf("%s:%d:%d: Invalid target (target needed)\n",
2248 sc ? device_xname(sc->sc_fd.dev) : "???",
2249 xs->xs_periph->periph_target,
2250 xs->xs_periph->periph_lun);
2251 END_DEBUG
2252
2253 xs->error = XS_DRIVER_STUFFUP;
2254 scsipi_done(xs);
2255 return;
2256 }
2257
2258 SBP_DEBUG(2)
2259 printf("%s:%d:%d:"
2260 " cmd: %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x,"
2261 " flags: 0x%02x, %db cmd/%db data\n",
2262 device_xname(sc->sc_fd.dev),
2263 xs->xs_periph->periph_target,
2264 xs->xs_periph->periph_lun,
2265 xs->cmd->opcode,
2266 xs->cmd->bytes[0], xs->cmd->bytes[1],
2267 xs->cmd->bytes[2], xs->cmd->bytes[3],
2268 xs->cmd->bytes[4], xs->cmd->bytes[5],
2269 xs->cmd->bytes[6], xs->cmd->bytes[7],
2270 xs->cmd->bytes[8],
2271 xs->xs_control & (XS_CTL_DATA_IN | XS_CTL_DATA_OUT),
2272 xs->cmdlen, xs->datalen);
2273 END_DEBUG
2274 mutex_enter(&sc->sc_mtx);
2275 ocb = sbp_get_ocb(sdev);
2276 mutex_exit(&sc->sc_mtx);
2277 if (ocb == NULL) {
2278 xs->error = XS_REQUEUE;
2279 if (sdev->freeze == 0) {
2280 scsipi_periph_freeze(sdev->periph, 1);
2281 sdev->freeze++;
2282 }
2283 scsipi_done(xs);
2284 return;
2285 }
2286
2287 ocb->flags = OCB_ACT_CMD;
2288 ocb->sdev = sdev;
2289 ocb->xs = xs;
2290 ocb->orb[0] = htonl(1U << 31);
2291 ocb->orb[1] = 0;
2292 ocb->orb[2] = htonl(((sc->sc_fd.fc->nodeid | FWLOCALBUS) << 16));
2293 ocb->orb[3] = htonl(ocb->bus_addr + IND_PTR_OFFSET);
2294 speed = min(target->fwdev->speed, max_speed);
2295 ocb->orb[4] =
2296 htonl(ORB_NOTIFY | ORB_CMD_SPD(speed) | ORB_CMD_MAXP(speed + 7));
2297 if ((xs->xs_control & (XS_CTL_DATA_IN | XS_CTL_DATA_OUT)) ==
2298 XS_CTL_DATA_IN) {
2299 ocb->orb[4] |= htonl(ORB_CMD_IN);
2300 flag = BUS_DMA_READ;
2301 } else
2302 flag = BUS_DMA_WRITE;
2303
2304 cdb = xs->cmd;
2305 memcpy((void *)&ocb->orb[5], cdb, xs->cmdlen);
2306 /*
2307 printf("ORB %08x %08x %08x %08x\n", ntohl(ocb->orb[0]), ntohl(ocb->orb[1]), ntohl(ocb->orb[2]), ntohl(ocb->orb[3]));
2308 printf("ORB %08x %08x %08x %08x\n", ntohl(ocb->orb[4]), ntohl(ocb->orb[5]), ntohl(ocb->orb[6]), ntohl(ocb->orb[7]));
2309 */
2310 if (xs->datalen > 0) {
2311 error = bus_dmamap_load(sc->sc_dmat, ocb->dmamap,
2312 xs->data, xs->datalen, NULL, BUS_DMA_NOWAIT | flag);
2313 if (error) {
2314 aprint_error_dev(sc->sc_fd.dev,
2315 "DMA map load error %d\n", error);
2316 xs->error = XS_DRIVER_STUFFUP;
2317 scsipi_done(xs);
2318 } else
2319 sbp_execute_ocb(ocb, ocb->dmamap->dm_segs,
2320 ocb->dmamap->dm_nsegs);
2321 } else
2322 sbp_execute_ocb(ocb, NULL, 0);
2323
2324 return;
2325 }
2326
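/*
 * Complete an OCB and make it visible to the target: a single DMA
 * segment becomes a direct data pointer, multiple segments become an
 * indirect page table.  The OCB is then linked into the pending
 * queue and the fetch agent is started via the doorbell or the ORB
 * pointer register as appropriate.
 */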
2327 static void
2328 sbp_execute_ocb(struct sbp_ocb *ocb, bus_dma_segment_t *segments, int seg)
2329 {
2330 struct sbp_ocb *prev;
2331 bus_dma_segment_t *s;
2332 int i;
2333
2334 SBP_DEBUG(2)
2335 printf("sbp_execute_ocb: seg %d", seg);
2336 for (i = 0; i < seg; i++)
2337 printf(", %jx:%jd", (uintmax_t)segments[i].ds_addr,
2338 (uintmax_t)segments[i].ds_len);
2339 printf("\n");
2340 END_DEBUG
2341
2342 if (seg == 1) {
2343 /* direct pointer */
2344 s = segments;
2345 if (s->ds_len > SBP_SEG_MAX)
2346 panic("ds_len > SBP_SEG_MAX, fix busdma code");
2347 ocb->orb[3] = htonl(s->ds_addr);
2348 ocb->orb[4] |= htonl(s->ds_len);
2349 } else if (seg > 1) {
2350 /* page table */
2351 for (i = 0; i < seg; i++) {
2352 s = &segments[i];
2353 SBP_DEBUG(0)
2354 /* XXX LSI Logic "< 16 byte" bug might be hit */
2355 if (s->ds_len < 16)
2356 printf("sbp_execute_ocb: warning, "
2357 "segment length(%jd) is less than 16."
2358 "(seg=%d/%d)\n",
2359 (uintmax_t)s->ds_len, i + 1, seg);
2360 END_DEBUG
2361 if (s->ds_len > SBP_SEG_MAX)
2362 panic("ds_len > SBP_SEG_MAX, fix busdma code");
2363 ocb->ind_ptr[i].hi = htonl(s->ds_len << 16);
2364 ocb->ind_ptr[i].lo = htonl(s->ds_addr);
2365 }
2366 ocb->orb[4] |= htonl(ORB_CMD_PTBL | seg);
2367 }
2368
2369 if (seg > 0) {
2370 struct sbp_softc *sc = ocb->sdev->target->sbp;
2371 const int flag = (ntohl(ocb->orb[4]) & ORB_CMD_IN) ?
2372 BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE;
2373
2374 bus_dmamap_sync(sc->sc_dmat, ocb->dmamap,
2375 0, ocb->dmamap->dm_mapsize, flag);
2376 }
2377 prev = sbp_enqueue_ocb(ocb->sdev, ocb);
2378 SBP_ORB_DMA_SYNC(ocb->sdev->dma, ocb->index, BUS_DMASYNC_PREWRITE);
2379 if (use_doorbell) {
2380 if (prev == NULL) {
2381 if (ocb->sdev->last_ocb != NULL)
2382 sbp_doorbell(ocb->sdev);
2383 else
2384 sbp_orb_pointer(ocb->sdev, ocb);
2385 }
2386 } else
2387 if (prev == NULL || (ocb->sdev->flags & ORB_LINK_DEAD) != 0) {
2388 ocb->sdev->flags &= ~ORB_LINK_DEAD;
2389 sbp_orb_pointer(ocb->sdev, ocb);
2390 }
2391 }
2392
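/*
 * Match an incoming status block against the pending OCB queue,
 * remove the completed OCB and unload its data DMA map.  If the
 * fetch agent stopped while more ORBs are queued, it is restarted
 * with an ORB pointer write or, in doorbell mode, a doorbell ring.
 */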
2393 static struct sbp_ocb *
2394 sbp_dequeue_ocb(struct sbp_dev *sdev, struct sbp_status *sbp_status)
2395 {
2396 struct sbp_softc *sc = sdev->target->sbp;
2397 struct sbp_ocb *ocb;
2398 struct sbp_ocb *next;
2399 int order = 0;
2400 int flags;
2401
2402 SBP_DEBUG(1)
2403 printf("%s:%s:%s: 0x%08x src %d\n", device_xname(sc->sc_fd.dev),
2404 __func__, sdev->bustgtlun, ntohl(sbp_status->orb_lo),
2405 sbp_status->src);
2406 END_DEBUG
2407 mutex_enter(&sc->sc_mtx);
2408 for (ocb = STAILQ_FIRST(&sdev->ocbs); ocb != NULL; ocb = next) {
2409 next = STAILQ_NEXT(ocb, ocb);
2410 flags = ocb->flags;
2411 if (OCB_MATCH(ocb, sbp_status)) {
2412 /* found */
2413 SBP_ORB_DMA_SYNC(sdev->dma, ocb->index,
2414 BUS_DMASYNC_POSTWRITE);
2415 STAILQ_REMOVE(&sdev->ocbs, ocb, sbp_ocb, ocb);
2416 if (ocb->xs != NULL)
2417 callout_stop(&ocb->xs->xs_callout);
2418 if (ntohl(ocb->orb[4]) & 0xffff) {
2419 const int flag =
2420 (ntohl(ocb->orb[4]) & ORB_CMD_IN) ?
2421 BUS_DMASYNC_POSTREAD :
2422 BUS_DMASYNC_POSTWRITE;
2423
2424 bus_dmamap_sync(sc->sc_dmat, ocb->dmamap,
2425 0, ocb->dmamap->dm_mapsize, flag);
2426 bus_dmamap_unload(sc->sc_dmat, ocb->dmamap);
2427
2428 }
2429 if (!use_doorbell) {
2430 if (sbp_status->src == SRC_NO_NEXT) {
2431 if (next != NULL)
2432 sbp_orb_pointer(sdev, next);
2433 else if (order > 0)
2434 /*
2435 * Unordered execution:
2436 * we will need to send the
2437 * pointer for the next ORB.
2438 */
2439 sdev->flags |= ORB_LINK_DEAD;
2440 }
2441 }
2442 break;
2443 } else
2444 order++;
2445 }
2446 mutex_exit(&sc->sc_mtx);
2447
2448 if (ocb && use_doorbell) {
2449 /*
2450 * XXX this is not correct for unordered
2451 * execution.
2452 */
2453 if (sdev->last_ocb != NULL)
2454 sbp_free_ocb(sdev, sdev->last_ocb);
2455 sdev->last_ocb = ocb;
2456 if (next != NULL &&
2457 sbp_status->src == SRC_NO_NEXT)
2458 sbp_doorbell(sdev);
2459 }
2460
2461 SBP_DEBUG(0)
2462 if (ocb && order > 0)
2463 printf("%s:%s:%s: unordered execution order:%d\n",
2464 device_xname(sc->sc_fd.dev), __func__, sdev->bustgtlun,
2465 order);
2466 END_DEBUG
2467 return ocb;
2468 }
2469
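/*
 * Append an OCB to the pending queue, arm its timeout and link it
 * into the previous ORB (or, in doorbell mode, the last completed
 * ORB) by rewriting that ORB's next_ORB field.  Returns the OCB that
 * was previously at the tail, or NULL if the queue was empty.
 */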
2470 static struct sbp_ocb *
2471 sbp_enqueue_ocb(struct sbp_dev *sdev, struct sbp_ocb *ocb)
2472 {
2473 struct sbp_softc *sc = sdev->target->sbp;
2474 struct sbp_ocb *tocb, *prev, *prev2;
2475
2476 SBP_DEBUG(1)
2477 printf("%s:%s:%s: 0x%08jx\n", device_xname(sc->sc_fd.dev),
2478 __func__, sdev->bustgtlun, (uintmax_t)ocb->bus_addr);
2479 END_DEBUG
2480 mutex_enter(&sc->sc_mtx);
2481 prev = NULL;
2482 STAILQ_FOREACH(tocb, &sdev->ocbs, ocb)
2483 prev = tocb;
2484 prev2 = prev;
2485 STAILQ_INSERT_TAIL(&sdev->ocbs, ocb, ocb);
2486 mutex_exit(&sc->sc_mtx);
2487
2488 callout_reset(&ocb->xs->xs_callout, mstohz(ocb->xs->timeout),
2489 sbp_timeout, ocb);
2490
2491 if (use_doorbell && prev == NULL)
2492 prev2 = sdev->last_ocb;
2493
2494 if (prev2 != NULL) {
2495 SBP_DEBUG(2)
2496 printf("linking chain 0x%jx -> 0x%jx\n",
2497 (uintmax_t)prev2->bus_addr, (uintmax_t)ocb->bus_addr);
2498 END_DEBUG
2499 /*
2500 * Suppress compiler optimization so that orb[1] must be
2501 * written first.
2502 * XXX We may need an explicit memory barrier on
2503 * architectures other than i386/amd64.
2504 */
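/*
 * (A store barrier such as membar_producer(3) between the two
 * stores below would likely be the portable fix; left as a note
 * only.)
 */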
2505 *(volatile uint32_t *)&prev2->orb[1] = htonl(ocb->bus_addr);
2506 *(volatile uint32_t *)&prev2->orb[0] = 0;
2507 }
2508
2509 return prev;
2510 }
2511
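/*
 * Take an OCB from the device's free list; the caller must hold
 * sc_mtx.  Returns NULL and flags ORB_SHORTAGE if none is available.
 */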
2512 static struct sbp_ocb *
2513 sbp_get_ocb(struct sbp_dev *sdev)
2514 {
2515 struct sbp_softc *sc = sdev->target->sbp;
2516 struct sbp_ocb *ocb;
2517
2518 KASSERT(mutex_owned(&sc->sc_mtx));
2519
2520 ocb = STAILQ_FIRST(&sdev->free_ocbs);
2521 if (ocb == NULL) {
2522 sdev->flags |= ORB_SHORTAGE;
2523 aprint_error_dev(sc->sc_fd.dev,
2524 "ocb shortage!!!\n");
2525 return NULL;
2526 }
2527 STAILQ_REMOVE_HEAD(&sdev->free_ocbs, ocb);
2528 ocb->xs = NULL;
2529 return ocb;
2530 }
2531
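/*
 * Return an OCB to the free list.  If we previously hit an OCB
 * shortage, thaw the periph and channel so queued requests are
 * retried.
 */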
2532 static void
2533 sbp_free_ocb(struct sbp_dev *sdev, struct sbp_ocb *ocb)
2534 {
2535 struct sbp_softc *sc = sdev->target->sbp;
2536 int count;
2537
2538 ocb->flags = 0;
2539 ocb->xs = NULL;
2540
2541 mutex_enter(&sc->sc_mtx);
2542 STAILQ_INSERT_TAIL(&sdev->free_ocbs, ocb, ocb);
2543 mutex_exit(&sc->sc_mtx);
2544 if (sdev->flags & ORB_SHORTAGE) {
2545 sdev->flags &= ~ORB_SHORTAGE;
2546 count = sdev->freeze;
2547 sdev->freeze = 0;
2548 if (sdev->periph)
2549 scsipi_periph_thaw(sdev->periph, count);
2550 scsipi_channel_thaw(&sc->sc_channel, 0);
2551 }
2552 }
2553
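/*
 * Abort a single OCB: unload its data DMA map, finish the attached
 * scsipi transfer with the given error status and put the OCB back
 * on the free list.
 */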
2554 static void
2555 sbp_abort_ocb(struct sbp_ocb *ocb, int status)
2556 {
2557 struct sbp_softc *sc;
2558 struct sbp_dev *sdev;
2559
2560 sdev = ocb->sdev;
2561 sc = sdev->target->sbp;
2562 SBP_DEBUG(0)
2563 printf("%s:%s:%s: sbp_abort_ocb 0x%jx\n", device_xname(sc->sc_fd.dev),
2564 __func__, sdev->bustgtlun, (uintmax_t)ocb->bus_addr);
2565 END_DEBUG
2566 SBP_DEBUG(1)
2567 if (ocb->xs != NULL)
2568 sbp_print_scsi_cmd(ocb);
2569 END_DEBUG
2570 if (ntohl(ocb->orb[4]) & 0xffff) {
2571 const int flag = (ntohl(ocb->orb[4]) & ORB_CMD_IN) ?
2572 BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE;
2573
2574 bus_dmamap_sync(sc->sc_dmat, ocb->dmamap,
2575 0, ocb->dmamap->dm_mapsize, flag);
2576 bus_dmamap_unload(sc->sc_dmat, ocb->dmamap);
2577 }
2578 if (ocb->xs != NULL) {
2579 callout_stop(&ocb->xs->xs_callout);
2580 ocb->xs->error = status;
2581 scsipi_done(ocb->xs);
2582 }
2583 sbp_free_ocb(sdev, ocb);
2584 }
2585
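/*
 * Move the device's whole pending queue aside and abort each OCB
 * with the given status; the cached last_ocb (doorbell mode) is
 * released as well.
 */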
2586 static void
2587 sbp_abort_all_ocbs(struct sbp_dev *sdev, int status)
2588 {
2589 struct sbp_softc *sc = sdev->target->sbp;
2590 struct sbp_ocb *ocb, *next;
2591 STAILQ_HEAD(, sbp_ocb) temp;
2592
2593 mutex_enter(&sc->sc_mtx);
2594 STAILQ_INIT(&temp);
2595 STAILQ_CONCAT(&temp, &sdev->ocbs);
2596 STAILQ_INIT(&sdev->ocbs);
2597 mutex_exit(&sc->sc_mtx);
2598
2599 for (ocb = STAILQ_FIRST(&temp); ocb != NULL; ocb = next) {
2600 next = STAILQ_NEXT(ocb, ocb);
2601 sbp_abort_ocb(ocb, status);
2602 }
2603 if (sdev->last_ocb != NULL) {
2604 sbp_free_ocb(sdev, sdev->last_ocb);
2605 sdev->last_ocb = NULL;
2606 }
2607 }
2608