/*	$NetBSD: aic79xx.seq,v 1.17 2022/06/27 22:41:29 andvar Exp $	*/

/*
 * Adaptec U320 device driver firmware for Linux and FreeBSD.
 *
 * Copyright (c) 1994-2001 Justin T. Gibbs.
 * Copyright (c) 2000-2002 Adaptec Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon
 *    including a substantially similar Disclaimer requirement for further
 *    binary redistribution.
 * 3. Neither the names of the above-listed copyright holders nor the names
 *    of any contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGES.
 *
 * $FreeBSD: src/sys/dev/aic7xxx/aic79xx.seq,v 1.13 2003/06/28 04:44:10 gibbs Exp $
 */

VERSION = "Id: //depot/aic7xxx/aic7xxx/aic79xx.seq#96 $"
PATCH_ARG_LIST = "struct ahd_softc *ahd"
PREFIX = "ahd_"

#include <dev/microcode/aic7xxx/aic79xx.reg>
#include <dev/scsipi/scsi_message.h>

restart:
if ((ahd->bugs & AHD_INTCOLLISION_BUG) != 0) {
	test SEQINTCODE, 0xFF jz idle_loop;
	SET_SEQINTCODE(NO_SEQINT)
}

idle_loop:

	if ((ahd->bugs & AHD_INTCOLLISION_BUG) != 0) {
		/*
		 * Convert ERROR status into a sequencer
		 * interrupt to handle the case of an
		 * interrupt collision on the hardware
		 * setting of HWERR.
		 */
		test ERROR, 0xFF jz no_error_set;
		SET_SEQINTCODE(SAW_HWERR)
no_error_set:
	}
	SET_MODE(M_SCSI, M_SCSI)
	test SCSISEQ0, ENSELO|ENARBO jnz idle_loop_checkbus;
	test SEQ_FLAGS2, SELECTOUT_QFROZEN jnz idle_loop_checkbus;
	cmp WAITING_TID_HEAD[1], SCB_LIST_NULL je idle_loop_checkbus;
	/*
	 * ENSELO is cleared by a SELDO, so we must test for SELDO
	 * one last time.
	 */
BEGIN_CRITICAL;
	test SSTAT0, SELDO jnz select_out;
END_CRITICAL;
	call start_selection;
idle_loop_checkbus:
BEGIN_CRITICAL;
	test SSTAT0, SELDO jnz select_out;
END_CRITICAL;
	test SSTAT0, SELDI jnz select_in;
	test SCSIPHASE, ~DATA_PHASE_MASK jz idle_loop_check_nonpackreq;
	test SCSISIGO, ATNO jz idle_loop_check_nonpackreq;
	call unexpected_nonpkt_phase_find_ctxt;
idle_loop_check_nonpackreq:
	test SSTAT2, NONPACKREQ jz . + 2;
	call unexpected_nonpkt_phase_find_ctxt;
	if ((ahd->bugs & AHD_FAINT_LED_BUG) != 0) {
		and A, FIFO0FREE|FIFO1FREE, DFFSTAT;
		cmp A, FIFO0FREE|FIFO1FREE jne . + 3;
		and SBLKCTL, ~DIAGLEDEN|DIAGLEDON;
		jmp . + 2;
		or SBLKCTL, DIAGLEDEN|DIAGLEDON;
	}
	call idle_loop_gsfifo_in_scsi_mode;
	call idle_loop_service_fifos;
	call idle_loop_cchan;
	jmp idle_loop;

BEGIN_CRITICAL;
idle_loop_gsfifo:
	SET_MODE(M_SCSI, M_SCSI)
idle_loop_gsfifo_in_scsi_mode:
	test LQISTAT2, LQIGSAVAIL jz return;
	/*
	 * We have received good status for this transaction.  There may
	 * still be data in our FIFOs draining to the host.  Complete
	 * the SCB only if all data has transferred to the host.
	 */
good_status_IU_done:
	bmov SCBPTR, GSFIFO, 2;
	clr SCB_SCSI_STATUS;
	/*
	 * If a command completed before an attempted task management
	 * function completed, notify the host after disabling any
	 * pending select-outs.
	 */
	test SCB_TASK_MANAGEMENT, 0xFF jz gsfifo_complete_normally;
	test SSTAT0, SELDO|SELINGO jnz . + 2;
	and SCSISEQ0, ~ENSELO;
	SET_SEQINTCODE(TASKMGMT_CMD_CMPLT_OKAY)
gsfifo_complete_normally:
	or SCB_CONTROL, STATUS_RCVD;

	/*
	 * Since this status did not consume a FIFO, we have to
	 * be a bit more diligent in how we check for FIFOs pertaining
	 * to this transaction.  There are two states that a FIFO still
	 * transferring data may be in.
	 *
	 * 1) Configured and draining to the host, with a FIFO handler.
	 * 2) Pending cfg4data, fifo not empty.
	 *
	 * Case 1 can be detected by noticing a non-zero FIFO active
	 * count in the SCB.  In this case, we allow the routine servicing
	 * the FIFO to complete the SCB.
	 *
	 * Case 2 implies either a pending or yet to occur save data
	 * pointers for this same context in the other FIFO.  So, if
	 * we detect case 1, we will properly defer the post of the SCB
	 * and achieve the desired result.  The pending cfg4data will
	 * notice that status has been received and complete the SCB.
	 */
	test SCB_FIFO_USE_COUNT, 0xFF jnz idle_loop_gsfifo_in_scsi_mode;
	call complete;
END_CRITICAL;
	jmp idle_loop_gsfifo_in_scsi_mode;

idle_loop_service_fifos:
	SET_MODE(M_DFF0, M_DFF0)
	test LONGJMP_ADDR[1], INVALID_ADDR jnz idle_loop_next_fifo;
	call longjmp;
idle_loop_next_fifo:
	SET_MODE(M_DFF1, M_DFF1)
	test LONGJMP_ADDR[1], INVALID_ADDR jz longjmp;
return:
	ret;

idle_loop_cchan:
	SET_MODE(M_CCHAN, M_CCHAN)
	test QOFF_CTLSTA, HS_MAILBOX_ACT jz hs_mailbox_empty;
	mov LOCAL_HS_MAILBOX, HS_MAILBOX;
	or QOFF_CTLSTA, HS_MAILBOX_ACT;
hs_mailbox_empty:
BEGIN_CRITICAL;
	test CCSCBCTL, CCARREN|CCSCBEN jz scbdma_idle;
	test CCSCBCTL, CCSCBDIR jnz fetch_new_scb_inprog;
	test CCSCBCTL, CCSCBDONE jz return;
END_CRITICAL;
	/* FALLTHROUGH */
scbdma_tohost_done:
	test CCSCBCTL, CCARREN jz fill_qoutfifo_dmadone;
	/*
	 * An SCB has been successfully uploaded to the host.
	 * If the SCB was uploaded for some reason other than
	 * bad SCSI status (currently only for underruns), we
	 * queue the SCB for normal completion.  Otherwise, we
	 * wait until any select-out activity has halted, and
	 * then notify the host so that the transaction can be
	 * dealt with.
	 */
	test SCB_SCSI_STATUS, 0xff jnz scbdma_notify_host;
	and CCSCBCTL, ~(CCARREN|CCSCBEN);
	bmov COMPLETE_DMA_SCB_HEAD, SCB_NEXT_COMPLETE, 2;
	bmov SCB_NEXT_COMPLETE, COMPLETE_SCB_HEAD, 2;
	bmov COMPLETE_SCB_HEAD, SCBPTR, 2 ret;
scbdma_notify_host:
	SET_MODE(M_SCSI, M_SCSI)
	test SCSISEQ0, ENSELO jnz return;
	test SSTAT0, (SELDO|SELINGO) jnz return;
	SET_MODE(M_CCHAN, M_CCHAN)
	/*
	 * Remove SCB and notify host.
	 */
	and CCSCBCTL, ~(CCARREN|CCSCBEN);
	bmov COMPLETE_DMA_SCB_HEAD, SCB_NEXT_COMPLETE, 2;
	SET_SEQINTCODE(BAD_SCB_STATUS)
	ret;
fill_qoutfifo_dmadone:
	and CCSCBCTL, ~(CCARREN|CCSCBEN);
	call qoutfifo_updated;
	mvi COMPLETE_SCB_DMAINPROG_HEAD[1], SCB_LIST_NULL;
	bmov QOUTFIFO_NEXT_ADDR, SCBHADDR, 4;
	test QOFF_CTLSTA, SDSCB_ROLLOVR jz return;
	bmov QOUTFIFO_NEXT_ADDR, SHARED_DATA_ADDR, 4;
	xor QOUTFIFO_ENTRY_VALID_TAG, QOUTFIFO_ENTRY_VALID_TOGGLE ret;

qoutfifo_updated:
	/*
	 * If there are more commands waiting to be DMA'ed
	 * to the host, always coalesce.  Otherwise honor the
	 * host's wishes.
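	 * (The host's wish is conveyed via the ENINT_COALESCE bit in
	 * LOCAL_HS_MAILBOX, tested below.)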
	 */
	cmp COMPLETE_DMA_SCB_HEAD[1], SCB_LIST_NULL jne coalesce_by_count;
	cmp COMPLETE_SCB_HEAD[1], SCB_LIST_NULL jne coalesce_by_count;
	test LOCAL_HS_MAILBOX, ENINT_COALESCE jz issue_cmdcmplt;

	/*
	 * If we have relatively few commands outstanding, don't
	 * bother waiting for another command to complete.
	 */
	test CMDS_PENDING[1], 0xFF jnz coalesce_by_count;
	/* Add -1 so that jnc means <= not just < */
	add A, -1, INT_COALESCING_MINCMDS;
	add NONE, A, CMDS_PENDING;
	jnc issue_cmdcmplt;

	/*
	 * If coalescing, only coalesce up to the limit
	 * provided by the host driver.
	 */
coalesce_by_count:
	mov A, INT_COALESCING_MAXCMDS;
	add NONE, A, INT_COALESCING_CMDCOUNT;
	jc issue_cmdcmplt;
	/*
	 * If the timer is not currently active,
	 * fire it up.
	 */
	test INTCTL, SWTMINTMASK jz return;
	bmov SWTIMER, INT_COALESCING_TIMER, 2;
	mvi CLRSEQINTSTAT, CLRSEQ_SWTMRTO;
	or INTCTL, SWTMINTEN|SWTIMER_START;
	and INTCTL, ~SWTMINTMASK ret;

issue_cmdcmplt:
	mvi INTSTAT, CMDCMPLT;
	clr INT_COALESCING_CMDCOUNT;
	or INTCTL, SWTMINTMASK ret;

BEGIN_CRITICAL;
fetch_new_scb_inprog:
	test CCSCBCTL, ARRDONE jz return;
fetch_new_scb_done:
	and CCSCBCTL, ~(CCARREN|CCSCBEN);
	bmov REG0, SCBPTR, 2;
	clr A;
	add CMDS_PENDING, 1;
	adc CMDS_PENDING[1], A;
	if ((ahd->bugs & AHD_PKT_LUN_BUG) != 0) {
		/*
		 * "Short Luns" are not placed into outgoing LQ
		 * packets in the correct byte order. Use a full
		 * sized lun field instead and fill it with the
		 * one byte of lun information we support.
		 */
		mov SCB_PKT_LUN[6], SCB_LUN;
	}
	/*
	 * The FIFO use count field is shared with the
	 * tag set by the host so that our SCB dma engine
	 * knows the correct location to store the SCB.
	 * Set it to zero before processing the SCB.
	 */
	clr SCB_FIFO_USE_COUNT;
	/* Update the next SCB address to download. */
	bmov NEXT_QUEUED_SCB_ADDR, SCB_NEXT_SCB_BUSADDR, 4;
	mvi SCB_NEXT[1], SCB_LIST_NULL;
	mvi SCB_NEXT2[1], SCB_LIST_NULL;
	/* Increment our position in the QINFIFO. */
	mov NONE, SNSCB_QOFF;
	/*
	 * SCBs that want to send messages are always
	 * queued independently.  This ensures that they
	 * are at the head of the SCB list to select out
	 * to a target and we will see the MK_MESSAGE flag.
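	 * (select_out_list_done depends on this: for such SCBs it leaves
	 * the per-target tail pointer untouched.)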
	 */
	test SCB_CONTROL, MK_MESSAGE jnz first_new_target_scb;
	shr SINDEX, 3, SCB_SCSIID;
	and SINDEX, ~0x1;
	mvi SINDEX[1], (WAITING_SCB_TAILS >> 8);
	bmov DINDEX, SINDEX, 2;
	bmov SCBPTR, SINDIR, 2;
	bmov DINDIR, REG0, 2;
	cmp SCBPTR[1], SCB_LIST_NULL je first_new_target_scb;
	bmov SCB_NEXT, REG0, 2 ret;
first_new_target_scb:
	cmp WAITING_TID_HEAD[1], SCB_LIST_NULL je first_new_scb;
	bmov SCBPTR, WAITING_TID_TAIL, 2;
	bmov SCB_NEXT2, REG0, 2;
	bmov WAITING_TID_TAIL, REG0, 2 ret;
first_new_scb:
	bmov WAITING_TID_HEAD, REG0, 2;
	bmov WAITING_TID_TAIL, REG0, 2 ret;
END_CRITICAL;

scbdma_idle:
	/*
	 * Give precedence to downloading new SCBs to execute
	 * unless select-outs are currently frozen.
	 */
	test SEQ_FLAGS2, SELECTOUT_QFROZEN jnz . + 2;
BEGIN_CRITICAL;
	test QOFF_CTLSTA, NEW_SCB_AVAIL jnz fetch_new_scb;
	cmp COMPLETE_DMA_SCB_HEAD[1], SCB_LIST_NULL jne dma_complete_scb;
	cmp COMPLETE_SCB_HEAD[1], SCB_LIST_NULL je return;
	/* FALLTHROUGH */
fill_qoutfifo:
	/*
	 * Keep track of the SCBs we are DMA'ing just
	 * in case the DMA fails or is aborted.
	 */
	mov A, QOUTFIFO_ENTRY_VALID_TAG;
	bmov COMPLETE_SCB_DMAINPROG_HEAD, COMPLETE_SCB_HEAD, 2;
	mvi CCSCBCTL, CCSCBRESET;
	bmov SCBHADDR, QOUTFIFO_NEXT_ADDR, 4;
	bmov SCBPTR, COMPLETE_SCB_HEAD, 2;
fill_qoutfifo_loop:
	mov CCSCBRAM, SCBPTR;
	or CCSCBRAM, A, SCBPTR[1];
	mov NONE, SDSCB_QOFF;
	inc INT_COALESCING_CMDCOUNT;
	add CMDS_PENDING, -1;
	adc CMDS_PENDING[1], -1;
	cmp SCB_NEXT_COMPLETE[1], SCB_LIST_NULL je fill_qoutfifo_done;
	cmp CCSCBADDR, CCSCBADDR_MAX je fill_qoutfifo_done;
	test QOFF_CTLSTA, SDSCB_ROLLOVR jnz fill_qoutfifo_done;
	bmov SCBPTR, SCB_NEXT_COMPLETE, 2;
	jmp fill_qoutfifo_loop;
fill_qoutfifo_done:
	mov SCBHCNT, CCSCBADDR;
	mvi CCSCBCTL, CCSCBEN|CCSCBRESET;
	bmov COMPLETE_SCB_HEAD, SCB_NEXT_COMPLETE, 2;
	mvi SCB_NEXT_COMPLETE[1], SCB_LIST_NULL ret;

fetch_new_scb:
	bmov SCBHADDR, NEXT_QUEUED_SCB_ADDR, 4;
	mvi CCARREN|CCSCBEN|CCSCBDIR|CCSCBRESET jmp dma_scb;
dma_complete_scb:
	bmov SCBPTR, COMPLETE_DMA_SCB_HEAD, 2;
	bmov SCBHADDR, SCB_BUSADDR, 4;
	mvi CCARREN|CCSCBEN|CCSCBRESET jmp dma_scb;
END_CRITICAL;

/*
 * Either post or fetch an SCB from host memory.  The caller
 * is responsible for polling for transfer completion.
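 * (Callers in this file poll by testing CCSCBCTL bits, e.g. ARRDONE in
 * fetch_new_scb_inprog and CCSCBDONE in idle_loop_cchan.)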
 *
 * Prerequisites: Mode == M_CCHAN
 *		  SINDEX contains CCSCBCTL flags
 *		  SCBHADDR set to Host SCB address
 *		  SCBPTR set to SCB src location on "push" operations
 */
SET_SRC_MODE M_CCHAN;
SET_DST_MODE M_CCHAN;
dma_scb:
	mvi SCBHCNT, SCB_TRANSFER_SIZE;
	mov CCSCBCTL, SINDEX ret;

BEGIN_CRITICAL;
setjmp:
	bmov LONGJMP_ADDR, STACK, 2 ret;
setjmp_inline:
	bmov LONGJMP_ADDR, STACK, 2;
longjmp:
	bmov STACK, LONGJMP_ADDR, 2 ret;
END_CRITICAL;

/*************************** Chip Bug Work Arounds ****************************/
/*
 * Must disable interrupts when setting the mode pointer
 * register as an interrupt occurring mid update will
 * fail to store the new mode value for restoration on
 * an iret.
 */
if ((ahd->bugs & AHD_SET_MODE_BUG) != 0) {
set_mode_work_around:
	mvi SEQINTCTL, INTVEC1DSL;
	mov MODE_PTR, SINDEX;
	clr SEQINTCTL ret;

toggle_dff_mode_work_around:
	mvi SEQINTCTL, INTVEC1DSL;
	xor MODE_PTR, MK_MODE(M_DFF1, M_DFF1);
	clr SEQINTCTL ret;
}


if ((ahd->bugs & AHD_INTCOLLISION_BUG) != 0) {
set_seqint_work_around:
	mov SEQINTCODE, SINDEX;
	mvi SEQINTCODE, NO_SEQINT ret;
}

/************************ Packetized LongJmp Routines *************************/
SET_SRC_MODE M_SCSI;
SET_DST_MODE M_SCSI;
start_selection:
BEGIN_CRITICAL;
	if ((ahd->bugs & AHD_SENT_SCB_UPDATE_BUG) != 0) {
		/*
		 * Razor #494
		 * Rev A hardware fails to update LAST/CURR/NEXTSCB
		 * correctly after a packetized selection in several
		 * situations:
		 *
		 * 1) If only one command existed in the queue, the
		 *    LAST/CURR/NEXTSCB are unchanged.
		 *
		 * 2) In a non QAS, protocol allowed phase change,
		 *    the queue is shifted 1 too far.  LASTSCB is
		 *    the last SCB that was correctly processed.
		 *
		 * 3) In the QAS case, if the full list of commands
		 *    was successfully sent, NEXTSCB is NULL and neither
		 *    CURRSCB nor LASTSCB can be trusted.  We must
		 *    manually walk the list counting MAXCMDCNT elements
		 *    to find the last SCB that was sent correctly.
		 *
		 * To simplify the workaround for this bug in SELDO
		 * handling, we initialize LASTSCB prior to enabling
		 * selection so we can rely on it even for case #1 above.
		 */
		bmov LASTSCB, WAITING_TID_HEAD, 2;
	}
	bmov CURRSCB, WAITING_TID_HEAD, 2;
	bmov SCBPTR, WAITING_TID_HEAD, 2;
	shr SELOID, 4, SCB_SCSIID;
	/*
	 * If we want to send a message to the device, ensure
	 * we are selecting with atn regardless of our packetized
	 * agreement.  Since SPI4 only allows target reset or PPR
	 * messages if this is a packetized connection, the change
	 * to our negotiation table entry for this selection will
	 * be cleared when the message is acted on.
	 */
	test SCB_CONTROL, MK_MESSAGE jz . + 3;
	mov NEGOADDR, SELOID;
	or NEGCONOPTS, ENAUTOATNO;
	or SCSISEQ0, ENSELO ret;
END_CRITICAL;

/*
 * Allocate a FIFO for a non-packetized transaction.
 * In RevA hardware, both FIFOs must be free before we
 * can allocate a FIFO for a non-packetized transaction.
 */
allocate_fifo_loop:
	/*
	 * Do whatever work is required to free a FIFO.
	 */
	call idle_loop_service_fifos;
	SET_MODE(M_SCSI, M_SCSI)
allocate_fifo:
	if ((ahd->bugs & AHD_NONPACKFIFO_BUG) != 0) {
		and A, FIFO0FREE|FIFO1FREE, DFFSTAT;
		cmp A, FIFO0FREE|FIFO1FREE jne allocate_fifo_loop;
	} else {
		test DFFSTAT, FIFO1FREE jnz allocate_fifo1;
		test DFFSTAT, FIFO0FREE jz allocate_fifo_loop;
		mvi DFFSTAT, B_CURRFIFO_0;
		SET_MODE(M_DFF0, M_DFF0)
		bmov SCBPTR, ALLOCFIFO_SCBPTR, 2 ret;
	}
SET_SRC_MODE M_SCSI;
SET_DST_MODE M_SCSI;
allocate_fifo1:
	mvi DFFSTAT, CURRFIFO_1;
	SET_MODE(M_DFF1, M_DFF1)
	bmov SCBPTR, ALLOCFIFO_SCBPTR, 2 ret;

/*
 * We have been reselected as an initiator
 * or selected as a target.
 */
SET_SRC_MODE M_SCSI;
SET_DST_MODE M_SCSI;
select_in:
	if ((ahd->bugs & AHD_BUSFREEREV_BUG) != 0) {
		/*
		 * This exposes a window whereby a
		 * busfree just after a selection will
		 * be missed, but there is no other safe
		 * way to enable busfree detection if
		 * the busfreerev function is broken.
		 */
		mvi CLRSINT1,CLRBUSFREE;
		or SIMODE1, ENBUSFREE;
	}
	or SXFRCTL0, SPIOEN;
	and SAVED_SCSIID, SELID_MASK, SELID;
	and A, OID, IOWNID;
	or SAVED_SCSIID, A;
	mvi CLRSINT0, CLRSELDI;
	jmp ITloop;

/*
 * We have successfully selected out.
 *
 * Clear SELDO.
 * Dequeue all SCBs sent from the waiting queue
 * Requeue all SCBs *not* sent to the tail of the waiting queue
 * Take Razor #494 into account for above.
 *
 * In Packetized Mode:
 *	Return to the idle loop.  Our interrupt handler will take
 *	care of any incoming L_Qs.
 *
 * In Non-Packetized Mode:
 *	Continue to our normal state machine.
 */
SET_SRC_MODE M_SCSI;
SET_DST_MODE M_SCSI;
select_out:
BEGIN_CRITICAL;
	/* Clear out all SCBs that have been successfully sent. */
	if ((ahd->bugs & AHD_SENT_SCB_UPDATE_BUG) != 0) {
		/*
		 * For packetized, the LQO manager clears ENSELO on
		 * the assertion of SELDO.  If we are non-packetized,
		 * LASTSCB and CURRSCB are accurate.
		 */
		test SCSISEQ0, ENSELO jnz use_lastscb;

		/*
		 * The update is correct for LQOSTAT1 errors.  All
		 * but LQOBUSFREE are handled by kernel interrupts.
		 * If we see LQOBUSFREE, return to the idle loop.
		 * Once we are out of the select_out critical section,
		 * the kernel will cleanup the LQOBUSFREE and we will
		 * eventually restart the selection if appropriate.
		 */
		test LQOSTAT1, LQOBUSFREE jnz idle_loop;

		/*
		 * On a phase change outside of packet boundaries,
		 * LASTSCB points to the currently active SCB context
		 * on the bus.
		 */
		test LQOSTAT2, LQOPHACHGOUTPKT jnz use_lastscb;

		/*
		 * If the hardware has traversed the whole list, NEXTSCB
		 * will be NULL, CURRSCB and LASTSCB cannot be trusted,
		 * but MAXCMDCNT is accurate.  If we stop part way through
		 * the list or only had one command to issue, NEXTSCB[1] is
		 * not NULL and LASTSCB is the last command to go out.
		 */
		cmp NEXTSCB[1], SCB_LIST_NULL jne use_lastscb;

		/*
		 * Brute force walk.
		 */
		bmov SCBPTR, WAITING_TID_HEAD, 2;
		mvi SEQINTCTL, INTVEC1DSL;
		mvi MODE_PTR, MK_MODE(M_CFG, M_CFG);
		mov A, MAXCMDCNT;
		mvi MODE_PTR, MK_MODE(M_SCSI, M_SCSI);
		clr SEQINTCTL;
find_lastscb_loop:
		dec A;
		test A, 0xFF jz found_last_sent_scb;
		bmov SCBPTR, SCB_NEXT, 2;
		jmp find_lastscb_loop;
use_lastscb:
		bmov SCBPTR, LASTSCB, 2;
found_last_sent_scb:
		bmov CURRSCB, SCBPTR, 2;
curscb_ww_done:
	} else {
		bmov SCBPTR, CURRSCB, 2;
	}

	/*
	 * Requeue any SCBs not sent, to the tail of the waiting Q.
	 */
	cmp SCB_NEXT[1], SCB_LIST_NULL je select_out_list_done;

	/*
	 * We know that neither the per-TID list nor the list of
	 * TIDs is empty.  Use this knowledge to our advantage.
	 */
	bmov REG0, SCB_NEXT, 2;
	bmov SCBPTR, WAITING_TID_TAIL, 2;
	bmov SCB_NEXT2, REG0, 2;
	bmov WAITING_TID_TAIL, REG0, 2;
	jmp select_out_inc_tid_q;

select_out_list_done:
	/*
	 * The whole list made it.  Just clear our TID's tail pointer
	 * unless we were queued independently due to our need to
	 * send a message.
	 */
	test SCB_CONTROL, MK_MESSAGE jnz select_out_inc_tid_q;
	shr DINDEX, 3, SCB_SCSIID;
	or DINDEX, 1;	/* Want only the second byte */
	mvi DINDEX[1], ((WAITING_SCB_TAILS) >> 8);
	mvi DINDIR, SCB_LIST_NULL;
select_out_inc_tid_q:
	bmov SCBPTR, WAITING_TID_HEAD, 2;
	bmov WAITING_TID_HEAD, SCB_NEXT2, 2;
	cmp WAITING_TID_HEAD[1], SCB_LIST_NULL jne . + 2;
	mvi WAITING_TID_TAIL[1], SCB_LIST_NULL;
	bmov SCBPTR, CURRSCB, 2;
	mvi CLRSINT0, CLRSELDO;
	test LQOSTAT2, LQOPHACHGOUTPKT jnz unexpected_nonpkt_phase;
	test LQOSTAT1, LQOPHACHGINPKT jnz unexpected_nonpkt_phase;

	/*
	 * If this is a packetized connection, return to our
	 * idle_loop and let our interrupt handler deal with
	 * any connection setup/teardown issues.  The only
	 * exceptions are the case of MK_MESSAGE and task management
	 * SCBs.
	 */
	if ((ahd->bugs & AHD_LQO_ATNO_BUG) != 0) {
		/*
		 * In the A, the LQO manager transitions to LQOSTOP0 even if
		 * we have selected out with ATN asserted and the target
		 * REQs in a non-packet phase.
		 */
		test SCB_CONTROL, MK_MESSAGE jz select_out_no_message;
		test SCSISIGO, ATNO jnz select_out_non_packetized;
select_out_no_message:
	}
	test LQOSTAT2, LQOSTOP0 jz select_out_non_packetized;
	test SCB_TASK_MANAGEMENT, 0xFF jz idle_loop;
	SET_SEQINTCODE(TASKMGMT_FUNC_COMPLETE)
	jmp idle_loop;

select_out_non_packetized:
	/* Non packetized request. */
	and SCSISEQ0, ~ENSELO;
	if ((ahd->bugs & AHD_BUSFREEREV_BUG) != 0) {
		/*
		 * This exposes a window whereby a
		 * busfree just after a selection will
		 * be missed, but there is no other safe
		 * way to enable busfree detection if
		 * the busfreerev function is broken.
		 */
		mvi CLRSINT1,CLRBUSFREE;
		or SIMODE1, ENBUSFREE;
	}
	mov SAVED_SCSIID, SCB_SCSIID;
	mov SAVED_LUN, SCB_LUN;
	mvi SEQ_FLAGS, NO_CDB_SENT;
END_CRITICAL;
	or SXFRCTL0, SPIOEN;

	/*
	 * As soon as we get a successful selection, the target
	 * should go into the message out phase since we have ATN
	 * asserted.
	 */
	mvi MSG_OUT, MSG_IDENTIFYFLAG;

	/*
	 * Main loop for information transfer phases.  Wait for the
	 * target to assert REQ before checking MSG, C/D and I/O for
	 * the bus phase.
	 */
mesgin_phasemis:
ITloop:
	call phase_lock;

	mov A, LASTPHASE;

	test A, ~P_DATAIN_DT jz p_data;
	cmp A,P_COMMAND je p_command;
	cmp A,P_MESGOUT je p_mesgout;
	cmp A,P_STATUS je p_status;
	cmp A,P_MESGIN je p_mesgin;

	SET_SEQINTCODE(BAD_PHASE)
	jmp ITloop;	/* Try reading the bus again. */

/*
 * Command phase.  Set up the DMA registers and let 'er rip.
 */
p_command:
	test SEQ_FLAGS, NOT_IDENTIFIED jz p_command_okay;
	SET_SEQINTCODE(PROTO_VIOLATION)
p_command_okay:
	test MODE_PTR, ~(MK_MODE(M_DFF1, M_DFF1))
		jnz p_command_allocate_fifo;
	/*
	 * Command retry.  Free our current FIFO and
	 * re-allocate a FIFO so transfer state is
	 * reset.
	 */
	SET_SRC_MODE M_DFF1;
	SET_DST_MODE M_DFF1;
	mvi DFFSXFRCTL, RSTCHN|CLRSHCNT;
	SET_MODE(M_SCSI, M_SCSI)
p_command_allocate_fifo:
	bmov ALLOCFIFO_SCBPTR, SCBPTR, 2;
	call allocate_fifo;
	SET_SRC_MODE M_DFF1;
	SET_DST_MODE M_DFF1;
	add NONE, -17, SCB_CDB_LEN;
	jnc p_command_embedded;
p_command_from_host:
	bmov HADDR[0], SCB_HOST_CDB_PTR, 9;
	mvi SG_CACHE_PRE, LAST_SEG;
	mvi DFCNTRL, (PRELOADEN|SCSIEN|HDMAEN);
	jmp p_command_xfer;
p_command_embedded:
	bmov SHCNT[0], SCB_CDB_LEN, 1;
	bmov DFDAT, SCB_CDB_STORE, 16;
	mvi DFCNTRL, SCSIEN;
p_command_xfer:
	and SEQ_FLAGS, ~NO_CDB_SENT;
	test DFCNTRL, SCSIEN jnz .;
	/*
	 * DMA Channel automatically disabled.
	 * Don't allow a data phase if the command
	 * was not fully transferred.
	 */
	test SSTAT2, SDONE jnz ITloop;
	or SEQ_FLAGS, NO_CDB_SENT;
	jmp ITloop;


/*
 * Status phase.  Wait for the data byte to appear, then read it
 * and store it into the SCB.
 */
SET_SRC_MODE M_SCSI;
SET_DST_MODE M_SCSI;
p_status:
	test SEQ_FLAGS,NOT_IDENTIFIED jnz mesgin_proto_violation;
p_status_okay:
	mov SCB_SCSI_STATUS, SCSIDAT;
	or SCB_CONTROL, STATUS_RCVD;
	jmp ITloop;

/*
 * Message out phase.  If MSG_OUT is MSG_IDENTIFYFLAG, build a full
 * identify message sequence and send it to the target.  The host may
 * override this behavior by setting the MK_MESSAGE bit in the SCB
 * control byte.  This will cause us to interrupt the host and allow
 * it to handle the message phase completely on its own.  If the bit
 * associated with this target is set, we will also interrupt the host,
 * thereby allowing it to send a message on the next selection regardless
 * of the transaction being sent.
 *
 * If MSG_OUT is == HOST_MSG, also interrupt the host and take a message.
 * This is done to allow the host to send messages outside of an identify
 * sequence while protecting the sequencer from testing the MK_MESSAGE bit
 * on an SCB that might not be for the current nexus.  (For example, a
 * BDR message in response to a bad reselection would leave us pointed to
 * an SCB that doesn't have anything to do with the current target).
 *
 * Otherwise, treat MSG_OUT as a 1 byte message to send (abort, abort tag,
 * bus device reset).
 *
 * When there are no messages to send, MSG_OUT should be set to MSG_NOOP,
 * in case the target decides to put us in this phase for some strange
 * reason.
 */
p_mesgout_retry:
	/* Turn on ATN for the retry */
	mvi SCSISIGO, ATNO;
p_mesgout:
	mov SINDEX, MSG_OUT;
	cmp SINDEX, MSG_IDENTIFYFLAG jne p_mesgout_from_host;
	test SCB_CONTROL,MK_MESSAGE jnz host_message_loop;
p_mesgout_identify:
	or SINDEX, MSG_IDENTIFYFLAG|DISCENB, SCB_LUN;
	test SCB_CONTROL, DISCENB jnz . + 2;
	and SINDEX, ~DISCENB;
/*
 * Send a tag message if TAG_ENB is set in the SCB control block.
 * Use SCB_NONPACKET_TAG as the tag value.
 */
p_mesgout_tag:
	test SCB_CONTROL,TAG_ENB jz p_mesgout_onebyte;
	mov SCSIDAT, SINDEX;	/* Send the identify message */
	call phase_lock;
	cmp LASTPHASE, P_MESGOUT jne p_mesgout_done;
	and SCSIDAT,TAG_ENB|SCB_TAG_TYPE,SCB_CONTROL;
	call phase_lock;
	cmp LASTPHASE, P_MESGOUT jne p_mesgout_done;
	mov SCBPTR jmp p_mesgout_onebyte;
/*
 * Interrupt the driver, and allow it to handle this message
 * phase and any required retries.
 */
p_mesgout_from_host:
	cmp SINDEX, HOST_MSG jne p_mesgout_onebyte;
	jmp host_message_loop;

p_mesgout_onebyte:
	mvi CLRSINT1, CLRATNO;
	mov SCSIDAT, SINDEX;

	/*
	 * If the next bus phase after ATN drops is message out, it means
	 * that the target is requesting that the last message(s) be resent.
	 */
	call phase_lock;
	cmp LASTPHASE, P_MESGOUT je p_mesgout_retry;

p_mesgout_done:
	mvi CLRSINT1,CLRATNO;	/* Be sure to turn ATNO off */
	mov LAST_MSG, MSG_OUT;
	mvi MSG_OUT, MSG_NOOP;	/* No message left */
	jmp ITloop;

/*
 * Message in phase.  Bytes are read using Automatic PIO mode.
 */
p_mesgin:
	/* read the 1st message byte */
	mvi ACCUM call inb_first;

	test A,MSG_IDENTIFYFLAG jnz mesgin_identify;
	cmp A,MSG_DISCONNECT je mesgin_disconnect;
	cmp A,MSG_SAVEDATAPOINTER je mesgin_sdptrs;
	cmp ALLZEROS,A je mesgin_complete;
	cmp A,MSG_RESTOREPOINTERS je mesgin_rdptrs;
	cmp A,MSG_IGN_WIDE_RESIDUE je mesgin_ign_wide_residue;
	cmp A,MSG_NOOP je mesgin_done;

/*
 * Pushed message loop to allow the kernel to
 * run its own message state engine.  To avoid an
 * extra nop instruction after signaling the kernel,
 * we perform the phase_lock before checking to see
 * if we should exit the loop and skip the phase_lock
 * in the ITloop.  Performing back to back phase_locks
 * shouldn't hurt, but why do it twice...
 */
host_message_loop:
	call phase_lock;	/* Benign the first time through. */
	SET_SEQINTCODE(HOST_MSG_LOOP)
	cmp RETURN_1, EXIT_MSG_LOOP je ITloop;
	cmp RETURN_1, CONT_MSG_LOOP_WRITE jne . + 3;
	mov SCSIDAT, RETURN_2;
	jmp host_message_loop;
	/* Must be CONT_MSG_LOOP_READ */
	mov NONE, SCSIDAT;	/* ACK Byte */
	jmp host_message_loop;

mesgin_ign_wide_residue:
	mov SAVED_MODE, MODE_PTR;
	SET_MODE(M_SCSI, M_SCSI)
	shr NEGOADDR, 4, SAVED_SCSIID;
	mov A, NEGCONOPTS;
	RESTORE_MODE(SAVED_MODE)
	test A, WIDEXFER jz mesgin_reject;
	/* Pull the residue byte */
	mvi REG0 call inb_next;
	cmp REG0, 0x01 jne mesgin_reject;
	test SCB_RESIDUAL_SGPTR[0], SG_LIST_NULL jz . + 2;
	test SCB_TASK_ATTRIBUTE, SCB_XFERLEN_ODD jnz mesgin_done;
	SET_SEQINTCODE(IGN_WIDE_RES)
	jmp mesgin_done;

mesgin_proto_violation:
	SET_SEQINTCODE(PROTO_VIOLATION)
	jmp mesgin_done;
mesgin_reject:
	mvi MSG_MESSAGE_REJECT call mk_mesg;
mesgin_done:
	mov NONE,SCSIDAT;	/*dummy read from latch to ACK*/
	jmp ITloop;

#define INDEX_DISC_LIST(scsiid, lun)				\
	and A, 0xC0, scsiid;					\
	or SCBPTR, A, lun;					\
	clr SCBPTR[1];						\
	and SINDEX, 0x30, scsiid;				\
	shr SINDEX, 3;	/* Multiply by 2 */			\
	add SINDEX, (SCB_DISCONNECTED_LISTS & 0xFF);		\
	mvi SINDEX[1], ((SCB_DISCONNECTED_LISTS >> 8) & 0xFF)

mesgin_identify:
	/*
	 * Determine whether a target is using tagged or non-tagged
	 * transactions by first looking at the transaction stored in
	 * the per-device, disconnected array.  If there is no untagged
	 * transaction for this target, this must be a tagged transaction.
	 */
	and SAVED_LUN, MSG_IDENTIFY_LUNMASK, A;
	INDEX_DISC_LIST(SAVED_SCSIID, SAVED_LUN);
	bmov DINDEX, SINDEX, 2;
	bmov REG0, SINDIR, 2;
	cmp REG0[1], SCB_LIST_NULL je snoop_tag;
	/* Untagged.  Clear the busy table entry and setup the SCB. */
	bmov DINDIR, ALLONES, 2;
	bmov SCBPTR, REG0, 2;
	jmp setup_SCB;

/*
 * Here we "snoop" the bus looking for a SIMPLE QUEUE TAG message.
 * If we get one, we use the tag returned to find the proper
 * SCB.  After receiving the tag, look for the SCB at SCB locations tag and
 * tag + 256.
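 * (verify_other_scb below toggles SCBPTR[1], which is what switches
 * between the tag and tag + 256 locations.)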
 */
snoop_tag:
	if ((ahd->flags & AHD_SEQUENCER_DEBUG) != 0) {
		or SEQ_FLAGS, 0x80;
	}
	mov NONE, SCSIDAT;	/* ACK Identify MSG */
	call phase_lock;
	if ((ahd->flags & AHD_SEQUENCER_DEBUG) != 0) {
		or SEQ_FLAGS, 0x1;
	}
	cmp LASTPHASE, P_MESGIN jne not_found_ITloop;
	if ((ahd->flags & AHD_SEQUENCER_DEBUG) != 0) {
		or SEQ_FLAGS, 0x2;
	}
	cmp SCSIBUS, MSG_SIMPLE_Q_TAG jne not_found;
get_tag:
	clr SCBPTR[1];
	mvi SCBPTR call inb_next;	/* tag value */
verify_scb:
	test SCB_CONTROL,DISCONNECTED jz verify_other_scb;
	mov A, SAVED_SCSIID;
	cmp SCB_SCSIID, A jne verify_other_scb;
	mov A, SAVED_LUN;
	cmp SCB_LUN, A je setup_SCB_disconnected;
verify_other_scb:
	xor SCBPTR[1], 1;
	test SCBPTR[1], 0xFF jnz verify_scb;
	jmp not_found;

/*
 * Ensure that the SCB the tag points to is for
 * an SCB transaction to the reconnecting target.
 */
setup_SCB:
	if ((ahd->flags & AHD_SEQUENCER_DEBUG) != 0) {
		or SEQ_FLAGS, 0x10;
	}
	test SCB_CONTROL,DISCONNECTED jz not_found;
setup_SCB_disconnected:
	and SCB_CONTROL,~DISCONNECTED;
	clr SEQ_FLAGS;	/* make note of IDENTIFY */
	test SCB_SGPTR, SG_LIST_NULL jnz . + 3;
	bmov ALLOCFIFO_SCBPTR, SCBPTR, 2;
	call allocate_fifo;
	/* See if the host wants to send a message upon reconnection */
	test SCB_CONTROL, MK_MESSAGE jz mesgin_done;
	mvi HOST_MSG call mk_mesg;
	jmp mesgin_done;

not_found:
	SET_SEQINTCODE(NO_MATCH)
	jmp mesgin_done;

not_found_ITloop:
	SET_SEQINTCODE(NO_MATCH)
	jmp ITloop;

/*
 * We received a "command complete" message.  Put the SCB on the complete
 * queue and trigger a completion interrupt via the idle loop.  Before doing
 * so, check to see if there
 * is a residual or the status byte is something other than STATUS_GOOD (0).
 * In either of these conditions, we upload the SCB back to the host so it can
 * process this information.  In the case of a non zero status byte, we
 * additionally interrupt the kernel driver synchronously, allowing it to
 * decide if sense should be retrieved.  If the kernel driver wishes to request
 * sense, it will fill the kernel SCB with a request sense command, requeue
 * it to the QINFIFO and tell us not to post to the QOUTFIFO by setting
 * RETURN_1 to SEND_SENSE.
 */
mesgin_complete:

	/*
	 * If ATN is raised, we still want to give the target a message.
	 * Perhaps there was a parity error on this last message byte.
	 * Either way, the target should take us to message out phase
	 * and then attempt to complete the command again.  We should use a
	 * critical section here to guard against a timeout triggering
	 * for this command and setting ATN while we are still processing
	 * the completion.
	test	SCSISIGI, ATNI jnz mesgin_done;
	 */

	/*
	 * If we are identified and have successfully sent the CDB,
	 * any status will do.  Optimize this fast path.
	 */
	test SCB_CONTROL, STATUS_RCVD jz mesgin_proto_violation;
	test SEQ_FLAGS, NOT_IDENTIFIED|NO_CDB_SENT jz complete_accepted;

	/*
	 * If the target never sent an identify message but instead went
	 * to mesgin to give an invalid message, let the host abort us.
	 */
	test SEQ_FLAGS, NOT_IDENTIFIED jnz mesgin_proto_violation;

	/*
	 * If we received good status but never successfully sent the
	 * cdb, abort the command.
	 */
	test SCB_SCSI_STATUS,0xff jnz complete_accepted;
	test SEQ_FLAGS, NO_CDB_SENT jnz mesgin_proto_violation;
complete_accepted:

	/*
	 * See if we attempted to deliver a message but the target ignored us.
	 */
	test SCB_CONTROL, MK_MESSAGE jz complete_nomsg;
	SET_SEQINTCODE(MKMSG_FAILED)
complete_nomsg:
	call queue_scb_completion;
	jmp await_busfree;

freeze_queue:
	/* Cancel any pending select-out. */
	test SSTAT0, SELDO|SELINGO jnz . + 2;
	and SCSISEQ0, ~ENSELO;
	mov ACCUM_SAVE, A;
	clr A;
	add QFREEZE_COUNT, 1;
	adc QFREEZE_COUNT[1], A;
	or SEQ_FLAGS2, SELECTOUT_QFROZEN;
	mov A, ACCUM_SAVE ret;

/*
 * Complete the current FIFO's SCB if data for this same
 * SCB is not transferring in the other FIFO.
 */
SET_SRC_MODE M_DFF1;
SET_DST_MODE M_DFF1;
pkt_complete_scb_if_fifos_idle:
	bmov ARG_1, SCBPTR, 2;
	mvi DFFSXFRCTL, CLRCHN;
	SET_MODE(M_SCSI, M_SCSI)
	bmov SCBPTR, ARG_1, 2;
	test SCB_FIFO_USE_COUNT, 0xFF jnz return;
queue_scb_completion:
	test SCB_SCSI_STATUS,0xff jnz bad_status;
	/*
	 * Check for residuals
	 */
	test SCB_SGPTR, SG_LIST_NULL jnz complete;	/* No xfer */
	test SCB_SGPTR, SG_FULL_RESID jnz upload_scb;/* Never xfered */
	test SCB_RESIDUAL_SGPTR, SG_LIST_NULL jz upload_scb;
complete:
	bmov SCB_NEXT_COMPLETE, COMPLETE_SCB_HEAD, 2;
	bmov COMPLETE_SCB_HEAD, SCBPTR, 2 ret;
bad_status:
	cmp SCB_SCSI_STATUS, STATUS_PKT_SENSE je upload_scb;
	call freeze_queue;
upload_scb:
	/*
	 * Restore SCB TAG since we reuse this field
	 * in the sequencer.  We don't want to corrupt
	 * it on the host.
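	 * (The tag storage doubles as SCB_FIFO_USE_COUNT; see the comment
	 * at fetch_new_scb_done above.)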
	 */
	bmov SCB_TAG, SCBPTR, 2;
	bmov SCB_NEXT_COMPLETE, COMPLETE_DMA_SCB_HEAD, 2;
	bmov COMPLETE_DMA_SCB_HEAD, SCBPTR, 2;
	or SCB_SGPTR, SG_STATUS_VALID ret;

/*
 * Is it a disconnect message?  Set a flag in the SCB to remind us
 * and await the bus going free.  If this is an untagged transaction
 * store the SCB id for it in our untagged target table for lookup on
 * a reselection.
 */
mesgin_disconnect:
	/*
	 * If ATN is raised, we still want to give the target a message.
	 * Perhaps there was a parity error on this last message byte
	 * or we want to abort this command.  Either way, the target
	 * should take us to message out phase and then attempt to
	 * disconnect again.
	 * XXX - Wait for more testing.
	test	SCSISIGI, ATNI jnz mesgin_done;
	 */
	test SEQ_FLAGS, NOT_IDENTIFIED|NO_CDB_SENT
		jnz mesgin_proto_violation;
	or SCB_CONTROL,DISCONNECTED;
	test SCB_CONTROL, TAG_ENB jnz await_busfree;
queue_disc_scb:
	bmov REG0, SCBPTR, 2;
	INDEX_DISC_LIST(SAVED_SCSIID, SAVED_LUN);
	bmov DINDEX, SINDEX, 2;
	bmov DINDIR, REG0, 2;
	bmov SCBPTR, REG0, 2;
	/* FALLTHROUGH */
await_busfree:
	and SIMODE1, ~ENBUSFREE;
	if ((ahd->bugs & AHD_BUSFREEREV_BUG) == 0) {
		/*
		 * In the BUSFREEREV_BUG case, the
		 * busfree status was cleared at the
		 * beginning of the connection.
		 */
		mvi CLRSINT1,CLRBUSFREE;
	}
	mov NONE, SCSIDAT;	/* Ack the last byte */
	test MODE_PTR, ~(MK_MODE(M_DFF1, M_DFF1))
		jnz await_busfree_not_m_dff;
	SET_SRC_MODE M_DFF1;
	SET_DST_MODE M_DFF1;
await_busfree_clrchn:
	mvi DFFSXFRCTL, CLRCHN;
await_busfree_not_m_dff:
	call clear_target_state;
	test SSTAT1,REQINIT|BUSFREE jz .;
	test SSTAT1, BUSFREE jnz idle_loop;
	SET_SEQINTCODE(MISSED_BUSFREE)


/*
 * Save data pointers message:
 * Copying RAM values back to SCB, for Save Data Pointers message, but
 * only if we've actually been into a data phase to change them.  This
 * protects against bogus data in scratch ram and the residual counts
 * since they are only initialized when we go into data_in or data_out.
 * Ack the message as soon as possible.
 */
SET_SRC_MODE M_DFF1;
SET_DST_MODE M_DFF1;
mesgin_sdptrs:
	mov NONE,SCSIDAT;	/*dummy read from latch to ACK*/
	test SEQ_FLAGS, DPHASE jz ITloop;
	call save_pointers;
	jmp ITloop;

save_pointers:
	/*
	 * If we are asked to save our position at the end of the
	 * transfer, just mark us at the end rather than perform a
	 * full save.
	 */
	test SCB_RESIDUAL_SGPTR[0], SG_LIST_NULL jz save_pointers_full;
	or SCB_SGPTR, SG_LIST_NULL ret;

save_pointers_full:
	/*
	 * The SCB_DATAPTR becomes the current SHADDR.
	 * All other information comes directly from our residual
	 * state.
	 */
	bmov SCB_DATAPTR, SHADDR, 8;
	bmov SCB_DATACNT, SCB_RESIDUAL_DATACNT, 8 ret;

/*
 * Restore pointers message?  Data pointers are recopied from the
 * SCB anytime we enter a data phase for the first time, so all
 * we need to do is clear the DPHASE flag and let the data phase
 * code do the rest.  We also reset/reallocate the FIFO to make
 * sure we have a clean start for the next data or command phase.
 */
mesgin_rdptrs:
	and SEQ_FLAGS, ~DPHASE;
	test MODE_PTR, ~(MK_MODE(M_DFF1, M_DFF1)) jnz msgin_rdptrs_get_fifo;
	mvi DFFSXFRCTL, RSTCHN|CLRSHCNT;
	SET_MODE(M_SCSI, M_SCSI)
msgin_rdptrs_get_fifo:
	call allocate_fifo;
	jmp mesgin_done;

clear_target_state:
	mvi LASTPHASE, P_BUSFREE;
	/* clear target specific flags */
	mvi SEQ_FLAGS, NOT_IDENTIFIED|NO_CDB_SENT ret;

phase_lock:
	if ((ahd->bugs & AHD_EARLY_REQ_BUG) != 0) {
		/*
		 * Don't ignore persistent REQ assertions just because
		 * they were asserted within the bus settle delay window.
		 * This allows us to tolerate devices like the GEM318
		 * that violate the SCSI spec.  We are careful not to
		 * count REQ while we are waiting for it to fall during
		 * an async phase due to our asserted ACK.  Each
		 * sequencer instruction takes ~25ns, so the REQ must
		 * last at least 100ns in order to be counted as a true
		 * REQ.
		 */
		test SCSIPHASE, 0xFF jnz phase_locked;
		test SCSISIGI, ACKI jnz phase_lock;
		test SCSISIGI, REQI jz phase_lock;
		test SCSIPHASE, 0xFF jnz phase_locked;
		test SCSISIGI, ACKI jnz phase_lock;
		test SCSISIGI, REQI jz phase_lock;
phase_locked:
	} else {
		test SCSIPHASE, 0xFF jz .;
	}
	test SSTAT1, SCSIPERR jnz phase_lock;
phase_lock_latch_phase:
	and LASTPHASE, PHASE_MASK, SCSISIGI ret;

/*
 * Functions to read data in Automatic PIO mode.
 *
 * An ACK is not sent on input from the target until SCSIDATL is read from.
 * So we wait until SCSIDATL is latched (the usual way), then read the data
 * byte directly off the bus using SCSIBUSL.  When we have pulled the ATN
 * line, or we just want to acknowledge the byte, then we do a dummy read
 * from SCSIDATL.  The SCSI spec guarantees that the target will hold the
 * data byte on the bus until we send our ACK.
 *
 * The assumption here is that these are called in a particular sequence,
 * and that REQ is already set when inb_first is called.  inb_{first,next}
 * use the same calling convention as inb.
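 * (inb_first reads SCSIBUS without ACKing; inb_next ACKs the previous byte
 * with a dummy SCSIDAT read, waits for the next REQ, then falls through to
 * inb_first.)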
 */
inb_next:
	mov NONE,SCSIDAT;	/*dummy read from latch to ACK*/
inb_next_wait:
	/*
	 * If there is a parity error, wait for the kernel to
	 * see the interrupt and prepare our message response
	 * before continuing.
	 */
	test SCSIPHASE, 0xFF jz .;
	test SSTAT1, SCSIPERR jnz inb_next_wait;
inb_next_check_phase:
	and LASTPHASE, PHASE_MASK, SCSISIGI;
	cmp LASTPHASE, P_MESGIN jne mesgin_phasemis;
inb_first:
	clr DINDEX[1];
	mov DINDEX,SINDEX;
	mov DINDIR,SCSIBUS ret;	/*read byte directly from bus*/
inb_last:
	mov NONE,SCSIDAT ret;	/*dummy read from latch to ACK*/

mk_mesg:
	mvi SCSISIGO, ATNO;
	mov MSG_OUT,SINDEX ret;

SET_SRC_MODE M_DFF1;
SET_DST_MODE M_DFF1;
disable_ccsgen:
	test SG_STATE, FETCH_INPROG jz disable_ccsgen_fetch_done;
	clr CCSGCTL;
disable_ccsgen_fetch_done:
	clr SG_STATE ret;

service_fifo:
	/*
	 * Do we have any prefetch left???
	 */
	test SG_STATE, SEGS_AVAIL jnz idle_sg_avail;

	/*
	 * Can this FIFO have access to the S/G cache yet?
	 */
	test CCSGCTL, SG_CACHE_AVAIL jz return;

	/* Did we just finish fetching segs? */
	test CCSGCTL, CCSGDONE jnz idle_sgfetch_complete;

	/* Are we actively fetching segments? */
	test CCSGCTL, CCSGENACK jnz return;

	/*
	 * We fetch a "cacheline aligned" and sized amount of data
	 * so we don't end up referencing a non-existent page.
	 * Cacheline aligned is in quotes because the kernel will
	 * set the prefetch amount to a reasonable level if the
	 * cacheline size is unknown.
	 */
	bmov SGHADDR, SCB_RESIDUAL_SGPTR, 4;
	mvi SGHCNT, SG_PREFETCH_CNT;
	if ((ahd->bugs & AHD_REG_SLOW_SETTLE_BUG) != 0) {
		/*
		 * Need two instructions between "touches" of SGHADDR.
		 */
		nop;
	}
	and SGHADDR[0], SG_PREFETCH_ALIGN_MASK, SCB_RESIDUAL_SGPTR;
	mvi CCSGCTL, CCSGEN|CCSGRESET;
	or SG_STATE, FETCH_INPROG ret;
idle_sgfetch_complete:
	/*
	 * Guard against SG_CACHE_AVAIL activating during sg fetch
	 * request in the other FIFO.
	 */
	test SG_STATE, FETCH_INPROG jz return;
	clr CCSGCTL;
	and CCSGADDR, SG_PREFETCH_ADDR_MASK, SCB_RESIDUAL_SGPTR;
	mvi SG_STATE, SEGS_AVAIL|LOADING_NEEDED;
idle_sg_avail:
	/* Does the hardware have space for another SG entry? */
	test DFSTATUS, PRELOAD_AVAIL jz return;
	/*
	 * On the A, preloading a segment before HDMAENACK
	 * comes true can clobber the shadow address of the
	 * first segment in the S/G FIFO.  Wait until it is
	 * safe to proceed.
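	 * (Controllers with the AHD_NEW_DFCNTRL_OPTS feature do not need
	 * this wait; the test below is only patched in when that feature
	 * is absent.)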
	 */
	if ((ahd->features & AHD_NEW_DFCNTRL_OPTS) == 0) {
		test DFCNTRL, HDMAENACK jz return;
	}
	if ((ahd->flags & AHD_64BIT_ADDRESSING) != 0) {
		bmov HADDR, CCSGRAM, 8;
	} else {
		bmov HADDR, CCSGRAM, 4;
	}
	bmov HCNT, CCSGRAM, 3;
	bmov SCB_RESIDUAL_DATACNT[3], CCSGRAM, 1;
	if ((ahd->flags & AHD_39BIT_ADDRESSING) != 0) {
		and HADDR[4], SG_HIGH_ADDR_BITS, SCB_RESIDUAL_DATACNT[3];
	}
	if ((ahd->flags & AHD_64BIT_ADDRESSING) != 0) {
		/* Skip 4 bytes of pad. */
		add CCSGADDR, 4;
	}
sg_advance:
	clr A;	/* add sizeof(struct scatter) */
	add SCB_RESIDUAL_SGPTR[0],SG_SIZEOF;
	adc SCB_RESIDUAL_SGPTR[1],A;
	adc SCB_RESIDUAL_SGPTR[2],A;
	adc SCB_RESIDUAL_SGPTR[3],A;
	mov SINDEX, SCB_RESIDUAL_SGPTR[0];
	test SCB_RESIDUAL_DATACNT[3], SG_LAST_SEG jz . + 3;
	or SINDEX, LAST_SEG;
	clr SG_STATE;
	mov SG_CACHE_PRE, SINDEX;
	if ((ahd->features & AHD_NEW_DFCNTRL_OPTS) != 0) {
		/*
		 * Use SCSIENWRDIS so that SCSIEN is never
		 * modified by this operation.
		 */
		or DFCNTRL, PRELOADEN|HDMAEN|SCSIENWRDIS;
	} else {
		or DFCNTRL, PRELOADEN|HDMAEN;
	}
	/*
	 * Do we have another segment in the cache?
	 */
	add NONE, SG_PREFETCH_CNT_LIMIT, CCSGADDR;
	jnc return;
	and SG_STATE, ~SEGS_AVAIL ret;

/*
 * Initialize the DMA address and counter from the SCB.
 */
load_first_seg:
	bmov HADDR, SCB_DATAPTR, 11;
	and REG_ISR, ~SG_FULL_RESID, SCB_SGPTR[0];
	test SCB_DATACNT[3], SG_LAST_SEG jz . + 2;
	or REG_ISR, LAST_SEG;
	mov SG_CACHE_PRE, REG_ISR;
	mvi DFCNTRL, (PRELOADEN|SCSIEN|HDMAEN);
	/*
	 * Since we are entering a data phase, we will
	 * rely on the SCB_RESID* fields.  Initialize the
	 * residual and clear the full residual flag.
	 */
	and SCB_SGPTR[0], ~SG_FULL_RESID;
	bmov SCB_RESIDUAL_DATACNT[3], SCB_DATACNT[3], 5;
	/* If we need more S/G elements, tell the idle loop */
	test SCB_RESIDUAL_DATACNT[3], SG_LAST_SEG jnz . + 2;
	mvi SG_STATE, LOADING_NEEDED ret;
	clr SG_STATE ret;

p_data_handle_xfer:
	call setjmp;
	test SG_STATE, LOADING_NEEDED jnz service_fifo;
p_data_clear_handler:
	or LONGJMP_ADDR[1], INVALID_ADDR ret;

p_data:
	test SEQ_FLAGS, NOT_IDENTIFIED|NO_CDB_SENT jz p_data_allowed;
	SET_SEQINTCODE(PROTO_VIOLATION)
p_data_allowed:

	test SEQ_FLAGS, DPHASE jz data_phase_initialize;

	/*
	 * If we re-enter the data phase after going through another
	 * phase, our transfer location has almost certainly been
	 * corrupted by the intervening, non-data, transfers.  Ask
Ask 1394 1.1 fvdl * the host driver to fix us up based on the transfer residual 1395 1.1 fvdl * unless we already know that we should be bitbucketing. 1396 1.1 fvdl */ 1397 1.1 fvdl test SCB_RESIDUAL_SGPTR[0], SG_LIST_NULL jnz p_data_bitbucket; 1398 1.1 fvdl SET_SEQINTCODE(PDATA_REINIT) 1399 1.1 fvdl jmp data_phase_inbounds; 1400 1.1 fvdl 1401 1.1 fvdl p_data_bitbucket: 1402 1.1 fvdl /* 1403 1.1 fvdl * Turn on `Bit Bucket' mode, wait until the target takes 1404 1.1 fvdl * us to another phase, and then notify the host. 1405 1.1 fvdl */ 1406 1.1 fvdl mov SAVED_MODE, MODE_PTR; 1407 1.1 fvdl test MODE_PTR, ~(MK_MODE(M_DFF1, M_DFF1)) 1408 1.1 fvdl jnz bitbucket_not_m_dff; 1409 1.1 fvdl /* 1410 1.1 fvdl * Ensure that any FIFO contents are cleared out and the 1411 1.1 fvdl * FIFO free'd prior to starting the BITBUCKET. BITBUCKET 1412 1.1 fvdl * doesn't discard data already in the FIFO. 1413 1.1 fvdl */ 1414 1.1 fvdl mvi DFFSXFRCTL, RSTCHN|CLRSHCNT; 1415 1.1 fvdl SET_MODE(M_SCSI, M_SCSI) 1416 1.1 fvdl bitbucket_not_m_dff: 1417 1.1 fvdl or SXFRCTL1,BITBUCKET; 1418 1.1 fvdl /* Wait for non-data phase. */ 1419 1.1 fvdl test SCSIPHASE, ~DATA_PHASE_MASK jz .; 1420 1.1 fvdl and SXFRCTL1, ~BITBUCKET; 1421 1.1 fvdl RESTORE_MODE(SAVED_MODE) 1422 1.1 fvdl SET_SRC_MODE M_DFF1; 1423 1.1 fvdl SET_DST_MODE M_DFF1; 1424 1.1 fvdl SET_SEQINTCODE(DATA_OVERRUN) 1425 1.1 fvdl jmp ITloop; 1426 1.1 fvdl 1427 1.1 fvdl data_phase_initialize: 1428 1.1 fvdl test SCB_SGPTR[0], SG_LIST_NULL jnz p_data_bitbucket; 1429 1.1 fvdl call load_first_seg; 1430 1.1 fvdl data_phase_inbounds: 1431 1.1 fvdl /* We have seen a data phase at least once. */ 1432 1.1 fvdl or SEQ_FLAGS, DPHASE; 1433 1.1 fvdl mov SAVED_MODE, MODE_PTR; 1434 1.1 fvdl test SG_STATE, LOADING_NEEDED jz data_group_dma_loop; 1435 1.1 fvdl call p_data_handle_xfer; 1436 1.1 fvdl data_group_dma_loop: 1437 1.1 fvdl /* 1438 1.1 fvdl * The transfer is complete if either the last segment 1439 1.1 fvdl * completes or the target changes phase. Both conditions 1440 1.1 fvdl * will clear SCSIEN. 1441 1.1 fvdl */ 1442 1.1 fvdl call idle_loop_service_fifos; 1443 1.1 fvdl call idle_loop_cchan; 1444 1.1 fvdl call idle_loop_gsfifo; 1445 1.1 fvdl RESTORE_MODE(SAVED_MODE) 1446 1.1 fvdl test DFCNTRL, SCSIEN jnz data_group_dma_loop; 1447 1.1 fvdl 1448 1.1 fvdl data_group_dmafinish: 1449 1.1 fvdl /* 1450 1.1 fvdl * The transfer has terminated either due to a phase 1451 1.1 fvdl * change, and/or the completion of the last segment. 1452 1.1 fvdl * We have two goals here. Do as much other work 1453 1.1 fvdl * as possible while the data fifo drains on a read 1454 1.1 fvdl * and respond as quickly as possible to the standard 1455 1.1 fvdl * messages (save data pointers/disconnect and command 1456 1.1 fvdl * complete) that usually follow a data phase. 1457 1.1 fvdl */ 1458 1.1 fvdl call calc_residual; 1459 1.1 fvdl 1460 1.1 fvdl /* 1461 1.1 fvdl * Go ahead and shut down the DMA engine now. 1462 1.1 fvdl */ 1463 1.1 fvdl test DFCNTRL, DIRECTION jnz data_phase_finish; 1464 1.1 fvdl data_group_fifoflush: 1465 1.1 fvdl if ((ahd->bugs & AHD_AUTOFLUSH_BUG) != 0) { 1466 1.1 fvdl or DFCNTRL, FIFOFLUSH; 1467 1.1 fvdl } 1468 1.1 fvdl /* 1469 1.1 fvdl * We have enabled the auto-ack feature. This means 1470 1.1 fvdl * that the controller may have already transferred 1471 1.1 fvdl * some overrun bytes into the data FIFO and acked them 1472 1.1 fvdl * on the bus. 
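 * Expressed as a host-style predicate (illustrative names only, not
 * driver code):
 *
 *	// S/G list exhausted and the last segment completed, yet
 *	// data remains in the FIFO: the target sent extra bytes.
 *	overrun = sg_list_null && last_seg_done && !fifo_empty;
 *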
The only way to detect this situation is 1473 1.1 fvdl * to wait for LAST_SEG_DONE to come true on a completed 1474 1.1 fvdl * transfer and then test to see if the data FIFO is 1475 1.1 fvdl * non-empty. We know there is more data yet to transfer 1476 1.1 fvdl * if SG_LIST_NULL is not yet set, thus there cannot be 1477 1.1 fvdl * an overrun. 1478 1.1 fvdl */ 1479 1.1 fvdl test SCB_RESIDUAL_SGPTR[0], SG_LIST_NULL jz data_phase_finish; 1480 1.1 fvdl test SG_CACHE_SHADOW, LAST_SEG_DONE jz .; 1481 1.1 fvdl test DFSTATUS, FIFOEMP jnz data_phase_finish; 1482 1.1 fvdl /* Overrun */ 1483 1.1 fvdl jmp p_data; 1484 1.1 fvdl data_phase_finish: 1485 1.1 fvdl /* 1486 1.1 fvdl * If the target has left us in data phase, loop through 1487 1.2 wiz * the DMA code again. We will only loop if there is a 1488 1.9 perry * data overrun. 1489 1.1 fvdl */ 1490 1.1 fvdl if ((ahd->flags & AHD_TARGETROLE) != 0) { 1491 1.1 fvdl test SSTAT0, TARGET jnz data_phase_done; 1492 1.1 fvdl } 1493 1.1 fvdl if ((ahd->flags & AHD_INITIATORROLE) != 0) { 1494 1.1 fvdl test SSTAT1, REQINIT jz .; 1495 1.1 fvdl test SCSIPHASE, DATA_PHASE_MASK jnz p_data; 1496 1.1 fvdl } 1497 1.1 fvdl 1498 1.1 fvdl data_phase_done: 1499 1.1 fvdl /* Kill off any pending prefetch */ 1500 1.1 fvdl call disable_ccsgen; 1501 1.1 fvdl or LONGJMP_ADDR[1], INVALID_ADDR; 1502 1.1 fvdl 1503 1.1 fvdl if ((ahd->flags & AHD_TARGETROLE) != 0) { 1504 1.1 fvdl test SEQ_FLAGS, DPHASE_PENDING jz ITloop; 1505 1.1 fvdl /* 1506 1.1 fvdl and SEQ_FLAGS, ~DPHASE_PENDING; 1507 1.1 fvdl * For data-in phases, wait for any pending acks from the 1508 1.1 fvdl * initiator before changing phase. We only need to 1509 1.1 fvdl * send Ignore Wide Residue messages for data-in phases. 1510 1.1 fvdl test DFCNTRL, DIRECTION jz target_ITloop; 1511 1.1 fvdl test SSTAT1, REQINIT jnz .; 1512 1.6 thorpej test SCB_TASK_ATTRIBUTE, SCB_XFERLEN_ODD jz target_ITloop; 1513 1.1 fvdl SET_MODE(M_SCSI, M_SCSI) 1514 1.1 fvdl test NEGCONOPTS, WIDEXFER jz target_ITloop; 1515 1.1 fvdl */ 1516 1.1 fvdl /* 1517 1.1 fvdl * Issue an Ignore Wide Residue Message. 1518 1.1 fvdl mvi P_MESGIN|BSYO call change_phase; 1519 1.1 fvdl mvi MSG_IGN_WIDE_RESIDUE call target_outb; 1520 1.1 fvdl mvi 1 call target_outb; 1521 1.1 fvdl jmp target_ITloop; 1522 1.1 fvdl */ 1523 1.1 fvdl } else { 1524 1.1 fvdl jmp ITloop; 1525 1.1 fvdl } 1526 1.1 fvdl 1527 1.1 fvdl /* 1528 1.1 fvdl * We assume that, even though data may still be 1529 1.1 fvdl * transferring to the host, that the SCSI side of 1530 1.1 fvdl * the DMA engine is now in a static state. This 1531 1.1 fvdl * allows us to update our notion of where we are 1532 1.1 fvdl * in this transfer. 1533 1.1 fvdl * 1534 1.1 fvdl * If, by chance, we stopped before being able 1535 1.1 fvdl * to fetch additional segments for this transfer, 1536 1.1 fvdl * yet the last S/G was completely exhausted, 1537 1.1 fvdl * call our idle loop until it is able to load 1538 1.1 fvdl * another segment. This will allow us to immediately 1539 1.1 fvdl * pickup on the next segment on the next data phase. 1540 1.1 fvdl * 1541 1.1 fvdl * If we happened to stop on the last segment, then 1542 1.1 fvdl * our residual information is still correct from 1543 1.1 fvdl * the idle loop and there is no need to perform 1544 1.1 fvdl * any fixups. 
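 *
 * In outline, the residual bookkeeping below reduces to the following
 * (illustrative pseudo-C of the calc_residual/sgptr_fixup cases, not
 * real driver code):
 *
 *	if (!(sg_cache_shadow & LAST_SEG)) {
 *		// stopped mid-list: wait for a valid shadow, then
 *		// rewind the next-S/G pointer and record the count
 *		fixup_residual_sgptr();
 *		residual_datacnt = shcnt;
 *	} else if (shadow_valid) {
 *		residual_datacnt = shcnt;	// stopped on last seg
 *	} else {
 *		residual_sgptr |= SG_LIST_NULL;	// list fully consumed
 *	}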
1545 1.1 fvdl */ 1546 1.1 fvdl residual_before_last_seg: 1547 1.1 fvdl test MDFFSTAT, SHVALID jnz sgptr_fixup; 1548 1.1 fvdl /* 1549 1.1 fvdl * Can never happen from an interrupt as the packetized 1550 1.1 fvdl * hardware will only interrupt us once SHVALID or 1551 1.1 fvdl * LAST_SEG_DONE. 1552 1.1 fvdl */ 1553 1.1 fvdl call idle_loop_service_fifos; 1554 1.1 fvdl RESTORE_MODE(SAVED_MODE) 1555 1.1 fvdl /* FALLTHROUGH */ 1556 1.1 fvdl calc_residual: 1557 1.1 fvdl test SG_CACHE_SHADOW, LAST_SEG jz residual_before_last_seg; 1558 1.1 fvdl /* Record if we've consumed all S/G entries */ 1559 1.1 fvdl test MDFFSTAT, SHVALID jz . + 2; 1560 1.1 fvdl bmov SCB_RESIDUAL_DATACNT, SHCNT, 3 ret; 1561 1.1 fvdl or SCB_RESIDUAL_SGPTR[0], SG_LIST_NULL ret; 1562 1.1 fvdl 1563 1.1 fvdl sgptr_fixup: 1564 1.1 fvdl /* 1565 1.1 fvdl * Fixup the residual next S/G pointer. The S/G preload 1566 1.1 fvdl * feature of the chip allows us to load two elements 1567 1.1 fvdl * in addition to the currently active element. We 1568 1.1 fvdl * store the bottom byte of the next S/G pointer in 1569 1.1 fvdl * the SG_CACHE_PTR register so we can restore the 1570 1.1 fvdl * correct value when the DMA completes. If the next 1571 1.1 fvdl * sg ptr value has advanced to the point where higher 1572 1.1 fvdl * bytes in the address have been affected, fix them 1573 1.1 fvdl * too. 1574 1.1 fvdl */ 1575 1.1 fvdl test SG_CACHE_SHADOW, 0x80 jz sgptr_fixup_done; 1576 1.1 fvdl test SCB_RESIDUAL_SGPTR[0], 0x80 jnz sgptr_fixup_done; 1577 1.1 fvdl add SCB_RESIDUAL_SGPTR[1], -1; 1578 1.9 perry adc SCB_RESIDUAL_SGPTR[2], -1; 1579 1.1 fvdl adc SCB_RESIDUAL_SGPTR[3], -1; 1580 1.1 fvdl sgptr_fixup_done: 1581 1.1 fvdl and SCB_RESIDUAL_SGPTR[0], SG_ADDR_MASK, SG_CACHE_SHADOW; 1582 1.1 fvdl clr SCB_RESIDUAL_DATACNT[3]; /* We are not the last seg */ 1583 1.1 fvdl bmov SCB_RESIDUAL_DATACNT, SHCNT, 3 ret; 1584 1.1 fvdl 1585 1.1 fvdl export timer_isr: 1586 1.1 fvdl call issue_cmdcmplt; 1587 1.1 fvdl mvi CLRSEQINTSTAT, CLRSEQ_SWTMRTO; 1588 1.1 fvdl if ((ahd->bugs & AHD_SET_MODE_BUG) != 0) { 1589 1.1 fvdl /* 1590 1.1 fvdl * In H2A4, the mode pointer is not saved 1591 1.1 fvdl * for intvec2, but is restored on iret. 1592 1.1 fvdl * This can lead to the restoration of a 1593 1.1 fvdl * bogus mode ptr. Manually clear the 1594 1.1 fvdl * intmask bits and do a normal return 1595 1.1 fvdl * to compensate. 1596 1.1 fvdl */ 1597 1.1 fvdl and SEQINTCTL, ~(INTMASK2|INTMASK1) ret; 1598 1.1 fvdl } else { 1599 1.1 fvdl or SEQINTCTL, IRET ret; 1600 1.1 fvdl } 1601 1.1 fvdl 1602 1.1 fvdl export seq_isr: 1603 1.1 fvdl if ((ahd->features & AHD_RTI) == 0) { 1604 1.1 fvdl /* 1605 1.1 fvdl * On RevA Silicon, if the target returns us to data-out 1606 1.1 fvdl * after we have already trained for data-out, it is 1607 1.1 fvdl * possible for us to transition the free running clock to 1608 1.1 fvdl * data-valid before the required 100ns P1 setup time (8 P1 1609 1.1 fvdl * assertions in fast-160 mode). This will only happen if 1610 1.1 fvdl * this L-Q is a continuation of a data transfer for which 1611 1.1 fvdl * we have already prefetched data into our FIFO (LQ/Data 1612 1.1 fvdl * followed by LQ/Data for the same write transaction). 1613 1.1 fvdl * This can cause some target implementations to miss the 1614 1.1 fvdl * first few data transfers on the bus. 
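 * (At fast-160, data is clocked at 80MHz, so one REQ period is 12.5ns
 * and the required setup time works out to 8 x 12.5ns = 100ns, hence
 * the "8 P1 assertions" figure above.)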
We detect this
1615 1.1 fvdl * situation by noticing that this is the first data transfer
1616 1.1 fvdl * after an LQ (LQIWORKONLQ true), that the data transfer is
1617 1.1 fvdl * a continuation of a transfer already setup in our FIFO
1618 1.1 fvdl * (SAVEPTRS interrupt), and that the transaction is a write
1619 1.1 fvdl * (DIRECTION set in DFCNTRL). The delay is performed by
1620 1.1 fvdl * disabling SCSIEN until we see the first REQ from the
1621 1.1 fvdl * target.
1622 1.9 perry *
1623 1.1 fvdl * First instruction in an ISR cannot be a branch on
1624 1.1 fvdl * Rev A. Snapshot LQISTAT2 so the status is not missed
1625 1.1 fvdl * and defer the test by one instruction.
1626 1.1 fvdl */
1627 1.1 fvdl mov REG_ISR, LQISTAT2;
1628 1.4 thorpej test REG_ISR, LQIWORKONLQ jz main_isr;
1629 1.4 thorpej test SEQINTSRC, SAVEPTRS jz main_isr;
1630 1.1 fvdl test LONGJMP_ADDR[1], INVALID_ADDR jz saveptr_active_fifo;
1631 1.1 fvdl /*
1632 1.4 thorpej * Switch to the active FIFO after clearing the snapshot
1633 1.4 thorpej * savepointer in the current FIFO. We do this so that
1634 1.4 thorpej * a pending CTXTDONE or SAVEPTR is visible in the active
1635 1.4 thorpej * FIFO. This status is the only way we can detect if we
1636 1.4 thorpej * have lost the race (e.g. host paused us) and our attempts
1637 1.4 thorpej * to disable the channel occurred after all REQs were
1638 1.4 thorpej * already seen and acked (REQINIT never comes true).
1639 1.1 fvdl */
1640 1.4 thorpej mvi DFFSXFRCTL, CLRCHN;
1641 1.1 fvdl xor MODE_PTR, MK_MODE(M_DFF1, M_DFF1);
1642 1.4 thorpej test DFCNTRL, DIRECTION jz interrupt_return;
1643 1.1 fvdl and DFCNTRL, ~SCSIEN;
1644 1.4 thorpej snapshot_wait_data_valid:
1645 1.4 thorpej test SEQINTSRC, (CTXTDONE|SAVEPTRS) jnz snapshot_data_valid;
1646 1.4 thorpej test SSTAT1, REQINIT jz snapshot_wait_data_valid;
1647 1.4 thorpej snapshot_data_valid:
1648 1.1 fvdl or DFCNTRL, SCSIEN;
1649 1.4 thorpej or SEQINTCTL, IRET ret;
1650 1.1 fvdl snapshot_saveptr:
1651 1.1 fvdl mvi DFFSXFRCTL, CLRCHN;
1652 1.1 fvdl or SEQINTCTL, IRET ret;
1653 1.4 thorpej main_isr:
1654 1.1 fvdl }
1655 1.1 fvdl test SEQINTSRC, CFG4DATA jnz cfg4data_intr;
1656 1.1 fvdl test SEQINTSRC, CFG4ISTAT jnz cfg4istat_intr;
1657 1.1 fvdl test SEQINTSRC, SAVEPTRS jnz saveptr_intr;
1658 1.1 fvdl test SEQINTSRC, CFG4ICMD jnz cfg4icmd_intr;
1659 1.1 fvdl SET_SEQINTCODE(INVALID_SEQINT)
1660 1.1 fvdl 
1661 1.1 fvdl /*
1662 1.1 fvdl * There are two types of save pointers interrupts:
1663 1.1 fvdl * The first is a snapshot save pointers where the current FIFO is not
1664 1.16 andvar * active and contains a snapshot of the current pointer information.
1665 1.1 fvdl * This happens between packets in a stream for a single L_Q. Since we
1666 1.1 fvdl * are not performing a pointer save, we can safely clear the channel
1667 1.1 fvdl * so it can be used for other transactions. On RTI capable controllers,
1668 1.1 fvdl * where snapshots can, and are, disabled, the code to handle this type
1669 1.1 fvdl * of snapshot is not active.
1670 1.1 fvdl *
1671 1.1 fvdl * The second case is a save pointers on an active FIFO which occurs
1672 1.1 fvdl * if the target changes to a new L_Q or busfrees/QASes and the transfer
1673 1.1 fvdl * has a residual. This should occur coincident with a ctxtdone. We
1674 1.1 fvdl * disable the interrupt and allow our active routine to handle the
1675 1.1 fvdl * save.
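 *
 * Roughly, the dispatch below is (illustrative pseudo-C only):
 *
 *	if (!rti_capable && longjmp_addr_invalid) {
 *		// snapshot case: nothing to save, free the channel
 *		clear_channel();
 *	} else {
 *		// active FIFO case: mask further SAVEPTRS interrupts
 *		// and let the FIFO's own handler perform the save
 *		seqimode &= ~ENSAVEPTRS;
 *	}
 *	iret();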
1676 1.1 fvdl */ 1677 1.1 fvdl saveptr_intr: 1678 1.1 fvdl if ((ahd->features & AHD_RTI) == 0) { 1679 1.1 fvdl test LONGJMP_ADDR[1], INVALID_ADDR jnz snapshot_saveptr; 1680 1.1 fvdl } 1681 1.1 fvdl saveptr_active_fifo: 1682 1.1 fvdl and SEQIMODE, ~ENSAVEPTRS; 1683 1.1 fvdl or SEQINTCTL, IRET ret; 1684 1.1 fvdl 1685 1.1 fvdl cfg4data_intr: 1686 1.4 thorpej test SCB_SGPTR[0], SG_LIST_NULL jnz pkt_handle_overrun_inc_use_count; 1687 1.1 fvdl call load_first_seg; 1688 1.1 fvdl call pkt_handle_xfer; 1689 1.4 thorpej inc SCB_FIFO_USE_COUNT; 1690 1.4 thorpej interrupt_return: 1691 1.1 fvdl or SEQINTCTL, IRET ret; 1692 1.1 fvdl 1693 1.1 fvdl cfg4istat_intr: 1694 1.1 fvdl call freeze_queue; 1695 1.1 fvdl add NONE, -13, SCB_CDB_LEN; 1696 1.1 fvdl jnc cfg4istat_have_sense_addr; 1697 1.1 fvdl test SCB_CDB_LEN, SCB_CDB_LEN_PTR jnz cfg4istat_have_sense_addr; 1698 1.1 fvdl /* 1699 1.1 fvdl * Host sets up address/count and enables transfer. 1700 1.1 fvdl */ 1701 1.1 fvdl SET_SEQINTCODE(CFG4ISTAT_INTR) 1702 1.1 fvdl jmp cfg4istat_setup_handler; 1703 1.1 fvdl cfg4istat_have_sense_addr: 1704 1.1 fvdl bmov HADDR, SCB_SENSE_BUSADDR, 4; 1705 1.1 fvdl mvi HCNT[1], (AHD_SENSE_BUFSIZE >> 8); 1706 1.1 fvdl mvi SG_CACHE_PRE, LAST_SEG; 1707 1.1 fvdl mvi DFCNTRL, PRELOADEN|SCSIEN|HDMAEN; 1708 1.1 fvdl cfg4istat_setup_handler: 1709 1.1 fvdl /* 1710 1.1 fvdl * Status pkt is transferring to host. 1711 1.1 fvdl * Wait in idle loop for transfer to complete. 1712 1.1 fvdl * If a command completed before an attempted 1713 1.1 fvdl * task management function completed, notify the host. 1714 1.1 fvdl */ 1715 1.1 fvdl test SCB_TASK_MANAGEMENT, 0xFF jz cfg4istat_no_taskmgmt_func; 1716 1.1 fvdl SET_SEQINTCODE(TASKMGMT_CMD_CMPLT_OKAY) 1717 1.1 fvdl cfg4istat_no_taskmgmt_func: 1718 1.1 fvdl call pkt_handle_status; 1719 1.1 fvdl or SEQINTCTL, IRET ret; 1720 1.1 fvdl 1721 1.1 fvdl cfg4icmd_intr: 1722 1.1 fvdl /* 1723 1.1 fvdl * In the case of DMAing a CDB from the host, the normal 1724 1.1 fvdl * CDB buffer is formatted with an 8 byte address followed 1725 1.1 fvdl * by a 1 byte count. 1726 1.1 fvdl */ 1727 1.1 fvdl bmov HADDR[0], SCB_HOST_CDB_PTR, 9; 1728 1.1 fvdl mvi SG_CACHE_PRE, LAST_SEG; 1729 1.1 fvdl mvi DFCNTRL, (PRELOADEN|SCSIEN|HDMAEN); 1730 1.1 fvdl call pkt_handle_cdb; 1731 1.1 fvdl or SEQINTCTL, IRET ret; 1732 1.1 fvdl 1733 1.1 fvdl /* 1734 1.1 fvdl * See if the target has gone on in this context creating an 1735 1.1 fvdl * overrun condition. For the write case, the hardware cannot 1736 1.1 fvdl * ack bytes until data are provided. So, if the target begins 1737 1.1 fvdl * another packet without changing contexts, implying we are 1738 1.1 fvdl * not sitting on a packet boundary, we are in an overrun 1739 1.1 fvdl * situation. For the read case, the hardware will continue to 1740 1.1 fvdl * ack bytes into the FIFO, and may even ack the last overrun packet 1741 1.1 fvdl * into the FIFO. If the FIFO should become non-empty, we are in 1742 1.1 fvdl * a read overrun case. 1743 1.1 fvdl */ 1744 1.1 fvdl #define check_overrun \ 1745 1.1 fvdl /* Not on a packet boundary. */ \ 1746 1.1 fvdl test MDFFSTAT, DLZERO jz pkt_handle_overrun; \ 1747 1.1 fvdl test DFSTATUS, FIFOEMP jz pkt_handle_overrun 1748 1.1 fvdl 1749 1.1 fvdl pkt_handle_xfer: 1750 1.1 fvdl test SG_STATE, LOADING_NEEDED jz pkt_last_seg; 1751 1.1 fvdl call setjmp; 1752 1.1 fvdl test SEQINTSRC, SAVEPTRS jnz pkt_saveptrs; 1753 1.1 fvdl test SCSIPHASE, ~DATA_PHASE_MASK jz . + 2; 1754 1.1 fvdl test SCSISIGO, ATNO jnz . 
+ 2; 1755 1.1 fvdl test SSTAT2, NONPACKREQ jz pkt_service_fifo; 1756 1.1 fvdl /* 1757 1.1 fvdl * Defer handling of this NONPACKREQ until we 1758 1.1 fvdl * can be sure it pertains to this FIFO. SAVEPTRS 1759 1.1 fvdl * will not be asserted if the NONPACKREQ is for us, 1760 1.16 andvar * so we must simulate it if shadow is valid. If 1761 1.16 andvar * shadow is not valid, keep running this FIFO until we 1762 1.1 fvdl * have satisfied the transfer by loading segments and 1763 1.16 andvar * waiting for either shadow valid or last_seg_done. 1764 1.1 fvdl */ 1765 1.1 fvdl test MDFFSTAT, SHVALID jnz pkt_saveptrs; 1766 1.1 fvdl pkt_service_fifo: 1767 1.1 fvdl test SG_STATE, LOADING_NEEDED jnz service_fifo; 1768 1.1 fvdl pkt_last_seg: 1769 1.1 fvdl call setjmp; 1770 1.1 fvdl test SEQINTSRC, SAVEPTRS jnz pkt_saveptrs; 1771 1.4 thorpej test SG_CACHE_SHADOW, LAST_SEG_DONE jnz pkt_last_seg_done; 1772 1.1 fvdl test SCSIPHASE, ~DATA_PHASE_MASK jz . + 2; 1773 1.1 fvdl test SCSISIGO, ATNO jnz . + 2; 1774 1.1 fvdl test SSTAT2, NONPACKREQ jz return; 1775 1.1 fvdl test MDFFSTAT, SHVALID jz return; 1776 1.1 fvdl /* FALLTHROUGH */ 1777 1.1 fvdl 1778 1.1 fvdl /* 1779 1.1 fvdl * Either a SAVEPTRS interrupt condition is pending for this FIFO 1780 1.4 thorpej * or we have a pending NONPACKREQ for this FIFO. We differentiate 1781 1.1 fvdl * between the two by capturing the state of the SAVEPTRS interrupt 1782 1.1 fvdl * prior to clearing this status and executing the common code for 1783 1.1 fvdl * these two cases. 1784 1.1 fvdl */ 1785 1.1 fvdl pkt_saveptrs: 1786 1.1 fvdl BEGIN_CRITICAL; 1787 1.1 fvdl if ((ahd->bugs & AHD_AUTOFLUSH_BUG) != 0) { 1788 1.1 fvdl or DFCNTRL, FIFOFLUSH; 1789 1.1 fvdl } 1790 1.1 fvdl mov REG0, SEQINTSRC; 1791 1.1 fvdl call calc_residual; 1792 1.1 fvdl call save_pointers; 1793 1.1 fvdl mvi CLRSEQINTSRC, CLRSAVEPTRS; 1794 1.1 fvdl call disable_ccsgen; 1795 1.1 fvdl or SEQIMODE, ENSAVEPTRS; 1796 1.1 fvdl test DFCNTRL, DIRECTION jnz pkt_saveptrs_check_status; 1797 1.1 fvdl test DFSTATUS, FIFOEMP jnz pkt_saveptrs_check_status; 1798 1.1 fvdl /* 1799 1.1 fvdl * Keep a handler around for this FIFO until it drains 1800 1.1 fvdl * to the host to guarantee that we don't complete the 1801 1.1 fvdl * command to the host before the data arrives. 1802 1.1 fvdl */ 1803 1.1 fvdl pkt_saveptrs_wait_fifoemp: 1804 1.1 fvdl call setjmp; 1805 1.1 fvdl test DFSTATUS, FIFOEMP jz return; 1806 1.1 fvdl pkt_saveptrs_check_status: 1807 1.1 fvdl or LONGJMP_ADDR[1], INVALID_ADDR; 1808 1.1 fvdl test REG0, SAVEPTRS jz unexpected_nonpkt_phase; 1809 1.4 thorpej dec SCB_FIFO_USE_COUNT; 1810 1.4 thorpej test SCB_CONTROL, STATUS_RCVD jnz pkt_complete_scb_if_fifos_idle; 1811 1.1 fvdl mvi DFFSXFRCTL, CLRCHN ret; 1812 1.1 fvdl END_CRITICAL; 1813 1.1 fvdl 1814 1.4 thorpej /* 1815 1.4 thorpej * LAST_SEG_DONE status has been seen in the current FIFO. 1816 1.4 thorpej * This indicates that all of the allowed data for this 1817 1.4 thorpej * command has transferred across the SCSI and host buses. 1818 1.4 thorpej * Check for overrun and see if we can complete this command. 1819 1.4 thorpej */ 1820 1.4 thorpej pkt_last_seg_done: 1821 1.1 fvdl BEGIN_CRITICAL; 1822 1.4 thorpej /* 1823 1.4 thorpej * Mark transfer as completed. 1824 1.4 thorpej */ 1825 1.1 fvdl or SCB_SGPTR, SG_LIST_NULL; 1826 1.4 thorpej 1827 1.1 fvdl /* 1828 1.4 thorpej * Wait for the current context to finish to verify that 1829 1.4 thorpej * no overrun condition has occurred. 
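 *
 * The check_overrun test applied below reduces to (illustrative):
 *
 *	// write case: the data length counter is non-zero, i.e. we
 *	// are not sitting on a packet boundary;
 *	// read case: bytes were acked into a now non-empty FIFO.
 *	if (!dlzero || !fifo_empty)
 *		goto pkt_handle_overrun;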
1830 1.1 fvdl */ 1831 1.4 thorpej test SEQINTSRC, CTXTDONE jnz pkt_ctxt_done; 1832 1.1 fvdl call setjmp; 1833 1.4 thorpej pkt_wait_ctxt_done_loop: 1834 1.4 thorpej test SEQINTSRC, CTXTDONE jnz pkt_ctxt_done; 1835 1.4 thorpej /* 1836 1.4 thorpej * A sufficiently large overrun or a NONPACKREQ may 1837 1.4 thorpej * prevent CTXTDONE from ever asserting, so we must 1838 1.4 thorpej * poll for these statuses too. 1839 1.4 thorpej */ 1840 1.1 fvdl check_overrun; 1841 1.1 fvdl test SSTAT2, NONPACKREQ jz return; 1842 1.1 fvdl test SEQINTSRC, CTXTDONE jz unexpected_nonpkt_phase; 1843 1.4 thorpej /* FALLTHROUGH */ 1844 1.4 thorpej 1845 1.4 thorpej pkt_ctxt_done: 1846 1.1 fvdl check_overrun; 1847 1.1 fvdl or LONGJMP_ADDR[1], INVALID_ADDR; 1848 1.4 thorpej /* 1849 1.4 thorpej * If status has been received, it is safe to skip 1850 1.4 thorpej * the check to see if another FIFO is active because 1851 1.4 thorpej * LAST_SEG_DONE has been observed. However, we check 1852 1.4 thorpej * the FIFO anyway since it costs us only one extra 1853 1.4 thorpej * instruction to leverage common code to perform the 1854 1.4 thorpej * SCB completion. 1855 1.4 thorpej */ 1856 1.4 thorpej dec SCB_FIFO_USE_COUNT; 1857 1.4 thorpej test SCB_CONTROL, STATUS_RCVD jnz pkt_complete_scb_if_fifos_idle; 1858 1.1 fvdl mvi DFFSXFRCTL, CLRCHN ret; 1859 1.1 fvdl END_CRITICAL; 1860 1.1 fvdl 1861 1.1 fvdl /* 1862 1.4 thorpej * Must wait until CDB xfer is over before issuing the 1863 1.4 thorpej * clear channel. 1864 1.4 thorpej */ 1865 1.4 thorpej pkt_handle_cdb: 1866 1.4 thorpej call setjmp; 1867 1.4 thorpej test SG_CACHE_SHADOW, LAST_SEG_DONE jz return; 1868 1.4 thorpej or LONGJMP_ADDR[1], INVALID_ADDR; 1869 1.4 thorpej mvi DFFSXFRCTL, CLRCHN ret; 1870 1.4 thorpej 1871 1.4 thorpej /* 1872 1.1 fvdl * Watch over the status transfer. Our host sense buffer is 1873 1.1 fvdl * large enough to take the maximum allowed status packet. 1874 1.1 fvdl * None-the-less, we must still catch and report overruns to 1875 1.4 thorpej * the host. Additionally, properly catch unexpected non-packet 1876 1.4 thorpej * phases that are typically caused by CRC errors in status packet 1877 1.4 thorpej * transmission. 1878 1.1 fvdl */ 1879 1.1 fvdl pkt_handle_status: 1880 1.4 thorpej call setjmp; 1881 1.4 thorpej test SG_CACHE_SHADOW, LAST_SEG_DONE jnz pkt_status_check_overrun; 1882 1.4 thorpej test SEQINTSRC, CTXTDONE jz pkt_status_check_nonpackreq; 1883 1.4 thorpej test SG_CACHE_SHADOW, LAST_SEG_DONE jnz pkt_status_check_overrun; 1884 1.4 thorpej pkt_status_IU_done: 1885 1.1 fvdl if ((ahd->bugs & AHD_AUTOFLUSH_BUG) != 0) { 1886 1.1 fvdl or DFCNTRL, FIFOFLUSH; 1887 1.1 fvdl } 1888 1.4 thorpej test DFSTATUS, FIFOEMP jz return; 1889 1.4 thorpej BEGIN_CRITICAL; 1890 1.1 fvdl or LONGJMP_ADDR[1], INVALID_ADDR; 1891 1.1 fvdl mvi SCB_SCSI_STATUS, STATUS_PKT_SENSE; 1892 1.1 fvdl or SCB_CONTROL, STATUS_RCVD; 1893 1.4 thorpej jmp pkt_complete_scb_if_fifos_idle; 1894 1.1 fvdl END_CRITICAL; 1895 1.4 thorpej pkt_status_check_overrun: 1896 1.1 fvdl /* 1897 1.16 andvar * Status PKT overruns are unceremoniously recovered with a 1898 1.4 thorpej * bus reset. If we've overrun, let the host know so that 1899 1.4 thorpej * recovery can be performed. 1900 1.4 thorpej * 1901 1.4 thorpej * LAST_SEG_DONE has been observed. If either CTXTDONE or 1902 1.4 thorpej * a NONPACKREQ phase change have occurred and the FIFO is 1903 1.4 thorpej * empty, there is no overrun. 
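 *
 * As a predicate (illustrative; the real code below re-tests FIFOEMP
 * at each step to close races with data still arriving):
 *
 *	if (!fifo_empty)
 *		report_status_overrun();	// host recovers via bus reset
 *	else if (ctxtdone)
 *		status_iu_done();
 *	else if (still_in_data_phase)
 *		keep_polling();			// re-run from the idle loop
 *	else
 *		check_nonpackreq();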
1904 1.4 thorpej */ 1905 1.4 thorpej test DFSTATUS, FIFOEMP jz pkt_status_report_overrun; 1906 1.4 thorpej test SEQINTSRC, CTXTDONE jz . + 2; 1907 1.4 thorpej test DFSTATUS, FIFOEMP jnz pkt_status_IU_done; 1908 1.4 thorpej test SCSIPHASE, ~DATA_PHASE_MASK jz return; 1909 1.4 thorpej test DFSTATUS, FIFOEMP jnz pkt_status_check_nonpackreq; 1910 1.4 thorpej pkt_status_report_overrun: 1911 1.1 fvdl SET_SEQINTCODE(STATUS_OVERRUN) 1912 1.4 thorpej /* SEQUENCER RESTARTED */ 1913 1.4 thorpej pkt_status_check_nonpackreq: 1914 1.4 thorpej /* 1915 1.4 thorpej * CTXTDONE may be held off if a NONPACKREQ is associated with 1916 1.4 thorpej * the current context. If a NONPACKREQ is observed, decide 1917 1.4 thorpej * if it is for the current context. If it is for the current 1918 1.4 thorpej * context, we must defer NONPACKREQ processing until all data 1919 1.4 thorpej * has transferred to the host. 1920 1.4 thorpej */ 1921 1.4 thorpej test SCSIPHASE, ~DATA_PHASE_MASK jz return; 1922 1.4 thorpej test SCSISIGO, ATNO jnz . + 2; 1923 1.4 thorpej test SSTAT2, NONPACKREQ jz return; 1924 1.4 thorpej test SEQINTSRC, CTXTDONE jnz pkt_status_IU_done; 1925 1.4 thorpej test DFSTATUS, FIFOEMP jz return; 1926 1.4 thorpej /* 1927 1.4 thorpej * The unexpected nonpkt phase handler assumes that any 1928 1.4 thorpej * data channel use will have a FIFO reference count. It 1929 1.15 andvar * turns out that the status handler doesn't need a reference 1930 1.4 thorpej * count since the status received flag, and thus completion 1931 1.4 thorpej * processing, cannot be set until the handler is finished. 1932 1.4 thorpej * We increment the count here to make the nonpkt handler 1933 1.4 thorpej * happy. 1934 1.4 thorpej */ 1935 1.4 thorpej inc SCB_FIFO_USE_COUNT; 1936 1.4 thorpej /* FALLTHROUGH */ 1937 1.1 fvdl 1938 1.1 fvdl /* 1939 1.1 fvdl * Nonpackreq is a polled status. It can come true in three situations: 1940 1.1 fvdl * we have received an L_Q, we have sent one or more L_Qs, or there is no 1941 1.1 fvdl * L_Q context associated with this REQ (REQ occurs immediately after a 1942 1.1 fvdl * (re)selection). Routines that know that the context responsible for this 1943 1.1 fvdl * nonpackreq call directly into unexpected_nonpkt_phase. In the case of the 1944 1.1 fvdl * top level idle loop, we exhaust all active contexts prior to determining that 1945 1.1 fvdl * we simply do not have the full I_T_L_Q for this phase. 1946 1.1 fvdl */ 1947 1.1 fvdl unexpected_nonpkt_phase_find_ctxt: 1948 1.1 fvdl /* 1949 1.1 fvdl * This nonpackreq is most likely associated with one of the tags 1950 1.1 fvdl * in a FIFO or an outgoing LQ. Only treat it as an I_T only 1951 1.1 fvdl * nonpackreq if we've cleared out the FIFOs and handled any 1952 1.1 fvdl * pending SELDO. 
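 *
 * Illustrative restatement of the guard that follows:
 *
 *	if (!fifo0_free || !fifo1_free || seldo_pending)
 *		return;			// let the owning context handle it
 *	scbptr[1] = SCB_LIST_NULL;	// no valid tag: I_T-only nonpackreq
 *	// fall into unexpected_nonpkt_phase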
1953 1.1 fvdl */ 1954 1.1 fvdl SET_SRC_MODE M_SCSI; 1955 1.1 fvdl SET_DST_MODE M_SCSI; 1956 1.1 fvdl and A, FIFO1FREE|FIFO0FREE, DFFSTAT; 1957 1.1 fvdl cmp A, FIFO1FREE|FIFO0FREE jne return; 1958 1.1 fvdl test SSTAT0, SELDO jnz return; 1959 1.1 fvdl mvi SCBPTR[1], SCB_LIST_NULL; 1960 1.1 fvdl unexpected_nonpkt_phase: 1961 1.7 thorpej test MODE_PTR, ~(MK_MODE(M_DFF1, M_DFF1)) 1962 1.7 thorpej jnz unexpected_nonpkt_mode_cleared; 1963 1.1 fvdl SET_SRC_MODE M_DFF0; 1964 1.1 fvdl SET_DST_MODE M_DFF0; 1965 1.1 fvdl or LONGJMP_ADDR[1], INVALID_ADDR; 1966 1.4 thorpej dec SCB_FIFO_USE_COUNT; 1967 1.1 fvdl mvi DFFSXFRCTL, CLRCHN; 1968 1.7 thorpej unexpected_nonpkt_mode_cleared: 1969 1.1 fvdl mvi CLRSINT2, CLRNONPACKREQ; 1970 1.1 fvdl test SCSIPHASE, ~(MSG_IN_PHASE|MSG_OUT_PHASE) jnz illegal_phase; 1971 1.1 fvdl SET_SEQINTCODE(ENTERING_NONPACK) 1972 1.1 fvdl jmp ITloop; 1973 1.1 fvdl 1974 1.1 fvdl illegal_phase: 1975 1.1 fvdl SET_SEQINTCODE(ILLEGAL_PHASE) 1976 1.1 fvdl jmp ITloop; 1977 1.1 fvdl 1978 1.1 fvdl /* 1979 1.1 fvdl * We have entered an overrun situation. If we have working 1980 1.1 fvdl * BITBUCKET, flip that on and let the hardware eat any overrun 1981 1.1 fvdl * data. Otherwise use an overrun buffer in the host to simulate 1982 1.1 fvdl * BITBUCKET. 1983 1.1 fvdl */ 1984 1.4 thorpej pkt_handle_overrun_inc_use_count: 1985 1.4 thorpej inc SCB_FIFO_USE_COUNT; 1986 1.1 fvdl pkt_handle_overrun: 1987 1.1 fvdl SET_SEQINTCODE(CFG4OVERRUN) 1988 1.1 fvdl call freeze_queue; 1989 1.1 fvdl if ((ahd->bugs & AHD_PKT_BITBUCKET_BUG) == 0) { 1990 1.1 fvdl or DFFSXFRCTL, DFFBITBUCKET; 1991 1.1 fvdl SET_SRC_MODE M_DFF1; 1992 1.1 fvdl SET_DST_MODE M_DFF1; 1993 1.1 fvdl } else { 1994 1.1 fvdl call load_overrun_buf; 1995 1.1 fvdl mvi DFCNTRL, (HDMAEN|SCSIEN|PRELOADEN); 1996 1.1 fvdl } 1997 1.1 fvdl call setjmp; 1998 1.1 fvdl if ((ahd->bugs & AHD_PKT_BITBUCKET_BUG) != 0) { 1999 1.1 fvdl test DFSTATUS, PRELOAD_AVAIL jz overrun_load_done; 2000 1.1 fvdl call load_overrun_buf; 2001 1.1 fvdl or DFCNTRL, PRELOADEN; 2002 1.1 fvdl overrun_load_done: 2003 1.1 fvdl test SEQINTSRC, CTXTDONE jnz pkt_overrun_end; 2004 1.1 fvdl } else { 2005 1.1 fvdl test DFFSXFRCTL, DFFBITBUCKET jz pkt_overrun_end; 2006 1.1 fvdl } 2007 1.1 fvdl test SSTAT2, NONPACKREQ jz return; 2008 1.1 fvdl pkt_overrun_end: 2009 1.1 fvdl or SCB_RESIDUAL_SGPTR, SG_OVERRUN_RESID; 2010 1.1 fvdl test SEQINTSRC, CTXTDONE jz unexpected_nonpkt_phase; 2011 1.4 thorpej dec SCB_FIFO_USE_COUNT; 2012 1.1 fvdl or LONGJMP_ADDR[1], INVALID_ADDR; 2013 1.4 thorpej test SCB_CONTROL, STATUS_RCVD jnz pkt_complete_scb_if_fifos_idle; 2014 1.1 fvdl mvi DFFSXFRCTL, CLRCHN ret; 2015 1.1 fvdl 2016 1.1 fvdl if ((ahd->bugs & AHD_PKT_BITBUCKET_BUG) != 0) { 2017 1.1 fvdl load_overrun_buf: 2018 1.1 fvdl /* 2019 1.1 fvdl * Load a dummy segment if preload space is available. 2020 1.1 fvdl */ 2021 1.1 fvdl mov HADDR[0], SHARED_DATA_ADDR; 2022 1.1 fvdl add HADDR[1], PKT_OVERRUN_BUFOFFSET, SHARED_DATA_ADDR[1]; 2023 1.1 fvdl mov ACCUM_SAVE, A; 2024 1.1 fvdl clr A; 2025 1.1 fvdl adc HADDR[2], A, SHARED_DATA_ADDR[2]; 2026 1.1 fvdl adc HADDR[3], A, SHARED_DATA_ADDR[3]; 2027 1.1 fvdl mov A, ACCUM_SAVE; 2028 1.1 fvdl bmov HADDR[4], ALLZEROS, 4; 2029 1.1 fvdl /* PKT_OVERRUN_BUFSIZE is a multiple of 256 */ 2030 1.1 fvdl clr HCNT[0]; 2031 1.1 fvdl mvi HCNT[1], ((PKT_OVERRUN_BUFSIZE >> 8) & 0xFF); 2032 1.1 fvdl clr HCNT[2] ret; 2033 1.1 fvdl } 2034
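
/*
 * Note on the overrun buffer consumed by load_overrun_buf above: it
 * lives in the host's shared data area at PKT_OVERRUN_BUFOFFSET.
 * Because PKT_OVERRUN_BUFSIZE is a multiple of 256, only HCNT[1] (the
 * middle byte of the 24-bit count) ever needs to be programmed; a
 * rough host-side statement of that constraint (example value only,
 * not necessarily the driver's):
 *
 *	// e.g. a 512-byte bit-bucket region: 512 = 2 * 256, so
 *	// (512 >> 8) & 0xFF = 2 lands in HCNT[1] while HCNT[0]
 *	// and HCNT[2] stay zero.
 *	#define PKT_OVERRUN_BUFSIZE	512
 */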