/*	$NetBSD: subr.S,v 1.6 2003/01/19 22:29:24 ragge Exp $ */
2
3 /*
4 * Copyright (c) 1994 Ludd, University of Lule}, Sweden.
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. All advertising materials mentioning features or use of this software
16 * must display the following acknowledgement:
17 * This product includes software developed at Ludd, University of Lule}.
18 * 4. The name of the author may not be used to endorse or promote products
19 * derived from this software without specific prior written permission
20 *
21 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
22 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
23 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
24 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
25 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
26 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
27 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
28 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
29 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
30 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31 */
32
33 #include <machine/asm.h>
34
35 #include "assym.h"
36 #include "opt_ddb.h"
37 #include "opt_multiprocessor.h"
38 #include "opt_lockdebug.h"
39 #include "opt_compat_netbsd.h"
40 #include "opt_compat_ibcs2.h"
41 #ifdef COMPAT_IBCS2
42 #include <compat/ibcs2/ibcs2_syscall.h>
43 #endif
44 #include "opt_compat_ultrix.h"
45 #ifdef COMPAT_ULTRIX
46 #include <compat/ultrix/ultrix_syscall.h>
47 #endif
48
/* JSBENTRY: declare a global, longword-aligned entry point that is
 * entered with jsb (no call frame) and returns with rsb. */
#define JSBENTRY(x) .globl x ; .align 2 ; x :

		.text
52
#ifdef KERNEL_LOADABLE_BY_MOP
/*
 * This is a little tricky. The kernel is not loaded at the correct
 * address, so the kernel must first be relocated, then copied, then
 * jump back to the correct address.
 */
/*
 * Copy routine.  Runs from the scratch area so it can overwrite the
 * kernel image itself.
 * In: %r0 = source, %r1 = destination, %r6 = end of bss,
 *     %r7 = end of loaded code, %r2 = PC to rei to when done,
 *     %r8 = value to store at _cca.
 */
cps:
2:	movb	(%r0)+,(%r1)+		# copy text+data one byte at a time
	cmpl	%r0,%r7			# reached end of loaded code?
	bneq	2b

3:	clrb	(%r1)+			# zero-fill bss
	incl	%r0
	cmpl	%r0,%r6			# reached end of bss?
	bneq	3b
	clrl	-(%sp)			# build an empty argument list
	movl	%sp,%ap
	movl	$_cca,%r7
	movl	%r8,(%r7)		# restore saved value into _cca
	movpsl	-(%sp)			# push PSL ...
	pushl	%r2			# ... and target PC, then
	rei				# "return" into the copied kernel
cpe:

/* Copy the copy routine to scratch memory, then run it */
1:	movab	cps,%r0
	movab	cpe,%r1
	movl	$0x300000,%sp		# scratch area doubles as stack
	movl	%sp,%r3
4:	movb	(%r0)+,(%r3)+		# copy [cps, cpe) -> 0x300000
	cmpl	%r0,%r1
	bneq	4b
	movl	%r7,%r8			# old %r7 — cps stores it at _cca
	/* Ok, copy routine copied, set registers and rei */
	movab	_edata,%r7		# end of loaded code
	movab	_end,%r6		# end of bss
	movl	$0x80000000,%r1		# copy destination (KERNBASE)
	movl	$0x80000200,%r0		# copy source (loaded image)
	subl3	$0x200,%r6,%r9		# end marker, adjusted for the move
	movab	2f,%r2			# PC to resume at after the copy ...
	subl2	$0x200,%r2		# ... adjusted for the relocation
	movpsl	-(%sp)
	pushab	4(%sp)			# PC = the copied copy routine
	rei

/*
 * First entry routine from boot. This should be in a file called locore.
 */
JSBENTRY(start)
	brb	1b			# Netbooted starts here
#else
ASENTRY(start, 0)
#endif
2:	bisl3	$0x80000000,%r9,_C_LABEL(esym)	# End of loaded code
	pushl	$0x1f0000		# Push a nice PSL
	pushl	$to			# Address to jump to
	rei				# change to kernel stack
to:	movw	$0xfff,_C_LABEL(panic)	# Save all regs in panic
	cmpb	(%ap),$3		# symbols info present?
	blssu	3f			# nope, skip
	bisl3	$0x80000000,8(%ap),_C_LABEL(symtab_start)
					#   save start of symtab
	movl	12(%ap),_C_LABEL(symtab_nsyms)	# save number of symtab
	bisl3	$0x80000000,%r9,_C_LABEL(symtab_end)
					#   save end of symtab
3:	addl3	_C_LABEL(esym),$0x3ff,%r0	# Round symbol table end
	bicl3	$0x3ff,%r0,_C_LABEL(proc0paddr)	# save proc0 uarea pointer
	bicl3	$0x80000000,_C_LABEL(proc0paddr),%r0	# get phys proc0 uarea addr
	mtpr	%r0,$PR_PCBB		# Save in IPR PCBB
	addl3	$USPACE,_C_LABEL(proc0paddr),%r0	# Get kernel stack top
	mtpr	%r0,$PR_KSP		# put in IPR KSP
	movl	%r0,_C_LABEL(Sysmap)	# SPT start addr after KSP
	movl	_C_LABEL(proc0paddr),%r0	# get PCB virtual address
	movab	IFTRAP(%r0),4(%r0)	# Save trap address in ESP
	mtpr	4(%r0),$PR_ESP		# Put it in ESP also

	# Set some registers in known state
	movl	_C_LABEL(proc0paddr),%r0
	clrl	P0LR(%r0)		# empty P0/P1 maps for proc0 ...
	clrl	P1LR(%r0)
	mtpr	$0,$PR_P0LR		# ... and in the hardware registers
	mtpr	$0,$PR_P1LR
	movl	$0x80000000,%r1
	movl	%r1,P0BR(%r0)
	movl	%r1,P1BR(%r0)
	mtpr	%r1,$PR_P0BR
	mtpr	%r1,$PR_P1BR
	clrl	IFTRAP(%r0)		# no fault recovery active
	mtpr	$0,$PR_SCBB

	# Copy the RPB to its new position
#if defined(COMPAT_14)
	tstl	(%ap)			# Any arguments?
	bneq	1f			# Yes, called from new boot
	movl	%r11,_C_LABEL(boothowto)	# Howto boot (single etc...)
#	movl	%r10,_C_LABEL(bootdev)	# uninteresting, will complain
	movl	%r8,_C_LABEL(avail_end)	# Usable memory (from VMB)
	clrl	-(%sp)			# Have no RPB
	brb	2f
#endif

1:	pushl	4(%ap)			# Address of old rpb
2:	calls	$1,_C_LABEL(_start)	# Jump away.
	/* NOTREACHED */
158
159
/*
 * Signal handler code.
 * NOTE(review): [sigcode, esigcode) is presumably copied out to user
 * space by the signal-delivery code, so it must stay self-contained.
 */

	.align	2
	.globl	_C_LABEL(sigcode),_C_LABEL(upcallcode),_C_LABEL(esigcode)
_C_LABEL(sigcode):
	pushr	$0x3f			# save %r0-%r5
	subl2	$0xc,%sp		# scratch space below the saved regs
	movl	0x24(%sp),%r0		# handler address from the frame
					# (laid out by sendsig — confirm)
	calls	$3,(%r0)		# invoke handler with 3 arguments
	popr	$0x3f			# restore %r0-%r5
	chmk	$SYS___sigreturn14	# restore the pre-signal context
	chmk	$SYS_exit		# sigreturn failed: give up, exit
	halt				# not reached

/*
 * Trampoline for SA upcalls. This would be totally unnecessary if we
 * didn't need to account for the saved registers in the callee.
 */
_C_LABEL(upcallcode):
	callg	(%sp),(%r0)		# call upcall handler in %r0 with
					# the arglist already on the stack
	halt				# upcalls must not return
_C_LABEL(esigcode):
184
#ifdef COMPAT_IBCS2
/* iBCS2-compat signal trampoline; same shape as the native sigcode. */
	.align	2
	.globl	_C_LABEL(ibcs2_sigcode),_C_LABEL(ibcs2_esigcode)
_C_LABEL(ibcs2_sigcode):
	pushr	$0x3f			# save %r0-%r5
	subl2	$0xc,%sp		# scratch space below the saved regs
	movl	0x24(%sp),%r0		# handler address from the frame
	calls	$3,(%r0)		# invoke handler with 3 arguments
	popr	$0x3f			# restore %r0-%r5
	chmk	$SYS___sigreturn14	# restore the pre-signal context
	chmk	$SYS_exit		# sigreturn failed: exit
	halt				# not reached
_C_LABEL(ibcs2_esigcode):
#endif /* COMPAT_IBCS2 */
199
#ifdef COMPAT_ULTRIX
/* Ultrix-compat signal trampoline; uses the Ultrix sigreturn syscall. */
	.align	2
	.globl	_C_LABEL(ultrix_sigcode),_C_LABEL(ultrix_esigcode)
_C_LABEL(ultrix_sigcode):
	pushr	$0x3f			# save %r0-%r5
	subl2	$0xc,%sp		# scratch space below the saved regs
	movl	0x24(%sp),%r0		# handler address from the frame
	calls	$3,(%r0)		# invoke handler with 3 arguments
	popr	$0x3f			# restore %r0-%r5
	chmk	$ULTRIX_SYS_sigreturn	# restore the pre-signal context
	chmk	$SYS_exit		# sigreturn failed: exit
	halt				# not reached
_C_LABEL(ultrix_esigcode):
#endif
214
	.align	2
	.globl	_C_LABEL(idsptch), _C_LABEL(eidsptch)
/*
 * Interrupt dispatch stub.  NOTE(review): [idsptch, eidsptch) looks
 * like a template that gets copied per interrupt vector with the
 * three zero slots filled in — confirm against the vector-setup code.
 * The jsb pushes the address of the slots as its "return address",
 * which is how cmn_idsptch finds the per-vector data.
 */
_C_LABEL(idsptch):
	pushr	$0x3f			# save %r0-%r5
	.word	0x9f16			# jsb to absolute address
	.long	_C_LABEL(cmn_idsptch)	# the absolute address
	.long	0			# the callback interrupt routine
	.long	0			# its argument
	.long	0			# ptr to correspond evcnt struct
_C_LABEL(eidsptch):

_C_LABEL(cmn_idsptch):
	movl	(%sp)+,%r0		# get pointer to idspvec
	movl	8(%r0),%r1		# get evcnt pointer
	beql	1f			# no ptr, skip increment
	incl	EV_COUNT(%r1)		# increment low longword
	adwc	$0,EV_COUNT+4(%r1)	# add any carry to hi longword
1:	incl	_C_LABEL(uvmexp)+UVME_INTRS	# increment uvmexp.intrs
	pushl	4(%r0)			# push argument
	calls	$1,*(%r0)		# call interrupt routine
	popr	$0x3f			# pop registers
	rei				# return from interrupt
236
/*
 * badaddr(addr, size): probe an address at splhigh.
 * size = 1 (byte), 2 (word) or 4 (long); returns 0 if the access
 * succeeded, 1 if it triggered a machine check (memtest points the
 * machine-check handler at label 4f).
 */
ENTRY(badaddr,0)			# Called with addr,b/w/l
	mfpr	$PR_IPL,%r0		# splhigh()
	mtpr	$IPL_HIGH,$PR_IPL
	movl	4(%ap),%r2		# First argument, the address
	movl	8(%ap),%r1		# Sec arg, b,w,l
	pushl	%r0			# Save old IPL
	clrl	%r3			# assume address is good
	movab	4f,_C_LABEL(memtest)	# Set the return address

	caseb	%r1,$1,$4		# What is the size
1:	.word	1f-1b			# 1: byte
	.word	2f-1b			# 2: word
	.word	3f-1b			# This is unused
	.word	3f-1b			# 4: long

1:	movb	(%r2),%r1		# Test a byte
	brb	5f

2:	movw	(%r2),%r1		# Test a word
	brb	5f

3:	movl	(%r2),%r1		# Test a long
	brb	5f

4:	incl	%r3			# Got machine chk => addr bad
5:	mtpr	(%sp)+,$PR_IPL		# restore the saved IPL
	movl	%r3,%r0			# return 0 (good) or 1 (bad)
	ret
265
#ifdef DDB
/*
 * DDB is the only routine that uses setjmp/longjmp.
 * jmp_buf layout: [0]=AP, [4]=FP, [8]=PC, [12]=SP.
 */
	.globl	_C_LABEL(setjmp), _C_LABEL(longjmp)
_C_LABEL(setjmp):.word 0
	movl	4(%ap), %r0		# %r0 = jmp_buf pointer
	movl	8(%fp), (%r0)		# save caller's AP (from call frame)
	movl	12(%fp), 4(%r0)		# save caller's FP
	movl	16(%fp), 8(%r0)		# save return PC
	moval	28(%fp),12(%r0)		# save SP as it will be after ret
	clrl	%r0			# setjmp returns 0
	ret

_C_LABEL(longjmp):.word 0
	movl	4(%ap), %r1		# %r1 = jmp_buf pointer
	movl	8(%ap), %r0		# %r0 = value setjmp will appear
					# to return
	movl	(%r1), %ap		# restore AP ...
	movl	4(%r1), %fp		# ... FP ...
	movl	12(%r1), %sp		# ... and SP
	jmp	*8(%r1)			# resume at the saved PC
#endif
288
#
# setrunqueue/remrunqueue fast variants.
#

/*
 * Setrq: insert the lwp in %r0 at the tail of its priority queue and
 * set that queue's bit in sched_whichqs.  Entered with jsb.
 */
JSBENTRY(Setrq)
#ifdef DIAGNOSTIC
	tstl	4(%r0)	# Check that process actually are off the queue
	beql	1f
	pushab	setrq
	calls	$1,_C_LABEL(panic)
setrq:	.asciz	"setrunqueue"
#endif
1:	extzv	$2,$6,L_PRIORITY(%r0),%r1	# get priority
	movaq	_C_LABEL(sched_qs)[%r1],%r2	# get address of queue
	insque	(%r0),*PH_RLINK(%r2)	# put proc last in queue
	bbss	%r1,_C_LABEL(sched_whichqs),1f	# set queue bit.
1:	rsb
306
/*
 * Remrq: remove the lwp in %r0 from its run queue, clearing the
 * queue's bit in sched_whichqs if the queue became empty.  jsb entry.
 */
JSBENTRY(Remrq)
	extzv	$2,$6,L_PRIORITY(%r0),%r1	# get queue index
#ifdef DIAGNOSTIC
	bbs	%r1,_C_LABEL(sched_whichqs),1f	# queue bit must be set
	pushab	remrq
	calls	$1,_C_LABEL(panic)
remrq:	.asciz	"remrunqueue"
#endif
1:	remque	(%r0),%r2
	bneq	2f			# Not last process on queue
	bbsc	%r1,_C_LABEL(sched_whichqs),2f	# last one: clear bit
2:	clrl	L_BACK(%r0)		# safety belt
	rsb
320
#
# Idle loop. Here we could do something fun, maybe, like calculating
# pi or something.
#
idle:
#if defined(LOCKDEBUG)
	calls	$0,_C_LABEL(sched_unlock_idle)
#elif defined(MULTIPROCESSOR)
	clrl	_C_LABEL(sched_lock)	# release sched lock
#endif
	mtpr	$1,$PR_IPL		# IPL cannot be 0 because we are
					# running on the interrupt stack
					# and may get interrupts

1:	tstl	_C_LABEL(sched_whichqs)	# Anything ready to run?
	beql	1b			# no, run the idle loop again.
	/* Now try the test the long way */
	mtpr	$IPL_HIGH,$PR_IPL	# block all types of interrupts
#if defined(LOCKDEBUG)
	calls	$0,_C_LABEL(sched_lock_idle)
#elif defined(MULTIPROCESSOR)
3:	bbssi	$0,_C_LABEL(sched_lock),3b	# acquire sched lock
#endif
	brb	lp			# check sched_whichqs again
345
#
# cpu_switch, cpu_preempt, cpu_exit and the idle loop implemented in
# assembler for efficiency. This is called at IPL_HIGH.
#

/*
 * Swtch: pick the highest-priority runnable lwp and switch to it.
 * Entered with jsb at IPL_HIGH; falls into the idle loop when no
 * queue bit is set.  PCB_R0 of the new pcb tells the resumed lwp
 * whether a real switch happened (0 = same lwp, 1 = different).
 */
JSBENTRY(Swtch)
	mfpr	$PR_SSP,%r1		# Get ptr to this cpu_info struct
	clrl	CI_CURLWP(%r1)		# Stop process accounting
	svpctx				# Save context if another CPU
					# get control first (must be on
					# the interrupt stack when idling)


lp:	ffs	$0,$32,_C_LABEL(sched_whichqs),%r3	# Search for bit set
	beql	idle			# no bit set, go to idle loop

	movaq	_C_LABEL(sched_qs)[%r3],%r1	# get address of queue head
	remque	*(%r1),%r2		# remove lwp pointed to by queue head
					# lwp ptr is now in %r2
#ifdef DIAGNOSTIC
	bvc	1f			# check if something on queue
	pushab	noque
	calls	$1,_C_LABEL(panic)
#endif

1:	bneq	2f			# more processes on queue?
	bbsc	%r3,_C_LABEL(sched_whichqs),2f	# no, clear bit in whichqs
2:	clrl	L_BACK(%r2)		# clear proc backpointer
	mfpr	$PR_SSP,%r1		# Get ptr to this cpu_info struct
	/* p->p_cpu initialized in fork1() for single-processor */
#if defined(MULTIPROCESSOR)
	movl	%r1,L_CPU(%r2)		# l->l_cpu = curcpu();
#endif
	movb	$LSONPROC,L_STAT(%r2)	# l->l_stat = LSONPROC;
	movl	%r2,CI_CURLWP(%r1)	# set new process running
	clrl	CI_WANT_RESCHED(%r1)	# we are now changing process
	movl	L_ADDR(%r2),%r0		# Get pointer to new pcb.
	addl3	%r0,$IFTRAP,%r1		# Save for copy* functions.
	mtpr	%r1,$PR_ESP		# Use ESP as CPU-specific pointer
	movl	%r1,ESP(%r0)		# Must save in PCB also.
	mfpr	$PR_SSP,%r1		# New process must inherit cpu_info
	movl	%r1,SSP(%r0)		# Put it in new PCB

#
# Nice routine to get physical from virtual addresses.
#
	extzv	$9,$21,%r0,%r1		# %r1 = virtual page number of pcb
	ashl	$9,*_C_LABEL(Sysmap)[%r1],%r3	# PTE << 9 = phys address

	clrl	PCB_R0(%r0)		# Assume switch to same lwp
	mfpr	$PR_PCBB,%r1		# Get old PCB address
	cmpl	%r1,%r3			# The same lwp?
	beql	1f			# Branch if it is
	movl	$1,PCB_R0(%r0)		# Otherwise, return 1.

1:	mtpr	%r3,$PR_PCBB		# point hardware at the new PCB
	ldpctx				# and load its context
#if defined(LOCKDEBUG)
	pushl	%r0
	calls	$0,_C_LABEL(sched_unlock_idle)
	movl	(%sp)+,%r0
#elif defined(MULTIPROCESSOR)
	clrl	_C_LABEL(sched_lock)	# clear sched lock
#endif
	rei
411
#if defined(MULTIPROCESSOR)
	.align	2
	.globl	_C_LABEL(tramp)	# used to kick off multiprocessor systems.
/*
 * Secondary-CPU trampoline: load the process context selected by
 * PR_PCBB (presumably set up by the CPU-start code — confirm) and
 * enter it with rei.
 */
_C_LABEL(tramp):
	ldpctx
	rei
#endif
419
/*
 * Swtchto: switch directly to the lwp already in %r2, without
 * consulting the run queues.  Entered with jsb at IPL_HIGH; mirrors
 * the tail of Swtch.
 */
JSBENTRY(Swtchto)
	mfpr	$PR_SSP,%r1		# Get ptr to this cpu_info struct
	clrl	CI_CURLWP(%r1)		# Stop process accounting
	svpctx				# Now on interrupt stack

	# New LWP already in %r2
	mfpr	$PR_SSP,%r1		# Get ptr to this cpu_info struct
#if defined(MULTIPROCESSOR)
	movl	%r1,L_CPU(%r2)		# l->l_cpu = curcpu();
					# (was "r1" — missing % register
					# prefix; bare r1 would assemble
					# as an undefined symbol ref)
#endif
	movb	$LSONPROC,L_STAT(%r2)	# l->l_stat = LSONPROC;
	movl	%r2,CI_CURLWP(%r1)	# set new process running
	movl	L_ADDR(%r2),%r0		# Get pointer to new pcb.
	addl3	%r0,$IFTRAP,%r3		# Save for copy* functions.
	mtpr	%r3,$PR_ESP		# Use ESP as CPU-specific pointer
	movl	%r3,ESP(%r0)		# Must save in PCB also.
	movl	%r1,SSP(%r0)		# Put cpu_info ptr in new PCB

	# Translate the pcb's virtual address to physical via Sysmap.
	extzv	$9,$21,%r0,%r1		# %r1 = virtual page number of pcb
	ashl	$9,*_C_LABEL(Sysmap)[%r1],%r3	# PTE << 9 = phys address

	mtpr	%r3,$PR_PCBB		# point hardware at the new PCB
	ldpctx				# and load its context
#if defined(LOCKDEBUG)
	pushl	%r0
	calls	$0,_C_LABEL(sched_unlock_idle)
	movl	(%sp)+,%r0
#elif defined(MULTIPROCESSOR)
	clrl	_C_LABEL(sched_lock)	# clear sched lock
#endif
	rei
451
#
# the last routine called by a process.
#

/*
 * cpu_exit(lwp): deactivate the dying lwp's pmap, move to the per-cpu
 * scratch page as a temporary stack/PCB, free the lwp's resources via
 * exit2(), then enter Swtch to pick something else to run.  Never
 * returns.
 */
ENTRY(cpu_exit,0)
	movl	4(%ap),%r6		# Process pointer in %r6

	pushl	%r6
	calls	$1,_C_LABEL(pmap_deactivate)

	mtpr	$IPL_CLOCK,$PR_IPL	# Block almost everything
	mfpr	$PR_SSP,%r7		# get cpu_info ptr
	movl	CI_EXIT(%r7),%r8	# scratch page address
	movab	512(%r8),%sp		# change stack (top of scratch page)
	bicl2	$0xc0000000,%r8		# get physical address
	mtpr	%r8,$PR_PCBB		# new PCB
	mtpr	%r7,$PR_SSP		# In case...
	pushl	%r6
	calls	$1,_C_LABEL(exit2)	# release last resources.
	mtpr	$IPL_HIGH,$PR_IPL	# block all types of interrupts
#if defined(LOCKDEBUG)
	calls	$0,_C_LABEL(sched_lock_idle)
#elif defined(MULTIPROCESSOR)
1:	bbssi	$0,_C_LABEL(sched_lock),1b	# acquire sched lock
#endif
	clrl	%r6
	brw	Swtch			# go run something else
479
#
# copy/fetch/store routines.
#

/*
 * copyout(kaddr, uaddr, len) / copyin(uaddr, kaddr, len).
 * The user address must be < 0x80000000 (user space); otherwise -1 is
 * returned immediately.  During the copy the fault-recovery slot
 * (reached via PR_ESP) points at 1f so an access fault resumes there.
 */
ENTRY(copyout, 0)
	movl	8(%ap),%r2		# user destination
	blss	3f			# kernel space
	movl	4(%ap),%r1		# kernel source
	brb	2f

ENTRY(copyin, 0)
	movl	4(%ap),%r1		# user source
	blss	3f			# kernel space
	movl	8(%ap),%r2		# kernel destination
2:	mfpr	$PR_ESP,%r3
	movab	1f,(%r3)		# arm fault recovery
	movc3	12(%ap),(%r1),(%r2)	# copy; movc3 leaves 0 in %r0
1:	mfpr	$PR_ESP,%r3
	clrl	(%r3)			# disarm fault recovery
	ret				# on fault, %r0 is presumably set
					# by the trap code — confirm

3:	mnegl	$1,%r0			# bad address: return -1
	ret
503
/*
 * kcopy(from, to, len): kernel-to-kernel copy that may fault.
 * Saves and restores the caller's fault-recovery slot so kcopy can be
 * used inside an outer copyin/copyout window.
 */
ENTRY(kcopy,0)
	mfpr	$PR_ESP,%r3
	movl	(%r3),-(%sp)		# save previous recovery address
	movab	1f,(%r3)		# install our own
	movl	4(%ap),%r1		# source
	movl	8(%ap),%r2		# destination
	movc3	12(%ap),(%r1), (%r2)
	clrl	%r1			# success; on fault %r1 is
					# presumably set by trap code
1:	mfpr	$PR_ESP,%r3
	movl	(%sp)+,(%r3)		# restore previous recovery address
	movl	%r1,%r0
	ret
516
/*
 * copy{in,out}str() copies data from/to user space to/from kernel space.
 * Security checks:
 * 1) user space address must be < KERNBASE
 * 2) the VM system will do the checks while copying
 */
ENTRY(copyinstr, 0)
	tstl	4(%ap)			# kernel address?
	bgeq	8f			# no, continue
6:	movl	$EFAULT,%r0		# bad address: fail with EFAULT
	movl	16(%ap),%r2		# optional "length copied" pointer
	beql	7f
	clrl	(%r2)			# report zero bytes copied
7:	ret

ENTRY(copyoutstr, 0)
	tstl	8(%ap)			# kernel address?
	bgeq	8f			# no, continue
	brb	6b			# yes, return EFAULT

/*
 * copystr(from, to, maxlen, *lencopied): NUL-terminated string copy;
 * shared tail of the two entry points above.  Returns 0, or
 * ENAMETOOLONG if no NUL was seen within maxlen bytes.
 * NOTE(review): the fault-recovery slot is armed at 3f but not
 * cleared on the success path — confirm the trap code tolerates this.
 */
ENTRY(copystr,0)
8:	movl	4(%ap),%r5		# from
	movl	8(%ap),%r4		# to
	movl	12(%ap),%r3		# len
	movl	16(%ap),%r2		# copied
	clrl	%r0			# default return: success
	mfpr	$PR_ESP,%r1
	movab	3f,(%r1)		# arm fault recovery

	tstl	%r3			# any chars to copy?
	bneq	1f			# yes, jump for more
0:	tstl	%r2			# save copied len?
	beql	2f			# no
	subl3	4(%ap),%r5,(%r2)	# save copied len
2:	ret

1:	movb	(%r5)+,(%r4)+		# copy one char
	beql	0b			# jmp if last char (NUL)
	sobgtr	%r3,1b			# copy one more
	movl	$ENAMETOOLONG,%r0	# inform about too long string
	brb	0b			# out of chars

3:	mfpr	$PR_ESP,%r1		# faulted: disarm recovery ...
	clrl	(%r1)
	brb	0b			# ... and finish up
562
/*
 * subyte/suword/suswintr(addr, val): store a byte/longword/word to a
 * user-space address.  Returns 0 on success, nonzero (-1 for a
 * kernel-space address) on failure.  A fault during the store resumes
 * at the local 1f with %r1 still nonzero (it holds the recovery-slot
 * address), making the return value nonzero.
 */
ENTRY(subyte,0)
	movl	4(%ap),%r0
	blss	3f			# illegal space
	mfpr	$PR_ESP,%r1
	movab	1f,(%r1)		# arm fault recovery
	movb	8(%ap),(%r0)		# store the byte
	clrl	%r1			# success
1:	mfpr	$PR_ESP,%r2
	clrl	(%r2)			# disarm fault recovery
	movl	%r1,%r0
	ret

ENTRY(suword,0)
	movl	4(%ap),%r0
	blss	3f			# illegal space
	mfpr	$PR_ESP,%r1
	movab	1f,(%r1)		# arm fault recovery
	movl	8(%ap),(%r0)		# store the longword
	clrl	%r1			# success
1:	mfpr	$PR_ESP,%r2
	clrl	(%r2)			# disarm fault recovery
	movl	%r1,%r0
	ret

ENTRY(suswintr,0)
	movl	4(%ap),%r0
	blss	3f			# illegal space
	mfpr	$PR_ESP,%r1
	movab	1f,(%r1)		# arm fault recovery
	movw	8(%ap),(%r0)		# store the word
	clrl	%r1			# success
1:	mfpr	$PR_ESP,%r2
	clrl	(%r2)			# disarm fault recovery
	movl	%r1,%r0
	ret

3:	mnegl	$1,%r0			# kernel-space address: return -1
	ret
601
	.align	2
/*
 * fusword/fuswintr(addr): fetch an unsigned word from user space.
 * Returns the zero-extended word, -1 for a kernel-space address, or a
 * nonzero junk value if the load faults (%r1 still holds the
 * recovery-slot address in that case).
 */
ALTENTRY(fusword)
ENTRY(fuswintr,0)
	movl	4(%ap),%r0
	blss	3b			# kernel-space address: return -1
	mfpr	$PR_ESP,%r1
	movab	1f,(%r1)		# arm fault recovery
	movzwl	(%r0),%r1		# fetch and zero-extend the word
1:	mfpr	$PR_ESP,%r2
	clrl	(%r2)			# disarm fault recovery
	movl	%r1,%r0
	ret
614
#if defined(MULTIPROCESSOR)

/*
 * Simple spin-lock primitives, entered with jsb, lock address in %r1.
 * All use the interlocked bit instructions (bbssi/bbcci) on bit 0.
 */
JSBENTRY(Slock)				# spin until the lock is acquired
1:	bbssi	$0,(%r1),1b		# retry while the bit was already set
	rsb

JSBENTRY(Slocktry)			# one attempt; %r0 = 1 on success
	clrl	%r0
	bbssi	$0,(%r1),1f		# already held -> return 0
	incl	%r0
1:	rsb

JSBENTRY(Sunlock)			# release: clear bit, interlocked
	bbcci	$0,(%r1),1f
1:	rsb

#endif
632
#
# data department
#
	.data

	.globl	_C_LABEL(memtest)
_C_LABEL(memtest):			# memory test in progress
	.long	0			# machine-check resume PC for badaddr

#ifdef __ELF__
	.section .rodata
#endif
noque:	.asciz	"swtch"			# panic message used by Swtch