/*	$NetBSD: cpuswitch.S,v 1.18 2002/08/31 03:07:32 thorpej Exp $	*/

/*
 * Copyright (c) 1994-1998 Mark Brinicombe.
 * Copyright (c) 1994 Brini.
 * All rights reserved.
 *
 * This code is derived from software written for Brini by Mark Brinicombe
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Brini.
 * 4. The name of the company nor the name of the author may be used to
 *    endorse or promote products derived from this software without specific
 *    prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL BRINI OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * RiscBSD kernel project
 *
 * cpuswitch.S
 *
 * cpu switching functions
 *
 * Created      : 15/10/94
 */

#include "opt_armfpe.h"

#include "assym.h"
#include <machine/param.h>
#include <machine/cpu.h>
#include <machine/frame.h>
#include <machine/asm.h>

#undef IRQdisable
#undef IRQenable

/*
 * New experimental definitions of IRQdisable and IRQenable
 * These keep FIQs enabled since FIQs are special.
 */

#define IRQdisable \
	mrs	r14, cpsr ; \
	orr	r14, r14, #(I32_bit) ; \
	msr	cpsr_c, r14 ; \

#define IRQenable \
	mrs	r14, cpsr ; \
	bic	r14, r14, #(I32_bit) ; \
	msr	cpsr_c, r14 ; \

/*
 * setrunqueue() and remrunqueue()
 *
 * Functions to add a process to and remove a process from the run queue.
 */
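
/*
 * A note on the run queue layout used below: sched_whichqs is a 32-bit
 * mask with one bit per run queue; a set bit means that queue is
 * non-empty.  A process's queue index is its priority shifted right by
 * two.  sched_qs[] holds one head per queue; each head is a
 * p_forw/p_back pair (hence the "lsl #3" indexing) and doubles as the
 * sentinel of a circular doubly-linked list of runnable processes.
 */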

	.text

.Lwhichqs:
	.word	_C_LABEL(sched_whichqs)

.Lqs:
	.word	_C_LABEL(sched_qs)

/*
 * On entry
 *   r0 = process
 */

ENTRY(setrunqueue)
	/*
	 * Local register usage
	 *   r0 = process
	 *   r1 = queue
	 *   r2 = &qs[queue] and temp
	 *   r3 = temp
	 *   r12 = whichqs
	 */
#ifdef DIAGNOSTIC
	ldr	r1, [r0, #(P_BACK)]
	teq	r1, #0x00000000
	bne	.Lsetrunqueue_erg

	ldr	r1, [r0, #(P_WCHAN)]
	teq	r1, #0x00000000
	bne	.Lsetrunqueue_erg
#endif

	/* Get the priority of the queue */
	ldrb	r1, [r0, #(P_PRIORITY)]

	/* Indicate that there is a process on this queue */
	ldr	r12, .Lwhichqs
	mov	r1, r1, lsr #2
	ldr	r2, [r12]
	mov	r3, #0x00000001
	mov	r3, r3, lsl r1
	orr	r2, r2, r3
	str	r2, [r12]

	/* Get the address of the queue */
	ldr	r2, .Lqs
	add	r1, r2, r1, lsl #3

	/* Hook the process in */
	str	r1, [r0, #(P_FORW)]
	ldr	r2, [r1, #(P_BACK)]

	str	r0, [r1, #(P_BACK)]
#ifdef DIAGNOSTIC
	teq	r2, #0x00000000
	beq	.Lsetrunqueue_erg
#endif
	str	r0, [r2, #(P_FORW)]
	str	r2, [r0, #(P_BACK)]

	mov	pc, lr

#ifdef DIAGNOSTIC
.Lsetrunqueue_erg:
	mov	r2, r1
	mov	r1, r0
	add	r0, pc, #.Ltext1 - . - 8
	bl	_C_LABEL(printf)

	ldr	r2, .Lqs
	ldr	r1, [r2]
	add	r0, pc, #.Ltext2 - . - 8
	b	_C_LABEL(panic)

.Ltext1:
	.asciz	"setrunqueue : %08x %08x\n"
.Ltext2:
	.asciz	"setrunqueue : [qs]=%08x qs=%08x\n"
	.align	0
#endif

/*
 * On entry
 *   r0 = process
 */

ENTRY(remrunqueue)
	/*
	 * Local register usage
	 *   r0 = oldproc
	 *   r1 = queue
	 *   r2 = &qs[queue] and scratch
	 *   r3 = scratch
	 *   r12 = whichqs
	 */

	/* Get the priority of the queue */
	ldrb	r1, [r0, #(P_PRIORITY)]
	mov	r1, r1, lsr #2

	/* Unhook the process */
	ldr	r2, [r0, #(P_FORW)]
	ldr	r3, [r0, #(P_BACK)]

	str	r3, [r2, #(P_BACK)]
	str	r2, [r3, #(P_FORW)]

	/* If the queue is now empty clear the queue not empty flag */
	teq	r2, r3
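	/*
	 * r2/r3 are the removed process's p_forw/p_back.  They can only be
	 * equal when both point at the queue header, i.e. the process was
	 * the sole entry, so the "eq" condition below means the queue has
	 * just become empty.
	 */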

	/* This could be reworked to avoid the use of r4 */
	ldreq	r12, .Lwhichqs
	moveq	r3, #0x00000001
	ldreq	r2, [r12]
	moveq	r3, r3, lsl r1
	biceq	r2, r2, r3
	streq	r2, [r12]

	/* Remove the back pointer for the process */
	mov	r1, #0x00000000
	str	r1, [r0, #(P_BACK)]

	mov	pc, lr


/*
 * cpuswitch()
 *
 * performs a process context switch.
 * This function has several entry points
 */

.Lcurproc:
	.word	_C_LABEL(curproc)

.Lcurpcb:
	.word	_C_LABEL(curpcb)

.Lwant_resched:
	.word	_C_LABEL(want_resched)

.Lcpufuncs:
	.word	_C_LABEL(cpufuncs)

	.data
	.global	_C_LABEL(curpcb)
_C_LABEL(curpcb):
	.word	0x00000000
	.text

.Lblock_userspace_access:
	.word	_C_LABEL(block_userspace_access)

.Lcpu_do_powersave:
	.word	_C_LABEL(cpu_do_powersave)

/*
 * Idle loop, exercised while waiting for a process to wake up.
 *
 * NOTE: When we jump back to .Lswitch_search, we must have a
 * pointer to whichqs in r7, which is what it is when we arrive
 * here.
 */
/* LINTSTUB: Ignore */
ASENTRY_NP(idle)
#if defined(LOCKDEBUG)
	bl	_C_LABEL(sched_unlock_idle)
#endif
	ldr	r3, .Lcpu_do_powersave

	/* Enable interrupts */
	IRQenable

	/* If we don't want to sleep, use a simpler loop. */
	ldr	r3, [r3]		/* r3 = cpu_do_powersave */
	teq	r3, #0
	bne	2f

	/* Non-powersave idle. */
1:	/* should maybe do uvm pageidlezero stuff here */
	ldr	r3, [r7]		/* r3 = whichqs */
	teq	r3, #0x00000000
	bne	.Lswitch_search
	b	1b

2:	/* Powersave idle. */
	ldr	r4, .Lcpufuncs
3:	ldr	r3, [r7]		/* r3 = whichqs */
	teq	r3, #0x00000000
	bne	.Lswitch_search

	/* if saving power, don't want to pageidlezero */
	mov	r0, #0
	add	lr, pc, #3b - . - 8
	ldr	pc, [r4, #(CF_SLEEP)]
	/* loops back around */


/*
 * Find a new process to run, save the current context and
 * load the new context
 */

ENTRY(cpu_switch)
/*
 * Local register usage. Some of these registers are out of date.
 *   r1 = oldproc
 *   r2 = spl level
 *   r3 = whichqs
 *   r4 = queue
 *   r5 = &qs[queue]
 *   r6 = newproc
 *   r7 = scratch
 */
	stmfd	sp!, {r4-r7, lr}

/*
 * Get the current process and indicate that there is no longer
 * a valid process (curproc = 0). Zero the current PCB pointer
 * while we're at it.
 */
	ldr	r7, .Lcurproc
	ldr	r6, .Lcurpcb
	mov	r0, #0x00000000
	ldr	r1, [r7]		/* r1 = curproc */
	str	r0, [r7]		/* curproc = NULL */
	str	r0, [r6]		/* curpcb = NULL */

	/* stash the old proc while we call functions */
	mov	r5, r1

#if defined(LOCKDEBUG)
	/* release the sched_lock before handling interrupts */
	bl	_C_LABEL(sched_unlock_idle)
#endif

	/* Lower the spl level to spl0 and get the current spl level. */
#ifdef __NEWINTR
	mov	r0, #(IPL_NONE)
	bl	_C_LABEL(_spllower)
#else /* ! __NEWINTR */
#ifdef spl0
	mov	r0, #(_SPL_0)
	bl	_C_LABEL(splx)
#else
	bl	_C_LABEL(spl0)
#endif /* spl0 */
#endif /* __NEWINTR */

	/* Push the old spl level onto the stack */
	str	r0, [sp, #-0x0004]!
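	/*
	 * The pre-indexed store pushes the old spl value; it stays on the
	 * stack for the whole switch and is popped again at
	 * .Lswitch_return, where splx() restores it.
	 */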

	/* First phase : find a new process */

	ldr	r7, .Lwhichqs

	/* rem: r5 = old proc */
	/* rem: r7 = &whichqs */

.Lswitch_search:
	IRQdisable
#if defined(LOCKDEBUG)
	bl	_C_LABEL(sched_lock_idle)
#endif

	/* Do we have any active queues? */
	ldr	r3, [r7]

	/* If not we must idle until we do. */
	teq	r3, #0x00000000
	beq	_ASM_LABEL(idle)

	/* put old proc back in r1 */
	mov	r1, r5

	/* rem: r1 = old proc */
	/* rem: r3 = whichqs */
	/* rem: interrupts are disabled */

	/*
	 * We have found an active queue. Currently we do not know which
	 * queue is active, just that one of them is.
	 */
	/* this is the ffs algorithm devised by d.seal and posted to
	 * comp.sys.arm on 16 Feb 1994.
	 */
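	/*
	 * In outline: rsb/ands isolates the lowest set bit of whichqs
	 * (r0 = r3 & -r3), leaving a one-bit value.  That is multiplied
	 * by 0x0450fbaf, built up cheaply as x*0x11, *0x41, *0xffff; the
	 * top six bits of the product are distinct for each of the 32
	 * possible bit positions and are used to index
	 * .Lcpu_switch_ffs_table, yielding the queue number.
	 */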
	rsb	r5, r3, #0
	ands	r0, r3, r5

	adr	r5, .Lcpu_switch_ffs_table

	/* X = R0 */
	orr	r4, r0, r0, lsl #4	/* r4 = X * 0x11 */
	orr	r4, r4, r4, lsl #6	/* r4 = X * 0x451 */
	rsb	r4, r4, r4, lsl #16	/* r4 = X * 0x0450fbaf */

	/* used further down, saves SA stall */
	ldr	r6, .Lqs

	/* now lookup in table indexed on top 6 bits of r4 */
	ldrb	r4, [ r5, r4, lsr #26 ]

	/* rem: r0 = bit mask of chosen queue (1 << r4) */
	/* rem: r1 = old proc */
	/* rem: r3 = whichqs */
	/* rem: r4 = queue number */
	/* rem: interrupts are disabled */

	/* Get the address of the queue (&qs[queue]) */
	add	r5, r6, r4, lsl #3

	/*
	 * Get the process from the queue and place the next process in
	 * the queue at the head. This basically unlinks the process at
	 * the head of the queue.
	 */
	ldr	r6, [r5, #(P_FORW)]

	/* rem: r6 = new process */
	ldr	r7, [r6, #(P_FORW)]
	str	r7, [r5, #(P_FORW)]

	/*
	 * Test to see if the queue is now empty. If the head of the queue
	 * points to the queue itself then there are no more processes in
	 * the queue. We can therefore clear the queue not empty flag held
	 * in r3.
	 */

	teq	r5, r7
	biceq	r3, r3, r0

	/* rem: r0 = bit mask of chosen queue (1 << r4) - NOT NEEDED ANY MORE */

	/* Fix the back pointer for the process now at the head of the queue. */
	ldr	r0, [r6, #(P_BACK)]
	str	r0, [r7, #(P_BACK)]

	/* Update the RAM copy of the queue not empty flags word. */
	ldr	r7, .Lwhichqs
	str	r3, [r7]

	/* rem: r1 = old proc */
	/* rem: r3 = whichqs - NOT NEEDED ANY MORE */
	/* rem: r4 = queue number - NOT NEEDED ANY MORE */
	/* rem: r6 = new process */
	/* rem: interrupts are disabled */

	/* Clear the want_resched flag */
	ldr	r7, .Lwant_resched
	mov	r0, #0x00000000
	str	r0, [r7]

	/*
	 * Clear the back pointer of the process we have removed from
	 * the head of the queue. The new process is isolated now.
	 */
	str	r0, [r6, #(P_BACK)]

#if defined(LOCKDEBUG)
	/*
	 * unlock the sched_lock, but leave interrupts off, for now.
	 */
	mov	r7, r1
	bl	_C_LABEL(sched_unlock_idle)
	mov	r1, r7
#endif

	/* p->p_cpu initialized in fork1() for single-processor */

	/* Process is now on a processor. */
	mov	r0, #SONPROC		/* p->p_stat = SONPROC */
	strb	r0, [r6, #(P_STAT)]

	/* We have a new curproc now so make a note of it */
	ldr	r7, .Lcurproc
	str	r6, [r7]

	/* Hook in a new pcb */
	ldr	r7, .Lcurpcb
	ldr	r0, [r6, #(P_ADDR)]
	str	r0, [r7]

	/* At this point we can allow IRQs again. */
	IRQenable

	/* rem: r1 = old proc */
	/* rem: r6 = new process */
	/* rem: interrupts are enabled */

	/*
	 * If the new process is the same as the process that called
	 * cpu_switch() then we do not need to save and restore any
	 * contexts. This means we can make a quick exit.
	 * The test is simple: if curproc on entry (now in r1) is the
	 * same as the proc removed from the queue we can jump to the exit.
	 */
	teq	r1, r6
	beq	.Lswitch_return

	/* Remember the old process in r0 */
	mov	r0, r1

	/*
	 * If the curproc on entry to cpu_switch was zero then the
	 * process that called it was exiting. This means that we do
	 * not need to save the current context. Instead we can jump
	 * straight to restoring the context for the new process.
	 */
	teq	r0, #0x00000000
	beq	.Lswitch_exited

	/* rem: r0 = old proc */
	/* rem: r6 = new process */
	/* rem: interrupts are enabled */

	/* Stage two : Save old context */

	/* Get the user structure for the old process. */
	ldr	r1, [r0, #(P_ADDR)]

	/* Save all the registers in the old process's pcb */
	add	r7, r1, #(PCB_R8)
	stmia	r7, {r8-r13}

	/*
	 * This can be optimised... We know we want to go from SVC32
	 * mode to UND32 mode
	 */
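	/*
	 * sp (r13) is banked per CPU mode, so the only way to save the
	 * undefined-mode stack pointer is to switch into UND32 mode
	 * briefly; IRQs are masked at the same time so nothing can run
	 * on that stack while we hold the mode.
	 */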
	mrs	r3, cpsr
	bic	r2, r3, #(PSR_MODE)
	orr	r2, r2, #(PSR_UND32_MODE | I32_bit)
	msr	cpsr_c, r2

	str	sp, [r1, #(PCB_UND_SP)]

	msr	cpsr_c, r3		/* Restore the old mode */

	/* rem: r0 = old proc */
	/* rem: r1 = old pcb */
	/* rem: r6 = new process */
	/* rem: interrupts are enabled */

	/* What else needs to be saved? Only FPA stuff when that is supported */

	/* r1 now free! */

	/* Third phase : restore saved context */

	/* rem: r0 = old proc */
	/* rem: r6 = new process */
	/* rem: interrupts are enabled */

	/*
	 * Don't allow user space access between the purge and the switch.
	 */
	ldr	r3, .Lblock_userspace_access
	mov	r1, #0x00000001
	mov	r2, #0x00000000
	str	r1, [r3]

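	/*
	 * Call the cache purge routine through the cpufuncs table.
	 * "ldr pc" does not set the link register, so the return address
	 * is built by hand: pc reads as the current instruction + 8, so
	 * adding (label - . - 8) leaves lr pointing at .Lcs_cache_purged.
	 * r0-r3 are saved around the call as the callee may clobber them.
	 */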
	stmfd	sp!, {r0-r3}
	ldr	r1, .Lcpufuncs
	add	lr, pc, #.Lcs_cache_purged - . - 8
	ldr	pc, [r1, #CF_IDCACHE_WBINV_ALL]

.Lcs_cache_purged:
	ldmfd	sp!, {r0-r3}

.Lcs_cache_purge_skipped:
	/* At this point we need to kill IRQs again. */
	IRQdisable

	/*
	 * Interrupts are disabled so we can allow user space accesses again
	 * as none will occur until interrupts are re-enabled after the
	 * switch.
	 */
	str	r2, [r3]

	/* Get the user structure for the new process in r1 */
	ldr	r1, [r6, #(P_ADDR)]

	/* Get the pagedir physical address for the process. */
	ldr	r0, [r1, #(PCB_PAGEDIR)]

	/* Switch the memory to the new process */
	ldr	r3, .Lcpufuncs
	add	lr, pc, #.Lcs_context_switched - . - 8
	ldr	pc, [r3, #CF_CONTEXT_SWITCH]

.Lcs_context_switched:
	/*
	 * This can be optimised... We know we want to go from SVC32
	 * mode to UND32 mode
	 */
	mrs	r3, cpsr
	bic	r2, r3, #(PSR_MODE)
	orr	r2, r2, #(PSR_UND32_MODE)
	msr	cpsr_c, r2

	ldr	sp, [r1, #(PCB_UND_SP)]

	msr	cpsr_c, r3		/* Restore the old mode */

	/* Restore all the saved registers */
	add	r7, r1, #PCB_R8
	ldmia	r7, {r8-r13}

	mov	r7, r1			/* preserve PCB pointer */

#ifdef ARMFPE
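	/*
	 * Pass (new pcb address + USER_SIZE) to the FPE context switch
	 * routine.  An ARM data-processing immediate is an 8-bit value
	 * with a rotation, so USER_SIZE is added in two separately
	 * encodable pieces (the 0x00ff part, then the 0xff00 part)
	 * rather than as a single immediate.
	 */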
	add	r0, r1, #(USER_SIZE) & 0x00ff
	add	r0, r0, #(USER_SIZE) & 0xff00
	bl	_C_LABEL(arm_fpe_core_changecontext)
#endif

	/* We can enable interrupts again */
	IRQenable

	/* rem: r6 = new proc */
	/* rem: r7 = new PCB */

	/*
	 * Check for restartable atomic sequences (RAS).
	 */
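	/*
	 * A restartable atomic sequence (RAS) is a region the process has
	 * registered (p_nras counts them) that must be restarted from the
	 * beginning if the process is preempted inside it.  If the saved
	 * program counter in the trapframe lies within such a region,
	 * ras_lookup() returns the restart address and TF_PC is rewound
	 * to it; otherwise it returns -1 and the frame is left untouched.
	 */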

	ldr	r2, [r6, #(P_NRAS)]
	ldr	r4, [r7, #(PCB_TF)]	/* r4 = trapframe (used below) */
	teq	r2, #0			/* p->p_nras == 0? */
	bne	.Lswitch_do_ras		/* no, check for one */

.Lswitch_return:

	/* Get the spl level from the stack and update the current spl level */
	ldr	r0, [sp], #0x0004
	bl	_C_LABEL(splx)

	/* cpu_switch returns the proc it switched to. */
	mov	r0, r6

	/*
	 * Pull the registers that got pushed when either savectx() or
	 * cpu_switch() was called and return.
	 */
	ldmfd	sp!, {r4-r7, pc}

.Lswitch_do_ras:
	ldr	r1, [r4, #(TF_PC)]	/* second ras_lookup() arg */
	mov	r0, r6			/* first ras_lookup() arg */
	bl	_C_LABEL(ras_lookup)
	cmn	r0, #1			/* -1 means "not in a RAS" */
	strne	r0, [r4, #(TF_PC)]
	b	.Lswitch_return

.Lswitch_exited:
	/*
	 * We skip the cache purge because switch_exit() already did
	 * it. Load up registers the way .Lcs_cache_purge_skipped
	 * expects. Userspace access already blocked in switch_exit().
	 */
	ldr	r3, .Lblock_userspace_access
	mov	r2, #0x00000000
	b	.Lcs_cache_purge_skipped

/*
 * void switch_exit(struct proc *p, struct proc *p0);
 * Switch to proc0's saved context and deallocate the address space and kernel
 * stack for p. Then jump into cpu_switch(), as if we were in proc0 all along.
 */

/* LINTSTUB: Func: void switch_exit(struct proc *p, struct proc *p0) */
ENTRY(switch_exit)
	/*
	 * r0 = proc
	 * r1 = proc0
	 */

	mov	r3, r0

	/* In case we fault */
	ldr	r0, .Lcurproc
	mov	r2, #0x00000000
	str	r2, [r0]

/*	ldr	r0, .Lcurpcb
	str	r2, [r0]	*/

	/*
	 * Don't allow user space access between the purge and the switch.
	 */
	ldr	r0, .Lblock_userspace_access
	mov	r2, #0x00000001
	str	r2, [r0]

	/* Switch to proc0 context */

	stmfd	sp!, {r0-r3}

	ldr	r0, .Lcpufuncs
	add	lr, pc, #.Lse_cache_purged - . - 8
	ldr	pc, [r0, #CF_IDCACHE_WBINV_ALL]

.Lse_cache_purged:
	ldmfd	sp!, {r0-r3}

	IRQdisable

	ldr	r2, [r1, #(P_ADDR)]
	ldr	r0, [r2, #(PCB_PAGEDIR)]

	/* Switch the memory to the new process */
	ldr	r4, .Lcpufuncs
	add	lr, pc, #.Lse_context_switched - . - 8
	ldr	pc, [r4, #CF_CONTEXT_SWITCH]

.Lse_context_switched:
	/* Restore all the saved registers */
	add	r7, r2, #PCB_R8
	ldmia	r7, {r8-r13}

	/* This is not really needed! */
	/* Yes it is, for the su and fu routines */
	ldr	r0, .Lcurpcb
	str	r2, [r0]

	IRQenable

/*	str	r3, [sp, #-0x0004]!	*/

	/*
	 * Schedule the vmspace and stack to be freed.
	 */
	mov	r0, r3			/* exit2(p) */
	bl	_C_LABEL(exit2)

	/* Paranoia */
	ldr	r1, .Lcurproc
	mov	r0, #0x00000000
	str	r0, [r1]

	ldr	r7, .Lwhichqs		/* r7 = &whichqs */
	mov	r5, #0x00000000		/* r5 = old proc = NULL */
	b	.Lswitch_search

/* LINTSTUB: Func: void savectx(struct pcb *pcb) */
ENTRY(savectx)
	/*
	 * r0 = pcb
	 */

	/* Push registers. */
	stmfd	sp!, {r4-r7, lr}

	/* Store all the registers in the process's pcb */
	add	r2, r0, #(PCB_R8)
	stmia	r2, {r8-r13}

	/* Pull the regs off the stack */
	ldmfd	sp!, {r4-r7, pc}

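/*
 * proc_trampoline is where a newly created process first resumes after
 * cpu_switch().  The code below calls the function in r4 with r5 as its
 * first argument (and the current sp as a second); those registers are
 * restored from the kernel stack frame laid down when the process was
 * forked.  On return, interrupts are masked and PULLFRAME unwinds the
 * trapframe so the movs at the end drops back into the saved context.
 */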
ENTRY(proc_trampoline)
	add	lr, pc, #(.Ltrampoline_return - . - 8)
	mov	r0, r5
	mov	r1, sp
	mov	pc, r4

.Ltrampoline_return:
	/* Kill irq's */
	mrs	r0, cpsr
	orr	r0, r0, #(I32_bit)
	msr	cpsr_c, r0

	PULLFRAME

	movs	pc, lr			/* Exit */

	.type .Lcpu_switch_ffs_table, _ASM_TYPE_OBJECT;
.Lcpu_switch_ffs_table:
/* same as ffs table but all nums are -1 from that */
/*               0   1   2   3   4   5   6   7           */
	.byte	 0,  0,  1, 12,  2,  6,  0, 13	/*  0- 7 */
	.byte	 3,  0,  7,  0,  0,  0,  0, 14	/*  8-15 */
	.byte	10,  4,  0,  0,  8,  0,  0, 25	/* 16-23 */
	.byte	 0,  0,  0,  0,  0, 21, 27, 15	/* 24-31 */
	.byte	31, 11,  5,  0,  0,  0,  0,  0	/* 32-39 */
	.byte	 9,  0,  0, 24,  0,  0, 20, 26	/* 40-47 */
	.byte	30,  0,  0,  0,  0, 23,  0, 19	/* 48-55 */
	.byte	29,  0, 22, 18, 28, 17, 16,  0	/* 56-63 */

/* End of cpuswitch.S */