/*	$NetBSD: rtld_start.S,v 1.21 2014/03/21 14:03:30 matt Exp $	*/

/*
 * Copyright 1996 Matt Thomas <matt@3am-software.com>
 * Portions copyright 2002, 2003 Charles M. Hannum <root@ihack.net>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <machine/asm.h>

/* R9 contains the address of PS_STRINGS and, since it is caller-saved,
 * we can just use it.  R6 has a backup copy of the stack pointer which
 * we can use as well.
 */
ENTRY(_rtld_start, 0)
        /* Allocate space on the stack for the cleanup and obj_main
         * entries that _rtld() will provide for us.
         */
        clrl    %fp
        subl2   $8,%sp

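        /*
         * The first GOT entry holds the link-time address of _DYNAMIC,
         * so subtracting it from the run-time address gives the
         * relocation base, which _rtld_relocate_nonplt_self uses to
         * apply ld.elf_so's own non-PLT relocations.
         */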
        movab   _DYNAMIC,%r0
        subl3   _GLOBAL_OFFSET_TABLE_,%r0,%r10
        pushl   %r10            /* relocbase */
        pushl   %r0             /* &_DYNAMIC */
        calls   $2,_rtld_relocate_nonplt_self

        pushl   %r10            /* relocbase */
        pushal  4(%sp)          /* sp */
        calls   $2,_rtld        /* entry = _rtld(sp, relocbase) */

        movq    (%sp)+,%r7      /* grab cleanup and obj_main into %r7/%r8 */
        jmp     2(%r0)          /* jump to entry point + 2 (skip entry mask) */
END(_rtld_start)

/*
 * Lazy binding entry point, reached from the PLT via a jmp through pltgot[1].
 * SP+4: address of the relocation offset
 * SP+0: address of the obj entry
 */
ALTENTRY(_rtld_bind_start)
        movq    -8(%fp),%r0     /* get addresses of plt.got & reloc index */
        pushl   (%r1)           /* push relocation offset */
        pushl   %r0             /* push address of obj entry */
        calls   $2,_rtld_bind
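        /* %r0 now holds the address of the newly bound routine */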

        /*
         * This code checks to see if we got called via a call{s,g} $n,*pcrel32.
         * This is by far the most common case (an indirect call through the PLT).
         */
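        /*
         * callg/calls are opcodes 0xfa/0xfb, so masking off the low bit
         * makes both compare equal to 0xfa.  The *pcrel32 form is 7 bytes:
         * opcode, argument-count literal, 0xff operand specifier (longword
         * displacement deferred with the PC as base), 32-bit displacement.
         */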
        subl3   $7,16(%fp),%r1  /* return address */
        bicb3   $1,(%r1),%r2    /* fetch opcode of instruction */
        cmpb    $0xfa,%r2       /* is it calls/callg */
        jneq    20f             /* no it isn't */
        cmpb    $0xff,2(%r1)    /* and deferred 32-bit PC displacement? */
        jneq    20f             /* no it isn't */

        /*
         * This makes sure the longword with the PLT's address has been updated
         * to point to the routine's address.  If it hasn't, then returning
         * would put us in an infinite loop.  Instead we punt and fake up a
         * callframe.
         */
        movl    3(%r1),%r3      /* get displacement */
        addl2   16(%fp),%r3     /* add ending location */
        cmpl    (%r3),%r0       /* does it contain the routine address? */
#ifdef DEBUG
        jneq    30f             /* no it doesn't, die */
#else
        jneq    20f             /* no it doesn't, go fake a new callframe */
#endif

11:     movl    %r1,16(%fp)     /* backup to the calls/callg */
        jbc     $29,4(%fp),12f  /* skip if this was a callg */
        clrl    (%ap)           /* clear argument count */
12:     ret                     /* return and redo the call */

#if 1
20:
        /*
         * Since the calling standard says only r6-r11 should be saved,
         * that simplifies things for us.  It means we can use r0-r5 as
         * temporaries without worrying about preserving them, and we can
         * hold the current fixed callframe in r2-r5 as we build the new
         * callframe without having to worry about overwriting the
         * existing callframe.
         */
        extzv   $0,$12,(%r0),%r1 /* get routine's save mask */
        bitw    $0x3f,%r1       /* does the routine use r0-r5? */
        jneq    30f             /* yes, that sucks */
        jbc     $29,4(%fp),27f  /* handle callg */
        movq    4(%fp),%r2      /* fetch callframe status & saved AP */
        movq    12(%fp),%r4     /* fetch callframe saved FP & PC */
        insv    %r1,$16,$12,%r2 /* update save mask */
        movl    %ap,%sp         /* reset stack to top of callframe */
22:     pushr   %r1             /* push registers */
        movq    %r4,-(%sp)      /* push callframe saved FP & PC */
        movq    %r2,-(%sp)      /* push callframe status & saved AP */
        pushl   $0              /* push condition handler */
        movl    %sp,%fp         /* sp == fp now */
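        /*
         * The frame now has the same layout as one built by calls/callg,
         * so when the routine returns with ret it unwinds this frame and
         * goes straight back to the original caller.
         */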
#if 1
        jmp     2(%r0)          /* jump past entry mask */
#else
        /*
         * More correct, but IV/DV are never set, so skip doing this for now.
         */
        movpsl  -(%sp)          /* push PSL */
        clrb    (%sp)           /* clear user flags */
        jbc     $14,(%r0),24f   /* IV need to be set? */
        bisb2   $0x20,(%sp)     /* yes, set it. */
24:     jbc     $15,(%r0),25f   /* DV need to be set? */
        bisb2   $0x80,(%sp)     /* yes, set it. */
25:     pushab  2(%r0)          /* push address of first instruction */
        rei                     /* and go to it (updating PSW) */
#endif

        /*
         * Count how many registers are being used for callg.
         */
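        /*
         * 0x32212110 is a table of population counts: nibble i holds the
         * number of bits set in i, for i = 0..7.  Two lookups give the
         * number of registers saved from r6-r8 and from r9-r11.
         */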
27:     movl    $0x32212110,%r3 /* bit counts */
        extzv   $6,$3,%r1,%r2   /* extract bits 6-8 */
        ashl    $2,%r2,%r2      /* shift by 2 */
        extzv   %r2,$4,%r3,%r4  /* extract count */
        extzv   $9,$3,%r1,%r2   /* extract bits 9-11 */
        ashl    $2,%r2,%r2      /* shift by 2 */
        extzv   %r2,$4,%r3,%r5  /* extract count */
        movq    4(%fp),%r2      /* fetch callframe status & saved AP */
        insv    %r1,$16,$12,%r2 /* update save mask */
        addl3   %r4,%r5,%r1     /* add counts and discard them */
        movq    12(%fp),%r4     /* fetch callframe saved FP & PC */
        moval   20(%fp)[%r1],%sp /* pop callframe */
        extzv   $16,$12,%r2,%r1 /* get save mask back */
        jbr     22b             /* now build the new callframe */

30:
        calls   $0,_C_LABEL(_rtld_die)
#else
/*
 * Check to see if called via call? $n,w^off(reg)
 */
20:     addl2   $2,%r1          /* 16-bit displacement */
        bicb3   $1,(%r1),%r2    /* fetch opcode of instruction */
        cmpb    $0xfa,%r2       /* is it calls/callg */
        jneq    30f             /* no it isn't */
        bicb3   $0x1f,2(%r1),%r3 /* extract addressing mode */
        cmpb    $0xc0,%r3       /* 16-bit displacement? */
        jeql    11b             /* yes, redo the call */
        halt

/*
 * Check to see if called via call? $n,b^off(reg)
 */
30:     incl    %r1             /* 8-bit displacement */
        bicb3   $1,(%r1),%r2    /* fetch opcode of instruction */
        cmpb    $0xfa,%r2       /* is it calls/callg */
        jneq    40f             /* no it isn't */
        bicb3   $0x1f,2(%r1),%r3 /* extract addressing mode */
        cmpb    $0xa0,%r3       /* 8-bit displacement? */
        jeql    11b             /* yes, redo the call */
        halt

/*
 * Check to see if called via call? $n,(reg)
 */
40:     incl    %r1             /* no displacement */
        bicb3   $1,(%r1),%r2    /* fetch opcode of instruction */
        cmpb    $0xfa,%r2       /* is it calls/callg */
        jeql    41f             /* yes it is */
        halt                    /* no, die die die */
41:     bicb3   $0x0f,2(%r1),%r2 /* extract addressing mode */
        bicb3   $0xf0,2(%r1),%r3 /* extract register */
        extzv   $0,$12,6(%fp),%r4 /* extract saved mask */
        cmpb    $0x60,%r2       /* register deferred? */
        jeql    42f             /* yes, deal with it */
        cmpb    $0x90,%r2       /* autoincrement deferred? */
        jeql    70f             /* yes, deal with it */
        halt                    /* no, die die die */

42:     cmpw    %r4,$0xffc      /* did we save r2-r11? */
        jneq    50f             /* no, deal with it */
        jbc     %r3,%r4,43f     /* is the register in the saved mask? */

        /*
         * We saved r2-r11, so it's easy to replace the saved register with
         * the right value by indexing into the saved registers (offset by 8).
         */
        movl    %r0,(20-8)(%fp)[%r3] /* replace address in saved registers */
        jbr     11b             /* go back and redo call */
        /*
         * Must have been called via r0 or r1, which are saved locally.
         * So move the routine address into the appropriate slot on the stack.
         */
43:     movl    %r0,(%sp)[%r3]
        jbr     11b             /* go back and redo call */

50:     jbs     %r3,%r4,60f     /* is the register in the saved mask? */
        jbs     %r3,$0x3f,43b   /* is it r0-r5? */
        /*
         * The register used for the call was not saved so we need to move
         * the new function address into it so the re-call will use the new
         * address.
         */
        pushl   %r0             /* save function address on the stack */
        ashl    %r3,$1,%r0      /* create a bitmask for the register */
        popr    %r0             /* pop it off the stack */
        jbr     11b             /* and redo the call */

60:     clrl    %r2             /* starting offset into saved registers */
        clrl    %r5             /* start with register 0 */

61:     cmpl    %r2,%r3         /* is the register to save? */
        jneq    62f             /* no, advance to next */
        movl    %r0,20(%fp)[%r5] /* yes, save return address in saved reg */
        jbr     11b             /* and return the call */
62:     jbc     %r5,%r4,63f     /* is this register saved? */
        incl    %r5             /* yes, account for it */
63:     incl    %r2             /* increment register number */
        jbr     61b             /* and loop */

70:     cmpb    %r3,$12
        blss    71f
        halt

71:     cmpw    %r4,$0xffc      /* did we save r2-r11? */
        jneq    72f             /* no, deal with it */
        subl2   $4,(20-8)(%fp)[%r3] /* backup incremented register */
        jbr     11b             /* and redo the call */

72:     jbs     %r3,%r4,80f
        jbs     %r3,$0x3f,73f
        ashl    %r3,$1,%r0      /* create a bitmask for the register */
        pushr   %r0             /* push it onto the stack */
        subl2   $4,(%sp)        /* backup incremented register */
        popr    %r0             /* pop it off the stack */
        jbr     11b             /* and redo the call */

73:     subl2   $4,(%sp)[%r3]   /* backup incremented register */
        jbr     11b             /* and redo the call */

80:     clrl    %r2             /* starting offset into saved registers */
        clrl    %r5             /* start with register 0 */

81:     cmpl    %r2,%r3         /* is the register to save? */
        jneq    82f             /* no, advance to next */
        subl2   $4,20(%fp)[%r5] /* yes, backup incremented register */
        jbr     11b             /* and return the call */
82:     jbc     %r5,%r4,83f     /* is this register saved? */
        incl    %r5             /* yes, account for it */
83:     incl    %r2             /* increment register number */
        jbr     81b             /* and loop */
#endif
END(_rtld_bind_start)