/*	$NetBSD: realprot.S,v 1.13 2025/03/05 22:21:11 andvar Exp $	*/

/*-
 * Copyright (c) 2003 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by David Laight.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Loosely based on code from stand/lib/libcrt/bootsect/start_bootsect.S
 */

#include <machine/asm.h>
#include <x86/specialreg.h>

	.text
	.align  16
gdt:
	.word	0, 0
	.byte	0, 0x00, 0x00, 0

	/* kernel code segment */
	.globl flatcodeseg
flatcodeseg = . - gdt
	.word	0xffff, 0
	.byte	0, 0x9f, 0xcf, 0

	/* kernel data segment */
	.globl flatdataseg
flatdataseg = . - gdt
	.word	0xffff, 0
	.byte	0, 0x93, 0xcf, 0

	/* boot code segment, base will be patched */
bootcodeseg = . - gdt
	.word	0xffff, 0
	.byte	0, 0x9e, 0x4f, 0

	/* boot data segment, base will be patched */
bootdataseg = . - gdt
	.word	0xffff, 0
	.byte	0, 0x92, 0xcf, 0

	/* 16 bit real mode, base will be patched */
bootrealseg = . - gdt
	.word	0xffff, 0
	.byte	0, 0x9e, 0x00, 0

	/* limits (etc) for data segment in real mode */
bootrealdata = . - gdt
	.word	0xffff, 0
	.byte	0, 0x92, 0x00, 0
gdtlen = . - gdt
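
	/*
	 * Each descriptor above is 8 bytes in the standard i386 layout:
	 *	.word	limit[15:0], base[15:0]
	 *	.byte	base[23:16], access, flags+limit[19:16], base[31:24]
	 * The flat segments have base 0 and a 4GB limit (0xcf: 4k
	 * granularity, 32bit).  gdt_fixup() below patches base[23:0] of
	 * the 'boot' segments to the address this code is running at.
	 */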

	.align	16
gdtarg:
	.word	gdtlen-1		/* limit */
	.long	0			/* physical addr, will be inserted */

toreal:	.word	xreal			/* off:seg address for indirect jump */
ourseg:	.word	0			/* real mode code and data segment */
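
/*
 * toreal and ourseg are consecutive words and together form the 16:16
 * far pointer consumed by the 'ljmp *%cs:toreal' in prot_to_real below,
 * so ourseg doubles as the segment part of that pointer.
 */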

stkseg:	.word	0			/* real mode stack segment */
stkdif:	.long	0			/* diff. between real and prot sp */

	.global	gdt_fixup
gdt_fixup:
	.code16
	pushl	%eax
	pushl	%edx

	xorl	%eax, %eax
	mov	%cs, %ax
	mov	%ax, ourseg
	/* sort out stuff for %ss != %ds */
	xorl	%edx, %edx
	movw	%ss, %dx
	movw	%dx, stkseg
	subl	%eax, %edx
	shll	$4, %edx
	movl	%edx, stkdif
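	/*
	 * e.g. (illustrative values): with %cs = 0x1000 and %ss = 0x2000
	 * this leaves stkseg = 0x2000 and stkdif = (0x2000 - 0x1000) << 4
	 * = 0x10000, the amount %esp must be adjusted by whenever %ss is
	 * switched between the real mode stack segment and bootdataseg
	 * (whose base is %cs << 4).
	 */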

	/* fix up GDT entries for bootstrap */
	mov	%ax, %dx
	shll	$4, %eax
	shr	$12, %dx

#define FIXUP(gdt_index) \
	movw	%ax, gdt+gdt_index+2; \
	movb	%dl, gdt+gdt_index+4
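
	/*
	 * FIXUP() stores %ax (base[15:0] = %cs << 4) at descriptor offset 2
	 * and %dl (base[23:16] = %cs >> 12) at offset 4 of the given entry.
	 */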

	FIXUP(bootcodeseg)
	FIXUP(bootrealseg)
	FIXUP(bootdataseg)

	/* fix up GDT pointer */
	addl	$gdt, %eax
	movl	%eax, gdtarg+2

	popl	%edx
	popl	%eax
	ret

/*
 * real_to_prot()
 *
 * Switch the CPU to 32bit protected mode in order to execute C.
 *
 * NB: Call with the 32bit calll instruction so that a 32 bit
 *     return address is pushed.
 *
 * All registers are preserved; %ss:%esp will point to the same
 * place as %ss:%sp did, although the actual value of %esp might
 * be changed.
 *
 * Interrupts are disabled while we are in 32bit mode to save us
 * having to set up a different IDT.  This code is only used during
 * the boot process and it doesn't use any interrupts.
 */
ENTRY(real_to_prot)
	.code16
	pushl	%eax
	cli

	lgdt	%cs:gdtarg		/* Global descriptor table */

	movl	%cr0, %eax
	or	$CR0_PE, %ax
	movl	%eax, %cr0		/* Enter 'protected mode' */

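	/*
	 * The far jump below both loads %cs with a 32bit descriptor and
	 * flushes any instructions prefetched while still in real mode.
	 */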
	ljmp	$bootcodeseg, $1f	/* Jump into a 32bit segment */
1:

	.code32
	/* Set all the segment registers to map the same area as the code */
	mov	$bootdataseg, %eax
	mov	%ax, %ds
	mov	%ax, %es
	mov	%ax, %ss
	addl	stkdif, %esp		/* Allow for real %ss != %ds */

	popl	%eax
	ret

/*
 * prot_to_real()
 *
 * Switch the CPU back to 16bit real mode in order to call system BIOS
 * functions.
 *
 * All registers are preserved, except that %sp may be changed so that
 * %ss:%sp points to the same memory.
 * Note that %ebp is preserved unchanged and so may not reference the
 * correct part of the stack.
 *
 * Interrupts are enabled while in real mode.
 *
 * Based on the description in section 14.5 of the 80386 Programmer's
 * Reference Manual.
 */
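
/*
 * Typical (illustrative) use from a BIOS call wrapper; the real callers
 * live in other files, this only sketches the calling convention
 * described above:
 *
 *	ENTRY(bios_fn)				# built as 32bit code
 *		call	_C_LABEL(prot_to_real)	# drop to real mode
 *		.code16
 *		int	$0x13			# use the BIOS
 *		calll	_C_LABEL(real_to_prot)	# back to 32bit mode
 *		.code32
 *		ret
 */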
/*
 * EPIA_HACK
 *
 * VIA C3 processors (Eden, Samuel 2) don't seem to correctly switch back to
 * executing 16 bit code after the switch to real mode and subsequent jump.
 *
 * It is speculated that the CPU is prefetching and decoding branch
 * targets and not invalidating this buffer on the long jump.
 * Further investigation indicates that the caching of return addresses
 * is most likely the problem.
 *
 * Previous versions just used some extra call/ret pairs and a few NOPs;
 * these only helped a bit, but booting compressed kernels would still fail.
 *
 * Trashing the return address stack (by doing a 'call' without a matching
 * 'ret') seems to fix things completely.  One iteration isn't enough; 16
 * is plenty.
 */
ENTRY(prot_to_real)
	.code32
	pushl	%eax
#ifdef EPIA_HACK
	push	%ecx
	push	$0x10
	pop	%ecx
1:	call	trash_return_cache
	loop	1b
	pop	%ecx
#endif

	/*
	 * Load the segment registers while still in protected mode.
	 * Otherwise the control bits don't get changed.
	 * The correct base addresses are loaded later.
	 */
	movw	$bootrealdata, %ax
	movw	%ax, %ds
	movw	%ax, %es
	movw	%ax, %ss

	/*
	 * Load %cs with a segment that has the correct attributes for
	 * 16bit operation.
	 */
	ljmp	$bootrealseg, $1f
1:

	.code16
	movl	%cr0, %eax
	and	$~CR0_PE, %eax
	movl	%eax, %cr0		/* Disable protected mode */

	/* Jump far indirect to load real mode %cs */
	ljmp	*%cs:toreal
xreal:
	/*
	 * CPU is now in real mode, load the other segment registers
	 * with their correct base addresses.
	 */
	mov	%cs, %ax
	mov	%ax, %ds
	mov	%ax, %es
	/*
	 * If stack was above 64k, 16bit %ss needs to be different from
	 * 32bit %ss (and the other segment registers).
	 */
	mov	stkseg, %ax
	mov	%ax, %ss
	subl	stkdif, %esp

	/*
	 * Check we are returning to an address below 64k: in real mode the
	 * 32bit (calll) return address must fit within the 64k segment
	 * limit or the retl below cannot reach it.
	 */
	push	%bp
	movw	%sp, %bp
	movw	2/*bp*/ + 4/*eax*/ + 2(%bp), %ax	/* high bits ret addr */
	test	%ax, %ax
	jne	1f
	pop	%bp

	sti
	popl	%eax
	retl

1:	movw	$3f, %si
	call	message
	movl	2/*bp*/ + 4/*eax*/(%bp), %eax		/* return address */
	call	dump_eax
	int	$0x18
2:	sti
	hlt
	jmp	2b
3:	.asciz	"prot_to_real can't return to "

	.global	dump_eax_buff
dump_eax_buff:
	. = . + 16

#ifdef EPIA_HACK
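/*
 * Called in a loop from prot_to_real above: popping the return address
 * and jumping through a register means the 'call' has no matching 'ret',
 * which knocks the CPU's return address predictor out of step.
 */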
trash_return_cache:
	.code32
	pop	%eax
	jmp	*%eax
#endif

/* vtophys(void *)
 * convert boot time 'linear' address to a physical one
 */
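/*
 * i.e. phys = (ourseg << 4) + va.  e.g. (illustrative values) with the
 * bootstrap loaded at segment 0x1000, vtophys(0x2345) returns 0x12345.
 */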

ENTRY(vtophys)
	.code32
	xorl	%eax, %eax
	movw	ourseg, %ax
	shll	$4, %eax
	addl	4(%esp), %eax
	ret
    296