[ Upstream commit deff8a24e1021fb39dddf5f6bc5832e0e3a632ea ]

.Lrelocated, .Lpaging_enabled, .Lno_longmode, and .Lin_pm32 are
self-standing local functions, so annotate them as such and preserve
"no alignment". The annotations do not generate anything yet.

Signed-off-by: Jiri Slaby <jslaby@suse.cz>
Signed-off-by: Borislav Petkov <bp@suse.de>
Cc: Cao jin <caoj.fnst@cn.fujitsu.com>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Kate Stewart <kstewart@linuxfoundation.org>
Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Cc: linux-arch@vger.kernel.org
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Wei Huang <wei@redhat.com>
Cc: x86-ml <x86@kernel.org>
Cc: Xiaoyao Li <xiaoyao.li@linux.intel.com>
Link: https://lkml.kernel.org/r/20191011115108.12392-8-jslaby@suse.cz
Stable-dep-of: 264b82fdb498 ("x86/decompressor: Don't rely on upper 32 bits of GPRs being preserved")
Signed-off-by: Sasha Levin <sashal@kernel.org>
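For reference, a simplified sketch of what the annotation pair used below
boils down to, paraphrased from the SYM_* helpers this series introduces
in include/linux/linkage.h (not the verbatim kernel macros; ASM_NL is the
arch's assembler statement separator, ';' on x86):

	/* local linkage: no .globl is emitted for the symbol */
	#define SYM_L_LOCAL(name)
	/* "no alignment": no .align is emitted before the symbol */
	#define SYM_A_NONE

	/* the START annotation thus reduces to the bare label... */
	#define SYM_FUNC_START_LOCAL_NOALIGN(name)	\
		SYM_L_LOCAL(name) ASM_NL		\
		SYM_A_NONE ASM_NL			\
		name:

	/* ...while the END annotation records the symbol's type and size */
	#define SYM_FUNC_END(name)			\
		.type name STT_FUNC ASM_NL		\
		.size name, .-name

With the local-linkage and no-alignment variants expanding to nothing, the
start annotation is effectively the bare label it replaces, and .L* names
stay assembler-local, which lines up with the note above that the
annotations do not generate anything yet.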
/* SPDX-License-Identifier: GPL-2.0-only */
/* ----------------------------------------------------------------------- *
 *
 *   Copyright (C) 1991, 1992 Linus Torvalds
 *   Copyright 2007 rPath, Inc. - All Rights Reserved
 *
 * ----------------------------------------------------------------------- */

/*
 * The actual transition into protected mode
 */

#include <asm/boot.h>
#include <asm/processor-flags.h>
#include <asm/segment.h>
#include <linux/linkage.h>

	.text
	.code16

/*
 * void protected_mode_jump(u32 entrypoint, u32 bootparams);
 */
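# The boot code is built with -mregparm=3, so 'entrypoint' arrives
# in %eax and 'bootparams' in %edx.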
GLOBAL(protected_mode_jump)
	movl	%edx, %esi		# Pointer to boot_params table

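	# (%cs << 4) is the linear base address of the real-mode code
	# segment; patch it into the ljmpl operand at label 2 below, so
	# that the far jump lands on .Lin_pm32 once the flat __BOOT_CS
	# descriptor (base 0) is in force.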
	xorl	%ebx, %ebx
	movw	%cs, %bx
	shll	$4, %ebx
	addl	%ebx, 2f
	jmp	1f			# Short jump to serialize on 386/486
1:

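	# Stage the flat data segment selector and the TSS selector;
	# .Lin_pm32 consumes them from %ecx and %di after the switch.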
	movw	$__BOOT_DS, %cx
	movw	$__BOOT_TSS, %di

	movl	%cr0, %edx
	orb	$X86_CR0_PE, %dl	# Protected mode
	movl	%edx, %cr0

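	# The far jump is hand-encoded so that its 32-bit offset (at
	# label 2) could be patched at run time above.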
	# Transition to 32-bit mode
	.byte	0x66, 0xea		# ljmpl opcode
2:	.long	.Lin_pm32		# offset
	.word	__BOOT_CS		# segment
ENDPROC(protected_mode_jump)

	.code32
	.section ".text32","ax"
SYM_FUNC_START_LOCAL_NOALIGN(.Lin_pm32)
	# Set up data segments for flat 32-bit mode
	movl	%ecx, %ds
	movl	%ecx, %es
	movl	%ecx, %fs
	movl	%ecx, %gs
	movl	%ecx, %ss
	# The 32-bit code sets up its own stack, but this way we do have
	# a valid stack if some debugging hack wants to use it.
	addl	%ebx, %esp

	# Set up TR to make Intel VT happy
	ltr	%di

	# Clear registers to allow for future extensions to the
	# 32-bit boot protocol
	xorl	%ecx, %ecx
	xorl	%edx, %edx
	xorl	%ebx, %ebx
	xorl	%ebp, %ebp
	xorl	%edi, %edi

	# Set up LDTR to make Intel VT happy
	lldt	%cx

	jmpl	*%eax			# Jump to the 32-bit entrypoint
SYM_FUNC_END(.Lin_pm32)
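For context, protected_mode_jump() is reached from go_to_protected_mode()
in arch/x86/boot/pm.c. The sketch below is abbreviated from that function
rather than quoted verbatim; it shows where the two register arguments
come from:

void go_to_protected_mode(void)
{
	realmode_switch_hook();		/* hook also disables interrupts */

	if (enable_a20()) {		/* the A20 gate must respond */
		puts("A20 gate not responding, unable to boot...\n");
		die();
	}

	reset_coprocessor();
	mask_all_interrupts();

	setup_idt();			/* null IDT for now */
	setup_gdt();			/* __BOOT_CS/__BOOT_DS/__BOOT_TSS */

	/* entrypoint in %eax, linear address of boot_params in %edx */
	protected_mode_jump(boot_params.hdr.code32_start,
			    (u32)&boot_params + (ds() << 4));
}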