android_kernel_xiaomi_sm8350/arch/x86_64/kernel/machine_kexec.c


/*
 * machine_kexec.c - handle transition of Linux booting another kernel
 * Copyright (C) 2002-2005 Eric Biederman <ebiederm@xmission.com>
 *
 * This source code is licensed under the GNU General Public License,
 * Version 2.  See the file COPYING for more details.
 */
#include <linux/mm.h>
#include <linux/kexec.h>
#include <linux/string.h>
#include <linux/reboot.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/io.h>
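
/*
 * The helpers below build the identity-mapped page table used by the
 * kexec trampoline.  init_level2_page() fills one PMD page with 2MB
 * large-page entries that map physical addresses 1:1, covering the
 * PUD_SIZE (1GB) region that starts at addr.
 */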
static void init_level2_page(pmd_t *level2p, unsigned long addr)
{
        unsigned long end_addr;

        addr &= PAGE_MASK;
        end_addr = addr + PUD_SIZE;
        while (addr < end_addr) {
                set_pmd(level2p++, __pmd(addr | __PAGE_KERNEL_LARGE_EXEC));
                addr += PMD_SIZE;
        }
}
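
/*
 * init_level3_page() fills one PUD page.  For each 1GB slice below
 * last_addr it allocates a PMD page from the image's control pages,
 * populates it with init_level2_page(), and clears whatever entries
 * remain unused.
 */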
static int init_level3_page(struct kimage *image, pud_t *level3p,
                            unsigned long addr, unsigned long last_addr)
{
        unsigned long end_addr;
        int result;

        result = 0;
        addr &= PAGE_MASK;
        end_addr = addr + PGDIR_SIZE;
        while ((addr < last_addr) && (addr < end_addr)) {
                struct page *page;
                pmd_t *level2p;

                page = kimage_alloc_control_pages(image, 0);
                if (!page) {
                        result = -ENOMEM;
                        goto out;
                }
                level2p = (pmd_t *)page_address(page);
                init_level2_page(level2p, addr);
                set_pud(level3p++, __pud(__pa(level2p) | _KERNPG_TABLE));
                addr += PUD_SIZE;
        }
        /* clear the unused entries */
        while (addr < end_addr) {
                pud_clear(level3p++);
                addr += PUD_SIZE;
        }
out:
        return result;
}
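
/*
 * init_level4_page() does the same at the top (PGD) level: one PUD page
 * per populated PGDIR_SIZE region below last_addr, with the remaining
 * top-level entries cleared.
 */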
static int init_level4_page(struct kimage *image, pgd_t *level4p,
                            unsigned long addr, unsigned long last_addr)
{
        unsigned long end_addr;
        int result;

        result = 0;
        addr &= PAGE_MASK;
        end_addr = addr + (PTRS_PER_PGD * PGDIR_SIZE);
        while ((addr < last_addr) && (addr < end_addr)) {
                struct page *page;
                pud_t *level3p;

                page = kimage_alloc_control_pages(image, 0);
                if (!page) {
                        result = -ENOMEM;
                        goto out;
                }
                level3p = (pud_t *)page_address(page);
                result = init_level3_page(image, level3p, addr, last_addr);
                if (result)
                        goto out;
                set_pgd(level4p++, __pgd(__pa(level3p) | _KERNPG_TABLE));
                addr += PGDIR_SIZE;
        }
        /* clear the unused entries */
        while (addr < end_addr) {
                pgd_clear(level4p++);
                addr += PGDIR_SIZE;
        }
out:
        return result;
}
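
/*
 * Build the complete identity map in the page rooted at start_pgtable,
 * covering physical memory from 0 up to end_pfn.
 */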
static int init_pgtable(struct kimage *image, unsigned long start_pgtable)
{
        pgd_t *level4p;

        level4p = (pgd_t *)__va(start_pgtable);
        return init_level4_page(image, level4p, 0, end_pfn << PAGE_SHIFT);
}
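
/*
 * Load a new IDT.  machine_kexec() calls this with address 0 and limit 0
 * to leave the table effectively invalid before jumping to the new kernel.
 */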
static void set_idt(void *newidt, u16 limit)
{
        struct desc_ptr curidt;

        /* x86-64 supports unaligned loads & stores */
        curidt.size = limit;
        curidt.address = (unsigned long)newidt;

        __asm__ __volatile__ (
                "lidtq %0\n"
                : : "m" (curidt)
                );
}
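
/*
 * Load a new GDT; same idea as set_idt() above.
 */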
static void set_gdt(void *newgdt, u16 limit)
{
        struct desc_ptr curgdt;

        /* x86-64 supports unaligned loads & stores */
        curgdt.size = limit;
        curgdt.address = (unsigned long)newgdt;

        __asm__ __volatile__ (
                "lgdtq %0\n"
                : : "m" (curgdt)
                );
}
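
/*
 * Reload every data segment register with __KERNEL_DS so that the
 * descriptors cached in the segment registers remain usable after the
 * GDT itself is invalidated below.
 */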
static void load_segments(void)
{
        __asm__ __volatile__ (
                "\tmovl %0,%%ds\n"
                "\tmovl %0,%%es\n"
                "\tmovl %0,%%ss\n"
                "\tmovl %0,%%fs\n"
                "\tmovl %0,%%gs\n"
                : : "a" (__KERNEL_DS) : "memory"
                );
}
typedef NORET_TYPE void (*relocate_new_kernel_t)(unsigned long indirection_page,
                                                 unsigned long control_code_buffer,
                                                 unsigned long start_address,
                                                 unsigned long pgtable) ATTRIB_NORET;

extern const unsigned char relocate_new_kernel[];
extern const unsigned long relocate_new_kernel_size;
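
/*
 * Called at image load time: build the identity-mapped page table in the
 * control pages and copy the relocation trampoline into the control code
 * buffer, so that machine_kexec() itself never needs to allocate memory.
 */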
int machine_kexec_prepare(struct kimage *image)
{
        unsigned long start_pgtable, control_code_buffer;
        int result;

        /* Calculate the offsets */
        start_pgtable = page_to_pfn(image->control_code_page) << PAGE_SHIFT;
        control_code_buffer = start_pgtable + PAGE_SIZE;

        /* Setup the identity mapped 64bit page table */
        result = init_pgtable(image, start_pgtable);
        if (result)
                return result;

        /* Place the code in the reboot code buffer */
        memcpy(__va(control_code_buffer), relocate_new_kernel,
               relocate_new_kernel_size);

        return 0;
}
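
/*
 * Nothing to undo here; the control pages are released by the generic
 * kexec code when the image is freed.
 */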
void machine_kexec_cleanup(struct kimage *image)
{
        return;
}

/*
 * Do not allocate memory (or fail in any way) in machine_kexec().
 * We are past the point of no return, committed to rebooting now.
 */
NORET_TYPE void machine_kexec(struct kimage *image)
{
        unsigned long page_list;
        unsigned long control_code_buffer;
        unsigned long start_pgtable;
        relocate_new_kernel_t rnk;

        /* Interrupts aren't acceptable while we reboot */
        local_irq_disable();

        /* Calculate the offsets */
        page_list = image->head;
        start_pgtable = page_to_pfn(image->control_code_page) << PAGE_SHIFT;
        control_code_buffer = start_pgtable + PAGE_SIZE;

        /* Set the low half of the page table to my identity mapped
         * page table for kexec.  Leave the high half pointing at the
         * kernel pages.  Don't bother to flush the global pages
         * as that will happen when I fully switch to my identity mapped
         * page table anyway.
         */
        memcpy(__va(read_cr3()), __va(start_pgtable), PAGE_SIZE/2);
        __flush_tlb();

        /* The segment registers are funny things: when you set them to a
         * specific selector they are automatically loaded from a table in
         * memory, but that table is never accessed again unless you set
         * the segment to a different selector.
         *
         * The more common model is a cache, where the behind-the-scenes
         * work is done on load but the cached copy may also be dropped
         * at arbitrary times.
         *
         * I take advantage of this here by force loading the segments,
         * before I zap the gdt with an invalid value.
         */
        load_segments();

        /* The gdt & idt are now invalid.
         * If you want to load them you must set up your own idt & gdt.
         */
        set_gdt(phys_to_virt(0), 0);
        set_idt(phys_to_virt(0), 0);

        /* now call it */
        rnk = (relocate_new_kernel_t) control_code_buffer;
        (*rnk)(page_list, control_code_buffer, image->start, start_pgtable);
}