android_kernel_xiaomi_sm8350/arch/powerpc/kernel/pmc.c
Anton Blanchard 177e9ea49d [POWERPC] Fix typo: MMCR0_PMA0 != MMCR0_PMAO
pmc.c has:

#ifndef MMCR0_PMA0
#define MMCR0_PMA0     0

This one took a while to find.  Unfortunately it's the wrong define
(number 0 vs letter O). It's probably worth removing this override, since
if our includes get screwed up we will have the same (hard to debug)
failure.

Fix it simply for now, so that we can backport to stable.
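
A minimal sketch of what is at stake, assuming (as the message implies) that
the zero-valued fallback is the definition that actually ends up in the clear
mask; the bit values follow the usual asm/reg.h layout and are illustrative
only, not meant for the tree:

/* Illustrative only. */
#define MMCR0_PMXE  0x04000000  /* performance monitor exception enable */
#define MMCR0_PMAO  0x00000080  /* performance monitor alert occurred   */
#define MMCR0_PMA0  0           /* the typo'd fallback: digit zero      */

static inline unsigned long mask_with_typo(unsigned long mmcr0)
{
        return mmcr0 & ~(MMCR0_PMXE | MMCR0_PMA0);  /* alert bit left set */
}

static inline unsigned long mask_fixed(unsigned long mmcr0)
{
        return mmcr0 & ~(MMCR0_PMXE | MMCR0_PMAO);  /* alert bit cleared  */
}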

Signed-off-by: Anton Blanchard <anton@samba.org>
Signed-off-by: Paul Mackerras <paulus@samba.org>
2007-05-22 20:20:56 +10:00

102 lines
2.3 KiB
C

/*
 *  arch/powerpc/kernel/pmc.c
 *
 *  Copyright (C) 2004 David Gibson, IBM Corporation.
 *  Includes code formerly from arch/ppc/kernel/perfmon.c:
 *    Author: Andy Fleming
 *    Copyright (c) 2004 Freescale Semiconductor, Inc
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 */
#include <linux/errno.h>
#include <linux/spinlock.h>
#include <linux/module.h>

#include <asm/processor.h>
#include <asm/cputable.h>
#include <asm/pmc.h>

/* Fallback for configs whose headers do not define MMCR0_PMAO: a zero bit
 * makes the clear in dummy_perf() a no-op instead of a build failure.
 */
#ifndef MMCR0_PMAO
#define MMCR0_PMAO 0
#endif
/* Default PMC interrupt handler: disable further PMC exceptions (and clear
 * the pending alert on IBM-style PMCs).
 */
static void dummy_perf(struct pt_regs *regs)
{
#if defined(CONFIG_FSL_BOOKE) && !defined(CONFIG_E200)
        mtpmr(PMRN_PMGC0, mfpmr(PMRN_PMGC0) & ~PMGC0_PMIE);
#elif defined(CONFIG_PPC64) || defined(CONFIG_6xx)
        if (cur_cpu_spec->pmc_type == PPC_PMC_IBM)
                mtspr(SPRN_MMCR0, mfspr(SPRN_MMCR0) & ~(MMCR0_PMXE|MMCR0_PMAO));
#else
        mtspr(SPRN_MMCR0, mfspr(SPRN_MMCR0) & ~MMCR0_PMXE);
#endif
}

static DEFINE_SPINLOCK(pmc_owner_lock);
static void *pmc_owner_caller; /* mostly for debugging */
perf_irq_t perf_irq = dummy_perf;

int reserve_pmc_hardware(perf_irq_t new_perf_irq)
{
        int err = 0;

        spin_lock(&pmc_owner_lock);

        if (pmc_owner_caller) {
                printk(KERN_WARNING "reserve_pmc_hardware: "
                       "PMC hardware busy (reserved by caller %p)\n",
                       pmc_owner_caller);
                err = -EBUSY;
                goto out;
        }

        pmc_owner_caller = __builtin_return_address(0);
        perf_irq = new_perf_irq ? new_perf_irq : dummy_perf;

 out:
        spin_unlock(&pmc_owner_lock);
        return err;
}
EXPORT_SYMBOL_GPL(reserve_pmc_hardware);

void release_pmc_hardware(void)
{
        spin_lock(&pmc_owner_lock);

        WARN_ON(! pmc_owner_caller);

        pmc_owner_caller = NULL;
        perf_irq = dummy_perf;

        spin_unlock(&pmc_owner_lock);
}
EXPORT_SYMBOL_GPL(release_pmc_hardware);

#ifdef CONFIG_PPC64
void power4_enable_pmcs(void)
{
        unsigned long hid0;

        hid0 = mfspr(SPRN_HID0);
        hid0 |= 1UL << (63 - 20);       /* HID0 bit 20 (IBM numbering): enable the performance monitor */

        /* POWER4 requires the following sequence */
        asm volatile(
                "sync\n"
                "mtspr %1, %0\n"
                "mfspr %0, %1\n"
                "mfspr %0, %1\n"
                "mfspr %0, %1\n"
                "mfspr %0, %1\n"
                "mfspr %0, %1\n"
                "mfspr %0, %1\n"
                "isync" : "=&r" (hid0) : "i" (SPRN_HID0), "0" (hid0):
                "memory");
}
#endif /* CONFIG_PPC64 */
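
For orientation only, a sketch of how a client (e.g. a profiling driver) would
typically claim and release the counters through this interface; the names
prefixed with example_ are hypothetical, while reserve_pmc_hardware(),
release_pmc_hardware() and the perf_irq_t callback type come from the file
above and asm/pmc.h:

#include <linux/kernel.h>
#include <linux/ptrace.h>       /* struct pt_regs */
#include <asm/pmc.h>            /* reserve_pmc_hardware(), release_pmc_hardware(), perf_irq_t */

/* Hypothetical PMC interrupt handler installed by the driver. */
static void example_pmc_handler(struct pt_regs *regs)
{
        /* read the counters, record a sample, clear the interrupt condition ... */
}

static int example_start_profiling(void)
{
        int err;

        /* Take ownership of the PMC hardware; -EBUSY if someone else already has it. */
        err = reserve_pmc_hardware(example_pmc_handler);
        if (err)
                return err;

        /* ... program MMCR0 and the counters here ... */
        return 0;
}

static void example_stop_profiling(void)
{
        /* Hand the counters back; perf_irq reverts to dummy_perf. */
        release_pmc_hardware();
}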