android_kernel_xiaomi_sm8350/arch/powerpc/mm/mmu_context.c
Alexey Kardashevskiy ab8dff4b71 powerpc/mm: Switch obsolete dssall to .long
commit d51f86cfd8e378d4907958db77da3074f6dce3ba upstream.

The dssall ("Data Stream Stop All") instruction has been obsolete, along
with other Data Cache Instructions, since ISA 2.03 (2006).

LLVM IAS does not support it, but PPC970 appears to still use it. Switch
dssall to a raw .long encoding, as there is not much point in fixing LLVM.

Signed-off-by: Alexey Kardashevskiy <aik@ozlabs.ru>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20211221055904.555763-6-aik@ozlabs.ru
[sudip: adjust context]
Signed-off-by: Sudip Mukherjee <sudipm.mukherjee@gmail.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
2022-06-22 14:11:24 +02:00
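
For reference, the idea behind the change: instead of the dssall mnemonic,
which LLVM IAS rejects, emit the instruction's raw encoding with a .long
directive, so any assembler accepts it. A minimal sketch, assuming the
upstream encoding 0x7e00066c for dssall; the real macro lives in
arch/powerpc/include/asm/ppc-opcode.h and is composed from the kernel's
PPC_INST_*/stringify_in_c() helpers, and stop_altivec_streams() is a
hypothetical wrapper added here purely for illustration:

/* Sketch: emit dssall by raw opcode so the assembler need not know the
 * mnemonic. 0x7e00066c is the dssall encoding (dss with the A bit set).
 */
#define PPC_DSSALL      ".long 0x7e00066c"

/* Hypothetical helper, for illustration only */
static inline void stop_altivec_streams(void)
{
        /* Same effect as the old asm volatile ("dssall") */
        asm volatile (PPC_DSSALL);
}

This is how the PPC_DSSALL macro used in switch_mm_irqs_off() below keeps
working under assemblers that dropped the obsolete mnemonic.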


// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  Common implementation of switch_mm_irqs_off
 *
 *  Copyright IBM Corp. 2017
 */

#include <linux/mm.h>
#include <linux/cpu.h>
#include <linux/sched/mm.h>

#include <asm/mmu_context.h>
#include <asm/pgalloc.h>

#if defined(CONFIG_PPC32)
static inline void switch_mm_pgdir(struct task_struct *tsk,
                                   struct mm_struct *mm)
{
        /* 32-bit keeps track of the current PGDIR in the thread struct */
        tsk->thread.pgdir = mm->pgd;
}
#elif defined(CONFIG_PPC_BOOK3E_64)
static inline void switch_mm_pgdir(struct task_struct *tsk,
                                   struct mm_struct *mm)
{
        /* 64-bit Book3E keeps track of current PGD in the PACA */
        get_paca()->pgd = mm->pgd;
}
#else
static inline void switch_mm_pgdir(struct task_struct *tsk,
                                   struct mm_struct *mm) { }
#endif

void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
                        struct task_struct *tsk)
{
        bool new_on_cpu = false;

        /* Mark this context has been used on the new CPU */
        if (!cpumask_test_cpu(smp_processor_id(), mm_cpumask(next))) {
                cpumask_set_cpu(smp_processor_id(), mm_cpumask(next));
                inc_mm_active_cpus(next);

                /*
                 * This full barrier orders the store to the cpumask above vs
                 * a subsequent operation which allows this CPU to begin loading
                 * translations for next.
                 *
                 * When using the radix MMU that operation is the load of the
                 * MMU context id, which is then moved to SPRN_PID.
                 *
                 * For the hash MMU it is either the first load from slb_cache
                 * in switch_slb(), and/or the store of paca->mm_ctx_id in
                 * copy_mm_to_paca().
                 *
                 * On the other side, the barrier is in mm/tlb-radix.c for
                 * radix which orders earlier stores to clear the PTEs vs
                 * the load of mm_cpumask. And pte_xchg which does the same
                 * thing for hash.
                 *
                 * This full barrier is needed by membarrier when switching
                 * between processes after store to rq->curr, before user-space
                 * memory accesses.
                 */
                smp_mb();

                new_on_cpu = true;
        }

        /* Some subarchs need to track the PGD elsewhere */
        switch_mm_pgdir(tsk, next);

        /* Nothing else to do if we aren't actually switching */
        if (prev == next)
                return;

        /*
         * We must stop all altivec streams before changing the HW
         * context
         */
        if (cpu_has_feature(CPU_FTR_ALTIVEC))
                asm volatile (PPC_DSSALL);

        if (new_on_cpu)
                radix_kvm_prefetch_workaround(next);
        else
                membarrier_arch_switch_mm(prev, next, tsk);

        /*
         * The actual HW switching method differs between the various
         * sub architectures. Out of line for now
         */
        switch_mmu_context(prev, next, tsk);
}

#ifndef CONFIG_PPC_BOOK3S_64
void arch_exit_mmap(struct mm_struct *mm)
{
        void *frag = pte_frag_get(&mm->context);

        if (frag)
                pte_frag_destroy(frag);
}
#endif
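
For context on where switch_mm_irqs_off() sits: on powerpc, switch_mm() is a
thin wrapper that masks interrupts around it. A minimal sketch, assuming the
usual definition from arch/powerpc/include/asm/mmu_context.h in kernels of
this era:

static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
                             struct task_struct *tsk)
{
        unsigned long flags;

        local_irq_save(flags);
        switch_mm_irqs_off(prev, next, tsk);
        local_irq_restore(flags);
}

Splitting out the irqs-off body lets callers that already run with interrupts
disabled, such as the scheduler's context-switch path, call
switch_mm_irqs_off() directly and skip a redundant save/restore.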