diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 5ee5789c14fb..409bb4e34235 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -3870,8 +3870,12 @@ void scheduler_tick(void)
 	if (curr->sched_class == &fair_sched_class)
 		check_for_migration(rq, curr);
 
-	if (idle_cpu(cpu) && is_reserved(cpu))
+#ifdef CONFIG_SMP
+	rq_lock(rq, &rf);
+	if (idle_cpu(cpu) && is_reserved(cpu) && !rq->active_balance)
 		clear_reserved(cpu);
+	rq_unlock(rq, &rf);
+#endif
 
 	trace_android_vh_scheduler_tick(rq);
 }
diff --git a/kernel/sched/walt/qc_vas.c b/kernel/sched/walt/qc_vas.c
index d4d6c838babe..ce49eac6d590 100644
--- a/kernel/sched/walt/qc_vas.c
+++ b/kernel/sched/walt/qc_vas.c
@@ -48,12 +48,23 @@ void walt_rotate_work_func(struct work_struct *work)
 {
 	struct walt_rotate_work *wr = container_of(work,
 				struct walt_rotate_work, w);
+	struct rq *src_rq = cpu_rq(wr->src_cpu), *dst_rq = cpu_rq(wr->dst_cpu);
+	unsigned long flags;
 
 	migrate_swap(wr->src_task, wr->dst_task, wr->dst_cpu, wr->src_cpu);
 
 	put_task_struct(wr->src_task);
 	put_task_struct(wr->dst_task);
 
+	local_irq_save(flags);
+	double_rq_lock(src_rq, dst_rq);
+
+	dst_rq->active_balance = 0;
+	src_rq->active_balance = 0;
+
+	double_rq_unlock(src_rq, dst_rq);
+	local_irq_restore(flags);
+
 	clear_reserved(wr->src_cpu);
 	clear_reserved(wr->dst_cpu);
 }
@@ -140,7 +151,8 @@ void walt_check_for_rotation(struct rq *src_rq)
 	dst_rq = cpu_rq(dst_cpu);
 
 	double_rq_lock(src_rq, dst_rq);
-	if (dst_rq->curr->sched_class == &fair_sched_class) {
+	if (dst_rq->curr->sched_class == &fair_sched_class &&
+	    !src_rq->active_balance && !dst_rq->active_balance) {
 		get_task_struct(src_rq->curr);
 		get_task_struct(dst_rq->curr);
 
@@ -153,7 +165,10 @@ void walt_check_for_rotation(struct rq *src_rq)
 
 		wr->src_cpu = src_cpu;
 		wr->dst_cpu = dst_cpu;
+		dst_rq->active_balance = 1;
+		src_rq->active_balance = 1;
 	}
+
 	double_rq_unlock(src_rq, dst_rq);
 
 	if (wr)