Re: [PATCH v7 07/11] powerpc/powernv: Add platform support for stop instruction
From: Michael Neuling
Date: Thu Jul 07 2016 - 22:20:53 EST
> diff --git a/arch/powerpc/include/asm/cpuidle.h b/arch/powerpc/include/asm/cpuidle.h
> index d2f99ca..3d7fc06 100644
> --- a/arch/powerpc/include/asm/cpuidle.h
> +++ b/arch/powerpc/include/asm/cpuidle.h
> @@ -13,6 +13,8 @@
> Â#ifndef __ASSEMBLY__
> Âextern u32 pnv_fastsleep_workaround_at_entry[];
> Âextern u32 pnv_fastsleep_workaround_at_exit[];
> +
> +extern u64 pnv_first_deep_stop_state;
mpe asked a question about this which you neither answered nor addressed.
"Should this have some safe initial value?"
I'm thinking we could do this which is what you have in the init call.
  u64 pnv_first_deep_stop_state = MAX_STOP_STATE;
> @@ -439,7 +540,18 @@ timebase_resync:
> Â Â*/
> Â bne cr4,clear_lock
> Â
> - /* Restore per core state */
> + /*
> + Â* First thread in the core to wake up and its waking up with
> + Â* complete hypervisor state loss. Restore per core hypervisor
> + Â* state.
> + Â*/
> +BEGIN_FTR_SECTION
> + ld r4,_PTCR(r1)
> + mtspr SPRN_PTCR,r4
> + ld r4,_RPR(r1)
> + mtspr SPRN_RPR,r4
RPR looks wrong here. This should be on POWER8 too.
This has changed since v6 and is not noted in the v7 comments. Why are you
changing this now?
> +END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
> +
> Â ld r4,_TSCR(r1)
> Â mtspr SPRN_TSCR,r4
> Â ld r4,_WORC(r1)
> @@ -461,9 +573,7 @@ common_exit:
> Â
> Â /* Waking up from winkle */
> Â
> - /* Restore per thread state */
> - bl __restore_cpu_power8
> -
> +BEGIN_MMU_FTR_SECTION
> 	/* Restore SLB from PACA */
> Â ld r8,PACA_SLBSHADOWPTR(r13)
> Â
> @@ -477,6 +587,9 @@ common_exit:
> Â slbmte r6,r5
> Â1: addi r8,r8,16
> Â .endr
> +END_MMU_FTR_SECTION_IFCLR(MMU_FTR_RADIX)
> +
> + /* Restore per thread state */
This FTR section is too big — it ends up at 25 instructions with the loop.
It would probably be better like this:
BEGIN_MMU_FTR_SECTION
b no_segments
END_MMU_FTR_SECTION_IFSET(MMU_FTR_RADIX)
/* Restore SLB from PACA */
ld r8,PACA_SLBSHADOWPTR(r13)
.rept SLB_NUM_BOLTED
li r3, SLBSHADOW_SAVEAREA
LDX_BE r5, r8, r3
addi r3, r3, 8
LDX_BE r6, r8, r3
andis. r7,r5,SLB_ESID_V@h
beq 1f
slbmte r6,r5
1: addi r8,r8,16
.endr
no_segments: