MIPS: CM: Add cluster & block args to mips_cm_lock_other()
With CM >= 3.5 we have the notion of multiple clusters & can access their CM, CPC & GIC registers via the appropriate redirect/other register blocks. In order to allow for this, introduce cluster & block arguments to mips_cm_lock_other(), which configures the redirect/other region to point at the appropriate cluster, core, VP & register block.

Since we now have 4 arguments to mips_cm_lock_other(), and a common use is likely to be targeting the cluster, core & VP corresponding to a particular Linux CPU number, we also add a new mips_cm_lock_other_cpu() helper function which handles that without the caller needing to manually pull out the cluster, core & VP numbers.

Signed-off-by: Paul Burton <paul.burton@imgtec.com>
Cc: linux-mips@linux-mips.org
Patchwork: https://patchwork.linux-mips.org/patch/17013/
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
This commit is contained in:
parent 5616897efd
commit 68923cdc2e
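For illustration, here is a minimal sketch (not part of the patch; the function name is hypothetical) of how a caller might read another CPU's PVPE count through the redirect/other region using only the helpers and macros this change introduces or touches:

        /* Hypothetical example, for illustration only. */
        static unsigned int example_read_other_cpu_pvpe(unsigned int cpu)
        {
                unsigned int cfg;

                /* Old API: mips_cm_lock_other(core, vp);
                 * New API: cluster, core, VP & register block, or simply a
                 * Linux CPU number via the new mips_cm_lock_other_cpu() helper. */
                mips_cm_lock_other_cpu(cpu, CM_GCR_Cx_OTHER_BLOCK_LOCAL);
                cfg = read_gcr_co_config() & CM_GCR_Cx_CONFIG_PVPE;
                mips_cm_unlock_other();

                return cfg;
        }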
arch/mips/include/asm/mips-cm.h

@@ -437,29 +437,56 @@ static inline unsigned int mips_cm_vp_id(unsigned int cpu)
 #ifdef CONFIG_MIPS_CM

 /**
- * mips_cm_lock_other - lock access to another core
+ * mips_cm_lock_other - lock access to redirect/other region
+ * @cluster: the other cluster to be accessed
  * @core: the other core to be accessed
  * @vp: the VP within the other core to be accessed
+ * @block: the register block to be accessed
  *
- * Call before operating upon a core via the 'other' register region in
- * order to prevent the region being moved during access. Must be followed
- * by a call to mips_cm_unlock_other.
+ * Configure the redirect/other region for the local core/VP (depending upon
+ * the CM revision) to target the specified @cluster, @core, @vp & register
+ * @block. Must be called before using the redirect/other region, and followed
+ * by a call to mips_cm_unlock_other() when access to the redirect/other region
+ * is complete.
+ *
+ * This function acquires a spinlock such that code between it &
+ * mips_cm_unlock_other() calls cannot be pre-empted by anything which may
+ * reconfigure the redirect/other region, and cannot be interfered with by
+ * another VP in the core. As such calls to this function should not be nested.
  */
-extern void mips_cm_lock_other(unsigned int core, unsigned int vp);
+extern void mips_cm_lock_other(unsigned int cluster, unsigned int core,
+                               unsigned int vp, unsigned int block);

 /**
- * mips_cm_unlock_other - unlock access to another core
+ * mips_cm_unlock_other - unlock access to redirect/other region
  *
- * Call after operating upon another core via the 'other' register region.
- * Must be called after mips_cm_lock_other.
+ * Must be called after mips_cm_lock_other() once all required access to the
+ * redirect/other region has been completed.
  */
 extern void mips_cm_unlock_other(void);

 #else /* !CONFIG_MIPS_CM */

-static inline void mips_cm_lock_other(unsigned int core, unsigned int vp) { }
+static inline void mips_cm_lock_other(unsigned int cluster, unsigned int core,
+                                      unsigned int vp, unsigned int block) { }
 static inline void mips_cm_unlock_other(void) { }

 #endif /* !CONFIG_MIPS_CM */

+/**
+ * mips_cm_lock_other_cpu - lock access to redirect/other region
+ * @cpu: the other CPU whose register we want to access
+ *
+ * Configure the redirect/other region for the local core/VP (depending upon
+ * the CM revision) to target the specified @cpu & register @block. This is
+ * equivalent to calling mips_cm_lock_other() but accepts a Linux CPU number
+ * for convenience.
+ */
+static inline void mips_cm_lock_other_cpu(unsigned int cpu, unsigned int block)
+{
+        struct cpuinfo_mips *d = &cpu_data[cpu];
+
+        mips_cm_lock_other(cpu_cluster(d), cpu_core(d), cpu_vpe_id(d), block);
+}
+
 #endif /* __MIPS_ASM_MIPS_CM_H__ */
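As a quick illustration of the locking discipline the kerneldoc above describes (a sketch, not part of the patch; the cluster/core/VP numbers are arbitrary), a caller targeting core 1, VP 0 in cluster 0 would bracket its accesses like this:

        /* Point the redirect/other region at cluster 0, core 1, VP 0,
         * selecting that core's local register block. */
        mips_cm_lock_other(0, 1, 0, CM_GCR_Cx_OTHER_BLOCK_LOCAL);

        /* ... access redirect/other registers here; calls to
         * mips_cm_lock_other() must not be nested ... */

        /* Release the redirect/other region again. */
        mips_cm_unlock_other();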
arch/mips/kernel/mips-cm.c

@@ -257,17 +257,28 @@ int mips_cm_probe(void)
         return 0;
 }

-void mips_cm_lock_other(unsigned int core, unsigned int vp)
+void mips_cm_lock_other(unsigned int cluster, unsigned int core,
+                        unsigned int vp, unsigned int block)
 {
-        unsigned curr_core;
+        unsigned int curr_core, cm_rev;
         u32 val;

+        cm_rev = mips_cm_revision();
         preempt_disable();

-        if (mips_cm_revision() >= CM_REV_CM3) {
+        if (cm_rev >= CM_REV_CM3) {
                 val = core << __ffs(CM3_GCR_Cx_OTHER_CORE);
                 val |= vp << __ffs(CM3_GCR_Cx_OTHER_VP);

+                if (cm_rev >= CM_REV_CM3_5) {
+                        val |= CM_GCR_Cx_OTHER_CLUSTER_EN;
+                        val |= cluster << __ffs(CM_GCR_Cx_OTHER_CLUSTER);
+                        val |= block << __ffs(CM_GCR_Cx_OTHER_BLOCK);
+                } else {
+                        WARN_ON(cluster != 0);
+                        WARN_ON(block != CM_GCR_Cx_OTHER_BLOCK_LOCAL);
+                }
+
                 /*
                  * We need to disable interrupts in SMP systems in order to
                  * ensure that we don't interrupt the caller with code which

@@ -280,7 +291,9 @@ void mips_cm_lock_other(unsigned int core, unsigned int vp)
                 spin_lock_irqsave(this_cpu_ptr(&cm_core_lock),
                                   *this_cpu_ptr(&cm_core_lock_flags));
         } else {
+                WARN_ON(cluster != 0);
                 WARN_ON(vp != 0);
+                WARN_ON(block != CM_GCR_Cx_OTHER_BLOCK_LOCAL);

                 /*
                  * We only have a GCR_CL_OTHER per core in systems with
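For reference, a small worked sketch (not taken from the patch; the cluster, core & VP numbers are made up) of the 'other' register value the CM >= 3.5 path above composes when asked to target cluster 1, core 2, VP 0 and the local register block:

        u32 val;

        val  = 2 << __ffs(CM3_GCR_Cx_OTHER_CORE);       /* core 2 */
        val |= 0 << __ffs(CM3_GCR_Cx_OTHER_VP);         /* VP 0 */
        val |= CM_GCR_Cx_OTHER_CLUSTER_EN;              /* enable cluster redirection */
        val |= 1 << __ffs(CM_GCR_Cx_OTHER_CLUSTER);     /* cluster 1 */
        val |= CM_GCR_Cx_OTHER_BLOCK_LOCAL << __ffs(CM_GCR_Cx_OTHER_BLOCK);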
arch/mips/kernel/smp-cps.c

@@ -52,7 +52,7 @@ static unsigned core_vpe_count(unsigned core)
             && (!IS_ENABLED(CONFIG_CPU_MIPSR6) || !cpu_has_vp))
                 return 1;

-        mips_cm_lock_other(core, 0);
+        mips_cm_lock_other(0, core, 0, CM_GCR_Cx_OTHER_BLOCK_LOCAL);
         cfg = read_gcr_co_config() & CM_GCR_Cx_CONFIG_PVPE;
         mips_cm_unlock_other();
         return cfg + 1;

@@ -214,7 +214,7 @@ static void boot_core(unsigned int core, unsigned int vpe_id)
         unsigned timeout;

         /* Select the appropriate core */
-        mips_cm_lock_other(core, 0);
+        mips_cm_lock_other(0, core, 0, CM_GCR_Cx_OTHER_BLOCK_LOCAL);

         /* Set its reset vector */
         write_gcr_co_reset_base(CKSEG1ADDR((unsigned long)mips_cps_core_entry));

@@ -313,7 +313,7 @@ static void cps_boot_secondary(int cpu, struct task_struct *idle)
         }

         if (cpu_has_vp) {
-                mips_cm_lock_other(core, vpe_id);
+                mips_cm_lock_other(0, core, vpe_id, CM_GCR_Cx_OTHER_BLOCK_LOCAL);
                 core_entry = CKSEG1ADDR((unsigned long)mips_cps_core_entry);
                 write_gcr_co_reset_base(core_entry);
                 mips_cm_unlock_other();

@@ -518,7 +518,7 @@ static void cps_cpu_die(unsigned int cpu)
                  */
                 fail_time = ktime_add_ms(ktime_get(), 2000);
                 do {
-                        mips_cm_lock_other(core, 0);
+                        mips_cm_lock_other(0, core, 0, CM_GCR_Cx_OTHER_BLOCK_LOCAL);
                         mips_cpc_lock_other(core);
                         stat = read_cpc_co_stat_conf();
                         stat &= CPC_Cx_STAT_CONF_SEQSTATE;

@@ -562,7 +562,7 @@ static void cps_cpu_die(unsigned int cpu)
                         panic("Failed to call remote sibling CPU\n");
         } else if (cpu_has_vp) {
                 do {
-                        mips_cm_lock_other(core, vpe_id);
+                        mips_cm_lock_other(0, core, vpe_id, CM_GCR_Cx_OTHER_BLOCK_LOCAL);
                         stat = read_cpc_co_vp_running();
                         mips_cm_unlock_other();
                 } while (stat & (1 << vpe_id));

arch/mips/kernel/smp.c

@@ -190,7 +190,7 @@ void mips_smp_send_ipi_mask(const struct cpumask *mask, unsigned int action)
                 core = cpu_core(&cpu_data[cpu]);

                 while (!cpumask_test_cpu(cpu, &cpu_coherent_mask)) {
-                        mips_cm_lock_other(core, 0);
+                        mips_cm_lock_other_cpu(cpu, CM_GCR_Cx_OTHER_BLOCK_LOCAL);
                         mips_cpc_lock_other(core);
                         write_cpc_co_cmd(CPC_Cx_CMD_PWRUP);
                         mips_cpc_unlock_other();