Verified Commit a2495963 authored by Michal Hrusecky

kernel: Testing cpufreq patches

parent ac48f3b0
This patch first shortens the register definitions and introduces a
distinction between the Armada XP and Armada 38x values. It then adds
Armada 38x specific functions in order to support cpufreq on these
SoCs.
Signed-off-by: Gregory CLEMENT <gregory.clement@free-electrons.com>
---
.../devicetree/bindings/clock/mvebu-cpu-clock.txt | 27 ++-
drivers/clk/mvebu/clk-cpu.c | 220 ++++++++++++++++-----
2 files changed, 201 insertions(+), 46 deletions(-)
diff --git a/Documentation/devicetree/bindings/clock/mvebu-cpu-clock.txt b/Documentation/devicetree/bindings/clock/mvebu-cpu-clock.txt
index 99c214660bdc..9272b3464ab1 100644
--- a/Documentation/devicetree/bindings/clock/mvebu-cpu-clock.txt
+++ b/Documentation/devicetree/bindings/clock/mvebu-cpu-clock.txt
@@ -1,10 +1,13 @@
Device Tree Clock bindings for cpu clock of Marvell EBU platforms
Required properties:
-- compatible : shall be one of the following:
+- compatible : shall be the following:
"marvell,armada-xp-cpu-clock" - cpu clocks for Armada XP
+ "marvell,armada-38x-cpu-clock", "marvell,armada-xp-cpu-clock" - cpu
+ clocks for Armada 38x
- reg : Address and length of the clock complex register set, followed
- by address and length of the PMU DFS registers
+ by address and length of the PMU DFS registers, for Armada 38x
+ a third register set must be added: DFX server.
- #clock-cells : should be set to 1.
- clocks : shall be the input parent clock phandle for the clock.
@@ -20,3 +23,23 @@ cpu at 0 {
reg = <0>;
clocks = <&cpuclk 0>;
};
+
+or for Armada 38x
+
+cpuclk: clock-complex at 18700 {
+ compatible = "marvell,armada-380-cpu-clock",
+ "marvell,armada-xp-cpu-clock";
+ reg = <0x18700 0xA0>, <0x1c054 0x40>,
+ <0xe4260 0x8>;
+ clocks = <&coreclk 1>;
+ #clock-cells = <1>;
+};
+
+cpu at 0 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a9";
+ reg = <0>;
+ clocks = <&cpuclk 0>;
+ clock-latency = <1000000>;
+ clock-names = "cpu0";
+};
diff --git a/drivers/clk/mvebu/clk-cpu.c b/drivers/clk/mvebu/clk-cpu.c
index 3821a88077ea..19b6e73e3cba 100644
--- a/drivers/clk/mvebu/clk-cpu.c
+++ b/drivers/clk/mvebu/clk-cpu.c
@@ -19,16 +19,34 @@
#include <linux/mvebu-pmsu.h>
#include <asm/smp_plat.h>
-#define SYS_CTRL_CLK_DIVIDER_CTRL_OFFSET 0x0
-#define SYS_CTRL_CLK_DIVIDER_CTRL_RESET_ALL 0xff
-#define SYS_CTRL_CLK_DIVIDER_CTRL_RESET_SHIFT 8
-#define SYS_CTRL_CLK_DIVIDER_CTRL2_OFFSET 0x8
-#define SYS_CTRL_CLK_DIVIDER_CTRL2_NBCLK_RATIO_SHIFT 16
-#define SYS_CTRL_CLK_DIVIDER_VALUE_OFFSET 0xC
-#define SYS_CTRL_CLK_DIVIDER_MASK 0x3F
-
-#define PMU_DFS_RATIO_SHIFT 16
-#define PMU_DFS_RATIO_MASK 0x3F
+/* Clock complex registers */
+#define SYS_CTRL_CLK_DIV_CTRL_OFFSET 0x0
+#define SYS_CTRL_CLK_DIV_CTRL_RESET_ALL 0xFF
+#define SYS_CTRL_CLK_DIV_CTRL_RESET_SHIFT 8
+#define SYS_CTRL_CLK_DIV_VALUE_A38X_OFFSET 0x4
+#define SYS_CTRL_CLK_DIV_CTRL2_OFFSET 0x8
+#define SYS_CTRL_CLK_DIV_CTRL2_NBCLK_RATIO_SHIFT 16
+#define SYS_CTRL_CLK_DIV_VALUE_AXP_OFFSET 0xC
+#define SYS_CTRL_CLK_DIV_MASK 0x3F
+
+/* PMU registers */
+#define PMU_DFS_CTRL1_OFFSET 0x0
+#define PMU_DFS_RATIO_SHIFT 16
+#define PMU_DFS_RATIO_MASK 0x3F
+#define PMUL_ACTIVATE_IF_CTRL_OFFSET 0x3C
+#define PMUL_ACTIVATE_IF_CTRL_PMU_DFS_OVRD_EN_MASK 0xFF
+#define PMUL_ACTIVATE_IF_CTRL_PMU_DFS_OVRD_EN_SHIFT 17
+#define PMUL_ACTIVATE_IF_CTRL_PMU_DFS_OVRD_EN 0x1
+
+/* DFX server registers */
+#define DFX_CPU_PLL_CLK_DIV_CTRL0_OFFSET 0x0
+#define DFX_CPU_PLL_CLK_DIV_CTRL0_RELOAD_SMOOTH_MASK 0xFF
+#define DFX_CPU_PLL_CLK_DIV_CTRL0_RELOAD_SMOOTH_SHIFT 0x8
+#define DFX_CPU_PLL_CLK_DIV_CTRL0_RELOAD_SMOOTH_PCLK 0x10
+#define DFX_CPU_PLL_CLK_DIV_CTRL1_OFFSET 0x4
+#define DFX_CPU_PLL_CLK_DIV_CTRL1_RESET_MASK_MASK 0xFF
+#define DFX_CPU_PLL_CLK_DIV_CTRL1_RESET_MASK_SHIFT 0x0
+#define DFX_CPU_PLL_CLK_DIV_CTRL1_RESET_MASK_PCLK 0x10
#define MAX_CPU 4
struct cpu_clk {
@@ -38,6 +56,7 @@ struct cpu_clk {
const char *parent_name;
void __iomem *reg_base;
void __iomem *pmu_dfs;
+ void __iomem *dfx_server_base;
};
static struct clk **clks;
@@ -46,14 +65,30 @@ static struct clk_onecell_data clk_data;
#define to_cpu_clk(p) container_of(p, struct cpu_clk, hw)
-static unsigned long clk_cpu_recalc_rate(struct clk_hw *hwclk,
+static unsigned long armada_xp_clk_cpu_recalc_rate(struct clk_hw *hwclk,
+ unsigned long parent_rate)
+{
+ struct cpu_clk *cpuclk = to_cpu_clk(hwclk);
+ u32 reg, div;
+
+ reg = readl(cpuclk->reg_base + SYS_CTRL_CLK_DIV_VALUE_AXP_OFFSET);
+ div = (reg >> (cpuclk->cpu * 8)) & SYS_CTRL_CLK_DIV_MASK;
+ return parent_rate / div;
+}
+
+static unsigned long armada_38x_clk_cpu_recalc_rate(struct clk_hw *hwclk,
unsigned long parent_rate)
{
struct cpu_clk *cpuclk = to_cpu_clk(hwclk);
u32 reg, div;
- reg = readl(cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_VALUE_OFFSET);
- div = (reg >> (cpuclk->cpu * 8)) & SYS_CTRL_CLK_DIVIDER_MASK;
+ if (__clk_is_enabled(hwclk->clk) == false) {
+ /* for clock init - don't use divider, set maximal rate */
+ return parent_rate;
+ }
+
+ reg = readl(cpuclk->reg_base + SYS_CTRL_CLK_DIV_VALUE_A38X_OFFSET);
+ div = (reg >> (cpuclk->cpu * 8)) & SYS_CTRL_CLK_DIV_MASK;
return parent_rate / div;
}
@@ -72,42 +107,43 @@ static long clk_cpu_round_rate(struct clk_hw *hwclk, unsigned long rate,
return *parent_rate / div;
}
-static int clk_cpu_off_set_rate(struct clk_hw *hwclk, unsigned long rate,
- unsigned long parent_rate)
-
+static int armada_xp_clk_cpu_off_set_rate(struct clk_hw *hwclk,
+ unsigned long rate,
+ unsigned long parent_rate)
{
struct cpu_clk *cpuclk = to_cpu_clk(hwclk);
u32 reg, div;
u32 reload_mask;
div = parent_rate / rate;
- reg = (readl(cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_VALUE_OFFSET)
- & (~(SYS_CTRL_CLK_DIVIDER_MASK << (cpuclk->cpu * 8))))
+ reg = (readl(cpuclk->reg_base + SYS_CTRL_CLK_DIV_VALUE_AXP_OFFSET)
+ & (~(SYS_CTRL_CLK_DIV_MASK << (cpuclk->cpu * 8))))
| (div << (cpuclk->cpu * 8));
- writel(reg, cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_VALUE_OFFSET);
+ writel(reg, cpuclk->reg_base + SYS_CTRL_CLK_DIV_VALUE_AXP_OFFSET);
/* Set clock divider reload smooth bit mask */
reload_mask = 1 << (20 + cpuclk->cpu);
- reg = readl(cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_CTRL_OFFSET)
+ reg = readl(cpuclk->reg_base + SYS_CTRL_CLK_DIV_CTRL_OFFSET)
| reload_mask;
- writel(reg, cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_CTRL_OFFSET);
+ writel(reg, cpuclk->reg_base + SYS_CTRL_CLK_DIV_CTRL_OFFSET);
/* Now trigger the clock update */
- reg = readl(cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_CTRL_OFFSET)
+ reg = readl(cpuclk->reg_base + SYS_CTRL_CLK_DIV_CTRL_OFFSET)
| 1 << 24;
- writel(reg, cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_CTRL_OFFSET);
+ writel(reg, cpuclk->reg_base + SYS_CTRL_CLK_DIV_CTRL_OFFSET);
/* Wait for clocks to settle down then clear reload request */
udelay(1000);
reg &= ~(reload_mask | 1 << 24);
- writel(reg, cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_CTRL_OFFSET);
+ writel(reg, cpuclk->reg_base + SYS_CTRL_CLK_DIV_CTRL_OFFSET);
udelay(1000);
return 0;
}
-static int clk_cpu_on_set_rate(struct clk_hw *hwclk, unsigned long rate,
- unsigned long parent_rate)
+static int armada_xp_clk_cpu_on_set_rate(struct clk_hw *hwclk,
+ unsigned long rate,
+ unsigned long parent_rate)
{
u32 reg;
unsigned long fabric_div, target_div, cur_rate;
@@ -122,9 +158,9 @@ static int clk_cpu_on_set_rate(struct clk_hw *hwclk, unsigned long rate,
cur_rate = __clk_get_rate(hwclk->clk);
- reg = readl(cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_CTRL2_OFFSET);
- fabric_div = (reg >> SYS_CTRL_CLK_DIVIDER_CTRL2_NBCLK_RATIO_SHIFT) &
- SYS_CTRL_CLK_DIVIDER_MASK;
+ reg = readl(cpuclk->reg_base + SYS_CTRL_CLK_DIV_CTRL2_OFFSET);
+ fabric_div = (reg >> SYS_CTRL_CLK_DIV_CTRL2_NBCLK_RATIO_SHIFT) &
+ SYS_CTRL_CLK_DIV_MASK;
/* Frequency is going up */
if (rate == 2 * cur_rate)
@@ -141,40 +177,101 @@ static int clk_cpu_on_set_rate(struct clk_hw *hwclk, unsigned long rate,
reg |= (target_div << PMU_DFS_RATIO_SHIFT);
writel(reg, cpuclk->pmu_dfs);
- reg = readl(cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_CTRL_OFFSET);
- reg |= (SYS_CTRL_CLK_DIVIDER_CTRL_RESET_ALL <<
- SYS_CTRL_CLK_DIVIDER_CTRL_RESET_SHIFT);
- writel(reg, cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_CTRL_OFFSET);
+ reg = readl(cpuclk->reg_base + SYS_CTRL_CLK_DIV_CTRL_OFFSET);
+ reg |= (SYS_CTRL_CLK_DIV_CTRL_RESET_ALL <<
+ SYS_CTRL_CLK_DIV_CTRL_RESET_SHIFT);
+ writel(reg, cpuclk->reg_base + SYS_CTRL_CLK_DIV_CTRL_OFFSET);
return mvebu_pmsu_dfs_request(cpuclk->cpu);
}
-static int clk_cpu_set_rate(struct clk_hw *hwclk, unsigned long rate,
+static int armada_xp_clk_cpu_set_rate(struct clk_hw *hwclk, unsigned long rate,
unsigned long parent_rate)
{
if (__clk_is_enabled(hwclk->clk))
- return clk_cpu_on_set_rate(hwclk, rate, parent_rate);
+ return armada_xp_clk_cpu_on_set_rate(hwclk, rate, parent_rate);
else
- return clk_cpu_off_set_rate(hwclk, rate, parent_rate);
+ return armada_xp_clk_cpu_off_set_rate(hwclk, rate, parent_rate);
}
+static int armada_38x_clk_cpu_set_rate(struct clk_hw *hwclk, unsigned long rate,
+ unsigned long parent_rate)
+{
+ u32 reg;
+ u32 target_div;
+ unsigned long cur_rate;
+ struct cpu_clk *cpuclk = to_cpu_clk(hwclk);
+
+ /*
+ * PMU DFS registers are not mapped, Device Tree does not
+ * describe them. We cannot change the frequency dynamically.
+ */
+ if (!cpuclk->pmu_dfs)
+ return -ENODEV;
-static const struct clk_ops cpu_ops = {
- .recalc_rate = clk_cpu_recalc_rate,
+ cur_rate = clk_hw_get_rate(hwclk);
+
+ /* Frequency is going up */
+ if (rate >= cur_rate)
+ target_div = 1;
+ /* Frequency is going down */
+ else
+ target_div = 2;
+
+ reg = readl(cpuclk->dfx_server_base + DFX_CPU_PLL_CLK_DIV_CTRL0_OFFSET);
+ reg &= ~(DFX_CPU_PLL_CLK_DIV_CTRL0_RELOAD_SMOOTH_MASK <<
+ DFX_CPU_PLL_CLK_DIV_CTRL0_RELOAD_SMOOTH_SHIFT);
+ reg |= (DFX_CPU_PLL_CLK_DIV_CTRL0_RELOAD_SMOOTH_PCLK <<
+ DFX_CPU_PLL_CLK_DIV_CTRL0_RELOAD_SMOOTH_SHIFT);
+ writel(reg, cpuclk->dfx_server_base + DFX_CPU_PLL_CLK_DIV_CTRL0_OFFSET);
+
+ reg = readl(cpuclk->dfx_server_base + DFX_CPU_PLL_CLK_DIV_CTRL1_OFFSET);
+ reg &= ~(DFX_CPU_PLL_CLK_DIV_CTRL1_RESET_MASK_MASK <<
+ DFX_CPU_PLL_CLK_DIV_CTRL1_RESET_MASK_SHIFT);
+ reg |= (DFX_CPU_PLL_CLK_DIV_CTRL1_RESET_MASK_PCLK <<
+ DFX_CPU_PLL_CLK_DIV_CTRL1_RESET_MASK_SHIFT);
+ writel(reg, cpuclk->dfx_server_base + DFX_CPU_PLL_CLK_DIV_CTRL1_OFFSET);
+
+ reg = readl(cpuclk->pmu_dfs);
+ reg &= ~(PMU_DFS_RATIO_MASK << PMU_DFS_RATIO_SHIFT);
+ reg |= (target_div << PMU_DFS_RATIO_SHIFT);
+ writel(reg, cpuclk->pmu_dfs);
+
+ reg = readl(cpuclk->pmu_dfs + PMUL_ACTIVATE_IF_CTRL_OFFSET);
+ reg &= ~(PMUL_ACTIVATE_IF_CTRL_PMU_DFS_OVRD_EN_MASK <<
+ PMUL_ACTIVATE_IF_CTRL_PMU_DFS_OVRD_EN_SHIFT);
+ reg |= (PMUL_ACTIVATE_IF_CTRL_PMU_DFS_OVRD_EN <<
+ PMUL_ACTIVATE_IF_CTRL_PMU_DFS_OVRD_EN_SHIFT);
+ writel(reg, cpuclk->pmu_dfs + PMUL_ACTIVATE_IF_CTRL_OFFSET);
+
+ return mvebu_pmsu_dfs_request(cpuclk->cpu);
+}
+
+static const struct clk_ops armada_xp_cpu_ops = {
+ .recalc_rate = armada_xp_clk_cpu_recalc_rate,
.round_rate = clk_cpu_round_rate,
- .set_rate = clk_cpu_set_rate,
+ .set_rate = armada_xp_clk_cpu_set_rate,
};
-static void __init of_cpu_clk_setup(struct device_node *node)
+static const struct clk_ops armada_38x_cpu_ops = {
+ .recalc_rate = armada_38x_clk_cpu_recalc_rate,
+ .round_rate = clk_cpu_round_rate,
+ .set_rate = armada_38x_clk_cpu_set_rate,
+};
+
+static void __init common_cpu_clk_init(struct device_node *node, bool cortexa9)
{
struct cpu_clk *cpuclk;
void __iomem *clock_complex_base = of_iomap(node, 0);
void __iomem *pmu_dfs_base = of_iomap(node, 1);
+ void __iomem *dfx_server_base = of_iomap(node, 2);
int ncpus = 0;
struct device_node *dn;
+ bool independent_clocks = true;
+ const struct clk_ops *cpu_ops = NULL;
if (clock_complex_base == NULL) {
pr_err("%s: clock-complex base register not set\n",
- __func__);
+ __func__);
return;
}
@@ -184,7 +281,20 @@ static void __init of_cpu_clk_setup(struct device_node *node)
for_each_node_by_type(dn, "cpu")
ncpus++;
-
+ if (cortexa9) {
+ if (dfx_server_base == NULL) {
+ pr_err("%s: DFX server base register not set\n",
+ __func__);
+ return;
+ }
+ cpu_ops = &armada_38x_cpu_ops;
+ independent_clocks = false;
+ ncpus = 1;
+ } else {
+ cpu_ops = &armada_xp_cpu_ops;
+ for_each_node_by_type(dn, "cpu")
+ ncpus++;
+ }
cpuclk = kzalloc(ncpus * sizeof(*cpuclk), GFP_KERNEL);
if (WARN_ON(!cpuclk))
goto cpuclk_out;
@@ -216,10 +326,12 @@ static void __init of_cpu_clk_setup(struct device_node *node)
cpuclk[cpu].reg_base = clock_complex_base;
if (pmu_dfs_base)
cpuclk[cpu].pmu_dfs = pmu_dfs_base + 4 * cpu;
+
+ cpuclk[cpu].dfx_server_base = dfx_server_base;
cpuclk[cpu].hw.init = &init;
init.name = cpuclk[cpu].clk_name;
- init.ops = &cpu_ops;
+ init.ops = cpu_ops;
init.flags = 0;
init.parent_names = &cpuclk[cpu].parent_name;
init.num_parents = 1;
@@ -228,6 +340,11 @@ static void __init of_cpu_clk_setup(struct device_node *node)
if (WARN_ON(IS_ERR(clk)))
goto bail_out;
clks[cpu] = clk;
+
+ if (independent_clocks == false) {
+ /* use 1 clock to all cpus */
+ break;
+ }
}
clk_data.clk_num = MAX_CPU;
clk_data.clks = clks;
@@ -242,7 +359,22 @@ clks_out:
kfree(cpuclk);
cpuclk_out:
iounmap(clock_complex_base);
+ iounmap(pmu_dfs_base);
+ iounmap(dfx_server_base);
+}
+
+static void __init armada_xp_cpu_clk_init(struct device_node *node)
+{
+ common_cpu_clk_init(node, false);
+}
+
+static void __init armada_38x_cpu_clk_init(struct device_node *node)
+{
+ common_cpu_clk_init(node, true);
}
CLK_OF_DECLARE(armada_xp_cpu_clock, "marvell,armada-xp-cpu-clock",
- of_cpu_clk_setup);
+ armada_xp_cpu_clk_init);
+CLK_OF_DECLARE(armada_38x_cpu_clock, "marvell,armada-380-cpu-clock",
+ armada_38x_cpu_clk_init);
+
--
2.1.0
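
As a minimal sketch of how the new clock would be consumed — not part of the
patch above, and using a hypothetical caller named my_cpufreq_target() — a
cpufreq backend could simply go through the common clk API. On Armada 38x the
set_rate hook only distinguishes a rising from a falling request, so the clock
effectively toggles between the full and half parent rate:

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

/* Hypothetical consumer, for illustration only. */
static int my_cpufreq_target(struct device *cpu_dev, unsigned long target_hz)
{
	struct clk *cpu_clk;
	int ret;

	/* "cpu0" matches the clock-names property in the DT example above */
	cpu_clk = clk_get(cpu_dev, "cpu0");
	if (IS_ERR(cpu_clk))
		return PTR_ERR(cpu_clk);

	/*
	 * armada_38x_clk_cpu_set_rate() picks divider 1 when the rate goes
	 * up and divider 2 when it goes down, then lets the PMSU DFS
	 * machinery perform the actual switch.
	 */
	ret = clk_set_rate(cpu_clk, target_hz);

	clk_put(cpu_clk);
	return ret;
}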
The register definitions were too verbose. Shorten them in order to
make the code more readable and to avoid having most of the
instructions span two lines.
Signed-off-by: Gregory CLEMENT <gregory.clement@free-electrons.com>
---
arch/arm/mach-mvebu/pmsu.c | 102 +++++++++++++++++++++++----------------------
1 file changed, 52 insertions(+), 50 deletions(-)
diff --git a/arch/arm/mach-mvebu/pmsu.c b/arch/arm/mach-mvebu/pmsu.c
index 4f4e22206ae5..d207f5fc13a6 100644
--- a/arch/arm/mach-mvebu/pmsu.c
+++ b/arch/arm/mach-mvebu/pmsu.c
@@ -46,27 +46,29 @@
#define PMSU_REG_SIZE 0x1000
/* PMSU MP registers */
-#define PMSU_CONTROL_AND_CONFIG(cpu) ((cpu * 0x100) + 0x104)
-#define PMSU_CONTROL_AND_CONFIG_DFS_REQ BIT(18)
-#define PMSU_CONTROL_AND_CONFIG_PWDDN_REQ BIT(16)
-#define PMSU_CONTROL_AND_CONFIG_L2_PWDDN BIT(20)
+#define PMSU_CTL_CFG(cpu) ((cpu * 0x100) + 0x104)
+#define PMSU_CTL_CFG_CPU0_FRQ_ID_SFT 4
+#define PMSU_CTL_CFG_CPU0_FRQ_ID_MSK 0xF
+#define PMSU_CTL_CFG_DFS_REQ BIT(18)
+#define PMSU_CTL_CFG_PWDDN_REQ BIT(16)
+#define PMSU_CTL_CFG_L2_PWDDN BIT(20)
#define PMSU_CPU_POWER_DOWN_CONTROL(cpu) ((cpu * 0x100) + 0x108)
#define PMSU_CPU_POWER_DOWN_DIS_SNP_Q_SKIP BIT(0)
-#define PMSU_STATUS_AND_MASK(cpu) ((cpu * 0x100) + 0x10c)
-#define PMSU_STATUS_AND_MASK_CPU_IDLE_WAIT BIT(16)
-#define PMSU_STATUS_AND_MASK_SNP_Q_EMPTY_WAIT BIT(17)
-#define PMSU_STATUS_AND_MASK_IRQ_WAKEUP BIT(20)
-#define PMSU_STATUS_AND_MASK_FIQ_WAKEUP BIT(21)
-#define PMSU_STATUS_AND_MASK_DBG_WAKEUP BIT(22)
-#define PMSU_STATUS_AND_MASK_IRQ_MASK BIT(24)
-#define PMSU_STATUS_AND_MASK_FIQ_MASK BIT(25)
+#define PMSU_STATUS_MSK(cpu) ((cpu * 0x100) + 0x10c)
+#define PMSU_STATUS_MSK_CPU_IDLE_WAIT BIT(16)
+#define PMSU_STATUS_MSK_SNP_Q_EMPTY_WAIT BIT(17)
+#define PMSU_STATUS_MSK_IRQ_WAKEUP BIT(20)
+#define PMSU_STATUS_MSK_FIQ_WAKEUP BIT(21)
+#define PMSU_STATUS_MSK_DBG_WAKEUP BIT(22)
+#define PMSU_STATUS_MSK_IRQ_MASK BIT(24)
+#define PMSU_STATUS_MSK_FIQ_MASK BIT(25)
-#define PMSU_EVENT_STATUS_AND_MASK(cpu) ((cpu * 0x100) + 0x120)
-#define PMSU_EVENT_STATUS_AND_MASK_DFS_DONE BIT(1)
-#define PMSU_EVENT_STATUS_AND_MASK_DFS_DONE_MASK BIT(17)
+#define PMSU_EVENT_STATUS_MSK(cpu) ((cpu * 0x100) + 0x120)
+#define PMSU_EVENT_STATUS_MSK_DFS_DONE BIT(1)
+#define PMSU_EVENT_STATUS_MSK_DFS_DONE_MASK BIT(17)
#define PMSU_BOOT_ADDR_REDIRECT_OFFSET(cpu) ((cpu * 0x100) + 0x124)
@@ -238,23 +240,23 @@ static int mvebu_v7_pmsu_idle_prepare(unsigned long flags)
* IRQ and FIQ as wakeup events, set wait for snoop queue empty
* indication and mask IRQ and FIQ from CPU
*/
- reg = readl(pmsu_mp_base + PMSU_STATUS_AND_MASK(hw_cpu));
- reg |= PMSU_STATUS_AND_MASK_CPU_IDLE_WAIT |
- PMSU_STATUS_AND_MASK_IRQ_WAKEUP |
- PMSU_STATUS_AND_MASK_FIQ_WAKEUP |
- PMSU_STATUS_AND_MASK_SNP_Q_EMPTY_WAIT |
- PMSU_STATUS_AND_MASK_IRQ_MASK |
- PMSU_STATUS_AND_MASK_FIQ_MASK;
- writel(reg, pmsu_mp_base + PMSU_STATUS_AND_MASK(hw_cpu));
-
- reg = readl(pmsu_mp_base + PMSU_CONTROL_AND_CONFIG(hw_cpu));
+ reg = readl(pmsu_mp_base + PMSU_STATUS_MSK(hw_cpu));
+ reg |= PMSU_STATUS_MSK_CPU_IDLE_WAIT |
+ PMSU_STATUS_MSK_IRQ_WAKEUP |
+ PMSU_STATUS_MSK_FIQ_WAKEUP |
+ PMSU_STATUS_MSK_SNP_Q_EMPTY_WAIT |
+ PMSU_STATUS_MSK_IRQ_MASK |
+ PMSU_STATUS_MSK_FIQ_MASK;
+ writel(reg, pmsu_mp_base + PMSU_STATUS_MSK(hw_cpu));
+
+ reg = readl(pmsu_mp_base + PMSU_CTL_CFG(hw_cpu));
/* ask HW to power down the L2 Cache if needed */
if (flags & PMSU_PREPARE_DEEP_IDLE)
- reg |= PMSU_CONTROL_AND_CONFIG_L2_PWDDN;
+ reg |= PMSU_CTL_CFG_L2_PWDDN;
/* request power down */
- reg |= PMSU_CONTROL_AND_CONFIG_PWDDN_REQ;
- writel(reg, pmsu_mp_base + PMSU_CONTROL_AND_CONFIG(hw_cpu));
+ reg |= PMSU_CTL_CFG_PWDDN_REQ;
+ writel(reg, pmsu_mp_base + PMSU_CTL_CFG(hw_cpu));
if (flags & PMSU_PREPARE_SNOOP_DISABLE) {
/* Disable snoop disable by HW - SW is taking care of it */
@@ -347,17 +349,17 @@ void mvebu_v7_pmsu_idle_exit(void)
if (pmsu_mp_base == NULL)
return;
/* cancel ask HW to power down the L2 Cache if possible */
- reg = readl(pmsu_mp_base + PMSU_CONTROL_AND_CONFIG(hw_cpu));
- reg &= ~PMSU_CONTROL_AND_CONFIG_L2_PWDDN;
- writel(reg, pmsu_mp_base + PMSU_CONTROL_AND_CONFIG(hw_cpu));
+ reg = readl(pmsu_mp_base + PMSU_CTL_CFG(hw_cpu));
+ reg &= ~PMSU_CTL_CFG_L2_PWDDN;
+ writel(reg, pmsu_mp_base + PMSU_CTL_CFG(hw_cpu));
/* cancel Enable wakeup events and mask interrupts */
- reg = readl(pmsu_mp_base + PMSU_STATUS_AND_MASK(hw_cpu));
- reg &= ~(PMSU_STATUS_AND_MASK_IRQ_WAKEUP | PMSU_STATUS_AND_MASK_FIQ_WAKEUP);
- reg &= ~PMSU_STATUS_AND_MASK_CPU_IDLE_WAIT;
- reg &= ~PMSU_STATUS_AND_MASK_SNP_Q_EMPTY_WAIT;
- reg &= ~(PMSU_STATUS_AND_MASK_IRQ_MASK | PMSU_STATUS_AND_MASK_FIQ_MASK);
- writel(reg, pmsu_mp_base + PMSU_STATUS_AND_MASK(hw_cpu));
+ reg = readl(pmsu_mp_base + PMSU_STATUS_MSK(hw_cpu));
+ reg &= ~(PMSU_STATUS_MSK_IRQ_WAKEUP | PMSU_STATUS_MSK_FIQ_WAKEUP);
+ reg &= ~PMSU_STATUS_MSK_CPU_IDLE_WAIT;
+ reg &= ~PMSU_STATUS_MSK_SNP_Q_EMPTY_WAIT;
+ reg &= ~(PMSU_STATUS_MSK_IRQ_MASK | PMSU_STATUS_MSK_FIQ_MASK);
+ writel(reg, pmsu_mp_base + PMSU_STATUS_MSK(hw_cpu));
}
static int mvebu_v7_cpu_pm_notify(struct notifier_block *self,
@@ -521,16 +523,16 @@ static void mvebu_pmsu_dfs_request_local(void *data)
local_irq_save(flags);
/* Prepare to enter idle */
- reg = readl(pmsu_mp_base + PMSU_STATUS_AND_MASK(cpu));
- reg |= PMSU_STATUS_AND_MASK_CPU_IDLE_WAIT |
- PMSU_STATUS_AND_MASK_IRQ_MASK |
- PMSU_STATUS_AND_MASK_FIQ_MASK;
- writel(reg, pmsu_mp_base + PMSU_STATUS_AND_MASK(cpu));
+ reg = readl(pmsu_mp_base + PMSU_STATUS_MSK(cpu));
+ reg |= PMSU_STATUS_MSK_CPU_IDLE_WAIT |
+ PMSU_STATUS_MSK_IRQ_MASK |
+ PMSU_STATUS_MSK_FIQ_MASK;
+ writel(reg, pmsu_mp_base + PMSU_STATUS_MSK(cpu));
/* Request the DFS transition */
- reg = readl(pmsu_mp_base + PMSU_CONTROL_AND_CONFIG(cpu));
- reg |= PMSU_CONTROL_AND_CONFIG_DFS_REQ;
- writel(reg, pmsu_mp_base + PMSU_CONTROL_AND_CONFIG(cpu));
+ reg = readl(pmsu_mp_base + PMSU_CTL_CFG(cpu));
+ reg |= PMSU_CTL_CFG_DFS_REQ;
+ writel(reg, pmsu_mp_base + PMSU_CTL_CFG(cpu));
/* The fact of entering idle will trigger the DFS transition */
wfi();
@@ -539,9 +541,9 @@ static void mvebu_pmsu_dfs_request_local(void *data)
* We're back from idle, the DFS transition has completed,
* clear the idle wait indication.
*/
- reg = readl(pmsu_mp_base + PMSU_STATUS_AND_MASK(cpu));
- reg &= ~PMSU_STATUS_AND_MASK_CPU_IDLE_WAIT;
- writel(reg, pmsu_mp_base + PMSU_STATUS_AND_MASK(cpu));
+ reg = readl(pmsu_mp_base + PMSU_STATUS_MSK(cpu));
+ reg &= ~PMSU_STATUS_MSK_CPU_IDLE_WAIT;
+ writel(reg, pmsu_mp_base + PMSU_STATUS_MSK(cpu));
local_irq_restore(flags);
}
@@ -569,8 +571,8 @@ int mvebu_pmsu_dfs_request(int cpu)
/* Poll until the DFS done event is generated */
timeout = jiffies + HZ;
while (time_before(jiffies, timeout)) {
- reg = readl(pmsu_mp_base + PMSU_EVENT_STATUS_AND_MASK(hwcpu));
- if (reg & PMSU_EVENT_STATUS_AND_MASK_DFS_DONE)
+ reg = readl(pmsu_mp_base + PMSU_EVENT_STATUS_MSK(hwcpu));
+ if (reg & PMSU_EVENT_STATUS_MSK_DFS_DONE)
break;
udelay(10);
}
--
2.1.0
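
Besides the renames, this patch introduces PMSU_CTL_CFG_CPU0_FRQ_ID_SFT/MSK,
which nothing here uses yet. Purely as an illustration of the intended
mask/shift idiom — a hypothetical helper that is not part of the patch,
assuming it sits in pmsu.c next to the existing readl/writel users of
pmsu_mp_base — programming that field would look roughly like this:

static void pmsu_set_cpu0_frq_id(u32 frq_id)
{
	u32 reg;

	/* read-modify-write of the CPU0 frequency ID field */
	reg = readl(pmsu_mp_base + PMSU_CTL_CFG(0));
	reg &= ~(PMSU_CTL_CFG_CPU0_FRQ_ID_MSK << PMSU_CTL_CFG_CPU0_FRQ_ID_SFT);
	reg |= (frq_id & PMSU_CTL_CFG_CPU0_FRQ_ID_MSK) <<
		PMSU_CTL_CFG_CPU0_FRQ_ID_SFT;
	writel(reg, pmsu_mp_base + PMSU_CTL_CFG(0));
}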
In preparation for supporting cpufreq on Armada 38x:
- rename the function to be more generic.
- move the interrupt masking into the _dfs_request_local function so it
  can be used by both SoCs.
- add stubs allowing the support for a new SoC to be registered
Signed-off-by: Gregory CLEMENT <gregory.clement@free-electrons.com>
---
arch/arm/mach-mvebu/pmsu.c | 48 +++++++++++++++++++++++++---------------------
1 file changed, 26 insertions(+), 22 deletions(-)
diff --git a/arch/arm/mach-mvebu/pmsu.c b/arch/arm/mach-mvebu/pmsu.c
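
The diff of this third patch is truncated here. Purely to illustrate the
"stubs allowing the support for a new SoC to be registered" mentioned in the
description — the names below are hypothetical and need not match the actual
patch — such a registration hook could look roughly like:

/* hypothetical per-SoC DFS request hook, for illustration only */
static int (*mvebu_pmsu_dfs_request_fn)(int cpu);

/* stub a SoC-specific backend would call at init time */
void mvebu_pmsu_register_dfs_request(int (*fn)(int cpu))
{
	mvebu_pmsu_dfs_request_fn = fn;
}

int mvebu_pmsu_dfs_request(int cpu)
{
	if (!mvebu_pmsu_dfs_request_fn)
		return -ENODEV;
	return mvebu_pmsu_dfs_request_fn(cpu);
}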