meta-st-stm32mp/recipes-kernel/linux/linux-stm32mp/4.19/4.19.9/0002-ARM-stm32mp1-r0-rc1-CL...

1472 lines
40 KiB
Diff

From 2eae46b85e51f0b995ddfdd6176501d4a6cf1520 Mon Sep 17 00:00:00 2001
From: Romuald JEANNE <romuald.jeanne@st.com>
Date: Tue, 13 Nov 2018 12:15:45 +0100
Subject: [PATCH 02/52] ARM: stm32mp1-r0-rc1: CLOCK
---
drivers/clk/clk-stm32mp1.c | 1022 ++++++++++++++++++++++++++---
drivers/clk/clk.c | 24 +-
include/dt-bindings/clock/stm32mp1-clks.h | 3 -
include/linux/clk.h | 1 +
4 files changed, 968 insertions(+), 82 deletions(-)
diff --git a/drivers/clk/clk-stm32mp1.c b/drivers/clk/clk-stm32mp1.c
index a907555..50d739a 100644
--- a/drivers/clk/clk-stm32mp1.c
+++ b/drivers/clk/clk-stm32mp1.c
@@ -5,15 +5,22 @@
* Author: Gabriel Fernandez <gabriel.fernandez@st.com> for STMicroelectronics.
*/
+#include <linux/arm-smccc.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/delay.h>
#include <linux/err.h>
+#include <linux/interrupt.h>
#include <linux/io.h>
+#include <linux/mfd/syscon.h>
+#include <linux/mm.h>
#include <linux/of.h>
#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/regmap.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
+#include <linux/syscore_ops.h>
#include <dt-bindings/clock/stm32mp1-clks.h>
@@ -101,6 +108,10 @@ static DEFINE_SPINLOCK(rlock);
#define RCC_TIMG2PRER 0x82C
#define RCC_RTCDIVR 0x44
#define RCC_DBGCFGR 0x80C
+#define RCC_SREQSETR 0x104
+#define RCC_SREQCLRR 0x108
+#define RCC_CIER 0x414
+#define RCC_CIFR 0x418
#define RCC_CLR 0x4
@@ -121,7 +132,7 @@ static const char * const cpu_src[] = {
};
static const char * const axi_src[] = {
- "ck_hsi", "ck_hse", "pll2_p", "pll3_p"
+ "ck_hsi", "ck_hse", "pll2_p"
};
static const char * const per_src[] = {
@@ -225,19 +236,19 @@ static const char * const usart6_src[] = {
};
static const char * const fdcan_src[] = {
- "ck_hse", "pll3_q", "pll4_q"
+ "ck_hse", "pll3_q", "pll4_q", "pll4_r"
};
static const char * const sai_src[] = {
- "pll4_q", "pll3_q", "i2s_ckin", "ck_per"
+ "pll4_q", "pll3_q", "i2s_ckin", "ck_per", "pll3_r"
};
static const char * const sai2_src[] = {
- "pll4_q", "pll3_q", "i2s_ckin", "ck_per", "spdif_ck_symb"
+ "pll4_q", "pll3_q", "i2s_ckin", "ck_per", "spdif_ck_symb", "pll3_r"
};
static const char * const adc12_src[] = {
- "pll4_q", "ck_per"
+ "pll4_r", "ck_per", "pll3_q"
};
static const char * const dsi_src[] = {
@@ -269,7 +280,7 @@ static const struct clk_div_table axi_div_table[] = {
static const struct clk_div_table mcu_div_table[] = {
{ 0, 1 }, { 1, 2 }, { 2, 4 }, { 3, 8 },
{ 4, 16 }, { 5, 32 }, { 6, 64 }, { 7, 128 },
- { 8, 512 }, { 9, 512 }, { 10, 512}, { 11, 512 },
+ { 8, 256 }, { 9, 512 }, { 10, 512}, { 11, 512 },
{ 12, 512 }, { 13, 512 }, { 14, 512}, { 15, 512 },
{ 0 },
};
@@ -356,17 +367,20 @@ struct stm32_gate_cfg {
struct gate_cfg *gate;
struct stm32_mgate *mgate;
const struct clk_ops *ops;
+ const struct clk_ops *ops_sec;
};
struct stm32_div_cfg {
struct div_cfg *div;
const struct clk_ops *ops;
+ const struct clk_ops *ops_sec;
};
struct stm32_mux_cfg {
struct mux_cfg *mux;
struct stm32_mmux *mmux;
const struct clk_ops *ops;
+ const struct clk_ops *ops_sec;
};
/* STM32 Composite clock */
@@ -376,6 +390,11 @@ struct stm32_composite_cfg {
const struct stm32_mux_cfg *mux;
};
/*
 * Return non-zero when the RCC is secured (TrustZone enabled).
 *
 * Reads bit 0 of the first RCC register (offset 0 of @base).
 * NOTE(review): presumably this is the TZEN bit of RCC_TZCR — confirm
 * against the STM32MP1 reference manual; only the bit-0 test is
 * established by this code.
 */
static inline int _is_soc_secured(void __iomem *base)
{
	return readl_relaxed(base) & 0x1;
}
+
static struct clk_hw *
_clk_hw_register_gate(struct device *dev,
struct clk_hw_onecell_data *clk_data,
@@ -592,6 +611,9 @@ clk_stm32_register_gate_ops(struct device *dev,
if (cfg->ops)
init.ops = cfg->ops;
+ if (cfg->ops_sec && _is_soc_secured(base))
+ init.ops = cfg->ops_sec;
+
hw = _get_stm32_gate(base, cfg, lock);
if (IS_ERR(hw))
return ERR_PTR(-ENOMEM);
@@ -630,6 +652,9 @@ clk_stm32_register_composite(struct device *dev,
if (cfg->mux->ops)
mux_ops = cfg->mux->ops;
+
+ if (cfg->mux->ops_sec && _is_soc_secured(base))
+ mux_ops = cfg->mux->ops_sec;
}
}
@@ -641,6 +666,9 @@ clk_stm32_register_composite(struct device *dev,
if (cfg->div->ops)
div_ops = cfg->div->ops;
+
+ if (cfg->div->ops_sec && _is_soc_secured(base))
+ div_ops = cfg->div->ops_sec;
}
}
@@ -652,6 +680,9 @@ clk_stm32_register_composite(struct device *dev,
if (cfg->gate->ops)
gate_ops = cfg->gate->ops;
+
+ if (cfg->gate->ops_sec && _is_soc_secured(base))
+ gate_ops = cfg->gate->ops_sec;
}
}
@@ -1193,7 +1224,8 @@ _clk_stm32_register_composite(struct device *dev,
.func = _clk_stm32_register_gate,\
}
-#define _STM32_GATE(_gate_offset, _gate_bit_idx, _gate_flags, _mgate, _ops)\
+#define _STM32_GATE(_gate_offset, _gate_bit_idx, _gate_flags, _mgate, _ops,\
+ _ops_sec)\
(&(struct stm32_gate_cfg) {\
&(struct gate_cfg) {\
.reg_off = _gate_offset,\
@@ -1202,6 +1234,7 @@ _clk_stm32_register_composite(struct device *dev,
},\
.mgate = _mgate,\
.ops = _ops,\
+ .ops_sec = _ops_sec,\
})
#define _STM32_MGATE(_mgate)\
@@ -1209,11 +1242,11 @@ _clk_stm32_register_composite(struct device *dev,
#define _GATE(_gate_offset, _gate_bit_idx, _gate_flags)\
_STM32_GATE(_gate_offset, _gate_bit_idx, _gate_flags,\
- NULL, NULL)\
+ NULL, NULL, NULL)\
#define _GATE_MP1(_gate_offset, _gate_bit_idx, _gate_flags)\
_STM32_GATE(_gate_offset, _gate_bit_idx, _gate_flags,\
- NULL, &mp1_gate_clk_ops)\
+ NULL, &mp1_gate_clk_ops, NULL)\
#define _MGATE_MP1(_mgate)\
.gate = &per_gate_cfg[_mgate]
@@ -1227,7 +1260,7 @@ _clk_stm32_register_composite(struct device *dev,
_STM32_MGATE(_mgate))
#define _STM32_DIV(_div_offset, _div_shift, _div_width,\
- _div_flags, _div_table, _ops)\
+ _div_flags, _div_table, _ops, _ops_sec)\
.div = &(struct stm32_div_cfg) {\
&(struct div_cfg) {\
.reg_off = _div_offset,\
@@ -1237,13 +1270,14 @@ _clk_stm32_register_composite(struct device *dev,
.table = _div_table,\
},\
.ops = _ops,\
+ .ops_sec = _ops_sec,\
}
#define _DIV(_div_offset, _div_shift, _div_width, _div_flags, _div_table)\
_STM32_DIV(_div_offset, _div_shift, _div_width,\
- _div_flags, _div_table, NULL)\
+ _div_flags, _div_table, NULL, NULL)\
-#define _STM32_MUX(_offset, _shift, _width, _mux_flags, _mmux, _ops)\
+#define _STM32_MUX(_offset, _shift, _width, _mux_flags, _mmux, _ops, _ops_sec)\
.mux = &(struct stm32_mux_cfg) {\
&(struct mux_cfg) {\
.reg_off = _offset,\
@@ -1254,10 +1288,11 @@ _clk_stm32_register_composite(struct device *dev,
},\
.mmux = _mmux,\
.ops = _ops,\
+ .ops_sec = _ops_sec,\
}
#define _MUX(_offset, _shift, _width, _mux_flags)\
- _STM32_MUX(_offset, _shift, _width, _mux_flags, NULL, NULL)\
+ _STM32_MUX(_offset, _shift, _width, _mux_flags, NULL, NULL, NULL)
#define _MMUX(_mmux) .mux = &ker_mux_cfg[_mmux]
@@ -1286,10 +1321,513 @@ _clk_stm32_register_composite(struct device *dev,
MGATE_MP1(_id, _name, _parent, _flags, _mgate)
#define KCLK(_id, _name, _parents, _flags, _mgate, _mmux)\
- COMPOSITE(_id, _name, _parents, CLK_OPS_PARENT_ENABLE | _flags,\
- _MGATE_MP1(_mgate),\
- _MMUX(_mmux),\
- _NO_DIV)
+ COMPOSITE(_id, _name, _parents, CLK_OPS_PARENT_ENABLE |\
+ CLK_SET_RATE_NO_REPARENT | _flags,\
+ _MGATE_MP1(_mgate),\
+ _MMUX(_mmux),\
+ _NO_DIV)
/*
 *
 * Security management
 *
 * When the RCC is secured (see _is_soc_secured()), some registers are
 * written through SMC calls to the secure monitor instead of directly.
 */

/* SMC function ID of the RCC secure service */
#define STM32_SVC_RCC 0x82001000
/* Operations understood by the secure RCC service */
#define STM32_WRITE 0x1
#define STM32_SET_BITS 0x2
#define STM32_CLR_BITS 0x3

/*
 * Fire-and-forget SMC helper: the result in 'res' is discarded.
 * NOTE(review): appears unused in this file — the checked helpers
 * below are used instead; confirm before relying on it.
 */
#define SMC(class, op, address, val)\
	({\
		struct arm_smccc_res res;\
		arm_smccc_smc(class, op, address, val,\
			      0, 0, 0, 0, &res);\
	})
+
+static u32 stm32_clk_writel_secure(u32 value, void __iomem *reg)
+{
+ struct arm_smccc_res res;
+ u32 address;
+
+ address = offset_in_page(reg);
+
+ arm_smccc_smc(STM32_SVC_RCC, STM32_WRITE, address, value, 0, 0, 0,
+ 0, &res);
+
+ if (res.a0)
+ pr_warn("%s: Failed to write in secure mode at 0x%x (err = %ld)\n"
+ , __func__
+ , address
+ , res.a0);
+
+ return res.a0;
+}
+
+static u32 stm32_clk_bit_secure(u32 cmd, u32 value, void __iomem *reg)
+{
+ struct arm_smccc_res res;
+ u32 address;
+
+ address = offset_in_page(reg);
+
+ arm_smccc_smc(STM32_SVC_RCC, cmd, address, value, 0, 0, 0,
+ 0, &res);
+
+ if (res.a0)
+ pr_warn("%s: Failed to write in secure mode at 0x%x (err = %ld)\n"
+ , __func__
+ , address
+ , res.a0);
+
+ return res.a0;
+}
+
+static void clk_sgate_endisable(struct clk_hw *hw, int enable)
+{
+ struct clk_gate *gate = to_clk_gate(hw);
+ unsigned long flags = 0;
+ u32 cmd;
+
+ spin_lock_irqsave(gate->lock, flags);
+
+ if (enable)
+ cmd = STM32_SET_BITS;
+ else
+ cmd = STM32_CLR_BITS;
+
+ stm32_clk_bit_secure(cmd, BIT(gate->bit_idx), gate->reg);
+
+ spin_unlock_irqrestore(gate->lock, flags);
+}
+
+static int clk_sgate_enable(struct clk_hw *hw)
+{
+ clk_sgate_endisable(hw, 1);
+
+ return 0;
+}
+
+static void clk_sgate_disable(struct clk_hw *hw)
+{
+ clk_sgate_endisable(hw, 0);
+}
+
+static const struct clk_ops clk_sgate_ops = {
+ .enable = clk_sgate_enable,
+ .disable = clk_sgate_disable,
+ .is_enabled = clk_gate_is_enabled,
+};
+
+static u8 clk_smux_get_parent(struct clk_hw *hw)
+{
+ return clk_mux_ops.get_parent(hw);
+}
+
+static int clk_smux_set_parent(struct clk_hw *hw, u8 index)
+{
+ struct clk_mux *mux = to_clk_mux(hw);
+ u32 val;
+ unsigned long flags = 0;
+
+ if (mux->table) {
+ index = mux->table[index];
+ } else {
+ if (mux->flags & CLK_MUX_INDEX_BIT)
+ index = 1 << index;
+
+ if (mux->flags & CLK_MUX_INDEX_ONE)
+ index++;
+ }
+
+ spin_lock_irqsave(mux->lock, flags);
+
+ val = clk_readl(mux->reg);
+ val &= ~(mux->mask << mux->shift);
+ val |= index << mux->shift;
+
+ stm32_clk_writel_secure(val, mux->reg);
+
+ spin_unlock_irqrestore(mux->lock, flags);
+
+ return 0;
+}
+
+static const struct clk_ops clk_smux_ops = {
+ .get_parent = clk_smux_get_parent,
+ .set_parent = clk_smux_set_parent,
+ .determine_rate = __clk_mux_determine_rate,
+};
+
+static struct clk_hw *clk_hw_register_smux(struct device *dev,
+ const char *name,
+ const char * const *parent_names,
+ u8 num_parents,
+ unsigned long flags,
+ void __iomem *reg, u8 shift,
+ u8 width,
+ u8 clk_mux_flags,
+ spinlock_t *lock)
+{
+ u32 mask = BIT(width) - 1;
+ struct clk_mux *mux;
+ struct clk_hw *hw;
+ struct clk_init_data init;
+ int ret;
+
+ /* allocate the mux */
+ mux = kzalloc(sizeof(*mux), GFP_KERNEL);
+ if (!mux)
+ return ERR_PTR(-ENOMEM);
+
+ init.name = name;
+
+ init.ops = &clk_smux_ops;
+
+ init.flags = flags | CLK_IS_BASIC;
+ init.parent_names = parent_names;
+ init.num_parents = num_parents;
+
+ /* struct clk_mux assignments */
+ mux->reg = reg;
+ mux->shift = shift;
+ mux->mask = mask;
+ mux->flags = clk_mux_flags;
+ mux->lock = lock;
+ mux->table = NULL;
+ mux->hw.init = &init;
+
+ hw = &mux->hw;
+ ret = clk_hw_register(dev, hw);
+ if (ret) {
+ kfree(mux);
+ hw = ERR_PTR(ret);
+ }
+
+ return hw;
+}
+
+static struct clk_hw *
+__clk_hw_register_mux(struct device *dev,
+ struct clk_hw_onecell_data *clk_data,
+ void __iomem *base, spinlock_t *lock,
+ const struct clock_config *cfg)
+{
+ struct mux_cfg *mux_cfg = cfg->cfg;
+
+ if (!_is_soc_secured(base))
+ return clk_hw_register_mux(dev, cfg->name, cfg->parent_names,
+ cfg->num_parents, cfg->flags,
+ mux_cfg->reg_off + base,
+ mux_cfg->shift,
+ mux_cfg->width, mux_cfg->mux_flags,
+ lock);
+ else
+ return clk_hw_register_smux(dev, cfg->name,
+ cfg->parent_names,
+ cfg->num_parents, cfg->flags,
+ mux_cfg->reg_off + base,
+ mux_cfg->shift,
+ mux_cfg->width,
+ mux_cfg->mux_flags,
+ lock);
+}
+
+struct clk_div_secure {
+ struct clk_divider div;
+ u8 secure;
+};
+
+#define to_clk_div_secure(_hw) container_of(_hw, struct clk_div_secure, div)
+
+static unsigned long clk_sdivider_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ return clk_divider_ops.recalc_rate(hw, parent_rate);
+}
+
+static long clk_sdivider_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *prate)
+{
+ return clk_divider_ops.round_rate(hw, rate, prate);
+}
+
+#define div_mask(width) ((1 << (width)) - 1)
+
+static int clk_sdivider_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct clk_divider *divider = to_clk_divider(hw);
+ unsigned int value;
+ unsigned long flags = 0;
+ u32 val;
+
+ value = divider_get_val(rate, parent_rate, divider->table,
+ divider->width, divider->flags);
+
+ if (value < 0)
+ return value;
+
+ spin_lock_irqsave(divider->lock, flags);
+
+ if (divider->flags & CLK_DIVIDER_HIWORD_MASK) {
+ val = div_mask(divider->width) << (divider->shift + 16);
+ } else {
+ val = clk_readl(divider->reg);
+ val &= ~(div_mask(divider->width) << divider->shift);
+ }
+ val |= (u32)value << divider->shift;
+
+ stm32_clk_writel_secure(val, divider->reg);
+
+ spin_unlock_irqrestore(divider->lock, flags);
+
+ return 0;
+}
+
+static const struct clk_ops clk_sdivider_ops = {
+ .recalc_rate = clk_sdivider_recalc_rate,
+ .round_rate = clk_sdivider_round_rate,
+ .set_rate = clk_sdivider_set_rate,
+};
+
+static struct clk_hw *
+clk_hw_register_sdivider_table(struct device *dev, const char *name,
+ const char *parent_name,
+ unsigned long flags,
+ void __iomem *reg,
+ u8 shift, u8 width,
+ u8 clk_divider_flags,
+ const struct clk_div_table *table,
+ spinlock_t *lock)
+{
+ struct clk_divider *div;
+ struct clk_hw *hw;
+ struct clk_init_data init;
+ int ret;
+
+ /* allocate the divider */
+ div = kzalloc(sizeof(*div), GFP_KERNEL);
+ if (!div)
+ return ERR_PTR(-ENOMEM);
+
+ init.name = name;
+ if (clk_divider_flags & CLK_DIVIDER_READ_ONLY)
+ init.ops = &clk_divider_ro_ops;
+ else
+ init.ops = &clk_sdivider_ops;
+
+ init.flags = flags | CLK_IS_BASIC;
+ init.parent_names = (parent_name ? &parent_name : NULL);
+ init.num_parents = (parent_name ? 1 : 0);
+
+ /* struct clk_divider assignments */
+ div->reg = reg;
+ div->shift = shift;
+ div->width = width;
+ div->flags = clk_divider_flags;
+ div->lock = lock;
+ div->hw.init = &init;
+ div->table = table;
+
+ /* register the clock */
+ hw = &div->hw;
+
+ ret = clk_hw_register(dev, hw);
+ if (ret) {
+ kfree(div);
+ hw = ERR_PTR(ret);
+ }
+
+ return hw;
+}
+
+static struct clk_hw *
+__clk_hw_register_divider_table(struct device *dev,
+ struct clk_hw_onecell_data *clk_data,
+ void __iomem *base, spinlock_t *lock,
+ const struct clock_config *cfg)
+{
+ struct div_cfg *div_cfg = cfg->cfg;
+
+ if (!_is_soc_secured(base))
+ return clk_hw_register_divider_table(dev, cfg->name,
+ cfg->parent_name,
+ cfg->flags,
+ div_cfg->reg_off + base,
+ div_cfg->shift,
+ div_cfg->width,
+ div_cfg->div_flags,
+ div_cfg->table,
+ lock);
+ else
+ return clk_hw_register_sdivider_table(dev, cfg->name,
+ cfg->parent_name,
+ cfg->flags,
+ div_cfg->reg_off + base,
+ div_cfg->shift,
+ div_cfg->width,
+ div_cfg->div_flags,
+ div_cfg->table,
+ lock);
+}
+
+static int mp1_sgate_clk_enable(struct clk_hw *hw)
+{
+ struct clk_gate *gate = to_clk_gate(hw);
+ unsigned long flags = 0;
+
+ spin_lock_irqsave(gate->lock, flags);
+
+ stm32_clk_bit_secure(STM32_SET_BITS, BIT(gate->bit_idx),
+ gate->reg);
+
+ spin_unlock_irqrestore(gate->lock, flags);
+
+ return 0;
+}
+
+static void mp1_sgate_clk_disable(struct clk_hw *hw)
+{
+ struct clk_gate *gate = to_clk_gate(hw);
+ unsigned long flags = 0;
+
+ spin_lock_irqsave(gate->lock, flags);
+
+ stm32_clk_bit_secure(STM32_SET_BITS, BIT(gate->bit_idx),
+ gate->reg + RCC_CLR);
+
+ spin_unlock_irqrestore(gate->lock, flags);
+}
+
+static const struct clk_ops mp1_sgate_clk_ops = {
+ .enable = mp1_sgate_clk_enable,
+ .disable = mp1_sgate_clk_disable,
+ .is_enabled = clk_gate_is_enabled,
+};
+
+static int mp1_s_mgate_clk_enable(struct clk_hw *hw)
+{
+ struct clk_gate *gate = to_clk_gate(hw);
+ struct stm32_clk_mgate *clk_mgate = to_clk_mgate(gate);
+
+ clk_mgate->mgate->flag |= clk_mgate->mask;
+
+ mp1_sgate_clk_enable(hw);
+
+ return 0;
+}
+
+static void mp1_s_mgate_clk_disable(struct clk_hw *hw)
+{
+ struct clk_gate *gate = to_clk_gate(hw);
+ struct stm32_clk_mgate *clk_mgate = to_clk_mgate(gate);
+
+ clk_mgate->mgate->flag &= ~clk_mgate->mask;
+
+ if (clk_mgate->mgate->flag == 0)
+ mp1_sgate_clk_disable(hw);
+}
+
+static const struct clk_ops mp1_s_mgate_clk_ops = {
+ .enable = mp1_s_mgate_clk_enable,
+ .disable = mp1_s_mgate_clk_disable,
+ .is_enabled = clk_gate_is_enabled,
+
+};
+
+static u8 clk_s_mmux_get_parent(struct clk_hw *hw)
+{
+ return clk_smux_ops.get_parent(hw);
+}
+
+static int clk_s_mmux_set_parent(struct clk_hw *hw, u8 index)
+{
+ struct clk_mux *mux = to_clk_mux(hw);
+ struct stm32_clk_mmux *clk_mmux = to_clk_mmux(mux);
+ struct clk_hw *hwp;
+ int ret, n;
+
+ ret = clk_smux_ops.set_parent(hw, index);
+ if (ret)
+ return ret;
+
+ hwp = clk_hw_get_parent(hw);
+
+ for (n = 0; n < clk_mmux->mmux->nbr_clk; n++)
+ if (clk_mmux->mmux->hws[n] != hw)
+ clk_hw_reparent(clk_mmux->mmux->hws[n], hwp);
+
+ return 0;
+}
+
+static const struct clk_ops clk_s_mmux_ops = {
+ .get_parent = clk_s_mmux_get_parent,
+ .set_parent = clk_s_mmux_set_parent,
+ .determine_rate = __clk_mux_determine_rate,
+};
+
/* Standalone mux registered through the secure-aware helper */
#define SMUX(_id, _name, _parents, _flags,\
	_offset, _shift, _width, _mux_flags)\
{\
	.id = _id,\
	.name = _name,\
	.parent_names = _parents,\
	.num_parents = ARRAY_SIZE(_parents),\
	.flags = _flags,\
	.cfg = &(struct mux_cfg) {\
		.reg_off = _offset,\
		.shift = _shift,\
		.width = _width,\
		.mux_flags = _mux_flags,\
	},\
	.func = __clk_hw_register_mux,\
}

/* Standalone divider (table variant) registered through the secure-aware helper */
#define SDIV_TABLE(_id, _name, _parent, _flags, _offset, _shift, _width,\
	_div_flags, _div_table)\
{\
	.id = _id,\
	.name = _name,\
	.parent_name = _parent,\
	.flags = _flags,\
	.cfg = &(struct div_cfg) {\
		.reg_off = _offset,\
		.shift = _shift,\
		.width = _width,\
		.div_flags = _div_flags,\
		.table = _div_table,\
	},\
	.func = __clk_hw_register_divider_table,\
}

/* SDIV_TABLE without a divider table */
#define SDIV(_id, _name, _parent, _flags, _offset, _shift, _width,\
	_div_flags)\
	SDIV_TABLE(_id, _name, _parent, _flags, _offset, _shift, _width,\
		_div_flags, NULL)

/* Simple gate: secure ops (clk_sgate_ops) used when the SoC is secured */
#define _S_GATE(_gate_offset, _gate_bit_idx, _gate_flags)\
	_STM32_GATE(_gate_offset, _gate_bit_idx, _gate_flags,\
		NULL, NULL, &clk_sgate_ops)

#define SGATE(_id, _name, _parent, _flags, _offset, _bit_idx, _gate_flags)\
	STM32_GATE(_id, _name, _parent, _flags,\
		_S_GATE(_offset, _bit_idx, _gate_flags))

/* MP1 SET/CLR gate: mp1_gate_clk_ops normally, mp1_sgate_clk_ops when secured */
#define _S_GATE_MP1(_gate_offset, _gate_bit_idx, _gate_flags)\
	_STM32_GATE(_gate_offset, _gate_bit_idx, _gate_flags,\
		NULL, &mp1_gate_clk_ops, &mp1_sgate_clk_ops)

#define SGATE_MP1(_id, _name, _parent, _flags, _offset, _bit_idx, _gate_flags)\
	STM32_GATE(_id, _name, _parent, _flags,\
		_S_GATE_MP1(_offset, _bit_idx, _gate_flags))

/* Composite divider leg falling back to secure writes when secured */
#define _S_DIV(_div_offset, _div_shift, _div_width, _div_flags, _div_table)\
	_STM32_DIV(_div_offset, _div_shift, _div_width,\
		_div_flags, _div_table, NULL, &clk_sdivider_ops)

/* Composite mux leg falling back to secure writes when secured */
#define _S_MUX(_offset, _shift, _width, _mux_flags)\
	_STM32_MUX(_offset, _shift, _width, _mux_flags,\
		NULL, NULL, &clk_smux_ops)
enum {
G_SAI1,
@@ -1408,7 +1946,7 @@ enum {
static struct stm32_mgate mp1_mgate[G_LAST];
#define _K_GATE(_id, _gate_offset, _gate_bit_idx, _gate_flags,\
- _mgate, _ops)\
+ _mgate, _ops, _ops_sec)\
[_id] = {\
&(struct gate_cfg) {\
.reg_off = _gate_offset,\
@@ -1417,15 +1955,24 @@ static struct stm32_mgate mp1_mgate[G_LAST];
},\
.mgate = _mgate,\
.ops = _ops,\
+ .ops_sec = _ops_sec,\
}
#define K_GATE(_id, _gate_offset, _gate_bit_idx, _gate_flags)\
_K_GATE(_id, _gate_offset, _gate_bit_idx, _gate_flags,\
- NULL, &mp1_gate_clk_ops)
+ NULL, &mp1_gate_clk_ops, NULL)
+
+#define K_GATE_S(_id, _gate_offset, _gate_bit_idx, _gate_flags)\
+ _K_GATE(_id, _gate_offset, _gate_bit_idx, _gate_flags,\
+ NULL, &mp1_gate_clk_ops, &mp1_sgate_clk_ops)
#define K_MGATE(_id, _gate_offset, _gate_bit_idx, _gate_flags)\
_K_GATE(_id, _gate_offset, _gate_bit_idx, _gate_flags,\
- &mp1_mgate[_id], &mp1_mgate_clk_ops)
+ &mp1_mgate[_id], &mp1_mgate_clk_ops, NULL)
+
+#define K_MGATE_S(_id, _gate_offset, _gate_bit_idx, _gate_flags)\
+ _K_GATE(_id, _gate_offset, _gate_bit_idx, _gate_flags,\
+ &mp1_mgate[_id], &mp1_mgate_clk_ops, &mp1_s_mgate_clk_ops)
/* Peripheral gates */
static struct stm32_gate_cfg per_gate_cfg[G_LAST] = {
@@ -1490,17 +2037,17 @@ static struct stm32_gate_cfg per_gate_cfg[G_LAST] = {
K_MGATE(G_DSI, RCC_APB4ENSETR, 4, 0),
K_MGATE(G_LTDC, RCC_APB4ENSETR, 0, 0),
- K_GATE(G_STGEN, RCC_APB5ENSETR, 20, 0),
- K_GATE(G_BSEC, RCC_APB5ENSETR, 16, 0),
- K_GATE(G_IWDG1, RCC_APB5ENSETR, 15, 0),
- K_GATE(G_TZPC, RCC_APB5ENSETR, 13, 0),
- K_GATE(G_TZC2, RCC_APB5ENSETR, 12, 0),
- K_GATE(G_TZC1, RCC_APB5ENSETR, 11, 0),
- K_GATE(G_RTCAPB, RCC_APB5ENSETR, 8, 0),
- K_MGATE(G_USART1, RCC_APB5ENSETR, 4, 0),
- K_MGATE(G_I2C6, RCC_APB5ENSETR, 3, 0),
- K_MGATE(G_I2C4, RCC_APB5ENSETR, 2, 0),
- K_MGATE(G_SPI6, RCC_APB5ENSETR, 0, 0),
+ K_GATE_S(G_STGEN, RCC_APB5ENSETR, 20, 0),
+ K_GATE_S(G_BSEC, RCC_APB5ENSETR, 16, 0),
+ K_GATE_S(G_IWDG1, RCC_APB5ENSETR, 15, 0),
+ K_GATE_S(G_TZPC, RCC_APB5ENSETR, 13, 0),
+ K_GATE_S(G_TZC2, RCC_APB5ENSETR, 12, 0),
+ K_GATE_S(G_TZC1, RCC_APB5ENSETR, 11, 0),
+ K_GATE_S(G_RTCAPB, RCC_APB5ENSETR, 8, 0),
+ K_MGATE_S(G_USART1, RCC_APB5ENSETR, 4, 0),
+ K_MGATE_S(G_I2C6, RCC_APB5ENSETR, 3, 0),
+ K_MGATE_S(G_I2C4, RCC_APB5ENSETR, 2, 0),
+ K_MGATE_S(G_SPI6, RCC_APB5ENSETR, 0, 0),
K_MGATE(G_SDMMC3, RCC_AHB2ENSETR, 16, 0),
K_MGATE(G_USBO, RCC_AHB2ENSETR, 8, 0),
@@ -1529,11 +2076,11 @@ static struct stm32_gate_cfg per_gate_cfg[G_LAST] = {
K_GATE(G_GPIOB, RCC_AHB4ENSETR, 1, 0),
K_GATE(G_GPIOA, RCC_AHB4ENSETR, 0, 0),
- K_GATE(G_BKPSRAM, RCC_AHB5ENSETR, 8, 0),
- K_MGATE(G_RNG1, RCC_AHB5ENSETR, 6, 0),
- K_GATE(G_HASH1, RCC_AHB5ENSETR, 5, 0),
- K_GATE(G_CRYP1, RCC_AHB5ENSETR, 4, 0),
- K_GATE(G_GPIOZ, RCC_AHB5ENSETR, 0, 0),
+ K_GATE_S(G_BKPSRAM, RCC_AHB5ENSETR, 8, 0),
+ K_MGATE_S(G_RNG1, RCC_AHB5ENSETR, 6, 0),
+ K_GATE_S(G_HASH1, RCC_AHB5ENSETR, 5, 0),
+ K_GATE_S(G_CRYP1, RCC_AHB5ENSETR, 4, 0),
+ K_GATE_S(G_GPIOZ, RCC_AHB5ENSETR, 0, 0),
K_GATE(G_USBH, RCC_AHB6ENSETR, 24, 0),
K_GATE(G_CRC1, RCC_AHB6ENSETR, 20, 0),
@@ -1541,12 +2088,15 @@ static struct stm32_gate_cfg per_gate_cfg[G_LAST] = {
K_MGATE(G_SDMMC1, RCC_AHB6ENSETR, 16, 0),
K_MGATE(G_QSPI, RCC_AHB6ENSETR, 14, 0),
K_MGATE(G_FMC, RCC_AHB6ENSETR, 12, 0),
+
K_GATE(G_ETHMAC, RCC_AHB6ENSETR, 10, 0),
K_GATE(G_ETHRX, RCC_AHB6ENSETR, 9, 0),
K_GATE(G_ETHTX, RCC_AHB6ENSETR, 8, 0),
K_GATE(G_ETHCK, RCC_AHB6ENSETR, 7, 0),
+
K_MGATE(G_GPU, RCC_AHB6ENSETR, 5, 0),
K_GATE(G_MDMA, RCC_AHB6ENSETR, 0, 0),
+
K_GATE(G_ETHSTP, RCC_AHB6LPENSETR, 11, 0),
};
@@ -1591,7 +2141,7 @@ enum {
static struct stm32_mmux ker_mux[M_LAST];
-#define _K_MUX(_id, _offset, _shift, _width, _mux_flags, _mmux, _ops)\
+#define _K_MUX(_id, _offset, _shift, _width, _mux_flags, _mmux, _ops, _ops_sec)\
[_id] = {\
&(struct mux_cfg) {\
.reg_off = _offset,\
@@ -1602,15 +2152,24 @@ static struct stm32_mmux ker_mux[M_LAST];
},\
.mmux = _mmux,\
.ops = _ops,\
+ .ops_sec = _ops_sec,\
}
#define K_MUX(_id, _offset, _shift, _width, _mux_flags)\
_K_MUX(_id, _offset, _shift, _width, _mux_flags,\
- NULL, NULL)
+ NULL, NULL, NULL)
+
+#define K_MUX_S(_id, _offset, _shift, _width, _mux_flags)\
+ _K_MUX(_id, _offset, _shift, _width, _mux_flags,\
+ NULL, NULL, &clk_smux_ops)
#define K_MMUX(_id, _offset, _shift, _width, _mux_flags)\
_K_MUX(_id, _offset, _shift, _width, _mux_flags,\
- &ker_mux[_id], &clk_mmux_ops)
+ &ker_mux[_id], &clk_mmux_ops, NULL)
+
+#define K_MMUX_S(_id, _offset, _shift, _width, _mux_flags)\
+ _K_MUX(_id, _offset, _shift, _width, _mux_flags,\
+ &ker_mux[_id], &clk_mmux_ops, &clk_s_mmux_ops)
static const struct stm32_mux_cfg ker_mux_cfg[M_LAST] = {
/* Kernel multi mux */
@@ -1626,7 +2185,7 @@ static const struct stm32_mux_cfg ker_mux_cfg[M_LAST] = {
K_MMUX(M_UART78, RCC_UART78CKSELR, 0, 3, 0),
K_MMUX(M_SAI1, RCC_SAI1CKSELR, 0, 3, 0),
K_MMUX(M_ETHCK, RCC_ETHCKSELR, 0, 2, 0),
- K_MMUX(M_I2C46, RCC_I2C46CKSELR, 0, 3, 0),
+ K_MMUX_S(M_I2C46, RCC_I2C46CKSELR, 0, 3, 0),
/* Kernel simple mux */
K_MUX(M_RNG2, RCC_RNG2CKSELR, 0, 2, 0),
@@ -1647,10 +2206,10 @@ static const struct stm32_mux_cfg ker_mux_cfg[M_LAST] = {
K_MUX(M_ADC12, RCC_ADCCKSELR, 0, 2, 0),
K_MUX(M_DSI, RCC_DSICKSELR, 0, 1, 0),
K_MUX(M_CKPER, RCC_CPERCKSELR, 0, 2, 0),
- K_MUX(M_RNG1, RCC_RNG1CKSELR, 0, 2, 0),
- K_MUX(M_STGEN, RCC_STGENCKSELR, 0, 2, 0),
- K_MUX(M_USART1, RCC_UART1CKSELR, 0, 3, 0),
- K_MUX(M_SPI6, RCC_SPI6CKSELR, 0, 3, 0),
+ K_MUX_S(M_RNG1, RCC_RNG1CKSELR, 0, 2, 0),
+ K_MUX_S(M_STGEN, RCC_STGENCKSELR, 0, 2, 0),
+ K_MUX_S(M_USART1, RCC_UART1CKSELR, 0, 3, 0),
+ K_MUX_S(M_SPI6, RCC_SPI6CKSELR, 0, 3, 0),
};
static const struct clock_config stm32mp1_clock_cfg[] = {
@@ -1659,11 +2218,12 @@ static const struct clock_config stm32mp1_clock_cfg[] = {
CLK_DIVIDER_READ_ONLY),
/* External / Internal Oscillators */
- GATE_MP1(CK_HSE, "ck_hse", "clk-hse", 0, RCC_OCENSETR, 8, 0),
- GATE_MP1(CK_CSI, "ck_csi", "clk-csi", 0, RCC_OCENSETR, 4, 0),
- GATE_MP1(CK_HSI, "ck_hsi", "clk-hsi-div", 0, RCC_OCENSETR, 0, 0),
- GATE(CK_LSI, "ck_lsi", "clk-lsi", 0, RCC_RDLSICR, 0, 0),
- GATE(CK_LSE, "ck_lse", "clk-lse", 0, RCC_BDCR, 0, 0),
+ SGATE_MP1(CK_HSE, "ck_hse", "clk-hse", 0, RCC_OCENSETR, 8, 0),
+ SGATE_MP1(CK_CSI, "ck_csi", "clk-csi", CLK_IS_CRITICAL,
+ RCC_OCENSETR, 4, 0),
+ SGATE_MP1(CK_HSI, "ck_hsi", "clk-hsi-div", 0, RCC_OCENSETR, 0, 0),
+ SGATE(CK_LSI, "ck_lsi", "clk-lsi", 0, RCC_RDLSICR, 0, 0),
+ SGATE(CK_LSE, "ck_lse", "clk-lse", 0, RCC_BDCR, 0, 0),
FIXED_FACTOR(CK_HSE_DIV2, "clk-hse-div2", "ck_hse", 0, 1, 2),
@@ -1685,24 +2245,24 @@ static const struct clock_config stm32mp1_clock_cfg[] = {
/* ODF */
COMPOSITE(PLL1_P, "pll1_p", PARENT("pll1"), 0,
- _GATE(RCC_PLL1CR, 4, 0),
+ _S_GATE(RCC_PLL1CR, 4, 0),
_NO_MUX,
- _DIV(RCC_PLL1CFGR2, 0, 7, 0, NULL)),
+ _S_DIV(RCC_PLL1CFGR2, 0, 7, 0, NULL)),
COMPOSITE(PLL2_P, "pll2_p", PARENT("pll2"), 0,
- _GATE(RCC_PLL2CR, 4, 0),
+ _S_GATE(RCC_PLL2CR, 4, 0),
_NO_MUX,
- _DIV(RCC_PLL2CFGR2, 0, 7, 0, NULL)),
+ _S_DIV(RCC_PLL2CFGR2, 0, 7, 0, NULL)),
COMPOSITE(PLL2_Q, "pll2_q", PARENT("pll2"), 0,
- _GATE(RCC_PLL2CR, 5, 0),
+ _S_GATE(RCC_PLL2CR, 5, 0),
_NO_MUX,
- _DIV(RCC_PLL2CFGR2, 8, 7, 0, NULL)),
+ _S_DIV(RCC_PLL2CFGR2, 8, 7, 0, NULL)),
COMPOSITE(PLL2_R, "pll2_r", PARENT("pll2"), CLK_IS_CRITICAL,
- _GATE(RCC_PLL2CR, 6, 0),
+ _S_GATE(RCC_PLL2CR, 6, 0),
_NO_MUX,
- _DIV(RCC_PLL2CFGR2, 16, 7, 0, NULL)),
+ _S_DIV(RCC_PLL2CFGR2, 16, 7, 0, NULL)),
COMPOSITE(PLL3_P, "pll3_p", PARENT("pll3"), 0,
_GATE(RCC_PLL3CR, 4, 0),
@@ -1738,20 +2298,20 @@ static const struct clock_config stm32mp1_clock_cfg[] = {
MUX(CK_PER, "ck_per", per_src, CLK_OPS_PARENT_ENABLE,
RCC_CPERCKSELR, 0, 2, 0),
- MUX(CK_MPU, "ck_mpu", cpu_src, CLK_OPS_PARENT_ENABLE |
+ SMUX(CK_MPU, "ck_mpu", cpu_src, CLK_OPS_PARENT_ENABLE |
CLK_IS_CRITICAL, RCC_MPCKSELR, 0, 2, 0),
COMPOSITE(CK_AXI, "ck_axi", axi_src, CLK_IS_CRITICAL |
CLK_OPS_PARENT_ENABLE,
_NO_GATE,
- _MUX(RCC_ASSCKSELR, 0, 2, 0),
- _DIV(RCC_AXIDIVR, 0, 3, 0, axi_div_table)),
+ _S_MUX(RCC_ASSCKSELR, 0, 2, 0),
+ _S_DIV(RCC_AXIDIVR, 0, 3, 0, axi_div_table)),
COMPOSITE(CK_MCU, "ck_mcu", mcu_src, CLK_IS_CRITICAL |
CLK_OPS_PARENT_ENABLE,
_NO_GATE,
- _MUX(RCC_MSSCKSELR, 0, 2, 0),
- _DIV(RCC_MCUDIVR, 0, 4, 0, mcu_div_table)),
+ _S_MUX(RCC_MSSCKSELR, 0, 2, 0),
+ _S_DIV(RCC_MCUDIVR, 0, 4, 0, mcu_div_table)),
DIV_TABLE(NO_ID, "pclk1", "ck_mcu", CLK_IGNORE_UNUSED, RCC_APB1DIVR, 0,
3, CLK_DIVIDER_READ_ONLY, apb_div_table),
@@ -1906,7 +2466,7 @@ static const struct clock_config stm32mp1_clock_cfg[] = {
KCLK(RNG1_K, "rng1_k", rng_src, 0, G_RNG1, M_RNG1),
KCLK(RNG2_K, "rng2_k", rng_src, 0, G_RNG2, M_RNG2),
KCLK(USBPHY_K, "usbphy_k", usbphy_src, 0, G_USBPHY, M_USBPHY),
- KCLK(STGEN_K, "stgen_k", stgen_src, CLK_IS_CRITICAL, G_STGEN, M_STGEN),
+ KCLK(STGEN_K, "stgen_k", stgen_src, CLK_IS_CRITICAL, G_STGEN, M_STGEN),
KCLK(SPDIF_K, "spdif_k", spdif_src, 0, G_SPDIF, M_SPDIF),
KCLK(SPI1_K, "spi1_k", spi123_src, 0, G_SPI1, M_SPI1),
KCLK(SPI2_K, "spi2_k", spi123_src, 0, G_SPI2, M_SPI23),
@@ -1952,19 +2512,20 @@ static const struct clock_config stm32mp1_clock_cfg[] = {
MGATE_MP1(GPU_K, "gpu_k", "pll2_q", 0, G_GPU),
MGATE_MP1(DAC12_K, "dac12_k", "ck_lsi", 0, G_DAC12),
- COMPOSITE(ETHPTP_K, "ethptp_k", eth_src, CLK_OPS_PARENT_ENABLE,
+ COMPOSITE(ETHPTP_K, "ethptp_k", eth_src, CLK_OPS_PARENT_ENABLE |
+ CLK_SET_RATE_NO_REPARENT,
_NO_GATE,
_MMUX(M_ETHCK),
_DIV(RCC_ETHCKSELR, 4, 4, CLK_DIVIDER_ALLOW_ZERO, NULL)),
/* RTC clock */
- DIV(NO_ID, "ck_hse_rtc", "ck_hse", 0, RCC_RTCDIVR, 0, 7,
- CLK_DIVIDER_ALLOW_ZERO),
+ SDIV(NO_ID, "ck_hse_rtc", "ck_hse", 0, RCC_RTCDIVR, 0, 7,
+ CLK_DIVIDER_ALLOW_ZERO),
COMPOSITE(RTC, "ck_rtc", rtc_src, CLK_OPS_PARENT_ENABLE |
CLK_SET_RATE_PARENT,
- _GATE(RCC_BDCR, 20, 0),
- _MUX(RCC_BDCR, 16, 2, 0),
+ _S_GATE(RCC_BDCR, 20, 0),
+ _S_MUX(RCC_BDCR, 16, 2, 0),
_NO_DIV),
/* MCO clocks */
@@ -2082,21 +2643,334 @@ static int stm32_rcc_init(struct device_node *np,
return of_clk_add_hw_provider(np, of_clk_hw_onecell_get, clk_data);
}
+static void __iomem *rcc_base;
+
+static int stm32_rcc_init_pwr(struct device_node *np);
+
static void stm32mp1_rcc_init(struct device_node *np)
{
- void __iomem *base;
-
- base = of_iomap(np, 0);
- if (!base) {
+ rcc_base = of_iomap(np, 0);
+ if (!rcc_base) {
pr_err("%s: unable to map resource", np->name);
of_node_put(np);
return;
}
- if (stm32_rcc_init(np, base, stm32mp1_match_data)) {
- iounmap(base);
+ if (stm32_rcc_init(np, rcc_base, stm32mp1_match_data)) {
+ iounmap(rcc_base);
of_node_put(np);
+ return;
}
+
+ stm32_rcc_init_pwr(np);
}
CLK_OF_DECLARE_DRIVER(stm32mp1_rcc, "st,stm32mp1-rcc", stm32mp1_rcc_init);
+
+/*
+ * RCC POWER
+ *
+ */
+
+static struct regmap *pwr_syscon;
+
+struct reg {
+ u32 address;
+ u32 val;
+};
+
+/* This table lists the IPs for which CSLEEP is enabled */
+static const struct reg lp_table[] = {
+ { 0xB04, 0x00000000 }, /* APB1 */
+ { 0xB0C, 0x00000000 }, /* APB2 */
+ { 0xB14, 0x00000800 }, /* APB3 */
+ { 0x304, 0x00000000 }, /* APB4 */
+ { 0xB1C, 0x00000000 }, /* AHB2 */
+ { 0xB24, 0x00000000 }, /* AHB3 */
+ { 0xB2C, 0x00000000 }, /* AHB4 */
+ { 0x31C, 0x00000000 }, /* AHB6 */
+ { 0xB34, 0x00000000 }, /* AXIM */
+ { 0xB3C, 0x00000000 }, /* MLAHB */
+};
+
+struct sreg {
+ u32 address;
+ u32 secured;
+ u32 val;
+};
+
+static struct sreg clock_gating[] = {
+ { 0xA00, 0 }, /* APB1 */
+ { 0xA08, 0 }, /* APB2 */
+ { 0xA10, 0 }, /* APB3 */
+ { 0x200, 0 }, /* APB4 */
+ { 0x208, 1 }, /* APB5 */
+ { 0x210, 1 }, /* AHB5 */
+ { 0x218, 0 }, /* AHB6 */
+ { 0xA18, 0 }, /* AHB2 */
+ { 0xA20, 0 }, /* AHB3 */
+ { 0xA28, 0 }, /* AHB4 */
+ { 0xA38, 0 }, /* MLAHB */
+ { 0x800, 0 }, /* MCO1 */
+ { 0x804, 0 }, /* MCO2 */
+ { 0x894, 0 }, /* PLL4 */
+ { 0x89C, 0 }, /* PLL4CFGR2 */
+};
+
+struct smux {
+ const char *name;
+ struct clk *clk;
+ struct clk *clkp;
+};
+
+#define KER_SRC(_clk_name)\
+{\
+ .name = _clk_name,\
+}
+
+struct smux _mux_kernel[] = {
+ KER_SRC("sdmmc1_k"),
+ KER_SRC("spi2_k"),
+ KER_SRC("spi4_k"),
+ KER_SRC("i2c1_k"),
+ KER_SRC("i2c3_k"),
+ KER_SRC("lptim2_k"),
+ KER_SRC("lptim3_k"),
+ KER_SRC("usart2_k"),
+ KER_SRC("usart3_k"),
+ KER_SRC("uart7_k"),
+ KER_SRC("sai1_k"),
+ KER_SRC("ethck_k"),
+ KER_SRC("i2c4_k"),
+ KER_SRC("rng2_k"),
+ KER_SRC("sdmmc3_k"),
+ KER_SRC("fmc_k"),
+ KER_SRC("qspi_k"),
+ KER_SRC("usbphy_k"),
+ KER_SRC("usbo_k"),
+ KER_SRC("spdif_k"),
+ KER_SRC("spi1_k"),
+ KER_SRC("cec_k"),
+ KER_SRC("lptim1_k"),
+ KER_SRC("uart6_k"),
+ KER_SRC("fdcan_k"),
+ KER_SRC("sai2_k"),
+ KER_SRC("sai3_k"),
+ KER_SRC("sai4_k"),
+ KER_SRC("adc12_k"),
+ KER_SRC("dsi_k"),
+ KER_SRC("ck_per"),
+ KER_SRC("rng1_k"),
+ KER_SRC("stgen_k"),
+ KER_SRC("usart1_k"),
+ KER_SRC("spi6_k"),
+};
+
+static struct sreg pll_clock[] = {
+ { 0x880, 0 }, /* PLL3 */
+ { 0x894, 0 }, /* PLL4 */
+};
+
+static struct sreg mcu_source[] = {
+ { 0x048, 0 }, /* MSSCKSELR */
+};
+
+#define RCC_IRQ_FLAGS_MASK 0x110F1F
+#define RCC_STOP_MASK (BIT(0) | BIT(1))
+#define RCC_RSTSR 0x408
+#define PWR_MPUCR 0x10
+#define PLL_EN (BIT(0))
+#define STOP_FLAG (BIT(5))
+#define SBF (BIT(11))
+#define SBF_MPU (BIT(12))
+
+
+
+
+static irqreturn_t stm32mp1_rcc_irq_handler(int irq, void *sdata)
+{
+ pr_info("RCC generic interrupt received\n");
+
+ /* clear interrupt flag */
+ SMC(STM32_SVC_RCC, STM32_WRITE, RCC_CIFR, RCC_IRQ_FLAGS_MASK);
+
+ return IRQ_HANDLED;
+}
+
+static void stm32mp1_backup_sreg(struct sreg *sreg, int size)
+{
+ int i;
+
+ for (i = 0; i < size; i++)
+ sreg[i].val = readl_relaxed(rcc_base + sreg[i].address);
+}
+
+static void stm32mp1_restore_sreg(struct sreg *sreg, int size)
+{
+ int i;
+ u32 val, address;
+ int soc_secured;
+
+ soc_secured = _is_soc_secured(rcc_base);
+
+ for (i = 0; i < size; i++) {
+ val = sreg[i].val;
+ address = sreg[i].address;
+
+ if (soc_secured && sreg[i].secured)
+ SMC(STM32_SVC_RCC, STM32_WRITE,
+ address, val);
+ else
+ writel_relaxed(val, rcc_base + address);
+ }
+}
+
+static void stm32mp1_restore_pll(struct sreg *sreg, int size)
+{
+ int i;
+ u32 val;
+ void __iomem *address;
+
+ for (i = 0; i < size; i++) {
+ val = sreg[i].val;
+ address = sreg[i].address + rcc_base;
+
+ /* if pll was off turn it on before */
+ if ((readl_relaxed(address) & PLL_EN) == 0) {
+ writel_relaxed(PLL_EN, address);
+ while ((readl_relaxed(address) & PLL_RDY) == 0)
+ ;
+ }
+
+ /* 2nd step: restore the output divider factors (ODF) */
+ writel_relaxed(val, address);
+ }
+}
+
+static void stm32mp1_backup_mux(struct smux *smux, int size)
+{
+ int i;
+
+ for (i = 0; i < size; i++) {
+ smux[i].clk = __clk_lookup(smux[i].name);
+ smux[i].clkp = clk_get_parent(smux[i].clk);
+ }
+}
+
+static void stm32mp1_restore_mux(struct smux *smux, int size)
+{
+ int i;
+
+ for (i = 0; i < size; i++)
+ clk_set_parent_force(smux[i].clk, smux[i].clkp);
+}
+
+#define RCC_BIT_HSI 0
+#define RCC_BIT_CSI 4
+#define RCC_BIT_HSE 8
+
+#define RCC_CK_OSC_MASK (BIT(RCC_BIT_HSE) | BIT(RCC_BIT_CSI) | BIT(RCC_BIT_HSI))
+
+#define RCC_CK_XXX_KER_MASK (RCC_CK_OSC_MASK << 1)
+
+static int stm32mp1_clk_suspend(void)
+{
+ u32 reg;
+
+ /* Save pll regs */
+ stm32mp1_backup_sreg(pll_clock, ARRAY_SIZE(pll_clock));
+
+ /* Save mcu source */
+ stm32mp1_backup_sreg(mcu_source, ARRAY_SIZE(mcu_source));
+
+ /* Save clock gating regs */
+ stm32mp1_backup_sreg(clock_gating, ARRAY_SIZE(clock_gating));
+
+ /* Save kernel clock regs */
+ stm32mp1_backup_mux(_mux_kernel, ARRAY_SIZE(_mux_kernel));
+
+ /* Enable ck_xxx_ker clocks if ck_xxx was on */
+ reg = readl_relaxed(rcc_base + RCC_OCENSETR) & RCC_CK_OSC_MASK;
+ writel_relaxed(reg << 1, rcc_base + RCC_OCENSETR);
+
+ return 0;
+}
+
+static void stm32mp1_clk_resume(void)
+{
+ u32 power_flags_rcc, power_flags_pwr;
+
+ /* Read power flags and decide what to resume */
+ regmap_read(pwr_syscon, PWR_MPUCR, &power_flags_pwr);
+ power_flags_rcc = readl_relaxed(rcc_base + RCC_RSTSR);
+
+ if ((power_flags_pwr & STOP_FLAG) == STOP_FLAG) {
+ /* Restore pll */
+ stm32mp1_restore_pll(pll_clock, ARRAY_SIZE(pll_clock));
+
+ /* Restore mcu source */
+ stm32mp1_restore_sreg(mcu_source, ARRAY_SIZE(mcu_source));
+ } else if (((power_flags_rcc & SBF) == SBF) ||
+ ((power_flags_rcc & SBF_MPU) == SBF_MPU)) {
+ stm32mp1_restore_sreg(clock_gating, ARRAY_SIZE(clock_gating));
+
+ stm32mp1_restore_mux(_mux_kernel, ARRAY_SIZE(_mux_kernel));
+ }
+
+ SMC(STM32_SVC_RCC, STM32_WRITE, RCC_RSTSR, 0);
+
+ /* Disable ck_xxx_ker clocks */
+ stm32_clk_bit_secure(STM32_SET_BITS, RCC_CK_XXX_KER_MASK,
+ rcc_base + RCC_OCENSETR + RCC_CLR);
+}
+
+static struct syscore_ops stm32mp1_clk_ops = {
+ .suspend = stm32mp1_clk_suspend,
+ .resume = stm32mp1_clk_resume,
+};
+
+static struct irqaction rcc_irq = {
+ .name = "rcc irq",
+ .flags = IRQF_ONESHOT,
+ .handler = stm32mp1_rcc_irq_handler,
+};
+
+static int stm32_rcc_init_pwr(struct device_node *np)
+{
+ int irq;
+ int ret;
+ int i;
+
+ pwr_syscon = syscon_regmap_lookup_by_phandle(np, "st,pwr");
+ if (IS_ERR(pwr_syscon)) {
+ pr_err("%s: pwr syscon required !\n", __func__);
+ return PTR_ERR(pwr_syscon);
+ }
+
+ /* register generic irq */
+ irq = of_irq_get(np, 0);
+ if (irq < 0) {
+ pr_err("%s: failed to get RCC generic IRQ\n", __func__);
+ return irq;
+ }
+
+ ret = setup_irq(irq, &rcc_irq);
+ if (ret) {
+ pr_err("%s: failed to register generic IRQ\n", __func__);
+ return ret;
+ }
+
+
+ /* Configure LPEN static table */
+ for (i = 0; i < ARRAY_SIZE(lp_table); i++)
+ writel_relaxed(lp_table[i].val, rcc_base + lp_table[i].address);
+
+ /* cleanup RCC flags */
+ SMC(STM32_SVC_RCC, STM32_WRITE, RCC_CIFR, RCC_IRQ_FLAGS_MASK);
+
+ SMC(STM32_SVC_RCC, STM32_WRITE, RCC_SREQCLRR, RCC_STOP_MASK);
+
+ register_syscore_ops(&stm32mp1_clk_ops);
+
+ return 0;
+}
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index d31055a..6d8326d 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -2204,7 +2204,8 @@ bool clk_has_parent(struct clk *clk, struct clk *parent)
EXPORT_SYMBOL_GPL(clk_has_parent);
static int clk_core_set_parent_nolock(struct clk_core *core,
- struct clk_core *parent)
+ struct clk_core *parent,
+ bool force)
{
int ret = 0;
int p_index = 0;
@@ -2215,7 +2216,7 @@ static int clk_core_set_parent_nolock(struct clk_core *core,
if (!core)
return 0;
- if (core->parent == parent)
+ if (core->parent == parent && !force)
return 0;
/* verify ops for for multi-parent clks */
@@ -2272,6 +2273,7 @@ static int clk_core_set_parent_nolock(struct clk_core *core,
* clk_set_parent - switch the parent of a mux clk
* @clk: the mux clk whose input we are switching
* @parent: the new input to clk
+ * @force: when true, reprogram the mux even if @parent is already set
*
* Re-parent clk to use parent as its new input source. If clk is in
* prepared state, the clk will get enabled for the duration of this call. If
@@ -2285,7 +2287,7 @@ static int clk_core_set_parent_nolock(struct clk_core *core,
*
* Returns 0 on success, -EERROR otherwise.
*/
-int clk_set_parent(struct clk *clk, struct clk *parent)
+int _clk_set_parent(struct clk *clk, struct clk *parent, bool force)
{
int ret;
@@ -2298,7 +2300,8 @@ int clk_set_parent(struct clk *clk, struct clk *parent)
clk_core_rate_unprotect(clk->core);
ret = clk_core_set_parent_nolock(clk->core,
- parent ? parent->core : NULL);
+ parent ? parent->core : NULL,
+ force);
if (clk->exclusive_count)
clk_core_rate_protect(clk->core);
@@ -2307,8 +2310,19 @@ int clk_set_parent(struct clk *clk, struct clk *parent)
return ret;
}
+
+int clk_set_parent(struct clk *clk, struct clk *parent)
+{
+ return _clk_set_parent(clk, parent, 0);
+}
EXPORT_SYMBOL_GPL(clk_set_parent);
+int clk_set_parent_force(struct clk *clk, struct clk *parent)
+{
+ return _clk_set_parent(clk, parent, 1);
+}
+EXPORT_SYMBOL_GPL(clk_set_parent_force);
+
static int clk_core_set_phase_nolock(struct clk_core *core, int degrees)
{
int ret = -EINVAL;
@@ -3350,7 +3364,7 @@ void clk_unregister(struct clk *clk)
/* Reparent all children to the orphan list. */
hlist_for_each_entry_safe(child, t, &clk->core->children,
child_node)
- clk_core_set_parent_nolock(child, NULL);
+ clk_core_set_parent_nolock(child, NULL, 0);
}
hlist_del_init(&clk->core->child_node);
diff --git a/include/dt-bindings/clock/stm32mp1-clks.h b/include/dt-bindings/clock/stm32mp1-clks.h
index 90ec780..4cdaf13 100644
--- a/include/dt-bindings/clock/stm32mp1-clks.h
+++ b/include/dt-bindings/clock/stm32mp1-clks.h
@@ -248,7 +248,4 @@
#define STM32MP1_LAST_CLK 232
-#define LTDC_K LTDC_PX
-#define ETHMAC_K ETHCK_K
-
#endif /* _DT_BINDINGS_STM32MP1_CLKS_H_ */
diff --git a/include/linux/clk.h b/include/linux/clk.h
index 4f750c4..ffbae16 100644
--- a/include/linux/clk.h
+++ b/include/linux/clk.h
@@ -602,6 +602,7 @@ int clk_set_max_rate(struct clk *clk, unsigned long rate);
* Returns success (0) or negative errno.
*/
int clk_set_parent(struct clk *clk, struct clk *parent);
+int clk_set_parent_force(struct clk *clk, struct clk *parent);
/**
* clk_get_parent - get the parent clock source for this clock
--
2.7.4