meta-st-stm32mp/recipes-kernel/linux/linux-stm32mp/4.19/4.19.94/0005-ARM-stm32mp1-r3-CLOCK....

1549 lines
42 KiB
Diff

From 035fdb208f6ba7b3c84fddbb476ab7be33866cb4 Mon Sep 17 00:00:00 2001
From: Romuald JEANNE <romuald.jeanne@st.com>
Date: Mon, 20 Jan 2020 18:07:02 +0100
Subject: [PATCH 05/31] ARM stm32mp1 r3 CLOCK
---
drivers/clk/clk-stm32mp1.c | 1124 +++++++++++++++++++++++++++--
drivers/clk/clk.c | 6 +
include/dt-bindings/clock/stm32mp1-clks.h | 3 -
include/linux/clk-provider.h | 1 +
4 files changed, 1061 insertions(+), 73 deletions(-)
diff --git a/drivers/clk/clk-stm32mp1.c b/drivers/clk/clk-stm32mp1.c
index bf3b6a4..95ed875 100644
--- a/drivers/clk/clk-stm32mp1.c
+++ b/drivers/clk/clk-stm32mp1.c
@@ -5,15 +5,22 @@
* Author: Gabriel Fernandez <gabriel.fernandez@st.com> for STMicroelectronics.
*/
+#include <linux/arm-smccc.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/delay.h>
#include <linux/err.h>
+#include <linux/interrupt.h>
#include <linux/io.h>
+#include <linux/mfd/syscon.h>
+#include <linux/mm.h>
#include <linux/of.h>
#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/regmap.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
+#include <linux/syscore_ops.h>
#include <dt-bindings/clock/stm32mp1-clks.h>
@@ -45,6 +52,7 @@ static DEFINE_SPINLOCK(rlock);
#define RCC_AHB5ENSETR 0x210
#define RCC_AHB6ENSETR 0x218
#define RCC_AHB6LPENSETR 0x318
+#define RCC_MLAHBENSETR 0xA38
#define RCC_RCK12SELR 0x28
#define RCC_RCK3SELR 0x820
#define RCC_RCK4SELR 0x824
@@ -101,6 +109,10 @@ static DEFINE_SPINLOCK(rlock);
#define RCC_TIMG2PRER 0x82C
#define RCC_RTCDIVR 0x44
#define RCC_DBGCFGR 0x80C
+#define RCC_SREQSETR 0x104
+#define RCC_SREQCLRR 0x108
+#define RCC_CIER 0x414
+#define RCC_CIFR 0x418
#define RCC_CLR 0x4
@@ -356,17 +368,20 @@ struct stm32_gate_cfg {
struct gate_cfg *gate;
struct stm32_mgate *mgate;
const struct clk_ops *ops;
+ const struct clk_ops *ops_sec;
};
struct stm32_div_cfg {
struct div_cfg *div;
const struct clk_ops *ops;
+ const struct clk_ops *ops_sec;
};
struct stm32_mux_cfg {
struct mux_cfg *mux;
struct stm32_mmux *mmux;
const struct clk_ops *ops;
+ const struct clk_ops *ops_sec;
};
/* STM32 Composite clock */
@@ -376,6 +391,11 @@ struct stm32_composite_cfg {
const struct stm32_mux_cfg *mux;
};
+static inline int _is_soc_secured(void __iomem *base)
+{
+ return readl_relaxed(base) & 0x1;
+}
+
static struct clk_hw *
_clk_hw_register_gate(struct device *dev,
struct clk_hw_onecell_data *clk_data,
@@ -592,6 +612,9 @@ clk_stm32_register_gate_ops(struct device *dev,
if (cfg->ops)
init.ops = cfg->ops;
+ if (cfg->ops_sec && _is_soc_secured(base))
+ init.ops = cfg->ops_sec;
+
hw = _get_stm32_gate(base, cfg, lock);
if (IS_ERR(hw))
return ERR_PTR(-ENOMEM);
@@ -630,6 +653,9 @@ clk_stm32_register_composite(struct device *dev,
if (cfg->mux->ops)
mux_ops = cfg->mux->ops;
+
+ if (cfg->mux->ops_sec && _is_soc_secured(base))
+ mux_ops = cfg->mux->ops_sec;
}
}
@@ -641,6 +667,9 @@ clk_stm32_register_composite(struct device *dev,
if (cfg->div->ops)
div_ops = cfg->div->ops;
+
+ if (cfg->div->ops_sec && _is_soc_secured(base))
+ div_ops = cfg->div->ops_sec;
}
}
@@ -652,6 +681,9 @@ clk_stm32_register_composite(struct device *dev,
if (cfg->gate->ops)
gate_ops = cfg->gate->ops;
+
+ if (cfg->gate->ops_sec && _is_soc_secured(base))
+ gate_ops = cfg->gate->ops_sec;
}
}
@@ -714,7 +746,7 @@ static int clk_mmux_set_parent(struct clk_hw *hw, u8 index)
for (n = 0; n < clk_mmux->mmux->nbr_clk; n++)
if (clk_mmux->mmux->hws[n] != hw)
- clk_hw_reparent(clk_mmux->mmux->hws[n], hwp);
+ clk_hw_set_parent(clk_mmux->mmux->hws[n], hwp);
return 0;
}
@@ -867,6 +899,7 @@ static struct clk_hw *clk_register_pll(struct device *dev, const char *name,
const char *parent_name,
void __iomem *reg,
unsigned long flags,
+ const struct clk_ops *ops,
spinlock_t *lock)
{
struct stm32_pll_obj *element;
@@ -879,7 +912,7 @@ static struct clk_hw *clk_register_pll(struct device *dev, const char *name,
return ERR_PTR(-ENOMEM);
init.name = name;
- init.ops = &pll_ops;
+ init.ops = ops;
init.flags = flags;
init.parent_names = &parent_name;
init.num_parents = 1;
@@ -1033,6 +1066,8 @@ static struct clk_hw *clk_register_cktim(struct device *dev, const char *name,
struct stm32_pll_cfg {
u32 offset;
+ const struct clk_ops *ops;
+ const struct clk_ops *ops_sec;
};
static struct clk_hw *_clk_register_pll(struct device *dev,
@@ -1043,7 +1078,8 @@ static struct clk_hw *_clk_register_pll(struct device *dev,
struct stm32_pll_cfg *stm_pll_cfg = cfg->cfg;
return clk_register_pll(dev, cfg->name, cfg->parent_name,
- base + stm_pll_cfg->offset, cfg->flags, lock);
+ base + stm_pll_cfg->offset, cfg->flags,
+ stm_pll_cfg->ops, lock);
}
struct stm32_cktim_cfg {
@@ -1161,6 +1197,7 @@ _clk_stm32_register_composite(struct device *dev,
.flags = _flags,\
.cfg = &(struct stm32_pll_cfg) {\
.offset = _offset,\
+ .ops = &pll_ops\
},\
.func = _clk_register_pll,\
}
@@ -1193,7 +1230,8 @@ _clk_stm32_register_composite(struct device *dev,
.func = _clk_stm32_register_gate,\
}
-#define _STM32_GATE(_gate_offset, _gate_bit_idx, _gate_flags, _mgate, _ops)\
+#define _STM32_GATE(_gate_offset, _gate_bit_idx, _gate_flags, _mgate, _ops,\
+ _ops_sec)\
(&(struct stm32_gate_cfg) {\
&(struct gate_cfg) {\
.reg_off = _gate_offset,\
@@ -1202,6 +1240,7 @@ _clk_stm32_register_composite(struct device *dev,
},\
.mgate = _mgate,\
.ops = _ops,\
+ .ops_sec = _ops_sec,\
})
#define _STM32_MGATE(_mgate)\
@@ -1209,11 +1248,11 @@ _clk_stm32_register_composite(struct device *dev,
#define _GATE(_gate_offset, _gate_bit_idx, _gate_flags)\
_STM32_GATE(_gate_offset, _gate_bit_idx, _gate_flags,\
- NULL, NULL)\
+ NULL, NULL, NULL)\
#define _GATE_MP1(_gate_offset, _gate_bit_idx, _gate_flags)\
_STM32_GATE(_gate_offset, _gate_bit_idx, _gate_flags,\
- NULL, &mp1_gate_clk_ops)\
+ NULL, &mp1_gate_clk_ops, NULL)\
#define _MGATE_MP1(_mgate)\
.gate = &per_gate_cfg[_mgate]
@@ -1227,7 +1266,7 @@ _clk_stm32_register_composite(struct device *dev,
_STM32_MGATE(_mgate))
#define _STM32_DIV(_div_offset, _div_shift, _div_width,\
- _div_flags, _div_table, _ops)\
+ _div_flags, _div_table, _ops, _ops_sec)\
.div = &(struct stm32_div_cfg) {\
&(struct div_cfg) {\
.reg_off = _div_offset,\
@@ -1237,13 +1276,14 @@ _clk_stm32_register_composite(struct device *dev,
.table = _div_table,\
},\
.ops = _ops,\
+ .ops_sec = _ops_sec,\
}
#define _DIV(_div_offset, _div_shift, _div_width, _div_flags, _div_table)\
_STM32_DIV(_div_offset, _div_shift, _div_width,\
- _div_flags, _div_table, NULL)\
+ _div_flags, _div_table, NULL, NULL)\
-#define _STM32_MUX(_offset, _shift, _width, _mux_flags, _mmux, _ops)\
+#define _STM32_MUX(_offset, _shift, _width, _mux_flags, _mmux, _ops, _ops_sec)\
.mux = &(struct stm32_mux_cfg) {\
&(struct mux_cfg) {\
.reg_off = _offset,\
@@ -1254,10 +1294,11 @@ _clk_stm32_register_composite(struct device *dev,
},\
.mmux = _mmux,\
.ops = _ops,\
+ .ops_sec = _ops_sec,\
}
#define _MUX(_offset, _shift, _width, _mux_flags)\
- _STM32_MUX(_offset, _shift, _width, _mux_flags, NULL, NULL)\
+ _STM32_MUX(_offset, _shift, _width, _mux_flags, NULL, NULL, NULL)
#define _MMUX(_mmux) .mux = &ker_mux_cfg[_mmux]
@@ -1292,6 +1333,581 @@ _clk_stm32_register_composite(struct device *dev,
_MMUX(_mmux),\
_NO_DIV)
+/*
+ *
+ * Security management
+ *
+ */
+
+#define STM32_SVC_RCC 0x82001000
+#define STM32_WRITE 0x1
+#define STM32_SET_BITS 0x2
+#define STM32_CLR_BITS 0x3
+
+#define STM32_SMC_RCC_OPP 0x82001009
+#define STM32_SMC_RCC_OPP_SET 0
+#define STM32_SMC_RCC_OPP_ROUND 1
+
+#define SMC(class, op, address, val)\
+ ({\
+ struct arm_smccc_res res;\
+ arm_smccc_smc(class, op, address, val,\
+ 0, 0, 0, 0, &res);\
+ })
+
+static u32 stm32_clk_writel_secure(u32 value, void __iomem *reg)
+{
+ struct arm_smccc_res res;
+ u32 address;
+
+ address = offset_in_page(reg);
+
+ arm_smccc_smc(STM32_SVC_RCC, STM32_WRITE, address, value, 0, 0, 0,
+ 0, &res);
+
+ if (res.a0)
+ pr_warn("%s: Failed to write in secure mode at 0x%x (err = %ld)\n"
+ , __func__
+ , address
+ , res.a0);
+
+ return res.a0;
+}
+
+static u32 stm32_clk_bit_secure(u32 cmd, u32 value, void __iomem *reg)
+{
+ struct arm_smccc_res res;
+ u32 address;
+
+ address = offset_in_page(reg);
+
+ arm_smccc_smc(STM32_SVC_RCC, cmd, address, value, 0, 0, 0,
+ 0, &res);
+
+ if (res.a0)
+ pr_warn("%s: Failed to write in secure mode at 0x%x (err = %ld)\n"
+ , __func__
+ , address
+ , res.a0);
+
+ return res.a0;
+}
+
+static void clk_sgate_endisable(struct clk_hw *hw, int enable)
+{
+ struct clk_gate *gate = to_clk_gate(hw);
+ unsigned long flags = 0;
+ u32 cmd;
+
+ spin_lock_irqsave(gate->lock, flags);
+
+ if (enable)
+ cmd = STM32_SET_BITS;
+ else
+ cmd = STM32_CLR_BITS;
+
+ stm32_clk_bit_secure(cmd, BIT(gate->bit_idx), gate->reg);
+
+ spin_unlock_irqrestore(gate->lock, flags);
+}
+
+static int clk_sgate_enable(struct clk_hw *hw)
+{
+ clk_sgate_endisable(hw, 1);
+
+ return 0;
+}
+
+static void clk_sgate_disable(struct clk_hw *hw)
+{
+ clk_sgate_endisable(hw, 0);
+}
+
+static const struct clk_ops clk_sgate_ops = {
+ .enable = clk_sgate_enable,
+ .disable = clk_sgate_disable,
+ .is_enabled = clk_gate_is_enabled,
+};
+
+static u8 clk_smux_get_parent(struct clk_hw *hw)
+{
+ return clk_mux_ops.get_parent(hw);
+}
+
+static int clk_smux_set_parent(struct clk_hw *hw, u8 index)
+{
+ struct clk_mux *mux = to_clk_mux(hw);
+ u32 val;
+ unsigned long flags = 0;
+
+ if (mux->table) {
+ index = mux->table[index];
+ } else {
+ if (mux->flags & CLK_MUX_INDEX_BIT)
+ index = 1 << index;
+
+ if (mux->flags & CLK_MUX_INDEX_ONE)
+ index++;
+ }
+
+ spin_lock_irqsave(mux->lock, flags);
+
+ val = clk_readl(mux->reg);
+ val &= ~(mux->mask << mux->shift);
+ val |= index << mux->shift;
+
+ stm32_clk_writel_secure(val, mux->reg);
+
+ spin_unlock_irqrestore(mux->lock, flags);
+
+ return 0;
+}
+
+static const struct clk_ops clk_smux_ops = {
+ .get_parent = clk_smux_get_parent,
+ .set_parent = clk_smux_set_parent,
+ .determine_rate = __clk_mux_determine_rate,
+};
+
+static struct clk_hw *clk_hw_register_smux(struct device *dev,
+ const char *name,
+ const char * const *parent_names,
+ u8 num_parents,
+ unsigned long flags,
+ void __iomem *reg, u8 shift,
+ u8 width,
+ u8 clk_mux_flags,
+ spinlock_t *lock)
+{
+ u32 mask = BIT(width) - 1;
+ struct clk_mux *mux;
+ struct clk_hw *hw;
+ struct clk_init_data init;
+ int ret;
+
+ /* allocate the mux */
+ mux = kzalloc(sizeof(*mux), GFP_KERNEL);
+ if (!mux)
+ return ERR_PTR(-ENOMEM);
+
+ init.name = name;
+
+ init.ops = &clk_smux_ops;
+
+ init.flags = flags | CLK_IS_BASIC;
+ init.parent_names = parent_names;
+ init.num_parents = num_parents;
+
+ /* struct clk_mux assignments */
+ mux->reg = reg;
+ mux->shift = shift;
+ mux->mask = mask;
+ mux->flags = clk_mux_flags;
+ mux->lock = lock;
+ mux->table = NULL;
+ mux->hw.init = &init;
+
+ hw = &mux->hw;
+ ret = clk_hw_register(dev, hw);
+ if (ret) {
+ kfree(mux);
+ hw = ERR_PTR(ret);
+ }
+
+ return hw;
+}
+
+static struct clk_hw *
+__clk_hw_register_mux(struct device *dev,
+ struct clk_hw_onecell_data *clk_data,
+ void __iomem *base, spinlock_t *lock,
+ const struct clock_config *cfg)
+{
+ struct mux_cfg *mux_cfg = cfg->cfg;
+
+ if (!_is_soc_secured(base))
+ return clk_hw_register_mux(dev, cfg->name, cfg->parent_names,
+ cfg->num_parents, cfg->flags,
+ mux_cfg->reg_off + base,
+ mux_cfg->shift,
+ mux_cfg->width, mux_cfg->mux_flags,
+ lock);
+ else
+ return clk_hw_register_smux(dev, cfg->name,
+ cfg->parent_names,
+ cfg->num_parents, cfg->flags,
+ mux_cfg->reg_off + base,
+ mux_cfg->shift,
+ mux_cfg->width,
+ mux_cfg->mux_flags,
+ lock);
+}
+
+struct clk_div_secure {
+ struct clk_divider div;
+ u8 secure;
+};
+
+#define to_clk_div_secure(_hw) container_of(_hw, struct clk_div_secure, div)
+
+static unsigned long clk_sdivider_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ return clk_divider_ops.recalc_rate(hw, parent_rate);
+}
+
+static long clk_sdivider_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *prate)
+{
+ return clk_divider_ops.round_rate(hw, rate, prate);
+}
+
+#define div_mask(width) ((1 << (width)) - 1)
+
+static int clk_sdivider_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct clk_divider *divider = to_clk_divider(hw);
+ int value;
+ unsigned long flags = 0;
+ u32 val;
+
+ value = divider_get_val(rate, parent_rate, divider->table,
+ divider->width, divider->flags);
+
+ if (value < 0)
+ return value;
+
+ spin_lock_irqsave(divider->lock, flags);
+
+ if (divider->flags & CLK_DIVIDER_HIWORD_MASK) {
+ val = div_mask(divider->width) << (divider->shift + 16);
+ } else {
+ val = clk_readl(divider->reg);
+ val &= ~(div_mask(divider->width) << divider->shift);
+ }
+ val |= (u32)value << divider->shift;
+
+ stm32_clk_writel_secure(val, divider->reg);
+
+ spin_unlock_irqrestore(divider->lock, flags);
+
+ return 0;
+}
+
+static const struct clk_ops clk_sdivider_ops = {
+ .recalc_rate = clk_sdivider_recalc_rate,
+ .round_rate = clk_sdivider_round_rate,
+ .set_rate = clk_sdivider_set_rate,
+};
+
+static const struct clk_ops clk_sdivider_pll1_p_ops = {
+ .recalc_rate = clk_sdivider_recalc_rate,
+};
+
+static struct clk_hw *
+clk_hw_register_sdivider_table(struct device *dev, const char *name,
+ const char *parent_name,
+ unsigned long flags,
+ void __iomem *reg,
+ u8 shift, u8 width,
+ u8 clk_divider_flags,
+ const struct clk_div_table *table,
+ spinlock_t *lock)
+{
+ struct clk_divider *div;
+ struct clk_hw *hw;
+ struct clk_init_data init;
+ int ret;
+
+ /* allocate the divider */
+ div = kzalloc(sizeof(*div), GFP_KERNEL);
+ if (!div)
+ return ERR_PTR(-ENOMEM);
+
+ init.name = name;
+ if (clk_divider_flags & CLK_DIVIDER_READ_ONLY)
+ init.ops = &clk_divider_ro_ops;
+ else
+ init.ops = &clk_sdivider_ops;
+
+ init.flags = flags | CLK_IS_BASIC;
+ init.parent_names = (parent_name ? &parent_name : NULL);
+ init.num_parents = (parent_name ? 1 : 0);
+
+ /* struct clk_divider assignments */
+ div->reg = reg;
+ div->shift = shift;
+ div->width = width;
+ div->flags = clk_divider_flags;
+ div->lock = lock;
+ div->hw.init = &init;
+ div->table = table;
+
+ /* register the clock */
+ hw = &div->hw;
+
+ ret = clk_hw_register(dev, hw);
+ if (ret) {
+ kfree(div);
+ hw = ERR_PTR(ret);
+ }
+
+ return hw;
+}
+
+static struct clk_hw *
+__clk_hw_register_divider_table(struct device *dev,
+ struct clk_hw_onecell_data *clk_data,
+ void __iomem *base, spinlock_t *lock,
+ const struct clock_config *cfg)
+{
+ struct div_cfg *div_cfg = cfg->cfg;
+
+ if (!_is_soc_secured(base))
+ return clk_hw_register_divider_table(dev, cfg->name,
+ cfg->parent_name,
+ cfg->flags,
+ div_cfg->reg_off + base,
+ div_cfg->shift,
+ div_cfg->width,
+ div_cfg->div_flags,
+ div_cfg->table,
+ lock);
+ else
+ return clk_hw_register_sdivider_table(dev, cfg->name,
+ cfg->parent_name,
+ cfg->flags,
+ div_cfg->reg_off + base,
+ div_cfg->shift,
+ div_cfg->width,
+ div_cfg->div_flags,
+ div_cfg->table,
+ lock);
+}
+
+static long clk_pll1_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *prate)
+{
+ struct arm_smccc_res res;
+
+ arm_smccc_smc(STM32_SMC_RCC_OPP, STM32_SMC_RCC_OPP_ROUND, rate, 0, 0, 0,
+ 0, 0, &res);
+
+ return res.a1;
+}
+
+static int pll1_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ SMC(STM32_SMC_RCC_OPP, STM32_SMC_RCC_OPP_SET, rate, 0);
+
+ return 0;
+}
+
+static const struct clk_ops pll1_ops = {
+ .enable = pll_enable,
+ .disable = pll_disable,
+ .recalc_rate = pll_recalc_rate,
+ .round_rate = clk_pll1_round_rate,
+ .set_rate = pll1_set_rate,
+ .is_enabled = pll_is_enabled,
+};
+
+static struct clk_hw *_clk_sregister_pll(struct device *dev,
+ struct clk_hw_onecell_data *clk_data,
+ void __iomem *base, spinlock_t *lock,
+ const struct clock_config *cfg)
+{
+ struct stm32_pll_cfg *stm_pll_cfg = cfg->cfg;
+
+ if (!_is_soc_secured(base))
+ return clk_register_pll(dev, cfg->name, cfg->parent_name,
+ base + stm_pll_cfg->offset, cfg->flags,
+ stm_pll_cfg->ops, lock);
+ else
+ return clk_register_pll(dev, cfg->name, cfg->parent_name,
+ base + stm_pll_cfg->offset, cfg->flags,
+ stm_pll_cfg->ops_sec, lock);
+}
+
+#define PLL_1(_id, _name, _parent, _flags, _offset)\
+{\
+ .id = _id,\
+ .name = _name,\
+ .parent_name = _parent,\
+ .flags = _flags,\
+ .cfg = &(struct stm32_pll_cfg) {\
+ .offset = _offset,\
+ .ops = &pll_ops,\
+ .ops_sec = &pll1_ops,\
+ },\
+ .func = _clk_sregister_pll,\
+}
+
+static int mp1_sgate_clk_enable(struct clk_hw *hw)
+{
+ struct clk_gate *gate = to_clk_gate(hw);
+ unsigned long flags = 0;
+
+ spin_lock_irqsave(gate->lock, flags);
+
+ stm32_clk_bit_secure(STM32_SET_BITS, BIT(gate->bit_idx),
+ gate->reg);
+
+ spin_unlock_irqrestore(gate->lock, flags);
+
+ return 0;
+}
+
+static void mp1_sgate_clk_disable(struct clk_hw *hw)
+{
+ struct clk_gate *gate = to_clk_gate(hw);
+ unsigned long flags = 0;
+
+ spin_lock_irqsave(gate->lock, flags);
+
+ stm32_clk_bit_secure(STM32_SET_BITS, BIT(gate->bit_idx),
+ gate->reg + RCC_CLR);
+
+ spin_unlock_irqrestore(gate->lock, flags);
+}
+
+static const struct clk_ops mp1_sgate_clk_ops = {
+ .enable = mp1_sgate_clk_enable,
+ .disable = mp1_sgate_clk_disable,
+ .is_enabled = clk_gate_is_enabled,
+};
+
+static int mp1_s_mgate_clk_enable(struct clk_hw *hw)
+{
+ struct clk_gate *gate = to_clk_gate(hw);
+ struct stm32_clk_mgate *clk_mgate = to_clk_mgate(gate);
+
+ clk_mgate->mgate->flag |= clk_mgate->mask;
+
+ mp1_sgate_clk_enable(hw);
+
+ return 0;
+}
+
+static void mp1_s_mgate_clk_disable(struct clk_hw *hw)
+{
+ struct clk_gate *gate = to_clk_gate(hw);
+ struct stm32_clk_mgate *clk_mgate = to_clk_mgate(gate);
+
+ clk_mgate->mgate->flag &= ~clk_mgate->mask;
+
+ if (clk_mgate->mgate->flag == 0)
+ mp1_sgate_clk_disable(hw);
+}
+
+static const struct clk_ops mp1_s_mgate_clk_ops = {
+ .enable = mp1_s_mgate_clk_enable,
+ .disable = mp1_s_mgate_clk_disable,
+ .is_enabled = clk_gate_is_enabled,
+
+};
+
+static u8 clk_s_mmux_get_parent(struct clk_hw *hw)
+{
+ return clk_smux_ops.get_parent(hw);
+}
+
+static int clk_s_mmux_set_parent(struct clk_hw *hw, u8 index)
+{
+ struct clk_mux *mux = to_clk_mux(hw);
+ struct stm32_clk_mmux *clk_mmux = to_clk_mmux(mux);
+ struct clk_hw *hwp;
+ int ret, n;
+
+ ret = clk_smux_ops.set_parent(hw, index);
+ if (ret)
+ return ret;
+
+ hwp = clk_hw_get_parent(hw);
+
+ for (n = 0; n < clk_mmux->mmux->nbr_clk; n++)
+ if (clk_mmux->mmux->hws[n] != hw)
+ clk_hw_reparent(clk_mmux->mmux->hws[n], hwp);
+
+ return 0;
+}
+
+static const struct clk_ops clk_s_mmux_ops = {
+ .get_parent = clk_s_mmux_get_parent,
+ .set_parent = clk_s_mmux_set_parent,
+ .determine_rate = __clk_mux_determine_rate,
+};
+
+#define SMUX(_id, _name, _parents, _flags,\
+ _offset, _shift, _width, _mux_flags)\
+{\
+ .id = _id,\
+ .name = _name,\
+ .parent_names = _parents,\
+ .num_parents = ARRAY_SIZE(_parents),\
+ .flags = _flags,\
+ .cfg = &(struct mux_cfg) {\
+ .reg_off = _offset,\
+ .shift = _shift,\
+ .width = _width,\
+ .mux_flags = _mux_flags,\
+ },\
+ .func = __clk_hw_register_mux,\
+}
+
+#define SDIV_TABLE(_id, _name, _parent, _flags, _offset, _shift, _width,\
+ _div_flags, _div_table)\
+{\
+ .id = _id,\
+ .name = _name,\
+ .parent_name = _parent,\
+ .flags = _flags,\
+ .cfg = &(struct div_cfg) {\
+ .reg_off = _offset,\
+ .shift = _shift,\
+ .width = _width,\
+ .div_flags = _div_flags,\
+ .table = _div_table,\
+ },\
+ .func = __clk_hw_register_divider_table,\
+}
+
+#define SDIV(_id, _name, _parent, _flags, _offset, _shift, _width,\
+ _div_flags)\
+ SDIV_TABLE(_id, _name, _parent, _flags, _offset, _shift, _width,\
+ _div_flags, NULL)
+
+#define _S_GATE(_gate_offset, _gate_bit_idx, _gate_flags)\
+ _STM32_GATE(_gate_offset, _gate_bit_idx, _gate_flags,\
+ NULL, NULL, &clk_sgate_ops)
+
+#define SGATE(_id, _name, _parent, _flags, _offset, _bit_idx, _gate_flags)\
+ STM32_GATE(_id, _name, _parent, _flags,\
+ _S_GATE(_offset, _bit_idx, _gate_flags))
+
+#define _S_GATE_MP1(_gate_offset, _gate_bit_idx, _gate_flags)\
+ _STM32_GATE(_gate_offset, _gate_bit_idx, _gate_flags,\
+ NULL, &mp1_gate_clk_ops, &mp1_sgate_clk_ops)
+
+#define SGATE_MP1(_id, _name, _parent, _flags, _offset, _bit_idx, _gate_flags)\
+ STM32_GATE(_id, _name, _parent, _flags,\
+ _S_GATE_MP1(_offset, _bit_idx, _gate_flags))
+
+#define _S_DIV(_div_offset, _div_shift, _div_width, _div_flags, _div_table)\
+ _STM32_DIV(_div_offset, _div_shift, _div_width,\
+ _div_flags, _div_table, NULL, &clk_sdivider_ops)
+
+#define _S_MUX(_offset, _shift, _width, _mux_flags)\
+ _STM32_MUX(_offset, _shift, _width, _mux_flags,\
+ NULL, NULL, &clk_smux_ops)
+
+#define _S_PLL1_P_DIV(_div_offset, _div_shift, _div_width, _div_flags,\
+ _div_table)\
+ _STM32_DIV(_div_offset, _div_shift, _div_width,\
+ _div_flags, _div_table, NULL, &clk_sdivider_pll1_p_ops)
+
enum {
G_SAI1,
G_SAI2,
@@ -1402,6 +2018,7 @@ enum {
G_CRYP1,
G_HASH1,
G_BKPSRAM,
+ G_DDRPERFM,
G_LAST
};
@@ -1409,7 +2026,7 @@ enum {
static struct stm32_mgate mp1_mgate[G_LAST];
#define _K_GATE(_id, _gate_offset, _gate_bit_idx, _gate_flags,\
- _mgate, _ops)\
+ _mgate, _ops, _ops_sec)\
[_id] = {\
&(struct gate_cfg) {\
.reg_off = _gate_offset,\
@@ -1418,15 +2035,24 @@ static struct stm32_mgate mp1_mgate[G_LAST];
},\
.mgate = _mgate,\
.ops = _ops,\
+ .ops_sec = _ops_sec,\
}
#define K_GATE(_id, _gate_offset, _gate_bit_idx, _gate_flags)\
_K_GATE(_id, _gate_offset, _gate_bit_idx, _gate_flags,\
- NULL, &mp1_gate_clk_ops)
+ NULL, &mp1_gate_clk_ops, NULL)
+
+#define K_GATE_S(_id, _gate_offset, _gate_bit_idx, _gate_flags)\
+ _K_GATE(_id, _gate_offset, _gate_bit_idx, _gate_flags,\
+ NULL, &mp1_gate_clk_ops, &mp1_sgate_clk_ops)
#define K_MGATE(_id, _gate_offset, _gate_bit_idx, _gate_flags)\
_K_GATE(_id, _gate_offset, _gate_bit_idx, _gate_flags,\
- &mp1_mgate[_id], &mp1_mgate_clk_ops)
+ &mp1_mgate[_id], &mp1_mgate_clk_ops, NULL)
+
+#define K_MGATE_S(_id, _gate_offset, _gate_bit_idx, _gate_flags)\
+ _K_GATE(_id, _gate_offset, _gate_bit_idx, _gate_flags,\
+ &mp1_mgate[_id], &mp1_mgate_clk_ops, &mp1_s_mgate_clk_ops)
/* Peripheral gates */
static struct stm32_gate_cfg per_gate_cfg[G_LAST] = {
@@ -1488,20 +2114,21 @@ static struct stm32_gate_cfg per_gate_cfg[G_LAST] = {
K_GATE(G_STGENRO, RCC_APB4ENSETR, 20, 0),
K_MGATE(G_USBPHY, RCC_APB4ENSETR, 16, 0),
K_GATE(G_IWDG2, RCC_APB4ENSETR, 15, 0),
+ K_GATE(G_DDRPERFM, RCC_APB4ENSETR, 8, 0),
K_MGATE(G_DSI, RCC_APB4ENSETR, 4, 0),
K_MGATE(G_LTDC, RCC_APB4ENSETR, 0, 0),
- K_GATE(G_STGEN, RCC_APB5ENSETR, 20, 0),
- K_GATE(G_BSEC, RCC_APB5ENSETR, 16, 0),
- K_GATE(G_IWDG1, RCC_APB5ENSETR, 15, 0),
- K_GATE(G_TZPC, RCC_APB5ENSETR, 13, 0),
- K_GATE(G_TZC2, RCC_APB5ENSETR, 12, 0),
- K_GATE(G_TZC1, RCC_APB5ENSETR, 11, 0),
- K_GATE(G_RTCAPB, RCC_APB5ENSETR, 8, 0),
- K_MGATE(G_USART1, RCC_APB5ENSETR, 4, 0),
- K_MGATE(G_I2C6, RCC_APB5ENSETR, 3, 0),
- K_MGATE(G_I2C4, RCC_APB5ENSETR, 2, 0),
- K_MGATE(G_SPI6, RCC_APB5ENSETR, 0, 0),
+ K_GATE_S(G_STGEN, RCC_APB5ENSETR, 20, 0),
+ K_GATE_S(G_BSEC, RCC_APB5ENSETR, 16, 0),
+ K_GATE_S(G_IWDG1, RCC_APB5ENSETR, 15, 0),
+ K_GATE_S(G_TZPC, RCC_APB5ENSETR, 13, 0),
+ K_GATE_S(G_TZC2, RCC_APB5ENSETR, 12, 0),
+ K_GATE_S(G_TZC1, RCC_APB5ENSETR, 11, 0),
+ K_GATE_S(G_RTCAPB, RCC_APB5ENSETR, 8, 0),
+ K_MGATE_S(G_USART1, RCC_APB5ENSETR, 4, 0),
+ K_MGATE_S(G_I2C6, RCC_APB5ENSETR, 3, 0),
+ K_MGATE_S(G_I2C4, RCC_APB5ENSETR, 2, 0),
+ K_MGATE_S(G_SPI6, RCC_APB5ENSETR, 0, 0),
K_MGATE(G_SDMMC3, RCC_AHB2ENSETR, 16, 0),
K_MGATE(G_USBO, RCC_AHB2ENSETR, 8, 0),
@@ -1530,11 +2157,11 @@ static struct stm32_gate_cfg per_gate_cfg[G_LAST] = {
K_GATE(G_GPIOB, RCC_AHB4ENSETR, 1, 0),
K_GATE(G_GPIOA, RCC_AHB4ENSETR, 0, 0),
- K_GATE(G_BKPSRAM, RCC_AHB5ENSETR, 8, 0),
- K_MGATE(G_RNG1, RCC_AHB5ENSETR, 6, 0),
- K_GATE(G_HASH1, RCC_AHB5ENSETR, 5, 0),
- K_GATE(G_CRYP1, RCC_AHB5ENSETR, 4, 0),
- K_GATE(G_GPIOZ, RCC_AHB5ENSETR, 0, 0),
+ K_GATE_S(G_BKPSRAM, RCC_AHB5ENSETR, 8, 0),
+ K_MGATE_S(G_RNG1, RCC_AHB5ENSETR, 6, 0),
+ K_GATE_S(G_HASH1, RCC_AHB5ENSETR, 5, 0),
+ K_GATE_S(G_CRYP1, RCC_AHB5ENSETR, 4, 0),
+ K_GATE_S(G_GPIOZ, RCC_AHB5ENSETR, 0, 0),
K_GATE(G_USBH, RCC_AHB6ENSETR, 24, 0),
K_GATE(G_CRC1, RCC_AHB6ENSETR, 20, 0),
@@ -1542,12 +2169,15 @@ static struct stm32_gate_cfg per_gate_cfg[G_LAST] = {
K_MGATE(G_SDMMC1, RCC_AHB6ENSETR, 16, 0),
K_MGATE(G_QSPI, RCC_AHB6ENSETR, 14, 0),
K_MGATE(G_FMC, RCC_AHB6ENSETR, 12, 0),
+
K_GATE(G_ETHMAC, RCC_AHB6ENSETR, 10, 0),
K_GATE(G_ETHRX, RCC_AHB6ENSETR, 9, 0),
K_GATE(G_ETHTX, RCC_AHB6ENSETR, 8, 0),
K_GATE(G_ETHCK, RCC_AHB6ENSETR, 7, 0),
+
K_MGATE(G_GPU, RCC_AHB6ENSETR, 5, 0),
K_GATE(G_MDMA, RCC_AHB6ENSETR, 0, 0),
+
K_GATE(G_ETHSTP, RCC_AHB6LPENSETR, 11, 0),
};
@@ -1592,7 +2222,7 @@ enum {
static struct stm32_mmux ker_mux[M_LAST];
-#define _K_MUX(_id, _offset, _shift, _width, _mux_flags, _mmux, _ops)\
+#define _K_MUX(_id, _offset, _shift, _width, _mux_flags, _mmux, _ops, _ops_sec)\
[_id] = {\
&(struct mux_cfg) {\
.reg_off = _offset,\
@@ -1603,15 +2233,24 @@ static struct stm32_mmux ker_mux[M_LAST];
},\
.mmux = _mmux,\
.ops = _ops,\
+ .ops_sec = _ops_sec,\
}
#define K_MUX(_id, _offset, _shift, _width, _mux_flags)\
_K_MUX(_id, _offset, _shift, _width, _mux_flags,\
- NULL, NULL)
+ NULL, NULL, NULL)
+
+#define K_MUX_S(_id, _offset, _shift, _width, _mux_flags)\
+ _K_MUX(_id, _offset, _shift, _width, _mux_flags,\
+ NULL, NULL, &clk_smux_ops)
#define K_MMUX(_id, _offset, _shift, _width, _mux_flags)\
_K_MUX(_id, _offset, _shift, _width, _mux_flags,\
- &ker_mux[_id], &clk_mmux_ops)
+ &ker_mux[_id], &clk_mmux_ops, NULL)
+
+#define K_MMUX_S(_id, _offset, _shift, _width, _mux_flags)\
+ _K_MUX(_id, _offset, _shift, _width, _mux_flags,\
+ &ker_mux[_id], &clk_mmux_ops, &clk_s_mmux_ops)
static const struct stm32_mux_cfg ker_mux_cfg[M_LAST] = {
/* Kernel multi mux */
@@ -1627,7 +2266,7 @@ static const struct stm32_mux_cfg ker_mux_cfg[M_LAST] = {
K_MMUX(M_UART78, RCC_UART78CKSELR, 0, 3, 0),
K_MMUX(M_SAI1, RCC_SAI1CKSELR, 0, 3, 0),
K_MMUX(M_ETHCK, RCC_ETHCKSELR, 0, 2, 0),
- K_MMUX(M_I2C46, RCC_I2C46CKSELR, 0, 3, 0),
+ K_MMUX_S(M_I2C46, RCC_I2C46CKSELR, 0, 3, 0),
/* Kernel simple mux */
K_MUX(M_RNG2, RCC_RNG2CKSELR, 0, 2, 0),
@@ -1648,10 +2287,10 @@ static const struct stm32_mux_cfg ker_mux_cfg[M_LAST] = {
K_MUX(M_ADC12, RCC_ADCCKSELR, 0, 2, 0),
K_MUX(M_DSI, RCC_DSICKSELR, 0, 1, 0),
K_MUX(M_CKPER, RCC_CPERCKSELR, 0, 2, 0),
- K_MUX(M_RNG1, RCC_RNG1CKSELR, 0, 2, 0),
- K_MUX(M_STGEN, RCC_STGENCKSELR, 0, 2, 0),
- K_MUX(M_USART1, RCC_UART1CKSELR, 0, 3, 0),
- K_MUX(M_SPI6, RCC_SPI6CKSELR, 0, 3, 0),
+ K_MUX_S(M_RNG1, RCC_RNG1CKSELR, 0, 2, 0),
+ K_MUX_S(M_STGEN, RCC_STGENCKSELR, 0, 2, 0),
+ K_MUX_S(M_USART1, RCC_UART1CKSELR, 0, 3, 0),
+ K_MUX_S(M_SPI6, RCC_SPI6CKSELR, 0, 3, 0),
};
static const struct clock_config stm32mp1_clock_cfg[] = {
@@ -1660,11 +2299,12 @@ static const struct clock_config stm32mp1_clock_cfg[] = {
RCC_HSICFGR, 0, 2, CLK_DIVIDER_READ_ONLY),
/* External / Internal Oscillators */
- GATE_MP1(CK_HSE, "ck_hse", "clk-hse", 0, RCC_OCENSETR, 8, 0),
- GATE_MP1(CK_CSI, "ck_csi", "clk-csi", 0, RCC_OCENSETR, 4, 0),
- GATE_MP1(CK_HSI, "ck_hsi", "clk-hsi-div", 0, RCC_OCENSETR, 0, 0),
- GATE(CK_LSI, "ck_lsi", "clk-lsi", 0, RCC_RDLSICR, 0, 0),
- GATE(CK_LSE, "ck_lse", "clk-lse", 0, RCC_BDCR, 0, 0),
+ SGATE_MP1(CK_HSE, "ck_hse", "clk-hse", 0, RCC_OCENSETR, 8, 0),
+ SGATE_MP1(CK_CSI, "ck_csi", "clk-csi", CLK_IS_CRITICAL,
+ RCC_OCENSETR, 4, 0),
+ SGATE_MP1(CK_HSI, "ck_hsi", "clk-hsi-div", 0, RCC_OCENSETR, 0, 0),
+ SGATE(CK_LSI, "ck_lsi", "clk-lsi", 0, RCC_RDLSICR, 0, 0),
+ SGATE(CK_LSE, "ck_lse", "clk-lse", 0, RCC_BDCR, 0, 0),
FIXED_FACTOR(CK_HSE_DIV2, "clk-hse-div2", "ck_hse", 0, 1, 2),
@@ -1679,31 +2319,31 @@ static const struct clock_config stm32mp1_clock_cfg[] = {
0, 2, CLK_MUX_READ_ONLY),
/* PLLs */
- PLL(PLL1, "pll1", "ref1", CLK_IGNORE_UNUSED, RCC_PLL1CR),
+ PLL_1(PLL1, "pll1", "ref1", CLK_IGNORE_UNUSED, RCC_PLL1CR),
PLL(PLL2, "pll2", "ref1", CLK_IGNORE_UNUSED, RCC_PLL2CR),
PLL(PLL3, "pll3", "ref3", CLK_IGNORE_UNUSED, RCC_PLL3CR),
PLL(PLL4, "pll4", "ref4", CLK_IGNORE_UNUSED, RCC_PLL4CR),
/* ODF */
- COMPOSITE(PLL1_P, "pll1_p", PARENT("pll1"), 0,
- _GATE(RCC_PLL1CR, 4, 0),
+ COMPOSITE(PLL1_P, "pll1_p", PARENT("pll1"), CLK_SET_RATE_PARENT,
+ _S_GATE(RCC_PLL1CR, 4, 0),
_NO_MUX,
- _DIV(RCC_PLL1CFGR2, 0, 7, 0, NULL)),
+ _S_PLL1_P_DIV(RCC_PLL1CFGR2, 0, 7, 0, NULL)),
COMPOSITE(PLL2_P, "pll2_p", PARENT("pll2"), 0,
- _GATE(RCC_PLL2CR, 4, 0),
+ _S_GATE(RCC_PLL2CR, 4, 0),
_NO_MUX,
- _DIV(RCC_PLL2CFGR2, 0, 7, 0, NULL)),
+ _S_DIV(RCC_PLL2CFGR2, 0, 7, 0, NULL)),
COMPOSITE(PLL2_Q, "pll2_q", PARENT("pll2"), 0,
- _GATE(RCC_PLL2CR, 5, 0),
+ _S_GATE(RCC_PLL2CR, 5, 0),
_NO_MUX,
- _DIV(RCC_PLL2CFGR2, 8, 7, 0, NULL)),
+ _S_DIV(RCC_PLL2CFGR2, 8, 7, 0, NULL)),
COMPOSITE(PLL2_R, "pll2_r", PARENT("pll2"), CLK_IS_CRITICAL,
- _GATE(RCC_PLL2CR, 6, 0),
+ _S_GATE(RCC_PLL2CR, 6, 0),
_NO_MUX,
- _DIV(RCC_PLL2CFGR2, 16, 7, 0, NULL)),
+ _S_DIV(RCC_PLL2CFGR2, 16, 7, 0, NULL)),
COMPOSITE(PLL3_P, "pll3_p", PARENT("pll3"), 0,
_GATE(RCC_PLL3CR, 4, 0),
@@ -1739,20 +2379,21 @@ static const struct clock_config stm32mp1_clock_cfg[] = {
MUX(CK_PER, "ck_per", per_src, CLK_OPS_PARENT_ENABLE,
RCC_CPERCKSELR, 0, 2, 0),
- MUX(CK_MPU, "ck_mpu", cpu_src, CLK_OPS_PARENT_ENABLE |
- CLK_IS_CRITICAL, RCC_MPCKSELR, 0, 2, 0),
+ SMUX(CK_MPU, "ck_mpu", cpu_src, CLK_OPS_PARENT_ENABLE |
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ RCC_MPCKSELR, 0, 2, 0),
COMPOSITE(CK_AXI, "ck_axi", axi_src, CLK_IS_CRITICAL |
CLK_OPS_PARENT_ENABLE,
_NO_GATE,
- _MUX(RCC_ASSCKSELR, 0, 2, 0),
- _DIV(RCC_AXIDIVR, 0, 3, 0, axi_div_table)),
+ _S_MUX(RCC_ASSCKSELR, 0, 2, 0),
+ _S_DIV(RCC_AXIDIVR, 0, 3, 0, axi_div_table)),
COMPOSITE(CK_MCU, "ck_mcu", mcu_src, CLK_IS_CRITICAL |
CLK_OPS_PARENT_ENABLE,
_NO_GATE,
- _MUX(RCC_MSSCKSELR, 0, 2, 0),
- _DIV(RCC_MCUDIVR, 0, 4, 0, mcu_div_table)),
+ _S_MUX(RCC_MSSCKSELR, 0, 2, 0),
+ _S_DIV(RCC_MCUDIVR, 0, 4, 0, mcu_div_table)),
DIV_TABLE(NO_ID, "pclk1", "ck_mcu", CLK_IGNORE_UNUSED, RCC_APB1DIVR, 0,
3, CLK_DIVIDER_READ_ONLY, apb_div_table),
@@ -1897,6 +2538,7 @@ static const struct clock_config stm32mp1_clock_cfg[] = {
PCLK(CRC1, "crc1", "ck_axi", 0, G_CRC1),
PCLK(USBH, "usbh", "ck_axi", 0, G_USBH),
PCLK(ETHSTP, "ethstp", "ck_axi", 0, G_ETHSTP),
+ PCLK(DDRPERFM, "ddrperfm", "pclk4", 0, G_DDRPERFM),
/* Kernel clocks */
KCLK(SDMMC1_K, "sdmmc1_k", sdmmc12_src, 0, G_SDMMC1, M_SDMMC12),
@@ -1907,7 +2549,7 @@ static const struct clock_config stm32mp1_clock_cfg[] = {
KCLK(RNG1_K, "rng1_k", rng_src, 0, G_RNG1, M_RNG1),
KCLK(RNG2_K, "rng2_k", rng_src, 0, G_RNG2, M_RNG2),
KCLK(USBPHY_K, "usbphy_k", usbphy_src, 0, G_USBPHY, M_USBPHY),
- KCLK(STGEN_K, "stgen_k", stgen_src, CLK_IS_CRITICAL, G_STGEN, M_STGEN),
+ KCLK(STGEN_K, "stgen_k", stgen_src, CLK_IS_CRITICAL, G_STGEN, M_STGEN),
KCLK(SPDIF_K, "spdif_k", spdif_src, 0, G_SPDIF, M_SPDIF),
KCLK(SPI1_K, "spi1_k", spi123_src, 0, G_SPI1, M_SPI1),
KCLK(SPI2_K, "spi2_k", spi123_src, 0, G_SPI2, M_SPI23),
@@ -1957,16 +2599,15 @@ static const struct clock_config stm32mp1_clock_cfg[] = {
CLK_SET_RATE_NO_REPARENT,
_NO_GATE,
_MMUX(M_ETHCK),
- _DIV(RCC_ETHCKSELR, 4, 4, CLK_DIVIDER_ALLOW_ZERO, NULL)),
+ _DIV(RCC_ETHCKSELR, 4, 4, 0, NULL)),
/* RTC clock */
- DIV(NO_ID, "ck_hse_rtc", "ck_hse", 0, RCC_RTCDIVR, 0, 7,
- CLK_DIVIDER_ALLOW_ZERO),
+ SDIV(NO_ID, "ck_hse_rtc", "ck_hse", 0, RCC_RTCDIVR, 0, 6, 0),
COMPOSITE(RTC, "ck_rtc", rtc_src, CLK_OPS_PARENT_ENABLE |
CLK_SET_RATE_PARENT,
- _GATE(RCC_BDCR, 20, 0),
- _MUX(RCC_BDCR, 16, 2, 0),
+ _S_GATE(RCC_BDCR, 20, 0),
+ _S_MUX(RCC_BDCR, 16, 2, 0),
_NO_DIV),
/* MCO clocks */
@@ -2084,21 +2725,364 @@ static int stm32_rcc_init(struct device_node *np,
return of_clk_add_hw_provider(np, of_clk_hw_onecell_get, clk_data);
}
+static void __iomem *rcc_base;
+
+static int stm32_rcc_init_pwr(struct device_node *np);
+
static void stm32mp1_rcc_init(struct device_node *np)
{
- void __iomem *base;
-
- base = of_iomap(np, 0);
- if (!base) {
+ rcc_base = of_iomap(np, 0);
+ if (!rcc_base) {
pr_err("%s: unable to map resource", np->name);
of_node_put(np);
return;
}
- if (stm32_rcc_init(np, base, stm32mp1_match_data)) {
- iounmap(base);
+ if (stm32_rcc_init(np, rcc_base, stm32mp1_match_data)) {
+ iounmap(rcc_base);
of_node_put(np);
+ return;
}
+
+ stm32_rcc_init_pwr(np);
}
CLK_OF_DECLARE_DRIVER(stm32mp1_rcc, "st,stm32mp1-rcc", stm32mp1_rcc_init);
+
+/*
+ * RCC POWER
+ *
+ */
+
+struct reg {
+ u32 address;
+ u32 val;
+};
+
+/* This table lists the IPs for which CSLEEP is enabled */
+static const struct reg lp_table[] = {
+ { 0xB04, 0x00000000 }, /* APB1 */
+ { 0xB0C, 0x00000000 }, /* APB2 */
+ { 0xB14, 0x00000800 }, /* APB3 */
+ { 0x304, 0x00000000 }, /* APB4 */
+ { 0xB1C, 0x00000000 }, /* AHB2 */
+ { 0xB24, 0x00000000 }, /* AHB3 */
+ { 0xB2C, 0x00000000 }, /* AHB4 */
+ { 0x31C, 0x00000000 }, /* AHB6 */
+ { 0xB34, 0x00000000 }, /* AXIM */
+ { 0xB3C, 0x00000000 }, /* MLAHB */
+};
+
+struct sreg {
+ u32 address;
+ u32 secured;
+ u32 val;
+ u8 setclr;
+};
+
+#define SREG(_addr, _setclr, _sec) { \
+ .address = _addr,\
+ .setclr = _setclr,\
+ .secured = _sec,\
+ .val = 0,\
+}
+
+static struct sreg clock_gating[] = {
+ SREG(RCC_APB1ENSETR, 1, 0),
+ SREG(RCC_APB2ENSETR, 1, 0),
+ SREG(RCC_APB3ENSETR, 1, 0),
+ SREG(RCC_APB4ENSETR, 1, 0),
+ SREG(RCC_APB5ENSETR, 1, 1),
+ SREG(RCC_AHB5ENSETR, 1, 1),
+ SREG(RCC_AHB6ENSETR, 1, 0),
+ SREG(RCC_AHB2ENSETR, 1, 0),
+ SREG(RCC_AHB3ENSETR, 1, 0),
+ SREG(RCC_AHB4ENSETR, 1, 0),
+ SREG(RCC_MLAHBENSETR, 1, 0),
+ SREG(RCC_MCO1CFGR, 0, 0),
+ SREG(RCC_MCO2CFGR, 0, 0),
+ SREG(RCC_PLL4CFGR2, 0, 0),
+};
+
+struct smux {
+ u32 clk_id;
+ u32 mux_id;
+ struct clk_hw *hw;
+};
+
+#define KER_SRC(_clk_id, _mux_id)\
+{\
+ .clk_id = _clk_id,\
+ .mux_id = _mux_id,\
+}
+
+struct smux _mux_kernel[M_LAST] = {
+ KER_SRC(SDMMC1_K, M_SDMMC12),
+ KER_SRC(SDMMC3_K, M_SDMMC3),
+ KER_SRC(FMC_K, M_FMC),
+ KER_SRC(QSPI_K, M_QSPI),
+ KER_SRC(RNG1_K, M_RNG1),
+ KER_SRC(RNG2_K, M_RNG2),
+ KER_SRC(USBPHY_K, M_USBPHY),
+ KER_SRC(USBO_K, M_USBO),
+ KER_SRC(STGEN_K, M_STGEN),
+ KER_SRC(SPDIF_K, M_SPDIF),
+ KER_SRC(SPI1_K, M_SPI1),
+ KER_SRC(SPI2_K, M_SPI23),
+ KER_SRC(SPI4_K, M_SPI45),
+ KER_SRC(SPI6_K, M_SPI6),
+ KER_SRC(CEC_K, M_CEC),
+ KER_SRC(I2C1_K, M_I2C12),
+ KER_SRC(I2C3_K, M_I2C35),
+ KER_SRC(I2C4_K, M_I2C46),
+ KER_SRC(LPTIM1_K, M_LPTIM1),
+ KER_SRC(LPTIM2_K, M_LPTIM23),
+ KER_SRC(LPTIM4_K, M_LPTIM45),
+ KER_SRC(USART1_K, M_USART1),
+ KER_SRC(USART2_K, M_UART24),
+ KER_SRC(USART3_K, M_UART35),
+ KER_SRC(USART6_K, M_USART6),
+ KER_SRC(UART7_K, M_UART78),
+ KER_SRC(SAI1_K, M_SAI1),
+ KER_SRC(SAI2_K, M_SAI2),
+ KER_SRC(SAI3_K, M_SAI3),
+ KER_SRC(SAI4_K, M_SAI4),
+ KER_SRC(DSI_K, M_DSI),
+ KER_SRC(FDCAN_K, M_FDCAN),
+ KER_SRC(ADC12_K, M_ADC12),
+ KER_SRC(ETHCK_K, M_ETHCK),
+ KER_SRC(CK_PER, M_CKPER),
+};
+
+static struct sreg pll_clock[] = {
+ SREG(RCC_PLL3CR, 0, 0),
+ SREG(RCC_PLL4CR, 0, 0),
+};
+
+static struct sreg mcu_source[] = {
+ SREG(RCC_MCUDIVR, 0, 0),
+ SREG(RCC_MSSCKSELR, 0, 0),
+};
+
+#define RCC_IRQ_FLAGS_MASK 0x110F1F
+#define RCC_STOP_MASK (BIT(0) | BIT(1))
+#define RCC_RSTSR 0x408
+#define PWR_MPUCR 0x10
+#define PLL_EN (BIT(0))
+#define STOP_FLAG (BIT(5))
+#define SBF (BIT(11))
+#define SBF_MPU (BIT(12))
+
+static irqreturn_t stm32mp1_rcc_irq_handler(int irq, void *sdata)
+{
+ pr_info("RCC generic interrupt received\n");
+
+ /* clear interrupt flag */
+ SMC(STM32_SVC_RCC, STM32_WRITE, RCC_CIFR, RCC_IRQ_FLAGS_MASK);
+
+ return IRQ_HANDLED;
+}
+
+static void stm32mp1_backup_sreg(struct sreg *sreg, int size)
+{
+ int i;
+
+ for (i = 0; i < size; i++)
+ sreg[i].val = readl_relaxed(rcc_base + sreg[i].address);
+}
+
+static void stm32mp1_restore_sreg(struct sreg *sreg, int size)
+{
+ int i;
+ u32 val, address, reg;
+ int soc_secured;
+
+ soc_secured = _is_soc_secured(rcc_base);
+
+ for (i = 0; i < size; i++) {
+ val = sreg[i].val;
+ address = sreg[i].address;
+
+ reg = readl_relaxed(rcc_base + address);
+ if (reg == val)
+ continue;
+
+ if (soc_secured && sreg[i].secured) {
+ SMC(STM32_SVC_RCC, STM32_WRITE, address, val);
+ if (sreg[i].setclr)
+ SMC(STM32_SVC_RCC, STM32_WRITE,
+ address + RCC_CLR, ~val);
+ } else {
+ writel_relaxed(val, rcc_base + address);
+ if (sreg[i].setclr)
+ writel_relaxed(~val,
+ rcc_base + address + RCC_CLR);
+ }
+ }
+}
+
+static void stm32mp1_restore_pll(struct sreg *sreg, int size)
+{
+ int i;
+ u32 val;
+ void __iomem *address;
+
+ for (i = 0; i < size; i++) {
+ val = sreg[i].val;
+ address = sreg[i].address + rcc_base;
+
+		/* 1st step: if the PLL was off, turn it on first */
+ if ((readl_relaxed(address) & PLL_EN) == 0) {
+ writel_relaxed(PLL_EN, address);
+ while ((readl_relaxed(address) & PLL_RDY) == 0)
+ ;
+ }
+
+		/* 2nd step: restore the ODF (output divider) settings */
+ writel_relaxed(val, address);
+ }
+}
+
+static void stm32mp1_backup_mux(struct device_node *np,
+ struct smux *smux, int size)
+{
+ int i;
+ struct of_phandle_args clkspec;
+
+ clkspec.np = np;
+ clkspec.args_count = 1;
+
+ for (i = 0; i < size; i++) {
+ clkspec.args[0] = smux[i].clk_id;
+ smux[i].hw = __clk_get_hw(of_clk_get_from_provider(&clkspec));
+ }
+}
+
+static void stm32mp1_restore_mux(struct smux *smux, int size)
+{
+ int i;
+ struct clk_hw *hw, *hwp1, *hwp2;
+ struct mux_cfg *mux;
+ u8 idx;
+
+	/* These muxes are glitch-free, so restore them through the
+	 * clock framework to guarantee that CLK_OPS_PARENT_ENABLE
+	 * semantics are honoured.
+	 */
+ for (i = 0; i < M_LAST; i++) {
+		/* get parent stored in clock framework */
+ hw = smux[i].hw;
+ hwp1 = clk_hw_get_parent(hw);
+
+ /* Get parent corresponding to mux register */
+ mux = ker_mux_cfg[smux[i].mux_id].mux;
+ idx = readl_relaxed(rcc_base + mux->reg_off) >> mux->shift;
+ idx &= (BIT(mux->width) - 1);
+ hwp2 = clk_hw_get_parent_by_index(hw, idx);
+
+		/* check if parents from mux register & clock framework differ */
+ if (hwp1 != hwp2) {
+ /* update first clock framework with the true parent */
+ clk_set_parent(hw->clk, hwp2->clk);
+
+ /* Restore now new parent */
+ clk_set_parent(hw->clk, hwp1->clk);
+ }
+ }
+}
+
+#define RCC_BIT_HSI 0
+#define RCC_BIT_CSI 4
+#define RCC_BIT_HSE 8
+
+#define RCC_CK_OSC_MASK (BIT(RCC_BIT_HSE) | BIT(RCC_BIT_CSI) | BIT(RCC_BIT_HSI))
+
+#define RCC_CK_XXX_KER_MASK (RCC_CK_OSC_MASK << 1)
+
+static int stm32mp1_clk_suspend(void)
+{
+ u32 reg;
+
+ /* Save pll regs */
+ stm32mp1_backup_sreg(pll_clock, ARRAY_SIZE(pll_clock));
+
+ /* Save mcu source */
+ stm32mp1_backup_sreg(mcu_source, ARRAY_SIZE(mcu_source));
+
+ /* Save clock gating regs */
+ stm32mp1_backup_sreg(clock_gating, ARRAY_SIZE(clock_gating));
+
+ /* Enable ck_xxx_ker clocks if ck_xxx was on */
+ reg = readl_relaxed(rcc_base + RCC_OCENSETR) & RCC_CK_OSC_MASK;
+ writel_relaxed(reg << 1, rcc_base + RCC_OCENSETR);
+
+ SMC(STM32_SVC_RCC, STM32_WRITE, RCC_RSTSR, 0);
+
+ return 0;
+}
+
+static void stm32mp1_clk_resume(void)
+{
+
+ /* Restore pll */
+ stm32mp1_restore_pll(pll_clock, ARRAY_SIZE(pll_clock));
+
+ /* Restore mcu source */
+ stm32mp1_restore_sreg(mcu_source, ARRAY_SIZE(mcu_source));
+
+ stm32mp1_restore_sreg(clock_gating, ARRAY_SIZE(clock_gating));
+
+ stm32mp1_restore_mux(_mux_kernel, ARRAY_SIZE(_mux_kernel));
+
+ /* Disable ck_xxx_ker clocks */
+ stm32_clk_bit_secure(STM32_SET_BITS, RCC_CK_XXX_KER_MASK,
+ rcc_base + RCC_OCENSETR + RCC_CLR);
+}
+
+static struct syscore_ops stm32mp1_clk_ops = {
+ .suspend = stm32mp1_clk_suspend,
+ .resume = stm32mp1_clk_resume,
+};
+
+static struct irqaction rcc_irq = {
+ .name = "rcc irq",
+ .flags = IRQF_ONESHOT,
+ .handler = stm32mp1_rcc_irq_handler,
+};
+
+static int stm32_rcc_init_pwr(struct device_node *np)
+{
+ int irq;
+ int ret;
+ int i;
+
+ /* register generic irq */
+ irq = of_irq_get(np, 0);
+ if (irq <= 0) {
+ pr_err("%s: failed to get RCC generic IRQ\n", __func__);
+ return irq ? irq : -ENXIO;
+ }
+
+ ret = setup_irq(irq, &rcc_irq);
+ if (ret) {
+ pr_err("%s: failed to register generic IRQ\n", __func__);
+ return ret;
+ }
+
+
+ /* Configure LPEN static table */
+ for (i = 0; i < ARRAY_SIZE(lp_table); i++)
+ writel_relaxed(lp_table[i].val, rcc_base + lp_table[i].address);
+
+ /* cleanup RCC flags */
+ SMC(STM32_SVC_RCC, STM32_WRITE, RCC_CIFR, RCC_IRQ_FLAGS_MASK);
+
+ SMC(STM32_SVC_RCC, STM32_WRITE, RCC_SREQCLRR, RCC_STOP_MASK);
+
+ /* Prepare kernel clock source backup */
+ stm32mp1_backup_mux(np, _mux_kernel, ARRAY_SIZE(_mux_kernel));
+
+ register_syscore_ops(&stm32mp1_clk_ops);
+
+ return 0;
+}
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index 5413ffa..4290d9e 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -2268,6 +2268,12 @@ static int clk_core_set_parent_nolock(struct clk_core *core,
return ret;
}
+int clk_hw_set_parent(struct clk_hw *hw, struct clk_hw *parent)
+{
+ return clk_core_set_parent_nolock(hw->core, parent->core);
+}
+EXPORT_SYMBOL_GPL(clk_hw_set_parent);
+
/**
* clk_set_parent - switch the parent of a mux clk
* @clk: the mux clk whose input we are switching
diff --git a/include/dt-bindings/clock/stm32mp1-clks.h b/include/dt-bindings/clock/stm32mp1-clks.h
index 90ec780..4cdaf13 100644
--- a/include/dt-bindings/clock/stm32mp1-clks.h
+++ b/include/dt-bindings/clock/stm32mp1-clks.h
@@ -248,7 +248,4 @@
#define STM32MP1_LAST_CLK 232
-#define LTDC_K LTDC_PX
-#define ETHMAC_K ETHCK_K
-
#endif /* _DT_BINDINGS_STM32MP1_CLKS_H_ */
diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h
index d1b6d2c..ec4c906 100644
--- a/include/linux/clk-provider.h
+++ b/include/linux/clk-provider.h
@@ -778,6 +778,7 @@ unsigned int clk_hw_get_num_parents(const struct clk_hw *hw);
struct clk_hw *clk_hw_get_parent(const struct clk_hw *hw);
struct clk_hw *clk_hw_get_parent_by_index(const struct clk_hw *hw,
unsigned int index);
+int clk_hw_set_parent(struct clk_hw *hw, struct clk_hw *new_parent);
unsigned int __clk_get_enable_count(struct clk *clk);
unsigned long clk_hw_get_rate(const struct clk_hw *hw);
unsigned long __clk_get_flags(struct clk *clk);
--
2.7.4