From 207470b1f44164f76939b25ebb0d50fb81375777 Mon Sep 17 00:00:00 2001
From: Lionel VITTE <lionel.vitte@st.com>
Date: Mon, 5 Oct 2020 13:19:48 +0200
Subject: [PATCH 14/22] ARM-stm32mp1-r2-rc8-MMC-NAND

---
 .../devicetree/bindings/mmc/mmci.txt       |    2 +
 .../bindings/mtd/stm32-fmc2-nand.txt       |   61 -
 drivers/mmc/core/block.c                   |   11 +
 drivers/mmc/core/core.c                    |   31 +-
 drivers/mmc/host/mmci.c                    |  285 ++--
 drivers/mmc/host/mmci.h                    |   17 +-
 drivers/mmc/host/mmci_stm32_sdmmc.c        |  256 +++-
 drivers/mtd/nand/raw/Kconfig               |    1 +
 drivers/mtd/nand/raw/stm32_fmc2_nand.c     | 1185 ++++++++---------
 include/linux/mmc/core.h                   |    1 +
 include/linux/mmc/host.h                   |    6 +
 11 files changed, 1089 insertions(+), 767 deletions(-)
 delete mode 100644 Documentation/devicetree/bindings/mtd/stm32-fmc2-nand.txt

diff --git a/Documentation/devicetree/bindings/mmc/mmci.txt b/Documentation/devicetree/bindings/mmc/mmci.txt
index 6d3c626e017d2..4ec921e4bf344 100644
--- a/Documentation/devicetree/bindings/mmc/mmci.txt
+++ b/Documentation/devicetree/bindings/mmc/mmci.txt
@@ -28,6 +28,8 @@ specific for ux500 variant:
 - st,sig-pin-fbclk : feedback clock signal pin used.
 
 specific for sdmmc variant:
+- reg : a second base register may be defined if a delay
+  block is present and used for tuning.
 - st,sig-dir : signal direction polarity used for cmd, dat0 dat123.
 - st,neg-edge : data & command phase relation, generated on
   sd clock falling edge.
diff --git a/Documentation/devicetree/bindings/mtd/stm32-fmc2-nand.txt b/Documentation/devicetree/bindings/mtd/stm32-fmc2-nand.txt
deleted file mode 100644
index e55895e8dae44..0000000000000
--- a/Documentation/devicetree/bindings/mtd/stm32-fmc2-nand.txt
+++ /dev/null
@@ -1,61 +0,0 @@
-STMicroelectronics Flexible Memory Controller 2 (FMC2)
-NAND Interface
-
-Required properties:
-- compatible: Should be one of:
-  * st,stm32mp15-fmc2
-- reg: NAND flash controller memory areas.
-  First region contains the register location.
-  Regions 2 to 4 respectively contain the data, command,
-  and address space for CS0.
-  Regions 5 to 7 contain the same areas for CS1.
-- interrupts: The interrupt number
-- pinctrl-0: Standard Pinctrl phandle (see: pinctrl/pinctrl-bindings.txt)
-- clocks: The clock needed by the NAND flash controller
-
-Optional properties:
-- resets: Reference to a reset controller asserting the FMC controller
-- dmas: DMA specifiers (see: dma/stm32-mdma.txt)
-- dma-names: Must be "tx", "rx" and "ecc"
-
-* NAND device bindings:
-
-Required properties:
-- reg: describes the CS lines assigned to the NAND device.
-
-Optional properties:
-- nand-on-flash-bbt: see nand-controller.yaml
-- nand-ecc-strength: see nand-controller.yaml
-- nand-ecc-step-size: see nand-controller.yaml
-
-The following ECC strength and step size are currently supported:
- - nand-ecc-strength = <1>, nand-ecc-step-size = <512> (Hamming)
- - nand-ecc-strength = <4>, nand-ecc-step-size = <512> (BCH4)
- - nand-ecc-strength = <8>, nand-ecc-step-size = <512> (BCH8) (default)
-
-Example:
-
-	fmc: nand-controller@58002000 {
-		compatible = "st,stm32mp15-fmc2";
-		reg = <0x58002000 0x1000>,
-		      <0x80000000 0x1000>,
-		      <0x88010000 0x1000>,
-		      <0x88020000 0x1000>,
-		      <0x81000000 0x1000>,
-		      <0x89010000 0x1000>,
-		      <0x89020000 0x1000>;
-		interrupts = <GIC_SPI 48 IRQ_TYPE_LEVEL_HIGH>;
-		clocks = <&rcc FMC_K>;
-		resets = <&rcc FMC_R>;
-		pinctrl-names = "default";
-		pinctrl-0 = <&fmc_pins_a>;
-		#address-cells = <1>;
-		#size-cells = <0>;
-
-		nand@0 {
-			reg = <0>;
-			nand-on-flash-bbt;
-			#address-cells = <1>;
-			#size-cells = <1>;
-		};
-	};
diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
index 7f480c6b19810..4ac7a07d87cc6 100644
--- a/drivers/mmc/core/block.c
+++ b/drivers/mmc/core/block.c
@@ -1763,6 +1763,17 @@ static void mmc_blk_mq_rw_recovery(struct mmc_queue *mq, struct request *req)
 	u32 blocks;
 	int err;
 
+	/*
+	 * the host is in a bad state, and can't sent a new command
+	 * without be unstuck
+	 */
+	if (brq->sbc.error == -EDEADLK || brq->cmd.error == -EDEADLK ||
+	    brq->stop.error == -EDEADLK || brq->data.error == -EDEADLK) {
+		pr_err("%s: host is in bad state, must be unstuck\n",
+		       req->rq_disk->disk_name);
+		mmc_hw_unstuck(card->host);
+	}
+
 	/*
 	 * Some errors the host driver might not have seen. Set the number of
 	 * bytes transferred to zero in that case.
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index 26644b7ec13e3..6d00eed17738e 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -397,6 +397,7 @@ static int __mmc_start_req(struct mmc_host *host, struct mmc_request *mrq)
 void mmc_wait_for_req_done(struct mmc_host *host, struct mmc_request *mrq)
 {
 	struct mmc_command *cmd;
+	int sbc_err, stop_err, data_err;
 
 	while (1) {
 		wait_for_completion(&mrq->completion);
@@ -420,8 +421,20 @@ void mmc_wait_for_req_done(struct mmc_host *host, struct mmc_request *mrq)
 				       mmc_hostname(host), __func__);
 			}
 		}
-		if (!cmd->error || !cmd->retries ||
-		    mmc_card_removed(host->card))
+
+		sbc_err = mrq->sbc ? mrq->sbc->error : 0;
+		stop_err = mrq->stop ? mrq->stop->error : 0;
+		data_err = mrq->data ? mrq->data->error : 0;
+
+		if (cmd->error == -EDEADLK || sbc_err == -EDEADLK ||
+		    stop_err == -EDEADLK || data_err == -EDEADLK) {
+			pr_debug("%s: host is in bad state, must be unstuck\n",
+				 mmc_hostname(host));
+			mmc_hw_unstuck(host);
+		}
+
+		if ((!cmd->error && !sbc_err && !stop_err && !data_err) ||
+		    !cmd->retries || mmc_card_removed(host->card))
 			break;
 
 		mmc_retune_recheck(host);
@@ -430,6 +443,12 @@ void mmc_wait_for_req_done(struct mmc_host *host, struct mmc_request *mrq)
 			 mmc_hostname(host), cmd->opcode, cmd->error);
 		cmd->retries--;
 		cmd->error = 0;
+		if (mrq->sbc)
+			mrq->sbc->error = 0;
+		if (mrq->stop)
+			mrq->stop->error = 0;
+		if (mrq->data)
+			mrq->data->error = 0;
 		__mmc_start_request(host, mrq);
 	}
 
@@ -2163,6 +2182,14 @@ int mmc_sw_reset(struct mmc_host *host)
 }
 EXPORT_SYMBOL(mmc_sw_reset);
 
+void mmc_hw_unstuck(struct mmc_host *host)
+{
+	if (!host->ops->hw_unstuck)
+		return;
+	host->ops->hw_unstuck(host);
+}
+EXPORT_SYMBOL(mmc_hw_unstuck);
+
 static int mmc_rescan_try_freq(struct mmc_host *host, unsigned freq)
 {
 	host->f_init = freq;
diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
|
|
index 7e4bc9124efde..f2e74aa3d7274 100644
|
|
--- a/drivers/mmc/host/mmci.c
|
|
+++ b/drivers/mmc/host/mmci.c
|
|
@@ -22,6 +22,7 @@
|
|
#include <linux/mmc/pm.h>
|
|
#include <linux/mmc/host.h>
|
|
#include <linux/mmc/card.h>
|
|
+#include <linux/mmc/sd.h>
|
|
#include <linux/mmc/slot-gpio.h>
|
|
#include <linux/amba/bus.h>
|
|
#include <linux/clk.h>
|
|
@@ -44,6 +45,7 @@
|
|
#define DRIVER_NAME "mmci-pl18x"
|
|
|
|
static void mmci_variant_init(struct mmci_host *host);
|
|
+static void ux500_variant_init(struct mmci_host *host);
|
|
static void ux500v2_variant_init(struct mmci_host *host);
|
|
|
|
static unsigned int fmax = 515633;
|
|
@@ -177,7 +179,6 @@ static struct variant_data variant_ux500 = {
|
|
.f_max = 100000000,
|
|
.signal_direction = true,
|
|
.pwrreg_clkgate = true,
|
|
- .busy_detect = true,
|
|
.busy_dpsm_flag = MCI_DPSM_ST_BUSYMODE,
|
|
.busy_detect_flag = MCI_ST_CARDBUSY,
|
|
.busy_detect_mask = MCI_ST_BUSYENDMASK,
|
|
@@ -186,7 +187,7 @@ static struct variant_data variant_ux500 = {
|
|
.irq_pio_mask = MCI_IRQ_PIO_MASK,
|
|
.start_err = MCI_STARTBITERR,
|
|
.opendrain = MCI_OD,
|
|
- .init = mmci_variant_init,
|
|
+ .init = ux500_variant_init,
|
|
};
|
|
|
|
static struct variant_data variant_ux500v2 = {
|
|
@@ -212,7 +213,6 @@ static struct variant_data variant_ux500v2 = {
|
|
.f_max = 100000000,
|
|
.signal_direction = true,
|
|
.pwrreg_clkgate = true,
|
|
- .busy_detect = true,
|
|
.busy_dpsm_flag = MCI_DPSM_ST_BUSYMODE,
|
|
.busy_detect_flag = MCI_ST_CARDBUSY,
|
|
.busy_detect_mask = MCI_ST_BUSYENDMASK,
|
|
@@ -264,8 +264,38 @@ static struct variant_data variant_stm32_sdmmc = {
|
|
.datacnt_useless = true,
|
|
.datalength_bits = 25,
|
|
.datactrl_blocksz = 14,
|
|
- .datactrl_any_blocksz = true,
|
|
+ .datactrl_mask_sdio = MCI_DPSM_STM32_SDIOEN,
|
|
+ .pwrreg_nopower = true,
|
|
.stm32_idmabsize_mask = GENMASK(12, 5),
|
|
+ .busy_timeout = true,
|
|
+ .busy_detect_flag = MCI_STM32_BUSYD0,
|
|
+ .busy_detect_mask = MCI_STM32_BUSYD0ENDMASK,
|
|
+ .init = sdmmc_variant_init,
|
|
+};
|
|
+
|
|
+static struct variant_data variant_stm32_sdmmcv2 = {
|
|
+ .fifosize = 16 * 4,
|
|
+ .fifohalfsize = 8 * 4,
|
|
+ .f_max = 208000000,
|
|
+ .stm32_clkdiv = true,
|
|
+ .cmdreg_cpsm_enable = MCI_CPSM_STM32_ENABLE,
|
|
+ .cmdreg_lrsp_crc = MCI_CPSM_STM32_LRSP_CRC,
|
|
+ .cmdreg_srsp_crc = MCI_CPSM_STM32_SRSP_CRC,
|
|
+ .cmdreg_srsp = MCI_CPSM_STM32_SRSP,
|
|
+ .cmdreg_stop = MCI_CPSM_STM32_CMDSTOP,
|
|
+ .data_cmd_enable = MCI_CPSM_STM32_CMDTRANS,
|
|
+ .irq_pio_mask = MCI_IRQ_PIO_STM32_MASK,
|
|
+ .datactrl_first = true,
|
|
+ .datacnt_useless = true,
|
|
+ .datalength_bits = 25,
|
|
+ .datactrl_blocksz = 14,
|
|
+ .datactrl_mask_sdio = MCI_DPSM_STM32_SDIOEN,
|
|
+ .pwrreg_nopower = true,
|
|
+ .stm32_idmabsize_mask = GENMASK(16, 5),
|
|
+ .dma_lli = true,
|
|
+ .busy_timeout = true,
|
|
+ .busy_detect_flag = MCI_STM32_BUSYD0,
|
|
+ .busy_detect_mask = MCI_STM32_BUSYD0ENDMASK,
|
|
.init = sdmmc_variant_init,
|
|
};
|
|
|
|
@@ -363,6 +393,24 @@ static void mmci_write_datactrlreg(struct mmci_host *host, u32 datactrl)
|
|
}
|
|
}
|
|
|
|
+static void mmci_restore(struct mmci_host *host)
|
|
+{
|
|
+ unsigned long flags;
|
|
+
|
|
+ spin_lock_irqsave(&host->lock, flags);
|
|
+
|
|
+ if (host->variant->pwrreg_nopower) {
|
|
+ writel(host->clk_reg, host->base + MMCICLOCK);
|
|
+ writel(host->datactrl_reg, host->base + MMCIDATACTRL);
|
|
+ writel(host->pwr_reg, host->base + MMCIPOWER);
|
|
+ }
|
|
+ writel(MCI_IRQENABLE | host->variant->start_err,
|
|
+ host->base + MMCIMASK0);
|
|
+ mmci_reg_delay(host);
|
|
+
|
|
+ spin_unlock_irqrestore(&host->lock, flags);
|
|
+}
|
|
+
|
|
/*
|
|
* This must be called with host->lock held
|
|
*/
|
|
@@ -453,11 +501,11 @@ void mmci_dma_setup(struct mmci_host *host)
|
|
static int mmci_validate_data(struct mmci_host *host,
|
|
struct mmc_data *data)
|
|
{
|
|
- struct variant_data *variant = host->variant;
|
|
-
|
|
if (!data)
|
|
return 0;
|
|
- if (!is_power_of_2(data->blksz) && !variant->datactrl_any_blocksz) {
|
|
+
|
|
+ if ((host->mmc->card && !mmc_card_sdio(host->mmc->card)) &&
|
|
+ !is_power_of_2(data->blksz)) {
|
|
dev_err(mmc_dev(host->mmc),
|
|
"unsupported block size (%d bytes)\n", data->blksz);
|
|
return -EINVAL;
|
|
@@ -619,6 +667,67 @@ static u32 ux500v2_get_dctrl_cfg(struct mmci_host *host)
|
|
return MCI_DPSM_ENABLE | (host->data->blksz << 16);
|
|
}
|
|
|
|
+static bool ux500_busy_complete(struct mmci_host *host, u32 status, u32 err_msk)
|
|
+{
|
|
+ void __iomem *base = host->base;
|
|
+
|
|
+ /*
|
|
+ * Before unmasking for the busy end IRQ, confirm that the
|
|
+ * command was sent successfully. To keep track of having a
|
|
+ * command in-progress, waiting for busy signaling to end,
|
|
+ * store the status in host->busy_status.
|
|
+ *
|
|
+ * Note that, the card may need a couple of clock cycles before
|
|
+ * it starts signaling busy on DAT0, hence re-read the
|
|
+ * MMCISTATUS register here, to allow the busy bit to be set.
|
|
+ * Potentially we may even need to poll the register for a
|
|
+ * while, to allow it to be set, but tests indicates that it
|
|
+ * isn't needed.
|
|
+ */
|
|
+ if (!host->busy_status && !(status & err_msk) &&
|
|
+ (readl(base + MMCISTATUS) & host->variant->busy_detect_flag)) {
|
|
+ writel(readl(base + MMCIMASK0) |
|
|
+ host->variant->busy_detect_mask,
|
|
+ base + MMCIMASK0);
|
|
+
|
|
+ host->busy_status = status & (MCI_CMDSENT | MCI_CMDRESPEND);
|
|
+ return false;
|
|
+ }
|
|
+
|
|
+ /*
|
|
+ * If there is a command in-progress that has been successfully
|
|
+ * sent, then bail out if busy status is set and wait for the
|
|
+ * busy end IRQ.
|
|
+ *
|
|
+ * Note that, the HW triggers an IRQ on both edges while
|
|
+ * monitoring DAT0 for busy completion, but there is only one
|
|
+ * status bit in MMCISTATUS for the busy state. Therefore
|
|
+ * both the start and the end interrupts needs to be cleared,
|
|
+ * one after the other. So, clear the busy start IRQ here.
|
|
+ */
|
|
+ if (host->busy_status &&
|
|
+ (status & host->variant->busy_detect_flag)) {
|
|
+ writel(host->variant->busy_detect_mask, base + MMCICLEAR);
|
|
+ return false;
|
|
+ }
|
|
+
|
|
+ /*
|
|
+ * If there is a command in-progress that has been successfully
|
|
+ * sent and the busy bit isn't set, it means we have received
|
|
+ * the busy end IRQ. Clear and mask the IRQ, then continue to
|
|
+ * process the command.
|
|
+ */
|
|
+ if (host->busy_status) {
|
|
+ writel(host->variant->busy_detect_mask, base + MMCICLEAR);
|
|
+
|
|
+ writel(readl(base + MMCIMASK0) &
|
|
+ ~host->variant->busy_detect_mask, base + MMCIMASK0);
|
|
+ host->busy_status = 0;
|
|
+ }
|
|
+
|
|
+ return true;
|
|
+}
|
|
+
|
|
/*
|
|
* All the DMA operation mode stuff goes inside this ifdef.
|
|
* This assumes that you have a generic DMA device interface,
|
|
@@ -979,9 +1088,16 @@ void mmci_variant_init(struct mmci_host *host)
|
|
host->ops = &mmci_variant_ops;
|
|
}
|
|
|
|
+void ux500_variant_init(struct mmci_host *host)
|
|
+{
|
|
+ host->ops = &mmci_variant_ops;
|
|
+ host->ops->busy_complete = ux500_busy_complete;
|
|
+}
|
|
+
|
|
void ux500v2_variant_init(struct mmci_host *host)
|
|
{
|
|
host->ops = &mmci_variant_ops;
|
|
+ host->ops->busy_complete = ux500_busy_complete;
|
|
host->ops->get_datactrl_cfg = ux500v2_get_dctrl_cfg;
|
|
}
|
|
|
|
@@ -1101,6 +1217,7 @@ static void
|
|
mmci_start_command(struct mmci_host *host, struct mmc_command *cmd, u32 c)
|
|
{
|
|
void __iomem *base = host->base;
|
|
+ unsigned long long clks;
|
|
|
|
dev_dbg(mmc_dev(host->mmc), "op %02x arg %08x flags %08x\n",
|
|
cmd->opcode, cmd->arg, cmd->flags);
|
|
@@ -1123,6 +1240,19 @@ mmci_start_command(struct mmci_host *host, struct mmc_command *cmd, u32 c)
|
|
else
|
|
c |= host->variant->cmdreg_srsp;
|
|
}
|
|
+
|
|
+ if (host->variant->busy_timeout && cmd->flags & MMC_RSP_BUSY) {
|
|
+ if (!cmd->busy_timeout)
|
|
+ cmd->busy_timeout = 1000;
|
|
+
|
|
+ clks = (unsigned long long)cmd->busy_timeout * host->cclk;
|
|
+ do_div(clks, MSEC_PER_SEC);
|
|
+ writel_relaxed(clks, host->base + MMCIDATATIMER);
|
|
+ }
|
|
+
|
|
+ if (host->ops->prep_volt_switch && cmd->opcode == SD_SWITCH_VOLTAGE)
|
|
+ host->ops->prep_volt_switch(host);
|
|
+
|
|
if (/*interrupt*/0)
|
|
c |= MCI_CPSM_INTERRUPT;
|
|
|
|
@@ -1227,6 +1357,7 @@ static void
|
|
mmci_cmd_irq(struct mmci_host *host, struct mmc_command *cmd,
|
|
unsigned int status)
|
|
{
|
|
+ u32 err_msk = MCI_CMDCRCFAIL | MCI_CMDTIMEOUT;
|
|
void __iomem *base = host->base;
|
|
bool sbc, busy_resp;
|
|
|
|
@@ -1241,74 +1372,17 @@ mmci_cmd_irq(struct mmci_host *host, struct mmc_command *cmd,
|
|
* handling. Note that we tag on any latent IRQs postponed
|
|
* due to waiting for busy status.
|
|
*/
|
|
- if (!((status|host->busy_status) &
|
|
- (MCI_CMDCRCFAIL|MCI_CMDTIMEOUT|MCI_CMDSENT|MCI_CMDRESPEND)))
|
|
+ if (host->variant->busy_timeout && busy_resp)
|
|
+ err_msk |= MCI_DATATIMEOUT;
|
|
+
|
|
+ if (!((status | host->busy_status) &
|
|
+ (err_msk | MCI_CMDSENT | MCI_CMDRESPEND)))
|
|
return;
|
|
|
|
/* Handle busy detection on DAT0 if the variant supports it. */
|
|
- if (busy_resp && host->variant->busy_detect) {
|
|
-
|
|
- /*
|
|
- * Before unmasking for the busy end IRQ, confirm that the
|
|
- * command was sent successfully. To keep track of having a
|
|
- * command in-progress, waiting for busy signaling to end,
|
|
- * store the status in host->busy_status.
|
|
- *
|
|
- * Note that, the card may need a couple of clock cycles before
|
|
- * it starts signaling busy on DAT0, hence re-read the
|
|
- * MMCISTATUS register here, to allow the busy bit to be set.
|
|
- * Potentially we may even need to poll the register for a
|
|
- * while, to allow it to be set, but tests indicates that it
|
|
- * isn't needed.
|
|
- */
|
|
- if (!host->busy_status &&
|
|
- !(status & (MCI_CMDCRCFAIL|MCI_CMDTIMEOUT)) &&
|
|
- (readl(base + MMCISTATUS) & host->variant->busy_detect_flag)) {
|
|
-
|
|
- writel(readl(base + MMCIMASK0) |
|
|
- host->variant->busy_detect_mask,
|
|
- base + MMCIMASK0);
|
|
-
|
|
- host->busy_status =
|
|
- status & (MCI_CMDSENT|MCI_CMDRESPEND);
|
|
- return;
|
|
- }
|
|
-
|
|
- /*
|
|
- * If there is a command in-progress that has been successfully
|
|
- * sent, then bail out if busy status is set and wait for the
|
|
- * busy end IRQ.
|
|
- *
|
|
- * Note that, the HW triggers an IRQ on both edges while
|
|
- * monitoring DAT0 for busy completion, but there is only one
|
|
- * status bit in MMCISTATUS for the busy state. Therefore
|
|
- * both the start and the end interrupts needs to be cleared,
|
|
- * one after the other. So, clear the busy start IRQ here.
|
|
- */
|
|
- if (host->busy_status &&
|
|
- (status & host->variant->busy_detect_flag)) {
|
|
- writel(host->variant->busy_detect_mask,
|
|
- host->base + MMCICLEAR);
|
|
+ if (busy_resp && host->ops->busy_complete)
|
|
+ if (!host->ops->busy_complete(host, status, err_msk))
|
|
return;
|
|
- }
|
|
-
|
|
- /*
|
|
- * If there is a command in-progress that has been successfully
|
|
- * sent and the busy bit isn't set, it means we have received
|
|
- * the busy end IRQ. Clear and mask the IRQ, then continue to
|
|
- * process the command.
|
|
- */
|
|
- if (host->busy_status) {
|
|
-
|
|
- writel(host->variant->busy_detect_mask,
|
|
- host->base + MMCICLEAR);
|
|
-
|
|
- writel(readl(base + MMCIMASK0) &
|
|
- ~host->variant->busy_detect_mask,
|
|
- base + MMCIMASK0);
|
|
- host->busy_status = 0;
|
|
- }
|
|
- }
|
|
|
|
host->cmd = NULL;
|
|
|
|
@@ -1316,6 +1390,9 @@ mmci_cmd_irq(struct mmci_host *host, struct mmc_command *cmd,
|
|
cmd->error = -ETIMEDOUT;
|
|
} else if (status & MCI_CMDCRCFAIL && cmd->flags & MMC_RSP_CRC) {
|
|
cmd->error = -EILSEQ;
|
|
+ } else if (host->variant->busy_timeout && busy_resp &&
|
|
+ status & MCI_DATATIMEOUT) {
|
|
+ cmd->error = -EDEADLK;
|
|
} else {
|
|
cmd->resp[0] = readl(base + MMCIRESPONSE0);
|
|
cmd->resp[1] = readl(base + MMCIRESPONSE1);
|
|
@@ -1327,7 +1404,6 @@ mmci_cmd_irq(struct mmci_host *host, struct mmc_command *cmd,
|
|
if (host->data) {
|
|
/* Terminate the DMA transfer */
|
|
mmci_dma_error(host);
|
|
-
|
|
mmci_stop_data(host);
|
|
if (host->variant->cmdreg_stop && cmd->error) {
|
|
mmci_stop_command(host);
|
|
@@ -1546,7 +1622,7 @@ static irqreturn_t mmci_irq(int irq, void *dev_id)
|
|
* clear the corresponding IRQ.
|
|
*/
|
|
status &= readl(host->base + MMCIMASK0);
|
|
- if (host->variant->busy_detect)
|
|
+ if (host->ops->busy_complete)
|
|
writel(status & ~host->variant->busy_detect_mask,
|
|
host->base + MMCICLEAR);
|
|
else
|
|
@@ -1609,6 +1685,20 @@ static void mmci_request(struct mmc_host *mmc, struct mmc_request *mrq)
|
|
spin_unlock_irqrestore(&host->lock, flags);
|
|
}
|
|
|
|
+static void mmci_set_max_busy_timeout(struct mmc_host *mmc)
|
|
+{
|
|
+ struct mmci_host *host = mmc_priv(mmc);
|
|
+ u32 max_busy_timeout = 0;
|
|
+
|
|
+ if (!host->ops->busy_complete)
|
|
+ return;
|
|
+
|
|
+ if (host->variant->busy_timeout && mmc->actual_clock)
|
|
+ max_busy_timeout = ~0UL / (mmc->actual_clock / MSEC_PER_SEC);
|
|
+
|
|
+ mmc->max_busy_timeout = max_busy_timeout;
|
|
+}
|
|
+
|
|
static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
|
|
{
|
|
struct mmci_host *host = mmc_priv(mmc);
|
|
@@ -1713,6 +1803,8 @@ static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
|
|
else
|
|
mmci_set_clkreg(host, ios->clock);
|
|
|
|
+ mmci_set_max_busy_timeout(mmc);
|
|
+
|
|
if (host->ops && host->ops->set_pwrreg)
|
|
host->ops->set_pwrreg(host, pwr);
|
|
else
|
|
@@ -1740,6 +1832,7 @@ static int mmci_get_cd(struct mmc_host *mmc)
|
|
|
|
static int mmci_sig_volt_switch(struct mmc_host *mmc, struct mmc_ios *ios)
|
|
{
|
|
+ struct mmci_host *host = mmc_priv(mmc);
|
|
int ret = 0;
|
|
|
|
if (!IS_ERR(mmc->supply.vqmmc)) {
|
|
@@ -1759,6 +1852,9 @@ static int mmci_sig_volt_switch(struct mmc_host *mmc, struct mmc_ios *ios)
|
|
break;
|
|
}
|
|
|
|
+ if (!ret && host->ops && host->ops->volt_switch)
|
|
+ ret = host->ops->volt_switch(host, ios);
|
|
+
|
|
if (ret)
|
|
dev_warn(mmc_dev(mmc), "Voltage switch failed\n");
|
|
}
|
|
@@ -1766,6 +1862,19 @@ static int mmci_sig_volt_switch(struct mmc_host *mmc, struct mmc_ios *ios)
|
|
return ret;
|
|
}
|
|
|
|
+static void mmci_hw_unstuck(struct mmc_host *mmc)
|
|
+{
|
|
+ struct mmci_host *host = mmc_priv(mmc);
|
|
+
|
|
+ if (host->rst) {
|
|
+ reset_control_assert(host->rst);
|
|
+ udelay(2);
|
|
+ reset_control_deassert(host->rst);
|
|
+ }
|
|
+
|
|
+ mmci_restore(host);
|
|
+}
|
|
+
|
|
static struct mmc_host_ops mmci_ops = {
|
|
.request = mmci_request,
|
|
.pre_req = mmci_pre_request,
|
|
@@ -1774,6 +1883,7 @@ static struct mmc_host_ops mmci_ops = {
|
|
.get_ro = mmc_gpio_get_ro,
|
|
.get_cd = mmci_get_cd,
|
|
.start_signal_voltage_switch = mmci_sig_volt_switch,
|
|
+ .hw_unstuck = mmci_hw_unstuck,
|
|
};
|
|
|
|
static int mmci_of_parse(struct device_node *np, struct mmc_host *mmc)
|
|
@@ -1843,6 +1953,7 @@ static int mmci_probe(struct amba_device *dev,
|
|
|
|
host = mmc_priv(mmc);
|
|
host->mmc = mmc;
|
|
+ host->mmc_ops = &mmci_ops;
|
|
|
|
/*
|
|
* Some variant (STM32) doesn't have opendrain bit, nevertheless
|
|
@@ -1967,13 +2078,15 @@ static int mmci_probe(struct amba_device *dev,
|
|
else if (plat->ocr_mask)
|
|
dev_warn(mmc_dev(mmc), "Platform OCR mask is ignored\n");
|
|
|
|
+ host->pwr_reg = readl_relaxed(host->base + MMCIPOWER);
|
|
+
|
|
/* We support these capabilities. */
|
|
mmc->caps |= MMC_CAP_CMD23;
|
|
|
|
/*
|
|
* Enable busy detection.
|
|
*/
|
|
- if (variant->busy_detect) {
|
|
+ if (host->ops->busy_complete) {
|
|
mmci_ops.card_busy = mmci_card_busy;
|
|
/*
|
|
* Not all variants have a flag to enable busy detection
|
|
@@ -1983,7 +2096,6 @@ static int mmci_probe(struct amba_device *dev,
|
|
mmci_write_datactrlreg(host,
|
|
host->variant->busy_dpsm_flag);
|
|
mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY;
|
|
- mmc->max_busy_timeout = 0;
|
|
}
|
|
|
|
/* Prepare a CMD12 - needed to clear the DPSM on some variants. */
|
|
@@ -2141,24 +2253,6 @@ static void mmci_save(struct mmci_host *host)
|
|
spin_unlock_irqrestore(&host->lock, flags);
|
|
}
|
|
|
|
-static void mmci_restore(struct mmci_host *host)
|
|
-{
|
|
- unsigned long flags;
|
|
-
|
|
- spin_lock_irqsave(&host->lock, flags);
|
|
-
|
|
- if (host->variant->pwrreg_nopower) {
|
|
- writel(host->clk_reg, host->base + MMCICLOCK);
|
|
- writel(host->datactrl_reg, host->base + MMCIDATACTRL);
|
|
- writel(host->pwr_reg, host->base + MMCIPOWER);
|
|
- }
|
|
- writel(MCI_IRQENABLE | host->variant->start_err,
|
|
- host->base + MMCIMASK0);
|
|
- mmci_reg_delay(host);
|
|
-
|
|
- spin_unlock_irqrestore(&host->lock, flags);
|
|
-}
|
|
-
|
|
static int mmci_runtime_suspend(struct device *dev)
|
|
{
|
|
struct amba_device *adev = to_amba_device(dev);
|
|
@@ -2253,6 +2347,11 @@ static const struct amba_id mmci_ids[] = {
|
|
.mask = 0xf0ffffff,
|
|
.data = &variant_stm32_sdmmc,
|
|
},
|
|
+ {
|
|
+ .id = 0x00253180,
|
|
+ .mask = 0xf0ffffff,
|
|
+ .data = &variant_stm32_sdmmcv2,
|
|
+ },
|
|
/* Qualcomm variants */
|
|
{
|
|
.id = 0x00051180,
|
|
diff --git a/drivers/mmc/host/mmci.h b/drivers/mmc/host/mmci.h
index 89ab73343cf30..a94f384bb1ff5 100644
--- a/drivers/mmc/host/mmci.h
+++ b/drivers/mmc/host/mmci.h
@@ -133,6 +133,8 @@
 #define MCI_DPSM_STM32_MODE_SDIO	(1 << 2)
 #define MCI_DPSM_STM32_MODE_STREAM	(2 << 2)
 #define MCI_DPSM_STM32_MODE_BLOCK_STOP	(3 << 2)
+#define MCI_DPSM_STM32_SDIOEN		BIT(11)
+
 
 #define MMCIDATACNT		0x030
 #define MMCISTATUS		0x034
@@ -164,6 +166,8 @@
 #define MCI_ST_CARDBUSY		(1 << 24)
 /* Extended status bits for the STM32 variants */
 #define MCI_STM32_BUSYD0	BIT(20)
+#define MCI_STM32_BUSYD0END	BIT(21)
+#define MCI_STM32_VSWEND	BIT(25)
 
 #define MMCICLEAR		0x038
 #define MCI_CMDCRCFAILCLR	(1 << 0)
@@ -181,6 +185,9 @@
 #define MCI_ST_SDIOITC		(1 << 22)
 #define MCI_ST_CEATAENDC	(1 << 23)
 #define MCI_ST_BUSYENDC		(1 << 24)
+/* Extended clear bits for the STM32 variants */
+#define MCI_STM32_VSWENDC	(1 << 25)
+#define MCI_STM32_CKSTOPC	(1 << 26)
 
 #define MMCIMASK0		0x03c
 #define MCI_CMDCRCFAILMASK	(1 << 0)
@@ -290,7 +297,8 @@ struct mmci_host;
  * @f_max: maximum clk frequency supported by the controller.
  * @signal_direction: input/out direction of bus signals can be indicated
  * @pwrreg_clkgate: MMCIPOWER register must be used to gate the clock
- * @busy_detect: true if the variant supports busy detection on DAT0.
+ * @busy_timeout: true if the variant starts data timer when the DPSM
+ *		  enter in Wait_R or Busy state.
  * @busy_dpsm_flag: bitmask enabling busy detection in the DPSM
  * @busy_detect_flag: bitmask identifying the bit in the MMCISTATUS register
  *		      indicating that the card is busy
@@ -338,7 +346,7 @@ struct variant_data {
 	u32 f_max;
 	u8 signal_direction:1;
 	u8 pwrreg_clkgate:1;
-	u8 busy_detect:1;
+	u8 busy_timeout:1;
 	u32 busy_dpsm_flag;
 	u32 busy_detect_flag;
 	u32 busy_detect_mask;
@@ -372,6 +380,9 @@ struct mmci_host_ops {
 	void (*dma_error)(struct mmci_host *host);
 	void (*set_clkreg)(struct mmci_host *host, unsigned int desired);
 	void (*set_pwrreg)(struct mmci_host *host, unsigned int pwr);
+	bool (*busy_complete)(struct mmci_host *host, u32 status, u32 err_msk);
+	void (*prep_volt_switch)(struct mmci_host *host);
+	int (*volt_switch)(struct mmci_host *host, struct mmc_ios *ios);
 };
 
 struct mmci_host {
@@ -402,8 +413,10 @@ struct mmci_host {
 	u32 mask1_reg;
 	u8 vqmmc_enabled:1;
 	struct mmci_platform_data *plat;
+	struct mmc_host_ops *mmc_ops;
 	struct mmci_host_ops *ops;
 	struct variant_data *variant;
+	void *variant_priv;
 	struct pinctrl *pinctrl;
 	struct pinctrl_state *pins_default;
 	struct pinctrl_state *pins_opendrain;
diff --git a/drivers/mmc/host/mmci_stm32_sdmmc.c b/drivers/mmc/host/mmci_stm32_sdmmc.c
|
|
index 0953bd8a4f79d..7c6ba518bc08e 100644
|
|
--- a/drivers/mmc/host/mmci_stm32_sdmmc.c
|
|
+++ b/drivers/mmc/host/mmci_stm32_sdmmc.c
|
|
@@ -3,10 +3,13 @@
|
|
* Copyright (C) STMicroelectronics 2018 - All Rights Reserved
|
|
* Author: Ludovic.barre@st.com for STMicroelectronics.
|
|
*/
|
|
+#include <linux/bitfield.h>
|
|
#include <linux/delay.h>
|
|
#include <linux/dma-mapping.h>
|
|
+#include <linux/iopoll.h>
|
|
#include <linux/mmc/host.h>
|
|
#include <linux/mmc/card.h>
|
|
+#include <linux/of_address.h>
|
|
#include <linux/reset.h>
|
|
#include <linux/scatterlist.h>
|
|
#include "mmci.h"
|
|
@@ -14,17 +17,36 @@
|
|
#define SDMMC_LLI_BUF_LEN PAGE_SIZE
|
|
#define SDMMC_IDMA_BURST BIT(MMCI_STM32_IDMABNDT_SHIFT)
|
|
|
|
+#define DLYB_CR 0x0
|
|
+#define DLYB_CR_DEN BIT(0)
|
|
+#define DLYB_CR_SEN BIT(1)
|
|
+
|
|
+#define DLYB_CFGR 0x4
|
|
+#define DLYB_CFGR_SEL_MASK GENMASK(3, 0)
|
|
+#define DLYB_CFGR_UNIT_MASK GENMASK(14, 8)
|
|
+#define DLYB_CFGR_LNG_MASK GENMASK(27, 16)
|
|
+#define DLYB_CFGR_LNGF BIT(31)
|
|
+
|
|
+#define DLYB_CFGR_SEL_MAX 12
|
|
+#define DLYB_CFGR_UNIT_MAX 127
|
|
+
|
|
struct sdmmc_lli_desc {
|
|
u32 idmalar;
|
|
u32 idmabase;
|
|
u32 idmasize;
|
|
};
|
|
|
|
-struct sdmmc_priv {
|
|
+struct sdmmc_idma {
|
|
dma_addr_t sg_dma;
|
|
void *sg_cpu;
|
|
};
|
|
|
|
+struct sdmmc_dlyb {
|
|
+ void __iomem *base;
|
|
+ u32 unit;
|
|
+ u32 max;
|
|
+};
|
|
+
|
|
int sdmmc_idma_validate_data(struct mmci_host *host,
|
|
struct mmc_data *data)
|
|
{
|
|
@@ -36,8 +58,8 @@ int sdmmc_idma_validate_data(struct mmci_host *host,
|
|
* excepted the last element which has no constraint on idmasize
|
|
*/
|
|
for_each_sg(data->sg, sg, data->sg_len - 1, i) {
|
|
- if (!IS_ALIGNED(sg_dma_address(data->sg), sizeof(u32)) ||
|
|
- !IS_ALIGNED(sg_dma_len(data->sg), SDMMC_IDMA_BURST)) {
|
|
+ if (!IS_ALIGNED(data->sg->offset, sizeof(u32)) ||
|
|
+ !IS_ALIGNED(data->sg->length, SDMMC_IDMA_BURST)) {
|
|
dev_err(mmc_dev(host->mmc),
|
|
"unaligned scatterlist: ofst:%x length:%d\n",
|
|
data->sg->offset, data->sg->length);
|
|
@@ -45,7 +67,7 @@ int sdmmc_idma_validate_data(struct mmci_host *host,
|
|
}
|
|
}
|
|
|
|
- if (!IS_ALIGNED(sg_dma_address(data->sg), sizeof(u32))) {
|
|
+ if (!IS_ALIGNED(data->sg->offset, sizeof(u32))) {
|
|
dev_err(mmc_dev(host->mmc),
|
|
"unaligned last scatterlist: ofst:%x length:%d\n",
|
|
data->sg->offset, data->sg->length);
|
|
@@ -92,21 +114,21 @@ static void sdmmc_idma_unprep_data(struct mmci_host *host,
|
|
|
|
static int sdmmc_idma_setup(struct mmci_host *host)
|
|
{
|
|
- struct sdmmc_priv *idma;
|
|
+ struct sdmmc_idma *idma;
|
|
+ struct device *dev = mmc_dev(host->mmc);
|
|
|
|
- idma = devm_kzalloc(mmc_dev(host->mmc), sizeof(*idma), GFP_KERNEL);
|
|
- if (!idma)
|
|
+ idma = devm_kzalloc(dev, sizeof(*idma), GFP_KERNEL);
|
|
+ dev->dma_parms = devm_kzalloc(dev, sizeof(*dev->dma_parms), GFP_KERNEL);
|
|
+ if (!idma || !dev->dma_parms)
|
|
return -ENOMEM;
|
|
|
|
host->dma_priv = idma;
|
|
|
|
if (host->variant->dma_lli) {
|
|
- idma->sg_cpu = dmam_alloc_coherent(mmc_dev(host->mmc),
|
|
- SDMMC_LLI_BUF_LEN,
|
|
+ idma->sg_cpu = dmam_alloc_coherent(dev, SDMMC_LLI_BUF_LEN,
|
|
&idma->sg_dma, GFP_KERNEL);
|
|
if (!idma->sg_cpu) {
|
|
- dev_err(mmc_dev(host->mmc),
|
|
- "Failed to alloc IDMA descriptor\n");
|
|
+ dev_err(dev, "Failed to alloc IDMA descriptor\n");
|
|
return -ENOMEM;
|
|
}
|
|
host->mmc->max_segs = SDMMC_LLI_BUF_LEN /
|
|
@@ -117,13 +139,15 @@ static int sdmmc_idma_setup(struct mmci_host *host)
|
|
host->mmc->max_seg_size = host->mmc->max_req_size;
|
|
}
|
|
|
|
+ dma_set_max_seg_size(dev, host->mmc->max_seg_size);
|
|
+
|
|
return 0;
|
|
}
|
|
|
|
static int sdmmc_idma_start(struct mmci_host *host, unsigned int *datactrl)
|
|
|
|
{
|
|
- struct sdmmc_priv *idma = host->dma_priv;
|
|
+ struct sdmmc_idma *idma = host->dma_priv;
|
|
struct sdmmc_lli_desc *desc = (struct sdmmc_lli_desc *)idma->sg_cpu;
|
|
struct mmc_data *data = host->data;
|
|
struct scatterlist *sg;
|
|
@@ -229,12 +253,25 @@ static void mmci_sdmmc_set_clkreg(struct mmci_host *host, unsigned int desired)
|
|
mmci_write_clkreg(host, clk);
|
|
}
|
|
|
|
+static void sdmmc_dlyb_input_ck(struct sdmmc_dlyb *dlyb)
|
|
+{
|
|
+ if (!dlyb || !dlyb->base)
|
|
+ return;
|
|
+
|
|
+ /* Output clock = Input clock */
|
|
+ writel_relaxed(0, dlyb->base + DLYB_CR);
|
|
+}
|
|
+
|
|
static void mmci_sdmmc_set_pwrreg(struct mmci_host *host, unsigned int pwr)
|
|
{
|
|
struct mmc_ios ios = host->mmc->ios;
|
|
+ struct sdmmc_dlyb *dlyb = host->variant_priv;
|
|
|
|
+ /* adds OF options */
|
|
pwr = host->pwr_reg_add;
|
|
|
|
+ sdmmc_dlyb_input_ck(dlyb);
|
|
+
|
|
if (ios.power_mode == MMC_POWER_OFF) {
|
|
/* Only a reset could power-off sdmmc */
|
|
reset_control_assert(host->rst);
|
|
@@ -257,6 +294,10 @@ static void mmci_sdmmc_set_pwrreg(struct mmci_host *host, unsigned int pwr)
|
|
writel(MCI_IRQENABLE | host->variant->start_err,
|
|
host->base + MMCIMASK0);
|
|
|
|
+ /* preserves voltage switch bits */
|
|
+ pwr |= host->pwr_reg & (MCI_STM32_VSWITCHEN |
|
|
+ MCI_STM32_VSWITCH);
|
|
+
|
|
/*
|
|
* After a power-cycle state, we must set the SDMMC in
|
|
* Power-off. The SDMMC_D[7:0], SDMMC_CMD and SDMMC_CK are
|
|
@@ -285,6 +326,178 @@ static u32 sdmmc_get_dctrl_cfg(struct mmci_host *host)
|
|
return datactrl;
|
|
}
|
|
|
|
+bool sdmmc_busy_complete(struct mmci_host *host, u32 status, u32 err_msk)
|
|
+{
|
|
+ void __iomem *base = host->base;
|
|
+ u32 busy_d0, busy_d0end, mask;
|
|
+
|
|
+ mask = readl_relaxed(base + MMCIMASK0);
|
|
+ busy_d0end = readl_relaxed(base + MMCISTATUS) & MCI_STM32_BUSYD0END;
|
|
+ busy_d0 = readl_relaxed(base + MMCISTATUS) & MCI_STM32_BUSYD0;
|
|
+
|
|
+ /* complete if there is an error or busy_d0end */
|
|
+ if ((status & err_msk) || busy_d0end)
|
|
+ goto complete;
|
|
+
|
|
+ /*
|
|
+ * On response the busy signaling is reflected in the BUSYD0 flag.
|
|
+ * if busy_d0 is in-progress we must activate busyd0end interrupt
|
|
+ * to wait this completion. Else this request has no busy step.
|
|
+ */
|
|
+ if (busy_d0) {
|
|
+ if (!host->busy_status) {
|
|
+ writel_relaxed(mask | host->variant->busy_detect_mask,
|
|
+ base + MMCIMASK0);
|
|
+ host->busy_status = status &
|
|
+ (MCI_CMDSENT | MCI_CMDRESPEND);
|
|
+ }
|
|
+ return false;
|
|
+ }
|
|
+
|
|
+complete:
|
|
+ writel_relaxed(mask & ~host->variant->busy_detect_mask,
|
|
+ base + MMCIMASK0);
|
|
+ writel_relaxed(host->variant->busy_detect_mask, base + MMCICLEAR);
|
|
+ host->busy_status = 0;
|
|
+
|
|
+ return true;
|
|
+}
|
|
+
|
|
+static void sdmmc_dlyb_set_cfgr(struct sdmmc_dlyb *dlyb,
|
|
+ int unit, int phase, bool sampler)
|
|
+{
|
|
+ u32 cfgr;
|
|
+
|
|
+ writel_relaxed(DLYB_CR_SEN | DLYB_CR_DEN, dlyb->base + DLYB_CR);
|
|
+
|
|
+ cfgr = FIELD_PREP(DLYB_CFGR_UNIT_MASK, unit) |
|
|
+ FIELD_PREP(DLYB_CFGR_SEL_MASK, phase);
|
|
+ writel_relaxed(cfgr, dlyb->base + DLYB_CFGR);
|
|
+
|
|
+ if (!sampler)
|
|
+ writel_relaxed(DLYB_CR_DEN, dlyb->base + DLYB_CR);
|
|
+}
|
|
+
|
|
+static int sdmmc_dlyb_lng_tuning(struct mmci_host *host)
|
|
+{
|
|
+ struct sdmmc_dlyb *dlyb = host->variant_priv;
|
|
+ u32 cfgr;
|
|
+ int i, lng, ret;
|
|
+
|
|
+ for (i = 0; i <= DLYB_CFGR_UNIT_MAX; i++) {
|
|
+ sdmmc_dlyb_set_cfgr(dlyb, i, DLYB_CFGR_SEL_MAX, true);
|
|
+
|
|
+ ret = readl_relaxed_poll_timeout(dlyb->base + DLYB_CFGR, cfgr,
|
|
+ (cfgr & DLYB_CFGR_LNGF),
|
|
+ 1, 1000);
|
|
+ if (ret) {
|
|
+ dev_warn(mmc_dev(host->mmc),
|
|
+ "delay line cfg timeout unit:%d cfgr:%d\n",
|
|
+ i, cfgr);
|
|
+ continue;
|
|
+ }
|
|
+
|
|
+ lng = FIELD_GET(DLYB_CFGR_LNG_MASK, cfgr);
|
|
+ if (lng < (BIT(11) | BIT(10)) && (lng & ~BIT(11)) > 0)
|
|
+ break;
|
|
+ }
|
|
+
|
|
+ if (i > DLYB_CFGR_UNIT_MAX)
|
|
+ return -EINVAL;
|
|
+
|
|
+ dlyb->unit = i;
|
|
+ dlyb->max = __fls(lng & ~BIT(11));
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int sdmmc_dlyb_phase_tuning(struct mmci_host *host, u32 opcode)
|
|
+{
|
|
+ struct sdmmc_dlyb *dlyb = host->variant_priv;
|
|
+ int cur_len = 0, max_len = 0, end_of_len = 0;
|
|
+ int phase;
|
|
+
|
|
+ for (phase = 0; phase <= dlyb->max; phase++) {
|
|
+ sdmmc_dlyb_set_cfgr(dlyb, dlyb->unit, phase, false);
|
|
+
|
|
+ if (mmc_send_tuning(host->mmc, opcode, NULL)) {
|
|
+ cur_len = 0;
|
|
+ } else {
|
|
+ cur_len++;
|
|
+ if (cur_len > max_len) {
|
|
+ max_len = cur_len;
|
|
+ end_of_len = phase;
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+
|
|
+ if (!max_len) {
|
|
+ dev_err(mmc_dev(host->mmc), "no tuning point found\n");
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
+ writel_relaxed(0, dlyb->base + DLYB_CR);
|
|
+
|
|
+ phase = end_of_len - max_len / 2;
|
|
+ sdmmc_dlyb_set_cfgr(dlyb, dlyb->unit, phase, false);
|
|
+
|
|
+ dev_dbg(mmc_dev(host->mmc), "unit:%d max_dly:%d phase:%d\n",
|
|
+ dlyb->unit, dlyb->max, phase);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int sdmmc_execute_tuning(struct mmc_host *mmc, u32 opcode)
|
|
+{
|
|
+ struct mmci_host *host = mmc_priv(mmc);
|
|
+ struct sdmmc_dlyb *dlyb = host->variant_priv;
|
|
+
|
|
+ if (!dlyb || !dlyb->base)
|
|
+ return -EINVAL;
|
|
+
|
|
+ if (sdmmc_dlyb_lng_tuning(host))
|
|
+ return -EINVAL;
|
|
+
|
|
+ return sdmmc_dlyb_phase_tuning(host, opcode);
|
|
+}
|
|
+
|
|
+static void sdmmc_prep_vswitch(struct mmci_host *host)
|
|
+{
|
|
+ /* clear the voltage switch completion flag */
|
|
+ writel_relaxed(MCI_STM32_VSWENDC, host->base + MMCICLEAR);
|
|
+ /* enable Voltage switch procedure */
|
|
+ mmci_write_pwrreg(host, host->pwr_reg | MCI_STM32_VSWITCHEN);
|
|
+}
|
|
+
|
|
+static int sdmmc_vswitch(struct mmci_host *host, struct mmc_ios *ios)
|
|
+{
|
|
+ unsigned long flags;
|
|
+ u32 status;
|
|
+ int ret = 0;
|
|
+
|
|
+ if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_180) {
|
|
+ spin_lock_irqsave(&host->lock, flags);
|
|
+ mmci_write_pwrreg(host, host->pwr_reg | MCI_STM32_VSWITCH);
|
|
+ spin_unlock_irqrestore(&host->lock, flags);
|
|
+
|
|
+ /* wait voltage switch completion while 10ms */
|
|
+ ret = readl_relaxed_poll_timeout(host->base + MMCISTATUS,
|
|
+ status,
|
|
+ (status & MCI_STM32_VSWEND),
|
|
+ 10, 10000);
|
|
+
|
|
+ writel_relaxed(MCI_STM32_VSWENDC | MCI_STM32_CKSTOPC,
|
|
+ host->base + MMCICLEAR);
|
|
+
|
|
+ spin_lock_irqsave(&host->lock, flags);
|
|
+ mmci_write_pwrreg(host, host->pwr_reg &
|
|
+ ~(MCI_STM32_VSWITCHEN | MCI_STM32_VSWITCH));
|
|
+ spin_unlock_irqrestore(&host->lock, flags);
|
|
+ }
|
|
+
|
|
+ return ret;
|
|
+}
|
|
+
|
|
static struct mmci_host_ops sdmmc_variant_ops = {
|
|
.validate_data = sdmmc_idma_validate_data,
|
|
.prep_data = sdmmc_idma_prep_data,
|
|
@@ -295,9 +508,28 @@ static struct mmci_host_ops sdmmc_variant_ops = {
|
|
.dma_finalize = sdmmc_idma_finalize,
|
|
.set_clkreg = mmci_sdmmc_set_clkreg,
|
|
.set_pwrreg = mmci_sdmmc_set_pwrreg,
|
|
+ .busy_complete = sdmmc_busy_complete,
|
|
+ .prep_volt_switch = sdmmc_prep_vswitch,
|
|
+ .volt_switch = sdmmc_vswitch,
|
|
};
|
|
|
|
void sdmmc_variant_init(struct mmci_host *host)
|
|
{
|
|
+ struct device_node *np = host->mmc->parent->of_node;
|
|
+ void __iomem *base_dlyb;
|
|
+ struct sdmmc_dlyb *dlyb;
|
|
+
|
|
host->ops = &sdmmc_variant_ops;
|
|
+
|
|
+ base_dlyb = devm_of_iomap(mmc_dev(host->mmc), np, 1, NULL);
|
|
+ if (IS_ERR(base_dlyb))
|
|
+ return;
|
|
+
|
|
+ dlyb = devm_kzalloc(mmc_dev(host->mmc), sizeof(*dlyb), GFP_KERNEL);
|
|
+ if (!dlyb)
|
|
+ return;
|
|
+
|
|
+ dlyb->base = base_dlyb;
|
|
+ host->variant_priv = dlyb;
|
|
+ host->mmc_ops->execute_tuning = sdmmc_execute_tuning;
|
|
}
|
|
diff --git a/drivers/mtd/nand/raw/Kconfig b/drivers/mtd/nand/raw/Kconfig
index e59de3f60cf6b..1b549d58787c2 100644
--- a/drivers/mtd/nand/raw/Kconfig
+++ b/drivers/mtd/nand/raw/Kconfig
@@ -419,6 +419,7 @@ config MTD_NAND_TEGRA
 config MTD_NAND_STM32_FMC2
 	tristate "Support for NAND controller on STM32MP SoCs"
 	depends on MACH_STM32MP157 || COMPILE_TEST
+	select MFD_SYSCON
 	help
 	  Enables support for NAND Flash chips on SoCs containing the FMC2
 	  NAND controller. This controller is found on STM32MP SoCs.
diff --git a/drivers/mtd/nand/raw/stm32_fmc2_nand.c b/drivers/mtd/nand/raw/stm32_fmc2_nand.c
|
|
index 5c06e0b4d4ef3..9553702d7a46d 100644
|
|
--- a/drivers/mtd/nand/raw/stm32_fmc2_nand.c
|
|
+++ b/drivers/mtd/nand/raw/stm32_fmc2_nand.c
|
|
@@ -4,16 +4,20 @@
|
|
* Author: Christophe Kerello <christophe.kerello@st.com>
|
|
*/
|
|
|
|
+#include <linux/bitfield.h>
|
|
#include <linux/clk.h>
|
|
#include <linux/dmaengine.h>
|
|
#include <linux/dma-mapping.h>
|
|
#include <linux/errno.h>
|
|
#include <linux/interrupt.h>
|
|
#include <linux/iopoll.h>
|
|
+#include <linux/mfd/syscon.h>
|
|
#include <linux/module.h>
|
|
#include <linux/mtd/rawnand.h>
|
|
+#include <linux/of_address.h>
|
|
#include <linux/pinctrl/consumer.h>
|
|
#include <linux/platform_device.h>
|
|
+#include <linux/regmap.h>
|
|
#include <linux/reset.h>
|
|
|
|
/* Bad block marker length */
|
|
@@ -37,8 +41,7 @@
|
|
/* Max ECC buffer length */
|
|
#define FMC2_MAX_ECC_BUF_LEN (FMC2_BCHDSRS_LEN * FMC2_MAX_SG)
|
|
|
|
-#define FMC2_TIMEOUT_US 1000
|
|
-#define FMC2_TIMEOUT_MS 1000
|
|
+#define FMC2_TIMEOUT_MS 5000
|
|
|
|
/* Timings */
|
|
#define FMC2_THIZ 1
|
|
@@ -85,20 +88,16 @@
|
|
/* Register: FMC2_PCR */
|
|
#define FMC2_PCR_PWAITEN BIT(1)
|
|
#define FMC2_PCR_PBKEN BIT(2)
|
|
-#define FMC2_PCR_PWID_MASK GENMASK(5, 4)
|
|
-#define FMC2_PCR_PWID(x) (((x) & 0x3) << 4)
|
|
+#define FMC2_PCR_PWID GENMASK(5, 4)
|
|
#define FMC2_PCR_PWID_BUSWIDTH_8 0
|
|
#define FMC2_PCR_PWID_BUSWIDTH_16 1
|
|
#define FMC2_PCR_ECCEN BIT(6)
|
|
#define FMC2_PCR_ECCALG BIT(8)
|
|
-#define FMC2_PCR_TCLR_MASK GENMASK(12, 9)
|
|
-#define FMC2_PCR_TCLR(x) (((x) & 0xf) << 9)
|
|
+#define FMC2_PCR_TCLR GENMASK(12, 9)
|
|
#define FMC2_PCR_TCLR_DEFAULT 0xf
|
|
-#define FMC2_PCR_TAR_MASK GENMASK(16, 13)
|
|
-#define FMC2_PCR_TAR(x) (((x) & 0xf) << 13)
|
|
+#define FMC2_PCR_TAR GENMASK(16, 13)
|
|
#define FMC2_PCR_TAR_DEFAULT 0xf
|
|
-#define FMC2_PCR_ECCSS_MASK GENMASK(19, 17)
|
|
-#define FMC2_PCR_ECCSS(x) (((x) & 0x7) << 17)
|
|
+#define FMC2_PCR_ECCSS GENMASK(19, 17)
|
|
#define FMC2_PCR_ECCSS_512 1
|
|
#define FMC2_PCR_ECCSS_2048 3
|
|
#define FMC2_PCR_BCHECC BIT(24)
|
|
@@ -108,17 +107,17 @@
|
|
#define FMC2_SR_NWRF BIT(6)
|
|
|
|
/* Register: FMC2_PMEM */
|
|
-#define FMC2_PMEM_MEMSET(x) (((x) & 0xff) << 0)
|
|
-#define FMC2_PMEM_MEMWAIT(x) (((x) & 0xff) << 8)
|
|
-#define FMC2_PMEM_MEMHOLD(x) (((x) & 0xff) << 16)
|
|
-#define FMC2_PMEM_MEMHIZ(x) (((x) & 0xff) << 24)
|
|
+#define FMC2_PMEM_MEMSET GENMASK(7, 0)
|
|
+#define FMC2_PMEM_MEMWAIT GENMASK(15, 8)
|
|
+#define FMC2_PMEM_MEMHOLD GENMASK(23, 16)
|
|
+#define FMC2_PMEM_MEMHIZ GENMASK(31, 24)
|
|
#define FMC2_PMEM_DEFAULT 0x0a0a0a0a
|
|
|
|
/* Register: FMC2_PATT */
|
|
-#define FMC2_PATT_ATTSET(x) (((x) & 0xff) << 0)
|
|
-#define FMC2_PATT_ATTWAIT(x) (((x) & 0xff) << 8)
|
|
-#define FMC2_PATT_ATTHOLD(x) (((x) & 0xff) << 16)
|
|
-#define FMC2_PATT_ATTHIZ(x) (((x) & 0xff) << 24)
|
|
+#define FMC2_PATT_ATTSET GENMASK(7, 0)
|
|
+#define FMC2_PATT_ATTWAIT GENMASK(15, 8)
|
|
+#define FMC2_PATT_ATTHOLD GENMASK(23, 16)
|
|
+#define FMC2_PATT_ATTHIZ GENMASK(31, 24)
|
|
#define FMC2_PATT_DEFAULT 0x0a0a0a0a
|
|
|
|
/* Register: FMC2_ISR */
|
|
@@ -133,9 +132,9 @@
|
|
/* Register: FMC2_CSQCFGR1 */
|
|
#define FMC2_CSQCFGR1_CMD2EN BIT(1)
|
|
#define FMC2_CSQCFGR1_DMADEN BIT(2)
|
|
-#define FMC2_CSQCFGR1_ACYNBR(x) (((x) & 0x7) << 4)
|
|
-#define FMC2_CSQCFGR1_CMD1(x) (((x) & 0xff) << 8)
|
|
-#define FMC2_CSQCFGR1_CMD2(x) (((x) & 0xff) << 16)
|
|
+#define FMC2_CSQCFGR1_ACYNBR GENMASK(6, 4)
|
|
+#define FMC2_CSQCFGR1_CMD1 GENMASK(15, 8)
|
|
+#define FMC2_CSQCFGR1_CMD2 GENMASK(23, 16)
|
|
#define FMC2_CSQCFGR1_CMD1T BIT(24)
|
|
#define FMC2_CSQCFGR1_CMD2T BIT(25)
|
|
|
|
@@ -143,13 +142,13 @@
|
|
#define FMC2_CSQCFGR2_SQSDTEN BIT(0)
|
|
#define FMC2_CSQCFGR2_RCMD2EN BIT(1)
|
|
#define FMC2_CSQCFGR2_DMASEN BIT(2)
|
|
-#define FMC2_CSQCFGR2_RCMD1(x) (((x) & 0xff) << 8)
|
|
-#define FMC2_CSQCFGR2_RCMD2(x) (((x) & 0xff) << 16)
|
|
+#define FMC2_CSQCFGR2_RCMD1 GENMASK(15, 8)
|
|
+#define FMC2_CSQCFGR2_RCMD2 GENMASK(23, 16)
|
|
#define FMC2_CSQCFGR2_RCMD1T BIT(24)
|
|
#define FMC2_CSQCFGR2_RCMD2T BIT(25)
|
|
|
|
/* Register: FMC2_CSQCFGR3 */
|
|
-#define FMC2_CSQCFGR3_SNBR(x) (((x) & 0x1f) << 8)
|
|
+#define FMC2_CSQCFGR3_SNBR GENMASK(13, 8)
|
|
#define FMC2_CSQCFGR3_AC1T BIT(16)
|
|
#define FMC2_CSQCFGR3_AC2T BIT(17)
|
|
#define FMC2_CSQCFGR3_AC3T BIT(18)
|
|
@@ -160,15 +159,15 @@
|
|
#define FMC2_CSQCFGR3_RAC2T BIT(23)
|
|
|
|
/* Register: FMC2_CSQCAR1 */
|
|
-#define FMC2_CSQCAR1_ADDC1(x) (((x) & 0xff) << 0)
|
|
-#define FMC2_CSQCAR1_ADDC2(x) (((x) & 0xff) << 8)
|
|
-#define FMC2_CSQCAR1_ADDC3(x) (((x) & 0xff) << 16)
|
|
-#define FMC2_CSQCAR1_ADDC4(x) (((x) & 0xff) << 24)
|
|
+#define FMC2_CSQCAR1_ADDC1 GENMASK(7, 0)
|
|
+#define FMC2_CSQCAR1_ADDC2 GENMASK(15, 8)
|
|
+#define FMC2_CSQCAR1_ADDC3 GENMASK(23, 16)
|
|
+#define FMC2_CSQCAR1_ADDC4 GENMASK(31, 24)
|
|
|
|
/* Register: FMC2_CSQCAR2 */
|
|
-#define FMC2_CSQCAR2_ADDC5(x) (((x) & 0xff) << 0)
|
|
-#define FMC2_CSQCAR2_NANDCEN(x) (((x) & 0x3) << 10)
|
|
-#define FMC2_CSQCAR2_SAO(x) (((x) & 0xffff) << 16)
|
|
+#define FMC2_CSQCAR2_ADDC5 GENMASK(7, 0)
|
|
+#define FMC2_CSQCAR2_NANDCEN GENMASK(11, 10)
|
|
+#define FMC2_CSQCAR2_SAO GENMASK(31, 16)
|
|
|
|
/* Register: FMC2_CSQIER */
|
|
#define FMC2_CSQIER_TCIE BIT(0)
|
|
@@ -189,28 +188,23 @@
|
|
/* Register: FMC2_BCHDSR0 */
|
|
#define FMC2_BCHDSR0_DUE BIT(0)
|
|
#define FMC2_BCHDSR0_DEF BIT(1)
|
|
-#define FMC2_BCHDSR0_DEN_MASK GENMASK(7, 4)
|
|
-#define FMC2_BCHDSR0_DEN_SHIFT 4
|
|
+#define FMC2_BCHDSR0_DEN GENMASK(7, 4)
|
|
|
|
/* Register: FMC2_BCHDSR1 */
|
|
-#define FMC2_BCHDSR1_EBP1_MASK GENMASK(12, 0)
|
|
-#define FMC2_BCHDSR1_EBP2_MASK GENMASK(28, 16)
|
|
-#define FMC2_BCHDSR1_EBP2_SHIFT 16
|
|
+#define FMC2_BCHDSR1_EBP1 GENMASK(12, 0)
|
|
+#define FMC2_BCHDSR1_EBP2 GENMASK(28, 16)
|
|
|
|
/* Register: FMC2_BCHDSR2 */
|
|
-#define FMC2_BCHDSR2_EBP3_MASK GENMASK(12, 0)
|
|
-#define FMC2_BCHDSR2_EBP4_MASK GENMASK(28, 16)
|
|
-#define FMC2_BCHDSR2_EBP4_SHIFT 16
|
|
+#define FMC2_BCHDSR2_EBP3 GENMASK(12, 0)
|
|
+#define FMC2_BCHDSR2_EBP4 GENMASK(28, 16)
|
|
|
|
/* Register: FMC2_BCHDSR3 */
|
|
-#define FMC2_BCHDSR3_EBP5_MASK GENMASK(12, 0)
|
|
-#define FMC2_BCHDSR3_EBP6_MASK GENMASK(28, 16)
|
|
-#define FMC2_BCHDSR3_EBP6_SHIFT 16
|
|
+#define FMC2_BCHDSR3_EBP5 GENMASK(12, 0)
|
|
+#define FMC2_BCHDSR3_EBP6 GENMASK(28, 16)
|
|
|
|
/* Register: FMC2_BCHDSR4 */
|
|
-#define FMC2_BCHDSR4_EBP7_MASK GENMASK(12, 0)
|
|
-#define FMC2_BCHDSR4_EBP8_MASK GENMASK(28, 16)
|
|
-#define FMC2_BCHDSR4_EBP8_SHIFT 16
|
|
+#define FMC2_BCHDSR4_EBP7 GENMASK(12, 0)
|
|
+#define FMC2_BCHDSR4_EBP8 GENMASK(28, 16)
|
|
|
|
enum stm32_fmc2_ecc {
|
|
FMC2_ECC_HAM = 1,
|
|
@@ -251,7 +245,8 @@ struct stm32_fmc2_nfc {
|
|
struct nand_controller base;
|
|
struct stm32_fmc2_nand nand;
|
|
struct device *dev;
|
|
- void __iomem *io_base;
|
|
+ struct device *cdev;
|
|
+ struct regmap *regmap;
|
|
void __iomem *data_base[FMC2_MAX_CE];
|
|
void __iomem *cmd_base[FMC2_MAX_CE];
|
|
void __iomem *addr_base[FMC2_MAX_CE];
|
|
@@ -281,47 +276,42 @@ static inline struct stm32_fmc2_nfc *to_stm32_nfc(struct nand_controller *base)
|
|
return container_of(base, struct stm32_fmc2_nfc, base);
|
|
}
|
|
|
|
-/* Timings configuration */
|
|
-static void stm32_fmc2_timings_init(struct nand_chip *chip)
|
|
+static void stm32_fmc2_nfc_timings_init(struct nand_chip *chip)
|
|
{
|
|
- struct stm32_fmc2_nfc *fmc2 = to_stm32_nfc(chip->controller);
|
|
+ struct stm32_fmc2_nfc *nfc = to_stm32_nfc(chip->controller);
|
|
struct stm32_fmc2_nand *nand = to_fmc2_nand(chip);
|
|
struct stm32_fmc2_timings *timings = &nand->timings;
|
|
- u32 pcr = readl_relaxed(fmc2->io_base + FMC2_PCR);
|
|
u32 pmem, patt;
|
|
|
|
/* Set tclr/tar timings */
|
|
- pcr &= ~FMC2_PCR_TCLR_MASK;
|
|
- pcr |= FMC2_PCR_TCLR(timings->tclr);
|
|
- pcr &= ~FMC2_PCR_TAR_MASK;
|
|
- pcr |= FMC2_PCR_TAR(timings->tar);
|
|
+ regmap_update_bits(nfc->regmap, FMC2_PCR,
|
|
+ FMC2_PCR_TCLR | FMC2_PCR_TAR,
|
|
+ FIELD_PREP(FMC2_PCR_TCLR, timings->tclr) |
|
|
+ FIELD_PREP(FMC2_PCR_TAR, timings->tar));
|
|
|
|
/* Set tset/twait/thold/thiz timings in common bank */
|
|
- pmem = FMC2_PMEM_MEMSET(timings->tset_mem);
|
|
- pmem |= FMC2_PMEM_MEMWAIT(timings->twait);
|
|
- pmem |= FMC2_PMEM_MEMHOLD(timings->thold_mem);
|
|
- pmem |= FMC2_PMEM_MEMHIZ(timings->thiz);
|
|
+ pmem = FIELD_PREP(FMC2_PMEM_MEMSET, timings->tset_mem);
|
|
+ pmem |= FIELD_PREP(FMC2_PMEM_MEMWAIT, timings->twait);
|
|
+ pmem |= FIELD_PREP(FMC2_PMEM_MEMHOLD, timings->thold_mem);
|
|
+ pmem |= FIELD_PREP(FMC2_PMEM_MEMHIZ, timings->thiz);
|
|
+ regmap_write(nfc->regmap, FMC2_PMEM, pmem);
|
|
|
|
/* Set tset/twait/thold/thiz timings in attribut bank */
|
|
- patt = FMC2_PATT_ATTSET(timings->tset_att);
|
|
- patt |= FMC2_PATT_ATTWAIT(timings->twait);
|
|
- patt |= FMC2_PATT_ATTHOLD(timings->thold_att);
|
|
- patt |= FMC2_PATT_ATTHIZ(timings->thiz);
|
|
-
|
|
- writel_relaxed(pcr, fmc2->io_base + FMC2_PCR);
|
|
- writel_relaxed(pmem, fmc2->io_base + FMC2_PMEM);
|
|
- writel_relaxed(patt, fmc2->io_base + FMC2_PATT);
|
|
+ patt = FIELD_PREP(FMC2_PATT_ATTSET, timings->tset_att);
|
|
+ patt |= FIELD_PREP(FMC2_PATT_ATTWAIT, timings->twait);
|
|
+ patt |= FIELD_PREP(FMC2_PATT_ATTHOLD, timings->thold_att);
|
|
+ patt |= FIELD_PREP(FMC2_PATT_ATTHIZ, timings->thiz);
|
|
+ regmap_write(nfc->regmap, FMC2_PATT, patt);
|
|
}
|
|
|
|
-/* Controller configuration */
|
|
-static void stm32_fmc2_setup(struct nand_chip *chip)
|
|
+static void stm32_fmc2_nfc_setup(struct nand_chip *chip)
|
|
{
|
|
- struct stm32_fmc2_nfc *fmc2 = to_stm32_nfc(chip->controller);
|
|
- u32 pcr = readl_relaxed(fmc2->io_base + FMC2_PCR);
|
|
+ struct stm32_fmc2_nfc *nfc = to_stm32_nfc(chip->controller);
|
|
+ u32 pcr = 0, pcr_mask;
|
|
|
|
/* Configure ECC algorithm (default configuration is Hamming) */
|
|
- pcr &= ~FMC2_PCR_ECCALG;
|
|
- pcr &= ~FMC2_PCR_BCHECC;
|
|
+ pcr_mask = FMC2_PCR_ECCALG;
|
|
+ pcr_mask |= FMC2_PCR_BCHECC;
|
|
if (chip->ecc.strength == FMC2_ECC_BCH8) {
|
|
pcr |= FMC2_PCR_ECCALG;
|
|
pcr |= FMC2_PCR_BCHECC;
|
|
@@ -330,195 +320,159 @@ static void stm32_fmc2_setup(struct nand_chip *chip)
|
|
}
|
|
|
|
/* Set buswidth */
|
|
- pcr &= ~FMC2_PCR_PWID_MASK;
|
|
+ pcr_mask |= FMC2_PCR_PWID;
|
|
if (chip->options & NAND_BUSWIDTH_16)
|
|
- pcr |= FMC2_PCR_PWID(FMC2_PCR_PWID_BUSWIDTH_16);
|
|
+ pcr |= FIELD_PREP(FMC2_PCR_PWID, FMC2_PCR_PWID_BUSWIDTH_16);
|
|
|
|
/* Set ECC sector size */
|
|
- pcr &= ~FMC2_PCR_ECCSS_MASK;
|
|
- pcr |= FMC2_PCR_ECCSS(FMC2_PCR_ECCSS_512);
|
|
+ pcr_mask |= FMC2_PCR_ECCSS;
|
|
+ pcr |= FIELD_PREP(FMC2_PCR_ECCSS, FMC2_PCR_ECCSS_512);
|
|
|
|
- writel_relaxed(pcr, fmc2->io_base + FMC2_PCR);
|
|
+ regmap_update_bits(nfc->regmap, FMC2_PCR, pcr_mask, pcr);
|
|
}
|
|
|
|
-/* Select target */
|
|
-static int stm32_fmc2_select_chip(struct nand_chip *chip, int chipnr)
|
|
+static int stm32_fmc2_nfc_select_chip(struct nand_chip *chip, int chipnr)
|
|
{
|
|
- struct stm32_fmc2_nfc *fmc2 = to_stm32_nfc(chip->controller);
|
|
+ struct stm32_fmc2_nfc *nfc = to_stm32_nfc(chip->controller);
|
|
struct stm32_fmc2_nand *nand = to_fmc2_nand(chip);
|
|
struct dma_slave_config dma_cfg;
|
|
int ret;
|
|
|
|
- if (nand->cs_used[chipnr] == fmc2->cs_sel)
|
|
+ if (nand->cs_used[chipnr] == nfc->cs_sel)
|
|
return 0;
|
|
|
|
- fmc2->cs_sel = nand->cs_used[chipnr];
|
|
-
|
|
- /* FMC2 setup routine */
|
|
- stm32_fmc2_setup(chip);
|
|
+ nfc->cs_sel = nand->cs_used[chipnr];
|
|
+ stm32_fmc2_nfc_setup(chip);
|
|
+ stm32_fmc2_nfc_timings_init(chip);
|
|
|
|
- /* Apply timings */
|
|
- stm32_fmc2_timings_init(chip);
|
|
-
|
|
- if (fmc2->dma_tx_ch && fmc2->dma_rx_ch) {
|
|
+ if (nfc->dma_tx_ch && nfc->dma_rx_ch) {
|
|
memset(&dma_cfg, 0, sizeof(dma_cfg));
|
|
- dma_cfg.src_addr = fmc2->data_phys_addr[fmc2->cs_sel];
|
|
- dma_cfg.dst_addr = fmc2->data_phys_addr[fmc2->cs_sel];
|
|
+ dma_cfg.src_addr = nfc->data_phys_addr[nfc->cs_sel];
|
|
+ dma_cfg.dst_addr = nfc->data_phys_addr[nfc->cs_sel];
|
|
dma_cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
|
|
dma_cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
|
|
dma_cfg.src_maxburst = 32;
|
|
dma_cfg.dst_maxburst = 32;
|
|
|
|
- ret = dmaengine_slave_config(fmc2->dma_tx_ch, &dma_cfg);
|
|
+ ret = dmaengine_slave_config(nfc->dma_tx_ch, &dma_cfg);
|
|
if (ret) {
|
|
- dev_err(fmc2->dev, "tx DMA engine slave config failed\n");
|
|
+ dev_err(nfc->dev, "tx DMA engine slave config failed\n");
|
|
return ret;
|
|
}
|
|
|
|
- ret = dmaengine_slave_config(fmc2->dma_rx_ch, &dma_cfg);
|
|
+ ret = dmaengine_slave_config(nfc->dma_rx_ch, &dma_cfg);
|
|
if (ret) {
|
|
- dev_err(fmc2->dev, "rx DMA engine slave config failed\n");
|
|
+ dev_err(nfc->dev, "rx DMA engine slave config failed\n");
|
|
return ret;
|
|
}
|
|
}
|
|
|
|
- if (fmc2->dma_ecc_ch) {
|
|
+ if (nfc->dma_ecc_ch) {
|
|
/*
|
|
* Hamming: we read HECCR register
|
|
* BCH4/BCH8: we read BCHDSRSx registers
|
|
*/
|
|
memset(&dma_cfg, 0, sizeof(dma_cfg));
|
|
- dma_cfg.src_addr = fmc2->io_phys_addr;
|
|
+ dma_cfg.src_addr = nfc->io_phys_addr;
|
|
dma_cfg.src_addr += chip->ecc.strength == FMC2_ECC_HAM ?
|
|
FMC2_HECCR : FMC2_BCHDSR0;
|
|
dma_cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
|
|
|
|
- ret = dmaengine_slave_config(fmc2->dma_ecc_ch, &dma_cfg);
|
|
+ ret = dmaengine_slave_config(nfc->dma_ecc_ch, &dma_cfg);
|
|
if (ret) {
|
|
- dev_err(fmc2->dev, "ECC DMA engine slave config failed\n");
|
|
+ dev_err(nfc->dev, "ECC DMA engine slave config failed\n");
|
|
return ret;
|
|
}
|
|
|
|
/* Calculate ECC length needed for one sector */
|
|
- fmc2->dma_ecc_len = chip->ecc.strength == FMC2_ECC_HAM ?
|
|
- FMC2_HECCR_LEN : FMC2_BCHDSRS_LEN;
|
|
+ nfc->dma_ecc_len = chip->ecc.strength == FMC2_ECC_HAM ?
|
|
+ FMC2_HECCR_LEN : FMC2_BCHDSRS_LEN;
|
|
}
|
|
|
|
return 0;
|
|
}
|
|
|
|
-/* Set bus width to 16-bit or 8-bit */
|
|
-static void stm32_fmc2_set_buswidth_16(struct stm32_fmc2_nfc *fmc2, bool set)
|
|
+static void stm32_fmc2_nfc_set_buswidth_16(struct stm32_fmc2_nfc *nfc, bool set)
|
|
{
|
|
- u32 pcr = readl_relaxed(fmc2->io_base + FMC2_PCR);
|
|
+ u32 pcr;
|
|
+
|
|
+ pcr = set ? FIELD_PREP(FMC2_PCR_PWID, FMC2_PCR_PWID_BUSWIDTH_16) :
|
|
+ FIELD_PREP(FMC2_PCR_PWID, FMC2_PCR_PWID_BUSWIDTH_8);
|
|
|
|
- pcr &= ~FMC2_PCR_PWID_MASK;
|
|
- if (set)
|
|
- pcr |= FMC2_PCR_PWID(FMC2_PCR_PWID_BUSWIDTH_16);
|
|
- writel_relaxed(pcr, fmc2->io_base + FMC2_PCR);
|
|
+ regmap_update_bits(nfc->regmap, FMC2_PCR, FMC2_PCR_PWID, pcr);
}

-/* Enable/disable ECC */
-static void stm32_fmc2_set_ecc(struct stm32_fmc2_nfc *fmc2, bool enable)
+static void stm32_fmc2_nfc_set_ecc(struct stm32_fmc2_nfc *nfc, bool enable)
{
- u32 pcr = readl(fmc2->io_base + FMC2_PCR);
-
- pcr &= ~FMC2_PCR_ECCEN;
- if (enable)
- pcr |= FMC2_PCR_ECCEN;
- writel(pcr, fmc2->io_base + FMC2_PCR);
+ regmap_update_bits(nfc->regmap, FMC2_PCR, FMC2_PCR_ECCEN,
+ enable ? FMC2_PCR_ECCEN : 0);
}

-/* Enable irq sources in case of the sequencer is used */
-static inline void stm32_fmc2_enable_seq_irq(struct stm32_fmc2_nfc *fmc2)
+static void stm32_fmc2_nfc_enable_seq_irq(struct stm32_fmc2_nfc *nfc)
{
- u32 csqier = readl_relaxed(fmc2->io_base + FMC2_CSQIER);
-
- csqier |= FMC2_CSQIER_TCIE;
-
- fmc2->irq_state = FMC2_IRQ_SEQ;
+ nfc->irq_state = FMC2_IRQ_SEQ;

- writel_relaxed(csqier, fmc2->io_base + FMC2_CSQIER);
+ regmap_update_bits(nfc->regmap, FMC2_CSQIER,
+ FMC2_CSQIER_TCIE, FMC2_CSQIER_TCIE);
}

-/* Disable irq sources in case of the sequencer is used */
-static inline void stm32_fmc2_disable_seq_irq(struct stm32_fmc2_nfc *fmc2)
+static void stm32_fmc2_nfc_disable_seq_irq(struct stm32_fmc2_nfc *nfc)
{
- u32 csqier = readl_relaxed(fmc2->io_base + FMC2_CSQIER);
+ regmap_update_bits(nfc->regmap, FMC2_CSQIER, FMC2_CSQIER_TCIE, 0);

- csqier &= ~FMC2_CSQIER_TCIE;
-
- writel_relaxed(csqier, fmc2->io_base + FMC2_CSQIER);
-
- fmc2->irq_state = FMC2_IRQ_UNKNOWN;
+ nfc->irq_state = FMC2_IRQ_UNKNOWN;
}

-/* Clear irq sources in case of the sequencer is used */
-static inline void stm32_fmc2_clear_seq_irq(struct stm32_fmc2_nfc *fmc2)
+static void stm32_fmc2_nfc_clear_seq_irq(struct stm32_fmc2_nfc *nfc)
{
- writel_relaxed(FMC2_CSQICR_CLEAR_IRQ, fmc2->io_base + FMC2_CSQICR);
+ regmap_write(nfc->regmap, FMC2_CSQICR, FMC2_CSQICR_CLEAR_IRQ);
}

-/* Enable irq sources in case of bch is used */
-static inline void stm32_fmc2_enable_bch_irq(struct stm32_fmc2_nfc *fmc2,
- int mode)
+static void stm32_fmc2_nfc_enable_bch_irq(struct stm32_fmc2_nfc *nfc, int mode)
{
- u32 bchier = readl_relaxed(fmc2->io_base + FMC2_BCHIER);
+ nfc->irq_state = FMC2_IRQ_BCH;

 if (mode == NAND_ECC_WRITE)
- bchier |= FMC2_BCHIER_EPBRIE;
+ regmap_update_bits(nfc->regmap, FMC2_BCHIER,
+ FMC2_BCHIER_EPBRIE, FMC2_BCHIER_EPBRIE);
 else
- bchier |= FMC2_BCHIER_DERIE;
-
- fmc2->irq_state = FMC2_IRQ_BCH;
-
- writel_relaxed(bchier, fmc2->io_base + FMC2_BCHIER);
+ regmap_update_bits(nfc->regmap, FMC2_BCHIER,
+ FMC2_BCHIER_DERIE, FMC2_BCHIER_DERIE);
}

-/* Disable irq sources in case of bch is used */
-static inline void stm32_fmc2_disable_bch_irq(struct stm32_fmc2_nfc *fmc2)
+static void stm32_fmc2_nfc_disable_bch_irq(struct stm32_fmc2_nfc *nfc)
{
- u32 bchier = readl_relaxed(fmc2->io_base + FMC2_BCHIER);
+ regmap_update_bits(nfc->regmap, FMC2_BCHIER,
+ FMC2_BCHIER_DERIE | FMC2_BCHIER_EPBRIE, 0);

- bchier &= ~FMC2_BCHIER_DERIE;
- bchier &= ~FMC2_BCHIER_EPBRIE;
-
- writel_relaxed(bchier, fmc2->io_base + FMC2_BCHIER);
-
- fmc2->irq_state = FMC2_IRQ_UNKNOWN;
+ nfc->irq_state = FMC2_IRQ_UNKNOWN;
}

-/* Clear irq sources in case of bch is used */
-static inline void stm32_fmc2_clear_bch_irq(struct stm32_fmc2_nfc *fmc2)
+static void stm32_fmc2_nfc_clear_bch_irq(struct stm32_fmc2_nfc *nfc)
{
- writel_relaxed(FMC2_BCHICR_CLEAR_IRQ, fmc2->io_base + FMC2_BCHICR);
+ regmap_write(nfc->regmap, FMC2_BCHICR, FMC2_BCHICR_CLEAR_IRQ);
}

/*
 * Enable ECC logic and reset syndrome/parity bits previously calculated
 * Syndrome/parity bits is cleared by setting the ECCEN bit to 0
 */
-static void stm32_fmc2_hwctl(struct nand_chip *chip, int mode)
+static void stm32_fmc2_nfc_hwctl(struct nand_chip *chip, int mode)
{
- struct stm32_fmc2_nfc *fmc2 = to_stm32_nfc(chip->controller);
+ struct stm32_fmc2_nfc *nfc = to_stm32_nfc(chip->controller);

- stm32_fmc2_set_ecc(fmc2, false);
+ stm32_fmc2_nfc_set_ecc(nfc, false);

 if (chip->ecc.strength != FMC2_ECC_HAM) {
- u32 pcr = readl_relaxed(fmc2->io_base + FMC2_PCR);
-
- if (mode == NAND_ECC_WRITE)
- pcr |= FMC2_PCR_WEN;
- else
- pcr &= ~FMC2_PCR_WEN;
- writel_relaxed(pcr, fmc2->io_base + FMC2_PCR);
+ regmap_update_bits(nfc->regmap, FMC2_PCR, FMC2_PCR_WEN,
+ mode == NAND_ECC_WRITE ? FMC2_PCR_WEN : 0);

- reinit_completion(&fmc2->complete);
- stm32_fmc2_clear_bch_irq(fmc2);
- stm32_fmc2_enable_bch_irq(fmc2, mode);
+ reinit_completion(&nfc->complete);
+ stm32_fmc2_nfc_clear_bch_irq(nfc);
+ stm32_fmc2_nfc_enable_bch_irq(nfc, mode);
 }

- stm32_fmc2_set_ecc(fmc2, true);
+ stm32_fmc2_nfc_set_ecc(nfc, true);
}

 /*
@@ -526,40 +480,37 @@ static void stm32_fmc2_hwctl(struct nand_chip *chip, int mode)
 * ECC is 3 bytes for 512 bytes of data (supports error correction up to
 * max of 1-bit)
 */
-static inline void stm32_fmc2_ham_set_ecc(const u32 ecc_sta, u8 *ecc)
+static void stm32_fmc2_nfc_ham_set_ecc(const u32 ecc_sta, u8 *ecc)
{
 ecc[0] = ecc_sta;
 ecc[1] = ecc_sta >> 8;
 ecc[2] = ecc_sta >> 16;
}

-static int stm32_fmc2_ham_calculate(struct nand_chip *chip, const u8 *data,
- u8 *ecc)
+static int stm32_fmc2_nfc_ham_calculate(struct nand_chip *chip, const u8 *data,
+ u8 *ecc)
{
- struct stm32_fmc2_nfc *fmc2 = to_stm32_nfc(chip->controller);
+ struct stm32_fmc2_nfc *nfc = to_stm32_nfc(chip->controller);
 u32 sr, heccr;
 int ret;

- ret = readl_relaxed_poll_timeout(fmc2->io_base + FMC2_SR,
- sr, sr & FMC2_SR_NWRF, 10,
- FMC2_TIMEOUT_MS);
+ ret = regmap_read_poll_timeout(nfc->regmap, FMC2_SR, sr,
+ sr & FMC2_SR_NWRF, 1,
+ 1000 * FMC2_TIMEOUT_MS);
 if (ret) {
- dev_err(fmc2->dev, "ham timeout\n");
+ dev_err(nfc->dev, "ham timeout\n");
 return ret;
 }

- heccr = readl_relaxed(fmc2->io_base + FMC2_HECCR);
-
- stm32_fmc2_ham_set_ecc(heccr, ecc);
-
- /* Disable ECC */
- stm32_fmc2_set_ecc(fmc2, false);
+ regmap_read(nfc->regmap, FMC2_HECCR, &heccr);
+ stm32_fmc2_nfc_ham_set_ecc(heccr, ecc);
+ stm32_fmc2_nfc_set_ecc(nfc, false);

 return 0;
}

-static int stm32_fmc2_ham_correct(struct nand_chip *chip, u8 *dat,
- u8 *read_ecc, u8 *calc_ecc)
+static int stm32_fmc2_nfc_ham_correct(struct nand_chip *chip, u8 *dat,
+ u8 *read_ecc, u8 *calc_ecc)
{
 u8 bit_position = 0, b0, b1, b2;
 u32 byte_addr = 0, b;
@@ -615,28 +566,28 @@ static int stm32_fmc2_ham_correct(struct nand_chip *chip, u8 *dat,
 * ECC is 7/13 bytes for 512 bytes of data (supports error correction up to
 * max of 4-bit/8-bit)
 */
-static int stm32_fmc2_bch_calculate(struct nand_chip *chip, const u8 *data,
- u8 *ecc)
+static int stm32_fmc2_nfc_bch_calculate(struct nand_chip *chip, const u8 *data,
+ u8 *ecc)
{
- struct stm32_fmc2_nfc *fmc2 = to_stm32_nfc(chip->controller);
+ struct stm32_fmc2_nfc *nfc = to_stm32_nfc(chip->controller);
 u32 bchpbr;

 /* Wait until the BCH code is ready */
- if (!wait_for_completion_timeout(&fmc2->complete,
+ if (!wait_for_completion_timeout(&nfc->complete,
 msecs_to_jiffies(FMC2_TIMEOUT_MS))) {
- dev_err(fmc2->dev, "bch timeout\n");
- stm32_fmc2_disable_bch_irq(fmc2);
+ dev_err(nfc->dev, "bch timeout\n");
+ stm32_fmc2_nfc_disable_bch_irq(nfc);
 return -ETIMEDOUT;
 }

 /* Read parity bits */
- bchpbr = readl_relaxed(fmc2->io_base + FMC2_BCHPBR1);
+ regmap_read(nfc->regmap, FMC2_BCHPBR1, &bchpbr);
 ecc[0] = bchpbr;
 ecc[1] = bchpbr >> 8;
 ecc[2] = bchpbr >> 16;
 ecc[3] = bchpbr >> 24;

- bchpbr = readl_relaxed(fmc2->io_base + FMC2_BCHPBR2);
+ regmap_read(nfc->regmap, FMC2_BCHPBR2, &bchpbr);
 ecc[4] = bchpbr;
 ecc[5] = bchpbr >> 8;
 ecc[6] = bchpbr >> 16;
@@ -644,24 +595,22 @@ static int stm32_fmc2_bch_calculate(struct nand_chip *chip, const u8 *data,
 if (chip->ecc.strength == FMC2_ECC_BCH8) {
 ecc[7] = bchpbr >> 24;

- bchpbr = readl_relaxed(fmc2->io_base + FMC2_BCHPBR3);
+ regmap_read(nfc->regmap, FMC2_BCHPBR3, &bchpbr);
 ecc[8] = bchpbr;
 ecc[9] = bchpbr >> 8;
 ecc[10] = bchpbr >> 16;
 ecc[11] = bchpbr >> 24;

- bchpbr = readl_relaxed(fmc2->io_base + FMC2_BCHPBR4);
+ regmap_read(nfc->regmap, FMC2_BCHPBR4, &bchpbr);
 ecc[12] = bchpbr;
 }

- /* Disable ECC */
- stm32_fmc2_set_ecc(fmc2, false);
+ stm32_fmc2_nfc_set_ecc(nfc, false);

 return 0;
}

-/* BCH algorithm correction */
-static int stm32_fmc2_bch_decode(int eccsize, u8 *dat, u32 *ecc_sta)
+static int stm32_fmc2_nfc_bch_decode(int eccsize, u8 *dat, u32 *ecc_sta)
{
 u32 bchdsr0 = ecc_sta[0];
 u32 bchdsr1 = ecc_sta[1];
@@ -680,16 +629,16 @@ static int stm32_fmc2_bch_decode(int eccsize, u8 *dat, u32 *ecc_sta)
 if (unlikely(bchdsr0 & FMC2_BCHDSR0_DUE))
 return -EBADMSG;

- pos[0] = bchdsr1 & FMC2_BCHDSR1_EBP1_MASK;
- pos[1] = (bchdsr1 & FMC2_BCHDSR1_EBP2_MASK) >> FMC2_BCHDSR1_EBP2_SHIFT;
- pos[2] = bchdsr2 & FMC2_BCHDSR2_EBP3_MASK;
- pos[3] = (bchdsr2 & FMC2_BCHDSR2_EBP4_MASK) >> FMC2_BCHDSR2_EBP4_SHIFT;
- pos[4] = bchdsr3 & FMC2_BCHDSR3_EBP5_MASK;
- pos[5] = (bchdsr3 & FMC2_BCHDSR3_EBP6_MASK) >> FMC2_BCHDSR3_EBP6_SHIFT;
- pos[6] = bchdsr4 & FMC2_BCHDSR4_EBP7_MASK;
- pos[7] = (bchdsr4 & FMC2_BCHDSR4_EBP8_MASK) >> FMC2_BCHDSR4_EBP8_SHIFT;
+ pos[0] = FIELD_GET(FMC2_BCHDSR1_EBP1, bchdsr1);
+ pos[1] = FIELD_GET(FMC2_BCHDSR1_EBP2, bchdsr1);
+ pos[2] = FIELD_GET(FMC2_BCHDSR2_EBP3, bchdsr2);
+ pos[3] = FIELD_GET(FMC2_BCHDSR2_EBP4, bchdsr2);
+ pos[4] = FIELD_GET(FMC2_BCHDSR3_EBP5, bchdsr3);
+ pos[5] = FIELD_GET(FMC2_BCHDSR3_EBP6, bchdsr3);
+ pos[6] = FIELD_GET(FMC2_BCHDSR4_EBP7, bchdsr4);
+ pos[7] = FIELD_GET(FMC2_BCHDSR4_EBP8, bchdsr4);

- den = (bchdsr0 & FMC2_BCHDSR0_DEN_MASK) >> FMC2_BCHDSR0_DEN_SHIFT;
+ den = FIELD_GET(FMC2_BCHDSR0_DEN, bchdsr0);
 for (i = 0; i < den; i++) {
 if (pos[i] < eccsize * 8) {
 change_bit(pos[i], (unsigned long *)dat);
@@ -700,34 +649,29 @@ static int stm32_fmc2_bch_decode(int eccsize, u8 *dat, u32 *ecc_sta)
 return nb_errs;
}

-static int stm32_fmc2_bch_correct(struct nand_chip *chip, u8 *dat,
- u8 *read_ecc, u8 *calc_ecc)
+static int stm32_fmc2_nfc_bch_correct(struct nand_chip *chip, u8 *dat,
+ u8 *read_ecc, u8 *calc_ecc)
{
- struct stm32_fmc2_nfc *fmc2 = to_stm32_nfc(chip->controller);
+ struct stm32_fmc2_nfc *nfc = to_stm32_nfc(chip->controller);
 u32 ecc_sta[5];

 /* Wait until the decoding error is ready */
- if (!wait_for_completion_timeout(&fmc2->complete,
+ if (!wait_for_completion_timeout(&nfc->complete,
 msecs_to_jiffies(FMC2_TIMEOUT_MS))) {
- dev_err(fmc2->dev, "bch timeout\n");
- stm32_fmc2_disable_bch_irq(fmc2);
+ dev_err(nfc->dev, "bch timeout\n");
+ stm32_fmc2_nfc_disable_bch_irq(nfc);
 return -ETIMEDOUT;
 }

- ecc_sta[0] = readl_relaxed(fmc2->io_base + FMC2_BCHDSR0);
- ecc_sta[1] = readl_relaxed(fmc2->io_base + FMC2_BCHDSR1);
- ecc_sta[2] = readl_relaxed(fmc2->io_base + FMC2_BCHDSR2);
- ecc_sta[3] = readl_relaxed(fmc2->io_base + FMC2_BCHDSR3);
- ecc_sta[4] = readl_relaxed(fmc2->io_base + FMC2_BCHDSR4);
+ regmap_bulk_read(nfc->regmap, FMC2_BCHDSR0, ecc_sta, 5);

- /* Disable ECC */
- stm32_fmc2_set_ecc(fmc2, false);
+ stm32_fmc2_nfc_set_ecc(nfc, false);

- return stm32_fmc2_bch_decode(chip->ecc.size, dat, ecc_sta);
+ return stm32_fmc2_nfc_bch_decode(chip->ecc.size, dat, ecc_sta);
}

-static int stm32_fmc2_read_page(struct nand_chip *chip, u8 *buf,
- int oob_required, int page)
+static int stm32_fmc2_nfc_read_page(struct nand_chip *chip, u8 *buf,
+ int oob_required, int page)
{
 struct mtd_info *mtd = nand_to_mtd(chip);
 int ret, i, s, stat, eccsize = chip->ecc.size;
@@ -789,35 +733,34 @@ static int stm32_fmc2_read_page(struct nand_chip *chip, u8 *buf,
 }

 /* Sequencer read/write configuration */
-static void stm32_fmc2_rw_page_init(struct nand_chip *chip, int page,
- int raw, bool write_data)
+static void stm32_fmc2_nfc_rw_page_init(struct nand_chip *chip, int page,
+ int raw, bool write_data)
{
- struct stm32_fmc2_nfc *fmc2 = to_stm32_nfc(chip->controller);
+ struct stm32_fmc2_nfc *nfc = to_stm32_nfc(chip->controller);
 struct mtd_info *mtd = nand_to_mtd(chip);
- u32 csqcfgr1, csqcfgr2, csqcfgr3;
- u32 csqar1, csqar2;
 u32 ecc_offset = mtd->writesize + FMC2_BBM_LEN;
- u32 pcr = readl_relaxed(fmc2->io_base + FMC2_PCR);
+ /*
+ * cfg[0] => csqcfgr1, cfg[1] => csqcfgr2, cfg[2] => csqcfgr3
+ * cfg[3] => csqar1, cfg[4] => csqar2
+ */
+ u32 cfg[5];

- if (write_data)
- pcr |= FMC2_PCR_WEN;
- else
- pcr &= ~FMC2_PCR_WEN;
- writel_relaxed(pcr, fmc2->io_base + FMC2_PCR);
+ regmap_update_bits(nfc->regmap, FMC2_PCR, FMC2_PCR_WEN,
+ write_data ? FMC2_PCR_WEN : 0);

 /*
 * - Set Program Page/Page Read command
 * - Enable DMA request data
 * - Set timings
 */
- csqcfgr1 = FMC2_CSQCFGR1_DMADEN | FMC2_CSQCFGR1_CMD1T;
+ cfg[0] = FMC2_CSQCFGR1_DMADEN | FMC2_CSQCFGR1_CMD1T;
 if (write_data)
- csqcfgr1 |= FMC2_CSQCFGR1_CMD1(NAND_CMD_SEQIN);
+ cfg[0] |= FIELD_PREP(FMC2_CSQCFGR1_CMD1, NAND_CMD_SEQIN);
 else
- csqcfgr1 |= FMC2_CSQCFGR1_CMD1(NAND_CMD_READ0) |
- FMC2_CSQCFGR1_CMD2EN |
- FMC2_CSQCFGR1_CMD2(NAND_CMD_READSTART) |
- FMC2_CSQCFGR1_CMD2T;
+ cfg[0] |= FIELD_PREP(FMC2_CSQCFGR1_CMD1, NAND_CMD_READ0) |
+ FMC2_CSQCFGR1_CMD2EN |
+ FIELD_PREP(FMC2_CSQCFGR1_CMD2, NAND_CMD_READSTART) |
+ FMC2_CSQCFGR1_CMD2T;

 /*
 * - Set Random Data Input/Random Data Read command
@@ -826,29 +769,29 @@ static void stm32_fmc2_rw_page_init(struct nand_chip *chip, int page,
 * - Set timings
 */
 if (write_data)
- csqcfgr2 = FMC2_CSQCFGR2_RCMD1(NAND_CMD_RNDIN);
+ cfg[1] = FIELD_PREP(FMC2_CSQCFGR2_RCMD1, NAND_CMD_RNDIN);
 else
- csqcfgr2 = FMC2_CSQCFGR2_RCMD1(NAND_CMD_RNDOUT) |
- FMC2_CSQCFGR2_RCMD2EN |
- FMC2_CSQCFGR2_RCMD2(NAND_CMD_RNDOUTSTART) |
- FMC2_CSQCFGR2_RCMD1T |
- FMC2_CSQCFGR2_RCMD2T;
+ cfg[1] = FIELD_PREP(FMC2_CSQCFGR2_RCMD1, NAND_CMD_RNDOUT) |
+ FMC2_CSQCFGR2_RCMD2EN |
+ FIELD_PREP(FMC2_CSQCFGR2_RCMD2, NAND_CMD_RNDOUTSTART) |
+ FMC2_CSQCFGR2_RCMD1T |
+ FMC2_CSQCFGR2_RCMD2T;
 if (!raw) {
- csqcfgr2 |= write_data ? 0 : FMC2_CSQCFGR2_DMASEN;
- csqcfgr2 |= FMC2_CSQCFGR2_SQSDTEN;
+ cfg[1] |= write_data ? 0 : FMC2_CSQCFGR2_DMASEN;
+ cfg[1] |= FMC2_CSQCFGR2_SQSDTEN;
 }

 /*
 * - Set the number of sectors to be written
 * - Set timings
 */
- csqcfgr3 = FMC2_CSQCFGR3_SNBR(chip->ecc.steps - 1);
+ cfg[2] = FIELD_PREP(FMC2_CSQCFGR3_SNBR, chip->ecc.steps - 1);
 if (write_data) {
- csqcfgr3 |= FMC2_CSQCFGR3_RAC2T;
+ cfg[2] |= FMC2_CSQCFGR3_RAC2T;
 if (chip->options & NAND_ROW_ADDR_3)
- csqcfgr3 |= FMC2_CSQCFGR3_AC5T;
+ cfg[2] |= FMC2_CSQCFGR3_AC5T;
 else
- csqcfgr3 |= FMC2_CSQCFGR3_AC4T;
+ cfg[2] |= FMC2_CSQCFGR3_AC4T;
 }

 /*
@@ -856,8 +799,8 @@ static void stm32_fmc2_rw_page_init(struct nand_chip *chip, int page,
 * Byte 1 and byte 2 => column, we start at 0x0
 * Byte 3 and byte 4 => page
 */
- csqar1 = FMC2_CSQCAR1_ADDC3(page);
- csqar1 |= FMC2_CSQCAR1_ADDC4(page >> 8);
+ cfg[3] = FIELD_PREP(FMC2_CSQCAR1_ADDC3, page);
+ cfg[3] |= FIELD_PREP(FMC2_CSQCAR1_ADDC4, page >> 8);

 /*
 * - Set chip enable number
@@ -865,43 +808,39 @@ static void stm32_fmc2_rw_page_init(struct nand_chip *chip, int page,
 * - Calculate the number of address cycles to be issued
 * - Set byte 5 of address cycle if needed
 */
- csqar2 = FMC2_CSQCAR2_NANDCEN(fmc2->cs_sel);
+ cfg[4] = FIELD_PREP(FMC2_CSQCAR2_NANDCEN, nfc->cs_sel);
 if (chip->options & NAND_BUSWIDTH_16)
- csqar2 |= FMC2_CSQCAR2_SAO(ecc_offset >> 1);
+ cfg[4] |= FIELD_PREP(FMC2_CSQCAR2_SAO, ecc_offset >> 1);
 else
- csqar2 |= FMC2_CSQCAR2_SAO(ecc_offset);
+ cfg[4] |= FIELD_PREP(FMC2_CSQCAR2_SAO, ecc_offset);
 if (chip->options & NAND_ROW_ADDR_3) {
- csqcfgr1 |= FMC2_CSQCFGR1_ACYNBR(5);
- csqar2 |= FMC2_CSQCAR2_ADDC5(page >> 16);
+ cfg[0] |= FIELD_PREP(FMC2_CSQCFGR1_ACYNBR, 5);
+ cfg[4] |= FIELD_PREP(FMC2_CSQCAR2_ADDC5, page >> 16);
 } else {
- csqcfgr1 |= FMC2_CSQCFGR1_ACYNBR(4);
+ cfg[0] |= FIELD_PREP(FMC2_CSQCFGR1_ACYNBR, 4);
 }

- writel_relaxed(csqcfgr1, fmc2->io_base + FMC2_CSQCFGR1);
- writel_relaxed(csqcfgr2, fmc2->io_base + FMC2_CSQCFGR2);
- writel_relaxed(csqcfgr3, fmc2->io_base + FMC2_CSQCFGR3);
- writel_relaxed(csqar1, fmc2->io_base + FMC2_CSQAR1);
- writel_relaxed(csqar2, fmc2->io_base + FMC2_CSQAR2);
+ regmap_bulk_write(nfc->regmap, FMC2_CSQCFGR1, cfg, 5);
}

-static void stm32_fmc2_dma_callback(void *arg)
+static void stm32_fmc2_nfc_dma_callback(void *arg)
{
 complete((struct completion *)arg);
}

 /* Read/write data from/to a page */
-static int stm32_fmc2_xfer(struct nand_chip *chip, const u8 *buf,
- int raw, bool write_data)
+static int stm32_fmc2_nfc_xfer(struct nand_chip *chip, const u8 *buf,
+ int raw, bool write_data)
{
- struct stm32_fmc2_nfc *fmc2 = to_stm32_nfc(chip->controller);
+ struct stm32_fmc2_nfc *nfc = to_stm32_nfc(chip->controller);
 struct dma_async_tx_descriptor *desc_data, *desc_ecc;
 struct scatterlist *sg;
- struct dma_chan *dma_ch = fmc2->dma_rx_ch;
+ struct dma_chan *dma_ch = nfc->dma_rx_ch;
 enum dma_data_direction dma_data_dir = DMA_FROM_DEVICE;
 enum dma_transfer_direction dma_transfer_dir = DMA_DEV_TO_MEM;
- u32 csqcr = readl_relaxed(fmc2->io_base + FMC2_CSQCR);
 int eccsteps = chip->ecc.steps;
 int eccsize = chip->ecc.size;
+ unsigned long timeout = msecs_to_jiffies(FMC2_TIMEOUT_MS);
 const u8 *p = buf;
 int s, ret;

@@ -909,20 +848,20 @@ static int stm32_fmc2_xfer(struct nand_chip *chip, const u8 *buf,
 if (write_data) {
 dma_data_dir = DMA_TO_DEVICE;
 dma_transfer_dir = DMA_MEM_TO_DEV;
- dma_ch = fmc2->dma_tx_ch;
+ dma_ch = nfc->dma_tx_ch;
 }

- for_each_sg(fmc2->dma_data_sg.sgl, sg, eccsteps, s) {
+ for_each_sg(nfc->dma_data_sg.sgl, sg, eccsteps, s) {
 sg_set_buf(sg, p, eccsize);
 p += eccsize;
 }

- ret = dma_map_sg(fmc2->dev, fmc2->dma_data_sg.sgl,
+ ret = dma_map_sg(nfc->dev, nfc->dma_data_sg.sgl,
 eccsteps, dma_data_dir);
 if (ret < 0)
 return ret;

- desc_data = dmaengine_prep_slave_sg(dma_ch, fmc2->dma_data_sg.sgl,
+ desc_data = dmaengine_prep_slave_sg(dma_ch, nfc->dma_data_sg.sgl,
 eccsteps, dma_transfer_dir,
 DMA_PREP_INTERRUPT);
 if (!desc_data) {
@@ -930,10 +869,10 @@ static int stm32_fmc2_xfer(struct nand_chip *chip, const u8 *buf,
 goto err_unmap_data;
 }

- reinit_completion(&fmc2->dma_data_complete);
- reinit_completion(&fmc2->complete);
- desc_data->callback = stm32_fmc2_dma_callback;
- desc_data->callback_param = &fmc2->dma_data_complete;
+ reinit_completion(&nfc->dma_data_complete);
+ reinit_completion(&nfc->complete);
+ desc_data->callback = stm32_fmc2_nfc_dma_callback;
+ desc_data->callback_param = &nfc->dma_data_complete;
 ret = dma_submit_error(dmaengine_submit(desc_data));
 if (ret)
 goto err_unmap_data;
@@ -942,19 +881,19 @@ static int stm32_fmc2_xfer(struct nand_chip *chip, const u8 *buf,

 if (!write_data && !raw) {
 /* Configure DMA ECC status */
- p = fmc2->ecc_buf;
- for_each_sg(fmc2->dma_ecc_sg.sgl, sg, eccsteps, s) {
- sg_set_buf(sg, p, fmc2->dma_ecc_len);
- p += fmc2->dma_ecc_len;
+ p = nfc->ecc_buf;
+ for_each_sg(nfc->dma_ecc_sg.sgl, sg, eccsteps, s) {
+ sg_set_buf(sg, p, nfc->dma_ecc_len);
+ p += nfc->dma_ecc_len;
 }

- ret = dma_map_sg(fmc2->dev, fmc2->dma_ecc_sg.sgl,
+ ret = dma_map_sg(nfc->dev, nfc->dma_ecc_sg.sgl,
 eccsteps, dma_data_dir);
 if (ret < 0)
 goto err_unmap_data;

- desc_ecc = dmaengine_prep_slave_sg(fmc2->dma_ecc_ch,
- fmc2->dma_ecc_sg.sgl,
+ desc_ecc = dmaengine_prep_slave_sg(nfc->dma_ecc_ch,
+ nfc->dma_ecc_sg.sgl,
 eccsteps, dma_transfer_dir,
 DMA_PREP_INTERRUPT);
 if (!desc_ecc) {
@@ -962,76 +901,73 @@ static int stm32_fmc2_xfer(struct nand_chip *chip, const u8 *buf,
 goto err_unmap_ecc;
 }

- reinit_completion(&fmc2->dma_ecc_complete);
- desc_ecc->callback = stm32_fmc2_dma_callback;
- desc_ecc->callback_param = &fmc2->dma_ecc_complete;
+ reinit_completion(&nfc->dma_ecc_complete);
+ desc_ecc->callback = stm32_fmc2_nfc_dma_callback;
+ desc_ecc->callback_param = &nfc->dma_ecc_complete;
 ret = dma_submit_error(dmaengine_submit(desc_ecc));
 if (ret)
 goto err_unmap_ecc;

- dma_async_issue_pending(fmc2->dma_ecc_ch);
+ dma_async_issue_pending(nfc->dma_ecc_ch);
 }

- stm32_fmc2_clear_seq_irq(fmc2);
- stm32_fmc2_enable_seq_irq(fmc2);
+ stm32_fmc2_nfc_clear_seq_irq(nfc);
+ stm32_fmc2_nfc_enable_seq_irq(nfc);

 /* Start the transfer */
- csqcr |= FMC2_CSQCR_CSQSTART;
- writel_relaxed(csqcr, fmc2->io_base + FMC2_CSQCR);
+ regmap_update_bits(nfc->regmap, FMC2_CSQCR,
+ FMC2_CSQCR_CSQSTART, FMC2_CSQCR_CSQSTART);

 /* Wait end of sequencer transfer */
- if (!wait_for_completion_timeout(&fmc2->complete,
- msecs_to_jiffies(FMC2_TIMEOUT_MS))) {
- dev_err(fmc2->dev, "seq timeout\n");
- stm32_fmc2_disable_seq_irq(fmc2);
+ if (!wait_for_completion_timeout(&nfc->complete, timeout)) {
+ dev_err(nfc->dev, "seq timeout\n");
+ stm32_fmc2_nfc_disable_seq_irq(nfc);
 dmaengine_terminate_all(dma_ch);
 if (!write_data && !raw)
- dmaengine_terminate_all(fmc2->dma_ecc_ch);
+ dmaengine_terminate_all(nfc->dma_ecc_ch);
 ret = -ETIMEDOUT;
 goto err_unmap_ecc;
 }

 /* Wait DMA data transfer completion */
- if (!wait_for_completion_timeout(&fmc2->dma_data_complete,
- msecs_to_jiffies(FMC2_TIMEOUT_MS))) {
- dev_err(fmc2->dev, "data DMA timeout\n");
+ if (!wait_for_completion_timeout(&nfc->dma_data_complete, timeout)) {
+ dev_err(nfc->dev, "data DMA timeout\n");
 dmaengine_terminate_all(dma_ch);
 ret = -ETIMEDOUT;
 }

 /* Wait DMA ECC transfer completion */
 if (!write_data && !raw) {
- if (!wait_for_completion_timeout(&fmc2->dma_ecc_complete,
- msecs_to_jiffies(FMC2_TIMEOUT_MS))) {
- dev_err(fmc2->dev, "ECC DMA timeout\n");
- dmaengine_terminate_all(fmc2->dma_ecc_ch);
+ if (!wait_for_completion_timeout(&nfc->dma_ecc_complete,
+ timeout)) {
+ dev_err(nfc->dev, "ECC DMA timeout\n");
+ dmaengine_terminate_all(nfc->dma_ecc_ch);
 ret = -ETIMEDOUT;
 }
 }

err_unmap_ecc:
 if (!write_data && !raw)
- dma_unmap_sg(fmc2->dev, fmc2->dma_ecc_sg.sgl,
+ dma_unmap_sg(nfc->dev, nfc->dma_ecc_sg.sgl,
 eccsteps, dma_data_dir);

err_unmap_data:
- dma_unmap_sg(fmc2->dev, fmc2->dma_data_sg.sgl, eccsteps, dma_data_dir);
+ dma_unmap_sg(nfc->dev, nfc->dma_data_sg.sgl, eccsteps, dma_data_dir);

 return ret;
}

-static int stm32_fmc2_sequencer_write(struct nand_chip *chip,
|
|
- const u8 *buf, int oob_required,
|
|
- int page, int raw)
|
|
+static int stm32_fmc2_nfc_seq_write(struct nand_chip *chip, const u8 *buf,
|
|
+ int oob_required, int page, int raw)
|
|
{
|
|
struct mtd_info *mtd = nand_to_mtd(chip);
|
|
int ret;
|
|
|
|
/* Configure the sequencer */
|
|
- stm32_fmc2_rw_page_init(chip, page, raw, true);
|
|
+ stm32_fmc2_nfc_rw_page_init(chip, page, raw, true);
|
|
|
|
/* Write the page */
|
|
- ret = stm32_fmc2_xfer(chip, buf, raw, true);
|
|
+ ret = stm32_fmc2_nfc_xfer(chip, buf, raw, true);
|
|
if (ret)
|
|
return ret;
|
|
|
|
@@ -1047,55 +983,52 @@ static int stm32_fmc2_sequencer_write(struct nand_chip *chip,
|
|
return nand_prog_page_end_op(chip);
|
|
}
|
|
|
|
-static int stm32_fmc2_sequencer_write_page(struct nand_chip *chip,
|
|
- const u8 *buf,
|
|
- int oob_required,
|
|
- int page)
|
|
+static int stm32_fmc2_nfc_seq_write_page(struct nand_chip *chip, const u8 *buf,
|
|
+ int oob_required, int page)
|
|
{
|
|
int ret;
|
|
|
|
- /* Select the target */
|
|
- ret = stm32_fmc2_select_chip(chip, chip->cur_cs);
|
|
+ ret = stm32_fmc2_nfc_select_chip(chip, chip->cur_cs);
|
|
if (ret)
|
|
return ret;
|
|
|
|
- return stm32_fmc2_sequencer_write(chip, buf, oob_required, page, false);
|
|
+ return stm32_fmc2_nfc_seq_write(chip, buf, oob_required, page, false);
|
|
}
|
|
|
|
-static int stm32_fmc2_sequencer_write_page_raw(struct nand_chip *chip,
|
|
- const u8 *buf,
|
|
- int oob_required,
|
|
- int page)
|
|
+static int stm32_fmc2_nfc_seq_write_page_raw(struct nand_chip *chip,
|
|
+ const u8 *buf, int oob_required,
|
|
+ int page)
|
|
{
|
|
int ret;
|
|
|
|
- /* Select the target */
|
|
- ret = stm32_fmc2_select_chip(chip, chip->cur_cs);
|
|
+ ret = stm32_fmc2_nfc_select_chip(chip, chip->cur_cs);
|
|
if (ret)
|
|
return ret;
|
|
|
|
- return stm32_fmc2_sequencer_write(chip, buf, oob_required, page, true);
|
|
+ return stm32_fmc2_nfc_seq_write(chip, buf, oob_required, page, true);
|
|
}
|
|
|
|
/* Get a status indicating which sectors have errors */
|
|
-static inline u16 stm32_fmc2_get_mapping_status(struct stm32_fmc2_nfc *fmc2)
|
|
+static u16 stm32_fmc2_nfc_get_mapping_status(struct stm32_fmc2_nfc *nfc)
|
|
{
|
|
- u32 csqemsr = readl_relaxed(fmc2->io_base + FMC2_CSQEMSR);
|
|
+ u32 csqemsr;
|
|
|
|
- return csqemsr & FMC2_CSQEMSR_SEM;
|
|
+ regmap_read(nfc->regmap, FMC2_CSQEMSR, &csqemsr);
|
|
+
|
|
+ return FIELD_GET(FMC2_CSQEMSR_SEM, csqemsr);
|
|
}
|
|
|
|
-static int stm32_fmc2_sequencer_correct(struct nand_chip *chip, u8 *dat,
|
|
- u8 *read_ecc, u8 *calc_ecc)
|
|
+static int stm32_fmc2_nfc_seq_correct(struct nand_chip *chip, u8 *dat,
|
|
+ u8 *read_ecc, u8 *calc_ecc)
|
|
{
|
|
struct mtd_info *mtd = nand_to_mtd(chip);
|
|
- struct stm32_fmc2_nfc *fmc2 = to_stm32_nfc(chip->controller);
|
|
+ struct stm32_fmc2_nfc *nfc = to_stm32_nfc(chip->controller);
|
|
int eccbytes = chip->ecc.bytes;
|
|
int eccsteps = chip->ecc.steps;
|
|
int eccstrength = chip->ecc.strength;
|
|
int i, s, eccsize = chip->ecc.size;
|
|
- u32 *ecc_sta = (u32 *)fmc2->ecc_buf;
|
|
- u16 sta_map = stm32_fmc2_get_mapping_status(fmc2);
|
|
+ u32 *ecc_sta = (u32 *)nfc->ecc_buf;
|
|
+ u16 sta_map = stm32_fmc2_nfc_get_mapping_status(nfc);
|
|
unsigned int max_bitflips = 0;
|
|
|
|
for (i = 0, s = 0; s < eccsteps; s++, i += eccbytes, dat += eccsize) {
|
|
@@ -1104,10 +1037,11 @@ static int stm32_fmc2_sequencer_correct(struct nand_chip *chip, u8 *dat,
|
|
if (eccstrength == FMC2_ECC_HAM) {
|
|
/* Ecc_sta = FMC2_HECCR */
|
|
if (sta_map & BIT(s)) {
|
|
- stm32_fmc2_ham_set_ecc(*ecc_sta, &calc_ecc[i]);
|
|
- stat = stm32_fmc2_ham_correct(chip, dat,
|
|
- &read_ecc[i],
|
|
- &calc_ecc[i]);
|
|
+ stm32_fmc2_nfc_ham_set_ecc(*ecc_sta,
|
|
+ &calc_ecc[i]);
|
|
+ stat = stm32_fmc2_nfc_ham_correct(chip, dat,
|
|
+ &read_ecc[i],
|
|
+ &calc_ecc[i]);
|
|
}
|
|
ecc_sta++;
|
|
} else {
|
|
@@ -1119,8 +1053,8 @@ static int stm32_fmc2_sequencer_correct(struct nand_chip *chip, u8 *dat,
|
|
* Ecc_sta[4] = FMC2_BCHDSR4
|
|
*/
|
|
if (sta_map & BIT(s))
|
|
- stat = stm32_fmc2_bch_decode(eccsize, dat,
|
|
- ecc_sta);
|
|
+ stat = stm32_fmc2_nfc_bch_decode(eccsize, dat,
|
|
+ ecc_sta);
|
|
ecc_sta += 5;
|
|
}
|
|
|
|
@@ -1143,30 +1077,29 @@ static int stm32_fmc2_sequencer_correct(struct nand_chip *chip, u8 *dat,
|
|
return max_bitflips;
|
|
}
|
|
|
|
-static int stm32_fmc2_sequencer_read_page(struct nand_chip *chip, u8 *buf,
|
|
- int oob_required, int page)
|
|
+static int stm32_fmc2_nfc_seq_read_page(struct nand_chip *chip, u8 *buf,
|
|
+ int oob_required, int page)
|
|
{
|
|
struct mtd_info *mtd = nand_to_mtd(chip);
|
|
- struct stm32_fmc2_nfc *fmc2 = to_stm32_nfc(chip->controller);
|
|
+ struct stm32_fmc2_nfc *nfc = to_stm32_nfc(chip->controller);
|
|
u8 *ecc_calc = chip->ecc.calc_buf;
|
|
u8 *ecc_code = chip->ecc.code_buf;
|
|
u16 sta_map;
|
|
int ret;
|
|
|
|
- /* Select the target */
|
|
- ret = stm32_fmc2_select_chip(chip, chip->cur_cs);
|
|
+ ret = stm32_fmc2_nfc_select_chip(chip, chip->cur_cs);
|
|
if (ret)
|
|
return ret;
|
|
|
|
/* Configure the sequencer */
|
|
- stm32_fmc2_rw_page_init(chip, page, 0, false);
|
|
+ stm32_fmc2_nfc_rw_page_init(chip, page, 0, false);
|
|
|
|
/* Read the page */
|
|
- ret = stm32_fmc2_xfer(chip, buf, 0, false);
|
|
+ ret = stm32_fmc2_nfc_xfer(chip, buf, 0, false);
|
|
if (ret)
|
|
return ret;
|
|
|
|
- sta_map = stm32_fmc2_get_mapping_status(fmc2);
|
|
+ sta_map = stm32_fmc2_nfc_get_mapping_status(nfc);
|
|
|
|
/* Check if errors happen */
|
|
if (likely(!sta_map)) {
|
|
@@ -1193,22 +1126,21 @@ static int stm32_fmc2_sequencer_read_page(struct nand_chip *chip, u8 *buf,
|
|
return chip->ecc.correct(chip, buf, ecc_code, ecc_calc);
|
|
}
|
|
|
|
-static int stm32_fmc2_sequencer_read_page_raw(struct nand_chip *chip, u8 *buf,
|
|
- int oob_required, int page)
|
|
+static int stm32_fmc2_nfc_seq_read_page_raw(struct nand_chip *chip, u8 *buf,
|
|
+ int oob_required, int page)
|
|
{
|
|
struct mtd_info *mtd = nand_to_mtd(chip);
|
|
int ret;
|
|
|
|
- /* Select the target */
|
|
- ret = stm32_fmc2_select_chip(chip, chip->cur_cs);
|
|
+ ret = stm32_fmc2_nfc_select_chip(chip, chip->cur_cs);
|
|
if (ret)
|
|
return ret;
|
|
|
|
/* Configure the sequencer */
|
|
- stm32_fmc2_rw_page_init(chip, page, 1, false);
|
|
+ stm32_fmc2_nfc_rw_page_init(chip, page, 1, false);
|
|
|
|
/* Read the page */
|
|
- ret = stm32_fmc2_xfer(chip, buf, 1, false);
|
|
+ ret = stm32_fmc2_nfc_xfer(chip, buf, 1, false);
|
|
if (ret)
|
|
return ret;
|
|
|
|
@@ -1221,31 +1153,31 @@ static int stm32_fmc2_sequencer_read_page_raw(struct nand_chip *chip, u8 *buf,
|
|
return 0;
|
|
}
|
|
|
|
-static irqreturn_t stm32_fmc2_irq(int irq, void *dev_id)
|
|
+static irqreturn_t stm32_fmc2_nfc_irq(int irq, void *dev_id)
|
|
{
|
|
- struct stm32_fmc2_nfc *fmc2 = (struct stm32_fmc2_nfc *)dev_id;
|
|
+ struct stm32_fmc2_nfc *nfc = (struct stm32_fmc2_nfc *)dev_id;
|
|
|
|
- if (fmc2->irq_state == FMC2_IRQ_SEQ)
|
|
+ if (nfc->irq_state == FMC2_IRQ_SEQ)
|
|
/* Sequencer is used */
|
|
- stm32_fmc2_disable_seq_irq(fmc2);
|
|
- else if (fmc2->irq_state == FMC2_IRQ_BCH)
|
|
+ stm32_fmc2_nfc_disable_seq_irq(nfc);
|
|
+ else if (nfc->irq_state == FMC2_IRQ_BCH)
|
|
/* BCH is used */
|
|
- stm32_fmc2_disable_bch_irq(fmc2);
|
|
+ stm32_fmc2_nfc_disable_bch_irq(nfc);
|
|
|
|
- complete(&fmc2->complete);
|
|
+ complete(&nfc->complete);
|
|
|
|
return IRQ_HANDLED;
|
|
}
|
|
|
|
-static void stm32_fmc2_read_data(struct nand_chip *chip, void *buf,
|
|
- unsigned int len, bool force_8bit)
|
|
+static void stm32_fmc2_nfc_read_data(struct nand_chip *chip, void *buf,
|
|
+ unsigned int len, bool force_8bit)
|
|
{
|
|
- struct stm32_fmc2_nfc *fmc2 = to_stm32_nfc(chip->controller);
|
|
- void __iomem *io_addr_r = fmc2->data_base[fmc2->cs_sel];
|
|
+ struct stm32_fmc2_nfc *nfc = to_stm32_nfc(chip->controller);
|
|
+ void __iomem *io_addr_r = nfc->data_base[nfc->cs_sel];
|
|
|
|
if (force_8bit && chip->options & NAND_BUSWIDTH_16)
|
|
/* Reconfigure bus width to 8-bit */
|
|
- stm32_fmc2_set_buswidth_16(fmc2, false);
|
|
+ stm32_fmc2_nfc_set_buswidth_16(nfc, false);
|
|
|
|
if (!IS_ALIGNED((uintptr_t)buf, sizeof(u32))) {
|
|
if (!IS_ALIGNED((uintptr_t)buf, sizeof(u16)) && len) {
|
|
@@ -1281,18 +1213,18 @@ static void stm32_fmc2_read_data(struct nand_chip *chip, void *buf,
|
|
|
|
if (force_8bit && chip->options & NAND_BUSWIDTH_16)
|
|
/* Reconfigure bus width to 16-bit */
|
|
- stm32_fmc2_set_buswidth_16(fmc2, true);
|
|
+ stm32_fmc2_nfc_set_buswidth_16(nfc, true);
|
|
}
|
|
|
|
-static void stm32_fmc2_write_data(struct nand_chip *chip, const void *buf,
|
|
- unsigned int len, bool force_8bit)
|
|
+static void stm32_fmc2_nfc_write_data(struct nand_chip *chip, const void *buf,
|
|
+ unsigned int len, bool force_8bit)
|
|
{
|
|
- struct stm32_fmc2_nfc *fmc2 = to_stm32_nfc(chip->controller);
|
|
- void __iomem *io_addr_w = fmc2->data_base[fmc2->cs_sel];
|
|
+ struct stm32_fmc2_nfc *nfc = to_stm32_nfc(chip->controller);
|
|
+ void __iomem *io_addr_w = nfc->data_base[nfc->cs_sel];
|
|
|
|
if (force_8bit && chip->options & NAND_BUSWIDTH_16)
|
|
/* Reconfigure bus width to 8-bit */
|
|
- stm32_fmc2_set_buswidth_16(fmc2, false);
|
|
+ stm32_fmc2_nfc_set_buswidth_16(nfc, false);
|
|
|
|
if (!IS_ALIGNED((uintptr_t)buf, sizeof(u32))) {
|
|
if (!IS_ALIGNED((uintptr_t)buf, sizeof(u16)) && len) {
|
|
@@ -1328,44 +1260,45 @@ static void stm32_fmc2_write_data(struct nand_chip *chip, const void *buf,
|
|
|
|
if (force_8bit && chip->options & NAND_BUSWIDTH_16)
|
|
/* Reconfigure bus width to 16-bit */
|
|
- stm32_fmc2_set_buswidth_16(fmc2, true);
|
|
+ stm32_fmc2_nfc_set_buswidth_16(nfc, true);
|
|
}
|
|
|
|
-static int stm32_fmc2_waitrdy(struct nand_chip *chip, unsigned long timeout_ms)
|
|
+static int stm32_fmc2_nfc_waitrdy(struct nand_chip *chip,
|
|
+ unsigned long timeout_ms)
|
|
{
|
|
- struct stm32_fmc2_nfc *fmc2 = to_stm32_nfc(chip->controller);
|
|
+ struct stm32_fmc2_nfc *nfc = to_stm32_nfc(chip->controller);
|
|
const struct nand_sdr_timings *timings;
|
|
u32 isr, sr;
|
|
|
|
/* Check if there is no pending requests to the NAND flash */
|
|
- if (readl_relaxed_poll_timeout_atomic(fmc2->io_base + FMC2_SR, sr,
|
|
- sr & FMC2_SR_NWRF, 1,
|
|
- FMC2_TIMEOUT_US))
|
|
- dev_warn(fmc2->dev, "Waitrdy timeout\n");
|
|
+ if (regmap_read_poll_timeout(nfc->regmap, FMC2_SR, sr,
|
|
+ sr & FMC2_SR_NWRF, 1,
|
|
+ 1000 * FMC2_TIMEOUT_MS))
|
|
+ dev_warn(nfc->dev, "Waitrdy timeout\n");
|
|
|
|
/* Wait tWB before R/B# signal is low */
|
|
timings = nand_get_sdr_timings(&chip->data_interface);
|
|
ndelay(PSEC_TO_NSEC(timings->tWB_max));
|
|
|
|
/* R/B# signal is low, clear high level flag */
|
|
- writel_relaxed(FMC2_ICR_CIHLF, fmc2->io_base + FMC2_ICR);
|
|
+ regmap_write(nfc->regmap, FMC2_ICR, FMC2_ICR_CIHLF);
|
|
|
|
/* Wait R/B# signal is high */
|
|
- return readl_relaxed_poll_timeout_atomic(fmc2->io_base + FMC2_ISR,
|
|
- isr, isr & FMC2_ISR_IHLF,
|
|
- 5, 1000 * timeout_ms);
|
|
+ return regmap_read_poll_timeout(nfc->regmap, FMC2_ISR, isr,
|
|
+ isr & FMC2_ISR_IHLF, 5,
|
|
+ 1000 * FMC2_TIMEOUT_MS);
|
|
}
|
|
|
|
-static int stm32_fmc2_exec_op(struct nand_chip *chip,
|
|
- const struct nand_operation *op,
|
|
- bool check_only)
|
|
+static int stm32_fmc2_nfc_exec_op(struct nand_chip *chip,
|
|
+ const struct nand_operation *op,
|
|
+ bool check_only)
|
|
{
|
|
- struct stm32_fmc2_nfc *fmc2 = to_stm32_nfc(chip->controller);
|
|
+ struct stm32_fmc2_nfc *nfc = to_stm32_nfc(chip->controller);
|
|
const struct nand_op_instr *instr = NULL;
|
|
- unsigned int op_id, i;
|
|
+ unsigned int op_id, i, timeout;
|
|
int ret;
|
|
|
|
- ret = stm32_fmc2_select_chip(chip, op->cs);
|
|
+ ret = stm32_fmc2_nfc_select_chip(chip, op->cs);
|
|
if (ret)
|
|
return ret;
|
|
|
|
@@ -1378,30 +1311,30 @@ static int stm32_fmc2_exec_op(struct nand_chip *chip,
|
|
switch (instr->type) {
|
|
case NAND_OP_CMD_INSTR:
|
|
writeb_relaxed(instr->ctx.cmd.opcode,
|
|
- fmc2->cmd_base[fmc2->cs_sel]);
|
|
+ nfc->cmd_base[nfc->cs_sel]);
|
|
break;
|
|
|
|
case NAND_OP_ADDR_INSTR:
|
|
for (i = 0; i < instr->ctx.addr.naddrs; i++)
|
|
writeb_relaxed(instr->ctx.addr.addrs[i],
|
|
- fmc2->addr_base[fmc2->cs_sel]);
|
|
+ nfc->addr_base[nfc->cs_sel]);
|
|
break;
|
|
|
|
case NAND_OP_DATA_IN_INSTR:
|
|
- stm32_fmc2_read_data(chip, instr->ctx.data.buf.in,
|
|
- instr->ctx.data.len,
|
|
- instr->ctx.data.force_8bit);
|
|
+ stm32_fmc2_nfc_read_data(chip, instr->ctx.data.buf.in,
|
|
+ instr->ctx.data.len,
|
|
+ instr->ctx.data.force_8bit);
|
|
break;
|
|
|
|
case NAND_OP_DATA_OUT_INSTR:
|
|
- stm32_fmc2_write_data(chip, instr->ctx.data.buf.out,
|
|
- instr->ctx.data.len,
|
|
- instr->ctx.data.force_8bit);
|
|
+ stm32_fmc2_nfc_write_data(chip, instr->ctx.data.buf.out,
|
|
+ instr->ctx.data.len,
|
|
+ instr->ctx.data.force_8bit);
|
|
break;
|
|
|
|
case NAND_OP_WAITRDY_INSTR:
|
|
- ret = stm32_fmc2_waitrdy(chip,
|
|
- instr->ctx.waitrdy.timeout_ms);
|
|
+ timeout = instr->ctx.waitrdy.timeout_ms;
|
|
+ ret = stm32_fmc2_nfc_waitrdy(chip, timeout);
|
|
break;
|
|
}
|
|
}
|
|
@@ -1409,21 +1342,21 @@ static int stm32_fmc2_exec_op(struct nand_chip *chip,
|
|
return ret;
|
|
}
|
|
|
|
-/* Controller initialization */
|
|
-static void stm32_fmc2_init(struct stm32_fmc2_nfc *fmc2)
|
|
+static void stm32_fmc2_nfc_init(struct stm32_fmc2_nfc *nfc)
|
|
{
|
|
- u32 pcr = readl_relaxed(fmc2->io_base + FMC2_PCR);
|
|
- u32 bcr1 = readl_relaxed(fmc2->io_base + FMC2_BCR1);
|
|
+ u32 pcr;
|
|
+
|
|
+ regmap_read(nfc->regmap, FMC2_PCR, &pcr);
|
|
|
|
/* Set CS used to undefined */
|
|
- fmc2->cs_sel = -1;
|
|
+ nfc->cs_sel = -1;
|
|
|
|
/* Enable wait feature and nand flash memory bank */
|
|
pcr |= FMC2_PCR_PWAITEN;
|
|
pcr |= FMC2_PCR_PBKEN;
|
|
|
|
/* Set buswidth to 8 bits mode for identification */
|
|
- pcr &= ~FMC2_PCR_PWID_MASK;
|
|
+ pcr &= ~FMC2_PCR_PWID;
|
|
|
|
/* ECC logic is disabled */
|
|
pcr &= ~FMC2_PCR_ECCEN;
|
|
@@ -1434,32 +1367,32 @@ static void stm32_fmc2_init(struct stm32_fmc2_nfc *fmc2)
|
|
pcr &= ~FMC2_PCR_WEN;
|
|
|
|
/* Set default ECC sector size */
|
|
- pcr &= ~FMC2_PCR_ECCSS_MASK;
|
|
- pcr |= FMC2_PCR_ECCSS(FMC2_PCR_ECCSS_2048);
|
|
+ pcr &= ~FMC2_PCR_ECCSS;
|
|
+ pcr |= FIELD_PREP(FMC2_PCR_ECCSS, FMC2_PCR_ECCSS_2048);
|
|
|
|
/* Set default tclr/tar timings */
|
|
- pcr &= ~FMC2_PCR_TCLR_MASK;
|
|
- pcr |= FMC2_PCR_TCLR(FMC2_PCR_TCLR_DEFAULT);
|
|
- pcr &= ~FMC2_PCR_TAR_MASK;
|
|
- pcr |= FMC2_PCR_TAR(FMC2_PCR_TAR_DEFAULT);
|
|
+ pcr &= ~FMC2_PCR_TCLR;
|
|
+ pcr |= FIELD_PREP(FMC2_PCR_TCLR, FMC2_PCR_TCLR_DEFAULT);
|
|
+ pcr &= ~FMC2_PCR_TAR;
|
|
+ pcr |= FIELD_PREP(FMC2_PCR_TAR, FMC2_PCR_TAR_DEFAULT);
|
|
|
|
/* Enable FMC2 controller */
|
|
- bcr1 |= FMC2_BCR1_FMC2EN;
|
|
+ if (nfc->dev == nfc->cdev)
|
|
+ regmap_update_bits(nfc->regmap, FMC2_BCR1,
|
|
+ FMC2_BCR1_FMC2EN, FMC2_BCR1_FMC2EN);
|
|
|
|
- writel_relaxed(bcr1, fmc2->io_base + FMC2_BCR1);
|
|
- writel_relaxed(pcr, fmc2->io_base + FMC2_PCR);
|
|
- writel_relaxed(FMC2_PMEM_DEFAULT, fmc2->io_base + FMC2_PMEM);
|
|
- writel_relaxed(FMC2_PATT_DEFAULT, fmc2->io_base + FMC2_PATT);
|
|
+ regmap_write(nfc->regmap, FMC2_PCR, pcr);
|
|
+ regmap_write(nfc->regmap, FMC2_PMEM, FMC2_PMEM_DEFAULT);
|
|
+ regmap_write(nfc->regmap, FMC2_PATT, FMC2_PATT_DEFAULT);
|
|
}
|
|
|
|
-/* Controller timings */
|
|
-static void stm32_fmc2_calc_timings(struct nand_chip *chip,
|
|
- const struct nand_sdr_timings *sdrt)
|
|
+static void stm32_fmc2_nfc_calc_timings(struct nand_chip *chip,
|
|
+ const struct nand_sdr_timings *sdrt)
|
|
{
|
|
- struct stm32_fmc2_nfc *fmc2 = to_stm32_nfc(chip->controller);
|
|
+ struct stm32_fmc2_nfc *nfc = to_stm32_nfc(chip->controller);
|
|
struct stm32_fmc2_nand *nand = to_fmc2_nand(chip);
|
|
struct stm32_fmc2_timings *tims = &nand->timings;
|
|
- unsigned long hclk = clk_get_rate(fmc2->clk);
|
|
+ unsigned long hclk = clk_get_rate(nfc->clk);
|
|
unsigned long hclkp = NSEC_PER_SEC / (hclk / 1000);
|
|
unsigned long timing, tar, tclr, thiz, twait;
|
|
unsigned long tset_mem, tset_att, thold_mem, thold_att;
|
|
@@ -1583,8 +1516,8 @@ static void stm32_fmc2_calc_timings(struct nand_chip *chip,
|
|
tims->thold_att = clamp_val(timing, 1, FMC2_PMEM_PATT_TIMING_MASK);
|
|
}
|
|
|
|
-static int stm32_fmc2_setup_interface(struct nand_chip *chip, int chipnr,
|
|
- const struct nand_data_interface *conf)
|
|
+static int stm32_fmc2_nfc_setup_interface(struct nand_chip *chip, int chipnr,
|
|
+ const struct nand_data_interface *conf)
|
|
{
|
|
const struct nand_sdr_timings *sdrt;
|
|
|
|
@@ -1595,77 +1528,102 @@ static int stm32_fmc2_setup_interface(struct nand_chip *chip, int chipnr,
|
|
if (chipnr == NAND_DATA_IFACE_CHECK_ONLY)
|
|
return 0;
|
|
|
|
- stm32_fmc2_calc_timings(chip, sdrt);
|
|
-
|
|
- /* Apply timings */
|
|
- stm32_fmc2_timings_init(chip);
|
|
+ stm32_fmc2_nfc_calc_timings(chip, sdrt);
|
|
+ stm32_fmc2_nfc_timings_init(chip);
|
|
|
|
return 0;
|
|
}
|
|
|
|
-/* DMA configuration */
|
|
-static int stm32_fmc2_dma_setup(struct stm32_fmc2_nfc *fmc2)
|
|
+static int stm32_fmc2_nfc_dma_setup(struct stm32_fmc2_nfc *nfc)
|
|
{
|
|
- int ret;
|
|
+ int ret = 0;
|
|
|
|
- fmc2->dma_tx_ch = dma_request_slave_channel(fmc2->dev, "tx");
|
|
- fmc2->dma_rx_ch = dma_request_slave_channel(fmc2->dev, "rx");
|
|
- fmc2->dma_ecc_ch = dma_request_slave_channel(fmc2->dev, "ecc");
|
|
+ nfc->dma_tx_ch = dma_request_chan(nfc->dev, "tx");
|
|
+ if (IS_ERR(nfc->dma_tx_ch)) {
|
|
+ ret = PTR_ERR(nfc->dma_tx_ch);
|
|
+ if (ret != -ENODEV && ret != -EPROBE_DEFER)
|
|
+ dev_err(nfc->dev,
|
|
+ "failed to request tx DMA channel: %d\n", ret);
|
|
+ nfc->dma_tx_ch = NULL;
|
|
+ goto err_dma;
|
|
+ }
|
|
|
|
- if (!fmc2->dma_tx_ch || !fmc2->dma_rx_ch || !fmc2->dma_ecc_ch) {
|
|
- dev_warn(fmc2->dev, "DMAs not defined in the device tree, polling mode is used\n");
|
|
- return 0;
|
|
+ nfc->dma_rx_ch = dma_request_chan(nfc->dev, "rx");
|
|
+ if (IS_ERR(nfc->dma_rx_ch)) {
|
|
+ ret = PTR_ERR(nfc->dma_rx_ch);
|
|
+ if (ret != -ENODEV && ret != -EPROBE_DEFER)
|
|
+ dev_err(nfc->dev,
|
|
+ "failed to request rx DMA channel: %d\n", ret);
|
|
+ nfc->dma_rx_ch = NULL;
|
|
+ goto err_dma;
|
|
}
|
|
|
|
- ret = sg_alloc_table(&fmc2->dma_ecc_sg, FMC2_MAX_SG, GFP_KERNEL);
|
|
+ nfc->dma_ecc_ch = dma_request_chan(nfc->dev, "ecc");
|
|
+ if (IS_ERR(nfc->dma_ecc_ch)) {
|
|
+ ret = PTR_ERR(nfc->dma_ecc_ch);
|
|
+ if (ret != -ENODEV && ret != -EPROBE_DEFER)
|
|
+ dev_err(nfc->dev,
|
|
+ "failed to request ecc DMA channel: %d\n", ret);
|
|
+ nfc->dma_ecc_ch = NULL;
|
|
+ goto err_dma;
|
|
+ }
|
|
+
|
|
+ ret = sg_alloc_table(&nfc->dma_ecc_sg, FMC2_MAX_SG, GFP_KERNEL);
|
|
if (ret)
|
|
return ret;
|
|
|
|
/* Allocate a buffer to store ECC status registers */
|
|
- fmc2->ecc_buf = devm_kzalloc(fmc2->dev, FMC2_MAX_ECC_BUF_LEN,
|
|
- GFP_KERNEL);
|
|
- if (!fmc2->ecc_buf)
|
|
+ nfc->ecc_buf = devm_kzalloc(nfc->dev, FMC2_MAX_ECC_BUF_LEN, GFP_KERNEL);
|
|
+ if (!nfc->ecc_buf)
|
|
return -ENOMEM;
|
|
|
|
- ret = sg_alloc_table(&fmc2->dma_data_sg, FMC2_MAX_SG, GFP_KERNEL);
|
|
+ ret = sg_alloc_table(&nfc->dma_data_sg, FMC2_MAX_SG, GFP_KERNEL);
|
|
if (ret)
|
|
return ret;
|
|
|
|
- init_completion(&fmc2->dma_data_complete);
|
|
- init_completion(&fmc2->dma_ecc_complete);
|
|
+ init_completion(&nfc->dma_data_complete);
|
|
+ init_completion(&nfc->dma_ecc_complete);
|
|
|
|
return 0;
|
|
+
|
|
+err_dma:
|
|
+ if (ret == -ENODEV) {
|
|
+ dev_warn(nfc->dev,
|
|
+ "DMAs not defined in the DT, polling mode is used\n");
|
|
+ ret = 0;
|
|
+ }
|
|
+
|
|
+ return ret;
|
|
}
|
|
|
|
-/* NAND callbacks setup */
|
|
-static void stm32_fmc2_nand_callbacks_setup(struct nand_chip *chip)
|
|
+static void stm32_fmc2_nfc_nand_callbacks_setup(struct nand_chip *chip)
|
|
{
|
|
- struct stm32_fmc2_nfc *fmc2 = to_stm32_nfc(chip->controller);
|
|
+ struct stm32_fmc2_nfc *nfc = to_stm32_nfc(chip->controller);
|
|
|
|
/*
|
|
* Specific callbacks to read/write a page depending on
|
|
* the mode (polling/sequencer) and the algo used (Hamming, BCH).
|
|
*/
|
|
- if (fmc2->dma_tx_ch && fmc2->dma_rx_ch && fmc2->dma_ecc_ch) {
|
|
+ if (nfc->dma_tx_ch && nfc->dma_rx_ch && nfc->dma_ecc_ch) {
|
|
/* DMA => use sequencer mode callbacks */
|
|
- chip->ecc.correct = stm32_fmc2_sequencer_correct;
|
|
- chip->ecc.write_page = stm32_fmc2_sequencer_write_page;
|
|
- chip->ecc.read_page = stm32_fmc2_sequencer_read_page;
|
|
- chip->ecc.write_page_raw = stm32_fmc2_sequencer_write_page_raw;
|
|
- chip->ecc.read_page_raw = stm32_fmc2_sequencer_read_page_raw;
|
|
+ chip->ecc.correct = stm32_fmc2_nfc_seq_correct;
|
|
+ chip->ecc.write_page = stm32_fmc2_nfc_seq_write_page;
|
|
+ chip->ecc.read_page = stm32_fmc2_nfc_seq_read_page;
|
|
+ chip->ecc.write_page_raw = stm32_fmc2_nfc_seq_write_page_raw;
|
|
+ chip->ecc.read_page_raw = stm32_fmc2_nfc_seq_read_page_raw;
|
|
} else {
|
|
/* No DMA => use polling mode callbacks */
|
|
- chip->ecc.hwctl = stm32_fmc2_hwctl;
|
|
+ chip->ecc.hwctl = stm32_fmc2_nfc_hwctl;
|
|
if (chip->ecc.strength == FMC2_ECC_HAM) {
|
|
/* Hamming is used */
|
|
- chip->ecc.calculate = stm32_fmc2_ham_calculate;
|
|
- chip->ecc.correct = stm32_fmc2_ham_correct;
|
|
+ chip->ecc.calculate = stm32_fmc2_nfc_ham_calculate;
|
|
+ chip->ecc.correct = stm32_fmc2_nfc_ham_correct;
|
|
chip->ecc.options |= NAND_ECC_GENERIC_ERASED_CHECK;
|
|
} else {
|
|
/* BCH is used */
|
|
- chip->ecc.calculate = stm32_fmc2_bch_calculate;
|
|
- chip->ecc.correct = stm32_fmc2_bch_correct;
|
|
- chip->ecc.read_page = stm32_fmc2_read_page;
|
|
+ chip->ecc.calculate = stm32_fmc2_nfc_bch_calculate;
|
|
+ chip->ecc.correct = stm32_fmc2_nfc_bch_correct;
|
|
+ chip->ecc.read_page = stm32_fmc2_nfc_read_page;
|
|
}
|
|
}
|
|
|
|
@@ -1678,9 +1636,8 @@ static void stm32_fmc2_nand_callbacks_setup(struct nand_chip *chip)
|
|
chip->ecc.bytes = chip->options & NAND_BUSWIDTH_16 ? 8 : 7;
|
|
}
|
|
|
|
-/* FMC2 layout */
|
|
-static int stm32_fmc2_nand_ooblayout_ecc(struct mtd_info *mtd, int section,
|
|
- struct mtd_oob_region *oobregion)
|
|
+static int stm32_fmc2_nfc_ooblayout_ecc(struct mtd_info *mtd, int section,
|
|
+ struct mtd_oob_region *oobregion)
|
|
{
|
|
struct nand_chip *chip = mtd_to_nand(mtd);
|
|
struct nand_ecc_ctrl *ecc = &chip->ecc;
|
|
@@ -1694,8 +1651,8 @@ static int stm32_fmc2_nand_ooblayout_ecc(struct mtd_info *mtd, int section,
|
|
return 0;
|
|
}
|
|
|
|
-static int stm32_fmc2_nand_ooblayout_free(struct mtd_info *mtd, int section,
|
|
- struct mtd_oob_region *oobregion)
|
|
+static int stm32_fmc2_nfc_ooblayout_free(struct mtd_info *mtd, int section,
|
|
+ struct mtd_oob_region *oobregion)
|
|
{
|
|
struct nand_chip *chip = mtd_to_nand(mtd);
|
|
struct nand_ecc_ctrl *ecc = &chip->ecc;
|
|
@@ -1709,13 +1666,12 @@ static int stm32_fmc2_nand_ooblayout_free(struct mtd_info *mtd, int section,
|
|
return 0;
|
|
}
|
|
|
|
-static const struct mtd_ooblayout_ops stm32_fmc2_nand_ooblayout_ops = {
|
|
- .ecc = stm32_fmc2_nand_ooblayout_ecc,
|
|
- .free = stm32_fmc2_nand_ooblayout_free,
|
|
+static const struct mtd_ooblayout_ops stm32_fmc2_nfc_ooblayout_ops = {
|
|
+ .ecc = stm32_fmc2_nfc_ooblayout_ecc,
|
|
+ .free = stm32_fmc2_nfc_ooblayout_free,
|
|
};
|
|
|
|
-/* FMC2 caps */
|
|
-static int stm32_fmc2_calc_ecc_bytes(int step_size, int strength)
|
|
+static int stm32_fmc2_nfc_calc_ecc_bytes(int step_size, int strength)
|
|
{
|
|
/* Hamming */
|
|
if (strength == FMC2_ECC_HAM)
|
|
@@ -1729,14 +1685,13 @@ static int stm32_fmc2_calc_ecc_bytes(int step_size, int strength)
|
|
return 8;
|
|
}
|
|
|
|
-NAND_ECC_CAPS_SINGLE(stm32_fmc2_ecc_caps, stm32_fmc2_calc_ecc_bytes,
|
|
+NAND_ECC_CAPS_SINGLE(stm32_fmc2_nfc_ecc_caps, stm32_fmc2_nfc_calc_ecc_bytes,
|
|
FMC2_ECC_STEP_SIZE,
|
|
FMC2_ECC_HAM, FMC2_ECC_BCH4, FMC2_ECC_BCH8);
|
|
|
|
-/* FMC2 controller ops */
|
|
-static int stm32_fmc2_attach_chip(struct nand_chip *chip)
|
|
+static int stm32_fmc2_nfc_attach_chip(struct nand_chip *chip)
|
|
{
|
|
- struct stm32_fmc2_nfc *fmc2 = to_stm32_nfc(chip->controller);
|
|
+ struct stm32_fmc2_nfc *nfc = to_stm32_nfc(chip->controller);
|
|
struct mtd_info *mtd = nand_to_mtd(chip);
|
|
int ret;
|
|
|
|
@@ -1748,49 +1703,45 @@ static int stm32_fmc2_attach_chip(struct nand_chip *chip)
|
|
* ECC sector size = 512
|
|
*/
|
|
if (chip->ecc.mode != NAND_ECC_HW) {
|
|
- dev_err(fmc2->dev, "nand_ecc_mode is not well defined in the DT\n");
|
|
+ dev_err(nfc->dev, "nand_ecc_mode is not well defined in the DT\n");
|
|
return -EINVAL;
|
|
}
|
|
|
|
- ret = nand_ecc_choose_conf(chip, &stm32_fmc2_ecc_caps,
|
|
+ ret = nand_ecc_choose_conf(chip, &stm32_fmc2_nfc_ecc_caps,
|
|
mtd->oobsize - FMC2_BBM_LEN);
|
|
if (ret) {
|
|
- dev_err(fmc2->dev, "no valid ECC settings set\n");
|
|
+ dev_err(nfc->dev, "no valid ECC settings set\n");
|
|
return ret;
|
|
}
|
|
|
|
if (mtd->writesize / chip->ecc.size > FMC2_MAX_SG) {
|
|
- dev_err(fmc2->dev, "nand page size is not supported\n");
|
|
+ dev_err(nfc->dev, "nand page size is not supported\n");
|
|
return -EINVAL;
|
|
}
|
|
|
|
if (chip->bbt_options & NAND_BBT_USE_FLASH)
|
|
chip->bbt_options |= NAND_BBT_NO_OOB;
|
|
|
|
- /* NAND callbacks setup */
|
|
- stm32_fmc2_nand_callbacks_setup(chip);
|
|
+ stm32_fmc2_nfc_nand_callbacks_setup(chip);
|
|
|
|
- /* Define ECC layout */
|
|
- mtd_set_ooblayout(mtd, &stm32_fmc2_nand_ooblayout_ops);
|
|
+ mtd_set_ooblayout(mtd, &stm32_fmc2_nfc_ooblayout_ops);
|
|
|
|
- /* Configure bus width to 16-bit */
|
|
if (chip->options & NAND_BUSWIDTH_16)
|
|
- stm32_fmc2_set_buswidth_16(fmc2, true);
|
|
+ stm32_fmc2_nfc_set_buswidth_16(nfc, true);
|
|
|
|
return 0;
|
|
}
|
|
|
|
-static const struct nand_controller_ops stm32_fmc2_nand_controller_ops = {
|
|
- .attach_chip = stm32_fmc2_attach_chip,
|
|
- .exec_op = stm32_fmc2_exec_op,
|
|
- .setup_data_interface = stm32_fmc2_setup_interface,
|
|
+static const struct nand_controller_ops stm32_fmc2_nfc_controller_ops = {
|
|
+ .attach_chip = stm32_fmc2_nfc_attach_chip,
|
|
+ .exec_op = stm32_fmc2_nfc_exec_op,
|
|
+ .setup_data_interface = stm32_fmc2_nfc_setup_interface,
|
|
};
|
|
|
|
-/* FMC2 probe */
|
|
-static int stm32_fmc2_parse_child(struct stm32_fmc2_nfc *fmc2,
|
|
- struct device_node *dn)
|
|
+static int stm32_fmc2_nfc_parse_child(struct stm32_fmc2_nfc *nfc,
|
|
+ struct device_node *dn)
|
|
{
|
|
- struct stm32_fmc2_nand *nand = &fmc2->nand;
|
|
+ struct stm32_fmc2_nand *nand = &nfc->nand;
|
|
u32 cs;
|
|
int ret, i;
|
|
|
|
@@ -1799,29 +1750,29 @@ static int stm32_fmc2_parse_child(struct stm32_fmc2_nfc *fmc2,
|
|
|
|
nand->ncs /= sizeof(u32);
|
|
if (!nand->ncs) {
|
|
- dev_err(fmc2->dev, "invalid reg property size\n");
|
|
+ dev_err(nfc->dev, "invalid reg property size\n");
|
|
return -EINVAL;
|
|
}
|
|
|
|
for (i = 0; i < nand->ncs; i++) {
|
|
ret = of_property_read_u32_index(dn, "reg", i, &cs);
|
|
if (ret) {
|
|
- dev_err(fmc2->dev, "could not retrieve reg property: %d\n",
|
|
+ dev_err(nfc->dev, "could not retrieve reg property: %d\n",
|
|
ret);
|
|
return ret;
|
|
}
|
|
|
|
if (cs > FMC2_MAX_CE) {
|
|
- dev_err(fmc2->dev, "invalid reg value: %d\n", cs);
|
|
+ dev_err(nfc->dev, "invalid reg value: %d\n", cs);
|
|
return -EINVAL;
|
|
}
|
|
|
|
- if (fmc2->cs_assigned & BIT(cs)) {
|
|
- dev_err(fmc2->dev, "cs already assigned: %d\n", cs);
|
|
+ if (nfc->cs_assigned & BIT(cs)) {
|
|
+ dev_err(nfc->dev, "cs already assigned: %d\n", cs);
|
|
return -EINVAL;
|
|
}
|
|
|
|
- fmc2->cs_assigned |= BIT(cs);
|
|
+ nfc->cs_assigned |= BIT(cs);
|
|
nand->cs_used[i] = cs;
|
|
}
|
|
|
|
@@ -1830,25 +1781,25 @@ static int stm32_fmc2_parse_child(struct stm32_fmc2_nfc *fmc2,
|
|
return 0;
|
|
}
|
|
|
|
-static int stm32_fmc2_parse_dt(struct stm32_fmc2_nfc *fmc2)
|
|
+static int stm32_fmc2_nfc_parse_dt(struct stm32_fmc2_nfc *nfc)
|
|
{
|
|
- struct device_node *dn = fmc2->dev->of_node;
|
|
+ struct device_node *dn = nfc->dev->of_node;
|
|
struct device_node *child;
|
|
int nchips = of_get_child_count(dn);
|
|
int ret = 0;
|
|
|
|
if (!nchips) {
|
|
- dev_err(fmc2->dev, "NAND chip not defined\n");
|
|
+ dev_err(nfc->dev, "NAND chip not defined\n");
|
|
return -EINVAL;
|
|
}
|
|
|
|
if (nchips > 1) {
|
|
- dev_err(fmc2->dev, "too many NAND chips defined\n");
|
|
+ dev_err(nfc->dev, "too many NAND chips defined\n");
|
|
return -EINVAL;
|
|
}
|
|
|
|
for_each_child_of_node(dn, child) {
|
|
- ret = stm32_fmc2_parse_child(fmc2, child);
|
|
+ ret = stm32_fmc2_nfc_parse_child(nfc, child);
|
|
if (ret < 0) {
|
|
of_node_put(child);
|
|
return ret;
|
|
@@ -1858,107 +1809,145 @@ static int stm32_fmc2_parse_dt(struct stm32_fmc2_nfc *fmc2)
|
|
return ret;
|
|
}
|
|
|
|
-static int stm32_fmc2_probe(struct platform_device *pdev)
|
|
+static int stm32_fmc2_nfc_set_cdev(struct stm32_fmc2_nfc *nfc)
|
|
+{
|
|
+ struct device *dev = nfc->dev;
|
|
+ bool ebi_found = false;
|
|
+
|
|
+ if (dev->parent && of_device_is_compatible(dev->parent->of_node,
|
|
+ "st,stm32mp1-fmc2-ebi"))
|
|
+ ebi_found = true;
|
|
+
|
|
+ if (of_device_is_compatible(dev->of_node, "st,stm32mp1-fmc2-nfc")) {
|
|
+ if (ebi_found) {
|
|
+ nfc->cdev = dev->parent;
|
|
+
|
|
+ return 0;
|
|
+ }
|
|
+
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
+ if (ebi_found)
|
|
+ return -EINVAL;
|
|
+
|
|
+ nfc->cdev = dev;
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int stm32_fmc2_nfc_probe(struct platform_device *pdev)
|
|
{
|
|
struct device *dev = &pdev->dev;
|
|
struct reset_control *rstc;
|
|
- struct stm32_fmc2_nfc *fmc2;
|
|
+ struct stm32_fmc2_nfc *nfc;
|
|
struct stm32_fmc2_nand *nand;
|
|
struct resource *res;
|
|
struct mtd_info *mtd;
|
|
struct nand_chip *chip;
|
|
+ struct resource cres;
|
|
int chip_cs, mem_region, ret, irq;
|
|
+ int start_region = 0;
|
|
|
|
- fmc2 = devm_kzalloc(dev, sizeof(*fmc2), GFP_KERNEL);
|
|
- if (!fmc2)
|
|
+ nfc = devm_kzalloc(dev, sizeof(*nfc), GFP_KERNEL);
|
|
+ if (!nfc)
|
|
return -ENOMEM;
|
|
|
|
- fmc2->dev = dev;
|
|
- nand_controller_init(&fmc2->base);
|
|
- fmc2->base.ops = &stm32_fmc2_nand_controller_ops;
|
|
+ nfc->dev = dev;
|
|
+ nand_controller_init(&nfc->base);
|
|
+ nfc->base.ops = &stm32_fmc2_nfc_controller_ops;
|
|
+
|
|
+ ret = stm32_fmc2_nfc_set_cdev(nfc);
|
|
+ if (ret)
|
|
+ return ret;
|
|
+
|
|
+ ret = stm32_fmc2_nfc_parse_dt(nfc);
|
|
+ if (ret)
|
|
+ return ret;
|
|
|
|
- ret = stm32_fmc2_parse_dt(fmc2);
|
|
+ ret = of_address_to_resource(nfc->cdev->of_node, 0, &cres);
|
|
if (ret)
|
|
return ret;
|
|
|
|
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
|
|
- fmc2->io_base = devm_ioremap_resource(dev, res);
|
|
- if (IS_ERR(fmc2->io_base))
|
|
- return PTR_ERR(fmc2->io_base);
|
|
+ nfc->io_phys_addr = cres.start;
|
|
|
|
- fmc2->io_phys_addr = res->start;
|
|
+ nfc->regmap = device_node_to_regmap(nfc->cdev->of_node);
|
|
+ if (IS_ERR(nfc->regmap))
|
|
+ return PTR_ERR(nfc->regmap);
|
|
|
|
- for (chip_cs = 0, mem_region = 1; chip_cs < FMC2_MAX_CE;
|
|
+ if (nfc->dev == nfc->cdev)
|
|
+ start_region = 1;
|
|
+
|
|
+ for (chip_cs = 0, mem_region = start_region; chip_cs < FMC2_MAX_CE;
|
|
chip_cs++, mem_region += 3) {
|
|
- if (!(fmc2->cs_assigned & BIT(chip_cs)))
|
|
+ if (!(nfc->cs_assigned & BIT(chip_cs)))
|
|
continue;
|
|
|
|
res = platform_get_resource(pdev, IORESOURCE_MEM, mem_region);
|
|
- fmc2->data_base[chip_cs] = devm_ioremap_resource(dev, res);
|
|
- if (IS_ERR(fmc2->data_base[chip_cs]))
|
|
- return PTR_ERR(fmc2->data_base[chip_cs]);
|
|
+ nfc->data_base[chip_cs] = devm_ioremap_resource(dev, res);
|
|
+ if (IS_ERR(nfc->data_base[chip_cs]))
|
|
+ return PTR_ERR(nfc->data_base[chip_cs]);
|
|
|
|
- fmc2->data_phys_addr[chip_cs] = res->start;
|
|
+ nfc->data_phys_addr[chip_cs] = res->start;
|
|
|
|
res = platform_get_resource(pdev, IORESOURCE_MEM,
|
|
mem_region + 1);
|
|
- fmc2->cmd_base[chip_cs] = devm_ioremap_resource(dev, res);
|
|
- if (IS_ERR(fmc2->cmd_base[chip_cs]))
|
|
- return PTR_ERR(fmc2->cmd_base[chip_cs]);
|
|
+ nfc->cmd_base[chip_cs] = devm_ioremap_resource(dev, res);
|
|
+ if (IS_ERR(nfc->cmd_base[chip_cs]))
|
|
+ return PTR_ERR(nfc->cmd_base[chip_cs]);
|
|
|
|
res = platform_get_resource(pdev, IORESOURCE_MEM,
|
|
mem_region + 2);
|
|
- fmc2->addr_base[chip_cs] = devm_ioremap_resource(dev, res);
|
|
- if (IS_ERR(fmc2->addr_base[chip_cs]))
|
|
- return PTR_ERR(fmc2->addr_base[chip_cs]);
|
|
+ nfc->addr_base[chip_cs] = devm_ioremap_resource(dev, res);
|
|
+ if (IS_ERR(nfc->addr_base[chip_cs]))
|
|
+ return PTR_ERR(nfc->addr_base[chip_cs]);
|
|
}
|
|
|
|
irq = platform_get_irq(pdev, 0);
|
|
- if (irq < 0) {
|
|
- if (irq != -EPROBE_DEFER)
|
|
- dev_err(dev, "IRQ error missing or invalid\n");
|
|
+ if (irq < 0)
|
|
return irq;
|
|
- }
|
|
|
|
- ret = devm_request_irq(dev, irq, stm32_fmc2_irq, 0,
|
|
- dev_name(dev), fmc2);
|
|
+ ret = devm_request_irq(dev, irq, stm32_fmc2_nfc_irq, 0,
|
|
+ dev_name(dev), nfc);
|
|
if (ret) {
|
|
dev_err(dev, "failed to request irq\n");
|
|
return ret;
|
|
}
|
|
|
|
- init_completion(&fmc2->complete);
|
|
+ init_completion(&nfc->complete);
|
|
|
|
- fmc2->clk = devm_clk_get(dev, NULL);
|
|
- if (IS_ERR(fmc2->clk))
|
|
- return PTR_ERR(fmc2->clk);
|
|
+ nfc->clk = devm_clk_get(nfc->cdev, NULL);
|
|
+ if (IS_ERR(nfc->clk))
|
|
+ return PTR_ERR(nfc->clk);
|
|
|
|
- ret = clk_prepare_enable(fmc2->clk);
|
|
+ ret = clk_prepare_enable(nfc->clk);
|
|
if (ret) {
|
|
dev_err(dev, "can not enable the clock\n");
|
|
return ret;
|
|
}
|
|
|
|
rstc = devm_reset_control_get(dev, NULL);
|
|
- if (!IS_ERR(rstc)) {
|
|
+ if (IS_ERR(rstc)) {
|
|
+ ret = PTR_ERR(rstc);
|
|
+ if (ret == -EPROBE_DEFER)
|
|
+ goto err_clk_disable;
|
|
+ } else {
|
|
reset_control_assert(rstc);
|
|
reset_control_deassert(rstc);
|
|
}
|
|
|
|
- /* DMA setup */
|
|
- ret = stm32_fmc2_dma_setup(fmc2);
|
|
+ ret = stm32_fmc2_nfc_dma_setup(nfc);
|
|
if (ret)
|
|
- return ret;
|
|
+ goto err_release_dma;
|
|
|
|
- /* FMC2 init routine */
|
|
- stm32_fmc2_init(fmc2);
|
|
+ stm32_fmc2_nfc_init(nfc);
|
|
|
|
- nand = &fmc2->nand;
|
|
+ nand = &nfc->nand;
|
|
chip = &nand->chip;
|
|
mtd = nand_to_mtd(chip);
|
|
mtd->dev.parent = dev;
|
|
|
|
- chip->controller = &fmc2->base;
|
|
+ chip->controller = &nfc->base;
|
|
chip->options |= NAND_BUSWIDTH_AUTO | NAND_NO_SUBPAGE_WRITE |
|
|
NAND_USE_BOUNCE_BUFFER;
|
|
|
|
@@ -1970,86 +1959,87 @@ static int stm32_fmc2_probe(struct platform_device *pdev)
|
|
/* Scan to find existence of the device */
|
|
ret = nand_scan(chip, nand->ncs);
|
|
if (ret)
|
|
- goto err_scan;
|
|
+ goto err_release_dma;
|
|
|
|
ret = mtd_device_register(mtd, NULL, 0);
|
|
if (ret)
|
|
- goto err_device_register;
|
|
+ goto err_nand_cleanup;
|
|
|
|
- platform_set_drvdata(pdev, fmc2);
|
|
+ platform_set_drvdata(pdev, nfc);
|
|
|
|
return 0;
|
|
|
|
-err_device_register:
|
|
+err_nand_cleanup:
|
|
nand_cleanup(chip);
|
|
|
|
-err_scan:
|
|
- if (fmc2->dma_ecc_ch)
|
|
- dma_release_channel(fmc2->dma_ecc_ch);
|
|
- if (fmc2->dma_tx_ch)
|
|
- dma_release_channel(fmc2->dma_tx_ch);
|
|
- if (fmc2->dma_rx_ch)
|
|
- dma_release_channel(fmc2->dma_rx_ch);
|
|
+err_release_dma:
|
|
+ if (nfc->dma_ecc_ch)
|
|
+ dma_release_channel(nfc->dma_ecc_ch);
|
|
+ if (nfc->dma_tx_ch)
|
|
+ dma_release_channel(nfc->dma_tx_ch);
|
|
+ if (nfc->dma_rx_ch)
|
|
+ dma_release_channel(nfc->dma_rx_ch);
|
|
|
|
- sg_free_table(&fmc2->dma_data_sg);
|
|
- sg_free_table(&fmc2->dma_ecc_sg);
|
|
+ sg_free_table(&nfc->dma_data_sg);
|
|
+ sg_free_table(&nfc->dma_ecc_sg);
|
|
|
|
- clk_disable_unprepare(fmc2->clk);
|
|
+err_clk_disable:
|
|
+ clk_disable_unprepare(nfc->clk);
|
|
|
|
return ret;
|
|
}
|
|
|
|
-static int stm32_fmc2_remove(struct platform_device *pdev)
|
|
+static int stm32_fmc2_nfc_remove(struct platform_device *pdev)
|
|
{
|
|
- struct stm32_fmc2_nfc *fmc2 = platform_get_drvdata(pdev);
|
|
- struct stm32_fmc2_nand *nand = &fmc2->nand;
|
|
+ struct stm32_fmc2_nfc *nfc = platform_get_drvdata(pdev);
|
|
+ struct stm32_fmc2_nand *nand = &nfc->nand;
|
|
|
|
nand_release(&nand->chip);
|
|
|
|
- if (fmc2->dma_ecc_ch)
|
|
- dma_release_channel(fmc2->dma_ecc_ch);
|
|
- if (fmc2->dma_tx_ch)
|
|
- dma_release_channel(fmc2->dma_tx_ch);
|
|
- if (fmc2->dma_rx_ch)
|
|
- dma_release_channel(fmc2->dma_rx_ch);
|
|
+ if (nfc->dma_ecc_ch)
|
|
+ dma_release_channel(nfc->dma_ecc_ch);
|
|
+ if (nfc->dma_tx_ch)
|
|
+ dma_release_channel(nfc->dma_tx_ch);
|
|
+ if (nfc->dma_rx_ch)
|
|
+ dma_release_channel(nfc->dma_rx_ch);
|
|
|
|
- sg_free_table(&fmc2->dma_data_sg);
|
|
- sg_free_table(&fmc2->dma_ecc_sg);
|
|
+ sg_free_table(&nfc->dma_data_sg);
|
|
+ sg_free_table(&nfc->dma_ecc_sg);
|
|
|
|
- clk_disable_unprepare(fmc2->clk);
|
|
+ clk_disable_unprepare(nfc->clk);
|
|
|
|
return 0;
|
|
}
|
|
|
|
-static int __maybe_unused stm32_fmc2_suspend(struct device *dev)
+static int __maybe_unused stm32_fmc2_nfc_suspend(struct device *dev)
 {
-	struct stm32_fmc2_nfc *fmc2 = dev_get_drvdata(dev);
+	struct stm32_fmc2_nfc *nfc = dev_get_drvdata(dev);

-	clk_disable_unprepare(fmc2->clk);
+	clk_disable_unprepare(nfc->clk);

 	pinctrl_pm_select_sleep_state(dev);

 	return 0;
 }

-static int __maybe_unused stm32_fmc2_resume(struct device *dev)
+static int __maybe_unused stm32_fmc2_nfc_resume(struct device *dev)
 {
-	struct stm32_fmc2_nfc *fmc2 = dev_get_drvdata(dev);
-	struct stm32_fmc2_nand *nand = &fmc2->nand;
+	struct stm32_fmc2_nfc *nfc = dev_get_drvdata(dev);
+	struct stm32_fmc2_nand *nand = &nfc->nand;
 	int chip_cs, ret;

 	pinctrl_pm_select_default_state(dev);

-	ret = clk_prepare_enable(fmc2->clk);
+	ret = clk_prepare_enable(nfc->clk);
 	if (ret) {
 		dev_err(dev, "can not enable the clock\n");
 		return ret;
 	}

-	stm32_fmc2_init(fmc2);
+	stm32_fmc2_nfc_init(nfc);

 	for (chip_cs = 0; chip_cs < FMC2_MAX_CE; chip_cs++) {
-		if (!(fmc2->cs_assigned & BIT(chip_cs)))
+		if (!(nfc->cs_assigned & BIT(chip_cs)))
 			continue;

 		nand_reset(&nand->chip, chip_cs);
@@ -2058,27 +2048,28 @@ static int __maybe_unused stm32_fmc2_resume(struct device *dev)
 	return 0;
 }

-static SIMPLE_DEV_PM_OPS(stm32_fmc2_pm_ops, stm32_fmc2_suspend,
-			 stm32_fmc2_resume);
+static SIMPLE_DEV_PM_OPS(stm32_fmc2_nfc_pm_ops, stm32_fmc2_nfc_suspend,
+			 stm32_fmc2_nfc_resume);

-static const struct of_device_id stm32_fmc2_match[] = {
+static const struct of_device_id stm32_fmc2_nfc_match[] = {
 	{.compatible = "st,stm32mp15-fmc2"},
+	{.compatible = "st,stm32mp1-fmc2-nfc"},
 	{}
 };
-MODULE_DEVICE_TABLE(of, stm32_fmc2_match);
+MODULE_DEVICE_TABLE(of, stm32_fmc2_nfc_match);

-static struct platform_driver stm32_fmc2_driver = {
-	.probe	= stm32_fmc2_probe,
-	.remove	= stm32_fmc2_remove,
+static struct platform_driver stm32_fmc2_nfc_driver = {
+	.probe	= stm32_fmc2_nfc_probe,
+	.remove	= stm32_fmc2_nfc_remove,
 	.driver	= {
-		.name = "stm32_fmc2_nand",
-		.of_match_table = stm32_fmc2_match,
-		.pm = &stm32_fmc2_pm_ops,
+		.name = "stm32_fmc2_nfc",
+		.of_match_table = stm32_fmc2_nfc_match,
+		.pm = &stm32_fmc2_nfc_pm_ops,
 	},
 };
-module_platform_driver(stm32_fmc2_driver);
+module_platform_driver(stm32_fmc2_nfc_driver);

-MODULE_ALIAS("platform:stm32_fmc2_nand");
+MODULE_ALIAS("platform:stm32_fmc2_nfc");
 MODULE_AUTHOR("Christophe Kerello <christophe.kerello@st.com>");
-MODULE_DESCRIPTION("STMicroelectronics STM32 FMC2 nand driver");
+MODULE_DESCRIPTION("STMicroelectronics STM32 FMC2 NFC driver");
 MODULE_LICENSE("GPL v2");
diff --git a/include/linux/mmc/core.h b/include/linux/mmc/core.h
index b7ba8810a3b53..eb10b8194073e 100644
--- a/include/linux/mmc/core.h
+++ b/include/linux/mmc/core.h
@@ -173,6 +173,7 @@ void mmc_wait_for_req(struct mmc_host *host, struct mmc_request *mrq);
 int mmc_wait_for_cmd(struct mmc_host *host, struct mmc_command *cmd,
 		int retries);

+void mmc_hw_unstuck(struct mmc_host *host);
 int mmc_hw_reset(struct mmc_host *host);
 int mmc_sw_reset(struct mmc_host *host);
 void mmc_set_data_timeout(struct mmc_data *data, const struct mmc_card *card);
diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
index 4c5eb3aa8e723..feac3431be49a 100644
--- a/include/linux/mmc/host.h
+++ b/include/linux/mmc/host.h
@@ -163,6 +163,12 @@ struct mmc_host_ops {
 	void	(*hw_reset)(struct mmc_host *host);
 	void	(*card_event)(struct mmc_host *host);

+	/*
+	 * Optional callback, used when the host is deadlocked after a
+	 * command and a specific action must be done before a new
+	 * command is sent.
+	 */
+	void	(*hw_unstuck)(struct mmc_host *host);
+
 	/*
 	 * Optional callback to support controllers with HW issues for multiple
 	 * I/O. Returns the number of supported blocks for the request.
-- 
2.17.1
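
Editor's note on the hw_unstuck hook added above: this excerpt only shows the
declarations (the drivers/mmc/core/core.c side of the patch is not included
here). Below is a minimal, hypothetical sketch of how a host driver might
provide the callback, assuming the core's mmc_hw_unstuck() simply forwards to
ops->hw_unstuck when it is set. The names my_host and my_hw_unstuck are
illustrative only and are not part of this patch.

/* Hypothetical host driver wiring up the new optional callback. */
#include <linux/mmc/host.h>

struct my_host {
	void __iomem *base;	/* controller registers (illustrative) */
};

/*
 * Assumed to be invoked by the core when the controller is stuck after a
 * command, so the driver can recover before a new command is issued.
 */
static void my_hw_unstuck(struct mmc_host *mmc)
{
	struct my_host *host = mmc_priv(mmc);

	/* Controller-specific recovery sequence would go here. */
	(void)host;
}

static const struct mmc_host_ops my_ops = {
	/* .request, .set_ios, ... omitted */
	.hw_unstuck	= my_hw_unstuck,
};

A driver would only set .hw_unstuck when its controller can actually end up in
such a state; hosts that leave it NULL are unaffected by the new core helper.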