From 286a0f7ad98148a83597347e2e33eb1141973c06 Mon Sep 17 00:00:00 2001 From: Lionel Vitte Date: Thu, 14 Oct 2021 16:51:48 +0200 Subject: [PATCH 13/23] ARM 5.10.61-stm32mp1-r2 NET-TTY --- .../net/ethernet/stmicro/stmmac/dwmac-stm32.c | 58 +- .../net/ethernet/stmicro/stmmac/stmmac_main.c | 42 +- drivers/net/phy/realtek.c | 13 +- drivers/tty/serial/serial_core.c | 10 + drivers/tty/serial/serial_mctrl_gpio.c | 38 + drivers/tty/serial/serial_mctrl_gpio.h | 18 + drivers/tty/serial/stm32-usart.c | 1095 ++++++++++++----- drivers/tty/serial/stm32-usart.h | 30 +- include/uapi/linux/serial.h | 2 + 9 files changed, 949 insertions(+), 357 deletions(-) diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c index 5d4df4c52..ffaa434e0 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c @@ -89,7 +89,6 @@ struct stm32_dwmac { int enable_eth_ck; int eth_clk_sel_reg; int eth_ref_clk_sel_reg; - int irq_pwr_wakeup; u32 mode_reg; /* MAC glue-logic mode register */ struct regmap *regmap; u32 speed; @@ -300,9 +299,7 @@ static int stm32_dwmac_parse_data(struct stm32_dwmac *dwmac, static int stm32mp1_parse_data(struct stm32_dwmac *dwmac, struct device *dev) { - struct platform_device *pdev = to_platform_device(dev); struct device_node *np = dev->of_node; - int err = 0; /* Ethernet PHY have no crystal */ dwmac->ext_phyclk = of_property_read_bool(np, "st,ext-phyclk"); @@ -334,29 +331,24 @@ static int stm32mp1_parse_data(struct stm32_dwmac *dwmac, if (IS_ERR(dwmac->syscfg_clk)) dwmac->syscfg_clk = NULL; - /* Get IRQ information early to have an ability to ask for deferred - * probe if needed before we went too far with resource allocation. - */ - dwmac->irq_pwr_wakeup = platform_get_irq_byname_optional(pdev, - "stm32_pwr_wakeup"); - if (dwmac->irq_pwr_wakeup == -EPROBE_DEFER) - return -EPROBE_DEFER; - - if (!dwmac->clk_eth_ck && dwmac->irq_pwr_wakeup >= 0) { - err = device_init_wakeup(&pdev->dev, true); - if (err) { - dev_err(&pdev->dev, "Failed to init wake up irq\n"); - return err; - } - err = dev_pm_set_dedicated_wake_irq(&pdev->dev, - dwmac->irq_pwr_wakeup); - if (err) { - dev_err(&pdev->dev, "Failed to set wake up irq\n"); - device_init_wakeup(&pdev->dev, false); - } - device_set_wakeup_enable(&pdev->dev, false); + return 0; +} + +static int stm32_dwmac_wake_init(struct device *dev, + struct stmmac_resources *stmmac_res) +{ + int err; + + device_set_wakeup_capable(dev, true); + + err = dev_pm_set_wake_irq(dev, stmmac_res->wol_irq); + if (err) { + dev_err(dev, "Failed to set wake up irq\n"); + device_set_wakeup_capable(dev, false); + return err; } - return err; + + return 0; } static int stm32_dwmac_probe(struct platform_device *pdev) @@ -397,6 +389,12 @@ static int stm32_dwmac_probe(struct platform_device *pdev) goto err_remove_config_dt; } + if (stmmac_res.wol_irq && !dwmac->clk_eth_ck) { + ret = stm32_dwmac_wake_init(&pdev->dev, &stmmac_res); + if (ret) + return ret; + } + plat_dat->bsp_priv = dwmac; ret = stm32_dwmac_init(plat_dat); @@ -422,14 +420,14 @@ static int stm32_dwmac_remove(struct platform_device *pdev) struct net_device *ndev = platform_get_drvdata(pdev); struct stmmac_priv *priv = netdev_priv(ndev); int ret = stmmac_dvr_remove(&pdev->dev); - struct stm32_dwmac *dwmac = priv->plat->bsp_priv; + + if (ret) + return ret; stm32_dwmac_clk_disable(priv->plat->bsp_priv); - if (dwmac->irq_pwr_wakeup >= 0) { - dev_pm_clear_wake_irq(&pdev->dev); - device_init_wakeup(&pdev->dev, false); 
- } + dev_pm_clear_wake_irq(&pdev->dev); + ret = device_init_wakeup(&pdev->dev, false); return ret; } diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 3134f7e66..9ac005db1 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -1548,18 +1548,18 @@ static void dma_free_tx_skbufs(struct stmmac_priv *priv, u32 queue) stmmac_free_tx_buffer(priv, queue, i); } -/** +/* * stmmac_free_tx_skbufs - free TX skb buffers * @priv: private structure */ -static void stmmac_free_tx_skbufs(struct stmmac_priv *priv) +/*static void stmmac_free_tx_skbufs(struct stmmac_priv *priv) { - u32 tx_queue_cnt = priv->plat->tx_queues_to_use; - u32 queue; + u32 tx_queue_cnt = priv->plat->tx_queues_to_use; + u32 queue; - for (queue = 0; queue < tx_queue_cnt; queue++) - dma_free_tx_skbufs(priv, queue); -} + for (queue = 0; queue < tx_queue_cnt; queue++) + dma_free_tx_skbufs(priv, queue); +}*/ /** * free_dma_rx_desc_resources - free RX dma desc resources @@ -5316,8 +5316,25 @@ int stmmac_resume(struct device *dev) stmmac_reset_queues_param(priv); - stmmac_free_tx_skbufs(priv); - stmmac_clear_descriptors(priv); + /* Stop TX/RX DMA and clear the descriptors */ + stmmac_stop_all_dma(priv); + + /* Release and free the Rx/Tx resources */ + free_dma_desc_resources(priv); + + ret = alloc_dma_desc_resources(priv); + if (ret < 0) { + netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n", + __func__); + goto dma_desc_error; + } + + ret = init_dma_desc_rings(ndev, GFP_KERNEL); + if (ret < 0) { + netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n", + __func__); + goto init_error; + } stmmac_hw_setup(ndev, false); stmmac_init_coalesce(priv); @@ -5335,6 +5352,13 @@ int stmmac_resume(struct device *dev) netif_device_attach(ndev); return 0; +init_error: + free_dma_desc_resources(priv); +dma_desc_error: + if (ndev->phydev) + phy_disconnect(ndev->phydev); + + return -1; } EXPORT_SYMBOL_GPL(stmmac_resume); diff --git a/drivers/net/phy/realtek.c b/drivers/net/phy/realtek.c index b4879306b..aa3295e9c 100644 --- a/drivers/net/phy/realtek.c +++ b/drivers/net/phy/realtek.c @@ -26,16 +26,11 @@ #define RTL821x_EXT_PAGE_SELECT 0x1e #define RTL821x_PAGE_SELECT 0x1f -#define RTL8211F_PHYCR1 0x18 #define RTL8211F_INSR 0x1d #define RTL8211F_TX_DELAY BIT(8) #define RTL8211F_RX_DELAY BIT(3) -#define RTL8211F_ALDPS_PLL_OFF BIT(1) -#define RTL8211F_ALDPS_ENABLE BIT(2) -#define RTL8211F_ALDPS_XTAL_OFF BIT(12) - #define RTL8211E_CTRL_DELAY BIT(13) #define RTL8211E_TX_DELAY BIT(12) #define RTL8211E_RX_DELAY BIT(11) @@ -182,11 +177,12 @@ static int rtl8211f_config_init(struct phy_device *phydev) { struct device *dev = &phydev->mdio.dev; u16 val_txdly, val_rxdly; - u16 val; int ret; - val = RTL8211F_ALDPS_ENABLE | RTL8211F_ALDPS_PLL_OFF | RTL8211F_ALDPS_XTAL_OFF; - phy_modify_paged_changed(phydev, 0xa43, RTL8211F_PHYCR1, val, val); + /* Set green LED for Link, yellow LED for Active */ + phy_write(phydev, RTL821x_PAGE_SELECT, 0xd04); + phy_write(phydev, 0x10, 0x617f); + phy_write(phydev, RTL821x_PAGE_SELECT, 0x0); switch (phydev->interface) { case PHY_INTERFACE_MODE_RGMII: @@ -634,6 +630,7 @@ static struct phy_driver realtek_drvs[] = { PHY_ID_MATCH_EXACT(0x001cc916), .name = "RTL8211F Gigabit Ethernet", .config_init = &rtl8211f_config_init, + .read_status = rtlgen_read_status, .ack_interrupt = &rtl8211f_ack_interrupt, .config_intr = &rtl8211f_config_intr, .suspend = genphy_suspend, diff --git 
a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c index 68a0ff605..1afbeaea6 100644 --- a/drivers/tty/serial/serial_core.c +++ b/drivers/tty/serial/serial_core.c @@ -3226,6 +3226,16 @@ int uart_get_rs485_mode(struct uart_port *port) u32 rs485_delay[2]; int ret; + ret = device_property_read_u32_array(dev, "rs485-rts-delay-ns", + rs485_delay, 2); + if (!ret) { + rs485conf->delay_rts_before_send_ns = rs485_delay[0]; + rs485conf->delay_rts_after_send_ns = rs485_delay[1]; + } else { + rs485conf->delay_rts_before_send_ns = 0; + rs485conf->delay_rts_after_send_ns = 0; + } + ret = device_property_read_u32_array(dev, "rs485-rts-delay", rs485_delay, 2); if (!ret) { diff --git a/drivers/tty/serial/serial_mctrl_gpio.c b/drivers/tty/serial/serial_mctrl_gpio.c index fb4781292..1fc2f7047 100644 --- a/drivers/tty/serial/serial_mctrl_gpio.c +++ b/drivers/tty/serial/serial_mctrl_gpio.c @@ -299,4 +299,42 @@ void mctrl_gpio_disable_ms(struct mctrl_gpios *gpios) } EXPORT_SYMBOL_GPL(mctrl_gpio_disable_ms); +void mctrl_gpio_enable_irq_wake(struct mctrl_gpios *gpios) +{ + enum mctrl_gpio_idx i; + + if (!gpios) + return; + + if (!gpios->mctrl_on) + return; + + for (i = 0; i < UART_GPIO_MAX; ++i) { + if (!gpios->irq[i]) + continue; + + enable_irq_wake(gpios->irq[i]); + } +} +EXPORT_SYMBOL_GPL(mctrl_gpio_enable_irq_wake); + +void mctrl_gpio_disable_irq_wake(struct mctrl_gpios *gpios) +{ + enum mctrl_gpio_idx i; + + if (!gpios) + return; + + if (!gpios->mctrl_on) + return; + + for (i = 0; i < UART_GPIO_MAX; ++i) { + if (!gpios->irq[i]) + continue; + + disable_irq_wake(gpios->irq[i]); + } +} +EXPORT_SYMBOL_GPL(mctrl_gpio_disable_irq_wake); + MODULE_LICENSE("GPL"); diff --git a/drivers/tty/serial/serial_mctrl_gpio.h b/drivers/tty/serial/serial_mctrl_gpio.h index b134a0ffc..fc76910fb 100644 --- a/drivers/tty/serial/serial_mctrl_gpio.h +++ b/drivers/tty/serial/serial_mctrl_gpio.h @@ -91,6 +91,16 @@ void mctrl_gpio_enable_ms(struct mctrl_gpios *gpios); */ void mctrl_gpio_disable_ms(struct mctrl_gpios *gpios); +/* + * Enable gpio wakeup interrupts to enable wake up source. + */ +void mctrl_gpio_enable_irq_wake(struct mctrl_gpios *gpios); + +/* + * Disable gpio wakeup interrupts to enable wake up source. 
+ */ +void mctrl_gpio_disable_irq_wake(struct mctrl_gpios *gpios); + #else /* GPIOLIB */ static inline @@ -142,6 +152,14 @@ static inline void mctrl_gpio_disable_ms(struct mctrl_gpios *gpios) { } +static inline void mctrl_gpio_enable_irq_wake(struct mctrl_gpios *gpios) +{ +} + +static inline void mctrl_gpio_disable_irq_wake(struct mctrl_gpios *gpios) +{ +} + #endif /* GPIOLIB */ #endif diff --git a/drivers/tty/serial/stm32-usart.c b/drivers/tty/serial/stm32-usart.c index 844059861..3475ff831 100644 --- a/drivers/tty/serial/stm32-usart.c +++ b/drivers/tty/serial/stm32-usart.c @@ -4,6 +4,7 @@ * Copyright (C) STMicroelectronics SA 2017 * Authors: Maxime Coquelin * Gerald Baeza + * Erwan Le Ray * * Inspired by st-asc.c from STMicroelectronics (c) */ @@ -60,38 +61,65 @@ static void stm32_usart_clr_bits(struct uart_port *port, u32 reg, u32 bits) writel_relaxed(val, port->membase + reg); } -static void stm32_usart_config_reg_rs485(u32 *cr1, u32 *cr3, u32 delay_ADE, - u32 delay_DDE, u32 baud) +static u32 stm32_usart_config_delay_rs485(u32 *cr1, u32 delay, u32 baud, + bool over8, u32 rs485_deat_dedt_max, + struct serial_rs485 *rs485conf) { - u32 rs485_deat_dedt; + u64 tmp; + + /* + * Compute (de)assertion time by using the delay (in ns), the baud rate + * (in bits/s) and the oversampling (in 1/8 or 1/16 bit) + */ + tmp = (u64)delay * (u64)baud * 8ULL; + + /* Handle oversampling 16 */ + if (!over8) + tmp = tmp * 2ULL; + + tmp = DIV_ROUND_CLOSEST_ULL(tmp, NSEC_PER_SEC); + + /* Set delay to max value if result is higher than max value */ + tmp = tmp > rs485_deat_dedt_max ? rs485_deat_dedt_max : tmp; + + return tmp; +} + +static void stm32_usart_config_reg_rs485(u32 *cr1, u32 *cr3, u32 baud, + struct serial_rs485 *rs485conf) +{ + u32 delay_ADE, delay_DDE, rs485_deat_dedt; u32 rs485_deat_dedt_max = (USART_CR1_DEAT_MASK >> USART_CR1_DEAT_SHIFT); bool over8; + u32 tmp; + + /* + * Assertion and deassertion delays (in ns) are computed by the + * selection of rs485-rts-delay-ns (in ns) or rs485-rts-delay (in ms) + * provided by device tree + */ + if (rs485conf->delay_rts_before_send_ns != 0 || + rs485conf->delay_rts_after_send_ns != 0) { + delay_ADE = rs485conf->delay_rts_before_send_ns; + delay_DDE = rs485conf->delay_rts_after_send_ns; + } else { + delay_ADE = rs485conf->delay_rts_before_send * NSEC_PER_MSEC; + delay_DDE = rs485conf->delay_rts_after_send * NSEC_PER_MSEC; + } *cr3 |= USART_CR3_DEM; over8 = *cr1 & USART_CR1_OVER8; - if (over8) - rs485_deat_dedt = delay_ADE * baud * 8; - else - rs485_deat_dedt = delay_ADE * baud * 16; - - rs485_deat_dedt = DIV_ROUND_CLOSEST(rs485_deat_dedt, 1000); - rs485_deat_dedt = rs485_deat_dedt > rs485_deat_dedt_max ? - rs485_deat_dedt_max : rs485_deat_dedt; - rs485_deat_dedt = (rs485_deat_dedt << USART_CR1_DEAT_SHIFT) & - USART_CR1_DEAT_MASK; + /* Assertion time */ + tmp = stm32_usart_config_delay_rs485(cr1, delay_ADE, baud, over8, + rs485_deat_dedt_max, rs485conf); + rs485_deat_dedt = (tmp << USART_CR1_DEAT_SHIFT) & USART_CR1_DEAT_MASK; *cr1 |= rs485_deat_dedt; - if (over8) - rs485_deat_dedt = delay_DDE * baud * 8; - else - rs485_deat_dedt = delay_DDE * baud * 16; - - rs485_deat_dedt = DIV_ROUND_CLOSEST(rs485_deat_dedt, 1000); - rs485_deat_dedt = rs485_deat_dedt > rs485_deat_dedt_max ? 
- rs485_deat_dedt_max : rs485_deat_dedt; - rs485_deat_dedt = (rs485_deat_dedt << USART_CR1_DEDT_SHIFT) & - USART_CR1_DEDT_MASK; + /* Deassertion time */ + tmp = stm32_usart_config_delay_rs485(cr1, delay_DDE, baud, over8, + rs485_deat_dedt_max, rs485conf); + rs485_deat_dedt = (tmp << USART_CR1_DEDT_SHIFT) & USART_CR1_DEDT_MASK; *cr1 |= rs485_deat_dedt; } @@ -99,8 +127,8 @@ static int stm32_usart_config_rs485(struct uart_port *port, struct serial_rs485 *rs485conf) { struct stm32_port *stm32_port = to_stm32_port(port); - const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs; - const struct stm32_usart_config *cfg = &stm32_port->info->cfg; + struct stm32_usart_offsets *ofs = &stm32_port->info->ofs; + struct stm32_usart_config *cfg = &stm32_port->info->cfg; u32 usartdiv, baud, cr1, cr3; bool over8; @@ -122,10 +150,7 @@ static int stm32_usart_config_rs485(struct uart_port *port, << USART_BRR_04_R_SHIFT; baud = DIV_ROUND_CLOSEST(port->uartclk, usartdiv); - stm32_usart_config_reg_rs485(&cr1, &cr3, - rs485conf->delay_rts_before_send, - rs485conf->delay_rts_after_send, - baud); + stm32_usart_config_reg_rs485(&cr1, &cr3, baud, rs485conf); if (rs485conf->flags & SER_RS485_RTS_ON_SEND) { cr3 &= ~USART_CR3_DEP; @@ -164,63 +189,109 @@ static int stm32_usart_init_rs485(struct uart_port *port, return uart_get_rs485_mode(port); } -static int stm32_usart_pending_rx(struct uart_port *port, u32 *sr, - int *last_res, bool threaded) +static bool stm32_usart_rx_dma_started(struct stm32_port *stm32_port) +{ + return stm32_port->rx_ch ? stm32_port->rx_dma_busy : false; +} + +static void stm32_usart_rx_dma_terminate(struct stm32_port *stm32_port) +{ + dmaengine_terminate_async(stm32_port->rx_ch); + stm32_port->rx_dma_busy = false; +} + +static int stm32_usart_dma_pause_resume(struct stm32_port *stm32_port, + struct dma_chan *chan, + enum dma_status expected_status, + int (*dma_action)(struct dma_chan *chan), + bool (*dma_started)(struct stm32_port *stm32_port), + void (*dma_terminate)(struct stm32_port *stm32_port)) +{ + struct uart_port *port = &stm32_port->port; + enum dma_status dma_status; + int ret; + + if (!(*dma_started)(stm32_port)) + return -EPERM; + + dma_status = dmaengine_tx_status(chan, chan->cookie, NULL); + if (dma_status != expected_status) + return -EAGAIN; + + ret = (*dma_action)(chan); + if (ret) { + dev_err(port->dev, "DMA failed with error code: %d\n", ret); + (*dma_terminate)(stm32_port); + } + return ret; +} + +static int stm32_usart_rx_dma_pause(struct stm32_port *stm32_port) +{ + return stm32_usart_dma_pause_resume(stm32_port, stm32_port->rx_ch, + DMA_IN_PROGRESS, dmaengine_pause, + stm32_usart_rx_dma_started, + stm32_usart_rx_dma_terminate); +} + +static int stm32_usart_rx_dma_resume(struct stm32_port *stm32_port) +{ + return stm32_usart_dma_pause_resume(stm32_port, stm32_port->rx_ch, + DMA_PAUSED, dmaengine_resume, + stm32_usart_rx_dma_started, + stm32_usart_rx_dma_terminate); +} + +/* + * Return true when data is pending (in pio mode), and false when no data is + * pending. 
+ */ +static bool stm32_usart_pending_rx_pio(struct uart_port *port, u32 *sr) { struct stm32_port *stm32_port = to_stm32_port(port); - const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs; - enum dma_status status; - struct dma_tx_state state; + struct stm32_usart_offsets *ofs = &stm32_port->info->ofs; *sr = readl_relaxed(port->membase + ofs->isr); + /* Get pending characters in RDR or FIFO */ + if (*sr & USART_SR_RXNE) { + /* + * Get all pending characters from the RDR or the FIFO when + * using interrupts + */ + if (!stm32_usart_rx_dma_started(stm32_port)) + return true; - if (threaded && stm32_port->rx_ch) { - status = dmaengine_tx_status(stm32_port->rx_ch, - stm32_port->rx_ch->cookie, - &state); - if (status == DMA_IN_PROGRESS && (*last_res != state.residue)) - return 1; - else - return 0; - } else if (*sr & USART_SR_RXNE) { - return 1; + /* Handle only RX data errors when using DMA */ + if (*sr & USART_SR_ERR_MASK) + return true; } - return 0; + + return false; } -static unsigned long stm32_usart_get_char(struct uart_port *port, u32 *sr, - int *last_res) +static unsigned long stm32_usart_get_char_pio(struct uart_port *port) { struct stm32_port *stm32_port = to_stm32_port(port); - const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs; + struct stm32_usart_offsets *ofs = &stm32_port->info->ofs; unsigned long c; - if (stm32_port->rx_ch) { - c = stm32_port->rx_buf[RX_BUF_L - (*last_res)--]; - if ((*last_res) == 0) - *last_res = RX_BUF_L; - } else { - c = readl_relaxed(port->membase + ofs->rdr); - /* apply RDR data mask */ - c &= stm32_port->rdr_mask; - } + c = readl_relaxed(port->membase + ofs->rdr); + /* Apply RDR data mask */ + c &= stm32_port->rdr_mask; return c; } -static void stm32_usart_receive_chars(struct uart_port *port, bool threaded) +static unsigned int stm32_usart_receive_chars_pio(struct uart_port *port) { - struct tty_port *tport = &port->state->port; struct stm32_port *stm32_port = to_stm32_port(port); - const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs; + struct stm32_usart_offsets *ofs = &stm32_port->info->ofs; unsigned long c; + unsigned int size = 0; u32 sr; char flag; - spin_lock(&port->lock); - - while (stm32_usart_pending_rx(port, &sr, &stm32_port->last_res, - threaded)) { + while (stm32_usart_pending_rx_pio(port, &sr)) { sr |= USART_SR_DUMMY_RX; flag = TTY_NORMAL; @@ -239,8 +310,9 @@ static void stm32_usart_receive_chars(struct uart_port *port, bool threaded) writel_relaxed(sr & USART_SR_ERR_MASK, port->membase + ofs->icr); - c = stm32_usart_get_char(port, &sr, &stm32_port->last_res); + c = stm32_usart_get_char_pio(port); port->icount.rx++; + size++; if (sr & USART_SR_ERR_MASK) { if (sr & USART_SR_ORE) { port->icount.overrun++; @@ -274,21 +346,216 @@ static void stm32_usart_receive_chars(struct uart_port *port, bool threaded) uart_insert_char(port, sr, USART_SR_ORE, c, flag); } - spin_unlock(&port->lock); + return size; +} + +static void stm32_usart_push_buffer_dma(struct uart_port *port, + unsigned int dma_size) +{ + struct stm32_port *stm32_port = to_stm32_port(port); + struct tty_port *ttyport = &stm32_port->port.state->port; + unsigned char *dma_start; + int dma_count, i; + + dma_start = stm32_port->rx_buf + (RX_BUF_L - stm32_port->last_res); + + /* + * Apply rdr_mask on buffer in order to mask parity bit. + * This loop is useless in cs8 mode because DMA copies only + * 8 bits and already ignores parity bit. 
+ */ + if (!(stm32_port->rdr_mask == (BIT(8) - 1))) + for (i = 0; i < dma_size; i++) + *(dma_start + i) &= stm32_port->rdr_mask; + + dma_count = tty_insert_flip_string(ttyport, dma_start, dma_size); + port->icount.rx += dma_count; + if (dma_count != dma_size) + port->icount.buf_overrun++; + stm32_port->last_res -= dma_count; + if (stm32_port->last_res == 0) + stm32_port->last_res = RX_BUF_L; +} + +static unsigned int stm32_usart_receive_chars_dma(struct uart_port *port) +{ + struct stm32_port *stm32_port = to_stm32_port(port); + unsigned int dma_size, size = 0; + + /* + * DMA buffer is configured in cyclic mode and handles the rollback of + * the buffer. + */ + if (stm32_port->rx_dma_state.residue > stm32_port->last_res) { + /* Conditional first part: from last_res to end of DMA buffer */ + dma_size = stm32_port->last_res; + stm32_usart_push_buffer_dma(port, dma_size); + size = dma_size; + } + + dma_size = stm32_port->last_res - stm32_port->rx_dma_state.residue; + stm32_usart_push_buffer_dma(port, dma_size); + size += dma_size; - tty_flip_buffer_push(tport); + return size; +} + +static void stm32_usart_receive_chars(struct uart_port *port, + bool force_dma_flush) +{ + struct tty_port *tport = &port->state->port; + struct stm32_port *stm32_port = to_stm32_port(port); + struct stm32_usart_offsets *ofs = &stm32_port->info->ofs; + enum dma_status rx_dma_status; + u32 sr; + unsigned int size; + + if (stm32_usart_rx_dma_started(stm32_port) || force_dma_flush) { + rx_dma_status = dmaengine_tx_status(stm32_port->rx_ch, + stm32_port->rx_ch->cookie, + &stm32_port->rx_dma_state); + if (rx_dma_status == DMA_IN_PROGRESS || + rx_dma_status == DMA_PAUSED) { + /* Empty DMA buffer */ + size = stm32_usart_receive_chars_dma(port); + sr = readl_relaxed(port->membase + ofs->isr); + if (sr & USART_SR_ERR_MASK) { + /* Disable DMA request line */ + stm32_usart_clr_bits(port, ofs->cr3, + USART_CR3_DMAR); + + /* Switch to PIO mode to handle the errors */ + size += stm32_usart_receive_chars_pio(port); + + /* Switch back to DMA mode */ + stm32_usart_set_bits(port, ofs->cr3, + USART_CR3_DMAR); + } + } else { + /* Disable RX DMA */ + stm32_usart_rx_dma_terminate(stm32_port); + /* Fall back to interrupt mode */ + dev_dbg(port->dev, + "DMA error, fallback to irq mode\n"); + size = stm32_usart_receive_chars_pio(port); + } + } else { + size = stm32_usart_receive_chars_pio(port); + } + + if (size) + tty_flip_buffer_push(tport); +} + +static void stm32_usart_rx_dma_complete(void *arg) +{ + struct uart_port *port = arg; + unsigned long flags; + + spin_lock_irqsave(&port->lock, flags); + stm32_usart_receive_chars(port, false); + spin_unlock_irqrestore(&port->lock, flags); +} + +static int stm32_usart_rx_dma_start_or_resume(struct uart_port *port) +{ + struct stm32_port *stm32_port = to_stm32_port(port); + struct dma_async_tx_descriptor *desc; + enum dma_status rx_dma_status; + int ret; + + if (stm32_port->throttled) + return 0; + + if (stm32_port->rx_dma_busy) { + rx_dma_status = dmaengine_tx_status(stm32_port->rx_ch, + stm32_port->rx_ch->cookie, + NULL); + if (rx_dma_status == DMA_IN_PROGRESS) + return 0; + + if (rx_dma_status == DMA_PAUSED && !stm32_usart_rx_dma_resume(stm32_port)) + return 0; + + dev_err(port->dev, "DMA failed : status error.\n"); + stm32_usart_rx_dma_terminate(stm32_port); + } + + stm32_port->rx_dma_busy = true; + + stm32_port->last_res = RX_BUF_L; + /* Prepare a DMA cyclic transaction */ + desc = dmaengine_prep_dma_cyclic(stm32_port->rx_ch, + stm32_port->rx_dma_buf, + RX_BUF_L, RX_BUF_P, + DMA_DEV_TO_MEM, 
+ DMA_PREP_INTERRUPT); + if (!desc) { + dev_err(port->dev, "rx dma prep cyclic failed\n"); + stm32_port->rx_dma_busy = false; + return -ENODEV; + } + + desc->callback = stm32_usart_rx_dma_complete; + desc->callback_param = port; + + /* Push current DMA transaction in the pending queue */ + ret = dma_submit_error(dmaengine_submit(desc)); + if (ret) { + dmaengine_terminate_sync(stm32_port->rx_ch); + stm32_port->rx_dma_busy = false; + return ret; + } + + /* Issue pending DMA requests */ + dma_async_issue_pending(stm32_port->rx_ch); + + return 0; +} + +static void stm32_usart_tx_dma_terminate(struct stm32_port *stm32_port) +{ + dmaengine_terminate_async(stm32_port->tx_ch); + stm32_port->tx_dma_busy = false; +} + +static bool stm32_usart_tx_dma_started(struct stm32_port *stm32_port) +{ + /* + * We cannot use the function "dmaengine_tx_status" to know the + * status of DMA. This function does not show if the "dma complete" + * callback of the DMA transaction have been called. So we prefer + * to use "tx_dma_busy" flag to prevent dual dma transaction at the + * same time. + */ + return stm32_port->tx_dma_busy; +} + +static int stm32_usart_tx_dma_pause(struct stm32_port *stm32_port) +{ + return stm32_usart_dma_pause_resume(stm32_port, stm32_port->tx_ch, + DMA_IN_PROGRESS, dmaengine_pause, + stm32_usart_tx_dma_started, + stm32_usart_tx_dma_terminate); +} + +static int stm32_usart_tx_dma_resume(struct stm32_port *stm32_port) +{ + return stm32_usart_dma_pause_resume(stm32_port, stm32_port->tx_ch, + DMA_PAUSED, dmaengine_resume, + stm32_usart_tx_dma_started, + stm32_usart_tx_dma_terminate); } static void stm32_usart_tx_dma_complete(void *arg) { struct uart_port *port = arg; struct stm32_port *stm32port = to_stm32_port(port); - const struct stm32_usart_offsets *ofs = &stm32port->info->ofs; + struct stm32_usart_offsets *ofs = &stm32port->info->ofs; unsigned long flags; - dmaengine_terminate_async(stm32port->tx_ch); stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_DMAT); - stm32port->tx_dma_busy = false; + stm32_usart_tx_dma_terminate(stm32port); /* Let's see if we have pending data to send */ spin_lock_irqsave(&port->lock, flags); @@ -299,13 +566,13 @@ static void stm32_usart_tx_dma_complete(void *arg) static void stm32_usart_tx_interrupt_enable(struct uart_port *port) { struct stm32_port *stm32_port = to_stm32_port(port); - const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs; + struct stm32_usart_offsets *ofs = &stm32_port->info->ofs; /* * Enables TX FIFO threashold irq when FIFO is enabled, * or TX empty irq when FIFO is disabled */ - if (stm32_port->fifoen) + if (stm32_port->fifoen && stm32_port->txftcfg >= 0) stm32_usart_set_bits(port, ofs->cr3, USART_CR3_TXFTIE); else stm32_usart_set_bits(port, ofs->cr1, USART_CR1_TXEIE); @@ -314,9 +581,9 @@ static void stm32_usart_tx_interrupt_enable(struct uart_port *port) static void stm32_usart_tx_interrupt_disable(struct uart_port *port) { struct stm32_port *stm32_port = to_stm32_port(port); - const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs; + struct stm32_usart_offsets *ofs = &stm32_port->info->ofs; - if (stm32_port->fifoen) + if (stm32_port->fifoen && stm32_port->txftcfg >= 0) stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_TXFTIE); else stm32_usart_clr_bits(port, ofs->cr1, USART_CR1_TXEIE); @@ -325,14 +592,9 @@ static void stm32_usart_tx_interrupt_disable(struct uart_port *port) static void stm32_usart_transmit_chars_pio(struct uart_port *port) { struct stm32_port *stm32_port = to_stm32_port(port); - const struct stm32_usart_offsets 
*ofs = &stm32_port->info->ofs; + struct stm32_usart_offsets *ofs = &stm32_port->info->ofs; struct circ_buf *xmit = &port->state->xmit; - if (stm32_port->tx_dma_busy) { - stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_DMAT); - stm32_port->tx_dma_busy = false; - } - while (!uart_circ_empty(xmit)) { /* Check that TDR is empty before filling FIFO */ if (!(readl_relaxed(port->membase + ofs->isr) & USART_SR_TXE)) @@ -352,15 +614,19 @@ static void stm32_usart_transmit_chars_pio(struct uart_port *port) static void stm32_usart_transmit_chars_dma(struct uart_port *port) { struct stm32_port *stm32port = to_stm32_port(port); - const struct stm32_usart_offsets *ofs = &stm32port->info->ofs; + struct stm32_usart_offsets *ofs = &stm32port->info->ofs; struct circ_buf *xmit = &port->state->xmit; struct dma_async_tx_descriptor *desc = NULL; - unsigned int count, i; + dma_cookie_t cookie; + unsigned int count; + int ret; - if (stm32port->tx_dma_busy) + if (stm32_usart_tx_dma_started(stm32port)) { + ret = stm32_usart_tx_dma_resume(stm32port); + if (ret < 0 && ret != -EAGAIN) + goto fallback_err; return; - - stm32port->tx_dma_busy = true; + } count = uart_circ_chars_pending(xmit); @@ -391,13 +657,24 @@ static void stm32_usart_transmit_chars_dma(struct uart_port *port) if (!desc) goto fallback_err; + /* + * Take "tx_dma_busy" flag. This flag will be release when + * dmaengine_terminate_async will be called. This flag helps + * transmit_chars_dma to doesn't start another dma transaction + * if the callback of the previous is not called. + */ + stm32port->tx_dma_busy = true; + desc->callback = stm32_usart_tx_dma_complete; desc->callback_param = port; /* Push current DMA TX transaction in the pending queue */ - if (dma_submit_error(dmaengine_submit(desc))) { - /* dma no yet started, safe to free resources */ - dmaengine_terminate_async(stm32port->tx_ch); + cookie = dmaengine_submit(desc); + ret = dma_submit_error(cookie); + /* dma no yet started, safe to free resources */ + if (ret) { + dev_err(port->dev, "DMA failed with error code: %d\n", ret); + stm32_usart_tx_dma_terminate(stm32port); goto fallback_err; } @@ -411,24 +688,36 @@ static void stm32_usart_transmit_chars_dma(struct uart_port *port) return; fallback_err: - for (i = count; i > 0; i--) - stm32_usart_transmit_chars_pio(port); + stm32_usart_transmit_chars_pio(port); } static void stm32_usart_transmit_chars(struct uart_port *port) { struct stm32_port *stm32_port = to_stm32_port(port); - const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs; + struct stm32_usart_offsets *ofs = &stm32_port->info->ofs; struct circ_buf *xmit = &port->state->xmit; + u32 isr; + int ret; if (port->x_char) { - if (stm32_port->tx_dma_busy) - stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_DMAT); + /* dma terminate may have been called in case of dma pause failure */ + stm32_usart_tx_dma_pause(stm32_port); + + /* Check that TDR is empty before filling FIFO */ + ret = + readl_relaxed_poll_timeout_atomic(port->membase + ofs->isr, + isr, + (isr & USART_SR_TXE), + 10, 1000); + if (ret) + dev_warn(port->dev, "1 character may be erased\n"); + writel_relaxed(port->x_char, port->membase + ofs->tdr); port->x_char = 0; port->icount.tx++; - if (stm32_port->tx_dma_busy) - stm32_usart_set_bits(port, ofs->cr3, USART_CR3_DMAT); + + /* dma terminate may have been called in case of dma resume failure */ + stm32_usart_tx_dma_resume(stm32_port); return; } @@ -459,7 +748,7 @@ static irqreturn_t stm32_usart_interrupt(int irq, void *ptr) struct uart_port *port = ptr; struct tty_port *tport = 
&port->state->port; struct stm32_port *stm32_port = to_stm32_port(port); - const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs; + struct stm32_usart_offsets *ofs = &stm32_port->info->ofs; u32 sr; sr = readl_relaxed(port->membase + ofs->isr); @@ -477,8 +766,18 @@ static irqreturn_t stm32_usart_interrupt(int irq, void *ptr) pm_wakeup_event(tport->tty->dev, 0); } - if ((sr & USART_SR_RXNE) && !(stm32_port->rx_ch)) + /* + * rx errors in dma mode has to be handled ASAP to avoid overrun as the + * DMA request line has been masked by HW and rx data are stacking in + * FIFO. + */ + if (!stm32_port->throttled && + (((sr & USART_SR_RXNE) && !stm32_usart_rx_dma_started(stm32_port)) || + ((sr & USART_SR_ERR_MASK) && stm32_usart_rx_dma_started(stm32_port)))) { + spin_lock(&port->lock); stm32_usart_receive_chars(port, false); + spin_unlock(&port->lock); + } if ((sr & USART_SR_TXE) && !(stm32_port->tx_ch)) { spin_lock(&port->lock); @@ -486,7 +785,7 @@ static irqreturn_t stm32_usart_interrupt(int irq, void *ptr) spin_unlock(&port->lock); } - if (stm32_port->rx_ch) + if (stm32_usart_rx_dma_started(stm32_port)) return IRQ_WAKE_THREAD; else return IRQ_HANDLED; @@ -496,9 +795,13 @@ static irqreturn_t stm32_usart_threaded_interrupt(int irq, void *ptr) { struct uart_port *port = ptr; struct stm32_port *stm32_port = to_stm32_port(port); + unsigned long flags; - if (stm32_port->rx_ch) - stm32_usart_receive_chars(port, true); + spin_lock_irqsave(&port->lock, flags); + /* Receiver timeout irq for DMA RX */ + if (!stm32_port->throttled) + stm32_usart_receive_chars(port, false); + spin_unlock_irqrestore(&port->lock, flags); return IRQ_HANDLED; } @@ -506,7 +809,7 @@ static irqreturn_t stm32_usart_threaded_interrupt(int irq, void *ptr) static unsigned int stm32_usart_tx_empty(struct uart_port *port) { struct stm32_port *stm32_port = to_stm32_port(port); - const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs; + struct stm32_usart_offsets *ofs = &stm32_port->info->ofs; if (readl_relaxed(port->membase + ofs->isr) & USART_SR_TC) return TIOCSER_TEMT; @@ -517,7 +820,7 @@ static unsigned int stm32_usart_tx_empty(struct uart_port *port) static void stm32_usart_set_mctrl(struct uart_port *port, unsigned int mctrl) { struct stm32_port *stm32_port = to_stm32_port(port); - const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs; + struct stm32_usart_offsets *ofs = &stm32_port->info->ofs; if ((mctrl & TIOCM_RTS) && (port->status & UPSTAT_AUTORTS)) stm32_usart_set_bits(port, ofs->cr3, USART_CR3_RTSE); @@ -556,6 +859,9 @@ static void stm32_usart_stop_tx(struct uart_port *port) stm32_usart_tx_interrupt_disable(port); + /* dma terminate may have been called in case of dma pause failure */ + stm32_usart_tx_dma_pause(stm32_port); + if (rs485conf->flags & SER_RS485_ENABLED) { if (rs485conf->flags & SER_RS485_RTS_ON_SEND) { mctrl_gpio_set(stm32_port->gpios, @@ -574,7 +880,7 @@ static void stm32_usart_start_tx(struct uart_port *port) struct serial_rs485 *rs485conf = &port->rs485; struct circ_buf *xmit = &port->state->xmit; - if (uart_circ_empty(xmit)) + if (uart_circ_empty(xmit) && !port->x_char) return; if (rs485conf->flags & SER_RS485_ENABLED) { @@ -590,18 +896,35 @@ static void stm32_usart_start_tx(struct uart_port *port) stm32_usart_transmit_chars(port); } +/* Flush the transmit buffer. 
*/ +static void stm32_usart_flush_buffer(struct uart_port *port) +{ + struct stm32_port *stm32_port = to_stm32_port(port); + + if (stm32_port->tx_ch) + stm32_usart_tx_dma_terminate(stm32_port); +} + /* Throttle the remote when input buffer is about to overflow. */ static void stm32_usart_throttle(struct uart_port *port) { struct stm32_port *stm32_port = to_stm32_port(port); - const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs; + struct stm32_usart_offsets *ofs = &stm32_port->info->ofs; unsigned long flags; spin_lock_irqsave(&port->lock, flags); + + /* + * Pause DMA transfer, so the RX data gets queued into the FIFO. + * Hardware flow control is triggered when RX FIFO is full. + */ + stm32_usart_rx_dma_pause(stm32_port); + stm32_usart_clr_bits(port, ofs->cr1, stm32_port->cr1_irq); if (stm32_port->cr3_irq) stm32_usart_clr_bits(port, ofs->cr3, stm32_port->cr3_irq); + stm32_port->throttled = true; spin_unlock_irqrestore(&port->lock, flags); } @@ -609,7 +932,7 @@ static void stm32_usart_throttle(struct uart_port *port) static void stm32_usart_unthrottle(struct uart_port *port) { struct stm32_port *stm32_port = to_stm32_port(port); - const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs; + struct stm32_usart_offsets *ofs = &stm32_port->info->ofs; unsigned long flags; spin_lock_irqsave(&port->lock, flags); @@ -617,6 +940,15 @@ static void stm32_usart_unthrottle(struct uart_port *port) if (stm32_port->cr3_irq) stm32_usart_set_bits(port, ofs->cr3, stm32_port->cr3_irq); + stm32_port->throttled = false; + + /* + * Switch back to DMA mode (resume DMA). + * Hardware flow control is stopped when FIFO is not full any more. + */ + if (stm32_port->rx_ch) + stm32_usart_rx_dma_start_or_resume(port); + spin_unlock_irqrestore(&port->lock, flags); } @@ -624,7 +956,10 @@ static void stm32_usart_unthrottle(struct uart_port *port) static void stm32_usart_stop_rx(struct uart_port *port) { struct stm32_port *stm32_port = to_stm32_port(port); - const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs; + struct stm32_usart_offsets *ofs = &stm32_port->info->ofs; + + /* Disable DMA request line. 
*/ + stm32_usart_rx_dma_pause(stm32_port); stm32_usart_clr_bits(port, ofs->cr1, stm32_port->cr1_irq); if (stm32_port->cr3_irq) @@ -639,8 +974,8 @@ static void stm32_usart_break_ctl(struct uart_port *port, int break_state) static int stm32_usart_startup(struct uart_port *port) { struct stm32_port *stm32_port = to_stm32_port(port); - const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs; - const struct stm32_usart_config *cfg = &stm32_port->info->cfg; + struct stm32_usart_offsets *ofs = &stm32_port->info->ofs; + struct stm32_usart_config *cfg = &stm32_port->info->cfg; const char *name = to_platform_device(port->dev)->name; u32 val; int ret; @@ -656,6 +991,14 @@ static int stm32_usart_startup(struct uart_port *port) if (ofs->rqr != UNDEF_REG) writel_relaxed(USART_RQR_RXFRQ, port->membase + ofs->rqr); + if (stm32_port->rx_ch) { + ret = stm32_usart_rx_dma_start_or_resume(port); + if (ret) { + free_irq(port->irq, port); + return ret; + } + } + /* RX enabling */ val = stm32_port->cr1_irq | USART_CR1_RE | BIT(cfg->uart_enable_bit); stm32_usart_set_bits(port, ofs->cr1, val); @@ -666,11 +1009,17 @@ static int stm32_usart_startup(struct uart_port *port) static void stm32_usart_shutdown(struct uart_port *port) { struct stm32_port *stm32_port = to_stm32_port(port); - const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs; - const struct stm32_usart_config *cfg = &stm32_port->info->cfg; + struct stm32_usart_offsets *ofs = &stm32_port->info->ofs; + struct stm32_usart_config *cfg = &stm32_port->info->cfg; u32 val, isr; int ret; + if (stm32_usart_tx_dma_started(stm32_port)) + stm32_usart_tx_dma_terminate(stm32_port); + + if (stm32_port->tx_ch) + stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_DMAT); + /* Disable modem control interrupts */ stm32_usart_disable_ms(port); @@ -684,8 +1033,13 @@ static void stm32_usart_shutdown(struct uart_port *port) isr, (isr & USART_SR_TC), 10, 100000); + /* Send the TC error message only when ISR_TC is not set */ if (ret) - dev_err(port->dev, "transmission complete not set\n"); + dev_err(port->dev, "Transmission is not complete\n"); + + /* Disable RX DMA. 
*/ + if (stm32_port->rx_ch) + stm32_usart_rx_dma_terminate(stm32_port); /* flush RX & TX FIFO */ if (ofs->rqr != UNDEF_REG) @@ -732,8 +1086,8 @@ static void stm32_usart_set_termios(struct uart_port *port, struct ktermios *old) { struct stm32_port *stm32_port = to_stm32_port(port); - const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs; - const struct stm32_usart_config *cfg = &stm32_port->info->cfg; + struct stm32_usart_offsets *ofs = &stm32_port->info->ofs; + struct stm32_usart_config *cfg = &stm32_port->info->cfg; struct serial_rs485 *rs485conf = &port->rs485; unsigned int baud, bits; u32 usartdiv, mantissa, fraction, oversampling; @@ -775,9 +1129,10 @@ static void stm32_usart_set_termios(struct uart_port *port, cr3 = readl_relaxed(port->membase + ofs->cr3); cr3 &= USART_CR3_TXFTIE | USART_CR3_RXFTIE; if (stm32_port->fifoen) { - cr3 &= ~(USART_CR3_TXFTCFG_MASK | USART_CR3_RXFTCFG_MASK); - cr3 |= USART_CR3_TXFTCFG_HALF << USART_CR3_TXFTCFG_SHIFT; - cr3 |= USART_CR3_RXFTCFG_HALF << USART_CR3_RXFTCFG_SHIFT; + if (stm32_port->txftcfg >= 0) + cr3 |= stm32_port->txftcfg << USART_CR3_TXFTCFG_SHIFT; + if (stm32_port->rxftcfg >= 0) + cr3 |= stm32_port->rxftcfg << USART_CR3_RXFTCFG_SHIFT; } if (cflag & CSTOPB) @@ -807,7 +1162,8 @@ static void stm32_usart_set_termios(struct uart_port *port, , bits); if (ofs->rtor != UNDEF_REG && (stm32_port->rx_ch || - stm32_port->fifoen)) { + (stm32_port->fifoen && + stm32_port->rxftcfg >= 0))) { if (cflag & CSTOPB) bits = bits + 3; /* 1 start bit + 2 stop bits */ else @@ -817,9 +1173,12 @@ static void stm32_usart_set_termios(struct uart_port *port, stm32_port->cr1_irq = USART_CR1_RTOIE; writel_relaxed(bits, port->membase + ofs->rtor); cr2 |= USART_CR2_RTOEN; - /* Not using dma, enable fifo threshold irq */ - if (!stm32_port->rx_ch) - stm32_port->cr3_irq = USART_CR3_RXFTIE; + /* + * Enable fifo threshold irq in two cases, either when there + * is no DMA, or when wake up over usart, from low power + * state until the DMA gets re-enabled by resume. + */ + stm32_port->cr3_irq = USART_CR3_RXFTIE; } cr1 |= stm32_port->cr1_irq; @@ -882,14 +1241,19 @@ static void stm32_usart_set_termios(struct uart_port *port, if ((termios->c_cflag & CREAD) == 0) port->ignore_status_mask |= USART_SR_DUMMY_RX; - if (stm32_port->rx_ch) + if (stm32_port->rx_ch) { + /* + * Setup DMA to collect only valid data and enable error irqs. + * This also enables break reception when using DMA. 
+ */ + cr1 |= USART_CR1_PEIE; + cr3 |= USART_CR3_EIE; cr3 |= USART_CR3_DMAR; + cr3 |= USART_CR3_DDRE; + } if (rs485conf->flags & SER_RS485_ENABLED) { - stm32_usart_config_reg_rs485(&cr1, &cr3, - rs485conf->delay_rts_before_send, - rs485conf->delay_rts_after_send, - baud); + stm32_usart_config_reg_rs485(&cr1, &cr3, baud, rs485conf); if (rs485conf->flags & SER_RS485_RTS_ON_SEND) { cr3 &= ~USART_CR3_DEP; rs485conf->flags &= ~SER_RS485_RTS_AFTER_SEND; @@ -903,8 +1267,8 @@ static void stm32_usart_set_termios(struct uart_port *port, cr1 &= ~(USART_CR1_DEDT_MASK | USART_CR1_DEAT_MASK); } - /* Configure wake up from low power on start bit detection */ - if (stm32_port->wakeirq > 0) { + /* Enable wake up from low power on start bit detection */ + if (stm32_port->wakeup_src) { cr3 &= ~USART_CR3_WUS_MASK; cr3 |= USART_CR3_WUS_START_BIT; } @@ -955,8 +1319,8 @@ static void stm32_usart_pm(struct uart_port *port, unsigned int state, { struct stm32_port *stm32port = container_of(port, struct stm32_port, port); - const struct stm32_usart_offsets *ofs = &stm32port->info->ofs; - const struct stm32_usart_config *cfg = &stm32port->info->cfg; + struct stm32_usart_offsets *ofs = &stm32port->info->ofs; + struct stm32_usart_config *cfg = &stm32port->info->cfg; unsigned long flags = 0; switch (state) { @@ -972,6 +1336,40 @@ static void stm32_usart_pm(struct uart_port *port, unsigned int state, } } +#if defined(CONFIG_CONSOLE_POLL) + + /* Callbacks for characters polling in debug context (i.e. KGDB). */ +static int stm32_usart_poll_init(struct uart_port *port) +{ + struct stm32_port *stm32_port = to_stm32_port(port); + + return clk_prepare_enable(stm32_port->clk); +} + +static int stm32_usart_poll_get_char(struct uart_port *port) +{ + struct stm32_port *stm32_port = to_stm32_port(port); + struct stm32_usart_offsets *ofs = &stm32_port->info->ofs; + unsigned int ret; + + if (!(readl_relaxed(port->membase + ofs->isr) & USART_SR_RXNE)) + return NO_POLL_CHAR; + + ret = readl_relaxed(port->membase + ofs->rdr); + /* Apply RDR data mask */ + ret &= stm32_port->rdr_mask; + + return ret; +} + +static void __maybe_unused stm32_usart_console_putchar(struct uart_port *port, int ch); + +static void stm32_usart_poll_put_char(struct uart_port *port, unsigned char ch) +{ + stm32_usart_console_putchar(port, (int)ch); +} +#endif + static const struct uart_ops stm32_uart_ops = { .tx_empty = stm32_usart_tx_empty, .set_mctrl = stm32_usart_set_mctrl, @@ -985,6 +1383,7 @@ static const struct uart_ops stm32_uart_ops = { .break_ctl = stm32_usart_break_ctl, .startup = stm32_usart_startup, .shutdown = stm32_usart_shutdown, + .flush_buffer = stm32_usart_flush_buffer, .set_termios = stm32_usart_set_termios, .pm = stm32_usart_pm, .type = stm32_usart_type, @@ -992,8 +1391,52 @@ static const struct uart_ops stm32_uart_ops = { .request_port = stm32_usart_request_port, .config_port = stm32_usart_config_port, .verify_port = stm32_usart_verify_port, +#if defined(CONFIG_CONSOLE_POLL) + .poll_init = stm32_usart_poll_init, + .poll_get_char = stm32_usart_poll_get_char, + .poll_put_char = stm32_usart_poll_put_char, +#endif /* CONFIG_CONSOLE_POLL */ + }; +/* + * STM32H7 RX & TX FIFO threshold configuration (CR3 RXFTCFG / TXFTCFG) + * Note: 1 isn't a valid value in RXFTCFG / TXFTCFG. In this case, + * RXNEIE / TXEIE can be used instead of threshold irqs: RXFTIE / TXFTIE. + * So, RXFTCFG / TXFTCFG bitfields values are encoded as array index + 1. 
+ */ +static const u32 stm32h7_usart_fifo_thresh_cfg[] = { 1, 2, 4, 8, 12, 14, 16 }; + +static void stm32_usart_get_ftcfg(struct platform_device *pdev, const char *p, + int *ftcfg) +{ + u32 bytes, i; + + /* DT option to get RX & TX FIFO threshold (default to 8 bytes) */ + if (of_property_read_u32(pdev->dev.of_node, p, &bytes)) + bytes = 8; + + for (i = 0; i < ARRAY_SIZE(stm32h7_usart_fifo_thresh_cfg); i++) + if (stm32h7_usart_fifo_thresh_cfg[i] >= bytes) + break; + if (i >= ARRAY_SIZE(stm32h7_usart_fifo_thresh_cfg)) + i = ARRAY_SIZE(stm32h7_usart_fifo_thresh_cfg) - 1; + + dev_dbg(&pdev->dev, "%s set to %d bytes\n", p, + stm32h7_usart_fifo_thresh_cfg[i]); + + /* Provide FIFO threshold ftcfg (1 is invalid: threshold irq unused) */ + if (i) + *ftcfg = i - 1; + else + *ftcfg = -EINVAL; +} + +static void stm32_usart_deinit_port(struct stm32_port *stm32port) +{ + clk_disable_unprepare(stm32port->clk); +} + static int stm32_usart_init_port(struct stm32_port *stm32port, struct platform_device *pdev) { @@ -1018,13 +1461,17 @@ static int stm32_usart_init_port(struct stm32_port *stm32port, if (ret) return ret; - if (stm32port->info->cfg.has_wakeup) { - stm32port->wakeirq = platform_get_irq_optional(pdev, 1); - if (stm32port->wakeirq <= 0 && stm32port->wakeirq != -ENXIO) - return stm32port->wakeirq ? : -ENODEV; - } + if (stm32port->info->cfg.has_wakeup) + stm32port->wakeup_src = of_property_read_bool(pdev->dev.of_node, + "wakeup-source"); stm32port->fifoen = stm32port->info->cfg.has_fifo; + if (stm32port->fifoen) { + stm32_usart_get_ftcfg(pdev, "st,rx-fifo-threshold-bytes", + &stm32port->rxftcfg); + stm32_usart_get_ftcfg(pdev, "st,tx-fifo-threshold-bytes", + &stm32port->txftcfg); + } res = platform_get_resource(pdev, IORESOURCE_MEM, 0); port->membase = devm_ioremap_resource(&pdev->dev, res); @@ -1055,7 +1502,10 @@ static int stm32_usart_init_port(struct stm32_port *stm32port, goto err_clk; } - /* Both CTS/RTS gpios and "st,hw-flow-ctrl" should not be specified */ + /* + * Both CTS/RTS gpios and "st,hw-flow-ctrl" (deprecated) or "uart-has-rtscts" + * properties should not be specified. + */ if (stm32port->hw_flow_control) { if (mctrl_gpio_to_gpiod(stm32port->gpios, UART_GPIO_CTS) || mctrl_gpio_to_gpiod(stm32port->gpios, UART_GPIO_RTS)) { @@ -1097,6 +1547,8 @@ static struct stm32_port *stm32_usart_of_get_port(struct platform_device *pdev) stm32_ports[id].cr1_irq = USART_CR1_RXNEIE; stm32_ports[id].cr3_irq = 0; stm32_ports[id].last_res = RX_BUF_L; + stm32_ports[id].rx_dma_buf = 0; + stm32_ports[id].tx_dma_buf = 0; return &stm32_ports[id]; } @@ -1111,36 +1563,28 @@ static const struct of_device_id stm32_match[] = { MODULE_DEVICE_TABLE(of, stm32_match); #endif +static void stm32_usart_of_dma_rx_remove(struct stm32_port *stm32port, + struct platform_device *pdev) +{ + if (stm32port->rx_buf) + dma_free_coherent(&pdev->dev, RX_BUF_L, stm32port->rx_buf, + stm32port->rx_dma_buf); +} + static int stm32_usart_of_dma_rx_probe(struct stm32_port *stm32port, struct platform_device *pdev) { - const struct stm32_usart_offsets *ofs = &stm32port->info->ofs; + struct stm32_usart_offsets *ofs = &stm32port->info->ofs; struct uart_port *port = &stm32port->port; struct device *dev = &pdev->dev; struct dma_slave_config config; - struct dma_async_tx_descriptor *desc = NULL; int ret; - /* - * Using DMA and threaded handler for the console could lead to - * deadlocks. 
- */ - if (uart_console(port)) - return -ENODEV; - - /* Request DMA RX channel */ - stm32port->rx_ch = dma_request_slave_channel(dev, "rx"); - if (!stm32port->rx_ch) { - dev_info(dev, "rx dma alloc failed\n"); - return -ENODEV; - } stm32port->rx_buf = dma_alloc_coherent(&pdev->dev, RX_BUF_L, &stm32port->rx_dma_buf, GFP_KERNEL); - if (!stm32port->rx_buf) { - ret = -ENOMEM; - goto alloc_err; - } + if (!stm32port->rx_buf) + return -ENOMEM; /* Configure DMA channel */ memset(&config, 0, sizeof(config)); @@ -1150,73 +1594,35 @@ static int stm32_usart_of_dma_rx_probe(struct stm32_port *stm32port, ret = dmaengine_slave_config(stm32port->rx_ch, &config); if (ret < 0) { dev_err(dev, "rx dma channel config failed\n"); - ret = -ENODEV; - goto config_err; - } - - /* Prepare a DMA cyclic transaction */ - desc = dmaengine_prep_dma_cyclic(stm32port->rx_ch, - stm32port->rx_dma_buf, - RX_BUF_L, RX_BUF_P, DMA_DEV_TO_MEM, - DMA_PREP_INTERRUPT); - if (!desc) { - dev_err(dev, "rx dma prep cyclic failed\n"); - ret = -ENODEV; - goto config_err; - } - - /* No callback as dma buffer is drained on usart interrupt */ - desc->callback = NULL; - desc->callback_param = NULL; - - /* Push current DMA transaction in the pending queue */ - ret = dma_submit_error(dmaengine_submit(desc)); - if (ret) { - dmaengine_terminate_sync(stm32port->rx_ch); - goto config_err; + stm32_usart_of_dma_rx_remove(stm32port, pdev); + return ret; } - /* Issue pending DMA requests */ - dma_async_issue_pending(stm32port->rx_ch); - return 0; +} -config_err: - dma_free_coherent(&pdev->dev, - RX_BUF_L, stm32port->rx_buf, - stm32port->rx_dma_buf); - -alloc_err: - dma_release_channel(stm32port->rx_ch); - stm32port->rx_ch = NULL; - - return ret; +static void stm32_usart_of_dma_tx_remove(struct stm32_port *stm32port, + struct platform_device *pdev) +{ + if (stm32port->tx_buf) + dma_free_coherent(&pdev->dev, TX_BUF_L, stm32port->tx_buf, + stm32port->tx_dma_buf); } static int stm32_usart_of_dma_tx_probe(struct stm32_port *stm32port, struct platform_device *pdev) { - const struct stm32_usart_offsets *ofs = &stm32port->info->ofs; + struct stm32_usart_offsets *ofs = &stm32port->info->ofs; struct uart_port *port = &stm32port->port; struct device *dev = &pdev->dev; struct dma_slave_config config; int ret; - stm32port->tx_dma_busy = false; - - /* Request DMA TX channel */ - stm32port->tx_ch = dma_request_slave_channel(dev, "tx"); - if (!stm32port->tx_ch) { - dev_info(dev, "tx dma alloc failed\n"); - return -ENODEV; - } stm32port->tx_buf = dma_alloc_coherent(&pdev->dev, TX_BUF_L, &stm32port->tx_dma_buf, GFP_KERNEL); - if (!stm32port->tx_buf) { - ret = -ENOMEM; - goto alloc_err; - } + if (!stm32port->tx_buf) + return -ENOMEM; /* Configure DMA channel */ memset(&config, 0, sizeof(config)); @@ -1226,26 +1632,16 @@ static int stm32_usart_of_dma_tx_probe(struct stm32_port *stm32port, ret = dmaengine_slave_config(stm32port->tx_ch, &config); if (ret < 0) { dev_err(dev, "tx dma channel config failed\n"); - ret = -ENODEV; - goto config_err; + stm32_usart_of_dma_tx_remove(stm32port, pdev); + return ret; } return 0; - -config_err: - dma_free_coherent(&pdev->dev, - TX_BUF_L, stm32port->tx_buf, - stm32port->tx_dma_buf); - -alloc_err: - dma_release_channel(stm32port->tx_ch); - stm32port->tx_ch = NULL; - - return ret; } static int stm32_usart_serial_probe(struct platform_device *pdev) { + const struct of_device_id *match; struct stm32_port *stm32port; int ret; @@ -1253,34 +1649,57 @@ static int stm32_usart_serial_probe(struct platform_device *pdev) if (!stm32port) return 
-ENODEV; - stm32port->info = of_device_get_match_data(&pdev->dev); - if (!stm32port->info) + match = of_match_device(stm32_match, &pdev->dev); + if (match && match->data) + stm32port->info = (struct stm32_usart_info *)match->data; + else return -EINVAL; ret = stm32_usart_init_port(stm32port, pdev); if (ret) return ret; - if (stm32port->wakeirq > 0) { - ret = device_init_wakeup(&pdev->dev, true); + if (stm32port->wakeup_src) { + device_set_wakeup_capable(&pdev->dev, true); + ret = dev_pm_set_wake_irq(&pdev->dev, stm32port->port.irq); if (ret) - goto err_uninit; - - ret = dev_pm_set_dedicated_wake_irq(&pdev->dev, - stm32port->wakeirq); - if (ret) - goto err_nowup; + goto err_deinit_port; + } - device_set_wakeup_enable(&pdev->dev, false); + stm32port->rx_ch = dma_request_chan_linked(&pdev->dev, "rx"); + if (PTR_ERR(stm32port->rx_ch) == -EPROBE_DEFER) { + ret = -EPROBE_DEFER; + goto err_wakeirq; + } + /* Fall back in interrupt mode for any non-deferral error */ + if (IS_ERR(stm32port->rx_ch)) + stm32port->rx_ch = NULL; + + stm32port->tx_ch = dma_request_chan(&pdev->dev, "tx"); + if (PTR_ERR(stm32port->tx_ch) == -EPROBE_DEFER) { + ret = -EPROBE_DEFER; + goto err_dma_rx; + } + /* Fall back in interrupt mode for any non-deferral error */ + if (IS_ERR(stm32port->tx_ch)) + stm32port->tx_ch = NULL; + + if (stm32port->rx_ch && stm32_usart_of_dma_rx_probe(stm32port, pdev)) { + /* Fall back in interrupt mode */ + dma_release_chan_linked(&pdev->dev, stm32port->rx_ch); + stm32port->rx_ch = NULL; } - ret = stm32_usart_of_dma_rx_probe(stm32port, pdev); - if (ret) - dev_info(&pdev->dev, "interrupt mode used for rx (no dma)\n"); + if (stm32port->tx_ch && stm32_usart_of_dma_tx_probe(stm32port, pdev)) { + /* Fall back in interrupt mode */ + dma_release_channel(stm32port->tx_ch); + stm32port->tx_ch = NULL; + } - ret = stm32_usart_of_dma_tx_probe(stm32port, pdev); - if (ret) - dev_info(&pdev->dev, "interrupt mode used for tx (no dma)\n"); + if (!stm32port->rx_ch) + dev_info(&pdev->dev, "interrupt mode for rx (no dma)\n"); + if (!stm32port->tx_ch) + dev_info(&pdev->dev, "interrupt mode for tx (no dma)\n"); platform_set_drvdata(pdev, &stm32port->port); @@ -1301,35 +1720,27 @@ static int stm32_usart_serial_probe(struct platform_device *pdev) pm_runtime_set_suspended(&pdev->dev); pm_runtime_put_noidle(&pdev->dev); - if (stm32port->rx_ch) { - dmaengine_terminate_async(stm32port->rx_ch); - dma_release_channel(stm32port->rx_ch); - } - - if (stm32port->rx_dma_buf) - dma_free_coherent(&pdev->dev, - RX_BUF_L, stm32port->rx_buf, - stm32port->rx_dma_buf); - if (stm32port->tx_ch) { - dmaengine_terminate_async(stm32port->tx_ch); + stm32_usart_of_dma_tx_remove(stm32port, pdev); dma_release_channel(stm32port->tx_ch); } - if (stm32port->tx_dma_buf) - dma_free_coherent(&pdev->dev, - TX_BUF_L, stm32port->tx_buf, - stm32port->tx_dma_buf); + if (stm32port->rx_ch) + stm32_usart_of_dma_rx_remove(stm32port, pdev); - if (stm32port->wakeirq > 0) +err_dma_rx: + if (stm32port->rx_ch) + dma_release_chan_linked(&pdev->dev, stm32port->rx_ch); + +err_wakeirq: + if (stm32port->wakeup_src) dev_pm_clear_wake_irq(&pdev->dev); -err_nowup: - if (stm32port->wakeirq > 0) - device_init_wakeup(&pdev->dev, false); +err_deinit_port: + if (stm32port->wakeup_src) + device_set_wakeup_capable(&pdev->dev, false); -err_uninit: - clk_disable_unprepare(stm32port->clk); + stm32_usart_deinit_port(stm32port); return ret; } @@ -1338,8 +1749,9 @@ static int stm32_usart_serial_remove(struct platform_device *pdev) { struct uart_port *port = platform_get_drvdata(pdev); 
struct stm32_port *stm32_port = to_stm32_port(port); - const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs; + struct stm32_usart_offsets *ofs = &stm32_port->info->ofs; int err; + u32 cr3; pm_runtime_get_sync(&pdev->dev); err = uart_remove_one_port(&stm32_usart_driver, port); @@ -1350,59 +1762,61 @@ static int stm32_usart_serial_remove(struct platform_device *pdev) pm_runtime_set_suspended(&pdev->dev); pm_runtime_put_noidle(&pdev->dev); - stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_DMAR); - - if (stm32_port->rx_ch) { - dmaengine_terminate_async(stm32_port->rx_ch); - dma_release_channel(stm32_port->rx_ch); - } - - if (stm32_port->rx_dma_buf) - dma_free_coherent(&pdev->dev, - RX_BUF_L, stm32_port->rx_buf, - stm32_port->rx_dma_buf); - - stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_DMAT); + stm32_usart_clr_bits(port, ofs->cr1, USART_CR1_PEIE); if (stm32_port->tx_ch) { - dmaengine_terminate_async(stm32_port->tx_ch); + stm32_usart_tx_dma_terminate(stm32_port); + stm32_usart_of_dma_tx_remove(stm32_port, pdev); dma_release_channel(stm32_port->tx_ch); } - if (stm32_port->tx_dma_buf) - dma_free_coherent(&pdev->dev, - TX_BUF_L, stm32_port->tx_buf, - stm32_port->tx_dma_buf); + if (stm32_port->rx_ch) { + stm32_usart_of_dma_rx_remove(stm32_port, pdev); + dma_release_chan_linked(&pdev->dev, stm32_port->rx_ch); + } - if (stm32_port->wakeirq > 0) { + cr3 = readl_relaxed(port->membase + ofs->cr3); + cr3 &= ~USART_CR3_EIE; + cr3 &= ~USART_CR3_DMAR; + cr3 &= ~USART_CR3_DMAT; + cr3 &= ~USART_CR3_DDRE; + writel_relaxed(cr3, port->membase + ofs->cr3); + + if (stm32_port->wakeup_src) { dev_pm_clear_wake_irq(&pdev->dev); device_init_wakeup(&pdev->dev, false); } - clk_disable_unprepare(stm32_port->clk); + stm32_usart_deinit_port(stm32_port); return 0; } -#ifdef CONFIG_SERIAL_STM32_CONSOLE -static void stm32_usart_console_putchar(struct uart_port *port, int ch) +static void __maybe_unused stm32_usart_console_putchar(struct uart_port *port, int ch) { struct stm32_port *stm32_port = to_stm32_port(port); - const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs; - - while (!(readl_relaxed(port->membase + ofs->isr) & USART_SR_TXE)) - cpu_relax(); + struct stm32_usart_offsets *ofs = &stm32_port->info->ofs; + u32 isr; + int ret; + ret = readl_relaxed_poll_timeout_atomic(port->membase + ofs->isr, isr, + (isr & USART_SR_TXE), 100, + STM32_USART_TIMEOUT_USEC); + if (ret != 0) { + dev_err(port->dev, "Error while sending data in UART TX : %d\n", ret); + return; + } writel_relaxed(ch, port->membase + ofs->tdr); } +#ifdef CONFIG_SERIAL_STM32_CONSOLE static void stm32_usart_console_write(struct console *co, const char *s, unsigned int cnt) { struct uart_port *port = &stm32_ports[co->index].port; struct stm32_port *stm32_port = to_stm32_port(port); - const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs; - const struct stm32_usart_config *cfg = &stm32_port->info->cfg; + struct stm32_usart_offsets *ofs = &stm32_port->info->ofs; + struct stm32_usart_config *cfg = &stm32_port->info->cfg; unsigned long flags; u32 old_cr1, new_cr1; int locked = 1; @@ -1475,6 +1889,57 @@ static struct console stm32_console = { #define STM32_SERIAL_CONSOLE NULL #endif /* CONFIG_SERIAL_STM32_CONSOLE */ +#ifdef CONFIG_SERIAL_EARLYCON +static void early_stm32_usart_console_putchar(struct uart_port *port, int ch) +{ + struct stm32_usart_info *info = port->private_data; + + while (!(readl_relaxed(port->membase + info->ofs.isr) & USART_SR_TXE)) + cpu_relax(); + + writel_relaxed(ch, port->membase + info->ofs.tdr); +} + +static 
void early_stm32_serial_write(struct console *console, const char *s, unsigned int count) +{ + struct earlycon_device *device = console->data; + struct uart_port *port = &device->port; + + uart_console_write(port, s, count, early_stm32_usart_console_putchar); +} + +static int __init early_stm32_h7_serial_setup(struct earlycon_device *device, const char *options) +{ + if (!(device->port.membase || device->port.iobase)) + return -ENODEV; + device->port.private_data = &stm32h7_info; + device->con->write = early_stm32_serial_write; + return 0; +} + +static int __init early_stm32_f7_serial_setup(struct earlycon_device *device, const char *options) +{ + if (!(device->port.membase || device->port.iobase)) + return -ENODEV; + device->port.private_data = &stm32f7_info; + device->con->write = early_stm32_serial_write; + return 0; +} + +static int __init early_stm32_f4_serial_setup(struct earlycon_device *device, const char *options) +{ + if (!(device->port.membase || device->port.iobase)) + return -ENODEV; + device->port.private_data = &stm32f4_info; + device->con->write = early_stm32_serial_write; + return 0; +} + +OF_EARLYCON_DECLARE(stm32h7serial, "st,stm32h7-uart", early_stm32_h7_serial_setup); +OF_EARLYCON_DECLARE(stm32f7serial, "st,stm32f7-uart", early_stm32_f7_serial_setup); +OF_EARLYCON_DECLARE(stm32f4serial, "st,stm32-uart", early_stm32_f4_serial_setup); +#endif /* CONFIG_SERIAL_EARLYCON */ + static struct uart_driver stm32_usart_driver = { .driver_name = DRIVER_NAME, .dev_name = STM32_SERIAL_NAME, @@ -1484,14 +1949,17 @@ static struct uart_driver stm32_usart_driver = { .cons = STM32_SERIAL_CONSOLE, }; -static void __maybe_unused stm32_usart_serial_en_wakeup(struct uart_port *port, - bool enable) +static int __maybe_unused stm32_usart_serial_en_wakeup(struct uart_port *port, + bool enable) { struct stm32_port *stm32_port = to_stm32_port(port); - const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs; + struct stm32_usart_offsets *ofs = &stm32_port->info->ofs; + struct tty_port *tport = &port->state->port; + unsigned long flags; + int ret; - if (stm32_port->wakeirq <= 0) - return; + if (!stm32_port->wakeup_src || !tty_port_initialized(tport)) + return 0; /* * Enable low-power wake-up and wake-up irq if argument is set to @@ -1500,22 +1968,53 @@ static void __maybe_unused stm32_usart_serial_en_wakeup(struct uart_port *port, if (enable) { stm32_usart_set_bits(port, ofs->cr1, USART_CR1_UESM); stm32_usart_set_bits(port, ofs->cr3, USART_CR3_WUFIE); + mctrl_gpio_enable_irq_wake(stm32_port->gpios); + + /* + * When DMA is used for reception, it must be disabled before + * entering low-power mode and re-enabled when exiting from + * low-power mode. 
+ */ + if (stm32_port->rx_ch) { + /* Avoid race with RX IRQ when DMAR is cleared */ + spin_lock_irqsave(&port->lock, flags); + /* Poll data from DMA RX buffer if any */ + if (!stm32_usart_rx_dma_pause(stm32_port)) + stm32_usart_receive_chars(port, true); + stm32_usart_rx_dma_terminate(stm32_port); + spin_unlock_irqrestore(&port->lock, flags); + } + + spin_lock_irqsave(&port->lock, flags); + /* Poll data from RX FIFO if any */ + stm32_usart_receive_chars(port, false); + spin_unlock_irqrestore(&port->lock, flags); } else { + if (stm32_port->rx_ch) { + ret = stm32_usart_rx_dma_start_or_resume(port); + if (ret) + return ret; + } + mctrl_gpio_disable_irq_wake(stm32_port->gpios); stm32_usart_clr_bits(port, ofs->cr1, USART_CR1_UESM); stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_WUFIE); } + + return 0; } static int __maybe_unused stm32_usart_serial_suspend(struct device *dev) { struct uart_port *port = dev_get_drvdata(dev); + int ret; uart_suspend_port(&stm32_usart_driver, port); - if (device_may_wakeup(dev)) - stm32_usart_serial_en_wakeup(port, true); - else - stm32_usart_serial_en_wakeup(port, false); + if (device_may_wakeup(dev) || device_wakeup_path(dev)) { + ret = stm32_usart_serial_en_wakeup(port, true); + if (ret) + return ret; + } /* * When "no_console_suspend" is enabled, keep the pinctrl default state @@ -1524,7 +2023,7 @@ static int __maybe_unused stm32_usart_serial_suspend(struct device *dev) * capabilities. */ if (console_suspend_enabled || !uart_console(port)) { - if (device_may_wakeup(dev)) + if (device_may_wakeup(dev) || device_wakeup_path(dev)) pinctrl_pm_select_idle_state(dev); else pinctrl_pm_select_sleep_state(dev); @@ -1536,11 +2035,15 @@ static int __maybe_unused stm32_usart_serial_suspend(struct device *dev) static int __maybe_unused stm32_usart_serial_resume(struct device *dev) { struct uart_port *port = dev_get_drvdata(dev); + int ret; pinctrl_pm_select_default_state(dev); - if (device_may_wakeup(dev)) - stm32_usart_serial_en_wakeup(port, false); + if (device_may_wakeup(dev) || device_wakeup_path(dev)) { + ret = stm32_usart_serial_en_wakeup(port, false); + if (ret) + return ret; + } return uart_resume_port(&stm32_usart_driver, port); } diff --git a/drivers/tty/serial/stm32-usart.h b/drivers/tty/serial/stm32-usart.h index 94b568aa4..c634b78de 100644 --- a/drivers/tty/serial/stm32-usart.h +++ b/drivers/tty/serial/stm32-usart.h @@ -106,7 +106,7 @@ struct stm32_usart_info stm32h7_info = { /* USART_SR (F4) / USART_ISR (F7) */ #define USART_SR_PE BIT(0) #define USART_SR_FE BIT(1) -#define USART_SR_NF BIT(2) +#define USART_SR_NE BIT(2) /* F7 (NF for F4) */ #define USART_SR_ORE BIT(3) #define USART_SR_IDLE BIT(4) #define USART_SR_RXNE BIT(5) @@ -123,7 +123,8 @@ struct stm32_usart_info stm32h7_info = { #define USART_SR_SBKF BIT(18) /* F7 */ #define USART_SR_WUF BIT(20) /* H7 */ #define USART_SR_TEACK BIT(21) /* F7 */ -#define USART_SR_ERR_MASK (USART_SR_ORE | USART_SR_FE | USART_SR_PE) +#define USART_SR_ERR_MASK (USART_SR_ORE | USART_SR_NE | USART_SR_FE |\ + USART_SR_PE) /* Dummy bits */ #define USART_SR_DUMMY_RX BIT(16) @@ -213,12 +214,6 @@ struct stm32_usart_info stm32h7_info = { #define USART_CR3_TXFTCFG_MASK GENMASK(31, 29) /* H7 */ #define USART_CR3_TXFTCFG_SHIFT 29 /* H7 */ -/* TX FIFO threashold set to half of its depth */ -#define USART_CR3_TXFTCFG_HALF 0x2 - -/* RX FIFO threashold set to half of its depth */ -#define USART_CR3_RXFTCFG_HALF 0x2 - /* USART_GTPR */ #define USART_GTPR_PSC_MASK GENMASK(7, 0) #define USART_GTPR_GT_MASK GENMASK(15, 8) @@ -249,14 +244,16 @@ 
struct stm32_usart_info stm32h7_info = { #define STM32_SERIAL_NAME "ttySTM" #define STM32_MAX_PORTS 8 -#define RX_BUF_L 200 /* dma rx buffer length */ -#define RX_BUF_P RX_BUF_L /* dma rx buffer period */ -#define TX_BUF_L 200 /* dma tx buffer length */ +#define RX_BUF_L 4096 /* dma rx buffer length */ +#define RX_BUF_P (RX_BUF_L / 2) /* dma rx buffer period */ +#define TX_BUF_L RX_BUF_L /* dma tx buffer length */ + +#define STM32_USART_TIMEOUT_USEC USEC_PER_SEC /* 1s timeout in µs */ struct stm32_port { struct uart_port port; struct clk *clk; - const struct stm32_usart_info *info; + struct stm32_usart_info *info; struct dma_chan *rx_ch; /* dma rx channel */ dma_addr_t rx_dma_buf; /* dma rx buffer bus address */ unsigned char *rx_buf; /* dma rx buffer cpu address */ @@ -266,12 +263,17 @@ struct stm32_port { u32 cr1_irq; /* USART_CR1_RXNEIE or RTOIE */ u32 cr3_irq; /* USART_CR3_RXFTIE */ int last_res; - bool tx_dma_busy; /* dma tx busy */ + bool throttled; /* port throttled */ bool hw_flow_control; bool fifoen; - int wakeirq; + int rxftcfg; /* RX FIFO threshold CFG */ + int txftcfg; /* TX FIFO threshold CFG */ + bool wakeup_src; int rdr_mask; /* receive data register mask */ struct mctrl_gpios *gpios; /* modem control gpios */ + struct dma_tx_state rx_dma_state; + bool tx_dma_busy; /* dma tx transaction in progress */ + bool rx_dma_busy; /* dma rx transaction in progress */ }; static struct stm32_port stm32_ports[STM32_MAX_PORTS]; diff --git a/include/uapi/linux/serial.h b/include/uapi/linux/serial.h index 93eb3c496..2d98e65d2 100644 --- a/include/uapi/linux/serial.h +++ b/include/uapi/linux/serial.h @@ -128,6 +128,8 @@ struct serial_rs485 { (if supported) */ __u32 delay_rts_before_send; /* Delay before send (milliseconds) */ __u32 delay_rts_after_send; /* Delay after send (milliseconds) */ + __u32 delay_rts_before_send_ns; /* Delay (nanoseconds) */ + __u32 delay_rts_after_send_ns; /* Delay (nanoseconds) */ __u32 padding[5]; /* Memory is cheap, new structs are a royal PITA .. */ }; -- 2.17.1
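
Usage note (illustrative, not part of the patch): the final hunk extends the RS485 uapi with nanosecond-resolution RTS turnaround fields alongside the existing millisecond ones. Below is a minimal userspace sketch of how such fields could be programmed through the standard TIOCGRS485/TIOCSRS485 ioctls. The device node name (/dev/ttySTM0, derived from STM32_SERIAL_NAME above) and the delay values are assumptions for illustration, and whether the stm32-usart driver actually consumes the new *_ns fields is not shown in this section.

/*
 * rs485_ns_demo.c - hedged userspace sketch; requires the patched
 * <linux/serial.h> that carries the *_ns fields added above.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/serial.h>

int main(void)
{
	struct serial_rs485 rs485;
	int fd;

	fd = open("/dev/ttySTM0", O_RDWR | O_NOCTTY);	/* node name is an example */
	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* Read back the current RS485 configuration before changing it. */
	memset(&rs485, 0, sizeof(rs485));
	if (ioctl(fd, TIOCGRS485, &rs485) < 0) {
		perror("TIOCGRS485");
		close(fd);
		return 1;
	}

	/* Use RTS as the transceiver direction signal. */
	rs485.flags |= SER_RS485_ENABLED | SER_RS485_RTS_ON_SEND;
	rs485.flags &= ~SER_RS485_RTS_AFTER_SEND;

	/* Millisecond fields remain; the new fields add ns resolution. */
	rs485.delay_rts_before_send = 0;
	rs485.delay_rts_after_send = 0;
	rs485.delay_rts_before_send_ns = 50000;	/* 50 us before TX (example) */
	rs485.delay_rts_after_send_ns = 50000;	/* 50 us after TX (example) */

	if (ioctl(fd, TIOCSRS485, &rs485) < 0) {
		perror("TIOCSRS485");
		close(fd);
		return 1;
	}

	close(fd);
	return 0;
}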