6323 lines
176 KiB
Diff
6323 lines
176 KiB
Diff
From 1114fa4cbc859f4a0cc5e19f267c66634888c798 Mon Sep 17 00:00:00 2001
|
|
From: Romuald Jeanne <romuald.jeanne@st.com>
|
|
Date: Tue, 25 Jul 2023 10:45:56 +0200
|
|
Subject: [PATCH 10/22] v5.15-stm32mp-r2.1 REMOTEPROC-RPMSG
|
|
|
|
Signed-off-by: Romuald Jeanne <romuald.jeanne@st.com>
|
|
---
|
|
Documentation/staging/remoteproc.rst | 22 +
|
|
Documentation/staging/tee.rst | 30 +
|
|
drivers/remoteproc/Kconfig | 29 +
|
|
drivers/remoteproc/Makefile | 3 +
|
|
drivers/remoteproc/remoteproc_core.c | 8 +
|
|
drivers/remoteproc/rproc_srm_core.c | 303 ++++++++++
|
|
drivers/remoteproc/rproc_srm_core.h | 98 ++++
|
|
drivers/remoteproc/rproc_srm_dev.c | 744 ++++++++++++++++++++++++
|
|
drivers/remoteproc/stm32_rproc.c | 324 +++++++----
|
|
drivers/remoteproc/tee_remoteproc.c | 378 +++++++++++++
|
|
drivers/rpmsg/Kconfig | 8 +
|
|
drivers/rpmsg/Makefile | 1 +
|
|
drivers/rpmsg/qcom_glink_native.c | 2 +-
|
|
drivers/rpmsg/qcom_smd.c | 2 +-
|
|
drivers/rpmsg/rpmsg_char.c | 229 +++-----
|
|
drivers/rpmsg/rpmsg_char.h | 46 ++
|
|
drivers/rpmsg/rpmsg_core.c | 36 +-
|
|
drivers/rpmsg/rpmsg_ctrl.c | 243 ++++++++
|
|
drivers/rpmsg/rpmsg_internal.h | 12 +-
|
|
drivers/rpmsg/virtio_rpmsg_bus.c | 12 +-
|
|
drivers/tee/amdtee/amdtee_private.h | 8 +-
|
|
drivers/tee/amdtee/core.c | 22 +-
|
|
drivers/tee/optee/Makefile | 2 +
|
|
drivers/tee/optee/call.c | 814 +++++++++++++++++++++------
|
|
drivers/tee/optee/call_queue.c | 86 +++
|
|
drivers/tee/optee/core.c | 463 +++++++++++++--
|
|
drivers/tee/optee/notif.c | 125 ++++
|
|
drivers/tee/optee/optee_msg.h | 160 +++++-
|
|
drivers/tee/optee/optee_private.h | 180 +++++-
|
|
drivers/tee/optee/optee_rpc_cmd.h | 31 +-
|
|
drivers/tee/optee/optee_smc.h | 94 +++-
|
|
drivers/tee/optee/rpc.c | 99 ++--
|
|
drivers/tee/tee_core.c | 73 ++-
|
|
drivers/tee/tee_shm.c | 13 +-
|
|
include/linux/rpmsg.h | 10 +
|
|
include/linux/tee_drv.h | 17 +-
|
|
include/linux/tee_remoteproc.h | 101 ++++
|
|
include/uapi/linux/rpmsg.h | 10 +
|
|
include/uapi/linux/tee.h | 36 +-
|
|
39 files changed, 4292 insertions(+), 582 deletions(-)
|
|
create mode 100644 drivers/remoteproc/rproc_srm_core.c
|
|
create mode 100644 drivers/remoteproc/rproc_srm_core.h
|
|
create mode 100644 drivers/remoteproc/rproc_srm_dev.c
|
|
create mode 100644 drivers/remoteproc/tee_remoteproc.c
|
|
create mode 100644 drivers/rpmsg/rpmsg_char.h
|
|
create mode 100644 drivers/rpmsg/rpmsg_ctrl.c
|
|
create mode 100644 drivers/tee/optee/call_queue.c
|
|
create mode 100644 drivers/tee/optee/notif.c
|
|
create mode 100644 include/linux/tee_remoteproc.h
|
|
|
|
diff --git a/Documentation/staging/remoteproc.rst b/Documentation/staging/remoteproc.rst
|
|
index 9cccd3dd6a4b..c2367e3c0b19 100644
|
|
--- a/Documentation/staging/remoteproc.rst
|
|
+++ b/Documentation/staging/remoteproc.rst
|
|
@@ -357,3 +357,25 @@ Of course, RSC_VDEV resource entries are only good enough for static
|
|
allocation of virtio devices. Dynamic allocations will also be made possible
|
|
using the rpmsg bus (similar to how we already do dynamic allocations of
|
|
rpmsg channels; read more about it in rpmsg.txt).
|
|
+
|
|
+8. System Resource Manager (SRM)
|
|
+
|
|
+Since some resources are shared (directly or not) between the processors, a
|
|
+processor cannot manage such resources without potentially impacting the other
|
|
+processors: as an example, if a processor changes the frequency of a clock, the
|
|
+frequency of another clock managed by another processor may be updated too.
|
|
+
|
|
+The System Resource Manager prevents such resource conflicts between the
|
|
+processors: it reserves and initializes the system resources of the peripherals
|
|
+assigned to a remote processor.
|
|
+
|
|
+As of today the following resources are controlled by the SRM:
|
|
+- clocks
|
|
+- regulators (power supplies)
|
|
+
|
|
+The SRM is implemented as an 'rproc_subdev' and registered to remoteproc_core.
|
|
+Unlike the virtio device (vdev), the SRM subdev is probed *before* the rproc
|
|
+boots, ensuring the availability of the resources before the remoteproc starts.
|
|
+
|
|
+The resources handled by the SRM are defined in the DeviceTree: please read
|
|
+Documentation/devicetree/bindings/remoteproc/rproc-srm.txt for details.
|
|
diff --git a/Documentation/staging/tee.rst b/Documentation/staging/tee.rst
|
|
index 4d4b5f889603..3c63d8dcd61e 100644
|
|
--- a/Documentation/staging/tee.rst
|
|
+++ b/Documentation/staging/tee.rst
|
|
@@ -184,6 +184,36 @@ order to support device enumeration. In other words, OP-TEE driver invokes this
|
|
application to retrieve a list of Trusted Applications which can be registered
|
|
as devices on the TEE bus.
|
|
|
|
+OP-TEE notifications
|
|
+--------------------
|
|
+
|
|
+There are two kinds of notifications that secure world can use to make
|
|
+normal world aware of some event.
|
|
+
|
|
+1. Synchronous notifications delivered with ``OPTEE_RPC_CMD_NOTIFICATION``
|
|
+ using the ``OPTEE_RPC_NOTIFICATION_SEND`` parameter.
|
|
+2. Asynchronous notifications delivered with a combination of a non-secure
|
|
+ edge-triggered interrupt and a fast call from the non-secure interrupt
|
|
+ handler.
|
|
+
|
|
+Synchronous notifications are limited by depending on RPC for delivery,
|
|
+this is only usable when secure world is entered with a yielding call via
|
|
+``OPTEE_SMC_CALL_WITH_ARG``. This excludes such notifications from secure
|
|
+world interrupt handlers.
|
|
+
|
|
+An asynchronous notification is delivered via a non-secure edge-triggered
|
|
+interrupt to an interrupt handler registered in the OP-TEE driver. The
|
|
+actual notification value is retrieved with the fast call
|
|
+``OPTEE_SMC_GET_ASYNC_NOTIF_VALUE``. Note that one interrupt can represent
|
|
+multiple notifications.
|
|
+
|
|
+One notification value ``OPTEE_SMC_ASYNC_NOTIF_VALUE_DO_BOTTOM_HALF`` has a
|
|
+special meaning. When this value is received it means that normal world is
|
|
+supposed to make a yielding call ``OPTEE_MSG_CMD_DO_BOTTOM_HALF``. This
|
|
+call is done from the thread assisting the interrupt handler. This is a
|
|
+building block for OP-TEE OS in secure world to implement the top half and
|
|
+bottom half style of device drivers.
|
|
+
|
|
AMD-TEE driver
|
|
==============
|
|
|
|
diff --git a/drivers/remoteproc/Kconfig b/drivers/remoteproc/Kconfig
|
|
index 9a6eedc3994a..33e96dd612cf 100644
|
|
--- a/drivers/remoteproc/Kconfig
|
|
+++ b/drivers/remoteproc/Kconfig
|
|
@@ -23,6 +23,25 @@ config REMOTEPROC_CDEV
|
|
|
|
It's safe to say N if you don't want to use this interface.
|
|
|
|
+config REMOTEPROC_SRM_CORE
|
|
+ tristate "Remoteproc System Resource Manager core"
|
|
+ depends on RPMSG
|
|
+ help
|
|
+ Say y here to enable the core driver of the remoteproc System Resource
|
|
+ Manager (SRM).
|
|
+ The SRM handles resources allocated to remote processors.
|
|
+ The core part is in charge of controlling the device children.
|
|
+
|
|
+config REMOTEPROC_SRM_DEV
|
|
+ tristate "Remoteproc System Resource Manager device"
|
|
+ depends on REMOTEPROC_SRM_CORE
|
|
+ help
|
|
+ Say y here to enable the device driver of the remoteproc System
|
|
+ Resource Manager (SRM).
|
|
+ The SRM handles resources allocated to remote processors.
|
|
+ The device part is in charge of reserving and initializing resources
|
|
+ for a peripheral assigned to a coprocessor.
|
|
+
|
|
config IMX_REMOTEPROC
|
|
tristate "i.MX remoteproc support"
|
|
depends on ARCH_MXC
|
|
@@ -279,6 +298,7 @@ config STM32_RPROC
|
|
depends on ARCH_STM32
|
|
depends on REMOTEPROC
|
|
select MAILBOX
|
|
+ select TEE_REMOTEPROC
|
|
help
|
|
Say y here to support STM32 MCU processors via the
|
|
remote processor framework.
|
|
@@ -315,6 +335,15 @@ config TI_K3_R5_REMOTEPROC
|
|
It's safe to say N here if you're not interested in utilizing
|
|
a slave processor.
|
|
|
|
+
|
|
+config TEE_REMOTEPROC
|
|
+ tristate "trusted firmware support by a trusted application"
|
|
+ depends on OPTEE
|
|
+ help
|
|
+ Support for trusted remote processors firmware. The firmware
|
|
+ authentication and/or decryption are managed by a trusted application.
|
|
+ This can be either built-in or a loadable module.
|
|
+
|
|
endif # REMOTEPROC
|
|
|
|
endmenu
|
|
diff --git a/drivers/remoteproc/Makefile b/drivers/remoteproc/Makefile
|
|
index bb26c9e4ef9c..521dd3384fba 100644
|
|
--- a/drivers/remoteproc/Makefile
|
|
+++ b/drivers/remoteproc/Makefile
|
|
@@ -11,6 +11,9 @@ remoteproc-y += remoteproc_sysfs.o
|
|
remoteproc-y += remoteproc_virtio.o
|
|
remoteproc-y += remoteproc_elf_loader.o
|
|
obj-$(CONFIG_REMOTEPROC_CDEV) += remoteproc_cdev.o
|
|
+obj-$(CONFIG_REMOTEPROC_SRM_CORE) += rproc_srm_core.o
|
|
+obj-$(CONFIG_REMOTEPROC_SRM_DEV) += rproc_srm_dev.o
|
|
+obj-$(CONFIG_TEE_REMOTEPROC) += tee_remoteproc.o
|
|
obj-$(CONFIG_IMX_REMOTEPROC) += imx_rproc.o
|
|
obj-$(CONFIG_INGENIC_VPU_RPROC) += ingenic_rproc.o
|
|
obj-$(CONFIG_MTK_SCP) += mtk_scp.o mtk_scp_ipi.o
|
|
diff --git a/drivers/remoteproc/remoteproc_core.c b/drivers/remoteproc/remoteproc_core.c
|
|
index 97e59f746126..4312fe52a826 100644
|
|
--- a/drivers/remoteproc/remoteproc_core.c
|
|
+++ b/drivers/remoteproc/remoteproc_core.c
|
|
@@ -38,6 +38,7 @@
|
|
#include <linux/of_reserved_mem.h>
|
|
#include <linux/virtio_ids.h>
|
|
#include <linux/virtio_ring.h>
|
|
+#include <linux/of_platform.h>
|
|
#include <asm/byteorder.h>
|
|
#include <linux/platform_device.h>
|
|
|
|
@@ -2363,6 +2364,11 @@ int rproc_add(struct rproc *rproc)
|
|
/* create debugfs entries */
|
|
rproc_create_debug_dir(rproc);
|
|
|
|
+ /* add resource manager device */
|
|
+ ret = devm_of_platform_populate(dev->parent);
|
|
+ if (ret < 0)
|
|
+ return ret;
|
|
+
|
|
/* if rproc is marked always-on, request it to boot */
|
|
if (rproc->auto_boot) {
|
|
ret = rproc_trigger_auto_boot(rproc);
|
|
@@ -2644,6 +2650,8 @@ int rproc_del(struct rproc *rproc)
|
|
list_del_rcu(&rproc->node);
|
|
mutex_unlock(&rproc_list_mutex);
|
|
|
|
+ of_platform_depopulate(rproc->dev.parent);
|
|
+
|
|
/* Ensure that no readers of rproc_list are still active */
|
|
synchronize_rcu();
|
|
|
|
diff --git a/drivers/remoteproc/rproc_srm_core.c b/drivers/remoteproc/rproc_srm_core.c
|
|
new file mode 100644
|
|
index 000000000000..fc61e8b35686
|
|
--- /dev/null
|
|
+++ b/drivers/remoteproc/rproc_srm_core.c
|
|
@@ -0,0 +1,303 @@
|
|
+// SPDX-License-Identifier: GPL-2.0
|
|
+/*
|
|
+ * Copyright (C) STMicroelectronics 2018 - All Rights Reserved
|
|
+ * Author: Fabien Dessenne <fabien.dessenne@st.com> for STMicroelectronics.
|
|
+ */
|
|
+
|
|
+#include <linux/component.h>
|
|
+#include <linux/kernel.h>
|
|
+#include <linux/module.h>
|
|
+#include <linux/of_platform.h>
|
|
+#include <linux/remoteproc.h>
|
|
+#include <linux/rpmsg.h>
|
|
+
|
|
+#include "rproc_srm_core.h"
|
|
+
|
|
+#define BIND_TIMEOUT 10000
|
|
+
|
|
+struct rproc_srm_core {
|
|
+ struct device *dev;
|
|
+ struct completion all_bound;
|
|
+ int bind_status;
|
|
+ atomic_t prepared;
|
|
+ struct rproc_subdev subdev;
|
|
+ struct rpmsg_driver rpdrv;
|
|
+ struct blocking_notifier_head notifier;
|
|
+};
|
|
+
|
|
+#define to_rproc_srm_core(s) container_of(s, struct rproc_srm_core, subdev)
|
|
+
|
|
+static struct rproc_srm_core *rpmsg_srm_to_core(struct rpmsg_device *rpdev)
|
|
+{
|
|
+ struct rpmsg_driver *rpdrv;
|
|
+ struct rproc_srm_core *core;
|
|
+
|
|
+ rpdrv = container_of(rpdev->dev.driver, struct rpmsg_driver, drv);
|
|
+ core = container_of(rpdrv, struct rproc_srm_core, rpdrv);
|
|
+
|
|
+ return core;
|
|
+}
|
|
+
|
|
+int rpmsg_srm_send(struct rpmsg_endpoint *ept, struct rpmsg_srm_msg *msg)
|
|
+{
|
|
+ int ret;
|
|
+
|
|
+ ret = rpmsg_send(ept, (void *)msg, sizeof(*msg));
|
|
+ if (ret)
|
|
+ dev_err(&ept->rpdev->dev, "rpmsg_send failed: %d\n", ret);
|
|
+
|
|
+ return ret;
|
|
+}
|
|
+EXPORT_SYMBOL(rpmsg_srm_send);
|
|
+
|
|
+static int rpmsg_srm_cb(struct rpmsg_device *rpdev, void *data, int len,
|
|
+ void *priv, u32 src)
|
|
+{
|
|
+ struct rproc_srm_core *core = rpmsg_srm_to_core(rpdev);
|
|
+ struct rpmsg_srm_msg_desc desc;
|
|
+ int ret;
|
|
+
|
|
+ desc.ept = rpdev->ept;
|
|
+ desc.msg = data;
|
|
+
|
|
+ ret = blocking_notifier_call_chain(&core->notifier, 0, &desc);
|
|
+
|
|
+ if (!(ret & NOTIFY_STOP_MASK)) {
|
|
+ dev_warn(&rpdev->dev, "unknown device\n");
|
|
+ desc.msg->message_type = RPROC_SRM_MSG_ERROR;
|
|
+ rpmsg_srm_send(desc.ept, desc.msg);
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int rpmsg_srm_probe(struct rpmsg_device *rpdev)
|
|
+{
|
|
+ int ret;
|
|
+
|
|
+ dev_dbg(&rpdev->dev, "%s\n", __func__);
|
|
+
|
|
+ /* Send an empty message to complete the initialization */
|
|
+ ret = rpmsg_send(rpdev->ept, NULL, 0);
|
|
+ if (ret)
|
|
+ dev_err(&rpdev->dev, "failed to send init message\n");
|
|
+
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+static void rpmsg_srm_remove(struct rpmsg_device *rpdev)
|
|
+{
|
|
+ /* Note: the remove ops is mandatory */
|
|
+ dev_dbg(&rpdev->dev, "%s\n", __func__);
|
|
+}
|
|
+
|
|
+static struct rpmsg_device_id rpmsg_srm_id_table[] = {
|
|
+ { .name = "rproc-srm" },
|
|
+ { },
|
|
+};
|
|
+MODULE_DEVICE_TABLE(rpmsg, rpmsg_srm_id_table);
|
|
+
|
|
+static struct rpmsg_driver rpmsg_srm_drv = {
|
|
+ .drv.name = "rpmsg_srm",
|
|
+ .id_table = rpmsg_srm_id_table,
|
|
+ .probe = rpmsg_srm_probe,
|
|
+ .callback = rpmsg_srm_cb,
|
|
+ .remove = rpmsg_srm_remove,
|
|
+};
|
|
+
|
|
+int rproc_srm_core_register_notifier(struct rproc_srm_core *core,
|
|
+ struct notifier_block *nb)
|
|
+{
|
|
+ return blocking_notifier_chain_register(&core->notifier, nb);
|
|
+}
|
|
+EXPORT_SYMBOL(rproc_srm_core_register_notifier);
|
|
+
|
|
+int rproc_srm_core_unregister_notifier(struct rproc_srm_core *core,
|
|
+ struct notifier_block *nb)
|
|
+{
|
|
+ return blocking_notifier_chain_unregister(&core->notifier, nb);
|
|
+}
|
|
+EXPORT_SYMBOL(rproc_srm_core_unregister_notifier);
|
|
+
|
|
+static int compare_of(struct device *dev, void *data)
|
|
+{
|
|
+ return dev->of_node == data;
|
|
+}
|
|
+
|
|
+static void release_of(struct device *dev, void *data)
|
|
+{
|
|
+ of_node_put(data);
|
|
+}
|
|
+
|
|
+static void rproc_srm_core_unbind(struct device *dev)
|
|
+{
|
|
+ component_unbind_all(dev, NULL);
|
|
+}
|
|
+
|
|
+static int rproc_srm_core_bind(struct device *dev)
|
|
+{
|
|
+ struct rproc_srm_core *rproc_srm_core = dev_get_drvdata(dev);
|
|
+
|
|
+ rproc_srm_core->bind_status = component_bind_all(dev, NULL);
|
|
+ complete(&rproc_srm_core->all_bound);
|
|
+
|
|
+ return rproc_srm_core->bind_status;
|
|
+}
|
|
+
|
|
+static const struct component_master_ops srm_comp_ops = {
|
|
+ .bind = rproc_srm_core_bind,
|
|
+ .unbind = rproc_srm_core_unbind,
|
|
+};
|
|
+
|
|
+static int rproc_srm_core_prepare(struct rproc_subdev *subdev)
|
|
+{
|
|
+ struct rproc_srm_core *rproc_srm_core = to_rproc_srm_core(subdev);
|
|
+ struct device *dev = rproc_srm_core->dev;
|
|
+ struct device_node *node = dev->of_node;
|
|
+ struct device_node *child_np;
|
|
+ struct component_match *match = NULL;
|
|
+ int ret;
|
|
+
|
|
+ dev_dbg(dev, "%s\n", __func__);
|
|
+
|
|
+ init_completion(&rproc_srm_core->all_bound);
|
|
+
|
|
+ ret = devm_of_platform_populate(dev);
|
|
+ if (ret) {
|
|
+ dev_err(dev, "cannot populate node (%d)\n", ret);
|
|
+ return ret;
|
|
+ }
|
|
+
|
|
+ child_np = of_get_next_available_child(node, NULL);
|
|
+
|
|
+ while (child_np) {
|
|
+ of_node_get(child_np);
|
|
+ component_match_add_release(dev, &match, release_of, compare_of,
|
|
+ child_np);
|
|
+ child_np = of_get_next_available_child(node, child_np);
|
|
+ }
|
|
+
|
|
+ if (!match) {
|
|
+ dev_dbg(dev, "No available child\n");
|
|
+ goto done;
|
|
+ }
|
|
+
|
|
+ ret = component_master_add_with_match(dev, &srm_comp_ops, match);
|
|
+ if (ret)
|
|
+ goto depopulate;
|
|
+
|
|
+ /* Wait for every child to be bound */
|
|
+ if (!wait_for_completion_timeout(&rproc_srm_core->all_bound,
|
|
+ msecs_to_jiffies(BIND_TIMEOUT))) {
|
|
+ dev_err(dev, "failed to bind one or more system resource device(s)\n");
|
|
+ ret = -ETIMEDOUT;
|
|
+ goto master;
|
|
+ }
|
|
+
|
|
+ ret = rproc_srm_core->bind_status;
|
|
+ if (ret) {
|
|
+ dev_err(dev, "failed to bind\n");
|
|
+ goto master;
|
|
+ }
|
|
+
|
|
+ /* Register rpmsg driver for dynamic management */
|
|
+ rproc_srm_core->rpdrv = rpmsg_srm_drv;
|
|
+ ret = register_rpmsg_driver(&rproc_srm_core->rpdrv);
|
|
+ if (ret) {
|
|
+ dev_err(dev, "failed to register rpmsg drv\n");
|
|
+ goto master;
|
|
+ }
|
|
+
|
|
+done:
|
|
+ atomic_inc(&rproc_srm_core->prepared);
|
|
+
|
|
+ return 0;
|
|
+
|
|
+master:
|
|
+ component_master_del(dev, &srm_comp_ops);
|
|
+depopulate:
|
|
+ devm_of_platform_depopulate(dev);
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+static void rproc_srm_core_unprepare(struct rproc_subdev *subdev)
|
|
+{
|
|
+ struct rproc_srm_core *rproc_srm_core = to_rproc_srm_core(subdev);
|
|
+ struct device *dev = rproc_srm_core->dev;
|
|
+
|
|
+ dev_dbg(dev, "%s\n", __func__);
|
|
+
|
|
+ if (!atomic_read(&rproc_srm_core->prepared))
|
|
+ return;
|
|
+
|
|
+ atomic_dec(&rproc_srm_core->prepared);
|
|
+
|
|
+ unregister_rpmsg_driver(&rproc_srm_core->rpdrv);
|
|
+
|
|
+ component_master_del(dev, &srm_comp_ops);
|
|
+ devm_of_platform_depopulate(dev);
|
|
+}
|
|
+
|
|
+static int rproc_srm_core_probe(struct platform_device *pdev)
|
|
+{
|
|
+ struct device *dev = &pdev->dev;
|
|
+ struct rproc *rproc = dev_get_drvdata(dev->parent);
|
|
+ struct rproc_srm_core *rproc_srm_core;
|
|
+
|
|
+ dev_dbg(dev, "%s\n", __func__);
|
|
+
|
|
+ rproc_srm_core = devm_kzalloc(dev, sizeof(struct rproc_srm_core),
|
|
+ GFP_KERNEL);
|
|
+ if (!rproc_srm_core)
|
|
+ return -ENOMEM;
|
|
+
|
|
+ rproc_srm_core->dev = dev;
|
|
+ BLOCKING_INIT_NOTIFIER_HEAD(&rproc_srm_core->notifier);
|
|
+
|
|
+ /* Register rproc subdevice with (un)prepare ops */
|
|
+ rproc_srm_core->subdev.prepare = rproc_srm_core_prepare;
|
|
+ rproc_srm_core->subdev.unprepare = rproc_srm_core_unprepare;
|
|
+ rproc_add_subdev(rproc, &rproc_srm_core->subdev);
|
|
+
|
|
+ dev_set_drvdata(dev, rproc_srm_core);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int rproc_srm_core_remove(struct platform_device *pdev)
|
|
+{
|
|
+ struct device *dev = &pdev->dev;
|
|
+ struct rproc_srm_core *rproc_srm_core = dev_get_drvdata(dev);
|
|
+ struct rproc *rproc = dev_get_drvdata(dev->parent);
|
|
+
|
|
+ dev_dbg(dev, "%s\n", __func__);
|
|
+
|
|
+ if (atomic_read(&rproc->power) > 0)
|
|
+ dev_warn(dev, "Releasing resources while firmware running!\n");
|
|
+
|
|
+ rproc_srm_core_unprepare(&rproc_srm_core->subdev);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static const struct of_device_id rproc_srm_core_match[] = {
|
|
+ { .compatible = "rproc-srm-core", },
|
|
+ {},
|
|
+};
|
|
+
|
|
+MODULE_DEVICE_TABLE(of, rproc_srm_core_match);
|
|
+
|
|
+static struct platform_driver rproc_srm_core_driver = {
|
|
+ .probe = rproc_srm_core_probe,
|
|
+ .remove = rproc_srm_core_remove,
|
|
+ .driver = {
|
|
+ .name = "rproc-srm-core",
|
|
+ .of_match_table = of_match_ptr(rproc_srm_core_match),
|
|
+ },
|
|
+};
|
|
+
|
|
+module_platform_driver(rproc_srm_core_driver);
|
|
+
|
|
+MODULE_AUTHOR("Fabien Dessenne <fabien.dessenne@st.com>");
|
|
+MODULE_DESCRIPTION("Remoteproc System Resource Manager driver - core");
|
|
+MODULE_LICENSE("GPL v2");
|
|
diff --git a/drivers/remoteproc/rproc_srm_core.h b/drivers/remoteproc/rproc_srm_core.h
|
|
new file mode 100644
|
|
index 000000000000..7dffdb38f4d4
|
|
--- /dev/null
|
|
+++ b/drivers/remoteproc/rproc_srm_core.h
|
|
@@ -0,0 +1,98 @@
|
|
+/* SPDX-License-Identifier: GPL-2.0 */
|
|
+/*
|
|
+ * Copyright (C) STMicroelectronics 2018 - All Rights Reserved
|
|
+ * Author: Fabien Dessenne <fabien.dessenne@st.com> for STMicroelectronics.
|
|
+ */
|
|
+
|
|
+#ifndef _RPROC_SRM_CORE_H_
|
|
+#define _RPROC_SRM_CORE_H_
|
|
+
|
|
+/**
|
|
+ * Message type used in resource manager rpmsg:
|
|
+ * RPROC_SRM_MSG_GETCONFIG: Request to get the configuration of a resource
|
|
+ * RPROC_SRM_MSG_SETCONFIG: Request to set the configuration of a resource
|
|
+ * RPROC_SRM_MSG_ERROR: Error when processing a request
|
|
+ */
|
|
+#define RPROC_SRM_MSG_GETCONFIG 0x00
|
|
+#define RPROC_SRM_MSG_SETCONFIG 0x01
|
|
+#define RPROC_SRM_MSG_ERROR 0xFF
|
|
+
|
|
+/**
|
|
+ * Resource type used in resource manager rpmsg:
|
|
+ * RPROC_SRM_RSC_CLOCK: clock resource
|
|
+ * RPROC_SRM_RSC_REGU: regulator resource
|
|
+ */
|
|
+#define RPROC_SRM_RSC_CLOCK 0x00
|
|
+#define RPROC_SRM_RSC_REGU 0x01
|
|
+
|
|
+/**
|
|
+ * struct clock_cfg - clock configuration used in resource manager rpmsg
|
|
+ * @index: clock index
|
|
+ * @name: clock name
|
|
+ * @rate: clock rate request (in SetConfig message) or current status (in
|
|
+ * GetConfig message)
|
|
+ */
|
|
+struct clock_cfg {
|
|
+ u32 index;
|
|
+ u8 name[16];
|
|
+ u32 rate;
|
|
+};
|
|
+
|
|
+/**
|
|
+ * struct regu_cfg - regu configuration used in resource manager rpmsg
|
|
+ * @index: regulator index
|
|
+ * @name: regulator name
|
|
+ * @enable: regulator enable/disable request (in SetConfig message) or
|
|
+ * current status (in GetConfig message)
|
|
+ * @curr_voltage_mv: current regulator voltage in mV (meaningful in
|
|
+ * SetConfig message)
|
|
+ * @min_voltage_mv: regulator min voltage request in mV (meaningful in
|
|
+ * SetConfig message)
|
|
+ * @max_voltage_mv: regulator max voltage request in mV (meaningful in
|
|
+ * SetConfig message)
|
|
+ */
|
|
+struct regu_cfg {
|
|
+ u32 index;
|
|
+ u8 name[16];
|
|
+ u32 enable;
|
|
+ u32 curr_voltage_mv;
|
|
+ u32 min_voltage_mv;
|
|
+ u32 max_voltage_mv;
|
|
+};
|
|
+
|
|
+/**
|
|
+ * struct rpmsg_srm_msg - message structure used between processors to
|
|
+ * dynamically update resources configuration
|
|
+ * @message_type: type of the message: see RPROC_SRM_MSG*
|
|
+ * @device_id: an identifier specifying the device owning the resources.
|
|
+ * This is implementation dependent. As example it may be the
|
|
+ * device name or the device address.
|
|
+ * @rsc_type: the type of the resource for which the configuration applies:
|
|
+ * see RPROC_SRM_RSC*
|
|
+ * @clock_cfg: clock config - relevant if &rsc_type is RPROC_SRM_RSC_CLOCK
|
|
+ * @regu_cfg: regulator config - relevant if &rsc_type is RPROC_SRM_RSC_REGU
|
|
+ */
|
|
+struct rpmsg_srm_msg {
|
|
+ u32 message_type;
|
|
+ u8 device_id[32];
|
|
+ u32 rsc_type;
|
|
+ union {
|
|
+ struct clock_cfg clock_cfg;
|
|
+ struct regu_cfg regu_cfg;
|
|
+ };
|
|
+};
|
|
+
|
|
+struct rpmsg_srm_msg_desc {
|
|
+ struct rpmsg_endpoint *ept;
|
|
+ struct rpmsg_srm_msg *msg;
|
|
+};
|
|
+
|
|
+struct rproc_srm_core;
|
|
+
|
|
+int rproc_srm_core_register_notifier(struct rproc_srm_core *core,
|
|
+ struct notifier_block *nb);
|
|
+int rproc_srm_core_unregister_notifier(struct rproc_srm_core *core,
|
|
+ struct notifier_block *nb);
|
|
+int rpmsg_srm_send(struct rpmsg_endpoint *ept, struct rpmsg_srm_msg *msg);
|
|
+
|
|
+#endif
|
|
diff --git a/drivers/remoteproc/rproc_srm_dev.c b/drivers/remoteproc/rproc_srm_dev.c
|
|
new file mode 100644
|
|
index 000000000000..e47654af3e3a
|
|
--- /dev/null
|
|
+++ b/drivers/remoteproc/rproc_srm_dev.c
|
|
@@ -0,0 +1,744 @@
|
|
+// SPDX-License-Identifier: GPL-2.0
|
|
+/*
|
|
+ * Copyright (C) STMicroelectronics 2018 - All Rights Reserved
|
|
+ * Author: Fabien Dessenne <fabien.dessenne@st.com> for STMicroelectronics.
|
|
+ */
|
|
+
|
|
+#include <linux/clk.h>
|
|
+#include <linux/clk-provider.h>
|
|
+#include <linux/component.h>
|
|
+#include <linux/interrupt.h>
|
|
+#include <linux/kernel.h>
|
|
+#include <linux/module.h>
|
|
+#include <linux/of.h>
|
|
+#include <linux/of_irq.h>
|
|
+#include <linux/platform_device.h>
|
|
+#include <linux/regulator/consumer.h>
|
|
+#include <linux/remoteproc.h>
|
|
+
|
|
+#include "rproc_srm_core.h"
|
|
+
|
|
+struct rproc_srm_clk_info {
|
|
+ struct list_head list;
|
|
+ unsigned int index;
|
|
+ struct clk *clk;
|
|
+ const char *name;
|
|
+ bool parent_enabled;
|
|
+};
|
|
+
|
|
+struct rproc_srm_regu_info {
|
|
+ struct list_head list;
|
|
+ unsigned int index;
|
|
+ struct regulator *regu;
|
|
+ const char *name;
|
|
+ bool enabled;
|
|
+};
|
|
+
|
|
+struct rproc_srm_irq_info {
|
|
+ struct list_head list;
|
|
+ unsigned int index;
|
|
+ char *name;
|
|
+ int irq;
|
|
+ bool enabled;
|
|
+};
|
|
+
|
|
+struct rproc_srm_dev {
|
|
+ struct device *dev;
|
|
+ struct rproc_srm_core *core;
|
|
+ struct notifier_block nb;
|
|
+ bool early_boot;
|
|
+
|
|
+ struct list_head clk_list_head;
|
|
+ struct list_head regu_list_head;
|
|
+ struct list_head irq_list_head;
|
|
+};
|
|
+
|
|
+/* Irqs */
|
|
+static void rproc_srm_dev_irqs_put(struct rproc_srm_dev *rproc_srm_dev)
|
|
+{
|
|
+ struct device *dev = rproc_srm_dev->dev;
|
|
+ struct rproc_srm_irq_info *i, *tmp;
|
|
+
|
|
+ list_for_each_entry_safe(i, tmp, &rproc_srm_dev->irq_list_head, list) {
|
|
+ devm_free_irq(dev, i->irq, NULL);
|
|
+ dev_dbg(dev, "Put irq %d (%s)\n", i->irq, i->name);
|
|
+ list_del(&i->list);
|
|
+ }
|
|
+}
|
|
+
|
|
+static irqreturn_t rproc_srm_dev_irq_handler(int irq, void *dev)
|
|
+{
|
|
+ dev_warn(dev, "Spurious interrupt\n");
|
|
+ return IRQ_HANDLED;
|
|
+}
|
|
+
|
|
+static int rproc_srm_dev_irqs_get(struct rproc_srm_dev *rproc_srm_dev)
|
|
+{
|
|
+ struct device *dev = rproc_srm_dev->dev;
|
|
+ struct platform_device *pdev = to_platform_device(dev);
|
|
+ struct device_node *np = dev->of_node;
|
|
+ struct rproc_srm_irq_info *info;
|
|
+ const char *name;
|
|
+ int nr, ret, irq;
|
|
+ unsigned int i;
|
|
+
|
|
+ if (!np)
|
|
+ return 0;
|
|
+
|
|
+ nr = platform_irq_count(pdev);
|
|
+ if (!nr)
|
|
+ return 0;
|
|
+
|
|
+ if (rproc_srm_dev->early_boot)
|
|
+ /*
|
|
+ * Do not overwrite the irq configuration.
|
|
+ * No need to parse irq from DT since the resource manager does
|
|
+ * not offer any service to update the irq config.
|
|
+ */
|
|
+ return 0;
|
|
+
|
|
+ for (i = 0; i < nr; i++) {
|
|
+ info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
|
|
+ if (!info) {
|
|
+ ret = -ENOMEM;
|
|
+ goto err;
|
|
+ }
|
|
+
|
|
+ irq = platform_get_irq(pdev, i);
|
|
+ if (irq <= 0) {
|
|
+ ret = irq;
|
|
+ dev_err(dev, "Failed to get irq (%d)\n", ret);
|
|
+ goto err;
|
|
+ }
|
|
+
|
|
+ info->irq = irq;
|
|
+
|
|
+ /* Register a dummy irq handler as it is not used by Linux */
|
|
+ ret = devm_request_irq(dev, info->irq,
|
|
+ rproc_srm_dev_irq_handler, 0,
|
|
+ dev_name(dev), NULL);
|
|
+ if (ret) {
|
|
+ dev_err(dev, "Failed to request irq (%d)\n", ret);
|
|
+ goto err;
|
|
+ }
|
|
+
|
|
+ /*
|
|
+ * Disable IRQ. Since it is used by the remote processor we
|
|
+ * must not use the 'irq lazy disable' optimization
|
|
+ */
|
|
+ irq_set_status_flags(info->irq, IRQ_DISABLE_UNLAZY);
|
|
+ disable_irq(info->irq);
|
|
+
|
|
+ /* Note: "interrupt-names" is optional */
|
|
+ if (!of_property_read_string_index(np, "interrupt-names", i,
|
|
+ &name))
|
|
+ info->name = devm_kstrdup(dev, name, GFP_KERNEL);
|
|
+ else
|
|
+ info->name = devm_kstrdup(dev, "", GFP_KERNEL);
|
|
+
|
|
+ info->index = i;
|
|
+
|
|
+ list_add_tail(&info->list, &rproc_srm_dev->irq_list_head);
|
|
+ dev_dbg(dev, "Got irq %d (%s)\n", info->irq, info->name);
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+
|
|
+err:
|
|
+ rproc_srm_dev_irqs_put(rproc_srm_dev);
|
|
+
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+/* Clocks */
|
|
+static void rproc_srm_dev_clocks_unsetup(struct rproc_srm_dev *rproc_srm_dev)
|
|
+{
|
|
+ struct rproc_srm_clk_info *c;
|
|
+
|
|
+ list_for_each_entry(c, &rproc_srm_dev->clk_list_head, list) {
|
|
+ if (!c->parent_enabled)
|
|
+ continue;
|
|
+
|
|
+ clk_disable_unprepare(clk_get_parent(c->clk));
|
|
+ c->parent_enabled = false;
|
|
+ dev_dbg(rproc_srm_dev->dev, "clk %d (%s) unsetup\n",
|
|
+ c->index, c->name);
|
|
+ }
|
|
+}
|
|
+
|
|
+static int rproc_srm_dev_clocks_setup(struct rproc_srm_dev *rproc_srm_dev)
|
|
+{
|
|
+ struct rproc_srm_clk_info *c;
|
|
+ int ret;
|
|
+
|
|
+ /*
|
|
+ * Prepare and enable the parent clocks.
|
|
+ * Since the clock tree is under the exclusive control of the master
|
|
+ * processor, we need to configure the clock tree of the targeted clock.
|
|
+ * We do not want to enable the clock itself, which is under the
|
|
+ * responsibility of the remote processor.
|
|
+ * Hence we prepare and enable the parent clock.
|
|
+ */
|
|
+
|
|
+ list_for_each_entry(c, &rproc_srm_dev->clk_list_head, list) {
|
|
+ if (c->parent_enabled)
|
|
+ continue;
|
|
+
|
|
+ ret = clk_prepare_enable(clk_get_parent(c->clk));
|
|
+ if (ret) {
|
|
+ dev_err(rproc_srm_dev->dev,
|
|
+ "clk %d (%s) parent enable failed\n",
|
|
+ c->index, c->name);
|
|
+ rproc_srm_dev_clocks_unsetup(rproc_srm_dev);
|
|
+ return ret;
|
|
+ }
|
|
+ c->parent_enabled = true;
|
|
+ dev_dbg(rproc_srm_dev->dev, "clk %d (%s) parent enabled\n",
|
|
+ c->index, c->name);
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static struct rproc_srm_clk_info
|
|
+ *rproc_srm_dev_clock_find(struct rproc_srm_dev *rproc_srm_dev,
|
|
+ struct clock_cfg *cfg)
|
|
+{
|
|
+ struct rproc_srm_clk_info *ci;
|
|
+
|
|
+ /* Search by index (if valid value) otherwise search by name */
|
|
+ list_for_each_entry(ci, &rproc_srm_dev->clk_list_head, list) {
|
|
+ if (cfg->index != U32_MAX) {
|
|
+ if (ci->index == cfg->index)
|
|
+ return ci;
|
|
+ } else {
|
|
+ if (!strcmp(ci->name, cfg->name))
|
|
+ return ci;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ return NULL;
|
|
+}
|
|
+
|
|
+static int rproc_srm_dev_clock_set_cfg(struct rproc_srm_dev *rproc_srm_dev,
|
|
+ struct clock_cfg *cfg)
|
|
+{
|
|
+ struct rproc_srm_clk_info *c;
|
|
+ struct device *dev = rproc_srm_dev->dev;
|
|
+ int ret;
|
|
+
|
|
+ c = rproc_srm_dev_clock_find(rproc_srm_dev, cfg);
|
|
+
|
|
+ if (!c) {
|
|
+ dev_err(dev, "unknown clock (id %d)\n", cfg->index);
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
+ if (cfg->rate && clk_get_rate(c->clk) != cfg->rate) {
|
|
+ ret = clk_set_rate(c->clk, cfg->rate);
|
|
+ if (ret) {
|
|
+ dev_err(dev, "clk set rate failed\n");
|
|
+ return ret;
|
|
+ }
|
|
+
|
|
+ dev_dbg(dev, "clk %d (%s) rate = %d\n", c->index, c->name,
|
|
+ cfg->rate);
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int rproc_srm_dev_clock_get_cfg(struct rproc_srm_dev *rproc_srm_dev,
|
|
+ struct clock_cfg *cfg)
|
|
+{
|
|
+ struct rproc_srm_clk_info *c;
|
|
+
|
|
+ c = rproc_srm_dev_clock_find(rproc_srm_dev, cfg);
|
|
+ if (!c) {
|
|
+ dev_err(rproc_srm_dev->dev, "unknown clock (%d)\n", cfg->index);
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
+ strscpy(cfg->name, c->name, sizeof(cfg->name));
|
|
+ cfg->index = c->index;
|
|
+ cfg->rate = (u32)clk_get_rate(c->clk);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static void rproc_srm_dev_clocks_put(struct rproc_srm_dev *rproc_srm_dev)
|
|
+{
|
|
+ struct device *dev = rproc_srm_dev->dev;
|
|
+ struct rproc_srm_clk_info *c, *tmp;
|
|
+
|
|
+ list_for_each_entry_safe(c, tmp, &rproc_srm_dev->clk_list_head, list) {
|
|
+ clk_put(c->clk);
|
|
+ dev_dbg(dev, "put clock %d (%s)\n", c->index, c->name);
|
|
+ list_del(&c->list);
|
|
+ }
|
|
+}
|
|
+
|
|
+static int rproc_srm_dev_clocks_get(struct rproc_srm_dev *rproc_srm_dev)
|
|
+{
|
|
+ struct device *dev = rproc_srm_dev->dev;
|
|
+ struct device_node *np = dev->of_node;
|
|
+ struct rproc_srm_clk_info *c;
|
|
+ const char *name;
|
|
+ int nb_c, ret;
|
|
+ unsigned int i;
|
|
+
|
|
+ if (!np)
|
|
+ return 0;
|
|
+
|
|
+ nb_c = of_clk_get_parent_count(np);
|
|
+ if (!nb_c)
|
|
+ return 0;
|
|
+
|
|
+ for (i = 0; i < nb_c; i++) {
|
|
+ c = devm_kzalloc(dev, sizeof(*c), GFP_KERNEL);
|
|
+ if (!c) {
|
|
+ ret = -ENOMEM;
|
|
+ goto err;
|
|
+ }
|
|
+
|
|
+ c->clk = of_clk_get(np, i);
|
|
+ if (IS_ERR(c->clk)) {
|
|
+ dev_err(dev, "clock %d KO (%ld)\n", i,
|
|
+ PTR_ERR(c->clk));
|
|
+ ret = -ENOMEM;
|
|
+ goto err;
|
|
+ }
|
|
+
|
|
+ /* Note: "clock-names" is optional */
|
|
+ if (!of_property_read_string_index(np, "clock-names", i,
|
|
+ &name))
|
|
+ c->name = devm_kstrdup(dev, name, GFP_KERNEL);
|
|
+ else
|
|
+ c->name = devm_kstrdup(dev, "", GFP_KERNEL);
|
|
+
|
|
+ c->index = i;
|
|
+
|
|
+ list_add_tail(&c->list, &rproc_srm_dev->clk_list_head);
|
|
+ dev_dbg(dev, "got clock %d (%s)\n", c->index, c->name);
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+
|
|
+err:
|
|
+ rproc_srm_dev_clocks_put(rproc_srm_dev);
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+/* Regulators */
|
|
+static void rproc_srm_dev_regus_unsetup(struct rproc_srm_dev *rproc_srm_dev)
|
|
+{
|
|
+ struct rproc_srm_regu_info *r;
|
|
+ struct device *dev = rproc_srm_dev->dev;
|
|
+
|
|
+ list_for_each_entry(r, &rproc_srm_dev->regu_list_head, list) {
|
|
+ if (!r->enabled)
|
|
+ continue;
|
|
+
|
|
+ if (regulator_disable(r->regu)) {
|
|
+ dev_warn(dev, "regu %d disabled failed\n", r->index);
|
|
+ continue;
|
|
+ }
|
|
+
|
|
+ r->enabled = false;
|
|
+ dev_dbg(dev, "regu %d (%s) disabled\n", r->index, r->name);
|
|
+ }
|
|
+}
|
|
+
|
|
+static int rproc_srm_dev_regus_setup(struct rproc_srm_dev *rproc_srm_dev)
|
|
+{
|
|
+ struct rproc_srm_regu_info *r;
|
|
+ int ret;
|
|
+
|
|
+ /* Enable all the regulators */
|
|
+ list_for_each_entry(r, &rproc_srm_dev->regu_list_head, list) {
|
|
+ if (r->enabled)
|
|
+ continue;
|
|
+
|
|
+ /* in early_boot mode sync on hw */
|
|
+ if (rproc_srm_dev->early_boot && !regulator_is_enabled(r->regu))
|
|
+ continue;
|
|
+
|
|
+ ret = regulator_enable(r->regu);
|
|
+ if (ret) {
|
|
+ dev_err(rproc_srm_dev->dev, "regu %d (%s) failed\n",
|
|
+ r->index, r->name);
|
|
+ rproc_srm_dev_regus_unsetup(rproc_srm_dev);
|
|
+ return ret;
|
|
+ }
|
|
+ r->enabled = true;
|
|
+ dev_dbg(rproc_srm_dev->dev, "regu %d (%s) enabled\n",
|
|
+ r->index, r->name);
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static struct rproc_srm_regu_info
|
|
+ *rproc_srm_dev_regu_find(struct rproc_srm_dev *rproc_srm_dev,
|
|
+ struct regu_cfg *cfg)
|
|
+{
|
|
+ struct rproc_srm_regu_info *ri;
|
|
+
|
|
+ list_for_each_entry(ri, &rproc_srm_dev->regu_list_head, list) {
|
|
+ if (cfg->index != U32_MAX) {
|
|
+ if (ri->index == cfg->index)
|
|
+ return ri;
|
|
+ } else {
|
|
+ if (!strcmp(ri->name, cfg->name))
|
|
+ return ri;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ return NULL;
|
|
+}
|
|
+
|
|
+static int rproc_srm_dev_regu_set_cfg(struct rproc_srm_dev *rproc_srm_dev,
|
|
+ struct regu_cfg *cfg)
|
|
+{
|
|
+ struct rproc_srm_regu_info *r;
|
|
+ struct device *dev = rproc_srm_dev->dev;
|
|
+ int ret;
|
|
+
|
|
+ r = rproc_srm_dev_regu_find(rproc_srm_dev, cfg);
|
|
+ if (!r) {
|
|
+ dev_err(dev, "unknown regu (%d)\n", cfg->index);
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
+ if (!r->enabled && cfg->enable) {
|
|
+ ret = regulator_enable(r->regu);
|
|
+ if (ret) {
|
|
+ dev_err(dev, "regu %d enable failed\n", r->index);
|
|
+ return ret;
|
|
+ }
|
|
+ r->enabled = true;
|
|
+ dev_dbg(dev, "regu %d (%s) enabled\n", r->index, r->name);
|
|
+ } else if (r->enabled && !cfg->enable) {
|
|
+ ret = regulator_disable(r->regu);
|
|
+ if (ret) {
|
|
+ dev_err(dev, "regu %d disable failed\n", r->index);
|
|
+ return ret;
|
|
+ }
|
|
+ r->enabled = false;
|
|
+ dev_dbg(dev, "regu %d (%s) disabled\n", r->index, r->name);
|
|
+ }
|
|
+
|
|
+ if (cfg->min_voltage_mv || cfg->max_voltage_mv) {
|
|
+ ret = regulator_set_voltage(r->regu, cfg->min_voltage_mv * 1000,
|
|
+ cfg->max_voltage_mv * 1000);
|
|
+ if (ret) {
|
|
+ dev_err(dev, "regu %d set voltage failed\n", r->index);
|
|
+ return ret;
|
|
+ }
|
|
+
|
|
+ dev_dbg(dev, "regu %d (%s) voltage = [%d - %d] mv\n", r->index,
|
|
+ r->name, cfg->min_voltage_mv, cfg->max_voltage_mv);
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int rproc_srm_dev_regu_get_cfg(struct rproc_srm_dev *rproc_srm_dev,
|
|
+ struct regu_cfg *cfg)
|
|
+{
|
|
+ struct rproc_srm_regu_info *r;
|
|
+ struct device *dev = rproc_srm_dev->dev;
|
|
+ int v;
|
|
+
|
|
+ r = rproc_srm_dev_regu_find(rproc_srm_dev, cfg);
|
|
+ if (!r) {
|
|
+ dev_err(dev, "unknown regu (%d)\n", cfg->index);
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
+ strscpy(cfg->name, r->name, sizeof(cfg->name));
|
|
+ cfg->index = r->index;
|
|
+ cfg->enable = r->enabled;
|
|
+ cfg->min_voltage_mv = 0;
|
|
+ cfg->max_voltage_mv = 0;
|
|
+
|
|
+ v = regulator_get_voltage(r->regu);
|
|
+ if (v < 0) {
|
|
+ dev_warn(dev, "cannot get %s voltage\n", r->name);
|
|
+ cfg->curr_voltage_mv = 0;
|
|
+ } else {
|
|
+ cfg->curr_voltage_mv = v / 1000;
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static void rproc_srm_dev_regus_put(struct rproc_srm_dev *rproc_srm_dev)
|
|
+{
|
|
+ struct device *dev = rproc_srm_dev->dev;
|
|
+ struct rproc_srm_regu_info *r, *tmp;
|
|
+
|
|
+ list_for_each_entry_safe(r, tmp, &rproc_srm_dev->regu_list_head, list) {
|
|
+ devm_regulator_put(r->regu);
|
|
+ dev_dbg(dev, "put regu %d (%s)\n", r->index, r->name);
|
|
+ list_del(&r->list);
|
|
+ }
|
|
+}
|
|
+
|
|
+static int rproc_srm_dev_regus_get(struct rproc_srm_dev *rproc_srm_dev)
|
|
+{
|
|
+ struct device *dev = rproc_srm_dev->dev;
|
|
+ struct device_node *np = dev->of_node;
|
|
+ struct property *p;
|
|
+ const char *n;
|
|
+ char *name;
|
|
+ struct rproc_srm_regu_info *r;
|
|
+ int ret, nb_s = 0;
|
|
+
|
|
+ if (!np)
|
|
+ return 0;
|
|
+
|
|
+ for_each_property_of_node(np, p) {
|
|
+ n = strstr(p->name, "-supply");
|
|
+ if (!n || n == p->name)
|
|
+ continue;
|
|
+
|
|
+ r = devm_kzalloc(dev, sizeof(*r), GFP_KERNEL);
|
|
+ if (!r) {
|
|
+ ret = -ENOMEM;
|
|
+ goto err_list;
|
|
+ }
|
|
+
|
|
+ name = devm_kstrdup(dev, p->name, GFP_KERNEL);
|
|
+ name[strlen(p->name) - strlen("-supply")] = '\0';
|
|
+ r->name = name;
|
|
+
|
|
+ r->regu = devm_regulator_get(dev, r->name);
|
|
+ if (IS_ERR(r->regu)) {
|
|
+ dev_err(dev, "cannot get regu %s\n", r->name);
|
|
+ ret = -EINVAL;
|
|
+ goto err_list;
|
|
+ }
|
|
+
|
|
+ r->index = nb_s++;
|
|
+
|
|
+ list_add_tail(&r->list, &rproc_srm_dev->regu_list_head);
|
|
+ dev_dbg(dev, "got regu %d (%s)\n", r->index, r->name);
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+
|
|
+err_list:
|
|
+ rproc_srm_dev_regus_put(rproc_srm_dev);
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+/* Core */
|
|
+static int rproc_srm_dev_notify_cb(struct notifier_block *nb, unsigned long evt,
|
|
+ void *data)
|
|
+{
|
|
+ struct rproc_srm_dev *rproc_srm_dev =
|
|
+ container_of(nb, struct rproc_srm_dev, nb);
|
|
+ struct device *dev = rproc_srm_dev->dev;
|
|
+ struct rpmsg_srm_msg_desc *desc;
|
|
+ struct rpmsg_srm_msg *i, o;
|
|
+ int ret = 0;
|
|
+
|
|
+ dev_dbg(dev, "%s\n", __func__);
|
|
+
|
|
+ desc = (struct rpmsg_srm_msg_desc *)data;
|
|
+ i = desc->msg;
|
|
+ o = *i;
|
|
+
|
|
+ /* Check if 'device_id' (name / addr ) matches this device */
|
|
+ if (!strstr(dev_name(dev), i->device_id))
|
|
+ return NOTIFY_DONE;
|
|
+
|
|
+ switch (i->message_type) {
|
|
+ case RPROC_SRM_MSG_SETCONFIG:
|
|
+ switch (i->rsc_type) {
|
|
+ case RPROC_SRM_RSC_CLOCK:
|
|
+ ret = rproc_srm_dev_clock_set_cfg(rproc_srm_dev,
|
|
+ &i->clock_cfg);
|
|
+ if (!ret)
|
|
+ ret = rproc_srm_dev_clock_get_cfg(rproc_srm_dev,
|
|
+ &o.clock_cfg);
|
|
+ break;
|
|
+ case RPROC_SRM_RSC_REGU:
|
|
+ ret = rproc_srm_dev_regu_set_cfg(rproc_srm_dev,
|
|
+ &i->regu_cfg);
|
|
+ if (!ret)
|
|
+ ret = rproc_srm_dev_regu_get_cfg(rproc_srm_dev,
|
|
+ &o.regu_cfg);
|
|
+ break;
|
|
+ default:
|
|
+ dev_warn(dev, "bad rsc type (%d)\n", i->rsc_type);
|
|
+ ret = -EINVAL;
|
|
+ break;
|
|
+ }
|
|
+ break;
|
|
+ case RPROC_SRM_MSG_GETCONFIG:
|
|
+ switch (i->rsc_type) {
|
|
+ case RPROC_SRM_RSC_CLOCK:
|
|
+ ret = rproc_srm_dev_clock_get_cfg(rproc_srm_dev,
|
|
+ &o.clock_cfg);
|
|
+ break;
|
|
+ case RPROC_SRM_RSC_REGU:
|
|
+ ret = rproc_srm_dev_regu_get_cfg(rproc_srm_dev,
|
|
+ &o.regu_cfg);
|
|
+ break;
|
|
+ default:
|
|
+ dev_warn(dev, "bad rsc type (%d)\n", i->rsc_type);
|
|
+ ret = -EINVAL;
|
|
+ break;
|
|
+ }
|
|
+ break;
|
|
+ default:
|
|
+ dev_warn(dev, "bad msg type (%d)\n", i->message_type);
|
|
+ ret = -EINVAL;
|
|
+ break;
|
|
+ }
|
|
+
|
|
+ /* Send return msg */
|
|
+ if (ret)
|
|
+ o.message_type = RPROC_SRM_MSG_ERROR;
|
|
+
|
|
+ ret = rpmsg_srm_send(desc->ept, &o);
|
|
+
|
|
+ return ret ? NOTIFY_BAD : NOTIFY_STOP;
|
|
+}
|
|
+
|
|
+static void
|
|
+rproc_srm_dev_unbind(struct device *dev, struct device *master, void *data)
|
|
+{
|
|
+ struct rproc_srm_dev *rproc_srm_dev = dev_get_drvdata(dev);
|
|
+
|
|
+ dev_dbg(dev, "%s\n", __func__);
|
|
+
|
|
+ rproc_srm_dev_regus_unsetup(rproc_srm_dev);
|
|
+ rproc_srm_dev_clocks_unsetup(rproc_srm_dev);
|
|
+
|
|
+ /* For IRQs: nothing to unsetup */
|
|
+}
|
|
+
|
|
+static int
|
|
+rproc_srm_dev_bind(struct device *dev, struct device *master, void *data)
|
|
+{
|
|
+ struct rproc_srm_dev *rproc_srm_dev = dev_get_drvdata(dev);
|
|
+ int ret;
|
|
+
|
|
+ dev_dbg(dev, "%s\n", __func__);
|
|
+
|
|
+ ret = rproc_srm_dev_clocks_setup(rproc_srm_dev);
|
|
+ if (ret)
|
|
+ return ret;
|
|
+
|
|
+ ret = rproc_srm_dev_regus_setup(rproc_srm_dev);
|
|
+ if (ret)
|
|
+ return ret;
|
|
+
|
|
+ /* For IRQs: nothing to setup */
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static const struct component_ops rproc_srm_dev_ops = {
|
|
+ .bind = rproc_srm_dev_bind,
|
|
+ .unbind = rproc_srm_dev_unbind,
|
|
+};
|
|
+
|
|
+static int rproc_srm_dev_probe(struct platform_device *pdev)
|
|
+{
|
|
+ struct device *dev = &pdev->dev;
|
|
+ struct rproc_srm_dev *rproc_srm_dev;
|
|
+ struct rproc *rproc;
|
|
+ int ret;
|
|
+
|
|
+ dev_dbg(dev, "%s for node %s\n", __func__, dev->of_node->name);
|
|
+
|
|
+ rproc_srm_dev = devm_kzalloc(dev, sizeof(struct rproc_srm_dev),
|
|
+ GFP_KERNEL);
|
|
+ if (!rproc_srm_dev)
|
|
+ return -ENOMEM;
|
|
+
|
|
+ rproc_srm_dev->dev = dev;
|
|
+ rproc = (struct rproc *)dev_get_drvdata(dev->parent->parent);
|
|
+ rproc_srm_dev->early_boot = (rproc->state == RPROC_DETACHED);
|
|
+ rproc_srm_dev->core = dev_get_drvdata(dev->parent);
|
|
+
|
|
+ INIT_LIST_HEAD(&rproc_srm_dev->clk_list_head);
|
|
+ INIT_LIST_HEAD(&rproc_srm_dev->regu_list_head);
|
|
+ INIT_LIST_HEAD(&rproc_srm_dev->irq_list_head);
|
|
+
|
|
+ /* Get clocks, regu and irqs */
|
|
+ ret = rproc_srm_dev_clocks_get(rproc_srm_dev);
|
|
+ if (ret)
|
|
+ return ret;
|
|
+
|
|
+ ret = rproc_srm_dev_regus_get(rproc_srm_dev);
|
|
+ if (ret)
|
|
+ goto err_get;
|
|
+
|
|
+ ret = rproc_srm_dev_irqs_get(rproc_srm_dev);
|
|
+ if (ret)
|
|
+ goto err_get;
|
|
+
|
|
+ rproc_srm_dev->nb.notifier_call = rproc_srm_dev_notify_cb;
|
|
+ ret = rproc_srm_core_register_notifier(rproc_srm_dev->core,
|
|
+ &rproc_srm_dev->nb);
|
|
+ if (ret)
|
|
+ goto err_register;
|
|
+
|
|
+ dev_set_drvdata(dev, rproc_srm_dev);
|
|
+
|
|
+ return component_add(dev, &rproc_srm_dev_ops);
|
|
+
|
|
+err_register:
|
|
+ rproc_srm_core_unregister_notifier(rproc_srm_dev->core,
|
|
+ &rproc_srm_dev->nb);
|
|
+err_get:
|
|
+ rproc_srm_dev_irqs_put(rproc_srm_dev);
|
|
+ rproc_srm_dev_regus_put(rproc_srm_dev);
|
|
+ rproc_srm_dev_clocks_put(rproc_srm_dev);
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+static int rproc_srm_dev_remove(struct platform_device *pdev)
|
|
+{
|
|
+ struct device *dev = &pdev->dev;
|
|
+ struct rproc_srm_dev *rproc_srm_dev = dev_get_drvdata(dev);
|
|
+
|
|
+ dev_dbg(dev, "%s\n", __func__);
|
|
+
|
|
+ component_del(dev, &rproc_srm_dev_ops);
|
|
+
|
|
+ rproc_srm_core_unregister_notifier(rproc_srm_dev->core,
|
|
+ &rproc_srm_dev->nb);
|
|
+
|
|
+ rproc_srm_dev_irqs_put(rproc_srm_dev);
|
|
+ rproc_srm_dev_regus_put(rproc_srm_dev);
|
|
+ rproc_srm_dev_clocks_put(rproc_srm_dev);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static const struct of_device_id rproc_srm_dev_match[] = {
|
|
+ { .compatible = "rproc-srm-dev", },
|
|
+ {},
|
|
+};
|
|
+
|
|
+MODULE_DEVICE_TABLE(of, rproc_srm_dev_match);
|
|
+
|
|
+static struct platform_driver rproc_srm_dev_driver = {
|
|
+ .probe = rproc_srm_dev_probe,
|
|
+ .remove = rproc_srm_dev_remove,
|
|
+ .driver = {
|
|
+ .name = "rproc-srm-dev",
|
|
+ .of_match_table = of_match_ptr(rproc_srm_dev_match),
|
|
+ },
|
|
+};
|
|
+
|
|
+module_platform_driver(rproc_srm_dev_driver);
|
|
+
|
|
+MODULE_AUTHOR("Fabien Dessenne <fabien.dessenne@st.com>");
|
|
+MODULE_DESCRIPTION("Remoteproc System Resource Manager driver - dev");
|
|
+MODULE_LICENSE("GPL v2");
|
|
diff --git a/drivers/remoteproc/stm32_rproc.c b/drivers/remoteproc/stm32_rproc.c
|
|
index aba3df1d1bf5..5c7cdeae917e 100644
|
|
--- a/drivers/remoteproc/stm32_rproc.c
|
|
+++ b/drivers/remoteproc/stm32_rproc.c
|
|
@@ -20,13 +20,11 @@
|
|
#include <linux/remoteproc.h>
|
|
#include <linux/reset.h>
|
|
#include <linux/slab.h>
|
|
+#include <linux/tee_remoteproc.h>
|
|
#include <linux/workqueue.h>
|
|
|
|
#include "remoteproc_internal.h"
|
|
|
|
-#define HOLD_BOOT 0
|
|
-#define RELEASE_BOOT 1
|
|
-
|
|
#define MBOX_NB_VQ 2
|
|
#define MBOX_NB_MBX 4
|
|
|
|
@@ -49,6 +47,13 @@
|
|
#define M4_STATE_STANDBY 4
|
|
#define M4_STATE_CRASH 5
|
|
|
|
+/*
|
|
+ * Define a default firmware index. In the future, a global list of
|
|
+ * firmwares may list platforms and their associated firmware(s).
|
|
+ */
|
|
+
|
|
+#define STM32_MP1_FW_ID 0
|
|
+
|
|
struct stm32_syscon {
|
|
struct regmap *map;
|
|
u32 reg;
|
|
@@ -79,7 +84,7 @@ struct stm32_mbox {
|
|
|
|
struct stm32_rproc {
|
|
struct reset_control *rst;
|
|
- struct stm32_syscon hold_boot;
|
|
+ struct reset_control *hold_boot;
|
|
struct stm32_syscon pdds;
|
|
struct stm32_syscon m4_state;
|
|
struct stm32_syscon rsctbl;
|
|
@@ -88,7 +93,8 @@ struct stm32_rproc {
|
|
struct stm32_rproc_mem *rmems;
|
|
struct stm32_mbox mb[MBOX_NB_MBX];
|
|
struct workqueue_struct *workqueue;
|
|
- bool secured_soc;
|
|
+ bool fw_loaded;
|
|
+ struct tee_rproc *trproc;
|
|
void __iomem *rsc_va;
|
|
};
|
|
|
|
@@ -208,6 +214,139 @@ static int stm32_rproc_mbox_idx(struct rproc *rproc, const unsigned char *name)
|
|
return -EINVAL;
|
|
}
|
|
|
|
+static void stm32_rproc_request_shutdown(struct rproc *rproc)
|
|
+{
|
|
+ struct stm32_rproc *ddata = rproc->priv;
|
|
+ int err, dummy_data, idx;
|
|
+
|
|
+ /* Request shutdown of the remote processor */
|
|
+ if (rproc->state != RPROC_OFFLINE && rproc->state != RPROC_CRASHED) {
|
|
+ idx = stm32_rproc_mbox_idx(rproc, STM32_MBX_SHUTDOWN);
|
|
+ if (idx >= 0 && ddata->mb[idx].chan) {
|
|
+ /* Dummy data is sent to allow blocking on transmit. */
|
|
+ err = mbox_send_message(ddata->mb[idx].chan,
|
|
+ &dummy_data);
|
|
+ if (err < 0)
|
|
+ dev_warn(&rproc->dev, "warning: remote FW shutdown without ack\n");
|
|
+ }
|
|
+ }
|
|
+}
|
|
+
|
|
+static int stm32_rproc_release(struct rproc *rproc)
|
|
+{
|
|
+ struct stm32_rproc *ddata = rproc->priv;
|
|
+ unsigned int err = 0;
|
|
+
|
|
+ /* To allow platform Standby power mode, set remote proc Deep Sleep. */
|
|
+ if (ddata->pdds.map) {
|
|
+ err = regmap_update_bits(ddata->pdds.map, ddata->pdds.reg,
|
|
+ ddata->pdds.mask, 1);
|
|
+ if (err) {
|
|
+ dev_err(&rproc->dev, "failed to set pdds\n");
|
|
+ return err;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ /* Update coprocessor state to OFF if available. */
|
|
+ if (ddata->m4_state.map) {
|
|
+ err = regmap_update_bits(ddata->m4_state.map,
|
|
+ ddata->m4_state.reg,
|
|
+ ddata->m4_state.mask,
|
|
+ M4_STATE_OFF);
|
|
+ if (err) {
|
|
+ dev_err(&rproc->dev, "failed to set copro state\n");
|
|
+ return err;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ return err;
|
|
+}
|
|
+
|
|
+static int stm32_rproc_tee_elf_sanity_check(struct rproc *rproc,
|
|
+ const struct firmware *fw)
|
|
+{
|
|
+ struct stm32_rproc *ddata = rproc->priv;
|
|
+ unsigned int ret = 0;
|
|
+
|
|
+ if (rproc->state == RPROC_DETACHED)
|
|
+ return 0;
|
|
+
|
|
+ ret = tee_rproc_load_fw(ddata->trproc, fw);
|
|
+ if (!ret)
|
|
+ ddata->fw_loaded = true;
|
|
+
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+static int stm32_rproc_tee_elf_load(struct rproc *rproc,
|
|
+ const struct firmware *fw)
|
|
+{
|
|
+ struct stm32_rproc *ddata = rproc->priv;
|
|
+ unsigned int ret;
|
|
+
|
|
+ /*
|
|
+ * This function can be called by remote proc for recovery
|
|
+ * without the sanity check. In that case the firmware must be loaded
|
|
+ * here; otherwise nothing is done, as the firmware was already loaded
|
|
+ * during the sanity check so that the resource table could be parsed.
|
|
+ */
|
|
+ if (ddata->fw_loaded)
|
|
+ return 0;
|
|
+
|
|
+ ret = tee_rproc_load_fw(ddata->trproc, fw);
|
|
+ if (ret)
|
|
+ return ret;
|
|
+ ddata->fw_loaded = true;
|
|
+
|
|
+ /* update the resource table parameters */
|
|
+ if (rproc_tee_get_rsc_table(ddata->trproc)) {
|
|
+ /* no resource table: reset the related fields */
|
|
+ rproc->cached_table = NULL;
|
|
+ rproc->table_ptr = NULL;
|
|
+ rproc->table_sz = 0;
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static struct resource_table *
|
|
+stm32_rproc_tee_elf_find_loaded_rsc_table(struct rproc *rproc,
|
|
+ const struct firmware *fw)
|
|
+{
|
|
+ struct stm32_rproc *ddata = rproc->priv;
|
|
+
|
|
+ return tee_rproc_get_loaded_rsc_table(ddata->trproc);
|
|
+}
|
|
+
|
|
+static int stm32_rproc_tee_start(struct rproc *rproc)
|
|
+{
|
|
+ struct stm32_rproc *ddata = rproc->priv;
|
|
+
|
|
+ return tee_rproc_start(ddata->trproc);
|
|
+}
|
|
+
|
|
+static int stm32_rproc_tee_attach(struct rproc *rproc)
|
|
+{
|
|
+ /* Nothing to do, remote proc already started by the secure context */
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int stm32_rproc_tee_stop(struct rproc *rproc)
|
|
+{
|
|
+ struct stm32_rproc *ddata = rproc->priv;
|
|
+ int err;
|
|
+
|
|
+ stm32_rproc_request_shutdown(rproc);
|
|
+
|
|
+ err = tee_rproc_stop(ddata->trproc);
|
|
+ if (err)
|
|
+ return err;
|
|
+
|
|
+ ddata->fw_loaded = false;
|
|
+
|
|
+ return stm32_rproc_release(rproc);
|
|
+}
|
|
+
|
|
static int stm32_rproc_prepare(struct rproc *rproc)
|
|
{
|
|
struct device *dev = rproc->dev.parent;
|
|
@@ -270,7 +409,14 @@ static int stm32_rproc_prepare(struct rproc *rproc)
|
|
|
|
static int stm32_rproc_parse_fw(struct rproc *rproc, const struct firmware *fw)
|
|
{
|
|
- if (rproc_elf_load_rsc_table(rproc, fw))
|
|
+ struct stm32_rproc *ddata = rproc->priv;
|
|
+ int ret;
|
|
+
|
|
+ if (ddata->trproc)
|
|
+ ret = rproc_tee_get_rsc_table(ddata->trproc);
|
|
+ else
|
|
+ ret = rproc_elf_load_rsc_table(rproc, fw);
|
|
+ if (ret)
|
|
dev_warn(&rproc->dev, "no resource table found for this firmware\n");
|
|
|
|
return 0;
|
|
@@ -406,30 +552,6 @@ static int stm32_rproc_request_mbox(struct rproc *rproc)
|
|
return -EPROBE_DEFER;
|
|
}
|
|
|
|
-static int stm32_rproc_set_hold_boot(struct rproc *rproc, bool hold)
|
|
-{
|
|
- struct stm32_rproc *ddata = rproc->priv;
|
|
- struct stm32_syscon hold_boot = ddata->hold_boot;
|
|
- struct arm_smccc_res smc_res;
|
|
- int val, err;
|
|
-
|
|
- val = hold ? HOLD_BOOT : RELEASE_BOOT;
|
|
-
|
|
- if (IS_ENABLED(CONFIG_HAVE_ARM_SMCCC) && ddata->secured_soc) {
|
|
- arm_smccc_smc(STM32_SMC_RCC, STM32_SMC_REG_WRITE,
|
|
- hold_boot.reg, val, 0, 0, 0, 0, &smc_res);
|
|
- err = smc_res.a0;
|
|
- } else {
|
|
- err = regmap_update_bits(hold_boot.map, hold_boot.reg,
|
|
- hold_boot.mask, val);
|
|
- }
|
|
-
|
|
- if (err)
|
|
- dev_err(&rproc->dev, "failed to set hold boot\n");
|
|
-
|
|
- return err;
|
|
-}
|
|
-
|
|
static void stm32_rproc_add_coredump_trace(struct rproc *rproc)
|
|
{
|
|
struct rproc_debug_trace *trace;
|
|
@@ -469,18 +591,20 @@ static int stm32_rproc_start(struct rproc *rproc)
|
|
}
|
|
}
|
|
|
|
- err = stm32_rproc_set_hold_boot(rproc, false);
|
|
+ err = reset_control_deassert(ddata->hold_boot);
|
|
if (err)
|
|
return err;
|
|
|
|
- return stm32_rproc_set_hold_boot(rproc, true);
|
|
+ return reset_control_assert(ddata->hold_boot);
|
|
}
|
|
|
|
static int stm32_rproc_attach(struct rproc *rproc)
|
|
{
|
|
+ struct stm32_rproc *ddata = rproc->priv;
|
|
+
|
|
stm32_rproc_add_coredump_trace(rproc);
|
|
|
|
- return stm32_rproc_set_hold_boot(rproc, true);
|
|
+ return reset_control_assert(ddata->hold_boot);
|
|
}
|
|
|
|
static int stm32_rproc_detach(struct rproc *rproc)
|
|
@@ -497,27 +621,21 @@ static int stm32_rproc_detach(struct rproc *rproc)
|
|
}
|
|
|
|
/* Allow remote processor to auto-reboot */
|
|
- return stm32_rproc_set_hold_boot(rproc, false);
|
|
+ return reset_control_deassert(ddata->hold_boot);
|
|
}
|
|
|
|
static int stm32_rproc_stop(struct rproc *rproc)
|
|
{
|
|
struct stm32_rproc *ddata = rproc->priv;
|
|
- int err, idx;
|
|
+ int err;
|
|
|
|
- /* request shutdown of the remote processor */
|
|
- if (rproc->state != RPROC_OFFLINE) {
|
|
- idx = stm32_rproc_mbox_idx(rproc, STM32_MBX_SHUTDOWN);
|
|
- if (idx >= 0 && ddata->mb[idx].chan) {
|
|
- err = mbox_send_message(ddata->mb[idx].chan, "detach");
|
|
- if (err < 0)
|
|
- dev_warn(&rproc->dev, "warning: remote FW shutdown without ack\n");
|
|
- }
|
|
- }
|
|
+ stm32_rproc_request_shutdown(rproc);
|
|
|
|
- err = stm32_rproc_set_hold_boot(rproc, true);
|
|
- if (err)
|
|
+ err = reset_control_assert(ddata->hold_boot);
|
|
+ if (err) {
|
|
+ dev_err(&rproc->dev, "failed to assert the hold boot\n");
|
|
return err;
|
|
+ }
|
|
|
|
err = reset_control_assert(ddata->rst);
|
|
if (err) {
|
|
@@ -525,29 +643,8 @@ static int stm32_rproc_stop(struct rproc *rproc)
|
|
return err;
|
|
}
|
|
|
|
- /* to allow platform Standby power mode, set remote proc Deep Sleep */
|
|
- if (ddata->pdds.map) {
|
|
- err = regmap_update_bits(ddata->pdds.map, ddata->pdds.reg,
|
|
- ddata->pdds.mask, 1);
|
|
- if (err) {
|
|
- dev_err(&rproc->dev, "failed to set pdds\n");
|
|
- return err;
|
|
- }
|
|
- }
|
|
|
|
- /* update coprocessor state to OFF if available */
|
|
- if (ddata->m4_state.map) {
|
|
- err = regmap_update_bits(ddata->m4_state.map,
|
|
- ddata->m4_state.reg,
|
|
- ddata->m4_state.mask,
|
|
- M4_STATE_OFF);
|
|
- if (err) {
|
|
- dev_err(&rproc->dev, "failed to set copro state\n");
|
|
- return err;
|
|
- }
|
|
- }
|
|
-
|
|
- return 0;
|
|
+ return stm32_rproc_release(rproc);
|
|
}
|
|
|
|
static void stm32_rproc_kick(struct rproc *rproc, int vqid)
|
|
@@ -659,8 +756,21 @@ static const struct rproc_ops st_rproc_ops = {
|
|
.get_boot_addr = rproc_elf_get_boot_addr,
|
|
};
|
|
|
|
+static const struct rproc_ops st_rproc_tee_ops = {
|
|
+ .prepare = stm32_rproc_prepare,
|
|
+ .start = stm32_rproc_tee_start,
|
|
+ .stop = stm32_rproc_tee_stop,
|
|
+ .attach = stm32_rproc_tee_attach,
|
|
+ .kick = stm32_rproc_kick,
|
|
+ .parse_fw = stm32_rproc_parse_fw,
|
|
+ .find_loaded_rsc_table = stm32_rproc_tee_elf_find_loaded_rsc_table,
|
|
+ .get_loaded_rsc_table = stm32_rproc_get_loaded_rsc_table,
|
|
+ .sanity_check = stm32_rproc_tee_elf_sanity_check,
|
|
+ .load = stm32_rproc_tee_elf_load,
|
|
+};
|
|
+
|
|
static const struct of_device_id stm32_rproc_match[] = {
|
|
- { .compatible = "st,stm32mp1-m4" },
|
|
+ {.compatible = "st,stm32mp1-m4",},
|
|
{},
|
|
};
|
|
MODULE_DEVICE_TABLE(of, stm32_rproc_match);
|
|
@@ -692,8 +802,6 @@ static int stm32_rproc_parse_dt(struct platform_device *pdev,
|
|
{
|
|
struct device *dev = &pdev->dev;
|
|
struct device_node *np = dev->of_node;
|
|
- struct stm32_syscon tz;
|
|
- unsigned int tzen;
|
|
int err, irq;
|
|
|
|
irq = platform_get_irq(pdev, 0);
|
|
@@ -717,35 +825,15 @@ static int stm32_rproc_parse_dt(struct platform_device *pdev,
|
|
dev_info(dev, "wdg irq registered\n");
|
|
}
|
|
|
|
- ddata->rst = devm_reset_control_get_by_index(dev, 0);
|
|
+ ddata->rst = devm_reset_control_get(dev, "mcu_rst");
|
|
if (IS_ERR(ddata->rst))
|
|
return dev_err_probe(dev, PTR_ERR(ddata->rst),
|
|
"failed to get mcu_reset\n");
|
|
|
|
- /*
|
|
- * if platform is secured the hold boot bit must be written by
|
|
- * smc call and read normally.
|
|
- * if not secure the hold boot bit could be read/write normally
|
|
- */
|
|
- err = stm32_rproc_get_syscon(np, "st,syscfg-tz", &tz);
|
|
- if (err) {
|
|
- dev_err(dev, "failed to get tz syscfg\n");
|
|
- return err;
|
|
- }
|
|
-
|
|
- err = regmap_read(tz.map, tz.reg, &tzen);
|
|
- if (err) {
|
|
- dev_err(dev, "failed to read tzen\n");
|
|
- return err;
|
|
- }
|
|
- ddata->secured_soc = tzen & tz.mask;
|
|
-
|
|
- err = stm32_rproc_get_syscon(np, "st,syscfg-holdboot",
|
|
- &ddata->hold_boot);
|
|
- if (err) {
|
|
- dev_err(dev, "failed to get hold boot\n");
|
|
- return err;
|
|
- }
|
|
+ ddata->hold_boot = devm_reset_control_get(dev, "hold_boot");
|
|
+ if (IS_ERR(ddata->hold_boot))
|
|
+ return dev_err_probe(dev, PTR_ERR(ddata->hold_boot),
|
|
+ "failed to get mcu reset\n");
|
|
|
|
err = stm32_rproc_get_syscon(np, "st,syscfg-pdds", &ddata->pdds);
|
|
if (err)
|
|
@@ -801,6 +889,7 @@ static int stm32_rproc_probe(struct platform_device *pdev)
|
|
struct device *dev = &pdev->dev;
|
|
struct stm32_rproc *ddata;
|
|
struct device_node *np = dev->of_node;
|
|
+ struct tee_rproc *trproc;
|
|
struct rproc *rproc;
|
|
unsigned int state;
|
|
int ret;
|
|
@@ -809,11 +898,32 @@ static int stm32_rproc_probe(struct platform_device *pdev)
|
|
if (ret)
|
|
return ret;
|
|
|
|
- rproc = rproc_alloc(dev, np->name, &st_rproc_ops, NULL, sizeof(*ddata));
|
|
- if (!rproc)
|
|
- return -ENOMEM;
|
|
+ trproc = tee_rproc_register(dev, STM32_MP1_FW_ID);
|
|
+ if (!IS_ERR_OR_NULL(trproc)) {
|
|
+ /*
|
|
+ * Delegate the firmware management to the secure context. The
|
|
+ * firmware loaded has to be signed.
|
|
+ */
|
|
+ dev_info(dev, "Support of signed firmware only\n");
|
|
+
|
|
+ } else {
|
|
+ if (PTR_ERR(trproc) == -EPROBE_DEFER)
|
|
+ return PTR_ERR(trproc);
|
|
+ trproc = NULL;
|
|
+ }
|
|
+
|
|
+ rproc = rproc_alloc(dev, np->name,
|
|
+ trproc ? &st_rproc_tee_ops : &st_rproc_ops,
|
|
+ NULL, sizeof(*ddata));
|
|
+ if (!rproc) {
|
|
+ ret = -ENOMEM;
|
|
+ goto free_tee;
|
|
+ }
|
|
|
|
ddata = rproc->priv;
|
|
+ ddata->trproc = trproc;
|
|
+ if (trproc)
|
|
+ ddata->trproc->rproc = rproc;
|
|
|
|
rproc_coredump_set_elf_info(rproc, ELFCLASS32, EM_NONE);
|
|
|
|
@@ -864,6 +974,10 @@ static int stm32_rproc_probe(struct platform_device *pdev)
|
|
device_init_wakeup(dev, false);
|
|
}
|
|
rproc_free(rproc);
|
|
+free_tee:
|
|
+ if (trproc)
|
|
+ tee_rproc_unregister(trproc);
|
|
+
|
|
return ret;
|
|
}
|
|
|
|
@@ -885,10 +999,21 @@ static int stm32_rproc_remove(struct platform_device *pdev)
|
|
device_init_wakeup(dev, false);
|
|
}
|
|
rproc_free(rproc);
|
|
+ if (ddata->trproc)
|
|
+ tee_rproc_unregister(ddata->trproc);
|
|
|
|
return 0;
|
|
}
|
|
|
|
+static void stm32_rproc_shutdown(struct platform_device *pdev)
|
|
+{
|
|
+ struct rproc *rproc = platform_get_drvdata(pdev);
|
|
+
|
|
+ if (atomic_read(&rproc->power) > 0)
|
|
+ dev_warn(&pdev->dev,
|
|
+ "Warning: remote fw is still running with possible side effect!!!\n");
|
|
+}
|
|
+
|
|
static int __maybe_unused stm32_rproc_suspend(struct device *dev)
|
|
{
|
|
struct rproc *rproc = dev_get_drvdata(dev);
|
|
@@ -917,6 +1042,7 @@ static SIMPLE_DEV_PM_OPS(stm32_rproc_pm_ops,
|
|
static struct platform_driver stm32_rproc_driver = {
|
|
.probe = stm32_rproc_probe,
|
|
.remove = stm32_rproc_remove,
|
|
+ .shutdown = stm32_rproc_shutdown,
|
|
.driver = {
|
|
.name = "stm32-rproc",
|
|
.pm = &stm32_rproc_pm_ops,
|
|
diff --git a/drivers/remoteproc/tee_remoteproc.c b/drivers/remoteproc/tee_remoteproc.c
|
|
new file mode 100644
|
|
index 000000000000..da95273a30ac
|
|
--- /dev/null
|
|
+++ b/drivers/remoteproc/tee_remoteproc.c
|
|
@@ -0,0 +1,378 @@
|
|
+// SPDX-License-Identifier: GPL-2.0-or-later
|
|
+/*
|
|
+ * Copyright (C) STMicroelectronics 2020 - All Rights Reserved
|
|
+ * Authors: Arnaud Pouliquen <arnaud.pouliquen@st.com>
|
|
+ */
|
|
+
|
|
+#include <linux/firmware.h>
|
|
+#include <linux/interrupt.h>
|
|
+#include <linux/io.h>
|
|
+#include <linux/module.h>
|
|
+#include <linux/of_address.h>
|
|
+#include <linux/of_device.h>
|
|
+#include <linux/of_reserved_mem.h>
|
|
+#include <linux/remoteproc.h>
|
|
+#include <linux/slab.h>
|
|
+#include <linux/tee_drv.h>
|
|
+#include <linux/tee_remoteproc.h>
|
|
+
|
|
+#include "remoteproc_internal.h"
|
|
+
|
|
+#define MAX_TEE_PARAM_ARRY_MEMBER 4
|
|
+
|
|
+/*
|
|
+ * Authentication of the firmware and load in the remote processor memory
|
|
+ *
|
|
+ * [in] params[0].value.a: unique 32bit identifier of the firmware
|
|
+ * [in] params[1].memref: buffer containing the image of the firmware
|
|
+ */
|
|
+#define TA_RPROC_FW_CMD_LOAD_FW 1
|
|
+
|
|
+/*
|
|
+ * start the remote processor
|
|
+ *
|
|
+ * [in] params[0].value.a: unique 32bit identifier of the firmware
|
|
+ */
|
|
+#define TA_RPROC_FW_CMD_START_FW 2
|
|
+
|
|
+/*
|
|
+ * stop the remote processor
|
|
+ *
|
|
+ * [in] params[0].value.a: unique 32bit identifier of the firmware
|
|
+ */
|
|
+#define TA_RPROC_FW_CMD_STOP_FW 3
|
|
+
|
|
+/*
|
|
+ * return the address of the resource table, or 0 if not found
|
|
+ * No check is done to verify that the address returned is accessible by
|
|
+ * the non secure context. If the resource table is loaded in a protected
|
|
+ * memory the access by the non secure context will lead to a data abort.
|
|
+ *
|
|
+ * [in] params[0].value.a: unique 32bit identifier of the firmware
|
|
+ * [out] params[1].value.a: 32bit LSB resource table memory address
|
|
+ * [out] params[1].value.b: 32bit MSB resource table memory address
|
|
+ * [out] params[2].value.a: 32bit LSB resource table memory size
|
|
+ * [out] params[2].value.b: 32bit MSB resource table memory size
|
|
+ */
|
|
+#define TA_RPROC_FW_CMD_GET_RSC_TABLE 4
|
|
+
|
|
+/*
|
|
+ * return the address of the core dump
|
|
+ *
|
|
+ * [in] params[0].value.a: unique 32bit identifier of the firmware
|
|
+ * [out] params[1].memref: address of the core dump image if exist,
|
|
+ * else return Null
|
|
+ */
|
|
+#define TA_RPROC_FW_CMD_GET_COREDUMP 5
|
|
+
|
|
+struct tee_rproc_mem {
|
|
+ char name[20];
|
|
+ void __iomem *cpu_addr;
|
|
+ phys_addr_t bus_addr;
|
|
+ u32 dev_addr;
|
|
+ size_t size;
|
|
+};
|
|
+
|
|
+struct tee_rproc_context {
|
|
+ struct list_head sessions;
|
|
+ struct tee_context *ctx;
|
|
+ struct device *dev;
|
|
+};
|
|
+
|
|
+struct tee_rproc_context pvt_data;
|
|
+
|
|
+static void prepare_args(struct tee_rproc *trproc, int cmd,
|
|
+ struct tee_ioctl_invoke_arg *arg,
|
|
+ struct tee_param *param, unsigned int num_params)
|
|
+{
|
|
+ memset(arg, 0, sizeof(*arg));
|
|
+ memset(param, 0, MAX_TEE_PARAM_ARRY_MEMBER * sizeof(*param));
|
|
+
|
|
+ arg->func = cmd;
|
|
+ arg->session = trproc->session_id;
|
|
+ arg->num_params = num_params + 1;
|
|
+
|
|
+ param[0] = (struct tee_param) {
|
|
+ .attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT,
|
|
+ .u.value.a = trproc->fw_id,
|
|
+ };
|
|
+}
|
|
+
|
|
+int tee_rproc_load_fw(struct tee_rproc *trproc, const struct firmware *fw)
|
|
+{
|
|
+ struct tee_ioctl_invoke_arg arg;
|
|
+ struct tee_param param[MAX_TEE_PARAM_ARRY_MEMBER];
|
|
+ struct tee_shm *fw_shm;
|
|
+ int ret;
|
|
+
|
|
+ /*
|
|
+ * useless copy until tee_shm_register and the TEE properly support
|
|
+ * kernel buffer registration
|
|
+ */
|
|
+
|
|
+ fw_shm = tee_shm_alloc(pvt_data.ctx, fw->size,
|
|
+ TEE_SHM_MAPPED | TEE_SHM_DMA_BUF);
|
|
+ if (IS_ERR(fw_shm))
|
|
+ return PTR_ERR(fw_shm);
|
|
+
|
|
+ memcpy(tee_shm_get_va(fw_shm, 0), fw->data, fw->size);
|
|
+
|
|
+ prepare_args(trproc, TA_RPROC_FW_CMD_LOAD_FW, &arg, param, 1);
|
|
+
|
|
+ /* provide the address of the firmware image */
|
|
+ param[1] = (struct tee_param) {
|
|
+ .attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT,
|
|
+ .u.memref = {
|
|
+ .shm = fw_shm,
|
|
+ .size = fw->size,
|
|
+ .shm_offs = 0,
|
|
+ },
|
|
+ };
|
|
+
|
|
+ ret = tee_client_invoke_func(pvt_data.ctx, &arg, param);
|
|
+ if (ret < 0 || arg.ret != 0) {
|
|
+ dev_err(pvt_data.dev,
|
|
+ "TA_RPROC_FW_CMD_LOAD_FW invoke failed TEE err: %x, ret:%x\n",
|
|
+ arg.ret, ret);
|
|
+ if (!ret)
|
|
+ ret = -EIO;
|
|
+ }
|
|
+
|
|
+ tee_shm_free(fw_shm);
|
|
+
|
|
+ return ret;
|
|
+}
|
|
+EXPORT_SYMBOL(tee_rproc_load_fw);
|
|
+
|
|
+int rproc_tee_get_rsc_table(struct tee_rproc *trproc)
|
|
+{
|
|
+ struct tee_ioctl_invoke_arg arg;
|
|
+ struct tee_param param[MAX_TEE_PARAM_ARRY_MEMBER];
|
|
+ struct rproc *rproc = trproc->rproc;
|
|
+ size_t rsc_size;
|
|
+ int ret;
|
|
+
|
|
+ prepare_args(trproc, TA_RPROC_FW_CMD_GET_RSC_TABLE, &arg, param, 2);
|
|
+
|
|
+ param[1].attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_OUTPUT;
|
|
+ param[2].attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_OUTPUT;
|
|
+
|
|
+ ret = tee_client_invoke_func(pvt_data.ctx, &arg, param);
|
|
+ if (ret < 0 || arg.ret != 0) {
|
|
+ dev_err(pvt_data.dev,
|
|
+ "TA_RPROC_FW_CMD_GET_RSC_TABLE invoke failed TEE err: %x, ret:%x\n",
|
|
+ arg.ret, ret);
|
|
+ return -EIO;
|
|
+ }
|
|
+
|
|
+ rsc_size = param[2].u.value.a;
|
|
+
|
|
+ /*
|
|
+ * Store the resource table address that would be updated by the remote
|
|
+ * core and the virtio.
|
|
+ */
|
|
+ trproc->rsc_va = ioremap_wc(param[1].u.value.a, rsc_size);
|
|
+ if (IS_ERR_OR_NULL(trproc->rsc_va)) {
|
|
+ dev_err(pvt_data.dev, "Unable to map memory region: %lld+%zx\n",
|
|
+ param[1].u.value.a, rsc_size);
|
|
+ trproc->rsc_va = NULL;
|
|
+ return -ENOMEM;
|
|
+ }
|
|
+
|
|
+ /*
|
|
+ * A cached table is requested as the physical address is not mapped yet
|
|
+ * but remoteproc needs to parse the table for resources.
|
|
+ */
|
|
+ rproc->cached_table = kmemdup(trproc->rsc_va, rsc_size, GFP_KERNEL);
|
|
+ if (!rproc->cached_table)
|
|
+ return -ENOMEM;
|
|
+
|
|
+ rproc->table_ptr = rproc->cached_table;
|
|
+ rproc->table_sz = rsc_size;
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+EXPORT_SYMBOL(rproc_tee_get_rsc_table);
|
|
+
|
|
+struct resource_table *tee_rproc_get_loaded_rsc_table(struct tee_rproc *trproc)
|
|
+{
|
|
+ return (struct resource_table *)trproc->rsc_va;
|
|
+}
|
|
+EXPORT_SYMBOL(tee_rproc_get_loaded_rsc_table);
|
|
+
|
|
+int tee_rproc_start(struct tee_rproc *trproc)
|
|
+{
|
|
+ struct tee_ioctl_invoke_arg arg;
|
|
+ struct tee_param param[MAX_TEE_PARAM_ARRY_MEMBER];
|
|
+ int ret;
|
|
+
|
|
+ prepare_args(trproc, TA_RPROC_FW_CMD_START_FW, &arg, param, 0);
|
|
+
|
|
+ ret = tee_client_invoke_func(pvt_data.ctx, &arg, param);
|
|
+ if (ret < 0 || arg.ret != 0) {
|
|
+ dev_err(pvt_data.dev,
|
|
+ "TA_RPROC_FW_CMD_START_FW invoke failed TEE err: %x, ret:%x\n",
|
|
+ arg.ret, ret);
|
|
+ if (!ret)
|
|
+ ret = -EIO;
|
|
+ }
|
|
+
|
|
+ return ret;
|
|
+}
|
|
+EXPORT_SYMBOL(tee_rproc_start);
|
|
+
|
|
+int tee_rproc_stop(struct tee_rproc *trproc)
|
|
+{
|
|
+ struct tee_ioctl_invoke_arg arg;
|
|
+ struct tee_param param[MAX_TEE_PARAM_ARRY_MEMBER];
|
|
+ int ret;
|
|
+
|
|
+ prepare_args(trproc, TA_RPROC_FW_CMD_STOP_FW, &arg, param, 0);
|
|
+
|
|
+ ret = tee_client_invoke_func(pvt_data.ctx, &arg, param);
|
|
+ if (ret < 0 || arg.ret != 0) {
|
|
+ dev_err(pvt_data.dev,
|
|
+ "TA_RPROC_FW_CMD_STOP_FW invoke failed TEE err: %x, ret:%x\n",
|
|
+ arg.ret, ret);
|
|
+ if (!ret)
|
|
+ ret = -EIO;
|
|
+ }
|
|
+ if (trproc->rsc_va)
|
|
+ iounmap(trproc->rsc_va);
|
|
+ trproc->rsc_va = NULL;
|
|
+
|
|
+ return ret;
|
|
+}
|
|
+EXPORT_SYMBOL(tee_rproc_stop);
|
|
+
|
|
+static const struct tee_client_device_id stm32_tee_fw_id_table[] = {
|
|
+ {UUID_INIT(0x80a4c275, 0x0a47, 0x4905,
|
|
+ 0x82, 0x85, 0x14, 0x86, 0xa9, 0x77, 0x1a, 0x08)},
|
|
+ {}
|
|
+};
|
|
+
|
|
+struct tee_rproc *tee_rproc_register(struct device *dev, unsigned int fw_id)
|
|
+{
|
|
+ struct tee_client_device *rproc_tee_device;
|
|
+ struct tee_ioctl_open_session_arg sess_arg;
|
|
+ struct tee_rproc *trproc;
|
|
+ int ret;
|
|
+
|
|
+ if (!pvt_data.ctx)
|
|
+ return ERR_PTR(-ENODEV);
|
|
+
|
|
+ trproc = devm_kzalloc(dev, sizeof(*trproc), GFP_KERNEL);
|
|
+ if (!trproc)
|
|
+ return ERR_PTR(-ENOMEM);
|
|
+
|
|
+ rproc_tee_device = to_tee_client_device(pvt_data.dev);
|
|
+ memset(&sess_arg, 0, sizeof(sess_arg));
|
|
+
|
|
+ /* Open session with rproc_tee load Trusted App */
|
|
+ memcpy(sess_arg.uuid, rproc_tee_device->id.uuid.b, TEE_IOCTL_UUID_LEN);
|
|
+
|
|
+ /*
|
|
+ * TODO: should we replace TEE_IOCTL_LOGIN_PUBLIC by
|
|
+ * TEE_IOCTL_LOGIN_REE_KERNEL?
|
|
+ */
|
|
+ sess_arg.clnt_login = TEE_IOCTL_LOGIN_PUBLIC;
|
|
+ sess_arg.num_params = 0;
|
|
+
|
|
+ ret = tee_client_open_session(pvt_data.ctx, &sess_arg, NULL);
|
|
+ if (ret < 0 || sess_arg.ret != 0) {
|
|
+ dev_err(dev, "tee_client_open_session failed, err: %x\n",
|
|
+ sess_arg.ret);
|
|
+ return ERR_PTR(-EINVAL);
|
|
+ }
|
|
+
|
|
+ trproc->parent = dev;
|
|
+ trproc->fw_id = fw_id;
|
|
+ trproc->session_id = sess_arg.session;
|
|
+
|
|
+ list_add_tail(&trproc->node, &pvt_data.sessions);
|
|
+
|
|
+ return trproc;
|
|
+}
|
|
+EXPORT_SYMBOL(tee_rproc_register);
|
|
+
|
|
+int tee_rproc_unregister(struct tee_rproc *trproc)
|
|
+{
|
|
+ int ret;
|
|
+
|
|
+ if (!pvt_data.ctx)
|
|
+ return -ENODEV;
|
|
+
|
|
+ ret = tee_client_close_session(pvt_data.ctx, trproc->session_id);
|
|
+ if (ret < 0) {
|
|
+ dev_err(trproc->parent,
|
|
+ "tee_client_close_session failed, err: %x\n", ret);
|
|
+ }
|
|
+
|
|
+ list_del(&trproc->node);
|
|
+
|
|
+ return ret;
|
|
+}
|
|
+EXPORT_SYMBOL(tee_rproc_unregister);
|
|
+
|
|
+static int tee_ctx_match(struct tee_ioctl_version_data *ver, const void *data)
|
|
+{
|
|
+ /* Today we support only OP-TEE, could be extended to other TEEs */
|
|
+ return (ver->impl_id == TEE_IMPL_ID_OPTEE);
|
|
+}
|
|
+
|
|
+static int tee_rproc_probe(struct device *dev)
|
|
+{
|
|
+ /* Open context with TEE driver */
|
|
+ pvt_data.ctx = tee_client_open_context(NULL, tee_ctx_match, NULL,
|
|
+ NULL);
|
|
+ if (IS_ERR(pvt_data.ctx))
|
|
+ return PTR_ERR(pvt_data.ctx);
|
|
+
|
|
+ pvt_data.dev = dev;
|
|
+ INIT_LIST_HEAD(&pvt_data.sessions);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int tee_rproc_remove(struct device *dev)
|
|
+{
|
|
+ struct tee_rproc *entry, *tmp;
|
|
+
|
|
+ list_for_each_entry_safe(entry, tmp, &pvt_data.sessions, node) {
|
|
+ tee_client_close_session(pvt_data.ctx, entry->session_id);
|
|
+ list_del(&entry->node);
|
|
+ kfree(entry);
|
|
+ }
|
|
+
|
|
+ tee_client_close_context(pvt_data.ctx);
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+MODULE_DEVICE_TABLE(tee, stm32_tee_fw_id_table);
|
|
+
|
|
+static struct tee_client_driver tee_rproc_fw_driver = {
|
|
+ .id_table = stm32_tee_fw_id_table,
|
|
+ .driver = {
|
|
+ .name = KBUILD_MODNAME,
|
|
+ .bus = &tee_bus_type,
|
|
+ .probe = tee_rproc_probe,
|
|
+ .remove = tee_rproc_remove,
|
|
+ },
|
|
+};
|
|
+
|
|
+static int __init tee_rproc_fw_mod_init(void)
|
|
+{
|
|
+ return driver_register(&tee_rproc_fw_driver.driver);
|
|
+}
|
|
+
|
|
+static void __exit tee_rproc_fw_mod_exit(void)
|
|
+{
|
|
+ driver_unregister(&tee_rproc_fw_driver.driver);
|
|
+}
|
|
+
|
|
+module_init(tee_rproc_fw_mod_init);
|
|
+module_exit(tee_rproc_fw_mod_exit);
|
|
+
|
|
+MODULE_DESCRIPTION("secure remote processor control driver");
|
|
+MODULE_AUTHOR("Arnaud Pouliquen <arnaud.pouliquen@st.com>");
|
|
+MODULE_LICENSE("GPL v2");
|
|
diff --git a/drivers/rpmsg/Kconfig b/drivers/rpmsg/Kconfig
|
|
index 0b4407abdf13..d3795860f5c0 100644
|
|
--- a/drivers/rpmsg/Kconfig
|
|
+++ b/drivers/rpmsg/Kconfig
|
|
@@ -15,6 +15,14 @@ config RPMSG_CHAR
|
|
in /dev. They make it possible for user-space programs to send and
|
|
receive rpmsg packets.
|
|
|
|
+config RPMSG_CTRL
|
|
+ tristate "RPMSG control interface"
|
|
+ depends on RPMSG && ( RPMSG_CHAR || RPMSG_CHAR=n )
|
|
+ help
|
|
+ Say Y here to enable the support of the /dev/rpmsg_ctrlX API. This API
|
|
+ allows user-space programs to create endpoints with specific service name,
|
|
+ source and destination addresses.
|
|
+
|
|
config RPMSG_NS
|
|
tristate "RPMSG name service announcement"
|
|
depends on RPMSG
|
|
diff --git a/drivers/rpmsg/Makefile b/drivers/rpmsg/Makefile
|
|
index 8d452656f0ee..58e3b382e316 100644
|
|
--- a/drivers/rpmsg/Makefile
|
|
+++ b/drivers/rpmsg/Makefile
|
|
@@ -1,6 +1,7 @@
|
|
# SPDX-License-Identifier: GPL-2.0
|
|
obj-$(CONFIG_RPMSG) += rpmsg_core.o
|
|
obj-$(CONFIG_RPMSG_CHAR) += rpmsg_char.o
|
|
+obj-$(CONFIG_RPMSG_CTRL) += rpmsg_ctrl.o
|
|
obj-$(CONFIG_RPMSG_NS) += rpmsg_ns.o
|
|
obj-$(CONFIG_RPMSG_MTK_SCP) += mtk_rpmsg.o
|
|
qcom_glink-objs := qcom_glink_native.o qcom_glink_ssr.o
|
|
diff --git a/drivers/rpmsg/qcom_glink_native.c b/drivers/rpmsg/qcom_glink_native.c
|
|
index fd4c2f0fa4b1..0a7dabf3f329 100644
|
|
--- a/drivers/rpmsg/qcom_glink_native.c
|
|
+++ b/drivers/rpmsg/qcom_glink_native.c
|
|
@@ -1643,7 +1643,7 @@ static int qcom_glink_create_chrdev(struct qcom_glink *glink)
|
|
rpdev->dev.parent = glink->dev;
|
|
rpdev->dev.release = qcom_glink_device_release;
|
|
|
|
- return rpmsg_chrdev_register_device(rpdev);
|
|
+ return rpmsg_ctrldev_register_device(rpdev);
|
|
}
|
|
|
|
struct qcom_glink *qcom_glink_native_probe(struct device *dev,
|
|
diff --git a/drivers/rpmsg/qcom_smd.c b/drivers/rpmsg/qcom_smd.c
|
|
index 56bc622de25e..754161b02407 100644
|
|
--- a/drivers/rpmsg/qcom_smd.c
|
|
+++ b/drivers/rpmsg/qcom_smd.c
|
|
@@ -1113,7 +1113,7 @@ static int qcom_smd_create_chrdev(struct qcom_smd_edge *edge)
|
|
qsdev->rpdev.dev.parent = &edge->dev;
|
|
qsdev->rpdev.dev.release = qcom_smd_release_device;
|
|
|
|
- return rpmsg_chrdev_register_device(&qsdev->rpdev);
|
|
+ return rpmsg_ctrldev_register_device(&qsdev->rpdev);
|
|
}
|
|
|
|
/*
|
|
diff --git a/drivers/rpmsg/rpmsg_char.c b/drivers/rpmsg/rpmsg_char.c
|
|
index 88c985f9e73a..29a814eb16f0 100644
|
|
--- a/drivers/rpmsg/rpmsg_char.c
|
|
+++ b/drivers/rpmsg/rpmsg_char.c
|
|
@@ -1,5 +1,6 @@
|
|
// SPDX-License-Identifier: GPL-2.0
|
|
/*
|
|
+ * Copyright (C) 2022, STMicroelectronics
|
|
* Copyright (c) 2016, Linaro Ltd.
|
|
* Copyright (c) 2012, Michal Simek <monstr@monstr.eu>
|
|
* Copyright (c) 2012, PetaLogix
|
|
@@ -22,35 +23,19 @@
|
|
#include <linux/uaccess.h>
|
|
#include <uapi/linux/rpmsg.h>
|
|
|
|
+#include "rpmsg_char.h"
|
|
#include "rpmsg_internal.h"
|
|
|
|
#define RPMSG_DEV_MAX (MINORMASK + 1)
|
|
|
|
static dev_t rpmsg_major;
|
|
-static struct class *rpmsg_class;
|
|
|
|
-static DEFINE_IDA(rpmsg_ctrl_ida);
|
|
static DEFINE_IDA(rpmsg_ept_ida);
|
|
static DEFINE_IDA(rpmsg_minor_ida);
|
|
|
|
#define dev_to_eptdev(dev) container_of(dev, struct rpmsg_eptdev, dev)
|
|
#define cdev_to_eptdev(i_cdev) container_of(i_cdev, struct rpmsg_eptdev, cdev)
|
|
|
|
-#define dev_to_ctrldev(dev) container_of(dev, struct rpmsg_ctrldev, dev)
|
|
-#define cdev_to_ctrldev(i_cdev) container_of(i_cdev, struct rpmsg_ctrldev, cdev)
|
|
-
|
|
-/**
|
|
- * struct rpmsg_ctrldev - control device for instantiating endpoint devices
|
|
- * @rpdev: underlaying rpmsg device
|
|
- * @cdev: cdev for the ctrl device
|
|
- * @dev: device for the ctrl device
|
|
- */
|
|
-struct rpmsg_ctrldev {
|
|
- struct rpmsg_device *rpdev;
|
|
- struct cdev cdev;
|
|
- struct device dev;
|
|
-};
|
|
-
|
|
/**
|
|
* struct rpmsg_eptdev - endpoint device context
|
|
* @dev: endpoint device
|
|
@@ -62,6 +47,8 @@ struct rpmsg_ctrldev {
|
|
* @queue_lock: synchronization of @queue operations
|
|
* @queue: incoming message queue
|
|
* @readq: wait object for incoming queue
|
|
+ * @default_ept: set to channel default endpoint if the default endpoint should be re-used
|
|
+ * on device open to prevent endpoint address update.
|
|
*/
|
|
struct rpmsg_eptdev {
|
|
struct device dev;
|
|
@@ -72,13 +59,15 @@ struct rpmsg_eptdev {
|
|
|
|
struct mutex ept_lock;
|
|
struct rpmsg_endpoint *ept;
|
|
+ struct rpmsg_endpoint *default_ept;
|
|
|
|
spinlock_t queue_lock;
|
|
struct sk_buff_head queue;
|
|
wait_queue_head_t readq;
|
|
+
|
|
};
|
|
|
|
-static int rpmsg_eptdev_destroy(struct device *dev, void *data)
|
|
+int rpmsg_chrdev_eptdev_destroy(struct device *dev, void *data)
|
|
{
|
|
struct rpmsg_eptdev *eptdev = dev_to_eptdev(dev);
|
|
|
|
@@ -97,6 +86,7 @@ static int rpmsg_eptdev_destroy(struct device *dev, void *data)
|
|
|
|
return 0;
|
|
}
|
|
+EXPORT_SYMBOL(rpmsg_chrdev_eptdev_destroy);
|
|
|
|
static int rpmsg_ept_cb(struct rpmsg_device *rpdev, void *buf, int len,
|
|
void *priv, u32 addr)
|
|
@@ -135,7 +125,15 @@ static int rpmsg_eptdev_open(struct inode *inode, struct file *filp)
|
|
|
|
get_device(dev);
|
|
|
|
- ept = rpmsg_create_ept(rpdev, rpmsg_ept_cb, eptdev, eptdev->chinfo);
|
|
+ /*
|
|
+ * If the default_ept is set, the rpmsg device default endpoint is used.
|
|
+ * Else a new endpoint is created on open that will be destroyed on release.
|
|
+ */
|
|
+ if (eptdev->default_ept)
|
|
+ ept = eptdev->default_ept;
|
|
+ else
|
|
+ ept = rpmsg_create_ept(rpdev, rpmsg_ept_cb, eptdev, eptdev->chinfo);
|
|
+
|
|
if (!ept) {
|
|
dev_err(dev, "failed to open %s\n", eptdev->chinfo.name);
|
|
put_device(dev);
|
|
@@ -158,7 +156,8 @@ static int rpmsg_eptdev_release(struct inode *inode, struct file *filp)
|
|
/* Close the endpoint, if it's not already destroyed by the parent */
|
|
mutex_lock(&eptdev->ept_lock);
|
|
if (eptdev->ept) {
|
|
- rpmsg_destroy_ept(eptdev->ept);
|
|
+ if (!eptdev->default_ept)
|
|
+ rpmsg_destroy_ept(eptdev->ept);
|
|
eptdev->ept = NULL;
|
|
}
|
|
mutex_unlock(&eptdev->ept_lock);
|
|
@@ -285,7 +284,11 @@ static long rpmsg_eptdev_ioctl(struct file *fp, unsigned int cmd,
|
|
if (cmd != RPMSG_DESTROY_EPT_IOCTL)
|
|
return -EINVAL;
|
|
|
|
- return rpmsg_eptdev_destroy(&eptdev->dev, NULL);
|
|
+ /* Don't allow to destroy a default endpoint. */
|
|
+ if (eptdev->default_ept)
|
|
+ return -EINVAL;
|
|
+
|
|
+ return rpmsg_chrdev_eptdev_destroy(&eptdev->dev, NULL);
|
|
}
|
|
|
|
static const struct file_operations rpmsg_eptdev_fops = {
|
|
@@ -343,21 +346,18 @@ static void rpmsg_eptdev_release_device(struct device *dev)
|
|
kfree(eptdev);
|
|
}
|
|
|
|
-static int rpmsg_eptdev_create(struct rpmsg_ctrldev *ctrldev,
|
|
- struct rpmsg_channel_info chinfo)
|
|
+static struct rpmsg_eptdev *rpmsg_chrdev_eptdev_alloc(struct rpmsg_device *rpdev,
|
|
+ struct device *parent)
|
|
{
|
|
- struct rpmsg_device *rpdev = ctrldev->rpdev;
|
|
struct rpmsg_eptdev *eptdev;
|
|
struct device *dev;
|
|
- int ret;
|
|
|
|
eptdev = kzalloc(sizeof(*eptdev), GFP_KERNEL);
|
|
if (!eptdev)
|
|
- return -ENOMEM;
|
|
+ return ERR_PTR(-ENOMEM);
|
|
|
|
dev = &eptdev->dev;
|
|
eptdev->rpdev = rpdev;
|
|
- eptdev->chinfo = chinfo;
|
|
|
|
mutex_init(&eptdev->ept_lock);
|
|
spin_lock_init(&eptdev->queue_lock);
|
|
@@ -366,13 +366,23 @@ static int rpmsg_eptdev_create(struct rpmsg_ctrldev *ctrldev,
|
|
|
|
device_initialize(dev);
|
|
dev->class = rpmsg_class;
|
|
- dev->parent = &ctrldev->dev;
|
|
+ dev->parent = parent;
|
|
dev->groups = rpmsg_eptdev_groups;
|
|
dev_set_drvdata(dev, eptdev);
|
|
|
|
cdev_init(&eptdev->cdev, &rpmsg_eptdev_fops);
|
|
eptdev->cdev.owner = THIS_MODULE;
|
|
|
|
+ return eptdev;
|
|
+}
|
|
+
|
|
+static int rpmsg_chrdev_eptdev_add(struct rpmsg_eptdev *eptdev, struct rpmsg_channel_info chinfo)
|
|
+{
|
|
+ struct device *dev = &eptdev->dev;
|
|
+ int ret;
|
|
+
|
|
+ eptdev->chinfo = chinfo;
|
|
+
|
|
ret = ida_simple_get(&rpmsg_minor_ida, 0, RPMSG_DEV_MAX, GFP_KERNEL);
|
|
if (ret < 0)
|
|
goto free_eptdev;
|
|
@@ -404,163 +414,91 @@ static int rpmsg_eptdev_create(struct rpmsg_ctrldev *ctrldev,
|
|
return ret;
|
|
}
|
|
|
|
-static int rpmsg_ctrldev_open(struct inode *inode, struct file *filp)
|
|
-{
|
|
- struct rpmsg_ctrldev *ctrldev = cdev_to_ctrldev(inode->i_cdev);
|
|
-
|
|
- get_device(&ctrldev->dev);
|
|
- filp->private_data = ctrldev;
|
|
-
|
|
- return 0;
|
|
-}
|
|
-
|
|
-static int rpmsg_ctrldev_release(struct inode *inode, struct file *filp)
|
|
-{
|
|
- struct rpmsg_ctrldev *ctrldev = cdev_to_ctrldev(inode->i_cdev);
|
|
-
|
|
- put_device(&ctrldev->dev);
|
|
-
|
|
- return 0;
|
|
-}
|
|
-
|
|
-static long rpmsg_ctrldev_ioctl(struct file *fp, unsigned int cmd,
|
|
- unsigned long arg)
|
|
+int rpmsg_chrdev_eptdev_create(struct rpmsg_device *rpdev, struct device *parent,
|
|
+ struct rpmsg_channel_info chinfo)
|
|
{
|
|
- struct rpmsg_ctrldev *ctrldev = fp->private_data;
|
|
- void __user *argp = (void __user *)arg;
|
|
- struct rpmsg_endpoint_info eptinfo;
|
|
- struct rpmsg_channel_info chinfo;
|
|
-
|
|
- if (cmd != RPMSG_CREATE_EPT_IOCTL)
|
|
- return -EINVAL;
|
|
-
|
|
- if (copy_from_user(&eptinfo, argp, sizeof(eptinfo)))
|
|
- return -EFAULT;
|
|
-
|
|
- memcpy(chinfo.name, eptinfo.name, RPMSG_NAME_SIZE);
|
|
- chinfo.name[RPMSG_NAME_SIZE-1] = '\0';
|
|
- chinfo.src = eptinfo.src;
|
|
- chinfo.dst = eptinfo.dst;
|
|
-
|
|
- return rpmsg_eptdev_create(ctrldev, chinfo);
|
|
-};
|
|
+ struct rpmsg_eptdev *eptdev;
|
|
+ int ret;
|
|
|
|
-static const struct file_operations rpmsg_ctrldev_fops = {
|
|
- .owner = THIS_MODULE,
|
|
- .open = rpmsg_ctrldev_open,
|
|
- .release = rpmsg_ctrldev_release,
|
|
- .unlocked_ioctl = rpmsg_ctrldev_ioctl,
|
|
- .compat_ioctl = compat_ptr_ioctl,
|
|
-};
|
|
+ eptdev = rpmsg_chrdev_eptdev_alloc(rpdev, parent);
|
|
+ if (IS_ERR(eptdev))
|
|
+ return PTR_ERR(eptdev);
|
|
|
|
-static void rpmsg_ctrldev_release_device(struct device *dev)
|
|
-{
|
|
- struct rpmsg_ctrldev *ctrldev = dev_to_ctrldev(dev);
|
|
+ ret = rpmsg_chrdev_eptdev_add(eptdev, chinfo);
|
|
|
|
- ida_simple_remove(&rpmsg_ctrl_ida, dev->id);
|
|
- ida_simple_remove(&rpmsg_minor_ida, MINOR(dev->devt));
|
|
- kfree(ctrldev);
|
|
+ return ret;
|
|
}
|
|
+EXPORT_SYMBOL(rpmsg_chrdev_eptdev_create);
|
|
|
|
static int rpmsg_chrdev_probe(struct rpmsg_device *rpdev)
|
|
{
|
|
- struct rpmsg_ctrldev *ctrldev;
|
|
- struct device *dev;
|
|
- int ret;
|
|
-
|
|
- ctrldev = kzalloc(sizeof(*ctrldev), GFP_KERNEL);
|
|
- if (!ctrldev)
|
|
- return -ENOMEM;
|
|
-
|
|
- ctrldev->rpdev = rpdev;
|
|
-
|
|
- dev = &ctrldev->dev;
|
|
- device_initialize(dev);
|
|
- dev->parent = &rpdev->dev;
|
|
- dev->class = rpmsg_class;
|
|
-
|
|
- cdev_init(&ctrldev->cdev, &rpmsg_ctrldev_fops);
|
|
- ctrldev->cdev.owner = THIS_MODULE;
|
|
-
|
|
- ret = ida_simple_get(&rpmsg_minor_ida, 0, RPMSG_DEV_MAX, GFP_KERNEL);
|
|
- if (ret < 0)
|
|
- goto free_ctrldev;
|
|
- dev->devt = MKDEV(MAJOR(rpmsg_major), ret);
|
|
-
|
|
- ret = ida_simple_get(&rpmsg_ctrl_ida, 0, 0, GFP_KERNEL);
|
|
- if (ret < 0)
|
|
- goto free_minor_ida;
|
|
- dev->id = ret;
|
|
- dev_set_name(&ctrldev->dev, "rpmsg_ctrl%d", ret);
|
|
+ struct rpmsg_channel_info chinfo;
|
|
+ struct rpmsg_eptdev *eptdev;
|
|
+ struct device *dev = &rpdev->dev;
|
|
|
|
- ret = cdev_device_add(&ctrldev->cdev, &ctrldev->dev);
|
|
- if (ret)
|
|
- goto free_ctrl_ida;
|
|
+ memcpy(chinfo.name, rpdev->id.name, RPMSG_NAME_SIZE);
|
|
+ chinfo.src = rpdev->src;
|
|
+ chinfo.dst = rpdev->dst;
|
|
|
|
- /* We can now rely on the release function for cleanup */
|
|
- dev->release = rpmsg_ctrldev_release_device;
|
|
+ eptdev = rpmsg_chrdev_eptdev_alloc(rpdev, dev);
|
|
+ if (IS_ERR(eptdev))
|
|
+ return PTR_ERR(eptdev);
|
|
|
|
- dev_set_drvdata(&rpdev->dev, ctrldev);
|
|
+ /* Set the default_ept to the rpmsg device endpoint */
|
|
+ eptdev->default_ept = rpdev->ept;
|
|
|
|
- return ret;
|
|
+ /*
|
|
+ * The rpmsg_ept_cb uses *priv parameter to get its rpmsg_eptdev context.
|
|
+ * Store it in the default_ept *priv field.
|
|
+ */
|
|
+ eptdev->default_ept->priv = eptdev;
|
|
|
|
-free_ctrl_ida:
|
|
- ida_simple_remove(&rpmsg_ctrl_ida, dev->id);
|
|
-free_minor_ida:
|
|
- ida_simple_remove(&rpmsg_minor_ida, MINOR(dev->devt));
|
|
-free_ctrldev:
|
|
- put_device(dev);
|
|
- kfree(ctrldev);
|
|
-
|
|
- return ret;
|
|
+ return rpmsg_chrdev_eptdev_add(eptdev, chinfo);
|
|
}
|
|
|
|
static void rpmsg_chrdev_remove(struct rpmsg_device *rpdev)
|
|
{
|
|
- struct rpmsg_ctrldev *ctrldev = dev_get_drvdata(&rpdev->dev);
|
|
int ret;
|
|
|
|
- /* Destroy all endpoints */
|
|
- ret = device_for_each_child(&ctrldev->dev, NULL, rpmsg_eptdev_destroy);
|
|
+ ret = device_for_each_child(&rpdev->dev, NULL, rpmsg_chrdev_eptdev_destroy);
|
|
if (ret)
|
|
- dev_warn(&rpdev->dev, "failed to nuke endpoints: %d\n", ret);
|
|
-
|
|
- cdev_device_del(&ctrldev->cdev, &ctrldev->dev);
|
|
- put_device(&ctrldev->dev);
|
|
+ dev_warn(&rpdev->dev, "failed to destroy endpoints: %d\n", ret);
|
|
}
|
|
|
|
+static struct rpmsg_device_id rpmsg_chrdev_id_table[] = {
|
|
+ { .name = "rpmsg-raw" },
|
|
+ { },
|
|
+};
|
|
+
|
|
static struct rpmsg_driver rpmsg_chrdev_driver = {
|
|
.probe = rpmsg_chrdev_probe,
|
|
.remove = rpmsg_chrdev_remove,
|
|
- .drv = {
|
|
- .name = "rpmsg_chrdev",
|
|
- },
|
|
+ .callback = rpmsg_ept_cb,
|
|
+ .id_table = rpmsg_chrdev_id_table,
|
|
+ .drv.name = "rpmsg_chrdev",
|
|
};
|
|
|
|
static int rpmsg_chrdev_init(void)
|
|
{
|
|
int ret;
|
|
|
|
- ret = alloc_chrdev_region(&rpmsg_major, 0, RPMSG_DEV_MAX, "rpmsg");
|
|
+ ret = alloc_chrdev_region(&rpmsg_major, 0, RPMSG_DEV_MAX, "rpmsg_char");
|
|
if (ret < 0) {
|
|
pr_err("rpmsg: failed to allocate char dev region\n");
|
|
return ret;
|
|
}
|
|
|
|
- rpmsg_class = class_create(THIS_MODULE, "rpmsg");
|
|
- if (IS_ERR(rpmsg_class)) {
|
|
- pr_err("failed to create rpmsg class\n");
|
|
- unregister_chrdev_region(rpmsg_major, RPMSG_DEV_MAX);
|
|
- return PTR_ERR(rpmsg_class);
|
|
- }
|
|
-
|
|
ret = register_rpmsg_driver(&rpmsg_chrdev_driver);
|
|
if (ret < 0) {
|
|
- pr_err("rpmsgchr: failed to register rpmsg driver\n");
|
|
- class_destroy(rpmsg_class);
|
|
- unregister_chrdev_region(rpmsg_major, RPMSG_DEV_MAX);
|
|
+ pr_err("rpmsg: failed to register rpmsg raw driver\n");
|
|
+ goto free_region;
|
|
}
|
|
|
|
+ return 0;
|
|
+
|
|
+free_region:
|
|
+ unregister_chrdev_region(rpmsg_major, RPMSG_DEV_MAX);
|
|
+
|
|
return ret;
|
|
}
|
|
postcore_initcall(rpmsg_chrdev_init);
|
|
@@ -568,7 +506,6 @@ postcore_initcall(rpmsg_chrdev_init);
|
|
static void rpmsg_chrdev_exit(void)
|
|
{
|
|
unregister_rpmsg_driver(&rpmsg_chrdev_driver);
|
|
- class_destroy(rpmsg_class);
|
|
unregister_chrdev_region(rpmsg_major, RPMSG_DEV_MAX);
|
|
}
|
|
module_exit(rpmsg_chrdev_exit);
|
|
diff --git a/drivers/rpmsg/rpmsg_char.h b/drivers/rpmsg/rpmsg_char.h
|
|
new file mode 100644
|
|
index 000000000000..117d9cbc52f0
|
|
--- /dev/null
|
|
+++ b/drivers/rpmsg/rpmsg_char.h
|
|
@@ -0,0 +1,46 @@
|
|
+/* SPDX-License-Identifier: GPL-2.0 */
|
|
+/*
|
|
+ * Copyright (C) 2022, STMicroelectronics
|
|
+ */
|
|
+
|
|
+#ifndef __RPMSG_CHRDEV_H__
|
|
+#define __RPMSG_CHRDEV_H__
|
|
+
|
|
+#if IS_ENABLED(CONFIG_RPMSG_CHAR)
|
|
+/**
|
|
+ * rpmsg_chrdev_eptdev_create() - register char device based on an endpoint
|
|
+ * @rpdev: prepared rpdev to be used for creating endpoints
|
|
+ * @parent: parent device
|
|
+ * @chinfo: associated endpoint channel information.
|
|
+ *
|
|
+ * This function creates a new rpmsg char endpoint device to instantiate a new
|
|
+ * endpoint based on chinfo information.
|
|
+ */
|
|
+int rpmsg_chrdev_eptdev_create(struct rpmsg_device *rpdev, struct device *parent,
|
|
+ struct rpmsg_channel_info chinfo);
|
|
+
|
|
+/**
|
|
+ * rpmsg_chrdev_eptdev_destroy() - destroy created char device endpoint.
|
|
+ * @data: private data associated to the endpoint device
|
|
+ *
|
|
+ * This function destroys a rpmsg char endpoint device created by the RPMSG_DESTROY_EPT_IOCTL
|
|
+ * control.
|
|
+ */
|
|
+int rpmsg_chrdev_eptdev_destroy(struct device *dev, void *data);
|
|
+
|
|
+#else /*IS_ENABLED(CONFIG_RPMSG_CHAR) */
|
|
+
|
|
+static inline int rpmsg_chrdev_eptdev_create(struct rpmsg_device *rpdev, struct device *parent,
|
|
+ struct rpmsg_channel_info chinfo)
|
|
+{
|
|
+ return -ENXIO;
|
|
+}
|
|
+
|
|
+static inline int rpmsg_chrdev_eptdev_destroy(struct device *dev, void *data)
|
|
+{
|
|
+ return -ENXIO;
|
|
+}
|
|
+
|
|
+#endif /*IS_ENABLED(CONFIG_RPMSG_CHAR) */
|
|
+
|
|
+#endif /*__RPMSG_CHRDEV_H__ */
|
|
diff --git a/drivers/rpmsg/rpmsg_core.c b/drivers/rpmsg/rpmsg_core.c
|
|
index a71de08acc7b..41648ad9f8a6 100644
|
|
--- a/drivers/rpmsg/rpmsg_core.c
|
|
+++ b/drivers/rpmsg/rpmsg_core.c
|
|
@@ -20,6 +20,9 @@
|
|
|
|
#include "rpmsg_internal.h"
|
|
|
|
+struct class *rpmsg_class;
|
|
+EXPORT_SYMBOL(rpmsg_class);
|
|
+
|
|
/**
|
|
* rpmsg_create_channel() - create a new rpmsg channel
|
|
* using its name and address info.
|
|
@@ -327,6 +330,27 @@ int rpmsg_trysend_offchannel(struct rpmsg_endpoint *ept, u32 src, u32 dst,
|
|
}
|
|
EXPORT_SYMBOL(rpmsg_trysend_offchannel);
|
|
|
|
+/**
|
|
+ * rpmsg_get_mtu() - get maximum transmission buffer size for sending message.
|
|
+ * @ept: the rpmsg endpoint
|
|
+ *
|
|
+ * This function returns maximum buffer size available for a single outgoing message.
|
|
+ *
|
|
+ * Return: the maximum transmission size on success and an appropriate error
|
|
+ * value on failure.
|
|
+ */
|
|
+
|
|
+ssize_t rpmsg_get_mtu(struct rpmsg_endpoint *ept)
|
|
+{
|
|
+ if (WARN_ON(!ept))
|
|
+ return -EINVAL;
|
|
+ if (!ept->ops->get_mtu)
|
|
+ return -ENOTSUPP;
|
|
+
|
|
+ return ept->ops->get_mtu(ept);
|
|
+}
|
|
+EXPORT_SYMBOL(rpmsg_get_mtu);
|
|
+
|
|
/*
|
|
* match a rpmsg channel with a channel info struct.
|
|
* this is used to make sure we're not creating rpmsg devices for channels
|
|
@@ -641,10 +665,17 @@ static int __init rpmsg_init(void)
|
|
{
|
|
int ret;
|
|
|
|
+ rpmsg_class = class_create(THIS_MODULE, "rpmsg");
|
|
+ if (IS_ERR(rpmsg_class)) {
|
|
+ pr_err("failed to create rpmsg class\n");
|
|
+ return PTR_ERR(rpmsg_class);
|
|
+ }
|
|
+
|
|
ret = bus_register(&rpmsg_bus);
|
|
- if (ret)
|
|
+ if (ret) {
|
|
pr_err("failed to register rpmsg bus: %d\n", ret);
|
|
-
|
|
+ class_destroy(rpmsg_class);
|
|
+ }
|
|
return ret;
|
|
}
|
|
postcore_initcall(rpmsg_init);
|
|
@@ -652,6 +683,7 @@ postcore_initcall(rpmsg_init);
|
|
static void __exit rpmsg_fini(void)
|
|
{
|
|
bus_unregister(&rpmsg_bus);
|
|
+ class_destroy(rpmsg_class);
|
|
}
|
|
module_exit(rpmsg_fini);
|
|
|
|
diff --git a/drivers/rpmsg/rpmsg_ctrl.c b/drivers/rpmsg/rpmsg_ctrl.c
|
|
new file mode 100644
|
|
index 000000000000..107da70fdbaa
|
|
--- /dev/null
|
|
+++ b/drivers/rpmsg/rpmsg_ctrl.c
|
|
@@ -0,0 +1,243 @@
|
|
+// SPDX-License-Identifier: GPL-2.0
|
|
+/*
|
|
+ * Copyright (C) 2022, STMicroelectronics
|
|
+ * Copyright (c) 2016, Linaro Ltd.
|
|
+ * Copyright (c) 2012, Michal Simek <monstr@monstr.eu>
|
|
+ * Copyright (c) 2012, PetaLogix
|
|
+ * Copyright (c) 2011, Texas Instruments, Inc.
|
|
+ * Copyright (c) 2011, Google, Inc.
|
|
+ *
|
|
+ * Based on rpmsg performance statistics driver by Michal Simek, which in turn
|
|
+ * was based on TI & Google OMX rpmsg driver.
|
|
+ */
|
|
+
|
|
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
|
|
+
|
|
+#include <linux/cdev.h>
|
|
+#include <linux/device.h>
|
|
+#include <linux/fs.h>
|
|
+#include <linux/idr.h>
|
|
+#include <linux/kernel.h>
|
|
+#include <linux/module.h>
|
|
+#include <linux/rpmsg.h>
|
|
+#include <linux/skbuff.h>
|
|
+#include <linux/slab.h>
|
|
+#include <linux/uaccess.h>
|
|
+#include <uapi/linux/rpmsg.h>
|
|
+
|
|
+#include "rpmsg_char.h"
|
|
+#include "rpmsg_internal.h"
|
|
+
|
|
+#define RPMSG_DEV_MAX (MINORMASK + 1)
|
|
+
|
|
+static dev_t rpmsg_major;
|
|
+
|
|
+static DEFINE_IDA(rpmsg_ctrl_ida);
|
|
+static DEFINE_IDA(rpmsg_minor_ida);
|
|
+
|
|
+#define dev_to_ctrldev(dev) container_of(dev, struct rpmsg_ctrldev, dev)
|
|
+#define cdev_to_ctrldev(i_cdev) container_of(i_cdev, struct rpmsg_ctrldev, cdev)
|
|
+
|
|
+/**
|
|
+ * struct rpmsg_ctrldev - control device for instantiating endpoint devices
|
|
+ * @rpdev: underlaying rpmsg device
|
|
+ * @cdev: cdev for the ctrl device
|
|
+ * @dev: device for the ctrl device
|
|
+ * @ctrl_lock: serialize the ioctrls.
|
|
+ */
|
|
+struct rpmsg_ctrldev {
|
|
+ struct rpmsg_device *rpdev;
|
|
+ struct cdev cdev;
|
|
+ struct device dev;
|
|
+ struct mutex ctrl_lock;
|
|
+};
|
|
+
|
|
+static int rpmsg_ctrldev_open(struct inode *inode, struct file *filp)
|
|
+{
|
|
+ struct rpmsg_ctrldev *ctrldev = cdev_to_ctrldev(inode->i_cdev);
|
|
+
|
|
+ get_device(&ctrldev->dev);
|
|
+ filp->private_data = ctrldev;
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int rpmsg_ctrldev_release(struct inode *inode, struct file *filp)
|
|
+{
|
|
+ struct rpmsg_ctrldev *ctrldev = cdev_to_ctrldev(inode->i_cdev);
|
|
+
|
|
+ put_device(&ctrldev->dev);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static long rpmsg_ctrldev_ioctl(struct file *fp, unsigned int cmd,
|
|
+ unsigned long arg)
|
|
+{
|
|
+ struct rpmsg_ctrldev *ctrldev = fp->private_data;
|
|
+ void __user *argp = (void __user *)arg;
|
|
+ struct rpmsg_endpoint_info eptinfo;
|
|
+ struct rpmsg_channel_info chinfo;
|
|
+ struct rpmsg_device *rpdev;
|
|
+ int ret = 0;
|
|
+
|
|
+ if (copy_from_user(&eptinfo, argp, sizeof(eptinfo)))
|
|
+ return -EFAULT;
|
|
+
|
|
+ memcpy(chinfo.name, eptinfo.name, RPMSG_NAME_SIZE);
|
|
+ chinfo.name[RPMSG_NAME_SIZE - 1] = '\0';
|
|
+ chinfo.src = eptinfo.src;
|
|
+ chinfo.dst = eptinfo.dst;
|
|
+
|
|
+ mutex_lock(&ctrldev->ctrl_lock);
|
|
+ switch (cmd) {
|
|
+ case RPMSG_CREATE_EPT_IOCTL:
|
|
+ ret = rpmsg_chrdev_eptdev_create(ctrldev->rpdev, &ctrldev->dev, chinfo);
|
|
+ break;
|
|
+
|
|
+ case RPMSG_CREATE_DEV_IOCTL:
|
|
+ rpdev = rpmsg_create_channel(ctrldev->rpdev, &chinfo);
|
|
+ if (!rpdev) {
|
|
+ dev_err(&ctrldev->dev, "failed to create %s channel\n", chinfo.name);
|
|
+ ret = -ENXIO;
|
|
+ }
|
|
+ break;
|
|
+
|
|
+ case RPMSG_RELEASE_DEV_IOCTL:
|
|
+ ret = rpmsg_release_channel(ctrldev->rpdev, &chinfo);
|
|
+ if (ret)
|
|
+ dev_err(&ctrldev->dev, "failed to release %s channel (%d)\n",
|
|
+ chinfo.name, ret);
|
|
+ break;
|
|
+
|
|
+ default:
|
|
+ ret = -EINVAL;
|
|
+ }
|
|
+ mutex_unlock(&ctrldev->ctrl_lock);
|
|
+
|
|
+ return ret;
|
|
+};
|
|
+
|
|
+static const struct file_operations rpmsg_ctrldev_fops = {
|
|
+ .owner = THIS_MODULE,
|
|
+ .open = rpmsg_ctrldev_open,
|
|
+ .release = rpmsg_ctrldev_release,
|
|
+ .unlocked_ioctl = rpmsg_ctrldev_ioctl,
|
|
+ .compat_ioctl = compat_ptr_ioctl,
|
|
+};
|
|
+
|
|
+static void rpmsg_ctrldev_release_device(struct device *dev)
|
|
+{
|
|
+ struct rpmsg_ctrldev *ctrldev = dev_to_ctrldev(dev);
|
|
+
|
|
+ ida_simple_remove(&rpmsg_ctrl_ida, dev->id);
|
|
+ ida_simple_remove(&rpmsg_minor_ida, MINOR(dev->devt));
|
|
+ kfree(ctrldev);
|
|
+}
|
|
+
|
|
+static int rpmsg_ctrldev_probe(struct rpmsg_device *rpdev)
|
|
+{
|
|
+ struct rpmsg_ctrldev *ctrldev;
|
|
+ struct device *dev;
|
|
+ int ret;
|
|
+
|
|
+ ctrldev = kzalloc(sizeof(*ctrldev), GFP_KERNEL);
|
|
+ if (!ctrldev)
|
|
+ return -ENOMEM;
|
|
+
|
|
+ ctrldev->rpdev = rpdev;
|
|
+
|
|
+ dev = &ctrldev->dev;
|
|
+ device_initialize(dev);
|
|
+ dev->parent = &rpdev->dev;
|
|
+ dev->class = rpmsg_class;
|
|
+
|
|
+ mutex_init(&ctrldev->ctrl_lock);
|
|
+ cdev_init(&ctrldev->cdev, &rpmsg_ctrldev_fops);
|
|
+ ctrldev->cdev.owner = THIS_MODULE;
|
|
+
|
|
+ ret = ida_simple_get(&rpmsg_minor_ida, 0, RPMSG_DEV_MAX, GFP_KERNEL);
|
|
+ if (ret < 0)
|
|
+ goto free_ctrldev;
|
|
+ dev->devt = MKDEV(MAJOR(rpmsg_major), ret);
|
|
+
|
|
+ ret = ida_simple_get(&rpmsg_ctrl_ida, 0, 0, GFP_KERNEL);
|
|
+ if (ret < 0)
|
|
+ goto free_minor_ida;
|
|
+ dev->id = ret;
|
|
+ dev_set_name(&ctrldev->dev, "rpmsg_ctrl%d", ret);
|
|
+
|
|
+ ret = cdev_device_add(&ctrldev->cdev, &ctrldev->dev);
|
|
+ if (ret)
|
|
+ goto free_ctrl_ida;
|
|
+
|
|
+ /* We can now rely on the release function for cleanup */
|
|
+ dev->release = rpmsg_ctrldev_release_device;
|
|
+
|
|
+ dev_set_drvdata(&rpdev->dev, ctrldev);
|
|
+
|
|
+ return ret;
|
|
+
|
|
+free_ctrl_ida:
|
|
+ ida_simple_remove(&rpmsg_ctrl_ida, dev->id);
|
|
+free_minor_ida:
|
|
+ ida_simple_remove(&rpmsg_minor_ida, MINOR(dev->devt));
|
|
+free_ctrldev:
|
|
+ put_device(dev);
|
|
+ kfree(ctrldev);
|
|
+
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+static void rpmsg_ctrldev_remove(struct rpmsg_device *rpdev)
|
|
+{
|
|
+ struct rpmsg_ctrldev *ctrldev = dev_get_drvdata(&rpdev->dev);
|
|
+ int ret;
|
|
+
|
|
+ /* Destroy all endpoints */
|
|
+ ret = device_for_each_child(&ctrldev->dev, NULL, rpmsg_chrdev_eptdev_destroy);
|
|
+ if (ret)
|
|
+ dev_warn(&rpdev->dev, "failed to nuke endpoints: %d\n", ret);
|
|
+
|
|
+ cdev_device_del(&ctrldev->cdev, &ctrldev->dev);
|
|
+ put_device(&ctrldev->dev);
|
|
+}
|
|
+
|
|
+static struct rpmsg_driver rpmsg_ctrldev_driver = {
|
|
+ .probe = rpmsg_ctrldev_probe,
|
|
+ .remove = rpmsg_ctrldev_remove,
|
|
+ .drv = {
|
|
+ .name = "rpmsg_ctrl",
|
|
+ },
|
|
+};
|
|
+
|
|
+static int rpmsg_ctrldev_init(void)
|
|
+{
|
|
+ int ret;
|
|
+
|
|
+ ret = alloc_chrdev_region(&rpmsg_major, 0, RPMSG_DEV_MAX, "rpmsg_ctrl");
|
|
+ if (ret < 0) {
|
|
+ pr_err("failed to allocate char dev region\n");
|
|
+ return ret;
|
|
+ }
|
|
+
|
|
+ ret = register_rpmsg_driver(&rpmsg_ctrldev_driver);
|
|
+ if (ret < 0) {
|
|
+ pr_err("failed to register rpmsg driver\n");
|
|
+ unregister_chrdev_region(rpmsg_major, RPMSG_DEV_MAX);
|
|
+ }
|
|
+
|
|
+ return ret;
|
|
+}
|
|
+postcore_initcall(rpmsg_ctrldev_init);
|
|
+
|
|
+static void rpmsg_ctrldev_exit(void)
|
|
+{
|
|
+ unregister_rpmsg_driver(&rpmsg_ctrldev_driver);
|
|
+ unregister_chrdev_region(rpmsg_major, RPMSG_DEV_MAX);
|
|
+}
|
|
+module_exit(rpmsg_ctrldev_exit);
|
|
+
|
|
+MODULE_DESCRIPTION("rpmsg control interface");
|
|
+MODULE_ALIAS("rpmsg:" KBUILD_MODNAME);
|
|
+MODULE_LICENSE("GPL v2");
|
|
diff --git a/drivers/rpmsg/rpmsg_internal.h b/drivers/rpmsg/rpmsg_internal.h
|
|
index a76c344253bf..d4b23fd019a8 100644
|
|
--- a/drivers/rpmsg/rpmsg_internal.h
|
|
+++ b/drivers/rpmsg/rpmsg_internal.h
|
|
@@ -18,6 +18,8 @@
|
|
#define to_rpmsg_device(d) container_of(d, struct rpmsg_device, dev)
|
|
#define to_rpmsg_driver(d) container_of(d, struct rpmsg_driver, drv)
|
|
|
|
+extern struct class *rpmsg_class;
|
|
+
|
|
/**
|
|
* struct rpmsg_device_ops - indirection table for the rpmsg_device operations
|
|
* @create_channel: create backend-specific channel, optional
|
|
@@ -53,6 +55,7 @@ struct rpmsg_device_ops {
|
|
* @trysendto: see @rpmsg_trysendto(), optional
|
|
* @trysend_offchannel: see @rpmsg_trysend_offchannel(), optional
|
|
* @poll: see @rpmsg_poll(), optional
|
|
+ * @get_mtu: see @rpmsg_get_mtu(), optional
|
|
*
|
|
* Indirection table for the operations that a rpmsg backend should implement.
|
|
* In addition to @destroy_ept, the backend must at least implement @send and
|
|
@@ -72,6 +75,7 @@ struct rpmsg_endpoint_ops {
|
|
void *data, int len);
|
|
__poll_t (*poll)(struct rpmsg_endpoint *ept, struct file *filp,
|
|
poll_table *wait);
|
|
+ ssize_t (*get_mtu)(struct rpmsg_endpoint *ept);
|
|
};
|
|
|
|
struct device *rpmsg_find_device(struct device *parent,
|
|
@@ -82,16 +86,16 @@ struct rpmsg_device *rpmsg_create_channel(struct rpmsg_device *rpdev,
|
|
int rpmsg_release_channel(struct rpmsg_device *rpdev,
|
|
struct rpmsg_channel_info *chinfo);
|
|
/**
|
|
- * rpmsg_chrdev_register_device() - register chrdev device based on rpdev
|
|
+ * rpmsg_ctrldev_register_device() - register a char device for control based on rpdev
|
|
* @rpdev: prepared rpdev to be used for creating endpoints
|
|
*
|
|
* This function wraps rpmsg_register_device() preparing the rpdev for use as
|
|
* basis for the rpmsg chrdev.
|
|
*/
|
|
-static inline int rpmsg_chrdev_register_device(struct rpmsg_device *rpdev)
|
|
+static inline int rpmsg_ctrldev_register_device(struct rpmsg_device *rpdev)
|
|
{
|
|
- strcpy(rpdev->id.name, "rpmsg_chrdev");
|
|
- rpdev->driver_override = "rpmsg_chrdev";
|
|
+ strcpy(rpdev->id.name, "rpmsg_ctrl");
|
|
+ rpdev->driver_override = "rpmsg_ctrl";
|
|
|
|
return rpmsg_register_device(rpdev);
|
|
}
|
|
diff --git a/drivers/rpmsg/virtio_rpmsg_bus.c b/drivers/rpmsg/virtio_rpmsg_bus.c
|
|
index b03e7404212f..1c54fe2fac5b 100644
|
|
--- a/drivers/rpmsg/virtio_rpmsg_bus.c
|
|
+++ b/drivers/rpmsg/virtio_rpmsg_bus.c
|
|
@@ -149,6 +149,7 @@ static int virtio_rpmsg_trysendto(struct rpmsg_endpoint *ept, void *data,
|
|
int len, u32 dst);
|
|
static int virtio_rpmsg_trysend_offchannel(struct rpmsg_endpoint *ept, u32 src,
|
|
u32 dst, void *data, int len);
|
|
+static ssize_t virtio_rpmsg_get_mtu(struct rpmsg_endpoint *ept);
|
|
static struct rpmsg_device *__rpmsg_create_channel(struct virtproc_info *vrp,
|
|
struct rpmsg_channel_info *chinfo);
|
|
|
|
@@ -160,6 +161,7 @@ static const struct rpmsg_endpoint_ops virtio_endpoint_ops = {
|
|
.trysend = virtio_rpmsg_trysend,
|
|
.trysendto = virtio_rpmsg_trysendto,
|
|
.trysend_offchannel = virtio_rpmsg_trysend_offchannel,
|
|
+ .get_mtu = virtio_rpmsg_get_mtu,
|
|
};
|
|
|
|
/**
|
|
@@ -696,6 +698,14 @@ static int virtio_rpmsg_trysend_offchannel(struct rpmsg_endpoint *ept, u32 src,
|
|
return rpmsg_send_offchannel_raw(rpdev, src, dst, data, len, false);
|
|
}
|
|
|
|
+static ssize_t virtio_rpmsg_get_mtu(struct rpmsg_endpoint *ept)
|
|
+{
|
|
+ struct rpmsg_device *rpdev = ept->rpdev;
|
|
+ struct virtio_rpmsg_channel *vch = to_virtio_rpmsg_channel(rpdev);
|
|
+
|
|
+ return vch->vrp->buf_size - sizeof(struct rpmsg_hdr);
|
|
+}
|
|
+
|
|
static int rpmsg_recv_single(struct virtproc_info *vrp, struct device *dev,
|
|
struct rpmsg_hdr *msg, unsigned int len)
|
|
{
|
|
@@ -840,7 +850,7 @@ static struct rpmsg_device *rpmsg_virtio_add_ctrl_dev(struct virtio_device *vdev
|
|
rpdev_ctrl->dev.release = virtio_rpmsg_release_device;
|
|
rpdev_ctrl->little_endian = virtio_is_little_endian(vrp->vdev);
|
|
|
|
- err = rpmsg_chrdev_register_device(rpdev_ctrl);
|
|
+ err = rpmsg_ctrldev_register_device(rpdev_ctrl);
|
|
if (err) {
|
|
/* vch will be free in virtio_rpmsg_release_device() */
|
|
return ERR_PTR(err);
|
|
diff --git a/drivers/tee/amdtee/amdtee_private.h b/drivers/tee/amdtee/amdtee_private.h
|
|
index 6d0f7062bb87..73c183510c26 100644
|
|
--- a/drivers/tee/amdtee/amdtee_private.h
|
|
+++ b/drivers/tee/amdtee/amdtee_private.h
|
|
@@ -135,13 +135,17 @@ static inline u32 get_session_index(u32 session)
|
|
|
|
int amdtee_open_session(struct tee_context *ctx,
|
|
struct tee_ioctl_open_session_arg *arg,
|
|
- struct tee_param *param);
|
|
+ struct tee_param *normal_param,
|
|
+ u32 num_normal_params,
|
|
+ struct tee_param *ocall_param);
|
|
|
|
int amdtee_close_session(struct tee_context *ctx, u32 session);
|
|
|
|
int amdtee_invoke_func(struct tee_context *ctx,
|
|
struct tee_ioctl_invoke_arg *arg,
|
|
- struct tee_param *param);
|
|
+ struct tee_param *normal_param,
|
|
+ u32 num_normal_params,
|
|
+ struct tee_param *ocall_param);
|
|
|
|
int amdtee_cancel_req(struct tee_context *ctx, u32 cancel_id, u32 session);
|
|
|
|
diff --git a/drivers/tee/amdtee/core.c b/drivers/tee/amdtee/core.c
|
|
index 372d64756ed6..78511ae99b91 100644
|
|
--- a/drivers/tee/amdtee/core.c
|
|
+++ b/drivers/tee/amdtee/core.c
|
|
@@ -230,7 +230,9 @@ static void destroy_session(struct kref *ref)
|
|
|
|
int amdtee_open_session(struct tee_context *ctx,
|
|
struct tee_ioctl_open_session_arg *arg,
|
|
- struct tee_param *param)
|
|
+ struct tee_param *normal_param,
|
|
+ u32 num_normal_params,
|
|
+ struct tee_param *ocall_param)
|
|
{
|
|
struct amdtee_context_data *ctxdata = ctx->data;
|
|
struct amdtee_session *sess = NULL;
|
|
@@ -239,6 +241,11 @@ int amdtee_open_session(struct tee_context *ctx,
|
|
int rc, i;
|
|
void *ta;
|
|
|
|
+ if (ocall_param) {
|
|
+ pr_err("OCALLs not supported\n");
|
|
+ return -EOPNOTSUPP;
|
|
+ }
|
|
+
|
|
if (arg->clnt_login != TEE_IOCTL_LOGIN_PUBLIC) {
|
|
pr_err("unsupported client login method\n");
|
|
return -EINVAL;
|
|
@@ -268,7 +275,7 @@ int amdtee_open_session(struct tee_context *ctx,
|
|
}
|
|
|
|
/* Open session with loaded TA */
|
|
- handle_open_session(arg, &session_info, param);
|
|
+ handle_open_session(arg, &session_info, normal_param);
|
|
if (arg->ret != TEEC_SUCCESS) {
|
|
pr_err("open_session failed %d\n", arg->ret);
|
|
handle_unload_ta(ta_handle);
|
|
@@ -404,12 +411,19 @@ void amdtee_unmap_shmem(struct tee_shm *shm)
|
|
|
|
int amdtee_invoke_func(struct tee_context *ctx,
|
|
struct tee_ioctl_invoke_arg *arg,
|
|
- struct tee_param *param)
|
|
+ struct tee_param *normal_param,
|
|
+ u32 num_normal_params,
|
|
+ struct tee_param *ocall_param)
|
|
{
|
|
struct amdtee_context_data *ctxdata = ctx->data;
|
|
struct amdtee_session *sess;
|
|
u32 i, session_info;
|
|
|
|
+ if (ocall_param) {
|
|
+ pr_err("OCALLs not supported\n");
|
|
+ return -EOPNOTSUPP;
|
|
+ }
|
|
+
|
|
/* Check that the session is valid */
|
|
mutex_lock(&session_list_mutex);
|
|
sess = find_session(ctxdata, arg->session);
|
|
@@ -422,7 +436,7 @@ int amdtee_invoke_func(struct tee_context *ctx,
|
|
if (!sess)
|
|
return -EINVAL;
|
|
|
|
- handle_invoke_cmd(arg, session_info, param);
|
|
+ handle_invoke_cmd(arg, session_info, normal_param);
|
|
|
|
return 0;
|
|
}
|
|
diff --git a/drivers/tee/optee/Makefile b/drivers/tee/optee/Makefile
|
|
index 3aa33ea9e6a6..446d65dc83b8 100644
|
|
--- a/drivers/tee/optee/Makefile
|
|
+++ b/drivers/tee/optee/Makefile
|
|
@@ -1,7 +1,9 @@
|
|
# SPDX-License-Identifier: GPL-2.0
|
|
obj-$(CONFIG_OPTEE) += optee.o
|
|
optee-objs += core.o
|
|
+optee-objs += call_queue.o
|
|
optee-objs += call.o
|
|
+optee-objs += notif.o
|
|
optee-objs += rpc.o
|
|
optee-objs += supp.o
|
|
optee-objs += shm_pool.o
|
|
diff --git a/drivers/tee/optee/call.c b/drivers/tee/optee/call.c
|
|
index 945f03da0223..f2b495ee8c8f 100644
|
|
--- a/drivers/tee/optee/call.c
|
|
+++ b/drivers/tee/optee/call.c
|
|
@@ -17,126 +17,289 @@
|
|
#define CREATE_TRACE_POINTS
|
|
#include "optee_trace.h"
|
|
|
|
-struct optee_call_waiter {
|
|
- struct list_head list_node;
|
|
- struct completion c;
|
|
-};
|
|
-
|
|
-static void optee_cq_wait_init(struct optee_call_queue *cq,
|
|
- struct optee_call_waiter *w)
|
|
+/* Requires the filpstate mutex to be held */
|
|
+static struct optee_session *find_session(struct optee_context_data *ctxdata,
|
|
+ u32 session_id)
|
|
{
|
|
- /*
|
|
- * We're preparing to make a call to secure world. In case we can't
|
|
- * allocate a thread in secure world we'll end up waiting in
|
|
- * optee_cq_wait_for_completion().
|
|
- *
|
|
- * Normally if there's no contention in secure world the call will
|
|
- * complete and we can cleanup directly with optee_cq_wait_final().
|
|
- */
|
|
- mutex_lock(&cq->mutex);
|
|
+ struct optee_session *sess;
|
|
|
|
- /*
|
|
- * We add ourselves to the queue, but we don't wait. This
|
|
- * guarantees that we don't lose a completion if secure world
|
|
- * returns busy and another thread just exited and try to complete
|
|
- * someone.
|
|
- */
|
|
- init_completion(&w->c);
|
|
- list_add_tail(&w->list_node, &cq->waiters);
|
|
+ list_for_each_entry(sess, &ctxdata->sess_list, list_node)
|
|
+ if (sess->session_id == session_id)
|
|
+ return sess;
|
|
|
|
- mutex_unlock(&cq->mutex);
|
|
+ return NULL;
|
|
}
|
|
|
|
-static void optee_cq_wait_for_completion(struct optee_call_queue *cq,
|
|
- struct optee_call_waiter *w)
|
|
+static void param_clear_ocall(struct tee_param *ocall)
|
|
{
|
|
- wait_for_completion(&w->c);
|
|
+ if (ocall)
|
|
+ memset(&ocall->u, 0, sizeof(ocall->u));
|
|
+}
|
|
|
|
- mutex_lock(&cq->mutex);
|
|
+static u64 param_get_ocall_func(struct tee_param *param)
|
|
+{
|
|
+ return TEE_IOCTL_OCALL_GET_FUNC(param->u.value.a);
|
|
+}
|
|
|
|
- /* Move to end of list to get out of the way for other waiters */
|
|
- list_del(&w->list_node);
|
|
- reinit_completion(&w->c);
|
|
- list_add_tail(&w->list_node, &cq->waiters);
|
|
+/* Requires @sem in the parent struct optee_session to be held */
|
|
+static int verify_ocall_request(u32 num_params, struct optee_call_ctx *call_ctx)
|
|
+{
|
|
+ struct optee_msg_arg *arg = call_ctx->rpc_arg;
|
|
+
|
|
+ switch (arg->cmd) {
|
|
+ case OPTEE_MSG_RPC_CMD_OCALL:
|
|
+ /* 'num_params' is checked later */
|
|
+
|
|
+ /* These parameters carry the OCALL descriptors */
|
|
+ if (arg->num_params < 2 ||
|
|
+ arg->params[0].attr != OPTEE_MSG_ATTR_TYPE_VALUE_INOUT ||
|
|
+ arg->params[1].attr != OPTEE_MSG_ATTR_TYPE_VALUE_INPUT ||
|
|
+ arg->params[0].u.value.a > U32_MAX || /* OCALL Cmd Id */
|
|
+ arg->params[1].u.value.c != 0) /* TA UUID (128 bytes) */
|
|
+ return -EINVAL;
|
|
+ break;
|
|
+ default:
|
|
+ return -EINVAL;
|
|
+ }
|
|
|
|
- mutex_unlock(&cq->mutex);
|
|
+ return 0;
|
|
}
|
|
|
|
-static void optee_cq_complete_one(struct optee_call_queue *cq)
|
|
+/* Requires @sem in the parent struct optee_session to be held */
|
|
+static int verify_ocall_reply(u64 func, struct tee_param *params,
|
|
+ u32 num_params, struct optee_call_ctx *call_ctx)
|
|
{
|
|
- struct optee_call_waiter *w;
|
|
+ size_t n;
|
|
+
|
|
+ switch (func) {
|
|
+ case TEE_IOCTL_OCALL_CMD_INVOKE:
|
|
+ if (call_ctx->rpc_arg->cmd != OPTEE_MSG_RPC_CMD_OCALL)
|
|
+ return -EINVAL;
|
|
+
|
|
+ /* Skip the loop below */
|
|
+ return 0;
|
|
+ default:
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
+ /* The remaining parameters are unused */
|
|
+ for (n = 1; n < num_params; n++)
|
|
+ if (params[n].attr != TEE_IOCTL_PARAM_ATTR_TYPE_NONE)
|
|
+ return -EINVAL;
|
|
+
|
|
+ return 0;
|
|
+}
|
|
|
|
- list_for_each_entry(w, &cq->waiters, list_node) {
|
|
- if (!completion_done(&w->c)) {
|
|
- complete(&w->c);
|
|
+/* Requires @sem in the parent struct optee_session to be held */
|
|
+static void process_ocall_memrefs(struct optee_msg_param *params,
|
|
+ u32 num_params, bool increment)
|
|
+{
|
|
+ size_t n;
|
|
+
|
|
+ for (n = 0; n < num_params; n++) {
|
|
+ struct tee_shm *shm;
|
|
+ const struct optee_msg_param *mp = params + n;
|
|
+ u32 attr = mp->attr & OPTEE_MSG_ATTR_TYPE_MASK;
|
|
+
|
|
+ switch (attr) {
|
|
+ case OPTEE_MSG_ATTR_TYPE_RMEM_INPUT:
|
|
+ case OPTEE_MSG_ATTR_TYPE_RMEM_OUTPUT:
|
|
+ case OPTEE_MSG_ATTR_TYPE_RMEM_INOUT:
|
|
+ shm = (struct tee_shm *)(uintptr_t)mp->u.rmem.shm_ref;
|
|
+ break;
|
|
+ default:
|
|
+ shm = NULL;
|
|
break;
|
|
}
|
|
+
|
|
+ if (!shm)
|
|
+ continue;
|
|
+
|
|
+ if (increment)
|
|
+ tee_shm_get(shm);
|
|
+ else
|
|
+ tee_shm_put(shm);
|
|
}
|
|
}
|
|
|
|
-static void optee_cq_wait_final(struct optee_call_queue *cq,
|
|
- struct optee_call_waiter *w)
|
|
+/*
|
|
+ * Requires @sem in the parent struct optee_session to be held (if OCALLs are
|
|
+ * expected)
|
|
+ */
|
|
+static void call_prologue(struct optee_call_ctx *call_ctx)
|
|
+{
|
|
+ struct optee *optee = tee_get_drvdata(call_ctx->ctx->teedev);
|
|
+
|
|
+ /* Initialize waiter */
|
|
+ optee_cq_wait_init(&optee->call_queue, &call_ctx->waiter);
|
|
+}
|
|
+
|
|
+/*
|
|
+ * Requires @sem in the parent struct optee_session to be held (if OCALLs are
|
|
+ * expected)
|
|
+ */
|
|
+static void call_epilogue(struct optee_call_ctx *call_ctx)
|
|
{
|
|
+ struct optee *optee = tee_get_drvdata(call_ctx->ctx->teedev);
|
|
+
|
|
+ optee_rpc_finalize_call(call_ctx);
|
|
+
|
|
/*
|
|
- * We're done with the call to secure world. The thread in secure
|
|
- * world that was used for this call is now available for some
|
|
- * other task to use.
|
|
+ * We're done with our thread in secure world, if there's any
|
|
+ * thread waiters wake up one.
|
|
*/
|
|
- mutex_lock(&cq->mutex);
|
|
+ optee_cq_wait_final(&optee->call_queue, &call_ctx->waiter);
|
|
+}
|
|
+
|
|
+/* Requires @sem in the parent struct optee_session to be held */
|
|
+static int process_ocall_request(struct tee_param *params, u32 num_params,
|
|
+ struct tee_param *ocall,
|
|
+ struct optee_call_ctx *call_ctx)
|
|
+{
|
|
+ u32 cmd_id;
|
|
+ struct optee_msg_param *msg_param;
|
|
+ u32 msg_num_params;
|
|
+ int rc = 0;
|
|
|
|
- /* Get out of the list */
|
|
- list_del(&w->list_node);
|
|
+ /*
|
|
+ * Points to the octets of the UUID corresponding to the TA requesting
|
|
+ * the OCALL, if applicable for this call.
|
|
+ */
|
|
+ void *clnt_id;
|
|
|
|
- /* Wake up one eventual waiting task */
|
|
- optee_cq_complete_one(cq);
|
|
+ rc = verify_ocall_request(num_params, call_ctx);
|
|
+ if (rc)
|
|
+ goto exit_set_ret;
|
|
|
|
/*
|
|
- * If we're completed we've got a completion from another task that
|
|
- * was just done with its call to secure world. Since yet another
|
|
- * thread now is available in secure world wake up another eventual
|
|
- * waiting task.
|
|
+ * Clear out the parameters of the original function invocation. The
|
|
+ * original contents are backed up in call_ctx->msg_arg and will be
|
|
+ * restored elsewhere once the OCALL is over.
|
|
*/
|
|
- if (completion_done(&w->c))
|
|
- optee_cq_complete_one(cq);
|
|
+ memset(params, 0, num_params * sizeof(*params));
|
|
+
|
|
+ /* Set up the OCALL request */
|
|
+ switch (call_ctx->rpc_arg->cmd) {
|
|
+ case OPTEE_MSG_RPC_CMD_OCALL:
|
|
+ /* -2 here and +2 below to skip the OCALL descriptors */
|
|
+ msg_num_params = call_ctx->rpc_arg->num_params - 2;
|
|
+ if (num_params < msg_num_params) {
|
|
+ rc = -EINVAL;
|
|
+ goto exit_set_ret;
|
|
+ }
|
|
+
|
|
+ msg_param = call_ctx->rpc_arg->params + 2;
|
|
+ rc = optee_from_msg_param(params, msg_num_params, msg_param);
|
|
+ if (rc)
|
|
+ goto exit_set_ret;
|
|
+
|
|
+ process_ocall_memrefs(msg_param, msg_num_params, true);
|
|
+ call_ctx->rpc_must_release = true;
|
|
|
|
- mutex_unlock(&cq->mutex);
|
|
+ cmd_id = (u32)call_ctx->rpc_arg->params[0].u.value.a;
|
|
+ ocall->u.value.a =
|
|
+ TEE_IOCTL_OCALL_MAKE_PAIR(TEE_IOCTL_OCALL_CMD_INVOKE,
|
|
+ cmd_id);
|
|
+
|
|
+ clnt_id = &call_ctx->rpc_arg->params[1].u.value;
|
|
+ memcpy(&ocall->u.value.b, clnt_id, TEE_IOCTL_UUID_LEN);
|
|
+ break;
|
|
+ default:
|
|
+ /* NOT REACHED */
|
|
+ rc = -EINVAL;
|
|
+ goto exit_set_ret;
|
|
+ }
|
|
+
|
|
+ return rc;
|
|
+
|
|
+exit_set_ret:
|
|
+ call_ctx->rpc_arg->ret = TEEC_ERROR_BAD_PARAMETERS;
|
|
+ call_ctx->rpc_arg->ret_origin = TEEC_ORIGIN_COMMS;
|
|
+ return rc;
|
|
}
|
|
|
|
-/* Requires the filpstate mutex to be held */
|
|
-static struct optee_session *find_session(struct optee_context_data *ctxdata,
|
|
- u32 session_id)
|
|
+/* Requires @sem in the parent struct optee_session to be held */
|
|
+static int process_ocall_reply(u32 ret, u32 ret_origin,
|
|
+ struct tee_param *params, u32 num_params,
|
|
+ struct tee_param *ocall,
|
|
+ struct optee_call_ctx *call_ctx)
|
|
{
|
|
- struct optee_session *sess;
|
|
+ const u64 func = param_get_ocall_func(ocall);
|
|
+ struct optee_msg_param *msg_param;
|
|
+ u32 msg_num_params;
|
|
+ int rc = 0;
|
|
|
|
- list_for_each_entry(sess, &ctxdata->sess_list, list_node)
|
|
- if (sess->session_id == session_id)
|
|
- return sess;
|
|
+ rc = verify_ocall_reply(func, params, num_params, call_ctx);
|
|
+ if (rc)
|
|
+ goto exit_set_ret;
|
|
+
|
|
+ switch (func) {
|
|
+ case TEE_IOCTL_OCALL_CMD_INVOKE:
|
|
+ /* -2 here and +2 below to skip the OCALL descriptors */
|
|
+ msg_num_params = call_ctx->rpc_arg->num_params - 2;
|
|
+ if (num_params < msg_num_params) {
|
|
+ rc = -EINVAL;
|
|
+ goto exit_set_ret;
|
|
+ }
|
|
|
|
- return NULL;
|
|
+ msg_param = call_ctx->rpc_arg->params + 2;
|
|
+ rc = optee_to_msg_param(msg_param, msg_num_params, params);
|
|
+ if (rc)
|
|
+ goto exit_set_ret;
|
|
+
|
|
+ process_ocall_memrefs(msg_param, msg_num_params, false);
|
|
+ call_ctx->rpc_must_release = false;
|
|
+
|
|
+ call_ctx->rpc_arg->params[0].u.value.b = ret;
|
|
+ call_ctx->rpc_arg->params[0].u.value.c = ret_origin;
|
|
+ break;
|
|
+ default:
|
|
+ rc = -EINVAL;
|
|
+ goto exit_set_ret;
|
|
+ }
|
|
+
|
|
+ call_ctx->rpc_arg->ret = TEEC_SUCCESS;
|
|
+ call_ctx->rpc_arg->ret_origin = TEEC_ORIGIN_COMMS;
|
|
+
|
|
+ return rc;
|
|
+
|
|
+exit_set_ret:
|
|
+ call_ctx->rpc_arg->ret = TEEC_ERROR_BAD_PARAMETERS;
|
|
+ call_ctx->rpc_arg->ret_origin = TEEC_ORIGIN_COMMS;
|
|
+ return rc;
|
|
+}
|
|
+
|
|
+static void clear_call_ctx(struct optee_call_ctx *call_ctx)
|
|
+{
|
|
+ memset(call_ctx, 0, sizeof(*call_ctx));
|
|
}
|
|
|
|
/**
|
|
- * optee_do_call_with_arg() - Do an SMC to OP-TEE in secure world
|
|
- * @ctx: calling context
|
|
- * @parg: physical address of message to pass to secure world
|
|
+ * optee_do_call_with_ctx() - Invoke OP-TEE in secure world
|
|
+ * @call_ctx: calling context
|
|
*
|
|
* Does and SMC to OP-TEE in secure world and handles eventual resulting
|
|
* Remote Procedure Calls (RPC) from OP-TEE.
|
|
*
|
|
- * Returns return code from secure world, 0 is OK
|
|
+ * Returns return code from secure world, 0 is OK, -EAGAIN means an OCALL
|
|
+ * request was received.
|
|
*/
|
|
-u32 optee_do_call_with_arg(struct tee_context *ctx, phys_addr_t parg)
|
|
+static u32 optee_do_call_with_ctx(struct optee_call_ctx *call_ctx)
|
|
{
|
|
- struct optee *optee = tee_get_drvdata(ctx->teedev);
|
|
- struct optee_call_waiter w;
|
|
+ struct optee *optee = tee_get_drvdata(call_ctx->ctx->teedev);
|
|
struct optee_rpc_param param = { };
|
|
- struct optee_call_ctx call_ctx = { };
|
|
u32 ret;
|
|
|
|
- param.a0 = OPTEE_SMC_CALL_WITH_ARG;
|
|
- reg_pair_from_64(¶m.a1, ¶m.a2, parg);
|
|
- /* Initialize waiter */
|
|
- optee_cq_wait_init(&optee->call_queue, &w);
|
|
+ if (call_ctx->rpc_shm) {
|
|
+ param.a0 = OPTEE_SMC_CALL_RETURN_FROM_RPC;
|
|
+ reg_pair_from_64(¶m.a1, ¶m.a2,
|
|
+ (uintptr_t)call_ctx->rpc_shm);
|
|
+ param.a3 = call_ctx->thread_id;
|
|
+ } else {
|
|
+ param.a0 = OPTEE_SMC_CALL_WITH_ARG;
|
|
+ reg_pair_from_64(¶m.a1, ¶m.a2, call_ctx->msg_parg);
|
|
+ }
|
|
+
|
|
while (true) {
|
|
struct arm_smccc_res res;
|
|
|
|
@@ -148,36 +311,68 @@ u32 optee_do_call_with_arg(struct tee_context *ctx, phys_addr_t parg)
|
|
|
|
if (res.a0 == OPTEE_SMC_RETURN_ETHREAD_LIMIT) {
|
|
/*
|
|
- * Out of threads in secure world, wait for a thread
|
|
+ * Out of threads in secure world, wait for a thread to
|
|
* become available.
|
|
*/
|
|
- optee_cq_wait_for_completion(&optee->call_queue, &w);
|
|
+ optee_cq_wait_for_completion(&optee->call_queue,
|
|
+ &call_ctx->waiter);
|
|
} else if (OPTEE_SMC_RETURN_IS_RPC(res.a0)) {
|
|
- cond_resched();
|
|
+ if (need_resched())
|
|
+ cond_resched();
|
|
param.a0 = res.a0;
|
|
param.a1 = res.a1;
|
|
param.a2 = res.a2;
|
|
param.a3 = res.a3;
|
|
- optee_handle_rpc(ctx, ¶m, &call_ctx);
|
|
+
|
|
+ if (optee_rpc_is_ocall(¶m, call_ctx))
|
|
+ return -EAGAIN;
|
|
+
|
|
+ optee_handle_rpc(call_ctx->ctx, ¶m, call_ctx);
|
|
} else {
|
|
ret = res.a0;
|
|
break;
|
|
}
|
|
}
|
|
|
|
- optee_rpc_finalize_call(&call_ctx);
|
|
- /*
|
|
- * We're done with our thread in secure world, if there's any
|
|
- * thread waiters wake up one.
|
|
- */
|
|
- optee_cq_wait_final(&optee->call_queue, &w);
|
|
-
|
|
return ret;
|
|
}
|
|
|
|
-static struct tee_shm *get_msg_arg(struct tee_context *ctx, size_t num_params,
|
|
- struct optee_msg_arg **msg_arg,
|
|
- phys_addr_t *msg_parg)
|
|
+/**
|
|
+ * optee_do_call_with_arg() - Invoke OP-TEE in secure world
|
|
+ * @ctx: calling context
|
|
+ * @parg: physical address of message to pass to secure world
|
|
+ *
|
|
+ * Wraps a call to optee_do_call_with_ctx that sets up the calling context on
|
|
+ * behalf of a caller that does not expect OCALLs.
|
|
+ *
|
|
+ * Returns return code from secure world, 0 is OK
|
|
+ */
|
|
+u32 optee_do_call_with_arg(struct tee_context *ctx, phys_addr_t parg)
|
|
+{
|
|
+ struct optee_call_ctx call_ctx = { };
|
|
+ int rc;
|
|
+
|
|
+ call_ctx.ctx = ctx;
|
|
+ call_ctx.msg_parg = parg;
|
|
+
|
|
+ call_prologue(&call_ctx);
|
|
+
|
|
+ rc = optee_do_call_with_ctx(&call_ctx);
|
|
+ if (rc == -EAGAIN) {
|
|
+ pr_warn("received an unexpected OCALL, cancelling it now");
|
|
+ call_ctx.rpc_arg->ret = TEEC_ERROR_NOT_SUPPORTED;
|
|
+ call_ctx.rpc_arg->ret_origin = TEEC_ORIGIN_COMMS;
|
|
+ optee_do_call_with_ctx(&call_ctx);
|
|
+ }
|
|
+
|
|
+ call_epilogue(&call_ctx);
|
|
+
|
|
+ return rc;
|
|
+}
|
|
+
|
|
+struct tee_shm *optee_get_msg_arg(struct tee_context *ctx, size_t num_params,
|
|
+ struct optee_msg_arg **msg_arg,
|
|
+ phys_addr_t *msg_parg)
|
|
{
|
|
int rc;
|
|
struct tee_shm *shm;
|
|
@@ -210,90 +405,249 @@ static struct tee_shm *get_msg_arg(struct tee_context *ctx, size_t num_params,
|
|
return shm;
|
|
}
|
|
|
|
-int optee_open_session(struct tee_context *ctx,
|
|
- struct tee_ioctl_open_session_arg *arg,
|
|
- struct tee_param *param)
|
|
+/*
|
|
+ * Requires @sem in the parent struct optee_session to be held; the caller is
|
|
+ * expected to have filled in the ret and ret_origin elements of rpc_arg.
|
|
+ */
|
|
+static int cancel_ocall(struct optee_call_ctx *call_ctx)
|
|
{
|
|
- struct optee_context_data *ctxdata = ctx->data;
|
|
int rc;
|
|
+
|
|
+ /* +2 and -2 to skip the OCALL descriptors */
|
|
+ if (call_ctx->rpc_must_release) {
|
|
+ process_ocall_memrefs(call_ctx->rpc_arg->params + 2,
|
|
+ call_ctx->rpc_arg->num_params - 2, false);
|
|
+ call_ctx->rpc_must_release = false;
|
|
+ }
|
|
+
|
|
+ rc = optee_do_call_with_ctx(call_ctx);
|
|
+ if (rc == -EAGAIN)
|
|
+ pr_warn("received an OCALL while cancelling an OCALL");
|
|
+
|
|
+ call_epilogue(call_ctx);
|
|
+
|
|
+ return rc;
|
|
+}
|
|
+
|
|
+static int close_session(struct tee_context *ctx, u32 session)
|
|
+{
|
|
struct tee_shm *shm;
|
|
struct optee_msg_arg *msg_arg;
|
|
phys_addr_t msg_parg;
|
|
- struct optee_session *sess = NULL;
|
|
- uuid_t client_uuid;
|
|
|
|
- /* +2 for the meta parameters added below */
|
|
- shm = get_msg_arg(ctx, arg->num_params + 2, &msg_arg, &msg_parg);
|
|
+ shm = optee_get_msg_arg(ctx, 0, &msg_arg, &msg_parg);
|
|
if (IS_ERR(shm))
|
|
return PTR_ERR(shm);
|
|
|
|
- msg_arg->cmd = OPTEE_MSG_CMD_OPEN_SESSION;
|
|
- msg_arg->cancel_id = arg->cancel_id;
|
|
+ msg_arg->cmd = OPTEE_MSG_CMD_CLOSE_SESSION;
|
|
+ msg_arg->session = session;
|
|
+ optee_do_call_with_arg(ctx, msg_parg);
|
|
|
|
- /*
|
|
- * Initialize and add the meta parameters needed when opening a
|
|
- * session.
|
|
- */
|
|
- msg_arg->params[0].attr = OPTEE_MSG_ATTR_TYPE_VALUE_INPUT |
|
|
- OPTEE_MSG_ATTR_META;
|
|
- msg_arg->params[1].attr = OPTEE_MSG_ATTR_TYPE_VALUE_INPUT |
|
|
- OPTEE_MSG_ATTR_META;
|
|
- memcpy(&msg_arg->params[0].u.value, arg->uuid, sizeof(arg->uuid));
|
|
- msg_arg->params[1].u.value.c = arg->clnt_login;
|
|
-
|
|
- rc = tee_session_calc_client_uuid(&client_uuid, arg->clnt_login,
|
|
- arg->clnt_uuid);
|
|
- if (rc)
|
|
- goto out;
|
|
- export_uuid(msg_arg->params[1].u.octets, &client_uuid);
|
|
+ tee_shm_free(shm);
|
|
+ return 0;
|
|
+}
|
|
|
|
- rc = optee_to_msg_param(msg_arg->params + 2, arg->num_params, param);
|
|
- if (rc)
|
|
- goto out;
|
|
+int optee_open_session(struct tee_context *ctx,
|
|
+ struct tee_ioctl_open_session_arg *arg,
|
|
+ struct tee_param *normal_param, u32 num_normal_params,
|
|
+ struct tee_param *ocall_param)
|
|
+{
|
|
+ struct optee_context_data *ctxdata = ctx->data;
|
|
+ struct optee_session *sess = NULL;
|
|
+ struct optee_call_ctx *call_ctx = NULL;
|
|
+ int sess_tmp_id;
|
|
+ u64 ocall_func;
|
|
+ int rc = 0;
|
|
|
|
- sess = kzalloc(sizeof(*sess), GFP_KERNEL);
|
|
- if (!sess) {
|
|
- rc = -ENOMEM;
|
|
- goto out;
|
|
- }
|
|
+ if (ocall_param && !ctx->cap_ocall)
|
|
+ return -EOPNOTSUPP;
|
|
|
|
- if (optee_do_call_with_arg(ctx, msg_parg)) {
|
|
- msg_arg->ret = TEEC_ERROR_COMMUNICATION;
|
|
- msg_arg->ret_origin = TEEC_ORIGIN_COMMS;
|
|
- }
|
|
+ ocall_func = ocall_param ? param_get_ocall_func(ocall_param) : 0;
|
|
+ if (ocall_func) {
|
|
+ if (arg->session > INT_MAX)
|
|
+ return -EINVAL;
|
|
|
|
- if (msg_arg->ret == TEEC_SUCCESS) {
|
|
- /* A new session has been created, add it to the list. */
|
|
- sess->session_id = msg_arg->session;
|
|
+ sess_tmp_id = (int)arg->session;
|
|
mutex_lock(&ctxdata->mutex);
|
|
- list_add(&sess->list_node, &ctxdata->sess_list);
|
|
+ sess = idr_remove(&ctxdata->tmp_sess_list, sess_tmp_id);
|
|
mutex_unlock(&ctxdata->mutex);
|
|
+ if (!sess)
|
|
+ return -EINVAL;
|
|
+
|
|
+ call_ctx = &sess->call_ctx;
|
|
+ if (!call_ctx->rpc_shm) {
|
|
+ rc = -EINVAL;
|
|
+ call_ctx->rpc_arg->ret = TEEC_ERROR_BAD_PARAMETERS;
|
|
+ call_ctx->rpc_arg->ret_origin = TEEC_ORIGIN_COMMS;
|
|
+ goto exit_cancel;
|
|
+ }
|
|
+
|
|
+ rc = process_ocall_reply(arg->ret, arg->ret_origin,
|
|
+ normal_param, num_normal_params,
|
|
+ ocall_param, call_ctx);
|
|
+ if (rc)
|
|
+ goto exit_cancel;
|
|
} else {
|
|
- kfree(sess);
|
|
+ sess = kzalloc(sizeof(*sess), GFP_KERNEL);
|
|
+ if (!sess)
|
|
+ return -ENOMEM;
|
|
+
|
|
+ call_ctx = &sess->call_ctx;
|
|
+ /* +2 for the meta parameters added below */
|
|
+ call_ctx->msg_shm = optee_get_msg_arg(ctx,
|
|
+ num_normal_params + 2,
|
|
+ &call_ctx->msg_arg,
|
|
+ &call_ctx->msg_parg);
|
|
+ if (IS_ERR(call_ctx->msg_shm)) {
|
|
+ rc = PTR_ERR(call_ctx->msg_shm);
|
|
+ goto exit_free;
|
|
+ }
|
|
+
|
|
+ call_ctx->ctx = ctx;
|
|
+ call_ctx->msg_arg->cmd = OPTEE_MSG_CMD_OPEN_SESSION;
|
|
+ call_ctx->msg_arg->cancel_id = arg->cancel_id;
|
|
+
|
|
+ /*
|
|
+ * Initialize and add the meta parameters needed when opening a
|
|
+ * session.
|
|
+ */
|
|
+ call_ctx->msg_arg->params[0].attr =
|
|
+ OPTEE_MSG_ATTR_TYPE_VALUE_INPUT | OPTEE_MSG_ATTR_META;
|
|
+ call_ctx->msg_arg->params[1].attr =
|
|
+ OPTEE_MSG_ATTR_TYPE_VALUE_INPUT | OPTEE_MSG_ATTR_META;
|
|
+ memcpy(&call_ctx->msg_arg->params[0].u.value, arg->uuid,
|
|
+ sizeof(arg->uuid));
|
|
+ call_ctx->msg_arg->params[1].u.value.c = arg->clnt_login;
|
|
+ rc = tee_session_calc_client_uuid((uuid_t *)
|
|
+ &call_ctx->msg_arg->params[1].u.value,
|
|
+ arg->clnt_login, arg->clnt_uuid);
|
|
+ if (rc)
|
|
+ goto exit_free_shm;
|
|
+
|
|
+ rc = optee_to_msg_param(call_ctx->msg_arg->params + 2,
|
|
+ num_normal_params, normal_param);
|
|
+ if (rc)
|
|
+ goto exit_free_shm;
|
|
+
|
|
+ call_prologue(call_ctx);
|
|
}
|
|
|
|
- if (optee_from_msg_param(param, arg->num_params, msg_arg->params + 2)) {
|
|
- arg->ret = TEEC_ERROR_COMMUNICATION;
|
|
- arg->ret_origin = TEEC_ORIGIN_COMMS;
|
|
- /* Close session again to avoid leakage */
|
|
- optee_close_session(ctx, msg_arg->session);
|
|
+ rc = optee_do_call_with_ctx(call_ctx);
|
|
+ if (rc == -EAGAIN) {
|
|
+ rc = process_ocall_request(normal_param, num_normal_params,
|
|
+ ocall_param, call_ctx);
|
|
+ if (rc)
|
|
+ goto exit_cancel;
|
|
+
|
|
+ /*
|
|
+ * 'sess' becomes globally visible after adding it to the IDR,
|
|
+ * so do not touch it once the mutex is unlocked.
|
|
+ */
|
|
+ mutex_lock(&ctxdata->mutex);
|
|
+ sess_tmp_id = idr_alloc(&ctxdata->tmp_sess_list, sess, 1, 0,
|
|
+ GFP_KERNEL);
|
|
+ if (sess_tmp_id >= 1)
|
|
+ sess->session_id = sess_tmp_id;
|
|
+ mutex_unlock(&ctxdata->mutex);
|
|
+ if (sess_tmp_id < 0) {
|
|
+ rc = sess_tmp_id;
|
|
+ call_ctx->rpc_arg->ret = TEEC_ERROR_OUT_OF_MEMORY;
|
|
+ call_ctx->rpc_arg->ret_origin = TEEC_ORIGIN_COMMS;
|
|
+ goto exit_cancel;
|
|
+ }
|
|
+
|
|
+ arg->session = sess_tmp_id;
|
|
} else {
|
|
- arg->session = msg_arg->session;
|
|
- arg->ret = msg_arg->ret;
|
|
- arg->ret_origin = msg_arg->ret_origin;
|
|
+ call_epilogue(call_ctx);
|
|
+
|
|
+ if (rc) {
|
|
+ arg->ret = TEEC_ERROR_COMMUNICATION;
|
|
+ arg->ret_origin = TEEC_ORIGIN_COMMS;
|
|
+ } else {
|
|
+ arg->ret = call_ctx->msg_arg->ret;
|
|
+ arg->ret_origin = call_ctx->msg_arg->ret_origin;
|
|
+ }
|
|
+
|
|
+ if (optee_from_msg_param(normal_param, num_normal_params,
|
|
+ call_ctx->msg_arg->params + 2)) {
|
|
+ if (arg->ret == TEEC_SUCCESS)
|
|
+ close_session(ctx, call_ctx->msg_arg->session);
|
|
+
|
|
+ arg->ret = TEEC_ERROR_COMMUNICATION;
|
|
+ arg->ret_origin = TEEC_ORIGIN_COMMS;
|
|
+ }
|
|
+
|
|
+ if (arg->ret)
|
|
+ goto exit_clear_free_all;
|
|
+
|
|
+ /*
|
|
+ * A new session has been created, initialize it and add it to
|
|
+ * the list.
|
|
+ */
|
|
+ sema_init(&sess->sem, 1);
|
|
+ arg->session = call_ctx->msg_arg->session;
|
|
+ sess->session_id = call_ctx->msg_arg->session;
|
|
+
|
|
+ tee_shm_free(call_ctx->msg_shm);
|
|
+ clear_call_ctx(call_ctx);
|
|
+
|
|
+ mutex_lock(&ctxdata->mutex);
|
|
+ list_add(&sess->list_node, &ctxdata->sess_list);
|
|
+ mutex_unlock(&ctxdata->mutex);
|
|
+
|
|
+ param_clear_ocall(ocall_param);
|
|
}
|
|
-out:
|
|
- tee_shm_free(shm);
|
|
|
|
return rc;
|
|
+
|
|
+exit_cancel:
|
|
+ /* See comment in optee_cancel_open_session_ocall */
|
|
+ if (cancel_ocall(call_ctx) == 0 &&
|
|
+ call_ctx->msg_arg->ret == TEEC_SUCCESS)
|
|
+ close_session(ctx, call_ctx->msg_arg->session);
|
|
+ optee_from_msg_param(normal_param, num_normal_params,
|
|
+ call_ctx->msg_arg->params);
|
|
+exit_clear_free_all:
|
|
+ param_clear_ocall(ocall_param);
|
|
+exit_free_shm:
|
|
+ tee_shm_free(call_ctx->msg_shm);
|
|
+exit_free:
|
|
+ kfree(sess);
|
|
+ return rc;
|
|
+}
|
|
+
|
|
+void optee_cancel_open_session_ocall(struct optee_session *sess)
|
|
+{
|
|
+ struct optee_call_ctx *call_ctx = &sess->call_ctx;
|
|
+
|
|
+ call_ctx->rpc_arg->ret = TEEC_ERROR_TARGET_DEAD;
|
|
+ call_ctx->rpc_arg->ret_origin = TEEC_ORIGIN_COMMS;
|
|
+
|
|
+ /*
|
|
+ * Reaching this function means an OCALL is pending during session open
|
|
+ * but the CA has terminated abnormally. As such, the OCALL is
|
|
+ * cancelled. However, there is a chance that the TA's session open
|
|
+ * handler ignores the cancellation and lets the session open anyway. If
|
|
+ * that happens, close it.
|
|
+ */
|
|
+ if (cancel_ocall(&sess->call_ctx) == 0 &&
|
|
+ call_ctx->msg_arg->ret == TEEC_SUCCESS)
|
|
+ close_session(call_ctx->ctx, call_ctx->msg_arg->session);
|
|
+
|
|
+ /*
|
|
+ * Decrease the ref count on all shared memory pointers passed into the
|
|
+ * original function invocation.
|
|
+ */
|
|
+ process_ocall_memrefs(call_ctx->msg_arg->params,
|
|
+ call_ctx->msg_arg->num_params, false);
|
|
+
|
|
+ tee_shm_free(call_ctx->msg_shm);
|
|
+ kfree(sess);
|
|
}
|
|
|
|
int optee_close_session(struct tee_context *ctx, u32 session)
|
|
{
|
|
struct optee_context_data *ctxdata = ctx->data;
|
|
- struct tee_shm *shm;
|
|
- struct optee_msg_arg *msg_arg;
|
|
- phys_addr_t msg_parg;
|
|
struct optee_session *sess;
|
|
|
|
/* Check that the session is valid and remove it from the list */
|
|
@@ -304,66 +658,162 @@ int optee_close_session(struct tee_context *ctx, u32 session)
|
|
mutex_unlock(&ctxdata->mutex);
|
|
if (!sess)
|
|
return -EINVAL;
|
|
- kfree(sess);
|
|
|
|
- shm = get_msg_arg(ctx, 0, &msg_arg, &msg_parg);
|
|
- if (IS_ERR(shm))
|
|
- return PTR_ERR(shm);
|
|
+ /*
|
|
+ * If another thread found the session before we removed it from the
|
|
+ * list and that thread is operating on the session object itself, wait
|
|
+ * until it is done before we destroy it.
|
|
+ */
|
|
+ down(&sess->sem);
|
|
|
|
- msg_arg->cmd = OPTEE_MSG_CMD_CLOSE_SESSION;
|
|
- msg_arg->session = session;
|
|
- optee_do_call_with_arg(ctx, msg_parg);
|
|
+ if (sess->call_ctx.rpc_shm)
|
|
+ optee_cancel_invoke_function_ocall(&sess->call_ctx);
|
|
+
|
|
+ kfree(sess);
|
|
+ close_session(ctx, session);
|
|
|
|
- tee_shm_free(shm);
|
|
return 0;
|
|
}
|
|
|
|
int optee_invoke_func(struct tee_context *ctx, struct tee_ioctl_invoke_arg *arg,
|
|
- struct tee_param *param)
|
|
+ struct tee_param *normal_param, u32 num_normal_params,
|
|
+ struct tee_param *ocall_param)
|
|
{
|
|
struct optee_context_data *ctxdata = ctx->data;
|
|
- struct tee_shm *shm;
|
|
- struct optee_msg_arg *msg_arg;
|
|
- phys_addr_t msg_parg;
|
|
+ struct optee_call_ctx *call_ctx;
|
|
struct optee_session *sess;
|
|
- int rc;
|
|
+ u64 ocall_func;
|
|
+ int rc = 0;
|
|
+
|
|
+ if (ocall_param && !ctx->cap_ocall) {
|
|
+ rc = -EOPNOTSUPP;
|
|
+ goto exit;
|
|
+ }
|
|
|
|
/* Check that the session is valid */
|
|
mutex_lock(&ctxdata->mutex);
|
|
sess = find_session(ctxdata, arg->session);
|
|
+ if (sess)
|
|
+ down(&sess->sem);
|
|
mutex_unlock(&ctxdata->mutex);
|
|
if (!sess)
|
|
return -EINVAL;
|
|
|
|
- shm = get_msg_arg(ctx, arg->num_params, &msg_arg, &msg_parg);
|
|
- if (IS_ERR(shm))
|
|
- return PTR_ERR(shm);
|
|
- msg_arg->cmd = OPTEE_MSG_CMD_INVOKE_COMMAND;
|
|
- msg_arg->func = arg->func;
|
|
- msg_arg->session = arg->session;
|
|
- msg_arg->cancel_id = arg->cancel_id;
|
|
+ call_ctx = &sess->call_ctx;
|
|
+ ocall_func = ocall_param ? param_get_ocall_func(ocall_param) : 0;
|
|
+ if (ocall_func) {
|
|
+ /* The current call is a reply to an OCALL request */
|
|
|
|
- rc = optee_to_msg_param(msg_arg->params, arg->num_params, param);
|
|
- if (rc)
|
|
- goto out;
|
|
+ if (!call_ctx->rpc_shm) {
|
|
+ rc = -EINVAL;
|
|
+ goto exit;
|
|
+ }
|
|
+
|
|
+ rc = process_ocall_reply(arg->ret, arg->ret_origin,
|
|
+ normal_param, num_normal_params,
|
|
+ ocall_param, call_ctx);
|
|
+ if (rc)
|
|
+ goto exit_cancel;
|
|
+ } else {
|
|
+ /*
|
|
+ * The current call is an invocation that may result in an OCALL
|
|
+ * request.
|
|
+ */
|
|
+
|
|
+ if (call_ctx->rpc_shm) {
|
|
+ rc = -EINVAL;
|
|
+ call_ctx->rpc_arg->ret = TEEC_ERROR_BAD_PARAMETERS;
|
|
+ call_ctx->rpc_arg->ret_origin = TEEC_ORIGIN_COMMS;
|
|
+ goto exit_cancel;
|
|
+ }
|
|
|
|
- if (optee_do_call_with_arg(ctx, msg_parg)) {
|
|
- msg_arg->ret = TEEC_ERROR_COMMUNICATION;
|
|
- msg_arg->ret_origin = TEEC_ORIGIN_COMMS;
|
|
+ call_ctx->msg_shm = optee_get_msg_arg(ctx, num_normal_params,
|
|
+ &call_ctx->msg_arg,
|
|
+ &call_ctx->msg_parg);
|
|
+ if (IS_ERR(call_ctx->msg_shm)) {
|
|
+ rc = PTR_ERR(call_ctx->msg_shm);
|
|
+ goto exit_clear;
|
|
+ }
|
|
+
|
|
+ call_ctx->ctx = ctx;
|
|
+ call_ctx->msg_arg->cmd = OPTEE_MSG_CMD_INVOKE_COMMAND;
|
|
+ call_ctx->msg_arg->func = arg->func;
|
|
+ call_ctx->msg_arg->session = arg->session;
|
|
+ call_ctx->msg_arg->cancel_id = arg->cancel_id;
|
|
+
|
|
+ rc = optee_to_msg_param(call_ctx->msg_arg->params,
|
|
+ num_normal_params, normal_param);
|
|
+ if (rc) {
|
|
+ tee_shm_free(call_ctx->msg_shm);
|
|
+ goto exit_clear;
|
|
+ }
|
|
+
|
|
+ call_prologue(call_ctx);
|
|
}
|
|
|
|
- if (optee_from_msg_param(param, arg->num_params, msg_arg->params)) {
|
|
- msg_arg->ret = TEEC_ERROR_COMMUNICATION;
|
|
- msg_arg->ret_origin = TEEC_ORIGIN_COMMS;
|
|
+ rc = optee_do_call_with_ctx(call_ctx);
|
|
+ if (rc == -EAGAIN) {
|
|
+ rc = process_ocall_request(normal_param, num_normal_params,
|
|
+ ocall_param, call_ctx);
|
|
+ if (rc)
|
|
+ goto exit_cancel;
|
|
+ } else {
|
|
+ call_epilogue(call_ctx);
|
|
+
|
|
+ arg->ret = call_ctx->msg_arg->ret;
|
|
+ arg->ret_origin = call_ctx->msg_arg->ret_origin;
|
|
+
|
|
+ if (rc) {
|
|
+ arg->ret = TEEC_ERROR_COMMUNICATION;
|
|
+ arg->ret_origin = TEEC_ORIGIN_COMMS;
|
|
+ }
|
|
+
|
|
+ if (optee_from_msg_param(normal_param, num_normal_params,
|
|
+ call_ctx->msg_arg->params)) {
|
|
+ arg->ret = TEEC_ERROR_COMMUNICATION;
|
|
+ arg->ret_origin = TEEC_ORIGIN_COMMS;
|
|
+ }
|
|
+
|
|
+ tee_shm_free(call_ctx->msg_shm);
|
|
+ clear_call_ctx(call_ctx);
|
|
+ param_clear_ocall(ocall_param);
|
|
}
|
|
|
|
- arg->ret = msg_arg->ret;
|
|
- arg->ret_origin = msg_arg->ret_origin;
|
|
-out:
|
|
- tee_shm_free(shm);
|
|
+ up(&sess->sem);
|
|
+ return rc;
|
|
+
|
|
+exit_cancel:
|
|
+ cancel_ocall(call_ctx);
|
|
+ optee_from_msg_param(normal_param, num_normal_params,
|
|
+ call_ctx->msg_arg->params);
|
|
+ tee_shm_free(call_ctx->msg_shm);
|
|
+ param_clear_ocall(ocall_param);
|
|
+exit_clear:
|
|
+ clear_call_ctx(call_ctx);
|
|
+exit:
|
|
+ up(&sess->sem);
|
|
return rc;
|
|
}
|
|
|
|
+/* Requires @sem in the parent struct optee_session to be held */
|
|
+void optee_cancel_invoke_function_ocall(struct optee_call_ctx *call_ctx)
|
|
+{
|
|
+ call_ctx->rpc_arg->ret = TEEC_ERROR_TARGET_DEAD;
|
|
+ call_ctx->rpc_arg->ret_origin = TEEC_ORIGIN_COMMS;
|
|
+
|
|
+ cancel_ocall(call_ctx);
|
|
+
|
|
+ /*
|
|
+ * Decrease the ref count on all shared memory pointers passed into the
|
|
+ * original function invocation.
|
|
+ */
|
|
+ process_ocall_memrefs(call_ctx->msg_arg->params,
|
|
+ call_ctx->msg_arg->num_params, false);
|
|
+
|
|
+ tee_shm_free(call_ctx->msg_shm);
|
|
+ clear_call_ctx(call_ctx);
|
|
+}
|
|
+
|
|
int optee_cancel_req(struct tee_context *ctx, u32 cancel_id, u32 session)
|
|
{
|
|
struct optee_context_data *ctxdata = ctx->data;
|
|
@@ -379,7 +829,7 @@ int optee_cancel_req(struct tee_context *ctx, u32 cancel_id, u32 session)
|
|
if (!sess)
|
|
return -EINVAL;
|
|
|
|
- shm = get_msg_arg(ctx, 0, &msg_arg, &msg_parg);
|
|
+ shm = optee_get_msg_arg(ctx, 0, &msg_arg, &msg_parg);
|
|
if (IS_ERR(shm))
|
|
return PTR_ERR(shm);
|
|
|
|
@@ -632,7 +1082,7 @@ int optee_shm_register(struct tee_context *ctx, struct tee_shm *shm,
|
|
if (!pages_list)
|
|
return -ENOMEM;
|
|
|
|
- shm_arg = get_msg_arg(ctx, 1, &msg_arg, &msg_parg);
|
|
+ shm_arg = optee_get_msg_arg(ctx, 1, &msg_arg, &msg_parg);
|
|
if (IS_ERR(shm_arg)) {
|
|
rc = PTR_ERR(shm_arg);
|
|
goto out;
|
|
@@ -670,7 +1120,7 @@ int optee_shm_unregister(struct tee_context *ctx, struct tee_shm *shm)
|
|
phys_addr_t msg_parg;
|
|
int rc = 0;
|
|
|
|
- shm_arg = get_msg_arg(ctx, 1, &msg_arg, &msg_parg);
|
|
+ shm_arg = optee_get_msg_arg(ctx, 1, &msg_arg, &msg_parg);
|
|
if (IS_ERR(shm_arg))
|
|
return PTR_ERR(shm_arg);
|
|
|
|
diff --git a/drivers/tee/optee/call_queue.c b/drivers/tee/optee/call_queue.c
|
|
new file mode 100644
|
|
index 000000000000..70922c040fc6
|
|
--- /dev/null
|
|
+++ b/drivers/tee/optee/call_queue.c
|
|
@@ -0,0 +1,86 @@
|
|
+// SPDX-License-Identifier: GPL-2.0-only
|
|
+/*
|
|
+ * Copyright (c) 2015, Linaro Limited
|
|
+ */
|
|
+
|
|
+#include "optee_private.h"
|
|
+
|
|
+void optee_cq_wait_init(struct optee_call_queue *cq,
|
|
+ struct optee_call_waiter *w)
|
|
+{
|
|
+ /*
|
|
+ * We're preparing to make a call to secure world. In case we can't
|
|
+ * allocate a thread in secure world we'll end up waiting in
|
|
+ * optee_cq_wait_for_completion().
|
|
+ *
|
|
+ * Normally if there's no contention in secure world the call will
|
|
+ * complete and we can cleanup directly with optee_cq_wait_final().
|
|
+ */
|
|
+ mutex_lock(&cq->mutex);
|
|
+
|
|
+ /*
|
|
+ * We add ourselves to the queue, but we don't wait. This
|
|
+ * guarantees that we don't lose a completion if secure world
|
|
+ * returns busy and another thread just exited and try to complete
|
|
+ * someone.
|
|
+ */
|
|
+ init_completion(&w->c);
|
|
+ list_add_tail(&w->list_node, &cq->waiters);
|
|
+
|
|
+ mutex_unlock(&cq->mutex);
|
|
+}
|
|
+
|
|
+void optee_cq_wait_for_completion(struct optee_call_queue *cq,
|
|
+ struct optee_call_waiter *w)
|
|
+{
|
|
+ wait_for_completion(&w->c);
|
|
+
|
|
+ mutex_lock(&cq->mutex);
|
|
+
|
|
+ /* Move to end of list to get out of the way for other waiters */
|
|
+ list_del(&w->list_node);
|
|
+ reinit_completion(&w->c);
|
|
+ list_add_tail(&w->list_node, &cq->waiters);
|
|
+
|
|
+ mutex_unlock(&cq->mutex);
|
|
+}
|
|
+
|
|
+void optee_cq_complete_one(struct optee_call_queue *cq)
|
|
+{
|
|
+ struct optee_call_waiter *w;
|
|
+
|
|
+ list_for_each_entry(w, &cq->waiters, list_node) {
|
|
+ if (!completion_done(&w->c)) {
|
|
+ complete(&w->c);
|
|
+ break;
|
|
+ }
|
|
+ }
|
|
+}
|
|
+
|
|
+void optee_cq_wait_final(struct optee_call_queue *cq,
|
|
+ struct optee_call_waiter *w)
|
|
+{
|
|
+ /*
|
|
+ * We're done with the call to secure world. The thread in secure
|
|
+ * world that was used for this call is now available for some
|
|
+ * other task to use.
|
|
+ */
|
|
+ mutex_lock(&cq->mutex);
|
|
+
|
|
+ /* Get out of the list */
|
|
+ list_del(&w->list_node);
|
|
+
|
|
+ /* Wake up one eventual waiting task */
|
|
+ optee_cq_complete_one(cq);
|
|
+
|
|
+ /*
|
|
+ * If we're completed we've got a completion from another task that
|
|
+ * was just done with its call to secure world. Since yet another
|
|
+ * thread now is available in secure world wake up another eventual
|
|
+ * waiting task.
|
|
+ */
|
|
+ if (completion_done(&w->c))
|
|
+ optee_cq_complete_one(cq);
|
|
+
|
|
+ mutex_unlock(&cq->mutex);
|
|
+}
|
|
diff --git a/drivers/tee/optee/core.c b/drivers/tee/optee/core.c
|
|
index 50c0d839fe75..ab3e5d65e946 100644
|
|
--- a/drivers/tee/optee/core.c
|
|
+++ b/drivers/tee/optee/core.c
|
|
@@ -8,17 +8,22 @@
|
|
#include <linux/arm-smccc.h>
|
|
#include <linux/crash_dump.h>
|
|
#include <linux/errno.h>
|
|
+#include <linux/interrupt.h>
|
|
#include <linux/io.h>
|
|
+#include <linux/irqdomain.h>
|
|
#include <linux/module.h>
|
|
#include <linux/of.h>
|
|
+#include <linux/of_irq.h>
|
|
#include <linux/of_platform.h>
|
|
#include <linux/platform_device.h>
|
|
+#include <linux/sched.h>
|
|
#include <linux/slab.h>
|
|
#include <linux/string.h>
|
|
#include <linux/tee_drv.h>
|
|
#include <linux/types.h>
|
|
#include <linux/uaccess.h>
|
|
#include <linux/workqueue.h>
|
|
+#include <linux/xarray.h>
|
|
#include "optee_private.h"
|
|
#include "optee_smc.h"
|
|
#include "shm_pool.h"
|
|
@@ -209,6 +214,8 @@ static void optee_get_version(struct tee_device *teedev,
|
|
v.gen_caps |= TEE_GEN_CAP_REG_MEM;
|
|
if (optee->sec_caps & OPTEE_SMC_SEC_CAP_MEMREF_NULL)
|
|
v.gen_caps |= TEE_GEN_CAP_MEMREF_NULL;
|
|
+ if (optee->sec_caps & OPTEE_SMC_SEC_CAP_OCALL)
|
|
+ v.gen_caps |= TEE_GEN_CAP_OCALL;
|
|
*vers = v;
|
|
}
|
|
|
|
@@ -254,11 +261,10 @@ static int optee_open(struct tee_context *ctx)
|
|
}
|
|
mutex_init(&ctxdata->mutex);
|
|
INIT_LIST_HEAD(&ctxdata->sess_list);
|
|
+ idr_init(&ctxdata->tmp_sess_list);
|
|
|
|
- if (optee->sec_caps & OPTEE_SMC_SEC_CAP_MEMREF_NULL)
|
|
- ctx->cap_memref_null = true;
|
|
- else
|
|
- ctx->cap_memref_null = false;
|
|
+ ctx->cap_memref_null = optee->sec_caps & OPTEE_SMC_SEC_CAP_MEMREF_NULL;
|
|
+ ctx->cap_ocall = optee->sec_caps & OPTEE_SMC_SEC_CAP_OCALL;
|
|
|
|
ctx->data = ctxdata;
|
|
return 0;
|
|
@@ -304,6 +310,7 @@ static void optee_release(struct tee_context *ctx)
|
|
}
|
|
kfree(sess);
|
|
}
|
|
+ idr_destroy(&ctxdata->tmp_sess_list);
|
|
kfree(ctxdata);
|
|
|
|
if (!IS_ERR(shm))
|
|
@@ -355,6 +362,345 @@ static const struct tee_desc optee_supp_desc = {
|
|
.flags = TEE_DESC_PRIVILEGED,
|
|
};
|
|
|
|
+static int simple_call_with_arg(struct tee_context *ctx, u32 cmd)
|
|
+{
|
|
+ struct optee_msg_arg *msg_arg;
|
|
+ struct tee_shm *shm;
|
|
+ phys_addr_t msg_parg;
|
|
+
|
|
+ shm = optee_get_msg_arg(ctx, 0, &msg_arg, &msg_parg);
|
|
+ if (IS_ERR(shm))
|
|
+ return PTR_ERR(shm);
|
|
+
|
|
+ msg_arg->cmd = cmd;
|
|
+ optee_do_call_with_arg(ctx, msg_parg);
|
|
+
|
|
+ tee_shm_free(shm);
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static u32 get_it_value(optee_invoke_fn *invoke_fn, bool *value_valid,
|
|
+ bool *value_pending)
|
|
+{
|
|
+ struct arm_smccc_res res;
|
|
+
|
|
+ invoke_fn(OPTEE_SMC_GET_IT_NOTIF_VALUE, 0, 0, 0, 0, 0, 0, 0, &res);
|
|
+
|
|
+ if (res.a0)
|
|
+ return 0;
|
|
+ *value_valid = (res.a2 & OPTEE_SMC_IT_NOTIF_VALUE_VALID);
|
|
+ *value_pending = (res.a2 & OPTEE_SMC_IT_NOTIF_VALUE_PENDING);
|
|
+ return res.a1;
|
|
+}
|
|
+
|
|
+static u32 set_it_mask(optee_invoke_fn *invoke_fn, u32 it_value, bool mask)
|
|
+{
|
|
+ struct arm_smccc_res res;
|
|
+
|
|
+ invoke_fn(OPTEE_SMC_SET_IT_NOTIF_MASK, it_value, mask, 0, 0, 0, 0, 0, &res);
|
|
+
|
|
+ if (res.a0)
|
|
+ return 0;
|
|
+
|
|
+ return res.a1;
|
|
+}
|
|
+
|
|
+static int handle_optee_it(struct optee *optee)
|
|
+{
|
|
+ bool value_valid;
|
|
+ bool value_pending;
|
|
+ u32 it;
|
|
+
|
|
+ do {
|
|
+ struct irq_desc *desc;
|
|
+
|
|
+ it = get_it_value(optee->invoke_fn, &value_valid,
|
|
+ &value_pending);
|
|
+ if (!value_valid)
|
|
+ break;
|
|
+
|
|
+ desc = irq_to_desc(irq_find_mapping(optee->domain, it));
|
|
+ if (!desc) {
|
|
+ pr_err("no desc for optee IT:%d\n", it);
|
|
+ return -EIO;
|
|
+ }
|
|
+
|
|
+ handle_simple_irq(desc);
|
|
+
|
|
+ } while (value_pending);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static void optee_it_irq_mask(struct irq_data *d)
|
|
+{
|
|
+ struct optee *optee = d->domain->host_data;
|
|
+
|
|
+ set_it_mask(optee->invoke_fn, d->hwirq, true);
|
|
+}
|
|
+
|
|
+static void optee_it_irq_unmask(struct irq_data *d)
|
|
+{
|
|
+ struct optee *optee = d->domain->host_data;
|
|
+
|
|
+ set_it_mask(optee->invoke_fn, d->hwirq, false);
|
|
+}
|
|
+
|
|
+static struct irq_chip optee_it_irq_chip = {
|
|
+ .name = "optee-it",
|
|
+ .irq_disable = optee_it_irq_mask,
|
|
+ .irq_enable = optee_it_irq_unmask,
|
|
+ .flags = IRQCHIP_SKIP_SET_WAKE,
|
|
+};
|
|
+
|
|
+static int optee_it_alloc(struct irq_domain *d, unsigned int virq,
|
|
+ unsigned int nr_irqs, void *data)
|
|
+{
|
|
+ struct irq_fwspec *fwspec = data;
|
|
+ irq_hw_number_t hwirq;
|
|
+
|
|
+ hwirq = fwspec->param[0];
|
|
+
|
|
+ irq_domain_set_hwirq_and_chip(d, virq, hwirq, &optee_it_irq_chip, d->host_data);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static const struct irq_domain_ops optee_it_irq_domain_ops = {
|
|
+ .alloc = optee_it_alloc,
|
|
+ .free = irq_domain_free_irqs_common,
|
|
+};
|
|
+
|
|
+static int optee_irq_domain_init(struct platform_device *pdev, struct optee *optee)
|
|
+{
|
|
+ struct device *dev = &pdev->dev;
|
|
+ struct device_node *np = dev->of_node;
|
|
+
|
|
+ optee->domain = irq_domain_add_linear(np, OPTEE_MAX_IT,
|
|
+ &optee_it_irq_domain_ops,
|
|
+ optee);
|
|
+ if (!optee->domain) {
|
|
+ pr_err("Unable to add irq domain!\n");
|
|
+ return -ENOMEM;
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static void optee_irq_domain_uninit(struct optee *optee)
|
|
+{
|
|
+ irq_domain_remove(optee->domain);
|
|
+}
|
|
+
|
|
+static int optee_smc_do_bottom_half(struct tee_context *ctx)
|
|
+{
|
|
+ return simple_call_with_arg(ctx, OPTEE_MSG_CMD_DO_BOTTOM_HALF);
|
|
+}
|
|
+
|
|
+static int optee_smc_stop_async_notif(struct tee_context *ctx)
|
|
+{
|
|
+ return simple_call_with_arg(ctx, OPTEE_MSG_CMD_STOP_ASYNC_NOTIF);
|
|
+}
|
|
+
|
|
+static u32 get_async_notif_value(optee_invoke_fn *invoke_fn, bool *value_valid,
|
|
+ bool *value_pending)
|
|
+{
|
|
+ struct arm_smccc_res res;
|
|
+
|
|
+ invoke_fn(OPTEE_SMC_GET_ASYNC_NOTIF_VALUE, 0, 0, 0, 0, 0, 0, 0, &res);
|
|
+
|
|
+ if (res.a0)
|
|
+ return 0;
|
|
+ *value_valid = (res.a2 & OPTEE_SMC_ASYNC_NOTIF_VALUE_VALID);
|
|
+ *value_pending = (res.a2 & OPTEE_SMC_ASYNC_NOTIF_VALUE_PENDING);
|
|
+ return res.a1;
|
|
+}
|
|
+
|
|
+static irqreturn_t notif_irq_handler(int irq, void *dev_id)
|
|
+{
|
|
+ struct optee_pcpu *optee_pcpu;
|
|
+ struct optee *optee;
|
|
+ bool do_bottom_half = false;
|
|
+ bool value_valid;
|
|
+ bool value_pending;
|
|
+ u32 value;
|
|
+
|
|
+ if (irq_is_percpu_devid(irq)) {
|
|
+ optee_pcpu = (struct optee_pcpu *)dev_id;
|
|
+ optee = optee_pcpu->optee;
|
|
+ } else {
|
|
+ optee = dev_id;
|
|
+ }
|
|
+
|
|
+ do {
|
|
+ value = get_async_notif_value(optee->invoke_fn,
|
|
+ &value_valid, &value_pending);
|
|
+ if (!value_valid)
|
|
+ break;
|
|
+
|
|
+ if (value == OPTEE_SMC_ASYNC_NOTIF_VALUE_DO_BOTTOM_HALF)
|
|
+ do_bottom_half = true;
|
|
+ else if (value == OPTEE_SMC_ASYNC_NOTIF_VALUE_DO_IT)
|
|
+ handle_optee_it(optee);
|
|
+ else
|
|
+ optee_notif_send(optee, value);
|
|
+ } while (value_pending);
|
|
+
|
|
+ if (do_bottom_half) {
|
|
+ if (irq_is_percpu_devid(irq))
|
|
+ queue_work(optee->notif_pcpu_wq, &optee->notif_pcpu_work);
|
|
+ else
|
|
+ return IRQ_WAKE_THREAD;
|
|
+ }
|
|
+
|
|
+ return IRQ_HANDLED;
|
|
+}
|
|
+
|
|
+static irqreturn_t notif_irq_thread_fn(int irq, void *dev_id)
|
|
+{
|
|
+ struct optee *optee = dev_id;
|
|
+
|
|
+ optee_smc_do_bottom_half(optee->notif.ctx);
|
|
+
|
|
+ return IRQ_HANDLED;
|
|
+}
|
|
+
|
|
+static void optee_pcpu_notif(struct work_struct *work)
|
|
+{
|
|
+ struct optee *optee = container_of(work, struct optee, notif_pcpu_work);
|
|
+
|
|
+ optee_smc_do_bottom_half(optee->notif.ctx);
|
|
+}
|
|
+
|
|
+static int optee_smc_notif_init_irq(struct optee *optee, u_int irq)
|
|
+{
|
|
+ struct tee_context *ctx;
|
|
+ int rc;
|
|
+
|
|
+ ctx = teedev_open(optee->teedev);
|
|
+ if (IS_ERR(ctx))
|
|
+ return PTR_ERR(ctx);
|
|
+
|
|
+ optee->notif.ctx = ctx;
|
|
+ rc = request_threaded_irq(irq, notif_irq_handler,
|
|
+ notif_irq_thread_fn,
|
|
+ 0, "optee_notification", optee);
|
|
+ if (rc)
|
|
+ goto err_close_ctx;
|
|
+
|
|
+ optee->notif_irq = irq;
|
|
+
|
|
+ return 0;
|
|
+
|
|
+err_close_ctx:
|
|
+ teedev_close_context(optee->notif.ctx);
|
|
+ optee->notif.ctx = NULL;
|
|
+
|
|
+ return rc;
|
|
+}
|
|
+
|
|
+static int optee_smc_notif_pcpu_init_irq(struct optee *optee, u_int irq)
|
|
+{
|
|
+ struct optee_pcpu *optee_pcpu;
|
|
+ struct tee_context *ctx;
|
|
+ spinlock_t lock;
|
|
+ int cpu;
|
|
+ int rc;
|
|
+
|
|
+ /* Alloc per-cpu port structure */
|
|
+ optee_pcpu = alloc_percpu(struct optee_pcpu);
|
|
+ if (!optee_pcpu)
|
|
+ return -ENOMEM;
|
|
+
|
|
+ for_each_present_cpu(cpu) {
|
|
+ struct optee_pcpu *p = per_cpu_ptr(optee_pcpu, cpu);
|
|
+
|
|
+ p->optee = optee;
|
|
+ }
|
|
+
|
|
+ ctx = teedev_open(optee->teedev);
|
|
+ if (IS_ERR(ctx)) {
|
|
+ rc = PTR_ERR(ctx);
|
|
+ goto err_free_pcpu;
|
|
+ }
|
|
+
|
|
+ optee->notif.ctx = ctx;
|
|
+
|
|
+ rc = request_percpu_irq(irq, notif_irq_handler, "optee_pcpu_notification",
|
|
+ optee_pcpu);
|
|
+ if (rc) {
|
|
+ rc = PTR_ERR(ctx);
|
|
+ goto err_close_ctx;
|
|
+ }
|
|
+
|
|
+ spin_lock_init(&lock);
|
|
+
|
|
+ spin_lock(&lock);
|
|
+ enable_percpu_irq(irq, 0);
|
|
+ spin_unlock(&lock);
|
|
+
|
|
+ INIT_WORK(&optee->notif_pcpu_work, optee_pcpu_notif);
|
|
+ optee->notif_pcpu_wq = create_workqueue("optee_pcpu_notification");
|
|
+ if (!optee->notif_pcpu_wq) {
|
|
+ rc = -EINVAL;
|
|
+ goto err_free_pcpu_irq;
|
|
+ }
|
|
+
|
|
+ optee->optee_pcpu = optee_pcpu;
|
|
+ optee->notif_pcpu_irq = irq;
|
|
+
|
|
+ return 0;
|
|
+
|
|
+err_free_pcpu_irq:
|
|
+ spin_lock(&lock);
|
|
+ disable_percpu_irq(irq);
|
|
+ spin_unlock(&lock);
|
|
+ free_percpu_irq(irq, optee_pcpu);
|
|
+err_close_ctx:
|
|
+ teedev_close_context(optee->notif.ctx);
|
|
+ optee->notif.ctx = NULL;
|
|
+err_free_pcpu:
|
|
+ free_percpu(optee_pcpu);
|
|
+
|
|
+ return rc;
|
|
+}
|
|
+
|
|
+static void optee_smc_notif_uninit_irq(struct optee *optee)
|
|
+{
|
|
+ if (optee->notif.ctx) {
|
|
+ optee_smc_stop_async_notif(optee->notif.ctx);
|
|
+ if (optee->notif_irq) {
|
|
+ free_irq(optee->notif_irq, optee);
|
|
+ irq_dispose_mapping(optee->notif_irq);
|
|
+ } else if (optee->notif_pcpu_irq) {
|
|
+ free_percpu_irq(optee->notif_irq, optee->optee_pcpu);
|
|
+ irq_dispose_mapping(optee->notif_irq);
|
|
+ }
|
|
+
|
|
+ /*
|
|
+ * The thread normally working with optee->notif.ctx was
|
|
+ * stopped with free_irq() above.
|
|
+ *
|
|
+ * Note we're not using teedev_close_context() or
|
|
+ * tee_client_close_context() since we have already called
|
|
+ * tee_device_put() while initializing to avoid a circular
|
|
+ * reference counting.
|
|
+ */
|
|
+ teedev_close_context(optee->notif.ctx);
|
|
+ }
|
|
+}
|
|
+
|
|
+static int enable_async_notif(optee_invoke_fn *invoke_fn)
|
|
+{
|
|
+ struct arm_smccc_res res;
|
|
+
|
|
+ invoke_fn(OPTEE_SMC_ENABLE_ASYNC_NOTIF, 0, 0, 0, 0, 0, 0, 0, &res);
|
|
+
|
|
+ if (res.a0)
|
|
+ return -EINVAL;
|
|
+ return 0;
|
|
+}
|
|
+
|
|
static bool optee_msg_api_uid_is_optee_api(optee_invoke_fn *invoke_fn)
|
|
{
|
|
struct arm_smccc_res res;
|
|
@@ -404,7 +750,7 @@ static bool optee_msg_api_revision_is_compatible(optee_invoke_fn *invoke_fn)
|
|
}
|
|
|
|
static bool optee_msg_exchange_capabilities(optee_invoke_fn *invoke_fn,
|
|
- u32 *sec_caps)
|
|
+ u32 *sec_caps, u32 *max_notif_value)
|
|
{
|
|
union {
|
|
struct arm_smccc_res smccc;
|
|
@@ -427,6 +773,12 @@ static bool optee_msg_exchange_capabilities(optee_invoke_fn *invoke_fn,
|
|
return false;
|
|
|
|
*sec_caps = res.result.capabilities;
|
|
+
|
|
+ if (*sec_caps & OPTEE_SMC_SEC_CAP_ASYNC_NOTIF)
|
|
+ *max_notif_value = res.result.max_notif_value;
|
|
+ else
|
|
+ *max_notif_value = OPTEE_DEFAULT_MAX_NOTIF_VALUE;
|
|
+
|
|
return true;
|
|
}
|
|
|
|
@@ -596,6 +948,10 @@ static int optee_remove(struct platform_device *pdev)
|
|
*/
|
|
optee_disable_shm_cache(optee);
|
|
|
|
+ optee_irq_domain_uninit(optee);
|
|
+
|
|
+ optee_smc_notif_uninit_irq(optee);
|
|
+
|
|
/*
|
|
* The two devices have to be unregistered before we can free the
|
|
* other resources.
|
|
@@ -606,7 +962,6 @@ static int optee_remove(struct platform_device *pdev)
|
|
tee_shm_pool_free(optee->pool);
|
|
if (optee->memremaped_shm)
|
|
memunmap(optee->memremaped_shm);
|
|
- optee_wait_queue_exit(&optee->wait_queue);
|
|
optee_supp_uninit(&optee->supp);
|
|
mutex_destroy(&optee->call_queue.mutex);
|
|
|
|
@@ -635,6 +990,7 @@ static int optee_probe(struct platform_device *pdev)
|
|
void *memremaped_shm = NULL;
|
|
struct tee_device *teedev;
|
|
struct tee_context *ctx;
|
|
+ u32 max_notif_value;
|
|
u32 sec_caps;
|
|
int rc;
|
|
|
|
@@ -664,7 +1020,8 @@ static int optee_probe(struct platform_device *pdev)
|
|
return -EINVAL;
|
|
}
|
|
|
|
- if (!optee_msg_exchange_capabilities(invoke_fn, &sec_caps)) {
|
|
+ if (!optee_msg_exchange_capabilities(invoke_fn, &sec_caps,
|
|
+ &max_notif_value)) {
|
|
pr_warn("capabilities mismatch\n");
|
|
return -EINVAL;
|
|
}
|
|
@@ -687,7 +1044,7 @@ static int optee_probe(struct platform_device *pdev)
|
|
optee = kzalloc(sizeof(*optee), GFP_KERNEL);
|
|
if (!optee) {
|
|
rc = -ENOMEM;
|
|
- goto err;
|
|
+ goto err_free_pool;
|
|
}
|
|
|
|
optee->invoke_fn = invoke_fn;
|
|
@@ -696,38 +1053,73 @@ static int optee_probe(struct platform_device *pdev)
|
|
teedev = tee_device_alloc(&optee_desc, NULL, pool, optee);
|
|
if (IS_ERR(teedev)) {
|
|
rc = PTR_ERR(teedev);
|
|
- goto err;
|
|
+ goto err_free_optee;
|
|
}
|
|
optee->teedev = teedev;
|
|
|
|
teedev = tee_device_alloc(&optee_supp_desc, NULL, pool, optee);
|
|
if (IS_ERR(teedev)) {
|
|
rc = PTR_ERR(teedev);
|
|
- goto err;
|
|
+ goto err_unreg_teedev;
|
|
}
|
|
optee->supp_teedev = teedev;
|
|
|
|
rc = tee_device_register(optee->teedev);
|
|
if (rc)
|
|
- goto err;
|
|
+ goto err_unreg_supp_teedev;
|
|
|
|
rc = tee_device_register(optee->supp_teedev);
|
|
if (rc)
|
|
- goto err;
|
|
+ goto err_unreg_supp_teedev;
|
|
|
|
mutex_init(&optee->call_queue.mutex);
|
|
INIT_LIST_HEAD(&optee->call_queue.waiters);
|
|
- optee_wait_queue_init(&optee->wait_queue);
|
|
optee_supp_init(&optee->supp);
|
|
optee->memremaped_shm = memremaped_shm;
|
|
optee->pool = pool;
|
|
+
|
|
ctx = teedev_open(optee->teedev);
|
|
if (IS_ERR(ctx)) {
|
|
rc = PTR_ERR(ctx);
|
|
- goto err;
|
|
+ goto err_supp_uninit;
|
|
}
|
|
optee->ctx = ctx;
|
|
|
|
+ platform_set_drvdata(pdev, optee);
|
|
+ rc = optee_notif_init(optee, max_notif_value);
|
|
+ if (rc)
|
|
+ goto err_close_ctx;
|
|
+
|
|
+ if (sec_caps & OPTEE_SMC_SEC_CAP_ASYNC_NOTIF) {
|
|
+ unsigned int irq;
|
|
+
|
|
+ rc = platform_get_irq(pdev, 0);
|
|
+ if (rc < 0) {
|
|
+ pr_err("platform_get_irq: ret %d\n", rc);
|
|
+ goto err_notif_uninit;
|
|
+ }
|
|
+ irq = rc;
|
|
+
|
|
+ if (irq_is_percpu_devid(irq))
|
|
+ rc = optee_smc_notif_pcpu_init_irq(optee, irq);
|
|
+ else
|
|
+ rc = optee_smc_notif_init_irq(optee, irq);
|
|
+
|
|
+ if (rc) {
|
|
+ irq_dispose_mapping(irq);
|
|
+ goto err_notif_uninit;
|
|
+ }
|
|
+
|
|
+ rc = optee_irq_domain_init(pdev, optee);
|
|
+ if (rc) {
|
|
+ irq_dispose_mapping(irq);
|
|
+ goto err_notif_uninit;
|
|
+ }
|
|
+
|
|
+ enable_async_notif(optee->invoke_fn);
|
|
+ pr_info("Asynchronous notifications enabled\n");
|
|
+ }
|
|
+
|
|
/*
|
|
* Ensure that there are no pre-existing shm objects before enabling
|
|
* the shm cache so that there's no chance of receiving an invalid
|
|
@@ -742,31 +1134,36 @@ static int optee_probe(struct platform_device *pdev)
|
|
if (optee->sec_caps & OPTEE_SMC_SEC_CAP_DYNAMIC_SHM)
|
|
pr_info("dynamic shared memory is enabled\n");
|
|
|
|
- platform_set_drvdata(pdev, optee);
|
|
-
|
|
rc = optee_enumerate_devices(PTA_CMD_GET_DEVICES);
|
|
- if (rc) {
|
|
- optee_remove(pdev);
|
|
- return rc;
|
|
- }
|
|
+ if (rc)
|
|
+ goto err_disable_shm_cache;
|
|
|
|
pr_info("initialized driver\n");
|
|
return 0;
|
|
-err:
|
|
- if (optee) {
|
|
- /*
|
|
- * tee_device_unregister() is safe to call even if the
|
|
- * devices hasn't been registered with
|
|
- * tee_device_register() yet.
|
|
- */
|
|
- tee_device_unregister(optee->supp_teedev);
|
|
- tee_device_unregister(optee->teedev);
|
|
- kfree(optee);
|
|
- }
|
|
- if (pool)
|
|
- tee_shm_pool_free(pool);
|
|
+
|
|
+err_disable_shm_cache:
|
|
+ optee_disable_shm_cache(optee);
|
|
+ optee_smc_notif_uninit_irq(optee);
|
|
+ optee_unregister_devices();
|
|
+ optee_irq_domain_uninit(optee);
|
|
+err_notif_uninit:
|
|
+ optee_notif_uninit(optee);
|
|
+err_close_ctx:
|
|
+ teedev_close_context(optee->ctx);
|
|
+err_supp_uninit:
|
|
+ optee_supp_uninit(&optee->supp);
|
|
+ mutex_destroy(&optee->call_queue.mutex);
|
|
+err_unreg_supp_teedev:
|
|
+ tee_device_unregister(optee->supp_teedev);
|
|
+err_unreg_teedev:
|
|
+ tee_device_unregister(optee->teedev);
|
|
+err_free_optee:
|
|
+ kfree(optee);
|
|
+err_free_pool:
|
|
+ tee_shm_pool_free(pool);
|
|
if (memremaped_shm)
|
|
memunmap(memremaped_shm);
|
|
+
|
|
return rc;
|
|
}
|
|
|
|
diff --git a/drivers/tee/optee/notif.c b/drivers/tee/optee/notif.c
|
|
new file mode 100644
|
|
index 000000000000..a28fa03dcd0e
|
|
--- /dev/null
|
|
+++ b/drivers/tee/optee/notif.c
|
|
@@ -0,0 +1,125 @@
|
|
+// SPDX-License-Identifier: GPL-2.0-only
|
|
+/*
|
|
+ * Copyright (c) 2015-2021, Linaro Limited
|
|
+ */
|
|
+
|
|
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
|
|
+
|
|
+#include <linux/arm-smccc.h>
|
|
+#include <linux/errno.h>
|
|
+#include <linux/slab.h>
|
|
+#include <linux/spinlock.h>
|
|
+#include <linux/tee_drv.h>
|
|
+#include "optee_private.h"
|
|
+
|
|
+struct notif_entry {
|
|
+ struct list_head link;
|
|
+ struct completion c;
|
|
+ u_int key;
|
|
+};
|
|
+
|
|
+static bool have_key(struct optee *optee, u_int key)
|
|
+{
|
|
+ struct notif_entry *entry;
|
|
+
|
|
+ list_for_each_entry(entry, &optee->notif.db, link)
|
|
+ if (entry->key == key)
|
|
+ return true;
|
|
+
|
|
+ return false;
|
|
+}
|
|
+
|
|
+int optee_notif_wait(struct optee *optee, u_int key)
|
|
+{
|
|
+ unsigned long flags;
|
|
+ struct notif_entry *entry;
|
|
+ int rc = 0;
|
|
+
|
|
+ if (key > optee->notif.max_key)
|
|
+ return -EINVAL;
|
|
+
|
|
+ entry = kmalloc(sizeof(*entry), GFP_KERNEL);
|
|
+ if (!entry)
|
|
+ return -ENOMEM;
|
|
+ init_completion(&entry->c);
|
|
+ entry->key = key;
|
|
+
|
|
+ spin_lock_irqsave(&optee->notif.lock, flags);
|
|
+
|
|
+ /*
|
|
+ * If the bit is already set it means that the key has already
|
|
+ * been posted and we must not wait.
|
|
+ */
|
|
+ if (test_bit(key, optee->notif.bitmap)) {
|
|
+ clear_bit(key, optee->notif.bitmap);
|
|
+ goto out;
|
|
+ }
|
|
+
|
|
+ /*
|
|
+ * Check if someone is already waiting for this key. If there is
|
|
+ * it's a programming error.
|
|
+ */
|
|
+ if (have_key(optee, key)) {
|
|
+ rc = -EBUSY;
|
|
+ goto out;
|
|
+ }
|
|
+
|
|
+ list_add_tail(&entry->link, &optee->notif.db);
|
|
+
|
|
+ /*
|
|
+ * Unlock temporarily and wait for completion.
|
|
+ */
|
|
+ spin_unlock_irqrestore(&optee->notif.lock, flags);
|
|
+ wait_for_completion(&entry->c);
|
|
+ spin_lock_irqsave(&optee->notif.lock, flags);
|
|
+
|
|
+ list_del(&entry->link);
|
|
+out:
|
|
+ spin_unlock_irqrestore(&optee->notif.lock, flags);
|
|
+
|
|
+ kfree(entry);
|
|
+
|
|
+ return rc;
|
|
+}
|
|
+
|
|
+int optee_notif_send(struct optee *optee, u_int key)
|
|
+{
|
|
+ unsigned long flags;
|
|
+ struct notif_entry *entry;
|
|
+
|
|
+ if (key > optee->notif.max_key)
|
|
+ return -EINVAL;
|
|
+
|
|
+ spin_lock_irqsave(&optee->notif.lock, flags);
|
|
+
|
|
+ list_for_each_entry(entry, &optee->notif.db, link)
|
|
+ if (entry->key == key) {
|
|
+ complete(&entry->c);
|
|
+ goto out;
|
|
+ }
|
|
+
|
|
+ /* Only set the bit in case there where nobody waiting */
|
|
+ set_bit(key, optee->notif.bitmap);
|
|
+out:
|
|
+ spin_unlock_irqrestore(&optee->notif.lock, flags);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+int optee_notif_init(struct optee *optee, u_int max_key)
|
|
+{
|
|
+ spin_lock_init(&optee->notif.lock);
|
|
+ INIT_LIST_HEAD(&optee->notif.db);
|
|
+ optee->notif.bitmap = bitmap_zalloc(max_key, GFP_KERNEL);
|
|
+ if (!optee->notif.bitmap)
|
|
+ return -ENOMEM;
|
|
+
|
|
+ optee->notif.max_key = max_key;
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+void optee_notif_uninit(struct optee *optee)
|
|
+{
|
|
+ kfree(optee->notif.bitmap);
|
|
+}
|
|
diff --git a/drivers/tee/optee/optee_msg.h b/drivers/tee/optee/optee_msg.h
|
|
index e3d72d09c484..e247cbce8710 100644
|
|
--- a/drivers/tee/optee/optee_msg.h
|
|
+++ b/drivers/tee/optee/optee_msg.h
|
|
@@ -12,9 +12,11 @@
|
|
* This file defines the OP-TEE message protocol (ABI) used to communicate
|
|
* with an instance of OP-TEE running in secure world.
|
|
*
|
|
- * This file is divided into two sections.
|
|
+ * This file is divided into three sections.
|
|
* 1. Formatting of messages.
|
|
* 2. Requests from normal world
|
|
+ * 3. Requests from secure world, Remote Procedure Call (RPC), handled by
|
|
+ * tee-supplicant.
|
|
*/
|
|
|
|
/*****************************************************************************
|
|
@@ -52,8 +54,8 @@
|
|
* Every entry in buffer should point to a 4k page beginning (12 least
|
|
* significant bits must be equal to zero).
|
|
*
|
|
- * 12 least significant bits of optee_msg_param.u.tmem.buf_ptr should hold
|
|
- * page offset of user buffer.
|
|
+ * 12 least significant bints of optee_msg_param.u.tmem.buf_ptr should hold page
|
|
+ * offset of the user buffer.
|
|
*
|
|
* So, entries should be placed like members of this structure:
|
|
*
|
|
@@ -293,6 +295,13 @@ struct optee_msg_arg {
|
|
* [in] param[0].u.rmem.shm_ref holds shared memory reference
|
|
* [in] param[0].u.rmem.offs 0
|
|
* [in] param[0].u.rmem.size 0
|
|
+ *
|
|
+ * OPTEE_MSG_CMD_DO_BOTTOM_HALF does the scheduled bottom half processing
|
|
+ * of a driver.
|
|
+ *
|
|
+ * OPTEE_MSG_CMD_STOP_ASYNC_NOTIF informs secure world that from now is
|
|
+ * normal world unable to process asynchronous notifications. Typically
|
|
+ * used when the driver is shut down.
|
|
*/
|
|
#define OPTEE_MSG_CMD_OPEN_SESSION 0
|
|
#define OPTEE_MSG_CMD_INVOKE_COMMAND 1
|
|
@@ -300,6 +309,151 @@ struct optee_msg_arg {
|
|
#define OPTEE_MSG_CMD_CANCEL 3
|
|
#define OPTEE_MSG_CMD_REGISTER_SHM 4
|
|
#define OPTEE_MSG_CMD_UNREGISTER_SHM 5
|
|
+#define OPTEE_MSG_CMD_DO_BOTTOM_HALF 6
|
|
+#define OPTEE_MSG_CMD_STOP_ASYNC_NOTIF 7
|
|
#define OPTEE_MSG_FUNCID_CALL_WITH_ARG 0x0004
|
|
|
|
+/*****************************************************************************
|
|
+ * Part 3 - Requests from secure world, RPC
|
|
+ *****************************************************************************/
|
|
+
|
|
+/*
|
|
+ * All RPC is done with a struct optee_msg_arg as bearer of information,
|
|
+ * struct optee_msg_arg::arg holds values defined by OPTEE_MSG_RPC_CMD_* below
|
|
+ *
|
|
+ * RPC communication with tee-supplicant is reversed compared to normal
|
|
+ * client communication desribed above. The supplicant receives requests
|
|
+ * and sends responses.
|
|
+ */
|
|
+
|
|
+/*
|
|
+ * Load a TA into memory, defined in tee-supplicant
|
|
+ */
|
|
+#define OPTEE_MSG_RPC_CMD_LOAD_TA 0
|
|
+
|
|
+/*
|
|
+ * Reserved
|
|
+ */
|
|
+#define OPTEE_MSG_RPC_CMD_RPMB 1
|
|
+
|
|
+/*
|
|
+ * File system access, defined in tee-supplicant
|
|
+ */
|
|
+#define OPTEE_MSG_RPC_CMD_FS 2
|
|
+
|
|
+/*
|
|
+ * Get time
|
|
+ *
|
|
+ * Returns number of seconds and nano seconds since the Epoch,
|
|
+ * 1970-01-01 00:00:00 +0000 (UTC).
|
|
+ *
|
|
+ * [out] param[0].u.value.a Number of seconds
|
|
+ * [out] param[0].u.value.b Number of nano seconds.
|
|
+ */
|
|
+#define OPTEE_MSG_RPC_CMD_GET_TIME 3
|
|
+
|
|
+/*
|
|
+ * Wait queue primitive, helper for secure world to implement a wait queue.
|
|
+ *
|
|
+ * If secure world need to wait for a secure world mutex it issues a sleep
|
|
+ * request instead of spinning in secure world. Conversely is a wakeup
|
|
+ * request issued when a secure world mutex with a thread waiting thread is
|
|
+ * unlocked.
|
|
+ *
|
|
+ * Waiting on a key
|
|
+ * [in] param[0].u.value.a OPTEE_MSG_RPC_WAIT_QUEUE_SLEEP
|
|
+ * [in] param[0].u.value.b wait key
|
|
+ *
|
|
+ * Waking up a key
|
|
+ * [in] param[0].u.value.a OPTEE_MSG_RPC_WAIT_QUEUE_WAKEUP
|
|
+ * [in] param[0].u.value.b wakeup key
|
|
+ */
|
|
+#define OPTEE_MSG_RPC_CMD_WAIT_QUEUE 4
|
|
+#define OPTEE_MSG_RPC_WAIT_QUEUE_SLEEP 0
|
|
+#define OPTEE_MSG_RPC_WAIT_QUEUE_WAKEUP 1
|
|
+
|
|
+/*
|
|
+ * Suspend execution
|
|
+ *
|
|
+ * [in] param[0].value .a number of milliseconds to suspend
|
|
+ */
|
|
+#define OPTEE_MSG_RPC_CMD_SUSPEND 5
|
|
+
|
|
+/*
|
|
+ * Allocate a piece of shared memory
|
|
+ *
|
|
+ * Shared memory can optionally be fragmented, to support that additional
|
|
+ * spare param entries are allocated to make room for eventual fragments.
|
|
+ * The spare param entries has .attr = OPTEE_MSG_ATTR_TYPE_NONE when
|
|
+ * unused. All returned temp memrefs except the last should have the
|
|
+ * OPTEE_MSG_ATTR_FRAGMENT bit set in the attr field.
|
|
+ *
|
|
+ * [in] param[0].u.value.a type of memory one of
|
|
+ * OPTEE_MSG_RPC_SHM_TYPE_* below
|
|
+ * [in] param[0].u.value.b requested size
|
|
+ * [in] param[0].u.value.c required alignment
|
|
+ *
|
|
+ * [out] param[0].u.tmem.buf_ptr physical address (of first fragment)
|
|
+ * [out] param[0].u.tmem.size size (of first fragment)
|
|
+ * [out] param[0].u.tmem.shm_ref shared memory reference
|
|
+ * ...
|
|
+ * [out] param[n].u.tmem.buf_ptr physical address
|
|
+ * [out] param[n].u.tmem.size size
|
|
+ * [out] param[n].u.tmem.shm_ref shared memory reference (same value
|
|
+ * as in param[n-1].u.tmem.shm_ref)
|
|
+ */
|
|
+#define OPTEE_MSG_RPC_CMD_SHM_ALLOC 6
|
|
+/* Memory that can be shared with a non-secure user space application */
|
|
+#define OPTEE_MSG_RPC_SHM_TYPE_APPL 0
|
|
+/* Memory only shared with non-secure kernel */
|
|
+#define OPTEE_MSG_RPC_SHM_TYPE_KERNEL 1
|
|
+#define OPTEE_MSG_RPC_SHM_TYPE_GLOBAL 2
|
|
+/* Memory shared with the requesting TA's Client Application */
|
|
+#define OPTEE_MSG_RPC_SHM_TYPE_CLIENT_APPL 3
|
|
+
|
|
+/*
|
|
+ * Free shared memory previously allocated with OPTEE_MSG_RPC_CMD_SHM_ALLOC
|
|
+ *
|
|
+ * [in] param[0].u.value.a type of memory one of
|
|
+ * OPTEE_MSG_RPC_SHM_TYPE_* above
|
|
+ * [in] param[0].u.value.b value of shared memory reference
|
|
+ * returned in param[0].u.tmem.shm_ref
|
|
+ * above
|
|
+ */
|
|
+#define OPTEE_MSG_RPC_CMD_SHM_FREE 7
|
|
+
|
|
+/*
|
|
+ * Access a device on an i2c bus
|
|
+ *
|
|
+ * [in] param[0].u.value.a mode: RD(0), WR(1)
|
|
+ * [in] param[0].u.value.b i2c adapter
|
|
+ * [in] param[0].u.value.c i2c chip
|
|
+ *
|
|
+ * [in] param[1].u.value.a i2c control flags
|
|
+ *
|
|
+ * [in/out] memref[2] buffer to exchange the transfer data
|
|
+ * with the secure world
|
|
+ *
|
|
+ * [out] param[3].u.value.a bytes transferred by the driver
|
|
+ */
|
|
+#define OPTEE_MSG_RPC_CMD_I2C_TRANSFER 21
|
|
+/* I2C master transfer modes */
|
|
+#define OPTEE_MSG_RPC_CMD_I2C_TRANSFER_RD 0
|
|
+#define OPTEE_MSG_RPC_CMD_I2C_TRANSFER_WR 1
|
|
+/* I2C master control flags */
|
|
+#define OPTEE_MSG_RPC_CMD_I2C_FLAGS_TEN_BIT BIT(0)
|
|
+
|
|
+/*
|
|
+ * Send a command to the Client Application.
|
|
+ *
|
|
+ * [in] param[0].u.value[0].a command Id
|
|
+ * [out] param[0].u.value[0].b OCALL return value
|
|
+ * [out] param[0].u.value[0].c OCALL return value origin
|
|
+ * [in] param[0].u.value[1].a UUID of TA whence OCALL originated (Hi)
|
|
+ * [out] param[0].u.value[1].b UUID of TA whence OCALL originated (Lo)
|
|
+ *
|
|
+ * [in/out] any[2..5].* OCALL parameters as specified by the TA, if any
|
|
+ */
|
|
+#define OPTEE_MSG_RPC_CMD_OCALL 22
|
|
+
|
|
#endif /* _OPTEE_MSG_H */
|
|
diff --git a/drivers/tee/optee/optee_private.h b/drivers/tee/optee/optee_private.h
|
|
index ea09533e30cd..37ab55cc104e 100644
|
|
--- a/drivers/tee/optee/optee_private.h
|
|
+++ b/drivers/tee/optee/optee_private.h
|
|
@@ -16,14 +16,25 @@
|
|
|
|
/* Some Global Platform error codes used in this driver */
|
|
#define TEEC_SUCCESS 0x00000000
|
|
+#define TEEC_ERROR_CANCEL 0xFFFF0002
|
|
#define TEEC_ERROR_BAD_PARAMETERS 0xFFFF0006
|
|
#define TEEC_ERROR_NOT_SUPPORTED 0xFFFF000A
|
|
#define TEEC_ERROR_COMMUNICATION 0xFFFF000E
|
|
#define TEEC_ERROR_OUT_OF_MEMORY 0xFFFF000C
|
|
#define TEEC_ERROR_SHORT_BUFFER 0xFFFF0010
|
|
+#define TEEC_ERROR_TARGET_DEAD 0xFFFF3024
|
|
|
|
#define TEEC_ORIGIN_COMMS 0x00000002
|
|
|
|
+/*
|
|
+ * This value should be larger than the number threads in secure world to
|
|
+ * meet the need from secure world. The number of threads in secure world
|
|
+ * are usually not even close to 255 so we should be safe for now.
|
|
+ */
|
|
+#define OPTEE_DEFAULT_MAX_NOTIF_VALUE 255
|
|
+
|
|
+#define OPTEE_MAX_IT 32
|
|
+
|
|
typedef void (optee_invoke_fn)(unsigned long, unsigned long, unsigned long,
|
|
unsigned long, unsigned long, unsigned long,
|
|
unsigned long, unsigned long,
|
|
@@ -35,10 +46,13 @@ struct optee_call_queue {
|
|
struct list_head waiters;
|
|
};
|
|
|
|
-struct optee_wait_queue {
|
|
- /* Serializes access to this struct */
|
|
- struct mutex mu;
|
|
+struct optee_notif {
|
|
+ u_int max_key;
|
|
+ struct tee_context *ctx;
|
|
+ /* Serializes access to the elements below in this struct */
|
|
+ spinlock_t lock;
|
|
struct list_head db;
|
|
+ u_long *bitmap;
|
|
};
|
|
|
|
/**
|
|
@@ -73,8 +87,7 @@ struct optee_supp {
|
|
* @ctx: driver internal TEE context
|
|
* @invoke_fn: function to issue smc or hvc
|
|
* @call_queue: queue of threads waiting to call @invoke_fn
|
|
- * @wait_queue: queue of threads from secure world waiting for a
|
|
- * secure world sync object
|
|
+ * @notif: notification synchronization struct
|
|
* @supp: supplicant synchronization struct for RPC to supplicant
|
|
* @pool: shared memory pool
|
|
* @memremaped_shm virtual address of memory in shared memory pool
|
|
@@ -83,6 +96,9 @@ struct optee_supp {
|
|
* @scan_bus_done flag if device registation was already done.
|
|
* @scan_bus_wq workqueue to scan optee bus and register optee drivers
|
|
* @scan_bus_work workq to scan optee bus and register optee drivers
|
|
+ * @optee_pcpu per_cpu optee instance
|
|
+ * @notif_pcpu_wq workqueue for per cpu aynchronous notification
|
|
+ * @notif_pcpu_work work for per cpu asynchronous notification
|
|
*/
|
|
struct optee {
|
|
struct tee_device *supp_teedev;
|
|
@@ -90,7 +106,7 @@ struct optee {
|
|
optee_invoke_fn *invoke_fn;
|
|
struct tee_context *ctx;
|
|
struct optee_call_queue call_queue;
|
|
- struct optee_wait_queue wait_queue;
|
|
+ struct optee_notif notif;
|
|
struct optee_supp supp;
|
|
struct tee_shm_pool *pool;
|
|
void *memremaped_shm;
|
|
@@ -98,17 +114,81 @@ struct optee {
|
|
bool scan_bus_done;
|
|
struct workqueue_struct *scan_bus_wq;
|
|
struct work_struct scan_bus_work;
|
|
+ unsigned int notif_irq;
|
|
+ unsigned int notif_pcpu_irq;
|
|
+ struct optee_pcpu __percpu *optee_pcpu;
|
|
+ struct workqueue_struct *notif_pcpu_wq;
|
|
+ struct work_struct notif_pcpu_work;
|
|
+ struct irq_domain *domain;
|
|
+};
|
|
+
|
|
+struct optee_call_waiter {
|
|
+ struct list_head list_node;
|
|
+ struct completion c;
|
|
+};
|
|
+
|
|
+struct optee_pcpu {
|
|
+ struct optee *optee;
|
|
+};
|
|
+
|
|
+/**
|
|
+ * struct optee_call_ctx - holds context that is preserved during one STD call
|
|
+ * @pages_list: list of pages allocated for RPC requests
|
|
+ * @num_entries: number of pages in 'pages_list'
|
|
+ * @ctx: TEE context whence the OCALL originated, if any
|
|
+ * @msg_shm: shared memory object used for calling into OP-TEE
|
|
+ * @msg_arg: arguments used for calling into OP-TEE, namely the data
|
|
+ * behind 'msg_shm'
|
|
+ * @msg_parg: physical pointer underlying 'msg_shm'
|
|
+ * @rpc_must_release: indicates that OCALL parameters have had their refcount
|
|
+ * increased and must be decreased on cancellation
|
|
+ * @rpc_shm: shared memory object used for responding to RPCs
|
|
+ * @rpc_arg: arguments used for responding to RPCs, namely the data
|
|
+ * behind 'rpc_shm'
|
|
+ * @thread_id: secure thread Id whence the OCALL originated and which
|
|
+ * must be resumed when replying to the OCALL
|
|
+ * @waiter: object used to wait until a secure thread becomes
|
|
+ * available is the previous call into OP-TEE failed
|
|
+ * because all secure threads are in use
|
|
+ * @ocall_pages_list: list of pages allocated for OCALL requests
|
|
+ * @ocall_num_entries: number of pages in 'ocall_pages_list'
|
|
+ */
|
|
+struct optee_call_ctx {
|
|
+ /* Information about pages list used in last allocation */
|
|
+ void *pages_list;
|
|
+ size_t num_entries;
|
|
+
|
|
+ /* OCALL support */
|
|
+ struct tee_context *ctx;
|
|
+
|
|
+ struct tee_shm *msg_shm;
|
|
+ struct optee_msg_arg *msg_arg;
|
|
+ phys_addr_t msg_parg;
|
|
+
|
|
+ bool rpc_must_release;
|
|
+ struct tee_shm *rpc_shm;
|
|
+ struct optee_msg_arg *rpc_arg;
|
|
+
|
|
+ u32 thread_id;
|
|
+ struct optee_call_waiter waiter;
|
|
+
|
|
+ void *ocall_pages_list;
|
|
+ size_t ocall_num_entries;
|
|
};
|
|
|
|
struct optee_session {
|
|
+ /* Serializes access to this struct */
|
|
+ struct semaphore sem;
|
|
struct list_head list_node;
|
|
u32 session_id;
|
|
+ struct optee_call_ctx call_ctx;
|
|
};
|
|
|
|
struct optee_context_data {
|
|
/* Serializes access to this struct */
|
|
struct mutex mutex;
|
|
struct list_head sess_list;
|
|
+ struct idr tmp_sess_list;
|
|
};
|
|
|
|
struct optee_rpc_param {
|
|
@@ -122,19 +202,40 @@ struct optee_rpc_param {
|
|
u32 a7;
|
|
};
|
|
|
|
-/* Holds context that is preserved during one STD call */
|
|
-struct optee_call_ctx {
|
|
- /* information about pages list used in last allocation */
|
|
- void *pages_list;
|
|
- size_t num_entries;
|
|
-};
|
|
+/*
|
|
+ * RPC support
|
|
+ */
|
|
|
|
void optee_handle_rpc(struct tee_context *ctx, struct optee_rpc_param *param,
|
|
struct optee_call_ctx *call_ctx);
|
|
+bool optee_rpc_is_ocall(struct optee_rpc_param *param,
|
|
+ struct optee_call_ctx *call_ctx);
|
|
void optee_rpc_finalize_call(struct optee_call_ctx *call_ctx);
|
|
|
|
-void optee_wait_queue_init(struct optee_wait_queue *wq);
|
|
-void optee_wait_queue_exit(struct optee_wait_queue *wq);
|
|
+/*
|
|
+ * Wait queue
|
|
+ */
|
|
+
|
|
+int optee_notif_init(struct optee *optee, u_int max_key);
|
|
+void optee_notif_uninit(struct optee *optee);
|
|
+int optee_notif_wait(struct optee *optee, u_int key);
|
|
+int optee_notif_send(struct optee *optee, u_int key);
|
|
+
|
|
+/*
|
|
+ * Call queue
|
|
+ */
|
|
+
|
|
+void optee_cq_wait_init(struct optee_call_queue *cq,
|
|
+ struct optee_call_waiter *w);
|
|
+void optee_cq_wait_for_completion(struct optee_call_queue *cq,
|
|
+ struct optee_call_waiter *w);
|
|
+void optee_cq_complete_one(struct optee_call_queue *cq);
|
|
+void optee_cq_wait_final(struct optee_call_queue *cq,
|
|
+ struct optee_call_waiter *w);
|
|
+
|
|
+/*
|
|
+ * Supplicant
|
|
+ */
|
|
|
|
u32 optee_supp_thrd_req(struct tee_context *ctx, u32 func, size_t num_params,
|
|
struct tee_param *param);
|
|
@@ -150,15 +251,40 @@ int optee_supp_recv(struct tee_context *ctx, u32 *func, u32 *num_params,
|
|
int optee_supp_send(struct tee_context *ctx, u32 ret, u32 num_params,
|
|
struct tee_param *param);
|
|
|
|
+/*
|
|
+ * Calls into OP-TEE
|
|
+ */
|
|
+
|
|
u32 optee_do_call_with_arg(struct tee_context *ctx, phys_addr_t parg);
|
|
+
|
|
+/*
|
|
+ * Sessions
|
|
+ */
|
|
+
|
|
int optee_open_session(struct tee_context *ctx,
|
|
struct tee_ioctl_open_session_arg *arg,
|
|
- struct tee_param *param);
|
|
+ struct tee_param *normal_param, u32 num_normal_params,
|
|
+ struct tee_param *ocall_param);
|
|
int optee_close_session(struct tee_context *ctx, u32 session);
|
|
+
|
|
+/*
|
|
+ * Function invocations
|
|
+ */
|
|
+
|
|
int optee_invoke_func(struct tee_context *ctx, struct tee_ioctl_invoke_arg *arg,
|
|
- struct tee_param *param);
|
|
+ struct tee_param *normal_param, u32 num_normal_params,
|
|
+ struct tee_param *ocall_param);
|
|
+
|
|
+/*
|
|
+ * Cancellations
|
|
+ */
|
|
+
|
|
int optee_cancel_req(struct tee_context *ctx, u32 cancel_id, u32 session);
|
|
|
|
+/*
|
|
+ * Shared memory
|
|
+ */
|
|
+
|
|
void optee_enable_shm_cache(struct optee *optee);
|
|
void optee_disable_shm_cache(struct optee *optee);
|
|
void optee_disable_unmapped_shm_cache(struct optee *optee);
|
|
@@ -173,21 +299,43 @@ int optee_shm_register_supp(struct tee_context *ctx, struct tee_shm *shm,
|
|
unsigned long start);
|
|
int optee_shm_unregister_supp(struct tee_context *ctx, struct tee_shm *shm);
|
|
|
|
+/*
|
|
+ * Paremeters
|
|
+ */
|
|
+
|
|
int optee_from_msg_param(struct tee_param *params, size_t num_params,
|
|
const struct optee_msg_param *msg_params);
|
|
int optee_to_msg_param(struct optee_msg_param *msg_params, size_t num_params,
|
|
const struct tee_param *params);
|
|
+struct tee_shm *optee_get_msg_arg(struct tee_context *ctx, size_t num_params,
|
|
+ struct optee_msg_arg **msg_arg,
|
|
+ phys_addr_t *msg_parg);
|
|
+
|
|
+/*
|
|
+ * RPC memory
|
|
+ */
|
|
|
|
u64 *optee_allocate_pages_list(size_t num_entries);
|
|
void optee_free_pages_list(void *array, size_t num_entries);
|
|
void optee_fill_pages_list(u64 *dst, struct page **pages, int num_pages,
|
|
size_t page_offset);
|
|
|
|
+/*
|
|
+ * Devices
|
|
+ */
|
|
+
|
|
#define PTA_CMD_GET_DEVICES 0x0
|
|
#define PTA_CMD_GET_DEVICES_SUPP 0x1
|
|
int optee_enumerate_devices(u32 func);
|
|
void optee_unregister_devices(void);
|
|
|
|
+/*
|
|
+ * OCALLs
|
|
+ */
|
|
+
|
|
+void optee_cancel_open_session_ocall(struct optee_session *sess);
|
|
+void optee_cancel_invoke_function_ocall(struct optee_call_ctx *call_ctx);
|
|
+
|
|
/*
|
|
* Small helpers
|
|
*/
|
|
diff --git a/drivers/tee/optee/optee_rpc_cmd.h b/drivers/tee/optee/optee_rpc_cmd.h
|
|
index b8275140cef8..f3f06e0994a7 100644
|
|
--- a/drivers/tee/optee/optee_rpc_cmd.h
|
|
+++ b/drivers/tee/optee/optee_rpc_cmd.h
|
|
@@ -28,24 +28,27 @@
|
|
#define OPTEE_RPC_CMD_GET_TIME 3
|
|
|
|
/*
|
|
- * Wait queue primitive, helper for secure world to implement a wait queue.
|
|
+ * Notification from/to secure world.
|
|
*
|
|
- * If secure world needs to wait for a secure world mutex it issues a sleep
|
|
- * request instead of spinning in secure world. Conversely is a wakeup
|
|
- * request issued when a secure world mutex with a thread waiting thread is
|
|
- * unlocked.
|
|
+ * If secure world needs to wait for something, for instance a mutex, it
|
|
+ * does a notification wait request instead of spinning in secure world.
|
|
+ * Conversely can a synchronous notification can be sent when a secure
|
|
+ * world mutex with a thread waiting thread is unlocked.
|
|
*
|
|
- * Waiting on a key
|
|
- * [in] value[0].a OPTEE_RPC_WAIT_QUEUE_SLEEP
|
|
- * [in] value[0].b Wait key
|
|
+ * This interface can also be used to wait for a asynchronous notification
|
|
+ * which instead is sent via a non-secure interrupt.
|
|
*
|
|
- * Waking up a key
|
|
- * [in] value[0].a OPTEE_RPC_WAIT_QUEUE_WAKEUP
|
|
- * [in] value[0].b Wakeup key
|
|
+ * Waiting on notification
|
|
+ * [in] value[0].a OPTEE_RPC_NOTIFICATION_WAIT
|
|
+ * [in] value[0].b notification value
|
|
+ *
|
|
+ * Sending a synchronous notification
|
|
+ * [in] value[0].a OPTEE_RPC_NOTIFICATION_SEND
|
|
+ * [in] value[0].b notification value
|
|
*/
|
|
-#define OPTEE_RPC_CMD_WAIT_QUEUE 4
|
|
-#define OPTEE_RPC_WAIT_QUEUE_SLEEP 0
|
|
-#define OPTEE_RPC_WAIT_QUEUE_WAKEUP 1
|
|
+#define OPTEE_RPC_CMD_NOTIFICATION 4
|
|
+#define OPTEE_RPC_NOTIFICATION_WAIT 0
|
|
+#define OPTEE_RPC_NOTIFICATION_SEND 1
|
|
|
|
/*
|
|
* Suspend execution
|
|
diff --git a/drivers/tee/optee/optee_smc.h b/drivers/tee/optee/optee_smc.h
|
|
index 80eb763a8a80..6fd5c2cd88a6 100644
|
|
--- a/drivers/tee/optee/optee_smc.h
|
|
+++ b/drivers/tee/optee/optee_smc.h
|
|
@@ -107,6 +107,12 @@ struct optee_smc_call_get_os_revision_result {
|
|
/*
|
|
* Call with struct optee_msg_arg as argument
|
|
*
|
|
+ * When calling this function normal world has a few responsibilities:
|
|
+ * 1. It must be able to handle eventual RPCs
|
|
+ * 2. Non-secure interrupts should not be masked
|
|
+ * 3. If asynchronous notifications has been negotiated successfully, then
|
|
+ * asynchronous notifications should be unmasked during this call.
|
|
+ *
|
|
* Call register usage:
|
|
* a0 SMC Function ID, OPTEE_SMC*CALL_WITH_ARG
|
|
* a1 Upper 32 bits of a 64-bit physical pointer to a struct optee_msg_arg
|
|
@@ -195,7 +201,8 @@ struct optee_smc_get_shm_config_result {
|
|
* Normal return register usage:
|
|
* a0 OPTEE_SMC_RETURN_OK
|
|
* a1 bitfield of secure world capabilities OPTEE_SMC_SEC_CAP_*
|
|
- * a2-7 Preserved
|
|
+ * a2 The maximum secure world notification number
|
|
+ * a3-7 Preserved
|
|
*
|
|
* Error return register usage:
|
|
* a0 OPTEE_SMC_RETURN_ENOTAVAIL, can't use the capabilities from normal world
|
|
@@ -218,6 +225,11 @@ struct optee_smc_get_shm_config_result {
|
|
#define OPTEE_SMC_SEC_CAP_VIRTUALIZATION BIT(3)
|
|
/* Secure world supports Shared Memory with a NULL reference */
|
|
#define OPTEE_SMC_SEC_CAP_MEMREF_NULL BIT(4)
|
|
+/* Secure world supports asynchronous notification of normal world */
|
|
+#define OPTEE_SMC_SEC_CAP_ASYNC_NOTIF BIT(5)
|
|
+
|
|
+/* Secure world is built with OCALL support */
|
|
+#define OPTEE_SMC_SEC_CAP_OCALL BIT(31)
|
|
|
|
#define OPTEE_SMC_FUNCID_EXCHANGE_CAPABILITIES 9
|
|
#define OPTEE_SMC_EXCHANGE_CAPABILITIES \
|
|
@@ -226,8 +238,8 @@ struct optee_smc_get_shm_config_result {
|
|
struct optee_smc_exchange_capabilities_result {
|
|
unsigned long status;
|
|
unsigned long capabilities;
|
|
+ unsigned long max_notif_value;
|
|
unsigned long reserved0;
|
|
- unsigned long reserved1;
|
|
};
|
|
|
|
/*
|
|
@@ -319,6 +331,84 @@ struct optee_smc_disable_shm_cache_result {
|
|
#define OPTEE_SMC_GET_THREAD_COUNT \
|
|
OPTEE_SMC_FAST_CALL_VAL(OPTEE_SMC_FUNCID_GET_THREAD_COUNT)
|
|
|
|
+/*
|
|
+ * Inform OP-TEE that normal world is able to receive asynchronous
|
|
+ * notifications.
|
|
+ *
|
|
+ * Call requests usage:
|
|
+ * a0 SMC Function ID, OPTEE_SMC_ENABLE_ASYNC_NOTIF
|
|
+ * a1-6 Not used
|
|
+ * a7 Hypervisor Client ID register
|
|
+ *
|
|
+ * Normal return register usage:
|
|
+ * a0 OPTEE_SMC_RETURN_OK
|
|
+ * a1-7 Preserved
|
|
+ *
|
|
+ * Not supported return register usage:
|
|
+ * a0 OPTEE_SMC_RETURN_ENOTAVAIL
|
|
+ * a1-7 Preserved
|
|
+ */
|
|
+#define OPTEE_SMC_FUNCID_ENABLE_ASYNC_NOTIF 16
|
|
+#define OPTEE_SMC_ENABLE_ASYNC_NOTIF \
|
|
+ OPTEE_SMC_FAST_CALL_VAL(OPTEE_SMC_FUNCID_ENABLE_ASYNC_NOTIF)
|
|
+
|
|
+/*
|
|
+ * Retrieve a value of notifications pending since the last call of this
|
|
+ * function.
|
|
+ *
|
|
+ * OP-TEE keeps a record of all posted values. When an interrupt is
|
|
+ * received which indicates that there are posted values this function
|
|
+ * should be called until all pended values have been retrieved. When a
|
|
+ * value is retrieved, it's cleared from the record in secure world.
|
|
+ *
|
|
+ * Call requests usage:
|
|
+ * a0 SMC Function ID, OPTEE_SMC_GET_ASYNC_NOTIF_VALUE
|
|
+ * a1-6 Not used
|
|
+ * a7 Hypervisor Client ID register
|
|
+ *
|
|
+ * Normal return register usage:
|
|
+ * a0 OPTEE_SMC_RETURN_OK
|
|
+ * a1 value
|
|
+ * a2 Bit[0]: OPTEE_SMC_ASYNC_NOTIF_VALUE_VALID if the value in a1 is
|
|
+ * valid, else 0 if no values where pending
|
|
+ * a2 Bit[1]: OPTEE_SMC_ASYNC_NOTIF_VALUE_PENDING if another value is
|
|
+ * pending, else 0.
|
|
+ * Bit[31:2]: MBZ
|
|
+ * a3-7 Preserved
|
|
+ *
|
|
+ * Not supported return register usage:
|
|
+ * a0 OPTEE_SMC_RETURN_ENOTAVAIL
|
|
+ * a1-7 Preserved
|
|
+ */
|
|
+#define OPTEE_SMC_ASYNC_NOTIF_VALUE_VALID BIT(0)
|
|
+#define OPTEE_SMC_ASYNC_NOTIF_VALUE_PENDING BIT(1)
|
|
+
|
|
+/*
|
|
+ * Notification that OP-TEE expects a yielding call to do some bottom half
|
|
+ * work in a driver.
|
|
+ */
|
|
+#define OPTEE_SMC_ASYNC_NOTIF_VALUE_DO_BOTTOM_HALF 0
|
|
+
|
|
+#define OPTEE_SMC_FUNCID_GET_ASYNC_NOTIF_VALUE 17
|
|
+#define OPTEE_SMC_GET_ASYNC_NOTIF_VALUE \
|
|
+ OPTEE_SMC_FAST_CALL_VAL(OPTEE_SMC_FUNCID_GET_ASYNC_NOTIF_VALUE)
|
|
+
|
|
+#define OPTEE_SMC_ASYNC_NOTIF_VALUE_DO_IT 1
|
|
+
|
|
+#define OPTEE_SMC_IT_NOTIF_VALUE_VALID BIT(0)
|
|
+#define OPTEE_SMC_IT_NOTIF_VALUE_PENDING BIT(1)
|
|
+
|
|
+/*
|
|
+ * Notification that OP-TEE generates and interruption.
|
|
+ */
|
|
+#define OPTEE_SMC_FUNCID_GET_IT_NOTIF_VALUE 53
|
|
+#define OPTEE_SMC_GET_IT_NOTIF_VALUE \
|
|
+ OPTEE_SMC_FAST_CALL_VAL(OPTEE_SMC_FUNCID_GET_IT_NOTIF_VALUE)
|
|
+
|
|
+#define OPTEE_SMC_FUNCID_SET_IT_NOTIF_MASK 54
|
|
+#define OPTEE_SMC_SET_IT_NOTIF_MASK \
|
|
+ OPTEE_SMC_FAST_CALL_VAL(OPTEE_SMC_FUNCID_SET_IT_NOTIF_MASK)
|
|
+
|
|
/*
|
|
* Resume from RPC (for example after processing a foreign interrupt)
|
|
*
|
|
diff --git a/drivers/tee/optee/rpc.c b/drivers/tee/optee/rpc.c
|
|
index 456833d82007..ae41f4e640ab 100644
|
|
--- a/drivers/tee/optee/rpc.c
|
|
+++ b/drivers/tee/optee/rpc.c
|
|
@@ -14,23 +14,6 @@
|
|
#include "optee_smc.h"
|
|
#include "optee_rpc_cmd.h"
|
|
|
|
-struct wq_entry {
|
|
- struct list_head link;
|
|
- struct completion c;
|
|
- u32 key;
|
|
-};
|
|
-
|
|
-void optee_wait_queue_init(struct optee_wait_queue *priv)
|
|
-{
|
|
- mutex_init(&priv->mu);
|
|
- INIT_LIST_HEAD(&priv->db);
|
|
-}
|
|
-
|
|
-void optee_wait_queue_exit(struct optee_wait_queue *priv)
|
|
-{
|
|
- mutex_destroy(&priv->mu);
|
|
-}
|
|
-
|
|
static void handle_rpc_func_cmd_get_time(struct optee_msg_arg *arg)
|
|
{
|
|
struct timespec64 ts;
|
|
@@ -143,48 +126,6 @@ static void handle_rpc_func_cmd_i2c_transfer(struct tee_context *ctx,
|
|
}
|
|
#endif
|
|
|
|
-static struct wq_entry *wq_entry_get(struct optee_wait_queue *wq, u32 key)
|
|
-{
|
|
- struct wq_entry *w;
|
|
-
|
|
- mutex_lock(&wq->mu);
|
|
-
|
|
- list_for_each_entry(w, &wq->db, link)
|
|
- if (w->key == key)
|
|
- goto out;
|
|
-
|
|
- w = kmalloc(sizeof(*w), GFP_KERNEL);
|
|
- if (w) {
|
|
- init_completion(&w->c);
|
|
- w->key = key;
|
|
- list_add_tail(&w->link, &wq->db);
|
|
- }
|
|
-out:
|
|
- mutex_unlock(&wq->mu);
|
|
- return w;
|
|
-}
|
|
-
|
|
-static void wq_sleep(struct optee_wait_queue *wq, u32 key)
|
|
-{
|
|
- struct wq_entry *w = wq_entry_get(wq, key);
|
|
-
|
|
- if (w) {
|
|
- wait_for_completion(&w->c);
|
|
- mutex_lock(&wq->mu);
|
|
- list_del(&w->link);
|
|
- mutex_unlock(&wq->mu);
|
|
- kfree(w);
|
|
- }
|
|
-}
|
|
-
|
|
-static void wq_wakeup(struct optee_wait_queue *wq, u32 key)
|
|
-{
|
|
- struct wq_entry *w = wq_entry_get(wq, key);
|
|
-
|
|
- if (w)
|
|
- complete(&w->c);
|
|
-}
|
|
-
|
|
static void handle_rpc_func_cmd_wq(struct optee *optee,
|
|
struct optee_msg_arg *arg)
|
|
{
|
|
@@ -196,11 +137,13 @@ static void handle_rpc_func_cmd_wq(struct optee *optee,
|
|
goto bad;
|
|
|
|
switch (arg->params[0].u.value.a) {
|
|
- case OPTEE_RPC_WAIT_QUEUE_SLEEP:
|
|
- wq_sleep(&optee->wait_queue, arg->params[0].u.value.b);
|
|
+ case OPTEE_RPC_NOTIFICATION_WAIT:
|
|
+ if (optee_notif_wait(optee, arg->params[0].u.value.b))
|
|
+ goto bad;
|
|
break;
|
|
- case OPTEE_RPC_WAIT_QUEUE_WAKEUP:
|
|
- wq_wakeup(&optee->wait_queue, arg->params[0].u.value.b);
|
|
+ case OPTEE_RPC_NOTIFICATION_SEND:
|
|
+ if (optee_notif_send(optee, arg->params[0].u.value.b))
|
|
+ goto bad;
|
|
break;
|
|
default:
|
|
goto bad;
|
|
@@ -465,7 +408,7 @@ static void handle_rpc_func_cmd(struct tee_context *ctx, struct optee *optee,
|
|
case OPTEE_RPC_CMD_GET_TIME:
|
|
handle_rpc_func_cmd_get_time(arg);
|
|
break;
|
|
- case OPTEE_RPC_CMD_WAIT_QUEUE:
|
|
+ case OPTEE_RPC_CMD_NOTIFICATION:
|
|
handle_rpc_func_cmd_wq(optee, arg);
|
|
break;
|
|
case OPTEE_RPC_CMD_SUSPEND:
|
|
@@ -541,3 +484,31 @@ void optee_handle_rpc(struct tee_context *ctx, struct optee_rpc_param *param,
|
|
|
|
param->a0 = OPTEE_SMC_CALL_RETURN_FROM_RPC;
|
|
}
|
|
+
|
|
+bool optee_rpc_is_ocall(struct optee_rpc_param *param,
|
|
+ struct optee_call_ctx *call_ctx)
|
|
+{
|
|
+ u32 func;
|
|
+
|
|
+ struct tee_shm *shm;
|
|
+ struct optee_msg_arg *arg;
|
|
+
|
|
+ func = OPTEE_SMC_RETURN_GET_RPC_FUNC(param->a0);
|
|
+ if (func != OPTEE_SMC_RPC_FUNC_CMD)
|
|
+ return false;
|
|
+
|
|
+ shm = reg_pair_to_ptr(param->a1, param->a2);
|
|
+ arg = tee_shm_get_va(shm, 0);
|
|
+
|
|
+ switch (arg->cmd) {
|
|
+ case OPTEE_MSG_RPC_CMD_OCALL:
|
|
+ call_ctx->rpc_shm = shm;
|
|
+ call_ctx->rpc_arg = arg;
|
|
+ call_ctx->thread_id = param->a3;
|
|
+ return true;
|
|
+ default:
|
|
+ break;
|
|
+ }
|
|
+
|
|
+ return false;
|
|
+}
|
|
diff --git a/drivers/tee/tee_core.c b/drivers/tee/tee_core.c
|
|
index a44e5b53e7a9..4c2103fb4cf8 100644
|
|
--- a/drivers/tee/tee_core.c
|
|
+++ b/drivers/tee/tee_core.c
|
|
@@ -359,6 +359,14 @@ tee_ioctl_shm_register(struct tee_context *ctx,
|
|
return ret;
|
|
}
|
|
|
|
+static bool param_is_ocall(struct tee_param *param)
|
|
+{
|
|
+ u64 type = param->attr & TEE_IOCTL_PARAM_ATTR_TYPE_MASK;
|
|
+
|
|
+ return param->attr & TEE_IOCTL_PARAM_ATTR_OCALL &&
|
|
+ type == TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT;
|
|
+}
|
|
+
|
|
static int params_from_user(struct tee_context *ctx, struct tee_param *params,
|
|
size_t num_params,
|
|
struct tee_ioctl_param __user *uparams)
|
|
@@ -467,6 +475,33 @@ static int params_to_user(struct tee_ioctl_param __user *uparams,
|
|
return 0;
|
|
}
|
|
|
|
+static inline int find_ocall_param(struct tee_param *params, u32 num_params,
|
|
+ struct tee_param **normal_params,
|
|
+ u32 *num_normal_params,
|
|
+ struct tee_param **ocall_param)
|
|
+{
|
|
+ size_t n;
|
|
+
|
|
+ for (n = 0; n < num_params; n++) {
|
|
+ if (param_is_ocall(params + n)) {
|
|
+ if (n == 0) {
|
|
+ *normal_params = params + 1;
|
|
+ *num_normal_params = num_params - 1;
|
|
+ *ocall_param = params;
|
|
+ return 0;
|
|
+ } else {
|
|
+ return -EINVAL;
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+
|
|
+ *normal_params = params;
|
|
+ *num_normal_params = num_params;
|
|
+ *ocall_param = NULL;
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
static int tee_ioctl_open_session(struct tee_context *ctx,
|
|
struct tee_ioctl_buf_data __user *ubuf)
|
|
{
|
|
@@ -514,7 +549,9 @@ static int tee_ioctl_open_session(struct tee_context *ctx,
|
|
goto out;
|
|
}
|
|
|
|
- rc = ctx->teedev->desc->ops->open_session(ctx, &arg, params);
|
|
+ rc = ctx->teedev->desc->ops->open_session(ctx, &arg, params,
|
|
+ arg.num_params,
|
|
+ NULL /*ocall_param*/);
|
|
if (rc)
|
|
goto out;
|
|
have_session = true;
|
|
@@ -585,7 +622,9 @@ static int tee_ioctl_invoke(struct tee_context *ctx,
|
|
goto out;
|
|
}
|
|
|
|
- rc = ctx->teedev->desc->ops->invoke_func(ctx, &arg, params);
|
|
+ rc = ctx->teedev->desc->ops->invoke_func(ctx, &arg, params,
|
|
+ arg.num_params,
|
|
+ NULL /*ocall_param*/);
|
|
if (rc)
|
|
goto out;
|
|
|
|
@@ -1162,9 +1201,22 @@ int tee_client_open_session(struct tee_context *ctx,
|
|
struct tee_ioctl_open_session_arg *arg,
|
|
struct tee_param *param)
|
|
{
|
|
+ struct tee_param *ocall_param = NULL;
|
|
+ struct tee_param *normal_params = NULL;
|
|
+ u32 num_normal_params = 0;
|
|
+ int rc;
|
|
+
|
|
if (!ctx->teedev->desc->ops->open_session)
|
|
return -EINVAL;
|
|
- return ctx->teedev->desc->ops->open_session(ctx, arg, param);
|
|
+
|
|
+ rc = find_ocall_param(param, arg->num_params, &normal_params,
|
|
+ &num_normal_params, &ocall_param);
|
|
+ if (rc)
|
|
+ return rc;
|
|
+
|
|
+ return ctx->teedev->desc->ops->open_session(ctx, arg, normal_params,
|
|
+ num_normal_params,
|
|
+ ocall_param);
|
|
}
|
|
EXPORT_SYMBOL_GPL(tee_client_open_session);
|
|
|
|
@@ -1180,9 +1232,22 @@ int tee_client_invoke_func(struct tee_context *ctx,
|
|
struct tee_ioctl_invoke_arg *arg,
|
|
struct tee_param *param)
|
|
{
|
|
+ struct tee_param *ocall_param = NULL;
|
|
+ struct tee_param *normal_params = NULL;
|
|
+ u32 num_normal_params = 0;
|
|
+ int rc;
|
|
+
|
|
if (!ctx->teedev->desc->ops->invoke_func)
|
|
return -EINVAL;
|
|
- return ctx->teedev->desc->ops->invoke_func(ctx, arg, param);
|
|
+
|
|
+ rc = find_ocall_param(param, arg->num_params, &normal_params,
|
|
+ &num_normal_params, &ocall_param);
|
|
+ if (rc)
|
|
+ return rc;
|
|
+
|
|
+ return ctx->teedev->desc->ops->invoke_func(ctx, arg, normal_params,
|
|
+ num_normal_params,
|
|
+ ocall_param);
|
|
}
|
|
EXPORT_SYMBOL_GPL(tee_client_invoke_func);
|
|
|
|
diff --git a/drivers/tee/tee_shm.c b/drivers/tee/tee_shm.c
|
|
index 6fb4400333fb..36304fe6accd 100644
|
|
--- a/drivers/tee/tee_shm.c
|
|
+++ b/drivers/tee/tee_shm.c
|
|
@@ -158,7 +158,8 @@ struct tee_shm *tee_shm_register(struct tee_context *ctx, unsigned long addr,
|
|
int num_pages;
|
|
unsigned long start;
|
|
|
|
- if (flags != req_user_flags && flags != req_kernel_flags)
|
|
+ if (((flags & req_user_flags) != req_user_flags) &&
|
|
+ ((flags & req_kernel_flags) != req_kernel_flags))
|
|
return ERR_PTR(-ENOTSUPP);
|
|
|
|
if (!tee_device_get(teedev))
|
|
@@ -432,6 +433,16 @@ struct tee_shm *tee_shm_get_from_id(struct tee_context *ctx, int id)
|
|
}
|
|
EXPORT_SYMBOL_GPL(tee_shm_get_from_id);
|
|
|
|
+/**
|
|
+ * tee_shm_get() - Increase reference count on a shared memory handle
|
|
+ * @shm: Shared memory handle
|
|
+ */
|
|
+void tee_shm_get(struct tee_shm *shm)
|
|
+{
|
|
+ refcount_inc(&shm->refcount);
|
|
+}
|
|
+EXPORT_SYMBOL_GPL(tee_shm_get);
|
|
+
|
|
/**
|
|
* tee_shm_put() - Decrease reference count on a shared memory handle
|
|
* @shm: Shared memory handle
|
|
diff --git a/include/linux/rpmsg.h b/include/linux/rpmsg.h
|
|
index a8dcf8a9ae88..02fa9116cd60 100644
|
|
--- a/include/linux/rpmsg.h
|
|
+++ b/include/linux/rpmsg.h
|
|
@@ -186,6 +186,8 @@ int rpmsg_trysend_offchannel(struct rpmsg_endpoint *ept, u32 src, u32 dst,
|
|
__poll_t rpmsg_poll(struct rpmsg_endpoint *ept, struct file *filp,
|
|
poll_table *wait);
|
|
|
|
+ssize_t rpmsg_get_mtu(struct rpmsg_endpoint *ept);
|
|
+
|
|
#else
|
|
|
|
static inline int rpmsg_register_device(struct rpmsg_device *rpdev)
|
|
@@ -296,6 +298,14 @@ static inline __poll_t rpmsg_poll(struct rpmsg_endpoint *ept,
|
|
return 0;
|
|
}
|
|
|
|
+static inline ssize_t rpmsg_get_mtu(struct rpmsg_endpoint *ept)
|
|
+{
|
|
+ /* This shouldn't be possible */
|
|
+ WARN_ON(1);
|
|
+
|
|
+ return -ENXIO;
|
|
+}
|
|
+
|
|
#endif /* IS_ENABLED(CONFIG_RPMSG) */
|
|
|
|
/* use a macro to avoid include chaining to get THIS_MODULE */
|
|
diff --git a/include/linux/tee_drv.h b/include/linux/tee_drv.h
|
|
index 38b701b7af4c..7501b9bf6d61 100644
|
|
--- a/include/linux/tee_drv.h
|
|
+++ b/include/linux/tee_drv.h
|
|
@@ -50,6 +50,8 @@ struct tee_shm_pool;
|
|
* non-blocking in nature.
|
|
* @cap_memref_null: flag indicating if the TEE Client support shared
|
|
* memory buffer with a NULL pointer.
|
|
+ * @cap_ocall: flag indicating that OP-TEE supports OCALLs, allowing TAs
|
|
+ * to invoke commands on their CA.
|
|
*/
|
|
struct tee_context {
|
|
struct tee_device *teedev;
|
|
@@ -58,6 +60,7 @@ struct tee_context {
|
|
bool releasing;
|
|
bool supp_nowait;
|
|
bool cap_memref_null;
|
|
+ bool cap_ocall;
|
|
};
|
|
|
|
struct tee_param_memref {
|
|
@@ -101,11 +104,15 @@ struct tee_driver_ops {
|
|
void (*release)(struct tee_context *ctx);
|
|
int (*open_session)(struct tee_context *ctx,
|
|
struct tee_ioctl_open_session_arg *arg,
|
|
- struct tee_param *param);
|
|
+ struct tee_param *normal_param,
|
|
+ u32 num_normal_params,
|
|
+ struct tee_param *ocall_param);
|
|
int (*close_session)(struct tee_context *ctx, u32 session);
|
|
int (*invoke_func)(struct tee_context *ctx,
|
|
struct tee_ioctl_invoke_arg *arg,
|
|
- struct tee_param *param);
|
|
+ struct tee_param *normal_param,
|
|
+ u32 num_normal_params,
|
|
+ struct tee_param *ocall_param);
|
|
int (*cancel_req)(struct tee_context *ctx, u32 cancel_id, u32 session);
|
|
int (*supp_recv)(struct tee_context *ctx, u32 *func, u32 *num_params,
|
|
struct tee_param *param);
|
|
@@ -363,6 +370,12 @@ static inline bool tee_shm_is_registered(struct tee_shm *shm)
|
|
*/
|
|
void tee_shm_free(struct tee_shm *shm);
|
|
|
|
+/**
|
|
+ * tee_shm_get() - Increase reference count on a shared memory handle
|
|
+ * @shm: Shared memory handle
|
|
+ */
|
|
+void tee_shm_get(struct tee_shm *shm);
|
|
+
|
|
/**
|
|
* tee_shm_put() - Decrease reference count on a shared memory handle
|
|
* @shm: Shared memory handle
|
|
diff --git a/include/linux/tee_remoteproc.h b/include/linux/tee_remoteproc.h
|
|
new file mode 100644
|
|
index 000000000000..5ba0b611679c
|
|
--- /dev/null
|
|
+++ b/include/linux/tee_remoteproc.h
|
|
@@ -0,0 +1,101 @@
|
|
+/* SPDX-License-Identifier: GPL-2.0-or-later */
|
|
+/*
|
|
+ * Copyright(c) 2020 STMicroelectronics 2020
|
|
+ */
|
|
+
|
|
+#ifndef TEE_REMOTEPROC_H
|
|
+#define TEE_REMOTEPROC_H
|
|
+
|
|
+#include <linux/remoteproc.h>
|
|
+#include <linux/tee_drv.h>
|
|
+
|
|
+/**
|
|
+ * struct tee_rproc - TEE remoteproc structure
|
|
+ * @node: Reference in list
|
|
+ * @rproc: Remoteproc reference
|
|
+ * @parent: Parent device
|
|
+ * @fw_id: Identifier of the target firmware
|
|
+ * @session_id: TEE session identifier
|
|
+ * @rsc_va: Resource table virtual address.
|
|
+ */
|
|
+struct tee_rproc {
|
|
+ struct list_head node;
|
|
+
|
|
+ struct rproc *rproc;
|
|
+ struct device *parent;
|
|
+ u32 fw_id;
|
|
+ u32 session_id;
|
|
+ void *rsc_va;
|
|
+};
|
|
+
|
|
+#if IS_ENABLED(CONFIG_TEE_REMOTEPROC)
|
|
+
|
|
+struct tee_rproc *tee_rproc_register(struct device *dev, unsigned int fw_id);
|
|
+int tee_rproc_unregister(struct tee_rproc *trproc);
|
|
+
|
|
+int tee_rproc_load_fw(struct tee_rproc *trproc, const struct firmware *fw);
|
|
+int rproc_tee_get_rsc_table(struct tee_rproc *trproc);
|
|
+struct resource_table *tee_rproc_get_loaded_rsc_table(struct tee_rproc *trproc);
|
|
+int tee_rproc_start(struct tee_rproc *trproc);
|
|
+int tee_rproc_stop(struct tee_rproc *trproc);
|
|
+
|
|
+#else
|
|
+
|
|
+static inline struct tee_rproc *tee_rproc_register(struct device *dev,
|
|
+ unsigned int fw_id)
|
|
+{
|
|
+ return ERR_PTR(-ENODEV);
|
|
+}
|
|
+
|
|
+static inline int tee_rproc_unregister(struct tee_rproc *trproc)
|
|
+{
|
|
+ /* This shouldn't be possible */
|
|
+ WARN_ON(1);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static inline int tee_rproc_load_fw(struct tee_rproc *trproc,
|
|
+ const struct firmware *fw)
|
|
+{
|
|
+ /* This shouldn't be possible */
|
|
+ WARN_ON(1);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static inline int tee_rproc_start(struct tee_rproc *trproc)
|
|
+{
|
|
+ /* This shouldn't be possible */
|
|
+ WARN_ON(1);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static inline int tee_rproc_stop(struct tee_rproc *trproc)
|
|
+{
|
|
+ /* This shouldn't be possible */
|
|
+ WARN_ON(1);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static inline int rproc_tee_get_rsc_table(struct tee_rproc *trproc)
|
|
+{
|
|
+ /* This shouldn't be possible */
|
|
+ WARN_ON(1);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static inline struct resource_table *
|
|
+ tee_rproc_get_loaded_rsc_table(struct tee_rproc *trproc)
|
|
+{
|
|
+ /* This shouldn't be possible */
|
|
+ WARN_ON(1);
|
|
+
|
|
+ return NULL;
|
|
+}
|
|
+
|
|
+#endif /* CONFIG_TEE_REMOTEPROC */
|
|
+#endif /* TEE_REMOTEPROC_H */
|
|
diff --git a/include/uapi/linux/rpmsg.h b/include/uapi/linux/rpmsg.h
|
|
index f5ca8740f3fb..1637e68177d9 100644
|
|
--- a/include/uapi/linux/rpmsg.h
|
|
+++ b/include/uapi/linux/rpmsg.h
|
|
@@ -33,4 +33,14 @@ struct rpmsg_endpoint_info {
|
|
*/
|
|
#define RPMSG_DESTROY_EPT_IOCTL _IO(0xb5, 0x2)
|
|
|
|
+/**
|
|
+ * Instantiate a new local rpmsg service device.
|
|
+ */
|
|
+#define RPMSG_CREATE_DEV_IOCTL _IOW(0xb5, 0x3, struct rpmsg_endpoint_info)
|
|
+
|
|
+/**
|
|
+ * Release a local rpmsg device.
|
|
+ */
|
|
+#define RPMSG_RELEASE_DEV_IOCTL _IOW(0xb5, 0x4, struct rpmsg_endpoint_info)
|
|
+
|
|
#endif
|
|
diff --git a/include/uapi/linux/tee.h b/include/uapi/linux/tee.h
|
|
index 25a6c534beb1..157ec3dbd8b8 100644
|
|
--- a/include/uapi/linux/tee.h
|
|
+++ b/include/uapi/linux/tee.h
|
|
@@ -52,6 +52,7 @@
|
|
#define TEE_GEN_CAP_PRIVILEGED (1 << 1)/* Privileged device (for supplicant) */
|
|
#define TEE_GEN_CAP_REG_MEM (1 << 2)/* Supports registering shared memory */
|
|
#define TEE_GEN_CAP_MEMREF_NULL (1 << 3)/* NULL MemRef support */
|
|
+#define TEE_GEN_CAP_OCALL (1 << 30)/* Supports calls from TA to CA */
|
|
|
|
#define TEE_MEMREF_NULL (__u64)(-1) /* NULL MemRef Buffer */
|
|
|
|
@@ -162,9 +163,14 @@ struct tee_ioctl_buf_data {
|
|
/* Meta parameter carrying extra information about the message. */
|
|
#define TEE_IOCTL_PARAM_ATTR_META 0x100
|
|
|
|
+/* Parameter carrying information about an OCALL reply or request. */
|
|
+#define TEE_IOCTL_PARAM_ATTR_OCALL 0x200
|
|
+
|
|
/* Mask of all known attr bits */
|
|
#define TEE_IOCTL_PARAM_ATTR_MASK \
|
|
- (TEE_IOCTL_PARAM_ATTR_TYPE_MASK | TEE_IOCTL_PARAM_ATTR_META)
|
|
+ (TEE_IOCTL_PARAM_ATTR_TYPE_MASK | \
|
|
+ TEE_IOCTL_PARAM_ATTR_META | \
|
|
+ TEE_IOCTL_PARAM_ATTR_OCALL)
|
|
|
|
/*
|
|
* Matches TEEC_LOGIN_* in GP TEE Client API
|
|
@@ -257,6 +263,34 @@ struct tee_ioctl_open_session_arg {
|
|
#define TEE_IOC_OPEN_SESSION _IOR(TEE_IOC_MAGIC, TEE_IOC_BASE + 2, \
|
|
struct tee_ioctl_buf_data)
|
|
|
|
+/*
|
|
+ * Command sent to the CA to execute an OCALL by Id.
|
|
+ *
|
|
+ * [any] param[0..3].u.* carry OCALL parameters
|
|
+ */
|
|
+#define TEE_IOCTL_OCALL_CMD_INVOKE 3
|
|
+
|
|
+/*
|
|
+ * Join the Id of the function that the TEE Client API must execute on behalf of
|
|
+ * the CA with the Id of the command that the CA must execute
|
|
+ *
|
|
+ * As an example, TEE_IOCTL_OCALL_MAKE_PAIR(TEE_IOCTL_OCALL_CMD_INVOKE, 10)
|
|
+ * means that the Client API must forward a function invocation to a CA-provided
|
|
+ * handler, and the handler must execute command Id '10', whose meaning is up to
|
|
+ * the user-defined contract between the CA & TA.
|
|
+ */
|
|
+#define TEE_IOCTL_OCALL_MAKE_PAIR(func, cmd) \
|
|
+ (((__u64)(func) << 32) | (__u32)(cmd))
|
|
+
|
|
+/*
|
|
+ * Get the Id of the function that the TEE Client API must execute on behalf of
|
|
+ * the CA
|
|
+ */
|
|
+#define TEE_IOCTL_OCALL_GET_FUNC(x) ((__u32)((x) >> 32))
|
|
+
|
|
+/* Get the Id of the command that the CA must execute */
|
|
+#define TEE_IOCTL_OCALL_GET_CMD(x) ((__u32)(x))
|
|
+
|
|
/**
|
|
* struct tee_ioctl_invoke_func_arg - Invokes a function in a Trusted
|
|
* Application
|
|
--
|
|
2.17.1
|
|
|