summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorrthavti <quic_rthavti@quicinc.com>2023-04-17 18:32:57 +0530
committerrthavti <quic_rthavti@quicinc.com>2023-04-17 18:33:29 +0530
commit841473f521aca51737b626b99e2050e4c67797f7 (patch)
treec3430349ca656cfa0bb7f064050980efdb08667a
parent64c26ebb8f9c6be81f47cec33a7056dde0d92203 (diff)
parentf7f3c7e45eed6badd77f8e427545805b3cf60307 (diff)
downloadsecuremsm-841473f521aca51737b626b99e2050e4c67797f7.tar.gz
Merge commit 'f7f3c7e45eed6badd77f8e427545805b3cf60307' into sec-kernel.lnx.13.1.r9-rel
Change-Id: I29c909a00b0f403862ad0f7d02eb1cbd639032e9
-rw-r--r--config/sec-kernel_defconfig.conf1
-rw-r--r--config/sec-kernel_defconfig_qseecom.conf1
-rw-r--r--qseecom/qseecom.c302
-rw-r--r--smcinvoke/smcinvoke.c203
-rw-r--r--smcinvoke/smcinvoke.h2
-rw-r--r--smcinvoke/smcinvoke_kernel.c2
-rw-r--r--tz_log/tz_log.c22
7 files changed, 410 insertions, 123 deletions
diff --git a/config/sec-kernel_defconfig.conf b/config/sec-kernel_defconfig.conf
index db6cd22..97498c8 100644
--- a/config/sec-kernel_defconfig.conf
+++ b/config/sec-kernel_defconfig.conf
@@ -3,3 +3,4 @@ export CONFIG_CRYPTO_DEV_QCEDEV=m
export CONFIG_CRYPTO_DEV_QCRYPTO=m
export CONFIG_HDCP_QSEECOM=m
export CONFIG_HW_RANDOM_MSM_LEGACY=m
+export CONFIG_QSEECOM_PROXY=m
diff --git a/config/sec-kernel_defconfig_qseecom.conf b/config/sec-kernel_defconfig_qseecom.conf
index ec586b6..7056667 100644
--- a/config/sec-kernel_defconfig_qseecom.conf
+++ b/config/sec-kernel_defconfig_qseecom.conf
@@ -1 +1,2 @@
export CONFIG_QSEECOM=m
+export CONFIG_QTI_CRYPTO_FDE=m
diff --git a/qseecom/qseecom.c b/qseecom/qseecom.c
index 7f16cd0..7cb9754 100644
--- a/qseecom/qseecom.c
+++ b/qseecom/qseecom.c
@@ -46,7 +46,11 @@
#include <linux/of_reserved_mem.h>
#include <linux/qtee_shmbridge.h>
#include <linux/mem-buf.h>
+#if IS_ENABLED(CONFIG_QTI_CRYPTO_FDE)
+#include <linux/crypto-qti-common.h>
+#else
#include "ice.h"
+#endif
#if IS_ENABLED(CONFIG_QSEECOM_PROXY)
#include <linux/qseecom_kernel.h>
#include "misc/qseecom_priv.h"
@@ -124,8 +128,10 @@
err = copy_from_user((dst),\
(void const __user *)(src),\
(size));\
- else\
+ else {\
memmove((dst), (src), (size));\
+ err = 0;\
+ }\
} while (0)
#define K_COPY_TO_USER(err, dst, src, size) \
@@ -133,8 +139,10 @@
if(!(is_compat_task()))\
err = copy_to_user((void __user *)(dst),\
(src), (size));\
- else\
+ else {\
memmove((dst), (src), (size));\
+ err = 0;\
+ }\
} while (0)
enum qseecom_clk_definitions {
@@ -5337,42 +5345,6 @@ static int __qseecom_send_command(struct qseecom_handle *handle, void *send_buf,
return ret;
}
-#if IS_ENABLED(CONFIG_QSEECOM_PROXY)
-const static struct qseecom_drv_ops qseecom_driver_ops = {
- .qseecom_send_command = __qseecom_send_command,
- .qseecom_start_app = __qseecom_start_app,
- .qseecom_shutdown_app = __qseecom_shutdown_app,
-};
-
-int get_qseecom_kernel_fun_ops(void)
-{
- return provide_qseecom_kernel_fun_ops(&qseecom_driver_ops);
-}
-
-#else
-
-int qseecom_start_app(struct qseecom_handle **handle,
- char *app_name, uint32_t size)
-{
- return __qseecom_start_app(handle, app_name, size);
-}
-EXPORT_SYMBOL(qseecom_start_app);
-
-int qseecom_shutdown_app(struct qseecom_handle **handle)
-{
- return __qseecom_shutdown_app(handle);
-}
-EXPORT_SYMBOL(qseecom_shutdown_app);
-
-int qseecom_send_command(struct qseecom_handle *handle, void *send_buf,
- uint32_t sbuf_len, void *resp_buf, uint32_t rbuf_len)
-{
- return __qseecom_send_command(handle, send_buf, sbuf_len,
- resp_buf, rbuf_len);
-}
-EXPORT_SYMBOL(qseecom_send_command);
-#endif
-
int qseecom_set_bandwidth(struct qseecom_handle *handle, bool high)
{
int ret = 0;
@@ -5536,10 +5508,10 @@ static int __qseecom_send_modfd_resp(struct qseecom_dev_handle *data,
{
struct qseecom_send_modfd_listener_resp resp;
struct qseecom_registered_listener_list *this_lstnr = NULL;
- int err = 0;
+ int ret = 0;
- K_COPY_FROM_USER(err, &resp, argp, sizeof(resp));
- if(err) {
+ K_COPY_FROM_USER(ret, &resp, argp, sizeof(resp));
+ if(ret) {
pr_err("copy_from_user failed\n");
return -EINVAL;
}
@@ -5580,16 +5552,16 @@ static int qseecom_get_qseos_version(struct qseecom_dev_handle *data,
void __user *argp)
{
struct qseecom_qseos_version_req req;
- int err = 0;
+ int ret = 0;
- K_COPY_FROM_USER(err, &req, argp, sizeof(req));
- if(err) {
+ K_COPY_FROM_USER(ret, &req, argp, sizeof(req));
+ if(ret) {
pr_err("copy_from_user failed\n");
return -EINVAL;
}
req.qseos_version = qseecom.qseos_version;
- K_COPY_TO_USER(err, argp, &req, sizeof(req));
- if(err) {
+ K_COPY_TO_USER(ret, argp, &req, sizeof(req));
+ if(ret) {
pr_err("copy_to_user failed\n");
return -EINVAL;
}
@@ -5881,7 +5853,7 @@ static int qseecom_load_external_elf(struct qseecom_dev_handle *data,
/* Copy the relevant information needed for loading the image */
K_COPY_FROM_USER(ret, &load_img_req, argp,
- sizeof(struct qseecom_load_img_req));
+ sizeof(struct qseecom_load_img_req));
if(ret) {
pr_err("copy_from_user failed\n");
return -EFAULT;
@@ -6466,10 +6438,17 @@ static int qseecom_enable_ice_setup(int usage)
int ret = 0;
if (usage == QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION)
+#if IS_ENABLED(CONFIG_QTI_CRYPTO_FDE)
+ ret = crypto_qti_ice_setup_ice_hw("ufs", true);
+#else
ret = qcom_ice_setup_ice_hw("ufs", true);
+#endif
else if (usage == QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION)
+#if IS_ENABLED(CONFIG_QTI_CRYPTO_FDE)
+ crypto_qti_ice_setup_ice_hw("sdcc", true);
+#else
ret = qcom_ice_setup_ice_hw("sdcc", true);
-
+#endif
return ret;
}
@@ -6478,10 +6457,17 @@ static int qseecom_disable_ice_setup(int usage)
int ret = 0;
if (usage == QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION)
+#if IS_ENABLED(CONFIG_QTI_CRYPTO_FDE)
+ crypto_qti_ice_setup_ice_hw("ufs", false);
+#else
ret = qcom_ice_setup_ice_hw("ufs", false);
+#endif
else if (usage == QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION)
+#if IS_ENABLED(CONFIG_QTI_CRYPTO_FDE)
+ crypto_qti_ice_setup_ice_hw("sdcc", false);
+#else
ret = qcom_ice_setup_ice_hw("sdcc", false);
-
+#endif
return ret;
}
@@ -6523,6 +6509,169 @@ static int qseecom_get_ce_hw_instance(uint32_t unit, uint32_t usage)
return pce_info_use->num_ce_pipe_entries;
}
+#if IS_ENABLED(CONFIG_QTI_CRYPTO_FDE) && IS_ENABLED(CONFIG_QSEECOM_PROXY)
+static int __qseecom_create_key_in_slot(uint8_t usage_code, uint8_t key_slot, const uint8_t *key_id,
+ const uint8_t *inhash32)
+{
+ int i;
+ uint32_t *ce_hw = NULL;
+ uint32_t pipe = 0;
+ int ret = 0;
+ uint32_t flags = 0;
+ struct qseecom_create_key_req create_key_req;
+ struct qseecom_key_generate_ireq generate_key_ireq;
+ struct qseecom_key_select_ireq set_key_ireq;
+ uint32_t entries = 0;
+ bool new_key_generated = false;
+ static struct qseecom_dev_handle local_handle = {0};
+ static struct qseecom_dev_handle *data = &local_handle;
+
+ create_key_req.usage = usage_code;
+ memset((void *)create_key_req.hash32, 0, QSEECOM_HASH_SIZE);
+
+ if (create_key_req.usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
+ create_key_req.usage >= QSEOS_KM_USAGE_MAX) {
+ pr_err("unsupported usage %d\n", create_key_req.usage);
+ ret = -EFAULT;
+ return ret;
+ }
+ if (key_id == NULL) {
+ pr_err("Key ID is NULL\n");
+ ret = -EINVAL;
+ return ret;
+ }
+ entries = qseecom_get_ce_hw_instance(DEFAULT_CE_INFO_UNIT,
+ create_key_req.usage);
+ if (entries <= 0) {
+ pr_err("no ce instance for usage %d instance %d\n",
+ DEFAULT_CE_INFO_UNIT, create_key_req.usage);
+ ret = -EINVAL;
+ return ret;
+ }
+
+ ce_hw = kcalloc(entries, sizeof(*ce_hw), GFP_KERNEL);
+ if (!ce_hw) {
+ ret = -ENOMEM;
+ return ret;
+ }
+ ret = __qseecom_get_ce_pipe_info(create_key_req.usage, &pipe, &ce_hw,
+ DEFAULT_CE_INFO_UNIT);
+ if (ret) {
+ pr_err("Failed to retrieve pipe/ce_hw info: %d\n", ret);
+ ret = -EINVAL;
+ goto free_buf;
+ }
+
+ if (qseecom.fde_key_size)
+ flags |= QSEECOM_ICE_FDE_KEY_SIZE_32_BYTE;
+ else
+ flags |= QSEECOM_ICE_FDE_KEY_SIZE_16_BYTE;
+
+ if (qseecom.enable_key_wrap_in_ks)
+ flags |= ENABLE_KEY_WRAP_IN_KS;
+
+ generate_key_ireq.flags = flags;
+ generate_key_ireq.qsee_command_id = QSEOS_GENERATE_KEY;
+ memset((void *)generate_key_ireq.key_id,
+ 0, QSEECOM_KEY_ID_SIZE);
+ memset((void *)generate_key_ireq.hash32,
+ 0, QSEECOM_HASH_SIZE);
+ memcpy((void *)generate_key_ireq.key_id, key_id, QSEECOM_KEY_ID_SIZE);
+
+ //Copy inhash if available
+ if (inhash32 != NULL)
+ memcpy((void *)create_key_req.hash32, (void *)inhash32, QSEECOM_HASH_SIZE);
+
+ memcpy((void *)generate_key_ireq.hash32,
+ (void *)create_key_req.hash32,
+ QSEECOM_HASH_SIZE);
+
+ ret = __qseecom_generate_and_save_key(data,
+ create_key_req.usage, &generate_key_ireq);
+
+ if ((ret != 0) && (ret != QSEOS_RESULT_FAIL_KEY_ID_EXISTS)) {
+ pr_err("Failed to generate key on storage: %d\n", ret);
+ goto free_buf;
+ }
+ if (ret == 0) {
+ //New key was created
+ new_key_generated = true;
+ }
+
+ for (i = 0; i < entries; i++) {
+ set_key_ireq.qsee_command_id = QSEOS_SET_KEY;
+ if (create_key_req.usage ==
+ QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION) {
+ set_key_ireq.ce = QSEECOM_UFS_ICE_CE_NUM;
+ set_key_ireq.pipe = key_slot;
+
+ } else if (create_key_req.usage ==
+ QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION) {
+ set_key_ireq.ce = QSEECOM_SDCC_ICE_CE_NUM;
+ set_key_ireq.pipe = key_slot;
+
+ } else {
+ set_key_ireq.ce = ce_hw[i];
+ set_key_ireq.pipe = pipe;
+ }
+ set_key_ireq.flags = flags;
+
+ /* set both PIPE_ENC and PIPE_ENC_XTS*/
+ set_key_ireq.pipe_type = QSEOS_PIPE_ENC|QSEOS_PIPE_ENC_XTS;
+ memset((void *)set_key_ireq.key_id, 0, QSEECOM_KEY_ID_SIZE);
+ memset((void *)set_key_ireq.hash32, 0, QSEECOM_HASH_SIZE);
+
+ memcpy((void *)set_key_ireq.key_id, key_id, QSEECOM_KEY_ID_SIZE);
+ memcpy((void *)set_key_ireq.hash32,
+ (void *)create_key_req.hash32,
+ QSEECOM_HASH_SIZE);
+ /*
+ * It will return false if it is GPCE based crypto instance or
+ * ICE is setup properly
+ */
+ ret = qseecom_enable_ice_setup(create_key_req.usage);
+ if (ret)
+ goto free_buf;
+
+ do {
+ ret = __qseecom_set_clear_ce_key(data,
+ create_key_req.usage,
+ &set_key_ireq);
+ /*
+ * wait a little before calling scm again to let other
+ * processes run
+ */
+ if (ret == QSEOS_RESULT_FAIL_PENDING_OPERATION)
+ msleep(50);
+
+ } while (ret == QSEOS_RESULT_FAIL_PENDING_OPERATION);
+
+ qseecom_disable_ice_setup(create_key_req.usage);
+
+ if (ret) {
+ pr_err("Failed to create key: pipe %d, ce %d: %d\n",
+ pipe, ce_hw[i], ret);
+ goto free_buf;
+ } else {
+ pr_err("Set the key successfully\n");
+ if ((create_key_req.usage ==
+ QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION) ||
+ (create_key_req.usage ==
+ QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION))
+ goto free_buf;
+ }
+ }
+
+free_buf:
+ kfree_sensitive(ce_hw);
+ if ((ret == 0) && (new_key_generated)) {
+		//Success: report the key-already-exists code to the caller
+ ret = QSEOS_RESULT_FAIL_KEY_ID_EXISTS;
+ }
+ return ret;
+}
+#endif //CONFIG_QTI_CRYPTO_FDE && CONFIG_QSEECOM_PROXY
+
static int qseecom_create_key(struct qseecom_dev_handle *data,
void __user *argp)
{
@@ -6656,7 +6805,7 @@ static int qseecom_create_key(struct qseecom_dev_handle *data,
pr_err("Set the key successfully\n");
if ((create_key_req.usage ==
QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION) ||
- (create_key_req.usage ==
+ (create_key_req.usage ==
QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION))
goto free_buf;
}
@@ -7608,7 +7757,7 @@ static int qseecom_qteec_request_cancellation(struct qseecom_dev_handle *data,
int ret = 0;
K_COPY_FROM_USER(ret, &req, argp, sizeof(struct qseecom_qteec_modfd_req));
- if(ret) {
+ if(ret) {
pr_err("copy_from_user failed\n");
return ret;
}
@@ -8329,14 +8478,10 @@ long qseecom_ioctl(struct file *file,
break;
}
case QSEECOM_IOCTL_SET_ICE_INFO: {
- struct qseecom_ice_data_t ice_data;
-
- ret = copy_from_user(&ice_data, argp, sizeof(ice_data));
- if (ret) {
- pr_err("copy_from_user failed\n");
- return -EFAULT;
- }
- qcom_ice_set_fde_flag(ice_data.flag);
+ //Return success for backwards compatibility
+ //This call is redundant and not required anymore
+ pr_info("SET_ICE_INFO is reduntant call,return success for backwards compatibility\n");
+ ret = 0;
break;
}
case QSEECOM_IOCTL_FBE_CLEAR_KEY: {
@@ -9623,6 +9768,45 @@ static void qseecom_deregister_shmbridge(void)
qtee_shmbridge_deregister(qseecom.ta_bridge_handle);
}
+#if IS_ENABLED(CONFIG_QSEECOM_PROXY)
+const static struct qseecom_drv_ops qseecom_driver_ops = {
+ .qseecom_send_command = __qseecom_send_command,
+ .qseecom_start_app = __qseecom_start_app,
+ .qseecom_shutdown_app = __qseecom_shutdown_app,
+#if IS_ENABLED(CONFIG_QTI_CRYPTO_FDE)
+ .qseecom_create_key_in_slot = __qseecom_create_key_in_slot,
+#endif
+};
+
+int get_qseecom_kernel_fun_ops(void)
+{
+ return provide_qseecom_kernel_fun_ops(&qseecom_driver_ops);
+}
+
+#else
+
+int qseecom_start_app(struct qseecom_handle **handle,
+ char *app_name, uint32_t size)
+{
+ return __qseecom_start_app(handle, app_name, size);
+}
+EXPORT_SYMBOL(qseecom_start_app);
+
+int qseecom_shutdown_app(struct qseecom_handle **handle)
+{
+ return __qseecom_shutdown_app(handle);
+}
+EXPORT_SYMBOL(qseecom_shutdown_app);
+
+int qseecom_send_command(struct qseecom_handle *handle, void *send_buf,
+ uint32_t sbuf_len, void *resp_buf, uint32_t rbuf_len)
+{
+ return __qseecom_send_command(handle, send_buf, sbuf_len,
+ resp_buf, rbuf_len);
+}
+EXPORT_SYMBOL(qseecom_send_command);
+#endif
+
static int qseecom_probe(struct platform_device *pdev)
{
int rc;
diff --git a/smcinvoke/smcinvoke.c b/smcinvoke/smcinvoke.c
index f4f13ac..2babd80 100644
--- a/smcinvoke/smcinvoke.c
+++ b/smcinvoke/smcinvoke.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2016-2021, The Linux Foundation. All rights reserved.
- * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#define pr_fmt(fmt) "smcinvoke: %s: " fmt, __func__
@@ -26,6 +26,7 @@
#include <linux/of_platform.h>
#include <linux/firmware.h>
#include <linux/qcom_scm.h>
+#include <linux/freezer.h>
#include <asm/cacheflush.h>
#include <soc/qcom/qseecomi.h>
#include <linux/qtee_shmbridge.h>
@@ -133,6 +134,8 @@
#define TZHANDLE_GET_SERVER(h) ((uint16_t)((h) & 0xFFFF))
#define TZHANDLE_GET_OBJID(h) (((h) >> 16) & 0x7FFF)
#define TZHANDLE_MAKE_LOCAL(s, o) (((0x8000 | (o)) << 16) | s)
+#define SET_BIT(s,b) (s | (1 << b))
+#define UNSET_BIT(s,b) (s & (~ (1 << b)))
#define TZHANDLE_IS_NULL(h) ((h) == SMCINVOKE_TZ_OBJ_NULL)
#define TZHANDLE_IS_LOCAL(h) ((h) & 0x80000000)
@@ -257,6 +260,7 @@ struct smcinvoke_server_info {
DECLARE_HASHTABLE(responses_table, 4);
struct hlist_node hash;
struct list_head pending_cbobjs;
+ uint8_t is_server_suspended;
};
struct smcinvoke_cbobj {
@@ -323,7 +327,8 @@ struct smcinvoke_worker_thread {
static struct smcinvoke_worker_thread smcinvoke[MAX_THREAD_NUMBER];
static const char thread_name[MAX_THREAD_NUMBER][MAX_CHAR_NAME] = {
"smcinvoke_shmbridge_postprocess", "smcinvoke_object_postprocess", "smcinvoke_adci_thread"};
-static struct Object adci_clientEnv = Object_NULL;
+static struct Object adci_rootEnv = Object_NULL;
+extern int get_root_obj(struct Object *rootObj);
static int prepare_send_scm_msg(const uint8_t *in_buf, phys_addr_t in_paddr,
size_t in_buf_len,
@@ -484,17 +489,45 @@ static void smcinvoke_shmbridge_post_process(void)
} while (1);
}
-static int smcinvoke_object_post_process(void)
+static int smcinvoke_release_tz_object(struct qtee_shm *in_shm, struct qtee_shm *out_shm,
+ uint32_t tzhandle, uint32_t context_type)
{
- struct smcinvoke_object_release_pending_list *entry = NULL;
- struct list_head *pos;
int ret = 0;
bool release_handles;
- uint32_t context_type;
uint8_t *in_buf = NULL;
- uint8_t *out_buf = NULL;
- struct smcinvoke_cmd_req req = {0};
+ uint8_t *out_buf = NULL;
struct smcinvoke_msg_hdr hdr = {0};
+ struct smcinvoke_cmd_req req = {0};
+
+ in_buf = in_shm->vaddr;
+ out_buf = out_shm->vaddr;
+ hdr.tzhandle = tzhandle;
+ hdr.op = OBJECT_OP_RELEASE;
+ hdr.counts = 0;
+ *(struct smcinvoke_msg_hdr *)in_buf = hdr;
+
+ ret = prepare_send_scm_msg(in_buf, in_shm->paddr,
+ SMCINVOKE_TZ_MIN_BUF_SIZE, out_buf, out_shm->paddr,
+ SMCINVOKE_TZ_MIN_BUF_SIZE, &req, NULL,
+ &release_handles, context_type, in_shm, out_shm);
+ process_piggyback_data(out_buf, SMCINVOKE_TZ_MIN_BUF_SIZE);
+ if (ret) {
+ pr_err("Failed to release object(0x%x), ret:%d\n",
+ hdr.tzhandle, ret);
+ } else {
+ pr_debug("Released object(0x%x) successfully.\n",
+ hdr.tzhandle);
+ }
+
+ return ret;
+}
+
+
+static int smcinvoke_object_post_process(void)
+{
+ struct smcinvoke_object_release_pending_list *entry = NULL;
+ struct list_head *pos;
+ int ret = 0;
struct qtee_shm in_shm = {0}, out_shm = {0};
ret = qtee_shmbridge_allocate_shm(SMCINVOKE_TZ_MIN_BUF_SIZE, &in_shm);
@@ -519,37 +552,19 @@ static int smcinvoke_object_post_process(void)
}
pos = g_object_postprocess.next;
entry = list_entry(pos, struct smcinvoke_object_release_pending_list, list);
- if (entry) {
- in_buf = in_shm.vaddr;
- out_buf = out_shm.vaddr;
- hdr.tzhandle = entry->data.tzhandle;
- hdr.op = OBJECT_OP_RELEASE;
- hdr.counts = 0;
- *(struct smcinvoke_msg_hdr *)in_buf = hdr;
- context_type = entry->data.context_type;
- } else {
- pr_err("entry is NULL, pos:%#llx\n", (uint64_t)pos);
- }
+
list_del(pos);
- kfree_sensitive(entry);
mutex_unlock(&object_postprocess_lock);
if (entry) {
do {
- ret = prepare_send_scm_msg(in_buf, in_shm.paddr,
- SMCINVOKE_TZ_MIN_BUF_SIZE, out_buf, out_shm.paddr,
- SMCINVOKE_TZ_MIN_BUF_SIZE, &req, NULL,
- &release_handles, context_type, &in_shm, &out_shm);
- process_piggyback_data(out_buf, SMCINVOKE_TZ_MIN_BUF_SIZE);
- if (ret) {
- pr_err("Failed to release object(0x%x), ret:%d\n",
- hdr.tzhandle, ret);
- } else {
- pr_debug("Released object(0x%x) successfully.\n",
- hdr.tzhandle);
- }
+ ret = smcinvoke_release_tz_object(&in_shm, &out_shm,
+ entry->data.tzhandle, entry->data.context_type);
} while (-EBUSY == ret);
+ } else {
+ pr_err("entry is NULL, pos:%#llx\n", (uint64_t)pos);
}
+ kfree_sensitive(entry);
} while (1);
out:
@@ -565,18 +580,18 @@ static void smcinvoke_start_adci_thread(void)
int32_t ret = OBJECT_ERROR;
int retry_count = 0;
- ret = get_client_env_object(&adci_clientEnv);
+ ret = get_root_obj(&adci_rootEnv);
if (ret) {
- pr_err("failed to get clientEnv for ADCI invoke thread. ret = %d\n", ret);
+ pr_err("failed to get rootEnv for ADCI invoke thread. ret = %d\n", ret);
/* Marking it Object_NULL in case of failure scenario in order to avoid
- * undefined behavior while releasing garbage adci_clientEnv object.
- */
- adci_clientEnv = Object_NULL;
+	 * undefined behavior while releasing garbage adci_rootEnv object. */
+ adci_rootEnv = Object_NULL;
goto out;
}
/* Invoke call to QTEE which should never return if ADCI is supported */
+ pr_debug("Invoking adciAccept method in QTEE\n");
do {
- ret = IClientEnv_adciAccept(adci_clientEnv);
+ ret = IClientEnv_adciAccept(adci_rootEnv);
if (ret == OBJECT_ERROR_BUSY) {
pr_err("Secure side is busy,will retry after 5 ms, retry_count = %d",retry_count);
msleep(SMCINVOKE_INTERFACE_BUSY_WAIT_MS);
@@ -590,7 +605,7 @@ static void smcinvoke_start_adci_thread(void)
out:
/* Control should reach to this point only if ADCI feature is not supported by QTEE
(or) ADCI thread held in QTEE is released. */
- Object_ASSIGN_NULL(adci_clientEnv);
+ Object_ASSIGN_NULL(adci_rootEnv);
}
static void __wakeup_postprocess_kthread(struct smcinvoke_worker_thread *smcinvoke)
@@ -694,18 +709,19 @@ static void smcinvoke_destroy_kthreads(void)
int32_t ret = OBJECT_ERROR;
int retry_count = 0;
- if(!Object_isNull(adci_clientEnv)) {
+ if (!Object_isNull(adci_rootEnv)) {
+ pr_debug("Invoking adciShutdown method in QTEE\n");
do {
- ret = IClientEnv_adciShutdown(adci_clientEnv);
+ ret = IClientEnv_adciShutdown(adci_rootEnv);
if (ret == OBJECT_ERROR_BUSY) {
pr_err("Secure side is busy,will retry after 5 ms, retry_count = %d",retry_count);
msleep(SMCINVOKE_INTERFACE_BUSY_WAIT_MS);
}
} while ((ret == OBJECT_ERROR_BUSY) && (retry_count++ < SMCINVOKE_INTERFACE_MAX_RETRY));
- if(OBJECT_isERROR(ret)) {
+ if (OBJECT_isERROR(ret)) {
pr_err("adciShutdown in QTEE failed with error = %d\n", ret);
}
- Object_ASSIGN_NULL(adci_clientEnv);
+ Object_ASSIGN_NULL(adci_rootEnv);
}
for (i = 0; i < MAX_THREAD_NUMBER; i++) {
@@ -1543,13 +1559,15 @@ static void process_tzcb_req(void *buf, size_t buf_len, struct file **arr_filp)
timeout_jiff);
}
if (ret == 0) {
- pr_err("CBobj timed out cb-tzhandle:%d, retry:%d, op:%d counts :%d\n",
- cb_req->hdr.tzhandle, cbobj_retries,
+ if (srvr_info->is_server_suspended == 0) {
+ pr_err("CBobj timed out waiting on cbtxn :%d,cb-tzhandle:%d, retry:%d, op:%d counts :%d\n",
+ cb_txn->txn_id,cb_req->hdr.tzhandle, cbobj_retries,
cb_req->hdr.op, cb_req->hdr.counts);
pr_err("CBobj %d timedout pid %x,tid %x, srvr state=%d, srvr id:%u\n",
cb_req->hdr.tzhandle, current->pid,
current->tgid, srvr_info->state,
srvr_info->server_id);
+ }
} else {
/* wait_event returned due to a signal */
if (srvr_info->state != SMCINVOKE_SERVER_STATE_DEFUNCT &&
@@ -1559,7 +1577,16 @@ static void process_tzcb_req(void *buf, size_t buf_len, struct file **arr_filp)
break;
}
}
- cbobj_retries++;
+ /*
+ * If bit corresponding to any accept thread is set, invoke threads
+ * should wait infinitely for the accept thread to come back with
+ * response.
+ */
+ if (srvr_info->is_server_suspended > 0) {
+ cbobj_retries = 0;
+ } else {
+ cbobj_retries++;
+ }
}
out:
@@ -2115,6 +2142,7 @@ static long process_server_req(struct file *filp, unsigned int cmd,
hash_init(server_info->reqs_table);
hash_init(server_info->responses_table);
INIT_LIST_HEAD(&server_info->pending_cbobjs);
+ server_info->is_server_suspended = 0;
mutex_lock(&g_smcinvoke_lock);
@@ -2178,6 +2206,9 @@ static long process_accept_req(struct file *filp, unsigned int cmd,
if (server_info->state == SMCINVOKE_SERVER_STATE_DEFUNCT)
server_info->state = 0;
+ server_info->is_server_suspended = UNSET_BIT(server_info->is_server_suspended,
+ (current->pid)%DEFAULT_CB_OBJ_THREAD_CNT);
+
mutex_unlock(&g_smcinvoke_lock);
/* First check if it has response otherwise wait for req */
@@ -2193,14 +2224,13 @@ static long process_accept_req(struct file *filp, unsigned int cmd,
* invoke thread died while server was processing cb req.
* if invoke thread dies, it would remove req from Q. So
* no matching cb_txn would be on Q and hence NULL cb_txn.
- * In this case, we want this thread to come back and start
- * waiting for new cb requests, hence return EAGAIN here
+	 * In this case, we want this thread to start waiting for
+	 * new cb requests.
*/
if (!cb_txn) {
pr_err("%s txn %d either invalid or removed from Q\n",
__func__, user_args.txn_id);
- ret = -EAGAIN;
- goto out;
+ goto start_waiting_for_requests;
}
ret = marshal_out_tzcb_req(&user_args, cb_txn,
cb_txn->filp_to_release);
@@ -2223,6 +2253,7 @@ static long process_accept_req(struct file *filp, unsigned int cmd,
if (ret && OBJECT_COUNTS_NUM_OO(user_args.counts))
goto out;
}
+start_waiting_for_requests:
/*
* Once response has been delivered, thread will wait for another
* callback req to process.
@@ -2240,7 +2271,26 @@ static long process_accept_req(struct file *filp, unsigned int cmd,
* using server_info and would crash. So dont do that.
*/
mutex_lock(&g_smcinvoke_lock);
- server_info->state = SMCINVOKE_SERVER_STATE_DEFUNCT;
+
+ if(freezing(current)) {
+ pr_err("Server id :%d interrupted probaby due to suspend, pid:%d",
+ server_info->server_id, current->pid);
+ /*
+ * Each accept thread is identified by bits ranging from
+ * 0 to DEFAULT_CBOBJ_THREAD_CNT-1. When an accept thread is
+ * interrupted by a signal other than SIGUSR1,SIGKILL,SIGTERM,
+		 * set the corresponding bit for that accept thread, indicating
+		 * that it is "suspended", i.e. invoke threads must wait
+		 * indefinitely for its response instead of timing out.
+ */
+ server_info->is_server_suspended =
+ SET_BIT(server_info->is_server_suspended,
+ (current->pid)%DEFAULT_CB_OBJ_THREAD_CNT);
+ } else {
+ pr_err("Setting pid:%d, server id : %d state to defunct",
+ current->pid, server_info->server_id);
+ server_info->state = SMCINVOKE_SERVER_STATE_DEFUNCT;
+ }
mutex_unlock(&g_smcinvoke_lock);
wake_up_interruptible(&server_info->rsp_wait_q);
goto out;
@@ -2678,6 +2728,7 @@ int smcinvoke_release_filp(struct file *filp)
struct smcinvoke_file_data *file_data = filp->private_data;
uint32_t tzhandle = 0;
struct smcinvoke_object_release_pending_list *entry = NULL;
+ struct qtee_shm in_shm = {0}, out_shm = {0};
trace_smcinvoke_release_filp(current->files, filp,
file_count(filp), file_data->context_type);
@@ -2689,29 +2740,55 @@ int smcinvoke_release_filp(struct file *filp)
tzhandle = file_data->tzhandle;
/* Root object is special in sense it is indestructible */
- if (!tzhandle || tzhandle == SMCINVOKE_TZ_ROOT_OBJ)
+ if (!tzhandle || tzhandle == SMCINVOKE_TZ_ROOT_OBJ) {
+ if (!tzhandle)
+ pr_err("tzhandle not valid in object release\n");
goto out;
+ }
- entry = kzalloc(sizeof(*entry), GFP_KERNEL);
- if (!entry) {
- ret = -ENOMEM;
+ ret = qtee_shmbridge_allocate_shm(SMCINVOKE_TZ_MIN_BUF_SIZE, &in_shm);
+ if (ret) {
+ pr_err("shmbridge alloc failed for in msg in object release"
+ "with ret %d\n", ret);
goto out;
}
- entry->data.tzhandle = tzhandle;
- entry->data.context_type = file_data->context_type;
- mutex_lock(&object_postprocess_lock);
- list_add_tail(&entry->list, &g_object_postprocess);
- mutex_unlock(&object_postprocess_lock);
- pr_debug("Object release list: added a handle:0x%lx\n", tzhandle);
- __wakeup_postprocess_kthread(&smcinvoke[OBJECT_WORKER_THREAD]);
+ ret = qtee_shmbridge_allocate_shm(SMCINVOKE_TZ_MIN_BUF_SIZE, &out_shm);
+ if (ret) {
+ pr_err("shmbridge alloc failed for out msg in object release"
+ "with ret:%d\n", ret);
+ goto out;
+ }
+
+ ret = smcinvoke_release_tz_object(&in_shm, &out_shm,
+ tzhandle, file_data->context_type);
+
+ if (-EBUSY == ret) {
+ pr_debug("failed to release handle in sync adding to list\n");
+ entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ ret = 0;
+ entry->data.tzhandle = tzhandle;
+ entry->data.context_type = file_data->context_type;
+ mutex_lock(&object_postprocess_lock);
+ list_add_tail(&entry->list, &g_object_postprocess);
+ mutex_unlock(&object_postprocess_lock);
+ pr_debug("Object release list: added a handle:0x%lx\n", tzhandle);
+ __wakeup_postprocess_kthread(&smcinvoke[OBJECT_WORKER_THREAD]);
+ }
out:
+ qtee_shmbridge_free_shm(&in_shm);
+ qtee_shmbridge_free_shm(&out_shm);
kfree(filp->private_data);
filp->private_data = NULL;
+ if (ret != 0)
+ pr_err ("Object release failed with ret %d\n", ret);
return ret;
-
}
int smcinvoke_release_from_kernel_client(int fd)
diff --git a/smcinvoke/smcinvoke.h b/smcinvoke/smcinvoke.h
index 7c3ff1b..edfb45e 100644
--- a/smcinvoke/smcinvoke.h
+++ b/smcinvoke/smcinvoke.h
@@ -1,6 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
/*
* Copyright (c) 2017-2020, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef _UAPI_SMCINVOKE_H_
#define _UAPI_SMCINVOKE_H_
@@ -9,6 +10,7 @@
#include <linux/ioctl.h>
#define SMCINVOKE_USERSPACE_OBJ_NULL -1
+#define DEFAULT_CB_OBJ_THREAD_CNT 4
struct smcinvoke_buf {
__u64 addr;
diff --git a/smcinvoke/smcinvoke_kernel.c b/smcinvoke/smcinvoke_kernel.c
index e6e58d6..805c48b 100644
--- a/smcinvoke/smcinvoke_kernel.c
+++ b/smcinvoke/smcinvoke_kernel.c
@@ -272,7 +272,7 @@ exit:
return ret | req.result;
}
-static int get_root_obj(struct Object *rootObj)
+int get_root_obj(struct Object *rootObj)
{
int ret = 0;
int root_fd = -1;
diff --git a/tz_log/tz_log.c b/tz_log/tz_log.c
index 411104d..ef87a32 100644
--- a/tz_log/tz_log.c
+++ b/tz_log/tz_log.c
@@ -1460,6 +1460,20 @@ static void tzdbg_free_encrypted_log_buf(struct platform_device *pdev)
enc_qseelog_info.vaddr, enc_qseelog_info.paddr);
}
+static bool is_hyp_dir(int tzdbg_stat_type)
+{
+ switch(tzdbg_stat_type)
+ {
+ case TZDBG_HYP_GENERAL:
+ case TZDBG_HYP_LOG:
+ case TZDBG_RM_LOG:
+ return true;
+ default:
+ return false;
+ }
+ return false;
+}
+
static int tzdbg_fs_init(struct platform_device *pdev)
{
int rc = 0;
@@ -1475,6 +1489,14 @@ static int tzdbg_fs_init(struct platform_device *pdev)
for (i = 0; i < TZDBG_STATS_MAX; i++) {
tzdbg.debug_tz[i] = i;
+ /*
+ * If hypervisor is disabled, do not create
+ * hyp_general, hyp_log and rm_log directories,
+ * as accessing them would give segmentation fault
+ */
+ if ((!tzdbg.is_hyplog_enabled) && (is_hyp_dir(i))) {
+ continue;
+ }
dent = proc_create_data(tzdbg.stat[i].name,
0444, dent_dir,
&tzdbg_fops, &tzdbg.debug_tz[i]);