author     Brandon Anderson <brandonand@google.com>  2023-04-06 21:38:31 +0000
committer  Brandon Anderson <brandonand@google.com>  2024-02-15 23:04:03 +0000
commit     3b5b0a31866e2393e352d419d6a5537495c2ba36 (patch)
tree       026a63d1438869534760ed119f04c854d120de55
parent     c9d0d46732fa07b43bb2672834fd809e6a7b5760 (diff)
ANDROID: Delay sharing sched memory with Trusty
Sharing memory with Trusty before interrupts are registered will fail. This change still allocates the structure in the Linux driver as before, but now initializes it to default priorities in case Trusty is unable to use it (because registering the share failed, or because Trusty does not support it). With this change, sharing the memory structure with Trusty is delayed until after interrupts are enabled by the call to of_platform_populate(). This also separates unregister from free, to keep the structure available until threads finish and are no longer accessing it.

Bug: 276729665
Change-Id: If0c9518251cd7bbf21ef83d5510fb3dd9b345bb2
Signed-off-by: Brandon Anderson <brandonand@google.com>
-rw-r--r--  drivers/trusty/trusty-sched-share-api.h |   6
-rw-r--r--  drivers/trusty/trusty-sched-share.c     | 249
-rw-r--r--  drivers/trusty/trusty.c                 |  35
3 files changed, 158 insertions(+), 132 deletions(-)
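
In outline, this patch splits the old single trusty_register_sched_share() entry point into a four-phase lifecycle. The sketch below reconstructs the intended call order from the hunks that follow; it is illustrative only, with error handling and the surrounding trusty_state bookkeeping elided:

/* 1. Early in trusty_probe(), before interrupts are wired up: allocate
 *    the shared structure and seed default priorities so it remains
 *    usable even if sharing with Trusty never succeeds. */
ret = trusty_alloc_sched_share(&pdev->dev, &s->trusty_sched_share_state);
if (ret)
	goto err_alloc_sched_share;

/* 2. of_platform_populate() probes the child devices that register and
 *    enable the Trusty interrupts. */
ret = of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev);

/* 3. Only now attempt to share the buffer and register it via SMC.
 *    This is best-effort: older Trusty versions lack the API, so the
 *    call returns nothing and the driver keeps its local defaults. */
trusty_register_sched_share(&pdev->dev, s->trusty_sched_share_state);

/* 4. On teardown, unregister first (a no-op if step 3 never
 *    succeeded), and free only once no thread can still be touching
 *    the structure. */
trusty_unregister_sched_share(s->trusty_sched_share_state);
trusty_free_sched_share(s->trusty_sched_share_state);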
diff --git a/drivers/trusty/trusty-sched-share-api.h b/drivers/trusty/trusty-sched-share-api.h
index 7605067..412704a 100644
--- a/drivers/trusty/trusty-sched-share-api.h
+++ b/drivers/trusty/trusty-sched-share-api.h
@@ -13,8 +13,12 @@
struct trusty_sched_share_state;
-struct trusty_sched_share_state *trusty_register_sched_share(struct device *device);
+int trusty_alloc_sched_share(struct device *device,
+ struct trusty_sched_share_state **state);
+void trusty_register_sched_share(struct device *device,
+ struct trusty_sched_share_state *sched_share_state);
void trusty_unregister_sched_share(struct trusty_sched_share_state *sched_share_state);
+void trusty_free_sched_share(struct trusty_sched_share_state *sched_share_state);
int trusty_get_requested_nice(unsigned int cpu_num, struct trusty_sched_share_state *tcpu_state);
int trusty_set_actual_nice(unsigned int cpu_num, struct trusty_sched_share_state *tcpu_state,
diff --git a/drivers/trusty/trusty-sched-share.c b/drivers/trusty/trusty-sched-share.c
index a85d899..07e6231 100644
--- a/drivers/trusty/trusty-sched-share.c
+++ b/drivers/trusty/trusty-sched-share.c
@@ -31,106 +31,149 @@ struct trusty_sched_share_state {
u32 mem_size;
u32 buf_size;
u32 num_pages;
+ bool is_registered;
+ bool vm_is_shared;
};
-static int
-trusty_sched_share_resources_allocate(struct trusty_sched_share_state *share_state)
+static inline struct trusty_percpu_data *trusty_get_trusty_percpu_data(
+ struct trusty_sched_shared *tsh, int cpu_num)
{
- struct scatterlist *sg;
+ return (struct trusty_percpu_data *)((unsigned char *)tsh +
+ sizeof(struct trusty_sched_shared) +
+ (cpu_num * sizeof(struct trusty_percpu_data)));
+}
+
+static void trusty_sched_share_reclaim_memory(
+ struct trusty_sched_share_state *sched_share_state)
+{
+ int result;
+
+ if (!sched_share_state->vm_is_shared) {
+ dev_warn(sched_share_state->dev,
+ "%s called unexpectedly when vm not shared\n", __func__);
+ return;
+ }
+
+ result = trusty_reclaim_memory(sched_share_state->dev,
+ sched_share_state->sched_shared_mem_id,
+ sched_share_state->sg,
+ sched_share_state->num_pages);
+ if (result != 0) {
+ dev_err(sched_share_state->dev,
+ "trusty_reclaim_memory() failed: ret=%d mem_id=0x%llx\n",
+ result, sched_share_state->sched_shared_mem_id);
+ /*
+ * It is not safe to free this memory if trusty_reclaim_memory()
+ * failed. Leak it in that case.
+ */
+ dev_err(sched_share_state->dev,
+ "WARNING: leaking some allocated resources!!\n");
+ } else {
+ sched_share_state->vm_is_shared = false;
+ }
+}
+
+int trusty_alloc_sched_share(struct device *device,
+ struct trusty_sched_share_state **state)
+{
+ struct trusty_sched_share_state *sched_share_state = NULL;
struct trusty_sched_shared *shared;
- unsigned char *mem;
- trusty_shared_mem_id_t mem_id;
- int result = 0;
- int i;
+ uint sched_share_state_size;
+ unsigned int cpu;
+
+ sched_share_state_size = sizeof(*sched_share_state);
- share_state->mem_size = sizeof(struct trusty_sched_shared) +
+ sched_share_state = kzalloc(sched_share_state_size, GFP_KERNEL);
+ if (!sched_share_state)
+ goto err_sched_state_alloc;
+ sched_share_state->dev = device;
+ sched_share_state->is_registered = false;
+ sched_share_state->vm_is_shared = false;
+
+ sched_share_state->mem_size = sizeof(struct trusty_sched_shared) +
nr_cpu_ids * sizeof(struct trusty_percpu_data);
- share_state->num_pages =
- round_up(share_state->mem_size, PAGE_SIZE) / PAGE_SIZE;
- share_state->buf_size = share_state->num_pages * PAGE_SIZE;
+ sched_share_state->num_pages =
+ round_up(sched_share_state->mem_size, PAGE_SIZE) / PAGE_SIZE;
+ sched_share_state->buf_size = sched_share_state->num_pages * PAGE_SIZE;
- dev_dbg(share_state->dev,
+ dev_dbg(sched_share_state->dev,
"%s: mem_size=%d, num_pages=%d, buf_size=%d", __func__,
- share_state->mem_size, share_state->num_pages,
- share_state->buf_size);
+ sched_share_state->mem_size, sched_share_state->num_pages,
+ sched_share_state->buf_size);
- share_state->sg = kcalloc(share_state->num_pages,
- sizeof(*share_state->sg), GFP_KERNEL);
- if (!share_state->sg) {
- result = ENOMEM;
- goto err_rsrc_alloc_sg;
+ sched_share_state->sched_shared_vm = vzalloc(sched_share_state->buf_size);
+ if (!sched_share_state->sched_shared_vm)
+ goto err_resources_alloc;
+ dev_dbg(sched_share_state->dev, "%s: sched_shared_vm=%p size=%d\n",
+ __func__, sched_share_state->sched_shared_vm, sched_share_state->buf_size);
+
+ shared = (struct trusty_sched_shared *)sched_share_state->sched_shared_vm;
+ shared->cpu_count = nr_cpu_ids;
+ shared->hdr_size = sizeof(struct trusty_sched_shared);
+ shared->percpu_data_size = sizeof(struct trusty_percpu_data);
+
+ for_each_possible_cpu(cpu) {
+ trusty_get_trusty_percpu_data(shared, cpu)->ask_shadow_priority
+ = TRUSTY_SHADOW_PRIORITY_NORMAL;
}
- mem = vzalloc(share_state->buf_size);
- if (!mem) {
+ *state = sched_share_state;
+ return 0;
+
+err_resources_alloc:
+ kfree(sched_share_state);
+err_sched_state_alloc:
+ return -ENOMEM;
+}
+
+void trusty_register_sched_share(struct device *device,
+ struct trusty_sched_share_state *sched_share_state)
+{
+ int result = 0;
+ struct scatterlist *sg;
+ unsigned char *mem = sched_share_state->sched_shared_vm;
+ trusty_shared_mem_id_t mem_id;
+ int i;
+
+ /* allocate and initialize scatterlist */
+ sched_share_state->sg = kcalloc(sched_share_state->num_pages,
+ sizeof(*sched_share_state->sg), GFP_KERNEL);
+ if (!sched_share_state->sg) {
result = -ENOMEM;
- goto err_rsrc_alloc_mem;
+ dev_err(sched_share_state->dev, "%s: failed to alloc sg\n", __func__);
+ goto err_rsrc_alloc_sg;
}
- share_state->sched_shared_vm = mem;
- dev_dbg(share_state->dev, "%s: sched_shared_vm=%p size=%d\n",
- __func__, share_state->sched_shared_vm, share_state->buf_size);
- sg_init_table(share_state->sg, share_state->num_pages);
- for_each_sg(share_state->sg, sg, share_state->num_pages, i) {
+ sg_init_table(sched_share_state->sg, sched_share_state->num_pages);
+ for_each_sg(sched_share_state->sg, sg, sched_share_state->num_pages, i) {
struct page *pg = vmalloc_to_page(mem + (i * PAGE_SIZE));
if (!pg) {
result = -ENOMEM;
+ dev_err(sched_share_state->dev, "%s: failed to map page i=%d\n",
+ __func__, i);
goto err_rsrc_sg_lookup;
}
sg_set_page(sg, pg, PAGE_SIZE, 0);
}
- result = trusty_share_memory(share_state->dev, &mem_id, share_state->sg,
- share_state->num_pages, PAGE_KERNEL);
+ /* share memory with Trusty */
+ result = trusty_share_memory(sched_share_state->dev, &mem_id, sched_share_state->sg,
+ sched_share_state->num_pages, PAGE_KERNEL);
if (result != 0) {
- dev_err(share_state->dev, "trusty_share_memory failed: %d\n",
+ dev_err(sched_share_state->dev, "trusty_share_memory failed: %d\n",
result);
goto err_rsrc_share_mem;
}
- dev_dbg(share_state->dev, "%s: sched_shared_mem_id=0x%llx", __func__,
+ dev_dbg(sched_share_state->dev, "%s: sched_shared_mem_id=0x%llx", __func__,
mem_id);
- share_state->sched_shared_mem_id = mem_id;
-
- shared = (struct trusty_sched_shared *)share_state->sched_shared_vm;
- shared->hdr_size = sizeof(struct trusty_sched_shared);
- shared->percpu_data_size = sizeof(struct trusty_percpu_data);
-
- return result;
-
-err_rsrc_share_mem:
-err_rsrc_sg_lookup:
- vfree(share_state->sched_shared_vm);
-err_rsrc_alloc_mem:
- kfree(share_state->sg);
-err_rsrc_alloc_sg:
- return result;
-}
-
-struct trusty_sched_share_state *trusty_register_sched_share(struct device *device)
-{
- int result = 0;
- struct trusty_sched_share_state *sched_share_state = NULL;
- struct trusty_sched_shared *shared;
- uint sched_share_state_size;
-
- sched_share_state_size = sizeof(*sched_share_state);
-
- sched_share_state = kzalloc(sched_share_state_size, GFP_KERNEL);
- if (!sched_share_state)
- goto err_sched_state_alloc;
- sched_share_state->dev = device;
-
- result = trusty_sched_share_resources_allocate(sched_share_state);
- if (result)
- goto err_resources_alloc;
-
- shared = (struct trusty_sched_shared *)sched_share_state->sched_shared_vm;
- shared->cpu_count = nr_cpu_ids;
+ sched_share_state->sched_shared_mem_id = mem_id;
+ sched_share_state->vm_is_shared = true;
dev_dbg(device, "%s: calling api SMC_SC_SCHED_SHARE_REGISTER...\n",
__func__);
+ /* tell sched share code on Trusty side to share priorities */
result = trusty_std_call32(
sched_share_state->dev, SMC_SC_SCHED_SHARE_REGISTER,
(u32)sched_share_state->sched_shared_mem_id,
@@ -151,40 +194,26 @@ struct trusty_sched_share_state *trusty_register_sched_share(struct device *device)
dev_dbg(device, "%s: sched_share_state=%llx\n", __func__,
(u64)sched_share_state);
- return sched_share_state;
+ sched_share_state->is_registered = true;
+
+ return;
err_smc_std_call32:
- result = trusty_reclaim_memory(sched_share_state->dev,
- sched_share_state->sched_shared_mem_id,
- sched_share_state->sg,
- sched_share_state->num_pages);
- if (result != 0) {
- dev_err(sched_share_state->dev,
- "trusty_reclaim_memory() failed: ret=%d mem_id=0x%llx\n",
- result, sched_share_state->sched_shared_mem_id);
- /*
- * It is not safe to free this memory if trusty_reclaim_memory()
- * failed. Leak it in that case.
- */
- dev_err(sched_share_state->dev,
- "WARNING: leaking some allocated resources!!\n");
- } else {
- vfree(sched_share_state->sched_shared_vm);
- }
+ trusty_sched_share_reclaim_memory(sched_share_state);
+err_rsrc_share_mem:
+err_rsrc_sg_lookup:
kfree(sched_share_state->sg);
-err_resources_alloc:
- kfree(sched_share_state);
- dev_warn(sched_share_state->dev,
- "Trusty-Sched_Share API not available.\n");
-err_sched_state_alloc:
- return NULL;
+ sched_share_state->sg = NULL;
+err_rsrc_alloc_sg:
+ return;
+
}
void trusty_unregister_sched_share(struct trusty_sched_share_state *sched_share_state)
{
int result;
- if (!sched_share_state)
+ if (!sched_share_state->is_registered)
return;
/* ask Trusty to release the Trusty-side resources */
@@ -197,25 +226,18 @@ void trusty_unregister_sched_share(struct trusty_sched_share_state *sched_share_state)
"call SMC_SC_SCHED_SHARE_UNREGISTER failed, error=%d\n",
result);
}
- result = trusty_reclaim_memory(sched_share_state->dev,
- sched_share_state->sched_shared_mem_id,
- sched_share_state->sg,
- sched_share_state->num_pages);
- if (result) {
- dev_err(sched_share_state->dev,
- "trusty_reclaim_memory() failed: ret=%d mem_id=0x%llx\n",
- result, sched_share_state->sched_shared_mem_id);
- /*
- * It is not safe to free this memory if trusty_reclaim_memory()
- * failed. Leak it in that case.
- */
- dev_err(sched_share_state->dev,
- "WARNING: leaking some allocated resources!!\n");
- } else {
- vfree(sched_share_state->sched_shared_vm);
- }
+
+
+ trusty_sched_share_reclaim_memory(sched_share_state);
kfree(sched_share_state->sg);
+}
+
+void trusty_free_sched_share(struct trusty_sched_share_state *sched_share_state)
+{
+ if (!sched_share_state->vm_is_shared)
+ vfree(sched_share_state->sched_shared_vm);
+
kfree(sched_share_state);
}
@@ -239,13 +261,6 @@ static inline int map_trusty_prio_to_linux_nice(int trusty_prio)
return new_nice;
}
-static inline struct trusty_percpu_data *trusty_get_trusty_percpu_data(
- struct trusty_sched_shared *tsh, int cpu_num)
-{
- return (struct trusty_percpu_data *)((unsigned char *)tsh + tsh->hdr_size +
- (cpu_num * tsh->percpu_data_size));
-}
-
int trusty_get_requested_nice(unsigned int cpu_num, struct trusty_sched_share_state *tcpu_state)
{
struct trusty_sched_shared *tsh = (struct trusty_sched_shared *)tcpu_state->sched_shared_vm;
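
A side note on the relocated trusty_get_trusty_percpu_data() helper: it now computes per-cpu offsets from compile-time sizeof() values rather than the hdr_size and percpu_data_size fields stored in the shared header, which lets trusty_alloc_sched_share() seed per-cpu defaults before Trusty has ever seen the buffer. The implied layout, as a sketch (field names are taken from this diff; the struct definitions live elsewhere in the driver):

/*
 * Layout of the vzalloc()'d sched_shared_vm buffer:
 *
 *   +--------------------------------+ <- base
 *   | struct trusty_sched_shared     |    cpu_count, hdr_size,
 *   |                                |    percpu_data_size, ...
 *   +--------------------------------+ <- base + sizeof(header)
 *   | struct trusty_percpu_data [0]  |    one slot per possible CPU;
 *   | struct trusty_percpu_data [1]  |    each holds at least
 *   | ...                            |    ask_shadow_priority
 *   +--------------------------------+
 *
 * slot(cpu) = base + sizeof(header) + cpu * sizeof(percpu_data)
 */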
diff --git a/drivers/trusty/trusty.c b/drivers/trusty/trusty.c
index c1d26af..8d18368 100644
--- a/drivers/trusty/trusty.c
+++ b/drivers/trusty/trusty.c
@@ -145,11 +145,9 @@ static unsigned long trusty_std_call_helper(struct device *dev,
local_irq_disable();
/* tell Trusty scheduler what the current priority is */
- if (s->trusty_sched_share_state) {
- WARN_ON_ONCE(current->policy != SCHED_NORMAL);
- trusty_set_actual_nice(smp_processor_id(),
- s->trusty_sched_share_state, task_nice(current));
- }
+ WARN_ON_ONCE(current->policy != SCHED_NORMAL);
+ trusty_set_actual_nice(smp_processor_id(),
+ s->trusty_sched_share_state, task_nice(current));
atomic_notifier_call_chain(&s->notifier, TRUSTY_CALL_PREPARE,
NULL);
@@ -835,14 +833,9 @@ static void trusty_adjust_nice_nopreempt(struct trusty_state *s, bool do_nop)
cause_id = CPUNICE_CAUSE_USE_HIGH_WQ;
} else {
/* read trusty request for this cpu if available */
- if (s->trusty_sched_share_state) {
- req_nice = trusty_get_requested_nice(smp_processor_id(),
- s->trusty_sched_share_state);
- cause_id = CPUNICE_CAUSE_TRUSTY_REQ;
- } else {
- /* (unlikely case) default to current */
- req_nice = LINUX_NICE_FOR_TRUSTY_PRIORITY_NORMAL;
- }
+ req_nice = trusty_get_requested_nice(smp_processor_id(),
+ s->trusty_sched_share_state);
+ cause_id = CPUNICE_CAUSE_TRUSTY_REQ;
}
/* ensure priority will not be lower than system request
@@ -1118,7 +1111,12 @@ static int trusty_probe(struct platform_device *pdev)
goto err_add_cpuhp_instance;
}
- s->trusty_sched_share_state = trusty_register_sched_share(&pdev->dev);
+ ret = trusty_alloc_sched_share(&pdev->dev, &s->trusty_sched_share_state);
+ if (ret) {
+ dev_err(s->dev, "%s: unable to allocate sched memory (%d)\n",
+ __func__, ret);
+ goto err_alloc_sched_share;
+ }
ret = of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev);
if (ret < 0) {
@@ -1126,9 +1124,16 @@ static int trusty_probe(struct platform_device *pdev)
goto err_add_children;
}
+ /* attempt to share; it is optional for compatibility with Trusty
+ * versions that don't support priority sharing
+ */
+ trusty_register_sched_share(s->dev, s->trusty_sched_share_state);
+
return 0;
err_add_children:
+ trusty_free_sched_share(s->trusty_sched_share_state);
+err_alloc_sched_share:
cpuhp_state_remove_instance(trusty_cpuhp_slot, &s->cpuhp_node);
err_add_cpuhp_instance:
err_thread_create:
@@ -1171,6 +1176,8 @@ static int trusty_remove(struct platform_device *pdev)
}
free_percpu(s->nop_works);
+ trusty_free_sched_share(s->trusty_sched_share_state);
+
mutex_destroy(&s->share_memory_msg_lock);
mutex_destroy(&s->smc_lock);
trusty_free_msg_buf(s, &pdev->dev);
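
The trusty_remove() hunk above is the payoff of separating unregister from free: trusty_free_sched_share() runs only after free_percpu(s->nop_works), i.e. after the per-cpu workers that may still read the shared structure are gone. A sketch of the resulting teardown order follows; the trusty_unregister_sched_share() call site is outside this diff's context lines, and its placement below is an assumption consistent with the commit message:

static int trusty_remove(struct platform_device *pdev)
{
	struct trusty_state *s = platform_get_drvdata(pdev);

	/* assumed: stop sharing with Trusty while workers may still run;
	 * the structure itself stays valid and keeps serving defaults */
	trusty_unregister_sched_share(s->trusty_sched_share_state);

	/* ... stop and flush the per-cpu NOP workers ... */
	free_percpu(s->nop_works);

	/* nothing can reference the structure any more; free it */
	trusty_free_sched_share(s->trusty_sched_share_state);

	mutex_destroy(&s->share_memory_msg_lock);
	mutex_destroy(&s->smc_lock);
	trusty_free_msg_buf(s, &pdev->dev);
	return 0;
}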