summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorBrandon Anderson <brandonand@google.com>2023-01-09 19:06:00 +0000
committerJi Soo Shin <jisshin@google.com>2023-03-16 21:23:29 +0000
commit744efe015112f07dac84b17c6bf68db5de050bfe (patch)
tree3710f14302cd7412d3124918af5466526855bc77
parent95d6069826383711846fd4802508840fd0d4368c (diff)
downloadtrusty-744efe015112f07dac84b17c6bf68db5de050bfe.tar.gz
ANDROID: improve trusty kthread priority
Add a 'signaled' flag that can be used by nop_work_func() on each processor to boost priority only when necessary. Slightly rework the calculation of the correct priority for the kthread, which is based on several factors. Add a trace point for dequeue. Bug: 260619596 Change-Id: If0ae0f22132a41157b62a0f51f510aa05f141c63 Signed-off-by: Brandon Anderson <brandonand@google.com>
-rw-r--r--drivers/trusty/trusty-trace.h23
-rw-r--r--drivers/trusty/trusty.c120
2 files changed, 110 insertions, 33 deletions
diff --git a/drivers/trusty/trusty-trace.h b/drivers/trusty/trusty-trace.h
index 77b3298..e587ae1 100644
--- a/drivers/trusty/trusty-trace.h
+++ b/drivers/trusty/trusty-trace.h
@@ -170,11 +170,32 @@ TRACE_EVENT(trusty_enqueue_nop,
TP_printk("arg1=0x%x, arg2=0x%x, arg3=0x%x", __entry->arg1, __entry->arg2, __entry->arg3)
);
+TRACE_EVENT(trusty_dequeue_nop,
+ TP_PROTO(bool signaled, bool nop_dequeued, bool queue_emptied),
+ TP_ARGS(signaled, nop_dequeued, queue_emptied),
+ TP_STRUCT__entry(
+ __field(bool, signaled)
+ __field(bool, nop_dequeued)
+ __field(bool, queue_emptied)
+ ),
+ TP_fast_assign(
+ __entry->signaled = signaled;
+ __entry->nop_dequeued = nop_dequeued;
+ __entry->queue_emptied = queue_emptied;
+ ),
+ TP_printk("%c%c%c",
+ __entry->signaled ? 'S' : ' ',
+ __entry->nop_dequeued ? 'D' : ' ',
+ __entry->queue_emptied ? 'E' : ' '
+ )
+);
+
#define CPUNICE_CAUSE_LIST ( \
cpu_nice(CAUSE_DEFAULT) \
cpu_nice(CAUSE_USE_HIGH_WQ) \
cpu_nice(CAUSE_TRUSTY_REQ) \
- cpu_nice_end(CAUSE_NOP_ESCALATE) \
+ cpu_nice(CAUSE_NOP_ESCALATE) \
+ cpu_nice_end(CAUSE_ENQUEUE_BOOST) \
)
#undef cpu_nice
diff --git a/drivers/trusty/trusty.c b/drivers/trusty/trusty.c
index 6c8b2fa..e2c5301 100644
--- a/drivers/trusty/trusty.c
+++ b/drivers/trusty/trusty.c
@@ -31,12 +31,13 @@ static struct platform_driver trusty_driver;
static bool use_high_wq;
module_param(use_high_wq, bool, 0660);
-static bool override_high_prio_nop;
-module_param(override_high_prio_nop, bool, 0660);
+static int nop_nice_value = -20; /* default to highest */
+module_param(nop_nice_value, int, 0660);
struct trusty_work {
struct task_struct *nop_thread;
wait_queue_head_t nop_event_wait;
+ int signaled;
};
struct trusty_state {
@@ -749,8 +750,15 @@ static bool dequeue_nop(struct trusty_state *s, u32 *args)
{
unsigned long flags;
struct trusty_nop *nop = NULL;
+ struct trusty_work *tw;
+ bool ret = false;
+ bool signaled;
+ bool nop_dequeued = false;
+ bool queue_emptied = false;
spin_lock_irqsave(&s->nop_lock, flags);
+ tw = this_cpu_ptr(s->nop_works);
+ signaled = tw->signaled;
if (!list_empty(&s->nop_queue)) {
nop = list_first_entry(&s->nop_queue,
struct trusty_nop, node);
@@ -758,13 +766,27 @@ static bool dequeue_nop(struct trusty_state *s, u32 *args)
args[0] = nop->args[0];
args[1] = nop->args[1];
args[2] = nop->args[2];
+
+ nop_dequeued = true;
+ if (list_empty(&s->nop_queue))
+ queue_emptied = true;
+
+ ret = true;
} else {
args[0] = 0;
args[1] = 0;
args[2] = 0;
+
+ ret = tw->signaled;
}
+ tw->signaled = false;
spin_unlock_irqrestore(&s->nop_lock, flags);
- return nop;
+
+ /* don't log when false as it is preempt case which can be very noisy */
+ if (ret)
+ trace_trusty_dequeue_nop(signaled, nop_dequeued, queue_emptied);
+
+ return ret;
}
static void locked_nop_work_func(struct trusty_state *s)
@@ -784,43 +806,75 @@ enum cpunice_cause {
CPUNICE_CAUSE_USE_HIGH_WQ,
CPUNICE_CAUSE_TRUSTY_REQ,
CPUNICE_CAUSE_NOP_ESCALATE,
+ CPUNICE_CAUSE_ENQUEUE_BOOST,
};
-static void trusty_adjust_nice_nopreempt(struct trusty_state *s, bool next)
+static void trusty_adjust_nice_nopreempt(struct trusty_state *s, bool do_nop)
{
int req_nice, cur_nice;
int cause_id = CPUNICE_CAUSE_DEFAULT;
+ unsigned long flags;
+ struct trusty_work *tw;
+
+ local_irq_save(flags);
+
+ cur_nice = task_nice(current);
+
+ /* check to see if another signal has come in since dequeue_nop */
+ tw = this_cpu_ptr(s->nop_works);
+ do_nop |= tw->signaled;
if (use_high_wq) {
+ /* use highest priority (lowest nice) for everything */
req_nice = LINUX_NICE_FOR_TRUSTY_PRIORITY_HIGH;
cause_id = CPUNICE_CAUSE_USE_HIGH_WQ;
- } else if (!override_high_prio_nop && next) {
- return; /* Do not undo priority boost when there's more */
- } else if (s->trusty_sched_share_state) {
- req_nice = trusty_get_requested_nice(smp_processor_id(),
- s->trusty_sched_share_state);
- cause_id = CPUNICE_CAUSE_TRUSTY_REQ;
} else {
- req_nice = LINUX_NICE_FOR_TRUSTY_PRIORITY_NORMAL;
+ /* read trusty request for this cpu if available */
+ if (s->trusty_sched_share_state) {
+ req_nice = trusty_get_requested_nice(smp_processor_id(),
+ s->trusty_sched_share_state);
+ cause_id = CPUNICE_CAUSE_TRUSTY_REQ;
+ } else {
+ /* (unlikely case) default to current */
+ req_nice = LINUX_NICE_FOR_TRUSTY_PRIORITY_NORMAL;
+ }
}
- cur_nice = task_nice(current);
+ /* ensure priority will not be lower than system request
+ * when there is more work to do
+ */
+ if (do_nop && nop_nice_value < req_nice) {
+ req_nice = nop_nice_value;
+ cause_id = CPUNICE_CAUSE_NOP_ESCALATE;
+ }
+
+ /* trace entry only if changing */
if (req_nice != cur_nice)
trace_trusty_change_cpu_nice(cur_nice, req_nice, cause_id);
/* tell Linux the desired priority */
set_user_nice(current, req_nice);
+
+ local_irq_restore(flags);
}
static void nop_work_func(struct trusty_state *s)
{
int ret;
- bool next;
+ bool do_nop;
u32 args[3];
u32 last_arg0;
- dequeue_nop(s, args);
- do {
+ do_nop = dequeue_nop(s, args);
+
+ if (do_nop) {
+ /* we have been signaled or there's a nop so
+ * adjust priority before making SMC call below
+ */
+ trusty_adjust_nice_nopreempt(s, do_nop);
+ }
+
+ while (do_nop) {
dev_dbg(s->dev, "%s: %x %x %x\n",
__func__, args[0], args[1], args[2]);
@@ -830,14 +884,13 @@ static void nop_work_func(struct trusty_state *s)
ret = trusty_std_call32(s->dev, SMC_SC_NOP,
args[0], args[1], args[2]);
- next = dequeue_nop(s, args);
+ do_nop = dequeue_nop(s, args);
- if (ret == SM_ERR_NOP_INTERRUPTED) {
- local_irq_disable();
- trusty_adjust_nice_nopreempt(s, next);
- local_irq_enable();
+ /* adjust priority in case Trusty has requested a change */
+ trusty_adjust_nice_nopreempt(s, do_nop);
- next = true;
+ if (ret == SM_ERR_NOP_INTERRUPTED) {
+ do_nop = true;
} else if (ret != SM_ERR_NOP_DONE) {
dev_err(s->dev, "%s: SMC_SC_NOP %x failed %d",
__func__, last_arg0, ret);
@@ -846,12 +899,12 @@ static void nop_work_func(struct trusty_state *s)
* Don't break out of the loop if a non-default
* nop-handler returns an error.
*/
- next = true;
+ do_nop = true;
}
}
preempt_enable();
- } while (next);
+ }
dev_dbg(s->dev, "%s: done\n", __func__);
}
@@ -860,7 +913,7 @@ void trusty_enqueue_nop(struct device *dev, struct trusty_nop *nop)
unsigned long flags;
struct trusty_work *tw;
struct trusty_state *s = platform_get_drvdata(to_platform_device(dev));
- int old_nice = 0, new_nice = 0;
+ int cur_nice;
trace_trusty_enqueue_nop(nop);
preempt_disable();
@@ -874,14 +927,16 @@ void trusty_enqueue_nop(struct device *dev, struct trusty_nop *nop)
spin_unlock_irqrestore(&s->nop_lock, flags);
}
- if (!override_high_prio_nop) {
- old_nice = task_nice(current);
- set_user_nice(tw->nop_thread, LINUX_NICE_FOR_TRUSTY_PRIORITY_HIGH);
- new_nice = LINUX_NICE_FOR_TRUSTY_PRIORITY_HIGH;
- if (old_nice != new_nice)
- trace_trusty_change_cpu_nice(old_nice, new_nice,
- CPUNICE_CAUSE_NOP_ESCALATE);
- }
+ /* boost the priority here so the thread can get to it fast */
+ cur_nice = task_nice(tw->nop_thread);
+ if (nop_nice_value < cur_nice) {
+ trace_trusty_change_cpu_nice(cur_nice, nop_nice_value,
+ CPUNICE_CAUSE_ENQUEUE_BOOST);
+ set_user_nice(tw->nop_thread, nop_nice_value);
+ }
+
+ /* indicate that this cpu was signaled */
+ tw->signaled = true;
wake_up_interruptible(&tw->nop_event_wait);
preempt_enable();
@@ -990,6 +1045,7 @@ static int trusty_probe(struct platform_device *pdev)
tw->nop_thread = ERR_PTR(-EINVAL);
init_waitqueue_head(&tw->nop_event_wait);
+ tw->signaled = false;
}
for_each_possible_cpu(cpu) {