summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorMinchan Kim <minchan@google.com>2022-01-28 11:09:06 -0800
committerWill McVicker <willmcvicker@google.com>2022-05-20 15:04:40 -0700
commit85f5fe00e56e24d52b76d706b9341bea36e05fcb (patch)
tree674fd96a10fccb03d5278c781edb880e0dbb59ad
parente2e4d3226d155e0bbbeb7b74d63491efcfa264e2 (diff)
downloadraviole-device-85f5fe00e56e24d52b76d706b9341bea36e05fcb.tar.gz
eh: add nr_run and nr_compressed
The nr_run represents how many times the EH thread has run. The nr_compressed represents how many compression requests were processed. Using those two metrics, we can calculate the average inflight ratio (nr_compressed / nr_run) to represent the EH thread's efficiency. If it's low, it means we wake up the EH thread too frequently. If it's high but close to the HW queue size of EH, it means we need to consider increasing the HW queue size. cat /sys/kernel/eh/{nr_run,nr_compressed} Bug: 215573980 Signed-off-by: Minchan Kim <minchan@google.com> Change-Id: I099788bace8c7c39ab70e714cb692a5963037bac (cherry picked from commit 27818f3e93e73fc2eb153c31770b309f476246a2) Signed-off-by: Will McVicker <willmcvicker@google.com>
-rw-r--r--drivers/soc/google/eh/eh_internal.h5
-rw-r--r--drivers/soc/google/eh/eh_main.c44
2 files changed, 46 insertions, 3 deletions
diff --git a/drivers/soc/google/eh/eh_internal.h b/drivers/soc/google/eh/eh_internal.h
index 890c625e0..15bb76279 100644
--- a/drivers/soc/google/eh/eh_internal.h
+++ b/drivers/soc/google/eh/eh_internal.h
@@ -104,6 +104,11 @@ struct eh_device {
eh_cb_fn comp_callback;
+ /* how many compression requests were processed */
+ unsigned long nr_compressed;
+ /* how many times the EH thread has run */
+ unsigned long nr_run;
+
/*
* eh_request pool to avoid memory allocation when EH's HW queue
* is full.
diff --git a/drivers/soc/google/eh/eh_main.c b/drivers/soc/google/eh/eh_main.c
index 5ec50ba93..f604b58e4 100644
--- a/drivers/soc/google/eh/eh_main.c
+++ b/drivers/soc/google/eh/eh_main.c
@@ -674,15 +674,29 @@ static void eh_abort_incomplete_descriptors(struct eh_device *eh_dev)
static int eh_comp_thread(void *data)
{
struct eh_device *eh_dev = data;
+ DEFINE_WAIT(wait);
+ int nr_processed = 0;
current->flags |= PF_MEMALLOC;
while (!kthread_should_stop()) {
int ret;
- wait_event_freezable(eh_dev->comp_wq,
- (atomic_read(&eh_dev->nr_request) > 0) ||
- !sw_fifo_empty(&eh_dev->sw_fifo));
+ prepare_to_wait(&eh_dev->comp_wq, &wait, TASK_IDLE);
+ if (atomic_read(&eh_dev->nr_request) == 0 &&
+ sw_fifo_empty(&eh_dev->sw_fifo)) {
+ eh_dev->nr_compressed += nr_processed;
+ schedule();
+ nr_processed = 0;
+ /*
+ * The condition check above is racy, so schedule() may not
+ * actually put the process to sleep, but that should be
+ * rare and the stat doesn't need to be precise.
+ */
+ eh_dev->nr_run++;
+ }
+ finish_wait(&eh_dev->comp_wq, &wait);
+
ret = eh_process_compress(eh_dev);
if (unlikely(ret < 0)) {
unsigned long error;
@@ -712,6 +726,8 @@ static int eh_comp_thread(void *data)
if (!fifo_full(eh_dev))
flush_sw_fifo(eh_dev);
+
+ nr_processed += ret;
}
return 0;
@@ -975,8 +991,30 @@ static ssize_t nr_stall_show(struct kobject *kobj, struct kobj_attribute *attr,
}
EH_ATTR_RO(nr_stall);
+static ssize_t nr_run_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *buf)
+{
+ struct eh_device *eh_dev = container_of(kobj, struct eh_device, kobj);
+
+ return sysfs_emit(buf, "%lu\n", eh_dev->nr_run);
+}
+EH_ATTR_RO(nr_run);
+
+static ssize_t nr_compressed_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *buf)
+{
+ struct eh_device *eh_dev = container_of(kobj, struct eh_device, kobj);
+
+ return sysfs_emit(buf, "%lu\n", eh_dev->nr_compressed);
+}
+EH_ATTR_RO(nr_compressed);
+
static struct attribute *eh_attrs[] = {
&nr_stall_attr.attr,
+ &nr_run_attr.attr,
+ &nr_compressed_attr.attr,
NULL,
};
ATTRIBUTE_GROUPS(eh);