author	Android Build Coastguard Worker <android-build-coastguard-worker@google.com>	2023-08-02 03:00:36 +0000
committer	Android Build Coastguard Worker <android-build-coastguard-worker@google.com>	2023-08-02 03:00:36 +0000
commit	b911eac6bf5ac35aa2d6923fa8bdb4cb1c32758f (patch)
tree	5884c071e7ea1c07231acea66a90027bc3006cc0
parent	104d8b42e565e5c53eb9f3c3d327c732a6e63116 (diff)
parent	11482b91e578bb737156a2b7428415806d1291c6 (diff)
download	display-android-gs-felix-5.10-u-beta5.tar.gz
Change-Id: Icf969e5429ff1d7e9a93366162fd7faa1d7113bd
-rw-r--r--	samsung/exynos_drm_decon.c	3
-rw-r--r--	samsung/exynos_drm_dqe.c	140
-rw-r--r--	samsung/exynos_drm_dqe.h	19
3 files changed, 137 insertions(+), 25 deletions(-)
diff --git a/samsung/exynos_drm_decon.c b/samsung/exynos_drm_decon.c
index 5fababb..1aad899 100644
--- a/samsung/exynos_drm_decon.c
+++ b/samsung/exynos_drm_decon.c
@@ -1382,6 +1382,7 @@ static void decon_enter_hibernation(struct decon_device *decon)
reset = _decon_wait_for_framedone(decon);
spin_lock_irqsave(&decon->slock, flags);
+ exynos_dqe_hibernation_enter(decon->dqe);
_decon_disable_locked(decon, reset);
pm_runtime_put(decon->dev);
decon->state = DECON_STATE_HIBERNATION;
@@ -1665,9 +1666,9 @@ static irqreturn_t decon_irq_handler(int irq, void *dev_data)
DPU_ATRACE_INT_PID("frame_transfer", 0, decon->thread->pid);
DPU_EVENT_LOG(DPU_EVT_DECON_FRAMEDONE, decon->id, decon);
exynos_dqe_save_lpd_data(decon->dqe);
+ atomic_dec_if_positive(&decon->frames_pending);
if (decon->dqe)
handle_histogram_event(decon->dqe);
- atomic_dec_if_positive(&decon->frames_pending);
wake_up_all(&decon->framedone_wait);
decon_debug(decon, "%s: frame done\n", __func__);
}
diff --git a/samsung/exynos_drm_dqe.c b/samsung/exynos_drm_dqe.c
index ee7116f..cc00562 100644
--- a/samsung/exynos_drm_dqe.c
+++ b/samsung/exynos_drm_dqe.c
@@ -67,6 +67,50 @@ exynos_atc_update(struct exynos_dqe *dqe, struct exynos_dqe_state *state)
dqe_reg_print_atc(id, &p);
}
+/*
+ * emits the histogram event (caller must hold histogram_slock)
+ */
+static void histogram_emmit_event(struct exynos_dqe *dqe)
+{
+ struct drm_device *dev = dqe->decon->drm_dev;
+ struct exynos_drm_pending_histogram_event *e = dqe->state.event;
+ uint32_t crtc_id = dqe->decon->crtc->base.base.id;
+
+ e->event.crtc_id = crtc_id;
+ drm_send_event(dev, &e->base);
+ dqe->state.event = NULL;
+}
+
+static void histogram_collect_bins(struct exynos_dqe *dqe, struct histogram_bins *bins)
+{
+ uint32_t id = dqe->decon->id;
+ /* collect data from bins */
+ dqe_reg_get_histogram_bins(id, bins);
+}
+
+static const char *str_run_state(enum histogram_run_state state)
+{
+ switch (state) {
+ case HSTATE_DISABLED:
+ return "disabled";
+ case HSTATE_HIBERNATION:
+ return "hibernation";
+ case HSTATE_PENDING_FRAMEDONE:
+ return "pending_framedone";
+ case HSTATE_IDLE:
+ return "idle";
+ default:
+ return "";
+ }
+}
+
+static void histogram_set_run_state(struct exynos_dqe *dqe, enum histogram_run_state state)
+{
+ pr_debug("histogram: run_state: %s -> %s\n",
+ str_run_state(dqe->state.hist_run_state), str_run_state(state));
+ dqe->state.hist_run_state = state;
+}
+
static struct exynos_drm_pending_histogram_event *create_histogram_event(
struct drm_device *dev, struct drm_file *file)
{
@@ -116,7 +160,6 @@ int histogram_request_ioctl(struct drm_device *dev, void *data,
return -ENODEV;
}
-
e = create_histogram_event(dev, file);
if (IS_ERR(e)) {
pr_err("failed to create a histogram event\n");
@@ -134,11 +177,24 @@ int histogram_request_ioctl(struct drm_device *dev, void *data,
spin_unlock_irqrestore(&dqe->state.histogram_slock, flags);
return -EBUSY;
}
+
dqe->state.event = e;
+
+ /* check cached state */
+ if (dqe->state.hist_run_state == HSTATE_HIBERNATION) {
+ if (dqe->verbose_hist)
+ pr_info("histogram: use cached data\n");
+ memcpy(&e->event.bins, &dqe->state.histogram_cached_bins, sizeof(e->event.bins));
+ histogram_emmit_event(dqe);
+ } else if (dqe->state.hist_run_state == HSTATE_IDLE) {
+ if (dqe->verbose_hist)
+ pr_info("histogram: idle, query now\n");
+ histogram_collect_bins(dqe, &dqe->state.event->event.bins);
+ histogram_emmit_event(dqe);
+ }
spin_unlock_irqrestore(&dqe->state.histogram_slock, flags);
- pr_debug("created histogram event(0x%pK) of decon%u\n",
- dqe->state.event, decon->id);
+ pr_debug("histogram: created event(0x%pK) of decon%u\n", dqe->state.event, decon->id);
return 0;
}
@@ -182,25 +238,33 @@ int histogram_cancel_ioctl(struct drm_device *dev, void *data,
return 0;
}
+/* This function runs in interrupt context */
void handle_histogram_event(struct exynos_dqe *dqe)
{
- /* This function runs in interrupt context */
- struct exynos_drm_pending_histogram_event *e;
- struct drm_device *dev = dqe->decon->drm_dev;
- uint32_t id, crtc_id;
-
spin_lock(&dqe->state.histogram_slock);
- crtc_id = dqe->decon->crtc->base.base.id;
- id = dqe->decon->id;
- e = dqe->state.event;
- if (e) {
- pr_debug("Histogram event(0x%pK) will be handled\n", dqe->state.event);
- dqe_reg_get_histogram_bins(id, &e->event.bins);
- e->event.crtc_id = crtc_id;
- drm_send_event(dev, &e->base);
- pr_debug("histogram event of decon%u signalled\n", dqe->decon->id);
- dqe->state.event = NULL;
+
+ /* return immediately if histogram disabled */
+ if (dqe->state.hist_run_state == HSTATE_DISABLED) {
+ spin_unlock(&dqe->state.histogram_slock);
+ return;
+ }
+
+ /*
+ * histogram engine data is available after first frame done.
+ */
+ if (dqe->state.event) {
+ pr_debug("histogram: handle event(0x%pK), rstate(%s)\n",
+ dqe->state.event, str_run_state(dqe->state.hist_run_state));
+ histogram_collect_bins(dqe, &dqe->state.event->event.bins);
+ histogram_emmit_event(dqe);
}
+
+ if ((atomic_read(&dqe->decon->frames_pending) == 0) &&
+ (dqe->decon->config.mode.op_mode != DECON_VIDEO_MODE))
+ histogram_set_run_state(dqe, HSTATE_IDLE);
+ else
+ histogram_set_run_state(dqe, HSTATE_PENDING_FRAMEDONE);
+
spin_unlock(&dqe->state.histogram_slock);
}
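
The frame-done path above picks HSTATE_IDLE or HSTATE_PENDING_FRAMEDONE from decon->frames_pending and the panel operating mode, which is why the decon_irq_handler hunk earlier moves atomic_dec_if_positive() ahead of handle_histogram_event(). A minimal standalone sketch of that decision, for illustration only (the helper name and plain-int parameters are invented here and are not part of the patch):

enum histogram_run_state {
	HSTATE_DISABLED,
	HSTATE_HIBERNATION,
	HSTATE_PENDING_FRAMEDONE,
	HSTATE_IDLE,
};

/*
 * State chosen at frame done: in command (non-video) mode with no further
 * frames queued, the engine still holds the finished frame's counts, so the
 * histogram may be read immediately; otherwise a read has to wait for the
 * next frame done.
 */
static enum histogram_run_state
framedone_next_state(enum histogram_run_state cur, int frames_pending, int video_mode)
{
	if (cur == HSTATE_DISABLED)
		return HSTATE_DISABLED;
	if (frames_pending == 0 && !video_mode)
		return HSTATE_IDLE;
	return HSTATE_PENDING_FRAMEDONE;
}
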
@@ -390,6 +454,7 @@ exynos_histogram_update(struct exynos_dqe *dqe, struct exynos_dqe_state *state)
struct decon_device *decon = dqe->decon;
struct drm_printer p = drm_info_printer(decon->dev);
u32 id = decon->id;
+ unsigned long flags;
if (dqe->state.roi != state->roi) {
dqe_reg_set_histogram_roi(id, state->roi);
@@ -411,14 +476,21 @@ exynos_histogram_update(struct exynos_dqe *dqe, struct exynos_dqe_state *state)
dqe->state.histogram_pos = state->histogram_pos;
}
- if (dqe->state.event && state->roi)
+ if (state->weights && state->roi)
hist_state = HISTOGRAM_ROI;
- else if (dqe->state.event && !state->roi)
+ else if (state->weights)
hist_state = HISTOGRAM_FULL;
else
hist_state = HISTOGRAM_OFF;
+ spin_lock_irqsave(&dqe->state.histogram_slock, flags);
+ if (hist_state == HISTOGRAM_OFF)
+ histogram_set_run_state(dqe, HSTATE_DISABLED);
+ else
+ histogram_set_run_state(dqe, HSTATE_PENDING_FRAMEDONE);
+ dqe->state.hist_state = hist_state;
dqe_reg_set_histogram(id, hist_state);
+ spin_unlock_irqrestore(&dqe->state.histogram_slock, flags);
if (dqe->verbose_hist)
dqe_reg_print_hist(id, &p);
@@ -532,8 +604,31 @@ void exynos_dqe_update(struct exynos_dqe *dqe, struct exynos_dqe_state *state,
dqe->funcs->update(dqe, state, width, height);
}
+/*
+ * operations prior to entering hibernation
+ */
+void exynos_dqe_hibernation_enter(struct exynos_dqe *dqe)
+{
+ unsigned long flags;
+
+ if (!dqe->state.enabled)
+ return;
+
+ spin_lock_irqsave(&dqe->state.histogram_slock, flags);
+ if (dqe->state.hist_run_state == HSTATE_IDLE) {
+ histogram_collect_bins(dqe, &dqe->state.histogram_cached_bins);
+ histogram_set_run_state(dqe, HSTATE_HIBERNATION);
+ } else if (dqe->state.hist_run_state == HSTATE_PENDING_FRAMEDONE) {
+ WARN(1, "pending histogram during hibernation\n");
+ histogram_set_run_state(dqe, HSTATE_DISABLED);
+ }
+ spin_unlock_irqrestore(&dqe->state.histogram_slock, flags);
+}
+
void exynos_dqe_reset(struct exynos_dqe *dqe)
{
+ unsigned long flags;
+
dqe->initialized = false;
dqe->state.gamma_matrix = NULL;
dqe->state.degamma_lut = NULL;
@@ -544,8 +639,13 @@ void exynos_dqe_reset(struct exynos_dqe *dqe)
dqe->state.cgc_dither_config = NULL;
dqe->cgc.first_write = false;
dqe->force_atc_config.dirty = true;
+ spin_lock_irqsave(&dqe->state.histogram_slock, flags);
dqe->state.histogram_threshold = 0;
dqe->state.histogram_pos = POST_DQE;
+ dqe->state.hist_state = HISTOGRAM_OFF;
+ if (dqe->state.hist_run_state != HSTATE_HIBERNATION)
+ histogram_set_run_state(dqe, HSTATE_DISABLED);
+ spin_unlock_irqrestore(&dqe->state.histogram_slock, flags);
dqe->state.roi = NULL;
dqe->state.weights = NULL;
dqe->state.rcd_enabled = false;
diff --git a/samsung/exynos_drm_dqe.h b/samsung/exynos_drm_dqe.h
index d578114..5760adb 100644
--- a/samsung/exynos_drm_dqe.h
+++ b/samsung/exynos_drm_dqe.h
@@ -26,6 +26,13 @@ struct exynos_dqe_funcs {
u32 width, u32 height);
};
+enum histogram_run_state {
+ HSTATE_DISABLED, /* histogram is disabled */
+ HSTATE_HIBERNATION, /* histogram is disabled due to hibernation */
+ HSTATE_PENDING_FRAMEDONE, /* histogram is enabled, can be read on frame done */
+ HSTATE_IDLE, /* histogram is enabled, can be read at any time */
+};
+
struct exynos_dqe_state {
const struct drm_color_lut *degamma_lut;
const struct exynos_matrix *linear_matrix;
@@ -35,15 +42,18 @@ struct exynos_dqe_state {
struct dither_config *disp_dither_config;
struct dither_config *cgc_dither_config;
bool enabled;
+ bool rcd_enabled;
+ struct drm_gem_object *cgc_gem;
+ spinlock_t histogram_slock;
+ struct exynos_drm_pending_histogram_event *event;
struct histogram_roi *roi;
struct histogram_weights *weights;
struct histogram_bins *bins;
- struct exynos_drm_pending_histogram_event *event;
u32 histogram_threshold;
- spinlock_t histogram_slock;
enum exynos_prog_pos histogram_pos;
- bool rcd_enabled;
- struct drm_gem_object *cgc_gem;
+ enum histogram_state hist_state;
+ enum histogram_run_state hist_run_state;
+ struct histogram_bins histogram_cached_bins;
};
struct dither_debug_override {
@@ -151,6 +161,7 @@ void handle_histogram_event(struct exynos_dqe *dqe);
void exynos_dqe_update(struct exynos_dqe *dqe, struct exynos_dqe_state *state,
u32 width, u32 height);
void exynos_dqe_reset(struct exynos_dqe *dqe);
+void exynos_dqe_hibernation_enter(struct exynos_dqe *dqe);
struct exynos_dqe *exynos_dqe_register(struct decon_device *decon);
void exynos_dqe_save_lpd_data(struct exynos_dqe *dqe);
void exynos_dqe_restore_lpd_data(struct exynos_dqe *dqe);
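
Taken together with histogram_request_ioctl() above, the new run state determines how a request is answered: from histogram_cached_bins while in hibernation, from a direct register read while idle, or deferred until a later frame done (once the histogram is enabled) otherwise. A condensed sketch of that dispatch, for illustration only: it reuses enum histogram_run_state from samsung/exynos_drm_dqe.h, while enum request_path and classify_request() are names invented here, not driver code.

/* how a pending histogram event ends up being filled */
enum request_path {
	SERVE_FROM_CACHE,	/* HSTATE_HIBERNATION: bins cached on hibernation entry */
	SERVE_FROM_REGISTERS,	/* HSTATE_IDLE: no frame in flight, read the DQE registers now */
	SERVE_ON_FRAMEDONE,	/* otherwise: left pending for handle_histogram_event() */
};

static enum request_path classify_request(enum histogram_run_state s)
{
	switch (s) {
	case HSTATE_HIBERNATION:
		return SERVE_FROM_CACHE;
	case HSTATE_IDLE:
		return SERVE_FROM_REGISTERS;
	case HSTATE_DISABLED:
	case HSTATE_PENDING_FRAMEDONE:
	default:
		return SERVE_ON_FRAMEDONE;
	}
}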