author     Neela Chithirala <chithiralan@google.com>    2022-03-21 04:33:21 +0000
committer  Neela Chithirala <chithiralan@google.com>    2022-03-21 04:33:21 +0000
commit     a739c7f7e74e637cf882fa9ee73dee0aeb4374ee (patch)
tree       1aae9b40f31130bdad07184b92e73be61b1e46ea
parent     8f54b9374e56fd08f990fe2fe4dc8b3bde1c00d2 (diff)
download   gs201-a739c7f7e74e637cf882fa9ee73dee0aeb4374ee.tar.gz
Merge branch 'gs201-release' to android13-gs-pixel-5.10
* gs201-release:
  gxp: Allow registering eventfds for mailbox responses
  Bug: 223917403
  gxp: support MUX clock switch in power state api
  Bug: 222625456
  gxp: Add power_flags to the mailbox command IOCTL
  Bug: 221320387
  gxp: support power state with mailbox IOCTL
  Bug: 220975116

Signed-off-by: Neela Chithirala <chithiralan@google.com>
Change-Id: I5ca5d964dcff4121d6a0aa0accf4f8a0ed454111
-rw-r--r--  gxp-client.c     28
-rw-r--r--  gxp-client.h      6
-rw-r--r--  gxp-csrs.h        1
-rw-r--r--  gxp-debugfs.c     4
-rw-r--r--  gxp-internal.h    1
-rw-r--r--  gxp-mailbox.c    53
-rw-r--r--  gxp-mailbox.h    11
-rw-r--r--  gxp-platform.c  205
-rw-r--r--  gxp-pm.c         55
-rw-r--r--  gxp-pm.h         42
-rw-r--r--  gxp-vd.c         28
-rw-r--r--  gxp-vd.h          2
-rw-r--r--  gxp.h           111
13 files changed, 507 insertions, 40 deletions
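
As a quick orientation before the patch body: the new GXP_MAILBOX_COMMAND ioctl added here lets a client vote for a DSP power state and a memory-interface power state for the duration of a single mailbox command. The sketch below shows how userspace might call it; it is an illustrative, hedged example only — the gxp_fd descriptor and the dev_addr/size arguments (a buffer previously mapped with GXP_MAP_BUFFER) are assumptions, not part of this change.

/*
 * Hedged userspace sketch (not from this patch): dispatch a mailbox command
 * with per-command power-state votes via the new GXP_MAILBOX_COMMAND ioctl.
 * Assumes `gxp_fd` is an open GXP device fd and `dev_addr`/`size` describe a
 * buffer already mapped through GXP_MAP_BUFFER.
 */
#include <linux/types.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include "gxp.h"

static int gxp_dispatch(int gxp_fd, __u64 dev_addr, __u32 size)
{
	struct gxp_mailbox_command_ioctl cmd = {
		.virtual_core_id = 0,
		.device_address = dev_addr,
		.size = size,
		/* Vote for UUD while this command runs; the vote is dropped
		 * when the response arrives or the command times out. */
		.gxp_power_state = GXP_POWER_STATE_UUD,
		.memory_power_state = MEMORY_POWER_STATE_UNDEFINED,
		.flags = 0,
		.power_flags = 0,	/* reserved, pass 0 */
	};

	if (ioctl(gxp_fd, GXP_MAILBOX_COMMAND, &cmd))
		return -1;

	/* Match later GXP_MAILBOX_RESPONSE results against this number. */
	printf("dispatched, seq=%llu\n",
	       (unsigned long long)cmd.sequence_number);
	return 0;
}
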
diff --git a/gxp-client.c b/gxp-client.c
index 73aafc1..0fccea6 100644
--- a/gxp-client.c
+++ b/gxp-client.c
@@ -36,6 +36,7 @@ struct gxp_client *gxp_client_create(struct gxp_dev *gxp)
void gxp_client_destroy(struct gxp_client *client)
{
struct gxp_dev *gxp = client->gxp;
+ int core;
down_write(&gxp->vd_semaphore);
@@ -50,6 +51,11 @@ void gxp_client_destroy(struct gxp_client *client)
if (client->has_vd_wakelock)
gxp_vd_stop(client->vd);
+ for (core = 0; core < GXP_NUM_CORES; core++) {
+ if (client->mb_eventfds[core])
+ eventfd_ctx_put(client->mb_eventfds[core]);
+ }
+
up_write(&gxp->vd_semaphore);
if (client->has_block_wakelock) {
@@ -65,3 +71,25 @@ void gxp_client_destroy(struct gxp_client *client)
kfree(client);
}
+
+void gxp_client_signal_mailbox_eventfd(struct gxp_client *client,
+ uint phys_core)
+{
+ int virtual_core;
+
+ down_read(&client->semaphore);
+
+ virtual_core = gxp_vd_phys_core_to_virt_core(client->vd, phys_core);
+ if (unlikely(virtual_core < 0)) {
+ dev_err(client->gxp->dev,
+ "%s: core %d is not part of client's virtual device.\n",
+ __func__, phys_core);
+ goto out;
+ }
+
+ if (client->mb_eventfds[virtual_core])
+ eventfd_signal(client->mb_eventfds[virtual_core], 1);
+
+out:
+ up_read(&client->semaphore);
+}
diff --git a/gxp-client.h b/gxp-client.h
index ae85f3b..34c19eb 100644
--- a/gxp-client.h
+++ b/gxp-client.h
@@ -7,6 +7,7 @@
#ifndef __GXP_CLIENT_H__
#define __GXP_CLIENT_H__
+#include <linux/eventfd.h>
#include <linux/rwsem.h>
#include <linux/types.h>
@@ -34,6 +35,8 @@ struct gxp_client {
struct gxp_virtual_device *vd;
bool tpu_mbx_allocated;
struct gxp_tpu_mbx_desc mbx_desc;
+
+ struct eventfd_ctx *mb_eventfds[GXP_NUM_CORES];
};
/*
@@ -47,4 +50,7 @@ struct gxp_client *gxp_client_create(struct gxp_dev *gxp);
*/
void gxp_client_destroy(struct gxp_client *client);
+void gxp_client_signal_mailbox_eventfd(struct gxp_client *client,
+ uint phys_core);
+
#endif /* __GXP_CLIENT_H__ */
diff --git a/gxp-csrs.h b/gxp-csrs.h
index 95100d1..662837f 100644
--- a/gxp-csrs.h
+++ b/gxp-csrs.h
@@ -11,6 +11,7 @@
#define GXP_REG_DOORBELLS_CLEAR_WRITEMASK 0x1
#define GXP_CMU_OFFSET 0x200000
+#define GXP_CMU_SIZE 0x1000
enum gxp_csrs {
GXP_REG_LPM_VERSION = 0x40000,
diff --git a/gxp-debugfs.c b/gxp-debugfs.c
index 480dca2..ef2c072 100644
--- a/gxp-debugfs.c
+++ b/gxp-debugfs.c
@@ -268,8 +268,8 @@ static int gxp_blk_powerstate_set(void *data, u64 val)
return -ENODEV;
}
- if (val >= AUR_DVFS_MIN_STATE) {
- ret = gxp_pm_blk_set_state_acpm(gxp, val);
+ if (val >= AUR_DVFS_MIN_RATE) {
+ ret = gxp_pm_blk_set_rate_acpm(gxp, val);
} else {
ret = -EINVAL;
dev_err(gxp->dev, "Incorrect state %llu\n", val);
diff --git a/gxp-internal.h b/gxp-internal.h
index 1fa71d1..96ca1d4 100644
--- a/gxp-internal.h
+++ b/gxp-internal.h
@@ -68,6 +68,7 @@ struct gxp_dev {
struct gxp_mapped_resource fwbufs[GXP_NUM_CORES]; /* FW carveout */
struct gxp_mapped_resource fwdatabuf; /* Shared FW data carveout */
struct gxp_mapped_resource coredumpbuf; /* core dump carveout */
+ struct gxp_mapped_resource cmu; /* CMU CSRs */
struct gxp_mailbox_manager *mailbox_mgr;
struct gxp_power_manager *power_mgr;
/*
diff --git a/gxp-mailbox.c b/gxp-mailbox.c
index 7a42d71..5cd568b 100644
--- a/gxp-mailbox.c
+++ b/gxp-mailbox.c
@@ -17,6 +17,7 @@
#include "gxp-internal.h"
#include "gxp-mailbox.h"
#include "gxp-mailbox-driver.h"
+#include "gxp-pm.h"
#include "gxp-tmp.h"
/* Timeout of 8s by default to account for slower emulation platforms */
@@ -237,6 +238,17 @@ static void gxp_mailbox_handle_response(struct gxp_mailbox *mailbox,
struct gxp_async_response,
resp);
cancel_delayed_work(&async_resp->timeout_work);
+ if (async_resp->memory_power_state !=
+ AUR_MEM_UNDEFINED)
+ gxp_pm_update_requested_memory_power_state(
+ async_resp->mailbox->gxp,
+ async_resp->memory_power_state,
+ AUR_MEM_UNDEFINED);
+ if (async_resp->gxp_power_state != AUR_OFF)
+ gxp_pm_update_requested_power_state(
+ async_resp->mailbox->gxp,
+ async_resp->gxp_power_state,
+ AUR_OFF);
spin_lock_irqsave(async_resp->dest_queue_lock,
flags);
list_add_tail(&async_resp->list_entry,
@@ -250,6 +262,11 @@ static void gxp_mailbox_handle_response(struct gxp_mailbox *mailbox,
async_resp->dest_queue = NULL;
spin_unlock_irqrestore(
async_resp->dest_queue_lock, flags);
+ if (async_resp->client) {
+ gxp_client_signal_mailbox_eventfd(
+ async_resp->client,
+ mailbox->core_id);
+ }
wake_up(async_resp->dest_queue_waitq);
}
kfree(cur);
@@ -727,6 +744,23 @@ static void async_cmd_timeout_work(struct work_struct *work)
async_resp->resp.status = GXP_RESP_CANCELLED;
list_add_tail(&async_resp->list_entry, async_resp->dest_queue);
spin_unlock_irqrestore(async_resp->dest_queue_lock, flags);
+
+ if (async_resp->memory_power_state != AUR_MEM_UNDEFINED)
+ gxp_pm_update_requested_memory_power_state(
+ async_resp->mailbox->gxp,
+ async_resp->memory_power_state,
+ AUR_MEM_UNDEFINED);
+ if (async_resp->gxp_power_state != AUR_OFF)
+ gxp_pm_update_requested_power_state(
+ async_resp->mailbox->gxp,
+ async_resp->gxp_power_state, AUR_OFF);
+
+ if (async_resp->client) {
+ gxp_client_signal_mailbox_eventfd(
+ async_resp->client,
+ async_resp->mailbox->core_id);
+ }
+
wake_up(async_resp->dest_queue_waitq);
} else {
spin_unlock_irqrestore(async_resp->dest_queue_lock, flags);
@@ -737,7 +771,9 @@ int gxp_mailbox_execute_cmd_async(struct gxp_mailbox *mailbox,
struct gxp_command *cmd,
struct list_head *resp_queue,
spinlock_t *queue_lock,
- wait_queue_head_t *queue_waitq)
+ wait_queue_head_t *queue_waitq,
+ uint gxp_power_state, uint memory_power_state,
+ struct gxp_client *client)
{
struct gxp_async_response *async_resp;
int ret;
@@ -750,11 +786,20 @@ int gxp_mailbox_execute_cmd_async(struct gxp_mailbox *mailbox,
async_resp->dest_queue = resp_queue;
async_resp->dest_queue_lock = queue_lock;
async_resp->dest_queue_waitq = queue_waitq;
+ async_resp->gxp_power_state = gxp_power_state;
+ async_resp->memory_power_state = memory_power_state;
+ async_resp->client = client;
INIT_DELAYED_WORK(&async_resp->timeout_work, async_cmd_timeout_work);
schedule_delayed_work(&async_resp->timeout_work,
msecs_to_jiffies(MAILBOX_TIMEOUT));
+ if (gxp_power_state != AUR_OFF)
+ gxp_pm_update_requested_power_state(mailbox->gxp, AUR_OFF,
+ gxp_power_state);
+ if (memory_power_state != AUR_MEM_UNDEFINED)
+ gxp_pm_update_requested_memory_power_state(
+ mailbox->gxp, AUR_MEM_UNDEFINED, memory_power_state);
ret = gxp_mailbox_enqueue_cmd(mailbox, cmd, &async_resp->resp,
/* resp_is_async = */ true);
if (ret)
@@ -763,6 +808,12 @@ int gxp_mailbox_execute_cmd_async(struct gxp_mailbox *mailbox,
return 0;
err_free_resp:
+ if (memory_power_state != AUR_MEM_UNDEFINED)
+ gxp_pm_update_requested_memory_power_state(
+ mailbox->gxp, memory_power_state, AUR_MEM_UNDEFINED);
+ if (gxp_power_state != AUR_OFF)
+ gxp_pm_update_requested_power_state(mailbox->gxp,
+ gxp_power_state, AUR_OFF);
cancel_delayed_work_sync(&async_resp->timeout_work);
kfree(async_resp);
return ret;
diff --git a/gxp-mailbox.h b/gxp-mailbox.h
index 06c9718..99ad7f9 100644
--- a/gxp-mailbox.h
+++ b/gxp-mailbox.h
@@ -7,6 +7,7 @@
#ifndef __GXP_MAILBOX_H__
#define __GXP_MAILBOX_H__
+#include "gxp-client.h"
#include "gxp-internal.h"
/* Command/Response Structures */
@@ -80,6 +81,12 @@ struct gxp_async_response {
struct list_head *dest_queue;
spinlock_t *dest_queue_lock;
wait_queue_head_t *dest_queue_waitq;
+ /* Specified power state vote during the command execution */
+ uint gxp_power_state;
+ /* Specified memory power state vote during the command execution */
+ uint memory_power_state;
+ /* gxp_client to signal when the response completes. May be NULL */
+ struct gxp_client *client;
};
enum gxp_response_status {
@@ -177,7 +184,9 @@ int gxp_mailbox_execute_cmd_async(struct gxp_mailbox *mailbox,
struct gxp_command *cmd,
struct list_head *resp_queue,
spinlock_t *queue_lock,
- wait_queue_head_t *queue_waitq);
+ wait_queue_head_t *queue_waitq,
+ uint gxp_power_state, uint memory_power_state,
+ struct gxp_client *client);
int gxp_mailbox_register_interrupt_handler(struct gxp_mailbox *mailbox,
u32 int_bit,
diff --git a/gxp-platform.c b/gxp-platform.c
index ba2093e..e653e84 100644
--- a/gxp-platform.c
+++ b/gxp-platform.c
@@ -64,9 +64,9 @@ static struct platform_device gxp_sscd_dev = {
#endif // CONFIG_SUBSYSTEM_COREDUMP
/* Mapping from GXP_POWER_STATE_* to enum aur_power_state in gxp-pm.h */
-static const uint aur_state_array[GXP_POWER_STATE_NOM + 1] = { AUR_OFF, AUR_UUD,
- AUR_SUD, AUR_UD,
- AUR_NOM };
+static const uint aur_state_array[GXP_POWER_STATE_READY + 1] = {
+ AUR_OFF, AUR_UUD, AUR_SUD, AUR_UD, AUR_NOM, AUR_READY
+};
/* Mapping from MEMORY_POWER_STATE_* to enum aur_memory_power_state in gxp-pm.h */
static const uint aur_memory_state_array[MEMORY_POWER_STATE_MAX + 1] = {
AUR_MEM_UNDEFINED, AUR_MEM_MIN, AUR_MEM_VERY_LOW, AUR_MEM_LOW,
@@ -295,6 +295,96 @@ out:
return ret;
}
+static int
+gxp_mailbox_command_compat(struct gxp_client *client,
+ struct gxp_mailbox_command_compat_ioctl __user *argp)
+{
+ struct gxp_dev *gxp = client->gxp;
+ struct gxp_mailbox_command_compat_ioctl ibuf;
+ struct gxp_command cmd;
+ struct buffer_descriptor buffer;
+ int phys_core;
+ int ret = 0;
+ uint gxp_power_state, memory_power_state;
+
+ if (copy_from_user(&ibuf, argp, sizeof(ibuf))) {
+ dev_err(gxp->dev,
+ "Unable to copy ioctl data from user-space\n");
+ return -EFAULT;
+ }
+
+ /* Caller must hold VIRTUAL_DEVICE wakelock */
+ down_read(&client->semaphore);
+
+ if (!client->has_vd_wakelock) {
+ dev_err(gxp->dev,
+ "GXP_MAILBOX_COMMAND requires the client hold a VIRTUAL_DEVICE wakelock\n");
+ ret = -ENODEV;
+ goto out;
+ }
+
+ phys_core = gxp_vd_virt_core_to_phys_core(client->vd, ibuf.virtual_core_id);
+ if (phys_core < 0) {
+ dev_err(gxp->dev,
+ "Mailbox command failed: Invalid virtual core id (%u)\n",
+ ibuf.virtual_core_id);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (!gxp_is_fw_running(gxp, phys_core)) {
+ dev_err(gxp->dev,
+ "Cannot process mailbox command for core %d when firmware isn't running\n",
+ phys_core);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (gxp->mailbox_mgr == NULL || gxp->mailbox_mgr->mailboxes == NULL ||
+ gxp->mailbox_mgr->mailboxes[phys_core] == NULL) {
+ dev_err(gxp->dev, "Mailbox not initialized for core %d\n",
+ phys_core);
+ ret = -EIO;
+ goto out;
+ }
+
+ /* Pack the command structure */
+ buffer.address = ibuf.device_address;
+ buffer.size = ibuf.size;
+ buffer.flags = ibuf.flags;
+ /* cmd.seq is assigned by mailbox implementation */
+ cmd.code = GXP_MBOX_CODE_DISPATCH; /* All IOCTL commands are dispatch */
+ cmd.priority = 0; /* currently unused */
+ cmd.buffer_descriptor = buffer;
+ gxp_power_state = AUR_OFF;
+ memory_power_state = AUR_MEM_UNDEFINED;
+
+ down_read(&gxp->vd_semaphore);
+ ret = gxp_mailbox_execute_cmd_async(
+ gxp->mailbox_mgr->mailboxes[phys_core], &cmd,
+ &gxp->mailbox_resp_queues[phys_core], &gxp->mailbox_resps_lock,
+ &gxp->mailbox_resp_waitqs[phys_core], gxp_power_state,
+ memory_power_state, client);
+ up_read(&gxp->vd_semaphore);
+ if (ret) {
+ dev_err(gxp->dev, "Failed to enqueue mailbox command (ret=%d)\n",
+ ret);
+ goto out;
+ }
+
+ ibuf.sequence_number = cmd.seq;
+ if (copy_to_user(argp, &ibuf, sizeof(ibuf))) {
+ dev_err(gxp->dev, "Failed to copy back sequence number!\n");
+ ret = -EFAULT;
+ goto out;
+ }
+
+out:
+ up_read(&client->semaphore);
+
+ return ret;
+}
+
static int gxp_mailbox_command(struct gxp_client *client,
struct gxp_mailbox_command_ioctl __user *argp)
{
@@ -304,12 +394,28 @@ static int gxp_mailbox_command(struct gxp_client *client,
struct buffer_descriptor buffer;
int phys_core;
int ret = 0;
+ uint gxp_power_state, memory_power_state;
if (copy_from_user(&ibuf, argp, sizeof(ibuf))) {
dev_err(gxp->dev,
"Unable to copy ioctl data from user-space\n");
return -EFAULT;
}
+ if (ibuf.gxp_power_state == GXP_POWER_STATE_OFF) {
+ dev_err(gxp->dev,
+ "GXP_POWER_STATE_OFF is not a valid value when executing a mailbox command\n");
+ return -EINVAL;
+ }
+ if (ibuf.gxp_power_state < GXP_POWER_STATE_OFF ||
+ ibuf.gxp_power_state > GXP_POWER_STATE_NOM) {
+ dev_err(gxp->dev, "Requested power state is invalid\n");
+ return -EINVAL;
+ }
+ if (ibuf.memory_power_state < MEMORY_POWER_STATE_UNDEFINED ||
+ ibuf.memory_power_state > MEMORY_POWER_STATE_MAX) {
+ dev_err(gxp->dev, "Requested memory power state is invalid\n");
+ return -EINVAL;
+ }
/* Caller must hold VIRTUAL_DEVICE wakelock */
down_read(&client->semaphore);
@@ -354,12 +460,15 @@ static int gxp_mailbox_command(struct gxp_client *client,
cmd.code = GXP_MBOX_CODE_DISPATCH; /* All IOCTL commands are dispatch */
cmd.priority = 0; /* currently unused */
cmd.buffer_descriptor = buffer;
+ gxp_power_state = aur_state_array[ibuf.gxp_power_state];
+ memory_power_state = aur_memory_state_array[ibuf.memory_power_state];
down_read(&gxp->vd_semaphore);
ret = gxp_mailbox_execute_cmd_async(
gxp->mailbox_mgr->mailboxes[phys_core], &cmd,
&gxp->mailbox_resp_queues[phys_core], &gxp->mailbox_resps_lock,
- &gxp->mailbox_resp_waitqs[phys_core]);
+ &gxp->mailbox_resp_waitqs[phys_core], gxp_power_state,
+ memory_power_state, client);
up_read(&gxp->vd_semaphore);
if (ret) {
dev_err(gxp->dev, "Failed to enqueue mailbox command (ret=%d)\n",
@@ -935,7 +1044,7 @@ static int gxp_acquire_wake_lock(struct gxp_client *client,
return -EINVAL;
}
if (ibuf.gxp_power_state < GXP_POWER_STATE_OFF ||
- ibuf.gxp_power_state > GXP_POWER_STATE_NOM) {
+ ibuf.gxp_power_state > GXP_POWER_STATE_READY) {
dev_err(gxp->dev, "Requested power state is invalid\n");
return -EINVAL;
}
@@ -1168,6 +1277,61 @@ out:
return ret;
}
+static int gxp_register_mailbox_eventfd(
+ struct gxp_client *client,
+ struct gxp_register_mailbox_eventfd_ioctl __user *argp)
+{
+ struct gxp_register_mailbox_eventfd_ioctl ibuf;
+ struct eventfd_ctx *new_ctx;
+
+ if (copy_from_user(&ibuf, argp, sizeof(ibuf)))
+ return -EFAULT;
+
+ if (ibuf.virtual_core_id >= client->vd->num_cores)
+ return -EINVAL;
+
+ /* Make sure the provided eventfd is valid */
+ new_ctx = eventfd_ctx_fdget(ibuf.eventfd);
+ if (IS_ERR(new_ctx))
+ return PTR_ERR(new_ctx);
+
+ down_write(&client->semaphore);
+
+ /* Set the new eventfd, replacing any existing one */
+ if (client->mb_eventfds[ibuf.virtual_core_id])
+ eventfd_ctx_put(client->mb_eventfds[ibuf.virtual_core_id]);
+
+ client->mb_eventfds[ibuf.virtual_core_id] = new_ctx;
+
+ up_write(&client->semaphore);
+
+ return 0;
+}
+
+static int gxp_unregister_mailbox_eventfd(
+ struct gxp_client *client,
+ struct gxp_register_mailbox_eventfd_ioctl __user *argp)
+{
+ struct gxp_register_mailbox_eventfd_ioctl ibuf;
+
+ if (copy_from_user(&ibuf, argp, sizeof(ibuf)))
+ return -EFAULT;
+
+ if (ibuf.virtual_core_id >= client->vd->num_cores)
+ return -EINVAL;
+
+ down_write(&client->semaphore);
+
+ if (client->mb_eventfds[ibuf.virtual_core_id])
+ eventfd_ctx_put(client->mb_eventfds[ibuf.virtual_core_id]);
+
+ client->mb_eventfds[ibuf.virtual_core_id] = NULL;
+
+ up_write(&client->semaphore);
+
+ return 0;
+}
+
static long gxp_ioctl(struct file *file, uint cmd, ulong arg)
{
struct gxp_client *client = file->private_data;
@@ -1184,8 +1348,8 @@ static long gxp_ioctl(struct file *file, uint cmd, ulong arg)
case GXP_SYNC_BUFFER:
ret = gxp_sync_buffer(client, argp);
break;
- case GXP_MAILBOX_COMMAND:
- ret = gxp_mailbox_command(client, argp);
+ case GXP_MAILBOX_COMMAND_COMPAT:
+ ret = gxp_mailbox_command_compat(client, argp);
break;
case GXP_MAILBOX_RESPONSE:
ret = gxp_mailbox_response(client, argp);
@@ -1241,6 +1405,15 @@ static long gxp_ioctl(struct file *file, uint cmd, ulong arg)
case GXP_UNMAP_DMABUF:
ret = gxp_unmap_dmabuf(client, argp);
break;
+ case GXP_MAILBOX_COMMAND:
+ ret = gxp_mailbox_command(client, argp);
+ break;
+ case GXP_REGISTER_MAILBOX_EVENTFD:
+ ret = gxp_register_mailbox_eventfd(client, argp);
+ break;
+ case GXP_UNREGISTER_MAILBOX_EVENTFD:
+ ret = gxp_unregister_mailbox_eventfd(client, argp);
+ break;
default:
ret = -ENOTTY; /* unknown command */
}
@@ -1330,6 +1503,24 @@ static int gxp_platform_probe(struct platform_device *pdev)
goto err;
}
+ r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cmu");
+ if (!IS_ERR_OR_NULL(r)) {
+ gxp->cmu.paddr = r->start;
+ gxp->cmu.size = resource_size(r);
+ gxp->cmu.vaddr = devm_ioremap_resource(dev, r);
+ }
+ /*
+ * TODO (b/224685748): Remove this block after CMU CSR is supported
+ * in device tree config.
+ */
+ if (IS_ERR_OR_NULL(r) || IS_ERR_OR_NULL(gxp->cmu.vaddr)) {
+ gxp->cmu.paddr = gxp->regs.paddr - GXP_CMU_OFFSET;
+ gxp->cmu.size = GXP_CMU_SIZE;
+ gxp->cmu.vaddr = devm_ioremap(dev, gxp->cmu.paddr, gxp->cmu.size);
+ if (IS_ERR_OR_NULL(gxp->cmu.vaddr))
+ dev_warn(dev, "Failed to map CMU registers\n");
+ }
+
ret = gxp_pm_init(gxp);
if (ret) {
dev_err(dev, "Failed to init power management (ret=%d)\n", ret);
diff --git a/gxp-pm.c b/gxp-pm.c
index 7ca23a5..c315a46 100644
--- a/gxp-pm.c
+++ b/gxp-pm.c
@@ -22,9 +22,13 @@
#include "gxp-lpm.h"
#include "gxp-pm.h"
-static const enum aur_power_state aur_state_array[] = { AUR_OFF, AUR_UUD,
- AUR_SUD, AUR_UD,
- AUR_NOM };
+/*
+ * The order of this array decides the voting priority; entries should be in
+ * increasing order of frequency.
+ */
+static const enum aur_power_state aur_state_array[] = { AUR_OFF, AUR_READY,
+ AUR_UUD, AUR_SUD,
+ AUR_UD, AUR_NOM };
static const uint aur_memory_state_array[] = {
AUR_MEM_UNDEFINED, AUR_MEM_MIN, AUR_MEM_VERY_LOW, AUR_MEM_LOW,
AUR_MEM_HIGH, AUR_MEM_VERY_HIGH, AUR_MEM_MAX
@@ -93,23 +97,45 @@ static int gxp_pm_blkpwr_down(struct gxp_dev *gxp)
return ret;
}
-int gxp_pm_blk_set_state_acpm(struct gxp_dev *gxp, unsigned long state)
+static int gxp_pm_blk_set_state_acpm(struct gxp_dev *gxp, unsigned long state)
+{
+ return gxp_pm_blk_set_rate_acpm(gxp, aur_power_state2rate[state]);
+}
+
+int gxp_pm_blk_set_rate_acpm(struct gxp_dev *gxp, unsigned long rate)
{
int ret = 0;
#if defined(CONFIG_GXP_CLOUDRIPPER)
- ret = exynos_acpm_set_rate(AUR_DVFS_DOMAIN, state);
- dev_dbg(gxp->dev, "%s: state %lu, ret %d\n", __func__, state, ret);
+ ret = exynos_acpm_set_rate(AUR_DVFS_DOMAIN, rate);
+ dev_dbg(gxp->dev, "%s: rate %lu, ret %d\n", __func__, rate, ret);
#endif
return ret;
}
+static void set_cmu_mux_state(struct gxp_dev *gxp, u32 val)
+{
+ writel(val << 4, gxp->cmu.vaddr + PLL_CON0_PLL_AUR);
+ writel(val << 4, gxp->cmu.vaddr + PLL_CON0_NOC_USER);
+}
+
static void gxp_pm_blk_set_state_acpm_async(struct work_struct *work)
{
struct gxp_set_acpm_state_work *set_acpm_state_work =
container_of(work, struct gxp_set_acpm_state_work, work);
mutex_lock(&set_acpm_state_work->gxp->power_mgr->pm_lock);
+ /*
+ * This prev_state may be out of date with the manager's current state,
+ * but we don't need curr_state here. curr_state is the last scheduled
+ * state, while prev_state was the last actually requested state. It's
+ * true because all request are executed synchronously and executed in
+ * FIFO order.
+ */
+ if (set_acpm_state_work->prev_state == AUR_READY)
+ set_cmu_mux_state(set_acpm_state_work->gxp, AUR_CMU_MUX_NORMAL);
+ else if (set_acpm_state_work->state == AUR_READY)
+ set_cmu_mux_state(set_acpm_state_work->gxp, AUR_CMU_MUX_LOW);
gxp_pm_blk_set_state_acpm(set_acpm_state_work->gxp, set_acpm_state_work->state);
mutex_unlock(&set_acpm_state_work->gxp->power_mgr->pm_lock);
}
@@ -167,6 +193,12 @@ int gxp_pm_blk_off(struct gxp_dev *gxp)
mutex_unlock(&gxp->power_mgr->pm_lock);
return ret;
}
+ /*
+ * Before the block is off, CMUMUX cannot be low. Otherwise, powering on
+ * cores will fail later.
+ */
+ if (gxp->power_mgr->curr_state == AUR_READY)
+ set_cmu_mux_state(gxp, AUR_CMU_MUX_NORMAL);
/* Shutdown TOP's PSM */
gxp_lpm_destroy(gxp);
@@ -240,15 +272,16 @@ static int gxp_pm_req_state_locked(struct gxp_dev *gxp, enum aur_power_state sta
return -EINVAL;
}
if (state != gxp->power_mgr->curr_state) {
- gxp->power_mgr->curr_state = state;
if (state == AUR_OFF) {
dev_warn(gxp->dev, "It is not supported to request AUR_OFF\n");
} else {
- gxp->power_mgr->set_acpm_rate_work.gxp = gxp;
- gxp->power_mgr->set_acpm_rate_work.state = state;
+ gxp->power_mgr->set_acpm_state_work.gxp = gxp;
+ gxp->power_mgr->set_acpm_state_work.state = state;
+ gxp->power_mgr->set_acpm_state_work.prev_state = gxp->power_mgr->curr_state;
queue_work(gxp->power_mgr->wq,
- &gxp->power_mgr->set_acpm_rate_work.work);
+ &gxp->power_mgr->set_acpm_state_work.work);
}
+ gxp->power_mgr->curr_state = state;
}
return 0;
@@ -481,7 +514,7 @@ int gxp_pm_init(struct gxp_dev *gxp)
refcount_set(&(mgr->blk_wake_ref), 0);
mgr->ops = &gxp_aur_ops;
gxp->power_mgr = mgr;
- INIT_WORK(&mgr->set_acpm_rate_work.work, gxp_pm_blk_set_state_acpm_async);
+ INIT_WORK(&mgr->set_acpm_state_work.work, gxp_pm_blk_set_state_acpm_async);
INIT_WORK(&mgr->req_pm_qos_work.work, gxp_pm_req_pm_qos_async);
gxp->power_mgr->wq =
create_singlethread_workqueue("gxp_power_work_queue");
diff --git a/gxp-pm.h b/gxp-pm.h
index e5bbdad..9834247 100644
--- a/gxp-pm.h
+++ b/gxp-pm.h
@@ -11,14 +11,17 @@
#include <linux/refcount.h>
#include <soc/google/exynos_pm_qos.h>
-#define AUR_DVFS_MIN_STATE 178000
+#define AUR_DVFS_MIN_RATE 178000
+static const uint aur_power_state2rate[] = { 0, 178000, 373000,
+ 750000, 1160000, 178000 };
enum aur_power_state {
AUR_OFF = 0,
- AUR_UUD = 178000,
- AUR_SUD = 373000,
- AUR_UD = 750000,
- AUR_NOM = 1160000,
+ AUR_UUD = 1,
+ AUR_SUD = 2,
+ AUR_UD = 3,
+ AUR_NOM = 4,
+ AUR_READY = 5,
};
enum aur_memory_power_state {
@@ -31,12 +34,22 @@ enum aur_memory_power_state {
AUR_MEM_MAX = 6,
};
-#define AUR_NUM_POWER_STATE 5
+enum aur_power_cmu_mux_state {
+ AUR_CMU_MUX_LOW = 0,
+ AUR_CMU_MUX_NORMAL = 1,
+};
+
+#define AUR_NUM_POWER_STATE (AUR_MAX_ALLOW_STATE + 1)
#define AUR_NUM_MEMORY_POWER_STATE (AUR_MAX_ALLOW_MEMORY_STATE + 1)
#define AUR_INIT_DVFS_STATE AUR_UUD
-#define AUR_MAX_ALLOW_STATE AUR_NOM
+/*
+ * These macros mean the maximum valid enum value of aur_power_state and
+ * aur_memory_power_state, not necessarily the state with the maximum power
+ * level.
+ */
+#define AUR_MAX_ALLOW_STATE AUR_READY
#define AUR_MAX_ALLOW_MEMORY_STATE AUR_MEM_MAX
struct gxp_pm_device_ops {
@@ -50,6 +63,7 @@ struct gxp_set_acpm_state_work {
struct work_struct work;
struct gxp_dev *gxp;
unsigned long state;
+ unsigned long prev_state;
};
struct gxp_req_pm_qos_work {
@@ -68,7 +82,7 @@ struct gxp_power_manager {
int curr_memory_state;
refcount_t blk_wake_ref;
struct gxp_pm_device_ops *ops;
- struct gxp_set_acpm_state_work set_acpm_rate_work;
+ struct gxp_set_acpm_state_work set_acpm_state_work;
struct gxp_req_pm_qos_work req_pm_qos_work;
struct workqueue_struct *wq;
/* INT/MIF requests for memory bandwidth */
@@ -189,19 +203,19 @@ int gxp_pm_init(struct gxp_dev *gxp);
int gxp_pm_destroy(struct gxp_dev *gxp);
/**
- * gxp_pm_blk_set_state_acpm() - API for setting the block-level DVFS state.
+ * gxp_pm_blk_set_rate_acpm() - API for setting the block-level DVFS rate.
* This function can be called at any point after block power on.
* @gxp: The GXP device to operate
- * @state: State number in khz that need to be set.
- * Supported state is in enum aur_power_state,
- * if experiment is needed for unsupported state
+ * @rate: Rate in kHz that needs to be set.
+ * Supported rates are listed in aur_power_state2rate;
+ * if an unsupported rate needs to be experimented with,
* please refer to Lassen's ECT table.
*
* Return:
* * 0 - Set finished successfully
- * * Other - Set state encounter issue in exynos_acpm_set_rate
+ * * Other - Setting the rate encountered an issue in exynos_acpm_set_rate
*/
-int gxp_pm_blk_set_state_acpm(struct gxp_dev *gxp, unsigned long state);
+int gxp_pm_blk_set_rate_acpm(struct gxp_dev *gxp, unsigned long rate);
/**
* gxp_pm_blk_get_state_acpm() - API for getting
diff --git a/gxp-vd.c b/gxp-vd.c
index a94461b..f632a52 100644
--- a/gxp-vd.c
+++ b/gxp-vd.c
@@ -243,3 +243,31 @@ out:
return phys_core_list;
}
+
+int gxp_vd_phys_core_to_virt_core(struct gxp_virtual_device *vd, u16 phys_core)
+{
+ struct gxp_dev *gxp = vd->gxp;
+ int virt_core = 0;
+ uint core;
+
+ down_read(&gxp->vd_semaphore);
+
+ if (gxp->core_to_vd[phys_core] != vd) {
+ virt_core = -EINVAL;
+ goto out;
+ }
+
+ /*
+ * A core's virtual core ID == the number of physical cores in the same
+ * virtual device with a lower physical core ID than its own.
+ */
+ for (core = 0; core < phys_core; core++) {
+ if (gxp->core_to_vd[core] == vd)
+ virt_core++;
+ }
+
+out:
+ up_read(&gxp->vd_semaphore);
+
+ return virt_core;
+}
diff --git a/gxp-vd.h b/gxp-vd.h
index ac7ec5f..c0a3f49 100644
--- a/gxp-vd.h
+++ b/gxp-vd.h
@@ -88,4 +88,6 @@ int gxp_vd_virt_core_to_phys_core(struct gxp_virtual_device *vd, u16 virt_core);
uint gxp_vd_virt_core_list_to_phys_core_list(struct gxp_virtual_device *vd,
u16 virt_core_list);
+int gxp_vd_phys_core_to_virt_core(struct gxp_virtual_device *vd, u16 phys_core);
+
#endif /* __GXP_VD_H__ */
diff --git a/gxp.h b/gxp.h
index ccfb614..001bb89 100644
--- a/gxp.h
+++ b/gxp.h
@@ -131,7 +131,7 @@ struct gxp_sync_ioctl {
#define GXP_SYNC_BUFFER \
_IOW(GXP_IOCTL_BASE, 2, struct gxp_sync_ioctl)
-struct gxp_mailbox_command_ioctl {
+struct gxp_mailbox_command_compat_ioctl {
/*
* Input:
* The virtual core to dispatch the command to.
@@ -167,8 +167,8 @@ struct gxp_mailbox_command_ioctl {
*
* The client must hold a VIRTUAL_DEVICE wakelock.
*/
-#define GXP_MAILBOX_COMMAND \
- _IOW(GXP_IOCTL_BASE, 3, struct gxp_mailbox_command_ioctl)
+#define GXP_MAILBOX_COMMAND_COMPAT \
+ _IOW(GXP_IOCTL_BASE, 3, struct gxp_mailbox_command_compat_ioctl)
/* GXP mailbox response error code values */
#define GXP_RESPONSE_ERROR_NONE (0)
@@ -457,13 +457,17 @@ struct gxp_register_telemetry_eventfd_ioctl {
/*
* DSP subsystem Power state values for use as `gxp_power_state` in
- * `struct gxp_acquire_wakelock_ioctl`
+ * `struct gxp_acquire_wakelock_ioctl`.
+ * Note: GXP_POWER_STATE_READY is the state to keep the BLOCK idle. By setting
+ * this state, the driver will request the UUD frequency and switch the CMUMUX
+ * clocks to 25 MHz to save additional power.
*/
#define GXP_POWER_STATE_OFF 0
#define GXP_POWER_STATE_UUD 1
#define GXP_POWER_STATE_SUD 2
#define GXP_POWER_STATE_UD 3
#define GXP_POWER_STATE_NOM 4
+#define GXP_POWER_STATE_READY 5
/*
* Memory interface power state values for use as `memory_power_state` in
@@ -612,4 +616,103 @@ struct gxp_map_dmabuf_ioctl {
*/
#define GXP_UNMAP_DMABUF _IOW(GXP_IOCTL_BASE, 21, struct gxp_map_dmabuf_ioctl)
+struct gxp_mailbox_command_ioctl {
+ /*
+ * Input:
+ * The virtual core to dispatch the command to.
+ */
+ __u16 virtual_core_id;
+ /*
+ * Output:
+ * The sequence number assigned to this command. The caller can use
+ * this value to match responses fetched via `GXP_MAILBOX_RESPONSE`
+ * with this command.
+ */
+ __u64 sequence_number;
+ /*
+ * Input:
+ * Device address to the buffer containing a GXP command. The user
+ * should have obtained this address from the GXP_MAP_BUFFER ioctl.
+ */
+ __u64 device_address;
+ /*
+ * Input:
+ * Size of the buffer at `device_address` in bytes.
+ */
+ __u32 size;
+ /*
+ * Input:
+ * Minimum power state to operate the entire DSP subsystem at until
+ * the mailbox command is finished (executed or timed out). One of the
+ * GXP_POWER_STATE_* defines from below.
+ *
+ * `GXP_POWER_STATE_OFF` is not a valid value when executing a
+ * mailbox command. The caller should pass GXP_POWER_STATE_UUD if the
+ * command is expected to run at the power state the wakelock has
+ * specified.
+ */
+ __u32 gxp_power_state;
+ /*
+ * Input:
+ * Memory interface power state to request from the system so long as
+ * the mailbox command is executing. One of the MEMORY_POWER_STATE*
+ * defines from below.
+ *
+ * If `MEMORY_POWER_STATE_UNDEFINED` is passed, no request to change
+ * the memory interface power state will be made.
+ */
+ __u32 memory_power_state;
+ /*
+ * Input:
+ * Flags describing the command, for use by the GXP device.
+ */
+ __u32 flags;
+ /*
+ * Input:
+ * Flags relevant to the power state requests. Currently reserved.
+ */
+ /* TODO(221320387): Document the flags once support is implemented. */
+ __u32 power_flags;
+};
+
+/*
+ * Push an element to the mailbox command queue.
+ *
+ * The client must hold a VIRTUAL_DEVICE wakelock.
+ */
+#define GXP_MAILBOX_COMMAND \
+ _IOWR(GXP_IOCTL_BASE, 23, struct gxp_mailbox_command_ioctl)
+
+struct gxp_register_mailbox_eventfd_ioctl {
+ /*
+ * This eventfd will be signaled whenever a mailbox response arrives
+ * for the core specified by `virtual_core_id`.
+ *
+ * When registering, if an eventfd has already been registered for the
+ * specified core, the old eventfd will be unregistered and replaced.
+ *
+ * Not used during the unregister call, which clears any existing
+ * eventfd.
+ */
+ __u32 eventfd;
+ /*
+ * Reserved.
+ * Pass 0 for backwards compatibility.
+ */
+ __u32 flags;
+ /*
+ * The virtual core to register or unregister an eventfd from.
+ * While an eventfd is registered, it will be signaled exactly once
+ * any time a command to this virtual core receives a response or times
+ * out.
+ */
+ __u16 virtual_core_id;
+};
+
+#define GXP_REGISTER_MAILBOX_EVENTFD \
+ _IOW(GXP_IOCTL_BASE, 22, struct gxp_register_mailbox_eventfd_ioctl)
+
+#define GXP_UNREGISTER_MAILBOX_EVENTFD \
+ _IOW(GXP_IOCTL_BASE, 24, struct gxp_register_mailbox_eventfd_ioctl)
+
#endif /* __GXP_H__ */
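
Finally, a hedged sketch of the eventfd path added above: userspace registers an eventfd for a virtual core with GXP_REGISTER_MAILBOX_EVENTFD, blocks in poll() until a response arrives or a command times out, then fetches results with the existing GXP_MAILBOX_RESPONSE ioctl. Only the two ioctls and the struct layout come from this change; the poll loop and the gxp_fd variable are illustrative assumptions.

/*
 * Hedged userspace sketch (not from this patch): get notified of mailbox
 * responses through the new per-core eventfd registration.
 * Assumes `gxp_fd` is an open GXP device fd.
 */
#include <poll.h>
#include <stdint.h>
#include <sys/eventfd.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include "gxp.h"

static int gxp_wait_for_response(int gxp_fd, uint16_t virt_core)
{
	struct gxp_register_mailbox_eventfd_ioctl reg = {
		.virtual_core_id = virt_core,
		.flags = 0,		/* reserved, pass 0 */
	};
	struct pollfd pfd;
	uint64_t count;
	int efd;

	efd = eventfd(0, EFD_CLOEXEC);
	if (efd < 0)
		return -1;
	reg.eventfd = efd;

	if (ioctl(gxp_fd, GXP_REGISTER_MAILBOX_EVENTFD, &reg)) {
		close(efd);
		return -1;
	}

	/* The driver signals this eventfd once per response or timeout. */
	pfd.fd = efd;
	pfd.events = POLLIN;
	if (poll(&pfd, 1, -1) > 0)
		read(efd, &count, sizeof(count));	/* clear the counter */

	/* Responses themselves are still drained via GXP_MAILBOX_RESPONSE. */
	ioctl(gxp_fd, GXP_UNREGISTER_MAILBOX_EVENTFD, &reg);
	close(efd);
	return 0;
}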