author     Hsiu-Chang Chen <hsiuchangchen@google.com>    2021-12-24 02:07:28 +0800
committer  Hsiu-Chang Chen <hsiuchangchen@google.com>    2021-12-24 02:00:10 +0000
commit     14b0aff36d9cbc8664b2a0bbdeb4db97e850ee9a (patch)
tree       ce505bce06c0030e4f6a5cd9a13f69156d3fe353
parent     382c14c273a6df27518e102a7aad2ba6be714588 (diff)
download   cnss2-14b0aff36d9cbc8664b2a0bbdeb4db97e850ee9a.tar.gz
wcn6740: Update cnss/mhi/qmi/qrtr drivers
Migrate wlan codes to Post-CS4

Bug: 211358472
Test: Basic functions
Change-Id: I4161d344a9260fb855712626048d80e5954e1ac1
-rw-r--r--  cnss2/Kconfig                                                1
-rw-r--r--  cnss2/Makefile                                               2
-rw-r--r--  cnss2/bus.c                                                  19
-rw-r--r--  cnss2/bus.h                                                  3
-rw-r--r--  cnss2/debug.c                                                4
-rw-r--r--  cnss2/main.c                                                 148
-rw-r--r--  cnss2/main.h                                                 15
-rw-r--r--  cnss2/pci.c                                                  396
-rw-r--r--  cnss2/pci.h                                                  15
-rw-r--r--  cnss2/pci_platform.h                                         197
-rw-r--r--  cnss2/pci_platform_google.c                                  302
-rw-r--r--  cnss2/pci_qcom.c (renamed from cnss2/pci_platform_msm.c)     539
-rw-r--r--  cnss2/power.c                                                55
-rw-r--r--  cnss2/qcom_ramdump.c                                         275
-rw-r--r--  cnss2/qmi.c                                                  12
-rw-r--r--  cnss2/reg.h                                                  19
-rw-r--r--  cnss_utils/cnss_plat_ipc_qmi.c                               356
-rw-r--r--  cnss_utils/cnss_plat_ipc_qmi.h                               87
-rw-r--r--  cnss_utils/cnss_plat_ipc_service_v01.c                       147
-rw-r--r--  cnss_utils/cnss_plat_ipc_service_v01.h                       30
-rw-r--r--  cnss_utils/wlan_firmware_service_v01.c                       113
-rw-r--r--  cnss_utils/wlan_firmware_service_v01.h                       36
-rw-r--r--  inc/cnss_plat_ipc_qmi.h                                      30
-rw-r--r--  inc/mhi_misc.h                                               17
-rw-r--r--  inc/qcom_ramdump.h                                           5
-rw-r--r--  inc/qmi/qmi.h                                                8
-rw-r--r--  mhi/core/boot.c                                              65
-rw-r--r--  mhi/core/init.c                                              1
-rw-r--r--  mhi/core/internal.h                                          1
-rw-r--r--  mhi/core/main.c                                              27
-rw-r--r--  mhi/core/misc.c                                              29
-rw-r--r--  mhi/core/misc.h                                              11
-rw-r--r--  mhi/core/pm.c                                                30
-rw-r--r--  qmi/qmi_encdec.c                                             6
-rw-r--r--  qmi/qmi_interface.c                                          90
-rw-r--r--  qrtr/mhi.c                                                   14
-rw-r--r--  qrtr/ns.c                                                    13
-rw-r--r--  qrtr/qrtr.c                                                  103
-rw-r--r--  qrtr/qrtr.h                                                  4
39 files changed, 2163 insertions, 1062 deletions
diff --git a/cnss2/Kconfig b/cnss2/Kconfig
index 58e514c..c6b06b3 100644
--- a/cnss2/Kconfig
+++ b/cnss2/Kconfig
@@ -2,6 +2,7 @@
config CNSS2
tristate "CNSS2 Platform Driver for Wi-Fi Module"
+# Remove this depends because we won't use PCI_MSM
# depends on !CNSS && PCI_MSM
select CNSS_PLAT_IPC_QMI_SVC
help
diff --git a/cnss2/Makefile b/cnss2/Makefile
index 244fbc7..bd7d901 100644
--- a/cnss2/Makefile
+++ b/cnss2/Makefile
@@ -16,6 +16,6 @@ cnss2-y += debug.o
cnss2-y += pci.o
cnss2-y += power.o
cnss2-y += genl.o
-cnss2-$(CONFIG_PCI_MSM) += pci_platform_msm.o
+cnss2-y += qcom_ramdump.o
cnss2-$(CONFIG_SOC_GOOGLE) += pci_platform_google.o
cnss2-$(CONFIG_CNSS2_QMI) += qmi.o coexistence_service_v01.o ip_multimedia_subsystem_private_service_v01.o
diff --git a/cnss2/bus.c b/cnss2/bus.c
index 9d53bd9..0c05001 100644
--- a/cnss2/bus.c
+++ b/cnss2/bus.c
@@ -88,7 +88,7 @@ void cnss_bus_deinit(struct cnss_plat_data *plat_priv)
switch (plat_priv->bus_type) {
case CNSS_BUS_PCI:
- cnss_pci_deinit(plat_priv);
+ return cnss_pci_deinit(plat_priv);
default:
cnss_pr_err("Unsupported bus type: %d\n",
plat_priv->bus_type);
@@ -562,3 +562,20 @@ int cnss_bus_get_iova_ipa(struct cnss_plat_data *plat_priv, u64 *addr,
return -EINVAL;
}
}
+
+int cnss_bus_update_time_sync_period(struct cnss_plat_data *plat_priv,
+ unsigned int time_sync_period)
+{
+ if (!plat_priv)
+ return -ENODEV;
+
+ switch (plat_priv->bus_type) {
+ case CNSS_BUS_PCI:
+ return cnss_pci_update_time_sync_period(plat_priv->bus_priv,
+ time_sync_period);
+ default:
+ cnss_pr_err("Unsupported bus type: %d\n",
+ plat_priv->bus_type);
+ return -EINVAL;
+ }
+}
diff --git a/cnss2/bus.h b/cnss2/bus.h
index fd21424..33cff28 100644
--- a/cnss2/bus.h
+++ b/cnss2/bus.h
@@ -64,4 +64,7 @@ int cnss_bus_debug_reg_write(struct cnss_plat_data *plat_priv, u32 offset,
int cnss_bus_get_iova(struct cnss_plat_data *plat_priv, u64 *addr, u64 *size);
int cnss_bus_get_iova_ipa(struct cnss_plat_data *plat_priv, u64 *addr,
u64 *size);
+int cnss_bus_update_time_sync_period(struct cnss_plat_data *plat_priv,
+ unsigned int time_sync_period);
+
#endif /* _CNSS_BUS_H */
diff --git a/cnss2/debug.c b/cnss2/debug.c
index 06e1a9a..aad49ea 100644
--- a/cnss2/debug.c
+++ b/cnss2/debug.c
@@ -131,6 +131,9 @@ static int cnss_stats_show_state(struct seq_file *s,
case CNSS_DAEMON_CONNECTED:
seq_puts(s, "DAEMON_CONNECTED");
continue;
+ case CNSS_PCI_PROBE_DONE:
+ seq_puts(s, "PCI PROBE DONE");
+ continue;
}
seq_printf(s, "UNKNOWN-%d", i);
@@ -722,6 +725,7 @@ static int cnss_show_quirks_state(struct seq_file *s,
continue;
case IGNORE_PCI_LINK_FAILURE:
seq_puts(s, "IGNORE_PCI_LINK_FAILURE");
+ continue;
case DISABLE_TIME_SYNC:
seq_puts(s, "DISABLE_TIME_SYNC");
continue;
diff --git a/cnss2/main.c b/cnss2/main.c
index 050d911..8a5f9d3 100644
--- a/cnss2/main.c
+++ b/cnss2/main.c
@@ -1,5 +1,8 @@
// SPDX-License-Identifier: GPL-2.0-only
-/* Copyright (c) 2016-2021, The Linux Foundation. All rights reserved. */
+/*
+ * Copyright (c) 2016-2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
#include <linux/delay.h>
#include <linux/jiffies.h>
@@ -11,15 +14,11 @@
#include <linux/rwsem.h>
#include <linux/suspend.h>
#include <linux/timer.h>
-#ifdef CONFIG_CNSS_OUT_OF_TREE
-#include "cnss_plat_ipc_qmi.h"
-#else
-#include <linux/cnss_plat_ipc_qmi.h>
-#endif
#if IS_ENABLED(CONFIG_QCOM_MINIDUMP)
#include <soc/qcom/minidump.h>
#endif
+#include "cnss_plat_ipc_qmi.h"
#include "main.h"
#include "bus.h"
#include "debug.h"
@@ -31,12 +30,11 @@
#define CNSS_DUMP_NAME "CNSS_WLAN"
#define CNSS_DUMP_DESC_SIZE 0x1000
#define CNSS_DUMP_SEG_VER 0x1
-#define RECOVERY_DELAY_MS 100
#define FILE_SYSTEM_READY 1
#define FW_READY_TIMEOUT 20000
#define FW_ASSERT_TIMEOUT 5000
#define CNSS_EVENT_PENDING 2989
-#define COLD_BOOT_CAL_SHUTDOWN_DELAY_MS 50
+#define POWER_RESET_MIN_DELAY_MS 100
#define CNSS_QUIRKS_DEFAULT 0
#ifdef CONFIG_CNSS_EMULATION
@@ -50,10 +48,13 @@
#endif
#define CNSS_BDF_TYPE_DEFAULT CNSS_BDF_ELF
#define CNSS_TIME_SYNC_PERIOD_DEFAULT 900000
+#define CNSS_MIN_TIME_SYNC_PERIOD 2000
#define CNSS_DMS_QMI_CONNECTION_WAIT_MS 50
#define CNSS_DMS_QMI_CONNECTION_WAIT_RETRY 200
#define CNSS_DAEMON_CONNECT_TIMEOUT_MS 30000
#define CNSS_CAL_DB_FILE_NAME "wlfw_cal_db.bin"
+#define CNSS_CAL_START_PROBE_WAIT_RETRY_MAX 100
+#define CNSS_CAL_START_PROBE_WAIT_MS 500
enum cnss_cal_db_op {
CNSS_CAL_DB_UPLOAD,
@@ -484,8 +485,7 @@ static int cnss_fw_mem_ready_hdlr(struct cnss_plat_data *plat_priv)
if (ret)
goto out;
- if (cnss_wlfw_qdss_dnld_send_sync(plat_priv))
- cnss_pr_info("Failed to download qdss configuration file");
+ cnss_wlfw_qdss_dnld_send_sync(plat_priv);
return 0;
out:
@@ -528,7 +528,7 @@ static int cnss_setup_dms_mac(struct cnss_plat_data *plat_priv)
{
u32 i;
int ret = 0;
- struct cnss_plat_ipc_user_config *cfg;
+ struct cnss_plat_ipc_daemon_config *cfg;
ret = cnss_qmi_get_dms_mac(plat_priv);
if (ret == 0 && plat_priv->dms.mac_valid)
@@ -539,7 +539,7 @@ static int cnss_setup_dms_mac(struct cnss_plat_data *plat_priv)
*/
if (plat_priv->use_nv_mac) {
/* Check if Daemon says platform support DMS MAC provisioning */
- cfg = cnss_plat_ipc_qmi_user_config();
+ cfg = cnss_plat_ipc_qmi_daemon_config();
if (cfg) {
if (!cfg->dms_mac_addr_supported) {
cnss_pr_err("DMS MAC address not supported\n");
@@ -577,6 +577,8 @@ static int cnss_cal_db_mem_update(struct cnss_plat_data *plat_priv,
int ret = 0;
u32 timeout = cnss_get_timeout(plat_priv,
CNSS_TIMEOUT_DAEMON_CONNECTION);
+ enum cnss_plat_ipc_qmi_client_id_v01 client_id =
+ CNSS_PLAT_IPC_DAEMON_QMI_CLIENT_V01;
if (op >= CNSS_CAL_DB_INVALID_OP)
return -EINVAL;
@@ -607,12 +609,14 @@ static int cnss_cal_db_mem_update(struct cnss_plat_data *plat_priv,
/* Copy CAL DB file contents to/from CAL_TYPE_DDR mem allocated to FW */
if (op == CNSS_CAL_DB_DOWNLOAD) {
cnss_pr_dbg("Initiating Calibration file download to mem\n");
- ret = cnss_plat_ipc_qmi_file_download(CNSS_CAL_DB_FILE_NAME,
+ ret = cnss_plat_ipc_qmi_file_download(client_id,
+ CNSS_CAL_DB_FILE_NAME,
plat_priv->cal_mem->va,
size);
} else {
cnss_pr_dbg("Initiating Calibration mem upload to file\n");
- ret = cnss_plat_ipc_qmi_file_upload(CNSS_CAL_DB_FILE_NAME,
+ ret = cnss_plat_ipc_qmi_file_upload(client_id,
+ CNSS_CAL_DB_FILE_NAME,
plat_priv->cal_mem->va,
*size);
}
@@ -856,7 +860,10 @@ unsigned int cnss_get_timeout(struct cnss_plat_data *plat_priv,
*/
return (qmi_timeout + WLAN_MISSION_MODE_TIMEOUT * 3);
case CNSS_TIMEOUT_CALIBRATION:
- return (qmi_timeout + WLAN_COLD_BOOT_CAL_TIMEOUT);
+ /* Similar to mission mode, in CBC if FW init fails
+ * fw recovery is tried. Thus return 2x the CBC timeout.
+ */
+ return (qmi_timeout + WLAN_COLD_BOOT_CAL_TIMEOUT * 2);
case CNSS_TIMEOUT_WLAN_WATCHDOG:
return ((qmi_timeout << 1) + WLAN_WD_TIMEOUT_MS);
case CNSS_TIMEOUT_RDDM:
@@ -1315,7 +1322,7 @@ static void cnss_recovery_work_handler(struct work_struct *work)
cnss_bus_dev_shutdown(plat_priv);
cnss_bus_dev_ramdump(plat_priv);
- msleep(RECOVERY_DELAY_MS);
+ msleep(POWER_RESET_MIN_DELAY_MS);
ret = cnss_bus_dev_powerup(plat_priv);
if (ret)
@@ -1677,6 +1684,7 @@ EXPORT_SYMBOL(cnss_qmi_send);
static int cnss_cold_boot_cal_start_hdlr(struct cnss_plat_data *plat_priv)
{
int ret = 0;
+ u32 retry = 0;
if (test_bit(CNSS_COLD_BOOT_CAL_DONE, &plat_priv->driver_state)) {
cnss_pr_dbg("Calibration complete. Ignore calibration req\n");
@@ -1694,9 +1702,23 @@ static int cnss_cold_boot_cal_start_hdlr(struct cnss_plat_data *plat_priv)
return -EINVAL;
}
+ while (retry++ < CNSS_CAL_START_PROBE_WAIT_RETRY_MAX) {
+ if (test_bit(CNSS_PCI_PROBE_DONE, &plat_priv->driver_state))
+ break;
+ msleep(CNSS_CAL_START_PROBE_WAIT_MS);
+
+ if (retry == CNSS_CAL_START_PROBE_WAIT_RETRY_MAX) {
+ cnss_pr_err("Calibration start failed as PCI probe not complete\n");
+ CNSS_ASSERT(0);
+ ret = -EINVAL;
+ goto mark_cal_fail;
+ }
+ }
+
set_bit(CNSS_IN_COLD_BOOT_CAL, &plat_priv->driver_state);
reinit_completion(&plat_priv->cal_complete);
ret = cnss_bus_dev_powerup(plat_priv);
+mark_cal_fail:
if (ret) {
complete(&plat_priv->cal_complete);
clear_bit(CNSS_IN_COLD_BOOT_CAL, &plat_priv->driver_state);
@@ -1740,7 +1762,7 @@ static int cnss_cold_boot_cal_done_hdlr(struct cnss_plat_data *plat_priv,
cnss_bus_free_qdss_mem(plat_priv);
cnss_release_antenna_sharing(plat_priv);
cnss_bus_dev_shutdown(plat_priv);
- msleep(COLD_BOOT_CAL_SHUTDOWN_DELAY_MS);
+ msleep(POWER_RESET_MIN_DELAY_MS);
complete(&plat_priv->cal_complete);
clear_bit(CNSS_IN_COLD_BOOT_CAL, &plat_priv->driver_state);
set_bit(CNSS_COLD_BOOT_CAL_DONE, &plat_priv->driver_state);
@@ -2225,13 +2247,6 @@ static void cnss_destroy_ramdump_device(struct cnss_plat_data *plat_priv,
#endif
#if IS_ENABLED(CONFIG_QCOM_RAMDUMP)
-
-#if IS_ENABLED(CONFIG_WCN_GOOGLE)
-int cnss_do_ramdump(struct cnss_plat_data *plat_priv)
-{
- return 0;
-}
-#else
int cnss_do_ramdump(struct cnss_plat_data *plat_priv)
{
struct cnss_ramdump_info *ramdump_info = &plat_priv->ramdump_info;
@@ -2246,7 +2261,7 @@ int cnss_do_ramdump(struct cnss_plat_data *plat_priv)
return qcom_dump(&head, ramdump_info->ramdump_dev);
}
-#endif
+
int cnss_do_elf_ramdump(struct cnss_plat_data *plat_priv)
{
struct cnss_ramdump_info_v2 *info_v2 = &plat_priv->ramdump_info_v2;
@@ -2257,6 +2272,13 @@ int cnss_do_elf_ramdump(struct cnss_plat_data *plat_priv)
struct list_head head;
int i, ret = 0;
+#if !IS_ENABLED(CONFIG_WCN_GOOGLE)
+ if (!dump_enabled()) {
+ cnss_pr_info("Dump collection is not enabled\n");
+ return ret;
+ }
+#endif
+
INIT_LIST_HEAD(&head);
for (i = 0; i < dump_data->nentries; i++) {
if (dump_seg->type >= CNSS_FW_DUMP_TYPE_MAX) {
@@ -2294,7 +2316,7 @@ int cnss_do_elf_ramdump(struct cnss_plat_data *plat_priv)
list_add(&seg->node, &head);
do_elf_dump:
- ret = qcom_elf_dump(&head, info_v2->ramdump_dev);
+ ret = qcom_elf_dump(&head, info_v2->ramdump_dev, ELF_CLASS);
while (!list_empty(&head)) {
seg = list_first_entry(&head, struct qcom_dump_segment, node);
@@ -2432,7 +2454,6 @@ static int cnss_register_ramdump_v2(struct cnss_plat_data *plat_priv)
int ret = 0;
struct cnss_ramdump_info_v2 *info_v2;
struct cnss_dump_data *dump_data;
-
#if !IS_ENABLED(CONFIG_WCN_GOOGLE)
struct msm_dump_entry dump_entry;
#endif
@@ -2460,7 +2481,6 @@ static int cnss_register_ramdump_v2(struct cnss_plat_data *plat_priv)
sizeof(dump_data->name));
#if !IS_ENABLED(CONFIG_WCN_GOOGLE)
-
dump_entry.id = MSM_DUMP_DATA_CNSS_WLAN;
dump_entry.addr = virt_to_phys(dump_data);
@@ -2473,6 +2493,7 @@ static int cnss_register_ramdump_v2(struct cnss_plat_data *plat_priv)
goto free_ramdump;
}
#endif
+
info_v2->ramdump_dev = cnss_create_ramdump_device(plat_priv);
if (!info_v2->ramdump_dev) {
cnss_pr_err("Failed to create ramdump device!\n");
@@ -2659,6 +2680,7 @@ int cnss_request_firmware_direct(struct cnss_plat_data *plat_priv,
&plat_priv->plat_dev->dev);
}
+// Changed from CONFIG_INTERCONNECT to CONFIG_INTERCONNECT_QCOM by Google
#if IS_ENABLED(CONFIG_INTERCONNECT_QCOM)
/**
* cnss_register_bus_scale() - Setup interconnect voting data
@@ -2839,6 +2861,37 @@ static ssize_t enable_hds_store(struct device *dev,
return count;
}
+static ssize_t time_sync_period_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct cnss_plat_data *plat_priv = dev_get_drvdata(dev);
+
+ return scnprintf(buf, PAGE_SIZE, "%u ms\n",
+ plat_priv->ctrl_params.time_sync_period);
+}
+
+static ssize_t time_sync_period_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct cnss_plat_data *plat_priv = dev_get_drvdata(dev);
+ unsigned int time_sync_period = 0;
+
+ if (!plat_priv)
+ return -ENODEV;
+
+ if (sscanf(buf, "%du", &time_sync_period) != 1) {
+ cnss_pr_err("Invalid time sync sysfs command\n");
+ return -EINVAL;
+ }
+
+ if (time_sync_period >= CNSS_MIN_TIME_SYNC_PERIOD)
+ cnss_bus_update_time_sync_period(plat_priv, time_sync_period);
+
+ return count;
+}
+
static ssize_t recovery_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
@@ -2979,6 +3032,21 @@ static ssize_t hw_trace_override_store(struct device *dev,
return count;
}
+static ssize_t charger_mode_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct cnss_plat_data *plat_priv = dev_get_drvdata(dev);
+ int tmp = 0;
+
+ if (sscanf(buf, "%du", &tmp) != 1)
+ return -EINVAL;
+
+ plat_priv->charger_mode = tmp;
+ cnss_pr_dbg("Received Charger Mode: %d\n", tmp);
+ return count;
+}
+
static DEVICE_ATTR_WO(fs_ready);
static DEVICE_ATTR_WO(shutdown);
static DEVICE_ATTR_WO(recovery);
@@ -2987,6 +3055,8 @@ static DEVICE_ATTR_WO(qdss_trace_start);
static DEVICE_ATTR_WO(qdss_trace_stop);
static DEVICE_ATTR_WO(qdss_conf_download);
static DEVICE_ATTR_WO(hw_trace_override);
+static DEVICE_ATTR_WO(charger_mode);
+static DEVICE_ATTR_RW(time_sync_period);
static struct attribute *cnss_attrs[] = {
&dev_attr_fs_ready.attr,
@@ -2997,6 +3067,8 @@ static struct attribute *cnss_attrs[] = {
&dev_attr_qdss_trace_stop.attr,
&dev_attr_qdss_conf_download.attr,
&dev_attr_hw_trace_override.attr,
+ &dev_attr_charger_mode.attr,
+ &dev_attr_time_sync_period.attr,
NULL,
};
@@ -3136,7 +3208,8 @@ static int cnss_misc_init(struct cnss_plat_data *plat_priv)
if (!plat_priv->recovery_ws)
cnss_pr_err("Failed to setup FW recovery wake source\n");
- ret = cnss_plat_ipc_register(cnss_daemon_connection_update_cb,
+ ret = cnss_plat_ipc_register(CNSS_PLAT_IPC_DAEMON_QMI_CLIENT_V01,
+ cnss_daemon_connection_update_cb,
plat_priv);
if (ret)
cnss_pr_err("QMI IPC connection call back register failed, err = %d\n",
@@ -3147,6 +3220,8 @@ static int cnss_misc_init(struct cnss_plat_data *plat_priv)
static void cnss_misc_deinit(struct cnss_plat_data *plat_priv)
{
+ cnss_plat_ipc_unregister(CNSS_PLAT_IPC_DAEMON_QMI_CLIENT_V01,
+ plat_priv);
complete_all(&plat_priv->recovery_complete);
complete_all(&plat_priv->rddm_complete);
complete_all(&plat_priv->cal_complete);
@@ -3157,7 +3232,6 @@ static void cnss_misc_deinit(struct cnss_plat_data *plat_priv)
unregister_pm_notifier(&cnss_pm_notifier);
del_timer(&plat_priv->fw_boot_timer);
wakeup_source_unregister(plat_priv->recovery_ws);
- cnss_plat_ipc_unregister(plat_priv);
}
static void cnss_init_control_params(struct cnss_plat_data *plat_priv)
@@ -3409,25 +3483,19 @@ static int cnss_remove(struct platform_device *plat_dev)
cnss_unregister_bus_scale(plat_priv);
cnss_unregister_esoc(plat_priv);
cnss_put_resources(plat_priv);
+
+ if (!IS_ERR_OR_NULL(plat_priv->mbox_chan))
+ mbox_free_channel(plat_priv->mbox_chan);
+
platform_set_drvdata(plat_dev, NULL);
plat_env = NULL;
return 0;
}
-static void cnss_shutdown(struct platform_device *plat_dev)
-{
- struct cnss_plat_data *plat_priv = platform_get_drvdata(plat_dev);
-
- cnss_pr_dbg("cnss shutdown\n");
- set_bit(CNSS_DRIVER_UNLOADING, &plat_priv->driver_state);
- cnss_bus_dev_shutdown(plat_priv);
-}
-
static struct platform_driver cnss_platform_driver = {
.probe = cnss_probe,
.remove = cnss_remove,
- .shutdown = cnss_shutdown,
.driver = {
.name = "cnss2",
.of_match_table = cnss_of_match_table,
diff --git a/cnss2/main.h b/cnss2/main.h
index 8f7f849..60b2eff 100644
--- a/cnss2/main.h
+++ b/cnss2/main.h
@@ -1,5 +1,8 @@
/* SPDX-License-Identifier: GPL-2.0-only */
-/* Copyright (c) 2016-2021, The Linux Foundation. All rights reserved. */
+/*
+ * Copyright (c) 2016-2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
#ifndef _CNSS_MAIN_H
#define _CNSS_MAIN_H
@@ -12,6 +15,7 @@
#endif
#include <linux/etherdevice.h>
#include <linux/firmware.h>
+// Changed from CONFIG_INTERCONNECT to CONFIG_INTERCONNECT_QCOM by Google
#if IS_ENABLED(CONFIG_INTERCONNECT_QCOM)
#include <linux/interconnect.h>
#endif
@@ -60,7 +64,7 @@
#define MAX_FIRMWARE_NAME_LEN 40
#define FW_V2_NUMBER 2
#define POWER_ON_RETRY_MAX_TIMES 3
-#define POWER_ON_RETRY_DELAY_MS 200
+#define POWER_ON_RETRY_DELAY_MS 500
#define CNSS_EVENT_SYNC BIT(0)
#define CNSS_EVENT_UNINTERRUPTIBLE BIT(1)
@@ -114,6 +118,7 @@ struct cnss_pinctrl_info {
struct pinctrl_state *wlan_en_sleep;
int bt_en_gpio;
int xo_clk_gpio; /*qca6490 only */
+ int sw_ctrl_gpio;
};
#if IS_ENABLED(CONFIG_MSM_SUBSYSTEM_RESTART)
@@ -167,6 +172,7 @@ struct cnss_esoc_info {
};
#endif
+// Changed from CONFIG_INTERCONNECT to CONFIG_INTERCONNECT_QCOM by Google
#if IS_ENABLED(CONFIG_INTERCONNECT_QCOM)
/**
* struct cnss_bus_bw_cfg - Interconnect vote data
@@ -316,6 +322,7 @@ enum cnss_driver_state {
CNSS_QMI_DEL_SERVER,
CNSS_QMI_DMS_CONNECTED = 20,
CNSS_DAEMON_CONNECTED,
+ CNSS_PCI_PROBE_DONE,
};
struct cnss_recovery_data {
@@ -528,9 +535,10 @@ struct cnss_plat_data {
const char *vreg_ol_cpr, *vreg_ipa;
bool adsp_pc_enabled;
u64 feature_list;
+ u8 charger_mode;
};
-#if IS_ENABLED(CONFIG_ARCH_QCOM)
+#if IS_ENABLED(CONFIG_ARCH_QCOM) && !IS_ENABLED(CONFIG_WCN_GOOGLE)
static inline u64 cnss_get_host_timestamp(struct cnss_plat_data *plat_priv)
{
u64 ticks = __arch_counter_get_cntvct();
@@ -601,4 +609,5 @@ int cnss_set_feature_list(struct cnss_plat_data *plat_priv,
enum cnss_feature_v01 feature);
int cnss_get_feature_list(struct cnss_plat_data *plat_priv,
u64 *feature_list);
+int cnss_get_input_gpio_value(struct cnss_plat_data *plat_priv, int gpio_num);
#endif /* _CNSS_MAIN_H */
diff --git a/cnss2/pci.c b/cnss2/pci.c
index d59469b..3c8c72e 100644
--- a/cnss2/pci.c
+++ b/cnss2/pci.c
@@ -1,14 +1,15 @@
// SPDX-License-Identifier: GPL-2.0-only
-/* Copyright (c) 2016-2021, The Linux Foundation. All rights reserved. */
+/*
+ * Copyright (c) 2016-2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
-#include <linux/cma.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/msi.h>
#include <linux/of.h>
#include <linux/of_gpio.h>
-#include <linux/of_reserved_mem.h>
#include <linux/pm_runtime.h>
#include <linux/suspend.h>
#include <linux/memblock.h>
@@ -21,8 +22,8 @@
#include "bus.h"
#include "debug.h"
#include "pci.h"
-#include "reg.h"
#include "pci_platform.h"
+#include "reg.h"
#define PCI_LINK_UP 1
#define PCI_LINK_DOWN 0
@@ -30,7 +31,6 @@
#define SAVE_PCI_CONFIG_SPACE 1
#define RESTORE_PCI_CONFIG_SPACE 0
-#define PM_OPTIONS_DEFAULT 0
#define PCI_BAR_NUM 0
#define PCI_INVALID_READ(val) ((val) == U32_MAX)
@@ -79,9 +79,6 @@ static DEFINE_SPINLOCK(time_sync_lock);
#define FORCE_WAKE_DELAY_MAX_US 6000
#define FORCE_WAKE_DELAY_TIMEOUT_US 60000
-#define LINK_TRAINING_RETRY_MAX_TIMES 3
-#define LINK_TRAINING_RETRY_DELAY_MS 500
-
#define MHI_SUSPEND_RETRY_MAX_TIMES 3
#define MHI_SUSPEND_RETRY_DELAY_US 5000
@@ -224,6 +221,7 @@ static const struct mhi_controller_config cnss_mhi_config = {
.ch_cfg = cnss_mhi_channels,
.num_events = ARRAY_SIZE(cnss_mhi_events),
.event_cfg = cnss_mhi_events,
+ .m2_no_db = true,
};
static struct cnss_pci_reg ce_src[] = {
@@ -274,6 +272,13 @@ static struct cnss_pci_reg qdss_csr[] = {
{ NULL },
};
+static struct cnss_pci_reg pci_scratch[] = {
+ { "PCIE_SCRATCH_0", PCIE_SCRATCH_0_SOC_PCIE_REG },
+ { "PCIE_SCRATCH_1", PCIE_SCRATCH_1_SOC_PCIE_REG },
+ { "PCIE_SCRATCH_2", PCIE_SCRATCH_2_SOC_PCIE_REG },
+ { NULL },
+};
+
/* First field of the structure is the device bit mask. Use
* enum cnss_pci_reg_mask as reference for the value.
*/
@@ -810,6 +815,7 @@ static int cnss_pci_force_wake_put(struct cnss_pci_data *pci_priv)
return ret;
}
+// Changed from CONFIG_INTERCONNECT to CONFIG_INTERCONNECT_QCOM by Google
#if IS_ENABLED(CONFIG_INTERCONNECT_QCOM)
/**
* cnss_setup_bus_bandwidth() - Setup interconnect vote for given bandwidth
@@ -1013,6 +1019,37 @@ static int cnss_pci_get_link_status(struct cnss_pci_data *pci_priv)
return 0;
}
+
+static void cnss_pci_soc_scratch_reg_dump(struct cnss_pci_data *pci_priv)
+{
+ u32 reg_offset, val;
+ int i;
+
+ switch (pci_priv->device_id) {
+ case QCA6390_DEVICE_ID:
+ case QCA6490_DEVICE_ID:
+ break;
+ default:
+ return;
+ }
+
+ if (in_interrupt() || irqs_disabled())
+ return;
+
+ if (cnss_pci_check_link_status(pci_priv))
+ return;
+
+ cnss_pr_dbg("Start to dump SOC Scratch registers\n");
+
+ for (i = 0; pci_scratch[i].name; i++) {
+ reg_offset = pci_scratch[i].offset;
+ if (cnss_pci_reg_read(pci_priv, reg_offset, &val))
+ return;
+ cnss_pr_dbg("PCIE_SOC_REG_%s = 0x%x\n",
+ pci_scratch[i].name, val);
+ }
+}
+
int cnss_suspend_pci_link(struct cnss_pci_data *pci_priv)
{
int ret = 0;
@@ -1133,10 +1170,12 @@ int cnss_pci_recover_link_down(struct cnss_pci_data *pci_priv)
jiffies + msecs_to_jiffies(DEV_RDDM_TIMEOUT));
cnss_mhi_debug_reg_dump(pci_priv);
+ cnss_pci_soc_scratch_reg_dump(pci_priv);
return 0;
}
+
static void cnss_pci_update_link_event(struct cnss_pci_data *pci_priv,
enum cnss_bus_event_type type,
void *data)
@@ -1148,7 +1187,7 @@ static void cnss_pci_update_link_event(struct cnss_pci_data *pci_priv,
cnss_pci_call_driver_uevent(pci_priv, CNSS_BUS_EVENT, &bus_event);
}
-static void cnss_pci_handle_linkdown(struct cnss_pci_data *pci_priv)
+void cnss_pci_handle_linkdown(struct cnss_pci_data *pci_priv)
{
struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
struct pci_dev *pci_dev = pci_priv->pci_dev;
@@ -1197,8 +1236,11 @@ int cnss_pci_link_down(struct device *dev)
cnss_pr_err("plat_priv is NULL\n");
return -ENODEV;
}
- cnss_set_perst_gpio(plat_priv);
+ if (pci_priv->pci_link_down_ind) {
+ cnss_pr_dbg("PCI link down recovery is already in progress\n");
+ return -EBUSY;
+ }
if (pci_priv->drv_connected_last &&
of_property_read_bool(plat_priv->plat_dev->dev.of_node,
@@ -1265,39 +1307,31 @@ EXPORT_SYMBOL(cnss_pci_unlock_reg_window);
*/
static void cnss_pci_dump_bl_sram_mem(struct cnss_pci_data *pci_priv)
{
- struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
u32 mem_addr, val, pbl_log_max_size, sbl_log_max_size;
- u32 sbl_log_def_start, sbl_log_def_end;
+ u32 pbl_log_sram_start;
u32 pbl_stage, sbl_log_start, sbl_log_size;
u32 pbl_wlan_boot_cfg, pbl_bootstrap_status;
u32 pbl_bootstrap_status_reg = PBL_BOOTSTRAP_STATUS;
- u32 pbl_log_sram_start_reg = DEBUG_PBL_LOG_SRAM_START;
+ u32 sbl_log_def_start = SRAM_START;
+ u32 sbl_log_def_end = SRAM_END;
int i;
switch (pci_priv->device_id) {
case QCA6390_DEVICE_ID:
+ pbl_log_sram_start = QCA6390_DEBUG_PBL_LOG_SRAM_START;
pbl_log_max_size = QCA6390_DEBUG_PBL_LOG_SRAM_MAX_SIZE;
sbl_log_max_size = QCA6390_DEBUG_SBL_LOG_SRAM_MAX_SIZE;
- sbl_log_def_start = QCA6390_V2_SBL_DATA_START;
- sbl_log_def_end = QCA6390_V2_SBL_DATA_END;
+ break;
case QCA6490_DEVICE_ID:
+ pbl_log_sram_start = QCA6490_DEBUG_PBL_LOG_SRAM_START;
pbl_log_max_size = QCA6490_DEBUG_PBL_LOG_SRAM_MAX_SIZE;
sbl_log_max_size = QCA6490_DEBUG_SBL_LOG_SRAM_MAX_SIZE;
- if (plat_priv->device_version.major_version == FW_V2_NUMBER) {
- sbl_log_def_start = QCA6490_V2_SBL_DATA_START;
- sbl_log_def_end = QCA6490_V2_SBL_DATA_END;
- } else {
- sbl_log_def_start = QCA6490_V1_SBL_DATA_START;
- sbl_log_def_end = QCA6490_V1_SBL_DATA_END;
- }
break;
case WCN7850_DEVICE_ID:
pbl_bootstrap_status_reg = WCN7850_PBL_BOOTSTRAP_STATUS;
- pbl_log_sram_start_reg = WCN7850_DEBUG_PBL_LOG_SRAM_START;
+ pbl_log_sram_start = WCN7850_DEBUG_PBL_LOG_SRAM_START;
pbl_log_max_size = WCN7850_DEBUG_PBL_LOG_SRAM_MAX_SIZE;
sbl_log_max_size = WCN7850_DEBUG_SBL_LOG_SRAM_MAX_SIZE;
- sbl_log_def_start = WCN7850_SBL_DATA_START;
- sbl_log_def_end = WCN7850_SBL_DATA_END;
default:
return;
}
@@ -1318,7 +1352,7 @@ static void cnss_pci_dump_bl_sram_mem(struct cnss_pci_data *pci_priv)
cnss_pr_dbg("Dumping PBL log data\n");
for (i = 0; i < pbl_log_max_size; i += sizeof(val)) {
- mem_addr = pbl_log_sram_start_reg + i;
+ mem_addr = pbl_log_sram_start + i;
if (cnss_pci_reg_read(pci_priv, mem_addr, &val))
break;
cnss_pr_dbg("SRAM[0x%x] = 0x%x\n", mem_addr, val);
@@ -1361,6 +1395,7 @@ static int cnss_pci_handle_mhi_poweron_timeout(struct cnss_pci_data *pci_priv)
} else {
cnss_pr_dbg("RDDM cookie is not set\n");
cnss_mhi_debug_reg_dump(pci_priv);
+ cnss_pci_soc_scratch_reg_dump(pci_priv);
/* Dump PBL/SBL error log if RDDM cookie is not set */
cnss_pci_dump_bl_sram_mem(pci_priv);
return -ETIMEDOUT;
@@ -1560,6 +1595,12 @@ retry_mhi_suspend:
break;
case CNSS_MHI_TRIGGER_RDDM:
ret = mhi_force_rddm_mode(pci_priv->mhi_ctrl);
+ if (ret) {
+ cnss_pr_err("Failed to trigger RDDM, err = %d\n", ret);
+
+ cnss_pr_dbg("Sending host reset req\n");
+ ret = mhi_force_reset(pci_priv->mhi_ctrl);
+ }
break;
case CNSS_MHI_RDDM_DONE:
break;
@@ -1581,56 +1622,6 @@ out:
return ret;
}
-#if IS_ENABLED(CONFIG_PCI_MSM)
-/**
- * cnss_wlan_adsp_pc_enable: Control ADSP power collapse setup
- * @dev: Platform driver pci private data structure
- * @control: Power collapse enable / disable
- *
- * This function controls ADSP power collapse (PC). It must be called
- * based on wlan state. ADSP power collapse during wlan RTPM suspend state
- * results in delay during periodic QMI stats PCI link up/down. This delay
- * causes additional power consumption.
- * Introduced in SM8350.
- *
- * Result: 0 Success. negative error codes.
- */
-static int cnss_wlan_adsp_pc_enable(struct cnss_pci_data *pci_priv,
- bool control)
-{
- struct pci_dev *pci_dev = pci_priv->pci_dev;
- int ret = 0;
- u32 pm_options = PM_OPTIONS_DEFAULT;
- struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
-
- if (plat_priv->adsp_pc_enabled == control) {
- cnss_pr_dbg("ADSP power collapse already %s\n",
- control ? "Enabled" : "Disabled");
- return 0;
- }
-
- if (control)
- pm_options &= ~MSM_PCIE_CONFIG_NO_DRV_PC;
- else
- pm_options |= MSM_PCIE_CONFIG_NO_DRV_PC;
-
- ret = msm_pcie_pm_control(MSM_PCIE_DRV_PC_CTRL, pci_dev->bus->number,
- pci_dev, NULL, pm_options);
- if (ret)
- return ret;
-
- cnss_pr_dbg("%s ADSP power collapse\n", control ? "Enable" : "Disable");
- plat_priv->adsp_pc_enabled = control;
- return 0;
-}
-#else
-static int cnss_wlan_adsp_pc_enable(struct cnss_pci_data *pci_priv,
- bool control)
-{
- return 0;
-}
-#endif
-
int cnss_pci_start_mhi(struct cnss_pci_data *pci_priv)
{
int ret = 0;
@@ -1943,6 +1934,25 @@ static void cnss_pci_stop_time_sync_update(struct cnss_pci_data *pci_priv)
cancel_delayed_work_sync(&pci_priv->time_sync_work);
}
+int cnss_pci_update_time_sync_period(struct cnss_pci_data *pci_priv,
+ unsigned int time_sync_period)
+{
+ struct cnss_plat_data *plat_priv;
+
+ if (!pci_priv)
+ return -ENODEV;
+
+ plat_priv = pci_priv->plat_priv;
+
+ cnss_pci_stop_time_sync_update(pci_priv);
+ plat_priv->ctrl_params.time_sync_period = time_sync_period;
+ cnss_pci_start_time_sync_update(pci_priv);
+ cnss_pr_dbg("WLAN time sync period %u ms\n",
+ plat_priv->ctrl_params.time_sync_period);
+
+ return 0;
+}
+
int cnss_pci_call_driver_probe(struct cnss_pci_data *pci_priv)
{
int ret = 0;
@@ -2304,7 +2314,9 @@ static int cnss_qca6290_powerup(struct cnss_pci_data *pci_priv)
int ret = 0;
struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
unsigned int timeout;
- int retry = 0;
+ int retry = 0, bt_en_gpio = plat_priv->pinctrl_info.bt_en_gpio;
+ int sw_ctrl_gpio = plat_priv->pinctrl_info.sw_ctrl_gpio;
+
if (plat_priv->ramdump_info_v2.dump_data_valid) {
cnss_pci_clear_dump_info(pci_priv);
cnss_pci_power_off_mhi(pci_priv);
@@ -2327,6 +2339,8 @@ retry:
ret = cnss_resume_pci_link(pci_priv);
if (ret) {
cnss_pr_err("Failed to resume PCI link, err = %d\n", ret);
+ cnss_pr_dbg("Value of SW_CTRL GPIO: %d\n",
+ cnss_get_input_gpio_value(plat_priv, sw_ctrl_gpio));
if (test_bit(IGNORE_PCI_LINK_FAILURE,
&plat_priv->ctrl_params.quirks)) {
cnss_pr_dbg("Ignore PCI link resume failure\n");
@@ -2335,7 +2349,19 @@ retry:
}
if (ret == -EAGAIN && retry++ < POWER_ON_RETRY_MAX_TIMES) {
cnss_power_off_device(plat_priv);
+ /* Force toggle BT_EN GPIO low */
+ if (retry == POWER_ON_RETRY_MAX_TIMES) {
+ cnss_pr_dbg("Retry #%d. Set BT_EN GPIO(%u) low\n",
+ retry, bt_en_gpio);
+ if (bt_en_gpio >= 0)
+ gpio_direction_output(bt_en_gpio, 0);
+ cnss_pr_dbg("BT_EN GPIO val: %d\n",
+ gpio_get_value(bt_en_gpio));
+ }
cnss_pr_dbg("Retry to resume PCI link #%d\n", retry);
+ cnss_pr_dbg("Value of SW_CTRL GPIO: %d\n",
+ cnss_get_input_gpio_value(plat_priv,
+ sw_ctrl_gpio));
msleep(POWER_ON_RETRY_DELAY_MS * retry);
goto retry;
}
@@ -2498,6 +2524,7 @@ static int cnss_qca6290_ramdump(struct cnss_pci_data *pci_priv)
int cnss_pci_dev_powerup(struct cnss_pci_data *pci_priv)
{
int ret = 0;
+
if (!pci_priv) {
cnss_pr_err("pci_priv is NULL\n");
return -ENODEV;
@@ -2627,7 +2654,14 @@ static void cnss_wlan_reg_driver_work(struct work_struct *work)
if (test_bit(CNSS_COLD_BOOT_CAL_DONE, &plat_priv->driver_state)) {
goto reg_driver;
} else {
- cnss_pr_err("Calibration still not done\n");
+ cnss_pr_err("Timeout waiting for calibration to complete\n");
+ del_timer(&plat_priv->fw_boot_timer);
+ if (plat_priv->charger_mode) {
+ cnss_pr_err("Ignore calibration timeout in charger mode\n");
+ return;
+ }
+ if (!test_bit(CNSS_IN_REBOOT, &plat_priv->driver_state))
+ CNSS_ASSERT(0);
cal_info = kzalloc(sizeof(*cal_info), GFP_KERNEL);
if (!cal_info)
return;
@@ -2635,8 +2669,6 @@ static void cnss_wlan_reg_driver_work(struct work_struct *work)
cnss_driver_event_post(plat_priv,
CNSS_DRIVER_EVENT_COLD_BOOT_CAL_DONE,
0, cal_info);
- /* Temporarily return for bringup. CBC will not be triggered */
- return;
}
reg_driver:
if (test_bit(CNSS_IN_REBOOT, &plat_priv->driver_state)) {
@@ -2655,6 +2687,9 @@ int cnss_wlan_register_driver(struct cnss_wlan_driver *driver_ops)
int ret = 0;
struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(NULL);
struct cnss_pci_data *pci_priv;
+#if !IS_ENABLED(CONFIG_WCN_GOOGLE)
+ const struct pci_device_id *id_table = driver_ops->id_table;
+#endif
unsigned int timeout;
if (!plat_priv) {
@@ -2662,12 +2697,12 @@ int cnss_wlan_register_driver(struct cnss_wlan_driver *driver_ops)
return -EAGAIN;
}
- pci_priv = plat_priv->bus_priv;
- if (!pci_priv) {
- cnss_pr_info("pci_priv is not ready for register driver\n");
+ if (!test_bit(CNSS_PCI_PROBE_DONE, &plat_priv->driver_state)) {
+ cnss_pr_info("pci probe not yet done for register driver\n");
return -EAGAIN;
}
+ pci_priv = plat_priv->bus_priv;
if (pci_priv->driver_ops) {
cnss_pr_err("Driver has already registered\n");
return -EEXIST;
@@ -2678,9 +2713,22 @@ int cnss_wlan_register_driver(struct cnss_wlan_driver *driver_ops)
return -EINVAL;
}
- ret=check_id_table(driver_ops);
- if (ret)
- return ret;
+#if !IS_ENABLED(CONFIG_WCN_GOOGLE)
+ if (!id_table || !pci_dev_present(id_table)) {
+ /* id_table pointer will move from pci_dev_present(),
+ * so check again using local pointer.
+ */
+ id_table = driver_ops->id_table;
+ while (id_table && id_table->vendor) {
+ cnss_pr_info("Host driver is built for PCIe device ID 0x%x\n",
+ id_table->device);
+ id_table++;
+ }
+ cnss_pr_err("Enumerated PCIe device id is 0x%x, reject unsupported driver\n",
+ pci_priv->device_id);
+ return -ENODEV;
+ }
+#endif
if (!plat_priv->cbc_enabled ||
test_bit(CNSS_COLD_BOOT_CAL_DONE, &plat_priv->driver_state))
@@ -2693,7 +2741,6 @@ int cnss_wlan_register_driver(struct cnss_wlan_driver *driver_ops)
* until CBC is complete
*/
timeout = cnss_get_timeout(plat_priv, CNSS_TIMEOUT_CALIBRATION);
-
INIT_DELAYED_WORK(&plat_priv->wlan_reg_driver_work,
cnss_wlan_reg_driver_work);
schedule_delayed_work(&plat_priv->wlan_reg_driver_work,
@@ -2794,7 +2841,6 @@ int cnss_pci_unregister_driver_hdlr(struct cnss_pci_data *pci_priv)
return 0;
}
-
static int cnss_pci_suspend_driver(struct cnss_pci_data *pci_priv)
{
struct pci_dev *pci_dev = pci_priv->pci_dev;
@@ -3008,6 +3054,7 @@ static int cnss_pci_resume(struct device *dev)
#if CONFIG_WCN_GOOGLE
//exynos_pcie_rc_l1ss_ctrl(1, PCIE_L1SS_CTRL_WIFI, GOOGLE_RC_ID);
#endif
+
out:
return ret;
}
@@ -3600,6 +3647,7 @@ int cnss_pci_alloc_fw_mem(struct cnss_pci_data *pci_priv)
for (i = 0; i < plat_priv->fw_mem_seg_len; i++) {
if (!fw_mem[i].va && fw_mem[i].size) {
+retry:
fw_mem[i].va =
#ifdef CONFIG_WCN_GOOGLE
dma_direct_alloc(dev, fw_mem[i].size,
@@ -3610,10 +3658,21 @@ int cnss_pci_alloc_fw_mem(struct cnss_pci_data *pci_priv)
&fw_mem[i].pa, GFP_KERNEL,
fw_mem[i].attrs);
#endif
+
if (!fw_mem[i].va) {
+ if ((fw_mem[i].attrs &
+ DMA_ATTR_FORCE_CONTIGUOUS)) {
+ fw_mem[i].attrs &=
+ ~DMA_ATTR_FORCE_CONTIGUOUS;
+
+ cnss_pr_dbg("Fallback to non-contiguous memory for FW, Mem type: %u\n",
+ fw_mem[i].type);
+ goto retry;
+ }
+
cnss_pr_err("Failed to allocate memory for FW, size: 0x%zx, type: %u\n",
fw_mem[i].size, fw_mem[i].type);
-
+ CNSS_ASSERT(0);
return -ENOMEM;
}
}
@@ -3804,6 +3863,7 @@ void cnss_pci_fw_boot_timeout_hdlr(struct cnss_pci_data *pci_priv)
CNSS_REASON_TIMEOUT);
}
+
static void cnss_pci_deinit_smmu(struct cnss_pci_data *pci_priv)
{
pci_priv->iommu_domain = NULL;
@@ -3988,12 +4048,6 @@ int cnss_get_soc_info(struct device *dev, struct cnss_soc_info *info)
}
EXPORT_SYMBOL(cnss_get_soc_info);
-static int cnss_pci_get_msi_assignment(struct cnss_pci_data *pci_priv)
-{
- pci_priv->msi_config = cnss_get_msi_config();
-
- return 0;
-}
static int cnss_pci_enable_msi(struct cnss_pci_data *pci_priv)
{
@@ -4346,6 +4400,7 @@ static void cnss_pci_dump_debug_reg(struct cnss_pci_data *pci_priv)
cnss_pr_dbg("Start to dump debug registers\n");
cnss_mhi_debug_reg_dump(pci_priv);
+ cnss_pci_soc_scratch_reg_dump(pci_priv);
cnss_pci_dump_ce_reg(pci_priv, CNSS_CE_COMMON);
cnss_pci_dump_ce_reg(pci_priv, CNSS_CE_09);
cnss_pci_dump_ce_reg(pci_priv, CNSS_CE_10);
@@ -4372,6 +4427,7 @@ int cnss_pci_force_fw_assert_hdlr(struct cnss_pci_data *pci_priv)
if (!cnss_pci_check_link_status(pci_priv))
cnss_mhi_debug_reg_dump(pci_priv);
+ cnss_pci_soc_scratch_reg_dump(pci_priv);
cnss_pci_dump_misc_reg(pci_priv);
cnss_pci_dump_shadow_reg(pci_priv);
@@ -4414,13 +4470,15 @@ static void cnss_pci_add_dump_seg(struct cnss_pci_data *pci_priv,
struct device *dev = &pci_priv->pci_dev->dev;
phys_addr_t pa;
#endif
+
dump_seg->address = dma;
dump_seg->v_address = va;
dump_seg->size = size;
dump_seg->type = type;
- cnss_pr_dbg("Seg: %x, va: %x, dma: %pa, size: 0x%zx\n",
+ cnss_pr_dbg("Seg: %x, va: %pK, dma: %pa, size: 0x%zx\n",
seg_no, va, &dma, size);
+
#if !IS_ENABLED(CONFIG_WCN_GOOGLE)
if (cnss_va_to_pa(dev, size, va, dma, &pa, DMA_ATTR_FORCE_CONTIGUOUS))
return;
@@ -4557,6 +4615,7 @@ void cnss_pci_collect_dump_info(struct cnss_pci_data *pci_priv, bool in_panic)
}
cnss_mhi_debug_reg_dump(pci_priv);
+ cnss_pci_soc_scratch_reg_dump(pci_priv);
cnss_pci_dump_misc_reg(pci_priv);
cnss_pci_dump_shadow_reg(pci_priv);
cnss_pci_dump_qdss_reg(pci_priv);
@@ -4573,6 +4632,8 @@ void cnss_pci_collect_dump_info(struct cnss_pci_data *pci_priv, bool in_panic)
rddm_image = pci_priv->mhi_ctrl->rddm_image;
dump_data->nentries = 0;
+ cnss_mhi_dump_sfr(pci_priv);
+
if (!dump_seg) {
cnss_pr_warn("FW image dump collection not setup");
goto skip_dump;
@@ -4604,19 +4665,22 @@ void cnss_pci_collect_dump_info(struct cnss_pci_data *pci_priv, bool in_panic)
dump_data->nentries += rddm_image->entries;
- cnss_mhi_dump_sfr(pci_priv);
-
- cnss_pr_dbg("Collect remote heap dump segment\n");
for (i = 0, j = 0; i < plat_priv->fw_mem_seg_len; i++) {
if (fw_mem[i].type == CNSS_MEM_TYPE_DDR) {
- cnss_pci_add_dump_seg(pci_priv, dump_seg,
- CNSS_FW_REMOTE_HEAP, j,
- fw_mem[i].va, fw_mem[i].pa,
- fw_mem[i].size);
- dump_seg++;
- dump_data->nentries++;
- j++;
+ if (fw_mem[i].attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
+ cnss_pr_dbg("Collect remote heap dump segment\n");
+ cnss_pci_add_dump_seg(pci_priv, dump_seg,
+ CNSS_FW_REMOTE_HEAP, j,
+ fw_mem[i].va,
+ fw_mem[i].pa,
+ fw_mem[i].size);
+ dump_seg++;
+ dump_data->nentries++;
+ j++;
+ } else {
+ cnss_pr_dbg("Skip remote heap dumps as it is non-contiguous\n");
+ }
}
}
@@ -4661,7 +4725,8 @@ void cnss_pci_clear_dump_info(struct cnss_pci_data *pci_priv)
}
for (i = 0, j = 0; i < plat_priv->fw_mem_seg_len; i++) {
- if (fw_mem[i].type == CNSS_MEM_TYPE_DDR) {
+ if (fw_mem[i].type == CNSS_MEM_TYPE_DDR &&
+ (fw_mem[i].attrs & DMA_ATTR_FORCE_CONTIGUOUS)) {
cnss_pci_remove_dump_seg(pci_priv, dump_seg,
CNSS_FW_REMOTE_HEAP, j,
fw_mem[i].va, fw_mem[i].pa,
@@ -4835,6 +4900,7 @@ static void cnss_dev_rddm_timeout_hdlr(struct timer_list *t)
cnss_pr_err("Unable to collect ramdumps due to abrupt reset\n");
cnss_mhi_debug_reg_dump(pci_priv);
+ cnss_pci_soc_scratch_reg_dump(pci_priv);
cnss_schedule_recovery(&pci_priv->pci_dev->dev, CNSS_REASON_TIMEOUT);
}
@@ -4862,6 +4928,7 @@ static void cnss_boot_debug_timeout_hdlr(struct timer_list *t)
cnss_pr_dbg("Dump MHI/PBL/SBL debug data every %ds during MHI power on\n",
BOOT_DEBUG_TIMEOUT_MS / 1000);
cnss_mhi_debug_reg_dump(pci_priv);
+ cnss_pci_soc_scratch_reg_dump(pci_priv);
cnss_pci_dump_bl_sram_mem(pci_priv);
mod_timer(&pci_priv->boot_debug_timer,
@@ -5139,7 +5206,115 @@ static void cnss_pci_config_regs(struct cnss_pci_data *pci_priv)
}
}
-//#if !IS_ENABLED(CONFIG_ARCH_QCOM)
+#if !IS_ENABLED(CONFIG_ARCH_QCOM) && !IS_ENABLED(CONFIG_WCN_GOOGLE)
+static int cnss_pci_of_reserved_mem_device_init(struct cnss_pci_data *pci_priv)
+{
+ return 0;
+}
+#endif
+
+#if IS_ENABLED(CONFIG_WCN_GOOGLE) || !IS_ENABLED(CONFIG_ARCH_QCOM)
+static irqreturn_t cnss_pci_wake_handler(int irq, void *data)
+{
+ struct cnss_pci_data *pci_priv = data;
+ struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
+ enum rpm_status status;
+ struct device *dev;
+
+ pci_priv->wake_counter++;
+ cnss_pr_dbg("WLAN PCI wake IRQ (%u) is asserted #%u\n",
+ pci_priv->wake_irq, pci_priv->wake_counter);
+
+ /* Make sure abort current suspend */
+ cnss_pm_stay_awake(plat_priv);
+ cnss_pm_relax(plat_priv);
+ /* Above two pm* API calls will abort system suspend only when
+ * plat_dev->dev->ws is initiated by device_init_wakeup() API, and
+ * calling pm_system_wakeup() is just to guarantee system suspend
+ * can be aborted if it is not initiated in any case.
+ */
+ pm_system_wakeup();
+
+ dev = &pci_priv->pci_dev->dev;
+ status = dev->power.runtime_status;
+
+ if ((cnss_pci_get_monitor_wake_intr(pci_priv) &&
+ cnss_pci_get_auto_suspended(pci_priv)) ||
+ (status == RPM_SUSPENDING || status == RPM_SUSPENDED)) {
+ cnss_pci_set_monitor_wake_intr(pci_priv, false);
+ cnss_pci_pm_request_resume(pci_priv);
+ }
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * cnss_pci_wake_gpio_init() - Setup PCI wake GPIO for WLAN
+ * @pci_priv: driver PCI bus context pointer
+ *
+ * This function initializes WLAN PCI wake GPIO and corresponding
+ * interrupt. It should be used in non-MSM platforms whose PCIe
+ * root complex driver doesn't handle the GPIO.
+ *
+ * Return: 0 for success or skip, negative value for error
+ */
+static int cnss_pci_wake_gpio_init(struct cnss_pci_data *pci_priv)
+{
+ struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
+ struct device *dev = &plat_priv->plat_dev->dev;
+ int ret = 0;
+
+ pci_priv->wake_gpio = of_get_named_gpio(dev->of_node,
+ "wlan-pci-wake-gpio", 0);
+ if (pci_priv->wake_gpio < 0)
+ goto out;
+
+ cnss_pr_dbg("Get PCI wake GPIO (%d) from device node\n",
+ pci_priv->wake_gpio);
+
+ ret = gpio_request(pci_priv->wake_gpio, "wlan_pci_wake_gpio");
+ if (ret) {
+ cnss_pr_err("Failed to request PCI wake GPIO, err = %d\n",
+ ret);
+ goto out;
+ }
+
+ gpio_direction_input(pci_priv->wake_gpio);
+ pci_priv->wake_irq = gpio_to_irq(pci_priv->wake_gpio);
+
+ ret = request_irq(pci_priv->wake_irq, cnss_pci_wake_handler,
+ IRQF_TRIGGER_FALLING, "wlan_pci_wake_irq", pci_priv);
+ if (ret) {
+ cnss_pr_err("Failed to request PCI wake IRQ, err = %d\n", ret);
+ goto free_gpio;
+ }
+
+ ret = enable_irq_wake(pci_priv->wake_irq);
+ if (ret) {
+ cnss_pr_err("Failed to enable PCI wake IRQ, err = %d\n", ret);
+ goto free_irq;
+ }
+
+ return 0;
+
+free_irq:
+ free_irq(pci_priv->wake_irq, pci_priv);
+free_gpio:
+ gpio_free(pci_priv->wake_gpio);
+out:
+ return ret;
+}
+
+static void cnss_pci_wake_gpio_deinit(struct cnss_pci_data *pci_priv)
+{
+ if (pci_priv->wake_gpio < 0)
+ return;
+
+ disable_irq_wake(pci_priv->wake_irq);
+ free_irq(pci_priv->wake_irq, pci_priv);
+ gpio_free(pci_priv->wake_gpio);
+}
+#endif
/* Setting to use this cnss_pm_domain ops will let PM framework override the
* ops from dev->bus->pm which is pci_dev_pm_ops from pci-driver.c. This ops
@@ -5170,6 +5345,7 @@ static int cnss_pci_probe(struct pci_dev *pci_dev,
#if CONFIG_WCN_GOOGLE
//exynos_pcie_rc_l1ss_ctrl(0, PCIE_L1SS_CTRL_WIFI, GOOGLE_RC_ID);
#endif
+
pci_priv = devm_kzalloc(dev, sizeof(*pci_priv), GFP_KERNEL);
if (!pci_priv) {
ret = -ENOMEM;
@@ -5257,6 +5433,7 @@ static int cnss_pci_probe(struct pci_dev *pci_dev,
if (ret)
cnss_pr_err("Failed to suspend PCI link, err = %d\n", ret);
cnss_power_off_device(plat_priv);
+ set_bit(CNSS_PCI_PROBE_DONE, &plat_priv->driver_state);
return 0;
@@ -5289,6 +5466,7 @@ static void cnss_pci_remove(struct pci_dev *pci_dev)
struct cnss_plat_data *plat_priv =
cnss_bus_dev_to_plat_priv(&pci_dev->dev);
+ clear_bit(CNSS_PCI_PROBE_DONE, &plat_priv->driver_state);
cnss_pci_free_m3_mem(pci_priv);
cnss_pci_free_fw_mem(pci_priv);
cnss_pci_free_qdss_mem(pci_priv);
@@ -5414,7 +5592,9 @@ int cnss_pci_init(struct cnss_plat_data *plat_priv)
}
ret = pci_register_driver(&cnss_pci_driver);
+#if IS_ENABLED(CONFIG_WCN_GOOGLE)
cnss_pr_err("ret of pci_register_driver: %d\n", ret);
+#endif
if (ret) {
cnss_pr_err("Failed to register to PCI framework, err = %d\n",
ret);
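
Among the pci.c changes, cnss_pci_alloc_fw_mem() now retries a firmware segment allocation without DMA_ATTR_FORCE_CONTIGUOUS when the contiguous attempt fails. A minimal sketch of that fallback pattern using only the standard DMA-mapping API is shown below; the helper name is illustrative, not part of the driver.

#include <linux/dma-mapping.h>
#include <linux/gfp.h>

/* Illustrative helper: try a physically contiguous allocation first,
 * then retry without DMA_ATTR_FORCE_CONTIGUOUS if it fails. */
static void *alloc_fw_seg(struct device *dev, size_t size,
			  dma_addr_t *pa, unsigned long *attrs)
{
	void *va;

retry:
	va = dma_alloc_attrs(dev, size, pa, GFP_KERNEL, *attrs);
	if (!va && (*attrs & DMA_ATTR_FORCE_CONTIGUOUS)) {
		/* Contiguous carve-out exhausted; fall back to a
		 * non-contiguous (IOMMU-mapped) allocation. */
		*attrs &= ~DMA_ATTR_FORCE_CONTIGUOUS;
		goto retry;
	}
	return va;
}
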
diff --git a/cnss2/pci.h b/cnss2/pci.h
index 8012eb6..2a9a36e 100644
--- a/cnss2/pci.h
+++ b/cnss2/pci.h
@@ -1,9 +1,13 @@
/* SPDX-License-Identifier: GPL-2.0-only */
-/* Copyright (c) 2016-2021, The Linux Foundation. All rights reserved. */
+/*
+ * Copyright (c) 2016-2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
#ifndef _CNSS_PCI_H
#define _CNSS_PCI_H
+#include <linux/cma.h>
#include <linux/iommu.h>
#ifdef CONFIG_CNSS_OUT_OF_TREE
#include "mhi.h"
@@ -20,6 +24,7 @@
#if IS_ENABLED(CONFIG_PCI_MSM)
#include <linux/msm_pcie.h>
#endif
+#include <linux/of_reserved_mem.h>
#include <linux/pci.h>
#ifdef CONFIG_WCN_GOOGLE
#include <linux/exynos-pci-noti.h>
@@ -27,6 +32,11 @@
#include "main.h"
+#define PM_OPTIONS_DEFAULT 0
+#define PCI_LINK_DOWN 0
+#define LINK_TRAINING_RETRY_MAX_TIMES 3
+#define LINK_TRAINING_RETRY_DELAY_MS 500
+
enum cnss_mhi_state {
CNSS_MHI_INIT,
CNSS_MHI_DEINIT,
@@ -273,5 +283,8 @@ int cnss_pci_debug_reg_write(struct cnss_pci_data *pci_priv, u32 offset,
int cnss_pci_get_iova(struct cnss_pci_data *pci_priv, u64 *addr, u64 *size);
int cnss_pci_get_iova_ipa(struct cnss_pci_data *pci_priv, u64 *addr,
u64 *size);
+int cnss_pci_update_time_sync_period(struct cnss_pci_data *pci_priv,
+ unsigned int time_sync_period);
+void cnss_pci_handle_linkdown(struct cnss_pci_data *pci_priv);
#endif /* _CNSS_PCI_H */
diff --git a/cnss2/pci_platform.h b/cnss2/pci_platform.h
index 2d7cdb9..9d03f83 100644
--- a/cnss2/pci_platform.h
+++ b/cnss2/pci_platform.h
@@ -1,33 +1,188 @@
/* SPDX-License-Identifier: GPL-2.0-only */
-/* Copyright (c) 2016-2021, The Linux Foundation. All rights reserved. */
+/*
+ * Copyright (c) 2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
#ifndef _CNSS_PCI_PLATFORM_H
#define _CNSS_PCI_PLATFORM_H
-#include <linux/platform_device.h>
-#include <linux/pci.h>
#include "pci.h"
-void cnss_shutdown(struct platform_device *plat_dev);
-int cnss_set_pci_link(struct cnss_pci_data *pci_priv, bool link_up);
+#if IS_ENABLED(CONFIG_PCI_MSM) || IS_ENABLED(CONFIG_WCN_GOOGLE)
+/**
+ * _cnss_pci_enumerate() - Enumerate PCIe endpoints
+ * @plat_priv: driver platform context pointer
+ * @rc_num: root complex index that an endpoint connects to
+ *
+ * This function shall call corresponding PCIe root complex driver APIs
+ * to power on root complex and enumerate the endpoint connected to it.
+ *
+ * Return: 0 for success, negative value for error
+ */
+int _cnss_pci_enumerate(struct cnss_plat_data *plat_priv, u32 rc_num);
+
+/**
+ * cnss_pci_assert_perst() - Assert PCIe PERST GPIO
+ * @pci_priv: driver PCI bus context pointer
+ *
+ * This function shall call corresponding PCIe root complex driver APIs
+ * to assert PCIe PERST GPIO.
+ *
+ * Return: 0 for success, negative value for error
+ */
+int cnss_pci_assert_perst(struct cnss_pci_data *pci_priv);
+
+/**
+ * cnss_pci_disable_pc() - Disable PCIe link power collapse from RC driver
+ * @pci_priv: driver PCI bus context pointer
+ * @vote: value to indicate disable (true) or enable (false)
+ *
+ * This function shall call corresponding PCIe root complex driver APIs
+ * to disable PCIe power collapse. The purpose of this API is to avoid
+ * root complex driver still controlling PCIe link from callbacks of
+ * system suspend/resume. Device driver itself should take full control
+ * of the link in such cases.
+ *
+ * Return: 0 for success, negative value for error
+ */
+int cnss_pci_disable_pc(struct cnss_pci_data *pci_priv, bool vote);
+
+/**
+ * cnss_pci_set_link_bandwidth() - Update number of lanes and speed of
+ * PCIe link
+ * @pci_priv: driver PCI bus context pointer
+ * @link_speed: PCIe link gen speed
+ * @link_width: number of lanes for PCIe link
+ *
+ * This function shall call corresponding PCIe root complex driver APIs
+ * to update number of lanes and speed of the link.
+ *
+ * Return: 0 for success, negative value for error
+ */
+int cnss_pci_set_link_bandwidth(struct cnss_pci_data *pci_priv,
+ u16 link_speed, u16 link_width);
+
+/**
+ * cnss_pci_set_max_link_speed() - Set the maximum speed PCIe can link up with
+ * @pci_priv: driver PCI bus context pointer
+ * @rc_num: root complex index that an endpoint connects to
+ * @link_speed: PCIe link gen speed
+ *
+ * This function shall call corresponding PCIe root complex driver APIs
+ * to update the maximum speed that PCIe can link up with.
+ *
+ * Return: 0 for success, negative value for error
+ */
+int cnss_pci_set_max_link_speed(struct cnss_pci_data *pci_priv,
+ u32 rc_num, u16 link_speed);
+
+/**
+ * cnss_reg_pci_event() - Register for PCIe events
+ * @pci_priv: driver PCI bus context pointer
+ *
+ * This function shall call corresponding PCIe root complex driver APIs
+ * to register for PCIe events like link down or WAKE GPIO toggling etc.
+ * The events should be based on PCIe root complex driver's capability.
+ *
+ * Return: 0 for success, negative value for error
+ */
int cnss_reg_pci_event(struct cnss_pci_data *pci_priv);
void cnss_dereg_pci_event(struct cnss_pci_data *pci_priv);
-void cnss_set_perst_gpio(struct cnss_plat_data *plat_priv);
-//int platform_pci_init(struct cnss_plat_data *plat_priv);
-//int cnss_pci_prevent_l1(struct device *dev);
-//void cnss_pci_allow_l1(struct device *dev);
-struct cnss_msi_config *cnss_get_msi_config(void);
+
+/**
+ * cnss_wlan_adsp_pc_enable: Control ADSP power collapse setup
+ * @dev: Platform driver pci private data structure
+ * @control: Power collapse enable / disable
+ *
+ * This function controls ADSP power collapse (PC). It must be called
+ * based on wlan state. ADSP power collapse during wlan RTPM suspend state
+ * results in delay during periodic QMI stats PCI link up/down. This delay
+ * causes additional power consumption.
+ *
+ * Result: 0 Success. negative error codes.
+ */
+int cnss_wlan_adsp_pc_enable(struct cnss_pci_data *pci_priv,
+ bool control);
+int cnss_set_pci_link(struct cnss_pci_data *pci_priv, bool link_up);
+int cnss_pci_prevent_l1(struct device *dev);
+void cnss_pci_allow_l1(struct device *dev);
+int cnss_pci_get_msi_assignment(struct cnss_pci_data *pci_priv);
int cnss_pci_init_smmu(struct cnss_pci_data *pci_priv);
+#else
+int _cnss_pci_enumerate(struct cnss_plat_data *plat_priv, u32 rc_num)
+{
+ return -EOPNOTSUPP;
+}
+
+int cnss_pci_assert_perst(struct cnss_pci_data *pci_priv)
+{
+ return -EOPNOTSUPP;
+}
+
+int cnss_pci_disable_pc(struct cnss_pci_data *pci_priv, bool vote)
+{
+ return 0;
+}
+
+int cnss_pci_set_link_bandwidth(struct cnss_pci_data *pci_priv,
+ u16 link_speed, u16 link_width)
+{
+ return 0;
+}
+
+int cnss_pci_set_max_link_speed(struct cnss_pci_data *pci_priv,
+ u32 rc_num, u16 link_speed)
+{
+ return 0;
+}
+
+int cnss_reg_pci_event(struct cnss_pci_data *pci_priv)
+{
+ return 0;
+}
+
+void cnss_dereg_pci_event(struct cnss_pci_data *pci_priv) {}
+
+int cnss_wlan_adsp_pc_enable(struct cnss_pci_data *pci_priv, bool control)
+{
+ return 0;
+}
+
+int cnss_set_pci_link(struct cnss_pci_data *pci_priv, bool link_up)
+{
+ return 0;
+}
+
+int cnss_pci_prevent_l1(struct device *dev)
+{
+ return 0;
+}
+EXPORT_SYMBOL(cnss_pci_prevent_l1);
+
+void cnss_pci_allow_l1(struct device *dev)
+{
+}
+EXPORT_SYMBOL(cnss_pci_allow_l1);
+
+int cnss_pci_get_msi_assignment(struct cnss_pci_data *pci_priv)
+{
+ return 0;
+}
+
+int cnss_pci_init_smmu(struct cnss_pci_data *pci_priv)
+{
+ return 0;
+}
+#endif /* CONFIG_PCI_MSM */
+
+#if IS_ENABLED(CONFIG_WCN_GOOGLE)
+int cnss_pci_of_reserved_mem_device_init(struct cnss_pci_data *pci_priv);
+#else
+#if IS_ENABLED(CONFIG_ARCH_QCOM)
+int cnss_pci_of_reserved_mem_device_init(struct cnss_pci_data *pci_priv);
int cnss_pci_wake_gpio_init(struct cnss_pci_data *pci_priv);
void cnss_pci_wake_gpio_deinit(struct cnss_pci_data *pci_priv);
-int cnss_pci_of_reserved_mem_device_init(struct cnss_pci_data *pci_priv);
-int cnss_pci_disable_pc(struct cnss_pci_data *pci_priv, bool vote);
-int check_id_table(struct cnss_wlan_driver *driver_ops);
-int _cnss_pci_enumerate(struct cnss_plat_data *plat_priv, u32 rc_num);
-int cnss_pci_assert_perst(struct cnss_pci_data *pci_priv);
-int cnss_pci_set_max_link_speed(struct cnss_pci_data *pci_priv,
- u32 rc_num, u16 link_speed);
-int cnss_pci_set_link_bandwidth(struct cnss_pci_data *pci_priv,
- u16 link_speed, u16 link_width);
-//int _cnss_pci_prevent_l1(struct cnss_pci_data *pci_priv);
-#endif
\ No newline at end of file
+#endif /* CONFIG_ARCH_QCOM */
+#endif /* CONFIG_WCN_GOOGLE */
+#endif /* _CNSS_PCI_PLATFORM_H*/
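
pci_platform.h now exposes a single API surface with two build flavors: real implementations when CONFIG_PCI_MSM or CONFIG_WCN_GOOGLE supplies a capable root-complex driver, and no-op stubs otherwise, so the common pci.c code can call these helpers unconditionally. The patch defines the stub bodies directly in the header; a condensed sketch of the same pattern is below, using a static inline variant purely for illustration.

#include <linux/kconfig.h>

struct cnss_pci_data;

#if IS_ENABLED(CONFIG_PCI_MSM) || IS_ENABLED(CONFIG_WCN_GOOGLE)
/* Real implementation lives in the platform-specific source file. */
int cnss_pci_disable_pc(struct cnss_pci_data *pci_priv, bool vote);
#else
/* Stub flavor: nothing to do when the RC driver has no power-collapse
 * control; static inline keeps repeated inclusion harmless. */
static inline int cnss_pci_disable_pc(struct cnss_pci_data *pci_priv,
				      bool vote)
{
	return 0;
}
#endif
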
diff --git a/cnss2/pci_platform_google.c b/cnss2/pci_platform_google.c
index bcd2612..869eac0 100644
--- a/cnss2/pci_platform_google.c
+++ b/cnss2/pci_platform_google.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
-/* Copyright (c) 2016-2021, The Linux Foundation. All rights reserved. */
+/* Copyright (c) 2021, The Linux Foundation. All rights reserved. */
#include <linux/of.h>
#include <linux/of_gpio.h>
@@ -32,16 +32,33 @@ static struct cnss_msi_config msi_config = {
},
};
-int cnss_set_pci_link(struct cnss_pci_data *pci_priv, bool link_up)
+int _cnss_pci_enumerate(struct cnss_plat_data *plat_priv, u32 rc_num)
{
- cnss_pr_vdbg("%s PCI link\n", link_up ? "Resuming" : "Suspending");
+ int ret = 0;
+ ret = exynos_pcie_pm_resume(rc_num);
+ return ret;
+}
- if (link_up) {
- return exynos_pcie_pm_resume(pci_priv->plat_priv->rc_num);
- } else {
- exynos_pcie_pm_suspend(pci_priv->plat_priv->rc_num);
- return 0;
- }
+int cnss_pci_assert_perst(struct cnss_pci_data *pci_priv)
+{
+ return -EOPNOTSUPP;
+}
+
+int cnss_pci_disable_pc(struct cnss_pci_data *pci_priv, bool vote)
+{
+ return 0;
+}
+
+int cnss_pci_set_link_bandwidth(struct cnss_pci_data *pci_priv,
+ u16 link_speed, u16 link_width)
+{
+ return 0;
+}
+
+int cnss_pci_set_max_link_speed(struct cnss_pci_data *pci_priv,
+ u32 rc_num, u16 link_speed)
+{
+ return 0;
}
static void cnss_pci_event_cb(struct exynos_pcie_notify *notify)
@@ -108,7 +125,6 @@ int cnss_reg_pci_event(struct cnss_pci_data *pci_priv)
cnss_pr_err("Failed to register exynos PCI event, err = %d\n",
ret);
return ret;
-
}
void cnss_dereg_pci_event(struct cnss_pci_data *pci_priv)
@@ -116,10 +132,21 @@ void cnss_dereg_pci_event(struct cnss_pci_data *pci_priv)
exynos_pcie_deregister_event(&pci_priv->exynos_pci_event);
}
+int cnss_wlan_adsp_pc_enable(struct cnss_pci_data *pci_priv, bool control)
+{
+ return 0;
+}
-void cnss_set_perst_gpio(struct cnss_plat_data *plat_priv)
+int cnss_set_pci_link(struct cnss_pci_data *pci_priv, bool link_up)
{
- exynos_pcie_set_perst_gpio(plat_priv->rc_num, false);
+ cnss_pr_vdbg("%s PCI link\n", link_up ? "Resuming" : "Suspending");
+
+ if (link_up) {
+ return exynos_pcie_pm_resume(pci_priv->plat_priv->rc_num);
+ } else {
+ exynos_pcie_pm_suspend(pci_priv->plat_priv->rc_num);
+ return 0;
+ }
}
int cnss_pci_prevent_l1(struct device *dev)
@@ -134,109 +161,15 @@ void cnss_pci_allow_l1(struct device *dev)
}
EXPORT_SYMBOL(cnss_pci_allow_l1);
-struct cnss_msi_config *cnss_get_msi_config(void)
+int cnss_pci_get_msi_assignment(struct cnss_pci_data *pci_priv)
{
- return &msi_config;
-}
+ pci_priv->msi_config = &msi_config;
-int cnss_pci_init_smmu(struct cnss_pci_data *pci_priv) {
return 0;
}
-
-static irqreturn_t cnss_pci_wake_handler(int irq, void *data)
-{
- struct cnss_pci_data *pci_priv = data;
- struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
-
- pci_priv->wake_counter++;
- cnss_pr_dbg("WLAN PCI wake IRQ (%u) is asserted #%u\n",
- pci_priv->wake_irq, pci_priv->wake_counter);
-
- /* Make sure abort current suspend */
- cnss_pm_stay_awake(plat_priv);
- cnss_pm_relax(plat_priv);
- /* Above two pm* API calls will abort system suspend only when
- * plat_dev->dev->ws is initiated by device_init_wakeup() API, and
- * calling pm_system_wakeup() is just to guarantee system suspend
- * can be aborted if it is not initiated in any case.
- */
- pm_system_wakeup();
-
- if (cnss_pci_get_monitor_wake_intr(pci_priv) &&
- cnss_pci_get_auto_suspended(pci_priv)) {
- cnss_pci_set_monitor_wake_intr(pci_priv, false);
- cnss_pci_pm_request_resume(pci_priv);
- }
-
- return IRQ_HANDLED;
-}
-
-/**
- * cnss_pci_wake_gpio_init() - Setup PCI wake GPIO for WLAN
- * @pci_priv: driver PCI bus context pointer
- *
- * This function initializes WLAN PCI wake GPIO and corresponding
- * interrupt. It should be used in non-MSM platforms whose PCIe
- * root complex driver doesn't handle the GPIO.
- *
- * Return: 0 for success or skip, negative value for error
- */
-int cnss_pci_wake_gpio_init(struct cnss_pci_data *pci_priv)
-{
- struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
- struct device *dev = &plat_priv->plat_dev->dev;
- int ret = 0;
-
- pci_priv->wake_gpio = of_get_named_gpio(dev->of_node,
- "wlan-pci-wake-gpio", 0);
- if (pci_priv->wake_gpio < 0)
- goto out;
-
- cnss_pr_dbg("Get PCI wake GPIO (%d) from device node\n",
- pci_priv->wake_gpio);
-
- ret = gpio_request(pci_priv->wake_gpio, "wlan_pci_wake_gpio");
- if (ret) {
- cnss_pr_err("Failed to request PCI wake GPIO, err = %d\n",
- ret);
- goto out;
- }
-
- gpio_direction_input(pci_priv->wake_gpio);
- pci_priv->wake_irq = gpio_to_irq(pci_priv->wake_gpio);
-
- ret = request_irq(pci_priv->wake_irq, cnss_pci_wake_handler,
- IRQF_TRIGGER_FALLING, "wlan_pci_wake_irq", pci_priv);
- if (ret) {
- cnss_pr_err("Failed to request PCI wake IRQ, err = %d\n", ret);
- goto free_gpio;
- }
-
- ret = enable_irq_wake(pci_priv->wake_irq);
- if (ret) {
- cnss_pr_err("Failed to enable PCI wake IRQ, err = %d\n", ret);
- goto free_irq;
- }
-
+int cnss_pci_init_smmu(struct cnss_pci_data *pci_priv) {
return 0;
-
-free_irq:
- free_irq(pci_priv->wake_irq, pci_priv);
-free_gpio:
- gpio_free(pci_priv->wake_gpio);
-out:
- return ret;
-}
-
-void cnss_pci_wake_gpio_deinit(struct cnss_pci_data *pci_priv)
-{
- if (pci_priv->wake_gpio < 0)
- return;
-
- disable_irq_wake(pci_priv->wake_irq);
- free_irq(pci_priv->wake_irq, pci_priv);
- gpio_free(pci_priv->wake_gpio);
}
int cnss_pci_of_reserved_mem_device_init(struct cnss_pci_data *pci_priv)
@@ -253,58 +186,6 @@ int cnss_pci_of_reserved_mem_device_init(struct cnss_pci_data *pci_priv)
return ret;
}
-int cnss_pci_disable_pc(struct cnss_pci_data *pci_priv, bool vote)
-{
- return 0;
-}
-
-int check_id_table(struct cnss_wlan_driver *driver_ops)
-{
- return 0;
-}
-
-int _cnss_pci_enumerate(struct cnss_plat_data *plat_priv, u32 rc_num)
-{
- int ret = 0;
- ret = exynos_pcie_pm_resume(rc_num);
- cnss_pr_err("ret of exynos_pcie_pm_resume: %d\n", ret);
- return ret;
-}
-
-int cnss_pci_assert_perst(struct cnss_pci_data *pci_priv)
-{
- return -EOPNOTSUPP;
-}
-
-int cnss_pci_set_max_link_speed(struct cnss_pci_data *pci_priv,
- u32 rc_num, u16 link_speed)
-{
- return 0;
-}
-
-int cnss_pci_set_link_bandwidth(struct cnss_pci_data *pci_priv,
- u16 link_speed, u16 link_width)
-{
- return 0;
-}
-
-// static int _cnss_pci_prevent_l1(struct cnss_pci_data *pci_priv)
-// {
-// return 0;
-// }
-
-// static void _cnss_pci_allow_l1(struct cnss_pci_data *pci_priv) {}
-
-// static int cnss_pci_set_link_up(struct cnss_pci_data *pci_priv)
-// {
-// return 0;
-// }
-
-// static int cnss_pci_set_link_down(struct cnss_pci_data *pci_priv)
-// {
-// return 0;
-// }
-
/*
* The following functions are for ssrdump.
*/
@@ -338,7 +219,8 @@ void sscd_release(struct device *dev)
cnss_pr_info("%s: enter\n", __FUNCTION__);
}
-static void sscd_set_coredump(void *buf, int buf_len, const char *info)
+u8 *crash_info = 0;
+void sscd_set_coredump(void *buf, int buf_len)
{
struct sscd_platform_data *pdata = dev_get_platdata(&sscd_dev.dev);
struct sscd_segment seg;
@@ -347,16 +229,16 @@ static void sscd_set_coredump(void *buf, int buf_len, const char *info)
memset(&seg, 0, sizeof(seg));
seg.addr = buf;
seg.size = buf_len;
- if(info) {
- pdata->sscd_report(&sscd_dev, &seg, 1, 0, info);
+	if (crash_info) {
+ pdata->sscd_report(&sscd_dev, &seg, 1, 0, crash_info);
+ kfree(crash_info);
+ crash_info = 0;
} else {
pdata->sscd_report(&sscd_dev, &seg, 1, 0, "Unknown");
}
-
}
}
-u8 *crash_info = 0;
void crash_info_handler(u8 *info)
{
u32 string_len = 0;
@@ -373,91 +255,3 @@ void crash_info_handler(u8 *info)
strncpy(crash_info, info, string_len);
crash_info[string_len] = '\0';
}
-
-int qcom_elf_dump(struct list_head *segs, struct device *dev)
-{
- struct qcom_dump_segment *segment;
- struct elf32_phdr *phdr;
- struct elf32_hdr *ehdr;
- size_t data_size;
- size_t offset;
- int phnum = 0;
- void *data;
- void __iomem *ptr;
-
- if (!segs || list_empty(segs))
- return -EINVAL;
-
- data_size = sizeof(*ehdr);
- list_for_each_entry(segment, segs, node) {
- data_size += sizeof(*phdr) + segment->size;
-
- phnum++;
- }
-
- data = vmalloc(data_size);
- if (!data)
- return -ENOMEM;
-
- cnss_pr_info("Creating elf with size %d\n", data_size);
- ehdr = data;
-
- memset(ehdr, 0, sizeof(*ehdr));
- memcpy(ehdr->e_ident, ELFMAG, SELFMAG);
- ehdr->e_ident[EI_CLASS] = ELFCLASS32;
- ehdr->e_ident[EI_DATA] = ELFDATA2LSB;
- ehdr->e_ident[EI_VERSION] = EV_CURRENT;
- ehdr->e_ident[EI_OSABI] = ELFOSABI_NONE;
- ehdr->e_type = ET_CORE;
- ehdr->e_machine = EM_NONE;
- ehdr->e_version = EV_CURRENT;
- ehdr->e_entry = 0;
- ehdr->e_phoff = sizeof(*ehdr);
- ehdr->e_ehsize = sizeof(*ehdr);
- ehdr->e_phentsize = sizeof(*phdr);
- ehdr->e_phnum = phnum;
-
- phdr = data + ehdr->e_phoff;
- offset = ehdr->e_phoff + sizeof(*phdr) * ehdr->e_phnum;
- list_for_each_entry(segment, segs, node) {
- memset(phdr, 0, sizeof(*phdr));
- phdr->p_type = PT_LOAD;
- phdr->p_offset = offset;
- phdr->p_vaddr = segment->da;
- phdr->p_paddr = segment->da;
- phdr->p_filesz = segment->size;
- phdr->p_memsz = segment->size;
- phdr->p_flags = PF_R | PF_W | PF_X;
- phdr->p_align = 0;
-
- if (segment->va) {
- memcpy(data + offset, segment->va, segment->size);
- } else {
- ptr = devm_ioremap(dev, segment->da, segment->size);
- if (!ptr) {
- dev_err(dev,
- "invalid coredump segment (%pad, %zu)\n",
- &segment->da, segment->size);
- memset(data + offset, 0xff, segment->size);
- } else
- memcpy_fromio(data + offset, ptr,
- segment->size);
- }
-
- offset += phdr->p_filesz;
- phdr++;
- }
-
- /*
- * SSCD integration
- */
- sscd_set_coredump(data, data_size, crash_info);
- if (crash_info) {
- kfree(crash_info);
- crash_info = 0;
- }
-
-
- vfree(data);
- return 0;
-}
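For context: in the Google platform glue above, sscd_set_coredump() now takes only the buffer and its length, and picks up the crash reason from the module-level crash_info buffer filled in by crash_info_handler(). A minimal sketch of the intended call order, for illustration only (the wrapper below is not part of this patch):

	/* Illustrative sketch: report a firmware crash through SSCD. */
	static void example_report_fw_crash(struct device *dev, u8 *reason,
					    struct list_head *dump_segs)
	{
		/* Stash the crash reason; sscd_set_coredump() consumes and frees it. */
		crash_info_handler(reason);

		/*
		 * qcom_elf_dump() (cnss2/qcom_ramdump.c, added below) builds the
		 * ELF core and, when CONFIG_WCN_GOOGLE is set, forwards it via
		 * sscd_set_coredump().
		 */
		qcom_elf_dump(dump_segs, dev, ELFCLASS32);
	}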
diff --git a/cnss2/pci_platform_msm.c b/cnss2/pci_qcom.c
index ee52fbc..1df4976 100644
--- a/cnss2/pci_platform_msm.c
+++ b/cnss2/pci_qcom.c
@@ -1,8 +1,11 @@
// SPDX-License-Identifier: GPL-2.0-only
-/* Copyright (c) 2016-2021, The Linux Foundation. All rights reserved. */
+/*
+ * Copyright (c) 2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
#include "pci_platform.h"
-
+#include "debug.h"
static struct cnss_msi_config msi_config = {
.total_vectors = 32,
@@ -15,77 +18,128 @@ static struct cnss_msi_config msi_config = {
},
};
+int _cnss_pci_enumerate(struct cnss_plat_data *plat_priv, u32 rc_num)
+{
+ return msm_pcie_enumerate(rc_num);
+}
-void cnss_shutdown(struct platform_device *plat_dev) {}
+int cnss_pci_assert_perst(struct cnss_pci_data *pci_priv)
+{
+ struct pci_dev *pci_dev = pci_priv->pci_dev;
-static int cnss_set_pci_link_status(struct cnss_pci_data *pci_priv,
- enum pci_link_status status)
+ return msm_pcie_pm_control(MSM_PCIE_HANDLE_LINKDOWN,
+ pci_dev->bus->number, pci_dev, NULL,
+ PM_OPTIONS_DEFAULT);
+}
+
+int cnss_pci_disable_pc(struct cnss_pci_data *pci_priv, bool vote)
{
- u16 link_speed, link_width;
- int ret;
+ struct pci_dev *pci_dev = pci_priv->pci_dev;
- cnss_pr_vdbg("Set PCI link status to: %u\n", status);
+ return msm_pcie_pm_control(vote ? MSM_PCIE_DISABLE_PC :
+ MSM_PCIE_ENABLE_PC,
+ pci_dev->bus->number, pci_dev, NULL,
+ PM_OPTIONS_DEFAULT);
+}
- switch (status) {
- case PCI_GEN1:
- link_speed = PCI_EXP_LNKSTA_CLS_2_5GB;
- link_width = PCI_EXP_LNKSTA_NLW_X1 >> PCI_EXP_LNKSTA_NLW_SHIFT;
- break;
- case PCI_GEN2:
- link_speed = PCI_EXP_LNKSTA_CLS_5_0GB;
- link_width = PCI_EXP_LNKSTA_NLW_X1 >> PCI_EXP_LNKSTA_NLW_SHIFT;
- break;
- case PCI_DEF:
- link_speed = pci_priv->def_link_speed;
- link_width = pci_priv->def_link_width;
- if (!link_speed && !link_width) {
- cnss_pr_err("PCI link speed or width is not valid\n");
- return -EINVAL;
- }
- break;
- default:
- cnss_pr_err("Unknown PCI link status config: %u\n", status);
- return -EINVAL;
- }
+int cnss_pci_set_link_bandwidth(struct cnss_pci_data *pci_priv,
+ u16 link_speed, u16 link_width)
+{
+ return msm_pcie_set_link_bandwidth(pci_priv->pci_dev,
+ link_speed, link_width);
+}
- ret = cnss_pci_set_link_bandwidth(pci_priv, link_speed, link_width);
- if (!ret)
- pci_priv->cur_link_speed = link_speed;
+int cnss_pci_set_max_link_speed(struct cnss_pci_data *pci_priv,
+ u32 rc_num, u16 link_speed)
+{
+ return msm_pcie_set_target_link_speed(rc_num, link_speed, false);
+}
- return ret;
+/**
+ * _cnss_pci_prevent_l1() - Prevent PCIe L1 and L1 sub-states
+ * @pci_priv: driver PCI bus context pointer
+ *
+ * This function shall call corresponding PCIe root complex driver APIs
+ * to prevent the PCIe link from entering L1 and L1 sub-states. The APIs
+ * should also bring the link out of L1 or L1 sub-states if needed and
+ * avoid any synchronization issues.
+ *
+ * Return: 0 for success, negative value for error
+ */
+static int _cnss_pci_prevent_l1(struct cnss_pci_data *pci_priv)
+{
+ return msm_pcie_prevent_l1(pci_priv->pci_dev);
}
-int cnss_set_pci_link(struct cnss_pci_data *pci_priv, bool link_up)
+/**
+ * _cnss_pci_allow_l1() - Allow PCIe L1 and L1 sub-states
+ * @pci_priv: driver PCI bus context pointer
+ *
+ * This function shall call corresponding PCIe root complex driver APIs
+ * to allow the PCIe link to enter L1 and L1 sub-states. The APIs should
+ * avoid any synchronization issues.
+ *
+ * Return: None
+ */
+static void _cnss_pci_allow_l1(struct cnss_pci_data *pci_priv)
{
- int ret = 0, retry = 0;
+ msm_pcie_allow_l1(pci_priv->pci_dev);
+}
- cnss_pr_vdbg("%s PCI link\n", link_up ? "Resuming" : "Suspending");
+/**
+ * cnss_pci_set_link_up() - Power on or resume PCIe link
+ * @pci_priv: driver PCI bus context pointer
+ *
+ * This function shall call corresponding PCIe root complex driver APIs
+ * to power on or resume the PCIe link.
+ *
+ * Return: 0 for success, negative value for error
+ */
+static int cnss_pci_set_link_up(struct cnss_pci_data *pci_priv)
+{
+ struct pci_dev *pci_dev = pci_priv->pci_dev;
+ enum msm_pcie_pm_opt pm_ops = MSM_PCIE_RESUME;
+ u32 pm_options = PM_OPTIONS_DEFAULT;
+ int ret;
- if (link_up) {
-retry:
- ret = cnss_pci_set_link_up(pci_priv);
- if (ret && retry++ < LINK_TRAINING_RETRY_MAX_TIMES) {
- cnss_pr_dbg("Retry PCI link training #%d\n", retry);
- if (pci_priv->pci_link_down_ind)
- msleep(LINK_TRAINING_RETRY_DELAY_MS * retry);
- goto retry;
- }
- } else {
- /* Since DRV suspend cannot be done in Gen 3, set it to
- * Gen 2 if current link speed is larger than Gen 2.
- */
- if (pci_priv->drv_connected_last &&
- pci_priv->cur_link_speed > PCI_EXP_LNKSTA_CLS_5_0GB)
- cnss_set_pci_link_status(pci_priv, PCI_GEN2);
+ ret = msm_pcie_pm_control(pm_ops, pci_dev->bus->number, pci_dev,
+ NULL, pm_options);
+ if (ret)
+ cnss_pr_err("Failed to resume PCI link with default option, err = %d\n",
+ ret);
- ret = cnss_pci_set_link_down(pci_priv);
- }
+ return ret;
+}
+
+/**
+ * cnss_pci_set_link_down() - Power off or suspend PCIe link
+ * @pci_priv: driver PCI bus context pointer
+ *
+ * This function shall call corresponding PCIe root complex driver APIs
+ * to power off or suspend the PCIe link.
+ *
+ * Return: 0 for success, negative value for error
+ */
+static int cnss_pci_set_link_down(struct cnss_pci_data *pci_priv)
+{
+ struct pci_dev *pci_dev = pci_priv->pci_dev;
+ enum msm_pcie_pm_opt pm_ops;
+ u32 pm_options = PM_OPTIONS_DEFAULT;
+ int ret;
if (pci_priv->drv_connected_last) {
- if ((link_up && !ret) || (!link_up && ret))
- cnss_set_pci_link_status(pci_priv, PCI_DEF);
+ cnss_pr_vdbg("Use PCIe DRV suspend\n");
+ pm_ops = MSM_PCIE_DRV_SUSPEND;
+ } else {
+ pm_ops = MSM_PCIE_SUSPEND;
}
+ ret = msm_pcie_pm_control(pm_ops, pci_dev->bus->number, pci_dev,
+ NULL, pm_options);
+ if (ret)
+ cnss_pr_err("Failed to suspend PCI link with default option, err = %d\n",
+ ret);
+
return ret;
}
@@ -125,6 +179,8 @@ static void cnss_pci_event_cb(struct msm_pcie_notify *notify)
struct pci_dev *pci_dev;
struct cnss_pci_data *pci_priv;
struct device *dev;
+ struct cnss_plat_data *plat_priv = NULL;
+ int ret = 0;
if (!notify)
return;
@@ -139,6 +195,23 @@ static void cnss_pci_event_cb(struct msm_pcie_notify *notify)
dev = &pci_priv->pci_dev->dev;
switch (notify->event) {
+ case MSM_PCIE_EVENT_LINK_RECOVER:
+ cnss_pr_dbg("PCI link recover callback\n");
+
+ plat_priv = pci_priv->plat_priv;
+ if (!plat_priv) {
+ cnss_pr_err("plat_priv is NULL\n");
+ return;
+ }
+
+ plat_priv->ctrl_params.quirks |= BIT(LINK_DOWN_SELF_RECOVERY);
+
+ ret = msm_pcie_pm_control(MSM_PCIE_HANDLE_LINKDOWN,
+ pci_dev->bus->number, pci_dev, NULL,
+ PM_OPTIONS_DEFAULT);
+ if (ret)
+ cnss_pci_handle_linkdown(pci_priv);
+ break;
case MSM_PCIE_EVENT_LINKDOWN:
cnss_pr_dbg("PCI link down event callback\n");
cnss_pci_handle_linkdown(pci_priv);
@@ -166,24 +239,15 @@ static void cnss_pci_event_cb(struct msm_pcie_notify *notify)
}
}
-/**
- * cnss_reg_pci_event() - Register for PCIe events
- * @pci_priv: driver PCI bus context pointer
- *
- * This function shall call corresponding PCIe root complex driver APIs
- * to register for PCIe events like link down or WAKE GPIO toggling etc.
- * The events should be based on PCIe root complex driver's capability.
- *
- * Return: 0 for success, negative value for error
- */
int cnss_reg_pci_event(struct cnss_pci_data *pci_priv)
{
int ret = 0;
struct msm_pcie_register_event *pci_event;
pci_event = &pci_priv->msm_pci_event;
- pci_event->events = MSM_PCIE_EVENT_LINKDOWN |
- MSM_PCIE_EVENT_WAKEUP;
+ pci_event->events = MSM_PCIE_EVENT_LINK_RECOVER |
+ MSM_PCIE_EVENT_LINKDOWN |
+ MSM_PCIE_EVENT_WAKEUP;
if (cnss_pci_is_drv_supported(pci_priv))
pci_event->events = pci_event->events |
@@ -208,29 +272,105 @@ void cnss_dereg_pci_event(struct cnss_pci_data *pci_priv)
msm_pcie_deregister_event(&pci_priv->msm_pci_event);
}
-void cnss_set_perst_gpio(struct cnss_plat_data *plat_priv) {}
-
-// int platform_pci_init(struct cnss_plat_data *plat_priv) {
-// struct device *dev = &plat_priv->plat_dev->dev;
-// const __be32 *prop;
-// int ret = 0, prop_len = 0, rc_count, i;
-
-// prop = of_get_property(dev->of_node, "qcom,wlan-rc-num", &prop_len);
-// if (!prop || !prop_len) {
-// cnss_pr_err("Failed to get PCIe RC number from DT\n");
-// return ENOENT;
-// }
-
-// rc_count = prop_len / sizeof(__be32);
-// for (i = 0; i < rc_count; i++) {
-// ret = cnss_pci_enumerate(plat_priv, be32_to_cpup(&prop[i]));
-// if (!ret)
-// break;
-// else if (ret == -EPROBE_DEFER || (ret && i == rc_count - 1))
-// break;
-// }
-// return ret;
-// }
+int cnss_wlan_adsp_pc_enable(struct cnss_pci_data *pci_priv,
+ bool control)
+{
+ struct pci_dev *pci_dev = pci_priv->pci_dev;
+ int ret = 0;
+ u32 pm_options = PM_OPTIONS_DEFAULT;
+ struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
+
+ if (plat_priv->adsp_pc_enabled == control) {
+ cnss_pr_dbg("ADSP power collapse already %s\n",
+ control ? "Enabled" : "Disabled");
+ return 0;
+ }
+
+ if (control)
+ pm_options &= ~MSM_PCIE_CONFIG_NO_DRV_PC;
+ else
+ pm_options |= MSM_PCIE_CONFIG_NO_DRV_PC;
+
+ ret = msm_pcie_pm_control(MSM_PCIE_DRV_PC_CTRL, pci_dev->bus->number,
+ pci_dev, NULL, pm_options);
+ if (ret)
+ return ret;
+
+ cnss_pr_dbg("%s ADSP power collapse\n", control ? "Enable" : "Disable");
+ plat_priv->adsp_pc_enabled = control;
+ return 0;
+}
+
+static int cnss_set_pci_link_status(struct cnss_pci_data *pci_priv,
+ enum pci_link_status status)
+{
+ u16 link_speed, link_width;
+ int ret;
+
+ cnss_pr_vdbg("Set PCI link status to: %u\n", status);
+
+ switch (status) {
+ case PCI_GEN1:
+ link_speed = PCI_EXP_LNKSTA_CLS_2_5GB;
+ link_width = PCI_EXP_LNKSTA_NLW_X1 >> PCI_EXP_LNKSTA_NLW_SHIFT;
+ break;
+ case PCI_GEN2:
+ link_speed = PCI_EXP_LNKSTA_CLS_5_0GB;
+ link_width = PCI_EXP_LNKSTA_NLW_X1 >> PCI_EXP_LNKSTA_NLW_SHIFT;
+ break;
+ case PCI_DEF:
+ link_speed = pci_priv->def_link_speed;
+ link_width = pci_priv->def_link_width;
+ if (!link_speed && !link_width) {
+ cnss_pr_err("PCI link speed or width is not valid\n");
+ return -EINVAL;
+ }
+ break;
+ default:
+ cnss_pr_err("Unknown PCI link status config: %u\n", status);
+ return -EINVAL;
+ }
+
+ ret = cnss_pci_set_link_bandwidth(pci_priv, link_speed, link_width);
+ if (!ret)
+ pci_priv->cur_link_speed = link_speed;
+
+ return ret;
+}
+
+int cnss_set_pci_link(struct cnss_pci_data *pci_priv, bool link_up)
+{
+ int ret = 0, retry = 0;
+
+ cnss_pr_vdbg("%s PCI link\n", link_up ? "Resuming" : "Suspending");
+
+ if (link_up) {
+retry:
+ ret = cnss_pci_set_link_up(pci_priv);
+ if (ret && retry++ < LINK_TRAINING_RETRY_MAX_TIMES) {
+ cnss_pr_dbg("Retry PCI link training #%d\n", retry);
+ if (pci_priv->pci_link_down_ind)
+ msleep(LINK_TRAINING_RETRY_DELAY_MS * retry);
+ goto retry;
+ }
+ } else {
+ /* Since DRV suspend cannot be done in Gen 3, set it to
+ * Gen 2 if current link speed is larger than Gen 2.
+ */
+ if (pci_priv->drv_connected_last &&
+ pci_priv->cur_link_speed > PCI_EXP_LNKSTA_CLS_5_0GB)
+ cnss_set_pci_link_status(pci_priv, PCI_GEN2);
+
+ ret = cnss_pci_set_link_down(pci_priv);
+ }
+
+ if (pci_priv->drv_connected_last) {
+ if ((link_up && !ret) || (!link_up && ret))
+ cnss_set_pci_link_status(pci_priv, PCI_DEF);
+ }
+
+ return ret;
+}
int cnss_pci_prevent_l1(struct device *dev)
{
@@ -285,10 +425,13 @@ void cnss_pci_allow_l1(struct device *dev)
_cnss_pci_allow_l1(pci_priv);
}
-EXPORT_SYMBOL(cnss_pci_allow_l1)
+EXPORT_SYMBOL(cnss_pci_allow_l1);
+
+int cnss_pci_get_msi_assignment(struct cnss_pci_data *pci_priv)
+{
+ pci_priv->msi_config = &msi_config;
-struct cnss_msi_config *cnss_get_msi_config(void) {
- return &msi_config;
+ return 0;
}
static int cnss_pci_smmu_fault_handler(struct iommu_domain *domain,
@@ -371,16 +514,7 @@ int cnss_pci_init_smmu(struct cnss_pci_data *pci_priv)
return 0;
}
-
-int cnss_pci_wake_gpio_init(struct cnss_pci_data *pci_priv)
-{
- return 0;
-}
-
-void cnss_pci_wake_gpio_deinit(struct cnss_pci_data *pci_priv)
-{
-}
-
+#if IS_ENABLED(CONFIG_ARCH_QCOM)
/**
* cnss_pci_of_reserved_mem_device_init() - Assign reserved memory region
* to given PCI device
@@ -393,7 +527,7 @@ void cnss_pci_wake_gpio_deinit(struct cnss_pci_data *pci_priv)
*
* Return: 0 for success, negative value for error
*/
-static int cnss_pci_of_reserved_mem_device_init(struct cnss_pci_data *pci_priv)
+int cnss_pci_of_reserved_mem_device_init(struct cnss_pci_data *pci_priv)
{
struct device *dev_pci = &pci_priv->pci_dev->dev;
int ret;
@@ -412,201 +546,12 @@ static int cnss_pci_of_reserved_mem_device_init(struct cnss_pci_data *pci_priv)
return ret;
}
-/**
- * cnss_pci_disable_pc() - Disable PCIe link power collapse from RC driver
- * @pci_priv: driver PCI bus context pointer
- * @vote: value to indicate disable (true) or enable (false)
- *
- * This function shall call corresponding PCIe root complex driver APIs
- * to disable PCIe power collapse. The purpose of this API is to avoid
- * root complex driver still controlling PCIe link from callbacks of
- * system suspend/resume. Device driver itself should take full control
- * of the link in such cases.
- *
- * Return: 0 for success, negative value for error
- */
-int cnss_pci_disable_pc(struct cnss_pci_data *pci_priv, bool vote)
-{
- struct pci_dev *pci_dev = pci_priv->pci_dev;
-
- return msm_pcie_pm_control(vote ? MSM_PCIE_DISABLE_PC :
- MSM_PCIE_ENABLE_PC,
- pci_dev->bus->number, pci_dev, NULL,
- PM_OPTIONS_DEFAULT);
-}
-
-int check_id_table(struct cnss_wlan_driver *driver_ops)
+int cnss_pci_wake_gpio_init(struct cnss_pci_data *pci_priv)
{
- const struct pci_device_id *id_table = driver_ops->id_table;
- if (!id_table || !pci_dev_present(id_table)) {
- /* id_table pointer will move from pci_dev_present(),
- * so check again using local pointer.
- */
- id_table = driver_ops->id_table;
- while (id_table->vendor) {
- cnss_pr_info("Host driver is built for PCIe device ID 0x%x\n",
- id_table->device);
- id_table++;
- }
- cnss_pr_err("Enumerated PCIe device id is 0x%x, reject unsupported driver\n",
- pci_priv->device_id);
- return -ENODEV;
- }
return 0;
}
-/**
- * _cnss_pci_enumerate() - Enumerate PCIe endpoints
- * @plat_priv: driver platform context pointer
- * @rc_num: root complex index that an endpoint connects to
- *
- * This function shall call corresponding PCIe root complex driver APIs
- * to power on root complex and enumerate the endpoint connected to it.
- *
- * Return: 0 for success, negative value for error
- */
-int _cnss_pci_enumerate(struct cnss_plat_data *plat_priv, u32 rc_num)
-{
- return msm_pcie_enumerate(rc_num);
-}
-
-/**
- * cnss_pci_assert_perst() - Assert PCIe PERST GPIO
- * @pci_priv: driver PCI bus context pointer
- *
- * This function shall call corresponding PCIe root complex driver APIs
- * to assert PCIe PERST GPIO.
- *
- * Return: 0 for success, negative value for error
- */
-static int cnss_pci_assert_perst(struct cnss_pci_data *pci_priv)
-{
- struct pci_dev *pci_dev = pci_priv->pci_dev;
-
- return msm_pcie_pm_control(MSM_PCIE_HANDLE_LINKDOWN,
- pci_dev->bus->number, pci_dev, NULL,
- PM_OPTIONS_DEFAULT);
-}
-
-/**
- * cnss_pci_set_max_link_speed() - Set the maximum speed PCIe can link up with
- * @pci_priv: driver PCI bus context pointer
- * @rc_num: root complex index that an endpoint connects to
- * @link_speed: PCIe link gen speed
- *
- * This function shall call corresponding PCIe root complex driver APIs
- * to update the maximum speed that PCIe can link up with.
- *
- * Return: 0 for success, negative value for error
- */
-int cnss_pci_set_max_link_speed(struct cnss_pci_data *pci_priv,
- u32 rc_num, u16 link_speed)
-{
- return msm_pcie_set_target_link_speed(rc_num, link_speed);
-}
-
-/**
- * cnss_pci_set_link_bandwidth() - Update number of lanes and speed of
- * PCIe link
- * @pci_priv: driver PCI bus context pointer
- * @link_speed: PCIe link gen speed
- * @link_width: number of lanes for PCIe link
- *
- * This function shall call corresponding PCIe root complex driver APIs
- * to update number of lanes and speed of the link.
- *
- * Return: 0 for success, negative value for error
- */
-int cnss_pci_set_link_bandwidth(struct cnss_pci_data *pci_priv,
- u16 link_speed, u16 link_width)
-{
- return msm_pcie_set_link_bandwidth(pci_priv->pci_dev,
- link_speed, link_width);
-}
-
-/**
- * _cnss_pci_prevent_l1() - Prevent PCIe L1 and L1 sub-states
- * @pci_priv: driver PCI bus context pointer
- *
- * This function shall call corresponding PCIe root complex driver APIs
- * to prevent PCIe link enter L1 and L1 sub-states. The APIs should also
- * bring link out of L1 or L1 sub-states if any and avoid synchronization
- * issues if any.
- *
- * Return: 0 for success, negative value for error
- */
-static int _cnss_pci_prevent_l1(struct cnss_pci_data *pci_priv)
-{
- return msm_pcie_prevent_l1(pci_priv->pci_dev);
-}
-
-/**
- * _cnss_pci_allow_l1() - Allow PCIe L1 and L1 sub-states
- * @pci_priv: driver PCI bus context pointer
- *
- * This function shall call corresponding PCIe root complex driver APIs
- * to allow PCIe link enter L1 and L1 sub-states. The APIs should avoid
- * synchronization issues if any.
- *
- * Return: 0 for success, negative value for error
- */
-static void _cnss_pci_allow_l1(struct cnss_pci_data *pci_priv)
-{
- msm_pcie_allow_l1(pci_priv->pci_dev);
-}
-
-/**
- * @pci_priv: driver PCI bus context pointer
- *
- * This function shall call corresponding PCIe root complex driver APIs
- * to Power on or resume PCIe link.
- *
- * Return: 0 for success, negative value for error
- */
-static int cnss_pci_set_link_up(struct cnss_pci_data *pci_priv)
-{
- struct pci_dev *pci_dev = pci_priv->pci_dev;
- enum msm_pcie_pm_opt pm_ops = MSM_PCIE_RESUME;
- u32 pm_options = PM_OPTIONS_DEFAULT;
- int ret;
-
- ret = msm_pcie_pm_control(pm_ops, pci_dev->bus->number, pci_dev,
- NULL, pm_options);
- if (ret)
- cnss_pr_err("Failed to resume PCI link with default option, err = %d\n",
- ret);
-
- return ret;
-}
-
-/**
- * cnss_pci_set_link_down() - Power off or suspend PCIe link
- * @pci_priv: driver PCI bus context pointer
- *
- * This function shall call corresponding PCIe root complex driver APIs
- * to power off or suspend PCIe link.
- *
- * Return: 0 for success, negative value for error
- */
-static int cnss_pci_set_link_down(struct cnss_pci_data *pci_priv)
+void cnss_pci_wake_gpio_deinit(struct cnss_pci_data *pci_priv)
{
- struct pci_dev *pci_dev = pci_priv->pci_dev;
- enum msm_pcie_pm_opt pm_ops;
- u32 pm_options = PM_OPTIONS_DEFAULT;
- int ret;
-
- if (pci_priv->drv_connected_last) {
- cnss_pr_vdbg("Use PCIe DRV suspend\n");
- pm_ops = MSM_PCIE_DRV_SUSPEND;
- } else {
- pm_ops = MSM_PCIE_SUSPEND;
- }
-
- ret = msm_pcie_pm_control(pm_ops, pci_dev->bus->number, pci_dev,
- NULL, pm_options);
- if (ret)
- cnss_pr_err("Failed to suspend PCI link with default option, err = %d\n",
- ret);
-
- return ret;
}
+#endif
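For context: the exported cnss_pci_prevent_l1()/cnss_pci_allow_l1() pair above lets a client driver bracket accesses that must not race with L1 entry. A minimal sketch, with the wrapper and register purely illustrative:

	/* Illustrative sketch: keep the link out of L1 around a register read. */
	static int example_read_reg_no_l1(struct device *dev, void __iomem *reg,
					  u32 *val)
	{
		int ret;

		ret = cnss_pci_prevent_l1(dev);
		if (ret)
			return ret;

		*val = readl(reg);

		cnss_pci_allow_l1(dev);
		return 0;
	}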
diff --git a/cnss2/power.c b/cnss2/power.c
index 76698c1..64c32e2 100644
--- a/cnss2/power.c
+++ b/cnss2/power.c
@@ -1,5 +1,8 @@
// SPDX-License-Identifier: GPL-2.0-only
-/* Copyright (c) 2016-2021, The Linux Foundation. All rights reserved. */
+/*
+ * Copyright (c) 2016-2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
#include <linux/clk.h>
#include <linux/delay.h>
@@ -18,7 +21,7 @@
#include "debug.h"
#include "bus.h"
-#if IS_ENABLED(CONFIG_ARCH_QCOM)
+#if IS_ENABLED(CONFIG_ARCH_QCOM) && !IS_ENABLED(CONFIG_WCN_GOOGLE)
static struct cnss_vreg_cfg cnss_vreg_list[] = {
{"vdd-wlan-core", 1300000, 1300000, 0, 0, 0},
{"vdd-wlan-io", 1800000, 1800000, 0, 0, 0},
@@ -57,6 +60,7 @@ static struct cnss_clk_cfg cnss_clk_list[] = {
#define WLAN_EN_GPIO "wlan-en-gpio"
#define BT_EN_GPIO "qcom,bt-en-gpio"
#define XO_CLK_GPIO "qcom,xo-clk-gpio"
+#define SW_CTRL_GPIO "qcom,sw-ctrl-gpio"
#define WLAN_EN_ACTIVE "wlan_en_active"
#define WLAN_EN_SLEEP "wlan_en_sleep"
#ifdef CONFIG_WCN_GOOGLE
@@ -744,6 +748,7 @@ int cnss_get_pinctrl(struct cnss_plat_data *plat_priv)
ret);
goto out;
}
+ cnss_set_feature_list(plat_priv, CNSS_WLAN_EN_SUPPORT_V01);
}
/* Added for QCA6490 PMU delayed WLAN_EN_GPIO */
@@ -765,6 +770,17 @@ int cnss_get_pinctrl(struct cnss_plat_data *plat_priv)
} else {
pinctrl_info->xo_clk_gpio = -EINVAL;
}
+
+ if (of_find_property(dev->of_node, SW_CTRL_GPIO, NULL)) {
+ pinctrl_info->sw_ctrl_gpio = of_get_named_gpio(dev->of_node,
+ SW_CTRL_GPIO,
+ 0);
+ cnss_pr_dbg("Switch control GPIO: %d\n",
+ pinctrl_info->sw_ctrl_gpio);
+ } else {
+ pinctrl_info->sw_ctrl_gpio = -EINVAL;
+ }
+
return 0;
out:
return ret;
@@ -907,6 +923,23 @@ set_wlan_en:
return ret;
}
+int cnss_get_input_gpio_value(struct cnss_plat_data *plat_priv, int gpio_num)
+{
+ int ret;
+
+ if (gpio_num < 0)
+ return -EINVAL;
+
+ ret = gpio_direction_input(gpio_num);
+ if (ret) {
+ cnss_pr_err("Failed to set direction of GPIO(%d), err = %d",
+ gpio_num, ret);
+ return -EINVAL;
+ }
+
+ return gpio_get_value(gpio_num);
+}
+
#ifdef CONFIG_WCN_GOOGLE
static int wlan_buck_gpio=0;
int wlan_buck_enable(struct cnss_plat_data *plat_priv)
@@ -960,7 +993,6 @@ int cnss_power_on_device(struct cnss_plat_data *plat_priv)
{
int ret = 0;
- cnss_pr_info("%s Enter\n",__func__);
if (plat_priv->powered_on) {
cnss_pr_dbg("Already powered up");
return 0;
@@ -1144,12 +1176,6 @@ int cnss_aop_mbox_init(struct cnss_plat_data *plat_priv)
mbox->knows_txdone = false;
plat_priv->mbox_chan = NULL;
- chan = mbox_request_channel(mbox, 0);
- if (IS_ERR(chan)) {
- cnss_pr_err("Failed to get mbox channel\n");
- return PTR_ERR(chan);
- }
- plat_priv->mbox_chan = chan;
ret = of_property_read_string(plat_priv->plat_dev->dev.of_node,
"qcom,vreg_ol_cpr",
@@ -1163,7 +1189,18 @@ int cnss_aop_mbox_init(struct cnss_plat_data *plat_priv)
if (ret)
cnss_pr_dbg("Volt regulator for Int Power Amp not configured\n");
+ if (!plat_priv->vreg_ol_cpr && !plat_priv->vreg_ipa)
+ return 0;
+
+ chan = mbox_request_channel(mbox, 0);
+ if (IS_ERR(chan)) {
+ cnss_pr_err("Failed to get mbox channel\n");
+ return PTR_ERR(chan);
+ }
+
+ plat_priv->mbox_chan = chan;
cnss_pr_dbg("Mbox channel initialized\n");
+
return 0;
}
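For context: the new cnss_get_input_gpio_value() helper above switches a GPIO to input before sampling it, which is how the qcom,sw-ctrl-gpio parsed in cnss_get_pinctrl() is expected to be read. A minimal sketch, assuming pinctrl_info is embedded in struct cnss_plat_data as in the surrounding code (the wrapper itself is illustrative):

	/* Illustrative sketch: sample the switch-control GPIO if the DT provides one. */
	static int example_read_sw_ctrl(struct cnss_plat_data *plat_priv)
	{
		int gpio = plat_priv->pinctrl_info.sw_ctrl_gpio;

		/* cnss_get_pinctrl() stores -EINVAL when qcom,sw-ctrl-gpio is absent. */
		if (gpio < 0)
			return -ENODEV;

		/* Sets the GPIO direction to input, then returns gpio_get_value(). */
		return cnss_get_input_gpio_value(plat_priv, gpio);
	}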
diff --git a/cnss2/qcom_ramdump.c b/cnss2/qcom_ramdump.c
new file mode 100644
index 0000000..6379c0f
--- /dev/null
+++ b/cnss2/qcom_ramdump.c
@@ -0,0 +1,275 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/elf.h>
+#include <linux/wait.h>
+#include <linux/cdev.h>
+#include <linux/atomic.h>
+#ifdef CONFIG_CNSS_OUT_OF_TREE
+#include "qcom_ramdump.h"
+#else
+#include <soc/qcom/qcom_ramdump.h>
+#endif
+#include <linux/devcoredump.h>
+#include <linux/of.h>
+#include <linux/io.h>
+#if !IS_ENABLED(CONFIG_WCN_GOOGLE)
+#include <linux/soc/qcom/mdt_loader.h>
+#endif
+
+#define SIZEOF_ELF_STRUCT(__xhdr) \
+static inline size_t sizeof_elf_##__xhdr(unsigned char class) \
+{ \
+ if (class == ELFCLASS32) \
+ return sizeof(struct elf32_##__xhdr); \
+ else \
+ return sizeof(struct elf64_##__xhdr); \
+}
+
+SIZEOF_ELF_STRUCT(phdr)
+SIZEOF_ELF_STRUCT(hdr)
+
+#define set_xhdr_property(__xhdr, arg, class, member, value) \
+do { \
+ if (class == ELFCLASS32) \
+ ((struct elf32_##__xhdr *)arg)->member = value; \
+ else \
+ ((struct elf64_##__xhdr *)arg)->member = value; \
+} while (0)
+
+#define set_ehdr_property(arg, class, member, value) \
+ set_xhdr_property(hdr, arg, class, member, value)
+#define set_phdr_property(arg, class, member, value) \
+ set_xhdr_property(phdr, arg, class, member, value)
+
+struct qcom_ramdump_desc {
+ void *data;
+ struct completion dump_done;
+};
+
+static int enable_dump_collection;
+module_param(enable_dump_collection, int, 0644);
+
+bool dump_enabled(void)
+{
+ return enable_dump_collection;
+}
+EXPORT_SYMBOL(dump_enabled);
+
+#if !IS_ENABLED(CONFIG_WCN_GOOGLE)
+static ssize_t qcom_devcd_readv(char *buffer, loff_t offset, size_t count,
+ void *data, size_t datalen)
+{
+ struct qcom_ramdump_desc *desc = data;
+
+ return memory_read_from_buffer(buffer, count, &offset, desc->data, datalen);
+}
+
+static void qcom_devcd_freev(void *data)
+{
+ struct qcom_ramdump_desc *desc = data;
+
+ vfree(desc->data);
+ complete_all(&desc->dump_done);
+}
+
+static int qcom_devcd_dump(struct device *dev, void *data, size_t datalen, gfp_t gfp)
+{
+ struct qcom_ramdump_desc desc;
+
+ desc.data = data;
+ init_completion(&desc.dump_done);
+
+ dev_coredumpm(dev, NULL, &desc, datalen, gfp, qcom_devcd_readv, qcom_devcd_freev);
+
+ wait_for_completion(&desc.dump_done);
+
+ return !completion_done(&desc.dump_done);
+}
+#else
+static int qcom_devcd_dump(struct device *dev, void *data, size_t datalen, gfp_t gfp)
+{
+ vfree(data);
+ return 0;
+}
+#endif
+
+int qcom_dump(struct list_head *segs, struct device *dev)
+{
+ struct qcom_dump_segment *segment;
+ void *data;
+ void __iomem *ptr;
+ size_t data_size = 0;
+ size_t offset = 0;
+
+ if (!segs || list_empty(segs))
+ return -EINVAL;
+
+ list_for_each_entry(segment, segs, node) {
+ pr_info("Got segment size %lu\n", segment->size);
+ data_size += segment->size;
+ }
+
+ data = vmalloc(data_size);
+ if (!data)
+ return -ENOMEM;
+
+ list_for_each_entry(segment, segs, node) {
+ if (segment->va)
+ memcpy(data + offset, segment->va, segment->size);
+ else {
+ ptr = devm_ioremap(dev, segment->da, segment->size);
+ if (!ptr) {
+ dev_err(dev,
+ "invalid coredump segment (%pad, %zu)\n",
+ &segment->da, segment->size);
+ memset(data + offset, 0xff, segment->size);
+ } else
+ memcpy_fromio(data + offset, ptr,
+ segment->size);
+ }
+ offset += segment->size;
+ }
+
+ return qcom_devcd_dump(dev, data, data_size, GFP_KERNEL);
+}
+EXPORT_SYMBOL(qcom_dump);
+
+/* Since the elf32 and elf64 identification is identical
+ * apart from the class, we use elf32 by default.
+ */
+static void init_elf_identification(struct elf32_hdr *ehdr, unsigned char class)
+{
+ memcpy(ehdr->e_ident, ELFMAG, SELFMAG);
+ ehdr->e_ident[EI_CLASS] = class;
+ ehdr->e_ident[EI_DATA] = ELFDATA2LSB;
+ ehdr->e_ident[EI_VERSION] = EV_CURRENT;
+ ehdr->e_ident[EI_OSABI] = ELFOSABI_NONE;
+}
+
+int qcom_elf_dump(struct list_head *segs, struct device *dev, unsigned char class)
+{
+ struct qcom_dump_segment *segment;
+ void *phdr;
+ void *ehdr;
+ size_t data_size;
+ size_t offset;
+ int phnum = 0;
+ void *data;
+ void __iomem *ptr;
+
+ if (!segs || list_empty(segs))
+ return -EINVAL;
+
+ data_size = sizeof_elf_hdr(class);
+ list_for_each_entry(segment, segs, node) {
+ data_size += sizeof_elf_phdr(class) + segment->size;
+ phnum++;
+ }
+
+ data = vmalloc(data_size);
+ if (!data)
+ return -ENOMEM;
+
+ pr_debug("Creating elf with size %lu\n", data_size);
+ ehdr = data;
+
+ memset(ehdr, 0, sizeof_elf_hdr(class));
+ init_elf_identification(ehdr, class);
+ set_ehdr_property(ehdr, class, e_type, ET_CORE);
+ set_ehdr_property(ehdr, class, e_machine, EM_NONE);
+ set_ehdr_property(ehdr, class, e_version, EV_CURRENT);
+ set_ehdr_property(ehdr, class, e_phoff, sizeof_elf_hdr(class));
+ set_ehdr_property(ehdr, class, e_ehsize, sizeof_elf_hdr(class));
+ set_ehdr_property(ehdr, class, e_phentsize, sizeof_elf_phdr(class));
+ set_ehdr_property(ehdr, class, e_phnum, phnum);
+
+ phdr = data + sizeof_elf_hdr(class);
+ offset = sizeof_elf_hdr(class) + sizeof_elf_phdr(class) * phnum;
+ list_for_each_entry(segment, segs, node) {
+ memset(phdr, 0, sizeof_elf_phdr(class));
+ set_phdr_property(phdr, class, p_type, PT_LOAD);
+ set_phdr_property(phdr, class, p_offset, offset);
+ set_phdr_property(phdr, class, p_vaddr, segment->da);
+ set_phdr_property(phdr, class, p_paddr, segment->da);
+ set_phdr_property(phdr, class, p_filesz, segment->size);
+ set_phdr_property(phdr, class, p_memsz, segment->size);
+ set_phdr_property(phdr, class, p_flags, PF_R | PF_W | PF_X);
+ set_phdr_property(phdr, class, p_align, 0);
+
+ if (segment->va)
+ memcpy(data + offset, segment->va, segment->size);
+ else {
+ ptr = devm_ioremap(dev, segment->da, segment->size);
+ if (!ptr) {
+ dev_err(dev,
+ "invalid coredump segment (%pad, %zu)\n",
+ &segment->da, segment->size);
+ memset(data + offset, 0xff, segment->size);
+ } else
+ memcpy_fromio(data + offset, ptr,
+ segment->size);
+ }
+
+ offset += segment->size;
+ phdr += sizeof_elf_phdr(class);
+ }
+#if IS_ENABLED(CONFIG_WCN_GOOGLE)
+ /* SSCD integration */
+ sscd_set_coredump(data, data_size);
+#endif
+
+ return qcom_devcd_dump(dev, data, data_size, GFP_KERNEL);
+}
+EXPORT_SYMBOL(qcom_elf_dump);
+
+#if !IS_ENABLED(CONFIG_WCN_GOOGLE)
+int qcom_fw_elf_dump(struct firmware *fw, struct device *dev)
+{
+ const struct elf32_phdr *phdrs, *phdr;
+ const struct elf32_hdr *ehdr;
+ struct qcom_dump_segment *segment;
+ struct list_head head;
+ int i;
+
+ ehdr = (struct elf32_hdr *)fw->data;
+ phdrs = (struct elf32_phdr *)(ehdr + 1);
+ INIT_LIST_HEAD(&head);
+
+ for (i = 0; i < ehdr->e_phnum; i++) {
+ phdr = &phdrs[i];
+
+ if (phdr->p_type != PT_LOAD)
+ continue;
+
+ if ((phdr->p_flags & QCOM_MDT_TYPE_MASK) == QCOM_MDT_TYPE_HASH)
+ continue;
+
+ if (!phdr->p_memsz)
+ continue;
+
+ segment = kzalloc(sizeof(*segment), GFP_KERNEL);
+ if (!segment)
+ return -ENOMEM;
+
+ segment->da = phdr->p_paddr;
+ segment->size = phdr->p_memsz;
+
+ list_add_tail(&segment->node, &head);
+ }
+ qcom_elf_dump(&head, dev, ELFCLASS32);
+ return 0;
+}
+EXPORT_SYMBOL(qcom_fw_elf_dump);
+#endif
+
+MODULE_DESCRIPTION("Qualcomm Technologies, Inc. Ramdump driver");
+MODULE_LICENSE("GPL v2");
+
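For context, a minimal caller sketch for the new qcom_elf_dump() interface: build a list of qcom_dump_segment entries (field names as used above; the type of da is assumed from the %pad format) and pass the ELF class explicitly. Illustrative only:

	/* Illustrative sketch: dump one already-mapped region as an ELF32 core. */
	static int example_dump_one_region(struct device *dev, void *va,
					   dma_addr_t da, size_t size)
	{
		struct qcom_dump_segment seg = { .va = va, .da = da, .size = size };
		LIST_HEAD(segs);

		list_add_tail(&seg.node, &segs);

		/*
		 * Builds the ELF headers, copies the segment, then hands the buffer
		 * to devcoredump (and to SSCD when CONFIG_WCN_GOOGLE is set).
		 */
		return qcom_elf_dump(&segs, dev, ELFCLASS32);
	}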
diff --git a/cnss2/qmi.c b/cnss2/qmi.c
index 57ec993..3ffcc66 100644
--- a/cnss2/qmi.c
+++ b/cnss2/qmi.c
@@ -1,5 +1,8 @@
// SPDX-License-Identifier: GPL-2.0-only
-/* Copyright (c) 2015-2021, The Linux Foundation. All rights reserved. */
+/*
+ * Copyright (c) 2015-2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
#include <linux/module.h>
#ifdef CONFIG_CNSS_OUT_OF_TREE
@@ -1051,7 +1054,7 @@ int cnss_wlfw_qdss_dnld_send_sync(struct cnss_plat_data *plat_priv)
ret = cnss_request_firmware_direct(plat_priv, &fw_entry,
qdss_cfg_filename);
if (ret) {
- cnss_pr_err("Failed to load QDSS: %s\n",
+ cnss_pr_dbg("Unable to load %s\n",
qdss_cfg_filename);
goto err_req_fw;
}
@@ -1761,7 +1764,7 @@ int cnss_wlfw_antenna_switch_send_sync(struct cnss_plat_data *plat_priv)
}
if (resp->resp.result != QMI_RESULT_SUCCESS_V01) {
- cnss_pr_err("Antenna switch request failed, result: %d, err: %d\n",
+ cnss_pr_dbg("Antenna switch request failed, result: %d, err: %d\n",
resp->resp.result, resp->resp.error);
ret = -resp->resp.result;
goto out;
@@ -2180,7 +2183,8 @@ static void cnss_wlfw_request_mem_ind_cb(struct qmi_handle *qmi_wlfw,
ind_msg->mem_seg[i].size, ind_msg->mem_seg[i].type);
plat_priv->fw_mem[i].type = ind_msg->mem_seg[i].type;
plat_priv->fw_mem[i].size = ind_msg->mem_seg[i].size;
- if (plat_priv->fw_mem[i].type == CNSS_MEM_TYPE_DDR)
+ if (!plat_priv->fw_mem[i].va &&
+ plat_priv->fw_mem[i].type == CNSS_MEM_TYPE_DDR)
plat_priv->fw_mem[i].attrs |=
DMA_ATTR_FORCE_CONTIGUOUS;
if (plat_priv->fw_mem[i].type == CNSS_MEM_CAL_V01)
diff --git a/cnss2/reg.h b/cnss2/reg.h
index 3d02c74..2807b30 100644
--- a/cnss2/reg.h
+++ b/cnss2/reg.h
@@ -73,24 +73,16 @@
#define TIME_SYNC_ENABLE 0x80000000
#define TIME_SYNC_CLEAR 0x0
-#define DEBUG_PBL_LOG_SRAM_START 0x01403D58
-
+#define QCA6390_DEBUG_PBL_LOG_SRAM_START 0x01403D58
#define QCA6390_DEBUG_PBL_LOG_SRAM_MAX_SIZE 80
-#define QCA6390_V2_SBL_DATA_START 0x016C8580
-#define QCA6390_V2_SBL_DATA_END (0x016C8580 + 0x00011000)
#define QCA6390_DEBUG_SBL_LOG_SRAM_MAX_SIZE 44
+#define QCA6490_DEBUG_PBL_LOG_SRAM_START 0x01403DA0
#define QCA6490_DEBUG_PBL_LOG_SRAM_MAX_SIZE 40
-#define QCA6490_V1_SBL_DATA_START 0x0143B000
-#define QCA6490_V1_SBL_DATA_END (0x0143B000 + 0x00011000)
-#define QCA6490_V2_SBL_DATA_START 0x01435000
-#define QCA6490_V2_SBL_DATA_END (0x01435000 + 0x00011000)
#define QCA6490_DEBUG_SBL_LOG_SRAM_MAX_SIZE 48
#define WCN7850_DEBUG_PBL_LOG_SRAM_START 0x01403D98
#define WCN7850_DEBUG_PBL_LOG_SRAM_MAX_SIZE 40
-#define WCN7850_SBL_DATA_START 0x01790000
-#define WCN7850_SBL_DATA_END (0x01790000 + 0x00011000)
#define WCN7850_DEBUG_SBL_LOG_SRAM_MAX_SIZE 48
#define WCN7850_PBL_BOOTSTRAP_STATUS 0x01A10008
@@ -99,6 +91,8 @@
#define PCIE_BHI_ERRDBG3_REG 0x01E0E23C
#define PBL_WLAN_BOOT_CFG 0x01E22B34
#define PBL_BOOTSTRAP_STATUS 0x01910008
+#define SRAM_START 0x01400000
+#define SRAM_END 0x01800000
#define QCA6390_PCIE_SOC_WDOG_DISC_BAD_DATA_LOW_CFG_SOC_PCIE_REG 0x01E04234
#define QCA6390_PCIE_SOC_WDOG_DISC_BAD_DATA_LOW_CFG_SOC_PCIE_REG_VAL 0xDEAD1234
@@ -330,4 +324,9 @@
#define QCA6390_SYSPM_DBG_BTFM_AON_REG 0x1F82004
#define QCA6390_SYSPM_DBG_BUS_SEL_REG 0x1F82008
#define QCA6390_SYSPM_WCSSAON_SR_STATUS 0x1F8200C
+
+/* PCIE SOC scratch registers; addresses are the same for QCA6390 & QCA6490 */
+#define PCIE_SCRATCH_0_SOC_PCIE_REG 0x1E04040
+#define PCIE_SCRATCH_1_SOC_PCIE_REG 0x1E04044
+#define PCIE_SCRATCH_2_SOC_PCIE_REG 0x1E0405C
#endif
diff --git a/cnss_utils/cnss_plat_ipc_qmi.c b/cnss_utils/cnss_plat_ipc_qmi.c
index a28d743..2225c2b 100644
--- a/cnss_utils/cnss_plat_ipc_qmi.c
+++ b/cnss_utils/cnss_plat_ipc_qmi.c
@@ -5,24 +5,29 @@
#include <linux/kernel.h>
#ifdef CONFIG_CNSS_OUT_OF_TREE
#include "uapi/qrtr.h"
-#include "ipc_logging.h"
#include "qmi/qmi.h"
-#include "cnss_plat_ipc_qmi.h"
+#if IS_ENABLED(CONFIG_IPC_LOGGING)
+#include "ipc_logging.h"
+#endif
#else
#include <linux/qrtr.h>
-#include <linux/ipc_logging.h>
#include <linux/soc/qcom/qmi.h>
-#include <linux/cnss_plat_ipc_qmi.h>
+#if IS_ENABLED(CONFIG_IPC_LOGGING)
+#include <linux/ipc_logging.h>
+#endif
#endif
+#include <linux/sched.h>
+#include <asm/current.h>
#include <linux/limits.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/workqueue.h>
#include <linux/of.h>
+#include "cnss_plat_ipc_qmi.h"
#include "cnss_plat_ipc_service_v01.h"
#define CNSS_MAX_FILE_SIZE (32 * 1024 * 1024)
-#define CNSS_PLAT_IPC_MAX_CLIENTS 1
+#define CNSS_PLAT_IPC_MAX_USER 1
#define CNSS_PLAT_IPC_QMI_FILE_TXN_TIMEOUT 10000
#define QMI_INIT_RETRY_MAX_TIMES 240
#define QMI_INIT_RETRY_DELAY_MS 250
@@ -53,35 +58,59 @@ struct cnss_plat_ipc_file_data {
};
/**
+ * struct cnss_plat_ipc_qmi_client_ctx: Context for QMI IPC client
+ * @client_sq: QMI IPC client QRTR socket
+ * @client_connected: QMI IPC client connection status
+ * @connection_update_cb: Registered user callback for QMI connection status
+ * @cb_ctx: Context for registered user
+ * @num_user: Number of registered users
+ */
+struct cnss_plat_ipc_qmi_client_ctx {
+ struct sockaddr_qrtr client_sq;
+ bool client_connected;
+
+ cnss_plat_ipc_connection_update
+ connection_update_cb[CNSS_PLAT_IPC_MAX_USER];
+ void *cb_ctx[CNSS_PLAT_IPC_MAX_USER];
+ u32 num_user;
+};
+
+/**
* struct cnss_plat_ipc_qmi_svc_ctx: Platform context for QMI IPC service
* @svc_hdl: QMI server handle
- * @client_sq: CNSS Daemon client QRTR socket
- * @client_connected: Daemon client connection status
* @file_idr: File ID generator
* @flle_idr_lock: File ID generator usage lock
- * @cfg: CNSS daemon provided user config
- * @connection_update_cb: Registered user callback for daemon connection status
- * @cb_ctx: Context for registered user
- * @num_user: Number of registered users
+ * @qmi_client_ctx: Context for QMI IPC clients
*/
struct cnss_plat_ipc_qmi_svc_ctx {
struct qmi_handle *svc_hdl;
- struct sockaddr_qrtr client_sq;
- bool client_connected;
struct idr file_idr;
struct mutex file_idr_lock; /* File ID generator usage lock */
- struct cnss_plat_ipc_user_config cfg;
-
- cnss_plat_ipc_connection_update
- connection_update_cb[CNSS_PLAT_IPC_MAX_CLIENTS];
- void *cb_ctx[CNSS_PLAT_IPC_MAX_CLIENTS];
- u32 num_user;
+ struct cnss_plat_ipc_qmi_client_ctx
+ qmi_client_ctx[CNSS_PLAT_IPC_MAX_QMI_CLIENTS + 1];
};
static struct cnss_plat_ipc_qmi_svc_ctx plat_ipc_qmi_svc;
-static void *cnss_plat_ipc_log_context;
+static struct cnss_plat_ipc_daemon_config daemon_cfg;
#if IS_ENABLED(CONFIG_IPC_LOGGING)
+static void *cnss_plat_ipc_log_context;
+
+static void cnss_plat_ipc_logging_init(void)
+{
+ cnss_plat_ipc_log_context = ipc_log_context_create(NUM_LOG_PAGES,
+ "cnss_plat", 0);
+ if (!cnss_plat_ipc_log_context)
+ pr_err("cnss_plat: Unable to create log context\n");
+}
+
+static void cnss_plat_ipc_logging_deinit(void)
+{
+ if (cnss_plat_ipc_log_context) {
+ ipc_log_context_destroy(cnss_plat_ipc_log_context);
+ cnss_plat_ipc_log_context = NULL;
+ }
+}
void cnss_plat_ipc_debug_log_print(void *log_ctx, char *process, const char *fn,
const char *log_level, char *fmt, ...)
@@ -104,6 +133,9 @@ void cnss_plat_ipc_debug_log_print(void *log_ctx, char *process, const char *fn,
#define cnss_plat_ipc_log_print(_x...) \
cnss_plat_ipc_debug_log_print(cnss_plat_ipc_log_context, _x)
#else
+static void cnss_plat_ipc_logging_init(void) {}
+static void cnss_plat_ipc_logging_deinit(void) {}
+
void cnss_plat_ipc_debug_log_print(void *log_ctx, char *process, const char *fn,
const char *log_level, char *fmt, ...)
{
@@ -207,40 +239,57 @@ static int cnss_plat_ipc_deinit_file_data(struct cnss_plat_ipc_file_data *fd)
}
/**
- * cnss_plat_ipc_qmi_update_clients() - Inform registered clients for status
- * update
+ * cnss_plat_ipc_qmi_update_user() - Inform registered users about QMI
+ * client status
+ * @client_id: User space QMI IPC client ID. Also works as
+ * array index for QMI client context
*
* Return: None
*/
-static void cnss_plat_ipc_qmi_update_clients(void)
+static void
+cnss_plat_ipc_qmi_update_user(enum cnss_plat_ipc_qmi_client_id_v01 client_id)
{
struct cnss_plat_ipc_qmi_svc_ctx *svc = &plat_ipc_qmi_svc;
+ struct cnss_plat_ipc_qmi_client_ctx *qmi_client =
+ &svc->qmi_client_ctx[client_id];
int i;
- for (i = 0; i < CNSS_PLAT_IPC_MAX_CLIENTS; i++) {
- if (svc->connection_update_cb[i])
- svc->connection_update_cb[i](svc->cb_ctx[i],
- svc->client_connected);
+ for (i = 0; i < qmi_client->num_user; i++) {
+ if (qmi_client->connection_update_cb[i])
+ qmi_client->connection_update_cb[i]
+ (qmi_client->cb_ctx[i],
+ qmi_client->client_connected);
}
}
/**
* cnss_plat_ipc_qmi_file_upload() - Upload data as platform accessible file
+ * @client_id: User space QMI IPC client ID. Also works as
+ * array index for QMI client context
* @file_mame: File name to store in platform data location
* @file_buf: Pointer to buffer with file contents
* @file_size: Provides the size of buffer / file size
*
* Return: 0 on success, negative error values otherwise
*/
-int cnss_plat_ipc_qmi_file_upload(char *file_name, u8 *file_buf,
+int cnss_plat_ipc_qmi_file_upload(enum cnss_plat_ipc_qmi_client_id_v01
+ client_id, char *file_name, u8 *file_buf,
u32 file_size)
{
struct cnss_plat_ipc_qmi_file_upload_ind_msg_v01 ind;
struct cnss_plat_ipc_qmi_svc_ctx *svc = &plat_ipc_qmi_svc;
+ struct cnss_plat_ipc_qmi_client_ctx *qmi_client;
int ret;
struct cnss_plat_ipc_file_data *fd;
- if (!svc->client_connected || !file_name || !file_buf)
+ if (client_id > CNSS_PLAT_IPC_MAX_QMI_CLIENTS) {
+ cnss_plat_ipc_err("Invalid Client ID: %d\n", client_id);
+ return -EINVAL;
+ }
+
+ qmi_client = &svc->qmi_client_ctx[client_id];
+
+ if (!qmi_client->client_connected || !file_name || !file_buf)
return -EINVAL;
cnss_plat_ipc_info("File name: %s Size: %d\n", file_name, file_size);
@@ -260,7 +309,7 @@ int cnss_plat_ipc_qmi_file_upload(char *file_name, u8 *file_buf,
ind.file_id = fd->id;
ret = qmi_send_indication
- (svc->svc_hdl, &svc->client_sq,
+ (svc->svc_hdl, &qmi_client->client_sq,
CNSS_PLAT_IPC_QMI_FILE_UPLOAD_IND_V01,
CNSS_PLAT_IPC_QMI_FILE_UPLOAD_IND_MSG_V01_MAX_MSG_LEN,
cnss_plat_ipc_qmi_file_upload_ind_msg_v01_ei, &ind);
@@ -366,19 +415,31 @@ end:
/**
* cnss_plat_ipc_qmi_file_download() - Download platform accessible file
+ * @client_id: User space QMI IPC client ID. Also works as
+ * array index for QMI client context
* @file_mame: File name to get from platform data location
* @buf: Pointer of the buffer to store file contents
* @size: Provides the size of buffer. It is updated to reflect the file size
* at the end of file download.
*/
-int cnss_plat_ipc_qmi_file_download(char *file_name, char *buf, u32 *size)
+int cnss_plat_ipc_qmi_file_download(enum cnss_plat_ipc_qmi_client_id_v01
+ client_id, char *file_name, char *buf,
+ u32 *size)
{
struct cnss_plat_ipc_qmi_file_download_ind_msg_v01 ind;
struct cnss_plat_ipc_qmi_svc_ctx *svc = &plat_ipc_qmi_svc;
+ struct cnss_plat_ipc_qmi_client_ctx *qmi_client;
int ret;
struct cnss_plat_ipc_file_data *fd;
- if (!svc->client_connected || !file_name || !buf)
+ if (client_id > CNSS_PLAT_IPC_MAX_QMI_CLIENTS) {
+ cnss_plat_ipc_err("Invalid Client ID: %d\n", client_id);
+ return -EINVAL;
+ }
+
+ qmi_client = &svc->qmi_client_ctx[client_id];
+
+ if (!qmi_client->client_connected || !file_name || !buf)
return -EINVAL;
fd = cnss_plat_ipc_init_file_data(file_name, buf, *size, 0);
@@ -392,7 +453,7 @@ int cnss_plat_ipc_qmi_file_download(char *file_name, char *buf, u32 *size)
ind.file_id = fd->id;
ret = qmi_send_indication
- (svc->svc_hdl, &svc->client_sq,
+ (svc->svc_hdl, &qmi_client->client_sq,
CNSS_PLAT_IPC_QMI_FILE_DOWNLOAD_IND_V01,
CNSS_PLAT_IPC_QMI_FILE_DOWNLOAD_IND_MSG_V01_MAX_MSG_LEN,
cnss_plat_ipc_qmi_file_download_ind_msg_v01_ei, &ind);
@@ -523,19 +584,9 @@ cnss_plat_ipc_qmi_init_setup_req_handler(struct qmi_handle *handle,
struct cnss_plat_ipc_qmi_init_setup_req_msg_v01 *req_msg;
struct cnss_plat_ipc_qmi_init_setup_resp_msg_v01 resp = {0};
struct cnss_plat_ipc_qmi_svc_ctx *svc = &plat_ipc_qmi_svc;
+ struct cnss_plat_ipc_daemon_config *cfg = &daemon_cfg;
int ret = 0;
- if (!svc->client_connected) {
- cnss_plat_ipc_info("CNSS Daemon Connected. QMI Socket Node: %d Port: %d\n",
- sq->sq_node, sq->sq_port);
- svc->client_sq = *sq;
- svc->client_connected = true;
- cnss_plat_ipc_qmi_update_clients();
- } else {
- cnss_plat_ipc_err("CNSS Daemon already connected. Invalid new client\n");
- return;
- }
-
req_msg =
(struct cnss_plat_ipc_qmi_init_setup_req_msg_v01 *)decoded_msg;
cnss_plat_ipc_dbg("MAC: %d HW_TRC: %d CAL: %d\n",
@@ -543,16 +594,69 @@ cnss_plat_ipc_qmi_init_setup_req_handler(struct qmi_handle *handle,
req_msg->qdss_hw_trace_override,
req_msg->cal_file_available_bitmask);
- svc->cfg.dms_mac_addr_supported = req_msg->dms_mac_addr_supported;
- svc->cfg.qdss_hw_trace_override = req_msg->qdss_hw_trace_override;
- svc->cfg.cal_file_available_bitmask =
- req_msg->cal_file_available_bitmask;
+ cfg->dms_mac_addr_supported = req_msg->dms_mac_addr_supported;
+ cfg->qdss_hw_trace_override = req_msg->qdss_hw_trace_override;
+ cfg->cal_file_available_bitmask = req_msg->cal_file_available_bitmask;
+
+ ret = qmi_send_response
+ (svc->svc_hdl, sq, txn,
+ CNSS_PLAT_IPC_QMI_INIT_SETUP_RESP_V01,
+ CNSS_PLAT_IPC_QMI_INIT_SETUP_RESP_MSG_V01_MAX_MSG_LEN,
+ cnss_plat_ipc_qmi_init_setup_resp_msg_v01_ei, &resp);
+ if (ret < 0)
+ cnss_plat_ipc_err("%s: QMI failed: %d\n", __func__, ret);
+}
+
+/**
+ * cnss_plat_ipc_qmi_reg_client_req_handler() - Register QMI client
+ * @handle: Pointer to QMI handle
+ * @sq: QMI socket
+ * @txn: QMI transaction pointer
+ * @decoded_msg: Pointer to decoded QMI message
+ *
+ * Handles the userspace QMI client registration.
+ *
+ * Return: None
+ */
+static void
+cnss_plat_ipc_qmi_reg_client_req_handler(struct qmi_handle *handle,
+ struct sockaddr_qrtr *sq,
+ struct qmi_txn *txn,
+ const void *decoded_msg)
+{
+ struct cnss_plat_ipc_qmi_reg_client_req_msg_v01 *req_msg;
+ struct cnss_plat_ipc_qmi_reg_client_resp_msg_v01 resp = {0};
+ struct cnss_plat_ipc_qmi_svc_ctx *svc = &plat_ipc_qmi_svc;
+ struct cnss_plat_ipc_qmi_client_ctx *qmi_client = svc->qmi_client_ctx;
+ int ret = 0;
+
+ req_msg =
+ (struct cnss_plat_ipc_qmi_reg_client_req_msg_v01 *)decoded_msg;
+
+ if (req_msg->client_id_valid) {
+ if (req_msg->client_id <= CNSS_PLAT_IPC_MAX_QMI_CLIENTS &&
+ !qmi_client[req_msg->client_id].client_connected) {
+ cnss_plat_ipc_info
+ ("%s: QMI Client Connected. QMI Socket Node: %d Port: %d ID: %d\n",
+ __func__, sq->sq_node, sq->sq_port,
+ req_msg->client_id);
+ qmi_client[req_msg->client_id].client_sq = *sq;
+ qmi_client[req_msg->client_id].client_connected = true;
+ cnss_plat_ipc_qmi_update_user
+ ((enum cnss_plat_ipc_qmi_client_id_v01)
+ req_msg->client_id);
+ } else {
+ cnss_plat_ipc_err("QMI client already connected or Invalid client id\n");
+ return;
+ }
+ }
ret = qmi_send_response
- (svc->svc_hdl, sq, txn,
- CNSS_PLAT_IPC_QMI_INIT_SETUP_RESP_V01,
- CNSS_PLAT_IPC_QMI_INIT_SETUP_RESP_MSG_V01_MAX_MSG_LEN,
- cnss_plat_ipc_qmi_init_setup_resp_msg_v01_ei, &resp);
+ (svc->svc_hdl, sq, txn,
+ CNSS_PLAT_IPC_QMI_REG_CLIENT_RESP_V01,
+ CNSS_PLAT_IPC_QMI_REG_CLIENT_RESP_MSG_V01_MAX_MSG_LEN,
+ cnss_plat_ipc_qmi_reg_client_resp_msg_v01_ei, &resp);
+
if (ret < 0)
cnss_plat_ipc_err("QMI failed: %d\n", ret);
}
@@ -571,31 +675,38 @@ static void cnss_plat_ipc_qmi_disconnect_cb(struct qmi_handle *handle,
unsigned int port)
{
struct cnss_plat_ipc_qmi_svc_ctx *svc = &plat_ipc_qmi_svc;
+ struct cnss_plat_ipc_qmi_client_ctx *qmi_client =
+ svc->qmi_client_ctx;
struct cnss_plat_ipc_file_data *fd;
u32 file_id;
+ int i;
if (svc->svc_hdl != handle) {
cnss_plat_ipc_err("Invalid QMI Handle\n");
return;
}
- if (svc->client_connected && svc->client_sq.sq_node == node &&
- svc->client_sq.sq_port == port) {
- cnss_plat_ipc_err("CNSS Daemon disconnected. QMI Socket Node:%d Port:%d\n",
- node, port);
- svc->client_sq.sq_node = 0;
- svc->client_sq.sq_port = 0;
- svc->client_sq.sq_family = 0;
- svc->client_connected = false;
-
- /* Daemon killed. Fail any download / upload in progress. This
- * will also free stale fd
- */
- mutex_lock(&svc->file_idr_lock);
- idr_for_each_entry(&svc->file_idr, fd, file_id)
- complete(&fd->complete);
- mutex_unlock(&svc->file_idr_lock);
- cnss_plat_ipc_qmi_update_clients();
+ for (i = 0; i <= CNSS_PLAT_IPC_MAX_QMI_CLIENTS; i++) {
+ if (qmi_client[i].client_connected &&
+ qmi_client[i].client_sq.sq_node == node &&
+ qmi_client[i].client_sq.sq_port == port) {
+ cnss_plat_ipc_err
+ ("%s: QMI client disconnect. QMI Socket Node:%d Port:%d ID: %d\n",
+ __func__, node, port, i);
+ qmi_client[i].client_sq.sq_node = 0;
+ qmi_client[i].client_sq.sq_port = 0;
+ qmi_client[i].client_sq.sq_family = 0;
+ qmi_client[i].client_connected = false;
+
+			/* Client disconnected. Fail any download/upload in
+			 * progress; this also frees the stale fd.
+			 */
+ mutex_lock(&svc->file_idr_lock);
+ idr_for_each_entry(&svc->file_idr, fd, file_id)
+ complete(&fd->complete);
+ mutex_unlock(&svc->file_idr_lock);
+ cnss_plat_ipc_qmi_update_user(i);
+ }
}
}
@@ -611,8 +722,15 @@ static void cnss_plat_ipc_qmi_bye_cb(struct qmi_handle *handle,
unsigned int node)
{
struct cnss_plat_ipc_qmi_svc_ctx *svc = &plat_ipc_qmi_svc;
+ struct cnss_plat_ipc_qmi_client_ctx *qmi_client =
+ svc->qmi_client_ctx;
+ int i;
- cnss_plat_ipc_qmi_disconnect_cb(handle, node, svc->client_sq.sq_port);
+ for (i = 0; i <= CNSS_PLAT_IPC_MAX_QMI_CLIENTS; i++) {
+ cnss_plat_ipc_qmi_disconnect_cb
+ (handle, node,
+ qmi_client[i].client_sq.sq_port);
+ }
}
static struct qmi_ops cnss_plat_ipc_qmi_ops = {
@@ -624,10 +742,18 @@ static struct qmi_ops cnss_plat_ipc_qmi_ops = {
static struct qmi_msg_handler cnss_plat_ipc_qmi_req_handlers[] = {
{
.type = QMI_REQUEST,
+ .msg_id = CNSS_PLAT_IPC_QMI_REG_CLIENT_REQ_V01,
+ .ei = cnss_plat_ipc_qmi_reg_client_req_msg_v01_ei,
+ .decoded_size =
+ sizeof(struct cnss_plat_ipc_qmi_reg_client_req_msg_v01),
+ .fn = cnss_plat_ipc_qmi_reg_client_req_handler,
+ },
+ {
+ .type = QMI_REQUEST,
.msg_id = CNSS_PLAT_IPC_QMI_INIT_SETUP_REQ_V01,
.ei = cnss_plat_ipc_qmi_init_setup_req_msg_v01_ei,
.decoded_size =
- CNSS_PLAT_IPC_QMI_INIT_SETUP_REQ_MSG_V01_MAX_MSG_LEN,
+ sizeof(struct cnss_plat_ipc_qmi_init_setup_req_msg_v01),
.fn = cnss_plat_ipc_qmi_init_setup_req_handler,
},
{
@@ -635,7 +761,7 @@ static struct qmi_msg_handler cnss_plat_ipc_qmi_req_handlers[] = {
.msg_id = CNSS_PLAT_IPC_QMI_FILE_DOWNLOAD_REQ_V01,
.ei = cnss_plat_ipc_qmi_file_download_req_msg_v01_ei,
.decoded_size =
- CNSS_PLAT_IPC_QMI_FILE_DOWNLOAD_REQ_MSG_V01_MAX_MSG_LEN,
+ sizeof(struct cnss_plat_ipc_qmi_file_download_req_msg_v01),
.fn = cnss_plat_ipc_qmi_file_download_req_handler,
},
{
@@ -643,68 +769,95 @@ static struct qmi_msg_handler cnss_plat_ipc_qmi_req_handlers[] = {
.msg_id = CNSS_PLAT_IPC_QMI_FILE_UPLOAD_REQ_V01,
.ei = cnss_plat_ipc_qmi_file_upload_req_msg_v01_ei,
.decoded_size =
- CNSS_PLAT_IPC_QMI_FILE_UPLOAD_REQ_MSG_V01_MAX_MSG_LEN,
+ sizeof(struct cnss_plat_ipc_qmi_file_upload_req_msg_v01),
.fn = cnss_plat_ipc_qmi_file_upload_req_handler,
- }
+ },
+ {}
};
/**
- * cnss_plat_ipc_qmi_user_config() - Get User space config for CNSS platform
+ * cnss_plat_ipc_qmi_daemon_config() - Get daemon config for CNSS platform
*
- * Return: Pointer to user space client config
+ * Return: Pointer to daemon client config
*/
-struct cnss_plat_ipc_user_config *cnss_plat_ipc_qmi_user_config(void)
+struct cnss_plat_ipc_daemon_config *cnss_plat_ipc_qmi_daemon_config(void)
{
struct cnss_plat_ipc_qmi_svc_ctx *svc = &plat_ipc_qmi_svc;
+ struct cnss_plat_ipc_qmi_client_ctx *qmi_client =
+ &svc->qmi_client_ctx[CNSS_PLAT_IPC_DAEMON_QMI_CLIENT_V01];
- if (!svc->client_connected)
+ if (!qmi_client->client_connected)
return NULL;
- return &svc->cfg;
+ return &daemon_cfg;
}
-EXPORT_SYMBOL(cnss_plat_ipc_qmi_user_config);
+EXPORT_SYMBOL(cnss_plat_ipc_qmi_daemon_config);
/**
* cnss_plat_ipc_register() - Register for QMI IPC client status update
+ * @client_id: User space QMI IPC client ID. Also works as
+ * array index for QMI client context
* @connect_update_cb: Function pointer for callback
- * @cb_cbt: Callback context
+ * @cb_ctx: Callback context
*
* Return: 0 on success, negative error value otherwise
*/
-int cnss_plat_ipc_register(cnss_plat_ipc_connection_update
+int cnss_plat_ipc_register(enum cnss_plat_ipc_qmi_client_id_v01 client_id,
+ cnss_plat_ipc_connection_update
connection_update_cb, void *cb_ctx)
{
struct cnss_plat_ipc_qmi_svc_ctx *svc = &plat_ipc_qmi_svc;
+ struct cnss_plat_ipc_qmi_client_ctx *qmi_client;
+ int num_user;
- if (svc->num_user >= CNSS_PLAT_IPC_MAX_CLIENTS) {
+ if (client_id > CNSS_PLAT_IPC_MAX_QMI_CLIENTS) {
+ cnss_plat_ipc_err("Invalid Client ID: %d\n", client_id);
+ return -EINVAL;
+ }
+
+ qmi_client = &svc->qmi_client_ctx[client_id];
+ num_user = qmi_client->num_user;
+
+ if (num_user >= CNSS_PLAT_IPC_MAX_USER) {
cnss_plat_ipc_err("Max Service users reached\n");
return -EINVAL;
}
- svc->connection_update_cb[svc->num_user] = connection_update_cb;
- svc->cb_ctx[svc->num_user] = cb_ctx;
- svc->num_user++;
+ qmi_client->connection_update_cb[num_user] = connection_update_cb;
+ qmi_client->cb_ctx[num_user] = cb_ctx;
+ qmi_client->num_user++;
return 0;
}
EXPORT_SYMBOL(cnss_plat_ipc_register);
/**
- * cnss_plat_ipc_register() - Unregister QMI IPC client status callback
+ * cnss_plat_ipc_unregister() - Unregister QMI IPC client status callback
+ * @client_id: User space QMI IPC client ID. Also works as
+ * array index for QMI client context
* @cb_cbt: Callback context provided during registration
*
* Return: None
*/
-void cnss_plat_ipc_unregister(void *cb_ctx)
+void cnss_plat_ipc_unregister(enum cnss_plat_ipc_qmi_client_id_v01 client_id,
+ void *cb_ctx)
{
struct cnss_plat_ipc_qmi_svc_ctx *svc = &plat_ipc_qmi_svc;
+ struct cnss_plat_ipc_qmi_client_ctx *qmi_client;
int i;
- for (i = 0; i < svc->num_user; i++) {
- if (svc->cb_ctx[i] == cb_ctx) {
- svc->cb_ctx[i] = NULL;
- svc->connection_update_cb[i] = NULL;
- svc->num_user--;
+ if (client_id > CNSS_PLAT_IPC_MAX_QMI_CLIENTS) {
+ cnss_plat_ipc_err("Invalid Client ID: %d\n", client_id);
+ return;
+ }
+
+ qmi_client = &svc->qmi_client_ctx[client_id];
+
+ for (i = 0; i < qmi_client->num_user; i++) {
+ if (qmi_client->cb_ctx[i] == cb_ctx) {
+ qmi_client->cb_ctx[i] = NULL;
+ qmi_client->connection_update_cb[i] = NULL;
+ qmi_client->num_user--;
break;
}
}
@@ -789,21 +942,6 @@ static bool cnss_plat_ipc_is_valid_dt_node_found(void)
return false;
}
-void cnss_plat_ipc_logging_init(void)
-{
- cnss_plat_ipc_log_context = ipc_log_context_create(NUM_LOG_PAGES, "cnss_plat", 0);
- if (!cnss_plat_ipc_log_context)
- cnss_plat_ipc_err("Unable to create log context\n");
-}
-
-void cnss_plat_ipc_lgging_deinit(void)
-{
- if (cnss_plat_ipc_log_context) {
- ipc_log_context_destroy(cnss_plat_ipc_log_context);
- cnss_plat_ipc_log_context = NULL;
- }
-}
-
static DECLARE_WORK(cnss_plat_ipc_init_work, cnss_plat_ipc_init_fn);
static int __init cnss_plat_ipc_qmi_svc_init(void)
@@ -836,7 +974,7 @@ static void __exit cnss_plat_ipc_qmi_svc_exit(void)
idr_destroy(&svc->file_idr);
}
- cnss_plat_ipc_lgging_deinit();
+ cnss_plat_ipc_logging_deinit();
}
module_init(cnss_plat_ipc_qmi_svc_init);
diff --git a/cnss_utils/cnss_plat_ipc_qmi.h b/cnss_utils/cnss_plat_ipc_qmi.h
new file mode 100644
index 0000000..0584056
--- /dev/null
+++ b/cnss_utils/cnss_plat_ipc_qmi.h
@@ -0,0 +1,87 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _CNSS_PLAT_IPC_QMI_H
+#define _CNSS_PLAT_IPC_QMI_H
+
+#include "cnss_plat_ipc_service_v01.h"
+
+/* As the value of CNSS_PLAT_IPC_MAX_QMI_CLIENTS will keep changing with the
+ * addition of new QMI clients, it cannot be kept in the IDL: a change to an
+ * existing value can cause backward compatibility issues. Keep it here and
+ * update its value whenever a new QMI client ID is added to the enum in the
+ * IDL.
+ */
+#define CNSS_PLAT_IPC_MAX_QMI_CLIENTS CNSS_PLAT_IPC_DAEMON_QMI_CLIENT_V01
+
+/**
+ * struct cnss_plat_ipc_daemon_config - Config options provided by cnss-daemon
+ * @dms_mac_addr_supported: DMS MAC address provisioning support
+ * @qdss_hw_trace_override: QDSS config for HW trace enable
+ * @cal_file_available_bitmask: Calibration file available
+ */
+struct cnss_plat_ipc_daemon_config {
+ u8 dms_mac_addr_supported;
+ u8 qdss_hw_trace_override;
+ u32 cal_file_available_bitmask;
+};
+
+typedef void (*cnss_plat_ipc_connection_update)(void *cb_ctx,
+ bool connection_status);
+
+/*
+ * Persistent caldb file store, a runtime FW-param-based feature, will fail
+ * if CONFIG_CNSS_PLAT_IPC_QMI_SVC is not enabled.
+ */
+#if IS_ENABLED(CONFIG_CNSS_PLAT_IPC_QMI_SVC)
+int cnss_plat_ipc_register(enum cnss_plat_ipc_qmi_client_id_v01 client_id,
+ cnss_plat_ipc_connection_update
+ connection_update_cb, void *cb_ctx);
+void cnss_plat_ipc_unregister(enum cnss_plat_ipc_qmi_client_id_v01 client_id,
+ void *cb_ctx);
+int cnss_plat_ipc_qmi_file_download(enum cnss_plat_ipc_qmi_client_id_v01
+ client_id, char *file_name, char *buf,
+ u32 *size);
+int cnss_plat_ipc_qmi_file_upload(enum cnss_plat_ipc_qmi_client_id_v01
+ client_id, char *file_name, u8 *file_buf,
+ u32 file_size);
+struct cnss_plat_ipc_daemon_config *cnss_plat_ipc_qmi_daemon_config(void);
+#else
+static inline
+int cnss_plat_ipc_register(enum cnss_plat_ipc_qmi_client_id_v01 client_id,
+ cnss_plat_ipc_connection_update
+ connection_update_cb, void *cb_ctx)
+{
+ return 0;
+}
+
+static inline
+void cnss_plat_ipc_unregister(enum cnss_plat_ipc_qmi_client_id_v01 client_id,
+ void *cb_ctx)
+{
+}
+
+static inline
+int cnss_plat_ipc_qmi_file_download(enum cnss_plat_ipc_qmi_client_id_v01
+ client_id, char *file_name, char *buf,
+ u32 *size)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline
+int cnss_plat_ipc_qmi_file_upload(enum cnss_plat_ipc_qmi_client_id_v01
+ client_id, char *file_name, u8 *file_buf,
+ u32 file_size)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline
+struct cnss_plat_ipc_daemon_config *cnss_plat_ipc_qmi_daemon_config(void)
+{
+ return NULL;
+}
+
+#endif
+#endif
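
As a hedged illustration of consuming this header, a caller could read the daemon-provided config as below; the wrapper name is hypothetical, and the NULL check mirrors the stub returned when CONFIG_CNSS_PLAT_IPC_QMI_SVC is disabled.

/* Hypothetical consumer of cnss_plat_ipc_qmi_daemon_config(). */
static bool example_qdss_hw_trace_enabled(void)
{
        struct cnss_plat_ipc_daemon_config *cfg;

        cfg = cnss_plat_ipc_qmi_daemon_config();
        if (!cfg)       /* stub path when the QMI service is not built in */
                return false;

        return !!cfg->qdss_hw_trace_override;
}
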
diff --git a/cnss_utils/cnss_plat_ipc_service_v01.c b/cnss_utils/cnss_plat_ipc_service_v01.c
index 664e117..51ef049 100644
--- a/cnss_utils/cnss_plat_ipc_service_v01.c
+++ b/cnss_utils/cnss_plat_ipc_service_v01.c
@@ -1,13 +1,8 @@
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2021, The Linux Foundation. All rights reserved. */
-#ifdef CONFIG_CNSS_OUT_OF_TREE
-#include "qmi/qmi.h"
-#else
-#include <linux/soc/qcom/qmi.h>
-#endif
-
#include "cnss_plat_ipc_service_v01.h"
+#include <linux/module.h>
struct qmi_elem_info cnss_plat_ipc_qmi_init_setup_req_msg_v01_ei[] = {
{
@@ -16,7 +11,8 @@ struct qmi_elem_info cnss_plat_ipc_qmi_init_setup_req_msg_v01_ei[] = {
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x01,
- .offset = offsetof(struct cnss_plat_ipc_qmi_init_setup_req_msg_v01,
+ .offset = offsetof(struct
+ cnss_plat_ipc_qmi_init_setup_req_msg_v01,
dms_mac_addr_supported),
},
{
@@ -25,7 +21,8 @@ struct qmi_elem_info cnss_plat_ipc_qmi_init_setup_req_msg_v01_ei[] = {
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x02,
- .offset = offsetof(struct cnss_plat_ipc_qmi_init_setup_req_msg_v01,
+ .offset = offsetof(struct
+ cnss_plat_ipc_qmi_init_setup_req_msg_v01,
qdss_hw_trace_override),
},
{
@@ -34,7 +31,8 @@ struct qmi_elem_info cnss_plat_ipc_qmi_init_setup_req_msg_v01_ei[] = {
.elem_size = sizeof(u32),
.array_type = NO_ARRAY,
.tlv_type = 0x03,
- .offset = offsetof(struct cnss_plat_ipc_qmi_init_setup_req_msg_v01,
+ .offset = offsetof(struct
+ cnss_plat_ipc_qmi_init_setup_req_msg_v01,
cal_file_available_bitmask),
},
{
@@ -43,6 +41,7 @@ struct qmi_elem_info cnss_plat_ipc_qmi_init_setup_req_msg_v01_ei[] = {
.tlv_type = QMI_COMMON_TLV_TYPE,
},
};
+EXPORT_SYMBOL(cnss_plat_ipc_qmi_init_setup_req_msg_v01_ei);
struct qmi_elem_info cnss_plat_ipc_qmi_init_setup_resp_msg_v01_ei[] = {
{
@@ -51,7 +50,8 @@ struct qmi_elem_info cnss_plat_ipc_qmi_init_setup_resp_msg_v01_ei[] = {
.elem_size = sizeof(struct qmi_response_type_v01),
.array_type = NO_ARRAY,
.tlv_type = 0x02,
- .offset = offsetof(struct cnss_plat_ipc_qmi_init_setup_resp_msg_v01,
+ .offset = offsetof(struct
+ cnss_plat_ipc_qmi_init_setup_resp_msg_v01,
resp),
.ei_array = qmi_response_type_v01_ei,
},
@@ -61,7 +61,8 @@ struct qmi_elem_info cnss_plat_ipc_qmi_init_setup_resp_msg_v01_ei[] = {
.elem_size = sizeof(u64),
.array_type = NO_ARRAY,
.tlv_type = 0x03,
- .offset = offsetof(struct cnss_plat_ipc_qmi_init_setup_resp_msg_v01,
+ .offset = offsetof(struct
+ cnss_plat_ipc_qmi_init_setup_resp_msg_v01,
drv_status),
},
{
@@ -70,6 +71,7 @@ struct qmi_elem_info cnss_plat_ipc_qmi_init_setup_resp_msg_v01_ei[] = {
.tlv_type = QMI_COMMON_TLV_TYPE,
},
};
+EXPORT_SYMBOL(cnss_plat_ipc_qmi_init_setup_resp_msg_v01_ei);
struct qmi_elem_info cnss_plat_ipc_qmi_file_download_ind_msg_v01_ei[] = {
{
@@ -78,7 +80,8 @@ struct qmi_elem_info cnss_plat_ipc_qmi_file_download_ind_msg_v01_ei[] = {
.elem_size = sizeof(char),
.array_type = NO_ARRAY,
.tlv_type = 0x01,
- .offset = offsetof(struct cnss_plat_ipc_qmi_file_download_ind_msg_v01,
+ .offset = offsetof(struct
+ cnss_plat_ipc_qmi_file_download_ind_msg_v01,
file_name),
},
{
@@ -87,7 +90,8 @@ struct qmi_elem_info cnss_plat_ipc_qmi_file_download_ind_msg_v01_ei[] = {
.elem_size = sizeof(u32),
.array_type = NO_ARRAY,
.tlv_type = 0x02,
- .offset = offsetof(struct cnss_plat_ipc_qmi_file_download_ind_msg_v01,
+ .offset = offsetof(struct
+ cnss_plat_ipc_qmi_file_download_ind_msg_v01,
file_id),
},
{
@@ -96,6 +100,7 @@ struct qmi_elem_info cnss_plat_ipc_qmi_file_download_ind_msg_v01_ei[] = {
.tlv_type = QMI_COMMON_TLV_TYPE,
},
};
+EXPORT_SYMBOL(cnss_plat_ipc_qmi_file_download_ind_msg_v01_ei);
struct qmi_elem_info cnss_plat_ipc_qmi_file_download_req_msg_v01_ei[] = {
{
@@ -104,7 +109,8 @@ struct qmi_elem_info cnss_plat_ipc_qmi_file_download_req_msg_v01_ei[] = {
.elem_size = sizeof(u32),
.array_type = NO_ARRAY,
.tlv_type = 0x01,
- .offset = offsetof(struct cnss_plat_ipc_qmi_file_download_req_msg_v01,
+ .offset = offsetof(struct
+ cnss_plat_ipc_qmi_file_download_req_msg_v01,
file_id),
},
{
@@ -113,7 +119,8 @@ struct qmi_elem_info cnss_plat_ipc_qmi_file_download_req_msg_v01_ei[] = {
.elem_size = sizeof(u32),
.array_type = NO_ARRAY,
.tlv_type = 0x02,
- .offset = offsetof(struct cnss_plat_ipc_qmi_file_download_req_msg_v01,
+ .offset = offsetof(struct
+ cnss_plat_ipc_qmi_file_download_req_msg_v01,
file_size),
},
{
@@ -122,7 +129,8 @@ struct qmi_elem_info cnss_plat_ipc_qmi_file_download_req_msg_v01_ei[] = {
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x03,
- .offset = offsetof(struct cnss_plat_ipc_qmi_file_download_req_msg_v01,
+ .offset = offsetof(struct
+ cnss_plat_ipc_qmi_file_download_req_msg_v01,
end),
},
{
@@ -131,7 +139,8 @@ struct qmi_elem_info cnss_plat_ipc_qmi_file_download_req_msg_v01_ei[] = {
.elem_size = sizeof(u32),
.array_type = NO_ARRAY,
.tlv_type = 0x04,
- .offset = offsetof(struct cnss_plat_ipc_qmi_file_download_req_msg_v01,
+ .offset = offsetof(struct
+ cnss_plat_ipc_qmi_file_download_req_msg_v01,
seg_index),
},
{
@@ -140,7 +149,8 @@ struct qmi_elem_info cnss_plat_ipc_qmi_file_download_req_msg_v01_ei[] = {
.elem_size = sizeof(u16),
.array_type = NO_ARRAY,
.tlv_type = 0x05,
- .offset = offsetof(struct cnss_plat_ipc_qmi_file_download_req_msg_v01,
+ .offset = offsetof(struct
+ cnss_plat_ipc_qmi_file_download_req_msg_v01,
seg_buf_len),
},
{
@@ -149,7 +159,8 @@ struct qmi_elem_info cnss_plat_ipc_qmi_file_download_req_msg_v01_ei[] = {
.elem_size = sizeof(u8),
.array_type = VAR_LEN_ARRAY,
.tlv_type = 0x05,
- .offset = offsetof(struct cnss_plat_ipc_qmi_file_download_req_msg_v01,
+ .offset = offsetof(struct
+ cnss_plat_ipc_qmi_file_download_req_msg_v01,
seg_buf),
},
{
@@ -158,6 +169,7 @@ struct qmi_elem_info cnss_plat_ipc_qmi_file_download_req_msg_v01_ei[] = {
.tlv_type = QMI_COMMON_TLV_TYPE,
},
};
+EXPORT_SYMBOL(cnss_plat_ipc_qmi_file_download_req_msg_v01_ei);
struct qmi_elem_info cnss_plat_ipc_qmi_file_download_resp_msg_v01_ei[] = {
{
@@ -166,7 +178,8 @@ struct qmi_elem_info cnss_plat_ipc_qmi_file_download_resp_msg_v01_ei[] = {
.elem_size = sizeof(struct qmi_response_type_v01),
.array_type = NO_ARRAY,
.tlv_type = 0x02,
- .offset = offsetof(struct cnss_plat_ipc_qmi_file_download_resp_msg_v01,
+ .offset = offsetof(struct
+ cnss_plat_ipc_qmi_file_download_resp_msg_v01,
resp),
.ei_array = qmi_response_type_v01_ei,
},
@@ -176,7 +189,8 @@ struct qmi_elem_info cnss_plat_ipc_qmi_file_download_resp_msg_v01_ei[] = {
.elem_size = sizeof(u32),
.array_type = NO_ARRAY,
.tlv_type = 0x03,
- .offset = offsetof(struct cnss_plat_ipc_qmi_file_download_resp_msg_v01,
+ .offset = offsetof(struct
+ cnss_plat_ipc_qmi_file_download_resp_msg_v01,
file_id),
},
{
@@ -185,7 +199,8 @@ struct qmi_elem_info cnss_plat_ipc_qmi_file_download_resp_msg_v01_ei[] = {
.elem_size = sizeof(u32),
.array_type = NO_ARRAY,
.tlv_type = 0x04,
- .offset = offsetof(struct cnss_plat_ipc_qmi_file_download_resp_msg_v01,
+ .offset = offsetof(struct
+ cnss_plat_ipc_qmi_file_download_resp_msg_v01,
seg_index),
},
{
@@ -194,6 +209,7 @@ struct qmi_elem_info cnss_plat_ipc_qmi_file_download_resp_msg_v01_ei[] = {
.tlv_type = QMI_COMMON_TLV_TYPE,
},
};
+EXPORT_SYMBOL(cnss_plat_ipc_qmi_file_download_resp_msg_v01_ei);
struct qmi_elem_info cnss_plat_ipc_qmi_file_upload_ind_msg_v01_ei[] = {
{
@@ -202,7 +218,8 @@ struct qmi_elem_info cnss_plat_ipc_qmi_file_upload_ind_msg_v01_ei[] = {
.elem_size = sizeof(char),
.array_type = NO_ARRAY,
.tlv_type = 0x01,
- .offset = offsetof(struct cnss_plat_ipc_qmi_file_upload_ind_msg_v01,
+ .offset = offsetof(struct
+ cnss_plat_ipc_qmi_file_upload_ind_msg_v01,
file_name),
},
{
@@ -211,7 +228,8 @@ struct qmi_elem_info cnss_plat_ipc_qmi_file_upload_ind_msg_v01_ei[] = {
.elem_size = sizeof(u32),
.array_type = NO_ARRAY,
.tlv_type = 0x02,
- .offset = offsetof(struct cnss_plat_ipc_qmi_file_upload_ind_msg_v01,
+ .offset = offsetof(struct
+ cnss_plat_ipc_qmi_file_upload_ind_msg_v01,
file_id),
},
{
@@ -220,7 +238,8 @@ struct qmi_elem_info cnss_plat_ipc_qmi_file_upload_ind_msg_v01_ei[] = {
.elem_size = sizeof(u32),
.array_type = NO_ARRAY,
.tlv_type = 0x03,
- .offset = offsetof(struct cnss_plat_ipc_qmi_file_upload_ind_msg_v01,
+ .offset = offsetof(struct
+ cnss_plat_ipc_qmi_file_upload_ind_msg_v01,
file_size),
},
{
@@ -229,6 +248,7 @@ struct qmi_elem_info cnss_plat_ipc_qmi_file_upload_ind_msg_v01_ei[] = {
.tlv_type = QMI_COMMON_TLV_TYPE,
},
};
+EXPORT_SYMBOL(cnss_plat_ipc_qmi_file_upload_ind_msg_v01_ei);
struct qmi_elem_info cnss_plat_ipc_qmi_file_upload_req_msg_v01_ei[] = {
{
@@ -237,7 +257,8 @@ struct qmi_elem_info cnss_plat_ipc_qmi_file_upload_req_msg_v01_ei[] = {
.elem_size = sizeof(u32),
.array_type = NO_ARRAY,
.tlv_type = 0x01,
- .offset = offsetof(struct cnss_plat_ipc_qmi_file_upload_req_msg_v01,
+ .offset = offsetof(struct
+ cnss_plat_ipc_qmi_file_upload_req_msg_v01,
file_id),
},
{
@@ -246,7 +267,8 @@ struct qmi_elem_info cnss_plat_ipc_qmi_file_upload_req_msg_v01_ei[] = {
.elem_size = sizeof(u32),
.array_type = NO_ARRAY,
.tlv_type = 0x02,
- .offset = offsetof(struct cnss_plat_ipc_qmi_file_upload_req_msg_v01,
+ .offset = offsetof(struct
+ cnss_plat_ipc_qmi_file_upload_req_msg_v01,
seg_index),
},
{
@@ -255,6 +277,7 @@ struct qmi_elem_info cnss_plat_ipc_qmi_file_upload_req_msg_v01_ei[] = {
.tlv_type = QMI_COMMON_TLV_TYPE,
},
};
+EXPORT_SYMBOL(cnss_plat_ipc_qmi_file_upload_req_msg_v01_ei);
struct qmi_elem_info cnss_plat_ipc_qmi_file_upload_resp_msg_v01_ei[] = {
{
@@ -263,7 +286,8 @@ struct qmi_elem_info cnss_plat_ipc_qmi_file_upload_resp_msg_v01_ei[] = {
.elem_size = sizeof(struct qmi_response_type_v01),
.array_type = NO_ARRAY,
.tlv_type = 0x02,
- .offset = offsetof(struct cnss_plat_ipc_qmi_file_upload_resp_msg_v01,
+ .offset = offsetof(struct
+ cnss_plat_ipc_qmi_file_upload_resp_msg_v01,
resp),
.ei_array = qmi_response_type_v01_ei,
},
@@ -273,7 +297,8 @@ struct qmi_elem_info cnss_plat_ipc_qmi_file_upload_resp_msg_v01_ei[] = {
.elem_size = sizeof(u32),
.array_type = NO_ARRAY,
.tlv_type = 0x03,
- .offset = offsetof(struct cnss_plat_ipc_qmi_file_upload_resp_msg_v01,
+ .offset = offsetof(struct
+ cnss_plat_ipc_qmi_file_upload_resp_msg_v01,
file_id),
},
{
@@ -282,7 +307,8 @@ struct qmi_elem_info cnss_plat_ipc_qmi_file_upload_resp_msg_v01_ei[] = {
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x04,
- .offset = offsetof(struct cnss_plat_ipc_qmi_file_upload_resp_msg_v01,
+ .offset = offsetof(struct
+ cnss_plat_ipc_qmi_file_upload_resp_msg_v01,
end),
},
{
@@ -291,7 +317,8 @@ struct qmi_elem_info cnss_plat_ipc_qmi_file_upload_resp_msg_v01_ei[] = {
.elem_size = sizeof(u32),
.array_type = NO_ARRAY,
.tlv_type = 0x05,
- .offset = offsetof(struct cnss_plat_ipc_qmi_file_upload_resp_msg_v01,
+ .offset = offsetof(struct
+ cnss_plat_ipc_qmi_file_upload_resp_msg_v01,
seg_index),
},
{
@@ -300,7 +327,8 @@ struct qmi_elem_info cnss_plat_ipc_qmi_file_upload_resp_msg_v01_ei[] = {
.elem_size = sizeof(u16),
.array_type = NO_ARRAY,
.tlv_type = 0x06,
- .offset = offsetof(struct cnss_plat_ipc_qmi_file_upload_resp_msg_v01,
+ .offset = offsetof(struct
+ cnss_plat_ipc_qmi_file_upload_resp_msg_v01,
seg_buf_len),
},
{
@@ -309,7 +337,8 @@ struct qmi_elem_info cnss_plat_ipc_qmi_file_upload_resp_msg_v01_ei[] = {
.elem_size = sizeof(u8),
.array_type = VAR_LEN_ARRAY,
.tlv_type = 0x06,
- .offset = offsetof(struct cnss_plat_ipc_qmi_file_upload_resp_msg_v01,
+ .offset = offsetof(struct
+ cnss_plat_ipc_qmi_file_upload_resp_msg_v01,
seg_buf),
},
{
@@ -318,4 +347,56 @@ struct qmi_elem_info cnss_plat_ipc_qmi_file_upload_resp_msg_v01_ei[] = {
.tlv_type = QMI_COMMON_TLV_TYPE,
},
};
+EXPORT_SYMBOL(cnss_plat_ipc_qmi_file_upload_resp_msg_v01_ei);
+
+struct qmi_elem_info cnss_plat_ipc_qmi_reg_client_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct
+ cnss_plat_ipc_qmi_reg_client_req_msg_v01,
+ client_id_valid),
+ },
+ {
+ .data_type = QMI_SIGNED_4_BYTE_ENUM,
+ .elem_len = 1,
+ .elem_size = sizeof(enum cnss_plat_ipc_qmi_client_id_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct
+ cnss_plat_ipc_qmi_reg_client_req_msg_v01,
+ client_id),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+EXPORT_SYMBOL(cnss_plat_ipc_qmi_reg_client_req_msg_v01_ei);
+
+struct qmi_elem_info cnss_plat_ipc_qmi_reg_client_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct
+ cnss_plat_ipc_qmi_reg_client_resp_msg_v01,
+ resp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+EXPORT_SYMBOL(cnss_plat_ipc_qmi_reg_client_resp_msg_v01_ei);
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("WLAN FW QMI service");
\ No newline at end of file
diff --git a/cnss_utils/cnss_plat_ipc_service_v01.h b/cnss_utils/cnss_plat_ipc_service_v01.h
index 6d19af1..d72d8c0 100644
--- a/cnss_utils/cnss_plat_ipc_service_v01.h
+++ b/cnss_utils/cnss_plat_ipc_service_v01.h
@@ -5,12 +5,20 @@
#ifndef CNSS_PLAT_IPC_SERVICE_V01_H
#define CNSS_PLAT_IPC_SERVICE_V01_H
+#ifdef CONFIG_CNSS_OUT_OF_TREE
+#include "qmi/qmi.h"
+#else
+#include <linux/soc/qcom/qmi.h>
+#endif
+
#define CNSS_PLATFORM_SERVICE_ID_V01 0x42E
#define CNSS_PLATFORM_SERVICE_VERS_V01 0x01
+#define CNSS_PLAT_IPC_QMI_REG_CLIENT_RESP_V01 0x0006
#define CNSS_PLAT_IPC_QMI_FILE_DOWNLOAD_REQ_V01 0x0003
#define CNSS_PLAT_IPC_QMI_FILE_UPLOAD_IND_V01 0x0004
#define CNSS_PLAT_IPC_QMI_FILE_DOWNLOAD_IND_V01 0x0002
+#define CNSS_PLAT_IPC_QMI_REG_CLIENT_REQ_V01 0x0006
#define CNSS_PLAT_IPC_QMI_INIT_SETUP_REQ_V01 0x0001
#define CNSS_PLAT_IPC_QMI_FILE_UPLOAD_REQ_V01 0x0005
#define CNSS_PLAT_IPC_QMI_FILE_DOWNLOAD_RESP_V01 0x0003
@@ -24,6 +32,13 @@
#define CNSS_PLAT_IPC_QMI_DRIVER_CBC_DONE_V01 ((u64)0x01ULL)
#define CNSS_PLAT_IPC_QMI_DRIVER_WLAN_ACTIVE_V01 ((u64)0x02ULL)
+enum cnss_plat_ipc_qmi_client_id_v01 {
+ CNSS_PLAT_IPC_QMI_CLIENT_ID_MIN_VAL_V01 = INT_MIN,
+ CNSS_PLAT_IPC_BT_QMI_CLIENT_V01 = 0,
+ CNSS_PLAT_IPC_DAEMON_QMI_CLIENT_V01 = 1,
+ CNSS_PLAT_IPC_QMI_CLIENT_ID_MAX_VAL_V01 = INT_MAX,
+};
+
struct cnss_plat_ipc_qmi_init_setup_req_msg_v01 {
u8 dms_mac_addr_supported;
u8 qdss_hw_trace_override;
@@ -99,4 +114,19 @@ struct cnss_plat_ipc_qmi_file_upload_resp_msg_v01 {
#define CNSS_PLAT_IPC_QMI_FILE_UPLOAD_RESP_MSG_V01_MAX_MSG_LEN 61470
extern struct qmi_elem_info cnss_plat_ipc_qmi_file_upload_resp_msg_v01_ei[];
+struct cnss_plat_ipc_qmi_reg_client_req_msg_v01 {
+ u8 client_id_valid;
+ enum cnss_plat_ipc_qmi_client_id_v01 client_id;
+};
+
+#define CNSS_PLAT_IPC_QMI_REG_CLIENT_REQ_MSG_V01_MAX_MSG_LEN 7
+extern struct qmi_elem_info cnss_plat_ipc_qmi_reg_client_req_msg_v01_ei[];
+
+struct cnss_plat_ipc_qmi_reg_client_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+};
+
+#define CNSS_PLAT_IPC_QMI_REG_CLIENT_RESP_MSG_V01_MAX_MSG_LEN 7
+extern struct qmi_elem_info cnss_plat_ipc_qmi_reg_client_resp_msg_v01_ei[];
+
#endif
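
To show how the new REG_CLIENT message and its element-info tables would typically be driven, here is a rough client-side sketch on top of the standard qmi_txn helpers. Only the message ID, structures, and _ei arrays are from this header; the handle, timeout value, and function name are assumptions, and error handling is abbreviated.

#include <linux/jiffies.h>
#include "cnss_plat_ipc_service_v01.h"

/* Hypothetical caller; 'qmi' is assumed to be a connected client handle. */
static int example_reg_client(struct qmi_handle *qmi,
                              enum cnss_plat_ipc_qmi_client_id_v01 id)
{
        struct cnss_plat_ipc_qmi_reg_client_req_msg_v01 req = {};
        struct cnss_plat_ipc_qmi_reg_client_resp_msg_v01 resp = {};
        struct qmi_txn txn;
        int ret;

        req.client_id_valid = 1;
        req.client_id = id;

        ret = qmi_txn_init(qmi, &txn,
                           cnss_plat_ipc_qmi_reg_client_resp_msg_v01_ei, &resp);
        if (ret < 0)
                return ret;

        ret = qmi_send_request(qmi, NULL, &txn,
                               CNSS_PLAT_IPC_QMI_REG_CLIENT_REQ_V01,
                               CNSS_PLAT_IPC_QMI_REG_CLIENT_REQ_MSG_V01_MAX_MSG_LEN,
                               cnss_plat_ipc_qmi_reg_client_req_msg_v01_ei, &req);
        if (ret < 0) {
                qmi_txn_cancel(&txn);
                return ret;
        }

        ret = qmi_txn_wait(&txn, msecs_to_jiffies(5000));
        if (ret < 0)
                return ret;

        return resp.resp.result == QMI_RESULT_SUCCESS_V01 ? 0 : -EIO;
}
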
diff --git a/cnss_utils/wlan_firmware_service_v01.c b/cnss_utils/wlan_firmware_service_v01.c
index 93a6d59..740b1d8 100644
--- a/cnss_utils/wlan_firmware_service_v01.c
+++ b/cnss_utils/wlan_firmware_service_v01.c
@@ -1,5 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
-/* Copyright (c) 2015-2021, The Linux Foundation. All rights reserved. */
+/* Copyright (c) 2015-2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
#include "wlan_firmware_service_v01.h"
#include <linux/module.h>
@@ -1767,6 +1769,66 @@ struct qmi_elem_info wlfw_cap_resp_msg_v01_ei[] = {
.ei_array = wlfw_dev_mem_info_s_v01_ei,
},
{
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x1D,
+ .offset = offsetof(struct
+ wlfw_cap_resp_msg_v01,
+ foundry_name_valid),
+ },
+ {
+ .data_type = QMI_STRING,
+ .elem_len = QMI_WLFW_MAX_STR_LEN_V01 + 1,
+ .elem_size = sizeof(char),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x1D,
+ .offset = offsetof(struct
+ wlfw_cap_resp_msg_v01,
+ foundry_name),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x1E,
+ .offset = offsetof(struct
+ wlfw_cap_resp_msg_v01,
+ hang_data_addr_offset_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x1E,
+ .offset = offsetof(struct
+ wlfw_cap_resp_msg_v01,
+ hang_data_addr_offset),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x1F,
+ .offset = offsetof(struct
+ wlfw_cap_resp_msg_v01,
+ hang_data_length_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_2_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u16),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x1F,
+ .offset = offsetof(struct
+ wlfw_cap_resp_msg_v01,
+ hang_data_length),
+ },
+ {
.data_type = QMI_EOTI,
.array_type = NO_ARRAY,
.tlv_type = QMI_COMMON_TLV_TYPE,
@@ -5458,6 +5520,55 @@ struct qmi_elem_info wlfw_m3_dump_upload_segments_req_ind_msg_v01_ei[] = {
};
EXPORT_SYMBOL(wlfw_m3_dump_upload_segments_req_ind_msg_v01_ei);
+struct qmi_elem_info wlfw_subsys_restart_level_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct
+ wlfw_subsys_restart_level_req_msg_v01,
+ restart_level_type_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct
+ wlfw_subsys_restart_level_req_msg_v01,
+ restart_level_type),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+EXPORT_SYMBOL(wlfw_subsys_restart_level_req_msg_v01_ei);
+
+struct qmi_elem_info wlfw_subsys_restart_level_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct
+ wlfw_subsys_restart_level_resp_msg_v01,
+ resp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+EXPORT_SYMBOL(wlfw_subsys_restart_level_resp_msg_v01_ei);
+
/**
* wlfw_is_valid_dt_node_found - Check if valid device tree node present
*
diff --git a/cnss_utils/wlan_firmware_service_v01.h b/cnss_utils/wlan_firmware_service_v01.h
index 6e313c7..23a43a7 100644
--- a/cnss_utils/wlan_firmware_service_v01.h
+++ b/cnss_utils/wlan_firmware_service_v01.h
@@ -1,5 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0-only */
-/* Copyright (c) 2015-2021, The Linux Foundation. All rights reserved. */
+/* Copyright (c) 2015-2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
#ifndef WLAN_FIRMWARE_SERVICE_V01_H
#define WLAN_FIRMWARE_SERVICE_V01_H
@@ -13,6 +15,8 @@
#define WLFW_SERVICE_ID_V01 0x45
#define WLFW_SERVICE_VERS_V01 0x01
+#define QMI_WLFW_SUBSYS_RESTART_LEVEL_RESP_V01 0x0055
+#define QMI_WLFW_SUBSYS_RESTART_LEVEL_REQ_V01 0x0055
#define QMI_WLFW_POWER_SAVE_RESP_V01 0x0050
#define QMI_WLFW_CAP_REQ_V01 0x0024
#define QMI_WLFW_CAL_REPORT_REQ_V01 0x0026
@@ -170,6 +174,7 @@ enum wlfw_mem_type_enum_v01 {
QMI_WLFW_MEM_HANG_DATA_V01 = 7,
QMI_WLFW_MLO_GLOBAL_MEM_V01 = 8,
QMI_WLFW_PAGEABLE_MEM_V01 = 9,
+ QMI_WLFW_AFC_MEM_V01 = 10,
WLFW_MEM_TYPE_ENUM_MAX_VAL_V01 = INT_MAX,
};
@@ -252,6 +257,7 @@ enum cnss_feature_v01 {
CNSS_FEATURE_MIN_VAL_V01 = INT_MIN,
BOOTSTRAP_CLOCK_SELECT_V01 = 0,
CNSS_DRV_SUPPORT_V01 = 1,
+ CNSS_WLAN_EN_SUPPORT_V01 = 2,
CNSS_MAX_FEATURE_V01 = 64,
CNSS_FEATURE_MAX_VAL_V01 = INT_MAX,
};
@@ -534,8 +540,15 @@ struct wlfw_cap_resp_msg_v01 {
enum wlfw_rd_card_chain_cap_v01 rd_card_chain_cap;
u8 dev_mem_info_valid;
struct wlfw_dev_mem_info_s_v01 dev_mem_info[QMI_WLFW_MAX_DEV_MEM_NUM_V01];
+ u8 foundry_name_valid;
+ char foundry_name[QMI_WLFW_MAX_STR_LEN_V01 + 1];
+ u8 hang_data_addr_offset_valid;
+ u32 hang_data_addr_offset;
+ u8 hang_data_length_valid;
+ u16 hang_data_length;
};
-#define WLFW_CAP_RESP_MSG_V01_MAX_MSG_LEN 320
+
+#define WLFW_CAP_RESP_MSG_V01_MAX_MSG_LEN 351
extern struct qmi_elem_info wlfw_cap_resp_msg_v01_ei[];
struct wlfw_bdf_download_req_msg_v01 {
@@ -807,9 +820,11 @@ struct wlfw_host_cap_req_msg_v01 {
u8 num_wlan_vaps;
u8 wake_msi_addr_valid;
u32 wake_msi_addr;
+ u8 wlan_enable_delay_valid;
+ u32 wlan_enable_delay;
};
-#define WLFW_HOST_CAP_REQ_MSG_V01_MAX_MSG_LEN 389
+#define WLFW_HOST_CAP_REQ_MSG_V01_MAX_MSG_LEN 396
extern struct qmi_elem_info wlfw_host_cap_req_msg_v01_ei[];
struct wlfw_host_cap_resp_msg_v01 {
@@ -1272,4 +1287,19 @@ struct wlfw_m3_dump_upload_segments_req_ind_msg_v01 {
#define WLFW_M3_DUMP_UPLOAD_SEGMENTS_REQ_IND_MSG_V01_MAX_MSG_LEN 387
extern struct qmi_elem_info wlfw_m3_dump_upload_segments_req_ind_msg_v01_ei[];
+struct wlfw_subsys_restart_level_req_msg_v01 {
+ u8 restart_level_type_valid;
+ u8 restart_level_type;
+};
+
+#define WLFW_SUBSYS_RESTART_LEVEL_REQ_MSG_V01_MAX_MSG_LEN 4
+extern struct qmi_elem_info wlfw_subsys_restart_level_req_msg_v01_ei[];
+
+struct wlfw_subsys_restart_level_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+};
+
+#define WLFW_SUBSYS_RESTART_LEVEL_RESP_MSG_V01_MAX_MSG_LEN 7
+extern struct qmi_elem_info wlfw_subsys_restart_level_resp_msg_v01_ei[];
+
#endif
diff --git a/inc/cnss_plat_ipc_qmi.h b/inc/cnss_plat_ipc_qmi.h
deleted file mode 100644
index c77c186..0000000
--- a/inc/cnss_plat_ipc_qmi.h
+++ /dev/null
@@ -1,30 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only
- * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
- */
-
-#ifndef _CNSS_PLAT_IPC_QMI_H
-#define _CNSS_PLAT_IPC_QMI_H
-
-/**
- * struct cnss_plat_user_config: Config options provided by user space
- * @dms_mac_addr_supported: DMS MAC address provisioning support
- * @qdss_hw_trace_override: QDSS config for HW trace enable
- * @cal_file_available_bitmask: Calibration file available
- */
-struct cnss_plat_ipc_user_config {
- u8 dms_mac_addr_supported;
- u8 qdss_hw_trace_override;
- u32 cal_file_available_bitmask;
-};
-
-typedef void (*cnss_plat_ipc_connection_update)(void *cb_ctx,
- bool connection_status);
-
-int cnss_plat_ipc_register(cnss_plat_ipc_connection_update
- connection_update_cb, void *cb_ctx);
-void cnss_plat_ipc_unregister(void *cb_ctx);
-int cnss_plat_ipc_qmi_file_download(char *file_name, char *buf, u32 *size);
-int cnss_plat_ipc_qmi_file_upload(char *file_name, u8 *file_buf,
- u32 file_size);
-struct cnss_plat_ipc_user_config *cnss_plat_ipc_qmi_user_config(void);
-#endif
diff --git a/inc/mhi_misc.h b/inc/mhi_misc.h
index 75b25f1..3108ac6 100644
--- a/inc/mhi_misc.h
+++ b/inc/mhi_misc.h
@@ -268,6 +268,13 @@ int mhi_get_remote_time(struct mhi_device *mhi_dev,
u64 local_time,
u64 remote_time));
+/**
+ * mhi_force_reset - issue a host reset request to collect device-side dumps
+ * for debugging purposes
+ * @mhi_cntrl: MHI controller
+ */
+int mhi_force_reset(struct mhi_controller *mhi_cntrl);
+
#else
/**
@@ -572,6 +579,16 @@ int mhi_get_remote_time(struct mhi_device *mhi_dev,
return -EPERM;
}
+/**
+ * mhi_force_reset - issue a host reset request to collect device-side dumps
+ * for debugging purposes
+ * @mhi_cntrl: MHI controller
+ */
+static inline int mhi_force_reset(struct mhi_controller *mhi_cntrl)
+{
+ return -EINVAL;
+}
+
#endif /* CONFIG_MHI_BUS_MISC */
#endif /* _MHI_MISC_H_ */
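
A brief, hedged sketch of how a controller driver's recovery path might use the declaration above; the helper name and logging are assumptions, and only mhi_force_reset() itself comes from the patch.

/* Hypothetical recovery helper built on mhi_force_reset(). */
static void example_collect_device_dump(struct mhi_controller *mhi_cntrl)
{
        int ret;

        /* Reset the SoC and poll for the RDDM image via BHIe. */
        ret = mhi_force_reset(mhi_cntrl);
        if (ret)
                dev_err(&mhi_cntrl->mhi_dev->dev,
                        "RDDM not collected after forced reset: %d\n", ret);
}
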
diff --git a/inc/qcom_ramdump.h b/inc/qcom_ramdump.h
index 9b56dc3..726e868 100644
--- a/inc/qcom_ramdump.h
+++ b/inc/qcom_ramdump.h
@@ -18,7 +18,7 @@ struct qcom_dump_segment {
};
#if IS_ENABLED(CONFIG_QCOM_RAMDUMP)
-extern int qcom_elf_dump(struct list_head *segs, struct device *dev);
+extern int qcom_elf_dump(struct list_head *segs, struct device *dev, unsigned char class);
extern int qcom_dump(struct list_head *head, struct device *dev);
extern int qcom_fw_elf_dump(struct firmware *fw, struct device *dev);
extern bool dump_enabled(void);
@@ -26,9 +26,10 @@ extern bool dump_enabled(void);
extern void cnss_register_sscd(void);
extern void cnss_unregister_sscd(void);
extern void sscd_release(struct device *dev);
+extern void sscd_set_coredump(void *buf, int buf_len);
#endif
#else
-static inline int qcom_elf_dump(struct list_head *segs, struct device *dev)
+static inline int qcom_elf_dump(struct list_head *segs, struct device *dev, unsigned char class)
{
return -ENODEV;
}
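
Because qcom_elf_dump() now takes an ELF class, existing callers gain one argument. A minimal sketch of an updated call site, assuming a 64-bit target; segment list setup is elided.

#include <linux/elf.h>  /* ELFCLASS64 */

/* Hypothetical call site for the extended qcom_elf_dump() signature. */
static int example_elf_dump(struct list_head *segs, struct device *dev)
{
        return qcom_elf_dump(segs, dev, ELFCLASS64);
}
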
diff --git a/inc/qmi/qmi.h b/inc/qmi/qmi.h
index 7a60439..dccde32 100644
--- a/inc/qmi/qmi.h
+++ b/inc/qmi/qmi.h
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2014, 2019 The Linux Foundation. All rights reserved.
* Copyright (c) 2017, Linaro Ltd.
*/
#ifndef __QMI_HELPERS_H__
@@ -91,8 +91,9 @@ struct qmi_elem_info {
#define QMI_ERR_INTERNAL_V01 3
#define QMI_ERR_CLIENT_IDS_EXHAUSTED_V01 5
#define QMI_ERR_INVALID_ID_V01 41
+#define QMI_ERR_NETWORK_NOT_READY_V01 53
#define QMI_ERR_ENCODING_V01 58
-#define QMI_ERR_DISABLED_V01 69
+#define QMI_ERR_DISABLED_V01 69
#define QMI_ERR_INCOMPATIBLE_STATE_V01 90
#define QMI_ERR_NOT_SUPPORTED_V01 94
@@ -162,7 +163,6 @@ struct qmi_ops {
* struct qmi_txn - transaction context
* @qmi: QMI handle this transaction is associated with
* @id: transaction id
- * @lock: for synchronization between handler and waiter of messages
* @completion: completion object as the transaction receives a response
* @result: result code for the completed transaction
* @ei: description of the QMI encoded response (optional)
@@ -173,7 +173,6 @@ struct qmi_txn {
u16 id;
- struct mutex lock;
struct completion completion;
int result;
@@ -272,5 +271,6 @@ int qmi_txn_init(struct qmi_handle *qmi, struct qmi_txn *txn,
struct qmi_elem_info *ei, void *c_struct);
int qmi_txn_wait(struct qmi_txn *txn, unsigned long timeout);
void qmi_txn_cancel(struct qmi_txn *txn);
+void qmi_set_sndtimeo(struct qmi_handle *qmi, long timeo);
#endif
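
The new qmi_set_sndtimeo() lets a client adjust the blocking-send timeout on its handle. A minimal sketch, assuming the handle is already initialized; the 2 s value is arbitrary.

/* Hypothetical helper; only qmi_set_sndtimeo() is from the patch. */
static void example_set_qmi_send_timeout(struct qmi_handle *qmi)
{
        qmi_set_sndtimeo(qmi, msecs_to_jiffies(2000));
}
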
diff --git a/mhi/core/boot.c b/mhi/core/boot.c
index b6d7623..a2d3526 100644
--- a/mhi/core/boot.c
+++ b/mhi/core/boot.c
@@ -60,17 +60,49 @@ void mhi_rddm_prepare(struct mhi_controller *mhi_cntrl,
&mhi_buf->dma_addr, mhi_buf->len, sequence_id);
}
+/* Check whether the RDDM image download has completed */
+int mhi_rddm_download_status(struct mhi_controller *mhi_cntrl)
+{
+ u32 rx_status;
+ enum mhi_ee_type ee;
+ const u32 delayus = 5000;
+ void __iomem *base = mhi_cntrl->bhie;
+ u32 retry = (mhi_cntrl->timeout_ms * 1000) / delayus;
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ int ret = 0;
+
+ while (retry--) {
+ ret = mhi_read_reg_field(mhi_cntrl, base, BHIE_RXVECSTATUS_OFFS,
+ BHIE_RXVECSTATUS_STATUS_BMSK,
+ BHIE_RXVECSTATUS_STATUS_SHFT,
+ &rx_status);
+ if (ret)
+ return -EIO;
+
+ if (rx_status == BHIE_RXVECSTATUS_STATUS_XFER_COMPL) {
+ MHI_LOG("RDDM dumps collected successfully");
+ return 0;
+ }
+
+ udelay(delayus);
+ }
+
+ ee = mhi_get_exec_env(mhi_cntrl);
+ ret = mhi_read_reg(mhi_cntrl, base, BHIE_RXVECSTATUS_OFFS, &rx_status);
+ MHI_ERR("ret: %d, RXVEC_STATUS: 0x%x, EE:%s\n", ret, rx_status,
+ TO_MHI_EXEC_STR(ee));
+
+ return -EIO;
+}
+
/* Collect RDDM buffer during kernel panic */
static int __mhi_download_rddm_in_panic(struct mhi_controller *mhi_cntrl)
{
int ret;
- u32 rx_status;
enum mhi_ee_type ee;
const u32 delayus = 2000;
- u32 retry = (mhi_cntrl->timeout_ms * 1000) / delayus;
const u32 rddm_timeout_us = 200000;
int rddm_retry = rddm_timeout_us / delayus;
- void __iomem *base = mhi_cntrl->bhie;
struct device *dev = &mhi_cntrl->mhi_dev->dev;
MHI_VERB("Entered with pm_state:%s dev_state:%s ee:%s\n",
@@ -130,25 +162,12 @@ static int __mhi_download_rddm_in_panic(struct mhi_controller *mhi_cntrl)
"Waiting for RDDM image download via BHIe, current EE:%s\n",
TO_MHI_EXEC_STR(ee));
- while (retry--) {
- ret = mhi_read_reg_field(mhi_cntrl, base, BHIE_RXVECSTATUS_OFFS,
- BHIE_RXVECSTATUS_STATUS_BMSK,
- BHIE_RXVECSTATUS_STATUS_SHFT,
- &rx_status);
- if (ret)
- return -EIO;
-
- if (rx_status == BHIE_RXVECSTATUS_STATUS_XFER_COMPL)
- return 0;
-
- udelay(delayus);
+ ret = mhi_rddm_download_status(mhi_cntrl);
+ if (!ret) {
+ MHI_LOG("RDDM dumps collected successfully");
+ return 0;
}
- ee = mhi_get_exec_env(mhi_cntrl);
- ret = mhi_read_reg(mhi_cntrl, base, BHIE_RXVECSTATUS_OFFS, &rx_status);
-
- MHI_ERR("RXVEC_STATUS: 0x%x\n", rx_status);
-
error_exit_rddm:
MHI_ERR("RDDM transfer failed. Current EE: %s\n",
TO_MHI_EXEC_STR(ee));
@@ -187,7 +206,7 @@ static int mhi_fw_load_bhie(struct mhi_controller *mhi_cntrl,
void __iomem *base = mhi_cntrl->bhie;
struct device *dev = &mhi_cntrl->mhi_dev->dev;
rwlock_t *pm_lock = &mhi_cntrl->pm_lock;
- u32 tx_status, sequence_id, val;
+ u32 tx_status = 0, sequence_id = 0, val = 0;
int ret, rd;
read_lock_bh(pm_lock);
@@ -277,12 +296,8 @@ static int mhi_fw_load_bhi(struct mhi_controller *mhi_cntrl,
BHI_STATUS_MASK, BHI_STATUS_SHIFT,
&tx_status) || tx_status,
msecs_to_jiffies(mhi_cntrl->timeout_ms));
-
if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))
- {
- MHI_ERR("Image transfer failed: MHI_PM_IN_ERROR_STATE\n");
goto invalid_pm_state;
- }
if (tx_status == BHI_STATUS_ERROR) {
MHI_ERR("Image transfer failed\n");
diff --git a/mhi/core/init.c b/mhi/core/init.c
index 417fc6e..2777084 100644
--- a/mhi/core/init.c
+++ b/mhi/core/init.c
@@ -797,6 +797,7 @@ static int parse_ch_cfg(struct mhi_controller *mhi_cntrl,
mhi_chan->offload_ch = ch_cfg->offload_channel;
mhi_chan->db_cfg.reset_req = ch_cfg->doorbell_mode_switch;
mhi_chan->pre_alloc = ch_cfg->auto_queue;
+ mhi_chan->wake_capable = ch_cfg->wake_capable;
/*
* If MHI host allocates buffers, then the channel direction
diff --git a/mhi/core/internal.h b/mhi/core/internal.h
index 293e4a8..b53c8e3 100644
--- a/mhi/core/internal.h
+++ b/mhi/core/internal.h
@@ -638,6 +638,7 @@ int __mhi_device_get_sync(struct mhi_controller *mhi_cntrl);
int mhi_send_cmd(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan,
enum mhi_cmd_type cmd);
int mhi_download_amss_image(struct mhi_controller *mhi_cntrl);
+int mhi_rddm_download_status(struct mhi_controller *mhi_cntrl);
static inline bool mhi_is_active(struct mhi_controller *mhi_cntrl)
{
return (mhi_cntrl->dev_state >= MHI_STATE_M0 &&
diff --git a/mhi/core/main.c b/mhi/core/main.c
index ed3f8a4..acca77d 100644
--- a/mhi/core/main.c
+++ b/mhi/core/main.c
@@ -819,15 +819,17 @@ static void mhi_process_cmd_completion(struct mhi_controller *mhi_cntrl,
}
chan = MHI_TRE_GET_CMD_CHID(cmd_pkt);
- if (chan >= mhi_cntrl->max_chan) {
- MHI_ERR("Invalid channel id: %u\n", chan);
- goto exit_cmd_completion;
+
+ if (chan < mhi_cntrl->max_chan &&
+ mhi_cntrl->mhi_chan[chan].configured) {
+ mhi_chan = &mhi_cntrl->mhi_chan[chan];
+ write_lock_bh(&mhi_chan->lock);
+ mhi_chan->ccs = MHI_TRE_GET_EV_CODE(tre);
+ complete(&mhi_chan->completion);
+ write_unlock_bh(&mhi_chan->lock);
+ } else {
+ MHI_ERR("Completion packet for invalid channel ID: %d\n", chan);
}
- mhi_chan = &mhi_cntrl->mhi_chan[chan];
- write_lock_bh(&mhi_chan->lock);
- mhi_chan->ccs = MHI_TRE_GET_EV_CODE(tre);
- complete(&mhi_chan->completion);
- write_unlock_bh(&mhi_chan->lock);
exit_cmd_completion:
mhi_del_ring_element(mhi_cntrl, mhi_ring);
@@ -1800,9 +1802,7 @@ static int mhi_update_transfer_state(struct mhi_device *mhi_dev,
enum mhi_ch_state_type to_state)
{
struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
- struct device *dev = &mhi_cntrl->mhi_dev->dev;
struct mhi_chan *mhi_chan;
- struct mhi_chan_ctxt *chan_ctxt;
int dir, ret;
for (dir = 0; dir < 2; dir++) {
@@ -1816,13 +1816,6 @@ static int mhi_update_transfer_state(struct mhi_device *mhi_dev,
* both upon failure
*/
mutex_lock(&mhi_chan->mutex);
- chan_ctxt = &mhi_cntrl->mhi_ctxt->chan_ctxt[mhi_chan->chan];
- if (!(chan_ctxt->chcfg & CHAN_CTX_CHSTATE_MASK)) {
- mutex_unlock(&mhi_chan->mutex);
- MHI_ERR("Channel %s(%u) context not initialized\n",
- mhi_chan->name, mhi_chan->chan);
- return -EINVAL;
- }
ret = mhi_update_channel_state(mhi_cntrl, mhi_chan, to_state);
if (ret) {
mutex_unlock(&mhi_chan->mutex);
diff --git a/mhi/core/misc.c b/mhi/core/misc.c
index b002d57..f0c1f69 100644
--- a/mhi/core/misc.c
+++ b/mhi/core/misc.c
@@ -407,7 +407,19 @@ EXPORT_SYMBOL(mhi_device_configure);
void mhi_set_m2_timeout_ms(struct mhi_controller *mhi_cntrl, u32 timeout)
{
- struct mhi_private *mhi_priv = dev_get_drvdata(&mhi_cntrl->mhi_dev->dev);
+ struct mhi_device *mhi_dev;
+ struct mhi_private *mhi_priv;
+
+ if (!mhi_cntrl)
+ return;
+
+ mhi_dev = mhi_cntrl->mhi_dev;
+ if (!mhi_dev)
+ return;
+
+ mhi_priv = dev_get_drvdata(&mhi_dev->dev);
+ if (!mhi_priv)
+ return;
mhi_priv->m2_timeout_ms = timeout;
}
@@ -1689,3 +1701,18 @@ error_unlock:
return ret;
}
EXPORT_SYMBOL(mhi_get_remote_time);
+
+/* MHI host reset request */
+int mhi_force_reset(struct mhi_controller *mhi_cntrl)
+{
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+
+ MHI_VERB("Entered with pm_state:%s dev_state:%s ee:%s\n",
+ to_mhi_pm_state_str(mhi_cntrl->pm_state),
+ TO_MHI_STATE_STR(mhi_cntrl->dev_state),
+ TO_MHI_EXEC_STR(mhi_cntrl->ee));
+
+ mhi_soc_reset(mhi_cntrl);
+ return mhi_rddm_download_status(mhi_cntrl);
+}
+EXPORT_SYMBOL(mhi_force_reset);
diff --git a/mhi/core/misc.h b/mhi/core/misc.h
index 507cead..4d038fb 100644
--- a/mhi/core/misc.h
+++ b/mhi/core/misc.h
@@ -13,8 +13,9 @@
#include <linux/mhi_misc.h>
#endif
+
#define MHI_FORCE_WAKE_DELAY_US (100)
-#define MHI_IPC_LOG_PAGES (100)
+#define MHI_IPC_LOG_PAGES (200)
#define MAX_RDDM_TABLE_SIZE (7)
#define MHI_REG_SIZE (SZ_4K)
@@ -79,7 +80,7 @@
struct mhi_private *mhi_priv = \
dev_get_drvdata(&mhi_cntrl->mhi_dev->dev); \
dev_dbg(dev, "[D][%s] " fmt, __func__, ##__VA_ARGS__); \
- if (mhi_priv->log_lvl <= MHI_MSG_LVL_VERBOSE) \
+ if (mhi_priv && mhi_priv->log_lvl <= MHI_MSG_LVL_VERBOSE) \
ipc_log_string(mhi_priv->log_buf, "[D][%s] " fmt, __func__, \
##__VA_ARGS__); \
} while (0)
@@ -88,7 +89,7 @@
struct mhi_private *mhi_priv = \
dev_get_drvdata(&mhi_cntrl->mhi_dev->dev); \
dev_dbg(dev, "[I][%s] " fmt, __func__, ##__VA_ARGS__); \
- if (mhi_priv->log_lvl <= MHI_MSG_LVL_INFO) \
+ if (mhi_priv && mhi_priv->log_lvl <= MHI_MSG_LVL_INFO) \
ipc_log_string(mhi_priv->log_buf, "[I][%s] " fmt, __func__, \
##__VA_ARGS__); \
} while (0)
@@ -97,7 +98,7 @@
struct mhi_private *mhi_priv = \
dev_get_drvdata(&mhi_cntrl->mhi_dev->dev); \
dev_err(dev, "[E][%s] " fmt, __func__, ##__VA_ARGS__); \
- if (mhi_priv->log_lvl <= MHI_MSG_LVL_ERROR) \
+ if (mhi_priv && mhi_priv->log_lvl <= MHI_MSG_LVL_ERROR) \
ipc_log_string(mhi_priv->log_buf, "[E][%s] " fmt, __func__, \
##__VA_ARGS__); \
} while (0)
@@ -106,7 +107,7 @@
struct mhi_private *mhi_priv = \
dev_get_drvdata(&mhi_cntrl->mhi_dev->dev); \
dev_crit(dev, "[C][%s] " fmt, __func__, ##__VA_ARGS__); \
- if (mhi_priv->log_lvl <= MHI_MSG_LVL_CRITICAL) \
+ if (mhi_priv && mhi_priv->log_lvl <= MHI_MSG_LVL_CRITICAL) \
ipc_log_string(mhi_priv->log_buf, "[C][%s] " fmt, __func__, \
##__VA_ARGS__); \
} while (0)
diff --git a/mhi/core/pm.c b/mhi/core/pm.c
index ff04d8d..bb9628b 100644
--- a/mhi/core/pm.c
+++ b/mhi/core/pm.c
@@ -317,7 +317,8 @@ int mhi_pm_m0_transition(struct mhi_controller *mhi_cntrl)
read_lock_irq(&mhi_chan->lock);
/* Only ring DB if ring is not empty */
- if (tre_ring->base && tre_ring->wp != tre_ring->rp)
+ if (tre_ring->base && tre_ring->wp != tre_ring->rp &&
+ mhi_chan->ch_state == MHI_CH_STATE_ENABLED)
mhi_ring_chan_db(mhi_cntrl, mhi_chan);
read_unlock_irq(&mhi_chan->lock);
}
@@ -797,6 +798,28 @@ void mhi_pm_st_worker(struct work_struct *work)
}
}
+static bool mhi_in_rddm(struct mhi_controller *mhi_cntrl)
+{
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+
+ if (mhi_cntrl->rddm_image && mhi_get_exec_env(mhi_cntrl) == MHI_EE_RDDM
+ && mhi_is_active(mhi_cntrl)) {
+ mhi_cntrl->ee = MHI_EE_RDDM;
+
+ MHI_ERR("RDDM event occurred!\n");
+
+ /* notify critical clients with early notifications */
+ mhi_report_error(mhi_cntrl);
+
+ mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_EE_RDDM);
+ wake_up_all(&mhi_cntrl->state_event);
+
+ return true;
+ }
+
+ return false;
+}
+
int mhi_pm_suspend(struct mhi_controller *mhi_cntrl)
{
struct mhi_chan *itr, *tmp;
@@ -909,6 +932,9 @@ int mhi_pm_resume(struct mhi_controller *mhi_cntrl)
if (mhi_cntrl->pm_state != MHI_PM_M3)
panic("mhi_pm_state != M3");
+ if (mhi_in_rddm(mhi_cntrl))
+ return 0;
+
/* Notify clients about exiting LPM */
list_for_each_entry_safe(itr, tmp, &mhi_cntrl->lpm_chans, node) {
mutex_lock(&itr->mutex);
@@ -939,6 +965,8 @@ int mhi_pm_resume(struct mhi_controller *mhi_cntrl)
msecs_to_jiffies(mhi_cntrl->timeout_ms));
if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
+ if (mhi_in_rddm(mhi_cntrl))
+ return 0;
MHI_ERR(
"Did not enter M0 state, MHI state: %s, PM state: %s\n",
TO_MHI_STATE_STR(mhi_cntrl->dev_state),
diff --git a/qmi/qmi_encdec.c b/qmi/qmi_encdec.c
index 1d9f3f5..2b0ef97 100644
--- a/qmi/qmi_encdec.c
+++ b/qmi/qmi_encdec.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2015, 2019 The Linux Foundation. All rights reserved.
* Copyright (C) 2017 Linaro Ltd.
*/
#include <linux/slab.h>
@@ -538,8 +538,8 @@ static int qmi_decode_string_elem(struct qmi_elem_info *ei_array,
decoded_bytes += rc;
}
- if (string_len > temp_ei->elem_len) {
- pr_err("%s: String len %d > Max Len %d\n",
+ if (string_len >= temp_ei->elem_len) {
+ pr_err("%s: String len %d >= Max Len %d\n",
__func__, string_len, temp_ei->elem_len);
return -ETOOSMALL;
} else if (string_len > tlv_len) {
diff --git a/qmi/qmi_interface.c b/qmi/qmi_interface.c
index 5a6c07b..3814565 100644
--- a/qmi/qmi_interface.c
+++ b/qmi/qmi_interface.c
@@ -16,6 +16,7 @@
#include <linux/string.h>
#include <net/sock.h>
#include <linux/workqueue.h>
+#include <linux/rcupdate.h>
#ifdef CONFIG_CNSS_OUT_OF_TREE
#include "qmi/qmi.h"
#else
@@ -319,7 +320,6 @@ int qmi_txn_init(struct qmi_handle *qmi, struct qmi_txn *txn,
memset(txn, 0, sizeof(*txn));
- mutex_init(&txn->lock);
init_completion(&txn->completion);
txn->qmi = qmi;
txn->ei = ei;
@@ -355,10 +355,12 @@ int qmi_txn_wait(struct qmi_txn *txn, unsigned long timeout)
ret = wait_for_completion_timeout(&txn->completion, timeout);
+ if (txn->result == -ENETRESET) {
+ return txn->result;
+ }
+
mutex_lock(&qmi->txn_lock);
- mutex_lock(&txn->lock);
idr_remove(&qmi->txns, txn->id);
- mutex_unlock(&txn->lock);
mutex_unlock(&qmi->txn_lock);
if (ret == 0)
@@ -377,9 +379,7 @@ void qmi_txn_cancel(struct qmi_txn *txn)
struct qmi_handle *qmi = txn->qmi;
mutex_lock(&qmi->txn_lock);
- mutex_lock(&txn->lock);
idr_remove(&qmi->txns, txn->id);
- mutex_unlock(&txn->lock);
mutex_unlock(&qmi->txn_lock);
}
EXPORT_SYMBOL(qmi_txn_cancel);
@@ -449,24 +449,28 @@ static void qmi_handle_net_reset(struct qmi_handle *qmi)
struct sockaddr_qrtr sq;
struct qmi_service *svc;
struct socket *sock;
+ long timeo = qmi->sock->sk->sk_sndtimeo;
sock = qmi_sock_create(qmi, &sq);
if (IS_ERR(sock))
return;
- mutex_lock(&qmi->sock_lock);
- sock_release(qmi->sock);
- qmi->sock = NULL;
- mutex_unlock(&qmi->sock_lock);
-
qmi_recv_del_server(qmi, -1, -1);
if (qmi->ops.net_reset)
qmi->ops.net_reset(qmi);
mutex_lock(&qmi->sock_lock);
+ /* qmi_handle_release() has already started */
+ if (!qmi->sock) {
+ sock_release(sock);
+ mutex_unlock(&qmi->sock_lock);
+ return;
+ }
+ sock_release(qmi->sock);
qmi->sock = sock;
qmi->sq = sq;
+ qmi->sock->sk->sk_sndtimeo = timeo;
mutex_unlock(&qmi->sock_lock);
list_for_each_entry(svc, &qmi->lookups, list_node)
@@ -485,6 +489,9 @@ static void qmi_handle_message(struct qmi_handle *qmi,
struct qmi_txn *txn = NULL;
int ret;
+ if (!len)
+ return;
+
if (len < sizeof(*hdr)) {
pr_err("ignoring short QMI packet\n");
return;
@@ -502,10 +509,6 @@ static void qmi_handle_message(struct qmi_handle *qmi,
mutex_unlock(&qmi->txn_lock);
return;
}
-
- mutex_lock(&txn->lock);
- mutex_unlock(&qmi->txn_lock);
-
if (txn->dest && txn->ei) {
ret = qmi_decode_message(buf, len, txn->ei, txn->dest);
if (ret < 0)
@@ -513,11 +516,10 @@ static void qmi_handle_message(struct qmi_handle *qmi,
txn->result = ret;
complete(&txn->completion);
- } else {
+ } else {
qmi_invoke_handler(qmi, sq, txn, buf, len);
}
-
- mutex_unlock(&txn->lock);
+ mutex_unlock(&qmi->txn_lock);
} else {
/* Create a txn based on the txn_id of the incoming message */
memset(&tmp_txn, 0, sizeof(tmp_txn));
@@ -575,16 +577,17 @@ static void qmi_data_ready_work(struct work_struct *work)
static void qmi_data_ready(struct sock *sk)
{
- struct qmi_handle *qmi = sk->sk_user_data;
+ struct qmi_handle *qmi = NULL;
/*
* This will be NULL if we receive data while being in
* qmi_handle_release()
*/
- if (!qmi)
- return;
-
- queue_work(qmi->wq, &qmi->work);
+ rcu_read_lock();
+ qmi = rcu_dereference_sk_user_data(sk);
+ if (qmi)
+ queue_work(qmi->wq, &qmi->work);
+ rcu_read_unlock();
}
static struct socket *qmi_sock_create(struct qmi_handle *qmi,
@@ -604,14 +607,30 @@ static struct socket *qmi_sock_create(struct qmi_handle *qmi,
return ERR_PTR(ret);
}
- sock->sk->sk_user_data = qmi;
+ rcu_assign_sk_user_data(sock->sk, qmi);
sock->sk->sk_data_ready = qmi_data_ready;
sock->sk->sk_error_report = qmi_data_ready;
+ sock->sk->sk_sndtimeo = HZ * 10;
return sock;
}
/**
+ * qmi_set_sndtimeo() - set the sk_sndtimeo of the qmi handle
+ * @qmi: QMI client handle
+ * @timeo: timeout in jiffies.
+ *
+ * This sets the timeout for the blocking socket send in qmi send.
+ */
+void qmi_set_sndtimeo(struct qmi_handle *qmi, long timeo)
+{
+ mutex_lock(&qmi->sock_lock);
+ qmi->sock->sk->sk_sndtimeo = timeo;
+ mutex_unlock(&qmi->sock_lock);
+}
+EXPORT_SYMBOL(qmi_set_sndtimeo);
+
+/**
* qmi_handle_init() - initialize a QMI client handle
* @qmi: QMI handle to initialize
* @recv_buf_size: maximum size of incoming message
@@ -691,21 +710,32 @@ EXPORT_SYMBOL(qmi_handle_init);
*/
void qmi_handle_release(struct qmi_handle *qmi)
{
- struct socket *sock = qmi->sock;
+ struct socket *sock;
struct qmi_service *svc, *tmp;
-
- sock->sk->sk_user_data = NULL;
- cancel_work_sync(&qmi->work);
-
- qmi_recv_del_server(qmi, -1, -1);
+ struct qmi_txn *txn;
+ int txn_id;
mutex_lock(&qmi->sock_lock);
+ sock = qmi->sock;
+ rcu_assign_sk_user_data(sock->sk, NULL);
+ synchronize_rcu();
sock_release(sock);
qmi->sock = NULL;
mutex_unlock(&qmi->sock_lock);
+ cancel_work_sync(&qmi->work);
+
+ qmi_recv_del_server(qmi, -1, -1);
+
destroy_workqueue(qmi->wq);
+ mutex_lock(&qmi->txn_lock);
+ idr_for_each_entry(&qmi->txns, txn, txn_id) {
+ idr_remove(&qmi->txns, txn->id);
+ txn->result = -ENETRESET;
+ complete(&txn->completion);
+ }
+ mutex_unlock(&qmi->txn_lock);
idr_destroy(&qmi->txns);
kfree(qmi->recv_buf);
@@ -855,3 +885,5 @@ ssize_t qmi_send_indication(struct qmi_handle *qmi, struct sockaddr_qrtr *sq,
return rval;
}
EXPORT_SYMBOL(qmi_send_indication);
+
+MODULE_SOFTDEP("pre: qrtr");
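
With this change, a transaction that is still pending when qmi_handle_release() runs is completed with -ENETRESET, and qmi_txn_wait() returns that value directly. A hedged sketch of how a caller might distinguish it from an ordinary timeout; the wrapper name is hypothetical.

/* Hypothetical wrapper around the real qmi_txn_wait(). */
static int example_txn_wait(struct qmi_txn *txn, unsigned long timeout)
{
        int ret = qmi_txn_wait(txn, timeout);

        if (ret == -ENETRESET)
                return ret;     /* handle torn down; abort, do not retry */
        if (ret < 0)
                return ret;     /* timeout or decode failure */

        return 0;
}
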
diff --git a/qrtr/mhi.c b/qrtr/mhi.c
index fdffbd8..b097fac 100644
--- a/qrtr/mhi.c
+++ b/qrtr/mhi.c
@@ -122,10 +122,9 @@ static int qcom_mhi_qrtr_probe(struct mhi_device *mhi_dev,
int rc;
qdev = devm_kzalloc(&mhi_dev->dev, sizeof(*qdev), GFP_KERNEL);
- if (!qdev){
- dev_dbg(&mhi_dev->dev, "qdev alloc failure\n");
+ if (!qdev)
return -ENOMEM;
- }
+
qdev->mhi_dev = mhi_dev;
qdev->dev = &mhi_dev->dev;
qdev->ep.xmit = qcom_mhi_qrtr_send;
@@ -134,17 +133,14 @@ static int qcom_mhi_qrtr_probe(struct mhi_device *mhi_dev,
dev_set_drvdata(&mhi_dev->dev, qdev);
qrtr_mhi_of_parse(mhi_dev, &net_id, &rt);
- dev_dbg(&mhi_dev->dev, "start endpoint_register\n");
+
rc = qrtr_endpoint_register(&qdev->ep, net_id, rt);
- if (rc){
- dev_dbg(&mhi_dev->dev, "endpoint register failure (%d)\n",rc);
+ if (rc)
return rc;
- }
- /* start channels */
+ /* start channels */
rc = mhi_prepare_for_transfer(mhi_dev);
if (rc) {
- dev_dbg(&mhi_dev->dev, "mhi_prepare_for_transfer failure (%d)\n",rc);
qrtr_endpoint_unregister(&qdev->ep);
dev_set_drvdata(&mhi_dev->dev, NULL);
return rc;
diff --git a/qrtr/ns.c b/qrtr/ns.c
index cebe4b2..38e4405 100644
--- a/qrtr/ns.c
+++ b/qrtr/ns.c
@@ -9,12 +9,15 @@
#ifdef CONFIG_CNSS_OUT_OF_TREE
#include "ipc_logging.h"
-#include "uapi/qrtr.h"
#else
#include <linux/ipc_logging.h>
-#include <linux/qrtr.h>
#endif
#include <linux/module.h>
+#ifdef CONFIG_CNSS_OUT_OF_TREE
+#include "uapi/qrtr.h"
+#else
+#include <linux/qrtr.h>
+#endif
#include <linux/workqueue.h>
#include <linux/xarray.h>
#include <net/sock.h>
@@ -103,7 +106,7 @@ static struct qrtr_node *node_get(unsigned int node_id)
return node;
}
-unsigned int qrtr_get_service_id(unsigned int node_id, unsigned int port_id)
+int qrtr_get_service_id(unsigned int node_id, unsigned int port_id)
{
struct qrtr_server *srv;
struct qrtr_node *node;
@@ -111,14 +114,14 @@ unsigned int qrtr_get_service_id(unsigned int node_id, unsigned int port_id)
node = node_get(node_id);
if (!node)
- return 0;
+ return -EINVAL;
xa_for_each(&node->servers, index, srv) {
if (srv->node == node_id && srv->port == port_id)
return srv->service;
}
- return 0;
+ return -EINVAL;
}
EXPORT_SYMBOL(qrtr_get_service_id);
diff --git a/qrtr/qrtr.c b/qrtr/qrtr.c
index 0efe367..761f021 100644
--- a/qrtr/qrtr.c
+++ b/qrtr/qrtr.c
@@ -48,11 +48,6 @@
#define AID_VENDOR_QRTR KGIDT_INIT(2906)
-#if defined(CONFIG_RPMSG_QCOM_GLINK_NATIVE)
-extern bool glink_resume_pkt;
-#endif
-extern unsigned int qrtr_get_service_id(unsigned int node_id,
- unsigned int port_id);
/**
* struct qrtr_hdr_v1 - (I|R)PCrouter packet header version 1
* @version: protocol version
@@ -277,23 +272,6 @@ static void qrtr_log_tx_msg(struct qrtr_node *node, struct qrtr_hdr_v1 *hdr,
}
}
-#if defined(CONFIG_RPMSG_QCOM_GLINK_NATIVE)
-static void qrtr_log_resume_pkt(struct qrtr_cb *cb, u64 pl_buf)
-{
- unsigned int service_id;
-
- if (glink_resume_pkt) {
- glink_resume_pkt = false;
- service_id = qrtr_get_service_id(cb->src_node, cb->src_port);
- pr_info("[QRTR RESUME PKT]:src[0x%x:0x%x] dst[0x%x:0x%x] [%08x %08x]: service[0x%x]\n",
- cb->src_node, cb->src_port,
- cb->dst_node, cb->dst_port,
- (unsigned int)pl_buf, (unsigned int)(pl_buf >> 32),
- service_id);
- }
-}
-#endif
-
static void qrtr_log_rx_msg(struct qrtr_node *node, struct sk_buff *skb)
{
struct qrtr_ctrl_pkt pkt = {0,};
@@ -312,9 +290,6 @@ static void qrtr_log_rx_msg(struct qrtr_node *node, struct sk_buff *skb)
skb->len, cb->confirm_rx, cb->src_node, cb->src_port,
cb->dst_node, cb->dst_port,
(unsigned int)pl_buf, (unsigned int)(pl_buf >> 32));
-#if defined(CONFIG_RPMSG_QCOM_GLINK_NATIVE)
- qrtr_log_resume_pkt(cb, pl_buf);
-#endif
} else {
skb_copy_bits(skb, 0, &pkt, sizeof(pkt));
if (cb->type == QRTR_TYPE_NEW_SERVER ||
@@ -339,6 +314,64 @@ static void qrtr_log_rx_msg(struct qrtr_node *node, struct sk_buff *skb)
}
}
+void qrtr_print_wakeup_reason(const void *data)
+{
+ const struct qrtr_hdr_v1 *v1;
+ const struct qrtr_hdr_v2 *v2;
+ struct qrtr_cb cb;
+ unsigned int size;
+ unsigned int ver;
+ int service_id;
+ size_t hdrlen;
+ u64 preview = 0;
+
+ ver = *(u8 *)data;
+ switch (ver) {
+ case QRTR_PROTO_VER_1:
+ v1 = data;
+ hdrlen = sizeof(*v1);
+ cb.src_node = le32_to_cpu(v1->src_node_id);
+ cb.src_port = le32_to_cpu(v1->src_port_id);
+ cb.dst_node = le32_to_cpu(v1->dst_node_id);
+ cb.dst_port = le32_to_cpu(v1->dst_port_id);
+
+ size = le32_to_cpu(v1->size);
+ break;
+ case QRTR_PROTO_VER_2:
+ v2 = data;
+ hdrlen = sizeof(*v2) + v2->optlen;
+ cb.src_node = le16_to_cpu(v2->src_node_id);
+ cb.src_port = le16_to_cpu(v2->src_port_id);
+ cb.dst_node = le16_to_cpu(v2->dst_node_id);
+ cb.dst_port = le16_to_cpu(v2->dst_port_id);
+
+ if (cb.src_port == (u16)QRTR_PORT_CTRL)
+ cb.src_port = QRTR_PORT_CTRL;
+ if (cb.dst_port == (u16)QRTR_PORT_CTRL)
+ cb.dst_port = QRTR_PORT_CTRL;
+
+ size = le32_to_cpu(v2->size);
+ break;
+ default:
+ return;
+ }
+
+ service_id = qrtr_get_service_id(cb.src_node, cb.src_port);
+ if (service_id < 0)
+ service_id = qrtr_get_service_id(cb.dst_node, cb.dst_port);
+
+ size = (sizeof(preview) > size) ? size : sizeof(preview);
+ memcpy(&preview, data + hdrlen, size);
+
+ pr_info("%s: src[0x%x:0x%x] dst[0x%x:0x%x] [%08x %08x] service[0x%x]\n",
+ __func__,
+ cb.src_node, cb.src_port,
+ cb.dst_node, cb.dst_port,
+ (unsigned int)preview, (unsigned int)(preview >> 32),
+ service_id);
+}
+EXPORT_SYMBOL(qrtr_print_wakeup_reason);
+
static bool refcount_dec_and_rwsem_lock(refcount_t *r,
struct rw_semaphore *sem)
{
@@ -836,7 +869,7 @@ int qrtr_endpoint_post(struct qrtr_endpoint *ep, const void *data, size_t len)
struct qrtr_sock *ipc;
struct sk_buff *skb;
struct qrtr_cb *cb;
- unsigned int size;
+ size_t size;
unsigned int ver;
size_t hdrlen;
int errcode;
@@ -903,7 +936,7 @@ int qrtr_endpoint_post(struct qrtr_endpoint *ep, const void *data, size_t len)
if (cb->dst_port == QRTR_PORT_CTRL_LEGACY)
cb->dst_port = QRTR_PORT_CTRL;
- if (len != ALIGN(size, 4) + hdrlen)
+ if (!size || len != ALIGN(size, 4) + hdrlen)
goto err;
if (cb->dst_port != QRTR_PORT_CTRL && cb->type != QRTR_TYPE_DATA &&
@@ -1157,17 +1190,13 @@ int qrtr_endpoint_register(struct qrtr_endpoint *ep, unsigned int net_id,
struct qrtr_node *node;
struct sched_param param = {.sched_priority = 1};
- if (!ep || !ep->xmit){
- pr_err("%s: error qrtr ep=%p ex->xmit=%p\n",
- __func__, ep,ep->xmit);
-
+ if (!ep || !ep->xmit)
return -EINVAL;
- }
node = kzalloc(sizeof(*node), GFP_KERNEL);
- if (!node){
+ if (!node)
return -ENOMEM;
- }
+
kref_init(&node->ref);
mutex_init(&node->ep_lock);
skb_queue_head_init(&node->rx_queue);
@@ -1570,6 +1599,8 @@ static int qrtr_local_enqueue(struct qrtr_node *node, struct sk_buff *skb,
return 0;
}
if (!ipc || &ipc->sk == skb->sk) { /* do not send to self */
+ if (ipc)
+ qrtr_port_put(ipc);
kfree_skb(skb);
return -ENODEV;
}
@@ -1765,8 +1796,10 @@ static int qrtr_send_resume_tx(struct qrtr_cb *cb)
return -EINVAL;
skb = qrtr_alloc_ctrl_packet(&pkt);
- if (!skb)
+ if (!skb) {
+ qrtr_node_release(node);
return -ENOMEM;
+ }
pkt->cmd = cpu_to_le32(QRTR_TYPE_RESUME_TX);
pkt->client.node = cpu_to_le32(cb->dst_node);
diff --git a/qrtr/qrtr.h b/qrtr/qrtr.h
index 1c7ad59..0cf6948 100644
--- a/qrtr/qrtr.h
+++ b/qrtr/qrtr.h
@@ -41,5 +41,7 @@ void qrtr_ns_remove(void);
int qrtr_peek_pkt_size(const void *data);
-unsigned int qrtr_get_service_id(unsigned int node_id, unsigned int port_id);
+int qrtr_get_service_id(unsigned int node_id, unsigned int port_id);
+
+void qrtr_print_wakeup_reason(const void *data);
#endif
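
Finally, a sketch of how a qrtr transport could use the new qrtr_print_wakeup_reason() declaration above: log the packet that woke the system before posting it. The 'woken_by_rx' flag and function name are assumptions, and the buffer must hold a complete qrtr packet.

/* Hypothetical receive path; only the qrtr_* calls are from the patch. */
static int example_post_rx(struct qrtr_endpoint *ep, const void *buf,
                           size_t len, bool woken_by_rx)
{
        if (woken_by_rx)
                qrtr_print_wakeup_reason(buf);

        return qrtr_endpoint_post(ep, buf, len);
}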