author     terry-ht.chen <terry-ht.chen@broadcom.corp-partner.google.com>  2020-05-11 10:44:28 +0800
committer  Roger Wang <wangroger@google.com>  2020-06-05 10:23:44 +0800
commit     8dba7140a9b55e2d4e7df8e5be25ccdefefcda99
tree       403051bd347cf73fc1cf056371a13e7fa43f95b0
parent     9a4cdd5594f10a327b88b40c304cdf5b82e208b7
download   bcm43752-8dba7140a9b55e2d4e7df8e5be25ccdefefcda99.tar.gz
bcmdhd: Adjust dhd/fw log level, and reduce dhd log during interface up
For dbg ring level:
  - dhd dbgring level: add DHD_EVENT_VAL/DHD_PKT_MON_VAL/DHD_IOVAR_MEM_VAL/DHD_INFO_VAL as default level
  - wl dbgring level: add WL_DBG_P2P_ACTION/WL_DBG_INFO as default level
For driver log level:
  - dhd log level: remove DHD_FWLOG_VAL/DHD_IOVAR_MEM_VAL/DHD_INFO_VAL
  - wl log level: no change
For fw event log tag:
  - remove BEACON_LOG/PCI_WARN/PCI_INFO/PCI_DEBUG fw event log tags
  - add more tags for debugging fw issues

Bug: 151785383
Test: hikey960
Change-Id: If36147c2947dd34171080f662000438209de5922
Signed-off-by: Roger Wang <wangroger@google.com>
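To make the intent of the new defaults concrete, here is a minimal C sketch (not part of the patch) of the two masks after this change and how a log macro consults them. The bit positions below are placeholders chosen for illustration; the real DHD_*_VAL definitions and macros live in dhd_dbg.h and dhd_common.c.

    #include <stdio.h>

    /* Sketch only: placeholder bit values for the log-level flags. */
    #define DHD_ERROR_VAL      (1u << 0)
    #define DHD_INFO_VAL       (1u << 2)
    #define DHD_EVENT_VAL      (1u << 6)
    #define DHD_PKT_MON_VAL    (1u << 7)
    #define DHD_FWLOG_VAL      (1u << 8)
    #define DHD_IOVAR_MEM_VAL  (1u << 9)

    /* Console path: quieter after this patch (FWLOG/INFO/IOVAR_MEM dropped). */
    unsigned int dhd_msg_level = DHD_ERROR_VAL | DHD_EVENT_VAL | DHD_PKT_MON_VAL;

    /* Debug-ring path: richer after this patch, so the detail moves off the
     * console and into the dbg ring instead. */
    unsigned int dbgring_msg_level = DHD_ERROR_VAL | DHD_FWLOG_VAL | DHD_INFO_VAL |
                                     DHD_EVENT_VAL | DHD_PKT_MON_VAL | DHD_IOVAR_MEM_VAL;

    /* Each DHD_xxx() macro checks the relevant mask before emitting, roughly: */
    #define DHD_INFO_SKETCH(args) \
            do { if (dhd_msg_level & DHD_INFO_VAL) printf args; } while (0)

    int main(void)
    {
            /* With the new defaults, INFO-level messages are gated off the
             * console (dhd_msg_level) but would still pass the ring mask. */
            DHD_INFO_SKETCH(("printed only if DHD_INFO_VAL is set in dhd_msg_level\n"));
            return 0;
    }

The net effect, visible throughout the diff below, is that verbose DHD_ERROR/DHD_INFO call sites are demoted so routine interface-up messages land in the debug ring rather than the kernel console.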
-rw-r--r--dhd_common.c5
-rwxr-xr-xdhd_dbg.h19
-rwxr-xr-xdhd_debug.c23
-rw-r--r--dhd_event_log_filter.c2
-rwxr-xr-x[-rw-r--r--]dhd_flowring.c6
-rw-r--r--dhd_linux.c26
-rwxr-xr-xdhd_linux_exportfs.c6
-rwxr-xr-x[-rw-r--r--]dhd_linux_lb.c16
-rw-r--r--dhd_linux_platdev.c2
-rwxr-xr-x[-rw-r--r--]dhd_msgbuf.c122
-rwxr-xr-xdhd_pcie.c54
-rwxr-xr-x[-rw-r--r--]dhd_pcie_linux.c8
-rw-r--r--dhd_pktlog.c12
-rw-r--r--dhd_pno.c4
-rwxr-xr-xlinux_pkt.c2
-rw-r--r--wl_cfg80211.c34
-rwxr-xr-x[-rw-r--r--]wl_cfgscan.c16
-rw-r--r--wl_cfgvendor.c8
18 files changed, 195 insertions, 170 deletions
diff --git a/dhd_common.c b/dhd_common.c
index 43354e6..138354f 100644
--- a/dhd_common.c
+++ b/dhd_common.c
@@ -113,9 +113,10 @@
#ifdef DHD_LOG_PRINT_RATE_LIMIT
int log_print_threshold = 0;
#endif /* DHD_LOG_PRINT_RATE_LIMIT */
-int dbgring_msg_level = DHD_ERROR_VAL | DHD_FWLOG_VAL;
+int dbgring_msg_level = DHD_ERROR_VAL | DHD_FWLOG_VAL | DHD_INFO_VAL
+ | DHD_EVENT_VAL | DHD_PKT_MON_VAL | DHD_IOVAR_MEM_VAL;
/* For CUSTOMER_HW4/Hikey do not enable DHD_ERROR_MEM_VAL by default */
-int dhd_msg_level = DHD_ERROR_VAL | DHD_FWLOG_VAL | DHD_EVENT_VAL
+int dhd_msg_level = DHD_ERROR_VAL | DHD_EVENT_VAL
| DHD_PKT_MON_VAL;
#ifdef DHD_DEBUG
diff --git a/dhd_dbg.h b/dhd_dbg.h
index 56c25c9..55237cd 100755
--- a/dhd_dbg.h
+++ b/dhd_dbg.h
@@ -88,6 +88,9 @@ do { \
PRINTCFG("[%s][dhd][wlan] ", dhd_dbg_get_system_timestamp()); \
PRINTCFG args; \
} \
+ if (dbgring_msg_level & DHD_INFO_VAL) { \
+ DHD_DBG_RING_WRITE args; \
+ } \
} while (0)
#else /* DHD_LOG_DUMP */
/* !defined(DHD_LOG_DUMP cases) */
@@ -215,7 +218,21 @@ do { \
#define DHD_REORDER(args) do {if (dhd_msg_level & DHD_REORDER_VAL) printf args;} while (0)
#define DHD_PNO(args) do {if (dhd_msg_level & DHD_PNO_VAL) printf args;} while (0)
#define DHD_RTT(args) do {if (dhd_msg_level & DHD_RTT_VAL) printf args;} while (0)
-#define DHD_PKT_MON(args) do {if (dhd_msg_level & DHD_PKT_MON_VAL) printf args;} while (0)
+#define DHD_PKT_MON(args) \
+do { \
+ if (dhd_msg_level & DHD_ERROR_VAL) { \
+ if (dhd_msg_level & DHD_PKT_MON_VAL) { \
+ PRINTCFG("[%s][dhd][wlan] ", dhd_dbg_get_system_timestamp()); \
+ PRINTCFG args; \
+ } \
+ } \
+ if (dbgring_msg_level & DHD_PKT_MON_VAL) { \
+ DHD_DBG_RING_WRITE ("[%s][%s] %s: ", \
+ dhd_dbg_get_system_timestamp(), \
+ dhd_log_dump_get_timestamp(),__func__); \
+ DHD_DBG_RING_WRITE args; \
+ } \
+} while (0)
#if defined(DHD_LOG_DUMP)
#if defined(DHD_LOG_PRINT_RATE_LIMIT)
diff --git a/dhd_debug.c b/dhd_debug.c
index 19f8789..497ba61 100755
--- a/dhd_debug.c
+++ b/dhd_debug.c
@@ -107,10 +107,10 @@ struct map_table event_tag_map[] = {
/* define log level per ring type */
struct log_level_table fw_verbose_level_map[] = {
{1, EVENT_LOG_TAG_PCI_ERROR, "PCI_ERROR"},
- {1, EVENT_LOG_TAG_PCI_WARN, "PCI_WARN"},
- {2, EVENT_LOG_TAG_PCI_INFO, "PCI_INFO"},
- {3, EVENT_LOG_TAG_PCI_DBG, "PCI_DEBUG"},
- {3, EVENT_LOG_TAG_BEACON_LOG, "BEACON_LOG"},
+ //{1, EVENT_LOG_TAG_PCI_WARN, "PCI_WARN"},
+ //{2, EVENT_LOG_TAG_PCI_INFO, "PCI_INFO"},
+ //{3, EVENT_LOG_TAG_PCI_DBG, "PCI_DEBUG"},
+ //{3, EVENT_LOG_TAG_BEACON_LOG, "BEACON_LOG"},
{2, EVENT_LOG_TAG_WL_ASSOC_LOG, "ASSOC_LOG"},
{2, EVENT_LOG_TAG_WL_ROAM_LOG, "ROAM_LOG"},
{1, EVENT_LOG_TAG_TRACE_WL_INFO, "WL INFO"},
@@ -125,7 +125,14 @@ struct log_level_table fw_verbose_level_map[] = {
#endif /* CUSTOMER_HW4_DEBUG */
{1, EVENT_LOG_TAG_SCAN_ERROR, "SCAN_ERROR"},
{2, EVENT_LOG_TAG_SCAN_TRACE_LOW, "SCAN_TRACE_LOW"},
- {2, EVENT_LOG_TAG_SCAN_TRACE_HIGH, "SCAN_TRACE_HIGH"}
+ {2, EVENT_LOG_TAG_SCAN_TRACE_HIGH, "SCAN_TRACE_HIGH"},
+ {3, EVENT_LOG_TAG_WL_ERROR, "WL_ERROR"},
+ {3, EVENT_LOG_TAG_IE_ERROR, "IE_ERROR"},
+ {3, EVENT_LOG_TAG_ASSOC_ERROR, "ASSOC_ERROR"},
+ {3, EVENT_LOG_TAG_PMU_ERROR, "PMU_ERROR"},
+ {3, EVENT_LOG_TAG_4WAYHANDSHAKE, "8021X_ERROR"},
+ {3, EVENT_LOG_TAG_AMSDU_ERROR, "AMPDU_ERROR"},
+ {3, EVENT_LOG_TAG_SAE_ERROR, "SAE_ERROR"}
};
/* reference tab table */
@@ -1585,7 +1592,7 @@ dhd_dbg_attach_pkt_monitor(dhd_pub_t *dhdp,
dhdp->dbg->pkt_mon.rx_pkt_state = PKT_MON_ATTACHED;
DHD_PKT_MON_UNLOCK(dhdp->dbg->pkt_mon_lock, flags);
- DHD_PKT_MON(("%s(): packet monitor attach succeeded\n", __FUNCTION__));
+ DHD_INFO(("%s(): packet monitor attach succeeded\n", __FUNCTION__));
return ret;
fail:
@@ -1620,7 +1627,7 @@ fail:
dhdp->dbg->pkt_mon.rx_pkt_state = PKT_MON_DETACHED;
DHD_PKT_MON_UNLOCK(dhdp->dbg->pkt_mon_lock, flags);
- DHD_ERROR(("%s(): packet monitor attach failed\n", __FUNCTION__));
+ DHD_INFO(("%s(): packet monitor attach failed\n", __FUNCTION__));
return ret;
}
@@ -1686,7 +1693,7 @@ dhd_dbg_start_pkt_monitor(dhd_pub_t *dhdp)
dhdp->dbg->pkt_mon.rx_pkt_state = PKT_MON_STARTED;
DHD_PKT_MON_UNLOCK(dhdp->dbg->pkt_mon_lock, flags);
- DHD_PKT_MON(("%s(): packet monitor started\n", __FUNCTION__));
+ DHD_INFO(("%s(): packet monitor started\n", __FUNCTION__));
return BCME_OK;
}
diff --git a/dhd_event_log_filter.c b/dhd_event_log_filter.c
index a0d8bd0..0a87a8d 100644
--- a/dhd_event_log_filter.c
+++ b/dhd_event_log_filter.c
@@ -53,7 +53,7 @@
#define dtohchanspec(i) (i)
#define DHD_FILTER_ERR_INTERNAL(fmt, ...) DHD_ERROR(("EWPF-" fmt, ##__VA_ARGS__))
-#define DHD_FILTER_TRACE_INTERNAL(fmt, ...) DHD_INFO(("EWPF-" fmt, ##__VA_ARGS__))
+#define DHD_FILTER_TRACE_INTERNAL(fmt, ...) DHD_TRACE(("EWPF-" fmt, ##__VA_ARGS__))
#define DHD_FILTER_ERR(x) DHD_FILTER_ERR_INTERNAL x
#define DHD_FILTER_TRACE(x) DHD_FILTER_TRACE_INTERNAL x
diff --git a/dhd_flowring.c b/dhd_flowring.c
index 6fa2e29..2a62a71 100644..100755
--- a/dhd_flowring.c
+++ b/dhd_flowring.c
@@ -669,7 +669,7 @@ dhd_flowid_find(dhd_pub_t *dhdp, uint8 ifindex, uint8 prio, char *sa, char *da)
}
DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags);
- DHD_INFO(("%s: cannot find flowid\n", __FUNCTION__));
+ DHD_TRACE(("%s: cannot find flowid\n", __FUNCTION__));
return FLOWID_INVALID;
} /* dhd_flowid_find */
@@ -785,7 +785,7 @@ dhd_flowid_alloc(dhd_pub_t *dhdp, uint8 ifindex, uint8 prio, char *sa, char *da)
}
DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags);
- DHD_INFO(("%s: allocated flowid %d\n", __FUNCTION__, fl_hash_node->flowid));
+ DHD_TRACE(("%s: allocated flowid %d\n", __FUNCTION__, fl_hash_node->flowid));
if (fl_hash_node->flowid > dhdp->max_tx_flowid) {
DHD_ERROR(("%s: flowid=%d max_tx_flowid=%d ifindex=%d prio=%d role=%d\n",
@@ -1030,7 +1030,7 @@ BCMFASTPATH(dhd_flowid_update)(dhd_pub_t *dhdp, uint8 ifindex, uint8 prio, void
return BCME_ERROR;
}
- DHD_INFO(("%s: prio %d flowid %d\n", __FUNCTION__, prio, flowid));
+ DHD_TRACE(("%s: prio %d flowid %d\n", __FUNCTION__, prio, flowid));
/* Tag the packet with flowid */
DHD_PKT_SET_FLOWID(pktbuf, flowid);
diff --git a/dhd_linux.c b/dhd_linux.c
index 84dd1c9..ad46e5a 100644
--- a/dhd_linux.c
+++ b/dhd_linux.c
@@ -4861,7 +4861,7 @@ dhd_rx_frame(dhd_pub_t *dhdp, int ifidx, void *pktbuf, int numpkt, uint8 chan)
#if defined(DHD_WAKE_STATUS) && defined(DHD_WAKEPKT_DUMP)
if (pkt_wake) {
prhex("[wakepkt_dump]", (char*)dump_data, MIN(len, 32));
- DHD_ERROR(("config check in_suspend: %d ", dhdp->in_suspend));
+ DHD_ERROR(("config check in_suspend: %d \n", dhdp->in_suspend));
#ifdef ARP_OFFLOAD_SUPPORT
DHD_ERROR(("arp hmac_update:%d \n", dhdp->hmac_updated));
#endif /* ARP_OFFLOAD_SUPPORT */
@@ -7276,7 +7276,7 @@ dhd_open(struct net_device *net)
if (dhd->pub.up == 1) {
/* already up */
- DHD_ERROR(("Primary net_device is already up \n"));
+ DHD_INFO(("Primary net_device is already up \n"));
mutex_unlock(&dhd->pub.ndev_op_sync);
return BCME_OK;
}
@@ -7611,7 +7611,7 @@ dhd_pri_open(struct net_device *net)
/* Allow transmit calls */
dhd_tx_start_queues(net);
- DHD_ERROR(("[%s] tx queue started\n", net->name));
+ DHD_INFO(("[%s] tx queue started\n", net->name));
return ret;
}
@@ -9783,7 +9783,7 @@ dhd_bus_start(dhd_pub_t *dhdp)
/* max_h2d_rings includes H2D common rings */
uint32 max_h2d_rings = dhd_bus_max_h2d_queues(dhd->pub.bus);
- DHD_ERROR(("%s: Initializing %u h2drings\n", __FUNCTION__,
+ DHD_INFO(("%s: Initializing %u h2drings\n", __FUNCTION__,
max_h2d_rings));
if ((ret = dhd_flow_rings_init(&dhd->pub, max_h2d_rings)) != BCME_OK) {
#ifdef BCMSDIO
@@ -10365,7 +10365,7 @@ dhd_get_preserve_log_numbers(dhd_pub_t *dhd, uint32 *logset_mask)
logset_op.type == EVENT_LOG_SET_TYPE_PRSRV) {
*logset_mask |= 0x01u << i;
ret = BCME_OK;
- DHD_ERROR(("[INIT] logset:%d is preserve/chatty\n", i));
+ DHD_INFO(("[INIT] logset:%d is preserve/chatty\n", i));
}
}
@@ -11347,11 +11347,11 @@ dhd_legacy_preinit_ioctls(dhd_pub_t *dhd)
#ifdef DHD_PCIE_RUNTIMEPM
/* Disable RuntimePM in mfg mode */
DHD_DISABLE_RUNTIME_PM(dhd);
- DHD_ERROR(("%s : Disable RuntimePM in Manufactring Firmware\n", __FUNCTION__));
+ DHD_INFO(("%s : Disable RuntimePM in Manufactring Firmware\n", __FUNCTION__));
#endif /* DHD_PCIE_RUNTIME_PM */
/* Check and adjust IOCTL response timeout for Manufactring firmware */
dhd_os_set_ioctl_resp_timeout(MFG_IOCTL_RESP_TIMEOUT);
- DHD_ERROR(("%s : Set IOCTL response time for Manufactring Firmware\n",
+ DHD_INFO(("%s : Set IOCTL response time for Manufactring Firmware\n",
__FUNCTION__));
} else {
dhd_os_set_ioctl_resp_timeout(IOCTL_RESP_TIMEOUT);
@@ -11381,7 +11381,7 @@ dhd_legacy_preinit_ioctls(dhd_pub_t *dhd)
DHD_ERROR(("%s: axierror_logbuf_addr IOVAR not present, proceed\n", __FUNCTION__));
dhd->axierror_logbuf_addr = 0;
} else {
- DHD_ERROR(("%s: axierror_logbuf_addr : 0x%x\n",
+ DHD_INFO(("%s: axierror_logbuf_addr : 0x%x\n",
__FUNCTION__, dhd->axierror_logbuf_addr));
}
#endif /* DNGL_AXI_ERROR_LOGGING */
@@ -11392,7 +11392,7 @@ dhd_legacy_preinit_ioctls(dhd_pub_t *dhd)
if (ret < 0) {
DHD_ERROR(("%s event_log_rate_hc set failed %d\n", __FUNCTION__, ret));
} else {
- DHD_ERROR(("%s event_log_rate_hc set with threshold:%d\n", __FUNCTION__,
+ DHD_INFO(("%s event_log_rate_hc set with threshold:%d\n", __FUNCTION__,
event_log_rate_hc));
}
#endif /* EVENT_LOG_RATE_HC */
@@ -11419,7 +11419,7 @@ dhd_legacy_preinit_ioctls(dhd_pub_t *dhd)
goto done;
}
- DHD_ERROR(("%s: use firmware generated mac_address "MACDBG"\n",
+ DHD_INFO(("%s: use firmware generated mac_address "MACDBG"\n",
__FUNCTION__, MAC2STRDBG(&buf)));
#ifdef MACADDR_PROVISION_ENFORCED
@@ -15073,7 +15073,7 @@ int net_os_rxfilter_add_remove(struct net_device *dev, int add_remove, int num)
#ifndef GAN_LITE_NAT_KEEPALIVE_FILTER
dhd_info_t *dhd = DHD_DEV_INFO(dev);
- DHD_ERROR(("%s: add_remove = %d, num = %d\n", __FUNCTION__, add_remove, num));
+ DHD_INFO(("%s: add_remove = %d, num = %d\n", __FUNCTION__, add_remove, num));
if (!dhd || (num == DHD_UNICAST_FILTER_NUM)) {
return 0;
}
@@ -15126,7 +15126,7 @@ int net_os_enable_packet_filter(struct net_device *dev, int val)
{
dhd_info_t *dhd = DHD_DEV_INFO(dev);
- DHD_ERROR(("%s: val = %d\n", __FUNCTION__, val));
+ DHD_INFO(("%s: val = %d\n", __FUNCTION__, val));
return dhd_os_enable_packet_filter(&dhd->pub, val);
}
#endif /* PKT_FILTER_SUPPORT */
@@ -21583,7 +21583,7 @@ dhd_set_blob_support(dhd_pub_t *dhdp, char *fw_path)
filepath));
dhdp->is_blob = FALSE;
} else {
- DHD_ERROR(("%s: ----- blob file exists (%s) -----\n", __FUNCTION__, filepath));
+ DHD_INFO(("%s: ----- blob file exists (%s) -----\n", __FUNCTION__, filepath));
dhdp->is_blob = TRUE;
#if defined(CONCATE_BLOB)
strncat(fw_path, "_blob", strlen("_blob"));
diff --git a/dhd_linux_exportfs.c b/dhd_linux_exportfs.c
index 0a1214e..0da780c 100755
--- a/dhd_linux_exportfs.c
+++ b/dhd_linux_exportfs.c
@@ -110,7 +110,7 @@ dhd_dbg_ring_proc_create(dhd_pub_t *dhdp)
dbg_verbose_ring)) {
DHD_ERROR(("Failed to create /proc/dhd_trace procfs interface\n"));
} else {
- DHD_ERROR(("Created /proc/dhd_trace procfs interface\n"));
+ DHD_INFO(("Created /proc/dhd_trace procfs interface\n"));
}
} else {
DHD_ERROR(("dbg_verbose_ring is NULL, /proc/dhd_trace not created\n"));
@@ -122,7 +122,7 @@ dhd_dbg_ring_proc_create(dhd_pub_t *dhdp)
dhdp->ecntr_dbg_ring)) {
DHD_ERROR(("Failed to create /proc/dhd_ecounters procfs interface\n"));
} else {
- DHD_ERROR(("Created /proc/dhd_ecounters procfs interface\n"));
+ DHD_INFO(("Created /proc/dhd_ecounters procfs interface\n"));
}
#endif /* EWP_ECNTRS_LOGGING */
@@ -131,7 +131,7 @@ dhd_dbg_ring_proc_create(dhd_pub_t *dhdp)
dhdp->rtt_dbg_ring)) {
DHD_ERROR(("Failed to create /proc/dhd_rtt procfs interface\n"));
} else {
- DHD_ERROR(("Created /proc/dhd_rtt procfs interface\n"));
+ DHD_INFO(("Created /proc/dhd_rtt procfs interface\n"));
}
#endif /* EWP_RTT_LOGGING */
}
diff --git a/dhd_linux_lb.c b/dhd_linux_lb.c
index 882866f..cc86317 100644..100755
--- a/dhd_linux_lb.c
+++ b/dhd_linux_lb.c
@@ -1018,7 +1018,7 @@ dhd_napi_poll(struct napi_struct *napi, int budget)
dhd = container_of(napi, struct dhd_info, rx_napi_struct);
GCC_DIAGNOSTIC_POP();
- DHD_INFO(("%s napi_queue<%d> budget<%d>\n",
+ DHD_TRACE(("%s napi_queue<%d> budget<%d>\n",
__FUNCTION__, skb_queue_len(&dhd->rx_napi_queue), budget));
/*
@@ -1040,7 +1040,7 @@ dhd_napi_poll(struct napi_struct *napi, int budget)
ifid = DHD_PKTTAG_IFID((dhd_pkttag_fr_t *)PKTTAG(skb));
- DHD_INFO(("%s dhd_rx_frame pkt<%p> ifid<%d>\n",
+ DHD_TRACE(("%s dhd_rx_frame pkt<%p> ifid<%d>\n",
__FUNCTION__, skb, ifid));
dhd_rx_frame(&dhd->pub, ifid, skb, pkt_count, chan);
@@ -1049,7 +1049,7 @@ dhd_napi_poll(struct napi_struct *napi, int budget)
DHD_LB_STATS_UPDATE_NAPI_HISTO(&dhd->pub, processed);
- DHD_INFO(("%s processed %d\n", __FUNCTION__, processed));
+ DHD_TRACE(("%s processed %d\n", __FUNCTION__, processed));
if (processed < budget) {
napi_complete(napi);
@@ -1077,7 +1077,7 @@ dhd_napi_schedule(void *info)
dhd_info_t *dhd = (dhd_info_t *)info;
unsigned long flags;
- DHD_INFO(("%s rx_napi_struct<%p> on cpu<%d>\n",
+ DHD_TRACE(("%s rx_napi_struct<%p> on cpu<%d>\n",
__FUNCTION__, &dhd->rx_napi_struct, atomic_read(&dhd->rx_napi_cpu)));
/*
@@ -1185,7 +1185,7 @@ dhd_lb_rx_napi_dispatch(dhd_pub_t *dhdp)
return;
}
- DHD_INFO(("%s append napi_queue<%d> pend_queue<%d>\n", __FUNCTION__,
+ DHD_TRACE(("%s append napi_queue<%d> pend_queue<%d>\n", __FUNCTION__,
skb_queue_len(&dhd->rx_napi_queue), skb_queue_len(&dhd->rx_pend_queue)));
/* append the producer's queue of packets to the napi's rx process queue */
@@ -1226,7 +1226,7 @@ dhd_lb_rx_napi_dispatch(dhd_pub_t *dhdp)
rx_napi_cpu = atomic_read(&dhd->rx_napi_cpu);
}
- DHD_INFO(("%s : schedule to curr_cpu : %d, rx_napi_cpu : %d\n",
+ DHD_TRACE(("%s : schedule to curr_cpu : %d, rx_napi_cpu : %d\n",
__FUNCTION__, curr_cpu, rx_napi_cpu));
dhd_work_schedule_on(&dhd->rx_napi_dispatcher_work, rx_napi_cpu);
DHD_LB_STATS_INCR(dhd->napi_sched_cnt);
@@ -1242,7 +1242,7 @@ dhd_lb_rx_pkt_enqueue(dhd_pub_t *dhdp, void *pkt, int ifidx)
{
dhd_info_t *dhd = dhdp->info;
- DHD_INFO(("%s enqueue pkt<%p> ifidx<%d> pend_queue<%d>\n", __FUNCTION__,
+ DHD_TRACE(("%s enqueue pkt<%p> ifidx<%d> pend_queue<%d>\n", __FUNCTION__,
pkt, ifidx, skb_queue_len(&dhd->rx_pend_queue)));
DHD_PKTTAG_SET_IFID((dhd_pkttag_fr_t *)PKTTAG(pkt), ifidx);
__skb_queue_tail(&dhd->rx_pend_queue, pkt);
@@ -1333,7 +1333,7 @@ dhd_lb_tx_process(dhd_info_t *dhd)
} while (1);
- DHD_INFO(("%s(): Processed %d packets \r\n", __FUNCTION__, cnt));
+ DHD_TRACE(("%s(): Processed %d packets \r\n", __FUNCTION__, cnt));
return resched;
}
diff --git a/dhd_linux_platdev.c b/dhd_linux_platdev.c
index 239deab..fb95cd4 100644
--- a/dhd_linux_platdev.c
+++ b/dhd_linux_platdev.c
@@ -238,7 +238,7 @@ int wifi_platform_get_mac_addr(wifi_adapter_info_t *adapter, unsigned char *buf)
{
struct wifi_platform_data *plat_data;
- DHD_ERROR(("%s\n", __FUNCTION__));
+ DHD_INFO(("%s\n", __FUNCTION__));
if (!buf || !adapter || !adapter->wifi_plat_data)
return -EINVAL;
plat_data = adapter->wifi_plat_data;
diff --git a/dhd_msgbuf.c b/dhd_msgbuf.c
index 71bf334..0aea29c 100644..100755
--- a/dhd_msgbuf.c
+++ b/dhd_msgbuf.c
@@ -943,7 +943,7 @@ dhd_prot_d2h_sync_livelock(dhd_pub_t *dhd, uint32 msg_seqnum, msgbuf_ring_t *rin
return;
}
- DHD_ERROR((
+ DHD_INFO((
"LIVELOCK DHD<%p> ring<%s> msg_seqnum<%u> ring_seqnum<%u:%u> tries<%u> max<%lu>"
" tot<%lu> dma_buf va<%p> msg<%p> curr_rd<%d> rd<%d> wr<%d>\n",
dhd, ring->name, msg_seqnum, ring_seqnum, ring_seqnum% D2H_EPOCH_MODULO, tries,
@@ -1382,13 +1382,13 @@ dhd_prot_d2h_sync_init(dhd_pub_t *dhd)
#ifdef EWP_EDL
prot->d2h_edl_sync_cb = dhd_prot_d2h_sync_edl;
#endif /* EWP_EDL */
- DHD_ERROR(("%s(): D2H sync mechanism is SEQNUM \r\n", __FUNCTION__));
+ DHD_INFO(("%s(): D2H sync mechanism is SEQNUM \r\n", __FUNCTION__));
} else if (dhd->d2h_sync_mode & PCIE_SHARED_D2H_SYNC_XORCSUM) {
prot->d2h_sync_cb = dhd_prot_d2h_sync_xorcsum;
#ifdef EWP_EDL
prot->d2h_edl_sync_cb = dhd_prot_d2h_sync_edl;
#endif /* EWP_EDL */
- DHD_ERROR(("%s(): D2H sync mechanism is XORCSUM \r\n", __FUNCTION__));
+ DHD_INFO(("%s(): D2H sync mechanism is XORCSUM \r\n", __FUNCTION__));
} else {
prot->d2h_sync_cb = dhd_prot_d2h_sync_none;
#ifdef EWP_EDL
@@ -3193,40 +3193,40 @@ dhd_set_host_cap(dhd_pub_t *dhd)
}
if (dhdpcie_bus_get_pcie_hwa_supported(dhd->bus)) {
- DHD_ERROR(("HWA inited\n"));
+ DHD_INFO(("HWA inited\n"));
data |= HOSTCAP_HWA;
dhd->hwa_inited = TRUE;
} else {
- DHD_ERROR(("HWA not enabled in FW !!\n"));
+ DHD_INFO(("HWA not enabled in FW !!\n"));
dhd->hwa_inited = FALSE;
}
if (dhdpcie_bus_get_pcie_idma_supported(dhd->bus)) {
- DHD_ERROR(("IDMA inited\n"));
+ DHD_INFO(("IDMA inited\n"));
data |= HOSTCAP_H2D_IDMA;
dhd->idma_inited = TRUE;
} else {
- DHD_ERROR(("IDMA not enabled in FW !!\n"));
+ DHD_INFO(("IDMA not enabled in FW !!\n"));
dhd->idma_inited = FALSE;
}
if (dhdpcie_bus_get_pcie_ifrm_supported(dhd->bus)) {
- DHD_ERROR(("IFRM Inited\n"));
+ DHD_INFO(("IFRM Inited\n"));
data |= HOSTCAP_H2D_IFRM;
dhd->ifrm_inited = TRUE;
dhd->dma_h2d_ring_upd_support = FALSE;
dhd_prot_dma_indx_free(dhd);
} else {
- DHD_ERROR(("IFRM not enabled in FW !!\n"));
+ DHD_INFO(("IFRM not enabled in FW !!\n"));
dhd->ifrm_inited = FALSE;
}
if (dhdpcie_bus_get_pcie_dar_supported(dhd->bus)) {
- DHD_ERROR(("DAR doorbell Use\n"));
+ DHD_INFO(("DAR doorbell Use\n"));
data |= HOSTCAP_H2D_DAR;
dhd->dar_inited = TRUE;
} else {
- DHD_ERROR(("DAR not enabled in FW !!\n"));
+ DHD_INFO(("DAR not enabled in FW !!\n"));
dhd->dar_inited = FALSE;
}
@@ -3242,26 +3242,26 @@ dhd_set_host_cap(dhd_pub_t *dhd)
#ifdef EWP_EDL
if (dhd->dongle_edl_support) {
data |= HOSTCAP_EDL_RING;
- DHD_ERROR(("Enable EDL host cap\n"));
+ DHD_INFO(("Enable EDL host cap\n"));
} else {
- DHD_ERROR(("DO NOT SET EDL host cap\n"));
+ DHD_INFO(("DO NOT SET EDL host cap\n"));
}
#endif /* EWP_EDL */
#ifdef DHD_DB0TS
if (dhd->db0ts_capable) {
data |= HOSTCAP_DB0_TIMESTAMP;
- DHD_ERROR(("Enable DB0 TS in host cap\n"));
+ DHD_INFO(("Enable DB0 TS in host cap\n"));
} else {
- DHD_ERROR(("DB0 TS not enabled in host cap\n"));
+ DHD_INFO(("DB0 TS not enabled in host cap\n"));
}
#endif /* DHD_DB0TS */
if (dhd->extdtxs_in_txcpl) {
- DHD_ERROR(("Enable hostcap: EXTD TXS in txcpl\n"));
+ DHD_INFO(("Enable hostcap: EXTD TXS in txcpl\n"));
data |= HOSTCAP_PKT_TXSTATUS;
}
else {
- DHD_ERROR(("Enable hostcap: EXTD TXS in txcpl\n"));
+ DHD_INFO(("Enable hostcap: EXTD TXS in txcpl\n"));
}
DHD_INFO(("%s:Active Ver:%d, Host Ver:%d, FW Ver:%d\n",
@@ -3300,11 +3300,11 @@ dhd_prot_init(dhd_pub_t *dhd)
* if user has not defined any buffers by one of the above methods.
*/
prot->h2d_max_txpost = (uint16)h2d_max_txpost;
- DHD_ERROR(("%s:%d: h2d_max_txpost = %d\n", __FUNCTION__, __LINE__, prot->h2d_max_txpost));
+ DHD_INFO(("%s:%d: h2d_max_txpost = %d\n", __FUNCTION__, __LINE__, prot->h2d_max_txpost));
#if defined(DHD_HTPUT_TUNABLES)
prot->h2d_htput_max_txpost = (uint16)h2d_htput_max_txpost;
- DHD_ERROR(("%s:%d: h2d_htput_max_txpost = %d\n",
+ DHD_INFO(("%s:%d: h2d_htput_max_txpost = %d\n",
__FUNCTION__, __LINE__, prot->h2d_htput_max_txpost));
#endif /* DHD_HTPUT_TUNABLES */
@@ -3315,7 +3315,7 @@ dhd_prot_init(dhd_pub_t *dhd)
/* using the latest shared structure template */
prot->max_rxbufpost = DEFAULT_RX_BUFFERS_TO_POST;
}
- DHD_ERROR(("%s:%d: MAX_RXBUFPOST = %d\n", __FUNCTION__, __LINE__, prot->max_rxbufpost));
+ DHD_INFO(("%s:%d: MAX_RXBUFPOST = %d\n", __FUNCTION__, __LINE__, prot->max_rxbufpost));
/* Initialize. bzero() would blow away the dma pointers. */
prot->max_eventbufpost = DHD_FLOWRING_MAX_EVENTBUF_POST;
@@ -3465,7 +3465,7 @@ dhd_prot_init(dhd_pub_t *dhd)
while (!(idmacontrol & PCIE_IDMA_MODE_EN(buscorerev)) &&
(waitcount++ < IDMA_ENABLE_WAIT)) {
- DHD_ERROR(("iDMA not enabled yet,waiting 1 ms c=%d IDMAControl = %08x\n",
+ DHD_INFO(("iDMA not enabled yet,waiting 1 ms c=%d IDMAControl = %08x\n",
waitcount, idmacontrol));
OSL_DELAY(1000); /* 1ms as its onetime only */
idmacontrol = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
@@ -3473,7 +3473,7 @@ dhd_prot_init(dhd_pub_t *dhd)
}
if (waitcount < IDMA_ENABLE_WAIT) {
- DHD_ERROR(("iDMA enabled PCIEControl = %08x\n", idmacontrol));
+ DHD_INFO(("iDMA enabled PCIEControl = %08x\n", idmacontrol));
} else {
DHD_ERROR(("Error: wait for iDMA timed out wait=%d IDMAControl = %08x\n",
waitcount, idmacontrol));
@@ -3972,7 +3972,7 @@ dhd_prot_init_info_rings(dhd_pub_t *dhd)
return ret;
}
- DHD_TRACE(("trying to send create d2h info ring: id %d\n", prot->d2hring_info_cpln->idx));
+ DHD_INFO(("trying to send create d2h info ring: id %d\n", prot->d2hring_info_cpln->idx));
ret = dhd_send_d2h_ringcreate(dhd, prot->d2hring_info_cpln,
BCMPCIE_D2H_RING_TYPE_DBGBUF_CPL, DHD_D2H_DBGRING_REQ_PKTID);
if (ret != BCME_OK)
@@ -3983,7 +3983,7 @@ dhd_prot_init_info_rings(dhd_pub_t *dhd)
prot->d2hring_info_cpln->seqnum = D2H_EPOCH_INIT_VAL;
prot->d2hring_info_cpln->current_phase = BCMPCIE_CMNHDR_PHASE_BIT_INIT;
- DHD_TRACE(("trying to send create h2d info ring id %d\n", prot->h2dring_info_subn->idx));
+ DHD_INFO(("trying to send create h2d info ring id %d\n", prot->h2dring_info_subn->idx));
prot->h2dring_info_subn->n_completion_ids = 1;
prot->h2dring_info_subn->compeltion_ring_ids[0] = prot->d2hring_info_cpln->idx;
@@ -4077,7 +4077,7 @@ dhd_prot_init_edl_rings(dhd_pub_t *dhd)
return ret;
}
- DHD_ERROR(("trying to send create d2h edl ring: idx %d\n", prot->d2hring_edl->idx));
+ DHD_INFO(("trying to send create d2h edl ring: idx %d\n", prot->d2hring_edl->idx));
ret = dhd_send_d2h_ringcreate(dhd, prot->d2hring_edl,
BCMPCIE_D2H_RING_TYPE_EDL, DHD_D2H_DBGRING_REQ_PKTID);
if (ret != BCME_OK)
@@ -4136,7 +4136,7 @@ int dhd_sync_with_dongle(dhd_pub_t *dhd)
dhd->wlc_ver_minor = ((wl_wlc_version_t*)buf)->wlc_ver_minor;
}
- DHD_ERROR(("\nwlc_ver_major %d, wlc_ver_minor %d", dhd->wlc_ver_major, dhd->wlc_ver_minor));
+ DHD_INFO(("\nwlc_ver_major %d, wlc_ver_minor %d", dhd->wlc_ver_major, dhd->wlc_ver_minor));
#ifdef DHD_FW_COREDUMP
/* Check the memdump capability */
@@ -4153,7 +4153,7 @@ int dhd_sync_with_dongle(dhd_pub_t *dhd)
DHD_ERROR(("%s: GET revinfo FAILED\n", __FUNCTION__));
goto done;
}
- DHD_ERROR(("%s: GET_REVINFO device 0x%x, vendor 0x%x, chipnum 0x%x\n", __FUNCTION__,
+ DHD_INFO(("%s: GET_REVINFO device 0x%x, vendor 0x%x, chipnum 0x%x\n", __FUNCTION__,
revinfo.deviceid, revinfo.vendorid, revinfo.chipnum));
/* Get the RxBuf post size */
@@ -4180,7 +4180,7 @@ int dhd_sync_with_dongle(dhd_pub_t *dhd)
DHD_FLOWRING_RX_BUFPOST_PKTSZ));
prot->rxbufpost_sz = DHD_FLOWRING_RX_BUFPOST_PKTSZ;
} else {
- DHD_ERROR(("%s: RxBuf Post : %d\n",
+ DHD_INFO(("%s: RxBuf Post : %d\n",
__FUNCTION__, prot->rxbufpost_sz));
}
}
@@ -4787,7 +4787,7 @@ dhd_prot_infobufpost(dhd_pub_t *dhd, msgbuf_ring_t *ring)
DHD_PKTID_AUDIT(dhd, prot->pktid_ctrl_map, pktid, DHD_DUPLICATE_ALLOC);
#endif /* DHD_PKTID_AUDIT_RING */
- DHD_INFO(("ID %d, low_addr 0x%08x, high_addr 0x%08x\n",
+ DHD_TRACE(("ID %d, low_addr 0x%08x, high_addr 0x%08x\n",
infobuf_post->cmn_hdr.request_id, infobuf_post->host_buf_addr.low_addr,
infobuf_post->host_buf_addr.high_addr));
@@ -5104,7 +5104,7 @@ dhd_msgbuf_rxbuf_post_ctrlpath(dhd_pub_t *dhd, uint8 msg_type, uint32 max_to_pos
uint32 i = 0;
int32 ret_val;
- DHD_INFO(("max to post %d, event %d \n", max_to_post, msg_type));
+ DHD_TRACE(("max to post %d, event %d \n", max_to_post, msg_type));
if (dhd->busstate == DHD_BUS_DOWN) {
DHD_ERROR(("%s: bus is already down.\n", __FUNCTION__));
@@ -5117,7 +5117,7 @@ dhd_msgbuf_rxbuf_post_ctrlpath(dhd_pub_t *dhd, uint8 msg_type, uint32 max_to_pos
break;
i++;
}
- DHD_INFO(("posted %d buffers of type %d\n", i, msg_type));
+ DHD_TRACE(("posted %d buffers of type %d\n", i, msg_type));
return (uint16)i;
}
@@ -5127,7 +5127,7 @@ dhd_msgbuf_rxbuf_post_ioctlresp_bufs(dhd_pub_t *dhd)
dhd_prot_t *prot = dhd->prot;
int max_to_post;
- DHD_INFO(("ioctl resp buf post\n"));
+ DHD_TRACE(("ioctl resp buf post\n"));
max_to_post = prot->max_ioctlrespbufpost - prot->cur_ioctlresp_bufs_posted;
if (max_to_post <= 0) {
DHD_INFO(("%s: Cannot post more than max IOCTL resp buffers\n",
@@ -5615,7 +5615,7 @@ BCMFASTPATH(dhd_prot_process_msgbuf_rxcpl)(dhd_pub_t *dhd, uint bound, int ringt
dhd->dma_stats.rxdata--;
dhd->dma_stats.rxdata_sz -= len;
#endif /* DMAMAP_STATS */
- DHD_INFO(("id 0x%04x, offset %d, len %d, idx %d, phase 0x%02x, "
+ DHD_TRACE(("id 0x%04x, offset %d, len %d, idx %d, phase 0x%02x, "
"pktdata %p, metalen %d\n",
ltoh32(msg->cmn_hdr.request_id),
ltoh16(msg->data_offset),
@@ -5920,7 +5920,7 @@ BCMFASTPATH(dhd_prot_process_ctrlbuf)(dhd_pub_t *dhd)
/* Prefetch data to populate the cache */
OSL_PREFETCH(msg_addr);
if (dhd_prot_process_msgtype(dhd, ring, msg_addr, msg_len) != BCME_OK) {
- DHD_ERROR(("%s: process %s msg addr %p len %d\n",
+ DHD_TRACE(("%s: process %s msg addr %p len %d\n",
__FUNCTION__, ring->name, msg_addr, msg_len));
}
@@ -5984,17 +5984,17 @@ BCMFASTPATH(dhd_prot_process_msgtype)(dhd_pub_t *dhd, msgbuf_ring_t *ring, uint8
/* Prefetch data to populate the cache */
OSL_PREFETCH(buf + item_len);
- DHD_INFO(("msg_type %d item_len %d buf_len %d\n",
+ DHD_TRACE(("msg_type %d item_len %d buf_len %d\n",
msg_type, item_len, buf_len));
if (msg_type == MSG_TYPE_LOOPBACK) {
bcm_print_bytes("LPBK RESP: ", (uint8 *)msg, item_len);
- DHD_ERROR((" MSG_TYPE_LOOPBACK, len %d\n", item_len));
+ DHD_TRACE((" MSG_TYPE_LOOPBACK, len %d\n", item_len));
}
ASSERT(msg_type < DHD_PROT_FUNCS);
if (msg_type >= DHD_PROT_FUNCS) {
- DHD_ERROR(("%s: msg_type %d, item_len %d buf_len %d\n",
+ DHD_TRACE(("%s: msg_type %d, item_len %d buf_len %d\n",
__FUNCTION__, msg_type, item_len, buf_len));
ret = BCME_ERROR;
goto done;
@@ -6048,7 +6048,7 @@ dhd_prot_ringstatus_process(dhd_pub_t *dhd, void *msg)
uint16 status = ltoh16(ring_status->compl_hdr.status);
uint16 ring_id = ltoh16(ring_status->compl_hdr.flow_ring_id);
- DHD_ERROR(("ring status: request_id %d, status 0x%04x, flow ring %d, write_idx %d \n",
+ DHD_TRACE(("ring status: request_id %d, status 0x%04x, flow ring %d, write_idx %d \n",
request_id, status, ring_id, ltoh16(ring_status->write_idx)));
if (ltoh16(ring_status->compl_hdr.ring_id) != BCMPCIE_H2D_MSGRING_CONTROL_SUBMIT)
@@ -6069,7 +6069,7 @@ dhd_prot_ringstatus_process(dhd_pub_t *dhd, void *msg)
dhd->prot->h2dring_info_subn->create_pending = FALSE;
}
else
- DHD_ERROR(("ring create ID for a ring, create not pending\n"));
+ DHD_INFO(("ring create ID for a ring, create not pending\n"));
} else {
DHD_ERROR(("%s info submit ring doesn't exist\n", __FUNCTION__));
}
@@ -6082,7 +6082,7 @@ dhd_prot_ringstatus_process(dhd_pub_t *dhd, void *msg)
dhd->prot->d2hring_info_cpln->create_pending = FALSE;
}
else
- DHD_ERROR(("ring create ID for info ring, create not pending\n"));
+ DHD_INFO(("ring create ID for info ring, create not pending\n"));
} else {
DHD_ERROR(("%s info cpl ring doesn't exist\n", __FUNCTION__));
}
@@ -6336,7 +6336,7 @@ BCMFASTPATH(dhd_prot_txstatus_process)(dhd_pub_t *dhd, void *msg)
DHD_DUPLICATE_FREE, msg, D2HRING_TXCMPLT_ITEMSIZE);
#endif
- DHD_INFO(("txstatus for pktid 0x%04x\n", pktid));
+ DHD_TRACE(("txstatus for pktid 0x%04x\n", pktid));
if (OSL_ATOMIC_DEC_RETURN(dhd->osh, &prot->active_tx_count) < 0) {
DHD_ERROR(("Extra packets are freed\n"));
}
@@ -6413,7 +6413,7 @@ BCMFASTPATH(dhd_prot_txstatus_process)(dhd_pub_t *dhd, void *msg)
prot->tx_compl_prod_sync = 0;
}
- DHD_INFO(("%s: tx_compl_prod pkt<%p> sync<%d>\n",
+ DHD_TRACE(("%s: tx_compl_prod pkt<%p> sync<%d>\n",
__FUNCTION__, pkt, prot->tx_compl_prod_sync));
DHD_RING_UNLOCK(ring->ring_lock, flags);
@@ -6470,7 +6470,7 @@ workq_ring_full:
DHD_RING_UNLOCK(ring->ring_lock, flags);
#ifdef DHD_MEM_STATS
DHD_MEM_STATS_LOCK(dhd->mem_stats_lock, flags);
- DHD_INFO(("%s txpath_mem: %llu PKTLEN: %d\n",
+ DHD_TRACE(("%s txpath_mem: %llu PKTLEN: %d\n",
__FUNCTION__, dhd->txpath_mem, PKTLEN(dhd->osh, pkt)));
dhd->txpath_mem -= PKTLEN(dhd->osh, pkt);
DHD_MEM_STATS_UNLOCK(dhd->mem_stats_lock, flags);
@@ -6548,7 +6548,7 @@ BCMFASTPATH(dhd_prot_process_infobuf_complete)(dhd_pub_t *dhd, void* buf)
DHD_DUPLICATE_FREE, buf, D2HRING_INFO_BUFCMPLT_ITEMSIZE);
#endif /* DHD_PKTID_AUDIT_RING */
- DHD_INFO(("id 0x%04x, len %d, phase 0x%02x, seqnum %d, rx_dataoffset %d\n",
+ DHD_TRACE(("id 0x%04x, len %d, phase 0x%02x, seqnum %d, rx_dataoffset %d\n",
pktid, buflen, resp->cmn_hdr.flags, ltoh16(resp->seqnum),
dhd->prot->rx_dataoffset));
@@ -7020,7 +7020,7 @@ BCMFASTPATH(dhd_prot_return_rxbuf)(dhd_pub_t *dhd, uint32 pktid, uint32 rxcnt)
prot->rx_compl_prod_sync = 0;
}
- DHD_INFO(("%s: rx_compl_prod pktid<%u> sync<%d>\n",
+ DHD_TRACE(("%s: rx_compl_prod pktid<%u> sync<%d>\n",
__FUNCTION__, pktid, prot->rx_compl_prod_sync));
#endif /* DHD_LB_RXC */
@@ -7351,7 +7351,7 @@ dhd_msgbuf_dmaxfer_process(dhd_pub_t *dhd, void *msg)
BCM_REFERENCE(cmplt);
end_usec = OSL_SYSUPTIME_US();
- DHD_ERROR(("DMA loopback status: %d\n", cmplt->compl_hdr.status));
+ DHD_TRACE(("DMA loopback status: %d\n", cmplt->compl_hdr.status));
prot->dmaxfer.status = cmplt->compl_hdr.status;
OSL_CACHE_INV(prot->dmaxfer.dstmem.va, prot->dmaxfer.len);
if (prot->dmaxfer.srcmem.va && prot->dmaxfer.dstmem.va) {
@@ -7373,25 +7373,25 @@ dhd_msgbuf_dmaxfer_process(dhd_pub_t *dhd, void *msg)
else {
switch (prot->dmaxfer.d11_lpbk) {
case M2M_DMA_LPBK: {
- DHD_ERROR(("DMA successful pcie m2m DMA loopback\n"));
+ DHD_INFO(("DMA successful pcie m2m DMA loopback\n"));
} break;
case D11_LPBK: {
- DHD_ERROR(("DMA successful with d11 loopback\n"));
+ DHD_INFO(("DMA successful with d11 loopback\n"));
} break;
case BMC_LPBK: {
- DHD_ERROR(("DMA successful with bmc loopback\n"));
+ DHD_INFO(("DMA successful with bmc loopback\n"));
} break;
case M2M_NON_DMA_LPBK: {
- DHD_ERROR(("DMA successful pcie m2m NON DMA loopback\n"));
+ DHD_INFO(("DMA successful pcie m2m NON DMA loopback\n"));
} break;
case D11_HOST_MEM_LPBK: {
- DHD_ERROR(("DMA successful d11 host mem loopback\n"));
+ DHD_INFO(("DMA successful d11 host mem loopback\n"));
} break;
case BMC_HOST_MEM_LPBK: {
- DHD_ERROR(("DMA successful bmc host mem loopback\n"));
+ DHD_INFO(("DMA successful bmc host mem loopback\n"));
} break;
default: {
- DHD_ERROR(("Invalid loopback option\n"));
+ DHD_INFO(("Invalid loopback option\n"));
} break;
}
@@ -8125,7 +8125,7 @@ BCMFASTPATH(dhd_prot_alloc_ring_space)(dhd_pub_t *dhd, msgbuf_ring_t *ring,
}
if (ret_buf == HOST_RING_BASE(ring)) {
- DHD_INFO(("%s: setting the phase now\n", ring->name));
+ DHD_TRACE(("%s: setting the phase now\n", ring->name));
ring->current_phase = ring->current_phase ? 0 : BCMPCIE_CMNHDR_PHASE_BIT_INIT;
}
@@ -8696,7 +8696,7 @@ BCMFASTPATH(dhd_prot_get_ring_space)(msgbuf_ring_t *ring, uint16 nitems, uint16
if ((ring_avail_cnt == 0) ||
(exactly_nitems && (ring_avail_cnt < nitems) &&
((ring->max_items - ring->wr) >= nitems))) {
- DHD_INFO(("Space not available: ring %s items %d write %d read %d\n",
+ DHD_TRACE(("Space not available: ring %s items %d write %d read %d\n",
ring->name, nitems, ring->wr, ring->rd));
return NULL;
}
@@ -8905,7 +8905,7 @@ dhd_send_d2h_ringcreate(dhd_pub_t *dhd, msgbuf_ring_t *ring_to_create,
d2h_ring->msg.flags = ctrl_ring->current_phase;
d2h_ring->msg.request_id = htol32(ring_to_create->create_req_id);
d2h_ring->ring_id = htol16(DHD_D2H_RING_OFFSET(ring_to_create->idx, max_h2d_rings));
- DHD_ERROR(("%s ringid: %d idx: %d max_h2d: %d\n", __FUNCTION__, d2h_ring->ring_id,
+ DHD_TRACE(("%s ringid: %d idx: %d max_h2d: %d\n", __FUNCTION__, d2h_ring->ring_id,
ring_to_create->idx, max_h2d_rings));
d2h_ring->ring_type = ring_type;
@@ -9344,7 +9344,7 @@ int dhd_prot_h2d_mbdata_send_ctrlmsg(dhd_pub_t *dhd, uint32 mb_data)
int num_post = 1;
int i;
- DHD_INFO(("%s Sending H2D MB data Req data 0x%04x\n",
+ DHD_TRACE(("%s Sending H2D MB data Req data 0x%04x\n",
__FUNCTION__, mb_data));
if (!ctrl_ring->inited) {
DHD_ERROR(("%s: Ctrl Submit Ring: not inited\n", __FUNCTION__));
@@ -9397,7 +9397,7 @@ int dhd_prot_h2d_mbdata_send_ctrlmsg(dhd_pub_t *dhd, uint32 mb_data)
h2d_mb_data->mail_box_data = htol32(mb_data);
}
- DHD_INFO(("%s Send H2D MB data Req data 0x%04x\n", __FUNCTION__, mb_data));
+ DHD_TRACE(("%s Send H2D MB data Req data 0x%04x\n", __FUNCTION__, mb_data));
/* upd wrt ptr and raise interrupt */
dhd_prot_ring_write_complete_mbdata(dhd, ctrl_ring, h2d_mb_data,
@@ -9554,7 +9554,7 @@ dhd_prot_process_h2d_ring_create_complete(dhd_pub_t *dhd, void *buf)
if (dhd->prot->h2dring_info_subn->create_req_id == ltoh32(resp->cmn_hdr.request_id)) {
dhd->prot->h2dring_info_subn->create_pending = FALSE;
dhd->prot->h2dring_info_subn->inited = TRUE;
- DHD_ERROR(("info buffer post after ring create\n"));
+ DHD_INFO(("info buffer post after ring create\n"));
dhd_prot_infobufpost(dhd, dhd->prot->h2dring_info_subn);
}
}
@@ -9563,7 +9563,7 @@ static void
dhd_prot_process_d2h_ring_create_complete(dhd_pub_t *dhd, void *buf)
{
d2h_ring_create_response_t *resp = (d2h_ring_create_response_t *)buf;
- DHD_ERROR(("%s ring create Response status = %d ring %d, id 0x%04x\n", __FUNCTION__,
+ DHD_INFO(("%s ring create Response status = %d ring %d, id 0x%04x\n", __FUNCTION__,
ltoh16(resp->cmplt.status),
ltoh16(resp->cmplt.ring_id),
ltoh32(resp->cmn_hdr.request_id)));
@@ -9618,7 +9618,7 @@ dhd_prot_process_d2h_mb_data(dhd_pub_t *dhd, void* buf)
d2h_mailbox_data_t *d2h_data;
d2h_data = (d2h_mailbox_data_t *)buf;
- DHD_INFO(("%s dhd_prot_process_d2h_mb_data, 0x%04x\n", __FUNCTION__,
+ DHD_TRACE(("%s dhd_prot_process_d2h_mb_data, 0x%04x\n", __FUNCTION__,
d2h_data->d2h_mailbox_data));
dhd_bus_handle_mb_data(dhd->bus, d2h_data->d2h_mailbox_data);
}
diff --git a/dhd_pcie.c b/dhd_pcie.c
index 75b6271..d8c853a 100755
--- a/dhd_pcie.c
+++ b/dhd_pcie.c
@@ -3178,14 +3178,14 @@ dhdpcie_download_code_file(struct dhd_bus *bus, char *pfw_path)
dhd_tcm_test_enable = FALSE;
}
#endif /* DHD_FW_MEM_CORRUPTION */
- DHD_ERROR(("%s: dhd_tcm_test_enable %u\n", __FUNCTION__, dhd_tcm_test_enable));
+ DHD_INFO(("%s: dhd_tcm_test_enable %u\n", __FUNCTION__, dhd_tcm_test_enable));
/* TCM check */
if (dhd_tcm_test_enable && !dhd_bus_tcm_test(bus)) {
DHD_ERROR(("dhd_bus_tcm_test failed\n"));
bcmerror = BCME_ERROR;
goto err;
}
- DHD_ERROR(("%s: download firmware %s\n", __FUNCTION__, pfw_path));
+ DHD_INFO(("%s: download firmware %s\n", __FUNCTION__, pfw_path));
/* Should succeed in opening image if it is actually given through registry
* entry or in module param.
@@ -3312,7 +3312,7 @@ dhdpcie_download_nvram(struct dhd_bus *bus)
nvram_uefi_exists = TRUE;
}
- DHD_ERROR(("%s: dhd_get_download_buffer len %d\n", __FUNCTION__, len));
+ DHD_INFO(("%s: dhd_get_download_buffer len %d\n", __FUNCTION__, len));
if (len > 0 && len <= MAX_NVRAMBUF_SIZE && memblock != NULL) {
bufp = (char *) memblock;
@@ -3324,7 +3324,7 @@ dhdpcie_download_nvram(struct dhd_bus *bus)
}
}
- DHD_ERROR(("%s: process_nvram_vars len %d\n", __FUNCTION__, len));
+ DHD_INFO(("%s: process_nvram_vars len %d\n", __FUNCTION__, len));
#ifdef CUSTOMER_HW4_DEBUG
if (len < MIN_NVRAMVARS_SIZE) {
DHD_ERROR(("%s: invalid nvram size in process_nvram_vars \n",
@@ -4169,7 +4169,7 @@ BCMFASTPATH(dhd_bus_schedule_queue)(struct dhd_bus *bus, uint16 flow_id, bool t
dhd_pub_t *dhdp = bus->dhd;
#endif
- DHD_INFO(("%s: flow_id is %d\n", __FUNCTION__, flow_id));
+ DHD_TRACE(("%s: flow_id is %d\n", __FUNCTION__, flow_id));
/* ASSERT on flow_id */
if (flow_id >= bus->max_submission_rings) {
@@ -4259,7 +4259,7 @@ BCMFASTPATH(dhd_bus_schedule_queue)(struct dhd_bus *bus, uint16 flow_id, bool t
#ifdef DHD_MEM_STATS
DHD_MEM_STATS_LOCK(bus->dhd->mem_stats_lock, flags);
bus->dhd->txpath_mem += PKTLEN(bus->dhd->osh, txp);
- DHD_INFO(("%s txpath_mem: %llu PKTLEN: %d\n",
+ DHD_TRACE(("%s txpath_mem: %llu PKTLEN: %d\n",
__FUNCTION__, bus->dhd->txpath_mem, PKTLEN(bus->dhd->osh, txp)));
DHD_MEM_STATS_UNLOCK(bus->dhd->mem_stats_lock, flags);
#endif /* DHD_MEM_STATS */
@@ -4556,7 +4556,7 @@ dhdpcie_bar1_window_switch_enab(dhd_bus_t *bus)
bus->bar1_switch_enab = TRUE;
}
- DHD_ERROR(("%s: bar1_switch_enab=%d ramstart=0x%x ramend=0x%x bar1_size=0x%x\n",
+ DHD_INFO(("%s: bar1_switch_enab=%d ramstart=0x%x ramend=0x%x bar1_size=0x%x\n",
__FUNCTION__, bus->bar1_switch_enab, ramstart, ramend, bus->bar1_size));
}
@@ -4788,7 +4788,7 @@ dhd_bus_cmn_writeshared(dhd_bus_t *bus, void *data, uint32 len, uint8 type, uint
uint64 long_data;
ulong addr; /* dongle address */
- DHD_INFO(("%s: writing to dongle type %d len %d\n", __FUNCTION__, type, len));
+ DHD_TRACE(("%s: writing to dongle type %d len %d\n", __FUNCTION__, type, len));
if (bus->is_linkdown) {
DHD_ERROR(("%s: PCIe link was down\n", __FUNCTION__));
@@ -4909,7 +4909,7 @@ dhd_bus_cmn_writeshared(dhd_bus_t *bus, void *data, uint32 len, uint8 type, uint
long_data = HTOL64(*(uint64 *)data);
addr = DHD_PCIE_SHARED_MEMBER_ADDR(bus, host_trap_addr);
dhdpcie_bus_membytes(bus, TRUE, addr, (uint8 *) &long_data, len);
- DHD_INFO(("Wrote trap addr:0x%x\n", (uint32) HTOL32(*(uint32 *)data)));
+ DHD_TRACE(("Wrote trap addr:0x%x\n", (uint32) HTOL32(*(uint32 *)data)));
break;
case HOST_SCB_ADDR:
@@ -4919,7 +4919,7 @@ dhd_bus_cmn_writeshared(dhd_bus_t *bus, void *data, uint32 len, uint8 type, uint
#else /* !DHD_SUPPORT_64BIT */
dhdpcie_bus_wtcm32(bus, addr, *((uint32*)data));
#endif /* DHD_SUPPORT_64BIT */
- DHD_INFO(("Wrote host_scb_addr:0x%x\n",
+ DHD_TRACE(("Wrote host_scb_addr:0x%x\n",
(uint32) HTOL32(*(uint32 *)data)));
break;
@@ -5435,7 +5435,7 @@ dhd_bus_perform_flr(dhd_bus_t *bus, bool force_fail)
uint val;
int retry = 0;
- DHD_ERROR(("******** Perform FLR ********\n"));
+ DHD_INFO(("******** Perform FLR ********\n"));
/* Kernel Panic for 4378Ax during traptest/devreset4 reload case:
* For 4378Ax, enum registers will not be reset with FLR (producer index WAR).
@@ -5511,14 +5511,14 @@ dhd_bus_perform_flr(dhd_bus_t *bus, bool force_fail)
"is cleared\n", PCIE_SSRESET_STATUS_BIT, PCIE_CFG_SUBSYSTEM_CONTROL));
do {
val = OSL_PCI_READ_CONFIG(bus->osh, PCIE_CFG_SUBSYSTEM_CONTROL, sizeof(val));
- DHD_ERROR(("read_config: reg=0x%x read val=0x%x\n",
+ DHD_INFO(("read_config: reg=0x%x read val=0x%x\n",
PCIE_CFG_SUBSYSTEM_CONTROL, val));
val = val & (1 << PCIE_SSRESET_STATUS_BIT);
OSL_DELAY(DHD_SSRESET_STATUS_RETRY_DELAY);
} while (val && (retry++ < DHD_SSRESET_STATUS_RETRIES));
if (val) {
- DHD_ERROR(("ERROR: reg=0x%x bit %d is not cleared\n",
+ DHD_INFO(("ERROR: reg=0x%x bit %d is not cleared\n",
PCIE_CFG_SUBSYSTEM_CONTROL, PCIE_SSRESET_STATUS_BIT));
/* User has to fire the IOVAR again, if force_fail is needed */
if (force_fail) {
@@ -5532,7 +5532,7 @@ dhd_bus_perform_flr(dhd_bus_t *bus, bool force_fail)
DHD_INFO(("Restore Pcie Config Space\n"));
DHD_PCIE_CONFIG_RESTORE(bus);
- DHD_ERROR(("******** FLR Succedeed ********\n"));
+ DHD_INFO(("******** FLR Succedeed ********\n"));
return BCME_OK;
}
@@ -5880,7 +5880,7 @@ dhd_bus_devreset(dhd_pub_t *dhdp, uint8 flag)
do {
bcmerror = dhdpcie_bus_start_host_dev(bus);
if (!bcmerror) {
- DHD_ERROR(("%s: dhdpcie_bus_start_host_dev OK\n",
+ DHD_INFO(("%s: dhdpcie_bus_start_host_dev OK\n",
__FUNCTION__));
break;
} else {
@@ -6903,11 +6903,11 @@ dhd_bus_dump_dar_registers(struct dhd_bus *bus)
dar_erraddr_val = si_corereg(bus->sih, bus->sih->buscoreidx, dar_erraddr_reg, 0, 0);
dar_pcie_mbint_val = si_corereg(bus->sih, bus->sih->buscoreidx, dar_pcie_mbint_reg, 0, 0);
- DHD_ERROR(("%s: dar_clk_ctrl(0x%x:0x%x) dar_pwr_ctrl(0x%x:0x%x) dar_intstat(0x%x:0x%x)\n",
+ DHD_INFO(("%s: dar_clk_ctrl(0x%x:0x%x) dar_pwr_ctrl(0x%x:0x%x) dar_intstat(0x%x:0x%x)\n",
__FUNCTION__, dar_clk_ctrl_reg, dar_clk_ctrl_val,
dar_pwr_ctrl_reg, dar_pwr_ctrl_val, dar_intstat_reg, dar_intstat_val));
- DHD_ERROR(("%s: dar_errlog(0x%x:0x%x) dar_erraddr(0x%x:0x%x) dar_pcie_mbint(0x%x:0x%x)\n",
+ DHD_INFO(("%s: dar_errlog(0x%x:0x%x) dar_erraddr(0x%x:0x%x) dar_pcie_mbint(0x%x:0x%x)\n",
__FUNCTION__, dar_errlog_reg, dar_errlog_val,
dar_erraddr_reg, dar_erraddr_val, dar_pcie_mbint_reg, dar_pcie_mbint_val));
}
@@ -6925,14 +6925,14 @@ dhd_bus_hostready(struct dhd_bus *bus)
return;
}
- DHD_ERROR(("%s : Read PCICMD Reg: 0x%08X\n", __FUNCTION__,
+ DHD_INFO(("%s : Read PCICMD Reg: 0x%08X\n", __FUNCTION__,
dhd_pcie_config_read(bus, PCI_CFG_CMD, sizeof(uint32))));
dhd_bus_dump_dar_registers(bus);
si_corereg(bus->sih, bus->sih->buscoreidx, dhd_bus_db1_addr_get(bus), ~0, 0x12345678);
bus->hostready_count ++;
- DHD_ERROR(("%s: Ring Hostready:%d\n", __FUNCTION__, bus->hostready_count));
+ DHD_INFO(("%s: Ring Hostready:%d\n", __FUNCTION__, bus->hostready_count));
}
/* Clear INTSTATUS */
@@ -10407,10 +10407,10 @@ dhdpcie_readshared(dhd_bus_t *bus)
bus->rd_shared_pass_time = OSL_LOCALTIME_NS();
elapsed = tmo.elapsed;
bus->shared_addr = (ulong)addr;
- DHD_ERROR(("### Total time ARM OOR to Readshared pass took %llu usec ###\n",
+ DHD_INFO(("### Total time ARM OOR to Readshared pass took %llu usec ###\n",
DIV_U64_BY_U32((bus->rd_shared_pass_time - bus->arm_oor_time),
NSEC_PER_USEC)));
- DHD_ERROR(("PCIe shared addr (0x%08x) read took %u usec "
+ DHD_INFO(("PCIe shared addr (0x%08x) read took %u usec "
"before dongle is ready\n", addr, elapsed));
}
@@ -10472,7 +10472,7 @@ dhdpcie_readshared(dhd_bus_t *bus)
#endif /* PCIE_INB_DW */
#if defined(PCIE_INB_DW)
- DHD_ERROR(("FW supports Inband dw ? %s\n",
+ DHD_INFO(("FW supports Inband dw ? %s\n",
d2h_inband_dw ? "Y":"N"));
#endif /* defined(PCIE_INB_DW) */
@@ -10491,7 +10491,7 @@ dhdpcie_readshared(dhd_bus_t *bus)
/* Read flag2 HWA bit */
bus->dhd->hwa_capable = (sh->flags2 & PCIE_SHARED2_HWA) ? TRUE : FALSE;
- DHD_ERROR(("FW supports HWA ? %s\n", bus->dhd->hwa_capable ? "Y":"N"));
+ DHD_INFO(("FW supports HWA ? %s\n", bus->dhd->hwa_capable ? "Y":"N"));
bus->hwa_db_index_sz = PCIE_HWA_DB_INDEX_SZ;
if (idma_en) {
@@ -10642,7 +10642,7 @@ dhdpcie_readshared(dhd_bus_t *bus)
}
DHD_INFO(("ring_info\n"));
- DHD_ERROR(("%s: max H2D queues %d\n",
+ DHD_INFO(("%s: max H2D queues %d\n",
__FUNCTION__, ltoh16(ring_info.max_tx_flowrings)));
DHD_INFO(("mail box address\n"));
@@ -10677,13 +10677,13 @@ dhdpcie_readshared(dhd_bus_t *bus)
#ifdef EWP_EDL
if (host_edl_support) {
bus->dhd->dongle_edl_support = (sh->flags2 & PCIE_SHARED2_EDL_RING) ? TRUE : FALSE;
- DHD_ERROR(("Dongle EDL support: %u\n", bus->dhd->dongle_edl_support));
+ DHD_INFO(("Dongle EDL support: %u\n", bus->dhd->dongle_edl_support));
}
#endif /* EWP_EDL */
bus->dhd->debug_buf_dest_support =
(sh->flags2 & PCIE_SHARED2_DEBUG_BUF_DEST) ? TRUE : FALSE;
- DHD_ERROR(("FW supports debug buf dest ? %s \n",
+ DHD_INFO(("FW supports debug buf dest ? %s \n",
bus->dhd->debug_buf_dest_support ? "Y" : "N"));
#ifdef DHD_DB0TS
@@ -12131,7 +12131,7 @@ dhdpcie_bus_get_pcie_dar_supported(dhd_bus_t *bus)
void
dhdpcie_bus_enab_pcie_dw(dhd_bus_t *bus, uint8 dw_option)
{
- DHD_ERROR(("ENABLING DW:%d\n", dw_option));
+ DHD_INFO(("ENABLING DW:%d\n", dw_option));
bus->dw_option = dw_option;
}
diff --git a/dhd_pcie_linux.c b/dhd_pcie_linux.c
index b4b26aa..23b71c5 100644..100755
--- a/dhd_pcie_linux.c
+++ b/dhd_pcie_linux.c
@@ -597,7 +597,7 @@ dhd_bus_aer_config(dhd_bus_t *bus)
{
uint32 val;
- DHD_ERROR(("%s: Configure AER registers for EP\n", __FUNCTION__));
+ DHD_INFO(("%s: Configure AER registers for EP\n", __FUNCTION__));
val = dhdpcie_ep_access_cap(bus, PCIE_ADVERRREP_CAPID,
PCIE_ADV_CORR_ERR_MASK_OFFSET, TRUE, FALSE, 0);
if (val != (uint32)-1) {
@@ -609,7 +609,7 @@ dhd_bus_aer_config(dhd_bus_t *bus)
__FUNCTION__, val));
}
- DHD_ERROR(("%s: Configure AER registers for RC\n", __FUNCTION__));
+ DHD_INFO(("%s: Configure AER registers for RC\n", __FUNCTION__));
val = dhdpcie_rc_access_cap(bus, PCIE_ADVERRREP_CAPID,
PCIE_ADV_CORR_ERR_MASK_OFFSET, TRUE, FALSE, 0);
if (val != (uint32)-1) {
@@ -1745,12 +1745,12 @@ void dhdpcie_dump_resource(dhd_bus_t *bus)
}
/* BAR0 */
- DHD_ERROR(("%s: BAR0(VA): 0x%pK, BAR0(PA): "PRINTF_RESOURCE", SIZE: %d\n",
+ DHD_INFO(("%s: BAR0(VA): 0x%pK, BAR0(PA): "PRINTF_RESOURCE", SIZE: %d\n",
__FUNCTION__, pch->regs, pci_resource_start(bus->dev, 0),
DONGLE_REG_MAP_SIZE));
/* BAR1 */
- DHD_ERROR(("%s: BAR1(VA): 0x%pK, BAR1(PA): "PRINTF_RESOURCE", SIZE: %d\n",
+ DHD_INFO(("%s: BAR1(VA): 0x%pK, BAR1(PA): "PRINTF_RESOURCE", SIZE: %d\n",
__FUNCTION__, pch->tcm, pci_resource_start(bus->dev, 2),
pch->bar1_size));
}
diff --git a/dhd_pktlog.c b/dhd_pktlog.c
index 3c0b85f..0ad2ac4 100644
--- a/dhd_pktlog.c
+++ b/dhd_pktlog.c
@@ -157,7 +157,7 @@ dhd_os_attach_pktlog(dhd_pub_t *dhdp)
dhd_cpkt_log_init_tt(dhdp);
#endif
- DHD_ERROR(("%s(): dhd_os_attach_pktlog attach\n", __FUNCTION__));
+ DHD_INFO(("%s(): dhd_os_attach_pktlog attach\n", __FUNCTION__));
return BCME_OK;
}
@@ -177,7 +177,7 @@ dhd_os_detach_pktlog(dhd_pub_t *dhdp)
dhd_cpkt_log_deinit_tt(dhdp);
#endif /* DHD_COMPACT_PKT_LOG */
- DHD_ERROR(("%s(): dhd_os_attach_pktlog detach\n", __FUNCTION__));
+ DHD_INFO(("%s(): dhd_os_attach_pktlog detach\n", __FUNCTION__));
MFREE(dhdp->osh, dhdp->pktlog, sizeof(dhd_pktlog_t));
@@ -225,7 +225,7 @@ dhd_pktlog_ring_init(dhd_pub_t *dhdp, int size)
ring->dhdp = dhdp;
ring->pktlog_ring_lock = osl_spin_lock_init(dhdp->osh);
- DHD_ERROR(("%s(): pktlog ring init success\n", __FUNCTION__));
+ DHD_INFO(("%s(): pktlog ring init success\n", __FUNCTION__));
return ring;
fail:
@@ -299,7 +299,7 @@ dhd_pktlog_ring_deinit(dhd_pub_t *dhdp, dhd_pktlog_ring_t *ring)
MFREE(dhdp->osh, ring, sizeof(dhd_pktlog_ring_t));
- DHD_ERROR(("%s(): pktlog ring deinit\n", __FUNCTION__));
+ DHD_INFO(("%s(): pktlog ring deinit\n", __FUNCTION__));
return ret;
}
@@ -481,7 +481,7 @@ dhd_pktlog_filter_init(int size)
filter->enable = PKTLOG_TXPKT_CASE | PKTLOG_TXSTATUS_CASE | PKTLOG_RXPKT_CASE;
- DHD_ERROR(("%s(): pktlog filter init success\n", __FUNCTION__));
+ DHD_INFO(("%s(): pktlog filter init success\n", __FUNCTION__));
return filter;
fail:
@@ -507,7 +507,7 @@ dhd_pktlog_filter_deinit(dhd_pktlog_filter_t *filter)
}
kfree(filter);
- DHD_ERROR(("%s(): pktlog filter deinit\n", __FUNCTION__));
+ DHD_INFO(("%s(): pktlog filter deinit\n", __FUNCTION__));
return ret;
}
diff --git a/dhd_pno.c b/dhd_pno.c
index b7e405b..422214f 100644
--- a/dhd_pno.c
+++ b/dhd_pno.c
@@ -4370,11 +4370,11 @@ int dhd_pno_init(dhd_pub_t *dhd)
FALSE);
if (err == BCME_UNSUPPORTED) {
_pno_state->wls_supported = FALSE;
- DHD_ERROR(("Android Location Service, UNSUPPORTED\n"));
+ DHD_INFO(("Android Location Service, UNSUPPORTED\n"));
DHD_INFO(("Current firmware doesn't support"
" Android Location Service\n"));
} else {
- DHD_ERROR(("%s: Support Android Location Service\n",
+ DHD_INFO(("%s: Support Android Location Service\n",
__FUNCTION__));
}
exit:
diff --git a/linux_pkt.c b/linux_pkt.c
index 74d8fe4..85c2798 100755
--- a/linux_pkt.c
+++ b/linux_pkt.c
@@ -75,7 +75,7 @@ int osl_static_mem_init(osl_t *osh, void *adapter)
ASSERT(osh->magic == OS_HANDLE_MAGIC);
return -ENOMEM;
} else {
- DHD_ERROR(("succeed to alloc static buf\n"));
+ DHD_INFO(("succeed to alloc static buf\n"));
}
spin_lock_init(&bcm_static_buf->static_lock);
diff --git a/wl_cfg80211.c b/wl_cfg80211.c
index dd70f22..a08374d 100644
--- a/wl_cfg80211.c
+++ b/wl_cfg80211.c
@@ -136,8 +136,8 @@ module_param(wl_reassoc_support, uint, 0660);
static struct device *cfg80211_parent_dev = NULL;
static struct bcm_cfg80211 *g_bcmcfg = NULL;
-u32 wl_dbg_level = WL_DBG_ERR | WL_DBG_P2P_ACTION | WL_DBG_INFO;
-u32 wl_dbgring_level = WL_DBG_ERR;
+u32 wl_dbg_level = WL_DBG_ERR | WL_DBG_P2P_ACTION;
+u32 wl_dbgring_level = WL_DBG_ERR | WL_DBG_P2P_ACTION | WL_DBG_INFO;
#define MAX_VIF_OFFSET 15
#define MAX_WAIT_TIME 1500
@@ -14675,8 +14675,8 @@ int wl_get_bss_info(struct bcm_cfg80211 *cfg, struct net_device *dev, struct eth
}
cfg->roam_count = 0;
- WL_ERR(("BSSID:" MACDBG " SSID %s \n", MAC2STRDBG(eabuf), "*****"));
- WL_ERR(("freq:%d, BW:%s, RSSI:%d dBm, Rate:%d Mbps, 11mode:%d, stream:%d,"
+ WL_INFORM_MEM(("BSSID:" MACDBG " SSID %s \n", MAC2STRDBG(eabuf), "*****"));
+ WL_INFORM_MEM(("freq:%d, BW:%s, RSSI:%d dBm, Rate:%d Mbps, 11mode:%d, stream:%d,"
"MU-MIMO:%d, Passpoint:%d, SNR:%d, Noise:%d, \n"
"akm:%s, roam:%s, 11kv:%d/%d \n",
freq, wf_chspec_to_bw_str(bi->chanspec),
@@ -14719,7 +14719,7 @@ int wl_get_bss_info(struct bcm_cfg80211 *cfg, struct net_device *dev, struct eth
}
#endif /* DHD_PUB_ROAM_EVT */
if (cnt_valid) {
- WL_ERR(("GET_BSS: full roam scan count:%d partial roam scan count:%d\n",
+ WL_INFORM_MEM(("GET_BSS: full roam scan count:%d partial roam scan count:%d\n",
full_cnt, partial_cnt));
snprintf(&cfg->bss_info[cfg_bss_info_len],
GET_BSS_INFO_LEN - cfg_bss_info_len, " %d %d",
@@ -14951,7 +14951,7 @@ wl_notify_connect_status(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
mode = wl_get_mode_by_netdev(cfg, ndev);
/* Push link events to upper layer log */
- SUPP_LOG(("[%s] Mode:%d event:%d status:0x%x reason:%d\n",
+ WL_INFORM_MEM(("[%s] Mode:%d event:%d status:0x%x reason:%d\n",
ndev->name, mode, ntoh32(e->event_type),
ntoh32(e->status), ntoh32(e->reason)));
if (mode == WL_MODE_AP) {
@@ -15086,7 +15086,7 @@ wl_notify_connect_status(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
ndev->ieee80211_ptr)) < 0) {
WL_ERR(("Find index failed\n"));
} else {
- WL_ERR(("link down--clearing disconnect IEs\n"));
+ WL_INFORM_MEM(("link down--clearing disconnect IEs\n"));
if ((err = wl_cfg80211_set_mgmt_vndr_ies(cfg,
ndev_to_cfgdev(ndev), bssidx, VNDR_IE_DISASSOC_FLAG,
NULL, 0)) != BCME_OK) {
@@ -15566,7 +15566,7 @@ wl_handle_roam_exp_event(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
wdev = ndev->ieee80211_ptr;
wdev->ssid_len = min(ssid->SSID_len, (uint32)DOT11_MAX_SSID_LEN);
memcpy(wdev->ssid, ssid->SSID, wdev->ssid_len);
- WL_ERR(("SSID is %s\n", ssid->SSID));
+ WL_INFORM_MEM(("SSID is %s\n", ssid->SSID));
wl_update_prof(cfg, ndev, NULL, ssid, WL_PROF_SSID);
} else {
WL_ERR(("NULL ndev!\n"));
@@ -15766,7 +15766,7 @@ wl_check_pmstatus(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
}
rssi = wl_rssi_offset(dtoh32(scb_val.val));
}
- WL_ERR(("RSSI %d dBm\n", rssi));
+ WL_INFORM_MEM(("RSSI %d dBm\n", rssi));
if (rssi > DPM_UPD_LMT_RSSI) {
return err;
}
@@ -15791,7 +15791,7 @@ wl_check_pmstatus(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
WL_ERR(("Disassoc error %d\n", err));
return err;
}
- WL_ERR(("Force Disassoc due to updated DPM event.\n"));
+ WL_INFORM_MEM(("Force Disassoc due to updated DPM event.\n"));
last_dpm_upd_time = 0;
} else {
@@ -16040,7 +16040,7 @@ static s32 wl_update_bss_info(struct bcm_cfg80211 *cfg, struct net_device *ndev,
wl_update_prof(cfg, ndev, NULL, &chspec, WL_PROF_CHAN);
if (!bss) {
- WL_DBG(("Could not find the AP\n"));
+ WL_INFORM_MEM(("Could not find the AP\n"));
if (memcmp(bi->BSSID.octet, curbssid, ETHER_ADDR_LEN)) {
WL_ERR(("Bssid doesn't match\n"));
err = -EIO;
@@ -16055,7 +16055,7 @@ static s32 wl_update_bss_info(struct bcm_cfg80211 *cfg, struct net_device *ndev,
beacon_interval = cpu_to_le16(bi->beacon_period);
} else {
u16 channel;
- WL_DBG(("Found the AP in the list - BSSID %pM\n", bss->bssid));
+ WL_INFORM_MEM(("Found the AP in the list - BSSID %pM\n", bss->bssid));
channel = wf_chspec_ctlchan(wl_chspec_driver_to_host(bi->chanspec));
freq = wl_channel_to_frequency(channel, CHSPEC_BAND(bi->chanspec));
bss->channel = ieee80211_get_channel(wiphy, freq);
@@ -16218,7 +16218,7 @@ wl_bss_roaming_done(struct bcm_cfg80211 *cfg, struct net_device *ndev,
#endif /* CUSTOM_LONG_RETRY_LIMIT */
DHD_STATLOG_CTRL(dhdp, ST(REASSOC_INFORM),
dhd_net2idx(dhdp->info, ndev), 0);
- WL_ERR(("Report roam event to upper layer. " MACDBG " (ch:%d)\n",
+ WL_INFORM_MEM(("Report roam event to upper layer. " MACDBG " (ch:%d)\n",
MAC2STRDBG((const u8*)(&e->addr)), CHSPEC_CHANNEL(*chanspec)));
#if (defined(CONFIG_ARCH_MSM) && defined(CFG80211_ROAMED_API_UNIFIED)) || \
@@ -16736,7 +16736,7 @@ wl_gon_req_collision(struct bcm_cfg80211 *cfg, wl_action_frame_t *tx_act_frm,
}
}
- WL_ERR((" GO NEGO Request COLLISION !!! \n"));
+ WL_INFORM_MEM((" GO NEGO Request COLLISION !!! \n"));
/* if sa(peer) addr is less than da(my) addr,
* my device will process peer's gon request and block to send my gon req.
@@ -16747,7 +16747,7 @@ wl_gon_req_collision(struct bcm_cfg80211 *cfg, wl_action_frame_t *tx_act_frm,
if (memcmp(sa.octet, da.octet, ETHER_ADDR_LEN) < 0) {
/* block to send tx gon request */
cfg->block_gon_req_tx_count = BLOCK_GON_REQ_MAX_NUM;
- WL_ERR((" block to send gon req tx !!!\n"));
+ WL_INFORM_MEM((" block to send gon req tx !!!\n"));
/* if we are finding a common channel for sending af,
* do not scan more to block to send current gon req
@@ -17134,7 +17134,7 @@ wl_notify_rx_mgmt_frame(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
if (wl_get_drv_status_all(cfg, WAITING_NEXT_ACT_FRM)) {
if (cfg->next_af_subtype == act_frm->subtype) {
- WL_DBG(("We got a right next frame!(%d)\n",
+ WL_INFORM_MEM(("We got a right next frame!(%d)\n",
act_frm->subtype));
wl_clr_drv_status(cfg, WAITING_NEXT_ACT_FRM, ndev);
@@ -20000,7 +20000,7 @@ s32 wl_cfg80211_up(struct net_device *net)
if (init_roam_cache(cfg, ioctl_version) == 0) {
/* Enable support for Roam cache */
cfg->rcc_enabled = true;
- WL_ERR(("Roam channel cache enabled\n"));
+ WL_INFORM_MEM(("Roam channel cache enabled\n"));
} else {
WL_ERR(("Failed to enable RCC.\n"));
}
diff --git a/wl_cfgscan.c b/wl_cfgscan.c
index 46423dc..341c3a5 100644..100755
--- a/wl_cfgscan.c
+++ b/wl_cfgscan.c
@@ -477,7 +477,7 @@ wl_inform_bss(struct bcm_cfg80211 *cfg)
s32 i;
bss_list = cfg->bss_list;
- WL_MEM(("scanned AP count (%d)\n", bss_list->count));
+ WL_INFORM_MEM(("scanned AP count (%d)\n", bss_list->count));
#ifdef ESCAN_CHANNEL_CACHE
reset_roam_cache(cfg);
#endif /* ESCAN_CHANNEL_CACHE */
@@ -1077,7 +1077,7 @@ wl_escan_handler(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
DBG_EVENT_LOG((dhd_pub_t *)cfg->pub, WIFI_EVENT_DRIVER_SCAN_COMPLETE);
cfg->bss_list = wl_escan_get_buf(cfg, FALSE);
if (!scan_req_match(cfg)) {
- WL_TRACE_HW4(("SCAN COMPLETED: scanned AP count=%d\n",
+ WL_INFORM_MEM(("SCAN COMPLETED: scanned AP count=%d\n",
cfg->bss_list->count));
}
wl_inform_bss(cfg);
@@ -1117,7 +1117,7 @@ wl_escan_handler(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
cfg->bss_list = wl_escan_get_buf(cfg, TRUE);
if (!scan_req_match(cfg)) {
- WL_TRACE_HW4(("SCAN ABORTED: scanned AP count=%d\n",
+ WL_INFORM_MEM(("SCAN ABORTED: scanned AP count=%d\n",
cfg->bss_list->count));
}
#ifdef DUAL_ESCAN_RESULT_BUFFER
@@ -1166,7 +1166,7 @@ wl_escan_handler(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
} else if ((likely(cfg->scan_request)) || (cfg->sched_scan_running)) {
cfg->bss_list = wl_escan_get_buf(cfg, TRUE);
if (!scan_req_match(cfg)) {
- WL_TRACE_HW4(("SCAN ABORTED(UNEXPECTED): "
+ WL_INFORM_MEM(("SCAN ABORTED(UNEXPECTED): "
"scanned AP count=%d\n",
cfg->bss_list->count));
}
@@ -2060,7 +2060,7 @@ wl_get_scan_timeout_val(struct bcm_cfg80211 *cfg)
scan_timer_interval_ms += WL_SCAN_TIMER_INTERVAL_MS_6G;
}
#endif /* WL_6G_BAND */
- WL_MEM(("scan_timer_interval_ms %d\n", scan_timer_interval_ms));
+ WL_INFORM_MEM(("scan_timer_interval_ms %d\n", scan_timer_interval_ms));
return scan_timer_interval_ms;
}
@@ -2493,7 +2493,7 @@ wl_get_scan_wdev(struct bcm_cfg80211 *cfg)
if (!cfg->scan_request && !cfg->sched_scan_req) {
/* No scans in progress */
- WL_MEM(("no scan in progress \n"));
+ WL_ERR(("no scan in progress \n"));
return NULL;
}
@@ -2504,7 +2504,7 @@ wl_get_scan_wdev(struct bcm_cfg80211 *cfg)
wdev = GET_SCHED_SCAN_WDEV(cfg->sched_scan_req);
#endif /* WL_SCHED_SCAN */
} else {
- WL_MEM(("no scan in progress \n"));
+ WL_INFORM_MEM(("no scan in progress \n"));
}
return wdev;
@@ -3348,7 +3348,7 @@ int wl_cfg80211_scan_mac_config(struct net_device *dev, uint8 *rand_mac, uint8 *
/* Disable scan mac for clean-up */
return err;
}
- WL_INFORM_MEM(("scanmac configured"));
+ WL_INFORM_MEM(("scanmac configured\n"));
cfg->scanmac_config = true;
return err;
diff --git a/wl_cfgvendor.c b/wl_cfgvendor.c
index 3fdd249..d027fed 100644
--- a/wl_cfgvendor.c
+++ b/wl_cfgvendor.c
@@ -1327,7 +1327,7 @@ wl_cfgvendor_get_wake_reason_stats(struct wiphy *wiphy,
struct sk_buff *skb = NULL;
dhd_pub_t *dhdp = wl_cfg80211_get_dhdp(ndev);
- WL_DBG(("Recv get wake status info cmd.\n"));
+ WL_INFORM_MEM(("Recv get wake status info cmd.\n"));
pwake_count_info = dhd_get_wakecount(dhdp);
mem_needed = VENDOR_REPLY_OVERHEAD + (ATTRIBUTE_U32_LEN * 20) +
@@ -1340,7 +1340,7 @@ wl_cfgvendor_get_wake_reason_stats(struct wiphy *wiphy,
goto exit;
}
#ifdef DHD_WAKE_EVENT_STATUS
- WL_ERR(("pwake_count_info->rcwake %d\n", pwake_count_info->rcwake));
+ WL_INFORM_MEM(("pwake_count_info->rcwake %d\n", pwake_count_info->rcwake));
ret = nla_put_u32(skb, WAKE_STAT_ATTRIBUTE_TOTAL_DRIVER_FW, 0);
if (unlikely(ret)) {
@@ -1381,14 +1381,14 @@ wl_cfgvendor_get_wake_reason_stats(struct wiphy *wiphy,
#ifdef DHD_DEBUG
for (flowid = 0; flowid < MaxWakeReasonStats; flowid++) {
if (pwake_count_info->rc_event[flowid] != -1) {
- WL_INFORM(("Event ID %u = %s\n", pwake_count_info->rc_event[flowid],
+ WL_INFORM_MEM(("Event ID %u = %s\n", pwake_count_info->rc_event[flowid],
bcmevent_get_name(pwake_count_info->rc_event[flowid])));
}
}
#endif /* DHD_DEBUG */
#endif /* DHD_WAKE_EVENT_STATUS */
#ifdef DHD_WAKE_RX_STATUS
- WL_ERR(("pwake_count_info->rxwake %d\n", pwake_count_info->rxwake));
+ WL_INFORM_MEM(("pwake_count_info->rxwake %d\n", pwake_count_info->rxwake));
ret = nla_put_u32(skb, WAKE_STAT_ATTRIBUTE_TOTAL_RX_DATA_WAKE, pwake_count_info->rxwake);
if (unlikely(ret)) {
WL_ERR(("Failed to put Total Wake due RX data, ret=%d\n", ret));