summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorVictor Liu <victorliu@google.com>2024-01-23 11:58:05 -0800
committerCherrypicker Worker QA <android-build-cherrypicker-worker@system.gserviceaccount.com>2024-01-26 16:32:09 +0000
commit72ee4af70ece3dfe63bc34868f9b3da0732c2222 (patch)
tree89ee50561dedb85829181eedeb90fbdbf66fdd1e
parent6162b0f7c9915a752c26ba1c8c4d53e78d596b64 (diff)
downloadqm35-72ee4af70ece3dfe63bc34868f9b3da0732c2222.tar.gz
merge partner-android/kernel/private/google-modules/uwb:android14-gs-pixel-5.15-24Q2 @ 3b388c3ee1724ad824be65dfb33b3007c021e386
Bug: 316022384 Signed-off-by: Victor Liu <victorliu@google.com> (cherry picked from https://partner-android-review.googlesource.com/q/commit:f8dc466bc14f66e25233bf94cbc84497d41b106b) Merged-In: Id1442f3768d3624f83f3cc6f76042f04826df623 Change-Id: Id1442f3768d3624f83f3cc6f76042f04826df623
-rw-r--r--.clang-format531
-rw-r--r--.pre-commit-config.yaml32
-rw-r--r--BUILD.bazel20
-rw-r--r--Kbuild23
-rw-r--r--Makefile12
-rw-r--r--debug.c513
-rw-r--r--debug.h90
-rw-r--r--debug_qmrom.c341
-rw-r--r--hsspi.c745
-rw-r--r--hsspi.h328
-rw-r--r--hsspi_coredump.c344
-rw-r--r--hsspi_coredump.h55
-rw-r--r--hsspi_log.c464
-rw-r--r--hsspi_log.h70
-rw-r--r--hsspi_test.c177
-rw-r--r--hsspi_test.h39
-rw-r--r--hsspi_uci.c233
-rw-r--r--hsspi_uci.h129
-rw-r--r--libqmrom/CMakeLists.txt13
-rw-r--r--libqmrom/README.md0
-rw-r--r--libqmrom/include/byteswap.h10
-rw-r--r--libqmrom/include/qmrom.h171
-rw-r--r--libqmrom/include/qmrom_error.h21
-rw-r--r--libqmrom/include/qmrom_log.h94
-rw-r--r--libqmrom/include/qmrom_spi.h59
-rw-r--r--libqmrom/include/qmrom_utils.h51
-rw-r--r--libqmrom/include/spi_rom_protocol.h92
-rw-r--r--libqmrom/src/qmrom_a0.c240
-rw-r--r--libqmrom/src/qmrom_b0.c407
-rw-r--r--libqmrom/src/qmrom_c0.c510
-rw-r--r--libqmrom/src/qmrom_common.c429
-rw-r--r--libqmrom/src/qmrom_log.c78
-rw-r--r--qm35-spi.c947
-rw-r--r--qm35-trace.c32
-rw-r--r--qm35-trace.h129
-rw-r--r--qm35.h103
-rw-r--r--qm35_rb.c230
-rw-r--r--qm35_rb.h55
-rw-r--r--qmrom_spi.c158
-rw-r--r--uci_ioctls.h26
40 files changed, 8001 insertions, 0 deletions
diff --git a/.clang-format b/.clang-format
new file mode 100644
index 0000000..2d1a565
--- /dev/null
+++ b/.clang-format
@@ -0,0 +1,531 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# clang-format configuration file. Intended for clang-format >= 4.
+#
+# For more information, see:
+#
+# Documentation/process/clang-format.rst
+# https://clang.llvm.org/docs/ClangFormat.html
+# https://clang.llvm.org/docs/ClangFormatStyleOptions.html
+#
+---
+AccessModifierOffset: -4
+AlignAfterOpenBracket: Align
+AlignConsecutiveAssignments: false
+AlignConsecutiveDeclarations: false
+AlignEscapedNewlines: Left # Unknown to clang-format-4.0
+AlignOperands: true
+AlignTrailingComments: false
+AllowAllParametersOfDeclarationOnNextLine: false
+AllowShortBlocksOnASingleLine: false
+AllowShortCaseLabelsOnASingleLine: false
+AllowShortFunctionsOnASingleLine: None
+AllowShortIfStatementsOnASingleLine: false
+AllowShortLoopsOnASingleLine: false
+AlwaysBreakAfterDefinitionReturnType: None
+AlwaysBreakAfterReturnType: None
+AlwaysBreakBeforeMultilineStrings: false
+AlwaysBreakTemplateDeclarations: false
+BinPackArguments: true
+BinPackParameters: true
+BraceWrapping:
+ AfterClass: false
+ AfterControlStatement: false
+ AfterEnum: false
+ AfterFunction: true
+ AfterNamespace: true
+ AfterObjCDeclaration: false
+ AfterStruct: false
+ AfterUnion: false
+ AfterExternBlock: false # Unknown to clang-format-5.0
+ BeforeCatch: false
+ BeforeElse: false
+ IndentBraces: false
+ SplitEmptyFunction: true # Unknown to clang-format-4.0
+ SplitEmptyRecord: true # Unknown to clang-format-4.0
+ SplitEmptyNamespace: true # Unknown to clang-format-4.0
+BreakBeforeBinaryOperators: None
+BreakBeforeBraces: Custom
+BreakBeforeInheritanceComma: false # Unknown to clang-format-4.0
+BreakBeforeTernaryOperators: false
+BreakConstructorInitializersBeforeComma: false
+BreakConstructorInitializers: BeforeComma # Unknown to clang-format-4.0
+BreakAfterJavaFieldAnnotations: false
+BreakStringLiterals: false
+ColumnLimit: 80
+CommentPragmas: '^ IWYU pragma:'
+CompactNamespaces: false # Unknown to clang-format-4.0
+ConstructorInitializerAllOnOneLineOrOnePerLine: false
+ConstructorInitializerIndentWidth: 8
+ContinuationIndentWidth: 8
+Cpp11BracedListStyle: false
+DerivePointerAlignment: false
+DisableFormat: false
+ExperimentalAutoDetectBinPacking: false
+FixNamespaceComments: false # Unknown to clang-format-4.0
+
+# Taken from:
+# git grep -h '^#define [^[:space:]]*for_each[^[:space:]]*(' include/ \
+# | sed "s,^#define \([^[:space:]]*for_each[^[:space:]]*\)(.*$, - '\1'," \
+# | sort | uniq
+ForEachMacros:
+ - 'apei_estatus_for_each_section'
+ - 'ata_for_each_dev'
+ - 'ata_for_each_link'
+ - '__ata_qc_for_each'
+ - 'ata_qc_for_each'
+ - 'ata_qc_for_each_raw'
+ - 'ata_qc_for_each_with_internal'
+ - 'ax25_for_each'
+ - 'ax25_uid_for_each'
+ - '__bio_for_each_bvec'
+ - 'bio_for_each_bvec'
+ - 'bio_for_each_integrity_vec'
+ - '__bio_for_each_segment'
+ - 'bio_for_each_segment'
+ - 'bio_for_each_segment_all'
+ - 'bio_list_for_each'
+ - 'bip_for_each_vec'
+ - 'bitmap_for_each_clear_region'
+ - 'bitmap_for_each_set_region'
+ - 'blkg_for_each_descendant_post'
+ - 'blkg_for_each_descendant_pre'
+ - 'blk_queue_for_each_rl'
+ - 'bond_for_each_slave'
+ - 'bond_for_each_slave_rcu'
+ - 'bpf_for_each_spilled_reg'
+ - 'btree_for_each_safe128'
+ - 'btree_for_each_safe32'
+ - 'btree_for_each_safe64'
+ - 'btree_for_each_safel'
+ - 'card_for_each_dev'
+ - 'cgroup_taskset_for_each'
+ - 'cgroup_taskset_for_each_leader'
+ - 'cpufreq_for_each_entry'
+ - 'cpufreq_for_each_entry_idx'
+ - 'cpufreq_for_each_valid_entry'
+ - 'cpufreq_for_each_valid_entry_idx'
+ - 'css_for_each_child'
+ - 'css_for_each_descendant_post'
+ - 'css_for_each_descendant_pre'
+ - 'device_for_each_child_node'
+ - 'dma_fence_chain_for_each'
+ - 'drm_atomic_crtc_for_each_plane'
+ - 'drm_atomic_crtc_state_for_each_plane'
+ - 'drm_atomic_crtc_state_for_each_plane_state'
+ - 'drm_atomic_for_each_plane_damage'
+ - 'drm_client_for_each_connector_iter'
+ - 'drm_client_for_each_modeset'
+ - 'drm_connector_for_each_possible_encoder'
+ - 'drm_for_each_bridge_in_chain'
+ - 'drm_for_each_connector_iter'
+ - 'drm_for_each_crtc'
+ - 'drm_for_each_encoder'
+ - 'drm_for_each_encoder_mask'
+ - 'drm_for_each_fb'
+ - 'drm_for_each_legacy_plane'
+ - 'drm_for_each_plane'
+ - 'drm_for_each_plane_mask'
+ - 'drm_for_each_privobj'
+ - 'drm_mm_for_each_hole'
+ - 'drm_mm_for_each_node'
+ - 'drm_mm_for_each_node_in_range'
+ - 'drm_mm_for_each_node_safe'
+ - 'flow_action_for_each'
+ - 'for_each_active_dev_scope'
+ - 'for_each_active_drhd_unit'
+ - 'for_each_active_iommu'
+ - 'for_each_available_child_of_node'
+ - 'for_each_bio'
+ - 'for_each_board_func_rsrc'
+ - 'for_each_bvec'
+ - 'for_each_card_auxs'
+ - 'for_each_card_auxs_safe'
+ - 'for_each_card_components'
+ - 'for_each_card_dapms'
+ - 'for_each_card_pre_auxs'
+ - 'for_each_card_prelinks'
+ - 'for_each_card_rtds'
+ - 'for_each_card_rtds_safe'
+ - 'for_each_card_widgets'
+ - 'for_each_card_widgets_safe'
+ - 'for_each_cgroup_storage_type'
+ - 'for_each_child_of_node'
+ - 'for_each_clear_bit'
+ - 'for_each_clear_bit_from'
+ - 'for_each_cmsghdr'
+ - 'for_each_compatible_node'
+ - 'for_each_component_dais'
+ - 'for_each_component_dais_safe'
+ - 'for_each_comp_order'
+ - 'for_each_console'
+ - 'for_each_cpu'
+ - 'for_each_cpu_and'
+ - 'for_each_cpu_not'
+ - 'for_each_cpu_wrap'
+ - 'for_each_dapm_widgets'
+ - 'for_each_dev_addr'
+ - 'for_each_dev_scope'
+ - 'for_each_displayid_db'
+ - 'for_each_dma_cap_mask'
+ - 'for_each_dpcm_be'
+ - 'for_each_dpcm_be_rollback'
+ - 'for_each_dpcm_be_safe'
+ - 'for_each_dpcm_fe'
+ - 'for_each_drhd_unit'
+ - 'for_each_dss_dev'
+ - 'for_each_efi_memory_desc'
+ - 'for_each_efi_memory_desc_in_map'
+ - 'for_each_element'
+ - 'for_each_element_extid'
+ - 'for_each_element_id'
+ - 'for_each_endpoint_of_node'
+ - 'for_each_evictable_lru'
+ - 'for_each_fib6_node_rt_rcu'
+ - 'for_each_fib6_walker_rt'
+ - 'for_each_free_mem_pfn_range_in_zone'
+ - 'for_each_free_mem_pfn_range_in_zone_from'
+ - 'for_each_free_mem_range'
+ - 'for_each_free_mem_range_reverse'
+ - 'for_each_func_rsrc'
+ - 'for_each_hstate'
+ - 'for_each_if'
+ - 'for_each_iommu'
+ - 'for_each_ip_tunnel_rcu'
+ - 'for_each_irq_nr'
+ - 'for_each_link_codecs'
+ - 'for_each_link_cpus'
+ - 'for_each_link_platforms'
+ - 'for_each_lru'
+ - 'for_each_matching_node'
+ - 'for_each_matching_node_and_match'
+ - 'for_each_member'
+ - 'for_each_memblock'
+ - 'for_each_memblock_type'
+ - 'for_each_memcg_cache_index'
+ - 'for_each_mem_pfn_range'
+ - 'for_each_mem_range'
+ - 'for_each_mem_range_rev'
+ - 'for_each_migratetype_order'
+ - 'for_each_msi_entry'
+ - 'for_each_msi_entry_safe'
+ - 'for_each_net'
+ - 'for_each_net_continue_reverse'
+ - 'for_each_netdev'
+ - 'for_each_netdev_continue'
+ - 'for_each_netdev_continue_rcu'
+ - 'for_each_netdev_continue_reverse'
+ - 'for_each_netdev_feature'
+ - 'for_each_netdev_in_bond_rcu'
+ - 'for_each_netdev_rcu'
+ - 'for_each_netdev_reverse'
+ - 'for_each_netdev_safe'
+ - 'for_each_net_rcu'
+ - 'for_each_new_connector_in_state'
+ - 'for_each_new_crtc_in_state'
+ - 'for_each_new_mst_mgr_in_state'
+ - 'for_each_new_plane_in_state'
+ - 'for_each_new_private_obj_in_state'
+ - 'for_each_node'
+ - 'for_each_node_by_name'
+ - 'for_each_node_by_type'
+ - 'for_each_node_mask'
+ - 'for_each_node_state'
+ - 'for_each_node_with_cpus'
+ - 'for_each_node_with_property'
+ - 'for_each_of_allnodes'
+ - 'for_each_of_allnodes_from'
+ - 'for_each_of_cpu_node'
+ - 'for_each_of_pci_range'
+ - 'for_each_old_connector_in_state'
+ - 'for_each_old_crtc_in_state'
+ - 'for_each_old_mst_mgr_in_state'
+ - 'for_each_oldnew_connector_in_state'
+ - 'for_each_oldnew_crtc_in_state'
+ - 'for_each_oldnew_mst_mgr_in_state'
+ - 'for_each_oldnew_plane_in_state'
+ - 'for_each_oldnew_plane_in_state_reverse'
+ - 'for_each_oldnew_private_obj_in_state'
+ - 'for_each_old_plane_in_state'
+ - 'for_each_old_private_obj_in_state'
+ - 'for_each_online_cpu'
+ - 'for_each_online_node'
+ - 'for_each_online_pgdat'
+ - 'for_each_pci_bridge'
+ - 'for_each_pci_dev'
+ - 'for_each_pci_msi_entry'
+ - 'for_each_pcm_streams'
+ - 'for_each_populated_zone'
+ - 'for_each_possible_cpu'
+ - 'for_each_present_cpu'
+ - 'for_each_prime_number'
+ - 'for_each_prime_number_from'
+ - 'for_each_process'
+ - 'for_each_process_thread'
+ - 'for_each_property_of_node'
+ - 'for_each_registered_fb'
+ - 'for_each_reserved_mem_region'
+ - 'for_each_rtd_codec_dais'
+ - 'for_each_rtd_codec_dais_rollback'
+ - 'for_each_rtd_components'
+ - 'for_each_rtd_cpu_dais'
+ - 'for_each_rtd_cpu_dais_rollback'
+ - 'for_each_rtd_dais'
+ - 'for_each_set_bit'
+ - 'for_each_set_bit_from'
+ - 'for_each_set_clump8'
+ - 'for_each_sg'
+ - 'for_each_sg_dma_page'
+ - 'for_each_sg_page'
+ - 'for_each_sibling_event'
+ - 'for_each_subelement'
+ - 'for_each_subelement_extid'
+ - 'for_each_subelement_id'
+ - '__for_each_thread'
+ - 'for_each_thread'
+ - 'for_each_wakeup_source'
+ - 'for_each_zone'
+ - 'for_each_zone_zonelist'
+ - 'for_each_zone_zonelist_nodemask'
+ - 'fwnode_for_each_available_child_node'
+ - 'fwnode_for_each_child_node'
+ - 'fwnode_graph_for_each_endpoint'
+ - 'gadget_for_each_ep'
+ - 'genradix_for_each'
+ - 'genradix_for_each_from'
+ - 'hash_for_each'
+ - 'hash_for_each_possible'
+ - 'hash_for_each_possible_rcu'
+ - 'hash_for_each_possible_rcu_notrace'
+ - 'hash_for_each_possible_safe'
+ - 'hash_for_each_rcu'
+ - 'hash_for_each_safe'
+ - 'hctx_for_each_ctx'
+ - 'hlist_bl_for_each_entry'
+ - 'hlist_bl_for_each_entry_rcu'
+ - 'hlist_bl_for_each_entry_safe'
+ - 'hlist_for_each'
+ - 'hlist_for_each_entry'
+ - 'hlist_for_each_entry_continue'
+ - 'hlist_for_each_entry_continue_rcu'
+ - 'hlist_for_each_entry_continue_rcu_bh'
+ - 'hlist_for_each_entry_from'
+ - 'hlist_for_each_entry_from_rcu'
+ - 'hlist_for_each_entry_rcu'
+ - 'hlist_for_each_entry_rcu_bh'
+ - 'hlist_for_each_entry_rcu_notrace'
+ - 'hlist_for_each_entry_safe'
+ - '__hlist_for_each_rcu'
+ - 'hlist_for_each_safe'
+ - 'hlist_nulls_for_each_entry'
+ - 'hlist_nulls_for_each_entry_from'
+ - 'hlist_nulls_for_each_entry_rcu'
+ - 'hlist_nulls_for_each_entry_safe'
+ - 'i3c_bus_for_each_i2cdev'
+ - 'i3c_bus_for_each_i3cdev'
+ - 'ide_host_for_each_port'
+ - 'ide_port_for_each_dev'
+ - 'ide_port_for_each_present_dev'
+ - 'idr_for_each_entry'
+ - 'idr_for_each_entry_continue'
+ - 'idr_for_each_entry_continue_ul'
+ - 'idr_for_each_entry_ul'
+ - 'in_dev_for_each_ifa_rcu'
+ - 'in_dev_for_each_ifa_rtnl'
+ - 'inet_bind_bucket_for_each'
+ - 'inet_lhash2_for_each_icsk_rcu'
+ - 'key_for_each'
+ - 'key_for_each_safe'
+ - 'klp_for_each_func'
+ - 'klp_for_each_func_safe'
+ - 'klp_for_each_func_static'
+ - 'klp_for_each_object'
+ - 'klp_for_each_object_safe'
+ - 'klp_for_each_object_static'
+ - 'kunit_suite_for_each_test_case'
+ - 'kvm_for_each_memslot'
+ - 'kvm_for_each_vcpu'
+ - 'list_for_each'
+ - 'list_for_each_codec'
+ - 'list_for_each_codec_safe'
+ - 'list_for_each_continue'
+ - 'list_for_each_entry'
+ - 'list_for_each_entry_continue'
+ - 'list_for_each_entry_continue_rcu'
+ - 'list_for_each_entry_continue_reverse'
+ - 'list_for_each_entry_from'
+ - 'list_for_each_entry_from_rcu'
+ - 'list_for_each_entry_from_reverse'
+ - 'list_for_each_entry_lockless'
+ - 'list_for_each_entry_rcu'
+ - 'list_for_each_entry_reverse'
+ - 'list_for_each_entry_safe'
+ - 'list_for_each_entry_safe_continue'
+ - 'list_for_each_entry_safe_from'
+ - 'list_for_each_entry_safe_reverse'
+ - 'list_for_each_prev'
+ - 'list_for_each_prev_safe'
+ - 'list_for_each_safe'
+ - 'llist_for_each'
+ - 'llist_for_each_entry'
+ - 'llist_for_each_entry_safe'
+ - 'llist_for_each_safe'
+ - 'mci_for_each_dimm'
+ - 'media_device_for_each_entity'
+ - 'media_device_for_each_intf'
+ - 'media_device_for_each_link'
+ - 'media_device_for_each_pad'
+ - 'nanddev_io_for_each_page'
+ - 'netdev_for_each_lower_dev'
+ - 'netdev_for_each_lower_private'
+ - 'netdev_for_each_lower_private_rcu'
+ - 'netdev_for_each_mc_addr'
+ - 'netdev_for_each_uc_addr'
+ - 'netdev_for_each_upper_dev_rcu'
+ - 'netdev_hw_addr_list_for_each'
+ - 'nft_rule_for_each_expr'
+ - 'nla_for_each_attr'
+ - 'nla_for_each_nested'
+ - 'nlmsg_for_each_attr'
+ - 'nlmsg_for_each_msg'
+ - 'nr_neigh_for_each'
+ - 'nr_neigh_for_each_safe'
+ - 'nr_node_for_each'
+ - 'nr_node_for_each_safe'
+ - 'of_for_each_phandle'
+ - 'of_property_for_each_string'
+ - 'of_property_for_each_u32'
+ - 'pci_bus_for_each_resource'
+ - 'pcm_for_each_format'
+ - 'ping_portaddr_for_each_entry'
+ - 'plist_for_each'
+ - 'plist_for_each_continue'
+ - 'plist_for_each_entry'
+ - 'plist_for_each_entry_continue'
+ - 'plist_for_each_entry_safe'
+ - 'plist_for_each_safe'
+ - 'pnp_for_each_card'
+ - 'pnp_for_each_dev'
+ - 'protocol_for_each_card'
+ - 'protocol_for_each_dev'
+ - 'queue_for_each_hw_ctx'
+ - 'radix_tree_for_each_slot'
+ - 'radix_tree_for_each_tagged'
+ - 'rbtree_postorder_for_each_entry_safe'
+ - 'rdma_for_each_block'
+ - 'rdma_for_each_port'
+ - 'resource_list_for_each_entry'
+ - 'resource_list_for_each_entry_safe'
+ - 'rhl_for_each_entry_rcu'
+ - 'rhl_for_each_rcu'
+ - 'rht_for_each'
+ - 'rht_for_each_entry'
+ - 'rht_for_each_entry_from'
+ - 'rht_for_each_entry_rcu'
+ - 'rht_for_each_entry_rcu_from'
+ - 'rht_for_each_entry_safe'
+ - 'rht_for_each_from'
+ - 'rht_for_each_rcu'
+ - 'rht_for_each_rcu_from'
+ - '__rq_for_each_bio'
+ - 'rq_for_each_bvec'
+ - 'rq_for_each_segment'
+ - 'scsi_for_each_prot_sg'
+ - 'scsi_for_each_sg'
+ - 'sctp_for_each_hentry'
+ - 'sctp_skb_for_each'
+ - 'shdma_for_each_chan'
+ - '__shost_for_each_device'
+ - 'shost_for_each_device'
+ - 'sk_for_each'
+ - 'sk_for_each_bound'
+ - 'sk_for_each_entry_offset_rcu'
+ - 'sk_for_each_from'
+ - 'sk_for_each_rcu'
+ - 'sk_for_each_safe'
+ - 'sk_nulls_for_each'
+ - 'sk_nulls_for_each_from'
+ - 'sk_nulls_for_each_rcu'
+ - 'snd_array_for_each'
+ - 'snd_pcm_group_for_each_entry'
+ - 'snd_soc_dapm_widget_for_each_path'
+ - 'snd_soc_dapm_widget_for_each_path_safe'
+ - 'snd_soc_dapm_widget_for_each_sink_path'
+ - 'snd_soc_dapm_widget_for_each_source_path'
+ - 'tb_property_for_each'
+ - 'tcf_exts_for_each_action'
+ - 'udp_portaddr_for_each_entry'
+ - 'udp_portaddr_for_each_entry_rcu'
+ - 'usb_hub_for_each_child'
+ - 'v4l2_device_for_each_subdev'
+ - 'v4l2_m2m_for_each_dst_buf'
+ - 'v4l2_m2m_for_each_dst_buf_safe'
+ - 'v4l2_m2m_for_each_src_buf'
+ - 'v4l2_m2m_for_each_src_buf_safe'
+ - 'virtio_device_for_each_vq'
+ - 'xa_for_each'
+ - 'xa_for_each_marked'
+ - 'xa_for_each_range'
+ - 'xa_for_each_start'
+ - 'xas_for_each'
+ - 'xas_for_each_conflict'
+ - 'xas_for_each_marked'
+ - 'xbc_array_for_each_value'
+ - 'xbc_for_each_key_value'
+ - 'xbc_node_for_each_array_value'
+ - 'xbc_node_for_each_child'
+ - 'xbc_node_for_each_key_value'
+ - 'zorro_for_each_dev'
+
+IncludeBlocks: Preserve # Unknown to clang-format-5.0
+IncludeCategories:
+ - Regex: '.*'
+ Priority: 1
+IncludeIsMainRegex: '(Test)?$'
+IndentCaseLabels: false
+IndentPPDirectives: None # Unknown to clang-format-5.0
+IndentWidth: 8
+IndentWrappedFunctionNames: false
+JavaScriptQuotes: Leave
+JavaScriptWrapImports: true
+KeepEmptyLinesAtTheStartOfBlocks: false
+MacroBlockBegin: ''
+MacroBlockEnd: ''
+MaxEmptyLinesToKeep: 1
+NamespaceIndentation: None
+ObjCBinPackProtocolList: Auto # Unknown to clang-format-5.0
+ObjCBlockIndentWidth: 8
+ObjCSpaceAfterProperty: true
+ObjCSpaceBeforeProtocolList: true
+
+# Taken from git's rules
+PenaltyBreakAssignment: 10 # Unknown to clang-format-4.0
+PenaltyBreakBeforeFirstCallParameter: 30
+PenaltyBreakComment: 10
+PenaltyBreakFirstLessLess: 0
+PenaltyBreakString: 10
+PenaltyExcessCharacter: 100
+PenaltyReturnTypeOnItsOwnLine: 60
+
+PointerAlignment: Right
+ReflowComments: false
+SortIncludes: false
+SortUsingDeclarations: false # Unknown to clang-format-4.0
+SpaceAfterCStyleCast: false
+SpaceAfterTemplateKeyword: true
+SpaceBeforeAssignmentOperators: true
+SpaceBeforeCtorInitializerColon: true # Unknown to clang-format-5.0
+SpaceBeforeInheritanceColon: true # Unknown to clang-format-5.0
+SpaceBeforeParens: ControlStatements
+SpaceBeforeRangeBasedForLoopColon: true # Unknown to clang-format-5.0
+SpaceInEmptyParentheses: false
+SpacesBeforeTrailingComments: 1
+SpacesInAngles: false
+SpacesInContainerLiterals: false
+SpacesInCStyleCastParentheses: false
+SpacesInParentheses: false
+SpacesInSquareBrackets: false
+Standard: Cpp03
+TabWidth: 8
+UseTab: Always
+...
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
new file mode 100644
index 0000000..b930863
--- /dev/null
+++ b/.pre-commit-config.yaml
@@ -0,0 +1,32 @@
+---
+repos:
+ - repo: local
+ hooks:
+ - id: clang-format
+ name: clang-format
+ stages:
+ - commit
+ language: system
+ entry: clang-format-9 -style=file -i
+ types_or:
+ - c
+ - c++
+ - repo: https://github.com/adrienverge/yamllint.git
+ rev: v1.26.3
+ hooks:
+ - id: yamllint
+ files: '^(\.gitlab-ci.yml|ci/)'
+ args:
+ - -c
+ - ci/.yamllint.yml
+ - repo: https://github.com/pre-commit/pre-commit-hooks
+ rev: v4.1.0
+ hooks:
+ - id: check-json
+ - id: check-merge-conflict
+ - id: check-symlinks
+ - id: check-yaml
+ exclude: '^(\.gitlab-ci.yml|ci/)'
+ - id: detect-private-key
+ - id: end-of-file-fixer
+ - id: trailing-whitespace
diff --git a/BUILD.bazel b/BUILD.bazel
new file mode 100644
index 0000000..79dc4bc
--- /dev/null
+++ b/BUILD.bazel
@@ -0,0 +1,20 @@
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+load("//build/kernel/kleaf:kernel.bzl", "kernel_module")
+
+kernel_module(
+ name = "uwb.qm35",
+ srcs = glob([
+ "**/*.c",
+ "**/*.h",
+ "Kbuild",
+ ]),
+ outs = [
+ "qm35.ko",
+ ],
+ kernel_build = "//private/google-modules/soc/gs:gs_kernel_build",
+ visibility = [
+ "//private/devices/google:__subpackages__",
+ "//private/google-modules/soc/gs:__pkg__",
+ ],
+)
diff --git a/Kbuild b/Kbuild
new file mode 100644
index 0000000..c2fd666
--- /dev/null
+++ b/Kbuild
@@ -0,0 +1,23 @@
+# Kbuild for the QM35 SPI UWB driver.
+# Add the bundled libqmrom headers to the include path; -Werror keeps the
+# module warning-clean.
+ccflags-y := -I$(srctree)/$(src)/libqmrom/include -Werror
+
+obj-$(CONFIG_QM35_SPI) := qm35.o
+
+# Objects composing qm35.ko.
+qm35-y := \
+	qm35-spi.o \
+	qm35_rb.o \
+	qmrom_spi.o \
+	libqmrom/src/qmrom_common.o \
+	libqmrom/src/qmrom_a0.o \
+	libqmrom/src/qmrom_b0.o \
+	libqmrom/src/qmrom_c0.o \
+	libqmrom/src/qmrom_log.o \
+	hsspi.o \
+	hsspi_uci.o \
+	hsspi_log.o \
+	hsspi_coredump.o \
+	debug.o \
+	hsspi_test.o
+
+# Optional objects, selected by config.
+qm35-$(CONFIG_QM35_SPI_DEBUG_FW) += debug_qmrom.o
+qm35-$(CONFIG_EVENT_TRACING) += qm35-trace.o
+# The trace header lives in the source dir; the tracepoint machinery
+# (CREATE_TRACE_POINTS) needs it on the include path for this object only.
+CFLAGS_qm35-trace.o = -I$(srctree)/$(src)
diff --git a/Makefile b/Makefile
new file mode 100644
index 0000000..fae1a30
--- /dev/null
+++ b/Makefile
@@ -0,0 +1,12 @@
+# Out-of-tree build entry point for qm35.ko.
+# Default to the running kernel's build tree unless KERNEL_SRC is given.
+KERNEL_SRC ?= /lib/modules/$(shell uname -r)/build
+
+M ?= $(shell pwd)
+# NOTE(review): KDIR is never set in this file, so O resolves from the
+# environment (often empty). Presumably KERNEL_SRC was intended — confirm.
+O := $(abspath $(KDIR))
+B := $(abspath $O/$M)
+
+$(info *** Building Android in $O/$M )
+
+# Build the driver as a module.
+KBUILD_OPTIONS += CONFIG_QM35_SPI=m
+
+# Delegate the standard targets to the kernel build system.
+modules modules_install clean:
+	$(MAKE) -C $(KERNEL_SRC) M=$(M) INSTALL_MOD_STRIP=1 $(KBUILD_OPTIONS) $(@)
diff --git a/debug.c b/debug.c
new file mode 100644
index 0000000..6b80fd4
--- /dev/null
+++ b/debug.c
@@ -0,0 +1,513 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * This file is part of the QM35 UCI stack for linux.
+ *
+ * Copyright (c) 2022 Qorvo US, Inc.
+ *
+ * This software is provided under the GNU General Public License, version 2
+ * (GPLv2), as well as under a Qorvo commercial license.
+ *
+ * You may choose to use this software under the terms of the GPLv2 License,
+ * version 2 ("GPLv2"), as published by the Free Software Foundation.
+ * You should have received a copy of the GPLv2 along with this program. If
+ * not, see <http://www.gnu.org/licenses/>.
+ *
+ * This program is distributed under the GPLv2 in the hope that it will be
+ * useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GPLv2 for more
+ * details.
+ *
+ * If you cannot meet the requirements of the GPLv2, you may not use this
+ * software for any purpose without first obtaining a commercial license from
+ * Qorvo.
+ * Please contact Qorvo to inquire about licensing terms.
+ *
+ * QM35 LOG layer HSSPI Protocol
+ */
+
+#include <linux/debugfs.h>
+#include <linux/poll.h>
+#include <linux/fsnotify.h>
+
+#include <qmrom.h>
+#include <qmrom_spi.h>
+
+#include "qm35.h"
+#include "debug.h"
+#include "hsspi_test.h"
+
+#if IS_ENABLED(CONFIG_QM35_SPI_DEBUG_FW)
+extern void debug_rom_code_init(struct debug *debug);
+#endif
+
+static const struct file_operations debug_enable_fops;
+static const struct file_operations debug_log_level_fops;
+static const struct file_operations debug_test_hsspi_sleep_fops;
+
+/* Fetch the private data attached to the debugfs inode behind @filp. */
+static void *priv_from_file(const struct file *filp)
+{
+	const struct dentry *dentry = filp->f_path.dentry;
+
+	return dentry->d_inode->i_private;
+}
+
+/*
+ * debugfs "fw/enable" write handler: parse a decimal value from userspace
+ * and forward it to the trace backend. Any value other than 1 disables.
+ * Returns @count on success, -EFAULT on parse/copy failure, -ENOSYS when
+ * no trace backend is registered.
+ */
+static ssize_t debug_enable_write(struct file *filp, const char __user *buff,
+				  size_t count, loff_t *off)
+{
+	struct debug *debug;
+	u8 enabled;
+
+	debug = priv_from_file(filp);
+
+	/* kstrtou8_from_user does the userspace copy and the parsing */
+	if (kstrtou8_from_user(buff, count, 10, &enabled))
+		return -EFAULT;
+
+	if (debug->trace_ops)
+		debug->trace_ops->enable_set(debug, enabled == 1 ? 1 : 0);
+	else
+		return -ENOSYS;
+
+	return count;
+}
+
+/*
+ * debugfs "fw/enable" read handler: report the backend's enable state as
+ * a single ASCII digit followed by a newline.
+ */
+static ssize_t debug_enable_read(struct file *filp, char __user *buff,
+				 size_t count, loff_t *off)
+{
+	char enabled[2];
+	struct debug *debug;
+
+	debug = priv_from_file(filp);
+
+	if (debug->trace_ops)
+		/* enable_get() returns a small int; render one digit */
+		enabled[0] = debug->trace_ops->enable_get(debug) + '0';
+	else
+		return -ENOSYS;
+
+	enabled[1] = '\n';
+
+	return simple_read_from_buffer(buff, count, off, enabled,
+				       sizeof(enabled));
+}
+
+/*
+ * debugfs "<module>/log_level" write handler: parse a decimal level and
+ * apply it to the log module attached to this file's inode (the inode
+ * private data is a struct log_module, not struct debug).
+ */
+static ssize_t debug_log_level_write(struct file *filp, const char __user *buff,
+				     size_t count, loff_t *off)
+{
+	u8 log_level = 0;
+	struct log_module *log_module;
+
+	log_module = priv_from_file(filp);
+	if (kstrtou8_from_user(buff, count, 10, &log_level))
+		return -EFAULT;
+
+	if (log_module->debug->trace_ops)
+		log_module->debug->trace_ops->level_set(log_module->debug,
+							log_module, log_level);
+	else
+		return -ENOSYS;
+
+	return count;
+}
+
+/*
+ * debugfs "<module>/log_level" read handler: report the module's current
+ * level as a single ASCII digit followed by a newline.
+ */
+static ssize_t debug_log_level_read(struct file *filp, char __user *buff,
+				    size_t count, loff_t *off)
+{
+	char log_level[2];
+	struct log_module *log_module;
+
+	log_module = priv_from_file(filp);
+
+	if (log_module->debug->trace_ops)
+		log_level[0] = log_module->debug->trace_ops->level_get(
+				       log_module->debug, log_module) +
+			       '0';
+	else
+		return -ENOSYS;
+
+	log_level[1] = '\n';
+
+	return simple_read_from_buffer(buff, count, off, log_level,
+				       sizeof(log_level));
+}
+
+/*
+ * debugfs "fw/test_sleep_hsspi_ms" write handler: set the artificial
+ * inter-frame delay (in ms) used by the HSSPI test machinery.
+ */
+static ssize_t debug_test_hsspi_sleep_write(struct file *filp,
+					    const char __user *buff,
+					    size_t count, loff_t *off)
+{
+	int delay_ms;
+
+	if (kstrtoint_from_user(buff, count, 10, &delay_ms))
+		return -EFAULT;
+
+	hsspi_test_set_inter_frame_ms(delay_ms);
+
+	return count;
+}
+
+/*
+ * debugfs "fw/traces" read handler: hand one trace entry to userspace.
+ *
+ * Blocks until an entry is available unless O_NONBLOCK is set. Returns
+ * the entry size, 0 when non-blocking with nothing pending (or the entry
+ * vanished), or a negative errno.
+ */
+static ssize_t debug_traces_read(struct file *filp, char __user *buff,
+				 size_t count, loff_t *off)
+{
+	char *entry;
+	rb_entry_size_t entry_size;
+	/*
+	 * Must be a signed int: wait_event_interruptible() returns
+	 * -ERESTARTSYS on signal. The previous uint16_t truncated that to a
+	 * large positive value, which was then returned to userspace as a
+	 * bogus byte count instead of an error.
+	 */
+	int ret;
+	struct debug *debug;
+
+	debug = priv_from_file(filp);
+
+	if (!debug->trace_ops)
+		return -ENOSYS;
+
+	entry_size = debug->trace_ops->trace_get_next_size(debug);
+	if (!entry_size) {
+		if (filp->f_flags & O_NONBLOCK)
+			return 0;
+
+		/* Sleep until the producer queues an entry (or a signal). */
+		ret = wait_event_interruptible(
+			debug->wq,
+			(entry_size =
+				 debug->trace_ops->trace_get_next_size(debug)));
+		if (ret)
+			return ret;
+	}
+
+	if (entry_size > count)
+		return -EMSGSIZE;
+
+	entry = debug->trace_ops->trace_get_next(debug, &entry_size);
+	if (!entry)
+		return 0;
+
+	ret = copy_to_user(buff, entry, entry_size);
+
+	kfree(entry);
+
+	return ret ? -EFAULT : entry_size;
+}
+
+/* poll/select support for "fw/traces": readable when an entry is queued. */
+static __poll_t debug_traces_poll(struct file *filp,
+				  struct poll_table_struct *wait)
+{
+	struct debug *debug;
+	__poll_t mask = 0;
+
+	debug = priv_from_file(filp);
+
+	/* register on the waitqueue woken by debug_new_trace_available() */
+	poll_wait(filp, &debug->wq, wait);
+
+	if (debug->trace_ops && debug->trace_ops->trace_next_avail(debug))
+		mask |= POLLIN;
+
+	return mask;
+}
+
+/*
+ * Open "fw/traces". Only one reader is allowed at a time: pv_filp acts as
+ * the exclusivity token (-EBUSY otherwise). A successful open resets the
+ * trace ring so the reader starts from a clean state.
+ */
+static int debug_traces_open(struct inode *inodep, struct file *filep)
+{
+	struct debug *debug;
+
+	debug = priv_from_file(filep);
+
+	mutex_lock(&debug->pv_filp_lock);
+	if (debug->pv_filp) {
+		mutex_unlock(&debug->pv_filp_lock);
+		return -EBUSY;
+	}
+
+	debug->pv_filp = filep;
+
+	if (debug->trace_ops)
+		debug->trace_ops->trace_reset(debug);
+
+	mutex_unlock(&debug->pv_filp_lock);
+
+	return 0;
+}
+
+/* Release "fw/traces": drop the single-reader exclusivity token. */
+static int debug_traces_release(struct inode *inodep, struct file *filep)
+{
+	struct debug *dbg = priv_from_file(filep);
+
+	mutex_lock(&dbg->pv_filp_lock);
+	dbg->pv_filp = NULL;
+	mutex_unlock(&dbg->pv_filp_lock);
+
+	return 0;
+}
+
+/*
+ * debugfs "fw/coredump" read handler: expose the coredump buffer held by
+ * the coredump backend.
+ */
+static ssize_t debug_coredump_read(struct file *filep, char __user *buff,
+				   size_t count, loff_t *off)
+{
+	struct qm35_ctx *qm35_hdl;
+	struct debug *debug;
+	char *cd;
+	size_t cd_len = 0;
+
+	debug = priv_from_file(filep);
+	qm35_hdl = container_of(debug, struct qm35_ctx, debug);
+
+	if (!debug->coredump_ops)
+		return -ENOSYS;
+
+	/* NOTE(review): presumably coredump_get() may return NULL with
+	 * cd_len == 0, in which case nothing is copied — confirm contract. */
+	cd = debug->coredump_ops->coredump_get(debug, &cd_len);
+
+	return simple_read_from_buffer(buff, count, off, cd, cd_len);
+}
+
+/*
+ * debugfs "fw/coredump" write handler: writing a non-zero value forces
+ * the backend to take a coredump; writing 0 only logs a reminder.
+ */
+static ssize_t debug_coredump_write(struct file *filp, const char __user *buff,
+				    size_t count, loff_t *off)
+{
+	struct debug *debug;
+	u8 force;
+
+	debug = priv_from_file(filp);
+
+	if (kstrtou8_from_user(buff, count, 10, &force))
+		return -EFAULT;
+
+	if (debug->coredump_ops && force != 0)
+		debug->coredump_ops->coredump_force(debug);
+	else if (force == 0)
+		pr_warn("qm35: write non null value to force coredump\n");
+	else
+		return -ENOSYS; /* non-zero value but no backend registered */
+
+	return count;
+}
+
+/*
+ * debugfs "chip/hw_reset" write handler: writing a non-zero value
+ * performs a synchronous hardware reset of the QM35 chip.
+ */
+static ssize_t debug_hw_reset_write(struct file *filp, const char __user *buff,
+				    size_t count, loff_t *off)
+{
+	struct qm35_ctx *qm35_hdl;
+	struct debug *debug;
+	int ret;
+	u8 reset;
+
+	debug = priv_from_file(filp);
+	qm35_hdl = container_of(debug, struct qm35_ctx, debug);
+
+	if (kstrtou8_from_user(buff, count, 10, &reset))
+		return -EFAULT;
+
+	/* default to failure so a zero write falls through to -ENOSYS;
+	 * NOTE(review): presumably intentional ("only 1 succeeds") — confirm */
+	ret = -1;
+	if (reset != 0) {
+		pr_info("qm35: resetting chip...\n");
+		ret = qm35_reset_sync(qm35_hdl);
+	} else
+		pr_warn("qm35: write non null value to force a hw reset\n");
+
+	if (ret)
+		return -ENOSYS;
+
+	return count;
+}
+
+/* fw/enable: read/write the trace-enable flag */
+static const struct file_operations debug_enable_fops = {
+	.owner = THIS_MODULE,
+	.write = debug_enable_write,
+	.read = debug_enable_read,
+};
+
+/* <module>/log_level: per-module log level */
+static const struct file_operations debug_log_level_fops = {
+	.owner = THIS_MODULE,
+	.write = debug_log_level_write,
+	.read = debug_log_level_read
+};
+
+/* fw/test_sleep_hsspi_ms: write-only test knob */
+static const struct file_operations debug_test_hsspi_sleep_fops = {
+	.owner = THIS_MODULE,
+	.write = debug_test_hsspi_sleep_write
+};
+
+/* fw/traces: exclusive, pollable trace stream */
+static const struct file_operations debug_traces_fops = {
+	.owner = THIS_MODULE,
+	.open = debug_traces_open,
+	.release = debug_traces_release,
+	.read = debug_traces_read,
+	.poll = debug_traces_poll,
+	.llseek = no_llseek,
+};
+
+/* fw/coredump: read dump / write to force one */
+static const struct file_operations debug_coredump_fops = {
+	.owner = THIS_MODULE,
+	.read = debug_coredump_read,
+	.write = debug_coredump_write,
+};
+
+/* chip/hw_reset: write-only reset trigger */
+static const struct file_operations debug_hw_reset_fops = {
+	.owner = THIS_MODULE,
+	.write = debug_hw_reset_write,
+};
+
+/*
+ * Create the per-log-module debugfs directory uwb0/fw/<name>/ with its
+ * "log_level" file. The file's inode private data is @log_module (see
+ * debug_log_level_*). Returns 0 on success, -1 on failure.
+ */
+int debug_create_module_entry(struct debug *debug,
+			      struct log_module *log_module)
+{
+	struct dentry *dir;
+	struct dentry *file;
+
+	dir = debugfs_create_dir(log_module->name, debug->fw_dir);
+	if (!dir) {
+		pr_err("qm35: failed to create /sys/kernel/debug/uwb0/%s\n",
+		       log_module->name);
+		return -1;
+	}
+
+	file = debugfs_create_file("log_level", 0644, dir, log_module,
+				   &debug_log_level_fops);
+	if (!file) {
+		pr_err("qm35: failed to create /sys/kernel/debug/uwb0/%s/log_level\n",
+		       log_module->name);
+		return -1;
+	}
+
+	pr_info("qm35 debug: created /sys/kernel/debug/uwb0/%s/log_level\n",
+		log_module->name);
+
+	return 0;
+}
+
+/*
+ * Called by the trace producer when a new entry is queued: emit an
+ * fsnotify modify event if the traces file is open, and wake any reader
+ * blocked in debug_traces_read() or poll().
+ */
+void debug_new_trace_available(struct debug *debug)
+{
+	struct file *reader = debug->pv_filp;
+
+	if (reader)
+		fsnotify_modify(reader);
+
+	wake_up_interruptible(&debug->wq);
+}
+
+/* seq_file show: print the QM35 device id as "decaXXXX" (hex). */
+static int debug_devid_show(struct seq_file *s, void *unused)
+{
+	struct debug *debug = (struct debug *)s->private;
+	uint16_t dev_id;
+	int rc;
+
+	/* silently prints nothing when the backend lacks get_dev_id */
+	if (debug->trace_ops && debug->trace_ops->get_dev_id) {
+		rc = debug->trace_ops->get_dev_id(debug, &dev_id);
+		if (rc < 0)
+			return -EIO;
+		seq_printf(s, "deca%04x\n", dev_id);
+	}
+	return 0;
+}
+
+/* seq_file show: print the SoC id as a contiguous hex string. */
+static int debug_socid_show(struct seq_file *s, void *unused)
+{
+	struct debug *debug = (struct debug *)s->private;
+	uint8_t soc_id[ROM_SOC_ID_LEN];
+	int rc;
+
+	/* silently prints nothing when the backend lacks get_soc_id */
+	if (debug->trace_ops && debug->trace_ops->get_soc_id) {
+		rc = debug->trace_ops->get_soc_id(debug, soc_id);
+		if (rc < 0)
+			return -EIO;
+		seq_printf(s, "%*phN\n", ROM_SOC_ID_LEN, soc_id);
+	}
+	return 0;
+}
+
+/* generate debug_devid_fops / debug_socid_fops from the show functions */
+DEFINE_SHOW_ATTRIBUTE(debug_devid);
+DEFINE_SHOW_ATTRIBUTE(debug_socid);
+
+/*
+ * Called once the chip identification is known: publish read-only
+ * "dev_id" and "soc_id" files under uwb0/chip/. On failure the whole
+ * chip/ directory is torn down.
+ */
+void debug_soc_info_available(struct debug *debug)
+{
+	struct dentry *file;
+
+	file = debugfs_create_file("dev_id", 0444, debug->chip_dir, debug,
+				   &debug_devid_fops);
+	if (!file) {
+		pr_err("qm35: failed to create /sys/kernel/debug/uwb0/fw/dev_id\n");
+		goto unregister;
+	}
+
+	file = debugfs_create_file("soc_id", 0444, debug->chip_dir, debug,
+				   &debug_socid_fops);
+	if (!file) {
+		pr_err("qm35: failed to create /sys/kernel/debug/uwb0/fw/soc_id\n");
+		goto unregister;
+	}
+
+	return;
+
+unregister:
+	debugfs_remove_recursive(debug->chip_dir);
+}
+
+/*
+ * Create the top-level debugfs directory "uwb0" under @root (NULL means
+ * /sys/kernel/debug). Returns 0 on success, -1 on failure.
+ */
+int debug_init_root(struct debug *debug, struct dentry *root)
+{
+	debug->root_dir = debugfs_create_dir("uwb0", root);
+	if (!debug->root_dir) {
+		pr_err("qm35: failed to create /sys/kernel/debug/uwb0\n");
+		return -1;
+	}
+
+	return 0;
+}
+
+/*
+ * Build the debugfs hierarchy under uwb0/: chip/ (hw_reset; dev_id and
+ * soc_id are added later by debug_soc_info_available()) and fw/ (enable,
+ * traces, coredump, test_sleep_hsspi_ms, plus per-module directories via
+ * debug_create_module_entry()).
+ *
+ * Returns 0 on success, -1 on failure; on failure everything created so
+ * far is removed via debug_deinit().
+ */
+int debug_init(struct debug *debug)
+{
+	struct dentry *file;
+
+	init_waitqueue_head(&debug->wq);
+	mutex_init(&debug->pv_filp_lock);
+	debug->pv_filp = NULL;
+
+	debug->fw_dir = debugfs_create_dir("fw", debug->root_dir);
+	if (!debug->fw_dir) {
+		pr_err("qm35: failed to create /sys/kernel/debug/uwb0/fw\n");
+		goto unregister;
+	}
+
+	debug->chip_dir = debugfs_create_dir("chip", debug->root_dir);
+	if (!debug->chip_dir) {
+		pr_err("qm35: failed to create /sys/kernel/debug/uwb0/chip\n");
+		goto unregister;
+	}
+
+	/* hw_reset only implements write: 0200, not the previous 0444
+	 * which made the file unwritable for everyone */
+	file = debugfs_create_file("hw_reset", 0200, debug->chip_dir, debug,
+				   &debug_hw_reset_fops);
+	if (!file) {
+		pr_err("qm35: failed to create /sys/kernel/debug/uwb0/chip/hw_reset\n");
+		goto unregister;
+	}
+
+	file = debugfs_create_file("enable", 0644, debug->fw_dir, debug,
+				   &debug_enable_fops);
+	if (!file) {
+		pr_err("qm35: failed to create /sys/kernel/debug/uwb0/fw/enable\n");
+		goto unregister;
+	}
+
+	file = debugfs_create_file("traces", 0444, debug->fw_dir, debug,
+				   &debug_traces_fops);
+	if (!file) {
+		pr_err("qm35: failed to create /sys/kernel/debug/uwb0/fw/traces\n");
+		goto unregister;
+	}
+
+	/* coredump implements read AND write (force): 0644, not 0444 */
+	file = debugfs_create_file("coredump", 0644, debug->fw_dir, debug,
+				   &debug_coredump_fops);
+	if (!file) {
+		pr_err("qm35: failed to create /sys/kernel/debug/uwb0/fw/coredump\n");
+		goto unregister;
+	}
+
+	file = debugfs_create_file("test_sleep_hsspi_ms", 0200, debug->fw_dir,
+				   debug, &debug_test_hsspi_sleep_fops);
+	if (!file) {
+		/* message now matches the actual file name */
+		pr_err("qm35: failed to create /sys/kernel/debug/uwb0/fw/test_sleep_hsspi_ms\n");
+		goto unregister;
+	}
+
+#if IS_ENABLED(CONFIG_QM35_SPI_DEBUG_FW)
+	debug_rom_code_init(debug);
+#endif
+
+	return 0;
+
+unregister:
+	debug_deinit(debug);
+	return -1;
+}
+
+/*
+ * Tear down the debugfs hierarchy. Wakes any reader blocked on the trace
+ * waitqueue first so it is not left sleeping on a vanishing file.
+ */
+void debug_deinit(struct debug *debug)
+{
+	wake_up_interruptible(&debug->wq);
+	debugfs_remove_recursive(debug->root_dir);
+}
diff --git a/debug.h b/debug.h
new file mode 100644
index 0000000..6c78cc2
--- /dev/null
+++ b/debug.h
@@ -0,0 +1,90 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/*
+ * This file is part of the QM35 UCI stack for linux.
+ *
+ * Copyright (c) 2022 Qorvo US, Inc.
+ *
+ * This software is provided under the GNU General Public License, version 2
+ * (GPLv2), as well as under a Qorvo commercial license.
+ *
+ * You may choose to use this software under the terms of the GPLv2 License,
+ * version 2 ("GPLv2"), as published by the Free Software Foundation.
+ * You should have received a copy of the GPLv2 along with this program. If
+ * not, see <http://www.gnu.org/licenses/>.
+ *
+ * This program is distributed under the GPLv2 in the hope that it will be
+ * useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GPLv2 for more
+ * details.
+ *
+ * If you cannot meet the requirements of the GPLv2, you may not use this
+ * software for any purpose without first obtaining a commercial license from
+ * Qorvo.
+ * Please contact Qorvo to inquire about licensing terms.
+ *
+ * QM35 LOG layer HSSPI Protocol
+ */
+
+#ifndef __DEBUG_H__
+#define __DEBUG_H__
+
+#include <linux/fs.h>
+#include <linux/mutex.h>
+#include "qm35_rb.h"
+
+struct debug;
+struct log_module;
+
+/* Callbacks implemented by the LOG upper layer; drive firmware traces
+ * from the debugfs entries.
+ */
+struct debug_trace_ops {
+	void (*enable_set)(struct debug *dbg, int enable);
+	int (*enable_get)(struct debug *dbg);
+	void (*level_set)(struct debug *dbg, struct log_module *log_module,
+			  int lvl);
+	int (*level_get)(struct debug *dbg, struct log_module *log_module);
+	char *(*trace_get_next)(struct debug *dbg, rb_entry_size_t *len);
+	rb_entry_size_t (*trace_get_next_size)(struct debug *dbg);
+	bool (*trace_next_avail)(struct debug *dbg);
+	void (*trace_reset)(struct debug *dbg);
+	int (*get_dev_id)(struct debug *dbg, uint16_t *dev_id);
+	int (*get_soc_id)(struct debug *dbg, uint8_t *soc_id);
+};
+
+/* Callbacks implemented by the COREDUMP upper layer. */
+struct debug_coredump_ops {
+	char *(*coredump_get)(struct debug *dbg, size_t *len);
+	int (*coredump_force)(struct debug *dbg);
+};
+
+/**
+ * struct debug - debugfs state for one QM35 device.
+ * @root_dir: top-level debugfs directory (e.g. /sys/kernel/debug/uwb0)
+ * @fw_dir: "fw" sub-directory
+ * @chip_dir: "chip" sub-directory
+ * @trace_ops: trace hooks provided by the LOG layer
+ * @coredump_ops: hooks provided by the COREDUMP layer
+ * @wq: wait queue used to block readers until a new trace is available
+ * @pv_filp: file currently holding the trace stream, guarded by
+ *           @pv_filp_lock
+ * @pv_filp_lock: protects @pv_filp
+ * @certificate: debug certificate blob, if any
+ */
+struct debug {
+	struct dentry *root_dir;
+	struct dentry *fw_dir;
+	struct dentry *chip_dir;
+	const struct debug_trace_ops *trace_ops;
+	const struct debug_coredump_ops *coredump_ops;
+	struct wait_queue_head wq;
+	struct file *pv_filp;
+	struct mutex pv_filp_lock;
+	struct firmware *certificate;
+};
+
+// TODO move this from here to a commom place for both log layer and debug
+struct log_module {
+	uint8_t id;
+	uint8_t lvl;
+	char name[64];
+	struct completion *read_done;
+	struct debug *debug;
+};
+
+int debug_init_root(struct debug *debug, struct dentry *root);
+int debug_init(struct debug *debug);
+void debug_deinit(struct debug *debug);
+
+int debug_create_module_entry(struct debug *debug,
+			      struct log_module *log_module);
+
+void debug_new_trace_available(struct debug *debug);
+
+void debug_soc_info_available(struct debug *debug);
+
+#endif // __DEBUG_H__
diff --git a/debug_qmrom.c b/debug_qmrom.c
new file mode 100644
index 0000000..8c77bd3
--- /dev/null
+++ b/debug_qmrom.c
@@ -0,0 +1,341 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * This file is part of the QM35 UCI stack for linux.
+ *
+ * Copyright (c) 2022 Qorvo US, Inc.
+ *
+ * This software is provided under the GNU General Public License, version 2
+ * (GPLv2), as well as under a Qorvo commercial license.
+ *
+ * You may choose to use this software under the terms of the GPLv2 License,
+ * version 2 ("GPLv2"), as published by the Free Software Foundation.
+ * You should have received a copy of the GPLv2 along with this program. If
+ * not, see <http://www.gnu.org/licenses/>.
+ *
+ * This program is distributed under the GPLv2 in the hope that it will be
+ * useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GPLv2 for more
+ * details.
+ *
+ * If you cannot meet the requirements of the GPLv2, you may not use this
+ * software for any purpose without first obtaining a commercial license from
+ * Qorvo.
+ * Please contact Qorvo to inquire about licensing terms.
+ *
+ * QM35 LOG layer HSSPI Protocol
+ */
+
+#include <linux/version.h>
+#include <linux/printk.h>
+#include <linux/debugfs.h>
+#include <linux/fs.h>
+#include <linux/fsnotify.h>
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 10, 0))
+#include <linux/kernel_read_file.h>
+#endif
+#include <linux/poll.h>
+#include <linux/vmalloc.h>
+
+#include <qmrom.h>
+#include <qmrom_log.h>
+#include <qmrom_spi.h>
+
+#include "qm35.h"
+#include "debug.h"
+
+#define QMROM_RETRIES 10
+
+/* Retrieve the private pointer handed to debugfs_create_file(). */
+static void *priv_from_file(const struct file *filp)
+{
+	return filp->f_path.dentry->d_inode->i_private;
+}
+
+/* Stop regular HSSPI traffic and raise the qmrom log verbosity before
+ * talking to the QM35 ROM code through one of the debugfs entries.
+ * Must be paired with rom_test_unprepare().
+ */
+static struct qm35_ctx *rom_test_prepare(struct file *filp)
+{
+	struct debug *debug = priv_from_file(filp);
+	struct qm35_ctx *qm35_hdl = container_of(debug, struct qm35_ctx, debug);
+
+	qm35_hsspi_stop(qm35_hdl);
+	qmrom_set_log_device(&qm35_hdl->spi->dev, LOG_DBG);
+	return qm35_hdl;
+}
+
+/* Counterpart of rom_test_prepare(): restore the log level and restart
+ * the HSSPI traffic.
+ */
+static void rom_test_unprepare(struct qm35_ctx *qm35_hdl)
+{
+	qmrom_set_log_device(&qm35_hdl->spi->dev, LOG_WARN);
+	qm35_hsspi_start(qm35_hdl);
+}
+
+/**
+ * file2firmware() - load a file from disk into a struct firmware.
+ * @filename: path of the file to read.
+ * @file_size: unused; kept for interface compatibility with callers.
+ *
+ * The data buffer is allocated by kernel_read_file_from_path() and must
+ * be released with vfree(); the struct itself with kfree().
+ *
+ * Return: the allocated firmware, or NULL on error.
+ */
+static struct firmware *file2firmware(const char *filename, size_t file_size)
+{
+	/* kzalloc() is the idiomatic form of kmalloc(GFP_KERNEL | __GFP_ZERO) */
+	struct firmware *firmware = kzalloc(sizeof(*firmware), GFP_KERNEL);
+	if (!firmware) {
+		goto fail;
+	}
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 10, 0))
+	{
+		ssize_t bytes_read = kernel_read_file_from_path(
+			filename, 0, (void **)&firmware->data, INT_MAX,
+			&firmware->size, READING_FIRMWARE);
+		if (bytes_read < 0) {
+			pr_err("kernel_read_file_from_path(%s) returned %d\n",
+			       filename, (int)bytes_read);
+			goto fail;
+		}
+		/* bytes_read is ssize_t: print with %zd, not %zu */
+		if (bytes_read != firmware->size) {
+			pr_err("kernel_read_file_from_path returned %zd; expected %zu\n",
+			       bytes_read, firmware->size);
+			goto fail;
+		}
+	}
+#else
+	{
+		loff_t size = 0;
+		int ret = kernel_read_file_from_path(filename,
+						     (void **)&firmware->data,
+						     &size, INT_MAX,
+						     READING_FIRMWARE);
+		if (ret < 0) {
+			pr_err("kernel_read_file_from_path(%s) returned %d\n",
+			       filename, ret);
+			goto fail;
+		}
+		firmware->size = (size_t)size;
+	}
+#endif
+
+	print_hex_dump(KERN_DEBUG, "Bin file:", DUMP_PREFIX_ADDRESS, 16, 1,
+		       firmware->data, 16, false);
+	return firmware;
+
+fail:
+	if (firmware) {
+		if (firmware->data)
+			vfree(firmware->data);
+		kfree(firmware);
+	}
+	return NULL;
+}
+
+/* debugfs write handler: probe the ROM code and dump chip identity.
+ * The written data itself is ignored; any write triggers the probe.
+ * Always consumes @count so the write never fails from user space.
+ */
+static ssize_t rom_probe(struct file *filp, const char __user *buff,
+			 size_t count, loff_t *off)
+{
+	struct qm35_ctx *qm35_hdl = rom_test_prepare(filp);
+	struct qmrom_handle *h;
+
+	pr_info("Starting the probe test...\n");
+	h = qmrom_init(&qm35_hdl->spi->dev, qm35_hdl, qm35_hdl->gpio_ss_rdy,
+		       QMROM_RETRIES, qmrom_spi_reset_device);
+	if (!h) {
+		pr_err("qmrom_init failed\n");
+		goto end;
+	}
+
+	pr_info("chip_revision %#2x\n", h->chip_rev);
+	pr_info("device version %#02x\n", h->device_version);
+	/* A0 parts (0xa001) do not expose lcs_state/soc_id/uuid */
+	if (h->chip_rev != 0xa001) {
+		pr_info("lcs_state %d\n", h->lcs_state);
+		print_hex_dump(KERN_DEBUG, "soc_id:", DUMP_PREFIX_NONE, 16, 1,
+			       h->soc_id, sizeof(h->soc_id), false);
+		print_hex_dump(KERN_DEBUG, "uuid:", DUMP_PREFIX_NONE, 16, 1,
+			       h->uuid, sizeof(h->uuid), false);
+	}
+	qmrom_deinit(h);
+
+end:
+	rom_test_unprepare(qm35_hdl);
+	return count;
+}
+
+/* debugfs write handler: the written string is the path of a debug
+ * certificate file to flash into the QM35 through the ROM code.
+ *
+ * Return: @count on success (including ROM-level failures, which are
+ * only logged), or a negative errno on setup errors.
+ */
+static ssize_t rom_flash_dbg_cert(struct file *filp, const char __user *buff,
+				  size_t count, loff_t *off)
+{
+	struct qm35_ctx *qm35_hdl = rom_test_prepare(filp);
+	struct firmware *certificate = NULL;
+	struct qmrom_handle *h = NULL;
+	char *filename = NULL;
+	int err;
+
+	/* Guard the filename[count - 1] access below against a
+	 * zero-length write.
+	 */
+	if (!count) {
+		count = -EINVAL;
+		goto end;
+	}
+
+	filename = kmalloc(count, GFP_KERNEL);
+	if (!filename) {
+		count = -ENOMEM;
+		goto end;
+	}
+
+	err = copy_from_user(filename, buff, count);
+	if (err) {
+		/* copy_from_user() returns the number of bytes NOT
+		 * copied, not an errno: report -EFAULT to user space
+		 * instead of a positive (success-looking) value.
+		 */
+		pr_err("copy_from_user failed with error %d\n", err);
+		count = -EFAULT;
+		goto end;
+	}
+	filename[count - 1] = '\0';
+	certificate = file2firmware(filename, DEBUG_CERTIFICATE_SIZE);
+	if (!certificate) {
+		pr_err("%s: file retrieval failed, abort\n", __func__);
+		count = -1;
+		goto end;
+	}
+
+	/* Flash the debug certificate */
+	pr_info("Flashing debug certificate %s...\n", filename);
+
+	h = qmrom_init(&qm35_hdl->spi->dev, qm35_hdl, qm35_hdl->gpio_ss_rdy,
+		       QMROM_RETRIES, qmrom_spi_reset_device);
+	if (!h) {
+		pr_err("qmrom_init failed\n");
+		goto end;
+	}
+	err = qmrom_flash_dbg_cert(h, certificate);
+	if (err)
+		pr_err("Flashing debug certificate %s failed with %d!\n",
+		       filename, err);
+	else
+		pr_info("Flashing debug certificate %s succeeded!\n", filename);
+
+end:
+	if (h)
+		qmrom_deinit(h);
+	if (certificate) {
+		if (certificate->data)
+			vfree(certificate->data);
+		kfree(certificate);
+	}
+	/* filename was leaked on every path in the original code */
+	kfree(filename);
+	rom_test_unprepare(qm35_hdl);
+	return count;
+}
+
+/* debugfs write handler: erase the debug certificate stored in the
+ * QM35 through the ROM code. The written data is ignored; any write
+ * triggers the erase. Always consumes @count.
+ */
+static ssize_t rom_erase_dbg_cert(struct file *filp, const char __user *buff,
+				  size_t count, loff_t *off)
+{
+	struct qm35_ctx *qm35_hdl = rom_test_prepare(filp);
+	struct qmrom_handle *h = NULL;
+	int err;
+
+	pr_info("Erasing debug certificate...\n");
+
+	h = qmrom_init(&qm35_hdl->spi->dev, qm35_hdl, qm35_hdl->gpio_ss_rdy,
+		       QMROM_RETRIES, qmrom_spi_reset_device);
+	if (!h) {
+		pr_err("qmrom_init failed\n");
+		goto end;
+	}
+	err = qmrom_erase_dbg_cert(h);
+	if (err)
+		pr_err("Erasing debug certificate failed with %d!\n", err);
+	else
+		pr_info("Erasing debug certificate succeeded!\n");
+
+end:
+	if (h)
+		qmrom_deinit(h);
+	rom_test_unprepare(qm35_hdl);
+	return count;
+}
+
+/* debugfs write handler: the written string is the path of a firmware
+ * image to flash into the QM35 through the ROM code.
+ *
+ * Return: @count on success (ROM-level failures are only logged), or a
+ * negative errno on setup errors.
+ */
+static ssize_t rom_flash_fw(struct file *filp, const char __user *buff,
+			    size_t count, loff_t *off)
+{
+	struct qm35_ctx *qm35_hdl = rom_test_prepare(filp);
+	struct firmware *fw = NULL;
+	struct qmrom_handle *h = NULL;
+	char *filename = NULL;
+	int rc;
+
+	/* Guard the filename[count - 1] access below against a
+	 * zero-length write.
+	 */
+	if (!count) {
+		count = -EINVAL;
+		goto end;
+	}
+
+	filename = kmalloc(count, GFP_KERNEL);
+	if (!filename) {
+		count = -ENOMEM;
+		goto end;
+	}
+
+	rc = copy_from_user(filename, buff, count);
+	if (rc) {
+		/* copy_from_user() returns the number of bytes NOT
+		 * copied; the original code fell through to "return
+		 * count" here, reporting success on a faulty copy.
+		 */
+		pr_err("copy_from_user failed with error %d\n", rc);
+		count = -EFAULT;
+		goto end;
+	}
+	filename[count - 1] = '\0';
+	fw = file2firmware(filename, 0);
+	if (!fw) {
+		pr_err("%s: file retrieval failed, abort\n", __func__);
+		rc = -1;
+		goto end;
+	}
+
+	pr_info("Flashing image %s (%pK->data %pK)...\n", filename, fw,
+		fw->data);
+
+	h = qmrom_init(&qm35_hdl->spi->dev, qm35_hdl, qm35_hdl->gpio_ss_rdy,
+		       QMROM_RETRIES, qmrom_spi_reset_device);
+	if (!h) {
+		pr_err("qmrom_init failed\n");
+		rc = -1;
+		goto end;
+	}
+	rc = qmrom_flash_fw(h, fw);
+	if (rc)
+		pr_err("Flashing firmware %s failed with %d!\n", filename, rc);
+	else
+		pr_info("Flashing firmware %s succeeded!\n", filename);
+
+end:
+	if (h)
+		qmrom_deinit(h);
+	if (fw) {
+		if (fw->data)
+			vfree(fw->data);
+		kfree(fw);
+	}
+	/* filename was leaked on every path in the original code */
+	kfree(filename);
+	rom_test_unprepare(qm35_hdl);
+	return count;
+}
+
+/* Write-only debugfs file operations for the ROM-code test entries. */
+static const struct file_operations rom_probe_fops = { .owner = THIS_MODULE,
+						       .write = rom_probe };
+
+static const struct file_operations rom_flash_dbg_cert_fops = {
+	.owner = THIS_MODULE,
+	.write = rom_flash_dbg_cert
+};
+
+static const struct file_operations rom_erase_dbg_cert_fops = {
+	.owner = THIS_MODULE,
+	.write = rom_erase_dbg_cert
+};
+
+static const struct file_operations rom_flash_fw_fops = {
+	.owner = THIS_MODULE,
+	.write = rom_flash_fw
+};
+
+/**
+ * debug_rom_code_init() - add the ROM-code test entries under fw/.
+ * @debug: debug context; ->fw_dir must already exist.
+ *
+ * Failures are logged and abort the remaining creations; the caller
+ * (debug_init()) does not treat them as fatal.
+ */
+void debug_rom_code_init(struct debug *debug)
+{
+	struct dentry *file;
+	file = debugfs_create_file("rom_probe", 0200, debug->fw_dir, debug,
+				   &rom_probe_fops);
+	if (!file) {
+		pr_err("qm35: failed to create uwb0/fw/rom_probe\n");
+		return;
+	}
+
+	file = debugfs_create_file("rom_flash_dbg_cert", 0200, debug->fw_dir,
+				   debug, &rom_flash_dbg_cert_fops);
+	if (!file) {
+		pr_err("qm35: failed to create uwb0/fw/rom_flash_dbg_cert\n");
+		return;
+	}
+
+	file = debugfs_create_file("rom_erase_dbg_cert", 0200, debug->fw_dir,
+				   debug, &rom_erase_dbg_cert_fops);
+	if (!file) {
+		pr_err("qm35: failed to create uwb0/fw/rom_erase_dbg_cert\n");
+		return;
+	}
+
+	file = debugfs_create_file("rom_flash_fw", 0200, debug->fw_dir, debug,
+				   &rom_flash_fw_fops);
+	if (!file) {
+		pr_err("qm35: failed to create uwb0/fw/rom_flash_fw\n");
+		return;
+	}
+}
diff --git a/hsspi.c b/hsspi.c
new file mode 100644
index 0000000..1170ca1
--- /dev/null
+++ b/hsspi.c
@@ -0,0 +1,745 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * This file is part of the QM35 UCI stack for linux.
+ *
+ * Copyright (c) 2021 Qorvo US, Inc.
+ *
+ * This software is provided under the GNU General Public License, version 2
+ * (GPLv2), as well as under a Qorvo commercial license.
+ *
+ * You may choose to use this software under the terms of the GPLv2 License,
+ * version 2 ("GPLv2"), as published by the Free Software Foundation.
+ * You should have received a copy of the GPLv2 along with this program. If
+ * not, see <http://www.gnu.org/licenses/>.
+ *
+ * This program is distributed under the GPLv2 in the hope that it will be
+ * useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GPLv2 for more
+ * details.
+ *
+ * If you cannot meet the requirements of the GPLv2, you may not use this
+ * software for any purpose without first obtaining a commercial license from
+ * Qorvo.
+ * Please contact Qorvo to inquire about licensing terms.
+ *
+ * QM35 HSSPI Protocol
+ */
+
+#include <linux/kernel.h>
+#include <linux/delay.h>
+
+#include "qm35-trace.h"
+#include "hsspi.h"
+
+/* STC HOST flags */
+#define STC_HOST_WR BIT(7)
+#define STC_HOST_PRD BIT(6)
+#define STC_HOST_RD BIT(5)
+
+/* STC SOC flags */
+#define STC_SOC_ODW BIT(7)
+#define STC_SOC_OA BIT(6)
+#define STC_SOC_RDY BIT(5)
+#define STC_SOC_ERR BIT(4)
+
+/* How long to wait for the ss_ready GPIO before giving up one attempt */
+#define SS_READY_TIMEOUT_MS (250)
+
+/* After this many failed transfers in a row, the QM35 is reset */
+#define MAX_SUCCESSIVE_ERRORS (5)
+#define SPI_CS_SETUP_DELAY_US (5)
+
+#ifdef HSSPI_MANUAL_CS_SETUP
+#define HSSPI_MANUAL_CS_SETUP_US SPI_CS_SETUP_DELAY_US
+#endif
+
+/* One unit of work for the HSSPI thread: either a TX request from an
+ * upper layer, or a completion used as a synchronization barrier
+ * (always stack-allocated by the waiter).
+ */
+struct hsspi_work {
+	struct list_head list;
+	enum hsspi_work_type type;
+	union {
+		struct {
+			struct hsspi_block *blk;
+			struct hsspi_layer *layer;
+		} tx;
+		struct completion *completion;
+	};
+};
+
+/* Extra delay inserted after ss_ready before each transfer; 0 disables
+ * it. NOTE(review): presumably set via the fw/test_sleep_hsspi_ms
+ * debugfs entry -- confirm against hsspi_test.c.
+ */
+int test_sleep_after_ss_ready_us = 0;
+
+/* An upper-layer id is valid if it indexes into hsspi->layers[] */
+static inline bool layer_id_is_valid(struct hsspi *hsspi, u8 ul)
+{
+	return (ul < ARRAY_SIZE(hsspi->layers));
+}
+
+/**
+ * get_work() - get a work from the list
+ *
+ * @hsspi: &struct hsspi
+ *
+ * Return: a &struct hsspi_work or NULL if the list is empty.
+ * The work can be:
+ * - a TX work enqueued by hsspi_send
+ * - a COMPLETION work used for synchronization (always allocated on stack)
+ */
+static struct hsspi_work *get_work(struct hsspi *hsspi)
+{
+	struct hsspi_work *hw;
+
+	spin_lock(&hsspi->lock);
+
+	/* Pop the oldest work; hsspi->lock protects the list */
+	hw = list_first_entry_or_null(&hsspi->work_list, struct hsspi_work,
+				      list);
+	if (hw)
+		list_del(&hw->list);
+
+	spin_unlock(&hsspi->lock);
+
+	trace_hsspi_get_work(&hsspi->spi->dev, hw ? hw->type : -1);
+	return hw;
+}
+
+/**
+ * is_txrx_waiting() - is there something to do
+ *
+ * @hsspi: &struct hsspi
+ *
+ * Used as the wait condition of the HSSPI thread.
+ *
+ * Return: True if there is a TX work available or if the SS_IRQ flag
+ * is set. False otherwise.
+ */
+static bool is_txrx_waiting(struct hsspi *hsspi)
+{
+	enum hsspi_state state;
+	bool is_empty;
+
+	spin_lock(&hsspi->lock);
+
+	is_empty = list_empty(&hsspi->work_list);
+	state = hsspi->state;
+
+	spin_unlock(&hsspi->lock);
+
+	trace_hsspi_is_txrx_waiting(&hsspi->spi->dev, is_empty, state);
+	/*
+	 * There is no work in the list but we must check SS_IRQ to
+	 * know if we need to make an empty TX transfer with PRE_READ
+	 * flag set.
+	 */
+	return !is_empty || ((state == HSSPI_RUNNING) &&
+			     test_bit(HSSPI_FLAGS_SS_IRQ, hsspi->flags));
+}
+
+/**
+ * hsspi_wait_ss_ready() - waits for ss_ready to be up
+ *
+ * @hsspi: &struct hsspi
+ *
+ * If the firmware is not busy and ss_ready is already high, returns
+ * immediately. Otherwise wakes the chip if needed (exton low) and
+ * blocks up to SS_READY_TIMEOUT_MS for the ss_ready interrupt.
+ *
+ * Return: 0 if ss_ready is up, -EAGAIN on timeout or spurious toggle,
+ * or a negative error from the interruptible wait.
+ */
+static int hsspi_wait_ss_ready(struct hsspi *hsspi)
+{
+	int ret;
+
+	hsspi->waiting_ss_rdy = true;
+
+	if (!test_bit(HSSPI_FLAGS_SS_BUSY, hsspi->flags)) {
+		/* The ss_ready went low, so the fw is not busy anymore,
+		 * if the ss_ready is high, we can proceed, else,
+		 * either the fw went to sleep or crashed, in any case
+		 * we need to wait for it to be ready again.
+		 */
+		clear_bit(HSSPI_FLAGS_SS_READY, hsspi->flags);
+		if (gpiod_get_value(hsspi->gpio_ss_rdy)) {
+			return 0;
+		}
+	}
+
+	/* Check if the QM went to sleep and wake it up if it did */
+	if (!gpiod_get_value(hsspi->gpio_exton)) {
+		hsspi->wakeup(hsspi);
+	}
+
+	ret = wait_event_interruptible_timeout(
+		hsspi->wq_ready,
+		test_and_clear_bit(HSSPI_FLAGS_SS_READY, hsspi->flags),
+		msecs_to_jiffies(SS_READY_TIMEOUT_MS));
+	if (ret == 0) {
+		dev_warn(&hsspi->spi->dev,
+			 "timed out waiting for ss_ready(%d)\n",
+			 test_bit(HSSPI_FLAGS_SS_READY, hsspi->flags));
+		return -EAGAIN;
+	}
+	if (ret < 0) {
+		dev_err(&hsspi->spi->dev,
+			"Error %d while waiting for ss_ready\n", ret);
+		return ret;
+	}
+	/* WA: QM35 C0 have a very short (<100ns) ss_ready toggle
+	 * in the ROM code when waking up from S4. If we transfer immediately,
+	 * the ROM code will enter its command mode and we'll end up
+	 * communicating with the ROM code instead of the firmware.
+	 */
+	if (!gpiod_get_value(hsspi->gpio_ss_rdy))
+		return -EAGAIN;
+
+	hsspi->waiting_ss_rdy = false;
+	return 0;
+}
+
+#ifdef HSSPI_MANUAL_CS_SETUP
+/* Drive the chip-select line manually through a dummy transfer, for
+ * controllers whose native CS setup time is too short for the QM35.
+ */
+int hsspi_set_cs_level(struct spi_device *spi, int level)
+{
+	struct spi_transfer xfer[] = {
+		{
+			.cs_change = !level,
+			.speed_hz = 1000000,
+		},
+	};
+
+	return spi_sync_transfer(spi, xfer, ARRAY_SIZE(xfer));
+}
+#endif
+
+/**
+ * spi_xfer() - Single SPI transfer
+ *
+ * @hsspi: &struct hsspi
+ * @tx: tx payload (may be NULL for a header-only transfer)
+ * @rx: rx payload (may be NULL)
+ * @length: payload length
+ *
+ * Sends the pre-filled host STC header (plus optional payload) and
+ * retries up to 5 times while the firmware is not ready, waking the
+ * chip between attempts.
+ *
+ * Return: 0 on success or a negative error.
+ */
+static int spi_xfer(struct hsspi *hsspi, const void *tx, void *rx,
+		    size_t length)
+{
+	struct spi_transfer xfers[2] = {
+		{
+			.tx_buf = hsspi->host,
+			.rx_buf = hsspi->soc,
+			.len = sizeof(*(hsspi->host)),
+		},
+		{
+			.tx_buf = tx,
+			.rx_buf = rx,
+			.len = length,
+		},
+	};
+	int ret, retry = 5;
+
+	hsspi->soc->flags = 0;
+	hsspi->soc->ul = 0;
+	hsspi->soc->length = 0;
+
+	do {
+		ret = hsspi_wait_ss_ready(hsspi);
+		if (ret < 0) {
+			continue;
+		}
+
+		/* Optional test hook: delay between ss_ready and xfer */
+		if (test_sleep_after_ss_ready_us > 0)
+			usleep_range(test_sleep_after_ss_ready_us,
+				     test_sleep_after_ss_ready_us + 1);
+
+		hsspi_set_spi_slave_busy(hsspi);
+#ifdef HSSPI_MANUAL_CS_SETUP
+		hsspi_set_cs_level(hsspi->spi, 0);
+		udelay(HSSPI_MANUAL_CS_SETUP_US);
+#endif
+		/* Skip the payload transfer when there is no payload */
+		ret = spi_sync_transfer(hsspi->spi, xfers, length ? 2 : 1);
+
+		trace_hsspi_spi_xfer(&hsspi->spi->dev, hsspi->host, hsspi->soc,
+				     ret);
+
+		if (ret) {
+			dev_err(&hsspi->spi->dev, "spi_sync_transfer: %d\n",
+				ret);
+			continue;
+		}
+
+		/* Firmware not ready or error flag raised: wake it up
+		 * and retry.
+		 */
+		if (!(hsspi->soc->flags & STC_SOC_RDY) ||
+		    (hsspi->soc->flags & 0x0f)) {
+			hsspi->wakeup(hsspi);
+			ret = -EAGAIN;
+			continue;
+		}
+
+		/* All looks good! */
+		break;
+	} while ((ret == -EAGAIN) && (--retry > 0));
+
+	if (!(hsspi->soc->flags & STC_SOC_RDY) || (hsspi->soc->flags & 0x0f)) {
+		dev_err(&hsspi->spi->dev, "FW not ready (flags %#02x)\n",
+			hsspi->soc->flags);
+	}
+
+	hsspi->waiting_ss_rdy = false;
+	return ret;
+}
+
+/* Validate the SOC flags of the last transfer: on TX no error/OA bits
+ * are expected, on RX the Output Active bit must be set. Only warns;
+ * the caller decides whether this is fatal.
+ */
+static bool check_soc_flag(const struct device *dev, const char *func_name,
+			   u8 soc_flags, bool is_tx)
+{
+	u8 expected;
+	bool res;
+
+	expected = is_tx ? 0x0 : STC_SOC_OA;
+	res = (soc_flags & (STC_SOC_ERR | STC_SOC_OA)) == expected;
+
+	if (!res) {
+		dev_warn(dev, "%s: bad soc flags %#hhx, expected %#hhx\n",
+			 func_name, soc_flags, expected);
+	}
+	return res;
+}
+
+/**
+ * hsspi_rx() - request data from the QM35 on the HSSPI
+ *
+ * @hsspi: &struct hsspi
+ * @ul: upper layer id
+ * @length: length of data requested
+ *
+ * Asks the target upper layer for a reception buffer; if none is
+ * available the payload is still clocked out (and dropped) with a
+ * header-only transfer. Clears the out-of-band SS_IRQ flag when the
+ * SOC reports no more output data waiting.
+ *
+ * Return: 0 on success, negative value on error.
+ */
+static int hsspi_rx(struct hsspi *hsspi, u8 ul, u16 length)
+{
+	struct hsspi_layer *layer;
+	struct hsspi_block *blk;
+	int ret;
+
+	hsspi->host->flags = STC_HOST_RD;
+	hsspi->host->ul = ul;
+	hsspi->host->length = length;
+
+	if (layer_id_is_valid(hsspi, ul)) {
+		spin_lock(&hsspi->lock);
+		layer = hsspi->layers[ul];
+		spin_unlock(&hsspi->lock);
+	} else
+		layer = NULL;
+
+	blk = layer ? layer->ops->get(layer, length) : NULL;
+	if (blk) {
+		ret = spi_xfer(hsspi, NULL, blk->data, blk->size);
+
+		layer->ops->received(layer, blk, ret);
+	} else
+		ret = spi_xfer(hsspi, NULL, NULL, 0);
+
+	if (ret)
+		return ret;
+
+	if (!check_soc_flag(&hsspi->spi->dev, __func__, hsspi->soc->flags,
+			    false)) {
+		ret = -1;
+	}
+
+	/* The SOC must echo the requested layer id and length */
+	if ((hsspi->soc->ul != ul) || (hsspi->soc->length != length)) {
+		dev_warn(&hsspi->spi->dev,
+			 "%s: received %hhu %hu but expecting %hhu %hu\n",
+			 __func__, hsspi->soc->ul, hsspi->soc->length, ul,
+			 length);
+		ret = -1;
+	}
+
+	/* No more output data waiting: acknowledge the SS_IRQ */
+	if (!(hsspi->soc->flags & STC_SOC_ODW) &&
+	    test_and_clear_bit(HSSPI_FLAGS_SS_IRQ, hsspi->flags))
+		hsspi->odw_cleared(hsspi);
+
+	return ret;
+}
+
+/**
+ * hsspi_tx() - send a hsspi block to the QM35 on the HSSPI
+ *
+ * @hsspi: &struct hsspi
+ * @layer: &struct hsspi_layer
+ * @blk: &struct hsspi_block
+ *
+ * It also adds PRD flag if SS_IRQ is set. Therefore it will try a RX
+ * transfer accordingly.
+ *
+ * Return: 0 on success, negative value on error.
+ */
+static int hsspi_tx(struct hsspi *hsspi, struct hsspi_layer *layer,
+		    struct hsspi_block *blk)
+{
+	int ret;
+
+	hsspi->host->flags = STC_HOST_WR;
+	hsspi->host->ul = layer->id;
+	hsspi->host->length = blk->length;
+
+	if (test_bit(HSSPI_FLAGS_SS_IRQ, hsspi->flags))
+		hsspi->host->flags |= STC_HOST_PRD;
+
+	ret = spi_xfer(hsspi, blk->data, NULL, blk->size);
+
+	layer->ops->sent(layer, blk, ret);
+
+	if (ret)
+		return ret;
+
+	/* Ignore tx check flags */
+	check_soc_flag(&hsspi->spi->dev, __func__, hsspi->soc->flags, true);
+
+	/* The pre-read announced the next RX size: fetch it now */
+	if (hsspi->host->flags & STC_HOST_PRD)
+		return hsspi_rx(hsspi, hsspi->soc->ul, hsspi->soc->length);
+
+	return ret;
+}
+
+/**
+ * hsspi_pre_read() - send a PRE_READ with no TX and do RX
+ *
+ * @hsspi: &struct hsspi
+ *
+ * This function is a particular case of hsspi_tx with no layer or
+ * blk.
+ *
+ * Return: 0 on success, negative value on error.
+ */
+static int hsspi_pre_read(struct hsspi *hsspi)
+{
+	int ret;
+
+	hsspi->host->flags = STC_HOST_PRD;
+	hsspi->host->ul = 0;
+	hsspi->host->length = 0;
+
+	ret = spi_xfer(hsspi, NULL, NULL, 0);
+	if (ret)
+		return ret;
+
+	/* Ignore pre-read check flags */
+	check_soc_flag(&hsspi->spi->dev, __func__, hsspi->soc->flags, true);
+
+	return hsspi_rx(hsspi, hsspi->soc->ul, hsspi->soc->length);
+}
+
+/**
+ * hsspi_thread_fn() - the thread that manage all SPI transfers
+ * @data: the &struct hsspi
+ *
+ * Sleeps until a TX work is queued or the SS_IRQ flag is raised, then
+ * performs the corresponding transfer. After MAX_SUCCESSIVE_ERRORS
+ * consecutive failures the QM35 is reset.
+ *
+ * Return: 0 on kthread_stop(), or the error from the interruptible wait.
+ */
+static int hsspi_thread_fn(void *data)
+{
+	struct hsspi *hsspi = data;
+	/* Per-thread error counter. This was `static`, which would be
+	 * shared (and raced on) by the threads of all HSSPI instances.
+	 */
+	int successive_errors = 0;
+
+	while (1) {
+		struct hsspi_work *hw;
+		int ret;
+
+		ret = wait_event_interruptible(hsspi->wq,
+					       is_txrx_waiting(hsspi) ||
+						       kthread_should_stop());
+		if (ret)
+			return ret;
+
+		if (kthread_should_stop())
+			break;
+
+		hw = get_work(hsspi);
+		if (hw) {
+			if (hw->type == HSSPI_WORK_COMPLETION) {
+				complete(hw->completion);
+				/* on the stack no need to free */
+				continue;
+			} else if (hw->type == HSSPI_WORK_TX) {
+				ret = hsspi_tx(hsspi, hw->tx.layer, hw->tx.blk);
+				kfree(hw);
+			} else {
+				dev_err(&hsspi->spi->dev,
+					"unknown hsspi_work type: %d\n",
+					hw->type);
+				continue;
+			}
+		} else
+			/* If there is no work, we are here because
+			 * SS_IRQ is set.
+			 */
+			ret = hsspi_pre_read(hsspi);
+
+		if (ret) {
+			successive_errors++;
+
+			if (successive_errors > MAX_SUCCESSIVE_ERRORS) {
+				dev_err(&hsspi->spi->dev,
+					"Max successive errors %d reached, likely entered ROM code...\n",
+					successive_errors);
+
+				/* When the device reboots, the ROM code might raise
+				 * ss_ready; if a SPI transfer is requested, the AP
+				 * will initiate the SPI xfer and the ROM code will
+				 * enter its command mode infinite loop...
+				 * No choice but rebooting the device.
+				 */
+				hsspi->reset_qm35(hsspi);
+				successive_errors = 0;
+			}
+		} else {
+			successive_errors = 0;
+		}
+	}
+	return 0;
+}
+
+/**
+ * hsspi_init() - initialize the HSSPI context and start its thread.
+ * @hsspi: context to initialize (fully overwritten).
+ * @spi: underlying SPI device.
+ *
+ * Return: 0 on success, -ENOMEM if the STC header buffers cannot be
+ * allocated, or the kthread_create() error.
+ */
+int hsspi_init(struct hsspi *hsspi, struct spi_device *spi)
+{
+	memset(hsspi, 0, sizeof(*hsspi));
+
+	spin_lock_init(&hsspi->lock);
+	INIT_LIST_HEAD(&hsspi->work_list);
+
+	hsspi->state = HSSPI_STOPPED;
+	hsspi->spi = spi;
+
+	init_waitqueue_head(&hsspi->wq);
+	init_waitqueue_head(&hsspi->wq_ready);
+
+	/* DMA-able bounce buffers for the host and SOC STC headers.
+	 * The original code used them without checking the allocation.
+	 */
+	hsspi->host = kmalloc(sizeof(*(hsspi->host)), GFP_KERNEL | GFP_DMA);
+	hsspi->soc = kmalloc(sizeof(*(hsspi->soc)), GFP_KERNEL | GFP_DMA);
+	if (!hsspi->host || !hsspi->soc)
+		goto err_free;
+
+	hsspi->thread = kthread_create(hsspi_thread_fn, hsspi, "hsspi");
+	if (IS_ERR(hsspi->thread))
+		goto err_free;
+
+	wake_up_process(hsspi->thread);
+
+	dev_info(&hsspi->spi->dev, "HSSPI initialized\n");
+	return 0;
+
+err_free:
+	/* Also fixes the host/soc leak when kthread_create() failed */
+	kfree(hsspi->host);
+	kfree(hsspi->soc);
+	hsspi->host = NULL;
+	hsspi->soc = NULL;
+	return IS_ERR(hsspi->thread) ? PTR_ERR(hsspi->thread) : -ENOMEM;
+}
+
+/* Record the ss_ready and exton GPIO descriptors used by the transfer
+ * path (hsspi_wait_ss_ready()).
+ */
+void hsspi_set_gpios(struct hsspi *hsspi, struct gpio_desc *gpio_ss_rdy,
+		     struct gpio_desc *gpio_exton)
+{
+	hsspi->gpio_ss_rdy = gpio_ss_rdy;
+	hsspi->gpio_exton = gpio_exton;
+}
+
+/**
+ * hsspi_deinit() - stop the HSSPI thread and release its resources.
+ * @hsspi: HSSPI context.
+ *
+ * Refuses to tear down while the HSSPI is running or while any upper
+ * layer is still registered.
+ *
+ * Return: 0 on success, -EBUSY otherwise.
+ */
+int hsspi_deinit(struct hsspi *hsspi)
+{
+	int i;
+
+	spin_lock(&hsspi->lock);
+
+	if (hsspi->state == HSSPI_RUNNING) {
+		spin_unlock(&hsspi->lock);
+		return -EBUSY;
+	}
+
+	for (i = 0; i < ARRAY_SIZE(hsspi->layers); i++) {
+		if (!hsspi->layers[i])
+			continue;
+
+		dev_err(&hsspi->spi->dev,
+			"HSSPI upper layer '%s' not unregistered\n",
+			hsspi->layers[i]->name);
+		spin_unlock(&hsspi->lock);
+		return -EBUSY;
+	}
+
+	spin_unlock(&hsspi->lock);
+
+	kthread_stop(hsspi->thread);
+
+	kfree(hsspi->host);
+	kfree(hsspi->soc);
+
+	dev_info(&hsspi->spi->dev, "HSSPI uninitialized\n");
+	return 0;
+}
+
+/**
+ * hsspi_register() - register an upper layer on its layer->id slot.
+ * @hsspi: HSSPI context.
+ * @layer: upper layer to register.
+ *
+ * Return: 0 on success, -EINVAL for an out-of-range id, -EBUSY if the
+ * slot is already taken.
+ */
+int hsspi_register(struct hsspi *hsspi, struct hsspi_layer *layer)
+{
+	int ret = 0;
+
+	if (!layer_id_is_valid(hsspi, layer->id))
+		return -EINVAL;
+
+	spin_lock(&hsspi->lock);
+
+	if (hsspi->layers[layer->id])
+		ret = -EBUSY;
+	else
+		hsspi->layers[layer->id] = layer;
+
+	spin_unlock(&hsspi->lock);
+
+	if (ret) {
+		dev_err(&hsspi->spi->dev, "%s: '%s' ret: %d\n", __func__,
+			layer->name, ret);
+		return ret;
+	}
+
+	dev_dbg(&hsspi->spi->dev, "HSSPI upper layer '%s' registered\n",
+		layer->name);
+	return 0;
+}
+
+/**
+ * hsspi_unregister() - unregister an upper layer.
+ * @hsspi: HSSPI context.
+ * @layer: previously registered upper layer.
+ *
+ * Queues a completion work behind any pending work and waits for it,
+ * guaranteeing the HSSPI thread holds no reference to @layer on
+ * return.
+ *
+ * Return: 0 on success, -EINVAL if @layer was not registered.
+ */
+int hsspi_unregister(struct hsspi *hsspi, struct hsspi_layer *layer)
+{
+	DECLARE_COMPLETION_ONSTACK(complete);
+	struct hsspi_work complete_work = {
+		.type = HSSPI_WORK_COMPLETION,
+		.completion = &complete,
+	};
+	int ret = 0;
+
+	if (!layer_id_is_valid(hsspi, layer->id))
+		return -EINVAL;
+
+	spin_lock(&hsspi->lock);
+
+	if (hsspi->layers[layer->id] == layer) {
+		hsspi->layers[layer->id] = NULL;
+
+		list_add_tail(&complete_work.list, &hsspi->work_list);
+	} else
+		ret = -EINVAL;
+
+	spin_unlock(&hsspi->lock);
+
+	if (ret) {
+		dev_err(&hsspi->spi->dev, "%s: '%s' ret: %d\n", __func__,
+			layer->name, ret);
+		return ret;
+	}
+
+	wake_up_interruptible(&hsspi->wq);
+
+	/* when completed there is no more reference to layer in the
+	 * work_list or in the hsspi_thread_fn
+	 */
+	wait_for_completion(&complete);
+
+	dev_dbg(&hsspi->spi->dev, "HSSPI upper layer '%s' unregistered\n",
+		layer->name);
+	return 0;
+}
+
+/* Flag helpers below are called from IRQ handlers / the transfer path
+ * to mirror the state of the ss_ready / ss_irq lines into
+ * hsspi->flags.
+ */
+
+/* Firmware is no longer busy (ss_ready fell). */
+void hsspi_clear_spi_slave_busy(struct hsspi *hsspi)
+{
+	clear_bit(HSSPI_FLAGS_SS_BUSY, hsspi->flags);
+}
+
+/* Mark the firmware busy for the duration of a transfer. */
+void hsspi_set_spi_slave_busy(struct hsspi *hsspi)
+{
+	set_bit(HSSPI_FLAGS_SS_BUSY, hsspi->flags);
+}
+
+/* ss_ready rose: unblock hsspi_wait_ss_ready(). */
+void hsspi_set_spi_slave_ready(struct hsspi *hsspi)
+{
+	set_bit(HSSPI_FLAGS_SS_READY, hsspi->flags);
+
+	wake_up_interruptible(&hsspi->wq_ready);
+}
+
+void hsspi_clear_spi_slave_ready(struct hsspi *hsspi)
+{
+	clear_bit(HSSPI_FLAGS_SS_READY, hsspi->flags);
+}
+
+/* SS_IRQ: the SOC has output data waiting; wake the HSSPI thread so it
+ * issues a pre-read.
+ */
+void hsspi_set_output_data_waiting(struct hsspi *hsspi)
+{
+	set_bit(HSSPI_FLAGS_SS_IRQ, hsspi->flags);
+
+	wake_up_interruptible(&hsspi->wq);
+}
+
+/**
+ * hsspi_init_block() - (re)allocate a block's DMA-able buffer.
+ * @blk: block to size; @blk->data must be NULL or come from a previous
+ *       hsspi_init_block() call (krealloc contract).
+ * @length: requested length in bytes.
+ *
+ * Return: 0 on success, -ENOMEM on allocation failure (the previous
+ * buffer, if any, is left untouched).
+ */
+int hsspi_init_block(struct hsspi_block *blk, u16 length)
+{
+	void *data;
+
+	data = krealloc(blk->data, length, GFP_KERNEL | GFP_DMA);
+	if (!data)
+		return -ENOMEM;
+
+	blk->data = data;
+	blk->length = length;
+	blk->size = length;
+
+	return 0;
+}
+
+/* Free a block's buffer; safe to call on an already-deinitialized block. */
+void hsspi_deinit_block(struct hsspi_block *blk)
+{
+	kfree(blk->data);
+	blk->data = NULL;
+}
+
+/**
+ * hsspi_send() - queue a block for transmission on behalf of a layer.
+ * @hsspi: HSSPI context.
+ * @layer: registered upper layer owning @blk.
+ * @blk: block to send; ownership stays with @layer, which is notified
+ *       through its ->sent() callback once transferred.
+ *
+ * Return: 0 on success, -EINVAL on bad arguments or unregistered
+ * layer, -EAGAIN if the HSSPI is not running, -ENOMEM on allocation
+ * failure.
+ */
+int hsspi_send(struct hsspi *hsspi, struct hsspi_layer *layer,
+	       struct hsspi_block *blk)
+{
+	struct hsspi_work *tx_work;
+	int ret = 0;
+
+	if (!layer || !blk)
+		return -EINVAL;
+
+	if (!layer_id_is_valid(hsspi, layer->id))
+		return -EINVAL;
+
+	tx_work = kzalloc(sizeof(*tx_work), GFP_KERNEL);
+	if (!tx_work)
+		return -ENOMEM;
+
+	tx_work->type = HSSPI_WORK_TX;
+	tx_work->tx.blk = blk;
+	tx_work->tx.layer = layer;
+
+	spin_lock(&hsspi->lock);
+
+	/* Only queue while running and while @layer is still the
+	 * registered owner of its slot.
+	 */
+	if (hsspi->state == HSSPI_RUNNING) {
+		if (hsspi->layers[layer->id] == layer)
+			list_add_tail(&tx_work->list, &hsspi->work_list);
+		else
+			ret = -EINVAL;
+	} else
+		ret = -EAGAIN;
+
+	spin_unlock(&hsspi->lock);
+
+	if (ret) {
+		kfree(tx_work);
+		dev_err(&hsspi->spi->dev, "%s: %d\n", __func__, ret);
+		return ret;
+	}
+
+	wake_up_interruptible(&hsspi->wq);
+
+	dev_dbg(&hsspi->spi->dev, "send %d bytes on HSSPI '%s' layer\n",
+		blk->length, layer->name);
+	return 0;
+}
+
+/* Allow transfers and wake the thread to process anything pending. */
+void hsspi_start(struct hsspi *hsspi)
+{
+	spin_lock(&hsspi->lock);
+
+	hsspi->state = HSSPI_RUNNING;
+
+	spin_unlock(&hsspi->lock);
+
+	wake_up_interruptible(&hsspi->wq);
+
+	dev_dbg(&hsspi->spi->dev, "HSSPI started\n");
+}
+
+/* Stop accepting new sends and drain: a completion work is queued
+ * behind all pending work and waited for, so every previously queued
+ * transfer has been processed when this returns.
+ */
+void hsspi_stop(struct hsspi *hsspi)
+{
+	DECLARE_COMPLETION_ONSTACK(complete);
+	struct hsspi_work complete_work = {
+		.type = HSSPI_WORK_COMPLETION,
+		.completion = &complete,
+	};
+
+	spin_lock(&hsspi->lock);
+
+	hsspi->state = HSSPI_STOPPED;
+
+	list_add_tail(&complete_work.list, &hsspi->work_list);
+
+	spin_unlock(&hsspi->lock);
+
+	wake_up_interruptible(&hsspi->wq);
+
+	wait_for_completion(&complete);
+
+	dev_dbg(&hsspi->spi->dev, "HSSPI stopped\n");
+}
diff --git a/hsspi.h b/hsspi.h
new file mode 100644
index 0000000..c57c849
--- /dev/null
+++ b/hsspi.h
@@ -0,0 +1,328 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/*
+ * This file is part of the QM35 UCI stack for linux.
+ *
+ * Copyright (c) 2021 Qorvo US, Inc.
+ *
+ * This software is provided under the GNU General Public License, version 2
+ * (GPLv2), as well as under a Qorvo commercial license.
+ *
+ * You may choose to use this software under the terms of the GPLv2 License,
+ * version 2 ("GPLv2"), as published by the Free Software Foundation.
+ * You should have received a copy of the GPLv2 along with this program. If
+ * not, see <http://www.gnu.org/licenses/>.
+ *
+ * This program is distributed under the GPLv2 in the hope that it will be
+ * useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GPLv2 for more
+ * details.
+ *
+ * If you cannot meet the requirements of the GPLv2, you may not use this
+ * software for any purpose without first obtaining a commercial license from
+ * Qorvo.
+ * Please contact Qorvo to inquire about licensing terms.
+ *
+ * QM35 HSSPI Protocol
+ */
+
+#ifndef __HSSPI_H__
+#define __HSSPI_H__
+
+#include <linux/gpio.h>
+#include <linux/kthread.h>
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/spi/spi.h>
+#include <linux/wait.h>
+
+/* Upper-layer (UL) channel ids, carried in the STC header's `ul` field. */
+enum { UL_RESERVED,
+       UL_BOOT_FLASH,
+       UL_UCI_APP,
+       UL_COREDUMP,
+       UL_LOG,
+       UL_TEST_HSSPI,
+       UL_MAX_IDX };
+
+/* On-wire HSSPI transfer header exchanged between host and SoC. */
+struct stc_header {
+	u8 flags;
+	u8 ul; /* upper-layer channel id, one of the UL_* values */
+	u16 length;
+} __packed;
+
+/* Kind of item queued on the HSSPI work list. */
+enum hsspi_work_type {
+	HSSPI_WORK_TX = 0, /* transmit a block for an upper layer */
+	HSSPI_WORK_COMPLETION, /* drain sentinel used by hsspi_stop() */
+};
+
+/**
+ * struct hsspi_block - Memory block used by the HSSPI.
+ * @data: pointer to some memory
+ * @length: requested length of the data
+ * @size: size of the data (could be greater than length)
+ *
+ * This structure represents the memory used by the HSSPI driver for
+ * sending or receiving messages. The upper layer must provide the HSSPI
+ * driver with a way to allocate such a structure for reception, and uses
+ * this structure for the sending function.
+ *
+ * The goal here is to prevent useless copy.
+ */
+struct hsspi_block {
+ void *data;
+ u16 length;
+ u16 size;
+};
+
+struct hsspi_layer;
+
+/**
+ * struct hsspi_layer_ops - Upper layer operations.
+ *
+ * @registered: Called when this upper layer is registered.
+ * @unregistered: Called when unregistered.
+ *
+ * @get: Called when the HSSPI driver need some memory for
+ * reception. This &struct hsspi_block will be give back to the upper
+ * layer in the received callback.
+ *
+ * @received: Called when the HSSPI driver received some data for this
+ * upper layer. In case of error, status is used to notify the upper
+ * layer.
+ *
+ * @sent: Called when a &struct hsspi_block is sent by the HSSPI
+ * driver. In case of error, status is used to notify the upper layer.
+ *
+ * Operation needed to be implemented by an upper layer. All ops are
+ * called by the HSSPI driver and are mandatory.
+ */
+struct hsspi_layer_ops {
+ int (*registered)(struct hsspi_layer *upper_layer);
+ void (*unregistered)(struct hsspi_layer *upper_layer);
+
+ struct hsspi_block *(*get)(struct hsspi_layer *upper_layer, u16 length);
+ void (*received)(struct hsspi_layer *upper_layer,
+ struct hsspi_block *blk, int status);
+ void (*sent)(struct hsspi_layer *upper_layer, struct hsspi_block *blk,
+ int status);
+};
+
+/**
+ * struct hsspi_layer - HSSPI upper layer.
+ * @name: Name of this upper layer.
+ * @id: id (ul used in the STC header) of this upper layer
+ * @ops: &struct hsspi_layer_ops
+ *
+ * Basic upper layer structure. Inherit from it to implement a
+ * concrete upper layer.
+ */
+struct hsspi_layer {
+ char *name;
+ u8 id;
+ const struct hsspi_layer_ops *ops;
+};
+
+/* Bits in &struct hsspi.flags tracking the QM35 handshake line states
+ * (set/cleared by the hsspi_set/clear_spi_slave_* helpers below). */
+enum hsspi_flags {
+	HSSPI_FLAGS_SS_IRQ = 0, /* slave has output data waiting */
+	HSSPI_FLAGS_SS_READY = 1, /* slave ready for a transfer */
+	HSSPI_FLAGS_SS_BUSY = 2, /* slave busy, transfer must wait */
+	HSSPI_FLAGS_MAX = 3,
+};
+
+/* Life-cycle state of the HSSPI (see hsspi_start()/hsspi_stop()). */
+enum hsspi_state {
+	HSSPI_RUNNING = 0,
+	HSSPI_ERROR = 1,
+	HSSPI_STOPPED = 2,
+};
+
+/**
+ * struct hsspi - HSSPI driver.
+ *
+ * Some things need to be refine:
+ * 1. a better way to disable/enable ss_irq or ss_ready GPIOs
+ * 2. be able to change spi speed on the fly (for flashing purpose)
+ *
+ * Actually this structure should be abstract.
+ *
+ */
+struct hsspi {
+	spinlock_t lock; /* protect work_list, layers and state */
+	struct list_head work_list; /* pending TX/completion work items */
+	struct hsspi_layer *layers[UL_MAX_IDX]; /* registered upper layers */
+	enum hsspi_state state;
+
+	DECLARE_BITMAP(flags, HSSPI_FLAGS_MAX); /* see enum hsspi_flags */
+	struct wait_queue_head wq; /* wakes the worker thread */
+	struct wait_queue_head wq_ready; /* waits on ss_ready assertion */
+	struct task_struct *thread;
+
+	// re-enable SS_IRQ
+	void (*odw_cleared)(struct hsspi *hsspi);
+
+	// wakeup QM35
+	void (*wakeup)(struct hsspi *hsspi);
+
+	// reset QM35
+	void (*reset_qm35)(struct hsspi *hsspi);
+
+	struct spi_device *spi;
+
+	/* STC headers for the host (TX) and SoC (RX) sides of a transfer. */
+	struct stc_header *host, *soc;
+	ktime_t next_cs_active_time;
+
+	struct gpio_desc *gpio_ss_rdy;
+	struct gpio_desc *gpio_exton;
+
+	/* NOTE(review): volatile is not a synchronization primitive for
+	 * cross-thread flags — confirm the access pattern is safe. */
+	volatile bool xfer_ongoing;
+	volatile bool waiting_ss_rdy;
+};
+
+/**
+ * hsspi_init() - Initialize the HSSPI
+ * @hsspi: pointer to a &struct hsspi
+ * @spi: pointer to the &struct spi_device used by the HSSPI for SPI
+ * transmission
+ *
+ * Initialize the HSSPI structure.
+ *
+ * Return: 0 if no error or -errno.
+ *
+ */
+int hsspi_init(struct hsspi *hsspi, struct spi_device *spi);
+void hsspi_set_gpios(struct hsspi *hsspi, struct gpio_desc *gpio_ss_rdy,
+ struct gpio_desc *gpio_exton);
+
+/**
+ * hsspi_deinit() - Deinitialize the HSSPI
+ * @hsspi: pointer to a &struct hsspi
+ *
+ * Deinitialize the HSSPI structure.
+ *
+ * Return: 0 if no error or -errno
+ */
+int hsspi_deinit(struct hsspi *hsspi);
+
+/**
+ * hsspi_register() - Register an upper layer
+ * @hsspi: pointer to a &struct hsspi
+ * @layer: pointer to a &struct hsspi_layer
+ *
+ * Register an upper layer.
+ *
+ * Return: 0 if no error or -errno.
+ *
+ */
+int hsspi_register(struct hsspi *hsspi, struct hsspi_layer *layer);
+
+/**
+ * hsspi_unregister() - Unregister an upper layer
+ * @hsspi: pointer to a &struct hsspi
+ * @layer: pointer to a &struct hsspi_layer
+ *
+ * Unregister an upper layer.
+ *
+ * Return: 0 if no error or -errno.
+ *
+ */
+int hsspi_unregister(struct hsspi *hsspi, struct hsspi_layer *layer);
+
+/**
+ * hsspi_set_spi_slave_ready() - tell the hsspi that the ss_ready is active
+ * @hsspi: pointer to a &struct hsspi
+ *
+ * This function is called in the ss_ready irq handler. It notifies the
+ * HSSPI driver that the QM is ready for transfer.
+ */
+void hsspi_set_spi_slave_ready(struct hsspi *hsspi);
+
+/**
+ * hsspi_clear_spi_slave_ready() - tell the hsspi that the ss_ready has
+ * been lowered meaning that the fw is busy or asleep,
+ * @hsspi: pointer to a &struct hsspi
+ */
+void hsspi_clear_spi_slave_ready(struct hsspi *hsspi);
+
+/**
+ * hsspi_set_spi_slave_busy() - tell the hsspi that the ss_ready has
+ * not been lowered and raised again meaning that the fw is busy,
+ * @hsspi: pointer to a &struct hsspi
+ */
+void hsspi_set_spi_slave_busy(struct hsspi *hsspi);
+
+/**
+ * hsspi_clear_spi_slave_busy() - tell the hsspi that the ss_ready has
+ * been lowered and raised again meaning that the fw is not busy anymore,
+ * @hsspi: pointer to a &struct hsspi
+ *
+ * This function is called in the ss_ready irq handler. It notifies the
+ * HSSPI driver that the QM has acknowledged the last SPI xfer.
+ */
+void hsspi_clear_spi_slave_busy(struct hsspi *hsspi);
+
+/**
+ * hsspi_set_output_data_waiting() - tell the hsspi that the ss_irq is active
+ * @hsspi: pointer to a &struct hsspi
+ *
+ * This function is called in the ss_irq irq handler. It notifies the
+ * HSSPI driver that the QM has some data to output.
+ *
+ * The HSSPI must work with or without the ss_irq gpio. The current
+ * implementation is far from ideal regarding this requirement.
+ *
+ * W/o the gpio we should repeatedly send some 0-length write transfers
+ * in order to check for the ODW flag in the SOC STC header.
+ */
+void hsspi_set_output_data_waiting(struct hsspi *hsspi);
+
+/**
+ * hsspi_init_block() - allocate a block data that suits HSSPI.
+ *
+ * @blk: point to a &struct hsspi_block
+ * @length: block length
+ *
+ * Return: 0 or -ENOMEM on error
+ */
+int hsspi_init_block(struct hsspi_block *blk, u16 length);
+
+/**
+ * hsspi_deinit_block() - deallocate a block data.
+ *
+ * @blk: point to a &struct hsspi_block
+ *
+ */
+void hsspi_deinit_block(struct hsspi_block *blk);
+
+/**
+ * hsspi_send() - send a &struct hsspi_block for a &struct hsspi_layer
+ * layer.
+ *
+ * @hsspi: pointer to a &struct hsspi
+ * @layer: pointer to a &struct hsspi_layer
+ * @blk: pointer to a &struct hsspi_block
+ *
+ * Send the block `blk` of the upper layer `layer` on the `hsspi`
+ * driver.
+ *
+ * Return: 0 if no error or -errno.
+ *
+ */
+int hsspi_send(struct hsspi *hsspi, struct hsspi_layer *layer,
+ struct hsspi_block *blk);
+
+/**
+ * hsspi_start() - start the HSSPI
+ *
+ * @hsspi: pointer to a &struct hsspi
+ *
+ */
+void hsspi_start(struct hsspi *hsspi);
+
+/**
+ * hsspi_stop() - stop the HSSPI
+ *
+ * @hsspi: pointer to a &struct hsspi
+ *
+ */
+void hsspi_stop(struct hsspi *hsspi);
+
+#endif // __HSSPI_H__
diff --git a/hsspi_coredump.c b/hsspi_coredump.c
new file mode 100644
index 0000000..36ddbb6
--- /dev/null
+++ b/hsspi_coredump.c
@@ -0,0 +1,344 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * This file is part of the QM35 UCI stack for linux.
+ *
+ * Copyright (c) 2022 Qorvo US, Inc.
+ *
+ * This software is provided under the GNU General Public License, version 2
+ * (GPLv2), as well as under a Qorvo commercial license.
+ *
+ * You may choose to use this software under the terms of the GPLv2 License,
+ * version 2 ("GPLv2"), as published by the Free Software Foundation.
+ * You should have received a copy of the GPLv2 along with this program. If
+ * not, see <http://www.gnu.org/licenses/>.
+ *
+ * This program is distributed under the GPLv2 in the hope that it will be
+ * useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GPLv2 for more
+ * details.
+ *
+ * If you cannot meet the requirements of the GPLv2, you may not use this
+ * software for any purpose without first obtaining a commercial license from
+ * Qorvo.
+ * Please contact Qorvo to inquire about licensing terms.
+ *
+ * QM35 COREDUMP layer HSSPI Protocol
+ */
+
+#include "qm35.h"
+#include "hsspi_coredump.h"
+
+#define COREDUMP_HEADER_NTF 0x00
+#define COREDUMP_BODY_NTF 0x01
+#define COREDUMP_RCV_STATUS 0x02
+#define COREDUMP_FORCE_CMD 0x03
+
+#define COREDUMP_RCV_NACK 0x00
+#define COREDUMP_RCV_ACK 0x01
+
+#define COREDUMP_RCV_TIMER_TIMEOUT_S 2
+
+/* Common header: first byte of every coredump-layer message. */
+struct __packed coredump_common_hdr {
+	uint8_t cmd_id;
+};
+
+/* Body of a COREDUMP_HEADER_NTF: total dump size and its checksum. */
+struct __packed coredump_hdr_ntf {
+	uint32_t size;
+	uint16_t crc; /* additive checksum, see coredump_get_checksum() */
+};
+
+/* Body of a COREDUMP_RCV_STATUS: COREDUMP_RCV_ACK or COREDUMP_RCV_NACK. */
+struct __packed coredump_rcv_status {
+	uint8_t ack;
+};
+
+/* Allocate a coredump packet able to hold `length` bytes of payload. */
+struct coredump_packet *coredump_packet_alloc(u16 length)
+{
+	struct coredump_packet *packet;
+
+	packet = kzalloc(sizeof(*packet), GFP_KERNEL);
+	if (!packet)
+		return NULL;
+
+	/* The wrapper is useless without its HSSPI data block. */
+	if (hsspi_init_block(&packet->blk, length) != 0) {
+		kfree(packet);
+		return NULL;
+	}
+
+	return packet;
+}
+
+void coredump_packet_free(struct coredump_packet *p)
+{
+	/* Free the data block first, then the wrapper itself. */
+	hsspi_deinit_block(&p->blk);
+	kfree(p);
+}
+
+/*
+ * Send a COREDUMP_RCV_STATUS (ACK/NACK) message back to the QM35.
+ * Returns 0 on success or a negative errno.
+ */
+static int coredump_send_rcv_status(struct coredump_layer *layer, uint8_t ack)
+{
+	struct coredump_packet *p;
+	struct coredump_common_hdr hdr;
+	struct coredump_rcv_status rcv;
+	struct qm35_ctx *qm35_hdl;
+	int ret;
+
+	/* Log the value we actually send, not the cached layer state. */
+	pr_info("qm35: coredump: sending status %s\n",
+		ack == COREDUMP_RCV_ACK ? "ACK" : "NACK");
+
+	qm35_hdl = container_of(layer, struct qm35_ctx, coredump_layer);
+
+	/* Payload: common header followed by the ack/nack status byte. */
+	p = coredump_packet_alloc(sizeof(hdr) + sizeof(rcv));
+	if (!p)
+		return -ENOMEM;
+
+	hdr.cmd_id = COREDUMP_RCV_STATUS;
+	rcv.ack = ack;
+
+	memcpy(p->blk.data, &hdr, sizeof(hdr));
+	memcpy(p->blk.data + sizeof(hdr), &rcv, sizeof(rcv));
+
+	/*
+	 * On success the packet is freed by coredump_sent(); on failure
+	 * hsspi_send() does not take ownership, so free it here to avoid
+	 * leaking the packet (the original code leaked it).
+	 */
+	ret = hsspi_send(&qm35_hdl->hsspi, &qm35_hdl->coredump_layer.hlayer,
+			 &p->blk);
+	if (ret)
+		coredump_packet_free(p);
+
+	return ret;
+}
+
+/* Compute the simple 16-bit additive checksum over the received bytes. */
+static uint16_t coredump_get_checksum(struct coredump_layer *layer)
+{
+	const uint8_t *bytes = layer->coredump_data;
+	uint16_t sum = 0;
+	uint32_t i;
+
+	for (i = 0; i < layer->coredump_data_wr_idx; i++)
+		sum += bytes[i];
+
+	return sum;
+}
+
+/*
+ * Receive-timeout callback: no coredump packet arrived within
+ * COREDUMP_RCV_TIMER_TIMEOUT_S, report the current (typically NACK)
+ * status back to the QM35.
+ * NOTE(review): "corredump" is a typo; renaming it also requires
+ * updating the timer_setup() reference in coredump_layer_init().
+ */
+static void corredump_on_expired_timer(struct timer_list *timer)
+{
+	struct coredump_layer *layer =
+		container_of(timer, struct coredump_layer, timer);
+
+	pr_warn("qm35: coredump receive timer expired\n");
+
+	coredump_send_rcv_status(layer, layer->coredump_status);
+}
+
+/* Layer registration hooks: no setup/teardown needed for coredump. */
+static int coredump_registered(struct hsspi_layer *hlayer)
+{
+	return 0;
+}
+
+static void coredump_unregistered(struct hsspi_layer *hlayer)
+{
+	;
+}
+
+/* Hand the HSSPI core a freshly allocated block for reception. */
+static struct hsspi_block *coredump_get(struct hsspi_layer *hlayer, u16 length)
+{
+	struct coredump_packet *packet = coredump_packet_alloc(length);
+
+	return packet ? &packet->blk : NULL;
+}
+
+/*
+ * Handle a COREDUMP_HEADER_NTF: reset the receive state and (re)size the
+ * receive buffer to the announced dump size.
+ */
+static void coredump_header_ntf_received(struct coredump_layer *layer,
+					 struct coredump_hdr_ntf chn)
+{
+	void *data;
+
+	pr_info("qm35: coredump: receiving coredump with len: %u and crc: 0x%x\n",
+		chn.size, chn.crc);
+
+	layer->coredump_data_wr_idx = 0;
+	layer->coredump_size = chn.size;
+	layer->coredump_crc = chn.crc;
+	layer->coredump_status = COREDUMP_RCV_NACK;
+
+	data = krealloc(layer->coredump_data, layer->coredump_size, GFP_KERNEL);
+	if (ZERO_OR_NULL_PTR(data)) {
+		/*
+		 * krealloc() leaves the old block allocated on failure (but
+		 * frees it itself when size == 0): release it explicitly so
+		 * the previous coredump buffer is not leaked.
+		 */
+		if (layer->coredump_size)
+			kfree(layer->coredump_data);
+		layer->coredump_data = NULL;
+		layer->coredump_size = 0;
+		layer->coredump_crc = 0;
+		pr_err("qm35: failed to allocate coredump mem: %px\n", data);
+	} else {
+		layer->coredump_data = data;
+	}
+}
+
+/*
+ * Append a COREDUMP_BODY_NTF payload to the receive buffer.
+ * Returns 0 on success, 1 if the buffer is missing or would overflow.
+ */
+static int coredump_body_ntf_received(struct coredump_layer *layer,
+				      uint8_t *cch_body, uint16_t cch_body_size)
+{
+	if (!layer->coredump_data) {
+		pr_err("qm35: failed to save coredump, mem not allocated\n");
+		return 1;
+	}
+
+	/* Never write past the size announced in the header ntf. */
+	if (cch_body_size + layer->coredump_data_wr_idx >
+	    layer->coredump_size) {
+		pr_err("qm35: failed to save coredump, mem overflow: max size: %d, wr_idx: %d, cd size: %d\n",
+		       layer->coredump_size, layer->coredump_data_wr_idx,
+		       cch_body_size);
+		return 1;
+	}
+
+	memcpy(layer->coredump_data + layer->coredump_data_wr_idx, cch_body,
+	       cch_body_size);
+	layer->coredump_data_wr_idx += cch_body_size;
+
+	return 0;
+}
+
+/*
+ * RX callback for the coredump layer. Dispatches on the command id:
+ * a header ntf (re)initializes the receive buffer, a body ntf appends
+ * data, and once the full dump is in, the checksum decides ACK vs NACK.
+ */
+static void coredump_received(struct hsspi_layer *hlayer,
+			      struct hsspi_block *blk, int status)
+{
+	struct coredump_common_hdr cch;
+	struct coredump_hdr_ntf chn;
+	uint8_t *cch_body;
+	uint16_t cch_body_size;
+
+	struct coredump_packet *packet =
+		container_of(blk, struct coredump_packet, blk);
+
+	struct coredump_layer *layer =
+		container_of(hlayer, struct coredump_layer, hlayer);
+
+	if (status)
+		goto out;
+
+	if (blk->length < sizeof(struct coredump_common_hdr)) {
+		pr_err("qm35: coredump packet header too small: %d bytes\n",
+		       blk->length);
+		goto out;
+	}
+
+	/* A packet arrived in time: stop the receive-timeout timer. */
+	del_timer_sync(&layer->timer);
+
+	memcpy(&cch, blk->data, sizeof(cch));
+	cch_body = blk->data + sizeof(cch);
+	cch_body_size = blk->length - sizeof(cch);
+
+	switch (cch.cmd_id) {
+	case COREDUMP_HEADER_NTF:
+		if (cch_body_size < sizeof(chn)) {
+			pr_err("qm35: coredump packet header ntf too small: %d bytes\n",
+			       cch_body_size);
+			break;
+		}
+
+		memcpy(&chn, cch_body, sizeof(chn));
+		coredump_header_ntf_received(layer, chn);
+		break;
+
+	case COREDUMP_BODY_NTF:
+		if (coredump_body_ntf_received(layer, cch_body, cch_body_size))
+			break;
+
+		/* Whole dump received: verify the checksum and report. */
+		if (layer->coredump_data_wr_idx == layer->coredump_size) {
+			uint16_t crc = coredump_get_checksum(layer);
+
+			pr_info("qm35: coredump: calculated crc: 0x%x, header crc: 0x%x\n",
+				crc, layer->coredump_crc);
+
+			if (crc == layer->coredump_crc)
+				layer->coredump_status = COREDUMP_RCV_ACK;
+
+			coredump_send_rcv_status(layer, layer->coredump_status);
+
+			break;
+		}
+
+		/* More body expected: re-arm the receive-timeout timer. */
+		mod_timer(&layer->timer,
+			  jiffies + COREDUMP_RCV_TIMER_TIMEOUT_S * HZ);
+
+		break;
+
+	default:
+		pr_err("qm35: coredump: wrong cmd id received: 0x%x\n",
+		       cch.cmd_id);
+		break;
+	}
+
+out:
+
+	coredump_packet_free(packet);
+}
+
+/* TX-complete callback: release the packet allocated by the sender. */
+static void coredump_sent(struct hsspi_layer *hlayer, struct hsspi_block *blk,
+			  int status)
+{
+	struct coredump_packet *buf =
+		container_of(blk, struct coredump_packet, blk);
+
+	coredump_packet_free(buf);
+}
+
+/* Callbacks the HSSPI core invokes for the UL_COREDUMP channel. */
+static const struct hsspi_layer_ops coredump_ops = {
+	.registered = coredump_registered,
+	.unregistered = coredump_unregistered,
+	.get = coredump_get,
+	.received = coredump_received,
+	.sent = coredump_sent,
+};
+
+/* Debug hook: expose the coredump bytes received so far and their count. */
+char *debug_coredump_get(struct debug *dbg, size_t *len)
+{
+	struct qm35_ctx *qm35_hdl = container_of(dbg, struct qm35_ctx, debug);
+	struct coredump_layer *layer = &qm35_hdl->coredump_layer;
+
+	*len = layer->coredump_data_wr_idx;
+
+	return layer->coredump_data;
+}
+
+/*
+ * Debug hook: ask the QM35 to produce a coredump on demand.
+ * Returns 0 on success or a negative errno.
+ */
+int debug_coredump_force(struct debug *dbg)
+{
+	struct coredump_packet *p;
+	struct coredump_common_hdr hdr = { .cmd_id = COREDUMP_FORCE_CMD };
+	struct qm35_ctx *qm35_hdl;
+	int ret;
+
+	pr_info("qm35: force coredump\n");
+
+	qm35_hdl = container_of(dbg, struct qm35_ctx, debug);
+
+	p = coredump_packet_alloc(sizeof(hdr));
+	if (!p)
+		return -ENOMEM;
+
+	memcpy(p->blk.data, &hdr, sizeof(hdr));
+
+	/*
+	 * On success the packet is freed by coredump_sent(); on failure
+	 * hsspi_send() does not take ownership, so free it here to avoid
+	 * leaking the packet (the original code leaked it).
+	 */
+	ret = hsspi_send(&qm35_hdl->hsspi, &qm35_hdl->coredump_layer.hlayer,
+			 &p->blk);
+	if (ret)
+		coredump_packet_free(p);
+
+	return ret;
+}
+
+/* Coredump accessors exposed through the debug interface. */
+static const struct debug_coredump_ops debug_coredump_ops = {
+	.coredump_get = debug_coredump_get,
+	.coredump_force = debug_coredump_force,
+};
+
+int coredump_layer_init(struct coredump_layer *layer, struct debug *debug)
+{
+	/* Describe this layer to the HSSPI core. */
+	layer->hlayer.name = "QM35 COREDUMP";
+	layer->hlayer.id = UL_COREDUMP;
+	layer->hlayer.ops = &coredump_ops;
+
+	/* No coredump received yet. */
+	layer->coredump_data = NULL;
+	layer->coredump_data_wr_idx = 0;
+	layer->coredump_size = 0;
+	layer->coredump_crc = 0;
+	layer->coredump_status = 0;
+	/* Receive-timeout timer; armed while a dump transfer is ongoing. */
+	timer_setup(&layer->timer, corredump_on_expired_timer, 0);
+
+	/* Hook the coredump accessors into the debug interface. */
+	debug->coredump_ops = &debug_coredump_ops;
+
+	return 0;
+}
+
+/*
+ * Tear down the coredump layer: stop the receive timer and free the
+ * receive buffer.
+ */
+void coredump_layer_deinit(struct coredump_layer *layer)
+{
+	/*
+	 * The timer callback dereferences *layer and the coredump buffer;
+	 * make sure it cannot fire after (or while) we free them.
+	 */
+	del_timer_sync(&layer->timer);
+	kfree(layer->coredump_data);
+	layer->coredump_data = NULL;
+}
diff --git a/hsspi_coredump.h b/hsspi_coredump.h
new file mode 100644
index 0000000..c7403fc
--- /dev/null
+++ b/hsspi_coredump.h
@@ -0,0 +1,55 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/*
+ * This file is part of the QM35 UCI stack for linux.
+ *
+ * Copyright (c) 2022 Qorvo US, Inc.
+ *
+ * This software is provided under the GNU General Public License, version 2
+ * (GPLv2), as well as under a Qorvo commercial license.
+ *
+ * You may choose to use this software under the terms of the GPLv2 License,
+ * version 2 ("GPLv2"), as published by the Free Software Foundation.
+ * You should have received a copy of the GPLv2 along with this program. If
+ * not, see <http://www.gnu.org/licenses/>.
+ *
+ * This program is distributed under the GPLv2 in the hope that it will be
+ * useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GPLv2 for more
+ * details.
+ *
+ * If you cannot meet the requirements of the GPLv2, you may not use this
+ * software for any purpose without first obtaining a commercial license from
+ * Qorvo.
+ * Please contact Qorvo to inquire about licensing terms.
+ *
+ * QM35 COREDUMP layer HSSPI Protocol
+ */
+
+#ifndef __HSSPI_COREDUMP_H__
+#define __HSSPI_COREDUMP_H__
+
+#include <linux/mutex.h>
+#include <linux/sched.h>
+
+#include "hsspi.h"
+#include "debug.h"
+
+/* A coredump message: just an HSSPI block (no extra bookkeeping). */
+struct coredump_packet {
+	struct hsspi_block blk;
+};
+
+/* State of the coredump upper layer (UL_COREDUMP channel). */
+struct coredump_layer {
+	struct hsspi_layer hlayer;
+	void *coredump_data; /* receive buffer, sized from the header ntf */
+	uint32_t coredump_data_wr_idx; /* bytes received so far */
+	uint32_t coredump_size; /* expected total size */
+	uint16_t coredump_crc; /* checksum announced by the QM35 */
+	uint8_t coredump_status; /* COREDUMP_RCV_ACK/NACK to report back */
+	struct timer_list timer; /* receive timeout */
+};
+
+int coredump_layer_init(struct coredump_layer *coredump, struct debug *debug);
+void coredump_layer_deinit(struct coredump_layer *coredump);
+
+#endif // __HSSPI_COREDUMP_H__
diff --git a/hsspi_log.c b/hsspi_log.c
new file mode 100644
index 0000000..d4a8616
--- /dev/null
+++ b/hsspi_log.c
@@ -0,0 +1,464 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * This file is part of the QM35 UCI stack for linux.
+ *
+ * Copyright (c) 2022 Qorvo US, Inc.
+ *
+ * This software is provided under the GNU General Public License, version 2
+ * (GPLv2), as well as under a Qorvo commercial license.
+ *
+ * You may choose to use this software under the terms of the GPLv2 License,
+ * version 2 ("GPLv2"), as published by the Free Software Foundation.
+ * You should have received a copy of the GPLv2 along with this program. If
+ * not, see <http://www.gnu.org/licenses/>.
+ *
+ * This program is distributed under the GPLv2 in the hope that it will be
+ * useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GPLv2 for more
+ * details.
+ *
+ * If you cannot meet the requirements of the GPLv2, you may not use this
+ * software for any purpose without first obtaining a commercial license from
+ * Qorvo.
+ * Please contact Qorvo to inquire about licensing terms.
+ *
+ * QM35 LOG layer HSSPI Protocol
+ */
+
+#include <qmrom.h>
+
+#include "qm35.h"
+#include "hsspi_log.h"
+
+/* LOG-layer command ids (log_packet_hdr.cmd_id). */
+#define LOG_CID_TRACE_NTF 0x0000
+#define LOG_CID_SET_LOG_LVL 0x0001
+#define LOG_CID_GET_LOG_LVL 0x0002
+#define LOG_CID_GET_LOG_SRC 0x0003
+
+#define TRACE_RB_SIZE 0xFFFFF // trace ring buffer size: 1 MiB - 1 byte
+
+/* Header of every LOG-layer message: command id + body size in bytes. */
+struct __packed log_packet_hdr {
+	uint16_t cmd_id;
+	uint16_t b_size;
+};
+
+/* LOG_CID_SET_LOG_LVL command: set level `lvl` for log source `id`. */
+struct __packed log_level_set_cmd {
+	struct log_packet_hdr hdr;
+	uint8_t id;
+	uint8_t lvl;
+};
+
+/* LOG_CID_GET_LOG_LVL command: query the level of log source `id`. */
+struct __packed log_level_get_cmd {
+	struct log_packet_hdr hdr;
+	uint8_t id;
+};
+
+/* Allocate a LOG packet able to hold `length` bytes of payload. */
+struct log_packet *log_packet_alloc(u16 length)
+{
+	struct log_packet *packet = kzalloc(sizeof(*packet), GFP_KERNEL);
+
+	if (!packet)
+		return NULL;
+
+	/* The wrapper is useless without its HSSPI data block. */
+	if (hsspi_init_block(&packet->blk, length) != 0) {
+		kfree(packet);
+		packet = NULL;
+	}
+
+	return packet;
+}
+
+void log_packet_free(struct log_packet *p)
+{
+	/* Free the data block first, then the wrapper itself. */
+	hsspi_deinit_block(&p->blk);
+	kfree(p);
+}
+
+/* Build a LOG_CID_GET_LOG_SRC request: a bare header with no body. */
+static struct log_packet *encode_get_log_sources_packet(void)
+{
+	struct log_packet_hdr hdr = {
+		.cmd_id = LOG_CID_GET_LOG_SRC,
+		.b_size = 0,
+	};
+	struct log_packet *packet;
+
+	packet = log_packet_alloc(sizeof(hdr));
+	if (packet)
+		memcpy(packet->blk.data, &hdr, sizeof(hdr));
+
+	return packet;
+}
+
+/* Build a LOG_CID_SET_LOG_LVL request for source `id` with level `lvl`. */
+static struct log_packet *encode_set_log_level_packet(uint8_t id, uint8_t lvl)
+{
+	struct log_level_set_cmd cmd = {
+		.hdr.cmd_id = LOG_CID_SET_LOG_LVL,
+		.hdr.b_size = sizeof(cmd) - sizeof(cmd.hdr),
+		.id = id,
+		.lvl = lvl,
+	};
+	struct log_packet *packet;
+
+	packet = log_packet_alloc(sizeof(cmd));
+	if (packet)
+		memcpy(packet->blk.data, &cmd, sizeof(cmd));
+
+	return packet;
+}
+
+/* Build a LOG_CID_GET_LOG_LVL request for source `id`. */
+static struct log_packet *encode_get_log_level_packet(uint8_t id)
+{
+	struct log_level_get_cmd cmd = {
+		.hdr.cmd_id = LOG_CID_GET_LOG_LVL,
+		.hdr.b_size = sizeof(cmd) - sizeof(cmd.hdr),
+		.id = id,
+	};
+	struct log_packet *packet;
+
+	packet = log_packet_alloc(sizeof(cmd));
+	if (packet)
+		memcpy(packet->blk.data, &cmd, sizeof(cmd));
+
+	return packet;
+}
+
+/*
+ * Handle a LOG_CID_GET_LOG_SRC response and build the log source table.
+ * Format: <count:u8> then `count` entries of <id:u8> <lvl:u8> <name:strz>.
+ * Returns 0 on success, 1 on allocation failure.
+ * NOTE(review): data_size is not validated against the parsed entries —
+ * a malformed response can be over-read; confirm the firmware contract.
+ */
+static int parse_log_sources_response(struct log_layer *layer, uint8_t *data,
+				      uint16_t data_size)
+{
+	struct qm35_ctx *qm35_hdl;
+	int idx = 0;
+
+	qm35_hdl = container_of(layer, struct qm35_ctx, log_layer);
+
+	layer->log_modules_count = *data++;
+
+	/* Drop any table built by a previous response. */
+	kfree(layer->log_modules);
+
+	layer->log_modules = kcalloc(layer->log_modules_count,
+				     sizeof(struct log_module), GFP_KERNEL);
+	if (!layer->log_modules)
+		return 1;
+
+	for (idx = 0; idx < layer->log_modules_count; idx++) {
+		layer->log_modules[idx].debug = &qm35_hdl->debug;
+		layer->log_modules[idx].id = *data++;
+		layer->log_modules[idx].lvl = *data++;
+		/*
+		 * The name comes from the firmware: bound the copy to the
+		 * destination field (strcpy() had no such bound).
+		 */
+		strscpy(layer->log_modules[idx].name, (char *)data,
+			sizeof(layer->log_modules[idx].name));
+		data += (strlen((char *)data) + 1);
+
+		debug_create_module_entry(&qm35_hdl->debug,
+					  &layer->log_modules[idx]);
+	}
+
+	return 0;
+}
+
+/*
+ * Handle a LOG_CID_GET_LOG_LVL response: <id:u8> <lvl:u8>. Updates the
+ * matching module's cached level and completes any synchronous
+ * log_level_get() waiter. Always returns 0.
+ */
+static int parse_get_log_lvl_response(struct log_layer *layer, uint8_t *data,
+				      uint16_t data_size)
+{
+	uint8_t src_id, src_lvl;
+	struct qm35_ctx *qm35_hdl;
+	int idx = 0;
+
+	src_id = *data++;
+	src_lvl = *data;
+
+	qm35_hdl = container_of(layer, struct qm35_ctx, log_layer);
+
+	for (idx = 0; idx < layer->log_modules_count; idx++) {
+		if (layer->log_modules[idx].id == src_id) {
+			layer->log_modules[idx].lvl = src_lvl;
+
+			/* Wake a log_level_get() blocked on this module. */
+			if (layer->log_modules[idx].read_done)
+				complete(layer->log_modules[idx].read_done);
+		}
+	}
+
+	return 0;
+}
+
+/* Layer registration hooks: no setup/teardown needed for the log layer. */
+static int log_registered(struct hsspi_layer *hlayer)
+{
+	return 0;
+}
+
+static void log_unregistered(struct hsspi_layer *hlayer)
+{
+	;
+}
+
+/* Hand the HSSPI core a freshly allocated block for reception. */
+static struct hsspi_block *log_get(struct hsspi_layer *hlayer, u16 length)
+{
+	struct log_packet *packet = log_packet_alloc(length);
+
+	return packet ? &packet->blk : NULL;
+}
+
+/*
+ * RX callback for the LOG layer: validates the packet framing, then
+ * dispatches on the command id.
+ */
+static void log_received(struct hsspi_layer *hlayer, struct hsspi_block *blk,
+			 int status)
+{
+	struct log_layer *layer;
+	struct log_packet_hdr hdr;
+	uint8_t *body;
+	struct qm35_ctx *qm35_hdl;
+	struct log_packet *p;
+
+	p = container_of(blk, struct log_packet, blk);
+
+	if (status)
+		goto out;
+
+	layer = container_of(hlayer, struct log_layer, hlayer);
+	qm35_hdl = container_of(layer, struct qm35_ctx, log_layer);
+
+	if (blk->length < sizeof(struct log_packet_hdr)) {
+		pr_err("qm35: log packet header too small: %d bytes\n",
+		       blk->length);
+		goto out;
+	}
+
+	memcpy(&hdr, blk->data, sizeof(struct log_packet_hdr));
+	body = blk->data + sizeof(struct log_packet_hdr);
+
+	if (blk->length < sizeof(struct log_packet_hdr) + hdr.b_size) {
+		pr_err("qm35: incomplete log packet: %d/%d bytes\n",
+		       blk->length, hdr.b_size);
+		goto out;
+	}
+	/* NOTE(review): this prints the body for every cmd id (not only
+	 * trace ntf), and "%.*s" with b_size - 2 goes negative when
+	 * b_size < 2 — confirm this is intended. */
+	if (qm35_hdl->log_qm_traces)
+		pr_info("qm35_log: %.*s\n", hdr.b_size - 2, body);
+
+	switch (hdr.cmd_id) {
+	case LOG_CID_TRACE_NTF:
+		/* Store the trace and signal debug readers. */
+		rb_push(&layer->rb, body, hdr.b_size);
+		debug_new_trace_available(&qm35_hdl->debug);
+		break;
+	case LOG_CID_SET_LOG_LVL:
+		break;
+	case LOG_CID_GET_LOG_LVL:
+		parse_get_log_lvl_response(layer, body, hdr.b_size);
+		break;
+	case LOG_CID_GET_LOG_SRC:
+		parse_log_sources_response(layer, body, hdr.b_size);
+		break;
+	default:
+		break;
+	}
+out:
+	log_packet_free(p);
+}
+
+/* TX-complete callback: release the packet allocated by the sender. */
+static void log_sent(struct hsspi_layer *hlayer, struct hsspi_block *blk,
+		     int status)
+{
+	struct log_packet *p = container_of(blk, struct log_packet, blk);
+
+	log_packet_free(p);
+}
+
+/* Callbacks the HSSPI core invokes for the UL_LOG channel. */
+static const struct hsspi_layer_ops log_ops = {
+	.registered = log_registered,
+	.unregistered = log_unregistered,
+	.get = log_get,
+	.received = log_received,
+	.sent = log_sent,
+};
+
+/*
+ * Debug hook: enable trace collection. The first enable sends a
+ * "get log sources" request so the per-module entries can be created
+ * from the response.
+ */
+static void log_enable_set(struct debug *dbg, int enable)
+{
+	struct qm35_ctx *qm35_hdl;
+	struct log_packet *p;
+	int ret;
+
+	// TODO: what happens if enable is false?
+	// if (!enable)
+	// return;
+	qm35_hdl = container_of(dbg, struct qm35_ctx, debug);
+
+	if (qm35_hdl->log_layer.enabled) {
+		pr_warn("qm35: logging already enabled\n");
+		return;
+	}
+
+	p = encode_get_log_sources_packet();
+	if (!p) {
+		pr_err("failed to encode get log sources packet\n");
+		return;
+	}
+
+	/* On success the packet is released by the sent() callback. */
+	ret = hsspi_send(&qm35_hdl->hsspi, &qm35_hdl->log_layer.hlayer,
+			 &p->blk);
+
+	if (ret) {
+		pr_err("failed to send spi packet\n");
+		log_packet_free(p);
+	} else {
+		/* Stores `enable` as-is, so enable == 0 lands here too. */
+		qm35_hdl->log_layer.enabled = enable;
+	}
+}
+
+/* Debug hook: report whether trace collection has been enabled. */
+static int log_enable_get(struct debug *dbg)
+{
+	struct qm35_ctx *ctx = container_of(dbg, struct qm35_ctx, debug);
+
+	return ctx->log_layer.enabled;
+}
+
+/* Debug hook: ask the QM35 to change a log source's verbosity level. */
+static void log_level_set(struct debug *dbg, struct log_module *log_module,
+			  int lvl)
+{
+	struct qm35_ctx *ctx = container_of(dbg, struct qm35_ctx, debug);
+	struct log_packet *packet;
+
+	packet = encode_set_log_level_packet(log_module->id, lvl);
+	if (!packet) {
+		pr_err("failed to encode set log level packet\n");
+		return;
+	}
+
+	/* On success the packet is released by the sent() callback. */
+	if (hsspi_send(&ctx->hsspi, &ctx->log_layer.hlayer, &packet->blk)) {
+		pr_err("failed to send spi packet\n");
+		log_packet_free(packet);
+	}
+}
+
+/*
+ * Debug hook: synchronously query a log source's level from the QM35.
+ * Blocks until parse_get_log_lvl_response() completes the request.
+ * Returns the (possibly stale on error) cached level, 0 on failure.
+ */
+static int log_level_get(struct debug *dbg, struct log_module *log_module)
+{
+	struct qm35_ctx *qm35_hdl;
+	struct log_packet *p;
+	int ret = 0;
+	DECLARE_COMPLETION_ONSTACK(comp);
+
+	qm35_hdl = container_of(dbg, struct qm35_ctx, debug);
+
+	p = encode_get_log_level_packet(log_module->id);
+	if (!p) {
+		pr_err("failed to encode get log level packet\n");
+		return 0;
+	}
+
+	log_module->read_done = &comp;
+
+	ret = hsspi_send(&qm35_hdl->hsspi, &qm35_hdl->log_layer.hlayer,
+			 &p->blk);
+	if (ret) {
+		pr_err("failed to send spi packet\n");
+		log_packet_free(p);
+		/* Nothing will complete `comp`; don't leave it visible. */
+		log_module->read_done = NULL;
+		return 0;
+	}
+
+	wait_for_completion(log_module->read_done);
+
+	/*
+	 * Clear the pointer before `comp` goes out of scope: it refers to
+	 * this stack frame, and a later response must not complete a dead
+	 * completion (the original left it dangling).
+	 */
+	log_module->read_done = NULL;
+
+	return log_module->lvl;
+}
+
+/* Debug hook: pop the next stored trace entry from the ring buffer. */
+static char *log_trace_get_next(struct debug *dbg, rb_entry_size_t *len)
+{
+	struct qm35_ctx *qm35_hdl;
+
+	qm35_hdl = container_of(dbg, struct qm35_ctx, debug);
+
+	return rb_pop(&qm35_hdl->log_layer.rb, len);
+}
+
+/* Debug hook: size of the next trace entry without consuming it. */
+static rb_entry_size_t log_trace_get_next_size(struct debug *dbg)
+{
+	struct qm35_ctx *qm35_hdl;
+
+	qm35_hdl = container_of(dbg, struct qm35_ctx, debug);
+
+	return rb_next_size(&qm35_hdl->log_layer.rb);
+}
+
+/* Debug hook: whether at least one trace entry is available. */
+static bool log_trace_next_avail(struct debug *dbg)
+{
+	struct qm35_ctx *qm35_hdl;
+
+	qm35_hdl = container_of(dbg, struct qm35_ctx, debug);
+
+	return rb_can_pop(&qm35_hdl->log_layer.rb);
+}
+
+/* Debug hook: drop all stored trace entries. */
+static void log_trace_reset(struct debug *dbg)
+{
+	struct qm35_ctx *qm35_hdl;
+
+	qm35_hdl = container_of(dbg, struct qm35_ctx, debug);
+
+	rb_reset(&qm35_hdl->log_layer.rb);
+}
+
+/* Debug hook: read the QM35 device id (delegates to qm_get_dev_id()). */
+static int get_dev_id(struct debug *dbg, uint16_t *dev_id)
+{
+	struct qm35_ctx *qm35_hdl;
+
+	qm35_hdl = container_of(dbg, struct qm35_ctx, debug);
+
+	return qm_get_dev_id(qm35_hdl, dev_id);
+}
+
+/* Debug hook: read the QM35 SoC id (delegates to qm_get_soc_id()). */
+static int get_soc_id(struct debug *dbg, uint8_t *soc_id)
+{
+	struct qm35_ctx *qm35_hdl;
+
+	qm35_hdl = container_of(dbg, struct qm35_ctx, debug);
+
+	return qm_get_soc_id(qm35_hdl, soc_id);
+}
+
+/* Trace/log accessors exposed through the debug interface. */
+static const struct debug_trace_ops debug_trace_ops = {
+	.enable_set = log_enable_set,
+	.enable_get = log_enable_get,
+	.level_set = log_level_set,
+	.level_get = log_level_get,
+	.trace_get_next = log_trace_get_next,
+	.trace_get_next_size = log_trace_get_next_size,
+	.trace_next_avail = log_trace_next_avail,
+	.trace_reset = log_trace_reset,
+	.get_dev_id = get_dev_id,
+	.get_soc_id = get_soc_id,
+};
+
+int log_layer_init(struct log_layer *log, struct debug *debug)
+{
+	int ret;
+
+	/* Ring buffer that accumulates trace ntfs from the QM35. */
+	ret = rb_init(&log->rb, TRACE_RB_SIZE);
+	if (ret)
+		return ret;
+
+	/* Describe this layer to the HSSPI core. */
+	log->hlayer.name = "QM35 LOG";
+	log->hlayer.id = UL_LOG;
+	log->hlayer.ops = &log_ops;
+	log->enabled = false;
+	/* Source table is built lazily from the get-log-sources response. */
+	log->log_modules = NULL;
+	log->log_modules_count = 0;
+
+	/* Hook the trace accessors into the debug interface. */
+	debug->trace_ops = &debug_trace_ops;
+
+	return 0;
+}
+
+void log_layer_deinit(struct log_layer *log)
+{
+	/* Free the log source table and the trace ring buffer. */
+	kfree(log->log_modules);
+	rb_deinit(&log->rb);
+}
diff --git a/hsspi_log.h b/hsspi_log.h
new file mode 100644
index 0000000..e5e11ad
--- /dev/null
+++ b/hsspi_log.h
@@ -0,0 +1,70 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/*
+ * This file is part of the QM35 UCI stack for linux.
+ *
+ * Copyright (c) 2022 Qorvo US, Inc.
+ *
+ * This software is provided under the GNU General Public License, version 2
+ * (GPLv2), as well as under a Qorvo commercial license.
+ *
+ * You may choose to use this software under the terms of the GPLv2 License,
+ * version 2 ("GPLv2"), as published by the Free Software Foundation.
+ * You should have received a copy of the GPLv2 along with this program. If
+ * not, see <http://www.gnu.org/licenses/>.
+ *
+ * This program is distributed under the GPLv2 in the hope that it will be
+ * useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GPLv2 for more
+ * details.
+ *
+ * If you cannot meet the requirements of the GPLv2, you may not use this
+ * software for any purpose without first obtaining a commercial license from
+ * Qorvo.
+ * Please contact Qorvo to inquire about licensing terms.
+ *
+ * QM35 LOG layer HSSPI Protocol
+ */
+
+#ifndef __HSSPI_LOG_H__
+#define __HSSPI_LOG_H__
+
+#include "hsspi.h"
+#include "debug.h"
+#include "qm35_rb.h"
+
+struct log_packet {
+ struct hsspi_block blk;
+ struct completion *write_done;
+};
+
+struct log_layer {
+ struct hsspi_layer hlayer;
+ uint8_t log_modules_count;
+ struct log_module *log_modules;
+ struct rb rb;
+ bool enabled;
+};
+
+/**
+ * log_packet_alloc() - Allocate an LOG packet
+ * @length: length of the LOG packet
+ *
+ * Allocate an LOG packet that can be used by the HSSPI driver in
+ * order to send or receive an LOG packet.
+ *
+ * Return: a newly allocated &struct log_packet or NULL
+ */
+struct log_packet *log_packet_alloc(u16 length);
+
+/**
+ * log_packet_free() - Free an LOG packet
+ * @p: pointer to the &struct log_packet to free
+ *
+ */
+void log_packet_free(struct log_packet *p);
+
+int log_layer_init(struct log_layer *log, struct debug *debug);
+void log_layer_deinit(struct log_layer *log);
+
+#endif // __HSSPI_LOG_H__
diff --git a/hsspi_test.c b/hsspi_test.c
new file mode 100644
index 0000000..ae02d52
--- /dev/null
+++ b/hsspi_test.c
@@ -0,0 +1,177 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * This file is part of the QM35 UCI stack for linux.
+ *
+ * Copyright (c) 2021 Qorvo US, Inc.
+ *
+ * This software is provided under the GNU General Public License, version 2
+ * (GPLv2), as well as under a Qorvo commercial license.
+ *
+ * You may choose to use this software under the terms of the GPLv2 License,
+ * version 2 ("GPLv2"), as published by the Free Software Foundation.
+ * You should have received a copy of the GPLv2 along with this program. If
+ * not, see <http://www.gnu.org/licenses/>.
+ *
+ * This program is distributed under the GPLv2 in the hope that it will be
+ * useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GPLv2 for more
+ * details.
+ *
+ * If you cannot meet the requirements of the GPLv2, you may not use this
+ * software for any purpose without first obtaining a commercial license from
+ * Qorvo.
+ * Please contact Qorvo to inquire about licensing terms.
+ *
+ * QM35 HSSPI test layer
+ */
+
+#include "hsspi_test.h"
+#include "hsspi_uci.h"
+#include <linux/printk.h>
+#include <linux/delay.h>
+
+int hsspi_test_registered(struct hsspi_layer *upper_layer);
+void hsspi_test_unregistered(struct hsspi_layer *upper_layer);
+struct hsspi_block *hsspi_test_get(struct hsspi_layer *upper_layer, u16 length);
+void hsspi_test_received(struct hsspi_layer *upper_layer,
+ struct hsspi_block *blk, int status);
+void hsspi_test_sent(struct hsspi_layer *upper_layer, struct hsspi_block *blk,
+ int status);
+
+struct hsspi_layer_ops test_hsspi_layer_ops = {
+ .registered = hsspi_test_registered,
+ .unregistered = hsspi_test_unregistered,
+ .get = hsspi_test_get,
+ .received = hsspi_test_received,
+ .sent = hsspi_test_sent,
+};
+
+struct hsspi_layer test_hsspi_layer = {
+ .name = "hsspi_test_layer",
+ .id = UL_TEST_HSSPI,
+ .ops = &test_hsspi_layer_ops,
+};
+
+static struct hsspi *ghsspi;
+int sleep_inter_frame_ms;
+extern int test_sleep_after_ss_ready_us;
+
+int hsspi_test_init(struct hsspi *hsspi)
+{
+ ghsspi = hsspi;
+ return hsspi_register(hsspi, &test_hsspi_layer);
+}
+
+void hsspi_test_deinit(struct hsspi *hsspi)
+{
+ hsspi_unregister(hsspi, &test_hsspi_layer);
+}
+
+int hsspi_test_registered(struct hsspi_layer *upper_layer)
+{
+ return 0;
+}
+
+void hsspi_test_unregistered(struct hsspi_layer *upper_layer)
+{
+}
+
+static int check_rx(const u8 *rx, int len)
+{
+ int idx = 0, err = 0;
+ for (; idx < len; idx++) {
+ if (rx[idx] != (idx & 0xff)) {
+ pr_err("hsspi test: check_rx rx[%u] != %u\n", idx,
+ rx[idx]);
+ print_hex_dump(KERN_DEBUG, "rx:", DUMP_PREFIX_ADDRESS,
+ 16, 1, rx, len, false);
+ err = -5963;
+ break;
+ }
+ }
+ return err;
+}
+
+struct hsspi_block *hsspi_test_get(struct hsspi_layer *layer, u16 length)
+{
+ struct hsspi_block *blk = kzalloc(sizeof(*blk) + length, GFP_KERNEL);
+ if (blk) {
+ blk->data = blk + 1;
+ blk->size = length;
+ blk->length = length;
+ }
+ return blk;
+}
+
+void hsspi_test_set_inter_frame_ms(int ms)
+{
+ sleep_inter_frame_ms = ms;
+}
+
+void hsspi_test_received(struct hsspi_layer *layer, struct hsspi_block *blk,
+ int status)
+{
+ static uint64_t bytes, msgs, errors, bytes0, msgs0, errors0;
+ static time64_t last_perf_dump;
+ int error = check_rx(blk->data, blk->length) ? 1 : 0;
+ time64_t now;
+ errors += error;
+
+ if (!last_perf_dump) {
+ last_perf_dump = ktime_get_seconds();
+ }
+ now = ktime_get_seconds();
+
+ /* inject latencies between each message and between the check
+ * of ss-ready and the xfer.
+ * The test is expected to fail if
+ * sleep_inter_frame_ms > CONFIG_PM_RET_SLEEP_DELAY_US
+ */
+ if (sleep_inter_frame_ms > 0) {
+ static int delay_us = 0;
+ test_sleep_after_ss_ready_us =
+ sleep_inter_frame_ms * 1000 - delay_us;
+ usleep_range(delay_us, delay_us + 1);
+ delay_us += 100;
+ if (delay_us > sleep_inter_frame_ms * 1000)
+ delay_us = 0;
+ } else {
+ test_sleep_after_ss_ready_us = 0;
+ }
+ bytes += blk->length;
+ msgs++;
+ error |= hsspi_send(ghsspi, layer, blk);
+ if (error || ((msgs % 100) == 0))
+ pr_info("hsspi test: bytes received %llu, msgs %llu, errors %llu\n",
+ bytes, msgs, errors);
+ if (now > last_perf_dump) {
+ uint64_t dbytes = bytes >= bytes0 ? bytes - bytes0 :
+ ~0ULL - bytes0 + bytes;
+ uint64_t dmsgs = msgs >= msgs0 ? msgs - msgs0 :
+ ~0ULL - msgs0 + msgs;
+ uint64_t derrors = errors >= errors0 ? errors - errors0 :
+ ~0ULL - errors0 + errors;
+ pr_info("hsspi test perfs: %llu B/s, %llu msgs/s, %llu errors/s\n",
+ dbytes / (now - last_perf_dump),
+ dmsgs / (now - last_perf_dump),
+ derrors / (now - last_perf_dump));
+ bytes0 = bytes;
+ msgs0 = msgs;
+ errors0 = errors;
+ last_perf_dump = now;
+ }
+}
+
+void hsspi_test_sent(struct hsspi_layer *layer, struct hsspi_block *blk,
+ int status)
+{
+ static uint64_t bytes, msgs, errors;
+ errors += status ? 1 : 0;
+ msgs++;
+ bytes += blk->length;
+ if (status || ((msgs % 100) == 0))
+ pr_info("hsspi test: bytes sent %llu, msgs %llu, errors %llu\n",
+ bytes, msgs, errors);
+ kfree(blk);
+}
diff --git a/hsspi_test.h b/hsspi_test.h
new file mode 100644
index 0000000..3a8eefc
--- /dev/null
+++ b/hsspi_test.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/*
+ * This file is part of the QM35 UCI stack for linux.
+ *
+ * Copyright (c) 2022 Qorvo US, Inc.
+ *
+ * This software is provided under the GNU General Public License, version 2
+ * (GPLv2), as well as under a Qorvo commercial license.
+ *
+ * You may choose to use this software under the terms of the GPLv2 License,
+ * version 2 ("GPLv2"), as published by the Free Software Foundation.
+ * You should have received a copy of the GPLv2 along with this program. If
+ * not, see <http://www.gnu.org/licenses/>.
+ *
+ * This program is distributed under the GPLv2 in the hope that it will be
+ * useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GPLv2 for more
+ * details.
+ *
+ * If you cannot meet the requirements of the GPLv2, you may not use this
+ * software for any purpose without first obtaining a commercial license from
+ * Qorvo.
+ * Please contact Qorvo to inquire about licensing terms.
+ *
+ * QM35 HSSPI test layer
+ */
+
+#ifndef __HSSPI_TEST_H___
+#define __HSSPI_TEST_H___
+
+#include "hsspi.h"
+
+int hsspi_test_init(struct hsspi *hsspi);
+void hsspi_test_deinit(struct hsspi *hsspi);
+
+void hsspi_test_set_inter_frame_ms(int ms);
+
+#endif /* __HSSPI_TEST_H___ */ \ No newline at end of file
diff --git a/hsspi_uci.c b/hsspi_uci.c
new file mode 100644
index 0000000..0d85591
--- /dev/null
+++ b/hsspi_uci.c
@@ -0,0 +1,233 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * This file is part of the QM35 UCI stack for linux.
+ *
+ * Copyright (c) 2021 Qorvo US, Inc.
+ *
+ * This software is provided under the GNU General Public License, version 2
+ * (GPLv2), as well as under a Qorvo commercial license.
+ *
+ * You may choose to use this software under the terms of the GPLv2 License,
+ * version 2 ("GPLv2"), as published by the Free Software Foundation.
+ * You should have received a copy of the GPLv2 along with this program. If
+ * not, see <http://www.gnu.org/licenses/>.
+ *
+ * This program is distributed under the GPLv2 in the hope that it will be
+ * useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GPLv2 for more
+ * details.
+ *
+ * If you cannot meet the requirements of the GPLv2, you may not use this
+ * software for any purpose without first obtaining a commercial license from
+ * Qorvo.
+ * Please contact Qorvo to inquire about licensing terms.
+ *
+ * QM35 UCI layer HSSPI Protocol
+ */
+
+#include "qm35.h"
+#include "hsspi_uci.h"
+
+struct uci_packet *uci_packet_alloc(u16 length)
+{
+ struct uci_packet *p;
+
+ p = kzalloc(sizeof(*p), GFP_KERNEL);
+ if (!p)
+ return NULL;
+
+ if (hsspi_init_block(&p->blk, length)) {
+ kfree(p);
+ return NULL;
+ }
+
+ p->data = p->blk.data;
+ p->length = p->blk.length;
+ return p;
+}
+
+void uci_packet_free(struct uci_packet *p)
+{
+ hsspi_deinit_block(&p->blk);
+ kfree(p);
+}
+
+static int uci_registered(struct hsspi_layer *layer)
+{
+ return 0;
+}
+
+static void clear_rx_list(struct uci_layer *uci)
+{
+ struct uci_packet *p;
+
+ mutex_lock(&uci->lock);
+
+ while (!list_empty(&uci->rx_list)) {
+ p = list_first_entry(&uci->rx_list, struct uci_packet, list);
+
+ list_del(&p->list);
+
+ uci_packet_free(p);
+ }
+
+ mutex_unlock(&uci->lock);
+
+ wake_up_interruptible(&uci->wq);
+}
+
+static void uci_unregistered(struct hsspi_layer *hlayer)
+{
+ struct uci_layer *uci = container_of(hlayer, struct uci_layer, hlayer);
+
+ clear_rx_list(uci);
+}
+
+static struct hsspi_block *uci_get(struct hsspi_layer *hlayer, u16 length)
+{
+ struct uci_packet *p;
+
+ p = uci_packet_alloc(length);
+ if (!p)
+ return NULL;
+
+ return &p->blk;
+}
+
+static void uci_sent(struct hsspi_layer *hlayer, struct hsspi_block *blk,
+ int status)
+{
+ struct uci_packet *p = container_of(blk, struct uci_packet, blk);
+
+ p->status = status;
+ complete(p->write_done);
+}
+
+#define UCI_CONTROL_PACKET_PAYLOAD_SIZE_LOCATION (3)
+
+static size_t get_payload_size_from_header(const u8 *header)
+{
+ bool is_data_packet = ((header[0] >> 5) & 0x07) == 0;
+ bool is_control_packet = !is_data_packet && ((header[0] >> 7) == 0);
+
+ if (is_control_packet)
+ return header[UCI_CONTROL_PACKET_PAYLOAD_SIZE_LOCATION];
+
+ return (header[3] << 8) | header[2];
+}
+
+#define UCI_PACKET_HEADER_SIZE (4)
+
+static void uci_received(struct hsspi_layer *hlayer, struct hsspi_block *blk,
+ int status)
+{
+ struct uci_layer *uci = container_of(hlayer, struct uci_layer, hlayer);
+ struct uci_packet *p = container_of(blk, struct uci_packet, blk);
+
+ if (status)
+ uci_packet_free(p);
+ else {
+ struct uci_packet *next;
+ size_t readn = 0;
+ size_t payload_size;
+
+ while (1) {
+ if (blk->length - readn < UCI_PACKET_HEADER_SIZE)
+ // Incomplete UCI header
+ break;
+
+ payload_size = get_payload_size_from_header(
+ (u8 *)blk->data + readn);
+
+ if (blk->length - readn <=
+ UCI_PACKET_HEADER_SIZE + payload_size)
+ // blk contains no additional packet
+ break;
+
+ next = kzalloc(sizeof(*next), GFP_KERNEL);
+ if (!next)
+ break;
+
+ next->data = p->blk.data + readn;
+ next->length = UCI_PACKET_HEADER_SIZE + payload_size;
+
+ readn += next->length;
+
+ mutex_lock(&uci->lock);
+ list_add_tail(&next->list, &uci->rx_list);
+ mutex_unlock(&uci->lock);
+ }
+
+ p->data = p->blk.data + readn;
+ p->length = p->blk.length - readn;
+
+ mutex_lock(&uci->lock);
+ list_add_tail(&p->list, &uci->rx_list);
+ mutex_unlock(&uci->lock);
+
+ wake_up_interruptible(&uci->wq);
+ }
+}
+
+static const struct hsspi_layer_ops uci_ops = {
+ .registered = uci_registered,
+ .unregistered = uci_unregistered,
+ .get = uci_get,
+ .received = uci_received,
+ .sent = uci_sent,
+};
+
+int uci_layer_init(struct uci_layer *uci)
+{
+ uci->hlayer.name = "UCI";
+ uci->hlayer.id = UL_UCI_APP;
+ uci->hlayer.ops = &uci_ops;
+
+ INIT_LIST_HEAD(&uci->rx_list);
+ mutex_init(&uci->lock);
+ init_waitqueue_head(&uci->wq);
+ return 0;
+}
+
+void uci_layer_deinit(struct uci_layer *uci)
+{
+ clear_rx_list(uci);
+}
+
+bool uci_layer_has_data_available(struct uci_layer *uci)
+{
+ bool ret;
+
+ mutex_lock(&uci->lock);
+ ret = !list_empty(&uci->rx_list);
+ mutex_unlock(&uci->lock);
+ return ret;
+}
+
+struct uci_packet *uci_layer_read(struct uci_layer *uci, size_t max_size,
+ bool non_blocking)
+{
+ struct uci_packet *p;
+ int ret;
+
+ if (!non_blocking) {
+ ret = wait_event_interruptible(
+ uci->wq, uci_layer_has_data_available(uci));
+ if (ret)
+ return ERR_PTR(ret);
+ }
+
+ mutex_lock(&uci->lock);
+ p = list_first_entry_or_null(&uci->rx_list, struct uci_packet, list);
+ if (p) {
+ if (p->length > max_size)
+ p = ERR_PTR(-EMSGSIZE);
+ else
+ list_del(&p->list);
+ } else
+ p = ERR_PTR(-EAGAIN);
+
+ mutex_unlock(&uci->lock);
+ return p;
+}
diff --git a/hsspi_uci.h b/hsspi_uci.h
new file mode 100644
index 0000000..8890424
--- /dev/null
+++ b/hsspi_uci.h
@@ -0,0 +1,129 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/*
+ * This file is part of the QM35 UCI stack for linux.
+ *
+ * Copyright (c) 2021 Qorvo US, Inc.
+ *
+ * This software is provided under the GNU General Public License, version 2
+ * (GPLv2), as well as under a Qorvo commercial license.
+ *
+ * You may choose to use this software under the terms of the GPLv2 License,
+ * version 2 ("GPLv2"), as published by the Free Software Foundation.
+ * You should have received a copy of the GPLv2 along with this program. If
+ * not, see <http://www.gnu.org/licenses/>.
+ *
+ * This program is distributed under the GPLv2 in the hope that it will be
+ * useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GPLv2 for more
+ * details.
+ *
+ * If you cannot meet the requirements of the GPLv2, you may not use this
+ * software for any purpose without first obtaining a commercial license from
+ * Qorvo.
+ * Please contact Qorvo to inquire about licensing terms.
+ *
+ * QM35 UCI layer HSSPI Protocol
+ */
+
+#ifndef __HSSPI_UCI_H__
+#define __HSSPI_UCI_H__
+
+#include <linux/completion.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/wait.h>
+
+#include "hsspi.h"
+
+/**
+ * struct uci_packet - UCI packet that implements a &struct hsspi_block.
+ * @blk: &struct hsspi_block
+ * @write_done: notify when the packet has actually been sent
+ * @list: link with &struct uci_layer.rx_list
+ * @status: status of the transfer
+ */
+struct uci_packet {
+ struct hsspi_block blk;
+ struct completion *write_done;
+ struct list_head list;
+ u8 *data;
+ int length;
+ int status;
+};
+
+/**
+ * uci_packet_alloc() - Allocate a UCI packet
+ * @length: length of the UCI packet
+ *
+ * Allocate a UCI packet that can be used by the HSSPI driver in
+ * order to send or receive a UCI packet.
+ *
+ * Return: a newly allocated &struct uci_packet or NULL
+ */
+struct uci_packet *uci_packet_alloc(u16 length);
+
+/**
+ * uci_packet_free() - Free a UCI packet
+ * @p: pointer to the &struct uci_packet to free
+ *
+ */
+void uci_packet_free(struct uci_packet *p);
+
+/**
+ * struct uci_layer - Implement an HSSPI Layer
+ * @hlayer: &struct hsspi_layer
+ * @rx_list: list of received UCI packets
+ * @lock: protect the &struct uci_layer.rx_list
+ * @wq: notify when the &struct uci_layer.rx_list is not empty
+ */
+struct uci_layer {
+ struct hsspi_layer hlayer;
+ struct list_head rx_list;
+ struct mutex lock;
+ wait_queue_head_t wq;
+};
+
+/**
+ * uci_layer_init() - Initialize UCI Layer
+ *
+ */
+int uci_layer_init(struct uci_layer *uci);
+
+/**
+ * uci_layer_deinit() - Deinitialize UCI Layer
+ *
+ */
+void uci_layer_deinit(struct uci_layer *uci);
+
+/**
+ * uci_layer_has_data_available() - checks if the layer has some rx packets
+ * @uci: pointer to &struct uci_layer
+ *
+ * Function that checks if the UCI layer has some data waiting to be read.
+ *
+ * Return: true if some data is available, false otherwise.
+ */
+bool uci_layer_has_data_available(struct uci_layer *uci);
+
+/**
+ * uci_layer_read() - get a packet from the rx_list
+ * @uci: pointer to &struct uci_layer
+ * @max_size: maximum size possible for the UCI packet
+ * @non_blocking: true if non blocking, false otherwise
+ *
+ * This function returns a UCI packet if available in the
+ * &struct uci_layer.rx_list. The max_size argument logic is due to the way
+ * the /dev/uci device is made. We should ensure that we return an entire
+ * packet in uci_read, so we must get a packet only if the caller has
+ * enough room for it.
+ *
+ * Return: a &struct uci_packet if succeed,
+ * -EINTR if it was interrupted (in blocking mode),
+ * -EMSGSIZE if the available UCI packet is bigger than max_size,
+ * -EAGAIN if there is no available UCI packet (in non blocking mode)
+ */
+struct uci_packet *uci_layer_read(struct uci_layer *uci, size_t max_size,
+ bool non_blocking);
+
+#endif // __HSSPI_UCI_H__
diff --git a/libqmrom/CMakeLists.txt b/libqmrom/CMakeLists.txt
new file mode 100644
index 0000000..3e11781
--- /dev/null
+++ b/libqmrom/CMakeLists.txt
@@ -0,0 +1,13 @@
+set(SOURCES
+ src/qmrom_common.c
+ src/qmrom_a0.c
+ src/qmrom_b0.c
+ src/qmrom_c0.c
+ src/qmrom_log.c
+)
+
+add_library(qmrom SHARED ${SOURCES})
+
+target_include_directories(qmrom PUBLIC
+ include
+) \ No newline at end of file
diff --git a/libqmrom/README.md b/libqmrom/README.md
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/libqmrom/README.md
diff --git a/libqmrom/include/byteswap.h b/libqmrom/include/byteswap.h
new file mode 100644
index 0000000..6852736
--- /dev/null
+++ b/libqmrom/include/byteswap.h
@@ -0,0 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0 OR Apache-2.0
+/*
+ * Copyright 2022 Qorvo US, Inc.
+ *
+ */
+
+#ifndef BYTESWAP_H
+#define BYTESWAP_H
+#define bswap_16 __builtin_bswap16
+#endif
diff --git a/libqmrom/include/qmrom.h b/libqmrom/include/qmrom.h
new file mode 100644
index 0000000..c403ed1
--- /dev/null
+++ b/libqmrom/include/qmrom.h
@@ -0,0 +1,171 @@
+// SPDX-License-Identifier: GPL-2.0 OR Apache-2.0
+/*
+ * Copyright 2021 Qorvo US, Inc.
+ *
+ */
+
+#ifndef __QMROM_H__
+#define __QMROM_H__
+
+#ifndef __KERNEL__
+#include <errno.h>
+#include <stdint.h>
+#include <string.h>
+#include <stdint.h>
+#include <stdbool.h>
+#include <byteswap.h>
+#include <inttypes.h>
+#else
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#define bswap_16 be16_to_cpu
+#define PRIu32 "u"
+#endif
+
+#include <qmrom_error.h>
+
+#undef CHECK_STCS
+
+#define PEG_ERR_TIMEOUT PEG_ERR_BASE - 1
+#define PEG_ERR_ROM_NOT_READY PEG_ERR_BASE - 2
+#define PEG_ERR_SEND_CERT_WRITE PEG_ERR_BASE - 3
+#define PEG_ERR_WRONG_REVISION PEG_ERR_BASE - 4
+#define PEG_ERR_FIRST_KEY_CERT_OR_FW_VER PEG_ERR_BASE - 5
+
+enum chip_revision_e {
+ CHIP_REVISION_A0 = 0xA0,
+ CHIP_REVISION_B0 = 0xB0,
+ CHIP_REVISION_C0 = 0xC0,
+ CHIP_REVISION_UNKNOWN = 0xFF
+};
+
+#define HBK_LOC 12
+typedef enum {
+ HBK_2E_ICV = 0,
+ HBK_2E_OEM = 1,
+ HBK_1E_ICV_OEM = 2,
+} hbk_t;
+
+#define ROM_VERSION_A0 0x01a0
+#define ROM_VERSION_B0 0xb000
+
+#define ROM_SOC_ID_LEN 0x20
+#define ROM_UUID_LEN 0x10
+
+/* Life cycle state definitions. */
+
+/*! Defines the CM life-cycle state value. */
+#define CC_BSV_CHIP_MANUFACTURE_LCS 0x0
+/*! Defines the DM life-cycle state value. */
+#define CC_BSV_DEVICE_MANUFACTURE_LCS 0x1
+/*! Defines the Secure life-cycle state value. */
+#define CC_BSV_SECURE_LCS 0x5
+/*! Defines the RMA life-cycle state value. */
+#define CC_BSV_RMA_LCS 0x7
+
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wpedantic"
+
+struct qmrom_handle;
+
+struct unstitched_firmware {
+ struct firmware *fw_img;
+ struct firmware *fw_crt;
+ struct firmware *key1_crt;
+ struct firmware *key2_crt;
+};
+
+#define SSTC2UINT32(handle, offset) \
+ ({ \
+ uint32_t tmp = 0xbeefdeed; \
+ if ((handle)->sstc->len >= (offset) + sizeof(tmp)) \
+ memcpy(&tmp, &(handle)->sstc->payload[(offset)], \
+ sizeof(tmp)); \
+ tmp; \
+ })
+
+#define SSTC2UINT16(handle, offset) \
+ ({ \
+ uint16_t tmp = 0xbeed; \
+ if ((handle)->sstc->len >= (offset) + sizeof(tmp)) \
+ memcpy(&tmp, &(handle)->sstc->payload[(offset)], \
+ sizeof(tmp)); \
+ tmp; \
+ })
+
+/* Those functions allow the libqmrom to call
+ * revision specific functions
+ */
+typedef int (*flash_fw_fn)(struct qmrom_handle *handle,
+ const struct firmware *fw);
+typedef int (*flash_unstitched_fw_fn)(struct qmrom_handle *handle,
+ const struct unstitched_firmware *fw);
+typedef int (*flash_debug_cert_fn)(struct qmrom_handle *handle,
+ struct firmware *dbg_cert);
+typedef int (*erase_debug_cert_fn)(struct qmrom_handle *handle);
+
+struct rom_code_ops {
+ flash_fw_fn flash_fw;
+ flash_unstitched_fw_fn flash_unstitched_fw;
+ flash_debug_cert_fn flash_debug_cert;
+ erase_debug_cert_fn erase_debug_cert;
+};
+
+/* Those functions allow the libqmrom to call
+ * device specific functions
+ */
+typedef int (*reset_device_fn)(void *handle);
+
+struct device_ops {
+ reset_device_fn reset;
+};
+
+struct qmrom_handle {
+ void *spi_handle;
+ void *reset_handle;
+ void *ss_rdy_handle;
+ int comms_retries;
+ enum chip_revision_e chip_rev;
+ uint16_t device_version;
+ struct device_ops dev_ops;
+ struct rom_code_ops rom_ops;
+ uint32_t lcs_state;
+ struct stc *hstc;
+ struct stc *sstc;
+ uint8_t soc_id[ROM_SOC_ID_LEN];
+ uint8_t uuid[ROM_UUID_LEN];
+ bool is_be;
+};
+
+int qmrom_unstitch_fw(const struct firmware *fw,
+ struct unstitched_firmware *unstitched_fw,
+ enum chip_revision_e revision);
+struct qmrom_handle *qmrom_init(void *spi_handle, void *reset_handle,
+ void *ss_rdy_handle, int comms_retries,
+ reset_device_fn reset);
+void qmrom_deinit(struct qmrom_handle *handle);
+int qmrom_reboot_bootloader(struct qmrom_handle *handle);
+int qmrom_flash_dbg_cert(struct qmrom_handle *handle,
+ struct firmware *dbg_cert);
+int qmrom_erase_dbg_cert(struct qmrom_handle *handle);
+int qmrom_flash_fw(struct qmrom_handle *handle, const struct firmware *fw);
+int qmrom_flash_unstitched_fw(struct qmrom_handle *handle,
+ const struct unstitched_firmware *fw);
+
+int qmrom_pre_read(struct qmrom_handle *handle);
+int qmrom_read(struct qmrom_handle *handle);
+int qmrom_write_cmd(struct qmrom_handle *handle, uint8_t cmd);
+int qmrom_write_cmd32(struct qmrom_handle *handle, uint32_t cmd);
+int qmrom_write_size_cmd(struct qmrom_handle *handle, uint8_t cmd,
+ uint16_t data_size, const char *data);
+int qmrom_write_size_cmd32(struct qmrom_handle *handle, uint32_t cmd,
+ uint16_t data_size, const char *data);
+
+#ifdef CHECK_STCS
+void check_stcs(const char *func, int line, struct qmrom_handle *h);
+#else
+#define check_stcs(f, l, h)
+#endif
+#endif /* __QMROM_H__ */
diff --git a/libqmrom/include/qmrom_error.h b/libqmrom/include/qmrom_error.h
new file mode 100644
index 0000000..b893bdf
--- /dev/null
+++ b/libqmrom/include/qmrom_error.h
@@ -0,0 +1,21 @@
+/*
+ * Copyright 2021 Qorvo US, Inc.
+ *
+ * SPDX-License-Identifier: GPL-2.0 OR Apache-2.0
+ *
+ * This file is provided under the Apache License 2.0, or the
+ * GNU General Public License v2.0.
+ *
+ */
+#ifndef __QMROM_ERROR_H__
+#define __QMROM_ERROR_H__
+
+#define SPI_ERR_BASE -8000
+#define PEG_ERR_BASE -9000
+#define SPI_PROTO_ERR_BASE -10000
+
+#define IS_PTR_ERROR(ptr) (((intptr_t)ptr) <= 0)
+#define PTR2ERROR(ptr) ((int)((intptr_t)ptr))
+#define ERROR2PTR(error) ((void *)((intptr_t)error))
+
+#endif /* __QMROM_ERROR_H__ */ \ No newline at end of file
diff --git a/libqmrom/include/qmrom_log.h b/libqmrom/include/qmrom_log.h
new file mode 100644
index 0000000..5bd4400
--- /dev/null
+++ b/libqmrom/include/qmrom_log.h
@@ -0,0 +1,94 @@
+/*
+ * Copyright 2021 Qorvo US, Inc.
+ *
+ * SPDX-License-Identifier: GPL-2.0 OR Apache-2.0
+ *
+ * This file is provided under the Apache License 2.0, or the
+ * GNU General Public License v2.0.
+ *
+ */
+#ifndef __QMROM_LOGGER_H__
+#define __QMROM_LOGGER_H__
+
+enum log_level_e {
+ LOG_QUIET = 0,
+ LOG_ERR,
+ LOG_WARN,
+ LOG_INFO,
+ LOG_DBG,
+ LOG_LVL_MAX = LOG_DBG,
+};
+
+extern enum log_level_e __log_level__;
+
+void set_log_level(enum log_level_e lvl);
+
+static inline int is_debug_mode(void)
+{
+ return __log_level__ >= LOG_DBG;
+}
+
+static inline int is_log_level_allowed(enum log_level_e lvl)
+{
+ return (__log_level__ >= lvl);
+}
+
+void hexdump(enum log_level_e lvl, void *address, unsigned short length);
+void hexrawdump(enum log_level_e lvl, void *address, unsigned short length);
+
+#ifndef __KERNEL__
+#include <stdio.h>
+#define LOG_ERR(...) \
+ do { \
+ if (__log_level__ > LOG_QUIET) \
+ fprintf(stderr, __VA_ARGS__); \
+ } while (0)
+#define LOG_WARN(...) \
+ do { \
+ if (__log_level__ > LOG_ERR) \
+ fprintf(stderr, __VA_ARGS__); \
+ } while (0)
+#define LOG_INFO(...) \
+ do { \
+ if (__log_level__ > LOG_WARN) \
+ fprintf(stdout, __VA_ARGS__); \
+ } while (0)
+#define LOG_DBG(...) \
+ do { \
+ if (__log_level__ > LOG_INFO) \
+ fprintf(stdout, __VA_ARGS__); \
+ } while (0)
+#else
+#include <linux/device.h>
+
+extern struct device *__qmrom_log_dev__;
+
+void qmrom_set_log_device(struct device *dev, enum log_level_e lvl);
+
+#define LOG_ERR(...) \
+ do { \
+ if (__qmrom_log_dev__) \
+ if (__log_level__ > LOG_QUIET) \
+ dev_err(__qmrom_log_dev__, __VA_ARGS__); \
+ } while (0)
+#define LOG_WARN(...) \
+ do { \
+ if (__qmrom_log_dev__) \
+ if (__log_level__ > LOG_ERR) \
+ dev_warn(__qmrom_log_dev__, __VA_ARGS__); \
+ } while (0)
+#define LOG_INFO(...) \
+ do { \
+ if (__qmrom_log_dev__) \
+ if (__log_level__ > LOG_WARN) \
+ dev_info(__qmrom_log_dev__, __VA_ARGS__); \
+ } while (0)
+#define LOG_DBG(...) \
+ do { \
+ if (__qmrom_log_dev__) \
+ if (__log_level__ > LOG_INFO) \
+ dev_dbg(__qmrom_log_dev__, __VA_ARGS__); \
+ } while (0)
+#endif
+
+#endif /* __QMROM_LOGGER_H__ */
diff --git a/libqmrom/include/qmrom_spi.h b/libqmrom/include/qmrom_spi.h
new file mode 100644
index 0000000..c3d8c92
--- /dev/null
+++ b/libqmrom/include/qmrom_spi.h
@@ -0,0 +1,59 @@
+/*
+ * Copyright 2021 Qorvo US, Inc.
+ *
+ * SPDX-License-Identifier: GPL-2.0 OR Apache-2.0
+ *
+ * This file is provided under the Apache License 2.0, or the
+ * GNU General Public License v2.0.
+ *
+ */
+#ifndef __QMROM_SPI_H__
+#define __QMROM_SPI_H__
+
+#include <stddef.h>
+#include <qmrom_error.h>
+#include <qmrom.h>
+
+#ifndef __KERNEL__
+struct firmware {
+ size_t size;
+ const uint8_t *data;
+};
+#else
+#include <linux/firmware.h>
+#endif
+
+#define DEFAULT_SPI_LATENCY_MS 2
+
+#define SPI_ERR_NOCHAN SPI_ERR_BASE - 1
+#define SPI_ERR_INFNOTFOUND SPI_ERR_BASE - 2
+#define SPI_ERR_NOMEM SPI_ERR_BASE - 3
+#define SPI_ERR_READ_INCOMPLETE SPI_ERR_BASE - 4
+#define SPI_ERR_GPIO_WRITE_CMD_INCOMPLETE SPI_ERR_BASE - 5
+#define SPI_ERR_GPIO_READ_CMD_INCOMPLETE SPI_ERR_BASE - 6
+#define SPI_ERR_READY_LINE_TIMEOUT SPI_ERR_BASE - 7
+#define SPI_ERR_WRITE_INCOMPLETE SPI_ERR_BASE - 8
+#define SPI_ERR_RW_INCOMPLETE SPI_ERR_BASE - 9
+#define SPI_ERR_INVALID_STC_LEN SPI_ERR_BASE - 10
+#define SPI_ERR_WAIT_READY_TIMEOUT SPI_ERR_BASE - 11
+
+/* Make sure that the error ranges don't overlap */
+#define SPI_ERR_LIB_BASE (SPI_ERR_BASE - 500)
+#define SPI_ERR_LIB(rc) (SPI_ERR_LIB_BASE - rc)
+
+void *qmrom_spi_init(int spi_interface_index);
+void qmrom_spi_uninit(void *handle);
+int qmrom_spi_read(void *handle, char *buffer, size_t size);
+int qmrom_spi_write(void *handle, const char *buffer, size_t size);
+int qmrom_spi_transfer(void *handle, char *rbuf, const char *wbuf, size_t size);
+int qmrom_spi_set_cs_level(void *handle, int level);
+int qmrom_spi_reset_device(void *reset_handle);
+const struct firmware *qmrom_spi_get_firmware(void *handle,
+ struct qmrom_handle *qmrom_h,
+ bool use_prod_fw);
+void qmrom_spi_release_firmware(const struct firmware *fw);
+int qmrom_spi_wait_for_ready_line(void *handle, unsigned int timeout_ms);
+void qmrom_spi_set_freq(unsigned int freq);
+unsigned int qmrom_spi_get_freq(void);
+
+#endif /* __QMROM_SPI_H__ */
diff --git a/libqmrom/include/qmrom_utils.h b/libqmrom/include/qmrom_utils.h
new file mode 100644
index 0000000..06ece2a
--- /dev/null
+++ b/libqmrom/include/qmrom_utils.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright 2021 Qorvo US, Inc.
+ *
+ * SPDX-License-Identifier: GPL-2.0 OR Apache-2.0
+ *
+ * This file is provided under the Apache License 2.0, or the
+ * GNU General Public License v2.0.
+ *
+ */
+#ifndef __QMROM_UTILS_H__
+#define __QMROM_UTILS_H__
+
+#ifndef __KERNEL__
+#include <stdlib.h>
+#include <stdint.h>
+
+#ifndef __linux__
+extern void usleep(unsigned int us);
+#else
+#include <unistd.h>
+#endif
+
+#define qmrom_msleep(ms) \
+ do { \
+ usleep(ms * 1000); \
+ } while (0)
+
+#define qmrom_alloc(ptr, size) \
+ do { \
+ ptr = calloc(1, size); \
+ } while (0)
+
+#define qmrom_free free
+#else
+
+#include <linux/delay.h>
+#include <linux/slab.h>
+
+#define qmrom_msleep(ms) \
+ do { \
+ usleep_range(ms * 1000, ms * 1000); \
+ } while (0)
+
+#define qmrom_alloc(ptr, size) \
+ do { \
+ ptr = kzalloc(size, GFP_KERNEL); \
+ } while (0)
+
+#define qmrom_free kfree
+#endif
+#endif /* __QMROM_UTILS_H__ */
diff --git a/libqmrom/include/spi_rom_protocol.h b/libqmrom/include/spi_rom_protocol.h
new file mode 100644
index 0000000..f17575e
--- /dev/null
+++ b/libqmrom/include/spi_rom_protocol.h
@@ -0,0 +1,92 @@
+/*
+ * Copyright 2021 Qorvo US, Inc.
+ *
+ * SPDX-License-Identifier: GPL-2.0 OR Apache-2.0
+ *
+ * This file is provided under the Apache License 2.0, or the
+ * GNU General Public License v2.0.
+ *
+ */
+#ifndef __SPI_ROM_PROTOCOL_H__
+#define __SPI_ROM_PROTOCOL_H__
+
+#include <qmrom_error.h>
+#include <qmrom.h>
+
+#ifndef __KERNEL__
+#include <stdint.h>
+#else
+#include <linux/types.h>
+#endif
+
+#define SPI_PROTO_WRONG_RESP SPI_PROTO_ERR_BASE - 1
+
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wpedantic"
+struct stc {
+ union {
+ struct {
+ union {
+ struct {
+ uint8_t reserved : 5;
+ uint8_t read : 1; // Read indication: Set to one by the host to tell the SOC SPI driver that this transaction is a read, otherwise it is set to zero.
+ uint8_t pre_read : 1; // Pre-read indication: Set to one by the host to tell the SOC SPI driver that the next transaction will be a read, otherwise it is set to zero
+ uint8_t write : 1; // Write indication: Set to one by the host when it is doing a write transaction. Set to zero when the host is not doing a write.
+ } host_flags;
+ struct {
+ uint8_t reserved : 4;
+ uint8_t err : 1; // Error indication: This is set to one by the SOC SPI driver to tell the HOST that it has detected some error in the SPI protocol.
+ uint8_t ready : 1; //
+ uint8_t out_active : 1; // Output active indication: Set to one by the SOC SPI driver to tell the HOST that it is outputting data on MISO line and expecting that the host is doing a read transaction at this time. This is set to zero for all other transactions.
+ uint8_t out_waiting : 1; // Output data waiting indication: Set to one by the SOC SPI driver to tell the HOST there is data awaiting reading. This is set to zero when there is no data pending output.
+ } soc_flags;
+ uint8_t raw_flags;
+ };
+ uint8_t ul;
+ uint16_t len;
+ };
+ uint32_t all;
+ };
+ uint8_t payload[0];
+} __attribute__((packed));
+#pragma GCC diagnostic pop
+
+/* Host to Soc (HS) masks */
+#define SPI_HS_WRITE_CMD_BIT_MASK 0x80
+#define SPI_HS_PRD_CMD_BIT_MASK 0x40
+#define SPI_HS_RD_CMD_BIT_MASK 0x20
+
+/* Soc to Host (SH) masks */
+#define SPI_SH_ODW_BIT_MASK 0x80
+#define SPI_SH_ACTIVE_BIT_MASK 0x40
+#define SPI_SH_READY_CMD_BIT_MASK 0x20
+#define SPI_DEVICE_READY_FLAGS SPI_SH_ODW_BIT_MASK
+
+/* Communication parameters */
+#define MAX_STC_FRAME_LEN 2048
+#define MAX_STC_PAYLOAD_LEN (MAX_STC_FRAME_LEN - sizeof(struct stc))
+#define SPI_NUM_READS_FOR_READY 1
+#define SPI_NUM_FAILS_RETRY 4
+#define SPI_ET_PROTOCOL 5
+#define SPI_RST_LOW_DELAY_MS 20
+#define SPI_INTERCMD_DELAY_MS 1
+#define SPI_DEVICE_POLL_RETRY 10
+#define SPI_READY_TIMEOUT_MS 50
+#define SPI_ET_VERSION_LOCATION 0x601f0000
+
+/* ROM boot proto */
+#define SPI_ROM_READ_VERSION_SIZE_A0 3
+#define SPI_ROM_READ_IMAGE_CERT_SIZE_A0 3
+#define SPI_ROM_READ_IMAGE_SIZE_SIZE_A0 3
+
+#define SPI_ROM_READ_VERSION_SIZE_B0 5
+#define SPI_ROM_READ_INFO_SIZE_B0 50
+#define SPI_ROM_READ_IMAGE_CERT_SIZE_B0 1
+#define SPI_ROM_READ_IMAGE_SIZE_SIZE_B0 1
+
+#define SPI_ROM_WRITE_KEY_CERT_SIZE 6
+#define SPI_ROM_WRITE_IMAGE_CERT_SIZE 40
+#define SPI_ROM_WRITE_IMAGE_SIZE_SIZE 4
+#define SPI_ROM_DBG_CERT_SIZE_SIZE 5
+
+#endif /* __SPI_ROM_PROTOCOL_H__ */
diff --git a/libqmrom/src/qmrom_a0.c b/libqmrom/src/qmrom_a0.c
new file mode 100644
index 0000000..ce75612
--- /dev/null
+++ b/libqmrom/src/qmrom_a0.c
@@ -0,0 +1,240 @@
+// SPDX-License-Identifier: GPL-2.0 OR Apache-2.0
+/*
+ * Copyright 2022 Qorvo US, Inc.
+ *
+ */
+
+#include <qmrom.h>
+#include <qmrom_spi.h>
+#include <qmrom_log.h>
+#include <qmrom_utils.h>
+#include <spi_rom_protocol.h>
+
+#define DEFAULT_SPI_CLOCKRATE 750000
+#define CHIP_VERSION_CHIP_REV_PAYLOAD_OFFSET 1
+#define CHUNK_SIZE_A0 1016
+
+enum A0_CMD {
+ ROM_CMD_A0_GET_CHIP_VER = 0x0,
+ ROM_CMD_A0_DOWNLOAD_RRAM_CMD = 0x40,
+};
+
+enum A0_RESP {
+ /* Waiting for download command */
+ SPI_RSP_WAIT_DOWNLOAD_MODE = 1,
+ SPI_RSP_WAIT_FOR_KEY1_CERT,
+ SPI_RSP_WAIT_FOR_KEY2_CERT,
+ SPI_RSP_WAIT_FOR_IMAGE_CERT,
+ SPI_RSP_WAIT_IMAGE_SIZE,
+ SPI_RSP_WAIT_FOR_IMAGE,
+ SPI_RSP_DOWNLOAD_OK,
+ SPI_RSP_BOOT_OK,
+ /* Checksum/CRC error */
+ SPI_RSP_ERROR_CS,
+ /* Got error certificate RSA/FW ver. Didn't get
+ * all the data before switching to image...
+ */
+ SPI_RSP_ERROR_CERTIFICATE,
+ /* Got command smaller than SPI_HEADER_SIZE.
+ * Each command must be at least this size.
+ */
+ SPI_RSP_CMD_TOO_SHORT,
+ /* Error checking certificates or image, going
+ * to download mode.
+ */
+ SPI_RSP_ERROR_LOADING_IN_DOWNLOAD,
+};
+
+static int qmrom_a0_flash_fw(struct qmrom_handle *handle,
+ const struct firmware *fw);
+
+/*
+ * Poll the A0 SOC over SPI until it reports any flag byte.
+ * Sends an all-zero host STC and reads a single byte back, looping
+ * until sstc->raw_flags is non-zero or handle->comms_retries expire.
+ */
+void qmrom_a0_poll_soc(struct qmrom_handle *handle)
+{
+	int retries = handle->comms_retries;
+	handle->hstc->all = 0;
+	/* Give the SOC time to settle before the first transfer */
+	qmrom_msleep(SPI_READY_TIMEOUT_MS);
+	do {
+		/* 1-byte transfer: only the SOC flags byte is exchanged */
+		qmrom_spi_transfer(handle->spi_handle, (char *)handle->sstc,
+				   (const char *)handle->hstc, 1);
+	} while (retries-- && handle->sstc->raw_flags == 0);
+}
+
+/*
+ * Wait until the A0 SOC signals output data waiting (out_waiting set).
+ * Returns 0 on success, SPI_ERR_WAIT_READY_TIMEOUT once
+ * handle->comms_retries polling rounds have been exhausted.
+ */
+int qmrom_a0_wait_ready(struct qmrom_handle *handle)
+{
+	int retries = handle->comms_retries;
+	qmrom_a0_poll_soc(handle);
+
+	while (retries-- && !handle->sstc->soc_flags.out_waiting) {
+		qmrom_a0_poll_soc(handle);
+	}
+	return handle->sstc->soc_flags.out_waiting ? 0 :
+						     SPI_ERR_WAIT_READY_TIMEOUT;
+}
+
+/*
+ * Probe for an A0 revision chip: reboot into the bootloader, read the
+ * version frame and verify the chip revision, then install the A0 rom
+ * operations on the handle. Returns 0 when an A0 device was found,
+ * non-zero otherwise.
+ */
+int qmrom_a0_probe_device(struct qmrom_handle *handle)
+{
+	int rc;
+	LOG_DBG("%s: enters...\n", __func__);
+	/* A0 multi-byte fields come over the wire big-endian */
+	handle->is_be = true;
+
+	qmrom_spi_set_freq(DEFAULT_SPI_CLOCKRATE);
+
+	rc = qmrom_reboot_bootloader(handle);
+	if (rc) {
+		LOG_ERR("%s: cannot reset the device...\n", __func__);
+		return rc;
+	}
+
+	rc = qmrom_a0_wait_ready(handle);
+	if (rc) {
+		LOG_INFO("%s: maybe not a A0 device\n", __func__);
+		return rc;
+	}
+	qmrom_pre_read(handle);
+	handle->sstc->len = bswap_16(handle->sstc->len);
+	if (handle->sstc->len > 0xff) {
+		/* likely the wrong endianness, B0 or C0? */
+		return -1;
+	}
+	qmrom_read(handle);
+
+	LOG_DBG("%s: Set the chip_rev/device_version\n", __func__);
+	handle->chip_rev =
+		bswap_16(SSTC2UINT16(handle,
+				     CHIP_VERSION_CHIP_REV_PAYLOAD_OFFSET)) &
+		0xFF;
+
+	if (handle->chip_rev != CHIP_REVISION_A0) {
+		LOG_ERR("%s: wrong chip revision %#x\n", __func__,
+			handle->chip_rev);
+		handle->chip_rev = -1;
+		return -1;
+	}
+
+	/* Set rom ops (A0 has no debug certificate support) */
+	handle->rom_ops.flash_fw = qmrom_a0_flash_fw;
+	handle->rom_ops.flash_debug_cert = NULL;
+	handle->rom_ops.erase_debug_cert = NULL;
+	return 0;
+}
+
+/*
+ * Send a write STC carrying data_size bytes of payload to the SOC.
+ * Returns 0 on success, a negative value on error.
+ */
+int qmrom_a0_write_data(struct qmrom_handle *handle, uint16_t data_size,
+			const char *data)
+{
+	/* Fix: bound the copy — hstc is a MAX_STC_FRAME_LEN buffer, and
+	 * nothing previously stopped an oversized request from overflowing
+	 * the payload area.
+	 */
+	if (data_size > MAX_STC_PAYLOAD_LEN)
+		return -EINVAL;
+
+	handle->hstc->all = 0;
+	handle->hstc->host_flags.write = 1;
+	handle->hstc->ul = 1;
+	handle->hstc->len = data_size;
+	memcpy(handle->hstc->payload, data, data_size);
+
+	return qmrom_spi_transfer(handle->spi_handle, (char *)handle->sstc,
+				  (const char *)handle->hstc,
+				  sizeof(struct stc) + data_size);
+}
+
+/*
+ * Stream the firmware image to the SOC in CHUNK_SIZE_A0 pieces.
+ * Before each chunk, polls the SOC and verifies it answered
+ * SPI_RSP_WAIT_FOR_IMAGE. Returns 0 on success, SPI_PROTO_WRONG_RESP
+ * on an unexpected response, or the qmrom_a0_write_data error code.
+ */
+static int qmrom_a0_write_chunks(struct qmrom_handle *handle,
+				 const struct firmware *fw)
+{
+	int rc, sent = 0;
+	const char *bin_data = (const char *)fw->data;
+
+	check_stcs(__func__, __LINE__, handle);
+	while (sent < fw->size) {
+		uint32_t tx_bytes = fw->size - sent;
+		if (tx_bytes > CHUNK_SIZE_A0)
+			tx_bytes = CHUNK_SIZE_A0;
+
+		LOG_DBG("%s: poll soc...\n", __func__);
+		check_stcs(__func__, __LINE__, handle);
+		qmrom_a0_poll_soc(handle);
+		qmrom_pre_read(handle);
+		/* A0 replies big-endian; convert the length in place */
+		handle->sstc->len = bswap_16(handle->sstc->len);
+		qmrom_read(handle);
+		if (handle->sstc->payload[0] != SPI_RSP_WAIT_FOR_IMAGE) {
+			LOG_ERR("%s: wrong data result (%#x vs %#x)!!!\n",
+				__func__, handle->sstc->payload[0] & 0xff,
+				SPI_RSP_WAIT_FOR_IMAGE);
+			return SPI_PROTO_WRONG_RESP;
+		}
+
+		LOG_DBG("%s: sending %" PRIu32 " bytes of data\n", __func__,
+			tx_bytes);
+		rc = qmrom_a0_write_data(handle, tx_bytes, bin_data);
+		if (rc)
+			return rc;
+		sent += tx_bytes;
+		bin_data += tx_bytes;
+		check_stcs(__func__, __LINE__, handle);
+	}
+	return 0;
+}
+
+/*
+ * Flash a stitched firmware image onto an A0 device.
+ * Drives the ROM download state machine: reboot, enter download mode,
+ * send ROM_CMD_A0_DOWNLOAD_RRAM_CMD, step through the certificate
+ * prompts, send the image size, then stream the image chunks.
+ * Returns 0 on success, a negative/protocol error code otherwise.
+ */
+static int qmrom_a0_flash_fw(struct qmrom_handle *handle,
+			     const struct firmware *fw)
+{
+	int rc = 0, resp;
+
+	LOG_DBG("%s: starting...\n", __func__);
+
+	/* Reboot since the rom code on A0 seems
+	 * to have issues when starting flashing
+	 * after some prior interaction (like GET_CHIP_VERSION)
+	 */
+	rc = qmrom_reboot_bootloader(handle);
+	if (rc) {
+		LOG_ERR("%s: cannot reset the device...\n", __func__);
+		return rc;
+	}
+
+	rc = qmrom_a0_wait_ready(handle);
+	if (rc) {
+		LOG_ERR("%s: timedout waiting for the device to be ready\n",
+			__func__);
+		return rc;
+	}
+	qmrom_pre_read(handle);
+	handle->sstc->len = bswap_16(handle->sstc->len);
+	qmrom_read(handle);
+	if (handle->sstc->payload[0] != SPI_RSP_WAIT_DOWNLOAD_MODE) {
+		LOG_ERR("%s: wrong data result (%#x vs %#x)!!!\n", __func__,
+			handle->sstc->payload[0] & 0xff,
+			SPI_RSP_WAIT_DOWNLOAD_MODE);
+		return SPI_PROTO_WRONG_RESP;
+	}
+
+	check_stcs(__func__, __LINE__, handle);
+	LOG_DBG("%s: sending ROM_CMD_A0_DOWNLOAD_RRAM_CMD command\n", __func__);
+	rc = qmrom_write_cmd(handle, ROM_CMD_A0_DOWNLOAD_RRAM_CMD);
+	if (rc)
+		return rc;
+
+	/* Walk the KEY1/KEY2/IMAGE_CERT/IMAGE_SIZE prompt sequence; a
+	 * dummy command (0) acknowledges each certificate prompt.
+	 */
+	for (resp = SPI_RSP_WAIT_FOR_KEY1_CERT; resp < SPI_RSP_WAIT_FOR_IMAGE;
+	     resp++) {
+		qmrom_a0_poll_soc(handle);
+		qmrom_pre_read(handle);
+		handle->sstc->len = bswap_16(handle->sstc->len);
+		qmrom_read(handle);
+		if (handle->sstc->payload[0] != resp) {
+			LOG_ERR("%s: wrong data result (%#x vs %#x)!!!\n",
+				__func__, handle->sstc->payload[0] & 0xff,
+				resp);
+			return SPI_PROTO_WRONG_RESP;
+		}
+		if (resp < SPI_RSP_WAIT_IMAGE_SIZE) {
+			rc = qmrom_write_cmd(handle, 0);
+			if (rc)
+				return rc;
+		}
+	}
+
+	/* NOTE(review): this sends the low 4 bytes of fw->size in host
+	 * byte order, while A0 is driven big-endian (handle->is_be) —
+	 * presumably the ROM expects the host layout here; confirm.
+	 */
+	LOG_DBG("%s: sending fw size\n", __func__);
+	rc = qmrom_a0_write_data(handle, sizeof(uint32_t),
+				 (const char *)&fw->size);
+	if (rc)
+		return rc;
+
+	check_stcs(__func__, __LINE__, handle);
+	rc = qmrom_a0_write_chunks(handle, fw);
+	check_stcs(__func__, __LINE__, handle);
+	return rc;
+}
diff --git a/libqmrom/src/qmrom_b0.c b/libqmrom/src/qmrom_b0.c
new file mode 100644
index 0000000..9b23cff
--- /dev/null
+++ b/libqmrom/src/qmrom_b0.c
@@ -0,0 +1,407 @@
+// SPDX-License-Identifier: GPL-2.0 OR Apache-2.0
+/*
+ * Copyright 2022 Qorvo US, Inc.
+ *
+ */
+
+#include <qmrom.h>
+#include <qmrom_spi.h>
+#include <qmrom_log.h>
+#include <qmrom_utils.h>
+#include <spi_rom_protocol.h>
+
+#define DEFAULT_SPI_CLOCKRATE 3000000
+#define CHIP_VERSION_CHIP_REV_PAYLOAD_OFFSET 1
+#define CHIP_VERSION_DEV_REV_PAYLOAD_OFFSET 3
+#define CHUNK_SIZE_B0 1008
+
+enum B0_CMD {
+ ROM_CMD_B0_SEC_LOAD_ICV_IMG_TO_RRAM = 0x0,
+ ROM_CMD_B0_SEC_LOAD_OEM_IMG_TO_RRAM = 0x1,
+ ROM_CMD_B0_GET_CHIP_VER = 0x2,
+ ROM_CMD_B0_GET_SOC_INFO = 0x3,
+ ROM_CMD_B0_ERASE_DBG_CERT = 0x4,
+ ROM_CMD_B0_WRITE_DBG_CERT = 0x5,
+ ROM_CMD_B0_SEC_IMAGE_DATA = 0xf,
+ ROM_CMD_B0_CERT_DATA = 0x10,
+ ROM_CMD_B0_DEBUG_CERT_SIZE = 0x11,
+};
+
+enum B0_RESP {
+ READY_FOR_CS_LOW_CMD = 0x00,
+ WRONG_CS_LOW_CMD = 0x01,
+ WAITING_FOR_NS_RRAM_FILE_SIZE = 0x02,
+ WAITING_FOR_NS_SRAM_FILE_SIZE = 0x03,
+ WAITING_FOR_NS_RRAM_FILE_DATA = 0x04,
+ WAITING_FOR_NS_SRAM_FILE_DATA = 0x05,
+ WAITING_FOR_SEC_FILE_DATA = 0x06,
+ ERR_NS_SRAM_OR_RRAM_SIZE_CMD = 0x07,
+ ERR_SEC_RRAM_SIZE_CMD = 0x08,
+ ERR_WAITING_FOR_NS_IMAGE_DATA_CMD = 0x09,
+ ERR_WAITING_FOR_SEC_IMAGE_DATA_CMD = 0x0A,
+ ERR_IMAGE_SIZE_IS_ZERO = 0x0B,
+ /* Got more data than expected size */
+ ERR_IMAGE_SIZE_TOO_BIG = 0x0C,
+ /* Image must divide in 16 without remainder */
+ ERR_IMAGE_IS_NOT_16BYTES_MUL = 0x0D,
+ ERR_GOT_DATA_MORE_THAN_ALLOWED = 0x0E,
+ /* Remainder is allowed only for last packet */
+ ERR_RRAM_DATA_REMAINDER_NOT_ALLOWED = 0x0F,
+ ERR_WAITING_FOR_CERT_DATA_CMD = 0x10,
+ WAITING_FOR_FIRST_KEY_CERT = 0x11,
+ WAITING_FOR_SECOND_KEY_CERT = 0x12,
+ WAITING_FOR_CONTENT_CERT = 0x13,
+ WAITING_FOR_DEBUG_CERT_DATA = 0x14,
+ ERR_FIRST_KEY_CERT_OR_FW_VER = 0x15,
+ ERR_SECOND_KEY_CERT = 0x16,
+ ERR_CONTENT_CERT_DOWNLOAD_ADDR = 0x17,
+ /* If the content certificate contains to much images */
+ ERR_TOO_MANY_IMAGES_IN_CONTENT_CERT = 0x18,
+ ERR_ADDRESS_NOT_DIVIDED_BY_8 = 0x19,
+ ERR_IMAGE_BOUNDARIES = 0x1A,
+ /* Expected ICV type and got OEM */
+ ERR_CERT_TYPE = 0x1B,
+ ERR_PRODUCT_ID = 0x1C,
+ ERR_RRAM_RANGE_OR_WRITE = 0x1D,
+ WAITING_TO_DEBUG_CERTIFICATE_SIZE = 0x1E,
+ ERR_DEBUG_CERT_SIZE = 0x1F,
+};
+
+static int qmrom_b0_flash_fw(struct qmrom_handle *handle,
+ const struct firmware *fw);
+static int qmrom_b0_flash_debug_cert(struct qmrom_handle *handle,
+ struct firmware *dbg_cert);
+static int qmrom_b0_erase_debug_cert(struct qmrom_handle *handle);
+static int
+qmrom_b0_flash_unstitched_fw(struct qmrom_handle *handle,
+ const struct unstitched_firmware *all_fws);
+
+/*
+ * Poll the B0 SOC: send a zeroed host STC and re-read the SOC flags
+ * until they are non-zero or handle->comms_retries expire.
+ */
+static void qmrom_b0_poll_soc(struct qmrom_handle *handle)
+{
+	int retries = handle->comms_retries;
+	memset(handle->hstc, 0, sizeof(struct stc));
+	/* Give the SOC time to settle before the first transfer */
+	qmrom_msleep(SPI_READY_TIMEOUT_MS);
+	do {
+		qmrom_spi_transfer(handle->spi_handle, (char *)handle->sstc,
+				   (const char *)handle->hstc,
+				   sizeof(struct stc) + handle->hstc->len);
+	} while (retries-- && handle->sstc->raw_flags == 0);
+}
+
+/*
+ * Wait for the B0 ROM to report the READY flag pattern, draining any
+ * pending output frames (pre-read then read) on the way.
+ * Returns 0 when raw_flags == SPI_SH_READY_CMD_BIT_MASK, a qmrom_read
+ * error, or SPI_ERR_WAIT_READY_TIMEOUT after comms_retries rounds.
+ */
+static int qmrom_b0_wait_ready(struct qmrom_handle *handle)
+{
+	int retries = handle->comms_retries;
+	int rc;
+
+	qmrom_b0_poll_soc(handle);
+
+	/* handle->sstc has been updated */
+	while (retries-- &&
+	       handle->sstc->raw_flags != SPI_SH_READY_CMD_BIT_MASK) {
+		if (handle->sstc->soc_flags.out_waiting) {
+			qmrom_pre_read(handle);
+		} else if (handle->sstc->soc_flags.out_active) {
+			rc = qmrom_read(handle);
+			if (rc)
+				return rc;
+		} else {
+			/* error? */
+			qmrom_b0_poll_soc(handle);
+		}
+	}
+
+	return handle->sstc->raw_flags == SPI_SH_READY_CMD_BIT_MASK ?
+		       0 :
+		       SPI_ERR_WAIT_READY_TIMEOUT;
+}
+
+/*
+ * Poll the B0 SOC until it flags output data, then read the command
+ * response into handle->sstc. Returns 0 on success, -1 on timeout or
+ * endianness mismatch, or the qmrom_read error code.
+ *
+ * Fix: the original broke out of the loop and then returned
+ * "retries > 0 ? 0 : -1", so a response that arrived on the final
+ * retry (retries == 0) was misreported as a failure; it also discarded
+ * the qmrom_read return value. Returning directly from the success
+ * path fixes both.
+ */
+static int qmrom_b0_poll_cmd_resp(struct qmrom_handle *handle)
+{
+	int retries = handle->comms_retries;
+
+	qmrom_b0_poll_soc(handle);
+	do {
+		if (handle->sstc->soc_flags.out_waiting) {
+			qmrom_pre_read(handle);
+			if (handle->sstc->len > 0xff) {
+				/* likely the wrong endianness, A0? */
+				return -1;
+			}
+			return qmrom_read(handle);
+		}
+		qmrom_b0_poll_soc(handle);
+	} while (retries--);
+
+	return -1;
+}
+
+/*
+ * Probe for a B0 revision chip: reboot into the bootloader, query and
+ * check the chip/device version, fetch the SOC info (soc_id, lifecycle
+ * state, uuid) and install the B0 rom operations on the handle.
+ * Returns 0 when a B0 device was found, non-zero otherwise.
+ */
+int qmrom_b0_probe_device(struct qmrom_handle *handle)
+{
+	int rc, i;
+	uint8_t *soc_lcs_uuid;
+	/* B0 fields are little-endian on the wire (unlike A0) */
+	handle->is_be = false;
+	check_stcs(__func__, __LINE__, handle);
+
+	qmrom_spi_set_freq(DEFAULT_SPI_CLOCKRATE);
+
+	rc = qmrom_reboot_bootloader(handle);
+	if (rc) {
+		LOG_ERR("%s: cannot reset the device...\n", __func__);
+		return rc;
+	}
+
+	rc = qmrom_b0_wait_ready(handle);
+	if (rc) {
+		LOG_INFO("%s: maybe not a B0 device\n", __func__);
+		return rc;
+	}
+
+	rc = qmrom_write_cmd(handle, ROM_CMD_B0_GET_CHIP_VER);
+	if (rc)
+		return rc;
+
+	rc = qmrom_b0_poll_cmd_resp(handle);
+	if (rc)
+		return rc;
+
+	handle->chip_rev =
+		SSTC2UINT16(handle, CHIP_VERSION_CHIP_REV_PAYLOAD_OFFSET) &
+		0xFF;
+	handle->device_version = bswap_16(
+		SSTC2UINT16(handle, CHIP_VERSION_DEV_REV_PAYLOAD_OFFSET));
+	if (handle->chip_rev != CHIP_REVISION_B0) {
+		LOG_ERR("%s: wrong chip revision 0x%x\n", __func__,
+			handle->chip_rev);
+		handle->chip_rev = -1;
+		return -1;
+	}
+
+	rc = qmrom_b0_wait_ready(handle);
+	if (rc) {
+		LOG_ERR("%s: hmm something went wrong!!!\n", __func__);
+		return rc;
+	}
+
+	rc = qmrom_write_cmd(handle, ROM_CMD_B0_GET_SOC_INFO);
+	if (rc)
+		return rc;
+
+	rc = qmrom_b0_poll_cmd_resp(handle);
+	if (rc)
+		return rc;
+
+	/* skip the first byte */
+	soc_lcs_uuid = &(handle->sstc->payload[1]);
+	/* soc_id and uuid arrive reversed; copy them back-to-front */
+	for (i = 0; i < ROM_SOC_ID_LEN; i++)
+		handle->soc_id[i] = soc_lcs_uuid[ROM_SOC_ID_LEN - i - 1];
+	soc_lcs_uuid += ROM_SOC_ID_LEN;
+	handle->lcs_state = soc_lcs_uuid[0];
+	soc_lcs_uuid += 1;
+	for (i = 0; i < ROM_UUID_LEN; i++)
+		handle->uuid[i] = soc_lcs_uuid[ROM_UUID_LEN - i - 1];
+
+	/* Set rom ops */
+	handle->rom_ops.flash_fw = qmrom_b0_flash_fw;
+	handle->rom_ops.flash_unstitched_fw = qmrom_b0_flash_unstitched_fw;
+	handle->rom_ops.flash_debug_cert = qmrom_b0_flash_debug_cert;
+	handle->rom_ops.erase_debug_cert = qmrom_b0_erase_debug_cert;
+
+	check_stcs(__func__, __LINE__, handle);
+	return 0;
+}
+
+/*
+ * Stream one firmware blob to the B0 ROM in CHUNK_SIZE_B0 pieces,
+ * sending `cmd` for each chunk and requiring the ROM to have answered
+ * state `exp` beforehand. Returns 0 on success,
+ * PEG_ERR_FIRST_KEY_CERT_OR_FW_VER / SPI_PROTO_WRONG_RESP on an
+ * unexpected ROM state, or the write error code.
+ */
+static int qmrom_b0_flash_data(struct qmrom_handle *handle, struct firmware *fw,
+			       uint8_t cmd, uint8_t exp)
+{
+	int rc, sent = 0;
+	const char *bin_data = (const char *)fw->data;
+
+	check_stcs(__func__, __LINE__, handle);
+	while (sent < fw->size) {
+		uint32_t tx_bytes = fw->size - sent;
+		if (tx_bytes > CHUNK_SIZE_B0)
+			tx_bytes = CHUNK_SIZE_B0;
+
+		LOG_DBG("%s: poll soc...\n", __func__);
+		check_stcs(__func__, __LINE__, handle);
+		qmrom_b0_poll_soc(handle);
+		qmrom_pre_read(handle);
+		qmrom_read(handle);
+		if (handle->sstc->payload[0] != exp) {
+			LOG_ERR("%s: wrong data expected (%#x vs %#x)!!!\n",
+				__func__, handle->sstc->payload[0] & 0xff, exp);
+			if (handle->sstc->payload[0] ==
+			    ERR_FIRST_KEY_CERT_OR_FW_VER)
+				return PEG_ERR_FIRST_KEY_CERT_OR_FW_VER;
+			else
+				return SPI_PROTO_WRONG_RESP;
+		}
+
+		LOG_DBG("%s: sending %d command with %" PRIu32 " bytes\n",
+			__func__, cmd, tx_bytes);
+		rc = qmrom_write_size_cmd(handle, cmd, tx_bytes, bin_data);
+		if (rc)
+			return rc;
+		sent += tx_bytes;
+		bin_data += tx_bytes;
+		check_stcs(__func__, __LINE__, handle);
+	}
+	return 0;
+}
+
+/*
+ * Flash a stitched firmware: split it into its certificates and image
+ * (qmrom_unstitch_fw) and hand them to qmrom_b0_flash_unstitched_fw,
+ * which also frees the unstitched buffers. Returns 0 on success.
+ */
+static int qmrom_b0_flash_fw(struct qmrom_handle *handle,
+			     const struct firmware *fw)
+{
+	int rc = 0;
+	struct unstitched_firmware all_fws = { 0 };
+
+	rc = qmrom_unstitch_fw(fw, &all_fws, handle->chip_rev);
+	if (rc) {
+		LOG_ERR("%s: Unstitched fw flashing not supported yet\n",
+			__func__);
+		return rc;
+	}
+	rc = qmrom_b0_flash_unstitched_fw(handle, &all_fws);
+	return rc;
+}
+
+/*
+ * Flash an already-unstitched firmware (key certs, content cert and
+ * image) onto a B0 device. Validates that the key certificate type
+ * matches the chip lifecycle state, then pushes the four blobs in ROM
+ * order. Always frees the four firmware buffers before returning.
+ * Returns 0 on success, -EINVAL on a cert/lifecycle mismatch, or the
+ * underlying protocol error.
+ */
+static int
+qmrom_b0_flash_unstitched_fw(struct qmrom_handle *handle,
+			     const struct unstitched_firmware *all_fws)
+{
+	int rc = 0;
+	/* OEM image for secure parts, ICV image otherwise */
+	uint8_t flash_cmd = handle->lcs_state == CC_BSV_SECURE_LCS ?
+				    ROM_CMD_B0_SEC_LOAD_OEM_IMG_TO_RRAM :
+				    ROM_CMD_B0_SEC_LOAD_ICV_IMG_TO_RRAM;
+
+	if (all_fws->key1_crt->data[HBK_LOC] == HBK_2E_ICV &&
+	    handle->lcs_state != CC_BSV_CHIP_MANUFACTURE_LCS) {
+		LOG_ERR("%s: Trying to flash an ICV fw on a non ICV platform\n",
+			__func__);
+		rc = -EINVAL;
+		goto end;
+	}
+
+	if (all_fws->key1_crt->data[HBK_LOC] == HBK_2E_OEM &&
+	    handle->lcs_state != CC_BSV_SECURE_LCS) {
+		LOG_ERR("%s: Trying to flash an OEM fw on a non OEM platform\n",
+			__func__);
+		rc = -EINVAL;
+		goto end;
+	}
+
+	LOG_DBG("%s: starting...\n", __func__);
+	check_stcs(__func__, __LINE__, handle);
+
+	rc = qmrom_b0_wait_ready(handle);
+	if (rc)
+		goto end;
+
+	check_stcs(__func__, __LINE__, handle);
+	LOG_DBG("%s: sending flash_cmd %u command\n", __func__, flash_cmd);
+	rc = qmrom_write_cmd(handle, flash_cmd);
+	if (rc)
+		goto end;
+
+	/* Certificate chain first, then the image itself */
+	check_stcs(__func__, __LINE__, handle);
+	rc = qmrom_b0_flash_data(handle, all_fws->key1_crt,
+				 ROM_CMD_B0_CERT_DATA,
+				 WAITING_FOR_FIRST_KEY_CERT);
+	if (rc)
+		goto end;
+
+	check_stcs(__func__, __LINE__, handle);
+	rc = qmrom_b0_flash_data(handle, all_fws->key2_crt,
+				 ROM_CMD_B0_CERT_DATA,
+				 WAITING_FOR_SECOND_KEY_CERT);
+	if (rc)
+		goto end;
+
+	check_stcs(__func__, __LINE__, handle);
+	rc = qmrom_b0_flash_data(handle, all_fws->fw_crt, ROM_CMD_B0_CERT_DATA,
+				 WAITING_FOR_CONTENT_CERT);
+	if (rc)
+		goto end;
+
+	check_stcs(__func__, __LINE__, handle);
+	rc = qmrom_b0_flash_data(handle, all_fws->fw_img,
+				 ROM_CMD_B0_SEC_IMAGE_DATA,
+				 WAITING_FOR_SEC_FILE_DATA);
+
+	/* Leave the ROM time to finish committing the image */
+	if (!rc)
+		qmrom_msleep(SPI_READY_TIMEOUT_MS);
+
+end:
+	check_stcs(__func__, __LINE__, handle);
+	qmrom_free(all_fws->fw_img);
+	qmrom_free(all_fws->fw_crt);
+	qmrom_free(all_fws->key1_crt);
+	qmrom_free(all_fws->key2_crt);
+	return rc;
+}
+
+/*
+ * Flash a debug certificate onto a B0 device: announce the write, send
+ * the certificate size, then stream the certificate data.
+ * Returns 0 on success, SPI_PROTO_WRONG_RESP on an unexpected ROM
+ * state, or the underlying transfer error.
+ *
+ * Fixes vs. original: the transfer result of the size command is now
+ * checked before inspecting the response buffer, and the result of
+ * qmrom_b0_flash_data is propagated (the original always returned 0).
+ */
+static int qmrom_b0_flash_debug_cert(struct qmrom_handle *handle,
+				     struct firmware *dbg_cert)
+{
+	int rc;
+
+	LOG_DBG("%s: starting...\n", __func__);
+	check_stcs(__func__, __LINE__, handle);
+
+	rc = qmrom_b0_wait_ready(handle);
+	if (rc)
+		return rc;
+
+	check_stcs(__func__, __LINE__, handle);
+	LOG_DBG("%s: sending ROM_CMD_B0_WRITE_DBG_CERT command\n", __func__);
+	rc = qmrom_write_cmd(handle, ROM_CMD_B0_WRITE_DBG_CERT);
+	if (rc)
+		return rc;
+
+	check_stcs(__func__, __LINE__, handle);
+	LOG_DBG("%s: poll soc...\n", __func__);
+	qmrom_b0_poll_soc(handle);
+	qmrom_pre_read(handle);
+	qmrom_read(handle);
+
+	check_stcs(__func__, __LINE__, handle);
+	LOG_DBG("%s: sending ROM_CMD_B0_DEBUG_CERT_SIZE command\n", __func__);
+	rc = qmrom_write_size_cmd(handle, ROM_CMD_B0_DEBUG_CERT_SIZE,
+				  sizeof(uint32_t),
+				  (const char *)&dbg_cert->size);
+	if (rc)
+		return rc;
+	if (handle->sstc->payload[0] != WAITING_TO_DEBUG_CERTIFICATE_SIZE) {
+		LOG_ERR("%s: wrong debug cert size result (0x%x vs 0x%x)!!!\n",
+			__func__, handle->sstc->payload[0] & 0xff,
+			WAITING_TO_DEBUG_CERTIFICATE_SIZE);
+		return SPI_PROTO_WRONG_RESP;
+	}
+
+	rc = qmrom_b0_flash_data(handle, dbg_cert, ROM_CMD_B0_CERT_DATA,
+				 WAITING_FOR_DEBUG_CERT_DATA);
+	check_stcs(__func__, __LINE__, handle);
+	return rc;
+}
+
+/*
+ * Erase the debug certificate stored on a B0 device.
+ * Returns 0 on success, the wait/write error otherwise.
+ *
+ * Fix: the original tested "if (!rc) return rc;" after the ready wait,
+ * i.e. it returned immediately (with success) whenever the device WAS
+ * ready and only proceeded on timeout — so the erase command was never
+ * sent on the working path. The condition is inverted here, matching
+ * every other wait_ready call site in this file.
+ */
+static int qmrom_b0_erase_debug_cert(struct qmrom_handle *handle)
+{
+	int rc;
+
+	LOG_INFO("%s: starting...\n", __func__);
+	check_stcs(__func__, __LINE__, handle);
+
+	rc = qmrom_b0_wait_ready(handle);
+	if (rc)
+		return rc;
+
+	LOG_DBG("%s: sending ROM_CMD_B0_ERASE_DBG_CERT command\n", __func__);
+	rc = qmrom_write_cmd(handle, ROM_CMD_B0_ERASE_DBG_CERT);
+	if (rc)
+		return rc;
+
+	/* Leave the ROM time to perform the erase */
+	qmrom_msleep(SPI_READY_TIMEOUT_MS);
+	check_stcs(__func__, __LINE__, handle);
+	return 0;
+}
diff --git a/libqmrom/src/qmrom_c0.c b/libqmrom/src/qmrom_c0.c
new file mode 100644
index 0000000..76a04f3
--- /dev/null
+++ b/libqmrom/src/qmrom_c0.c
@@ -0,0 +1,510 @@
+// SPDX-License-Identifier: GPL-2.0 OR Apache-2.0
+/*
+ * Copyright 2022 Qorvo US, Inc.
+ *
+ */
+
+#include <qmrom.h>
+#include <qmrom_spi.h>
+#include <qmrom_log.h>
+#include <qmrom_utils.h>
+#include <spi_rom_protocol.h>
+
+#define DEFAULT_SPI_CLOCKRATE 5000000
+#define CHIP_VERSION_CHIP_REV_PAYLOAD_OFFSET 4
+#define CHIP_VERSION_DEV_REV_PAYLOAD_OFFSET 6
+#define CHUNK_SIZE_C0 2040
+#define SPI_READY_TIMEOUT_MS_C0 200
+
+#ifdef C0_WRITE_STATS
+#include <linux/ktime.h>
+#endif
+
+#define SPI_SH_READY_CMD_BIT_MASK_C0 \
+ (SPI_SH_READY_CMD_BIT_MASK >> 4 | SPI_SH_READY_CMD_BIT_MASK)
+
+enum C0_CMD {
+ ROM_CMD_C0_SEC_LOAD_ICV_IMG_TO_RRAM = 0x0,
+ ROM_CMD_C0_SEC_LOAD_OEM_IMG_TO_RRAM = 0x1,
+ ROM_CMD_C0_GET_CHIP_VER = 0x2,
+ ROM_CMD_C0_GET_SOC_INFO = 0x3,
+ ROM_CMD_C0_ERASE_DBG_CERT = 0x4,
+ ROM_CMD_C0_USE_DIRECT_RRAM_WR = 0X5,
+ ROM_CMD_C0_USE_INDIRECT_RRAM_WR = 0x6,
+ ROM_CMD_C0_WRITE_DBG_CERT = 0x7,
+ ROM_CMD_C0_SEC_IMAGE_DATA = 0x12,
+ ROM_CMD_C0_CERT_DATA = 0x13,
+ ROM_CMD_C0_DEBUG_CERT_SIZE = 0x14,
+};
+
+enum C0_RESP {
+ READY_FOR_CS_LOW_CMD = 0x00,
+ WRONG_CS_LOW_CMD = 0x01,
+ WAITING_FOR_NS_RRAM_FILE_SIZE = 0x02,
+ WAITING_FOR_NS_SRAM_FILE_SIZE = 0x03,
+ WAITING_FOR_NS_RRAM_FILE_DATA = 0x04,
+ WAITING_FOR_NS_SRAM_FILE_DATA = 0x05,
+ WAITING_FOR_SEC_FILE_DATA = 0x06,
+ ERR_NS_SRAM_OR_RRAM_SIZE_CMD = 0x07,
+ ERR_SEC_RRAM_SIZE_CMD = 0x08,
+ ERR_WAITING_FOR_NS_IMAGE_DATA_CMD = 0x09,
+ ERR_WAITING_FOR_SEC_IMAGE_DATA_CMD = 0x0A,
+ ERR_IMAGE_SIZE_IS_ZERO = 0x0B,
+ /* Got more data than expected size */
+ ERR_IMAGE_SIZE_TOO_BIG = 0x0C,
+ /* Image must divide in 16 without remainder */
+ ERR_IMAGE_IS_NOT_16BYTES_MUL = 0x0D,
+ ERR_GOT_DATA_MORE_THAN_ALLOWED = 0x0E,
+ /* Remainder is allowed only for last packet */
+ ERR_RRAM_DATA_REMAINDER_NOT_ALLOWED = 0x0F,
+ ERR_WAITING_FOR_CERT_DATA_CMD = 0x10,
+ WAITING_FOR_FIRST_KEY_CERT = 0x11,
+ WAITING_FOR_SECOND_KEY_CERT = 0x12,
+ WAITING_FOR_CONTENT_CERT = 0x13,
+ WAITING_FOR_DEBUG_CERT_DATA = 0x14,
+ ERR_FIRST_KEY_CERT_OR_FW_VER = 0x15,
+ ERR_SECOND_KEY_CERT = 0x16,
+ ERR_CONTENT_CERT_DOWNLOAD_ADDR = 0x17,
+ /* If the content certificate contains to much images */
+ ERR_TOO_MANY_IMAGES_IN_CONTENT_CERT = 0x18,
+ ERR_ADDRESS_NOT_DIVIDED_BY_8 = 0x19,
+ ERR_IMAGE_BOUNDARIES = 0x1A,
+ /* Expected ICV type and got OEM */
+ ERR_CERT_TYPE = 0x1B,
+ ERR_PRODUCT_ID = 0x1C,
+ ERR_RRAM_RANGE_OR_WRITE = 0x1D,
+ WAITING_TO_DEBUG_CERTIFICATE_SIZE = 0x1E,
+ ERR_DEBUG_CERT_SIZE = 0x1F,
+};
+
+static int qmrom_c0_flash_fw(struct qmrom_handle *handle,
+ const struct firmware *fw);
+static int qmrom_c0_flash_debug_cert(struct qmrom_handle *handle,
+ struct firmware *dbg_cert);
+static int qmrom_c0_erase_debug_cert(struct qmrom_handle *handle);
+
+/*
+ * C0 wraps every transfer primitive with a wait on the ss-ready GPIO
+ * line (qmrom_spi_wait_for_ready_line) before delegating to the common
+ * qmrom_* helper. Each macro yields the delegate's return code; the
+ * ready-line wait result itself is deliberately not checked here —
+ * NOTE(review): a timeout on the ready line is silently ignored.
+ */
+#define qmrom_pre_read_c0(h)                                       \
+	({                                                         \
+		int rc;                                            \
+		qmrom_spi_wait_for_ready_line((h)->ss_rdy_handle,  \
+					      SPI_READY_TIMEOUT_MS_C0); \
+		rc = qmrom_pre_read((h));                          \
+		rc;                                                \
+	})
+#define qmrom_read_c0(h)                                           \
+	({                                                         \
+		int rc;                                            \
+		qmrom_spi_wait_for_ready_line((h)->ss_rdy_handle,  \
+					      SPI_READY_TIMEOUT_MS_C0); \
+		rc = qmrom_read((h));                              \
+		rc;                                                \
+	})
+#define qmrom_write_cmd_c0(h, cmd)                                 \
+	({                                                         \
+		int rc;                                            \
+		qmrom_spi_wait_for_ready_line((h)->ss_rdy_handle,  \
+					      SPI_READY_TIMEOUT_MS_C0); \
+		rc = qmrom_write_cmd((h), (cmd));                  \
+		rc;                                                \
+	})
+#define qmrom_write_cmd32_c0(h, cmd)                               \
+	({                                                         \
+		int rc;                                            \
+		qmrom_spi_wait_for_ready_line((h)->ss_rdy_handle,  \
+					      SPI_READY_TIMEOUT_MS_C0); \
+		rc = qmrom_write_cmd32((h), (cmd));                \
+		rc;                                                \
+	})
+#define qmrom_write_size_cmd_c0(h, cmd, ds, d)                     \
+	({                                                         \
+		int rc;                                            \
+		qmrom_spi_wait_for_ready_line((h)->ss_rdy_handle,  \
+					      SPI_READY_TIMEOUT_MS_C0); \
+		rc = qmrom_write_size_cmd((h), (cmd), (ds), (d));  \
+		rc;                                                \
+	})
+#define qmrom_write_size_cmd32_c0(h, cmd, ds, d)                   \
+	({                                                         \
+		int rc;                                            \
+		qmrom_spi_wait_for_ready_line((h)->ss_rdy_handle,  \
+					      SPI_READY_TIMEOUT_MS_C0); \
+		rc = qmrom_write_size_cmd32((h), (cmd), (ds), (d)); \
+		rc;                                                \
+	})
+
+/*
+ * Poll the C0 SOC: wait for the ready GPIO line, then exchange a
+ * zeroed host STC, looping until the SOC reports a non-zero flag byte
+ * or handle->comms_retries expire.
+ */
+static void qmrom_c0_poll_soc(struct qmrom_handle *handle)
+{
+	int retries = handle->comms_retries;
+	memset(handle->hstc, 0, sizeof(struct stc));
+	handle->sstc->raw_flags = 0;
+	do {
+		int rc = qmrom_spi_wait_for_ready_line(handle->ss_rdy_handle,
+						       SPI_READY_TIMEOUT_MS_C0);
+		if (rc) {
+			LOG_ERR("%s qmrom_spi_wait_for_ready_line failed\n",
+				__func__);
+			/* retry: the loop condition decrements retries */
+			continue;
+		}
+		qmrom_spi_transfer(handle->spi_handle, (char *)handle->sstc,
+				   (const char *)handle->hstc,
+				   sizeof(struct stc) + handle->hstc->len);
+	} while (retries-- && handle->sstc->raw_flags == 0);
+}
+
+/*
+ * Wait for the C0 ROM to report the READY flag pattern, draining any
+ * pending output frames on the way. Returns 0 when raw_flags matches
+ * SPI_SH_READY_CMD_BIT_MASK_C0, a qmrom_read error, or
+ * SPI_ERR_WAIT_READY_TIMEOUT after comms_retries rounds.
+ */
+static int qmrom_c0_wait_ready(struct qmrom_handle *handle)
+{
+	int retries = handle->comms_retries;
+
+	qmrom_c0_poll_soc(handle);
+
+	/* handle->sstc has been updated */
+	while (retries-- &&
+	       handle->sstc->raw_flags != SPI_SH_READY_CMD_BIT_MASK_C0) {
+		if (handle->sstc->soc_flags.out_waiting) {
+			qmrom_pre_read_c0(handle);
+			qmrom_read_c0(handle);
+		} else if (handle->sstc->soc_flags.out_active) {
+			return qmrom_read_c0(handle);
+		} else
+			qmrom_c0_poll_soc(handle);
+	}
+
+	return handle->sstc->raw_flags == SPI_SH_READY_CMD_BIT_MASK_C0 ?
+		       0 :
+		       SPI_ERR_WAIT_READY_TIMEOUT;
+}
+
+/*
+ * Poll the C0 SOC until it flags output data, then read the command
+ * response into handle->sstc. Returns the qmrom_read_c0 result on
+ * success, -1 after exhausting handle->comms_retries.
+ *
+ * Cleanup vs. original: the success path returns directly from inside
+ * the loop, so after the loop `retries` was always -1 — the trailing
+ * "if (retries <= 0)" test and "retries > 0 ? 0 : -1" ternary were
+ * dead; the post-loop path is now an unconditional failure report.
+ */
+static int qmrom_c0_poll_cmd_resp(struct qmrom_handle *handle)
+{
+	int retries = handle->comms_retries;
+
+	qmrom_c0_poll_soc(handle);
+	do {
+		if (handle->sstc->soc_flags.out_waiting) {
+			qmrom_pre_read_c0(handle);
+			return qmrom_read_c0(handle);
+		}
+		qmrom_c0_poll_soc(handle);
+	} while (retries--);
+
+	LOG_ERR("%s failed after %d replies\n", __func__,
+		handle->comms_retries);
+	return -1;
+}
+
+/*
+ * Probe for a C0 revision chip: reboot into the bootloader, query and
+ * check the chip/device version, fetch the SOC info (soc_id, lifecycle
+ * state, uuid) and install the C0 rom operations on the handle.
+ * Returns 0 when a C0 device was found, non-zero otherwise.
+ */
+int qmrom_c0_probe_device(struct qmrom_handle *handle)
+{
+	int rc, i;
+	uint8_t *soc_lcs_uuid;
+
+	/* C0 fields are little-endian on the wire */
+	handle->is_be = false;
+
+	qmrom_spi_set_freq(DEFAULT_SPI_CLOCKRATE);
+
+	rc = qmrom_reboot_bootloader(handle);
+	if (rc) {
+		LOG_ERR("%s: cannot reset the device...\n", __func__);
+		return rc;
+	}
+
+	rc = qmrom_c0_wait_ready(handle);
+	if (rc) {
+		LOG_INFO("%s: maybe not a C0 device\n", __func__);
+		return rc;
+	}
+
+	rc = qmrom_write_cmd32_c0(handle, ROM_CMD_C0_GET_CHIP_VER);
+	if (rc)
+		return rc;
+
+	rc = qmrom_c0_poll_cmd_resp(handle);
+	if (rc)
+		return rc;
+
+	handle->chip_rev =
+		SSTC2UINT16(handle, CHIP_VERSION_CHIP_REV_PAYLOAD_OFFSET) &
+		0xFF;
+	handle->device_version = bswap_16(
+		SSTC2UINT16(handle, CHIP_VERSION_DEV_REV_PAYLOAD_OFFSET));
+	if (handle->chip_rev != CHIP_REVISION_C0) {
+		LOG_ERR("%s: wrong chip revision %#x\n", __func__,
+			handle->chip_rev);
+		handle->chip_rev = -1;
+		return -1;
+	}
+
+	rc = qmrom_c0_wait_ready(handle);
+	if (rc) {
+		LOG_ERR("%s: hmm something went wrong!!!\n", __func__);
+		return rc;
+	}
+
+	rc = qmrom_write_cmd32_c0(handle, ROM_CMD_C0_GET_SOC_INFO);
+	if (rc)
+		return rc;
+
+	rc = qmrom_c0_poll_cmd_resp(handle);
+	if (rc)
+		return rc;
+
+	/* skip the first 4 bytes */
+	soc_lcs_uuid = &(handle->sstc->payload[4]);
+	/* soc_id and uuid arrive reversed; copy them back-to-front */
+	for (i = 0; i < ROM_SOC_ID_LEN; i++)
+		handle->soc_id[i] = soc_lcs_uuid[ROM_SOC_ID_LEN - i - 1];
+	soc_lcs_uuid += ROM_SOC_ID_LEN;
+	/* lcs_state is a 4-byte field on C0 (single byte on B0) */
+	memcpy(&handle->lcs_state, soc_lcs_uuid, sizeof(uint32_t));
+	soc_lcs_uuid += 4;
+	for (i = 0; i < ROM_UUID_LEN; i++)
+		handle->uuid[i] = soc_lcs_uuid[ROM_UUID_LEN - i - 1];
+
+	/* Set rom ops */
+	handle->rom_ops.flash_fw = qmrom_c0_flash_fw;
+	handle->rom_ops.flash_debug_cert = qmrom_c0_flash_debug_cert;
+	handle->rom_ops.erase_debug_cert = qmrom_c0_erase_debug_cert;
+
+	return 0;
+}
+
+#ifdef C0_WRITE_STATS
+static uint64_t total_bytes, total_time_ns;
+static uint32_t max_write_time_ns, min_write_time_ns = ~0;
+
+/*
+ * Account one full-chunk write into the C0 flashing statistics:
+ * total bytes/time plus min/max per-chunk write latency.
+ * Only called for CHUNK_SIZE_C0-sized writes (see qmrom_c0_flash_data).
+ */
+static void update_write_max_chunk_stats(ktime_t start_time)
+{
+	uint64_t elapsed_time_ns;
+
+	total_bytes += CHUNK_SIZE_C0;
+	elapsed_time_ns = ktime_to_ns(ktime_sub(ktime_get(), start_time));
+	total_time_ns += elapsed_time_ns;
+	/* min/max are uint32_t: per-chunk times are expected to fit */
+	if (elapsed_time_ns > max_write_time_ns)
+		max_write_time_ns = elapsed_time_ns;
+	if (elapsed_time_ns < min_write_time_ns)
+		min_write_time_ns = elapsed_time_ns;
+}
+
+/*
+ * Report the accumulated C0 flashing statistics.
+ *
+ * Fix: guard against a division by zero — if the flashed data never
+ * filled a single CHUNK_SIZE_C0 chunk, nb_chunks is 0 and the mean
+ * computation would divide by it.
+ */
+static void dump_stats(void)
+{
+	uint32_t nb_chunks = total_bytes / CHUNK_SIZE_C0;
+
+	if (nb_chunks == 0)
+		return;
+	LOG_WARN(
+		"C0 flashing time stats: %llu bytes over %llu us (chunk size %u, write timings: mean %u us, min %u us, max %u us)\n",
+		total_bytes, total_time_ns / 1000, CHUNK_SIZE_C0,
+		(uint32_t)((total_time_ns / nb_chunks) / 1000),
+		min_write_time_ns / 1000, max_write_time_ns / 1000);
+}
+#endif
+
+/*
+ * Stream one blob to the C0 ROM in CHUNK_SIZE_C0 pieces, sending `cmd`
+ * per chunk and checking that the ROM answers state `resp` after each
+ * one. When skip_last_check is true the response check after the final
+ * chunk is skipped (used for the last blob of a session).
+ * Returns 0 on success, PEG_ERR_FIRST_KEY_CERT_OR_FW_VER /
+ * SPI_PROTO_WRONG_RESP on an unexpected ROM state, or the write error.
+ */
+static int qmrom_c0_flash_data(struct qmrom_handle *handle, struct firmware *fw,
+			       uint8_t cmd, uint8_t resp, bool skip_last_check)
+{
+	int rc, sent = 0;
+	const char *bin_data = (const char *)fw->data;
+#ifdef C0_WRITE_STATS
+	ktime_t start_time;
+#endif
+
+	while (sent < fw->size) {
+		uint32_t tx_bytes = fw->size - sent;
+		if (tx_bytes > CHUNK_SIZE_C0)
+			tx_bytes = CHUNK_SIZE_C0;
+
+		LOG_DBG("%s: sending command %#x with %" PRIu32 " bytes\n",
+			__func__, cmd, tx_bytes);
+#ifdef C0_WRITE_STATS
+		start_time = ktime_get();
+#endif
+		rc = qmrom_write_size_cmd32_c0(handle, cmd, tx_bytes, bin_data);
+		if (rc)
+			return rc;
+		sent += tx_bytes;
+		bin_data += tx_bytes;
+		if (skip_last_check && sent == fw->size) {
+			LOG_INFO("%s: flashing done, quitting now\n", __func__);
+			break;
+		}
+		qmrom_c0_poll_soc(handle);
+#ifdef C0_WRITE_STATS
+		/* Only full chunks enter the latency statistics */
+		if (tx_bytes == CHUNK_SIZE_C0)
+			update_write_max_chunk_stats(start_time);
+#endif
+		qmrom_pre_read_c0(handle);
+		qmrom_read_c0(handle);
+		if (handle->sstc->payload[0] != resp) {
+			LOG_ERR("%s: wrong data result (%#x vs %#x)!!!\n",
+				__func__, handle->sstc->payload[0] & 0xff,
+				resp);
+			if (handle->sstc->payload[0] ==
+			    ERR_FIRST_KEY_CERT_OR_FW_VER)
+				return PEG_ERR_FIRST_KEY_CERT_OR_FW_VER;
+			else
+				return SPI_PROTO_WRONG_RESP;
+		}
+	}
+	/* Leave the ROM time to digest the last chunk */
+	qmrom_msleep(SPI_READY_TIMEOUT_MS_C0);
+	return 0;
+}
+
+/*
+ * Flash a stitched firmware onto a C0 device: unstitch it, validate
+ * the key certificate against the chip lifecycle state, switch the
+ * ROM into indirect RRAM write mode, then push the certificate chain
+ * and the image. Frees the unstitched buffers before returning.
+ * Returns 0 on success, -EINVAL on a cert/lifecycle mismatch, or a
+ * protocol/transfer error.
+ *
+ * Fixes vs. original: the qmrom_c0_poll_cmd_resp result is now
+ * checked, and an unexpected response after flash_cmd sets
+ * rc = SPI_PROTO_WRONG_RESP before the "goto end" (the original left
+ * rc == 0 there, so a protocol failure was reported as success).
+ */
+static int qmrom_c0_flash_fw(struct qmrom_handle *handle,
+			     const struct firmware *fw)
+{
+	int rc = 0;
+	struct unstitched_firmware all_fws = { 0 };
+	/* OEM image for secure parts, ICV image otherwise */
+	uint8_t flash_cmd = handle->lcs_state == CC_BSV_SECURE_LCS ?
+				    ROM_CMD_C0_SEC_LOAD_OEM_IMG_TO_RRAM :
+				    ROM_CMD_C0_SEC_LOAD_ICV_IMG_TO_RRAM;
+
+	LOG_INFO("Unstitching the fw %p->data %p\n", (void *)fw,
+		 (void *)fw->data);
+	rc = qmrom_unstitch_fw(fw, &all_fws, handle->chip_rev);
+	if (rc) {
+		LOG_ERR("%s: Unstitched fw flashing not supported yet\n",
+			__func__);
+		return rc;
+	}
+
+	/* The key certificate must match the platform's lifecycle state */
+	if (all_fws.key1_crt->data[HBK_LOC] == HBK_2E_ICV &&
+	    handle->lcs_state != CC_BSV_CHIP_MANUFACTURE_LCS) {
+		LOG_ERR("%s: Trying to flash an ICV fw on a non ICV platform\n",
+			__func__);
+		rc = -EINVAL;
+		goto end;
+	}
+
+	if (all_fws.key1_crt->data[HBK_LOC] == HBK_2E_OEM &&
+	    handle->lcs_state != CC_BSV_SECURE_LCS) {
+		LOG_ERR("%s: Trying to flash an OEM fw on a non OEM platform\n",
+			__func__);
+		rc = -EINVAL;
+		goto end;
+	}
+
+	LOG_DBG("%s: starting...\n", __func__);
+
+	/* Set RRAM write mode */
+	rc = qmrom_c0_wait_ready(handle);
+	if (rc)
+		goto end;
+
+	rc = qmrom_write_cmd32_c0(handle, ROM_CMD_C0_USE_INDIRECT_RRAM_WR);
+	if (rc)
+		goto end;
+
+	qmrom_c0_poll_soc(handle);
+	qmrom_pre_read_c0(handle);
+	qmrom_read_c0(handle);
+	qmrom_c0_poll_soc(handle);
+
+	LOG_DBG("%s: sending flash_cmd %u command\n", __func__, flash_cmd);
+	rc = qmrom_write_cmd32_c0(handle, flash_cmd);
+	if (rc)
+		goto end;
+
+	rc = qmrom_c0_poll_cmd_resp(handle);
+	if (rc)
+		goto end;
+	if (handle->sstc->payload[0] != WAITING_FOR_FIRST_KEY_CERT) {
+		LOG_ERR("%s: Waiting for WAITING_FOR_FIRST_KEY_CERT(%#x) but got %#x\n",
+			__func__, WAITING_FOR_FIRST_KEY_CERT,
+			handle->sstc->payload[0]);
+		rc = SPI_PROTO_WRONG_RESP;
+		goto end;
+	}
+
+	qmrom_c0_poll_soc(handle);
+
+	/* Certificate chain first, then the image itself */
+	rc = qmrom_c0_flash_data(handle, all_fws.key1_crt, ROM_CMD_C0_CERT_DATA,
+				 WAITING_FOR_SECOND_KEY_CERT, false);
+	if (rc)
+		goto end;
+
+	rc = qmrom_c0_flash_data(handle, all_fws.key2_crt, ROM_CMD_C0_CERT_DATA,
+				 WAITING_FOR_CONTENT_CERT, false);
+	if (rc)
+		goto end;
+
+	rc = qmrom_c0_flash_data(handle, all_fws.fw_crt, ROM_CMD_C0_CERT_DATA,
+				 WAITING_FOR_SEC_FILE_DATA, false);
+	if (rc)
+		goto end;
+
+	rc = qmrom_c0_flash_data(handle, all_fws.fw_img,
+				 ROM_CMD_C0_SEC_IMAGE_DATA,
+				 WAITING_FOR_SEC_FILE_DATA, true);
+
+#ifdef C0_WRITE_STATS
+	dump_stats();
+#endif
+
+end:
+	qmrom_free(all_fws.fw_img);
+	qmrom_free(all_fws.fw_crt);
+	qmrom_free(all_fws.key1_crt);
+	qmrom_free(all_fws.key2_crt);
+	return rc;
+}
+
+/*
+ * Flash a debug certificate onto a C0 device: switch to indirect RRAM
+ * write mode, announce the write, send the certificate size, then
+ * stream the certificate data.
+ * Returns 0 on success, SPI_PROTO_WRONG_RESP on an unexpected ROM
+ * state, or the underlying transfer error.
+ *
+ * Fixes vs. original: the log now names the command actually sent
+ * (INDIRECT, matching qmrom_c0_flash_fw); every write/poll result is
+ * checked; protocol mismatches return SPI_PROTO_WRONG_RESP instead of
+ * rc (which was 0 at those points); and the final flash result is
+ * propagated instead of an unconditional 0.
+ */
+static int qmrom_c0_flash_debug_cert(struct qmrom_handle *handle,
+				     struct firmware *dbg_cert)
+{
+	int rc;
+
+	LOG_DBG("%s: starting...\n", __func__);
+	rc = qmrom_c0_wait_ready(handle);
+	if (rc)
+		return rc;
+
+	LOG_DBG("%s: sending ROM_CMD_C0_USE_INDIRECT_RRAM_WR command\n",
+		__func__);
+	rc = qmrom_write_cmd32_c0(handle, ROM_CMD_C0_USE_INDIRECT_RRAM_WR);
+	if (rc)
+		return rc;
+
+	qmrom_c0_poll_soc(handle);
+	qmrom_pre_read_c0(handle);
+	qmrom_read_c0(handle);
+	qmrom_c0_poll_soc(handle);
+
+	LOG_DBG("%s: sending ROM_CMD_C0_WRITE_DBG_CERT command\n", __func__);
+	rc = qmrom_write_cmd32_c0(handle, ROM_CMD_C0_WRITE_DBG_CERT);
+	if (rc)
+		return rc;
+	rc = qmrom_c0_poll_cmd_resp(handle);
+	if (rc)
+		return rc;
+	if (handle->sstc->payload[0] != WAITING_TO_DEBUG_CERTIFICATE_SIZE) {
+		LOG_ERR("%s: Waiting for WAITING_TO_DEBUG_CERTIFICATE_SIZE(%#x) but got %#x\n",
+			__func__, WAITING_TO_DEBUG_CERTIFICATE_SIZE,
+			handle->sstc->payload[0]);
+		return SPI_PROTO_WRONG_RESP;
+	}
+
+	LOG_DBG("%s: sending ROM_CMD_C0_DEBUG_CERT_SIZE command\n", __func__);
+	rc = qmrom_write_size_cmd32_c0(handle, ROM_CMD_C0_DEBUG_CERT_SIZE,
+				       sizeof(uint32_t),
+				       (const char *)&dbg_cert->size);
+	if (rc)
+		return rc;
+	rc = qmrom_c0_poll_cmd_resp(handle);
+	if (rc)
+		return rc;
+	if (handle->sstc->payload[0] != WAITING_FOR_DEBUG_CERT_DATA) {
+		LOG_ERR("%s: Waiting for WAITING_FOR_DEBUG_CERT_DATA(%#x) but got %#x\n",
+			__func__, WAITING_FOR_DEBUG_CERT_DATA,
+			handle->sstc->payload[0]);
+		return SPI_PROTO_WRONG_RESP;
+	}
+
+	return qmrom_c0_flash_data(handle, dbg_cert, ROM_CMD_C0_CERT_DATA,
+				   WAITING_FOR_DEBUG_CERT_DATA, true);
+}
+
+/*
+ * Erase the debug certificate stored on a C0 device.
+ * Returns 0 on success, the wait/write error otherwise.
+ */
+static int qmrom_c0_erase_debug_cert(struct qmrom_handle *handle)
+{
+	int rc;
+
+	LOG_DBG("%s: starting...\n", __func__);
+
+	rc = qmrom_c0_wait_ready(handle);
+	if (rc)
+		return rc;
+
+	LOG_DBG("%s: sending ROM_CMD_C0_ERASE_DBG_CERT command\n", __func__);
+	rc = qmrom_write_cmd32_c0(handle, ROM_CMD_C0_ERASE_DBG_CERT);
+	if (rc)
+		return rc;
+
+	/* Leave the ROM time to perform the erase */
+	qmrom_msleep(SPI_READY_TIMEOUT_MS_C0);
+	return 0;
+}
diff --git a/libqmrom/src/qmrom_common.c b/libqmrom/src/qmrom_common.c
new file mode 100644
index 0000000..58ca662
--- /dev/null
+++ b/libqmrom/src/qmrom_common.c
@@ -0,0 +1,429 @@
+// SPDX-License-Identifier: GPL-2.0 OR Apache-2.0
+/*
+ * Copyright 2022 Qorvo US, Inc.
+ *
+ */
+
+#include <qmrom_utils.h>
+#include <qmrom_log.h>
+#include <qmrom_spi.h>
+#include <qmrom.h>
+#include <spi_rom_protocol.h>
+
+int qmrom_a0_probe_device(struct qmrom_handle *handle);
+int qmrom_b0_probe_device(struct qmrom_handle *handle);
+int qmrom_c0_probe_device(struct qmrom_handle *handle);
+
+/*
+ * qmrom_free_stcs - release the host/soc transfer buffers.
+ *
+ * The pointers are cleared after freeing so this function is safe to
+ * call twice (it is reachable from both error paths and qmrom_deinit()).
+ */
+static void qmrom_free_stcs(struct qmrom_handle *h)
+{
+	if (h->hstc) {
+		qmrom_free(h->hstc);
+		h->hstc = NULL;
+	}
+	if (h->sstc) {
+		qmrom_free(h->sstc);
+		h->sstc = NULL;
+	}
+}
+
+#ifdef CHECK_STCS
+/*
+ * check_stcs - verify the canary word written one uint32_t past the
+ * usable MAX_STC_FRAME_LEN area of each transfer buffer (see
+ * qmrom_allocate_stcs), reporting which buffer, if any, overflowed.
+ *
+ * NOTE(review): the "safe" branches also log at error level; presumably
+ * deliberate so the canary state is always visible when CHECK_STCS
+ * debugging is enabled -- confirm.
+ */
+void check_stcs(const char *func, int line, struct qmrom_handle *h)
+{
+	uint32_t *buff = (uint32_t *)h->hstc;
+	/* Canary sits right after the usable frame area. */
+	if (buff[MAX_STC_FRAME_LEN / sizeof(uint32_t)] != 0xfeeddeef) {
+		LOG_ERR("%s:%d - hstc %pK corrupted\n", func, line,
+			(void *)h->hstc);
+	} else {
+		LOG_ERR("%s:%d - hstc %pK safe\n", func, line, (void *)h->hstc);
+	}
+	buff = (uint32_t *)h->sstc;
+	if (buff[MAX_STC_FRAME_LEN / sizeof(uint32_t)] != 0xfeeddeef) {
+		LOG_ERR("%s:%d - sstc %pK corrupted\n", func, line,
+			(void *)h->sstc);
+	} else {
+		LOG_ERR("%s:%d - sstc %pK safe\n", func, line, (void *)h->sstc);
+	}
+}
+#endif
+
+/*
+ * qmrom_allocate_stcs - allocate the host (hstc) and soc (sstc) transfer
+ * buffers.  Each buffer is MAX_STC_FRAME_LEN bytes plus one uint32_t of
+ * slack used as an overflow canary when CHECK_STCS is defined.
+ *
+ * On failure only the buffer allocated here is released; h->hstc and
+ * h->sstc are left untouched.  (The previous code called
+ * qmrom_free_stcs() on failure, freeing pointers this function never
+ * owned -- possibly uninitialized ones on a fresh handle.)
+ *
+ * Returns 0 on success, -ENOMEM on allocation failure.
+ */
+static int qmrom_allocate_stcs(struct qmrom_handle *h)
+{
+	uint8_t *tx_buf = NULL, *rx_buf = NULL;
+
+	qmrom_alloc(tx_buf, MAX_STC_FRAME_LEN + sizeof(uint32_t));
+	if (tx_buf == NULL)
+		return -ENOMEM;
+
+	qmrom_alloc(rx_buf, MAX_STC_FRAME_LEN + sizeof(uint32_t));
+	if (rx_buf == NULL) {
+		qmrom_free(tx_buf);
+		return -ENOMEM;
+	}
+
+#ifdef CHECK_STCS
+	/* Canary word right past the usable frame, checked by check_stcs(). */
+	((uint32_t *)tx_buf)[MAX_STC_FRAME_LEN / sizeof(uint32_t)] = 0xfeeddeef;
+	((uint32_t *)rx_buf)[MAX_STC_FRAME_LEN / sizeof(uint32_t)] = 0xfeeddeef;
+#endif
+	h->hstc = (struct stc *)tx_buf;
+	h->sstc = (struct stc *)rx_buf;
+	return 0;
+}
+
+/*
+ * qmrom_pre_read - send an empty pre-read STC so the device can announce
+ * (in sstc->len) how much data it has pending for qmrom_read().
+ */
+int qmrom_pre_read(struct qmrom_handle *handle)
+{
+	struct stc *hstc = handle->hstc;
+
+	/* Flags-only frame: pre_read set, zero-length payload. */
+	hstc->all = 0;
+	hstc->host_flags.pre_read = 1;
+	hstc->ul = 1;
+	hstc->len = 0;
+	hstc->payload[0] = 0;
+
+	return qmrom_spi_transfer(handle->spi_handle, (char *)handle->sstc,
+				  (const char *)hstc,
+				  sizeof(struct stc) + hstc->len);
+}
+
+/*
+ * qmrom_read - fetch the payload whose length the device advertised in
+ * sstc->len during the preceding pre-read transfer.
+ *
+ * Returns the qmrom_spi_transfer() result, or SPI_ERR_INVALID_STC_LEN
+ * when the advertised length exceeds MAX_STC_FRAME_LEN.
+ */
+int qmrom_read(struct qmrom_handle *handle)
+{
+	size_t rd_size = handle->sstc->len;
+	/* sstc->len is device-controlled: bound it before using it as a
+	 * memset/transfer size.
+	 * NOTE(review): at rd_size == MAX_STC_FRAME_LEN the memset below
+	 * spans sizeof(struct stc) + MAX_STC_FRAME_LEN bytes while the
+	 * buffers are MAX_STC_FRAME_LEN + sizeof(uint32_t) bytes (see
+	 * qmrom_allocate_stcs) -- confirm the bound accounts for the STC
+	 * header.
+	 */
+	if (rd_size > MAX_STC_FRAME_LEN)
+		return SPI_ERR_INVALID_STC_LEN;
+	LOG_DBG("%s: reading %zu bytes...\n", __func__, rd_size);
+	memset(handle->hstc, 0, sizeof(struct stc) + rd_size);
+	handle->hstc->host_flags.read = 1;
+	handle->hstc->ul = 1;
+	handle->hstc->len = handle->sstc->len;
+
+	return qmrom_spi_transfer(handle->spi_handle, (char *)handle->sstc,
+				  (const char *)handle->hstc,
+				  sizeof(struct stc) + rd_size);
+}
+
+/*
+ * qmrom_write_cmd - send a single-byte ROM command.
+ */
+int qmrom_write_cmd(struct qmrom_handle *handle, uint8_t cmd)
+{
+	struct stc *hstc = handle->hstc;
+
+	/* One-byte write frame carrying the command opcode. */
+	hstc->all = 0;
+	hstc->host_flags.write = 1;
+	hstc->ul = 1;
+	hstc->len = 1;
+	hstc->payload[0] = cmd;
+
+	return qmrom_spi_transfer(handle->spi_handle, (char *)handle->sstc,
+				  (const char *)hstc,
+				  sizeof(struct stc) + hstc->len);
+}
+
+/*
+ * qmrom_write_cmd32 - send a 32-bit ROM command opcode.
+ */
+int qmrom_write_cmd32(struct qmrom_handle *handle, uint32_t cmd)
+{
+	struct stc *hstc = handle->hstc;
+
+	/* Four-byte write frame; the opcode is copied in host byte order. */
+	hstc->all = 0;
+	hstc->host_flags.write = 1;
+	hstc->ul = 1;
+	hstc->len = sizeof(cmd);
+	memcpy(hstc->payload, &cmd, sizeof(cmd));
+
+	return qmrom_spi_transfer(handle->spi_handle, (char *)handle->sstc,
+				  (const char *)hstc,
+				  sizeof(struct stc) + hstc->len);
+}
+
+/*
+ * qmrom_write_size_cmd - send a one-byte command followed by a data blob.
+ * @handle: ROM protocol handle
+ * @cmd: command byte placed in payload[0]
+ * @data_size: number of bytes in @data
+ * @data: argument bytes copied after the command byte
+ *
+ * Returns the SPI transfer result, or SPI_ERR_INVALID_STC_LEN when the
+ * frame would not fit in the transmit buffer (the old code memcpy'd a
+ * caller-provided length into the fixed-size hstc buffer unchecked).
+ */
+int qmrom_write_size_cmd(struct qmrom_handle *handle, uint8_t cmd,
+			 uint16_t data_size, const char *data)
+{
+	if (sizeof(struct stc) + (size_t)data_size + 1 > MAX_STC_FRAME_LEN)
+		return SPI_ERR_INVALID_STC_LEN;
+
+	handle->hstc->all = 0;
+	handle->hstc->host_flags.write = 1;
+	handle->hstc->ul = 1;
+	handle->hstc->len = data_size + 1;
+	handle->hstc->payload[0] = cmd;
+	memcpy(&handle->hstc->payload[1], data, data_size);
+
+	return qmrom_spi_transfer(handle->spi_handle, (char *)handle->sstc,
+				  (const char *)handle->hstc,
+				  sizeof(struct stc) + handle->hstc->len);
+}
+
+/*
+ * qmrom_write_size_cmd32 - send a 32-bit command followed by a data blob.
+ * @handle: ROM protocol handle
+ * @cmd: 32-bit opcode copied at the start of the payload
+ * @data_size: number of bytes in @data
+ * @data: argument bytes copied after the opcode
+ *
+ * Returns the SPI transfer result, or SPI_ERR_INVALID_STC_LEN when the
+ * frame would not fit in the transmit buffer (the old code memcpy'd a
+ * caller-provided length into the fixed-size hstc buffer unchecked).
+ */
+int qmrom_write_size_cmd32(struct qmrom_handle *handle, uint32_t cmd,
+			   uint16_t data_size, const char *data)
+{
+	if (sizeof(struct stc) + (size_t)data_size + sizeof(cmd) >
+	    MAX_STC_FRAME_LEN)
+		return SPI_ERR_INVALID_STC_LEN;
+
+	handle->hstc->all = 0;
+	handle->hstc->host_flags.write = 1;
+	handle->hstc->ul = 1;
+	handle->hstc->len = data_size + sizeof(cmd);
+	memcpy(handle->hstc->payload, &cmd, sizeof(cmd));
+	memcpy(&handle->hstc->payload[sizeof(cmd)], data, data_size);
+
+	return qmrom_spi_transfer(handle->spi_handle, (char *)handle->sstc,
+				  (const char *)handle->hstc,
+				  sizeof(struct stc) + handle->hstc->len);
+}
+
+/*
+ * Unfortunately, A0, B0 and C0 have different
+ * APIs to get the chip version...
+ *
+ */
+int qmrom_probe_device(struct qmrom_handle *handle)
+{
+	/* Probe order matters: B0 first, then C0, A0 last. */
+	int (*const probes[])(struct qmrom_handle *) = {
+		qmrom_b0_probe_device,
+		qmrom_c0_probe_device,
+		qmrom_a0_probe_device,
+	};
+	size_t i;
+
+	for (i = 0; i < sizeof(probes) / sizeof(probes[0]); i++) {
+		if (!probes[i](handle))
+			return 0;
+	}
+
+	/* None matched!!! */
+	return -1;
+}
+
+/*
+ * qmrom_init - allocate a ROM-protocol handle, allocate its transfer
+ * buffers and probe the chip revision (B0/C0/A0).
+ * @spi_handle: opaque cookie passed back to the qmrom_spi_* callbacks
+ * @reset_handle: opaque cookie passed to the reset callback
+ * @ss_rdy_handle: opaque cookie for slave-ready polling
+ * @comms_retries: retry budget for ROM exchanges
+ * @reset: device reset callback, stored in dev_ops
+ *
+ * Returns a fully probed handle, or NULL on allocation/probe failure
+ * (all partially acquired resources are released before returning).
+ */
+struct qmrom_handle *qmrom_init(void *spi_handle, void *reset_handle,
+				void *ss_rdy_handle, int comms_retries,
+				reset_device_fn reset)
+{
+	struct qmrom_handle *handle;
+	int rc;
+
+	qmrom_alloc(handle, sizeof(struct qmrom_handle));
+	if (!handle) {
+		LOG_ERR("%s: Couldn't allocate %zu bytes...\n", __func__,
+			sizeof(struct qmrom_handle));
+		return NULL;
+	}
+	rc = qmrom_allocate_stcs(handle);
+	if (rc) {
+		LOG_ERR("%s: Couldn't allocate stcs...\n", __func__);
+		qmrom_free(handle);
+		return NULL;
+	}
+
+	/* Defaults before probing; probe fills chip_rev/device_version. */
+	handle->spi_handle = spi_handle;
+	handle->reset_handle = reset_handle;
+	handle->ss_rdy_handle = ss_rdy_handle;
+	handle->comms_retries = comms_retries;
+	handle->chip_rev = CHIP_REVISION_UNKNOWN;
+	handle->device_version = -1;
+	handle->lcs_state = -1;
+
+	handle->dev_ops.reset = reset;
+
+	/* Probing also installs the revision-specific rom_ops. */
+	rc = qmrom_probe_device(handle);
+	if (rc) {
+		LOG_ERR("%s: qmrom_probe_device returned %d!\n", __func__, rc);
+		qmrom_free_stcs(handle);
+		qmrom_free(handle);
+		return NULL;
+	}
+
+	/* No-op unless CHECK_STCS buffer-canary debugging is enabled. */
+	check_stcs(__func__, __LINE__, handle);
+	return handle;
+}
+
+/*
+ * qmrom_deinit - release a handle created by qmrom_init(): frees the
+ * transfer buffers, then the handle itself.
+ */
+void qmrom_deinit(struct qmrom_handle *handle)
+{
+	LOG_DBG("Deinitializing %pK\n", (void *)handle);
+	qmrom_free_stcs(handle);
+	qmrom_free(handle);
+}
+
+/*
+ * qmrom_flash_dbg_cert - flash a debug certificate through the
+ * chip-revision-specific rom_ops hook.
+ *
+ * Returns -EINVAL when the probed chip provides no such hook, otherwise
+ * the hook's return value.
+ */
+int qmrom_flash_dbg_cert(struct qmrom_handle *handle, struct firmware *dbg_cert)
+{
+	if (!handle->rom_ops.flash_debug_cert) {
+		LOG_ERR("%s: flash debug certificate not supported on this device\n",
+			__func__);
+		return -EINVAL;
+	}
+	return handle->rom_ops.flash_debug_cert(handle, dbg_cert);
+}
+
+/*
+ * qmrom_erase_dbg_cert - erase the debug certificate through the
+ * chip-revision-specific rom_ops hook.
+ *
+ * Returns -EINVAL when the probed chip provides no such hook, otherwise
+ * the hook's return value.
+ */
+int qmrom_erase_dbg_cert(struct qmrom_handle *handle)
+{
+	if (!handle->rom_ops.erase_debug_cert) {
+		LOG_ERR("%s: erase debug certificate not supported on this device\n",
+			__func__);
+		return -EINVAL;
+	}
+	return handle->rom_ops.erase_debug_cert(handle);
+}
+
+/*
+ * qmrom_flash_fw - flash a stitched firmware image through the
+ * chip-revision-specific rom_ops hook.
+ *
+ * Guarded like the debug-certificate entry points above so a missing
+ * hook returns -EINVAL instead of dereferencing a NULL pointer.
+ */
+int qmrom_flash_fw(struct qmrom_handle *handle, const struct firmware *fw)
+{
+	if (!handle->rom_ops.flash_fw) {
+		LOG_ERR("%s: flash firmware not supported on this device\n",
+			__func__);
+		return -EINVAL;
+	}
+	return handle->rom_ops.flash_fw(handle, fw);
+}
+
+/*
+ * qmrom_flash_unstitched_fw - flash pre-split firmware parts through the
+ * chip-revision-specific rom_ops hook.
+ *
+ * Guarded like the debug-certificate entry points above so a missing
+ * hook returns -EINVAL instead of dereferencing a NULL pointer.
+ */
+int qmrom_flash_unstitched_fw(struct qmrom_handle *handle,
+			      const struct unstitched_firmware *fw)
+{
+	if (!handle->rom_ops.flash_unstitched_fw) {
+		LOG_ERR("%s: flash unstitched firmware not supported on this device\n",
+			__func__);
+		return -EINVAL;
+	}
+	return handle->rom_ops.flash_unstitched_fw(handle, fw);
+}
+
+/*
+ * qmrom_unstitch_fw - split a stitched firmware container into its four
+ * sections: key1 certificate, key2 certificate, content certificate and
+ * firmware image.
+ *
+ * The container is a sequence of [32-bit size, payload] records; the
+ * firmware image must be last and end exactly at the end of the file.
+ *
+ * On success each field of @unstitched_fw points to a freshly allocated
+ * struct firmware (header immediately followed by the copied payload)
+ * that the caller must free.  On failure all fields are left NULL and a
+ * negative error is returned.
+ *
+ * Bounds checks are written as subtractions against fw->size so a
+ * corrupted (huge) size field cannot wrap the arithmetic, and the size
+ * fields are read with memcpy because their offsets are not guaranteed
+ * to be aligned.
+ */
+int qmrom_unstitch_fw(const struct firmware *fw,
+		      struct unstitched_firmware *unstitched_fw,
+		      enum chip_revision_e revision)
+{
+	uint32_t tot_len = 0;
+	uint32_t fw_img_sz = 0;
+	uint32_t fw_crt_sz = 0;
+	uint32_t key1_crt_sz = 0;
+	uint32_t key2_crt_sz = 0;
+	const uint8_t *p_key1;
+	const uint8_t *p_key2;
+	const uint8_t *p_crt;
+	const uint8_t *p_fw;
+	int ret = 0;
+
+	if (revision == CHIP_REVISION_A0) {
+		LOG_ERR("%s: A0, no unstitching!!!\n", __func__);
+		return -EINVAL;
+	}
+	if (fw->size < 2 * sizeof(key1_crt_sz)) {
+		LOG_ERR("%s: Not enough data (%zu) to unstitch\n", __func__,
+			fw->size);
+		return -EINVAL;
+	}
+	LOG_INFO("%s: Unstitching %zu bytes\n", __func__, fw->size);
+
+	/* The error path frees whatever was allocated so far; never let it
+	 * see stale pointers from the caller's struct.
+	 */
+	unstitched_fw->fw_img = NULL;
+	unstitched_fw->fw_crt = NULL;
+	unstitched_fw->key1_crt = NULL;
+	unstitched_fw->key2_crt = NULL;
+
+	/* key1 */
+	memcpy(&key1_crt_sz, &fw->data[tot_len], sizeof(key1_crt_sz));
+	if (key1_crt_sz > fw->size - tot_len - sizeof(key1_crt_sz)) {
+		LOG_ERR("%s: Invalid or corrupted stitched file at offset %" PRIu32 " (key1)\n",
+			__func__, tot_len);
+		ret = -EINVAL;
+		goto out;
+	}
+	tot_len += sizeof(key1_crt_sz);
+	p_key1 = &fw->data[tot_len];
+	tot_len += key1_crt_sz;
+
+	/* key2 */
+	if (fw->size - tot_len < sizeof(key2_crt_sz)) {
+		LOG_ERR("%s: Invalid or corrupted stitched file at offset %" PRIu32 " (key2)\n",
+			__func__, tot_len);
+		ret = -EINVAL;
+		goto out;
+	}
+	memcpy(&key2_crt_sz, &fw->data[tot_len], sizeof(key2_crt_sz));
+	if (key2_crt_sz > fw->size - tot_len - sizeof(key2_crt_sz)) {
+		LOG_ERR("%s: Invalid or corrupted stitched file at offset %" PRIu32 " (key2)\n",
+			__func__, tot_len);
+		ret = -EINVAL;
+		goto out;
+	}
+	tot_len += sizeof(key2_crt_sz);
+	p_key2 = &fw->data[tot_len];
+	tot_len += key2_crt_sz;
+
+	/* content cert */
+	if (fw->size - tot_len < sizeof(fw_crt_sz)) {
+		LOG_ERR("%s: Invalid or corrupted stitched file at offset %" PRIu32 " (content cert)\n",
+			__func__, tot_len);
+		ret = -EINVAL;
+		goto out;
+	}
+	memcpy(&fw_crt_sz, &fw->data[tot_len], sizeof(fw_crt_sz));
+	if (fw_crt_sz > fw->size - tot_len - sizeof(fw_crt_sz)) {
+		LOG_ERR("%s: Invalid or corrupted stitched file at offset %" PRIu32 " (content cert)\n",
+			__func__, tot_len);
+		ret = -EINVAL;
+		goto out;
+	}
+	tot_len += sizeof(fw_crt_sz);
+	p_crt = &fw->data[tot_len];
+	tot_len += fw_crt_sz;
+
+	/* fw image: must consume the remainder of the file exactly */
+	if (fw->size - tot_len < sizeof(fw_img_sz)) {
+		LOG_ERR("%s: Invalid or corrupted stitched file at offset %" PRIu32 " (firmware)\n",
+			__func__, tot_len);
+		ret = -EINVAL;
+		goto out;
+	}
+	memcpy(&fw_img_sz, &fw->data[tot_len], sizeof(fw_img_sz));
+	if (fw_img_sz != fw->size - tot_len - sizeof(fw_img_sz)) {
+		LOG_ERR("%s: Invalid or corrupted stitched file at offset %" PRIu32 " (firmware)\n",
+			__func__, tot_len);
+		ret = -EINVAL;
+		goto out;
+	}
+	tot_len += sizeof(fw_img_sz);
+	p_fw = &fw->data[tot_len];
+
+	/* Each part is one allocation: struct firmware header followed by
+	 * its payload bytes.
+	 */
+	qmrom_alloc(unstitched_fw->fw_img, fw_img_sz + sizeof(struct firmware));
+	if (unstitched_fw->fw_img == NULL) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	qmrom_alloc(unstitched_fw->fw_crt, fw_crt_sz + sizeof(struct firmware));
+	if (unstitched_fw->fw_crt == NULL) {
+		ret = -ENOMEM;
+		goto err;
+	}
+
+	qmrom_alloc(unstitched_fw->key1_crt,
+		    key1_crt_sz + sizeof(struct firmware));
+	if (unstitched_fw->key1_crt == NULL) {
+		ret = -ENOMEM;
+		goto err;
+	}
+
+	qmrom_alloc(unstitched_fw->key2_crt,
+		    key2_crt_sz + sizeof(struct firmware));
+	if (unstitched_fw->key2_crt == NULL) {
+		ret = -ENOMEM;
+		goto err;
+	}
+
+	/* data points just past each struct firmware header. */
+	unstitched_fw->key1_crt->data =
+		(const uint8_t *)(unstitched_fw->key1_crt + 1);
+	unstitched_fw->key2_crt->data =
+		(const uint8_t *)(unstitched_fw->key2_crt + 1);
+	unstitched_fw->fw_crt->data =
+		(const uint8_t *)(unstitched_fw->fw_crt + 1);
+	unstitched_fw->fw_img->data =
+		(const uint8_t *)(unstitched_fw->fw_img + 1);
+	unstitched_fw->key1_crt->size = key1_crt_sz;
+	unstitched_fw->key2_crt->size = key2_crt_sz;
+	unstitched_fw->fw_crt->size = fw_crt_sz;
+	unstitched_fw->fw_img->size = fw_img_sz;
+
+	memcpy((void *)unstitched_fw->key1_crt->data, p_key1, key1_crt_sz);
+	memcpy((void *)unstitched_fw->key2_crt->data, p_key2, key2_crt_sz);
+	memcpy((void *)unstitched_fw->fw_crt->data, p_crt, fw_crt_sz);
+	memcpy((void *)unstitched_fw->fw_img->data, p_fw, fw_img_sz);
+	return 0;
+
+err:
+	/* Free partial allocations and clear the pointers so the caller
+	 * never sees dangling references.
+	 */
+	if (unstitched_fw->fw_img)
+		qmrom_free(unstitched_fw->fw_img);
+	if (unstitched_fw->fw_crt)
+		qmrom_free(unstitched_fw->fw_crt);
+	if (unstitched_fw->key1_crt)
+		qmrom_free(unstitched_fw->key1_crt);
+	if (unstitched_fw->key2_crt)
+		qmrom_free(unstitched_fw->key2_crt);
+	unstitched_fw->fw_img = NULL;
+	unstitched_fw->fw_crt = NULL;
+	unstitched_fw->key1_crt = NULL;
+	unstitched_fw->key2_crt = NULL;
+
+out:
+	return ret;
+}
+
+/*
+ * qmrom_reboot_bootloader - reset the chip while driving the SPI chip
+ * select low, then release both lines.
+ *
+ * NOTE(review): presumably the ROM samples CS at boot to stay in
+ * bootloader mode -- confirm against the ROM documentation.
+ *
+ * Returns 0 on success or the failing qmrom_spi_set_cs_level() error.
+ */
+int qmrom_reboot_bootloader(struct qmrom_handle *handle)
+{
+	int rc;
+
+	rc = qmrom_spi_set_cs_level(handle->spi_handle, 0);
+	if (rc) {
+		LOG_ERR("%s: spi_set_cs_level(0) failed with %d\n", __func__,
+			rc);
+		return rc;
+	}
+	qmrom_msleep(SPI_RST_LOW_DELAY_MS);
+
+	/* Pulse reset through the dev_ops callback installed at init. */
+	handle->dev_ops.reset(handle->reset_handle);
+
+	qmrom_msleep(SPI_RST_LOW_DELAY_MS);
+
+	rc = qmrom_spi_set_cs_level(handle->spi_handle, 1);
+	if (rc) {
+		LOG_ERR("%s: spi_set_cs_level(1) failed with %d\n", __func__,
+			rc);
+		return rc;
+	}
+
+	qmrom_msleep(SPI_RST_LOW_DELAY_MS);
+
+	return 0;
+}
diff --git a/libqmrom/src/qmrom_log.c b/libqmrom/src/qmrom_log.c
new file mode 100644
index 0000000..670f4fc
--- /dev/null
+++ b/libqmrom/src/qmrom_log.c
@@ -0,0 +1,78 @@
+/*
+ * Copyright 2021 Qorvo US, Inc.
+ *
+ * SPDX-License-Identifier: GPL-2.0 OR Apache-2.0
+ *
+ * This file is provided under the Apache License 2.0, or the
+ * GNU General Public License v2.0.
+ *
+ */
+#include <qmrom_log.h>
+
+/*
+ * LOG_PRINT(lvl, ...) - dispatch a format string to the LOG_* macro
+ * matching a runtime level.  Needed because the LOG_* names are
+ * function-like macros, not functions, so they cannot be selected
+ * through a pointer.  The case labels rely on enum constants sharing
+ * their names with those macros: a function-like macro only expands
+ * when followed by '(', so the bare names here refer to the enum.
+ */
+#define LOG_PRINT(lvl, ...)                       \
+	do {                                      \
+		switch (lvl) {                    \
+		case LOG_ERR:                     \
+			LOG_ERR(__VA_ARGS__);     \
+			break;                    \
+		case LOG_WARN:                    \
+			LOG_WARN(__VA_ARGS__);    \
+			break;                    \
+		case LOG_INFO:                    \
+			LOG_INFO(__VA_ARGS__);    \
+			break;                    \
+		case LOG_DBG:                     \
+			LOG_DBG(__VA_ARGS__);     \
+			break;                    \
+		default:                          \
+			break;                    \
+		}                                 \
+	} while (0)
+
+/* Current verbosity threshold used by the LOG_* macros and hexdump(). */
+enum log_level_e __log_level__ = LOG_INFO;
+
+/* Set the global log verbosity. */
+void set_log_level(enum log_level_e lvl)
+{
+	__log_level__ = lvl;
+}
+
+#ifdef __KERNEL__
+#include <linux/device.h>
+
+/* Device used to prefix kernel log output; NULL until registered. */
+struct device *__qmrom_log_dev__ = NULL;
+
+/* Attach log output to a struct device and set the verbosity at once. */
+void qmrom_set_log_device(struct device *dev, enum log_level_e lvl)
+{
+	__qmrom_log_dev__ = dev;
+	__log_level__ = lvl;
+}
+#endif
+
+/*
+ * hexdump - log a buffer as comma-separated "0x%02x" bytes, 16 per line.
+ * Nothing is printed when @lvl is below the current log level.
+ */
+void hexdump(enum log_level_e lvl, void *_array, unsigned short length)
+{
+	unsigned char *array = _array;
+	int i;
+
+	if (lvl < __log_level__)
+		return;
+
+	for (i = 0; i < length; i += 16) {
+		int j = 0;
+		/* Up to 15 comma-terminated bytes, then the last byte of
+		 * the line (or of the buffer) followed by a newline.
+		 */
+		for (; j < 15 && i + j + 1 < length; j++)
+			LOG_PRINT(lvl, "0x%02x, ", array[i + j]);
+		LOG_PRINT(lvl, "0x%02x\n", array[i + j]);
+	}
+}
+
+/*
+ * hexrawdump - log a buffer as one continuous run of "%02x" bytes with
+ * no separators or newline.  Filtered by the current log level, like
+ * hexdump().
+ */
+void hexrawdump(enum log_level_e lvl, void *_array, unsigned short length)
+{
+	const unsigned char *bytes = _array;
+	unsigned short idx;
+
+	if (lvl < __log_level__)
+		return;
+
+	for (idx = 0; idx < length; idx++)
+		LOG_PRINT(lvl, "%02x", bytes[idx]);
+}
diff --git a/qm35-spi.c b/qm35-spi.c
new file mode 100644
index 0000000..054518f
--- /dev/null
+++ b/qm35-spi.c
@@ -0,0 +1,947 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * This file is part of the QM35 UCI stack for linux.
+ *
+ * Copyright (c) 2021 Qorvo US, Inc.
+ *
+ * This software is provided under the GNU General Public License, version 2
+ * (GPLv2), as well as under a Qorvo commercial license.
+ *
+ * You may choose to use this software under the terms of the GPLv2 License,
+ * version 2 ("GPLv2"), as published by the Free Software Foundation.
+ * You should have received a copy of the GPLv2 along with this program. If
+ * not, see <http://www.gnu.org/licenses/>.
+ *
+ * This program is distributed under the GPLv2 in the hope that it will be
+ * useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GPLv2 for more
+ * details.
+ *
+ * If you cannot meet the requirements of the GPLv2, you may not use this
+ * software for any purpose without first obtaining a commercial license from
+ * Qorvo.
+ * Please contact Qorvo to inquire about licensing terms.
+ *
+ * QM35 UCI over HSSPI protocol
+ */
+
+#include <linux/bitfield.h>
+#include <linux/interrupt.h>
+#include <linux/ioctl.h>
+#include <linux/gpio.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/spi/spi.h>
+#include <linux/spinlock.h>
+#include <linux/miscdevice.h>
+#include <linux/poll.h>
+#include <linux/uaccess.h>
+#include <linux/firmware.h>
+#include <linux/completion.h>
+#include <linux/regulator/consumer.h>
+#ifdef CONFIG_QM35_DEBOUNCE_TIME_US
+#include <linux/ktime.h>
+#endif
+
+#include <qmrom.h>
+#include <qmrom_spi.h>
+#include <qmrom_log.h>
+#include <spi_rom_protocol.h>
+
+#include "qm35.h"
+#include "uci_ioctls.h"
+#include "hsspi.h"
+#include "hsspi_uci.h"
+#include "hsspi_test.h"
+
+#define QM35_REGULATOR_DELAY_US 1000
+#define QMROM_RETRIES 10
+#define REGULATORS_ENABLED(x) (x->vdd1 || x->vdd2 || x->vdd3 || x->vdd4)
+
+#ifndef NO_UWB_HAL
+#define NO_UWB_HAL false
+#endif
+
+static int qm_firmware_load(struct qm35_ctx *qm35_hdl);
+static void qm35_regulators_set(struct qm35_ctx *qm35_hdl, bool on);
+
+static const struct file_operations uci_fops;
+
+static const struct of_device_id qm35_dt_ids[] = {
+ { .compatible = "qorvo,qm35" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, qm35_dt_ids);
+
+static bool flash_on_probe = false;
+module_param(flash_on_probe, bool, 0444);
+MODULE_PARM_DESC(flash_on_probe, "Flash during the module probe");
+
+static int spi_speed_hz;
+module_param(spi_speed_hz, int, 0444);
+MODULE_PARM_DESC(spi_speed_hz, "SPI speed (if not set use DTS's one)");
+
+static char *fwname = NULL;
+module_param(fwname, charp, 0444);
+MODULE_PARM_DESC(fwname, "Use fwname as firmware binary to flash QM35");
+
+static bool wake_use_wakeup = true;
+module_param(wake_use_wakeup, bool, 0444);
+MODULE_PARM_DESC(wake_use_wakeup, "Use wakeup pin to wake up QM35");
+
+static bool wake_use_csn = false;
+module_param(wake_use_csn, bool, 0444);
+MODULE_PARM_DESC(wake_use_csn, "Use HSSPI CSn pin to wake up QM35");
+
+static bool wake_on_ssirq = true;
+module_param(wake_on_ssirq, bool, 0644);
+MODULE_PARM_DESC(wake_on_ssirq,
+ "Allow QM35 to wakeup the platform using ss_irq");
+
+int trace_spi_xfers;
+module_param(trace_spi_xfers, int, 0444);
+MODULE_PARM_DESC(trace_spi_xfers, "Trace all the SPI transfers");
+
+int qmrom_retries = QMROM_RETRIES;
+module_param(qmrom_retries, int, 0444);
+MODULE_PARM_DESC(qmrom_retries, "QMROM retries");
+
+int reset_on_error = 1;
+module_param(reset_on_error, int, 0444);
+MODULE_PARM_DESC(reset_on_error, "Reset the QM35 on successive errors");
+
+int log_qm_traces = 1;
+module_param(log_qm_traces, int, 0444);
+MODULE_PARM_DESC(log_qm_traces, "Logs the QM35 traces in the kernel messages");
+
+static uint8_t qm_soc_id[ROM_SOC_ID_LEN];
+static uint16_t qm_dev_id;
+
+/*
+ * uci_open() : open operation for uci device
+ *
+ */
+static int uci_open(struct inode *inode, struct file *file)
+{
+	struct miscdevice *md = file->private_data;
+	struct qm35_ctx *qm35 = container_of(md, struct qm35_ctx, uci_dev);
+
+	/* Attach the UCI layer to the HSSPI core for this opener. */
+	return hsspi_register(&qm35->hsspi, &qm35->uci_layer.hlayer);
+}
+
+/*
+ * uci_ioctl() - ioctl operation for uci device.
+ *
+ */
+static long uci_ioctl(struct file *filp, unsigned int cmd, unsigned long args)
+{
+	void __user *argp = (void __user *)args;
+	struct miscdevice *uci_dev = filp->private_data;
+	struct qm35_ctx *qm35_hdl =
+		container_of(uci_dev, struct qm35_ctx, uci_dev);
+	int ret;
+
+	switch (cmd) {
+	case QM35_CTRL_GET_STATE: {
+		/* Report the current driver state to user space. */
+		return copy_to_user(argp, &qm35_hdl->state,
+				    sizeof(qm35_hdl->state)) ?
+			       -EFAULT :
+			       0;
+	}
+	case QM35_CTRL_RESET: {
+		/* Quiesce HSSPI, pulse reset, let the firmware boot, then
+		 * restart the transport and return the new state.
+		 */
+		qm35_hsspi_stop(qm35_hdl);
+
+		ret = qm35_reset(qm35_hdl, QM_RESET_LOW_MS, true);
+
+		msleep(QM_BOOT_MS);
+
+		qm35_hsspi_start(qm35_hdl);
+
+		if (ret)
+			return ret;
+
+		return copy_to_user(argp, &qm35_hdl->state,
+				    sizeof(qm35_hdl->state)) ?
+			       -EFAULT :
+			       0;
+	}
+	case QM35_CTRL_FW_UPLOAD: {
+		/* Re-flash the firmware with the transport stopped, then
+		 * restart and return the resulting state.
+		 */
+		qm35_hsspi_stop(qm35_hdl);
+
+		ret = qm_firmware_load(qm35_hdl);
+
+		msleep(QM_BOOT_MS);
+
+		qm35_hsspi_start(qm35_hdl);
+
+		if (ret)
+			return ret;
+
+		return copy_to_user(argp, &qm35_hdl->state,
+				    sizeof(qm35_hdl->state)) ?
+			       -EFAULT :
+			       0;
+	}
+	case QM35_CTRL_POWER: {
+		unsigned int on;
+
+		ret = get_user(on, (unsigned int __user *)argp);
+		if (ret)
+			return ret;
+
+		qm35_hsspi_stop(qm35_hdl);
+
+		if (REGULATORS_ENABLED(qm35_hdl))
+			qm35_regulators_set(qm35_hdl, on);
+
+		/*
+		 * Always reset QM as regulators could be shared with
+		 * other devices and power may not be controlled as
+		 * expected
+		 */
+		qm35_reset(qm35_hdl, QM_RESET_LOW_MS, on);
+		msleep(QM_BOOT_MS);
+
+		/* If reset or power on */
+		if (!REGULATORS_ENABLED(qm35_hdl) ||
+		    (REGULATORS_ENABLED(qm35_hdl) && on))
+			qm35_hsspi_start(qm35_hdl);
+
+		return 0;
+	}
+	default:
+		dev_err(&qm35_hdl->spi->dev, "unknown ioctl %x to %s device\n",
+			cmd, qm35_hdl->uci_dev.name);
+		return -EINVAL;
+	}
+}
+
+/*
+ * uci_release() - release operation for uci device.
+ *
+ */
+static int uci_release(struct inode *inode, struct file *filp)
+{
+	struct miscdevice *md = filp->private_data;
+	struct qm35_ctx *qm35 = container_of(md, struct qm35_ctx, uci_dev);
+
+	/* Undo the hsspi_register() performed in uci_open(). */
+	hsspi_unregister(&qm35->hsspi, &qm35->uci_layer.hlayer);
+
+	return 0;
+}
+
+/*
+ * uci_read - read one UCI packet from the device node.
+ *
+ * Returns the packet length, -EFAULT when the user buffer cannot be
+ * written, or the uci_layer_read() error (e.g. -EAGAIN in non-blocking
+ * mode).  The previous code returned copy_to_user()'s not-copied byte
+ * count as a positive "bytes read" value on partial faults.
+ */
+static ssize_t uci_read(struct file *filp, char __user *buf, size_t len,
+			loff_t *off)
+{
+	struct miscdevice *uci_dev = filp->private_data;
+	struct qm35_ctx *qm35_hdl =
+		container_of(uci_dev, struct qm35_ctx, uci_dev);
+	struct uci_packet *p;
+	ssize_t ret;
+
+	p = uci_layer_read(&qm35_hdl->uci_layer, len,
+			   filp->f_flags & O_NONBLOCK);
+	if (IS_ERR(p))
+		return PTR_ERR(p);
+
+	/* copy_to_user() returns the number of bytes NOT copied. */
+	if (copy_to_user(buf, p->data, p->length))
+		ret = -EFAULT;
+	else
+		ret = p->length;
+
+	uci_packet_free(p);
+	return ret;
+}
+
+/*
+ * uci_write - send one UCI packet; blocks until the HSSPI layer has
+ * completed the transfer (signalled through p->write_done).
+ *
+ * Returns @len on success, the packet's error status, -ENOMEM or
+ * -EFAULT.
+ */
+static ssize_t uci_write(struct file *filp, const char __user *buf, size_t len,
+			 loff_t *off)
+{
+	struct miscdevice *uci_dev = filp->private_data;
+	struct qm35_ctx *qm35_hdl =
+		container_of(uci_dev, struct qm35_ctx, uci_dev);
+	struct uci_packet *p;
+	DECLARE_COMPLETION_ONSTACK(comp);
+	int ret;
+
+	p = uci_packet_alloc(len);
+	if (!p)
+		return -ENOMEM;
+
+	/* HSSPI completes this once the packet has been sent. */
+	p->write_done = &comp;
+
+	if (copy_from_user(p->data, buf, len)) {
+		ret = -EFAULT;
+		goto free;
+	}
+
+	ret = hsspi_send(&qm35_hdl->hsspi, &qm35_hdl->uci_layer.hlayer,
+			 &p->blk);
+	if (ret)
+		goto free;
+
+	wait_for_completion(&comp);
+
+	/* p->status is zero on success, a negative errno otherwise. */
+	ret = p->status ? p->status : len;
+free:
+	uci_packet_free(p);
+	return ret;
+}
+
+static __poll_t uci_poll(struct file *filp, struct poll_table_struct *wait)
+{
+	struct miscdevice *md = filp->private_data;
+	struct qm35_ctx *qm35 = container_of(md, struct qm35_ctx, uci_dev);
+	__poll_t events = 0;
+
+	/* Sleep on the UCI layer's wait queue until data is queued. */
+	poll_wait(filp, &qm35->uci_layer.wq, wait);
+
+	if (uci_layer_has_data_available(&qm35->uci_layer))
+		events |= EPOLLIN;
+
+	return events;
+}
+
+/* Character-device operations for the UCI misc device node. */
+static const struct file_operations uci_fops = {
+	.owner = THIS_MODULE,
+	.open = uci_open,
+	.release = uci_release,
+	.unlocked_ioctl = uci_ioctl,
+	.read = uci_read,
+	.write = uci_write,
+	.poll = uci_poll,
+};
+
+/*
+ * qm35_irq_handler - ss-irq hard-irq handler: the device signals it has
+ * output data.  The (level-triggered capable) IRQ is disabled until the
+ * HSSPI thread has serviced it (see reenable_ss_irq()).
+ */
+static irqreturn_t qm35_irq_handler(int irq, void *qm35_ctx)
+{
+	struct qm35_ctx *qm35_hdl = qm35_ctx;
+
+	spin_lock(&qm35_hdl->lock);
+	qm35_hdl->state = QM35_CTRL_STATE_READY;
+	spin_unlock(&qm35_hdl->lock);
+
+	disable_irq_nosync(irq);
+
+	hsspi_set_output_data_waiting(&qm35_hdl->hsspi);
+
+	return IRQ_HANDLED;
+}
+
+/* HSSPI odw_cleared callback: re-arm ss-irq once the "output data
+ * waiting" condition has been consumed.
+ */
+static void reenable_ss_irq(struct hsspi *hsspi)
+{
+	struct qm35_ctx *qm35_hdl = container_of(hsspi, struct qm35_ctx, hsspi);
+
+	enable_irq(qm35_hdl->spi->irq);
+}
+
+/*
+ * qm35_ss_rdy_handler - ss-ready rising-edge handler: the device can now
+ * accept a SPI transaction.  Optionally debounced via
+ * CONFIG_QM35_DEBOUNCE_TIME_US; also de-asserts the wake-up pins raised
+ * by qm35_wakeup().
+ */
+static irqreturn_t qm35_ss_rdy_handler(int irq, void *data)
+{
+	struct qm35_ctx *qm35_hdl = data;
+#ifdef CONFIG_QM35_DEBOUNCE_TIME_US
+	static ktime_t old_time;
+	ktime_t current_time;
+
+	current_time = ktime_get();
+
+	/* Ignore edges closer than the configured debounce time. */
+	if (ktime_after(ktime_add(old_time,
+				  CONFIG_QM35_DEBOUNCE_TIME_US * 1000),
+			current_time))
+		return IRQ_HANDLED;
+
+	old_time = current_time;
+#endif
+	/* Should be low already but in case we just woke up QM */
+	if (wake_use_csn)
+		gpiod_set_value(qm35_hdl->gpio_csn, 0);
+	if (wake_use_wakeup)
+		gpiod_set_value(qm35_hdl->gpio_wakeup, 0);
+
+	hsspi_clear_spi_slave_busy(&qm35_hdl->hsspi);
+	hsspi_set_spi_slave_ready(&qm35_hdl->hsspi);
+
+	return IRQ_HANDLED;
+}
+
+/* HSSPI wakeup callback: raise the configured wake pin(s) to pull the
+ * QM35 out of sleep; they are lowered again in qm35_ss_rdy_handler().
+ */
+static void qm35_wakeup(struct hsspi *hsspi)
+{
+	struct qm35_ctx *qm35_hdl = container_of(hsspi, struct qm35_ctx, hsspi);
+
+	if (wake_use_csn)
+		gpiod_set_value(qm35_hdl->gpio_csn, 1);
+	if (wake_use_wakeup)
+		gpiod_set_value(qm35_hdl->gpio_wakeup, 1);
+
+	/* The wake up will be cleared only when ss-rdy is raised again */
+}
+
+/* HSSPI reset callback used after repeated transfer errors.
+ * NOTE(review): the delay runs even when reset_on_error is 0, and after
+ * the reset despite QM_BEFORE_RESET_MS's name -- confirm intent.
+ */
+static void qm35_reset_hook(struct hsspi *hsspi)
+{
+	struct qm35_ctx *qm35_hdl = container_of(hsspi, struct qm35_ctx, hsspi);
+
+	if (reset_on_error)
+		qm35_reset(qm35_hdl, QM_RESET_LOW_MS, true);
+	usleep_range(QM_BEFORE_RESET_MS * 1000, QM_BEFORE_RESET_MS * 1000);
+}
+
+/*
+ * qm35_exton_handler - exton falling-edge handler: the device left its
+ * ready state; clear the ready flag and wake it again if the HSSPI core
+ * is waiting for ss-ready.
+ */
+static irqreturn_t qm35_exton_handler(int irq, void *data)
+{
+	struct qm35_ctx *qm35_hdl = data;
+
+	hsspi_clear_spi_slave_ready(&qm35_hdl->hsspi);
+
+	if (qm35_hdl->hsspi.waiting_ss_rdy)
+		qm35_wakeup(&qm35_hdl->hsspi);
+
+	return IRQ_HANDLED;
+}
+
+/*
+ * qm35_hsspi_start - re-enable the ss-ready IRQ and start the HSSPI
+ * thread.  No-op when the transport is already running.
+ */
+void qm35_hsspi_start(struct qm35_ctx *qm35_hdl)
+{
+	int irq;
+
+	/* nothing to do as HSSPI is already started */
+	if (qm35_hdl->hsspi.state == HSSPI_RUNNING)
+		return;
+
+	irq = gpiod_to_irq(qm35_hdl->gpio_ss_rdy);
+	if (irq >= 0) {
+		enable_irq(irq);
+#ifdef CONFIG_QM35_RISING_IRQ_NOT_TRIGGERED
+		/* Some IRQ controller will trigger a rising edge if
+		 * the gpio is high when enabling the IRQ, some will
+		 * not (RPI board for example). In the second case we
+		 * can miss an event depending on if the level rise
+		 * before or after enable_irq. Besides, the handler
+		 * can also run *after* hsspi_start, breaking the
+		 * hsspi thread with a false information. So let's
+		 * sleep and force the SS_READY bit after.
+		 */
+
+		if (gpiod_get_value(qm35_hdl->gpio_ss_rdy))
+			hsspi_set_spi_slave_ready(&qm35_hdl->hsspi);
+#endif
+	}
+
+	hsspi_start(&qm35_hdl->hsspi);
+}
+
+/*
+ * qm35_hsspi_stop - stop the HSSPI thread, mask the ss-ready IRQ and
+ * drop the cached SS_READY state.  No-op when already stopped.
+ */
+void qm35_hsspi_stop(struct qm35_ctx *qm35_hdl)
+{
+	int irq;
+
+	/* nothing to do as HSSPI is already stopped */
+	if (qm35_hdl->hsspi.state == HSSPI_STOPPED)
+		return;
+
+	hsspi_stop(&qm35_hdl->hsspi);
+
+	irq = gpiod_to_irq(qm35_hdl->gpio_ss_rdy);
+	if (irq >= 0) {
+		disable_irq_nosync(irq);
+
+		/* The flag may be stale once the IRQ is masked. */
+		clear_bit(HSSPI_FLAGS_SS_READY, qm35_hdl->hsspi.flags);
+	}
+}
+
+/*
+ * qm35_reset_sync - reset the QM35 with the HSSPI transport quiesced,
+ * waiting QM_BOOT_MS for the firmware to boot before restarting.
+ *
+ * Returns the qm35_reset() result.
+ */
+int qm35_reset_sync(struct qm35_ctx *qm35_hdl)
+{
+	int ret;
+
+	qm35_hsspi_stop(qm35_hdl);
+	ret = qm35_reset(qm35_hdl, QM_RESET_LOW_MS, true);
+	msleep(QM_BOOT_MS);
+	qm35_hsspi_start(qm35_hdl);
+
+	return ret;
+}
+
+/*
+ * qm_firmware_flashing - fetch the firmware image and flash it.
+ * @handle: qm35 context (opaque for the qmrom layer)
+ * @h: ROM protocol handle returned by qmrom_init()
+ * @use_prod_fw: select the production image over the default one
+ *
+ * Returns 0 on success, -ENOENT when no firmware file is available
+ * (instead of the former bare -1), or the qmrom_flash_fw() error.
+ */
+static int qm_firmware_flashing(void *handle, struct qmrom_handle *h,
+				bool use_prod_fw)
+{
+	struct qm35_ctx *qm35_hdl = (struct qm35_ctx *)handle;
+	struct spi_device *spi = qm35_hdl->spi;
+	const struct firmware *fw;
+	int ret = 0;
+
+	fw = qmrom_spi_get_firmware(&spi->dev, h, use_prod_fw);
+	if (fw == NULL) {
+		dev_err(&spi->dev, "Firmware file not present!\n");
+		return -ENOENT;
+	}
+
+	ret = qmrom_flash_fw(h, fw);
+	dev_dbg(&spi->dev, "Return qmrom_flash_fw = %d!\n", ret);
+
+	qmrom_spi_release_firmware(fw);
+	return ret;
+}
+
+/*
+ * qm_firmware_load - probe the chip over the ROM protocol, cache its
+ * identifiers and flash the firmware (production image first, with one
+ * retry on the non-production image after rebooting the bootloader).
+ *
+ * The driver state is set to FW_DOWNLOADING for the duration and
+ * restored afterwards.  Returns 0 on success or a negative/-1 error.
+ */
+static int qm_firmware_load(struct qm35_ctx *qm35_hdl)
+{
+	struct spi_device *spi = qm35_hdl->spi;
+	unsigned int state = qm35_get_state(qm35_hdl);
+	struct qmrom_handle *h;
+	int ret;
+
+	qm35_set_state(qm35_hdl, QM35_CTRL_STATE_FW_DOWNLOADING);
+
+	qmrom_set_log_device(&spi->dev, LOG_WARN);
+
+	h = qmrom_init(&spi->dev, qm35_hdl, qm35_hdl->gpio_ss_rdy,
+		       qmrom_retries, qmrom_spi_reset_device);
+	if (!h) {
+		pr_err("qmrom_init failed\n");
+		ret = -1;
+		goto out;
+	}
+
+	dev_info(&spi->dev, "    chip_ver:  %x\n", h->chip_rev);
+	dev_info(&spi->dev, "    dev_id:    deca%04x\n", h->device_version);
+
+	/* A0 silicon cannot report soc id/uuid/lcs over the ROM protocol. */
+	if (h->chip_rev != CHIP_REVISION_A0) {
+		dev_info(&spi->dev, "    soc_id:    %*phN\n", ROM_SOC_ID_LEN,
+			 h->soc_id);
+		dev_info(&spi->dev, "    uuid:      %*phN\n", ROM_UUID_LEN,
+			 h->uuid);
+		dev_info(&spi->dev, "    lcs_state: %u\n", h->lcs_state);
+
+		/* Cache ids for qm_get_dev_id()/qm_get_soc_id(). */
+		memcpy(&qm_dev_id, &h->device_version, sizeof(qm_dev_id));
+		memcpy(qm_soc_id, h->soc_id, ROM_SOC_ID_LEN);
+
+		debug_soc_info_available(&qm35_hdl->debug);
+	} else {
+		dev_dbg(&spi->dev,
+			"SoC info not supported on chip revision A0\n");
+	}
+
+	dev_dbg(&spi->dev, "Starting device flashing!\n");
+	/* Try the production image first; on failure reboot the
+	 * bootloader and retry with the non-production image.
+	 */
+	ret = qm_firmware_flashing(qm35_hdl, h, true);
+	if (ret) {
+		qmrom_reboot_bootloader(h);
+		ret = qm_firmware_flashing(qm35_hdl, h, false);
+	}
+
+	if (ret)
+		dev_err(&spi->dev, "Firmware download failed with %d!\n", ret);
+	else
+		dev_info(&spi->dev, "Device flashing completed!\n");
+
+out:
+	qm35_set_state(qm35_hdl, state);
+
+	return ret;
+}
+
+/* Copy the device id cached by qm_firmware_load(); zero until a flash
+ * cycle has run on a non-A0 chip.  @qm35_hdl is unused; always returns 0.
+ */
+int qm_get_dev_id(struct qm35_ctx *qm35_hdl, uint16_t *dev_id)
+{
+	memcpy(dev_id, &qm_dev_id, sizeof(qm_dev_id));
+
+	return 0;
+}
+
+/* Copy the ROM_SOC_ID_LEN-byte SoC id cached by qm_firmware_load();
+ * zeros until a flash cycle has run on a non-A0 chip.  Always returns 0.
+ */
+int qm_get_soc_id(struct qm35_ctx *qm35_hdl, uint8_t *soc_id)
+{
+	memcpy(soc_id, qm_soc_id, ROM_SOC_ID_LEN);
+
+	return 0;
+}
+
+/**
+ * hsspi_irqs_setup() - setup all irqs needed by HSSPI
+ * @qm35_ctx: pointer to &struct qm35_ctx
+ *
+ * SS_IRQ
+ * ------
+ *
+ * If `ss-irq-gpios` exists in the DTS, it is used. If not, it's using
+ * the `interrupts` definition from the SPI device.
+ *
+ * SS_READY
+ * --------
+ *
+ * The `ss-ready-gpios` is mandatory. It is used by the FW to signal
+ * the driver that it can handle a SPI transaction.
+ *
+ * Return: 0 if no error, -errno otherwise
+ */
+static int hsspi_irqs_setup(struct qm35_ctx *qm35_ctx)
+{
+	int ret;
+	unsigned long ss_irqflags;
+
+	/* Get READY GPIO */
+	qm35_ctx->gpio_ss_rdy =
+		devm_gpiod_get(&qm35_ctx->spi->dev, "ss-ready", GPIOD_IN);
+	if (IS_ERR(qm35_ctx->gpio_ss_rdy))
+		return PTR_ERR(qm35_ctx->gpio_ss_rdy);
+
+	ret = devm_request_irq(&qm35_ctx->spi->dev,
+			       gpiod_to_irq(qm35_ctx->gpio_ss_rdy),
+			       &qm35_ss_rdy_handler, IRQF_TRIGGER_RISING,
+			       "hsspi-ss-rdy", qm35_ctx);
+	if (ret)
+		return ret;
+
+	/* get SS_IRQ GPIO */
+	qm35_ctx->gpio_ss_irq = devm_gpiod_get_optional(&qm35_ctx->spi->dev,
+							"ss-irq", GPIOD_IN);
+
+	/* NOTE: an ERR_PTR is non-NULL, so the IS_ERR check inside the
+	 * branch still runs for error values of the optional lookup.
+	 */
+	if (qm35_ctx->gpio_ss_irq) {
+		if (IS_ERR(qm35_ctx->gpio_ss_irq))
+			return PTR_ERR(qm35_ctx->gpio_ss_irq);
+
+		/* Dedicated GPIO overrides the SPI node's interrupt. */
+		qm35_ctx->spi->irq = gpiod_to_irq(qm35_ctx->gpio_ss_irq);
+		ss_irqflags = IRQF_TRIGGER_HIGH;
+	} else {
+		ss_irqflags = irq_get_trigger_type(qm35_ctx->spi->irq);
+	}
+
+	/* Optionally let ss-irq wake the platform from suspend. */
+	if (wake_on_ssirq) {
+		ret = enable_irq_wake(qm35_ctx->spi->irq);
+		if (ret) {
+			return ret;
+		}
+	}
+
+	/* Hook the HSSPI callbacks before any IRQ can fire. */
+	qm35_ctx->hsspi.odw_cleared = reenable_ss_irq;
+	qm35_ctx->hsspi.wakeup = qm35_wakeup;
+	qm35_ctx->hsspi.reset_qm35 = qm35_reset_hook;
+
+	ret = devm_request_irq(&qm35_ctx->spi->dev, qm35_ctx->spi->irq,
+			       &qm35_irq_handler, ss_irqflags, "hsspi-ss-irq",
+			       qm35_ctx);
+	if (ret)
+		return ret;
+
+	/* Get exton */
+	qm35_ctx->gpio_exton =
+		devm_gpiod_get_optional(&qm35_ctx->spi->dev, "exton", GPIOD_IN);
+	if (qm35_ctx->gpio_exton) {
+		if (IS_ERR(qm35_ctx->gpio_exton))
+			return PTR_ERR(qm35_ctx->gpio_exton);
+
+		ret = devm_request_irq(&qm35_ctx->spi->dev,
+				       gpiod_to_irq(qm35_ctx->gpio_exton),
+				       &qm35_exton_handler,
+				       IRQF_TRIGGER_FALLING, "hsspi-exton",
+				       qm35_ctx);
+		if (ret)
+			return ret;
+
+		/* Sync the cached ready state with the current line level. */
+		if (!gpiod_get_value(qm35_ctx->gpio_exton))
+			hsspi_clear_spi_slave_ready(&qm35_ctx->hsspi);
+	}
+
+	/* Get spi csn */
+	if (wake_use_csn) {
+		qm35_ctx->gpio_csn = devm_gpiod_get(&qm35_ctx->spi->dev, "csn",
+						    GPIOD_OUT_HIGH);
+		if (IS_ERR(qm35_ctx->gpio_csn))
+			return PTR_ERR(qm35_ctx->gpio_csn);
+	}
+
+	/* Get wakeup */
+	if (wake_use_wakeup) {
+		qm35_ctx->gpio_wakeup = devm_gpiod_get(&qm35_ctx->spi->dev,
+						       "wakeup", GPIOD_OUT_LOW);
+		if (IS_ERR(qm35_ctx->gpio_wakeup))
+			return PTR_ERR(qm35_ctx->gpio_wakeup);
+	}
+
+	return 0;
+}
+
+/* Enable or disable one optional regulator; a missing one is skipped. */
+static int qm35_regulator_set_one(struct regulator *reg, bool on)
+{
+	if (!reg)
+		return 0;
+
+	if (on)
+		return regulator_enable(reg);
+	return regulator_disable(reg);
+}
+
+/*
+ * qm35_regulators_set - switch all QM35 supplies on or off, once, and
+ * wait for them to stabilize.  With NO_UWB_HAL the supplies are forced
+ * on.  Failures are logged per regulator but not propagated.
+ */
+static void qm35_regulators_set(struct qm35_ctx *qm35_hdl, bool on)
+{
+	static const char *str_fmt = "failed to %s %s regulator: %d\n";
+	struct device *dev = &qm35_hdl->spi->dev;
+	const char *on_str = on ? "enable" : "disable";
+	struct {
+		struct regulator *reg;
+		const char *name;
+	} supplies[] = {
+		{ qm35_hdl->vdd1, "vdd1" },
+		{ qm35_hdl->vdd2, "vdd2" },
+		{ qm35_hdl->vdd3, "vdd3" },
+		{ qm35_hdl->vdd4, "vdd4" },
+	};
+	bool is_enabled;
+	size_t i;
+	int ret;
+
+	if (NO_UWB_HAL) {
+		on = true;
+		on_str = "enable";
+	}
+
+	spin_lock(&qm35_hdl->lock);
+
+	is_enabled = qm35_hdl->regulators_enabled;
+	qm35_hdl->regulators_enabled = on;
+
+	spin_unlock(&qm35_hdl->lock);
+
+	/* nothing to do we are already in the desired state */
+	if (is_enabled == on)
+		return;
+
+	for (i = 0; i < sizeof(supplies) / sizeof(supplies[0]); i++) {
+		ret = qm35_regulator_set_one(supplies[i].reg, on);
+		if (ret)
+			dev_err(dev, str_fmt, on_str, supplies[i].name, ret);
+	}
+
+	/* wait for regulator stabilization */
+	usleep_range(QM35_REGULATOR_DELAY_US, QM35_REGULATOR_DELAY_US + 100);
+}
+
+/*
+ * qm35_regulators_setup_one - look up one optional regulator by name.
+ * When absent (or on error) a notice is logged and *reg is set to NULL,
+ * which qm35_regulator_set_one() treats as "skip".
+ */
+static void qm35_regulators_setup_one(struct regulator **reg,
+				      struct device *dev, const char *name)
+{
+	/* PTR_ERR() yields a long: %ld (the old %d was a format/argument
+	 * type mismatch).
+	 */
+	static const char *str_fmt =
+		"%s regulator not defined in device tree: %ld\n";
+	struct regulator *tmp;
+
+	tmp = devm_regulator_get_optional(dev, name);
+	if (IS_ERR(tmp)) {
+		dev_notice(dev, str_fmt, name, PTR_ERR(tmp));
+		tmp = NULL;
+	}
+	*reg = tmp;
+}
+
+/*
+ * qm35_regulators_setup - resolve the four optional QM35 supplies from
+ * the device tree and initialize the power-tracking state.
+ */
+static void qm35_regulators_setup(struct qm35_ctx *qm35_hdl)
+{
+	struct device *dev = &qm35_hdl->spi->dev;
+	struct regulator **regs[] = { &qm35_hdl->vdd1, &qm35_hdl->vdd2,
+				      &qm35_hdl->vdd3, &qm35_hdl->vdd4 };
+	static const char *const names[] = { "qm35-vdd1", "qm35-vdd2",
+					     "qm35-vdd3", "qm35-vdd4" };
+	size_t i;
+
+	for (i = 0; i < sizeof(regs) / sizeof(regs[0]); i++)
+		qm35_regulators_setup_one(regs[i], dev, names[i]);
+
+	qm35_hdl->regulators_enabled = false;
+	qm35_hdl->state = QM35_CTRL_STATE_RESET;
+}
+
+/*
+ * qm35_probe() - SPI probe: allocate the driver context, configure the
+ * SPI link, set up GPIOs/regulators and the HSSPI protocol layers, then
+ * expose the UCI misc device.
+ *
+ * Returns 0 on success or a negative errno; on failure the goto ladder
+ * below unwinds every layer initialized so far and powers off.
+ */
+static int qm35_probe(struct spi_device *spi)
+{
+	struct qm35_ctx *qm35_ctx;
+	struct miscdevice *uci_misc;
+	int ret = 0;
+
+	/* fwname / spi_speed_hz are presumably module parameters defined
+	 * earlier in this file — TODO confirm; they override the firmware
+	 * file name and the SPI bus speed respectively.
+	 */
+	if (fwname) {
+		qmrom_set_fwname(fwname);
+	}
+
+	if (spi_speed_hz) {
+		spi->max_speed_hz = spi_speed_hz;
+
+		ret = spi_setup(spi);
+		if (ret) {
+			dev_err(&spi->dev,
+				"spi_setup: requested spi speed=%d ret=%d\n",
+				spi_speed_hz, ret);
+			return ret;
+		}
+	}
+
+	/* devm allocation: freed automatically on probe failure or remove */
+	qm35_ctx = devm_kzalloc(&spi->dev, sizeof(*qm35_ctx), GFP_KERNEL);
+	if (!qm35_ctx)
+		return -ENOMEM;
+
+	qm35_ctx->spi = spi;
+	qm35_ctx->log_qm_traces = log_qm_traces;
+	spin_lock_init(&qm35_ctx->lock);
+
+	spi_set_drvdata(spi, qm35_ctx);
+
+	/* optional reset line, requested deasserted (logical low) */
+	qm35_ctx->gpio_reset =
+		devm_gpiod_get_optional(&spi->dev, "reset", GPIOD_OUT_LOW);
+	if (IS_ERR(qm35_ctx->gpio_reset)) {
+		ret = PTR_ERR(qm35_ctx->gpio_reset);
+		return ret;
+	}
+
+	qm35_regulators_setup(qm35_ctx);
+
+	uci_misc = &qm35_ctx->uci_dev;
+	uci_misc->minor = MISC_DYNAMIC_MINOR;
+	uci_misc->name = UCI_DEV_NAME;
+	uci_misc->fops = &uci_fops;
+	uci_misc->parent = &spi->dev;
+
+	/* we need the debugfs root initialized here to be able
+	 * to display the soc info populated if flash_on_probe
+	 * is set for chips different than A0
+	 */
+	ret = debug_init_root(&qm35_ctx->debug, NULL);
+	if (ret) {
+		debug_deinit(&qm35_ctx->debug);
+		goto poweroff;
+	}
+
+	ret = hsspi_init(&qm35_ctx->hsspi, spi);
+	if (ret)
+		goto poweroff;
+
+	ret = uci_layer_init(&qm35_ctx->uci_layer);
+	if (ret)
+		goto hsspi_deinit;
+
+	ret = debug_init(&qm35_ctx->debug);
+	if (ret)
+		goto uci_layer_deinit;
+
+	ret = hsspi_test_init(&qm35_ctx->hsspi);
+	if (ret)
+		goto debug_deinit;
+
+	ret = coredump_layer_init(&qm35_ctx->coredump_layer, &qm35_ctx->debug);
+	if (ret)
+		goto hsspi_test_deinit;
+
+	ret = log_layer_init(&qm35_ctx->log_layer, &qm35_ctx->debug);
+	if (ret)
+		goto coredump_layer_deinit;
+
+	ret = hsspi_register(&qm35_ctx->hsspi,
+			     &qm35_ctx->coredump_layer.hlayer);
+	if (ret)
+		goto log_layer_deinit;
+
+	ret = hsspi_register(&qm35_ctx->hsspi, &qm35_ctx->log_layer.hlayer);
+	if (ret)
+		goto coredump_layer_unregister;
+
+	/* give the QM35 time to boot before the first exchanges */
+	msleep(QM_BOOT_MS);
+
+	ret = hsspi_irqs_setup(qm35_ctx);
+	if (ret)
+		goto log_layer_unregister;
+
+	if (flash_on_probe) {
+		/* power on, flash the firmware, then power back off */
+		qm35_regulators_set(qm35_ctx, true);
+		ret = qm_firmware_load(qm35_ctx);
+		qm35_regulators_set(qm35_ctx, false);
+		if (ret)
+			goto log_layer_unregister;
+	}
+
+	hsspi_set_gpios(&qm35_ctx->hsspi, qm35_ctx->gpio_ss_rdy,
+			qm35_ctx->gpio_exton);
+
+	if (!NO_UWB_HAL) {
+		/* If regulators not available, QM is powered on */
+		if (!REGULATORS_ENABLED(qm35_ctx))
+			hsspi_start(&qm35_ctx->hsspi);
+	} else {
+		qm35_regulators_set(qm35_ctx, true);
+		usleep_range(100000, 100000);
+		hsspi_start(&qm35_ctx->hsspi);
+	}
+
+	/* NOTE(review): a failure below only unwinds the layers;
+	 * hsspi_start()/hsspi_irqs_setup() are not reverted on this
+	 * path — confirm this is intended.
+	 */
+	ret = misc_register(&qm35_ctx->uci_dev);
+	if (ret) {
+		dev_err(&spi->dev, "Failed to register uci device\n");
+		goto log_layer_unregister;
+	}
+
+	dev_info(&spi->dev, "Registered: [%s] misc device\n", uci_misc->name);
+
+	dev_info(&spi->dev, "QM35 spi driver version " DRV_VERSION " probed\n");
+	return 0;
+
+log_layer_unregister:
+	hsspi_unregister(&qm35_ctx->hsspi, &qm35_ctx->log_layer.hlayer);
+coredump_layer_unregister:
+	hsspi_unregister(&qm35_ctx->hsspi, &qm35_ctx->coredump_layer.hlayer);
+log_layer_deinit:
+	log_layer_deinit(&qm35_ctx->log_layer);
+coredump_layer_deinit:
+	coredump_layer_deinit(&qm35_ctx->coredump_layer);
+hsspi_test_deinit:
+	hsspi_test_deinit(&qm35_ctx->hsspi);
+debug_deinit:
+	debug_deinit(&qm35_ctx->debug);
+uci_layer_deinit:
+	uci_layer_deinit(&qm35_ctx->uci_layer);
+hsspi_deinit:
+	hsspi_deinit(&qm35_ctx->hsspi);
+poweroff:
+	qm35_regulators_set(qm35_ctx, false);
+	return ret;
+}
+
+/*
+ * qm35_remove() - SPI remove: tear down everything qm35_probe() set up,
+ * in reverse order of registration, then power off the regulators.
+ */
+static int qm35_remove(struct spi_device *spi)
+{
+	struct qm35_ctx *qm35_hdl = spi_get_drvdata(spi);
+
+	/* stop exposing the UCI char device before stopping the transport */
+	misc_deregister(&qm35_hdl->uci_dev);
+
+	hsspi_stop(&qm35_hdl->hsspi);
+
+	hsspi_unregister(&qm35_hdl->hsspi, &qm35_hdl->log_layer.hlayer);
+	hsspi_unregister(&qm35_hdl->hsspi, &qm35_hdl->coredump_layer.hlayer);
+
+	log_layer_deinit(&qm35_hdl->log_layer);
+	coredump_layer_deinit(&qm35_hdl->coredump_layer);
+	hsspi_test_deinit(&qm35_hdl->hsspi);
+	debug_deinit(&qm35_hdl->debug);
+	uci_layer_deinit(&qm35_hdl->uci_layer);
+
+	hsspi_deinit(&qm35_hdl->hsspi);
+
+	/* cut power to the QM35 */
+	qm35_regulators_set(qm35_hdl, false);
+
+	dev_info(&spi->dev, "Deregistered: [%s] misc device\n",
+		 qm35_hdl->uci_dev.name);
+	return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int qm35_pm_suspend(struct device *dev)
+{
+	/* Quiesce the HSSPI state machine before the system sleeps. */
+	qm35_hsspi_stop(dev_get_drvdata(dev));
+
+	return 0;
+}
+
+static int qm35_pm_resume(struct device *dev)
+{
+	/* Restart the HSSPI state machine after system wake-up. */
+	qm35_hsspi_start(dev_get_drvdata(dev));
+
+	return 0;
+}
+#endif /* CONFIG_PM_SLEEP */
+
+static SIMPLE_DEV_PM_OPS(qm35_spi_ops, qm35_pm_suspend, qm35_pm_resume);
+/*
+ * NOTE(review): recent kernels define pm_sleep_ptr() in <linux/pm.h>;
+ * this local fallback may cause a macro redefinition warning there —
+ * consider guarding with #ifndef against the targeted kernel versions.
+ */
+#define pm_sleep_ptr(_ptr) (IS_ENABLED(CONFIG_PM_SLEEP) ? (_ptr) : NULL)
+
+static struct spi_driver qm35_spi_driver = {
+	.driver = {
+		.name = "qm35",
+		.of_match_table = of_match_ptr(qm35_dt_ids),
+		.pm = pm_sleep_ptr(&qm35_spi_ops),
+	},
+	.probe = qm35_probe,
+	.remove = qm35_remove,
+};
+module_spi_driver(qm35_spi_driver);
+
+MODULE_AUTHOR("Qorvo US, Inc.");
+MODULE_DESCRIPTION("QM35 SPI device interface");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_VERSION);
diff --git a/qm35-trace.c b/qm35-trace.c
new file mode 100644
index 0000000..9bbc0f3
--- /dev/null
+++ b/qm35-trace.c
@@ -0,0 +1,32 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * This file is part of the QM35 UCI stack for linux.
+ *
+ * Copyright (c) 2021 Qorvo US, Inc.
+ *
+ * This software is provided under the GNU General Public License, version 2
+ * (GPLv2), as well as under a Qorvo commercial license.
+ *
+ * You may choose to use this software under the terms of the GPLv2 License,
+ * version 2 ("GPLv2"), as published by the Free Software Foundation.
+ * You should have received a copy of the GPLv2 along with this program. If
+ * not, see <http://www.gnu.org/licenses/>.
+ *
+ * This program is distributed under the GPLv2 in the hope that it will be
+ * useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GPLv2 for more
+ * details.
+ *
+ * If you cannot meet the requirements of the GPLv2, you may not use this
+ * software for any purpose without first obtaining a commercial license from
+ * Qorvo.
+ * Please contact Qorvo to inquire about licensing terms.
+ *
+ * QM35 trace
+ */
+
+#include <linux/module.h>
+
+#define CREATE_TRACE_POINTS
+#include "qm35-trace.h"
diff --git a/qm35-trace.h b/qm35-trace.h
new file mode 100644
index 0000000..7a54b5a
--- /dev/null
+++ b/qm35-trace.h
@@ -0,0 +1,129 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/*
+ * This file is part of the QM35 UCI stack for linux.
+ *
+ * Copyright (c) 2021 Qorvo US, Inc.
+ *
+ * This software is provided under the GNU General Public License, version 2
+ * (GPLv2), as well as under a Qorvo commercial license.
+ *
+ * You may choose to use this software under the terms of the GPLv2 License,
+ * version 2 ("GPLv2"), as published by the Free Software Foundation.
+ * You should have received a copy of the GPLv2 along with this program. If
+ * not, see <http://www.gnu.org/licenses/>.
+ *
+ * This program is distributed under the GPLv2 in the hope that it will be
+ * useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GPLv2 for more
+ * details.
+ *
+ * If you cannot meet the requirements of the GPLv2, you may not use this
+ * software for any purpose without first obtaining a commercial license from
+ * Qorvo.
+ * Please contact Qorvo to inquire about licensing terms.
+ *
+ * QM35 trace
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM qm35
+
+#if !defined(_QM35_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _QM35_TRACE_H
+
+#include <linux/tracepoint.h>
+
+#include "hsspi.h"
+
+/*
+ * Render an hsspi work type as a symbolic string in trace output:
+ * "TX", "COMPLETION", or "no" for the -1 sentinel.
+ */
+#define show_work_type(type) \
+	__print_symbolic(type, \
+			 { \
+				 HSSPI_WORK_TX, \
+				 "TX", \
+			 }, \
+			 { \
+				 HSSPI_WORK_COMPLETION, \
+				 "COMPLETION", \
+			 }, \
+			 { \
+				 -1, \
+				 "no", \
+			 })
+
+/* Export the enum values so user-space tooling can decode them. */
+TRACE_DEFINE_ENUM(HSSPI_WORK_TX);
+TRACE_DEFINE_ENUM(HSSPI_WORK_COMPLETION);
+
+/* Trace event logging a device name and an hsspi work type. */
+TRACE_EVENT(hsspi_get_work, TP_PROTO(const struct device *dev, int type),
+	    TP_ARGS(dev, type),
+	    TP_STRUCT__entry(__string(dev, dev_name(dev)) __field(int, type)),
+	    TP_fast_assign(__assign_str(dev, dev_name(dev));
+			   __entry->type = type;),
+	    TP_printk("[%s]: %s work", __get_str(dev),
+		      show_work_type(__entry->type)));
+
+/* Render an hsspi_state enum value as a readable string. */
+#define show_hsspi_state(state) \
+	__print_symbolic(state, \
+			 { \
+				 HSSPI_RUNNING, \
+				 "running", \
+			 }, \
+			 { \
+				 HSSPI_ERROR, \
+				 "error", \
+			 }, \
+			 { \
+				 HSSPI_STOPPED, \
+				 "stopped", \
+			 })
+
+/* Export the enum values so user-space tooling can decode them. */
+TRACE_DEFINE_ENUM(HSSPI_RUNNING);
+TRACE_DEFINE_ENUM(HSSPI_ERROR);
+TRACE_DEFINE_ENUM(HSSPI_STOPPED);
+
+/* Trace event logging the queue-empty flag and the hsspi state. */
+TRACE_EVENT(hsspi_is_txrx_waiting,
+	    TP_PROTO(const struct device *dev, bool is_empty,
+		     enum hsspi_state state),
+	    TP_ARGS(dev, is_empty, state),
+	    TP_STRUCT__entry(__string(dev, dev_name(dev))
+				     __field(bool, is_empty)
+					     __field(enum hsspi_state, state)),
+	    TP_fast_assign(__assign_str(dev, dev_name(dev));
+			   __entry->is_empty = is_empty;
+			   __entry->state = state;),
+	    TP_printk("[%s]: is_empty: %d state: %s", __get_str(dev),
+		      __entry->is_empty, show_hsspi_state(__entry->state)));
+
+/*
+ * Helpers factoring the three stc_header fields (flags, ul, length)
+ * into a trace entry, its assignment, its format string and its
+ * printk arguments.  @header is a field-name prefix (host/soc).
+ */
+#define STC_ENTRY(header) \
+	__field(u8, header##flags) __field(u8, header##ul) \
+		__field(u16, header##length)
+#define STC_ASSIGN(header, var) \
+	__entry->header##flags = var->flags; \
+	__entry->header##ul = var->ul; \
+	__entry->header##length = var->length;
+#define STC_FMT "flags:0x%hhx ul:%hhd len:%hd"
+#define STC_ARG(header) \
+	__entry->header##flags, __entry->header##ul, __entry->header##length
+
+/* Trace event logging host and soc STC headers plus the xfer status. */
+TRACE_EVENT(hsspi_spi_xfer,
+	    TP_PROTO(const struct device *dev, const struct stc_header *host,
+		     struct stc_header *soc, int ret),
+	    TP_ARGS(dev, host, soc, ret),
+	    TP_STRUCT__entry(__string(dev, dev_name(dev)) STC_ENTRY(host)
+				     STC_ENTRY(soc) __field(int, ret)),
+	    TP_fast_assign(__assign_str(dev, dev_name(dev));
+			   STC_ASSIGN(host, host); STC_ASSIGN(soc, soc);
+			   __entry->ret = ret;),
+	    TP_printk("[%s]: host " STC_FMT " | soc " STC_FMT " rc=%d",
+		      __get_str(dev), STC_ARG(host), STC_ARG(soc),
+		      __entry->ret));
+
+#endif /* _QM35_TRACE_H */
+
+/* This part must be outside protection */
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+// clang-format off
+#define TRACE_INCLUDE_FILE qm35-trace
+// clang-format on
+#include <trace/define_trace.h>
diff --git a/qm35.h b/qm35.h
new file mode 100644
index 0000000..59cf825
--- /dev/null
+++ b/qm35.h
@@ -0,0 +1,103 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __QM35_H___
+#define __QM35_H___
+
+#include <linux/gpio.h>
+#include <linux/delay.h>
+#include <linux/spi/spi.h>
+#include <linux/spinlock.h>
+#include <linux/miscdevice.h>
+
+#include "uci_ioctls.h"
+#include "hsspi.h"
+#include "hsspi_uci.h"
+#include "hsspi_coredump.h"
+#include "hsspi_log.h"
+#include "debug.h"
+
+#define DEBUG_CERTIFICATE_SIZE 2560
+#define QM_RESET_LOW_MS 2
+/*
+ * value found using a SALAE
+ */
+#define QM_BOOT_MS 450
+#define QM_BEFORE_RESET_MS 450
+
+#define DRV_VERSION "6.3.8-rc1"
+
+struct regulator;
+
+/**
+ * struct qm35_ctx - QM35 driver context
+ *
+ */
+struct qm35_ctx {
+	unsigned int state; /* QM35_CTRL_STATE_*; written under @lock */
+	struct miscdevice uci_dev; /* the /dev/uci character device */
+	struct spi_device *spi; /* underlying SPI device */
+	struct gpio_desc *gpio_csn;
+	struct gpio_desc *gpio_reset; /* optional; pulsed by qm35_reset() */
+	struct gpio_desc *gpio_ss_rdy;
+	struct gpio_desc *gpio_ss_irq;
+	struct gpio_desc *gpio_exton;
+	struct gpio_desc *gpio_wakeup;
+	spinlock_t lock; /* protects @state and @regulators_enabled */
+	bool out_data_wait;
+	bool out_active;
+	bool soc_error;
+	struct hsspi hsspi;
+	struct uci_layer uci_layer;
+	struct coredump_layer coredump_layer;
+	struct log_layer log_layer;
+	struct debug debug;
+	struct regulator *vdd1; /* optional supplies; NULL when absent */
+	struct regulator *vdd2;
+	struct regulator *vdd3;
+	struct regulator *vdd4;
+	bool regulators_enabled; /* last state set by qm35_regulators_set() */
+	bool log_qm_traces; /* copied from module parameter in probe */
+};
+
+/* Read the current control state (lockless read; writers hold the lock). */
+static inline unsigned int qm35_get_state(struct qm35_ctx *qm35_hdl)
+{
+	return qm35_hdl->state;
+}
+
+/* Update the control state under the context spinlock (IRQ-safe). */
+static inline void qm35_set_state(struct qm35_ctx *qm35_hdl, int state)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&qm35_hdl->lock, flags);
+	qm35_hdl->state = state;
+	spin_unlock_irqrestore(&qm35_hdl->lock, flags);
+}
+
+/*
+ * qm35_reset() - pulse the reset GPIO for @timeout_ms milliseconds.
+ * @run: when false, assert reset and return, leaving the chip held
+ *       in reset.
+ *
+ * Returns 0 on success, -ENODEV when no reset GPIO is available.
+ */
+static inline int qm35_reset(struct qm35_ctx *qm35_hdl, int timeout_ms,
+			     bool run)
+{
+	if (qm35_hdl->gpio_reset) {
+		qm35_set_state(qm35_hdl, QM35_CTRL_STATE_RESET);
+		gpiod_set_value(qm35_hdl->gpio_reset, 1);
+		if (!run)
+			return 0;
+		/* NOTE(review): min == max leaves the hrtimer no slack;
+		 * a small range is usually preferred for coalescing.
+		 */
+		usleep_range(timeout_ms * 1000, timeout_ms * 1000);
+		gpiod_set_value(qm35_hdl->gpio_reset, 0);
+		qm35_set_state(qm35_hdl, QM35_CTRL_STATE_UNKNOWN);
+		return 0;
+	}
+
+	return -ENODEV;
+}
+
+int qm35_reset_sync(struct qm35_ctx *qm35_hdl);
+
+int qm_get_dev_id(struct qm35_ctx *qm35_hdl, uint16_t *dev_id);
+int qm_get_soc_id(struct qm35_ctx *qm35_hdl, uint8_t *soc_id);
+
+void qm35_hsspi_start(struct qm35_ctx *qm35_hdl);
+void qm35_hsspi_stop(struct qm35_ctx *qm35_hdl);
+
+void qmrom_set_fwname(const char *name);
+
+#endif /* __QM35_H___ */
diff --git a/qm35_rb.c b/qm35_rb.c
new file mode 100644
index 0000000..cc7becf
--- /dev/null
+++ b/qm35_rb.c
@@ -0,0 +1,230 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * This file is part of the QM35 UCI stack for linux.
+ *
+ * Copyright (c) 2022 Qorvo US, Inc.
+ *
+ * This software is provided under the GNU General Public License, version 2
+ * (GPLv2), as well as under a Qorvo commercial license.
+ *
+ * You may choose to use this software under the terms of the GPLv2 License,
+ * version 2 ("GPLv2"), as published by the Free Software Foundation.
+ * You should have received a copy of the GPLv2 along with this program. If
+ * not, see <http://www.gnu.org/licenses/>.
+ *
+ * This program is distributed under the GPLv2 in the hope that it will be
+ * useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GPLv2 for more
+ * details.
+ *
+ * If you cannot meet the requirements of the GPLv2, you may not use this
+ * software for any purpose without first obtaining a commercial license from
+ * Qorvo.
+ * Please contact Qorvo to inquire about licensing terms.
+ *
+ * QM35 Ringbuffer
+ */
+
+#include <linux/slab.h>
+#include <linux/wait.h>
+#include <linux/sched.h>
+
+#include "qm35_rb.h"
+
+/*
+ * Data is available iff the given tail differs from the write head
+ * (head == tail means "empty").  Must be called with rb->lock held.
+ */
+static bool __rb_can_pop(struct rb *rb, uint32_t *tail)
+{
+	// if tail equals with the head index, no data can be popped
+	return *tail != rb->head;
+}
+
+bool rb_can_pop(struct rb *rb)
+{
+	bool data_available;
+
+	/* Take the lock so head and rdtail are read consistently. */
+	mutex_lock(&rb->lock);
+	data_available = __rb_can_pop(rb, &rb->rdtail);
+	mutex_unlock(&rb->lock);
+
+	return data_available;
+}
+
+/*
+ * Return the size of the entry at *tail, transparently handling the
+ * end-of-buffer marker (size 0) by rewinding *tail to offset 0.
+ * Returns 0 when no entry is available.  Must be called with rb->lock
+ * held.  May modify *tail.
+ */
+static rb_entry_size_t __rb_next_size(struct rb *rb, uint32_t *tail)
+{
+	rb_entry_size_t next_packet_size = 0;
+
+	// check if something is available to pop from the ring buffer
+	if (!__rb_can_pop(rb, tail))
+		return 0;
+
+	// read next packet size
+	memcpy(&next_packet_size, rb->buf + *tail, sizeof(next_packet_size));
+
+	if (next_packet_size == 0) {
+		/* a zero size at offset 0 means the buffer holds no data */
+		if (*tail == 0)
+			return 0;
+		// if the next packet size is 0x00, it is an
+		// indication that we hit the end marker so we should
+		// start reading from the beginning
+		*tail = 0;
+		memcpy(&next_packet_size, rb->buf + *tail,
+		       sizeof(next_packet_size));
+	}
+
+	return next_packet_size;
+}
+
+rb_entry_size_t rb_next_size(struct rb *rb)
+{
+	rb_entry_size_t size;
+
+	/* Locked wrapper around __rb_next_size() on the read tail. */
+	mutex_lock(&rb->lock);
+	size = __rb_next_size(rb, &rb->rdtail);
+	mutex_unlock(&rb->lock);
+
+	return size;
+}
+
+/*
+ * Pop the entry at *tail into a freshly kmalloc'ed buffer.
+ *
+ * Returns the buffer (caller must kfree it) with its size stored in
+ * *len, or NULL with *len == 0 when the ring is empty or allocation
+ * fails.  Must be called with rb->lock held.
+ */
+static char *__rb_pop(struct rb *rb, rb_entry_size_t *len, uint32_t *tail)
+{
+	char *trace;
+
+	*len = __rb_next_size(rb, tail);
+	if (*len == 0)
+		return NULL;
+
+	/*
+	 * Allocate before advancing the tail so a failed allocation does
+	 * not desynchronize the read cursor from the entry stream.
+	 */
+	trace = kmalloc(*len, GFP_KERNEL);
+	if (!trace) {
+		*len = 0;
+		return NULL;
+	}
+
+	// advance ptr with sizeof(next_entry_size)
+	*tail += sizeof(*len);
+
+	// get the data
+	memcpy(trace, rb->buf + *tail, *len);
+	*tail += *len;
+
+	return trace;
+}
+
+char *rb_pop(struct rb *rb, rb_entry_size_t *len)
+{
+	char *data;
+
+	/* Locked wrapper around __rb_pop() on the read tail. */
+	mutex_lock(&rb->lock);
+	data = __rb_pop(rb, len, &rb->rdtail);
+	mutex_unlock(&rb->lock);
+
+	return data;
+}
+
+/*
+ * Drop the oldest entry (at rb->tail) to make room for a new write,
+ * handling the end-of-buffer marker (size 0) by rewinding to offset 0.
+ * Returns false only when the marker sits at offset 0, i.e. nothing
+ * can be skipped.  Must be called with rb->lock held.
+ */
+static bool __rb_skip(struct rb *rb)
+{
+	rb_entry_size_t next_entry_size;
+
+	// read next packet size
+	memcpy(&next_entry_size, rb->buf + rb->tail, sizeof(next_entry_size));
+
+	if (next_entry_size == 0) {
+		if (rb->tail == 0)
+			return false;
+		// if the next packet size is 0x00, it is an
+		// indication that we hit the end marker so we should
+		// start reading from the beginning
+		rb->tail = 0;
+		memcpy(&next_entry_size, rb->buf + rb->tail,
+		       sizeof(next_entry_size));
+	}
+
+	// advance ptr with sizeof(next_entry_size)
+	rb->tail += sizeof(next_entry_size) + next_entry_size;
+
+	return true;
+}
+
+/*
+ * rb_push() - append a length-prefixed entry, overwriting the oldest
+ * entries when the buffer is full.
+ *
+ * NOTE(review): returns 1 (not a negative errno) when @len is 0 —
+ * callers must check for non-zero rather than negative.
+ */
+int rb_push(struct rb *rb, const char *data, rb_entry_size_t len)
+{
+	rb_entry_size_t entry_size;
+	uint32_t available_to_end;
+
+	// doesn't make sense to push a packet with the payload len 0.
+	if (len == 0)
+		return 1;
+
+	mutex_lock(&rb->lock);
+
+	// calculate how much data we want to store
+	// we add the size of the trace with the size of the size of the trace
+	// because we want to store the size as well for reading
+	entry_size = sizeof(len) + len;
+
+	// available data to the end of the ring buffer
+	available_to_end = rb->size - rb->head;
+
+	// we check that by writing the buffer still has 2 bytes left
+	// to write the buffer end marker 0x0000 if not, we just add
+	// the marker and start from beginning
+	if (available_to_end <= entry_size + sizeof(rb_entry_size_t)) {
+		memset(rb->buf + rb->head, 0, sizeof(rb_entry_size_t));
+		/* tails past the wrap point restart at offset 0 too */
+		if (rb->tail >= rb->head)
+			rb->tail = 0;
+		if (rb->rdtail >= rb->head)
+			rb->rdtail = 0;
+		rb->head = 0;
+	}
+
+	// check if writing the new data will cause the head index to
+	// overrun the tail index
+	while ((rb->head <= rb->tail) && (rb->head + entry_size >= rb->tail)) {
+		/* when rdtail tracked tail, keep it tracking after skip */
+		bool equal_tails = rb->tail == rb->rdtail;
+		// need to adjust the tail to point to the next log
+		// not overwritten
+		bool skipped = __rb_skip(rb);
+
+		if (equal_tails)
+			rb->rdtail = rb->tail;
+
+		if (!skipped)
+			break;
+	}
+
+	// copy the size first
+	memcpy(rb->buf + rb->head, &len, sizeof(len));
+	// move the head by sizeof(trace->size) to be in place for copying the data
+	rb->head += sizeof(len);
+	// copy the data
+	memcpy(rb->buf + rb->head, data, len);
+	// update the head ptr to the next write
+	rb->head += len;
+
+	mutex_unlock(&rb->lock);
+
+	return 0;
+}
+
+/* Rewind the read cursor to the oldest entry still present in the buffer. */
+void rb_reset(struct rb *rb)
+{
+	mutex_lock(&rb->lock);
+	rb->rdtail = rb->tail;
+	mutex_unlock(&rb->lock);
+}
+
+/*
+ * rb_init() - allocate the backing buffer of @size bytes and reset all
+ * cursors.  Returns 0 on success or -ENOMEM on allocation failure.
+ */
+int rb_init(struct rb *rb, uint32_t size)
+{
+	rb->buf = kmalloc(size, GFP_KERNEL);
+	if (!rb->buf)
+		return -ENOMEM;
+
+	mutex_init(&rb->lock);
+	rb->head = 0;
+	rb->tail = 0;
+	rb->rdtail = 0;
+	rb->size = size;
+
+	return 0;
+}
+
+/* Free the backing storage allocated by rb_init(). */
+void rb_deinit(struct rb *rb)
+{
+	kfree(rb->buf);
+	/* clear the pointer so a double rb_deinit() or late access is
+	 * caught as a NULL dereference rather than a use-after-free
+	 */
+	rb->buf = NULL;
+}
diff --git a/qm35_rb.h b/qm35_rb.h
new file mode 100644
index 0000000..46d3478
--- /dev/null
+++ b/qm35_rb.h
@@ -0,0 +1,55 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/*
+ * This file is part of the QM35 UCI stack for linux.
+ *
+ * Copyright (c) 2022 Qorvo US, Inc.
+ *
+ * This software is provided under the GNU General Public License, version 2
+ * (GPLv2), as well as under a Qorvo commercial license.
+ *
+ * You may choose to use this software under the terms of the GPLv2 License,
+ * version 2 ("GPLv2"), as published by the Free Software Foundation.
+ * You should have received a copy of the GPLv2 along with this program. If
+ * not, see <http://www.gnu.org/licenses/>.
+ *
+ * This program is distributed under the GPLv2 in the hope that it will be
+ * useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GPLv2 for more
+ * details.
+ *
+ * If you cannot meet the requirements of the GPLv2, you may not use this
+ * software for any purpose without first obtaining a commercial license from
+ * Qorvo.
+ * Please contact Qorvo to inquire about licensing terms.
+ *
+ * QM35 Ringbuffer
+ */
+
+#ifndef __QM35_RB_H__
+#define __QM35_RB_H__
+
+#include <linux/types.h>
+
+typedef uint16_t rb_entry_size_t;
+
+struct rb {
+ uint8_t *buf;
+ uint32_t size;
+ uint32_t head;
+ uint32_t tail;
+ uint32_t rdtail;
+ struct mutex lock;
+};
+
+bool rb_can_pop(struct rb *rb);
+rb_entry_size_t rb_next_size(struct rb *rb);
+
+char *rb_pop(struct rb *rb, rb_entry_size_t *len);
+int rb_push(struct rb *rb, const char *data, rb_entry_size_t len);
+void rb_reset(struct rb *rb);
+
+int rb_init(struct rb *rb, uint32_t size);
+void rb_deinit(struct rb *rb);
+
+#endif // __QM35_RB_H__
diff --git a/qmrom_spi.c b/qmrom_spi.c
new file mode 100644
index 0000000..0592d2f
--- /dev/null
+++ b/qmrom_spi.c
@@ -0,0 +1,158 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * This file is part of the QM35 UCI stack for linux.
+ *
+ * Copyright (c) 2021 Qorvo US, Inc.
+ *
+ * This software is provided under the GNU General Public License, version 2
+ * (GPLv2), as well as under a Qorvo commercial license.
+ *
+ * You may choose to use this software under the terms of the GPLv2 License,
+ * version 2 ("GPLv2"), as published by the Free Software Foundation.
+ * You should have received a copy of the GPLv2 along with this program. If
+ * not, see <http://www.gnu.org/licenses/>.
+ *
+ * This program is distributed under the GPLv2 in the hope that it will be
+ * useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GPLv2 for more
+ * details.
+ *
+ * If you cannot meet the requirements of the GPLv2, you may not use this
+ * software for any purpose without first obtaining a commercial license from
+ * Qorvo.
+ * Please contact Qorvo to inquire about licensing terms.
+ *
+ * QM35 FW ROM protocol SPI ops
+ */
+
+#include <linux/spi/spi.h>
+
+#include <qmrom_spi.h>
+#include <spi_rom_protocol.h>
+
+#include "qm35.h"
+
+static const char *fwname = NULL;
+static unsigned int speed_hz;
+extern int trace_spi_xfers;
+
+/*
+ * Override the firmware file name used by qmrom_spi_get_firmware().
+ * @name must stay valid for the driver lifetime (only a pointer is kept).
+ */
+void qmrom_set_fwname(const char *name)
+{
+	fwname = name;
+}
+
+/*
+ * Full-duplex SPI transfer of @size bytes at the frequency set via
+ * qmrom_spi_set_freq().  When the trace_spi_xfers flag is set, both
+ * buffers are hex-dumped (even on transfer error).
+ * Returns spi_sync_transfer()'s status.
+ */
+int qmrom_spi_transfer(void *handle, char *rbuf, const char *wbuf, size_t size)
+{
+	struct spi_device *spi = (struct spi_device *)handle;
+	int rc;
+
+	struct spi_transfer xfer[] = {
+		{
+			.tx_buf = wbuf,
+			.rx_buf = rbuf,
+			.len = size,
+			.speed_hz = qmrom_spi_get_freq(),
+		},
+	};
+
+	rc = spi_sync_transfer(spi, xfer, ARRAY_SIZE(xfer));
+
+	if (trace_spi_xfers) {
+		print_hex_dump(KERN_DEBUG, "tx:", DUMP_PREFIX_NONE, 16, 1, wbuf,
+			       size, false);
+		print_hex_dump(KERN_DEBUG, "rx:", DUMP_PREFIX_NONE, 16, 1, rbuf,
+			       size, false);
+	}
+
+	return rc;
+}
+
+/*
+ * Drive the chip-select line by clocking out one dummy byte.  Relies
+ * on the SPI core cs_change semantics: with cs_change = !level, CS is
+ * left asserted after the message when @level is 0 and deasserted when
+ * @level is 1 — assumes active-low CS wiring, TODO confirm.
+ */
+int qmrom_spi_set_cs_level(void *handle, int level)
+{
+	struct spi_device *spi = (struct spi_device *)handle;
+	uint8_t dummy = 0;
+
+	struct spi_transfer xfer[] = {
+		{
+			.tx_buf = &dummy,
+			.len = 1,
+			.cs_change = !level,
+			.speed_hz = qmrom_spi_get_freq(),
+		},
+	};
+
+	return spi_sync_transfer(spi, xfer, ARRAY_SIZE(xfer));
+}
+
+/* Pulse the QM35 reset line, holding it for SPI_RST_LOW_DELAY_MS. */
+int qmrom_spi_reset_device(void *reset_handle)
+{
+	struct qm35_ctx *qm35_hdl = (struct qm35_ctx *)reset_handle;
+
+	return qm35_reset(qm35_hdl, SPI_RST_LOW_DELAY_MS, true);
+}
+
+/*
+ * Select and request the QM35 firmware image.
+ *
+ * The name set via qmrom_set_fwname() takes precedence; otherwise it
+ * is derived from the chip revision, the lifecycle state and
+ * @use_prod_fw.  Returns the firmware (caller releases it with
+ * qmrom_spi_release_firmware()) or NULL on failure.
+ */
+const struct firmware *qmrom_spi_get_firmware(void *handle,
+					      struct qmrom_handle *qmrom_h,
+					      bool use_prod_fw)
+{
+	const struct firmware *fw;
+	struct spi_device *spi = handle;
+	const char *fw_name;
+	int ret;
+	enum chip_revision_e revision = qmrom_h->chip_rev;
+	int lcs_state = qmrom_h->lcs_state;
+
+	if (!fwname) {
+		if (revision == CHIP_REVISION_A0)
+			fw_name = "qm35_a0.bin";
+		else if (lcs_state == CC_BSV_SECURE_LCS) {
+			if (use_prod_fw)
+				fw_name = "qm35_b0_oem_prod.bin";
+			else
+				fw_name = "qm35_b0_oem.bin";
+		} else
+			fw_name = "qm35_b0_icv.bin";
+	} else {
+		fw_name = fwname;
+	}
+	dev_info(&spi->dev, "Requesting fw %s!\n", fw_name);
+
+	ret = request_firmware(&fw, fw_name, &spi->dev);
+	if (ret) {
+		/* on failure request_firmware() leaves no firmware to
+		 * release; just report the error
+		 */
+		dev_err(&spi->dev,
+			"request_firmware failed (ret=%d) for '%s'\n", ret,
+			fw_name);
+		return NULL;
+	}
+
+	dev_info(&spi->dev, "Firmware size is %zu!\n", fw->size);
+
+	return fw;
+}
+
+/* Release a firmware obtained from qmrom_spi_get_firmware(). */
+void qmrom_spi_release_firmware(const struct firmware *fw)
+{
+	release_firmware(fw);
+}
+
+/*
+ * Poll the ready GPIO once per millisecond until it goes high or
+ * @timeout_ms expires.  Returns 0 when ready, -1 on timeout.
+ */
+int qmrom_spi_wait_for_ready_line(void *handle, unsigned int timeout_ms)
+{
+	int remaining = (int)timeout_ms;
+
+	while (!gpiod_get_value(handle)) {
+		if (--remaining < 0)
+			break;
+		usleep_range(1000, 1100);
+	}
+	return gpiod_get_value(handle) ? 0 : -1;
+}
+
+/* Set the SPI clock used by subsequent ROM-protocol transfers. */
+void qmrom_spi_set_freq(unsigned int freq)
+{
+	speed_hz = freq;
+}
+
+/* Return the SPI clock set via qmrom_spi_set_freq(). */
+unsigned int qmrom_spi_get_freq(void)
+{
+	/* (void) makes this a proper prototype; () declares an
+	 * old-style unspecified parameter list in C
+	 */
+	return speed_hz;
+}
diff --git a/uci_ioctls.h b/uci_ioctls.h
new file mode 100644
index 0000000..f785c5e
--- /dev/null
+++ b/uci_ioctls.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __UCI_IOCTLS_H___
+#define __UCI_IOCTLS_H___
+
+#include <asm/ioctl.h>
+
+#define UCI_DEV_NAME "uci"
+#define UCI_IOC_TYPE 'U'
+
+/* ioctls exposed by the UCI misc device */
+#define QM35_CTRL_RESET _IOR(UCI_IOC_TYPE, 1, unsigned int)
+#define QM35_CTRL_GET_STATE _IOR(UCI_IOC_TYPE, 2, unsigned int)
+/* NOTE(review): FW_UPLOAD uses _IOR and POWER uses _IOW — confirm the
+ * direction encodings match the handler's copy_{to,from}_user usage.
+ */
+#define QM35_CTRL_FW_UPLOAD _IOR(UCI_IOC_TYPE, 3, unsigned int)
+#define QM35_CTRL_POWER _IOW(UCI_IOC_TYPE, 4, unsigned int)
+
+/* qm35 states */
+enum { QM35_CTRL_STATE_UNKNOWN = 0x0000,
+       QM35_CTRL_STATE_OFF = 0x0001,
+       QM35_CTRL_STATE_RESET = 0x0002,
+       QM35_CTRL_STATE_COREDUMP = 0x0004,
+       QM35_CTRL_STATE_READY = 0x0008,
+       QM35_CTRL_STATE_FW_DOWNLOADING = 0x0010,
+       QM35_CTRL_STATE_UCI_APP = 0x0020,
+};
+
+#endif /* __UCI_IOCTLS_H___ */