author    Nishant Prajapati <nishantpjpt@google.com>   2021-06-03 16:07:23 +0530
committer Nishant Prajapati <nishantpjpt@google.com>   2021-06-03 16:07:23 +0530
commit    0813d209999161386ea184c6cb8ecede85160406 (patch)
tree      b111d56abf5ef7a679bd2cc5aff53a8e45b6a016
parent    dd8b3e9b13c82b85656aeba1eb42c378adbc4070 (diff)
download  janeiro-0813d209999161386ea184c6cb8ecede85160406.tar.gz
Merge branch 'pro' into android-gs-cloudripper-5.10
* origin/darwinn-2.0: (82 commits)
  edgetpu: janeiro: parse fw header to create IOMMU mapping
  edgetpu: dmabuf map add debug logs for diagnosing errors
  edgetpu: make various usage metrics group writeable
  edgetpu: hermosa remove two obsolete TODOs
  kokoro: checkpatch ignore constant comparison
  edgetpu: unittest: remove obsolete bug
  edgetpu: set cacheability according to device
  edgetpu: add offset back to dmabuf mapping
  edgetpu: refactor error handling
  edgetpu: reset thread stats on write
  edgetpu: abrolhos remove parsing csr-iova nodes
  edgetpu: remove mapping CSRs on mmu setup
  edgetpu: add corrupted log queue msg back
  edgetpu: deprecate offset in edgetpu_map_dmabuf_ioctl
  edgetpu: fix use-after-free Read in edgetpu_chip_acquire_ext_mailbox
  edgetpu: map dmabuf ignore size argument
  edgetpu: hermosa fix UAF in edgetpu_device_dram_getfd
  edgetpu: abrolhos fix thermal notify null deref
  edgetpu: fix UAF write on edgetpu_sync_fence_create
  edgetpu: add firmware_is_loading utility
  ...

Signed-off-by: Nishant Prajapati <nishantpjpt@google.com>
Change-Id: I815985598685d1f0860dda0bf02cad4dc6d1031e
-rw-r--r--  .clang-format                           |  552
-rw-r--r--  .gitignore                              |    1
-rw-r--r--  drivers/edgetpu/edgetpu-core.c          |   36
-rw-r--r--  drivers/edgetpu/edgetpu-device-group.c  |  215
-rw-r--r--  drivers/edgetpu/edgetpu-device-group.h  |   51
-rw-r--r--  drivers/edgetpu/edgetpu-dmabuf.c        |  215
-rw-r--r--  drivers/edgetpu/edgetpu-firmware.c      |  347
-rw-r--r--  drivers/edgetpu/edgetpu-firmware.h      |   18
-rw-r--r--  drivers/edgetpu/edgetpu-fs.c            |   71
-rw-r--r--  drivers/edgetpu/edgetpu-google-iommu.c  |   48
-rw-r--r--  drivers/edgetpu/edgetpu-internal.h      |   38
-rw-r--r--  drivers/edgetpu/edgetpu-kci.c           |   88
-rw-r--r--  drivers/edgetpu/edgetpu-kci.h           |   34
-rw-r--r--  drivers/edgetpu/edgetpu-mailbox.c       |  158
-rw-r--r--  drivers/edgetpu/edgetpu-mailbox.h       |   64
-rw-r--r--  drivers/edgetpu/edgetpu-mapping.h       |   16
-rw-r--r--  drivers/edgetpu/edgetpu-mmu.h           |    1
-rw-r--r--  drivers/edgetpu/edgetpu-pm.c            |   58
-rw-r--r--  drivers/edgetpu/edgetpu-pm.h            |   18
-rw-r--r--  drivers/edgetpu/edgetpu-telemetry.c     |    2
-rw-r--r--  drivers/edgetpu/edgetpu-thermal.h       |    6
-rw-r--r--  drivers/edgetpu/edgetpu-usage-stats.c   |  142
-rw-r--r--  drivers/edgetpu/edgetpu-wakelock.c      |   34
-rw-r--r--  drivers/edgetpu/edgetpu-wakelock.h      |   17
-rw-r--r--  drivers/edgetpu/edgetpu.h               |  129
-rw-r--r--  drivers/edgetpu/janeiro-device.c        |   19
-rw-r--r--  drivers/edgetpu/janeiro-firmware.c      |  107
-rw-r--r--  drivers/edgetpu/janeiro-platform.c      |    7
-rw-r--r--  drivers/edgetpu/janeiro/config.h        |    2
-rw-r--r--  drivers/edgetpu/mm-backport.h           |    4
30 files changed, 1987 insertions, 511 deletions
diff --git a/.clang-format b/.clang-format
new file mode 100644
index 0000000..3212542
--- /dev/null
+++ b/.clang-format
@@ -0,0 +1,552 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# clang-format configuration file. Intended for clang-format >= 4.
+#
+# For more information, see:
+#
+# Documentation/process/clang-format.rst
+# https://clang.llvm.org/docs/ClangFormat.html
+# https://clang.llvm.org/docs/ClangFormatStyleOptions.html
+#
+---
+AccessModifierOffset: -4
+AlignAfterOpenBracket: Align
+AlignConsecutiveAssignments: false
+AlignConsecutiveDeclarations: false
+#AlignEscapedNewlines: Left # Unknown to clang-format-4.0
+AlignOperands: true
+AlignTrailingComments: false
+AllowAllParametersOfDeclarationOnNextLine: false
+AllowShortBlocksOnASingleLine: false
+AllowShortCaseLabelsOnASingleLine: false
+AllowShortFunctionsOnASingleLine: None
+AllowShortIfStatementsOnASingleLine: false
+AllowShortLoopsOnASingleLine: false
+AlwaysBreakAfterDefinitionReturnType: None
+AlwaysBreakAfterReturnType: None
+AlwaysBreakBeforeMultilineStrings: false
+AlwaysBreakTemplateDeclarations: false
+BinPackArguments: true
+BinPackParameters: true
+BraceWrapping:
+ AfterClass: false
+ AfterControlStatement: false
+ AfterEnum: false
+ AfterFunction: true
+ AfterNamespace: true
+ AfterObjCDeclaration: false
+ AfterStruct: false
+ AfterUnion: false
+ #AfterExternBlock: false # Unknown to clang-format-5.0
+ BeforeCatch: false
+ BeforeElse: false
+ IndentBraces: false
+ #SplitEmptyFunction: true # Unknown to clang-format-4.0
+ #SplitEmptyRecord: true # Unknown to clang-format-4.0
+ #SplitEmptyNamespace: true # Unknown to clang-format-4.0
+BreakBeforeBinaryOperators: None
+BreakBeforeBraces: Custom
+#BreakBeforeInheritanceComma: false # Unknown to clang-format-4.0
+BreakBeforeTernaryOperators: false
+BreakConstructorInitializersBeforeComma: false
+#BreakConstructorInitializers: BeforeComma # Unknown to clang-format-4.0
+BreakAfterJavaFieldAnnotations: false
+BreakStringLiterals: false
+ColumnLimit: 100
+CommentPragmas: '^ IWYU pragma:'
+#CompactNamespaces: false # Unknown to clang-format-4.0
+ConstructorInitializerAllOnOneLineOrOnePerLine: false
+ConstructorInitializerIndentWidth: 8
+ContinuationIndentWidth: 8
+Cpp11BracedListStyle: false
+DerivePointerAlignment: false
+DisableFormat: false
+ExperimentalAutoDetectBinPacking: false
+#FixNamespaceComments: false # Unknown to clang-format-4.0
+
+# Taken from:
+# git grep -h '^#define [^[:space:]]*for_each[^[:space:]]*(' include/ \
+# | sed "s,^#define \([^[:space:]]*for_each[^[:space:]]*\)(.*$, - '\1'," \
+# | sort | uniq
+ForEachMacros:
+ - 'apei_estatus_for_each_section'
+ - 'ata_for_each_dev'
+ - 'ata_for_each_link'
+ - '__ata_qc_for_each'
+ - 'ata_qc_for_each'
+ - 'ata_qc_for_each_raw'
+ - 'ata_qc_for_each_with_internal'
+ - 'ax25_for_each'
+ - 'ax25_uid_for_each'
+ - '__bio_for_each_bvec'
+ - 'bio_for_each_bvec'
+ - 'bio_for_each_bvec_all'
+ - 'bio_for_each_integrity_vec'
+ - '__bio_for_each_segment'
+ - 'bio_for_each_segment'
+ - 'bio_for_each_segment_all'
+ - 'bio_list_for_each'
+ - 'bip_for_each_vec'
+ - 'bitmap_for_each_clear_region'
+ - 'bitmap_for_each_set_region'
+ - 'blkg_for_each_descendant_post'
+ - 'blkg_for_each_descendant_pre'
+ - 'blk_queue_for_each_rl'
+ - 'bond_for_each_slave'
+ - 'bond_for_each_slave_rcu'
+ - 'bpf_for_each_spilled_reg'
+ - 'btree_for_each_safe128'
+ - 'btree_for_each_safe32'
+ - 'btree_for_each_safe64'
+ - 'btree_for_each_safel'
+ - 'card_for_each_dev'
+ - 'cgroup_taskset_for_each'
+ - 'cgroup_taskset_for_each_leader'
+ - 'cpufreq_for_each_entry'
+ - 'cpufreq_for_each_entry_idx'
+ - 'cpufreq_for_each_valid_entry'
+ - 'cpufreq_for_each_valid_entry_idx'
+ - 'css_for_each_child'
+ - 'css_for_each_descendant_post'
+ - 'css_for_each_descendant_pre'
+ - 'cxl_for_each_cmd'
+ - 'device_for_each_child_node'
+ - 'dma_fence_chain_for_each'
+ - 'do_for_each_ftrace_op'
+ - 'drm_atomic_crtc_for_each_plane'
+ - 'drm_atomic_crtc_state_for_each_plane'
+ - 'drm_atomic_crtc_state_for_each_plane_state'
+ - 'drm_atomic_for_each_plane_damage'
+ - 'drm_client_for_each_connector_iter'
+ - 'drm_client_for_each_modeset'
+ - 'drm_connector_for_each_possible_encoder'
+ - 'drm_for_each_bridge_in_chain'
+ - 'drm_for_each_connector_iter'
+ - 'drm_for_each_crtc'
+ - 'drm_for_each_crtc_reverse'
+ - 'drm_for_each_encoder'
+ - 'drm_for_each_encoder_mask'
+ - 'drm_for_each_fb'
+ - 'drm_for_each_legacy_plane'
+ - 'drm_for_each_plane'
+ - 'drm_for_each_plane_mask'
+ - 'drm_for_each_privobj'
+ - 'drm_mm_for_each_hole'
+ - 'drm_mm_for_each_node'
+ - 'drm_mm_for_each_node_in_range'
+ - 'drm_mm_for_each_node_safe'
+ - 'flow_action_for_each'
+ - 'for_each_active_dev_scope'
+ - 'for_each_active_drhd_unit'
+ - 'for_each_active_iommu'
+ - 'for_each_aggr_pgid'
+ - 'for_each_available_child_of_node'
+ - 'for_each_bio'
+ - 'for_each_board_func_rsrc'
+ - 'for_each_bvec'
+ - 'for_each_card_auxs'
+ - 'for_each_card_auxs_safe'
+ - 'for_each_card_components'
+ - 'for_each_card_dapms'
+ - 'for_each_card_pre_auxs'
+ - 'for_each_card_prelinks'
+ - 'for_each_card_rtds'
+ - 'for_each_card_rtds_safe'
+ - 'for_each_card_widgets'
+ - 'for_each_card_widgets_safe'
+ - 'for_each_cgroup_storage_type'
+ - 'for_each_child_of_node'
+ - 'for_each_clear_bit'
+ - 'for_each_clear_bit_from'
+ - 'for_each_cmsghdr'
+ - 'for_each_compatible_node'
+ - 'for_each_component_dais'
+ - 'for_each_component_dais_safe'
+ - 'for_each_comp_order'
+ - 'for_each_console'
+ - 'for_each_cpu'
+ - 'for_each_cpu_and'
+ - 'for_each_cpu_not'
+ - 'for_each_cpu_wrap'
+ - 'for_each_dapm_widgets'
+ - 'for_each_dev_addr'
+ - 'for_each_dev_scope'
+ - 'for_each_displayid_db'
+ - 'for_each_dma_cap_mask'
+ - 'for_each_dpcm_be'
+ - 'for_each_dpcm_be_rollback'
+ - 'for_each_dpcm_be_safe'
+ - 'for_each_dpcm_fe'
+ - 'for_each_drhd_unit'
+ - 'for_each_dss_dev'
+ - 'for_each_efi_memory_desc'
+ - 'for_each_efi_memory_desc_in_map'
+ - 'for_each_element'
+ - 'for_each_element_extid'
+ - 'for_each_element_id'
+ - 'for_each_endpoint_of_node'
+ - 'for_each_evictable_lru'
+ - 'for_each_fib6_node_rt_rcu'
+ - 'for_each_fib6_walker_rt'
+ - 'for_each_free_mem_pfn_range_in_zone'
+ - 'for_each_free_mem_pfn_range_in_zone_from'
+ - 'for_each_free_mem_range'
+ - 'for_each_free_mem_range_reverse'
+ - 'for_each_func_rsrc'
+ - 'for_each_hstate'
+ - 'for_each_if'
+ - 'for_each_iommu'
+ - 'for_each_ip_tunnel_rcu'
+ - 'for_each_irq_nr'
+ - 'for_each_link_codecs'
+ - 'for_each_link_cpus'
+ - 'for_each_link_platforms'
+ - 'for_each_lru'
+ - 'for_each_matching_node'
+ - 'for_each_matching_node_and_match'
+ - 'for_each_member'
+ - 'for_each_memcg_cache_index'
+ - 'for_each_mem_pfn_range'
+ - '__for_each_mem_range'
+ - 'for_each_mem_range'
+ - '__for_each_mem_range_rev'
+ - 'for_each_mem_range_rev'
+ - 'for_each_mem_region'
+ - 'for_each_migratetype_order'
+ - 'for_each_msi_entry'
+ - 'for_each_msi_entry_safe'
+ - 'for_each_net'
+ - 'for_each_net_continue_reverse'
+ - 'for_each_netdev'
+ - 'for_each_netdev_continue'
+ - 'for_each_netdev_continue_rcu'
+ - 'for_each_netdev_continue_reverse'
+ - 'for_each_netdev_feature'
+ - 'for_each_netdev_in_bond_rcu'
+ - 'for_each_netdev_rcu'
+ - 'for_each_netdev_reverse'
+ - 'for_each_netdev_safe'
+ - 'for_each_net_rcu'
+ - 'for_each_new_connector_in_state'
+ - 'for_each_new_crtc_in_state'
+ - 'for_each_new_mst_mgr_in_state'
+ - 'for_each_new_plane_in_state'
+ - 'for_each_new_private_obj_in_state'
+ - 'for_each_node'
+ - 'for_each_node_by_name'
+ - 'for_each_node_by_type'
+ - 'for_each_node_mask'
+ - 'for_each_node_state'
+ - 'for_each_node_with_cpus'
+ - 'for_each_node_with_property'
+ - 'for_each_nonreserved_multicast_dest_pgid'
+ - 'for_each_of_allnodes'
+ - 'for_each_of_allnodes_from'
+ - 'for_each_of_cpu_node'
+ - 'for_each_of_pci_range'
+ - 'for_each_old_connector_in_state'
+ - 'for_each_old_crtc_in_state'
+ - 'for_each_old_mst_mgr_in_state'
+ - 'for_each_oldnew_connector_in_state'
+ - 'for_each_oldnew_crtc_in_state'
+ - 'for_each_oldnew_mst_mgr_in_state'
+ - 'for_each_oldnew_plane_in_state'
+ - 'for_each_oldnew_plane_in_state_reverse'
+ - 'for_each_oldnew_private_obj_in_state'
+ - 'for_each_old_plane_in_state'
+ - 'for_each_old_private_obj_in_state'
+ - 'for_each_online_cpu'
+ - 'for_each_online_node'
+ - 'for_each_online_pgdat'
+ - 'for_each_pci_bridge'
+ - 'for_each_pci_dev'
+ - 'for_each_pci_msi_entry'
+ - 'for_each_pcm_streams'
+ - 'for_each_physmem_range'
+ - 'for_each_populated_zone'
+ - 'for_each_possible_cpu'
+ - 'for_each_present_cpu'
+ - 'for_each_prime_number'
+ - 'for_each_prime_number_from'
+ - 'for_each_process'
+ - 'for_each_process_thread'
+ - 'for_each_property_of_node'
+ - 'for_each_registered_fb'
+ - 'for_each_requested_gpio'
+ - 'for_each_requested_gpio_in_range'
+ - 'for_each_reserved_mem_range'
+ - 'for_each_reserved_mem_region'
+ - 'for_each_rtd_codec_dais'
+ - 'for_each_rtd_components'
+ - 'for_each_rtd_cpu_dais'
+ - 'for_each_rtd_dais'
+ - 'for_each_set_bit'
+ - 'for_each_set_bit_from'
+ - 'for_each_set_clump8'
+ - 'for_each_sg'
+ - 'for_each_sg_dma_page'
+ - 'for_each_sg_page'
+ - 'for_each_sgtable_dma_page'
+ - 'for_each_sgtable_dma_sg'
+ - 'for_each_sgtable_page'
+ - 'for_each_sgtable_sg'
+ - 'for_each_sibling_event'
+ - 'for_each_subelement'
+ - 'for_each_subelement_extid'
+ - 'for_each_subelement_id'
+ - '__for_each_thread'
+ - 'for_each_thread'
+ - 'for_each_unicast_dest_pgid'
+ - 'for_each_vsi'
+ - 'for_each_wakeup_source'
+ - 'for_each_zone'
+ - 'for_each_zone_zonelist'
+ - 'for_each_zone_zonelist_nodemask'
+ - 'fwnode_for_each_available_child_node'
+ - 'fwnode_for_each_child_node'
+ - 'fwnode_graph_for_each_endpoint'
+ - 'gadget_for_each_ep'
+ - 'genradix_for_each'
+ - 'genradix_for_each_from'
+ - 'hash_for_each'
+ - 'hash_for_each_possible'
+ - 'hash_for_each_possible_rcu'
+ - 'hash_for_each_possible_rcu_notrace'
+ - 'hash_for_each_possible_safe'
+ - 'hash_for_each_rcu'
+ - 'hash_for_each_safe'
+ - 'hctx_for_each_ctx'
+ - 'hlist_bl_for_each_entry'
+ - 'hlist_bl_for_each_entry_rcu'
+ - 'hlist_bl_for_each_entry_safe'
+ - 'hlist_for_each'
+ - 'hlist_for_each_entry'
+ - 'hlist_for_each_entry_continue'
+ - 'hlist_for_each_entry_continue_rcu'
+ - 'hlist_for_each_entry_continue_rcu_bh'
+ - 'hlist_for_each_entry_from'
+ - 'hlist_for_each_entry_from_rcu'
+ - 'hlist_for_each_entry_rcu'
+ - 'hlist_for_each_entry_rcu_bh'
+ - 'hlist_for_each_entry_rcu_notrace'
+ - 'hlist_for_each_entry_safe'
+ - 'hlist_for_each_entry_srcu'
+ - '__hlist_for_each_rcu'
+ - 'hlist_for_each_safe'
+ - 'hlist_nulls_for_each_entry'
+ - 'hlist_nulls_for_each_entry_from'
+ - 'hlist_nulls_for_each_entry_rcu'
+ - 'hlist_nulls_for_each_entry_safe'
+ - 'i3c_bus_for_each_i2cdev'
+ - 'i3c_bus_for_each_i3cdev'
+ - 'ide_host_for_each_port'
+ - 'ide_port_for_each_dev'
+ - 'ide_port_for_each_present_dev'
+ - 'idr_for_each_entry'
+ - 'idr_for_each_entry_continue'
+ - 'idr_for_each_entry_continue_ul'
+ - 'idr_for_each_entry_ul'
+ - 'in_dev_for_each_ifa_rcu'
+ - 'in_dev_for_each_ifa_rtnl'
+ - 'inet_bind_bucket_for_each'
+ - 'inet_lhash2_for_each_icsk_rcu'
+ - 'key_for_each'
+ - 'key_for_each_safe'
+ - 'klp_for_each_func'
+ - 'klp_for_each_func_safe'
+ - 'klp_for_each_func_static'
+ - 'klp_for_each_object'
+ - 'klp_for_each_object_safe'
+ - 'klp_for_each_object_static'
+ - 'kunit_suite_for_each_test_case'
+ - 'kvm_for_each_memslot'
+ - 'kvm_for_each_vcpu'
+ - 'list_for_each'
+ - 'list_for_each_codec'
+ - 'list_for_each_codec_safe'
+ - 'list_for_each_continue'
+ - 'list_for_each_entry'
+ - 'list_for_each_entry_continue'
+ - 'list_for_each_entry_continue_rcu'
+ - 'list_for_each_entry_continue_reverse'
+ - 'list_for_each_entry_from'
+ - 'list_for_each_entry_from_rcu'
+ - 'list_for_each_entry_from_reverse'
+ - 'list_for_each_entry_lockless'
+ - 'list_for_each_entry_rcu'
+ - 'list_for_each_entry_reverse'
+ - 'list_for_each_entry_safe'
+ - 'list_for_each_entry_safe_continue'
+ - 'list_for_each_entry_safe_from'
+ - 'list_for_each_entry_safe_reverse'
+ - 'list_for_each_entry_srcu'
+ - 'list_for_each_prev'
+ - 'list_for_each_prev_safe'
+ - 'list_for_each_safe'
+ - 'llist_for_each'
+ - 'llist_for_each_entry'
+ - 'llist_for_each_entry_safe'
+ - 'llist_for_each_safe'
+ - 'mci_for_each_dimm'
+ - 'media_device_for_each_entity'
+ - 'media_device_for_each_intf'
+ - 'media_device_for_each_link'
+ - 'media_device_for_each_pad'
+ - 'nanddev_io_for_each_page'
+ - 'netdev_for_each_lower_dev'
+ - 'netdev_for_each_lower_private'
+ - 'netdev_for_each_lower_private_rcu'
+ - 'netdev_for_each_mc_addr'
+ - 'netdev_for_each_uc_addr'
+ - 'netdev_for_each_upper_dev_rcu'
+ - 'netdev_hw_addr_list_for_each'
+ - 'nft_rule_for_each_expr'
+ - 'nla_for_each_attr'
+ - 'nla_for_each_nested'
+ - 'nlmsg_for_each_attr'
+ - 'nlmsg_for_each_msg'
+ - 'nr_neigh_for_each'
+ - 'nr_neigh_for_each_safe'
+ - 'nr_node_for_each'
+ - 'nr_node_for_each_safe'
+ - 'of_for_each_phandle'
+ - 'of_property_for_each_string'
+ - 'of_property_for_each_u32'
+ - 'pci_bus_for_each_resource'
+ - 'pcl_for_each_chunk'
+ - 'pcl_for_each_segment'
+ - 'pcm_for_each_format'
+ - 'ping_portaddr_for_each_entry'
+ - 'plist_for_each'
+ - 'plist_for_each_continue'
+ - 'plist_for_each_entry'
+ - 'plist_for_each_entry_continue'
+ - 'plist_for_each_entry_safe'
+ - 'plist_for_each_safe'
+ - 'pnp_for_each_card'
+ - 'pnp_for_each_dev'
+ - 'protocol_for_each_card'
+ - 'protocol_for_each_dev'
+ - 'queue_for_each_hw_ctx'
+ - 'radix_tree_for_each_slot'
+ - 'radix_tree_for_each_tagged'
+ - 'rbtree_postorder_for_each_entry_safe'
+ - 'rdma_for_each_block'
+ - 'rdma_for_each_port'
+ - 'rdma_umem_for_each_dma_block'
+ - 'resource_list_for_each_entry'
+ - 'resource_list_for_each_entry_safe'
+ - 'rhl_for_each_entry_rcu'
+ - 'rhl_for_each_rcu'
+ - 'rht_for_each'
+ - 'rht_for_each_entry'
+ - 'rht_for_each_entry_from'
+ - 'rht_for_each_entry_rcu'
+ - 'rht_for_each_entry_rcu_from'
+ - 'rht_for_each_entry_safe'
+ - 'rht_for_each_from'
+ - 'rht_for_each_rcu'
+ - 'rht_for_each_rcu_from'
+ - '__rq_for_each_bio'
+ - 'rq_for_each_bvec'
+ - 'rq_for_each_segment'
+ - 'scsi_for_each_prot_sg'
+ - 'scsi_for_each_sg'
+ - 'sctp_for_each_hentry'
+ - 'sctp_skb_for_each'
+ - 'shdma_for_each_chan'
+ - '__shost_for_each_device'
+ - 'shost_for_each_device'
+ - 'sk_for_each'
+ - 'sk_for_each_bound'
+ - 'sk_for_each_entry_offset_rcu'
+ - 'sk_for_each_from'
+ - 'sk_for_each_rcu'
+ - 'sk_for_each_safe'
+ - 'sk_nulls_for_each'
+ - 'sk_nulls_for_each_from'
+ - 'sk_nulls_for_each_rcu'
+ - 'snd_array_for_each'
+ - 'snd_pcm_group_for_each_entry'
+ - 'snd_soc_dapm_widget_for_each_path'
+ - 'snd_soc_dapm_widget_for_each_path_safe'
+ - 'snd_soc_dapm_widget_for_each_sink_path'
+ - 'snd_soc_dapm_widget_for_each_source_path'
+ - 'tb_property_for_each'
+ - 'tcf_exts_for_each_action'
+ - 'udp_portaddr_for_each_entry'
+ - 'udp_portaddr_for_each_entry_rcu'
+ - 'usb_hub_for_each_child'
+ - 'v4l2_device_for_each_subdev'
+ - 'v4l2_m2m_for_each_dst_buf'
+ - 'v4l2_m2m_for_each_dst_buf_safe'
+ - 'v4l2_m2m_for_each_src_buf'
+ - 'v4l2_m2m_for_each_src_buf_safe'
+ - 'virtio_device_for_each_vq'
+ - 'while_for_each_ftrace_op'
+ - 'xa_for_each'
+ - 'xa_for_each_marked'
+ - 'xa_for_each_range'
+ - 'xa_for_each_start'
+ - 'xas_for_each'
+ - 'xas_for_each_conflict'
+ - 'xas_for_each_marked'
+ - 'xbc_array_for_each_value'
+ - 'xbc_for_each_key_value'
+ - 'xbc_node_for_each_array_value'
+ - 'xbc_node_for_each_child'
+ - 'xbc_node_for_each_key_value'
+ - 'zorro_for_each_dev'
+
+#IncludeBlocks: Preserve # Unknown to clang-format-5.0
+IncludeCategories:
+ - Regex: '.*'
+ Priority: 1
+IncludeIsMainRegex: '(Test)?$'
+IndentCaseLabels: false
+#IndentPPDirectives: None # Unknown to clang-format-5.0
+IndentWidth: 8
+IndentWrappedFunctionNames: false
+JavaScriptQuotes: Leave
+JavaScriptWrapImports: true
+KeepEmptyLinesAtTheStartOfBlocks: false
+MacroBlockBegin: ''
+MacroBlockEnd: ''
+MaxEmptyLinesToKeep: 1
+NamespaceIndentation: None
+#ObjCBinPackProtocolList: Auto # Unknown to clang-format-5.0
+ObjCBlockIndentWidth: 8
+ObjCSpaceAfterProperty: true
+ObjCSpaceBeforeProtocolList: true
+
+# Taken from git's rules
+#PenaltyBreakAssignment: 10 # Unknown to clang-format-4.0
+PenaltyBreakBeforeFirstCallParameter: 30
+PenaltyBreakComment: 10
+PenaltyBreakFirstLessLess: 0
+PenaltyBreakString: 10
+PenaltyExcessCharacter: 100
+PenaltyReturnTypeOnItsOwnLine: 60
+
+PointerAlignment: Right
+ReflowComments: false
+SortIncludes: false
+#SortUsingDeclarations: false # Unknown to clang-format-4.0
+SpaceAfterCStyleCast: false
+SpaceAfterTemplateKeyword: true
+SpaceBeforeAssignmentOperators: true
+#SpaceBeforeCtorInitializerColon: true # Unknown to clang-format-5.0
+#SpaceBeforeInheritanceColon: true # Unknown to clang-format-5.0
+SpaceBeforeParens: ControlStatements
+#SpaceBeforeRangeBasedForLoopColon: true # Unknown to clang-format-5.0
+SpaceInEmptyParentheses: false
+SpacesBeforeTrailingComments: 1
+SpacesInAngles: false
+SpacesInContainerLiterals: false
+SpacesInCStyleCastParentheses: false
+SpacesInParentheses: false
+SpacesInSquareBrackets: false
+Standard: Cpp03
+TabWidth: 8
+UseTab: Always
+...
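With this configuration checked in at the top of the tree, clang-format picks it up automatically: it searches upward from each source file for the nearest .clang-format, so a file can be reformatted in place without naming the config, for example:

    clang-format -style=file -i drivers/edgetpu/edgetpu-core.c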
diff --git a/.gitignore b/.gitignore
index 0c39aa2..778e659 100644
--- a/.gitignore
+++ b/.gitignore
@@ -68,6 +68,7 @@ Module.symvers
#
!.gitignore
!.mailmap
+!.clang-format
!.cocciconfig
#
diff --git a/drivers/edgetpu/edgetpu-core.c b/drivers/edgetpu/edgetpu-core.c
index 7294dd4..0820e95 100644
--- a/drivers/edgetpu/edgetpu-core.c
+++ b/drivers/edgetpu/edgetpu-core.c
@@ -70,9 +70,8 @@ static int edgetpu_mmap_full_csr(struct edgetpu_client *client,
if (!uid_eq(current_euid(), GLOBAL_ROOT_UID))
return -EPERM;
vma_size = vma->vm_end - vma->vm_start;
- map_size = min(vma_size, client->reg_window.size);
- phys_base = client->etdev->regs.phys +
- client->reg_window.start_reg_offset;
+ map_size = min_t(ulong, vma_size, client->etdev->regs.size);
+ phys_base = client->etdev->regs.phys;
ret = io_remap_pfn_range(vma, vma->vm_start, phys_base >> PAGE_SHIFT,
map_size, vma->vm_page_prot);
if (ret)
@@ -367,7 +366,6 @@ int edgetpu_device_add(struct edgetpu_dev *etdev,
etdev->mcp_die_index);
}
- mutex_init(&etdev->open.lock);
mutex_init(&etdev->groups_lock);
INIT_LIST_HEAD(&etdev->groups);
etdev->n_groups = 0;
@@ -391,7 +389,7 @@ int edgetpu_device_add(struct edgetpu_dev *etdev,
etdev->dev_name, ret);
goto remove_dev;
}
- ret = edgetpu_setup_mmu(etdev);
+ ret = edgetpu_chip_setup_mmu(etdev);
if (ret)
goto remove_dev;
@@ -400,20 +398,20 @@ int edgetpu_device_add(struct edgetpu_dev *etdev,
etdev->kci = devm_kzalloc(etdev->dev, sizeof(*etdev->kci), GFP_KERNEL);
if (!etdev->kci) {
ret = -ENOMEM;
- goto detach_mmu;
+ goto remove_usage_stats;
}
etdev->telemetry =
devm_kzalloc(etdev->dev, sizeof(*etdev->telemetry), GFP_KERNEL);
if (!etdev->telemetry) {
ret = -ENOMEM;
- goto detach_mmu;
+ goto remove_usage_stats;
}
ret = edgetpu_kci_init(etdev->mailbox_manager, etdev->kci);
if (ret) {
etdev_err(etdev, "edgetpu_kci_init returns %d\n", ret);
- goto detach_mmu;
+ goto remove_usage_stats;
}
ret = edgetpu_device_dram_init(etdev);
@@ -434,9 +432,9 @@ int edgetpu_device_add(struct edgetpu_dev *etdev,
remove_kci:
/* releases the resources of KCI */
edgetpu_mailbox_remove_all(etdev->mailbox_manager);
-detach_mmu:
+remove_usage_stats:
edgetpu_usage_stats_exit(etdev);
- edgetpu_mmu_detach(etdev);
+ edgetpu_chip_remove_mmu(etdev);
remove_dev:
edgetpu_mark_probe_fail(etdev);
edgetpu_fs_remove(etdev);
@@ -450,7 +448,7 @@ void edgetpu_device_remove(struct edgetpu_dev *etdev)
edgetpu_device_dram_exit(etdev);
edgetpu_mailbox_remove_all(etdev->mailbox_manager);
edgetpu_usage_stats_exit(etdev);
- edgetpu_mmu_detach(etdev);
+ edgetpu_chip_remove_mmu(etdev);
edgetpu_fs_remove(etdev);
}
@@ -467,9 +465,6 @@ struct edgetpu_client *edgetpu_client_add(struct edgetpu_dev *etdev)
return ERR_PTR(-ENOMEM);
}
- /* Allow entire CSR space to be mmap()'ed using 1.0 interface */
- client->reg_window.start_reg_offset = 0;
- client->reg_window.size = etdev->regs.size;
client->pid = current->pid;
client->tgid = current->tgid;
client->etdev = etdev;
@@ -527,6 +522,7 @@ void edgetpu_client_remove(struct edgetpu_client *client)
1 << perdie_event_id_to_num(EDGETPU_PERDIE_EVENT_TRACES_AVAILABLE))
edgetpu_telemetry_unset_event(etdev, EDGETPU_TELEMETRY_TRACE);
+ edgetpu_chip_client_remove(client);
edgetpu_client_put(client);
}
@@ -583,13 +579,15 @@ void edgetpu_free_coherent(struct edgetpu_dev *etdev,
void edgetpu_handle_firmware_crash(struct edgetpu_dev *etdev,
enum edgetpu_fw_crash_type crash_type)
{
- etdev_err(etdev, "firmware crashed: %u", crash_type);
- etdev->firmware_crash_count++;
- edgetpu_fatal_error_notify(etdev);
-
- if (crash_type == EDGETPU_FW_CRASH_UNRECOV_FAULT)
+ if (crash_type == EDGETPU_FW_CRASH_UNRECOV_FAULT) {
+ etdev_err(etdev, "firmware unrecoverable crash");
+ etdev->firmware_crash_count++;
+ edgetpu_fatal_error_notify(etdev);
/* Restart firmware without chip reset */
edgetpu_watchdog_bite(etdev, false);
+ } else {
+ etdev_err(etdev, "firmware crash event: %u", crash_type);
+ }
}
int __init edgetpu_init(void)
diff --git a/drivers/edgetpu/edgetpu-device-group.c b/drivers/edgetpu/edgetpu-device-group.c
index 29b84c7..3b2e6fc 100644
--- a/drivers/edgetpu/edgetpu-device-group.c
+++ b/drivers/edgetpu/edgetpu-device-group.c
@@ -60,6 +60,24 @@ struct edgetpu_host_map {
struct sg_table *sg_tables;
};
+/*
+ * A helper structure for the return value of find_sg_to_sync().
+ */
+struct sglist_to_sync {
+ struct scatterlist *sg;
+ int nelems;
+ /*
+ * The SG that has its length modified by find_sg_to_sync().
+ * Can be NULL, which means no SG's length was modified.
+ */
+ struct scatterlist *last_sg;
+ /*
+ * find_sg_to_sync() will temporarily change the length of @last_sg.
+ * This is used to restore the length.
+ */
+ unsigned int orig_length;
+};
+
#ifdef EDGETPU_HAS_MCP
/* parameter to be used in async KCI jobs */
@@ -86,7 +104,7 @@ static int edgetpu_kci_leave_group_worker(struct kci_worker_param *param)
struct edgetpu_dev *etdev = edgetpu_device_group_nth_etdev(group, i);
etdev_dbg(etdev, "%s: leave group %u", __func__, group->workload_id);
- edgetpu_kci_update_usage(etdev);
+ edgetpu_kci_update_usage_async(etdev);
edgetpu_kci_leave_group(etdev->kci);
return 0;
}
@@ -101,16 +119,11 @@ static int edgetpu_group_kci_open_device(struct edgetpu_device_group *group)
if (edgetpu_group_mailbox_detached_locked(group))
return 0;
mailbox_id = edgetpu_group_context_id_locked(group);
- ret = edgetpu_kci_open_device(group->etdev->kci, BIT(mailbox_id));
- /*
- * This should only happen when the FW hasn't driven this KCI, log once
- * to prevent log storm.
- */
+ ret = edgetpu_mailbox_activate(group->etdev, BIT(mailbox_id));
if (ret)
- etdev_warn_once(group->etdev, "Open device failed with %d",
- ret);
+ etdev_err(group->etdev, "activate mailbox failed with %d", ret);
atomic_inc(&group->etdev->job_count);
- return 0;
+ return ret;
}
static void edgetpu_group_kci_close_device(struct edgetpu_device_group *group)
@@ -121,15 +134,10 @@ static void edgetpu_group_kci_close_device(struct edgetpu_device_group *group)
if (edgetpu_group_mailbox_detached_locked(group))
return;
mailbox_id = edgetpu_group_context_id_locked(group);
- ret = edgetpu_kci_close_device(group->etdev->kci, BIT(mailbox_id));
-
- /*
- * This should only happen when the FW hasn't driven this KCI, log once
- * to prevent log storm.
- */
+ ret = edgetpu_mailbox_deactivate(group->etdev, BIT(mailbox_id));
if (ret)
- etdev_warn_once(group->etdev, "Close device failed with %d",
- ret);
+ etdev_err(group->etdev, "deactivate mailbox failed with %d",
+ ret);
return;
}
@@ -147,7 +155,7 @@ static void edgetpu_group_kci_close_device(struct edgetpu_device_group *group)
static void edgetpu_device_group_kci_leave(struct edgetpu_device_group *group)
{
#ifdef EDGETPU_HAS_MULTI_GROUPS
- edgetpu_kci_update_usage(group->etdev);
+ edgetpu_kci_update_usage_async(group->etdev);
return edgetpu_group_kci_close_device(group);
#else /* !EDGETPU_HAS_MULTI_GROUPS */
struct kci_worker_param *params =
@@ -347,6 +355,12 @@ static void do_detach_mailbox_locked(struct edgetpu_device_group *group)
group->context_id = EDGETPU_CONTEXT_INVALID;
}
+static inline bool is_finalized_or_errored(struct edgetpu_device_group *group)
+{
+ return edgetpu_device_group_is_finalized(group) ||
+ edgetpu_device_group_is_errored(group);
+}
+
int edgetpu_group_set_eventfd(struct edgetpu_device_group *group, uint event_id,
int eventfd)
{
@@ -423,7 +437,7 @@ static void edgetpu_device_group_release(struct edgetpu_device_group *group)
struct edgetpu_dev *etdev;
edgetpu_group_clear_events(group);
- if (edgetpu_device_group_is_finalized(group)) {
+ if (is_finalized_or_errored(group)) {
for (i = 0; i < group->n_clients; i++) {
etdev = edgetpu_device_group_nth_etdev(group, i);
edgetpu_sw_wdt_dec_active_ref(etdev);
@@ -550,8 +564,7 @@ static bool edgetpu_in_any_group_locked(struct edgetpu_dev *etdev)
return etdev->n_groups;
}
-/* caller must hold the client's etdev state_lock. */
-void edgetpu_device_group_leave_locked(struct edgetpu_client *client)
+void edgetpu_device_group_leave(struct edgetpu_client *client)
{
struct edgetpu_device_group *group;
struct edgetpu_list_group *l;
@@ -573,7 +586,7 @@ void edgetpu_device_group_leave_locked(struct edgetpu_client *client)
if (edgetpu_device_group_is_waiting(group)) {
if (edgetpu_device_group_leader(group) == client)
will_disband = true;
- } else if (edgetpu_device_group_is_finalized(group)) {
+ } else if (is_finalized_or_errored(group)) {
will_disband = true;
}
@@ -608,18 +621,6 @@ void edgetpu_device_group_leave_locked(struct edgetpu_client *client)
mutex_unlock(&client->etdev->groups_lock);
}
-void edgetpu_device_group_leave(struct edgetpu_client *client)
-{
- mutex_lock(&client->etdev->state_lock);
- /*
- * The only chance that the state is not GOOD here is the wdt timeout
- * action is working. Let that worker perform the group leaving.
- */
- if (client->etdev->state == ETDEV_STATE_GOOD)
- edgetpu_device_group_leave_locked(client);
- mutex_unlock(&client->etdev->state_lock);
-}
-
struct edgetpu_device_group *
edgetpu_device_group_alloc(struct edgetpu_client *client,
const struct edgetpu_mailbox_attr *attr)
@@ -684,7 +685,7 @@ edgetpu_device_group_alloc(struct edgetpu_client *client,
return group;
error_leave_group:
- edgetpu_device_group_leave_locked(client);
+ edgetpu_device_group_leave(client);
error_put_group:
edgetpu_device_group_put(group);
error:
@@ -762,7 +763,7 @@ int edgetpu_device_group_finalize(struct edgetpu_device_group *group)
mutex_lock(&group->lock);
/* do nothing if the group is finalized */
- if (edgetpu_device_group_is_finalized(group))
+ if (is_finalized_or_errored(group))
goto err_unlock;
if (!edgetpu_device_group_is_waiting(group)) {
@@ -823,6 +824,7 @@ int edgetpu_device_group_finalize(struct edgetpu_device_group *group)
edgetpu_usr_init_group(group);
+ /* send KCI only if the device is powered on */
if (edgetpu_wakelock_count_locked(leader->wakelock)) {
ret = edgetpu_device_group_kci_finalized(group);
if (ret)
@@ -1085,9 +1087,9 @@ static void edgetpu_host_map_show(struct edgetpu_mapping *map,
*/
static struct page **edgetpu_pin_user_pages(struct edgetpu_device_group *group,
struct edgetpu_map_ioctl *arg,
- uint *pnum_pages)
+ uint *pnum_pages, bool *preadonly)
{
- u64 host_addr = arg->host_address;
+ u64 host_addr = untagged_addr(arg->host_address);
u64 size = arg->size;
const enum dma_data_direction dir = arg->flags & EDGETPU_MAP_DIR_MASK;
uint num_pages;
@@ -1096,12 +1098,14 @@ static struct page **edgetpu_pin_user_pages(struct edgetpu_device_group *group,
struct page **pages;
int i;
int ret;
+ struct vm_area_struct *vma;
+ unsigned int foll_flags = FOLL_LONGTERM | FOLL_WRITE;
if (size == 0)
return ERR_PTR(-EINVAL);
offset = host_addr & (PAGE_SIZE - 1);
/* overflow check */
- if (unlikely(size + offset < size))
+ if (unlikely((size + offset) / PAGE_SIZE >= UINT_MAX - 1 || size + offset < size))
return ERR_PTR(-ENOMEM);
num_pages = (size + offset) / PAGE_SIZE;
if ((size + offset) % PAGE_SIZE)
@@ -1118,10 +1122,20 @@ static struct page **edgetpu_pin_user_pages(struct edgetpu_device_group *group,
return ERR_PTR(-ENOMEM);
/*
- * DMA Buffers appear to be always dirty, so mark pages as always writeable
+ * The host pages might be read-only, and pinning them with FOLL_WRITE
+ * would fail in that case.
+ * Default to read/write if find_extend_vma() returns NULL.
*/
- ret = pin_user_pages_fast(host_addr & PAGE_MASK, num_pages,
- FOLL_WRITE | FOLL_LONGTERM, pages);
+ vma = find_extend_vma(current->mm, host_addr & PAGE_MASK);
+ if (vma && !(vma->vm_flags & VM_WRITE)) {
+ foll_flags &= ~FOLL_WRITE;
+ *preadonly = true;
+ } else {
+ *preadonly = false;
+ }
+
+ ret = pin_user_pages_fast(host_addr & PAGE_MASK, num_pages, foll_flags,
+ pages);
if (ret < 0) {
etdev_dbg(etdev, "get user pages failed %u:%pK-%u: %d",
group->workload_id, (void *)host_addr, num_pages,
@@ -1232,32 +1246,60 @@ error:
}
/*
- * Find the scatterlist covering range [start, end).
+ * Finds the scatterlist covering range [start, end).
+ *
+ * The found SG and number of elements will be stored in @sglist.
+ *
+ * To ensure the returned SG list lies strictly within range [start, end),
+ * the last SG's length is shrunk. Therefore the caller must call
+ * restore_sg_after_sync(@sglist) after the DMA sync is performed.
*
- * Returns NULL if:
- * - @start is larger than the whole SG table
+ * @sglist->nelems == 0 means the target range exceeds the whole SG table.
*/
-static struct scatterlist *find_sg_within(const struct sg_table *sgt, u64 start,
- u64 end, int *nelems)
+static void find_sg_to_sync(const struct sg_table *sgt, u64 start, u64 end,
+ struct sglist_to_sync *sglist)
{
- struct scatterlist *sg, *sg_to_sync = NULL;
+ struct scatterlist *sg;
size_t cur_offset = 0;
int i;
- *nelems = 0;
+ sglist->sg = NULL;
+ sglist->nelems = 0;
+ sglist->last_sg = NULL;
+ if (unlikely(end == 0))
+ return;
for_each_sg(sgt->sgl, sg, sgt->orig_nents, i) {
- if (end <= cur_offset)
- break;
if (cur_offset <= start && start < cur_offset + sg->length)
- sg_to_sync = sg;
- if (sg_to_sync)
- (*nelems)++;
+ sglist->sg = sg;
+ if (sglist->sg)
+ ++sglist->nelems;
cur_offset += sg->length;
+ if (end <= cur_offset) {
+ sglist->last_sg = sg;
+ sglist->orig_length = sg->length;
+ /*
+ * Shrink this SG so the returned list covers exactly
+ * [start, end).
+ */
+ sg->length -= cur_offset - end;
+ break;
+ }
}
+}
- return sg_to_sync;
+static void restore_sg_after_sync(struct sglist_to_sync *sglist)
+{
+ if (!sglist->last_sg)
+ return;
+ sglist->last_sg->length = sglist->orig_length;
}
+/*
+ * Performs DMA sync of the mapping with region [offset, offset + size).
+ *
+ * Caller holds mapping's lock, to prevent @hmap being modified / removed by
+ * other processes.
+ */
static int group_sync_host_map(struct edgetpu_device_group *group,
struct edgetpu_host_map *hmap, u64 offset,
u64 size, enum dma_data_direction dir,
@@ -1268,29 +1310,32 @@ static int group_sync_host_map(struct edgetpu_device_group *group,
for_cpu ? dma_sync_sg_for_cpu : dma_sync_sg_for_device;
struct edgetpu_dev *etdev;
struct sg_table *sgt;
- struct scatterlist *sg;
+ struct sglist_to_sync sglist;
int i;
- int nelems;
sgt = &hmap->map.sgt;
- sg = find_sg_within(sgt, offset, end, &nelems);
- if (!sg)
+ find_sg_to_sync(sgt, offset, end, &sglist);
+ if (!sglist.nelems)
return -EINVAL;
+ if (IS_MIRRORED(hmap->map.flags))
+ etdev = group->etdev;
+ else
+ etdev = edgetpu_device_group_nth_etdev(group,
+ hmap->map.die_index);
+ sync(etdev->dev, sglist.sg, sglist.nelems, dir);
+ restore_sg_after_sync(&sglist);
+
if (IS_MIRRORED(hmap->map.flags)) {
- sync(group->etdev->dev, sg, nelems, dir);
for (i = 1; i < group->n_clients; i++) {
etdev = edgetpu_device_group_nth_etdev(group, i);
- sg = find_sg_within(&hmap->sg_tables[i], offset, end,
- &nelems);
- if (WARN_ON(!sg))
+ find_sg_to_sync(&hmap->sg_tables[i], offset, end,
+ &sglist);
+ if (WARN_ON(!sglist.sg))
return -EINVAL;
- sync(etdev->dev, sg, nelems, dir);
+ sync(etdev->dev, sglist.sg, sglist.nelems, dir);
+ restore_sg_after_sync(&sglist);
}
- } else {
- etdev = edgetpu_device_group_nth_etdev(group,
- hmap->map.die_index);
- sync(etdev->dev, sg, nelems, dir);
}
return 0;
@@ -1310,18 +1355,24 @@ int edgetpu_device_group_map(struct edgetpu_device_group *group,
enum edgetpu_context_id context_id;
const u32 mmu_flags = map_to_mmu_flags(flags) | EDGETPU_MMU_HOST;
int i;
+ bool readonly;
if (!valid_dma_direction(flags & EDGETPU_MAP_DIR_MASK))
return -EINVAL;
/* Pin user pages before holding any lock. */
- pages = edgetpu_pin_user_pages(group, arg, &num_pages);
+ pages = edgetpu_pin_user_pages(group, arg, &num_pages, &readonly);
if (IS_ERR(pages))
return PTR_ERR(pages);
+ /* If the host pages are read-only, fall back to DMA_TO_DEVICE. */
+ if (readonly) {
+ flags &= ~EDGETPU_MAP_DIR_MASK;
+ flags |= EDGETPU_MAP_DMA_TO_DEVICE;
+ }
mutex_lock(&group->lock);
context_id = edgetpu_group_context_id_locked(group);
if (!edgetpu_device_group_is_finalized(group)) {
- ret = -EINVAL;
+ ret = edgetpu_group_errno(group);
goto error;
}
if (!IS_MIRRORED(flags)) {
@@ -1397,7 +1448,7 @@ int edgetpu_device_group_unmap(struct edgetpu_device_group *group,
mutex_lock(&group->lock);
if (!edgetpu_device_group_is_finalized(group)) {
- ret = -EINVAL;
+ ret = edgetpu_group_errno(group);
goto unlock_group;
}
@@ -1442,7 +1493,7 @@ int edgetpu_device_group_sync_buffer(struct edgetpu_device_group *group,
mutex_lock(&group->lock);
if (!edgetpu_device_group_is_finalized(group)) {
- ret = -EINVAL;
+ ret = edgetpu_group_errno(group);
goto unlock_group;
}
@@ -1481,6 +1532,9 @@ void edgetpu_group_mappings_show(struct edgetpu_device_group *group,
case EDGETPU_DEVICE_GROUP_WAITING:
case EDGETPU_DEVICE_GROUP_FINALIZED:
break;
+ case EDGETPU_DEVICE_GROUP_ERRORED:
+ seq_puts(s, " (errored)");
+ break;
case EDGETPU_DEVICE_GROUP_DISBANDED:
seq_puts(s, ": disbanded\n");
return;
@@ -1526,7 +1580,7 @@ int edgetpu_mmap_csr(struct edgetpu_device_group *group,
mutex_lock(&group->lock);
if (!edgetpu_group_finalized_and_attached(group)) {
- ret = -EINVAL;
+ ret = edgetpu_group_errno(group);
goto out;
}
@@ -1553,7 +1607,7 @@ int edgetpu_mmap_queue(struct edgetpu_device_group *group,
mutex_lock(&group->lock);
if (!edgetpu_group_finalized_and_attached(group)) {
- ret = -EINVAL;
+ ret = edgetpu_group_errno(group);
goto out;
}
@@ -1601,8 +1655,12 @@ void edgetpu_group_detach_mailbox_locked(struct edgetpu_device_group *group)
void edgetpu_group_close_and_detach_mailbox(struct edgetpu_device_group *group)
{
mutex_lock(&group->lock);
- /* only a finalized group may have mailbox attached */
- if (edgetpu_device_group_is_finalized(group)) {
+ /*
+ * Only a finalized group may have a mailbox attached.
+ *
+ * Detaching the mailbox of an errored group is also fine.
+ */
+ if (is_finalized_or_errored(group)) {
edgetpu_group_kci_close_device(group);
edgetpu_group_detach_mailbox_locked(group);
}
@@ -1623,7 +1681,10 @@ int edgetpu_group_attach_and_open_mailbox(struct edgetpu_device_group *group)
int ret = 0;
mutex_lock(&group->lock);
- /* only attaching mailbox for finalized groups */
+ /*
+ * Only attach the mailbox for finalized groups;
+ * don't attach the mailbox for errored groups.
+ */
if (edgetpu_device_group_is_finalized(group)) {
ret = edgetpu_group_attach_mailbox_locked(group);
if (!ret)
diff --git a/drivers/edgetpu/edgetpu-device-group.h b/drivers/edgetpu/edgetpu-device-group.h
index e0cd2a3..3a5e252 100644
--- a/drivers/edgetpu/edgetpu-device-group.h
+++ b/drivers/edgetpu/edgetpu-device-group.h
@@ -34,6 +34,13 @@ enum edgetpu_device_group_status {
EDGETPU_DEVICE_GROUP_WAITING,
/* Most operations can only apply on a finalized group. */
EDGETPU_DEVICE_GROUP_FINALIZED,
+ /*
+ * When a fatal error occurs, groups in FINALIZED status are moved to
+ * this state. Operations on groups in this state mostly return
+ * -ECANCELED. Once a member leaves an ERRORED group, the status
+ * transitions to DISBANDED.
+ */
+ EDGETPU_DEVICE_GROUP_ERRORED,
/* No operations except member leaving can be performed. */
EDGETPU_DEVICE_GROUP_DISBANDED,
};
@@ -126,10 +133,10 @@ struct edgetpu_list_group {
/*
* Returns if the group is waiting for members to join.
*
- * Must be called with lock held.
+ * Caller holds @group->lock.
*/
-static inline bool edgetpu_device_group_is_waiting(
- const struct edgetpu_device_group *group)
+static inline bool
+edgetpu_device_group_is_waiting(const struct edgetpu_device_group *group)
{
return group->status == EDGETPU_DEVICE_GROUP_WAITING;
}
@@ -137,7 +144,7 @@ static inline bool edgetpu_device_group_is_waiting(
/*
* Returns if the group is finalized.
*
- * Must be called with lock held.
+ * Caller holds @group->lock.
*/
static inline bool
edgetpu_device_group_is_finalized(const struct edgetpu_device_group *group)
@@ -146,16 +153,40 @@ edgetpu_device_group_is_finalized(const struct edgetpu_device_group *group)
}
/*
+ * Returns if the group is errored.
+ *
+ * Caller holds @group->lock.
+ */
+static inline bool
+edgetpu_device_group_is_errored(const struct edgetpu_device_group *group)
+{
+ return group->status == EDGETPU_DEVICE_GROUP_ERRORED;
+}
+
+/*
* Returns if the group is disbanded.
*
- * Must be called with lock held.
+ * Caller holds @group->lock.
*/
-static inline bool edgetpu_device_group_is_disbanded(
- const struct edgetpu_device_group *group)
+static inline bool
+edgetpu_device_group_is_disbanded(const struct edgetpu_device_group *group)
{
return group->status == EDGETPU_DEVICE_GROUP_DISBANDED;
}
+/*
+ * Returns -ECANCELED if the status of @group is ERRORED, otherwise returns
+ * -EINVAL.
+ *
+ * Caller holds @group->lock.
+ */
+static inline int edgetpu_group_errno(struct edgetpu_device_group *group)
+{
+ if (edgetpu_device_group_is_errored(group))
+ return -ECANCELED;
+ return -EINVAL;
+}
+
/* Increases ref_count of @group by one and returns @group. */
static inline struct edgetpu_device_group *
edgetpu_device_group_get(struct edgetpu_device_group *group)
@@ -220,8 +251,7 @@ static inline struct edgetpu_dev *edgetpu_device_group_nth_etdev(
}
/*
- * Let @client leave the group it belongs to. Caller should hold the client's
- * etdev state_lock.
+ * Let @client leave the group it belongs to.
*
* If @client is the leader of a group, the group will be marked as "disbanded".
*
@@ -235,9 +265,6 @@ static inline struct edgetpu_dev *edgetpu_device_group_nth_etdev(
* @client->group will be removed from @client->etdev->groups.
* @client->group will be set as NULL.
*/
-void edgetpu_device_group_leave_locked(struct edgetpu_client *client);
-
-/* Let @client leave the group. Device should be in good state, warn if not. */
void edgetpu_device_group_leave(struct edgetpu_client *client);
/* Returns whether @client is the leader of @group. */
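The new ERRORED state sits between FINALIZED and DISBANDED, and edgetpu_group_errno() lets operation paths distinguish "group was killed by a fatal error" (-ECANCELED) from "group was never usable" (-EINVAL). A hypothetical sketch of both halves of that protocol, using only fields and helpers shown above (the function name is made up):

/* Fatal-error side: demote a finalized group to ERRORED. */
static void example_group_fatal_error(struct edgetpu_device_group *group)
{
	mutex_lock(&group->lock);
	/* Only a FINALIZED group may move to ERRORED. */
	if (edgetpu_device_group_is_finalized(group))
		group->status = EDGETPU_DEVICE_GROUP_ERRORED;
	mutex_unlock(&group->lock);
}

and the corresponding check in an operation path, as seen throughout edgetpu-device-group.c above:

	if (!edgetpu_device_group_is_finalized(group)) {
		ret = edgetpu_group_errno(group); /* -ECANCELED or -EINVAL */
		goto out_unlock;
	}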
diff --git a/drivers/edgetpu/edgetpu-dmabuf.c b/drivers/edgetpu/edgetpu-dmabuf.c
index c2bf3ae..3d1c190 100644
--- a/drivers/edgetpu/edgetpu-dmabuf.c
+++ b/drivers/edgetpu/edgetpu-dmabuf.c
@@ -97,7 +97,7 @@ static int etdev_add_translations(struct edgetpu_dev *etdev,
enum dma_data_direction dir,
enum edgetpu_context_id ctx_id)
{
- const int prot = __dma_dir_to_iommu_prot(dir);
+ const int prot = __dma_dir_to_iommu_prot(dir, etdev->dev);
uint i;
u64 offset = 0;
int ret;
@@ -280,11 +280,12 @@ static void dmabuf_map_callback_release(struct edgetpu_mapping *map)
container_of(map, struct edgetpu_dmabuf_map, map);
struct edgetpu_device_group *group = map->priv;
const enum dma_data_direction dir = edgetpu_host_dma_dir(map->dir);
- const tpu_addr_t tpu_addr = map->device_address;
+ tpu_addr_t tpu_addr = map->device_address;
struct edgetpu_dev *etdev;
uint i;
if (tpu_addr) {
+ tpu_addr -= dmap->offset;
if (IS_MIRRORED(map->flags)) {
group_unmap_dmabuf(group, dmap, tpu_addr);
} else {
@@ -339,19 +340,13 @@ static void dmabuf_map_callback_show(struct edgetpu_mapping *map,
container_of(map, struct edgetpu_dmabuf_map, map);
if (IS_MIRRORED(dmap->map.flags))
- seq_printf(
- s,
- " <%s> mirrored: iova=0x%llx pages=%llu %s offset=0x%llx",
- dmap->dmabufs[0]->exp_name, map->device_address,
- dmap->size / PAGE_SIZE, edgetpu_dma_dir_rw_s(map->dir),
- dmap->offset);
+ seq_printf(s, " <%s> mirrored: iova=0x%llx pages=%llu %s",
+ dmap->dmabufs[0]->exp_name, map->device_address, dmap->size / PAGE_SIZE,
+ edgetpu_dma_dir_rw_s(map->dir));
else
- seq_printf(
- s,
- " <%s> die %u: iova=0x%llx pages=%llu %s offset=0x%llx",
- dmap->dmabufs[0]->exp_name, map->die_index,
- map->device_address, dmap->size / PAGE_SIZE,
- edgetpu_dma_dir_rw_s(map->dir), dmap->offset);
+ seq_printf(s, " <%s> die %u: iova=0x%llx pages=%llu %s",
+ dmap->dmabufs[0]->exp_name, map->die_index, map->device_address,
+ dmap->size / PAGE_SIZE, edgetpu_dma_dir_rw_s(map->dir));
edgetpu_device_dram_dmabuf_info_show(dmap->dmabufs[0], s);
seq_puts(s, " dma=");
@@ -502,12 +497,11 @@ err_free:
}
/*
- * Duplicates @sgt in region [@offset, @offset + @size] to @out.
+ * Duplicates @sgt in region [0, @size) to @out.
* Only duplicates the "page" parts in @sgt, DMA addresses and lengths are not
* considered.
*/
-static int dup_sgt_in_region(struct sg_table *sgt, u64 offset, u64 size,
- struct sg_table *out)
+static int dup_sgt_in_region(struct sg_table *sgt, u64 size, struct sg_table *out)
{
uint n = 0;
u64 cur_offset = 0;
@@ -519,9 +513,8 @@ static int dup_sgt_in_region(struct sg_table *sgt, u64 offset, u64 size,
for_each_sg(sgt->sgl, sg, sgt->orig_nents, i) {
size_t pg_len = sg->length + sg->offset;
- if (offset < cur_offset + pg_len)
- n++;
- if (offset + size <= cur_offset + pg_len)
+ n++;
+ if (size <= cur_offset + pg_len)
break;
cur_offset += pg_len;
}
@@ -532,23 +525,16 @@ static int dup_sgt_in_region(struct sg_table *sgt, u64 offset, u64 size,
new_sg = out->sgl;
for_each_sg(sgt->sgl, sg, sgt->orig_nents, i) {
size_t pg_len = sg->length + sg->offset;
+ struct page *page = sg_page(sg);
+ unsigned int len = pg_len;
+ u64 remain_size = size - cur_offset;
- if (offset < cur_offset + pg_len) {
- struct page *page = sg_page(sg);
- unsigned int len = pg_len;
- u64 remain_size = offset + size - cur_offset;
+ if (remain_size < pg_len)
+ len -= pg_len - remain_size;
+ sg_set_page(new_sg, page, len, 0);
+ new_sg = sg_next(new_sg);
- if (cur_offset < offset) {
- page = nth_page(page, (offset - cur_offset) >>
- PAGE_SHIFT);
- len -= offset - cur_offset;
- }
- if (remain_size < pg_len)
- len -= pg_len - remain_size;
- sg_set_page(new_sg, page, len, 0);
- new_sg = sg_next(new_sg);
- }
- if (offset + size <= cur_offset + pg_len)
+ if (size <= cur_offset + pg_len)
break;
cur_offset += pg_len;
}
@@ -556,13 +542,12 @@ static int dup_sgt_in_region(struct sg_table *sgt, u64 offset, u64 size,
}
/*
- * Copy the DMA addresses and lengths in region [@offset, @offset + @size) from
+ * Copy the DMA addresses and lengths in region [0, @size) from
* @sgt to @out.
*
* The DMA addresses will be condensed when possible.
*/
-static void shrink_sgt_dma_in_region(struct sg_table *sgt, u64 offset, u64 size,
- struct sg_table *out)
+static void shrink_sgt_dma_in_region(struct sg_table *sgt, u64 size, struct sg_table *out)
{
u64 cur_offset = 0;
struct scatterlist *sg, *prv_sg = NULL, *cur_sg;
@@ -571,20 +556,12 @@ static void shrink_sgt_dma_in_region(struct sg_table *sgt, u64 offset, u64 size,
out->nents = 0;
for (sg = sgt->sgl; sg;
cur_offset += sg_dma_len(sg), sg = sg_next(sg)) {
- u64 remain_size = offset + size - cur_offset;
+ u64 remain_size = size - cur_offset;
dma_addr_t dma;
size_t len;
- /* hasn't touched the first covered sg */
- if (offset >= cur_offset + sg_dma_len(sg))
- continue;
dma = sg_dma_address(sg);
len = sg_dma_len(sg);
- /* offset exceeds current sg */
- if (offset > cur_offset) {
- dma += offset - cur_offset;
- len -= offset - cur_offset;
- }
if (remain_size < sg_dma_len(sg))
len -= sg_dma_len(sg) - remain_size;
if (prv_sg &&
@@ -603,15 +580,14 @@ static void shrink_sgt_dma_in_region(struct sg_table *sgt, u64 offset, u64 size,
}
}
-static int entry_set_shrunk_sgt(struct dmabuf_map_entry *entry, u64 offset,
- u64 size)
+static int entry_set_shrunk_sgt(struct dmabuf_map_entry *entry, u64 size)
{
int ret;
- ret = dup_sgt_in_region(entry->sgt, offset, size, &entry->shrunk_sgt);
+ ret = dup_sgt_in_region(entry->sgt, size, &entry->shrunk_sgt);
if (ret)
return ret;
- shrink_sgt_dma_in_region(entry->sgt, offset, size, &entry->shrunk_sgt);
+ shrink_sgt_dma_in_region(entry->sgt, size, &entry->shrunk_sgt);
return 0;
}
@@ -621,10 +597,8 @@ static int entry_set_shrunk_sgt(struct dmabuf_map_entry *entry, u64 offset,
*
* Fields of @entry will be set on success.
*/
-static int etdev_attach_dmabuf_to_entry(struct edgetpu_dev *etdev,
- struct dma_buf *dmabuf,
- struct dmabuf_map_entry *entry,
- u64 offset, u64 size,
+static int etdev_attach_dmabuf_to_entry(struct edgetpu_dev *etdev, struct dma_buf *dmabuf,
+ struct dmabuf_map_entry *entry, u64 size,
enum dma_data_direction dir)
{
struct dma_buf_attachment *attachment;
@@ -641,7 +615,7 @@ static int etdev_attach_dmabuf_to_entry(struct edgetpu_dev *etdev,
}
entry->attachment = attachment;
entry->sgt = sgt;
- ret = entry_set_shrunk_sgt(entry, offset, size);
+ ret = entry_set_shrunk_sgt(entry, size);
if (ret)
goto err_unmap;
@@ -663,7 +637,7 @@ int edgetpu_map_dmabuf(struct edgetpu_device_group *group,
struct dma_buf *dmabuf;
edgetpu_map_flag_t flags = arg->flags;
const u64 offset = arg->offset;
- const u64 size = PAGE_ALIGN(arg->size);
+ u64 size;
const enum dma_data_direction dir =
edgetpu_host_dma_dir(flags & EDGETPU_MAP_DIR_MASK);
struct edgetpu_dev *etdev;
@@ -672,20 +646,38 @@ int edgetpu_map_dmabuf(struct edgetpu_device_group *group,
uint i;
/* invalid DMA direction or offset is not page-aligned */
- if (!valid_dma_direction(dir) || offset_in_page(offset))
- return -EINVAL;
- /* size == 0 or overflow */
- if (offset + size <= offset)
+ if (!valid_dma_direction(dir) || offset_in_page(offset)) {
+ etdev_dbg(group->etdev,
+ "%s: valid=%d offset_in_page=%lu offset=0x%llx\n",
+ __func__, valid_dma_direction(dir),
+ offset_in_page(offset), offset);
return -EINVAL;
+ }
+ /* TODO(b/189278468): entirely ignore @offset */
+ if (offset != 0)
+ etdev_warn_ratelimited(group->etdev,
+ "Non-zero offset for dmabuf mapping is deprecated");
dmabuf = dma_buf_get(arg->dmabuf_fd);
- if (IS_ERR(dmabuf))
+ if (IS_ERR(dmabuf)) {
+ etdev_dbg(group->etdev, "%s: dma_buf_get returns %ld\n",
+ __func__, PTR_ERR(dmabuf));
return PTR_ERR(dmabuf);
- if (offset + size > dmabuf->size)
+ }
+ if (offset >= dmabuf->size) {
+ etdev_dbg(group->etdev,
+ "%s: offset=0x%llx > dmabuf size=%zx\n",
+ __func__, offset, dmabuf->size);
goto err_put;
+ }
mutex_lock(&group->lock);
- if (!edgetpu_device_group_is_finalized(group))
+ if (!edgetpu_device_group_is_finalized(group)) {
+ ret = edgetpu_group_errno(group);
+ etdev_dbg(group->etdev,
+ "%s: edgetpu_device_group_is_finalized returns %d\n",
+ __func__, ret);
goto err_unlock_group;
+ }
dmap = alloc_dmabuf_map(group, flags);
if (!dmap) {
@@ -696,40 +688,60 @@ int edgetpu_map_dmabuf(struct edgetpu_device_group *group,
get_dma_buf(dmabuf);
dmap->dmabufs[0] = dmabuf;
dmap->offset = offset;
- dmap->size = size;
+ dmap->size = size = dmabuf->size;
if (IS_MIRRORED(flags)) {
for (i = 0; i < group->n_clients; i++) {
etdev = edgetpu_device_group_nth_etdev(group, i);
- ret = etdev_attach_dmabuf_to_entry(etdev, dmabuf,
- &dmap->entries[i],
- offset, size, dir);
- if (ret)
+ ret = etdev_attach_dmabuf_to_entry(etdev, dmabuf, &dmap->entries[i], size,
+ dir);
+ if (ret) {
+ etdev_dbg(group->etdev,
+ "%s: etdev_attach_dmabuf_to_entry returns %d\n",
+ __func__, ret);
goto err_release_map;
+ }
}
ret = group_map_dmabuf(group, dmap, dir, &tpu_addr);
- if (ret)
+ if (ret) {
+ etdev_dbg(group->etdev,
+ "%s: group_map_dmabuf returns %d\n",
+ __func__, ret);
goto err_release_map;
+ }
dmap->map.die_index = ALL_DIES;
} else {
etdev = edgetpu_device_group_nth_etdev(group, arg->die_index);
if (!etdev) {
+ etdev_dbg(group->etdev,
+ "%s: edgetpu_device_group_nth_etdev returns NULL\n",
+ __func__);
ret = -EINVAL;
goto err_release_map;
}
- ret = etdev_attach_dmabuf_to_entry(
- etdev, dmabuf, &dmap->entries[0], offset, size, dir);
- if (ret)
+ ret = etdev_attach_dmabuf_to_entry(etdev, dmabuf, &dmap->entries[0], size, dir);
+ if (ret) {
+ etdev_dbg(group->etdev,
+ "%s: etdev_attach_dmabuf_to_entry returns %d\n",
+ __func__, ret);
goto err_release_map;
+ }
ret = etdev_map_dmabuf(etdev, dmap, dir, &tpu_addr);
- if (ret)
+ if (ret) {
+ etdev_dbg(group->etdev,
+ "%s: etdev_map_dmabuf returns %d\n",
+ __func__, ret);
goto err_release_map;
+ }
dmap->map.die_index = arg->die_index;
}
- dmap->map.device_address = tpu_addr;
+ dmap->map.device_address = tpu_addr + offset;
ret = edgetpu_mapping_add(&group->dmabuf_mappings, &dmap->map);
- if (ret)
+ if (ret) {
+ etdev_dbg(group->etdev, "%s: edgetpu_mapping_add returns %d\n",
+ __func__, ret);
goto err_release_map;
- arg->device_address = tpu_addr;
+ }
+ arg->device_address = dmap->map.device_address;
mutex_unlock(&group->lock);
dma_buf_put(dmabuf);
return 0;
@@ -753,9 +765,10 @@ int edgetpu_unmap_dmabuf(struct edgetpu_device_group *group, u32 die_index,
int ret = -EINVAL;
mutex_lock(&group->lock);
- /* the group is disbanded means all the mappings have been released */
- if (!edgetpu_device_group_is_finalized(group))
+ if (!edgetpu_device_group_is_finalized(group)) {
+ ret = edgetpu_group_errno(group);
goto out_unlock;
+ }
edgetpu_mapping_lock(mappings);
map = edgetpu_mapping_find_locked(mappings, die_index, tpu_addr);
if (!map)
@@ -790,8 +803,10 @@ int edgetpu_map_bulk_dmabuf(struct edgetpu_device_group *group,
if (!valid_dma_direction(dir) || arg->size == 0)
return -EINVAL;
mutex_lock(&group->lock);
- if (!edgetpu_device_group_is_finalized(group))
+ if (!edgetpu_device_group_is_finalized(group)) {
+ ret = edgetpu_group_errno(group);
goto err_unlock_group;
+ }
/* checks not all FDs are ignored */
for (i = 0; i < group->n_clients; i++)
if (arg->dmabuf_fds[i] != EDGETPU_IGNORE_FD)
@@ -820,8 +835,7 @@ int edgetpu_map_bulk_dmabuf(struct edgetpu_device_group *group,
if (!bmap->dmabufs[i])
continue;
etdev = edgetpu_device_group_nth_etdev(group, i);
- ret = etdev_attach_dmabuf_to_entry(etdev, bmap->dmabufs[i],
- &bmap->entries[i], 0,
+ ret = etdev_attach_dmabuf_to_entry(etdev, bmap->dmabufs[i], &bmap->entries[i],
bmap->size, dir);
if (ret)
goto err_release_bmap;
@@ -880,7 +894,7 @@ static void edgetpu_dma_fence_release(struct dma_fence *fence)
struct edgetpu_dma_fence *etfence = to_etfence(fence);
unsigned long flags;
- if (!fence)
+ if (!etfence)
return;
spin_lock_irqsave(&etfence_list_lock, flags);
@@ -911,17 +925,26 @@ static const struct dma_fence_ops edgetpu_dma_fence_ops = {
int edgetpu_sync_fence_create(struct edgetpu_create_sync_fence_data *datap)
{
- int fd;
+ int fd = get_unused_fd_flags(O_CLOEXEC);
int ret;
struct edgetpu_dma_fence *etfence;
struct sync_file *sync_file;
unsigned long flags;
+ if (fd < 0)
+ return fd;
etfence = kzalloc(sizeof(*etfence), GFP_KERNEL);
- if (!etfence)
- return -ENOMEM;
+ if (!etfence) {
+ ret = -ENOMEM;
+ goto err_put_fd;
+ }
spin_lock_init(&etfence->lock);
+ /*
+ * If sync_file_create() fails, fence release is called on dma_fence_put(). A valid
+ * list_head is needed for list_del().
+ */
+ INIT_LIST_HEAD(&etfence->etfence_list);
memcpy(&etfence->timeline_name, &datap->timeline_name,
EDGETPU_SYNC_TIMELINE_NAME_LEN - 1);
@@ -933,19 +956,20 @@ int edgetpu_sync_fence_create(struct edgetpu_create_sync_fence_data *datap)
dma_fence_put(&etfence->fence);
if (!sync_file) {
ret = -ENOMEM;
- goto err_freefence;
+ /* no need to kfree(etfence) here: dma_fence_put() frees it for us */
+ goto err_put_fd;
}
- fd = get_unused_fd_flags(O_CLOEXEC);
- datap->fence = fd;
- fd_install(fd, sync_file->file);
spin_lock_irqsave(&etfence_list_lock, flags);
list_add_tail(&etfence->etfence_list, &etfence_list_head);
spin_unlock_irqrestore(&etfence_list_lock, flags);
+
+ fd_install(fd, sync_file->file);
+ datap->fence = fd;
return 0;
-err_freefence:
- kfree(etfence);
+err_put_fd:
+ put_unused_fd(fd);
return ret;
}
@@ -966,6 +990,11 @@ int edgetpu_sync_fence_signal(struct edgetpu_signal_sync_fence_data *datap)
return -EINVAL;
spin_lock_irq(fence->lock);
+ /* don't signal fence twice */
+ if (unlikely(test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))) {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
pr_debug("%s: %s-%s%llu-" SEQ_FMT " errno=%d\n", __func__,
fence->ops->get_driver_name(fence),
fence->ops->get_timeline_name(fence), fence->context,
@@ -973,6 +1002,8 @@ int edgetpu_sync_fence_signal(struct edgetpu_signal_sync_fence_data *datap)
if (errno)
dma_fence_set_error(fence, errno);
ret = dma_fence_signal_locked(fence);
+
+out_unlock:
spin_unlock_irq(fence->lock);
dma_fence_put(fence);
return ret;
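The edgetpu_sync_fence_create() rework above also illustrates the standard kernel ordering for exporting a file-backed fd: reserve the fd first, build everything that can fail, and only then publish with fd_install(), which cannot be undone. A condensed, hypothetical sketch of that pattern (error handling simplified; sync_file_create() takes its own reference on the fence):

static int example_export_sync_file(struct dma_fence *fence)
{
	struct sync_file *sync_file;
	int fd = get_unused_fd_flags(O_CLOEXEC);

	if (fd < 0)
		return fd;		/* nothing to undo yet */
	sync_file = sync_file_create(fence);
	if (!sync_file) {
		put_unused_fd(fd);	/* fd was reserved but never installed */
		return -ENOMEM;
	}
	/* Point of no return: the fd becomes visible to userspace here. */
	fd_install(fd, sync_file->file);
	return fd;
}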
diff --git a/drivers/edgetpu/edgetpu-firmware.c b/drivers/edgetpu/edgetpu-firmware.c
index ded7cc7..d0dc575 100644
--- a/drivers/edgetpu/edgetpu-firmware.c
+++ b/drivers/edgetpu/edgetpu-firmware.c
@@ -263,7 +263,6 @@ static int edgetpu_firmware_handshake(struct edgetpu_firmware *et_fw)
fw_flavor = edgetpu_kci_fw_info(etdev->kci, &et_fw->p->fw_info);
if (fw_flavor < 0) {
etdev_err(etdev, "firmware handshake failed: %d", fw_flavor);
- et_fw->p->status = FW_INVALID;
et_fw->p->fw_info.fw_flavor = FW_FLAVOR_UNKNOWN;
et_fw->p->fw_info.fw_changelist = 0;
et_fw->p->fw_info.fw_build_time = 0;
@@ -281,10 +280,9 @@ static int edgetpu_firmware_handshake(struct edgetpu_firmware *et_fw)
} else {
etdev_dbg(etdev, "loaded stage 2 bootloader");
}
- et_fw->p->status = FW_VALID;
/* In case older firmware that doesn't fill out fw_info. */
et_fw->p->fw_info.fw_flavor = fw_flavor;
- /* Hermosa second-stage bootloader doesn't implement log/trace */
+ /* don't attempt log/trace handshake if it's the second-stage bootloader */
if (fw_flavor != FW_FLAVOR_BL1) {
int ret = edgetpu_telemetry_kci(etdev);
@@ -294,6 +292,55 @@ static int edgetpu_firmware_handshake(struct edgetpu_firmware *et_fw)
return 0;
}
+/*
+ * Do edgetpu_pm_get() but prevent it from running the loaded firmware.
+ *
+ * On success, caller must later call edgetpu_pm_put() to decrease the reference count.
+ *
+ * Caller holds firmware lock.
+ */
+static int edgetpu_firmware_pm_get(struct edgetpu_firmware *et_fw)
+{
+ enum edgetpu_firmware_status prev = et_fw->p->status;
+ int ret;
+
+ /* Prevent platform-specific code from trying to run the previous firmware */
+ et_fw->p->status = FW_LOADING;
+ etdev_dbg(et_fw->etdev, "Requesting power up for firmware run\n");
+ ret = edgetpu_pm_get(et_fw->etdev->pm);
+ if (ret)
+ et_fw->p->status = prev;
+ return ret;
+}
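The FW_LOADING flip above matters because the power-up path may consult the firmware status; a hedged sketch of such a chip handler (the body is an assumption, not code from this driver):

static int example_power_up(struct edgetpu_pm *etpm)
{
	struct edgetpu_dev *etdev = etpm->etdev;

	/* ... enable clocks and rails ... */

	/* FW_LOADING means another path is (re)loading firmware; leave it
	 * alone and only restart firmware known to be valid. */
	if (edgetpu_firmware_status_locked(etdev) == FW_VALID)
		return edgetpu_firmware_restart_locked(etdev);
	return 0;
}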
+
+static void edgetpu_firmware_set_loading(struct edgetpu_firmware *et_fw)
+{
+ struct edgetpu_dev *etdev = et_fw->etdev;
+
+ mutex_lock(&etdev->state_lock);
+ etdev->state = ETDEV_STATE_FWLOADING;
+ mutex_unlock(&etdev->state_lock);
+
+ et_fw->p->status = FW_LOADING;
+}
+
+/* Set firmware and etdev state according to @ret, which can be an errno or 0. */
+static void edgetpu_firmware_set_state(struct edgetpu_firmware *et_fw, int ret)
+{
+ struct edgetpu_dev *etdev = et_fw->etdev;
+
+ et_fw->p->status = ret ? FW_INVALID : FW_VALID;
+
+ mutex_lock(&etdev->state_lock);
+ if (ret == -EIO)
+ etdev->state = ETDEV_STATE_BAD; /* f/w handshake error */
+ else if (ret)
+ etdev->state = ETDEV_STATE_NOFW; /* other errors */
+ else
+ etdev->state = ETDEV_STATE_GOOD; /* f/w handshake success */
+ mutex_unlock(&etdev->state_lock);
+}
+
enum edgetpu_fw_flavor
edgetpu_firmware_get_flavor(struct edgetpu_firmware *et_fw)
{
@@ -312,6 +359,87 @@ edgetpu_firmware_get_build_time(struct edgetpu_firmware *et_fw)
return et_fw->p->fw_info.fw_build_time;
}
+/*
+ * Tries to acquire the firmware lock; unlike edgetpu_firmware_lock() it does not block if the lock is already held.
+ *
+ * Returns 1 if the lock is acquired successfully, 0 otherwise.
+ */
+int edgetpu_firmware_trylock(struct edgetpu_dev *etdev)
+{
+ struct edgetpu_firmware *et_fw = etdev->firmware;
+
+ if (!et_fw)
+ return 1;
+ return mutex_trylock(&et_fw->p->fw_desc_lock);
+}
+
+/*
+ * Grab firmware lock to protect against firmware state changes.
+ * Locks out firmware loading / unloading while caller performs ops that are
+ * incompatible with a change in firmware status. Does not care whether or not
+ * the device is joined to a group.
+ */
+int edgetpu_firmware_lock(struct edgetpu_dev *etdev)
+{
+ struct edgetpu_firmware *et_fw = etdev->firmware;
+
+ if (!et_fw)
+ return -EINVAL;
+ mutex_lock(&et_fw->p->fw_desc_lock);
+ return 0;
+}
+
+/* Drop f/w lock, let any pending firmware load proceed. */
+void edgetpu_firmware_unlock(struct edgetpu_dev *etdev)
+{
+ struct edgetpu_firmware *et_fw = etdev->firmware;
+
+ if (!et_fw)
+ return;
+ mutex_unlock(&et_fw->p->fw_desc_lock);
+}
+
+/*
+ * Lock firmware for loading. Disallow group join for the device during load.
+ * Fails if the device is already joined to a group and is in use.
+ */
+static int edgetpu_firmware_load_lock(struct edgetpu_dev *etdev)
+{
+ struct edgetpu_firmware *et_fw = etdev->firmware;
+
+ if (!et_fw) {
+ etdev_err(
+ etdev,
+ "Cannot load firmware when no loader is available\n");
+ return -EINVAL;
+ }
+ mutex_lock(&et_fw->p->fw_desc_lock);
+
+ /* Disallow group join while loading; fail if already joined */
+ if (!edgetpu_set_group_join_lockout(etdev, true)) {
+ etdev_err(
+ etdev,
+ "Cannot load firmware because device is in use");
+ mutex_unlock(&et_fw->p->fw_desc_lock);
+ return -EBUSY;
+ }
+ return 0;
+}
+
+/* Unlock firmware after lock held for loading, re-allow group join. */
+static void edgetpu_firmware_load_unlock(struct edgetpu_dev *etdev)
+{
+ struct edgetpu_firmware *et_fw = etdev->firmware;
+
+ if (!et_fw) {
+ etdev_dbg(etdev,
+ "Unlock firmware when no loader available\n");
+ return;
+ }
+ edgetpu_set_group_join_lockout(etdev, false);
+ mutex_unlock(&et_fw->p->fw_desc_lock);
+}
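The lock helpers pair up as sketched below; a hypothetical reload path inside edgetpu-firmware.c, assuming flags 0 (load_lock/load_unlock are the static pair above):

static int example_reload(struct edgetpu_dev *etdev, const char *name)
{
	int ret = edgetpu_firmware_load_lock(etdev);	/* also blocks group join */

	if (ret)
		return ret;	/* -EINVAL (no loader) or -EBUSY (device in use) */
	ret = edgetpu_firmware_run_locked(etdev->firmware, name, 0);
	edgetpu_firmware_load_unlock(etdev);		/* re-allows group join */
	return ret;
}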
+
int edgetpu_firmware_run_locked(struct edgetpu_firmware *et_fw,
const char *name,
enum edgetpu_firmware_flags flags)
@@ -321,14 +449,14 @@ int edgetpu_firmware_run_locked(struct edgetpu_firmware *et_fw,
int ret;
bool is_bl1_run = (flags & FW_BL1);
- et_fw->p->status = FW_LOADING;
+ edgetpu_firmware_set_loading(et_fw);
if (!is_bl1_run)
edgetpu_sw_wdt_stop(et_fw->etdev);
memset(&new_fw_desc, 0, sizeof(new_fw_desc));
ret = edgetpu_firmware_load_locked(et_fw, &new_fw_desc, name, flags);
if (ret)
- return ret;
+ goto out_failed;
etdev_dbg(et_fw->etdev, "run fw %s flags=0x%x", name, flags);
if (handlers && handlers->prepare_run) {
@@ -359,10 +487,17 @@ int edgetpu_firmware_run_locked(struct edgetpu_firmware *et_fw,
if (!ret && !is_bl1_run && handlers && handlers->launch_complete)
handlers->launch_complete(et_fw);
+ else if (ret && handlers && handlers->launch_failed)
+ handlers->launch_failed(et_fw, ret);
+ edgetpu_firmware_set_state(et_fw, ret);
return ret;
out_unload_new_fw:
edgetpu_firmware_unload_locked(et_fw, &new_fw_desc);
+out_failed:
+ if (handlers && handlers->launch_failed)
+ handlers->launch_failed(et_fw, ret);
+ edgetpu_firmware_set_state(et_fw, ret);
return ret;
}
@@ -371,93 +506,35 @@ int edgetpu_firmware_run(struct edgetpu_dev *etdev, const char *name,
{
struct edgetpu_firmware *et_fw = etdev->firmware;
int ret;
- enum edgetpu_dev_state prev_state;
if (!et_fw)
return -ENODEV;
- /*
- * All other operations on device will first check for device state
- * and then proceed.
- */
- mutex_lock(&etdev->state_lock);
- if (etdev->state == ETDEV_STATE_FWLOADING) {
- mutex_unlock(&etdev->state_lock);
- return -EAGAIN;
- }
- prev_state = etdev->state;
- etdev->state = ETDEV_STATE_FWLOADING;
- mutex_unlock(&etdev->state_lock);
- ret = edgetpu_firmware_lock(etdev);
+ ret = edgetpu_firmware_load_lock(etdev);
if (ret) {
etdev_err(etdev, "%s: lock failed (%d)\n", __func__, ret);
- mutex_lock(&etdev->state_lock);
- etdev->state = prev_state; /* restore etdev state */
- mutex_unlock(&etdev->state_lock);
return ret;
}
/* will be overwritten when we successfully parse the f/w header */
etdev->fw_version.kci_version = EDGETPU_INVALID_KCI_VERSION;
- /*
- * Prevent platform-specific code from trying to run the previous
- * firmware
- */
- et_fw->p->status = FW_LOADING;
- etdev_dbg(et_fw->etdev, "Requesting power up for firmware run\n");
- ret = edgetpu_pm_get(etdev->pm);
- if (!ret)
+ ret = edgetpu_firmware_pm_get(et_fw);
+ if (!ret) {
ret = edgetpu_firmware_run_locked(et_fw, name, flags);
- etdev->firmware = et_fw;
- edgetpu_pm_put(etdev->pm);
- edgetpu_firmware_unlock(etdev);
+ edgetpu_pm_put(etdev->pm);
+ }
- mutex_lock(&etdev->state_lock);
- if (ret == -EIO)
- etdev->state = ETDEV_STATE_BAD; /* f/w handshake error */
- else if (ret)
- etdev->state = ETDEV_STATE_NOFW; /* other errors */
- else
- etdev->state = ETDEV_STATE_GOOD; /* f/w handshake success */
- mutex_unlock(&etdev->state_lock);
+ edgetpu_firmware_load_unlock(etdev);
return ret;
}
-int edgetpu_firmware_lock(struct edgetpu_dev *etdev)
+bool edgetpu_firmware_is_loading(struct edgetpu_dev *etdev)
{
struct edgetpu_firmware *et_fw = etdev->firmware;
- if (!et_fw) {
- etdev_err(
- etdev,
- "Cannot load firmware when no loader is available\n");
- return -EINVAL;
- }
- mutex_lock(&et_fw->p->fw_desc_lock);
-
- /* Disallow group join while loading, fail if already joined */
- if (!edgetpu_set_group_join_lockout(etdev, true)) {
- etdev_err(
- etdev,
- "Cannot load firmware because device is in use");
- mutex_unlock(&et_fw->p->fw_desc_lock);
- return -EBUSY;
- }
- return 0;
-}
-
-void edgetpu_firmware_unlock(struct edgetpu_dev *etdev)
-{
- struct edgetpu_firmware *et_fw = etdev->firmware;
-
- if (!et_fw) {
- etdev_dbg(etdev,
- "Unlock firmware when no loader available\n");
- return;
- }
- edgetpu_set_group_join_lockout(etdev, false);
- mutex_unlock(&et_fw->p->fw_desc_lock);
+ return et_fw && et_fw->p->status == FW_LOADING;
}
+/* Caller must hold firmware lock. */
enum edgetpu_firmware_status
edgetpu_firmware_status_locked(struct edgetpu_dev *etdev)
{
@@ -468,13 +545,25 @@ edgetpu_firmware_status_locked(struct edgetpu_dev *etdev)
return et_fw->p->status;
}
+/* Caller must hold firmware lock. For unit tests. */
+void
+edgetpu_firmware_set_status_locked(struct edgetpu_dev *etdev,
+ enum edgetpu_firmware_status status)
+{
+ struct edgetpu_firmware *et_fw = etdev->firmware;
+
+ if (et_fw)
+ et_fw->p->status = status;
+}
+
+/* Caller must hold firmware lock for loading. */
int edgetpu_firmware_restart_locked(struct edgetpu_dev *etdev)
{
struct edgetpu_firmware *et_fw = etdev->firmware;
const struct edgetpu_firmware_handlers *handlers = et_fw->p->handlers;
int ret = -1;
- et_fw->p->status = FW_LOADING;
+ edgetpu_firmware_set_loading(et_fw);
edgetpu_sw_wdt_stop(etdev);
/*
* Try restarting the firmware first, fall back to normal firmware start
@@ -485,11 +574,13 @@ int edgetpu_firmware_restart_locked(struct edgetpu_dev *etdev)
if (ret && handlers && handlers->prepare_run) {
ret = handlers->prepare_run(et_fw, &et_fw->p->fw_desc.buf);
if (ret)
- return ret;
+ goto out;
}
ret = edgetpu_firmware_handshake(et_fw);
if (!ret)
edgetpu_sw_wdt_start(etdev);
+out:
+ edgetpu_firmware_set_state(et_fw, ret);
return ret;
}
@@ -608,55 +699,51 @@ static const struct attribute_group edgetpu_firmware_attr_group = {
};
/*
- * Can only be called with etdev->state == ETDEV_STATE_FWLOADING.
+ * Marks all groups associated with @etdev as errored.
*/
-static void edgetpu_abort_clients(struct edgetpu_dev *etdev)
+static void edgetpu_set_groups_error(struct edgetpu_dev *etdev)
{
- int i, num_clients = 0;
+ size_t i, num_groups = 0;
struct edgetpu_device_group *group;
+ struct edgetpu_device_group **groups;
struct edgetpu_list_group *g;
- struct edgetpu_client **clients;
- struct edgetpu_list_client *c;
- /*
- * We don't hold etdev->groups_lock here because
- * 1. All group operations (functions in edgetpu-device-group.c)
- * are skipped when "etdev->state is not GOOD", we shall be the
- * only one accessing @etdev->groups, and
- * 2. to prevent LOCKDEP from reporting deadlock with
- * edgetpu_device_group_add_locked, which nested holds group->lock
- * then etdev->groups_lock.
- */
- clients = kmalloc_array(etdev->n_groups, sizeof(*clients), GFP_KERNEL);
- if (!clients) {
+ mutex_lock(&etdev->groups_lock);
+ groups = kmalloc_array(etdev->n_groups, sizeof(*groups), GFP_KERNEL);
+ if (unlikely(!groups)) {
/*
- * Just give up aborting clients in this case, this should never
- * happen after all.
+ * Just give up setting the status in this case; this only happens
+ * when the system is OOM.
*/
+ mutex_unlock(&etdev->groups_lock);
edgetpu_fatal_error_notify(etdev);
return;
}
+ /*
+ * Fetch the groups into an array to set the group status without
+ * holding @etdev->groups_lock, to prevent the potential deadlock where
+ * edgetpu_device_group_add() holds group->lock and then takes etdev->groups_lock.
+ */
etdev_for_each_group(etdev, g, group) {
- mutex_lock(&group->lock);
- list_for_each_entry(c, &group->clients, list) {
- if (etdev == c->client->etdev) {
- clients[num_clients++] =
- edgetpu_client_get(c->client);
- break;
- }
- }
- mutex_unlock(&group->lock);
+ if (edgetpu_device_group_is_disbanded(group))
+ continue;
+ groups[num_groups++] = edgetpu_device_group_get(group);
}
- edgetpu_fatal_error_notify(etdev);
- for (i = 0; i < num_clients; i++) {
+ mutex_unlock(&etdev->groups_lock);
+ for (i = 0; i < num_groups; i++) {
+ group = groups[i];
+ mutex_lock(&group->lock);
/*
- * No need to hold state lock here since all group operations on
- * client are protected by state being GOOD.
+ * Only finalized groups may have performed a handshake with the FW;
+ * mark them as errored.
*/
- edgetpu_device_group_leave_locked(clients[i]);
- edgetpu_client_put(clients[i]);
+ if (edgetpu_device_group_is_finalized(group))
+ group->status = EDGETPU_DEVICE_GROUP_ERRORED;
+ mutex_unlock(&group->lock);
+ edgetpu_device_group_put(group);
}
- kfree(clients);
+ edgetpu_fatal_error_notify(etdev);
+ kfree(groups);
}
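The snapshot-then-lock shape of the function above exists to break a lock-order cycle; an illustrative interleaving of the deadlock it avoids (thread labels are for exposition only):

/*
 * thread A: edgetpu_device_group_add():  mutex_lock(&group->lock)
 * thread B: edgetpu_set_groups_error():  mutex_lock(&etdev->groups_lock)
 * thread A:                              mutex_lock(&etdev->groups_lock)  <- waits for B
 * thread B, had it taken group->lock
 * while holding groups_lock:             mutex_lock(&group->lock)         <- waits for A: deadlock
 *
 * Taking group->lock only after dropping groups_lock breaks the cycle.
 */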
static void edgetpu_firmware_wdt_timeout_action(void *data)
@@ -670,40 +757,26 @@ static void edgetpu_firmware_wdt_timeout_action(void *data)
if (!edgetpu_is_powered(etdev))
return;
- mutex_lock(&etdev->state_lock);
- if (etdev->state == ETDEV_STATE_FWLOADING) {
- mutex_unlock(&etdev->state_lock);
- return;
- }
- etdev->state = ETDEV_STATE_FWLOADING;
- mutex_unlock(&etdev->state_lock);
-
- edgetpu_abort_clients(etdev);
-
- ret = edgetpu_firmware_lock(etdev);
/*
- * edgetpu_firmware_lock() should always return success here as etdev
- * is already removed from all groups and fw loader exists.
+ * Zero the FW state of open mailboxes so that, when the runtime releases
+ * groups, the CLOSE_DEVICE KCIs won't be sent.
*/
- if (ret) {
- etdev_err(etdev, "%s: lock failed (%d)\n", __func__, ret);
+ edgetpu_handshake_clear_fw_state(&etdev->mailbox_manager->open_devices);
+ edgetpu_set_groups_error(etdev);
+
+ /* Another process is loading the firmware; let it do the work. */
+ if (edgetpu_firmware_is_loading(etdev))
return;
- }
- et_fw->p->status = FW_LOADING;
- ret = edgetpu_pm_get(etdev->pm);
- if (!ret)
+
+ /* edgetpu_firmware_lock() never fails here */
+ edgetpu_firmware_lock(etdev);
+
+ ret = edgetpu_firmware_pm_get(et_fw);
+ if (!ret) {
ret = edgetpu_firmware_restart_locked(etdev);
- edgetpu_pm_put(etdev->pm);
+ edgetpu_pm_put(etdev->pm);
+ }
edgetpu_firmware_unlock(etdev);
-
- mutex_lock(&etdev->state_lock);
- if (ret == -EIO)
- etdev->state = ETDEV_STATE_BAD;
- else if (ret)
- etdev->state = ETDEV_STATE_NOFW;
- else
- etdev->state = ETDEV_STATE_GOOD;
- mutex_unlock(&etdev->state_lock);
}
int edgetpu_firmware_create(struct edgetpu_dev *etdev,
diff --git a/drivers/edgetpu/edgetpu-firmware.h b/drivers/edgetpu/edgetpu-firmware.h
index 477d1a5..e41543d 100644
--- a/drivers/edgetpu/edgetpu-firmware.h
+++ b/drivers/edgetpu/edgetpu-firmware.h
@@ -147,6 +147,8 @@ struct edgetpu_firmware_handlers {
struct edgetpu_firmware_buffer *fw_buf);
/* Firmware running, after successful handshake. */
void (*launch_complete)(struct edgetpu_firmware *et_fw);
+ /* Firmware load failed or unsuccessful handshake. */
+ void (*launch_failed)(struct edgetpu_firmware *et_fw, int ret);
/*
* Optional platform-specific handler to restart an already loaded
@@ -195,21 +197,29 @@ void edgetpu_firmware_mappings_show(struct edgetpu_dev *etdev,
struct seq_file *s);
/*
- * These two functions grab and release the internal firmware lock
- * and must be used before calling the helper functions suffixed with _locked
- * below
+ * These functions grab and release the internal firmware lock and must be used
+ * before calling the helper functions suffixed with _locked below.
*/
int edgetpu_firmware_lock(struct edgetpu_dev *etdev);
+int edgetpu_firmware_trylock(struct edgetpu_dev *etdev);
void edgetpu_firmware_unlock(struct edgetpu_dev *etdev);
+/* Returns whether the firmware loading work is ongoing. */
+bool edgetpu_firmware_is_loading(struct edgetpu_dev *etdev);
/*
- * Returns the state of the firmware image currently loaded for this device
+ * Returns the state of the firmware image currently loaded for this device.
+ * Caller must hold firmware lock.
*/
enum edgetpu_firmware_status
edgetpu_firmware_status_locked(struct edgetpu_dev *etdev);
+/* Caller must hold firmware lock. For unit tests. */
+void
+edgetpu_firmware_set_status_locked(struct edgetpu_dev *etdev,
+ enum edgetpu_firmware_status status);
+
/*
* Restarts the last firmware image loaded
* Intended for power managed devices to re-run the firmware without a full
diff --git a/drivers/edgetpu/edgetpu-fs.c b/drivers/edgetpu/edgetpu-fs.c
index b74a91b..5b07632 100644
--- a/drivers/edgetpu/edgetpu-fs.c
+++ b/drivers/edgetpu/edgetpu-fs.c
@@ -79,14 +79,9 @@ int edgetpu_open(struct edgetpu_dev *etdev, struct file *file)
/* Set client pointer to NULL if error creating client. */
file->private_data = NULL;
- mutex_lock(&etdev->open.lock);
client = edgetpu_client_add(etdev);
- if (IS_ERR(client)) {
- mutex_unlock(&etdev->open.lock);
+ if (IS_ERR(client))
return PTR_ERR(client);
- }
- etdev->open.count++;
- mutex_unlock(&etdev->open.lock);
file->private_data = client;
return 0;
}
@@ -110,28 +105,27 @@ static int edgetpu_fs_release(struct inode *inode, struct file *file)
etdev = client->etdev;
wakelock_count = edgetpu_wakelock_lock(client->wakelock);
-
+ mutex_lock(&client->group_lock);
/*
- * TODO(b/180528495): remove pm_get when disbanding can be performed
- * with device off.
+ * @wakelock_count == 0 means the device might be powered off. For a group with
+ * a non-detachable mailbox, the mailbox is removed when the group is released;
+ * in that case we need to ensure the device is powered, to prevent a kernel
+ * panic when programming the VII mailbox CSRs.
+ *
+ * For mailbox-detachable groups the mailbox was already removed when the wakelock
+ * was released, so edgetpu_device_group_release() doesn't need the device to be
+ * powered in this case.
*/
- if (client->group && !wakelock_count) {
+ if (!wakelock_count && client->group && !client->group->mailbox_detachable) {
wakelock_count = 1;
edgetpu_pm_get(etdev->pm);
}
-
+ mutex_unlock(&client->group_lock);
edgetpu_wakelock_unlock(client->wakelock);
edgetpu_client_remove(client);
- mutex_lock(&etdev->open.lock);
- if (etdev->open.count)
- --etdev->open.count;
-
/* count was zero if client previously released its wake lock */
if (wakelock_count)
edgetpu_pm_put(etdev->pm);
- mutex_unlock(&etdev->open.lock);
return 0;
}
@@ -227,10 +221,7 @@ static int edgetpu_ioctl_finalize_group(struct edgetpu_client *client)
group = client->group;
if (!group || !edgetpu_device_group_is_leader(group, client))
goto out_unlock;
- /*
- * TODO(b/180528495): remove pm_get when finalization can be performed
- * with device off.
- */
+ /* Finalization has to be performed with device on. */
if (!wakelock_count) {
ret = edgetpu_pm_get(client->etdev->pm);
if (ret) {
@@ -633,6 +624,30 @@ edgetpu_ioctl_dram_usage(struct edgetpu_dev *etdev,
return 0;
}
+static int
+edgetpu_ioctl_acquire_ext_mailbox(struct edgetpu_client *client,
+ struct edgetpu_ext_mailbox __user *argp)
+{
+ struct edgetpu_ext_mailbox ext_mailbox;
+
+ if (copy_from_user(&ext_mailbox, argp, sizeof(ext_mailbox)))
+ return -EFAULT;
+
+ return edgetpu_chip_acquire_ext_mailbox(client, &ext_mailbox);
+}
+
+static int
+edgetpu_ioctl_release_ext_mailbox(struct edgetpu_client *client,
+ struct edgetpu_ext_mailbox __user *argp)
+{
+ struct edgetpu_ext_mailbox ext_mailbox;
+
+ if (copy_from_user(&ext_mailbox, argp, sizeof(ext_mailbox)))
+ return -EFAULT;
+
+ return edgetpu_chip_release_ext_mailbox(client, &ext_mailbox);
+}
+
long edgetpu_ioctl(struct file *file, uint cmd, ulong arg)
{
struct edgetpu_client *client = file->private_data;
@@ -715,6 +730,13 @@ long edgetpu_ioctl(struct file *file, uint cmd, ulong arg)
case EDGETPU_GET_DRAM_USAGE:
ret = edgetpu_ioctl_dram_usage(client->etdev, argp);
break;
+ case EDGETPU_ACQUIRE_EXT_MAILBOX:
+ ret = edgetpu_ioctl_acquire_ext_mailbox(client, argp);
+ break;
+ case EDGETPU_RELEASE_EXT_MAILBOX:
+ ret = edgetpu_ioctl_release_ext_mailbox(client, argp);
+ break;
+
default:
return -ENOTTY; /* unknown command */
}
@@ -1014,11 +1036,18 @@ static const struct file_operations syncfences_ops = {
.release = single_release,
};
-static void edgetpu_debugfs_global_setup(void)
+static int edgetpu_debugfs_global_setup(void)
{
edgetpu_debugfs_dir = debugfs_create_dir("edgetpu", NULL);
+ if (IS_ERR(edgetpu_debugfs_dir)) {
+ pr_err(DRIVER_NAME " error creating edgetpu debugfs dir: %ld\n",
+ PTR_ERR(edgetpu_debugfs_dir));
+ return PTR_ERR(edgetpu_debugfs_dir);
+ }
+
debugfs_create_file("syncfences", 0440, edgetpu_debugfs_dir, NULL,
&syncfences_ops);
+ return 0;
}
int __init edgetpu_fs_init(void)
diff --git a/drivers/edgetpu/edgetpu-google-iommu.c b/drivers/edgetpu/edgetpu-google-iommu.c
index f48fe20..cdd01d9 100644
--- a/drivers/edgetpu/edgetpu-google-iommu.c
+++ b/drivers/edgetpu/edgetpu-google-iommu.c
@@ -14,9 +14,6 @@
#include <linux/types.h>
#include <linux/version.h>
-#ifdef CONFIG_ABROLHOS
-#include "abrolhos-platform.h"
-#endif
#include "edgetpu-internal.h"
#include "edgetpu-mapping.h"
#include "edgetpu-mmu.h"
@@ -234,9 +231,6 @@ out:
/* mmu_info is unused and NULL for IOMMU version, let IOMMU API supply info */
int edgetpu_mmu_attach(struct edgetpu_dev *etdev, void *mmu_info)
{
-#ifdef CONFIG_ABROLHOS
- struct abrolhos_platform_dev *abpdev = to_abrolhos_dev(etdev);
-#endif
struct edgetpu_iommu *etiommu;
int ret;
@@ -270,28 +264,6 @@ int edgetpu_mmu_attach(struct edgetpu_dev *etdev, void *mmu_info)
/* etiommu initialization done */
etdev->mmu_cookie = etiommu;
- /* TODO (b/178571278): remove chipset specific code. */
-#ifdef CONFIG_ABROLHOS
- if (!abpdev->csr_iova)
- goto success;
-
- etdev_dbg(etdev, "Mapping device CSRs: %llX -> %llX (%lu bytes)\n",
- abpdev->csr_iova, abpdev->csr_paddr, abpdev->csr_size);
-
- /* Add an IOMMU translation for the CSR region */
- ret = edgetpu_mmu_add_translation(etdev, abpdev->csr_iova,
- abpdev->csr_paddr, abpdev->csr_size,
- IOMMU_READ | IOMMU_WRITE | IOMMU_PRIV,
- EDGETPU_CONTEXT_KCI);
- if (ret) {
- etdev_err(etdev, "Unable to map device CSRs into IOMMU\n");
- edgetpu_unregister_iommu_device_fault_handler(etdev);
- etdev->mmu_cookie = NULL;
- goto err_free;
- }
-
-success:
-#endif
return 0;
err_free:
@@ -306,24 +278,12 @@ void edgetpu_mmu_reset(struct edgetpu_dev *etdev)
void edgetpu_mmu_detach(struct edgetpu_dev *etdev)
{
-#ifdef CONFIG_ABROLHOS
- struct abrolhos_platform_dev *abpdev = to_abrolhos_dev(etdev);
-#endif
struct edgetpu_iommu *etiommu = etdev->mmu_cookie;
int i, ret;
if (!etiommu)
return;
-#ifdef CONFIG_ABROLHOS
- if (abpdev->csr_iova) {
- edgetpu_mmu_remove_translation(&abpdev->edgetpu_dev,
- abpdev->csr_iova,
- abpdev->csr_size,
- EDGETPU_CONTEXT_KCI);
- }
- abpdev->csr_iova = 0;
-#endif
ret = edgetpu_unregister_iommu_device_fault_handler(etdev);
if (ret)
etdev_warn(etdev,
@@ -364,7 +324,7 @@ static int get_iommu_map_params(struct edgetpu_dev *etdev,
{
struct edgetpu_iommu *etiommu = etdev->mmu_cookie;
size_t size = 0;
- int prot = __dma_dir_to_iommu_prot(map->dir);
+ int prot = __dma_dir_to_iommu_prot(map->dir, etdev->dev);
struct iommu_domain *domain;
int i;
struct scatterlist *sg;
@@ -461,7 +421,7 @@ int edgetpu_mmu_map_iova_sgt(struct edgetpu_dev *etdev, tpu_addr_t iova,
struct sg_table *sgt, enum dma_data_direction dir,
enum edgetpu_context_id context_id)
{
- const int prot = __dma_dir_to_iommu_prot(edgetpu_host_dma_dir(dir));
+ const int prot = __dma_dir_to_iommu_prot(edgetpu_host_dma_dir(dir), etdev->dev);
const tpu_addr_t orig_iova = iova;
struct scatterlist *sg;
int i;
@@ -550,7 +510,7 @@ tpu_addr_t edgetpu_mmu_tpu_map(struct edgetpu_dev *etdev, dma_addr_t down_addr,
struct iommu_domain *default_domain =
iommu_get_domain_for_dev(etdev->dev);
phys_addr_t paddr;
- int prot = __dma_dir_to_iommu_prot(dir);
+ int prot = __dma_dir_to_iommu_prot(dir, etdev->dev);
domain = get_domain_by_context_id(etdev, context_id);
/*
@@ -602,7 +562,7 @@ tpu_addr_t edgetpu_mmu_tpu_map_sgt(struct edgetpu_dev *etdev,
phys_addr_t paddr;
dma_addr_t iova, cur_iova;
size_t size;
- int prot = __dma_dir_to_iommu_prot(dir);
+ int prot = __dma_dir_to_iommu_prot(dir, etdev->dev);
struct scatterlist *sg;
int ret;
int i;
diff --git a/drivers/edgetpu/edgetpu-internal.h b/drivers/edgetpu/edgetpu-internal.h
index 1a8cc35..1258cf0 100644
--- a/drivers/edgetpu/edgetpu-internal.h
+++ b/drivers/edgetpu/edgetpu-internal.h
@@ -91,11 +91,6 @@ struct edgetpu_coherent_mem {
#endif
};
-struct edgetpu_reg_window {
- uint start_reg_offset;
- size_t size;
-};
-
struct edgetpu_device_group;
struct edgetpu_p2p_csr_map;
struct edgetpu_remote_dram_map;
@@ -128,8 +123,6 @@ struct edgetpu_client {
dma_addr_t *p2p_csrs_dma_addrs;
/* Peer DRAM dma addrs for this client, if has on-device DRAM */
dma_addr_t *remote_drams_dma_addrs;
- /* range of device CSRs mmap()'able */
- struct edgetpu_reg_window reg_window;
/* Per-client request to keep device active */
struct edgetpu_wakelock *wakelock;
/* Bit field of registered per die events */
@@ -170,10 +163,6 @@ struct edgetpu_dev {
struct cdev cdev; /* cdev char device structure */
dev_t devno; /* char device dev_t */
char dev_name[EDGETPU_DEVICE_NAME_MAX];
- struct {
- struct mutex lock;
- uint count; /* number times device currently opened */
- } open;
struct edgetpu_mapped_resource regs; /* ioremapped CSRs */
struct dentry *d_entry; /* debugfs dir for this device */
struct mutex state_lock; /* protects state of this device */
@@ -377,7 +366,13 @@ irqreturn_t edgetpu_chip_irq_handler(int irq, void *arg);
*
* Returns 0 on success, otherwise -errno.
*/
-int edgetpu_setup_mmu(struct edgetpu_dev *etdev);
+int edgetpu_chip_setup_mmu(struct edgetpu_dev *etdev);
+
+/*
+ * Reverts edgetpu_chip_setup_mmu().
+ * This is called during device removal.
+ */
+void edgetpu_chip_remove_mmu(struct edgetpu_dev *etdev);
/* Read TPU timestamp */
u64 edgetpu_chip_tpu_timestamp(struct edgetpu_dev *etdev);
@@ -398,6 +393,9 @@ struct edgetpu_client *edgetpu_client_add(struct edgetpu_dev *etdev);
/* Remove TPU client */
void edgetpu_client_remove(struct edgetpu_client *client);
+/* Handle chip-specific client removal */
+void edgetpu_chip_client_remove(struct edgetpu_client *client);
+
/* mmap() device/queue memory */
int edgetpu_mmap(struct edgetpu_client *client, struct vm_area_struct *vma);
@@ -416,4 +414,20 @@ void edgetpu_mark_probe_fail(struct edgetpu_dev *etdev);
*/
int edgetpu_get_state_errno_locked(struct edgetpu_dev *etdev);
+/*
+ * "External mailboxes" below refers to mailboxes that are not handled
+ * directly by the DarwiNN runtime, such as secure or device-to-device.
+ *
+ * Chip specific code will typically keep track of state and inform the firmware
+ * that a mailbox has become active/inactive.
+ */
+
+/* Chip-specific code to acquire external mailboxes */
+int edgetpu_chip_acquire_ext_mailbox(struct edgetpu_client *client,
+ struct edgetpu_ext_mailbox *ext_mbox);
+
+/* Chip-specific code to release external mailboxes */
+int edgetpu_chip_release_ext_mailbox(struct edgetpu_client *client,
+ struct edgetpu_ext_mailbox *ext_mbox);
+
#endif /* __EDGETPU_INTERNAL_H__ */
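For a chip with no external mailboxes, these hooks can be stubbed out; a hedged sketch (the prototypes are from above, the bodies are assumptions):

int edgetpu_chip_acquire_ext_mailbox(struct edgetpu_client *client,
				     struct edgetpu_ext_mailbox *ext_mbox)
{
	return -ENODEV;		/* this chip has no external mailboxes */
}

int edgetpu_chip_release_ext_mailbox(struct edgetpu_client *client,
				     struct edgetpu_ext_mailbox *ext_mbox)
{
	return -ENODEV;
}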
diff --git a/drivers/edgetpu/edgetpu-kci.c b/drivers/edgetpu/edgetpu-kci.c
index 2f0d2e3..e467fac 100644
--- a/drivers/edgetpu/edgetpu-kci.c
+++ b/drivers/edgetpu/edgetpu-kci.c
@@ -274,6 +274,19 @@ static struct edgetpu_kci_response_element *edgetpu_kci_fetch_responses(
/* loop until our head equals the CSR tail */
while (1) {
tail = EDGETPU_MAILBOX_RESP_QUEUE_READ_SYNC(kci->mailbox, tail);
+ /*
+ * Make sure the CSR is read and reported properly by checking
+ * whether any bit higher than CIRCULAR_QUEUE_WRAP_BIT is set or
+ * the real tail index reaches kci->mailbox->resp_queue_size.
+ */
+ if (unlikely(tail & ~CIRCULAR_QUEUE_VALID_MASK ||
+ CIRCULAR_QUEUE_REAL_INDEX(tail) >= size)) {
+ etdev_err_ratelimited(
+ kci->mailbox->etdev,
+ "Invalid response queue tail: 0x%x\n", tail);
+ break;
+ }
+
count = circular_queue_count(head, tail, size);
if (count == 0)
break;
@@ -419,6 +432,14 @@ static void edgetpu_kci_handle_irq(struct edgetpu_mailbox *mailbox)
schedule_work(&kci->work);
}
+static void edgetpu_kci_update_usage_work(struct work_struct *work)
+{
+ struct edgetpu_kci *kci =
+ container_of(work, struct edgetpu_kci, usage_work);
+
+ edgetpu_kci_update_usage(kci->mailbox->etdev);
+}
+
int edgetpu_kci_init(struct edgetpu_mailbox_manager *mgr,
struct edgetpu_kci *kci)
{
@@ -465,6 +486,7 @@ int edgetpu_kci_init(struct edgetpu_mailbox_manager *mgr,
init_waitqueue_head(&kci->wait_list_waitq);
INIT_WORK(&kci->work, edgetpu_kci_consume_responses_work);
edgetpu_reverse_kci_init(&kci->rkci);
+ INIT_WORK(&kci->usage_work, edgetpu_kci_update_usage_work);
EDGETPU_MAILBOX_CONTEXT_WRITE(mailbox, context_enable, 1);
return 0;
}
@@ -494,7 +516,9 @@ int edgetpu_kci_reinit(struct edgetpu_kci *kci)
void edgetpu_kci_cancel_work_queues(struct edgetpu_kci *kci)
{
- /* Cancel KCI and reverse KCI workers */
+ /* Cancel workers that may send KCIs. */
+ cancel_work_sync(&kci->usage_work);
+ /* Cancel KCI and reverse KCI workers. */
cancel_work_sync(&kci->work);
cancel_work_sync(&kci->rkci.work);
}
@@ -503,10 +527,6 @@ void edgetpu_kci_release(struct edgetpu_dev *etdev, struct edgetpu_kci *kci)
{
if (!kci)
return;
- /*
- * Command/Response queues are managed (dmam_alloc_coherent()), we don't
- * need to free them.
- */
edgetpu_kci_cancel_work_queues(kci);
@@ -840,19 +860,42 @@ enum edgetpu_fw_flavor edgetpu_kci_fw_info(struct edgetpu_kci *kci,
return flavor;
}
+void edgetpu_kci_update_usage_async(struct edgetpu_dev *etdev)
+{
+ schedule_work(&etdev->kci->usage_work);
+}
+
int edgetpu_kci_update_usage(struct edgetpu_dev *etdev)
{
- int ret;
+ int ret = -EAGAIN;
- /* Quick return if device already powered down, else get PM ref. */
+ /* Quick return if device is already powered down. */
if (!edgetpu_is_powered(etdev))
return -EAGAIN;
- ret = edgetpu_pm_get(etdev->pm);
- if (ret)
- return ret;
- ret = edgetpu_kci_update_usage_locked(etdev);
+ /*
+ * Lock out changes in f/w load/unload status during the usage update.
+ * Skip the usage update if the firmware is being updated right now or
+ * is not valid.
+ */
+ if (!edgetpu_firmware_trylock(etdev))
+ return -EAGAIN;
- edgetpu_pm_put(etdev->pm);
+ if (edgetpu_firmware_status_locked(etdev) != FW_VALID)
+ goto fw_unlock;
+ /*
+ * This function may run in a worker that is being canceled when the
+ * device is powering down, and the power-down code holds the PM lock.
+ * Use trylock to prevent cancel_work_sync() from waiting forever.
+ */
+ if (!edgetpu_pm_trylock(etdev->pm))
+ goto fw_unlock;
+
+ if (edgetpu_is_powered(etdev))
+ ret = edgetpu_kci_update_usage_locked(etdev);
+ edgetpu_pm_unlock(etdev->pm);
+
+fw_unlock:
+ edgetpu_firmware_unlock(etdev);
return ret;
}
@@ -973,3 +1016,24 @@ int edgetpu_kci_close_device(struct edgetpu_kci *kci, u32 mailbox_ids)
return -ENODEV;
return edgetpu_kci_send_cmd(kci, &cmd);
}
+
+int edgetpu_kci_notify_throttling(struct edgetpu_dev *etdev, u32 level)
+{
+ struct edgetpu_command_element cmd = {
+ .code = KCI_CODE_NOTIFY_THROTTLING,
+ .dma = {
+ .flags = level,
+ },
+ };
+ int ret;
+
+ if (!etdev->kci)
+ return -ENODEV;
+ if (!edgetpu_pm_get_if_powered(etdev->pm))
+ return -EAGAIN;
+
+ ret = edgetpu_kci_send_cmd(etdev->kci, &cmd);
+ edgetpu_pm_put(etdev->pm);
+ return ret;
+}
+
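A hedged example of a caller, e.g. from cooling-state code (the function name is an assumption; thermal->etdev is the field added in this change):

static int example_set_cooling_state(struct edgetpu_thermal *thermal,
				     unsigned long state)
{
	int ret = edgetpu_kci_notify_throttling(thermal->etdev, (u32)state);

	if (ret == -EAGAIN)
		return 0;	/* device off; nothing to notify */
	return ret < 0 ? ret : 0;
}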
diff --git a/drivers/edgetpu/edgetpu-kci.h b/drivers/edgetpu/edgetpu-kci.h
index 443c690..05f87c8 100644
--- a/drivers/edgetpu/edgetpu-kci.h
+++ b/drivers/edgetpu/edgetpu-kci.h
@@ -108,6 +108,7 @@ enum edgetpu_kci_code {
KCI_CODE_CLOSE_DEVICE = 10,
KCI_CODE_FIRMWARE_INFO = 11,
KCI_CODE_GET_USAGE = 12,
+ KCI_CODE_NOTIFY_THROTTLING = 13,
};
/*
@@ -192,6 +193,7 @@ struct edgetpu_kci {
struct work_struct work; /* worker of consuming responses */
/* Handler for reverse (firmware -> kernel) requests */
struct edgetpu_reverse_kci rkci;
+ struct work_struct usage_work; /* worker that sends update usage KCI */
};
struct edgetpu_kci_device_group_detail {
@@ -279,7 +281,15 @@ enum edgetpu_fw_flavor edgetpu_kci_fw_info(
struct edgetpu_kci *kci, struct edgetpu_fw_info *fw_info);
/*
- * Retrieve usage tracking data from firmware, update info on host.
+ * Schedules a worker to call edgetpu_kci_update_usage().
+ *
+ * For functions that don't require the usage to be updated immediately, use
+ * this function instead of edgetpu_kci_update_usage().
+ */
+void edgetpu_kci_update_usage_async(struct edgetpu_dev *etdev);
+
+/*
+ * Retrieves usage tracking data from firmware and updates info on the host.
* Also used as a watchdog ping to firmware.
*
* Returns KCI response code on success or < 0 on error (typically -ETIMEDOUT).
@@ -333,13 +343,31 @@ int edgetpu_kci_shutdown(struct edgetpu_kci *kci);
int edgetpu_kci_get_debug_dump(struct edgetpu_kci *kci, tpu_addr_t tpu_addr,
size_t size);
-/* Inform the firmware to prepare to serve the VII of @mailbox_ids. */
+/*
+ * Inform the firmware to prepare to serve the VII of @mailbox_ids.
+ *
+ * You usually shouldn't call this directly - consider using
+ * edgetpu_mailbox_activate() instead.
+ */
int edgetpu_kci_open_device(struct edgetpu_kci *kci, u32 mailbox_ids);
-/* Inform the firmware the VII with @mailbox_ids are closed. */
+/*
+ * Inform the firmware the VII with @mailbox_ids are closed.
+ *
+ * You usually shouldn't call this directly - consider using
+ * edgetpu_mailbox_deactivate() instead.
+ */
int edgetpu_kci_close_device(struct edgetpu_kci *kci, u32 mailbox_ids);
/* Cancel work queues or wait until they're done */
void edgetpu_kci_cancel_work_queues(struct edgetpu_kci *kci);
+/*
+ * Notify the firmware about throttling and the corresponding power level.
+ * The request is sent only if the device is already powered on.
+ *
+ * Returns KCI response code on success or < 0 on error (typically -ETIMEDOUT).
+ */
+int edgetpu_kci_notify_throttling(struct edgetpu_dev *etdev, u32 level);
+
#endif /* __EDGETPU_KCI_H__ */
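A small sketch of the intended division of labor (the wrapper is an assumption): paths that may sleep on a KCI response call the synchronous variant, all others schedule the worker.

static void example_usage_poll(struct edgetpu_dev *etdev, bool can_block)
{
	if (can_block)
		edgetpu_kci_update_usage(etdev);	/* may wait on the KCI response */
	else
		edgetpu_kci_update_usage_async(etdev);	/* just schedules kci->usage_work */
}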
diff --git a/drivers/edgetpu/edgetpu-mailbox.c b/drivers/edgetpu/edgetpu-mailbox.c
index 6ffc141..11b6fe2 100644
--- a/drivers/edgetpu/edgetpu-mailbox.c
+++ b/drivers/edgetpu/edgetpu-mailbox.c
@@ -18,6 +18,7 @@
#include "edgetpu-kci.h"
#include "edgetpu-mailbox.h"
#include "edgetpu-mmu.h"
+#include "edgetpu-wakelock.h"
#include "edgetpu.h"
/*
@@ -541,9 +542,9 @@ void edgetpu_mailbox_free_queue(struct edgetpu_dev *etdev,
/*
* Creates a mailbox manager, one edgetpu device has one manager.
*/
-struct edgetpu_mailbox_manager *edgetpu_mailbox_create_mgr(
- struct edgetpu_dev *etdev,
- const struct edgetpu_mailbox_manager_desc *desc)
+struct edgetpu_mailbox_manager *
+edgetpu_mailbox_create_mgr(struct edgetpu_dev *etdev,
+ const struct edgetpu_mailbox_manager_desc *desc)
{
struct edgetpu_mailbox_manager *mgr;
uint total = 0;
@@ -574,6 +575,7 @@ struct edgetpu_mailbox_manager *edgetpu_mailbox_create_mgr(
if (!mgr->mailboxes)
return ERR_PTR(-ENOMEM);
rwlock_init(&mgr->mailboxes_lock);
+ mutex_init(&mgr->open_devices.lock);
return mgr;
}
@@ -709,11 +711,157 @@ void edgetpu_mailbox_restore_active_vii_queues(struct edgetpu_dev *etdev)
{
struct edgetpu_list_group *l;
struct edgetpu_device_group *group;
+ struct edgetpu_device_group **groups;
+ size_t i, n = 0;
mutex_lock(&etdev->groups_lock);
+ groups = kmalloc_array(etdev->n_groups, sizeof(*groups), GFP_KERNEL);
+ if (unlikely(!groups)) {
+ /*
+ * Either the runtime is misbehaving (creating tons of groups),
+ * or the system is indeed OOM - we give up on this restore
+ * process, which leaves the runtime unable to communicate with
+ * the device through VII.
+ */
+ mutex_unlock(&etdev->groups_lock);
+ return;
+ }
+ /*
+ * Fetch the groups into an array to restore the VII without holding
+ * etdev->groups_lock, to prevent the potential deadlock where
+ * edgetpu_device_group_add() holds group->lock and then takes etdev->groups_lock.
+ */
etdev_for_each_group(etdev, l, group) {
- if (!edgetpu_group_mailbox_detached_locked(group))
- edgetpu_mailbox_reinit_vii(group);
+ /*
+ * Quick skip without holding group->lock.
+ * Disbanded groups can never go back to the normal state.
+ */
+ if (edgetpu_device_group_is_disbanded(group))
+ continue;
+ /*
+ * Increase the group reference to prevent the group being
+ * released after we release groups_lock.
+ */
+ groups[n++] = edgetpu_device_group_get(group);
}
mutex_unlock(&etdev->groups_lock);
+
+ /*
+ * We are not holding @etdev->groups_lock; what may race is:
+ * 1. A group is disbanding and being removed from @etdev.
+ * 2. A new group is being added to @etdev.
+ *
+ * For (1.) the group will be marked as DISBANDED, so we check whether
+ * the group is finalized before performing VII re-init.
+ *
+ * For (2.), adding a group to @etdev (edgetpu_device_group_add()) has
+ * nothing to do with VII; its VII will be set when the group is
+ * finalized.
+ */
+ for (i = 0; i < n; i++) {
+ group = groups[i];
+ mutex_lock(&group->lock);
+ /*
+ * If the group is just finalized or has mailbox attached in
+ * another process, this re-init is redundant but isn't harmful.
+ */
+ if (edgetpu_group_finalized_and_attached(group))
+ edgetpu_mailbox_reinit_vii(group);
+ mutex_unlock(&group->lock);
+ edgetpu_device_group_put(group);
+ }
+ kfree(groups);
+}
+
+int edgetpu_mailbox_enable_ext(struct edgetpu_client *client, u32 mailbox_ids)
+{
+ int ret;
+
+ if (!edgetpu_wakelock_lock(client->wakelock)) {
+ etdev_err(client->etdev,
+ "Enabling mailboxes %08x needs wakelock acquired\n",
+ mailbox_ids);
+ edgetpu_wakelock_unlock(client->wakelock);
+ return -EAGAIN;
+ }
+
+ edgetpu_wakelock_inc_event_locked(client->wakelock,
+ EDGETPU_WAKELOCK_EVENT_EXT_MAILBOX);
+
+ etdev_dbg(client->etdev, "Enabling mailboxes: %08X\n", mailbox_ids);
+
+ ret = edgetpu_mailbox_activate(client->etdev, mailbox_ids);
+ if (ret)
+ etdev_err(client->etdev, "Activate mailboxes %08x failed: %d",
+ mailbox_ids, ret);
+ edgetpu_wakelock_unlock(client->wakelock);
+ return ret;
+}
+
+int edgetpu_mailbox_disable_ext(struct edgetpu_client *client, u32 mailbox_ids)
+{
+ int ret;
+
+ if (!edgetpu_wakelock_lock(client->wakelock)) {
+ etdev_err(client->etdev,
+ "Disabling mailboxes %08x needs wakelock acquired\n",
+ mailbox_ids);
+ edgetpu_wakelock_unlock(client->wakelock);
+ return -EAGAIN;
+ }
+
+ edgetpu_wakelock_dec_event_locked(client->wakelock,
+ EDGETPU_WAKELOCK_EVENT_EXT_MAILBOX);
+
+ etdev_dbg(client->etdev, "Disabling mailbox: %08X\n", mailbox_ids);
+ ret = edgetpu_mailbox_deactivate(client->etdev, mailbox_ids);
+
+ if (ret)
+ etdev_err(client->etdev, "Deactivate mailboxes %08x failed: %d",
+ mailbox_ids, ret);
+ edgetpu_wakelock_unlock(client->wakelock);
+ return ret;
+}
+
+int edgetpu_mailbox_activate(struct edgetpu_dev *etdev, u32 mailbox_ids)
+{
+ struct edgetpu_handshake *eh = &etdev->mailbox_manager->open_devices;
+ u32 to_send;
+ int ret = 0;
+
+ mutex_lock(&eh->lock);
+ to_send = mailbox_ids & ~eh->fw_state;
+ if (to_send)
+ ret = edgetpu_kci_open_device(etdev->kci, to_send);
+ if (!ret) {
+ eh->state |= mailbox_ids;
+ eh->fw_state |= mailbox_ids;
+ }
+ mutex_unlock(&eh->lock);
+ return ret;
+}
+
+int edgetpu_mailbox_deactivate(struct edgetpu_dev *etdev, u32 mailbox_ids)
+{
+ struct edgetpu_handshake *eh = &etdev->mailbox_manager->open_devices;
+ u32 to_send;
+ int ret = 0;
+
+ mutex_lock(&eh->lock);
+ to_send = mailbox_ids & eh->fw_state;
+ if (to_send)
+ ret = edgetpu_kci_close_device(etdev->kci, to_send);
+ if (!ret) {
+ eh->state &= ~mailbox_ids;
+ eh->fw_state &= ~mailbox_ids;
+ }
+ mutex_unlock(&eh->lock);
+ return ret;
+}
+
+void edgetpu_handshake_clear_fw_state(struct edgetpu_handshake *eh)
+{
+ mutex_lock(&eh->lock);
+ eh->fw_state = 0;
+ mutex_unlock(&eh->lock);
}
diff --git a/drivers/edgetpu/edgetpu-mailbox.h b/drivers/edgetpu/edgetpu-mailbox.h
index ddae12a..c212a8a 100644
--- a/drivers/edgetpu/edgetpu-mailbox.h
+++ b/drivers/edgetpu/edgetpu-mailbox.h
@@ -17,6 +17,8 @@
#define CIRCULAR_QUEUE_WRAP_BIT (1 << 10)
#define CIRCULAR_QUEUE_INDEX_MASK (CIRCULAR_QUEUE_WRAP_BIT - 1)
+#define CIRCULAR_QUEUE_VALID_MASK \
+ (CIRCULAR_QUEUE_INDEX_MASK | CIRCULAR_QUEUE_WRAP_BIT)
#define CIRCULAR_QUEUE_WRAPPED(idx) ((idx) & CIRCULAR_QUEUE_WRAP_BIT)
#define CIRCULAR_QUEUE_REAL_INDEX(idx) ((idx) & CIRCULAR_QUEUE_INDEX_MASK)
@@ -76,6 +78,25 @@ struct edgetpu_vii {
edgetpu_queue_mem resp_queue_mem;
};
+/*
+ * Structure for recording the driver state vs FW state.
+ *
+ * Example usage:
+ * @state is a bit mask that denotes each mailbox to which an "OPEN_DEVICE"
+ * KCI has been sent.
+ * @fw_state is the bit mask of mailbox IDs for which the FW has received the
+ * "OPEN_DEVICE" KCI.
+ * In usual cases @state always equals @fw_state, but when the FW is reloaded
+ * @fw_state is reset to zero; this structure can then be used to detect that
+ * the FW state is out of sync and needs further action.
+ */
+struct edgetpu_handshake {
+ struct mutex lock;
+ /* fields protected by @lock */
+ u32 state;
+ u32 fw_state;
+};
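A worked example of the divergence this struct detects, using the activate/deactivate helpers from edgetpu-mailbox.c (mailbox ID values are illustrative):

/*
 * activate(0x3): OPEN_DEVICE sent for 0x3           -> state = 0x3, fw_state = 0x3
 * FW reload:     edgetpu_handshake_clear_fw_state() -> state = 0x3, fw_state = 0x0
 * activate(0x1): to_send = 0x1 & ~0x0 = 0x1         -> OPEN_DEVICE re-sent for 0x1
 * afterwards state = 0x3 but fw_state = 0x1: mailbox 0x2 still needs re-opening.
 */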
+
typedef u32 (*get_csr_base_t)(uint index);
struct edgetpu_mailbox_manager {
@@ -92,6 +113,7 @@ struct edgetpu_mailbox_manager {
get_csr_base_t get_context_csr_base;
get_csr_base_t get_cmd_queue_csr_base;
get_csr_base_t get_resp_queue_csr_base;
+ struct edgetpu_handshake open_devices;
};
/* the structure to configure a mailbox manager */
@@ -163,9 +185,9 @@ enum mailbox_queue_type {
* Allocations are device-managed so no release function is needed to free the
* manager.
*/
-struct edgetpu_mailbox_manager *edgetpu_mailbox_create_mgr(
- struct edgetpu_dev *etdev,
- const struct edgetpu_mailbox_manager_desc *desc);
+struct edgetpu_mailbox_manager *
+edgetpu_mailbox_create_mgr(struct edgetpu_dev *etdev,
+ const struct edgetpu_mailbox_manager_desc *desc);
/* interrupt handler */
irqreturn_t edgetpu_mailbox_handle_irq(struct edgetpu_mailbox_manager *mgr);
@@ -274,6 +296,29 @@ void edgetpu_mailbox_restore_active_vii_queues(struct edgetpu_dev *etdev);
int edgetpu_mailbox_p2p_batch(struct edgetpu_mailbox_manager *mgr, uint n,
uint skip_i, struct edgetpu_mailbox **mailboxes);
+/* Notify firmware of external mailboxes becoming active */
+int edgetpu_mailbox_enable_ext(struct edgetpu_client *client, u32 mailbox_ids);
+
+/* Notify firmware of external mailboxes becoming inactive */
+int edgetpu_mailbox_disable_ext(struct edgetpu_client *client, u32 mailbox_ids);
+
+/*
+ * Activates @mailbox_ids; an OPEN_DEVICE KCI will be sent.
+ *
+ * If @mailbox_ids are already known to be activated, no KCI is sent and
+ * this function returns 0.
+ *
+ * Returns what edgetpu_kci_open_device() returned.
+ * Caller ensures device is powered on.
+ */
+int edgetpu_mailbox_activate(struct edgetpu_dev *etdev, u32 mailbox_ids);
+/*
+ * Similar to edgetpu_mailbox_activate() but sends CLOSE_DEVICE KCI instead.
+ */
+int edgetpu_mailbox_deactivate(struct edgetpu_dev *etdev, u32 mailbox_ids);
+/* Sets @eh->fw_state to 0. */
+void edgetpu_handshake_clear_fw_state(struct edgetpu_handshake *eh);
+
/* Utilities of circular queue operations */
/*
@@ -282,11 +327,18 @@ int edgetpu_mailbox_p2p_batch(struct edgetpu_mailbox_manager *mgr, uint n,
*/
static inline u32 circular_queue_count(u32 head, u32 tail, u32 queue_size)
{
+ u32 ret;
+
if (CIRCULAR_QUEUE_WRAPPED(tail) != CIRCULAR_QUEUE_WRAPPED(head))
- return queue_size - CIRCULAR_QUEUE_REAL_INDEX(head) +
- CIRCULAR_QUEUE_REAL_INDEX(tail);
+ ret = queue_size - CIRCULAR_QUEUE_REAL_INDEX(head) +
+ CIRCULAR_QUEUE_REAL_INDEX(tail);
else
- return tail - head;
+ ret = tail - head;
+
+ if (unlikely(ret > queue_size))
+ return 0;
+
+ return ret;
}
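A quick numeric check of the wrap-around arithmetic, assuming queue_size = 16 (the helper and macros are the ones above; the wrapper function is illustrative):

static void example_queue_count_check(void)
{
	u32 head = 14;				/* wrap bit clear, real index 14 */
	u32 tail = CIRCULAR_QUEUE_WRAP_BIT | 2;	/* wrapped once, real index 2 */

	/* wrap bits differ: 16 - 14 + 2 = 4 pending elements */
	WARN_ON(circular_queue_count(head, tail, 16) != 4);
	/* same wrap bit but tail < head underflows; the new guard returns 0 */
	WARN_ON(circular_queue_count(14, 2, 16) != 0);
}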
/* Increases @index of a circular queue by @inc. */
diff --git a/drivers/edgetpu/edgetpu-mapping.h b/drivers/edgetpu/edgetpu-mapping.h
index 6681427..e3a0dc9 100644
--- a/drivers/edgetpu/edgetpu-mapping.h
+++ b/drivers/edgetpu/edgetpu-mapping.h
@@ -7,6 +7,7 @@
#ifndef __EDGETPU_MAPPING_H__
#define __EDGETPU_MAPPING_H__
+#include <linux/device.h>
#include <linux/dma-direction.h>
#include <linux/iommu.h>
#include <linux/mutex.h>
@@ -14,6 +15,10 @@
#include <linux/scatterlist.h>
#include <linux/seq_file.h>
#include <linux/types.h>
+#include <linux/version.h>
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 10, 0)
+#include <linux/dma-map-ops.h>
+#endif
#include "edgetpu-internal.h"
@@ -139,13 +144,14 @@ void edgetpu_mapping_clear(struct edgetpu_mapping_root *mappings);
void edgetpu_mappings_show(struct edgetpu_mapping_root *mappings,
struct seq_file *s);
-static inline int __dma_dir_to_iommu_prot(enum dma_data_direction dir)
+static inline int __dma_dir_to_iommu_prot(enum dma_data_direction dir, struct device *dev)
{
- int prot = 0;
-
-#ifdef EDGETPU_IS_IO_COHERENT
- prot = IOMMU_CACHE;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 10, 0)
+ int prot = dev_is_dma_coherent(dev) ? IOMMU_CACHE : 0;
+#else
+ int prot = 0; /* hardcode to non-dma-coherent for prior kernels */
#endif
+
switch (dir) {
case DMA_BIDIRECTIONAL:
return prot | IOMMU_READ | IOMMU_WRITE;
diff --git a/drivers/edgetpu/edgetpu-mmu.h b/drivers/edgetpu/edgetpu-mmu.h
index 8ef2f2c..8c5ae3c 100644
--- a/drivers/edgetpu/edgetpu-mmu.h
+++ b/drivers/edgetpu/edgetpu-mmu.h
@@ -118,6 +118,7 @@ static inline unsigned long map_to_dma_attr(edgetpu_map_flag_t flags, bool map)
int edgetpu_mmu_attach(struct edgetpu_dev *dev, void *mmu_info);
void edgetpu_mmu_detach(struct edgetpu_dev *dev);
+
/**
* Re-attach to previously attached MMU.
*
diff --git a/drivers/edgetpu/edgetpu-pm.c b/drivers/edgetpu/edgetpu-pm.c
index 8ee47fe..1e28141 100644
--- a/drivers/edgetpu/edgetpu-pm.c
+++ b/drivers/edgetpu/edgetpu-pm.c
@@ -29,15 +29,18 @@ struct edgetpu_pm_private {
int power_up_count;
};
-int edgetpu_pm_get(struct edgetpu_pm *etpm)
+/*
+ * Increases the counter and calls the power_up callback.
+ *
+ * Returns zero on success.
+ *
+ * Caller holds etpm->p->lock.
+ */
+static int edgetpu_pm_get_locked(struct edgetpu_pm *etpm)
{
+ int power_up_count = etpm->p->power_up_count++;
int ret = 0;
- int power_up_count;
- if (!etpm || !etpm->p->handlers || !etpm->p->handlers->power_up)
- return 0;
- mutex_lock(&etpm->p->lock);
- power_up_count = etpm->p->power_up_count++;
if (!power_up_count) {
ret = etpm->p->handlers->power_up(etpm);
if (!ret)
@@ -46,6 +49,49 @@ int edgetpu_pm_get(struct edgetpu_pm *etpm)
if (ret)
etpm->p->power_up_count--;
etdev_dbg(etpm->etdev, "%s: %d\n", __func__, etpm->p->power_up_count);
+ return ret;
+}
+
+int edgetpu_pm_trylock(struct edgetpu_pm *etpm)
+{
+ if (!etpm || !etpm->p->handlers || !etpm->p->handlers->power_up)
+ return 1;
+ return mutex_trylock(&etpm->p->lock);
+}
+
+void edgetpu_pm_unlock(struct edgetpu_pm *etpm)
+{
+ if (!etpm || !etpm->p->handlers || !etpm->p->handlers->power_up)
+ return;
+ mutex_unlock(&etpm->p->lock);
+}
+
+bool edgetpu_pm_get_if_powered(struct edgetpu_pm *etpm)
+{
+ bool ret;
+
+ if (!etpm || !etpm->p->handlers || !etpm->p->handlers->power_up)
+ return true;
+ /* fast fail without holding the lock */
+ if (!etpm->p->power_up_count)
+ return false;
+ mutex_lock(&etpm->p->lock);
+ if (etpm->p->power_up_count)
+ ret = !edgetpu_pm_get_locked(etpm);
+ else
+ ret = false;
+ mutex_unlock(&etpm->p->lock);
+ return ret;
+}
+
+int edgetpu_pm_get(struct edgetpu_pm *etpm)
+{
+ int ret;
+
+ if (!etpm || !etpm->p->handlers || !etpm->p->handlers->power_up)
+ return 0;
+ mutex_lock(&etpm->p->lock);
+ ret = edgetpu_pm_get_locked(etpm);
mutex_unlock(&etpm->p->lock);
return ret;
}
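Typical use of the refcounted pair, sketched (example_work is a placeholder):

static int example_do_powered_work(struct edgetpu_dev *etdev)
{
	int ret = edgetpu_pm_get(etdev->pm);	/* powers up on the 0 -> 1 transition */

	if (ret)
		return ret;
	example_work(etdev);			/* hypothetical; device is powered here */
	edgetpu_pm_put(etdev->pm);		/* powers down on the 1 -> 0 transition */
	return 0;
}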
diff --git a/drivers/edgetpu/edgetpu-pm.h b/drivers/edgetpu/edgetpu-pm.h
index 3ec7f66..aef35f6 100644
--- a/drivers/edgetpu/edgetpu-pm.h
+++ b/drivers/edgetpu/edgetpu-pm.h
@@ -48,6 +48,24 @@ struct edgetpu_pm {
*/
/*
+ * Tries to acquire the internal lock that ensures power_up_count won't be
+ * modified.
+ *
+ * Returns 1 if the lock has been acquired successfully, 0 otherwise.
+ */
+int edgetpu_pm_trylock(struct edgetpu_pm *etpm);
+void edgetpu_pm_unlock(struct edgetpu_pm *etpm);
+
+/*
+ * Increases power_up_count if the device is already powered on.
+ *
+ * The caller must call edgetpu_pm_put() to decrease power_up_count if this
+ * function returned true; otherwise put() must not be called.
+ *
+ * Returns false if the device is not powered, true otherwise.
+ */
+bool edgetpu_pm_get_if_powered(struct edgetpu_pm *etpm);
+/*
* Increase power_up_count for active state, power up the device if previous
* power_up_count was zero.
* Returns 0 on success or negative error value
diff --git a/drivers/edgetpu/edgetpu-telemetry.c b/drivers/edgetpu/edgetpu-telemetry.c
index cf9435c..abc9095 100644
--- a/drivers/edgetpu/edgetpu-telemetry.c
+++ b/drivers/edgetpu/edgetpu-telemetry.c
@@ -144,9 +144,7 @@ static void edgetpu_fw_log(struct edgetpu_telemetry *log)
start);
if (entry.length == 0 || entry.length > max_length) {
header->head = header->tail;
-#if 0 /* TODO(b/170340226): add me back */
etdev_err_ratelimited(etdev, "log queue is corrupted");
-#endif
break;
}
copy_with_wrap(header, buffer, entry.length, queue_size, start);
diff --git a/drivers/edgetpu/edgetpu-thermal.h b/drivers/edgetpu/edgetpu-thermal.h
index 7201597..63fc91c 100644
--- a/drivers/edgetpu/edgetpu-thermal.h
+++ b/drivers/edgetpu/edgetpu-thermal.h
@@ -12,6 +12,8 @@
#include <linux/mutex.h>
#include <linux/thermal.h>
+#include "edgetpu-internal.h"
+
#define EDGETPU_COOLING_NAME "tpu_cooling"
struct edgetpu_thermal {
@@ -22,6 +24,7 @@ struct edgetpu_thermal {
void *op_data;
unsigned long cooling_state;
unsigned int tpu_num_states;
+ struct edgetpu_dev *etdev;
};
struct edgetpu_state_pwr {
@@ -34,6 +37,7 @@ struct edgetpu_state_pwr {
*
* Returns -errno on error.
*/
-struct edgetpu_thermal *devm_tpu_thermal_create(struct device *dev);
+struct edgetpu_thermal *devm_tpu_thermal_create(struct device *dev,
+ struct edgetpu_dev *etdev);
#endif /* __EDGETPU_THERMAL_H__ */
diff --git a/drivers/edgetpu/edgetpu-usage-stats.c b/drivers/edgetpu/edgetpu-usage-stats.c
index 1404674..c13b42e 100644
--- a/drivers/edgetpu/edgetpu-usage-stats.c
+++ b/drivers/edgetpu/edgetpu-usage-stats.c
@@ -17,6 +17,7 @@
#include "abrolhos-pm.h"
static enum tpu_pwr_state tpu_states_arr[] = {
+ TPU_ACTIVE_UUD,
TPU_ACTIVE_SUD,
TPU_ACTIVE_UD,
TPU_ACTIVE_NOM,
@@ -151,6 +152,22 @@ static void edgetpu_counter_update(
mutex_unlock(&ustats->usage_stats_lock);
}
+static void edgetpu_counter_clear(
+ struct edgetpu_dev *etdev,
+ enum edgetpu_usage_counter_type counter_type)
+{
+ struct edgetpu_usage_stats *ustats = etdev->usage_stats;
+
+ if (!ustats)
+ return;
+ if (counter_type >= EDGETPU_COUNTER_COUNT)
+ return;
+
+ mutex_lock(&ustats->usage_stats_lock);
+ ustats->counter[counter_type] = 0;
+ mutex_unlock(&ustats->usage_stats_lock);
+}
+
static void edgetpu_max_watermark_update(
struct edgetpu_dev *etdev,
struct edgetpu_usage_max_watermark *max_watermark)
@@ -364,7 +381,7 @@ static ssize_t tpu_usage_clear(struct device *dev,
return count;
}
-static DEVICE_ATTR(tpu_usage, 0644, tpu_usage_show, tpu_usage_clear);
+static DEVICE_ATTR(tpu_usage, 0664, tpu_usage_show, tpu_usage_clear);
static ssize_t device_utilization_show(struct device *dev,
struct device_attribute *attr,
@@ -403,7 +420,19 @@ static ssize_t tpu_active_cycle_count_show(struct device *dev,
EDGETPU_COUNTER_TPU_ACTIVE_CYCLES);
return scnprintf(buf, PAGE_SIZE, "%llu\n", val);
}
-static DEVICE_ATTR_RO(tpu_active_cycle_count);
+
+static ssize_t tpu_active_cycle_count_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ struct edgetpu_dev *etdev = dev_get_drvdata(dev);
+
+ edgetpu_counter_clear(etdev, EDGETPU_COUNTER_TPU_ACTIVE_CYCLES);
+ return count;
+}
+static DEVICE_ATTR(tpu_active_cycle_count, 0664, tpu_active_cycle_count_show,
+ tpu_active_cycle_count_store);
static ssize_t tpu_throttle_stall_count_show(struct device *dev,
struct device_attribute *attr,
@@ -416,7 +445,20 @@ static ssize_t tpu_throttle_stall_count_show(struct device *dev,
EDGETPU_COUNTER_TPU_THROTTLE_STALLS);
return scnprintf(buf, PAGE_SIZE, "%llu\n", val);
}
-static DEVICE_ATTR_RO(tpu_throttle_stall_count);
+
+static ssize_t tpu_throttle_stall_count_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ struct edgetpu_dev *etdev = dev_get_drvdata(dev);
+
+ edgetpu_counter_clear(etdev, EDGETPU_COUNTER_TPU_THROTTLE_STALLS);
+ return count;
+}
+static DEVICE_ATTR(tpu_throttle_stall_count, 0664,
+ tpu_throttle_stall_count_show,
+ tpu_throttle_stall_count_store);
static ssize_t inference_count_show(struct device *dev,
struct device_attribute *attr, char *buf)
@@ -428,7 +470,19 @@ static ssize_t inference_count_show(struct device *dev,
EDGETPU_COUNTER_INFERENCES);
return scnprintf(buf, PAGE_SIZE, "%llu\n", val);
}
-static DEVICE_ATTR_RO(inference_count);
+
+static ssize_t inference_count_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ struct edgetpu_dev *etdev = dev_get_drvdata(dev);
+
+ edgetpu_counter_clear(etdev, EDGETPU_COUNTER_INFERENCES);
+ return count;
+}
+static DEVICE_ATTR(inference_count, 0664, inference_count_show,
+ inference_count_store);
static ssize_t tpu_op_count_show(struct device *dev,
struct device_attribute *attr, char *buf)
@@ -440,7 +494,18 @@ static ssize_t tpu_op_count_show(struct device *dev,
EDGETPU_COUNTER_TPU_OPS);
return scnprintf(buf, PAGE_SIZE, "%llu\n", val);
}
-static DEVICE_ATTR_RO(tpu_op_count);
+
+static ssize_t tpu_op_count_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ struct edgetpu_dev *etdev = dev_get_drvdata(dev);
+
+ edgetpu_counter_clear(etdev, EDGETPU_COUNTER_TPU_OPS);
+ return count;
+}
+static DEVICE_ATTR(tpu_op_count, 0664, tpu_op_count_show, tpu_op_count_store);
static ssize_t param_cache_hit_count_show(struct device *dev,
struct device_attribute *attr,
@@ -453,7 +518,19 @@ static ssize_t param_cache_hit_count_show(struct device *dev,
EDGETPU_COUNTER_PARAM_CACHE_HITS);
return scnprintf(buf, PAGE_SIZE, "%llu\n", val);
}
-static DEVICE_ATTR_RO(param_cache_hit_count);
+
+static ssize_t param_cache_hit_count_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ struct edgetpu_dev *etdev = dev_get_drvdata(dev);
+
+ edgetpu_counter_clear(etdev, EDGETPU_COUNTER_PARAM_CACHE_HITS);
+ return count;
+}
+static DEVICE_ATTR(param_cache_hit_count, 0664, param_cache_hit_count_show,
+ param_cache_hit_count_store);
static ssize_t param_cache_miss_count_show(struct device *dev,
struct device_attribute *attr,
@@ -466,7 +543,19 @@ static ssize_t param_cache_miss_count_show(struct device *dev,
EDGETPU_COUNTER_PARAM_CACHE_MISSES);
return scnprintf(buf, PAGE_SIZE, "%llu\n", val);
}
-static DEVICE_ATTR_RO(param_cache_miss_count);
+
+static ssize_t param_cache_miss_count_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ struct edgetpu_dev *etdev = dev_get_drvdata(dev);
+
+ edgetpu_counter_clear(etdev, EDGETPU_COUNTER_PARAM_CACHE_MISSES);
+ return count;
+}
+static DEVICE_ATTR(param_cache_miss_count, 0664, param_cache_miss_count_show,
+ param_cache_miss_count_store);
static ssize_t context_preempt_count_show(struct device *dev,
struct device_attribute *attr,
@@ -479,7 +568,19 @@ static ssize_t context_preempt_count_show(struct device *dev,
EDGETPU_COUNTER_CONTEXT_PREEMPTS);
return scnprintf(buf, PAGE_SIZE, "%llu\n", val);
}
-static DEVICE_ATTR_RO(context_preempt_count);
+
+static ssize_t context_preempt_count_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ struct edgetpu_dev *etdev = dev_get_drvdata(dev);
+
+ edgetpu_counter_clear(etdev, EDGETPU_COUNTER_CONTEXT_PREEMPTS);
+ return count;
+}
+static DEVICE_ATTR(context_preempt_count, 0664, context_preempt_count_show,
+ context_preempt_count_store);
static ssize_t outstanding_commands_max_show(
struct device *dev, struct device_attribute *attr, char *buf)
@@ -507,7 +608,9 @@ static ssize_t outstanding_commands_max_store(
return count;
}
-static DEVICE_ATTR_RW(outstanding_commands_max);
+static DEVICE_ATTR(outstanding_commands_max, 0664,
+ outstanding_commands_max_show,
+ outstanding_commands_max_store);
static ssize_t preempt_depth_max_show(
struct device *dev, struct device_attribute *attr, char *buf)
@@ -535,7 +638,8 @@ static ssize_t preempt_depth_max_store(
return count;
}
-static DEVICE_ATTR_RW(preempt_depth_max);
+static DEVICE_ATTR(preempt_depth_max, 0664, preempt_depth_max_show,
+ preempt_depth_max_store);
static ssize_t fw_thread_stats_show(
struct device *dev, struct device_attribute *attr, char *buf)
@@ -559,7 +663,23 @@ static ssize_t fw_thread_stats_show(
mutex_unlock(&ustats->usage_stats_lock);
return ret;
}
-static DEVICE_ATTR_RO(fw_thread_stats);
+
+static ssize_t fw_thread_stats_store(
+ struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct edgetpu_dev *etdev = dev_get_drvdata(dev);
+ struct edgetpu_usage_stats *ustats = etdev->usage_stats;
+ int i;
+
+ mutex_lock(&ustats->usage_stats_lock);
+ for (i = 0; i < EDGETPU_FW_THREAD_COUNT; i++)
+ ustats->thread_stack_max[i] = 0;
+ mutex_unlock(&ustats->usage_stats_lock);
+ return count;
+}
+static DEVICE_ATTR(fw_thread_stats, 0664, fw_thread_stats_show,
+ fw_thread_stats_store);
static struct attribute *usage_stats_dev_attrs[] = {
&dev_attr_tpu_usage.attr,
diff --git a/drivers/edgetpu/edgetpu-wakelock.c b/drivers/edgetpu/edgetpu-wakelock.c
index f308d3a..e217ace 100644
--- a/drivers/edgetpu/edgetpu-wakelock.c
+++ b/drivers/edgetpu/edgetpu-wakelock.c
@@ -68,14 +68,13 @@ void edgetpu_wakelock_free(struct edgetpu_wakelock *wakelock)
kfree(wakelock);
}
-bool edgetpu_wakelock_inc_event(struct edgetpu_wakelock *wakelock,
- enum edgetpu_wakelock_event evt)
+bool edgetpu_wakelock_inc_event_locked(struct edgetpu_wakelock *wakelock,
+ enum edgetpu_wakelock_event evt)
{
bool ret = true;
if (NO_WAKELOCK(wakelock))
return true;
- mutex_lock(&wakelock->lock);
if (!wakelock->req_count) {
ret = false;
etdev_warn(
@@ -93,18 +92,29 @@ bool edgetpu_wakelock_inc_event(struct edgetpu_wakelock *wakelock,
evt);
}
}
- mutex_unlock(&wakelock->lock);
return ret;
}
-bool edgetpu_wakelock_dec_event(struct edgetpu_wakelock *wakelock,
+bool edgetpu_wakelock_inc_event(struct edgetpu_wakelock *wakelock,
enum edgetpu_wakelock_event evt)
{
- bool ret = true;
+ bool ret;
if (NO_WAKELOCK(wakelock))
return true;
mutex_lock(&wakelock->lock);
+ ret = edgetpu_wakelock_inc_event_locked(wakelock, evt);
+ mutex_unlock(&wakelock->lock);
+ return ret;
+}
+
+bool edgetpu_wakelock_dec_event_locked(struct edgetpu_wakelock *wakelock,
+ enum edgetpu_wakelock_event evt)
+{
+ bool ret = true;
+
+ if (NO_WAKELOCK(wakelock))
+ return true;
if (!wakelock->event_count[evt]) {
ret = false;
etdev_warn(wakelock->etdev, "event %d unbalanced decreasing",
@@ -112,6 +122,18 @@ bool edgetpu_wakelock_dec_event(struct edgetpu_wakelock *wakelock,
} else {
--wakelock->event_count[evt];
}
+ return ret;
+}
+
+bool edgetpu_wakelock_dec_event(struct edgetpu_wakelock *wakelock,
+ enum edgetpu_wakelock_event evt)
+{
+ bool ret;
+
+ if (NO_WAKELOCK(wakelock))
+ return true;
+ mutex_lock(&wakelock->lock);
+ ret = edgetpu_wakelock_dec_event_locked(wakelock, evt);
mutex_unlock(&wakelock->lock);
return ret;
}
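
The refactor above splits each event-counter update into a _locked core plus a thin wrapper that takes the mutex, so a caller already holding the wakelock's internal lock can batch several updates in one critical section. A sketch of such a caller follows; it assumes an edgetpu_wakelock_unlock() counterpart to the edgetpu_wakelock_lock() helper documented in the header (the unlock name is an assumption), and omits error unwinding for brevity:

/*
 * Sketch only: bump two event counters under a single lock acquisition,
 * using the _locked variants introduced above.
 */
static bool inc_ext_mailbox_events(struct edgetpu_wakelock *wakelock)
{
	bool ok;

	edgetpu_wakelock_lock(wakelock);
	ok = edgetpu_wakelock_inc_event_locked(
		     wakelock, EDGETPU_WAKELOCK_EVENT_EXT_MAILBOX) &&
	     edgetpu_wakelock_inc_event_locked(
		     wakelock, EDGETPU_WAKELOCK_EVENT_CMD_QUEUE);
	edgetpu_wakelock_unlock(wakelock); /* assumed counterpart to _lock() */
	return ok;
}
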
diff --git a/drivers/edgetpu/edgetpu-wakelock.h b/drivers/edgetpu/edgetpu-wakelock.h
index 69e0fa7..a353284 100644
--- a/drivers/edgetpu/edgetpu-wakelock.h
+++ b/drivers/edgetpu/edgetpu-wakelock.h
@@ -33,7 +33,8 @@
X(EDGETPU_WAKELOCK_EVENT_MBOX_CSR, 1), \
X(EDGETPU_WAKELOCK_EVENT_CMD_QUEUE, 2), \
X(EDGETPU_WAKELOCK_EVENT_RESP_QUEUE, 3), \
- X(EDGETPU_WAKELOCK_EVENT_END, 4)
+ X(EDGETPU_WAKELOCK_EVENT_EXT_MAILBOX, 4), \
+ X(EDGETPU_WAKELOCK_EVENT_END, 5)
enum edgetpu_wakelock_event {
#define X(name, val) name = val
@@ -82,6 +83,13 @@ void edgetpu_wakelock_free(struct edgetpu_wakelock *wakelock);
*/
bool edgetpu_wakelock_inc_event(struct edgetpu_wakelock *wakelock,
enum edgetpu_wakelock_event evt);
+
+/*
+ * A version of edgetpu_wakelock_inc_event() for callers that already hold
+ * the wakelock internal lock via edgetpu_wakelock_lock().
+ */
+bool edgetpu_wakelock_inc_event_locked(struct edgetpu_wakelock *wakelock,
+ enum edgetpu_wakelock_event evt);
/*
* Decreases the event counter of @evt by one.
*
@@ -96,6 +104,13 @@ bool edgetpu_wakelock_dec_event(struct edgetpu_wakelock *wakelock,
enum edgetpu_wakelock_event evt);
/*
+ * A version of edgetpu_wakelock_dec_event() for callers that already hold
+ * the wakelock internal lock via edgetpu_wakelock_lock().
+ */
+bool edgetpu_wakelock_dec_event_locked(struct edgetpu_wakelock *wakelock,
+ enum edgetpu_wakelock_event evt);
+
+/*
 * Holds the internal lock of @wakelock. Fields in @wakelock are protected
 * while this lock is held.
*
diff --git a/drivers/edgetpu/edgetpu.h b/drivers/edgetpu/edgetpu.h
index 6455d5f..93d7afa 100644
--- a/drivers/edgetpu/edgetpu.h
+++ b/drivers/edgetpu/edgetpu.h
@@ -41,12 +41,18 @@ typedef __u32 edgetpu_map_flag_t;
#define EDGETPU_MAP_ATTR_PBHA_SHIFT 5
#define EDGETPU_MAP_ATTR_PBHA_MASK 0xf
+/* External mailbox types */
+#define EDGETPU_EXT_MAILBOX_TYPE_TZ 1
+#define EDGETPU_EXT_MAILBOX_TYPE_GSA 2
+#define EDGETPU_EXT_MAILBOX_TYPE_DSP 3
+
struct edgetpu_map_ioctl {
- __u64 host_address;
+ __u64 host_address; /* user-space address to be mapped */
__u64 size; /* size of mapping in bytes */
__u64 device_address; /* returned TPU VA */
/*
- * Flags indicating mapping attribute requests from the runtime.
+ * Flags OR'ed from EDGETPU_MAP_* values, indicating mapping attribute
+ * requests from the runtime.
* Set RESERVED bits to 0 to ensure backwards compatibility.
*
* Bitfields:
@@ -78,17 +84,31 @@ struct edgetpu_map_ioctl {
/*
* Index of die in a device group. The index is decided by the order of
* joining the group, with value from zero to (# dies in group) - 1.
- * Index 0 for the master die in the group.
+ * Index 0 for the leader die in the group.
*
* This field is ignored unless EDGETPU_MAP_NONMIRRORED is passed to
- * flags.
+ * @flags.
*/
__u32 die_index;
};
#define EDGETPU_IOCTL_BASE 0xED
-/* Map host buffer to TPU. */
+/*
+ * Map a host buffer to TPU.
+ *
+ * This operation can be performed without acquiring the wakelock. This
+ * characteristic holds for all mapping / un-mapping ioctls.
+ *
+ * On success, @device_address is set, and the TPU can afterwards access the
+ * content of @host_address through @device_address.
+ *
+ * EINVAL: If the group is not finalized.
+ * EINVAL: If size equals 0.
+ * EINVAL: (for EDGETPU_MAP_NONMIRRORED case) If @die_index exceeds the number
+ * of clients in the group.
+ * EINVAL: If the target device group is disbanded.
+ */
#define EDGETPU_MAP_BUFFER \
_IOWR(EDGETPU_IOCTL_BASE, 0, struct edgetpu_map_ioctl)
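
As a usage sketch of the contract documented above: the runtime passes a host buffer in and reads the TPU VA back. The helper name is illustrative, @fd is assumed to be an open device fd in a finalized group, and flags = 0 is assumed to request the default (mirrored) mapping per the @die_index comment:

#include <stdint.h>
#include <sys/ioctl.h>
#include "edgetpu.h"

/* Sketch: map @buf into the TPU; returns the TPU VA, or 0 on error. */
static __u64 map_host_buffer(int fd, void *buf, __u64 len)
{
	struct edgetpu_map_ioctl map = {
		.host_address = (__u64)(uintptr_t)buf,
		.size = len,
		/* 0 = default (mirrored) mapping; reserved bits must be 0. */
		.flags = 0,
	};

	if (ioctl(fd, EDGETPU_MAP_BUFFER, &map))
		return 0;
	return map.device_address;
}
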
@@ -102,6 +122,8 @@ struct edgetpu_map_ioctl {
*
* Note: Only the SKIP_CPU_SYNC flag is considered, other bits in @flags are
* fetched from the kernel's record.
+ *
+ * EINVAL: If the requested @device_address is not found.
*/
#define EDGETPU_UNMAP_BUFFER \
_IOW(EDGETPU_IOCTL_BASE, 4, struct edgetpu_map_ioctl)
@@ -118,7 +140,12 @@ struct edgetpu_event_register {
__u32 eventfd;
};
-/* Set eventfd for notification of events from kernel to the device group. */
+/*
+ * Set eventfd for notification of events from kernel to the device group.
+ *
+ * EINVAL: If @event_id is not one of EDGETPU_EVENT_*.
+ * EBADF, EINVAL: If @eventfd is not a valid event file descriptor.
+ */
#define EDGETPU_SET_EVENTFD \
_IOW(EDGETPU_IOCTL_BASE, 5, struct edgetpu_event_register)
@@ -145,19 +172,29 @@ struct edgetpu_mailbox_attr {
* Create a new device group with the caller as the master.
*
* EINVAL: If the caller already belongs to a group.
- * EINVAL: If @cmd/resp_queue_size equals 0.
- * EINVAL: If @sizeof_cmd/resp equals 0.
+ * EINVAL: If @cmd_queue_size or @resp_queue_size equals 0.
+ * EINVAL: If @sizeof_cmd or @sizeof_resp equals 0.
* EINVAL: If @cmd_queue_size * 1024 / @sizeof_cmd >= 1024, this is a hardware
* limitation. Same rule for the response sizes pair.
*/
#define EDGETPU_CREATE_GROUP \
_IOW(EDGETPU_IOCTL_BASE, 6, struct edgetpu_mailbox_attr)
-/* Join the calling fd to the device group of the supplied fd. */
+/*
+ * Join the calling fd to the device group of the supplied fd.
+ *
+ * EINVAL: If the caller already belongs to a group.
+ * EINVAL: If the supplied FD is not for an open EdgeTPU device file.
+ */
#define EDGETPU_JOIN_GROUP \
_IOW(EDGETPU_IOCTL_BASE, 7, __u32)
-/* Finalize the device group with the caller as the master. */
+/*
+ * Finalize the device group with the caller as the leader.
+ *
+ * EINVAL: If the dies in this group are not allowed to form a device group.
+ * ETIMEDOUT: If the handshake with TPU firmware times out.
+ */
#define EDGETPU_FINALIZE_GROUP \
_IO(EDGETPU_IOCTL_BASE, 8)
@@ -168,7 +205,12 @@ struct edgetpu_mailbox_attr {
#define EDGETPU_PERDIE_EVENT_LOGS_AVAILABLE 0x1000
#define EDGETPU_PERDIE_EVENT_TRACES_AVAILABLE 0x1001
-/* Set eventfd for notification of per-die events from kernel. */
+/*
+ * Set eventfd for notification of per-die events from kernel.
+ *
+ * EINVAL: If @event_id is not one of EDGETPU_PERDIE_EVENT_*.
+ * EBADF, EINVAL: If @eventfd is not a valid eventfd.
+ */
#define EDGETPU_SET_PERDIE_EVENTFD \
_IOW(EDGETPU_IOCTL_BASE, 9, struct edgetpu_event_register)
@@ -189,11 +231,11 @@ struct edgetpu_sync_ioctl {
* device address returned by EDGETPU_MAP_BUFFER.
*/
__u64 device_address;
- /* size in bytes to be sync'ed */
+ /* Size in bytes to be sync'ed. */
__u64 size;
/*
- * offset in bytes at which the sync operation is to begin from the
- * start of the buffer
+ * Offset in bytes at which the sync operation is to begin from the
+ * start of the buffer.
*/
__u64 offset;
/*
@@ -231,13 +273,9 @@ struct edgetpu_sync_ioctl {
_IOW(EDGETPU_IOCTL_BASE, 16, struct edgetpu_sync_ioctl)
struct edgetpu_map_dmabuf_ioctl {
- /*
- * Offset within the dma-buf to be mapped in bytes.
- *
- * Must be page-aligned.
- */
+ /* Deprecated; pass 0 to keep compatibility. */
__u64 offset;
- /* Size to be mapped in bytes. */
+ /* Ignored; the entire dma-buf is mapped. */
__u64 size;
/*
* Returned TPU VA.
@@ -268,8 +306,8 @@ struct edgetpu_map_dmabuf_ioctl {
* On success, @device_address is set and the syscall returns zero.
*
* EINVAL: If @offset is not page-aligned.
- * EINVAL: If @size is zero.
- * EINVAL: If @die_index exceeds the number of clients in the group.
+ * EINVAL: (for EDGETPU_MAP_NONMIRRORED case) If @die_index exceeds the number
+ * of clients in the group.
* EINVAL: If the target device group is disbanded.
*/
#define EDGETPU_MAP_DMABUF \
@@ -278,9 +316,8 @@ struct edgetpu_map_dmabuf_ioctl {
* Un-map address previously mapped by EDGETPU_MAP_DMABUF.
*
* Only fields @die_index and @device_address in the third argument will be
- * used, other fields such as @size and @offset will be fetched from the
- * kernel's internal records. If the buffer was requested as
- * EDGETPU_MAP_MIRRORED, @die_index is ignored as well.
+ * used, other fields will be fetched from the kernel's internal records. If the
+ * buffer was requested as EDGETPU_MAP_MIRRORED, @die_index is ignored as well.
*
* EINVAL: If @device_address is not found.
* EINVAL: If the target device group is disbanded.
@@ -418,8 +455,13 @@ struct edgetpu_sync_fence_status {
* Release the current client's wakelock, allowing firmware to be shut down if
* no other clients are active.
* Groups and buffer mappings are preserved.
- * WARNING: Attempts to access any mapped CSRs before re-acquiring the wakelock
- * may crash the system.
+ *
+ * The mmap operations listed below are not allowed while the client's
+ * wakelock is released. If the runtime still holds any of these mmap'ed
+ * buffers, this ioctl returns EAGAIN and the wakelock is not released.
+ * - EDGETPU_MMAP_CSR_OFFSET
+ * - EDGETPU_MMAP_CMD_QUEUE_OFFSET
+ * - EDGETPU_MMAP_RESP_QUEUE_OFFSET
*/
#define EDGETPU_RELEASE_WAKE_LOCK _IO(EDGETPU_IOCTL_BASE, 25)
@@ -441,9 +483,7 @@ struct edgetpu_fw_version {
* When there is an attempt to load firmware, its version numbers are recorded
* by the kernel and will be returned on the following EDGETPU_FIRMWARE_VERSION
* calls. If the latest firmware attempted to load didn't exist or had an
- * invalid header, this call returns -ENODEV.
- *
- * Returns 0 on success, -errno on error.
+ * invalid header, this call returns ENODEV.
*/
#define EDGETPU_FIRMWARE_VERSION \
_IOR(EDGETPU_IOCTL_BASE, 27, struct edgetpu_fw_version)
@@ -476,4 +516,33 @@ struct edgetpu_device_dram_usage {
#define EDGETPU_GET_DRAM_USAGE \
_IOR(EDGETPU_IOCTL_BASE, 29, struct edgetpu_device_dram_usage)
+/*
+ * struct edgetpu_ext_mailbox
+ * @client_id: Client identifier (may not be needed depending on type)
+ * @attrs: Array of mailbox attributes (pointer to
+ * edgetpu_mailbox_attr, may be NULL depending on type)
+ * @type: One of the EDGETPU_EXT_MAILBOX_xxx values
+ * @count: Number of mailboxes to acquire
+ */
+struct edgetpu_ext_mailbox {
+ __u64 client_id;
+ __u64 attrs;
+ __u32 type;
+ __u32 count;
+};
+
+/*
+ * Acquire a chip-specific mailbox that is not directly managed by the TPU
+ * runtime. This can be a secure mailbox or a device-to-device mailbox.
+ */
+#define EDGETPU_ACQUIRE_EXT_MAILBOX \
+ _IOW(EDGETPU_IOCTL_BASE, 30, struct edgetpu_ext_mailbox)
+
+/*
+ * Release a chip-specific mailbox that is not directly managed by the TPU
+ * runtime. This can be a secure mailbox or a device-to-device mailbox.
+ */
+#define EDGETPU_RELEASE_EXT_MAILBOX \
+ _IOW(EDGETPU_IOCTL_BASE, 31, struct edgetpu_ext_mailbox)
+
#endif /* __EDGETPU_H__ */
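
To illustrate the two new ioctls, here is a minimal user-space sketch that requests one DSP mailbox and releases it again. The device node path is an assumption, and @client_id/@attrs are left zero since their interpretation is chip-specific; on Janeiro the stub handlers below currently fail this with ENODEV:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include "edgetpu.h"

int main(void)
{
	struct edgetpu_ext_mailbox req;
	int fd = open("/dev/edgetpu0", O_RDWR); /* node name is an assumption */

	if (fd < 0) {
		perror("open");
		return 1;
	}
	memset(&req, 0, sizeof(req));
	req.type = EDGETPU_EXT_MAILBOX_TYPE_DSP;
	req.count = 1; /* client_id and attrs stay 0: chip-specific */
	if (ioctl(fd, EDGETPU_ACQUIRE_EXT_MAILBOX, &req))
		perror("EDGETPU_ACQUIRE_EXT_MAILBOX");
	else if (ioctl(fd, EDGETPU_RELEASE_EXT_MAILBOX, &req))
		perror("EDGETPU_RELEASE_EXT_MAILBOX");
	close(fd);
	return 0;
}
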
diff --git a/drivers/edgetpu/janeiro-device.c b/drivers/edgetpu/janeiro-device.c
index 7deca91..7016de3 100644
--- a/drivers/edgetpu/janeiro-device.c
+++ b/drivers/edgetpu/janeiro-device.c
@@ -89,3 +89,22 @@ void edgetpu_chip_handle_reverse_kci(struct edgetpu_dev *etdev,
etdev_warn(etdev, "%s: Unrecognized KCI request: %u\n", __func__,
resp->code);
}
+
+/* TODO: This would be a good place to handle AoC/DSP mailboxes */
+
+int edgetpu_chip_acquire_ext_mailbox(struct edgetpu_client *client,
+ struct edgetpu_ext_mailbox *ext_mbox)
+{
+ return -ENODEV;
+}
+
+int edgetpu_chip_release_ext_mailbox(struct edgetpu_client *client,
+ struct edgetpu_ext_mailbox *ext_mbox)
+{
+ return -ENODEV;
+}
+
+void edgetpu_chip_client_remove(struct edgetpu_client *client)
+{
+}
diff --git a/drivers/edgetpu/janeiro-firmware.c b/drivers/edgetpu/janeiro-firmware.c
index 07f4f00..966d539 100644
--- a/drivers/edgetpu/janeiro-firmware.c
+++ b/drivers/edgetpu/janeiro-firmware.c
@@ -11,6 +11,7 @@
#include <linux/iommu.h>
+#include "edgetpu.h"
#include "edgetpu-config.h"
#include "edgetpu-firmware.h"
#include "edgetpu-internal.h"
@@ -20,6 +21,37 @@
#include "janeiro-platform.h"
#include "mobile-firmware.h"
+#define MAX_IOMMU_MAPPINGS 26
+
+#define CONFIG_TO_SIZE(a) ((1 << ((a) & 0xFFF)) << 12)
+
+struct iommu_mapping {
+ /* TPU virt address */
+ __u32 virt_address;
+ /*
+ * Packs a 4KB-aligned physical address (upper 20 bits) and a size given
+ * as a page order (lower 12 bits) into a single 32-bit value.
+ */
+ __u32 image_config_value;
+};
+
+struct janeiro_image_config {
+ __u32 carveout_base;
+ __u32 firmware_base;
+ __u32 firmware_size;
+ struct edgetpu_fw_version firmware_versions;
+ __u32 config_version;
+ __u32 privilege_level;
+ __u32 remapped_region_start;
+ __u32 remapped_region_end;
+ __u32 num_iommu_mapping;
+ struct iommu_mapping mappings[MAX_IOMMU_MAPPINGS];
+} __packed;
+
+struct janeiro_firmware_data {
+ __u32 num_mapping;
+ struct iommu_mapping mappings[MAX_IOMMU_MAPPINGS];
+};
/*
* Sets the reset state of the R52 core.
* @val: 1 to put the core in reset state, 0 to release core from reset state.
@@ -29,8 +61,38 @@ static void r52_reset(struct edgetpu_dev *etdev, u64 val)
edgetpu_dev_write_32_sync(etdev, EDGETPU_REG_RESET_CONTROL, val);
}
+static int janeiro_firmware_after_create(struct edgetpu_firmware *et_fw)
+{
+ struct janeiro_firmware_data *data;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ edgetpu_firmware_set_data(et_fw, data);
+ return 0;
+}
+
static void janeiro_firmware_before_destroy(struct edgetpu_firmware *et_fw)
{
+ struct janeiro_firmware_data *data;
+ u32 i, tpu_addr, size;
+ struct edgetpu_dev *etdev = et_fw->etdev;
+
+ /* TODO(b/189906347): Remove when GSA/TZ support is available. */
+ /* Remove mappings created by setup_buffer() */
+ data = edgetpu_firmware_get_data(et_fw);
+
+ if (data) {
+ for (i = 0; i < data->num_mapping; i++) {
+ tpu_addr = data->mappings[i].virt_address;
+ size = CONFIG_TO_SIZE(data->mappings[i].image_config_value);
+ edgetpu_mmu_remove_translation(etdev, tpu_addr, size,
+ EDGETPU_CONTEXT_KCI);
+ }
+ edgetpu_firmware_set_data(et_fw, NULL);
+ kfree(data);
+ }
r52_reset(et_fw->etdev, 1);
}
@@ -70,7 +132,9 @@ static int janeiro_firmware_setup_buffer(struct edgetpu_firmware *et_fw,
{
int ret = 0;
void *image_vaddr;
- struct mobile_image_config *image_config;
+ u32 tpu_addr, phys_addr, size, i;
+ struct janeiro_image_config *image_config;
+ struct janeiro_firmware_data *data;
struct edgetpu_dev *etdev = et_fw->etdev;
struct janeiro_platform_dev *edgetpu_pdev =
container_of(etdev, struct janeiro_platform_dev, edgetpu_dev);
@@ -93,10 +157,48 @@ static int janeiro_firmware_setup_buffer(struct edgetpu_firmware *et_fw,
memcpy(&etdev->fw_version, &image_config->firmware_versions,
sizeof(etdev->fw_version));
+ /* TODO(b/189906347): Remove when GSA/TZ support is available. */
+ data = edgetpu_firmware_get_data(et_fw);
+ /* Remove old mappings created for previous firmware. */
+ for (i = 0; i < data->num_mapping; i++) {
+ tpu_addr = data->mappings[i].virt_address;
+ size = CONFIG_TO_SIZE(data->mappings[i].image_config_value);
+ phys_addr = (data->mappings[i].image_config_value & ~(0xFFF));
+
+ edgetpu_mmu_remove_translation(etdev, tpu_addr, size, EDGETPU_CONTEXT_KCI);
+ }
+ for (i = 0; i < image_config->num_iommu_mapping; i++) {
+ tpu_addr = image_config->mappings[i].virt_address;
+ size = CONFIG_TO_SIZE(image_config->mappings[i].image_config_value);
+ phys_addr = (image_config->mappings[i].image_config_value & ~(0xFFF));
+
+ ret = edgetpu_mmu_add_translation(etdev, tpu_addr, phys_addr, size,
+ IOMMU_READ | IOMMU_WRITE, EDGETPU_CONTEXT_KCI);
+ if (ret) {
+ etdev_err(etdev,
+ "Unable to Map: %d tpu_addr: 0x%x phys_addr: 0x%x size: 0x%x\n",
+ ret, tpu_addr, phys_addr, size);
+ goto err;
+ }
+ data->mappings[i].virt_address = tpu_addr;
+ data->mappings[i].image_config_value = image_config->mappings[i].image_config_value;
+ }
+
+ data->num_mapping = image_config->num_iommu_mapping;
+
/* Skip the header */
memcpy(image_vaddr, fw_buf->vaddr + MOBILE_FW_HEADER_SIZE,
fw_buf->used_size - MOBILE_FW_HEADER_SIZE);
-
+ memunmap(image_vaddr);
+ return 0;
+err:
+ while (i--) {
+ tpu_addr = data->mappings[i].virt_address;
+ size = CONFIG_TO_SIZE(data->mappings[i].image_config_value);
+ edgetpu_mmu_remove_translation(etdev, tpu_addr, size, EDGETPU_CONTEXT_KCI);
+ }
+ data->num_mapping = 0;
+ memunmap(image_vaddr);
return ret;
}
@@ -150,6 +252,7 @@ static int janeiro_firmware_prepare_run(struct edgetpu_firmware *et_fw,
}
static const struct edgetpu_firmware_handlers janeiro_firmware_handlers = {
+ .after_create = janeiro_firmware_after_create,
.before_destroy = janeiro_firmware_before_destroy,
.alloc_buffer = janeiro_firmware_alloc_buffer,
.free_buffer = janeiro_firmware_free_buffer,
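
A worked decode of the iommu_mapping packing used above, matching CONFIG_TO_SIZE() and the `& ~(0xFFF)` masking in setup_buffer: image_config_value 0x12345003, for example, yields phys_addr 0x12345000 and size (1 << 3) << 12 = 32 KiB. The helper names below are illustrative only; the driver open-codes these expressions:

/* Illustrative helpers only; not part of the commit. */
static inline u32 mapping_phys_addr(u32 image_config_value)
{
	/* The low 12 bits carry the size order, so mask them off. */
	return image_config_value & ~0xFFFu;
}

static inline u32 mapping_size(u32 image_config_value)
{
	/* (1 << order) pages of 4 KiB each. */
	return CONFIG_TO_SIZE(image_config_value);
}
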
diff --git a/drivers/edgetpu/janeiro-platform.c b/drivers/edgetpu/janeiro-platform.c
index 9b64972..96f8b60 100644
--- a/drivers/edgetpu/janeiro-platform.c
+++ b/drivers/edgetpu/janeiro-platform.c
@@ -146,7 +146,7 @@ janeiro_platform_unmap_reserved_region(struct janeiro_platform_dev *etpdev)
EDGETPU_CONTEXT_KCI);
}
-int edgetpu_setup_mmu(struct edgetpu_dev *etdev)
+int edgetpu_chip_setup_mmu(struct edgetpu_dev *etdev)
{
int ret;
@@ -156,6 +156,11 @@ int edgetpu_setup_mmu(struct edgetpu_dev *etdev)
return ret;
}
+void edgetpu_chip_remove_mmu(struct edgetpu_dev *etdev)
+{
+ edgetpu_mmu_detach(etdev);
+}
+
#define EDGETPU_PSM0_CFG 0x1c1880
#define EDGETPU_PSM0_START 0x1c1884
#define EDGETPU_PSM0_STATUS 0x1c1888
diff --git a/drivers/edgetpu/janeiro/config.h b/drivers/edgetpu/janeiro/config.h
index 841db2d..23950fb 100644
--- a/drivers/edgetpu/janeiro/config.h
+++ b/drivers/edgetpu/janeiro/config.h
@@ -16,8 +16,6 @@
#define EDGETPU_HAS_WAKELOCK
-#define EDGETPU_IS_IO_COHERENT
-
/*
* The TPU VA where the firmware is located.
*
diff --git a/drivers/edgetpu/mm-backport.h b/drivers/edgetpu/mm-backport.h
index 2e2f9a7..8831285 100644
--- a/drivers/edgetpu/mm-backport.h
+++ b/drivers/edgetpu/mm-backport.h
@@ -23,6 +23,10 @@
#define pin_user_pages_fast get_user_pages_fast
#define unpin_user_page put_page
+#ifndef untagged_addr
+#define untagged_addr(addr) (addr)
+#endif
+
#endif /* FOLL_PIN */
#endif /* __MM_BACKPORT_H__ */
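
On kernels old enough to lack FOLL_PIN, untagged_addr() may also be missing, so the identity fallback above keeps tagged-pointer handling compiling as a no-op. A sketch of the call-site pattern this enables, with a hypothetical helper name (the driver's real pinning path is not shown here):

/*
 * Hypothetical helper. untagged_addr() strips ARM64 TBI/MTE pointer tags
 * where the kernel supports them, and is a no-op through the fallback.
 */
static int example_pin_user_buffer(unsigned long host_addr, int nr_pages,
				   struct page **pages)
{
	host_addr = untagged_addr(host_addr);
	return pin_user_pages_fast(host_addr & PAGE_MASK, nr_pages,
				   FOLL_WRITE, pages);
}
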