author     Aurora pro automerger <aurora-pro-automerger@google.com>  2023-01-06 06:59:41 +0000
committer  davidchiang <davidchiang@google.com>  2023-01-06 08:08:49 +0000
commit     8a4b0d93e1f8b9ae23fb476eef4d43f18bc3cc1d (patch)
tree       77a55bc81515aae0827a3cdb0f2ef0d68a46f677
parent     9cc533bf4e533f90504fe355b5fcc726c1c549e0 (diff)
download   gs201-8a4b0d93e1f8b9ae23fb476eef4d43f18bc3cc1d.tar.gz
gxp: [Copybara Auto Merge] Merge branch 'gs201-u' into 'android13-gs-pixel-5.10-udc'
gxp: amalthea uses different edgetpu path Bug: 263918299 gxp: fix uninit warn of gxp_debugfs_mailbox gcip: style fix up gcip: implement noncontiguous alloc Bug: 262684159 gcip: Change hard-coded magic numbers to MACROs Bug: 257300340 gcip: Update the size encoding of image config Bug: 257300340 (repeat) gcip: unittests: implement timeout race trigger Bug: 261822585 gcip: implement reference count to the awaiter Bug: 261822585 (repeat) gcip: implement gcip_mailbox_cancel_awaiter Bug: 261822585 (repeat) gcip: fix log style in gcip domain pool gcip: Add domain pool Bug: 228907682 gcip: init ignore_seq_order of KCI gcip: Use term awaiter instead of async_resp Bug: 249642792 gcip: constantize gcip_kci_args gcip: use rate limiting for reverse KCI failure warning gcip: remote unnecessary pointer cast gcip: Check telemetry buffer size gcip: Remove redundant error gcip: support arbitrary seq order commands Bug: 247414706 gcip: Add gcip-telemetry Bug: 239374826 gcip: use pad/pap instead of llx for logging gcip: implement gcip image config utilities Bug: 243500340 gcip: Don't pass mailbox param when calling the release_async_resp_data callback Bug: 239804137 gcip: Call release_data callback instead of ops->release_async_resp_data Bug: 239804137 (repeat) gcip: fix typo 'timedout' gcip: Implement gcip-firmware.c Bug: 239637765 gcip: Free arrived or timedout, but not handled async_resp Bug: 239010918 gcip: Set base_paddr of gcip_mem_pool Bug: 236673496 gcip: Adopt gcip-mailbox to gcip-kci Bug: 236679300 gcip: Implement the generalized mailbox (gcip-mailbox.c) Bug: 236679300 (repeat) gcip: Make gcip_kci_push_cmd static Bug: 223764481 gcip: implement gcip-mem-pool Bug: 236673496 (repeat) gcip: Add KCI support Bug: 223764481 (repeat) gcip: unittests: Add initial Kconfig and Makefile Bug: 222416109 gcip: Add initial Kconfig and Makefile Bug: 222416109 (repeat) GCIP_MAIN_REV_ID: a1895a61a873d1f52896f955e09d2e263b22a0e8 gxp: bump GXP version to 1.7 gxp: remove debugfs earlier than before_remove Bug: 263830035 gxp: adopt updated gxp_client_allocate_virtual_device gxp: debugfs utilizes UCI in the MCU mode Bug: 263830035 (repeat) gxp: call gxp_client_* funcs from firmware_run debugfs Bug: 263830035 (repeat) gxp: apply clang-format to gxp-debugfs.c gxp: update the interface of execute_cmd callback Bug: 263830035 (repeat) gxp: power off core on mbox allocation failure Bug: 264184974 gxp: Set special client ID to secure VD Bug: 263685745 gxp: Allocate secure VD Bug: 263836991 gxp: Add a secure VD field to struct gxp_dev Bug: 263685535 gxp: Add 'flags' to struct gxp_virtual_device_ioctl Bug: 263836981 gxp: Remove warnings during compilation Bug: 264010198 gxp: cleanup common platform probe Bug: 263844135 gxp: amalthea use GCIP Bug: 263918299 (repeat) gxp: update Makefile for kleaf support gxp: use PMU register Bug: 263830026 gxp: map private firmware data region Bug: 261797596 gxp: introduce gxp_dma_map_iova_sgt Bug: 262825536 gxp: allocate per VD firmware data Bug: 261797596 (repeat) gxp: Correct the lpm offsets calculations Bug: 263239197 gxp: Move shareability config with block power configuration Bug: 263310466 gxp: Enable new telemetry Bug: 247955426 gxp: enable dynamic slice index Bug: 255706432 gxp: Add support for core_boot insmod parameter Bug: 251612313 gxp: Add first_open support to VMBox Bug: 263296400 gxp: new UCI ioctl interfaces Bug: 261667704 gxp: remove unused virt_core in vd_suspend gxp: move system memory rates to config files Bug: 262964769 gxp: increase shared buffer size from 192K to 512K Bug: 
262686729 gxp: introduce wait_queue for waiting responses Bug: 261822585 (repeat) gxp: rename queue to dest_queue Bug: 261822585 (repeat) gxp: adopt gxp-mcu-platform Bug: 249918544 gxp: revert tpu mapped checking logic gxp: Add telemetry buffer size fields to specs ioctl Bug: 259404526 gxp: fix error handling of gxp_map_tpu_mbx_queue gxp: fix logic of checking tpu_file when destroying clients gxp: europa: add mailbox array csrs Bug: 261150717 gxp: Implement mmap for new telemetry flow. Bug: 259404466 gxp: add 'features' field to specs Bug: 256073820 gxp: fix up reserved fields in gxp_specs_ioctl gxp: Add missing include Bug: 261550238 gxp: Apply gcip domain pool Bug: 228907682 (repeat) gxp: Move mailbox CSR based calls to chip specific file Bug: 261150717 (repeat) gxp: Make mailbox csr offset chip specific Bug: 261150717 (repeat) gxp: Allocate telemetry buffers during init Bug: 247955426 (repeat) gxp: introduce gxp-mcu-fs Bug: 249918544 (repeat) gxp: move client_has_vd check to client.c Bug: 249918544 (repeat) gxp: use entry point in elf header instead of fixed IOVA Bug: 260647260 gxp: Add _legacy postfix to telemetry function Bug: 247955426 (repeat) gxp: Refactor LPM to accommodate new chips Bug: 254168276 gxp: introduce gxp_pm_update_pm_qos Bug: 239638427 gxp: map tpu mbx queue even in MCU mode Bug: 237624453 gxp: fetch segment boundaries from ELF header Bug: 259197130 gxp: add ifdef guard for edgetpu_ext interface Bug: 259357745 gxp: set VD credit to 256 Bug: 257137038 gxp: Indroduce config-pwr-state.h Bug: 258154981 gxp: add cmd credit to virtual device Bug: 257137038 (repeat) gxp: fix the default domain cache gxp: revert ignore_seq_order settings gxp: Map TPU mbx only in the direct mode Bug: 249440369 gxp: Forward the thermal request to MCU when possible Bug: 255613035 gxp: Introduce gxp_wakelock_acquire_if_powered Bug: 255613035 (repeat) gxp: Add GXP_HAS_MCU config gxp: Roll back aur_power_state2rate changes in thermal gxp: Use aur_power_state2rate for thermal gxp: Fix reader/writer lock for vd allocation Bug: 257049139 gxp: Add a static debug pointer to driver state Bug: 255451381 gxp: fixup headers in platform.c gxp: remove obsolete ACPI support Bug: b/230701592 gxp: Add namespace import for DMA_BUF for 5.16 Bug: 232003048 gxp: Handle Core TelemetryRead rcki Bug: 249096610 gxp: Add GXP_HAS_LAP to config Bug: 249227451 gxp: remove explicit values of LPM PSM enum gxp: temporarily set slice_index to 0 Bug: 255706432 (repeat) Bug: 242011394 gxp: do power votes in VD wakelock acquisition Bug: 253990922, 253555787 gxp: Remove redundant state settings Bug: 189396709 gxp: LPM interfaces accepts enum psm Bug: 254168276 (repeat) gxp: Reduce mailbox timeout to 1s Bug: 250265514 gxp: Adopt awaiter of GCIP Bug: 249642792 (repeat) gxp: disable core interrupts before core releasing Bug: 252915360 gxp: remove response consuming callbacks Bug: 245455607 gxp: abstract GCIP send/put funcs from gxp-mailbox Bug: 245455607 (repeat) gxp: re-purpose GXP_LEGACY_MAILBOX Bug: 245455607 (repeat) gxp: use const args for mailbox init gxp: bump uapi version to 1.5 gxp: remove unnecessary coherent buf cast gxp: fix typo an core gxp: Call MCU telemetry irq handler when KCI irq is fired BUg: 237099103 gxp: move DVFS macro out from lpm.h gxp: use BIT() for shifting operations gxp: introduce coherent buffer data structure Bug: 248436918 gxp: add europa platform driver Bug: 235918085 gxp: address review comments on gdomain alloc gxp: store client IDs in gxp_virtual_device Bug: 246520556 gxp: clang-format gxp.h gxp: 
introduce {after,before}_{map,unmap}_tpu_mbx_queue callbacks Bug: 246520556 (repeat) gxp: call GCIP consume funcs from gxp-mailbox Bug: 245455607 (repeat) gxp: manage gcip_{mailbox,kci} from gxp_mailbox Bug: 245455607 (repeat) gxp: pass data size and wrap bit to gxp_mailbox_args Bug: 245455607 (repeat) gxp: pass GCIP operators to gxp_mailbox Bug: 245455607 (repeat) gxp: introduce enum gxp_mailbox_type Bug: 245455607 (repeat) gxp: call exposed response consuming funcs of gxp-mailbox-impl.h Bug: 245455607 (repeat) gxp: expose response consuming functions to gxp-mailbox-impl.h Bug: 245455607 (repeat) gxp: Add MCU telemetry support Bug: 237099103 (repeat) gxp: Add chip specific mmap handler Bug: 237099103 (repeat) gxp: Prefix the telemetry with core Bug: 237099103 (repeat) gxp: Rename gxp-telemetry to gxp-core-telemetry Bug: 237099103 (repeat) gxp: add wrappers around iommu domain Bug: 248436918 (repeat) gxp: introduce gxp-mailbox-impl.[c|h] Bug: 237908534 gxp: Hide Amalthea only things of gxp-mailbox from Callisto Bug: 237908534 (repeat) gxp: introduce GXP_LEGACY_MAILBOX Bug: 245455607 (repeat) gxp: return error when gxp_fw_data_create_app fails Bug: 249402363 gxp: fix memory leak on VD allocate resp queues Bug: 247662695 gxp: Wait for PS0 before powering down BLK_AUR Bug: 247273478 gxp: Enable best-fit IOVA allocator Bug: 241190719 gxp: remove compat ioctl interfaces gxp: remove cache invalidate of telemetry buffers Bug: 247772036 Bug: 245238253 gxp: cache invalidate on signal telemetry eventfd Bug: 247772036 (repeat) gxp: fix passing a NULL pointer to the gxp_vd_block_ready Bug: 247660434 gxp: Sort the GXP_IOCTL defines gxp: accept finalizing non-initalized modules Bug: 245690393 gxp: introduce IS_GXP_TEST Bug: 245690393 (repeat) gxp: define chip_rev module param gxp: sync with the fake core firmware Bug: 245270826 gxp: invalidate cache before fetching responses Bug: 242326098 gxp: make load_dsp_firmware return error properly Bug: 245270826 (repeat) gxp: support VD suspend/resume in Zuma direct mode Bug: 244699959 gxp: bump version to 1.4 Revert "gxp: Update gxp.h to reflect OFF being allowed in mbox IOCTL" Revert "gxp: remove GXP_POWER_STATE_OFF check in mbox cmd" gxp: Removing printing the log buff Bug: 244270315 gxp: support both coherent/non-coherent mappings Bug: 205831251 gxp: fix the unused warning on gem5 platform gxp: Update gxp.h to reflect OFF being allowed in mbox IOCTL Bug: 243737206 gxp: add wakelock_{after,before}_blk_{on,off} Bug: 241044848 gxp: Attach device after iommu domain allocated Bug: 243328707 gxp: Introduce {after,before}_vd_block_{ready,unready} callbacks Bug: 241057541 gxp: Rollback {after,before}_{allocate,release}_vd callbacks Bug: 241057541 (repeat) gxp: Introduce the `gxp_vd_block_unready` function. 
Bug: 241057541 (repeat) gxp: Make the `gxp_vd_stop` can work with READY state Bug: 241057541 (repeat) gxp: wrap power states into a struct Bug: 242155424 gxp: fix various typos gxp: Protect telemetry status in vd alloc/release Bug: 242145264 gxp: Fail to allocate VD if initial mapping fails Bug: 242145264 (repeat) gxp: Use one gxp_mailbox_create_manager Bug: 242939166 gxp: Extract mailbox manager from gxp-mailbox.h Bug: 242939166 (repeat) gxp: Implement the mailbox manager operator setting function of UCI Bug: 242178774 gxp: Add init functions for the legacy mailbox and DCI Bug: 242178774 (repeat) gxp: Introduce `GXP_HAS_DCI` definition Bug: 242964051 gxp: fix error handling on probing gxp: append internal headers after EXTRA_CFLAGS Bug: 242960640 gxp: remove GXP_POWER_STATE_OFF check in mbox cmd gxp: Add power states requests callback Bug: 241782481 gxp: Allocate resources on VD creation Bug: 241206240 gxp: handle VD allocation in gxp-client.c Bug: 241206240 (repeat) gxp: handle wakelock request in gxp-client.c Bug: 241206240 (repeat) gxp: gxp_mapping_create accepts domain Bug: 240415495 gxp: split out gxp_vd & gxp_fw module init/destroy Bug: 193180931 gxp: fixup DVFS requests Bug: 242785262 gxp: Only (un)map telem if enabled on vd start/stop Bug: 241090227 gxp: release TPU file after VD stop Bug: 241085004 gxp: increase the ref to TPU FD on TPU buffer map Bug: 241085004 (repeat) gxp: misc_register as the last step of probe Bug: 241718472 gxp: remove unneeded devm_kfree Bug: 241517691 gxp: Cancel last worker when power req queue is full Bug: 240533763 gxp: Map slice of shared buffer to IOMMU domain instead of whole of it Bug: 240128962 gxp: Allocate slice_index when allocating virtual device Bug: 240128962 (repeat) gxp: Add ID allocator for the slice index of shared buffer Bug: 240128962 (repeat) gxp: Introduce slice_index of the virtual device Bug: 240128962 (repeat) gxp: unittests: Introduce gxp-debug-dump-test.c and enable debug dump test Bug: 241086197 Bug: 234892966 gxp: remove domain attach on VD creation Bug: 241057799 gxp: Add `num_cores` in command ioctls for UCI Bug: 232310140, 237660465 gxp: Disallow /d/gxp/firmware_run if VDs are running Bug: 240764261 gxp: remove unneeded warnings from in PM gxp: Review feedback from 7/21 release Bug: 240315433 gxp: work around thermal_cdev_update Bug: 235433985 gxp: remove virt_core_list from gxp_dmabuf_map Bug: 240661491 gxp: remove virt_core_list from gxp_mapping Bug: 240661491 (repeat) gxp: add gxp_vd_phys_core_list gxp: remove ZEBU_SYSMMU_WORKAROUND gxp: Replace (resp_queue, eventfd) params with (client, virt_core) of async ops gxp: remove virt_core_list from tpu_mbx_desc gxp: check VD is non-null in gxp_client_destroy gxp: remove core_domains from vd Bug: 240415495 (repeat) gxp: remove gxp_vd_phys_core_to_virt_core gxp: clean up gxp-dma.h descriptions Bug: 240415495 (repeat) gxp: dma_map_tpu_buffer accepts domain Bug: 240415495 (repeat) gxp: gxp_dma_alloc_coherent takes domain Bug: 240415495 (repeat) gxp: map_allocated_coherent_buffer takes domain Bug: 240415495 (repeat) gxp: dma_map_dmabuf accepts domain as the arg Bug: 240415495 (repeat) gxp: dma_map_sg accepts domain as arg Bug: 240415495 (repeat) gxp: dma_map_core_resources accepts domain Bug: 240415495 (repeat) gxp: dma_domain_attach_device accepts domain Bug: 240415495 (repeat) gxp: Add callbacks for after allocating / before releasing the vd Bug: 237955391 gxp: program SSMT to the first domain Bug: 240415495 (repeat) gxp: deprecate virtual_core_list Bug: 240514360 gxp: map 
telemetry buffers to all domains Bug: 240415495 (repeat) gxp: map all core resources to all domains in VD Bug: 240415495 (repeat) gxp: remove the core parameter from dbg dump buf gxp: Map telemetry buffs before writing descriptor Bug: 239640408 gxp: Decide generating interrupt after allocating mailbox according to interface gxp: Add common operators of gcip-mailbox-ops to the gxp-mailbox-driver gxp: Merge resp_queue wait macro into the gxp-mailbox.h gxp: fix typos in comments Bug: 240315433 (repeat) gxp: rename callisto.ko to gxp.ko Bug: 240212593 gxp: Cleanup FW data on VD creation failure Bug: 240192343 gxp: remove the core number patch on gem5 platform Bug: 239908693 gxp: edgetpu symbol path according to target chip Bug: 239766974 gxp: warn deprecate pwr states only once Bug: 237337595 gxp: don't modify clkmux state during core booting Bug: 238960149 gxp: Map mailboxes for IOVA of KCI/UCI Bug: 228401855 gxp: Introduce gxp-usage-stats.c and complete update_usage KCI command Bug: 237967242 gxp: Introduce gxp-kci.c Bug: 228401855 (repeat) gxp: Introduce gxp-dci.c Bug: 236332988 gxp: Remove the dependency of gcip-mailbox from Amalthea Bug: 237908672 gxp: Introduce gxp_mailbox_args Bug: 236332988 (repeat) gxp: Introduce gxp_mailbox_ops Bug: 236332988 (repeat) gxp: Apply gcip-mailbox to gxp-mailbox Bug: 236679300 (repeat) gxp: Apply abstracted mailbox functions Bug: 237908672 (repeat) gxp: Abstracts mailbox related functions into the mailbox manager Bug: 237908672 (repeat) gxp: temporarily attach a domain while VD creating Bug: 232310140 gxp: add interface to override ioctl handlers gxp: implement UCI Bug: 232310140 (repeat) gxp: fix OOT build for amalthea gxp: increase the waiting time for cores booting Bug: 237378056 gxp: fix deadlock on power states queue full Bug: 236087752 gxp: parallelize the firmware startup process Bug: 207036666 gxp: Fix bug when clearing FW buffers on auth fail Bug: 237789581 gxp: firmware load should use configured name gxp: add gcip include for GKI kernel gxp: deprecate NON_AGGRESSOR / add LOW_FREQ_CLKMUX Bug: 237337595 (repeat) Bug: 237378056 (repeat) gxp: Disable telemetry before free Bug: 235771175 gxp: hold mmap lock around call to find_extend_vma Bug: 237404338 gxp: Don't change doorbell mask for running cores Bug: 235447772 gxp: compile gcip as an object Bug: 234674944 gxp: map shared buffer region Bug: 237262124 gxp: add gcip support to Makefile Bug: 234674944 (repeat) gxp: move firmware default name to chip config Bug: 234261504 gxp: Rename gxp-hw-mailbox-driver.c to gxp-mailbox-driver.c Bug: 236431904 gxp: Move increasing queue head / tail functions to gxp-hw-mailbox-driver.c Bug: 236431904 (repeat) gxp: Move setting queue head / tail functions to gxp-hw-mailbox-driver.c Bug: 236431904 (repeat) gxp: Move circ queue cnt / inc functions to gxp-hw-mailbox-driver.c Bug: 236431904 (repeat) gxp: Apply clang-format on the mailbox sources Bug: 236431904 (repeat) gxp: propagate GXP_PLATFORM option to Kbuild gxp: optional chip scratchpad region gxp: gxp-platform adopt common-platform.c Bug: 232894415 gxp: correct MAILBOX_DEVICE_INTERFACE_OFFSET for gem5 gxp: define ZEBU_SYSMMU_WORKAROUND in GEM5 and ZEBU_IP platform gxp: adjust Makefile for gem5 & kernel 5.13 gxp: add a parameter to specify callisto mode Bug: 235193368 gxp: introduce GXP_NUM_MAILBOXES config Bug: 235173180 gxp: remove redundant checks for mailbox mgr gxp: remove unused DMA functions Bug: 235212179 gxp: avoid holding semaphore during mmap Bug: 232183143 gxp: move iova.h to config files Bug: 
235173809 gxp: Remove CONFIG_GXP_TEST from the SSMT related codes Bug: 201505925 gxp: lpm enable PS1 only for core CSRs gxp: add an MCU structure Bug: 229587136 gxp: define core CSR addresses as macros Bug: 234705757 gxp: Add option to disable DSP FW auth Bug: 235135800 gxp: Fix hangs and OOB writes when auth fails Bug: 234947988 gxp: Adjust the per-core FW size from 16MB to 1MB Bug: 228277106 gxp: Add enable_debug_dump argument for insmod Bug: 234529356 gxp: Dynamically allocate memory for debug dump buffer Bug: 234529355 gxp: Trigger debug dump only when firmware is up and running Bug: 233660431 Bug: 233607168 gxp: remove support for unsigned firmware Bug: 220246540 gxp: Expose additional DVFS states in power APIs Bug: 233929549 gxp: introduce a gxp_firmware_manager Bug: 230682809 gxp: remove CONFIG_GXP_CLOUDRIPPER gitignore: ignore .repo and gcip-kernel-driver Bug: 234674944 (repeat) gxp: set VID for both SSMTs and possible SIDs gxp: move LPM_TOP_PSM to config files Bug: 232471681 gxp: move SID of core macros to config files Bug: 233989804 gxp: different mailbox offset for platforms Bug: 233887617 gxp: authenticate firmware after requesting it Bug: 232715929 gxp: dma-iommu adopts gxp-ssmt interface Bug: 233989804 (repeat) gxp: add standalone ssmt driver Bug: 233989804 (repeat) gxp: add ifdef guard to disable suspend Bug: 234096867 gxp: set VD state in vd_stop Bug: 234096867 (repeat) gxp: Add lockdep checks to gxp-telemetry.c gxp: add log on probe success Bug: 233887617 (repeat) gxp: add tgid to client tracking and suspend block reporting Bug: 230656700 Revert "gxp: send UUD request before shutting down AUR_BLK" gxp: send UUD request before shutting down AUR_BLK Bug: 233584605 gxp: add parse_dt callback for device probe Bug: 232894415 (repeat) gxp: remove unrequired CORE_SCRATCHPAD_BASE offset Bug: 233381187 gxp: Switch the boot mode storage from CSRs into the scratchpad space Bug: 233381187 (repeat) gxp: Refactor FW boot mode into its own functions Bug: 233381187 (repeat) gxp: Fix locking in gxp_telemetry_disable() Bug: 232876605 gxp: initial gxp mcu firmware support Bug: 229587136 (repeat) gxp: add callbacks for common probing / removal Bug: 232894415 (repeat) gxp: introduce gxp-common-platform.c Bug: 232894415 (repeat) gxp: remove mm-backport.h gxp: Add NULL check of doorbells and barriers allocation Bug: 232612591 gxp: enhance Makefile for future chip support Bug: 230702544 gxp: debugfs use cmu.vaddr for accessing CMU CSRs gcip: add gcip-alloc-helper.h Bug: 262684159 (repeat) gcip: Update the comments in gcip-image-config for new encoding Bug: 257300340 (repeat) gcip: add reference count to the awaiter Bug: 261822585 (repeat) gcip: introduce gcip_mailbox_cancel_awaiter Bug: 261822585 (repeat) gcip: introduce gcip_kci_offload_chip_type Bug: 260690355 gcip: add {link,unlink}_offload_vmbox KCI codes Bug: 260690355 (repeat) gcip: Add domain pool Bug: 228907682 (repeat) gcip: Add remapped_data_{start,size} to image config Bug: 257212385 gcip: image header use unsigned fields gcip: add common authenticated image format header gcip: Use term awaiter instead of async_resp Bug: 249642792 (repeat) gcip: constantize gcip_kci_args gcip: support arbitrary seq order commands Bug: 247414706 (repeat) gcip: Add gcip-telemetry Bug: 239374826 (repeat) gcip: add gcip-image-config.h Bug: 243500340 (repeat) gcip: Add linux/workqueue.h as header gcip: Remove mailbox param from the release_async_resp_data callback Bug: 239804137 (repeat) gcip: Add release_data callback to the gcip_mailbox_async_response 
Bug: 239804137 (repeat) gcip: fix typo in gcip-mailbox.h gcip: fix various typos gcip: Fix reverse KCI codes Bug: 223764481 (repeat) gcip: Introduce gcip-firmware.h Bug: 239637765 (repeat) gcip: add gcip_mem_pool_offset gcip: Add KCI codes Bug: 223764481 (repeat) gcip: Remove the variables of gcip-kci.h which are moved to gcip-mailbox.h Bug: 236679300 (repeat) gcip: Add mailbox related structures and functions (gcip-mailbox.h) Bug: 236679300 (repeat) gcip: Add getters for gcip_kci Bug: 237785687 gcip: Make gcip_kci_push_cmd static Bug: 223764481 (repeat) gcip: add gcip memory pool Bug: 236673496 (repeat) gcip: Add KCI related structures and functions Bug: 223764481 (repeat) gcip: Add mailbox macro and enum Bug: 223764481 (repeat) gcip: Add circular queue mailbox helper functions Bug: 223764481 (repeat) GCIP_HEADERS_REV_ID: 37a282fd7aad536dc4521a908468bc9557911a19 gxp: Add a static debug pointer to driver state To assist with debugging of ramdumps where the GXP driver is not in the backstack, add a static symbol containing a pointer to the driver state so it can be located quickly in memory. Bug: 255451381 (repeat) gxp: Reduce mailbox timeout to 1s Reduce GXP timeout to allow for faster notification of failure since the longest any workloads are expected to run is ~500ms. Bug: 250265514 (repeat) GitOrigin-RevId: d7c38381aeae2ecc8b3b3f84abf45f1fe26edc4b Change-Id: Id0718e8bff32a18aff796dfd7779e2d61a6c4a64
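The tail of the commit message explains the "static debug pointer" change: a file-scope symbol that points at the driver state so ramdump tooling can find struct gxp_dev even when the GXP driver is not in the backstack. A minimal sketch of that idea follows; the symbol and probe function names here are assumed, not taken from the patch.

/* Sketch only: names are illustrative, error handling trimmed. */
static struct gxp_dev *gxp_debug_pointer;

static int gxp_platform_probe(struct platform_device *pdev)
{
        struct gxp_dev *gxp = devm_kzalloc(&pdev->dev, sizeof(*gxp), GFP_KERNEL);

        if (!gxp)
                return -ENOMEM;
        /* Keep a static handle so the driver state can be located by symbol
         * address in a ramdump. */
        gxp_debug_pointer = gxp;
        /* ... remaining probe steps ... */
        return 0;
}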
-rw-r--r--  .gitignore | 2
-rw-r--r--  Makefile | 79
-rw-r--r--  amalthea/config-pwr-state.h | 41
-rw-r--r--  amalthea/config.h | 14
-rw-r--r--  amalthea/context.h | 16
-rw-r--r--  amalthea/csrs.h | 73
-rw-r--r--  amalthea/iova.h (renamed from gxp-iova.h) | 17
-rw-r--r--  amalthea/lpm.h | 139
-rw-r--r--  amalthea/mailbox-regs.h | 30
-rw-r--r--  gcip-kernel-driver/drivers/gcip/Makefile | 22
-rw-r--r--  gcip-kernel-driver/drivers/gcip/gcip-alloc-helper.c | 93
-rw-r--r--  gcip-kernel-driver/drivers/gcip/gcip-domain-pool.c | 101
-rw-r--r--  gcip-kernel-driver/drivers/gcip/gcip-firmware.c | 25
-rw-r--r--  gcip-kernel-driver/drivers/gcip/gcip-image-config.c | 220
-rw-r--r--  gcip-kernel-driver/drivers/gcip/gcip-kci.c | 525
-rw-r--r--  gcip-kernel-driver/drivers/gcip/gcip-mailbox.c | 680
-rw-r--r--  gcip-kernel-driver/drivers/gcip/gcip-mem-pool.c | 69
-rw-r--r--  gcip-kernel-driver/drivers/gcip/gcip-telemetry.c | 267
-rw-r--r--  gcip-kernel-driver/include/gcip/gcip-alloc-helper.h | 50
-rw-r--r--  gcip-kernel-driver/include/gcip/gcip-common-image-header.h | 67
-rw-r--r--  gcip-kernel-driver/include/gcip/gcip-domain-pool.h | 49
-rw-r--r--  gcip-kernel-driver/include/gcip/gcip-firmware.h | 49
-rw-r--r--  gcip-kernel-driver/include/gcip/gcip-image-config.h | 153
-rw-r--r--  gcip-kernel-driver/include/gcip/gcip-kci.h | 387
-rw-r--r--  gcip-kernel-driver/include/gcip/gcip-mailbox.h | 538
-rw-r--r--  gcip-kernel-driver/include/gcip/gcip-mem-pool.h | 71
-rw-r--r--  gcip-kernel-driver/include/gcip/gcip-telemetry.h | 123
-rw-r--r--  gsx01-mailbox-driver.c | 70
-rw-r--r--  gxp-bpm.c | 20
-rw-r--r--  gxp-client.c | 275
-rw-r--r--  gxp-client.h | 92
-rw-r--r--  gxp-common-platform.c | 2043
-rw-r--r--  gxp-config.h | 22
-rw-r--r--  gxp-core-telemetry.c | 935
-rw-r--r--  gxp-core-telemetry.h | 184
-rw-r--r--  gxp-debug-dump.c | 135
-rw-r--r--  gxp-debug-dump.h | 16
-rw-r--r--  gxp-debugfs.c | 273
-rw-r--r--  gxp-dma-iommu.c | 858
-rw-r--r--  gxp-dma.h | 403
-rw-r--r--  gxp-dmabuf.c | 21
-rw-r--r--  gxp-dmabuf.h | 10
-rw-r--r--  gxp-domain-pool.c | 109
-rw-r--r--  gxp-domain-pool.h | 27
-rw-r--r--  gxp-doorbell.c | 4
-rw-r--r--  gxp-firmware-data.c | 98
-rw-r--r--  gxp-firmware-data.h | 36
-rw-r--r--  gxp-firmware.c | 432
-rw-r--r--  gxp-firmware.h | 38
-rw-r--r--  gxp-host-device-structs.h | 34
-rw-r--r--  gxp-hw-mailbox-driver.c | 323
-rw-r--r--  gxp-internal.h | 194
-rw-r--r--  gxp-lpm.c | 80
-rw-r--r--  gxp-lpm.h | 91
-rw-r--r--  gxp-mailbox-driver.c | 511
-rw-r--r--  gxp-mailbox-driver.h | 124
-rw-r--r--  gxp-mailbox-impl.c | 787
-rw-r--r--  gxp-mailbox-impl.h | 142
-rw-r--r--  gxp-mailbox-manager.c | 32
-rw-r--r--  gxp-mailbox-manager.h | 137
-rw-r--r--  gxp-mailbox-regs.h | 29
-rw-r--r--  gxp-mailbox.c | 1015
-rw-r--r--  gxp-mailbox.h | 293
-rw-r--r--  gxp-mapping.c | 19
-rw-r--r--  gxp-mapping.h | 11
-rw-r--r--  gxp-mba-driver.c | 73
-rw-r--r--  gxp-notification.h | 2
-rw-r--r--  gxp-platform.c | 2323
-rw-r--r--  gxp-pm.c | 175
-rw-r--r--  gxp-pm.h | 101
-rw-r--r--  gxp-ssmt.c | 75
-rw-r--r--  gxp-ssmt.h | 34
-rw-r--r--  gxp-telemetry.c | 705
-rw-r--r--  gxp-telemetry.h | 135
-rw-r--r--  gxp-thermal.c | 68
-rw-r--r--  gxp-vd.c | 637
-rw-r--r--  gxp-vd.h | 169
-rw-r--r--  gxp-wakelock.c | 39
-rw-r--r--  gxp-wakelock.h | 17
-rw-r--r--  gxp.h | 1077
-rw-r--r--  mm-backport.h | 33
81 files changed, 12623 insertions, 6863 deletions
diff --git a/.gitignore b/.gitignore
index 0c053d1..ba9b0a9 100644
--- a/.gitignore
+++ b/.gitignore
@@ -7,3 +7,5 @@ modules.order
Module.symvers
.cache.mk
.thinlto-cache/
+.repo/
+/gcip-kernel-driver
diff --git a/Makefile b/Makefile
index 70342d2..21decc8 100644
--- a/Makefile
+++ b/Makefile
@@ -3,32 +3,55 @@
# Makefile for GXP driver.
#
-obj-$(CONFIG_GXP) += gxp.o
+GXP_CHIP ?= AMALTHEA
+CONFIG_$(GXP_CHIP) ?= m
+GCIP_DIR := gcip-kernel-driver/drivers/gcip
-gxp-objs += \
+obj-$(CONFIG_$(GXP_CHIP)) += gxp.o
+
+gxp-objs += \
gxp-bpm.o \
gxp-client.o \
+ gxp-core-telemetry.o \
gxp-debug-dump.o \
gxp-debugfs.o \
+ gxp-dma-iommu.o \
gxp-dmabuf.o \
gxp-domain-pool.o \
gxp-doorbell.o \
gxp-eventfd.o \
- gxp-firmware.o \
gxp-firmware-data.o \
- gxp-hw-mailbox-driver.o \
+ gxp-firmware.o \
gxp-lpm.o \
+ gxp-mailbox-manager.o \
gxp-mailbox.o \
gxp-mapping.o \
gxp-mb-notification.o \
- gxp-platform.o \
- gxp-range-alloc.o \
gxp-pm.o \
- gxp-telemetry.o \
+ gxp-range-alloc.o \
+ gxp-ssmt.o \
gxp-thermal.o \
gxp-vd.o \
gxp-wakelock.o
+ifeq ($(GXP_CHIP),AMALTHEA)
+
+gxp-objs += \
+ gsx01-mailbox-driver.o \
+ gxp-platform.o \
+ gxp-mailbox-impl.o
+
+GMODULE_PATH := $(OUT_DIR)/../google-modules
+EDGETPU_CHIP := janeiro
+
+endif
+
+ifeq ($(CONFIG_$(GXP_CHIP)),m)
+
+gxp-objs += $(GCIP_DIR)/gcip.o
+
+endif
+
KERNEL_SRC ?= /lib/modules/$(shell uname -r)/build
M ?= $(shell pwd)
@@ -44,35 +67,29 @@ endif
# If building via make directly, specify target platform by adding
# "GXP_PLATFORM=<target>"
# With one of the following values:
-# - CLOUDRIPPER
+# - SILICON
# - ZEBU
# - IP_ZEBU
-# Defaults to building for CLOUDRIPPER if not otherwise specified.
-GXP_PLATFORM ?= CLOUDRIPPER
-GXP_CHIP ?= AMALTHEA
-
-# Setup which version of the gxp-dma interface is used.
-# For gem5, need to adopt dma interface without aux domain.
-ifeq ($(GXP_PLATFORM), GEM5)
- gxp-objs += gxp-dma-iommu-gem5.o
-else
- gxp-objs += gxp-dma-iommu.o
-endif
-
-ccflags-y += -DCONFIG_GXP_$(GXP_PLATFORM) -DCONFIG_$(GXP_CHIP)=1 \
- -I$(M)/include -I$(srctree)/drivers/gxp/include
+# - GEM5
+# Defaults to building for SILICON if not otherwise specified.
+GXP_PLATFORM ?= SILICON
-KBUILD_OPTIONS += CONFIG_GXP=m GXP_CHIP=AMALTHEA
+gxp-flags := -DCONFIG_GXP_$(GXP_PLATFORM) -DCONFIG_$(GXP_CHIP)=1 \
+ -I$(M)/include -I$(M)/gcip-kernel-driver/include \
+ -I$(srctree)/$(M)/include \
+ -I$(srctree)/$(M)/gcip-kernel-driver/include \
+ -I$(srctree)/drivers/gxp/include
+ccflags-y += $(EXTRA_CFLAGS) $(gxp-flags)
-ifdef CONFIG_GXP_TEST
-subdir-ccflags-y += -Wall -Werror -I$(srctree)/drivers/gxp/include
-obj-y += unittests/
-include $(srctree)/drivers/gxp/unittests/Makefile.include
-$(call include_test_path, $(gxp-objs))
-endif
+KBUILD_OPTIONS += GXP_CHIP=$(GXP_CHIP) GXP_PLATFORM=$(GXP_PLATFORM)
# Access TPU driver's exported symbols.
-KBUILD_EXTRA_SYMBOLS += ../google-modules/edgetpu/janeiro/drivers/edgetpu/Module.symvers
+EXTRA_SYMBOLS += $(GMODULE_PATH)/edgetpu/$(EDGETPU_CHIP)/drivers/edgetpu/Module.symvers
-modules modules_install clean:
+modules modules_install:
+ $(MAKE) -C $(KERNEL_SRC) M=$(M)/$(GCIP_DIR) gcip.o
+ $(MAKE) -C $(KERNEL_SRC) M=$(M) W=1 $(KBUILD_OPTIONS) \
+ EXTRA_CFLAGS="$(EXTRA_CFLAGS)" KBUILD_EXTRA_SYMBOLS="$(EXTRA_SYMBOLS)" $(@)
+clean:
+ $(MAKE) -C $(KERNEL_SRC) M=$(M)/$(GCIP_DIR) $(@)
$(MAKE) -C $(KERNEL_SRC) M=$(M) W=1 $(KBUILD_OPTIONS) $(@)
diff --git a/amalthea/config-pwr-state.h b/amalthea/config-pwr-state.h
new file mode 100644
index 0000000..3c8a4be
--- /dev/null
+++ b/amalthea/config-pwr-state.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Chip-dependent power configuration and states.
+ *
+ * Copyright (C) 2022 Google LLC
+ */
+
+#ifndef __AMALTHEA_CONFIG_PWR_STATE_H__
+#define __AMALTHEA_CONFIG_PWR_STATE_H__
+
+enum aur_power_rate {
+ AUR_OFF_RATE = 0,
+ AUR_UUD_RATE = 178000,
+ AUR_SUD_RATE = 373000,
+ AUR_UD_RATE = 750000,
+ AUR_NOM_RATE = 1155000,
+ AUR_READY_RATE = 178000,
+ AUR_UUD_PLUS_RATE = 268000,
+ AUR_SUD_PLUS_RATE = 560000,
+ AUR_UD_PLUS_RATE = 975000,
+};
+
+enum aur_mem_int_rate {
+ AUR_MEM_INT_MIN = 0,
+ AUR_MEM_INT_VERY_LOW = 0,
+ AUR_MEM_INT_LOW = 200000,
+ AUR_MEM_INT_HIGH = 332000,
+ AUR_MEM_INT_VERY_HIGH = 465000,
+ AUR_MEM_INT_MAX = 533000,
+};
+
+enum aur_mem_mif_rate {
+ AUR_MEM_MIF_MIN = 0,
+ AUR_MEM_MIF_VERY_LOW = 0,
+ AUR_MEM_MIF_LOW = 1014000,
+ AUR_MEM_MIF_HIGH = 1352000,
+ AUR_MEM_MIF_VERY_HIGH = 2028000,
+ AUR_MEM_MIF_MAX = 3172000,
+};
+
+#endif /* __AMALTHEA_CONFIG_PWR_STATE_H__ */
diff --git a/amalthea/config.h b/amalthea/config.h
index 19afff6..79f9eb5 100644
--- a/amalthea/config.h
+++ b/amalthea/config.h
@@ -9,9 +9,23 @@
#define __AMALTHEA_CONFIG_H__
#define GXP_DRIVER_NAME "gxp_platform"
+#define DSP_FIRMWARE_DEFAULT_PREFIX "gxp_fw_core"
+
+#define AUR_DVFS_DOMAIN 17
#define GXP_NUM_CORES 4
+#define GXP_NUM_MAILBOXES GXP_NUM_CORES
+#define GXP_NUM_WAKEUP_DOORBELLS GXP_NUM_CORES
+
+#define GXP_USE_LEGACY_MAILBOX 1
+
+#define GXP_HAS_MCU 0
+#include "config-pwr-state.h"
+#include "context.h"
#include "csrs.h"
+#include "iova.h"
+#include "lpm.h"
+#include "mailbox-regs.h"
#endif /* __AMALTHEA_CONFIG_H__ */
diff --git a/amalthea/context.h b/amalthea/context.h
new file mode 100644
index 0000000..3f88930
--- /dev/null
+++ b/amalthea/context.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Amalthea context related macros.
+ *
+ * Copyright (C) 2022 Google LLC
+ */
+
+#ifndef __AMALTHEA_CONTEXT_H__
+#define __AMALTHEA_CONTEXT_H__
+
+/* The stream IDs used for each core. */
+#define INST_SID_FOR_CORE(_x_) ((1 << 6) | ((_x_) << 4) | (0 << 3))
+#define DATA_SID_FOR_CORE(_x_) ((1 << 6) | ((_x_) << 4) | (1 << 3))
+#define IDMA_SID_FOR_CORE(_x_) ((1 << 6) | ((_x_) << 4))
+
+#endif /* __AMALTHEA_CONTEXT_H__ */
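The SID macros above pack the core index into bits [5:4], always set bit 6, and use bit 3 to separate the data stream from the instruction stream. As a worked example (not part of the patch), for core 2:

/*
 *   INST_SID_FOR_CORE(2) = (1 << 6) | (2 << 4) | (0 << 3) = 0x60
 *   DATA_SID_FOR_CORE(2) = (1 << 6) | (2 << 4) | (1 << 3) = 0x68
 */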
diff --git a/amalthea/csrs.h b/amalthea/csrs.h
index a8b8d07..5b9dac4 100644
--- a/amalthea/csrs.h
+++ b/amalthea/csrs.h
@@ -16,11 +16,6 @@
enum gxp_csrs {
GXP_REG_LPM_VERSION = 0x40000,
- GXP_REG_LPM_PSM_0 = 0x41000,
- GXP_REG_LPM_PSM_1 = 0x42000,
- GXP_REG_LPM_PSM_2 = 0x43000,
- GXP_REG_LPM_PSM_3 = 0x44000,
- GXP_REG_LPM_PSM_4 = 0x45000,
GXP_REG_AURORA_REVISION = 0x80000,
GXP_REG_COMMON_INT_POL_0 = 0x81000,
GXP_REG_COMMON_INT_POL_1 = 0x81004,
@@ -49,14 +44,20 @@ enum gxp_csrs {
#define GXP_REG_COMMON_INT_MASK_0_DOORBELLS_MASK 0xFFFFFFFF
#define GXP_REG_ETM_PWRCTL_CORE_RESET_SHIFT 16
-enum gxp_core_csrs {
- GXP_REG_INST_BPM = 0x0000,
- GXP_REG_PROFILING_CONDITION = 0x4000,
- GXP_REG_PROCESSOR_ID = 0x4004,
- GXP_REG_ALT_RESET_VECTOR = 0x4008,
- GXP_REG_COMMON_INT_MASK_0 = 0x4010,
- GXP_REG_ETM_PWRCTL = 0xB020,
-};
+/* helpers for calculating core CSRs offsets */
+#define GXP_CORE_0_BASE GXP_REG_CORE_0_INST_BPM
+#define GXP_CORE_SIZE (GXP_REG_CORE_1_INST_BPM - GXP_REG_CORE_0_INST_BPM)
+#define GXP_CORE_REG(core, off) (GXP_CORE_0_BASE + GXP_CORE_SIZE * core + off)
+
+/* Per core CSRs. @core should be in region 0 ~ GXP_NUM_CORES-1 */
+#define GXP_CORE_REG_INST_BPM(core) GXP_CORE_REG(core, 0x0)
+#define GXP_CORE_REG_PROFILING_CONDITION(core) GXP_CORE_REG(core, 0x4000)
+#define GXP_CORE_REG_PROCESSOR_ID(core) GXP_CORE_REG(core, 0x4004)
+#define GXP_CORE_REG_ALT_RESET_VECTOR(core) GXP_CORE_REG(core, 0x4008)
+#define GXP_CORE_REG_COMMON_INT_MASK_0(core) GXP_CORE_REG(core, 0x4010)
+#define GXP_CORE_REG_COMMON_INT_MASK_1(core) GXP_CORE_REG(core, 0x4014)
+#define GXP_CORE_REG_DEDICATED_INT_MASK(core) GXP_CORE_REG(core, 0x401C)
+#define GXP_CORE_REG_ETM_PWRCTL(core) GXP_CORE_REG(core, 0xB020)
#define SYNC_BARRIER_SHADOW_OFFSET 0x800
@@ -73,50 +74,4 @@ enum gxp_core_csrs {
#define PLL_CON0_PLL_AUR 0x100
#define PLL_CON0_NOC_USER 0x610
-/* LPM Registers */
-#define LPM_VERSION_OFFSET 0x0
-#define TRIGGER_CSR_START_OFFSET 0x4
-#define IMEM_START_OFFSET 0x8
-#define LPM_CONFIG_OFFSET 0xC
-#define PSM_DESCRIPTOR_OFFSET 0x10
-#define EVENTS_EN_OFFSET 0x100
-#define EVENTS_INV_OFFSET 0x140
-#define FUNCTION_SELECT_OFFSET 0x180
-#define TRIGGER_STATUS_OFFSET 0x184
-#define EVENT_STATUS_OFFSET 0x188
-#define OPS_OFFSET 0x800
-#define PSM_DESCRIPTOR_BASE(_x_) ((_x_) << 2)
-#define PSM_DESCRIPTOR_COUNT 5
-#define EVENTS_EN_BASE(_x_) ((_x_) << 2)
-#define EVENTS_EN_COUNT 16
-#define EVENTS_INV_BASE(_x_) ((_x_) << 2)
-#define EVENTS_INV_COUNT 16
-#define OPS_BASE(_x_) ((_x_) << 2)
-#define OPS_COUNT 128
-#define PSM_COUNT 5
-#define PSM_STATE_TABLE_BASE(_x_) ((_x_) << 8)
-#define PSM_STATE_TABLE_COUNT 6
-#define PSM_TRANS_BASE(_x_) ((_x_) << 5)
-#define PSM_TRANS_COUNT 4
-#define PSM_DMEM_BASE(_x_) ((_x_) << 2)
-#define PSM_DATA_COUNT 32
-#define PSM_NEXT_STATE_OFFSET 0x0
-#define PSM_SEQ_ADDR_OFFSET 0x4
-#define PSM_TIMER_VAL_OFFSET 0x8
-#define PSM_TIMER_EN_OFFSET 0xC
-#define PSM_TRIGGER_NUM_OFFSET 0x10
-#define PSM_TRIGGER_EN_OFFSET 0x14
-#define PSM_ENABLE_STATE_OFFSET 0x80
-#define PSM_DATA_OFFSET 0x600
-#define PSM_CFG_OFFSET 0x680
-#define PSM_START_OFFSET 0x684
-#define PSM_STATUS_OFFSET 0x688
-#define PSM_DEBUG_CFG_OFFSET 0x68C
-#define PSM_BREAK_ADDR_OFFSET 0x694
-#define PSM_GPIN_LO_RD_OFFSET 0x6A0
-#define PSM_GPIN_HI_RD_OFFSET 0x6A4
-#define PSM_GPOUT_LO_RD_OFFSET 0x6B0
-#define PSM_GPOUT_HI_RD_OFFSET 0x6B4
-#define PSM_DEBUG_STATUS_OFFSET 0x6B8
-
#endif /* __AMALTHEA_CSRS_H__ */
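The flat gxp_core_csrs enum is replaced by GXP_CORE_REG(core, off), which adds a per-core stride to a base; the base and stride come from GXP_REG_CORE_0_INST_BPM and GXP_REG_CORE_1_INST_BPM, which are defined elsewhere in this header and not visible in the hunk. A hypothetical accessor (not from the patch) showing the intended usage:

/* csr_base is the start of the mapped GXP CSR region; the helper name is assumed. */
static inline u32 gxp_read_processor_id(void __iomem *csr_base, uint core)
{
        return readl(csr_base + GXP_CORE_REG_PROCESSOR_ID(core));
}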
diff --git a/gxp-iova.h b/amalthea/iova.h
index 8b7de59..b1a07ae 100644
--- a/gxp-iova.h
+++ b/amalthea/iova.h
@@ -2,10 +2,18 @@
/*
* GXP IOVAs. The list of addresses for fixed device-side IOVAs
*
- * Copyright (C) 2021 Google LLC
+ * Copyright (C) 2021-2022 Google LLC
*/
-#ifndef __GXP_IOVAS_H__
-#define __GXP_IOVAS_H__
+
+#ifndef __AMALTHEA_IOVA_H__
+#define __AMALTHEA_IOVA_H__
+
+/*
+ * No local access path.
+ * Need to define GXP_IOVA_SYNC_BARRIERS and GXP_IOVA_AURORA_TOP in this
+ * case.
+ */
+#define GXP_HAS_LAP 0
#define GXP_IOVA_SYNC_BARRIERS (0x100000)
#define GXP_IOVA_MAILBOX(_x_) (0x18390000 + (_x_) * 0x00020000)
@@ -13,6 +21,7 @@
#define GXP_IOVA_AURORA_TOP (0x25C00000)
#define GXP_IOVA_FIRMWARE(_x_) (0xFA000000 + (_x_) * 0x0100000)
#define GXP_IOVA_FW_DATA (0xFA400000)
+#define GXP_IOVA_PRIV_FW_DATA (0xFA500000)
#define GXP_IOVA_TPU_MBX_BUFFER(_x_) (0xFE100000 + (_x_) * 0x00040000)
-#endif /* __GXP_IOVAS_H__ */
+#endif /* __AMALTHEA_IOVA_H__ */
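These macros lay out the fixed device-side IOVAs; as a worked example (not part of the patch):

/*
 *   GXP_IOVA_MAILBOX(2)  = 0x18390000 + 2 * 0x00020000 = 0x183D0000
 *   GXP_IOVA_FIRMWARE(3) = 0xFA000000 + 3 * 0x00100000 = 0xFA300000
 * With GXP_NUM_CORES = 4, the per-core firmware slots span 0xFA000000 to
 * 0xFA3FFFFF, immediately followed by GXP_IOVA_FW_DATA (0xFA400000) and the
 * newly added GXP_IOVA_PRIV_FW_DATA (0xFA500000).
 */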
diff --git a/amalthea/lpm.h b/amalthea/lpm.h
new file mode 100644
index 0000000..fa36976
--- /dev/null
+++ b/amalthea/lpm.h
@@ -0,0 +1,139 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Amalthea LPM chip-dependent settings.
+ *
+ * Copyright (C) 2022 Google LLC
+ */
+
+#ifndef __AMALTHEA_LPM_H__
+#define __AMALTHEA_LPM_H__
+
+#include <linux/types.h>
+
+enum gxp_lpm_psm {
+ LPM_PSM_CORE0,
+ LPM_PSM_CORE1,
+ LPM_PSM_CORE2,
+ LPM_PSM_CORE3,
+ LPM_PSM_TOP,
+ LPM_NUM_PSMS,
+};
+
+#define CORE_TO_PSM(core) (LPM_PSM_CORE0 + (core))
+
+enum lpm_psm_csrs {
+ LPM_REG_ENABLE_STATE_0 = 0x080,
+ LPM_REG_ENABLE_STATE_1 = 0x180,
+ LPM_REG_ENABLE_STATE_2 = 0x280,
+ LPM_REG_ENABLE_STATE_3 = 0x380,
+};
+
+/* offset from GXP_LPM_BASE */
+enum lpm_psm_base {
+ GXP_REG_LPM_PSM_0 = 0x1000,
+ GXP_REG_LPM_PSM_1 = 0x2000,
+ GXP_REG_LPM_PSM_2 = 0x3000,
+ GXP_REG_LPM_PSM_3 = 0x4000,
+ GXP_REG_LPM_PSM_4 = 0x5000,
+};
+
+#define LPM_STATE_TABLE_SIZE (LPM_REG_ENABLE_STATE_1 - LPM_REG_ENABLE_STATE_0)
+
+/* LPM address space starts at lpm_version register */
+#define GXP_LPM_BASE GXP_REG_LPM_VERSION
+#define GXP_LPM_PSM_0_BASE GXP_REG_LPM_PSM_0
+#define GXP_LPM_PSM_SIZE (GXP_REG_LPM_PSM_1 - GXP_REG_LPM_PSM_0)
+
+/* LPM Registers */
+#define LPM_VERSION_OFFSET 0x0
+#define TRIGGER_CSR_START_OFFSET 0x4
+#define IMEM_START_OFFSET 0x8
+#define LPM_CONFIG_OFFSET 0xC
+#define PSM_DESCRIPTOR_OFFSET 0x10
+#define EVENTS_EN_OFFSET 0x100
+#define EVENTS_INV_OFFSET 0x140
+#define FUNCTION_SELECT_OFFSET 0x180
+#define TRIGGER_STATUS_OFFSET 0x184
+#define EVENT_STATUS_OFFSET 0x188
+#define OPS_OFFSET 0x800
+#define PSM_DESCRIPTOR_BASE(_x_) ((_x_) << 2)
+#define PSM_DESCRIPTOR_COUNT 5
+#define EVENTS_EN_BASE(_x_) ((_x_) << 2)
+#define EVENTS_EN_COUNT 16
+#define EVENTS_INV_BASE(_x_) ((_x_) << 2)
+#define EVENTS_INV_COUNT 16
+#define OPS_BASE(_x_) ((_x_) << 2)
+#define OPS_COUNT 128
+#define PSM_COUNT 5
+#define PSM_STATE_TABLE_BASE(_x_) ((_x_) << 8)
+#define PSM_STATE_TABLE_COUNT 6
+#define PSM_TRANS_BASE(_x_) ((_x_) << 5)
+#define PSM_TRANS_COUNT 4
+#define PSM_DMEM_BASE(_x_) ((_x_) << 2)
+#define PSM_DATA_COUNT 32
+#define PSM_NEXT_STATE_OFFSET 0x0
+#define PSM_SEQ_ADDR_OFFSET 0x4
+#define PSM_TIMER_VAL_OFFSET 0x8
+#define PSM_TIMER_EN_OFFSET 0xC
+#define PSM_TRIGGER_NUM_OFFSET 0x10
+#define PSM_TRIGGER_EN_OFFSET 0x14
+#define PSM_ENABLE_STATE_OFFSET 0x80
+#define PSM_DATA_OFFSET 0x600
+#define PSM_CFG_OFFSET 0x680
+#define PSM_START_OFFSET 0x684
+#define PSM_STATUS_OFFSET 0x688
+#define PSM_DEBUG_CFG_OFFSET 0x68C
+#define PSM_BREAK_ADDR_OFFSET 0x694
+#define PSM_GPIN_LO_RD_OFFSET 0x6A0
+#define PSM_GPIN_HI_RD_OFFSET 0x6A4
+#define PSM_GPOUT_LO_RD_OFFSET 0x6B0
+#define PSM_GPOUT_HI_RD_OFFSET 0x6B4
+#define PSM_DEBUG_STATUS_OFFSET 0x6B8
+
+static inline u32 gxp_lpm_psm_get_status_offset(enum gxp_lpm_psm psm)
+{
+ if (psm >= LPM_NUM_PSMS)
+ return 0;
+ return GXP_LPM_PSM_0_BASE + (GXP_LPM_PSM_SIZE * psm) +
+ PSM_STATUS_OFFSET;
+}
+
+static inline u32 gxp_lpm_psm_get_start_offset(enum gxp_lpm_psm psm)
+{
+ if (psm >= LPM_NUM_PSMS)
+ return 0;
+ return GXP_LPM_PSM_0_BASE + (GXP_LPM_PSM_SIZE * psm) + PSM_START_OFFSET;
+}
+
+static inline u32 gxp_lpm_psm_get_cfg_offset(enum gxp_lpm_psm psm)
+{
+ if (psm >= LPM_NUM_PSMS)
+ return 0;
+ return GXP_LPM_PSM_0_BASE + (GXP_LPM_PSM_SIZE * psm) + PSM_CFG_OFFSET;
+}
+
+static inline u32 gxp_lpm_psm_get_state_offset(enum gxp_lpm_psm psm, uint state)
+{
+ uint reg_offset;
+
+ if (psm >= LPM_NUM_PSMS || state > 3)
+ return 0;
+
+ switch (state) {
+ case 0:
+ reg_offset = LPM_REG_ENABLE_STATE_0;
+ break;
+ case 1:
+ reg_offset = LPM_REG_ENABLE_STATE_1;
+ break;
+ case 2:
+ reg_offset = LPM_REG_ENABLE_STATE_2;
+ break;
+ case 3:
+ reg_offset = LPM_REG_ENABLE_STATE_3;
+ break;
+ }
+ return GXP_LPM_PSM_0_BASE + (GXP_LPM_PSM_SIZE * psm) + reg_offset;
+}
+
+#endif /* __AMALTHEA_LPM_H__ */
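The offset helpers above compose a per-PSM block base with the register offsets. As a worked example (not part of the patch):

/*
 *   gxp_lpm_psm_get_status_offset(LPM_PSM_CORE1)
 *     = GXP_LPM_PSM_0_BASE + GXP_LPM_PSM_SIZE * 1 + PSM_STATUS_OFFSET
 *     = 0x1000 + 0x1000 + 0x688
 *     = 0x2688 relative to GXP_LPM_BASE (GXP_REG_LPM_VERSION = 0x40000),
 * i.e. absolute CSR offset 0x42688, which falls in the block that the removed
 * GXP_REG_LPM_PSM_1 (0x42000) entry used to name in amalthea/csrs.h.
 */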
diff --git a/amalthea/mailbox-regs.h b/amalthea/mailbox-regs.h
new file mode 100644
index 0000000..050398e
--- /dev/null
+++ b/amalthea/mailbox-regs.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * GXP mailbox registers.
+ *
+ * Copyright (C) 2022 Google LLC
+ */
+
+#ifndef __AMALTHEA_MAILBOX_REGS_H__
+#define __AMALTHEA_MAILBOX_REGS_H__
+
+/* Mailbox CSRs */
+#define MBOX_MCUCTLR_OFFSET 0x0000
+
+#define MBOX_INTGR0_OFFSET 0x0020
+#define MBOX_INTMSR0_OFFSET 0x0030
+
+#define MBOX_INTCR1_OFFSET 0x0044
+#define MBOX_INTMR1_OFFSET 0x0048
+#define MBOX_INTSR1_OFFSET 0x004C
+#define MBOX_INTMSR1_OFFSET 0x0050
+
+/* Mailbox Shared Data Registers */
+#define MBOX_DATA_REG_BASE 0x0080
+
+#define MBOX_DATA_STATUS_OFFSET 0x00
+#define MBOX_DATA_DESCRIPTOR_ADDR_OFFSET 0x04
+#define MBOX_DATA_CMD_TAIL_RESP_HEAD_OFFSET 0x08
+#define MBOX_DATA_CMD_HEAD_RESP_TAIL_OFFSET 0x0C
+
+#endif /* __AMALTHEA_MAILBOX_REGS_H__ */
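The shared data registers are addressed as a base plus small offsets; a worked example (not part of the patch):

/*
 *   status word:          MBOX_DATA_REG_BASE + MBOX_DATA_STATUS_OFFSET             = 0x80
 *   cmd tail / resp head: MBOX_DATA_REG_BASE + MBOX_DATA_CMD_TAIL_RESP_HEAD_OFFSET = 0x88
 * both relative to the mailbox instance's CSR block.
 */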
diff --git a/gcip-kernel-driver/drivers/gcip/Makefile b/gcip-kernel-driver/drivers/gcip/Makefile
new file mode 100644
index 0000000..c3424ee
--- /dev/null
+++ b/gcip-kernel-driver/drivers/gcip/Makefile
@@ -0,0 +1,22 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for GCIP framework.
+#
+
+CONFIG_GCIP ?= m
+obj-$(CONFIG_GCIP) += gcip.o
+
+gcip-objs := gcip-alloc-helper.o gcip-domain-pool.o gcip-firmware.o \
+ gcip-image-config.o gcip-kci.o gcip-mailbox.o gcip-mem-pool.o \
+ gcip-telemetry.o
+
+CURRENT_DIR=$(dir $(abspath $(lastword $(MAKEFILE_LIST))))
+
+ccflags-y += -I$(CURRENT_DIR)/../../include
+
+ifdef CONFIG_GCIP_TEST
+obj-y += unittests/
+endif
+
+modules modules_install clean:
+ $(MAKE) -C $(KERNEL_SRC) M=$(M) W=1 $(KBUILD_OPTIONS) $(@)
diff --git a/gcip-kernel-driver/drivers/gcip/gcip-alloc-helper.c b/gcip-kernel-driver/drivers/gcip/gcip-alloc-helper.c
new file mode 100644
index 0000000..33c95e2
--- /dev/null
+++ b/gcip-kernel-driver/drivers/gcip/gcip-alloc-helper.c
@@ -0,0 +1,93 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * GCIP helpers for allocating memories.
+ *
+ * Copyright (C) 2022 Google LLC
+ */
+
+#include <asm/page.h>
+#include <linux/device.h>
+#include <linux/mm_types.h>
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+
+#include <gcip/gcip-alloc-helper.h>
+
+/*
+ * Set @pages to the pages @mem represents.
+ * @mem must be a pointer returned by vmalloc.
+ *
+ * Returns 0 on success, -ENOMEM when any page is NULL.
+ */
+static int gcip_vmalloc_to_pages(void *mem, size_t count, struct page **pages)
+{
+ size_t i = 0;
+
+ while (count--) {
+ pages[i] = vmalloc_to_page(mem);
+ if (!pages[i])
+ return -ENOMEM;
+ i++;
+ mem += PAGE_SIZE;
+ }
+ return 0;
+}
+
+struct sg_table *gcip_alloc_noncontiguous(struct device *dev, size_t size, gfp_t gfp)
+{
+ struct gcip_sgt_handle *sh = kmalloc(sizeof(*sh), gfp);
+ void *mem;
+ struct page **pages;
+ size_t count;
+ int ret;
+
+ if (!sh)
+ return NULL;
+
+ size = PAGE_ALIGN(size);
+ count = size >> PAGE_SHIFT;
+ mem = vzalloc_node(size, dev_to_node(dev));
+ if (!mem) {
+ dev_err(dev, "GCIP noncontiguous alloc size=%#zx failed", size);
+ goto err_free_sh;
+ }
+
+ pages = kmalloc_array(count, sizeof(*pages), gfp);
+ if (!pages) {
+ dev_err(dev, "GCIP alloc pages array count=%zu failed", count);
+ goto err_free_mem;
+ }
+
+ if (gcip_vmalloc_to_pages(mem, count, pages)) {
+ dev_err(dev, "convert memory to pages failed");
+ goto err_free_pages;
+ }
+
+ ret = sg_alloc_table_from_pages(&sh->sgt, pages, count, 0, size, gfp);
+ if (ret) {
+ dev_err(dev, "alloc SG table with size=%#zx failed: %d", size, ret);
+ goto err_free_pages;
+ }
+
+ kfree(pages);
+ sh->mem = mem;
+ return &sh->sgt;
+
+err_free_pages:
+ kfree(pages);
+err_free_mem:
+ vfree(mem);
+err_free_sh:
+ kfree(sh);
+ return NULL;
+}
+
+void gcip_free_noncontiguous(struct sg_table *sgt)
+{
+ struct gcip_sgt_handle *sh = container_of(sgt, struct gcip_sgt_handle, sgt);
+
+ sg_free_table(&sh->sgt);
+ vfree(sh->mem);
+ kfree(sh);
+}
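gcip_alloc_noncontiguous() returns an sg_table describing vmalloc-backed pages, and gcip_free_noncontiguous() recovers the enclosing gcip_sgt_handle via container_of(), so it must only be called on tables produced by this allocator. A usage sketch (not from the patch), with error handling trimmed:

static int example_alloc(struct device *dev)
{
        struct sg_table *sgt;

        /* Four pages of zeroed, physically noncontiguous memory. */
        sgt = gcip_alloc_noncontiguous(dev, 4 * PAGE_SIZE, GFP_KERNEL);
        if (!sgt)
                return -ENOMEM;

        /* ... map the table into an IOMMU domain, hand it to firmware, ... */

        gcip_free_noncontiguous(sgt);
        return 0;
}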
diff --git a/gcip-kernel-driver/drivers/gcip/gcip-domain-pool.c b/gcip-kernel-driver/drivers/gcip/gcip-domain-pool.c
new file mode 100644
index 0000000..2341b52
--- /dev/null
+++ b/gcip-kernel-driver/drivers/gcip/gcip-domain-pool.c
@@ -0,0 +1,101 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * GCIP IOMMU domain allocator.
+ *
+ * Copyright (C) 2022 Google LLC
+ */
+
+#include <linux/device.h>
+#include <linux/idr.h>
+#include <linux/iommu.h>
+#include <linux/vmalloc.h>
+
+#include <gcip/gcip-domain-pool.h>
+
+int gcip_domain_pool_init(struct device *dev, struct gcip_domain_pool *pool, unsigned int size)
+{
+ unsigned int i;
+ struct iommu_domain *domain;
+
+ pool->size = size;
+ pool->dev = dev;
+
+ if (!size)
+ return 0;
+
+ dev_dbg(pool->dev, "Initializing domain pool with %u domains\n", size);
+
+ ida_init(&pool->idp);
+ pool->array = vzalloc(sizeof(*pool->array) * size);
+ if (!pool->array) {
+ ida_destroy(&pool->idp);
+ return -ENOMEM;
+ }
+ for (i = 0; i < size; i++) {
+ domain = iommu_domain_alloc(dev->bus);
+ if (!domain) {
+ dev_err(pool->dev, "Failed to allocate iommu domain %d of %u\n", i + 1,
+ size);
+ gcip_domain_pool_destroy(pool);
+ return -ENOMEM;
+ }
+
+ pool->array[i] = domain;
+ }
+ return 0;
+}
+
+struct iommu_domain *gcip_domain_pool_alloc(struct gcip_domain_pool *pool)
+{
+ int id;
+
+ if (!pool->size)
+ return iommu_domain_alloc(pool->dev->bus);
+
+ id = ida_alloc_max(&pool->idp, pool->size - 1, GFP_KERNEL);
+
+ if (id < 0) {
+ dev_err(pool->dev, "No more domains available from pool of size %u\n", pool->size);
+ return NULL;
+ }
+
+ dev_dbg(pool->dev, "Allocated domain from pool with id = %d\n", id);
+
+ return pool->array[id];
+}
+
+void gcip_domain_pool_free(struct gcip_domain_pool *pool, struct iommu_domain *domain)
+{
+ int id;
+
+ if (!pool->size) {
+ iommu_domain_free(domain);
+ return;
+ }
+ for (id = 0; id < pool->size; id++) {
+ if (pool->array[id] == domain) {
+ dev_dbg(pool->dev, "Released domain from pool with id = %d\n", id);
+ ida_free(&pool->idp, id);
+ return;
+ }
+ }
+ dev_err(pool->dev, "Domain not found in pool\n");
+}
+
+void gcip_domain_pool_destroy(struct gcip_domain_pool *pool)
+{
+ int i;
+
+ if (!pool->size)
+ return;
+
+ dev_dbg(pool->dev, "Destroying domain pool with %u domains\n", pool->size);
+
+ for (i = 0; i < pool->size; i++) {
+ if (pool->array[i])
+ iommu_domain_free(pool->array[i]);
+ }
+
+ ida_destroy(&pool->idp);
+ vfree(pool->array);
+}
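The pool pre-allocates a fixed number of IOMMU domains and tracks them with an IDA; a size of 0 makes alloc/free fall straight through to iommu_domain_alloc()/iommu_domain_free(). A usage sketch (not from the patch):

static int example_domain_pool(struct device *dev)
{
        struct gcip_domain_pool pool;
        struct iommu_domain *domain;
        int ret;

        ret = gcip_domain_pool_init(dev, &pool, 4);
        if (ret)
                return ret;

        domain = gcip_domain_pool_alloc(&pool);
        if (domain) {
                /* ... attach the domain and map buffers ... */
                gcip_domain_pool_free(&pool, domain);
        }

        gcip_domain_pool_destroy(&pool);
        return 0;
}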
diff --git a/gcip-kernel-driver/drivers/gcip/gcip-firmware.c b/gcip-kernel-driver/drivers/gcip/gcip-firmware.c
new file mode 100644
index 0000000..0b0225c
--- /dev/null
+++ b/gcip-kernel-driver/drivers/gcip/gcip-firmware.c
@@ -0,0 +1,25 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * GCIP firmware interface.
+ *
+ * Copyright (C) 2022 Google LLC
+ */
+
+#include <gcip/gcip-firmware.h>
+
+char *gcip_fw_flavor_str(enum gcip_fw_flavor fw_flavor)
+{
+ switch (fw_flavor) {
+ case GCIP_FW_FLAVOR_BL1:
+ return "stage 2 bootloader";
+ case GCIP_FW_FLAVOR_SYSTEST:
+ return "test";
+ case GCIP_FW_FLAVOR_PROD_DEFAULT:
+ return "prod";
+ case GCIP_FW_FLAVOR_CUSTOM:
+ return "custom";
+ case GCIP_FW_FLAVOR_UNKNOWN:
+ default:
+ return "unknown";
+ }
+}
diff --git a/gcip-kernel-driver/drivers/gcip/gcip-image-config.c b/gcip-kernel-driver/drivers/gcip/gcip-image-config.c
new file mode 100644
index 0000000..312bbdc
--- /dev/null
+++ b/gcip-kernel-driver/drivers/gcip/gcip-image-config.c
@@ -0,0 +1,220 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Framework for parsing the firmware image configuration.
+ *
+ * Copyright (C) 2022 Google LLC
+ */
+
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/types.h>
+
+#include <gcip/gcip-image-config.h>
+
+#define ADDR_SHIFT 12
+#define SIZE_MODE_BIT BIT(ADDR_SHIFT - 1)
+#define SECURE_SIZE_MASK (SIZE_MODE_BIT - 1u)
+#define NS_SIZE_MASK (BIT(ADDR_SHIFT) - 1u)
+#define ADDR_MASK ~(BIT(ADDR_SHIFT) - 1u)
+
+/* used by ns_iommu_mappings */
+#define CONFIG_TO_MBSIZE(a) (((a) & NS_SIZE_MASK) << 20)
+
+/* used by iommu_mappings */
+static inline __u32 config_to_size(__u32 cfg)
+{
+ __u32 page_size;
+
+ if (cfg & SIZE_MODE_BIT)
+ page_size = cfg & SECURE_SIZE_MASK;
+ else
+ page_size = BIT(cfg & SECURE_SIZE_MASK);
+
+ return page_size << PAGE_SHIFT;
+}
+
+static int setup_iommu_mappings(struct gcip_image_config_parser *parser,
+ struct gcip_image_config *config)
+{
+ int i, ret;
+ dma_addr_t daddr;
+ size_t size;
+ phys_addr_t paddr;
+
+ for (i = 0; i < config->num_iommu_mappings; i++) {
+ daddr = config->iommu_mappings[i].virt_address;
+ if (unlikely(!daddr)) {
+ dev_warn(parser->dev, "Invalid config, device address is zero");
+ ret = -EIO;
+ goto err;
+ }
+ size = config_to_size(config->iommu_mappings[i].image_config_value);
+ paddr = config->iommu_mappings[i].image_config_value & ADDR_MASK;
+
+ dev_dbg(parser->dev, "Image config adding IOMMU mapping: %pad -> %pap", &daddr,
+ &paddr);
+
+ if (unlikely(daddr + size <= daddr || paddr + size <= paddr)) {
+ ret = -EOVERFLOW;
+ goto err;
+ }
+ ret = parser->ops->map(parser->data, daddr, paddr, size,
+ GCIP_IMAGE_CONFIG_FLAGS_SECURE);
+ if (ret) {
+ dev_err(parser->dev,
+ "Unable to Map: %d dma_addr: %pad phys_addr: %pap size: %#lx\n",
+ ret, &daddr, &paddr, size);
+ goto err;
+ }
+ }
+
+ return 0;
+
+err:
+ while (i--) {
+ daddr = config->iommu_mappings[i].virt_address;
+ size = config_to_size(config->iommu_mappings[i].image_config_value);
+ parser->ops->unmap(parser->data, daddr, size, GCIP_IMAGE_CONFIG_FLAGS_SECURE);
+ }
+ return ret;
+}
+
+static void clear_iommu_mappings(struct gcip_image_config_parser *parser,
+ struct gcip_image_config *config)
+{
+ dma_addr_t daddr;
+ size_t size;
+ int i;
+
+ for (i = config->num_iommu_mappings - 1; i >= 0; i--) {
+ daddr = config->iommu_mappings[i].virt_address;
+ size = config_to_size(config->iommu_mappings[i].image_config_value);
+ dev_dbg(parser->dev, "Image config removing IOMMU mapping: %pad size=%#lx", &daddr,
+ size);
+ parser->ops->unmap(parser->data, daddr, size, GCIP_IMAGE_CONFIG_FLAGS_SECURE);
+ }
+}
+
+static int setup_ns_iommu_mappings(struct gcip_image_config_parser *parser,
+ struct gcip_image_config *config)
+{
+ dma_addr_t daddr;
+ size_t size;
+ int ret, i;
+ phys_addr_t paddr = 0;
+
+ for (i = 0; i < config->num_ns_iommu_mappings; i++) {
+ daddr = config->ns_iommu_mappings[i] & ADDR_MASK;
+ if (unlikely(!daddr)) {
+ dev_warn(parser->dev, "Invalid config, device address is zero");
+ ret = -EIO;
+ goto err;
+ }
+ size = CONFIG_TO_MBSIZE(config->ns_iommu_mappings[i]);
+ dev_dbg(parser->dev, "Image config adding NS IOMMU mapping: %pad -> %pap", &daddr,
+ &paddr);
+ if (unlikely(daddr + size <= daddr || paddr + size <= paddr)) {
+ ret = -EOVERFLOW;
+ goto err;
+ }
+ ret = parser->ops->map(parser->data, daddr, paddr, size, 0);
+ if (ret)
+ goto err;
+ paddr += size;
+ }
+
+ return 0;
+
+err:
+ while (i--) {
+ size = CONFIG_TO_MBSIZE(config->ns_iommu_mappings[i]);
+ daddr = config->ns_iommu_mappings[i] & ADDR_MASK;
+ parser->ops->unmap(parser->data, daddr, size, 0);
+ }
+ return ret;
+}
+
+static void clear_ns_iommu_mappings(struct gcip_image_config_parser *parser,
+ struct gcip_image_config *config)
+{
+ dma_addr_t daddr;
+ size_t size;
+ int i;
+
+ for (i = config->num_ns_iommu_mappings - 1; i >= 0; i--) {
+ size = CONFIG_TO_MBSIZE(config->ns_iommu_mappings[i]);
+ daddr = config->ns_iommu_mappings[i] & ADDR_MASK;
+ dev_dbg(parser->dev, "Image config removing NS IOMMU mapping: %pad size=%#lx",
+ &daddr, size);
+ parser->ops->unmap(parser->data, daddr, size, 0);
+ }
+}
+
+static int map_image_config(struct gcip_image_config_parser *parser,
+ struct gcip_image_config *config)
+{
+ int ret = setup_ns_iommu_mappings(parser, config);
+
+ if (ret)
+ return ret;
+ if (gcip_image_config_is_ns(config)) {
+ ret = setup_iommu_mappings(parser, config);
+ if (ret)
+ clear_ns_iommu_mappings(parser, config);
+ }
+ return ret;
+}
+
+static void unmap_image_config(struct gcip_image_config_parser *parser,
+ struct gcip_image_config *config)
+{
+ if (gcip_image_config_is_ns(config))
+ clear_iommu_mappings(parser, config);
+ clear_ns_iommu_mappings(parser, config);
+}
+
+int gcip_image_config_parser_init(struct gcip_image_config_parser *parser,
+ const struct gcip_image_config_ops *ops, struct device *dev,
+ void *data)
+{
+ if (!ops->map || !ops->unmap) {
+ dev_err(dev, "Missing mandatory operations for image config parser");
+ return -EINVAL;
+ }
+ parser->dev = dev;
+ parser->data = data;
+ parser->ops = ops;
+ memset(&parser->last_config, 0, sizeof(parser->last_config));
+ return 0;
+}
+
+int gcip_image_config_parse(struct gcip_image_config_parser *parser,
+ struct gcip_image_config *config)
+{
+ int ret;
+
+ if (!memcmp(config, &parser->last_config, sizeof(*config)))
+ return 0;
+ unmap_image_config(parser, &parser->last_config);
+ ret = map_image_config(parser, config);
+ if (ret) {
+ dev_err(parser->dev, "Map image config failed: %d", ret);
+ /*
+ * Weird case as the mappings in the last config were just removed - might happen
+ * if the IOMMU driver state is corrupted. We can't help to rescue it so let's
+ * simply log a message.
+ */
+ if (unlikely(map_image_config(parser, &parser->last_config)))
+ dev_err(parser->dev, "Failed to roll back the last image config");
+ return ret;
+ }
+ memcpy(&parser->last_config, config, sizeof(parser->last_config));
+ return 0;
+}
+
+void gcip_image_config_clear(struct gcip_image_config_parser *parser)
+{
+ unmap_image_config(parser, &parser->last_config);
+ memset(&parser->last_config, 0, sizeof(parser->last_config));
+}
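The parser drives chip-specific code through two callbacks, invoked as parser->ops->map(parser->data, daddr, paddr, size, flags) and parser->ops->unmap(parser->data, daddr, size, flags). The authoritative prototypes live in gcip-image-config.h, which is not part of this hunk, so the signatures in this sketch (including the flags type and the unmap return type) are inferred from the call sites and should be treated as assumptions:

static int example_map(void *data, dma_addr_t daddr, phys_addr_t paddr,
                       size_t size, unsigned int flags)
{
        /* Create the IOMMU mapping for this carve-out; flags may carry
         * GCIP_IMAGE_CONFIG_FLAGS_SECURE for secure mappings. */
        return 0;
}

static void example_unmap(void *data, dma_addr_t daddr, size_t size,
                          unsigned int flags)
{
        /* Tear the corresponding mapping down. */
}

static const struct gcip_image_config_ops example_ops = {
        .map = example_map,
        .unmap = example_unmap,
};

/*
 * During firmware load, after the config block has been read from the image:
 *   gcip_image_config_parser_init(&parser, &example_ops, dev, ctx);
 *   gcip_image_config_parse(&parser, &image_config);
 * and gcip_image_config_clear(&parser) once the firmware is unloaded.
 */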
diff --git a/gcip-kernel-driver/drivers/gcip/gcip-kci.c b/gcip-kernel-driver/drivers/gcip/gcip-kci.c
new file mode 100644
index 0000000..15b2c53
--- /dev/null
+++ b/gcip-kernel-driver/drivers/gcip/gcip-kci.c
@@ -0,0 +1,525 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Kernel Control Interface, implements the protocol between AP kernel and GCIP firmware.
+ *
+ * Copyright (C) 2022 Google LLC
+ */
+
+#include <linux/circ_buf.h>
+#include <linux/device.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/string.h> /* memcpy */
+
+#include <gcip/gcip-kci.h>
+#include <gcip/gcip-mailbox.h>
+
+static u32 gcip_kci_get_cmd_queue_head(struct gcip_mailbox *mailbox)
+{
+ struct gcip_kci *kci = gcip_mailbox_get_data(mailbox);
+
+ return kci->ops->get_cmd_queue_head(kci);
+}
+
+static u32 gcip_kci_get_cmd_queue_tail(struct gcip_mailbox *mailbox)
+{
+ struct gcip_kci *kci = gcip_mailbox_get_data(mailbox);
+
+ return kci->ops->get_cmd_queue_tail(kci);
+}
+
+static void gcip_kci_inc_cmd_queue_tail(struct gcip_mailbox *mailbox, u32 inc)
+{
+ struct gcip_kci *kci = gcip_mailbox_get_data(mailbox);
+
+ lockdep_assert_held(&kci->cmd_queue_lock);
+ kci->ops->inc_cmd_queue_tail(kci, inc);
+}
+
+static int gcip_kci_acquire_cmd_queue_lock(struct gcip_mailbox *mailbox, bool try)
+{
+ struct gcip_kci *kci = gcip_mailbox_get_data(mailbox);
+
+ mutex_lock(&kci->cmd_queue_lock);
+ return 1;
+}
+
+static void gcip_kci_release_cmd_queue_lock(struct gcip_mailbox *mailbox)
+{
+ struct gcip_kci *kci = gcip_mailbox_get_data(mailbox);
+
+ mutex_unlock(&kci->cmd_queue_lock);
+}
+
+static u64 gcip_kci_get_cmd_elem_seq(struct gcip_mailbox *mailbox, void *cmd)
+{
+ struct gcip_kci_command_element *elem = cmd;
+
+ return elem->seq;
+}
+
+static u32 gcip_kci_get_cmd_elem_code(struct gcip_mailbox *mailbox, void *cmd)
+{
+ struct gcip_kci_command_element *elem = cmd;
+
+ return elem->code;
+}
+
+static void gcip_kci_set_cmd_elem_seq(struct gcip_mailbox *mailbox, void *cmd, u64 seq)
+{
+ struct gcip_kci_command_element *elem = cmd;
+
+ if (!(elem->seq & GCIP_KCI_REVERSE_FLAG))
+ elem->seq = seq;
+}
+
+static u32 gcip_kci_get_resp_queue_size(struct gcip_mailbox *mailbox)
+{
+ struct gcip_kci *kci = gcip_mailbox_get_data(mailbox);
+
+ return kci->ops->get_resp_queue_size(kci);
+}
+
+static u32 gcip_kci_get_resp_queue_head(struct gcip_mailbox *mailbox)
+{
+ struct gcip_kci *kci = gcip_mailbox_get_data(mailbox);
+
+ return kci->ops->get_resp_queue_head(kci);
+}
+
+static u32 gcip_kci_get_resp_queue_tail(struct gcip_mailbox *mailbox)
+{
+ struct gcip_kci *kci = gcip_mailbox_get_data(mailbox);
+
+ return kci->ops->get_resp_queue_tail(kci);
+}
+
+static void gcip_kci_inc_resp_queue_head(struct gcip_mailbox *mailbox, u32 inc)
+{
+ struct gcip_kci *kci = gcip_mailbox_get_data(mailbox);
+
+ lockdep_assert_held(&kci->resp_queue_lock);
+ kci->ops->inc_resp_queue_head(kci, inc);
+}
+
+static int gcip_kci_acquire_resp_queue_lock(struct gcip_mailbox *mailbox, bool try)
+{
+ struct gcip_kci *kci = gcip_mailbox_get_data(mailbox);
+
+ if (try)
+ return spin_trylock(&kci->resp_queue_lock);
+
+ spin_lock(&kci->resp_queue_lock);
+ return 1;
+}
+
+static void gcip_kci_release_resp_queue_lock(struct gcip_mailbox *mailbox)
+{
+ struct gcip_kci *kci = gcip_mailbox_get_data(mailbox);
+
+ spin_unlock(&kci->resp_queue_lock);
+}
+
+static u64 gcip_kci_get_resp_elem_seq(struct gcip_mailbox *mailbox, void *resp)
+{
+ struct gcip_kci_response_element *elem = resp;
+
+ return elem->seq;
+}
+
+static void gcip_kci_set_resp_elem_seq(struct gcip_mailbox *mailbox, void *resp, u64 seq)
+{
+ struct gcip_kci_response_element *elem = resp;
+
+ elem->seq = seq;
+}
+
+static u16 gcip_kci_get_resp_elem_status(struct gcip_mailbox *mailbox, void *resp)
+{
+ struct gcip_kci_response_element *elem = resp;
+
+ return elem->status;
+}
+
+static void gcip_kci_set_resp_elem_status(struct gcip_mailbox *mailbox, void *resp, u16 status)
+{
+ struct gcip_kci_response_element *elem = resp;
+
+ elem->status = status;
+}
+
+static void gcip_kci_acquire_wait_list_lock(struct gcip_mailbox *mailbox, bool irqsave,
+ unsigned long *flags)
+{
+ struct gcip_kci *kci = gcip_mailbox_get_data(mailbox);
+
+ if (irqsave)
+ spin_lock_irqsave(&kci->wait_list_lock, *flags);
+ else
+ spin_lock(&kci->wait_list_lock);
+}
+
+static void gcip_kci_release_wait_list_lock(struct gcip_mailbox *mailbox, bool irqrestore,
+ unsigned long flags)
+{
+ struct gcip_kci *kci = gcip_mailbox_get_data(mailbox);
+
+ if (irqrestore)
+ spin_unlock_irqrestore(&kci->wait_list_lock, flags);
+ else
+ spin_unlock(&kci->wait_list_lock);
+}
+
+static int gcip_kci_wait_for_cmd_queue_not_full(struct gcip_mailbox *mailbox)
+{
+ struct gcip_kci *kci = gcip_mailbox_get_data(mailbox);
+ u32 tail = kci->ops->get_cmd_queue_tail(kci);
+ int ret;
+
+ ret = wait_event_timeout(kci->resp_doorbell_waitq,
+ kci->ops->get_cmd_queue_head(kci) !=
+ (tail ^ mailbox->queue_wrap_bit),
+ msecs_to_jiffies(mailbox->timeout));
+ if (!ret)
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+static int gcip_kci_after_enqueue_cmd(struct gcip_mailbox *mailbox, void *cmd)
+{
+ struct gcip_kci *kci = gcip_mailbox_get_data(mailbox);
+ struct gcip_kci_command_element *elem = cmd;
+
+ kci->ops->trigger_doorbell(kci, GCIP_KCI_PUSH_CMD);
+ if (!(elem->seq & GCIP_KCI_REVERSE_FLAG))
+ return 1;
+ return 0;
+}
+
+static void gcip_kci_after_fetch_resps(struct gcip_mailbox *mailbox, u32 num_resps)
+{
+ struct gcip_kci *kci = gcip_mailbox_get_data(mailbox);
+ u32 size = kci->ops->get_resp_queue_size(kci);
+
+ /*
+ * We consumed a lot of responses - ring the doorbell of *cmd* queue to notify the firmware,
+ * which might be waiting for us to consume the response queue.
+ */
+ if (num_resps >= size / 2)
+ kci->ops->trigger_doorbell(kci, GCIP_KCI_CONSUME_RESP);
+}
+
+/*
+ * Adds an incoming request from firmware to the circular buffer and schedules the work queue for
+ * processing.
+ */
+static int gcip_reverse_kci_add_resp(struct gcip_kci *kci,
+ const struct gcip_kci_response_element *resp)
+{
+ struct gcip_reverse_kci *rkci = &kci->rkci;
+ unsigned long head, tail, flags;
+ int ret = 0;
+
+ spin_lock_irqsave(&rkci->producer_lock, flags);
+ head = rkci->head;
+ tail = READ_ONCE(rkci->tail);
+ if (CIRC_SPACE(head, tail, rkci->buffer_size) >= 1) {
+ rkci->buffer[head] = *resp;
+ smp_store_release(&rkci->head, (head + 1) & (rkci->buffer_size - 1));
+ schedule_work(&rkci->work);
+ } else {
+ ret = -ENOSPC;
+ }
+ spin_unlock_irqrestore(&rkci->producer_lock, flags);
+
+ return ret;
+}
+
+static bool gcip_kci_before_handle_resp(struct gcip_mailbox *mailbox, const void *resp)
+{
+ struct gcip_kci *kci = gcip_mailbox_get_data(mailbox);
+ const struct gcip_kci_response_element *elem = resp;
+
+ if (elem->seq & GCIP_KCI_REVERSE_FLAG) {
+ int ret = gcip_reverse_kci_add_resp(kci, elem);
+
+ if (ret)
+ dev_warn_ratelimited(kci->dev,
+ "Failed to handle reverse KCI code %u (%d)\n",
+ elem->code, ret);
+ return false;
+ }
+
+ return true;
+}
+
+static const struct gcip_mailbox_ops gcip_mailbox_ops = {
+ .get_cmd_queue_head = gcip_kci_get_cmd_queue_head,
+ .get_cmd_queue_tail = gcip_kci_get_cmd_queue_tail,
+ .inc_cmd_queue_tail = gcip_kci_inc_cmd_queue_tail,
+ .acquire_cmd_queue_lock = gcip_kci_acquire_cmd_queue_lock,
+ .release_cmd_queue_lock = gcip_kci_release_cmd_queue_lock,
+ .get_cmd_elem_seq = gcip_kci_get_cmd_elem_seq,
+ .set_cmd_elem_seq = gcip_kci_set_cmd_elem_seq,
+ .get_cmd_elem_code = gcip_kci_get_cmd_elem_code,
+ .get_resp_queue_size = gcip_kci_get_resp_queue_size,
+ .get_resp_queue_head = gcip_kci_get_resp_queue_head,
+ .get_resp_queue_tail = gcip_kci_get_resp_queue_tail,
+ .inc_resp_queue_head = gcip_kci_inc_resp_queue_head,
+ .acquire_resp_queue_lock = gcip_kci_acquire_resp_queue_lock,
+ .release_resp_queue_lock = gcip_kci_release_resp_queue_lock,
+ .get_resp_elem_seq = gcip_kci_get_resp_elem_seq,
+ .set_resp_elem_seq = gcip_kci_set_resp_elem_seq,
+ .get_resp_elem_status = gcip_kci_get_resp_elem_status,
+ .set_resp_elem_status = gcip_kci_set_resp_elem_status,
+ .acquire_wait_list_lock = gcip_kci_acquire_wait_list_lock,
+ .release_wait_list_lock = gcip_kci_release_wait_list_lock,
+ .wait_for_cmd_queue_not_full = gcip_kci_wait_for_cmd_queue_not_full,
+ .after_enqueue_cmd = gcip_kci_after_enqueue_cmd,
+ .after_fetch_resps = gcip_kci_after_fetch_resps,
+ .before_handle_resp = gcip_kci_before_handle_resp,
+};
+
+/*
+ * Pushes an element to the cmd queue and waits for the response.
+ * Returns -ETIMEDOUT if no response is received within kci->mailbox.timeout msecs.
+ *
+ * Returns the code of the response, or a negative errno on error.
+ * @resp is updated with the response, so the caller can retrieve the returned retval field.
+ */
+int gcip_kci_send_cmd_return_resp(struct gcip_kci *kci, struct gcip_kci_command_element *cmd,
+ struct gcip_kci_response_element *resp)
+{
+ int ret;
+
+ ret = gcip_mailbox_send_cmd(&kci->mailbox, cmd, resp);
+ if (ret || !resp)
+ return ret;
+
+ return resp->code;
+}
+
+int gcip_kci_send_cmd(struct gcip_kci *kci, struct gcip_kci_command_element *cmd)
+{
+ struct gcip_kci_response_element resp;
+
+ /* Reverse KCI responses do not get a response back, so don't wait for one. */
+ if (cmd->seq & GCIP_KCI_REVERSE_FLAG)
+ return gcip_kci_send_cmd_return_resp(kci, cmd, NULL);
+ else
+ return gcip_kci_send_cmd_return_resp(kci, cmd, &resp);
+}
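+
+/*
+ * Example of a synchronous KCI command (illustrative sketch only; fw_info_daddr is a
+ * hypothetical DMA address that a chip driver would provide):
+ *
+ *	struct gcip_kci_command_element cmd = {
+ *		.code = GCIP_KCI_CODE_FIRMWARE_INFO,
+ *		.dma = { .address = fw_info_daddr, .size = sizeof(struct gcip_fw_info) },
+ *	};
+ *	struct gcip_kci_response_element resp;
+ *	int ret = gcip_kci_send_cmd_return_resp(kci, &cmd, &resp);
+ *
+ * A negative @ret is a mailbox error such as -ETIMEDOUT; otherwise @ret is the gcip_kci_error
+ * code reported by the firmware and resp.retval carries the command-specific return value.
+ */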
+
+/*
+ * Fetches and handles responses, then wakes up threads that are waiting for a response.
+ *
+ * Note: this worker is scheduled in the IRQ handler. To prevent use-after-free or race-condition
+ * bugs, gcip_kci_cancel_work_queues() must be called before freeing the mailbox.
+ */
+static void gcip_kci_consume_responses_work(struct work_struct *work)
+{
+ struct gcip_kci *kci = container_of(work, struct gcip_kci, work);
+
+ gcip_mailbox_consume_responses_work(&kci->mailbox);
+}
+
+/*
+ * IRQ handler of KCI mailbox.
+ *
+ * Consumes one response (if any) and puts gcip_kci_consume_responses_work() into the system work
+ * queue.
+ */
+void gcip_kci_handle_irq(struct gcip_kci *kci)
+{
+ struct gcip_kci_response_element resp;
+
+ /* Wakes up threads that are waiting for the response doorbell to be rung. */
+ wake_up(&kci->resp_doorbell_waitq);
+
+ /*
+ * Quickly consumes one response, which should be enough for usual cases, to prevent the
+ * host from being too busy to execute the scheduled work.
+ */
+ gcip_mailbox_consume_one_response(&kci->mailbox, &resp);
+
+ schedule_work(&kci->work);
+}
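+
+/*
+ * Example wiring (illustrative sketch; the IRQ handler name and the way the chip driver stores
+ * its gcip_kci pointer are hypothetical):
+ *
+ *	static irqreturn_t chip_kci_irq_handler(int irq, void *arg)
+ *	{
+ *		struct gcip_kci *kci = arg;
+ *
+ *		gcip_kci_handle_irq(kci);
+ *		return IRQ_HANDLED;
+ *	}
+ */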
+
+static void gcip_kci_update_usage_work(struct work_struct *work)
+{
+ struct gcip_kci *kci = container_of(work, struct gcip_kci, usage_work);
+
+ kci->ops->update_usage(kci);
+}
+
+void gcip_kci_update_usage_async(struct gcip_kci *kci)
+{
+ schedule_work(&kci->usage_work);
+}
+
+/* Removes one element from the circular buffer. */
+static int gcip_reverse_kci_remove_resp(struct gcip_reverse_kci *rkci,
+ struct gcip_kci_response_element *resp)
+{
+ unsigned long head, tail;
+ int ret = 0;
+
+ spin_lock(&rkci->consumer_lock);
+
+ /*
+ * Prevents the compiler from discarding and reloading its cached value and additionally
+ * forces the CPU to order against subsequent memory references.
+ * Shamelessly stolen from:
+ * https://www.kernel.org/doc/html/latest/core-api/circular-buffers.html
+ */
+ head = smp_load_acquire(&rkci->head);
+ tail = rkci->tail;
+ if (CIRC_CNT(head, tail, rkci->buffer_size) >= 1) {
+ *resp = rkci->buffer[tail];
+ tail = (tail + 1) & (rkci->buffer_size - 1);
+ ret = 1;
+ smp_store_release(&rkci->tail, tail);
+ }
+ spin_unlock(&rkci->consumer_lock);
+ return ret;
+}
+
+/* Worker for incoming requests from firmware. */
+static void gcip_reverse_kci_work(struct work_struct *work)
+{
+ struct gcip_kci_response_element resp;
+ struct gcip_reverse_kci *rkci = container_of(work, struct gcip_reverse_kci, work);
+ struct gcip_kci *kci = container_of(rkci, struct gcip_kci, rkci);
+
+ while (gcip_reverse_kci_remove_resp(rkci, &resp))
+ kci->ops->reverse_kci_handle_response(kci, &resp);
+}
+
+/* Initializes the Reverse KCI handler. */
+static int gcip_reverse_kci_init(struct gcip_reverse_kci *rkci, struct device *dev, u32 buffer_size)
+{
+ if (rkci->buffer)
+ return 0;
+
+ rkci->buffer_size = buffer_size;
+ rkci->buffer = devm_kcalloc(dev, buffer_size, sizeof(*rkci->buffer), GFP_KERNEL);
+ if (!rkci->buffer)
+ return -ENOMEM;
+
+ spin_lock_init(&rkci->producer_lock);
+ spin_lock_init(&rkci->consumer_lock);
+ INIT_WORK(&rkci->work, gcip_reverse_kci_work);
+
+ return 0;
+}
+
+/* Verifies and sets the KCI operators. */
+static int gcip_kci_set_ops(struct gcip_kci *kci, const struct gcip_kci_ops *ops)
+{
+ if (!ops) {
+ kci->ops = NULL;
+ return 0;
+ }
+
+ if (!ops->get_cmd_queue_head || !ops->get_cmd_queue_tail || !ops->inc_cmd_queue_tail) {
+ dev_err(kci->dev, "Incomplete KCI CMD queue ops.\n");
+ return -EINVAL;
+ }
+
+ if (!ops->get_resp_queue_size || !ops->get_resp_queue_head || !ops->get_resp_queue_tail ||
+ !ops->inc_resp_queue_head) {
+ dev_err(kci->dev, "Incomplete KCI RESP queue ops.\n");
+ return -EINVAL;
+ }
+
+ if (!ops->trigger_doorbell) {
+ dev_err(kci->dev, "Incomplete KCI ops. Missing trigger_doorbell.\n");
+ return -EINVAL;
+ }
+
+ kci->ops = ops;
+
+ return 0;
+}
+
+/* Sets the KCI private data. */
+static inline void gcip_kci_set_data(struct gcip_kci *kci, void *data)
+{
+ kci->data = data;
+}
+
+int gcip_kci_init(struct gcip_kci *kci, const struct gcip_kci_args *args)
+{
+ int ret;
+ struct gcip_mailbox_args mailbox_args;
+
+ if (kci->ops)
+ return 0;
+
+ kci->dev = args->dev;
+ gcip_kci_set_data(kci, args->data);
+
+ ret = gcip_kci_set_ops(kci, args->ops);
+ if (ret)
+ goto err_unset_data;
+
+ ret = gcip_reverse_kci_init(&kci->rkci, kci->dev, args->rkci_buffer_size);
+ if (ret)
+ goto err_unset_ops;
+
+ mailbox_args.dev = args->dev;
+ mailbox_args.queue_wrap_bit = args->queue_wrap_bit;
+ mailbox_args.cmd_queue = args->cmd_queue;
+ mailbox_args.cmd_elem_size = sizeof(struct gcip_kci_command_element);
+ mailbox_args.resp_queue = args->resp_queue;
+ mailbox_args.resp_elem_size = sizeof(struct gcip_kci_response_element);
+ mailbox_args.timeout = args->timeout;
+ mailbox_args.ops = &gcip_mailbox_ops;
+ mailbox_args.data = kci;
+ mailbox_args.ignore_seq_order = false;
+
+ ret = gcip_mailbox_init(&kci->mailbox, &mailbox_args);
+ if (ret)
+ goto err_unset_ops;
+
+ mutex_init(&kci->cmd_queue_lock);
+ spin_lock_init(&kci->resp_queue_lock);
+ spin_lock_init(&kci->wait_list_lock);
+ init_waitqueue_head(&kci->resp_doorbell_waitq);
+ INIT_WORK(&kci->work, gcip_kci_consume_responses_work);
+ INIT_WORK(&kci->usage_work, gcip_kci_update_usage_work);
+
+ return 0;
+err_unset_ops:
+ gcip_kci_set_ops(kci, NULL);
+err_unset_data:
+ gcip_kci_set_data(kci, NULL);
+
+ return ret;
+}
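+
+/*
+ * Example initialization (illustrative sketch; the queue pointers, wrap bit, timeout and reverse
+ * KCI buffer size below are placeholders a chip driver would provide):
+ *
+ *	const struct gcip_kci_args args = {
+ *		.dev = chip->dev,
+ *		.cmd_queue = chip->kci_cmd_queue_vaddr,
+ *		.resp_queue = chip->kci_resp_queue_vaddr,
+ *		.queue_wrap_bit = CHIP_KCI_QUEUE_WRAP_BIT,
+ *		.rkci_buffer_size = CHIP_RKCI_BUFFER_SIZE,
+ *		.timeout = CHIP_KCI_TIMEOUT_MS,
+ *		.ops = &chip_kci_ops,
+ *		.data = chip,
+ *	};
+ *	int ret = gcip_kci_init(kci, &args);
+ */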
+
+void gcip_kci_cancel_work_queues(struct gcip_kci *kci)
+{
+ cancel_work_sync(&kci->usage_work);
+ cancel_work_sync(&kci->work);
+ cancel_work_sync(&kci->rkci.work);
+}
+
+void gcip_kci_release(struct gcip_kci *kci)
+{
+ kci->rkci.buffer = NULL;
+ gcip_kci_set_ops(kci, NULL);
+ gcip_kci_set_data(kci, NULL);
+
+ /*
+ * Non-empty @kci->wait_list means someone (gcip_kci_send_cmd) is waiting for a response.
+ *
+ * Since this function should only be called when removing a device, it should be impossible
+ * to reach here while gcip_kci_send_cmd() is still waiting (rmmod should fail). Add a simple
+ * check here so we can figure it out more easily if this ever happens.
+ */
+ if (!list_empty(gcip_kci_get_wait_list(kci)))
+ dev_warn(kci->dev, "KCI commands still pending.\n");
+ gcip_mailbox_release(&kci->mailbox);
+}
diff --git a/gcip-kernel-driver/drivers/gcip/gcip-mailbox.c b/gcip-kernel-driver/drivers/gcip/gcip-mailbox.c
new file mode 100644
index 0000000..cbb3c80
--- /dev/null
+++ b/gcip-kernel-driver/drivers/gcip/gcip-mailbox.c
@@ -0,0 +1,680 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * GCIP Mailbox Interface.
+ *
+ * Copyright (C) 2022 Google LLC
+ */
+
+#include <linux/device.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/string.h> /* memcpy */
+#include <linux/wait.h>
+
+#include <gcip/gcip-mailbox.h>
+
+#if IS_ENABLED(CONFIG_GCIP_TEST)
+#include "unittests/helper/gcip-mailbox-controller.h"
+
+#define TEST_TRIGGER_TIMEOUT_RACE(awaiter) gcip_mailbox_controller_trigger_timeout_race(awaiter)
+#else
+#define TEST_TRIGGER_TIMEOUT_RACE(...)
+#endif
+
+#define GET_CMD_QUEUE_HEAD() mailbox->ops->get_cmd_queue_head(mailbox)
+#define GET_CMD_QUEUE_TAIL() mailbox->ops->get_cmd_queue_tail(mailbox)
+#define INC_CMD_QUEUE_TAIL(inc) mailbox->ops->inc_cmd_queue_tail(mailbox, inc)
+#define ACQUIRE_CMD_QUEUE_LOCK(try) mailbox->ops->acquire_cmd_queue_lock(mailbox, try)
+#define RELEASE_CMD_QUEUE_LOCK() mailbox->ops->release_cmd_queue_lock(mailbox)
+
+#define GET_CMD_ELEM_SEQ(cmd) mailbox->ops->get_cmd_elem_seq(mailbox, cmd)
+#define SET_CMD_ELEM_SEQ(cmd, seq) mailbox->ops->set_cmd_elem_seq(mailbox, cmd, seq)
+#define GET_CMD_ELEM_CODE(cmd) mailbox->ops->get_cmd_elem_code(mailbox, cmd)
+
+#define GET_RESP_QUEUE_SIZE() mailbox->ops->get_resp_queue_size(mailbox)
+#define GET_RESP_QUEUE_HEAD() mailbox->ops->get_resp_queue_head(mailbox)
+#define INC_RESP_QUEUE_HEAD(inc) mailbox->ops->inc_resp_queue_head(mailbox, inc)
+#define GET_RESP_QUEUE_TAIL() mailbox->ops->get_resp_queue_tail(mailbox)
+#define ACQUIRE_RESP_QUEUE_LOCK(try) mailbox->ops->acquire_resp_queue_lock(mailbox, try)
+#define RELEASE_RESP_QUEUE_LOCK() mailbox->ops->release_resp_queue_lock(mailbox)
+
+#define GET_RESP_ELEM_SEQ(resp) mailbox->ops->get_resp_elem_seq(mailbox, resp)
+#define SET_RESP_ELEM_SEQ(resp, seq) mailbox->ops->set_resp_elem_seq(mailbox, resp, seq)
+#define GET_RESP_ELEM_STATUS(resp) mailbox->ops->get_resp_elem_status(mailbox, resp)
+#define SET_RESP_ELEM_STATUS(resp, status) mailbox->ops->set_resp_elem_status(mailbox, resp, status)
+
+#define ACQUIRE_WAIT_LIST_LOCK(irqsave, flags) \
+ mailbox->ops->acquire_wait_list_lock(mailbox, irqsave, flags)
+#define RELEASE_WAIT_LIST_LOCK(irqrestore, flags) \
+ mailbox->ops->release_wait_list_lock(mailbox, irqrestore, flags)
+
+struct gcip_mailbox_wait_list_elem {
+ struct list_head list;
+ void *resp;
+ struct gcip_mailbox_resp_awaiter *awaiter;
+};
+
+static void gcip_mailbox_awaiter_release(struct gcip_mailbox_resp_awaiter *awaiter)
+{
+ if (awaiter->release_data)
+ awaiter->release_data(awaiter->data);
+ kfree(awaiter);
+}
+
+static void gcip_mailbox_awaiter_dec_refs(struct gcip_mailbox_resp_awaiter *awaiter)
+{
+ if (refcount_dec_and_test(&awaiter->refs))
+ gcip_mailbox_awaiter_release(awaiter);
+}
+
+/*
+ * Removes the response previously pushed with gcip_mailbox_push_wait_resp().
+ *
+ * This is used when the kernel gives up waiting for the response.
+ */
+static void gcip_mailbox_del_wait_resp(struct gcip_mailbox *mailbox, void *resp)
+{
+ struct gcip_mailbox_wait_list_elem *cur;
+ unsigned long flags;
+ u64 cur_seq, seq = GET_RESP_ELEM_SEQ(resp);
+
+ ACQUIRE_WAIT_LIST_LOCK(true, &flags);
+
+ list_for_each_entry (cur, &mailbox->wait_list, list) {
+ cur_seq = GET_RESP_ELEM_SEQ(cur->resp);
+ if (cur_seq > seq)
+ break;
+ if (cur_seq == seq) {
+ list_del(&cur->list);
+ if (cur->awaiter) {
+ /* Remove the reference of the arrived handler. */
+ gcip_mailbox_awaiter_dec_refs(cur->awaiter);
+ }
+ kfree(cur);
+ break;
+ }
+ }
+
+ RELEASE_WAIT_LIST_LOCK(true, flags);
+}
+
+/*
+ * Adds @resp to @mailbox->wait_list. If @awaiter is not NULL, the @resp is asynchronous.
+ * Otherwise, the @resp is synchronous.
+ *
+ * wait_list is a FIFO queue, with sequence numbers in increasing order.
+ *
+ * Returns 0 on success, or -ENOMEM if allocation failed.
+ */
+static int gcip_mailbox_push_wait_resp(struct gcip_mailbox *mailbox, void *resp,
+ struct gcip_mailbox_resp_awaiter *awaiter)
+{
+ struct gcip_mailbox_wait_list_elem *entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+ unsigned long flags;
+
+ if (!entry)
+ return -ENOMEM;
+
+ /* Increase the reference count for the arrived handler. */
+ if (awaiter)
+ refcount_inc(&awaiter->refs);
+
+ entry->resp = resp;
+ entry->awaiter = awaiter;
+ ACQUIRE_WAIT_LIST_LOCK(true, &flags);
+ list_add_tail(&entry->list, &mailbox->wait_list);
+ RELEASE_WAIT_LIST_LOCK(true, flags);
+
+ return 0;
+}
+
+/*
+ * Pushes @cmd to the command queue of the mailbox and returns. @resp should be passed if the
+ * request is synchronous and the caller wants the response. If @resp is NULL even though the
+ * request is synchronous, @cmd will still be put into the queue, but the caller cannot wait for
+ * the response and must ignore it. If the request is asynchronous, @awaiter should be passed too.
+ */
+static int gcip_mailbox_enqueue_cmd(struct gcip_mailbox *mailbox, void *cmd, void *resp,
+ struct gcip_mailbox_resp_awaiter *awaiter)
+{
+ int ret = 0;
+ u32 tail;
+
+ ACQUIRE_CMD_QUEUE_LOCK(false);
+
+ SET_CMD_ELEM_SEQ(cmd, mailbox->cur_seq);
+ /*
+ * The lock ensures the mailbox cmd_queue_tail cannot be changed by other processes (this
+ * function should be the only one modifying the value of the tail), therefore we can remember
+ * its value here and use it in the condition of the wait_event() call.
+ */
+ tail = GET_CMD_QUEUE_TAIL();
+
+ if (mailbox->ops->wait_for_cmd_queue_not_full) {
+ /* Wait until the cmd queue has a space for putting cmd. */
+ ret = mailbox->ops->wait_for_cmd_queue_not_full(mailbox);
+ if (ret)
+ goto out;
+ } else if (GET_CMD_QUEUE_HEAD() == (tail ^ mailbox->queue_wrap_bit)) {
+ /*
+ * Default logic of checking the fullness of cmd_queue. If the cmd_queue is full,
+ * it's up to the caller to retry.
+ */
+ ret = -EAGAIN;
+ goto out;
+ }
+
+ if (resp) {
+ /* Adds @resp to the wait_list only if the cmd can be pushed successfully. */
+ SET_RESP_ELEM_SEQ(resp, GET_CMD_ELEM_SEQ(cmd));
+ SET_RESP_ELEM_STATUS(resp, GCIP_MAILBOX_STATUS_WAITING_RESPONSE);
+ ret = gcip_mailbox_push_wait_resp(mailbox, resp, awaiter);
+ if (ret)
+ goto out;
+ }
+ /* Size of cmd_queue is a multiple of mailbox->cmd_elem_size. */
+ memcpy(mailbox->cmd_queue + mailbox->cmd_elem_size *
+ CIRC_QUEUE_REAL_INDEX(tail, mailbox->queue_wrap_bit),
+ cmd, mailbox->cmd_elem_size);
+ INC_CMD_QUEUE_TAIL(1);
+ if (mailbox->ops->after_enqueue_cmd) {
+ ret = mailbox->ops->after_enqueue_cmd(mailbox, cmd);
+ if (ret < 0) {
+ /*
+ * Currently, as both DSP and EdgeTPU never return errors, do nothing
+ * here. We can decide later how to roll back state such as
+ * `cmd_queue_tail` once returning an error becomes possible.
+ */
+ dev_warn(mailbox->dev,
+ "after_enqueue_cmd returned an error, but not handled: ret=%d\n",
+ ret);
+ goto out;
+ }
+ mailbox->cur_seq += ret;
+ ret = 0;
+ } else
+ mailbox->cur_seq += 1;
+
+out:
+ RELEASE_CMD_QUEUE_LOCK();
+ if (ret)
+ dev_dbg(mailbox->dev, "%s: ret=%d", __func__, ret);
+
+ return ret;
+}
+
+/*
+ * Handler of a response.
+ * Pops the wait_list until the sequence number of @resp is found, and copies @resp to the found
+ * entry.
+ *
+ * Both the entries in wait_list and the handled responses should have sequence numbers in
+ * increasing order.
+ * Comparing the #seq of the head of wait_list with @resp->seq, we have three cases:
+ * 1. #seq > @resp->seq:
+ * - Nothing to do, @resp is not needed and we're done.
+ * 2. #seq == @resp->seq:
+ * - Copy @resp, pop the head and we're done.
+ * 3. #seq < @resp->seq:
+ * - If @mailbox->ignore_seq_order is specified, this is a normal case and the entry is skipped.
+ * - Otherwise, this *should* not happen: it implies that the sequence numbers of either the
+ * wait_list entries or the responses are out of order, or that the remote didn't respond to a
+ * command. In this case, the status of the skipped entry is set to
+ * GCIP_MAILBOX_STATUS_NO_RESPONSE, and entries are popped until case 1. or 2. applies.
+ */
+static void gcip_mailbox_handle_response(struct gcip_mailbox *mailbox, void *resp)
+{
+ struct gcip_mailbox_wait_list_elem *cur, *nxt;
+ struct gcip_mailbox_resp_awaiter *awaiter;
+ unsigned long flags;
+ u64 cur_seq, seq = GET_RESP_ELEM_SEQ(resp);
+
+ /* If before_handle_resp is defined and it returns false, don't handle the response */
+ if (mailbox->ops->before_handle_resp && !mailbox->ops->before_handle_resp(mailbox, resp))
+ return;
+
+ SET_RESP_ELEM_STATUS(resp, GCIP_MAILBOX_STATUS_OK);
+ ACQUIRE_WAIT_LIST_LOCK(true, &flags);
+
+ list_for_each_entry_safe (cur, nxt, &mailbox->wait_list, list) {
+ cur_seq = GET_RESP_ELEM_SEQ(cur->resp);
+ if (cur_seq > seq) {
+ /*
+ * This response has already timed out and been removed
+ * from the wait list (or this is an invalid response).
+ * Drop it.
+ */
+ break;
+ }
+ if (cur_seq == seq) {
+ memcpy(cur->resp, resp, mailbox->resp_elem_size);
+ list_del(&cur->list);
+ if (cur->awaiter) {
+ awaiter = cur->awaiter;
+
+ /*
+ * The timeout handler will be fired, but it will be pended while waiting to
+ * acquire the wait_list_lock.
+ */
+ TEST_TRIGGER_TIMEOUT_RACE(awaiter);
+
+ /*
+ * If canceling timeout_work succeeded, we have to decrease the
+ * reference count here because the timeout handler will not be
+ * called. Otherwise, the timeout handler has already been canceled or
+ * is pending due to a race. If it was canceled, the count was decreased
+ * already, and if it is pending, the timeout handler will decrease
+ * the awaiter reference.
+ */
+ if (cancel_delayed_work(&awaiter->timeout_work))
+ gcip_mailbox_awaiter_dec_refs(awaiter);
+ /*
+ * If `handle_awaiter_arrived` callback is defined, @awaiter
+ * will be released from the implementation side. Otherwise, it
+ * should be freed from here.
+ */
+ if (mailbox->ops->handle_awaiter_arrived)
+ mailbox->ops->handle_awaiter_arrived(mailbox, awaiter);
+ gcip_mailbox_awaiter_dec_refs(awaiter);
+ }
+ kfree(cur);
+ break;
+ }
+ if (!mailbox->ignore_seq_order && cur_seq < seq) {
+ SET_RESP_ELEM_STATUS(cur->resp, GCIP_MAILBOX_STATUS_NO_RESPONSE);
+ list_del(&cur->list);
+ if (cur->awaiter) {
+ /* Remove the reference of the arrived handler. */
+ gcip_mailbox_awaiter_dec_refs(cur->awaiter);
+ }
+ kfree(cur);
+ }
+ }
+
+ RELEASE_WAIT_LIST_LOCK(true, flags);
+}
+
+/*
+ * Fetches elements in the response queue.
+ *
+ * Returns a pointer to the fetched response elements.
+ * @total_ptr will be set to the number of elements fetched.
+ *
+ * Returns ERR_PTR(-ENOMEM) if memory allocation failed.
+ * Returns NULL if the response queue is empty or there is another worker fetching responses.
+ */
+static void *gcip_mailbox_fetch_responses(struct gcip_mailbox *mailbox, u32 *total_ptr)
+{
+ u32 head;
+ u32 tail;
+ u32 count;
+ u32 i;
+ u32 j;
+ u32 total = 0;
+ const u32 wrap_bit = mailbox->queue_wrap_bit;
+ const u32 size = GET_RESP_QUEUE_SIZE();
+ const u32 elem_size = mailbox->resp_elem_size;
+ void *ret = NULL; /* Array of responses. */
+ void *prev_ptr = NULL; /* Temporary pointer to realloc ret. */
+
+ /* Someone is working on consuming - we can leave early. */
+ if (!ACQUIRE_RESP_QUEUE_LOCK(true))
+ goto out;
+
+ head = GET_RESP_QUEUE_HEAD();
+ /* Loops until our head equals the CSR tail. */
+ while (1) {
+ tail = GET_RESP_QUEUE_TAIL();
+ /*
+ * Make sure the CSR is read and reported properly by checking if any bit higher
+ * than wrap_bit is set and if the tail exceeds resp_queue size.
+ */
+ if (unlikely(tail & ~CIRC_QUEUE_VALID_MASK(wrap_bit) ||
+ CIRC_QUEUE_REAL_INDEX(tail, wrap_bit) >= size)) {
+ dev_err_ratelimited(mailbox->dev, "Invalid response queue tail: %#x\n",
+ tail);
+ break;
+ }
+
+ count = gcip_circ_queue_cnt(head, tail, size, wrap_bit);
+ if (count == 0)
+ break;
+
+ prev_ptr = ret;
+ ret = krealloc(prev_ptr, (total + count) * elem_size, GFP_KERNEL);
+ /*
+ * Out of memory: return the previously fetched responses if there are any, or
+ * ERR_PTR(-ENOMEM) otherwise.
+ */
+ if (!ret) {
+ if (!prev_ptr)
+ ret = ERR_PTR(-ENOMEM);
+ else
+ ret = prev_ptr;
+ break;
+ }
+ /* Copies responses. */
+ j = CIRC_QUEUE_REAL_INDEX(head, wrap_bit);
+ for (i = 0; i < count; i++) {
+ memcpy(ret + elem_size * total, mailbox->resp_queue + elem_size * j,
+ elem_size);
+ j = (j + 1) % size;
+ total++;
+ }
+ head = gcip_circ_queue_inc(head, count, size, wrap_bit);
+ }
+ INC_RESP_QUEUE_HEAD(total);
+
+ RELEASE_RESP_QUEUE_LOCK();
+
+ if (mailbox->ops->after_fetch_resps)
+ mailbox->ops->after_fetch_resps(mailbox, total);
+out:
+ *total_ptr = total;
+ return ret;
+}
+
+/* Fetches one response from the response queue. */
+static int gcip_mailbox_fetch_one_response(struct gcip_mailbox *mailbox, void *resp)
+{
+ u32 head;
+ u32 tail;
+
+ if (!ACQUIRE_RESP_QUEUE_LOCK(true))
+ return 0;
+
+ head = GET_RESP_QUEUE_HEAD();
+ tail = GET_RESP_QUEUE_TAIL();
+ /* Queue empty. */
+ if (head == tail) {
+ RELEASE_RESP_QUEUE_LOCK();
+ return 0;
+ }
+
+ memcpy(resp,
+ mailbox->resp_queue + CIRC_QUEUE_REAL_INDEX(head, mailbox->queue_wrap_bit) *
+ mailbox->resp_elem_size,
+ mailbox->resp_elem_size);
+ INC_RESP_QUEUE_HEAD(1);
+
+ RELEASE_RESP_QUEUE_LOCK();
+
+ if (mailbox->ops->after_fetch_resps)
+ mailbox->ops->after_fetch_resps(mailbox, 1);
+
+ return 1;
+}
+
+/* Handles the timed out asynchronous commands. */
+static void gcip_mailbox_async_cmd_timeout_work(struct work_struct *work)
+{
+ struct gcip_mailbox_resp_awaiter *awaiter =
+ container_of(work, struct gcip_mailbox_resp_awaiter, timeout_work.work);
+ struct gcip_mailbox *mailbox = awaiter->mailbox;
+
+ /*
+ * This function will acquire the mailbox wait_list_lock. This means if
+ * response processing is in progress, it will complete before this
+ * response can be removed from the wait list.
+ *
+ * Once this function has the wait_list_lock, no future response
+ * processing will begin until this response has been removed.
+ */
+ gcip_mailbox_del_wait_resp(mailbox, awaiter->resp);
+
+ /*
+ * Handle timed out awaiter. If `handle_awaiter_timedout` is defined, @awaiter
+ * will be released from the implementation side. Otherwise, it should be freed from here.
+ */
+ if (mailbox->ops->handle_awaiter_timedout)
+ mailbox->ops->handle_awaiter_timedout(mailbox, awaiter);
+
+ /* Remove the reference of the timedout handler. */
+ gcip_mailbox_awaiter_dec_refs(awaiter);
+}
+
+/* Cleans up all the asynchronous responses which are not responded yet. */
+static void gcip_mailbox_flush_awaiter(struct gcip_mailbox *mailbox)
+{
+ struct gcip_mailbox_wait_list_elem *cur, *nxt;
+ struct gcip_mailbox_resp_awaiter *awaiter;
+ struct list_head resps_to_flush;
+
+ /* If mailbox->ops is NULL, the mailbox is already released. */
+ if (!mailbox->ops)
+ return;
+
+ /*
+ * At this point only async responses should be pending. Flush them all
+ * from the `wait_list` at once so any remaining timeout workers
+ * waiting on `wait_list_lock` will know their responses have been
+ * handled already.
+ */
+ INIT_LIST_HEAD(&resps_to_flush);
+ ACQUIRE_WAIT_LIST_LOCK(false, NULL);
+ list_for_each_entry_safe (cur, nxt, &mailbox->wait_list, list) {
+ list_del(&cur->list);
+ if (cur->awaiter) {
+ list_add_tail(&cur->list, &resps_to_flush);
+ /*
+ * Clear the response's destination queue so that if the
+ * timeout worker is running, it won't try to process
+ * this response after `wait_list_lock` is released.
+ */
+ awaiter = cur->awaiter;
+ if (mailbox->ops->flush_awaiter)
+ mailbox->ops->flush_awaiter(mailbox, awaiter);
+ /* Remove the reference of the arrived handler. */
+ gcip_mailbox_awaiter_dec_refs(cur->awaiter);
+ } else {
+ dev_warn(mailbox->dev,
+ "Unexpected synchronous command pending on mailbox release\n");
+ kfree(cur);
+ }
+ }
+ RELEASE_WAIT_LIST_LOCK(false, 0);
+
+ /*
+ * Cancel the timeout work for, and release, any responses that were still in
+ * the `wait_list` above.
+ */
+ list_for_each_entry_safe (cur, nxt, &resps_to_flush, list) {
+ list_del(&cur->list);
+ awaiter = cur->awaiter;
+ /* Cancel the timeout work and remove the reference of the timedout handler. */
+ gcip_mailbox_cancel_awaiter_timeout(awaiter);
+ /* Remove the reference of the caller. */
+ gcip_mailbox_awaiter_dec_refs(cur->awaiter);
+ kfree(cur);
+ }
+}
+
+/* Verifies and sets the mailbox operators. */
+static int gcip_mailbox_set_ops(struct gcip_mailbox *mailbox, const struct gcip_mailbox_ops *ops)
+{
+ if (!ops) {
+ mailbox->ops = NULL;
+ return 0;
+ }
+
+ if (!ops->get_cmd_queue_head || !ops->get_cmd_queue_tail || !ops->inc_cmd_queue_tail ||
+ !ops->acquire_cmd_queue_lock || !ops->release_cmd_queue_lock ||
+ !ops->get_cmd_elem_seq || !ops->set_cmd_elem_seq || !ops->get_cmd_elem_code) {
+ dev_err(mailbox->dev, "Incomplete mailbox CMD queue ops.\n");
+ return -EINVAL;
+ }
+
+ if (!ops->get_resp_queue_size || !ops->get_resp_queue_head || !ops->get_resp_queue_tail ||
+ !ops->inc_resp_queue_head || !ops->acquire_resp_queue_lock ||
+ !ops->release_resp_queue_lock || !ops->get_resp_elem_seq || !ops->set_resp_elem_seq ||
+ !ops->get_resp_elem_status || !ops->set_resp_elem_status) {
+ dev_err(mailbox->dev, "Incomplete mailbox RESP queue ops.\n");
+ return -EINVAL;
+ }
+
+ if (!ops->acquire_wait_list_lock || !ops->release_wait_list_lock) {
+ dev_err(mailbox->dev, "Incomplete mailbox wait_list ops.\n");
+ return -EINVAL;
+ }
+
+ mailbox->ops = ops;
+
+ return 0;
+}
+
+/* Sets the mailbox private data. */
+static inline void gcip_mailbox_set_data(struct gcip_mailbox *mailbox, void *data)
+{
+ mailbox->data = data;
+}
+
+int gcip_mailbox_init(struct gcip_mailbox *mailbox, const struct gcip_mailbox_args *args)
+{
+ int ret;
+
+ mailbox->dev = args->dev;
+ mailbox->queue_wrap_bit = args->queue_wrap_bit;
+ mailbox->cmd_queue = args->cmd_queue;
+ mailbox->cmd_elem_size = args->cmd_elem_size;
+ mailbox->resp_queue = args->resp_queue;
+ mailbox->resp_elem_size = args->resp_elem_size;
+ mailbox->timeout = args->timeout;
+ mailbox->cur_seq = 0;
+ mailbox->ignore_seq_order = args->ignore_seq_order;
+ gcip_mailbox_set_data(mailbox, args->data);
+
+ ret = gcip_mailbox_set_ops(mailbox, args->ops);
+ if (ret)
+ goto err_unset_data;
+
+ INIT_LIST_HEAD(&mailbox->wait_list);
+ init_waitqueue_head(&mailbox->wait_list_waitq);
+
+ return 0;
+
+err_unset_data:
+ gcip_mailbox_set_data(mailbox, NULL);
+
+ return ret;
+}
+
+void gcip_mailbox_release(struct gcip_mailbox *mailbox)
+{
+ gcip_mailbox_flush_awaiter(mailbox);
+ gcip_mailbox_set_ops(mailbox, NULL);
+ gcip_mailbox_set_data(mailbox, NULL);
+}
+
+void gcip_mailbox_consume_responses_work(struct gcip_mailbox *mailbox)
+{
+ void *responses;
+ u32 i;
+ u32 count = 0;
+
+ /* Fetches responses and bumps resp_queue head. */
+ responses = gcip_mailbox_fetch_responses(mailbox, &count);
+ if (count == 0)
+ return;
+ if (IS_ERR(responses)) {
+ dev_err(mailbox->dev, "GCIP mailbox failed on fetching responses: %ld",
+ PTR_ERR(responses));
+ return;
+ }
+
+ for (i = 0; i < count; i++)
+ gcip_mailbox_handle_response(mailbox, responses + mailbox->resp_elem_size * i);
+ /* Responses handled, wake up threads that are waiting for a response. */
+ wake_up(&mailbox->wait_list_waitq);
+ kfree(responses);
+}
+
+int gcip_mailbox_send_cmd(struct gcip_mailbox *mailbox, void *cmd, void *resp)
+{
+ int ret;
+
+ ret = gcip_mailbox_enqueue_cmd(mailbox, cmd, resp, NULL);
+ if (ret)
+ return ret;
+
+ if (!resp)
+ return 0;
+
+ ret = wait_event_timeout(mailbox->wait_list_waitq,
+ GET_RESP_ELEM_STATUS(resp) != GCIP_MAILBOX_STATUS_WAITING_RESPONSE,
+ msecs_to_jiffies(mailbox->timeout));
+ if (!ret) {
+ dev_dbg(mailbox->dev, "event wait timeout");
+ gcip_mailbox_del_wait_resp(mailbox, resp);
+ return -ETIMEDOUT;
+ }
+ if (GET_RESP_ELEM_STATUS(resp) != GCIP_MAILBOX_STATUS_OK) {
+ dev_err(mailbox->dev, "Mailbox cmd %u response status %u", GET_CMD_ELEM_CODE(cmd),
+ GET_RESP_ELEM_STATUS(resp));
+ return -ENOMSG;
+ }
+
+ return 0;
+}
+
+struct gcip_mailbox_resp_awaiter *gcip_mailbox_put_cmd(struct gcip_mailbox *mailbox, void *cmd,
+ void *resp, void *data)
+{
+ struct gcip_mailbox_resp_awaiter *awaiter;
+ int ret;
+
+ awaiter = kzalloc(sizeof(*awaiter), GFP_KERNEL);
+ if (!awaiter)
+ return ERR_PTR(-ENOMEM);
+
+ awaiter->resp = resp;
+ awaiter->mailbox = mailbox;
+ awaiter->data = data;
+ awaiter->release_data = mailbox->ops->release_awaiter_data;
+ /* 2 refs: the caller (vd) and the timeout handler. */
+ refcount_set(&awaiter->refs, 2);
+
+ INIT_DELAYED_WORK(&awaiter->timeout_work, gcip_mailbox_async_cmd_timeout_work);
+ schedule_delayed_work(&awaiter->timeout_work, msecs_to_jiffies(mailbox->timeout));
+
+ ret = gcip_mailbox_enqueue_cmd(mailbox, cmd, awaiter->resp, awaiter);
+ if (ret)
+ goto err_free_resp;
+
+ return awaiter;
+
+err_free_resp:
+ gcip_mailbox_cancel_awaiter_timeout(awaiter);
+ kfree(awaiter);
+ return ERR_PTR(ret);
+}
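+
+/*
+ * Example of an asynchronous command (illustrative sketch; async_resp is a hypothetical
+ * per-command context holding the response element). The implementation drops the caller's
+ * reference with gcip_mailbox_release_awaiter() once it is done with the awaiter, typically
+ * from the handle_awaiter_arrived or handle_awaiter_timedout callbacks:
+ *
+ *	awaiter = gcip_mailbox_put_cmd(mailbox, &cmd, &async_resp->resp, async_resp);
+ *	if (IS_ERR(awaiter))
+ *		return PTR_ERR(awaiter);
+ *	async_resp->awaiter = awaiter;
+ */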
+
+void gcip_mailbox_cancel_awaiter(struct gcip_mailbox_resp_awaiter *awaiter)
+{
+ gcip_mailbox_del_wait_resp(awaiter->mailbox, awaiter->resp);
+ gcip_mailbox_cancel_awaiter_timeout(awaiter);
+}
+
+void gcip_mailbox_cancel_awaiter_timeout(struct gcip_mailbox_resp_awaiter *awaiter)
+{
+ if (cancel_delayed_work_sync(&awaiter->timeout_work))
+ gcip_mailbox_awaiter_dec_refs(awaiter);
+}
+
+void gcip_mailbox_release_awaiter(struct gcip_mailbox_resp_awaiter *awaiter)
+{
+ gcip_mailbox_awaiter_dec_refs(awaiter);
+}
+
+void gcip_mailbox_consume_one_response(struct gcip_mailbox *mailbox, void *resp)
+{
+ int ret;
+
+ /* Fetches (at most) one response. */
+ ret = gcip_mailbox_fetch_one_response(mailbox, resp);
+ if (!ret)
+ return;
+
+ gcip_mailbox_handle_response(mailbox, resp);
+
+ /* Responses handled, wakes up threads that are waiting for a response. */
+ wake_up(&mailbox->wait_list_waitq);
+}
diff --git a/gcip-kernel-driver/drivers/gcip/gcip-mem-pool.c b/gcip-kernel-driver/drivers/gcip/gcip-mem-pool.c
new file mode 100644
index 0000000..3e18051
--- /dev/null
+++ b/gcip-kernel-driver/drivers/gcip/gcip-mem-pool.c
@@ -0,0 +1,69 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * A simple memory allocator to help with allocating from reserved memory pools.
+ *
+ * Copyright (C) 2022 Google LLC
+ */
+
+#include <linux/device.h>
+#include <linux/genalloc.h>
+#include <linux/log2.h>
+#include <linux/types.h>
+
+#include <gcip/gcip-mem-pool.h>
+
+int gcip_mem_pool_init(struct gcip_mem_pool *pool, struct device *dev, phys_addr_t base_paddr,
+ size_t size, size_t granule)
+{
+ int ret;
+
+ if (!base_paddr || granule == 0)
+ return -EINVAL;
+ if (base_paddr % granule || size % granule)
+ return -EINVAL;
+ pool->gen_pool = gen_pool_create(ilog2(granule), -1);
+ if (!pool->gen_pool) {
+ dev_err(dev, "gcip memory pool allocate gen_pool failed");
+ return -ENOMEM;
+ }
+ ret = gen_pool_add(pool->gen_pool, base_paddr, size, -1);
+ if (ret) {
+ gen_pool_destroy(pool->gen_pool);
+ pool->gen_pool = NULL;
+ dev_err(dev, "gcip failed to add memory to mem pool: %d", ret);
+ return ret;
+ }
+ pool->dev = dev;
+ pool->granule = granule;
+ pool->base_paddr = base_paddr;
+ return 0;
+}
+
+void gcip_mem_pool_exit(struct gcip_mem_pool *pool)
+{
+ if (!pool->gen_pool)
+ return;
+ gen_pool_destroy(pool->gen_pool);
+ pool->gen_pool = NULL;
+}
+
+phys_addr_t gcip_mem_pool_alloc(struct gcip_mem_pool *pool, size_t size)
+{
+ unsigned long addr;
+ size_t aligned_size = ALIGN(size, pool->granule);
+
+ addr = gen_pool_alloc(pool->gen_pool, aligned_size);
+ if (!addr)
+ return 0;
+ dev_dbg(pool->dev, "%s @ size = %#zx paddr=%#lx", __func__, size, addr);
+ return (phys_addr_t)addr;
+}
+
+void gcip_mem_pool_free(struct gcip_mem_pool *pool, phys_addr_t paddr, size_t size)
+{
+ unsigned long addr = paddr;
+
+ dev_dbg(pool->dev, "%s @ size = %#zx paddr=%#lx", __func__, size, addr);
+ size = ALIGN(size, pool->granule);
+ gen_pool_free(pool->gen_pool, addr, size);
+}
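+
+/*
+ * Example usage (illustrative sketch; the base physical address, pool size and granule below
+ * are placeholders):
+ *
+ *	struct gcip_mem_pool pool;
+ *	phys_addr_t paddr;
+ *
+ *	ret = gcip_mem_pool_init(&pool, dev, 0x90000000, SZ_4M, SZ_4K);
+ *	if (ret)
+ *		return ret;
+ *	paddr = gcip_mem_pool_alloc(&pool, SZ_64K);
+ *	if (!paddr)
+ *		goto err_exit_pool;
+ *	...
+ *	gcip_mem_pool_free(&pool, paddr, SZ_64K);
+ *	gcip_mem_pool_exit(&pool);
+ */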
diff --git a/gcip-kernel-driver/drivers/gcip/gcip-telemetry.c b/gcip-kernel-driver/drivers/gcip/gcip-telemetry.c
new file mode 100644
index 0000000..f557c24
--- /dev/null
+++ b/gcip-kernel-driver/drivers/gcip/gcip-telemetry.c
@@ -0,0 +1,267 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * GCIP telemetry: logging and tracing.
+ *
+ * Copyright (C) 2022 Google LLC
+ */
+
+#include <linux/delay.h>
+#include <linux/dev_printk.h>
+#include <linux/eventfd.h>
+#include <linux/log2.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/workqueue.h>
+
+#include <gcip/gcip-telemetry.h>
+
+int gcip_telemetry_kci(struct gcip_telemetry *tel,
+ int (*send_kci)(struct gcip_telemetry_kci_args *),
+ struct gcip_telemetry_kci_args *args)
+{
+ int err;
+
+ dev_dbg(tel->dev, "Sending KCI %s", tel->name);
+ err = send_kci(args);
+
+ if (err < 0) {
+ dev_err(tel->dev, "KCI %s failed - %d", tel->name, err);
+ return err;
+ }
+
+ if (err > 0) {
+ dev_err(tel->dev, "KCI %s returned %d", tel->name, err);
+ return -EBADMSG;
+ }
+
+ dev_dbg(tel->dev, "KCI %s Succeeded", tel->name);
+
+ return 0;
+}
+
+int gcip_telemetry_set_event(struct gcip_telemetry *tel, u32 eventfd)
+{
+ struct eventfd_ctx *ctx;
+ ulong flags;
+
+ ctx = eventfd_ctx_fdget(eventfd);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
+
+ write_lock_irqsave(&tel->ctx_lock, flags);
+ if (tel->ctx)
+ eventfd_ctx_put(tel->ctx);
+ tel->ctx = ctx;
+ write_unlock_irqrestore(&tel->ctx_lock, flags);
+
+ return 0;
+}
+
+void gcip_telemetry_unset_event(struct gcip_telemetry *tel)
+{
+ ulong flags;
+
+ write_lock_irqsave(&tel->ctx_lock, flags);
+ if (tel->ctx)
+ eventfd_ctx_put(tel->ctx);
+ tel->ctx = NULL;
+ write_unlock_irqrestore(&tel->ctx_lock, flags);
+}
+
+/* Copy data out of the log buffer with wrapping. */
+static void copy_with_wrap(struct gcip_telemetry_header *header, void *dest, u32 length, u32 size,
+ void *start)
+{
+ const u32 wrap_bit = size + sizeof(*header);
+ u32 remaining = 0;
+ u32 head = header->head & (wrap_bit - 1);
+
+ if (head + length < size) {
+ memcpy(dest, start + head, length);
+ header->head += length;
+ } else {
+ remaining = size - head;
+ memcpy(dest, start + head, remaining);
+ memcpy(dest + remaining, start, length - remaining);
+ header->head = (header->head & wrap_bit) ^ wrap_bit;
+ header->head |= length - remaining;
+ }
+}
+
+void gcip_telemetry_fw_log(struct gcip_telemetry *log)
+{
+ struct device *dev = log->dev;
+ struct gcip_telemetry_header *header = log->header;
+ struct gcip_log_entry_header entry;
+ u8 *start;
+ const size_t queue_size = header->size - sizeof(*header);
+ const size_t max_length = queue_size - sizeof(entry);
+ char *buffer = kmalloc(max_length + 1, GFP_ATOMIC);
+
+ if (!buffer) {
+ header->head = header->tail;
+ return;
+ }
+ start = (u8 *)header + sizeof(*header);
+
+ while (header->head != header->tail) {
+ copy_with_wrap(header, &entry, sizeof(entry), queue_size, start);
+ if (entry.length == 0 || entry.length > max_length) {
+ header->head = header->tail;
+ dev_err(dev, "log queue is corrupted");
+ break;
+ }
+ copy_with_wrap(header, buffer, entry.length, queue_size, start);
+ buffer[entry.length] = 0;
+
+ if (entry.code > GCIP_FW_DMESG_LOG_LEVEL)
+ continue;
+
+ switch (entry.code) {
+ case GCIP_FW_LOG_LEVEL_VERBOSE:
+ case GCIP_FW_LOG_LEVEL_DEBUG:
+ dev_dbg(dev, "%s", buffer);
+ break;
+ case GCIP_FW_LOG_LEVEL_WARN:
+ dev_warn(dev, "%s", buffer);
+ break;
+ case GCIP_FW_LOG_LEVEL_ERROR:
+ dev_err(dev, "%s", buffer);
+ break;
+ case GCIP_FW_LOG_LEVEL_INFO:
+ default:
+ dev_info(dev, "%s", buffer);
+ break;
+ }
+ }
+ kfree(buffer);
+}
+
+void gcip_telemetry_fw_trace(struct gcip_telemetry *trace)
+{
+ struct gcip_telemetry_header *header = trace->header;
+
+ header->head = header->tail;
+}
+
+void gcip_telemetry_irq_handler(struct gcip_telemetry *tel)
+{
+ spin_lock(&tel->state_lock);
+
+ if (tel->state == GCIP_TELEMETRY_ENABLED && tel->header->head != tel->header->tail)
+ schedule_work(&tel->work);
+
+ spin_unlock(&tel->state_lock);
+}
+
+void gcip_telemetry_inc_mmap_count(struct gcip_telemetry *tel, int dif)
+{
+ mutex_lock(&tel->mmap_lock);
+ tel->mmapped_count += dif;
+ mutex_unlock(&tel->mmap_lock);
+}
+
+int gcip_telemetry_mmap_buffer(struct gcip_telemetry *tel, int (*mmap)(void *), void *args)
+{
+ int ret;
+
+ mutex_lock(&tel->mmap_lock);
+
+ if (!tel->mmapped_count) {
+ ret = mmap(args);
+
+ if (!ret)
+ tel->mmapped_count = 1;
+ } else {
+ ret = -EBUSY;
+ dev_warn(tel->dev, "%s is already mmapped %ld times", tel->name,
+ tel->mmapped_count);
+ }
+
+ mutex_unlock(&tel->mmap_lock);
+
+ return ret;
+}
+
+/* Worker for processing log/trace buffers. */
+static void gcip_telemetry_worker(struct work_struct *work)
+{
+ struct gcip_telemetry *tel = container_of(work, struct gcip_telemetry, work);
+ u32 prev_head;
+ ulong flags;
+
+ /*
+ * Loops while telemetry is enabled, there is data to be consumed, and the previous iteration
+ * made progress. If another IRQ arrives just after the last head != tail check, the worker
+ * should get scheduled again.
+ */
+ do {
+ spin_lock_irqsave(&tel->state_lock, flags);
+ if (tel->state != GCIP_TELEMETRY_ENABLED) {
+ spin_unlock_irqrestore(&tel->state_lock, flags);
+ return;
+ }
+
+ prev_head = tel->header->head;
+ if (tel->header->head != tel->header->tail) {
+ read_lock(&tel->ctx_lock);
+ if (tel->ctx)
+ eventfd_signal(tel->ctx, 1);
+ else
+ tel->fallback_fn(tel);
+ read_unlock(&tel->ctx_lock);
+ }
+
+ spin_unlock_irqrestore(&tel->state_lock, flags);
+ msleep(GCIP_TELEMETRY_LOG_RECHECK_DELAY);
+ } while (tel->header->head != tel->header->tail && tel->header->head != prev_head);
+}
+
+int gcip_telemetry_init(struct device *dev, struct gcip_telemetry *tel, const char *name,
+ void *vaddr, const size_t size,
+ void (*fallback_fn)(struct gcip_telemetry *))
+{
+ if (!is_power_of_2(size) || size <= sizeof(struct gcip_telemetry_header)) {
+ dev_err(dev,
+ "Size of GCIP telemetry buffer must be a power of 2 and greater than %zu.",
+ sizeof(struct gcip_telemetry_header));
+ return -EINVAL;
+ }
+
+ rwlock_init(&tel->ctx_lock);
+ tel->name = name;
+ tel->dev = dev;
+
+ tel->header = vaddr;
+ tel->header->head = 0;
+ tel->header->tail = 0;
+ tel->header->size = size;
+ tel->header->entries_dropped = 0;
+
+ tel->ctx = NULL;
+
+ spin_lock_init(&tel->state_lock);
+ INIT_WORK(&tel->work, gcip_telemetry_worker);
+ tel->fallback_fn = fallback_fn;
+ tel->state = GCIP_TELEMETRY_ENABLED;
+ mutex_init(&tel->mmap_lock);
+ tel->mmapped_count = 0;
+
+ return 0;
+}
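+
+/*
+ * Example initialization (illustrative sketch; the buffer name, virtual address and size are
+ * placeholders a chip driver would provide):
+ *
+ *	ret = gcip_telemetry_init(dev, &chip->log_tel, "telemetry_log", log_buf_vaddr, SZ_16K,
+ *				  gcip_telemetry_fw_log);
+ *	if (ret)
+ *		return ret;
+ *
+ * The chip IRQ handler then calls gcip_telemetry_irq_handler(&chip->log_tel) whenever the
+ * firmware rings the telemetry doorbell.
+ */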
+
+void gcip_telemetry_exit(struct gcip_telemetry *tel)
+{
+ ulong flags;
+
+ spin_lock_irqsave(&tel->state_lock, flags);
+ /* Prevents racing with the IRQ handler or worker. */
+ tel->state = GCIP_TELEMETRY_INVALID;
+ spin_unlock_irqrestore(&tel->state_lock, flags);
+ cancel_work_sync(&tel->work);
+
+ if (tel->ctx)
+ eventfd_ctx_put(tel->ctx);
+ tel->ctx = NULL;
+}
diff --git a/gcip-kernel-driver/include/gcip/gcip-alloc-helper.h b/gcip-kernel-driver/include/gcip/gcip-alloc-helper.h
new file mode 100644
index 0000000..3d2c110
--- /dev/null
+++ b/gcip-kernel-driver/include/gcip/gcip-alloc-helper.h
@@ -0,0 +1,50 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * GCIP helpers for allocating memories.
+ *
+ * Copyright (C) 2022 Google LLC
+ */
+
+#ifndef __GCIP_ALLOC_HELPER_H__
+#define __GCIP_ALLOC_HELPER_H__
+
+#include <linux/device.h>
+#include <linux/scatterlist.h>
+#include <linux/types.h>
+
+/*
+ * The actual object backing the return value of gcip_alloc_noncontiguous().
+ * The user should only care about @sgt. @mem is used internally for freeing memory.
+ */
+struct gcip_sgt_handle {
+ struct sg_table sgt;
+ void *mem;
+};
+
+/*
+ * Allocates non-contiguous memory with size @size bytes.
+ *
+ * @dev: pointer to device structure. Used for logging and as the NUMA node for page allocation.
+ * @size: Total size in bytes. Will be page-aligned.
+ * @gfp: The GFP flags used to allocate internal structures.
+ *
+ * Returns the SG table that represents the non-contiguous region.
+ * Returns NULL on any error.
+ */
+struct sg_table *gcip_alloc_noncontiguous(struct device *dev, size_t size, gfp_t gfp);
+/* Frees the memory allocated by gcip_alloc_noncontiguous. */
+void gcip_free_noncontiguous(struct sg_table *sgt);
+
+/*
+ * Returns the virtual memory that was used to allocate @sgt.
+ *
+ * @sgt must be the return pointer of gcip_alloc_noncontiguous.
+ */
+static inline void *gcip_noncontiguous_sgt_to_mem(struct sg_table *sgt)
+{
+ struct gcip_sgt_handle *sh = container_of(sgt, struct gcip_sgt_handle, sgt);
+
+ return sh->mem;
+}
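+
+/*
+ * Example usage (illustrative sketch; the size is a placeholder):
+ *
+ *	struct sg_table *sgt = gcip_alloc_noncontiguous(dev, SZ_256K, GFP_KERNEL);
+ *	void *mem;
+ *
+ *	if (!sgt)
+ *		return -ENOMEM;
+ *	mem = gcip_noncontiguous_sgt_to_mem(sgt);
+ *	...
+ *	gcip_free_noncontiguous(sgt);
+ */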
+
+#endif /* __GCIP_ALLOC_HELPER_H__ */
diff --git a/gcip-kernel-driver/include/gcip/gcip-common-image-header.h b/gcip-kernel-driver/include/gcip/gcip-common-image-header.h
new file mode 100644
index 0000000..d986fbc
--- /dev/null
+++ b/gcip-kernel-driver/include/gcip/gcip-common-image-header.h
@@ -0,0 +1,67 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Common authenticated image format for Google SoCs
+ *
+ * Copyright (C) 2022 Google LLC
+ */
+
+#ifndef __GCIP_COMMON_IMAGE_HEADER_H__
+#define __GCIP_COMMON_IMAGE_HEADER_H__
+
+#include <linux/types.h>
+
+#include "gcip-image-config.h"
+
+#define GCIP_FW_HEADER_SIZE (0x1000)
+
+struct gcip_common_image_sub_header_common {
+ uint32_t magic;
+ uint32_t generation;
+ uint32_t rollback_info;
+ uint32_t length;
+ uint8_t flags[16];
+};
+
+struct gcip_common_image_sub_header_gen1 {
+ uint8_t body_hash[32];
+ uint8_t chip_id[32];
+ uint8_t auth_config[256];
+ struct gcip_image_config image_config;
+};
+
+struct gcip_common_image_sub_header_gen2 {
+ uint8_t body_hash[64];
+ uint8_t chip_id[32];
+ uint8_t auth_config[256];
+ struct gcip_image_config image_config;
+};
+
+struct gcip_common_image_header {
+ uint8_t sig[512];
+ uint8_t pub[512];
+ struct {
+ struct gcip_common_image_sub_header_common common;
+ union {
+ struct gcip_common_image_sub_header_gen1 gen1;
+ struct gcip_common_image_sub_header_gen2 gen2;
+ };
+ };
+};
+
+/*
+ * Returns the image config field from a common image header
+ * or NULL if the header has an invalid generation identifier
+ */
+static inline struct gcip_image_config *
+get_image_config_from_hdr(struct gcip_common_image_header *hdr)
+{
+ switch (hdr->common.generation) {
+ case 1:
+ return &hdr->gen1.image_config;
+ case 2:
+ return &hdr->gen2.image_config;
+ }
+ return NULL;
+}
+
+#endif /* __GCIP_COMMON_IMAGE_HEADER_H__ */
diff --git a/gcip-kernel-driver/include/gcip/gcip-domain-pool.h b/gcip-kernel-driver/include/gcip/gcip-domain-pool.h
new file mode 100644
index 0000000..b740bf9
--- /dev/null
+++ b/gcip-kernel-driver/include/gcip/gcip-domain-pool.h
@@ -0,0 +1,49 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * GCIP IOMMU domain allocator.
+ *
+ * Copyright (C) 2022 Google LLC
+ */
+
+#ifndef __GCIP_DOMAIN_POOL_H__
+#define __GCIP_DOMAIN_POOL_H__
+
+#include <linux/idr.h>
+#include <linux/iommu.h>
+
+struct gcip_domain_pool {
+ struct ida idp; /* ID allocator to keep track of used domains. */
+ /*
+ * Size of the pool. Can be set to 0, in which case the implementation will fall back to
+ * dynamic domain allocation using the IOMMU API directly.
+ */
+ unsigned int size;
+ struct iommu_domain **array; /* Array holding the pointers to pre-allocated domains. */
+ struct device *dev; /* The device used for logging warnings/errors. */
+};
+
+/*
+ * Initializes a domain pool.
+ *
+ * @dev: pointer to device structure.
+ * @pool: caller-allocated pool structure.
+ * @size: size of the pre-allocated domains pool.
+ * Set to zero to fall back to dynamically allocated domains.
+ *
+ * Returns 0 on success or a negative error value.
+ */
+int gcip_domain_pool_init(struct device *dev, struct gcip_domain_pool *pool, unsigned int size);
+
+/*
+ * Allocates a domain from the pool.
+ * Returns NULL on error.
+ */
+struct iommu_domain *gcip_domain_pool_alloc(struct gcip_domain_pool *pool);
+
+/* Releases a domain back to the pool. */
+void gcip_domain_pool_free(struct gcip_domain_pool *pool, struct iommu_domain *domain);
+
+/* Cleans up all resources used by the domain pool. */
+void gcip_domain_pool_destroy(struct gcip_domain_pool *pool);
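+
+/*
+ * Example usage (illustrative sketch; the pool size is a placeholder):
+ *
+ *	struct gcip_domain_pool pool;
+ *	struct iommu_domain *domain;
+ *
+ *	ret = gcip_domain_pool_init(dev, &pool, 4);
+ *	if (ret)
+ *		return ret;
+ *	domain = gcip_domain_pool_alloc(&pool);
+ *	if (!domain)
+ *		goto err_destroy_pool;
+ *	...
+ *	gcip_domain_pool_free(&pool, domain);
+ *	gcip_domain_pool_destroy(&pool);
+ */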
+
+#endif /* __GCIP_DOMAIN_POOL_H__ */
diff --git a/gcip-kernel-driver/include/gcip/gcip-firmware.h b/gcip-kernel-driver/include/gcip/gcip-firmware.h
new file mode 100644
index 0000000..b856e5c
--- /dev/null
+++ b/gcip-kernel-driver/include/gcip/gcip-firmware.h
@@ -0,0 +1,49 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * GCIP firmware interface.
+ *
+ * Copyright (C) 2022 Google LLC
+ */
+
+#ifndef __GCIP_FIRMWARE_H__
+#define __GCIP_FIRMWARE_H__
+
+#include <linux/types.h>
+
+enum gcip_fw_status {
+ /* No firmware loaded yet, or last firmware failed to run. */
+ GCIP_FW_INVALID = 0,
+ /* Load in progress. */
+ GCIP_FW_LOADING = 1,
+ /* Current firmware is valid and can be restarted. */
+ GCIP_FW_VALID = 2,
+};
+
+/* Firmware flavors returned via KCI FIRMWARE_INFO command. */
+enum gcip_fw_flavor {
+ /* Unused value for extending enum storage type. */
+ GCIP_FW_FLAVOR_ERROR = -1,
+ /* Used by host when cannot determine the flavor. */
+ GCIP_FW_FLAVOR_UNKNOWN = 0,
+ /* Second-stage bootloader (no longer used). */
+ GCIP_FW_FLAVOR_BL1 = 1,
+ /* Systest app image. */
+ GCIP_FW_FLAVOR_SYSTEST = 2,
+ /* Default production app image. */
+ GCIP_FW_FLAVOR_PROD_DEFAULT = 3,
+ /* Custom image produced by other teams. */
+ GCIP_FW_FLAVOR_CUSTOM = 4,
+};
+
+/* Firmware info filled out via KCI FIRMWARE_INFO command. */
+struct gcip_fw_info {
+ uint64_t fw_build_time; /* BuildData::Timestamp() */
+ uint32_t fw_flavor; /* enum gcip_fw_flavor */
+ uint32_t fw_changelist; /* BuildData::Changelist() */
+ uint32_t spare[10];
+};
+
+/* Returns the name of @fw_flavor in string. */
+char *gcip_fw_flavor_str(enum gcip_fw_flavor fw_flavor);
+
+#endif /* __GCIP_FIRMWARE_H__ */
diff --git a/gcip-kernel-driver/include/gcip/gcip-image-config.h b/gcip-kernel-driver/include/gcip/gcip-image-config.h
new file mode 100644
index 0000000..a995188
--- /dev/null
+++ b/gcip-kernel-driver/include/gcip/gcip-image-config.h
@@ -0,0 +1,153 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Framework for parsing the firmware image configuration.
+ *
+ * Copyright (C) 2022 Google LLC
+ */
+
+#ifndef __GCIP_IMAGE_CONFIG_H__
+#define __GCIP_IMAGE_CONFIG_H__
+
+#include <linux/types.h>
+
+#define GCIP_FW_NUM_VERSIONS 4
+#define GCIP_IMG_CFG_MAX_IOMMU_MAPPINGS 22
+#define GCIP_IMG_CFG_MAX_NS_IOMMU_MAPPINGS 5
+
+#define GCIP_FW_PRIV_LEVEL_GSA 0
+#define GCIP_FW_PRIV_LEVEL_TZ 1
+#define GCIP_FW_PRIV_LEVEL_NS 2
+
+/*
+ * The image configuration attached to the signed firmware.
+ */
+struct gcip_image_config {
+ __u32 carveout_base;
+ __u32 firmware_base;
+ __u32 firmware_size;
+ __u32 firmware_versions[GCIP_FW_NUM_VERSIONS];
+ __u32 config_version;
+ __u32 privilege_level;
+ __u32 remapped_region_start;
+ __u32 remapped_region_size;
+ __u32 num_iommu_mappings;
+ struct {
+ /* Device virtual address */
+ __u32 virt_address;
+ /*
+ * Encodes a 12-bit aligned address and the corresponding size
+ * into a 32-bit value.
+ * Detailed encoding method is defined in gcip-image-config.c.
+ */
+ __u32 image_config_value;
+ } iommu_mappings[GCIP_IMG_CFG_MAX_IOMMU_MAPPINGS];
+ __u32 remapped_data_start;
+ __u32 remapped_data_size;
+ __u32 num_ns_iommu_mappings;
+ __u32 ns_iommu_mappings[GCIP_IMG_CFG_MAX_NS_IOMMU_MAPPINGS];
+} __packed;
+
+#define GCIP_IMAGE_CONFIG_FLAGS_SECURE (1u << 0)
+
+struct gcip_image_config_ops {
+ /*
+ * Adds an IOMMU mapping from @daddr to @paddr with size @size.
+ *
+ * It is ensured that there is no overflow on @paddr + @size before calling this function.
+ *
+ * @flags is a bit-field with the following attributes:
+ * [0:0] - Security. 1 for secure and 0 for non-secure.
+ * [31:1] - Reserved.
+ *
+ * Returns 0 on success. Otherwise a negative errno.
+ * Mandatory.
+ */
+ int (*map)(void *data, dma_addr_t daddr, phys_addr_t paddr, size_t size,
+ unsigned int flags);
+ /*
+ * Removes the IOMMU mapping previously added by @map.
+ *
+ * Mandatory.
+ */
+ void (*unmap)(void *data, dma_addr_t daddr, size_t size, unsigned int flags);
+};
+
+struct gcip_image_config_parser {
+ struct device *dev;
+ void *data; /* User-specified data, will be passed to ops. */
+ const struct gcip_image_config_ops *ops;
+ /* The last image config being successfully parsed. */
+ struct gcip_image_config last_config;
+};
+
+/*
+ * Initializes the image configuration parser.
+ *
+ * @dev is only used for logging.
+ * @data will be passed to operations.
+ *
+ * Returns 0 on success. Returns -EINVAL when any mandatory operation is NULL.
+ */
+int gcip_image_config_parser_init(struct gcip_image_config_parser *parser,
+ const struct gcip_image_config_ops *ops, struct device *dev,
+ void *data);
+
+/*
+ * Parses the image configuration and adds specified IOMMU mappings by calling pre-registered
+ * operations.
+ *
+ * The number of mappings to be added differs according to the value of
+ * @config->privilege_level:
+ * - GCIP_FW_PRIV_LEVEL_NS:
+ * Both @iommu_mappings and @ns_iommu_mappings will be added, because GCIP_FW_PRIV_LEVEL_NS means
+ * the firmware will run in non-secure mode and all transactions will go through the non-secure
+ * IOMMU.
+ * - Otherwise:
+ * Only @ns_iommu_mappings are considered. TZ/GSA is responsible for programming the secure IOMMU
+ * with the secure mappings.
+ *
+ * Before parsing the newly passed @config, the mappings of the last record (stored internally by
+ * @parser) will be reverted. If any mapping in the new config fails to be mapped, the reverted
+ * last config will be restored, i.e. this function keeps the mapping state the same as before the
+ * call whenever an error happens. But if the IOMMU state is somehow corrupted and the last image
+ * config hence fails to be restored, only an error is logged. See the pseudo code below:
+ *
+ * gcip_image_config_parse(config):
+ * unmap(last_image_config)
+ * if ret = map(config) fails:
+ * LOG("Failed to map image config, rolling back to the last image config.")
+ * if map(last_image_config) fails:
+ * LOG("Failed to roll back the last image config.")
+ * return ret
+ * else:
+ * last_image_config = config
+ * return SUCCESS
+ *
+ * As a special case, if the content of @config is identical to the last successfully parsed
+ * image config, this function returns 0 immediately without removing or adding any mapping.
+ *
+ * Returns 0 on success. Otherwise a negative errno, usually the one returned by
+ * gcip_image_config_ops.map. On error, no new mapping specified in @config is added.
+ */
+int gcip_image_config_parse(struct gcip_image_config_parser *parser,
+ struct gcip_image_config *config);
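+
+/*
+ * Example usage (illustrative sketch; chip_map/chip_unmap are hypothetical callbacks supplied by
+ * the chip driver, and @hdr is a struct gcip_common_image_header taken from the firmware image):
+ *
+ *	static const struct gcip_image_config_ops chip_image_config_ops = {
+ *		.map = chip_map,
+ *		.unmap = chip_unmap,
+ *	};
+ *
+ *	config = get_image_config_from_hdr(hdr);
+ *	if (!config)
+ *		return -EINVAL;
+ *	ret = gcip_image_config_parser_init(&parser, &chip_image_config_ops, dev, chip);
+ *	if (ret)
+ *		return ret;
+ *	ret = gcip_image_config_parse(&parser, config);
+ */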
+
+/*
+ * Clears the mappings specified in the last image config.
+ *
+ * It's valid to call this function before any image config has been successfully parsed, or when
+ * the last image config has already been cleared; in those cases this function is a no-op.
+ */
+void gcip_image_config_clear(struct gcip_image_config_parser *parser);
+
+/*
+ * Returns whether the privilege level specified by @config is non-secure.
+ */
+static inline bool gcip_image_config_is_ns(struct gcip_image_config *config)
+{
+ return config->privilege_level == GCIP_FW_PRIV_LEVEL_NS;
+}
+
+#endif /* __GCIP_IMAGE_CONFIG_H__ */
diff --git a/gcip-kernel-driver/include/gcip/gcip-kci.h b/gcip-kernel-driver/include/gcip/gcip-kci.h
new file mode 100644
index 0000000..bda1b40
--- /dev/null
+++ b/gcip-kernel-driver/include/gcip/gcip-kci.h
@@ -0,0 +1,387 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Kernel Control Interface, implements the protocol between AP kernel and GCIP firmware.
+ *
+ * Copyright (C) 2022 Google LLC
+ */
+
+#ifndef __GCIP_KCI_H__
+#define __GCIP_KCI_H__
+
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+#include <linux/wait.h>
+
+#include <gcip/gcip-mailbox.h>
+
+/*
+ * The status field in a firmware response is set to this by us when the response is fetched from
+ * the queue.
+ */
+#define GCIP_KCI_STATUS_OK GCIP_MAILBOX_STATUS_OK
+/*
+ * gcip_kci#mailbox.wait_list uses this value to record the status of responses that haven't been
+ * received yet.
+ */
+#define GCIP_KCI_STATUS_WAITING_RESPONSE GCIP_MAILBOX_STATUS_WAITING_RESPONSE
+/*
+ * Used when an expected response is not received, see the documentation of
+ * gcip_mailbox_handle_response() for details.
+ */
+#define GCIP_KCI_STATUS_NO_RESPONSE GCIP_MAILBOX_STATUS_NO_RESPONSE
+
+/*
+ * Command/response sequence numbers are capped at half of the 64-bit value range. The second
+ * half is reserved for incoming requests from firmware, which are tagged with the MSB set.
+ */
+#define GCIP_KCI_REVERSE_FLAG (0x8000000000000000ull)
+
+/* Command/response queue elements for KCI. */
+
+struct gcip_kci_dma_descriptor {
+ u64 address;
+ u32 size;
+ u32 flags;
+};
+
+struct gcip_kci_command_element {
+ /*
+ * Set by gcip_kci_push_cmd() in case of KCI cmd and copied from the RKCI cmd in case of
+ * RKCI response.
+ */
+ u64 seq;
+ u16 code;
+ u16 reserved[3]; /* Explicit padding, does not affect alignment. */
+ struct gcip_kci_dma_descriptor dma;
+} __packed;
+
+struct gcip_kci_response_element {
+ u64 seq;
+ u16 code;
+ /*
+ * Reserved for host use - firmware can't touch this.
+ * If a value is written here it will be discarded and overwritten during response
+ * processing. However, when repurposed as an RKCI command, the FW can set this field.
+ */
+ u16 status;
+ /*
+ * Return value is not currently needed by KCI command responses.
+ * For reverse KCI commands this is set as value2.
+ */
+ u32 retval;
+} __packed;
+
+/*
+ * Definition of code in command elements.
+ * Code for KCI is a 16-bit unsigned integer.
+ */
+enum gcip_kci_code {
+ GCIP_KCI_CODE_ACK = 0,
+ GCIP_KCI_CODE_UNMAP_BUFFER = 1,
+ GCIP_KCI_CODE_MAP_LOG_BUFFER = 2,
+ GCIP_KCI_CODE_JOIN_GROUP = 3,
+ GCIP_KCI_CODE_LEAVE_GROUP = 4,
+ GCIP_KCI_CODE_MAP_TRACE_BUFFER = 5,
+ GCIP_KCI_CODE_SHUTDOWN = 7,
+ GCIP_KCI_CODE_GET_DEBUG_DUMP = 8,
+ GCIP_KCI_CODE_OPEN_DEVICE = 9,
+ GCIP_KCI_CODE_CLOSE_DEVICE = 10,
+ GCIP_KCI_CODE_FIRMWARE_INFO = 11,
+ GCIP_KCI_CODE_GET_USAGE = 12,
+ GCIP_KCI_CODE_NOTIFY_THROTTLING = 13,
+ GCIP_KCI_CODE_BLOCK_BUS_SPEED_CONTROL = 14,
+ GCIP_KCI_CODE_ALLOCATE_VMBOX = 15,
+ GCIP_KCI_CODE_RELEASE_VMBOX = 16,
+ GCIP_KCI_CODE_LINK_OFFLOAD_VMBOX = 17,
+ GCIP_KCI_CODE_UNLINK_OFFLOAD_VMBOX = 18,
+
+ GCIP_KCI_CODE_RKCI_ACK = 256,
+};
+
+/*
+ * Definition of reverse KCI request code ranges.
+ * Code for reverse KCI is a 16-bit unsigned integer.
+ * The first half of the range is reserved for chip-specific codes and the second half is used
+ * for generic codes.
+ */
+enum gcip_reverse_kci_code {
+ GCIP_RKCI_CHIP_CODE_FIRST = 0,
+ GCIP_RKCI_CHIP_CODE_LAST = 0x7FFF,
+ GCIP_RKCI_GENERIC_CODE_FIRST = 0x8000,
+ GCIP_RKCI_FIRMWARE_CRASH = GCIP_RKCI_GENERIC_CODE_FIRST + 0,
+ GCIP_RKCI_JOB_LOCKUP = GCIP_RKCI_GENERIC_CODE_FIRST + 1,
+ GCIP_RKCI_GENERIC_CODE_LAST = 0xFFFF,
+};
+
+/*
+ * Definition of code in response elements.
+ * It is a 16-bit unsigned integer.
+ */
+enum gcip_kci_error {
+ GCIP_KCI_ERROR_OK = 0, /* Not an error; returned on success. */
+ GCIP_KCI_ERROR_CANCELLED = 1,
+ GCIP_KCI_ERROR_UNKNOWN = 2,
+ GCIP_KCI_ERROR_INVALID_ARGUMENT = 3,
+ GCIP_KCI_ERROR_DEADLINE_EXCEEDED = 4,
+ GCIP_KCI_ERROR_NOT_FOUND = 5,
+ GCIP_KCI_ERROR_ALREADY_EXISTS = 6,
+ GCIP_KCI_ERROR_PERMISSION_DENIED = 7,
+ GCIP_KCI_ERROR_RESOURCE_EXHAUSTED = 8,
+ GCIP_KCI_ERROR_FAILED_PRECONDITION = 9,
+ GCIP_KCI_ERROR_ABORTED = 10,
+ GCIP_KCI_ERROR_OUT_OF_RANGE = 11,
+ GCIP_KCI_ERROR_UNIMPLEMENTED = 12,
+ GCIP_KCI_ERROR_INTERNAL = 13,
+ GCIP_KCI_ERROR_UNAVAILABLE = 14,
+ GCIP_KCI_ERROR_DATA_LOSS = 15,
+ GCIP_KCI_ERROR_UNAUTHENTICATED = 16,
+};
+
+/* Type of the chip of the offload vmbox to be linked. */
+enum gcip_kci_offload_chip_type {
+ GCIP_KCI_OFFLOAD_CHIP_TYPE_TPU = 0,
+};
+
+/*
+ * Reason for triggering the CMD doorbell.
+ * The CMD doorbell is triggered either when a CMD is pushed or when a RESP that might block the
+ * FW is consumed.
+ */
+enum gcip_kci_doorbell_reason {
+ GCIP_KCI_PUSH_CMD,
+ GCIP_KCI_CONSUME_RESP,
+};
+
+/* Struct to hold a circular buffer for incoming KCI responses. */
+struct gcip_reverse_kci {
+ /* Reverse kci buffer head. */
+ unsigned long head;
+ /* Reverse kci buffer tail. */
+ unsigned long tail;
+ /*
+ * Maximum number of outstanding KCI requests from firmware.
+ * This is used to size a circular buffer, so it must be a power of 2.
+ */
+ u32 buffer_size;
+ struct gcip_kci_response_element *buffer;
+ /* Lock to push elements in the buffer from the interrupt handler. */
+ spinlock_t producer_lock;
+ /* Lock to pop elements from the buffer in the worker. */
+ spinlock_t consumer_lock;
+ /* Worker to handle responses. */
+ struct work_struct work;
+};
+
+struct gcip_kci;
+
+/*
+ * KCI operators.
+ * For in_interrupt() context, see the implementation of gcip_kci_handle_irq for details.
+ */
+struct gcip_kci_ops {
+ /* Mandatory. */
+ /*
+ * Gets the head of mailbox command queue.
+ * Context: normal.
+ */
+ u32 (*get_cmd_queue_head)(struct gcip_kci *kci);
+ /*
+ * Gets the tail of mailbox command queue.
+ * Context: normal.
+ */
+ u32 (*get_cmd_queue_tail)(struct gcip_kci *kci);
+ /*
+ * Increases the tail of mailbox command queue by @inc.
+ * Context: normal.
+ */
+ void (*inc_cmd_queue_tail)(struct gcip_kci *kci, u32 inc);
+
+ /*
+ * Gets the size of mailbox response queue.
+ * Context: normal.
+ */
+ u32 (*get_resp_queue_size)(struct gcip_kci *kci);
+ /*
+ * Gets the head of mailbox response queue.
+ * Context: normal and in_interrupt().
+ */
+ u32 (*get_resp_queue_head)(struct gcip_kci *kci);
+ /*
+ * Gets the tail of mailbox response queue.
+ * Context: normal and in_interrupt().
+ */
+ u32 (*get_resp_queue_tail)(struct gcip_kci *kci);
+ /*
+ * Increases the head of mailbox response queue by @inc.
+ * Context: normal and in_interrupt().
+ */
+ void (*inc_resp_queue_head)(struct gcip_kci *kci, u32 inc);
+ /*
+ * Rings the doorbell.
+ * Context: normal.
+ */
+ void (*trigger_doorbell)(struct gcip_kci *kci, enum gcip_kci_doorbell_reason);
+
+ /* Optional. */
+ /*
+ * Reverse KCI handler called by the worker. Only required if reverse kci is enabled.
+ * Context: normal.
+ */
+ void (*reverse_kci_handle_response)(struct gcip_kci *kci,
+ struct gcip_kci_response_element *resp);
+ /*
+ * Usage updater called by the worker.
+ * Context: normal.
+ */
+ int (*update_usage)(struct gcip_kci *kci);
+};
+
+struct gcip_kci {
+ /* Device used for logging and memory allocation. */
+ struct device *dev;
+ /* Mailbox used by KCI. */
+ struct gcip_mailbox mailbox;
+ /* Protects cmd_queue. */
+ struct mutex cmd_queue_lock;
+ /* Protects resp_queue. */
+ spinlock_t resp_queue_lock;
+ /* Queue for waiting for the response doorbell to be rung. */
+ wait_queue_head_t resp_doorbell_waitq;
+ /* Protects wait_list. */
+ spinlock_t wait_list_lock;
+ /* Worker of consuming responses. */
+ struct work_struct work;
+ /* Handler for reverse (firmware -> kernel) requests. */
+ struct gcip_reverse_kci rkci;
+ /* Worker that sends update usage KCI. */
+ struct work_struct usage_work;
+ /* KCI operators. */
+ const struct gcip_kci_ops *ops;
+ /* Private data. */
+ void *data;
+};
+
+/*
+ * Arguments for gcip_kci_init.
+ *
+ * For the following arguments, see struct gcip_kci and struct gcip_reverse_kci for details.
+ * : `dev`, `rkci_buffer_size`, `ops` and `data`.
+ *
+ * For the following arguments, see struct gcip_mailbox for details. They will be passed to the
+ * struct gcip_mailbox using struct gcip_mailbox_args internally.
+ * : `dev`, `cmd_queue`, `resp_queue`, `queue_wrap_bit` and `timeout`.
+ */
+struct gcip_kci_args {
+ struct device *dev;
+ void *cmd_queue;
+ void *resp_queue;
+ u32 queue_wrap_bit;
+ u32 rkci_buffer_size;
+ u32 timeout;
+ const struct gcip_kci_ops *ops;
+ void *data;
+};
+
+/* Initializes a KCI object. */
+int gcip_kci_init(struct gcip_kci *kci, const struct gcip_kci_args *args);
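+
+/*
+ * Example (a minimal, hedged sketch): initializing a KCI instance. The queue addresses, wrap
+ * bit, timeout value and `my_kci_ops` / `my_chip_data` are illustrative assumptions only; a
+ * chip driver supplies its own values from its queue allocation, matching struct gcip_kci_args
+ * above.
+ *
+ *   const struct gcip_kci_args args = {
+ *           .dev = dev,
+ *           .cmd_queue = cmd_queue_vaddr,
+ *           .resp_queue = resp_queue_vaddr,
+ *           .queue_wrap_bit = queue_wrap_bit,
+ *           .rkci_buffer_size = 256,          // must be a power of 2
+ *           .timeout = 1000,                  // msecs
+ *           .ops = &my_kci_ops,
+ *           .data = my_chip_data,
+ *   };
+ *
+ *   ret = gcip_kci_init(kci, &args);
+ *   ...
+ *   gcip_kci_cancel_work_queues(kci);         // must precede the release
+ *   gcip_kci_release(kci);
+ */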
+
+/* Cancels KCI and reverse KCI workers and workers that may send KCIs. */
+void gcip_kci_cancel_work_queues(struct gcip_kci *kci);
+
+/*
+ * Releases KCI.
+ * Caller must call gcip_kci_cancel_work_queues before calling gcip_kci_release.
+ */
+void gcip_kci_release(struct gcip_kci *kci);
+
+/*
+ * Pushes an element to cmd queue and waits for the response.
+ * Returns -ETIMEDOUT if no response is received within kci->mailbox.timeout msecs.
+ *
+ * Returns the code of response, or a negative errno on error.
+ */
+int gcip_kci_send_cmd(struct gcip_kci *kci, struct gcip_kci_command_element *cmd);
+
+/*
+ * Pushes an element to cmd queue and waits for the response.
+ * Returns -ETIMEDOUT if no response is received within kci->mailbox.timeout msecs.
+ *
+ * Returns the code of response, or a negative errno on error.
+ * @resp is updated with the response so that the returned retval field can be retrieved.
+ */
+int gcip_kci_send_cmd_return_resp(struct gcip_kci *kci, struct gcip_kci_command_element *cmd,
+ struct gcip_kci_response_element *resp);
+
+/*
+ * Interrupt handler.
+ * This function should be called when the interrupt of KCI mailbox is fired.
+ */
+void gcip_kci_handle_irq(struct gcip_kci *kci);
+
+/*
+ * Schedules a usage update worker.
+ *
+ * For functions that don't require the usage to be updated immediately, use this function instead
+ * of update_usage in struct gcip_kci_ops.
+ */
+void gcip_kci_update_usage_async(struct gcip_kci *kci);
+
+/* Gets the KCI private data. */
+static inline void *gcip_kci_get_data(struct gcip_kci *kci)
+{
+ return kci->data;
+}
+
+/* Returns the element size according to @type. */
+static inline u32 gcip_kci_queue_element_size(enum gcip_mailbox_queue_type type)
+{
+ if (type == GCIP_MAILBOX_CMD_QUEUE)
+ return sizeof(struct gcip_kci_command_element);
+ else
+ return sizeof(struct gcip_kci_response_element);
+}
+
+static inline u64 gcip_kci_get_cur_seq(struct gcip_kci *kci)
+{
+ return gcip_mailbox_get_cur_seq(&kci->mailbox);
+}
+
+static inline struct gcip_kci_command_element *gcip_kci_get_cmd_queue(struct gcip_kci *kci)
+{
+ return (struct gcip_kci_command_element *)gcip_mailbox_get_cmd_queue(&kci->mailbox);
+}
+
+static inline struct gcip_kci_response_element *gcip_kci_get_resp_queue(struct gcip_kci *kci)
+{
+ return (struct gcip_kci_response_element *)gcip_mailbox_get_resp_queue(&kci->mailbox);
+}
+
+static inline u64 gcip_kci_get_queue_wrap_bit(struct gcip_kci *kci)
+{
+ return gcip_mailbox_get_queue_wrap_bit(&kci->mailbox);
+}
+
+static inline struct list_head *gcip_kci_get_wait_list(struct gcip_kci *kci)
+{
+ return gcip_mailbox_get_wait_list(&kci->mailbox);
+}
+
+static inline u32 gcip_kci_get_timeout(struct gcip_kci *kci)
+{
+ return gcip_mailbox_get_timeout(&kci->mailbox);
+}
+
+static inline unsigned long gcip_rkci_get_head(struct gcip_kci *kci)
+{
+ return kci->rkci.head;
+}
+
+static inline unsigned long gcip_rkci_get_tail(struct gcip_kci *kci)
+{
+ return kci->rkci.tail;
+}
+
+#endif /* __GCIP_KCI_H__ */
diff --git a/gcip-kernel-driver/include/gcip/gcip-mailbox.h b/gcip-kernel-driver/include/gcip/gcip-mailbox.h
new file mode 100644
index 0000000..e81cfb9
--- /dev/null
+++ b/gcip-kernel-driver/include/gcip/gcip-mailbox.h
@@ -0,0 +1,538 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * GCIP Mailbox Interface.
+ *
+ * Copyright (C) 2022 Google LLC
+ */
+
+#ifndef __GCIP_MAILBOX_H__
+#define __GCIP_MAILBOX_H__
+
+#include <linux/compiler.h>
+#include <linux/mutex.h>
+#include <linux/refcount.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+#include <linux/wait.h>
+#include <linux/workqueue.h>
+
+#define CIRC_QUEUE_WRAPPED(idx, wrap_bit) ((idx) & (wrap_bit))
+#define CIRC_QUEUE_INDEX_MASK(wrap_bit) ((wrap_bit) - 1)
+#define CIRC_QUEUE_VALID_MASK(wrap_bit) (CIRC_QUEUE_INDEX_MASK(wrap_bit) | (wrap_bit))
+#define CIRC_QUEUE_REAL_INDEX(idx, wrap_bit) ((idx) & CIRC_QUEUE_INDEX_MASK(wrap_bit))
+
+#define CIRC_QUEUE_MAX_SIZE(wrap_bit) ((wrap_bit) - 1)
+
+/*
+ * The status field in a firmware response is set to this by us when the response is fetched from
+ * the queue.
+ */
+#define GCIP_MAILBOX_STATUS_OK (0)
+/*
+ * gcip_mailbox#wait_list uses this value to record the status of responses that haven't been
+ * received yet.
+ */
+#define GCIP_MAILBOX_STATUS_WAITING_RESPONSE (1)
+/*
+ * Used when an expected response is not received, see the documentation of
+ * gcip_mailbox_consume_wait_list() for details.
+ */
+#define GCIP_MAILBOX_STATUS_NO_RESPONSE (2)
+
+/* To specify the operation is toward cmd or resp queue. */
+enum gcip_mailbox_queue_type { GCIP_MAILBOX_CMD_QUEUE, GCIP_MAILBOX_RESP_QUEUE };
+
+/* Utilities of circular queue operations */
+
+/*
+ * Returns the number of elements in a circular queue given its @head, @tail,
+ * @queue_size, and @wrap_bit.
+ */
+static inline u32 gcip_circ_queue_cnt(u32 head, u32 tail, u32 queue_size, u32 wrap_bit)
+{
+ u32 ret;
+
+ if (CIRC_QUEUE_WRAPPED(tail, wrap_bit) != CIRC_QUEUE_WRAPPED(head, wrap_bit))
+ ret = queue_size - CIRC_QUEUE_REAL_INDEX(head, wrap_bit) +
+ CIRC_QUEUE_REAL_INDEX(tail, wrap_bit);
+ else
+ ret = tail - head;
+
+ if (unlikely(ret > queue_size))
+ return 0;
+
+ return ret;
+}
+
+/* Increases @index of a circular queue by @inc. */
+static inline u32 gcip_circ_queue_inc(u32 index, u32 inc, u32 queue_size, u32 wrap_bit)
+{
+ u32 new_index = CIRC_QUEUE_REAL_INDEX(index, wrap_bit) + inc;
+
+ if (unlikely(new_index >= queue_size))
+ return (index + inc - queue_size) ^ wrap_bit;
+ else
+ return index + inc;
+}
+
+/*
+ * Checks if @size is a valid circular queue size, which must be a positive
+ * number less than or equal to CIRC_QUEUE_MAX_SIZE(@wrap_bit).
+ */
+static inline bool gcip_valid_circ_queue_size(u32 size, u32 wrap_bit)
+{
+ if (!size || size > CIRC_QUEUE_MAX_SIZE(wrap_bit))
+ return false;
+ return true;
+}
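+
+/*
+ * Worked example (illustrative values only): with wrap_bit = 0x10 the usable index range is
+ * 0..queue_size-1 and bit 0x10 toggles on every wrap. For queue_size = 12, head = 0x0A
+ * (real index 10, not wrapped) and tail = 0x12 (real index 2, wrapped):
+ *
+ *   gcip_circ_queue_cnt(0x0A, 0x12, 12, 0x10)
+ *           = 12 - 10 + 2 = 4                 // elements at indices 10, 11, 0, 1
+ *
+ *   gcip_circ_queue_inc(0x0A, 3, 12, 0x10)
+ *           = (0x0A + 3 - 12) ^ 0x10 = 0x11   // real index 1, wrap bit toggled
+ */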
+
+struct gcip_mailbox;
+
+/* Wrapper struct for responses consumed by a thread other than the one which sent the command. */
+struct gcip_mailbox_resp_awaiter {
+ /* Response. */
+ void *resp;
+ /* The work which will be executed when the timeout occurs. */
+ struct delayed_work timeout_work;
+ /*
+ * If this response times out, this pointer to the owning mailbox is
+ * needed to delete this response from the list of pending responses.
+ */
+ struct gcip_mailbox *mailbox;
+ /* User-defined data. */
+ void *data;
+ /* Reference count. */
+ refcount_t refs;
+ /*
+ * The callback for releasing the @data.
+ * It will be set as @release_awaiter_data of struct gcip_mailbox_ops.
+ */
+ void (*release_data)(void *data);
+};
+
+/*
+ * Mailbox operators.
+ * For in_interrupt() context, see the implementation of gcip_mailbox_handle_irq for details.
+ */
+struct gcip_mailbox_ops {
+ /* Mandatory. */
+ /*
+ * Gets the head of mailbox command queue.
+ * Context: normal.
+ */
+ u32 (*get_cmd_queue_head)(struct gcip_mailbox *mailbox);
+ /*
+ * Gets the tail of mailbox command queue.
+ * Context: normal.
+ */
+ u32 (*get_cmd_queue_tail)(struct gcip_mailbox *mailbox);
+ /*
+ * Increases the tail of mailbox command queue by @inc.
+ * Context: normal.
+ */
+ void (*inc_cmd_queue_tail)(struct gcip_mailbox *mailbox, u32 inc);
+ /*
+ * Acquires the lock of cmd_queue. If @try is true, a "_trylock" variant may be used, but it
+ * may also be ignored. Returns 1 on success, 0 on failure. This callback will be called in
+ * the following situations:
+ * - Enqueuing a command to the cmd_queue.
+ * The lock can be a mutex or a spin lock and it will be released by calling the
+ * `release_cmd_queue_lock` callback. A minimal sketch of this pair follows the struct
+ * definition below.
+ * Context: normal.
+ */
+ int (*acquire_cmd_queue_lock)(struct gcip_mailbox *mailbox, bool try);
+ /*
+ * Releases the lock of cmd_queue which is acquired by calling `acquire_cmd_queue_lock`.
+ * Context: normal.
+ */
+ void (*release_cmd_queue_lock)(struct gcip_mailbox *mailbox);
+ /*
+ * Gets the sequence number of @cmd queue element.
+ * Context: normal.
+ */
+ u64 (*get_cmd_elem_seq)(struct gcip_mailbox *mailbox, void *cmd);
+ /*
+ * Sets the sequence number of @cmd queue element.
+ * Context: normal.
+ */
+ void (*set_cmd_elem_seq)(struct gcip_mailbox *mailbox, void *cmd, u64 seq);
+ /*
+ * Gets the code of @cmd queue element.
+ * Context: normal.
+ */
+ u32 (*get_cmd_elem_code)(struct gcip_mailbox *mailbox, void *cmd);
+
+ /*
+ * Gets the size of mailbox response queue.
+ * Context: normal.
+ */
+ u32 (*get_resp_queue_size)(struct gcip_mailbox *mailbox);
+ /*
+ * Gets the head of mailbox response queue.
+ * Context: normal and in_interrupt().
+ */
+ u32 (*get_resp_queue_head)(struct gcip_mailbox *mailbox);
+ /*
+ * Gets the tail of mailbox response queue.
+ * Context: normal and in_interrupt().
+ */
+ u32 (*get_resp_queue_tail)(struct gcip_mailbox *mailbox);
+ /*
+ * Increases the head of mailbox response queue by @inc.
+ * Context: normal and in_interrupt().
+ */
+ void (*inc_resp_queue_head)(struct gcip_mailbox *mailbox, u32 inc);
+ /*
+ * Acquires the lock of resp_queue. If @try is true, a "_trylock" variant may be used, but it
+ * may also be ignored. Returns 1 on success, 0 on failure. This callback will be called in
+ * the following situations:
+ * - Fetching response(s) from the resp_queue.
+ * The lock can be a mutex or a spin lock. However, if @try is honored and a "_trylock" is
+ * used, it must be a spin lock.
+ * The lock will be released by calling the `release_resp_queue_lock` callback.
+ * Context: normal and in_interrupt().
+ */
+ int (*acquire_resp_queue_lock)(struct gcip_mailbox *mailbox, bool try);
+ /*
+ * Releases the lock of resp_queue which is acquired by calling `acquire_resp_queue_lock`.
+ * Context: normal and in_interrupt().
+ */
+ void (*release_resp_queue_lock)(struct gcip_mailbox *mailbox);
+ /*
+ * Gets the sequence number of @resp queue element.
+ * Context: normal and in_interrupt().
+ */
+ u64 (*get_resp_elem_seq)(struct gcip_mailbox *mailbox, void *resp);
+ /*
+ * Sets the sequence number of @resp queue element.
+ * Context: normal and in_interrupt().
+ */
+ void (*set_resp_elem_seq)(struct gcip_mailbox *mailbox, void *resp, u64 seq);
+ /*
+ * Gets the status of @resp queue element.
+ * Context: normal and in_interrupt().
+ */
+ u16 (*get_resp_elem_status)(struct gcip_mailbox *mailbox, void *resp);
+ /*
+ * Sets the status of @resp queue element.
+ * Context: normal and in_interrupt().
+ */
+ void (*set_resp_elem_status)(struct gcip_mailbox *mailbox, void *resp, u16 status);
+
+ /*
+ * Acquires the lock of wait_list. If @irqsave is true, an "_irqsave" variant may be used to
+ * save the irq state to @flags, but it may also be ignored.
+ * This callback will be called in the following situations:
+ * - Pushing a waiting response to @mailbox->wait_list.
+ * - Deleting a waiting response from @mailbox->wait_list.
+ * - Handling an arrived response and deleting it from @mailbox->wait_list.
+ * - Flushing the asynchronous responses in @mailbox->wait_list when releasing the @mailbox.
+ * The lock can be a mutex or a spin lock. However, if @irqsave is honored and "_irqsave" is
+ * used, it must be a spin lock.
+ * The lock will be released by calling the `release_wait_list_lock` callback.
+ * Context: normal and in_interrupt().
+ */
+ void (*acquire_wait_list_lock)(struct gcip_mailbox *mailbox, bool irqsave,
+ unsigned long *flags);
+ /*
+ * Releases the lock of wait_list which was acquired by calling `acquire_wait_list_lock`.
+ * If @irqrestore is true, restores the irq state from @flags saved by `acquire_wait_list_lock`.
+ * This can be ignored if @irqsave was not honored in `acquire_wait_list_lock`.
+ * Context: normal and in_interrupt().
+ */
+ void (*release_wait_list_lock)(struct gcip_mailbox *mailbox, bool irqrestore,
+ unsigned long flags);
+
+ /* Optional. */
+ /*
+ * Waits until the cmd queue of @mailbox has space available for putting the command. If
+ * the queue has space, returns 0. Otherwise, returns a non-zero error. Depending on the
+ * implementation, it is okay to return an error right away when the queue is full. If this
+ * callback returns an error, the `gcip_mailbox_send_cmd` or `gcip_mailbox_put_cmd` function
+ * will return that error too. This callback is called with the `cmd_queue_lock` held.
+ *
+ * Note: if this callback is NULL, the mailbox simply checks the fullness of cmd_queue and
+ * returns -EAGAIN right away if it is full. Please refer to the implementation of the
+ * `gcip_mailbox_enqueue_cmd` function.
+ *
+ * Context: normal.
+ */
+ int (*wait_for_cmd_queue_not_full)(struct gcip_mailbox *mailbox);
+ /*
+ * This callback will be called after putting @cmd into the command queue. It can be used
+ * to trigger the doorbell. @mailbox->cur_seq will be increased by the return value. On
+ * error, it returns a negative value and @mailbox->cur_seq is not changed. If this callback
+ * is not defined, @mailbox->cur_seq is increased by 1 each time a cmd enters the queue.
+ * This is called with the `cmd_queue_lock` held.
+ * Context: normal.
+ */
+ int (*after_enqueue_cmd)(struct gcip_mailbox *mailbox, void *cmd);
+ /*
+ * This callback will be called after fetching responses. It can be used to trigger a signal
+ * that breaks up a wait for the response queue to be consumed. This is called without
+ * holding any locks.
+ * - @num_resps: the number of fetched responses.
+ * Context: normal and in_interrupt().
+ */
+ void (*after_fetch_resps)(struct gcip_mailbox *mailbox, u32 num_resps);
+ /*
+ * This callback will be called before handling each fetched response. If it is not defined
+ * or returns true, the mailbox handles @resp normally. Return false if @resp should not be
+ * handled. This is called without holding any locks.
+ * Context: normal and in_interrupt().
+ */
+ bool (*before_handle_resp)(struct gcip_mailbox *mailbox, const void *resp);
+ /*
+ * Handles an asynchronous response that arrived successfully. How to handle it depends on
+ * the chip implementation. However, @awaiter should be released by calling the
+ * `gcip_mailbox_release_awaiter` function once the kernel driver no longer needs it.
+ * This is called with the `wait_list_lock` held.
+ * Context: normal and in_interrupt().
+ */
+ void (*handle_awaiter_arrived)(struct gcip_mailbox *mailbox,
+ struct gcip_mailbox_resp_awaiter *awaiter);
+ /*
+ * Handles an asynchronous response that timed out. How to handle it depends on the chip
+ * implementation. However, @awaiter should be released by calling the
+ * `gcip_mailbox_release_awaiter` function once the kernel driver no longer needs it.
+ * This is called without holding any locks.
+ * Context: normal and in_interrupt().
+ */
+ void (*handle_awaiter_timedout)(struct gcip_mailbox *mailbox,
+ struct gcip_mailbox_resp_awaiter *awaiter);
+ /*
+ * Cleans up an asynchronous response which has not arrived yet and has not timed out.
+ * The @awaiter should be marked as unprocessable so that it will not be processed by
+ * the `handle_awaiter_arrived` or `handle_awaiter_timedout` callbacks in race
+ * conditions. The @awaiter does not have to be released by calling the
+ * `gcip_mailbox_release_awaiter` function; it will be released internally. This is
+ * called with the `wait_list_lock` held.
+ * Context: normal.
+ */
+ void (*flush_awaiter)(struct gcip_mailbox *mailbox,
+ struct gcip_mailbox_resp_awaiter *awaiter);
+ /*
+ * Releases the @data which was passed to the `gcip_mailbox_put_cmd` function. This is
+ * called without holding any locks.
+ * Context: normal and in_interrupt().
+ */
+ void (*release_awaiter_data)(void *data);
+};
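+
+/*
+ * Example (hedged sketch), referenced from acquire_cmd_queue_lock above: the simplest possible
+ * lock callbacks for a chip driver that protects its cmd queue with a mutex. The `my_mailbox`
+ * wrapper type and its `cmd_queue_lock` field are assumptions made for illustration.
+ *
+ *   static int my_acquire_cmd_queue_lock(struct gcip_mailbox *mailbox, bool try)
+ *   {
+ *           struct my_mailbox *mbx = gcip_mailbox_get_data(mailbox);
+ *
+ *           mutex_lock(&mbx->cmd_queue_lock);  // @try may be ignored per the contract above
+ *           return 1;
+ *   }
+ *
+ *   static void my_release_cmd_queue_lock(struct gcip_mailbox *mailbox)
+ *   {
+ *           struct my_mailbox *mbx = gcip_mailbox_get_data(mailbox);
+ *
+ *           mutex_unlock(&mbx->cmd_queue_lock);
+ *   }
+ */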
+
+struct gcip_mailbox {
+ /* Device used for logging and memory allocation. */
+ struct device *dev;
+ /* Wrap bit for both the cmd and resp queues. */
+ u64 queue_wrap_bit;
+ /* Cmd sequence number. */
+ u64 cur_seq;
+
+ /* Cmd queue pointer. */
+ void *cmd_queue;
+ /* Size of element of cmd queue. */
+ u32 cmd_elem_size;
+
+ /* Resp queue pointer. */
+ void *resp_queue;
+ /* Size of element of resp queue. */
+ u32 resp_elem_size;
+
+ /* List of commands that need to wait for responses. */
+ struct list_head wait_list;
+ /* Queue for waiting for the wait_list to be consumed. */
+ wait_queue_head_t wait_list_waitq;
+
+ /* Mailbox timeout in milliseconds. */
+ u32 timeout;
+ /* Mailbox operators. */
+ const struct gcip_mailbox_ops *ops;
+ /* User-defined data. */
+ void *data;
+
+ /*
+ * Flag specifying that sequence numbers of command responses are not
+ * required to arrive in order.
+ */
+ bool ignore_seq_order;
+};
+
+/* Arguments for gcip_mailbox_init. See struct gcip_mailbox for details. */
+struct gcip_mailbox_args {
+ struct device *dev;
+ u32 queue_wrap_bit;
+
+ void *cmd_queue;
+ u32 cmd_elem_size;
+
+ void *resp_queue;
+ u32 resp_elem_size;
+
+ u32 timeout;
+ const struct gcip_mailbox_ops *ops;
+ void *data;
+
+ bool ignore_seq_order;
+};
+
+/* Initializes a mailbox object. */
+int gcip_mailbox_init(struct gcip_mailbox *mailbox, const struct gcip_mailbox_args *args);
+
+/* Releases a mailbox object which was initialized by gcip_mailbox_init. */
+void gcip_mailbox_release(struct gcip_mailbox *mailbox);
+
+/*
+ * Fetches and handles responses, then wakes up threads that are waiting for a response.
+ * To consume the response queue and get responses, this function should be run as deferred work
+ * such as a `struct work_struct` or `struct kthread_work`.
+ *
+ * Note: this worker is scheduled from the IRQ handler. To prevent use-after-free or
+ * race-condition bugs, cancel all work before freeing the mailbox.
+ */
+void gcip_mailbox_consume_responses_work(struct gcip_mailbox *mailbox);
+
+/*
+ * Pushes an element to cmd queue and waits for the response (synchronous).
+ * Returns -ETIMEDOUT if no response is received within mailbox->timeout msecs.
+ *
+ * Returns the code of response, or a negative errno on error.
+ * @resp is updated with the response so that the returned retval field can be retrieved.
+ */
+int gcip_mailbox_send_cmd(struct gcip_mailbox *mailbox, void *cmd, void *resp);
+
+/*
+ * Executes @cmd command asynchronously. This function returns an instance of
+ * `struct gcip_mailbox_resp_awaiter` which handles the arrival and time-out of the response.
+ * The implementation side can cancel the asynchronous response by calling the
+ * `gcip_mailbox_cancel_awaiter` or `gcip_mailbox_cancel_awaiter_timeout` function with it.
+ *
+ * Arrived asynchronous response will be handled by `handle_awaiter_arrived` callback and timed out
+ * asynchronous response will be handled by `handle_awaiter_timedout` callback. Those callbacks
+ * will be passed the @awaiter as a parameter, which is the same as the return value of this
+ * function. The response can be accessed from its `resp` member. Also, the @data passed to this
+ * function can be accessed from its `data` member. The @awaiter must be released by calling
+ * the `gcip_mailbox_release_awaiter` function when it is no longer needed.
+ *
+ * If the mailbox is released before the response arrives, all the waiting asynchronous responses
+ * will be flushed. In this case, the `flush_awaiter` callback will be called for that response
+ * and @awaiter does not have to be released by the implementation side.
+ * (i.e., the `gcip_mailbox_release_awaiter` function will be called internally.)
+ *
+ * The caller defines the way of cleaning up the @data to the `release_awaiter_data` callback.
+ * This callback will be called when the `gcip_mailbox_release_awaiter` function is called or
+ * the response is flushed.
+ *
+ * If this function fails to request the command, it will return an error pointer. In this case,
+ * the caller should free @data explicitly. (i.e., the `release_awaiter_data` callback will not
+ * be called.)
+ *
+ * Note: the asynchronous responses fetched from @resp_queue should be released by calling the
+ * `gcip_mailbox_release_awaiter` function.
+ *
+ * Note: if the lifetime of the mailbox is longer than that of the caller, make sure the
+ * callbacks don't access the caller's variables after the caller has been released.
+ *
+ * Note: if you don't need the result of the response (e.g., if you pass @resp as NULL), you
+ * can release the returned awaiter right away by calling the `gcip_mailbox_release_awaiter`
+ * function.
+ */
+struct gcip_mailbox_resp_awaiter *gcip_mailbox_put_cmd(struct gcip_mailbox *mailbox, void *cmd,
+ void *resp, void *data);
+
+/*
+ * Cancels awaiting the asynchronous response.
+ * This function removes @awaiter from the wait list so that it will not be handled by the
+ * arrived callback. It also cancels the timeout work of @awaiter synchronously. Therefore,
+ * AFTER this function returns, it is guaranteed that neither the arrived nor the timedout
+ * callback will be called for @awaiter.
+ *
+ * However, note that due to a race condition, the arrived or timedout callback may still be
+ * executed BEFORE this function returns. (i.e., this function and the arrived/timedout callback
+ * are called at the same time, but the callback acquired the lock first.)
+ *
+ * Note: this function will cancel or wait for the completion of arrived or timedout callbacks
+ * synchronously. Therefore, make sure that the caller side doesn't hold any locks which can be
+ * acquired by the arrived or timedout callbacks.
+ *
+ * If you have already received the response for @awaiter and want to ensure that the timedout
+ * handler has finished, you can use the `gcip_mailbox_cancel_awaiter_timeout` function instead.
+ */
+void gcip_mailbox_cancel_awaiter(struct gcip_mailbox_resp_awaiter *awaiter);
+
+/*
+ * Cancels the timeout work of the asynchronous response. Normally, either the response arrives
+ * and the timeout is canceled, or the response times out and the timeout handler executes.
+ * Rarely, however, the response handler cancels the timeout handler while it is already in
+ * progress. To handle this and ensure any in-progress timeout handler has been able to exit
+ * cleanly, it is recommended to call this function after fetching the asynchronous response,
+ * even if the response arrived successfully.
+ *
+ * Note: this function will cancel or wait for the completion of timedout callbacks synchronously.
+ * Therefore, make sure that the caller side doesn't hold any locks which can be acquired by the
+ * timedout callbacks.
+ *
+ * If you haven't received the response for @awaiter yet and want to prevent it from being
+ * processed by the arrived and timedout callbacks, use the `gcip_mailbox_cancel_awaiter` function.
+ */
+void gcip_mailbox_cancel_awaiter_timeout(struct gcip_mailbox_resp_awaiter *awaiter);
+
+/*
+ * Releases @awaiter. Every fetched (arrived or timed out) asynchronous response should be
+ * released by calling this. It calls the `release_awaiter_data` callback internally.
+ */
+void gcip_mailbox_release_awaiter(struct gcip_mailbox_resp_awaiter *awaiter);
+
+/*
+ * Consumes one response and handles it. This can be used to consume one response quickly and then
+ * schedule the `gcip_mailbox_consume_responses_work` work from the mailbox IRQ handler.
+ */
+void gcip_mailbox_consume_one_response(struct gcip_mailbox *mailbox, void *resp);
+
+/* Getters for member variables of the `struct gcip_mailbox`. */
+
+static inline u64 gcip_mailbox_get_cur_seq(struct gcip_mailbox *mailbox)
+{
+ return mailbox->cur_seq;
+}
+
+static inline void *gcip_mailbox_get_cmd_queue(struct gcip_mailbox *mailbox)
+{
+ return mailbox->cmd_queue;
+}
+
+static inline u32 gcip_mailbox_get_cmd_elem_size(struct gcip_mailbox *mailbox)
+{
+ return mailbox->cmd_elem_size;
+}
+
+static inline void *gcip_mailbox_get_resp_queue(struct gcip_mailbox *mailbox)
+{
+ return mailbox->resp_queue;
+}
+
+static inline u32 gcip_mailbox_get_resp_elem_size(struct gcip_mailbox *mailbox)
+{
+ return mailbox->resp_elem_size;
+}
+
+static inline u64 gcip_mailbox_get_queue_wrap_bit(struct gcip_mailbox *mailbox)
+{
+ return mailbox->queue_wrap_bit;
+}
+
+static inline struct list_head *gcip_mailbox_get_wait_list(struct gcip_mailbox *mailbox)
+{
+ return &mailbox->wait_list;
+}
+
+static inline u32 gcip_mailbox_get_timeout(struct gcip_mailbox *mailbox)
+{
+ return mailbox->timeout;
+}
+
+static inline void *gcip_mailbox_get_data(struct gcip_mailbox *mailbox)
+{
+ return mailbox->data;
+}
+
+#endif /* __GCIP_MAILBOX_H__ */
diff --git a/gcip-kernel-driver/include/gcip/gcip-mem-pool.h b/gcip-kernel-driver/include/gcip/gcip-mem-pool.h
new file mode 100644
index 0000000..c770300
--- /dev/null
+++ b/gcip-kernel-driver/include/gcip/gcip-mem-pool.h
@@ -0,0 +1,71 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * A simple memory allocator to help allocating reserved memory pools.
+ *
+ * Copyright (C) 2022 Google LLC
+ */
+
+#ifndef __GCIP_MEM_POOL_H__
+#define __GCIP_MEM_POOL_H__
+
+#include <linux/device.h>
+#include <linux/genalloc.h>
+#include <linux/types.h>
+
+struct gcip_mem_pool {
+ struct device *dev;
+ struct gen_pool *gen_pool;
+ phys_addr_t base_paddr;
+ size_t granule;
+};
+
+/*
+ * Initializes the memory pool object.
+ *
+ * @pool: The memory pool object to be initialized.
+ * @dev: Used for logging only.
+ * @base_paddr: The base physical address of the pool. Must be greater than 0 and a multiple of
+ * @granule.
+ * @size: The size of the pool. @size should be a multiple of @granule.
+ * @granule: The granule when invoking the allocator. Should be a power of 2.
+ *
+ * Returns 0 on success, a negative errno otherwise.
+ *
+ * Call gcip_mem_pool_exit() to release the resources of @pool.
+ */
+int gcip_mem_pool_init(struct gcip_mem_pool *pool, struct device *dev, phys_addr_t base_paddr,
+ size_t size, size_t granule);
+/*
+ * Releases resources of @pool.
+ *
+ * Note: you must release (by calling gcip_mem_pool_free) all allocations before calling this
+ * function.
+ */
+void gcip_mem_pool_exit(struct gcip_mem_pool *pool);
+
+/*
+ * Allocates and returns the allocated physical address.
+ *
+ * @size: Size to be allocated.
+ *
+ * Returns the allocated address. Returns 0 on allocation failure.
+ */
+phys_addr_t gcip_mem_pool_alloc(struct gcip_mem_pool *pool, size_t size);
+/*
+ * Returns an address previously allocated by gcip_mem_pool_alloc() back to the pool.
+ *
+ * The size and address must match what was previously passed to / returned by gcip_mem_pool_alloc().
+ */
+void gcip_mem_pool_free(struct gcip_mem_pool *pool, phys_addr_t paddr, size_t size);
+
+/*
+ * Returns the offset between @paddr and @base_paddr passed to gcip_mem_pool_init().
+ *
+ * @paddr must be a value returned by gcip_mem_pool_alloc().
+ */
+static inline size_t gcip_mem_pool_offset(struct gcip_mem_pool *pool, phys_addr_t paddr)
+{
+ return paddr - pool->base_paddr;
+}
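+
+/*
+ * Example (hedged sketch): carving a reserved region into granule-aligned chunks. The base
+ * address, sizes and granule below are illustrative values only.
+ *
+ *   struct gcip_mem_pool pool;
+ *   phys_addr_t paddr;
+ *
+ *   ret = gcip_mem_pool_init(&pool, dev, 0x90000000, SZ_16M, SZ_4K);
+ *   if (ret)
+ *           return ret;
+ *
+ *   paddr = gcip_mem_pool_alloc(&pool, SZ_64K);
+ *   if (!paddr)
+ *           goto err_exit;                        // 0 means allocation failure
+ *   offset = gcip_mem_pool_offset(&pool, paddr);  // offset from the 0x90000000 base
+ *
+ *   gcip_mem_pool_free(&pool, paddr, SZ_64K);     // size/address must match the alloc
+ *   gcip_mem_pool_exit(&pool);                    // only after all allocations are freed
+ */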
+
+#endif /* __GCIP_MEM_POOL_H__ */
diff --git a/gcip-kernel-driver/include/gcip/gcip-telemetry.h b/gcip-kernel-driver/include/gcip/gcip-telemetry.h
new file mode 100644
index 0000000..4556291
--- /dev/null
+++ b/gcip-kernel-driver/include/gcip/gcip-telemetry.h
@@ -0,0 +1,123 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * GCIP telemetry: logging and tracing.
+ *
+ * Copyright (C) 2022 Google LLC
+ */
+
+#ifndef __GCIP_TELEMETRY_H__
+#define __GCIP_TELEMETRY_H__
+
+#include <linux/device.h>
+#include <linux/eventfd.h>
+#include <linux/mutex.h>
+#include <linux/rwlock_types.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+
+/* Log level codes used by gcip firmware. */
+#define GCIP_FW_LOG_LEVEL_VERBOSE (2)
+#define GCIP_FW_LOG_LEVEL_DEBUG (1)
+#define GCIP_FW_LOG_LEVEL_INFO (0)
+#define GCIP_FW_LOG_LEVEL_WARN (-1)
+#define GCIP_FW_LOG_LEVEL_ERROR (-2)
+
+#define GCIP_FW_DMESG_LOG_LEVEL (GCIP_FW_LOG_LEVEL_WARN)
+
+/* When log data arrives, recheck for more log data after this delay. */
+#define GCIP_TELEMETRY_LOG_RECHECK_DELAY 200 /* ms */
+
+enum gcip_telemetry_state {
+ GCIP_TELEMETRY_DISABLED = 0,
+ GCIP_TELEMETRY_ENABLED = 1,
+ GCIP_TELEMETRY_INVALID = -1,
+};
+
+/* To specify the target of operation. */
+enum gcip_telemetry_type {
+ GCIP_TELEMETRY_LOG = 0,
+ GCIP_TELEMETRY_TRACE = 1,
+};
+
+struct gcip_telemetry_header {
+ u32 head;
+ u32 size;
+ u32 reserved0[14]; /* Place head and tail into different cache lines */
+ u32 tail;
+ u32 entries_dropped; /* Number of entries dropped due to buffer full */
+ u32 reserved1[14]; /* Pad to 128 bytes in total */
+};
+
+struct gcip_log_entry_header {
+ s16 code;
+ u16 length;
+ u64 timestamp;
+ u16 crc16;
+} __packed;
+
+struct gcip_telemetry {
+ /* Device used for logging and memory allocation. */
+ struct device *dev;
+
+ /*
+ * State transitions are used to prevent races with IRQ handlers, e.g. an interrupt arriving
+ * while the kernel is releasing buffers.
+ */
+ enum gcip_telemetry_state state;
+ spinlock_t state_lock; /* protects state */
+
+ struct gcip_telemetry_header *header;
+
+ struct eventfd_ctx *ctx; /* signal this to notify the runtime */
+ rwlock_t ctx_lock; /* protects ctx */
+ const char *name; /* for debugging */
+
+ struct work_struct work; /* worker for handling data */
+ /* Fallback function to call for default log/trace handling. */
+ void (*fallback_fn)(struct gcip_telemetry *tel);
+ struct mutex mmap_lock; /* protects mmapped_count */
+ long mmapped_count; /* number of VMAs that are mapped to this telemetry buffer */
+};
+
+struct gcip_kci;
+
+struct gcip_telemetry_kci_args {
+ struct gcip_kci *kci;
+ u64 addr;
+ u32 size;
+};
+
+/* Sends telemetry KCI through send kci callback and args. */
+int gcip_telemetry_kci(struct gcip_telemetry *tel,
+ int (*send_kci)(struct gcip_telemetry_kci_args *),
+ struct gcip_telemetry_kci_args *args);
+/* Sets the eventfd for telemetry. */
+int gcip_telemetry_set_event(struct gcip_telemetry *tel, u32 eventfd);
+/* Unsets the eventfd for telemetry. */
+void gcip_telemetry_unset_event(struct gcip_telemetry *tel);
+/* Fallback that logs messages to dmesg from the host CPU. */
+void gcip_telemetry_fw_log(struct gcip_telemetry *log);
+/* Fallback that consumes the trace buffer. */
+void gcip_telemetry_fw_trace(struct gcip_telemetry *trace);
+/* Interrupt handler to schedule the worker when the buffer is not empty. */
+void gcip_telemetry_irq_handler(struct gcip_telemetry *tel);
+/* Increases the telemetry mmap count. */
+void gcip_telemetry_inc_mmap_count(struct gcip_telemetry *tel, int dif);
+/* Mmaps the telemetry buffer through mmap callback and args. */
+int gcip_telemetry_mmap_buffer(struct gcip_telemetry *tel, int (*mmap)(void *), void *args);
+/*
+ * Initializes struct gcip_telemetry.
+ *
+ * @vaddr: Virtual address of the queue buffer.
+ * @size: Size of the queue buffer. Must be a power of 2 and greater than the size of struct
+ * gcip_telemetry_header.
+ * @fallback_fn: Fallback function to call for default log/trace handling.
+ */
+int gcip_telemetry_init(struct device *dev, struct gcip_telemetry *tel, const char *name,
+ void *vaddr, const size_t size,
+ void (*fallback_fn)(struct gcip_telemetry *));
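+
+/*
+ * Example (hedged sketch): setting up a log telemetry instance backed by a pre-allocated buffer
+ * and notifying the runtime through an eventfd. The buffer, its size and the eventfd value are
+ * illustrative assumptions.
+ *
+ *   ret = gcip_telemetry_init(dev, &log_tel, "telemetry_log", log_buf_vaddr, SZ_64K,
+ *                             gcip_telemetry_fw_log);
+ *   if (ret)
+ *           return ret;
+ *   ret = gcip_telemetry_set_event(&log_tel, eventfd_from_runtime);
+ *   ...
+ *   // From the telemetry IRQ path:
+ *   gcip_telemetry_irq_handler(&log_tel);
+ *   ...
+ *   gcip_telemetry_unset_event(&log_tel);
+ *   gcip_telemetry_exit(&log_tel);
+ */
+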
+/* Exits and sets the telemetry state to GCIP_TELEMETRY_INVALID. */
+void gcip_telemetry_exit(struct gcip_telemetry *tel);
+
+#endif /* __GCIP_TELEMETRY_H__ */
diff --git a/gsx01-mailbox-driver.c b/gsx01-mailbox-driver.c
new file mode 100644
index 0000000..f0090f4
--- /dev/null
+++ b/gsx01-mailbox-driver.c
@@ -0,0 +1,70 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * GXP hardware-based mailbox csr driver implementation for GSX01.
+ *
+ * Copyright (C) 2022 Google LLC
+ */
+
+#include <asm/barrier.h>
+#include <linux/bitops.h>
+#include <linux/interrupt.h>
+#include <linux/kthread.h>
+#include <linux/of_irq.h>
+#include <linux/spinlock.h>
+
+#include "gxp-config.h"
+#include "gxp-mailbox-driver.h"
+#include "gxp-mailbox.h"
+
+#include "gxp-mailbox-driver.c"
+
+static u32 csr_read(struct gxp_mailbox *mailbox, uint reg_offset)
+{
+ return readl(mailbox->csr_reg_base + reg_offset);
+}
+
+static void csr_write(struct gxp_mailbox *mailbox, uint reg_offset, u32 value)
+{
+ writel(value, mailbox->csr_reg_base + reg_offset);
+}
+
+void gxp_mailbox_reset_hw(struct gxp_mailbox *mailbox)
+{
+ csr_write(mailbox, MBOX_MCUCTLR_OFFSET, 1);
+}
+
+void gxp_mailbox_generate_device_interrupt(struct gxp_mailbox *mailbox,
+ u32 int_mask)
+{
+ /*
+ * Ensure all memory writes have been committed to memory before
+ * signalling to the device to read from them. This avoids the scenario
+ * where the interrupt trigger write gets delivered to the MBX HW before
+ * the DRAM transactions made it to DRAM since they're Normal
+ * transactions and can be re-ordered and backed off behind other
+ * transfers.
+ */
+ wmb();
+
+ csr_write(mailbox, MBOX_INTGR0_OFFSET, int_mask);
+}
+
+u32 gxp_mailbox_get_device_mask_status(struct gxp_mailbox *mailbox)
+{
+ return csr_read(mailbox, MBOX_INTMSR0_OFFSET);
+}
+
+void gxp_mailbox_clear_host_interrupt(struct gxp_mailbox *mailbox, u32 int_mask)
+{
+ csr_write(mailbox, MBOX_INTCR1_OFFSET, int_mask);
+}
+
+void gxp_mailbox_mask_host_interrupt(struct gxp_mailbox *mailbox, u32 int_mask)
+{
+ csr_write(mailbox, MBOX_INTMR1_OFFSET, int_mask);
+}
+
+u32 gxp_mailbox_get_host_mask_status(struct gxp_mailbox *mailbox)
+{
+ return csr_read(mailbox, MBOX_INTMSR1_OFFSET);
+}
diff --git a/gxp-bpm.c b/gxp-bpm.c
index 50a41f1..90cf0e9 100644
--- a/gxp-bpm.c
+++ b/gxp-bpm.c
@@ -25,32 +25,32 @@ void gxp_bpm_configure(struct gxp_dev *gxp, u8 core, u32 bpm_offset, u32 event)
{
u32 val = ((event & BPM_EVENT_TYPE_MASK) << BPM_EVENT_TYPE_BIT) |
BPM_ENABLE;
- u32 bpm_base = GXP_REG_INST_BPM + bpm_offset;
+ u32 bpm_base = GXP_CORE_REG_INST_BPM(core) + bpm_offset;
/* Configure event */
- gxp_write_32_core(gxp, core, bpm_base + BPM_CNTR_CONFIG_OFFSET, val);
+ gxp_write_32(gxp, bpm_base + BPM_CNTR_CONFIG_OFFSET, val);
/* Arm counter */
- gxp_write_32_core(gxp, core, bpm_base + BPM_CONFIG_OFFSET, BPM_ENABLE);
+ gxp_write_32(gxp, bpm_base + BPM_CONFIG_OFFSET, BPM_ENABLE);
}
void gxp_bpm_start(struct gxp_dev *gxp, u8 core)
{
- gxp_write_32_core(gxp, core, GXP_REG_PROFILING_CONDITION,
- BPM_ENABLE << BPM_START_BIT);
+ gxp_write_32(gxp, GXP_CORE_REG_PROFILING_CONDITION(core),
+ BPM_ENABLE << BPM_START_BIT);
}
void gxp_bpm_stop(struct gxp_dev *gxp, u8 core)
{
- gxp_write_32_core(gxp, core, GXP_REG_PROFILING_CONDITION,
- BPM_ENABLE << BPM_STOP_BIT);
+ gxp_write_32(gxp, GXP_CORE_REG_PROFILING_CONDITION(core),
+ BPM_ENABLE << BPM_STOP_BIT);
}
u32 gxp_bpm_read_counter(struct gxp_dev *gxp, u8 core, u32 bpm_offset)
{
- u32 bpm_base = GXP_REG_INST_BPM + bpm_offset;
+ u32 bpm_base = GXP_CORE_REG_INST_BPM(core) + bpm_offset;
/* Disarm counter */
- gxp_write_32_core(gxp, core, bpm_base + BPM_CONFIG_OFFSET, BPM_DISABLE);
+ gxp_write_32(gxp, bpm_base + BPM_CONFIG_OFFSET, BPM_DISABLE);
/* Read final counter value */
- return gxp_read_32_core(gxp, core, bpm_base + BPM_SNAPSHOT_CNTR_OFFSET);
+ return gxp_read_32(gxp, bpm_base + BPM_SNAPSHOT_CNTR_OFFSET);
}
diff --git a/gxp-client.c b/gxp-client.c
index c9184d7..f96a100 100644
--- a/gxp-client.c
+++ b/gxp-client.c
@@ -28,10 +28,9 @@ struct gxp_client *gxp_client_create(struct gxp_dev *gxp)
init_rwsem(&client->semaphore);
client->has_block_wakelock = false;
client->has_vd_wakelock = false;
- client->requested_power_state = AUR_OFF;
- client->requested_memory_power_state = 0;
+ client->requested_states = off_states;
client->vd = NULL;
- client->requested_low_clkmux = false;
+
return client;
}
@@ -40,37 +39,285 @@ void gxp_client_destroy(struct gxp_client *client)
struct gxp_dev *gxp = client->gxp;
int core;
- down_write(&gxp->vd_semaphore);
+ if (client->vd && client->has_block_wakelock)
+ gxp_vd_block_unready(client->vd);
- if (client->vd && client->vd->state != GXP_VD_OFF)
+ if (client->vd && client->vd->state != GXP_VD_OFF) {
+ down_read(&gxp->vd_semaphore);
gxp_vd_stop(client->vd);
+ up_read(&gxp->vd_semaphore);
+ }
for (core = 0; core < GXP_NUM_CORES; core++) {
if (client->mb_eventfds[core])
gxp_eventfd_put(client->mb_eventfds[core]);
}
- up_write(&gxp->vd_semaphore);
-
-#if (IS_ENABLED(CONFIG_GXP_TEST) || IS_ENABLED(CONFIG_ANDROID)) && !IS_ENABLED(CONFIG_GXP_GEM5)
+#if (IS_ENABLED(CONFIG_GXP_TEST) || IS_ENABLED(CONFIG_ANDROID)) && \
+ !IS_ENABLED(CONFIG_GXP_GEM5)
if (client->tpu_file) {
+ if (client->vd) {
+ if (gxp->before_unmap_tpu_mbx_queue)
+ gxp->before_unmap_tpu_mbx_queue(gxp, client);
+ /*
+ * TODO(b/237624453): remove '|| 1' once the MCU supports DSP->TPU interop
+ */
+ if (gxp_is_direct_mode(gxp) || 1)
+ gxp_dma_unmap_tpu_buffer(gxp,
+ client->vd->domain,
+ client->mbx_desc);
+ }
fput(client->tpu_file);
client->tpu_file = NULL;
- gxp_dma_unmap_tpu_buffer(gxp, client->vd, client->mbx_desc);
}
#endif
if (client->has_block_wakelock) {
gxp_wakelock_release(client->gxp);
gxp_pm_update_requested_power_states(
- gxp, client->requested_power_state,
- client->requested_low_clkmux, AUR_OFF, false,
- client->requested_memory_power_state,
- AUR_MEM_UNDEFINED);
+ gxp, client->requested_states, off_states);
}
- if (client->vd)
+ if (client->vd) {
+ down_write(&gxp->vd_semaphore);
gxp_vd_release(client->vd);
+ up_write(&gxp->vd_semaphore);
+ }
kfree(client);
}
+
+static int gxp_set_secure_vd(struct gxp_virtual_device *vd)
+{
+ struct gxp_dev *gxp = vd->gxp;
+
+ if (gxp_is_direct_mode(gxp))
+ return 0;
+
+ mutex_lock(&gxp->secure_vd_lock);
+ if (gxp->secure_vd) {
+ mutex_unlock(&gxp->secure_vd_lock);
+ return -EEXIST;
+ }
+ vd->is_secure = true;
+ gxp->secure_vd = vd;
+ mutex_unlock(&gxp->secure_vd_lock);
+
+ return 0;
+}
+
+int gxp_client_allocate_virtual_device(struct gxp_client *client,
+ uint core_count, u8 flags)
+{
+ struct gxp_dev *gxp = client->gxp;
+ struct gxp_virtual_device *vd;
+ int ret;
+
+ lockdep_assert_held(&client->semaphore);
+ if (client->vd) {
+ dev_err(gxp->dev,
+ "Virtual device was already allocated for client\n");
+ return -EINVAL;
+ }
+
+ down_write(&gxp->vd_semaphore);
+ vd = gxp_vd_allocate(gxp, core_count);
+ if (IS_ERR(vd)) {
+ ret = PTR_ERR(vd);
+ dev_err(gxp->dev,
+ "Failed to allocate virtual device for client (%d)\n",
+ ret);
+ goto error;
+ }
+ if (flags & GXP_ALLOCATE_VD_SECURE) {
+ ret = gxp_set_secure_vd(vd);
+ if (ret)
+ goto error_vd_release;
+ }
+ if (client->has_block_wakelock) {
+ ret = gxp_vd_block_ready(vd);
+ if (ret)
+ goto error_vd_release;
+ }
+ up_write(&gxp->vd_semaphore);
+
+ client->vd = vd;
+ return 0;
+
+error_vd_release:
+ gxp_vd_release(vd);
+error:
+ up_write(&gxp->vd_semaphore);
+ return ret;
+}
+
+static int gxp_client_request_power_states(struct gxp_client *client,
+ struct gxp_power_states requested_states)
+{
+ struct gxp_dev *gxp = client->gxp;
+ int ret;
+
+ if (gxp->request_power_states) {
+ ret = gxp->request_power_states(client, requested_states);
+ if (ret != -EOPNOTSUPP)
+ return ret;
+ }
+ gxp_pm_update_requested_power_states(gxp, client->requested_states,
+ requested_states);
+ client->requested_states = requested_states;
+ return 0;
+}
+
+int gxp_client_acquire_block_wakelock(struct gxp_client *client,
+ bool *acquired_wakelock)
+{
+ struct gxp_dev *gxp = client->gxp;
+ int ret;
+
+ lockdep_assert_held(&client->semaphore);
+ if (!client->has_block_wakelock) {
+ ret = gxp_wakelock_acquire(gxp);
+ if (ret)
+ return ret;
+ *acquired_wakelock = true;
+ if (client->vd) {
+ ret = gxp_vd_block_ready(client->vd);
+ if (ret)
+ goto err_wakelock_release;
+ }
+ } else {
+ *acquired_wakelock = false;
+ }
+ client->has_block_wakelock = true;
+
+ /*
+ * Update client's TGID+PID in case the process that opened
+ * /dev/gxp is not the one that called this IOCTL.
+ */
+ client->tgid = current->tgid;
+ client->pid = current->pid;
+
+ return 0;
+
+err_wakelock_release:
+ if (*acquired_wakelock) {
+ gxp_wakelock_release(gxp);
+ *acquired_wakelock = false;
+ }
+ return ret;
+}
+
+void gxp_client_release_block_wakelock(struct gxp_client *client)
+{
+ struct gxp_dev *gxp = client->gxp;
+
+ lockdep_assert_held(&client->semaphore);
+ if (!client->has_block_wakelock)
+ return;
+
+ if (client->vd)
+ gxp_vd_block_unready(client->vd);
+
+ if (client->has_vd_wakelock)
+ gxp_client_release_vd_wakelock(client);
+
+ gxp_wakelock_release(gxp);
+ client->has_block_wakelock = false;
+}
+
+int gxp_client_acquire_vd_wakelock(struct gxp_client *client,
+ struct gxp_power_states requested_states)
+{
+ struct gxp_dev *gxp = client->gxp;
+ int ret = 0;
+ enum gxp_virtual_device_state orig_state;
+
+ lockdep_assert_held(&client->semaphore);
+ if (!client->has_block_wakelock) {
+ dev_err(gxp->dev,
+ "Must hold BLOCK wakelock to acquire VIRTUAL_DEVICE wakelock\n");
+ return -EINVAL;
+ }
+
+ if (client->vd->state == GXP_VD_UNAVAILABLE) {
+ dev_err(gxp->dev,
+ "Cannot acquire VIRTUAL_DEVICE wakelock on a broken virtual device\n");
+ return -ENODEV;
+ }
+
+ if (!client->has_vd_wakelock) {
+ down_write(&gxp->vd_semaphore);
+ orig_state = client->vd->state;
+ if (client->vd->state == GXP_VD_READY || client->vd->state == GXP_VD_OFF)
+ ret = gxp_vd_run(client->vd);
+ else
+ ret = gxp_vd_resume(client->vd);
+ up_write(&gxp->vd_semaphore);
+ }
+
+ if (ret)
+ goto out;
+
+ ret = gxp_client_request_power_states(client, requested_states);
+ if (ret)
+ goto out_release_vd_wakelock;
+
+ client->has_vd_wakelock = true;
+ return 0;
+
+out_release_vd_wakelock:
+ if (!client->has_vd_wakelock) {
+ down_write(&gxp->vd_semaphore);
+ if (orig_state == GXP_VD_READY || orig_state == GXP_VD_OFF)
+ gxp_vd_stop(client->vd);
+ else
+ gxp_vd_suspend(client->vd);
+ up_write(&gxp->vd_semaphore);
+ }
+out:
+ return ret;
+}
+
+void gxp_client_release_vd_wakelock(struct gxp_client *client)
+{
+ struct gxp_dev *gxp = client->gxp;
+
+ lockdep_assert_held(&client->semaphore);
+ if (!client->has_vd_wakelock)
+ return;
+
+ /*
+ * Currently the VD state will not be GXP_VD_UNAVAILABLE if
+ * has_vd_wakelock is true. This check is here just in case
+ * GXP_VD_UNAVAILABLE can occur in more scenarios in the
+ * future.
+ */
+ if (client->vd->state == GXP_VD_UNAVAILABLE)
+ return;
+
+ down_write(&gxp->vd_semaphore);
+ gxp_vd_suspend(client->vd);
+ up_write(&gxp->vd_semaphore);
+
+ gxp_client_request_power_states(client, off_states);
+ client->has_vd_wakelock = false;
+}
+
+bool gxp_client_has_available_vd(struct gxp_client *client, const char *name)
+{
+ struct gxp_dev *gxp = client->gxp;
+
+ lockdep_assert_held(&client->semaphore);
+ if (!client->vd) {
+ dev_err(gxp->dev,
+ "%s requires the client allocate a VIRTUAL_DEVICE\n",
+ name);
+ return false;
+ }
+ if (client->vd->state == GXP_VD_UNAVAILABLE) {
+ dev_err(gxp->dev, "Cannot do %s on a broken virtual device\n",
+ name);
+ return false;
+ }
+ return true;
+}
diff --git a/gxp-client.h b/gxp-client.h
index 0d1f860..01d0b2c 100644
--- a/gxp-client.h
+++ b/gxp-client.h
@@ -12,8 +12,9 @@
#include <linux/sched.h>
#include <linux/types.h>
-#include "gxp-internal.h"
#include "gxp-eventfd.h"
+#include "gxp-internal.h"
+#include "gxp-pm.h"
#include "gxp-vd.h"
/* Holds state belonging to a client */
@@ -30,11 +31,8 @@ struct gxp_client {
bool has_block_wakelock;
bool has_vd_wakelock;
- /* Value is one of the GXP_POWER_STATE_* values from gxp.h. */
- uint requested_power_state;
- /* Value is one of the MEMORY_POWER_STATE_* values from gxp.h. */
- uint requested_memory_power_state;
- bool requested_low_clkmux;
+
+ struct gxp_power_states requested_states;
struct gxp_virtual_device *vd;
struct file *tpu_file;
@@ -48,11 +46,12 @@ struct gxp_client {
pid_t pid;
/*
- * Indicates whether the driver needs to disable telemetry when this
- * client closes. For when the client fails to disable telemetry itself.
+ * Indicates whether the driver needs to disable core telemetry when
+ * this client closes. For when the client fails to disable core
+ * telemetry itself.
*/
- bool enabled_telemetry_logging;
- bool enabled_telemetry_tracing;
+ bool enabled_core_telemetry_logging;
+ bool enabled_core_telemetry_tracing;
};
/*
@@ -65,5 +64,78 @@ struct gxp_client *gxp_client_create(struct gxp_dev *gxp);
* TPU mailboxes it holds.
*/
void gxp_client_destroy(struct gxp_client *client);
+/**
+ * gxp_client_allocate_virtual_device() - Allocates a virtual device for the
+ * client.
+ *
+ * @client: The client to allocate a virtual device for.
+ * @core_count: The requested core count of the virtual device.
+ * @flags: The flags passed from the runtime's request.
+ *
+ * The caller must have locked client->semaphore.
+ *
+ * Return:
+ * * 0 - Success
+ * * -EINVAL - A virtual device has already been allocated for the client
+ * * Otherwise - Errno returned by virtual device allocation
+ */
+int gxp_client_allocate_virtual_device(struct gxp_client *client,
+ uint core_count, u8 flags);
+/**
+ * gxp_client_acquire_block_wakelock() - Acquires a block wakelock.
+ *
+ * @client: The client to acquire wakelock.
+ * @acquired_wakelock: True if block wakelock has been acquired by this client.
+ *
+ * The caller must have locked client->semaphore.
+ *
+ * Return:
+ * * 0 - Success
+ * * Otherwise - Errno returned by block wakelock acquisition
+ */
+int gxp_client_acquire_block_wakelock(struct gxp_client *client,
+ bool *acquired_wakelock);
+/**
+ * gxp_client_release_block_wakelock() - Releases the held block wakelock and
+ * revokes the power votes.
+ *
+ * The caller must have locked client->semaphore.
+ */
+void gxp_client_release_block_wakelock(struct gxp_client *client);
+/**
+ * gxp_client_acquire_vd_wakelock() - Acquires a VD wakelock for the current
+ * virtual device to start it, or to resume it if it is suspended.
+ * The client can also request the power votes tied to the acquired wakelock.
+ *
+ * @client: The client to acquire wakelock and request power votes.
+ * @requested_states: The requested power states.
+ *
+ * The caller must have locked client->semaphore.
+ *
+ * Return:
+ * * 0 - Success
+ * * -EINVAL - No block wakelock is held
+ * * -ENODEV - VD state is unavailable
+ */
+int gxp_client_acquire_vd_wakelock(struct gxp_client *client,
+ struct gxp_power_states requested_states);
+/**
+ * gxp_client_release_vd_wakelock() - Releases the held VD wakelock to suspend
+ * the current virtual device.
+ *
+ * The caller must have locked client->semaphore.
+ */
+void gxp_client_release_vd_wakelock(struct gxp_client *client);
+
+/**
+ * gxp_client_has_available_vd() - Returns whether @client has an available
+ * virtual device.
+ *
+ * @client: The client to check.
+ * @name: The string used for logging when the client has an invalid VD.
+ *
+ * The caller must have locked client->semaphore.
+ */
+bool gxp_client_has_available_vd(struct gxp_client *client, const char *name);
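+
+/*
+ * Example (hedged sketch): the typical ioctl-side sequence using the helpers above. Error
+ * handling is trimmed, the flags value and `states` power-state request are illustrative, and
+ * the ioctl handlers in gxp-common-platform.c remain the authoritative users.
+ *
+ *   down_write(&client->semaphore);
+ *   ret = gxp_client_allocate_virtual_device(client, core_count, 0);
+ *   ret = gxp_client_acquire_block_wakelock(client, &acquired_wakelock);
+ *   ret = gxp_client_acquire_vd_wakelock(client, states);
+ *   up_write(&client->semaphore);
+ *   ...
+ *   down_write(&client->semaphore);
+ *   gxp_client_release_vd_wakelock(client);
+ *   gxp_client_release_block_wakelock(client);
+ *   up_write(&client->semaphore);
+ */
+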
#endif /* __GXP_CLIENT_H__ */
diff --git a/gxp-common-platform.c b/gxp-common-platform.c
new file mode 100644
index 0000000..d9ac532
--- /dev/null
+++ b/gxp-common-platform.c
@@ -0,0 +1,2043 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * GXP platform driver utilities.
+ *
+ * Copyright (C) 2022 Google LLC
+ */
+
+#if IS_ENABLED(CONFIG_SUBSYSTEM_COREDUMP)
+#include <linux/platform_data/sscoredump.h>
+#endif
+
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/miscdevice.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/uidgid.h>
+
+#include "gxp-client.h"
+#include "gxp-config.h"
+#include "gxp-core-telemetry.h"
+#include "gxp-debug-dump.h"
+#include "gxp-debugfs.h"
+#include "gxp-dma.h"
+#include "gxp-dmabuf.h"
+#include "gxp-domain-pool.h"
+#include "gxp-firmware.h"
+#include "gxp-firmware-data.h"
+#include "gxp-internal.h"
+#include "gxp-lpm.h"
+#include "gxp-mailbox.h"
+#include "gxp-mailbox-driver.h"
+#include "gxp-mapping.h"
+#include "gxp-notification.h"
+#include "gxp-pm.h"
+#include "gxp-thermal.h"
+#include "gxp-vd.h"
+#include "gxp-wakelock.h"
+#include "gxp.h"
+
+#if (IS_ENABLED(CONFIG_GXP_TEST) || IS_ENABLED(CONFIG_ANDROID)) && !IS_ENABLED(CONFIG_GXP_GEM5)
+#define HAS_TPU_EXT
+#include <soc/google/tpu-ext.h>
+#endif
+
+#if GXP_USE_LEGACY_MAILBOX
+#include "gxp-mailbox-impl.h"
+#else
+#include "gxp-dci.h"
+#endif
+
+static struct gxp_dev *gxp_debug_pointer;
+
+/* Caller needs to hold client->semaphore for reading */
+static bool check_client_has_available_vd_wakelock(struct gxp_client *client,
+ char *ioctl_name)
+{
+ struct gxp_dev *gxp = client->gxp;
+
+ lockdep_assert_held_read(&client->semaphore);
+ if (!client->has_vd_wakelock) {
+ dev_err(gxp->dev,
+ "%s requires the client hold a VIRTUAL_DEVICE wakelock\n",
+ ioctl_name);
+ return false;
+ }
+ if (client->vd->state == GXP_VD_UNAVAILABLE) {
+ dev_err(gxp->dev, "Cannot do %s on a broken virtual device\n",
+ ioctl_name);
+ return false;
+ }
+ return true;
+}
+
+#if IS_ENABLED(CONFIG_SUBSYSTEM_COREDUMP)
+
+static struct sscd_platform_data gxp_sscd_pdata;
+
+static void gxp_sscd_release(struct device *dev)
+{
+ pr_debug("%s\n", __func__);
+}
+
+static struct platform_device gxp_sscd_dev = {
+ .name = GXP_DRIVER_NAME,
+ .driver_override = SSCD_NAME,
+ .id = -1,
+ .dev = {
+ .platform_data = &gxp_sscd_pdata,
+ .release = gxp_sscd_release,
+ },
+};
+
+static void gxp_common_platform_reg_sscd(void)
+{
+ /* Registers SSCD platform device */
+ if (gxp_debug_dump_is_enabled()) {
+ if (platform_device_register(&gxp_sscd_dev))
+ pr_err("Unable to register SSCD platform device\n");
+ }
+}
+
+static void gxp_common_platform_unreg_sscd(void)
+{
+ if (gxp_debug_dump_is_enabled())
+ platform_device_unregister(&gxp_sscd_dev);
+}
+
+#else /* CONFIG_SUBSYSTEM_COREDUMP */
+
+static void gxp_common_platform_reg_sscd(void)
+{
+}
+
+static void gxp_common_platform_unreg_sscd(void)
+{
+}
+
+#endif /* CONFIG_SUBSYSTEM_COREDUMP */
+
+/* Mapping from GXP_POWER_STATE_* to enum aur_power_state in gxp-pm.h */
+static const uint aur_state_array[GXP_NUM_POWER_STATES] = {
+ AUR_OFF, AUR_UUD, AUR_SUD, AUR_UD, AUR_NOM,
+ AUR_READY, AUR_UUD_PLUS, AUR_SUD_PLUS, AUR_UD_PLUS
+};
+/* Mapping from MEMORY_POWER_STATE_* to enum aur_memory_power_state in gxp-pm.h */
+static const uint aur_memory_state_array[MEMORY_POWER_STATE_MAX + 1] = {
+ AUR_MEM_UNDEFINED, AUR_MEM_MIN, AUR_MEM_VERY_LOW, AUR_MEM_LOW,
+ AUR_MEM_HIGH, AUR_MEM_VERY_HIGH, AUR_MEM_MAX
+};
+
+static int gxp_open(struct inode *inode, struct file *file)
+{
+ struct gxp_client *client;
+ struct gxp_dev *gxp = container_of(file->private_data, struct gxp_dev,
+ misc_dev);
+ int ret = 0;
+
+ /* If this is the first call to open(), request the firmware files */
+ ret = gxp_firmware_request_if_needed(gxp);
+ if (ret) {
+ dev_err(gxp->dev,
+ "Failed to request dsp firmware files (ret=%d)\n", ret);
+ return ret;
+ }
+
+ client = gxp_client_create(gxp);
+ if (IS_ERR(client))
+ return PTR_ERR(client);
+
+ client->tgid = current->tgid;
+ client->pid = current->pid;
+
+ file->private_data = client;
+
+ mutex_lock(&gxp->client_list_lock);
+ list_add(&client->list_entry, &gxp->client_list);
+ mutex_unlock(&gxp->client_list_lock);
+
+ return ret;
+}
+
+static int gxp_release(struct inode *inode, struct file *file)
+{
+ struct gxp_client *client = file->private_data;
+
+ /*
+ * If open failed and no client was created then no clean-up is needed.
+ */
+ if (!client)
+ return 0;
+
+ if (client->enabled_core_telemetry_logging)
+ gxp_core_telemetry_disable(client->gxp,
+ GXP_TELEMETRY_TYPE_LOGGING);
+ if (client->enabled_core_telemetry_tracing)
+ gxp_core_telemetry_disable(client->gxp,
+ GXP_TELEMETRY_TYPE_TRACING);
+
+ mutex_lock(&client->gxp->client_list_lock);
+ list_del(&client->list_entry);
+ mutex_unlock(&client->gxp->client_list_lock);
+
+ gxp_client_destroy(client);
+
+ return 0;
+}
+
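+/*
+ * The lower two bits of the mapping flags encode the DMA data direction:
+ * 0b00 = bidirectional, 0b01 = to device, 0b10 = from device.
+ */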
+static inline enum dma_data_direction mapping_flags_to_dma_dir(u32 flags)
+{
+ switch (flags & 0x3) {
+ case 0x0: /* 0b00 */
+ return DMA_BIDIRECTIONAL;
+ case 0x1: /* 0b01 */
+ return DMA_TO_DEVICE;
+ case 0x2: /* 0b10 */
+ return DMA_FROM_DEVICE;
+ }
+
+ return DMA_NONE;
+}
+
+static int gxp_map_buffer(struct gxp_client *client,
+ struct gxp_map_ioctl __user *argp)
+{
+ struct gxp_dev *gxp = client->gxp;
+ struct gxp_map_ioctl ibuf;
+ struct gxp_mapping *map;
+ int ret = 0;
+
+ if (copy_from_user(&ibuf, argp, sizeof(ibuf)))
+ return -EFAULT;
+
+ if (ibuf.size == 0)
+ return -EINVAL;
+
+ if (ibuf.host_address % L1_CACHE_BYTES || ibuf.size % L1_CACHE_BYTES) {
+ dev_err(gxp->dev,
+ "Mapped buffers must be cache line aligned and padded.\n");
+ return -EINVAL;
+ }
+
+ down_read(&client->semaphore);
+
+ if (!gxp_client_has_available_vd(client, "GXP_MAP_BUFFER")) {
+ ret = -ENODEV;
+ goto out;
+ }
+
+ map = gxp_mapping_create(gxp, client->vd->domain, ibuf.host_address,
+ ibuf.size,
+ ibuf.flags,
+ mapping_flags_to_dma_dir(ibuf.flags));
+ if (IS_ERR(map)) {
+ ret = PTR_ERR(map);
+ dev_err(gxp->dev, "Failed to create mapping (ret=%d)\n", ret);
+ goto out;
+ }
+
+ ret = gxp_vd_mapping_store(client->vd, map);
+ if (ret) {
+ dev_err(gxp->dev, "Failed to store mapping (ret=%d)\n", ret);
+ goto error_destroy;
+ }
+
+ ibuf.device_address = map->device_address;
+
+ if (copy_to_user(argp, &ibuf, sizeof(ibuf))) {
+ ret = -EFAULT;
+ goto error_remove;
+ }
+
+ /*
+ * The virtual device acquired its own reference to the mapping when
+ * it was stored in the VD's records. Release the reference from
+ * creating the mapping since this function is done using it.
+ */
+ gxp_mapping_put(map);
+
+out:
+ up_read(&client->semaphore);
+
+ return ret;
+
+error_remove:
+ gxp_vd_mapping_remove(client->vd, map);
+error_destroy:
+ gxp_mapping_put(map);
+ up_read(&client->semaphore);
+ return ret;
+}
+
+static int gxp_unmap_buffer(struct gxp_client *client,
+ struct gxp_map_ioctl __user *argp)
+{
+ struct gxp_dev *gxp = client->gxp;
+ struct gxp_map_ioctl ibuf;
+ struct gxp_mapping *map;
+ int ret = 0;
+
+ if (copy_from_user(&ibuf, argp, sizeof(ibuf)))
+ return -EFAULT;
+
+ down_read(&client->semaphore);
+
+ if (!client->vd) {
+ dev_err(gxp->dev,
+ "GXP_UNMAP_BUFFER requires the client allocate a VIRTUAL_DEVICE\n");
+ ret = -ENODEV;
+ goto out;
+ }
+
+ map = gxp_vd_mapping_search(client->vd,
+ (dma_addr_t)ibuf.device_address);
+ if (!map) {
+ dev_err(gxp->dev,
+ "Mapping not found for provided device address %#llX\n",
+ ibuf.device_address);
+ ret = -EINVAL;
+ goto out;
+ } else if (!map->host_address) {
+ dev_err(gxp->dev, "dma-bufs must be unmapped via GXP_UNMAP_DMABUF\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ WARN_ON(map->host_address != ibuf.host_address);
+
+ gxp_vd_mapping_remove(client->vd, map);
+
+ /* Release the reference from gxp_vd_mapping_search() */
+ gxp_mapping_put(map);
+
+out:
+ up_read(&client->semaphore);
+
+ return ret;
+}
+
+static int gxp_sync_buffer(struct gxp_client *client,
+ struct gxp_sync_ioctl __user *argp)
+{
+ struct gxp_dev *gxp = client->gxp;
+ struct gxp_sync_ioctl ibuf;
+ struct gxp_mapping *map;
+ int ret;
+
+ if (copy_from_user(&ibuf, argp, sizeof(ibuf)))
+ return -EFAULT;
+
+ down_read(&client->semaphore);
+
+ if (!client->vd) {
+ dev_err(gxp->dev,
+ "GXP_SYNC_BUFFER requires the client allocate a VIRTUAL_DEVICE\n");
+ ret = -ENODEV;
+ goto out;
+ }
+
+ map = gxp_vd_mapping_search(client->vd,
+ (dma_addr_t)ibuf.device_address);
+ if (!map) {
+ dev_err(gxp->dev,
+ "Mapping not found for provided device address %#llX\n",
+ ibuf.device_address);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ret = gxp_mapping_sync(map, ibuf.offset, ibuf.size,
+ ibuf.flags == GXP_SYNC_FOR_CPU);
+
+ /* Release the reference from gxp_vd_mapping_search() */
+ gxp_mapping_put(map);
+
+out:
+ up_read(&client->semaphore);
+
+ return ret;
+}
+
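+/*
+ * Validate the requested power votes, translate the virtual core ID to a
+ * physical core, and enqueue an asynchronous dispatch command on that core's
+ * mailbox. The assigned sequence number is copied back to user space.
+ */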
+static int gxp_mailbox_command(struct gxp_client *client,
+ struct gxp_mailbox_command_ioctl __user *argp)
+{
+ struct gxp_dev *gxp = client->gxp;
+ struct gxp_mailbox_command_ioctl ibuf;
+ int virt_core, phys_core;
+ int ret = 0;
+ struct gxp_power_states power_states;
+
+ if (copy_from_user(&ibuf, argp, sizeof(ibuf))) {
+ dev_err(gxp->dev,
+ "Unable to copy ioctl data from user-space\n");
+ return -EFAULT;
+ }
+ if (ibuf.gxp_power_state == GXP_POWER_STATE_OFF) {
+ dev_err(gxp->dev,
+ "GXP_POWER_STATE_OFF is not a valid value when executing a mailbox command\n");
+ return -EINVAL;
+ }
+ if (ibuf.gxp_power_state < GXP_POWER_STATE_OFF ||
+ ibuf.gxp_power_state >= GXP_NUM_POWER_STATES) {
+ dev_err(gxp->dev, "Requested power state is invalid\n");
+ return -EINVAL;
+ }
+ if (ibuf.memory_power_state < MEMORY_POWER_STATE_UNDEFINED ||
+ ibuf.memory_power_state > MEMORY_POWER_STATE_MAX) {
+ dev_err(gxp->dev, "Requested memory power state is invalid\n");
+ return -EINVAL;
+ }
+
+ if (ibuf.gxp_power_state == GXP_POWER_STATE_READY) {
+ dev_warn_once(
+ gxp->dev,
+ "GXP_POWER_STATE_READY is deprecated, please set GXP_POWER_LOW_FREQ_CLKMUX with GXP_POWER_STATE_UUD state");
+ ibuf.gxp_power_state = GXP_POWER_STATE_UUD;
+ }
+
+ if (ibuf.power_flags & GXP_POWER_NON_AGGRESSOR)
+ dev_warn_once(
+ gxp->dev,
+ "GXP_POWER_NON_AGGRESSOR is deprecated, no operation here");
+
+ /* Caller must hold VIRTUAL_DEVICE wakelock */
+ down_read(&client->semaphore);
+
+ if (!check_client_has_available_vd_wakelock(client,
+ "GXP_MAILBOX_COMMAND")) {
+ ret = -ENODEV;
+ goto out_unlock_client_semaphore;
+ }
+
+ down_read(&gxp->vd_semaphore);
+
+ virt_core = ibuf.virtual_core_id;
+ phys_core = gxp_vd_virt_core_to_phys_core(client->vd, virt_core);
+ if (phys_core < 0) {
+ dev_err(gxp->dev,
+ "Mailbox command failed: Invalid virtual core id (%u)\n",
+ virt_core);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (!gxp_is_fw_running(gxp, phys_core)) {
+ dev_err(gxp->dev,
+ "Cannot process mailbox command for core %d when firmware isn't running\n",
+ phys_core);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (gxp->mailbox_mgr->mailboxes[phys_core] == NULL) {
+ dev_err(gxp->dev, "Mailbox not initialized for core %d\n",
+ phys_core);
+ ret = -EIO;
+ goto out;
+ }
+
+ power_states.power = aur_state_array[ibuf.gxp_power_state];
+ power_states.memory = aur_memory_state_array[ibuf.memory_power_state];
+ power_states.low_clkmux = (ibuf.power_flags & GXP_POWER_LOW_FREQ_CLKMUX) != 0;
+
+ ret = gxp->mailbox_mgr->execute_cmd_async(
+ client, gxp->mailbox_mgr->mailboxes[phys_core], virt_core,
+ GXP_MBOX_CODE_DISPATCH, 0, ibuf.device_address, ibuf.size,
+ ibuf.flags, power_states, &ibuf.sequence_number);
+ if (ret) {
+ dev_err(gxp->dev, "Failed to enqueue mailbox command (ret=%d)\n",
+ ret);
+ goto out;
+ }
+
+ if (copy_to_user(argp, &ibuf, sizeof(ibuf))) {
+ dev_err(gxp->dev, "Failed to copy back sequence number!\n");
+ ret = -EFAULT;
+ goto out;
+ }
+
+out:
+ up_read(&gxp->vd_semaphore);
+out_unlock_client_semaphore:
+ up_read(&client->semaphore);
+
+ return ret;
+}
+
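+/*
+ * Wait for an asynchronous mailbox response on the given virtual core and
+ * copy its sequence number, return value, and error code back to user space.
+ */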
+static int gxp_mailbox_response(struct gxp_client *client,
+ struct gxp_mailbox_response_ioctl __user *argp)
+{
+ struct gxp_dev *gxp = client->gxp;
+ struct gxp_mailbox_response_ioctl ibuf;
+ int virt_core;
+ int ret = 0;
+
+ if (copy_from_user(&ibuf, argp, sizeof(ibuf)))
+ return -EFAULT;
+
+ /* Caller must hold VIRTUAL_DEVICE wakelock */
+ down_read(&client->semaphore);
+
+ if (!check_client_has_available_vd_wakelock(client,
+ "GXP_MAILBOX_RESPONSE")) {
+ ret = -ENODEV;
+ goto out;
+ }
+
+ virt_core = ibuf.virtual_core_id;
+ if (virt_core >= client->vd->num_cores) {
+ dev_err(gxp->dev, "Mailbox response failed: Invalid virtual core id (%u)\n",
+ virt_core);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ret = gxp->mailbox_mgr->wait_async_resp(client, virt_core,
+ &ibuf.sequence_number, NULL,
+ &ibuf.cmd_retval,
+ &ibuf.error_code);
+ if (ret)
+ goto out;
+
+ if (copy_to_user(argp, &ibuf, sizeof(ibuf)))
+ ret = -EFAULT;
+
+out:
+ up_read(&client->semaphore);
+
+ return ret;
+}
+
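+/*
+ * Report static device specs: core count, telemetry buffer sizes (in units of
+ * GXP_CORE_TELEMETRY_BUFFER_UNIT_SIZE), and the available memory per core.
+ */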
+static int gxp_get_specs(struct gxp_client *client,
+ struct gxp_specs_ioctl __user *argp)
+{
+ struct buffer_data *logging_buff_data;
+ struct gxp_dev *gxp = client->gxp;
+ struct gxp_specs_ioctl ibuf = {
+ .core_count = GXP_NUM_CORES,
+ .features = !gxp_is_direct_mode(client->gxp),
+ .telemetry_buffer_size = 0,
+ .secure_telemetry_buffer_size =
+ (u8)(SECURE_CORE_TELEMETRY_BUFFER_SIZE /
+ GXP_CORE_TELEMETRY_BUFFER_UNIT_SIZE),
+ .memory_per_core = client->gxp->memory_per_core,
+ };
+
+ if (!IS_ERR_OR_NULL(gxp->core_telemetry_mgr)) {
+ logging_buff_data = gxp->core_telemetry_mgr->logging_buff_data;
+ if (!IS_ERR_OR_NULL(logging_buff_data)) {
+ ibuf.telemetry_buffer_size =
+ (u8)(logging_buff_data->size /
+ GXP_CORE_TELEMETRY_BUFFER_UNIT_SIZE);
+ }
+ }
+
+ if (copy_to_user(argp, &ibuf, sizeof(ibuf)))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int gxp_allocate_vd(struct gxp_client *client,
+ struct gxp_virtual_device_ioctl __user *argp)
+{
+ struct gxp_dev *gxp = client->gxp;
+ struct gxp_virtual_device_ioctl ibuf;
+ int ret = 0;
+
+ if (copy_from_user(&ibuf, argp, sizeof(ibuf)))
+ return -EFAULT;
+
+ if (ibuf.core_count == 0 || ibuf.core_count > GXP_NUM_CORES) {
+ dev_err(gxp->dev, "Invalid core count (%u)\n", ibuf.core_count);
+ return -EINVAL;
+ }
+
+ if (ibuf.memory_per_core > gxp->memory_per_core) {
+ dev_err(gxp->dev, "Invalid memory-per-core (%u)\n",
+ ibuf.memory_per_core);
+ return -EINVAL;
+ }
+
+ down_write(&client->semaphore);
+ ret = gxp_client_allocate_virtual_device(client, ibuf.core_count,
+ ibuf.flags);
+ up_write(&client->semaphore);
+
+ return ret;
+}
+
+static int
+gxp_etm_trace_start_command(struct gxp_client *client,
+ struct gxp_etm_trace_start_ioctl __user *argp)
+{
+ struct gxp_dev *gxp = client->gxp;
+ struct gxp_etm_trace_start_ioctl ibuf;
+ int phys_core;
+ int ret = 0;
+
+ if (copy_from_user(&ibuf, argp, sizeof(ibuf)))
+ return -EFAULT;
+
+ ibuf.trace_ram_enable &= ETM_TRACE_LSB_MASK;
+ ibuf.atb_enable &= ETM_TRACE_LSB_MASK;
+
+ if (!ibuf.trace_ram_enable && !ibuf.atb_enable)
+ return -EINVAL;
+
+ if (!(ibuf.sync_msg_period == 0 ||
+ (ibuf.sync_msg_period <= ETM_TRACE_SYNC_MSG_PERIOD_MAX &&
+ ibuf.sync_msg_period >= ETM_TRACE_SYNC_MSG_PERIOD_MIN &&
+ is_power_of_2(ibuf.sync_msg_period))))
+ return -EINVAL;
+
+ if (ibuf.pc_match_mask_length > ETM_TRACE_PC_MATCH_MASK_LEN_MAX)
+ return -EINVAL;
+
+ /* Caller must hold VIRTUAL_DEVICE wakelock */
+ down_read(&client->semaphore);
+
+ if (!check_client_has_available_vd_wakelock(
+ client, "GXP_ETM_TRACE_START_COMMAND")) {
+ ret = -ENODEV;
+ goto out_unlock_client_semaphore;
+ }
+
+ down_read(&gxp->vd_semaphore);
+
+ phys_core =
+ gxp_vd_virt_core_to_phys_core(client->vd, ibuf.virtual_core_id);
+ if (phys_core < 0) {
+ dev_err(gxp->dev, "Trace start failed: Invalid virtual core id (%u)\n",
+ ibuf.virtual_core_id);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /*
+ * TODO (b/185260919): Pass the etm trace configuration to system FW
+ * once communication channel between kernel and system FW is ready
+ * (b/185819530).
+ */
+
+out:
+ up_read(&gxp->vd_semaphore);
+out_unlock_client_semaphore:
+ up_read(&client->semaphore);
+
+ return ret;
+}
+
+static int gxp_etm_trace_sw_stop_command(struct gxp_client *client,
+ __u16 __user *argp)
+{
+ struct gxp_dev *gxp = client->gxp;
+ u16 virtual_core_id;
+ int phys_core;
+ int ret = 0;
+
+ if (copy_from_user(&virtual_core_id, argp, sizeof(virtual_core_id)))
+ return -EFAULT;
+
+ /* Caller must hold VIRTUAL_DEVICE wakelock */
+ down_read(&client->semaphore);
+
+ if (!check_client_has_available_vd_wakelock(
+ client, "GXP_ETM_TRACE_SW_STOP_COMMAND")) {
+ ret = -ENODEV;
+ goto out_unlock_client_semaphore;
+ }
+
+ down_read(&gxp->vd_semaphore);
+
+ phys_core = gxp_vd_virt_core_to_phys_core(client->vd, virtual_core_id);
+ if (phys_core < 0) {
+ dev_err(gxp->dev, "Trace stop via software trigger failed: Invalid virtual core id (%u)\n",
+ virtual_core_id);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /*
+ * TODO (b/185260919): Pass the etm stop signal to system FW once
+ * communication channel between kernel and system FW is ready
+ * (b/185819530).
+ */
+
+out:
+ up_read(&gxp->vd_semaphore);
+out_unlock_client_semaphore:
+ up_read(&client->semaphore);
+
+ return ret;
+}
+
+static int gxp_etm_trace_cleanup_command(struct gxp_client *client,
+ __u16 __user *argp)
+{
+ struct gxp_dev *gxp = client->gxp;
+ u16 virtual_core_id;
+ int phys_core;
+ int ret = 0;
+
+ if (copy_from_user(&virtual_core_id, argp, sizeof(virtual_core_id)))
+ return -EFAULT;
+
+ /* Caller must hold VIRTUAL_DEVICE wakelock */
+ down_read(&client->semaphore);
+
+ if (!check_client_has_available_vd_wakelock(
+ client, "GXP_ETM_TRACE_CLEANUP_COMMAND")) {
+ ret = -ENODEV;
+ goto out_unlock_client_semaphore;
+ }
+
+ down_read(&gxp->vd_semaphore);
+
+ phys_core = gxp_vd_virt_core_to_phys_core(client->vd, virtual_core_id);
+ if (phys_core < 0) {
+ dev_err(gxp->dev, "Trace cleanup failed: Invalid virtual core id (%u)\n",
+ virtual_core_id);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /*
+ * TODO (b/185260919): Pass the etm clean up signal to system FW once
+ * communication channel between kernel and system FW is ready
+ * (b/185819530).
+ */
+
+out:
+ up_read(&gxp->vd_semaphore);
+out_unlock_client_semaphore:
+ up_read(&client->semaphore);
+
+ return ret;
+}
+
+static int
+gxp_etm_get_trace_info_command(struct gxp_client *client,
+ struct gxp_etm_get_trace_info_ioctl __user *argp)
+{
+ struct gxp_dev *gxp = client->gxp;
+ struct gxp_etm_get_trace_info_ioctl ibuf;
+ int phys_core;
+ u32 *trace_header;
+ u32 *trace_data;
+ int ret = 0;
+
+ if (copy_from_user(&ibuf, argp, sizeof(ibuf)))
+ return -EFAULT;
+
+ if (ibuf.type > 1)
+ return -EINVAL;
+
+ /* Caller must hold VIRTUAL_DEVICE wakelock */
+ down_read(&client->semaphore);
+
+ if (!check_client_has_available_vd_wakelock(
+ client, "GXP_ETM_GET_TRACE_INFO_COMMAND")) {
+ ret = -ENODEV;
+ goto out_unlock_client_semaphore;
+ }
+
+ down_read(&gxp->vd_semaphore);
+
+ phys_core = gxp_vd_virt_core_to_phys_core(client->vd, ibuf.virtual_core_id);
+ if (phys_core < 0) {
+ dev_err(gxp->dev, "Get trace info failed: Invalid virtual core id (%u)\n",
+ ibuf.virtual_core_id);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ trace_header = kzalloc(GXP_TRACE_HEADER_SIZE, GFP_KERNEL);
+ if (!trace_header) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ trace_data = kzalloc(GXP_TRACE_RAM_SIZE, GFP_KERNEL);
+ if (!trace_data) {
+ ret = -ENOMEM;
+ goto out_free_header;
+ }
+
+ /*
+ * TODO (b/185260919): Get trace information from system FW once
+ * communication channel between kernel and system FW is ready
+ * (b/185819530).
+ */
+
+ if (copy_to_user((void __user *)ibuf.trace_header_addr, trace_header,
+ GXP_TRACE_HEADER_SIZE)) {
+ ret = -EFAULT;
+ goto out_free_data;
+ }
+
+ if (ibuf.type == 1) {
+ if (copy_to_user((void __user *)ibuf.trace_data_addr,
+ trace_data, GXP_TRACE_RAM_SIZE)) {
+ ret = -EFAULT;
+ goto out_free_data;
+ }
+ }
+
+out_free_data:
+ kfree(trace_data);
+out_free_header:
+ kfree(trace_header);
+
+out:
+ up_read(&gxp->vd_semaphore);
+out_unlock_client_semaphore:
+ up_read(&client->semaphore);
+
+ return ret;
+}
+
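+/*
+ * Enable core telemetry of the requested type and record it on the client so
+ * it can be disabled automatically when the client is released.
+ */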
+static int gxp_enable_core_telemetry(struct gxp_client *client,
+ __u8 __user *argp)
+{
+ struct gxp_dev *gxp = client->gxp;
+ __u8 type;
+ int ret;
+
+ if (copy_from_user(&type, argp, sizeof(type)))
+ return -EFAULT;
+
+ if (type != GXP_TELEMETRY_TYPE_LOGGING &&
+ type != GXP_TELEMETRY_TYPE_TRACING)
+ return -EINVAL;
+
+ ret = gxp_core_telemetry_enable(gxp, type);
+
+ /*
+ * Record what core telemetry types this client enabled so they can be
+ * cleaned-up if the client closes without disabling them.
+ */
+ if (!ret && type == GXP_TELEMETRY_TYPE_LOGGING)
+ client->enabled_core_telemetry_logging = true;
+ if (!ret && type == GXP_TELEMETRY_TYPE_TRACING)
+ client->enabled_core_telemetry_tracing = true;
+
+ return ret;
+}
+
+static int gxp_disable_core_telemetry(struct gxp_client *client,
+ __u8 __user *argp)
+{
+ struct gxp_dev *gxp = client->gxp;
+ __u8 type;
+ int ret;
+
+ if (copy_from_user(&type, argp, sizeof(type)))
+ return -EFAULT;
+
+ if (type != GXP_TELEMETRY_TYPE_LOGGING &&
+ type != GXP_TELEMETRY_TYPE_TRACING)
+ return -EINVAL;
+
+ ret = gxp_core_telemetry_disable(gxp, type);
+
+ if (!ret && type == GXP_TELEMETRY_TYPE_LOGGING)
+ client->enabled_core_telemetry_logging = false;
+ if (!ret && type == GXP_TELEMETRY_TYPE_TRACING)
+ client->enabled_core_telemetry_tracing = false;
+
+ return ret;
+}
+
+#ifdef HAS_TPU_EXT
+
+/*
+ * Map TPU mailboxes to IOVA.
+ * This function will be called only when the device is in the direct mode.
+ */
+static int map_tpu_mbx_queue(struct gxp_client *client,
+ struct gxp_tpu_mbx_queue_ioctl *ibuf)
+{
+ struct gxp_dev *gxp = client->gxp;
+ struct edgetpu_ext_mailbox_info *mbx_info;
+ struct edgetpu_ext_client_info gxp_tpu_info;
+ u32 phys_core_list = 0;
+ u32 core_count;
+ int ret = 0;
+
+ down_read(&gxp->vd_semaphore);
+
+ core_count = client->vd->num_cores;
+ phys_core_list = gxp_vd_phys_core_list(client->vd);
+
+ mbx_info = kmalloc(
+ sizeof(struct edgetpu_ext_mailbox_info) +
+ core_count *
+ sizeof(struct edgetpu_ext_mailbox_descriptor),
+ GFP_KERNEL);
+ if (!mbx_info) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ /*
+ * TODO(b/249440369): Pass @client->tpu_file file pointer. For the backward compatibility,
+ * keep sending @ibuf->tpu_fd here.
+ */
+ gxp_tpu_info.tpu_fd = ibuf->tpu_fd;
+ gxp_tpu_info.mbox_map = phys_core_list;
+ gxp_tpu_info.attr =
+ (struct edgetpu_mailbox_attr __user *)ibuf->attr_ptr;
+ ret = edgetpu_ext_driver_cmd(gxp->tpu_dev.dev,
+ EDGETPU_EXTERNAL_CLIENT_TYPE_DSP,
+ ALLOCATE_EXTERNAL_MAILBOX, &gxp_tpu_info,
+ mbx_info);
+ if (ret) {
+ dev_err(gxp->dev, "Failed to allocate ext TPU mailboxes %d",
+ ret);
+ goto out_free;
+ }
+
+ /* Align queue size to page size for iommu map. */
+ mbx_info->cmdq_size = ALIGN(mbx_info->cmdq_size, PAGE_SIZE);
+ mbx_info->respq_size = ALIGN(mbx_info->respq_size, PAGE_SIZE);
+
+ ret = gxp_dma_map_tpu_buffer(gxp, client->vd->domain, phys_core_list,
+ mbx_info);
+ if (ret) {
+ dev_err(gxp->dev, "Failed to map TPU mailbox buffer %d", ret);
+ goto err_free_tpu_mbx;
+ }
+ client->mbx_desc.phys_core_list = phys_core_list;
+ client->mbx_desc.cmdq_size = mbx_info->cmdq_size;
+ client->mbx_desc.respq_size = mbx_info->respq_size;
+
+ goto out_free;
+
+err_free_tpu_mbx:
+ edgetpu_ext_driver_cmd(gxp->tpu_dev.dev,
+ EDGETPU_EXTERNAL_CLIENT_TYPE_DSP,
+ FREE_EXTERNAL_MAILBOX, &gxp_tpu_info, NULL);
+out_free:
+ kfree(mbx_info);
+out:
+ up_read(&gxp->vd_semaphore);
+
+ return ret;
+}
+
+/*
+ * Unmap TPU mailboxes from IOVA.
+ * This function will be called only when the device is in the direct mode.
+ */
+static void unmap_tpu_mbx_queue(struct gxp_client *client,
+ struct gxp_tpu_mbx_queue_ioctl *ibuf)
+{
+ struct gxp_dev *gxp = client->gxp;
+ struct edgetpu_ext_client_info gxp_tpu_info;
+
+ gxp_dma_unmap_tpu_buffer(gxp, client->vd->domain, client->mbx_desc);
+ gxp_tpu_info.tpu_fd = ibuf->tpu_fd;
+ edgetpu_ext_driver_cmd(gxp->tpu_dev.dev,
+ EDGETPU_EXTERNAL_CLIENT_TYPE_DSP,
+ FREE_EXTERNAL_MAILBOX, &gxp_tpu_info, NULL);
+}
+
+static int gxp_map_tpu_mbx_queue(struct gxp_client *client,
+ struct gxp_tpu_mbx_queue_ioctl __user *argp)
+{
+ struct gxp_dev *gxp = client->gxp;
+ struct gxp_tpu_mbx_queue_ioctl ibuf;
+ int ret = 0;
+
+ if (!gxp->tpu_dev.mbx_paddr) {
+ dev_err(gxp->dev, "%s: TPU is not available for interop\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ if (copy_from_user(&ibuf, argp, sizeof(ibuf)))
+ return -EFAULT;
+
+ down_write(&client->semaphore);
+
+ if (!gxp_client_has_available_vd(client, "GXP_MAP_TPU_MBX_QUEUE")) {
+ ret = -ENODEV;
+ goto out_unlock_client_semaphore;
+ }
+
+ if (client->tpu_file) {
+ dev_err(gxp->dev, "Mapping/linking TPU mailbox information already exists");
+ ret = -EBUSY;
+ goto out_unlock_client_semaphore;
+ }
+
+ /*
+ * If someone is attacking us through this interface, it is possible that
+ * ibuf.tpu_fd here is already a different file from the one later passed to
+ * edgetpu_ext_driver_cmd() (if the runtime closes the FD and opens another
+ * file exactly between the fget() below and the TPU driver call inside
+ * map_tpu_mbx_queue()).
+ *
+ * However, starting from Zuma, we pass the file pointer directly to the TPU
+ * kernel driver, which checks whether that file is a genuine TPU device file.
+ * Our code is therefore safe from this FD-swapping attack.
+ */
+ client->tpu_file = fget(ibuf.tpu_fd);
+ if (!client->tpu_file) {
+ ret = -EINVAL;
+ goto out_unlock_client_semaphore;
+ }
+
+ /* TODO(b/237624453): remove '|| 1' once the MCU supports DSP->TPU interop */
+ if (gxp_is_direct_mode(gxp) || 1) {
+ ret = map_tpu_mbx_queue(client, &ibuf);
+ if (ret)
+ goto err_fput_tpu_file;
+ }
+
+ if (gxp->after_map_tpu_mbx_queue) {
+ ret = gxp->after_map_tpu_mbx_queue(gxp, client);
+ if (ret)
+ goto err_unmap_tpu_mbx_queue;
+ }
+
+ goto out_unlock_client_semaphore;
+
+err_unmap_tpu_mbx_queue:
+ unmap_tpu_mbx_queue(client, &ibuf);
+err_fput_tpu_file:
+ fput(client->tpu_file);
+ client->tpu_file = NULL;
+out_unlock_client_semaphore:
+ up_write(&client->semaphore);
+
+ return ret;
+}
+
+static int gxp_unmap_tpu_mbx_queue(struct gxp_client *client,
+ struct gxp_tpu_mbx_queue_ioctl __user *argp)
+{
+ struct gxp_dev *gxp = client->gxp;
+ struct gxp_tpu_mbx_queue_ioctl ibuf;
+ int ret = 0;
+
+ if (copy_from_user(&ibuf, argp, sizeof(ibuf)))
+ return -EFAULT;
+
+ down_write(&client->semaphore);
+
+ if (!client->vd) {
+ dev_err(gxp->dev,
+ "GXP_UNMAP_TPU_MBX_QUEUE requires the client allocate a VIRTUAL_DEVICE\n");
+ ret = -ENODEV;
+ goto out;
+ }
+
+ if (!client->tpu_file) {
+ dev_err(gxp->dev, "No mappings exist for TPU mailboxes");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (gxp->before_unmap_tpu_mbx_queue)
+ gxp->before_unmap_tpu_mbx_queue(gxp, client);
+
+ /* TODO(b/237624453): remove '|| 1' once the MCU supports DSP->TPU interop */
+ if (gxp_is_direct_mode(gxp) || 1)
+ unmap_tpu_mbx_queue(client, &ibuf);
+
+ fput(client->tpu_file);
+ client->tpu_file = NULL;
+
+out:
+ up_write(&client->semaphore);
+
+ return ret;
+}
+
+#else /* HAS_TPU_EXT */
+
+#define gxp_map_tpu_mbx_queue(...) (-ENODEV)
+#define gxp_unmap_tpu_mbx_queue(...) (-ENODEV)
+
+#endif /* HAS_TPU_EXT */
+
+static int gxp_register_core_telemetry_eventfd(
+ struct gxp_client *client,
+ struct gxp_register_telemetry_eventfd_ioctl __user *argp)
+{
+ struct gxp_dev *gxp = client->gxp;
+ struct gxp_register_telemetry_eventfd_ioctl ibuf;
+
+ if (copy_from_user(&ibuf, argp, sizeof(ibuf)))
+ return -EFAULT;
+
+ return gxp_core_telemetry_register_eventfd(gxp, ibuf.type,
+ ibuf.eventfd);
+}
+
+static int gxp_unregister_core_telemetry_eventfd(
+ struct gxp_client *client,
+ struct gxp_register_telemetry_eventfd_ioctl __user *argp)
+{
+ struct gxp_dev *gxp = client->gxp;
+ struct gxp_register_telemetry_eventfd_ioctl ibuf;
+
+ if (copy_from_user(&ibuf, argp, sizeof(ibuf)))
+ return -EFAULT;
+
+ return gxp_core_telemetry_unregister_eventfd(gxp, ibuf.type);
+}
+
+static int gxp_read_global_counter(struct gxp_client *client,
+ __u64 __user *argp)
+{
+ struct gxp_dev *gxp = client->gxp;
+ u32 high_first, high_second, low;
+ u64 counter_val;
+ int ret = 0;
+
+ /* Caller must hold BLOCK wakelock */
+ down_read(&client->semaphore);
+
+ if (!client->has_block_wakelock) {
+ dev_err(gxp->dev,
+ "GXP_READ_GLOBAL_COUNTER requires the client hold a BLOCK wakelock\n");
+ ret = -ENODEV;
+ goto out;
+ }
+
+ high_first = gxp_read_32(gxp, GXP_REG_GLOBAL_COUNTER_HIGH);
+ low = gxp_read_32(gxp, GXP_REG_GLOBAL_COUNTER_LOW);
+
+ /*
+ * Check if the lower 32 bits could have wrapped in-between reading
+ * the high and low bit registers by validating the higher 32 bits
+ * haven't changed.
+ */
+ high_second = gxp_read_32(gxp, GXP_REG_GLOBAL_COUNTER_HIGH);
+ if (high_first != high_second)
+ low = gxp_read_32(gxp, GXP_REG_GLOBAL_COUNTER_LOW);
+
+ counter_val = ((u64)high_second << 32) | low;
+
+ if (copy_to_user(argp, &counter_val, sizeof(counter_val)))
+ ret = -EFAULT;
+
+out:
+ up_read(&client->semaphore);
+
+ return ret;
+}
+
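+/*
+ * Acquire the requested BLOCK and/or VIRTUAL_DEVICE wakelocks, applying the
+ * requested power state and memory power state votes.
+ */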
+static int gxp_acquire_wake_lock(struct gxp_client *client,
+ struct gxp_acquire_wakelock_ioctl __user *argp)
+{
+ struct gxp_dev *gxp = client->gxp;
+ struct gxp_acquire_wakelock_ioctl ibuf;
+ bool acquired_block_wakelock = false;
+ bool requested_low_clkmux = false;
+ struct gxp_power_states power_states;
+ int ret = 0;
+
+ if (copy_from_user(&ibuf, argp, sizeof(ibuf)))
+ return -EFAULT;
+
+ if (ibuf.gxp_power_state == GXP_POWER_STATE_OFF) {
+ dev_err(gxp->dev,
+ "GXP_POWER_STATE_OFF is not a valid value when acquiring a wakelock\n");
+ return -EINVAL;
+ }
+ if (ibuf.gxp_power_state < GXP_POWER_STATE_OFF ||
+ ibuf.gxp_power_state >= GXP_NUM_POWER_STATES) {
+ dev_err(gxp->dev, "Requested power state is invalid\n");
+ return -EINVAL;
+ }
+ if ((ibuf.memory_power_state < MEMORY_POWER_STATE_MIN ||
+ ibuf.memory_power_state > MEMORY_POWER_STATE_MAX) &&
+ ibuf.memory_power_state != MEMORY_POWER_STATE_UNDEFINED) {
+ dev_err(gxp->dev,
+ "Requested memory power state %d is invalid\n",
+ ibuf.memory_power_state);
+ return -EINVAL;
+ }
+
+ if (ibuf.gxp_power_state == GXP_POWER_STATE_READY) {
+ dev_warn_once(
+ gxp->dev,
+ "GXP_POWER_STATE_READY is deprecated, please set GXP_POWER_LOW_FREQ_CLKMUX with GXP_POWER_STATE_UUD state");
+ ibuf.gxp_power_state = GXP_POWER_STATE_UUD;
+ }
+
+ if (ibuf.flags & GXP_POWER_NON_AGGRESSOR)
+ dev_warn_once(
+ gxp->dev,
+ "GXP_POWER_NON_AGGRESSOR is deprecated, no operation here");
+
+ down_write(&client->semaphore);
+ if ((ibuf.components_to_wake & WAKELOCK_VIRTUAL_DEVICE) &&
+ (!client->vd)) {
+ dev_err(gxp->dev,
+ "Must allocate a virtual device to acquire VIRTUAL_DEVICE wakelock\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ requested_low_clkmux = (ibuf.flags & GXP_POWER_LOW_FREQ_CLKMUX) != 0;
+
+ /* Acquire a BLOCK wakelock if requested */
+ if (ibuf.components_to_wake & WAKELOCK_BLOCK) {
+ ret = gxp_client_acquire_block_wakelock(
+ client, &acquired_block_wakelock);
+ if (ret) {
+ dev_err(gxp->dev,
+ "Failed to acquire BLOCK wakelock for client (ret=%d)\n",
+ ret);
+ goto out;
+ }
+ }
+
+ /* Acquire a VIRTUAL_DEVICE wakelock if requested */
+ if (ibuf.components_to_wake & WAKELOCK_VIRTUAL_DEVICE) {
+ power_states.power = aur_state_array[ibuf.gxp_power_state];
+ power_states.memory = aur_memory_state_array[ibuf.memory_power_state];
+ power_states.low_clkmux = requested_low_clkmux;
+ ret = gxp_client_acquire_vd_wakelock(client, power_states);
+ if (ret) {
+ dev_err(gxp->dev,
+ "Failed to acquire VIRTUAL_DEVICE wakelock for client (ret=%d)\n",
+ ret);
+ goto err_acquiring_vd_wl;
+ }
+ }
+
+out:
+ up_write(&client->semaphore);
+
+ return ret;
+
+err_acquiring_vd_wl:
+ /*
+ * In a single call, if any wakelock acquisition fails, all of them do.
+ * If the client was acquiring both wakelocks and failed to acquire the
+ * VIRTUAL_DEVICE wakelock after successfully acquiring the BLOCK
+ * wakelock, then release it before returning the error code.
+ */
+ if (acquired_block_wakelock)
+ gxp_client_release_block_wakelock(client);
+
+ up_write(&client->semaphore);
+
+ return ret;
+}
+
+static int gxp_release_wake_lock(struct gxp_client *client, __u32 __user *argp)
+{
+ u32 wakelock_components;
+ int ret = 0;
+
+ if (copy_from_user(&wakelock_components, argp,
+ sizeof(wakelock_components)))
+ return -EFAULT;
+
+ down_write(&client->semaphore);
+
+ if (wakelock_components & WAKELOCK_VIRTUAL_DEVICE)
+ gxp_client_release_vd_wakelock(client);
+
+ if (wakelock_components & WAKELOCK_BLOCK)
+ gxp_client_release_block_wakelock(client);
+
+ up_write(&client->semaphore);
+
+ return ret;
+}
+
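+/*
+ * Map a dma-buf into the client's virtual device domain and return the
+ * assigned device address to user space.
+ */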
+static int gxp_map_dmabuf(struct gxp_client *client,
+ struct gxp_map_dmabuf_ioctl __user *argp)
+{
+ struct gxp_dev *gxp = client->gxp;
+ struct gxp_map_dmabuf_ioctl ibuf;
+ struct gxp_mapping *mapping;
+ int ret = 0;
+
+ if (copy_from_user(&ibuf, argp, sizeof(ibuf)))
+ return -EFAULT;
+
+ down_read(&client->semaphore);
+
+ if (!gxp_client_has_available_vd(client, "GXP_MAP_DMABUF")) {
+ ret = -ENODEV;
+ goto out_unlock;
+ }
+
+ mapping = gxp_dmabuf_map(gxp, client->vd->domain, ibuf.dmabuf_fd,
+ /*gxp_dma_flags=*/0,
+ mapping_flags_to_dma_dir(ibuf.flags));
+ if (IS_ERR(mapping)) {
+ ret = PTR_ERR(mapping);
+ dev_err(gxp->dev, "Failed to map dma-buf (ret=%d)\n", ret);
+ goto out_unlock;
+ }
+
+ ret = gxp_vd_mapping_store(client->vd, mapping);
+ if (ret) {
+ dev_err(gxp->dev,
+ "Failed to store mapping for dma-buf (ret=%d)\n", ret);
+ goto out_put;
+ }
+
+ ibuf.device_address = mapping->device_address;
+
+ if (copy_to_user(argp, &ibuf, sizeof(ibuf))) {
+ /* If the IOCTL fails, the dma-buf must be unmapped */
+ gxp_vd_mapping_remove(client->vd, mapping);
+ ret = -EFAULT;
+ }
+
+out_put:
+ /*
+ * Release the reference from creating the dmabuf mapping.
+ * If the mapping was not successfully stored in the owning virtual
+ * device, this will unmap and clean up the dmabuf.
+ */
+ gxp_mapping_put(mapping);
+
+out_unlock:
+ up_read(&client->semaphore);
+
+ return ret;
+}
+
+static int gxp_unmap_dmabuf(struct gxp_client *client,
+ struct gxp_map_dmabuf_ioctl __user *argp)
+{
+ struct gxp_dev *gxp = client->gxp;
+ struct gxp_map_dmabuf_ioctl ibuf;
+ struct gxp_mapping *mapping;
+ int ret = 0;
+
+ if (copy_from_user(&ibuf, argp, sizeof(ibuf)))
+ return -EFAULT;
+
+ down_read(&client->semaphore);
+
+ if (!client->vd) {
+ dev_err(gxp->dev,
+ "GXP_UNMAP_DMABUF requires the client allocate a VIRTUAL_DEVICE\n");
+ ret = -ENODEV;
+ goto out;
+ }
+
+ /*
+ * Fetch and remove the internal mapping records.
+ * If host_address is not 0, the provided device_address belongs to a
+ * non-dma-buf mapping.
+ */
+ mapping = gxp_vd_mapping_search(client->vd, ibuf.device_address);
+ if (IS_ERR_OR_NULL(mapping) || mapping->host_address) {
+ dev_warn(gxp->dev, "No dma-buf mapped for given IOVA\n");
+ /*
+ * If the device address belongs to a non-dma-buf mapping,
+ * release the reference to it obtained via the search.
+ */
+ if (!IS_ERR_OR_NULL(mapping))
+ gxp_mapping_put(mapping);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /* Remove the mapping from its VD, releasing the VD's reference */
+ gxp_vd_mapping_remove(client->vd, mapping);
+
+ /* Release the reference from gxp_vd_mapping_search() */
+ gxp_mapping_put(mapping);
+
+out:
+ up_read(&client->semaphore);
+
+ return ret;
+}
+
+static int gxp_register_mailbox_eventfd(
+ struct gxp_client *client,
+ struct gxp_register_mailbox_eventfd_ioctl __user *argp)
+{
+ struct gxp_register_mailbox_eventfd_ioctl ibuf;
+ struct gxp_eventfd *eventfd;
+ int ret = 0;
+
+ if (copy_from_user(&ibuf, argp, sizeof(ibuf)))
+ return -EFAULT;
+
+ down_write(&client->semaphore);
+
+ if (!gxp_client_has_available_vd(client, "GXP_REGISTER_MAILBOX_EVENTFD")) {
+ ret = -ENODEV;
+ goto out;
+ }
+
+ if (ibuf.virtual_core_id >= client->vd->num_cores) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /* Make sure the provided eventfd is valid */
+ eventfd = gxp_eventfd_create(ibuf.eventfd);
+ if (IS_ERR(eventfd)) {
+ ret = PTR_ERR(eventfd);
+ goto out;
+ }
+
+ /* Set the new eventfd, replacing any existing one */
+ if (client->mb_eventfds[ibuf.virtual_core_id])
+ gxp_eventfd_put(client->mb_eventfds[ibuf.virtual_core_id]);
+
+ client->mb_eventfds[ibuf.virtual_core_id] = eventfd;
+
+out:
+ up_write(&client->semaphore);
+
+ return ret;
+}
+
+static int gxp_unregister_mailbox_eventfd(
+ struct gxp_client *client,
+ struct gxp_register_mailbox_eventfd_ioctl __user *argp)
+{
+ struct gxp_register_mailbox_eventfd_ioctl ibuf;
+ int ret = 0;
+
+ if (copy_from_user(&ibuf, argp, sizeof(ibuf)))
+ return -EFAULT;
+
+ down_write(&client->semaphore);
+
+ if (!client->vd) {
+ dev_err(client->gxp->dev,
+ "GXP_UNREGISTER_MAILBOX_EVENTFD requires the client allocate a VIRTUAL_DEVICE\n");
+ ret = -ENODEV;
+ goto out;
+ }
+
+ if (ibuf.virtual_core_id >= client->vd->num_cores) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (client->mb_eventfds[ibuf.virtual_core_id])
+ gxp_eventfd_put(client->mb_eventfds[ibuf.virtual_core_id]);
+
+ client->mb_eventfds[ibuf.virtual_core_id] = NULL;
+
+out:
+ up_write(&client->semaphore);
+
+ return ret;
+}
+
+static int
+gxp_get_interface_version(struct gxp_client *client,
+ struct gxp_interface_version_ioctl __user *argp)
+{
+ struct gxp_interface_version_ioctl ibuf;
+ int ret;
+
+ ibuf.version_major = GXP_INTERFACE_VERSION_MAJOR;
+ ibuf.version_minor = GXP_INTERFACE_VERSION_MINOR;
+ memset(ibuf.version_build, 0, GXP_INTERFACE_VERSION_BUILD_BUFFER_SIZE);
+ ret = snprintf(ibuf.version_build,
+ GXP_INTERFACE_VERSION_BUILD_BUFFER_SIZE - 1,
+ GIT_REPO_TAG);
+
+ if (ret < 0 || ret >= GXP_INTERFACE_VERSION_BUILD_BUFFER_SIZE - 1) {
+ dev_warn(
+ client->gxp->dev,
+ "Buffer size insufficient to hold GIT_REPO_TAG (%d characters)\n",
+ ret);
+ }
+
+ if (copy_to_user(argp, &ibuf, sizeof(ibuf)))
+ return -EFAULT;
+
+ return 0;
+}
+
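+/*
+ * Root-only ioctl: for each core selected in the bitmask, notify the core to
+ * generate a debug dump if its firmware is running.
+ */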
+static int gxp_trigger_debug_dump(struct gxp_client *client,
+ __u32 __user *argp)
+{
+ struct gxp_dev *gxp = client->gxp;
+ int phys_core, i;
+ u32 core_bits;
+ int ret = 0;
+
+ if (!uid_eq(current_euid(), GLOBAL_ROOT_UID))
+ return -EPERM;
+
+ if (!gxp_debug_dump_is_enabled()) {
+ dev_err(gxp->dev, "Debug dump functionality is disabled\n");
+ return -EINVAL;
+ }
+
+ if (copy_from_user(&core_bits, argp, sizeof(core_bits)))
+ return -EFAULT;
+
+ /* Caller must hold VIRTUAL_DEVICE wakelock */
+ down_read(&client->semaphore);
+
+ if (!check_client_has_available_vd_wakelock(client,
+ "GXP_TRIGGER_DEBUG_DUMP")) {
+ ret = -ENODEV;
+ goto out_unlock_client_semaphore;
+ }
+
+ down_read(&gxp->vd_semaphore);
+
+ for (i = 0; i < GXP_NUM_CORES; i++) {
+ if (!(core_bits & BIT(i)))
+ continue;
+ phys_core = gxp_vd_virt_core_to_phys_core(client->vd, i);
+ if (phys_core < 0) {
+ dev_err(gxp->dev,
+ "Trigger debug dump failed: Invalid virtual core id (%u)\n",
+ i);
+ ret = -EINVAL;
+ continue;
+ }
+
+ if (gxp_is_fw_running(gxp, phys_core)) {
+ gxp_notification_send(gxp, phys_core,
+ CORE_NOTIF_GENERATE_DEBUG_DUMP);
+ }
+ }
+
+ up_read(&gxp->vd_semaphore);
+out_unlock_client_semaphore:
+ up_read(&client->semaphore);
+
+ return ret;
+}
+
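+/*
+ * Common ioctl dispatcher. A chip-specific handle_ioctl() callback, if
+ * present, gets the first chance at the command; anything it does not handle
+ * (-ENOTTY) falls through to the common handlers below.
+ */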
+static long gxp_ioctl(struct file *file, uint cmd, ulong arg)
+{
+ struct gxp_client *client = file->private_data;
+ void __user *argp = (void __user *)arg;
+ long ret;
+
+ if (client->gxp->handle_ioctl) {
+ ret = client->gxp->handle_ioctl(file, cmd, arg);
+ if (ret != -ENOTTY)
+ return ret;
+ }
+
+ switch (cmd) {
+ case GXP_MAP_BUFFER:
+ ret = gxp_map_buffer(client, argp);
+ break;
+ case GXP_UNMAP_BUFFER:
+ ret = gxp_unmap_buffer(client, argp);
+ break;
+ case GXP_SYNC_BUFFER:
+ ret = gxp_sync_buffer(client, argp);
+ break;
+ case GXP_MAILBOX_RESPONSE:
+ ret = gxp_mailbox_response(client, argp);
+ break;
+ case GXP_GET_SPECS:
+ ret = gxp_get_specs(client, argp);
+ break;
+ case GXP_ALLOCATE_VIRTUAL_DEVICE:
+ ret = gxp_allocate_vd(client, argp);
+ break;
+ case GXP_ETM_TRACE_START_COMMAND:
+ ret = gxp_etm_trace_start_command(client, argp);
+ break;
+ case GXP_ETM_TRACE_SW_STOP_COMMAND:
+ ret = gxp_etm_trace_sw_stop_command(client, argp);
+ break;
+ case GXP_ETM_TRACE_CLEANUP_COMMAND:
+ ret = gxp_etm_trace_cleanup_command(client, argp);
+ break;
+ case GXP_ETM_GET_TRACE_INFO_COMMAND:
+ ret = gxp_etm_get_trace_info_command(client, argp);
+ break;
+ case GXP_ENABLE_CORE_TELEMETRY:
+ ret = gxp_enable_core_telemetry(client, argp);
+ break;
+ case GXP_DISABLE_CORE_TELEMETRY:
+ ret = gxp_disable_core_telemetry(client, argp);
+ break;
+ case GXP_MAP_TPU_MBX_QUEUE:
+ ret = gxp_map_tpu_mbx_queue(client, argp);
+ break;
+ case GXP_UNMAP_TPU_MBX_QUEUE:
+ ret = gxp_unmap_tpu_mbx_queue(client, argp);
+ break;
+ case GXP_REGISTER_CORE_TELEMETRY_EVENTFD:
+ ret = gxp_register_core_telemetry_eventfd(client, argp);
+ break;
+ case GXP_UNREGISTER_CORE_TELEMETRY_EVENTFD:
+ ret = gxp_unregister_core_telemetry_eventfd(client, argp);
+ break;
+ case GXP_READ_GLOBAL_COUNTER:
+ ret = gxp_read_global_counter(client, argp);
+ break;
+ case GXP_RELEASE_WAKE_LOCK:
+ ret = gxp_release_wake_lock(client, argp);
+ break;
+ case GXP_MAP_DMABUF:
+ ret = gxp_map_dmabuf(client, argp);
+ break;
+ case GXP_UNMAP_DMABUF:
+ ret = gxp_unmap_dmabuf(client, argp);
+ break;
+ case GXP_MAILBOX_COMMAND:
+ ret = gxp_mailbox_command(client, argp);
+ break;
+ case GXP_REGISTER_MAILBOX_EVENTFD:
+ ret = gxp_register_mailbox_eventfd(client, argp);
+ break;
+ case GXP_UNREGISTER_MAILBOX_EVENTFD:
+ ret = gxp_unregister_mailbox_eventfd(client, argp);
+ break;
+ case GXP_ACQUIRE_WAKE_LOCK:
+ ret = gxp_acquire_wake_lock(client, argp);
+ break;
+ case GXP_GET_INTERFACE_VERSION:
+ ret = gxp_get_interface_version(client, argp);
+ break;
+ case GXP_TRIGGER_DEBUG_DUMP:
+ ret = gxp_trigger_debug_dump(client, argp);
+ break;
+ default:
+ ret = -ENOTTY; /* unknown command */
+ }
+
+ return ret;
+}
+
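+/*
+ * The mmap offset selects which per-core telemetry buffers (logging or
+ * tracing, current or legacy layout) are mapped, unless a chip-specific
+ * handle_mmap() callback claims the request first.
+ */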
+static int gxp_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ struct gxp_client *client = file->private_data;
+ int ret;
+
+ if (!client)
+ return -ENODEV;
+
+ if (client->gxp->handle_mmap) {
+ ret = client->gxp->handle_mmap(file, vma);
+ if (ret != -EOPNOTSUPP)
+ return ret;
+ }
+
+ switch (vma->vm_pgoff << PAGE_SHIFT) {
+ case GXP_MMAP_CORE_LOG_BUFFER_OFFSET:
+ return gxp_core_telemetry_mmap_buffers(
+ client->gxp, GXP_TELEMETRY_TYPE_LOGGING, vma);
+ case GXP_MMAP_CORE_TRACE_BUFFER_OFFSET:
+ return gxp_core_telemetry_mmap_buffers(
+ client->gxp, GXP_TELEMETRY_TYPE_TRACING, vma);
+ case GXP_MMAP_CORE_LOG_BUFFER_OFFSET_LEGACY:
+ return gxp_core_telemetry_mmap_buffers_legacy(
+ client->gxp, GXP_TELEMETRY_TYPE_LOGGING, vma);
+ case GXP_MMAP_CORE_TRACE_BUFFER_OFFSET_LEGACY:
+ return gxp_core_telemetry_mmap_buffers_legacy(
+ client->gxp, GXP_TELEMETRY_TYPE_TRACING, vma);
+ default:
+ return -EINVAL;
+ }
+}
+
+static const struct file_operations gxp_fops = {
+ .owner = THIS_MODULE,
+ .llseek = no_llseek,
+ .mmap = gxp_mmap,
+ .open = gxp_open,
+ .release = gxp_release,
+ .unlocked_ioctl = gxp_ioctl,
+};
+
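+/*
+ * Map the MMIO regions described by the platform device: the main CSR block,
+ * the (optional) CMU block, the LPM registers, and one region per mailbox.
+ */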
+static int gxp_set_reg_resources(struct platform_device *pdev, struct gxp_dev *gxp)
+{
+ struct device *dev = gxp->dev;
+ struct resource *r;
+ int i;
+
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (IS_ERR_OR_NULL(r)) {
+ dev_err(dev, "Failed to get memory resource\n");
+ return -ENODEV;
+ }
+
+ gxp->regs.paddr = r->start;
+ gxp->regs.size = resource_size(r);
+ gxp->regs.vaddr = devm_ioremap_resource(dev, r);
+ if (IS_ERR_OR_NULL(gxp->regs.vaddr)) {
+ dev_err(dev, "Failed to map registers\n");
+ return -ENODEV;
+ }
+
+ r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cmu");
+ if (!IS_ERR_OR_NULL(r)) {
+ gxp->cmu.paddr = r->start;
+ gxp->cmu.size = resource_size(r);
+ gxp->cmu.vaddr = devm_ioremap_resource(dev, r);
+ }
+ /*
+ * TODO (b/224685748): Remove this block after CMU CSR is supported
+ * in device tree config.
+ */
+#ifdef GXP_CMU_OFFSET
+ if (IS_ERR_OR_NULL(r) || IS_ERR_OR_NULL(gxp->cmu.vaddr)) {
+ gxp->cmu.paddr = gxp->regs.paddr - GXP_CMU_OFFSET;
+ gxp->cmu.size = GXP_CMU_SIZE;
+ gxp->cmu.vaddr =
+ devm_ioremap(dev, gxp->cmu.paddr, gxp->cmu.size);
+ if (IS_ERR_OR_NULL(gxp->cmu.vaddr))
+ dev_warn(dev, "Failed to map CMU registers\n");
+ }
+#endif
+
+#ifdef GXP_SEPARATE_LPM_OFFSET
+ r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "lpm");
+ if (IS_ERR_OR_NULL(r)) {
+ dev_err(dev, "Failed to get LPM resource\n");
+ return -ENODEV;
+ }
+ gxp->lpm_regs.paddr = r->start;
+ gxp->lpm_regs.size = resource_size(r);
+ gxp->lpm_regs.vaddr = devm_ioremap_resource(dev, r);
+ if (IS_ERR_OR_NULL(gxp->lpm_regs.vaddr)) {
+ dev_err(dev, "Failed to map LPM registers\n");
+ return -ENODEV;
+ }
+#else
+ gxp->lpm_regs.vaddr = gxp->regs.vaddr;
+ gxp->lpm_regs.size = gxp->regs.size;
+ gxp->lpm_regs.paddr = gxp->regs.paddr;
+#endif
+
+ for (i = 0; i < GXP_NUM_MAILBOXES; i++) {
+ r = platform_get_resource(pdev, IORESOURCE_MEM, i + 1);
+ if (IS_ERR_OR_NULL(r)) {
+ dev_err(dev, "Failed to get mailbox%d resource", i);
+ return -ENODEV;
+ }
+
+ gxp->mbx[i].paddr = r->start;
+ gxp->mbx[i].size = resource_size(r);
+ gxp->mbx[i].vaddr = devm_ioremap_resource(dev, r);
+ if (IS_ERR_OR_NULL(gxp->mbx[i].vaddr)) {
+ dev_err(dev, "Failed to map mailbox%d's register", i);
+ return -ENODEV;
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * Get TPU device from the device tree. Warnings are shown when any expected
+ * device tree entry is missing.
+ */
+static void gxp_get_tpu_dev(struct gxp_dev *gxp)
+{
+ struct device *dev = gxp->dev;
+ struct platform_device *tpu_pdev;
+ struct device_node *np;
+ phys_addr_t offset, base_addr;
+ int ret;
+
+ /* Get TPU device from device tree */
+ np = of_parse_phandle(dev->of_node, "tpu-device", 0);
+ if (IS_ERR_OR_NULL(np)) {
+ dev_warn(dev, "No tpu-device in device tree\n");
+ goto out_not_found;
+ }
+ tpu_pdev = of_find_device_by_node(np);
+ if (!tpu_pdev) {
+ dev_err(dev, "TPU device not found\n");
+ goto out_not_found;
+ }
+ /* get tpu mailbox register base */
+ ret = of_property_read_u64_index(np, "reg", 0, &base_addr);
+ of_node_put(np);
+ if (ret) {
+ dev_warn(dev, "Unable to get tpu-device base address\n");
+ goto out_not_found;
+ }
+ /* get gxp-tpu mailbox register offset */
+ ret = of_property_read_u64(dev->of_node, "gxp-tpu-mbx-offset", &offset);
+ if (ret) {
+ dev_warn(dev, "Unable to get tpu-device mailbox offset\n");
+ goto out_not_found;
+ }
+ gxp->tpu_dev.dev = get_device(&tpu_pdev->dev);
+ gxp->tpu_dev.mbx_paddr = base_addr + offset;
+ return;
+
+out_not_found:
+ dev_warn(dev, "TPU will not be available for interop\n");
+ gxp->tpu_dev.dev = NULL;
+ gxp->tpu_dev.mbx_paddr = 0;
+}
+
+static void gxp_put_tpu_dev(struct gxp_dev *gxp)
+{
+ /* put_device is no-op on !dev */
+ put_device(gxp->tpu_dev.dev);
+}
+
+/* Get GSA device from device tree. */
+static void gxp_get_gsa_dev(struct gxp_dev *gxp)
+{
+ struct device *dev = gxp->dev;
+ struct device_node *np;
+ struct platform_device *gsa_pdev;
+
+ gxp->gsa_dev = NULL;
+ np = of_parse_phandle(dev->of_node, "gsa-device", 0);
+ if (!np) {
+ dev_warn(
+ dev,
+ "No gsa-device in device tree. Firmware authentication not available\n");
+ return;
+ }
+ gsa_pdev = of_find_device_by_node(np);
+ if (!gsa_pdev) {
+ dev_err(dev, "GSA device not found\n");
+ of_node_put(np);
+ return;
+ }
+ gxp->gsa_dev = get_device(&gsa_pdev->dev);
+ of_node_put(np);
+ dev_info(dev, "GSA device found, Firmware authentication available\n");
+}
+
+static void gxp_put_gsa_dev(struct gxp_dev *gxp)
+{
+ put_device(gxp->gsa_dev);
+}
+
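+/*
+ * Common probe path shared by the chip-specific platform drivers; optional
+ * chip hooks (parse_dt, after_probe, before_remove, ...) are invoked at the
+ * appropriate points.
+ */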
+static int gxp_common_platform_probe(struct platform_device *pdev, struct gxp_dev *gxp)
+{
+ struct device *dev = &pdev->dev;
+ int ret;
+ u64 prop;
+
+ dev_notice(dev, "Probing gxp driver with commit %s\n", GIT_REPO_TAG);
+
+ platform_set_drvdata(pdev, gxp);
+ gxp->dev = dev;
+ if (gxp->parse_dt) {
+ ret = gxp->parse_dt(pdev, gxp);
+ if (ret)
+ return ret;
+ }
+
+ ret = gxp_set_reg_resources(pdev, gxp);
+ if (ret)
+ return ret;
+
+ ret = gxp_wakelock_init(gxp);
+ if (ret) {
+ dev_err(dev, "failed to init wakelock: %d", ret);
+ return ret;
+ }
+
+ ret = gxp_pm_init(gxp);
+ if (ret) {
+ dev_err(dev, "Failed to init power management (ret=%d)\n", ret);
+ goto err_wakelock_destroy;
+ }
+
+ gxp_get_gsa_dev(gxp);
+ gxp_get_tpu_dev(gxp);
+
+ ret = gxp_dma_init(gxp);
+ if (ret) {
+ dev_err(dev, "Failed to initialize GXP DMA interface\n");
+ goto err_put_tpu_dev;
+ }
+
+ gxp->mailbox_mgr = gxp_mailbox_create_manager(gxp, GXP_NUM_MAILBOXES);
+ if (IS_ERR(gxp->mailbox_mgr)) {
+ ret = PTR_ERR(gxp->mailbox_mgr);
+ dev_err(dev, "Failed to create mailbox manager: %d\n", ret);
+ goto err_dma_exit;
+ }
+ if (gxp_is_direct_mode(gxp)) {
+#if GXP_USE_LEGACY_MAILBOX
+ gxp_mailbox_init(gxp->mailbox_mgr);
+#else
+ gxp_dci_init(gxp->mailbox_mgr);
+#endif
+ }
+
+#if IS_ENABLED(CONFIG_SUBSYSTEM_COREDUMP)
+ ret = gxp_debug_dump_init(gxp, &gxp_sscd_dev, &gxp_sscd_pdata);
+#else
+ ret = gxp_debug_dump_init(gxp, NULL, NULL);
+#endif /* CONFIG_SUBSYSTEM_COREDUMP */
+ if (ret)
+ dev_warn(dev, "Failed to initialize debug dump\n");
+
+ mutex_init(&gxp->pin_user_pages_lock);
+ mutex_init(&gxp->secure_vd_lock);
+
+ gxp->domain_pool = kmalloc(sizeof(*gxp->domain_pool), GFP_KERNEL);
+ if (!gxp->domain_pool) {
+ ret = -ENOMEM;
+ goto err_debug_dump_exit;
+ }
+ ret = gxp_domain_pool_init(gxp, gxp->domain_pool,
+ GXP_NUM_PREALLOCATED_DOMAINS);
+ if (ret) {
+ dev_err(dev,
+ "Failed to initialize IOMMU domain pool (ret=%d)\n",
+ ret);
+ goto err_free_domain_pool;
+ }
+
+ ret = gxp_fw_init(gxp);
+ if (ret) {
+ dev_err(dev,
+ "Failed to initialize firmware manager (ret=%d)\n",
+ ret);
+ goto err_domain_pool_destroy;
+ }
+
+ gxp_dma_init_default_resources(gxp);
+ gxp_vd_init(gxp);
+
+ ret = of_property_read_u64(dev->of_node, "gxp-memory-per-core",
+ &prop);
+ if (ret) {
+ dev_err(dev, "Unable to get memory-per-core from device tree\n");
+ gxp->memory_per_core = 0;
+ } else {
+ gxp->memory_per_core = (u32)prop;
+ }
+
+ ret = gxp_fw_data_init(gxp);
+ if (ret) {
+ dev_err(dev, "Failed to initialize firmware data: %d\n", ret);
+ goto err_vd_destroy;
+ }
+
+ ret = gxp_core_telemetry_init(gxp);
+ if (ret) {
+ dev_err(dev, "Failed to initialize core telemetry (ret=%d)", ret);
+ goto err_fw_data_destroy;
+ }
+ gxp->thermal_mgr = gxp_thermal_init(gxp);
+ if (IS_ERR(gxp->thermal_mgr)) {
+ ret = PTR_ERR(gxp->thermal_mgr);
+ dev_warn(dev, "Failed to init thermal driver: %d\n", ret);
+ }
+
+ INIT_LIST_HEAD(&gxp->client_list);
+ mutex_init(&gxp->client_list_lock);
+ if (gxp->after_probe) {
+ ret = gxp->after_probe(gxp);
+ if (ret)
+ goto err_thermal_destroy;
+ }
+
+ gxp->misc_dev.minor = MISC_DYNAMIC_MINOR;
+ gxp->misc_dev.name = "gxp";
+ gxp->misc_dev.fops = &gxp_fops;
+ ret = misc_register(&gxp->misc_dev);
+ if (ret) {
+ dev_err(dev, "Failed to register misc device: %d", ret);
+ goto err_before_remove;
+ }
+
+ gxp_create_debugfs(gxp);
+ gxp_debug_pointer = gxp;
+
+ dev_info(dev, "Probe finished");
+ return 0;
+
+err_before_remove:
+ if (gxp->before_remove)
+ gxp->before_remove(gxp);
+err_thermal_destroy:
+ /* thermal init doesn't need revert */
+ gxp_core_telemetry_exit(gxp);
+err_fw_data_destroy:
+ gxp_fw_data_destroy(gxp);
+err_vd_destroy:
+ gxp_vd_destroy(gxp);
+ gxp_fw_destroy(gxp);
+err_domain_pool_destroy:
+ gxp_domain_pool_destroy(gxp->domain_pool);
+err_free_domain_pool:
+ kfree(gxp->domain_pool);
+err_debug_dump_exit:
+ gxp_debug_dump_exit(gxp);
+ /* mailbox manager init doesn't need revert */
+err_dma_exit:
+ gxp_dma_exit(gxp);
+err_put_tpu_dev:
+ gxp_put_tpu_dev(gxp);
+ gxp_put_gsa_dev(gxp);
+ gxp_pm_destroy(gxp);
+err_wakelock_destroy:
+ /* wakelock init doesn't need revert */
+ return ret;
+}
+
+static int gxp_common_platform_remove(struct platform_device *pdev)
+{
+ struct gxp_dev *gxp = platform_get_drvdata(pdev);
+
+ gxp_remove_debugfs(gxp);
+ misc_deregister(&gxp->misc_dev);
+ if (gxp->before_remove)
+ gxp->before_remove(gxp);
+ gxp_core_telemetry_exit(gxp);
+ gxp_fw_data_destroy(gxp);
+ gxp_vd_destroy(gxp);
+ gxp_fw_destroy(gxp);
+ gxp_domain_pool_destroy(gxp->domain_pool);
+ kfree(gxp->domain_pool);
+ gxp_debug_dump_exit(gxp);
+ gxp_dma_exit(gxp);
+ gxp_put_tpu_dev(gxp);
+ gxp_put_gsa_dev(gxp);
+ gxp_pm_destroy(gxp);
+
+ gxp_debug_pointer = NULL;
+
+ return 0;
+}
+
+#if IS_ENABLED(CONFIG_PM_SLEEP)
+
+static int gxp_platform_suspend(struct device *dev)
+{
+ struct gxp_dev *gxp = dev_get_drvdata(dev);
+
+ return gxp_wakelock_suspend(gxp);
+}
+
+static int gxp_platform_resume(struct device *dev)
+{
+ struct gxp_dev *gxp = dev_get_drvdata(dev);
+
+ return gxp_wakelock_resume(gxp);
+}
+
+static const struct dev_pm_ops gxp_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(gxp_platform_suspend, gxp_platform_resume)
+};
+
+#endif /* IS_ENABLED(CONFIG_PM_SLEEP) */
diff --git a/gxp-config.h b/gxp-config.h
index 154e767..69adb67 100644
--- a/gxp-config.h
+++ b/gxp-config.h
@@ -18,11 +18,6 @@
#endif /* unknown */
-#if IS_ENABLED(CONFIG_GXP_GEM5)
-#undef GXP_NUM_CORES
-#define GXP_NUM_CORES 1
-#endif
-
#define GXP_NUM_PREALLOCATED_DOMAINS GXP_NUM_CORES
#if defined(CONFIG_GXP_ZEBU) || defined(CONFIG_GXP_IP_ZEBU)
@@ -35,13 +30,16 @@
#define SYNC_BARRIER_COUNT 16
-/* Core address space starts at Inst_BPM block */
-#define GXP_CORE_0_BASE GXP_REG_CORE_0_INST_BPM
-#define GXP_CORE_SIZE (GXP_REG_CORE_1_INST_BPM - GXP_REG_CORE_0_INST_BPM)
+#ifndef GXP_USE_LEGACY_MAILBOX
+#define GXP_USE_LEGACY_MAILBOX 0
+#endif
+
+#ifndef GXP_HAS_LAP
+#define GXP_HAS_LAP 1
+#endif
-/* LPM address space starts at lpm_version register */
-#define GXP_LPM_BASE GXP_REG_LPM_VERSION
-#define GXP_LPM_PSM_0_BASE GXP_REG_LPM_PSM_0
-#define GXP_LPM_PSM_SIZE (GXP_REG_LPM_PSM_1 - GXP_REG_LPM_PSM_0)
+#ifndef GXP_HAS_MCU
+#define GXP_HAS_MCU 1
+#endif
#endif /* __GXP_CONFIG_H__ */
diff --git a/gxp-core-telemetry.c b/gxp-core-telemetry.c
new file mode 100644
index 0000000..bce27c6
--- /dev/null
+++ b/gxp-core-telemetry.c
@@ -0,0 +1,935 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * GXP core telemetry support
+ *
+ * Copyright (C) 2021-2022 Google LLC
+ */
+
+#include <linux/moduleparam.h>
+#include <linux/slab.h>
+#include <linux/wait.h>
+
+#include "gxp-config.h"
+#include "gxp-core-telemetry.h"
+#include "gxp-dma.h"
+#include "gxp-firmware.h"
+#include "gxp-firmware-data.h"
+#include "gxp-host-device-structs.h"
+#include "gxp-notification.h"
+#include "gxp-vd.h"
+
+static uint gxp_core_telemetry_buffer_size = CORE_TELEMETRY_DEFAULT_BUFFER_SIZE;
+module_param_named(core_telemetry_buffer_size, gxp_core_telemetry_buffer_size, uint, 0660);
+
+static inline bool is_telemetry_enabled(struct gxp_dev *gxp, uint core, u8 type)
+{
+ u32 device_status =
+ gxp_fw_data_get_core_telemetry_device_status(gxp, core, type);
+
+ return device_status & GXP_CORE_TELEMETRY_DEVICE_STATUS_ENABLED;
+}
+
+void gxp_core_telemetry_status_notify(struct gxp_dev *gxp, uint core)
+{
+ struct gxp_core_telemetry_manager *mgr = gxp->core_telemetry_mgr;
+
+ /* Wake any threads waiting on a core telemetry disable ACK */
+ wake_up(&mgr->waitq);
+
+ /* Signal the appropriate eventfd for any active core telemetry types */
+ mutex_lock(&mgr->lock);
+
+ if (is_telemetry_enabled(gxp, core, GXP_TELEMETRY_TYPE_LOGGING) &&
+ mgr->logging_efd)
+ eventfd_signal(mgr->logging_efd, 1);
+
+ if (is_telemetry_enabled(gxp, core, GXP_TELEMETRY_TYPE_TRACING) &&
+ mgr->tracing_efd)
+ eventfd_signal(mgr->tracing_efd, 1);
+
+ mutex_unlock(&mgr->lock);
+}
+
+static void telemetry_status_notification_work(struct work_struct *work)
+{
+ struct gxp_core_telemetry_work *telem_work =
+ container_of(work, struct gxp_core_telemetry_work, work);
+ struct gxp_dev *gxp = telem_work->gxp;
+ uint core = telem_work->core;
+
+ gxp_core_telemetry_status_notify(gxp, core);
+}
+
+static struct buffer_data *allocate_telemetry_buffers(struct gxp_dev *gxp,
+ size_t size);
+static void free_telemetry_buffers(struct gxp_dev *gxp, struct buffer_data *data);
+
+/**
+ * enable_telemetry_buffers() - enable the telemetry buffers from host.
+ *
+ * @gxp: The GXP device the buffers were allocated for.
+ * @data: The data describing a set of core telemetry buffers to be enabled.
+ * @type: Either `GXP_TELEMETRY_TYPE_LOGGING` or `GXP_TELEMETRY_TYPE_TRACING`.
+ *
+ * Return:
+ * * 0 - Success
+ * * otherwise - Error returned by `gxp_fw_data_set_core_telemetry_descriptors()`
+ */
+static int enable_telemetry_buffers(struct gxp_dev *gxp,
+ struct buffer_data *data, u8 type)
+{
+ int i, ret;
+
+ /* Initialize the per core telemetry buffers header with magic code. */
+ for (i = 0; i < GXP_NUM_CORES; i++) {
+ /*
+ * The first 64 bytes of each per-core telemetry buffer are
+ * reserved for the buffer metadata header. The header fields
+ * need no explicit reset here since the entire buffer is
+ * zeroed out during allocation. The first 4 bytes of the
+ * header hold the valid_magic field.
+ */
+ *((uint *)data->buffers[i].vaddr) =
+ GXP_TELEMETRY_BUFFER_VALID_MAGIC_CODE;
+ }
+
+ data->host_status |= GXP_CORE_TELEMETRY_HOST_STATUS_ENABLED;
+ ret = gxp_fw_data_set_core_telemetry_descriptors(
+ gxp, type, data->host_status, data->buffers, data->size);
+
+ if (ret) {
+ dev_err(gxp->dev,
+ "setting telemetry buffers in scratchpad region failed (ret=%d).",
+ ret);
+ return ret;
+ }
+
+ data->is_enabled = true;
+ return 0;
+}
+
+int gxp_core_telemetry_init(struct gxp_dev *gxp)
+{
+ struct gxp_core_telemetry_manager *mgr;
+ struct buffer_data *log_buff_data, *trace_buff_data;
+ int i, ret;
+
+ mgr = devm_kzalloc(gxp->dev, sizeof(*mgr), GFP_KERNEL);
+ if (!mgr)
+ return -ENOMEM;
+
+ mutex_init(&mgr->lock);
+ for (i = 0; i < GXP_NUM_CORES; i++) {
+ INIT_WORK(&mgr->notification_works[i].work,
+ telemetry_status_notification_work);
+ mgr->notification_works[i].gxp = gxp;
+ mgr->notification_works[i].core = i;
+ }
+ init_waitqueue_head(&mgr->waitq);
+
+ gxp->core_telemetry_mgr = mgr;
+ gxp_core_telemetry_buffer_size = ALIGN(gxp_core_telemetry_buffer_size,
+ GXP_CORE_TELEMETRY_BUFFER_UNIT_SIZE);
+ if ((gxp_core_telemetry_buffer_size < CORE_TELEMETRY_DEFAULT_BUFFER_SIZE) ||
+ (gxp_core_telemetry_buffer_size > CORE_TELEMETRY_MAX_BUFFER_SIZE)) {
+ dev_warn(gxp->dev,
+ "Invalid core telemetry buffer size, enforcing to default %u bytes\n",
+ CORE_TELEMETRY_DEFAULT_BUFFER_SIZE);
+ gxp_core_telemetry_buffer_size = CORE_TELEMETRY_DEFAULT_BUFFER_SIZE;
+ }
+
+ /* TODO(b/260959553): Remove mutex_lock/unlock during legacy telemetry removal */
+ mutex_lock(&mgr->lock);
+ log_buff_data = allocate_telemetry_buffers(gxp, gxp_core_telemetry_buffer_size);
+ if (IS_ERR_OR_NULL(log_buff_data)) {
+ dev_warn(gxp->dev,
+ "Failed to allocate per core log buffer of %u bytes\n",
+ gxp_core_telemetry_buffer_size);
+ ret = -ENOMEM;
+ goto err_free_buffers;
+ }
+
+ trace_buff_data = allocate_telemetry_buffers(gxp, gxp_core_telemetry_buffer_size);
+ if (IS_ERR_OR_NULL(trace_buff_data)) {
+ dev_warn(gxp->dev,
+ "Failed to allocate per core trace buffer of %u bytes\n",
+ gxp_core_telemetry_buffer_size);
+ free_telemetry_buffers(gxp, log_buff_data);
+ ret = -ENOMEM;
+ goto err_free_buffers;
+ }
+
+ ret = enable_telemetry_buffers(gxp, log_buff_data,
+ GXP_TELEMETRY_TYPE_LOGGING);
+ if (ret) {
+ dev_warn(gxp->dev, "enable telemetry buffer failed (ret=%d)",
+ ret);
+ goto err_free;
+ }
+ ret = enable_telemetry_buffers(gxp, trace_buff_data,
+ GXP_TELEMETRY_TYPE_TRACING);
+ if (ret) {
+ dev_warn(gxp->dev, "enable telemetry buffer failed (ret=%d)",
+ ret);
+ goto err_free;
+ }
+
+ gxp->core_telemetry_mgr->logging_buff_data = log_buff_data;
+ gxp->core_telemetry_mgr->tracing_buff_data = trace_buff_data;
+ mutex_unlock(&mgr->lock);
+ return 0;
+
+err_free:
+ free_telemetry_buffers(gxp, log_buff_data);
+ free_telemetry_buffers(gxp, trace_buff_data);
+err_free_buffers:
+ mutex_unlock(&mgr->lock);
+ mutex_destroy(&mgr->lock);
+ devm_kfree(gxp->dev, mgr);
+ gxp->core_telemetry_mgr = NULL;
+ return ret;
+}
+
+/* Wrapper struct to be used by the core telemetry vma_ops. */
+struct telemetry_vma_data {
+ struct gxp_dev *gxp;
+ struct buffer_data *buff_data;
+ u8 type;
+ refcount_t ref_count;
+};
+
+static void telemetry_vma_open(struct vm_area_struct *vma)
+{
+ struct gxp_dev *gxp;
+ struct telemetry_vma_data *vma_data =
+ (struct telemetry_vma_data *)vma->vm_private_data;
+ /*
+ * vma_ops are required only for the legacy telemetry flow,
+ * to keep track of buffer allocation during mmap and buffer
+ * freeing during munmap.
+ */
+ if (IS_ERR_OR_NULL(vma_data))
+ return;
+
+ gxp = vma_data->gxp;
+ mutex_lock(&gxp->core_telemetry_mgr->lock);
+
+ refcount_inc(&vma_data->ref_count);
+
+ mutex_unlock(&gxp->core_telemetry_mgr->lock);
+}
+
+static void telemetry_vma_close(struct vm_area_struct *vma)
+{
+ struct gxp_dev *gxp;
+ struct buffer_data *buff_data;
+ u8 type;
+ struct telemetry_vma_data *vma_data =
+ (struct telemetry_vma_data *)vma->vm_private_data;
+ /*
+ * vma_ops are required only for the legacy telemetry flow,
+ * to keep track of buffer allocation during mmap and buffer
+ * freeing during munmap.
+ */
+ if (IS_ERR_OR_NULL(vma_data))
+ return;
+
+ gxp = vma_data->gxp;
+ buff_data = vma_data->buff_data;
+ type = vma_data->type;
+
+ mutex_lock(&gxp->core_telemetry_mgr->lock);
+
+ if (!refcount_dec_and_test(&vma_data->ref_count))
+ goto out;
+
+ /*
+ * Free the core telemetry buffers if they are no longer in use.
+ *
+ * If a client enabled core telemetry, then closed their VMA without
+ * disabling it, firmware will still be expecting those buffers to be
+ * mapped. If this is the case, core telemetry will be disabled, and the
+ * buffers freed, when the client is closed.
+ *
+ * We cannot disable core telemetry here, since attempting to lock the
+ * `vd_semaphore` while holding the mmap lock can lead to deadlocks.
+ */
+ if (refcount_dec_and_test(&buff_data->ref_count)) {
+ switch (type) {
+ case GXP_TELEMETRY_TYPE_LOGGING:
+ gxp->core_telemetry_mgr->logging_buff_data_legacy = NULL;
+ break;
+ case GXP_TELEMETRY_TYPE_TRACING:
+ gxp->core_telemetry_mgr->tracing_buff_data_legacy = NULL;
+ break;
+ default:
+ dev_warn(gxp->dev, "%s called with invalid type %u\n",
+ __func__, type);
+ }
+ free_telemetry_buffers(gxp, buff_data);
+ }
+
+ kfree(vma_data);
+
+out:
+ mutex_unlock(&gxp->core_telemetry_mgr->lock);
+}
+
+/* TODO(b/260959553): Remove vma ops during legacy telemetry removal */
+static const struct vm_operations_struct telemetry_vma_ops = {
+ .open = telemetry_vma_open,
+ .close = telemetry_vma_close,
+};
+
+/**
+ * check_telemetry_type_availability() - Checks if @type is valid and whether
+ * buffers of that type already exist.
+ * @gxp: The GXP device to check availability for
+ * @type: Either `GXP_TELEMETRY_TYPE_LOGGING` or `GXP_TELEMETRY_TYPE_TRACING`
+ *
+ * Caller must hold the core telemetry_manager's lock.
+ *
+ * Return:
+ * * 0 - @type is valid and can have new buffers created
+ * * -EBUSY - Buffers already exist for @type
+ * * -EINVAL - @type is not a valid core telemetry type
+ */
+static int check_telemetry_type_availability(struct gxp_dev *gxp, u8 type)
+{
+ lockdep_assert_held(&gxp->core_telemetry_mgr->lock);
+
+ switch (type) {
+ case GXP_TELEMETRY_TYPE_LOGGING:
+ if (gxp->core_telemetry_mgr->logging_buff_data_legacy)
+ return -EBUSY;
+ break;
+ case GXP_TELEMETRY_TYPE_TRACING:
+ if (gxp->core_telemetry_mgr->tracing_buff_data_legacy)
+ return -EBUSY;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * allocate_telemetry_buffers() - Allocate and populate a `struct buffer_data`,
+ * including allocating and mapping one coherent
+ * buffer of @size bytes per core.
+ * @gxp: The GXP device to allocate the buffers for
+ * @size: The size of buffer to allocate for each core
+ *
+ * Caller must hold the core telemetry_manager's lock.
+ *
+ * Return: A pointer to the `struct buffer_data` if successful, error otherwise
+ */
+static struct buffer_data *allocate_telemetry_buffers(struct gxp_dev *gxp,
+ size_t size)
+{
+ struct buffer_data *data;
+ int i;
+ int ret = 0;
+
+ size = size < PAGE_SIZE ? PAGE_SIZE : size;
+
+ /* TODO(b/260959553): Remove lockdep_assert_held during legacy telemetry removal */
+ lockdep_assert_held(&gxp->core_telemetry_mgr->lock);
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return ERR_PTR(-ENOMEM);
+
+ /* Allocate cache-coherent buffers for logging/tracing to */
+ for (i = 0; i < GXP_NUM_CORES; i++) {
+ /* Allocate a coherent buffer in the default domain */
+ ret = gxp_dma_alloc_coherent_buf(gxp, NULL, size, GFP_KERNEL, 0,
+ &data->buffers[i]);
+ if (ret) {
+ dev_err(gxp->dev,
+ "Failed to allocate coherent buffer\n");
+ goto err_alloc;
+ }
+ }
+ data->size = size;
+ refcount_set(&data->ref_count, 1);
+ data->is_enabled = false;
+
+ return data;
+
+err_alloc:
+ while (i--)
+ gxp_dma_free_coherent_buf(gxp, NULL, &data->buffers[i]);
+ kfree(data);
+
+ return ERR_PTR(ret);
+}
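For reference, a minimal in-driver caller of the two helpers above could look like the sketch below; the function name is hypothetical and it assumes the core telemetry lock is already held, as the kernel-doc requires.

/* Hypothetical caller sketch; assumes gxp->core_telemetry_mgr->lock is held. */
static int example_alloc_probe_free(struct gxp_dev *gxp, size_t size)
{
	struct buffer_data *data;

	data = allocate_telemetry_buffers(gxp, size);
	if (IS_ERR_OR_NULL(data))
		return data ? PTR_ERR(data) : -ENOMEM;

	/* data->buffers[0..GXP_NUM_CORES-1] now hold coherent per-core buffers
	 * of at least PAGE_SIZE bytes each.
	 */

	free_telemetry_buffers(gxp, data);
	return 0;
}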
+
+/**
+ * free_telemetry_buffers() - Unmap and free a `struct buffer_data`
+ * @gxp: The GXP device the buffers were allocated for
+ * @data: The descriptor of the buffers to unmap and free
+ *
+ * Caller must hold the core telemetry_manager's lock.
+ */
+static void free_telemetry_buffers(struct gxp_dev *gxp, struct buffer_data *data)
+{
+ int i;
+
+ /* TODO(b/260959553): Remove lockdep_assert_held during legacy telemetry removal */
+ lockdep_assert_held(&gxp->core_telemetry_mgr->lock);
+
+ for (i = 0; i < GXP_NUM_CORES; i++)
+ gxp_dma_free_coherent_buf(gxp, NULL, &data->buffers[i]);
+
+ kfree(data);
+}
+
+/**
+ * remap_telemetry_buffers() - Remaps a set of core telemetry buffers into a
+ * user-space vm_area.
+ * @gxp: The GXP device the buffers were allocated for
+ * @vma: A vm area to remap the buffers into
+ * @buff_data: The data describing a set of core telemetry buffers to remap
+ *
+ * Caller must hold the core telemetry_manager's lock.
+ *
+ * Return:
+ * * 0 - Success
+ * * otherwise - Error returned by `remap_pfn_range()`
+ */
+static int remap_telemetry_buffers(struct gxp_dev *gxp,
+ struct vm_area_struct *vma,
+ struct buffer_data *buff_data)
+{
+ unsigned long orig_pgoff = vma->vm_pgoff;
+ int i;
+ unsigned long offset;
+ phys_addr_t phys;
+ int ret = 0;
+
+ lockdep_assert_held(&gxp->core_telemetry_mgr->lock);
+
+ /* mmap the buffers */
+ vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+ vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND | VM_DONTDUMP;
+ vma->vm_pgoff = 0;
+
+ for (i = 0; i < GXP_NUM_CORES; i++) {
+ /*
+ * Remap each core's buffer a page at a time, in case it is not
+ * physically contiguous.
+ */
+ for (offset = 0; offset < buff_data->size; offset += PAGE_SIZE) {
+ /*
+ * `virt_to_phys()` does not work on memory allocated
+ * by `dma_alloc_coherent()`, so we have to use
+ * `iommu_iova_to_phys()` instead. Since all buffers
+ * are mapped to the default domain as well as any per-
+ * core domains, we can use it here to get the physical
+ * address of any valid IOVA, regardless of its core.
+ */
+ phys = iommu_iova_to_phys(
+ iommu_get_domain_for_dev(gxp->dev),
+ buff_data->buffers[i].dma_addr + offset);
+ ret = remap_pfn_range(
+ vma,
+ vma->vm_start + buff_data->size * i + offset,
+ phys >> PAGE_SHIFT, PAGE_SIZE,
+ vma->vm_page_prot);
+ if (ret)
+ goto out;
+ }
+ }
+
+out:
+ vma->vm_pgoff = orig_pgoff;
+ /* TODO(b/260959553): Remove vma ops during legacy telemetry removal */
+ vma->vm_ops = &telemetry_vma_ops;
+
+ return ret;
+}
+
+int gxp_core_telemetry_mmap_buffers(struct gxp_dev *gxp, u8 type,
+ struct vm_area_struct *vma)
+{
+ int ret = 0;
+ struct buffer_data *buff_data;
+ size_t total_size = vma->vm_end - vma->vm_start;
+ size_t size = total_size / GXP_NUM_CORES;
+
+ if (!gxp->core_telemetry_mgr)
+ return -ENODEV;
+
+ if (type == GXP_TELEMETRY_TYPE_LOGGING)
+ buff_data = gxp->core_telemetry_mgr->logging_buff_data;
+ else if (type == GXP_TELEMETRY_TYPE_TRACING)
+ buff_data = gxp->core_telemetry_mgr->tracing_buff_data;
+ else
+ return -EINVAL;
+ /*
+ * Total size must divide evenly into one
+ * GXP_CORE_TELEMETRY_BUFFER_UNIT_SIZE-aligned buffer per core.
+ */
+ if (!total_size ||
+ total_size % (GXP_CORE_TELEMETRY_BUFFER_UNIT_SIZE * GXP_NUM_CORES)) {
+ dev_warn(
+ gxp->dev,
+ "Invalid vma size(%lu bytes) requested for telemetry\n",
+ total_size);
+ return -EINVAL;
+ }
+ /*
+ * The per-core buffer size must be equal to the preallocated
+ * aligned buffer size per core.
+ */
+ if (size != buff_data->size) {
+ dev_warn(
+ gxp->dev,
+ "Invalid per core requested telemetry buffer size(%lu bytes)\n",
+ size);
+ return -EINVAL;
+ }
+ mutex_lock(&gxp->core_telemetry_mgr->lock);
+ ret = remap_telemetry_buffers(gxp, vma, buff_data);
+ if (ret)
+ goto err;
+ vma->vm_private_data = NULL;
+ mutex_unlock(&gxp->core_telemetry_mgr->lock);
+ return 0;
+err:
+ mutex_unlock(&gxp->core_telemetry_mgr->lock);
+ return ret;
+}
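Because remap_telemetry_buffers() lays the per-core buffers out back to back in the vma, user space can locate core i's buffer at offset i * per_core_size within a single mapping. A minimal sketch, assuming a hypothetical device node, a placeholder mmap offset, and an illustrative per-core size (the real size should come from the driver, e.g. via GXP_GET_SPECS):

/*
 * User-space sketch of consuming the preallocated telemetry buffers.
 * "/dev/gxp", the mmap offset and PER_CORE_SIZE are assumptions.
 */
#include <fcntl.h>
#include <sys/mman.h>
#include <unistd.h>

#define NUM_CORES     4			/* assumed GXP_NUM_CORES */
#define PER_CORE_SIZE (64 * 1024)	/* must equal the driver's buff_data->size */

int main(void)
{
	int fd = open("/dev/gxp", O_RDWR);	/* hypothetical device node */
	size_t len = (size_t)NUM_CORES * PER_CORE_SIZE;
	char *base;
	int i;

	if (fd < 0)
		return 1;
	/* offset selecting telemetry is driver-specific; 0 is a placeholder */
	base = mmap(NULL, len, PROT_READ, MAP_SHARED, fd, 0);
	if (base == MAP_FAILED)
		return 1;
	for (i = 0; i < NUM_CORES; i++) {
		char *core_buf = base + (size_t)i * PER_CORE_SIZE;
		/* parse core i's log/trace stream starting at core_buf */
		(void)core_buf;
	}
	munmap(base, len);
	close(fd);
	return 0;
}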
+
+int gxp_core_telemetry_mmap_buffers_legacy(struct gxp_dev *gxp, u8 type,
+ struct vm_area_struct *vma)
+{
+ int ret = 0;
+ struct telemetry_vma_data *vma_data;
+ size_t total_size = vma->vm_end - vma->vm_start;
+ size_t size = total_size / GXP_NUM_CORES;
+ struct buffer_data *buff_data;
+ int i;
+
+ if (!gxp->core_telemetry_mgr)
+ return -ENODEV;
+
+ /* Total size must divide evenly into 1 page-aligned buffer per core */
+ if (!total_size || total_size % (PAGE_SIZE * GXP_NUM_CORES))
+ return -EINVAL;
+
+ mutex_lock(&gxp->core_telemetry_mgr->lock);
+
+ ret = check_telemetry_type_availability(gxp, type);
+ if (ret)
+ goto err;
+
+ vma_data = kmalloc(sizeof(*vma_data), GFP_KERNEL);
+ if (!vma_data) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ buff_data = allocate_telemetry_buffers(gxp, size);
+ if (IS_ERR(buff_data)) {
+ ret = PTR_ERR(buff_data);
+ goto err_free_vma_data;
+ }
+
+ ret = remap_telemetry_buffers(gxp, vma, buff_data);
+ if (ret)
+ goto err_free_buffers;
+
+ vma_data->gxp = gxp;
+ vma_data->buff_data = buff_data;
+ vma_data->type = type;
+ refcount_set(&vma_data->ref_count, 1);
+ vma->vm_private_data = vma_data;
+
+ /* Save book-keeping on the buffers in the core telemetry manager */
+ if (type == GXP_TELEMETRY_TYPE_LOGGING)
+ gxp->core_telemetry_mgr->logging_buff_data_legacy = buff_data;
+ else /* type == GXP_TELEMETRY_TYPE_TRACING */
+ gxp->core_telemetry_mgr->tracing_buff_data_legacy = buff_data;
+
+ mutex_unlock(&gxp->core_telemetry_mgr->lock);
+
+ return 0;
+
+err_free_buffers:
+ for (i = 0; i < GXP_NUM_CORES; i++)
+ gxp_dma_free_coherent_buf(gxp, NULL, &buff_data->buffers[i]);
+ kfree(buff_data);
+
+err_free_vma_data:
+ kfree(vma_data);
+
+err:
+ mutex_unlock(&gxp->core_telemetry_mgr->lock);
+ return ret;
+}
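The legacy path differs from the preallocated path above in that the buffers are allocated at mmap time, and the vma only has to split evenly into one page-aligned buffer per core. A small sketch of computing a valid mapping length under that constraint (the core count and page count are illustrative):

/* Sketch: a valid length for the legacy telemetry mmap is
 * num_cores * pages_per_core * PAGE_SIZE.
 */
#include <unistd.h>

static inline size_t legacy_telemetry_mmap_len(unsigned int pages_per_core)
{
	const unsigned int num_cores = 4;	/* assumed GXP_NUM_CORES */
	long page_size = sysconf(_SC_PAGESIZE);

	return (size_t)num_cores * pages_per_core * (size_t)page_size;
}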
+
+int gxp_core_telemetry_enable(struct gxp_dev *gxp, u8 type)
+{
+ struct buffer_data *data;
+ int ret = 0;
+ uint core;
+ struct gxp_virtual_device *vd;
+
+ /*
+ * `vd_semaphore` cannot be acquired while holding the core telemetry
+ * lock, so acquire it here before locking the core telemetry lock.
+ */
+ down_read(&gxp->vd_semaphore);
+ mutex_lock(&gxp->core_telemetry_mgr->lock);
+
+ switch (type) {
+ case GXP_TELEMETRY_TYPE_LOGGING:
+ data = gxp->core_telemetry_mgr->logging_buff_data_legacy;
+ break;
+ case GXP_TELEMETRY_TYPE_TRACING:
+ data = gxp->core_telemetry_mgr->tracing_buff_data_legacy;
+ break;
+ default:
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (!data) {
+ ret = -ENXIO;
+ goto out;
+ }
+
+ /* Map the buffers for any cores already running */
+ for (core = 0; core < GXP_NUM_CORES; core++) {
+ vd = gxp->core_to_vd[core];
+ if (vd != NULL) {
+ ret = gxp_dma_map_allocated_coherent_buffer(
+ gxp, &data->buffers[core], vd->domain, 0);
+ if (ret)
+ goto err;
+ }
+ }
+
+ /* Populate the buffer fields in firmware-data */
+ data->host_status |= GXP_CORE_TELEMETRY_HOST_STATUS_ENABLED;
+ gxp_fw_data_set_core_telemetry_descriptors(gxp, type, data->host_status,
+ data->buffers, data->size);
+
+ /* Notify any running cores that firmware-data was updated */
+ for (core = 0; core < GXP_NUM_CORES; core++) {
+ if (gxp_is_fw_running(gxp, core))
+ gxp_notification_send(gxp, core,
+ CORE_NOTIF_TELEMETRY_STATUS);
+ }
+
+ refcount_inc(&data->ref_count);
+ data->is_enabled = true;
+
+ goto out;
+err:
+ while (core--) {
+ vd = gxp->core_to_vd[core];
+ if (vd)
+ gxp_dma_unmap_allocated_coherent_buffer(
+ gxp, vd->domain, &data->buffers[core]);
+ }
+
+out:
+ mutex_unlock(&gxp->core_telemetry_mgr->lock);
+ up_read(&gxp->vd_semaphore);
+
+ return ret;
+}
+
+/**
+ * notify_core_and_wait_for_disable() - Notify a core that telemetry state has
+ * been changed by the host and wait for
+ * the core to stop using telemetry.
+ * @gxp: The GXP device core telemetry is changing for
+ * @core: The core in @gxp to notify of the telemetry state change
+ * @type: Either `GXP_TELEMETRY_TYPE_LOGGING` or `GXP_TELEMETRY_TYPE_TRACING`
+ *
+ * Caller must hold `core_telemetry_mgr->lock`.
+ * Caller must hold `gxp->vd_semaphore` for reading only.
+ * It is not allowed to hold `gxp->vd_semaphore` for writing, since this
+ * function needs to release `gxp->vd_semaphore` at different points to sleep.
+ *
+ * Return:
+ * * 0 - Firmware on @core is no longer using telemetry of @type
+ * * -ENXIO - Firmware on @core is unresponsive
+ */
+static int notify_core_and_wait_for_disable(struct gxp_dev *gxp, uint core,
+ u8 type)
+{
+ uint retries_left = 50;
+
+ lockdep_assert_held(&gxp->core_telemetry_mgr->lock);
+ lockdep_assert_held_read(&gxp->vd_semaphore);
+
+ gxp_notification_send(gxp, core, CORE_NOTIF_TELEMETRY_STATUS);
+
+ /* Wait for ACK from firmware */
+ while (is_telemetry_enabled(gxp, core, type) &&
+ gxp_is_fw_running(gxp, core) && retries_left) {
+ /* Release vd_semaphore while waiting */
+ up_read(&gxp->vd_semaphore);
+
+ /*
+ * The VD lock must be held to check if firmware is running, so
+ * the wait condition is only whether the firmware data has been
+ * updated to show the core disabling telemetry.
+ *
+ * If a core does stop running firmware while this function is
+ * asleep, it will be seen at the next timeout.
+ */
+ wait_event_timeout(gxp->core_telemetry_mgr->waitq,
+ !is_telemetry_enabled(gxp, core, type),
+ msecs_to_jiffies(10));
+ retries_left--;
+
+ /*
+ * No function may attempt to acquire the `vd_semaphore` while
+ * holding the core telemetry lock, so it must be released, then
+ * re-acquired once the `vd_semaphore` is held.
+ */
+ mutex_unlock(&gxp->core_telemetry_mgr->lock);
+ down_read(&gxp->vd_semaphore);
+ mutex_lock(&gxp->core_telemetry_mgr->lock);
+ }
+
+ /*
+ * If firmware has stopped running altogether, that is sufficient to be
+ * considered disabled. If firmware is started on this core again, it
+ * is responsible for clearing its status.
+ */
+ if (unlikely(is_telemetry_enabled(gxp, core, type) &&
+ gxp_is_fw_running(gxp, core)))
+ return -ENXIO;
+
+ return 0;
+}
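For a rough sense of scale, the retry loop above bounds how long a single unresponsive core can stall the disable path; using the constants in the function (50 retries, 10 ms wait_event_timeout() slices) and ignoring time spent re-acquiring the locks between retries:

/* Worst case for one unresponsive core in notify_core_and_wait_for_disable(). */
#define EXAMPLE_TELEMETRY_DISABLE_MAX_WAIT_MS	(50 * 10)	/* ~500 ms */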
+
+/**
+ * telemetry_disable_locked() - Helper function to break out the actual
+ * process of disabling core telemetry so that it
+ * can be invoked by internal functions that are
+ * already holding the core telemetry lock.
+ * @gxp: The GXP device to disable either logging or tracing for
+ * @type: Either `GXP_TELEMETRY_TYPE_LOGGING` or `GXP_TELEMETRY_TYPE_TRACING`
+ *
+ * Caller must hold `core_telemetry_mgr->lock`.
+ * Caller must hold `gxp->vd_semaphore` for reading only.
+ * It is not allowed to hold `gxp->vd_semaphore` for writing, since this
+ * function needs to release `gxp->vd_semaphore` at different points to sleep.
+ *
+ * Return:
+ * * 0 - Success
+ * * -EINVAL - The @type provided is not valid
+ * * -ENXIO - Buffers for @type have not been created/mapped yet
+ */
+static int telemetry_disable_locked(struct gxp_dev *gxp, u8 type)
+{
+ struct buffer_data *data;
+ int ret = 0;
+ uint core;
+ struct gxp_virtual_device *vd;
+
+ lockdep_assert_held(&gxp->core_telemetry_mgr->lock);
+ lockdep_assert_held_read(&gxp->vd_semaphore);
+
+ /* Cleanup core telemetry manager's book-keeping */
+ switch (type) {
+ case GXP_TELEMETRY_TYPE_LOGGING:
+ data = gxp->core_telemetry_mgr->logging_buff_data_legacy;
+ break;
+ case GXP_TELEMETRY_TYPE_TRACING:
+ data = gxp->core_telemetry_mgr->tracing_buff_data_legacy;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (!data)
+ return -ENXIO;
+
+ if (!(data->host_status & GXP_CORE_TELEMETRY_HOST_STATUS_ENABLED))
+ return 0;
+
+ data->is_enabled = false;
+
+ /* Clear the log buffer fields in firmware-data */
+ data->host_status &= ~GXP_CORE_TELEMETRY_HOST_STATUS_ENABLED;
+ gxp_fw_data_set_core_telemetry_descriptors(gxp, type, data->host_status, NULL, 0);
+
+ /* Notify any running cores that firmware-data was updated */
+ for (core = 0; core < GXP_NUM_CORES; core++) {
+ if (gxp_is_fw_running(gxp, core)) {
+ ret = notify_core_and_wait_for_disable(gxp, core, type);
+ if (ret)
+ dev_warn(
+ gxp->dev,
+ "%s: core%u failed to disable telemetry (type=%u, ret=%d)\n",
+ __func__, core, type, ret);
+ }
+ vd = gxp->core_to_vd[core];
+ if (vd)
+ gxp_dma_unmap_allocated_coherent_buffer(
+ gxp, vd->domain, &data->buffers[core]);
+ }
+
+ if (refcount_dec_and_test(&data->ref_count)) {
+ switch (type) {
+ case GXP_TELEMETRY_TYPE_LOGGING:
+ gxp->core_telemetry_mgr->logging_buff_data_legacy = NULL;
+ break;
+ case GXP_TELEMETRY_TYPE_TRACING:
+ gxp->core_telemetry_mgr->tracing_buff_data_legacy = NULL;
+ break;
+ default:
+ /* NO-OP, we returned above if `type` was invalid */
+ break;
+ }
+ free_telemetry_buffers(gxp, data);
+ }
+
+ return 0;
+}
+
+int gxp_core_telemetry_disable(struct gxp_dev *gxp, u8 type)
+{
+ int ret;
+
+ /*
+ * `vd_semaphore` cannot be acquired while holding the core telemetry
+ * lock, so acquire it here before locking the core telemetry lock.
+ */
+ down_read(&gxp->vd_semaphore);
+ mutex_lock(&gxp->core_telemetry_mgr->lock);
+
+ ret = telemetry_disable_locked(gxp, type);
+
+ mutex_unlock(&gxp->core_telemetry_mgr->lock);
+ up_read(&gxp->vd_semaphore);
+
+ return ret;
+}
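Both gxp_core_telemetry_enable() and gxp_core_telemetry_disable() rely on the same documented lock ordering: take gxp->vd_semaphore (read) first, then core_telemetry_mgr->lock. A minimal sketch of a hypothetical caller that has to hold both:

/* Hypothetical helper illustrating the required lock ordering. */
static void example_with_telemetry_locked(struct gxp_dev *gxp)
{
	down_read(&gxp->vd_semaphore);			/* 1st: VD semaphore */
	mutex_lock(&gxp->core_telemetry_mgr->lock);	/* 2nd: telemetry lock */

	/* ... inspect or update core telemetry state here ... */

	mutex_unlock(&gxp->core_telemetry_mgr->lock);
	up_read(&gxp->vd_semaphore);
}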
+
+int gxp_core_telemetry_register_eventfd(struct gxp_dev *gxp, u8 type, int fd)
+{
+ struct eventfd_ctx *new_ctx;
+ struct eventfd_ctx **ctx_to_set = NULL;
+ int ret = 0;
+
+ new_ctx = eventfd_ctx_fdget(fd);
+ if (IS_ERR(new_ctx))
+ return PTR_ERR(new_ctx);
+
+ mutex_lock(&gxp->core_telemetry_mgr->lock);
+
+ switch (type) {
+ case GXP_TELEMETRY_TYPE_LOGGING:
+ ctx_to_set = &gxp->core_telemetry_mgr->logging_efd;
+ break;
+ case GXP_TELEMETRY_TYPE_TRACING:
+ ctx_to_set = &gxp->core_telemetry_mgr->tracing_efd;
+ break;
+ default:
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (*ctx_to_set) {
+ dev_warn(
+ gxp->dev,
+ "Replacing existing core telemetry eventfd (type=%u)\n",
+ type);
+ eventfd_ctx_put(*ctx_to_set);
+ }
+
+ *ctx_to_set = new_ctx;
+
+out:
+ mutex_unlock(&gxp->core_telemetry_mgr->lock);
+ return ret;
+}
+
+int gxp_core_telemetry_unregister_eventfd(struct gxp_dev *gxp, u8 type)
+{
+ int ret = 0;
+
+ mutex_lock(&gxp->core_telemetry_mgr->lock);
+
+ switch (type) {
+ case GXP_TELEMETRY_TYPE_LOGGING:
+ if (gxp->core_telemetry_mgr->logging_efd)
+ eventfd_ctx_put(gxp->core_telemetry_mgr->logging_efd);
+ gxp->core_telemetry_mgr->logging_efd = NULL;
+ break;
+ case GXP_TELEMETRY_TYPE_TRACING:
+ if (gxp->core_telemetry_mgr->tracing_efd)
+ eventfd_ctx_put(gxp->core_telemetry_mgr->tracing_efd);
+ gxp->core_telemetry_mgr->tracing_efd = NULL;
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ mutex_unlock(&gxp->core_telemetry_mgr->lock);
+
+ return ret;
+}
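From user space, the eventfd registered here is the notification channel for new telemetry data: create an eventfd, hand its fd to the driver (the exact ioctl is not shown in this patch, so it is assumed here), then block on read() until the driver signals it. A minimal sketch; only the eventfd semantics are standard:

#include <stdint.h>
#include <sys/eventfd.h>
#include <unistd.h>

static int wait_for_telemetry(int efd)
{
	uint64_t count;

	/* Blocks until the driver calls eventfd_signal() on this context. */
	if (read(efd, &count, sizeof(count)) != sizeof(count))
		return -1;
	return (int)count;	/* signals coalesced since the last read */
}

/* Usage sketch: int efd = eventfd(0, EFD_CLOEXEC); register efd through the
 * driver's telemetry-eventfd ioctl (name assumed) for
 * GXP_TELEMETRY_TYPE_LOGGING, then call wait_for_telemetry(efd).
 */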
+
+struct work_struct *
+gxp_core_telemetry_get_notification_handler(struct gxp_dev *gxp, uint core)
+{
+ struct gxp_core_telemetry_manager *mgr = gxp->core_telemetry_mgr;
+
+ if (!mgr || core >= GXP_NUM_CORES)
+ return NULL;
+
+ return &mgr->notification_works[core].work;
+}
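A hedged sketch of how a notification dispatcher might use the helper above; the interrupt-side caller and the use of the system workqueue are assumptions, only the helper itself comes from this file:

/* Hypothetical dispatcher: schedule the per-core telemetry handler. */
static void example_dispatch_core_telemetry(struct gxp_dev *gxp, uint core)
{
	struct work_struct *work =
		gxp_core_telemetry_get_notification_handler(gxp, core);

	/* Queues whichever handler core telemetry registered for this core. */
	if (work)
		schedule_work(work);
}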
+
+void gxp_core_telemetry_exit(struct gxp_dev *gxp)
+{
+ struct buffer_data *log_buff_data, *trace_buff_data;
+ struct gxp_core_telemetry_manager *mgr = gxp->core_telemetry_mgr;
+
+ if (!mgr) {
+ dev_warn(gxp->dev, "Core telemetry manager was not allocated\n");
+ return;
+ }
+
+ /* TODO(b/260959553): Remove mutex_lock/unlock during legacy telemetry removal */
+ mutex_lock(&gxp->core_telemetry_mgr->lock);
+ log_buff_data = mgr->logging_buff_data;
+ trace_buff_data = mgr->tracing_buff_data;
+
+ if (!IS_ERR_OR_NULL(log_buff_data))
+ free_telemetry_buffers(gxp, log_buff_data);
+
+ if (!IS_ERR_OR_NULL(trace_buff_data))
+ free_telemetry_buffers(gxp, trace_buff_data);
+
+ mutex_unlock(&gxp->core_telemetry_mgr->lock);
+
+ if (!IS_ERR_OR_NULL(gxp->core_telemetry_mgr->logging_efd)) {
+ dev_warn(gxp->dev, "logging_efd was not released\n");
+ eventfd_ctx_put(gxp->core_telemetry_mgr->logging_efd);
+ gxp->core_telemetry_mgr->logging_efd = NULL;
+ }
+
+ if (!IS_ERR_OR_NULL(gxp->core_telemetry_mgr->tracing_efd)) {
+ dev_warn(gxp->dev, "tracing_efd was not released\n");
+ eventfd_ctx_put(gxp->core_telemetry_mgr->tracing_efd);
+ gxp->core_telemetry_mgr->tracing_efd = NULL;
+ }
+
+ mutex_destroy(&mgr->lock);
+ devm_kfree(gxp->dev, mgr);
+ gxp->core_telemetry_mgr = NULL;
+}
diff --git a/gxp-core-telemetry.h b/gxp-core-telemetry.h
new file mode 100644
index 0000000..9a89c0e
--- /dev/null
+++ b/gxp-core-telemetry.h
@@ -0,0 +1,184 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * GXP core telemetry support
+ *
+ * Copyright (C) 2021-2022 Google LLC
+ */
+
+#ifndef __GXP_CORE_TELEMETRY_H__
+#define __GXP_CORE_TELEMETRY_H__
+
+#include <linux/eventfd.h>
+#include <linux/refcount.h>
+#include <linux/types.h>
+
+#include "gxp-dma.h"
+#include "gxp-internal.h"
+#include "gxp.h"
+
+/* Default telemetry buffer size per core */
+#define CORE_TELEMETRY_DEFAULT_BUFFER_SIZE GXP_CORE_TELEMETRY_BUFFER_UNIT_SIZE
+/**
+ * Maximum core telemetry buffer size that can be represented by the
+ * GXP_GET_SPECS ioctl. 8 bits are reserved to represent the telemetry buffer
+ * size in the GXP_GET_SPECS ioctl, and the size is expressed in units of
+ * GXP_CORE_TELEMETRY_BUFFER_UNIT_SIZE.
+ */
+#define CORE_TELEMETRY_MAX_BUFFER_SIZE (U8_MAX * GXP_CORE_TELEMETRY_BUFFER_UNIT_SIZE)
+/* Secure telemetry buffer size per core */
+#define SECURE_CORE_TELEMETRY_BUFFER_SIZE GXP_CORE_TELEMETRY_BUFFER_UNIT_SIZE
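A minimal sketch of the 8-bit size encoding described above: the per-core buffer size is reported in GXP_CORE_TELEMETRY_BUFFER_UNIT_SIZE units, so the largest representable size is U8_MAX units. The helper names are hypothetical; the driver already rounds the buffer size up to a multiple of the unit size, so the division is exact.

/* Hypothetical helpers showing the GXP_GET_SPECS size encoding. */
static inline u8 telemetry_size_to_units(u32 size_bytes)
{
	/* size_bytes is a multiple of the unit size, so this fits in 8 bits. */
	return size_bytes / GXP_CORE_TELEMETRY_BUFFER_UNIT_SIZE;
}

static inline u32 telemetry_units_to_size(u8 units)
{
	return (u32)units * GXP_CORE_TELEMETRY_BUFFER_UNIT_SIZE;
}
/* e.g. CORE_TELEMETRY_MAX_BUFFER_SIZE == telemetry_units_to_size(U8_MAX) */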
+
+struct gxp_core_telemetry_work {
+ struct work_struct work;
+ struct gxp_dev *gxp;
+ uint core;
+};
+
+struct gxp_core_telemetry_manager {
+ struct buffer_data {
+ u32 host_status;
+ struct gxp_coherent_buf buffers[GXP_NUM_CORES];
+ u32 size;
+ refcount_t ref_count;
+ bool is_enabled;
+ } *logging_buff_data_legacy, *tracing_buff_data_legacy,
+ *logging_buff_data, *tracing_buff_data;
+ /* Protects logging_buff_data and tracing_buff_data */
+ struct mutex lock;
+ struct gxp_core_telemetry_work notification_works[GXP_NUM_CORES];
+ wait_queue_head_t waitq;
+ struct eventfd_ctx *logging_efd;
+ struct eventfd_ctx *tracing_efd;
+};
+
+/**
+ * gxp_core_telemetry_init() - Initialize core telemetry support
+ * @gxp: The GXP device to initialize core telemetry support for
+ *
+ * Return:
+ * * 0 - Success
+ * * -ENOMEM - Insufficient memory is available to initialize support
+ */
+int gxp_core_telemetry_init(struct gxp_dev *gxp);
+
+/**
+ * gxp_core_telemetry_mmap_buffers() - Maps the preallocated telemetry
+ * buffers to the user-space vma.
+ * @gxp: The GXP device to create the buffers for.
+ * @type: Either `GXP_TELEMETRY_TYPE_LOGGING` or `GXP_TELEMETRY_TYPE_TRACING`.
+ * @vma: The vma from user-space which all cores' buffers will be mapped into.
+ *
+ * Return:
+ * * 0 - Success.
+ * * -ENODEV - Core telemetry support has not been initialized. Must explicitly
+ * check this, since this function is called based on user-input.
+ * * -EINVAL - Either the vma size is not aligned or @type is not valid.
+ */
+int gxp_core_telemetry_mmap_buffers(struct gxp_dev *gxp, u8 type,
+ struct vm_area_struct *vma);
+
+/**
+ * gxp_core_telemetry_mmap_buffers_legacy() - Allocate a telemetry buffer for
+ * each core and map them to their
+ * core and the user-space vma
+ * @gxp: The GXP device to create the buffers for
+ * @type: Either `GXP_TELEMETRY_TYPE_LOGGING` or `GXP_TELEMETRY_TYPE_TRACING`
+ * @vma: The vma from user-space which all cores' buffers will be mapped into
+ *
+ * Return:
+ * * 0 - Success
+ * * -ENODEV - Core telemetry support has not been initialized. Must explicitly
+ * check this, since this function is called based on user-input.
+ * * -EBUSY - The requested core telemetry @type is already in use
+ * * -EINVAL - Either the vma size is not aligned or @type is not valid
+ * * -ENOMEM - Insufficient memory is available to allocate and map the buffers
+ */
+int gxp_core_telemetry_mmap_buffers_legacy(struct gxp_dev *gxp, u8 type,
+ struct vm_area_struct *vma);
+
+/**
+ * gxp_core_telemetry_enable() - Enable logging or tracing for all DSP cores
+ * @gxp: The GXP device to enable either logging or tracing for
+ * @type: Either `GXP_TELEMETRY_TYPE_LOGGING` or `GXP_TELEMETRY_TYPE_TRACING`
+ *
+ * Return:
+ * * 0 - Success
+ * * -EINVAL - The @type provided is not valid
+ * * -ENXIO - Buffers for @type have not been created/mapped yet
+ */
+int gxp_core_telemetry_enable(struct gxp_dev *gxp, u8 type);
+
+/**
+ * gxp_core_telemetry_disable() - Disable logging or tracing for all DSP cores
+ * @gxp: The GXP device to disable either logging or tracing for
+ * @type: Either `GXP_TELEMETRY_TYPE_LOGGING` or `GXP_TELEMETRY_TYPE_TRACING`
+ *
+ * Return:
+ * * 0 - Success
+ * * -EINVAL - The @type provided is not valid
+ * * -ENXIO - Buffers for @type have not been created/mapped yet
+ */
+int gxp_core_telemetry_disable(struct gxp_dev *gxp, u8 type);
+
+/**
+ * gxp_core_telemetry_register_eventfd() - Register an eventfd to be signaled
+ * when core telemetry notifications
+ * arrive while the specified @type of
+ * core telemetry is enabled
+ * @gxp: The GXP device to register the eventfd for
+ * @type: Either `GXP_TELEMETRY_TYPE_LOGGING` or `GXP_TELEMETRY_TYPE_TRACING`
+ * @fd: A file descriptor for an eventfd from user-space
+ *
+ * If another eventfd has already been registered for the given @type, the old
+ * eventfd will be unregistered and replaced.
+ *
+ * Return:
+ * * 0 - Success
+ * * -EBADF - @fd is not a valid file descriptor (via `eventfd_ctx_fdget()`)
+ * * -EINVAL - Invalid @type or @fd is not an eventfd
+ */
+int gxp_core_telemetry_register_eventfd(struct gxp_dev *gxp, u8 type, int fd);
+
+/**
+ * gxp_core_telemetry_unregister_eventfd() - Unregister and release a reference
+ * to a previously registered eventfd
+ * @gxp: The GXP device to unregister the eventfd for
+ * @type: Either `GXP_TELEMETRY_TYPE_LOGGING` or `GXP_TELEMETRY_TYPE_TRACING`
+ *
+ * Return:
+ * * 0 - Success
+ * * -EINVAL - The @type provided is not valid
+ */
+int gxp_core_telemetry_unregister_eventfd(struct gxp_dev *gxp, u8 type);
+
+/**
+ * gxp_core_telemetry_get_notification_handler() - Get the notification handler
+ * work for the specified core
+ * @gxp: The GXP device to obtain the handler for
+ * @core: The physical core number to obtain the handler for
+ *
+ * Return: A pointer to the work_struct for the @core's notification handler if
+ * successful. NULL if core telemetry has not been initialized or @core
+ * is invalid.
+ */
+struct work_struct *
+gxp_core_telemetry_get_notification_handler(struct gxp_dev *gxp, uint core);
+
+/**
+ * gxp_core_telemetry_status_notify() - Checks the telemetry status of the
+ * specified core and signals the eventfd.
+ * @gxp: The GXP device to check the core telemetry status for
+ * @core: The physical core number whose telemetry status to check
+ *
+ */
+void gxp_core_telemetry_status_notify(struct gxp_dev *gxp, uint core);
+
+/**
+ * gxp_core_telemetry_exit() - Reverts gxp_core_telemetry_init() to release the
+ * resources acquired by core telemetry manager.
+ * @gxp: The GXP device to release core telemetry resources for
+ *
+ */
+void gxp_core_telemetry_exit(struct gxp_dev *gxp);
+
+
+#endif /* __GXP_CORE_TELEMETRY_H__ */
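Putting the API above together, a hedged sketch of the intended call sequence for the legacy flow as seen from the driver's probe/ioctl/mmap layer; the callers themselves are outside this header and error handling is omitted:

/*
 * Sketch of the legacy core telemetry lifecycle:
 *
 *   gxp_core_telemetry_init(gxp);                            // probe
 *   gxp_core_telemetry_mmap_buffers_legacy(gxp, type, vma);  // user mmap()
 *   gxp_core_telemetry_register_eventfd(gxp, type, fd);      // optional
 *   gxp_core_telemetry_enable(gxp, type);                    // start logging
 *   ...firmware writes, eventfd signaled, user reads the mapping...
 *   gxp_core_telemetry_disable(gxp, type);                   // stop logging
 *   gxp_core_telemetry_unregister_eventfd(gxp, type);
 *   gxp_core_telemetry_exit(gxp);                            // remove
 */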
diff --git a/gxp-debug-dump.c b/gxp-debug-dump.c
index 1165a28..a29d6af 100644
--- a/gxp-debug-dump.c
+++ b/gxp-debug-dump.c
@@ -2,7 +2,7 @@
/*
* GXP debug dump handler
*
- * Copyright (C) 2020 Google LLC
+ * Copyright (C) 2020-2022 Google LLC
*/
#include <linux/bitops.h>
@@ -14,7 +14,7 @@
#include <linux/string.h>
#include <linux/workqueue.h>
-#if IS_ENABLED(CONFIG_SUBSYSTEM_COREDUMP)
+#if IS_ENABLED(CONFIG_GXP_TEST) || IS_ENABLED(CONFIG_SUBSYSTEM_COREDUMP)
#include <linux/platform_data/sscoredump.h>
#endif
@@ -32,16 +32,13 @@
#define SSCD_MSG_LENGTH 64
-#define SYNC_BARRIER_BLOCK 0x00100000
-#define SYNC_BARRIER_BASE(_x_) ((_x_) << 12)
+#define SYNC_BARRIER_BLOCK 0x00100000
+#define SYNC_BARRIER_BASE(_x_) ((_x_) << 12)
#define DEBUG_DUMP_MEMORY_SIZE 0x400000 /* size in bytes */
/* Enum indicating the debug dump request reason. */
-enum gxp_debug_dump_init_type {
- DEBUG_DUMP_FW_INIT,
- DEBUG_DUMP_KERNEL_INIT
-};
+enum gxp_debug_dump_init_type { DEBUG_DUMP_FW_INIT, DEBUG_DUMP_KERNEL_INIT };
enum gxp_common_segments_idx {
GXP_COMMON_REGISTERS_IDX,
@@ -49,7 +46,11 @@ enum gxp_common_segments_idx {
};
/* Whether or not the debug dump subsystem should be enabled. */
+#if IS_ENABLED(CONFIG_GXP_TEST)
+static int gxp_debug_dump_enable = 1;
+#else
static int gxp_debug_dump_enable;
+#endif
module_param_named(debug_dump_enable, gxp_debug_dump_enable, int, 0660);
static void gxp_debug_dump_cache_invalidate(struct gxp_dev *gxp)
@@ -81,9 +82,9 @@ static u32 gxp_read_sync_barrier_shadow(struct gxp_dev *gxp, uint index)
return gxp_read_32(gxp, barrier_reg_offset);
}
-static void
-gxp_get_common_registers(struct gxp_dev *gxp, struct gxp_seg_header *seg_header,
- struct gxp_common_registers *common_regs)
+static void gxp_get_common_registers(struct gxp_dev *gxp,
+ struct gxp_seg_header *seg_header,
+ struct gxp_common_registers *common_regs)
{
int i;
u32 addr;
@@ -145,7 +146,13 @@ static void gxp_get_lpm_psm_registers(struct gxp_dev *gxp,
{
struct gxp_lpm_state_table_registers *state_table_regs;
int i, j;
- uint offset;
+ uint offset, lpm_psm_offset;
+
+#ifdef GXP_SEPARATE_LPM_OFFSET
+ lpm_psm_offset = 0;
+#else
+ lpm_psm_offset = GXP_LPM_PSM_0_BASE + (GXP_LPM_PSM_SIZE * psm);
+#endif
/* Get State Table registers */
for (i = 0; i < PSM_STATE_TABLE_COUNT; i++) {
@@ -153,57 +160,56 @@ static void gxp_get_lpm_psm_registers(struct gxp_dev *gxp,
/* Get Trans registers */
for (j = 0; j < PSM_TRANS_COUNT; j++) {
- offset = PSM_STATE_TABLE_BASE(i) + PSM_TRANS_BASE(j);
- state_table_regs->trans[j].next_state =
- lpm_read_32_psm(gxp, psm, offset +
- PSM_NEXT_STATE_OFFSET);
+ offset = PSM_STATE_TABLE_BASE(i) + PSM_TRANS_BASE(j) +
+ lpm_psm_offset;
+ state_table_regs->trans[j].next_state = lpm_read_32(
+ gxp, offset + PSM_NEXT_STATE_OFFSET);
state_table_regs->trans[j].seq_addr =
- lpm_read_32_psm(gxp, psm, offset +
- PSM_SEQ_ADDR_OFFSET);
+ lpm_read_32(gxp, offset + PSM_SEQ_ADDR_OFFSET);
state_table_regs->trans[j].timer_val =
- lpm_read_32_psm(gxp, psm, offset +
- PSM_TIMER_VAL_OFFSET);
+ lpm_read_32(gxp, offset + PSM_TIMER_VAL_OFFSET);
state_table_regs->trans[j].timer_en =
- lpm_read_32_psm(gxp, psm, offset +
- PSM_TIMER_EN_OFFSET);
- state_table_regs->trans[j].trigger_num =
- lpm_read_32_psm(gxp, psm, offset +
- PSM_TRIGGER_NUM_OFFSET);
- state_table_regs->trans[j].trigger_en =
- lpm_read_32_psm(gxp, psm, offset +
- PSM_TRIGGER_EN_OFFSET);
+ lpm_read_32(gxp, offset + PSM_TIMER_EN_OFFSET);
+ state_table_regs->trans[j].trigger_num = lpm_read_32(
+ gxp, offset + PSM_TRIGGER_NUM_OFFSET);
+ state_table_regs->trans[j].trigger_en = lpm_read_32(
+ gxp, offset + PSM_TRIGGER_EN_OFFSET);
}
- state_table_regs->enable_state =
- lpm_read_32_psm(gxp, psm, PSM_STATE_TABLE_BASE(i) +
- PSM_ENABLE_STATE_OFFSET);
+ state_table_regs->enable_state = lpm_read_32(
+ gxp, lpm_psm_offset + PSM_STATE_TABLE_BASE(i) +
+ PSM_ENABLE_STATE_OFFSET);
}
/* Get DMEM registers */
for (i = 0; i < PSM_DATA_COUNT; i++) {
- offset = PSM_DMEM_BASE(i) + PSM_DATA_OFFSET;
- psm_regs->data[i] = lpm_read_32_psm(gxp, psm, offset);
+ offset = PSM_DMEM_BASE(i) + PSM_DATA_OFFSET + lpm_psm_offset;
+ psm_regs->data[i] = lpm_read_32(gxp, offset);
}
- psm_regs->cfg = lpm_read_32_psm(gxp, psm, PSM_CFG_OFFSET);
- psm_regs->status = lpm_read_32_psm(gxp, psm, PSM_STATUS_OFFSET);
+ psm_regs->cfg = lpm_read_32(gxp, lpm_psm_offset + PSM_CFG_OFFSET);
+ psm_regs->status = lpm_read_32(gxp, lpm_psm_offset + PSM_STATUS_OFFSET);
/* Get Debug CSR registers */
- psm_regs->debug_cfg = lpm_read_32_psm(gxp, psm, PSM_DEBUG_CFG_OFFSET);
- psm_regs->break_addr = lpm_read_32_psm(gxp, psm, PSM_BREAK_ADDR_OFFSET);
- psm_regs->gpin_lo_rd = lpm_read_32_psm(gxp, psm, PSM_GPIN_LO_RD_OFFSET);
- psm_regs->gpin_hi_rd = lpm_read_32_psm(gxp, psm, PSM_GPIN_HI_RD_OFFSET);
+ psm_regs->debug_cfg =
+ lpm_read_32(gxp, lpm_psm_offset + PSM_DEBUG_CFG_OFFSET);
+ psm_regs->break_addr =
+ lpm_read_32(gxp, lpm_psm_offset + PSM_BREAK_ADDR_OFFSET);
+ psm_regs->gpin_lo_rd =
+ lpm_read_32(gxp, lpm_psm_offset + PSM_GPIN_LO_RD_OFFSET);
+ psm_regs->gpin_hi_rd =
+ lpm_read_32(gxp, lpm_psm_offset + PSM_GPIN_HI_RD_OFFSET);
psm_regs->gpout_lo_rd =
- lpm_read_32_psm(gxp, psm, PSM_GPOUT_LO_RD_OFFSET);
+ lpm_read_32(gxp, lpm_psm_offset + PSM_GPOUT_LO_RD_OFFSET);
psm_regs->gpout_hi_rd =
- lpm_read_32_psm(gxp, psm, PSM_GPOUT_HI_RD_OFFSET);
+ lpm_read_32(gxp, lpm_psm_offset + PSM_GPOUT_HI_RD_OFFSET);
psm_regs->debug_status =
- lpm_read_32_psm(gxp, psm, PSM_DEBUG_STATUS_OFFSET);
+ lpm_read_32(gxp, lpm_psm_offset + PSM_DEBUG_STATUS_OFFSET);
}
-static void
-gxp_get_lpm_registers(struct gxp_dev *gxp, struct gxp_seg_header *seg_header,
- struct gxp_lpm_registers *lpm_regs)
+static void gxp_get_lpm_registers(struct gxp_dev *gxp,
+ struct gxp_seg_header *seg_header,
+ struct gxp_lpm_registers *lpm_regs)
{
int i;
uint offset;
@@ -272,9 +278,7 @@ static int gxp_get_common_dump(struct gxp_dev *gxp)
"Failed to acquire wakelock for getting common dump\n");
return ret;
}
- gxp_pm_update_requested_power_states(gxp, AUR_OFF, true, AUR_UUD, false,
- AUR_MEM_UNDEFINED,
- AUR_MEM_UNDEFINED);
+ gxp_pm_update_requested_power_states(gxp, off_states, uud_states);
gxp_get_common_registers(gxp,
&common_seg_header[GXP_COMMON_REGISTERS_IDX],
@@ -283,9 +287,7 @@ static int gxp_get_common_dump(struct gxp_dev *gxp)
&common_dump_data->lpm_regs);
gxp_wakelock_release(gxp);
- gxp_pm_update_requested_power_states(gxp, AUR_UUD, false, AUR_OFF, true,
- AUR_MEM_UNDEFINED,
- AUR_MEM_UNDEFINED);
+ gxp_pm_update_requested_power_states(gxp, uud_states, off_states);
dev_dbg(gxp->dev, "Segment Header for Common Segment\n");
dev_dbg(gxp->dev, "Name: %s, Size: 0x%0x bytes, Valid :%0x\n",
@@ -297,7 +299,7 @@ static int gxp_get_common_dump(struct gxp_dev *gxp)
return ret;
}
-#if IS_ENABLED(CONFIG_SUBSYSTEM_COREDUMP)
+#if IS_ENABLED(CONFIG_GXP_TEST) || IS_ENABLED(CONFIG_SUBSYSTEM_COREDUMP)
static void gxp_send_to_sscd(struct gxp_dev *gxp, void *segs, int seg_cnt,
const char *info)
{
@@ -366,7 +368,8 @@ static void gxp_user_buffers_vunmap(struct gxp_dev *gxp,
*/
vd = gxp->core_to_vd[core_header->core_id];
if (!vd) {
- dev_err(gxp->dev, "Virtual device is not available for vunmap\n");
+ dev_err(gxp->dev,
+ "Virtual device is not available for vunmap\n");
return;
}
@@ -512,7 +515,7 @@ static int gxp_handle_debug_dump(struct gxp_dev *gxp, uint32_t core_id)
&core_dump->core_dump_header[core_id];
struct gxp_core_header *core_header = &core_dump_header->core_header;
int ret = 0;
-#if IS_ENABLED(CONFIG_SUBSYSTEM_COREDUMP)
+#if IS_ENABLED(CONFIG_GXP_TEST) || IS_ENABLED(CONFIG_SUBSYSTEM_COREDUMP)
struct gxp_common_dump *common_dump = mgr->common_dump;
int i;
int seg_idx = 0;
@@ -529,7 +532,7 @@ static int gxp_handle_debug_dump(struct gxp_dev *gxp, uint32_t core_id)
goto out;
}
-#if IS_ENABLED(CONFIG_SUBSYSTEM_COREDUMP)
+#if IS_ENABLED(CONFIG_GXP_TEST) || IS_ENABLED(CONFIG_SUBSYSTEM_COREDUMP)
/* Common */
data_addr = &common_dump->common_dump_data.common_regs;
for (i = 0; i < GXP_NUM_COMMON_SEGMENTS; i++) {
@@ -553,9 +556,9 @@ static int gxp_handle_debug_dump(struct gxp_dev *gxp, uint32_t core_id)
mgr->segs[core_id][seg_idx].size = sizeof(struct gxp_core_header);
seg_idx++;
- data_addr = &core_dump->dump_data[core_id *
- core_header->core_dump_size /
- sizeof(u32)];
+ data_addr =
+ &core_dump->dump_data[core_id * core_header->core_dump_size /
+ sizeof(u32)];
for (i = 0; i < GXP_NUM_CORE_SEGMENTS - 1; i++) {
if (seg_idx >= GXP_NUM_SEGMENTS_PER_CORE) {
@@ -617,7 +620,7 @@ out:
static int gxp_init_segments(struct gxp_dev *gxp)
{
-#if !IS_ENABLED(CONFIG_SUBSYSTEM_COREDUMP)
+#if !(IS_ENABLED(CONFIG_GXP_TEST) || IS_ENABLED(CONFIG_SUBSYSTEM_COREDUMP))
return 0;
#else
struct gxp_debug_dump_manager *mgr = gxp->debug_dump_mgr;
@@ -730,7 +733,7 @@ struct work_struct *gxp_debug_dump_get_notification_handler(struct gxp_dev *gxp,
int gxp_debug_dump_init(struct gxp_dev *gxp, void *sscd_dev, void *sscd_pdata)
{
struct gxp_debug_dump_manager *mgr;
- int core;
+ int core, ret;
/* Don't initialize the debug dump subsystem unless it's enabled. */
if (!gxp_debug_dump_enable)
@@ -742,12 +745,11 @@ int gxp_debug_dump_init(struct gxp_dev *gxp, void *sscd_dev, void *sscd_pdata)
gxp->debug_dump_mgr = mgr;
mgr->gxp = gxp;
- mgr->buf.vaddr =
- gxp_dma_alloc_coherent(gxp, NULL, 0, DEBUG_DUMP_MEMORY_SIZE,
- &mgr->buf.daddr, GFP_KERNEL, 0);
- if (!mgr->buf.vaddr) {
+ ret = gxp_dma_alloc_coherent_buf(gxp, NULL, DEBUG_DUMP_MEMORY_SIZE,
+ GFP_KERNEL, 0, &mgr->buf);
+ if (ret) {
dev_err(gxp->dev, "Failed to allocate memory for debug dump\n");
- return -ENODEV;
+ return ret;
}
mgr->buf.size = DEBUG_DUMP_MEMORY_SIZE;
@@ -782,8 +784,7 @@ void gxp_debug_dump_exit(struct gxp_dev *gxp)
}
kfree(gxp->debug_dump_mgr->common_dump);
- gxp_dma_free_coherent(gxp, NULL, 0, DEBUG_DUMP_MEMORY_SIZE,
- mgr->buf.vaddr, mgr->buf.daddr);
+ gxp_dma_free_coherent_buf(gxp, NULL, &mgr->buf);
mutex_destroy(&mgr->debug_dump_lock);
devm_kfree(mgr->gxp->dev, mgr);
diff --git a/gxp-debug-dump.h b/gxp-debug-dump.h
index 1b1fda0..2a5d1ce 100644
--- a/gxp-debug-dump.h
+++ b/gxp-debug-dump.h
@@ -2,8 +2,9 @@
/*
* GXP debug dump handler
*
- * Copyright (C) 2020 Google LLC
+ * Copyright (C) 2020-2022 Google LLC
*/
+
#ifndef __GXP_DEBUG_DUMP_H__
#define __GXP_DEBUG_DUMP_H__
@@ -11,10 +12,11 @@
#include <linux/types.h>
#include <linux/workqueue.h>
-#if IS_ENABLED(CONFIG_SUBSYSTEM_COREDUMP)
+#if IS_ENABLED(CONFIG_GXP_TEST) || IS_ENABLED(CONFIG_SUBSYSTEM_COREDUMP)
#include <linux/platform_data/sscoredump.h>
#endif
+#include "gxp-dma.h"
#include "gxp-internal.h"
#define GXP_NUM_COMMON_SEGMENTS 2
@@ -172,15 +174,9 @@ struct gxp_debug_dump_work {
uint core_id;
};
-struct gxp_debug_dump_buffer {
- void *vaddr;
- dma_addr_t daddr;
- u32 size;
-};
-
struct gxp_debug_dump_manager {
struct gxp_dev *gxp;
- struct gxp_debug_dump_buffer buf;
+ struct gxp_coherent_buf buf; /* Buffer holding debug dump data */
struct gxp_debug_dump_work debug_dump_works[GXP_NUM_CORES];
struct gxp_core_dump *core_dump; /* start of the core dump */
struct gxp_common_dump *common_dump;
@@ -192,7 +188,7 @@ struct gxp_debug_dump_manager {
* time
*/
struct mutex debug_dump_lock;
-#if IS_ENABLED(CONFIG_SUBSYSTEM_COREDUMP)
+#if IS_ENABLED(CONFIG_GXP_TEST) || IS_ENABLED(CONFIG_SUBSYSTEM_COREDUMP)
struct sscd_segment segs[GXP_NUM_CORES][GXP_NUM_SEGMENTS_PER_CORE];
#endif
};
diff --git a/gxp-debugfs.c b/gxp-debugfs.c
index e1b199b..ae31914 100644
--- a/gxp-debugfs.c
+++ b/gxp-debugfs.c
@@ -8,8 +8,10 @@
#include <linux/acpm_dvfs.h>
#include "gxp-client.h"
+#include "gxp-core-telemetry.h"
#include "gxp-debug-dump.h"
#include "gxp-debugfs.h"
+#include "gxp-dma.h"
#include "gxp-firmware-data.h"
#include "gxp-firmware.h"
#include "gxp-internal.h"
@@ -17,14 +19,17 @@
#include "gxp-lpm.h"
#include "gxp-mailbox.h"
#include "gxp-pm.h"
-#include "gxp-telemetry.h"
#include "gxp-vd.h"
#include "gxp-wakelock.h"
#include "gxp.h"
+#if GXP_HAS_MCU
+#include "gxp-mcu-platform.h"
+#endif
+
static int gxp_debugfs_lpm_test(void *data, u64 val)
{
- struct gxp_dev *gxp = (struct gxp_dev *) data;
+ struct gxp_dev *gxp = (struct gxp_dev *)data;
dev_info(gxp->dev, "%llu\n", val);
@@ -35,49 +40,87 @@ DEFINE_DEBUGFS_ATTRIBUTE(gxp_lpm_test_fops, NULL, gxp_debugfs_lpm_test,
static int gxp_debugfs_mailbox(void *data, u64 val)
{
- int core;
- struct gxp_command cmd;
- struct gxp_response resp;
+ int core = 0, retval;
+ u16 status;
struct gxp_dev *gxp = (struct gxp_dev *)data;
+ struct gxp_mailbox *mbx;
+ struct gxp_power_states power_states = {
+ .power = GXP_POWER_STATE_NOM,
+ .memory = MEMORY_POWER_STATE_UNDEFINED,
+ };
+ u16 cmd_code;
+ int ret;
- core = val / 1000;
- if (core >= GXP_NUM_CORES) {
- dev_notice(gxp->dev,
- "Mailbox for core %d doesn't exist.\n", core);
- return -EINVAL;
- }
+ mutex_lock(&gxp->debugfs_client_lock);
- if (gxp->mailbox_mgr == NULL ||
- gxp->mailbox_mgr->mailboxes[core] == NULL) {
- dev_notice(gxp->dev,
- "Unable to send mailbox command -- mailbox %d not ready\n",
- core);
- return -EINVAL;
- }
+#if GXP_HAS_MCU
+ if (gxp_is_direct_mode(gxp)) {
+#endif
+ core = val / 1000;
+ if (core >= GXP_NUM_CORES) {
+ dev_notice(gxp->dev,
+ "Mailbox for core %d doesn't exist.\n",
+ core);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (gxp->mailbox_mgr->mailboxes[core] == NULL) {
+ dev_notice(
+ gxp->dev,
+ "Unable to send mailbox command -- mailbox %d not ready\n",
+ core);
+ ret = -EINVAL;
+ goto out;
+ }
- cmd.code = (u16) val;
- cmd.priority = 0;
- cmd.buffer_descriptor.address = 0;
- cmd.buffer_descriptor.size = 0;
- cmd.buffer_descriptor.flags = 0;
+ mbx = gxp->mailbox_mgr->mailboxes[core];
+ cmd_code = GXP_MBOX_CODE_DISPATCH;
+#if GXP_HAS_MCU
+ } else {
+ if (!gxp->debugfs_client) {
+ dev_err(gxp->dev,
+ "You should load firmwares via gxp/firmware_run first\n");
+ ret = -EIO;
+ goto out;
+ }
+
+ mbx = to_mcu_dev(gxp)->mcu.uci.mbx;
+ if (!mbx) {
+ dev_err(gxp->dev, "UCI is not initialized.\n");
+ ret = -EIO;
+ goto out;
+ }
+
+ cmd_code = CORE_COMMAND;
+ }
+#endif
down_read(&gxp->vd_semaphore);
- gxp_mailbox_execute_cmd(gxp->mailbox_mgr->mailboxes[core], &cmd, &resp);
+ /* In direct mode, gxp->debugfs_client and core will be ignored. */
+ retval = gxp->mailbox_mgr->execute_cmd(gxp->debugfs_client, mbx, core,
+ cmd_code, 0, 0, 0, 0, 1,
+ power_states, NULL, &status);
up_read(&gxp->vd_semaphore);
- dev_info(gxp->dev,
- "Mailbox Command Sent: cmd.code=%d, resp.status=%d, resp.retval=%d\n",
- cmd.code, resp.status, resp.retval);
- return 0;
+ dev_info(
+ gxp->dev,
+ "Mailbox Command Sent: core=%d, resp.status=%d, resp.retval=%d\n",
+ core, status, retval);
+ ret = 0;
+out:
+ mutex_unlock(&gxp->debugfs_client_lock);
+ return ret;
}
DEFINE_DEBUGFS_ATTRIBUTE(gxp_mailbox_fops, NULL, gxp_debugfs_mailbox, "%llu\n");
static int gxp_firmware_run_set(void *data, u64 val)
{
- struct gxp_dev *gxp = (struct gxp_dev *) data;
+ struct gxp_dev *gxp = (struct gxp_dev *)data;
struct gxp_client *client;
int ret = 0;
uint core;
+ bool acquired_block_wakelock;
ret = gxp_firmware_request_if_needed(gxp);
if (ret) {
@@ -89,7 +132,7 @@ static int gxp_firmware_run_set(void *data, u64 val)
if (val) {
if (gxp->debugfs_client) {
- dev_err(gxp->dev, "Firmware already running!\n");
+ dev_err(gxp->dev, "Firmware is already running!\n");
ret = -EIO;
goto out;
}
@@ -109,6 +152,7 @@ static int gxp_firmware_run_set(void *data, u64 val)
goto out;
}
}
+ up_write(&gxp->vd_semaphore);
/*
* Cleanup any bad state or corruption the device might've
@@ -124,34 +168,31 @@ static int gxp_firmware_run_set(void *data, u64 val)
}
gxp->debugfs_client = client;
- gxp->debugfs_client->vd = gxp_vd_allocate(gxp, GXP_NUM_CORES);
- if (IS_ERR(gxp->debugfs_client->vd)) {
+ down_write(&client->semaphore);
+
+ ret = gxp_client_allocate_virtual_device(client, GXP_NUM_CORES, 0);
+ if (ret) {
dev_err(gxp->dev, "Failed to allocate VD\n");
- ret = PTR_ERR(gxp->debugfs_client->vd);
- goto err_wakelock;
+ goto err_destroy_client;
}
- ret = gxp_wakelock_acquire(gxp);
+ ret = gxp_client_acquire_block_wakelock(
+ client, &acquired_block_wakelock);
if (ret) {
dev_err(gxp->dev, "Failed to acquire BLOCK wakelock\n");
- goto err_wakelock;
+ goto err_destroy_client;
}
- gxp->debugfs_client->has_block_wakelock = true;
- gxp_pm_update_requested_power_states(gxp, AUR_OFF, true,
- AUR_UUD, true,
- AUR_MEM_UNDEFINED,
- AUR_MEM_UNDEFINED);
- ret = gxp_vd_start(gxp->debugfs_client->vd);
- up_write(&gxp->vd_semaphore);
+ ret = gxp_client_acquire_vd_wakelock(client, uud_states);
if (ret) {
- dev_err(gxp->dev, "Failed to start VD\n");
- goto err_start;
+ dev_err(gxp->dev, "Failed to acquire VD wakelock\n");
+ goto err_release_block_wakelock;
}
- gxp->debugfs_client->has_vd_wakelock = true;
+
+ up_write(&client->semaphore);
} else {
if (!gxp->debugfs_client) {
- dev_err(gxp->dev, "Firmware not running!\n");
+ dev_err(gxp->dev, "Firmware is not running!\n");
ret = -EIO;
goto out;
}
@@ -162,10 +203,6 @@ static int gxp_firmware_run_set(void *data, u64 val)
*/
gxp_client_destroy(gxp->debugfs_client);
gxp->debugfs_client = NULL;
- gxp_pm_update_requested_power_states(gxp, AUR_UUD, true,
- AUR_OFF, true,
- AUR_MEM_UNDEFINED,
- AUR_MEM_UNDEFINED);
}
out:
@@ -173,14 +210,12 @@ out:
return ret;
-err_start:
- gxp_wakelock_release(gxp);
- gxp_pm_update_requested_power_states(gxp, AUR_UUD, true, AUR_OFF, true,
- AUR_MEM_UNDEFINED,
- AUR_MEM_UNDEFINED);
-err_wakelock:
+err_release_block_wakelock:
+ gxp_client_release_block_wakelock(client);
+err_destroy_client:
+ up_write(&client->semaphore);
/* Destroying a client cleans up any VDs or wakelocks it held. */
- gxp_client_destroy(gxp->debugfs_client);
+ gxp_client_destroy(client);
gxp->debugfs_client = NULL;
mutex_unlock(&gxp->debugfs_client_lock);
return ret;
@@ -188,10 +223,10 @@ err_wakelock:
static int gxp_firmware_run_get(void *data, u64 *val)
{
- struct gxp_dev *gxp = (struct gxp_dev *) data;
+ struct gxp_dev *gxp = (struct gxp_dev *)data;
down_read(&gxp->vd_semaphore);
- *val = gxp->firmware_running;
+ *val = gxp->firmware_mgr->firmware_running;
up_read(&gxp->vd_semaphore);
return 0;
@@ -224,10 +259,8 @@ static int gxp_wakelock_set(void *data, u64 val)
goto out;
}
gxp->debugfs_wakelock_held = true;
- gxp_pm_update_requested_power_states(gxp, AUR_OFF, true,
- AUR_UUD, true,
- AUR_MEM_UNDEFINED,
- AUR_MEM_UNDEFINED);
+ gxp_pm_update_requested_power_states(gxp, off_states,
+ uud_states);
} else {
/* Wakelock Release */
if (!gxp->debugfs_wakelock_held) {
@@ -238,10 +271,8 @@ static int gxp_wakelock_set(void *data, u64 val)
gxp_wakelock_release(gxp);
gxp->debugfs_wakelock_held = false;
- gxp_pm_update_requested_power_states(gxp, AUR_UUD, true,
- AUR_OFF, true,
- AUR_MEM_UNDEFINED,
- AUR_MEM_UNDEFINED);
+ gxp_pm_update_requested_power_states(gxp, uud_states,
+ off_states);
}
out:
@@ -321,29 +352,24 @@ static int gxp_log_buff_set(void *data, u64 val)
{
struct gxp_dev *gxp = (struct gxp_dev *)data;
int i;
- u64 **buffers;
+ struct gxp_coherent_buf *buffers;
u64 *ptr;
- mutex_lock(&gxp->telemetry_mgr->lock);
+ mutex_lock(&gxp->core_telemetry_mgr->lock);
- if (!gxp->telemetry_mgr->logging_buff_data) {
- dev_err(gxp->dev, "%s: Logging buffer has not been created\n",
- __func__);
- mutex_unlock(&gxp->telemetry_mgr->lock);
+ if (!gxp->core_telemetry_mgr->logging_buff_data_legacy) {
+ dev_err(gxp->dev, "Logging buffer has not been created");
+ mutex_unlock(&gxp->core_telemetry_mgr->lock);
return -ENODEV;
}
- buffers = (u64 **)gxp->telemetry_mgr->logging_buff_data->buffers;
+ buffers = gxp->core_telemetry_mgr->logging_buff_data_legacy->buffers;
for (i = 0; i < GXP_NUM_CORES; i++) {
- ptr = buffers[i];
+ ptr = buffers[i].vaddr;
*ptr = val;
}
- dev_dbg(gxp->dev,
- "%s: log buff first bytes: [0] = %llu, [1] = %llu, [2] = %llu, [3] = %llu (val=%llu)\n",
- __func__, *buffers[0], *buffers[1], *buffers[2], *buffers[3],
- val);
- mutex_unlock(&gxp->telemetry_mgr->lock);
+ mutex_unlock(&gxp->core_telemetry_mgr->lock);
return 0;
}
@@ -351,25 +377,21 @@ static int gxp_log_buff_set(void *data, u64 val)
static int gxp_log_buff_get(void *data, u64 *val)
{
struct gxp_dev *gxp = (struct gxp_dev *)data;
- u64 **buffers;
+ struct gxp_coherent_buf *buffers;
- mutex_lock(&gxp->telemetry_mgr->lock);
+ mutex_lock(&gxp->core_telemetry_mgr->lock);
- if (!gxp->telemetry_mgr->logging_buff_data) {
- dev_err(gxp->dev, "%s: Logging buffer has not been created\n",
- __func__);
- mutex_unlock(&gxp->telemetry_mgr->lock);
+ if (!gxp->core_telemetry_mgr->logging_buff_data_legacy) {
+ dev_err(gxp->dev, "Logging buffer has not been created");
+ mutex_unlock(&gxp->core_telemetry_mgr->lock);
return -ENODEV;
}
- buffers = (u64 **)gxp->telemetry_mgr->logging_buff_data->buffers;
- dev_dbg(gxp->dev,
- "%s: log buff first bytes: [0] = %llu, [1] = %llu, [2] = %llu, [3] = %llu\n",
- __func__, *buffers[0], *buffers[1], *buffers[2], *buffers[3]);
+ buffers = gxp->core_telemetry_mgr->logging_buff_data_legacy->buffers;
- *val = *buffers[0];
+ *val = *(u64 *)(buffers[0].vaddr);
- mutex_unlock(&gxp->telemetry_mgr->lock);
+ mutex_unlock(&gxp->core_telemetry_mgr->lock);
return 0;
}
@@ -382,17 +404,17 @@ static int gxp_log_eventfd_signal_set(void *data, u64 val)
struct gxp_dev *gxp = (struct gxp_dev *)data;
int ret = 0;
- mutex_lock(&gxp->telemetry_mgr->lock);
+ mutex_lock(&gxp->core_telemetry_mgr->lock);
- if (!gxp->telemetry_mgr->logging_efd) {
+ if (!gxp->core_telemetry_mgr->logging_efd) {
ret = -ENODEV;
goto out;
}
- ret = eventfd_signal(gxp->telemetry_mgr->logging_efd, 1);
+ ret = eventfd_signal(gxp->core_telemetry_mgr->logging_efd, 1);
out:
- mutex_unlock(&gxp->telemetry_mgr->lock);
+ mutex_unlock(&gxp->core_telemetry_mgr->lock);
return ret;
}
@@ -400,37 +422,33 @@ out:
DEFINE_DEBUGFS_ATTRIBUTE(gxp_log_eventfd_signal_fops, NULL,
gxp_log_eventfd_signal_set, "%llu\n");
-/* TODO: Remove these mux entry once experiment is done */
static int gxp_cmu_mux1_set(void *data, u64 val)
{
struct gxp_dev *gxp = (struct gxp_dev *)data;
- void *addr;
+ if (IS_ERR_OR_NULL(gxp->cmu.vaddr)) {
+ dev_err(gxp->dev, "CMU registers are not mapped");
+ return -ENODEV;
+ }
if (val > 1) {
- dev_err(gxp->dev, "Incorrect val for cmu_mux1, only 0 and 1 allowed\n");
+ dev_err(gxp->dev,
+ "Incorrect val for cmu_mux1, only 0 and 1 allowed\n");
return -EINVAL;
}
- addr = ioremap(gxp->regs.paddr - GXP_CMU_OFFSET, 0x1000);
-
- if (!addr) {
- dev_err(gxp->dev, "Cannot map CMU1 address\n");
- return -EIO;
- }
-
- writel(val << 4, addr + PLL_CON0_PLL_AUR);
- iounmap(addr);
+ writel(val << 4, gxp->cmu.vaddr + PLL_CON0_PLL_AUR);
return 0;
}
static int gxp_cmu_mux1_get(void *data, u64 *val)
{
struct gxp_dev *gxp = (struct gxp_dev *)data;
- void *addr;
- addr = ioremap(gxp->regs.paddr - GXP_CMU_OFFSET, 0x1000);
- *val = readl(addr + PLL_CON0_PLL_AUR);
- iounmap(addr);
+ if (IS_ERR_OR_NULL(gxp->cmu.vaddr)) {
+ dev_err(gxp->dev, "CMU registers are not mapped");
+ return -ENODEV;
+ }
+ *val = readl(gxp->cmu.vaddr + PLL_CON0_PLL_AUR);
return 0;
}
@@ -440,33 +458,30 @@ DEFINE_DEBUGFS_ATTRIBUTE(gxp_cmu_mux1_fops, gxp_cmu_mux1_get, gxp_cmu_mux1_set,
static int gxp_cmu_mux2_set(void *data, u64 val)
{
struct gxp_dev *gxp = (struct gxp_dev *)data;
- void *addr;
+ if (IS_ERR_OR_NULL(gxp->cmu.vaddr)) {
+ dev_err(gxp->dev, "CMU registers are not mapped");
+ return -ENODEV;
+ }
if (val > 1) {
- dev_err(gxp->dev, "Incorrect val for cmu_mux2, only 0 and 1 allowed\n");
+ dev_err(gxp->dev,
+ "Incorrect val for cmu_mux2, only 0 and 1 allowed\n");
return -EINVAL;
}
- addr = ioremap(gxp->regs.paddr - GXP_CMU_OFFSET, 0x1000);
-
- if (!addr) {
- dev_err(gxp->dev, "Cannot map CMU2 address\n");
- return -EIO;
- }
-
- writel(val << 4, addr + PLL_CON0_NOC_USER);
- iounmap(addr);
+ writel(val << 4, gxp->cmu.vaddr + PLL_CON0_NOC_USER);
return 0;
}
static int gxp_cmu_mux2_get(void *data, u64 *val)
{
struct gxp_dev *gxp = (struct gxp_dev *)data;
- void *addr;
- addr = ioremap(gxp->regs.paddr - GXP_CMU_OFFSET, 0x1000);
- *val = readl(addr + 0x610);
- iounmap(addr);
+ if (IS_ERR_OR_NULL(gxp->cmu.vaddr)) {
+ dev_err(gxp->dev, "CMU registers are not mapped");
+ return -ENODEV;
+ }
+ *val = readl(gxp->cmu.vaddr + PLL_CON0_NOC_USER);
return 0;
}
@@ -505,6 +520,8 @@ void gxp_create_debugfs(struct gxp_dev *gxp)
void gxp_remove_debugfs(struct gxp_dev *gxp)
{
+ if (IS_GXP_TEST && !gxp->d_entry)
+ return;
debugfs_remove_recursive(gxp->d_entry);
/*
diff --git a/gxp-dma-iommu.c b/gxp-dma-iommu.c
index 97322f5..1480761 100644
--- a/gxp-dma-iommu.c
+++ b/gxp-dma-iommu.c
@@ -5,6 +5,7 @@
* Copyright (C) 2021 Google LLC
*/
+#include <linux/bits.h>
#include <linux/dma-iommu.h>
#include <linux/dma-mapping.h>
#include <linux/iommu.h>
@@ -14,16 +15,16 @@
#include "gxp-config.h"
#include "gxp-dma.h"
-#include "gxp-iova.h"
+#include "gxp-mailbox.h"
#include "gxp-mapping.h"
#include "gxp-pm.h"
-#include "gxp-vd.h"
+#include "gxp-ssmt.h"
+#include "gxp.h"
struct gxp_dma_iommu_manager {
struct gxp_dma_manager dma_mgr;
- struct iommu_domain *default_domain;
- void __iomem *idma_ssmt_base;
- void __iomem *inst_data_ssmt_base;
+ struct gxp_iommu_domain *default_domain;
+ struct gxp_ssmt ssmt;
};
/**
@@ -40,7 +41,13 @@ struct gxp_dma_iommu_manager {
static int dma_info_to_prot(enum dma_data_direction dir, bool coherent,
unsigned long attrs)
{
- int prot = coherent ? IOMMU_CACHE : 0;
+ int prot = 0;
+
+ if (coherent) {
+#ifdef GXP_IS_DMA_COHERENT
+ prot = IOMMU_CACHE;
+#endif
+ }
if (attrs & DMA_ATTR_PRIVILEGED)
prot |= IOMMU_PRIV;
@@ -56,81 +63,29 @@ static int dma_info_to_prot(enum dma_data_direction dir, bool coherent,
}
}
-/* SSMT handling */
-
-#define INST_SID_FOR_CORE(_x_) ((1 << 6) | ((_x_) << 4) | (0 << 3))
-#define DATA_SID_FOR_CORE(_x_) ((1 << 6) | ((_x_) << 4) | (1 << 3))
-#define IDMA_SID_FOR_CORE(_x_) ((1 << 6) | ((_x_) << 4))
-
-static inline void ssmt_set_vid_for_sid(void __iomem *ssmt, int vid, u8 sid)
+static int map_flags_to_iommu_prot(enum dma_data_direction dir,
+ unsigned long attrs, u32 gxp_dma_flags)
{
- /* NS_READ_STREAM_VID_<sid> */
- writel(vid, (ssmt) + 0x1000u + (0x4u * (sid)));
- /* NS_WRITE_STREAM_VID_<sid> */
- writel(vid, (ssmt) + 0x1200u + (0x4u * (sid)));
+ bool coherent = gxp_dma_flags & GXP_MAP_COHERENT ? 1 : 0;
+
+ return dma_info_to_prot(dir, coherent, attrs);
}
static int gxp_dma_ssmt_program(struct gxp_dev *gxp,
- struct gxp_virtual_device *vd, uint virt_core,
- uint core)
+ struct iommu_domain *domain, uint core_list)
{
-/* SSMT is not supported in unittests */
-#ifndef CONFIG_GXP_TEST
struct gxp_dma_iommu_manager *mgr = container_of(
gxp->dma_mgr, struct gxp_dma_iommu_manager, dma_mgr);
- int core_vid;
-
- core_vid = iommu_aux_get_pasid(vd->core_domains[virt_core], gxp->dev);
- dev_dbg(gxp->dev, "SysMMU: core%u assigned vid %d\n", core,
- core_vid);
- ssmt_set_vid_for_sid(mgr->idma_ssmt_base, core_vid,
- IDMA_SID_FOR_CORE(core));
- ssmt_set_vid_for_sid(mgr->inst_data_ssmt_base, core_vid,
- INST_SID_FOR_CORE(core));
- ssmt_set_vid_for_sid(mgr->inst_data_ssmt_base, core_vid,
- DATA_SID_FOR_CORE(core));
-#endif
- return 0;
-}
-
-
-static inline int ssmt_init(struct gxp_dev *gxp,
- struct gxp_dma_iommu_manager *mgr)
-{
- struct platform_device *pdev =
- container_of(gxp->dev, struct platform_device, dev);
- struct resource *r;
-
- r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ssmt_idma");
- if (!r) {
- dev_err(gxp->dev, "Failed to find IDMA SSMT register base\n");
- return -EINVAL;
- }
-
- mgr->idma_ssmt_base = devm_ioremap_resource(gxp->dev, r);
- if (IS_ERR(mgr->idma_ssmt_base)) {
- dev_err(gxp->dev,
- "Failed to map IDMA SSMT register base (%ld)\n",
- PTR_ERR(mgr->idma_ssmt_base));
- return PTR_ERR(mgr->idma_ssmt_base);
- }
-
- r = platform_get_resource_byname(pdev, IORESOURCE_MEM,
- "ssmt_inst_data");
- if (!r) {
- dev_err(gxp->dev,
- "Failed to find instruction/data SSMT register base\n");
- return -EINVAL;
- }
-
- mgr->inst_data_ssmt_base = devm_ioremap_resource(gxp->dev, r);
- if (IS_ERR(mgr->inst_data_ssmt_base)) {
- dev_err(gxp->dev,
- "Failed to map instruction/data SSMT register base (%ld)\n",
- PTR_ERR(mgr->inst_data_ssmt_base));
- return PTR_ERR(mgr->inst_data_ssmt_base);
- }
-
+ int pasid;
+ uint core;
+
+ pasid = iommu_aux_get_pasid(domain, gxp->dev);
+ for (core = 0; core < GXP_NUM_CORES; core++)
+ if (BIT(core) & core_list) {
+ dev_dbg(gxp->dev, "Assign core%u to PASID %d\n", core,
+ pasid);
+ gxp_ssmt_set_core_vid(&mgr->ssmt, core, pasid);
+ }
return 0;
}
@@ -170,8 +125,100 @@ static int sysmmu_fault_handler(struct iommu_fault *fault, void *token)
return -EAGAIN;
}
+#if GXP_HAS_LAP
+
+/* No need to map CSRs when local access path exists. */
+
+#define gxp_map_csrs(...) 0
+#define gxp_unmap_csrs(...)
+
+#else /* !GXP_HAS_LAP */
+
+#define SYNC_BARRIERS_SIZE 0x100000
+
+static int gxp_map_csrs(struct gxp_dev *gxp, struct iommu_domain *domain,
+ struct gxp_mapped_resource *regs)
+{
+ int ret = iommu_map(domain, GXP_IOVA_AURORA_TOP, gxp->regs.paddr,
+ gxp->regs.size, IOMMU_READ | IOMMU_WRITE);
+ if (ret)
+ return ret;
+ /*
+ * Firmware expects to access the sync barriers at a separate
+ * address, lower than the rest of the AURORA_TOP registers.
+ */
+ ret = iommu_map(domain, GXP_IOVA_SYNC_BARRIERS,
+ gxp->regs.paddr + GXP_IOVA_SYNC_BARRIERS,
+ SYNC_BARRIERS_SIZE, IOMMU_READ | IOMMU_WRITE);
+ if (ret) {
+ iommu_unmap(domain, GXP_IOVA_AURORA_TOP, gxp->regs.size);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void gxp_unmap_csrs(struct gxp_dev *gxp, struct iommu_domain *domain,
+ struct gxp_mapped_resource *regs)
+{
+ iommu_unmap(domain, GXP_IOVA_SYNC_BARRIERS, SYNC_BARRIERS_SIZE);
+ iommu_unmap(domain, GXP_IOVA_AURORA_TOP, gxp->regs.size);
+}
+
+#endif /* GXP_HAS_LAP */
+
+/* Maps the shared buffer region to @domain. */
+static int gxp_map_core_shared_buffer(struct gxp_dev *gxp,
+ struct iommu_domain *domain,
+ u8 slice_index)
+{
+ size_t shared_size = gxp->shared_slice_size;
+
+ if (!gxp->shared_buf.paddr)
+ return 0;
+ return iommu_map(domain, gxp->shared_buf.daddr,
+ gxp->shared_buf.paddr + shared_size * slice_index,
+ shared_size, IOMMU_READ | IOMMU_WRITE);
+}
+
+/* Reverts gxp_map_core_shared_buffer. */
+static void gxp_unmap_core_shared_buffer(struct gxp_dev *gxp,
+ struct iommu_domain *domain)
+{
+ size_t shared_size = gxp->shared_slice_size;
+
+ if (!gxp->shared_buf.paddr)
+ return;
+ iommu_unmap(domain, gxp->shared_buf.daddr, shared_size);
+}
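The slice mapping above carves the shared buffer into per-virtual-device slices of gxp->shared_slice_size bytes each. A minimal sketch of the physical-address arithmetic it relies on (the helper name is hypothetical):

/* Hypothetical helper: physical base of a VD's shared-buffer slice. */
static inline phys_addr_t example_shared_slice_paddr(struct gxp_dev *gxp,
						     u8 slice_index)
{
	return gxp->shared_buf.paddr + gxp->shared_slice_size * slice_index;
}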
+
/* gxp-dma.h Interface */
+uint gxp_iommu_aux_get_pasid(struct gxp_dev *gxp,
+ struct gxp_iommu_domain *gdomain)
+{
+ return iommu_aux_get_pasid(gdomain->domain, gxp->dev);
+}
+
+struct gxp_iommu_domain *gxp_iommu_get_domain_for_dev(struct gxp_dev *gxp)
+{
+ struct gxp_iommu_domain *gdomain = gxp->default_domain;
+
+ if (IS_ERR_OR_NULL(gdomain)) {
+ gdomain = devm_kzalloc(gxp->dev, sizeof(*gdomain), GFP_KERNEL);
+ if (!gdomain)
+ return ERR_PTR(-ENOMEM);
+ gdomain->domain = iommu_get_domain_for_dev(gxp->dev);
+ if (!gdomain->domain) {
+ devm_kfree(gxp->dev, gdomain);
+ return ERR_PTR(-ENOMEM);
+ }
+ gxp->default_domain = gdomain;
+ }
+
+ return gdomain;
+}
+
int gxp_dma_init(struct gxp_dev *gxp)
{
struct gxp_dma_iommu_manager *mgr;
@@ -188,20 +235,16 @@ int gxp_dma_init(struct gxp_dev *gxp)
if (!mgr)
return -ENOMEM;
-/* TODO(b/201505925): remove this and prepare a of_node in unittests */
-/* SSMT is not supported in unittests */
-#ifndef CONFIG_GXP_TEST
- ret = ssmt_init(gxp, mgr);
+ ret = gxp_ssmt_init(gxp, &mgr->ssmt);
if (ret) {
dev_err(gxp->dev, "Failed to find SSMT\n");
return ret;
}
-#endif
- mgr->default_domain = iommu_get_domain_for_dev(gxp->dev);
- if (!mgr->default_domain) {
+ mgr->default_domain = gxp_iommu_get_domain_for_dev(gxp);
+ if (IS_ERR(mgr->default_domain)) {
dev_err(gxp->dev, "Failed to find default IOMMU domain\n");
- return -EIO;
+ return PTR_ERR(mgr->default_domain);
}
if (iommu_register_device_fault_handler(gxp->dev, sysmmu_fault_handler,
@@ -216,8 +259,14 @@ int gxp_dma_init(struct gxp_dev *gxp)
goto err_unreg_fault_handler;
}
+#if IS_ENABLED(CONFIG_ANDROID)
/* Enable best fit algorithm to minimize fragmentation */
- iommu_dma_enable_best_fit_algo(gxp->dev);
+ ret = iommu_dma_enable_best_fit_algo(gxp->dev);
+ if (ret)
+ dev_warn(gxp->dev,
+ "Failed to enable best-fit IOVA allocator (%d)\n",
+ ret);
+#endif
gxp->dma_mgr = &(mgr->dma_mgr);
@@ -238,95 +287,91 @@ void gxp_dma_exit(struct gxp_dev *gxp)
"Failed to unregister SysMMU fault handler\n");
}
-#define SYNC_BARRIERS_SIZE 0x100000
-#define SYNC_BARRIERS_TOP_OFFSET 0x100000
-#define EXT_TPU_MBX_SIZE 0x2000
-
-/* Offset from mailbox base to the device interface that needs to be mapped */
-#define MAILBOX_DEVICE_INTERFACE_OFFSET 0x10000
+#define EXT_TPU_MBX_SIZE 0x2000
void gxp_dma_init_default_resources(struct gxp_dev *gxp)
{
unsigned int core;
+ int i;
- for (core = 0; core < GXP_NUM_CORES; core++) {
- gxp->mbx[core].daddr = GXP_IOVA_MAILBOX(core);
+ for (i = 0; i < GXP_NUM_MAILBOXES; i++)
+ gxp->mbx[i].daddr = GXP_IOVA_MAILBOX(i);
+ for (core = 0; core < GXP_NUM_CORES; core++)
gxp->fwbufs[core].daddr = GXP_IOVA_FIRMWARE(core);
- }
- gxp->regs.daddr = GXP_IOVA_AURORA_TOP;
gxp->fwdatabuf.daddr = GXP_IOVA_FW_DATA;
}
int gxp_dma_domain_attach_device(struct gxp_dev *gxp,
- struct gxp_virtual_device *vd, uint virt_core,
- uint core)
+ struct gxp_iommu_domain *gdomain,
+ uint core_list)
{
int ret;
- ret = iommu_aux_attach_device(vd->core_domains[virt_core], gxp->dev);
+ ret = iommu_aux_attach_device(gdomain->domain, gxp->dev);
if (ret)
goto out;
- gxp_dma_ssmt_program(gxp, vd, virt_core, core);
+ gxp_dma_ssmt_program(gxp, gdomain->domain, core_list);
out:
return ret;
}
void gxp_dma_domain_detach_device(struct gxp_dev *gxp,
- struct gxp_virtual_device *vd, uint virt_core)
+ struct gxp_iommu_domain *gdomain)
{
- iommu_aux_detach_device(vd->core_domains[virt_core], gxp->dev);
+ iommu_aux_detach_device(gdomain->domain, gxp->dev);
}
int gxp_dma_map_core_resources(struct gxp_dev *gxp,
- struct gxp_virtual_device *vd, uint virt_core,
- uint core)
+ struct gxp_iommu_domain *gdomain, uint core_list,
+ u8 slice_index)
{
int ret;
+ uint i;
+ struct iommu_domain *domain = gdomain->domain;
- ret = iommu_map(vd->core_domains[virt_core], gxp->regs.daddr,
- gxp->regs.paddr, gxp->regs.size,
- IOMMU_READ | IOMMU_WRITE);
- if (ret)
- goto err;
- /*
- * Firmware expects to access the sync barriers at a separate
- * address, lower than the rest of the AURORA_TOP registers.
- */
- ret = iommu_map(vd->core_domains[virt_core], GXP_IOVA_SYNC_BARRIERS,
- gxp->regs.paddr + SYNC_BARRIERS_TOP_OFFSET,
- SYNC_BARRIERS_SIZE, IOMMU_READ | IOMMU_WRITE);
- if (ret)
- goto err;
- ret = iommu_map(vd->core_domains[virt_core], gxp->mbx[core].daddr,
- gxp->mbx[core].paddr + MAILBOX_DEVICE_INTERFACE_OFFSET,
- gxp->mbx[core].size, IOMMU_READ | IOMMU_WRITE);
+ ret = gxp_map_csrs(gxp, domain, &gxp->regs);
if (ret)
goto err;
+
+ for (i = 0; i < GXP_NUM_CORES; i++) {
+ if (!(BIT(i) & core_list))
+ continue;
+ ret = iommu_map(domain, gxp->mbx[i].daddr,
+ gxp->mbx[i].paddr +
+ MAILBOX_DEVICE_INTERFACE_OFFSET,
+ gxp->mbx[i].size, IOMMU_READ | IOMMU_WRITE);
+ if (ret)
+ goto err;
+ }
/*
* TODO(b/202213606): Map FW regions of all cores in a VD for
* each other at VD creation.
*/
- ret = iommu_map(vd->core_domains[virt_core], gxp->fwbufs[0].daddr,
- gxp->fwbufs[0].paddr,
+ ret = iommu_map(domain, gxp->fwbufs[0].daddr, gxp->fwbufs[0].paddr,
gxp->fwbufs[0].size * GXP_NUM_CORES,
IOMMU_READ | IOMMU_WRITE);
if (ret)
goto err;
- ret = iommu_map(vd->core_domains[virt_core], gxp->fwdatabuf.daddr,
- gxp->fwdatabuf.paddr, gxp->fwdatabuf.size,
- IOMMU_READ | IOMMU_WRITE);
+ ret = iommu_map(domain, gxp->fwdatabuf.daddr, gxp->fwdatabuf.paddr,
+ gxp->fwdatabuf.size, IOMMU_READ | IOMMU_WRITE);
+ if (ret)
+ goto err;
+ ret = gxp_map_core_shared_buffer(gxp, domain, slice_index);
if (ret)
goto err;
/* Only map the TPU mailboxes if they were found on probe */
if (gxp->tpu_dev.mbx_paddr) {
- ret = iommu_map(
- vd->core_domains[virt_core],
- GXP_IOVA_EXT_TPU_MBX + core * EXT_TPU_MBX_SIZE,
- gxp->tpu_dev.mbx_paddr +
- core * EXT_TPU_MBX_SIZE,
- EXT_TPU_MBX_SIZE, IOMMU_READ | IOMMU_WRITE);
- if (ret)
- goto err;
+ for (i = 0; i < GXP_NUM_CORES; i++) {
+ if (!(BIT(i) & core_list))
+ continue;
+ ret = iommu_map(
+ domain,
+ GXP_IOVA_EXT_TPU_MBX + i * EXT_TPU_MBX_SIZE,
+ gxp->tpu_dev.mbx_paddr + i * EXT_TPU_MBX_SIZE,
+ EXT_TPU_MBX_SIZE, IOMMU_READ | IOMMU_WRITE);
+ if (ret)
+ goto err;
+ }
}
return ret;
@@ -336,42 +381,47 @@ err:
* Any resource that hadn't been mapped yet will cause `iommu_unmap()`
 * to return immediately, so it's safe to try to unmap everything.
*/
- gxp_dma_unmap_core_resources(gxp, vd, virt_core, core);
+ gxp_dma_unmap_core_resources(gxp, gdomain, core_list);
return ret;
}
void gxp_dma_unmap_core_resources(struct gxp_dev *gxp,
- struct gxp_virtual_device *vd, uint virt_core,
- uint core)
+ struct gxp_iommu_domain *gdomain,
+ uint core_list)
{
+ uint i;
+ struct iommu_domain *domain = gdomain->domain;
+
/* Only unmap the TPU mailboxes if they were found on probe */
if (gxp->tpu_dev.mbx_paddr) {
- iommu_unmap(vd->core_domains[virt_core],
- GXP_IOVA_EXT_TPU_MBX +
- core * EXT_TPU_MBX_SIZE,
- EXT_TPU_MBX_SIZE);
+ for (i = 0; i < GXP_NUM_CORES; i++) {
+ if (!(BIT(i) & core_list))
+ continue;
+ iommu_unmap(domain,
+ GXP_IOVA_EXT_TPU_MBX + i * EXT_TPU_MBX_SIZE,
+ EXT_TPU_MBX_SIZE);
+ }
}
- iommu_unmap(vd->core_domains[virt_core], gxp->fwdatabuf.daddr,
- gxp->fwdatabuf.size);
+ gxp_unmap_core_shared_buffer(gxp, domain);
+ iommu_unmap(domain, gxp->fwdatabuf.daddr, gxp->fwdatabuf.size);
/*
* TODO(b/202213606): A core should only have access to the FW
* of other cores if they're in the same VD, and have the FW
* region unmapped on VD destruction.
*/
- iommu_unmap(vd->core_domains[virt_core], gxp->fwbufs[0].daddr,
+ iommu_unmap(domain, gxp->fwbufs[0].daddr,
gxp->fwbufs[0].size * GXP_NUM_CORES);
- iommu_unmap(vd->core_domains[virt_core], gxp->mbx[core].daddr,
- gxp->mbx[core].size);
- iommu_unmap(vd->core_domains[virt_core], GXP_IOVA_SYNC_BARRIERS,
- SYNC_BARRIERS_SIZE);
- iommu_unmap(vd->core_domains[virt_core], gxp->regs.daddr,
- gxp->regs.size);
+ for (i = 0; i < GXP_NUM_CORES; i++) {
+ if (!(BIT(i) & core_list))
+ continue;
+ iommu_unmap(domain, gxp->mbx[i].daddr, gxp->mbx[i].size);
+ }
+ gxp_unmap_csrs(gxp, domain, &gxp->regs);
}
-static inline struct sg_table *
-alloc_sgt_for_buffer(void *ptr, size_t size,
- struct iommu_domain *domain,
- dma_addr_t daddr)
+static inline struct sg_table *alloc_sgt_for_buffer(void *ptr, size_t size,
+ struct iommu_domain *domain,
+ dma_addr_t daddr)
{
struct sg_table *sgt;
ulong offset;
@@ -409,7 +459,7 @@ alloc_sgt_for_buffer(void *ptr, size_t size,
*/
size_in_page = size > (PAGE_SIZE - offset_in_page(ptr)) ?
PAGE_SIZE - offset_in_page(ptr) :
- size;
+ size;
page = phys_to_page(iommu_iova_to_phys(domain, daddr));
sg_set_page(next, page, size_in_page, offset_in_page(ptr));
size -= size_in_page;
@@ -437,136 +487,114 @@ alloc_sgt_for_buffer(void *ptr, size_t size,
return sgt;
}
-#if (IS_ENABLED(CONFIG_GXP_TEST) || IS_ENABLED(CONFIG_ANDROID)) && !IS_ENABLED(CONFIG_GXP_GEM5)
-int gxp_dma_map_tpu_buffer(struct gxp_dev *gxp, struct gxp_virtual_device *vd,
- uint virt_core_list, uint core_list,
+#if (IS_ENABLED(CONFIG_GXP_TEST) || IS_ENABLED(CONFIG_ANDROID)) && \
+ !IS_ENABLED(CONFIG_GXP_GEM5)
+int gxp_dma_map_tpu_buffer(struct gxp_dev *gxp,
+ struct gxp_iommu_domain *gdomain, uint core_list,
struct edgetpu_ext_mailbox_info *mbx_info)
{
- uint orig_virt_core_list = virt_core_list;
+ uint orig_core_list = core_list;
u64 queue_iova;
- uint virt_core;
int core;
int ret;
int i = 0;
+ struct iommu_domain *domain = gdomain->domain;
- while (virt_core_list) {
+ while (core_list) {
phys_addr_t cmdq_pa = mbx_info->mailboxes[i].cmdq_pa;
phys_addr_t respq_pa = mbx_info->mailboxes[i++].respq_pa;
- virt_core = ffs(virt_core_list) - 1;
- virt_core_list &= ~BIT(virt_core);
core = ffs(core_list) - 1;
- core_list &= ~BIT(core);
queue_iova = GXP_IOVA_TPU_MBX_BUFFER(core);
- ret = iommu_map(vd->core_domains[virt_core], queue_iova,
- cmdq_pa, mbx_info->cmdq_size, IOMMU_WRITE);
+ ret = iommu_map(domain, queue_iova, cmdq_pa,
+ mbx_info->cmdq_size, IOMMU_WRITE);
if (ret)
goto error;
- ret = iommu_map(vd->core_domains[virt_core],
- queue_iova + mbx_info->cmdq_size, respq_pa,
- mbx_info->respq_size, IOMMU_READ);
+ ret = iommu_map(domain, queue_iova + mbx_info->cmdq_size,
+ respq_pa, mbx_info->respq_size, IOMMU_READ);
if (ret) {
- iommu_unmap(vd->core_domains[virt_core], queue_iova,
- mbx_info->cmdq_size);
+ iommu_unmap(domain, queue_iova, mbx_info->cmdq_size);
goto error;
}
+ core_list &= ~BIT(core);
}
return 0;
error:
- virt_core_list ^= orig_virt_core_list;
- while (virt_core_list) {
- virt_core = ffs(virt_core_list) - 1;
- virt_core_list &= ~BIT(virt_core);
+ core_list ^= orig_core_list;
+ while (core_list) {
core = ffs(core_list) - 1;
core_list &= ~BIT(core);
queue_iova = GXP_IOVA_TPU_MBX_BUFFER(core);
- iommu_unmap(vd->core_domains[virt_core], queue_iova,
- mbx_info->cmdq_size);
- iommu_unmap(vd->core_domains[virt_core], queue_iova +
- mbx_info->cmdq_size, mbx_info->respq_size);
+ iommu_unmap(domain, queue_iova, mbx_info->cmdq_size);
+ iommu_unmap(domain, queue_iova + mbx_info->cmdq_size,
+ mbx_info->respq_size);
}
return ret;
}
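/*
 * Note on the error path above (illustrative, not part of this patch): because
 * a core's bit is only cleared from @core_list after both of its queues map
 * successfully, `core_list ^= orig_core_list` leaves exactly the bits of the
 * cores that were fully mapped, so the cleanup loop unmaps only those. For
 * example, with orig_core_list == 0b0111 and a failure on core 2,
 * core_list == 0b0100 at the goto, and 0b0100 ^ 0b0111 == 0b0011 (cores 0, 1).
 */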
void gxp_dma_unmap_tpu_buffer(struct gxp_dev *gxp,
- struct gxp_virtual_device *vd,
+ struct gxp_iommu_domain *gdomain,
struct gxp_tpu_mbx_desc mbx_desc)
{
- uint virt_core_list = mbx_desc.virt_core_list;
uint core_list = mbx_desc.phys_core_list;
u64 queue_iova;
int core;
- uint virt_core;
+ struct iommu_domain *domain = gdomain->domain;
- while (virt_core_list) {
- virt_core = ffs(virt_core_list) - 1;
- virt_core_list &= ~BIT(virt_core);
+ while (core_list) {
core = ffs(core_list) - 1;
core_list &= ~BIT(core);
queue_iova = GXP_IOVA_TPU_MBX_BUFFER(core);
- iommu_unmap(vd->core_domains[virt_core], queue_iova,
- mbx_desc.cmdq_size);
- iommu_unmap(vd->core_domains[virt_core], queue_iova +
- mbx_desc.cmdq_size, mbx_desc.respq_size);
+ iommu_unmap(domain, queue_iova, mbx_desc.cmdq_size);
+ iommu_unmap(domain, queue_iova + mbx_desc.cmdq_size,
+ mbx_desc.respq_size);
}
}
-#endif // (CONFIG_GXP_TEST || CONFIG_ANDROID) && !CONFIG_GXP_GEM5
+#endif // (CONFIG_GXP_TEST || CONFIG_ANDROID) && !CONFIG_GXP_GEM5
-int gxp_dma_map_allocated_coherent_buffer(struct gxp_dev *gxp, void *buf,
- struct gxp_virtual_device *vd,
- uint virt_core_list, size_t size,
- dma_addr_t dma_handle,
+int gxp_dma_map_allocated_coherent_buffer(struct gxp_dev *gxp,
+ struct gxp_coherent_buf *buf,
+ struct gxp_iommu_domain *gdomain,
uint gxp_dma_flags)
{
struct gxp_dma_iommu_manager *mgr = container_of(
gxp->dma_mgr, struct gxp_dma_iommu_manager, dma_mgr);
struct sg_table *sgt;
- int virt_core;
ssize_t size_mapped;
+ int ret = 0;
+ size_t size;
+ struct iommu_domain *domain = gdomain->domain;
- size = size < PAGE_SIZE ? PAGE_SIZE : size;
- sgt = alloc_sgt_for_buffer(buf, size, mgr->default_domain, dma_handle);
+ size = buf->size;
+ sgt = alloc_sgt_for_buffer(buf->vaddr, buf->size,
+ mgr->default_domain->domain, buf->dma_addr);
if (IS_ERR(sgt)) {
dev_err(gxp->dev,
"Failed to allocate sgt for coherent buffer\n");
- return -ENOMEM;
- }
-
- /* Create identical mappings in the specified cores' domains */
- for (virt_core = 0; virt_core < vd->num_cores; virt_core++) {
- if (!(virt_core_list & BIT(virt_core)))
- continue;
- /*
- * In Linux 5.15 and beyond, `iommu_map_sg()` returns a
- * `ssize_t` to encode errors that earlier versions throw out.
- * Explicitly cast here for backwards compatibility.
- */
- size_mapped = (ssize_t)iommu_map_sg(vd->core_domains[virt_core],
- dma_handle, sgt->sgl,
- sgt->orig_nents,
- IOMMU_READ | IOMMU_WRITE);
- if (size_mapped != size)
- goto err;
+ return PTR_ERR(sgt);
}
- sg_free_table(sgt);
- kfree(sgt);
- return 0;
-
-err:
- for (virt_core -= 1; virt_core >= 0; virt_core--)
- iommu_unmap(vd->core_domains[virt_core], dma_handle, size);
+ /*
+ * In Linux 5.15 and beyond, `iommu_map_sg()` returns a
+ * `ssize_t` to encode errors that earlier versions throw out.
+ * Explicitly cast here for backwards compatibility.
+ */
+ size_mapped = (ssize_t)iommu_map_sg(domain, buf->dma_addr, sgt->sgl,
+ sgt->orig_nents,
+ IOMMU_READ | IOMMU_WRITE);
+ if (size_mapped != size)
+ ret = size_mapped < 0 ? (int)size_mapped : -EINVAL;
sg_free_table(sgt);
kfree(sgt);
- return -EINVAL;
+ return ret;
}
-void *gxp_dma_alloc_coherent(struct gxp_dev *gxp, struct gxp_virtual_device *vd,
- uint virt_core_list, size_t size,
- dma_addr_t *dma_handle, gfp_t flag,
- uint gxp_dma_flags)
+int gxp_dma_alloc_coherent_buf(struct gxp_dev *gxp,
+ struct gxp_iommu_domain *gdomain, size_t size,
+ gfp_t flag, uint gxp_dma_flags,
+ struct gxp_coherent_buf *buffer)
{
void *buf;
dma_addr_t daddr;
@@ -578,226 +606,55 @@ void *gxp_dma_alloc_coherent(struct gxp_dev *gxp, struct gxp_virtual_device *vd,
buf = dma_alloc_coherent(gxp->dev, size, &daddr, flag);
if (!buf) {
dev_err(gxp->dev, "Failed to allocate coherent buffer\n");
- return NULL;
+ return -ENOMEM;
}
- if (vd != NULL) {
- ret = gxp_dma_map_allocated_coherent_buffer(gxp, buf, vd,
- virt_core_list,
- size, daddr,
- gxp_dma_flags);
+
+ buffer->vaddr = buf;
+ buffer->size = size;
+ buffer->dma_addr = daddr;
+
+ if (gdomain != NULL) {
+ ret = gxp_dma_map_allocated_coherent_buffer(
+ gxp, buffer, gdomain, gxp_dma_flags);
if (ret) {
+ buffer->vaddr = NULL;
+ buffer->size = 0;
dma_free_coherent(gxp->dev, size, buf, daddr);
- return NULL;
+ return ret;
}
}
- if (dma_handle)
- *dma_handle = daddr;
+ buffer->dsp_addr = daddr;
- return buf;
+ return 0;
}
void gxp_dma_unmap_allocated_coherent_buffer(struct gxp_dev *gxp,
- struct gxp_virtual_device *vd,
- uint virt_core_list, size_t size,
- dma_addr_t dma_handle)
-{
- int virt_core;
-
- size = size < PAGE_SIZE ? PAGE_SIZE : size;
-
- for (virt_core = 0; virt_core < vd->num_cores; virt_core++) {
- if (!(virt_core_list & BIT(virt_core)))
- continue;
- if (size !=
- iommu_unmap(vd->core_domains[virt_core], dma_handle, size))
- dev_warn(gxp->dev, "Failed to unmap coherent buffer\n");
- }
-}
-
-void gxp_dma_free_coherent(struct gxp_dev *gxp, struct gxp_virtual_device *vd,
- uint virt_core_list, size_t size, void *cpu_addr,
- dma_addr_t dma_handle)
-{
- if (vd != NULL)
- gxp_dma_unmap_allocated_coherent_buffer(gxp, vd, virt_core_list,
- size, dma_handle);
- dma_free_coherent(gxp->dev, size, cpu_addr, dma_handle);
-}
-
-dma_addr_t gxp_dma_map_single(struct gxp_dev *gxp,
- struct gxp_virtual_device *vd,
- uint virt_core_list, void *cpu_addr, size_t size,
- enum dma_data_direction direction,
- unsigned long attrs, uint gxp_dma_flags)
-{
- struct gxp_dma_iommu_manager *mgr = container_of(
- gxp->dma_mgr, struct gxp_dma_iommu_manager, dma_mgr);
- dma_addr_t daddr;
- phys_addr_t paddr;
- int prot = dma_info_to_prot(direction, 0, attrs);
- int virt_core;
-
- daddr = dma_map_single_attrs(gxp->dev, cpu_addr, size, direction,
- attrs);
- if (dma_mapping_error(gxp->dev, daddr))
- return DMA_MAPPING_ERROR;
-
- paddr = iommu_iova_to_phys(mgr->default_domain, daddr);
- for (virt_core = 0; virt_core < vd->num_cores; virt_core++) {
- if (!(virt_core_list & BIT(virt_core)))
- continue;
- if (iommu_map(vd->core_domains[virt_core], daddr, paddr, size,
- prot))
- goto err;
- }
-
- return daddr;
-
-err:
- for (virt_core -= 1; virt_core >= 0; virt_core--)
- iommu_unmap(vd->core_domains[virt_core], daddr, size);
- dma_unmap_single_attrs(gxp->dev, daddr, size, direction,
- DMA_ATTR_SKIP_CPU_SYNC);
- return DMA_MAPPING_ERROR;
-}
-
-void gxp_dma_unmap_single(struct gxp_dev *gxp, struct gxp_virtual_device *vd,
- uint virt_core_list, dma_addr_t dma_addr, size_t size,
- enum dma_data_direction direction,
- unsigned long attrs)
-{
- int virt_core;
-
- for (virt_core = 0; virt_core < vd->num_cores; virt_core++) {
- if (!(virt_core_list & BIT(virt_core)))
- continue;
- if (size !=
- iommu_unmap(vd->core_domains[virt_core], dma_addr, size))
- dev_warn(gxp->dev, "Failed to unmap single\n");
- }
-
- dma_unmap_single_attrs(gxp->dev, dma_addr, size, direction, attrs);
-}
-
-dma_addr_t gxp_dma_map_page(struct gxp_dev *gxp, struct gxp_virtual_device *vd,
- uint virt_core_list, struct page *page,
- unsigned long offset, size_t size,
- enum dma_data_direction direction,
- unsigned long attrs, uint gxp_dma_flags)
-{
- struct gxp_dma_iommu_manager *mgr = container_of(
- gxp->dma_mgr, struct gxp_dma_iommu_manager, dma_mgr);
- dma_addr_t daddr;
- phys_addr_t paddr;
- int prot = dma_info_to_prot(direction, 0, attrs);
- int virt_core;
-
- daddr = dma_map_page_attrs(gxp->dev, page, offset, size, direction,
- attrs);
- if (dma_mapping_error(gxp->dev, daddr))
- return DMA_MAPPING_ERROR;
-
- paddr = iommu_iova_to_phys(mgr->default_domain, daddr);
- for (virt_core = 0; virt_core < vd->num_cores; virt_core++) {
- if (!(virt_core_list & BIT(virt_core)))
- continue;
- if (iommu_map(vd->core_domains[virt_core], daddr, paddr, size,
- prot))
- goto err;
- }
-
- return daddr;
-
-err:
- for (virt_core -= 1; virt_core >= 0; virt_core--)
- iommu_unmap(vd->core_domains[virt_core], daddr, size);
- dma_unmap_page_attrs(gxp->dev, daddr, size, direction,
- DMA_ATTR_SKIP_CPU_SYNC);
- return DMA_MAPPING_ERROR;
-}
-
-void gxp_dma_unmap_page(struct gxp_dev *gxp, struct gxp_virtual_device *vd,
- uint virt_core_list, dma_addr_t dma_addr, size_t size,
- enum dma_data_direction direction, unsigned long attrs)
-{
- int virt_core;
-
- for (virt_core = 0; virt_core < vd->num_cores; virt_core++) {
- if (!(virt_core_list & BIT(virt_core)))
- continue;
- if (size !=
- iommu_unmap(vd->core_domains[virt_core], dma_addr, size))
- dev_warn(gxp->dev, "Failed to unmap page\n");
- }
-
- dma_unmap_page_attrs(gxp->dev, dma_addr, size, direction, attrs);
-}
-
-dma_addr_t gxp_dma_map_resource(struct gxp_dev *gxp,
- struct gxp_virtual_device *vd,
- uint virt_core_list, phys_addr_t phys_addr,
- size_t size, enum dma_data_direction direction,
- unsigned long attrs, uint gxp_dma_flags)
+ struct gxp_iommu_domain *gdomain,
+ struct gxp_coherent_buf *buf)
{
- dma_addr_t daddr;
- int prot = dma_info_to_prot(direction, 0, attrs);
- int virt_core;
-
- daddr = dma_map_resource(gxp->dev, phys_addr, size, direction, attrs);
- if (dma_mapping_error(gxp->dev, daddr))
- return DMA_MAPPING_ERROR;
-
- for (virt_core = 0; virt_core < vd->num_cores; virt_core++) {
- if (!(virt_core_list & BIT(virt_core)))
- continue;
- if (iommu_map(vd->core_domains[virt_core], daddr, phys_addr,
- size, prot))
- goto err;
- }
-
- return daddr;
-
-err:
- for (virt_core -= 1; virt_core >= 0; virt_core--)
- iommu_unmap(vd->core_domains[virt_core], daddr, size);
- dma_unmap_resource(gxp->dev, daddr, size, direction,
- DMA_ATTR_SKIP_CPU_SYNC);
- return DMA_MAPPING_ERROR;
+ if (buf->size != iommu_unmap(gdomain->domain, buf->dma_addr, buf->size))
+ dev_warn(gxp->dev, "Failed to unmap coherent buffer\n");
}
-void gxp_dma_unmap_resource(struct gxp_dev *gxp, struct gxp_virtual_device *vd,
- uint virt_core_list, dma_addr_t dma_addr,
- size_t size, enum dma_data_direction direction,
- unsigned long attrs)
+void gxp_dma_free_coherent_buf(struct gxp_dev *gxp,
+ struct gxp_iommu_domain *gdomain,
+ struct gxp_coherent_buf *buf)
{
- int virt_core;
-
- for (virt_core = 0; virt_core < vd->num_cores; virt_core++) {
- if (!(virt_core_list & BIT(virt_core)))
- continue;
- if (size !=
- iommu_unmap(vd->core_domains[virt_core], dma_addr, size))
- dev_warn(gxp->dev, "Failed to unmap resource\n");
- }
-
- dma_unmap_resource(gxp->dev, dma_addr, size, direction, attrs);
+ if (gdomain != NULL)
+ gxp_dma_unmap_allocated_coherent_buffer(gxp, gdomain, buf);
+ dma_free_coherent(gxp->dev, buf->size, buf->vaddr, buf->dma_addr);
}
-int gxp_dma_map_sg(struct gxp_dev *gxp, struct gxp_virtual_device *vd,
- int virt_core_list, struct scatterlist *sg, int nents,
+int gxp_dma_map_sg(struct gxp_dev *gxp, struct gxp_iommu_domain *gdomain,
+ struct scatterlist *sg, int nents,
enum dma_data_direction direction, unsigned long attrs,
uint gxp_dma_flags)
{
int nents_mapped;
dma_addr_t daddr;
- int prot = dma_info_to_prot(direction, 0, attrs);
- int virt_core;
+ int prot = map_flags_to_iommu_prot(direction, attrs, gxp_dma_flags);
ssize_t size_mapped;
- /* Variables needed to cleanup if an error occurs */
- struct scatterlist *s;
- int i;
- size_t size = 0;
nents_mapped = dma_map_sg_attrs(gxp->dev, sg, nents, direction, attrs);
if (!nents_mapped)
@@ -805,71 +662,71 @@ int gxp_dma_map_sg(struct gxp_dev *gxp, struct gxp_virtual_device *vd,
daddr = sg_dma_address(sg);
- for (virt_core = 0; virt_core < vd->num_cores; virt_core++) {
- if (!(virt_core_list & BIT(virt_core)))
- continue;
- /*
- * In Linux 5.15 and beyond, `iommu_map_sg()` returns a
- * `ssize_t` to encode errors that earlier versions throw out.
- * Explicitly cast here for backwards compatibility.
- */
- size_mapped = (ssize_t)iommu_map_sg(vd->core_domains[virt_core],
- daddr, sg, nents, prot);
- if (size_mapped <= 0)
- goto err;
- }
+ /*
+ * In Linux 5.15 and beyond, `iommu_map_sg()` returns a
+ * `ssize_t` to encode errors that earlier versions throw out.
+ * Explicitly cast here for backwards compatibility.
+ */
+ size_mapped =
+ (ssize_t)iommu_map_sg(gdomain->domain, daddr, sg, nents, prot);
+ if (size_mapped <= 0)
+ goto err;
return nents_mapped;
err:
- for_each_sg(sg, s, nents, i) {
- size += sg_dma_len(s);
- }
-
- for (virt_core -= 1; virt_core >= 0; virt_core--)
- iommu_unmap(vd->core_domains[virt_core], daddr, size);
dma_unmap_sg_attrs(gxp->dev, sg, nents, direction, attrs);
return 0;
}
-void gxp_dma_unmap_sg(struct gxp_dev *gxp, struct gxp_virtual_device *vd,
- uint virt_core_list, struct scatterlist *sg, int nents,
+void gxp_dma_unmap_sg(struct gxp_dev *gxp, struct gxp_iommu_domain *gdomain,
+ struct scatterlist *sg, int nents,
enum dma_data_direction direction, unsigned long attrs)
{
struct scatterlist *s;
int i;
size_t size = 0;
- int virt_core;
- for_each_sg(sg, s, nents, i) {
+ for_each_sg (sg, s, nents, i)
size += sg_dma_len(s);
- }
- for (virt_core = 0; virt_core < vd->num_cores; virt_core++) {
- if (!(virt_core_list & BIT(virt_core)))
- continue;
- if (!iommu_unmap(vd->core_domains[virt_core], sg_dma_address(sg),
- size))
- dev_warn(gxp->dev, "Failed to unmap sg\n");
- }
+ if (!iommu_unmap(gdomain->domain, sg_dma_address(sg), size))
+ dev_warn(gxp->dev, "Failed to unmap sg\n");
dma_unmap_sg_attrs(gxp->dev, sg, nents, direction, attrs);
}
-void gxp_dma_sync_single_for_cpu(struct gxp_dev *gxp, dma_addr_t dma_handle,
- size_t size,
- enum dma_data_direction direction)
+int gxp_dma_map_iova_sgt(struct gxp_dev *gxp, struct gxp_iommu_domain *gdomain,
+ dma_addr_t iova, struct sg_table *sgt, int prot)
{
- /* Syncing is not domain specific. Just call through to DMA API */
- dma_sync_single_for_cpu(gxp->dev, dma_handle, size, direction);
+ ssize_t size_mapped;
+
+ size_mapped = (ssize_t)iommu_map_sg(gdomain->domain, iova, sgt->sgl,
+ sgt->orig_nents, prot);
+ if (size_mapped <= 0) {
+ dev_err(gxp->dev, "map IOVA %pad to SG table failed: %d", &iova,
+ (int)size_mapped);
+ if (size_mapped == 0)
+ return -EINVAL;
+ return size_mapped;
+ }
+
+ return 0;
}
-void gxp_dma_sync_single_for_device(struct gxp_dev *gxp, dma_addr_t dma_handle,
- size_t size,
- enum dma_data_direction direction)
+void gxp_dma_unmap_iova_sgt(struct gxp_dev *gxp,
+ struct gxp_iommu_domain *gdomain, dma_addr_t iova,
+ struct sg_table *sgt)
{
- /* Syncing is not domain specific. Just call through to DMA API */
- dma_sync_single_for_device(gxp->dev, dma_handle, size, direction);
+ struct scatterlist *s;
+ int i;
+ size_t size = 0;
+
+ for_each_sg (sgt->sgl, s, sgt->orig_nents, i)
+ size += s->length;
+
+ if (!iommu_unmap(gdomain->domain, iova, size))
+ dev_warn(gxp->dev, "Failed to unmap sgt");
}
void gxp_dma_sync_sg_for_cpu(struct gxp_dev *gxp, struct scatterlist *sg,
@@ -886,20 +743,16 @@ void gxp_dma_sync_sg_for_device(struct gxp_dev *gxp, struct scatterlist *sg,
dma_sync_sg_for_device(gxp->dev, sg, nents, direction);
}
-struct sg_table *gxp_dma_map_dmabuf_attachment(
- struct gxp_dev *gxp, struct gxp_virtual_device *vd, uint virt_core_list,
- struct dma_buf_attachment *attachment,
- enum dma_data_direction direction)
+struct sg_table *
+gxp_dma_map_dmabuf_attachment(struct gxp_dev *gxp,
+ struct gxp_iommu_domain *gdomain,
+ struct dma_buf_attachment *attachment,
+ enum dma_data_direction direction)
{
struct sg_table *sgt;
int prot = dma_info_to_prot(direction, /*coherent=*/0, /*attrs=*/0);
ssize_t size_mapped;
- int virt_core;
int ret;
- /* Variables needed to cleanup if an error occurs */
- struct scatterlist *s;
- int i;
- size_t size = 0;
/* Map the attachment into the default domain */
sgt = dma_buf_map_attachment(attachment, direction);
@@ -910,49 +763,34 @@ struct sg_table *gxp_dma_map_dmabuf_attachment(
return sgt;
}
- /* Map the sgt into the aux domain of all specified cores */
- for (virt_core = 0; virt_core < vd->num_cores; virt_core++) {
- if (!(virt_core_list & BIT(virt_core)))
- continue;
+ /*
+ * In Linux 5.15 and beyond, `iommu_map_sg()` returns a
+ * `ssize_t` to encode errors that earlier versions throw out.
+ * Explicitly cast here for backwards compatibility.
+ */
+ size_mapped =
+ (ssize_t)iommu_map_sg(gdomain->domain, sg_dma_address(sgt->sgl),
+ sgt->sgl, sgt->orig_nents, prot);
+ if (size_mapped <= 0) {
+ dev_err(gxp->dev, "Failed to map dma-buf: %ld\n", size_mapped);
/*
- * In Linux 5.15 and beyond, `iommu_map_sg()` returns a
- * `ssize_t` to encode errors that earlier versions throw out.
- * Explicitly cast here for backwards compatibility.
+ * Prior to Linux 5.15, `iommu_map_sg()` returns 0 for
+ * any failure. Return a generic IO error in this case.
*/
- size_mapped =
- (ssize_t)iommu_map_sg(vd->core_domains[virt_core],
- sg_dma_address(sgt->sgl),
- sgt->sgl, sgt->orig_nents, prot);
- if (size_mapped <= 0) {
- dev_err(gxp->dev,
- "Failed to map dma-buf to virtual core %d (ret=%ld)\n",
- virt_core, size_mapped);
- /*
- * Prior to Linux 5.15, `iommu_map_sg()` returns 0 for
- * any failure. Return a generic IO error in this case.
- */
- ret = size_mapped == 0 ? -EIO : (int)size_mapped;
- goto err;
- }
+ ret = size_mapped == 0 ? -EIO : (int)size_mapped;
+ goto err;
}
return sgt;
err:
- for_each_sg(sgt->sgl, s, sgt->nents, i)
- size += sg_dma_len(s);
-
- for (virt_core -= 1; virt_core >= 0; virt_core--)
- iommu_unmap(vd->core_domains[virt_core], sg_dma_address(sgt->sgl), size);
dma_buf_unmap_attachment(attachment, sgt, direction);
return ERR_PTR(ret);
-
}
void gxp_dma_unmap_dmabuf_attachment(struct gxp_dev *gxp,
- struct gxp_virtual_device *vd,
- uint virt_core_list,
+ struct gxp_iommu_domain *gdomain,
struct dma_buf_attachment *attachment,
struct sg_table *sgt,
enum dma_data_direction direction)
@@ -960,23 +798,13 @@ void gxp_dma_unmap_dmabuf_attachment(struct gxp_dev *gxp,
struct scatterlist *s;
int i;
size_t size = 0;
- int virt_core;
/* Find the size of the mapping in IOVA-space */
- for_each_sg(sgt->sgl, s, sgt->nents, i)
+ for_each_sg (sgt->sgl, s, sgt->nents, i)
size += sg_dma_len(s);
- /* Unmap the dma-buf from the aux domain of all specified cores */
- for (virt_core = 0; virt_core < vd->num_cores; virt_core++) {
- if (!(virt_core_list & BIT(virt_core)))
- continue;
- if (!iommu_unmap(vd->core_domains[virt_core],
- sg_dma_address(sgt->sgl), size))
- dev_warn(
- gxp->dev,
- "Failed to unmap dma-buf from virtual core %d\n",
- virt_core);
- }
+ if (!iommu_unmap(gdomain->domain, sg_dma_address(sgt->sgl), size))
+ dev_warn(gxp->dev, "Failed to unmap dma-buf\n");
/* Unmap the attachment from the default domain */
dma_buf_unmap_attachment(attachment, sgt, direction);
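/*
 * Illustrative usage sketch for the reworked coherent-buffer API above (not
 * part of this patch). Error handling is minimal and the caller is assumed to
 * already hold a valid gxp_iommu_domain.
 */
static int gxp_example_alloc_shared_region(struct gxp_dev *gxp,
					   struct gxp_iommu_domain *gdomain)
{
	struct gxp_coherent_buf buf;
	int ret;

	/* Allocates the buffer and, since @gdomain is non-NULL, maps it. */
	ret = gxp_dma_alloc_coherent_buf(gxp, gdomain, SZ_4K, GFP_KERNEL,
					 /*gxp_dma_flags=*/0, &buf);
	if (ret)
		return ret;

	/* buf.vaddr is the kernel VA; buf.dsp_addr is the IOVA seen by cores. */
	memset(buf.vaddr, 0, buf.size);

	/* Unmaps from @gdomain (non-NULL here) and frees the allocation. */
	gxp_dma_free_coherent_buf(gxp, gdomain, &buf);
	return 0;
}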
diff --git a/gxp-dma.h b/gxp-dma.h
index cf05e57..da7d433 100644
--- a/gxp-dma.h
+++ b/gxp-dma.h
@@ -10,13 +10,33 @@
#include <linux/dma-buf.h>
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
+#include <linux/iommu.h>
#include <linux/types.h>
-#if (IS_ENABLED(CONFIG_GXP_TEST) || IS_ENABLED(CONFIG_ANDROID)) && !IS_ENABLED(CONFIG_GXP_GEM5)
+#if (IS_ENABLED(CONFIG_GXP_TEST) || IS_ENABLED(CONFIG_ANDROID)) && \
+ !IS_ENABLED(CONFIG_GXP_GEM5)
#include <soc/google/tpu-ext.h>
#endif
#include "gxp-internal.h"
+struct gxp_iommu_domain {
+ struct iommu_domain *domain;
+ uint ctx_id;
+};
+
+struct gxp_coherent_buf {
+ void *vaddr; /* kernel VA, no allocation if NULL */
+ /* TODO(b/249030390): Use standard DMA-IOMMU APIs returned address */
+ dma_addr_t dma_addr; /* DMA handle obtained from DMA-IOMMU APIs. */
+ /*
+ * IOVA to be accessed by the device. Equal to @dma_addr when there is
+ * no self-managed IOMMU.
+ */
+ dma_addr_t dsp_addr;
+ u64 phys_addr; /* physical address, if available */
+ size_t size;
+};
+
struct gxp_dma_manager {
struct rb_root mapping_tree;
};
@@ -51,35 +71,27 @@ int gxp_dma_init(struct gxp_dev *gxp);
void gxp_dma_exit(struct gxp_dev *gxp);
/**
- * gxp_dma_domain_attach_device() - Attach the page table of a virtual core to
- * the device and perform any necessary initialization.
+ * gxp_dma_domain_attach_device() - Attach the page table to the device and
+ * perform necessary initialization.
* @gxp: The GXP device to attach
- * @vd: The virtual device including the virtual core
- * @virt_core: The virtual core the page table belongs to
- * @core: The physical core is bound with the virtual core
+ * @gdomain: The IOMMU domain to be attached.
+ * @core_list: The physical cores to attach.
*
- * The caller must make sure @vd will not be released for the duration of the
- * call.
+ * The caller must hold a BLOCK wakelock while attaching the IOMMU domain.
*/
int gxp_dma_domain_attach_device(struct gxp_dev *gxp,
- struct gxp_virtual_device *vd, uint virt_core,
- uint core);
+ struct gxp_iommu_domain *gdomain,
+ uint core_list);
/**
- * gxp_dma_domain_detach_device() - Detach the page table of a virtual core from
- * the device.
+ * gxp_dma_domain_detach_device() - Detach the page table from the device.
* @gxp: The GXP device to detach
- * @vd: The virtual device including the virtual core
- * @virt_core: The virtual core the page table belongs to
- *
- * The client the @vd belongs to must hold a BLOCK wakelock for the iommu
- * detaching
+ * @gdomain: The IOMMU domain to be detached
*
- * The caller must make sure @vd will not be released for the duration of the
- * call.
+ * The caller must hold a BLOCK wakelock while detaching the IOMMU domain.
*/
void gxp_dma_domain_detach_device(struct gxp_dev *gxp,
- struct gxp_virtual_device *vd, uint virt_core);
+ struct gxp_iommu_domain *gdomain);
/**
* gxp_dma_init_default_resources() - Set the various buffers/registers with
@@ -93,11 +105,11 @@ void gxp_dma_init_default_resources(struct gxp_dev *gxp);
/**
* gxp_dma_map_core_resources() - Map the various buffers/registers with
- * fixed IOVAs on certain virtual core
+ * fixed IOVAs on the IOMMU domain.
* @gxp: The GXP device to set up the mappings for
- * @vd: The virtual device including the virtual core the IOVA are mapped for
- * @virt_core: The virtual core the IOVAs are mapped for
- * @core: The corresponding physical core of the @virt_core
+ * @gdomain: The IOMMU domain to be mapped on
+ * @core_list: The physical cores that may use the domain
+ * @slice_index: The index of the shared-buffer slice to be mapped
*
* GXP firmware expects several buffers and registers to be mapped to fixed
* locations in their IOVA space. This function initializes all those mappings
@@ -107,324 +119,167 @@ void gxp_dma_init_default_resources(struct gxp_dev *gxp);
* fields of every `struct gxp_mapped_resource` inside of @gxp have been
* initialized.
*
- * The caller must make sure @vd will not be released for the duration of the
- * call.
- *
* Return:
* * 0 - Mappings created successfully
* * -EIO - Failed to create one or more of the mappings
*/
int gxp_dma_map_core_resources(struct gxp_dev *gxp,
- struct gxp_virtual_device *vd, uint virt_core,
- uint core);
+ struct gxp_iommu_domain *gdomain, uint core_list,
+ u8 slice_index);
/**
* gxp_dma_unmap_core_resources() - Unmap the IOVAs mapped by
- * gxp_dma_map_resources
+ * gxp_dma_map_core_resources()
* @gxp: The GXP device that was passed to gxp_dma_map_core_resources()
- * @vd: The virtual device including the virtual core the IOVAs were mapped for
- * @virt_core: The virtual core the IOVAs were mapped for
- * @core: The physical cores the IOVAs were mapped for
+ * @gdomain: The IOMMU domain to be unmapped
+ * @core_list: The physical cores the IOVAs were mapped for
*
* GXP firmware expects several buffers and registers to be mapped to fixed
* locations in their IOVA space. This function releases all those mappings.
- *
- * The caller must make sure @vd will not be released for the duration of the
- * call.
*/
void gxp_dma_unmap_core_resources(struct gxp_dev *gxp,
- struct gxp_virtual_device *vd, uint virt_core,
- uint core);
+ struct gxp_iommu_domain *gdomain,
+ uint core_list);
-#if (IS_ENABLED(CONFIG_GXP_TEST) || IS_ENABLED(CONFIG_ANDROID)) && !IS_ENABLED(CONFIG_GXP_GEM5)
+#if (IS_ENABLED(CONFIG_GXP_TEST) || IS_ENABLED(CONFIG_ANDROID)) && \
+ !IS_ENABLED(CONFIG_GXP_GEM5)
/**
* gxp_dma_map_tpu_buffer() - Map the tpu mbx queue buffers with fixed IOVAs
* @gxp: The GXP device to set up the mappings for
- * @vd: The virtual device including the virtual cores the mapping is for
- * @virt_core_list: A bitfield enumerating the virtual cores the mapping is for
+ * @gdomain: The IOMMU domain to be mapped on
* @core_list: A bitfield enumerating the physical cores the mapping is for
* @mbx_info: Structure holding TPU-DSP mailbox queue buffer information
*
* Return:
* * 0 - Mappings created successfully
* * -EIO - Failed to create the mappings
- *
- * The caller must make sure @vd will not be released for the duration of the
- * call.
*/
-int gxp_dma_map_tpu_buffer(struct gxp_dev *gxp, struct gxp_virtual_device *vd,
- uint virt_core_list, uint core_list,
+int gxp_dma_map_tpu_buffer(struct gxp_dev *gxp,
+ struct gxp_iommu_domain *gdomain, uint core_list,
struct edgetpu_ext_mailbox_info *mbx_info);
/**
* gxp_dma_unmap_tpu_buffer() - Unmap IOVAs mapped by gxp_dma_map_tpu_buffer()
* @gxp: The GXP device that was passed to gxp_dma_map_tpu_buffer()
- * @vd: The virtual device including the virtual cores the mapping was for
- * @mbx_desc: Structure holding info for already mapped TPU-DSP mailboxes. The
- * list of virtual cores to unmap is in this descriptor.
- *
- * The caller must make sure @vd will not be released for the duration of the
- * call.
+ * @gdomain: The IOMMU domain the mappings were mapped on
+ * @mbx_desc: Structure holding info for already mapped TPU-DSP mailboxes.
*/
void gxp_dma_unmap_tpu_buffer(struct gxp_dev *gxp,
- struct gxp_virtual_device *vd,
+ struct gxp_iommu_domain *gdomain,
struct gxp_tpu_mbx_desc mbx_desc);
-#endif // (CONFIG_GXP_TEST || CONFIG_ANDROID) && !CONFIG_GXP_GEM5
+#endif // (CONFIG_GXP_TEST || CONFIG_ANDROID) && !CONFIG_GXP_GEM5
/**
* gxp_dma_map_allocated_coherent_buffer() - Map a coherent buffer
* @gxp: The GXP device to map the allocated buffer for
- * @vd: The virtual device including the virtual cores the mapping is for
- * @virt_core_list: A bitfield enumerating the virtual cores the mapping is for
- * @size: The size of the allocated buffer, in bytes
- * @dma_handle: The allocated device IOVA
+ * @buf: The coherent buffer
+ * @gdomain: The IOMMU domain to be mapped on
* @gxp_dma_flags: The type of mapping to create; currently unused
*
- * Return: Kernel virtual address of the mapped buffer
- *
- * The caller must make sure @vd will not be released for the duration of the
- * call.
+ * Return: 0 on success, or a negative error code on failure
*/
-int gxp_dma_map_allocated_coherent_buffer(struct gxp_dev *gxp, void *buf,
- struct gxp_virtual_device *vd,
- uint virt_core_list, size_t size,
- dma_addr_t dma_handle,
+int gxp_dma_map_allocated_coherent_buffer(struct gxp_dev *gxp,
+ struct gxp_coherent_buf *buf,
+ struct gxp_iommu_domain *gdomain,
uint gxp_dma_flags);
/**
* gxp_dma_unmap_allocated_coherent_buffer() - Unmap a coherent buffer
* @gxp: The GXP device the buffer was allocated and mapped for
- * @vd: The virtual device including the virtual cores the mapping was for
- * @virt_core_list: A bitfield enumerating the virtual cores the mapping was for
- * @size: The size of the buffer, in bytes
- * @dma_handle: The device IOVA
- *
- * The caller must make sure @vd will not be released for the duration of the
- * call.
+ * @gdomain: The IOMMU domain the buffer was mapped on
+ * @buf: The coherent buffer
*/
void gxp_dma_unmap_allocated_coherent_buffer(struct gxp_dev *gxp,
- struct gxp_virtual_device *vd,
- uint virt_core_list, size_t size,
- dma_addr_t dma_handle);
+ struct gxp_iommu_domain *gdomain,
+ struct gxp_coherent_buf *buf);
/**
 * gxp_dma_alloc_coherent_buf() - Allocate and map a coherent buffer for a GXP core
* @gxp: The GXP device to map the allocated buffer for
- * @vd: The virtual device including the virtual cores the mapping is for
- * @virt_core_list: A bitfield enumerating the virtual cores the mapping is for
+ * @gdomain: The IOMMU domain the buffer is to be mapped on
* @size: The size of the buffer to be allocated, in bytes
- * @dma_handle: Reference to a variable to be set to the allocated IOVA
* @flag: The type of memory to allocate (see kmalloc)
* @gxp_dma_flags: The type of mapping to create; Currently unused
+ * @buffer: The coherent buffer
*
- * Return: Kernel virtual address of the allocated/mapped buffer
- *
- * If the passed @vd is a null pointer, this function will only allocate a
- * buffer but not map it to any particular core.
+ * Return: 0 on success, or a negative error code on failure
*
- * The caller must make sure @vd will not be released for the duration of the
- * call.
+ * If the passed @gdomain is a null pointer, this function only allocates a
+ * buffer and does not map it to any domain.
+ * Note: The allocated buffer may be larger than the requested size.
*/
-void *gxp_dma_alloc_coherent(struct gxp_dev *gxp, struct gxp_virtual_device *vd,
- uint virt_core_list, size_t size,
- dma_addr_t *dma_handle, gfp_t flag,
- uint gxp_dma_flags);
+int gxp_dma_alloc_coherent_buf(struct gxp_dev *gxp,
+ struct gxp_iommu_domain *gdomain, size_t size,
+ gfp_t flag, uint gxp_dma_flags,
+ struct gxp_coherent_buf *buffer);
/**
 * gxp_dma_free_coherent_buf() - Unmap and free a coherent buffer
* @gxp: The GXP device the buffer was allocated and mapped for
- * @vd: The virtual device including the virtual cores the mapping was for
- * @virt_core_list: A bitfield enumerating the virtual cores the mapping was for
- * @size: The size of the buffer, in bytes, passed to `gxp_dma_alloc_coherent()`
- * @cpu_addr: The kernel virtual address returned by `gxp_dma_alloc_coherent()`
- * @dma_handle: The device IOVA, set by `gxp_dma_alloc_coherent()`
+ * @gdomain: The IOMMU domain the mapping was mapped to
+ * @buf: The coherent buffer
*
* If the buffer is mapped via `gxp_dma_map_allocated_coherent_buffer`, the
* caller must call `gxp_dma_unmap_allocated_coherent_buffer` to unmap before
* freeing the buffer.
*
- * If the passed @vd is a null pointer, this function will only free the buffer
- * but not do any unmapping.
- *
- * The caller must make sure @vd will not be released for the duration of the
- * call.
- */
-void gxp_dma_free_coherent(struct gxp_dev *gxp, struct gxp_virtual_device *vd,
- uint virt_core_list, size_t size, void *cpu_addr,
- dma_addr_t dma_handle);
-
-/**
- * gxp_dma_map_single() - Create a mapping for a kernel buffer
- * @gxp: The GXP device to map the buffer for
- * @vd: The virtual device including the virtual cores the mapping is for
- * @virt_core_list: A bitfield enumerating the virtual cores the mapping is for
- * @cpu_addr: The kernel virtual address of the buffer to map
- * @size: The size of the buffer to map, in bytes
- * @direction: DMA direction
- * @attrs: The same set of flags used by the base DMA API
- * @gxp_dma_flags: The type of mapping to create; Currently unused
- *
- * Return: The IOVA the buffer was mapped to
- *
- * The caller must make sure @vd will not be released for the duration of the
- * call.
- */
-dma_addr_t gxp_dma_map_single(struct gxp_dev *gxp,
- struct gxp_virtual_device *vd,
- uint virt_core_list, void *cpu_addr, size_t size,
- enum dma_data_direction direction,
- unsigned long attrs, uint gxp_dma_flags);
-/**
- * gxp_dma_unmap_single() - Unmap a kernel buffer
- * @gxp: The GXP device the buffer was mapped for
- * @vd: The virtual device including the virtual cores the mapping was for
- * @virt_core_list: A bitfield enumerating the virtual cores the mapping was for
- * @dma_addr: The device IOVA, returned by `gxp_dma_map_single()`
- * @size: The size of the mapping, which was passed to `gxp_dma_map_single()`
- * @direction: DMA direction; same as passed to `gxp_dma_map_single()`
- * @attrs: The same set of flags used by the base DMA API
- *
- * The caller must make sure @vd will not be released for the duration of the
- * call.
- */
-void gxp_dma_unmap_single(struct gxp_dev *gxp, struct gxp_virtual_device *vd,
- uint virt_core_list, dma_addr_t dma_addr, size_t size,
- enum dma_data_direction direction,
- unsigned long attrs);
-
-/**
- * gxp_dma_map_page() - Create a mapping for a physical page of memory
- * @gxp: The GXP device to map the page for
- * @vd: The virtual device including the virtual cores the mapping is for
- * @virt_core_list: A bitfield enumerating the virtual cores the mapping is for
- * @page: The `struct page` of the physical page to create a mapping for
- * @offset: The offset into @page to begin the mapping at
- * @size: The number of bytes in @page to map
- * @direction: DMA direction
- * @attrs: The same set of flags used by the base DMA API
- * @gxp_dma_flags: The type of mapping to create; Currently unused
- *
- * Return: The IOVA the page was mapped to
- *
- * The caller must make sure @vd will not be released for the duration of the
- * call.
- */
-dma_addr_t gxp_dma_map_page(struct gxp_dev *gxp, struct gxp_virtual_device *vd,
- uint virt_core_list, struct page *page,
- unsigned long offset, size_t size,
- enum dma_data_direction direction,
- unsigned long attrs, uint gxp_dma_flags);
-/**
- * gxp_dma_unmap_page() - Unmap a physical page of memory
- * @gxp: The GXP device the page was mapped for
- * @vd: The virtual device including the virtual cores the mapping was for
- * @virt_core_list: A bitfield enumerating the virtual cores the mapping was for
- * @dma_addr: The device IOVA, returned by `gxp_dma_map_page()`
- * @size: The size of the mapping, which was passed to `gxp_dma_map_page()`
- * @direction: DMA direction; Same as passed to `gxp_dma_map_page()`
- * @attrs: The same set of flags used by the base DMA API
- *
- * The caller must make sure @vd will not be released for the duration of the
- * call.
- */
-void gxp_dma_unmap_page(struct gxp_dev *gxp, struct gxp_virtual_device *vd,
- uint virt_core_list, dma_addr_t dma_addr, size_t size,
- enum dma_data_direction direction, unsigned long attrs);
-
-/**
- * gxp_dma_map_resource() - Create a mapping for an MMIO resource
- * @gxp: The GXP device to map the resource for
- * @vd: The virtual device including the virtual cores the mapping is for
- * @virt_core_list: A bitfield enumerating the virtual cores the mapping is for
- * @phys_addr: The physical address of the MMIO resource to map
- * @size: The size of the MMIO region to map, in bytes
- * @direction: DMA direction
- * @attrs: The same set of flags used by the base DMA API
- * @gxp_dma_flags: The type of mapping to create; Currently unused
- *
- * Return: The IOVA the MMIO resource was mapped to
- *
- * The caller must make sure @vd will not be released for the duration of the
- * call.
+ * If the passed @gdomain is a null pointer, this function only frees the
+ * buffer and does not do any unmapping.
*/
-dma_addr_t gxp_dma_map_resource(struct gxp_dev *gxp,
- struct gxp_virtual_device *vd,
- uint virt_core_list, phys_addr_t phys_addr,
- size_t size, enum dma_data_direction direction,
- unsigned long attrs, uint gxp_dma_flags);
-/**
- * gxp_dma_unmap_resource() - Unmap an MMIO resource
- * @gxp: The GXP device the MMIO resource was mapped for
- * @vd: The virtual device including the virtual cores the mapping was for
- * @virt_core_list: A bitfield enumerating the virtual cores the mapping was for
- * @dma_addr: The device IOVA, returned by `gxp_dma_map_resource()`
- * @size: The size of the mapping, which was passed to `gxp_dma_map_resource()`
- * @direction: DMA direction; Same as passed to `gxp_dma_map_resource()`
- * @attrs: The same set of flags used by the base DMA API
- *
- * The caller must make sure @vd will not be released for the duration of the
- * call.
- */
-void gxp_dma_unmap_resource(struct gxp_dev *gxp, struct gxp_virtual_device *vd,
- uint virt_core_list, dma_addr_t dma_addr,
- size_t size, enum dma_data_direction direction,
- unsigned long attrs);
+void gxp_dma_free_coherent_buf(struct gxp_dev *gxp,
+ struct gxp_iommu_domain *gdomain,
+ struct gxp_coherent_buf *buf);
/**
* gxp_dma_map_sg() - Create a mapping for a scatter-gather list
* @gxp: The GXP device to map the scatter-gather list for
- * @vd: The virtual device including the virtual cores the mapping is for
- * @virt_core_list: A bitfield enumerating the virtual cores the mapping is for
+ * @gdomain: The IOMMU domain to map the scatter-gather list on
* @sg: The scatter-gather list of the buffer to be mapped
* @nents: The number of entries in @sg
* @direction: DMA direction
* @attrs: The same set of flags used by the base DMA API
- * @gxp_dma_flags: The type of mapping to create; Currently unused
+ * @gxp_dma_flags: The type of mapping to create
*
* Return: The number of scatter-gather entries mapped to
- *
- * The caller must make sure @vd will not be released for the duration of the
- * call.
*/
-int gxp_dma_map_sg(struct gxp_dev *gxp, struct gxp_virtual_device *vd,
- int virt_core_list, struct scatterlist *sg, int nents,
+int gxp_dma_map_sg(struct gxp_dev *gxp, struct gxp_iommu_domain *gdomain,
+ struct scatterlist *sg, int nents,
enum dma_data_direction direction, unsigned long attrs,
uint gxp_dma_flags);
/**
* gxp_dma_unmap_sg() - Unmap a scatter-gather list
* @gxp: The GXP device the scatter-gather list was mapped for
- * @vd: The virtual device including the virtual cores the mapping was for
- * @virt_core_list: A bitfield enumerating the virtual cores the mapping was for
+ * @gdomain: The IOMMU domain the mapping was created on
* @sg: The scatter-gather list to unmap; The same one passed to
* `gxp_dma_map_sg()`
* @nents: The number of entries in @sg; Same value passed to `gxp_dma_map_sg()`
* @direction: DMA direction; Same as passed to `gxp_dma_map_sg()`
* @attrs: The same set of flags used by the base DMA API
- *
- * The caller must make sure @vd will not be released for the duration of the
- * call.
*/
-void gxp_dma_unmap_sg(struct gxp_dev *gxp, struct gxp_virtual_device *vd,
- uint virt_core_list, struct scatterlist *sg, int nents,
+void gxp_dma_unmap_sg(struct gxp_dev *gxp, struct gxp_iommu_domain *gdomain,
+ struct scatterlist *sg, int nents,
enum dma_data_direction direction, unsigned long attrs);
/**
- * gxp_dma_sync_single_for_cpu() - Sync buffer for reading by the CPU
- * @gxp: The GXP device the mapping was created for
- * @dma_handle: The device IOVA, obtained from one of the `gxp_dma_map_*` APIs
- * @size: The size of the mapped region to sync
- * @direction: DMA direction
+ * gxp_dma_map_iova_sgt() - Create a mapping for a scatter-gather list at a specific IOVA
+ * @gxp: The GXP device to map the scatter-gather list for
+ * @gdomain: The IOMMU domain to map the scatter-gather list on
+ * @iova: The IOVA to be mapped.
+ * @sgt: The scatter-gather list table of the buffer to be mapped
+ * @prot: The protection bits to be passed to IOMMU API
+ *
+ * Return: 0 on success. Negative errno otherwise.
*/
-void gxp_dma_sync_single_for_cpu(struct gxp_dev *gxp, dma_addr_t dma_handle,
- size_t size,
- enum dma_data_direction direction);
+int gxp_dma_map_iova_sgt(struct gxp_dev *gxp, struct gxp_iommu_domain *gdomain,
+ dma_addr_t iova, struct sg_table *sgt, int prot);
/**
- * gxp_dma_sync_single_for_device() - Sync buffer for reading by the device
- * @gxp: The GXP device the mapping was created for
- * @dma_handle: The device IOVA, obtained from one of the `gxp_dma_map_*` APIs
- * @size: The size of the mapped region to sync
- * @direction: DMA direction
+ * gxp_dma_unmap_iova_sgt() - Revert gxp_dma_map_iova_sgt()
+ * @gxp: The GXP device the scatter-gather list was mapped for
+ * @gdomain: The IOMMU domain the mapping was created on
+ * @iova: The IOVA to be unmapped.
+ * @sgt: The scatter-gather list to unmap; The same one passed to
+ * `gxp_dma_map_iova_sgt()`
*/
-void gxp_dma_sync_single_for_device(struct gxp_dev *gxp, dma_addr_t dma_handle,
- size_t size,
- enum dma_data_direction direction);
+void gxp_dma_unmap_iova_sgt(struct gxp_dev *gxp,
+ struct gxp_iommu_domain *gdomain, dma_addr_t iova,
+ struct sg_table *sgt);
/**
* gxp_dma_sync_sg_for_cpu() - Sync sg list for reading by the CPU
@@ -448,42 +303,58 @@ void gxp_dma_sync_sg_for_device(struct gxp_dev *gxp, struct scatterlist *sg,
/**
* gxp_dma_map_dmabuf_attachment() - Create a mapping for a dma-buf
* @gxp: The GXP device to map the dma-buf for
- * @vd: The virtual device including the virtual cores the dma-buf is for
- * @virt_core_list: A bitfield enumerating the virtual cores the dma-buf is for
+ * @gdomain: The IOMMU domain the dma-buf is to be mapped on
* @attachment: An attachment, representing the dma-buf, obtained from
* `dma_buf_attach()`
* @direction: DMA direction
*
* Return: A scatter-gather table describing the mapping of the dma-buf
* into the default IOMMU domain. Returns ERR_PTR on failure.
- *
- * The caller must make sure @vd will not be released for the duration of the
- * call.
*/
-struct sg_table *gxp_dma_map_dmabuf_attachment(
- struct gxp_dev *gxp, struct gxp_virtual_device *vd, uint virt_core_list,
- struct dma_buf_attachment *attachment,
- enum dma_data_direction direction);
+struct sg_table *
+gxp_dma_map_dmabuf_attachment(struct gxp_dev *gxp,
+ struct gxp_iommu_domain *gdomain,
+ struct dma_buf_attachment *attachment,
+ enum dma_data_direction direction);
/**
* gxp_dma_unmap_dmabuf_attachment() - Unmap a dma-buf
* @gxp: The GXP device the dma-buf was mapped for
- * @vd: The virtual device including the virtual cores the dma-buf is for
- * @virt_core_list: A bitfield enumerating the virtual cores the dma-buf was for
+ * @gdomain: The IOMMU domain the buffer was mapped on
* @attachment: The attachment, representing the dma-buf, that was passed to
* `gxp_dma_map_dmabuf_attachment()` to create the mapping
* @sgt: The scatter-gather table returned by `gxp_dma_map_dmabuf_attachment()`
* when mapping this dma-buf
* @direction: DMA direction
- *
- * The caller must make sure @vd will not be released for the duration of the
- * call.
*/
void gxp_dma_unmap_dmabuf_attachment(struct gxp_dev *gxp,
- struct gxp_virtual_device *vd,
- uint virt_core_list,
+ struct gxp_iommu_domain *gdomain,
struct dma_buf_attachment *attachment,
struct sg_table *sgt,
enum dma_data_direction direction);
+/**
+ * gxp_iommu_get_domain_for_dev() - Get default domain
+ * @gxp: The GXP device to get the default domain for
+ *
+ * Return: The gxp_iommu_domain wrapping the device's default IOMMU domain.
+ */
+struct gxp_iommu_domain *gxp_iommu_get_domain_for_dev(struct gxp_dev *gxp);
+
+/**
+ * gxp_iommu_aux_get_pasid() - Get the PASID corresponding to @gdomain
+ * @gxp: The GXP device attached to IOMMU
+ * @gdomain: The IOMMU domain to get the PASID for
+ *
+ * Return: PASID of the passed domain
+ */
+uint gxp_iommu_aux_get_pasid(struct gxp_dev *gxp,
+ struct gxp_iommu_domain *gdomain);
+
+/**
+ * gxp_iommu_setup_shareability() - Set shareability to enable IO-Coherency.
+ * @gxp: The GXP device to set shareability for
+ */
+void gxp_iommu_setup_shareability(struct gxp_dev *gxp);
+
#endif /* __GXP_DMA_H__ */
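/*
 * Illustrative usage sketch for gxp_dma_map_iova_sgt()/gxp_dma_unmap_iova_sgt()
 * declared above (not part of this patch). GXP_EXAMPLE_IOVA is a made-up
 * address; a real caller would use a firmware-defined fixed IOVA.
 */
#define GXP_EXAMPLE_IOVA 0x10000000

static int gxp_example_map_fw_data(struct gxp_dev *gxp,
				   struct gxp_iommu_domain *gdomain,
				   struct sg_table *sgt)
{
	int ret;

	ret = gxp_dma_map_iova_sgt(gxp, gdomain, GXP_EXAMPLE_IOVA, sgt,
				   IOMMU_READ | IOMMU_WRITE);
	if (ret)
		return ret;

	/* ... the cores may now access the region at GXP_EXAMPLE_IOVA ... */

	gxp_dma_unmap_iova_sgt(gxp, gdomain, GXP_EXAMPLE_IOVA, sgt);
	return 0;
}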
diff --git a/gxp-dmabuf.c b/gxp-dmabuf.c
index 789efeb..85fd832 100644
--- a/gxp-dmabuf.c
+++ b/gxp-dmabuf.c
@@ -8,9 +8,11 @@
#include <linux/dma-buf.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
+#include <linux/version.h>
#include "gxp-dma.h"
#include "gxp-dmabuf.h"
+#include "gxp-vd.h"
struct gxp_dmabuf_mapping {
struct gxp_mapping mapping;
@@ -34,13 +36,12 @@ static void destroy_dmabuf_mapping(struct gxp_mapping *mapping)
{
struct gxp_dmabuf_mapping *dmabuf_mapping;
struct gxp_dev *gxp = mapping->gxp;
- struct gxp_virtual_device *vd = mapping->vd;
/* Unmap and detach the dma-buf */
dmabuf_mapping =
container_of(mapping, struct gxp_dmabuf_mapping, mapping);
- gxp_dma_unmap_dmabuf_attachment(gxp, vd, mapping->virt_core_list,
+ gxp_dma_unmap_dmabuf_attachment(gxp, mapping->domain,
dmabuf_mapping->attachment,
dmabuf_mapping->sgt, mapping->dir);
dma_buf_detach(dmabuf_mapping->dmabuf, dmabuf_mapping->attachment);
@@ -50,9 +51,8 @@ static void destroy_dmabuf_mapping(struct gxp_mapping *mapping)
}
struct gxp_mapping *gxp_dmabuf_map(struct gxp_dev *gxp,
- struct gxp_virtual_device *vd,
- uint virt_core_list, int fd, u32 flags,
- enum dma_data_direction dir)
+ struct gxp_iommu_domain *domain, int fd,
+ u32 flags, enum dma_data_direction dir)
{
struct dma_buf *dmabuf;
struct dma_buf_attachment *attachment;
@@ -78,7 +78,7 @@ struct gxp_mapping *gxp_dmabuf_map(struct gxp_dev *gxp,
goto err_attach;
}
- sgt = gxp_dma_map_dmabuf_attachment(gxp, vd, virt_core_list, attachment, dir);
+ sgt = gxp_dma_map_dmabuf_attachment(gxp, domain, attachment, dir);
if (IS_ERR(sgt)) {
dev_err(gxp->dev,
"Failed to map dma-buf attachment (ret=%ld)\n",
@@ -98,8 +98,7 @@ struct gxp_mapping *gxp_dmabuf_map(struct gxp_dev *gxp,
dmabuf_mapping->mapping.destructor = destroy_dmabuf_mapping;
dmabuf_mapping->mapping.host_address = 0;
dmabuf_mapping->mapping.gxp = gxp;
- dmabuf_mapping->mapping.virt_core_list = virt_core_list;
- dmabuf_mapping->mapping.vd = vd;
+ dmabuf_mapping->mapping.domain = domain;
dmabuf_mapping->mapping.device_address = sg_dma_address(sgt->sgl);
dmabuf_mapping->mapping.dir = dir;
dmabuf_mapping->dmabuf = dmabuf;
@@ -109,10 +108,14 @@ struct gxp_mapping *gxp_dmabuf_map(struct gxp_dev *gxp,
return &dmabuf_mapping->mapping;
err_alloc_mapping:
- gxp_dma_unmap_dmabuf_attachment(gxp, vd, virt_core_list, attachment, sgt, dir);
+ gxp_dma_unmap_dmabuf_attachment(gxp, domain, attachment, sgt, dir);
err_map_attachment:
dma_buf_detach(dmabuf, attachment);
err_attach:
dma_buf_put(dmabuf);
return ERR_PTR(ret);
}
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 16, 0)
+MODULE_IMPORT_NS(DMA_BUF);
+#endif
diff --git a/gxp-dmabuf.h b/gxp-dmabuf.h
index 5803841..8e1e056 100644
--- a/gxp-dmabuf.h
+++ b/gxp-dmabuf.h
@@ -7,7 +7,7 @@
#ifndef __GXP_DMABUF_H__
#define __GXP_DMABUF_H__
-#include <linux/dma-direction.h>
+#include <linux/iommu.h>
#include <linux/types.h>
#include "gxp-internal.h"
@@ -16,8 +16,7 @@
/**
* gxp_dmabuf_map() - Map a dma-buf for access by the specified virtual device
* @gxp: The GXP device to map the dma-buf for
- * @vd: The virtual device includes the virtual cores the dma-buf is mapped for
- * @virt_core_list: A bitfield enumerating the virtual cores the mapping is for
+ * @domain: The IOMMU domain the dma-buf is mapped on
* @fd: A file descriptor for the dma-buf to be mapped
* @flags: The type of mapping to create; Currently unused
* @direction: DMA direction
@@ -28,8 +27,7 @@
* mapping of the dma-buf. Returns ERR_PTR on failure.
*/
struct gxp_mapping *gxp_dmabuf_map(struct gxp_dev *gxp,
- struct gxp_virtual_device *vd,
- uint virt_core_list, int fd, u32 flags,
- enum dma_data_direction dir);
+ struct gxp_iommu_domain *domain, int fd,
+ u32 flags, enum dma_data_direction dir);
#endif /* __GXP_DMABUF_H__ */
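/*
 * Illustrative usage sketch for gxp_dmabuf_map() above (not part of this
 * patch). @fd is assumed to be a dma-buf file descriptor from user space, and
 * gxp_mapping_put() stands in for however the caller releases the mapping
 * through the driver's gxp_mapping refcounting.
 */
static int gxp_example_import_dmabuf(struct gxp_dev *gxp,
				     struct gxp_iommu_domain *domain, int fd)
{
	struct gxp_mapping *mapping;

	mapping = gxp_dmabuf_map(gxp, domain, fd, /*flags=*/0,
				 DMA_BIDIRECTIONAL);
	if (IS_ERR(mapping))
		return PTR_ERR(mapping);

	/* mapping->device_address is the IOVA the cores should use. */

	gxp_mapping_put(mapping);
	return 0;
}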
diff --git a/gxp-domain-pool.c b/gxp-domain-pool.c
index 53a5b38..a0f9ead 100644
--- a/gxp-domain-pool.c
+++ b/gxp-domain-pool.c
@@ -5,100 +5,75 @@
* Copyright (C) 2022 Google LLC
*/
-#include <linux/idr.h>
#include <linux/iommu.h>
#include <linux/slab.h>
+#include <gcip/gcip-domain-pool.h>
+
+#include "gxp-dma.h"
#include "gxp-domain-pool.h"
-#include "gxp-internal.h"
-int gxp_domain_pool_init(struct gxp_dev *gxp, struct gxp_domain_pool *pool,
+int gxp_domain_pool_init(struct gxp_dev *gxp, struct gcip_domain_pool *pool,
unsigned int size)
{
- unsigned int i;
- struct iommu_domain *domain;
-
- pool->size = size;
- pool->gxp = gxp;
-
- if (!size)
- return 0;
+ int ret = gcip_domain_pool_init(gxp->dev, pool, size);
+ __maybe_unused int i;
- dev_dbg(pool->gxp->dev, "Initializing domain pool with %u domains\n", size);
+ if (ret)
+ return ret;
- ida_init(&pool->idp);
- pool->array = vzalloc(sizeof(*pool->array) * size);
- if (!pool->array) {
- dev_err(gxp->dev, "Failed to allocate memory for domain pool array\n");
- return -ENOMEM;
- }
+#if IS_ENABLED(CONFIG_GXP_GEM5)
for (i = 0; i < size; i++) {
- domain = iommu_domain_alloc(pool->gxp->dev->bus);
- if (!domain) {
- dev_err(pool->gxp->dev,
- "Failed to allocate iommu domain %d of %u\n",
- i + 1, size);
+ struct iommu_domain *domain = pool->array[i];
+
+ /*
+ * Gem5 uses arm-smmu-v3, which requires the domain to be finalized before
+ * iommu map can be done on it. Call iommu_aux_attach_device() to finalize the
+ * allocated domain, then detach the device right after.
+ */
+ ret = iommu_aux_attach_device(domain, gxp->dev);
+ if (ret) {
+ dev_err(gxp->dev,
+ "Failed to attach device to iommu domain %d of %u, ret=%d\n",
+ i + 1, size, ret);
gxp_domain_pool_destroy(pool);
- return -ENOMEM;
+ return ret;
}
- pool->array[i] = domain;
+
+ iommu_aux_detach_device(domain, gxp->dev);
}
+#endif /* CONFIG_GXP_GEM5 */
+
return 0;
}
-struct iommu_domain *gxp_domain_pool_alloc(struct gxp_domain_pool *pool)
+struct gxp_iommu_domain *gxp_domain_pool_alloc(struct gcip_domain_pool *pool)
{
- int id;
-
- if (!pool->size)
- return iommu_domain_alloc(pool->gxp->dev->bus);
+ struct iommu_domain *domain = gcip_domain_pool_alloc(pool);
+ struct gxp_iommu_domain *gdomain;
- id = ida_alloc_max(&pool->idp, pool->size - 1, GFP_KERNEL);
+ if (!domain)
+ return NULL;
- if (id < 0) {
- dev_err(pool->gxp->dev,
- "No more domains available from pool of size %u\n",
- pool->size);
+ gdomain = kmalloc(sizeof(*gdomain), GFP_KERNEL);
+ if (!gdomain) {
+ gcip_domain_pool_free(pool, domain);
return NULL;
}
- dev_dbg(pool->gxp->dev, "Allocated domain from pool with id = %d\n", id);
+ gdomain->domain = domain;
- return pool->array[id];
+ return gdomain;
}
-void gxp_domain_pool_free(struct gxp_domain_pool *pool, struct iommu_domain *domain)
+void gxp_domain_pool_free(struct gcip_domain_pool *pool,
+ struct gxp_iommu_domain *gdomain)
{
- int id;
-
- if (!pool->size) {
- iommu_domain_free(domain);
- return;
- }
- for (id = 0; id < pool->size; id++) {
- if (pool->array[id] == domain) {
- dev_dbg(pool->gxp->dev, "Released domain from pool with id = %d\n", id);
- ida_free(&pool->idp, id);
- return;
- }
- }
- dev_err(pool->gxp->dev, "%s: domain not found in pool", __func__);
+ gcip_domain_pool_free(pool, gdomain->domain);
+ kfree(gdomain);
}
-void gxp_domain_pool_destroy(struct gxp_domain_pool *pool)
+void gxp_domain_pool_destroy(struct gcip_domain_pool *pool)
{
- int i;
-
- if (!pool->size)
- return;
-
- dev_dbg(pool->gxp->dev, "Destroying domain pool with %u domains\n", pool->size);
-
- for (i = 0; i < pool->size; i++) {
- if (pool->array[i])
- iommu_domain_free(pool->array[i]);
- }
-
- ida_destroy(&pool->idp);
- vfree(pool->array);
+ gcip_domain_pool_destroy(pool);
}
diff --git a/gxp-domain-pool.h b/gxp-domain-pool.h
index ee95155..ad2d38a 100644
--- a/gxp-domain-pool.h
+++ b/gxp-domain-pool.h
@@ -8,22 +8,9 @@
#ifndef __GXP_DOMAIN_POOL_H__
#define __GXP_DOMAIN_POOL_H__
-#include <linux/idr.h>
-#include <linux/iommu.h>
-
-#include "gxp-internal.h"
-
-struct gxp_domain_pool {
- struct ida idp; /* ID allocator to keep track of used domains. */
- /*
- * Size of the pool. Can be set to 0, in which case the implementation will fall back to
- * dynamic domain allocation using the IOMMU API directly.
- */
- unsigned int size;
- struct iommu_domain **array; /* Array holding the pointers to pre-allocated domains. */
- struct gxp_dev *gxp; /* The gxp device used for logging warnings/errors. */
-};
+#include <gcip/gcip-domain-pool.h>
+#include "gxp-dma.h"
/*
* Initializes a domain pool.
@@ -35,19 +22,19 @@ struct gxp_domain_pool {
*
* returns 0 on success or negative error value.
*/
-int gxp_domain_pool_init(struct gxp_dev *gxp, struct gxp_domain_pool *pool,
+int gxp_domain_pool_init(struct gxp_dev *gxp, struct gcip_domain_pool *pool,
unsigned int size);
/*
* Allocates a domain from the pool
* returns NULL on error.
*/
-struct iommu_domain *gxp_domain_pool_alloc(struct gxp_domain_pool *pool);
+struct gxp_iommu_domain *gxp_domain_pool_alloc(struct gcip_domain_pool *pool);
/* Releases a domain from the pool. */
-void gxp_domain_pool_free(struct gxp_domain_pool *pool, struct iommu_domain *domain);
+void gxp_domain_pool_free(struct gcip_domain_pool *pool,
+ struct gxp_iommu_domain *gdomain);
/* Cleans up all resources used by the domain pool. */
-void gxp_domain_pool_destroy(struct gxp_domain_pool *pool);
-
+void gxp_domain_pool_destroy(struct gcip_domain_pool *pool);
#endif /* __GXP_DOMAIN_POOL_H__ */
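The pool API above is now a thin wrapper over gcip_domain_pool that hands out
gxp_iommu_domain containers. A minimal usage sketch of the functions declared here; the
pool size and what is done with gdomain->domain are illustrative only:

	struct gcip_domain_pool pool;
	struct gxp_iommu_domain *gdomain;
	int ret;

	ret = gxp_domain_pool_init(gxp, &pool, /*size=*/GXP_NUM_CORES);
	if (ret)
		return ret;

	gdomain = gxp_domain_pool_alloc(&pool);	/* returns NULL on failure */
	if (gdomain) {
		/* ... map buffers against gdomain->domain ... */
		gxp_domain_pool_free(&pool, gdomain);
	}

	gxp_domain_pool_destroy(&pool);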
diff --git a/gxp-doorbell.c b/gxp-doorbell.c
index 0fc6389..491fb5b 100644
--- a/gxp-doorbell.c
+++ b/gxp-doorbell.c
@@ -19,9 +19,9 @@ void gxp_doorbell_enable_for_core(struct gxp_dev *gxp, u32 doorbell_num,
u32 val;
/* Enable DOORBELL_NUM on requested core */
- val = gxp_read_32_core(gxp, core, GXP_REG_COMMON_INT_MASK_0);
+ val = gxp_read_32(gxp, GXP_CORE_REG_COMMON_INT_MASK_0(core));
val |= BIT(doorbell_num);
- gxp_write_32_core(gxp, core, GXP_REG_COMMON_INT_MASK_0, val);
+ gxp_write_32(gxp, GXP_CORE_REG_COMMON_INT_MASK_0(core), val);
}
void gxp_doorbell_set(struct gxp_dev *gxp, u32 doorbell_num)
diff --git a/gxp-firmware-data.c b/gxp-firmware-data.c
index d1def41..6f22f8d 100644
--- a/gxp-firmware-data.c
+++ b/gxp-firmware-data.c
@@ -70,7 +70,7 @@ struct gxp_fw_data_manager {
/* Doorbells allocator and reserved doorbell IDs */
struct range_alloc *doorbell_allocator;
- int core_wakeup_doorbells[GXP_NUM_CORES];
+ int core_wakeup_doorbells[GXP_NUM_WAKEUP_DOORBELLS];
int semaphore_doorbells[GXP_NUM_CORES];
/* Sync barriers allocator and reserved sync barrier IDs */
@@ -87,7 +87,7 @@ struct gxp_fw_data_manager {
struct fw_memory_allocator *allocator;
struct fw_memory sys_desc_mem;
struct fw_memory wdog_mem;
- struct fw_memory telemetry_mem;
+ struct fw_memory core_telemetry_mem;
struct fw_memory debug_dump_mem;
};
@@ -266,18 +266,18 @@ static struct fw_memory init_watchdog(struct gxp_fw_data_manager *mgr)
return mem;
}
-static struct fw_memory init_telemetry(struct gxp_fw_data_manager *mgr)
+static struct fw_memory init_core_telemetry(struct gxp_fw_data_manager *mgr)
{
- struct gxp_telemetry_descriptor *tel_region;
+ struct gxp_core_telemetry_descriptor *tel_region;
struct fw_memory mem;
mem_alloc_allocate(mgr->allocator, &mem, sizeof(*tel_region),
- __alignof__(struct gxp_telemetry_descriptor));
+ __alignof__(struct gxp_core_telemetry_descriptor));
tel_region = mem.host_addr;
/*
- * Telemetry is disabled for now.
+ * Core telemetry is disabled for now.
* Subsequent calls to the FW data module can be used to populate or
* depopulate the descriptor pointers on demand.
*/
@@ -292,7 +292,7 @@ static struct fw_memory init_debug_dump(struct gxp_dev *gxp)
if (gxp->debug_dump_mgr) {
mem.host_addr = gxp->debug_dump_mgr->buf.vaddr;
- mem.device_addr = gxp->debug_dump_mgr->buf.daddr;
+ mem.device_addr = gxp->debug_dump_mgr->buf.dsp_addr;
mem.sz = gxp->debug_dump_mgr->buf.size;
} else {
mem.host_addr = 0;
@@ -510,7 +510,7 @@ int gxp_fw_data_init(struct gxp_dev *gxp)
/* Allocate doorbells */
/* Pinned: Cores wakeup doorbell */
- for (i = 0; i < GXP_NUM_CORES; i++) {
+ for (i = 0; i < GXP_NUM_WAKEUP_DOORBELLS; i++) {
mgr->core_wakeup_doorbells[i] = DOORBELL_ID_CORE_WAKEUP(i);
res = range_alloc_get(mgr->doorbell_allocator,
mgr->core_wakeup_doorbells[i]);
@@ -589,9 +589,10 @@ int gxp_fw_data_init(struct gxp_dev *gxp)
mgr->wdog_mem = init_watchdog(mgr);
mgr->system_desc->watchdog_dev_addr = mgr->wdog_mem.device_addr;
- /* Allocate the descriptor for device-side telemetry */
- mgr->telemetry_mem = init_telemetry(mgr);
- mgr->system_desc->telemetry_dev_addr = mgr->telemetry_mem.device_addr;
+ /* Allocate the descriptor for device-side core telemetry */
+ mgr->core_telemetry_mem = init_core_telemetry(mgr);
+ mgr->system_desc->core_telemetry_dev_addr =
+ mgr->core_telemetry_mem.device_addr;
/* Set the debug dump region parameters if available */
mgr->debug_dump_mem = init_debug_dump(gxp);
@@ -610,6 +611,7 @@ void *gxp_fw_data_create_app(struct gxp_dev *gxp, uint core_list)
{
struct gxp_fw_data_manager *mgr = gxp->data_mgr;
struct app_metadata *app;
+ void *err;
int i;
app = kzalloc(sizeof(struct app_metadata), GFP_KERNEL);
@@ -626,6 +628,11 @@ void *gxp_fw_data_create_app(struct gxp_dev *gxp, uint core_list)
app->user_doorbells_count = DEFAULT_APP_USER_DOORBELL_COUNT;
app->user_doorbells =
kcalloc(app->user_doorbells_count, sizeof(int), GFP_KERNEL);
+ if (!app->user_doorbells) {
+ err = ERR_PTR(-ENOMEM);
+ goto err_user_doorbells;
+ }
+
for (i = 0; i < app->user_doorbells_count; i++) {
range_alloc_get_any(mgr->doorbell_allocator,
&app->user_doorbells[i]);
@@ -635,6 +642,11 @@ void *gxp_fw_data_create_app(struct gxp_dev *gxp, uint core_list)
app->user_barriers_count = DEFAULT_APP_USER_BARRIER_COUNT;
app->user_barriers =
kcalloc(app->user_barriers_count, sizeof(int), GFP_KERNEL);
+ if (!app->user_barriers) {
+ err = ERR_PTR(-ENOMEM);
+ goto err_user_barriers;
+ }
+
for (i = 0; i < app->user_barriers_count; i++) {
range_alloc_get_any(mgr->sync_barrier_allocator,
&app->user_barriers[i]);
@@ -650,6 +662,16 @@ void *gxp_fw_data_create_app(struct gxp_dev *gxp, uint core_list)
}
return app;
+
+err_user_barriers:
+ for (i = 0; i < app->user_doorbells_count; i++)
+ range_alloc_put(mgr->doorbell_allocator,
+ app->user_doorbells[i]);
+ kfree(app->user_doorbells);
+err_user_doorbells:
+ kfree(app);
+
+ return err;
}
void gxp_fw_data_destroy_app(struct gxp_dev *gxp, void *application)
@@ -689,7 +711,7 @@ void gxp_fw_data_destroy(struct gxp_dev *gxp)
if (!mgr)
return;
- mem_alloc_free(mgr->allocator, &mgr->telemetry_mem);
+ mem_alloc_free(mgr->allocator, &mgr->core_telemetry_mem);
mem_alloc_free(mgr->allocator, &mgr->wdog_mem);
mem_alloc_free(mgr->allocator, &mgr->sys_desc_mem);
mem_alloc_destroy(mgr->allocator);
@@ -709,15 +731,16 @@ void gxp_fw_data_destroy(struct gxp_dev *gxp)
}
}
-int gxp_fw_data_set_telemetry_descriptors(struct gxp_dev *gxp, u8 type,
- u32 host_status,
- dma_addr_t *buffer_addrs,
- u32 per_buffer_size)
+int gxp_fw_data_set_core_telemetry_descriptors(struct gxp_dev *gxp, u8 type,
+ u32 host_status,
+ struct gxp_coherent_buf *buffers,
+ u32 per_buffer_size)
{
- struct gxp_telemetry_descriptor *descriptor =
- gxp->data_mgr->telemetry_mem.host_addr;
- struct telemetry_descriptor *core_descriptors;
+ struct gxp_core_telemetry_descriptor *descriptor =
+ gxp->data_mgr->core_telemetry_mem.host_addr;
+ struct core_telemetry_descriptor *core_descriptors;
uint core;
+ bool enable;
if (type == GXP_TELEMETRY_TYPE_LOGGING)
core_descriptors = descriptor->per_core_loggers;
@@ -726,26 +749,37 @@ int gxp_fw_data_set_telemetry_descriptors(struct gxp_dev *gxp, u8 type,
else
return -EINVAL;
- /* Validate that the provided IOVAs are addressable (i.e. 32-bit) */
- for (core = 0; core < GXP_NUM_CORES; core++) {
- if (buffer_addrs[core] > U32_MAX)
- return -EINVAL;
- }
+ enable = (host_status & GXP_CORE_TELEMETRY_HOST_STATUS_ENABLED);
- for (core = 0; core < GXP_NUM_CORES; core++) {
- core_descriptors[core].host_status = host_status;
- core_descriptors[core].buffer_addr = (u32)buffer_addrs[core];
- core_descriptors[core].buffer_size = per_buffer_size;
+ if (enable) {
+ /* Validate that the provided IOVAs are addressable (i.e. 32-bit) */
+ for (core = 0; core < GXP_NUM_CORES; core++) {
+ if (buffers && buffers[core].dsp_addr > U32_MAX &&
+ buffers[core].size == per_buffer_size)
+ return -EINVAL;
+ }
+
+ for (core = 0; core < GXP_NUM_CORES; core++) {
+ core_descriptors[core].host_status = host_status;
+ core_descriptors[core].buffer_addr = (u32)buffers[core].dsp_addr;
+ core_descriptors[core].buffer_size = per_buffer_size;
+ }
+ } else {
+ for (core = 0; core < GXP_NUM_CORES; core++) {
+ core_descriptors[core].host_status = host_status;
+ core_descriptors[core].buffer_addr = 0;
+ core_descriptors[core].buffer_size = 0;
+ }
}
return 0;
}
-u32 gxp_fw_data_get_telemetry_device_status(struct gxp_dev *gxp, uint core,
- u8 type)
+u32 gxp_fw_data_get_core_telemetry_device_status(struct gxp_dev *gxp, uint core,
+ u8 type)
{
- struct gxp_telemetry_descriptor *descriptor =
- gxp->data_mgr->telemetry_mem.host_addr;
+ struct gxp_core_telemetry_descriptor *descriptor =
+ gxp->data_mgr->core_telemetry_mem.host_addr;
if (core >= GXP_NUM_CORES)
return 0;
diff --git a/gxp-firmware-data.h b/gxp-firmware-data.h
index e9851ed..a947cb8 100644
--- a/gxp-firmware-data.h
+++ b/gxp-firmware-data.h
@@ -9,6 +9,7 @@
#ifndef __GXP_FIRMWARE_DATA_H__
#define __GXP_FIRMWARE_DATA_H__
+#include "gxp-dma.h"
#include "gxp-internal.h"
/**
@@ -54,42 +55,43 @@ void gxp_fw_data_destroy_app(struct gxp_dev *gxp, void *application);
void gxp_fw_data_destroy(struct gxp_dev *gxp);
/**
- * gxp_fw_data_set_telemetry_descriptors() - Set new logging or tracing buffers
- * for firmware to write to.
+ * gxp_fw_data_set_core_telemetry_descriptors() - Set new logging or tracing
+ * buffers for firmware to write
+ * to.
* @gxp: The GXP device to set buffer descriptors for
* @type: Either `GXP_TELEMETRY_TYPE_LOGGING` or `GXP_TELEMETRY_TYPE_TRACING`
- * @host_status: Bitfield describing the host's telemetry status. See the
+ * @host_status: Bitfield describing the host's core telemetry status. See the
* bit definitions in gxp-host-device-structs.h.
- * @buffer_addrs: An array containing the IOVA each physical core can access
- * its logging or tracing buffer at
+ * @buffers: An array of coherent buffers for logging and tracing
* @per_buffer_size: The size of each core's logging or tracing buffer in bytes
*
* `gxp_fw_data_init()` must have been called before this function.
*
- * Caller must hold gxp->telemetry_mgr's lock.
+ * Caller must hold gxp->core_telemetry_mgr's lock.
*
* Return:
* 0 - Success
* -EINVAL - Invalid @type provided or @buffers are not addressable by @gxp
*/
-int gxp_fw_data_set_telemetry_descriptors(struct gxp_dev *gxp, u8 type,
- u32 host_status,
- dma_addr_t *buffer_addrs,
- u32 per_buffer_size);
+int gxp_fw_data_set_core_telemetry_descriptors(struct gxp_dev *gxp, u8 type,
+ u32 host_status,
+ struct gxp_coherent_buf *buffers,
+ u32 per_buffer_size);
/**
- * gxp_fw_data_get_telemetry_device_status() - Returns a bitfield describing a
- * core's telemetry status.
- * @gxp: The GXP device to get device telemetry status for
- * @core: The core in @gxp to get the device telemetry status for
+ * gxp_fw_data_get_core_telemetry_device_status() - Returns a bitfield
+ * describing a core's
+ * telemetry status.
+ * @gxp: The GXP device to get core telemetry status for
+ * @core: The core in @gxp to get the core telemetry status for
* @type: Either `GXP_TELEMETRY_TYPE_LOGGING` or `GXP_TELEMETRY_TYPE_TRACING`
*
- * Caller must hold gxp->telemetry_mgr's lock.
+ * Caller must hold gxp->core_telemetry_mgr's lock.
*
* Return: The bitfield describing @core's telemetry status. If @core or @type
* are invalid, the result will always be 0.
*/
-u32 gxp_fw_data_get_telemetry_device_status(struct gxp_dev *gxp, uint core,
- u8 type);
+u32 gxp_fw_data_get_core_telemetry_device_status(struct gxp_dev *gxp, uint core,
+ u8 type);
#endif /* __GXP_FIRMWARE_DATA_H__ */
diff --git a/gxp-firmware.c b/gxp-firmware.c
index eb31f23..fcf6a6f 100644
--- a/gxp-firmware.c
+++ b/gxp-firmware.c
@@ -17,6 +17,8 @@
#include <linux/types.h>
#include "gxp-bpm.h"
+#include "gxp-config.h"
+#include "gxp-core-telemetry.h"
#include "gxp-debug-dump.h"
#include "gxp-doorbell.h"
#include "gxp-firmware.h"
@@ -26,11 +28,11 @@
#include "gxp-mailbox.h"
#include "gxp-notification.h"
#include "gxp-pm.h"
-#include "gxp-telemetry.h"
#include "gxp-vd.h"
-/* Files need to be copied to /lib/firmware */
-#define DSP_FIRMWARE_DEFAULT_PREFIX "gxp_fw_core"
+#if IS_ENABLED(CONFIG_GXP_TEST)
+#include "unittests/factory/fake-gxp-firmware.h"
+#endif
#define FW_HEADER_SIZE (0x1000)
#define FW_IMAGE_TYPE_OFFSET (0x400)
@@ -38,6 +40,9 @@
static int gxp_dsp_fw_auth_disable;
module_param_named(dsp_fw_auth_disable, gxp_dsp_fw_auth_disable, int, 0660);
+static bool gxp_core_boot = true;
+module_param_named(core_boot, gxp_core_boot, bool, 0660);
+
static int
request_dsp_firmware(struct gxp_dev *gxp, char *name_prefix,
const struct firmware *out_firmwares[GXP_NUM_CORES])
@@ -92,59 +97,19 @@ static int elf_load_segments(struct gxp_dev *gxp, const u8 *elf_data,
ehdr = (struct elf32_hdr *)elf_data;
phdr = (struct elf32_phdr *)(elf_data + ehdr->e_phoff);
- if ((ehdr->e_ident[EI_MAG0] != ELFMAG0) ||
- (ehdr->e_ident[EI_MAG1] != ELFMAG1) ||
- (ehdr->e_ident[EI_MAG2] != ELFMAG2) ||
- (ehdr->e_ident[EI_MAG3] != ELFMAG3)) {
- dev_err(gxp->dev, "Cannot load FW! Invalid ELF format.\n");
- return -EINVAL;
- }
-
/* go through the available ELF segments */
for (i = 0; i < ehdr->e_phnum; i++, phdr++) {
- u64 da = phdr->p_paddr;
- u32 memsz = phdr->p_memsz;
- u32 filesz = phdr->p_filesz;
- u32 offset = phdr->p_offset;
+ const u64 da = phdr->p_paddr;
+ const u32 memsz = phdr->p_memsz;
+ const u32 filesz = phdr->p_filesz;
void *ptr;
- if (phdr->p_type != PT_LOAD)
- continue;
-
- if (!phdr->p_flags)
- continue;
-
- if (!memsz)
+ if (phdr->p_type != PT_LOAD || !phdr->p_flags || !memsz)
continue;
- if (!((da >= (u32)buffer->daddr) &&
- ((da + memsz) <= ((u32)buffer->daddr +
- (u32)buffer->size)))) {
- /*
- * Some BSS data may be referenced from TCM, and can be
- * skipped while loading
- */
- dev_err(gxp->dev, "Segment out of bounds: da 0x%llx mem 0x%x. Skipping...\n",
- da, memsz);
+ if (!(da >= buffer->daddr &&
+ da + memsz <= buffer->daddr + buffer->size))
continue;
- }
-
- dev_notice(gxp->dev, "phdr: type %d da 0x%llx memsz 0x%x filesz 0x%x\n",
- phdr->p_type, da, memsz, filesz);
-
- if (filesz > memsz) {
- dev_err(gxp->dev, "Bad phdr filesz 0x%x memsz 0x%x\n",
- filesz, memsz);
- ret = -EINVAL;
- break;
- }
-
- if (offset + filesz > size) {
- dev_err(gxp->dev, "Truncated fw: need 0x%x avail 0x%zx\n",
- offset + filesz, size);
- ret = -EINVAL;
- break;
- }
/* grab the kernel address for this device address */
ptr = buffer->vaddr + (da - buffer->daddr);
@@ -169,6 +134,15 @@ static int elf_load_segments(struct gxp_dev *gxp, const u8 *elf_data,
return ret;
}
+static void elf_fetch_entry_point(struct gxp_dev *gxp, const u8 *elf_data,
+ uint core)
+{
+ struct elf32_hdr *ehdr;
+
+ ehdr = (struct elf32_hdr *)elf_data;
+ gxp->firmware_mgr->entry_points[core] = ehdr->e_entry;
+}
+
static int
gxp_firmware_authenticate(struct gxp_dev *gxp,
const struct firmware *firmwares[GXP_NUM_CORES])
@@ -258,6 +232,112 @@ error:
return ret;
}
+static int gxp_firmware_fetch_boundary(struct gxp_dev *gxp, const u8 *elf_data,
+ size_t size,
+ const struct gxp_mapped_resource *buffer,
+ dma_addr_t *boundary_ptr)
+{
+ struct elf32_hdr *ehdr = (struct elf32_hdr *)elf_data;
+ struct elf32_phdr *phdr = (struct elf32_phdr *)(elf_data + ehdr->e_phoff);
+ int i, ret = 0;
+ dma_addr_t boundary = 0;
+
+ if ((ehdr->e_ident[EI_MAG0] != ELFMAG0) ||
+ (ehdr->e_ident[EI_MAG1] != ELFMAG1) ||
+ (ehdr->e_ident[EI_MAG2] != ELFMAG2) ||
+ (ehdr->e_ident[EI_MAG3] != ELFMAG3)) {
+ dev_err(gxp->dev, "Invalid ELF format.");
+ return -EINVAL;
+ }
+
+ /* go through the available ELF segments */
+ for (i = 0; i < ehdr->e_phnum; i++, phdr++) {
+ const u64 da = phdr->p_paddr;
+ const u32 memsz = phdr->p_memsz;
+ const u32 filesz = phdr->p_filesz;
+ const u32 offset = phdr->p_offset;
+ const u32 p_flags = phdr->p_flags;
+
+ if (phdr->p_type != PT_LOAD || !p_flags || !memsz)
+ continue;
+
+ if (!(da >= buffer->daddr &&
+ da + memsz <= buffer->daddr + buffer->size)) {
+ /*
+ * Some BSS data may be referenced from TCM, and can be
+ * skipped while loading
+ */
+ dev_err(gxp->dev, "Segment out of bounds: da 0x%llx mem 0x%x. Skipping...",
+ da, memsz);
+ continue;
+ }
+
+ dev_info(gxp->dev,
+ "phdr: da %#llx memsz %#x filesz %#x perm %d", da,
+ memsz, filesz, p_flags);
+
+ if (filesz > memsz) {
+ dev_err(gxp->dev, "Bad phdr filesz %#x memsz %#x",
+ filesz, memsz);
+ ret = -EINVAL;
+ break;
+ }
+
+ if (offset + filesz > size) {
+ dev_err(gxp->dev, "Truncated fw: need %#x avail %#zx",
+ offset + filesz, size);
+ ret = -EINVAL;
+ break;
+ }
+ if (p_flags & PF_W) {
+ if (!boundary)
+ boundary = da;
+ } else if (boundary) {
+ dev_err(gxp->dev,
+ "Found RO region after a writable segment");
+ ret = -EINVAL;
+ break;
+ }
+ }
+ /* no boundary has been found - assume the whole image is RO */
+ if (!boundary)
+ boundary = buffer->daddr + buffer->size;
+ if (!ret)
+ *boundary_ptr = boundary;
+
+ return ret;
+}
+
+/*
+ * Sets @rw_boundaries by analyzing LOAD segments in ELF headers.
+ *
+ * Assumes the LOAD segments are arranged with RO first then RW. Returns -EINVAL
+ * if this is not true.
+ */
+static int gxp_firmware_fetch_boundaries(struct gxp_dev *gxp,
+ struct gxp_firmware_manager *mgr)
+{
+ int core, ret;
+
+ for (core = 0; core < GXP_NUM_CORES; core++) {
+ ret = gxp_firmware_fetch_boundary(
+ gxp, mgr->firmwares[core]->data + FW_HEADER_SIZE,
+ mgr->firmwares[core]->size - FW_HEADER_SIZE,
+ &gxp->fwbufs[core], &mgr->rw_boundaries[core]);
+ if (ret) {
+ dev_err(gxp->dev,
+ "failed to fetch boundary of core %d: %d", core,
+ ret);
+ goto error;
+ }
+ }
+ return 0;
+
+error:
+ memset(mgr->rw_boundaries, 0, sizeof(mgr->rw_boundaries));
+ return ret;
+}
+
/* Forward declaration for usage inside gxp_firmware_load(..). */
static void gxp_firmware_unload(struct gxp_dev *gxp, uint core);
@@ -265,39 +345,43 @@ static void gxp_program_reset_vector(struct gxp_dev *gxp, uint core, bool verbos
{
u32 reset_vec;
- reset_vec = gxp_read_32_core(gxp, core,
- GXP_REG_ALT_RESET_VECTOR);
+ reset_vec = gxp_read_32(gxp, GXP_CORE_REG_ALT_RESET_VECTOR(core));
if (verbose)
dev_notice(gxp->dev,
"Current Aurora reset vector for core %u: 0x%x\n",
core, reset_vec);
- gxp_write_32_core(gxp, core, GXP_REG_ALT_RESET_VECTOR,
- gxp->fwbufs[core].daddr);
+ gxp_write_32(gxp, GXP_CORE_REG_ALT_RESET_VECTOR(core),
+ gxp->firmware_mgr->entry_points[core]);
if (verbose)
dev_notice(gxp->dev,
- "New Aurora reset vector for core %u: 0x%llx\n",
- core, gxp->fwbufs[core].daddr);
+ "New Aurora reset vector for core %u: 0x%x\n",
+ core, gxp->firmware_mgr->entry_points[core]);
}
static int gxp_firmware_load(struct gxp_dev *gxp, uint core)
{
+ struct gxp_firmware_manager *mgr = gxp->firmware_mgr;
u32 offset;
void __iomem *core_scratchpad_base;
int ret;
- if (!gxp->firmwares[core])
+ if (!mgr->firmwares[core])
return -ENODEV;
/* Load firmware to System RAM */
ret = elf_load_segments(gxp,
- gxp->firmwares[core]->data + FW_HEADER_SIZE,
- gxp->firmwares[core]->size - FW_HEADER_SIZE,
+ mgr->firmwares[core]->data + FW_HEADER_SIZE,
+ mgr->firmwares[core]->size - FW_HEADER_SIZE,
&gxp->fwbufs[core]);
if (ret) {
dev_err(gxp->dev, "Unable to load elf file\n");
goto out_firmware_unload;
}
+ elf_fetch_entry_point(gxp,
+ mgr->firmwares[core]->data + FW_HEADER_SIZE,
+ core);
+
memset(gxp->fwbufs[core].vaddr + AURORA_SCRATCHPAD_OFF, 0,
AURORA_SCRATCHPAD_LEN);
@@ -326,7 +410,7 @@ out_firmware_unload:
static int gxp_firmware_handshake(struct gxp_dev *gxp, uint core)
{
u32 offset;
- u32 expected_top_value;
+ u32 __maybe_unused expected_top_value;
void __iomem *core_scratchpad_base;
int ctr;
@@ -334,7 +418,7 @@ static int gxp_firmware_handshake(struct gxp_dev *gxp, uint core)
dev_notice(gxp->dev, "Waiting for core %u to power up...\n", core);
ctr = 1000;
while (ctr) {
- if (gxp_lpm_is_powered(gxp, core))
+ if (gxp_lpm_is_powered(gxp, CORE_TO_PSM(core)))
break;
udelay(1 * GXP_TIME_DELAY_FACTOR);
ctr--;
@@ -359,6 +443,15 @@ static int gxp_firmware_handshake(struct gxp_dev *gxp, uint core)
*/
ctr = 5000;
offset = SCRATCHPAD_MSG_OFFSET(MSG_CORE_ALIVE);
+#if IS_ENABLED(CONFIG_GXP_TEST)
+ fake_gxp_firmware_flush_work_all();
+ /*
+ * As the fake firmware works are flushed, we don't have to busy-wait for the
+ * firmware's response. Setting @ctr to 1 runs the while loop below only once,
+ * for code coverage.
+ */
+ ctr = 1;
+#endif
usleep_range(50 * GXP_TIME_DELAY_FACTOR, 60 * GXP_TIME_DELAY_FACTOR);
while (ctr--) {
if (readl(core_scratchpad_base + offset) == Q7_ALIVE_MAGIC)
@@ -437,15 +530,16 @@ static ssize_t load_dsp_firmware_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct gxp_dev *gxp = dev_get_drvdata(dev);
+ struct gxp_firmware_manager *mgr = gxp->firmware_mgr;
ssize_t ret;
- mutex_lock(&gxp->dsp_firmware_lock);
+ mutex_lock(&mgr->dsp_firmware_lock);
ret = scnprintf(buf, PAGE_SIZE, "%s\n",
- gxp->firmware_name ? gxp->firmware_name :
+ mgr->firmware_name ? mgr->firmware_name :
DSP_FIRMWARE_DEFAULT_PREFIX);
- mutex_unlock(&gxp->dsp_firmware_lock);
+ mutex_unlock(&mgr->dsp_firmware_lock);
return ret;
}
@@ -455,6 +549,7 @@ static ssize_t load_dsp_firmware_store(struct device *dev,
const char *buf, size_t count)
{
struct gxp_dev *gxp = dev_get_drvdata(dev);
+ struct gxp_firmware_manager *mgr = gxp->firmware_mgr;
const struct firmware *firmwares[GXP_NUM_CORES];
char *name_buf = NULL;
int ret;
@@ -466,10 +561,10 @@ static ssize_t load_dsp_firmware_store(struct device *dev,
*/
down_read(&gxp->vd_semaphore);
- if (gxp->firmware_running) {
+ if (mgr->firmware_running) {
dev_warn(dev, "Cannot update firmware when any core is running\n");
ret = -EBUSY;
- goto out;
+ goto err_out;
}
name_buf = fw_name_from_attr_buf(buf);
@@ -477,10 +572,10 @@ static ssize_t load_dsp_firmware_store(struct device *dev,
dev_err(gxp->dev, "Invalid firmware prefix requested: %s\n",
buf);
ret = PTR_ERR(name_buf);
- goto out;
+ goto err_out;
}
- mutex_lock(&gxp->dsp_firmware_lock);
+ mutex_lock(&mgr->dsp_firmware_lock);
dev_notice(gxp->dev, "Requesting firmware be reloaded: %s\n", name_buf);
@@ -497,25 +592,32 @@ static ssize_t load_dsp_firmware_store(struct device *dev,
goto err_authenticate_firmware;
for (core = 0; core < GXP_NUM_CORES; core++) {
- if (gxp->firmwares[core])
- release_firmware(gxp->firmwares[core]);
- gxp->firmwares[core] = firmwares[core];
+ if (mgr->firmwares[core])
+ release_firmware(mgr->firmwares[core]);
+ mgr->firmwares[core] = firmwares[core];
}
- kfree(gxp->firmware_name);
- gxp->firmware_name = name_buf;
+ ret = gxp_firmware_fetch_boundaries(gxp, mgr);
+ if (ret)
+ goto err_fetch_boundaries;
- mutex_unlock(&gxp->dsp_firmware_lock);
-out:
+ kfree(mgr->firmware_name);
+ mgr->firmware_name = name_buf;
+
+ mutex_unlock(&mgr->dsp_firmware_lock);
up_read(&gxp->vd_semaphore);
return count;
+err_fetch_boundaries:
+ for (core = 0; core < GXP_NUM_CORES; core++)
+ mgr->firmwares[core] = NULL;
err_authenticate_firmware:
for (core = 0; core < GXP_NUM_CORES; core++)
release_firmware(firmwares[core]);
err_request_firmware:
kfree(name_buf);
- mutex_unlock(&gxp->dsp_firmware_lock);
+ mutex_unlock(&mgr->dsp_firmware_lock);
+err_out:
up_read(&gxp->vd_semaphore);
return ret;
}
@@ -537,6 +639,13 @@ int gxp_fw_init(struct gxp_dev *gxp)
uint core;
struct resource r;
int ret;
+ struct gxp_firmware_manager *mgr;
+
+ mgr = devm_kzalloc(gxp->dev, sizeof(*mgr), GFP_KERNEL);
+ if (!mgr)
+ return -ENOMEM;
+ gxp->firmware_mgr = mgr;
+ mutex_init(&mgr->dsp_firmware_lock);
/* Power on BLK_AUR to read the revision and processor ID registers */
gxp_pm_blk_on(gxp);
@@ -545,7 +654,7 @@ int gxp_fw_init(struct gxp_dev *gxp)
dev_notice(gxp->dev, "Aurora version: 0x%x\n", ver);
for (core = 0; core < GXP_NUM_CORES; core++) {
- proc_id = gxp_read_32_core(gxp, core, GXP_REG_PROCESSOR_ID);
+ proc_id = gxp_read_32(gxp, GXP_CORE_REG_PROCESSOR_ID(core));
dev_notice(gxp->dev, "Aurora core %u processor ID: 0x%x\n",
core, proc_id);
}
@@ -609,7 +718,7 @@ int gxp_fw_init(struct gxp_dev *gxp)
if (ret)
goto out_fw_destroy;
- gxp->firmware_running = 0;
+ mgr->firmware_running = 0;
return 0;
out_fw_destroy:
@@ -620,6 +729,10 @@ out_fw_destroy:
void gxp_fw_destroy(struct gxp_dev *gxp)
{
uint core;
+ struct gxp_firmware_manager *mgr = gxp->firmware_mgr;
+
+ if (IS_GXP_TEST && !mgr)
+ return;
device_remove_group(gxp->dev, &gxp_firmware_attr_group);
@@ -629,54 +742,83 @@ void gxp_fw_destroy(struct gxp_dev *gxp)
gxp->fwbufs[core].vaddr = NULL;
}
- if (gxp->firmwares[core]) {
- release_firmware(gxp->firmwares[core]);
- gxp->firmwares[core] = NULL;
+ if (mgr->firmwares[core]) {
+ release_firmware(mgr->firmwares[core]);
+ mgr->firmwares[core] = NULL;
}
}
- kfree(gxp->firmware_name);
+ kfree(mgr->firmware_name);
}
int gxp_firmware_request_if_needed(struct gxp_dev *gxp)
{
int ret = 0;
uint core;
+ struct gxp_firmware_manager *mgr = gxp->firmware_mgr;
+ char *name = NULL;
- mutex_lock(&gxp->dsp_firmware_lock);
+ mutex_lock(&mgr->dsp_firmware_lock);
- if (gxp->is_firmware_requested)
+ if (mgr->is_firmware_requested)
goto out;
- ret = request_dsp_firmware(gxp, DSP_FIRMWARE_DEFAULT_PREFIX,
- gxp->firmwares);
+ if (mgr->firmware_name == NULL)
+ name = DSP_FIRMWARE_DEFAULT_PREFIX;
+ else
+ name = mgr->firmware_name;
+
+ ret = request_dsp_firmware(gxp, name, mgr->firmwares);
if (ret)
goto out;
- ret = gxp_firmware_authenticate(gxp, gxp->firmwares);
+ ret = gxp_firmware_authenticate(gxp, mgr->firmwares);
if (ret)
goto err_authenticate_firmware;
- gxp->is_firmware_requested = true;
+ ret = gxp_firmware_fetch_boundaries(gxp, mgr);
+ if (ret)
+ goto err_authenticate_firmware;
+
+ mgr->is_firmware_requested = true;
out:
- mutex_unlock(&gxp->dsp_firmware_lock);
+ mutex_unlock(&mgr->dsp_firmware_lock);
return ret;
err_authenticate_firmware:
for (core = 0; core < GXP_NUM_CORES; core++) {
- release_firmware(gxp->firmwares[core]);
- gxp->firmwares[core] = NULL;
+ release_firmware(mgr->firmwares[core]);
+ mgr->firmwares[core] = NULL;
}
- mutex_unlock(&gxp->dsp_firmware_lock);
+ mutex_unlock(&mgr->dsp_firmware_lock);
return ret;
}
+/* TODO(b/253464747): Refactor these interrupts handlers and gxp-doorbell.c. */
+static void enable_core_interrupts(struct gxp_dev *gxp, uint core)
+{
+ /*
+ * GXP_CORE_REG_COMMON_INT_MASK_0 is handled by the doorbell module, so we
+ * don't need to enable it here.
+ */
+ gxp_write_32(gxp, GXP_CORE_REG_COMMON_INT_MASK_1(core), 0xffffffff);
+ gxp_write_32(gxp, GXP_CORE_REG_DEDICATED_INT_MASK(core), 0xffffffff);
+}
+
+static void disable_core_interrupts(struct gxp_dev *gxp, uint core)
+{
+ gxp_write_32(gxp, GXP_CORE_REG_COMMON_INT_MASK_0(core), 0);
+ gxp_write_32(gxp, GXP_CORE_REG_COMMON_INT_MASK_1(core), 0);
+ gxp_write_32(gxp, GXP_CORE_REG_DEDICATED_INT_MASK(core), 0);
+}
+
static int gxp_firmware_setup(struct gxp_dev *gxp, uint core)
{
int ret = 0;
+ struct gxp_firmware_manager *mgr = gxp->firmware_mgr;
- if (gxp->firmware_running & BIT(core)) {
+ if (mgr->firmware_running & BIT(core)) {
dev_err(gxp->dev, "Firmware is already running on core %u\n",
core);
return -EBUSY;
@@ -689,15 +831,19 @@ static int gxp_firmware_setup(struct gxp_dev *gxp, uint core)
}
/* Mark this as a cold boot */
- gxp_firmware_set_boot_mode(gxp, core, GXP_BOOT_MODE_REQUEST_COLD_BOOT);
+ if (gxp_core_boot)
+ gxp_firmware_set_boot_mode(gxp, core,
+ GXP_BOOT_MODE_REQUEST_COLD_BOOT);
ret = gxp_firmware_setup_hw_after_block_off(gxp, core,
/*verbose=*/true);
if (ret) {
dev_err(gxp->dev, "Failed to power up core %u\n", core);
gxp_firmware_unload(gxp, core);
+ return ret;
}
+ enable_core_interrupts(gxp, core);
return ret;
}
@@ -721,27 +867,35 @@ static int gxp_firmware_finish_startup(struct gxp_dev *gxp,
struct gxp_virtual_device *vd,
uint virt_core, uint core)
{
- int ret;
struct work_struct *work;
+ struct gxp_firmware_manager *mgr = gxp->firmware_mgr;
+ int ret = 0;
- ret = gxp_firmware_handshake(gxp, core);
- if (ret) {
- dev_err(gxp->dev, "Firmware handshake failed on core %u\n",
- core);
- gxp_pm_core_off(gxp, core);
- goto out_firmware_unload;
- }
+ if (gxp_core_boot) {
+ ret = gxp_firmware_handshake(gxp, core);
+ if (ret) {
+ dev_err(gxp->dev,
+ "Firmware handshake failed on core %u\n", core);
+ goto err_firmware_off;
+ }
- /* Initialize mailbox */
- gxp->mailbox_mgr->mailboxes[core] =
- gxp_mailbox_alloc(gxp->mailbox_mgr, vd, virt_core, core);
- if (IS_ERR(gxp->mailbox_mgr->mailboxes[core])) {
- dev_err(gxp->dev,
- "Unable to allocate mailbox (core=%u, ret=%ld)\n", core,
- PTR_ERR(gxp->mailbox_mgr->mailboxes[core]));
- ret = PTR_ERR(gxp->mailbox_mgr->mailboxes[core]);
- gxp->mailbox_mgr->mailboxes[core] = NULL;
- goto out_firmware_unload;
+ /* Initialize mailbox */
+ if (gxp->mailbox_mgr->allocate_mailbox) {
+ gxp->mailbox_mgr->mailboxes[core] =
+ gxp->mailbox_mgr->allocate_mailbox(
+ gxp->mailbox_mgr, vd, virt_core, core);
+ if (IS_ERR(gxp->mailbox_mgr->mailboxes[core])) {
+ dev_err(gxp->dev,
+ "Unable to allocate mailbox (core=%u, ret=%ld)\n",
+ core,
+ PTR_ERR(gxp->mailbox_mgr
+ ->mailboxes[core]));
+ ret = PTR_ERR(
+ gxp->mailbox_mgr->mailboxes[core]);
+ gxp->mailbox_mgr->mailboxes[core] = NULL;
+ goto err_firmware_off;
+ }
+ }
}
work = gxp_debug_dump_get_notification_handler(gxp, core);
@@ -749,16 +903,18 @@ static int gxp_firmware_finish_startup(struct gxp_dev *gxp,
gxp_notification_register_handler(
gxp, core, HOST_NOTIF_DEBUG_DUMP_READY, work);
- work = gxp_telemetry_get_notification_handler(gxp, core);
+ work = gxp_core_telemetry_get_notification_handler(gxp, core);
if (work)
gxp_notification_register_handler(
- gxp, core, HOST_NOTIF_TELEMETRY_STATUS, work);
+ gxp, core, HOST_NOTIF_CORE_TELEMETRY_STATUS, work);
- gxp->firmware_running |= BIT(core);
+ mgr->firmware_running |= BIT(core);
return ret;
-out_firmware_unload:
+err_firmware_off:
+ if (gxp_core_boot)
+ gxp_pm_core_off(gxp, core);
gxp_firmware_unload(gxp, core);
return ret;
}
@@ -767,22 +923,36 @@ static void gxp_firmware_stop_core(struct gxp_dev *gxp,
struct gxp_virtual_device *vd,
uint virt_core, uint core)
{
- if (!(gxp->firmware_running & BIT(core)))
+ struct gxp_firmware_manager *mgr = gxp->firmware_mgr;
+
+ if (!(mgr->firmware_running & BIT(core)))
dev_err(gxp->dev, "Firmware is not running on core %u\n", core);
- gxp->firmware_running &= ~BIT(core);
+ mgr->firmware_running &= ~BIT(core);
gxp_notification_unregister_handler(gxp, core,
HOST_NOTIF_DEBUG_DUMP_READY);
gxp_notification_unregister_handler(gxp, core,
- HOST_NOTIF_TELEMETRY_STATUS);
+ HOST_NOTIF_CORE_TELEMETRY_STATUS);
+
+ if (gxp_core_boot) {
+ if (gxp->mailbox_mgr->release_mailbox) {
+ gxp->mailbox_mgr->release_mailbox(
+ gxp->mailbox_mgr, vd, virt_core,
+ gxp->mailbox_mgr->mailboxes[core]);
+ dev_notice(gxp->dev, "Mailbox %u released\n", core);
+ }
- gxp_mailbox_release(gxp->mailbox_mgr, vd, virt_core,
- gxp->mailbox_mgr->mailboxes[core]);
- dev_notice(gxp->dev, "Mailbox %u released\n", core);
+ if (vd->state == GXP_VD_RUNNING) {
+ /*
+ * Disable interrupts to prevent cores from being woken up
+ * unexpectedly.
+ */
+ disable_core_interrupts(gxp, core);
+ gxp_pm_core_off(gxp, core);
+ }
+ }
- if (vd->state == GXP_VD_RUNNING)
- gxp_pm_core_off(gxp, core);
gxp_firmware_unload(gxp, core);
}
@@ -813,7 +983,8 @@ int gxp_firmware_run(struct gxp_dev *gxp, struct gxp_virtual_device *vd,
for (core = 0; core < GXP_NUM_CORES; core++) {
if (core_list & BIT(core)) {
if (!(failed_cores & BIT(core))) {
- gxp_pm_core_off(gxp, core);
+ if (gxp_core_boot)
+ gxp_pm_core_off(gxp, core);
gxp_firmware_unload(gxp, core);
}
}
@@ -834,8 +1005,11 @@ int gxp_firmware_run(struct gxp_dev *gxp, struct gxp_virtual_device *vd,
}
#endif
/* Switch clock mux to the normal state to guarantee LPM works */
- gxp_pm_force_clkmux_normal(gxp);
- gxp_firmware_wakeup_cores(gxp, core_list);
+ if (gxp_core_boot) {
+ gxp_pm_force_clkmux_normal(gxp);
+ gxp_firmware_wakeup_cores(gxp, core_list);
+ }
+
virt_core = 0;
for (core = 0; core < GXP_NUM_CORES; core++) {
if (core_list & BIT(core)) {
@@ -864,7 +1038,8 @@ int gxp_firmware_run(struct gxp_dev *gxp, struct gxp_virtual_device *vd,
}
}
/* Check if we need to set clock mux to low state as requested */
- gxp_pm_resume_clkmux(gxp);
+ if (gxp_core_boot)
+ gxp_pm_resume_clkmux(gxp);
return ret;
}
@@ -873,7 +1048,8 @@ int gxp_firmware_setup_hw_after_block_off(struct gxp_dev *gxp, uint core,
bool verbose)
{
gxp_program_reset_vector(gxp, core, verbose);
- return gxp_pm_core_on(gxp, core, verbose);
+
+ return gxp_core_boot ? gxp_pm_core_on(gxp, core, verbose) : 0;
}
diff --git a/gxp-firmware.h b/gxp-firmware.h
index 008af5a..aff602a 100644
--- a/gxp-firmware.h
+++ b/gxp-firmware.h
@@ -8,14 +8,25 @@
#define __GXP_FIRMWARE_H__
#include <linux/bitops.h>
+#include <linux/sizes.h>
+#include "gxp-config.h"
#include "gxp-internal.h"
#if !IS_ENABLED(CONFIG_GXP_TEST)
+#ifdef CHIP_AURORA_SCRATCHPAD_OFF
+
+#define AURORA_SCRATCHPAD_OFF CHIP_AURORA_SCRATCHPAD_OFF
+#define AURORA_SCRATCHPAD_LEN CHIP_AURORA_SCRATCHPAD_LEN
+
+#else /* CHIP_AURORA_SCRATCHPAD_OFF */
+
#define AURORA_SCRATCHPAD_OFF 0x000FF000 /* Last 4KB of ELF load region */
#define AURORA_SCRATCHPAD_LEN 0x00001000 /* 4KB */
+#endif /* CHIP_AURORA_SCRATCHPAD_OFF */
+
#else /* CONFIG_GXP_TEST */
/* Firmware memory is shrunk in unit tests. */
#define AURORA_SCRATCHPAD_OFF 0x000F0000
@@ -27,6 +38,31 @@
#define SCRATCHPAD_MSG_OFFSET(_msg_) (_msg_ << 2)
+#define PRIVATE_FW_DATA_SIZE SZ_2M
+#define SHARED_FW_DATA_SIZE SZ_1M
+
+struct gxp_firmware_manager {
+ const struct firmware *firmwares[GXP_NUM_CORES];
+ char *firmware_name;
+ bool is_firmware_requested;
+ /* Protects `firmwares` and `firmware_name` */
+ struct mutex dsp_firmware_lock;
+ /* Firmware status bitmap. Accessors must hold `vd_semaphore`. */
+ u32 firmware_running;
+ /*
+ * The boundary between the read-only and writable segments.
+ * The mappings are programmed as
+ * [fwbufs[i].daddr, rw_boundaries[i]): RO
+ * [rw_boundaries[i], daddr + fwbufs[i].size): RW
+ *
+ * The boundary information is collected by parsing the ELF
+ * header after @firmwares have been fetched.
+ */
+ dma_addr_t rw_boundaries[GXP_NUM_CORES];
+ /* Store the entry point of the DSP core firmware. */
+ u32 entry_points[GXP_NUM_CORES];
+};
+
enum aurora_msg {
MSG_CORE_ALIVE,
MSG_TOP_ACCESS_OK,
@@ -37,7 +73,7 @@ enum aurora_msg {
/* The caller must have locked gxp->vd_semaphore for reading. */
static inline bool gxp_is_fw_running(struct gxp_dev *gxp, uint core)
{
- return (gxp->firmware_running & BIT(core)) != 0;
+ return (gxp->firmware_mgr->firmware_running & BIT(core)) != 0;
}
/*
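As a side note on the rw_boundaries field documented in gxp_firmware_manager above, the
intended per-core split can be sketched as follows; the variable names are illustrative
and not part of the driver:

	/* Hypothetical per-core view of the RO/RW split described above. */
	dma_addr_t ro_start = gxp->fwbufs[core].daddr;
	dma_addr_t ro_end = mgr->rw_boundaries[core];	/* first writable (PF_W) LOAD segment */
	dma_addr_t rw_end = gxp->fwbufs[core].daddr + gxp->fwbufs[core].size;

	/* [ro_start, ro_end) is programmed read-only, [ro_end, rw_end) read-write. */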
diff --git a/gxp-host-device-structs.h b/gxp-host-device-structs.h
index 8e4723c..1c993f1 100644
--- a/gxp-host-device-structs.h
+++ b/gxp-host-device-structs.h
@@ -17,13 +17,13 @@
#define MAX_NUM_CORES 4
#define NUM_SYSTEM_SEMAPHORES 64
-/* Bit masks for the status fields in the telemetry structures. */
-/* The telemetry buffers have been setup by the host. */
-#define GXP_TELEMETRY_HOST_STATUS_ENABLED (1 << 0)
-/* The telemetry buffers are being used by the device. */
-#define GXP_TELEMETRY_DEVICE_STATUS_ENABLED (1 << 0)
+/* Bit masks for the status fields in the core telemetry structures. */
+/* The core telemetry buffers have been setup by the host. */
+#define GXP_CORE_TELEMETRY_HOST_STATUS_ENABLED (1 << 0)
+/* The core telemetry buffers are being used by the device. */
+#define GXP_CORE_TELEMETRY_DEVICE_STATUS_ENABLED (1 << 0)
/* There was an attempt to use the buffers but their content was invalid. */
-#define GXP_TELEMETRY_DEVICE_STATUS_SANITY_CHECK_FAILED (1 << 1)
+#define GXP_CORE_TELEMETRY_DEVICE_STATUS_SANITY_CHECK_FAILED (1 << 1)
/* Definitions for host->device boot mode requests */
/*
@@ -130,20 +130,20 @@ struct gxp_watchdog_descriptor {
};
/*
- * A structure describing the telemetry (logging and tracing) parameters and
- * buffers.
+ * A structure describing the core telemetry (logging and tracing) parameters
+ * and buffers.
*/
-struct gxp_telemetry_descriptor {
- /* A struct for describing the parameters for telemetry buffers */
- struct telemetry_descriptor {
+struct gxp_core_telemetry_descriptor {
+ /* A struct for describing the parameters for core telemetry buffers. */
+ struct core_telemetry_descriptor {
/*
- * The telemetry status from the host's point of view. See the
- * top of the file for the appropriate flags.
+ * The core telemetry status from the host's point of view. See
+ * the top of the file for the appropriate flags.
*/
uint32_t host_status;
/*
- * The telemetry status from the device point of view. See the
- * top of the file for the appropriate flags.
+ * The core telemetry status from the device point of view. See
+ * the top of the file for the appropriate flags.
*/
uint32_t device_status;
/*
@@ -263,8 +263,8 @@ struct gxp_system_descriptor {
uint32_t app_descriptor_dev_addr[MAX_NUM_CORES];
/* A device address for the watchdog descriptor. */
uint32_t watchdog_dev_addr;
- /* A device address for the telemetry descriptor */
- uint32_t telemetry_dev_addr;
+ /* A device address for the core telemetry descriptor */
+ uint32_t core_telemetry_dev_addr;
/* A device address for the common debug dump region */
uint32_t debug_dump_dev_addr;
};
diff --git a/gxp-hw-mailbox-driver.c b/gxp-hw-mailbox-driver.c
deleted file mode 100644
index 8430a65..0000000
--- a/gxp-hw-mailbox-driver.c
+++ /dev/null
@@ -1,323 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GXP hardware-based mailbox driver implementation.
- *
- * Copyright (C) 2021 Google LLC
- */
-
-#include <asm/barrier.h>
-#include <linux/bitops.h>
-#include <linux/interrupt.h>
-#include <linux/kthread.h>
-#include <linux/of_irq.h>
-#include <linux/spinlock.h>
-
-#include "gxp-mailbox-driver.h"
-#include "gxp-mailbox-regs.h"
-#include "gxp-mailbox.h"
-
-static u32 csr_read(struct gxp_mailbox *mailbox, uint reg_offset)
-{
- return readl(mailbox->csr_reg_base + reg_offset);
-}
-
-static void csr_write(struct gxp_mailbox *mailbox, uint reg_offset, u32 value)
-{
- writel(value, mailbox->csr_reg_base + reg_offset);
-}
-
-static u32 data_read(struct gxp_mailbox *mailbox, uint reg_offset)
-{
- return readl(mailbox->data_reg_base + reg_offset);
-}
-
-static void data_write(struct gxp_mailbox *mailbox, uint reg_offset,
- u32 value)
-{
- writel(value, mailbox->data_reg_base + reg_offset);
-}
-
-/* IRQ Handling */
-
-/* Interrupt to signal a response from the device to host */
-#define MBOX_DEVICE_TO_HOST_RESPONSE_IRQ_MASK BIT(0)
-
-static irqreturn_t mailbox_irq_handler(int irq, void *arg)
-{
- u32 masked_status;
- struct gxp_mailbox *mailbox = (struct gxp_mailbox *) arg;
- struct work_struct **handlers = mailbox->interrupt_handlers;
- u32 next_int;
-
- /* Contains only the non-masked, pending interrupt bits */
- masked_status = gxp_mailbox_get_host_mask_status(mailbox);
-
- /* Clear all pending IRQ bits */
- gxp_mailbox_clear_host_interrupt(mailbox, masked_status);
-
- if (masked_status & MBOX_DEVICE_TO_HOST_RESPONSE_IRQ_MASK) {
- mailbox->handle_irq(mailbox);
- masked_status &= ~MBOX_DEVICE_TO_HOST_RESPONSE_IRQ_MASK;
- }
-
- while ((next_int = ffs(masked_status))) {
- next_int--; /* ffs returns 1-based indices */
- masked_status &= ~BIT(next_int);
-
- if (handlers[next_int])
- schedule_work(handlers[next_int]);
- else
- pr_err_ratelimited(
- "mailbox%d: received unknown interrupt bit 0x%X\n",
- mailbox->core_id, next_int);
- }
-
- return IRQ_HANDLED;
-}
-
-static void register_irq(struct gxp_mailbox *mailbox)
-{
- int err;
- unsigned int virq;
-
- virq = irq_of_parse_and_map(mailbox->gxp->dev->of_node,
- mailbox->core_id);
- if (!virq) {
- pr_err("Unable to parse interrupt for core %d from the DT\n",
- mailbox->core_id);
- return;
- }
-
- err = request_irq(virq, mailbox_irq_handler, /*flags=*/ 0,
- "aurora_mbx_irq", (void *) mailbox);
- if (err) {
- pr_err("Unable to register IRQ num=%d; error=%d\n", virq, err);
- return;
- }
-
- mailbox->interrupt_virq = virq;
- pr_debug("Core %d's mailbox interrupt registered as IRQ %u.\n",
- mailbox->core_id, virq);
-}
-
-static void unregister_irq(struct gxp_mailbox *mailbox)
-{
- if (mailbox->interrupt_virq) {
- pr_debug("Freeing IRQ %d\n", mailbox->interrupt_virq);
- free_irq(mailbox->interrupt_virq, mailbox);
- mailbox->interrupt_virq = 0;
- }
-}
-
-/* gxp-mailbox-driver.h interface */
-
-void gxp_mailbox_driver_init(struct gxp_mailbox *mailbox)
-{
- spin_lock_init(&mailbox->cmd_tail_resp_head_lock);
- spin_lock_init(&mailbox->cmd_head_resp_tail_lock);
-}
-
-void gxp_mailbox_driver_exit(struct gxp_mailbox *mailbox)
-{
- /* Nothing to cleanup */
-}
-
-void gxp_mailbox_driver_enable_interrupts(struct gxp_mailbox *mailbox)
-{
- register_irq(mailbox);
-}
-
-void gxp_mailbox_driver_disable_interrupts(struct gxp_mailbox *mailbox)
-{
- unregister_irq(mailbox);
-}
-
-void __iomem *gxp_mailbox_get_csr_base(struct gxp_dev *gxp, uint index)
-{
- return gxp->mbx[index].vaddr;
-}
-
-void __iomem *gxp_mailbox_get_data_base(struct gxp_dev *gxp, uint index)
-{
- return gxp->mbx[index].vaddr + 0x80;
-}
-
-/* gxp-mailbox-driver.h: CSR-based calls */
-
-void gxp_mailbox_reset_hw(struct gxp_mailbox *mailbox)
-{
- csr_write(mailbox, MBOX_MCUCTLR_OFFSET, 1);
-}
-
-void gxp_mailbox_generate_device_interrupt(struct gxp_mailbox *mailbox,
- u32 int_mask)
-{
- /*
- * Ensure all memory writes have been committed to memory before
- * signalling to the device to read from them. This avoids the scenario
- * where the interrupt trigger write gets delivered to the MBX HW before
- * the DRAM transactions made it to DRAM since they're Normal
- * transactions and can be re-ordered and backed off behind other
- * transfers.
- */
- wmb();
-
- csr_write(mailbox, MBOX_INTGR0_OFFSET, int_mask);
-}
-
-u32 gxp_mailbox_get_device_mask_status(struct gxp_mailbox *mailbox)
-{
- return csr_read(mailbox, MBOX_INTMSR0_OFFSET);
-}
-
-void gxp_mailbox_clear_host_interrupt(struct gxp_mailbox *mailbox, u32 int_mask)
-{
- csr_write(mailbox, MBOX_INTCR1_OFFSET, int_mask);
-}
-
-void gxp_mailbox_mask_host_interrupt(struct gxp_mailbox *mailbox, u32 int_mask)
-{
- csr_write(mailbox, MBOX_INTMR1_OFFSET, int_mask);
-}
-
-u32 gxp_mailbox_get_host_mask_status(struct gxp_mailbox *mailbox)
-{
- return csr_read(mailbox, MBOX_INTMSR1_OFFSET);
-}
-
-/* gxp-mailbox-driver.h: Data register-based calls */
-
-void gxp_mailbox_write_status(struct gxp_mailbox *mailbox, u32 status)
-{
- data_write(mailbox, MBOX_STATUS_OFFSET, status);
-}
-
-void gxp_mailbox_write_descriptor(struct gxp_mailbox *mailbox,
- dma_addr_t descriptor_addr)
-{
- data_write(mailbox, MBOX_DESCRIPTOR_ADDR_OFFSET, (u32)descriptor_addr);
-}
-
-void gxp_mailbox_write_cmd_queue_tail(struct gxp_mailbox *mailbox, u16 val)
-{
- u32 current_resp_head;
- u32 new_cmd_tail;
- unsigned long flags;
-
- spin_lock_irqsave(&mailbox->cmd_tail_resp_head_lock, flags);
-
- current_resp_head = data_read(mailbox, MBOX_CMD_TAIL_RESP_HEAD_OFFSET) &
- RESP_HEAD_MASK;
- new_cmd_tail = (u32)val << CMD_TAIL_SHIFT;
- data_write(mailbox, MBOX_CMD_TAIL_RESP_HEAD_OFFSET,
- new_cmd_tail | current_resp_head);
-
- spin_unlock_irqrestore(&mailbox->cmd_tail_resp_head_lock, flags);
-}
-
-void gxp_mailbox_write_resp_queue_head(struct gxp_mailbox *mailbox, u16 val)
-{
- u32 current_cmd_tail;
- u32 new_resp_head;
- unsigned long flags;
-
- spin_lock_irqsave(&mailbox->cmd_tail_resp_head_lock, flags);
-
- current_cmd_tail = data_read(mailbox, MBOX_CMD_TAIL_RESP_HEAD_OFFSET) &
- CMD_TAIL_MASK;
- new_resp_head = (u32)val << RESP_HEAD_SHIFT;
- data_write(mailbox, MBOX_CMD_TAIL_RESP_HEAD_OFFSET,
- current_cmd_tail | new_resp_head);
-
- spin_unlock_irqrestore(&mailbox->cmd_tail_resp_head_lock, flags);
-}
-
-u16 gxp_mailbox_read_cmd_queue_head(struct gxp_mailbox *mailbox)
-{
- u32 reg_val;
- unsigned long flags;
-
- spin_lock_irqsave(&mailbox->cmd_head_resp_tail_lock, flags);
-
- reg_val = data_read(mailbox, MBOX_CMD_HEAD_RESP_TAIL_OFFSET);
-
- spin_unlock_irqrestore(&mailbox->cmd_head_resp_tail_lock, flags);
-
- return (u16)((reg_val & CMD_HEAD_MASK) >> CMD_HEAD_SHIFT);
-}
-
-u16 gxp_mailbox_read_resp_queue_tail(struct gxp_mailbox *mailbox)
-{
- u32 reg_val;
- unsigned long flags;
-
- spin_lock_irqsave(&mailbox->cmd_head_resp_tail_lock, flags);
-
- reg_val = data_read(mailbox, MBOX_CMD_HEAD_RESP_TAIL_OFFSET);
-
- spin_unlock_irqrestore(&mailbox->cmd_head_resp_tail_lock, flags);
-
- return (u16)((reg_val & RESP_TAIL_MASK) >> RESP_TAIL_SHIFT);
-}
-
-void gxp_mailbox_write_cmd_queue_head(struct gxp_mailbox *mailbox, u16 val)
-{
- u32 current_resp_tail;
- u32 new_cmd_head;
- unsigned long flags;
-
- spin_lock_irqsave(&mailbox->cmd_head_resp_tail_lock, flags);
-
- current_resp_tail = data_read(mailbox, MBOX_CMD_HEAD_RESP_TAIL_OFFSET) &
- RESP_TAIL_MASK;
- new_cmd_head = (u32)val << CMD_HEAD_SHIFT;
- data_write(mailbox, MBOX_CMD_HEAD_RESP_TAIL_OFFSET,
- new_cmd_head | current_resp_tail);
-
- spin_unlock_irqrestore(&mailbox->cmd_head_resp_tail_lock, flags);
-}
-
-void gxp_mailbox_write_resp_queue_tail(struct gxp_mailbox *mailbox, u16 val)
-{
- u32 current_cmd_head;
- u32 new_resp_tail;
- unsigned long flags;
-
- spin_lock_irqsave(&mailbox->cmd_head_resp_tail_lock, flags);
-
- current_cmd_head = data_read(mailbox, MBOX_CMD_HEAD_RESP_TAIL_OFFSET) &
- CMD_HEAD_MASK;
- new_resp_tail = (u32)val << RESP_TAIL_SHIFT;
- data_write(mailbox, MBOX_CMD_HEAD_RESP_TAIL_OFFSET,
- current_cmd_head | new_resp_tail);
-
- spin_unlock_irqrestore(&mailbox->cmd_head_resp_tail_lock, flags);
-}
-
-u16 gxp_mailbox_read_cmd_queue_tail(struct gxp_mailbox *mailbox)
-{
- u32 reg_val;
- unsigned long flags;
-
- spin_lock_irqsave(&mailbox->cmd_tail_resp_head_lock, flags);
-
- reg_val = data_read(mailbox, MBOX_CMD_TAIL_RESP_HEAD_OFFSET);
-
- spin_unlock_irqrestore(&mailbox->cmd_tail_resp_head_lock, flags);
-
- return (u16)((reg_val & CMD_TAIL_MASK) >> CMD_TAIL_SHIFT);
-}
-
-u16 gxp_mailbox_read_resp_queue_head(struct gxp_mailbox *mailbox)
-{
- u32 reg_val;
- unsigned long flags;
-
- spin_lock_irqsave(&mailbox->cmd_tail_resp_head_lock, flags);
-
- reg_val = data_read(mailbox, MBOX_CMD_TAIL_RESP_HEAD_OFFSET);
-
- spin_unlock_irqrestore(&mailbox->cmd_tail_resp_head_lock, flags);
-
- return (u16)((reg_val & RESP_HEAD_MASK) >> RESP_HEAD_SHIFT);
-}
diff --git a/gxp-internal.h b/gxp-internal.h
index 82e5303..6988bf8 100644
--- a/gxp-internal.h
+++ b/gxp-internal.h
@@ -10,6 +10,7 @@
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/firmware.h>
+#include <linux/idr.h>
#include <linux/io.h>
#include <linux/iommu.h>
#include <linux/list.h>
@@ -17,15 +18,24 @@
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/of_address.h>
+#include <linux/platform_device.h>
#include <linux/rwsem.h>
#include <linux/spinlock.h>
#include "gxp-config.h"
+#define IS_GXP_TEST IS_ENABLED(CONFIG_GXP_TEST)
+
+enum gxp_chip_revision {
+ GXP_CHIP_A0,
+ GXP_CHIP_B0,
+ /* used when the revision is not explicitly specified */
+ GXP_CHIP_ANY,
+};
+
/* Holds Client's TPU mailboxes info used during mapping */
struct gxp_tpu_mbx_desc {
uint phys_core_list;
- uint virt_core_list;
size_t cmdq_size, respq_size;
};
@@ -44,36 +54,34 @@ struct gxp_tpu_dev {
};
/* Forward declarations from submodules */
+struct gcip_domain_pool;
struct gxp_client;
struct gxp_mailbox_manager;
struct gxp_debug_dump_manager;
-struct gxp_domain_pool;
struct gxp_dma_manager;
struct gxp_fw_data_manager;
struct gxp_power_manager;
-struct gxp_telemetry_manager;
+struct gxp_core_telemetry_manager;
struct gxp_thermal_manager;
struct gxp_wakelock_manager;
+struct gxp_usage_stats;
+struct gxp_power_states;
+struct gxp_iommu_domain;
struct gxp_dev {
struct device *dev; /* platform bus device */
struct miscdevice misc_dev; /* misc device structure */
struct dentry *d_entry; /* debugfs dir for this device */
struct gxp_mapped_resource regs; /* ioremapped CSRs */
- struct gxp_mapped_resource mbx[GXP_NUM_CORES]; /* mailbox CSRs */
+ struct gxp_mapped_resource lpm_regs; /* ioremapped LPM CSRs, may be equal to @regs */
+ struct gxp_mapped_resource mbx[GXP_NUM_MAILBOXES]; /* mailbox CSRs */
struct gxp_mapped_resource fwbufs[GXP_NUM_CORES]; /* FW carveout */
struct gxp_mapped_resource fwdatabuf; /* Shared FW data carveout */
struct gxp_mapped_resource cmu; /* CMU CSRs */
struct gxp_mailbox_manager *mailbox_mgr;
struct gxp_power_manager *power_mgr;
struct gxp_debug_dump_manager *debug_dump_mgr;
- const struct firmware *firmwares[GXP_NUM_CORES];
- char *firmware_name;
- bool is_firmware_requested;
- /* Protects `firmwares` and `firmware_name` */
- struct mutex dsp_firmware_lock;
- /* Firmware status bitmap. Accessors must hold `vd_semaphore` */
- u32 firmware_running;
+ struct gxp_firmware_manager *firmware_mgr;
/*
* Lock to ensure only one thread at a time is ever calling
* `pin_user_pages_fast()` during mapping, otherwise it will fail.
@@ -98,17 +106,150 @@ struct gxp_dev {
struct gxp_dma_manager *dma_mgr;
struct gxp_fw_data_manager *data_mgr;
struct gxp_tpu_dev tpu_dev;
- struct gxp_telemetry_manager *telemetry_mgr;
+ struct gxp_core_telemetry_manager *core_telemetry_mgr;
struct gxp_wakelock_manager *wakelock_mgr;
+ struct gxp_iommu_domain *default_domain;
/*
* Pointer to GSA device for firmware authentication.
* May be NULL if the chip does not support firmware authentication
*/
struct device *gsa_dev;
u32 memory_per_core;
- struct gxp_domain_pool *domain_pool;
+ struct gcip_domain_pool *domain_pool;
struct list_head client_list;
struct mutex client_list_lock;
+ /* Pointer and mutex of secure virtual device */
+ struct gxp_virtual_device *secure_vd;
+ struct mutex secure_vd_lock;
+ /*
+ * Buffer shared across firmware.
+ * Its paddr is 0 if the shared buffer is not available.
+ * Its vaddr is always 0 as this region is not expected to be accessible
+ * to us.
+ */
+ struct gxp_mapped_resource shared_buf;
+ /*
+ * If @shared_buf is split into slices, this ID allocator keeps track of
+ * which slice indexes are in use.
+ */
+ struct ida shared_slice_idp;
+ size_t shared_slice_size; /* The size of each slice. */
+ /*
+ * The total number of slices.
+ * It can be zero if there is no shared buffer support.
+ */
+ unsigned int num_shared_slices;
+ struct gxp_usage_stats *usage_stats; /* Stores the usage stats */
+
+ void __iomem *sysreg_shareability; /* sysreg shareability csr base */
+
+ /* callbacks for chip-dependent implementations */
+
+ /*
+ * For parsing chip-dependent device tree attributes.
+ *
+ * Called as the first step in the common device probing procedure.
+ *
+ * Do NOT use non-device-managed allocations in this function, to
+ * prevent memory leaks when the probe procedure fails.
+ *
+ * Returning a non-zero value fails the probe procedure.
+ *
+ * This callback is optional.
+ */
+ int (*parse_dt)(struct platform_device *pdev, struct gxp_dev *gxp);
+ /*
+ * Called when common device probing procedure is done.
+ *
+ * Returning a non-zero value fails the probe procedure.
+ *
+ * This callback is optional.
+ */
+ int (*after_probe)(struct gxp_dev *gxp);
+ /*
+ * Called before common device removal procedure.
+ *
+ * This callback is optional.
+ */
+ void (*before_remove)(struct gxp_dev *gxp);
+ /*
+ * Device ioctl handler for chip-dependent ioctl calls.
+ * Should return -ENOTTY when the ioctl should be handled by common
+ * device ioctl handler.
+ *
+ * This callback is optional.
+ */
+ long (*handle_ioctl)(struct file *file, uint cmd, ulong arg);
+ /*
+ * Device mmap handler for chip-dependent mmap calls.
+ * Should return -EOPNOTSUPP when the mmap should be handled by common
+ * device mmap handler.
+ *
+ * This callback is optional.
+ */
+ int (*handle_mmap)(struct file *file, struct vm_area_struct *vma);
+ /*
+ * Called for sending power states request.
+ *
+ * Returning a non-zero value fails the block wakelock acquisition.
+ *
+ * This callback is optional.
+ */
+ int (*request_power_states)(struct gxp_client *client,
+ struct gxp_power_states power_states);
+ /*
+ * Called when the client has acquired the BLOCK wakelock and allocated a virtual device.
+ *
+ * Returning a non-zero value fails the block acquisition.
+ *
+ * This callback is optional.
+ */
+ int (*after_vd_block_ready)(struct gxp_dev *gxp,
+ struct gxp_virtual_device *vd);
+ /*
+ * Called before releasing the BLOCK wakelock or the virtual device.
+ *
+ * This callback is optional.
+ */
+ void (*before_vd_block_unready)(struct gxp_dev *gxp,
+ struct gxp_virtual_device *vd);
+ /*
+ * Called in gxp_wakelock_acquire(), after the block is powered.
+ *
+ * This function is called while holding gxp_wakelock_manager.lock.
+ *
+ * Returning a non-zero value fails gxp_wakelock_acquire().
+ *
+ * This callback is optional.
+ */
+ int (*wakelock_after_blk_on)(struct gxp_dev *gxp);
+ /*
+ * Called in gxp_wakelock_release(), before the block is shutdown.
+ *
+ * This function is called while holding gxp_wakelock_manager.lock.
+ *
+ * This callback is optional.
+ */
+ void (*wakelock_before_blk_off)(struct gxp_dev *gxp);
+ /*
+ * Called in gxp_map_tpu_mbx_queue(), after the TPU mailbox buffers are mapped.
+ *
+ * This function is called while holding the write lock of @client->semaphore and the
+ * read lock of @gxp->vd_semaphore.
+ *
+ * This callback is optional.
+ */
+ int (*after_map_tpu_mbx_queue)(struct gxp_dev *gxp,
+ struct gxp_client *client);
+ /*
+ * Called in gxp_unmap_tpu_mbx_queue(), before unmapping the TPU mailbox buffers.
+ *
+ * This function is called while holding the write lock of @client->semaphore.
+ *
+ * This callback is optional.
+ */
+ void (*before_unmap_tpu_mbx_queue)(struct gxp_dev *gxp,
+ struct gxp_client *client);
};
/* GXP device IO functions */
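As a quick illustration of the callback contract above, here is a minimal sketch of a chip-specific ioctl handler; the handler name and the GXP_EXAMPLE_CHIP_IOCTL command are hypothetical and not part of the driver. It claims its own commands and returns -ENOTTY for everything else so that the common ioctl handler takes over:

static long example_chip_handle_ioctl(struct file *file, uint cmd, ulong arg)
{
	switch (cmd) {
	case GXP_EXAMPLE_CHIP_IOCTL:
		/* Chip-specific handling would go here. */
		return 0;
	default:
		/* Not a chip-specific command; defer to the common handler. */
		return -ENOTTY;
	}
}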
@@ -123,22 +264,6 @@ static inline void gxp_write_32(struct gxp_dev *gxp, uint reg_offset, u32 value)
writel(value, gxp->regs.vaddr + reg_offset);
}
-static inline u32 gxp_read_32_core(struct gxp_dev *gxp, uint core,
- uint reg_offset)
-{
- uint offset = GXP_CORE_0_BASE + (GXP_CORE_SIZE * core) + reg_offset;
-
- return gxp_read_32(gxp, offset);
-}
-
-static inline void gxp_write_32_core(struct gxp_dev *gxp, uint core,
- uint reg_offset, u32 value)
-{
- uint offset = GXP_CORE_0_BASE + (GXP_CORE_SIZE * core) + reg_offset;
-
- gxp_write_32(gxp, offset, value);
-}
-
static inline int gxp_acquire_rmem_resource(struct gxp_dev *gxp,
struct resource *r, char *phandle)
{
@@ -158,4 +283,15 @@ static inline int gxp_acquire_rmem_resource(struct gxp_dev *gxp,
return ret;
}
+/*
+ * Returns whether the AP and DSP cores communicate directly via the core mailboxes.
+ * The platform driver of every chip must implement this.
+ */
+bool gxp_is_direct_mode(struct gxp_dev *gxp);
+
+/*
+ * Returns the chip revision.
+ */
+enum gxp_chip_revision gxp_get_chip_revision(struct gxp_dev *gxp);
+
#endif /* __GXP_INTERNAL_H__ */
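A minimal sketch of how a chip's platform driver might satisfy the gxp_is_direct_mode() contract declared above; this is illustrative only, and a real chip would typically derive the answer from its work mode rather than return a constant:

/* Example only: a chip whose AP always drives the cores through the core mailboxes. */
bool gxp_is_direct_mode(struct gxp_dev *gxp)
{
	return true;
}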
diff --git a/gxp-lpm.c b/gxp-lpm.c
index 1ac8e27..1e51b40 100644
--- a/gxp-lpm.c
+++ b/gxp-lpm.c
@@ -12,6 +12,7 @@
#include <linux/types.h>
#include "gxp-bpm.h"
+#include "gxp-config.h"
#include "gxp-doorbell.h"
#include "gxp-internal.h"
#include "gxp-lpm.h"
@@ -21,7 +22,7 @@
int i = 100000; \
while (i) { \
lpm_state = \
- lpm_read_32_psm(gxp, psm, PSM_STATUS_OFFSET) & \
+ lpm_read_32_psm(gxp, psm, PSM_REG_STATUS_OFFSET) & \
PSM_CURR_STATE_MASK; \
if (condition) \
break; \
@@ -31,26 +32,24 @@
return i != 0; \
} while (0)
-void gxp_lpm_enable_state(struct gxp_dev *gxp, uint psm, uint state)
+void gxp_lpm_enable_state(struct gxp_dev *gxp, enum gxp_lpm_psm psm, uint state)
{
- uint offset = LPM_REG_ENABLE_STATE_0 + (LPM_STATE_TABLE_SIZE * state);
-
/* PS0 should always be enabled */
- if (state == 0)
+ if (state == LPM_ACTIVE_STATE || state > LPM_PG_STATE)
return;
/* Disable all low power states */
- lpm_write_32_psm(gxp, psm, LPM_REG_ENABLE_STATE_1, 0x0);
- lpm_write_32_psm(gxp, psm, LPM_REG_ENABLE_STATE_2, 0x0);
- lpm_write_32_psm(gxp, psm, LPM_REG_ENABLE_STATE_3, 0x0);
+ lpm_write_32_psm(gxp, psm, PSM_REG_ENABLE_STATE1_OFFSET, 0x0);
+ lpm_write_32_psm(gxp, psm, PSM_REG_ENABLE_STATE2_OFFSET, 0x0);
+ lpm_write_32_psm(gxp, psm, PSM_REG_ENABLE_STATE3_OFFSET, 0x0);
/* Enable the requested low power state */
- lpm_write_32_psm(gxp, psm, offset, 0x1);
+ lpm_write_32_psm(gxp, psm, state, 0x1);
}
-bool gxp_lpm_is_initialized(struct gxp_dev *gxp, uint psm)
+bool gxp_lpm_is_initialized(struct gxp_dev *gxp, enum gxp_lpm_psm psm)
{
- u32 status = lpm_read_32_psm(gxp, psm, PSM_STATUS_OFFSET);
+ u32 status = lpm_read_32_psm(gxp, psm, PSM_REG_STATUS_OFFSET);
/*
* state_valid bit goes active and stays high forever the first time you
@@ -62,9 +61,9 @@ bool gxp_lpm_is_initialized(struct gxp_dev *gxp, uint psm)
return false;
}
-bool gxp_lpm_is_powered(struct gxp_dev *gxp, uint psm)
+bool gxp_lpm_is_powered(struct gxp_dev *gxp, enum gxp_lpm_psm psm)
{
- u32 status = lpm_read_32_psm(gxp, psm, PSM_STATUS_OFFSET);
+ u32 status = lpm_read_32_psm(gxp, psm, PSM_REG_STATUS_OFFSET);
u32 state;
if (!(status & PSM_STATE_VALID_MASK))
@@ -73,14 +72,14 @@ bool gxp_lpm_is_powered(struct gxp_dev *gxp, uint psm)
return state == LPM_ACTIVE_STATE || state == LPM_CG_STATE;
}
-uint gxp_lpm_get_state(struct gxp_dev *gxp, uint psm)
+uint gxp_lpm_get_state(struct gxp_dev *gxp, enum gxp_lpm_psm psm)
{
- u32 status = lpm_read_32_psm(gxp, psm, PSM_STATUS_OFFSET);
+ u32 status = lpm_read_32_psm(gxp, psm, PSM_REG_STATUS_OFFSET);
return status & PSM_CURR_STATE_MASK;
}
-static int set_state_internal(struct gxp_dev *gxp, uint psm, uint target_state)
+static int set_state_internal(struct gxp_dev *gxp, enum gxp_lpm_psm psm, uint target_state)
{
u32 val;
int i = 10000;
@@ -88,13 +87,13 @@ static int set_state_internal(struct gxp_dev *gxp, uint psm, uint target_state)
/* Set SW sequencing mode and PS target */
val = LPM_SW_PSM_MODE;
val |= target_state << LPM_CFG_SW_PS_TARGET_OFFSET;
- lpm_write_32_psm(gxp, psm, PSM_CFG_OFFSET, val);
+ lpm_write_32_psm(gxp, psm, PSM_REG_CFG_OFFSET, val);
/* Start the SW sequence */
- lpm_write_32_psm(gxp, psm, PSM_START_OFFSET, 0x1);
+ lpm_write_32_psm(gxp, psm, PSM_REG_START_OFFSET, 0x1);
/* Wait for LPM init done (0x60041688) */
- while (i && !(lpm_read_32_psm(gxp, psm, PSM_STATUS_OFFSET)
+ while (i && !(lpm_read_32_psm(gxp, psm, PSM_REG_STATUS_OFFSET)
& PSM_INIT_DONE_MASK)) {
udelay(1 * GXP_TIME_DELAY_FACTOR);
i--;
@@ -108,7 +107,7 @@ static int set_state_internal(struct gxp_dev *gxp, uint psm, uint target_state)
return 0;
}
-int gxp_lpm_set_state(struct gxp_dev *gxp, uint psm, uint target_state,
+int gxp_lpm_set_state(struct gxp_dev *gxp, enum gxp_lpm_psm psm, uint target_state,
bool verbose)
{
uint curr_state = gxp_lpm_get_state(gxp, psm);
@@ -120,7 +119,7 @@ int gxp_lpm_set_state(struct gxp_dev *gxp, uint psm, uint target_state,
dev_warn(gxp->dev,
"Forcing a transition to PS%u on core%u, status: %x\n",
target_state, psm,
- lpm_read_32_psm(gxp, psm, PSM_STATUS_OFFSET));
+ lpm_read_32_psm(gxp, psm, PSM_REG_STATUS_OFFSET));
gxp_lpm_enable_state(gxp, psm, target_state);
@@ -137,21 +136,21 @@ int gxp_lpm_set_state(struct gxp_dev *gxp, uint psm, uint target_state,
gxp->dev,
"Finished forced transition on core %u. target: PS%u, actual: PS%u, status: %x\n",
psm, target_state, gxp_lpm_get_state(gxp, psm),
- lpm_read_32_psm(gxp, psm, PSM_STATUS_OFFSET));
+ lpm_read_32_psm(gxp, psm, PSM_REG_STATUS_OFFSET));
/* Set HW sequencing mode */
- lpm_write_32_psm(gxp, psm, PSM_CFG_OFFSET, LPM_HW_MODE);
+ lpm_write_32_psm(gxp, psm, PSM_REG_CFG_OFFSET, LPM_HW_MODE);
return 0;
}
-static int psm_enable(struct gxp_dev *gxp, uint psm)
+static int psm_enable(struct gxp_dev *gxp, enum gxp_lpm_psm psm)
{
int i = 10000;
/* Return early if LPM is already initialized */
if (gxp_lpm_is_initialized(gxp, psm)) {
- if (psm != LPM_TOP_PSM) {
+ if (psm != LPM_PSM_TOP) {
/* Ensure core is in PS3 */
return gxp_lpm_set_state(gxp, psm, LPM_PG_STATE,
/*verbose=*/true);
@@ -161,10 +160,10 @@ static int psm_enable(struct gxp_dev *gxp, uint psm)
}
/* Write PSM start bit */
- lpm_write_32_psm(gxp, psm, PSM_START_OFFSET, PSM_START);
+ lpm_write_32_psm(gxp, psm, PSM_REG_START_OFFSET, PSM_START);
/* Wait for LPM init done (0x60041688) */
- while (i && !(lpm_read_32_psm(gxp, psm, PSM_STATUS_OFFSET)
+ while (i && !(lpm_read_32_psm(gxp, psm, PSM_REG_STATUS_OFFSET)
& PSM_INIT_DONE_MASK)) {
udelay(1 * GXP_TIME_DELAY_FACTOR);
i--;
@@ -174,7 +173,7 @@ static int psm_enable(struct gxp_dev *gxp, uint psm)
return 1;
/* Set PSM to HW mode (0x60041680) */
- lpm_write_32_psm(gxp, psm, PSM_CFG_OFFSET, PSM_HW_MODE);
+ lpm_write_32_psm(gxp, psm, PSM_REG_CFG_OFFSET, PSM_HW_MODE);
return 0;
}
@@ -182,7 +181,7 @@ static int psm_enable(struct gxp_dev *gxp, uint psm)
void gxp_lpm_init(struct gxp_dev *gxp)
{
/* Enable Top PSM */
- if (psm_enable(gxp, LPM_TOP_PSM))
+ if (psm_enable(gxp, LPM_PSM_TOP))
dev_err(gxp->dev, "Timed out when enabling Top PSM!\n");
}
@@ -192,8 +191,8 @@ void gxp_lpm_destroy(struct gxp_dev *gxp)
dev_dbg(gxp->dev, "Kicking Top PSM out of ACG\n");
/* Disable all low-power states for TOP */
- lpm_write_32_psm(gxp, LPM_TOP_PSM, LPM_REG_ENABLE_STATE_1, 0x0);
- lpm_write_32_psm(gxp, LPM_TOP_PSM, LPM_REG_ENABLE_STATE_2, 0x0);
+ lpm_write_32_psm(gxp, LPM_PSM_TOP, PSM_REG_ENABLE_STATE1_OFFSET, 0x0);
+ lpm_write_32_psm(gxp, LPM_PSM_TOP, PSM_REG_ENABLE_STATE2_OFFSET, 0x0);
}
int gxp_lpm_up(struct gxp_dev *gxp, uint core)
@@ -202,14 +201,15 @@ int gxp_lpm_up(struct gxp_dev *gxp, uint core)
gxp_doorbell_clear(gxp, CORE_WAKEUP_DOORBELL(core));
/* Enable core PSM */
- if (psm_enable(gxp, core)) {
+ if (psm_enable(gxp, CORE_TO_PSM(core))) {
dev_err(gxp->dev, "Timed out when enabling Core%u PSM!\n",
core);
return -ETIMEDOUT;
}
- /* Enable PS1 (Clk Gated) */
- gxp_lpm_enable_state(gxp, core, LPM_CG_STATE);
+ /* Enable PS1 (Clk Gated). Only required for core PSMs. */
+ if (core < GXP_NUM_CORES)
+ gxp_lpm_enable_state(gxp, CORE_TO_PSM(core), LPM_CG_STATE);
gxp_bpm_start(gxp, core);
@@ -218,10 +218,10 @@ int gxp_lpm_up(struct gxp_dev *gxp, uint core)
void gxp_lpm_down(struct gxp_dev *gxp, uint core)
{
- if (gxp_lpm_get_state(gxp, core) == LPM_PG_STATE)
+ if (gxp_lpm_get_state(gxp, CORE_TO_PSM(core)) == LPM_PG_STATE)
return;
/* Enable PS3 (Pwr Gated) */
- gxp_lpm_enable_state(gxp, core, LPM_PG_STATE);
+ gxp_lpm_enable_state(gxp, CORE_TO_PSM(core), LPM_PG_STATE);
/* Set wakeup doorbell to trigger an automatic transition to PS3 */
gxp_doorbell_enable_for_core(gxp, CORE_WAKEUP_DOORBELL(core), core);
@@ -232,21 +232,21 @@ void gxp_lpm_down(struct gxp_dev *gxp, uint core)
* Clear the core's interrupt mask and the wakeup doorbell to ensure
* the core will not wake unexpectedly.
*/
- gxp_write_32_core(gxp, core, GXP_REG_COMMON_INT_MASK_0, 0);
+ gxp_write_32(gxp, GXP_CORE_REG_COMMON_INT_MASK_0(core), 0);
gxp_doorbell_clear(gxp, CORE_WAKEUP_DOORBELL(core));
/* Ensure core is in PS3 */
- gxp_lpm_set_state(gxp, core, LPM_PG_STATE, /*verbose=*/true);
+ gxp_lpm_set_state(gxp, CORE_TO_PSM(core), LPM_PG_STATE, /*verbose=*/true);
}
-bool gxp_lpm_wait_state_ne(struct gxp_dev *gxp, uint psm, uint state)
+bool gxp_lpm_wait_state_ne(struct gxp_dev *gxp, enum gxp_lpm_psm psm, uint state)
{
uint lpm_state;
gxp_lpm_wait_until(lpm_state, lpm_state != state);
}
-bool gxp_lpm_wait_state_eq(struct gxp_dev *gxp, uint psm, uint state)
+bool gxp_lpm_wait_state_eq(struct gxp_dev *gxp, enum gxp_lpm_psm psm, uint state)
{
uint lpm_state;
diff --git a/gxp-lpm.h b/gxp-lpm.h
index ee1a749..5af1c89 100644
--- a/gxp-lpm.h
+++ b/gxp-lpm.h
@@ -10,15 +10,9 @@
#include <linux/types.h>
+#include "gxp-config.h"
#include "gxp.h"
-enum lpm_psm_csrs {
- LPM_REG_ENABLE_STATE_0 = 0x080,
- LPM_REG_ENABLE_STATE_1 = 0x180,
- LPM_REG_ENABLE_STATE_2 = 0x280,
- LPM_REG_ENABLE_STATE_3 = 0x380,
-};
-
enum lpm_state {
LPM_ACTIVE_STATE = 0,
LPM_CG_STATE = 1,
@@ -26,15 +20,19 @@ enum lpm_state {
LPM_PG_STATE = 3,
};
-#define LPM_STATE_TABLE_SIZE (LPM_REG_ENABLE_STATE_1 - LPM_REG_ENABLE_STATE_0)
+enum psm_reg_offset {
+ PSM_REG_ENABLE_STATE0_OFFSET,
+ PSM_REG_ENABLE_STATE1_OFFSET,
+ PSM_REG_ENABLE_STATE2_OFFSET,
+ PSM_REG_ENABLE_STATE3_OFFSET,
+ PSM_REG_START_OFFSET,
+ PSM_REG_STATUS_OFFSET,
+ PSM_REG_CFG_OFFSET,
+};
#define LPM_INSTRUCTION_OFFSET 0x00000944
#define LPM_INSTRUCTION_MASK 0x03000000
-/*
- * The TOP PSM comes immediately after the last PSM of core, so define its PSM
- * number in terms of the number of cores.
- */
-#define LPM_TOP_PSM GXP_NUM_CORES
+
#define LPM_HW_MODE 0
#define LPM_SW_PSM_MODE 1
@@ -42,10 +40,6 @@ enum lpm_state {
#define CORE_WAKEUP_DOORBELL(__core__) (0 + (__core__))
-#define AUR_DVFS_DOMAIN 17
-#define AUR_DVFS_DEBUG_REQ (1 << 31)
-#define AUR_DEBUG_CORE_FREQ (AUR_DVFS_DEBUG_REQ | (3 << 27))
-
#define PSM_INIT_DONE_MASK 0x80
#define PSM_CURR_STATE_MASK 0x0F
#define PSM_STATE_VALID_MASK 0x10
@@ -75,71 +69,90 @@ void gxp_lpm_down(struct gxp_dev *gxp, uint core);
* Return whether the specified PSM is initialized.
* PSM0-PSM3 are for core0-core3, PSM4 is the TOP LPM.
*/
-bool gxp_lpm_is_initialized(struct gxp_dev *gxp, uint psm);
+bool gxp_lpm_is_initialized(struct gxp_dev *gxp, enum gxp_lpm_psm psm);
/*
* Return whether the specified PSM is powered.
*/
-bool gxp_lpm_is_powered(struct gxp_dev *gxp, uint psm);
+bool gxp_lpm_is_powered(struct gxp_dev *gxp, enum gxp_lpm_psm psm);
/*
* Wait for the specified @psm to be in any state other than @state
* Return whether the waiting is successful or the timeout occurs.
*/
-bool gxp_lpm_wait_state_ne(struct gxp_dev *gxp, uint psm, uint state);
+bool gxp_lpm_wait_state_ne(struct gxp_dev *gxp, enum gxp_lpm_psm psm, uint state);
/*
* Wait for the specified @psm to be in the specified @state
* Return whether the waiting is successful or the timeout occurs.
*/
-bool gxp_lpm_wait_state_eq(struct gxp_dev *gxp, uint psm, uint state);
+bool gxp_lpm_wait_state_eq(struct gxp_dev *gxp, enum gxp_lpm_psm psm, uint state);
/*
* Force a state transition on the specified PSM.
*/
-int gxp_lpm_set_state(struct gxp_dev *gxp, uint psm, uint target_state,
+int gxp_lpm_set_state(struct gxp_dev *gxp, enum gxp_lpm_psm psm, uint target_state,
bool verbose);
/*
* Get current LPM state of the specified PSM.
*/
-uint gxp_lpm_get_state(struct gxp_dev *gxp, uint psm);
+uint gxp_lpm_get_state(struct gxp_dev *gxp, enum gxp_lpm_psm psm);
/*
* Enable a state on the specified PSM.
*/
-void gxp_lpm_enable_state(struct gxp_dev *gxp, uint psm, uint state);
+void gxp_lpm_enable_state(struct gxp_dev *gxp, enum gxp_lpm_psm psm, uint state);
static inline u32 lpm_read_32(struct gxp_dev *gxp, uint reg_offset)
{
- uint offset = GXP_LPM_BASE + reg_offset;
-
- return gxp_read_32(gxp, offset);
+#ifndef GXP_SEPARATE_LPM_OFFSET
+ reg_offset = GXP_LPM_BASE + reg_offset;
+#endif
+ return readl(gxp->lpm_regs.vaddr + reg_offset);
}
static inline void lpm_write_32(struct gxp_dev *gxp, uint reg_offset, u32 value)
{
- uint offset = GXP_LPM_BASE + reg_offset;
+#ifndef GXP_SEPARATE_LPM_OFFSET
+ reg_offset = GXP_LPM_BASE + reg_offset;
+#endif
+ writel(value, gxp->lpm_regs.vaddr + reg_offset);
+}
- gxp_write_32(gxp, offset, value);
+static u32 get_reg_offset(struct gxp_dev *gxp, enum psm_reg_offset reg_offset, enum gxp_lpm_psm psm)
+{
+ switch (reg_offset) {
+ case PSM_REG_ENABLE_STATE0_OFFSET:
+ case PSM_REG_ENABLE_STATE1_OFFSET:
+ case PSM_REG_ENABLE_STATE2_OFFSET:
+ case PSM_REG_ENABLE_STATE3_OFFSET:
+ return gxp_lpm_psm_get_state_offset(psm, (uint)reg_offset);
+ case PSM_REG_START_OFFSET:
+ return gxp_lpm_psm_get_start_offset(psm);
+ case PSM_REG_STATUS_OFFSET:
+ return gxp_lpm_psm_get_status_offset(psm);
+ case PSM_REG_CFG_OFFSET:
+ return gxp_lpm_psm_get_cfg_offset(psm);
+ }
+
+ return 0;
}
-static inline u32 lpm_read_32_psm(struct gxp_dev *gxp, uint psm,
- uint reg_offset)
+static inline u32 lpm_read_32_psm(struct gxp_dev *gxp, enum gxp_lpm_psm psm,
+ enum psm_reg_offset reg_offset)
{
- uint offset =
- GXP_LPM_PSM_0_BASE + (GXP_LPM_PSM_SIZE * psm) + reg_offset;
+ uint offset = get_reg_offset(gxp, reg_offset, psm);
- return gxp_read_32(gxp, offset);
+ return lpm_read_32(gxp, offset);
}
-static inline void lpm_write_32_psm(struct gxp_dev *gxp, uint psm,
- uint reg_offset, u32 value)
+static inline void lpm_write_32_psm(struct gxp_dev *gxp, enum gxp_lpm_psm psm,
+ enum psm_reg_offset reg_offset, u32 value)
{
- uint offset =
- GXP_LPM_PSM_0_BASE + (GXP_LPM_PSM_SIZE * psm) + reg_offset;
+ u32 offset = get_reg_offset(gxp, reg_offset, psm);
- gxp_write_32(gxp, offset, value);
+ lpm_write_32(gxp, offset, value);
}
#endif /* __GXP_LPM_H__ */
diff --git a/gxp-mailbox-driver.c b/gxp-mailbox-driver.c
new file mode 100644
index 0000000..40fdba1
--- /dev/null
+++ b/gxp-mailbox-driver.c
@@ -0,0 +1,511 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * GXP hardware-based mailbox driver implementation.
+ *
+ * Copyright (C) 2021 Google LLC
+ */
+
+#include <asm/barrier.h>
+#include <linux/bitops.h>
+#include <linux/interrupt.h>
+#include <linux/kthread.h>
+#include <linux/of_irq.h>
+#include <linux/spinlock.h>
+
+#include "gxp-config.h" /* GXP_USE_LEGACY_MAILBOX */
+#include "gxp-mailbox-driver.h"
+#include "gxp-mailbox-regs.h"
+#include "gxp-mailbox.h"
+
+static u32 data_read(struct gxp_mailbox *mailbox, uint reg_offset)
+{
+ return readl(mailbox->data_reg_base + reg_offset);
+}
+
+static void data_write(struct gxp_mailbox *mailbox, uint reg_offset, u32 value)
+{
+ writel(value, mailbox->data_reg_base + reg_offset);
+}
+
+/* IRQ Handling */
+
+/* Interrupt to signal a response from the device to host */
+#define MBOX_DEVICE_TO_HOST_RESPONSE_IRQ_MASK BIT(0)
+
+static irqreturn_t mailbox_irq_handler(int irq, void *arg)
+{
+ u32 masked_status;
+ struct gxp_mailbox *mailbox = (struct gxp_mailbox *)arg;
+ struct work_struct **handlers = mailbox->interrupt_handlers;
+ u32 next_int;
+
+ /* Contains only the non-masked, pending interrupt bits */
+ masked_status = gxp_mailbox_get_host_mask_status(mailbox);
+
+ /* Clear all pending IRQ bits */
+ gxp_mailbox_clear_host_interrupt(mailbox, masked_status);
+
+ if (masked_status & MBOX_DEVICE_TO_HOST_RESPONSE_IRQ_MASK) {
+ mailbox->handle_irq(mailbox);
+ masked_status &= ~MBOX_DEVICE_TO_HOST_RESPONSE_IRQ_MASK;
+ }
+
+ while ((next_int = ffs(masked_status))) {
+ next_int--; /* ffs returns 1-based indices */
+ masked_status &= ~BIT(next_int);
+
+ if (handlers[next_int])
+ schedule_work(handlers[next_int]);
+ else
+ pr_err_ratelimited(
+ "mailbox%d: received unknown interrupt bit 0x%X\n",
+ mailbox->core_id, next_int);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static void register_irq(struct gxp_mailbox *mailbox)
+{
+ int err;
+ unsigned int virq;
+
+ virq = irq_of_parse_and_map(mailbox->gxp->dev->of_node,
+ mailbox->core_id);
+ if (!virq) {
+ pr_err("Unable to parse interrupt for core %d from the DT\n",
+ mailbox->core_id);
+ return;
+ }
+
+ err = request_irq(virq, mailbox_irq_handler, /*flags=*/0,
+ "aurora_mbx_irq", (void *)mailbox);
+ if (err) {
+ pr_err("Unable to register IRQ num=%d; error=%d\n", virq, err);
+ return;
+ }
+
+ mailbox->interrupt_virq = virq;
+ pr_debug("Core %d's mailbox interrupt registered as IRQ %u.\n",
+ mailbox->core_id, virq);
+}
+
+static void unregister_irq(struct gxp_mailbox *mailbox)
+{
+ if (mailbox->interrupt_virq) {
+ pr_debug("Freeing IRQ %d\n", mailbox->interrupt_virq);
+ free_irq(mailbox->interrupt_virq, mailbox);
+ mailbox->interrupt_virq = 0;
+ }
+}
+
+/* gxp-mailbox-driver.h interface */
+
+u32 gxp_circ_queue_cnt(u32 head, u32 tail, u32 queue_size, u32 wrap_bit)
+{
+ if (CIRCULAR_QUEUE_WRAPPED(tail, wrap_bit) !=
+ CIRCULAR_QUEUE_WRAPPED(head, wrap_bit))
+ return queue_size - CIRCULAR_QUEUE_REAL_INDEX(head, wrap_bit) +
+ CIRCULAR_QUEUE_REAL_INDEX(tail, wrap_bit);
+ else
+ return tail - head;
+}
+
+u32 gxp_circ_queue_inc(u32 index, u32 inc, u32 queue_size, u32 wrap_bit)
+{
+ u32 new_index = CIRCULAR_QUEUE_REAL_INDEX(index, wrap_bit) + inc;
+
+ if (new_index >= queue_size)
+ return (index + inc - queue_size) ^ wrap_bit;
+ else
+ return index + inc;
+}
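/*
 * Worked example of the wrap-bit arithmetic implemented by the two helpers
 * above (values chosen for illustration; the real queues in
 * gxp-mailbox-impl.c use 1024 entries and BIT(15) as the wrap bit):
 *
 *   queue_size = 4, wrap_bit = BIT(15), head = 0, tail = 0  -> empty
 *   produce 3: tail = gxp_circ_queue_inc(0, 3, 4, wrap_bit) = 3
 *              gxp_circ_queue_cnt(0, 3, 4, wrap_bit)        = 3
 *   consume 2: head = gxp_circ_queue_inc(0, 2, 4, wrap_bit) = 2
 *   produce 3: tail = gxp_circ_queue_inc(3, 3, 4, wrap_bit)
 *                   = (3 + 3 - 4) ^ wrap_bit = 2 | wrap_bit
 *   now gxp_circ_queue_cnt(2, 2 | wrap_bit, 4, wrap_bit) = 4, i.e. full,
 *   which is exactly the head == (tail ^ wrap_bit) check used before
 *   enqueueing a command.
 */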
+
+void gxp_mailbox_driver_init(struct gxp_mailbox *mailbox)
+{
+ spin_lock_init(&mailbox->cmd_tail_resp_head_lock);
+ spin_lock_init(&mailbox->cmd_head_resp_tail_lock);
+}
+
+void gxp_mailbox_driver_exit(struct gxp_mailbox *mailbox)
+{
+ /* Nothing to cleanup */
+}
+
+void gxp_mailbox_driver_enable_interrupts(struct gxp_mailbox *mailbox)
+{
+ register_irq(mailbox);
+}
+
+void gxp_mailbox_driver_disable_interrupts(struct gxp_mailbox *mailbox)
+{
+ unregister_irq(mailbox);
+}
+
+void __iomem *gxp_mailbox_get_csr_base(struct gxp_dev *gxp, uint index)
+{
+ return gxp->mbx[index].vaddr;
+}
+
+void __iomem *gxp_mailbox_get_data_base(struct gxp_dev *gxp, uint index)
+{
+ return gxp->mbx[index].vaddr + MBOX_DATA_REG_BASE;
+}
+
+/* gxp-mailbox-driver.h: Data register-based calls */
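/*
 * Note on the layout assumed by the accessors below: each 32-bit data
 * register packs two 16-bit indices, one owned by the host and one owned by
 * the device (CMD_TAIL with RESP_HEAD, and CMD_HEAD with RESP_TAIL). Updating
 * one half therefore requires a read-modify-write of the whole register,
 * which is why accesses to each register pair are serialized by their own
 * spinlock (cmd_tail_resp_head_lock and cmd_head_resp_tail_lock).
 */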
+
+void gxp_mailbox_write_status(struct gxp_mailbox *mailbox, u32 status)
+{
+ data_write(mailbox, MBOX_DATA_STATUS_OFFSET, status);
+}
+
+void gxp_mailbox_write_descriptor(struct gxp_mailbox *mailbox,
+ dma_addr_t descriptor_addr)
+{
+ data_write(mailbox, MBOX_DATA_DESCRIPTOR_ADDR_OFFSET, (u32)descriptor_addr);
+}
+
+void gxp_mailbox_write_cmd_queue_tail(struct gxp_mailbox *mailbox, u16 val)
+{
+ u32 current_resp_head;
+ u32 new_cmd_tail;
+ unsigned long flags;
+
+ spin_lock_irqsave(&mailbox->cmd_tail_resp_head_lock, flags);
+
+ current_resp_head = data_read(mailbox, MBOX_DATA_CMD_TAIL_RESP_HEAD_OFFSET) &
+ RESP_HEAD_MASK;
+ new_cmd_tail = (u32)val << CMD_TAIL_SHIFT;
+ data_write(mailbox, MBOX_DATA_CMD_TAIL_RESP_HEAD_OFFSET,
+ new_cmd_tail | current_resp_head);
+
+ spin_unlock_irqrestore(&mailbox->cmd_tail_resp_head_lock, flags);
+}
+
+void gxp_mailbox_write_resp_queue_head(struct gxp_mailbox *mailbox, u16 val)
+{
+ u32 current_cmd_tail;
+ u32 new_resp_head;
+ unsigned long flags;
+
+ spin_lock_irqsave(&mailbox->cmd_tail_resp_head_lock, flags);
+
+ current_cmd_tail = data_read(mailbox, MBOX_DATA_CMD_TAIL_RESP_HEAD_OFFSET) &
+ CMD_TAIL_MASK;
+ new_resp_head = (u32)val << RESP_HEAD_SHIFT;
+ data_write(mailbox, MBOX_DATA_CMD_TAIL_RESP_HEAD_OFFSET,
+ current_cmd_tail | new_resp_head);
+
+ spin_unlock_irqrestore(&mailbox->cmd_tail_resp_head_lock, flags);
+}
+
+u16 gxp_mailbox_read_cmd_queue_head(struct gxp_mailbox *mailbox)
+{
+ u32 reg_val;
+ unsigned long flags;
+
+ spin_lock_irqsave(&mailbox->cmd_head_resp_tail_lock, flags);
+
+ reg_val = data_read(mailbox, MBOX_DATA_CMD_HEAD_RESP_TAIL_OFFSET);
+
+ spin_unlock_irqrestore(&mailbox->cmd_head_resp_tail_lock, flags);
+
+ return (u16)((reg_val & CMD_HEAD_MASK) >> CMD_HEAD_SHIFT);
+}
+
+u16 gxp_mailbox_read_resp_queue_tail(struct gxp_mailbox *mailbox)
+{
+ u32 reg_val;
+ unsigned long flags;
+
+ spin_lock_irqsave(&mailbox->cmd_head_resp_tail_lock, flags);
+
+ reg_val = data_read(mailbox, MBOX_DATA_CMD_HEAD_RESP_TAIL_OFFSET);
+
+ spin_unlock_irqrestore(&mailbox->cmd_head_resp_tail_lock, flags);
+
+ return (u16)((reg_val & RESP_TAIL_MASK) >> RESP_TAIL_SHIFT);
+}
+
+void gxp_mailbox_write_cmd_queue_head(struct gxp_mailbox *mailbox, u16 val)
+{
+ u32 current_resp_tail;
+ u32 new_cmd_head;
+ unsigned long flags;
+
+ spin_lock_irqsave(&mailbox->cmd_head_resp_tail_lock, flags);
+
+ current_resp_tail = data_read(mailbox, MBOX_DATA_CMD_HEAD_RESP_TAIL_OFFSET) &
+ RESP_TAIL_MASK;
+ new_cmd_head = (u32)val << CMD_HEAD_SHIFT;
+ data_write(mailbox, MBOX_DATA_CMD_HEAD_RESP_TAIL_OFFSET,
+ new_cmd_head | current_resp_tail);
+
+ spin_unlock_irqrestore(&mailbox->cmd_head_resp_tail_lock, flags);
+}
+
+void gxp_mailbox_write_resp_queue_tail(struct gxp_mailbox *mailbox, u16 val)
+{
+ u32 current_cmd_head;
+ u32 new_resp_tail;
+ unsigned long flags;
+
+ spin_lock_irqsave(&mailbox->cmd_head_resp_tail_lock, flags);
+
+ current_cmd_head = data_read(mailbox, MBOX_DATA_CMD_HEAD_RESP_TAIL_OFFSET) &
+ CMD_HEAD_MASK;
+ new_resp_tail = (u32)val << RESP_TAIL_SHIFT;
+ data_write(mailbox, MBOX_DATA_CMD_HEAD_RESP_TAIL_OFFSET,
+ current_cmd_head | new_resp_tail);
+
+ spin_unlock_irqrestore(&mailbox->cmd_head_resp_tail_lock, flags);
+}
+
+u16 gxp_mailbox_read_cmd_queue_tail(struct gxp_mailbox *mailbox)
+{
+ u32 reg_val;
+ unsigned long flags;
+
+ spin_lock_irqsave(&mailbox->cmd_tail_resp_head_lock, flags);
+
+ reg_val = data_read(mailbox, MBOX_DATA_CMD_TAIL_RESP_HEAD_OFFSET);
+
+ spin_unlock_irqrestore(&mailbox->cmd_tail_resp_head_lock, flags);
+
+ return (u16)((reg_val & CMD_TAIL_MASK) >> CMD_TAIL_SHIFT);
+}
+
+u16 gxp_mailbox_read_resp_queue_head(struct gxp_mailbox *mailbox)
+{
+ u32 reg_val;
+ unsigned long flags;
+
+ spin_lock_irqsave(&mailbox->cmd_tail_resp_head_lock, flags);
+
+ reg_val = data_read(mailbox, MBOX_DATA_CMD_TAIL_RESP_HEAD_OFFSET);
+
+ spin_unlock_irqrestore(&mailbox->cmd_tail_resp_head_lock, flags);
+
+ return (u16)((reg_val & RESP_HEAD_MASK) >> RESP_HEAD_SHIFT);
+}
+
+void gxp_mailbox_set_cmd_queue_tail(struct gxp_mailbox *mailbox, u32 value)
+{
+ mailbox->cmd_queue_tail = value;
+ gxp_mailbox_write_cmd_queue_tail(mailbox, value);
+}
+
+void gxp_mailbox_set_resp_queue_head(struct gxp_mailbox *mailbox, u32 value)
+{
+ mailbox->resp_queue_head = value;
+ gxp_mailbox_write_resp_queue_head(mailbox, value);
+}
+
+int gxp_mailbox_inc_cmd_queue_tail_nolock(struct gxp_mailbox *mailbox, u32 inc,
+ u32 wrap_bit)
+{
+ u32 head;
+ u32 remain_size;
+ u32 new_tail;
+
+ if (inc > mailbox->cmd_queue_size)
+ return -EINVAL;
+
+ head = gxp_mailbox_read_cmd_queue_head(mailbox);
+ remain_size = mailbox->cmd_queue_size -
+ gxp_circ_queue_cnt(head, mailbox->cmd_queue_tail,
+ mailbox->cmd_queue_size, wrap_bit);
+ /* not enough space left */
+ if (inc > remain_size)
+ return -EBUSY;
+
+ new_tail = gxp_circ_queue_inc(mailbox->cmd_queue_tail, inc,
+ mailbox->cmd_queue_size, wrap_bit);
+ gxp_mailbox_set_cmd_queue_tail(mailbox, new_tail);
+ return 0;
+}
+
+int gxp_mailbox_inc_cmd_queue_tail_locked(struct gxp_mailbox *mailbox, u32 inc,
+ u32 wrap_bit)
+{
+ lockdep_assert_held(&mailbox->cmd_queue_lock);
+ return gxp_mailbox_inc_cmd_queue_tail_nolock(mailbox, inc, wrap_bit);
+}
+
+int gxp_mailbox_inc_resp_queue_head_nolock(struct gxp_mailbox *mailbox, u32 inc,
+ u32 wrap_bit)
+{
+ u32 tail;
+ u32 size;
+ u32 new_head;
+
+ if (inc > mailbox->resp_queue_size)
+ return -EINVAL;
+
+ tail = gxp_mailbox_read_resp_queue_tail(mailbox);
+ size = gxp_circ_queue_cnt(mailbox->resp_queue_head, tail,
+ mailbox->resp_queue_size, wrap_bit);
+ if (inc > size)
+ return -EINVAL;
+ new_head = gxp_circ_queue_inc(mailbox->resp_queue_head, inc,
+ mailbox->resp_queue_size, wrap_bit);
+ gxp_mailbox_set_resp_queue_head(mailbox, new_head);
+
+ return 0;
+}
+
+int gxp_mailbox_inc_resp_queue_head_locked(struct gxp_mailbox *mailbox, u32 inc,
+ u32 wrap_bit)
+{
+ lockdep_assert_held(&mailbox->resp_queue_lock);
+ return gxp_mailbox_inc_resp_queue_head_nolock(mailbox, inc, wrap_bit);
+}
+
+#if !GXP_USE_LEGACY_MAILBOX
+u32 gxp_mailbox_gcip_ops_get_cmd_queue_head(struct gcip_mailbox *mailbox)
+{
+ struct gxp_mailbox *gxp_mbx = mailbox->data;
+
+ return gxp_mailbox_read_cmd_queue_head(gxp_mbx);
+}
+
+u32 gxp_mailbox_gcip_ops_get_cmd_queue_tail(struct gcip_mailbox *mailbox)
+{
+ struct gxp_mailbox *gxp_mbx = mailbox->data;
+
+ return gxp_mbx->cmd_queue_tail;
+}
+
+void gxp_mailbox_gcip_ops_inc_cmd_queue_tail(struct gcip_mailbox *mailbox,
+ u32 inc)
+{
+ struct gxp_mailbox *gxp_mbx = mailbox->data;
+
+ lockdep_assert_held(&gxp_mbx->cmd_queue_lock);
+ gxp_mailbox_inc_cmd_queue_tail_nolock(gxp_mbx, inc,
+ mailbox->queue_wrap_bit);
+}
+
+int gxp_mailbox_gcip_ops_acquire_cmd_queue_lock(struct gcip_mailbox *mailbox,
+ bool try)
+{
+ struct gxp_mailbox *gxp_mbx = mailbox->data;
+
+ mutex_lock(&gxp_mbx->cmd_queue_lock);
+ return 1;
+}
+
+void gxp_mailbox_gcip_ops_release_cmd_queue_lock(struct gcip_mailbox *mailbox)
+{
+ struct gxp_mailbox *gxp_mbx = mailbox->data;
+
+ mutex_unlock(&gxp_mbx->cmd_queue_lock);
+}
+
+u32 gxp_mailbox_gcip_ops_get_resp_queue_size(struct gcip_mailbox *mailbox)
+{
+ struct gxp_mailbox *gxp_mbx = mailbox->data;
+
+ return gxp_mbx->resp_queue_size;
+}
+
+u32 gxp_mailbox_gcip_ops_get_resp_queue_head(struct gcip_mailbox *mailbox)
+{
+ struct gxp_mailbox *gxp_mbx = mailbox->data;
+
+ return gxp_mbx->resp_queue_head;
+}
+
+u32 gxp_mailbox_gcip_ops_get_resp_queue_tail(struct gcip_mailbox *mailbox)
+{
+ struct gxp_mailbox *gxp_mbx = mailbox->data;
+
+ return gxp_mailbox_read_resp_queue_tail(gxp_mbx);
+}
+
+void gxp_mailbox_gcip_ops_inc_resp_queue_head(struct gcip_mailbox *mailbox,
+ u32 inc)
+{
+ struct gxp_mailbox *gxp_mbx = mailbox->data;
+
+ lockdep_assert_held(&gxp_mbx->resp_queue_lock);
+ gxp_mailbox_inc_resp_queue_head_nolock(gxp_mbx, inc,
+ mailbox->queue_wrap_bit);
+}
+
+int gxp_mailbox_gcip_ops_acquire_resp_queue_lock(struct gcip_mailbox *mailbox,
+ bool try)
+{
+ struct gxp_mailbox *gxp_mbx = mailbox->data;
+
+ mutex_lock(&gxp_mbx->resp_queue_lock);
+ return 1;
+}
+
+void gxp_mailbox_gcip_ops_release_resp_queue_lock(struct gcip_mailbox *mailbox)
+{
+ struct gxp_mailbox *gxp_mbx = mailbox->data;
+
+ mutex_unlock(&gxp_mbx->resp_queue_lock);
+}
+
+void gxp_mailbox_gcip_ops_acquire_wait_list_lock(struct gcip_mailbox *mailbox,
+ bool irqsave,
+ unsigned long *flags)
+{
+ struct gxp_mailbox *gxp_mbx = mailbox->data;
+
+ mutex_lock(&gxp_mbx->wait_list_lock);
+}
+
+void gxp_mailbox_gcip_ops_release_wait_list_lock(struct gcip_mailbox *mailbox,
+ bool irqrestore,
+ unsigned long flags)
+{
+ struct gxp_mailbox *gxp_mbx = mailbox->data;
+
+ mutex_unlock(&gxp_mbx->wait_list_lock);
+}
+
+int gxp_mailbox_gcip_ops_wait_for_cmd_queue_not_full(
+ struct gcip_mailbox *mailbox)
+{
+ struct gxp_mailbox *gxp_mbx = mailbox->data;
+ u32 tail = gxp_mbx->cmd_queue_tail;
+
+ /*
+ * If the cmd queue is full, it's up to the caller to retry.
+ */
+ if (gxp_mailbox_read_cmd_queue_head(gxp_mbx) ==
+ (tail ^ mailbox->queue_wrap_bit)) {
+ return -EAGAIN;
+ }
+
+ return 0;
+}
+
+int gxp_mailbox_gcip_ops_after_enqueue_cmd(struct gcip_mailbox *mailbox,
+ void *cmd)
+{
+ struct gxp_mailbox *gxp_mbx = mailbox->data;
+
+ /* triggers doorbell */
+ gxp_mailbox_generate_device_interrupt(gxp_mbx, BIT(0));
+ return 1;
+}
+
+void gxp_mailbox_gcip_ops_after_fetch_resps(struct gcip_mailbox *mailbox,
+ u32 num_resps)
+{
+ struct gxp_mailbox *gxp_mbx = mailbox->data;
+ u32 size = gxp_mbx->resp_queue_size;
+
+ /*
+ * Now that the response queue has been drained, send an interrupt
+ * to the device in case firmware was waiting for us to consume
+ * responses.
+ */
+ if (num_resps == size)
+ gxp_mailbox_generate_device_interrupt(gxp_mbx, BIT(0));
+}
+#endif /* !GXP_USE_LEGACY_MAILBOX */
diff --git a/gxp-mailbox-driver.h b/gxp-mailbox-driver.h
index 9271694..30292d2 100644
--- a/gxp-mailbox-driver.h
+++ b/gxp-mailbox-driver.h
@@ -2,13 +2,34 @@
/*
* GXP mailbox driver.
*
- * Copyright (C) 2020 Google LLC
+ * Copyright (C) 2020-2022 Google LLC
*/
#ifndef __GXP_MAILBOX_DRIVER_H__
#define __GXP_MAILBOX_DRIVER_H__
+#include "gxp-config.h"
#include "gxp-mailbox.h"
+#if !GXP_USE_LEGACY_MAILBOX
+#include <gcip/gcip-mailbox.h>
+#endif
+
+/* Utilities of circular queue operations */
+
+#define CIRCULAR_QUEUE_INDEX_MASK(wrap_bit) (wrap_bit - 1)
+#define CIRCULAR_QUEUE_WRAPPED(idx, wrap_bit) ((idx)&wrap_bit)
+#define CIRCULAR_QUEUE_REAL_INDEX(idx, wrap_bit) \
+ ((idx)&CIRCULAR_QUEUE_INDEX_MASK(wrap_bit))
+
+/*
+ * Returns the number of elements in a circular queue given its @head, @tail,
+ * and @queue_size.
+ */
+u32 gxp_circ_queue_cnt(u32 head, u32 tail, u32 queue_size, u32 wrap_bit);
+
+/* Increases @index of a circular queue by @inc. */
+u32 gxp_circ_queue_inc(u32 index, u32 inc, u32 queue_size, u32 wrap_bit);
+
void gxp_mailbox_driver_init(struct gxp_mailbox *mailbox);
void gxp_mailbox_driver_exit(struct gxp_mailbox *mailbox);
@@ -48,4 +69,105 @@ void gxp_mailbox_write_resp_queue_tail(struct gxp_mailbox *mailbox, u16 val);
u16 gxp_mailbox_read_cmd_queue_tail(struct gxp_mailbox *mailbox);
u16 gxp_mailbox_read_resp_queue_head(struct gxp_mailbox *mailbox);
+/* Sets mailbox->cmd_queue_tail and corresponding CSR on device. */
+void gxp_mailbox_set_cmd_queue_tail(struct gxp_mailbox *mailbox, u32 value);
+
+/* Sets mailbox->resp_queue_head and corresponding CSR on device. */
+void gxp_mailbox_set_resp_queue_head(struct gxp_mailbox *mailbox, u32 value);
+
+/*
+ * Increases the command queue tail by @inc.
+ *
+ * The queue uses the mirrored circular buffer arrangement. Each index (head and
+ * tail) has a wrap bit, passed in as @wrap_bit.
+ * Whenever an index is increased and will exceed the end of the queue, the wrap
+ * bit is xor-ed.
+ *
+ * This method will update both mailbox->cmd_queue_tail and CSR on device.
+ *
+ * Returns 0 on success.
+ * If command queue tail will exceed command queue head after adding @inc,
+ * -EBUSY is returned and all fields remain unchanged. The caller should
+ * handle this case and implement a mechanism to wait until the consumer
+ * consumes commands.
+ *
+ * This doesn't acquire any locks internally; the caller is responsible for its own
+ * locking. If the caller must hold @mailbox->cmd_queue_lock before calling this,
+ * use gxp_mailbox_inc_cmd_queue_tail_locked() instead.
+ */
+int gxp_mailbox_inc_cmd_queue_tail_nolock(struct gxp_mailbox *mailbox, u32 inc,
+ u32 wrap_bit);
+
+/*
+ * Wrapper function of `gxp_mailbox_inc_cmd_queue_tail_nolock`.
+ * Caller must hold @mailbox->cmd_queue_lock.
+ */
+int gxp_mailbox_inc_cmd_queue_tail_locked(struct gxp_mailbox *mailbox, u32 inc,
+ u32 wrap_bit);
+
+/*
+ * Increases the response queue head by @inc.
+ *
+ * The queue uses the mirrored circular buffer arrangement. Each index (head and
+ * tail) has a wrap bit, passed in as @wrap_bit.
+ * Whenever an index is increased and will exceed the end of the queue, the wrap
+ * bit is xor-ed.
+ *
+ * This method will update both mailbox->resp_queue_head and CSR on device.
+ *
+ * Returns 0 on success.
+ * -EINVAL is returned if the queue head would exceed the queue tail, in which
+ * case no fields or CSRs are updated.
+ *
+ * This doesn't acquire any locks internally; the caller is responsible for its own
+ * locking. If the caller must hold @mailbox->resp_queue_lock before calling this,
+ * use gxp_mailbox_inc_resp_queue_head_locked() instead.
+ */
+int gxp_mailbox_inc_resp_queue_head_nolock(struct gxp_mailbox *mailbox, u32 inc,
+ u32 wrap_bit);
+
+/*
+ * Wrapper function of `gxp_mailbox_inc_resp_queue_head_nolock`.
+ * Caller must hold @mailbox->resp_queue_lock.
+ */
+int gxp_mailbox_inc_resp_queue_head_locked(struct gxp_mailbox *mailbox, u32 inc,
+ u32 wrap_bit);
+
+#if !GXP_USE_LEGACY_MAILBOX
+/*
+ * Following functions are used when setting the operators of `struct gcip_mailbox_ops`.
+ * To use these functions, @mailbox->data should be set as an instance of `struct gxp_mailbox`.
+ */
+u32 gxp_mailbox_gcip_ops_get_cmd_queue_head(struct gcip_mailbox *mailbox);
+u32 gxp_mailbox_gcip_ops_get_cmd_queue_tail(struct gcip_mailbox *mailbox);
+void gxp_mailbox_gcip_ops_inc_cmd_queue_tail(struct gcip_mailbox *mailbox,
+ u32 inc);
+int gxp_mailbox_gcip_ops_acquire_cmd_queue_lock(struct gcip_mailbox *mailbox,
+ bool try);
+void gxp_mailbox_gcip_ops_release_cmd_queue_lock(struct gcip_mailbox *mailbox);
+
+u32 gxp_mailbox_gcip_ops_get_resp_queue_size(struct gcip_mailbox *mailbox);
+u32 gxp_mailbox_gcip_ops_get_resp_queue_head(struct gcip_mailbox *mailbox);
+u32 gxp_mailbox_gcip_ops_get_resp_queue_tail(struct gcip_mailbox *mailbox);
+void gxp_mailbox_gcip_ops_inc_resp_queue_head(struct gcip_mailbox *mailbox,
+ u32 inc);
+int gxp_mailbox_gcip_ops_acquire_resp_queue_lock(struct gcip_mailbox *mailbox,
+ bool try);
+void gxp_mailbox_gcip_ops_release_resp_queue_lock(struct gcip_mailbox *mailbox);
+
+void gxp_mailbox_gcip_ops_acquire_wait_list_lock(struct gcip_mailbox *mailbox,
+ bool irqsave,
+ unsigned long *flags);
+void gxp_mailbox_gcip_ops_release_wait_list_lock(struct gcip_mailbox *mailbox,
+ bool irqrestore,
+ unsigned long flags);
+
+int gxp_mailbox_gcip_ops_wait_for_cmd_queue_not_full(
+ struct gcip_mailbox *mailbox);
+int gxp_mailbox_gcip_ops_after_enqueue_cmd(struct gcip_mailbox *mailbox,
+ void *cmd);
+void gxp_mailbox_gcip_ops_after_fetch_resps(struct gcip_mailbox *mailbox,
+ u32 num_resps);
+#endif /* !GXP_USE_LEGACY_MAILBOX */
+
#endif /* __GXP_MAILBOX_DRIVER_H__ */
diff --git a/gxp-mailbox-impl.c b/gxp-mailbox-impl.c
new file mode 100644
index 0000000..4ea4130
--- /dev/null
+++ b/gxp-mailbox-impl.c
@@ -0,0 +1,787 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Legacy implementation of the GXP mailbox interface.
+ * This file must be used only when the kernel driver has to compile the implementation of the
+ * mailbox by itself (i.e., when the target chip can't be compiled with GCIP).
+ *
+ * Copyright (C) 2022 Google LLC
+ */
+
+#include <linux/slab.h>
+
+#include "gxp-dma.h"
+#include "gxp-mailbox-driver.h"
+#include "gxp-mailbox-impl.h"
+#include "gxp-mailbox.h"
+#include "gxp.h"
+
+#define CIRCULAR_QUEUE_WRAP_BIT BIT(15)
+
+#define MBOX_CMD_QUEUE_NUM_ENTRIES 1024
+#define MBOX_RESP_QUEUE_NUM_ENTRIES 1024
+
+static int gxp_mailbox_ops_allocate_resources(struct gxp_mailbox *mailbox,
+ struct gxp_virtual_device *vd,
+ uint virt_core)
+{
+ int ret;
+
+ /* Allocate and initialize the command queue */
+ ret = gxp_dma_alloc_coherent_buf(
+ mailbox->gxp, vd->domain,
+ sizeof(struct gxp_command) * MBOX_CMD_QUEUE_NUM_ENTRIES,
+ GFP_KERNEL, 0, &mailbox->cmd_queue_buf);
+ if (ret)
+ goto err_cmd_queue;
+
+ mailbox->cmd_queue_size = MBOX_CMD_QUEUE_NUM_ENTRIES;
+ mailbox->cmd_queue_tail = 0;
+
+ /* Allocate and initialize the response queue */
+ ret = gxp_dma_alloc_coherent_buf(
+ mailbox->gxp, vd->domain,
+ sizeof(struct gxp_response) * MBOX_RESP_QUEUE_NUM_ENTRIES,
+ GFP_KERNEL, 0, &mailbox->resp_queue_buf);
+ if (ret)
+ goto err_resp_queue;
+
+ mailbox->resp_queue_size = MBOX_RESP_QUEUE_NUM_ENTRIES;
+ mailbox->resp_queue_head = 0;
+
+ /* Allocate and initialize the mailbox descriptor */
+ ret = gxp_dma_alloc_coherent_buf(mailbox->gxp, vd->domain,
+ sizeof(struct gxp_mailbox_descriptor),
+ GFP_KERNEL, 0,
+ &mailbox->descriptor_buf);
+ if (ret)
+ goto err_descriptor;
+
+ mailbox->descriptor =
+ (struct gxp_mailbox_descriptor *)mailbox->descriptor_buf.vaddr;
+ mailbox->descriptor->cmd_queue_device_addr =
+ mailbox->cmd_queue_buf.dsp_addr;
+ mailbox->descriptor->resp_queue_device_addr =
+ mailbox->resp_queue_buf.dsp_addr;
+ mailbox->descriptor->cmd_queue_size = mailbox->cmd_queue_size;
+ mailbox->descriptor->resp_queue_size = mailbox->resp_queue_size;
+
+ return 0;
+
+err_descriptor:
+ gxp_dma_free_coherent_buf(mailbox->gxp, vd->domain,
+ &mailbox->resp_queue_buf);
+err_resp_queue:
+ gxp_dma_free_coherent_buf(mailbox->gxp, vd->domain,
+ &mailbox->cmd_queue_buf);
+err_cmd_queue:
+ return ret;
+}
+
+static void gxp_mailbox_ops_release_resources(struct gxp_mailbox *mailbox,
+ struct gxp_virtual_device *vd,
+ uint virt_core)
+{
+ gxp_dma_free_coherent_buf(mailbox->gxp, vd->domain,
+ &mailbox->cmd_queue_buf);
+ gxp_dma_free_coherent_buf(mailbox->gxp, vd->domain,
+ &mailbox->resp_queue_buf);
+ gxp_dma_free_coherent_buf(mailbox->gxp, vd->domain,
+ &mailbox->descriptor_buf);
+}
+
+/*
+ * Pops the wait_list until the sequence number of @resp is found, and copies
+ * @resp to the found entry.
+ *
+ * Entries in wait_list should have sequence number in increasing order, but
+ * the responses arriving and being handled may be out-of-order.
+ *
+ * Iterate over the wait_list, comparing #cur->resp->seq with @resp->seq:
+ * 1. #cur->resp->seq > @resp->seq:
+ * - Nothing to do, either @resp is invalid or its command timed out.
+ * - We're done.
+ * 2. #cur->resp->seq == @resp->seq:
+ * - Copy @resp, pop the head.
+ * - If #cur->resp has a destination queue, push it to that queue
+ * - We're done.
+ * 3. #cur->resp->seq < @resp->seq:
+ * - @resp has arrived out of sequence order.
+ * - Leave #cur->resp in the wait_list.
+ * - Keep iterating unless the list is exhausted.
+ */
+static void gxp_mailbox_handle_response(struct gxp_mailbox *mailbox,
+ const struct gxp_response *resp)
+{
+ struct gxp_mailbox_wait_list *cur, *nxt;
+ struct gxp_async_response *async_resp;
+ unsigned long flags;
+
+ mutex_lock(&mailbox->wait_list_lock);
+
+ list_for_each_entry_safe (cur, nxt, &mailbox->wait_list, list) {
+ if (cur->resp->seq > resp->seq) {
+ /*
+ * This response has already timed out and been removed
+ * from the wait list (or this is an invalid response).
+ * Drop it.
+ */
+ break;
+ }
+ if (cur->resp->seq == resp->seq) {
+ memcpy(cur->resp, resp, sizeof(*resp));
+ list_del(&cur->list);
+ if (cur->is_async) {
+ async_resp =
+ container_of(cur->resp,
+ struct gxp_async_response,
+ resp);
+
+ cancel_delayed_work(&async_resp->timeout_work);
+ gxp_pm_update_requested_power_states(
+ async_resp->mailbox->gxp,
+ async_resp->requested_states,
+ off_states);
+
+ spin_lock_irqsave(async_resp->dest_queue_lock,
+ flags);
+
+ list_add_tail(&async_resp->list_entry,
+ async_resp->dest_queue);
+ /*
+ * Marking the dest_queue as NULL indicates the
+ * response was handled in case its timeout
+ * handler fired between acquiring the
+ * wait_list_lock and cancelling the timeout.
+ */
+ async_resp->dest_queue = NULL;
+
+ /*
+ * Don't release the dest_queue_lock until both
+ * any eventfd has been signaled and any waiting
+ * thread has been woken. Otherwise one thread
+ * might consume and free the response before
+ * this function is done with it.
+ */
+ if (async_resp->eventfd) {
+ gxp_eventfd_signal(async_resp->eventfd);
+ gxp_eventfd_put(async_resp->eventfd);
+ }
+
+ wake_up(async_resp->dest_queue_waitq);
+
+ spin_unlock_irqrestore(
+ async_resp->dest_queue_lock, flags);
+ }
+ kfree(cur);
+ break;
+ }
+ }
+
+ mutex_unlock(&mailbox->wait_list_lock);
+}
+
+/*
+ * Fetches elements in the response queue.
+ *
+ * Returns a pointer to the fetched response elements.
+ * @total_ptr will be set to the number of elements fetched.
+ *
+ * Returns ERR_PTR(-ENOMEM) if memory allocation fails.
+ * Returns NULL if the response queue is empty.
+ */
+static struct gxp_response *
+gxp_mailbox_fetch_responses(struct gxp_mailbox *mailbox, u32 *total_ptr)
+{
+ u32 head;
+ u32 tail;
+ u32 count;
+ u32 i;
+ u32 j;
+ u32 total = 0;
+ const u32 size = mailbox->resp_queue_size;
+ const struct gxp_response *queue = mailbox->resp_queue_buf.vaddr;
+ struct gxp_response *ret = NULL;
+ struct gxp_response *prev_ptr = NULL;
+
+ mutex_lock(&mailbox->resp_queue_lock);
+
+ head = mailbox->resp_queue_head;
+ /* loop until our head equals to CSR tail */
+ while (1) {
+ tail = gxp_mailbox_read_resp_queue_tail(mailbox);
+ count = gxp_circ_queue_cnt(head, tail, size,
+ CIRCULAR_QUEUE_WRAP_BIT);
+ if (count == 0)
+ break;
+
+ prev_ptr = ret;
+ ret = krealloc(prev_ptr, (total + count) * sizeof(*queue),
+ GFP_KERNEL);
+ /*
+ * Out of memory: return the previously fetched responses
+ * if any, or ERR_PTR(-ENOMEM) otherwise.
+ */
+ if (!ret) {
+ if (!prev_ptr)
+ ret = ERR_PTR(-ENOMEM);
+ else
+ ret = prev_ptr;
+ break;
+ }
+ /* copy responses */
+ j = CIRCULAR_QUEUE_REAL_INDEX(head, CIRCULAR_QUEUE_WRAP_BIT);
+ for (i = 0; i < count; i++) {
+ memcpy(&ret[total], &queue[j], sizeof(*queue));
+ ret[total].status = GXP_RESP_OK;
+ j = (j + 1) % size;
+ total++;
+ }
+ head = gxp_circ_queue_inc(head, count, size,
+ CIRCULAR_QUEUE_WRAP_BIT);
+ }
+ gxp_mailbox_inc_resp_queue_head_locked(mailbox, total,
+ CIRCULAR_QUEUE_WRAP_BIT);
+
+ mutex_unlock(&mailbox->resp_queue_lock);
+ /*
+ * Now that the response queue has been drained, send an interrupt
+ * to the device in case firmware was waiting for us to consume
+ * responses.
+ */
+ if (total == size) {
+ /* TODO(b/190868834) define interrupt bits */
+ gxp_mailbox_generate_device_interrupt(mailbox, BIT(0));
+ }
+
+ *total_ptr = total;
+ return ret;
+}
+
+/* Default operators for the DSP mailbox */
+struct gxp_mailbox_ops gxp_mailbox_default_ops = {
+ .allocate_resources = gxp_mailbox_ops_allocate_resources,
+ .release_resources = gxp_mailbox_ops_release_resources,
+};
+
+/* Default arguments for the DSP mailbox */
+const struct gxp_mailbox_args gxp_mailbox_default_args = {
+ .type = GXP_MBOX_TYPE_GENERAL,
+ .ops = &gxp_mailbox_default_ops,
+ .data = NULL,
+};
+
+/*
+ * Adds @resp to @mailbox->wait_list.
+ *
+ * wait_list is a FIFO queue, with sequence number in increasing order.
+ *
+ * Returns 0 on success, or -ENOMEM if allocation fails.
+ */
+static int gxp_mailbox_push_wait_resp(struct gxp_mailbox *mailbox,
+ struct gxp_response *resp, bool is_async)
+{
+ struct gxp_mailbox_wait_list *entry =
+ kzalloc(sizeof(*entry), GFP_KERNEL);
+
+ if (!entry)
+ return -ENOMEM;
+ entry->resp = resp;
+ entry->is_async = is_async;
+ mutex_lock(&mailbox->wait_list_lock);
+ list_add_tail(&entry->list, &mailbox->wait_list);
+ mutex_unlock(&mailbox->wait_list_lock);
+
+ return 0;
+}
+
+/*
+ * Removes the response previously pushed with gxp_mailbox_push_wait_resp().
+ *
+ * This is used when the kernel gives up waiting for the response.
+ */
+static void gxp_mailbox_del_wait_resp(struct gxp_mailbox *mailbox,
+ struct gxp_response *resp)
+{
+ struct gxp_mailbox_wait_list *cur;
+
+ mutex_lock(&mailbox->wait_list_lock);
+
+ list_for_each_entry (cur, &mailbox->wait_list, list) {
+ if (cur->resp->seq > resp->seq) {
+ /*
+ * Sequence numbers in wait_list are in increasing
+ * order. This case implies no entry in the list
+ * matches @resp's sequence number.
+ */
+ break;
+ }
+ if (cur->resp->seq == resp->seq) {
+ list_del(&cur->list);
+ kfree(cur);
+ break;
+ }
+ }
+
+ mutex_unlock(&mailbox->wait_list_lock);
+}
+
+static int gxp_mailbox_enqueue_cmd(struct gxp_mailbox *mailbox,
+ struct gxp_command *cmd,
+ struct gxp_response *resp,
+ bool resp_is_async)
+{
+ int ret;
+ u32 tail;
+ struct gxp_command *cmd_queue = mailbox->cmd_queue_buf.vaddr;
+
+ mutex_lock(&mailbox->cmd_queue_lock);
+
+ cmd->seq = mailbox->cur_seq;
+ /*
+ * The lock ensures mailbox->cmd_queue_tail cannot be changed by
+ * other processes (this method should be the only one to modify the
+ * value of tail), therefore we can remember its value here and use it
+ * in various places below.
+ */
+ tail = mailbox->cmd_queue_tail;
+
+ /*
+ * If the cmd queue is full, it's up to the caller to retry.
+ */
+ if (gxp_mailbox_read_cmd_queue_head(mailbox) ==
+ (tail ^ CIRCULAR_QUEUE_WRAP_BIT)) {
+ ret = -EAGAIN;
+ goto out;
+ }
+
+ if (resp) {
+ /*
+ * Add @resp to the wait_list only if the cmd can be pushed
+ * successfully.
+ */
+ resp->seq = cmd->seq;
+ resp->status = GXP_RESP_WAITING;
+ ret = gxp_mailbox_push_wait_resp(mailbox, resp, resp_is_async);
+ if (ret)
+ goto out;
+ }
+ /* size of cmd_queue is a multiple of sizeof(*cmd) */
+ memcpy(cmd_queue +
+ CIRCULAR_QUEUE_REAL_INDEX(tail, CIRCULAR_QUEUE_WRAP_BIT),
+ cmd, sizeof(*cmd));
+ gxp_mailbox_inc_cmd_queue_tail_locked(mailbox, 1,
+ CIRCULAR_QUEUE_WRAP_BIT);
+ /* triggers doorbell */
+ /* TODO(b/190868834) define interrupt bits */
+ gxp_mailbox_generate_device_interrupt(mailbox, BIT(0));
+ /* bumps sequence number after the command is sent */
+ mailbox->cur_seq++;
+ ret = 0;
+out:
+ mutex_unlock(&mailbox->cmd_queue_lock);
+ if (ret)
+ dev_err(mailbox->gxp->dev, "%s: ret=%d", __func__, ret);
+
+ return ret;
+}
+
+static int gxp_mailbox_execute_cmd(struct gxp_mailbox *mailbox,
+ struct gxp_command *cmd,
+ struct gxp_response *resp)
+{
+ int ret;
+
+ ret = gxp_mailbox_enqueue_cmd(mailbox, cmd, resp,
+ /* resp_is_async = */ false);
+ if (ret)
+ return ret;
+ ret = wait_event_timeout(mailbox->wait_list_waitq,
+ resp->status != GXP_RESP_WAITING,
+ msecs_to_jiffies(MAILBOX_TIMEOUT));
+ if (!ret) {
+ dev_notice(mailbox->gxp->dev, "%s: event wait timeout",
+ __func__);
+ gxp_mailbox_del_wait_resp(mailbox, resp);
+ return -ETIMEDOUT;
+ }
+ if (resp->status != GXP_RESP_OK) {
+ dev_notice(mailbox->gxp->dev, "%s: resp status=%u", __func__,
+ resp->status);
+ return -ENOMSG;
+ }
+
+ return resp->retval;
+}
+
+static void async_cmd_timeout_work(struct work_struct *work)
+{
+ struct gxp_async_response *async_resp = container_of(
+ work, struct gxp_async_response, timeout_work.work);
+ unsigned long flags;
+
+ /*
+ * This function will acquire the mailbox wait_list_lock. This means if
+ * response processing is in progress, it will complete before this
+ * response can be removed from the wait list.
+ *
+ * Once this function has the wait_list_lock, no future response
+ * processing will begin until this response has been removed.
+ */
+ gxp_mailbox_del_wait_resp(async_resp->mailbox, &async_resp->resp);
+
+ /*
+ * Check if this response still has a valid destination queue, in case
+ * an in-progress call to `gxp_mailbox_handle_response()` completed
+ * the response while `gxp_mailbox_del_wait_resp()` was waiting for
+ * the wait_list_lock.
+ */
+ spin_lock_irqsave(async_resp->dest_queue_lock, flags);
+ if (async_resp->dest_queue) {
+ async_resp->resp.status = GXP_RESP_CANCELLED;
+ list_add_tail(&async_resp->list_entry, async_resp->dest_queue);
+ spin_unlock_irqrestore(async_resp->dest_queue_lock, flags);
+
+ gxp_pm_update_requested_power_states(
+ async_resp->mailbox->gxp, async_resp->requested_states,
+ off_states);
+
+ if (async_resp->eventfd) {
+ gxp_eventfd_signal(async_resp->eventfd);
+ gxp_eventfd_put(async_resp->eventfd);
+ }
+
+ wake_up(async_resp->dest_queue_waitq);
+ } else {
+ spin_unlock_irqrestore(async_resp->dest_queue_lock, flags);
+ }
+}
+
+static int gxp_mailbox_execute_cmd_async(struct gxp_mailbox *mailbox,
+ struct gxp_command *cmd,
+ struct list_head *resp_queue,
+ spinlock_t *queue_lock,
+ wait_queue_head_t *queue_waitq,
+ struct gxp_power_states power_states,
+ struct gxp_eventfd *eventfd)
+{
+ struct gxp_async_response *async_resp;
+ int ret;
+
+ async_resp = kzalloc(sizeof(*async_resp), GFP_KERNEL);
+ if (!async_resp)
+ return -ENOMEM;
+
+ async_resp->mailbox = mailbox;
+ async_resp->dest_queue = resp_queue;
+ async_resp->dest_queue_lock = queue_lock;
+ async_resp->dest_queue_waitq = queue_waitq;
+ async_resp->requested_states = power_states;
+ if (eventfd && gxp_eventfd_get(eventfd))
+ async_resp->eventfd = eventfd;
+ else
+ async_resp->eventfd = NULL;
+
+ INIT_DELAYED_WORK(&async_resp->timeout_work, async_cmd_timeout_work);
+ schedule_delayed_work(&async_resp->timeout_work,
+ msecs_to_jiffies(MAILBOX_TIMEOUT));
+
+ gxp_pm_update_requested_power_states(mailbox->gxp, off_states,
+ power_states);
+ ret = gxp_mailbox_enqueue_cmd(mailbox, cmd, &async_resp->resp,
+ /* resp_is_async = */ true);
+ if (ret)
+ goto err_free_resp;
+
+ return 0;
+
+err_free_resp:
+ gxp_pm_update_requested_power_states(mailbox->gxp, power_states,
+ off_states);
+ cancel_delayed_work_sync(&async_resp->timeout_work);
+ kfree(async_resp);
+ return ret;
+}
+
+static struct gxp_mailbox *
+gxp_mailbox_manager_allocate_mailbox(struct gxp_mailbox_manager *mgr,
+ struct gxp_virtual_device *vd,
+ uint virt_core, u8 core_id)
+{
+ struct gxp_mailbox *mailbox = gxp_mailbox_alloc(
+ mgr, vd, virt_core, core_id, &gxp_mailbox_default_args);
+
+ if (!IS_ERR(mailbox))
+ gxp_mailbox_generate_device_interrupt(mailbox, BIT(0));
+ return mailbox;
+}
+
+static int gxp_mailbox_manager_execute_cmd(
+ struct gxp_client *client, struct gxp_mailbox *mailbox, int virt_core,
+ u16 cmd_code, u8 cmd_priority, u64 cmd_daddr, u32 cmd_size,
+ u32 cmd_flags, u8 num_cores, struct gxp_power_states power_states,
+ u64 *resp_seq, u16 *resp_status)
+{
+ struct gxp_command cmd;
+ struct gxp_response resp;
+ struct buffer_descriptor buffer;
+ int ret;
+
+ /* Pack the command structure */
+ buffer.address = cmd_daddr;
+ buffer.size = cmd_size;
+ buffer.flags = cmd_flags;
+ /* cmd.seq is assigned by mailbox implementation */
+ cmd.code = cmd_code; /* All IOCTL commands are dispatch */
+ cmd.priority = cmd_priority; /* currently unused */
+ cmd.buffer_descriptor = buffer;
+
+ ret = gxp_mailbox_execute_cmd(mailbox, &cmd, &resp);
+
+ /* resp.seq and resp.status can be updated even though it failed to process the command */
+ if (resp_seq)
+ *resp_seq = resp.seq;
+ if (resp_status)
+ *resp_status = resp.status;
+
+ return ret;
+}
+
+static int gxp_mailbox_manager_execute_cmd_async(
+ struct gxp_client *client, struct gxp_mailbox *mailbox, int virt_core,
+ u16 cmd_code, u8 cmd_priority, u64 cmd_daddr, u32 cmd_size,
+ u32 cmd_flags, struct gxp_power_states power_states, u64 *cmd_seq)
+{
+ struct gxp_command cmd;
+ struct buffer_descriptor buffer;
+ struct mailbox_resp_queue *resp_queue =
+ &client->vd->mailbox_resp_queues[virt_core];
+ struct gxp_eventfd *eventfd = client->mb_eventfds[virt_core];
+ int ret;
+
+ /* Pack the command structure */
+ buffer.address = cmd_daddr;
+ buffer.size = cmd_size;
+ buffer.flags = cmd_flags;
+ /* cmd.seq is assigned by mailbox implementation */
+ cmd.code = cmd_code; /* All IOCTL commands are dispatch */
+ cmd.priority = cmd_priority; /* currently unused */
+ cmd.buffer_descriptor = buffer;
+
+ ret = gxp_mailbox_execute_cmd_async(
+ mailbox, &cmd, &resp_queue->dest_queue, &resp_queue->lock,
+ &resp_queue->waitq, power_states, eventfd);
+
+ if (cmd_seq)
+ *cmd_seq = cmd.seq;
+
+ return ret;
+}
+
+static int gxp_mailbox_manager_wait_async_resp(struct gxp_client *client,
+ int virt_core, u64 *resp_seq,
+ u16 *resp_status,
+ u32 *resp_retval,
+ u16 *error_code)
+{
+ struct gxp_async_response *resp_ptr;
+ struct mailbox_resp_queue *resp_queue =
+ &client->vd->mailbox_resp_queues[virt_core];
+ long timeout;
+
+ spin_lock_irq(&resp_queue->lock);
+
+ /*
+ * The "exclusive" version of wait_event is used since each wake
+ * corresponds to the addition of exactly one new response to be
+ * consumed. Therefore, only one waiting response ioctl can ever
+ * proceed per wake event.
+ */
+ timeout = wait_event_interruptible_lock_irq_timeout_exclusive(
+ resp_queue->waitq, !list_empty(&resp_queue->dest_queue),
+ resp_queue->lock, msecs_to_jiffies(MAILBOX_TIMEOUT));
+ if (timeout <= 0) {
+ spin_unlock_irq(&resp_queue->lock);
+ /* unusual case - this only happens when there is no command pushed */
+ return timeout ? -ETIMEDOUT : timeout;
+ }
+ resp_ptr = list_first_entry(&resp_queue->dest_queue,
+ struct gxp_async_response, list_entry);
+
+ /* Pop the front of the response list */
+ list_del(&(resp_ptr->list_entry));
+
+ spin_unlock_irq(&resp_queue->lock);
+
+ if (resp_seq)
+ *resp_seq = resp_ptr->resp.seq;
+ if (resp_status)
+ *resp_status = resp_ptr->resp.status;
+
+ switch (resp_ptr->resp.status) {
+ case GXP_RESP_OK:
+ if (error_code)
+ *error_code = GXP_RESPONSE_ERROR_NONE;
+ /* retval is only valid if status == GXP_RESP_OK */
+ if (resp_retval)
+ *resp_retval = resp_ptr->resp.retval;
+ break;
+ case GXP_RESP_CANCELLED:
+ if (error_code)
+ *error_code = GXP_RESPONSE_ERROR_TIMEOUT;
+ break;
+ default:
+ /* No other status values are valid at this point */
+ WARN(true, "Completed response had invalid status %hu",
+ resp_ptr->resp.status);
+ if (error_code)
+ *error_code = GXP_RESPONSE_ERROR_INTERNAL;
+ break;
+ }
+
+ /*
+ * We must be absolutely sure the timeout work has been cancelled
+ * and/or completed before freeing the `gxp_async_response`.
+ * There are 3 possible cases when we arrive at this point:
+ * 1) The response arrived normally and the timeout was cancelled
+ * 2) The response timed out and its timeout handler finished
+ * 3) The response handler and timeout handler raced, and the response
+ * handler "cancelled" the timeout handler while it was already in
+ * progress.
+ *
+ * This call handles case #3, and ensures any in-progress timeout
+ * handler (which may reference the `gxp_async_response`) has
+ * been able to exit cleanly.
+ */
+ cancel_delayed_work_sync(&resp_ptr->timeout_work);
+ kfree(resp_ptr);
+
+ return 0;
+}
+
+static void gxp_mailbox_manager_release_unconsumed_async_resps(
+ struct gxp_virtual_device *vd)
+{
+ struct gxp_async_response *cur, *nxt;
+ int i;
+ unsigned long flags;
+
+ /* Cleanup any unconsumed responses */
+ for (i = 0; i < vd->num_cores; i++) {
+ /*
+ * Since the VD is being released, locking is not strictly necessary here.
+ * Do it anyway for consistency.
+ */
+ spin_lock_irqsave(&vd->mailbox_resp_queues[i].lock, flags);
+ list_for_each_entry_safe (
+ cur, nxt, &vd->mailbox_resp_queues[i].dest_queue,
+ list_entry) {
+ list_del(&cur->list_entry);
+ kfree(cur);
+ }
+ spin_unlock_irqrestore(&vd->mailbox_resp_queues[i].lock, flags);
+ }
+}
+
+static void gxp_mailbox_manager_set_ops(struct gxp_mailbox_manager *mgr)
+{
+ mgr->allocate_mailbox = gxp_mailbox_manager_allocate_mailbox;
+ mgr->release_mailbox = gxp_mailbox_release;
+ mgr->reset_mailbox = gxp_mailbox_reset;
+ mgr->execute_cmd = gxp_mailbox_manager_execute_cmd;
+ mgr->execute_cmd_async = gxp_mailbox_manager_execute_cmd_async;
+ mgr->wait_async_resp = gxp_mailbox_manager_wait_async_resp;
+ mgr->release_unconsumed_async_resps =
+ gxp_mailbox_manager_release_unconsumed_async_resps;
+}
+
+void gxp_mailbox_init(struct gxp_mailbox_manager *mgr)
+{
+ gxp_mailbox_manager_set_ops(mgr);
+}
+
+int gxp_mailbox_init_consume_responses(struct gxp_mailbox *mailbox)
+{
+ mailbox->cur_seq = 0;
+ init_waitqueue_head(&mailbox->wait_list_waitq);
+ INIT_LIST_HEAD(&mailbox->wait_list);
+
+ return 0;
+}
+
+void gxp_mailbox_release_consume_responses(struct gxp_mailbox *mailbox)
+{
+ struct gxp_mailbox_wait_list *cur, *nxt;
+ struct gxp_async_response *async_resp;
+ struct list_head resps_to_flush;
+ unsigned long flags;
+
+ /*
+ * At this point only async responses should be pending. Flush them all
+ * from the `wait_list` at once so any remaining timeout workers
+ * waiting on `wait_list_lock` will know their responses have been
+ * handled already.
+ */
+ INIT_LIST_HEAD(&resps_to_flush);
+ mutex_lock(&mailbox->wait_list_lock);
+ list_for_each_entry_safe (cur, nxt, &mailbox->wait_list, list) {
+ list_del(&cur->list);
+ if (cur->is_async) {
+ list_add_tail(&cur->list, &resps_to_flush);
+ /*
+ * Clear the response's destination queue so that if the
+ * timeout worker is running, it won't try to process
+ * this response after `wait_list_lock` is released.
+ */
+ async_resp = container_of(
+ cur->resp, struct gxp_async_response, resp);
+ spin_lock_irqsave(async_resp->dest_queue_lock, flags);
+ async_resp->dest_queue = NULL;
+ spin_unlock_irqrestore(async_resp->dest_queue_lock,
+ flags);
+
+ } else {
+ dev_warn(
+ mailbox->gxp->dev,
+ "Unexpected synchronous command pending on mailbox release\n");
+ kfree(cur);
+ }
+ }
+ mutex_unlock(&mailbox->wait_list_lock);
+
+ /*
+ * Cancel the timeout timer of, and free, any responses that were still in
+ * the `wait_list` above.
+ */
+ list_for_each_entry_safe (cur, nxt, &resps_to_flush, list) {
+ list_del(&cur->list);
+ async_resp = container_of(cur->resp, struct gxp_async_response,
+ resp);
+ cancel_delayed_work_sync(&async_resp->timeout_work);
+ kfree(async_resp);
+ kfree(cur);
+ }
+}
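The function above follows a common two-phase teardown: entries are detached onto a private list while `wait_list_lock` is held, and the blocking `cancel_delayed_work_sync()` calls happen only after the lock is dropped. A reduced sketch of that shape, with hypothetical types standing in for the driver's own:

#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct example_entry {
	struct list_head list;
	struct delayed_work timeout_work;
};

static void example_flush_pending(struct mutex *lock,
				  struct list_head *pending)
{
	struct example_entry *cur, *nxt;
	LIST_HEAD(to_flush);

	/* Phase 1: detach everything under the lock; no sleeping calls here. */
	mutex_lock(lock);
	list_splice_init(pending, &to_flush);
	mutex_unlock(lock);

	/* Phase 2: blocking cleanup outside the lock. */
	list_for_each_entry_safe(cur, nxt, &to_flush, list) {
		list_del(&cur->list);
		cancel_delayed_work_sync(&cur->timeout_work);
		kfree(cur);
	}
}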
+
+void gxp_mailbox_consume_responses(struct gxp_mailbox *mailbox)
+{
+ struct gxp_response *responses;
+ u32 i;
+ u32 count = 0;
+
+ /* fetch responses and bump RESP_QUEUE_HEAD */
+ responses = gxp_mailbox_fetch_responses(mailbox, &count);
+ if (IS_ERR(responses)) {
+ dev_err(mailbox->gxp->dev,
+ "GXP Mailbox failed on fetching responses: %ld",
+ PTR_ERR(responses));
+ return;
+ }
+
+ for (i = 0; i < count; i++)
+ gxp_mailbox_handle_response(mailbox, &responses[i]);
+ /*
+ * Responses handled, wake up threads that are waiting for a response.
+ */
+ wake_up(&mailbox->wait_list_waitq);
+ kfree(responses);
+}
diff --git a/gxp-mailbox-impl.h b/gxp-mailbox-impl.h
new file mode 100644
index 0000000..2f4b5d8
--- /dev/null
+++ b/gxp-mailbox-impl.h
@@ -0,0 +1,142 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Legacy implementation of the GXP mailbox interface.
+ * This file must be used only when the kernel driver has to compile the mailbox implementation
+ * by itself (i.e., when the target chip can't be compiled with GCIP).
+ *
+ * Copyright (C) 2022 Google LLC
+ */
+
+#ifndef __GXP_MAILBOX_IMPL_H__
+#define __GXP_MAILBOX_IMPL_H__
+
+#include <linux/spinlock_types.h>
+#include <linux/types.h>
+#include <linux/wait.h>
+#include <linux/workqueue.h>
+
+#include "gxp-eventfd.h"
+#include "gxp-pm.h"
+
+/*
+ * Basic Buffer descriptor struct for message payloads.
+ */
+struct buffer_descriptor {
+ /* Address in the device's virtual address space. */
+ u64 address;
+ /* Size in bytes. */
+ u32 size;
+ /* Flags can be used to indicate message type, etc. */
+ u32 flags;
+};
+
+/*
+ * Format used for mailbox command queues.
+ */
+struct gxp_command {
+ /* Sequence number. Should match the corresponding response. */
+ u64 seq;
+ /*
+ * Identifies the type of command.
+ * Should be a value from `gxp_mailbox_command_code`
+ */
+ u16 code;
+ /*
+ * Priority level from 0 to 99, with 0 being the highest. Pending
+ * commands with higher priorities will be executed before lower
+ * priority ones.
+ */
+ u8 priority;
+ /*
+ * Insert spaces to make padding explicit. This does not affect
+ * alignment.
+ */
+ u8 reserved[5];
+ /* Struct describing the buffer containing the message payload */
+ struct buffer_descriptor buffer_descriptor;
+};
+
+/*
+ * Format used for mailbox response queues from kernel.
+ */
+struct gxp_response {
+ /* Sequence number. Should match the corresponding command. */
+ u64 seq;
+ /* The status code. Either SUCCESS or an error. */
+ u16 status;
+ /* Padding. */
+ u16 reserved;
+ /* Return value, dependent on the command this responds to. */
+ u32 retval;
+};
+
+/*
+ * Wrapper struct for responses consumed by a thread other than the one which
+ * sent the command.
+ */
+struct gxp_async_response {
+ struct list_head list_entry;
+ struct gxp_response resp;
+ struct delayed_work timeout_work;
+ /*
+ * If this response times out, this pointer to the owning mailbox is
+ * needed to delete this response from the list of pending responses.
+ */
+ struct gxp_mailbox *mailbox;
+ /* Queue to add the response to once it is complete or timed out */
+ struct list_head *dest_queue;
+ /*
+ * The lock that protects queue pointed to by `dest_queue`.
+ * The mailbox code also uses this lock to protect changes to the
+ * `dest_queue` pointer itself when processing this response.
+ */
+ spinlock_t *dest_queue_lock;
+ /* Queue of clients to notify when this response is processed */
+ wait_queue_head_t *dest_queue_waitq;
+ /* Specified power states vote during the command execution */
+ struct gxp_power_states requested_states;
+ /* gxp_eventfd to signal when the response completes. May be NULL */
+ struct gxp_eventfd *eventfd;
+};
+
+struct gxp_mailbox_wait_list {
+ struct list_head list;
+ struct gxp_response *resp;
+ bool is_async;
+};
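Since `resp` in an asynchronous wait-list entry points at the `resp` member embedded in a `struct gxp_async_response`, the wrapper can be recovered with `container_of()`, which is how the implementation files above locate the async bookkeeping. A small hedged helper illustrating that lookup (the helper name is illustrative, not a driver symbol):

static struct gxp_async_response *
example_entry_to_async_resp(struct gxp_mailbox_wait_list *entry)
{
	/* Only meaningful for entries queued by an asynchronous command. */
	if (!entry->is_async)
		return NULL;
	return container_of(entry->resp, struct gxp_async_response, resp);
}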
+
+struct gxp_mailbox;
+struct gxp_mailbox_args;
+struct gxp_mailbox_manager;
+
+extern const struct gxp_mailbox_args gxp_mailbox_default_args;
+
+/* Initializes the operators of @mgr to work with the legacy implementation of the mailbox. */
+void gxp_mailbox_init(struct gxp_mailbox_manager *mgr);
+
+/*
+ * The following functions will be called by `gxp-mailbox.c` according to its internal logic.
+ * Do not call these functions directly.
+ */
+
+/*
+ * Initializes the mailbox to be able to wait for and consume responses.
+ * This function will be called when the `gxp_mailbox_alloc` function is called.
+ */
+int gxp_mailbox_init_consume_responses(struct gxp_mailbox *mailbox);
+
+/*
+ * Flushes all pending responses in the mailbox.
+ * This function will be called when the `gxp_mailbox_release` function is called.
+ */
+void gxp_mailbox_release_consume_responses(struct gxp_mailbox *mailbox);
+
+/*
+ * Fetches and handles responses, then wakes up threads that are waiting for a response.
+ * This function will be called by a worker which is scheduled in the IRQ handler. (See the
+ * `gxp_mailbox_consume_responses_work` function.) To prevent use-after-free or race-condition
+ * bugs, gxp_mailbox_release() must be called before freeing the mailbox.
+ */
+void gxp_mailbox_consume_responses(struct gxp_mailbox *mailbox);
+
+#endif /* __GXP_MAILBOX_IMPL_H__ */
diff --git a/gxp-mailbox-manager.c b/gxp-mailbox-manager.c
new file mode 100644
index 0000000..1085a51
--- /dev/null
+++ b/gxp-mailbox-manager.c
@@ -0,0 +1,32 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * The mailbox manager abstracts the mailbox interfaces for user commands.
+ *
+ * Copyright (C) 2022 Google LLC
+ */
+
+#include "gxp-mailbox-driver.h"
+#include "gxp-mailbox-manager.h"
+#include "gxp-mailbox.h"
+
+struct gxp_mailbox_manager *gxp_mailbox_create_manager(struct gxp_dev *gxp,
+ uint num_cores)
+{
+ struct gxp_mailbox_manager *mgr;
+
+ mgr = devm_kzalloc(gxp->dev, sizeof(*mgr), GFP_KERNEL);
+ if (!mgr)
+ return ERR_PTR(-ENOMEM);
+
+ mgr->gxp = gxp;
+ mgr->num_cores = num_cores;
+ mgr->get_mailbox_csr_base = gxp_mailbox_get_csr_base;
+ mgr->get_mailbox_data_base = gxp_mailbox_get_data_base;
+
+ mgr->mailboxes = devm_kcalloc(gxp->dev, mgr->num_cores,
+ sizeof(*mgr->mailboxes), GFP_KERNEL);
+ if (!mgr->mailboxes)
+ return ERR_PTR(-ENOMEM);
+
+ return mgr;
+}
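A hedged sketch of how a probe path might call this allocator; the `mailbox_mgr` field name on `struct gxp_dev` is assumed here for illustration:

static int example_setup_mailbox_mgr(struct gxp_dev *gxp, uint num_cores)
{
	struct gxp_mailbox_manager *mgr;

	mgr = gxp_mailbox_create_manager(gxp, num_cores);
	if (IS_ERR(mgr))
		return PTR_ERR(mgr);

	/*
	 * Both allocations inside gxp_mailbox_create_manager() are
	 * devm-managed, so no explicit free is needed on later error
	 * paths of the caller.
	 */
	gxp->mailbox_mgr = mgr;	/* assumed field name */
	return 0;
}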
diff --git a/gxp-mailbox-manager.h b/gxp-mailbox-manager.h
new file mode 100644
index 0000000..24cd16b
--- /dev/null
+++ b/gxp-mailbox-manager.h
@@ -0,0 +1,137 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * The mailbox manager abstracts the mailbox interfaces for user commands.
+ *
+ * Copyright (C) 2022 Google LLC
+ */
+
+#ifndef __GXP_MAILBOX_MANAGER_H__
+#define __GXP_MAILBOX_MANAGER_H__
+
+#include "gxp-internal.h"
+
+struct gxp_mailbox;
+
+typedef void __iomem *(*get_mailbox_base_t)(struct gxp_dev *gxp, uint index);
+
+/*
+ * The following callbacks are used to manipulate the mailbox when communicating with the
+ * firmware. By using these callbacks instead of calling the functions of each interface directly,
+ * we can abstract the mailbox and reduce the effort of updating code outside of the mailbox when
+ * the mailbox is refactored in the future.
+ */
+
+/*
+ * Called to allocate a mailbox. The mailbox will be released by `release_mailbox_t`.
+ *
+ * Returns a pointer to the allocated mailbox or an error pointer if an error occurred.
+ *
+ * This callback is required if the device is in direct mode, otherwise it is optional.
+ */
+typedef struct gxp_mailbox *(*allocate_mailbox_t)(
+ struct gxp_mailbox_manager *mgr, struct gxp_virtual_device *vd,
+ uint virt_core, u8 core_id);
+
+/*
+ * Called to release @mailbox previously allocated by `allocate_mailbox_t`.
+ *
+ * This callback is required if the device is in direct mode, otherwise it is optional.
+ */
+typedef void (*release_mailbox_t)(struct gxp_mailbox_manager *mgr,
+ struct gxp_virtual_device *vd, uint virt_core,
+ struct gxp_mailbox *mailbox);
+
+/* Called to reset the @mailbox. */
+typedef void (*reset_mailbox_t)(struct gxp_mailbox *mailbox);
+
+/*
+ * Called to request a synchronous command. This callback will be called from the
+ * `gxp_debugfs_mailbox` function. The response will be returned via @resp_seq and @resp_status,
+ * and the `retval` of `struct gxp_response` will be returned as the return value of this
+ * function. Pass NULL to @resp_seq and @resp_status if the result is not needed. See
+ * `struct gxp_response` for details.
+ *
+ * Returns the value `retval` of `struct gxp_response` when the request succeeds. Otherwise,
+ * returns a negative value as an error.
+ *
+ * This callback is always required regardless of the device mode.
+ */
+typedef int (*execute_cmd_t)(struct gxp_client *client,
+ struct gxp_mailbox *mailbox, int virt_core,
+ u16 cmd_code, u8 cmd_priority, u64 cmd_daddr,
+ u32 cmd_size, u32 cmd_flags, u8 num_cores,
+ struct gxp_power_states power_states,
+ u64 *resp_seq, u16 *resp_status);
+
+/*
+ * Called to request an asynchronous command. This callback will be called when the
+ * `GXP_MAILBOX_COMMAND_COMPAT` or `GXP_MAILBOX_COMMAND` ioctls are fired. The sequence number of
+ * the command will be returned via @cmd_seq. @eventfd will be signalled when the response
+ * arrives.
+ *
+ * Returns a non-zero value when an error occurs while putting the command into the cmd_queue of
+ * the mailbox.
+ *
+ * This callback is required if the device is in direct mode, otherwise it is optional.
+ */
+typedef int (*execute_cmd_async_t)(struct gxp_client *client,
+ struct gxp_mailbox *mailbox, int virt_core,
+ u16 cmd_code, u8 cmd_priority, u64 cmd_daddr,
+ u32 cmd_size, u32 cmd_flags,
+ struct gxp_power_states power_states,
+ u64 *cmd_seq);
+
+/*
+ * Called to wait for an asynchronous response which was requested by `execute_cmd_async`.
+ * This callback will be called when the `GXP_MAILBOX_RESPONSE` ioctl is fired. The response will
+ * be returned via @resp_seq, @resp_status and @resp_retval. Pass NULL to them if the result is
+ * not needed. See `struct gxp_response` for details. The error code corresponding to the
+ * response status will be set in @error_code.
+ *
+ * Returns 0 if it succeeds in getting the response. Otherwise, returns a non-zero value as an
+ * error.
+ *
+ * This callback is required if the device is in direct mode, otherwise it is optional.
+ */
+typedef int (*wait_async_resp_t)(struct gxp_client *client, int virt_core,
+ u64 *resp_seq, u16 *resp_status,
+ u32 *resp_retval, u16 *error_code);
+
+/*
+ * Called to clean up unconsumed async responses in the queue which have arrived or timed out.
+ * This callback will be called when the @vd is released.
+ *
+ * This callback is always required regardless of the device mode.
+ */
+typedef void (*release_unconsumed_async_resps_t)(struct gxp_virtual_device *vd);
+
+/*
+ * This structure manages how the mailbox works with user commands.
+ * How the mailbox works depends on what kind of interface is used by the device.
+ * To minimize the effort of updating code outside of the mailbox, it abstracts the interfaces
+ * by defining the callbacks above.
+ */
+struct gxp_mailbox_manager {
+ struct gxp_dev *gxp;
+ u8 num_cores;
+ struct gxp_mailbox **mailboxes;
+ get_mailbox_base_t get_mailbox_csr_base;
+ get_mailbox_base_t get_mailbox_data_base;
+ allocate_mailbox_t allocate_mailbox;
+ release_mailbox_t release_mailbox;
+ reset_mailbox_t reset_mailbox;
+ execute_cmd_t execute_cmd;
+ execute_cmd_async_t execute_cmd_async;
+ wait_async_resp_t wait_async_resp;
+ release_unconsumed_async_resps_t release_unconsumed_async_resps;
+};
+
+/*
+ * Allocate the mailbox manager.
+ *
+ * In general, only one mailbox manager will be used by @gxp. What kind of mailbox interface will
+ * be used is decided internally.
+ */
+struct gxp_mailbox_manager *gxp_mailbox_create_manager(struct gxp_dev *gxp,
+ uint num_cores);
+
+#endif /* __GXP_MAILBOX_MANAGER_H__ */
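Chip- or mode-specific code is expected to fill in the callbacks after the manager is created. A minimal sketch, with placeholder handlers that only satisfy the typedef signatures (none of these names exist in the driver):

static int example_execute_cmd(struct gxp_client *client,
			       struct gxp_mailbox *mailbox, int virt_core,
			       u16 cmd_code, u8 cmd_priority, u64 cmd_daddr,
			       u32 cmd_size, u32 cmd_flags, u8 num_cores,
			       struct gxp_power_states power_states,
			       u64 *resp_seq, u16 *resp_status)
{
	/* Placeholder: a real handler would enqueue and wait for the command. */
	return -EOPNOTSUPP;
}

static void
example_release_unconsumed_async_resps(struct gxp_virtual_device *vd)
{
	/* Placeholder: a real handler would free any pending responses. */
}

static void example_set_manager_ops(struct gxp_mailbox_manager *mgr)
{
	/* execute_cmd and release_unconsumed_async_resps are required in every mode. */
	mgr->execute_cmd = example_execute_cmd;
	mgr->release_unconsumed_async_resps =
		example_release_unconsumed_async_resps;
}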
diff --git a/gxp-mailbox-regs.h b/gxp-mailbox-regs.h
index 5d83b5e..05fb414 100644
--- a/gxp-mailbox-regs.h
+++ b/gxp-mailbox-regs.h
@@ -7,34 +7,9 @@
#ifndef __GXP_MAILBOX_REGS_H__
#define __GXP_MAILBOX_REGS_H__
-/* Mailbox CSRs */
-#define MBOX_MCUCTLR_OFFSET 0x0000
-
-#define MBOX_INTGR0_OFFSET 0x0020
-#define MBOX_INTCR0_OFFSET 0x0024
-#define MBOX_INTMR0_OFFSET 0x0028
-#define MBOX_INTSR0_OFFSET 0x002C
-#define MBOX_INTMSR0_OFFSET 0x0030
-
-#define MBOX_INTGR1_OFFSET 0x0040
-#define MBOX_INTCR1_OFFSET 0x0044
-#define MBOX_INTMR1_OFFSET 0x0048
-#define MBOX_INTSR1_OFFSET 0x004C
-#define MBOX_INTMSR1_OFFSET 0x0050
-
-/* Mailbox Shared Data Registers */
-#define MBOX_DATA_REG_BASE 0x0080
-
-#define MBOX_STATUS_OFFSET 0x00
-#define MBOX_DESCRIPTOR_ADDR_OFFSET 0x04
-#define MBOX_CMD_TAIL_RESP_HEAD_OFFSET 0x08
-#define MBOX_CMD_HEAD_RESP_TAIL_OFFSET 0x0C
-
-#define MBOX_REGS_SIZE 0x180
-
/*
* Macros for separating out the command queue tail and response queue head in
- * the `MBOX_CMD_TAIL_RESP_HEAD_OFFSET` register.
+ * the `MBOX_DATA_CMD_TAIL_RESP_HEAD_OFFSET` register.
*/
#define CMD_TAIL_SHIFT 16
#define RESP_HEAD_SHIFT 0
@@ -43,7 +18,7 @@
/*
* Macros for separating out the command queue head and response queue tail in
- * the `MBOX_CMD_HEAD_RESP_TAIL_OFFSET` register.
+ * the `MBOX_DATA_CMD_HEAD_RESP_TAIL_OFFSET` register.
*/
#define CMD_HEAD_SHIFT 16
#define RESP_TAIL_SHIFT 0
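These shift values pair with a 16-bit field width when the combined CSRs are read or written; the 0xFFFF masks below are an assumption for illustration, since the header does not define mask macros:

static inline void example_unpack_cmd_tail_resp_head(u32 reg, u16 *cmd_tail,
						     u16 *resp_head)
{
	*cmd_tail = (reg >> CMD_TAIL_SHIFT) & 0xFFFF;
	*resp_head = (reg >> RESP_HEAD_SHIFT) & 0xFFFF;
}

static inline u32 example_pack_cmd_tail_resp_head(u16 cmd_tail, u16 resp_head)
{
	return ((u32)cmd_tail << CMD_TAIL_SHIFT) |
	       ((u32)resp_head << RESP_HEAD_SHIFT);
}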
diff --git a/gxp-mailbox.c b/gxp-mailbox.c
index aa28fc0..0e0b365 100644
--- a/gxp-mailbox.c
+++ b/gxp-mailbox.c
@@ -14,344 +14,28 @@
#include <linux/slab.h>
#include <uapi/linux/sched/types.h>
+#include "gxp-config.h" /* GXP_USE_LEGACY_MAILBOX */
#include "gxp-dma.h"
#include "gxp-internal.h"
#include "gxp-mailbox.h"
#include "gxp-mailbox-driver.h"
#include "gxp-pm.h"
+#include "gxp.h"
+
+#if GXP_USE_LEGACY_MAILBOX
+#include "gxp-mailbox-impl.h"
+#else
+#include <gcip/gcip-mailbox.h>
+#include <gcip/gcip-kci.h>
+
+#include "gxp-kci.h"
+#include "gxp-mcu-telemetry.h"
+#endif
/* Timeout of 1s by default */
int gxp_mbx_timeout = 1000;
module_param_named(mbx_timeout, gxp_mbx_timeout, int, 0660);
-/* Utilities of circular queue operations */
-
-#define CIRCULAR_QUEUE_WRAP_BIT BIT(15)
-#define CIRCULAR_QUEUE_INDEX_MASK (CIRCULAR_QUEUE_WRAP_BIT - 1)
-#define CIRCULAR_QUEUE_WRAPPED(idx) ((idx) & CIRCULAR_QUEUE_WRAP_BIT)
-#define CIRCULAR_QUEUE_REAL_INDEX(idx) ((idx) & CIRCULAR_QUEUE_INDEX_MASK)
-
-#define MBOX_CMD_QUEUE_NUM_ENTRIES 1024
-#define MBOX_CMD_QUEUE_SIZE \
- (sizeof(struct gxp_command) * MBOX_CMD_QUEUE_NUM_ENTRIES)
-
-#define MBOX_RESP_QUEUE_NUM_ENTRIES 1024
-#define MBOX_RESP_QUEUE_SIZE \
- (sizeof(struct gxp_response) * MBOX_RESP_QUEUE_NUM_ENTRIES)
-
-/*
- * Returns the number of elements in a circular queue given its @head, @tail,
- * and @queue_size.
- */
-static inline u32 circular_queue_count(u32 head, u32 tail, u32 queue_size)
-{
- if (CIRCULAR_QUEUE_WRAPPED(tail) != CIRCULAR_QUEUE_WRAPPED(head))
- return queue_size - CIRCULAR_QUEUE_REAL_INDEX(head) +
- CIRCULAR_QUEUE_REAL_INDEX(tail);
- else
- return tail - head;
-}
-
-/* Increases @index of a circular queue by @inc. */
-static inline u32 circular_queue_inc(u32 index, u32 inc, u32 queue_size)
-{
- u32 new_index = CIRCULAR_QUEUE_REAL_INDEX(index) + inc;
-
- if (new_index >= queue_size)
- return (index + inc - queue_size) ^ CIRCULAR_QUEUE_WRAP_BIT;
- else
- return index + inc;
-}
-
-/* Sets mailbox->cmd_queue_tail and corresponding CSR on device. */
-static void gxp_mailbox_set_cmd_queue_tail(struct gxp_mailbox *mailbox,
- u32 value)
-{
- mailbox->cmd_queue_tail = value;
- gxp_mailbox_write_cmd_queue_tail(mailbox, value);
-}
-
-/* Sets mailbox->resp_queue_head and corresponding CSR on device. */
-static void gxp_mailbox_set_resp_queue_head(struct gxp_mailbox *mailbox,
- u32 value)
-{
- mailbox->resp_queue_head = value;
- gxp_mailbox_write_resp_queue_head(mailbox, value);
-}
-
-/*
- * Increases the command queue tail by @inc.
- *
- * The queue uses the mirrored circular buffer arrangement. Each index (head and
- * tail) has a wrap bit, represented by the constant CIRCULAR_QUEUE_WRAP_BIT.
- * Whenever an index is increased and will exceed the end of the queue, the wrap
- * bit is xor-ed.
- *
- * This method will update both mailbox->cmd_queue_tail and CSR on device.
- *
- * Returns 0 on success.
- * If command queue tail will exceed command queue head after adding @inc,
- * -EBUSY is returned and all fields remain unchanged. The caller should
- * handle this case and implement a mechanism to wait until the consumer
- * consumes commands.
- *
- * Caller must hold cmd_queue_lock.
- */
-static int gxp_mailbox_inc_cmd_queue_tail(struct gxp_mailbox *mailbox, u32 inc)
-{
- u32 head;
- u32 remain_size;
- u32 new_tail;
-
- lockdep_assert_held(&mailbox->cmd_queue_lock);
-
- if (inc > mailbox->cmd_queue_size)
- return -EINVAL;
-
- head = gxp_mailbox_read_cmd_queue_head(mailbox);
- remain_size = mailbox->cmd_queue_size -
- circular_queue_count(head, mailbox->cmd_queue_tail,
- mailbox->cmd_queue_size);
- /* no enough space left */
- if (inc > remain_size)
- return -EBUSY;
-
- new_tail = circular_queue_inc(mailbox->cmd_queue_tail, inc,
- mailbox->cmd_queue_size);
- gxp_mailbox_set_cmd_queue_tail(mailbox, new_tail);
- return 0;
-}
-
-/*
- * Increases the response queue head by @inc.
- *
- * The queue uses the mirrored circular buffer arrangement. Each index (head and
- * tail) has a wrap bit, represented by the constant CIRCULAR_QUEUE_WRAP_BIT.
- * Whenever an index is increased and will exceed the end of the queue, the wrap
- * bit is xor-ed.
- *
- * This method will update both mailbox->resp_queue_head and CSR on device.
- *
- * Returns 0 on success.
- * -EINVAL is returned if the queue head will exceed tail of queue, and no
- * fields or CSR is updated in this case.
- *
- * Caller must hold resp_queue_lock.
- */
-static int gxp_mailbox_inc_resp_queue_head(struct gxp_mailbox *mailbox, u32 inc)
-{
- u32 tail;
- u32 size;
- u32 new_head;
-
- lockdep_assert_held(&mailbox->resp_queue_lock);
-
- if (inc > mailbox->resp_queue_size)
- return -EINVAL;
-
- tail = gxp_mailbox_read_resp_queue_tail(mailbox);
- size = circular_queue_count(mailbox->resp_queue_head, tail,
- mailbox->resp_queue_size);
- if (inc > size)
- return -EINVAL;
- new_head = circular_queue_inc(mailbox->resp_queue_head, inc,
- mailbox->resp_queue_size);
- gxp_mailbox_set_resp_queue_head(mailbox, new_head);
-
- return 0;
-}
-
-struct gxp_mailbox_manager *gxp_mailbox_create_manager(struct gxp_dev *gxp,
- uint num_cores)
-{
- struct gxp_mailbox_manager *mgr;
-
- mgr = devm_kzalloc(gxp->dev, sizeof(*mgr), GFP_KERNEL);
- if (!mgr)
- return ERR_PTR(-ENOMEM);
-
- mgr->gxp = gxp;
- mgr->num_cores = num_cores;
- mgr->get_mailbox_csr_base = gxp_mailbox_get_csr_base;
- mgr->get_mailbox_data_base = gxp_mailbox_get_data_base;
-
- mgr->mailboxes = devm_kcalloc(gxp->dev, mgr->num_cores,
- sizeof(*mgr->mailboxes), GFP_KERNEL);
- if (!mgr->mailboxes)
- return ERR_PTR(-ENOMEM);
-
- return mgr;
-}
-
-/*
- * Pops the wait_list until the sequence number of @resp is found, and copies
- * @resp to the found entry.
- *
- * Entries in wait_list should have sequence number in increasing order, but
- * the responses arriving and being handled may be out-of-order.
- *
- * Iterate over the wait_list, comparing #cur->resp->seq with @resp->seq:
- * 1. #cur->resp->seq > @resp->seq:
- * - Nothing to do, either @resp is invalid or its command timed out.
- * - We're done.
- * 2. #cur->resp->seq == @resp->seq:
- * - Copy @resp, pop the head.
- * - If #cur->resp has a destination queue, push it to that queue
- * - We're done.
- * 3. #cur->resp->seq < @resp->seq:
- * - @resp has arrived out of sequence order.
- * - Leave #cur->resp in the wait_list.
- * - Keep iterating unless the list is exhausted.
- */
-static void gxp_mailbox_handle_response(struct gxp_mailbox *mailbox,
- const struct gxp_response *resp)
-{
- struct gxp_mailbox_wait_list *cur, *nxt;
- struct gxp_async_response *async_resp;
- unsigned long flags;
-
- mutex_lock(&mailbox->wait_list_lock);
-
- list_for_each_entry_safe(cur, nxt, &mailbox->wait_list, list) {
- if (cur->resp->seq > resp->seq) {
- /*
- * This response has already timed out and been removed
- * from the wait list (or this is an invalid response).
- * Drop it.
- */
- break;
- }
- if (cur->resp->seq == resp->seq) {
- memcpy(cur->resp, resp, sizeof(*resp));
- list_del(&cur->list);
- if (cur->is_async) {
- async_resp =
- container_of(cur->resp,
- struct gxp_async_response,
- resp);
-
- cancel_delayed_work(&async_resp->timeout_work);
- gxp_pm_update_requested_power_states(
- async_resp->mailbox->gxp,
- async_resp->gxp_power_state,
- async_resp->requested_low_clkmux,
- AUR_OFF, false,
- async_resp->memory_power_state,
- AUR_MEM_UNDEFINED);
-
- spin_lock_irqsave(async_resp->dest_queue_lock,
- flags);
-
- list_add_tail(&async_resp->list_entry,
- async_resp->dest_queue);
- /*
- * Marking the dest_queue as NULL indicates the
- * response was handled in case its timeout
- * handler fired between acquiring the
- * wait_list_lock and cancelling the timeout.
- */
- async_resp->dest_queue = NULL;
-
- /*
- * Don't release the dest_queue_lock until both
- * any eventfd has been signaled and any waiting
- * thread has been woken. Otherwise one thread
- * might consume and free the response before
- * this function is done with it.
- */
- if (async_resp->eventfd) {
- gxp_eventfd_signal(async_resp->eventfd);
- gxp_eventfd_put(async_resp->eventfd);
- }
-
- wake_up(async_resp->dest_queue_waitq);
-
- spin_unlock_irqrestore(
- async_resp->dest_queue_lock, flags);
-
- }
- kfree(cur);
- break;
- }
- }
-
- mutex_unlock(&mailbox->wait_list_lock);
-}
-
-/*
- * Fetches elements in the response queue.
- *
- * Returns the pointer of fetched response elements.
- * @total_ptr will be the number of elements fetched.
- *
- * Returns -ENOMEM if failed on memory allocation.
- * Returns NULL if the response queue is empty.
- */
-static struct gxp_response *
-gxp_mailbox_fetch_responses(struct gxp_mailbox *mailbox, u32 *total_ptr)
-{
- u32 head;
- u32 tail;
- u32 count;
- u32 i;
- u32 j;
- u32 total = 0;
- const u32 size = mailbox->resp_queue_size;
- const struct gxp_response *queue = mailbox->resp_queue;
- struct gxp_response *ret = NULL;
- struct gxp_response *prev_ptr = NULL;
-
- mutex_lock(&mailbox->resp_queue_lock);
-
- head = mailbox->resp_queue_head;
- /* loop until our head equals to CSR tail */
- while (1) {
- tail = gxp_mailbox_read_resp_queue_tail(mailbox);
- count = circular_queue_count(head, tail, size);
- if (count == 0)
- break;
-
- prev_ptr = ret;
- ret = krealloc(prev_ptr, (total + count) * sizeof(*queue),
- GFP_KERNEL);
- /*
- * Out-of-memory, we can return the previously fetched responses
- * if any, or ENOMEM otherwise.
- */
- if (!ret) {
- if (!prev_ptr)
- ret = ERR_PTR(-ENOMEM);
- else
- ret = prev_ptr;
- break;
- }
- /* copy responses */
- j = CIRCULAR_QUEUE_REAL_INDEX(head);
- for (i = 0; i < count; i++) {
- memcpy(&ret[total], &queue[j], sizeof(*queue));
- ret[total].status = GXP_RESP_OK;
- j = (j + 1) % size;
- total++;
- }
- head = circular_queue_inc(head, count, size);
- }
- gxp_mailbox_inc_resp_queue_head(mailbox, total);
-
- mutex_unlock(&mailbox->resp_queue_lock);
- /*
- * Now that the response queue has been drained, send an interrupt
- * to the device in case firmware was waiting for us to consume
- * responses.
- */
- if (total == size) {
- /* TODO(b/190868834) define interrupt bits */
- gxp_mailbox_generate_device_interrupt(mailbox, BIT(0));
- }
-
- *total_ptr = total;
- return ret;
-}
-
/*
* Fetches and handles responses, then wakes up threads that are waiting for a
* response.
@@ -364,26 +48,21 @@ static void gxp_mailbox_consume_responses_work(struct kthread_work *work)
{
struct gxp_mailbox *mailbox =
container_of(work, struct gxp_mailbox, response_work);
- struct gxp_response *responses;
- u32 i;
- u32 count = 0;
-
- /* fetch responses and bump RESP_QUEUE_HEAD */
- responses = gxp_mailbox_fetch_responses(mailbox, &count);
- if (IS_ERR(responses)) {
- dev_err(mailbox->gxp->dev,
- "GXP Mailbox failed on fetching responses: %ld",
- PTR_ERR(responses));
- return;
- }
- for (i = 0; i < count; i++)
- gxp_mailbox_handle_response(mailbox, &responses[i]);
- /*
- * Responses handled, wake up threads that are waiting for a response.
- */
- wake_up(&mailbox->wait_list_waitq);
- kfree(responses);
+#if GXP_USE_LEGACY_MAILBOX
+ gxp_mailbox_consume_responses(mailbox);
+#else
+ switch (mailbox->type) {
+ case GXP_MBOX_TYPE_GENERAL:
+ gcip_mailbox_consume_responses_work(mailbox->mbx_impl.gcip_mbx);
+ break;
+ case GXP_MBOX_TYPE_KCI:
+ gcip_kci_handle_irq(mailbox->mbx_impl.gcip_kci);
+ gxp_mcu_telemetry_irq_handler(
+ ((struct gxp_kci *)mailbox->data)->mcu);
+ break;
+ }
+#endif
}
/*
@@ -398,8 +77,8 @@ static inline void gxp_mailbox_handle_irq(struct gxp_mailbox *mailbox)
/* Priority level for realtime worker threads */
#define GXP_RT_THREAD_PRIORITY 2
-static struct task_struct *
-create_response_rt_thread(struct device *dev, void *data, int core_id)
+static struct task_struct *create_response_rt_thread(struct device *dev,
+ void *data, int core_id)
{
static const struct sched_param param = {
.sched_priority = GXP_RT_THREAD_PRIORITY,
@@ -420,66 +99,72 @@ create_response_rt_thread(struct device *dev, void *data, int core_id)
return task;
}
+static int gxp_mailbox_set_ops(struct gxp_mailbox *mailbox,
+ struct gxp_mailbox_ops *ops)
+{
+ if (!ops) {
+ dev_err(mailbox->gxp->dev, "Incomplete gxp_mailbox ops.\n");
+ return -EINVAL;
+ }
+
+ mailbox->ops = ops;
+
+ return 0;
+}
+
+static inline void gxp_mailbox_set_data(struct gxp_mailbox *mailbox, void *data)
+{
+ mailbox->data = data;
+}
+
static struct gxp_mailbox *create_mailbox(struct gxp_mailbox_manager *mgr,
struct gxp_virtual_device *vd,
- uint virt_core, u8 core_id)
+ uint virt_core, u8 core_id,
+ const struct gxp_mailbox_args *args)
{
struct gxp_mailbox *mailbox;
+ int ret;
+
+ if (!args) {
+ dev_err(mgr->gxp->dev, "Incomplete gxp_mailbox args.\n");
+ ret = -EINVAL;
+ goto err_args;
+ }
mailbox = kzalloc(sizeof(*mailbox), GFP_KERNEL);
- if (!mailbox)
+ if (!mailbox) {
+ ret = -ENOMEM;
goto err_mailbox;
+ }
mailbox->core_id = core_id;
mailbox->gxp = mgr->gxp;
mailbox->csr_reg_base = mgr->get_mailbox_csr_base(mgr->gxp, core_id);
mailbox->data_reg_base = mgr->get_mailbox_data_base(mgr->gxp, core_id);
+ mailbox->type = args->type;
+ mailbox->queue_wrap_bit = args->queue_wrap_bit;
+ mailbox->cmd_elem_size = args->cmd_elem_size;
+ mailbox->resp_elem_size = args->resp_elem_size;
+ mailbox->ignore_seq_order = args->ignore_seq_order;
+ gxp_mailbox_set_data(mailbox, args->data);
+
+ ret = gxp_mailbox_set_ops(mailbox, args->ops);
+ if (ret)
+ goto err_set_ops;
- /* Allocate and initialize the command queue */
- mailbox->cmd_queue = (struct gxp_command *)gxp_dma_alloc_coherent(
- mailbox->gxp, vd, BIT(virt_core),
- sizeof(struct gxp_command) * MBOX_CMD_QUEUE_NUM_ENTRIES,
- &(mailbox->cmd_queue_device_addr), GFP_KERNEL, 0);
- if (!mailbox->cmd_queue)
- goto err_cmd_queue;
+ ret = mailbox->ops->allocate_resources(mailbox, vd, virt_core);
+ if (ret)
+ goto err_allocate_resources;
- mailbox->cmd_queue_size = MBOX_CMD_QUEUE_NUM_ENTRIES;
- mailbox->cmd_queue_tail = 0;
mutex_init(&mailbox->cmd_queue_lock);
-
- /* Allocate and initialize the response queue */
- mailbox->resp_queue = (struct gxp_response *)gxp_dma_alloc_coherent(
- mailbox->gxp, vd, BIT(virt_core),
- sizeof(struct gxp_response) * MBOX_RESP_QUEUE_NUM_ENTRIES,
- &(mailbox->resp_queue_device_addr), GFP_KERNEL, 0);
- if (!mailbox->resp_queue)
- goto err_resp_queue;
-
- mailbox->resp_queue_size = MBOX_RESP_QUEUE_NUM_ENTRIES;
- mailbox->resp_queue_head = 0;
mutex_init(&mailbox->resp_queue_lock);
-
- /* Allocate and initialize the mailbox descriptor */
- mailbox->descriptor =
- (struct gxp_mailbox_descriptor *)gxp_dma_alloc_coherent(
- mailbox->gxp, vd, BIT(virt_core),
- sizeof(struct gxp_mailbox_descriptor),
- &(mailbox->descriptor_device_addr), GFP_KERNEL, 0);
- if (!mailbox->descriptor)
- goto err_descriptor;
-
- mailbox->descriptor->cmd_queue_device_addr =
- mailbox->cmd_queue_device_addr;
- mailbox->descriptor->resp_queue_device_addr =
- mailbox->resp_queue_device_addr;
- mailbox->descriptor->cmd_queue_size = mailbox->cmd_queue_size;
- mailbox->descriptor->resp_queue_size = mailbox->resp_queue_size;
-
kthread_init_worker(&mailbox->response_worker);
mailbox->response_thread = create_response_rt_thread(
mailbox->gxp->dev, &mailbox->response_worker, core_id);
- if (IS_ERR(mailbox->response_thread))
+ if (IS_ERR(mailbox->response_thread)) {
+ ret = -ENOMEM;
goto err_thread;
+ }
/* Initialize driver before interacting with its registers */
gxp_mailbox_driver_init(mailbox);
@@ -487,73 +172,236 @@ static struct gxp_mailbox *create_mailbox(struct gxp_mailbox_manager *mgr,
return mailbox;
err_thread:
- gxp_dma_free_coherent(mailbox->gxp, vd, BIT(virt_core),
- sizeof(struct gxp_mailbox_descriptor),
- mailbox->descriptor,
- mailbox->descriptor_device_addr);
-err_descriptor:
- gxp_dma_free_coherent(
- mailbox->gxp, vd, BIT(virt_core),
- sizeof(struct gxp_response) * mailbox->resp_queue_size,
- mailbox->resp_queue, mailbox->resp_queue_device_addr);
-err_resp_queue:
- gxp_dma_free_coherent(
- mailbox->gxp, vd, BIT(virt_core),
- sizeof(struct gxp_command) * mailbox->cmd_queue_size,
- mailbox->cmd_queue, mailbox->cmd_queue_device_addr);
-err_cmd_queue:
+ mailbox->ops->release_resources(mailbox, vd, virt_core);
+err_allocate_resources:
+err_set_ops:
kfree(mailbox);
err_mailbox:
- return ERR_PTR(-ENOMEM);
+err_args:
+ return ERR_PTR(ret);
+}
+
+static void release_mailbox(struct gxp_mailbox *mailbox,
+ struct gxp_virtual_device *vd, uint virt_core)
+{
+ if (IS_GXP_TEST && !mailbox)
+ return;
+ mailbox->ops->release_resources(mailbox, vd, virt_core);
+ kthread_flush_worker(&mailbox->response_worker);
+ if (mailbox->response_thread)
+ kthread_stop(mailbox->response_thread);
+ kfree(mailbox);
+}
+
+#if !GXP_USE_LEGACY_MAILBOX
+static int init_gcip_mailbox(struct gxp_mailbox *mailbox)
+{
+ const struct gcip_mailbox_args args = {
+ .dev = mailbox->gxp->dev,
+ .queue_wrap_bit = mailbox->queue_wrap_bit,
+ .cmd_queue = mailbox->cmd_queue_buf.vaddr,
+ .cmd_elem_size = mailbox->cmd_elem_size,
+ .resp_queue = mailbox->resp_queue_buf.vaddr,
+ .resp_elem_size = mailbox->resp_elem_size,
+ .timeout = MAILBOX_TIMEOUT,
+ .ops = mailbox->ops->gcip_ops.mbx,
+ .data = mailbox,
+ .ignore_seq_order = mailbox->ignore_seq_order,
+ };
+ struct gcip_mailbox *gcip_mbx;
+ int ret;
+
+ gcip_mbx = kzalloc(sizeof(*gcip_mbx), GFP_KERNEL);
+ if (!gcip_mbx)
+ return -ENOMEM;
+
+ /* Initialize gcip_mailbox */
+ ret = gcip_mailbox_init(gcip_mbx, &args);
+ if (ret) {
+ kfree(gcip_mbx);
+ return ret;
+ }
+
+ mailbox->mbx_impl.gcip_mbx = gcip_mbx;
+
+ return 0;
+}
+
+static void release_gcip_mailbox(struct gxp_mailbox *mailbox)
+{
+ struct gcip_mailbox *gcip_mbx = mailbox->mbx_impl.gcip_mbx;
+
+ if (gcip_mbx == NULL)
+ return;
+
+ gcip_mailbox_release(gcip_mbx);
+ kfree(gcip_mbx);
+ mailbox->mbx_impl.gcip_mbx = NULL;
+}
+
+static int init_gcip_kci(struct gxp_mailbox *mailbox)
+{
+ const struct gcip_kci_args args = {
+ .dev = mailbox->gxp->dev,
+ .cmd_queue = mailbox->cmd_queue_buf.vaddr,
+ .resp_queue = mailbox->resp_queue_buf.vaddr,
+ .queue_wrap_bit = mailbox->queue_wrap_bit,
+ .rkci_buffer_size = GXP_REVERSE_KCI_BUFFER_SIZE,
+ .timeout = GXP_KCI_TIMEOUT,
+ .ops = mailbox->ops->gcip_ops.kci,
+ .data = mailbox,
+ };
+ struct gcip_kci *gcip_kci;
+ int ret;
+
+ gcip_kci = kzalloc(sizeof(*gcip_kci), GFP_KERNEL);
+ if (!gcip_kci)
+ return -ENOMEM;
+
+ ret = gcip_kci_init(gcip_kci, &args);
+ if (ret) {
+ kfree(gcip_kci);
+ return ret;
+ }
+
+ mailbox->mbx_impl.gcip_kci = gcip_kci;
+
+ return 0;
+}
+
+static void release_gcip_kci(struct gxp_mailbox *mailbox)
+{
+ struct gcip_kci *gcip_kci = mailbox->mbx_impl.gcip_kci;
+
+ if (gcip_kci == NULL)
+ return;
+
+ gcip_kci_cancel_work_queues(gcip_kci);
+ gcip_kci_release(gcip_kci);
+ kfree(gcip_kci);
+ mailbox->mbx_impl.gcip_kci = NULL;
+}
+#endif /* !GXP_USE_LEGACY_MAILBOX */
+
+/*
+ * Initializes @mailbox->mbx_impl to start waiting and consuming responses.
+ * This initializes the GCIP mailbox modules according to the type of @mailbox.
+ * - GENERAL: initializes @mailbox->mbx_impl.gcip_mbx
+ * - KCI: initializes @mailbox->mbx_impl.gcip_kci
+ *
+ * Note: On `GXP_USE_LEGACY_MAILBOX`, it will initialize @mailbox itself as its
+ * queuing logic is implemented in `gxp-mailbox-impl.c`.
+ */
+static int init_mailbox_impl(struct gxp_mailbox *mailbox)
+{
+ int ret;
+
+#if GXP_USE_LEGACY_MAILBOX
+ if (mailbox->type != GXP_MBOX_TYPE_GENERAL)
+ return -EOPNOTSUPP;
+
+ ret = gxp_mailbox_init_consume_responses(mailbox);
+ if (ret)
+ return ret;
+#else
+ switch (mailbox->type) {
+ case GXP_MBOX_TYPE_GENERAL:
+ ret = init_gcip_mailbox(mailbox);
+ if (ret)
+ return ret;
+ break;
+ case GXP_MBOX_TYPE_KCI:
+ ret = init_gcip_kci(mailbox);
+ if (ret)
+ return ret;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+#endif /* GXP_USE_LEGACY_MAILBOX */
+
+ return 0;
}
-static void enable_mailbox(struct gxp_mailbox *mailbox)
+static int enable_mailbox(struct gxp_mailbox *mailbox)
{
- gxp_mailbox_write_descriptor(mailbox, mailbox->descriptor_device_addr);
+ int ret;
+
+ gxp_mailbox_write_descriptor(mailbox, mailbox->descriptor_buf.dsp_addr);
gxp_mailbox_write_cmd_queue_head(mailbox, 0);
gxp_mailbox_write_cmd_queue_tail(mailbox, 0);
gxp_mailbox_write_resp_queue_head(mailbox, 0);
gxp_mailbox_write_resp_queue_tail(mailbox, 0);
+ ret = init_mailbox_impl(mailbox);
+ if (ret)
+ return ret;
+
mailbox->handle_irq = gxp_mailbox_handle_irq;
- mailbox->cur_seq = 0;
- init_waitqueue_head(&mailbox->wait_list_waitq);
- INIT_LIST_HEAD(&mailbox->wait_list);
mutex_init(&mailbox->wait_list_lock);
- kthread_init_work(&mailbox->response_work, gxp_mailbox_consume_responses_work);
+ kthread_init_work(&mailbox->response_work,
+ gxp_mailbox_consume_responses_work);
/* Only enable interrupts once everything has been setup */
gxp_mailbox_driver_enable_interrupts(mailbox);
/* Enable the mailbox */
gxp_mailbox_write_status(mailbox, 1);
- /* TODO(b/190868834) define interrupt bits */
- gxp_mailbox_generate_device_interrupt(mailbox, BIT(0));
+
+ return 0;
}
struct gxp_mailbox *gxp_mailbox_alloc(struct gxp_mailbox_manager *mgr,
struct gxp_virtual_device *vd,
- uint virt_core, u8 core_id)
+ uint virt_core, u8 core_id,
+ const struct gxp_mailbox_args *args)
{
struct gxp_mailbox *mailbox;
+ int ret;
- mailbox = create_mailbox(mgr, vd, virt_core, core_id);
+ mailbox = create_mailbox(mgr, vd, virt_core, core_id, args);
if (IS_ERR(mailbox))
return mailbox;
- enable_mailbox(mailbox);
+ ret = enable_mailbox(mailbox);
+ if (ret) {
+ release_mailbox(mailbox, vd, virt_core);
+ return ERR_PTR(ret);
+ }
return mailbox;
}
+/*
+ * Releases the @mailbox->mbx_impl to flush all pending responses in the wait
+ * list.
+ * This releases GCIP mailbox modules according to the type of @mailbox.
+ * - GENERAL: releases @mailbox->mbx_impl.gcip_mbx
+ * - KCI: releases @mailbox->mbx_impl.gcip_kci
+ *
+ * Note: On `GXP_USE_LEGACY_MAILBOX`, it will release @mailbox itself as its
+ * queuing logic is implemented in `gxp-mailbox-impl.c`.
+ */
+static void release_mailbox_impl(struct gxp_mailbox *mailbox)
+{
+#if GXP_USE_LEGACY_MAILBOX
+ gxp_mailbox_release_consume_responses(mailbox);
+#else
+ switch (mailbox->type) {
+ case GXP_MBOX_TYPE_GENERAL:
+ release_gcip_mailbox(mailbox);
+ break;
+ case GXP_MBOX_TYPE_KCI:
+ release_gcip_kci(mailbox);
+ break;
+ }
+#endif
+}
+
void gxp_mailbox_release(struct gxp_mailbox_manager *mgr,
struct gxp_virtual_device *vd, uint virt_core,
struct gxp_mailbox *mailbox)
{
int i;
- struct gxp_mailbox_wait_list *cur, *nxt;
- struct gxp_async_response *async_resp;
- struct list_head resps_to_flush;
- unsigned long flags;
if (!mailbox) {
dev_err(mgr->gxp->dev,
@@ -576,51 +424,7 @@ void gxp_mailbox_release(struct gxp_mailbox_manager *mgr,
cancel_work_sync(mailbox->interrupt_handlers[i]);
}
- /*
- * At this point only async responses should be pending. Flush them all
- * from the `wait_list` at once so any remaining timeout workers
- * waiting on `wait_list_lock` will know their responses have been
- * handled already.
- */
- INIT_LIST_HEAD(&resps_to_flush);
- mutex_lock(&mailbox->wait_list_lock);
- list_for_each_entry_safe(cur, nxt, &mailbox->wait_list, list) {
- list_del(&cur->list);
- if (cur->is_async) {
- list_add_tail(&cur->list, &resps_to_flush);
- /*
- * Clear the response's destination queue so that if the
- * timeout worker is running, it won't try to process
- * this response after `wait_list_lock` is released.
- */
- async_resp = container_of(
- cur->resp, struct gxp_async_response, resp);
- spin_lock_irqsave(async_resp->dest_queue_lock, flags);
- async_resp->dest_queue = NULL;
- spin_unlock_irqrestore(async_resp->dest_queue_lock,
- flags);
-
- } else {
- dev_warn(
- mailbox->gxp->dev,
- "Unexpected synchronous command pending on mailbox release\n");
- kfree(cur);
- }
- }
- mutex_unlock(&mailbox->wait_list_lock);
-
- /*
- * Cancel the timeout timer of and free any responses that were still in
- * the `wait_list` above.
- */
- list_for_each_entry_safe(cur, nxt, &resps_to_flush, list) {
- list_del(&cur->list);
- async_resp = container_of(cur->resp, struct gxp_async_response,
- resp);
- cancel_delayed_work_sync(&async_resp->timeout_work);
- kfree(async_resp);
- kfree(cur);
- }
+ release_mailbox_impl(mailbox);
/* Reset the mailbox HW */
gxp_mailbox_reset_hw(mailbox);
@@ -637,21 +441,7 @@ void gxp_mailbox_release(struct gxp_mailbox_manager *mgr,
mgr->mailboxes[mailbox->core_id] = NULL;
/* Clean up resources */
- gxp_dma_free_coherent(
- mailbox->gxp, vd, BIT(virt_core),
- sizeof(struct gxp_command) * mailbox->cmd_queue_size,
- mailbox->cmd_queue, mailbox->cmd_queue_device_addr);
- gxp_dma_free_coherent(
- mailbox->gxp, vd, BIT(virt_core),
- sizeof(struct gxp_response) * mailbox->resp_queue_size,
- mailbox->resp_queue, mailbox->resp_queue_device_addr);
- gxp_dma_free_coherent(mailbox->gxp, vd, BIT(virt_core),
- sizeof(struct gxp_mailbox_descriptor),
- mailbox->descriptor,
- mailbox->descriptor_device_addr);
- kthread_flush_worker(&mailbox->response_worker);
- kthread_stop(mailbox->response_thread);
- kfree(mailbox);
+ release_mailbox(mailbox, vd, virt_core);
}
void gxp_mailbox_reset(struct gxp_mailbox *mailbox)
@@ -659,241 +449,6 @@ void gxp_mailbox_reset(struct gxp_mailbox *mailbox)
dev_notice(mailbox->gxp->dev, "%s not yet implemented\n", __func__);
}
-/*
- * Adds @resp to @mailbox->wait_list.
- *
- * wait_list is a FIFO queue, with sequence number in increasing order.
- *
- * Returns 0 on success, or -ENOMEM if failed on allocation.
- */
-static int gxp_mailbox_push_wait_resp(struct gxp_mailbox *mailbox,
- struct gxp_response *resp, bool is_async)
-{
- struct gxp_mailbox_wait_list *entry =
- kzalloc(sizeof(*entry), GFP_KERNEL);
-
- if (!entry)
- return -ENOMEM;
- entry->resp = resp;
- entry->is_async = is_async;
- mutex_lock(&mailbox->wait_list_lock);
- list_add_tail(&entry->list, &mailbox->wait_list);
- mutex_unlock(&mailbox->wait_list_lock);
-
- return 0;
-}
-
-/*
- * Removes the response previously pushed with gxp_mailbox_push_wait_resp().
- *
- * This is used when the kernel gives up waiting for the response.
- */
-static void gxp_mailbox_del_wait_resp(struct gxp_mailbox *mailbox,
- struct gxp_response *resp)
-{
- struct gxp_mailbox_wait_list *cur;
-
- mutex_lock(&mailbox->wait_list_lock);
-
- list_for_each_entry(cur, &mailbox->wait_list, list) {
- if (cur->resp->seq > resp->seq) {
- /*
- * Sequence numbers in wait_list are in increasing
- * order. This case implies no entry in the list
- * matches @resp's sequence number.
- */
- break;
- }
- if (cur->resp->seq == resp->seq) {
- list_del(&cur->list);
- kfree(cur);
- break;
- }
- }
-
- mutex_unlock(&mailbox->wait_list_lock);
-}
-
-static int gxp_mailbox_enqueue_cmd(struct gxp_mailbox *mailbox,
- struct gxp_command *cmd,
- struct gxp_response *resp,
- bool resp_is_async)
-{
- int ret;
- u32 tail;
-
- mutex_lock(&mailbox->cmd_queue_lock);
-
- cmd->seq = mailbox->cur_seq;
- /*
- * The lock ensures mailbox->cmd_queue_tail cannot be changed by
- * other processes (this method should be the only one to modify the
- * value of tail), therefore we can remember its value here and use it
- * in various places below.
- */
- tail = mailbox->cmd_queue_tail;
-
- /*
- * If the cmd queue is full, it's up to the caller to retry.
- */
- if (gxp_mailbox_read_cmd_queue_head(mailbox) ==
- (tail ^ CIRCULAR_QUEUE_WRAP_BIT)) {
- ret = -EAGAIN;
- goto out;
- }
-
- if (resp) {
- /*
- * Add @resp to the wait_list only if the cmd can be pushed
- * successfully.
- */
- resp->seq = cmd->seq;
- resp->status = GXP_RESP_WAITING;
- ret = gxp_mailbox_push_wait_resp(mailbox, resp, resp_is_async);
- if (ret)
- goto out;
- }
- /* size of cmd_queue is a multiple of sizeof(*cmd) */
- memcpy(mailbox->cmd_queue + CIRCULAR_QUEUE_REAL_INDEX(tail), cmd,
- sizeof(*cmd));
- gxp_mailbox_inc_cmd_queue_tail(mailbox, 1);
- /* triggers doorbell */
- /* TODO(b/190868834) define interrupt bits */
- gxp_mailbox_generate_device_interrupt(mailbox, BIT(0));
- /* bumps sequence number after the command is sent */
- mailbox->cur_seq++;
- ret = 0;
-out:
- mutex_unlock(&mailbox->cmd_queue_lock);
- if (ret)
- dev_err(mailbox->gxp->dev, "%s: ret=%d", __func__, ret);
-
- return ret;
-}
-
-int gxp_mailbox_execute_cmd(struct gxp_mailbox *mailbox,
- struct gxp_command *cmd, struct gxp_response *resp)
-{
- int ret;
-
- ret = gxp_mailbox_enqueue_cmd(mailbox, cmd, resp,
- /* resp_is_async = */ false);
- if (ret)
- return ret;
- ret = wait_event_timeout(mailbox->wait_list_waitq,
- resp->status != GXP_RESP_WAITING,
- msecs_to_jiffies(MAILBOX_TIMEOUT));
- if (!ret) {
- dev_notice(mailbox->gxp->dev, "%s: event wait timeout",
- __func__);
- gxp_mailbox_del_wait_resp(mailbox, resp);
- return -ETIMEDOUT;
- }
- if (resp->status != GXP_RESP_OK) {
- dev_notice(mailbox->gxp->dev, "%s: resp status=%u", __func__,
- resp->status);
- return -ENOMSG;
- }
-
- return resp->retval;
-}
-
-static void async_cmd_timeout_work(struct work_struct *work)
-{
- struct gxp_async_response *async_resp = container_of(
- work, struct gxp_async_response, timeout_work.work);
- unsigned long flags;
-
- /*
- * This function will acquire the mailbox wait_list_lock. This means if
- * response processing is in progress, it will complete before this
- * response can be removed from the wait list.
- *
- * Once this function has the wait_list_lock, no future response
- * processing will begin until this response has been removed.
- */
- gxp_mailbox_del_wait_resp(async_resp->mailbox, &async_resp->resp);
-
- /*
- * Check if this response still has a valid destination queue, in case
- * an in-progress call to `gxp_mailbox_handle_response()` completed
- * the response while `gxp_mailbox_del_wait_resp()` was waiting for
- * the wait_list_lock.
- */
- spin_lock_irqsave(async_resp->dest_queue_lock, flags);
- if (async_resp->dest_queue) {
- async_resp->resp.status = GXP_RESP_CANCELLED;
- list_add_tail(&async_resp->list_entry, async_resp->dest_queue);
- spin_unlock_irqrestore(async_resp->dest_queue_lock, flags);
-
- gxp_pm_update_requested_power_states(
- async_resp->mailbox->gxp, async_resp->gxp_power_state,
- async_resp->requested_low_clkmux, AUR_OFF, false,
- async_resp->memory_power_state, AUR_MEM_UNDEFINED);
-
- if (async_resp->eventfd) {
- gxp_eventfd_signal(async_resp->eventfd);
- gxp_eventfd_put(async_resp->eventfd);
- }
-
- wake_up(async_resp->dest_queue_waitq);
- } else {
- spin_unlock_irqrestore(async_resp->dest_queue_lock, flags);
- }
-}
-
-int gxp_mailbox_execute_cmd_async(struct gxp_mailbox *mailbox,
- struct gxp_command *cmd,
- struct list_head *resp_queue,
- spinlock_t *queue_lock,
- wait_queue_head_t *queue_waitq,
- uint gxp_power_state, uint memory_power_state,
- bool requested_low_clkmux,
- struct gxp_eventfd *eventfd)
-{
- struct gxp_async_response *async_resp;
- int ret;
-
- async_resp = kzalloc(sizeof(*async_resp), GFP_KERNEL);
- if (!async_resp)
- return -ENOMEM;
-
- async_resp->mailbox = mailbox;
- async_resp->dest_queue = resp_queue;
- async_resp->dest_queue_lock = queue_lock;
- async_resp->dest_queue_waitq = queue_waitq;
- async_resp->gxp_power_state = gxp_power_state;
- async_resp->memory_power_state = memory_power_state;
- async_resp->requested_low_clkmux = requested_low_clkmux;
- if (eventfd && gxp_eventfd_get(eventfd))
- async_resp->eventfd = eventfd;
- else
- async_resp->eventfd = NULL;
-
- INIT_DELAYED_WORK(&async_resp->timeout_work, async_cmd_timeout_work);
- schedule_delayed_work(&async_resp->timeout_work,
- msecs_to_jiffies(MAILBOX_TIMEOUT));
-
- gxp_pm_update_requested_power_states(
- mailbox->gxp, AUR_OFF, false, gxp_power_state,
- requested_low_clkmux, AUR_MEM_UNDEFINED, memory_power_state);
- ret = gxp_mailbox_enqueue_cmd(mailbox, cmd, &async_resp->resp,
- /* resp_is_async = */ true);
- if (ret)
- goto err_free_resp;
-
- return 0;
-
-err_free_resp:
- gxp_pm_update_requested_power_states(mailbox->gxp, gxp_power_state,
- requested_low_clkmux, AUR_OFF, false,
- memory_power_state,
- AUR_MEM_UNDEFINED);
- cancel_delayed_work_sync(&async_resp->timeout_work);
- kfree(async_resp);
- return ret;
-}
-
int gxp_mailbox_register_interrupt_handler(struct gxp_mailbox *mailbox,
u32 int_bit,
struct work_struct *handler)
@@ -908,7 +463,7 @@ int gxp_mailbox_register_interrupt_handler(struct gxp_mailbox *mailbox,
}
int gxp_mailbox_unregister_interrupt_handler(struct gxp_mailbox *mailbox,
- u32 int_bit)
+ u32 int_bit)
{
/* Bit 0 is reserved for incoming mailbox responses */
if (int_bit == 0 || int_bit >= GXP_MAILBOX_INT_BIT_COUNT)
@@ -918,3 +473,31 @@ int gxp_mailbox_unregister_interrupt_handler(struct gxp_mailbox *mailbox,
return 0;
}
+
+#if !GXP_USE_LEGACY_MAILBOX
+int gxp_mailbox_send_cmd(struct gxp_mailbox *mailbox, void *cmd, void *resp)
+{
+ switch (mailbox->type) {
+ case GXP_MBOX_TYPE_GENERAL:
+ return gcip_mailbox_send_cmd(mailbox->mbx_impl.gcip_mbx, cmd,
+ resp);
+ case GXP_MBOX_TYPE_KCI:
+ return gcip_kci_send_cmd(mailbox->mbx_impl.gcip_kci, cmd);
+ }
+ return -EOPNOTSUPP;
+}
+
+struct gcip_mailbox_resp_awaiter *
+gxp_mailbox_put_cmd(struct gxp_mailbox *mailbox, void *cmd, void *resp,
+ void *data)
+{
+ switch (mailbox->type) {
+ case GXP_MBOX_TYPE_GENERAL:
+ return gcip_mailbox_put_cmd(mailbox->mbx_impl.gcip_mbx, cmd,
+ resp, data);
+ default:
+ break;
+ }
+ return ERR_PTR(-EOPNOTSUPP);
+}
+#endif /* !GXP_USE_LEGACY_MAILBOX */
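A short hedged sketch of a caller on the non-legacy path; the command and response buffers are opaque here because their concrete types depend on the element sizes the mailbox was created with:

static int example_send_sync_cmd(struct gxp_mailbox *mailbox, void *cmd,
				 void *resp)
{
	int ret;

	/* Dispatches to gcip_mailbox_send_cmd() or gcip_kci_send_cmd(). */
	ret = gxp_mailbox_send_cmd(mailbox, cmd, resp);
	if (ret)
		dev_warn(mailbox->gxp->dev, "example command failed: %d\n",
			 ret);
	return ret;
}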
diff --git a/gxp-mailbox.h b/gxp-mailbox.h
index 4bea5d7..cf72fbe 100644
--- a/gxp-mailbox.h
+++ b/gxp-mailbox.h
@@ -2,7 +2,7 @@
/*
* GXP mailbox interface.
*
- * Copyright (C) 2020 Google LLC
+ * Copyright (C) 2020-2022 Google LLC
*/
#ifndef __GXP_MAILBOX_H__
#define __GXP_MAILBOX_H__
@@ -10,7 +10,45 @@
#include <linux/kthread.h>
#include "gxp-client.h"
+#include "gxp-config.h" /* GXP_USE_LEGACY_MAILBOX */
+#include "gxp-dma.h"
#include "gxp-internal.h"
+#include "gxp-mailbox-manager.h"
+
+#if !GXP_USE_LEGACY_MAILBOX
+#include <gcip/gcip-kci.h>
+#include <gcip/gcip-mailbox.h>
+#endif
+
+/*
+ * Offset from the host mailbox interface to the device interface that needs to
+ * be mapped.
+ */
+#if defined(CONFIG_GXP_IP_ZEBU) || defined(CONFIG_GXP_GEM5)
+#define MAILBOX_DEVICE_INTERFACE_OFFSET 0x180000
+#else
+#define MAILBOX_DEVICE_INTERFACE_OFFSET 0x10000
+#endif
+
+#define __wait_event_lock_irq_timeout_exclusive(wq_head, condition, lock, \
+ timeout, state) \
+ ___wait_event(wq_head, ___wait_cond_timeout(condition), state, 1, \
+ timeout, spin_unlock_irq(&lock); \
+ __ret = schedule_timeout(__ret); spin_lock_irq(&lock))
+
+/*
+ * wait_event_interruptible_lock_irq_timeout() but set the exclusive flag.
+ */
+#define wait_event_interruptible_lock_irq_timeout_exclusive( \
+ wq_head, condition, lock, timeout) \
+ ({ \
+ long __ret = timeout; \
+ if (!___wait_cond_timeout(condition)) \
+ __ret = __wait_event_lock_irq_timeout_exclusive( \
+ wq_head, condition, lock, timeout, \
+ TASK_INTERRUPTIBLE); \
+ __ret; \
+ })
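A hedged usage sketch of the macro above; the wait queue, lock, and list are illustrative parameters, and the calling convention (spinlock held on entry, dropped while sleeping, held again on return) follows the non-exclusive kernel macro it wraps:

static int example_wait_for_entry(wait_queue_head_t *waitq, spinlock_t *lock,
				  struct list_head *queue)
{
	long ret;

	spin_lock_irq(lock);
	ret = wait_event_interruptible_lock_irq_timeout_exclusive(
		*waitq, !list_empty(queue), *lock,
		msecs_to_jiffies(MAILBOX_TIMEOUT));
	spin_unlock_irq(lock);

	if (!ret)
		return -ETIMEDOUT;	/* condition never became true */
	if (ret < 0)
		return ret;		/* interrupted by a signal */
	return 0;			/* an entry was available */
}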
/* Command/Response Structures */
@@ -21,86 +59,19 @@ enum gxp_mailbox_command_code {
GXP_MBOX_CODE_SUSPEND_REQUEST = 1,
};
-/* Basic Buffer descriptor struct for message payloads. */
-struct buffer_descriptor {
- /* Address in the device's virtual address space. */
- u64 address;
- /* Size in bytes. */
- u32 size;
- /* Flags can be used to indicate message type, etc. */
- u32 flags;
-};
-
-/* Format used for mailbox command queues. */
-struct gxp_command {
- /* Sequence number. Should match the corresponding response. */
- u64 seq;
+enum gxp_mailbox_type {
/*
- * Identifies the type of command.
- * Should be a value from `gxp_mailbox_command_code`
+ * Mailbox will utilize `gcip-mailbox.h` internally.
+ * (Note: On `GXP_USE_LEGACY_MAILBOX`, it utilizes `gxp-mailbox-impl.h`
+ * instead.)
+ * Mostly used for handling user commands.
*/
- u16 code;
+ GXP_MBOX_TYPE_GENERAL = 0,
/*
- * Priority level from 0 to 99, with 0 being the highest. Pending
- * commands with higher priorities will be executed before lower
- * priority ones.
+ * Mailbox will utilize `gcip-kci.h` internally.
+ * Will be used for handling kernel commands.
*/
- u8 priority;
- /*
- * Insert spaces to make padding explicit. This does not affect
- * alignment.
- */
- u8 reserved[5];
- /* Struct describing the buffer containing the message payload */
- struct buffer_descriptor buffer_descriptor;
-};
-
-/* Format used for mailbox response queues from kernel. */
-struct gxp_response {
- /* Sequence number. Should match the corresponding command. */
- u64 seq;
- /* The status code. Either SUCCESS or an error. */
- u16 status;
- /* Padding. */
- u16 reserved;
- /* Return value, dependent on the command this responds to. */
- u32 retval;
-};
-
-/*
- * Wrapper struct for responses consumed by a thread other than the one which
- * sent the command.
- */
-struct gxp_async_response {
- struct list_head list_entry;
- struct gxp_response resp;
- struct delayed_work timeout_work;
- /*
- * If this response times out, this pointer to the owning mailbox is
- * needed to delete this response from the list of pending responses.
- */
- struct gxp_mailbox *mailbox;
- /* Queue to add the response to once it is complete or timed out */
- struct list_head *dest_queue;
- /*
- * The lock that protects queue pointed to by `dest_queue`.
- * The mailbox code also uses this lock to protect changes to the
- * `dest_queue` pointer itself when processing this response.
- */
- spinlock_t *dest_queue_lock;
- /* Queue of clients to notify when this response is processed */
- wait_queue_head_t *dest_queue_waitq;
- /* Specified power state vote during the command execution */
- uint gxp_power_state;
- /* Specified memory power state vote during the command execution */
- uint memory_power_state;
- /*
- * Specified whether the power state vote is requested with low
- * frequency CLKMUX flag.
- */
- bool requested_low_clkmux;
- /* gxp_eventfd to signal when the response completes. May be NULL */
- struct gxp_eventfd *eventfd;
+ GXP_MBOX_TYPE_KCI = 1,
};
enum gxp_response_status {
@@ -109,12 +80,6 @@ enum gxp_response_status {
GXP_RESP_CANCELLED = 2,
};
-struct gxp_mailbox_wait_list {
- struct list_head list;
- struct gxp_response *resp;
- bool is_async;
-};
-
/* Mailbox Structures */
struct gxp_mailbox_descriptor {
u64 cmd_queue_device_addr;
@@ -123,6 +88,72 @@ struct gxp_mailbox_descriptor {
u32 resp_queue_size;
};
+struct gxp_mailbox;
+
+/*
+ * Defines the callback functions which are used by the mailbox.
+ */
+struct gxp_mailbox_ops {
+ /*
+ * Allocates resources such as cmd_queue and resp_queue which are used by the mailbox.
+ * This callback will be called by the `gxp_mailbox_alloc` internally.
+ * Following variables should be set in this callback.
+ * - @mailbox->cmd_queue : the pointer of the command queue.
+ * - @mailbox->cmd_queue_size : the size of @mailbox->cmd_queue. (the maximum number of
+ * command elements.)
+ * - @mailbox->cmd_queue_tail : the initial value of the tail of command queue.
+ * - @mailbox->resp_queue : the pointer of the response queue.
+ * - @mailbox->resp_queue_size : the size of @mailbox->resp_queue. (the maximum number of
+ * response elements.)
+ * - @mailbox->resp_queue_head : the initial value of the head of response queue.
+ * - @mailbox->descriptor : the pointer to the `struct gxp_mailbox_descriptor`
+ * instance.
+ * - @mailbox
+ * ->descriptor_device_addr : the device address of @mailbox->descriptor.
+ * - @mailbox->descriptor
+ * ->cmd_queue_device_addr : the device address of @mailbox->cmd_queue.
+ * - @mailbox->descriptor
+ * ->resp_queue_device_addr : the device address of @mailbox->resp_queue.
+ * - @mailbox->descriptor
+ * ->cmd_queue_size : the size of @mailbox->cmd_queue.
+ * - @mailbox->descriptor
+ * ->resp_queue_size : the size of @mailbox->resp_queue.
+ * Context: normal.
+ */
+ int (*allocate_resources)(struct gxp_mailbox *mailbox,
+ struct gxp_virtual_device *vd,
+ uint virt_core);
+ /*
+ * Releases resources which are allocated by `allocate_resources`.
+ * This callback will be called by `gxp_mailbox_release` internally.
+ * Context: normal.
+ */
+ void (*release_resources)(struct gxp_mailbox *mailbox,
+ struct gxp_virtual_device *vd,
+ uint virt_core);
+#if !GXP_USE_LEGACY_MAILBOX
+ /*
+ * Operators which have a dependency on GCIP according to the type of mailbox.
+ * - GXP_MBOX_TYPE_GENERAL: @gcip_ops.mbx must be defined.
+ * - GXP_MBOX_TYPE_KCI: @gcip_ops.kci must be defined.
+ */
+ union {
+ const struct gcip_mailbox_ops *mbx;
+ const struct gcip_kci_ops *kci;
+ } gcip_ops;
+#endif
+};
+
+struct gxp_mailbox_args {
+ enum gxp_mailbox_type type;
+ struct gxp_mailbox_ops *ops;
+ u64 queue_wrap_bit;
+ u32 cmd_elem_size;
+ u32 resp_elem_size;
+ bool ignore_seq_order;
+ void *data;
+};
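A hedged sketch of filling these arguments for a general (per-core) mailbox before handing them to gxp_mailbox_alloc(); the ops callbacks are stubs, and the wrap bit and element sizes are assumptions made purely for illustration:

static int example_alloc_resources(struct gxp_mailbox *mailbox,
				   struct gxp_virtual_device *vd,
				   uint virt_core)
{
	/* Stub: would allocate cmd/resp queue buffers and the descriptor. */
	return 0;
}

static void example_release_resources(struct gxp_mailbox *mailbox,
				      struct gxp_virtual_device *vd,
				      uint virt_core)
{
	/* Stub: would free the buffers allocated above. */
}

static struct gxp_mailbox_ops example_mailbox_ops = {
	.allocate_resources = example_alloc_resources,
	.release_resources = example_release_resources,
	/* .gcip_ops.mbx would be set here on the non-legacy path. */
};

static const struct gxp_mailbox_args example_mailbox_args = {
	.type = GXP_MBOX_TYPE_GENERAL,
	.ops = &example_mailbox_ops,
	.queue_wrap_bit = BIT(15),	/* assumed wrap bit */
	.cmd_elem_size = 64,		/* assumed element sizes */
	.resp_elem_size = 16,
	.ignore_seq_order = false,
};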
+
#define GXP_MAILBOX_INT_BIT_COUNT 16
struct gxp_mailbox {
@@ -140,42 +171,53 @@ struct gxp_mailbox {
/* Protects to_host_poll_task while it holds a sync barrier */
struct mutex polling_lock;
- u64 cur_seq;
+ u64 queue_wrap_bit; /* wrap bit for both cmd and resp queues */
+ u32 cmd_elem_size; /* size of element of cmd queue */
+ struct gxp_coherent_buf descriptor_buf;
struct gxp_mailbox_descriptor *descriptor;
- dma_addr_t descriptor_device_addr;
- struct gxp_command *cmd_queue;
+ struct gxp_coherent_buf cmd_queue_buf;
u32 cmd_queue_size; /* size of cmd queue */
u32 cmd_queue_tail; /* offset within the cmd queue */
- dma_addr_t cmd_queue_device_addr; /* device address for cmd queue */
struct mutex cmd_queue_lock; /* protects cmd_queue */
- struct gxp_response *resp_queue;
+ u32 resp_elem_size; /* size of element of resp queue */
+ struct gxp_coherent_buf resp_queue_buf;
u32 resp_queue_size; /* size of resp queue */
u32 resp_queue_head; /* offset within the resp queue */
- dma_addr_t resp_queue_device_addr; /* device address for resp queue */
struct mutex resp_queue_lock; /* protects resp_queue */
- /* add to this list if a command needs to wait for a response */
- struct list_head wait_list;
+ /* commands which need to wait for responses will be added to the wait_list */
struct mutex wait_list_lock; /* protects wait_list */
- /* queue for waiting for the wait_list to be consumed */
- wait_queue_head_t wait_list_waitq;
/* to create our own realtime worker for handling responses */
struct kthread_worker response_worker;
struct task_struct *response_thread;
struct kthread_work response_work;
-};
-typedef void __iomem *(*get_mailbox_base_t)(struct gxp_dev *gxp, uint index);
+ enum gxp_mailbox_type type;
+ struct gxp_mailbox_ops *ops;
+ void *data; /* private data */
-struct gxp_mailbox_manager {
- struct gxp_dev *gxp;
- u8 num_cores;
- struct gxp_mailbox **mailboxes;
- get_mailbox_base_t get_mailbox_csr_base;
- get_mailbox_base_t get_mailbox_data_base;
+ bool ignore_seq_order; /* allow out-of-order responses if true (always false in KCI) */
+
+#if GXP_USE_LEGACY_MAILBOX
+ u64 cur_seq;
+ /* add to this list if a command needs to wait for a response */
+ struct list_head wait_list;
+ /* queue for waiting for the wait_list to be consumed */
+ wait_queue_head_t wait_list_waitq;
+#else /* !GXP_USE_LEGACY_MAILBOX */
+ /*
+ * Implementation of the mailbox according to the type.
+ * - GXP_MBOX_TYPE_GENERAL: @gcip_mbx will be allocated.
+ * - GXP_MBOX_TYPE_KCI: @gcip_kci will be allocated.
+ */
+ union {
+ struct gcip_mailbox *gcip_mbx;
+ struct gcip_kci *gcip_kci;
+ } mbx_impl;
+#endif /* GXP_USE_LEGACY_MAILBOX */
};
/* Mailbox APIs */
@@ -183,40 +225,51 @@ struct gxp_mailbox_manager {
extern int gxp_mbx_timeout;
#define MAILBOX_TIMEOUT (gxp_mbx_timeout * GXP_TIME_DELAY_FACTOR)
-struct gxp_mailbox_manager *gxp_mailbox_create_manager(struct gxp_dev *gxp,
- uint num_cores);
-
/*
- * The following functions all require their caller have locked
- * gxp->vd_semaphore for reading.
+ * The following functions are the low-level interfaces of the mailbox. Their actual work is
+ * implemented by the high-level interfaces such as DCI, UCI and KCI via the callbacks defined
+ * above. Therefore, do not call these functions directly, except for the
+ * `gxp_mailbox_{register,unregister}_interrupt_handler` functions.
+ *
+ * If the mailbox implementation interacts with virtual cores, the caller must hold
+ * gxp->vd_semaphore for reading.
*/
struct gxp_mailbox *gxp_mailbox_alloc(struct gxp_mailbox_manager *mgr,
struct gxp_virtual_device *vd,
- uint virt_core, u8 core_id);
+ uint virt_core, u8 core_id,
+ const struct gxp_mailbox_args *args);
+
void gxp_mailbox_release(struct gxp_mailbox_manager *mgr,
struct gxp_virtual_device *vd, uint virt_core,
struct gxp_mailbox *mailbox);
void gxp_mailbox_reset(struct gxp_mailbox *mailbox);
-int gxp_mailbox_execute_cmd(struct gxp_mailbox *mailbox,
- struct gxp_command *cmd, struct gxp_response *resp);
-
-int gxp_mailbox_execute_cmd_async(struct gxp_mailbox *mailbox,
- struct gxp_command *cmd,
- struct list_head *resp_queue,
- spinlock_t *queue_lock,
- wait_queue_head_t *queue_waitq,
- uint gxp_power_state, uint memory_power_state,
- bool requested_low_clkmux,
- struct gxp_eventfd *eventfd);
-
int gxp_mailbox_register_interrupt_handler(struct gxp_mailbox *mailbox,
u32 int_bit,
struct work_struct *handler);
int gxp_mailbox_unregister_interrupt_handler(struct gxp_mailbox *mailbox,
- u32 int_bit);
+ u32 int_bit);
+
+#if !GXP_USE_LEGACY_MAILBOX
+/*
+ * Executes a command synchronously. If @resp is not NULL, the response will be written to it.
+ * See `gcip_mailbox_send_cmd` in `gcip-mailbox.h` or `gcip_kci_send_cmd` in `gcip-kci.h`
+ * for details.
+ */
+int gxp_mailbox_send_cmd(struct gxp_mailbox *mailbox, void *cmd, void *resp);
+
+/*
+ * Executes a command asynchronously. The response will be written to @resp.
+ * See the `gcip_mailbox_put_cmd` function in `gcip-mailbox.h` for details.
+ *
+ * Note: KCI doesn't support asynchronous requests.
+ */
+struct gcip_mailbox_resp_awaiter *
+gxp_mailbox_put_cmd(struct gxp_mailbox *mailbox, void *cmd, void *resp,
+ void *data);
+#endif /* !GXP_USE_LEGACY_MAILBOX */
#endif /* __GXP_MAILBOX_H__ */
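For orientation, below is a minimal caller-side sketch of the reworked mailbox API in the hunks above, assuming GXP_USE_LEGACY_MAILBOX is 0. The ops tables, element layouts, wrap bit and element sizes (my_gcip_mbx_ops, my_mbx_ops, struct my_cmd/my_resp, MY_QUEUE_WRAP_BIT) are hypothetical placeholders, not values taken from this patch; only the struct fields and function signatures come from the header above.

/* Hypothetical usage sketch of the new gxp_mailbox_args-based API; not part of this patch. */
#include <linux/bits.h>
#include "gxp-mailbox.h"

/* Placeholder command/response element layouts, for illustration only. */
struct my_cmd { u64 seq; u32 code; };
struct my_resp { u64 seq; u16 status; };

#define MY_QUEUE_WRAP_BIT BIT_ULL(10)	/* placeholder wrap bit */

/* GXP_MBOX_TYPE_GENERAL mailboxes must provide @gcip_ops.mbx. */
static const struct gcip_mailbox_ops my_gcip_mbx_ops = {
	/* gcip callbacks elided */
};

static struct gxp_mailbox_ops my_mbx_ops = {
	/* other gxp_mailbox_ops callbacks elided */
	.gcip_ops.mbx = &my_gcip_mbx_ops,
};

static struct gxp_mailbox *my_open_mailbox(struct gxp_mailbox_manager *mgr,
					   struct gxp_virtual_device *vd,
					   uint virt_core, u8 core_id)
{
	const struct gxp_mailbox_args args = {
		.type = GXP_MBOX_TYPE_GENERAL,
		.ops = &my_mbx_ops,
		.queue_wrap_bit = MY_QUEUE_WRAP_BIT,
		.cmd_elem_size = sizeof(struct my_cmd),
		.resp_elem_size = sizeof(struct my_resp),
		.ignore_seq_order = false,
		.data = NULL,
	};

	/* Caller holds gxp->vd_semaphore for reading when required. */
	return gxp_mailbox_alloc(mgr, vd, virt_core, core_id, &args);
}

A command would then be issued through gxp_mailbox_send_cmd() (synchronous) or gxp_mailbox_put_cmd() (asynchronous; not supported for KCI), rather than through the removed gxp_mailbox_execute_cmd*() helpers.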
diff --git a/gxp-mapping.c b/gxp-mapping.c
index 9a69173..0188fad 100644
--- a/gxp-mapping.c
+++ b/gxp-mapping.c
@@ -15,7 +15,6 @@
#include "gxp-dma.h"
#include "gxp-internal.h"
#include "gxp-mapping.h"
-#include "mm-backport.h"
/* Destructor for a mapping created with `gxp_mapping_create()` */
static void destroy_mapping(struct gxp_mapping *mapping)
@@ -35,9 +34,9 @@ static void destroy_mapping(struct gxp_mapping *mapping)
* user requires a mapping be synced before unmapping, they are
* responsible for calling `gxp_mapping_sync()` before hand.
*/
- gxp_dma_unmap_sg(mapping->gxp, mapping->vd, mapping->virt_core_list,
- mapping->sgt.sgl, mapping->sgt.orig_nents,
- mapping->dir, DMA_ATTR_SKIP_CPU_SYNC);
+ gxp_dma_unmap_sg(mapping->gxp, mapping->domain, mapping->sgt.sgl,
+ mapping->sgt.orig_nents, mapping->dir,
+ DMA_ATTR_SKIP_CPU_SYNC);
/* Unpin the user pages */
for_each_sg_page(mapping->sgt.sgl, &sg_iter, mapping->sgt.orig_nents,
@@ -57,9 +56,8 @@ static void destroy_mapping(struct gxp_mapping *mapping)
}
struct gxp_mapping *gxp_mapping_create(struct gxp_dev *gxp,
- struct gxp_virtual_device *vd,
- uint virt_core_list, u64 user_address,
- size_t size, u32 flags,
+ struct gxp_iommu_domain *domain,
+ u64 user_address, size_t size, u32 flags,
enum dma_data_direction dir)
{
struct gxp_mapping *mapping = NULL;
@@ -154,8 +152,7 @@ struct gxp_mapping *gxp_mapping_create(struct gxp_dev *gxp,
mapping->destructor = destroy_mapping;
mapping->host_address = user_address;
mapping->gxp = gxp;
- mapping->virt_core_list = virt_core_list;
- mapping->vd = vd;
+ mapping->domain = domain;
mapping->size = size;
mapping->gxp_dma_flags = flags;
mapping->dir = dir;
@@ -168,8 +165,8 @@ struct gxp_mapping *gxp_mapping_create(struct gxp_dev *gxp,
}
/* map the user pages */
- ret = gxp_dma_map_sg(gxp, mapping->vd, mapping->virt_core_list,
- mapping->sgt.sgl, mapping->sgt.nents, mapping->dir,
+ ret = gxp_dma_map_sg(gxp, mapping->domain, mapping->sgt.sgl,
+ mapping->sgt.nents, mapping->dir,
DMA_ATTR_SKIP_CPU_SYNC, mapping->gxp_dma_flags);
if (!ret) {
dev_err(gxp->dev, "Failed to map sgt (ret=%d)\n", ret);
diff --git a/gxp-mapping.h b/gxp-mapping.h
index dbb80d9..18454e6 100644
--- a/gxp-mapping.h
+++ b/gxp-mapping.h
@@ -27,8 +27,7 @@ struct gxp_mapping {
*/
u64 host_address;
struct gxp_dev *gxp;
- uint virt_core_list;
- struct gxp_virtual_device *vd;
+ struct gxp_iommu_domain *domain;
/*
* `device_address` and `size` are the base address and size of the
* user buffer a mapping represents.
@@ -58,8 +57,7 @@ struct gxp_mapping {
/**
* gxp_mapping_create() - Create a mapping for a user buffer
* @gxp: The GXP device to create the mapping for
- * @vd: The virtual device to create the mapping for
- * @virt_core_list: A bitfield indicating the cores in @vd to map the buffer to
+ * @domain: The IOMMU domain to create the mapping for
* @user_address: The user-space address of the buffer to map
* @size: The size of the buffer to be mapped
* @flags: Flags describing the type of mapping to create; currently unused
@@ -76,9 +74,8 @@ struct gxp_mapping {
* to map the buffer for the device.
*/
struct gxp_mapping *gxp_mapping_create(struct gxp_dev *gxp,
- struct gxp_virtual_device *vd,
- uint virt_core_list, u64 user_address,
- size_t size, u32 flags,
+ struct gxp_iommu_domain *domain,
+ u64 user_address, size_t size, u32 flags,
enum dma_data_direction dir);
/**
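A short, hypothetical caller sketch of the reworked gxp_mapping_create() signature: the mapping is now keyed by a struct gxp_iommu_domain rather than a vd plus virtual core list. The wrapper name and the DMA direction below are illustrative; the gxp_mapping_put() call mirrors how the existing ioctl paths drop the creation reference after storing the mapping.

/* Hypothetical wrapper showing the domain-based gxp_mapping_create(); not part of this patch. */
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include "gxp-mapping.h"

static int my_map_user_buffer(struct gxp_dev *gxp,
			      struct gxp_iommu_domain *domain,
			      u64 user_address, size_t size)
{
	struct gxp_mapping *map;

	/* Previously took (vd, virt_core_list); now only the IOMMU domain. */
	map = gxp_mapping_create(gxp, domain, user_address, size,
				 /*flags=*/0, DMA_BIDIRECTIONAL);
	if (IS_ERR(map))
		return PTR_ERR(map);

	/* ... record the mapping somewhere (e.g. in the VD) ... */

	/* Drop the reference taken at creation once it has been stored. */
	gxp_mapping_put(map);
	return 0;
}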
diff --git a/gxp-mba-driver.c b/gxp-mba-driver.c
new file mode 100644
index 0000000..cb91092
--- /dev/null
+++ b/gxp-mba-driver.c
@@ -0,0 +1,73 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * GXP mailbox array driver implementation.
+ *
+ * Copyright (C) 2022 Google LLC
+ */
+
+#include <asm/barrier.h>
+#include <linux/bitops.h>
+#include <linux/interrupt.h>
+#include <linux/kthread.h>
+#include <linux/of_irq.h>
+#include <linux/spinlock.h>
+
+#include "gxp-config.h"
+#include "gxp-mailbox-driver.h"
+#include "gxp-mailbox.h"
+
+#include "gxp-mailbox-driver.c"
+
+/* gxp-mailbox-driver.h: CSR-based calls */
+
+static u32 csr_read(struct gxp_mailbox *mailbox, uint reg_offset)
+{
+ return readl(mailbox->csr_reg_base + reg_offset);
+}
+
+static void csr_write(struct gxp_mailbox *mailbox, uint reg_offset, u32 value)
+{
+ writel(value, mailbox->csr_reg_base + reg_offset);
+}
+
+void gxp_mailbox_reset_hw(struct gxp_mailbox *mailbox)
+{
+ //TODO(b/261670165): check if client flush is required.
+}
+
+void gxp_mailbox_generate_device_interrupt(struct gxp_mailbox *mailbox,
+ u32 int_mask)
+{
+ /*
+ * Ensure all memory writes have been committed to memory before
+ * signalling the device to read from them. This avoids the scenario
+ * where the interrupt trigger write reaches the MBX HW before the
+ * preceding writes reach DRAM, since those are Normal memory
+ * transactions and can be re-ordered and held back behind other
+ * transfers.
+ */
+ wmb();
+
+ csr_write(mailbox, MBOX_CLIENT_IRQ_TRIG, 0x1);
+}
+
+u32 gxp_mailbox_get_device_mask_status(struct gxp_mailbox *mailbox)
+{
+ return csr_read(mailbox, MBOX_CLIENT_SHDW);
+}
+
+void gxp_mailbox_clear_host_interrupt(struct gxp_mailbox *mailbox, u32 int_mask)
+{
+ /* Write 1 to clear */
+ csr_write(mailbox, MBOX_CLIENT_IRQ_STATUS, 0x1);
+}
+
+void gxp_mailbox_mask_host_interrupt(struct gxp_mailbox *mailbox, u32 int_mask)
+{
+ csr_write(mailbox, MBOX_CLIENT_IRQ_CFG, MBOX_CLIENT_IRQ_MSG_INT);
+}
+
+u32 gxp_mailbox_get_host_mask_status(struct gxp_mailbox *mailbox)
+{
+ return csr_read(mailbox, MBOX_CLIENT_IRQ_CFG);
+}
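The wmb() reasoning in gxp_mailbox_generate_device_interrupt() above is the general "publish the data, then ring the doorbell" ordering rule. The sketch below is a generic illustration only; the queue layout, MY_QUEUE_DEPTH and the doorbell pointer are invented and do not come from this driver.

/* Generic producer-side ordering sketch; all names here are hypothetical. */
#include <linux/io.h>
#include <asm/barrier.h>

#define MY_QUEUE_DEPTH 16

struct my_elem { u64 seq; u32 code; };
struct my_queue { struct my_elem slots[MY_QUEUE_DEPTH]; u32 tail; };

static void publish_then_ring(struct my_queue *q, const struct my_elem *e,
			      void __iomem *doorbell)
{
	/* Normal (cacheable) memory writes: may be re-ordered vs. MMIO. */
	q->slots[q->tail % MY_QUEUE_DEPTH] = *e;
	q->tail++;

	/* Commit the queue writes before the doorbell write below. */
	wmb();

	/* Only now is it safe for the device to read the queue. */
	writel(1, doorbell);
}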
diff --git a/gxp-notification.h b/gxp-notification.h
index a4e4fd3..6f43b70 100644
--- a/gxp-notification.h
+++ b/gxp-notification.h
@@ -14,7 +14,7 @@
enum gxp_notification_to_host_type {
HOST_NOTIF_MAILBOX_RESPONSE = 0,
HOST_NOTIF_DEBUG_DUMP_READY = 1,
- HOST_NOTIF_TELEMETRY_STATUS = 2,
+ HOST_NOTIF_CORE_TELEMETRY_STATUS = 2,
HOST_NOTIF_MAX
};
diff --git a/gxp-platform.c b/gxp-platform.c
index 5da0eb2..0f56b9c 100644
--- a/gxp-platform.c
+++ b/gxp-platform.c
@@ -5,2310 +5,35 @@
* Copyright (C) 2021 Google LLC
*/
-#if IS_ENABLED(CONFIG_SUBSYSTEM_COREDUMP)
-#include <linux/platform_data/sscoredump.h>
-#endif
-
-#include <linux/acpi.h>
-#include <linux/cred.h>
#include <linux/device.h>
-#include <linux/dma-mapping.h>
-#include <linux/file.h>
-#include <linux/fs.h>
-#include <linux/genalloc.h>
-#include <linux/kthread.h>
-#include <linux/log2.h>
-#include <linux/miscdevice.h>
-#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
-#include <linux/pm_runtime.h>
-#include <linux/sched.h>
-#include <linux/uaccess.h>
-#include <linux/uidgid.h>
-#if (IS_ENABLED(CONFIG_GXP_TEST) || IS_ENABLED(CONFIG_ANDROID)) && !IS_ENABLED(CONFIG_GXP_GEM5)
-#include <soc/google/tpu-ext.h>
-#endif
-#include "gxp-client.h"
-#include "gxp-config.h"
-#include "gxp-debug-dump.h"
-#include "gxp-debugfs.h"
-#include "gxp-dma.h"
-#include "gxp-dmabuf.h"
-#include "gxp-domain-pool.h"
-#include "gxp-firmware.h"
-#include "gxp-firmware-data.h"
#include "gxp-internal.h"
-#include "gxp-mailbox.h"
-#include "gxp-mailbox-driver.h"
-#include "gxp-mapping.h"
-#include "gxp-notification.h"
-#include "gxp-pm.h"
-#include "gxp-telemetry.h"
-#include "gxp-thermal.h"
-#include "gxp-vd.h"
-#include "gxp-wakelock.h"
-#include "gxp.h"
-
-static struct gxp_dev *gxp_debug_pointer;
-
-#define __wait_event_lock_irq_timeout_exclusive(wq_head, condition, lock, \
- timeout, state) \
- ___wait_event(wq_head, ___wait_cond_timeout(condition), state, 1, \
- timeout, spin_unlock_irq(&lock); \
- __ret = schedule_timeout(__ret); spin_lock_irq(&lock))
-
-/*
- * wait_event_interruptible_lock_irq_timeout() but set the exclusive flag.
- */
-#define wait_event_interruptible_lock_irq_timeout_exclusive( \
- wq_head, condition, lock, timeout) \
- ({ \
- long __ret = timeout; \
- if (!___wait_cond_timeout(condition)) \
- __ret = __wait_event_lock_irq_timeout_exclusive( \
- wq_head, condition, lock, timeout, \
- TASK_INTERRUPTIBLE); \
- __ret; \
- })
-
-/* Caller needs to hold client->semaphore */
-static bool check_client_has_available_vd(struct gxp_client *client,
- char *ioctl_name)
-{
- struct gxp_dev *gxp = client->gxp;
-
- lockdep_assert_held(&client->semaphore);
- if (!client->vd) {
- dev_err(gxp->dev,
- "%s requires the client allocate a VIRTUAL_DEVICE\n",
- ioctl_name);
- return false;
- }
- if (client->vd->state == GXP_VD_UNAVAILABLE) {
- dev_err(gxp->dev, "Cannot do %s on a broken virtual device\n",
- ioctl_name);
- return false;
- }
- return true;
-}
-
-/* Caller needs to hold client->semaphore for reading */
-static bool check_client_has_available_vd_wakelock(struct gxp_client *client,
- char *ioctl_name)
-{
- struct gxp_dev *gxp = client->gxp;
-
- lockdep_assert_held_read(&client->semaphore);
- if (!client->has_vd_wakelock) {
- dev_err(gxp->dev,
- "%s requires the client hold a VIRTUAL_DEVICE wakelock\n",
- ioctl_name);
- return false;
- }
- if (client->vd->state == GXP_VD_UNAVAILABLE) {
- dev_err(gxp->dev, "Cannot do %s on a broken virtual device\n",
- ioctl_name);
- return false;
- }
- return true;
-}
-
-#if IS_ENABLED(CONFIG_SUBSYSTEM_COREDUMP)
-static struct sscd_platform_data gxp_sscd_pdata;
-
-static void gxp_sscd_release(struct device *dev)
-{
- pr_debug("%s\n", __func__);
-}
-
-static struct platform_device gxp_sscd_dev = {
- .name = GXP_DRIVER_NAME,
- .driver_override = SSCD_NAME,
- .id = -1,
- .dev = {
- .platform_data = &gxp_sscd_pdata,
- .release = gxp_sscd_release,
- },
-};
-#endif // CONFIG_SUBSYSTEM_COREDUMP
-
-/* Mapping from GXP_POWER_STATE_* to enum aur_power_state in gxp-pm.h */
-static const uint aur_state_array[GXP_NUM_POWER_STATES] = {
- AUR_OFF, AUR_UUD, AUR_SUD, AUR_UD, AUR_NOM,
- AUR_READY, AUR_UUD_PLUS, AUR_SUD_PLUS, AUR_UD_PLUS
-};
-/* Mapping from MEMORY_POWER_STATE_* to enum aur_memory_power_state in gxp-pm.h */
-static const uint aur_memory_state_array[MEMORY_POWER_STATE_MAX + 1] = {
- AUR_MEM_UNDEFINED, AUR_MEM_MIN, AUR_MEM_VERY_LOW, AUR_MEM_LOW,
- AUR_MEM_HIGH, AUR_MEM_VERY_HIGH, AUR_MEM_MAX
-};
-
-static int gxp_open(struct inode *inode, struct file *file)
-{
- struct gxp_client *client;
- struct gxp_dev *gxp = container_of(file->private_data, struct gxp_dev,
- misc_dev);
- int ret = 0;
-
- /* If this is the first call to open(), request the firmware files */
- ret = gxp_firmware_request_if_needed(gxp);
- if (ret) {
- dev_err(gxp->dev,
- "Failed to request dsp firmware files (ret=%d)\n", ret);
- return ret;
- }
-
- client = gxp_client_create(gxp);
- if (IS_ERR(client))
- return PTR_ERR(client);
-
- client->tgid = current->tgid;
- client->pid = current->pid;
-
- file->private_data = client;
-
- mutex_lock(&gxp->client_list_lock);
- list_add(&client->list_entry, &gxp->client_list);
- mutex_unlock(&gxp->client_list_lock);
-
- return ret;
-}
-
-static int gxp_release(struct inode *inode, struct file *file)
-{
- struct gxp_client *client = file->private_data;
-
- /*
- * If open failed and no client was created then no clean-up is needed.
- */
- if (!client)
- return 0;
-
- if (client->enabled_telemetry_logging)
- gxp_telemetry_disable(client->gxp, GXP_TELEMETRY_TYPE_LOGGING);
- if (client->enabled_telemetry_tracing)
- gxp_telemetry_disable(client->gxp, GXP_TELEMETRY_TYPE_TRACING);
-
- mutex_lock(&client->gxp->client_list_lock);
- list_del(&client->list_entry);
- mutex_unlock(&client->gxp->client_list_lock);
-
- gxp_client_destroy(client);
-
- return 0;
-}
-
-static inline enum dma_data_direction mapping_flags_to_dma_dir(u32 flags)
-{
- switch (flags & 0x3) {
- case 0x0: /* 0b00 */
- return DMA_BIDIRECTIONAL;
- case 0x1: /* 0b01 */
- return DMA_TO_DEVICE;
- case 0x2: /* 0b10 */
- return DMA_FROM_DEVICE;
- }
-
- return DMA_NONE;
-}
-
-static int gxp_map_buffer(struct gxp_client *client,
- struct gxp_map_ioctl __user *argp)
-{
- struct gxp_dev *gxp = client->gxp;
- struct gxp_map_ioctl ibuf;
- struct gxp_mapping *map;
- int ret = 0;
-
- if (copy_from_user(&ibuf, argp, sizeof(ibuf)))
- return -EFAULT;
-
- if (ibuf.size == 0 || ibuf.virtual_core_list == 0)
- return -EINVAL;
-
- if (ibuf.host_address % L1_CACHE_BYTES || ibuf.size % L1_CACHE_BYTES) {
- dev_err(gxp->dev,
- "Mapped buffers must be cache line aligned and padded.\n");
- return -EINVAL;
- }
-
- down_read(&client->semaphore);
-
- if (!check_client_has_available_vd(client, "GXP_MAP_BUFFER")) {
- ret = -ENODEV;
- goto out;
- }
-
- /* the list contains un-allocated core bits */
- if (ibuf.virtual_core_list & ~(BIT(client->vd->num_cores) - 1)) {
- ret = -EINVAL;
- goto out;
- }
-
- map = gxp_mapping_create(gxp, client->vd, ibuf.virtual_core_list,
- ibuf.host_address, ibuf.size,
- /*gxp_dma_flags=*/0,
- mapping_flags_to_dma_dir(ibuf.flags));
- if (IS_ERR(map)) {
- ret = PTR_ERR(map);
- dev_err(gxp->dev, "Failed to create mapping (ret=%d)\n", ret);
- goto out;
- }
-
- ret = gxp_vd_mapping_store(client->vd, map);
- if (ret) {
- dev_err(gxp->dev, "Failed to store mapping (ret=%d)\n", ret);
- goto error_destroy;
- }
-
- ibuf.device_address = map->device_address;
-
- if (copy_to_user(argp, &ibuf, sizeof(ibuf))) {
- ret = -EFAULT;
- goto error_remove;
- }
-
- /*
- * The virtual device acquired its own reference to the mapping when
- * it was stored in the VD's records. Release the reference from
- * creating the mapping since this function is done using it.
- */
- gxp_mapping_put(map);
-
-out:
- up_read(&client->semaphore);
-
- return ret;
-
-error_remove:
- gxp_vd_mapping_remove(client->vd, map);
-error_destroy:
- gxp_mapping_put(map);
- up_read(&client->semaphore);
- return ret;
-}
-
-static int gxp_unmap_buffer(struct gxp_client *client,
- struct gxp_map_ioctl __user *argp)
-{
- struct gxp_dev *gxp = client->gxp;
- struct gxp_map_ioctl ibuf;
- struct gxp_mapping *map;
- int ret = 0;
-
- if (copy_from_user(&ibuf, argp, sizeof(ibuf)))
- return -EFAULT;
-
- down_read(&client->semaphore);
-
- if (!client->vd) {
- dev_err(gxp->dev,
- "GXP_UNMAP_BUFFER requires the client allocate a VIRTUAL_DEVICE\n");
- ret = -ENODEV;
- goto out;
- }
-
- map = gxp_vd_mapping_search(client->vd,
- (dma_addr_t)ibuf.device_address);
- if (!map) {
- dev_err(gxp->dev,
- "Mapping not found for provided device address %#llX\n",
- ibuf.device_address);
- ret = -EINVAL;
- goto out;
- } else if (!map->host_address) {
- dev_err(gxp->dev, "dma-bufs must be unmapped via GXP_UNMAP_DMABUF\n");
- ret = -EINVAL;
- goto out;
- }
-
- WARN_ON(map->host_address != ibuf.host_address);
-
- gxp_vd_mapping_remove(client->vd, map);
-
- /* Release the reference from gxp_vd_mapping_search() */
- gxp_mapping_put(map);
-
-out:
- up_read(&client->semaphore);
-
- return ret;
-}
-
-static int gxp_sync_buffer(struct gxp_client *client,
- struct gxp_sync_ioctl __user *argp)
-{
- struct gxp_dev *gxp = client->gxp;
- struct gxp_sync_ioctl ibuf;
- struct gxp_mapping *map;
- int ret;
-
- if (copy_from_user(&ibuf, argp, sizeof(ibuf)))
- return -EFAULT;
-
- down_read(&client->semaphore);
-
- if (!client->vd) {
- dev_err(gxp->dev,
- "GXP_SYNC_BUFFER requires the client allocate a VIRTUAL_DEVICE\n");
- ret = -ENODEV;
- goto out;
- }
-
- map = gxp_vd_mapping_search(client->vd,
- (dma_addr_t)ibuf.device_address);
- if (!map) {
- dev_err(gxp->dev,
- "Mapping not found for provided device address %#llX\n",
- ibuf.device_address);
- ret = -EINVAL;
- goto out;
- }
-
- ret = gxp_mapping_sync(map, ibuf.offset, ibuf.size,
- ibuf.flags == GXP_SYNC_FOR_CPU);
-
- /* Release the reference from gxp_vd_mapping_search() */
- gxp_mapping_put(map);
-
-out:
- up_read(&client->semaphore);
-
- return ret;
-}
-
-static int
-gxp_mailbox_command_compat(struct gxp_client *client,
- struct gxp_mailbox_command_compat_ioctl __user *argp)
-{
- struct gxp_dev *gxp = client->gxp;
- struct gxp_mailbox_command_compat_ioctl ibuf;
- struct gxp_command cmd;
- struct buffer_descriptor buffer;
- int virt_core, phys_core;
- int ret = 0;
- uint gxp_power_state, memory_power_state;
-
- if (copy_from_user(&ibuf, argp, sizeof(ibuf))) {
- dev_err(gxp->dev,
- "Unable to copy ioctl data from user-space\n");
- return -EFAULT;
- }
-
- /* Caller must hold VIRTUAL_DEVICE wakelock */
- down_read(&client->semaphore);
-
- if (!check_client_has_available_vd_wakelock(client,
- "GXP_MAILBOX_COMMAND")) {
- ret = -ENODEV;
- goto out_unlock_client_semaphore;
- }
-
- down_read(&gxp->vd_semaphore);
-
- virt_core = ibuf.virtual_core_id;
- phys_core = gxp_vd_virt_core_to_phys_core(client->vd, virt_core);
- if (phys_core < 0) {
- dev_err(gxp->dev,
- "Mailbox command failed: Invalid virtual core id (%u)\n",
- virt_core);
- ret = -EINVAL;
- goto out;
- }
-
- if (!gxp_is_fw_running(gxp, phys_core)) {
- dev_err(gxp->dev,
- "Cannot process mailbox command for core %d when firmware isn't running\n",
- phys_core);
- ret = -EINVAL;
- goto out;
- }
-
- if (gxp->mailbox_mgr == NULL || gxp->mailbox_mgr->mailboxes == NULL ||
- gxp->mailbox_mgr->mailboxes[phys_core] == NULL) {
- dev_err(gxp->dev, "Mailbox not initialized for core %d\n",
- phys_core);
- ret = -EIO;
- goto out;
- }
-
- /* Pack the command structure */
- buffer.address = ibuf.device_address;
- buffer.size = ibuf.size;
- buffer.flags = ibuf.flags;
- /* cmd.seq is assigned by mailbox implementation */
- cmd.code = GXP_MBOX_CODE_DISPATCH; /* All IOCTL commands are dispatch */
- cmd.priority = 0; /* currently unused */
- cmd.buffer_descriptor = buffer;
- gxp_power_state = AUR_OFF;
- memory_power_state = AUR_MEM_UNDEFINED;
-
- ret = gxp_mailbox_execute_cmd_async(
- gxp->mailbox_mgr->mailboxes[phys_core], &cmd,
- &client->vd->mailbox_resp_queues[virt_core].queue,
- &client->vd->mailbox_resp_queues[virt_core].lock,
- &client->vd->mailbox_resp_queues[virt_core].waitq,
- gxp_power_state, memory_power_state, false,
- client->mb_eventfds[virt_core]);
- if (ret) {
- dev_err(gxp->dev, "Failed to enqueue mailbox command (ret=%d)\n",
- ret);
- goto out;
- }
-
- ibuf.sequence_number = cmd.seq;
- if (copy_to_user(argp, &ibuf, sizeof(ibuf))) {
- dev_err(gxp->dev, "Failed to copy back sequence number!\n");
- ret = -EFAULT;
- goto out;
- }
-
-out:
- up_read(&gxp->vd_semaphore);
-out_unlock_client_semaphore:
- up_read(&client->semaphore);
-
- return ret;
-}
-
-static int gxp_mailbox_command(struct gxp_client *client,
- struct gxp_mailbox_command_ioctl __user *argp)
-{
- struct gxp_dev *gxp = client->gxp;
- struct gxp_mailbox_command_ioctl ibuf;
- struct gxp_command cmd;
- struct buffer_descriptor buffer;
- int virt_core, phys_core;
- int ret = 0;
- uint gxp_power_state, memory_power_state;
- bool requested_low_clkmux = false;
-
- if (copy_from_user(&ibuf, argp, sizeof(ibuf))) {
- dev_err(gxp->dev,
- "Unable to copy ioctl data from user-space\n");
- return -EFAULT;
- }
- if (ibuf.gxp_power_state == GXP_POWER_STATE_OFF) {
- dev_err(gxp->dev,
- "GXP_POWER_STATE_OFF is not a valid value when executing a mailbox command\n");
- return -EINVAL;
- }
- if (ibuf.gxp_power_state < GXP_POWER_STATE_OFF ||
- ibuf.gxp_power_state >= GXP_NUM_POWER_STATES) {
- dev_err(gxp->dev, "Requested power state is invalid\n");
- return -EINVAL;
- }
- if (ibuf.memory_power_state < MEMORY_POWER_STATE_UNDEFINED ||
- ibuf.memory_power_state > MEMORY_POWER_STATE_MAX) {
- dev_err(gxp->dev, "Requested memory power state is invalid\n");
- return -EINVAL;
- }
-
- if (ibuf.gxp_power_state == GXP_POWER_STATE_READY) {
- dev_warn_once(
- gxp->dev,
- "GXP_POWER_STATE_READY is deprecated, please set GXP_POWER_LOW_FREQ_CLKMUX with GXP_POWER_STATE_UUD state");
- ibuf.gxp_power_state = GXP_POWER_STATE_UUD;
- }
-
- if(ibuf.power_flags & GXP_POWER_NON_AGGRESSOR)
- dev_warn_once(
- gxp->dev,
- "GXP_POWER_NON_AGGRESSOR is deprecated, no operation here");
-
- /* Caller must hold VIRTUAL_DEVICE wakelock */
- down_read(&client->semaphore);
-
- if (!check_client_has_available_vd_wakelock(client,
- "GXP_MAILBOX_COMMAND")) {
- ret = -ENODEV;
- goto out_unlock_client_semaphore;
- }
-
- down_read(&gxp->vd_semaphore);
-
- virt_core = ibuf.virtual_core_id;
- phys_core = gxp_vd_virt_core_to_phys_core(client->vd, virt_core);
- if (phys_core < 0) {
- dev_err(gxp->dev,
- "Mailbox command failed: Invalid virtual core id (%u)\n",
- virt_core);
- ret = -EINVAL;
- goto out;
- }
-
- if (!gxp_is_fw_running(gxp, phys_core)) {
- dev_err(gxp->dev,
- "Cannot process mailbox command for core %d when firmware isn't running\n",
- phys_core);
- ret = -EINVAL;
- goto out;
- }
-
- if (gxp->mailbox_mgr == NULL || gxp->mailbox_mgr->mailboxes == NULL ||
- gxp->mailbox_mgr->mailboxes[phys_core] == NULL) {
- dev_err(gxp->dev, "Mailbox not initialized for core %d\n",
- phys_core);
- ret = -EIO;
- goto out;
- }
-
- /* Pack the command structure */
- buffer.address = ibuf.device_address;
- buffer.size = ibuf.size;
- buffer.flags = ibuf.flags;
- /* cmd.seq is assigned by mailbox implementation */
- cmd.code = GXP_MBOX_CODE_DISPATCH; /* All IOCTL commands are dispatch */
- cmd.priority = 0; /* currently unused */
- cmd.buffer_descriptor = buffer;
- gxp_power_state = aur_state_array[ibuf.gxp_power_state];
- memory_power_state = aur_memory_state_array[ibuf.memory_power_state];
- requested_low_clkmux = (ibuf.power_flags & GXP_POWER_LOW_FREQ_CLKMUX) != 0;
-
- ret = gxp_mailbox_execute_cmd_async(
- gxp->mailbox_mgr->mailboxes[phys_core], &cmd,
- &client->vd->mailbox_resp_queues[virt_core].queue,
- &client->vd->mailbox_resp_queues[virt_core].lock,
- &client->vd->mailbox_resp_queues[virt_core].waitq,
- gxp_power_state, memory_power_state, requested_low_clkmux,
- client->mb_eventfds[virt_core]);
- if (ret) {
- dev_err(gxp->dev, "Failed to enqueue mailbox command (ret=%d)\n",
- ret);
- goto out;
- }
-
- ibuf.sequence_number = cmd.seq;
- if (copy_to_user(argp, &ibuf, sizeof(ibuf))) {
- dev_err(gxp->dev, "Failed to copy back sequence number!\n");
- ret = -EFAULT;
- goto out;
- }
-
-out:
- up_read(&gxp->vd_semaphore);
-out_unlock_client_semaphore:
- up_read(&client->semaphore);
-
- return ret;
-}
-
-static int gxp_mailbox_response(struct gxp_client *client,
- struct gxp_mailbox_response_ioctl __user *argp)
-{
- struct gxp_dev *gxp = client->gxp;
- struct gxp_mailbox_response_ioctl ibuf;
- struct gxp_async_response *resp_ptr;
- int virt_core;
- int ret = 0;
- long timeout;
-
- if (copy_from_user(&ibuf, argp, sizeof(ibuf)))
- return -EFAULT;
-
- /* Caller must hold VIRTUAL_DEVICE wakelock */
- down_read(&client->semaphore);
-
- if (!check_client_has_available_vd_wakelock(client,
- "GXP_MAILBOX_RESPONSE")) {
- ret = -ENODEV;
- goto out;
- }
-
- virt_core = ibuf.virtual_core_id;
- if (virt_core >= client->vd->num_cores) {
- dev_err(gxp->dev, "Mailbox response failed: Invalid virtual core id (%u)\n",
- virt_core);
- ret = -EINVAL;
- goto out;
- }
-
- spin_lock_irq(&client->vd->mailbox_resp_queues[virt_core].lock);
-
- /*
- * The "exclusive" version of wait_event is used since each wake
- * corresponds to the addition of exactly one new response to be
- * consumed. Therefore, only one waiting response ioctl can ever
- * proceed per wake event.
- */
- timeout = wait_event_interruptible_lock_irq_timeout_exclusive(
- client->vd->mailbox_resp_queues[virt_core].waitq,
- !list_empty(&client->vd->mailbox_resp_queues[virt_core].queue),
- client->vd->mailbox_resp_queues[virt_core].lock,
- msecs_to_jiffies(MAILBOX_TIMEOUT));
- if (timeout <= 0) {
- spin_unlock_irq(
- &client->vd->mailbox_resp_queues[virt_core].lock);
- /* unusual case - this only happens when there is no command pushed */
- ret = timeout ? -ETIMEDOUT : timeout;
- goto out;
- }
- resp_ptr = list_first_entry(
- &client->vd->mailbox_resp_queues[virt_core].queue,
- struct gxp_async_response, list_entry);
-
- /* Pop the front of the response list */
- list_del(&(resp_ptr->list_entry));
-
- spin_unlock_irq(&client->vd->mailbox_resp_queues[virt_core].lock);
-
- ibuf.sequence_number = resp_ptr->resp.seq;
- switch (resp_ptr->resp.status) {
- case GXP_RESP_OK:
- ibuf.error_code = GXP_RESPONSE_ERROR_NONE;
- /* retval is only valid if status == GXP_RESP_OK */
- ibuf.cmd_retval = resp_ptr->resp.retval;
- break;
- case GXP_RESP_CANCELLED:
- ibuf.error_code = GXP_RESPONSE_ERROR_TIMEOUT;
- break;
- default:
- /* No other status values are valid at this point */
- WARN(true, "Completed response had invalid status %hu",
- resp_ptr->resp.status);
- ibuf.error_code = GXP_RESPONSE_ERROR_INTERNAL;
- break;
- }
-
- /*
- * We must be absolutely sure the timeout work has been cancelled
- * and/or completed before freeing the `gxp_async_response`.
- * There are 3 possible cases when we arrive at this point:
- * 1) The response arrived normally and the timeout was cancelled
- * 2) The response timedout and its timeout handler finished
- * 3) The response handler and timeout handler raced, and the response
- * handler "cancelled" the timeout handler while it was already in
- * progress.
- *
- * This call handles case #3, and ensures any in-process timeout
- * handler (which may reference the `gxp_async_response`) has
- * been able to exit cleanly.
- */
- cancel_delayed_work_sync(&resp_ptr->timeout_work);
- kfree(resp_ptr);
-
- if (copy_to_user(argp, &ibuf, sizeof(ibuf)))
- ret = -EFAULT;
-
-out:
- up_read(&client->semaphore);
-
- return ret;
-}
-
-static int gxp_get_specs(struct gxp_client *client,
- struct gxp_specs_ioctl __user *argp)
-{
- struct gxp_specs_ioctl ibuf = {
- .core_count = GXP_NUM_CORES,
- .memory_per_core = client->gxp->memory_per_core,
- };
-
- if (copy_to_user(argp, &ibuf, sizeof(ibuf)))
- return -EFAULT;
-
- return 0;
-}
-
-static int gxp_allocate_vd(struct gxp_client *client,
- struct gxp_virtual_device_ioctl __user *argp)
-{
- struct gxp_dev *gxp = client->gxp;
- struct gxp_virtual_device_ioctl ibuf;
- struct gxp_virtual_device *vd;
- int ret = 0;
-
- if (copy_from_user(&ibuf, argp, sizeof(ibuf)))
- return -EFAULT;
-
- if (ibuf.core_count == 0 || ibuf.core_count > GXP_NUM_CORES) {
- dev_err(gxp->dev, "Invalid core count (%u)\n", ibuf.core_count);
- return -EINVAL;
- }
-
- if (ibuf.memory_per_core > gxp->memory_per_core) {
- dev_err(gxp->dev, "Invalid memory-per-core (%u)\n",
- ibuf.memory_per_core);
- return -EINVAL;
- }
-
- down_write(&client->semaphore);
-
- if (client->vd) {
- dev_err(gxp->dev, "Virtual device was already allocated for client\n");
- ret = -EINVAL;
- goto out;
- }
-
- vd = gxp_vd_allocate(gxp, ibuf.core_count);
- if (IS_ERR(vd)) {
- ret = PTR_ERR(vd);
- dev_err(gxp->dev,
- "Failed to allocate virtual device for client (%d)\n",
- ret);
- goto out;
- }
-
- client->vd = vd;
-
-out:
- up_write(&client->semaphore);
-
- return ret;
-}
-
-static int
-gxp_etm_trace_start_command(struct gxp_client *client,
- struct gxp_etm_trace_start_ioctl __user *argp)
-{
- struct gxp_dev *gxp = client->gxp;
- struct gxp_etm_trace_start_ioctl ibuf;
- int phys_core;
- int ret = 0;
-
- if (copy_from_user(&ibuf, argp, sizeof(ibuf)))
- return -EFAULT;
-
- ibuf.trace_ram_enable &= ETM_TRACE_LSB_MASK;
- ibuf.atb_enable &= ETM_TRACE_LSB_MASK;
-
- if (!ibuf.trace_ram_enable && !ibuf.atb_enable)
- return -EINVAL;
-
- if (!(ibuf.sync_msg_period == 0 ||
- (ibuf.sync_msg_period <= ETM_TRACE_SYNC_MSG_PERIOD_MAX &&
- ibuf.sync_msg_period >= ETM_TRACE_SYNC_MSG_PERIOD_MIN &&
- is_power_of_2(ibuf.sync_msg_period))))
- return -EINVAL;
-
- if (ibuf.pc_match_mask_length > ETM_TRACE_PC_MATCH_MASK_LEN_MAX)
- return -EINVAL;
-
- /* Caller must hold VIRTUAL_DEVICE wakelock */
- down_read(&client->semaphore);
-
- if (!check_client_has_available_vd_wakelock(
- client, "GXP_ETM_TRACE_START_COMMAND")) {
- ret = -ENODEV;
- goto out_unlock_client_semaphore;
- }
-
- down_read(&gxp->vd_semaphore);
-
- phys_core =
- gxp_vd_virt_core_to_phys_core(client->vd, ibuf.virtual_core_id);
- if (phys_core < 0) {
- dev_err(gxp->dev, "Trace start failed: Invalid virtual core id (%u)\n",
- ibuf.virtual_core_id);
- ret = -EINVAL;
- goto out;
- }
-
- /*
- * TODO (b/185260919): Pass the etm trace configuration to system FW
- * once communication channel between kernel and system FW is ready
- * (b/185819530).
- */
-
-out:
- up_read(&gxp->vd_semaphore);
-out_unlock_client_semaphore:
- up_read(&client->semaphore);
-
- return ret;
-}
-
-static int gxp_etm_trace_sw_stop_command(struct gxp_client *client,
- __u16 __user *argp)
-{
- struct gxp_dev *gxp = client->gxp;
- u16 virtual_core_id;
- int phys_core;
- int ret = 0;
-
- if (copy_from_user(&virtual_core_id, argp, sizeof(virtual_core_id)))
- return -EFAULT;
-
- /* Caller must hold VIRTUAL_DEVICE wakelock */
- down_read(&client->semaphore);
-
- if (!check_client_has_available_vd_wakelock(
- client, "GXP_ETM_TRACE_SW_STOP_COMMAND")) {
- ret = -ENODEV;
- goto out_unlock_client_semaphore;
- }
-
- down_read(&gxp->vd_semaphore);
-
- phys_core = gxp_vd_virt_core_to_phys_core(client->vd, virtual_core_id);
- if (phys_core < 0) {
- dev_err(gxp->dev, "Trace stop via software trigger failed: Invalid virtual core id (%u)\n",
- virtual_core_id);
- ret = -EINVAL;
- goto out;
- }
-
- /*
- * TODO (b/185260919): Pass the etm stop signal to system FW once
- * communication channel between kernel and system FW is ready
- * (b/185819530).
- */
-
-out:
- up_read(&gxp->vd_semaphore);
-out_unlock_client_semaphore:
- up_read(&client->semaphore);
-
- return ret;
-}
-
-static int gxp_etm_trace_cleanup_command(struct gxp_client *client,
- __u16 __user *argp)
-{
- struct gxp_dev *gxp = client->gxp;
- u16 virtual_core_id;
- int phys_core;
- int ret = 0;
-
- if (copy_from_user(&virtual_core_id, argp, sizeof(virtual_core_id)))
- return -EFAULT;
-
- /* Caller must hold VIRTUAL_DEVICE wakelock */
- down_read(&client->semaphore);
-
- if (!check_client_has_available_vd_wakelock(
- client, "GXP_ETM_TRACE_CLEANUP_COMMAND")) {
- ret = -ENODEV;
- goto out_unlock_client_semaphore;
- }
-
- down_read(&gxp->vd_semaphore);
-
- phys_core = gxp_vd_virt_core_to_phys_core(client->vd, virtual_core_id);
- if (phys_core < 0) {
- dev_err(gxp->dev, "Trace cleanup failed: Invalid virtual core id (%u)\n",
- virtual_core_id);
- ret = -EINVAL;
- goto out;
- }
-
- /*
- * TODO (b/185260919): Pass the etm clean up signal to system FW once
- * communication channel between kernel and system FW is ready
- * (b/185819530).
- */
-
-out:
- up_read(&gxp->vd_semaphore);
-out_unlock_client_semaphore:
- up_read(&client->semaphore);
-
- return ret;
-}
-
-static int
-gxp_etm_get_trace_info_command(struct gxp_client *client,
- struct gxp_etm_get_trace_info_ioctl __user *argp)
-{
- struct gxp_dev *gxp = client->gxp;
- struct gxp_etm_get_trace_info_ioctl ibuf;
- int phys_core;
- u32 *trace_header;
- u32 *trace_data;
- int ret = 0;
-
- if (copy_from_user(&ibuf, argp, sizeof(ibuf)))
- return -EFAULT;
-
- if (ibuf.type > 1)
- return -EINVAL;
-
- /* Caller must hold VIRTUAL_DEVICE wakelock */
- down_read(&client->semaphore);
-
- if (!check_client_has_available_vd_wakelock(
- client, "GXP_ETM_GET_TRACE_INFO_COMMAND")) {
- ret = -ENODEV;
- goto out_unlock_client_semaphore;
- }
-
- down_read(&gxp->vd_semaphore);
-
- phys_core = gxp_vd_virt_core_to_phys_core(client->vd, ibuf.virtual_core_id);
- if (phys_core < 0) {
- dev_err(gxp->dev, "Get trace info failed: Invalid virtual core id (%u)\n",
- ibuf.virtual_core_id);
- ret = -EINVAL;
- goto out;
- }
-
- trace_header = kzalloc(GXP_TRACE_HEADER_SIZE, GFP_KERNEL);
- if (!trace_header) {
- ret = -ENOMEM;
- goto out;
- }
-
- trace_data = kzalloc(GXP_TRACE_RAM_SIZE, GFP_KERNEL);
- if (!trace_data) {
- ret = -ENOMEM;
- goto out_free_header;
- }
-
- /*
- * TODO (b/185260919): Get trace information from system FW once
- * communication channel between kernel and system FW is ready
- * (b/185819530).
- */
-
- if (copy_to_user((void __user *)ibuf.trace_header_addr, trace_header,
- GXP_TRACE_HEADER_SIZE)) {
- ret = -EFAULT;
- goto out_free_data;
- }
-
- if (ibuf.type == 1) {
- if (copy_to_user((void __user *)ibuf.trace_data_addr,
- trace_data, GXP_TRACE_RAM_SIZE)) {
- ret = -EFAULT;
- goto out_free_data;
- }
- }
-
-out_free_data:
- kfree(trace_data);
-out_free_header:
- kfree(trace_header);
-
-out:
- up_read(&gxp->vd_semaphore);
-out_unlock_client_semaphore:
- up_read(&client->semaphore);
-
- return ret;
-}
-
-static int gxp_enable_telemetry(struct gxp_client *client,
- __u8 __user *argp)
-{
- struct gxp_dev *gxp = client->gxp;
- __u8 type;
- int ret;
-
- if (copy_from_user(&type, argp, sizeof(type)))
- return -EFAULT;
-
- if (type != GXP_TELEMETRY_TYPE_LOGGING &&
- type != GXP_TELEMETRY_TYPE_TRACING)
- return -EINVAL;
-
- ret = gxp_telemetry_enable(gxp, type);
-
- /*
- * Record what telemetry types this client enabled so they can be
- * cleaned-up if the client closes without disabling them.
- */
- if (!ret && type == GXP_TELEMETRY_TYPE_LOGGING)
- client->enabled_telemetry_logging = true;
- if (!ret && type == GXP_TELEMETRY_TYPE_TRACING)
- client->enabled_telemetry_tracing = true;
-
- return ret;
-}
-
-static int gxp_disable_telemetry(struct gxp_client *client, __u8 __user *argp)
-{
- struct gxp_dev *gxp = client->gxp;
- __u8 type;
- int ret;
-
- if (copy_from_user(&type, argp, sizeof(type)))
- return -EFAULT;
-
- if (type != GXP_TELEMETRY_TYPE_LOGGING &&
- type != GXP_TELEMETRY_TYPE_TRACING)
- return -EINVAL;
-
- ret = gxp_telemetry_disable(gxp, type);
-
- if (!ret && type == GXP_TELEMETRY_TYPE_LOGGING)
- client->enabled_telemetry_logging = false;
- if (!ret && type == GXP_TELEMETRY_TYPE_TRACING)
- client->enabled_telemetry_tracing = false;
-
- return ret;
-}
-
-static int gxp_map_tpu_mbx_queue(struct gxp_client *client,
- struct gxp_tpu_mbx_queue_ioctl __user *argp)
-{
-#if (IS_ENABLED(CONFIG_GXP_TEST) || IS_ENABLED(CONFIG_ANDROID)) && !IS_ENABLED(CONFIG_GXP_GEM5)
- struct gxp_dev *gxp = client->gxp;
- struct edgetpu_ext_mailbox_info *mbx_info;
- struct gxp_tpu_mbx_queue_ioctl ibuf;
- struct edgetpu_ext_client_info gxp_tpu_info;
- u32 phys_core_list = 0;
- u32 virtual_core_list;
- u32 core_count;
- int ret = 0;
-
- if (!gxp->tpu_dev.mbx_paddr) {
- dev_err(gxp->dev, "%s: TPU is not available for interop\n",
- __func__);
- return -EINVAL;
- }
-
- if (copy_from_user(&ibuf, argp, sizeof(ibuf)))
- return -EFAULT;
-
- down_write(&client->semaphore);
-
- if (!check_client_has_available_vd(client, "GXP_MAP_TPU_MBX_QUEUE")) {
- ret = -ENODEV;
- goto out_unlock_client_semaphore;
- }
- down_read(&gxp->vd_semaphore);
+#include "gxp-common-platform.c"
- virtual_core_list = ibuf.virtual_core_list;
- core_count = hweight_long(virtual_core_list);
- phys_core_list = gxp_vd_virt_core_list_to_phys_core_list(
- client->vd, virtual_core_list);
- if (!phys_core_list) {
- dev_err(gxp->dev, "%s: invalid virtual core list 0x%x\n",
- __func__, virtual_core_list);
- ret = -EINVAL;
- goto out;
- }
-
- mbx_info =
- kmalloc(sizeof(struct edgetpu_ext_mailbox_info) + core_count *
- sizeof(struct edgetpu_ext_mailbox_descriptor),
- GFP_KERNEL);
- if (!mbx_info) {
- ret = -ENOMEM;
- goto out;
- }
-
- if (client->tpu_file) {
- dev_err(gxp->dev, "Mappings already exist for TPU mailboxes");
- ret = -EBUSY;
- goto out_free;
- }
-
- gxp_tpu_info.tpu_fd = ibuf.tpu_fd;
- gxp_tpu_info.mbox_map = phys_core_list;
- gxp_tpu_info.attr = (struct edgetpu_mailbox_attr __user *)ibuf.attr_ptr;
- ret = edgetpu_ext_driver_cmd(gxp->tpu_dev.dev,
- EDGETPU_EXTERNAL_CLIENT_TYPE_DSP,
- ALLOCATE_EXTERNAL_MAILBOX, &gxp_tpu_info,
- mbx_info);
- if (ret) {
- dev_err(gxp->dev, "Failed to allocate ext TPU mailboxes %d",
- ret);
- goto out_free;
- }
- /*
- * If someone is attacking us through this interface -
- * it's possible that ibuf.tpu_fd here is already a different file from
- * the one passed to edgetpu_ext_driver_cmd() (if the runtime closes the
- * FD and opens another file exactly between the TPU driver call above
- * and the fget below).
- * But the worst consequence of this attack is we fget() ourselves (GXP
- * FD), which only leads to memory leak (because the file object has a
- * reference to itself). The race is also hard to hit so we don't insist
- * on preventing it.
- */
- client->tpu_file = fget(ibuf.tpu_fd);
- if (!client->tpu_file) {
- edgetpu_ext_driver_cmd(gxp->tpu_dev.dev,
- EDGETPU_EXTERNAL_CLIENT_TYPE_DSP,
- FREE_EXTERNAL_MAILBOX, &gxp_tpu_info,
- NULL);
- ret = -EINVAL;
- goto out_free;
- }
- /* Align queue size to page size for iommu map. */
- mbx_info->cmdq_size = ALIGN(mbx_info->cmdq_size, PAGE_SIZE);
- mbx_info->respq_size = ALIGN(mbx_info->respq_size, PAGE_SIZE);
-
- ret = gxp_dma_map_tpu_buffer(gxp, client->vd, virtual_core_list,
- phys_core_list, mbx_info);
- if (ret) {
- dev_err(gxp->dev, "Failed to map TPU mailbox buffer %d", ret);
- fput(client->tpu_file);
- client->tpu_file = NULL;
- edgetpu_ext_driver_cmd(gxp->tpu_dev.dev,
- EDGETPU_EXTERNAL_CLIENT_TYPE_DSP,
- FREE_EXTERNAL_MAILBOX, &gxp_tpu_info,
- NULL);
- goto out_free;
- }
- client->mbx_desc.phys_core_list = phys_core_list;
- client->mbx_desc.virt_core_list = virtual_core_list;
- client->mbx_desc.cmdq_size = mbx_info->cmdq_size;
- client->mbx_desc.respq_size = mbx_info->respq_size;
-
-out_free:
- kfree(mbx_info);
-
-out:
- up_read(&gxp->vd_semaphore);
-out_unlock_client_semaphore:
- up_write(&client->semaphore);
-
- return ret;
-#else
- return -ENODEV;
-#endif
-}
-
-static int gxp_unmap_tpu_mbx_queue(struct gxp_client *client,
- struct gxp_tpu_mbx_queue_ioctl __user *argp)
-{
-#if (IS_ENABLED(CONFIG_GXP_TEST) || IS_ENABLED(CONFIG_ANDROID)) && !IS_ENABLED(CONFIG_GXP_GEM5)
- struct gxp_dev *gxp = client->gxp;
- struct gxp_tpu_mbx_queue_ioctl ibuf;
- struct edgetpu_ext_client_info gxp_tpu_info;
- int ret = 0;
-
- if (copy_from_user(&ibuf, argp, sizeof(ibuf)))
- return -EFAULT;
-
- down_write(&client->semaphore);
-
- if (!client->vd) {
- dev_err(gxp->dev,
- "GXP_UNMAP_TPU_MBX_QUEUE requires the client allocate a VIRTUAL_DEVICE\n");
- ret = -ENODEV;
- goto out;
- }
-
- if (!client->tpu_file) {
- dev_err(gxp->dev, "No mappings exist for TPU mailboxes");
- ret = -EINVAL;
- goto out;
- }
-
- gxp_dma_unmap_tpu_buffer(gxp, client->vd, client->mbx_desc);
-
- gxp_tpu_info.tpu_fd = ibuf.tpu_fd;
- edgetpu_ext_driver_cmd(gxp->tpu_dev.dev,
- EDGETPU_EXTERNAL_CLIENT_TYPE_DSP,
- FREE_EXTERNAL_MAILBOX, &gxp_tpu_info, NULL);
- fput(client->tpu_file);
- client->tpu_file = NULL;
-
-out:
- up_write(&client->semaphore);
-
- return ret;
-#else
- return -ENODEV;
-#endif
-}
-
-static int gxp_register_telemetry_eventfd(
- struct gxp_client *client,
- struct gxp_register_telemetry_eventfd_ioctl __user *argp)
-{
- struct gxp_dev *gxp = client->gxp;
- struct gxp_register_telemetry_eventfd_ioctl ibuf;
-
- if (copy_from_user(&ibuf, argp, sizeof(ibuf)))
- return -EFAULT;
-
- return gxp_telemetry_register_eventfd(gxp, ibuf.type, ibuf.eventfd);
-}
-
-static int gxp_unregister_telemetry_eventfd(
- struct gxp_client *client,
- struct gxp_register_telemetry_eventfd_ioctl __user *argp)
-{
- struct gxp_dev *gxp = client->gxp;
- struct gxp_register_telemetry_eventfd_ioctl ibuf;
-
- if (copy_from_user(&ibuf, argp, sizeof(ibuf)))
- return -EFAULT;
-
- return gxp_telemetry_unregister_eventfd(gxp, ibuf.type);
-}
-
-static int gxp_read_global_counter(struct gxp_client *client,
- __u64 __user *argp)
+void gxp_iommu_setup_shareability(struct gxp_dev *gxp)
{
- struct gxp_dev *gxp = client->gxp;
- u32 high_first, high_second, low;
- u64 counter_val;
- int ret = 0;
-
- /* Caller must hold BLOCK wakelock */
- down_read(&client->semaphore);
-
- if (!client->has_block_wakelock) {
- dev_err(gxp->dev,
- "GXP_READ_GLOBAL_COUNTER requires the client hold a BLOCK wakelock\n");
- ret = -ENODEV;
- goto out;
- }
-
- high_first = gxp_read_32(gxp, GXP_REG_GLOBAL_COUNTER_HIGH);
- low = gxp_read_32(gxp, GXP_REG_GLOBAL_COUNTER_LOW);
-
- /*
- * Check if the lower 32 bits could have wrapped in-between reading
- * the high and low bit registers by validating the higher 32 bits
- * haven't changed.
- */
- high_second = gxp_read_32(gxp, GXP_REG_GLOBAL_COUNTER_HIGH);
- if (high_first != high_second)
- low = gxp_read_32(gxp, GXP_REG_GLOBAL_COUNTER_LOW);
-
- counter_val = ((u64)high_second << 32) | low;
-
- if (copy_to_user(argp, &counter_val, sizeof(counter_val)))
- ret = -EFAULT;
-
-out:
- up_read(&client->semaphore);
-
- return ret;
+ /* IO coherency not supported */
}
-static int gxp_acquire_wake_lock_compat(
- struct gxp_client *client,
- struct gxp_acquire_wakelock_compat_ioctl __user *argp)
-{
- struct gxp_dev *gxp = client->gxp;
- struct gxp_acquire_wakelock_compat_ioctl ibuf;
- bool acquired_block_wakelock = false;
- int ret = 0;
-
- if (copy_from_user(&ibuf, argp, sizeof(ibuf)))
- return -EFAULT;
-
- if (ibuf.gxp_power_state == GXP_POWER_STATE_OFF) {
- dev_err(gxp->dev,
- "GXP_POWER_STATE_OFF is not a valid value when acquiring a wakelock\n");
- return -EINVAL;
- }
- if (ibuf.gxp_power_state < GXP_POWER_STATE_OFF ||
- ibuf.gxp_power_state >= GXP_NUM_POWER_STATES) {
- dev_err(gxp->dev, "Requested power state is invalid\n");
- return -EINVAL;
- }
- if ((ibuf.memory_power_state < MEMORY_POWER_STATE_MIN ||
- ibuf.memory_power_state > MEMORY_POWER_STATE_MAX) &&
- ibuf.memory_power_state != MEMORY_POWER_STATE_UNDEFINED) {
- dev_err(gxp->dev,
- "Requested memory power state %d is invalid\n",
- ibuf.memory_power_state);
- return -EINVAL;
- }
-
- if (ibuf.gxp_power_state == GXP_POWER_STATE_READY) {
- dev_warn_once(
- gxp->dev,
- "GXP_POWER_STATE_READY is deprecated, please set GXP_POWER_LOW_FREQ_CLKMUX with GXP_POWER_STATE_UUD state");
- ibuf.gxp_power_state = GXP_POWER_STATE_UUD;
- }
-
- down_write(&client->semaphore);
- if ((ibuf.components_to_wake & WAKELOCK_VIRTUAL_DEVICE) &&
- (!client->vd)) {
- dev_err(gxp->dev,
- "Must allocate a virtual device to acquire VIRTUAL_DEVICE wakelock\n");
- ret = -EINVAL;
- goto out;
- }
-
- /* Acquire a BLOCK wakelock if requested */
- if (ibuf.components_to_wake & WAKELOCK_BLOCK) {
- if (!client->has_block_wakelock) {
- ret = gxp_wakelock_acquire(gxp);
- acquired_block_wakelock = true;
- }
-
- if (ret) {
- dev_err(gxp->dev,
- "Failed to acquire BLOCK wakelock for client (ret=%d)\n",
- ret);
- goto out;
- }
-
- client->has_block_wakelock = true;
-
- /*
- * Update client's TGID/PID in case the process that opened
- * /dev/gxp is not the one that called this IOCTL.
- */
- client->tgid = current->tgid;
- client->pid = current->pid;
- }
-
- /* Acquire a VIRTUAL_DEVICE wakelock if requested */
- if (ibuf.components_to_wake & WAKELOCK_VIRTUAL_DEVICE) {
- if (!client->has_block_wakelock) {
- dev_err(gxp->dev,
- "Must hold BLOCK wakelock to acquire VIRTUAL_DEVICE wakelock\n");
- ret = -EINVAL;
- goto out;
-
- }
-
- if (client->vd->state == GXP_VD_UNAVAILABLE) {
- dev_err(gxp->dev,
- "Cannot acquire VIRTUAL_DEVICE wakelock on a broken virtual device\n");
- ret = -ENODEV;
- goto out;
- }
-
- if (!client->has_vd_wakelock) {
- down_write(&gxp->vd_semaphore);
- if (client->vd->state == GXP_VD_OFF)
- ret = gxp_vd_start(client->vd);
- else
- ret = gxp_vd_resume(client->vd);
- up_write(&gxp->vd_semaphore);
- }
-
- if (ret) {
- dev_err(gxp->dev,
- "Failed to acquire VIRTUAL_DEVICE wakelock for client (ret=%d)\n",
- ret);
- goto err_acquiring_vd_wl;
- }
-
- client->has_vd_wakelock = true;
- }
-
- gxp_pm_update_requested_power_states(
- gxp, client->requested_power_state, client->requested_low_clkmux,
- aur_state_array[ibuf.gxp_power_state], false,
- client->requested_memory_power_state,
- aur_memory_state_array[ibuf.memory_power_state]);
- client->requested_power_state = aur_state_array[ibuf.gxp_power_state];
- client->requested_low_clkmux = false;
- client->requested_memory_power_state =
- aur_memory_state_array[ibuf.memory_power_state];
-out:
- up_write(&client->semaphore);
-
- return ret;
-
-err_acquiring_vd_wl:
- /*
- * In a single call, if any wakelock acquisition fails, all of them do.
- * If the client was acquiring both wakelocks and failed to acquire the
- * VIRTUAL_DEVICE wakelock after successfully acquiring the BLOCK
- * wakelock, then release it before returning the error code.
- */
- if (acquired_block_wakelock) {
- gxp_wakelock_release(gxp);
- client->has_block_wakelock = false;
- }
-
- up_write(&client->semaphore);
-
- return ret;
-}
-
-static int gxp_acquire_wake_lock(struct gxp_client *client,
- struct gxp_acquire_wakelock_ioctl __user *argp)
-{
- struct gxp_dev *gxp = client->gxp;
- struct gxp_acquire_wakelock_ioctl ibuf;
- bool acquired_block_wakelock = false;
- bool requested_low_clkmux = false;
- int ret = 0;
-
- if (copy_from_user(&ibuf, argp, sizeof(ibuf)))
- return -EFAULT;
-
- if (ibuf.gxp_power_state == GXP_POWER_STATE_OFF) {
- dev_err(gxp->dev,
- "GXP_POWER_STATE_OFF is not a valid value when acquiring a wakelock\n");
- return -EINVAL;
- }
- if (ibuf.gxp_power_state < GXP_POWER_STATE_OFF ||
- ibuf.gxp_power_state >= GXP_NUM_POWER_STATES) {
- dev_err(gxp->dev, "Requested power state is invalid\n");
- return -EINVAL;
- }
- if ((ibuf.memory_power_state < MEMORY_POWER_STATE_MIN ||
- ibuf.memory_power_state > MEMORY_POWER_STATE_MAX) &&
- ibuf.memory_power_state != MEMORY_POWER_STATE_UNDEFINED) {
- dev_err(gxp->dev,
- "Requested memory power state %d is invalid\n",
- ibuf.memory_power_state);
- return -EINVAL;
- }
-
- if (ibuf.gxp_power_state == GXP_POWER_STATE_READY) {
- dev_warn_once(
- gxp->dev,
- "GXP_POWER_STATE_READY is deprecated, please set GXP_POWER_LOW_FREQ_CLKMUX with GXP_POWER_STATE_UUD state");
- ibuf.gxp_power_state = GXP_POWER_STATE_UUD;
- }
-
- if(ibuf.flags & GXP_POWER_NON_AGGRESSOR)
- dev_warn_once(
- gxp->dev,
- "GXP_POWER_NON_AGGRESSOR is deprecated, no operation here");
-
- down_write(&client->semaphore);
- if ((ibuf.components_to_wake & WAKELOCK_VIRTUAL_DEVICE) &&
- (!client->vd)) {
- dev_err(gxp->dev,
- "Must allocate a virtual device to acquire VIRTUAL_DEVICE wakelock\n");
- ret = -EINVAL;
- goto out;
- }
-
- /* Acquire a BLOCK wakelock if requested */
- if (ibuf.components_to_wake & WAKELOCK_BLOCK) {
- if (!client->has_block_wakelock) {
- ret = gxp_wakelock_acquire(gxp);
- acquired_block_wakelock = true;
- }
-
- if (ret) {
- dev_err(gxp->dev,
- "Failed to acquire BLOCK wakelock for client (ret=%d)\n",
- ret);
- goto out;
- }
-
- client->has_block_wakelock = true;
- }
-
- /* Acquire a VIRTUAL_DEVICE wakelock if requested */
- if (ibuf.components_to_wake & WAKELOCK_VIRTUAL_DEVICE) {
- if (!client->has_block_wakelock) {
- dev_err(gxp->dev,
- "Must hold BLOCK wakelock to acquire VIRTUAL_DEVICE wakelock\n");
- ret = -EINVAL;
- goto out;
-
- }
-
- if (client->vd->state == GXP_VD_UNAVAILABLE) {
- dev_err(gxp->dev,
- "Cannot acquire VIRTUAL_DEVICE wakelock on a broken virtual device\n");
- ret = -ENODEV;
- goto err_acquiring_vd_wl;
- }
-
- if (!client->has_vd_wakelock) {
- down_write(&gxp->vd_semaphore);
- if (client->vd->state == GXP_VD_OFF)
- ret = gxp_vd_start(client->vd);
- else
- ret = gxp_vd_resume(client->vd);
- up_write(&gxp->vd_semaphore);
- }
-
- if (ret) {
- dev_err(gxp->dev,
- "Failed to acquire VIRTUAL_DEVICE wakelock for client (ret=%d)\n",
- ret);
- goto err_acquiring_vd_wl;
- }
-
- client->has_vd_wakelock = true;
- }
- requested_low_clkmux = (ibuf.flags & GXP_POWER_LOW_FREQ_CLKMUX) != 0;
-
- gxp_pm_update_requested_power_states(
- gxp, client->requested_power_state, client->requested_low_clkmux,
- aur_state_array[ibuf.gxp_power_state], requested_low_clkmux,
- client->requested_memory_power_state,
- aur_memory_state_array[ibuf.memory_power_state]);
- client->requested_power_state = aur_state_array[ibuf.gxp_power_state];
- client->requested_low_clkmux = requested_low_clkmux;
- client->requested_memory_power_state =
- aur_memory_state_array[ibuf.memory_power_state];
-out:
- up_write(&client->semaphore);
-
- return ret;
-
-err_acquiring_vd_wl:
- /*
- * In a single call, if any wakelock acquisition fails, all of them do.
- * If the client was acquiring both wakelocks and failed to acquire the
- * VIRTUAL_DEVICE wakelock after successfully acquiring the BLOCK
- * wakelock, then release it before returning the error code.
- */
- if (acquired_block_wakelock) {
- gxp_wakelock_release(gxp);
- client->has_block_wakelock = false;
- }
-
- up_write(&client->semaphore);
-
- return ret;
-}
-
-static int gxp_release_wake_lock(struct gxp_client *client, __u32 __user *argp)
-{
- struct gxp_dev *gxp = client->gxp;
- u32 wakelock_components;
- int ret = 0;
-
- if (copy_from_user(&wakelock_components, argp,
- sizeof(wakelock_components)))
- return -EFAULT;
-
- down_write(&client->semaphore);
-
- if (wakelock_components & WAKELOCK_VIRTUAL_DEVICE) {
- if (!client->has_vd_wakelock) {
- dev_err(gxp->dev,
- "Client must hold a VIRTUAL_DEVICE wakelock to release one\n");
- ret = -ENODEV;
- goto out;
- }
-
- /*
- * Currently VD state will not be GXP_VD_UNAVAILABLE if
- * has_vd_wakelock is true. Add this check just in case
- * GXP_VD_UNAVAILABLE will occur in more scenarios in the
- * future.
- */
- if (client->vd->state != GXP_VD_UNAVAILABLE) {
- down_write(&gxp->vd_semaphore);
- gxp_vd_suspend(client->vd);
- up_write(&gxp->vd_semaphore);
- }
-
- client->has_vd_wakelock = false;
- }
-
- if (wakelock_components & WAKELOCK_BLOCK) {
- if (client->has_vd_wakelock) {
- dev_err(gxp->dev,
- "Client cannot release BLOCK wakelock while holding a VD wakelock\n");
- ret = -EBUSY;
- goto out;
- }
-
- if (!client->has_block_wakelock) {
- dev_err(gxp->dev,
- "Client must hold a BLOCK wakelock to release one\n");
- ret = -ENODEV;
- goto out;
- }
-
- gxp_wakelock_release(gxp);
- /*
- * Other clients may still be using the BLK_AUR, check if we need
- * to change the power state.
- */
- gxp_pm_update_requested_power_states(
- gxp, client->requested_power_state,
- client->requested_low_clkmux, AUR_OFF, false,
- client->requested_memory_power_state,
- AUR_MEM_UNDEFINED);
- client->requested_power_state = AUR_OFF;
- client->requested_memory_power_state = AUR_MEM_UNDEFINED;
- client->requested_low_clkmux = false;
- client->has_block_wakelock = false;
- }
-
-out:
- up_write(&client->semaphore);
-
- return ret;
-}
-
-static int gxp_map_dmabuf(struct gxp_client *client,
- struct gxp_map_dmabuf_ioctl __user *argp)
-{
- struct gxp_dev *gxp = client->gxp;
- struct gxp_map_dmabuf_ioctl ibuf;
- struct gxp_mapping *mapping;
- int ret = 0;
-
- if (copy_from_user(&ibuf, argp, sizeof(ibuf)))
- return -EFAULT;
-
- if (ibuf.virtual_core_list == 0)
- return -EINVAL;
-
- down_read(&client->semaphore);
-
- if (!check_client_has_available_vd(client, "GXP_MAP_DMABUF")) {
- ret = -ENODEV;
- goto out_unlock;
- }
-
- mapping = gxp_dmabuf_map(gxp, client->vd, ibuf.virtual_core_list,
- ibuf.dmabuf_fd,
- /*gxp_dma_flags=*/0,
- mapping_flags_to_dma_dir(ibuf.flags));
- if (IS_ERR(mapping)) {
- ret = PTR_ERR(mapping);
- dev_err(gxp->dev, "Failed to map dma-buf (ret=%d)\n", ret);
- goto out_unlock;
- }
-
- ret = gxp_vd_mapping_store(client->vd, mapping);
- if (ret) {
- dev_err(gxp->dev,
- "Failed to store mapping for dma-buf (ret=%d)\n", ret);
- goto out_put;
- }
-
- ibuf.device_address = mapping->device_address;
-
- if (copy_to_user(argp, &ibuf, sizeof(ibuf))) {
- /* If the IOCTL fails, the dma-buf must be unmapped */
- gxp_vd_mapping_remove(client->vd, mapping);
- ret = -EFAULT;
- }
-
-out_put:
- /*
- * Release the reference from creating the dmabuf mapping
- * If the mapping was not successfully stored in the owning virtual
- * device, this will unmap and cleanup the dmabuf.
- */
- gxp_mapping_put(mapping);
-
-out_unlock:
- up_read(&client->semaphore);
-
- return ret;
-}
-
-static int gxp_unmap_dmabuf(struct gxp_client *client,
- struct gxp_map_dmabuf_ioctl __user *argp)
-{
- struct gxp_dev *gxp = client->gxp;
- struct gxp_map_dmabuf_ioctl ibuf;
- struct gxp_mapping *mapping;
- int ret = 0;
-
- if (copy_from_user(&ibuf, argp, sizeof(ibuf)))
- return -EFAULT;
-
- down_read(&client->semaphore);
-
- if (!client->vd) {
- dev_err(gxp->dev,
- "GXP_UNMAP_DMABUF requires the client allocate a VIRTUAL_DEVICE\n");
- ret = -ENODEV;
- goto out;
- }
-
- /*
- * Fetch and remove the internal mapping records.
- * If host_address is not 0, the provided device_address belongs to a
- * non-dma-buf mapping.
- */
- mapping = gxp_vd_mapping_search(client->vd, ibuf.device_address);
- if (IS_ERR_OR_NULL(mapping) || mapping->host_address) {
- dev_warn(gxp->dev, "No dma-buf mapped for given IOVA\n");
- /*
- * If the device address belongs to a non-dma-buf mapping,
- * release the reference to it obtained via the search.
- */
- if (!IS_ERR_OR_NULL(mapping))
- gxp_mapping_put(mapping);
- ret = -EINVAL;
- goto out;
- }
-
- /* Remove the mapping from its VD, releasing the VD's reference */
- gxp_vd_mapping_remove(client->vd, mapping);
-
- /* Release the reference from gxp_vd_mapping_search() */
- gxp_mapping_put(mapping);
-
-out:
- up_read(&client->semaphore);
-
- return ret;
-}
-
-static int gxp_register_mailbox_eventfd(
- struct gxp_client *client,
- struct gxp_register_mailbox_eventfd_ioctl __user *argp)
-{
- struct gxp_register_mailbox_eventfd_ioctl ibuf;
- struct gxp_eventfd *eventfd;
- int ret = 0;
-
- if (copy_from_user(&ibuf, argp, sizeof(ibuf)))
- return -EFAULT;
-
- down_write(&client->semaphore);
-
- if (!check_client_has_available_vd(client, "GXP_REGISTER_MAILBOX_EVENTFD")) {
- ret = -ENODEV;
- goto out;
- }
-
- if (ibuf.virtual_core_id >= client->vd->num_cores) {
- ret = -EINVAL;
- goto out;
- }
-
- /* Make sure the provided eventfd is valid */
- eventfd = gxp_eventfd_create(ibuf.eventfd);
- if (IS_ERR(eventfd)) {
- ret = PTR_ERR(eventfd);
- goto out;
- }
-
- /* Set the new eventfd, replacing any existing one */
- if (client->mb_eventfds[ibuf.virtual_core_id])
- gxp_eventfd_put(client->mb_eventfds[ibuf.virtual_core_id]);
-
- client->mb_eventfds[ibuf.virtual_core_id] = eventfd;
-
-out:
- up_write(&client->semaphore);
-
- return ret;
-}
-
-static int gxp_unregister_mailbox_eventfd(
- struct gxp_client *client,
- struct gxp_register_mailbox_eventfd_ioctl __user *argp)
-{
- struct gxp_register_mailbox_eventfd_ioctl ibuf;
- int ret = 0;
-
- if (copy_from_user(&ibuf, argp, sizeof(ibuf)))
- return -EFAULT;
-
- down_write(&client->semaphore);
-
- if (!client->vd) {
- dev_err(client->gxp->dev,
- "GXP_UNREGISTER_MAILBOX_EVENTFD requires the client allocate a VIRTUAL_DEVICE\n");
- ret = -ENODEV;
- goto out;
- }
-
- if (ibuf.virtual_core_id >= client->vd->num_cores) {
- ret = -EINVAL;
- goto out;
- }
-
- if (client->mb_eventfds[ibuf.virtual_core_id])
- gxp_eventfd_put(client->mb_eventfds[ibuf.virtual_core_id]);
-
- client->mb_eventfds[ibuf.virtual_core_id] = NULL;
-
-out:
- up_write(&client->semaphore);
-
- return ret;
-}
-
-static int
-gxp_get_interface_version(struct gxp_client *client,
- struct gxp_interface_version_ioctl __user *argp)
-{
- struct gxp_interface_version_ioctl ibuf;
- int ret;
-
- ibuf.version_major = GXP_INTERFACE_VERSION_MAJOR;
- ibuf.version_minor = GXP_INTERFACE_VERSION_MINOR;
- memset(ibuf.version_build, 0, GXP_INTERFACE_VERSION_BUILD_BUFFER_SIZE);
- ret = snprintf(ibuf.version_build,
- GXP_INTERFACE_VERSION_BUILD_BUFFER_SIZE - 1,
- GIT_REPO_TAG);
-
- if (ret < 0 || ret >= GXP_INTERFACE_VERSION_BUILD_BUFFER_SIZE) {
- dev_warn(
- client->gxp->dev,
- "Buffer size insufficient to hold GIT_REPO_TAG (size=%d)\n",
- ret);
- }
-
- if (copy_to_user(argp, &ibuf, sizeof(ibuf)))
- return -EFAULT;
-
- return 0;
-}
-
-static int gxp_trigger_debug_dump(struct gxp_client *client,
- __u32 __user *argp)
-{
- struct gxp_dev *gxp = client->gxp;
- int phys_core, i;
- u32 core_bits;
- int ret = 0;
-
- if (!uid_eq(current_euid(), GLOBAL_ROOT_UID))
- return -EPERM;
-
- if (!gxp_debug_dump_is_enabled()) {
- dev_err(gxp->dev, "Debug dump functionality is disabled\n");
- return -EINVAL;
- }
-
- if (copy_from_user(&core_bits, argp, sizeof(core_bits)))
- return -EFAULT;
-
- /* Caller must hold VIRTUAL_DEVICE wakelock */
- down_read(&client->semaphore);
-
- if (!check_client_has_available_vd_wakelock(client,
- "GXP_TRIGGER_DEBUG_DUMP")) {
- ret = -ENODEV;
- goto out_unlock_client_semaphore;
- }
-
- down_read(&gxp->vd_semaphore);
-
- for (i = 0; i < GXP_NUM_CORES; i++) {
- if (!(core_bits & BIT(i)))
- continue;
- phys_core = gxp_vd_virt_core_to_phys_core(client->vd, i);
- if (phys_core < 0) {
- dev_err(gxp->dev,
- "Trigger debug dump failed: Invalid virtual core id (%u)\n",
- i);
- ret = -EINVAL;
- continue;
- }
-
- if (gxp_is_fw_running(gxp, phys_core)) {
- gxp_notification_send(gxp, phys_core,
- CORE_NOTIF_GENERATE_DEBUG_DUMP);
- }
- }
-
- up_read(&gxp->vd_semaphore);
-out_unlock_client_semaphore:
- up_read(&client->semaphore);
-
- return ret;
-}
-
-static long gxp_ioctl(struct file *file, uint cmd, ulong arg)
-{
- struct gxp_client *client = file->private_data;
- void __user *argp = (void __user *)arg;
- long ret;
-
- switch (cmd) {
- case GXP_MAP_BUFFER:
- ret = gxp_map_buffer(client, argp);
- break;
- case GXP_UNMAP_BUFFER:
- ret = gxp_unmap_buffer(client, argp);
- break;
- case GXP_SYNC_BUFFER:
- ret = gxp_sync_buffer(client, argp);
- break;
- case GXP_MAILBOX_COMMAND_COMPAT:
- ret = gxp_mailbox_command_compat(client, argp);
- break;
- case GXP_MAILBOX_RESPONSE:
- ret = gxp_mailbox_response(client, argp);
- break;
- case GXP_GET_SPECS:
- ret = gxp_get_specs(client, argp);
- break;
- case GXP_ALLOCATE_VIRTUAL_DEVICE:
- ret = gxp_allocate_vd(client, argp);
- break;
- case GXP_ETM_TRACE_START_COMMAND:
- ret = gxp_etm_trace_start_command(client, argp);
- break;
- case GXP_ETM_TRACE_SW_STOP_COMMAND:
- ret = gxp_etm_trace_sw_stop_command(client, argp);
- break;
- case GXP_ETM_TRACE_CLEANUP_COMMAND:
- ret = gxp_etm_trace_cleanup_command(client, argp);
- break;
- case GXP_ETM_GET_TRACE_INFO_COMMAND:
- ret = gxp_etm_get_trace_info_command(client, argp);
- break;
- case GXP_ENABLE_TELEMETRY:
- ret = gxp_enable_telemetry(client, argp);
- break;
- case GXP_DISABLE_TELEMETRY:
- ret = gxp_disable_telemetry(client, argp);
- break;
- case GXP_MAP_TPU_MBX_QUEUE:
- ret = gxp_map_tpu_mbx_queue(client, argp);
- break;
- case GXP_UNMAP_TPU_MBX_QUEUE:
- ret = gxp_unmap_tpu_mbx_queue(client, argp);
- break;
- case GXP_REGISTER_TELEMETRY_EVENTFD:
- ret = gxp_register_telemetry_eventfd(client, argp);
- break;
- case GXP_UNREGISTER_TELEMETRY_EVENTFD:
- ret = gxp_unregister_telemetry_eventfd(client, argp);
- break;
- case GXP_READ_GLOBAL_COUNTER:
- ret = gxp_read_global_counter(client, argp);
- break;
- case GXP_ACQUIRE_WAKE_LOCK_COMPAT:
- ret = gxp_acquire_wake_lock_compat(client, argp);
- break;
- case GXP_RELEASE_WAKE_LOCK:
- ret = gxp_release_wake_lock(client, argp);
- break;
- case GXP_MAP_DMABUF:
- ret = gxp_map_dmabuf(client, argp);
- break;
- case GXP_UNMAP_DMABUF:
- ret = gxp_unmap_dmabuf(client, argp);
- break;
- case GXP_MAILBOX_COMMAND:
- ret = gxp_mailbox_command(client, argp);
- break;
- case GXP_REGISTER_MAILBOX_EVENTFD:
- ret = gxp_register_mailbox_eventfd(client, argp);
- break;
- case GXP_UNREGISTER_MAILBOX_EVENTFD:
- ret = gxp_unregister_mailbox_eventfd(client, argp);
- break;
- case GXP_ACQUIRE_WAKE_LOCK:
- ret = gxp_acquire_wake_lock(client, argp);
- break;
- case GXP_GET_INTERFACE_VERSION:
- ret = gxp_get_interface_version(client, argp);
- break;
- case GXP_TRIGGER_DEBUG_DUMP:
- ret = gxp_trigger_debug_dump(client, argp);
- break;
- default:
- ret = -ENOTTY; /* unknown command */
- }
-
- return ret;
-}
-
-static int gxp_mmap(struct file *file, struct vm_area_struct *vma)
-{
- struct gxp_client *client = file->private_data;
-
- if (!client)
- return -ENODEV;
-
- switch (vma->vm_pgoff << PAGE_SHIFT) {
- case GXP_MMAP_LOG_BUFFER_OFFSET:
- return gxp_telemetry_mmap_buffers(client->gxp,
- GXP_TELEMETRY_TYPE_LOGGING,
- vma);
- case GXP_MMAP_TRACE_BUFFER_OFFSET:
- return gxp_telemetry_mmap_buffers(client->gxp,
- GXP_TELEMETRY_TYPE_TRACING,
- vma);
- default:
- return -EINVAL;
- }
-}
-
-static const struct file_operations gxp_fops = {
- .owner = THIS_MODULE,
- .llseek = no_llseek,
- .mmap = gxp_mmap,
- .open = gxp_open,
- .release = gxp_release,
- .unlocked_ioctl = gxp_ioctl,
-};
-
static int gxp_platform_probe(struct platform_device *pdev)
{
- struct device *dev = &pdev->dev;
- struct gxp_dev *gxp;
- struct resource *r;
- phys_addr_t offset, base_addr;
- struct device_node *np;
- struct platform_device *tpu_pdev;
- struct platform_device *gsa_pdev;
- int ret;
- int __maybe_unused i;
- bool __maybe_unused tpu_found;
- u64 prop;
-
- dev_notice(dev, "Probing gxp driver with commit %s\n", GIT_REPO_TAG);
+ struct gxp_dev *gxp =
+ devm_kzalloc(&pdev->dev, sizeof(*gxp), GFP_KERNEL);
- gxp = devm_kzalloc(dev, sizeof(*gxp), GFP_KERNEL);
if (!gxp)
return -ENOMEM;
- platform_set_drvdata(pdev, gxp);
- gxp->dev = dev;
-
- gxp->misc_dev.minor = MISC_DYNAMIC_MINOR;
- gxp->misc_dev.name = "gxp";
- gxp->misc_dev.fops = &gxp_fops;
-
- gxp_wakelock_init(gxp);
-
- ret = misc_register(&gxp->misc_dev);
- if (ret) {
- dev_err(dev, "Failed to register misc device (ret = %d)\n",
- ret);
- devm_kfree(dev, (void *)gxp);
- return ret;
- }
-
- r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (IS_ERR_OR_NULL(r)) {
- dev_err(dev, "Failed to get memory resource\n");
- ret = -ENODEV;
- goto err;
- }
-
- gxp->regs.paddr = r->start;
- gxp->regs.size = resource_size(r);
- gxp->regs.vaddr = devm_ioremap_resource(dev, r);
- if (IS_ERR_OR_NULL(gxp->regs.vaddr)) {
- dev_err(dev, "Failed to map registers\n");
- ret = -ENODEV;
- goto err;
- }
-
- r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cmu");
- if (!IS_ERR_OR_NULL(r)) {
- gxp->cmu.paddr = r->start;
- gxp->cmu.size = resource_size(r);
- gxp->cmu.vaddr = devm_ioremap_resource(dev, r);
- }
- /*
- * TODO (b/224685748): Remove this block after CMU CSR is supported
- * in device tree config.
- */
- if (IS_ERR_OR_NULL(r) || IS_ERR_OR_NULL(gxp->cmu.vaddr)) {
- gxp->cmu.paddr = gxp->regs.paddr - GXP_CMU_OFFSET;
- gxp->cmu.size = GXP_CMU_SIZE;
- gxp->cmu.vaddr = devm_ioremap(dev, gxp->cmu.paddr, gxp->cmu.size);
- if (IS_ERR_OR_NULL(gxp->cmu.vaddr))
- dev_warn(dev, "Failed to map CMU registers\n");
- }
-
- ret = gxp_pm_init(gxp);
- if (ret) {
- dev_err(dev, "Failed to init power management (ret=%d)\n", ret);
- goto err;
- }
-
- for (i = 0; i < GXP_NUM_CORES; i++) {
- r = platform_get_resource(pdev, IORESOURCE_MEM, i + 1);
- if (IS_ERR_OR_NULL(r)) {
- dev_err(dev, "Failed to get mailbox%d resource\n", i);
- ret = -ENODEV;
- goto err_pm_destroy;
- }
-
- gxp->mbx[i].paddr = r->start;
- gxp->mbx[i].size = resource_size(r);
- gxp->mbx[i].vaddr = devm_ioremap_resource(dev, r);
- if (IS_ERR_OR_NULL(gxp->mbx[i].vaddr)) {
- dev_err(dev, "Failed to map mailbox%d registers\n", i);
- ret = -ENODEV;
- goto err_pm_destroy;
- }
- }
-
- tpu_found = true;
- /* Get TPU device from device tree */
- np = of_parse_phandle(dev->of_node, "tpu-device", 0);
- if (IS_ERR_OR_NULL(np)) {
- dev_warn(dev, "No tpu-device in device tree\n");
- tpu_found = false;
- }
- tpu_pdev = of_find_device_by_node(np);
- if (!tpu_pdev) {
- dev_err(dev, "TPU device not found\n");
- tpu_found = false;
- }
- /* get tpu mailbox register base */
- ret = of_property_read_u64_index(np, "reg", 0, &base_addr);
- of_node_put(np);
- if (ret) {
- dev_warn(dev, "Unable to get tpu-device base address\n");
- tpu_found = false;
- }
- /* get gxp-tpu mailbox register offset */
- ret = of_property_read_u64(dev->of_node, "gxp-tpu-mbx-offset",
- &offset);
- if (ret) {
- dev_warn(dev, "Unable to get tpu-device mailbox offset\n");
- tpu_found = false;
- }
- if (tpu_found) {
- gxp->tpu_dev.dev = &tpu_pdev->dev;
- get_device(gxp->tpu_dev.dev);
- gxp->tpu_dev.mbx_paddr = base_addr + offset;
- } else {
- dev_warn(dev, "TPU will not be available for interop\n");
- gxp->tpu_dev.mbx_paddr = 0;
- }
-
- ret = gxp_dma_init(gxp);
- if (ret) {
- dev_err(dev, "Failed to initialize GXP DMA interface\n");
- goto err_put_tpu_dev;
- }
-
- gxp->mailbox_mgr = gxp_mailbox_create_manager(gxp, GXP_NUM_CORES);
- if (IS_ERR_OR_NULL(gxp->mailbox_mgr)) {
- dev_err(dev, "Failed to create mailbox manager\n");
- ret = -ENOMEM;
- goto err_dma_exit;
- }
-
-#if IS_ENABLED(CONFIG_SUBSYSTEM_COREDUMP)
- ret = gxp_debug_dump_init(gxp, &gxp_sscd_dev, &gxp_sscd_pdata);
-#else
- ret = gxp_debug_dump_init(gxp, NULL, NULL);
-#endif // !CONFIG_SUBSYSTEM_COREDUMP
- if (ret) {
- dev_err(dev, "Failed to initialize debug dump\n");
- gxp_debug_dump_exit(gxp);
- }
-
- mutex_init(&gxp->dsp_firmware_lock);
- mutex_init(&gxp->pin_user_pages_lock);
-
- gxp->domain_pool = kmalloc(sizeof(*gxp->domain_pool), GFP_KERNEL);
- if (!gxp->domain_pool) {
- ret = -ENOMEM;
- goto err_debug_dump_exit;
- }
- ret = gxp_domain_pool_init(gxp, gxp->domain_pool,
- GXP_NUM_PREALLOCATED_DOMAINS);
- if (ret) {
- dev_err(dev,
- "Failed to initialize IOMMU domain pool (ret=%d)\n",
- ret);
- goto err_free_domain_pool;
- }
- ret = gxp_vd_init(gxp);
- if (ret) {
- dev_err(dev,
- "Failed to initialize virtual device manager (ret=%d)\n",
- ret);
- goto err_domain_pool_destroy;
- }
- gxp_dma_init_default_resources(gxp);
-
- /* Get GSA device from device tree */
- np = of_parse_phandle(dev->of_node, "gsa-device", 0);
- if (!np) {
- dev_warn(
- dev,
- "No gsa-device in device tree. Firmware authentication not available\n");
- } else {
- gsa_pdev = of_find_device_by_node(np);
- if (!gsa_pdev) {
- dev_err(dev, "GSA device not found\n");
- of_node_put(np);
- ret = -ENODEV;
- goto err_vd_destroy;
- }
- gxp->gsa_dev = get_device(&gsa_pdev->dev);
- of_node_put(np);
- dev_info(
- dev,
- "GSA device found, Firmware authentication available\n");
- }
-
- ret = of_property_read_u64(dev->of_node, "gxp-memory-per-core",
- &prop);
- if (ret) {
- dev_err(dev, "Unable to get memory-per-core from device tree\n");
- gxp->memory_per_core = 0;
- } else {
- gxp->memory_per_core = (u32)prop;
- }
-
- gxp_fw_data_init(gxp);
- gxp_telemetry_init(gxp);
- gxp_create_debugfs(gxp);
- gxp->thermal_mgr = gxp_thermal_init(gxp);
- if (!gxp->thermal_mgr)
- dev_err(dev, "Failed to init thermal driver\n");
- dev_dbg(dev, "Probe finished\n");
-
- INIT_LIST_HEAD(&gxp->client_list);
- mutex_init(&gxp->client_list_lock);
-
- gxp_debug_pointer = gxp;
-
- return 0;
-err_vd_destroy:
- gxp_vd_destroy(gxp);
-err_domain_pool_destroy:
- gxp_domain_pool_destroy(gxp->domain_pool);
-err_free_domain_pool:
- kfree(gxp->domain_pool);
-err_debug_dump_exit:
- gxp_debug_dump_exit(gxp);
-err_dma_exit:
- gxp_dma_exit(gxp);
-err_put_tpu_dev:
- put_device(gxp->tpu_dev.dev);
-err_pm_destroy:
- gxp_pm_destroy(gxp);
-err:
- misc_deregister(&gxp->misc_dev);
- devm_kfree(dev, (void *)gxp);
- return ret;
+ return gxp_common_platform_probe(pdev, gxp);
}
static int gxp_platform_remove(struct platform_device *pdev)
{
- struct device *dev = &pdev->dev;
- struct gxp_dev *gxp = platform_get_drvdata(pdev);
-
- gxp_remove_debugfs(gxp);
- gxp_fw_data_destroy(gxp);
- if (gxp->gsa_dev)
- put_device(gxp->gsa_dev);
- gxp_vd_destroy(gxp);
- gxp_domain_pool_destroy(gxp->domain_pool);
- kfree(gxp->domain_pool);
- gxp_debug_dump_exit(gxp);
- gxp_dma_exit(gxp);
- put_device(gxp->tpu_dev.dev);
- gxp_pm_destroy(gxp);
- misc_deregister(&gxp->misc_dev);
-
- devm_kfree(dev, (void *)gxp);
-
- gxp_debug_pointer = NULL;
-
- return 0;
-}
-
-#if IS_ENABLED(CONFIG_PM_SLEEP)
-
-static int gxp_platform_suspend(struct device *dev)
-{
- struct gxp_dev *gxp = dev_get_drvdata(dev);
-
- return gxp_wakelock_suspend(gxp);
+ return gxp_common_platform_remove(pdev);
}
-static int gxp_platform_resume(struct device *dev)
-{
- struct gxp_dev *gxp = dev_get_drvdata(dev);
-
- return gxp_wakelock_resume(gxp);
-}
-
-static const struct dev_pm_ops gxp_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(gxp_platform_suspend, gxp_platform_resume)
-};
-
-#endif /* IS_ENABLED(CONFIG_PM_SLEEP) */
-
#ifdef CONFIG_OF
static const struct of_device_id gxp_of_match[] = {
{ .compatible = "google,gxp", },
@@ -2317,21 +42,12 @@ static const struct of_device_id gxp_of_match[] = {
MODULE_DEVICE_TABLE(of, gxp_of_match);
#endif
-#ifdef CONFIG_ACPI
-static const struct acpi_device_id gxp_acpi_match[] = {
- { "CXRP0001", 0 },
- { /* end of list */ },
-};
-MODULE_DEVICE_TABLE(acpi, gxp_acpi_match);
-#endif
-
static struct platform_driver gxp_platform_driver = {
.probe = gxp_platform_probe,
.remove = gxp_platform_remove,
.driver = {
.name = GXP_DRIVER_NAME,
.of_match_table = of_match_ptr(gxp_of_match),
- .acpi_match_table = ACPI_PTR(gxp_acpi_match),
#if IS_ENABLED(CONFIG_PM_SLEEP)
.pm = &gxp_pm_ops,
#endif
@@ -2340,23 +56,24 @@ static struct platform_driver gxp_platform_driver = {
static int __init gxp_platform_init(void)
{
-#if IS_ENABLED(CONFIG_SUBSYSTEM_COREDUMP)
- /* Registers SSCD platform device */
- if (gxp_debug_dump_is_enabled()) {
- if (platform_device_register(&gxp_sscd_dev))
- pr_err("Unable to register SSCD platform device\n");
- }
-#endif
+ gxp_common_platform_reg_sscd();
return platform_driver_register(&gxp_platform_driver);
}
static void __exit gxp_platform_exit(void)
{
platform_driver_unregister(&gxp_platform_driver);
-#if IS_ENABLED(CONFIG_SUBSYSTEM_COREDUMP)
- if (gxp_debug_dump_is_enabled())
- platform_device_unregister(&gxp_sscd_dev);
-#endif
+ gxp_common_platform_unreg_sscd();
+}
+
+bool gxp_is_direct_mode(struct gxp_dev *gxp)
+{
+ return true;
+}
+
+enum gxp_chip_revision gxp_get_chip_revision(struct gxp_dev *gxp)
+{
+ return GXP_CHIP_ANY;
}
MODULE_DESCRIPTION("Google GXP platform driver");
diff --git a/gxp-pm.c b/gxp-pm.c
index 5ed9612..ead9d7c 100644
--- a/gxp-pm.c
+++ b/gxp-pm.c
@@ -6,6 +6,7 @@
*/
#include <linux/acpm_dvfs.h>
+#include <linux/bits.h>
#include <linux/io.h>
#include <linux/pm_runtime.h>
#include <linux/types.h>
@@ -14,11 +15,17 @@
#include "gxp-bpm.h"
#include "gxp-client.h"
+#include "gxp-config.h"
+#include "gxp-dma.h"
#include "gxp-doorbell.h"
#include "gxp-internal.h"
#include "gxp-lpm.h"
#include "gxp-pm.h"
+#define SHUTDOWN_DELAY_US_MIN 200
+#define SHUTDOWN_DELAY_US_MAX 400
+#define SHUTDOWN_MAX_DELAY_COUNT 20
+
/*
* The order of this array decides the voting priority, should be increasing in
* frequencies.
@@ -32,17 +39,20 @@ static const uint aur_memory_state_array[] = {
AUR_MEM_HIGH, AUR_MEM_VERY_HIGH, AUR_MEM_MAX
};
-/*
- * TODO(b/177692488): move frequency values into chip-specific config.
- * TODO(b/221168126): survey how these value are derived from. Below
- * values are copied from the implementation in TPU firmware for PRO,
- * i.e. google3/third_party/darwinn/firmware/janeiro/power_manager.cc.
- */
-static const s32 aur_memory_state2int_table[] = { 0, 0, 0, 200000,
- 332000, 465000, 533000 };
-static const s32 aur_memory_state2mif_table[] = { 0, 0, 0,
- 1014000, 1352000, 2028000,
- 3172000 };
+static const s32 aur_memory_state2int_table[] = { 0,
+ AUR_MEM_INT_MIN,
+ AUR_MEM_INT_VERY_LOW,
+ AUR_MEM_INT_LOW,
+ AUR_MEM_INT_HIGH,
+ AUR_MEM_INT_VERY_HIGH,
+ AUR_MEM_INT_MAX };
+static const s32 aur_memory_state2mif_table[] = { 0,
+ AUR_MEM_MIF_MIN,
+ AUR_MEM_MIF_VERY_LOW,
+ AUR_MEM_MIF_LOW,
+ AUR_MEM_MIF_HIGH,
+ AUR_MEM_MIF_VERY_HIGH,
+ AUR_MEM_MIF_MAX };
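The two tables above are indexed by enum aur_memory_power_state and feed the INT/MIF PM QoS votes. As a minimal sketch — not part of the patch, with the helper name invented for illustration — such a lookup could be applied through the existing int_min/mif_min requests roughly like this:

/* Illustrative only: translate a memory power state into INT/MIF QoS votes. */
static void example_apply_memory_state(struct gxp_dev *gxp,
				       enum aur_memory_power_state state)
{
	s32 int_val = aur_memory_state2int_table[state];
	s32 mif_val = aur_memory_state2mif_table[state];

	exynos_pm_qos_update_request(&gxp->power_mgr->int_min, int_val);
	exynos_pm_qos_update_request(&gxp->power_mgr->mif_min, mif_val);
}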
static struct gxp_pm_device_ops gxp_aur_ops = {
.pre_blk_powerup = NULL,
@@ -62,8 +72,9 @@ static int gxp_pm_blkpwr_up(struct gxp_dev *gxp)
*/
ret = pm_runtime_resume_and_get(gxp->dev);
if (ret)
- dev_err(gxp->dev, "%s: pm_runtime_resume_and_get returned %d\n",
- __func__, ret);
+ dev_err(gxp->dev,
+ "pm_runtime_resume_and_get returned %d during blk up\n",
+ ret);
return ret;
}
@@ -75,9 +86,7 @@ static int gxp_pm_blkpwr_down(struct gxp_dev *gxp)
* Need to put TOP LPM into active state before blk off
* b/189396709
*/
- lpm_write_32_psm(gxp, LPM_TOP_PSM, LPM_REG_ENABLE_STATE_1, 0x0);
- lpm_write_32_psm(gxp, LPM_TOP_PSM, LPM_REG_ENABLE_STATE_2, 0x0);
- if (!gxp_lpm_wait_state_eq(gxp, LPM_TOP_PSM, LPM_ACTIVE_STATE)) {
+ if (!gxp_lpm_wait_state_eq(gxp, LPM_PSM_TOP, LPM_ACTIVE_STATE)) {
dev_err(gxp->dev,
"failed to force TOP LPM to PS0 during blk down\n");
return -EAGAIN;
@@ -91,8 +100,9 @@ static int gxp_pm_blkpwr_down(struct gxp_dev *gxp)
* indicate the device is still in use somewhere. The only
* expected value here is 0, indicating no remaining users.
*/
- dev_err(gxp->dev, "%s: pm_runtime_put_sync returned %d\n",
- __func__, ret);
+ dev_err(gxp->dev,
+ "pm_runtime_put_sync returned %d during blk down\n",
+ ret);
/* Remove our vote for INT/MIF state (if any) */
exynos_pm_qos_update_request(&gxp->power_mgr->int_min, 0);
exynos_pm_qos_update_request(&gxp->power_mgr->mif_min, 0);
@@ -117,7 +127,7 @@ int gxp_pm_blk_set_rate_acpm(struct gxp_dev *gxp, unsigned long rate)
{
int ret = exynos_acpm_set_rate(AUR_DVFS_DOMAIN, rate);
- dev_dbg(gxp->dev, "%s: rate %lu, ret %d\n", __func__, rate, ret);
+ dev_dbg(gxp->dev, "set blk rate %lu, ret %d\n", rate, ret);
return ret;
}
@@ -199,35 +209,33 @@ out:
mutex_unlock(&set_acpm_state_work->gxp->power_mgr->pm_lock);
}
+#define AUR_DVFS_DEBUG_REQ BIT(31)
+#define AUR_DEBUG_CORE_FREQ (AUR_DVFS_DEBUG_REQ | (3 << 27))
+
int gxp_pm_blk_get_state_acpm(struct gxp_dev *gxp)
{
int ret = exynos_acpm_get_rate(AUR_DVFS_DOMAIN, AUR_DEBUG_CORE_FREQ);
- dev_dbg(gxp->dev, "%s: state %d\n", __func__, ret);
+ dev_dbg(gxp->dev, "current blk state %d\n", ret);
return ret;
}
int gxp_pm_blk_on(struct gxp_dev *gxp)
{
- int ret = 0;
-
- if (WARN_ON(!gxp->power_mgr)) {
- dev_err(gxp->dev, "%s: No PM found\n", __func__);
- return -ENODEV;
- }
+ int ret;
dev_info(gxp->dev, "Powering on BLK ...\n");
mutex_lock(&gxp->power_mgr->pm_lock);
ret = gxp_pm_blkpwr_up(gxp);
- if (!ret) {
- gxp_pm_blk_set_state_acpm(gxp, AUR_INIT_DVFS_STATE);
- gxp->power_mgr->curr_state = AUR_INIT_DVFS_STATE;
- }
-
+ if (ret)
+ goto out;
+ gxp_pm_blk_set_state_acpm(gxp, AUR_INIT_DVFS_STATE);
+ gxp->power_mgr->curr_state = AUR_INIT_DVFS_STATE;
+ gxp_iommu_setup_shareability(gxp);
/* Startup TOP's PSM */
gxp_lpm_init(gxp);
gxp->power_mgr->blk_switch_count++;
-
+out:
mutex_unlock(&gxp->power_mgr->pm_lock);
return ret;
@@ -237,10 +245,6 @@ int gxp_pm_blk_off(struct gxp_dev *gxp)
{
int ret = 0;
- if (WARN_ON(!gxp->power_mgr)) {
- dev_err(gxp->dev, "%s: No PM found\n", __func__);
- return -ENODEV;
- }
dev_info(gxp->dev, "Powering off BLK ...\n");
mutex_lock(&gxp->power_mgr->pm_lock);
/*
@@ -264,14 +268,30 @@ int gxp_pm_blk_off(struct gxp_dev *gxp)
return ret;
}
+bool gxp_pm_is_blk_down(struct gxp_dev *gxp)
+{
+ int timeout_cnt = 0;
+ int curr_state;
+
+ if (!gxp->power_mgr->aur_status)
+ return gxp->power_mgr->curr_state == AUR_OFF;
+
+ do {
+ /* Delay 200~400us per retry until blk shutdown finishes */
+ usleep_range(SHUTDOWN_DELAY_US_MIN, SHUTDOWN_DELAY_US_MAX);
+ curr_state = readl(gxp->power_mgr->aur_status);
+ if (!curr_state)
+ return true;
+ timeout_cnt++;
+ } while (timeout_cnt < SHUTDOWN_MAX_DELAY_COUNT);
+
+ return false;
+}
+
int gxp_pm_get_blk_switch_count(struct gxp_dev *gxp)
{
int ret;
- if (!gxp->power_mgr) {
- dev_err(gxp->dev, "%s: No PM found\n", __func__);
- return -ENODEV;
- }
mutex_lock(&gxp->power_mgr->pm_lock);
ret = gxp->power_mgr->blk_switch_count;
mutex_unlock(&gxp->power_mgr->pm_lock);
@@ -283,10 +303,6 @@ int gxp_pm_get_blk_state(struct gxp_dev *gxp)
{
int ret;
- if (!gxp->power_mgr) {
- dev_err(gxp->dev, "%s: No PM found\n", __func__);
- return -ENODEV;
- }
mutex_lock(&gxp->power_mgr->pm_lock);
ret = gxp->power_mgr->curr_state;
mutex_unlock(&gxp->power_mgr->pm_lock);
@@ -296,17 +312,17 @@ int gxp_pm_get_blk_state(struct gxp_dev *gxp)
int gxp_pm_core_on(struct gxp_dev *gxp, uint core, bool verbose)
{
- int ret = 0;
+ int ret;
- /*
- * Check if TOP LPM is already on.
- */
- WARN_ON(!gxp_lpm_is_initialized(gxp, LPM_TOP_PSM));
+ if (!gxp_lpm_is_initialized(gxp, LPM_PSM_TOP)) {
+ dev_err(gxp->dev, "unable to power on core without TOP powered");
+ return -EINVAL;
+ }
mutex_lock(&gxp->power_mgr->pm_lock);
ret = gxp_lpm_up(gxp, core);
if (ret) {
- dev_err(gxp->dev, "%s: Core %d on fail\n", __func__, core);
+ dev_err(gxp->dev, "Core %d on fail\n", core);
mutex_unlock(&gxp->power_mgr->pm_lock);
return ret;
}
@@ -314,22 +330,19 @@ int gxp_pm_core_on(struct gxp_dev *gxp, uint core, bool verbose)
mutex_unlock(&gxp->power_mgr->pm_lock);
if (verbose)
- dev_notice(gxp->dev, "%s: Core %d up\n", __func__, core);
+ dev_notice(gxp->dev, "Core %d powered up\n", core);
return ret;
}
-int gxp_pm_core_off(struct gxp_dev *gxp, uint core)
+void gxp_pm_core_off(struct gxp_dev *gxp, uint core)
{
- /*
- * Check if TOP LPM is already on.
- */
- WARN_ON(!gxp_lpm_is_initialized(gxp, LPM_TOP_PSM));
+ if (!gxp_lpm_is_initialized(gxp, LPM_PSM_TOP))
+ return;
mutex_lock(&gxp->power_mgr->pm_lock);
gxp_lpm_down(gxp, core);
mutex_unlock(&gxp->power_mgr->pm_lock);
- dev_notice(gxp->dev, "%s: Core %d down\n", __func__, core);
- return 0;
+ dev_notice(gxp->dev, "Core %d powered down\n", core);
}
static int gxp_pm_req_state_locked(struct gxp_dev *gxp,
@@ -662,33 +675,39 @@ static int gxp_pm_update_requested_memory_power_state(
}
int gxp_pm_update_requested_power_states(
- struct gxp_dev *gxp, enum aur_power_state origin_state,
- bool origin_requested_low_clkmux, enum aur_power_state requested_state,
- bool requested_low_clkmux, enum aur_memory_power_state origin_mem_state,
- enum aur_memory_power_state requested_mem_state)
+ struct gxp_dev *gxp, struct gxp_power_states origin_vote,
+ struct gxp_power_states requested_states)
{
int ret = 0;
mutex_lock(&gxp->power_mgr->pm_lock);
- if (origin_state != requested_state ||
- origin_requested_low_clkmux != requested_low_clkmux) {
+ if (origin_vote.power != requested_states.power ||
+ origin_vote.low_clkmux != requested_states.low_clkmux) {
ret = gxp_pm_update_requested_power_state(
- gxp, origin_state, origin_requested_low_clkmux,
- requested_state, requested_low_clkmux);
+ gxp, origin_vote.power, origin_vote.low_clkmux,
+ requested_states.power, requested_states.low_clkmux);
if (ret)
goto out;
}
- if (origin_mem_state != requested_mem_state)
+ if (origin_vote.memory != requested_states.memory)
ret = gxp_pm_update_requested_memory_power_state(
- gxp, origin_mem_state, requested_mem_state);
+ gxp, origin_vote.memory, requested_states.memory);
out:
mutex_unlock(&gxp->power_mgr->pm_lock);
return ret;
}
+int gxp_pm_update_pm_qos(struct gxp_dev *gxp, s32 int_val, s32 mif_val)
+{
+ return gxp_pm_req_pm_qos(gxp, int_val, mif_val);
+}
+
int gxp_pm_init(struct gxp_dev *gxp)
{
struct gxp_power_manager *mgr;
+ struct platform_device *pdev =
+ container_of(gxp->dev, struct platform_device, dev);
+ struct resource *r;
uint i;
mgr = devm_kzalloc(gxp->dev, sizeof(*mgr), GFP_KERNEL);
@@ -719,6 +738,20 @@ int gxp_pm_init(struct gxp_dev *gxp)
gxp->power_mgr->force_mux_normal_count = 0;
gxp->power_mgr->blk_switch_count = 0l;
+ r = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "pmu_aur_status");
+ if (!r) {
+ dev_warn(gxp->dev, "Failed to find PMU register base\n");
+ } else {
+ gxp->power_mgr->aur_status = devm_ioremap_resource(gxp->dev, r);
+ if (IS_ERR(gxp->power_mgr->aur_status)) {
+ dev_err(gxp->dev,
+ "Failed to map PMU register base, ret=%ld\n",
+ PTR_ERR(gxp->power_mgr->aur_status));
+ gxp->power_mgr->aur_status = NULL;
+ }
+ }
+
pm_runtime_enable(gxp->dev);
exynos_pm_qos_add_request(&mgr->int_min, PM_QOS_DEVICE_THROUGHPUT, 0);
exynos_pm_qos_add_request(&mgr->mif_min, PM_QOS_BUS_THROUGHPUT, 0);
@@ -728,9 +761,11 @@ int gxp_pm_init(struct gxp_dev *gxp)
int gxp_pm_destroy(struct gxp_dev *gxp)
{
- struct gxp_power_manager *mgr;
+ struct gxp_power_manager *mgr = gxp->power_mgr;
+
+ if (IS_GXP_TEST && !mgr)
+ return 0;
- mgr = gxp->power_mgr;
exynos_pm_qos_remove_request(&mgr->mif_min);
exynos_pm_qos_remove_request(&mgr->int_min);
pm_runtime_disable(gxp->dev);
diff --git a/gxp-pm.h b/gxp-pm.h
index c214a8b..188f449 100644
--- a/gxp-pm.h
+++ b/gxp-pm.h
@@ -11,18 +11,7 @@
#include "gxp-internal.h"
-#define AUR_DVFS_MIN_RATE 178000
-static const uint aur_power_state2rate[] = {
- 0, /* AUR_OFF */
- 178000, /* AUR_UUD */
- 373000, /* AUR_SUD */
- 750000, /* AUR_UD */
- 1160000, /* AUR_NOM */
- 178000, /* AUR_READY */
- 268000, /* AUR_UUD_PLUS */
- 560000, /* AUR_SUD_PLUS */
- 975000, /* AUR_UD_PLUS */
-};
+#define AUR_DVFS_MIN_RATE AUR_UUD_RATE
enum aur_power_state {
AUR_OFF = 0,
@@ -36,6 +25,18 @@ enum aur_power_state {
AUR_UD_PLUS = 8,
};
+static const uint aur_power_state2rate[] = {
+ AUR_OFF_RATE,
+ AUR_UUD_RATE,
+ AUR_SUD_RATE,
+ AUR_UD_RATE,
+ AUR_NOM_RATE,
+ AUR_READY_RATE,
+ AUR_UUD_PLUS_RATE,
+ AUR_SUD_PLUS_RATE,
+ AUR_UD_PLUS_RATE,
+};
+
enum aur_memory_power_state {
AUR_MEM_UNDEFINED = 0,
AUR_MEM_MIN = 1,
@@ -91,6 +92,17 @@ struct gxp_req_pm_qos_work {
bool using;
};
+struct gxp_power_states {
+ enum aur_power_state power;
+ enum aur_memory_power_state memory;
+ bool low_clkmux;
+};
+
+static const struct gxp_power_states off_states = { AUR_OFF, AUR_MEM_UNDEFINED,
+ false };
+static const struct gxp_power_states uud_states = { AUR_UUD, AUR_MEM_UNDEFINED,
+ false };
+
struct gxp_power_manager {
struct gxp_dev *gxp;
struct mutex pm_lock;
@@ -107,7 +119,7 @@ struct gxp_power_manager {
/* Last requested clock mux state */
bool last_scheduled_low_clkmux;
int curr_state;
- int curr_memory_state;
+ int curr_memory_state; /* Note: this state is not maintained in MCU mode. */
struct gxp_pm_device_ops *ops;
struct gxp_set_acpm_state_work
set_acpm_state_work[AUR_NUM_POWER_STATE_WORKER];
@@ -126,15 +138,20 @@ struct gxp_power_manager {
/* Max frequency that the thermal driver/ACPM will allow in Hz */
unsigned long thermal_limit;
u64 blk_switch_count;
+ /* PMU AUR_STATUS base address for block status, may be NULL */
+ void __iomem *aur_status;
};
/**
* gxp_pm_blk_on() - Turn on the power for BLK_AUR
* @gxp: The GXP device to turn on
*
+ * Note: In most cases you should use gxp_acquire_wakelock() to ensure the
+ * device is ready to use; call this directly only if you really want to power
+ * on the block without setting up the device state.
+ *
* Return:
* * 0 - BLK ON successfully
- * * -ENODEV - Cannot find PM interface
*/
int gxp_pm_blk_on(struct gxp_dev *gxp);
@@ -144,12 +161,19 @@ int gxp_pm_blk_on(struct gxp_dev *gxp);
*
* Return:
* * 0 - BLK OFF successfully
- * * -ENODEV - Cannot find PM interface
- * * -EBUSY - Wakelock is held, blk is still busy
*/
int gxp_pm_blk_off(struct gxp_dev *gxp);
/**
+ * gxp_pm_is_blk_down() - Check whether the blk is turned off.
+ * @gxp: The GXP device to check
+ *
+ * Return:
+ * * true - blk is turned off.
+ * * false - blk is still powered up after the shutdown timeout.
+ */
+bool gxp_pm_is_blk_down(struct gxp_dev *gxp);
+
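As a usage sketch (the calling context is assumed, not taken from the patch), a shutdown path that has just requested block power-down could confirm the PMU status before re-powering the block:

	if (!gxp_pm_is_blk_down(gxp))
		dev_warn(gxp->dev, "BLK_AUR did not report shutdown in time\n");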
+/**
* gxp_pm_get_blk_state() - Get the blk power state
* @gxp: The GXP device to sample state
*
@@ -183,11 +207,8 @@ int gxp_pm_core_on(struct gxp_dev *gxp, uint core, bool verbose);
* gxp_pm_core_off() - Turn off a core on GXP device
* @gxp: The GXP device to operate
* @core: The core ID to turn off
- *
- * Return:
- * * 0 - Core off process finished successfully
*/
-int gxp_pm_core_off(struct gxp_dev *gxp, uint core);
+void gxp_pm_core_off(struct gxp_dev *gxp, uint core);
/**
* gxp_pm_init() - API for initialize PM interface for GXP, should only be
@@ -239,28 +260,36 @@ int gxp_pm_blk_get_state_acpm(struct gxp_dev *gxp);
* gxp_pm_update_requested_power_states() - API for a GXP client to vote for a
* requested power state and a requested memory power state.
* @gxp: The GXP device to operate.
- * @origin_state: An existing old requested state, will be cleared. If this is
- * the first vote, pass AUR_OFF.
- * @origin_requested_low_clkmux: Specify whether the existing vote was requested with
- * low frequency CLKMUX flag.
- * @requested_state: The new requested state.
- * @requested_low_clkmux: Specify whether the new vote is requested with low frequency
- * CLKMUX flag. Will take no effect if the @requested state is
- * AUR_OFF.
- * @origin_mem_state: An existing old requested state, will be cleared. If this is
- * the first vote, pass AUR_MEM_UNDEFINED.
- * @requested_mem_state: The new requested state.
+ * @origin_states: The existing requested states; they will be cleared. If this
+ *                 is the first vote, pass AUR_OFF and AUR_MEM_UNDEFINED in the
+ *                 power and memory fields. The low_clkmux field has no effect
+ *                 if the requested power state is AUR_OFF.
+ * @requested_states: The new requested states.
*
* Return:
* * 0 - Voting registered
* * -EINVAL - Invalid original state or requested state
*/
-int gxp_pm_update_requested_power_states(
- struct gxp_dev *gxp, enum aur_power_state origin_state,
- bool origin_requested_low_clkmux, enum aur_power_state requested_state,
- bool requested_low_clkmux, enum aur_memory_power_state origin_mem_state,
- enum aur_memory_power_state requested_mem_state);
+int gxp_pm_update_requested_power_states(struct gxp_dev *gxp,
+ struct gxp_power_states origin_states,
+ struct gxp_power_states requested_states);
+
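For illustration only (the surrounding client code is assumed, not part of the patch), a caller moving its vote from the preset uud_states to a nominal-frequency, high-memory request might do:

	struct gxp_power_states new_states = {
		.power = AUR_NOM,
		.memory = AUR_MEM_HIGH,
		.low_clkmux = false,
	};
	int ret;

	ret = gxp_pm_update_requested_power_states(gxp, uud_states, new_states);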
+/**
+ * gxp_pm_update_pm_qos() - API for updating the memory power state by passing the values of
+ * INT and MIF frequencies directly. This function ignores the vote ratings and updates the
+ * frequencies right away.
+ * @gxp: The GXP device to operate.
+ * @int_val: The value of INT frequency.
+ * @mif_val: The value of MIF frequency.
+ *
+ * Note: This function will not update the @curr_memory_state of gxp_power_manager.
+ *
+ * Return:
+ * * 0 - The memory power state has been changed
+ * * -EINVAL - Invalid requested state
+ */
+int gxp_pm_update_pm_qos(struct gxp_dev *gxp, s32 int_val, s32 mif_val);
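A minimal usage sketch, not part of the patch; the frequency values are taken from the chip-config macros purely as an example:

	/* Example values only; callers pass whatever INT/MIF rates they need. */
	ret = gxp_pm_update_pm_qos(gxp, AUR_MEM_INT_HIGH, AUR_MEM_MIF_HIGH);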
/*
* gxp_pm_force_clkmux_normal() - Force PLL_CON0_NOC_USER and PLL_CON0_PLL_AUR MUX
diff --git a/gxp-ssmt.c b/gxp-ssmt.c
new file mode 100644
index 0000000..f44fc6a
--- /dev/null
+++ b/gxp-ssmt.c
@@ -0,0 +1,75 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * GXP SSMT driver.
+ *
+ * Copyright (C) 2022 Google LLC
+ */
+
+#include <linux/platform_device.h>
+
+#include "gxp-config.h"
+#include "gxp-internal.h"
+#include "gxp-ssmt.h"
+
+static inline void ssmt_set_vid_for_sid(void __iomem *ssmt, uint vid, uint sid)
+{
+ /* NS_READ_STREAM_VID_<sid> */
+ writel(vid, ssmt + 0x1000u + 0x4u * sid);
+ /* NS_WRITE_STREAM_VID_<sid> */
+ writel(vid, ssmt + 0x1200u + 0x4u * sid);
+}
+
+int gxp_ssmt_init(struct gxp_dev *gxp, struct gxp_ssmt *ssmt)
+{
+ struct platform_device *pdev =
+ container_of(gxp->dev, struct platform_device, dev);
+ struct resource *r;
+
+ ssmt->gxp = gxp;
+ r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ssmt_idma");
+ if (!r) {
+ dev_err(gxp->dev, "Failed to find IDMA SSMT register base\n");
+ return -EINVAL;
+ }
+
+ ssmt->idma_ssmt_base = devm_ioremap_resource(gxp->dev, r);
+ if (IS_ERR(ssmt->idma_ssmt_base)) {
+ dev_err(gxp->dev,
+ "Failed to map IDMA SSMT register base (%ld)\n",
+ PTR_ERR(ssmt->idma_ssmt_base));
+ return PTR_ERR(ssmt->idma_ssmt_base);
+ }
+
+ r = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "ssmt_inst_data");
+ if (!r) {
+ dev_err(gxp->dev,
+ "Failed to find instruction/data SSMT register base\n");
+ return -EINVAL;
+ }
+
+ ssmt->inst_data_ssmt_base = devm_ioremap_resource(gxp->dev, r);
+ if (IS_ERR(ssmt->inst_data_ssmt_base)) {
+ dev_err(gxp->dev,
+ "Failed to map instruction/data SSMT register base (%ld)\n",
+ PTR_ERR(ssmt->inst_data_ssmt_base));
+ return PTR_ERR(ssmt->inst_data_ssmt_base);
+ }
+
+ return 0;
+}
+
+void gxp_ssmt_set_core_vid(struct gxp_ssmt *ssmt, uint core, uint vid)
+{
+ const u8 sids[] = {
+ INST_SID_FOR_CORE(core),
+ DATA_SID_FOR_CORE(core),
+ IDMA_SID_FOR_CORE(core),
+ };
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(sids); i++) {
+ ssmt_set_vid_for_sid(ssmt->idma_ssmt_base, vid, sids[i]);
+ ssmt_set_vid_for_sid(ssmt->inst_data_ssmt_base, vid, sids[i]);
+ }
+}
diff --git a/gxp-ssmt.h b/gxp-ssmt.h
new file mode 100644
index 0000000..f3458df
--- /dev/null
+++ b/gxp-ssmt.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * GXP SSMT driver.
+ *
+ * Copyright (C) 2022 Google LLC
+ */
+
+#ifndef __GXP_SSMT_H__
+#define __GXP_SSMT_H__
+
+#include "gxp-internal.h"
+
+struct gxp_ssmt {
+ struct gxp_dev *gxp;
+ void __iomem *idma_ssmt_base;
+ void __iomem *inst_data_ssmt_base;
+};
+
+/*
+ * Initializes @ssmt structure.
+ *
+ * Resources allocated in this function are all device-managed.
+ *
+ * Returns 0 on success, -errno otherwise.
+ */
+int gxp_ssmt_init(struct gxp_dev *gxp, struct gxp_ssmt *ssmt);
+
+/*
+ * Programs SSMT to have @core (0 ~ GXP_NUM_CORES - 1) issue transactions
+ * with VID = @vid.
+ */
+void gxp_ssmt_set_core_vid(struct gxp_ssmt *ssmt, uint core, uint vid);
+
+#endif /* __GXP_SSMT_H__ */
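A usage sketch for the new SSMT helpers, assuming a probe-time struct gxp_ssmt instance and a purely illustrative one-VID-per-core policy (not part of the patch):

	struct gxp_ssmt ssmt;
	uint core;
	int ret;

	ret = gxp_ssmt_init(gxp, &ssmt);
	if (ret)
		return ret;
	/* Example policy: give each core a distinct VID. */
	for (core = 0; core < GXP_NUM_CORES; core++)
		gxp_ssmt_set_core_vid(&ssmt, core, core);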
diff --git a/gxp-telemetry.c b/gxp-telemetry.c
deleted file mode 100644
index 7eb18cb..0000000
--- a/gxp-telemetry.c
+++ /dev/null
@@ -1,705 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GXP telemetry support
- *
- * Copyright (C) 2021 Google LLC
- */
-
-#include <linux/slab.h>
-#include <linux/wait.h>
-
-#include "gxp-config.h"
-#include "gxp-dma.h"
-#include "gxp-firmware.h"
-#include "gxp-firmware-data.h"
-#include "gxp-host-device-structs.h"
-#include "gxp-notification.h"
-#include "gxp-telemetry.h"
-#include "gxp-vd.h"
-
-static inline bool is_core_telemetry_enabled(struct gxp_dev *gxp, uint core,
- u8 type)
-{
- u32 device_status =
- gxp_fw_data_get_telemetry_device_status(gxp, core, type);
-
- return device_status & GXP_TELEMETRY_DEVICE_STATUS_ENABLED;
-}
-
-static void telemetry_status_notification_work(struct work_struct *work)
-{
- struct gxp_telemetry_work *telem_work =
- container_of(work, struct gxp_telemetry_work, work);
- struct gxp_dev *gxp = telem_work->gxp;
- uint core = telem_work->core;
- struct gxp_telemetry_manager *mgr = telem_work->gxp->telemetry_mgr;
-
- /* Wake any threads waiting on an telemetry disable ACK */
- wake_up(&mgr->waitq);
-
- /* Signal the appropriate eventfd for any active telemetry types */
- mutex_lock(&mgr->lock);
-
- if (is_core_telemetry_enabled(gxp, core, GXP_TELEMETRY_TYPE_LOGGING) &&
- mgr->logging_efd)
- eventfd_signal(mgr->logging_efd, 1);
-
- if (is_core_telemetry_enabled(gxp, core, GXP_TELEMETRY_TYPE_TRACING) &&
- mgr->tracing_efd)
- eventfd_signal(mgr->tracing_efd, 1);
-
- mutex_unlock(&mgr->lock);
-}
-
-int gxp_telemetry_init(struct gxp_dev *gxp)
-{
- struct gxp_telemetry_manager *mgr;
- uint i;
-
- mgr = devm_kzalloc(gxp->dev, sizeof(*mgr), GFP_KERNEL);
- if (!mgr)
- return -ENOMEM;
-
- mutex_init(&mgr->lock);
- for (i = 0; i < GXP_NUM_CORES; i++) {
- INIT_WORK(&mgr->notification_works[i].work,
- telemetry_status_notification_work);
- mgr->notification_works[i].gxp = gxp;
- mgr->notification_works[i].core = i;
-
- }
- init_waitqueue_head(&mgr->waitq);
-
- gxp->telemetry_mgr = mgr;
-
- return 0;
-}
-
-/* Wrapper struct to be used by the telemetry vma_ops. */
-struct telemetry_vma_data {
- struct gxp_dev *gxp;
- struct buffer_data *buff_data;
- u8 type;
- refcount_t ref_count;
-};
-
-static void gxp_telemetry_vma_open(struct vm_area_struct *vma)
-{
- struct telemetry_vma_data *vma_data =
- (struct telemetry_vma_data *)vma->vm_private_data;
- struct gxp_dev *gxp = vma_data->gxp;
-
- mutex_lock(&gxp->telemetry_mgr->lock);
-
- refcount_inc(&vma_data->ref_count);
-
- mutex_unlock(&gxp->telemetry_mgr->lock);
-}
-
-static void free_telemetry_buffers(struct gxp_dev *gxp, struct buffer_data *data);
-
-static void gxp_telemetry_vma_close(struct vm_area_struct *vma)
-{
- struct telemetry_vma_data *vma_data =
- (struct telemetry_vma_data *)vma->vm_private_data;
- struct gxp_dev *gxp = vma_data->gxp;
- struct buffer_data *buff_data = vma_data->buff_data;
- u8 type = vma_data->type;
-
- mutex_lock(&gxp->telemetry_mgr->lock);
-
- if (!refcount_dec_and_test(&vma_data->ref_count))
- goto out;
-
- /*
- * Free the telemetry buffers if they are no longer in use.
- *
- * If a client enabled telemetry, then closed their VMA without
- * disabling it, firmware will still be expecting those buffers to be
- * mapped. If this is the case, telemetry will be disabled, and the
- * buffers freed, when the client is closed.
- *
- * We cannot disable telemetry here, since attempting to lock the
- * `vd_semaphore` while holding the mmap lock can lead to deadlocks.
- */
- if (refcount_dec_and_test(&buff_data->ref_count)) {
- switch (type) {
- case GXP_TELEMETRY_TYPE_LOGGING:
- gxp->telemetry_mgr->logging_buff_data = NULL;
- break;
- case GXP_TELEMETRY_TYPE_TRACING:
- gxp->telemetry_mgr->tracing_buff_data = NULL;
- break;
- default:
- dev_warn(gxp->dev, "%s called with invalid type %u\n",
- __func__, type);
- }
- free_telemetry_buffers(gxp, buff_data);
- }
-
- kfree(vma_data);
-
-out:
- mutex_unlock(&gxp->telemetry_mgr->lock);
-}
-
-static const struct vm_operations_struct gxp_telemetry_vma_ops = {
- .open = gxp_telemetry_vma_open,
- .close = gxp_telemetry_vma_close,
-};
-
-/**
- * check_telemetry_type_availability() - Checks if @type is valid and whether
- * buffers of that type already exists.
- * @gxp: The GXP device to check availability for
- * @type: Either `GXP_TELEMETRY_TYPE_LOGGING` or `GXP_TELEMETRY_TYPE_TRACING`
- *
- * Caller must hold the telemetry_manager's lock.
- *
- * Return:
- * * 0 - @type is valid and can have new buffers created
- * * -EBUSY - Buffers already exist for @type
- * * -EINVAL - @type is not a valid telemetry type
- */
-static int check_telemetry_type_availability(struct gxp_dev *gxp, u8 type)
-{
- switch (type) {
- case GXP_TELEMETRY_TYPE_LOGGING:
- if (gxp->telemetry_mgr->logging_buff_data)
- return -EBUSY;
- break;
- case GXP_TELEMETRY_TYPE_TRACING:
- if (gxp->telemetry_mgr->tracing_buff_data)
- return -EBUSY;
- break;
- default:
- return -EINVAL;
- }
-
- return 0;
-}
-
-/**
- * allocate_telemetry_buffers() - Allocate and populate a `struct buffer_data`,
- * including allocating and mapping one coherent
- * buffer of @size bytes per core.
- * @gxp: The GXP device to allocate the buffers for
- * @size: The size of buffer to allocate for each core
- *
- * Caller must hold the telemetry_manager's lock.
- *
- * Return: A pointer to the `struct buffer_data` if successful, NULL otherwise
- */
-static struct buffer_data *allocate_telemetry_buffers(struct gxp_dev *gxp,
- size_t size)
-{
- struct buffer_data *data;
- int i;
- void *buf;
- dma_addr_t daddr;
-
- size = size < PAGE_SIZE ? PAGE_SIZE : size;
-
- data = kzalloc(sizeof(*data), GFP_KERNEL);
- if (!data)
- return NULL;
-
- /* Allocate cache-coherent buffers for logging/tracing to */
- for (i = 0; i < GXP_NUM_CORES; i++) {
- /* Allocate a coherent buffer in the default domain */
- buf = dma_alloc_coherent(gxp->dev, size, &daddr, GFP_KERNEL);
- if (!buf) {
- dev_err(gxp->dev,
- "Failed to allocate coherent buffer\n");
- goto err_alloc;
- }
- data->buffers[i] = buf;
- data->buffer_daddrs[i] = daddr;
- }
- data->size = size;
- refcount_set(&data->ref_count, 1);
- data->is_enabled = false;
-
- return data;
-
-err_alloc:
- while (i--)
- dma_free_coherent(gxp->dev, size, data->buffers[i],
- data->buffer_daddrs[i]);
- kfree(data);
-
- return NULL;
-}
-
-/**
- * free_telemetry_buffers() - Unmap and free a `struct buffer_data`
- * @gxp: The GXP device the buffers were allocated for
- * @data: The descriptor of the buffers to unmap and free
- *
- * Caller must hold the telemetry_manager's lock.
- */
-static void free_telemetry_buffers(struct gxp_dev *gxp, struct buffer_data *data)
-{
- int i;
-
- lockdep_assert_held(&gxp->telemetry_mgr->lock);
-
- for (i = 0; i < GXP_NUM_CORES; i++)
- dma_free_coherent(gxp->dev, data->size, data->buffers[i],
- data->buffer_daddrs[i]);
-
- kfree(data);
-}
-
-/**
- * remap_telemetry_buffers() - Remaps a set of telemetry buffers into a
- * user-space vm_area.
- * @gxp: The GXP device the buffers were allocated for
- * @vma: A vm area to remap the buffers into
- * @buff_data: The data describing a set of telemetry buffers to remap
- *
- * Caller must hold the telemetry_manager's lock.
- *
- * Return:
- * * 0 - Success
- * * otherwise - Error returned by `remap_pfn_range()`
- */
-static int remap_telemetry_buffers(struct gxp_dev *gxp,
- struct vm_area_struct *vma,
- struct buffer_data *buff_data)
-{
- unsigned long orig_pgoff = vma->vm_pgoff;
- int i;
- unsigned long offset;
- phys_addr_t phys;
- int ret = 0;
-
- /* mmap the buffers */
- vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
- vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND | VM_DONTDUMP;
- vma->vm_pgoff = 0;
-
- for (i = 0; i < GXP_NUM_CORES; i++) {
- /*
- * Remap each core's buffer a page at a time, in case it is not
- * physically contiguous.
- */
- for (offset = 0; offset < buff_data->size; offset += PAGE_SIZE) {
- /*
- * `virt_to_phys()` does not work on memory allocated
- * by `dma_alloc_coherent()`, so we have to use
- * `iommu_iova_to_phys()` instead. Since all buffers
- * are mapped to the default domain as well as any per-
- * core domains, we can use it here to get the physical
- * address of any valid IOVA, regardless of its core.
- */
- phys = iommu_iova_to_phys(
- iommu_get_domain_for_dev(gxp->dev),
- buff_data->buffer_daddrs[i] + offset);
- ret = remap_pfn_range(
- vma,
- vma->vm_start + buff_data->size * i + offset,
- phys >> PAGE_SHIFT, PAGE_SIZE,
- vma->vm_page_prot);
- if (ret)
- goto out;
- }
- }
-
-out:
- vma->vm_pgoff = orig_pgoff;
- vma->vm_ops = &gxp_telemetry_vma_ops;
-
- return ret;
-}
-
-int gxp_telemetry_mmap_buffers(struct gxp_dev *gxp, u8 type,
- struct vm_area_struct *vma)
-{
- int ret = 0;
- struct telemetry_vma_data *vma_data;
- size_t total_size = vma->vm_end - vma->vm_start;
- size_t size = total_size / GXP_NUM_CORES;
- struct buffer_data *buff_data;
- int i;
-
- if (!gxp->telemetry_mgr)
- return -ENODEV;
-
- /* Total size must divide evenly into 1 page-aligned buffer per core */
- if (!total_size || total_size % (PAGE_SIZE * GXP_NUM_CORES))
- return -EINVAL;
-
- mutex_lock(&gxp->telemetry_mgr->lock);
-
- ret = check_telemetry_type_availability(gxp, type);
- if (ret)
- goto err;
-
- vma_data = kmalloc(sizeof(*vma_data), GFP_KERNEL);
- if (!vma_data) {
- ret = -ENOMEM;
- goto err;
- }
-
- buff_data = allocate_telemetry_buffers(gxp, size);
- if (!buff_data) {
- ret = -ENOMEM;
- goto err_free_vma_data;
- }
-
- ret = remap_telemetry_buffers(gxp, vma, buff_data);
- if (ret)
- goto err_free_buffers;
-
- vma_data->gxp = gxp;
- vma_data->buff_data = buff_data;
- vma_data->type = type;
- refcount_set(&vma_data->ref_count, 1);
- vma->vm_private_data = vma_data;
-
- /* Save book-keeping on the buffers in the telemetry manager */
- if (type == GXP_TELEMETRY_TYPE_LOGGING)
- gxp->telemetry_mgr->logging_buff_data = buff_data;
- else /* type == GXP_TELEMETRY_TYPE_TRACING */
- gxp->telemetry_mgr->tracing_buff_data = buff_data;
-
- mutex_unlock(&gxp->telemetry_mgr->lock);
-
- return 0;
-
-err_free_buffers:
- for (i = 0; i < GXP_NUM_CORES; i++)
- dma_free_coherent(gxp->dev, buff_data->size,
- buff_data->buffers[i],
- buff_data->buffer_daddrs[i]);
- kfree(buff_data);
-
-err_free_vma_data:
- kfree(vma_data);
-
-err:
- mutex_unlock(&gxp->telemetry_mgr->lock);
- return ret;
-}
-
-int gxp_telemetry_enable(struct gxp_dev *gxp, u8 type)
-{
- struct buffer_data *data;
- int ret = 0;
- uint core, virt_core;
- struct gxp_virtual_device *vd;
-
- /*
- * `vd_semaphore` cannot be acquired while holding the telemetry lock,
- * so acquire it here before locking the telemetry lock.
- */
- down_read(&gxp->vd_semaphore);
- mutex_lock(&gxp->telemetry_mgr->lock);
-
- switch (type) {
- case GXP_TELEMETRY_TYPE_LOGGING:
- data = gxp->telemetry_mgr->logging_buff_data;
- break;
- case GXP_TELEMETRY_TYPE_TRACING:
- data = gxp->telemetry_mgr->tracing_buff_data;
- break;
- default:
- ret = -EINVAL;
- goto out;
- }
-
- if (!data) {
- ret = -ENXIO;
- goto out;
- }
-
- /* Map the buffers for any cores already running */
- for (core = 0; core < GXP_NUM_CORES; core++) {
- vd = gxp->core_to_vd[core];
- if (vd != NULL) {
- virt_core = gxp_vd_phys_core_to_virt_core(vd, core);
- ret = gxp_dma_map_allocated_coherent_buffer(
- gxp, data->buffers[core], vd, BIT(virt_core),
- data->size, data->buffer_daddrs[core], 0);
- if (ret)
- goto err;
- }
- }
-
- /* Populate the buffer fields in firmware-data */
- data->host_status |= GXP_TELEMETRY_HOST_STATUS_ENABLED;
- gxp_fw_data_set_telemetry_descriptors(gxp, type, data->host_status,
- data->buffer_daddrs, data->size);
-
- /* Notify any running cores that firmware-data was updated */
- for (core = 0; core < GXP_NUM_CORES; core++) {
- if (gxp_is_fw_running(gxp, core))
- gxp_notification_send(gxp, core,
- CORE_NOTIF_TELEMETRY_STATUS);
- }
-
- refcount_inc(&data->ref_count);
- data->is_enabled = true;
-
- goto out;
-err:
- while (core--) {
- vd = gxp->core_to_vd[core];
- if (vd != NULL) {
- virt_core = gxp_vd_phys_core_to_virt_core(vd, core);
- gxp_dma_unmap_allocated_coherent_buffer(
- gxp, vd, BIT(virt_core), data->size,
- data->buffer_daddrs[core]);
- }
- }
-
-out:
- mutex_unlock(&gxp->telemetry_mgr->lock);
- up_read(&gxp->vd_semaphore);
-
- return ret;
-}
-
-/**
- * notify_core_and_wait_for_disable() - Notify a core that telemetry state has
- * been changed by the host and wait for
- * the core to stop using telemetry.
- * @gxp: The GXP device telemetry is changing for
- * @core: The core in @gxp to notify of the telemetry state change
- * @type: Either `GXP_TELEMETRY_TYPE_LOGGING` or `GXP_TELEMETRY_TYPE_TRACING`
- *
- * Caller must hold `telemetry_mgr->lock`.
- * Caller must hold @gxp's virtual device lock
- *
- * Return:
- * * 0 - Firmware on @core is no longer using telemetry of @type
- * * -ENXIO - Firmware on @core is unresponsive
- */
-static int notify_core_and_wait_for_disable(struct gxp_dev *gxp, uint core,
- u8 type)
-{
- uint retries_left = 50;
-
- gxp_notification_send(gxp, core, CORE_NOTIF_TELEMETRY_STATUS);
-
- /* Wait for ACK from firmware */
- while (is_core_telemetry_enabled(gxp, core, type) &&
- gxp_is_fw_running(gxp, core) && retries_left) {
- /* Release vd_semaphore while waiting */
- up_read(&gxp->vd_semaphore);
-
- /*
- * The VD lock must be held to check if firmware is running, so
- * the wait condition is only whether the firmware data has been
- * updated to show the core disabling telemetry.
- *
- * If a core does stop running firmware while this function is
- * asleep, it will be seen at the next timeout.
- */
- wait_event_timeout(gxp->telemetry_mgr->waitq,
- !is_core_telemetry_enabled(gxp, core, type),
- msecs_to_jiffies(10));
- retries_left--;
-
- /*
- * No function may attempt to acquire the `vd_semaphore` while
- * holding the telemetry lock, so it must be released, then
- * re-acquired once the `vd_semaphore` is held.
- */
- mutex_unlock(&gxp->telemetry_mgr->lock);
- down_read(&gxp->vd_semaphore);
- mutex_lock(&gxp->telemetry_mgr->lock);
- }
-
- /*
- * If firmware has stopped running altogether, that is sufficient to be
- * considered disabled. If firmware is started on this core again, it
- * is responsible for clearing its status.
- */
- if (unlikely(is_core_telemetry_enabled(gxp, core, type) &&
- gxp_is_fw_running(gxp, core)))
- return -ENXIO;
-
- return 0;
-}
-
-/**
- * telemetry_disable_locked() - Helper function to break out the actual
- * process of disabling telemetry so that it
- * can be invoked by internal functions that are
- * already holding the telemetry lock.
- * @gxp: The GXP device to disable either logging or tracing for
- * @type: Either `GXP_TELEMETRY_TYPE_LOGGING` or `GXP_TELEMETRY_TYPE_TRACING`
- *
- * Caller must hold `telemetry_mgr->lock`.
- * Caller must hold `gxp->vd_semaphore` for reading.
- *
- * Return:
- * * 0 - Success
- * * -EINVAL - The @type provided is not valid
- * * -ENXIO - Buffers for @type have not been created/mapped yet
- */
-static int telemetry_disable_locked(struct gxp_dev *gxp, u8 type)
-{
- struct buffer_data *data;
- int ret = 0;
- dma_addr_t null_daddrs[GXP_NUM_CORES] = {0};
- uint core, virt_core;
- struct gxp_virtual_device *vd;
-
- /* Cleanup telemetry manager's book-keeping */
- switch (type) {
- case GXP_TELEMETRY_TYPE_LOGGING:
- data = gxp->telemetry_mgr->logging_buff_data;
- break;
- case GXP_TELEMETRY_TYPE_TRACING:
- data = gxp->telemetry_mgr->tracing_buff_data;
- break;
- default:
- return -EINVAL;
- }
-
- if (!data)
- return -ENXIO;
-
- if (!(data->host_status & GXP_TELEMETRY_HOST_STATUS_ENABLED))
- return 0;
-
- data->is_enabled = false;
-
- /* Clear the log buffer fields in firmware-data */
- data->host_status &= ~GXP_TELEMETRY_HOST_STATUS_ENABLED;
- gxp_fw_data_set_telemetry_descriptors(gxp, type, data->host_status,
- null_daddrs, 0);
-
- /* Notify any running cores that firmware-data was updated */
- for (core = 0; core < GXP_NUM_CORES; core++) {
- if (gxp_is_fw_running(gxp, core)) {
- ret = notify_core_and_wait_for_disable(gxp, core, type);
- if (ret)
- dev_warn(
- gxp->dev,
- "%s: core%u failed to disable telemetry (type=%u, ret=%d)\n",
- __func__, core, type, ret);
- }
- vd = gxp->core_to_vd[core];
- if (vd != NULL) {
- virt_core = gxp_vd_phys_core_to_virt_core(vd, core);
- gxp_dma_unmap_allocated_coherent_buffer(
- gxp, vd, BIT(virt_core), data->size,
- data->buffer_daddrs[core]);
- }
- }
-
- if (refcount_dec_and_test(&data->ref_count)) {
- switch (type) {
- case GXP_TELEMETRY_TYPE_LOGGING:
- gxp->telemetry_mgr->logging_buff_data = NULL;
- break;
- case GXP_TELEMETRY_TYPE_TRACING:
- gxp->telemetry_mgr->tracing_buff_data = NULL;
- break;
- default:
- /* NO-OP, we returned above if `type` was invalid */
- break;
- }
- free_telemetry_buffers(gxp, data);
- }
-
- return 0;
-}
-
-int gxp_telemetry_disable(struct gxp_dev *gxp, u8 type)
-{
- int ret;
-
- /*
- * `vd_semaphore` cannot be acquired while holding the telemetry lock,
- * so acquire it here before locking the telemetry lock.
- */
- down_read(&gxp->vd_semaphore);
- mutex_lock(&gxp->telemetry_mgr->lock);
-
- ret = telemetry_disable_locked(gxp, type);
-
- mutex_unlock(&gxp->telemetry_mgr->lock);
- up_read(&gxp->vd_semaphore);
-
- return ret;
-}
-
-int gxp_telemetry_register_eventfd(struct gxp_dev *gxp, u8 type, int fd)
-{
- struct eventfd_ctx *new_ctx;
- struct eventfd_ctx **ctx_to_set = NULL;
- int ret = 0;
-
- new_ctx = eventfd_ctx_fdget(fd);
- if (IS_ERR(new_ctx))
- return PTR_ERR(new_ctx);
-
- mutex_lock(&gxp->telemetry_mgr->lock);
-
- switch (type) {
- case GXP_TELEMETRY_TYPE_LOGGING:
- ctx_to_set = &gxp->telemetry_mgr->logging_efd;
- break;
- case GXP_TELEMETRY_TYPE_TRACING:
- ctx_to_set = &gxp->telemetry_mgr->tracing_efd;
- break;
- default:
- ret = -EINVAL;
- goto out;
- }
-
- if (*ctx_to_set) {
- dev_warn(gxp->dev,
- "Replacing existing telemetry eventfd (type=%u)\n",
- type);
- eventfd_ctx_put(*ctx_to_set);
- }
-
- *ctx_to_set = new_ctx;
-
-out:
- mutex_unlock(&gxp->telemetry_mgr->lock);
- return ret;
-}
-
-int gxp_telemetry_unregister_eventfd(struct gxp_dev *gxp, u8 type)
-{
- int ret = 0;
-
- mutex_lock(&gxp->telemetry_mgr->lock);
-
- switch (type) {
- case GXP_TELEMETRY_TYPE_LOGGING:
- if (gxp->telemetry_mgr->logging_efd)
- eventfd_ctx_put(gxp->telemetry_mgr->logging_efd);
- gxp->telemetry_mgr->logging_efd = NULL;
- break;
- case GXP_TELEMETRY_TYPE_TRACING:
- if (gxp->telemetry_mgr->tracing_efd)
- eventfd_ctx_put(gxp->telemetry_mgr->tracing_efd);
- gxp->telemetry_mgr->tracing_efd = NULL;
- break;
- default:
- ret = -EINVAL;
- }
-
- mutex_unlock(&gxp->telemetry_mgr->lock);
-
- return ret;
-}
-
-struct work_struct *gxp_telemetry_get_notification_handler(struct gxp_dev *gxp,
- uint core)
-{
- struct gxp_telemetry_manager *mgr = gxp->telemetry_mgr;
-
- if (!mgr || core >= GXP_NUM_CORES)
- return NULL;
-
- return &mgr->notification_works[core].work;
-}
diff --git a/gxp-telemetry.h b/gxp-telemetry.h
deleted file mode 100644
index d2e63de..0000000
--- a/gxp-telemetry.h
+++ /dev/null
@@ -1,135 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * GXP telemetry support
- *
- * Copyright (C) 2021 Google LLC
- */
-#ifndef __GXP_TELEMETRY_H__
-#define __GXP_TELEMETRY_H__
-
-#include <linux/eventfd.h>
-#include <linux/refcount.h>
-#include <linux/types.h>
-
-#include "gxp-internal.h"
-#include "gxp.h"
-
-struct gxp_telemetry_work {
- struct work_struct work;
- struct gxp_dev *gxp;
- uint core;
-};
-
-struct gxp_telemetry_manager {
- struct buffer_data {
- u32 host_status;
- void *buffers[GXP_NUM_CORES];
- dma_addr_t buffer_daddrs[GXP_NUM_CORES];
- u32 size;
- refcount_t ref_count;
- bool is_enabled;
- } *logging_buff_data, *tracing_buff_data;
- /* Protects logging_buff_data and tracing_buff_data */
- struct mutex lock;
- struct gxp_telemetry_work notification_works[GXP_NUM_CORES];
- wait_queue_head_t waitq;
- struct eventfd_ctx *logging_efd;
- struct eventfd_ctx *tracing_efd;
-};
-
-/**
- * gxp_telemetry_init() - Initialize telemetry support
- * @gxp: The GXP device to initialize telemetry support for
- *
- * Return:
- * * 0 - Success
- * * -ENOMEM - Insufficient memory is available to initialize support
- */
-int gxp_telemetry_init(struct gxp_dev *gxp);
-
-/**
- * gxp_telemetry_mmap_buffers() - Allocate a telemetry buffer for each core and
- * map them to their core and the user-space vma
- * @gxp: The GXP device to create the buffers for
- * @type: EIther `GXP_TELEMETRY_TYPE_LOGGING` or `GXP_TELEMETRY_TYPE_TRACING`
- * @vma: The vma from user-space which all cores' buffers will be mapped into
- *
- * Return:
- * * 0 - Success
- * * -ENODEV - Telemetry support has not been initialized. Must explicitly
- * check this, since this function is called based on user-input.
- * * -EBUSY - The requested telemetry @type is already in use
- * * -EINVAL - Either the vma size is not aligned or @type is not valid
- * * -ENOMEM - Insufficient memory is available to allocate and map the buffers
- */
-int gxp_telemetry_mmap_buffers(struct gxp_dev *gxp, u8 type,
- struct vm_area_struct *vma);
-
-/**
- * gxp_telemetry_enable() - Enable logging or tracing for all DSP cores
- * @gxp: The GXP device to enable either logging or tracing for
- * @type: Either `GXP_TELEMETRY_TYPE_LOGGING` or `GXP_TELEMETRY_TYPE_TRACING`
- *
- * Return:
- * * 0 - Success
- * * -EINVAL - The @type provided is not valid
- * * -ENXIO - Buffers for @type have not been created/mapped yet
- */
-int gxp_telemetry_enable(struct gxp_dev *gxp, u8 type);
-
-/**
- * gxp_telemetry_disable() - Disable logging or tracing for all DSP cores
- * @gxp: The GXP device to disable either logging or tracing for
- * @type: Either `GXP_TELEMETRY_TYPE_LOGGING` or `GXP_TELEMETRY_TYPE_TRACING`
- *
- * Return:
- * * 0 - Success
- * * -EINVAL - The @type provided is not valid
- * * -ENXIO - Buffers for @type have not been created/mapped yet
- */
-int gxp_telemetry_disable(struct gxp_dev *gxp, u8 type);
-
-/**
- * gxp_telemetry_register_eventfd() - Register an eventfd to be signaled when
- * telemetry notifications arrive while the
- * specified @type of telemetry is enabled
- * @gxp: The GXP device to register the eventfd for
- * @type: Either `GXP_TELEMETRY_TYPE_LOGGING` or `GXP_TELEMETRY_TYPE_TRACING`
- * @fd: A file descriptor for an eventfd from user-space
- *
- * If another eventfd has already been registered for the given @type, the old
- * eventfd will be unregistered and replaced.
- *
- * Return:
- * * 0 - Success
- * * -EBADF - @fd is not a valid file descriptor (via `eventfd_ctx_fdget()`)
- * * -EINVAL - Invalid @type or @fd is not an eventfd
- */
-int gxp_telemetry_register_eventfd(struct gxp_dev *gxp, u8 type, int fd);
-
-/**
- * gxp_telemetry_unregister_eventfd() - Unregister and release a reference to
- * a previously registered eventfd
- * @gxp: The GXP device to unregister the eventfd for
- * @type: Either `GXP_TELEMETRY_TYPE_LOGGING` or `GXP_TELEMETRY_TYPE_TRACING`
- *
- * Return:
- * * 0 - Success
- * * -EINVAL - The @type provided is not valid
- */
-int gxp_telemetry_unregister_eventfd(struct gxp_dev *gxp, u8 type);
-
-/**
- * gxp_telemetry_get_notification_handler() - Get the notification handler work
- * for the specified core
- * @gxp: The GXP device to obtain the handler for
- * @core: The physical core number to obtain the handler
- *
- * Return: A pointer to the work_struct for the @core's notification handler if
- * successful. NULL if telemetry has not been initialized or @core is
- * invalid.
- */
-struct work_struct *gxp_telemetry_get_notification_handler(struct gxp_dev *gxp,
- uint core);
-
-#endif /* __GXP_TELEMETRY_H__ */
diff --git a/gxp-thermal.c b/gxp-thermal.c
index ae6049d..812f466 100644
--- a/gxp-thermal.c
+++ b/gxp-thermal.c
@@ -18,23 +18,38 @@
#include <linux/thermal.h>
#include <linux/version.h>
+/*
+ * thermal_cdev_update is moved to drivers/thermal/thermal_core.h in kernel
+ * 5.12. The symbol is still exported, so declare the function prototype
+ * manually here to avoid an implicit-declaration compilation error.
+ */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 12, 0)
+void thermal_cdev_update(struct thermal_cooling_device *cdev);
+#endif
+
#include "gxp-internal.h"
#include "gxp-pm.h"
#include "gxp-thermal.h"
#include "gxp-lpm.h"
+#if GXP_HAS_MCU
+#include "gxp-kci.h"
+#include "gxp-mcu.h"
+#include "gxp-wakelock.h"
+#endif /* GXP_HAS_MCU */
+
/*
* Value comes from internal measurement
* b/229623553
*/
static struct gxp_state_pwr state_pwr_map[] = {
- {1155000, 78},
- {975000, 58},
- {750000, 40},
- {560000, 27},
- {373000, 20},
- {268000, 16},
- {178000, 13},
+ { AUR_NOM_RATE, 78 },
+ { AUR_UD_PLUS_RATE, 58 },
+ { AUR_UD_RATE, 40 },
+ { AUR_SUD_PLUS_RATE, 27 },
+ { AUR_SUD_RATE, 20 },
+ { AUR_UUD_PLUS_RATE, 16 },
+ { AUR_UUD_RATE, 13 },
};
static int gxp_get_max_state(struct thermal_cooling_device *cdev,
@@ -57,7 +72,8 @@ static int gxp_set_cur_state(struct thermal_cooling_device *cdev,
{
int ret = 0;
struct gxp_thermal_manager *thermal = cdev->devdata;
- struct device *dev = thermal->gxp->dev;
+ struct gxp_dev *gxp = thermal->gxp;
+ struct device *dev = gxp->dev;
unsigned long pwr_state;
if (cooling_state >= thermal->gxp_num_states) {
@@ -74,17 +90,35 @@ static int gxp_set_cur_state(struct thermal_cooling_device *cdev,
goto out;
}
pwr_state = state_pwr_map[cooling_state].state;
- dev_dbg(dev, "setting policy %ld\n", pwr_state);
+ dev_dbg(dev, "setting state %ld\n", pwr_state);
if (cooling_state != thermal->cooling_state) {
-#ifdef CONFIG_GXP_CLOUDRIPPER
- ret = exynos_acpm_set_policy(AUR_DVFS_DOMAIN,
- pwr_state < aur_power_state2rate[AUR_UUD] ?
- aur_power_state2rate[AUR_UUD] :
- pwr_state);
-#endif
+ if (!gxp_is_direct_mode(gxp)) {
+#if GXP_HAS_MCU
+ struct gxp_mcu *mcu = gxp_mcu_of(gxp);
+
+ ret = gxp_wakelock_acquire_if_powered(mcu->gxp);
+ if (ret) {
+ dev_err(dev,
+ "Can't acquire wakelock when powered down: %d\n",
+ ret);
+ goto out;
+ }
+
+ ret = gxp_kci_notify_throttling(&mcu->kci, pwr_state);
+ gxp_wakelock_release(gxp);
+#endif /* GXP_HAS_MCU */
+ } else {
+ ret = gxp_pm_blk_set_rate_acpm(
+ gxp,
+ max(pwr_state,
+ (unsigned long)
+ aur_power_state2rate[AUR_UUD]));
+ }
+
if (ret) {
- dev_err(dev,
- "error setting gxp cooling policy: %d\n", ret);
+ dev_err(dev, "error setting gxp cooling state: %d\n",
+ ret);
+ ret = -ENODEV;
goto out;
}
thermal->cooling_state = cooling_state;
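In direct mode, the hunk above clamps the requested DVFS rate so it never drops below the UUD rate. The stand-alone sketch below models that clamp; struct state_pwr, UUD_RATE_KHZ and rate_for_cooling_state() are illustrative names, and the rates simply mirror the hard-coded table this patch replaces with the AUR_*_RATE macros.

/* Stand-alone model of the direct-mode clamp; names and values are illustrative. */
#include <stdio.h>

struct state_pwr {
	unsigned long rate_khz;
	unsigned int power;	/* same unit as the driver's table */
};

static const struct state_pwr state_pwr_map[] = {
	{ 1155000, 78 }, { 975000, 58 }, { 750000, 40 }, { 560000, 27 },
	{ 373000, 20 }, { 268000, 16 }, { 178000, 13 },
};

#define UUD_RATE_KHZ 178000UL	/* assumed floor, matches the last table entry */

static unsigned long rate_for_cooling_state(unsigned long cooling_state)
{
	unsigned long rate;

	if (cooling_state >= sizeof(state_pwr_map) / sizeof(state_pwr_map[0]))
		cooling_state = sizeof(state_pwr_map) / sizeof(state_pwr_map[0]) - 1;
	rate = state_pwr_map[cooling_state].rate_khz;
	/* Equivalent of max(pwr_state, aur_power_state2rate[AUR_UUD]) above. */
	return rate < UUD_RATE_KHZ ? UUD_RATE_KHZ : rate;
}

int main(void)
{
	printf("%lu\n", rate_for_cooling_state(6));	/* 178000 */
	printf("%lu\n", rate_for_cooling_state(1));	/* 975000 */
	return 0;
}

With the current table the clamp acts only as a guard, since the lowest entry already equals the UUD rate.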
diff --git a/gxp-vd.c b/gxp-vd.c
index ae07455..3bd01cd 100644
--- a/gxp-vd.c
+++ b/gxp-vd.c
@@ -6,8 +6,14 @@
*/
#include <linux/bitops.h>
+#include <linux/idr.h>
#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <gcip/gcip-alloc-helper.h>
+
+#include "gxp-config.h"
+#include "gxp-core-telemetry.h"
#include "gxp-debug-dump.h"
#include "gxp-dma.h"
#include "gxp-domain-pool.h"
@@ -19,48 +25,175 @@
#include "gxp-mailbox.h"
#include "gxp-notification.h"
#include "gxp-pm.h"
-#include "gxp-telemetry.h"
#include "gxp-vd.h"
#include "gxp-wakelock.h"
static inline void hold_core_in_reset(struct gxp_dev *gxp, uint core)
{
- gxp_write_32_core(gxp, core, GXP_REG_ETM_PWRCTL,
- 1 << GXP_REG_ETM_PWRCTL_CORE_RESET_SHIFT);
+ gxp_write_32(gxp, GXP_CORE_REG_ETM_PWRCTL(core),
+ BIT(GXP_REG_ETM_PWRCTL_CORE_RESET_SHIFT));
}
-int gxp_vd_init(struct gxp_dev *gxp)
+void gxp_vd_init(struct gxp_dev *gxp)
{
uint core;
- int ret;
init_rwsem(&gxp->vd_semaphore);
/* All cores start as free */
for (core = 0; core < GXP_NUM_CORES; core++)
gxp->core_to_vd[core] = NULL;
+}
+
+void gxp_vd_destroy(struct gxp_dev *gxp)
+{
+ /* NO-OP for now. */
+}
- ret = gxp_fw_init(gxp);
+static int map_core_telemetry_buffers(struct gxp_dev *gxp,
+ struct gxp_virtual_device *vd,
+ uint core_list)
+{
+ struct buffer_data *data[2];
+ int i, core, ret;
+ if (!gxp->core_telemetry_mgr)
+ return 0;
+
+ mutex_lock(&gxp->core_telemetry_mgr->lock);
+ data[0] = gxp->core_telemetry_mgr->logging_buff_data;
+ data[1] = gxp->core_telemetry_mgr->tracing_buff_data;
+
+ for (i = 0; i < ARRAY_SIZE(data); i++) {
+ if (!data[i] || !data[i]->is_enabled)
+ continue;
+ for (core = 0; core < GXP_NUM_CORES; core++) {
+ if (!(BIT(core) & core_list))
+ continue;
+ ret = gxp_dma_map_allocated_coherent_buffer(
+ gxp, &data[i]->buffers[core], vd->domain, 0);
+ if (ret) {
+ dev_err(gxp->dev,
+ "Mapping core telemetry buffer to core %d failed",
+ core);
+ goto error;
+ }
+ }
+ }
+ mutex_unlock(&gxp->core_telemetry_mgr->lock);
+ return 0;
+error:
+ while (core--) {
+ if (!(BIT(core) & core_list))
+ continue;
+ gxp_dma_unmap_allocated_coherent_buffer(
+ gxp, vd->domain, &data[i]->buffers[core]);
+ }
+ while (i--) {
+ if (!data[i] || !data[i]->is_enabled)
+ continue;
+ for (core = 0; core < GXP_NUM_CORES; core++) {
+ if (!(BIT(core) & core_list))
+ continue;
+ gxp_dma_unmap_allocated_coherent_buffer(
+ gxp, vd->domain, &data[i]->buffers[core]);
+ }
+ }
+ mutex_unlock(&gxp->core_telemetry_mgr->lock);
return ret;
}
-void gxp_vd_destroy(struct gxp_dev *gxp)
+static void unmap_core_telemetry_buffers(struct gxp_dev *gxp,
+ struct gxp_virtual_device *vd,
+ uint core_list)
+{
+ struct buffer_data *data[2];
+ int i, core;
+
+ if (!gxp->core_telemetry_mgr)
+ return;
+ mutex_lock(&gxp->core_telemetry_mgr->lock);
+ data[0] = gxp->core_telemetry_mgr->logging_buff_data;
+ data[1] = gxp->core_telemetry_mgr->tracing_buff_data;
+
+ for (i = 0; i < ARRAY_SIZE(data); i++) {
+ if (!data[i] || !data[i]->is_enabled)
+ continue;
+ for (core = 0; core < GXP_NUM_CORES; core++) {
+ if (!(BIT(core) & core_list))
+ continue;
+ gxp_dma_unmap_allocated_coherent_buffer(
+ gxp, vd->domain, &data[i]->buffers[core]);
+ }
+ }
+ mutex_unlock(&gxp->core_telemetry_mgr->lock);
+}
+
+static int map_debug_dump_buffer(struct gxp_dev *gxp,
+ struct gxp_virtual_device *vd)
{
- down_write(&gxp->vd_semaphore);
+ if (!gxp->debug_dump_mgr)
+ return 0;
- gxp_fw_destroy(gxp);
+ return gxp_dma_map_allocated_coherent_buffer(
+ gxp, &gxp->debug_dump_mgr->buf, vd->domain, 0);
+}
+
+static void unmap_debug_dump_buffer(struct gxp_dev *gxp,
+ struct gxp_virtual_device *vd)
+{
+ if (!gxp->debug_dump_mgr)
+ return;
- up_write(&gxp->vd_semaphore);
+ gxp_dma_unmap_allocated_coherent_buffer(gxp, vd->domain,
+ &gxp->debug_dump_mgr->buf);
+}
+
+static int assign_cores(struct gxp_virtual_device *vd)
+{
+ struct gxp_dev *gxp = vd->gxp;
+ uint core;
+ uint available_cores = 0;
+
+ vd->core_list = 0;
+ for (core = 0; core < GXP_NUM_CORES; core++) {
+ if (gxp->core_to_vd[core] == NULL) {
+ if (available_cores < vd->num_cores)
+ vd->core_list |= BIT(core);
+ available_cores++;
+ }
+ }
+ if (available_cores < vd->num_cores) {
+ dev_err(gxp->dev, "Insufficient available cores. Available: %u. Requested: %u\n",
+ available_cores, vd->num_cores);
+ return -EBUSY;
+ }
+ for (core = 0; core < GXP_NUM_CORES; core++)
+ if (vd->core_list & BIT(core))
+ gxp->core_to_vd[core] = vd;
+ return 0;
+}
+
+static void unassign_cores(struct gxp_virtual_device *vd)
+{
+ struct gxp_dev *gxp = vd->gxp;
+ uint core;
+
+ for (core = 0; core < GXP_NUM_CORES; core++) {
+ if (gxp->core_to_vd[core] == vd)
+ gxp->core_to_vd[core] = NULL;
+ }
}
struct gxp_virtual_device *gxp_vd_allocate(struct gxp_dev *gxp,
u16 requested_cores)
{
struct gxp_virtual_device *vd;
+ unsigned int size;
int i;
int err;
+ lockdep_assert_held_write(&gxp->vd_semaphore);
/* Assumes 0 < requested_cores <= GXP_NUM_CORES */
if (requested_cores == 0 || requested_cores > GXP_NUM_CORES)
return ERR_PTR(-EINVAL);
@@ -72,30 +205,48 @@ struct gxp_virtual_device *gxp_vd_allocate(struct gxp_dev *gxp,
vd->gxp = gxp;
vd->num_cores = requested_cores;
vd->state = GXP_VD_OFF;
-
- vd->core_domains =
- kcalloc(requested_cores, sizeof(*vd->core_domains), GFP_KERNEL);
- if (!vd->core_domains) {
- err = -ENOMEM;
+ vd->slice_index = -1;
+ vd->client_id = -1;
+ vd->tpu_client_id = -1;
+ spin_lock_init(&vd->credit_lock);
+ vd->credit = GXP_COMMAND_CREDIT_PER_VD;
+ vd->first_open = true;
+
+ vd->domain = gxp_domain_pool_alloc(gxp->domain_pool);
+ if (!vd->domain) {
+ err = -EBUSY;
goto error_free_vd;
}
- for (i = 0; i < requested_cores; i++) {
- vd->core_domains[i] = gxp_domain_pool_alloc(gxp->domain_pool);
- if (!vd->core_domains[i]) {
- err = -EBUSY;
- goto error_free_domains;
+
+ if (gxp->num_shared_slices) {
+ vd->slice_index =
+ ida_alloc_max(&gxp->shared_slice_idp,
+ gxp->num_shared_slices - 1, GFP_KERNEL);
+ if (vd->slice_index < 0) {
+ err = vd->slice_index;
+ goto error_free_domain;
}
}
+ size = GXP_NUM_CORES * PRIVATE_FW_DATA_SIZE;
+ vd->fwdata_sgt = gcip_alloc_noncontiguous(gxp->dev, size, GFP_KERNEL);
+ if (!vd->fwdata_sgt) {
+ dev_err(gxp->dev, "allocate firmware data size=%x failed",
+ size);
+ err = -ENOMEM;
+ goto error_free_slice_index;
+ }
+
vd->mailbox_resp_queues = kcalloc(
vd->num_cores, sizeof(*vd->mailbox_resp_queues), GFP_KERNEL);
if (!vd->mailbox_resp_queues) {
err = -ENOMEM;
- goto error_free_domains;
+ goto error_free_fwdata;
}
for (i = 0; i < vd->num_cores; i++) {
- INIT_LIST_HEAD(&vd->mailbox_resp_queues[i].queue);
+ INIT_LIST_HEAD(&vd->mailbox_resp_queues[i].wait_queue);
+ INIT_LIST_HEAD(&vd->mailbox_resp_queues[i].dest_queue);
spin_lock_init(&vd->mailbox_resp_queues[i].lock);
init_waitqueue_head(&vd->mailbox_resp_queues[i].waitq);
}
@@ -103,12 +254,53 @@ struct gxp_virtual_device *gxp_vd_allocate(struct gxp_dev *gxp,
vd->mappings_root = RB_ROOT;
init_rwsem(&vd->mappings_semaphore);
+ err = assign_cores(vd);
+ if (err)
+ goto error_free_resp_queues;
+
+ if (gxp->data_mgr) {
+ vd->fw_app = gxp_fw_data_create_app(gxp, vd->core_list);
+ if (IS_ERR(vd->fw_app)) {
+ err = PTR_ERR(vd->fw_app);
+ goto error_unassign_cores;
+ }
+ }
+ err = gxp_dma_map_core_resources(gxp, vd->domain, vd->core_list,
+ vd->slice_index);
+ if (err)
+ goto error_destroy_fw_data;
+ err = gxp_dma_map_iova_sgt(gxp, vd->domain, GXP_IOVA_PRIV_FW_DATA,
+ vd->fwdata_sgt, IOMMU_READ | IOMMU_WRITE);
+ if (err)
+ goto error_unmap_core_resources;
+ err = map_core_telemetry_buffers(gxp, vd, vd->core_list);
+ if (err)
+ goto error_unmap_fw_data;
+ err = map_debug_dump_buffer(gxp, vd);
+ if (err)
+ goto error_unmap_core_telemetry_buffer;
+
return vd;
-error_free_domains:
- for (i -= 1; i >= 0; i--)
- gxp_domain_pool_free(gxp->domain_pool, vd->core_domains[i]);
- kfree(vd->core_domains);
+error_unmap_core_telemetry_buffer:
+ unmap_core_telemetry_buffers(gxp, vd, vd->core_list);
+error_unmap_fw_data:
+ gxp_dma_unmap_iova_sgt(gxp, vd->domain, GXP_IOVA_PRIV_FW_DATA, vd->fwdata_sgt);
+error_unmap_core_resources:
+ gxp_dma_unmap_core_resources(gxp, vd->domain, vd->core_list);
+error_destroy_fw_data:
+ gxp_fw_data_destroy_app(gxp, vd->fw_app);
+error_unassign_cores:
+ unassign_cores(vd);
+error_free_resp_queues:
+ kfree(vd->mailbox_resp_queues);
+error_free_fwdata:
+ gcip_free_noncontiguous(vd->fwdata_sgt);
+error_free_slice_index:
+ if (vd->slice_index >= 0)
+ ida_free(&gxp->shared_slice_idp, vd->slice_index);
+error_free_domain:
+ gxp_domain_pool_free(gxp->domain_pool, vd->domain);
error_free_vd:
kfree(vd);
@@ -117,28 +309,32 @@ error_free_vd:
void gxp_vd_release(struct gxp_virtual_device *vd)
{
- struct gxp_async_response *cur, *nxt;
- int i;
- unsigned long flags;
struct rb_node *node;
struct gxp_mapping *mapping;
+ struct gxp_dev *gxp = vd->gxp;
+ uint core_list = vd->core_list;
- /* Cleanup any unconsumed responses */
- for (i = 0; i < vd->num_cores; i++) {
- /*
- * Since VD is releasing, it is not necessary to lock here.
- * Do it anyway for consistency.
- */
- spin_lock_irqsave(&vd->mailbox_resp_queues[i].lock, flags);
- list_for_each_entry_safe(cur, nxt,
- &vd->mailbox_resp_queues[i].queue,
- list_entry) {
- list_del(&cur->list_entry);
- kfree(cur);
- }
- spin_unlock_irqrestore(&vd->mailbox_resp_queues[i].lock, flags);
+ lockdep_assert_held_write(&gxp->vd_semaphore);
+
+ if (vd->is_secure) {
+ mutex_lock(&gxp->secure_vd_lock);
+ gxp->secure_vd = NULL;
+ mutex_unlock(&gxp->secure_vd_lock);
+ }
+
+ unassign_cores(vd);
+ unmap_debug_dump_buffer(gxp, vd);
+ unmap_core_telemetry_buffers(gxp, vd, core_list);
+ gxp_dma_unmap_iova_sgt(gxp, vd->domain, GXP_IOVA_PRIV_FW_DATA, vd->fwdata_sgt);
+ gxp_dma_unmap_core_resources(gxp, vd->domain, core_list);
+
+ if (!IS_ERR_OR_NULL(vd->fw_app)) {
+ gxp_fw_data_destroy_app(gxp, vd->fw_app);
+ vd->fw_app = NULL;
}
+ vd->gxp->mailbox_mgr->release_unconsumed_async_resps(vd);
+
/*
* Release any un-mapped mappings
* Once again, it's not necessary to lock the mappings_semaphore here
@@ -152,233 +348,90 @@ void gxp_vd_release(struct gxp_virtual_device *vd)
}
up_write(&vd->mappings_semaphore);
- for (i = 0; i < vd->num_cores; i++)
- gxp_domain_pool_free(vd->gxp->domain_pool, vd->core_domains[i]);
- kfree(vd->core_domains);
kfree(vd->mailbox_resp_queues);
+ gcip_free_noncontiguous(vd->fwdata_sgt);
+ if (vd->slice_index >= 0)
+ ida_free(&vd->gxp->shared_slice_idp, vd->slice_index);
+ gxp_domain_pool_free(vd->gxp->domain_pool, vd->domain);
kfree(vd);
}
-static int map_telemetry_buffers(struct gxp_dev *gxp,
- struct gxp_virtual_device *vd, uint virt_core,
- uint core)
+int gxp_vd_block_ready(struct gxp_virtual_device *vd)
{
- int ret = 0;
- struct buffer_data *buff_data;
-
- mutex_lock(&gxp->telemetry_mgr->lock);
-
- /* Map logging buffers if logging is enabled */
- buff_data = gxp->telemetry_mgr->logging_buff_data;
- if (buff_data && buff_data->is_enabled) {
- ret = gxp_dma_map_allocated_coherent_buffer(
- gxp, buff_data->buffers[core], vd, BIT(virt_core),
- buff_data->size, buff_data->buffer_daddrs[core], 0);
- /* Don't bother checking tracing if logging fails */
- if (ret)
- goto out;
- }
+ struct gxp_dev *gxp = vd->gxp;
+ int ret;
- /* Map tracing buffers if tracing is enabled */
- buff_data = gxp->telemetry_mgr->tracing_buff_data;
- if (buff_data && buff_data->is_enabled) {
- ret = gxp_dma_map_allocated_coherent_buffer(
- gxp, buff_data->buffers[core], vd, BIT(virt_core),
- buff_data->size, buff_data->buffer_daddrs[core], 0);
- /* If tracing fails, unmap logging if it was enabled */
+ if (vd->state == GXP_VD_SUSPENDED)
+ return 0;
+ if (vd->state != GXP_VD_OFF)
+ return -EINVAL;
+ ret = gxp_dma_domain_attach_device(gxp, vd->domain, vd->core_list);
+ if (ret)
+ return ret;
+ vd->state = GXP_VD_READY;
+ if (gxp->after_vd_block_ready) {
+ ret = gxp->after_vd_block_ready(gxp, vd);
if (ret) {
- buff_data = gxp->telemetry_mgr->logging_buff_data;
- if (buff_data && buff_data->is_enabled)
- gxp_dma_unmap_allocated_coherent_buffer(
- gxp, vd, BIT(virt_core),
- buff_data->size,
- buff_data->buffer_daddrs[core]);
+ gxp_dma_domain_detach_device(gxp, vd->domain);
+ return ret;
}
}
-
-out:
- mutex_unlock(&gxp->telemetry_mgr->lock);
-
- return ret;
-}
-
-static void unmap_telemetry_buffers(struct gxp_dev *gxp,
- struct gxp_virtual_device *vd,
- uint virt_core, uint core)
-{
- struct buffer_data *buff_data;
-
- mutex_lock(&gxp->telemetry_mgr->lock);
-
- buff_data = gxp->telemetry_mgr->logging_buff_data;
- if (buff_data && buff_data->is_enabled)
- gxp_dma_unmap_allocated_coherent_buffer(
- gxp, vd, BIT(virt_core), buff_data->size,
- buff_data->buffer_daddrs[core]);
-
- buff_data = gxp->telemetry_mgr->tracing_buff_data;
- if (buff_data && buff_data->is_enabled)
- gxp_dma_unmap_allocated_coherent_buffer(
- gxp, vd, BIT(virt_core), buff_data->size,
- buff_data->buffer_daddrs[core]);
-
- mutex_unlock(&gxp->telemetry_mgr->lock);
-}
-
-static int map_debug_dump_buffer(struct gxp_dev *gxp,
- struct gxp_virtual_device *vd, uint virt_core,
- uint core)
-{
- /* If debug-dump is not enabled, nothing to map */
- if (!gxp->debug_dump_mgr)
- return 0;
-
- return gxp_dma_map_allocated_coherent_buffer(
- gxp, gxp->debug_dump_mgr->buf.vaddr, vd, BIT(virt_core),
- gxp->debug_dump_mgr->buf.size, gxp->debug_dump_mgr->buf.daddr,
- 0);
+ return 0;
}
-static void unmap_debug_dump_buffer(struct gxp_dev *gxp,
- struct gxp_virtual_device *vd,
- uint virt_core, uint core)
+void gxp_vd_block_unready(struct gxp_virtual_device *vd)
{
- if (!gxp->debug_dump_mgr)
- return;
+ struct gxp_dev *gxp = vd->gxp;
- gxp_dma_unmap_allocated_coherent_buffer(
- gxp, vd, BIT(virt_core), gxp->debug_dump_mgr->buf.size,
- gxp->debug_dump_mgr->buf.daddr);
+ if (gxp->before_vd_block_unready)
+ gxp->before_vd_block_unready(gxp, vd);
}
-/* Caller must hold gxp->vd_semaphore for writing */
-int gxp_vd_start(struct gxp_virtual_device *vd)
+int gxp_vd_run(struct gxp_virtual_device *vd)
{
struct gxp_dev *gxp = vd->gxp;
- uint core;
- uint available_cores = 0;
- uint cores_remaining = vd->num_cores;
- uint core_list = 0;
- uint virt_core = 0;
int ret = 0;
- for (core = 0; core < GXP_NUM_CORES; core++) {
- if (gxp->core_to_vd[core] == NULL) {
- if (available_cores < vd->num_cores)
- core_list |= BIT(core);
- available_cores++;
- }
- }
-
- if (available_cores < vd->num_cores) {
- dev_err(gxp->dev, "Insufficient available cores. Available: %u. Requested: %u\n",
- available_cores, vd->num_cores);
- return -EBUSY;
- }
-
- vd->fw_app = gxp_fw_data_create_app(gxp, core_list);
-
- for (core = 0; core < GXP_NUM_CORES; core++) {
- if (cores_remaining == 0)
- break;
-
- if (core_list & BIT(core)) {
- gxp->core_to_vd[core] = vd;
- cores_remaining--;
- ret = gxp_dma_domain_attach_device(gxp, vd, virt_core,
- core);
- if (ret)
- goto err_clean_all_cores;
- ret = gxp_dma_map_core_resources(gxp, vd, virt_core,
- core);
- if (ret)
- goto err_detach_domain;
- ret = map_telemetry_buffers(gxp, vd, virt_core, core);
- if (ret)
- goto err_unmap_res;
- ret = map_debug_dump_buffer(gxp, vd, virt_core, core);
- if (ret)
- goto err_unmap_telem;
- virt_core++;
- }
- }
-
- ret = gxp_firmware_run(gxp, vd, core_list);
+ lockdep_assert_held(&gxp->vd_semaphore);
+ if (vd->state != GXP_VD_READY && vd->state != GXP_VD_OFF)
+ return -EINVAL;
+ if (vd->state == GXP_VD_OFF)
+ gxp_vd_block_ready(vd);
+ ret = gxp_firmware_run(gxp, vd, vd->core_list);
if (ret)
- goto err_clean_all_cores;
-
- vd->state = GXP_VD_RUNNING;
- return ret;
-
-err_unmap_telem:
- unmap_telemetry_buffers(gxp, vd, virt_core, core);
-err_unmap_res:
- gxp_dma_unmap_core_resources(gxp, vd, virt_core, core);
-err_detach_domain:
- gxp_dma_domain_detach_device(gxp, vd, virt_core);
-err_clean_all_cores:
- gxp->core_to_vd[core] = NULL;
- virt_core--;
- while (core > 0) {
- core--;
- if (core_list & BIT(core)) {
- unmap_debug_dump_buffer(gxp, vd, virt_core, core);
- unmap_telemetry_buffers(gxp, vd, virt_core, core);
- gxp_dma_unmap_core_resources(gxp, vd, virt_core, core);
- gxp_dma_domain_detach_device(gxp, vd, virt_core);
- gxp->core_to_vd[core] = NULL;
- virt_core--;
- }
- }
- gxp_fw_data_destroy_app(gxp, vd->fw_app);
-
+ vd->state = GXP_VD_UNAVAILABLE;
+ else
+ vd->state = GXP_VD_RUNNING;
return ret;
}
-/* Caller must hold gxp->vd_semaphore for writing */
+/* Caller must hold gxp->vd_semaphore */
void gxp_vd_stop(struct gxp_virtual_device *vd)
{
struct gxp_dev *gxp = vd->gxp;
- uint core, core_list = 0;
- uint virt_core = 0;
+ uint core;
uint lpm_state;
- if ((vd->state == GXP_VD_OFF || vd->state == GXP_VD_RUNNING) &&
+ lockdep_assert_held(&gxp->vd_semaphore);
+ if ((vd->state == GXP_VD_OFF || vd->state == GXP_VD_READY ||
+ vd->state == GXP_VD_RUNNING) &&
gxp_pm_get_blk_state(gxp) != AUR_OFF) {
/*
* Put all cores in the VD into reset so they can not wake each other up
*/
for (core = 0; core < GXP_NUM_CORES; core++) {
if (gxp->core_to_vd[core] == vd) {
- lpm_state = gxp_lpm_get_state(gxp, core);
+ lpm_state = gxp_lpm_get_state(gxp, CORE_TO_PSM(core));
if (lpm_state != LPM_PG_STATE)
hold_core_in_reset(gxp, core);
}
}
}
- for (core = 0; core < GXP_NUM_CORES; core++)
- if (gxp->core_to_vd[core] == vd)
- core_list |= BIT(core);
-
- gxp_firmware_stop(gxp, vd, core_list);
-
- for (core = 0; core < GXP_NUM_CORES; core++) {
- if (gxp->core_to_vd[core] == vd) {
- unmap_debug_dump_buffer(gxp, vd, virt_core, core);
- unmap_telemetry_buffers(gxp, vd, virt_core, core);
- gxp_dma_unmap_core_resources(gxp, vd, virt_core, core);
- if (vd->state == GXP_VD_RUNNING)
- gxp_dma_domain_detach_device(gxp, vd, virt_core);
- gxp->core_to_vd[core] = NULL;
- virt_core++;
- }
- }
-
- if (!IS_ERR_OR_NULL(vd->fw_app)) {
- gxp_fw_data_destroy_app(gxp, vd->fw_app);
- vd->fw_app = NULL;
- }
+ gxp_firmware_stop(gxp, vd, vd->core_list);
+ if (vd->state == GXP_VD_READY || vd->state == GXP_VD_RUNNING)
+ gxp_dma_domain_detach_device(gxp, vd->domain);
+ vd->state = GXP_VD_OFF;
}
/*
@@ -390,8 +443,11 @@ void gxp_vd_suspend(struct gxp_virtual_device *vd)
struct gxp_dev *gxp = vd->gxp;
u32 boot_state;
uint failed_cores = 0;
- uint virt_core;
+#ifdef DISABLE_VD_SUSPEND_RESUME_SUPPORT
+ if (!gxp_is_direct_mode(gxp))
+ return gxp_vd_stop(vd);
+#endif
lockdep_assert_held_write(&gxp->vd_semaphore);
dev_info(gxp->dev, "Suspending VD ...\n");
if (vd->state == GXP_VD_SUSPENDED) {
@@ -406,7 +462,7 @@ void gxp_vd_suspend(struct gxp_virtual_device *vd)
*/
for (core = 0; core < GXP_NUM_CORES; core++) {
if (gxp->core_to_vd[core] == vd) {
- if (!gxp_lpm_wait_state_ne(gxp, core, LPM_ACTIVE_STATE)) {
+ if (!gxp_lpm_wait_state_ne(gxp, CORE_TO_PSM(core), LPM_ACTIVE_STATE)) {
vd->state = GXP_VD_UNAVAILABLE;
failed_cores |= BIT(core);
hold_core_in_reset(gxp, core);
@@ -424,12 +480,11 @@ void gxp_vd_suspend(struct gxp_virtual_device *vd)
CORE_NOTIF_SUSPEND_REQUEST);
}
}
- virt_core = 0;
/* Wait for all cores to complete core suspension. */
for (core = 0; core < GXP_NUM_CORES; core++) {
if (gxp->core_to_vd[core] == vd) {
if (!(failed_cores & BIT(core))) {
- if (!gxp_lpm_wait_state_eq(gxp, core,
+ if (!gxp_lpm_wait_state_eq(gxp, CORE_TO_PSM(core),
LPM_PG_STATE)) {
boot_state = gxp_firmware_get_boot_mode(
gxp, core);
@@ -444,14 +499,13 @@ void gxp_vd_suspend(struct gxp_virtual_device *vd)
}
} else {
/* Re-set PS1 as the default low power state. */
- gxp_lpm_enable_state(gxp, core,
+ gxp_lpm_enable_state(gxp, CORE_TO_PSM(core),
LPM_CG_STATE);
}
}
- gxp_dma_domain_detach_device(gxp, vd, virt_core);
- virt_core++;
}
}
+ gxp_dma_domain_detach_device(gxp, vd->domain);
if (vd->state == GXP_VD_UNAVAILABLE) {
/* shutdown all cores if virtual device is unavailable */
for (core = 0; core < GXP_NUM_CORES; core++)
@@ -472,7 +526,7 @@ int gxp_vd_resume(struct gxp_virtual_device *vd)
{
int ret = 0;
uint core;
- uint virt_core = 0;
+ uint core_list = 0;
uint timeout;
u32 boot_state;
struct gxp_dev *gxp = vd->gxp;
@@ -488,13 +542,18 @@ int gxp_vd_resume(struct gxp_virtual_device *vd)
}
gxp_pm_force_clkmux_normal(gxp);
curr_blk_switch_count = gxp_pm_get_blk_switch_count(gxp);
+
+ for (core = 0; core < GXP_NUM_CORES; core++) {
+ if (gxp->core_to_vd[core] == vd)
+ core_list |= BIT(core);
+ }
+ gxp_dma_domain_attach_device(gxp, vd->domain, core_list);
/*
* Start the resume process for all of this VD's cores without waiting
* for completion.
*/
for (core = 0; core < GXP_NUM_CORES; core++) {
- if (gxp->core_to_vd[core] == vd) {
- gxp_dma_domain_attach_device(gxp, vd, virt_core, core);
+ if (BIT(core) & core_list) {
/*
* The comparison is to check if blk_switch_count is
* changed. If it's changed, it means the block is rebooted and
@@ -506,7 +565,6 @@ int gxp_vd_resume(struct gxp_virtual_device *vd)
if (ret) {
vd->state = GXP_VD_UNAVAILABLE;
failed_cores |= BIT(core);
- virt_core++;
dev_err(gxp->dev, "Failed to power up core %u\n", core);
continue;
}
@@ -518,14 +576,13 @@ int gxp_vd_resume(struct gxp_virtual_device *vd)
* Power on the core by explicitly switching its PSM to
* PS0 (LPM_ACTIVE_STATE).
*/
- gxp_lpm_set_state(gxp, core, LPM_ACTIVE_STATE,
+ gxp_lpm_set_state(gxp, CORE_TO_PSM(core), LPM_ACTIVE_STATE,
/*verbose=*/false);
- virt_core++;
}
}
/* Wait for all cores to complete core resumption. */
for (core = 0; core < GXP_NUM_CORES; core++) {
- if (gxp->core_to_vd[core] == vd) {
+ if (BIT(core) & core_list) {
if (!(failed_cores & BIT(core))) {
/* in microseconds */
timeout = 1000000;
@@ -552,14 +609,11 @@ int gxp_vd_resume(struct gxp_virtual_device *vd)
}
if (vd->state == GXP_VD_UNAVAILABLE) {
/* shutdown all cores if virtual device is unavailable */
- virt_core = 0;
for (core = 0; core < GXP_NUM_CORES; core++) {
- if (gxp->core_to_vd[core] == vd) {
- gxp_dma_domain_detach_device(gxp, vd, virt_core);
+ if (BIT(core) & core_list)
gxp_pm_core_off(gxp, core);
- virt_core++;
- }
}
+ gxp_dma_domain_detach_device(gxp, vd->domain);
} else {
vd->state = GXP_VD_RUNNING;
}
@@ -589,60 +643,18 @@ int gxp_vd_virt_core_to_phys_core(struct gxp_virtual_device *vd, u16 virt_core)
return -EINVAL;
}
-/* Caller must have locked `gxp->vd_semaphore` for reading */
-uint gxp_vd_virt_core_list_to_phys_core_list(struct gxp_virtual_device *vd,
- u16 virt_core_list)
+uint gxp_vd_phys_core_list(struct gxp_virtual_device *vd)
{
- uint phys_core_list = 0;
- uint virt_core = 0;
- int phys_core;
-
- while (virt_core_list) {
- /*
- * Get the next virt core by finding the index of the first
- * set bit in the core list.
- *
- * Subtract 1 since `ffs()` returns a 1-based index. Since
- * virt_core_list cannot be 0 at this point, no need to worry
- * about wrap-around.
- */
- virt_core = ffs(virt_core_list) - 1;
-
- /* Any invalid virt cores invalidate the whole list */
- phys_core = gxp_vd_virt_core_to_phys_core(vd, virt_core);
- if (phys_core < 0)
- return 0;
-
- phys_core_list |= BIT(phys_core);
- virt_core_list &= ~BIT(virt_core);
- }
-
- return phys_core_list;
-}
-
-/* Caller must have locked `gxp->vd_semaphore` for reading */
-int gxp_vd_phys_core_to_virt_core(struct gxp_virtual_device *vd,
- u16 phys_core)
-{
- struct gxp_dev *gxp = vd->gxp;
- int virt_core = 0;
- uint core;
+ uint core_list = 0;
+ int core;
- if (gxp->core_to_vd[phys_core] != vd) {
- virt_core = -EINVAL;
- goto out;
+ lockdep_assert_held(&vd->gxp->vd_semaphore);
+ for (core = 0; core < GXP_NUM_CORES; core++) {
+ if (vd->gxp->core_to_vd[core] == vd)
+ core_list |= BIT(core);
}
- /*
- * A core's virtual core ID == the number of physical cores in the same
- * virtual device with a lower physical core ID than its own.
- */
- for (core = 0; core < phys_core; core++) {
- if (gxp->core_to_vd[core] == vd)
- virt_core++;
- }
-out:
- return virt_core;
+ return core_list;
}
int gxp_vd_mapping_store(struct gxp_virtual_device *vd,
@@ -785,3 +797,30 @@ struct gxp_mapping *gxp_vd_mapping_search_host(struct gxp_virtual_device *vd,
return NULL;
}
+
+bool gxp_vd_has_and_use_credit(struct gxp_virtual_device *vd)
+{
+ bool ret = true;
+ unsigned long flags;
+
+ spin_lock_irqsave(&vd->credit_lock, flags);
+ if (vd->credit == 0)
+ ret = false;
+ else
+ vd->credit--;
+ spin_unlock_irqrestore(&vd->credit_lock, flags);
+
+ return ret;
+}
+
+void gxp_vd_release_credit(struct gxp_virtual_device *vd)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&vd->credit_lock, flags);
+ if (unlikely(vd->credit >= GXP_COMMAND_CREDIT_PER_VD))
+ dev_err(vd->gxp->dev, "unbalanced VD credit");
+ else
+ vd->credit++;
+ spin_unlock_irqrestore(&vd->credit_lock, flags);
+}
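The credit helpers above gate how many mailbox commands a VD may have in flight. The sketch below is a self-contained user-space model of that bookkeeping (the spinlock is omitted); vd_model, vd_take_credit() and vd_release_credit() are illustrative names, not driver symbols.

/* Hedged user-space model of the per-VD credit counter (not the kernel code). */
#include <stdbool.h>
#include <stdio.h>

#define GXP_COMMAND_CREDIT_PER_VD 256

struct vd_model { unsigned int credit; };

static bool vd_take_credit(struct vd_model *vd)
{
	if (vd->credit == 0)
		return false;
	vd->credit--;
	return true;
}

static void vd_release_credit(struct vd_model *vd)
{
	if (vd->credit >= GXP_COMMAND_CREDIT_PER_VD)
		fprintf(stderr, "unbalanced VD credit\n");
	else
		vd->credit++;
}

int main(void)
{
	struct vd_model vd = { .credit = GXP_COMMAND_CREDIT_PER_VD };

	/* Every command takes a credit; every response gives it back. */
	if (vd_take_credit(&vd))
		vd_release_credit(&vd);
	printf("credits left: %u\n", vd.credit);	/* 256 */
	return 0;
}

In the driver, a credit is taken before a command is pushed and returned when its response (or timeout) is consumed, so a VD can never have more than GXP_COMMAND_CREDIT_PER_VD commands outstanding.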
diff --git a/gxp-vd.h b/gxp-vd.h
index feab79f..704e40f 100644
--- a/gxp-vd.h
+++ b/gxp-vd.h
@@ -2,8 +2,9 @@
/*
* GXP virtual device manager.
*
- * Copyright (C) 2021 Google LLC
+ * Copyright (C) 2021-2022 Google LLC
*/
+
#ifndef __GXP_VD_H__
#define __GXP_VD_H__
@@ -11,6 +12,7 @@
#include <linux/list.h>
#include <linux/rbtree.h>
#include <linux/rwsem.h>
+#include <linux/scatterlist.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/wait.h>
@@ -18,9 +20,17 @@
#include "gxp-internal.h"
#include "gxp-mapping.h"
+/* TODO(b/259192112): set to 8 once the runtime has added the credit limit. */
+#define GXP_COMMAND_CREDIT_PER_VD 256
+
+/* A special client ID for secure workloads pre-agreed with MCU firmware. */
+#define SECURE_CLIENT_ID (3 << 10)
+
struct mailbox_resp_queue {
- /* Queue of `struct gxp_async_response`s */
- struct list_head queue;
+ /* Queue of waiting async responses */
+ struct list_head wait_queue;
+ /* Queue of arrived async responses */
+ struct list_head dest_queue;
/* Lock protecting access to the `queue` */
spinlock_t lock;
/* Waitqueue to wait on if the queue is empty */
@@ -28,22 +38,23 @@ struct mailbox_resp_queue {
};
enum gxp_virtual_device_state {
- GXP_VD_OFF = 0,
- GXP_VD_RUNNING = 1,
- GXP_VD_SUSPENDED = 2,
+ GXP_VD_OFF,
+ GXP_VD_READY,
+ GXP_VD_RUNNING,
+ GXP_VD_SUSPENDED,
/*
* If the virtual device is in the unavailable state, it won't be changed
* back no matter what we do.
* Note: this state will only be set on suspend/resume failure.
*/
- GXP_VD_UNAVAILABLE = 3,
+ GXP_VD_UNAVAILABLE,
};
struct gxp_virtual_device {
struct gxp_dev *gxp;
uint num_cores;
void *fw_app;
- struct iommu_domain **core_domains;
+ struct gxp_iommu_domain *domain;
struct mailbox_resp_queue *mailbox_resp_queues;
struct rb_root mappings_root;
struct rw_semaphore mappings_semaphore;
@@ -55,6 +66,46 @@ struct gxp_virtual_device {
* process.
*/
u64 blk_switch_count_when_suspended;
+ /*
+ * The @domain of each virtual device maps one slice of the shared buffer. This
+ * stores the index of the slice used by this VD.
+ */
+ int slice_index;
+ /*
+ * The SG table that holds the firmware data region.
+ */
+ struct sg_table *fwdata_sgt;
+ uint core_list;
+ /*
+ * The ID of the DSP client, or -1 if it is not allocated.
+ * The ID is allocated by the DSP kernel driver, but is only stored here once the client of
+ * this VD has successfully acquired the block wakelock (i.e., after the kernel driver has
+ * allocated a virtual mailbox on the firmware side by sending the `allocate_vmbox` KCI
+ * command).
+ */
+ int client_id;
+ /*
+ * The ID of the TPU client, or -1 if it is not allocated.
+ * This ID is fetched from the TPU kernel driver.
+ */
+ int tpu_client_id;
+ /*
+ * Protects credit. Use a spin lock because the critical section of
+ * using @credit is pretty small.
+ */
+ spinlock_t credit_lock;
+ /*
+ * Credits for sending mailbox commands. Initialized to
+ * GXP_COMMAND_CREDIT_PER_VD, decremented when a mailbox command is sent
+ * and incremented when its response is received.
+ * Mailbox command requests are rejected when this value reaches 0.
+ *
+ * Only used in MCU mode.
+ */
+ uint credit;
+ /* Whether it's the first time allocating a VMBox for this VD. */
+ bool first_open;
+ bool is_secure;
};
/*
@@ -66,7 +117,7 @@ struct gxp_virtual_device {
* Initializes the device management subsystem and allocates resources for it.
* This is expected to be called once per driver lifecycle.
*/
-int gxp_vd_init(struct gxp_dev *gxp);
+void gxp_vd_init(struct gxp_dev *gxp);
/*
* Tears down the device management subsystem.
@@ -79,38 +130,54 @@ void gxp_vd_destroy(struct gxp_dev *gxp);
* @gxp: The GXP device the virtual device will belong to
* @requested_cores: The number of cores the virtual device will have
*
+ * The state of VD is initialized to GXP_VD_OFF.
+ *
+ * The caller must have locked gxp->vd_semaphore for writing.
+ *
* Return: The virtual address of the virtual device or an ERR_PTR on failure
* * -EINVAL - The number of requested cores was invalid
* * -ENOMEM - Unable to allocate the virtual device
- * * -EBUSY - Not enough iommu domains available
+ * * -EBUSY - Not enough iommu domains available or insufficient physical
+ * cores to be assigned to @vd
+ * * -ENOSPC - No shared slices are available
*/
-struct gxp_virtual_device *gxp_vd_allocate(struct gxp_dev *gxp, u16 requested_cores);
+struct gxp_virtual_device *gxp_vd_allocate(struct gxp_dev *gxp,
+ u16 requested_cores);
/**
* gxp_vd_release() - Cleanup and free a struct gxp_virtual_device
* @vd: The virtual device to be released
*
+ * The caller must have locked gxp->vd_semaphore for writing.
+ *
* A virtual device must be stopped before it can be released.
*/
void gxp_vd_release(struct gxp_virtual_device *vd);
/**
- * gxp_vd_start() - Run a virtual device on physical cores
- * @vd: The virtual device to start
+ * gxp_vd_run() - Run a virtual device on physical cores
+ * @vd: The virtual device to run
*
- * The caller must have locked gxp->vd_semaphore for writing.
+ * The state of @vd should be GXP_VD_OFF or GXP_VD_READY before calling this
+ * function. If this function runs successfully, the state becomes
+ * GXP_VD_RUNNING. Otherwise, it becomes GXP_VD_UNAVAILABLE.
+ *
+ * The caller must have locked gxp->vd_semaphore.
*
* Return:
- * * 0 - Success
- * * -EBUSY - Insufficient physical cores were free to start @vd
+ * * 0 - Success
+ * * -EINVAL - The VD is not in the GXP_VD_OFF or GXP_VD_READY state
+ * * Otherwise - Errno returned by running the firmware
*/
-int gxp_vd_start(struct gxp_virtual_device *vd);
+int gxp_vd_run(struct gxp_virtual_device *vd);
/**
- * gxp_vd_stop() - Stop a running virtual device and free up physical cores
+ * gxp_vd_stop() - Stop a running virtual device
* @vd: The virtual device to stop
*
- * The caller must have locked gxp->vd_semaphore for writing.
+ * The state of @vd will be GXP_VD_OFF.
+ *
+ * The caller must have locked gxp->vd_semaphore.
*/
void gxp_vd_stop(struct gxp_virtual_device *vd);
@@ -124,23 +191,11 @@ void gxp_vd_stop(struct gxp_virtual_device *vd);
int gxp_vd_virt_core_to_phys_core(struct gxp_virtual_device *vd, u16 virt_core);
/*
- * Converts a bitfield of virtual core IDs to a bitfield of physical core IDs.
- *
- * If the virtual list contains any invalid IDs, the entire physical ID list
- * will be considered invalid and this function will return 0.
+ * Returns a bitmask of the physical core IDs assigned to the virtual device.
*
* The caller must have locked gxp->vd_semaphore for reading.
*/
-uint gxp_vd_virt_core_list_to_phys_core_list(struct gxp_virtual_device *vd,
- u16 virt_core_list);
-
-/*
- * Returns the virtual core number assigned the phys_core, inside of this
- * virtual device or -EINVAL if this core is not part of this virtual device.
- *
- * The caller must have locked gxp->vd_semaphore for reading.
- */
-int gxp_vd_phys_core_to_virt_core(struct gxp_virtual_device *vd, u16 phys_core);
+uint gxp_vd_phys_core_list(struct gxp_virtual_device *vd);
/**
* gxp_vd_mapping_store() - Store a mapping in a virtual device's records
@@ -211,6 +266,10 @@ struct gxp_mapping *gxp_vd_mapping_search_host(struct gxp_virtual_device *vd,
* gxp_vd_suspend() - Suspend a running virtual device
* @vd: The virtual device to suspend
*
+ * The state of @vd should be GXP_VD_RUNNING before calling this function.
+ * If the suspension runs successfully on all cores, the state becomes
+ * GXP_VD_SUSPENDED. Otherwise, it becomes GXP_VD_UNAVAILABLE.
+ *
* The caller must have locked gxp->vd_semaphore for writing.
*/
void gxp_vd_suspend(struct gxp_virtual_device *vd);
@@ -219,6 +278,10 @@ void gxp_vd_suspend(struct gxp_virtual_device *vd);
* gxp_vd_resume() - Resume a suspended virtual device
* @vd: The virtual device to resume
*
+ * The state of @vd should be GXP_VD_SUSPENDED before calling this function.
+ * If the resumption runs successfully on all cores, the state becomes
+ * GXP_VD_RUNNING. Otherwise, it becomes GXP_VD_UNAVAILABLE.
+ *
* The caller must have locked gxp->vd_semaphore for writing.
*
* Return:
@@ -227,4 +290,44 @@ void gxp_vd_suspend(struct gxp_virtual_device *vd);
*/
int gxp_vd_resume(struct gxp_virtual_device *vd);
+/**
+ * gxp_vd_block_ready() - Performs the setup required to serve the VD, such as
+ * attaching its IOMMU domain. This is called after the block wakelock is
+ * acquired.
+ *
+ * @vd: The virtual device to prepare resources for
+ *
+ * The state of @vd should be GXP_VD_OFF before calling this function.
+ * If this function runs successfully, the state becomes GXP_VD_READY.
+ *
+ * Return:
+ * * 0 - Success
+ * * -EINVAL - The VD is not in GXP_VD_OFF state
+ * * Otherwise - Errno returned by IOMMU domain attachment
+ */
+int gxp_vd_block_ready(struct gxp_virtual_device *vd);
+
+/**
+ * gxp_vd_block_unready() - This is called before the virtual device wakelock, the block
+ * wakelock, or both are released.
+ *
+ * @vd: The virtual device whose resources are released
+ *
+ * This function must only be called while the client holds the block wakelock and has allocated
+ * a virtual device. It neither depends on nor changes the state of @vd.
+ */
+void gxp_vd_block_unready(struct gxp_virtual_device *vd);
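Read together, the kernel-doc above suggests a direct-mode call ordering roughly like the hedged sketch below. It is inferred from these comments rather than copied from the driver's client code, error unwinding is elided, and it assumes the caller already holds the block wakelock.

/* Hedged lifecycle sketch; assumes gxp-vd.h is included and the block wakelock is held. */
static int example_vd_lifecycle(struct gxp_dev *gxp)
{
	struct gxp_virtual_device *vd;
	int ret;

	down_write(&gxp->vd_semaphore);
	vd = gxp_vd_allocate(gxp, /*requested_cores=*/1);	/* state: GXP_VD_OFF */
	if (IS_ERR(vd)) {
		up_write(&gxp->vd_semaphore);
		return PTR_ERR(vd);
	}
	ret = gxp_vd_block_ready(vd);		/* GXP_VD_OFF -> GXP_VD_READY */
	if (!ret) {
		ret = gxp_vd_run(vd);		/* -> GXP_VD_RUNNING on success */
		/* ... dispatch work to the cores ... */
		gxp_vd_stop(vd);		/* -> GXP_VD_OFF */
		gxp_vd_block_unready(vd);
	}
	gxp_vd_release(vd);
	up_write(&gxp->vd_semaphore);
	return ret;
}

The relative order of gxp_vd_stop() and gxp_vd_block_unready() is an assumption here; the kernel-doc only requires that gxp_vd_block_unready() runs before the wakelocks are released.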
+
+/*
+ * Checks whether the virtual device has a positive credit and, if so, consumes
+ * one credit.
+ *
+ * Returns true when there is enough credit, false otherwise.
+ */
+bool gxp_vd_has_and_use_credit(struct gxp_virtual_device *vd);
+/*
+ * Releases the credit.
+ */
+void gxp_vd_release_credit(struct gxp_virtual_device *vd);
+
#endif /* __GXP_VD_H__ */
diff --git a/gxp-wakelock.c b/gxp-wakelock.c
index 9110a3d..ba4d8d8 100644
--- a/gxp-wakelock.c
+++ b/gxp-wakelock.c
@@ -25,13 +25,11 @@ int gxp_wakelock_init(struct gxp_dev *gxp)
return 0;
}
-int gxp_wakelock_acquire(struct gxp_dev *gxp)
+static int gxp_wakelock_acquire_locked(struct gxp_dev *gxp)
{
struct gxp_wakelock_manager *mgr = gxp->wakelock_mgr;
int ret = 0;
- mutex_lock(&mgr->lock);
-
if (mgr->suspended) {
/*
* Don't allow a new client to obtain a wakelock, powering up
@@ -51,16 +49,45 @@ int gxp_wakelock_acquire(struct gxp_dev *gxp)
ret, mgr->count);
goto err_blk_on;
}
+ if (gxp->wakelock_after_blk_on) {
+ ret = gxp->wakelock_after_blk_on(gxp);
+ if (ret) {
+ gxp_pm_blk_off(gxp);
+ goto err_blk_on;
+ }
+ }
}
out:
- mutex_unlock(&mgr->lock);
-
return ret;
err_blk_on:
mgr->count--;
+ return ret;
+}
+
+int gxp_wakelock_acquire(struct gxp_dev *gxp)
+{
+ struct gxp_wakelock_manager *mgr = gxp->wakelock_mgr;
+ int ret;
+
+ mutex_lock(&mgr->lock);
+ ret = gxp_wakelock_acquire_locked(gxp);
+ mutex_unlock(&mgr->lock);
+
+ return ret;
+}
+
+int gxp_wakelock_acquire_if_powered(struct gxp_dev *gxp)
+{
+ struct gxp_wakelock_manager *mgr = gxp->wakelock_mgr;
+ int ret = -EAGAIN;
+
+ mutex_lock(&mgr->lock);
+ if (mgr->count)
+ ret = gxp_wakelock_acquire_locked(gxp);
mutex_unlock(&mgr->lock);
+
return ret;
}
@@ -78,6 +105,8 @@ void gxp_wakelock_release(struct gxp_dev *gxp)
}
if (!--mgr->count) {
+ if (gxp->wakelock_before_blk_off)
+ gxp->wakelock_before_blk_off(gxp);
ret = gxp_pm_blk_off(gxp);
if (ret)
dev_err(gxp->dev,
diff --git a/gxp-wakelock.h b/gxp-wakelock.h
index ff76325..e02bdb4 100644
--- a/gxp-wakelock.h
+++ b/gxp-wakelock.h
@@ -18,7 +18,7 @@ struct gxp_wakelock_manager {
};
/**
- * gxp_telemetry_init() - Initialize wakelock support
+ * gxp_wakelock_init() - Initialize wakelock support
* @gxp: The GXP device to initialize wakelock support for
*
* Return:
@@ -42,6 +42,21 @@ int gxp_wakelock_init(struct gxp_dev *gxp);
int gxp_wakelock_acquire(struct gxp_dev *gxp);
/**
+ * gxp_wakelock_acquire_if_powered() - Increment the GXP wakelock counter if
+ * the counter is nonzero.
+ * @gxp: The GXP device to increment the wakelock counter for
+ *
+ * Similar to gxp_wakelock_acquire(), but only increments the wakelock counter
+ * if the counter is already nonzero, i.e. it never powers the block on.
+ *
+ * Return:
+ * * 0 - Success
+ * * -EAGAIN - Wakelock counter is zero
+ * * Other - Error returned by gxp_wakelock_acquire
+ */
+int gxp_wakelock_acquire_if_powered(struct gxp_dev *gxp);
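A short, hedged usage sketch of the new helper: paths that must never power the block up (such as the thermal throttling notification earlier in this patch) try the conditional acquire and bail out when no client holds the block power. do_work_while_powered() is a hypothetical placeholder.

/* Hedged sketch; do_work_while_powered() stands in for any hardware access. */
static int example_poke_if_powered(struct gxp_dev *gxp)
{
	int ret = gxp_wakelock_acquire_if_powered(gxp);

	if (ret)
		return ret;	/* typically -EAGAIN: no client holds the block power */

	ret = do_work_while_powered(gxp);
	gxp_wakelock_release(gxp);
	return ret;
}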
+
+/**
* gxp_wakelock_release() - Decrement the GXP wakelock counter
* @gxp: The GXP device to decrement the wakelock counter for
*
diff --git a/gxp.h b/gxp.h
index a7ce8ee..6c4ea3f 100644
--- a/gxp.h
+++ b/gxp.h
@@ -2,8 +2,9 @@
/*
* GXP kernel-userspace interface definitions.
*
- * Copyright (C) 2020 Google LLC
+ * Copyright (C) 2020-2022 Google LLC
*/
+
#ifndef __GXP_H__
#define __GXP_H__
@@ -11,281 +12,56 @@
#include <linux/types.h>
/* Interface Version */
-#define GXP_INTERFACE_VERSION_MAJOR 1
-#define GXP_INTERFACE_VERSION_MINOR 3
-#define GXP_INTERFACE_VERSION_BUILD 0
+#define GXP_INTERFACE_VERSION_MAJOR 1
+#define GXP_INTERFACE_VERSION_MINOR 7
+#define GXP_INTERFACE_VERSION_BUILD 0
/*
- * mmap offsets for logging and tracing buffers
+ * Legacy mmap offsets for core logging and tracing buffers
* Requested size will be divided evenly among all cores. The whole buffer
* must be page-aligned, and the size of each core's buffer must be a multiple
* of PAGE_SIZE.
*/
-#define GXP_MMAP_LOG_BUFFER_OFFSET 0x10000
-#define GXP_MMAP_TRACE_BUFFER_OFFSET 0x20000
-
-#define GXP_IOCTL_BASE 0xEE
-
-#define GXP_INTERFACE_VERSION_BUILD_BUFFER_SIZE 64
-struct gxp_interface_version_ioctl {
- /*
- * Driver major version number.
- * Increments whenever a non-backwards compatible change to the
- * interface defined in this file changes.
- */
- __u16 version_major;
- /*
- * Driver minor version number.
- * Increments whenever a backwards compatible change, such as the
- * addition of a new IOCTL, is made to the interface defined in this
- * file.
- */
- __u16 version_minor;
- /*
- * Driver build identifier.
- * NULL-terminated string of the git hash of the commit the driver was
- * built from. If the driver had uncommitted changes the string will
- * end with "-dirty".
- */
- char version_build[GXP_INTERFACE_VERSION_BUILD_BUFFER_SIZE];
-};
+#define GXP_MMAP_CORE_LOG_BUFFER_OFFSET_LEGACY 0x10000
+#define GXP_MMAP_CORE_TRACE_BUFFER_OFFSET_LEGACY 0x20000
-/* Query the driver's interface version. */
-#define GXP_GET_INTERFACE_VERSION \
- _IOR(GXP_IOCTL_BASE, 26, struct gxp_interface_version_ioctl)
+/* mmap offsets for MCU logging and tracing buffers */
+#define GXP_MMAP_MCU_LOG_BUFFER_OFFSET 0x30000
+#define GXP_MMAP_MCU_TRACE_BUFFER_OFFSET 0x40000
-struct gxp_specs_ioctl {
- /* Maximum number of cores that can be allocated to a virtual device */
- __u8 core_count;
- /* Deprecated fields that should be ignored */
- __u16 reserved_0;
- __u16 reserved_1;
- __u16 reserved_2;
- __u8 reserved_3;
- /*
- * Amount of "tightly-coupled memory" or TCM available to each core.
- * The value returned will be in kB, or 0 if the value was not
- * specified in the device-tree.
- */
- __u32 memory_per_core;
-};
+/* mmap offsets for core logging and tracing buffers */
+#define GXP_MMAP_CORE_LOG_BUFFER_OFFSET 0x50000
+#define GXP_MMAP_CORE_TRACE_BUFFER_OFFSET 0x60000
-/* Query system specs. */
-#define GXP_GET_SPECS \
- _IOR(GXP_IOCTL_BASE, 5, struct gxp_specs_ioctl)
-
-struct gxp_virtual_device_ioctl {
- /*
- * Input:
- * The number of cores requested for the virtual device.
- */
- __u8 core_count;
- /*
- * Input:
- * The number of threads requested per core.
- */
- __u16 threads_per_core;
- /*
- * Input:
- * The amount of memory requested per core, in kB.
- */
- __u32 memory_per_core;
- /*
- * Output:
- * The ID assigned to the virtual device and shared with its cores.
- */
- __u32 vdid;
-};
-
-/* Allocate virtual device. */
-#define GXP_ALLOCATE_VIRTUAL_DEVICE \
- _IOWR(GXP_IOCTL_BASE, 6, struct gxp_virtual_device_ioctl)
-
-/*
- * Components for which a client may hold a wakelock.
- * Acquired by passing these values as `components_to_wake` in
- * `struct gxp_acquire_wakelock_ioctl` to GXP_ACQUIRE_WAKELOCK and released by
- * passing these values directly as the argument to GXP_RELEASE_WAKELOCK.
- *
- * Multiple wakelocks can be acquired or released at once by passing multiple
- * components, ORed together.
- */
-#define WAKELOCK_BLOCK (1 << 0)
-#define WAKELOCK_VIRTUAL_DEVICE (1 << 1)
-
-/*
- * DSP subsystem Power state values for use as `gxp_power_state` in
- * `struct gxp_acquire_wakelock_ioctl`.
- * Note: GXP_POWER_STATE_READY is a deprecated state. The way to achieve
- * original state is to request GXP_POWER_STATE_UUD with setting
- * GXP_POWER_LOW_FREQ_CLKMUX flag. Requesting GXP_POWER_STATE_READY is treated
- * as identical to GXP_POWER_STATE_UUD.
- */
-#define GXP_POWER_STATE_OFF 0
-#define GXP_POWER_STATE_UUD 1
-#define GXP_POWER_STATE_SUD 2
-#define GXP_POWER_STATE_UD 3
-#define GXP_POWER_STATE_NOM 4
-#define GXP_POWER_STATE_READY 5
-#define GXP_POWER_STATE_UUD_PLUS 6
-#define GXP_POWER_STATE_SUD_PLUS 7
-#define GXP_POWER_STATE_UD_PLUS 8
-#define GXP_NUM_POWER_STATES (GXP_POWER_STATE_UD_PLUS + 1)
-
-/*
- * Memory interface power state values for use as `memory_power_state` in
- * `struct gxp_acquire_wakelock_ioctl`.
- */
-#define MEMORY_POWER_STATE_UNDEFINED 0
-#define MEMORY_POWER_STATE_MIN 1
-#define MEMORY_POWER_STATE_VERY_LOW 2
-#define MEMORY_POWER_STATE_LOW 3
-#define MEMORY_POWER_STATE_HIGH 4
-#define MEMORY_POWER_STATE_VERY_HIGH 5
-#define MEMORY_POWER_STATE_MAX 6
-
-/*
- * GXP power flag macros, supported by `flags` in `gxp_acquire_wakelock_ioctl`
- * and `power_flags in `gxp_mailbox_command_ioctl`.
- *
- * Non-aggressor flag is deprecated. Setting this flag is a no-op since
- * non-aggressor support is defeatured.
- */
-#define GXP_POWER_NON_AGGRESSOR (1 << 0)
-/*
- * The client can request low frequency clkmux vote by this flag, which means
- * the kernel driver will switch the CLKMUX clocks to save more power.
- *
- * Note: The kernel driver keeps separate track of low frequency clkmux votes
- * and normal votes, and the low frequency clkmux votes will have lower priority
- * than all normal votes.
- * For example, if the kernel driver has two votes, one is GXP_POWER_STATE_UUD
- * without GXP_POWER_LOW_FREQ_CLKMUX, and the other one is GXP_POWER_STATE_NOM
- * with GXP_POWER_LOW_FREQ_CLKMUX. The voting result is GXP_POWER_STATE_UUD
- * without GXP_POWER_LOW_FREQ_CLKMUX.
- */
-#define GXP_POWER_LOW_FREQ_CLKMUX (1 << 1)
-
-struct gxp_acquire_wakelock_ioctl {
- /*
- * The components for which a wakelock will be acquired.
- * Should be one of WAKELOCK_BLOCK or WAKELOCK_VIRTUAL_DEVICE, or a
- * bitwise OR of both.
- *
- * A VIRTUAL_DEVICE wakelock cannot be acquired until the client has
- * allocated a virtual device. To acquire a VIRTUAL_DEVICE wakelock, a
- * client must already have acquired a BLOCK wakelock or acquire both
- * in the same call.
- */
- __u32 components_to_wake;
- /*
- * Minimum power state to operate the entire DSP subsystem at until
- * the BLOCK wakelock is released. One of the GXP_POWER_STATE_* defines
- * from above. Note that the requested power state will not be cleared
- * if only the VIRTUAL_DEVICE wakelock is released.
- *
- * `GXP_POWER_STATE_OFF` is not a valid value when acquiring a
- * wakelock.
- */
- __u32 gxp_power_state;
- /*
- * Memory interface power state to request from the system so long as
- * the BLOCK wakelock is held. One of the MEMORY_POWER_STATE* defines
- * from above. The requested memory power state will not be cleared if
- * only the VIRTUAL_DEVICE wakelock is released.
- *
- * If `MEMORY_POWER_STATE_UNDEFINED` is passed, no request to change
- * the memory interface power state will be made.
- */
- __u32 memory_power_state;
- /*
- * How long to wait, in microseconds, before returning if insufficient
- * physical cores are available when attempting to acquire a
- * VIRTUAL_DEVICE wakelock. A value of 0 indicates that the IOCTL
- * should not wait at all if cores are not available.
- */
- __u32 vd_timeout_us;
- /*
- * Flags indicating power attribute requests from the runtime.
- * Set RESERVED bits to 0 to ensure backwards compatibility.
- *
- * Bitfields:
- * [0:0] - Deprecated, do not use
- * [1:1] - LOW_FREQ_CLKMUX setting for power management
- * 0 = Don't switch CLKMUX clocks, default value
- * 1 = Switch CLKMUX clocks
- * [31:2] - RESERVED
- */
- __u32 flags;
-};
-
-/*
- * Acquire a wakelock and request minimum power states for the DSP subsystem
- * and the memory interface.
- *
- * Upon a successful return, the specified components will be powered on and if
- * they were not already running at the specified or higher power states,
- * requests will have been sent to transition both the DSP subsystem and
- * memory interface to the specified states.
- *
- * If the same client invokes this IOCTL for the same component more than once
- * without a corresponding call to `GXP_RELEASE_WAKE_LOCK` in between, the
- * second call will update requested power states, but have no other effects.
- * No additional call to `GXP_RELEASE_WAKE_LOCK` will be required.
- *
- * If a client attempts to acquire a VIRTUAL_DEVICE wakelock and there are
- * insufficient physical cores available, the driver will wait up to
- * `vd_timeout_us` microseconds, then return -EBUSY if sufficient cores were
- * never made available. In this case, if both BLOCK and VIRTUAL_DEVICE
- * wakelocks were being requested, neither will have been acquired.
- */
-#define GXP_ACQUIRE_WAKE_LOCK \
- _IOW(GXP_IOCTL_BASE, 25, struct gxp_acquire_wakelock_ioctl)
-
-/*
- * Legacy "acquire wakelock" IOCTL that does not support power flags.
- * This IOCTL exists for backwards compatibility with older runtimes. All other
- * fields are the same as in `struct gxp_acquire_wakelock_ioctl`.
- */
-struct gxp_acquire_wakelock_compat_ioctl {
- __u32 components_to_wake;
- __u32 gxp_power_state;
- __u32 memory_power_state;
- __u32 vd_timeout_us;
-};
-
-#define GXP_ACQUIRE_WAKE_LOCK_COMPAT \
- _IOW(GXP_IOCTL_BASE, 18, struct gxp_acquire_wakelock_compat_ioctl)
-
-/*
- * Release a wakelock acquired via `GXP_ACQUIRE_WAKE_LOCK`.
- *
- * The argument should be one of WAKELOCK_BLOCK or WAKELOCK_VIRTUAL_DEVICE, or a
- * bitwise OR of both.
- *
- * Upon releasing a VIRTUAL_DEVICE wakelock, a client's virtual device will be
- * removed from physical cores. At that point the cores may be reallocated to
- * another client or powered down.
- *
- * If no clients hold a BLOCK wakelock, the entire DSP subsytem may be powered
- * down. If a client attempts to release a BLOCK wakelock while still holding
- * a VIRTUAL_DEVICE wakelock, this IOCTL will return -EBUSY.
- *
- * If a client attempts to release a wakelock it does not hold, this IOCTL will
- * return -ENODEV.
- */
-#define GXP_RELEASE_WAKE_LOCK _IOW(GXP_IOCTL_BASE, 19, __u32)
+#define GXP_IOCTL_BASE 0xEE
/* GXP map flag macros */
/* The mask for specifying DMA direction in GXP map flag */
-#define GXP_MAP_DIR_MASK 3
+#define GXP_MAP_DIR_MASK 3
/* The targeted DMA direction for the buffer */
-#define GXP_MAP_DMA_BIDIRECTIONAL 0
-#define GXP_MAP_DMA_TO_DEVICE 1
-#define GXP_MAP_DMA_FROM_DEVICE 2
+#define GXP_MAP_DMA_BIDIRECTIONAL 0
+#define GXP_MAP_DMA_TO_DEVICE 1
+#define GXP_MAP_DMA_FROM_DEVICE 2
+/* Create coherent mappings of the buffer. */
+#define GXP_MAP_COHERENT (1 << 2)
+
+/* To check whether the driver is working in MCU mode. */
+#define GXP_SPEC_FEATURE_MODE_MCU (1 << 0)
+
+/* To specify the secureness of the virtual device. */
+#define GXP_ALLOCATE_VD_SECURE BIT(0)
+
+/* Core telemetry buffer size is a multiple of 64 kB */
+#define GXP_CORE_TELEMETRY_BUFFER_UNIT_SIZE 0x10000u
+/* Magic code used to indicate the validity of telemetry buffer contents */
+#define GXP_TELEMETRY_BUFFER_VALID_MAGIC_CODE 0xC0DEC0DEu
+/* Magic code used to indicate the validity of secure telemetry buffer contents */
+#define GXP_TELEMETRY_SECURE_BUFFER_VALID_MAGIC_CODE 0xA0B0C0D0u
struct gxp_map_ioctl {
/*
+ * Deprecated. All virtual cores will be mapped.
+ *
* Bitfield indicating which virtual cores to map the buffer for.
* To map for virtual core X, set bit X in this field, i.e. `1 << X`.
*
@@ -293,8 +69,8 @@ struct gxp_map_ioctl {
* buffer for all cores it had been mapped for.
*/
__u16 virtual_core_list;
- __u64 host_address; /* virtual address in the process space */
- __u32 size; /* size of mapping in bytes */
+ __u64 host_address; /* virtual address in the process space */
+ __u32 size; /* size of mapping in bytes */
/*
* Flags indicating mapping attribute requests from the runtime.
* Set RESERVED bits to 0 to ensure backwards compatibility.
@@ -306,10 +82,15 @@ struct gxp_map_ioctl {
* 10 = DMA_FROM_DEVICE (device can write buffer)
* Note: DMA_DIRECTION is the direction in which data moves
* from the host's perspective.
- * [31:2] - RESERVED
+ * [2:2] - Coherent Mapping:
+ * 0 = Create non-coherent mappings of the buffer.
+ * 1 = Create coherent mappings of the buffer.
+ * Note: this attribute may be ignored on platforms where
+ * gxp is not I/O coherent.
+ * [31:3] - RESERVED
*/
__u32 flags;
- __u64 device_address; /* returned device address */
+ __u64 device_address; /* returned device address */
};
/*
@@ -317,8 +98,7 @@ struct gxp_map_ioctl {
*
* The client must have allocated a virtual device.
*/
-#define GXP_MAP_BUFFER \
- _IOWR(GXP_IOCTL_BASE, 0, struct gxp_map_ioctl)
+#define GXP_MAP_BUFFER _IOWR(GXP_IOCTL_BASE, 0, struct gxp_map_ioctl)
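As a hedged illustration of the flag bits above, the user-space sketch below maps one page with a bidirectional, coherent mapping. "/dev/gxp" is an assumed device node name, and the sketch presumes a virtual device has already been allocated on the file descriptor.

/* Hedged user-space sketch; "/dev/gxp" is an assumed node name. */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include "gxp.h"

int main(void)
{
	struct gxp_map_ioctl map = { 0 };
	void *buf = NULL;
	int fd = open("/dev/gxp", O_RDWR);

	if (fd < 0 || posix_memalign(&buf, 4096, 4096))
		return 1;

	/* Assumes GXP_ALLOCATE_VIRTUAL_DEVICE was already issued on this fd. */
	map.host_address = (uintptr_t)buf;
	map.size = 4096;
	/* Bits [1:0] select the DMA direction, bit 2 requests a coherent mapping. */
	map.flags = GXP_MAP_DMA_BIDIRECTIONAL | GXP_MAP_COHERENT;

	if (ioctl(fd, GXP_MAP_BUFFER, &map) == 0)
		printf("device address: 0x%llx\n",
		       (unsigned long long)map.device_address);

	close(fd);
	free(buf);
	return 0;
}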
/*
* Un-map host buffer previously mapped by GXP_MAP_BUFFER.
@@ -329,12 +109,11 @@ struct gxp_map_ioctl {
*
* The client must have allocated a virtual device.
*/
-#define GXP_UNMAP_BUFFER \
- _IOW(GXP_IOCTL_BASE, 1, struct gxp_map_ioctl)
+#define GXP_UNMAP_BUFFER _IOW(GXP_IOCTL_BASE, 1, struct gxp_map_ioctl)
/* GXP sync flag macros */
-#define GXP_SYNC_FOR_DEVICE (0)
-#define GXP_SYNC_FOR_CPU (1)
+#define GXP_SYNC_FOR_DEVICE (0)
+#define GXP_SYNC_FOR_CPU (1)
struct gxp_sync_ioctl {
/*
@@ -371,162 +150,18 @@ struct gxp_sync_ioctl {
* EINVAL: If @size equals 0.
* EINVAL: If @offset plus @size exceeds the mapping size.
*/
-#define GXP_SYNC_BUFFER \
- _IOW(GXP_IOCTL_BASE, 2, struct gxp_sync_ioctl)
-
-struct gxp_map_dmabuf_ioctl {
- /*
- * Bitfield indicating which virtual cores to map the dma-buf for.
- * To map for virtual core X, set bit X in this field, i.e. `1 << X`.
- *
- * This field is not used by the unmap dma-buf IOCTL, which always
- * unmaps a dma-buf for all cores it had been mapped for.
- */
- __u16 virtual_core_list;
- __s32 dmabuf_fd; /* File descriptor of the dma-buf to map. */
- /*
- * Flags indicating mapping attribute requests from the runtime.
- * Set RESERVED bits to 0 to ensure backwards compatibility.
- *
- * Bitfields:
- * [1:0] - DMA_DIRECTION:
- * 00 = DMA_BIDIRECTIONAL (host/device can write buffer)
- * 01 = DMA_TO_DEVICE (host can write buffer)
- * 10 = DMA_FROM_DEVICE (device can write buffer)
- * Note: DMA_DIRECTION is the direction in which data moves
- * from the host's perspective.
- * [31:2] - RESERVED
- */
- __u32 flags;
- /*
- * Device address the dmabuf is mapped to.
- * - GXP_MAP_DMABUF uses this field to return the address the dma-buf
- * can be accessed from by the device.
- * - GXP_UNMAP_DMABUF expects this field to contain the value from the
- * mapping call, and uses it to determine which dma-buf to unmap.
- */
- __u64 device_address;
-};
-
-/*
- * Map host buffer via its dma-buf FD.
- *
- * The client must have allocated a virtual device.
- */
-#define GXP_MAP_DMABUF _IOWR(GXP_IOCTL_BASE, 20, struct gxp_map_dmabuf_ioctl)
-
-/*
- * Un-map host buffer previously mapped by GXP_MAP_DMABUF.
- *
- * Only the @device_address field is used. Other fields are fetched from the
- * kernel's internal records. It is recommended to use the argument that was
- * passed in GXP_MAP_DMABUF to un-map the dma-buf.
- *
- * The client must have allocated a virtual device.
- */
-#define GXP_UNMAP_DMABUF _IOW(GXP_IOCTL_BASE, 21, struct gxp_map_dmabuf_ioctl)
-
-struct gxp_mailbox_command_ioctl {
- /*
- * Input:
- * The virtual core to dispatch the command to.
- */
- __u16 virtual_core_id;
- /*
- * Output:
- * The sequence number assigned to this command. The caller can use
- * this value to match responses fetched via `GXP_MAILBOX_RESPONSE`
- * with this command.
- */
- __u64 sequence_number;
- /*
- * Input:
- * Device address to the buffer containing a GXP command. The user
- * should have obtained this address from the GXP_MAP_BUFFER ioctl.
- */
- __u64 device_address;
- /*
- * Input:
- * Size of the buffer at `device_address` in bytes.
- */
- __u32 size;
- /*
- * Input:
- * Minimum power state to operate the entire DSP subsystem at until
- * the mailbox command is finished(executed or timeout). One of the
- * GXP_POWER_STATE_* defines from below.
- *
- * `GXP_POWER_STATE_OFF` is not a valid value when executing a
- * mailbox command. The caller should pass GXP_POWER_STATE_UUD if the
- * command is expected to run at the power state the wakelock has
- * specified.
- */
- __u32 gxp_power_state;
- /*
- * Input:
- * Memory interface power state to request from the system so long as
- * the mailbox command is executing. One of the MEMORY_POWER_STATE*
- * defines from below.
- *
- * If `MEMORY_POWER_STATE_UNDEFINED` is passed, no request to change
- * the memory interface power state will be made.
- */
- __u32 memory_power_state;
- /*
- * Input:
- * Flags describing the command, for use by the GXP device.
- */
- __u32 flags;
- /*
- * Input:
- * Flags indicating power attribute requests from the runtime.
- * Set RESERVED bits to 0 to ensure backwards compatibility.
- *
- * Bitfields:
- * [0:0] - Deprecated, do not use
- * [1:1] - LOW_FREQ_CLKMUX setting for power management
- * 0 = Don't switch CLKMUX clocks, default value
- * 1 = Switch CLKMUX clocks
- * [31:2] - RESERVED
- */
- __u32 power_flags;
-};
-
-/*
- * Push element to the mailbox commmand queue.
- *
- * The client must hold a VIRTUAL_DEVICE wakelock.
- */
-#define GXP_MAILBOX_COMMAND \
- _IOWR(GXP_IOCTL_BASE, 23, struct gxp_mailbox_command_ioctl)
-
-/*
- * Legacy "mailbox command" IOCTL that does not support power requests.
- * This IOCTL exists for backwards compatibility with older runtimes. All
- * fields, other than the unsupported `gxp_power_state`, `memory_power_state`,
- * and `power_flags`, are the same as in `struct gxp_mailbox_command_ioctl`.
- */
-struct gxp_mailbox_command_compat_ioctl {
- __u16 virtual_core_id;
- __u64 sequence_number;
- __u64 device_address;
- __u32 size;
- __u32 flags;
-};
-
-/* The client must hold a VIRTUAL_DEVICE wakelock. */
-#define GXP_MAILBOX_COMMAND_COMPAT \
- _IOW(GXP_IOCTL_BASE, 3, struct gxp_mailbox_command_compat_ioctl)
+#define GXP_SYNC_BUFFER _IOW(GXP_IOCTL_BASE, 2, struct gxp_sync_ioctl)
/* GXP mailbox response error code values */
-#define GXP_RESPONSE_ERROR_NONE (0)
-#define GXP_RESPONSE_ERROR_INTERNAL (1)
-#define GXP_RESPONSE_ERROR_TIMEOUT (2)
+#define GXP_RESPONSE_ERROR_NONE (0)
+#define GXP_RESPONSE_ERROR_INTERNAL (1)
+#define GXP_RESPONSE_ERROR_TIMEOUT (2)
struct gxp_mailbox_response_ioctl {
/*
* Input:
* The virtual core to fetch a response from.
+ * Only used in direct mode.
*/
__u16 virtual_core_id;
/*
@@ -551,56 +186,85 @@ struct gxp_mailbox_response_ioctl {
};
/*
- * Pop element from the mailbox response queue. Blocks until mailbox response
+ * Pop an element from the mailbox response queue. Blocks until mailbox response
* is available.
*
* The client must hold a VIRTUAL_DEVICE wakelock.
*/
-#define GXP_MAILBOX_RESPONSE \
+#define GXP_MAILBOX_RESPONSE \
_IOWR(GXP_IOCTL_BASE, 4, struct gxp_mailbox_response_ioctl)
-struct gxp_register_mailbox_eventfd_ioctl {
+struct gxp_specs_ioctl {
+ /* Maximum number of cores that can be allocated to a virtual device */
+ __u8 core_count;
/*
- * This eventfd will be signaled whenever a mailbox response arrives
- * for the core specified by `virtual_core_id`.
- *
- * When registering, if an eventfd has already been registered for the
- * specified core, the old eventfd will be unregistered and replaced.
- *
- * Not used during the unregister call, which clears any existing
- * eventfd.
+ * A field to indicate the features or modes the device supports.
+ * Bitfields:
+ * [0:0] - Mode:
+ * 0 = direct mode
+ * 1 = MCU mode
+ * [7:1] - RESERVED
*/
- __u32 eventfd;
+ __u8 features;
/*
- * Reserved.
- * Pass 0 for backwards compatibility.
+	 * Size of the telemetry buffer allocated per core, in units of
+	 * GXP_CORE_TELEMETRY_BUFFER_UNIT_SIZE.
*/
- __u32 flags;
+ __u8 telemetry_buffer_size;
/*
- * The virtual core to register or unregister an eventfd from.
- * While an eventfd is registered, it will be signaled exactly once
- * any time a command to this virtual core receives a response or times
- * out.
+	 * Size of the secure telemetry buffer reserved per core, in units of
+	 * GXP_CORE_TELEMETRY_BUFFER_UNIT_SIZE.
*/
- __u16 virtual_core_id;
+ __u8 secure_telemetry_buffer_size;
+ /* Deprecated fields that should be ignored */
+ __u8 reserved[8];
+ /*
+ * Amount of "tightly-coupled memory" or TCM available to each core.
+ * The value returned will be in kB, or 0 if the value was not
+ * specified in the device-tree.
+ */
+ __u32 memory_per_core;
};
-/*
- * Register an eventfd to be signaled whenever the specified virtual core
- * sends a mailbox response.
- *
- * The client must have allocated a virtual device.
- */
-#define GXP_REGISTER_MAILBOX_EVENTFD \
- _IOW(GXP_IOCTL_BASE, 22, struct gxp_register_mailbox_eventfd_ioctl)
+/* Query system specs. */
+#define GXP_GET_SPECS _IOR(GXP_IOCTL_BASE, 5, struct gxp_specs_ioctl)
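As a minimal user-space sketch of consuming this ioctl: the /dev/gxp node path and the print_specs helper name are assumptions for illustration; the struct layout and ioctl number come from this header.

/* Hedged sketch: open the (assumed) /dev/gxp node and query device specs. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include "gxp.h"

int print_specs(void)
{
	struct gxp_specs_ioctl specs = { 0 };
	int fd = open("/dev/gxp", O_RDWR);	/* device node path is assumed */

	if (fd < 0)
		return -1;
	if (ioctl(fd, GXP_GET_SPECS, &specs) == 0)
		printf("cores=%d mode=%s tcm_per_core=%u kB\n",
		       specs.core_count,
		       (specs.features & 1) ? "MCU" : "direct",
		       specs.memory_per_core);
	close(fd);
	return 0;
}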
-/*
- * Clear a previously registered mailbox response eventfd.
- *
- * The client must have allocated a virtual device.
- */
-#define GXP_UNREGISTER_MAILBOX_EVENTFD \
- _IOW(GXP_IOCTL_BASE, 24, struct gxp_register_mailbox_eventfd_ioctl)
+struct gxp_virtual_device_ioctl {
+ /*
+ * Input:
+ * The number of cores requested for the virtual device.
+ */
+ __u8 core_count;
+ /*
+ * Set RESERVED bits to 0 to ensure backwards compatibility.
+ *
+ * Bitfields:
+	 *       [0:0]   - GXP_ALLOCATE_VD_SECURE setting for VD security
+ * 0 = Non-secure, default value
+ * 1 = Secure
+	 *       [7:1]   - RESERVED
+ */
+ __u8 flags;
+ /*
+ * Input:
+ * The number of threads requested per core.
+ */
+ __u16 threads_per_core;
+ /*
+ * Input:
+ * The amount of memory requested per core, in kB.
+ */
+ __u32 memory_per_core;
+ /*
+ * Output:
+ * The ID assigned to the virtual device and shared with its cores.
+ */
+ __u32 vdid;
+};
+
+/* Allocate virtual device. */
+#define GXP_ALLOCATE_VIRTUAL_DEVICE \
+ _IOWR(GXP_IOCTL_BASE, 6, struct gxp_virtual_device_ioctl)
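A minimal sketch of allocating a non-secure virtual device follows; gxp_fd is assumed to be an fd already opened on the GXP device node, and the helper name is hypothetical.

/* Hedged sketch: allocate a one-core, non-secure virtual device. */
#include <stdio.h>
#include <sys/ioctl.h>
#include "gxp.h"

int allocate_vd(int gxp_fd)
{
	struct gxp_virtual_device_ioctl vd = {
		.core_count = 1,
		.flags = 0,		/* bit 0 clear: non-secure VD */
		.threads_per_core = 1,
		.memory_per_core = 0,	/* no explicit per-core memory request */
	};

	if (ioctl(gxp_fd, GXP_ALLOCATE_VIRTUAL_DEVICE, &vd))
		return -1;
	printf("allocated VDID %u\n", vd.vdid);
	return 0;
}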
#define ETM_TRACE_LSB_MASK 0x1
#define ETM_TRACE_SYNC_MSG_PERIOD_MIN 8
@@ -653,7 +317,7 @@ struct gxp_etm_trace_start_ioctl {
*
* The client must hold a VIRTUAL_DEVICE wakelock.
*/
-#define GXP_ETM_TRACE_START_COMMAND \
+#define GXP_ETM_TRACE_START_COMMAND \
_IOW(GXP_IOCTL_BASE, 7, struct gxp_etm_trace_start_ioctl)
/*
@@ -662,8 +326,7 @@ struct gxp_etm_trace_start_ioctl {
*
* The client must hold a VIRTUAL_DEVICE wakelock.
*/
-#define GXP_ETM_TRACE_SW_STOP_COMMAND \
- _IOW(GXP_IOCTL_BASE, 8, __u16)
+#define GXP_ETM_TRACE_SW_STOP_COMMAND _IOW(GXP_IOCTL_BASE, 8, __u16)
/*
* Users should call this IOCTL after tracing has been stopped for the last
@@ -674,8 +337,7 @@ struct gxp_etm_trace_start_ioctl {
*
* The client must hold a VIRTUAL_DEVICE wakelock.
*/
-#define GXP_ETM_TRACE_CLEANUP_COMMAND \
- _IOW(GXP_IOCTL_BASE, 9, __u16)
+#define GXP_ETM_TRACE_CLEANUP_COMMAND _IOW(GXP_IOCTL_BASE, 9, __u16)
#define GXP_TRACE_HEADER_SIZE 256
#define GXP_TRACE_RAM_SIZE 4096
@@ -711,11 +373,11 @@ struct gxp_etm_get_trace_info_ioctl {
*
* The client must hold a VIRTUAL_DEVICE wakelock.
*/
-#define GXP_ETM_GET_TRACE_INFO_COMMAND \
+#define GXP_ETM_GET_TRACE_INFO_COMMAND \
_IOWR(GXP_IOCTL_BASE, 10, struct gxp_etm_get_trace_info_ioctl)
-#define GXP_TELEMETRY_TYPE_LOGGING (0)
-#define GXP_TELEMETRY_TYPE_TRACING (1)
+#define GXP_TELEMETRY_TYPE_LOGGING (0)
+#define GXP_TELEMETRY_TYPE_TRACING (1)
/*
* Enable either logging or software tracing for all cores.
@@ -730,7 +392,7 @@ struct gxp_etm_get_trace_info_ioctl {
* logging/tracing to their buffers. Any cores booting after this call will
* begin logging/tracing as soon as their firmware is able to.
*/
-#define GXP_ENABLE_TELEMETRY _IOWR(GXP_IOCTL_BASE, 11, __u8)
+#define GXP_ENABLE_CORE_TELEMETRY _IOWR(GXP_IOCTL_BASE, 11, __u8)
/*
* Disable either logging or software tracing for all cores.
@@ -740,7 +402,53 @@ struct gxp_etm_get_trace_info_ioctl {
* This call will block until any running cores have been notified and ACKed
* that they have disabled the specified telemetry type.
*/
-#define GXP_DISABLE_TELEMETRY _IOWR(GXP_IOCTL_BASE, 12, __u8)
+#define GXP_DISABLE_CORE_TELEMETRY _IOWR(GXP_IOCTL_BASE, 12, __u8)
+
+/* For backward compatibility. */
+#define GXP_ENABLE_TELEMETRY GXP_ENABLE_CORE_TELEMETRY
+#define GXP_DISABLE_TELEMETRY GXP_DISABLE_CORE_TELEMETRY
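A small sketch of toggling core telemetry, assuming gxp_fd is an open GXP device fd; the helper name is hypothetical.

/* Hedged sketch: enable or disable core logging telemetry for all cores. */
#include <sys/ioctl.h>
#include <linux/types.h>
#include "gxp.h"

int set_core_logging(int gxp_fd, int enable)
{
	__u8 type = GXP_TELEMETRY_TYPE_LOGGING;

	return ioctl(gxp_fd,
		     enable ? GXP_ENABLE_CORE_TELEMETRY :
			      GXP_DISABLE_CORE_TELEMETRY,
		     &type);
}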
+
+struct gxp_tpu_mbx_queue_ioctl {
+ __u32 tpu_fd; /* TPU virtual device group fd */
+ /*
+ * Deprecated. All virtual cores will be mapped.
+ *
+ * Bitfield indicating which virtual cores to allocate and map the
+ * buffers for.
+ * To map for virtual core X, set bit X in this field, i.e. `1 << X`.
+ *
+ * This field is not used by the unmap IOCTL, which always unmaps the
+ * buffers for all cores it had been mapped for.
+ */
+ __u32 virtual_core_list;
+ /*
+ * The user address of an edgetpu_mailbox_attr struct, containing
+ * cmd/rsp queue size, mailbox priority and other relevant info.
+ * This structure is defined in edgetpu.h in the TPU driver.
+ */
+ __u64 attr_ptr;
+};
+
+/*
+ * Map TPU-DSP mailbox cmd/rsp queue buffers.
+ *
+ * The client must have allocated a virtual device.
+ */
+#define GXP_MAP_TPU_MBX_QUEUE \
+ _IOW(GXP_IOCTL_BASE, 13, struct gxp_tpu_mbx_queue_ioctl)
+
+/*
+ * Un-map TPU-DSP mailbox cmd/rsp queue buffers previously mapped by
+ * GXP_MAP_TPU_MBX_QUEUE.
+ *
+ * Only the @tpu_fd field will be used. Other fields will be fetched
+ * from the kernel's internal records. It is recommended to use the argument
+ * that was passed in GXP_MAP_TPU_MBX_QUEUE to un-map the buffers.
+ *
+ * The client must have allocated a virtual device.
+ */
+#define GXP_UNMAP_TPU_MBX_QUEUE \
+ _IOW(GXP_IOCTL_BASE, 14, struct gxp_tpu_mbx_queue_ioctl)
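As a sketch only: the caller passes the TPU device-group fd plus a user pointer to an edgetpu_mailbox_attr (defined in edgetpu.h, not reproduced here); the helper name and the attr_user_addr parameter are assumptions.

/* Hedged sketch: map TPU-DSP mailbox queues. attr_user_addr must point at a
 * populated edgetpu_mailbox_attr in the caller's address space. */
#include <sys/ioctl.h>
#include <linux/types.h>
#include "gxp.h"

int map_tpu_queues(int gxp_fd, int tpu_group_fd, __u64 attr_user_addr)
{
	struct gxp_tpu_mbx_queue_ioctl req = {
		.tpu_fd = tpu_group_fd,
		.virtual_core_list = 0,	/* deprecated; all cores are mapped */
		.attr_ptr = attr_user_addr,
	};

	return ioctl(gxp_fd, GXP_MAP_TPU_MBX_QUEUE, &req);
}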
struct gxp_register_telemetry_eventfd_ioctl {
/*
@@ -758,12 +466,16 @@ struct gxp_register_telemetry_eventfd_ioctl {
__u8 type;
};
-#define GXP_REGISTER_TELEMETRY_EVENTFD \
+#define GXP_REGISTER_CORE_TELEMETRY_EVENTFD \
_IOW(GXP_IOCTL_BASE, 15, struct gxp_register_telemetry_eventfd_ioctl)
-#define GXP_UNREGISTER_TELEMETRY_EVENTFD \
+#define GXP_UNREGISTER_CORE_TELEMETRY_EVENTFD \
_IOW(GXP_IOCTL_BASE, 16, struct gxp_register_telemetry_eventfd_ioctl)
+/* For backward compatibility. */
+#define GXP_REGISTER_TELEMETRY_EVENTFD GXP_REGISTER_CORE_TELEMETRY_EVENTFD
+#define GXP_UNREGISTER_TELEMETRY_EVENTFD GXP_UNREGISTER_CORE_TELEMETRY_EVENTFD
+
/*
* Reads the 2 global counter registers in AURORA_TOP and combines them to
* return the full 64-bit value of the counter.
@@ -772,45 +484,372 @@ struct gxp_register_telemetry_eventfd_ioctl {
*/
#define GXP_READ_GLOBAL_COUNTER _IOR(GXP_IOCTL_BASE, 17, __u64)
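A brief sketch of reading the counter; gxp_fd is assumed to be an open GXP device fd with the required wakelock held, and the helper name is hypothetical.

/* Hedged sketch: read the combined 64-bit global counter. */
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/types.h>
#include "gxp.h"

int print_global_counter(int gxp_fd)
{
	__u64 counter = 0;

	if (ioctl(gxp_fd, GXP_READ_GLOBAL_COUNTER, &counter))
		return -1;
	printf("global counter: %llu\n", (unsigned long long)counter);
	return 0;
}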
-struct gxp_tpu_mbx_queue_ioctl {
- __u32 tpu_fd; /* TPU virtual device group fd */
+/*
+ * Release a wakelock acquired via `GXP_ACQUIRE_WAKE_LOCK`.
+ *
+ * The argument should be one of WAKELOCK_BLOCK or WAKELOCK_VIRTUAL_DEVICE, or a
+ * bitwise OR of both.
+ *
+ * Upon releasing a VIRTUAL_DEVICE wakelock, a client's virtual device will be
+ * removed from physical cores. At that point the cores may be reallocated to
+ * another client or powered down.
+ *
+ * If no clients hold a BLOCK wakelock, the entire DSP subsystem may be powered
+ * down. If a client attempts to release a BLOCK wakelock while still holding
+ * a VIRTUAL_DEVICE wakelock, this IOCTL will return -EBUSY.
+ *
+ * If a client attempts to release a wakelock it does not hold, this IOCTL will
+ * return -ENODEV.
+ */
+#define GXP_RELEASE_WAKE_LOCK _IOW(GXP_IOCTL_BASE, 19, __u32)
+
+struct gxp_map_dmabuf_ioctl {
/*
- * Bitfield indicating which virtual cores to allocate and map the
- * buffers for.
+ * Deprecated. All virtual cores will be mapped.
+ *
+ * Bitfield indicating which virtual cores to map the dma-buf for.
* To map for virtual core X, set bit X in this field, i.e. `1 << X`.
*
- * This field is not used by the unmap IOCTL, which always unmaps the
- * buffers for all cores it had been mapped for.
+ * This field is not used by the unmap dma-buf IOCTL, which always
+ * unmaps a dma-buf for all cores it had been mapped for.
*/
- __u32 virtual_core_list;
+ __u16 virtual_core_list;
+ __s32 dmabuf_fd; /* File descriptor of the dma-buf to map. */
/*
- * The user address of an edgetpu_mailbox_attr struct, containing
- * cmd/rsp queue size, mailbox priority and other relevant info.
- * This structure is defined in edgetpu.h in the TPU driver.
+ * Flags indicating mapping attribute requests from the runtime.
+ * Set RESERVED bits to 0 to ensure backwards compatibility.
+ *
+ * Bitfields:
+ * [1:0] - DMA_DIRECTION:
+ * 00 = DMA_BIDIRECTIONAL (host/device can write buffer)
+ * 01 = DMA_TO_DEVICE (host can write buffer)
+ * 10 = DMA_FROM_DEVICE (device can write buffer)
+ * Note: DMA_DIRECTION is the direction in which data moves
+ * from the host's perspective.
+ * [31:2] - RESERVED
*/
- __u64 attr_ptr;
+ __u32 flags;
+ /*
+ * Device address the dmabuf is mapped to.
+ * - GXP_MAP_DMABUF uses this field to return the address the dma-buf
+ * can be accessed from by the device.
+ * - GXP_UNMAP_DMABUF expects this field to contain the value from the
+ * mapping call, and uses it to determine which dma-buf to unmap.
+ */
+ __u64 device_address;
};
/*
- * Map TPU-DSP mailbox cmd/rsp queue buffers.
+ * Map host buffer via its dma-buf FD.
*
* The client must have allocated a virtual device.
*/
-#define GXP_MAP_TPU_MBX_QUEUE \
- _IOW(GXP_IOCTL_BASE, 13, struct gxp_tpu_mbx_queue_ioctl)
+#define GXP_MAP_DMABUF _IOWR(GXP_IOCTL_BASE, 20, struct gxp_map_dmabuf_ioctl)
/*
- * Un-map TPU-DSP mailbox cmd/rsp queue buffers previously mapped by
- * GXP_MAP_TPU_MBX_QUEUE.
+ * Un-map host buffer previously mapped by GXP_MAP_DMABUF.
*
- * Only the @tpu_fd field will be used. Other fields will be fetched
- * from the kernel's internal records. It is recommended to use the argument
- * that was passed in GXP_MAP_TPU_MBX_QUEUE to un-map the buffers.
+ * Only the @device_address field is used. Other fields are fetched from the
+ * kernel's internal records. It is recommended to use the argument that was
+ * passed in GXP_MAP_DMABUF to un-map the dma-buf.
*
* The client must have allocated a virtual device.
*/
-#define GXP_UNMAP_TPU_MBX_QUEUE \
- _IOW(GXP_IOCTL_BASE, 14, struct gxp_tpu_mbx_queue_ioctl)
+#define GXP_UNMAP_DMABUF _IOW(GXP_IOCTL_BASE, 21, struct gxp_map_dmabuf_ioctl)
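To make the map/unmap pairing concrete, here is a hedged sketch; the helper name is hypothetical, and the dma-buf fd is assumed to come from an exporter outside this header's scope.

/* Hedged sketch: map a dma-buf for device access, then unmap it again using
 * the same argument, whose device_address was filled in by the map call. */
#include <sys/ioctl.h>
#include "gxp.h"

int map_then_unmap_dmabuf(int gxp_fd, int dmabuf_fd)
{
	struct gxp_map_dmabuf_ioctl map = {
		.dmabuf_fd = dmabuf_fd,
		.flags = 0,	/* [1:0] = 00: DMA_BIDIRECTIONAL */
	};

	if (ioctl(gxp_fd, GXP_MAP_DMABUF, &map))
		return -1;
	/* map.device_address is now valid for use in GXP commands. */
	return ioctl(gxp_fd, GXP_UNMAP_DMABUF, &map);
}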
+
+struct gxp_register_mailbox_eventfd_ioctl {
+ /*
+ * This eventfd will be signaled whenever a mailbox response arrives
+ * for the core specified by `virtual_core_id`.
+ *
+ * When registering, if an eventfd has already been registered for the
+ * specified core, the old eventfd will be unregistered and replaced.
+ *
+ * Not used during the unregister call, which clears any existing
+ * eventfd.
+ */
+ __u32 eventfd;
+ /*
+ * Reserved.
+ * Pass 0 for backwards compatibility.
+ */
+ __u32 flags;
+ /*
+ * The virtual core to register or unregister an eventfd from.
+ * While an eventfd is registered, it will be signaled exactly once
+ * any time a command to this virtual core receives a response or times
+ * out.
+ */
+ __u16 virtual_core_id;
+};
+
+/*
+ * Register an eventfd to be signaled whenever the specified virtual core
+ * sends a mailbox response.
+ *
+ * The client must have allocated a virtual device.
+ */
+#define GXP_REGISTER_MAILBOX_EVENTFD \
+ _IOW(GXP_IOCTL_BASE, 22, struct gxp_register_mailbox_eventfd_ioctl)
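A hedged sketch of wiring up response notifications follows; eventfd(2) is standard Linux, while the helper name is hypothetical.

/* Hedged sketch: create an eventfd and register it for responses from
 * virtual core 0. The returned fd can then be poll()ed by the runtime. */
#include <sys/eventfd.h>
#include <sys/ioctl.h>
#include "gxp.h"

int register_response_eventfd(int gxp_fd)
{
	int efd = eventfd(0, EFD_CLOEXEC);
	struct gxp_register_mailbox_eventfd_ioctl reg = {
		.eventfd = efd,
		.flags = 0,
		.virtual_core_id = 0,
	};

	if (efd < 0 || ioctl(gxp_fd, GXP_REGISTER_MAILBOX_EVENTFD, &reg))
		return -1;
	return efd;
}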
+
+struct gxp_mailbox_command_ioctl {
+ /*
+ * Input:
+ * The virtual core to dispatch the command to.
+ * Only used in direct mode.
+ */
+ __u16 virtual_core_id;
+ /*
+ * Input:
+ * The number of cores to dispatch the command to.
+ * Only used in non-direct mode.
+ */
+ __u16 num_cores;
+ /*
+ * Output:
+ * The sequence number assigned to this command. The caller can use
+ * this value to match responses fetched via `GXP_MAILBOX_RESPONSE`
+ * with this command.
+ */
+ __u64 sequence_number;
+ /*
+ * Input:
+ * Device address to the buffer containing a GXP command. The user
+ * should have obtained this address from the GXP_MAP_BUFFER ioctl.
+ */
+ __u64 device_address;
+ /*
+ * Input:
+ * Size of the buffer at `device_address` in bytes.
+ */
+ __u32 size;
+ /*
+ * Input:
+ * Minimum power state to operate the entire DSP subsystem at until
+	 * the mailbox command is finished (executed or timed out). One of the
+ * GXP_POWER_STATE_* defines from below.
+ *
+ * `GXP_POWER_STATE_OFF` is not a valid value when executing a
+ * mailbox command. The caller should pass GXP_POWER_STATE_UUD if the
+ * command is expected to run at the power state the wakelock has
+ * specified.
+ */
+ __u32 gxp_power_state;
+ /*
+ * Input:
+ * Memory interface power state to request from the system so long as
+ * the mailbox command is executing. One of the MEMORY_POWER_STATE*
+ * defines from below.
+ *
+ * If `MEMORY_POWER_STATE_UNDEFINED` is passed, no request to change
+ * the memory interface power state will be made.
+ */
+ __u32 memory_power_state;
+ /*
+ * Input:
+ * Flags describing the command, for use by the GXP device.
+ */
+ __u32 flags;
+ /*
+ * Input:
+ * Flags indicating power attribute requests from the runtime.
+ * Set RESERVED bits to 0 to ensure backwards compatibility.
+ *
+ * Bitfields:
+ * [0:0] - Deprecated, do not use
+ * [1:1] - LOW_FREQ_CLKMUX setting for power management
+ * 0 = Don't switch CLKMUX clocks, default value
+ * 1 = Switch CLKMUX clocks
+ * [31:2] - RESERVED
+ */
+ __u32 power_flags;
+};
+
+/*
+ * Push an element to the mailbox command queue.
+ *
+ * The client must hold a VIRTUAL_DEVICE wakelock.
+ */
+#define GXP_MAILBOX_COMMAND \
+ _IOWR(GXP_IOCTL_BASE, 23, struct gxp_mailbox_command_ioctl)
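The sketch below dispatches a command buffer that was previously mapped (the GXP_MAP_BUFFER step is not shown in this hunk); the helper name and parameters are illustrative only.

/* Hedged sketch: push one command to virtual core 0 at the UUD power state,
 * leaving the memory interface power state unchanged. */
#include <sys/ioctl.h>
#include <linux/types.h>
#include "gxp.h"

int send_command(int gxp_fd, __u64 cmd_device_addr, __u32 cmd_size)
{
	struct gxp_mailbox_command_ioctl cmd = {
		.virtual_core_id = 0,		/* direct mode */
		.device_address = cmd_device_addr,
		.size = cmd_size,
		.gxp_power_state = GXP_POWER_STATE_UUD,
		.memory_power_state = MEMORY_POWER_STATE_UNDEFINED,
	};

	if (ioctl(gxp_fd, GXP_MAILBOX_COMMAND, &cmd))
		return -1;
	/* cmd.sequence_number pairs this command with its future response. */
	return 0;
}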
+
+/*
+ * Clear a previously registered mailbox response eventfd.
+ *
+ * The client must have allocated a virtual device.
+ */
+#define GXP_UNREGISTER_MAILBOX_EVENTFD \
+ _IOW(GXP_IOCTL_BASE, 24, struct gxp_register_mailbox_eventfd_ioctl)
+
+/*
+ * Components for which a client may hold a wakelock.
+ * Acquired by passing these values as `components_to_wake` in
+ * `struct gxp_acquire_wakelock_ioctl` to GXP_ACQUIRE_WAKE_LOCK and released by
+ * passing these values directly as the argument to GXP_RELEASE_WAKE_LOCK.
+ *
+ * Multiple wakelocks can be acquired or released at once by passing multiple
+ * components, ORed together.
+ */
+#define WAKELOCK_BLOCK (1 << 0)
+#define WAKELOCK_VIRTUAL_DEVICE (1 << 1)
+
+/*
+ * DSP subsystem Power state values for use as `gxp_power_state` in
+ * `struct gxp_acquire_wakelock_ioctl`.
+ * Note: GXP_POWER_STATE_READY is a deprecated state. To get the original
+ * behavior, request GXP_POWER_STATE_UUD with the GXP_POWER_LOW_FREQ_CLKMUX
+ * flag set. Requesting GXP_POWER_STATE_READY is treated as identical to
+ * GXP_POWER_STATE_UUD.
+ */
+#define GXP_POWER_STATE_OFF 0
+#define GXP_POWER_STATE_UUD 1
+#define GXP_POWER_STATE_SUD 2
+#define GXP_POWER_STATE_UD 3
+#define GXP_POWER_STATE_NOM 4
+#define GXP_POWER_STATE_READY 5
+#define GXP_POWER_STATE_UUD_PLUS 6
+#define GXP_POWER_STATE_SUD_PLUS 7
+#define GXP_POWER_STATE_UD_PLUS 8
+#define GXP_NUM_POWER_STATES (GXP_POWER_STATE_UD_PLUS + 1)
+
+/*
+ * Memory interface power state values for use as `memory_power_state` in
+ * `struct gxp_acquire_wakelock_ioctl`.
+ */
+#define MEMORY_POWER_STATE_UNDEFINED 0
+#define MEMORY_POWER_STATE_MIN 1
+#define MEMORY_POWER_STATE_VERY_LOW 2
+#define MEMORY_POWER_STATE_LOW 3
+#define MEMORY_POWER_STATE_HIGH 4
+#define MEMORY_POWER_STATE_VERY_HIGH 5
+#define MEMORY_POWER_STATE_MAX 6
+
+/*
+ * GXP power flag macros, supported by `flags` in `gxp_acquire_wakelock_ioctl`
+ * and `power_flags` in `gxp_mailbox_command_ioctl`.
+ *
+ * Non-aggressor flag is deprecated. Setting this flag is a no-op since
+ * non-aggressor support is defeatured.
+ */
+#define GXP_POWER_NON_AGGRESSOR (1 << 0)
+/*
+ * The client can request low frequency clkmux vote by this flag, which means
+ * the kernel driver will switch the CLKMUX clocks to save more power.
+ *
+ * Note: The kernel driver keeps separate track of low frequency clkmux votes
+ * and normal votes; the low frequency clkmux votes have lower priority than
+ * all normal votes.
+ * For example, if the kernel driver has two votes, one for GXP_POWER_STATE_UUD
+ * without GXP_POWER_LOW_FREQ_CLKMUX and one for GXP_POWER_STATE_NOM with
+ * GXP_POWER_LOW_FREQ_CLKMUX, the voting result is GXP_POWER_STATE_UUD without
+ * GXP_POWER_LOW_FREQ_CLKMUX.
+ */
+#define GXP_POWER_LOW_FREQ_CLKMUX (1 << 1)
+
+struct gxp_acquire_wakelock_ioctl {
+ /*
+ * The components for which a wakelock will be acquired.
+ * Should be one of WAKELOCK_BLOCK or WAKELOCK_VIRTUAL_DEVICE, or a
+ * bitwise OR of both.
+ *
+ * A VIRTUAL_DEVICE wakelock cannot be acquired until the client has
+ * allocated a virtual device. To acquire a VIRTUAL_DEVICE wakelock, a
+ * client must already have acquired a BLOCK wakelock or acquire both
+ * in the same call.
+ */
+ __u32 components_to_wake;
+ /*
+ * Minimum power state to operate the entire DSP subsystem at until
+ * the BLOCK wakelock is released. One of the GXP_POWER_STATE_* defines
+ * from above. Note that the requested power state will not be cleared
+ * if only the VIRTUAL_DEVICE wakelock is released.
+ *
+ * `GXP_POWER_STATE_OFF` is not a valid value when acquiring a
+ * wakelock.
+ */
+ __u32 gxp_power_state;
+ /*
+ * Memory interface power state to request from the system so long as
+ * the BLOCK wakelock is held. One of the MEMORY_POWER_STATE* defines
+ * from above. The requested memory power state will not be cleared if
+ * only the VIRTUAL_DEVICE wakelock is released.
+ *
+ * If `MEMORY_POWER_STATE_UNDEFINED` is passed, no request to change
+ * the memory interface power state will be made.
+ */
+ __u32 memory_power_state;
+ /*
+ * How long to wait, in microseconds, before returning if insufficient
+ * physical cores are available when attempting to acquire a
+ * VIRTUAL_DEVICE wakelock. A value of 0 indicates that the IOCTL
+ * should not wait at all if cores are not available.
+ */
+ __u32 vd_timeout_us;
+ /*
+ * Flags indicating power attribute requests from the runtime.
+ * Set RESERVED bits to 0 to ensure backwards compatibility.
+ *
+ * Bitfields:
+ * [0:0] - Deprecated, do not use
+ * [1:1] - LOW_FREQ_CLKMUX setting for power management
+ * 0 = Don't switch CLKMUX clocks, default value
+ * 1 = Switch CLKMUX clocks
+ * [31:2] - RESERVED
+ */
+ __u32 flags;
+};
+
+/*
+ * Acquire a wakelock and request minimum power states for the DSP subsystem
+ * and the memory interface.
+ *
+ * Upon a successful return, the specified components will be powered on.
+ * If the specified components contain VIRTUAL_DEVICE, and they were not
+ * already running at the specified or higher power states, requests will
+ * have been sent to transition both the DSP subsystem and memory interface
+ * to the specified states.
+ *
+ * If the same client invokes this IOCTL for the same component more than once
+ * without a corresponding call to `GXP_RELEASE_WAKE_LOCK` in between, the
+ * second call may update requested power states, but have no other effects.
+ * No additional call to `GXP_RELEASE_WAKE_LOCK` will be required.
+ *
+ * If a client attempts to acquire a VIRTUAL_DEVICE wakelock and there are
+ * insufficient physical cores available, the driver will wait up to
+ * `vd_timeout_us` microseconds, then return -EBUSY if sufficient cores were
+ * never made available. In this case, if both BLOCK and VIRTUAL_DEVICE
+ * wakelocks were being requested, neither will have been acquired.
+ */
+#define GXP_ACQUIRE_WAKE_LOCK \
+ _IOW(GXP_IOCTL_BASE, 25, struct gxp_acquire_wakelock_ioctl)
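A hedged end-to-end sketch of the wakelock lifecycle follows; the helper name and the 10 ms timeout are arbitrary choices for illustration.

/* Hedged sketch: acquire BLOCK + VIRTUAL_DEVICE wakelocks, do work, then
 * release both with GXP_RELEASE_WAKE_LOCK. */
#include <sys/ioctl.h>
#include <linux/types.h>
#include "gxp.h"

int run_with_wakelocks(int gxp_fd)
{
	struct gxp_acquire_wakelock_ioctl wake = {
		.components_to_wake = WAKELOCK_BLOCK | WAKELOCK_VIRTUAL_DEVICE,
		.gxp_power_state = GXP_POWER_STATE_UUD,
		.memory_power_state = MEMORY_POWER_STATE_UNDEFINED,
		.vd_timeout_us = 10000,	/* wait up to 10 ms for free cores */
		.flags = 0,
	};
	__u32 components = WAKELOCK_BLOCK | WAKELOCK_VIRTUAL_DEVICE;

	if (ioctl(gxp_fd, GXP_ACQUIRE_WAKE_LOCK, &wake))
		return -1;
	/* ... issue GXP_MAILBOX_COMMAND calls here ... */
	return ioctl(gxp_fd, GXP_RELEASE_WAKE_LOCK, &components);
}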
+
+#define GXP_INTERFACE_VERSION_BUILD_BUFFER_SIZE 64
+struct gxp_interface_version_ioctl {
+ /*
+ * Driver major version number.
+	 * Increments whenever a non-backwards-compatible change is made to the
+	 * interface defined in this file.
+ */
+ __u16 version_major;
+ /*
+ * Driver minor version number.
+ * Increments whenever a backwards compatible change, such as the
+ * addition of a new IOCTL, is made to the interface defined in this
+ * file.
+ */
+ __u16 version_minor;
+ /*
+ * Driver build identifier.
+ * NULL-terminated string of the git hash of the commit the driver was
+ * built from. If the driver had uncommitted changes the string will
+ * end with "-dirty".
+ */
+ char version_build[GXP_INTERFACE_VERSION_BUILD_BUFFER_SIZE];
+};
+
+/* Query the driver's interface version. */
+#define GXP_GET_INTERFACE_VERSION \
+ _IOR(GXP_IOCTL_BASE, 26, struct gxp_interface_version_ioctl)
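A short sketch of a version probe; the helper name is hypothetical and gxp_fd is assumed to be an open GXP device fd.

/* Hedged sketch: query and print the driver interface version. */
#include <stdio.h>
#include <sys/ioctl.h>
#include "gxp.h"

int print_interface_version(int gxp_fd)
{
	struct gxp_interface_version_ioctl ver = { 0 };

	if (ioctl(gxp_fd, GXP_GET_INTERFACE_VERSION, &ver))
		return -1;
	printf("gxp interface %d.%d (%s)\n",
	       ver.version_major, ver.version_minor, ver.version_build);
	return 0;
}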
/*
* Triggers a debug dump to be generated for cores.
@@ -829,4 +868,68 @@ struct gxp_tpu_mbx_queue_ioctl {
*/
#define GXP_TRIGGER_DEBUG_DUMP _IOW(GXP_IOCTL_BASE, 27, __u32)
+#define GXP_REGISTER_MCU_TELEMETRY_EVENTFD \
+ _IOW(GXP_IOCTL_BASE, 28, struct gxp_register_telemetry_eventfd_ioctl)
+
+#define GXP_UNREGISTER_MCU_TELEMETRY_EVENTFD \
+ _IOW(GXP_IOCTL_BASE, 29, struct gxp_register_telemetry_eventfd_ioctl)
+
+struct gxp_mailbox_uci_command_ioctl {
+ /*
+ * Output:
+ * The sequence number assigned to this command. The caller can use
+ * this value to match responses fetched via `GXP_MAILBOX_UCI_RESPONSE`
+ * with this command.
+ */
+ __u64 sequence_number;
+ /* reserved fields */
+ __u8 reserved[8];
+ /*
+ * Input:
+ * Will be copied to the UCI command without modification.
+ */
+ __u8 opaque[48];
+};
+
+/*
+ * Push an element to the UCI command queue.
+ *
+ * The client must hold a BLOCK wakelock.
+ */
+#define GXP_MAILBOX_UCI_COMMAND \
+ _IOWR(GXP_IOCTL_BASE, 30, struct gxp_mailbox_uci_command_ioctl)
+
+struct gxp_mailbox_uci_response_ioctl {
+ /*
+ * Output:
+ * Sequence number indicating which command this response is for.
+ */
+ __u64 sequence_number;
+ /*
+ * Output:
+ * Driver error code.
+ * Indicates if the response was obtained successfully,
+ * `GXP_RESPONSE_ERROR_NONE`, or what error prevented the command
+ * from completing successfully.
+ */
+ __u16 error_code;
+ /* reserved fields */
+ __u8 reserved[6];
+ /*
+ * Output:
+ * Is copied from the UCI response without modification.
+	 * Only valid if `error_code` == GXP_RESPONSE_ERROR_NONE.
+ */
+ __u8 opaque[16];
+};
+
+/*
+ * Pop an element from the UCI response queue. Blocks until mailbox response
+ * is available.
+ *
+ * The client must hold a BLOCK wakelock.
+ */
+#define GXP_MAILBOX_UCI_RESPONSE \
+ _IOR(GXP_IOCTL_BASE, 31, struct gxp_mailbox_uci_response_ioctl)
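Finally, a hedged sketch of the UCI round trip in MCU mode; the payload contents are firmware-defined and opaque to the driver, and the helper name is hypothetical.

/* Hedged sketch: push one opaque UCI command and block for a response.
 * Responses are matched to commands by sequence number. */
#include <string.h>
#include <sys/ioctl.h>
#include <linux/types.h>
#include "gxp.h"

int run_uci_command(int gxp_fd, const __u8 payload[48])
{
	struct gxp_mailbox_uci_command_ioctl cmd = { 0 };
	struct gxp_mailbox_uci_response_ioctl rsp = { 0 };

	memcpy(cmd.opaque, payload, sizeof(cmd.opaque));
	if (ioctl(gxp_fd, GXP_MAILBOX_UCI_COMMAND, &cmd))
		return -1;
	if (ioctl(gxp_fd, GXP_MAILBOX_UCI_RESPONSE, &rsp))
		return -1;
	if (rsp.sequence_number != cmd.sequence_number ||
	    rsp.error_code != GXP_RESPONSE_ERROR_NONE)
		return -1;
	/* rsp.opaque[] now holds the 16-byte firmware reply. */
	return 0;
}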
+
#endif /* __GXP_H__ */
diff --git a/mm-backport.h b/mm-backport.h
deleted file mode 100644
index c435281..0000000
--- a/mm-backport.h
+++ /dev/null
@@ -1,33 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Backport mm APIs.
- *
- * Copyright (C) 2021 Google LLC
- */
-#ifndef __MM_BACKPORT_H__
-#define __MM_BACKPORT_H__
-
-#include <linux/mm.h>
-#include <linux/version.h>
-
-#if KERNEL_VERSION(5, 6, 0) > LINUX_VERSION_CODE
-
-/*
- * Define pin_user_pages* which are introduced in Linux 5.6.
- *
- * We simply define pin_user_pages* as get_user_pages* here so our driver can
- * prefer PIN over GET when possible.
- */
-#ifndef FOLL_PIN
-
-/* define as zero to prevent older get_user_pages* returning EINVAL */
-#define FOLL_LONGTERM 0
-
-#define pin_user_pages_fast get_user_pages_fast
-#define unpin_user_page put_page
-
-#endif /* FOLL_PIN */
-
-#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(5,6,0) */
-
-#endif /* __MM_BACKPORT_H__ */