diff options
author | Aurora zuma automerger <aurora-zuma-automerger@google.com> | 2023-07-20 11:59:02 +0000 |
---|---|---|
committer | Copybara-Service <copybara-worker@google.com> | 2023-08-03 14:58:59 -0700 |
commit | da392e26dc0810f886f92df4a6d1afd6ff35377d (patch) | |
tree | 12746cd8b2c798377dccc2ba9433db95bf99040a | |
parent | eb3acbdb4097299fcb90b49c3e124af0fc3ff6c8 (diff) | |
download | zuma-android-gs-shusky-5.15-android14-qpr1-beta.tar.gz |
gxp: [Copybara Auto Merge] Merge branch 'zuma' into 'android14-gs-pixel-5.15-udc-qpr1'android-u-qpr2-beta-3_r0.8android-u-qpr2-beta-3.1_r0.8android-u-qpr2-beta-2_r0.8android-u-qpr2-beta-1_r0.7android-u-qpr1-beta-2_r0.8android-u-qpr1-beta-2.2_r0.7android-u-qpr1-beta-2.1_r0.8android-14.0.0_r0.77android-14.0.0_r0.67android-14.0.0_r0.57android-14.0.0_r0.45android-14.0.0_r0.38android-gs-shusky-5.15-u-qpr1-beta2android-gs-shusky-5.15-android14-qpr2-betaandroid-gs-shusky-5.15-android14-qpr2android-gs-shusky-5.15-android14-qpr1-betaandroid-gs-shusky-5.15-android14-qpr1
gxp: deactivate context on domain detaching
Introduce gxp_soc_deactivate_context to disable contexts that are not in use.
Note this function would be meaningless if SSMT is in clamp mode, in which
case we have no way to disable contexts since we don't know which
(physical) core will be scheduled for use by the MCU.
Test: gxp_smoke_test * 4 passed. Faults are observed, as expected, when
soc_activate_context is not called.
Bug: 290708708
(cherry picked from commit 4696e2562571d007c839dd9e575281ab407ac9ac)
GitOrigin-RevId: e678afd3102c5ee61bc9c8b149a1683c51df37f0
Change-Id: I17453f940adb0656ee720bed155daa16ea1b41af
-rw-r--r-- | gxp-dma-iommu.c | 35 | ||||
-rw-r--r-- | gxp-dma.h | 5 | ||||
-rw-r--r-- | gxp-ssmt.c | 49 | ||||
-rw-r--r-- | gxp-ssmt.h | 17 | ||||
-rw-r--r-- | gxp-vd.c | 4 |
5 files changed, 81 insertions, 29 deletions
diff --git a/gxp-dma-iommu.c b/gxp-dma-iommu.c index 1d60888..5aa8b5d 100644 --- a/gxp-dma-iommu.c +++ b/gxp-dma-iommu.c @@ -68,17 +68,16 @@ static int dma_info_to_prot(enum dma_data_direction dir, bool coherent, } } -static int gxp_dma_ssmt_program(struct gxp_dev *gxp, - struct iommu_domain *domain, uint core_list) +static int gxp_dma_ssmt_activate(struct gxp_dev *gxp, + struct iommu_domain *domain, uint core_list) { struct gxp_dma_iommu_manager *mgr = container_of( gxp->dma_mgr, struct gxp_dma_iommu_manager, dma_mgr); - int pasid; uint core; + int pasid = iommu_aux_get_pasid(domain, gxp->dev); /* Program VID only when cores are managed by us. */ if (gxp_is_direct_mode(gxp) || gxp_core_boot(gxp)) { - pasid = iommu_aux_get_pasid(domain, gxp->dev); for (core = 0; core < GXP_NUM_CORES; core++) if (BIT(core) & core_list) { dev_dbg(gxp->dev, "Assign core%u to PASID %d\n", @@ -86,11 +85,30 @@ static int gxp_dma_ssmt_program(struct gxp_dev *gxp, gxp_ssmt_set_core_vid(&mgr->ssmt, core, pasid); } } else { - gxp_ssmt_set_bypass(&mgr->ssmt); + gxp_ssmt_activate_scid(&mgr->ssmt, pasid); } return 0; } +static void gxp_dma_ssmt_deactivate(struct gxp_dev *gxp, + struct iommu_domain *domain, uint core_list) +{ + struct gxp_dma_iommu_manager *mgr = container_of( + gxp->dma_mgr, struct gxp_dma_iommu_manager, dma_mgr); + uint core; + int pasid = iommu_aux_get_pasid(domain, gxp->dev); + + /* Program VID only when cores are managed by us. 
*/ + if (gxp_is_direct_mode(gxp) || gxp_core_boot(gxp)) { + for (core = 0; core < GXP_NUM_CORES; core++) { + if (BIT(core) & core_list) + gxp_ssmt_set_core_vid(&mgr->ssmt, core, 0); + } + } else { + gxp_ssmt_deactivate_scid(&mgr->ssmt, pasid); + } +} + /* Fault handler */ static int sysmmu_fault_handler(struct iommu_fault *fault, void *token) @@ -284,14 +302,15 @@ int gxp_dma_domain_attach_device(struct gxp_dev *gxp, ret = iommu_aux_attach_device(gdomain->domain, gxp->dev); if (ret) goto out; - gxp_dma_ssmt_program(gxp, gdomain->domain, core_list); + gxp_dma_ssmt_activate(gxp, gdomain->domain, core_list); out: return ret; } -void gxp_dma_domain_detach_device(struct gxp_dev *gxp, - struct gcip_iommu_domain *gdomain) +void gxp_dma_domain_detach_device(struct gxp_dev *gxp, struct gcip_iommu_domain *gdomain, + uint core_list) { + gxp_dma_ssmt_deactivate(gxp, gdomain->domain, core_list); iommu_aux_detach_device(gdomain->domain, gxp->dev); } @@ -102,11 +102,12 @@ int gxp_dma_domain_attach_device(struct gxp_dev *gxp, * gxp_dma_domain_detach_device() - Detach the page table from the device. * @gxp: The GXP device to detach * @gdomain: The IOMMU domain to be detached + * @core_list: The physical cores to detach. * * Caller ensures a BLOCK wakelock is hold for the iommu detaching. 
*/ -void gxp_dma_domain_detach_device(struct gxp_dev *gxp, - struct gcip_iommu_domain *gdomain); +void gxp_dma_domain_detach_device(struct gxp_dev *gxp, struct gcip_iommu_domain *gdomain, + uint core_list); /** * gxp_dma_init_default_resources() - Set the various buffers/registers with @@ -11,6 +11,11 @@ #include "gxp-internal.h" #include "gxp-ssmt.h" +static inline bool ssmt_is_client_driven(struct gxp_ssmt *ssmt) +{ + return readl(ssmt->idma_ssmt_base + SSMT_CFG_OFFSET) == SSMT_MODE_CLIENT; +} + static inline void ssmt_set_vid_for_idx(void __iomem *ssmt, uint vid, uint idx) { /* NS_READ_STREAM_VID_<sid> */ @@ -74,20 +79,38 @@ void gxp_ssmt_set_core_vid(struct gxp_ssmt *ssmt, uint core, uint vid) } } -void gxp_ssmt_set_bypass(struct gxp_ssmt *ssmt) +/* + * Programs SSMT to always use SCIDs as VIDs. + * Assumes clamp mode. + */ +static void gxp_ssmt_set_bypass(struct gxp_ssmt *ssmt) +{ + uint core; + + for (core = 0; core < GXP_NUM_CORES; core++) + gxp_ssmt_set_core_vid(ssmt, core, SSMT_CLAMP_MODE_BYPASS); +} + +void gxp_ssmt_activate_scid(struct gxp_ssmt *ssmt, uint scid) +{ + if (ssmt_is_client_driven(ssmt)) { + ssmt_set_vid_for_idx(ssmt->idma_ssmt_base, scid, scid); + ssmt_set_vid_for_idx(ssmt->inst_data_ssmt_base, scid, scid); + } else { + /* + * In clamp mode, we can't configure specific SCID. We can only mark all + * transactions as "bypassed" which have all streams to use their SCID as VID. 
+ */ + gxp_ssmt_set_bypass(ssmt); + } +} + +void gxp_ssmt_deactivate_scid(struct gxp_ssmt *ssmt, uint scid) { - u32 mode; - uint core, i; - - mode = readl(ssmt->idma_ssmt_base + SSMT_CFG_OFFSET); - if (mode == SSMT_MODE_CLIENT) { - for (i = 0; i < MAX_NUM_CONTEXTS; i++) { - ssmt_set_vid_for_idx(ssmt->idma_ssmt_base, i, i); - ssmt_set_vid_for_idx(ssmt->inst_data_ssmt_base, i, i); - } + if (ssmt_is_client_driven(ssmt)) { + ssmt_set_vid_for_idx(ssmt->idma_ssmt_base, scid, 0); + ssmt_set_vid_for_idx(ssmt->inst_data_ssmt_base, scid, 0); } else { - for (core = 0; core < GXP_NUM_CORES; core++) - gxp_ssmt_set_core_vid(ssmt, core, - SSMT_CLAMP_MODE_BYPASS); + dev_warn_once(ssmt->gxp->dev, "Unable to deactivate context on clamp mode"); } } @@ -38,10 +38,19 @@ int gxp_ssmt_init(struct gxp_dev *gxp, struct gxp_ssmt *ssmt); */ void gxp_ssmt_set_core_vid(struct gxp_ssmt *ssmt, uint core, uint vid); -/* - * Programs SSMT to always use SCIDs as VIDs. - * Supports both client-driven and clamp mode. +/** + * gxp_ssmt_activate_scid() - Activates the transactions with SCID @scid. + * + * SSMT will be configured as streams with SCID=@scid to have VID=@scid for memory transactions. + */ +void gxp_ssmt_activate_scid(struct gxp_ssmt *ssmt, uint scid); + +/** + * gxp_ssmt_deactivate_scid() - Deactivates the transactions with SCID @scid. + * + * SSMT will be configured as streams with SCID=@scid to have VID=0 signal for memory + * transactions. 
*/ -void gxp_ssmt_set_bypass(struct gxp_ssmt *ssmt); +void gxp_ssmt_deactivate_scid(struct gxp_ssmt *ssmt, uint scid); #endif /* __GXP_SSMT_H__ */ @@ -787,7 +787,7 @@ int gxp_vd_block_ready(struct gxp_virtual_device *vd) if (gxp->after_vd_block_ready) { ret = gxp->after_vd_block_ready(gxp, vd); if (ret) { - gxp_dma_domain_detach_device(gxp, vd->domain); + gxp_dma_domain_detach_device(gxp, vd->domain, vd->core_list); vd->state = orig_state; return ret; } @@ -816,7 +816,7 @@ void gxp_vd_block_unready(struct gxp_virtual_device *vd) gxp->before_vd_block_unready(gxp, vd); if (vd->state == GXP_VD_READY) vd->state = GXP_VD_OFF; - gxp_dma_domain_detach_device(gxp, vd->domain); + gxp_dma_domain_detach_device(gxp, vd->domain, vd->core_list); if (vd->is_secure) gxp_pm_idle(gxp); |