On Mon, 20 Jul 2020 at 21:21, Rob Clark <robdclark@gmail.com> wrote:
From: Rob Clark <robdclark@chromium.org>
The device may be torn down, but the domain should still be valid. Let's use that as the TLB flush ops cookie.
Fixes a problem reported in [1].
This proposed fix was applied on top of linux mainline master, and the boot test passed on db410c.
The reported problem is fixed.
[1] https://lkml.org/lkml/2020/7/20/104
Signed-off-by: Rob Clark <robdclark@chromium.org>
Reported-by: Naresh Kamboju <naresh.kamboju@linaro.org>
Tested-by: Naresh Kamboju <naresh.kamboju@linaro.org>
Note I don't have a good setup to test this atm, but I think it should work.
 drivers/iommu/qcom_iommu.c | 37 +++++++++++++++++--------------------
 1 file changed, 17 insertions(+), 20 deletions(-)

diff --git a/drivers/iommu/qcom_iommu.c b/drivers/iommu/qcom_iommu.c
index c3e1fbd1988c..d176df569af8 100644
--- a/drivers/iommu/qcom_iommu.c
+++ b/drivers/iommu/qcom_iommu.c
@@ -65,6 +65,7 @@ struct qcom_iommu_domain {
 	struct mutex		 init_mutex; /* Protects iommu pointer */
 	struct iommu_domain	 domain;
 	struct qcom_iommu_dev	*iommu;
+	struct iommu_fwspec	*fwspec;
 };
 
 static struct qcom_iommu_domain *to_qcom_iommu_domain(struct iommu_domain *dom)
@@ -84,9 +85,9 @@ static struct qcom_iommu_dev * to_iommu(struct device *dev)
 	return dev_iommu_priv_get(dev);
 }
 
-static struct qcom_iommu_ctx * to_ctx(struct device *dev, unsigned asid)
+static struct qcom_iommu_ctx * to_ctx(struct qcom_iommu_domain *d, unsigned asid)
 {
-	struct qcom_iommu_dev *qcom_iommu = to_iommu(dev);
+	struct qcom_iommu_dev *qcom_iommu = d->iommu;
 	if (!qcom_iommu)
 		return NULL;
 	return qcom_iommu->ctxs[asid - 1];
@@ -118,14 +119,12 @@ iommu_readq(struct qcom_iommu_ctx *ctx, unsigned reg)
 
 static void qcom_iommu_tlb_sync(void *cookie)
 {
-	struct iommu_fwspec *fwspec;
-	struct device *dev = cookie;
+	struct qcom_iommu_domain *qcom_domain = cookie;
+	struct iommu_fwspec *fwspec = qcom_domain->fwspec;
 	unsigned i;
 
-	fwspec = dev_iommu_fwspec_get(dev);
-
 	for (i = 0; i < fwspec->num_ids; i++) {
-		struct qcom_iommu_ctx *ctx = to_ctx(dev, fwspec->ids[i]);
+		struct qcom_iommu_ctx *ctx = to_ctx(qcom_domain, fwspec->ids[i]);
 		unsigned int val, ret;
 
 		iommu_writel(ctx, ARM_SMMU_CB_TLBSYNC, 0);
@@ -139,14 +138,12 @@ static void qcom_iommu_tlb_sync(void *cookie)
 
 static void qcom_iommu_tlb_inv_context(void *cookie)
 {
-	struct device *dev = cookie;
-	struct iommu_fwspec *fwspec;
+	struct qcom_iommu_domain *qcom_domain = cookie;
+	struct iommu_fwspec *fwspec = qcom_domain->fwspec;
 	unsigned i;
 
-	fwspec = dev_iommu_fwspec_get(dev);
-
 	for (i = 0; i < fwspec->num_ids; i++) {
-		struct qcom_iommu_ctx *ctx = to_ctx(dev, fwspec->ids[i]);
+		struct qcom_iommu_ctx *ctx = to_ctx(qcom_domain, fwspec->ids[i]);
 		iommu_writel(ctx, ARM_SMMU_CB_S1_TLBIASID, ctx->asid);
 	}
 
@@ -156,16 +153,14 @@ static void qcom_iommu_tlb_inv_context(void *cookie)
 static void qcom_iommu_tlb_inv_range_nosync(unsigned long iova, size_t size,
 					    size_t granule, bool leaf, void *cookie)
 {
-	struct device *dev = cookie;
-	struct iommu_fwspec *fwspec;
+	struct qcom_iommu_domain *qcom_domain = cookie;
+	struct iommu_fwspec *fwspec = qcom_domain->fwspec;
 	unsigned i, reg;
 
 	reg = leaf ? ARM_SMMU_CB_S1_TLBIVAL : ARM_SMMU_CB_S1_TLBIVA;
 
-	fwspec = dev_iommu_fwspec_get(dev);
-
 	for (i = 0; i < fwspec->num_ids; i++) {
-		struct qcom_iommu_ctx *ctx = to_ctx(dev, fwspec->ids[i]);
+		struct qcom_iommu_ctx *ctx = to_ctx(qcom_domain, fwspec->ids[i]);
 		size_t s = size;
 
 		iova = (iova >> 12) << 12;
@@ -256,7 +251,9 @@ static int qcom_iommu_init_domain(struct iommu_domain *domain,
 	};
 
 	qcom_domain->iommu = qcom_iommu;
-	pgtbl_ops = alloc_io_pgtable_ops(ARM_32_LPAE_S1, &pgtbl_cfg, dev);
+	qcom_domain->fwspec = fwspec;
+
+	pgtbl_ops = alloc_io_pgtable_ops(ARM_32_LPAE_S1, &pgtbl_cfg, qcom_domain);
 	if (!pgtbl_ops) {
 		dev_err(qcom_iommu->dev, "failed to allocate pagetable ops\n");
 		ret = -ENOMEM;
@@ -269,7 +266,7 @@ static int qcom_iommu_init_domain(struct iommu_domain *domain,
 	domain->geometry.force_aperture = true;
 
 	for (i = 0; i < fwspec->num_ids; i++) {
-		struct qcom_iommu_ctx *ctx = to_ctx(dev, fwspec->ids[i]);
+		struct qcom_iommu_ctx *ctx = to_ctx(qcom_domain, fwspec->ids[i]);
 
 		if (!ctx->secure_init) {
 			ret = qcom_scm_restore_sec_cfg(qcom_iommu->sec_id, ctx->asid);
@@ -419,7 +416,7 @@ static void qcom_iommu_detach_dev(struct iommu_domain *domain, struct device *de
 
 	pm_runtime_get_sync(qcom_iommu->dev);
 	for (i = 0; i < fwspec->num_ids; i++) {
-		struct qcom_iommu_ctx *ctx = to_ctx(dev, fwspec->ids[i]);
+		struct qcom_iommu_ctx *ctx = to_ctx(qcom_domain, fwspec->ids[i]);
 
 		/* Disable the context bank: */
 		iommu_writel(ctx, ARM_SMMU_CB_SCTLR, 0);
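For context on where that cookie comes from: the TLB callbacks above are registered through the io-pgtable config in qcom_iommu_init_domain(), and whatever pointer is handed to alloc_io_pgtable_ops() is what later comes back as the cookie in those callbacks. The snippet below is only a rough sketch of that function with this patch applied; the pgtbl_cfg field values are approximate and not part of the patch, while the cookie-related lines match the diff above.

	/* Rough sketch of qcom_iommu_init_domain() with this patch applied
	 * (abbreviated, not verbatim; pgtbl_cfg fields approximate). */
	pgtbl_cfg = (struct io_pgtable_cfg) {
		.pgsize_bitmap	= qcom_iommu_ops.pgsize_bitmap,
		.ias		= 32,
		.oas		= 40,
		.tlb		= &qcom_flush_ops,
		.iommu_dev	= qcom_iommu->dev,
	};

	qcom_domain->iommu = qcom_iommu;
	qcom_domain->fwspec = fwspec;

	/* The domain, not the device, is the flush-ops cookie, so the
	 * callbacks never have to look anything up through a struct device
	 * whose IOMMU data may already have been released. */
	pgtbl_ops = alloc_io_pgtable_ops(ARM_32_LPAE_S1, &pgtbl_cfg, qcom_domain);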
On Tue, Jul 21, 2020 at 12:45:17AM +0530, Naresh Kamboju wrote:
On Mon, 20 Jul 2020 at 21:21, Rob Clark <robdclark@gmail.com> wrote:
From: Rob Clark <robdclark@chromium.org>
The device may be torn down, but the domain should still be valid. Let's use that as the TLB flush ops cookie.
Fixes a problem reported in [1].
This proposed fix was applied on top of linux mainline master, and the boot test passed on db410c.
The reported problem is fixed.
Is this needed for v5.8/stable? A fixes tag would be great too.
Regards,
Joerg
On Wed, Jul 22, 2020 at 6:10 AM Joerg Roedel <joro@8bytes.org> wrote:
On Tue, Jul 21, 2020 at 12:45:17AM +0530, Naresh Kamboju wrote:
On Mon, 20 Jul 2020 at 21:21, Rob Clark <robdclark@gmail.com> wrote:
From: Rob Clark <robdclark@chromium.org>
The device may be torn down, but the domain should still be valid. Let's use that as the TLB flush ops cookie.
Fixes a problem reported in [1].
This proposed fix was applied on top of linux mainline master, and the boot test passed on db410c.
The reported problem is fixed.
Is this needed for v5.8/stable? A fixes tag would be great too.
looks like, yes:
Fixes: 09b5dfff9ad6 ("iommu/qcom: Use accessor functions for iommu private data")
BR, -R
Regards,
Joerg
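For anyone tracking the regression: below is a rough sketch of the pre-patch shape of one callback, reconstructed from the '-' lines of the diff earlier in the thread (illustrative only, not verbatim), showing why the device makes a fragile cookie once 09b5dfff9ad6 routed the lookups through the device's IOMMU data.

/* Pre-patch shape of the callback (reconstructed from the removed lines in
 * the diff above; illustrative only). */
static void qcom_iommu_tlb_inv_context(void *cookie)
{
	struct device *dev = cookie;
	struct iommu_fwspec *fwspec;
	unsigned i;

	/* If the device has already been torn down, its per-device IOMMU
	 * data may no longer be valid, so this fwspec (and the result of
	 * to_ctx(dev, ...) below) can be stale or NULL even though the
	 * domain and its io-pgtable are still alive. */
	fwspec = dev_iommu_fwspec_get(dev);

	for (i = 0; i < fwspec->num_ids; i++) {
		struct qcom_iommu_ctx *ctx = to_ctx(dev, fwspec->ids[i]);

		iommu_writel(ctx, ARM_SMMU_CB_S1_TLBIASID, ctx->asid);
	}
}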
On Wed, Jul 22, 2020 at 07:54:40AM -0700, Rob Clark wrote:
On Wed, Jul 22, 2020 at 6:10 AM Joerg Roedel <joro@8bytes.org> wrote:
Is this needed for v5.8/stable? A fixes tag would be great too.
looks like, yes:
Fixes: 09b5dfff9ad6 ("iommu/qcom: Use accessor functions for iommu private data")
Thanks, applied to fixes branch.