drm/msm: get an iova from the address space instead of an id
In the future we won't have a fixed set of address spaces.
Instead of going through the effort of assigning an ID for each
address space, just use the address space itself as a token for
getting / putting an iova.
This forces a few changes in the gem object however: instead
of using a simple index into a list of domains, we need to
maintain a list of them. Luckily the list will be pretty small;
even with dynamic address spaces we wouldn't ever see more than
two or three.
CRs-Fixed: 2050484
Change-Id: Ic0dedbad4495f02a21135217f3605b93f8b8dfea
Signed-off-by: Jordan Crouse <jcrouse@codeaurora.org>
Signed-off-by: Abhijit Kulkarni <kabhijit@codeaurora.org>
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
index b468d2a..961d47f 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -64,7 +64,7 @@
DBG("%s", gpu->name);
- ret = msm_gem_get_iova(gpu->rb->bo, gpu->id, &gpu->rb_iova);
+ ret = msm_gem_get_iova(gpu->rb->bo, gpu->aspace, &gpu->rb_iova);
if (ret) {
gpu->rb_iova = 0;
dev_err(gpu->dev->dev, "could not map ringbuffer: %d\n", ret);
@@ -406,7 +406,7 @@
return -ENOMEM;
}
- ret = msm_gem_get_iova(adreno_gpu->memptrs_bo, gpu->id,
+ ret = msm_gem_get_iova(adreno_gpu->memptrs_bo, gpu->aspace,
&adreno_gpu->memptrs_iova);
if (ret) {
dev_err(drm->dev, "could not map memptrs: %d\n", ret);
@@ -423,8 +423,7 @@
msm_gem_put_vaddr(gpu->memptrs_bo);
if (gpu->memptrs_iova)
- msm_gem_put_iova(gpu->memptrs_bo, gpu->base.id);
-
+ msm_gem_put_iova(gpu->memptrs_bo, gpu->base.aspace);
drm_gem_object_unreference_unlocked(gpu->memptrs_bo);
}
release_firmware(gpu->pm4);
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c
index e2a348d..b2aef9c 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c
@@ -24,6 +24,7 @@
#include "msm_drv.h"
#include "msm_kms.h"
#include "msm_gpu.h"
+#include "msm_mmu.h"
#include "dsi_ctrl.h"
#include "dsi_ctrl_hw.h"
#include "dsi_clk.h"
@@ -252,6 +253,16 @@
return 0;
}
+static inline struct msm_gem_address_space*
+dsi_ctrl_get_aspace(struct dsi_ctrl *dsi_ctrl,
+ int domain)
+{
+ if (!dsi_ctrl || !dsi_ctrl->drm_dev)
+ return NULL;
+
+ return msm_gem_smmu_address_space_get(dsi_ctrl->drm_dev, domain);
+}
+
static int dsi_ctrl_check_state(struct dsi_ctrl *dsi_ctrl,
enum dsi_ctrl_driver_ops op,
u32 op_state)
@@ -1170,8 +1181,17 @@
static int dsi_ctrl_buffer_deinit(struct dsi_ctrl *dsi_ctrl)
{
+ struct msm_gem_address_space *aspace = NULL;
+
if (dsi_ctrl->tx_cmd_buf) {
- msm_gem_put_iova(dsi_ctrl->tx_cmd_buf, 0);
+ aspace = dsi_ctrl_get_aspace(dsi_ctrl,
+ MSM_SMMU_DOMAIN_UNSECURE);
+ if (!aspace) {
+ pr_err("failed to get address space\n");
+ return -ENOMEM;
+ }
+
+ msm_gem_put_iova(dsi_ctrl->tx_cmd_buf, aspace);
msm_gem_free_object(dsi_ctrl->tx_cmd_buf);
dsi_ctrl->tx_cmd_buf = NULL;
@@ -1184,6 +1204,13 @@
{
int rc = 0;
u32 iova = 0;
+ struct msm_gem_address_space *aspace = NULL;
+
+ aspace = dsi_ctrl_get_aspace(dsi_ctrl, MSM_SMMU_DOMAIN_UNSECURE);
+ if (!aspace) {
+ pr_err("failed to get address space\n");
+ return -ENOMEM;
+ }
dsi_ctrl->tx_cmd_buf = msm_gem_new(dsi_ctrl->drm_dev,
SZ_4K,
@@ -1198,7 +1225,7 @@
dsi_ctrl->cmd_buffer_size = SZ_4K;
- rc = msm_gem_get_iova(dsi_ctrl->tx_cmd_buf, 0, &iova);
+ rc = msm_gem_get_iova(dsi_ctrl->tx_cmd_buf, aspace, &iova);
if (rc) {
pr_err("failed to get iova, rc=%d\n", rc);
(void)dsi_ctrl_buffer_deinit(dsi_ctrl);
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_display.c b/drivers/gpu/drm/msm/dsi-staging/dsi_display.c
index 52b1dcb..b61bfde 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_display.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_display.c
@@ -20,6 +20,7 @@
#include "msm_drv.h"
#include "sde_connector.h"
+#include "msm_mmu.h"
#include "dsi_display.h"
#include "dsi_panel.h"
#include "dsi_ctrl.h"
@@ -1321,6 +1322,7 @@
{
struct dsi_display *display = to_dsi_display(host);
struct dsi_display_ctrl *display_ctrl;
+ struct msm_gem_address_space *aspace = NULL;
int rc = 0, cnt = 0;
if (!host || !msg) {
@@ -1363,7 +1365,16 @@
pr_err("value of display->tx_cmd_buf is NULL");
goto error_disable_cmd_engine;
}
- rc = msm_gem_get_iova(display->tx_cmd_buf, 0,
+
+ aspace = msm_gem_smmu_address_space_get(display->drm_dev,
+ MSM_SMMU_DOMAIN_UNSECURE);
+ if (!aspace) {
+ pr_err("failed to get aspace\n");
+ rc = -EINVAL;
+ goto free_gem;
+ }
+
+ rc = msm_gem_get_iova(display->tx_cmd_buf, aspace,
&(display->cmd_buffer_iova));
if (rc) {
pr_err("failed to get the iova rc %d\n", rc);
@@ -1419,7 +1430,7 @@
}
return rc;
put_iova:
- msm_gem_put_iova(display->tx_cmd_buf, 0);
+ msm_gem_put_iova(display->tx_cmd_buf, aspace);
free_gem:
msm_gem_free_object(display->tx_cmd_buf);
error:
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
index 9527daf..75e98dc 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
@@ -133,7 +133,7 @@
container_of(work, struct mdp4_crtc, unref_cursor_work);
struct mdp4_kms *mdp4_kms = get_kms(&mdp4_crtc->base);
- msm_gem_put_iova(val, mdp4_kms->id);
+ msm_gem_put_iova(val, mdp4_kms->aspace);
drm_gem_object_unreference_unlocked(val);
}
@@ -378,7 +378,8 @@
if (next_bo) {
/* take a obj ref + iova ref when we start scanning out: */
drm_gem_object_reference(next_bo);
- msm_gem_get_iova_locked(next_bo, mdp4_kms->id, &iova);
+ msm_gem_get_iova_locked(next_bo, mdp4_kms->aspace,
+ &iova);
/* enable cursor: */
mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_SIZE(dma),
@@ -435,7 +436,7 @@
}
if (cursor_bo) {
- ret = msm_gem_get_iova(cursor_bo, mdp4_kms->id, &iova);
+ ret = msm_gem_get_iova(cursor_bo, mdp4_kms->aspace, &iova);
if (ret)
goto fail;
} else {
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
index 80b49a1..acee5da 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
@@ -184,7 +184,7 @@
}
if (mdp4_kms->blank_cursor_iova)
- msm_gem_put_iova(mdp4_kms->blank_cursor_bo, mdp4_kms->id);
+ msm_gem_put_iova(mdp4_kms->blank_cursor_bo, mdp4_kms->aspace);
drm_gem_object_unreference_unlocked(mdp4_kms->blank_cursor_bo);
if (mdp4_kms->rpm_enabled)
@@ -582,13 +582,6 @@
aspace = NULL;
}
- mdp4_kms->id = msm_register_address_space(dev, aspace);
- if (mdp4_kms->id < 0) {
- ret = mdp4_kms->id;
- dev_err(dev->dev, "failed to register mdp4 iommu: %d\n", ret);
- goto fail;
- }
-
ret = modeset_init(mdp4_kms);
if (ret) {
dev_err(dev->dev, "modeset_init failed: %d\n", ret);
@@ -605,7 +598,7 @@
goto fail;
}
- ret = msm_gem_get_iova(mdp4_kms->blank_cursor_bo, mdp4_kms->id,
+ ret = msm_gem_get_iova(mdp4_kms->blank_cursor_bo, mdp4_kms->aspace,
&mdp4_kms->blank_cursor_iova);
if (ret) {
dev_err(dev->dev, "could not pin blank-cursor bo: %d\n", ret);
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h
index 1fe35b2..f9dcadf 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h
@@ -33,9 +33,6 @@
 	int rev;
-	/* mapper-id used to request GEM buffer mapped for scanout: */
-	int id;
-
 	void __iomem *mmio;
struct regulator *vdd;
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c
index 3903dbc..934992e 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c
@@ -109,7 +109,7 @@
return 0;
DBG("%s: prepare: FB[%u]", mdp4_plane->name, fb->base.id);
- return msm_framebuffer_prepare(fb, mdp4_kms->id);
+ return msm_framebuffer_prepare(fb, mdp4_kms->aspace);
}
static void mdp4_plane_cleanup_fb(struct drm_plane *plane,
@@ -123,7 +123,7 @@
return;
DBG("%s: cleanup: FB[%u]", mdp4_plane->name, fb->base.id);
- msm_framebuffer_cleanup(fb, mdp4_kms->id);
+ msm_framebuffer_cleanup(fb, mdp4_kms->aspace);
}
@@ -172,13 +172,13 @@
MDP4_PIPE_SRC_STRIDE_B_P3(fb->pitches[3]));
mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRCP0_BASE(pipe),
- msm_framebuffer_iova(fb, mdp4_kms->id, 0));
+ msm_framebuffer_iova(fb, mdp4_kms->aspace, 0));
mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRCP1_BASE(pipe),
- msm_framebuffer_iova(fb, mdp4_kms->id, 1));
+ msm_framebuffer_iova(fb, mdp4_kms->aspace, 1));
mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRCP2_BASE(pipe),
- msm_framebuffer_iova(fb, mdp4_kms->id, 2));
+ msm_framebuffer_iova(fb, mdp4_kms->aspace, 2));
mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRCP3_BASE(pipe),
- msm_framebuffer_iova(fb, mdp4_kms->id, 3));
+ msm_framebuffer_iova(fb, mdp4_kms->aspace, 3));
plane->fb = fb;
}
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
index c205c36..15e7da2 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
@@ -171,7 +171,7 @@
container_of(work, struct mdp5_crtc, unref_cursor_work);
struct mdp5_kms *mdp5_kms = get_kms(&mdp5_crtc->base);
- msm_gem_put_iova(val, mdp5_kms->id);
+ msm_gem_put_iova(val, mdp5_kms->aspace);
drm_gem_object_unreference_unlocked(val);
}
@@ -525,7 +525,7 @@
if (!cursor_bo)
return -ENOENT;
- ret = msm_gem_get_iova(cursor_bo, mdp5_kms->id, &cursor_addr);
+ ret = msm_gem_get_iova(cursor_bo, mdp5_kms->aspace, &cursor_addr);
if (ret)
return -EINVAL;
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
index f022967..d97e4ef 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
@@ -640,13 +640,6 @@
aspace = NULL;
}
- mdp5_kms->id = msm_register_address_space(dev, aspace);
- if (mdp5_kms->id < 0) {
- ret = mdp5_kms->id;
- dev_err(&pdev->dev, "failed to register mdp5 iommu: %d\n", ret);
- goto fail;
- }
-
ret = modeset_init(mdp5_kms);
if (ret) {
dev_err(&pdev->dev, "modeset_init failed: %d\n", ret);
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
index 623ac07..f21e912 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
@@ -38,8 +38,6 @@
-	/* mapper-id used to request GEM buffer mapped for scanout: */
-	int id;
struct msm_gem_address_space *aspace;
struct mdp5_smp *smp;
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
index 5e67e8b..88e5d06 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
@@ -261,7 +261,7 @@
return 0;
DBG("%s: prepare: FB[%u]", mdp5_plane->name, fb->base.id);
- return msm_framebuffer_prepare(fb, mdp5_kms->id);
+ return msm_framebuffer_prepare(fb, mdp5_kms->aspace);
}
static void mdp5_plane_cleanup_fb(struct drm_plane *plane,
@@ -275,7 +275,7 @@
return;
DBG("%s: cleanup: FB[%u]", mdp5_plane->name, fb->base.id);
- msm_framebuffer_cleanup(fb, mdp5_kms->id);
+ msm_framebuffer_cleanup(fb, mdp5_kms->aspace);
}
static int mdp5_plane_atomic_check(struct drm_plane *plane,
@@ -398,13 +398,13 @@
MDP5_PIPE_SRC_STRIDE_B_P3(fb->pitches[3]));
mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC0_ADDR(pipe),
- msm_framebuffer_iova(fb, mdp5_kms->id, 0));
+ msm_framebuffer_iova(fb, mdp5_kms->aspace, 0));
mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC1_ADDR(pipe),
- msm_framebuffer_iova(fb, mdp5_kms->id, 1));
+ msm_framebuffer_iova(fb, mdp5_kms->aspace, 1));
mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC2_ADDR(pipe),
- msm_framebuffer_iova(fb, mdp5_kms->id, 2));
+ msm_framebuffer_iova(fb, mdp5_kms->aspace, 2));
mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC3_ADDR(pipe),
- msm_framebuffer_iova(fb, mdp5_kms->id, 3));
+ msm_framebuffer_iova(fb, mdp5_kms->aspace, 3));
plane->fb = fb;
}
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index 6a2d239..9aebeb9 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -151,20 +151,6 @@
.atomic_commit = msm_atomic_commit,
};
-int msm_register_address_space(struct drm_device *dev,
- struct msm_gem_address_space *aspace)
-{
- struct msm_drm_private *priv = dev->dev_private;
- int idx = priv->num_aspaces++;
-
- if (WARN_ON(idx >= ARRAY_SIZE(priv->aspace)))
- return -EINVAL;
-
- priv->aspace[idx] = aspace;
-
- return idx;
-}
-
#ifdef CONFIG_DRM_MSM_REGISTER_LOGGING
static bool reglog = false;
MODULE_PARM_DESC(reglog, "Enable register read/write logging");
@@ -1933,6 +1919,30 @@
return ret;
}
+struct msm_gem_address_space *
+msm_gem_smmu_address_space_get(struct drm_device *dev,
+ unsigned int domain)
+{
+ struct msm_drm_private *priv = NULL;
+ struct msm_kms *kms;
+ const struct msm_kms_funcs *funcs;
+
+ if ((!dev) || (!dev->dev_private))
+ return NULL;
+
+ priv = dev->dev_private;
+ kms = priv->kms;
+ if (!kms)
+ return NULL;
+
+ funcs = kms->funcs;
+
+ if ((!funcs) || (!funcs->get_address_space))
+ return NULL;
+
+ return funcs->get_address_space(priv->kms, domain);
+}
+
/*
* We don't know what's the best binding to link the gpu with the drm device.
* Fow now, we just hunt for all the possible gpus that we support, and add them
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index 665ed365..b22337467 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -642,8 +642,6 @@
struct drm_atomic_state *state, bool nonblock);
void msm_gem_submit_free(struct msm_gem_submit *submit);
-int msm_register_address_space(struct drm_device *dev,
- struct msm_gem_address_space *aspace);
void msm_gem_unmap_vma(struct msm_gem_address_space *aspace,
struct msm_gem_vma *vma, struct sg_table *sgt,
void *priv);
@@ -662,6 +660,10 @@
msm_gem_smmu_address_space_create(struct device *dev, struct msm_mmu *mmu,
const char *name);
+struct msm_gem_address_space *
+msm_gem_smmu_address_space_get(struct drm_device *dev,
+ unsigned int domain);
+
int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
struct drm_file *file);
@@ -673,13 +675,16 @@
int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
int msm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj);
-int msm_gem_get_iova_locked(struct drm_gem_object *obj, int id,
- uint32_t *iova);
-int msm_gem_get_iova(struct drm_gem_object *obj, int id, uint32_t *iova);
-uint32_t msm_gem_iova(struct drm_gem_object *obj, int id);
+int msm_gem_get_iova_locked(struct drm_gem_object *obj,
+ struct msm_gem_address_space *aspace, uint32_t *iova);
+int msm_gem_get_iova(struct drm_gem_object *obj,
+ struct msm_gem_address_space *aspace, uint32_t *iova);
+uint32_t msm_gem_iova(struct drm_gem_object *obj,
+ struct msm_gem_address_space *aspace);
struct page **msm_gem_get_pages(struct drm_gem_object *obj);
void msm_gem_put_pages(struct drm_gem_object *obj);
-void msm_gem_put_iova(struct drm_gem_object *obj, int id);
+void msm_gem_put_iova(struct drm_gem_object *obj,
+ struct msm_gem_address_space *aspace);
int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
struct drm_mode_create_dumb *args);
int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
@@ -716,9 +721,12 @@
struct dma_buf *dmabuf, struct sg_table *sgt);
void msm_framebuffer_set_kmap(struct drm_framebuffer *fb, bool enable);
-int msm_framebuffer_prepare(struct drm_framebuffer *fb, int id);
-void msm_framebuffer_cleanup(struct drm_framebuffer *fb, int id);
-uint32_t msm_framebuffer_iova(struct drm_framebuffer *fb, int id, int plane);
+int msm_framebuffer_prepare(struct drm_framebuffer *fb,
+ struct msm_gem_address_space *aspace);
+void msm_framebuffer_cleanup(struct drm_framebuffer *fb,
+ struct msm_gem_address_space *aspace);
+uint32_t msm_framebuffer_iova(struct drm_framebuffer *fb,
+ struct msm_gem_address_space *aspace, int plane);
struct drm_gem_object *msm_framebuffer_bo(struct drm_framebuffer *fb, int plane);
const struct msm_format *msm_framebuffer_format(struct drm_framebuffer *fb);
struct drm_framebuffer *msm_framebuffer_init(struct drm_device *dev,
diff --git a/drivers/gpu/drm/msm/msm_fb.c b/drivers/gpu/drm/msm/msm_fb.c
index 918427a..0a9f12d 100644
--- a/drivers/gpu/drm/msm/msm_fb.c
+++ b/drivers/gpu/drm/msm/msm_fb.c
@@ -148,14 +148,15 @@
* should be fine, since only the scanout (mdpN) side of things needs
* this, the gpu doesn't care about fb's.
*/
-int msm_framebuffer_prepare(struct drm_framebuffer *fb, int id)
+int msm_framebuffer_prepare(struct drm_framebuffer *fb,
+ struct msm_gem_address_space *aspace)
{
struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
int ret, i, n = drm_format_num_planes(fb->pixel_format);
uint32_t iova;
for (i = 0; i < n; i++) {
- ret = msm_gem_get_iova(msm_fb->planes[i], id, &iova);
+ ret = msm_gem_get_iova(msm_fb->planes[i], aspace, &iova);
DBG("FB[%u]: iova[%d]: %08x (%d)", fb->base.id, i, iova, ret);
if (ret)
return ret;
@@ -167,7 +168,8 @@
return 0;
}
-void msm_framebuffer_cleanup(struct drm_framebuffer *fb, int id)
+void msm_framebuffer_cleanup(struct drm_framebuffer *fb,
+ struct msm_gem_address_space *aspace)
{
struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
int i, n = drm_format_num_planes(fb->pixel_format);
@@ -176,15 +178,16 @@
msm_framebuffer_kunmap(fb);
for (i = 0; i < n; i++)
- msm_gem_put_iova(msm_fb->planes[i], id);
+ msm_gem_put_iova(msm_fb->planes[i], aspace);
}
-uint32_t msm_framebuffer_iova(struct drm_framebuffer *fb, int id, int plane)
+uint32_t msm_framebuffer_iova(struct drm_framebuffer *fb,
+ struct msm_gem_address_space *aspace, int plane)
{
struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
if (!msm_fb->planes[plane])
return 0;
- return msm_gem_iova(msm_fb->planes[plane], id) + fb->offsets[plane];
+ return msm_gem_iova(msm_fb->planes[plane], aspace) + fb->offsets[plane];
}
struct drm_gem_object *msm_framebuffer_bo(struct drm_framebuffer *fb, int plane)
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 43e2a26..a7d06d1 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -290,22 +290,63 @@
return offset;
}
+static void obj_remove_domain(struct msm_gem_vma *domain)
+{
+ if (domain) {
+ list_del(&domain->list);
+ kfree(domain);
+ }
+}
+
static void
put_iova(struct drm_gem_object *obj)
{
struct drm_device *dev = obj->dev;
- struct msm_drm_private *priv = obj->dev->dev_private;
struct msm_gem_object *msm_obj = to_msm_bo(obj);
- int id;
+ struct msm_gem_vma *domain, *tmp;
WARN_ON(!mutex_is_locked(&dev->struct_mutex));
- for (id = 0; id < ARRAY_SIZE(msm_obj->domain); id++) {
- msm_gem_unmap_vma(priv->aspace[id], &msm_obj->domain[id],
- msm_obj->sgt, get_dmabuf_ptr(obj));
+ list_for_each_entry_safe(domain, tmp, &msm_obj->domains, list) {
+ if (iommu_present(&platform_bus_type)) {
+ msm_gem_unmap_vma(domain->aspace, domain,
+ msm_obj->sgt, get_dmabuf_ptr(obj));
+ }
+
+ obj_remove_domain(domain);
}
}
+static struct msm_gem_vma *obj_add_domain(struct drm_gem_object *obj,
+ struct msm_gem_address_space *aspace)
+{
+ struct msm_gem_object *msm_obj = to_msm_bo(obj);
+ struct msm_gem_vma *domain = kzalloc(sizeof(*domain), GFP_KERNEL);
+
+ if (!domain)
+ return ERR_PTR(-ENOMEM);
+
+ domain->aspace = aspace;
+
+ list_add_tail(&domain->list, &msm_obj->domains);
+
+ return domain;
+}
+
+static struct msm_gem_vma *obj_get_domain(struct drm_gem_object *obj,
+ struct msm_gem_address_space *aspace)
+{
+ struct msm_gem_object *msm_obj = to_msm_bo(obj);
+ struct msm_gem_vma *domain;
+
+ list_for_each_entry(domain, &msm_obj->domains, list) {
+ if (domain->aspace == aspace)
+ return domain;
+ }
+
+ return NULL;
+}
+
/* should be called under struct_mutex.. although it can be called
* from atomic context without struct_mutex to acquire an extra
* iova ref if you know one is already held.
@@ -313,51 +354,65 @@
* That means when I do eventually need to add support for unpinning
* the refcnt counter needs to be atomic_t.
*/
-int msm_gem_get_iova_locked(struct drm_gem_object *obj, int id,
- uint32_t *iova)
+int msm_gem_get_iova_locked(struct drm_gem_object *obj,
+ struct msm_gem_address_space *aspace, uint32_t *iova)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
+ struct page **pages;
+ struct msm_gem_vma *domain;
int ret = 0;
- if (!msm_obj->domain[id].iova) {
- struct msm_drm_private *priv = obj->dev->dev_private;
- struct page **pages = get_pages(obj);
+ if (!iommu_present(&platform_bus_type)) {
+ pages = get_pages(obj);
if (IS_ERR(pages))
return PTR_ERR(pages);
- if (iommu_present(&platform_bus_type)) {
- ret = msm_gem_map_vma(priv->aspace[id],
- &msm_obj->domain[id], msm_obj->sgt,
- get_dmabuf_ptr(obj),
- msm_obj->flags);
- } else {
- msm_obj->domain[id].iova = physaddr(obj);
- }
+ *iova = physaddr(obj);
+ return 0;
}
- if (!ret)
- *iova = msm_obj->domain[id].iova;
+ domain = obj_get_domain(obj, aspace);
+
+ if (!domain) {
+ domain = obj_add_domain(obj, aspace);
+ if (IS_ERR(domain))
+ return PTR_ERR(domain);
+
+ pages = get_pages(obj);
+ if (IS_ERR(pages)) {
+ obj_remove_domain(domain);
+ return PTR_ERR(pages);
+ }
+
+ ret = msm_gem_map_vma(aspace, domain, msm_obj->sgt,
+ get_dmabuf_ptr(obj),
+ msm_obj->flags);
+ }
+
+ if (!ret && domain)
+ *iova = domain->iova;
+ else
+ obj_remove_domain(domain);
return ret;
}
/* get iova, taking a reference. Should have a matching put */
-int msm_gem_get_iova(struct drm_gem_object *obj, int id, uint32_t *iova)
+int msm_gem_get_iova(struct drm_gem_object *obj,
+ struct msm_gem_address_space *aspace, uint32_t *iova)
{
- struct msm_gem_object *msm_obj = to_msm_bo(obj);
+ struct msm_gem_vma *domain;
int ret;
- /* this is safe right now because we don't unmap until the
- * bo is deleted:
- */
- if (msm_obj->domain[id].iova) {
- *iova = msm_obj->domain[id].iova;
+ domain = obj_get_domain(obj, aspace);
+ if (domain) {
+ *iova = domain->iova;
return 0;
}
mutex_lock(&obj->dev->struct_mutex);
- ret = msm_gem_get_iova_locked(obj, id, iova);
+ ret = msm_gem_get_iova_locked(obj, aspace, iova);
mutex_unlock(&obj->dev->struct_mutex);
return ret;
}
@@ -365,14 +420,18 @@
/* get iova without taking a reference, used in places where you have
* already done a 'msm_gem_get_iova()'.
*/
-uint32_t msm_gem_iova(struct drm_gem_object *obj, int id)
+uint32_t msm_gem_iova(struct drm_gem_object *obj,
+ struct msm_gem_address_space *aspace)
{
- struct msm_gem_object *msm_obj = to_msm_bo(obj);
- WARN_ON(!msm_obj->domain[id].iova);
- return msm_obj->domain[id].iova;
+ struct msm_gem_vma *domain = obj_get_domain(obj, aspace);
+
+ WARN_ON(!domain);
+
+ return domain ? domain->iova : 0;
}
-void msm_gem_put_iova(struct drm_gem_object *obj, int id)
+void msm_gem_put_iova(struct drm_gem_object *obj,
+ struct msm_gem_address_space *aspace)
{
// XXX TODO ..
// NOTE: probably don't need a _locked() version.. we wouldn't
@@ -624,6 +683,7 @@
void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
+ struct msm_gem_vma *domain;
struct reservation_object *robj = msm_obj->resv;
struct reservation_object_list *fobj;
struct fence *fence;
@@ -666,6 +726,12 @@
if (fence)
describe_fence(fence, "Exclusive", m);
rcu_read_unlock();
+
+ /* FIXME: we need to print the address space here too */
+ list_for_each_entry(domain, &msm_obj->domains, list)
+ seq_printf(m, " %08llx", domain->iova);
+
+ seq_puts(m, "\n");
}
void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
@@ -783,8 +849,13 @@
if (!msm_obj)
return -ENOMEM;
- if (use_vram)
- msm_obj->vram_node = &msm_obj->domain[0].node;
+ if (use_vram) {
+ struct msm_gem_vma *domain = obj_add_domain(&msm_obj->base,
+ NULL);
+
+ if (!IS_ERR(domain))
+ msm_obj->vram_node = &domain->node;
+ }
msm_obj->flags = flags;
msm_obj->madv = MSM_MADV_WILLNEED;
@@ -797,6 +868,8 @@
}
INIT_LIST_HEAD(&msm_obj->submit_entry);
+ INIT_LIST_HEAD(&msm_obj->domains);
+
list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
*obj = &msm_obj->base;
diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h
index b176c11..9d41a00 100644
--- a/drivers/gpu/drm/msm/msm_gem.h
+++ b/drivers/gpu/drm/msm/msm_gem.h
@@ -44,7 +44,9 @@
struct msm_gem_vma {
/* Node used by the GPU address space, but not the SDE address space */
struct drm_mm_node node;
+ struct msm_gem_address_space *aspace;
uint64_t iova;
+ struct list_head list;
};
struct msm_gem_object {
@@ -84,7 +86,7 @@
struct sg_table *sgt;
void *vaddr;
- struct msm_gem_vma domain[NUM_DOMAINS];
+ struct list_head domains;
/* normally (resv == &_resv) except for imported bo's */
struct reservation_object *resv;
diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
index b6a0f37..8d727fe 100644
--- a/drivers/gpu/drm/msm/msm_gem_submit.c
+++ b/drivers/gpu/drm/msm/msm_gem_submit.c
@@ -157,7 +157,7 @@
struct msm_gem_object *msm_obj = submit->bos[i].obj;
if (submit->bos[i].flags & BO_PINNED)
- msm_gem_put_iova(&msm_obj->base, submit->gpu->id);
+ msm_gem_put_iova(&msm_obj->base, submit->gpu->aspace);
if (submit->bos[i].flags & BO_LOCKED)
ww_mutex_unlock(&msm_obj->resv->lock);
@@ -245,7 +245,7 @@
/* if locking succeeded, pin bo: */
ret = msm_gem_get_iova_locked(&msm_obj->base,
- submit->gpu->id, &iova);
+ submit->gpu->aspace, &iova);
if (ret)
break;
diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c
index ded4226..49d9e10 100644
--- a/drivers/gpu/drm/msm/msm_gpu.c
+++ b/drivers/gpu/drm/msm/msm_gpu.c
@@ -457,7 +457,7 @@
struct msm_gem_object *msm_obj = submit->bos[i].obj;
/* move to inactive: */
msm_gem_move_to_inactive(&msm_obj->base);
- msm_gem_put_iova(&msm_obj->base, gpu->id);
+ msm_gem_put_iova(&msm_obj->base, gpu->aspace);
drm_gem_object_unreference(&msm_obj->base);
}
@@ -538,8 +540,7 @@
/* submit takes a reference to the bo and iova until retired: */
drm_gem_object_reference(&msm_obj->base);
msm_gem_get_iova_locked(&msm_obj->base,
- submit->gpu->id, &iova);
-
+ submit->gpu->aspace, &iova);
if (submit->bos[i].flags & MSM_SUBMIT_BO_WRITE)
msm_gem_move_to_active(&msm_obj->base, gpu, true, submit->fence);
else if (submit->bos[i].flags & MSM_SUBMIT_BO_READ)
@@ -674,8 +675,6 @@
} else {
dev_info(drm->dev, "%s: no IOMMU, fallback to VRAM carveout!\n", name);
}
- gpu->id = msm_register_address_space(drm, gpu->aspace);
-
/* Create ringbuffer: */
mutex_lock(&drm->struct_mutex);
@@ -706,7 +705,7 @@
if (gpu->rb) {
if (gpu->rb_iova)
- msm_gem_put_iova(gpu->rb->bo, gpu->id);
+ msm_gem_put_iova(gpu->rb->bo, gpu->aspace);
msm_ringbuffer_destroy(gpu->rb);
}
diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h
index c6bf5d6..13ecd72 100644
--- a/drivers/gpu/drm/msm/msm_gpu.h
+++ b/drivers/gpu/drm/msm/msm_gpu.h
@@ -99,7 +99,6 @@
int irq;
struct msm_gem_address_space *aspace;
- int id;
/* Power Control: */
struct regulator *gpu_reg, *gpu_cx;
diff --git a/drivers/gpu/drm/msm/msm_kms.h b/drivers/gpu/drm/msm/msm_kms.h
index eed0f1b..eb10d6b 100644
--- a/drivers/gpu/drm/msm/msm_kms.h
+++ b/drivers/gpu/drm/msm/msm_kms.h
@@ -93,6 +93,10 @@
struct drm_mode_object *obj, u32 event, bool en);
/* cleanup: */
void (*destroy)(struct msm_kms *kms);
+ /* get address space */
+ struct msm_gem_address_space *(*get_address_space)(
+ struct msm_kms *kms,
+ unsigned int domain);
};
struct msm_kms {
diff --git a/drivers/gpu/drm/msm/sde/sde_connector.c b/drivers/gpu/drm/msm/sde/sde_connector.c
index 2970b28..c3c5a13 100644
--- a/drivers/gpu/drm/msm/sde/sde_connector.c
+++ b/drivers/gpu/drm/msm/sde/sde_connector.c
@@ -327,8 +327,7 @@
return;
}
- msm_framebuffer_cleanup(c_state->out_fb,
- c_state->mmu_id);
+ msm_framebuffer_cleanup(c_state->out_fb, c_state->aspace);
drm_framebuffer_unreference(c_state->out_fb);
c_state->out_fb = NULL;
@@ -432,7 +431,7 @@
if (c_state->out_fb) {
drm_framebuffer_reference(c_state->out_fb);
rc = msm_framebuffer_prepare(c_state->out_fb,
- c_state->mmu_id);
+ c_state->aspace);
if (rc)
SDE_ERROR("failed to prepare fb, %d\n", rc);
}
@@ -652,14 +651,14 @@
c_conn->fb_kmap);
if (c_state->out_fb->flags & DRM_MODE_FB_SECURE)
- c_state->mmu_id =
- c_conn->mmu_id[SDE_IOMMU_DOMAIN_SECURE];
+ c_state->aspace =
+ c_conn->aspace[SDE_IOMMU_DOMAIN_SECURE];
else
- c_state->mmu_id =
- c_conn->mmu_id[SDE_IOMMU_DOMAIN_UNSECURE];
+ c_state->aspace =
+ c_conn->aspace[SDE_IOMMU_DOMAIN_UNSECURE];
rc = msm_framebuffer_prepare(c_state->out_fb,
- c_state->mmu_id);
+ c_state->aspace);
if (rc)
SDE_ERROR("prep fb failed, %d\n", rc);
}
@@ -1010,18 +1009,17 @@
c_conn->lp_mode = 0;
c_conn->last_panel_power_mode = SDE_MODE_DPMS_ON;
- /* cache mmu_id's for later */
sde_kms = to_sde_kms(priv->kms);
if (sde_kms->vbif[VBIF_NRT]) {
- c_conn->mmu_id[SDE_IOMMU_DOMAIN_UNSECURE] =
- sde_kms->mmu_id[MSM_SMMU_DOMAIN_NRT_UNSECURE];
- c_conn->mmu_id[SDE_IOMMU_DOMAIN_SECURE] =
- sde_kms->mmu_id[MSM_SMMU_DOMAIN_NRT_SECURE];
+ c_conn->aspace[SDE_IOMMU_DOMAIN_UNSECURE] =
+ sde_kms->aspace[MSM_SMMU_DOMAIN_NRT_UNSECURE];
+ c_conn->aspace[SDE_IOMMU_DOMAIN_SECURE] =
+ sde_kms->aspace[MSM_SMMU_DOMAIN_NRT_SECURE];
} else {
- c_conn->mmu_id[SDE_IOMMU_DOMAIN_UNSECURE] =
- sde_kms->mmu_id[MSM_SMMU_DOMAIN_UNSECURE];
- c_conn->mmu_id[SDE_IOMMU_DOMAIN_SECURE] =
- sde_kms->mmu_id[MSM_SMMU_DOMAIN_SECURE];
+ c_conn->aspace[SDE_IOMMU_DOMAIN_UNSECURE] =
+ sde_kms->aspace[MSM_SMMU_DOMAIN_UNSECURE];
+ c_conn->aspace[SDE_IOMMU_DOMAIN_SECURE] =
+ sde_kms->aspace[MSM_SMMU_DOMAIN_SECURE];
}
if (ops)
diff --git a/drivers/gpu/drm/msm/sde/sde_connector.h b/drivers/gpu/drm/msm/sde/sde_connector.h
index 497d0db..2318756 100644
--- a/drivers/gpu/drm/msm/sde/sde_connector.h
+++ b/drivers/gpu/drm/msm/sde/sde_connector.h
@@ -240,7 +240,7 @@
struct drm_panel *panel;
void *display;
- int mmu_id[SDE_IOMMU_DOMAIN_MAX];
+ struct msm_gem_address_space *aspace[SDE_IOMMU_DOMAIN_MAX];
char name[SDE_CONNECTOR_NAME_SIZE];
@@ -304,14 +304,14 @@
* struct sde_connector_state - private connector status structure
* @base: Base drm connector structure
* @out_fb: Pointer to output frame buffer, if applicable
- * @mmu_id: MMU ID for accessing frame buffer objects, if applicable
+ * @aspace: Address space for accessing frame buffer objects, if applicable
* @property_values: Local cache of current connector property values
* @rois: Regions of interest structure for mapping CRTC to Connector output
*/
struct sde_connector_state {
struct drm_connector_state base;
struct drm_framebuffer *out_fb;
- int mmu_id;
+ struct msm_gem_address_space *aspace;
uint64_t property_values[CONNECTOR_PROP_COUNT];
struct msm_roi_list rois;
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder_phys.h b/drivers/gpu/drm/msm/sde/sde_encoder_phys.h
index b173876..4b12651 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder_phys.h
+++ b/drivers/gpu/drm/msm/sde/sde_encoder_phys.h
@@ -333,7 +333,7 @@
* @wb_fmt: Writeback pixel format
* @frame_count: Counter of completed writeback operations
* @kickoff_count: Counter of issued writeback operations
- * @mmu_id: mmu identifier for non-secure/secure domain
+ * @aspace: address space identifier for non-secure/secure domain
* @wb_dev: Pointer to writeback device
* @start_time: Start time of writeback latest request
* @end_time: End time of writeback latest request
@@ -355,7 +355,7 @@
const struct sde_format *wb_fmt;
u32 frame_count;
u32 kickoff_count;
- int mmu_id[SDE_IOMMU_DOMAIN_MAX];
+ struct msm_gem_address_space *aspace[SDE_IOMMU_DOMAIN_MAX];
struct sde_wb_device *wb_dev;
ktime_t start_time;
ktime_t end_time;
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder_phys_wb.c b/drivers/gpu/drm/msm/sde/sde_encoder_phys_wb.c
index 54c1397..875d99d 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder_phys_wb.c
+++ b/drivers/gpu/drm/msm/sde/sde_encoder_phys_wb.c
@@ -250,7 +250,8 @@
struct sde_hw_wb_cfg *wb_cfg;
struct sde_hw_wb_cdp_cfg *cdp_cfg;
const struct msm_format *format;
- int ret, mmu_id;
+ int ret;
+ struct msm_gem_address_space *aspace;
if (!phys_enc || !phys_enc->sde_kms || !phys_enc->sde_kms->catalog) {
SDE_ERROR("invalid encoder\n");
@@ -264,9 +265,9 @@
wb_cfg->intf_mode = phys_enc->intf_mode;
wb_cfg->is_secure = (fb->flags & DRM_MODE_FB_SECURE) ? true : false;
- mmu_id = (wb_cfg->is_secure) ?
- wb_enc->mmu_id[SDE_IOMMU_DOMAIN_SECURE] :
- wb_enc->mmu_id[SDE_IOMMU_DOMAIN_UNSECURE];
+ aspace = (wb_cfg->is_secure) ?
+ wb_enc->aspace[SDE_IOMMU_DOMAIN_SECURE] :
+ wb_enc->aspace[SDE_IOMMU_DOMAIN_UNSECURE];
SDE_DEBUG("[fb_secure:%d]\n", wb_cfg->is_secure);
@@ -288,7 +289,7 @@
wb_cfg->roi = *wb_roi;
if (hw_wb->caps->features & BIT(SDE_WB_XY_ROI_OFFSET)) {
- ret = sde_format_populate_layout(mmu_id, fb, &wb_cfg->dest);
+ ret = sde_format_populate_layout(aspace, fb, &wb_cfg->dest);
if (ret) {
SDE_DEBUG("failed to populate layout %d\n", ret);
return;
@@ -297,7 +298,7 @@
wb_cfg->dest.height = fb->height;
wb_cfg->dest.num_planes = wb_cfg->dest.format->num_planes;
} else {
- ret = sde_format_populate_layout_with_roi(mmu_id, fb, wb_roi,
+ ret = sde_format_populate_layout_with_roi(aspace, fb, wb_roi,
&wb_cfg->dest);
if (ret) {
/* this error should be detected during atomic_check */
@@ -914,12 +915,19 @@
struct drm_mode_fb_cmd2 mode_cmd;
uint32_t size;
int nplanes, i, ret;
+ struct msm_gem_address_space *aspace;
if (!wb_enc || !wb_enc->base.parent || !wb_enc->base.sde_kms) {
SDE_ERROR("invalid params\n");
return -EINVAL;
}
+ aspace = wb_enc->base.sde_kms->aspace[SDE_IOMMU_DOMAIN_UNSECURE];
+ if (!aspace) {
+ SDE_ERROR("invalid address space\n");
+ return -EINVAL;
+ }
+
dev = wb_enc->base.sde_kms->dev;
if (!dev) {
SDE_ERROR("invalid dev\n");
@@ -974,8 +982,7 @@
}
/* prepare the backing buffer now so that it's available later */
- ret = msm_framebuffer_prepare(fb,
- wb_enc->mmu_id[SDE_IOMMU_DOMAIN_UNSECURE]);
+ ret = msm_framebuffer_prepare(fb, aspace);
if (!ret)
wb_enc->fb_disable = fb;
return ret;
@@ -1234,15 +1241,15 @@
phys_enc = &wb_enc->base;
if (p->sde_kms->vbif[VBIF_NRT]) {
- wb_enc->mmu_id[SDE_IOMMU_DOMAIN_UNSECURE] =
- p->sde_kms->mmu_id[MSM_SMMU_DOMAIN_NRT_UNSECURE];
- wb_enc->mmu_id[SDE_IOMMU_DOMAIN_SECURE] =
- p->sde_kms->mmu_id[MSM_SMMU_DOMAIN_NRT_SECURE];
+ wb_enc->aspace[SDE_IOMMU_DOMAIN_UNSECURE] =
+ p->sde_kms->aspace[MSM_SMMU_DOMAIN_NRT_UNSECURE];
+ wb_enc->aspace[SDE_IOMMU_DOMAIN_SECURE] =
+ p->sde_kms->aspace[MSM_SMMU_DOMAIN_NRT_SECURE];
} else {
- wb_enc->mmu_id[SDE_IOMMU_DOMAIN_UNSECURE] =
- p->sde_kms->mmu_id[MSM_SMMU_DOMAIN_UNSECURE];
- wb_enc->mmu_id[SDE_IOMMU_DOMAIN_SECURE] =
- p->sde_kms->mmu_id[MSM_SMMU_DOMAIN_SECURE];
+ wb_enc->aspace[SDE_IOMMU_DOMAIN_UNSECURE] =
+ p->sde_kms->aspace[MSM_SMMU_DOMAIN_UNSECURE];
+ wb_enc->aspace[SDE_IOMMU_DOMAIN_SECURE] =
+ p->sde_kms->aspace[MSM_SMMU_DOMAIN_SECURE];
}
hw_mdp = sde_rm_get_mdp(&p->sde_kms->rm);
diff --git a/drivers/gpu/drm/msm/sde/sde_formats.c b/drivers/gpu/drm/msm/sde/sde_formats.c
index c3477b5..04c9e79 100644
--- a/drivers/gpu/drm/msm/sde/sde_formats.c
+++ b/drivers/gpu/drm/msm/sde/sde_formats.c
@@ -818,7 +818,7 @@
}
static int _sde_format_populate_addrs_ubwc(
- int mmu_id,
+ struct msm_gem_address_space *aspace,
struct drm_framebuffer *fb,
struct sde_hw_fmt_layout *layout)
{
@@ -830,7 +830,7 @@
return -EINVAL;
}
- base_addr = msm_framebuffer_iova(fb, mmu_id, 0);
+ base_addr = msm_framebuffer_iova(fb, aspace, 0);
if (!base_addr) {
DRM_ERROR("failed to retrieve base addr\n");
return -EFAULT;
@@ -909,7 +909,7 @@
}
static int _sde_format_populate_addrs_linear(
- int mmu_id,
+ struct msm_gem_address_space *aspace,
struct drm_framebuffer *fb,
struct sde_hw_fmt_layout *layout)
{
@@ -926,7 +926,7 @@
/* Populate addresses for simple formats here */
for (i = 0; i < layout->num_planes; ++i) {
- layout->plane_addr[i] = msm_framebuffer_iova(fb, mmu_id, i);
+ layout->plane_addr[i] = msm_framebuffer_iova(fb, aspace, i);
if (!layout->plane_addr[i]) {
DRM_ERROR("failed to retrieve base addr\n");
return -EFAULT;
@@ -937,7 +937,7 @@
}
int sde_format_populate_layout(
- int mmu_id,
+ struct msm_gem_address_space *aspace,
struct drm_framebuffer *fb,
struct sde_hw_fmt_layout *layout)
{
@@ -969,9 +969,9 @@
/* Populate the addresses given the fb */
if (SDE_FORMAT_IS_UBWC(layout->format) ||
SDE_FORMAT_IS_TILE(layout->format))
- ret = _sde_format_populate_addrs_ubwc(mmu_id, fb, layout);
+ ret = _sde_format_populate_addrs_ubwc(aspace, fb, layout);
else
- ret = _sde_format_populate_addrs_linear(mmu_id, fb, layout);
+ ret = _sde_format_populate_addrs_linear(aspace, fb, layout);
/* check if anything changed */
if (!ret && !memcmp(plane_addr, layout->plane_addr, sizeof(plane_addr)))
@@ -1013,14 +1013,14 @@
}
int sde_format_populate_layout_with_roi(
- int mmu_id,
+ struct msm_gem_address_space *aspace,
struct drm_framebuffer *fb,
struct sde_rect *roi,
struct sde_hw_fmt_layout *layout)
{
int ret;
- ret = sde_format_populate_layout(mmu_id, fb, layout);
+ ret = sde_format_populate_layout(aspace, fb, layout);
if (ret || !roi)
return ret;
diff --git a/drivers/gpu/drm/msm/sde/sde_formats.h b/drivers/gpu/drm/msm/sde/sde_formats.h
index 40aab22..2333a72 100644
--- a/drivers/gpu/drm/msm/sde/sde_formats.h
+++ b/drivers/gpu/drm/msm/sde/sde_formats.h
@@ -14,6 +14,7 @@
#define _SDE_FORMATS_H
#include <drm/drm_fourcc.h>
+#include "msm_gem.h"
#include "sde_hw_mdss.h"
/**
@@ -103,7 +104,7 @@
/**
* sde_format_populate_layout - populate the given format layout based on
* mmu, fb, and format found in the fb
- * @mmu_id: mmu id handle
+ * @aspace: address space pointer
* @fb: framebuffer pointer
* @fmtl: format layout structure to populate
*
@@ -111,14 +112,14 @@
* are the same as before or 0 if new addresses were populated
*/
int sde_format_populate_layout(
- int mmu_id,
+ struct msm_gem_address_space *aspace,
struct drm_framebuffer *fb,
struct sde_hw_fmt_layout *fmtl);
/**
* sde_format_populate_layout_with_roi - populate the given format layout
* based on mmu, fb, roi, and format found in the fb
- * @mmu_id: mmu id handle
+ * @aspace: address space pointer
* @fb: framebuffer pointer
* @roi: region of interest (optional)
* @fmtl: format layout structure to populate
@@ -126,7 +127,7 @@
* Return: error code on failure, 0 on success
*/
int sde_format_populate_layout_with_roi(
- int mmu_id,
+ struct msm_gem_address_space *aspace,
struct drm_framebuffer *fb,
struct sde_rect *roi,
struct sde_hw_fmt_layout *fmtl);
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_reg_dma_v1.c b/drivers/gpu/drm/msm/sde/sde_hw_reg_dma_v1.c
index dbd435b..9bc9837 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_reg_dma_v1.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_reg_dma_v1.c
@@ -13,6 +13,7 @@
#include "sde_hw_ctl.h"
#include "sde_hw_reg_dma_v1.h"
#include "msm_drv.h"
+#include "msm_mmu.h"
#define GUARD_BYTES (BIT(8) - 1)
#define ALIGNED_OFFSET (U32_MAX & ~(GUARD_BYTES))
@@ -582,6 +583,7 @@
struct sde_reg_dma_buffer *dma_buf = NULL;
u32 iova_aligned, offset;
u32 rsize = size + GUARD_BYTES;
+ struct msm_gem_address_space *aspace = NULL;
int rc = 0;
if (!size || SIZE_DWORD(size) > MAX_DWORDS_SZ) {
@@ -602,7 +604,15 @@
goto fail;
}
- rc = msm_gem_get_iova(dma_buf->buf, 0, &dma_buf->iova);
+ aspace = msm_gem_smmu_address_space_get(reg_dma->drm_dev,
+ MSM_SMMU_DOMAIN_UNSECURE);
+ if (!aspace) {
+ DRM_ERROR("failed to get aspace\n");
+ rc = -EINVAL;
+ goto free_gem;
+ }
+
+ rc = msm_gem_get_iova(dma_buf->buf, aspace, &dma_buf->iova);
if (rc) {
DRM_ERROR("failed to get the iova rc %d\n", rc);
goto free_gem;
@@ -625,7 +635,7 @@
return dma_buf;
put_iova:
- msm_gem_put_iova(dma_buf->buf, 0);
+ msm_gem_put_iova(dma_buf->buf, aspace);
free_gem:
msm_gem_free_object(dma_buf->buf);
fail:
diff --git a/drivers/gpu/drm/msm/sde/sde_kms.c b/drivers/gpu/drm/msm/sde/sde_kms.c
index 78ea685..abb378d 100644
--- a/drivers/gpu/drm/msm/sde/sde_kms.c
+++ b/drivers/gpu/drm/msm/sde/sde_kms.c
@@ -1414,6 +1414,29 @@
return drm_atomic_helper_check(dev, state);
}
+static struct msm_gem_address_space*
+_sde_kms_get_address_space(struct msm_kms *kms,
+ unsigned int domain)
+{
+ struct sde_kms *sde_kms;
+
+ if (!kms) {
+ SDE_ERROR("invalid kms\n");
+ return NULL;
+ }
+
+ sde_kms = to_sde_kms(kms);
+ if (!sde_kms) {
+ SDE_ERROR("invalid sde_kms\n");
+ return NULL;
+ }
+
+ if (domain >= MSM_SMMU_DOMAIN_MAX)
+ return NULL;
+
+ return sde_kms->aspace[domain];
+}
+
static const struct msm_kms_funcs kms_funcs = {
.hw_init = sde_kms_hw_init,
.postinit = sde_kms_postinit,
@@ -1436,6 +1459,7 @@
.round_pixclk = sde_kms_round_pixclk,
.destroy = sde_kms_destroy,
.register_events = _sde_kms_register_events,
+ .get_address_space = _sde_kms_get_address_space,
};
/* the caller api needs to turn on clock before calling it */
@@ -1449,17 +1473,17 @@
struct msm_mmu *mmu;
int i;
- for (i = ARRAY_SIZE(sde_kms->mmu_id) - 1; i >= 0; i--) {
- mmu = sde_kms->aspace[i]->mmu;
-
- if (!mmu)
+ for (i = ARRAY_SIZE(sde_kms->aspace) - 1; i >= 0; i--) {
+ if (!sde_kms->aspace[i])
continue;
+ mmu = sde_kms->aspace[i]->mmu;
+
mmu->funcs->detach(mmu, (const char **)iommu_ports,
ARRAY_SIZE(iommu_ports));
msm_gem_address_space_destroy(sde_kms->aspace[i]);
- sde_kms->mmu_id[i] = 0;
+ sde_kms->aspace[i] = NULL;
}
return 0;
@@ -1499,17 +1523,6 @@
goto fail;
}
- sde_kms->mmu_id[i] = msm_register_address_space(sde_kms->dev,
- aspace);
- if (sde_kms->mmu_id[i] < 0) {
- ret = sde_kms->mmu_id[i];
- SDE_ERROR("failed to register sde iommu %d: %d\n",
- i, ret);
- mmu->funcs->detach(mmu, (const char **)iommu_ports,
- ARRAY_SIZE(iommu_ports));
- msm_gem_address_space_destroy(aspace);
- goto fail;
- }
}
return 0;
diff --git a/drivers/gpu/drm/msm/sde/sde_kms.h b/drivers/gpu/drm/msm/sde/sde_kms.h
index 0c5c286..d818fdf 100644
--- a/drivers/gpu/drm/msm/sde/sde_kms.h
+++ b/drivers/gpu/drm/msm/sde/sde_kms.h
@@ -160,7 +160,6 @@
struct sde_mdss_cfg *catalog;
struct msm_gem_address_space *aspace[MSM_SMMU_DOMAIN_MAX];
- int mmu_id[MSM_SMMU_DOMAIN_MAX];
struct sde_power_client *core_client;
struct ion_client *iclient;
diff --git a/drivers/gpu/drm/msm/sde/sde_plane.c b/drivers/gpu/drm/msm/sde/sde_plane.c
index 2a98af4..fb3523d 100644
--- a/drivers/gpu/drm/msm/sde/sde_plane.c
+++ b/drivers/gpu/drm/msm/sde/sde_plane.c
@@ -115,6 +115,7 @@
/*
* struct sde_plane - local sde plane structure
+ * @aspace: address space pointer
* @csc_cfg: Decoded user configuration for csc
* @csc_usr_ptr: Points to csc_cfg if valid user config available
* @csc_ptr: Points to sde_csc_cfg structure to use for current
@@ -129,7 +130,7 @@
struct sde_plane {
struct drm_plane base;
- int mmu_id;
+ struct msm_gem_address_space *aspace;
struct mutex lock;
@@ -888,7 +889,7 @@
return;
}
- ret = sde_format_populate_layout(psde->mmu_id, fb, &pipe_cfg->layout);
+ ret = sde_format_populate_layout(psde->aspace, fb, &pipe_cfg->layout);
if (ret == -EAGAIN)
SDE_DEBUG_PLANE(psde, "not updating same src addrs\n");
else if (ret)
@@ -1801,7 +1802,7 @@
struct sde_hw_fmt_layout layout;
memset(&layout, 0, sizeof(struct sde_hw_fmt_layout));
- sde_format_populate_layout(rstate->mmu_id, state->fb,
+ sde_format_populate_layout(rstate->aspace, state->fb,
&layout);
for (i = 0; i < ARRAY_SIZE(rot_cmd->src_iova); i++) {
rot_cmd->src_iova[i] = layout.plane_addr[i];
@@ -1810,7 +1811,7 @@
rot_cmd->src_planes = layout.num_planes;
memset(&layout, 0, sizeof(struct sde_hw_fmt_layout));
- sde_format_populate_layout(rstate->mmu_id, rstate->out_fb,
+ sde_format_populate_layout(rstate->aspace, rstate->out_fb,
&layout);
for (i = 0; i < ARRAY_SIZE(rot_cmd->dst_iova); i++) {
rot_cmd->dst_iova[i] = layout.plane_addr[i];
@@ -1950,6 +1951,7 @@
struct sde_plane_state *new_pstate = to_sde_plane_state(new_state);
struct sde_plane_rot_state *new_rstate = &new_pstate->rot;
struct drm_crtc_state *cstate;
+ struct sde_kms *kms = _sde_plane_get_kms(plane);
int ret;
SDE_DEBUG("plane%d.%d FB[%u] sbuf:%d rot:%d crtc:%d\n",
@@ -1958,6 +1960,9 @@
!!new_rstate->out_sbuf, !!new_rstate->rot_hw,
sde_plane_crtc_enabled(new_state));
+ if (!kms)
+ return -EINVAL;
+
if (!new_rstate->out_sbuf || !new_rstate->rot_hw)
return 0;
@@ -2005,9 +2010,11 @@
new_rstate->sequence_id);
if (new_state->fb->flags & DRM_MODE_FB_SECURE)
- new_rstate->mmu_id = MSM_SMMU_DOMAIN_SECURE;
+ new_rstate->aspace =
+ kms->aspace[MSM_SMMU_DOMAIN_SECURE];
else
- new_rstate->mmu_id = MSM_SMMU_DOMAIN_UNSECURE;
+ new_rstate->aspace =
+ kms->aspace[MSM_SMMU_DOMAIN_UNSECURE];
/* check if out_fb is already attached to rotator */
new_rstate->out_fbo = sde_kms_fbo_alloc(plane->dev, fb_w, fb_h,
@@ -2046,7 +2053,7 @@
}
/* prepare rotator input buffer */
- ret = msm_framebuffer_prepare(new_state->fb, new_rstate->mmu_id);
+ ret = msm_framebuffer_prepare(new_state->fb, new_rstate->aspace);
if (ret) {
SDE_ERROR("failed to prepare input framebuffer\n");
goto error_prepare_input_buffer;
@@ -2058,7 +2065,7 @@
new_rstate->sequence_id);
ret = msm_framebuffer_prepare(new_rstate->out_fb,
- new_rstate->mmu_id);
+ new_rstate->aspace);
if (ret) {
SDE_ERROR("failed to prepare inline framebuffer\n");
goto error_prepare_output_buffer;
@@ -2068,7 +2075,7 @@
return 0;
error_prepare_output_buffer:
- msm_framebuffer_cleanup(new_state->fb, new_rstate->mmu_id);
+ msm_framebuffer_cleanup(new_state->fb, new_rstate->aspace);
error_prepare_input_buffer:
sde_crtc_res_put(cstate, SDE_CRTC_RES_ROT_OUT_FB,
(u64) &new_rstate->rot_hw->base);
@@ -2124,7 +2131,7 @@
if (sde_plane_enabled(old_state)) {
if (old_rstate->out_fb) {
msm_framebuffer_cleanup(old_rstate->out_fb,
- old_rstate->mmu_id);
+ old_rstate->aspace);
sde_crtc_res_put(cstate, SDE_CRTC_RES_ROT_OUT_FB,
(u64) &old_rstate->rot_hw->base);
old_rstate->out_fb = NULL;
@@ -2133,7 +2140,7 @@
old_rstate->out_fbo = NULL;
}
- msm_framebuffer_cleanup(old_state->fb, old_rstate->mmu_id);
+ msm_framebuffer_cleanup(old_state->fb, old_rstate->aspace);
}
}
@@ -2163,6 +2170,7 @@
old_pstate = to_sde_plane_state(plane->state);
rstate = &pstate->rot;
old_rstate = &old_pstate->rot;
+ rstate->aspace = psde->aspace;
/* cstate will be null if crtc is disconnected from plane */
cstate = _sde_plane_get_crtc_state(state);
@@ -2657,14 +2665,14 @@
new_rstate = &to_sde_plane_state(new_state)->rot;
- ret = msm_framebuffer_prepare(new_rstate->out_fb, new_rstate->mmu_id);
+ ret = msm_framebuffer_prepare(new_rstate->out_fb, psde->aspace);
if (ret) {
SDE_ERROR("failed to prepare framebuffer\n");
return ret;
}
/* validate framebuffer layout before commit */
- ret = sde_format_populate_layout(new_rstate->mmu_id,
+ ret = sde_format_populate_layout(psde->aspace,
new_rstate->out_fb, &layout);
if (ret) {
SDE_ERROR_PLANE(psde, "failed to get format layout, %d\n", ret);
@@ -2687,7 +2695,7 @@
old_rstate = &to_sde_plane_state(old_state)->rot;
- msm_framebuffer_cleanup(old_rstate->out_fb, old_rstate->mmu_id);
+ msm_framebuffer_cleanup(old_rstate->out_fb, old_rstate->aspace);
sde_plane_rot_cleanup_fb(plane, old_state);
}
@@ -4481,7 +4489,7 @@
/* cache local stuff for later */
plane = &psde->base;
psde->pipe = pipe;
- psde->mmu_id = kms->mmu_id[MSM_SMMU_DOMAIN_UNSECURE];
+ psde->aspace = kms->aspace[MSM_SMMU_DOMAIN_UNSECURE];
psde->is_virtual = (master_plane_id != 0);
psde->scaler_check_state = SDE_PLANE_SCLCHECK_NONE;
INIT_LIST_HEAD(&psde->mplane_list);
diff --git a/drivers/gpu/drm/msm/sde/sde_plane.h b/drivers/gpu/drm/msm/sde/sde_plane.h
index f83a891..46784e7 100644
--- a/drivers/gpu/drm/msm/sde/sde_plane.h
+++ b/drivers/gpu/drm/msm/sde/sde_plane.h
@@ -34,7 +34,7 @@
* @rot90: true if rotation of 90 degree is required
* @hflip: true if horizontal flip is required
* @vflip: true if vertical flip is required
- * @mmu_id: iommu identifier for input/output buffers
+ * @aspace: address space pointer for input/output buffers
* @rot_cmd: rotator configuration command
* @nplane: total number of drm plane attached to rotator
* @in_fb: input fb attached to rotator
@@ -64,7 +64,7 @@
bool rot90;
bool hflip;
bool vflip;
- u32 mmu_id;
+ struct msm_gem_address_space *aspace;
struct sde_hw_rot_cmd rot_cmd;
int nplane;
/* input */