blob: 730fc065795c9f77a82d1669bd1caca596400a64 [file] [log] [blame]
/*
 * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */
18
19#include <linux/module.h>
20#include <linux/of_platform.h>
21#include <linux/pm_runtime.h>
22#include <linux/msm_dma_iommu_mapping.h>
23
24#include <asm/dma-iommu.h>
25#include <soc/qcom/secure_buffer.h>
26
27#include "msm_drv.h"
Clarence Ip24b7c362017-05-14 17:03:50 -040028#include "msm_gem.h"
Adrian Salido-Moreno48ebb792015-10-02 15:54:46 -070029#include "msm_mmu.h"
Alan Kwongc16e0922017-05-11 14:50:46 -070030#include "sde_dbg.h"
Adrian Salido-Moreno48ebb792015-10-02 15:54:46 -070031
#ifndef SZ_4G
/* 4 GiB; not provided by <linux/sizes.h> on all kernels, so define locally. */
#define SZ_4G (((size_t) SZ_1G) * 4)
#endif
35
/**
 * struct msm_smmu_client - per context-bank SMMU client state
 * @dev: device of the SMMU context bank (or the DRM device for the
 *       unsecure domain, which is created without a child platform dev)
 * @mmu_mapping: ARM IOMMU mapping covering this client's VA range
 * @domain_attached: true once arm_iommu_attach_device() has succeeded;
 *                   used to make attach/detach idempotent
 * @secure: true if the backing domain was configured with a secure VMID
 */
struct msm_smmu_client {
	struct device *dev;
	struct dma_iommu_mapping *mmu_mapping;
	bool domain_attached;
	bool secure;
};
42
/**
 * struct msm_smmu - msm_mmu wrapper around one SMMU client
 * @base: generic msm_mmu interface (ops table in @funcs below)
 * @client_dev: child platform device backing the client, or NULL for the
 *              unsecure domain (see msm_smmu_device_create())
 * @client: the client state; resolved via msm_smmu_to_client()
 */
struct msm_smmu {
	struct msm_mmu base;
	struct device *client_dev;
	struct msm_smmu_client *client;
};
48
/**
 * struct msm_smmu_domain - static description of one IOMMU domain
 * @label: human-readable name used in log messages
 * @va_start: start of the IOVA range handed to arm_iommu_create_mapping()
 * @va_size: size of the IOVA range
 * @secure: whether the domain must be bound to a secure VMID
 */
struct msm_smmu_domain {
	const char *label;
	size_t va_start;
	size_t va_size;
	bool secure;
};
55
/* Upcast from the embedded msm_mmu to the containing msm_smmu. */
#define to_msm_smmu(x) container_of(x, struct msm_smmu, base)
/* Fetch the client; may be NULL if device creation failed part-way. */
#define msm_smmu_to_client(smmu) (smmu->client)

static int _msm_smmu_create_mapping(struct msm_smmu_client *client,
		const struct msm_smmu_domain *domain);
61
Dhaval Patel04c7e8e2016-09-26 20:14:31 -070062static int msm_smmu_attach(struct msm_mmu *mmu, const char * const *names,
63 int cnt)
Adrian Salido-Moreno48ebb792015-10-02 15:54:46 -070064{
65 struct msm_smmu *smmu = to_msm_smmu(mmu);
66 struct msm_smmu_client *client = msm_smmu_to_client(smmu);
67 int rc = 0;
68
Alan Kwong112a84f2016-05-24 20:49:21 -040069 if (!client) {
70 pr_err("undefined smmu client\n");
71 return -EINVAL;
72 }
73
Adrian Salido-Moreno48ebb792015-10-02 15:54:46 -070074 /* domain attach only once */
75 if (client->domain_attached)
76 return 0;
77
78 rc = arm_iommu_attach_device(client->dev,
79 client->mmu_mapping);
80 if (rc) {
81 dev_err(client->dev, "iommu attach dev failed (%d)\n",
82 rc);
83 return rc;
84 }
85
86 client->domain_attached = true;
87
88 dev_dbg(client->dev, "iommu domain attached\n");
89
90 return 0;
91}
92
Dhaval Patel04c7e8e2016-09-26 20:14:31 -070093static void msm_smmu_detach(struct msm_mmu *mmu, const char * const *names,
94 int cnt)
Adrian Salido-Moreno48ebb792015-10-02 15:54:46 -070095{
Lloyd Atkinson479690d2016-10-04 09:51:22 -040096 struct msm_smmu *smmu = to_msm_smmu(mmu);
97 struct msm_smmu_client *client = msm_smmu_to_client(smmu);
98
99 if (!client) {
100 pr_err("undefined smmu client\n");
101 return;
102 }
103
104 if (!client->domain_attached)
105 return;
106
107 arm_iommu_detach_device(client->dev);
108 client->domain_attached = false;
109 dev_dbg(client->dev, "iommu domain detached\n");
Adrian Salido-Moreno48ebb792015-10-02 15:54:46 -0700110}
111
/*
 * Map a scatterlist into the client's IOMMU domain starting at @iova.
 * On partial failure, every entry mapped so far is unmapped before
 * returning the error, so the domain is left unchanged.
 *
 * NOTE(review): pa is truncated to u32 — looks wrong for physical
 * addresses above 4 GiB (LPAE); confirm target phys address width.
 */
static int msm_smmu_map(struct msm_mmu *mmu, uint32_t iova,
		struct sg_table *sgt, int prot)
{
	struct msm_smmu *smmu = to_msm_smmu(mmu);
	struct msm_smmu_client *client = msm_smmu_to_client(smmu);
	struct iommu_domain *domain;
	struct scatterlist *sg;
	unsigned int da = iova;
	unsigned int i, j;
	int ret;

	if (!client)
		return -ENODEV;

	domain = client->mmu_mapping->domain;
	if (!domain || !sgt)
		return -EINVAL;

	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		/* map whole pages: back up to page start, extend length */
		u32 pa = sg_phys(sg) - sg->offset;
		size_t bytes = sg->length + sg->offset;

		VERB("map[%d]: %08x %08x(%zx)", i, iova, pa, bytes);

		ret = iommu_map(domain, da, pa, bytes, prot);
		if (ret)
			goto fail;

		da += bytes;
	}

	return 0;

fail:
	/* roll back the i entries that were successfully mapped */
	da = iova;

	for_each_sg(sgt->sgl, sg, i, j) {
		size_t bytes = sg->length + sg->offset;

		iommu_unmap(domain, da, bytes);
		da += bytes;
	}
	return ret;
}
156
157static int msm_smmu_map_sg(struct msm_mmu *mmu, struct sg_table *sgt,
158 enum dma_data_direction dir)
159{
160 struct msm_smmu *smmu = to_msm_smmu(mmu);
161 struct msm_smmu_client *client = msm_smmu_to_client(smmu);
162 int ret;
163
164 ret = dma_map_sg(client->dev, sgt->sgl, sgt->nents, dir);
165 if (ret != sgt->nents)
166 return -ENOMEM;
167
168 return 0;
169}
170
171static void msm_smmu_unmap_sg(struct msm_mmu *mmu, struct sg_table *sgt,
172 enum dma_data_direction dir)
173{
174 struct msm_smmu *smmu = to_msm_smmu(mmu);
175 struct msm_smmu_client *client = msm_smmu_to_client(smmu);
176
177 dma_unmap_sg(client->dev, sgt->sgl, sgt->nents, dir);
178}
179
180static int msm_smmu_unmap(struct msm_mmu *mmu, uint32_t iova,
Jordan Crousea375c882017-02-13 10:14:10 -0700181 struct sg_table *sgt)
Adrian Salido-Moreno48ebb792015-10-02 15:54:46 -0700182{
183 struct msm_smmu *smmu = to_msm_smmu(mmu);
184 struct msm_smmu_client *client = msm_smmu_to_client(smmu);
185 struct iommu_domain *domain;
186 struct scatterlist *sg;
187 unsigned int da = iova;
188 int i;
189
190 if (!client)
191 return -ENODEV;
192
193 domain = client->mmu_mapping->domain;
194 if (!domain || !sgt)
195 return -EINVAL;
196
197 for_each_sg(sgt->sgl, sg, sgt->nents, i) {
198 size_t bytes = sg->length + sg->offset;
199 size_t unmapped;
200
201 unmapped = iommu_unmap(domain, da, bytes);
202 if (unmapped < bytes)
203 return unmapped;
204
205 VERB("unmap[%d]: %08x(%zx)", i, iova, bytes);
206
207 WARN_ON(!PAGE_ALIGNED(bytes));
208
209 da += bytes;
210 }
211
212 return 0;
213}
214
215static void msm_smmu_destroy(struct msm_mmu *mmu)
216{
217 struct msm_smmu *smmu = to_msm_smmu(mmu);
218 struct platform_device *pdev = to_platform_device(smmu->client_dev);
219
Lloyd Atkinson479690d2016-10-04 09:51:22 -0400220 if (smmu->client_dev)
221 platform_device_unregister(pdev);
Adrian Salido-Moreno48ebb792015-10-02 15:54:46 -0700222 kfree(smmu);
223}
224
225static int msm_smmu_map_dma_buf(struct msm_mmu *mmu, struct sg_table *sgt,
Clarence Ip24b7c362017-05-14 17:03:50 -0400226 struct dma_buf *dma_buf, int dir, u32 flags)
Adrian Salido-Moreno48ebb792015-10-02 15:54:46 -0700227{
228 struct msm_smmu *smmu = to_msm_smmu(mmu);
229 struct msm_smmu_client *client = msm_smmu_to_client(smmu);
Clarence Ip24b7c362017-05-14 17:03:50 -0400230 unsigned long attrs = 0x0;
Adrian Salido-Moreno48ebb792015-10-02 15:54:46 -0700231 int ret;
232
Narendra Muppallaec11a0a2017-06-15 15:35:17 -0700233 if (!sgt || !client) {
234 DRM_ERROR("sg table is invalid\n");
235 return -ENOMEM;
236 }
237
Clarence Ip24b7c362017-05-14 17:03:50 -0400238 if (flags & MSM_BO_KEEPATTRS)
239 attrs |= DMA_ATTR_IOMMU_USE_UPSTREAM_HINT;
240
241 ret = msm_dma_map_sg_attrs(client->dev, sgt->sgl, sgt->nents, dir,
242 dma_buf, attrs);
Adrian Salido-Moreno48ebb792015-10-02 15:54:46 -0700243 if (ret != sgt->nents) {
244 DRM_ERROR("dma map sg failed\n");
245 return -ENOMEM;
246 }
247
Alan Kwongc16e0922017-05-11 14:50:46 -0700248 if (sgt && sgt->sgl) {
249 DRM_DEBUG("%pad/0x%x/0x%x/0x%lx\n", &sgt->sgl->dma_address,
250 sgt->sgl->dma_length, dir, attrs);
251 SDE_EVT32(sgt->sgl->dma_address, sgt->sgl->dma_length,
252 dir, attrs);
253 }
254
Adrian Salido-Moreno48ebb792015-10-02 15:54:46 -0700255 return 0;
256}
257
258
259static void msm_smmu_unmap_dma_buf(struct msm_mmu *mmu, struct sg_table *sgt,
260 struct dma_buf *dma_buf, int dir)
261{
262 struct msm_smmu *smmu = to_msm_smmu(mmu);
263 struct msm_smmu_client *client = msm_smmu_to_client(smmu);
264
Narendra Muppallaec11a0a2017-06-15 15:35:17 -0700265 if (!sgt || !client) {
266 DRM_ERROR("sg table is invalid\n");
267 return;
268 }
269
Alan Kwongc16e0922017-05-11 14:50:46 -0700270 if (sgt && sgt->sgl) {
271 DRM_DEBUG("%pad/0x%x/0x%x\n", &sgt->sgl->dma_address,
272 sgt->sgl->dma_length, dir);
273 SDE_EVT32(sgt->sgl->dma_address, sgt->sgl->dma_length, dir);
274 }
275
Adrian Salido-Moreno48ebb792015-10-02 15:54:46 -0700276 msm_dma_unmap_sg(client->dev, sgt->sgl, sgt->nents, dir, dma_buf);
277}
278
Abhijit Kulkarni329a94d2017-06-20 17:07:08 -0700279static bool msm_smmu_is_domain_secure(struct msm_mmu *mmu)
280{
281 struct msm_smmu *smmu = to_msm_smmu(mmu);
282 struct msm_smmu_client *client = msm_smmu_to_client(smmu);
283
284 return client->secure;
285}
286
/* msm_mmu ops table installed by msm_smmu_new() via msm_mmu_init(). */
static const struct msm_mmu_funcs funcs = {
	.attach = msm_smmu_attach,
	.detach = msm_smmu_detach,
	.map = msm_smmu_map,
	.map_sg = msm_smmu_map_sg,
	.unmap_sg = msm_smmu_unmap_sg,
	.unmap = msm_smmu_unmap,
	.map_dma_buf = msm_smmu_map_dma_buf,
	.unmap_dma_buf = msm_smmu_unmap_dma_buf,
	.destroy = msm_smmu_destroy,
	.is_domain_secure = msm_smmu_is_domain_secure,
};
299
/*
 * Static domain table, indexed by enum msm_mmu_domain_type.  All domains
 * span [128K, 4G); the first 128K is excluded so that IOVA 0 (and other
 * low addresses) are never handed out.
 */
static struct msm_smmu_domain msm_smmu_domains[MSM_SMMU_DOMAIN_MAX] = {
	[MSM_SMMU_DOMAIN_UNSECURE] = {
		.label = "mdp_ns",
		.va_start = SZ_128K,
		.va_size = SZ_4G - SZ_128K,
		.secure = false,
	},
	[MSM_SMMU_DOMAIN_SECURE] = {
		.label = "mdp_s",
		.va_start = SZ_128K,
		.va_size = SZ_4G - SZ_128K,
		.secure = true,
	},
	[MSM_SMMU_DOMAIN_NRT_UNSECURE] = {
		.label = "rot_ns",
		.va_start = SZ_128K,
		.va_size = SZ_4G - SZ_128K,
		.secure = false,
	},
	[MSM_SMMU_DOMAIN_NRT_SECURE] = {
		.label = "rot_s",
		.va_start = SZ_128K,
		.va_size = SZ_4G - SZ_128K,
		.secure = true,
	},
};
326
/* DT compatibles, each bound 1:1 to an entry of msm_smmu_domains[]. */
static const struct of_device_id msm_smmu_dt_match[] = {
	{ .compatible = "qcom,smmu_sde_unsec",
		.data = &msm_smmu_domains[MSM_SMMU_DOMAIN_UNSECURE] },
	{ .compatible = "qcom,smmu_sde_sec",
		.data = &msm_smmu_domains[MSM_SMMU_DOMAIN_SECURE] },
	{ .compatible = "qcom,smmu_sde_nrt_unsec",
		.data = &msm_smmu_domains[MSM_SMMU_DOMAIN_NRT_UNSECURE] },
	{ .compatible = "qcom,smmu_sde_nrt_sec",
		.data = &msm_smmu_domains[MSM_SMMU_DOMAIN_NRT_SECURE] },
	{}
};
MODULE_DEVICE_TABLE(of, msm_smmu_dt_match);
339
/*
 * Create (or directly set up) the device backing one SMMU domain.
 *
 * For MSM_SMMU_DOMAIN_UNSECURE the mapping is created on @dev itself and
 * NULL is returned (no child platform device).  For all other domains a
 * child platform device is created from the matching DT node; its probe
 * (msm_smmu_probe) creates the mapping and stores the client in drvdata.
 *
 * Returns the child device, NULL for the unsecure domain, or ERR_PTR on
 * failure.
 *
 * NOTE(review): the reference taken by of_find_compatible_node() does
 * not appear to be dropped with of_node_put() — verify.
 */
static struct device *msm_smmu_device_create(struct device *dev,
		enum msm_mmu_domain_type domain,
		struct msm_smmu *smmu)
{
	struct device_node *child;
	struct platform_device *pdev;
	int i;
	const char *compat = NULL;

	/* find the compatible string bound to this domain's table entry */
	for (i = 0; i < ARRAY_SIZE(msm_smmu_dt_match); i++) {
		if (msm_smmu_dt_match[i].data == &msm_smmu_domains[domain]) {
			compat = msm_smmu_dt_match[i].compatible;
			break;
		}
	}

	if (!compat) {
		DRM_DEBUG("unable to find matching domain for %d\n", domain);
		return ERR_PTR(-ENOENT);
	}
	DRM_DEBUG("found domain %d compat: %s\n", domain, compat);

	if (domain == MSM_SMMU_DOMAIN_UNSECURE) {
		int rc;

		smmu->client = devm_kzalloc(dev,
				sizeof(struct msm_smmu_client), GFP_KERNEL);
		if (!smmu->client)
			return ERR_PTR(-ENOMEM);

		smmu->client->dev = dev;

		rc = _msm_smmu_create_mapping(msm_smmu_to_client(smmu),
				msm_smmu_dt_match[i].data);
		if (rc) {
			devm_kfree(dev, smmu->client);
			smmu->client = NULL;
			return ERR_PTR(rc);
		}

		/* NULL signals "no child device" to the caller */
		return NULL;
	}

	child = of_find_compatible_node(dev->of_node, NULL, compat);
	if (!child) {
		DRM_DEBUG("unable to find compatible node for %s\n", compat);
		return ERR_PTR(-ENODEV);
	}

	pdev = of_platform_device_create(child, NULL, dev);
	if (!pdev) {
		DRM_ERROR("unable to create smmu platform dev for domain %d\n",
				domain);
		return ERR_PTR(-ENODEV);
	}

	/* client was populated by msm_smmu_probe() during device creation */
	smmu->client = platform_get_drvdata(pdev);

	return &pdev->dev;
}
400
401struct msm_mmu *msm_smmu_new(struct device *dev,
402 enum msm_mmu_domain_type domain)
403{
404 struct msm_smmu *smmu;
405 struct device *client_dev;
406
407 smmu = kzalloc(sizeof(*smmu), GFP_KERNEL);
408 if (!smmu)
409 return ERR_PTR(-ENOMEM);
410
411 client_dev = msm_smmu_device_create(dev, domain, smmu);
Alan Kwong112a84f2016-05-24 20:49:21 -0400412 if (IS_ERR(client_dev)) {
413 kfree(smmu);
Adrian Salido-Moreno48ebb792015-10-02 15:54:46 -0700414 return (void *)client_dev ? : ERR_PTR(-ENODEV);
Alan Kwong112a84f2016-05-24 20:49:21 -0400415 }
Adrian Salido-Moreno48ebb792015-10-02 15:54:46 -0700416
417 smmu->client_dev = client_dev;
418 msm_mmu_init(&smmu->base, dev, &funcs);
419
420 return &smmu->base;
421}
422
/*
 * IOMMU fault handler: log the faulting IOVA/flags, record an SDE event
 * and trigger a register dump (without panicking), then return an error
 * so the SMMU driver continues with its own default fault reporting.
 * @token is the msm_smmu_client installed in _msm_smmu_create_mapping().
 */
static int msm_smmu_fault_handler(struct iommu_domain *domain,
		struct device *dev, unsigned long iova,
		int flags, void *token)
{
	struct msm_smmu_client *client;
	int rc = -EINVAL;

	if (!token) {
		DRM_ERROR("Error: token is NULL\n");
		return -EINVAL;
	}

	client = (struct msm_smmu_client *)token;

	/* see iommu.h for fault flags definition */
	SDE_EVT32(iova, flags);
	DRM_ERROR("trigger dump, iova=0x%08lx, flags=0x%x\n", iova, flags);
	DRM_ERROR("SMMU device:%s", client->dev ? client->dev->kobj.name : "");

	/* generate dump, but no panic */
	SDE_DBG_DUMP("sde", "dsi0_ctrl", "dsi0_phy", "dsi1_ctrl",
			"dsi1_phy", "vbif", "dbg_bus",
			"vbif_dbg_bus");

	/*
	 * return a non-zero code (-EINVAL) so the smmu driver also dumps
	 * out its own useful debug info.
	 */
	return rc;
}
453
Adrian Salido-Moreno48ebb792015-10-02 15:54:46 -0700454static int _msm_smmu_create_mapping(struct msm_smmu_client *client,
455 const struct msm_smmu_domain *domain)
456{
Adrian Salido-Moreno48ebb792015-10-02 15:54:46 -0700457 int rc;
458
459 client->mmu_mapping = arm_iommu_create_mapping(&platform_bus_type,
460 domain->va_start, domain->va_size);
461 if (IS_ERR(client->mmu_mapping)) {
462 dev_err(client->dev,
463 "iommu create mapping failed for domain=%s\n",
464 domain->label);
465 return PTR_ERR(client->mmu_mapping);
466 }
467
Adrian Salido-Moreno48ebb792015-10-02 15:54:46 -0700468 if (domain->secure) {
469 int secure_vmid = VMID_CP_PIXEL;
470
Abhijit Kulkarni329a94d2017-06-20 17:07:08 -0700471 client->secure = true;
Adrian Salido-Moreno48ebb792015-10-02 15:54:46 -0700472 rc = iommu_domain_set_attr(client->mmu_mapping->domain,
473 DOMAIN_ATTR_SECURE_VMID, &secure_vmid);
474 if (rc) {
475 dev_err(client->dev, "couldn't set secure pix vmid\n");
476 goto error;
477 }
478 }
479
Alan Kwongc16e0922017-05-11 14:50:46 -0700480 iommu_set_fault_handler(client->mmu_mapping->domain,
481 msm_smmu_fault_handler, (void *)client);
482
Alan Kwong112a84f2016-05-24 20:49:21 -0400483 DRM_INFO("Created domain %s [%zx,%zx] secure=%d\n",
484 domain->label, domain->va_start, domain->va_size,
485 domain->secure);
486
Adrian Salido-Moreno48ebb792015-10-02 15:54:46 -0700487 return 0;
488
489error:
490 arm_iommu_release_mapping(client->mmu_mapping);
491 return rc;
492}
493
/**
 * msm_smmu_probe()
 * @pdev: platform device
 *
 * Each smmu context acts as a separate device and the context banks are
 * configured with a VA range.
 * Registers the clks as each context bank has its own clks, for which voting
 * has to be done everytime before using that context bank.
 *
 * Return: 0 on success or a negative errno from mapping creation.
 */
static int msm_smmu_probe(struct platform_device *pdev)
{
	const struct of_device_id *match;
	struct msm_smmu_client *client;
	const struct msm_smmu_domain *domain;
	int rc;

	match = of_match_device(msm_smmu_dt_match, &pdev->dev);
	if (!match || !match->data) {
		dev_err(&pdev->dev, "probe failed as match data is invalid\n");
		return -EINVAL;
	}

	domain = match->data;
	if (!domain) {
		dev_err(&pdev->dev, "no matching device found\n");
		return -EINVAL;
	}

	DRM_INFO("probing device %s\n", match->compatible);

	/* devm-managed: freed automatically if probe fails */
	client = devm_kzalloc(&pdev->dev, sizeof(*client), GFP_KERNEL);
	if (!client)
		return -ENOMEM;

	client->dev = &pdev->dev;

	/* drvdata is read back by msm_smmu_device_create() on success */
	rc = _msm_smmu_create_mapping(client, domain);
	platform_set_drvdata(pdev, client);

	return rc;
}
535
536static int msm_smmu_remove(struct platform_device *pdev)
537{
538 struct msm_smmu_client *client;
539
540 client = platform_get_drvdata(pdev);
541 if (client->domain_attached) {
542 arm_iommu_detach_device(client->dev);
543 client->domain_attached = false;
544 }
545 arm_iommu_release_mapping(client->mmu_mapping);
546
547 return 0;
548}
549
/* Platform driver for the per-context-bank SMMU child devices. */
static struct platform_driver msm_smmu_driver = {
	.probe = msm_smmu_probe,
	.remove = msm_smmu_remove,
	.driver = {
		.name = "msmdrm_smmu",
		.of_match_table = msm_smmu_dt_match,
	},
};
558
Abhijit Kulkarni1774dac2017-05-01 10:51:02 -0700559int __init msm_smmu_driver_init(void)
Adrian Salido-Moreno48ebb792015-10-02 15:54:46 -0700560{
561 int ret;
562
563 ret = platform_driver_register(&msm_smmu_driver);
564 if (ret)
565 pr_err("mdss_smmu_register_driver() failed!\n");
566
567 return ret;
568}
Adrian Salido-Moreno48ebb792015-10-02 15:54:46 -0700569
/* Unregister the msm_smmu platform driver (module exit counterpart). */
void __exit msm_smmu_driver_cleanup(void)
{
	platform_driver_unregister(&msm_smmu_driver);
}
Adrian Salido-Moreno48ebb792015-10-02 15:54:46 -0700574
575MODULE_LICENSE("GPL v2");
576MODULE_DESCRIPTION("MSM SMMU driver");