/*
 * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/module.h>
#include <linux/of_platform.h>
#include <linux/pm_runtime.h>
#include <linux/msm_dma_iommu_mapping.h>

#include <asm/dma-iommu.h>
#include <soc/qcom/secure_buffer.h>

#include "msm_drv.h"
#include "msm_gem.h"
#include "msm_mmu.h"
#include "sde_dbg.h"

#ifndef SZ_4G
#define SZ_4G	(((size_t) SZ_1G) * 4)
#endif

struct msm_smmu_client {
	struct device *dev;
	struct dma_iommu_mapping *mmu_mapping;
	bool domain_attached;
};

struct msm_smmu {
	struct msm_mmu base;
	struct device *client_dev;
	struct msm_smmu_client *client;
};

struct msm_smmu_domain {
	const char *label;
	size_t va_start;
	size_t va_size;
	bool secure;
};

#define to_msm_smmu(x) container_of(x, struct msm_smmu, base)
#define msm_smmu_to_client(smmu) (smmu->client)

static int _msm_smmu_create_mapping(struct msm_smmu_client *client,
		const struct msm_smmu_domain *domain);

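/*
 * msm_smmu_attach - attach the client device to its IOMMU mapping.
 * The attach is performed at most once per client; the names/cnt
 * arguments are not used by the SMMU backend.
 */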
static int msm_smmu_attach(struct msm_mmu *mmu, const char * const *names,
		int cnt)
{
	struct msm_smmu *smmu = to_msm_smmu(mmu);
	struct msm_smmu_client *client = msm_smmu_to_client(smmu);
	int rc = 0;

	if (!client) {
		pr_err("undefined smmu client\n");
		return -EINVAL;
	}

	/* domain attach only once */
	if (client->domain_attached)
		return 0;

	rc = arm_iommu_attach_device(client->dev,
			client->mmu_mapping);
	if (rc) {
		dev_err(client->dev, "iommu attach dev failed (%d)\n",
				rc);
		return rc;
	}

	client->domain_attached = true;

	dev_dbg(client->dev, "iommu domain attached\n");

	return 0;
}

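/*
 * msm_smmu_detach - detach the client device from its IOMMU mapping,
 * if it is currently attached.
 */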
static void msm_smmu_detach(struct msm_mmu *mmu, const char * const *names,
		int cnt)
{
	struct msm_smmu *smmu = to_msm_smmu(mmu);
	struct msm_smmu_client *client = msm_smmu_to_client(smmu);

	if (!client) {
		pr_err("undefined smmu client\n");
		return;
	}

	if (!client->domain_attached)
		return;

	arm_iommu_detach_device(client->dev);
	client->domain_attached = false;
	dev_dbg(client->dev, "iommu domain detached\n");
}

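/*
 * msm_smmu_map - map each scatterlist segment contiguously into the IOMMU
 * domain starting at @iova; on failure, any segments already mapped are
 * unmapped again before returning the error.
 */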
static int msm_smmu_map(struct msm_mmu *mmu, uint32_t iova,
		struct sg_table *sgt, int prot)
{
	struct msm_smmu *smmu = to_msm_smmu(mmu);
	struct msm_smmu_client *client = msm_smmu_to_client(smmu);
	struct iommu_domain *domain;
	struct scatterlist *sg;
	unsigned int da = iova;
	unsigned int i, j;
	int ret;

	if (!client)
		return -ENODEV;

	domain = client->mmu_mapping->domain;
	if (!domain || !sgt)
		return -EINVAL;

	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		u32 pa = sg_phys(sg) - sg->offset;
		size_t bytes = sg->length + sg->offset;

		VERB("map[%d]: %08x %08x(%zx)", i, iova, pa, bytes);

		ret = iommu_map(domain, da, pa, bytes, prot);
		if (ret)
			goto fail;

		da += bytes;
	}

	return 0;

fail:
	da = iova;

	for_each_sg(sgt->sgl, sg, i, j) {
		size_t bytes = sg->length + sg->offset;

		iommu_unmap(domain, da, bytes);
		da += bytes;
	}
	return ret;
}

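/*
 * msm_smmu_map_sg/msm_smmu_unmap_sg - thin wrappers around dma_map_sg()
 * and dma_unmap_sg() on the client device.
 */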
static int msm_smmu_map_sg(struct msm_mmu *mmu, struct sg_table *sgt,
		enum dma_data_direction dir)
{
	struct msm_smmu *smmu = to_msm_smmu(mmu);
	struct msm_smmu_client *client = msm_smmu_to_client(smmu);
	int ret;

	ret = dma_map_sg(client->dev, sgt->sgl, sgt->nents, dir);
	if (ret != sgt->nents)
		return -ENOMEM;

	return 0;
}

static void msm_smmu_unmap_sg(struct msm_mmu *mmu, struct sg_table *sgt,
		enum dma_data_direction dir)
{
	struct msm_smmu *smmu = to_msm_smmu(mmu);
	struct msm_smmu_client *client = msm_smmu_to_client(smmu);

	dma_unmap_sg(client->dev, sgt->sgl, sgt->nents, dir);
}

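/*
 * msm_smmu_unmap - unmap the range previously mapped by msm_smmu_map(),
 * one scatterlist segment at a time.
 */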
static int msm_smmu_unmap(struct msm_mmu *mmu, uint32_t iova,
		struct sg_table *sgt)
{
	struct msm_smmu *smmu = to_msm_smmu(mmu);
	struct msm_smmu_client *client = msm_smmu_to_client(smmu);
	struct iommu_domain *domain;
	struct scatterlist *sg;
	unsigned int da = iova;
	int i;

	if (!client)
		return -ENODEV;

	domain = client->mmu_mapping->domain;
	if (!domain || !sgt)
		return -EINVAL;

	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		size_t bytes = sg->length + sg->offset;
		size_t unmapped;

		unmapped = iommu_unmap(domain, da, bytes);
		if (unmapped < bytes)
			return unmapped;

		VERB("unmap[%d]: %08x(%zx)", i, iova, bytes);

		WARN_ON(!PAGE_ALIGNED(bytes));

		da += bytes;
	}

	return 0;
}

static void msm_smmu_destroy(struct msm_mmu *mmu)
{
	struct msm_smmu *smmu = to_msm_smmu(mmu);
	struct platform_device *pdev = to_platform_device(smmu->client_dev);

	if (smmu->client_dev)
		platform_device_unregister(pdev);
	kfree(smmu);
}

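/*
 * msm_smmu_map_dma_buf - map a dma-buf backed sg table through the msm
 * dma-iommu mapping layer so the mapping stays associated with @dma_buf.
 * MSM_BO_KEEPATTRS requests the DMA_ATTR_IOMMU_USE_UPSTREAM_HINT attribute.
 */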
static int msm_smmu_map_dma_buf(struct msm_mmu *mmu, struct sg_table *sgt,
		struct dma_buf *dma_buf, int dir, u32 flags)
{
	struct msm_smmu *smmu = to_msm_smmu(mmu);
	struct msm_smmu_client *client = msm_smmu_to_client(smmu);
	unsigned long attrs = 0x0;
	int ret;

	if (!sgt || !client) {
		DRM_ERROR("sg table is invalid\n");
		return -ENOMEM;
	}

	if (flags & MSM_BO_KEEPATTRS)
		attrs |= DMA_ATTR_IOMMU_USE_UPSTREAM_HINT;

	ret = msm_dma_map_sg_attrs(client->dev, sgt->sgl, sgt->nents, dir,
			dma_buf, attrs);
	if (ret != sgt->nents) {
		DRM_ERROR("dma map sg failed\n");
		return -ENOMEM;
	}

	if (sgt && sgt->sgl) {
		DRM_DEBUG("%pad/0x%x/0x%x/0x%lx\n", &sgt->sgl->dma_address,
				sgt->sgl->dma_length, dir, attrs);
		SDE_EVT32(sgt->sgl->dma_address, sgt->sgl->dma_length,
				dir, attrs);
	}

	return 0;
}

static void msm_smmu_unmap_dma_buf(struct msm_mmu *mmu, struct sg_table *sgt,
		struct dma_buf *dma_buf, int dir)
{
	struct msm_smmu *smmu = to_msm_smmu(mmu);
	struct msm_smmu_client *client = msm_smmu_to_client(smmu);

	if (!sgt || !client) {
		DRM_ERROR("sg table is invalid\n");
		return;
	}

	if (sgt && sgt->sgl) {
		DRM_DEBUG("%pad/0x%x/0x%x\n", &sgt->sgl->dma_address,
				sgt->sgl->dma_length, dir);
		SDE_EVT32(sgt->sgl->dma_address, sgt->sgl->dma_length, dir);
	}

	msm_dma_unmap_sg(client->dev, sgt->sgl, sgt->nents, dir, dma_buf);
}

static const struct msm_mmu_funcs funcs = {
	.attach = msm_smmu_attach,
	.detach = msm_smmu_detach,
	.map = msm_smmu_map,
	.map_sg = msm_smmu_map_sg,
	.unmap_sg = msm_smmu_unmap_sg,
	.unmap = msm_smmu_unmap,
	.map_dma_buf = msm_smmu_map_dma_buf,
	.unmap_dma_buf = msm_smmu_unmap_dma_buf,
	.destroy = msm_smmu_destroy,
};

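/*
 * Per-domain IOVA layout: every SDE SMMU context bank gets a virtual
 * address range of [SZ_128K, SZ_4G), and the mdp_s/rot_s domains are
 * additionally marked secure.
 */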
static struct msm_smmu_domain msm_smmu_domains[MSM_SMMU_DOMAIN_MAX] = {
	[MSM_SMMU_DOMAIN_UNSECURE] = {
		.label = "mdp_ns",
		.va_start = SZ_128K,
		.va_size = SZ_4G - SZ_128K,
		.secure = false,
	},
	[MSM_SMMU_DOMAIN_SECURE] = {
		.label = "mdp_s",
		.va_start = SZ_128K,
		.va_size = SZ_4G - SZ_128K,
		.secure = true,
	},
	[MSM_SMMU_DOMAIN_NRT_UNSECURE] = {
		.label = "rot_ns",
		.va_start = SZ_128K,
		.va_size = SZ_4G - SZ_128K,
		.secure = false,
	},
	[MSM_SMMU_DOMAIN_NRT_SECURE] = {
		.label = "rot_s",
		.va_start = SZ_128K,
		.va_size = SZ_4G - SZ_128K,
		.secure = true,
	},
};

static const struct of_device_id msm_smmu_dt_match[] = {
	{ .compatible = "qcom,smmu_sde_unsec",
		.data = &msm_smmu_domains[MSM_SMMU_DOMAIN_UNSECURE] },
	{ .compatible = "qcom,smmu_sde_sec",
		.data = &msm_smmu_domains[MSM_SMMU_DOMAIN_SECURE] },
	{ .compatible = "qcom,smmu_sde_nrt_unsec",
		.data = &msm_smmu_domains[MSM_SMMU_DOMAIN_NRT_UNSECURE] },
	{ .compatible = "qcom,smmu_sde_nrt_sec",
		.data = &msm_smmu_domains[MSM_SMMU_DOMAIN_NRT_SECURE] },
	{}
};
MODULE_DEVICE_TABLE(of, msm_smmu_dt_match);

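/*
 * msm_smmu_device_create - look up the compatible string for @domain and
 * create its smmu platform device. The unsecure display domain is
 * special-cased: its client is allocated against the parent device and
 * NULL is returned so the caller keeps using that device directly.
 */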
static struct device *msm_smmu_device_create(struct device *dev,
		enum msm_mmu_domain_type domain,
		struct msm_smmu *smmu)
{
	struct device_node *child;
	struct platform_device *pdev;
	int i;
	const char *compat = NULL;

	for (i = 0; i < ARRAY_SIZE(msm_smmu_dt_match); i++) {
		if (msm_smmu_dt_match[i].data == &msm_smmu_domains[domain]) {
			compat = msm_smmu_dt_match[i].compatible;
			break;
		}
	}

	if (!compat) {
		DRM_DEBUG("unable to find matching domain for %d\n", domain);
		return ERR_PTR(-ENOENT);
	}
	DRM_DEBUG("found domain %d compat: %s\n", domain, compat);

	if (domain == MSM_SMMU_DOMAIN_UNSECURE) {
		int rc;

		smmu->client = devm_kzalloc(dev,
				sizeof(struct msm_smmu_client), GFP_KERNEL);
		if (!smmu->client)
			return ERR_PTR(-ENOMEM);

		smmu->client->dev = dev;

		rc = _msm_smmu_create_mapping(msm_smmu_to_client(smmu),
				msm_smmu_dt_match[i].data);
		if (rc) {
			devm_kfree(dev, smmu->client);
			smmu->client = NULL;
			return ERR_PTR(rc);
		}

		return NULL;
	}

	child = of_find_compatible_node(dev->of_node, NULL, compat);
	if (!child) {
		DRM_DEBUG("unable to find compatible node for %s\n", compat);
		return ERR_PTR(-ENODEV);
	}

	pdev = of_platform_device_create(child, NULL, dev);
	if (!pdev) {
		DRM_ERROR("unable to create smmu platform dev for domain %d\n",
				domain);
		return ERR_PTR(-ENODEV);
	}

	smmu->client = platform_get_drvdata(pdev);

	return &pdev->dev;
}

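/*
 * msm_smmu_new - allocate an msm_mmu wrapper for @domain, create or find
 * its client device and hook up the SMMU function table.
 */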
struct msm_mmu *msm_smmu_new(struct device *dev,
		enum msm_mmu_domain_type domain)
{
	struct msm_smmu *smmu;
	struct device *client_dev;

	smmu = kzalloc(sizeof(*smmu), GFP_KERNEL);
	if (!smmu)
		return ERR_PTR(-ENOMEM);

	client_dev = msm_smmu_device_create(dev, domain, smmu);
	if (IS_ERR(client_dev)) {
		kfree(smmu);
		return (void *)client_dev ? : ERR_PTR(-ENODEV);
	}

	smmu->client_dev = client_dev;
	msm_mmu_init(&smmu->base, dev, &funcs);

	return &smmu->base;
}

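/*
 * msm_smmu_fault_handler - per-domain IOMMU fault handler; logs the
 * faulting iova and flags and triggers an SDE register dump without
 * panicking.
 */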
static int msm_smmu_fault_handler(struct iommu_domain *domain,
		struct device *dev, unsigned long iova,
		int flags, void *token)
{
	struct msm_smmu_client *client;
	int rc = -EINVAL;

	if (!token) {
		DRM_ERROR("Error: token is NULL\n");
		return -EINVAL;
	}

	client = (struct msm_smmu_client *)token;

	/* see iommu.h for fault flags definition */
	SDE_EVT32(iova, flags);
	DRM_ERROR("trigger dump, iova=0x%08lx, flags=0x%x\n", iova, flags);
	DRM_ERROR("SMMU device:%s", client->dev ? client->dev->kobj.name : "");

	/* generate dump, but no panic */
	SDE_DBG_DUMP("sde", "dsi0_ctrl", "dsi0_phy", "dsi1_ctrl",
			"dsi1_phy", "vbif", "dbg_bus",
			"vbif_dbg_bus");

	/*
	 * Return an error (rather than 0) so the smmu driver can also dump
	 * out useful debug info.
	 */
	return rc;
}

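/*
 * _msm_smmu_create_mapping - create the arm_iommu mapping covering the
 * domain's VA range, set the secure VMID attribute for secure domains and
 * install the fault handler.
 */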
static int _msm_smmu_create_mapping(struct msm_smmu_client *client,
		const struct msm_smmu_domain *domain)
{
	int rc;

	client->mmu_mapping = arm_iommu_create_mapping(&platform_bus_type,
			domain->va_start, domain->va_size);
	if (IS_ERR(client->mmu_mapping)) {
		dev_err(client->dev,
			"iommu create mapping failed for domain=%s\n",
			domain->label);
		return PTR_ERR(client->mmu_mapping);
	}

	if (domain->secure) {
		int secure_vmid = VMID_CP_PIXEL;

		rc = iommu_domain_set_attr(client->mmu_mapping->domain,
				DOMAIN_ATTR_SECURE_VMID, &secure_vmid);
		if (rc) {
			dev_err(client->dev, "couldn't set secure pix vmid\n");
			goto error;
		}
	}

	iommu_set_fault_handler(client->mmu_mapping->domain,
			msm_smmu_fault_handler, (void *)client);

	DRM_INFO("Created domain %s [%zx,%zx] secure=%d\n",
			domain->label, domain->va_start, domain->va_size,
			domain->secure);

	return 0;

error:
	arm_iommu_release_mapping(client->mmu_mapping);
	return rc;
}

/**
 * msm_smmu_probe()
 * @pdev: platform device
 *
 * Each smmu context acts as a separate device and the context banks are
 * configured with a VA range.
 * Registers the clks, as each context bank has its own clks for which
 * voting has to be done every time before that context bank is used.
 */
static int msm_smmu_probe(struct platform_device *pdev)
{
	const struct of_device_id *match;
	struct msm_smmu_client *client;
	const struct msm_smmu_domain *domain;
	int rc;

	match = of_match_device(msm_smmu_dt_match, &pdev->dev);
	if (!match || !match->data) {
		dev_err(&pdev->dev, "probe failed as match data is invalid\n");
		return -EINVAL;
	}

	domain = match->data;
	if (!domain) {
		dev_err(&pdev->dev, "no matching device found\n");
		return -EINVAL;
	}

	DRM_INFO("probing device %s\n", match->compatible);

	client = devm_kzalloc(&pdev->dev, sizeof(*client), GFP_KERNEL);
	if (!client)
		return -ENOMEM;

	client->dev = &pdev->dev;

	rc = _msm_smmu_create_mapping(client, domain);
	platform_set_drvdata(pdev, client);

	return rc;
}

static int msm_smmu_remove(struct platform_device *pdev)
{
	struct msm_smmu_client *client;

	client = platform_get_drvdata(pdev);
	if (client->domain_attached) {
		arm_iommu_detach_device(client->dev);
		client->domain_attached = false;
	}
	arm_iommu_release_mapping(client->mmu_mapping);

	return 0;
}

static struct platform_driver msm_smmu_driver = {
	.probe = msm_smmu_probe,
	.remove = msm_smmu_remove,
	.driver = {
		.name = "msmdrm_smmu",
		.of_match_table = msm_smmu_dt_match,
	},
};

int __init msm_smmu_driver_init(void)
{
	int ret;

	ret = platform_driver_register(&msm_smmu_driver);
	if (ret)
		pr_err("mdss_smmu_register_driver() failed!\n");

	return ret;
}

void __exit msm_smmu_driver_cleanup(void)
{
	platform_driver_unregister(&msm_smmu_driver);
}

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("MSM SMMU driver");