/* Copyright (c) 2007-2018, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#define pr_fmt(fmt) "%s: " fmt, __func__

#include <linux/clk.h>
#include <linux/debugfs.h>
#include <linux/kernel.h>
#include <linux/iommu.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/clk/msm-clk.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/dma-buf.h>
#include <linux/of_platform.h>
#include <linux/msm_dma_iommu_mapping.h>

#include <asm/dma-iommu.h>
#include "soc/qcom/secure_buffer.h"

#include "mdss.h"
#include "mdss_mdp.h"
#include "mdss_smmu.h"
#include "mdss_debug.h"

/* Upper bound of the MDSS IOVA range (note: 0xF0000000, not a full 4 GiB) */
#define SZ_4G 0xF0000000

#ifdef CONFIG_QCOM_IOMMU
#include <linux/qcom_iommu.h>
static inline struct bus_type *mdss_mmu_get_bus(struct device *dev)
{
	return msm_iommu_get_bus(dev);
}
static inline struct device *mdss_mmu_get_ctx(const char *name)
{
	return msm_iommu_get_ctx(name);
}
#else
static inline struct bus_type *mdss_mmu_get_bus(struct device *dev)
{
	return &platform_bus_type;
}
static inline struct device *mdss_mmu_get_ctx(const char *name)
{
	return ERR_PTR(-ENODEV);
}
#endif

static DEFINE_MUTEX(mdp_iommu_lock);

void mdss_iommu_lock(void)
{
	mutex_lock(&mdp_iommu_lock);
}

void mdss_iommu_unlock(void)
{
	mutex_unlock(&mdp_iommu_lock);
}

static int mdss_smmu_util_parse_dt_clock(struct platform_device *pdev,
		struct mdss_module_power *mp)
{
	u32 i = 0, rc = 0;
	const char *clock_name;
	u32 clock_rate;
	int num_clk;

	num_clk = of_property_count_strings(pdev->dev.of_node,
			"clock-names");
	if (num_clk <= 0) {
		pr_err("clocks are not defined\n");
		goto clk_err;
	}

	mp->num_clk = num_clk;
	mp->clk_config = devm_kzalloc(&pdev->dev,
			sizeof(struct mdss_clk) * mp->num_clk, GFP_KERNEL);
	if (!mp->clk_config) {
		rc = -ENOMEM;
		mp->num_clk = 0;
		goto clk_err;
	}

	for (i = 0; i < mp->num_clk; i++) {
		of_property_read_string_index(pdev->dev.of_node, "clock-names",
				i, &clock_name);
		strlcpy(mp->clk_config[i].clk_name, clock_name,
				sizeof(mp->clk_config[i].clk_name));

		of_property_read_u32_index(pdev->dev.of_node, "clock-rate",
				i, &clock_rate);
		mp->clk_config[i].rate = clock_rate;

		if (!clock_rate)
			mp->clk_config[i].type = DSS_CLK_AHB;
		else
			mp->clk_config[i].type = DSS_CLK_PCLK;
	}

clk_err:
	return rc;
}
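
/*
 * Illustrative device tree clock properties consumed by
 * mdss_smmu_util_parse_dt_clock() (a sketch only; the clock handles and
 * names below are hypothetical). A "clock-rate" of 0 marks the clock as
 * DSS_CLK_AHB, any non-zero rate as DSS_CLK_PCLK:
 *
 *	clocks = <&clock_mmss clk_smmu_mdp_ahb_clk>,
 *		 <&clock_mmss clk_smmu_mdp_axi_clk>;
 *	clock-names = "mdp_smmu_iface_clk", "mdp_smmu_bus_clk";
 *	clock-rate = <0 0>;
 */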

static int mdss_smmu_clk_register(struct platform_device *pdev,
		struct mdss_module_power *mp)
{
	int i, ret;
	struct clk *clk;

	ret = mdss_smmu_util_parse_dt_clock(pdev, mp);
	if (ret) {
		pr_err("unable to parse clocks\n");
		return -EINVAL;
	}

	for (i = 0; i < mp->num_clk; i++) {
		clk = devm_clk_get(&pdev->dev,
				mp->clk_config[i].clk_name);
		if (IS_ERR(clk)) {
			pr_err("unable to get clk: %s\n",
					mp->clk_config[i].clk_name);
			return PTR_ERR(clk);
		}
		mp->clk_config[i].clk = clk;
	}
	return 0;
}

static int mdss_smmu_enable_power(struct mdss_smmu_client *mdss_smmu,
	bool enable)
{
	int rc = 0;
	struct mdss_module_power *mp;

	if (!mdss_smmu)
		return -EINVAL;

	mp = &mdss_smmu->mp;

	if (!mp->num_vreg && !mp->num_clk)
		return 0;

	if (enable) {
		rc = msm_mdss_enable_vreg(mp->vreg_config, mp->num_vreg, true);
		if (rc) {
			pr_err("vreg enable failed - rc:%d\n", rc);
			goto end;
		}
		mdss_update_reg_bus_vote(mdss_smmu->reg_bus_clt,
			VOTE_INDEX_LOW);
		rc = msm_mdss_enable_clk(mp->clk_config, mp->num_clk, true);
		if (rc) {
			pr_err("clock enable failed - rc:%d\n", rc);
			mdss_update_reg_bus_vote(mdss_smmu->reg_bus_clt,
				VOTE_INDEX_DISABLE);
			msm_mdss_enable_vreg(mp->vreg_config, mp->num_vreg,
				false);
			goto end;
		}
	} else {
		msm_mdss_enable_clk(mp->clk_config, mp->num_clk, false);
		mdss_update_reg_bus_vote(mdss_smmu->reg_bus_clt,
			VOTE_INDEX_DISABLE);
		msm_mdss_enable_vreg(mp->vreg_config, mp->num_vreg, false);
	}
end:
	return rc;
}

/*
 * mdss_smmu_attach_v2()
 *
 * Associates each configured VA range with the corresponding SMMU context
 * bank device. Enables the clocks, since smmu_v2 requires voting for them
 * before use. The iommu attach is done only once, during the initial attach,
 * and is never detached because smmu_v2 uses a feature called 'retention'.
 */
static int mdss_smmu_attach_v2(struct mdss_data_type *mdata)
{
	struct mdss_smmu_client *mdss_smmu;
	int i, rc = 0;

	for (i = 0; i < MDSS_IOMMU_MAX_DOMAIN; i++) {
		if (!mdss_smmu_is_valid_domain_type(mdata, i))
			continue;

		mdss_smmu = mdss_smmu_get_cb(i);
		if (mdss_smmu && mdss_smmu->dev) {
			if (!mdss_smmu->handoff_pending) {
				rc = mdss_smmu_enable_power(mdss_smmu, true);
				if (rc) {
					pr_err("power enable failed - domain:[%d] rc:%d\n",
						i, rc);
					goto err;
				}
			}
			mdss_smmu->handoff_pending = false;

			if (!mdss_smmu->domain_attached) {
				rc = arm_iommu_attach_device(mdss_smmu->dev,
						mdss_smmu->mmu_mapping);
				if (rc) {
					pr_err("iommu attach device failed for domain[%d] with err:%d\n",
						i, rc);
					mdss_smmu_enable_power(mdss_smmu,
						false);
					goto err;
				}
				mdss_smmu->domain_attached = true;
				pr_debug("iommu v2 domain[%i] attached\n", i);
			}
		} else {
			pr_err("iommu device not attached for domain[%d]\n", i);
			return -ENODEV;
		}
	}

	return 0;

err:
	for (i--; i >= 0; i--) {
		mdss_smmu = mdss_smmu_get_cb(i);
		if (mdss_smmu && mdss_smmu->dev) {
			arm_iommu_detach_device(mdss_smmu->dev);
			mdss_smmu_enable_power(mdss_smmu, false);
			mdss_smmu->domain_attached = false;
		}
	}

	return rc;
}

/*
 * mdss_smmu_detach_v2()
 *
 * Only disables the clocks, as the iommu-mapped VA range does not need to be
 * detached from the device in smmu_v2; see mdss_smmu_attach_v2 above.
 */
static int mdss_smmu_detach_v2(struct mdss_data_type *mdata)
{
	struct mdss_smmu_client *mdss_smmu;
	int i;

	for (i = 0; i < MDSS_IOMMU_MAX_DOMAIN; i++) {
		if (!mdss_smmu_is_valid_domain_type(mdata, i))
			continue;

		mdss_smmu = mdss_smmu_get_cb(i);
		if (mdss_smmu && mdss_smmu->dev && !mdss_smmu->handoff_pending)
			mdss_smmu_enable_power(mdss_smmu, false);
	}

	return 0;
}

static int mdss_smmu_get_domain_id_v2(u32 type)
{
	return type;
}

/*
 * mdss_smmu_dma_buf_attach_v2()
 *
 * Same as mdss_smmu_dma_buf_attach, except that the device is obtained from
 * the configured smmu_v2 context banks.
 */
static struct dma_buf_attachment *mdss_smmu_dma_buf_attach_v2(
		struct dma_buf *dma_buf, struct device *dev, int domain)
{
	struct mdss_smmu_client *mdss_smmu = mdss_smmu_get_cb(domain);

	if (!mdss_smmu) {
		pr_err("not able to get smmu context\n");
		return NULL;
	}

	return dma_buf_attach(dma_buf, mdss_smmu->dev);
}

/*
 * mdss_smmu_map_dma_buf_v2()
 *
 * Maps an existing buffer (described by a struct sg_table) into the SMMU
 * context bank device, from which the virtual address and allocated size can
 * be taken. msm_map_dma_buf is deprecated with smmu_v2; dma_map_sg is used
 * instead.
 */
static int mdss_smmu_map_dma_buf_v2(struct dma_buf *dma_buf,
		struct sg_table *table, int domain, dma_addr_t *iova,
		unsigned long *size, int dir)
{
	int rc;
	struct mdss_smmu_client *mdss_smmu = mdss_smmu_get_cb(domain);

	if (!mdss_smmu) {
		pr_err("not able to get smmu context\n");
		return -EINVAL;
	}
	ATRACE_BEGIN("map_buffer");
	rc = msm_dma_map_sg_lazy(mdss_smmu->dev, table->sgl, table->nents, dir,
			dma_buf);
	if (rc != table->nents) {
		pr_err("dma map sg failed\n");
		ATRACE_END("map_buffer");
		return -ENOMEM;
	}
	ATRACE_END("map_buffer");
	*iova = table->sgl->dma_address;
	*size = table->sgl->dma_length;
	return 0;
}
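
/*
 * Illustrative caller sequence (a sketch, not lifted from the mdss clients,
 * which reach these helpers through mdata->smmu_ops): a dma-buf imported
 * buffer is attached to the context bank device, its sg_table is fetched,
 * and only then is it mapped into the SMMU domain:
 *
 *	attach = mdss_smmu_dma_buf_attach_v2(buf, dev,
 *			MDSS_IOMMU_DOMAIN_UNSECURE);
 *	table = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
 *	rc = mdss_smmu_map_dma_buf_v2(buf, table, MDSS_IOMMU_DOMAIN_UNSECURE,
 *			&iova, &size, DMA_BIDIRECTIONAL);
 */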

static void mdss_smmu_unmap_dma_buf_v2(struct sg_table *table, int domain,
		int dir, struct dma_buf *dma_buf)
{
	struct mdss_smmu_client *mdss_smmu = mdss_smmu_get_cb(domain);

	if (!mdss_smmu) {
		pr_err("not able to get smmu context\n");
		return;
	}

	ATRACE_BEGIN("unmap_buffer");
	msm_dma_unmap_sg(mdss_smmu->dev, table->sgl, table->nents, dir,
			dma_buf);
	ATRACE_END("unmap_buffer");
}

/*
 * mdss_smmu_dma_alloc_coherent_v2()
 *
 * Allocates a buffer in the same way as mdss_smmu_dma_alloc_coherent_v1, but
 * in addition also maps it into the SMMU domain through the respective SMMU
 * context bank device.
 */
static int mdss_smmu_dma_alloc_coherent_v2(struct device *dev, size_t size,
		dma_addr_t *phys, dma_addr_t *iova, void **cpu_addr,
		gfp_t gfp, int domain)
{
	struct mdss_smmu_client *mdss_smmu = mdss_smmu_get_cb(domain);

	if (!mdss_smmu) {
		pr_err("not able to get smmu context\n");
		return -EINVAL;
	}

	*cpu_addr = dma_alloc_coherent(mdss_smmu->dev, size, iova, gfp);
	if (!*cpu_addr) {
		pr_err("dma alloc coherent failed!\n");
		return -ENOMEM;
	}
	*phys = iommu_iova_to_phys(mdss_smmu->mmu_mapping->domain,
			*iova);
	return 0;
}

static void mdss_smmu_dma_free_coherent_v2(struct device *dev, size_t size,
		void *cpu_addr, dma_addr_t phys, dma_addr_t iova, int domain)
{
	struct mdss_smmu_client *mdss_smmu = mdss_smmu_get_cb(domain);

	if (!mdss_smmu) {
		pr_err("not able to get smmu context\n");
		return;
	}

	dma_free_coherent(mdss_smmu->dev, size, cpu_addr, iova);
}

/*
 * mdss_smmu_map_v2()
 *
 * Same as mdss_smmu_map_v1, except that it maps into the appropriate domain
 * referred to by the smmu context bank handles.
 */
static int mdss_smmu_map_v2(int domain, phys_addr_t iova, phys_addr_t phys,
		int gfp_order, int prot)
{
	struct mdss_smmu_client *mdss_smmu = mdss_smmu_get_cb(domain);

	if (!mdss_smmu) {
		pr_err("not able to get smmu context\n");
		return -EINVAL;
	}

	return iommu_map(mdss_smmu->mmu_mapping->domain,
			iova, phys, gfp_order, prot);
}

static void mdss_smmu_unmap_v2(int domain, unsigned long iova, int gfp_order)
{
	struct mdss_smmu_client *mdss_smmu = mdss_smmu_get_cb(domain);

	if (!mdss_smmu) {
		pr_err("not able to get smmu context\n");
		return;
	}

	iommu_unmap(mdss_smmu->mmu_mapping->domain, iova, gfp_order);
}

/*
 * mdss_smmu_dsi_alloc_buf_v2()
 *
 * Allocates the buffer; the mapping is done later.
 */
static char *mdss_smmu_dsi_alloc_buf_v2(struct device *dev, int size,
		dma_addr_t *dmap, gfp_t gfp)
{
	char *data;

	data = kzalloc(size, GFP_KERNEL | GFP_DMA);
	if (data)
		*dmap = (dma_addr_t) virt_to_phys(data);

	return data;
}

/*
 * mdss_smmu_dsi_map_buffer_v2()
 *
 * Maps the buffer allocated in mdss_smmu_dsi_alloc_buf_v2 into the SMMU
 * domain, using dma_map_single since msm_iommu_map_contig_buffer is
 * deprecated in smmu_v2.
 */
static int mdss_smmu_dsi_map_buffer_v2(phys_addr_t phys, unsigned int domain,
		unsigned long size, dma_addr_t *dma_addr, void *cpu_addr,
		int dir)
{
	struct mdss_smmu_client *mdss_smmu = mdss_smmu_get_cb(domain);

	if (!mdss_smmu) {
		pr_err("not able to get smmu context\n");
		return -EINVAL;
	}

	*dma_addr = dma_map_single(mdss_smmu->dev, cpu_addr, size, dir);
	if (IS_ERR_VALUE(*dma_addr)) {
		pr_err("dma map single failed\n");
		return -ENOMEM;
	}
	return 0;
}

static void mdss_smmu_dsi_unmap_buffer_v2(dma_addr_t dma_addr, int domain,
		unsigned long size, int dir)
{
	struct mdss_smmu_client *mdss_smmu = mdss_smmu_get_cb(domain);

	if (!mdss_smmu) {
		pr_err("not able to get smmu context\n");
		return;
	}

	if (is_mdss_iommu_attached())
		dma_unmap_single(mdss_smmu->dev, dma_addr, size, dir);
}

int mdss_smmu_fault_handler(struct iommu_domain *domain, struct device *dev,
		unsigned long iova, int flags, void *user_data)
{
	struct mdss_smmu_client *mdss_smmu =
		(struct mdss_smmu_client *)user_data;
	u32 fsynr1, mid, i;

	if (!mdss_smmu || !mdss_smmu->mmu_base)
		goto end;

	fsynr1 = readl_relaxed(mdss_smmu->mmu_base + SMMU_CBN_FSYNR1);
	mid = fsynr1 & 0xff;
	pr_err("mdss_smmu: iova:0x%lx flags:0x%x fsynr1: 0x%x mid: 0x%x\n",
		iova, flags, fsynr1, mid);

	/* get domain id information */
	for (i = 0; i < MDSS_IOMMU_MAX_DOMAIN; i++) {
		if (mdss_smmu == mdss_smmu_get_cb(i))
			break;
	}

	if (i == MDSS_IOMMU_MAX_DOMAIN)
		goto end;

	mdss_mdp_debug_mid(mid);
end:
	return -ENODEV;
}

static void mdss_smmu_deinit_v2(struct mdss_data_type *mdata)
{
	int i;
	struct mdss_smmu_client *mdss_smmu;

	for (i = 0; i < MDSS_IOMMU_MAX_DOMAIN; i++) {
		mdss_smmu = mdss_smmu_get_cb(i);
		if (mdss_smmu && mdss_smmu->dev)
			arm_iommu_release_mapping(mdss_smmu->mmu_mapping);
	}
}

/*
 * sg_clone - Duplicate an existing chained sgl
 * @orig_sgl: Original sg list to be duplicated
 * @len: Total length of sg while taking chaining into account
 * @gfp_mask: GFP allocation mask
 * @padding: specifies if padding is required
 *
 * Description:
 * Clone a chained sgl. The cloned copy may be modified in some ways while
 * keeping the original sgl intact. The clone may also be given a smaller
 * length than the original, which can reduce the total number of sg entries;
 * with @padding, the clone gets one extra sg entry on either side of the sgl.
 *
 * Returns:
 * Pointer to new kmalloced sg list, ERR_PTR() on error
 *
 */
static struct scatterlist *sg_clone(struct scatterlist *orig_sgl, u64 len,
		gfp_t gfp_mask, bool padding)
{
	int nents;
	bool last_entry;
	struct scatterlist *sgl, *head;

	nents = sg_nents(orig_sgl);
	if (nents < 0)
		return ERR_PTR(-EINVAL);
	if (padding)
		nents += 2;

	head = kmalloc_array(nents, sizeof(struct scatterlist), gfp_mask);
	if (!head)
		return ERR_PTR(-ENOMEM);

	sgl = head;

	sg_init_table(sgl, nents);

	if (padding) {
		*sgl = *orig_sgl;
		if (sg_is_chain(orig_sgl)) {
			orig_sgl = sg_next(orig_sgl);
			*sgl = *orig_sgl;
		}
		sgl->page_link &= (unsigned long)(~0x03);
		sgl = sg_next(sgl);
	}

	for (; sgl; orig_sgl = sg_next(orig_sgl), sgl = sg_next(sgl)) {

		last_entry = sg_is_last(sgl);

		/*
		 * If page_link is pointing to a chained sgl, set the sg
		 * entry in the cloned list to the next sg entry in the
		 * original sg list, as chaining is already taken care of.
		 */
		if (sg_is_chain(orig_sgl))
			orig_sgl = sg_next(orig_sgl);

		if (padding)
			last_entry = sg_is_last(orig_sgl);

		*sgl = *orig_sgl;
		sgl->page_link &= (unsigned long)(~0x03);

		if (last_entry) {
			if (padding) {
				len -= sg_dma_len(sgl);
				sgl = sg_next(sgl);
				*sgl = *orig_sgl;
			}
			sg_dma_len(sgl) = len ? len : SZ_4K;
			/* Set bit 1 to indicate end of sgl */
			sgl->page_link |= 0x02;
		} else {
			len -= sg_dma_len(sgl);
		}
	}

	return head;
}
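
/*
 * Illustrative result (a sketch, not from the original sources): cloning a
 * three-entry sgl with padding enabled yields five entries, duplicating the
 * head and tail so a caller can adjust them without touching the original
 * list; the extra tail entry's length is set to the residual @len, or SZ_4K
 * when nothing remains:
 *
 *	orig:            [A] -> [B] -> [C, end]
 *	clone (padding): [A] -> [A] -> [B] -> [C] -> [C, end, len fixup]
 */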

/*
 * sg_table_clone - Duplicate an existing sg_table including chained sgl
 * @orig_table: Original sg_table to be duplicated
 * @gfp_mask: GFP allocation mask
 * @padding: specifies if padding is required
 *
 * Description:
 * Clone a sg_table along with its chained sgl. This cloned copy may be
 * modified in some ways while keeping the original table and sgl intact.
 * The cloned sgl copy may also have a smaller length than the original,
 * which may reduce the total number of sg entries.
 *
 * Returns:
 * Pointer to new kmalloced sg_table, ERR_PTR() on error
 *
 */
static struct sg_table *sg_table_clone(struct sg_table *orig_table,
		gfp_t gfp_mask, bool padding)
{
	struct sg_table *table;
	struct scatterlist *sg = orig_table->sgl;
	u64 len = 0;

	for (len = 0; sg; sg = sg_next(sg))
		len += sg->length;

	table = kmalloc(sizeof(struct sg_table), gfp_mask);
	if (!table)
		return ERR_PTR(-ENOMEM);

	table->sgl = sg_clone(orig_table->sgl, len, gfp_mask, padding);
	if (IS_ERR(table->sgl)) {
		kfree(table);
		return ERR_PTR(-ENOMEM);
	}

	table->nents = table->orig_nents = sg_nents(table->sgl);

	return table;
}

static void mdss_smmu_ops_init(struct mdss_data_type *mdata)
{
	mdata->smmu_ops.smmu_attach = mdss_smmu_attach_v2;
	mdata->smmu_ops.smmu_detach = mdss_smmu_detach_v2;
	mdata->smmu_ops.smmu_get_domain_id = mdss_smmu_get_domain_id_v2;
	mdata->smmu_ops.smmu_dma_buf_attach =
					mdss_smmu_dma_buf_attach_v2;
	mdata->smmu_ops.smmu_map_dma_buf = mdss_smmu_map_dma_buf_v2;
	mdata->smmu_ops.smmu_unmap_dma_buf = mdss_smmu_unmap_dma_buf_v2;
	mdata->smmu_ops.smmu_dma_alloc_coherent =
					mdss_smmu_dma_alloc_coherent_v2;
	mdata->smmu_ops.smmu_dma_free_coherent =
					mdss_smmu_dma_free_coherent_v2;
	mdata->smmu_ops.smmu_map = mdss_smmu_map_v2;
	mdata->smmu_ops.smmu_unmap = mdss_smmu_unmap_v2;
	mdata->smmu_ops.smmu_dsi_alloc_buf = mdss_smmu_dsi_alloc_buf_v2;
	mdata->smmu_ops.smmu_dsi_map_buffer =
					mdss_smmu_dsi_map_buffer_v2;
	mdata->smmu_ops.smmu_dsi_unmap_buffer =
					mdss_smmu_dsi_unmap_buffer_v2;
	mdata->smmu_ops.smmu_deinit = mdss_smmu_deinit_v2;
	mdata->smmu_ops.smmu_sg_table_clone = sg_table_clone;
}
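
/*
 * Illustrative dispatch through the ops table (a sketch; mdss clients
 * normally go through wrapper helpers rather than dereferencing smmu_ops
 * directly):
 *
 *	if (mdata->smmu_ops.smmu_attach)
 *		rc = mdata->smmu_ops.smmu_attach(mdata);
 */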

/*
 * mdss_smmu_device_create()
 * @dev: mdss_mdp device
 *
 * For smmu_v2, each context bank is a separate child device of mdss_mdp.
 * Platform devices are created here for those smmu-related child devices of
 * mdss_mdp, so that probes can run for them; the probe is where the smmu
 * mapping and initialization are handled.
 */
void mdss_smmu_device_create(struct device *dev)
{
	struct device_node *parent, *child;

	parent = dev->of_node;
	for_each_child_of_node(parent, child) {
		char name[MDSS_SMMU_COMPAT_STR_LEN] = {};

		strlcpy(name, child->name, sizeof(name));
		if (is_mdss_smmu_compatible_device(name))
			of_platform_device_create(child, NULL, dev);
	}
}

int mdss_smmu_init(struct mdss_data_type *mdata, struct device *dev)
{
	mdss_smmu_device_create(dev);
	mdss_smmu_ops_init(mdata);
	mdata->mdss_util->iommu_lock = mdss_iommu_lock;
	mdata->mdss_util->iommu_unlock = mdss_iommu_unlock;
	return 0;
}

static struct mdss_smmu_domain mdss_mdp_unsec = {
	"mdp_0", MDSS_IOMMU_DOMAIN_UNSECURE, SZ_1M, (SZ_4G - SZ_1M)};
static struct mdss_smmu_domain mdss_rot_unsec = {
	NULL, MDSS_IOMMU_DOMAIN_ROT_UNSECURE, SZ_1M, (SZ_4G - SZ_1M)};
static struct mdss_smmu_domain mdss_mdp_sec = {
	"mdp_1", MDSS_IOMMU_DOMAIN_SECURE, SZ_1M, (SZ_4G - SZ_1M)};
static struct mdss_smmu_domain mdss_rot_sec = {
	NULL, MDSS_IOMMU_DOMAIN_ROT_SECURE, SZ_1M, (SZ_4G - SZ_1M)};

static const struct of_device_id mdss_smmu_dt_match[] = {
	{ .compatible = "qcom,smmu_mdp_unsec", .data = &mdss_mdp_unsec},
	{ .compatible = "qcom,smmu_rot_unsec", .data = &mdss_rot_unsec},
	{ .compatible = "qcom,smmu_mdp_sec", .data = &mdss_mdp_sec},
	{ .compatible = "qcom,smmu_rot_sec", .data = &mdss_rot_sec},
	{}
};
MODULE_DEVICE_TABLE(of, mdss_smmu_dt_match);
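
/*
 * Illustrative device tree node matched by the table above (a sketch; the
 * phandle and register values are hypothetical, only the compatible strings
 * are defined by this driver). "iommus" selects the new-style context bank
 * path in mdss_smmu_probe(), "gdsc-mmagic-mdss-supply" adds the optional
 * regulator vote, and the "mmu_cb" register region is used by the fault
 * handler:
 *
 *	smmu_mdp_unsec: qcom,smmu_mdp_unsec_cb {
 *		compatible = "qcom,smmu_mdp_unsec";
 *		iommus = <&mdp_smmu 0>;
 *		gdsc-mmagic-mdss-supply = <&gdsc_mmagic_mdss>;
 *		reg = <0xd00000 0x1000>;
 *		reg-names = "mmu_cb";
 *	};
 */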

/*
 * mdss_smmu_probe()
 * @pdev: platform device
 *
 * Each smmu context acts as a separate device and the context banks are
 * configured with a VA range.
 * Registers the clocks, as each context bank has its own clocks, for which
 * voting has to be done every time before using that context bank.
 */
int mdss_smmu_probe(struct platform_device *pdev)
{
	struct device *dev;
	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
	struct mdss_smmu_client *mdss_smmu;
	int rc = 0;
	struct mdss_smmu_domain smmu_domain;
	const struct of_device_id *match;
	struct mdss_module_power *mp;
	char name[MAX_CLIENT_NAME_LEN];
	const __be32 *address = NULL, *size = NULL;

	if (!mdata) {
		pr_err("probe failed as mdata is not initialized\n");
		return -EPROBE_DEFER;
	}

	match = of_match_device(mdss_smmu_dt_match, &pdev->dev);
	if (!match || !match->data) {
		pr_err("probe failed as match data is invalid\n");
		return -EINVAL;
	}

	smmu_domain = *(struct mdss_smmu_domain *) (match->data);
	if (smmu_domain.domain >= MDSS_IOMMU_MAX_DOMAIN) {
		pr_err("no matching device found\n");
		return -EINVAL;
	}

	if (of_find_property(pdev->dev.of_node, "iommus", NULL)) {
		dev = &pdev->dev;
	} else {
		/*
		 * For old iommu driver we query the context bank device
		 * rather than getting it from dt.
		 */
		dev = mdss_mmu_get_ctx(smmu_domain.ctx_name);
		if (!dev) {
			pr_err("Invalid SMMU ctx for domain:%d\n",
				smmu_domain.domain);
			return -EINVAL;
		}
	}

	mdss_smmu = &mdata->mdss_smmu[smmu_domain.domain];
	mp = &mdss_smmu->mp;
	memset(mp, 0, sizeof(struct mdss_module_power));

	if (of_find_property(pdev->dev.of_node,
		"gdsc-mmagic-mdss-supply", NULL)) {

		mp->vreg_config = devm_kzalloc(&pdev->dev,
			sizeof(struct mdss_vreg), GFP_KERNEL);
		if (!mp->vreg_config)
			return -ENOMEM;

		strlcpy(mp->vreg_config->vreg_name, "gdsc-mmagic-mdss",
				sizeof(mp->vreg_config->vreg_name));
		mp->num_vreg = 1;
	}

	rc = msm_mdss_config_vreg(&pdev->dev, mp->vreg_config,
			mp->num_vreg, true);
	if (rc) {
		pr_err("vreg config failed rc=%d\n", rc);
		return rc;
	}

	rc = mdss_smmu_clk_register(pdev, mp);
	if (rc) {
		pr_err("smmu clk register failed for domain[%d] with err:%d\n",
			smmu_domain.domain, rc);
		msm_mdss_config_vreg(&pdev->dev, mp->vreg_config, mp->num_vreg,
			false);
		return rc;
	}

	snprintf(name, MAX_CLIENT_NAME_LEN, "smmu:%u", smmu_domain.domain);
	mdss_smmu->reg_bus_clt = mdss_reg_bus_vote_client_create(name);
	if (IS_ERR(mdss_smmu->reg_bus_clt)) {
		pr_err("mdss bus client register failed\n");
		msm_mdss_config_vreg(&pdev->dev, mp->vreg_config, mp->num_vreg,
			false);
		return PTR_ERR(mdss_smmu->reg_bus_clt);
	}

	rc = mdss_smmu_enable_power(mdss_smmu, true);
	if (rc) {
		pr_err("power enable failed - domain:[%d] rc:%d\n",
			smmu_domain.domain, rc);
		goto bus_client_destroy;
	}

	mdss_smmu->mmu_mapping = arm_iommu_create_mapping(
		mdss_mmu_get_bus(dev), smmu_domain.start, smmu_domain.size);
	if (IS_ERR(mdss_smmu->mmu_mapping)) {
		pr_err("iommu create mapping failed for domain[%d]\n",
			smmu_domain.domain);
		rc = PTR_ERR(mdss_smmu->mmu_mapping);
		goto disable_power;
	}

	if (smmu_domain.domain == MDSS_IOMMU_DOMAIN_SECURE ||
		smmu_domain.domain == MDSS_IOMMU_DOMAIN_ROT_SECURE) {
		int secure_vmid = VMID_CP_PIXEL;

		rc = iommu_domain_set_attr(mdss_smmu->mmu_mapping->domain,
			DOMAIN_ATTR_SECURE_VMID, &secure_vmid);
		if (rc) {
			pr_err("couldn't set secure pixel vmid\n");
			goto release_mapping;
		}
	}

	if (!mdata->handoff_pending)
		mdss_smmu_enable_power(mdss_smmu, false);
	else
		mdss_smmu->handoff_pending = true;

	mdss_smmu->dev = dev;

	address = of_get_address_by_name(pdev->dev.of_node, "mmu_cb", 0, 0);
	if (address) {
		size = address + 1;
		mdss_smmu->mmu_base = ioremap(be32_to_cpu(*address),
			be32_to_cpu(*size));
		if (mdss_smmu->mmu_base)
			iommu_set_fault_handler(mdss_smmu->mmu_mapping->domain,
				mdss_smmu_fault_handler, mdss_smmu);
	} else {
		pr_debug("unable to map context bank base\n");
	}

	pr_info("iommu v2 domain[%d] mapping and clk register successful!\n",
		smmu_domain.domain);
	return 0;

release_mapping:
	arm_iommu_release_mapping(mdss_smmu->mmu_mapping);
disable_power:
	mdss_smmu_enable_power(mdss_smmu, false);
bus_client_destroy:
	mdss_reg_bus_vote_client_destroy(mdss_smmu->reg_bus_clt);
	mdss_smmu->reg_bus_clt = NULL;
	msm_mdss_config_vreg(&pdev->dev, mp->vreg_config, mp->num_vreg,
		false);
	return rc;
}

int mdss_smmu_remove(struct platform_device *pdev)
{
	int i;
	struct mdss_smmu_client *mdss_smmu;

	for (i = 0; i < MDSS_IOMMU_MAX_DOMAIN; i++) {
		mdss_smmu = mdss_smmu_get_cb(i);
		if (mdss_smmu && mdss_smmu->dev &&
			(mdss_smmu->dev == &pdev->dev))
			arm_iommu_release_mapping(mdss_smmu->mmu_mapping);
	}
	return 0;
}

static struct platform_driver mdss_smmu_driver = {
	.probe = mdss_smmu_probe,
	.remove = mdss_smmu_remove,
	.shutdown = NULL,
	.driver = {
		.name = "mdss_smmu",
		.of_match_table = mdss_smmu_dt_match,
	},
};

static int mdss_smmu_register_driver(void)
{
	return platform_driver_register(&mdss_smmu_driver);
}

static int __init mdss_smmu_driver_init(void)
{
	int ret;

	ret = mdss_smmu_register_driver();
	if (ret)
		pr_err("mdss_smmu_register_driver() failed!\n");

	return ret;
}
module_init(mdss_smmu_driver_init);

static void __exit mdss_smmu_driver_cleanup(void)
{
	platform_driver_unregister(&mdss_smmu_driver);
}
module_exit(mdss_smmu_driver_cleanup);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("MDSS SMMU driver");