Lloyd Atkinson | 7715873 | 2016-10-23 13:02:00 -0400 | [diff] [blame] | 1 | /* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved. |
Alan Kwong | 5d324e4 | 2016-07-28 22:56:18 -0400 | [diff] [blame] | 2 | * |
| 3 | * This program is free software; you can redistribute it and/or modify |
| 4 | * it under the terms of the GNU General Public License version 2 and |
| 5 | * only version 2 as published by the Free Software Foundation. |
| 6 | * |
| 7 | * This program is distributed in the hope that it will be useful, |
| 8 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
| 9 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
| 10 | * GNU General Public License for more details. |
| 11 | */ |
| 12 | |
| 13 | #include "sde_hwio.h" |
| 14 | #include "sde_hw_catalog.h" |
| 15 | #include "sde_hw_vbif.h" |
Lloyd Atkinson | 113aefd | 2016-10-23 13:15:18 -0400 | [diff] [blame] | 16 | #include "sde_dbg.h" |
Alan Kwong | 5d324e4 | 2016-07-28 22:56:18 -0400 | [diff] [blame] | 17 | |
| 18 | #define VBIF_VERSION 0x0000 |
| 19 | #define VBIF_CLK_FORCE_CTRL0 0x0008 |
| 20 | #define VBIF_CLK_FORCE_CTRL1 0x000C |
| 21 | #define VBIF_QOS_REMAP_00 0x0020 |
| 22 | #define VBIF_QOS_REMAP_01 0x0024 |
| 23 | #define VBIF_QOS_REMAP_10 0x0028 |
| 24 | #define VBIF_QOS_REMAP_11 0x002C |
| 25 | #define VBIF_WRITE_GATHTER_EN 0x00AC |
| 26 | #define VBIF_IN_RD_LIM_CONF0 0x00B0 |
| 27 | #define VBIF_IN_RD_LIM_CONF1 0x00B4 |
| 28 | #define VBIF_IN_RD_LIM_CONF2 0x00B8 |
| 29 | #define VBIF_IN_WR_LIM_CONF0 0x00C0 |
| 30 | #define VBIF_IN_WR_LIM_CONF1 0x00C4 |
| 31 | #define VBIF_IN_WR_LIM_CONF2 0x00C8 |
| 32 | #define VBIF_OUT_RD_LIM_CONF0 0x00D0 |
| 33 | #define VBIF_OUT_WR_LIM_CONF0 0x00D4 |
| 34 | #define VBIF_XIN_HALT_CTRL0 0x0200 |
| 35 | #define VBIF_XIN_HALT_CTRL1 0x0204 |
Alan Kwong | a62eeb8 | 2017-04-19 08:57:55 -0700 | [diff] [blame] | 36 | #define VBIF_XINL_QOS_RP_REMAP_000 0x0550 |
| 37 | #define VBIF_XINL_QOS_LVL_REMAP_000 0x0590 |
Alan Kwong | 5d324e4 | 2016-07-28 22:56:18 -0400 | [diff] [blame] | 38 | |
| 39 | static void sde_hw_set_limit_conf(struct sde_hw_vbif *vbif, |
| 40 | u32 xin_id, bool rd, u32 limit) |
| 41 | { |
| 42 | struct sde_hw_blk_reg_map *c = &vbif->hw; |
| 43 | u32 reg_val; |
| 44 | u32 reg_off; |
| 45 | u32 bit_off; |
| 46 | |
| 47 | if (rd) |
| 48 | reg_off = VBIF_IN_RD_LIM_CONF0; |
| 49 | else |
| 50 | reg_off = VBIF_IN_WR_LIM_CONF0; |
| 51 | |
| 52 | reg_off += (xin_id / 4) * 4; |
| 53 | bit_off = (xin_id % 4) * 8; |
| 54 | reg_val = SDE_REG_READ(c, reg_off); |
| 55 | reg_val &= ~(0xFF << bit_off); |
| 56 | reg_val |= (limit) << bit_off; |
| 57 | SDE_REG_WRITE(c, reg_off, reg_val); |
| 58 | } |
| 59 | |
| 60 | static u32 sde_hw_get_limit_conf(struct sde_hw_vbif *vbif, |
| 61 | u32 xin_id, bool rd) |
| 62 | { |
| 63 | struct sde_hw_blk_reg_map *c = &vbif->hw; |
| 64 | u32 reg_val; |
| 65 | u32 reg_off; |
| 66 | u32 bit_off; |
| 67 | u32 limit; |
| 68 | |
| 69 | if (rd) |
| 70 | reg_off = VBIF_IN_RD_LIM_CONF0; |
| 71 | else |
| 72 | reg_off = VBIF_IN_WR_LIM_CONF0; |
| 73 | |
| 74 | reg_off += (xin_id / 4) * 4; |
| 75 | bit_off = (xin_id % 4) * 8; |
| 76 | reg_val = SDE_REG_READ(c, reg_off); |
| 77 | limit = (reg_val >> bit_off) & 0xFF; |
| 78 | |
| 79 | return limit; |
| 80 | } |
| 81 | |
| 82 | static void sde_hw_set_halt_ctrl(struct sde_hw_vbif *vbif, |
| 83 | u32 xin_id, bool enable) |
| 84 | { |
| 85 | struct sde_hw_blk_reg_map *c = &vbif->hw; |
| 86 | u32 reg_val; |
| 87 | |
| 88 | reg_val = SDE_REG_READ(c, VBIF_XIN_HALT_CTRL0); |
| 89 | |
| 90 | if (enable) |
| 91 | reg_val |= BIT(xin_id); |
| 92 | else |
| 93 | reg_val &= ~BIT(xin_id); |
| 94 | |
| 95 | SDE_REG_WRITE(c, VBIF_XIN_HALT_CTRL0, reg_val); |
| 96 | } |
| 97 | |
| 98 | static bool sde_hw_get_halt_ctrl(struct sde_hw_vbif *vbif, |
| 99 | u32 xin_id) |
| 100 | { |
| 101 | struct sde_hw_blk_reg_map *c = &vbif->hw; |
| 102 | u32 reg_val; |
| 103 | |
| 104 | reg_val = SDE_REG_READ(c, VBIF_XIN_HALT_CTRL1); |
| 105 | |
| 106 | return (reg_val & BIT(xin_id)) ? true : false; |
| 107 | } |
| 108 | |
Alan Kwong | a62eeb8 | 2017-04-19 08:57:55 -0700 | [diff] [blame] | 109 | static void sde_hw_set_qos_remap(struct sde_hw_vbif *vbif, |
| 110 | u32 xin_id, u32 level, u32 remap_level) |
| 111 | { |
| 112 | struct sde_hw_blk_reg_map *c; |
| 113 | u32 reg_val, reg_val_lvl, mask, reg_high, reg_shift; |
| 114 | |
| 115 | if (!vbif) |
| 116 | return; |
| 117 | |
| 118 | c = &vbif->hw; |
| 119 | |
| 120 | reg_high = ((xin_id & 0x8) >> 3) * 4 + (level * 8); |
| 121 | reg_shift = (xin_id & 0x7) * 4; |
| 122 | |
| 123 | reg_val = SDE_REG_READ(c, VBIF_XINL_QOS_RP_REMAP_000 + reg_high); |
| 124 | reg_val_lvl = SDE_REG_READ(c, VBIF_XINL_QOS_LVL_REMAP_000 + reg_high); |
| 125 | |
| 126 | mask = 0x7 << reg_shift; |
| 127 | |
| 128 | reg_val &= ~mask; |
| 129 | reg_val |= (remap_level << reg_shift) & mask; |
| 130 | |
| 131 | reg_val_lvl &= ~mask; |
| 132 | reg_val_lvl |= (remap_level << reg_shift) & mask; |
| 133 | |
| 134 | SDE_REG_WRITE(c, VBIF_XINL_QOS_RP_REMAP_000 + reg_high, reg_val); |
| 135 | SDE_REG_WRITE(c, VBIF_XINL_QOS_LVL_REMAP_000 + reg_high, reg_val_lvl); |
| 136 | } |
| 137 | |
Alan Kwong | 5d324e4 | 2016-07-28 22:56:18 -0400 | [diff] [blame] | 138 | static void _setup_vbif_ops(struct sde_hw_vbif_ops *ops, |
| 139 | unsigned long cap) |
| 140 | { |
| 141 | ops->set_limit_conf = sde_hw_set_limit_conf; |
| 142 | ops->get_limit_conf = sde_hw_get_limit_conf; |
| 143 | ops->set_halt_ctrl = sde_hw_set_halt_ctrl; |
| 144 | ops->get_halt_ctrl = sde_hw_get_halt_ctrl; |
Alan Kwong | a62eeb8 | 2017-04-19 08:57:55 -0700 | [diff] [blame] | 145 | if (test_bit(SDE_VBIF_QOS_REMAP, &cap)) |
| 146 | ops->set_qos_remap = sde_hw_set_qos_remap; |
Alan Kwong | 5d324e4 | 2016-07-28 22:56:18 -0400 | [diff] [blame] | 147 | } |
| 148 | |
| 149 | static const struct sde_vbif_cfg *_top_offset(enum sde_vbif vbif, |
| 150 | const struct sde_mdss_cfg *m, |
| 151 | void __iomem *addr, |
| 152 | struct sde_hw_blk_reg_map *b) |
| 153 | { |
| 154 | int i; |
| 155 | |
| 156 | for (i = 0; i < m->vbif_count; i++) { |
| 157 | if (vbif == m->vbif[i].id) { |
| 158 | b->base_off = addr; |
| 159 | b->blk_off = m->vbif[i].base; |
Lloyd Atkinson | 7715873 | 2016-10-23 13:02:00 -0400 | [diff] [blame] | 160 | b->length = m->vbif[i].len; |
Alan Kwong | 5d324e4 | 2016-07-28 22:56:18 -0400 | [diff] [blame] | 161 | b->hwversion = m->hwversion; |
| 162 | b->log_mask = SDE_DBG_MASK_VBIF; |
| 163 | return &m->vbif[i]; |
| 164 | } |
| 165 | } |
| 166 | |
| 167 | return ERR_PTR(-EINVAL); |
| 168 | } |
| 169 | |
| 170 | struct sde_hw_vbif *sde_hw_vbif_init(enum sde_vbif idx, |
| 171 | void __iomem *addr, |
| 172 | const struct sde_mdss_cfg *m) |
| 173 | { |
| 174 | struct sde_hw_vbif *c; |
| 175 | const struct sde_vbif_cfg *cfg; |
| 176 | |
| 177 | c = kzalloc(sizeof(*c), GFP_KERNEL); |
| 178 | if (!c) |
| 179 | return ERR_PTR(-ENOMEM); |
| 180 | |
| 181 | cfg = _top_offset(idx, m, addr, &c->hw); |
| 182 | if (IS_ERR_OR_NULL(cfg)) { |
| 183 | kfree(c); |
| 184 | return ERR_PTR(-EINVAL); |
| 185 | } |
| 186 | |
| 187 | /* |
| 188 | * Assign ops |
| 189 | */ |
| 190 | c->idx = idx; |
| 191 | c->cap = cfg; |
| 192 | _setup_vbif_ops(&c->ops, c->cap->features); |
| 193 | |
Lloyd Atkinson | ac4b6e0 | 2017-03-23 11:43:48 -0700 | [diff] [blame] | 194 | /* no need to register sub-range in sde dbg, dump entire vbif io base */ |
Lloyd Atkinson | 113aefd | 2016-10-23 13:15:18 -0400 | [diff] [blame] | 195 | |
Alan Kwong | 5d324e4 | 2016-07-28 22:56:18 -0400 | [diff] [blame] | 196 | return c; |
| 197 | } |
| 198 | |
/**
 * sde_hw_vbif_destroy - release a context allocated by sde_hw_vbif_init
 * @vbif: vbif context to free; NULL is safe (kfree(NULL) is a no-op)
 */
void sde_hw_vbif_destroy(struct sde_hw_vbif *vbif)
{
	kfree(vbif);
}