Maria Yu | 961074d | 2018-01-16 11:17:58 +0800 | [diff] [blame] | 1 | /* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved. |
David Dai | 87584a4 | 2016-09-01 17:13:35 -0700 | [diff] [blame] | 2 | * |
| 3 | * This program is free software; you can redistribute it and/or modify |
| 4 | * it under the terms of the GNU General Public License version 2 and |
| 5 | * only version 2 as published by the Free Software Foundation. |
| 6 | * |
| 7 | * This program is distributed in the hope that it will be useful, |
| 8 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
| 9 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
| 10 | * GNU General Public License for more details. |
| 11 | */ |
| 12 | |
| 13 | #define pr_fmt(fmt) "AXI: BIMC: %s(): " fmt, __func__ |
| 14 | |
| 15 | #include <linux/slab.h> |
| 16 | #include <linux/io.h> |
| 17 | #include <linux/msm-bus-board.h> |
| 18 | #include "msm_bus_core.h" |
| 19 | #include "msm_bus_bimc.h" |
| 20 | #include "msm_bus_adhoc.h" |
| 21 | #include <trace/events/trace_msm_bus.h> |
| 22 | |
| 23 | /* M_Generic */ |
| 24 | |
/*
 * Software on/off state of a master port's Bandwidth Kontrol Engine (BKE).
 */
enum bke_sw {
	BKE_OFF = 0,
	BKE_ON = 1,
};

/* Offset of the master (M_Generic) register file inside the BIMC block. */
#define M_REG_BASE(b)		((b) + 0x00008000)

/*
 * Per-master register addressing: b = BIMC virtual base, n = master port
 * index; each master's register window is 0x4000 bytes apart.
 *
 * Naming convention for the enums below:
 *   *_RMSK - mask of all implemented bits in the register
 *   *_BMSK - bit mask of one field
 *   *_SHFT - shift of that field's LSB
 */
#define M_MODE_ADDR(b, n) \
	(M_REG_BASE(b) + (0x4000 * (n)) + 0x00000210)
enum bimc_m_mode {
	M_MODE_RMSK = 0xf0000011,
	M_MODE_WR_GATHER_BEATS_BMSK = 0xf0000000,
	M_MODE_WR_GATHER_BEATS_SHFT = 0x1c,
	M_MODE_NARROW_WR_BMSK = 0x10,
	M_MODE_NARROW_WR_SHFT = 0x4,
	M_MODE_ORDERING_MODEL_BMSK = 0x1,
	M_MODE_ORDERING_MODEL_SHFT = 0x0,
};

/* Static priority-level override for a master port. */
#define M_PRIOLVL_OVERRIDE_ADDR(b, n) \
	(M_REG_BASE(b) + (0x4000 * (n)) + 0x00000230)
enum bimc_m_priolvl_override {
	M_PRIOLVL_OVERRIDE_RMSK = 0x301,
	M_PRIOLVL_OVERRIDE_BMSK = 0x300,
	M_PRIOLVL_OVERRIDE_SHFT = 0x8,
	M_PRIOLVL_OVERRIDE_OVERRIDE_PRIOLVL_BMSK = 0x1,
	M_PRIOLVL_OVERRIDE_OVERRIDE_PRIOLVL_SHFT = 0x0,
};
| 53 | |
/*
 * Read-command attribute overrides. The OVERRIDE_* bits enable the
 * override of the corresponding attribute; the matching value field
 * (AREQPRIO, AMEMTYPE, ...) supplies the forced value.
 */
#define M_RD_CMD_OVERRIDE_ADDR(b, n) \
	(M_REG_BASE(b) + (0x4000 * (n)) + 0x00000240)
enum bimc_m_read_command_override {
	M_RD_CMD_OVERRIDE_RMSK = 0x3071f7f,
	M_RD_CMD_OVERRIDE_AREQPRIO_BMSK = 0x3000000,
	M_RD_CMD_OVERRIDE_AREQPRIO_SHFT = 0x18,
	M_RD_CMD_OVERRIDE_AMEMTYPE_BMSK = 0x70000,
	M_RD_CMD_OVERRIDE_AMEMTYPE_SHFT = 0x10,
	M_RD_CMD_OVERRIDE_ATRANSIENT_BMSK = 0x1000,
	M_RD_CMD_OVERRIDE_ATRANSIENT_SHFT = 0xc,
	M_RD_CMD_OVERRIDE_ASHARED_BMSK = 0x800,
	M_RD_CMD_OVERRIDE_ASHARED_SHFT = 0xb,
	M_RD_CMD_OVERRIDE_AREDIRECT_BMSK = 0x400,
	M_RD_CMD_OVERRIDE_AREDIRECT_SHFT = 0xa,
	M_RD_CMD_OVERRIDE_AOOO_BMSK = 0x200,
	M_RD_CMD_OVERRIDE_AOOO_SHFT = 0x9,
	M_RD_CMD_OVERRIDE_AINNERSHARED_BMSK = 0x100,
	M_RD_CMD_OVERRIDE_AINNERSHARED_SHFT = 0x8,
	M_RD_CMD_OVERRIDE_OVERRIDE_AREQPRIO_BMSK = 0x40,
	M_RD_CMD_OVERRIDE_OVERRIDE_AREQPRIO_SHFT = 0x6,
	M_RD_CMD_OVERRIDE_OVERRIDE_ATRANSIENT_BMSK = 0x20,
	M_RD_CMD_OVERRIDE_OVERRIDE_ATRANSIENT_SHFT = 0x5,
	M_RD_CMD_OVERRIDE_OVERRIDE_AMEMTYPE_BMSK = 0x10,
	M_RD_CMD_OVERRIDE_OVERRIDE_AMEMTYPE_SHFT = 0x4,
	M_RD_CMD_OVERRIDE_OVERRIDE_ASHARED_BMSK = 0x8,
	M_RD_CMD_OVERRIDE_OVERRIDE_ASHARED_SHFT = 0x3,
	M_RD_CMD_OVERRIDE_OVERRIDE_AREDIRECT_BMSK = 0x4,
	M_RD_CMD_OVERRIDE_OVERRIDE_AREDIRECT_SHFT = 0x2,
	M_RD_CMD_OVERRIDE_OVERRIDE_AOOO_BMSK = 0x2,
	M_RD_CMD_OVERRIDE_OVERRIDE_AOOO_SHFT = 0x1,
	M_RD_CMD_OVERRIDE_OVERRIDE_AINNERSHARED_BMSK = 0x1,
	M_RD_CMD_OVERRIDE_OVERRIDE_AINNERSHARED_SHFT = 0x0,
};

/* Write-command attribute overrides; same layout as the read override. */
#define M_WR_CMD_OVERRIDE_ADDR(b, n) \
	(M_REG_BASE(b) + (0x4000 * (n)) + 0x00000250)
enum bimc_m_write_command_override {
	M_WR_CMD_OVERRIDE_RMSK = 0x3071f7f,
	M_WR_CMD_OVERRIDE_AREQPRIO_BMSK = 0x3000000,
	M_WR_CMD_OVERRIDE_AREQPRIO_SHFT = 0x18,
	M_WR_CMD_OVERRIDE_AMEMTYPE_BMSK = 0x70000,
	M_WR_CMD_OVERRIDE_AMEMTYPE_SHFT = 0x10,
	M_WR_CMD_OVERRIDE_ATRANSIENT_BMSK = 0x1000,
	M_WR_CMD_OVERRIDE_ATRANSIENT_SHFT = 0xc,
	M_WR_CMD_OVERRIDE_ASHARED_BMSK = 0x800,
	M_WR_CMD_OVERRIDE_ASHARED_SHFT = 0xb,
	M_WR_CMD_OVERRIDE_AREDIRECT_BMSK = 0x400,
	M_WR_CMD_OVERRIDE_AREDIRECT_SHFT = 0xa,
	M_WR_CMD_OVERRIDE_AOOO_BMSK = 0x200,
	M_WR_CMD_OVERRIDE_AOOO_SHFT = 0x9,
	M_WR_CMD_OVERRIDE_AINNERSHARED_BMSK = 0x100,
	M_WR_CMD_OVERRIDE_AINNERSHARED_SHFT = 0x8,
	M_WR_CMD_OVERRIDE_OVERRIDE_AREQPRIO_BMSK = 0x40,
	M_WR_CMD_OVERRIDE_OVERRIDE_AREQPRIO_SHFT = 0x6,
	M_WR_CMD_OVERRIDE_OVERRIDE_ATRANSIENT_BMSK = 0x20,
	M_WR_CMD_OVERRIDE_OVERRIDE_ATRANSIENT_SHFT = 0x5,
	M_WR_CMD_OVERRIDE_OVERRIDE_AMEMTYPE_BMSK = 0x10,
	M_WR_CMD_OVERRIDE_OVERRIDE_AMEMTYPE_SHFT = 0x4,
	M_WR_CMD_OVERRIDE_OVERRIDE_ASHARED_BMSK = 0x8,
	M_WR_CMD_OVERRIDE_OVERRIDE_ASHARED_SHFT = 0x3,
	M_WR_CMD_OVERRIDE_OVERRIDE_AREDIRECT_BMSK = 0x4,
	M_WR_CMD_OVERRIDE_OVERRIDE_AREDIRECT_SHFT = 0x2,
	M_WR_CMD_OVERRIDE_OVERRIDE_AOOO_BMSK = 0x2,
	M_WR_CMD_OVERRIDE_OVERRIDE_AOOO_SHFT = 0x1,
	M_WR_CMD_OVERRIDE_OVERRIDE_AINNERSHARED_BMSK = 0x1,
	M_WR_CMD_OVERRIDE_OVERRIDE_AINNERSHARED_SHFT = 0x0,
};
| 121 | |
/* BKE enable bit for a master port. */
#define M_BKE_EN_ADDR(b, n) \
	(M_REG_BASE(b) + (0x4000 * (n)) + 0x00000300)
enum bimc_m_bke_en {
	M_BKE_EN_RMSK = 0x1,
	M_BKE_EN_EN_BMSK = 0x1,
	M_BKE_EN_EN_SHFT = 0x0,
};

/* Grant Period registers (window length, in QoS clock cycles). */
#define M_BKE_GP_ADDR(b, n) \
	(M_REG_BASE(b) + (0x4000 * (n)) + 0x00000304)
enum bimc_m_bke_grant_period {
	M_BKE_GP_RMSK = 0x3ff,
	M_BKE_GP_GP_BMSK = 0x3ff,
	M_BKE_GP_GP_SHFT = 0x0,
};

/* Grant count register.
 * The Grant count register holds a signed 16 bit quantity; only the
 * non-negative range 0-0x7fff is usable (see MAX_GC below).
 */
#define M_BKE_GC_ADDR(b, n) \
	(M_REG_BASE(b) + (0x4000 * (n)) + 0x00000308)
enum bimc_m_bke_grant_count {
	M_BKE_GC_RMSK = 0xffff,
	M_BKE_GC_GC_BMSK = 0xffff,
	M_BKE_GC_GC_SHFT = 0x0,
};

/* Threshold High Registers (signed 16-bit threshold values). */
#define M_BKE_THH_ADDR(b, n) \
	(M_REG_BASE(b) + (0x4000 * (n)) + 0x00000320)
enum bimc_m_bke_thresh_high {
	M_BKE_THH_RMSK = 0xffff,
	M_BKE_THH_THRESH_BMSK = 0xffff,
	M_BKE_THH_THRESH_SHFT = 0x0,
};

/* Threshold Medium Registers */
#define M_BKE_THM_ADDR(b, n) \
	(M_REG_BASE(b) + (0x4000 * (n)) + 0x00000324)
enum bimc_m_bke_thresh_medium {
	M_BKE_THM_RMSK = 0xffff,
	M_BKE_THM_THRESH_BMSK = 0xffff,
	M_BKE_THM_THRESH_SHFT = 0x0,
};

/* Threshold Low Registers */
#define M_BKE_THL_ADDR(b, n) \
	(M_REG_BASE(b) + (0x4000 * (n)) + 0x00000328)
enum bimc_m_bke_thresh_low {
	M_BKE_THL_RMSK = 0xffff,
	M_BKE_THL_THRESH_BMSK = 0xffff,
	M_BKE_THL_THRESH_SHFT = 0x0,
};
| 177 | |
/*
 * Four per-master "health" configuration registers select the priority
 * (and, for levels 0-2, command limiting) applied at each health level.
 */
#define NUM_HEALTH_LEVEL (4)
#define M_BKE_HEALTH_0_CONFIG_ADDR(b, n) \
	(M_REG_BASE(b) + (0x4000 * (n)) + 0x00000340)
enum bimc_m_bke_health_0 {
	M_BKE_HEALTH_0_CONFIG_RMSK = 0x80000303,
	M_BKE_HEALTH_0_CONFIG_LIMIT_CMDS_BMSK = 0x80000000,
	M_BKE_HEALTH_0_CONFIG_LIMIT_CMDS_SHFT = 0x1f,
	M_BKE_HEALTH_0_CONFIG_AREQPRIO_BMSK = 0x300,
	M_BKE_HEALTH_0_CONFIG_AREQPRIO_SHFT = 0x8,
	M_BKE_HEALTH_0_CONFIG_PRIOLVL_BMSK = 0x3,
	M_BKE_HEALTH_0_CONFIG_PRIOLVL_SHFT = 0x0,
};

#define M_BKE_HEALTH_1_CONFIG_ADDR(b, n) \
	(M_REG_BASE(b) + (0x4000 * (n)) + 0x00000344)
enum bimc_m_bke_health_1 {
	M_BKE_HEALTH_1_CONFIG_RMSK = 0x80000303,
	M_BKE_HEALTH_1_CONFIG_LIMIT_CMDS_BMSK = 0x80000000,
	M_BKE_HEALTH_1_CONFIG_LIMIT_CMDS_SHFT = 0x1f,
	M_BKE_HEALTH_1_CONFIG_AREQPRIO_BMSK = 0x300,
	M_BKE_HEALTH_1_CONFIG_AREQPRIO_SHFT = 0x8,
	M_BKE_HEALTH_1_CONFIG_PRIOLVL_BMSK = 0x3,
	M_BKE_HEALTH_1_CONFIG_PRIOLVL_SHFT = 0x0,
};

#define M_BKE_HEALTH_2_CONFIG_ADDR(b, n) \
	(M_REG_BASE(b) + (0x4000 * (n)) + 0x00000348)
enum bimc_m_bke_health_2 {
	M_BKE_HEALTH_2_CONFIG_RMSK = 0x80000303,
	M_BKE_HEALTH_2_CONFIG_LIMIT_CMDS_BMSK = 0x80000000,
	M_BKE_HEALTH_2_CONFIG_LIMIT_CMDS_SHFT = 0x1f,
	M_BKE_HEALTH_2_CONFIG_AREQPRIO_BMSK = 0x300,
	M_BKE_HEALTH_2_CONFIG_AREQPRIO_SHFT = 0x8,
	M_BKE_HEALTH_2_CONFIG_PRIOLVL_BMSK = 0x3,
	M_BKE_HEALTH_2_CONFIG_PRIOLVL_SHFT = 0x0,
};

/* Health level 3 has no LIMIT_CMDS bit (RMSK lacks bit 31). */
#define M_BKE_HEALTH_3_CONFIG_ADDR(b, n) \
	(M_REG_BASE(b) + (0x4000 * (n)) + 0x0000034c)
enum bimc_m_bke_health_3 {
	M_BKE_HEALTH_3_CONFIG_RMSK = 0x303,
	M_BKE_HEALTH_3_CONFIG_AREQPRIO_BMSK = 0x300,
	M_BKE_HEALTH_3_CONFIG_AREQPRIO_SHFT = 0x8,
	M_BKE_HEALTH_3_CONFIG_PRIOLVL_BMSK = 0x3,
	M_BKE_HEALTH_3_CONFIG_PRIOLVL_SHFT = 0x0,
};

/*
 * The field layout is identical for all four health levels, so the
 * helpers below reuse the level-0 field definitions. For level 3 the
 * LIMIT_CMDS bit is masked off again by that register's RMSK.
 */
#define BKE_HEALTH_MASK \
	(M_BKE_HEALTH_0_CONFIG_LIMIT_CMDS_BMSK |\
	M_BKE_HEALTH_0_CONFIG_AREQPRIO_BMSK |\
	M_BKE_HEALTH_0_CONFIG_PRIOLVL_BMSK)

/* Pack (limit_cmds, areq_prio, prio_level) into a health register value. */
#define BKE_HEALTH_VAL(limit, areq, plvl) \
	((((limit) << M_BKE_HEALTH_0_CONFIG_LIMIT_CMDS_SHFT) & \
	M_BKE_HEALTH_0_CONFIG_LIMIT_CMDS_BMSK) | \
	(((areq) << M_BKE_HEALTH_0_CONFIG_AREQPRIO_SHFT) & \
	M_BKE_HEALTH_0_CONFIG_AREQPRIO_BMSK) | \
	(((plvl) << M_BKE_HEALTH_0_CONFIG_PRIOLVL_SHFT) & \
	M_BKE_HEALTH_0_CONFIG_PRIOLVL_BMSK))

#define MAX_GRANT_PERIOD \
	(M_BKE_GP_GP_BMSK >> \
	M_BKE_GP_GP_SHFT)

/* Extra ">> 1" keeps the grant count in the signed-positive 0-0x7fff range. */
#define MAX_GC \
	(M_BKE_GC_GC_BMSK >> \
	(M_BKE_GC_GC_SHFT + 1))
| 245 | |
Maria Yu | 961074d | 2018-01-16 11:17:58 +0800 | [diff] [blame] | 246 | static int bimc_div(uint64_t *a, uint32_t b) |
David Dai | 87584a4 | 2016-09-01 17:13:35 -0700 | [diff] [blame] | 247 | { |
| 248 | if ((*a > 0) && (*a < b)) { |
| 249 | *a = 0; |
| 250 | return 1; |
| 251 | } else { |
| 252 | return do_div(*a, b); |
| 253 | } |
| 254 | } |
| 255 | |
| 256 | static void set_bke_en(void __iomem *addr, uint32_t index, |
| 257 | bool req) |
| 258 | { |
| 259 | uint32_t old_val, new_val; |
| 260 | |
| 261 | old_val = readl_relaxed(M_BKE_EN_ADDR(addr, index)); |
| 262 | new_val = req << M_BKE_EN_EN_SHFT; |
| 263 | if ((old_val & M_BKE_EN_RMSK) == (new_val)) |
| 264 | return; |
| 265 | writel_relaxed(((old_val & ~(M_BKE_EN_EN_BMSK)) | (new_val & |
| 266 | M_BKE_EN_EN_BMSK)), M_BKE_EN_ADDR(addr, index)); |
| 267 | /* Ensure that BKE register is programmed set before returning */ |
| 268 | wmb(); |
| 269 | } |
| 270 | |
| 271 | static void set_health_reg(void __iomem *addr, uint32_t rmsk, |
| 272 | uint8_t index, struct msm_bus_bimc_qos_mode *qmode) |
| 273 | { |
| 274 | uint32_t reg_val, val0, val; |
| 275 | |
| 276 | /* Note, addr is already passed with right mas_index */ |
| 277 | reg_val = readl_relaxed(addr) & rmsk; |
| 278 | val0 = BKE_HEALTH_VAL(qmode->rl.qhealth[index].limit_commands, |
| 279 | qmode->rl.qhealth[index].areq_prio, |
| 280 | qmode->rl.qhealth[index].prio_level); |
| 281 | val = ((reg_val & (~(BKE_HEALTH_MASK))) | (val0 & BKE_HEALTH_MASK)); |
| 282 | writel_relaxed(val, addr); |
| 283 | /* |
| 284 | * Ensure that priority for regulator/limiter modes are |
| 285 | * set before returning |
| 286 | */ |
| 287 | wmb(); |
| 288 | } |
| 289 | |
| 290 | static void msm_bus_bimc_set_qos_prio(void __iomem *base, |
| 291 | uint32_t mas_index, uint8_t qmode_sel, |
| 292 | struct msm_bus_bimc_qos_mode *qmode) |
| 293 | { |
| 294 | |
| 295 | switch (qmode_sel) { |
| 296 | case BIMC_QOS_MODE_FIXED: |
| 297 | case BIMC_QOS_MODE_REGULATOR: |
| 298 | case BIMC_QOS_MODE_LIMITER: |
| 299 | set_health_reg(M_BKE_HEALTH_3_CONFIG_ADDR(base, |
| 300 | mas_index), M_BKE_HEALTH_3_CONFIG_RMSK, 3, qmode); |
| 301 | set_health_reg(M_BKE_HEALTH_2_CONFIG_ADDR(base, |
| 302 | mas_index), M_BKE_HEALTH_2_CONFIG_RMSK, 2, qmode); |
| 303 | set_health_reg(M_BKE_HEALTH_1_CONFIG_ADDR(base, |
| 304 | mas_index), M_BKE_HEALTH_1_CONFIG_RMSK, 1, qmode); |
| 305 | set_health_reg(M_BKE_HEALTH_0_CONFIG_ADDR(base, |
| 306 | mas_index), M_BKE_HEALTH_0_CONFIG_RMSK, 0, qmode); |
| 307 | set_bke_en(base, mas_index, true); |
| 308 | break; |
| 309 | case BIMC_QOS_MODE_BYPASS: |
| 310 | set_bke_en(base, mas_index, false); |
| 311 | break; |
| 312 | default: |
| 313 | break; |
| 314 | } |
| 315 | } |
| 316 | |
| 317 | static void set_qos_bw_regs(void __iomem *baddr, uint32_t mas_index, |
| 318 | int32_t th, int32_t tm, int32_t tl, uint32_t gp, |
| 319 | uint32_t gc) |
| 320 | { |
| 321 | int32_t reg_val, val; |
| 322 | int32_t bke_reg_val; |
| 323 | int16_t val2; |
| 324 | |
| 325 | /* Disable BKE before writing to registers as per spec */ |
| 326 | bke_reg_val = readl_relaxed(M_BKE_EN_ADDR(baddr, mas_index)); |
| 327 | writel_relaxed((bke_reg_val & ~(M_BKE_EN_EN_BMSK)), |
| 328 | M_BKE_EN_ADDR(baddr, mas_index)); |
| 329 | |
| 330 | /* Write values of registers calculated */ |
| 331 | reg_val = readl_relaxed(M_BKE_GP_ADDR(baddr, mas_index)) |
| 332 | & M_BKE_GP_RMSK; |
| 333 | val = gp << M_BKE_GP_GP_SHFT; |
| 334 | writel_relaxed(((reg_val & ~(M_BKE_GP_GP_BMSK)) | (val & |
| 335 | M_BKE_GP_GP_BMSK)), M_BKE_GP_ADDR(baddr, mas_index)); |
| 336 | |
| 337 | reg_val = readl_relaxed(M_BKE_GC_ADDR(baddr, mas_index)) & |
| 338 | M_BKE_GC_RMSK; |
| 339 | val = gc << M_BKE_GC_GC_SHFT; |
| 340 | writel_relaxed(((reg_val & ~(M_BKE_GC_GC_BMSK)) | (val & |
| 341 | M_BKE_GC_GC_BMSK)), M_BKE_GC_ADDR(baddr, mas_index)); |
| 342 | |
| 343 | reg_val = readl_relaxed(M_BKE_THH_ADDR(baddr, mas_index)) & |
| 344 | M_BKE_THH_RMSK; |
| 345 | val = th << M_BKE_THH_THRESH_SHFT; |
| 346 | writel_relaxed(((reg_val & ~(M_BKE_THH_THRESH_BMSK)) | (val & |
| 347 | M_BKE_THH_THRESH_BMSK)), M_BKE_THH_ADDR(baddr, mas_index)); |
| 348 | |
| 349 | reg_val = readl_relaxed(M_BKE_THM_ADDR(baddr, mas_index)) & |
| 350 | M_BKE_THM_RMSK; |
| 351 | val2 = tm << M_BKE_THM_THRESH_SHFT; |
| 352 | writel_relaxed(((reg_val & ~(M_BKE_THM_THRESH_BMSK)) | (val2 & |
| 353 | M_BKE_THM_THRESH_BMSK)), M_BKE_THM_ADDR(baddr, mas_index)); |
| 354 | |
| 355 | reg_val = readl_relaxed(M_BKE_THL_ADDR(baddr, mas_index)) & |
| 356 | M_BKE_THL_RMSK; |
| 357 | val2 = tl << M_BKE_THL_THRESH_SHFT; |
| 358 | writel_relaxed(((reg_val & ~(M_BKE_THL_THRESH_BMSK)) | |
| 359 | (val2 & M_BKE_THL_THRESH_BMSK)), M_BKE_THL_ADDR(baddr, |
| 360 | mas_index)); |
| 361 | |
| 362 | /* Ensure that all bandwidth register writes have completed |
| 363 | * before returning |
| 364 | */ |
| 365 | wmb(); |
| 366 | } |
| 367 | |
| 368 | static void bimc_set_static_qos_bw(void __iomem *base, unsigned int qos_freq, |
| 369 | int mport, struct msm_bus_bimc_qos_bw *qbw) |
| 370 | { |
| 371 | int32_t bw_mbps, thh = 0, thm, thl, gc; |
| 372 | int32_t gp; |
| 373 | u64 temp; |
| 374 | |
| 375 | if (qos_freq == 0) { |
| 376 | MSM_BUS_DBG("No QoS Frequency.\n"); |
| 377 | return; |
| 378 | } |
| 379 | |
| 380 | if (!(qbw->bw && qbw->gp)) { |
| 381 | MSM_BUS_DBG("No QoS Bandwidth or Window size\n"); |
| 382 | return; |
| 383 | } |
| 384 | |
| 385 | /* Convert bandwidth to MBPS */ |
| 386 | temp = qbw->bw; |
| 387 | bimc_div(&temp, 1000000); |
| 388 | bw_mbps = temp; |
| 389 | |
| 390 | /* Grant period in clock cycles |
| 391 | * Grant period from bandwidth structure |
| 392 | * is in nano seconds, QoS freq is in KHz. |
| 393 | * Divide by 1000 to get clock cycles. |
| 394 | */ |
| 395 | gp = (qos_freq * qbw->gp) / (1000 * NSEC_PER_USEC); |
| 396 | |
| 397 | /* Grant count = BW in MBps * Grant period |
| 398 | * in micro seconds |
| 399 | */ |
| 400 | gc = bw_mbps * (qbw->gp / NSEC_PER_USEC); |
| 401 | gc = min(gc, MAX_GC); |
| 402 | |
| 403 | /* Medium threshold = -((Medium Threshold percentage * |
| 404 | * Grant count) / 100) |
| 405 | */ |
| 406 | thm = -((qbw->thmp * gc) / 100); |
| 407 | qbw->thm = thm; |
| 408 | |
| 409 | /* Low threshold = -(Grant count) */ |
| 410 | thl = -gc; |
| 411 | qbw->thl = thl; |
| 412 | |
| 413 | MSM_BUS_DBG("%s: BKE parameters: gp %d, gc %d, thm %d thl %d thh %d", |
| 414 | __func__, gp, gc, thm, thl, thh); |
| 415 | |
| 416 | trace_bus_bke_params(gc, gp, thl, thm, thl); |
| 417 | set_qos_bw_regs(base, mport, thh, thm, thl, gp, gc); |
| 418 | } |
| 419 | |
| 420 | static int msm_bus_bimc_limit_mport(struct msm_bus_node_device_type *info, |
| 421 | void __iomem *qos_base, uint32_t qos_off, |
| 422 | uint32_t qos_delta, uint32_t qos_freq, |
| 423 | int enable_lim, u64 lim_bw) |
| 424 | { |
| 425 | int mode; |
| 426 | int i; |
| 427 | struct msm_bus_bimc_qos_mode qmode = {0}; |
| 428 | |
| 429 | if (ZERO_OR_NULL_PTR(info->node_info->qport)) { |
| 430 | MSM_BUS_DBG("No QoS Ports to limit\n"); |
| 431 | return 0; |
| 432 | } |
| 433 | |
| 434 | if ((enable_lim == THROTTLE_ON) && lim_bw) { |
| 435 | mode = BIMC_QOS_MODE_LIMITER; |
| 436 | |
| 437 | qmode.rl.qhealth[0].limit_commands = 1; |
| 438 | qmode.rl.qhealth[1].limit_commands = 0; |
| 439 | qmode.rl.qhealth[2].limit_commands = 0; |
| 440 | qmode.rl.qhealth[3].limit_commands = 0; |
| 441 | for (i = 0; i < NUM_HEALTH_LEVEL; i++) { |
| 442 | qmode.rl.qhealth[i].prio_level = |
| 443 | info->node_info->qos_params.prio_lvl; |
| 444 | qmode.rl.qhealth[i].areq_prio = |
| 445 | info->node_info->qos_params.prio_rd; |
| 446 | } |
| 447 | |
| 448 | for (i = 0; i < info->node_info->num_qports; i++) { |
| 449 | struct msm_bus_bimc_qos_bw qbw; |
| 450 | /* If not in fixed mode, update bandwidth */ |
| 451 | if (info->node_info->lim_bw != lim_bw) { |
| 452 | qbw.ws = info->node_info->qos_params.ws; |
| 453 | qbw.bw = lim_bw; |
| 454 | qbw.gp = info->node_info->qos_params.gp; |
| 455 | qbw.thmp = info->node_info->qos_params.thmp; |
| 456 | bimc_set_static_qos_bw(qos_base, qos_freq, |
| 457 | info->node_info->qport[i], &qbw); |
| 458 | } |
| 459 | } |
| 460 | info->node_info->lim_bw = lim_bw; |
| 461 | } else { |
| 462 | mode = info->node_info->qos_params.mode; |
| 463 | if (mode != BIMC_QOS_MODE_BYPASS) { |
| 464 | for (i = 0; i < NUM_HEALTH_LEVEL; i++) { |
| 465 | qmode.rl.qhealth[i].prio_level = |
| 466 | info->node_info->qos_params.prio_lvl; |
| 467 | qmode.rl.qhealth[i].areq_prio = |
| 468 | info->node_info->qos_params.prio_rd; |
| 469 | } |
| 470 | } |
| 471 | } |
| 472 | |
| 473 | for (i = 0; i < info->node_info->num_qports; i++) |
| 474 | msm_bus_bimc_set_qos_prio(qos_base, info->node_info->qport[i], |
| 475 | mode, &qmode); |
| 476 | return 0; |
| 477 | } |
| 478 | |
| 479 | static bool msm_bus_bimc_update_bw_reg(int mode) |
| 480 | { |
| 481 | bool ret = false; |
| 482 | |
| 483 | if ((mode == BIMC_QOS_MODE_LIMITER) |
| 484 | || (mode == BIMC_QOS_MODE_REGULATOR)) |
| 485 | ret = true; |
| 486 | |
| 487 | return ret; |
| 488 | } |
| 489 | |
| 490 | static int msm_bus_bimc_qos_init(struct msm_bus_node_device_type *info, |
| 491 | void __iomem *qos_base, |
| 492 | uint32_t qos_off, uint32_t qos_delta, |
| 493 | uint32_t qos_freq) |
| 494 | { |
| 495 | int i; |
| 496 | struct msm_bus_bimc_qos_mode qmode = {0}; |
| 497 | |
| 498 | if (ZERO_OR_NULL_PTR(info->node_info->qport)) { |
| 499 | MSM_BUS_DBG("No QoS Ports to init\n"); |
| 500 | return 0; |
| 501 | } |
| 502 | |
| 503 | switch (info->node_info->qos_params.mode) { |
| 504 | /* For now Fixed and regulator are handled the same way. */ |
| 505 | case BIMC_QOS_MODE_FIXED: |
| 506 | case BIMC_QOS_MODE_REGULATOR: |
| 507 | for (i = 0; i < NUM_HEALTH_LEVEL; i++) { |
| 508 | qmode.rl.qhealth[i].prio_level = |
| 509 | info->node_info->qos_params.prio_lvl; |
| 510 | qmode.rl.qhealth[i].areq_prio = |
| 511 | info->node_info->qos_params.prio_rd; |
| 512 | } |
| 513 | break; |
| 514 | case BIMC_QOS_MODE_LIMITER: |
| 515 | qmode.rl.qhealth[0].limit_commands = 1; |
| 516 | qmode.rl.qhealth[1].limit_commands = 0; |
| 517 | qmode.rl.qhealth[2].limit_commands = 0; |
| 518 | qmode.rl.qhealth[3].limit_commands = 0; |
| 519 | for (i = 0; i < NUM_HEALTH_LEVEL; i++) { |
| 520 | qmode.rl.qhealth[i].prio_level = |
| 521 | info->node_info->qos_params.prio_lvl; |
| 522 | qmode.rl.qhealth[i].areq_prio = |
| 523 | info->node_info->qos_params.prio_rd; |
| 524 | } |
| 525 | break; |
| 526 | default: |
| 527 | break; |
| 528 | } |
| 529 | |
| 530 | |
| 531 | for (i = 0; i < info->node_info->num_qports; i++) |
| 532 | msm_bus_bimc_set_qos_prio(qos_base, info->node_info->qport[i], |
| 533 | info->node_info->qos_params.mode, &qmode); |
| 534 | |
| 535 | return 0; |
| 536 | } |
| 537 | |
| 538 | static int msm_bus_bimc_set_bw(struct msm_bus_node_device_type *dev, |
| 539 | void __iomem *qos_base, uint32_t qos_off, |
| 540 | uint32_t qos_delta, uint32_t qos_freq) |
| 541 | { |
| 542 | struct msm_bus_bimc_qos_bw qbw; |
| 543 | struct msm_bus_bimc_qos_mode qmode = {0}; |
| 544 | int i; |
| 545 | int64_t bw = 0; |
| 546 | int ret = 0; |
| 547 | struct msm_bus_node_info_type *info = dev->node_info; |
| 548 | int mode; |
| 549 | |
| 550 | if (info && info->num_qports && |
| 551 | ((info->qos_params.mode == BIMC_QOS_MODE_LIMITER))) { |
Odelu Kukatla | 559858c | 2017-11-13 22:12:09 +0530 | [diff] [blame] | 552 | bw = msm_bus_div64(dev->node_bw[ACTIVE_CTX].sum_ab, |
| 553 | info->num_qports); |
David Dai | 87584a4 | 2016-09-01 17:13:35 -0700 | [diff] [blame] | 554 | |
| 555 | MSM_BUS_DBG("BIMC: Update mas_bw for ID: %d -> %llu\n", |
| 556 | info->id, bw); |
| 557 | |
| 558 | if (!info->qport) { |
| 559 | MSM_BUS_DBG("No qos ports to update!\n"); |
| 560 | goto exit_set_bw; |
| 561 | } |
| 562 | |
| 563 | qbw.bw = bw + info->qos_params.bw_buffer; |
| 564 | trace_bus_bimc_config_limiter(info->id, bw); |
| 565 | |
| 566 | /* Default to gp of 5us */ |
| 567 | qbw.gp = (info->qos_params.gp ? |
| 568 | info->qos_params.gp : 5000); |
| 569 | /* Default to thmp of 50% */ |
| 570 | qbw.thmp = (info->qos_params.thmp ? |
| 571 | info->qos_params.thmp : 50); |
| 572 | /* |
| 573 | * If the BW vote is 0 then set the QoS mode to |
| 574 | * Fixed/0/0. |
| 575 | */ |
| 576 | if (bw) { |
| 577 | qmode.rl.qhealth[0].limit_commands = 1; |
| 578 | qmode.rl.qhealth[1].limit_commands = 0; |
| 579 | qmode.rl.qhealth[2].limit_commands = 0; |
| 580 | qmode.rl.qhealth[3].limit_commands = 0; |
| 581 | mode = info->qos_params.mode; |
| 582 | } else { |
| 583 | mode = BIMC_QOS_MODE_FIXED; |
| 584 | } |
| 585 | |
| 586 | for (i = 0; i < info->num_qports; i++) { |
| 587 | msm_bus_bimc_set_qos_prio(qos_base, |
| 588 | info->qport[i], mode, &qmode); |
| 589 | if (bw) |
| 590 | bimc_set_static_qos_bw(qos_base, qos_freq, |
| 591 | info->qport[i], &qbw); |
| 592 | } |
| 593 | } |
| 594 | exit_set_bw: |
| 595 | return ret; |
| 596 | } |
| 597 | |
| 598 | int msm_bus_bimc_set_ops(struct msm_bus_node_device_type *bus_dev) |
| 599 | { |
| 600 | if (!bus_dev) |
| 601 | return -ENODEV; |
| 602 | bus_dev->fabdev->noc_ops.qos_init = msm_bus_bimc_qos_init; |
| 603 | bus_dev->fabdev->noc_ops.set_bw = msm_bus_bimc_set_bw; |
| 604 | bus_dev->fabdev->noc_ops.limit_mport = msm_bus_bimc_limit_mport; |
| 605 | bus_dev->fabdev->noc_ops.update_bw_reg = |
| 606 | msm_bus_bimc_update_bw_reg; |
| 607 | return 0; |
| 608 | } |
| 609 | EXPORT_SYMBOL(msm_bus_bimc_set_ops); |