/*
 * Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#define pr_fmt(fmt) "m4m-hwmon: " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/spinlock.h>
#include "governor_cache_hwmon.h"

#define cntr_offset(idx) (sizeof(u32) * (idx))

/* register offsets from base address */
#define DCVS_VERSION(m) ((m)->base + 0x0)
#define GLOBAL_CR_CTL(m) ((m)->base + 0x8)
#define GLOBAL_CR_RESET(m) ((m)->base + 0xC)
#define OVSTAT(m) ((m)->base + 0x30)
#define OVCLR(m) ((m)->base + 0x34)
#define OVSET(m) ((m)->base + 0x3C) /* unused */
#define EVCNTR(m, x) ((m)->base + 0x40 + cntr_offset(x))
#define CNTCTL(m, x) ((m)->base + 0x100 + cntr_offset(x))
/* counters 0 and 1 do not have event type control */
#define EVTYPER_START 2
#define EVTYPER(m, x) ((m)->base + 0x140 + cntr_offset(x))

/* bitmasks for GLOBAL_CR_CTL and CNTCTLx */
#define CNT_EN BIT(0)
#define IRQ_EN BIT(1)

/* non-configurable counters */
#define CYC_CNTR_IDX 0
#define WASTED_CYC_CNTR_IDX 1

/* counter is 28-bit */
#define CNT_MAX 0x0FFFFFFFU

struct m4m_counter {
	int idx;
	u32 event_mask;
	unsigned int last_start;
};

struct m4m_hwmon {
	void __iomem *base;
	struct m4m_counter cntr[MAX_NUM_GROUPS];
	int num_cntr;
	int irq;
	struct cache_hwmon hw;
	struct device *dev;
};

#define to_mon(ptr) container_of(ptr, struct m4m_hwmon, hw)

static DEFINE_SPINLOCK(init_lock);

/* Should only be called once while HW is in POR state */
static inline void mon_global_init(struct m4m_hwmon *m)
{
	writel_relaxed(CNT_EN | IRQ_EN, GLOBAL_CR_CTL(m));
}

static inline void _mon_disable_cntr_and_irq(struct m4m_hwmon *m, int cntr_idx)
{
	writel_relaxed(0, CNTCTL(m, cntr_idx));
}

static inline void _mon_enable_cntr_and_irq(struct m4m_hwmon *m, int cntr_idx)
{
	writel_relaxed(CNT_EN | IRQ_EN, CNTCTL(m, cntr_idx));
}

static void mon_disable(struct m4m_hwmon *m)
{
	int i;

	for (i = 0; i < m->num_cntr; i++)
		_mon_disable_cntr_and_irq(m, m->cntr[i].idx);
	/* make sure all counters/IRQs are indeed disabled */
	mb();
}

static void mon_enable(struct m4m_hwmon *m)
{
	int i;

	for (i = 0; i < m->num_cntr; i++)
		_mon_enable_cntr_and_irq(m, m->cntr[i].idx);
}

static inline void _mon_ov_clear(struct m4m_hwmon *m, int cntr_idx)
{
	writel_relaxed(BIT(cntr_idx), OVCLR(m));
}

static void mon_ov_clear(struct m4m_hwmon *m, enum request_group grp)
{
	_mon_ov_clear(m, m->cntr[grp].idx);
}

static inline u32 mon_irq_status(struct m4m_hwmon *m)
{
	return readl_relaxed(OVSTAT(m));
}

static bool mon_is_ovstat_set(struct m4m_hwmon *m)
{
	int i;
	u32 status = mon_irq_status(m);

	for (i = 0; i < m->num_cntr; i++)
		if (status & BIT(m->cntr[i].idx))
			return true;
	return false;
}

/* counter must be stopped first */
static unsigned long _mon_get_count(struct m4m_hwmon *m,
				    int cntr_idx, unsigned int start)
{
	unsigned long cnt;
	u32 cur_cnt = readl_relaxed(EVCNTR(m, cntr_idx));
	u32 ov = readl_relaxed(OVSTAT(m)) & BIT(cntr_idx);

	if (!ov && cur_cnt < start) {
		dev_warn(m->dev, "Counter%d overflowed but not detected\n",
			 cntr_idx);
		ov = 1;
	}

	if (ov)
		cnt = CNT_MAX - start + cur_cnt;
	else
		cnt = cur_cnt - start;

	return cnt;
}
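
/*
 * Worked example of the wrap-around math above (illustrative numbers,
 * not taken from real hardware): with CNT_MAX = 0x0FFFFFFF, a counter
 * started at 0x0FFFFF00 that reads back 0x40 after overflowing yields
 * CNT_MAX - 0x0FFFFF00 + 0x40 = 0xFF + 0x40 = 0x13F events.
 */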

static unsigned long mon_get_count(struct m4m_hwmon *m,
				   enum request_group grp)
{
	return _mon_get_count(m, m->cntr[grp].idx, m->cntr[grp].last_start);
}

static inline void mon_set_limit(struct m4m_hwmon *m, enum request_group grp,
				 unsigned int limit)
{
	u32 start;

	if (limit >= CNT_MAX)
		limit = CNT_MAX;
	start = CNT_MAX - limit;

	writel_relaxed(start, EVCNTR(m, m->cntr[grp].idx));
	m->cntr[grp].last_start = start;
}
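
/*
 * Note on the start-value trick above: programming EVCNTR to
 * CNT_MAX - limit means the 28-bit counter overflows, and hence raises
 * its IRQ, after roughly "limit" further events, which is how the
 * governor gets notified when traffic exceeds the expected rate.
 */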

static inline void mon_enable_cycle_cntr(struct m4m_hwmon *m)
{
	writel_relaxed(CNT_EN, CNTCTL(m, CYC_CNTR_IDX));
}

static inline void mon_disable_cycle_cntr(struct m4m_hwmon *m)
{
	_mon_disable_cntr_and_irq(m, CYC_CNTR_IDX);
}

static inline unsigned long mon_get_cycle_count(struct m4m_hwmon *m)
{
	return _mon_get_count(m, CYC_CNTR_IDX, 0);
}

static inline void mon_clear_cycle_cntr(struct m4m_hwmon *m)
{
	writel_relaxed(0, EVCNTR(m, CYC_CNTR_IDX));
	_mon_ov_clear(m, CYC_CNTR_IDX);
}

static void mon_init(struct m4m_hwmon *m)
{
	static bool mon_inited;
	unsigned long flags;
	int i;

	/* global setup must run exactly once, even with multiple instances */
	spin_lock_irqsave(&init_lock, flags);
	if (!mon_inited) {
		mon_global_init(m);
		mon_inited = true;
	}
	spin_unlock_irqrestore(&init_lock, flags);

	/* configure counter events */
	for (i = 0; i < m->num_cntr; i++)
		writel_relaxed(m->cntr[i].event_mask, EVTYPER(m, m->cntr[i].idx));
}

static irqreturn_t m4m_hwmon_intr_handler(int irq, void *dev)
{
	struct m4m_hwmon *m = dev;

	if (mon_is_ovstat_set(m)) {
		update_cache_hwmon(&m->hw);
		return IRQ_HANDLED;
	}
	return IRQ_NONE;
}
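
/*
 * Note on the handler above: the line is requested with IRQF_SHARED in
 * m4m_start_hwmon(), so the handler must return IRQ_NONE whenever none
 * of its own counters has an overflow pending, letting other devices
 * sharing the interrupt claim it.
 */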

static int count_to_mrps(unsigned long count, unsigned int us)
{
	u64 tmp = count;

	/* count / us is requests per microsecond, i.e. millions of req/s */
	do_div(tmp, us);
	/* bias up by one so the rate is never underestimated */
	tmp++;
	return tmp;
}

static unsigned int mrps_to_count(unsigned int mrps, unsigned int ms,
				  unsigned int tolerance)
{
	mrps += tolerance;
	mrps *= ms * USEC_PER_MSEC;
	return mrps;
}
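
/*
 * Round-trip sanity check for the two conversions above (illustrative
 * numbers): 5,000,000 requests observed over us = 10,000 gives
 * 5000000 / 10000 = 500 MRPS, biased up to 501. Converting back with
 * ms = 10 and zero tolerance, mrps_to_count(501, 10, 0) programs a
 * limit of 501 * 10 * 1000 = 5,010,000 events for the next window.
 */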

static unsigned long m4m_meas_mrps_and_set_irq(struct cache_hwmon *hw,
		unsigned int tol, unsigned int us, struct mrps_stats *mrps)
{
	struct m4m_hwmon *m = to_mon(hw);
	unsigned long count, cyc_count;
	unsigned long f = hw->df->previous_freq;
	unsigned int sample_ms = hw->df->profile->polling_ms;
	int i;
	u32 limit;

	mon_disable(m);
	mon_disable_cycle_cntr(m);

	/* calculate mrps and set limit */
	for (i = 0; i < m->num_cntr; i++) {
		count = mon_get_count(m, i);
		mrps->mrps[i] = count_to_mrps(count, us);
		limit = mrps_to_count(mrps->mrps[i], sample_ms, tol);
		mon_ov_clear(m, i);
		mon_set_limit(m, i, limit);
		dev_dbg(m->dev, "Counter[%d] count 0x%lx, limit 0x%x\n",
			m->cntr[i].idx, count, limit);
	}

	/* get cycle count and calculate busy percent */
	cyc_count = mon_get_cycle_count(m);
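	/*
	 * Unit check: cyc_count / us is cycles per microsecond, and the
	 * factor of 1000 turns that into cycles per millisecond, i.e. an
	 * effective clock in kHz. The multiply by 100 and divide by f
	 * then give a percentage, assuming df->previous_freq is expressed
	 * in kHz; that unit is an assumption, not stated in this file.
	 */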
	mrps->busy_percent = mult_frac(cyc_count, 1000, us) * 100 / f;
	mon_clear_cycle_cntr(m);
	dev_dbg(m->dev, "Cycle count 0x%lx\n", cyc_count);

	/* re-enable monitor */
	mon_enable(m);
	mon_enable_cycle_cntr(m);

	return 0;
}

static int m4m_start_hwmon(struct cache_hwmon *hw, struct mrps_stats *mrps)
{
	struct m4m_hwmon *m = to_mon(hw);
	unsigned int sample_ms = hw->df->profile->polling_ms;
	int ret, i;
	u32 limit;

	ret = request_threaded_irq(m->irq, NULL, m4m_hwmon_intr_handler,
				   IRQF_ONESHOT | IRQF_SHARED,
				   dev_name(m->dev), m);
	if (ret) {
		dev_err(m->dev, "Unable to register for irq\n");
		return ret;
	}

	mon_init(m);
	mon_disable(m);
	mon_disable_cycle_cntr(m);
	for (i = 0; i < m->num_cntr; i++) {
		mon_ov_clear(m, i);
		limit = mrps_to_count(mrps->mrps[i], sample_ms, 0);
		mon_set_limit(m, i, limit);
	}
	mon_clear_cycle_cntr(m);
	mon_enable(m);
	mon_enable_cycle_cntr(m);

	return 0;
}

static void m4m_stop_hwmon(struct cache_hwmon *hw)
{
	struct m4m_hwmon *m = to_mon(hw);
	int i;

	mon_disable(m);
	free_irq(m->irq, m);
	for (i = 0; i < m->num_cntr; i++)
		mon_ov_clear(m, i);
}

/* device probe functions */
static const struct of_device_id m4m_match_table[] = {
	{ .compatible = "qcom,m4m-hwmon" },
	{}
};
MODULE_DEVICE_TABLE(of, m4m_match_table);
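
/*
 * Illustrative devicetree node for this driver; the unit address, reg,
 * interrupt and event-select values below are made-up placeholders,
 * not taken from a real SoC:
 *
 *	m4m-hwmon@fd4b8000 {
 *		compatible = "qcom,m4m-hwmon";
 *		reg = <0xfd4b8000 0x1000>;
 *		interrupts = <0 1 4>;
 *		qcom,target-dev = <&cache_cdev>;
 *		qcom,counter-event-sel = <2 0x1>, <3 0x2>;
 *	};
 *
 * qcom,counter-event-sel lists <counter index, event mask> pairs; the
 * parser below rejects indices under EVTYPER_START and more than
 * MAX_NUM_GROUPS pairs.
 */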

static int m4m_hwmon_parse_cntr(struct device *dev,
				struct m4m_hwmon *m)
{
	u32 *data;
	const char *prop_name = "qcom,counter-event-sel";
	int ret, len, i;

	if (!of_find_property(dev->of_node, prop_name, &len))
		return -EINVAL;
	len /= sizeof(*data);

	if (len % 2 || len > MAX_NUM_GROUPS * 2)
		return -EINVAL;

	data = devm_kcalloc(dev, len, sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;
	ret = of_property_read_u32_array(dev->of_node, prop_name, data, len);
	if (ret)
		return ret;

	len /= 2;
	m->num_cntr = len;
	for (i = 0; i < len; i++) {
		/* disallow non-configurable counters */
		if (data[i * 2] < EVTYPER_START)
			return -EINVAL;
		m->cntr[i].idx = data[i * 2];
		m->cntr[i].event_mask = data[i * 2 + 1];
	}

	devm_kfree(dev, data);
	return 0;
}

static int m4m_hwmon_driver_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct resource *res;
	struct m4m_hwmon *m;
	int ret;

	m = devm_kzalloc(dev, sizeof(*m), GFP_KERNEL);
	if (!m)
		return -ENOMEM;
	m->dev = dev;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(dev, "base not found!\n");
		return -EINVAL;
	}
	m->base = devm_ioremap(dev, res->start, resource_size(res));
	if (!m->base)
		return -ENOMEM;

	m->irq = platform_get_irq(pdev, 0);
	if (m->irq < 0) {
		dev_err(dev, "Unable to get IRQ number\n");
		return m->irq;
	}

	ret = m4m_hwmon_parse_cntr(dev, m);
	if (ret) {
		dev_err(dev, "Unable to parse counter events\n");
		return ret;
	}

	m->hw.of_node = of_parse_phandle(dev->of_node, "qcom,target-dev", 0);
	if (!m->hw.of_node)
		return -EINVAL;
	m->hw.start_hwmon = &m4m_start_hwmon;
	m->hw.stop_hwmon = &m4m_stop_hwmon;
	m->hw.meas_mrps_and_set_irq = &m4m_meas_mrps_and_set_irq;

	ret = register_cache_hwmon(dev, &m->hw);
	if (ret) {
		dev_err(dev, "Cache hwmon registration failed\n");
		return ret;
	}

	return 0;
}

static struct platform_driver m4m_hwmon_driver = {
	.probe = m4m_hwmon_driver_probe,
	.driver = {
		.name = "m4m-hwmon",
		.of_match_table = m4m_match_table,
	},
};

module_platform_driver(m4m_hwmon_driver);
MODULE_DESCRIPTION("M4M hardware monitor driver");
MODULE_LICENSE("GPL v2");