/*
 * linux/drivers/mmc/core/host.c
 *
 * Copyright (C) 2003 Russell King, All Rights Reserved.
 * Copyright (C) 2007-2008 Pierre Ossman
 * Copyright (C) 2010 Linus Walleij
 * Copyright (c) 2012, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * MMC host class device management
 */

#include <linux/device.h>
#include <linux/err.h>
#include <linux/idr.h>
#include <linux/of.h>
#include <linux/of_gpio.h>
#include <linux/pagemap.h>
#include <linux/export.h>
#include <linux/leds.h>
#include <linux/slab.h>

#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/mmc/ring_buffer.h>

#include <linux/mmc/slot-gpio.h>

#include "core.h"
#include "host.h"
#include "slot-gpio.h"
#include "pwrseq.h"

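/*
 * Defaults for devfreq-based clock scaling: the up/down load thresholds
 * are percentages and the polling interval is in milliseconds. They can
 * be overridden at runtime through the "clk_scaling" sysfs group
 * registered in mmc_add_host() below.
 */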
#define MMC_DEVFRQ_DEFAULT_UP_THRESHOLD 35
#define MMC_DEVFRQ_DEFAULT_DOWN_THRESHOLD 5
#define MMC_DEVFRQ_DEFAULT_POLLING_MSEC 100

static DEFINE_IDA(mmc_host_ida);
static DEFINE_SPINLOCK(mmc_host_lock);

static void mmc_host_classdev_release(struct device *dev)
{
	struct mmc_host *host = cls_dev_to_mmc_host(dev);

	spin_lock(&mmc_host_lock);
	ida_remove(&mmc_host_ida, host->index);
	spin_unlock(&mmc_host_lock);
	kfree(host);
}

static int mmc_host_prepare(struct device *dev)
{
	/*
	 * Since mmc_host is a virtual device, we don't have to do anything.
	 * If we return a positive value, the PM framework will consider that
	 * the runtime suspend and system suspend of this device are the same
	 * and will set the direct_complete flag to true. We don't want this,
	 * as the mmc_host always has a positive disable_depth and setting
	 * the flag will not speed up the suspend process.
	 * So return 0.
	 */
	return 0;
}

static const struct dev_pm_ops mmc_pm_ops = {
	.prepare = mmc_host_prepare,
};

static struct class mmc_host_class = {
	.name = "mmc_host",
	.dev_release = mmc_host_classdev_release,
	.pm = &mmc_pm_ops,
};

int mmc_register_host_class(void)
{
	return class_register(&mmc_host_class);
}

void mmc_unregister_host_class(void)
{
	class_unregister(&mmc_host_class);
}

#ifdef CONFIG_MMC_CLKGATE
static ssize_t clkgate_delay_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mmc_host *host = cls_dev_to_mmc_host(dev);

	return snprintf(buf, PAGE_SIZE, "%lu\n", host->clkgate_delay);
}

static ssize_t clkgate_delay_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct mmc_host *host = cls_dev_to_mmc_host(dev);
	unsigned long flags, value;

	if (kstrtoul(buf, 0, &value))
		return -EINVAL;

	spin_lock_irqsave(&host->clk_lock, flags);
	host->clkgate_delay = value;
	spin_unlock_irqrestore(&host->clk_lock, flags);
	return count;
}

/*
 * Enabling clock gating makes the core call out to the host driver once
 * to gate (disable) and once to ungate (re-enable) the block clock around
 * each request or card operation, however those are intermingled. The
 * driver sees this as set_ios() calls with the ios.clock field set to 0
 * to gate the block clock, and set back to the old frequency to enable
 * it again.
 */
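
/*
 * Illustrative sketch (hypothetical driver code, not part of this file):
 * a host driver supporting clock gating only needs its normal set_ios()
 * handler; the core drives gating through it:
 *
 *	static void foo_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
 *	{
 *		if (ios->clock == 0)
 *			foo_disable_block_clock(mmc);	// gated by the core
 *		else
 *			foo_set_block_clock(mmc, ios->clock); // ungated
 *	}
 *
 * The foo_* names are made up for illustration.
 */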
static void mmc_host_clk_gate_delayed(struct mmc_host *host)
{
	unsigned long tick_ns;
	unsigned long freq = host->ios.clock;
	unsigned long flags;

	if (!freq) {
		pr_debug("%s: frequency set to 0 in disable function, this means the clock is already disabled.\n",
			 mmc_hostname(host));
		return;
	}
	/*
	 * New requests may have appeared while we were scheduling, in
	 * which case there is no reason to delay the check before
	 * clk_disable().
	 */
	spin_lock_irqsave(&host->clk_lock, flags);

	/*
	 * Delay n bus cycles (at least 8 from MMC spec) before attempting
	 * to disable the MCI block clock. The reference count may have
	 * gone up again after this delay due to rescheduling!
	 */
	if (!host->clk_requests) {
		spin_unlock_irqrestore(&host->clk_lock, flags);
		tick_ns = DIV_ROUND_UP(1000000000, freq);
		ndelay(host->clk_delay * tick_ns);
	} else {
		/* New users appeared while waiting for this work */
		spin_unlock_irqrestore(&host->clk_lock, flags);
		return;
	}
	mutex_lock(&host->clk_gate_mutex);
	spin_lock_irqsave(&host->clk_lock, flags);
	if (!host->clk_requests) {
		spin_unlock_irqrestore(&host->clk_lock, flags);
		/* This will set host->ios.clock to 0 */
		mmc_gate_clock(host);
		spin_lock_irqsave(&host->clk_lock, flags);
		pr_debug("%s: gated MCI clock\n", mmc_hostname(host));
		MMC_TRACE(host, "clocks are gated\n");
	}
	spin_unlock_irqrestore(&host->clk_lock, flags);
	mutex_unlock(&host->clk_gate_mutex);
}

/*
 * Internal work. Work to disable the clock at some later point.
 */
static void mmc_host_clk_gate_work(struct work_struct *work)
{
	struct mmc_host *host = container_of(work, struct mmc_host,
					     clk_gate_work.work);

	mmc_host_clk_gate_delayed(host);
}

/**
 * mmc_host_clk_hold - ungate hardware MCI clocks
 * @host: host to ungate.
 *
 * Makes sure the host ios.clock is restored to a non-zero value
 * past this call. Increase clock reference count and ungate clock
 * if we're the first user.
 */
void mmc_host_clk_hold(struct mmc_host *host)
{
	unsigned long flags;

	/* cancel any clock gating work scheduled by mmc_host_clk_release() */
	cancel_delayed_work_sync(&host->clk_gate_work);
	mutex_lock(&host->clk_gate_mutex);
	spin_lock_irqsave(&host->clk_lock, flags);
	if (host->clk_gated) {
		spin_unlock_irqrestore(&host->clk_lock, flags);
		mmc_ungate_clock(host);

		spin_lock_irqsave(&host->clk_lock, flags);
		pr_debug("%s: ungated MCI clock\n", mmc_hostname(host));
		MMC_TRACE(host, "clocks are ungated\n");
	}
	host->clk_requests++;
	spin_unlock_irqrestore(&host->clk_lock, flags);
	mutex_unlock(&host->clk_gate_mutex);
}
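
/*
 * Usage sketch (hypothetical caller, for illustration only): code that
 * must touch the controller while the block clock may be gated brackets
 * the access with a hold/release pair. The release only schedules
 * delayed gating, so back-to-back requests do not thrash the clock:
 *
 *	mmc_host_clk_hold(host);	// clock guaranteed ungated here
 *	... issue request / access controller ...
 *	mmc_host_clk_release(host);	// may gate after clkgate_delay ms
 */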

/**
 * mmc_host_may_gate_card - check if this card may be gated
 * @card: card to check.
 */
bool mmc_host_may_gate_card(struct mmc_card *card)
{
	/* If there is no card we may gate it */
	if (!card)
		return true;

	/*
	 * An SDIO 3.0 card that supports asynchronous interrupts can
	 * signal the host even while the clock is gated, so gating is
	 * safe for it.
	 */
	if (mmc_card_sdio(card) && card->cccr.async_intr_sup)
		return true;

	/*
	 * Cards carrying MMC_QUIRK_BROKEN_CLK_GATING (such as most SDIO
	 * cards) must not be gated: they need to be clocked at all times
	 * since they may be independent systems generating interrupts
	 * and other events. The clock requests counter from the core will
	 * go down to zero since the core does not need it, but we will not
	 * gate the clock, because there is somebody out there that may still
	 * be using it.
	 */
	return !(card->quirks & MMC_QUIRK_BROKEN_CLK_GATING);
}

/**
 * mmc_host_clk_release - gate off hardware MCI clocks
 * @host: host to gate.
 *
 * Calls the host driver with ios.clock set to zero as often as possible
 * in order to gate off hardware MCI clocks. Decrease clock reference
 * count and schedule disabling of clock.
 */
void mmc_host_clk_release(struct mmc_host *host)
{
	unsigned long flags;

	spin_lock_irqsave(&host->clk_lock, flags);
	host->clk_requests--;
	if (mmc_host_may_gate_card(host->card) &&
	    !host->clk_requests)
		queue_delayed_work(host->clk_gate_wq, &host->clk_gate_work,
				   msecs_to_jiffies(host->clkgate_delay));
	spin_unlock_irqrestore(&host->clk_lock, flags);
}

/**
 * mmc_host_clk_rate - get current clock frequency setting
 * @host: host to get the clock frequency for.
 *
 * Returns current clock frequency regardless of gating.
 */
unsigned int mmc_host_clk_rate(struct mmc_host *host)
{
	unsigned long freq;
	unsigned long flags;

	spin_lock_irqsave(&host->clk_lock, flags);
	if (host->clk_gated)
		freq = host->clk_old;
	else
		freq = host->ios.clock;
	spin_unlock_irqrestore(&host->clk_lock, flags);
	return freq;
}

/**
 * mmc_host_clk_init - set up clock gating code
 * @host: host with potential clock to control
 */
static inline void mmc_host_clk_init(struct mmc_host *host)
{
	host->clk_requests = 0;
	/* Hold MCI clock for 8 cycles by default */
	host->clk_delay = 8;
	/*
	 * Default clock gating delay is 0ms to avoid wasting power.
	 * This value can be tuned by writing to the clkgate_delay
	 * sysfs entry.
	 */
	host->clkgate_delay = 0;
	host->clk_gated = false;
	INIT_DELAYED_WORK(&host->clk_gate_work, mmc_host_clk_gate_work);
	spin_lock_init(&host->clk_lock);
	mutex_init(&host->clk_gate_mutex);
}

/**
 * mmc_host_clk_exit - shut down clock gating code
 * @host: host with potential clock to control
 */
static inline void mmc_host_clk_exit(struct mmc_host *host)
{
	/*
	 * Wait for any outstanding gate and then make sure we're
	 * ungated before exiting.
	 */
	if (cancel_delayed_work_sync(&host->clk_gate_work))
		mmc_host_clk_gate_delayed(host);
	if (host->clk_gated)
		mmc_host_clk_hold(host);
	if (host->clk_gate_wq)
		destroy_workqueue(host->clk_gate_wq);
	/* There should be only one user now */
	WARN_ON(host->clk_requests > 1);
}

static inline void mmc_host_clk_sysfs_init(struct mmc_host *host)
{
	host->clkgate_delay_attr.show = clkgate_delay_show;
	host->clkgate_delay_attr.store = clkgate_delay_store;
	sysfs_attr_init(&host->clkgate_delay_attr.attr);
	host->clkgate_delay_attr.attr.name = "clkgate_delay";
	host->clkgate_delay_attr.attr.mode = S_IRUGO | S_IWUSR;
	if (device_create_file(&host->class_dev, &host->clkgate_delay_attr))
		pr_err("%s: Failed to create clkgate_delay sysfs entry\n",
		       mmc_hostname(host));
}
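
/*
 * The attribute registered above appears as
 * /sys/class/mmc_host/mmcX/clkgate_delay and holds the gating delay in
 * milliseconds. For example (from a shell, illustrative only):
 *
 *	echo 50 > /sys/class/mmc_host/mmc0/clkgate_delay
 *
 * delays gating until the host has been idle for 50 ms.
 */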

static inline bool mmc_host_clk_gate_wq_init(struct mmc_host *host)
{
	char *wq = NULL;
	int wq_nl;
	bool ret = true;

	wq_nl = strlen("mmc_clk_gate/") + strlen(mmc_hostname(host)) + 1;

	wq = kzalloc(wq_nl, GFP_KERNEL);
	if (!wq) {
		ret = false;
		goto out;
	}

	snprintf(wq, wq_nl, "mmc_clk_gate/%s", mmc_hostname(host));

	/*
	 * Create a workqueue with the WQ_MEM_RECLAIM flag set for the
	 * mmc clock gate work. Because the mmc thread is created with
	 * the PF_MEMALLOC flag set, the kernel checks for the
	 * WQ_MEM_RECLAIM workqueue flag when flushing the workqueue
	 * and triggers a warning if it is not set.
	 */
	host->clk_gate_wq = create_workqueue(wq);
	if (!host->clk_gate_wq) {
		ret = false;
		dev_err(host->parent,
			"failed to create clock gate work queue\n");
	}

	kfree(wq);
out:
	return ret;
}
#else

static inline void mmc_host_clk_init(struct mmc_host *host)
{
}

static inline void mmc_host_clk_exit(struct mmc_host *host)
{
}

static inline void mmc_host_clk_sysfs_init(struct mmc_host *host)
{
}

bool mmc_host_may_gate_card(struct mmc_card *card)
{
	return false;
}

static inline bool mmc_host_clk_gate_wq_init(struct mmc_host *host)
{
	return true;
}
#endif

void mmc_retune_enable(struct mmc_host *host)
{
	host->can_retune = 1;
	if (host->retune_period)
		mod_timer(&host->retune_timer,
			  jiffies + host->retune_period * HZ);
}
EXPORT_SYMBOL(mmc_retune_enable);

/*
 * Pause re-tuning for a small set of operations. The pause begins after the
 * next command and after first doing re-tuning.
 */
void mmc_retune_pause(struct mmc_host *host)
{
	if (!host->retune_paused) {
		host->retune_paused = 1;
		mmc_retune_needed(host);
		mmc_retune_hold(host);
	}
}
EXPORT_SYMBOL(mmc_retune_pause);

void mmc_retune_unpause(struct mmc_host *host)
{
	if (host->retune_paused) {
		host->retune_paused = 0;
		mmc_retune_release(host);
	}
}
EXPORT_SYMBOL(mmc_retune_unpause);
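
/*
 * Illustrative pairing (hypothetical caller): an operation that must
 * not be interrupted by re-tuning, e.g. a multi-command sequence that
 * has to stay atomic, can be wrapped like this:
 *
 *	mmc_retune_pause(host);		// re-tune once, then hold it off
 *	... issue the sensitive command sequence ...
 *	mmc_retune_unpause(host);	// allow re-tuning again
 */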

void mmc_retune_disable(struct mmc_host *host)
{
	mmc_retune_unpause(host);
	host->can_retune = 0;
	del_timer_sync(&host->retune_timer);
	host->retune_now = 0;
	host->need_retune = 0;
}
EXPORT_SYMBOL(mmc_retune_disable);

void mmc_retune_timer_stop(struct mmc_host *host)
{
	del_timer_sync(&host->retune_timer);
}
EXPORT_SYMBOL(mmc_retune_timer_stop);

void mmc_retune_hold(struct mmc_host *host)
{
	if (!host->hold_retune)
		host->retune_now = 1;
	host->hold_retune += 1;
}

void mmc_retune_release(struct mmc_host *host)
{
	if (host->hold_retune)
		host->hold_retune -= 1;
	else
		WARN_ON(1);
}
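
/*
 * mmc_retune_hold()/mmc_retune_release() keep a nestable hold count:
 * the first hold flags retune_now so mmc_retune() below runs at the
 * next opportunity, and an unbalanced release triggers a WARN_ON.
 * Sketch of a hypothetical caller:
 *
 *	mmc_retune_hold(host);		// hold_retune: 0 -> 1
 *	... several commands with re-tuning held off ...
 *	mmc_retune_release(host);	// hold_retune: 1 -> 0
 */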

int mmc_retune(struct mmc_host *host)
{
	bool return_to_hs400 = false;
	int err;

	if (host->retune_now)
		host->retune_now = 0;
	else
		return 0;

	if (!host->need_retune || host->doing_retune || !host->card ||
	    mmc_card_hs400es(host->card))
		return 0;

	host->need_retune = 0;

	host->doing_retune = 1;

	if (host->ios.timing == MMC_TIMING_MMC_HS400) {
		err = mmc_hs400_to_hs200(host->card);
		if (err)
			goto out;

		return_to_hs400 = true;

		if (host->ops->prepare_hs400_tuning)
			host->ops->prepare_hs400_tuning(host, &host->ios);
	}

	err = mmc_execute_tuning(host->card);
	if (err)
		goto out;

	if (return_to_hs400)
		err = mmc_hs200_to_hs400(host->card);
out:
	host->doing_retune = 0;

	return err;
}

static void mmc_retune_timer(unsigned long data)
{
	struct mmc_host *host = (struct mmc_host *)data;

	mmc_retune_needed(host);
}
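
/*
 * Flow note: mmc_retune_enable() arms retune_timer with retune_period;
 * when it fires, mmc_retune_needed() marks the host (need_retune). The
 * next time the core holds re-tuning around a request (the first
 * mmc_retune_hold() sets retune_now), mmc_retune() above drops out of
 * HS400 to HS200 if needed, re-runs tuning, and restores HS400.
 */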

/**
 * mmc_of_parse() - parse host's device-tree node
 * @host: host whose node should be parsed.
 *
 * To keep the rest of the MMC subsystem unaware of whether DT has been
 * used to instantiate and configure this host instance or not, we
 * parse the properties and set respective generic mmc-host flags and
 * parameters.
 */
int mmc_of_parse(struct mmc_host *host)
{
	struct device *dev = host->parent;
	u32 bus_width;
	int ret;
	bool cd_cap_invert, cd_gpio_invert = false;
	bool ro_cap_invert, ro_gpio_invert = false;

	if (!dev || !dev_fwnode(dev))
		return 0;

	/* "bus-width" is translated to MMC_CAP_*_BIT_DATA flags */
	if (device_property_read_u32(dev, "bus-width", &bus_width) < 0) {
		dev_dbg(host->parent,
			"\"bus-width\" property is missing, assuming 1 bit.\n");
		bus_width = 1;
	}

	switch (bus_width) {
	case 8:
		host->caps |= MMC_CAP_8_BIT_DATA;
		/* Hosts capable of 8-bit transfers can also do 4 bits */
	case 4:
		host->caps |= MMC_CAP_4_BIT_DATA;
		break;
	case 1:
		break;
	default:
		dev_err(host->parent,
			"Invalid \"bus-width\" value %u!\n", bus_width);
		return -EINVAL;
	}

	/* f_max is obtained from the optional "max-frequency" property */
	device_property_read_u32(dev, "max-frequency", &host->f_max);

	/*
	 * Configure CD and WP pins. They are both by default active low to
	 * match the SDHCI spec. If GPIOs are provided for CD and / or WP, the
	 * mmc-gpio helpers are used to attach, configure and use them. If
	 * polarity inversion is specified in DT, one of MMC_CAP2_CD_ACTIVE_HIGH
	 * and MMC_CAP2_RO_ACTIVE_HIGH capability-2 flags is set. If the
	 * "broken-cd" property is provided, the MMC_CAP_NEEDS_POLL capability
	 * is set. If the "non-removable" property is found, the
	 * MMC_CAP_NONREMOVABLE capability is set and no card-detection
	 * configuration is performed.
	 */

	/* Parse Card Detection */
	if (device_property_read_bool(dev, "non-removable")) {
		host->caps |= MMC_CAP_NONREMOVABLE;
	} else {
		cd_cap_invert = device_property_read_bool(dev, "cd-inverted");

		if (device_property_read_bool(dev, "broken-cd"))
			host->caps |= MMC_CAP_NEEDS_POLL;

		ret = mmc_gpiod_request_cd(host, "cd", 0, true,
					   0, &cd_gpio_invert);
		if (!ret)
			dev_info(host->parent, "Got CD GPIO\n");
		else if (ret != -ENOENT && ret != -ENOSYS)
			return ret;

		/*
		 * There are two ways to flag that the CD line is inverted:
		 * through the cd-inverted flag and by the GPIO line itself
		 * being inverted from the GPIO subsystem. This is a leftover
		 * from the times when the GPIO subsystem did not make it
		 * possible to flag a line as inverted.
		 *
		 * If the capability on the host AND the GPIO line are
		 * both inverted, the end result is that the CD line is
		 * not inverted.
		 */
		if (cd_cap_invert ^ cd_gpio_invert)
			host->caps2 |= MMC_CAP2_CD_ACTIVE_HIGH;
	}

	/* Parse Write Protection */
	ro_cap_invert = device_property_read_bool(dev, "wp-inverted");

	ret = mmc_gpiod_request_ro(host, "wp", 0, false, 0, &ro_gpio_invert);
	if (!ret)
		dev_info(host->parent, "Got WP GPIO\n");
	else if (ret != -ENOENT && ret != -ENOSYS)
		return ret;

	if (device_property_read_bool(dev, "disable-wp"))
		host->caps2 |= MMC_CAP2_NO_WRITE_PROTECT;

	/* See the comment on CD inversion above */
	if (ro_cap_invert ^ ro_gpio_invert)
		host->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH;

	if (device_property_read_bool(dev, "cap-sd-highspeed"))
		host->caps |= MMC_CAP_SD_HIGHSPEED;
	if (device_property_read_bool(dev, "cap-mmc-highspeed"))
		host->caps |= MMC_CAP_MMC_HIGHSPEED;
	if (device_property_read_bool(dev, "sd-uhs-sdr12"))
		host->caps |= MMC_CAP_UHS_SDR12;
	if (device_property_read_bool(dev, "sd-uhs-sdr25"))
		host->caps |= MMC_CAP_UHS_SDR25;
	if (device_property_read_bool(dev, "sd-uhs-sdr50"))
		host->caps |= MMC_CAP_UHS_SDR50;
	if (device_property_read_bool(dev, "sd-uhs-sdr104"))
		host->caps |= MMC_CAP_UHS_SDR104;
	if (device_property_read_bool(dev, "sd-uhs-ddr50"))
		host->caps |= MMC_CAP_UHS_DDR50;
	if (device_property_read_bool(dev, "cap-power-off-card"))
		host->caps |= MMC_CAP_POWER_OFF_CARD;
	if (device_property_read_bool(dev, "cap-mmc-hw-reset"))
		host->caps |= MMC_CAP_HW_RESET;
	if (device_property_read_bool(dev, "cap-sdio-irq"))
		host->caps |= MMC_CAP_SDIO_IRQ;
	if (device_property_read_bool(dev, "full-pwr-cycle"))
		host->caps2 |= MMC_CAP2_FULL_PWR_CYCLE;
	if (device_property_read_bool(dev, "keep-power-in-suspend"))
		host->pm_caps |= MMC_PM_KEEP_POWER;
	if (device_property_read_bool(dev, "wakeup-source") ||
	    device_property_read_bool(dev, "enable-sdio-wakeup")) /* legacy */
		host->pm_caps |= MMC_PM_WAKE_SDIO_IRQ;
	if (device_property_read_bool(dev, "mmc-ddr-1_8v"))
		host->caps |= MMC_CAP_1_8V_DDR;
	if (device_property_read_bool(dev, "mmc-ddr-1_2v"))
		host->caps |= MMC_CAP_1_2V_DDR;
	if (device_property_read_bool(dev, "mmc-hs200-1_8v"))
		host->caps2 |= MMC_CAP2_HS200_1_8V_SDR;
	if (device_property_read_bool(dev, "mmc-hs200-1_2v"))
		host->caps2 |= MMC_CAP2_HS200_1_2V_SDR;
	if (device_property_read_bool(dev, "mmc-hs400-1_8v"))
		host->caps2 |= MMC_CAP2_HS400_1_8V | MMC_CAP2_HS200_1_8V_SDR;
	if (device_property_read_bool(dev, "mmc-hs400-1_2v"))
		host->caps2 |= MMC_CAP2_HS400_1_2V | MMC_CAP2_HS200_1_2V_SDR;
	if (device_property_read_bool(dev, "mmc-hs400-enhanced-strobe"))
		host->caps2 |= MMC_CAP2_HS400_ES;
	if (device_property_read_bool(dev, "no-sdio"))
		host->caps2 |= MMC_CAP2_NO_SDIO;
	if (device_property_read_bool(dev, "no-sd"))
		host->caps2 |= MMC_CAP2_NO_SD;
	if (device_property_read_bool(dev, "no-mmc"))
		host->caps2 |= MMC_CAP2_NO_MMC;

	host->dsr_req = !device_property_read_u32(dev, "dsr", &host->dsr);
	if (host->dsr_req && (host->dsr & ~0xffff)) {
		dev_err(host->parent,
			"device tree specified broken value for DSR: 0x%x, ignoring\n",
			host->dsr);
		host->dsr_req = 0;
	}

	return mmc_pwrseq_alloc(host);
}

EXPORT_SYMBOL(mmc_of_parse);
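
/*
 * Illustrative device-tree fragment (hypothetical board; node name and
 * address are made up) showing properties mmc_of_parse() understands:
 *
 *	mmc@f0008000 {
 *		bus-width = <4>;
 *		max-frequency = <50000000>;
 *		cap-sd-highspeed;
 *		broken-cd;
 *		wp-inverted;
 *	};
 *
 * This yields MMC_CAP_4_BIT_DATA | MMC_CAP_SD_HIGHSPEED |
 * MMC_CAP_NEEDS_POLL in host->caps, f_max of 50 MHz, and
 * MMC_CAP2_RO_ACTIVE_HIGH in host->caps2 (absent a WP GPIO inversion).
 */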

/**
 * mmc_alloc_host - initialise the per-host structure.
 * @extra: sizeof private data structure
 * @dev: pointer to host device model structure
 *
 * Initialise the per-host structure.
 */
struct mmc_host *mmc_alloc_host(int extra, struct device *dev)
{
	int err;
	struct mmc_host *host;

	host = kzalloc(sizeof(struct mmc_host) + extra, GFP_KERNEL);
	if (!host)
		return NULL;

	/* scanning will be enabled when we're ready */
	host->rescan_disable = 1;

again:
	if (!ida_pre_get(&mmc_host_ida, GFP_KERNEL)) {
		kfree(host);
		return NULL;
	}

	spin_lock(&mmc_host_lock);
	err = ida_get_new(&mmc_host_ida, &host->index);
	spin_unlock(&mmc_host_lock);

	if (err == -EAGAIN) {
		goto again;
	} else if (err) {
		kfree(host);
		return NULL;
	}

	dev_set_name(&host->class_dev, "mmc%d", host->index);

	host->parent = dev;
	host->class_dev.parent = dev;
	host->class_dev.class = &mmc_host_class;
	device_initialize(&host->class_dev);
	device_enable_async_suspend(&host->class_dev);

	if (mmc_gpio_alloc(host)) {
		put_device(&host->class_dev);
		return NULL;
	}

	if (!mmc_host_clk_gate_wq_init(host)) {
		/*
		 * The class device is already initialized here, so drop
		 * the reference and let mmc_host_classdev_release() free
		 * the IDA index and the host.
		 */
		put_device(&host->class_dev);
		return NULL;
	}

	mmc_host_clk_init(host);

	spin_lock_init(&host->lock);
	init_waitqueue_head(&host->wq);
	INIT_DELAYED_WORK(&host->detect, mmc_rescan);
	setup_timer(&host->retune_timer, mmc_retune_timer, (unsigned long)host);

	mutex_init(&host->rpmb_req_mutex);

	/*
	 * By default, hosts do not support SGIO or large requests.
	 * They have to set these according to their abilities.
	 */
	host->max_segs = 1;
	host->max_seg_size = PAGE_SIZE;

	host->max_req_size = PAGE_SIZE;
	host->max_blk_size = 512;
	host->max_blk_count = PAGE_SIZE / 512;

	return host;
}

EXPORT_SYMBOL(mmc_alloc_host);
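
/*
 * Lifecycle sketch (hypothetical driver probe/remove, for illustration;
 * struct foo_priv and pdev are made-up names, error handling elided):
 *
 *	host = mmc_alloc_host(sizeof(struct foo_priv), &pdev->dev);
 *	if (!host)
 *		return -ENOMEM;
 *	mmc_of_parse(host);		// pick up DT caps, CD/WP GPIOs
 *	... set host->ops, f_min/f_max, caps ...
 *	mmc_add_host(host);		// starts servicing requests
 *
 *	// and on remove:
 *	mmc_remove_host(host);
 *	mmc_free_host(host);
 */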

static ssize_t show_enable(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mmc_host *host = cls_dev_to_mmc_host(dev);

	if (!host)
		return -EINVAL;

	return snprintf(buf, PAGE_SIZE, "%d\n", mmc_can_scale_clk(host));
}

static ssize_t store_enable(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct mmc_host *host = cls_dev_to_mmc_host(dev);
	unsigned long value;

	if (!host || !host->card || kstrtoul(buf, 0, &value))
		return -EINVAL;

	mmc_get_card(host->card);

	if (!value) {
		/* Suspend the clock scaling and mask host capability */
		if (host->clk_scaling.enable)
			mmc_suspend_clk_scaling(host);
		host->clk_scaling.enable = false;
		host->caps2 &= ~MMC_CAP2_CLK_SCALE;
		host->clk_scaling.state = MMC_LOAD_HIGH;
		/* Set to max. frequency when disabling */
		mmc_clk_update_freq(host, host->card->clk_scaling_highest,
				    host->clk_scaling.state, 0);
	} else {
		/* Unmask host capability and resume scaling */
		host->caps2 |= MMC_CAP2_CLK_SCALE;
		if (!host->clk_scaling.enable) {
			host->clk_scaling.enable = true;
			mmc_resume_clk_scaling(host);
		}
	}

	mmc_put_card(host->card);

	return count;
}

static ssize_t show_up_threshold(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mmc_host *host = cls_dev_to_mmc_host(dev);

	if (!host)
		return -EINVAL;

	return snprintf(buf, PAGE_SIZE, "%d\n", host->clk_scaling.upthreshold);
}

#define MAX_PERCENTAGE 100
static ssize_t store_up_threshold(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct mmc_host *host = cls_dev_to_mmc_host(dev);
	unsigned long value;

	if (!host || kstrtoul(buf, 0, &value) || (value > MAX_PERCENTAGE))
		return -EINVAL;

	host->clk_scaling.upthreshold = value;

	pr_debug("%s: clkscale_up_thresh set to %lu\n",
		 mmc_hostname(host), value);
	return count;
}

static ssize_t show_down_threshold(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mmc_host *host = cls_dev_to_mmc_host(dev);

	if (!host)
		return -EINVAL;

	return snprintf(buf, PAGE_SIZE, "%d\n",
			host->clk_scaling.downthreshold);
}

static ssize_t store_down_threshold(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct mmc_host *host = cls_dev_to_mmc_host(dev);
	unsigned long value;

	if (!host || kstrtoul(buf, 0, &value) || (value > MAX_PERCENTAGE))
		return -EINVAL;

	host->clk_scaling.downthreshold = value;

	pr_debug("%s: clkscale_down_thresh set to %lu\n",
		 mmc_hostname(host), value);
	return count;
}

static ssize_t show_polling(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mmc_host *host = cls_dev_to_mmc_host(dev);

	if (!host)
		return -EINVAL;

	return snprintf(buf, PAGE_SIZE, "%lu milliseconds\n",
			host->clk_scaling.polling_delay_ms);
}

static ssize_t store_polling(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct mmc_host *host = cls_dev_to_mmc_host(dev);
	unsigned long value;

	if (!host || kstrtoul(buf, 0, &value))
		return -EINVAL;

	host->clk_scaling.polling_delay_ms = value;

	pr_debug("%s: clkscale_polling_delay_ms set to %lu\n",
		 mmc_hostname(host), value);
	return count;
}

DEVICE_ATTR(enable, S_IRUGO | S_IWUSR,
		show_enable, store_enable);
DEVICE_ATTR(polling_interval, S_IRUGO | S_IWUSR,
		show_polling, store_polling);
DEVICE_ATTR(up_threshold, S_IRUGO | S_IWUSR,
		show_up_threshold, store_up_threshold);
DEVICE_ATTR(down_threshold, S_IRUGO | S_IWUSR,
		show_down_threshold, store_down_threshold);

static struct attribute *clk_scaling_attrs[] = {
	&dev_attr_enable.attr,
	&dev_attr_up_threshold.attr,
	&dev_attr_down_threshold.attr,
	&dev_attr_polling_interval.attr,
	NULL,
};

static struct attribute_group clk_scaling_attr_grp = {
	.name = "clk_scaling",
	.attrs = clk_scaling_attrs,
};

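/*
 * The group above appears as /sys/class/mmc_host/mmcX/clk_scaling/.
 * Illustrative tuning session from a shell (paths assume host mmc0):
 *
 *	echo 1 > /sys/class/mmc_host/mmc0/clk_scaling/enable
 *	echo 40 > /sys/class/mmc_host/mmc0/clk_scaling/up_threshold
 *	echo 10 > /sys/class/mmc_host/mmc0/clk_scaling/down_threshold
 *	echo 200 > /sys/class/mmc_host/mmc0/clk_scaling/polling_interval
 *
 * Thresholds are load percentages (capped at 100); the polling interval
 * is in milliseconds.
 */
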
#ifdef CONFIG_MMC_PERF_PROFILING
static ssize_t
show_perf(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct mmc_host *host = cls_dev_to_mmc_host(dev);
	int64_t rtime_drv, wtime_drv;
	unsigned long rbytes_drv, wbytes_drv, flags;

	spin_lock_irqsave(&host->lock, flags);

	rbytes_drv = host->perf.rbytes_drv;
	wbytes_drv = host->perf.wbytes_drv;

	rtime_drv = ktime_to_us(host->perf.rtime_drv);
	wtime_drv = ktime_to_us(host->perf.wtime_drv);

	spin_unlock_irqrestore(&host->lock, flags);

	return snprintf(buf, PAGE_SIZE, "Write performance at driver level: "
			"%lu bytes in %lld microseconds\n"
			"Read performance at driver level: "
			"%lu bytes in %lld microseconds\n",
			wbytes_drv, wtime_drv,
			rbytes_drv, rtime_drv);
}

static ssize_t
set_perf(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct mmc_host *host = cls_dev_to_mmc_host(dev);
	int64_t value;
	unsigned long flags;

	if (sscanf(buf, "%lld", &value) != 1)
		return -EINVAL;

	spin_lock_irqsave(&host->lock, flags);
	if (!value) {
		memset(&host->perf, 0, sizeof(host->perf));
		host->perf_enable = false;
	} else {
		host->perf_enable = true;
	}
	spin_unlock_irqrestore(&host->lock, flags);

	return count;
}

static DEVICE_ATTR(perf, S_IRUGO | S_IWUSR,
		show_perf, set_perf);

#endif

static struct attribute *dev_attrs[] = {
#ifdef CONFIG_MMC_PERF_PROFILING
	&dev_attr_perf.attr,
#endif
	NULL,
};
static struct attribute_group dev_attr_grp = {
	.attrs = dev_attrs,
};

/**
 * mmc_add_host - initialise host hardware
 * @host: mmc host
 *
 * Register the host with the driver model. The host must be
 * prepared to start servicing requests before this function
 * completes.
 */
int mmc_add_host(struct mmc_host *host)
{
	int err;

	WARN_ON((host->caps & MMC_CAP_SDIO_IRQ) &&
		!host->ops->enable_sdio_irq);

	err = device_add(&host->class_dev);
	if (err)
		return err;

	led_trigger_register_simple(dev_name(&host->class_dev), &host->led);

	host->clk_scaling.upthreshold = MMC_DEVFRQ_DEFAULT_UP_THRESHOLD;
	host->clk_scaling.downthreshold = MMC_DEVFRQ_DEFAULT_DOWN_THRESHOLD;
	host->clk_scaling.polling_delay_ms = MMC_DEVFRQ_DEFAULT_POLLING_MSEC;
	host->clk_scaling.skip_clk_scale_freq_update = false;

#ifdef CONFIG_DEBUG_FS
	mmc_add_host_debugfs(host);
#endif
	mmc_host_clk_sysfs_init(host);
	mmc_trace_init(host);

	err = sysfs_create_group(&host->class_dev.kobj, &clk_scaling_attr_grp);
	if (err)
		pr_err("%s: failed to create clk scale sysfs group with err %d\n",
		       __func__, err);

#ifdef CONFIG_BLOCK
	mmc_latency_hist_sysfs_init(host);
#endif

	err = sysfs_create_group(&host->class_dev.kobj, &dev_attr_grp);
	if (err)
		pr_err("%s: failed to create sysfs group with err %d\n",
		       __func__, err);

	mmc_start_host(host);
	if (!(host->pm_flags & MMC_PM_IGNORE_PM_NOTIFY))
		mmc_register_pm_notifier(host);

	return 0;
}

EXPORT_SYMBOL(mmc_add_host);

/**
 * mmc_remove_host - remove host hardware
 * @host: mmc host
 *
 * Unregister and remove all cards associated with this host,
 * and power down the MMC bus. No new requests will be issued
 * after this function has returned.
 */
void mmc_remove_host(struct mmc_host *host)
{
	if (!(host->pm_flags & MMC_PM_IGNORE_PM_NOTIFY))
		mmc_unregister_pm_notifier(host);
	mmc_stop_host(host);

#ifdef CONFIG_DEBUG_FS
	mmc_remove_host_debugfs(host);
#endif

#ifdef CONFIG_BLOCK
	mmc_latency_hist_sysfs_exit(host);
#endif

	/* Remove the groups from the kobject they were created on */
	sysfs_remove_group(&host->class_dev.kobj, &dev_attr_grp);
	sysfs_remove_group(&host->class_dev.kobj, &clk_scaling_attr_grp);

	device_del(&host->class_dev);

	led_trigger_unregister_simple(host->led);

	mmc_host_clk_exit(host);
}

EXPORT_SYMBOL(mmc_remove_host);

/**
 * mmc_free_host - free the host structure
 * @host: mmc host
 *
 * Free the host once all references to it have been dropped.
 */
void mmc_free_host(struct mmc_host *host)
{
	mmc_pwrseq_free(host);
	put_device(&host->class_dev);
}

EXPORT_SYMBOL(mmc_free_host);