/*
 * linux/drivers/mmc/core/host.c
 *
 * Copyright (C) 2003 Russell King, All Rights Reserved.
 * Copyright (C) 2007-2008 Pierre Ossman
 * Copyright (C) 2010 Linus Walleij
 * Copyright (c) 2012, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * MMC host class device management
 */

#include <linux/device.h>
#include <linux/err.h>
#include <linux/idr.h>
#include <linux/of.h>
#include <linux/of_gpio.h>
#include <linux/pagemap.h>
#include <linux/export.h>
#include <linux/leds.h>
#include <linux/slab.h>

#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/mmc/ring_buffer.h>

#include <linux/mmc/slot-gpio.h>

#include "core.h"
#include "host.h"
#include "slot-gpio.h"
#include "pwrseq.h"

#define MMC_DEVFRQ_DEFAULT_UP_THRESHOLD 35
#define MMC_DEVFRQ_DEFAULT_DOWN_THRESHOLD 5
#define MMC_DEVFRQ_DEFAULT_POLLING_MSEC 100

static DEFINE_IDA(mmc_host_ida);
static DEFINE_SPINLOCK(mmc_host_lock);

static void mmc_host_classdev_release(struct device *dev)
{
	struct mmc_host *host = cls_dev_to_mmc_host(dev);

	spin_lock(&mmc_host_lock);
	ida_remove(&mmc_host_ida, host->index);
	spin_unlock(&mmc_host_lock);
	kfree(host);
}

static int mmc_host_prepare(struct device *dev)
{
	/*
	 * Since mmc_host is a virtual device, we don't have to do anything.
	 * If we returned a positive value, the PM framework would consider
	 * runtime suspend and system suspend of this device to be the same
	 * and would set the direct_complete flag. We don't want that, as
	 * mmc_host always has a positive disable_depth, so setting the flag
	 * would not speed up the suspend process anyway.
	 * So return 0.
	 */
	return 0;
}

static const struct dev_pm_ops mmc_pm_ops = {
	.prepare = mmc_host_prepare,
};

static struct class mmc_host_class = {
	.name		= "mmc_host",
	.dev_release	= mmc_host_classdev_release,
	.pm		= &mmc_pm_ops,
};

int mmc_register_host_class(void)
{
	return class_register(&mmc_host_class);
}

void mmc_unregister_host_class(void)
{
	class_unregister(&mmc_host_class);
}

#ifdef CONFIG_MMC_CLKGATE
static ssize_t clkgate_delay_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mmc_host *host = cls_dev_to_mmc_host(dev);

	return snprintf(buf, PAGE_SIZE, "%lu\n", host->clkgate_delay);
}

static ssize_t clkgate_delay_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct mmc_host *host = cls_dev_to_mmc_host(dev);
	unsigned long flags, value;

	if (kstrtoul(buf, 0, &value))
		return -EINVAL;

	spin_lock_irqsave(&host->clk_lock, flags);
	host->clkgate_delay = value;
	spin_unlock_irqrestore(&host->clk_lock, flags);
	return count;
}

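/*
 * Illustrative use from user space (assuming this host enumerated as
 * mmc0; the attribute is created on the class device in
 * mmc_host_clk_sysfs_init() below):
 *
 *	echo 200 > /sys/class/mmc_host/mmc0/clkgate_delay
 *
 * delays clock gating by 200 ms after the last clock reference is
 * dropped in mmc_host_clk_release().
 */
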
/*
 * Enabling clock gating will make the core call out to the host
 * once up and once down when it performs a request or card operation,
 * intermingled in any fashion. The driver will see this through
 * set_ios() operations with the ios.clock field set to 0 to gate
 * (disable) the block clock, and to the old frequency to enable it
 * again.
 */
static void mmc_host_clk_gate_delayed(struct mmc_host *host)
{
	unsigned long tick_ns;
	unsigned long freq = host->ios.clock;
	unsigned long flags;

	if (!freq) {
		pr_debug("%s: frequency set to 0 in disable function, this means the clock is already disabled.\n",
			 mmc_hostname(host));
		return;
	}
	/*
	 * New requests may have appeared while we were scheduling,
	 * then there is no reason to delay the check before
	 * clk_disable().
	 */
	spin_lock_irqsave(&host->clk_lock, flags);

	/*
	 * Delay n bus cycles (at least 8 from MMC spec) before attempting
	 * to disable the MCI block clock. The reference count may have
	 * gone up again after this delay due to rescheduling!
	 */
	if (!host->clk_requests) {
		spin_unlock_irqrestore(&host->clk_lock, flags);
		tick_ns = DIV_ROUND_UP(1000000000, freq);
		ndelay(host->clk_delay * tick_ns);
	} else {
		/* New users appeared while waiting for this work */
		spin_unlock_irqrestore(&host->clk_lock, flags);
		return;
	}
	mutex_lock(&host->clk_gate_mutex);
	spin_lock_irqsave(&host->clk_lock, flags);
	if (!host->clk_requests) {
		spin_unlock_irqrestore(&host->clk_lock, flags);
		/* This will set host->ios.clock to 0 */
		mmc_gate_clock(host);
		spin_lock_irqsave(&host->clk_lock, flags);
		pr_debug("%s: gated MCI clock\n", mmc_hostname(host));
		MMC_TRACE(host, "clocks are gated\n");
	}
	spin_unlock_irqrestore(&host->clk_lock, flags);
	mutex_unlock(&host->clk_gate_mutex);
}

/*
 * Internal work. Work to disable the clock at some later point.
 */
static void mmc_host_clk_gate_work(struct work_struct *work)
{
	struct mmc_host *host = container_of(work, struct mmc_host,
					     clk_gate_work.work);

	mmc_host_clk_gate_delayed(host);
}

/**
 * mmc_host_clk_hold - ungate hardware MCI clocks
 * @host: host to ungate.
 *
 * Makes sure the host ios.clock is restored to a non-zero value
 * past this call. Increase clock reference count and ungate clock
 * if we're the first user.
 */
void mmc_host_clk_hold(struct mmc_host *host)
{
	unsigned long flags;

	/* cancel any clock gating work scheduled by mmc_host_clk_release() */
	cancel_delayed_work_sync(&host->clk_gate_work);
	mutex_lock(&host->clk_gate_mutex);
	spin_lock_irqsave(&host->clk_lock, flags);
	if (host->clk_gated) {
		spin_unlock_irqrestore(&host->clk_lock, flags);
		mmc_ungate_clock(host);

		spin_lock_irqsave(&host->clk_lock, flags);
		pr_debug("%s: ungated MCI clock\n", mmc_hostname(host));
		MMC_TRACE(host, "clocks are ungated\n");
	}
	host->clk_requests++;
	spin_unlock_irqrestore(&host->clk_lock, flags);
	mutex_unlock(&host->clk_gate_mutex);
}

/**
 * mmc_host_may_gate_card - check if this card may be gated
 * @card: card to check.
 */
bool mmc_host_may_gate_card(struct mmc_card *card)
{
	/* If there is no card we may gate it */
	if (!card)
		return true;

	/*
	 * An SDIO 3.0 card allows the clock to be gated off, so check
	 * whether that is the case for this card.
	 */
	if (mmc_card_sdio(card) && card->cccr.async_intr_sup)
		return true;

	/*
	 * Don't gate SDIO cards! These need to be clocked at all times
	 * since they may be independent systems generating interrupts
	 * and other events. The clock requests counter from the core will
	 * go down to zero since the core does not need it, but we will not
	 * gate the clock, because there is somebody out there that may still
	 * be using it.
	 */
	return !(card->quirks & MMC_QUIRK_BROKEN_CLK_GATING);
}

/**
 * mmc_host_clk_release - gate off hardware MCI clocks
 * @host: host to gate.
 *
 * Calls the host driver with ios.clock set to zero as often as possible
 * in order to gate off hardware MCI clocks. Decrease clock reference
 * count and schedule disabling of clock.
 */
void mmc_host_clk_release(struct mmc_host *host)
{
	unsigned long flags;

	spin_lock_irqsave(&host->clk_lock, flags);
	host->clk_requests--;
	if (mmc_host_may_gate_card(host->card) &&
	    !host->clk_requests)
		queue_delayed_work(host->clk_gate_wq, &host->clk_gate_work,
				   msecs_to_jiffies(host->clkgate_delay));
	spin_unlock_irqrestore(&host->clk_lock, flags);
}

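/*
 * Illustrative pairing (a sketch, not code from this file): a host
 * driver keeps the clock ungated across a direct controller access by
 * bracketing it with the two calls above:
 *
 *	mmc_host_clk_hold(mmc);
 *	... touch controller registers ...
 *	mmc_host_clk_release(mmc);
 *
 * The reference count guarantees the clock stays up until the last
 * holder releases it, after which gating is scheduled on clk_gate_wq.
 */
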
/**
 * mmc_host_clk_rate - get current clock frequency setting
 * @host: host to get the clock frequency for.
 *
 * Returns current clock frequency regardless of gating.
 */
unsigned int mmc_host_clk_rate(struct mmc_host *host)
{
	unsigned long freq;
	unsigned long flags;

	spin_lock_irqsave(&host->clk_lock, flags);
	if (host->clk_gated)
		freq = host->clk_old;
	else
		freq = host->ios.clock;
	spin_unlock_irqrestore(&host->clk_lock, flags);
	return freq;
}

/**
 * mmc_host_clk_init - set up clock gating code
 * @host: host with potential clock to control
 */
static inline void mmc_host_clk_init(struct mmc_host *host)
{
	host->clk_requests = 0;
	/* Hold MCI clock for 8 cycles by default */
	host->clk_delay = 8;
	/*
	 * Default clock gating delay is 0ms to avoid wasting power.
	 * This value can be tuned by writing into sysfs entry.
	 */
	host->clkgate_delay = 0;
	host->clk_gated = false;
	INIT_DELAYED_WORK(&host->clk_gate_work, mmc_host_clk_gate_work);
	spin_lock_init(&host->clk_lock);
	mutex_init(&host->clk_gate_mutex);
}

/**
 * mmc_host_clk_exit - shut down clock gating code
 * @host: host with potential clock to control
 */
static inline void mmc_host_clk_exit(struct mmc_host *host)
{
	/*
	 * Wait for any outstanding gate and then make sure we're
	 * ungated before exiting.
	 */
	if (cancel_delayed_work_sync(&host->clk_gate_work))
		mmc_host_clk_gate_delayed(host);
	if (host->clk_gated)
		mmc_host_clk_hold(host);
	if (host->clk_gate_wq)
		destroy_workqueue(host->clk_gate_wq);
	/* There should be only one user now */
	WARN_ON(host->clk_requests > 1);
}

static inline void mmc_host_clk_sysfs_init(struct mmc_host *host)
{
	host->clkgate_delay_attr.show = clkgate_delay_show;
	host->clkgate_delay_attr.store = clkgate_delay_store;
	sysfs_attr_init(&host->clkgate_delay_attr.attr);
	host->clkgate_delay_attr.attr.name = "clkgate_delay";
	host->clkgate_delay_attr.attr.mode = S_IRUGO | S_IWUSR;
	if (device_create_file(&host->class_dev, &host->clkgate_delay_attr))
		pr_err("%s: Failed to create clkgate_delay sysfs entry\n",
		       mmc_hostname(host));
}

static inline bool mmc_host_clk_gate_wq_init(struct mmc_host *host)
{
	char *wq = NULL;
	int wq_nl;
	bool ret = true;

	/* sizeof() covers the prefix and its NUL; strlen() for the name */
	wq_nl = sizeof("mmc_clk_gate/") + strlen(mmc_hostname(host)) + 1;

	wq = kzalloc(wq_nl, GFP_KERNEL);
	if (!wq) {
		ret = false;
		goto out;
	}

	snprintf(wq, wq_nl, "mmc_clk_gate/%s", mmc_hostname(host));

	/*
	 * Create a workqueue with the WQ_MEM_RECLAIM flag set for the
	 * mmc clock gate work. Because the mmc thread is created with
	 * the PF_MEMALLOC flag set, the kernel checks for the
	 * WQ_MEM_RECLAIM flag when flushing the workqueue; if it is
	 * not set, a kernel warning is triggered.
	 */
	host->clk_gate_wq = create_workqueue(wq);
	if (!host->clk_gate_wq) {
		ret = false;
		dev_err(host->parent,
			"failed to create clock gate work queue\n");
	}

	kfree(wq);
out:
	return ret;
}
#else

static inline void mmc_host_clk_init(struct mmc_host *host)
{
}

static inline void mmc_host_clk_exit(struct mmc_host *host)
{
}

static inline void mmc_host_clk_sysfs_init(struct mmc_host *host)
{
}

bool mmc_host_may_gate_card(struct mmc_card *card)
{
	return false;
}

static inline bool mmc_host_clk_gate_wq_init(struct mmc_host *host)
{
	return true;
}
#endif

void mmc_retune_enable(struct mmc_host *host)
{
	host->can_retune = 1;
	if (host->retune_period)
		mod_timer(&host->retune_timer,
			  jiffies + host->retune_period * HZ);
}
EXPORT_SYMBOL(mmc_retune_enable);

/*
 * Pause re-tuning for a small set of operations. The pause begins after the
 * next command and after first doing re-tuning.
 */
void mmc_retune_pause(struct mmc_host *host)
{
	if (!host->retune_paused) {
		host->retune_paused = 1;
		mmc_retune_needed(host);
		mmc_retune_hold(host);
	}
}
EXPORT_SYMBOL(mmc_retune_pause);

void mmc_retune_unpause(struct mmc_host *host)
{
	if (host->retune_paused) {
		host->retune_paused = 0;
		mmc_retune_release(host);
	}
}
EXPORT_SYMBOL(mmc_retune_unpause);

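/*
 * Illustrative pairing (a sketch, not code from this file): callers
 * bracket command sequences that must not be interrupted by re-tuning,
 * such as an RPMB access:
 *
 *	mmc_retune_pause(card->host);
 *	... issue the command sequence ...
 *	mmc_retune_unpause(card->host);
 */
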
void mmc_retune_disable(struct mmc_host *host)
{
	mmc_retune_unpause(host);
	host->can_retune = 0;
	del_timer_sync(&host->retune_timer);
	host->retune_now = 0;
	host->need_retune = 0;
}
EXPORT_SYMBOL(mmc_retune_disable);

void mmc_retune_timer_stop(struct mmc_host *host)
{
	del_timer_sync(&host->retune_timer);
}
EXPORT_SYMBOL(mmc_retune_timer_stop);

void mmc_retune_hold(struct mmc_host *host)
{
	if (!host->hold_retune)
		host->retune_now = 1;
	host->hold_retune += 1;
}

void mmc_retune_release(struct mmc_host *host)
{
	if (host->hold_retune)
		host->hold_retune -= 1;
	else
		WARN_ON(1);
}

int mmc_retune(struct mmc_host *host)
{
	bool return_to_hs400 = false;
	int err;

	if (host->retune_now)
		host->retune_now = 0;
	else
		return 0;

	if (!host->need_retune || host->doing_retune || !host->card ||
	    mmc_card_hs400es(host->card))
		return 0;

	host->need_retune = 0;

	host->doing_retune = 1;

	if (host->ios.timing == MMC_TIMING_MMC_HS400) {
		err = mmc_hs400_to_hs200(host->card);
		if (err)
			goto out;

		return_to_hs400 = true;

		if (host->ops->prepare_hs400_tuning)
			host->ops->prepare_hs400_tuning(host, &host->ios);
	}

	err = mmc_execute_tuning(host->card);
	if (err)
		goto out;

	if (return_to_hs400)
		err = mmc_hs200_to_hs400(host->card);
out:
	host->doing_retune = 0;

	return err;
}

static void mmc_retune_timer(unsigned long data)
{
	struct mmc_host *host = (struct mmc_host *)data;

	mmc_retune_needed(host);
}

/**
 * mmc_of_parse() - parse host's device-tree node
 * @host: host whose node should be parsed.
 *
 * To keep the rest of the MMC subsystem unaware of whether DT has been
 * used to instantiate and configure this host instance or not, we
 * parse the properties and set respective generic mmc-host flags and
 * parameters.
 */
int mmc_of_parse(struct mmc_host *host)
{
	struct device *dev = host->parent;
	u32 bus_width;
	int ret;
	bool cd_cap_invert, cd_gpio_invert = false;
	bool ro_cap_invert, ro_gpio_invert = false;

	if (!dev || !dev_fwnode(dev))
		return 0;

	/* "bus-width" is translated to MMC_CAP_*_BIT_DATA flags */
	if (device_property_read_u32(dev, "bus-width", &bus_width) < 0) {
		dev_dbg(host->parent,
			"\"bus-width\" property is missing, assuming 1 bit.\n");
		bus_width = 1;
	}

	switch (bus_width) {
	case 8:
		host->caps |= MMC_CAP_8_BIT_DATA;
		/* Hosts capable of 8-bit transfers can also do 4 bits */
	case 4:
		host->caps |= MMC_CAP_4_BIT_DATA;
		break;
	case 1:
		break;
	default:
		dev_err(host->parent,
			"Invalid \"bus-width\" value %u!\n", bus_width);
		return -EINVAL;
	}

	/* f_max is obtained from the optional "max-frequency" property */
	device_property_read_u32(dev, "max-frequency", &host->f_max);

	/*
	 * Configure CD and WP pins. They are both by default active low to
	 * match the SDHCI spec. If GPIOs are provided for CD and / or WP, the
	 * mmc-gpio helpers are used to attach, configure and use them. If
	 * polarity inversion is specified in DT, one of MMC_CAP2_CD_ACTIVE_HIGH
	 * and MMC_CAP2_RO_ACTIVE_HIGH capability-2 flags is set. If the
	 * "broken-cd" property is provided, the MMC_CAP_NEEDS_POLL capability
	 * is set. If the "non-removable" property is found, the
	 * MMC_CAP_NONREMOVABLE capability is set and no card-detection
	 * configuration is performed.
	 */

	/* Parse Card Detection */
	if (device_property_read_bool(dev, "non-removable")) {
		host->caps |= MMC_CAP_NONREMOVABLE;
	} else {
		cd_cap_invert = device_property_read_bool(dev, "cd-inverted");

		if (device_property_read_bool(dev, "broken-cd"))
			host->caps |= MMC_CAP_NEEDS_POLL;

		ret = mmc_gpiod_request_cd(host, "cd", 0, true,
					   0, &cd_gpio_invert);
		if (!ret)
			dev_info(host->parent, "Got CD GPIO\n");
		else if (ret != -ENOENT && ret != -ENOSYS)
			return ret;

		/*
		 * There are two ways to flag that the CD line is inverted:
		 * through the cd-inverted flag and by the GPIO line itself
		 * being inverted from the GPIO subsystem. This is a leftover
		 * from the times when the GPIO subsystem did not make it
		 * possible to flag a line as inverted.
		 *
		 * If the capability on the host AND the GPIO line are
		 * both inverted, the end result is that the CD line is
		 * not inverted.
		 */
		if (cd_cap_invert ^ cd_gpio_invert)
			host->caps2 |= MMC_CAP2_CD_ACTIVE_HIGH;
	}

	/* Parse Write Protection */
	ro_cap_invert = device_property_read_bool(dev, "wp-inverted");

	ret = mmc_gpiod_request_ro(host, "wp", 0, false, 0, &ro_gpio_invert);
	if (!ret)
		dev_info(host->parent, "Got WP GPIO\n");
	else if (ret != -ENOENT && ret != -ENOSYS)
		return ret;

	if (device_property_read_bool(dev, "disable-wp"))
		host->caps2 |= MMC_CAP2_NO_WRITE_PROTECT;

	/* See the comment on CD inversion above */
	if (ro_cap_invert ^ ro_gpio_invert)
		host->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH;

	if (device_property_read_bool(dev, "cap-sd-highspeed"))
		host->caps |= MMC_CAP_SD_HIGHSPEED;
	if (device_property_read_bool(dev, "cap-mmc-highspeed"))
		host->caps |= MMC_CAP_MMC_HIGHSPEED;
	if (device_property_read_bool(dev, "sd-uhs-sdr12"))
		host->caps |= MMC_CAP_UHS_SDR12;
	if (device_property_read_bool(dev, "sd-uhs-sdr25"))
		host->caps |= MMC_CAP_UHS_SDR25;
	if (device_property_read_bool(dev, "sd-uhs-sdr50"))
		host->caps |= MMC_CAP_UHS_SDR50;
	if (device_property_read_bool(dev, "sd-uhs-sdr104"))
		host->caps |= MMC_CAP_UHS_SDR104;
	if (device_property_read_bool(dev, "sd-uhs-ddr50"))
		host->caps |= MMC_CAP_UHS_DDR50;
	if (device_property_read_bool(dev, "cap-power-off-card"))
		host->caps |= MMC_CAP_POWER_OFF_CARD;
	if (device_property_read_bool(dev, "cap-mmc-hw-reset"))
		host->caps |= MMC_CAP_HW_RESET;
	if (device_property_read_bool(dev, "cap-sdio-irq"))
		host->caps |= MMC_CAP_SDIO_IRQ;
	if (device_property_read_bool(dev, "full-pwr-cycle"))
		host->caps2 |= MMC_CAP2_FULL_PWR_CYCLE;
	if (device_property_read_bool(dev, "keep-power-in-suspend"))
		host->pm_caps |= MMC_PM_KEEP_POWER;
	if (device_property_read_bool(dev, "wakeup-source") ||
	    device_property_read_bool(dev, "enable-sdio-wakeup")) /* legacy */
		host->pm_caps |= MMC_PM_WAKE_SDIO_IRQ;
	if (device_property_read_bool(dev, "mmc-ddr-1_8v"))
		host->caps |= MMC_CAP_1_8V_DDR;
	if (device_property_read_bool(dev, "mmc-ddr-1_2v"))
		host->caps |= MMC_CAP_1_2V_DDR;
	if (device_property_read_bool(dev, "mmc-hs200-1_8v"))
		host->caps2 |= MMC_CAP2_HS200_1_8V_SDR;
	if (device_property_read_bool(dev, "mmc-hs200-1_2v"))
		host->caps2 |= MMC_CAP2_HS200_1_2V_SDR;
	if (device_property_read_bool(dev, "mmc-hs400-1_8v"))
		host->caps2 |= MMC_CAP2_HS400_1_8V | MMC_CAP2_HS200_1_8V_SDR;
	if (device_property_read_bool(dev, "mmc-hs400-1_2v"))
		host->caps2 |= MMC_CAP2_HS400_1_2V | MMC_CAP2_HS200_1_2V_SDR;
	if (device_property_read_bool(dev, "mmc-hs400-enhanced-strobe"))
		host->caps2 |= MMC_CAP2_HS400_ES;
	if (device_property_read_bool(dev, "no-sdio"))
		host->caps2 |= MMC_CAP2_NO_SDIO;
	if (device_property_read_bool(dev, "no-sd"))
		host->caps2 |= MMC_CAP2_NO_SD;
	if (device_property_read_bool(dev, "no-mmc"))
		host->caps2 |= MMC_CAP2_NO_MMC;

	host->dsr_req = !device_property_read_u32(dev, "dsr", &host->dsr);
	if (host->dsr_req && (host->dsr & ~0xffff)) {
		dev_err(host->parent,
			"device tree specified broken value for DSR: 0x%x, ignoring\n",
			host->dsr);
		host->dsr_req = 0;
	}

	return mmc_pwrseq_alloc(host);
}

EXPORT_SYMBOL(mmc_of_parse);

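/*
 * Example device-tree fragment (illustrative only; the node label is
 * hypothetical, the properties are the generic bindings parsed above):
 *
 *	&sdhc_1 {
 *		bus-width = <8>;
 *		max-frequency = <200000000>;
 *		non-removable;
 *		cap-mmc-highspeed;
 *		mmc-hs400-1_8v;
 *	};
 *
 * For such a node mmc_of_parse() sets MMC_CAP_8_BIT_DATA |
 * MMC_CAP_4_BIT_DATA | MMC_CAP_MMC_HIGHSPEED | MMC_CAP_NONREMOVABLE,
 * the HS400/HS200 1.8V caps2 bits, and host->f_max = 200000000.
 */
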
/**
 * mmc_alloc_host - initialise the per-host structure.
 * @extra: sizeof private data structure
 * @dev: pointer to host device model structure
 *
 * Initialise the per-host structure.
 */
struct mmc_host *mmc_alloc_host(int extra, struct device *dev)
{
	int err;
	struct mmc_host *host;

	host = kzalloc(sizeof(struct mmc_host) + extra, GFP_KERNEL);
	if (!host)
		return NULL;

	/* scanning will be enabled when we're ready */
	host->rescan_disable = 1;

again:
	if (!ida_pre_get(&mmc_host_ida, GFP_KERNEL)) {
		kfree(host);
		return NULL;
	}

	spin_lock(&mmc_host_lock);
	err = ida_get_new(&mmc_host_ida, &host->index);
	spin_unlock(&mmc_host_lock);

	if (err == -EAGAIN) {
		goto again;
	} else if (err) {
		kfree(host);
		return NULL;
	}

	dev_set_name(&host->class_dev, "mmc%d", host->index);

	host->parent = dev;
	host->class_dev.parent = dev;
	host->class_dev.class = &mmc_host_class;
	device_initialize(&host->class_dev);
	device_enable_async_suspend(&host->class_dev);

	if (mmc_gpio_alloc(host)) {
		put_device(&host->class_dev);
		return NULL;
	}

	if (!mmc_host_clk_gate_wq_init(host)) {
		kfree(host);
		return NULL;
	}

	mmc_host_clk_init(host);

	spin_lock_init(&host->lock);
	init_waitqueue_head(&host->wq);
	INIT_DELAYED_WORK(&host->detect, mmc_rescan);
	setup_timer(&host->retune_timer, mmc_retune_timer, (unsigned long)host);

	mutex_init(&host->rpmb_req_mutex);

	/*
	 * By default, hosts do not support SGIO or large requests.
	 * They have to set these according to their abilities.
	 */
	host->max_segs = 1;
	host->max_seg_size = PAGE_SIZE;

	host->max_req_size = PAGE_SIZE;
	host->max_blk_size = 512;
	host->max_blk_count = PAGE_SIZE / 512;

	return host;
}

EXPORT_SYMBOL(mmc_alloc_host);

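/*
 * Typical use from a host driver's probe() (a sketch; struct my_host
 * and my_pdev are placeholders, not symbols from this file):
 *
 *	struct mmc_host *mmc;
 *
 *	mmc = mmc_alloc_host(sizeof(struct my_host), &my_pdev->dev);
 *	if (!mmc)
 *		return -ENOMEM;
 *	... set mmc->ops, mmc->caps, mmc->f_min, mmc->f_max ...
 *	err = mmc_add_host(mmc);
 *
 * The private area is reached with mmc_priv(mmc).
 */
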
static ssize_t show_enable(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mmc_host *host = cls_dev_to_mmc_host(dev);

	if (!host)
		return -EINVAL;

	return snprintf(buf, PAGE_SIZE, "%d\n", mmc_can_scale_clk(host));
}

static ssize_t store_enable(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct mmc_host *host = cls_dev_to_mmc_host(dev);
	unsigned long value;

	if (!host || !host->card || kstrtoul(buf, 0, &value))
		return -EINVAL;

	mmc_get_card(host->card);

	if (!value) {
		/* Suspend the clock scaling and mask host capability */
		if (host->clk_scaling.enable)
			mmc_suspend_clk_scaling(host);
		host->clk_scaling.enable = false;
		host->caps2 &= ~MMC_CAP2_CLK_SCALE;
		host->clk_scaling.state = MMC_LOAD_HIGH;
		/* Set to max. frequency when disabling */
		mmc_clk_update_freq(host, host->card->clk_scaling_highest,
				    host->clk_scaling.state, 0);
	} else {
		/* Unmask host capability and resume scaling */
		host->caps2 |= MMC_CAP2_CLK_SCALE;
		if (!host->clk_scaling.enable) {
			host->clk_scaling.enable = true;
			mmc_resume_clk_scaling(host);
		}
	}

	mmc_put_card(host->card);

	return count;
}

static ssize_t show_up_threshold(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mmc_host *host = cls_dev_to_mmc_host(dev);

	if (!host)
		return -EINVAL;

	return snprintf(buf, PAGE_SIZE, "%d\n", host->clk_scaling.upthreshold);
}

#define MAX_PERCENTAGE 100
static ssize_t store_up_threshold(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct mmc_host *host = cls_dev_to_mmc_host(dev);
	unsigned long value;

	if (!host || kstrtoul(buf, 0, &value) || (value > MAX_PERCENTAGE))
		return -EINVAL;

	host->clk_scaling.upthreshold = value;

	pr_debug("%s: clkscale_up_thresh set to %lu\n",
		 mmc_hostname(host), value);
	return count;
}

static ssize_t show_down_threshold(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mmc_host *host = cls_dev_to_mmc_host(dev);

	if (!host)
		return -EINVAL;

	return snprintf(buf, PAGE_SIZE, "%d\n",
			host->clk_scaling.downthreshold);
}

static ssize_t store_down_threshold(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct mmc_host *host = cls_dev_to_mmc_host(dev);
	unsigned long value;

	if (!host || kstrtoul(buf, 0, &value) || (value > MAX_PERCENTAGE))
		return -EINVAL;

	host->clk_scaling.downthreshold = value;

	pr_debug("%s: clkscale_down_thresh set to %lu\n",
		 mmc_hostname(host), value);
	return count;
}

static ssize_t show_polling(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mmc_host *host = cls_dev_to_mmc_host(dev);

	if (!host)
		return -EINVAL;

	return snprintf(buf, PAGE_SIZE, "%lu milliseconds\n",
			host->clk_scaling.polling_delay_ms);
}

static ssize_t store_polling(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct mmc_host *host = cls_dev_to_mmc_host(dev);
	unsigned long value;

	if (!host || kstrtoul(buf, 0, &value))
		return -EINVAL;

	host->clk_scaling.polling_delay_ms = value;

	pr_debug("%s: clkscale_polling_delay_ms set to %lu\n",
		 mmc_hostname(host), value);
	return count;
}

DEVICE_ATTR(enable, S_IRUGO | S_IWUSR,
	    show_enable, store_enable);
DEVICE_ATTR(polling_interval, S_IRUGO | S_IWUSR,
	    show_polling, store_polling);
DEVICE_ATTR(up_threshold, S_IRUGO | S_IWUSR,
	    show_up_threshold, store_up_threshold);
DEVICE_ATTR(down_threshold, S_IRUGO | S_IWUSR,
	    show_down_threshold, store_down_threshold);

static struct attribute *clk_scaling_attrs[] = {
	&dev_attr_enable.attr,
	&dev_attr_up_threshold.attr,
	&dev_attr_down_threshold.attr,
	&dev_attr_polling_interval.attr,
	NULL,
};

static struct attribute_group clk_scaling_attr_grp = {
	.name = "clk_scaling",
	.attrs = clk_scaling_attrs,
};

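/*
 * Illustrative tuning from user space (assuming this host enumerated as
 * mmc0; the group is created on the class device in mmc_add_host()
 * below):
 *
 *	echo 1 > /sys/class/mmc_host/mmc0/clk_scaling/enable
 *	echo 70 > /sys/class/mmc_host/mmc0/clk_scaling/up_threshold
 *	echo 100 > /sys/class/mmc_host/mmc0/clk_scaling/polling_interval
 */
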
#ifdef CONFIG_MMC_PERF_PROFILING
static ssize_t
show_perf(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct mmc_host *host = cls_dev_to_mmc_host(dev);
	int64_t rtime_drv, wtime_drv;
	unsigned long rbytes_drv, wbytes_drv, flags;

	spin_lock_irqsave(&host->lock, flags);

	rbytes_drv = host->perf.rbytes_drv;
	wbytes_drv = host->perf.wbytes_drv;

	rtime_drv = ktime_to_us(host->perf.rtime_drv);
	wtime_drv = ktime_to_us(host->perf.wtime_drv);

	spin_unlock_irqrestore(&host->lock, flags);

	return snprintf(buf, PAGE_SIZE, "Write performance at driver Level:"
			"%lu bytes in %lld microseconds\n"
			"Read performance at driver Level:"
			"%lu bytes in %lld microseconds\n",
			wbytes_drv, wtime_drv,
			rbytes_drv, rtime_drv);
}

static ssize_t
set_perf(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct mmc_host *host = cls_dev_to_mmc_host(dev);
	int64_t value;
	unsigned long flags;

	sscanf(buf, "%lld", &value);
	spin_lock_irqsave(&host->lock, flags);
	if (!value) {
		memset(&host->perf, 0, sizeof(host->perf));
		host->perf_enable = false;
	} else {
		host->perf_enable = true;
	}
	spin_unlock_irqrestore(&host->lock, flags);

	return count;
}

static DEVICE_ATTR(perf, S_IRUGO | S_IWUSR,
		   show_perf, set_perf);

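/*
 * Illustrative use (assuming this host enumerated as mmc0): writing 0
 * clears and disables the counters, any non-zero value enables them:
 *
 *	echo 1 > /sys/class/mmc_host/mmc0/perf
 *	... run a workload ...
 *	cat /sys/class/mmc_host/mmc0/perf
 */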
#endif

static struct attribute *dev_attrs[] = {
#ifdef CONFIG_MMC_PERF_PROFILING
	&dev_attr_perf.attr,
#endif
	NULL,
};
static struct attribute_group dev_attr_grp = {
	.attrs = dev_attrs,
};

/**
 * mmc_add_host - initialise host hardware
 * @host: mmc host
 *
 * Register the host with the driver model. The host must be
 * prepared to start servicing requests before this function
 * completes.
 */
int mmc_add_host(struct mmc_host *host)
{
	int err;

	WARN_ON((host->caps & MMC_CAP_SDIO_IRQ) &&
		!host->ops->enable_sdio_irq);

	err = device_add(&host->class_dev);
	if (err)
		return err;

	led_trigger_register_simple(dev_name(&host->class_dev), &host->led);

	host->clk_scaling.upthreshold = MMC_DEVFRQ_DEFAULT_UP_THRESHOLD;
	host->clk_scaling.downthreshold = MMC_DEVFRQ_DEFAULT_DOWN_THRESHOLD;
	host->clk_scaling.polling_delay_ms = MMC_DEVFRQ_DEFAULT_POLLING_MSEC;
	host->clk_scaling.skip_clk_scale_freq_update = false;

#ifdef CONFIG_DEBUG_FS
	mmc_add_host_debugfs(host);
#endif
	mmc_host_clk_sysfs_init(host);
	mmc_trace_init(host);

	err = sysfs_create_group(&host->class_dev.kobj, &clk_scaling_attr_grp);
	if (err)
		pr_err("%s: failed to create clk scale sysfs group with err %d\n",
		       __func__, err);

#ifdef CONFIG_BLOCK
	mmc_latency_hist_sysfs_init(host);
#endif

	err = sysfs_create_group(&host->class_dev.kobj, &dev_attr_grp);
	if (err)
		pr_err("%s: failed to create sysfs group with err %d\n",
		       __func__, err);

	mmc_start_host(host);
	if (!(host->pm_flags & MMC_PM_IGNORE_PM_NOTIFY))
		mmc_register_pm_notifier(host);

	return 0;
}

EXPORT_SYMBOL(mmc_add_host);

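/*
 * Lifecycle sketch (illustrative): a host driver tears down in the
 * reverse order of setup:
 *
 *	mmc_alloc_host()  ->  mmc_add_host()	(probe)
 *	mmc_remove_host() ->  mmc_free_host()	(remove)
 *
 * mmc_free_host() must come last, since the host structure is
 * reference-counted through its class device and is only kfree'd from
 * mmc_host_classdev_release() once the last reference is dropped.
 */
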
/**
 * mmc_remove_host - remove host hardware
 * @host: mmc host
 *
 * Unregister and remove all cards associated with this host,
 * and power down the MMC bus. No new requests will be issued
 * after this function has returned.
 */
void mmc_remove_host(struct mmc_host *host)
{
	if (!(host->pm_flags & MMC_PM_IGNORE_PM_NOTIFY))
		mmc_unregister_pm_notifier(host);
	mmc_stop_host(host);

#ifdef CONFIG_DEBUG_FS
	mmc_remove_host_debugfs(host);
#endif

#ifdef CONFIG_BLOCK
	mmc_latency_hist_sysfs_exit(host);
#endif

	/* Remove both groups from the class device they were created on */
	sysfs_remove_group(&host->class_dev.kobj, &dev_attr_grp);
	sysfs_remove_group(&host->class_dev.kobj, &clk_scaling_attr_grp);

	device_del(&host->class_dev);

	led_trigger_unregister_simple(host->led);

	mmc_host_clk_exit(host);
}

EXPORT_SYMBOL(mmc_remove_host);

/**
 * mmc_free_host - free the host structure
 * @host: mmc host
 *
 * Free the host once all references to it have been dropped.
 */
void mmc_free_host(struct mmc_host *host)
{
	mmc_pwrseq_free(host);
	put_device(&host->class_dev);
}

EXPORT_SYMBOL(mmc_free_host);