| /* |
| * linux/drivers/mmc/core/host.c |
| * |
| * Copyright (C) 2003 Russell King, All Rights Reserved. |
| * Copyright (C) 2007-2008 Pierre Ossman |
| * Copyright (C) 2010 Linus Walleij |
| * Copyright (c) 2012, The Linux Foundation. All rights reserved. |
| * |
| * This program is free software; you can redistribute it and/or modify |
| * it under the terms of the GNU General Public License version 2 as |
| * published by the Free Software Foundation. |
| * |
| * MMC host class device management |
| */ |
| |
| #include <linux/device.h> |
| #include <linux/err.h> |
| #include <linux/idr.h> |
| #include <linux/of.h> |
| #include <linux/of_gpio.h> |
| #include <linux/pagemap.h> |
| #include <linux/export.h> |
| #include <linux/leds.h> |
| #include <linux/slab.h> |
| |
| #include <linux/mmc/host.h> |
| #include <linux/mmc/card.h> |
| #include <linux/mmc/ring_buffer.h> |
| |
| #include <linux/mmc/slot-gpio.h> |
| |
| #include "core.h" |
| #include "host.h" |
| #include "slot-gpio.h" |
| #include "pwrseq.h" |
| |
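| /* |
| * Default clock-scaling tunables: the up/down thresholds are device load |
| * percentages and the polling interval is in milliseconds. They seed the |
| * clk_scaling sysfs attributes in mmc_add_host(). |
| */ |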
| #define MMC_DEVFRQ_DEFAULT_UP_THRESHOLD 35 |
| #define MMC_DEVFRQ_DEFAULT_DOWN_THRESHOLD 5 |
| #define MMC_DEVFRQ_DEFAULT_POLLING_MSEC 100 |
| |
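| /* mmc_host_ida hands out host indices ("mmcN"); mmc_host_lock serialises updates to it. */ |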
| static DEFINE_IDA(mmc_host_ida); |
| static DEFINE_SPINLOCK(mmc_host_lock); |
| |
| static void mmc_host_classdev_release(struct device *dev) |
| { |
| struct mmc_host *host = cls_dev_to_mmc_host(dev); |
| spin_lock(&mmc_host_lock); |
| ida_remove(&mmc_host_ida, host->index); |
| spin_unlock(&mmc_host_lock); |
| kfree(host); |
| } |
| |
| static int mmc_host_prepare(struct device *dev) |
| { |
| /* |
| * Since mmc_host is a virtual device, we don't have to do anything. |
| * If we return a positive value, the PM framework will consider that |
| * runtime suspend and system suspend of this device are the same and |
| * will set the direct_complete flag to true. We don't want this because |
| * the mmc_host always has a positive disable_depth, so setting the flag |
| * will not speed up the suspend process. |
| * So return 0. |
| */ |
| return 0; |
| } |
| |
| static const struct dev_pm_ops mmc_pm_ops = { |
| .prepare = mmc_host_prepare, |
| }; |
| |
| static struct class mmc_host_class = { |
| .name = "mmc_host", |
| .dev_release = mmc_host_classdev_release, |
| .pm = &mmc_pm_ops, |
| }; |
| |
| int mmc_register_host_class(void) |
| { |
| return class_register(&mmc_host_class); |
| } |
| |
| void mmc_unregister_host_class(void) |
| { |
| class_unregister(&mmc_host_class); |
| } |
| |
| #ifdef CONFIG_MMC_CLKGATE |
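| /* |
| * "clkgate_delay" sysfs attribute on the host class device (created by |
| * mmc_host_clk_sysfs_init()): the delay, in milliseconds, between the last |
| * clock user going away and the clock being gated. Illustrative usage |
| * (host index varies): |
| *   echo 20 > /sys/class/mmc_host/mmc0/clkgate_delay |
| */ |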
| static ssize_t clkgate_delay_show(struct device *dev, |
| struct device_attribute *attr, char *buf) |
| { |
| struct mmc_host *host = cls_dev_to_mmc_host(dev); |
| return snprintf(buf, PAGE_SIZE, "%lu\n", host->clkgate_delay); |
| } |
| |
| static ssize_t clkgate_delay_store(struct device *dev, |
| struct device_attribute *attr, const char *buf, size_t count) |
| { |
| struct mmc_host *host = cls_dev_to_mmc_host(dev); |
| unsigned long flags, value; |
| |
| if (kstrtoul(buf, 0, &value)) |
| return -EINVAL; |
| |
| spin_lock_irqsave(&host->clk_lock, flags); |
| host->clkgate_delay = value; |
| spin_unlock_irqrestore(&host->clk_lock, flags); |
| return count; |
| } |
| |
| /* |
| * With clock gating enabled, the core calls out to the host once to |
| * ungate and once to gate the clock around each request or card |
| * operation, and these calls may be interleaved in any fashion. The |
| * driver sees this as set_ios() operations with the ios.clock field |
| * set to 0 to gate (disable) the block clock, and set back to the old |
| * frequency to enable it again. |
| */ |
| static void mmc_host_clk_gate_delayed(struct mmc_host *host) |
| { |
| unsigned long tick_ns; |
| unsigned long freq = host->ios.clock; |
| unsigned long flags; |
| |
| if (!freq) { |
| pr_debug("%s: frequency set to 0 in disable function, " |
| "this means the clock is already disabled.\n", |
| mmc_hostname(host)); |
| return; |
| } |
| /* |
| * New requests may have appeared while this work was being scheduled; |
| * if so, there is no reason to delay the check before clk_disable(). |
| */ |
| spin_lock_irqsave(&host->clk_lock, flags); |
| |
| /* |
| * Delay n bus cycles (at least 8 from MMC spec) before attempting |
| * to disable the MCI block clock. The reference count may have |
| * gone up again after this delay due to rescheduling! |
| */ |
| if (!host->clk_requests) { |
| spin_unlock_irqrestore(&host->clk_lock, flags); |
| tick_ns = DIV_ROUND_UP(1000000000, freq); |
| ndelay(host->clk_delay * tick_ns); |
| } else { |
| /* New users appeared while waiting for this work */ |
| spin_unlock_irqrestore(&host->clk_lock, flags); |
| return; |
| } |
| mutex_lock(&host->clk_gate_mutex); |
| spin_lock_irqsave(&host->clk_lock, flags); |
| if (!host->clk_requests) { |
| spin_unlock_irqrestore(&host->clk_lock, flags); |
| /* This will set host->ios.clock to 0 */ |
| mmc_gate_clock(host); |
| spin_lock_irqsave(&host->clk_lock, flags); |
| pr_debug("%s: gated MCI clock\n", mmc_hostname(host)); |
| MMC_TRACE(host, "clocks are gated\n"); |
| } |
| spin_unlock_irqrestore(&host->clk_lock, flags); |
| mutex_unlock(&host->clk_gate_mutex); |
| } |
| |
| /* |
| * Internal work. Work to disable the clock at some later point. |
| */ |
| static void mmc_host_clk_gate_work(struct work_struct *work) |
| { |
| struct mmc_host *host = container_of(work, struct mmc_host, |
| clk_gate_work.work); |
| |
| mmc_host_clk_gate_delayed(host); |
| } |
| |
| /** |
| * mmc_host_clk_hold - ungate hardware MCI clocks |
| * @host: host to ungate. |
| * |
| * Makes sure the host ios.clock is restored to a non-zero value |
| * past this call. Increase clock reference count and ungate clock |
| * if we're the first user. |
| */ |
| void mmc_host_clk_hold(struct mmc_host *host) |
| { |
| unsigned long flags; |
| |
| /* cancel any clock gating work scheduled by mmc_host_clk_release() */ |
| cancel_delayed_work_sync(&host->clk_gate_work); |
| mutex_lock(&host->clk_gate_mutex); |
| spin_lock_irqsave(&host->clk_lock, flags); |
| if (host->clk_gated) { |
| spin_unlock_irqrestore(&host->clk_lock, flags); |
| mmc_ungate_clock(host); |
| |
| spin_lock_irqsave(&host->clk_lock, flags); |
| pr_debug("%s: ungated MCI clock\n", mmc_hostname(host)); |
| MMC_TRACE(host, "clocks are ungated\n"); |
| } |
| host->clk_requests++; |
| spin_unlock_irqrestore(&host->clk_lock, flags); |
| mutex_unlock(&host->clk_gate_mutex); |
| } |
| |
| /** |
| * mmc_host_may_gate_card - check if this card may be gated |
| * @card: card to check. |
| */ |
| bool mmc_host_may_gate_card(struct mmc_card *card) |
| { |
| /* If there is no card, the clock may be gated */ |
| if (!card) |
| return true; |
| |
| /* |
| * An SDIO 3.0 card that supports asynchronous interrupts allows its |
| * clock to be gated, so check for that capability. |
| */ |
| if (mmc_card_sdio(card) && card->cccr.async_intr_sup) |
| return true; |
| |
| /* |
| * Don't gate SDIO cards! These need to be clocked at all times |
| * since they may be independent systems generating interrupts |
| * and other events. The clock requests counter from the core will |
| * go down to zero since the core does not need it, but we will not |
| * gate the clock, because there is somebody out there that may still |
| * be using it. |
| */ |
| return !(card->quirks & MMC_QUIRK_BROKEN_CLK_GATING); |
| } |
| |
| /** |
| * mmc_host_clk_release - gate off hardware MCI clocks |
| * @host: host to gate. |
| * |
| * Calls the host driver with ios.clock set to zero as often as possible |
| * in order to gate off hardware MCI clocks. Decrease clock reference |
| * count and schedule disabling of clock. |
| */ |
| void mmc_host_clk_release(struct mmc_host *host) |
| { |
| unsigned long flags; |
| |
| spin_lock_irqsave(&host->clk_lock, flags); |
| host->clk_requests--; |
| if (mmc_host_may_gate_card(host->card) && |
| !host->clk_requests) |
| queue_delayed_work(host->clk_gate_wq, &host->clk_gate_work, |
| msecs_to_jiffies(host->clkgate_delay)); |
| spin_unlock_irqrestore(&host->clk_lock, flags); |
| } |
| |
| /** |
| * mmc_host_clk_rate - get current clock frequency setting |
| * @host: host to get the clock frequency for. |
| * |
| * Returns current clock frequency regardless of gating. |
| */ |
| unsigned int mmc_host_clk_rate(struct mmc_host *host) |
| { |
| unsigned long freq; |
| unsigned long flags; |
| |
| spin_lock_irqsave(&host->clk_lock, flags); |
| if (host->clk_gated) |
| freq = host->clk_old; |
| else |
| freq = host->ios.clock; |
| spin_unlock_irqrestore(&host->clk_lock, flags); |
| return freq; |
| } |
| |
| /** |
| * mmc_host_clk_init - set up clock gating code |
| * @host: host with potential clock to control |
| */ |
| static inline void mmc_host_clk_init(struct mmc_host *host) |
| { |
| host->clk_requests = 0; |
| /* Hold MCI clock for 8 cycles by default */ |
| host->clk_delay = 8; |
| /* |
| * The default clock gating delay is 0 ms to avoid wasting power. |
| * The value can be tuned by writing to the clkgate_delay sysfs entry. |
| */ |
| host->clkgate_delay = 0; |
| host->clk_gated = false; |
| INIT_DELAYED_WORK(&host->clk_gate_work, mmc_host_clk_gate_work); |
| spin_lock_init(&host->clk_lock); |
| mutex_init(&host->clk_gate_mutex); |
| } |
| |
| /** |
| * mmc_host_clk_exit - shut down clock gating code |
| * @host: host with potential clock to control |
| */ |
| static inline void mmc_host_clk_exit(struct mmc_host *host) |
| { |
| /* |
| * Wait for any outstanding gate and then make sure we're |
| * ungated before exiting. |
| */ |
| if (cancel_delayed_work_sync(&host->clk_gate_work)) |
| mmc_host_clk_gate_delayed(host); |
| if (host->clk_gated) |
| mmc_host_clk_hold(host); |
| if (host->clk_gate_wq) |
| destroy_workqueue(host->clk_gate_wq); |
| /* There should be only one user now */ |
| WARN_ON(host->clk_requests > 1); |
| } |
| |
| static inline void mmc_host_clk_sysfs_init(struct mmc_host *host) |
| { |
| host->clkgate_delay_attr.show = clkgate_delay_show; |
| host->clkgate_delay_attr.store = clkgate_delay_store; |
| sysfs_attr_init(&host->clkgate_delay_attr.attr); |
| host->clkgate_delay_attr.attr.name = "clkgate_delay"; |
| host->clkgate_delay_attr.attr.mode = S_IRUGO | S_IWUSR; |
| if (device_create_file(&host->class_dev, &host->clkgate_delay_attr)) |
| pr_err("%s: Failed to create clkgate_delay sysfs entry\n", |
| mmc_hostname(host)); |
| } |
| |
| static inline bool mmc_host_clk_gate_wq_init(struct mmc_host *host) |
| { |
| char *wq = NULL; |
| int wq_nl; |
| bool ret = true; |
| |
| wq_nl = sizeof("mmc_clk_gate/") + strlen(mmc_hostname(host)) + 1; |
| |
| wq = kzalloc(wq_nl, GFP_KERNEL); |
| if (!wq) { |
| ret = false; |
| goto out; |
| } |
| |
| snprintf(wq, wq_nl, "mmc_clk_gate/%s", mmc_hostname(host)); |
| |
| /* |
| * Create a workqueue with the WQ_MEM_RECLAIM flag set for the mmc |
| * clock gating work. Because the mmc thread is created with |
| * PF_MEMALLOC set, the kernel checks for WQ_MEM_RECLAIM when |
| * flushing this workqueue and triggers a warning if the flag is |
| * missing. |
| */ |
| host->clk_gate_wq = create_workqueue(wq); |
| if (!host->clk_gate_wq) { |
| ret = false; |
| dev_err(host->parent, |
| "failed to create clock gate work queue\n"); |
| } |
| |
| kfree(wq); |
| out: |
| return ret; |
| } |
| #else |
| |
| static inline void mmc_host_clk_init(struct mmc_host *host) |
| { |
| } |
| |
| static inline void mmc_host_clk_exit(struct mmc_host *host) |
| { |
| } |
| |
| static inline void mmc_host_clk_sysfs_init(struct mmc_host *host) |
| { |
| } |
| |
| bool mmc_host_may_gate_card(struct mmc_card *card) |
| { |
| return false; |
| } |
| |
| static inline bool mmc_host_clk_gate_wq_init(struct mmc_host *host) |
| { |
| return true; |
| } |
| #endif |
| |
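| /* |
| * Enable re-tuning for the host and, if a retune period (in seconds) is |
| * configured, arm the periodic re-tuning timer. |
| */ |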
| void mmc_retune_enable(struct mmc_host *host) |
| { |
| host->can_retune = 1; |
| if (host->retune_period) |
| mod_timer(&host->retune_timer, |
| jiffies + host->retune_period * HZ); |
| } |
| EXPORT_SYMBOL(mmc_retune_enable); |
| |
| /* |
| * Pause re-tuning for a small set of operations. The pause begins after the |
| * next command and after first doing re-tuning. |
| */ |
| void mmc_retune_pause(struct mmc_host *host) |
| { |
| if (!host->retune_paused) { |
| host->retune_paused = 1; |
| mmc_retune_needed(host); |
| mmc_retune_hold(host); |
| } |
| } |
| EXPORT_SYMBOL(mmc_retune_pause); |
| |
| void mmc_retune_unpause(struct mmc_host *host) |
| { |
| if (host->retune_paused) { |
| host->retune_paused = 0; |
| mmc_retune_release(host); |
| } |
| } |
| EXPORT_SYMBOL(mmc_retune_unpause); |
| |
| void mmc_retune_disable(struct mmc_host *host) |
| { |
| mmc_retune_unpause(host); |
| host->can_retune = 0; |
| del_timer_sync(&host->retune_timer); |
| host->retune_now = 0; |
| host->need_retune = 0; |
| } |
| EXPORT_SYMBOL(mmc_retune_disable); |
| |
| void mmc_retune_timer_stop(struct mmc_host *host) |
| { |
| del_timer_sync(&host->retune_timer); |
| } |
| EXPORT_SYMBOL(mmc_retune_timer_stop); |
| |
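| /* |
| * Hold off re-tuning while a sequence of commands that must not be |
| * interrupted is in progress. The first holder also requests an immediate |
| * re-tune (retune_now) before the sequence starts; mmc_retune_release() |
| * drops the hold reference. |
| */ |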
| void mmc_retune_hold(struct mmc_host *host) |
| { |
| if (!host->hold_retune) |
| host->retune_now = 1; |
| host->hold_retune += 1; |
| } |
| |
| void mmc_retune_release(struct mmc_host *host) |
| { |
| if (host->hold_retune) |
| host->hold_retune -= 1; |
| else |
| WARN_ON(1); |
| } |
| |
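| /* |
| * Carry out a pending re-tune if one is needed and allowed. For HS400 the |
| * card is first switched back to HS200, tuning is executed, and the card |
| * is then returned to HS400. |
| */ |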
| int mmc_retune(struct mmc_host *host) |
| { |
| bool return_to_hs400 = false; |
| int err; |
| |
| if (host->retune_now) |
| host->retune_now = 0; |
| else |
| return 0; |
| |
| if (!host->need_retune || host->doing_retune || !host->card || |
| mmc_card_hs400es(host->card)) |
| return 0; |
| |
| host->need_retune = 0; |
| |
| host->doing_retune = 1; |
| |
| if (host->ios.timing == MMC_TIMING_MMC_HS400) { |
| err = mmc_hs400_to_hs200(host->card); |
| if (err) |
| goto out; |
| |
| return_to_hs400 = true; |
| |
| if (host->ops->prepare_hs400_tuning) |
| host->ops->prepare_hs400_tuning(host, &host->ios); |
| } |
| |
| err = mmc_execute_tuning(host->card); |
| if (err) |
| goto out; |
| |
| if (return_to_hs400) |
| err = mmc_hs200_to_hs400(host->card); |
| out: |
| host->doing_retune = 0; |
| |
| return err; |
| } |
| |
| static void mmc_retune_timer(unsigned long data) |
| { |
| struct mmc_host *host = (struct mmc_host *)data; |
| |
| mmc_retune_needed(host); |
| } |
| |
| /** |
| * mmc_of_parse() - parse host's device-tree node |
| * @host: host whose node should be parsed. |
| * |
| * To keep the rest of the MMC subsystem unaware of whether DT has been |
| * used to instantiate and configure this host instance or not, we |
| * parse the properties and set respective generic mmc-host flags and |
| * parameters. |
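| * |
| * Illustrative examples of bindings handled here (generic MMC binding |
| * property names): |
| *   bus-width = <4>; |
| *   max-frequency = <100000000>; |
| *   non-removable; |
| *   cap-sd-highspeed; |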
| */ |
| int mmc_of_parse(struct mmc_host *host) |
| { |
| struct device *dev = host->parent; |
| u32 bus_width; |
| int ret; |
| bool cd_cap_invert, cd_gpio_invert = false; |
| bool ro_cap_invert, ro_gpio_invert = false; |
| |
| if (!dev || !dev_fwnode(dev)) |
| return 0; |
| |
| /* "bus-width" is translated to MMC_CAP_*_BIT_DATA flags */ |
| if (device_property_read_u32(dev, "bus-width", &bus_width) < 0) { |
| dev_dbg(host->parent, |
| "\"bus-width\" property is missing, assuming 1 bit.\n"); |
| bus_width = 1; |
| } |
| |
| switch (bus_width) { |
| case 8: |
| host->caps |= MMC_CAP_8_BIT_DATA; |
| /* Hosts capable of 8-bit transfers can also do 4 bits */ |
| case 4: |
| host->caps |= MMC_CAP_4_BIT_DATA; |
| break; |
| case 1: |
| break; |
| default: |
| dev_err(host->parent, |
| "Invalid \"bus-width\" value %u!\n", bus_width); |
| return -EINVAL; |
| } |
| |
| /* f_max is obtained from the optional "max-frequency" property */ |
| device_property_read_u32(dev, "max-frequency", &host->f_max); |
| |
| /* |
| * Configure CD and WP pins. They are both by default active low to |
| * match the SDHCI spec. If GPIOs are provided for CD and / or WP, the |
| * mmc-gpio helpers are used to attach, configure and use them. If |
| * polarity inversion is specified in DT, one of MMC_CAP2_CD_ACTIVE_HIGH |
| * and MMC_CAP2_RO_ACTIVE_HIGH capability-2 flags is set. If the |
| * "broken-cd" property is provided, the MMC_CAP_NEEDS_POLL capability |
| * is set. If the "non-removable" property is found, the |
| * MMC_CAP_NONREMOVABLE capability is set and no card-detection |
| * configuration is performed. |
| */ |
| |
| /* Parse Card Detection */ |
| if (device_property_read_bool(dev, "non-removable")) { |
| host->caps |= MMC_CAP_NONREMOVABLE; |
| } else { |
| cd_cap_invert = device_property_read_bool(dev, "cd-inverted"); |
| |
| if (device_property_read_bool(dev, "broken-cd")) |
| host->caps |= MMC_CAP_NEEDS_POLL; |
| |
| ret = mmc_gpiod_request_cd(host, "cd", 0, true, |
| 0, &cd_gpio_invert); |
| if (!ret) |
| dev_info(host->parent, "Got CD GPIO\n"); |
| else if (ret != -ENOENT && ret != -ENOSYS) |
| return ret; |
| |
| /* |
| * There are two ways to flag that the CD line is inverted: |
| * through the cd-inverted flag and by the GPIO line itself |
| * being inverted from the GPIO subsystem. This is a leftover |
| * from the times when the GPIO subsystem did not make it |
| * possible to flag a line as inverted. |
| * |
| * If the capability on the host AND the GPIO line are |
| * both inverted, the end result is that the CD line is |
| * not inverted. |
| */ |
| if (cd_cap_invert ^ cd_gpio_invert) |
| host->caps2 |= MMC_CAP2_CD_ACTIVE_HIGH; |
| } |
| |
| /* Parse Write Protection */ |
| ro_cap_invert = device_property_read_bool(dev, "wp-inverted"); |
| |
| ret = mmc_gpiod_request_ro(host, "wp", 0, false, 0, &ro_gpio_invert); |
| if (!ret) |
| dev_info(host->parent, "Got WP GPIO\n"); |
| else if (ret != -ENOENT && ret != -ENOSYS) |
| return ret; |
| |
| if (device_property_read_bool(dev, "disable-wp")) |
| host->caps2 |= MMC_CAP2_NO_WRITE_PROTECT; |
| |
| /* See the comment on CD inversion above */ |
| if (ro_cap_invert ^ ro_gpio_invert) |
| host->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH; |
| |
| if (device_property_read_bool(dev, "cap-sd-highspeed")) |
| host->caps |= MMC_CAP_SD_HIGHSPEED; |
| if (device_property_read_bool(dev, "cap-mmc-highspeed")) |
| host->caps |= MMC_CAP_MMC_HIGHSPEED; |
| if (device_property_read_bool(dev, "sd-uhs-sdr12")) |
| host->caps |= MMC_CAP_UHS_SDR12; |
| if (device_property_read_bool(dev, "sd-uhs-sdr25")) |
| host->caps |= MMC_CAP_UHS_SDR25; |
| if (device_property_read_bool(dev, "sd-uhs-sdr50")) |
| host->caps |= MMC_CAP_UHS_SDR50; |
| if (device_property_read_bool(dev, "sd-uhs-sdr104")) |
| host->caps |= MMC_CAP_UHS_SDR104; |
| if (device_property_read_bool(dev, "sd-uhs-ddr50")) |
| host->caps |= MMC_CAP_UHS_DDR50; |
| if (device_property_read_bool(dev, "cap-power-off-card")) |
| host->caps |= MMC_CAP_POWER_OFF_CARD; |
| if (device_property_read_bool(dev, "cap-mmc-hw-reset")) |
| host->caps |= MMC_CAP_HW_RESET; |
| if (device_property_read_bool(dev, "cap-sdio-irq")) |
| host->caps |= MMC_CAP_SDIO_IRQ; |
| if (device_property_read_bool(dev, "full-pwr-cycle")) |
| host->caps2 |= MMC_CAP2_FULL_PWR_CYCLE; |
| if (device_property_read_bool(dev, "keep-power-in-suspend")) |
| host->pm_caps |= MMC_PM_KEEP_POWER; |
| if (device_property_read_bool(dev, "wakeup-source") || |
| device_property_read_bool(dev, "enable-sdio-wakeup")) /* legacy */ |
| host->pm_caps |= MMC_PM_WAKE_SDIO_IRQ; |
| if (device_property_read_bool(dev, "mmc-ddr-1_8v")) |
| host->caps |= MMC_CAP_1_8V_DDR; |
| if (device_property_read_bool(dev, "mmc-ddr-1_2v")) |
| host->caps |= MMC_CAP_1_2V_DDR; |
| if (device_property_read_bool(dev, "mmc-hs200-1_8v")) |
| host->caps2 |= MMC_CAP2_HS200_1_8V_SDR; |
| if (device_property_read_bool(dev, "mmc-hs200-1_2v")) |
| host->caps2 |= MMC_CAP2_HS200_1_2V_SDR; |
| if (device_property_read_bool(dev, "mmc-hs400-1_8v")) |
| host->caps2 |= MMC_CAP2_HS400_1_8V | MMC_CAP2_HS200_1_8V_SDR; |
| if (device_property_read_bool(dev, "mmc-hs400-1_2v")) |
| host->caps2 |= MMC_CAP2_HS400_1_2V | MMC_CAP2_HS200_1_2V_SDR; |
| if (device_property_read_bool(dev, "mmc-hs400-enhanced-strobe")) |
| host->caps2 |= MMC_CAP2_HS400_ES; |
| if (device_property_read_bool(dev, "no-sdio")) |
| host->caps2 |= MMC_CAP2_NO_SDIO; |
| if (device_property_read_bool(dev, "no-sd")) |
| host->caps2 |= MMC_CAP2_NO_SD; |
| if (device_property_read_bool(dev, "no-mmc")) |
| host->caps2 |= MMC_CAP2_NO_MMC; |
| |
| host->dsr_req = !device_property_read_u32(dev, "dsr", &host->dsr); |
| if (host->dsr_req && (host->dsr & ~0xffff)) { |
| dev_err(host->parent, |
| "device tree specified broken value for DSR: 0x%x, ignoring\n", |
| host->dsr); |
| host->dsr_req = 0; |
| } |
| |
| return mmc_pwrseq_alloc(host); |
| } |
| |
| EXPORT_SYMBOL(mmc_of_parse); |
| |
| /** |
| * mmc_alloc_host - initialise the per-host structure. |
| * @extra: sizeof private data structure |
| * @dev: pointer to host device model structure |
| * |
| * Initialise the per-host structure. |
| */ |
| struct mmc_host *mmc_alloc_host(int extra, struct device *dev) |
| { |
| int err; |
| struct mmc_host *host; |
| |
| host = kzalloc(sizeof(struct mmc_host) + extra, GFP_KERNEL); |
| if (!host) |
| return NULL; |
| |
| /* scanning will be enabled when we're ready */ |
| host->rescan_disable = 1; |
| |
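| /* |
| * Allocate a unique host index. ida_pre_get()/ida_get_new() can fail with |
| * -EAGAIN if the preallocated node was consumed by a concurrent |
| * allocation, in which case we simply retry. |
| */ |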
| again: |
| if (!ida_pre_get(&mmc_host_ida, GFP_KERNEL)) { |
| kfree(host); |
| return NULL; |
| } |
| |
| spin_lock(&mmc_host_lock); |
| err = ida_get_new(&mmc_host_ida, &host->index); |
| spin_unlock(&mmc_host_lock); |
| |
| if (err == -EAGAIN) { |
| goto again; |
| } else if (err) { |
| kfree(host); |
| return NULL; |
| } |
| |
| dev_set_name(&host->class_dev, "mmc%d", host->index); |
| |
| host->parent = dev; |
| host->class_dev.parent = dev; |
| host->class_dev.class = &mmc_host_class; |
| device_initialize(&host->class_dev); |
| device_enable_async_suspend(&host->class_dev); |
| |
| if (mmc_gpio_alloc(host)) { |
| put_device(&host->class_dev); |
| return NULL; |
| } |
| |
| if (!mmc_host_clk_gate_wq_init(host)) { |
| /* release the index and the host via the class release callback */ |
| put_device(&host->class_dev); |
| return NULL; |
| } |
| |
| mmc_host_clk_init(host); |
| |
| spin_lock_init(&host->lock); |
| init_waitqueue_head(&host->wq); |
| INIT_DELAYED_WORK(&host->detect, mmc_rescan); |
| setup_timer(&host->retune_timer, mmc_retune_timer, (unsigned long)host); |
| |
| mutex_init(&host->rpmb_req_mutex); |
| |
| /* |
| * By default, hosts do not support SGIO or large requests. |
| * They have to set these according to their abilities. |
| */ |
| host->max_segs = 1; |
| host->max_seg_size = PAGE_SIZE; |
| |
| host->max_req_size = PAGE_SIZE; |
| host->max_blk_size = 512; |
| host->max_blk_count = PAGE_SIZE / 512; |
| |
| return host; |
| } |
| |
| EXPORT_SYMBOL(mmc_alloc_host); |
| |
| static ssize_t show_enable(struct device *dev, |
| struct device_attribute *attr, char *buf) |
| { |
| struct mmc_host *host = cls_dev_to_mmc_host(dev); |
| |
| if (!host) |
| return -EINVAL; |
| |
| return snprintf(buf, PAGE_SIZE, "%d\n", mmc_can_scale_clk(host)); |
| } |
| |
| static ssize_t store_enable(struct device *dev, |
| struct device_attribute *attr, const char *buf, size_t count) |
| { |
| struct mmc_host *host = cls_dev_to_mmc_host(dev); |
| unsigned long value; |
| |
| if (!host || !host->card || kstrtoul(buf, 0, &value)) |
| return -EINVAL; |
| |
| mmc_get_card(host->card); |
| |
| if (!value) { |
| /* Suspend the clock scaling and mask host capability */ |
| if (host->clk_scaling.enable) |
| mmc_suspend_clk_scaling(host); |
| host->clk_scaling.enable = false; |
| host->caps2 &= ~MMC_CAP2_CLK_SCALE; |
| host->clk_scaling.state = MMC_LOAD_HIGH; |
| /* Set to max. frequency when disabling */ |
| mmc_clk_update_freq(host, host->card->clk_scaling_highest, |
| host->clk_scaling.state, 0); |
| } else { |
| /* Unmask host capability and resume scaling */ |
| host->caps2 |= MMC_CAP2_CLK_SCALE; |
| if (!host->clk_scaling.enable) { |
| host->clk_scaling.enable = true; |
| mmc_resume_clk_scaling(host); |
| } |
| } |
| |
| mmc_put_card(host->card); |
| |
| return count; |
| } |
| |
| static ssize_t show_up_threshold(struct device *dev, |
| struct device_attribute *attr, char *buf) |
| { |
| struct mmc_host *host = cls_dev_to_mmc_host(dev); |
| |
| if (!host) |
| return -EINVAL; |
| |
| return snprintf(buf, PAGE_SIZE, "%d\n", host->clk_scaling.upthreshold); |
| } |
| |
| #define MAX_PERCENTAGE 100 |
| static ssize_t store_up_threshold(struct device *dev, |
| struct device_attribute *attr, const char *buf, size_t count) |
| { |
| struct mmc_host *host = cls_dev_to_mmc_host(dev); |
| unsigned long value; |
| |
| if (!host || kstrtoul(buf, 0, &value) || (value > MAX_PERCENTAGE)) |
| return -EINVAL; |
| |
| host->clk_scaling.upthreshold = value; |
| |
| pr_debug("%s: clkscale_up_thresh set to %lu\n", |
| mmc_hostname(host), value); |
| return count; |
| } |
| |
| static ssize_t show_down_threshold(struct device *dev, |
| struct device_attribute *attr, char *buf) |
| { |
| struct mmc_host *host = cls_dev_to_mmc_host(dev); |
| |
| if (!host) |
| return -EINVAL; |
| |
| return snprintf(buf, PAGE_SIZE, "%d\n", |
| host->clk_scaling.downthreshold); |
| } |
| |
| static ssize_t store_down_threshold(struct device *dev, |
| struct device_attribute *attr, const char *buf, size_t count) |
| { |
| struct mmc_host *host = cls_dev_to_mmc_host(dev); |
| unsigned long value; |
| |
| if (!host || kstrtoul(buf, 0, &value) || (value > MAX_PERCENTAGE)) |
| return -EINVAL; |
| |
| host->clk_scaling.downthreshold = value; |
| |
| pr_debug("%s: clkscale_down_thresh set to %lu\n", |
| mmc_hostname(host), value); |
| return count; |
| } |
| |
| static ssize_t show_polling(struct device *dev, |
| struct device_attribute *attr, char *buf) |
| { |
| struct mmc_host *host = cls_dev_to_mmc_host(dev); |
| |
| if (!host) |
| return -EINVAL; |
| |
| return snprintf(buf, PAGE_SIZE, "%lu milliseconds\n", |
| host->clk_scaling.polling_delay_ms); |
| } |
| |
| static ssize_t store_polling(struct device *dev, |
| struct device_attribute *attr, const char *buf, size_t count) |
| { |
| struct mmc_host *host = cls_dev_to_mmc_host(dev); |
| unsigned long value; |
| |
| if (!host || kstrtoul(buf, 0, &value)) |
| return -EINVAL; |
| |
| host->clk_scaling.polling_delay_ms = value; |
| |
| pr_debug("%s: clkscale_polling_delay_ms set to %lu\n", |
| mmc_hostname(host), value); |
| return count; |
| } |
| |
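| /* |
| * Clock-scaling tunables, exported via the "clk_scaling" sysfs group on |
| * the host class device, e.g. (illustrative paths): |
| *   /sys/class/mmc_host/mmc0/clk_scaling/enable |
| *   /sys/class/mmc_host/mmc0/clk_scaling/up_threshold |
| *   /sys/class/mmc_host/mmc0/clk_scaling/polling_interval |
| * Thresholds are load percentages; the polling interval is in milliseconds. |
| */ |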
| DEVICE_ATTR(enable, S_IRUGO | S_IWUSR, |
| show_enable, store_enable); |
| DEVICE_ATTR(polling_interval, S_IRUGO | S_IWUSR, |
| show_polling, store_polling); |
| DEVICE_ATTR(up_threshold, S_IRUGO | S_IWUSR, |
| show_up_threshold, store_up_threshold); |
| DEVICE_ATTR(down_threshold, S_IRUGO | S_IWUSR, |
| show_down_threshold, store_down_threshold); |
| |
| static struct attribute *clk_scaling_attrs[] = { |
| &dev_attr_enable.attr, |
| &dev_attr_up_threshold.attr, |
| &dev_attr_down_threshold.attr, |
| &dev_attr_polling_interval.attr, |
| NULL, |
| }; |
| |
| static struct attribute_group clk_scaling_attr_grp = { |
| .name = "clk_scaling", |
| .attrs = clk_scaling_attrs, |
| }; |
| |
| #ifdef CONFIG_MMC_PERF_PROFILING |
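| /* |
| * "perf" sysfs attribute: reading reports the byte counts and cumulative |
| * times collected at the driver level; writing 0 clears the statistics |
| * and disables collection, any other value enables it. |
| */ |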
| static ssize_t |
| show_perf(struct device *dev, struct device_attribute *attr, char *buf) |
| { |
| struct mmc_host *host = cls_dev_to_mmc_host(dev); |
| int64_t rtime_drv, wtime_drv; |
| unsigned long rbytes_drv, wbytes_drv, flags; |
| |
| spin_lock_irqsave(&host->lock, flags); |
| |
| rbytes_drv = host->perf.rbytes_drv; |
| wbytes_drv = host->perf.wbytes_drv; |
| |
| rtime_drv = ktime_to_us(host->perf.rtime_drv); |
| wtime_drv = ktime_to_us(host->perf.wtime_drv); |
| |
| spin_unlock_irqrestore(&host->lock, flags); |
| |
| return snprintf(buf, PAGE_SIZE, "Write performance at driver Level:" |
| "%lu bytes in %lld microseconds\n" |
| "Read performance at driver Level:" |
| "%lu bytes in %lld microseconds\n", |
| wbytes_drv, wtime_drv, |
| rbytes_drv, rtime_drv); |
| } |
| |
| static ssize_t |
| set_perf(struct device *dev, struct device_attribute *attr, |
| const char *buf, size_t count) |
| { |
| struct mmc_host *host = cls_dev_to_mmc_host(dev); |
| int64_t value; |
| unsigned long flags; |
| |
| if (kstrtoll(buf, 0, &value)) |
| return -EINVAL; |
| spin_lock_irqsave(&host->lock, flags); |
| if (!value) { |
| memset(&host->perf, 0, sizeof(host->perf)); |
| host->perf_enable = false; |
| } else { |
| host->perf_enable = true; |
| } |
| spin_unlock_irqrestore(&host->lock, flags); |
| |
| return count; |
| } |
| |
| static DEVICE_ATTR(perf, S_IRUGO | S_IWUSR, |
| show_perf, set_perf); |
| |
| #endif |
| |
| static struct attribute *dev_attrs[] = { |
| #ifdef CONFIG_MMC_PERF_PROFILING |
| &dev_attr_perf.attr, |
| #endif |
| NULL, |
| }; |
| static struct attribute_group dev_attr_grp = { |
| .attrs = dev_attrs, |
| }; |
| |
| /** |
| * mmc_add_host - initialise host hardware |
| * @host: mmc host |
| * |
| * Register the host with the driver model. The host must be |
| * prepared to start servicing requests before this function |
| * completes. |
| */ |
| int mmc_add_host(struct mmc_host *host) |
| { |
| int err; |
| |
| WARN_ON((host->caps & MMC_CAP_SDIO_IRQ) && |
| !host->ops->enable_sdio_irq); |
| |
| err = device_add(&host->class_dev); |
| if (err) |
| return err; |
| |
| led_trigger_register_simple(dev_name(&host->class_dev), &host->led); |
| |
| host->clk_scaling.upthreshold = MMC_DEVFRQ_DEFAULT_UP_THRESHOLD; |
| host->clk_scaling.downthreshold = MMC_DEVFRQ_DEFAULT_DOWN_THRESHOLD; |
| host->clk_scaling.polling_delay_ms = MMC_DEVFRQ_DEFAULT_POLLING_MSEC; |
| host->clk_scaling.skip_clk_scale_freq_update = false; |
| |
| #ifdef CONFIG_DEBUG_FS |
| mmc_add_host_debugfs(host); |
| #endif |
| mmc_host_clk_sysfs_init(host); |
| mmc_trace_init(host); |
| |
| err = sysfs_create_group(&host->class_dev.kobj, &clk_scaling_attr_grp); |
| if (err) |
| pr_err("%s: failed to create clk scale sysfs group with err %d\n", |
| __func__, err); |
| |
| #ifdef CONFIG_BLOCK |
| mmc_latency_hist_sysfs_init(host); |
| #endif |
| |
| err = sysfs_create_group(&host->class_dev.kobj, &dev_attr_grp); |
| if (err) |
| pr_err("%s: failed to create sysfs group with err %d\n", |
| __func__, err); |
| |
| mmc_start_host(host); |
| if (!(host->pm_flags & MMC_PM_IGNORE_PM_NOTIFY)) |
| mmc_register_pm_notifier(host); |
| |
| return 0; |
| } |
| |
| EXPORT_SYMBOL(mmc_add_host); |
| |
| /** |
| * mmc_remove_host - remove host hardware |
| * @host: mmc host |
| * |
| * Unregister and remove all cards associated with this host, |
| * and power down the MMC bus. No new requests will be issued |
| * after this function has returned. |
| */ |
| void mmc_remove_host(struct mmc_host *host) |
| { |
| if (!(host->pm_flags & MMC_PM_IGNORE_PM_NOTIFY)) |
| mmc_unregister_pm_notifier(host); |
| mmc_stop_host(host); |
| |
| #ifdef CONFIG_DEBUG_FS |
| mmc_remove_host_debugfs(host); |
| #endif |
| |
| #ifdef CONFIG_BLOCK |
| mmc_latency_hist_sysfs_exit(host); |
| #endif |
| |
| sysfs_remove_group(&host->class_dev.kobj, &dev_attr_grp); |
| sysfs_remove_group(&host->class_dev.kobj, &clk_scaling_attr_grp); |
| |
| device_del(&host->class_dev); |
| |
| led_trigger_unregister_simple(host->led); |
| |
| mmc_host_clk_exit(host); |
| } |
| |
| EXPORT_SYMBOL(mmc_remove_host); |
| |
| /** |
| * mmc_free_host - free the host structure |
| * @host: mmc host |
| * |
| * Free the host once all references to it have been dropped. |
| */ |
| void mmc_free_host(struct mmc_host *host) |
| { |
| mmc_pwrseq_free(host); |
| put_device(&host->class_dev); |
| } |
| |
| EXPORT_SYMBOL(mmc_free_host); |