| /* |
| * RTC subsystem, interface functions |
| * |
| * Copyright (C) 2005 Tower Technologies |
| * Author: Alessandro Zummo <a.zummo@towertech.it> |
| * |
| * based on arch/arm/common/rtctime.c |
| * |
| * This program is free software; you can redistribute it and/or modify |
| * it under the terms of the GNU General Public License version 2 as |
| * published by the Free Software Foundation. |
| */ |
| |
| #include <linux/rtc.h> |
| #include <linux/log2.h> |
| |
| int rtc_read_time(struct rtc_device *rtc, struct rtc_time *tm) |
| { |
| int err; |
| |
| err = mutex_lock_interruptible(&rtc->ops_lock); |
| if (err) |
| return -EBUSY; |
| |
| if (!rtc->ops) |
| err = -ENODEV; |
| else if (!rtc->ops->read_time) |
| err = -EINVAL; |
| else { |
| memset(tm, 0, sizeof(struct rtc_time)); |
| err = rtc->ops->read_time(rtc->dev.parent, tm); |
| } |
| |
| mutex_unlock(&rtc->ops_lock); |
| return err; |
| } |
| EXPORT_SYMBOL_GPL(rtc_read_time); |
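
/*
 * Example (illustrative sketch, not part of the interface): a caller that
 * already holds a struct rtc_device, e.g. one obtained via rtc_class_open(),
 * can read the current time and convert it to seconds since the epoch:
 *
 *	struct rtc_time tm;
 *	unsigned long now;
 *	int err = rtc_read_time(rtc, &tm);
 *
 *	if (err == 0)
 *		rtc_tm_to_time(&tm, &now);
 */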
| |
| int rtc_set_time(struct rtc_device *rtc, struct rtc_time *tm) |
| { |
| int err; |
| |
| err = rtc_valid_tm(tm); |
| if (err != 0) |
| return err; |
| |
| err = mutex_lock_interruptible(&rtc->ops_lock); |
| if (err) |
| return -EBUSY; |
| |
| if (!rtc->ops) |
| err = -ENODEV; |
| else if (!rtc->ops->set_time) |
| err = -EINVAL; |
| else |
| err = rtc->ops->set_time(rtc->dev.parent, tm); |
| |
| mutex_unlock(&rtc->ops_lock); |
| return err; |
| } |
| EXPORT_SYMBOL_GPL(rtc_set_time); |
| |
| int rtc_set_mmss(struct rtc_device *rtc, unsigned long secs) |
| { |
| int err; |
| |
| err = mutex_lock_interruptible(&rtc->ops_lock); |
| if (err) |
| return -EBUSY; |
| |
| if (!rtc->ops) |
| err = -ENODEV; |
| else if (rtc->ops->set_mmss) |
| err = rtc->ops->set_mmss(rtc->dev.parent, secs); |
| else if (rtc->ops->read_time && rtc->ops->set_time) { |
| struct rtc_time new, old; |
| |
| err = rtc->ops->read_time(rtc->dev.parent, &old); |
| if (err == 0) { |
| rtc_time_to_tm(secs, &new); |
| |
| /* |
| * avoid writing when we're going to change the day of |
| * the month. We will retry in the next minute. This |
| * basically means that if the RTC must not drift |
| * by more than 1 minute in 11 minutes. |
| */ |
| if (!((old.tm_hour == 23 && old.tm_min == 59) || |
| (new.tm_hour == 23 && new.tm_min == 59))) |
| err = rtc->ops->set_time(rtc->dev.parent, |
| &new); |
| } |
| } |
| else |
| err = -EINVAL; |
| |
| mutex_unlock(&rtc->ops_lock); |
| |
| return err; |
| } |
| EXPORT_SYMBOL_GPL(rtc_set_mmss); |
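
/*
 * Example (illustrative sketch): rtc_set_mmss() is meant for callers that
 * already have the wall time in seconds, e.g. code that periodically writes
 * the system time back to the RTC:
 *
 *	unsigned long now = get_seconds();
 *	int err = rtc_set_mmss(rtc, now);
 *
 * get_seconds() is used here only as a convenient source of the current
 * time in seconds; any valid seconds value works.
 */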
| |
| int rtc_read_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm) |
| { |
| int err; |
| |
| err = mutex_lock_interruptible(&rtc->ops_lock); |
| if (err) |
| return -EBUSY; |
| |
	if (!rtc->ops)
| err = -ENODEV; |
| else if (!rtc->ops->read_alarm) |
| err = -EINVAL; |
| else { |
| memset(alarm, 0, sizeof(struct rtc_wkalrm)); |
| err = rtc->ops->read_alarm(rtc->dev.parent, alarm); |
| } |
| |
| mutex_unlock(&rtc->ops_lock); |
| return err; |
| } |
| EXPORT_SYMBOL_GPL(rtc_read_alarm); |
| |
| int rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm) |
| { |
| int err; |
| |
| err = rtc_valid_tm(&alarm->time); |
| if (err != 0) |
| return err; |
| |
| err = mutex_lock_interruptible(&rtc->ops_lock); |
| if (err) |
| return -EBUSY; |
| |
| if (!rtc->ops) |
| err = -ENODEV; |
| else if (!rtc->ops->set_alarm) |
| err = -EINVAL; |
| else |
| err = rtc->ops->set_alarm(rtc->dev.parent, alarm); |
| |
| mutex_unlock(&rtc->ops_lock); |
| return err; |
| } |
| EXPORT_SYMBOL_GPL(rtc_set_alarm); |
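
/*
 * Example (illustrative sketch): program an alarm one minute ahead of the
 * RTC's current time.  Error handling is omitted for brevity; the helpers
 * and fields used below come from <linux/rtc.h>.
 *
 *	struct rtc_wkalrm alrm;
 *	unsigned long now;
 *
 *	rtc_read_time(rtc, &alrm.time);
 *	rtc_tm_to_time(&alrm.time, &now);
 *	rtc_time_to_tm(now + 60, &alrm.time);
 *	alrm.enabled = 1;
 *	rtc_set_alarm(rtc, &alrm);
 */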
| |
| /** |
| * rtc_update_irq - report RTC periodic, alarm, and/or update irqs |
| * @rtc: the rtc device |
| * @num: how many irqs are being reported (usually one) |
| * @events: mask of RTC_IRQF with one or more of RTC_PF, RTC_AF, RTC_UF |
| * Context: in_interrupt(), irqs blocked |
| */ |
| void rtc_update_irq(struct rtc_device *rtc, |
| unsigned long num, unsigned long events) |
| { |
| spin_lock(&rtc->irq_lock); |
| rtc->irq_data = (rtc->irq_data + (num << 8)) | events; |
| spin_unlock(&rtc->irq_lock); |
| |
| spin_lock(&rtc->irq_task_lock); |
| if (rtc->irq_task) |
| rtc->irq_task->func(rtc->irq_task->private_data); |
| spin_unlock(&rtc->irq_task_lock); |
| |
| wake_up_interruptible(&rtc->irq_queue); |
| kill_fasync(&rtc->async_queue, SIGIO, POLL_IN); |
| } |
| EXPORT_SYMBOL_GPL(rtc_update_irq); |
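
/*
 * Example (illustrative sketch): an RTC driver's interrupt handler would
 * typically report a single alarm event like this (the handler name and
 * the use of dev_id to carry the device are hypothetical):
 *
 *	static irqreturn_t foo_rtc_interrupt(int irq, void *dev_id)
 *	{
 *		struct rtc_device *rtc = dev_id;
 *
 *		rtc_update_irq(rtc, 1, RTC_IRQF | RTC_AF);
 *		return IRQ_HANDLED;
 *	}
 */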
| |
| struct rtc_device *rtc_class_open(char *name) |
| { |
| struct device *dev; |
| struct rtc_device *rtc = NULL; |
| |
| down(&rtc_class->sem); |
| list_for_each_entry(dev, &rtc_class->devices, node) { |
| if (strncmp(dev->bus_id, name, BUS_ID_SIZE) == 0) { |
| dev = get_device(dev); |
| if (dev) |
| rtc = to_rtc_device(dev); |
| break; |
| } |
| } |
| |
| if (rtc) { |
| if (!try_module_get(rtc->owner)) { |
| put_device(dev); |
| rtc = NULL; |
| } |
| } |
| up(&rtc_class->sem); |
| |
| return rtc; |
| } |
| EXPORT_SYMBOL_GPL(rtc_class_open); |
| |
| void rtc_class_close(struct rtc_device *rtc) |
| { |
| module_put(rtc->owner); |
| put_device(&rtc->dev); |
| } |
| EXPORT_SYMBOL_GPL(rtc_class_close); |
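
/*
 * Example (illustrative sketch): a kernel user opens an RTC by name and
 * must balance the open with rtc_class_close() when done.  "rtc0" is the
 * conventional name of the first registered device, used here only as an
 * assumption:
 *
 *	struct rtc_device *rtc = rtc_class_open("rtc0");
 *
 *	if (rtc) {
 *		struct rtc_time tm;
 *
 *		rtc_read_time(rtc, &tm);
 *		rtc_class_close(rtc);
 *	}
 */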
| |
| int rtc_irq_register(struct rtc_device *rtc, struct rtc_task *task) |
| { |
| int retval = -EBUSY; |
| |
| if (task == NULL || task->func == NULL) |
| return -EINVAL; |
| |
| /* Cannot register while the char dev is in use */ |
| if (!(mutex_trylock(&rtc->char_lock))) |
| return -EBUSY; |
| |
| spin_lock_irq(&rtc->irq_task_lock); |
| if (rtc->irq_task == NULL) { |
| rtc->irq_task = task; |
| retval = 0; |
| } |
| spin_unlock_irq(&rtc->irq_task_lock); |
| |
| mutex_unlock(&rtc->char_lock); |
| |
| return retval; |
| } |
| EXPORT_SYMBOL_GPL(rtc_irq_register); |
| |
| void rtc_irq_unregister(struct rtc_device *rtc, struct rtc_task *task) |
| { |
| spin_lock_irq(&rtc->irq_task_lock); |
| if (rtc->irq_task == task) |
| rtc->irq_task = NULL; |
| spin_unlock_irq(&rtc->irq_task_lock); |
| } |
| EXPORT_SYMBOL_GPL(rtc_irq_unregister); |
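
/*
 * Example (illustrative sketch): a kernel user of the periodic IRQ hooks
 * supplies its callback through a struct rtc_task; the callback runs from
 * rtc_update_irq(), i.e. in interrupt context.  The names below are
 * hypothetical:
 *
 *	static void foo_periodic(void *private_data)
 *	{
 *		pr_debug("periodic RTC tick\n");
 *	}
 *
 *	static struct rtc_task foo_task = {
 *		.func		= foo_periodic,
 *		.private_data	= NULL,
 *	};
 *
 *	err = rtc_irq_register(rtc, &foo_task);
 */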
| |
| /** |
| * rtc_irq_set_state - enable/disable 2^N Hz periodic IRQs |
| * @rtc: the rtc device |
| * @task: currently registered with rtc_irq_register() |
| * @enabled: true to enable periodic IRQs |
| * Context: any |
| * |
| * Note that rtc_irq_set_freq() should previously have been used to |
| * specify the desired frequency of periodic IRQ task->func() callbacks. |
| */ |
int rtc_irq_set_state(struct rtc_device *rtc, struct rtc_task *task,
		int enabled)
| { |
| int err = 0; |
| unsigned long flags; |
| |
| if (rtc->ops->irq_set_state == NULL) |
| return -ENXIO; |
| |
| spin_lock_irqsave(&rtc->irq_task_lock, flags); |
	if (rtc->irq_task != NULL && task == NULL)
		err = -EBUSY;
	else if (rtc->irq_task != task)
		err = -EACCES;
| spin_unlock_irqrestore(&rtc->irq_task_lock, flags); |
| |
| if (err == 0) |
| err = rtc->ops->irq_set_state(rtc->dev.parent, enabled); |
| |
| return err; |
| } |
| EXPORT_SYMBOL_GPL(rtc_irq_set_state); |
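
/*
 * Example (illustrative sketch): continuing with the hypothetical foo_task
 * registered in the sketch above, a caller first picks the tick rate and
 * then enables delivery, per the kerneldoc note:
 *
 *	err = rtc_irq_set_freq(rtc, &foo_task, 64);
 *	if (err == 0)
 *		err = rtc_irq_set_state(rtc, &foo_task, 1);
 */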
| |
| /** |
 * rtc_irq_set_freq - set 2^N Hz periodic IRQ frequency
| * @rtc: the rtc device |
| * @task: currently registered with rtc_irq_register() |
| * @freq: positive frequency with which task->func() will be called |
| * Context: any |
| * |
| * Note that rtc_irq_set_state() is used to enable or disable the |
| * periodic IRQs. |
| */ |
| int rtc_irq_set_freq(struct rtc_device *rtc, struct rtc_task *task, int freq) |
| { |
| int err = 0; |
| unsigned long flags; |
| |
| if (rtc->ops->irq_set_freq == NULL) |
| return -ENXIO; |
| |
| if (!is_power_of_2(freq)) |
| return -EINVAL; |
| |
| spin_lock_irqsave(&rtc->irq_task_lock, flags); |
	if (rtc->irq_task != NULL && task == NULL)
		err = -EBUSY;
	else if (rtc->irq_task != task)
		err = -EACCES;
| spin_unlock_irqrestore(&rtc->irq_task_lock, flags); |
| |
| if (err == 0) { |
| err = rtc->ops->irq_set_freq(rtc->dev.parent, freq); |
| if (err == 0) |
| rtc->irq_freq = freq; |
| } |
| return err; |
| } |
| EXPORT_SYMBOL_GPL(rtc_irq_set_freq); |
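
/*
 * Example (illustrative sketch): since only power-of-two rates are
 * accepted, a caller with an arbitrary requested rate (requested_hz below
 * is a stand-in) could round it up first with the helpers from
 * <linux/log2.h>, which is already included above:
 *
 *	int freq = requested_hz;
 *
 *	if (!is_power_of_2(freq))
 *		freq = roundup_pow_of_two(freq);
 *	err = rtc_irq_set_freq(rtc, &foo_task, freq);
 */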