/*
 * RTC subsystem, interface functions
 *
 * Copyright (C) 2005 Tower Technologies
 * Author: Alessandro Zummo <a.zummo@towertech.it>
 *
 * based on arch/arm/common/rtctime.c
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/rtc.h>

int rtc_read_time(struct class_device *class_dev, struct rtc_time *tm)
{
	int err;
	struct rtc_device *rtc = to_rtc_device(class_dev);

	err = mutex_lock_interruptible(&rtc->ops_lock);
	if (err)
		return -EBUSY;

	if (!rtc->ops)
		err = -ENODEV;
	else if (!rtc->ops->read_time)
		err = -EINVAL;
	else {
		memset(tm, 0, sizeof(struct rtc_time));
		err = rtc->ops->read_time(class_dev->dev, tm);
	}

	mutex_unlock(&rtc->ops_lock);
	return err;
}
EXPORT_SYMBOL_GPL(rtc_read_time);

int rtc_set_time(struct class_device *class_dev, struct rtc_time *tm)
{
	int err;
	struct rtc_device *rtc = to_rtc_device(class_dev);

	err = rtc_valid_tm(tm);
	if (err != 0)
		return err;

	err = mutex_lock_interruptible(&rtc->ops_lock);
	if (err)
		return -EBUSY;

	if (!rtc->ops)
		err = -ENODEV;
	else if (!rtc->ops->set_time)
		err = -EINVAL;
	else
		err = rtc->ops->set_time(class_dev->dev, tm);

	mutex_unlock(&rtc->ops_lock);
	return err;
}
EXPORT_SYMBOL_GPL(rtc_set_time);
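
/*
 * Illustrative example (not part of this file): setting the clock to
 * 2006-01-02 03:04:05 with rtc_set_time().  struct rtc_time follows the
 * userspace struct tm conventions: tm_year counts from 1900 and tm_mon
 * from 0.  "class_dev" is assumed to come from rtc_class_open() below.
 *
 *	struct rtc_time tm = {
 *		.tm_year = 106, .tm_mon = 0, .tm_mday = 2,
 *		.tm_hour = 3, .tm_min = 4, .tm_sec = 5,
 *	};
 *
 *	err = rtc_set_time(class_dev, &tm);
 */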

int rtc_set_mmss(struct class_device *class_dev, unsigned long secs)
{
	int err;
	struct rtc_device *rtc = to_rtc_device(class_dev);

	err = mutex_lock_interruptible(&rtc->ops_lock);
	if (err)
		return -EBUSY;

	if (!rtc->ops)
		err = -ENODEV;
	else if (rtc->ops->set_mmss)
		err = rtc->ops->set_mmss(class_dev->dev, secs);
	else if (rtc->ops->read_time && rtc->ops->set_time) {
		struct rtc_time new, old;

		err = rtc->ops->read_time(class_dev->dev, &old);
		if (err == 0) {
			rtc_time_to_tm(secs, &new);

			/*
			 * Avoid writing when we're going to change the day
			 * of the month; we will retry in the next minute.
			 * This basically means that the RTC must not drift
			 * by more than 1 minute in 11 minutes.
			 */
			if (!((old.tm_hour == 23 && old.tm_min == 59) ||
			      (new.tm_hour == 23 && new.tm_min == 59)))
				err = rtc->ops->set_time(class_dev->dev, &new);
		}
	} else
		err = -EINVAL;

	mutex_unlock(&rtc->ops_lock);

	return err;
}
EXPORT_SYMBOL_GPL(rtc_set_mmss);

int rtc_read_alarm(struct class_device *class_dev, struct rtc_wkalrm *alarm)
{
	int err;
	struct rtc_device *rtc = to_rtc_device(class_dev);

	err = mutex_lock_interruptible(&rtc->ops_lock);
	if (err)
		return -EBUSY;

	if (rtc->ops == NULL)
		err = -ENODEV;
	else if (!rtc->ops->read_alarm)
		err = -EINVAL;
	else {
		memset(alarm, 0, sizeof(struct rtc_wkalrm));
		err = rtc->ops->read_alarm(class_dev->dev, alarm);
	}

	mutex_unlock(&rtc->ops_lock);
	return err;
}
EXPORT_SYMBOL_GPL(rtc_read_alarm);

int rtc_set_alarm(struct class_device *class_dev, struct rtc_wkalrm *alarm)
{
	int err;
	struct rtc_device *rtc = to_rtc_device(class_dev);

	err = mutex_lock_interruptible(&rtc->ops_lock);
	if (err)
		return -EBUSY;

	if (!rtc->ops)
		err = -ENODEV;
	else if (!rtc->ops->set_alarm)
		err = -EINVAL;
	else
		err = rtc->ops->set_alarm(class_dev->dev, alarm);

	mutex_unlock(&rtc->ops_lock);
	return err;
}
EXPORT_SYMBOL_GPL(rtc_set_alarm);
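
/*
 * Illustrative example (not part of this file): arming an alarm roughly
 * one minute from now, assuming "class_dev" comes from rtc_class_open()
 * below.  Minute rollover handling is omitted for brevity.
 *
 *	struct rtc_wkalrm alrm;
 *
 *	if (rtc_read_time(class_dev, &alrm.time) == 0) {
 *		alrm.time.tm_min += 1;
 *		alrm.enabled = 1;
 *		err = rtc_set_alarm(class_dev, &alrm);
 *	}
 */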

/**
 * rtc_update_irq - report RTC periodic, alarm, and/or update irqs
 * @class_dev: the rtc's class device
 * @num: how many irqs are being reported (usually one)
 * @events: mask of RTC_IRQF with one or more of RTC_PF, RTC_AF, RTC_UF
 * Context: in_interrupt(), irqs blocked
 */
void rtc_update_irq(struct class_device *class_dev,
		unsigned long num, unsigned long events)
{
	struct rtc_device *rtc = to_rtc_device(class_dev);

	spin_lock(&rtc->irq_lock);
	rtc->irq_data = (rtc->irq_data + (num << 8)) | events;
	spin_unlock(&rtc->irq_lock);

	spin_lock(&rtc->irq_task_lock);
	if (rtc->irq_task)
		rtc->irq_task->func(rtc->irq_task->private_data);
	spin_unlock(&rtc->irq_task_lock);

	wake_up_interruptible(&rtc->irq_queue);
	kill_fasync(&rtc->async_queue, SIGIO, POLL_IN);
}
EXPORT_SYMBOL_GPL(rtc_update_irq);
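
/*
 * Illustrative example (not part of this file): an RTC driver's interrupt
 * handler might report a fired alarm like this, assuming it kept the
 * struct rtc_device it got from rtc_device_register() in "rtc" and passes
 * the embedded class_device:
 *
 *	static irqreturn_t foo_rtc_interrupt(int irq, void *dev_id)
 *	{
 *		struct rtc_device *rtc = dev_id;
 *
 *		rtc_update_irq(&rtc->class_dev, 1, RTC_AF | RTC_IRQF);
 *		return IRQ_HANDLED;
 *	}
 */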

struct class_device *rtc_class_open(char *name)
{
	struct class_device *class_dev = NULL,
				*class_dev_tmp;

	down(&rtc_class->sem);
	list_for_each_entry(class_dev_tmp, &rtc_class->children, node) {
		if (strncmp(class_dev_tmp->class_id, name, BUS_ID_SIZE) == 0) {
			class_dev = class_dev_tmp;
			break;
		}
	}

	if (class_dev) {
		if (!try_module_get(to_rtc_device(class_dev)->owner))
			class_dev = NULL;
	}
	up(&rtc_class->sem);

	return class_dev;
}
EXPORT_SYMBOL_GPL(rtc_class_open);

void rtc_class_close(struct class_device *class_dev)
{
	module_put(to_rtc_device(class_dev)->owner);
}
EXPORT_SYMBOL_GPL(rtc_class_close);
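
/*
 * Illustrative example (not part of this file): an in-kernel user can look
 * up an RTC by name, read it, and drop the module reference when done:
 *
 *	struct class_device *class_dev = rtc_class_open("rtc0");
 *	struct rtc_time tm;
 *
 *	if (class_dev) {
 *		if (rtc_read_time(class_dev, &tm) == 0)
 *			printk(KERN_INFO "%04d-%02d-%02d %02d:%02d:%02d\n",
 *				tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday,
 *				tm.tm_hour, tm.tm_min, tm.tm_sec);
 *		rtc_class_close(class_dev);
 *	}
 */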

int rtc_irq_register(struct class_device *class_dev, struct rtc_task *task)
{
	int retval = -EBUSY;
	struct rtc_device *rtc = to_rtc_device(class_dev);

	if (task == NULL || task->func == NULL)
		return -EINVAL;

	spin_lock_irq(&rtc->irq_task_lock);
	if (rtc->irq_task == NULL) {
		rtc->irq_task = task;
		retval = 0;
	}
	spin_unlock_irq(&rtc->irq_task_lock);

	return retval;
}
EXPORT_SYMBOL_GPL(rtc_irq_register);

void rtc_irq_unregister(struct class_device *class_dev, struct rtc_task *task)
{
	struct rtc_device *rtc = to_rtc_device(class_dev);

	spin_lock_irq(&rtc->irq_task_lock);
	if (rtc->irq_task == task)
		rtc->irq_task = NULL;
	spin_unlock_irq(&rtc->irq_task_lock);
}
EXPORT_SYMBOL_GPL(rtc_irq_unregister);

int rtc_irq_set_state(struct class_device *class_dev, struct rtc_task *task,
			int enabled)
{
	int err = 0;
	unsigned long flags;
	struct rtc_device *rtc = to_rtc_device(class_dev);

	if (rtc->ops->irq_set_state == NULL)
		return -ENXIO;

	spin_lock_irqsave(&rtc->irq_task_lock, flags);
	if (rtc->irq_task != task)
		err = -ENXIO;
	spin_unlock_irqrestore(&rtc->irq_task_lock, flags);

	if (err == 0)
		err = rtc->ops->irq_set_state(class_dev->dev, enabled);

	return err;
}
EXPORT_SYMBOL_GPL(rtc_irq_set_state);

int rtc_irq_set_freq(struct class_device *class_dev, struct rtc_task *task,
			int freq)
{
	int err = 0;
	unsigned long flags;
	struct rtc_device *rtc = to_rtc_device(class_dev);

	if (rtc->ops->irq_set_freq == NULL)
		return -ENXIO;

	spin_lock_irqsave(&rtc->irq_task_lock, flags);
	if (rtc->irq_task != task)
		err = -ENXIO;
	spin_unlock_irqrestore(&rtc->irq_task_lock, flags);

	if (err == 0) {
		err = rtc->ops->irq_set_freq(class_dev->dev, freq);
		if (err == 0)
			rtc->irq_freq = freq;
	}
	return err;
}
EXPORT_SYMBOL_GPL(rtc_irq_set_freq);
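
/*
 * Illustrative example (not part of this file): a kernel consumer that
 * wants a 1 Hz periodic callback could register a task and enable the
 * interrupt, assuming "my_tick" is its callback function:
 *
 *	static struct rtc_task my_task = {
 *		.func = my_tick,
 *		.private_data = NULL,
 *	};
 *
 *	if (rtc_irq_register(class_dev, &my_task) == 0) {
 *		if (rtc_irq_set_freq(class_dev, &my_task, 1) == 0)
 *			rtc_irq_set_state(class_dev, &my_task, 1);
 *	}
 */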