blob: 0427c2c60eab002e1096b67d4891d140b307a4f3 [file] [log] [blame]
/*
 * drivers/w1/masters/omap_hdq.c
 *
 * Copyright (C) 2007,2012 Texas Instruments, Inc.
 *
 * This file is licensed under the terms of the GNU General Public License
 * version 2. This program is licensed "as is" without any warranty of any
 * kind, whether express or implied.
 *
 */
11#include <linux/kernel.h>
12#include <linux/module.h>
13#include <linux/platform_device.h>
14#include <linux/interrupt.h>
Tejun Heo5a0e3ad2010-03-24 17:04:11 +090015#include <linux/slab.h>
Madhusudhan Chikkature9f2bc792008-11-12 13:27:09 -080016#include <linux/err.h>
Madhusudhan Chikkature9f2bc792008-11-12 13:27:09 -080017#include <linux/io.h>
Amit Kucheria81fa08f2010-04-23 13:18:03 -040018#include <linux/sched.h>
Paul Walmsleyc354a862012-06-21 21:40:40 -060019#include <linux/pm_runtime.h>
Madhusudhan Chikkature9f2bc792008-11-12 13:27:09 -080020
21#include <asm/irq.h>
22#include <mach/hardware.h>
23
24#include "../w1.h"
25#include "../w1_int.h"
26
#define MOD_NAME	"OMAP_HDQ:"

/* HDQ controller register offsets */
#define OMAP_HDQ_REVISION			0x00
#define OMAP_HDQ_TX_DATA			0x04
#define OMAP_HDQ_RX_DATA			0x08
#define OMAP_HDQ_CTRL_STATUS			0x0c
/* CTRL_STATUS bits */
#define OMAP_HDQ_CTRL_STATUS_INTERRUPTMASK	(1<<6)
#define OMAP_HDQ_CTRL_STATUS_CLOCKENABLE	(1<<5)
#define OMAP_HDQ_CTRL_STATUS_GO			(1<<4)
#define OMAP_HDQ_CTRL_STATUS_INITIALIZATION	(1<<2)
#define OMAP_HDQ_CTRL_STATUS_DIR		(1<<1)
#define OMAP_HDQ_CTRL_STATUS_MODE		(1<<0)
#define OMAP_HDQ_INT_STATUS			0x10
/* INT_STATUS bits: latched by hdq_isr() into hdq_data->hdq_irqstatus */
#define OMAP_HDQ_INT_STATUS_TXCOMPLETE		(1<<2)
#define OMAP_HDQ_INT_STATUS_RXCOMPLETE		(1<<1)
#define OMAP_HDQ_INT_STATUS_TIMEOUT		(1<<0)
#define OMAP_HDQ_SYSCONFIG			0x14
#define OMAP_HDQ_SYSCONFIG_SOFTRESET		(1<<1)
#define OMAP_HDQ_SYSCONFIG_AUTOIDLE		(1<<0)
#define OMAP_HDQ_SYSSTATUS			0x18
#define OMAP_HDQ_SYSSTATUS_RESETDONE		(1<<0)

/* flag_set arguments for hdq_wait_for_flag() */
#define OMAP_HDQ_FLAG_CLEAR			0
#define OMAP_HDQ_FLAG_SET			1
/* timeout used both for interrupt waits and register polling (jiffies) */
#define OMAP_HDQ_TIMEOUT			(HZ/5)

/* maximum concurrent users tracked by hdq_usecount */
#define OMAP_HDQ_MAX_USER			4

/* waiters sleep here until hdq_isr() records a new interrupt status */
static DECLARE_WAIT_QUEUE_HEAD(hdq_wait_queue);
/* module parameter: id reported for the single HDQ slave (see search) */
static int w1_id;
57
/* Per-device state for one OMAP HDQ controller instance. */
struct hdq_data {
	struct device		*dev;
	/* ioremap'ed base of the HDQ register block */
	void __iomem		*hdq_base;
	/* lock status update */
	struct mutex		hdq_mutex;
	/* number of active users (bounded by OMAP_HDQ_MAX_USER) */
	int			hdq_usecount;
	/* last OMAP_HDQ_INT_STATUS value captured by the ISR */
	u8			hdq_irqstatus;
	/* device lock */
	spinlock_t		hdq_spinlock;
	/*
	 * Used to control the call to omap_hdq_get and omap_hdq_put.
	 * HDQ Protocol: Write the CMD|REG_address first, followed by
	 * the data write or read.
	 */
	int			init_trans;
};
74
static int __devinit omap_hdq_probe(struct platform_device *pdev);
static int __devexit omap_hdq_remove(struct platform_device *pdev);

static struct platform_driver omap_hdq_driver = {
	.probe =	omap_hdq_probe,
	.remove =	__devexit_p(omap_hdq_remove),
	.driver =	{
		.name =	"omap_hdq",
	},
};

/* w1 bus-master callbacks, implemented below */
static u8 omap_w1_read_byte(void *_hdq);
static void omap_w1_write_byte(void *_hdq, u8 byte);
static u8 omap_w1_reset_bus(void *_hdq);
static void omap_w1_search_bus(void *_hdq, struct w1_master *master_dev,
	u8 search_type, w1_slave_found_callback slave_found);

/* operations handed to the w1 core in omap_hdq_probe() */
static struct w1_bus_master omap_w1_master = {
	.read_byte	= omap_w1_read_byte,
	.write_byte	= omap_w1_write_byte,
	.reset_bus	= omap_w1_reset_bus,
	.search		= omap_w1_search_bus,
};
99
100/* HDQ register I/O routines */
101static inline u8 hdq_reg_in(struct hdq_data *hdq_data, u32 offset)
102{
Paul Walmsley2acd08942012-06-21 21:40:37 -0600103 return __raw_readl(hdq_data->hdq_base + offset);
Madhusudhan Chikkature9f2bc792008-11-12 13:27:09 -0800104}
105
106static inline void hdq_reg_out(struct hdq_data *hdq_data, u32 offset, u8 val)
107{
Paul Walmsley2acd08942012-06-21 21:40:37 -0600108 __raw_writel(val, hdq_data->hdq_base + offset);
Madhusudhan Chikkature9f2bc792008-11-12 13:27:09 -0800109}
110
111static inline u8 hdq_reg_merge(struct hdq_data *hdq_data, u32 offset,
112 u8 val, u8 mask)
113{
Paul Walmsley2acd08942012-06-21 21:40:37 -0600114 u8 new_val = (__raw_readl(hdq_data->hdq_base + offset) & ~mask)
Madhusudhan Chikkature9f2bc792008-11-12 13:27:09 -0800115 | (val & mask);
Paul Walmsley2acd08942012-06-21 21:40:37 -0600116 __raw_writel(new_val, hdq_data->hdq_base + offset);
Madhusudhan Chikkature9f2bc792008-11-12 13:27:09 -0800117
118 return new_val;
119}
120
121/*
122 * Wait for one or more bits in flag change.
123 * HDQ_FLAG_SET: wait until any bit in the flag is set.
124 * HDQ_FLAG_CLEAR: wait until all bits in the flag are cleared.
125 * return 0 on success and -ETIMEDOUT in the case of timeout.
126 */
127static int hdq_wait_for_flag(struct hdq_data *hdq_data, u32 offset,
128 u8 flag, u8 flag_set, u8 *status)
129{
130 int ret = 0;
131 unsigned long timeout = jiffies + OMAP_HDQ_TIMEOUT;
132
133 if (flag_set == OMAP_HDQ_FLAG_CLEAR) {
134 /* wait for the flag clear */
135 while (((*status = hdq_reg_in(hdq_data, offset)) & flag)
136 && time_before(jiffies, timeout)) {
137 schedule_timeout_uninterruptible(1);
138 }
139 if (*status & flag)
140 ret = -ETIMEDOUT;
141 } else if (flag_set == OMAP_HDQ_FLAG_SET) {
142 /* wait for the flag set */
143 while (!((*status = hdq_reg_in(hdq_data, offset)) & flag)
144 && time_before(jiffies, timeout)) {
145 schedule_timeout_uninterruptible(1);
146 }
147 if (!(*status & flag))
148 ret = -ETIMEDOUT;
149 } else
150 return -EINVAL;
151
152 return ret;
153}
154
/*
 * Write out a byte and fill *status with HDQ_INT_STATUS.
 *
 * Arms the transmitter, then sleeps (up to OMAP_HDQ_TIMEOUT) until the
 * ISR reports any interrupt status, and finally polls for the GO bit to
 * clear.  Returns 0 on success, -ETIMEDOUT if no interrupt arrives, if
 * TXCOMPLETE is not among the reported bits, or if GO never clears.
 */
static int hdq_write_byte(struct hdq_data *hdq_data, u8 val, u8 *status)
{
	int ret;
	u8 tmp_status;
	unsigned long irqflags;

	*status = 0;

	/* spinlock pairs with hdq_isr() updating hdq_irqstatus */
	spin_lock_irqsave(&hdq_data->hdq_spinlock, irqflags);
	/* clear interrupt flags via a dummy read */
	hdq_reg_in(hdq_data, OMAP_HDQ_INT_STATUS);
	/* ISR loads it with new INT_STATUS */
	hdq_data->hdq_irqstatus = 0;
	spin_unlock_irqrestore(&hdq_data->hdq_spinlock, irqflags);

	hdq_reg_out(hdq_data, OMAP_HDQ_TX_DATA, val);

	/* set the GO bit (DIR cleared in the same write => transmit) */
	hdq_reg_merge(hdq_data, OMAP_HDQ_CTRL_STATUS, OMAP_HDQ_CTRL_STATUS_GO,
		OMAP_HDQ_CTRL_STATUS_DIR | OMAP_HDQ_CTRL_STATUS_GO);
	/* wait for the TXCOMPLETE bit */
	ret = wait_event_timeout(hdq_wait_queue,
		hdq_data->hdq_irqstatus, OMAP_HDQ_TIMEOUT);
	if (ret == 0) {
		dev_dbg(hdq_data->dev, "TX wait elapsed\n");
		ret = -ETIMEDOUT;
		goto out;
	}

	*status = hdq_data->hdq_irqstatus;
	/* check irqstatus */
	if (!(*status & OMAP_HDQ_INT_STATUS_TXCOMPLETE)) {
		dev_dbg(hdq_data->dev, "timeout waiting for"
			" TXCOMPLETE/RXCOMPLETE, %x", *status);
		ret = -ETIMEDOUT;
		goto out;
	}

	/* wait for the GO bit return to zero */
	ret = hdq_wait_for_flag(hdq_data, OMAP_HDQ_CTRL_STATUS,
		OMAP_HDQ_CTRL_STATUS_GO,
		OMAP_HDQ_FLAG_CLEAR, &tmp_status);
	if (ret) {
		dev_dbg(hdq_data->dev, "timeout waiting GO bit"
			" return to zero, %x", tmp_status);
	}

out:
	return ret;
}
206
207/* HDQ Interrupt service routine */
208static irqreturn_t hdq_isr(int irq, void *_hdq)
209{
210 struct hdq_data *hdq_data = _hdq;
211 unsigned long irqflags;
212
213 spin_lock_irqsave(&hdq_data->hdq_spinlock, irqflags);
214 hdq_data->hdq_irqstatus = hdq_reg_in(hdq_data, OMAP_HDQ_INT_STATUS);
215 spin_unlock_irqrestore(&hdq_data->hdq_spinlock, irqflags);
216 dev_dbg(hdq_data->dev, "hdq_isr: %x", hdq_data->hdq_irqstatus);
217
218 if (hdq_data->hdq_irqstatus &
219 (OMAP_HDQ_INT_STATUS_TXCOMPLETE | OMAP_HDQ_INT_STATUS_RXCOMPLETE
220 | OMAP_HDQ_INT_STATUS_TIMEOUT)) {
221 /* wake up sleeping process */
222 wake_up(&hdq_wait_queue);
223 }
224
225 return IRQ_HANDLED;
226}
227
228/* HDQ Mode: always return success */
229static u8 omap_w1_reset_bus(void *_hdq)
230{
231 return 0;
232}
233
234/* W1 search callback function */
Stanley.Miao06b0d4d2008-11-19 15:36:50 -0800235static void omap_w1_search_bus(void *_hdq, struct w1_master *master_dev,
236 u8 search_type, w1_slave_found_callback slave_found)
Madhusudhan Chikkature9f2bc792008-11-12 13:27:09 -0800237{
238 u64 module_id, rn_le, cs, id;
239
240 if (w1_id)
241 module_id = w1_id;
242 else
243 module_id = 0x1;
244
245 rn_le = cpu_to_le64(module_id);
246 /*
247 * HDQ might not obey truly the 1-wire spec.
248 * So calculate CRC based on module parameter.
249 */
250 cs = w1_calc_crc8((u8 *)&rn_le, 7);
251 id = (cs << 56) | module_id;
252
Stanley.Miao06b0d4d2008-11-19 15:36:50 -0800253 slave_found(master_dev, id);
Madhusudhan Chikkature9f2bc792008-11-12 13:27:09 -0800254}
255
/*
 * Soft-reset the HDQ controller and restore HDQ mode.
 * Returns 0 on success, -ETIMEDOUT if RESETDONE never asserts.
 * Caller is expected to hold hdq_mutex (called from omap_hdq_get()).
 */
static int _omap_hdq_reset(struct hdq_data *hdq_data)
{
	int ret;
	u8 tmp_status;

	hdq_reg_out(hdq_data, OMAP_HDQ_SYSCONFIG, OMAP_HDQ_SYSCONFIG_SOFTRESET);
	/*
	 * Select HDQ mode & enable clocks.
	 * It is observed that INT flags can't be cleared via a read and GO/INIT
	 * won't return to zero if interrupt is disabled. So we always enable
	 * interrupt.
	 */
	hdq_reg_out(hdq_data, OMAP_HDQ_CTRL_STATUS,
		OMAP_HDQ_CTRL_STATUS_CLOCKENABLE |
		OMAP_HDQ_CTRL_STATUS_INTERRUPTMASK);

	/* wait for reset to complete */
	ret = hdq_wait_for_flag(hdq_data, OMAP_HDQ_SYSSTATUS,
		OMAP_HDQ_SYSSTATUS_RESETDONE, OMAP_HDQ_FLAG_SET, &tmp_status);
	if (ret)
		dev_dbg(hdq_data->dev, "timeout waiting HDQ reset, %x",
			tmp_status);
	else {
		/* reset cleared CTRL_STATUS; re-select HDQ mode + clocks */
		hdq_reg_out(hdq_data, OMAP_HDQ_CTRL_STATUS,
			OMAP_HDQ_CTRL_STATUS_CLOCKENABLE |
			OMAP_HDQ_CTRL_STATUS_INTERRUPTMASK);
		hdq_reg_out(hdq_data, OMAP_HDQ_SYSCONFIG,
			OMAP_HDQ_SYSCONFIG_AUTOIDLE);
	}

	return ret;
}
288
289/* Issue break pulse to the device */
290static int omap_hdq_break(struct hdq_data *hdq_data)
291{
292 int ret = 0;
293 u8 tmp_status;
294 unsigned long irqflags;
295
296 ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
297 if (ret < 0) {
298 dev_dbg(hdq_data->dev, "Could not acquire mutex\n");
299 ret = -EINTR;
300 goto rtn;
301 }
302
303 spin_lock_irqsave(&hdq_data->hdq_spinlock, irqflags);
304 /* clear interrupt flags via a dummy read */
305 hdq_reg_in(hdq_data, OMAP_HDQ_INT_STATUS);
306 /* ISR loads it with new INT_STATUS */
307 hdq_data->hdq_irqstatus = 0;
308 spin_unlock_irqrestore(&hdq_data->hdq_spinlock, irqflags);
309
310 /* set the INIT and GO bit */
311 hdq_reg_merge(hdq_data, OMAP_HDQ_CTRL_STATUS,
312 OMAP_HDQ_CTRL_STATUS_INITIALIZATION | OMAP_HDQ_CTRL_STATUS_GO,
313 OMAP_HDQ_CTRL_STATUS_DIR | OMAP_HDQ_CTRL_STATUS_INITIALIZATION |
314 OMAP_HDQ_CTRL_STATUS_GO);
315
316 /* wait for the TIMEOUT bit */
317 ret = wait_event_timeout(hdq_wait_queue,
318 hdq_data->hdq_irqstatus, OMAP_HDQ_TIMEOUT);
319 if (ret == 0) {
320 dev_dbg(hdq_data->dev, "break wait elapsed\n");
321 ret = -EINTR;
322 goto out;
323 }
324
325 tmp_status = hdq_data->hdq_irqstatus;
326 /* check irqstatus */
327 if (!(tmp_status & OMAP_HDQ_INT_STATUS_TIMEOUT)) {
328 dev_dbg(hdq_data->dev, "timeout waiting for TIMEOUT, %x",
329 tmp_status);
330 ret = -ETIMEDOUT;
331 goto out;
332 }
333 /*
334 * wait for both INIT and GO bits rerurn to zero.
335 * zero wait time expected for interrupt mode.
336 */
337 ret = hdq_wait_for_flag(hdq_data, OMAP_HDQ_CTRL_STATUS,
338 OMAP_HDQ_CTRL_STATUS_INITIALIZATION |
339 OMAP_HDQ_CTRL_STATUS_GO, OMAP_HDQ_FLAG_CLEAR,
340 &tmp_status);
341 if (ret)
342 dev_dbg(hdq_data->dev, "timeout waiting INIT&GO bits"
NeilBrown7b5362a2012-05-22 09:43:02 +1000343 " return to zero, %x", tmp_status);
Madhusudhan Chikkature9f2bc792008-11-12 13:27:09 -0800344
345out:
346 mutex_unlock(&hdq_data->hdq_mutex);
347rtn:
348 return ret;
349}
350
/*
 * Read one byte from the RX register into *val.
 *
 * If the ISR has not already reported RXCOMPLETE (the RX can follow
 * immediately after a TX), start a read by setting DIR|GO and sleep
 * until the completion interrupt or OMAP_HDQ_TIMEOUT.
 *
 * Returns 0 on success, -EINTR if the mutex acquisition was
 * interrupted, -EINVAL if the controller has no active user,
 * -ETIMEDOUT if RXCOMPLETE never arrives.
 */
static int hdq_read_byte(struct hdq_data *hdq_data, u8 *val)
{
	int ret = 0;
	u8 status;

	ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
	if (ret < 0) {
		ret = -EINTR;
		goto rtn;
	}

	/* reads are only valid while a user holds the module (clocks on) */
	if (!hdq_data->hdq_usecount) {
		ret = -EINVAL;
		goto out;
	}

	if (!(hdq_data->hdq_irqstatus & OMAP_HDQ_INT_STATUS_RXCOMPLETE)) {
		/* DIR=1 selects receive; GO starts the transfer */
		hdq_reg_merge(hdq_data, OMAP_HDQ_CTRL_STATUS,
			OMAP_HDQ_CTRL_STATUS_DIR | OMAP_HDQ_CTRL_STATUS_GO,
			OMAP_HDQ_CTRL_STATUS_DIR | OMAP_HDQ_CTRL_STATUS_GO);
		/*
		 * The RX comes immediately after TX.
		 */
		wait_event_timeout(hdq_wait_queue,
			(hdq_data->hdq_irqstatus
				& OMAP_HDQ_INT_STATUS_RXCOMPLETE),
			OMAP_HDQ_TIMEOUT);

		/* clear DIR so the controller returns to write direction */
		hdq_reg_merge(hdq_data, OMAP_HDQ_CTRL_STATUS, 0,
			OMAP_HDQ_CTRL_STATUS_DIR);
		status = hdq_data->hdq_irqstatus;
		/* check irqstatus */
		if (!(status & OMAP_HDQ_INT_STATUS_RXCOMPLETE)) {
			dev_dbg(hdq_data->dev, "timeout waiting for"
				" RXCOMPLETE, %x", status);
			ret = -ETIMEDOUT;
			goto out;
		}
	}
	/* the data is ready. Read it in! */
	*val = hdq_reg_in(hdq_data, OMAP_HDQ_RX_DATA);
out:
	mutex_unlock(&hdq_data->hdq_mutex);
rtn:
	return ret;

}
398
/*
 * Enable clocks and set the controller to HDQ mode.
 *
 * Increments hdq_usecount; the first user powers the module up via
 * runtime PM and restores HDQ mode (resetting the block first if
 * RESETDONE is not asserted).  Returns 0 on success, -EINTR if the
 * mutex acquisition was interrupted, -EINVAL when the use count is
 * already at OMAP_HDQ_MAX_USER.
 */
static int omap_hdq_get(struct hdq_data *hdq_data)
{
	int ret = 0;

	ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
	if (ret < 0) {
		ret = -EINTR;
		goto rtn;
	}

	if (OMAP_HDQ_MAX_USER == hdq_data->hdq_usecount) {
		dev_dbg(hdq_data->dev, "attempt to exceed the max use count");
		ret = -EINVAL;
		goto out;
	} else {
		hdq_data->hdq_usecount++;
		/* NOTE(review): try_module_get() result is ignored here */
		try_module_get(THIS_MODULE);
		if (1 == hdq_data->hdq_usecount) {
			/* first user: power the module up */
			pm_runtime_get_sync(hdq_data->dev);

			/* make sure HDQ is out of reset */
			if (!(hdq_reg_in(hdq_data, OMAP_HDQ_SYSSTATUS) &
				OMAP_HDQ_SYSSTATUS_RESETDONE)) {
				ret = _omap_hdq_reset(hdq_data);
				if (ret)
					/* back up the count */
					hdq_data->hdq_usecount--;
			} else {
				/* select HDQ mode & enable clocks */
				hdq_reg_out(hdq_data, OMAP_HDQ_CTRL_STATUS,
					OMAP_HDQ_CTRL_STATUS_CLOCKENABLE |
					OMAP_HDQ_CTRL_STATUS_INTERRUPTMASK);
				hdq_reg_out(hdq_data, OMAP_HDQ_SYSCONFIG,
					OMAP_HDQ_SYSCONFIG_AUTOIDLE);
				/* flush any stale interrupt status */
				hdq_reg_in(hdq_data, OMAP_HDQ_INT_STATUS);
			}
		}
	}

out:
	mutex_unlock(&hdq_data->hdq_mutex);
rtn:
	return ret;
}
445
446/* Disable clocks to the module */
447static int omap_hdq_put(struct hdq_data *hdq_data)
448{
449 int ret = 0;
450
451 ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
452 if (ret < 0)
453 return -EINTR;
454
455 if (0 == hdq_data->hdq_usecount) {
456 dev_dbg(hdq_data->dev, "attempt to decrement use count"
NeilBrown7b5362a2012-05-22 09:43:02 +1000457 " when it is zero");
Madhusudhan Chikkature9f2bc792008-11-12 13:27:09 -0800458 ret = -EINVAL;
459 } else {
460 hdq_data->hdq_usecount--;
461 module_put(THIS_MODULE);
Paul Walmsleyc354a862012-06-21 21:40:40 -0600462 if (0 == hdq_data->hdq_usecount)
463 pm_runtime_put_sync(hdq_data->dev);
Madhusudhan Chikkature9f2bc792008-11-12 13:27:09 -0800464 }
465 mutex_unlock(&hdq_data->hdq_mutex);
466
467 return ret;
468}
469
470/* Read a byte of data from the device */
471static u8 omap_w1_read_byte(void *_hdq)
472{
473 struct hdq_data *hdq_data = _hdq;
474 u8 val = 0;
475 int ret;
476
477 ret = hdq_read_byte(hdq_data, &val);
478 if (ret) {
479 ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
480 if (ret < 0) {
481 dev_dbg(hdq_data->dev, "Could not acquire mutex\n");
482 return -EINTR;
483 }
484 hdq_data->init_trans = 0;
485 mutex_unlock(&hdq_data->hdq_mutex);
486 omap_hdq_put(hdq_data);
487 return -1;
488 }
489
490 /* Write followed by a read, release the module */
491 if (hdq_data->init_trans) {
492 ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
493 if (ret < 0) {
494 dev_dbg(hdq_data->dev, "Could not acquire mutex\n");
495 return -EINTR;
496 }
497 hdq_data->init_trans = 0;
498 mutex_unlock(&hdq_data->hdq_mutex);
499 omap_hdq_put(hdq_data);
500 }
501
502 return val;
503}
504
/*
 * Write a byte of data to the device.
 *
 * HDQ protocol (see struct hdq_data): the first write of a transaction
 * carries CMD|REG_address and acquires the module (omap_hdq_get); a
 * second write completes a write transaction and releases it.  A read
 * following the first write is released in omap_w1_read_byte().
 * NOTE(review): on TX failure the early return leaves the module held
 * and init_trans set -- confirm whether that is intentional.
 */
static void omap_w1_write_byte(void *_hdq, u8 byte)
{
	struct hdq_data *hdq_data = _hdq;
	int ret;
	u8 status;

	/* First write to initialize the transfer */
	if (hdq_data->init_trans == 0)
		omap_hdq_get(hdq_data);

	ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
	if (ret < 0) {
		dev_dbg(hdq_data->dev, "Could not acquire mutex\n");
		return;
	}
	hdq_data->init_trans++;
	mutex_unlock(&hdq_data->hdq_mutex);

	ret = hdq_write_byte(hdq_data, byte, &status);
	if (ret < 0) {
		dev_dbg(hdq_data->dev, "TX failure:Ctrl status %x\n", status);
		return;
	}

	/* Second write, data transferred. Release the module */
	if (hdq_data->init_trans > 1) {
		omap_hdq_put(hdq_data);
		ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
		if (ret < 0) {
			dev_dbg(hdq_data->dev, "Could not acquire mutex\n");
			return;
		}
		hdq_data->init_trans = 0;
		mutex_unlock(&hdq_data->hdq_mutex);
	}

	return;
}
544
Uwe Kleine-Königa96b9122010-02-04 20:56:54 +0100545static int __devinit omap_hdq_probe(struct platform_device *pdev)
Madhusudhan Chikkature9f2bc792008-11-12 13:27:09 -0800546{
547 struct hdq_data *hdq_data;
548 struct resource *res;
549 int ret, irq;
550 u8 rev;
551
552 hdq_data = kmalloc(sizeof(*hdq_data), GFP_KERNEL);
553 if (!hdq_data) {
554 dev_dbg(&pdev->dev, "unable to allocate memory\n");
555 ret = -ENOMEM;
556 goto err_kmalloc;
557 }
558
559 hdq_data->dev = &pdev->dev;
560 platform_set_drvdata(pdev, hdq_data);
561
562 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
563 if (!res) {
564 dev_dbg(&pdev->dev, "unable to get resource\n");
565 ret = -ENXIO;
566 goto err_resource;
567 }
568
Felipe Balbibe6ec642012-07-25 15:05:28 +0300569 hdq_data->hdq_base = ioremap(res->start, resource_size(res));
Madhusudhan Chikkature9f2bc792008-11-12 13:27:09 -0800570 if (!hdq_data->hdq_base) {
571 dev_dbg(&pdev->dev, "ioremap failed\n");
572 ret = -EINVAL;
573 goto err_ioremap;
574 }
575
Madhusudhan Chikkature9f2bc792008-11-12 13:27:09 -0800576 hdq_data->hdq_usecount = 0;
577 mutex_init(&hdq_data->hdq_mutex);
578
Paul Walmsleyc354a862012-06-21 21:40:40 -0600579 pm_runtime_enable(&pdev->dev);
580 pm_runtime_get_sync(&pdev->dev);
Madhusudhan Chikkature9f2bc792008-11-12 13:27:09 -0800581
582 rev = hdq_reg_in(hdq_data, OMAP_HDQ_REVISION);
583 dev_info(&pdev->dev, "OMAP HDQ Hardware Rev %c.%c. Driver in %s mode\n",
584 (rev >> 4) + '0', (rev & 0x0f) + '0', "Interrupt");
585
586 spin_lock_init(&hdq_data->hdq_spinlock);
587
588 irq = platform_get_irq(pdev, 0);
589 if (irq < 0) {
590 ret = -ENXIO;
591 goto err_irq;
592 }
593
594 ret = request_irq(irq, hdq_isr, IRQF_DISABLED, "omap_hdq", hdq_data);
595 if (ret < 0) {
596 dev_dbg(&pdev->dev, "could not request irq\n");
597 goto err_irq;
598 }
599
600 omap_hdq_break(hdq_data);
601
Paul Walmsleyc354a862012-06-21 21:40:40 -0600602 pm_runtime_put_sync(&pdev->dev);
Madhusudhan Chikkature9f2bc792008-11-12 13:27:09 -0800603
604 omap_w1_master.data = hdq_data;
605
606 ret = w1_add_master_device(&omap_w1_master);
607 if (ret) {
608 dev_dbg(&pdev->dev, "Failure in registering w1 master\n");
609 goto err_w1;
610 }
611
612 return 0;
613
Madhusudhan Chikkature9f2bc792008-11-12 13:27:09 -0800614err_irq:
Paul Walmsleyc354a862012-06-21 21:40:40 -0600615 pm_runtime_put_sync(&pdev->dev);
616err_w1:
617 pm_runtime_disable(&pdev->dev);
Madhusudhan Chikkature9f2bc792008-11-12 13:27:09 -0800618
Madhusudhan Chikkature9f2bc792008-11-12 13:27:09 -0800619 iounmap(hdq_data->hdq_base);
620
621err_ioremap:
622err_resource:
623 platform_set_drvdata(pdev, NULL);
624 kfree(hdq_data);
625
626err_kmalloc:
627 return ret;
628
629}
630
Felipe Balbi8c3db422012-07-25 15:05:27 +0300631static int __devexit omap_hdq_remove(struct platform_device *pdev)
Madhusudhan Chikkature9f2bc792008-11-12 13:27:09 -0800632{
633 struct hdq_data *hdq_data = platform_get_drvdata(pdev);
634
635 mutex_lock(&hdq_data->hdq_mutex);
636
637 if (hdq_data->hdq_usecount) {
638 dev_dbg(&pdev->dev, "removed when use count is not zero\n");
Stoyan Gaydarov20200022009-08-06 15:07:28 -0700639 mutex_unlock(&hdq_data->hdq_mutex);
Madhusudhan Chikkature9f2bc792008-11-12 13:27:09 -0800640 return -EBUSY;
641 }
642
643 mutex_unlock(&hdq_data->hdq_mutex);
644
645 /* remove module dependency */
Paul Walmsleyc354a862012-06-21 21:40:40 -0600646 pm_runtime_disable(&pdev->dev);
Madhusudhan Chikkature9f2bc792008-11-12 13:27:09 -0800647 free_irq(INT_24XX_HDQ_IRQ, hdq_data);
648 platform_set_drvdata(pdev, NULL);
649 iounmap(hdq_data->hdq_base);
650 kfree(hdq_data);
651
652 return 0;
653}
654
module_platform_driver(omap_hdq_driver);

/* id presented for the single HDQ slave (see omap_w1_search_bus) */
module_param(w1_id, int, S_IRUSR);
MODULE_PARM_DESC(w1_id, "1-wire id for the slave detection");

MODULE_AUTHOR("Texas Instruments");
MODULE_DESCRIPTION("HDQ driver Library");
MODULE_LICENSE("GPL");