/*
 * drivers/w1/masters/omap_hdq.c
 *
 * Copyright (C) 2007 Texas Instruments, Inc.
 *
 * This file is licensed under the terms of the GNU General Public License
 * version 2. This program is licensed "as is" without any warranty of any
 * kind, whether express or implied.
 *
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/io.h>

#include <asm/irq.h>
#include <mach/hardware.h>

#include "../w1.h"
#include "../w1_int.h"

#define	MOD_NAME	"OMAP_HDQ:"

#define OMAP_HDQ_REVISION			0x00
#define OMAP_HDQ_TX_DATA			0x04
#define OMAP_HDQ_RX_DATA			0x08
#define OMAP_HDQ_CTRL_STATUS			0x0c
#define OMAP_HDQ_CTRL_STATUS_INTERRUPTMASK	(1<<6)
#define OMAP_HDQ_CTRL_STATUS_CLOCKENABLE	(1<<5)
#define OMAP_HDQ_CTRL_STATUS_GO			(1<<4)
#define OMAP_HDQ_CTRL_STATUS_INITIALIZATION	(1<<2)
#define OMAP_HDQ_CTRL_STATUS_DIR		(1<<1)
#define OMAP_HDQ_CTRL_STATUS_MODE		(1<<0)
#define OMAP_HDQ_INT_STATUS			0x10
#define OMAP_HDQ_INT_STATUS_TXCOMPLETE		(1<<2)
#define OMAP_HDQ_INT_STATUS_RXCOMPLETE		(1<<1)
#define OMAP_HDQ_INT_STATUS_TIMEOUT		(1<<0)
#define OMAP_HDQ_SYSCONFIG			0x14
#define OMAP_HDQ_SYSCONFIG_SOFTRESET		(1<<1)
#define OMAP_HDQ_SYSCONFIG_AUTOIDLE		(1<<0)
#define OMAP_HDQ_SYSSTATUS			0x18
#define OMAP_HDQ_SYSSTATUS_RESETDONE		(1<<0)

#define OMAP_HDQ_FLAG_CLEAR			0
#define OMAP_HDQ_FLAG_SET			1
#define OMAP_HDQ_TIMEOUT			(HZ/5)

#define OMAP_HDQ_MAX_USER			4

static DECLARE_WAIT_QUEUE_HEAD(hdq_wait_queue);
static int w1_id;

struct hdq_data {
	struct device		*dev;
	void __iomem		*hdq_base;
	/* lock status update */
	struct mutex		hdq_mutex;
	int			hdq_usecount;
	struct clk		*hdq_ick;
	struct clk		*hdq_fck;
	u8			hdq_irqstatus;
	/* device lock */
	spinlock_t		hdq_spinlock;
	/*
	 * Used to control the calls to omap_hdq_get() and omap_hdq_put().
	 * HDQ protocol: write the CMD|REG_address first, followed by
	 * the data write or read.
	 */
	int			init_trans;
};
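
/*
 * A typical transaction, as driven through omap_w1_write_byte() and
 * omap_w1_read_byte() below (a sketch; "cmd" is a placeholder for a
 * slave command/register byte):
 *
 *	omap_w1_write_byte(hdq, cmd);	- first write, init_trans becomes 1
 *	val = omap_w1_read_byte(hdq);	- the read completes the transaction
 *					  and releases the module via
 *					  omap_hdq_put()
 */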

static int __devinit omap_hdq_probe(struct platform_device *pdev);
static int omap_hdq_remove(struct platform_device *pdev);

static struct platform_driver omap_hdq_driver = {
	.probe =	omap_hdq_probe,
	.remove =	omap_hdq_remove,
	.driver = {
		.name =	"omap_hdq",
	},
};

static u8 omap_w1_read_byte(void *_hdq);
static void omap_w1_write_byte(void *_hdq, u8 byte);
static u8 omap_w1_reset_bus(void *_hdq);
static void omap_w1_search_bus(void *_hdq, struct w1_master *master_dev,
	u8 search_type, w1_slave_found_callback slave_found);


static struct w1_bus_master omap_w1_master = {
	.read_byte	= omap_w1_read_byte,
	.write_byte	= omap_w1_write_byte,
	.reset_bus	= omap_w1_reset_bus,
	.search		= omap_w1_search_bus,
};

/* HDQ register I/O routines */
static inline u8 hdq_reg_in(struct hdq_data *hdq_data, u32 offset)
{
	return __raw_readb(hdq_data->hdq_base + offset);
}

static inline void hdq_reg_out(struct hdq_data *hdq_data, u32 offset, u8 val)
{
	__raw_writeb(val, hdq_data->hdq_base + offset);
}

static inline u8 hdq_reg_merge(struct hdq_data *hdq_data, u32 offset,
		u8 val, u8 mask)
{
	u8 new_val = (__raw_readb(hdq_data->hdq_base + offset) & ~mask)
			| (val & mask);
	__raw_writeb(new_val, hdq_data->hdq_base + offset);

	return new_val;
}

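/*
 * hdq_reg_merge() is the read-modify-write helper used throughout this
 * driver to update individual CTRL_STATUS bits without disturbing the
 * others; e.g. hdq_write_byte() below starts a TX with:
 *
 *	hdq_reg_merge(hdq_data, OMAP_HDQ_CTRL_STATUS,
 *		OMAP_HDQ_CTRL_STATUS_GO,
 *		OMAP_HDQ_CTRL_STATUS_DIR | OMAP_HDQ_CTRL_STATUS_GO);
 *
 * which sets GO and clears DIR (write direction) in one register write.
 */
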
/*
 * Wait for one or more bits in the flag to change.
 * OMAP_HDQ_FLAG_SET: wait until any bit in the flag is set.
 * OMAP_HDQ_FLAG_CLEAR: wait until all bits in the flag are cleared.
 * Returns 0 on success and -ETIMEDOUT on timeout.
 */
static int hdq_wait_for_flag(struct hdq_data *hdq_data, u32 offset,
		u8 flag, u8 flag_set, u8 *status)
{
	int ret = 0;
	unsigned long timeout = jiffies + OMAP_HDQ_TIMEOUT;

	if (flag_set == OMAP_HDQ_FLAG_CLEAR) {
		/* wait for the flag clear */
		while (((*status = hdq_reg_in(hdq_data, offset)) & flag)
			&& time_before(jiffies, timeout)) {
			schedule_timeout_uninterruptible(1);
		}
		if (*status & flag)
			ret = -ETIMEDOUT;
	} else if (flag_set == OMAP_HDQ_FLAG_SET) {
		/* wait for the flag set */
		while (!((*status = hdq_reg_in(hdq_data, offset)) & flag)
			&& time_before(jiffies, timeout)) {
			schedule_timeout_uninterruptible(1);
		}
		if (!(*status & flag))
			ret = -ETIMEDOUT;
	} else
		return -EINVAL;

	return ret;
}

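/*
 * Typical use, as in _omap_hdq_reset() below: poll (sleeping one jiffy
 * per iteration) until RESETDONE is set, giving up after
 * OMAP_HDQ_TIMEOUT:
 *
 *	ret = hdq_wait_for_flag(hdq_data, OMAP_HDQ_SYSSTATUS,
 *		OMAP_HDQ_SYSSTATUS_RESETDONE, OMAP_HDQ_FLAG_SET,
 *		&tmp_status);
 */
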
/* write out a byte and fill *status with HDQ_INT_STATUS */
static int hdq_write_byte(struct hdq_data *hdq_data, u8 val, u8 *status)
{
	int ret;
	u8 tmp_status;
	unsigned long irqflags;

	*status = 0;

	spin_lock_irqsave(&hdq_data->hdq_spinlock, irqflags);
	/* clear interrupt flags via a dummy read */
	hdq_reg_in(hdq_data, OMAP_HDQ_INT_STATUS);
	/* ISR loads it with new INT_STATUS */
	hdq_data->hdq_irqstatus = 0;
	spin_unlock_irqrestore(&hdq_data->hdq_spinlock, irqflags);

	hdq_reg_out(hdq_data, OMAP_HDQ_TX_DATA, val);

	/* set the GO bit */
	hdq_reg_merge(hdq_data, OMAP_HDQ_CTRL_STATUS, OMAP_HDQ_CTRL_STATUS_GO,
		OMAP_HDQ_CTRL_STATUS_DIR | OMAP_HDQ_CTRL_STATUS_GO);
	/* wait for the TXCOMPLETE bit */
	ret = wait_event_timeout(hdq_wait_queue,
		hdq_data->hdq_irqstatus, OMAP_HDQ_TIMEOUT);
	if (ret == 0) {
		dev_dbg(hdq_data->dev, "TX wait elapsed\n");
		/* report the timeout so 0 consistently means success */
		ret = -ETIMEDOUT;
		goto out;
	}

	*status = hdq_data->hdq_irqstatus;
	/* check irqstatus */
	if (!(*status & OMAP_HDQ_INT_STATUS_TXCOMPLETE)) {
		dev_dbg(hdq_data->dev, "timeout waiting for "
			"TXCOMPLETE/RXCOMPLETE, %x", *status);
		ret = -ETIMEDOUT;
		goto out;
	}

	/* wait for the GO bit to return to zero */
	ret = hdq_wait_for_flag(hdq_data, OMAP_HDQ_CTRL_STATUS,
			OMAP_HDQ_CTRL_STATUS_GO,
			OMAP_HDQ_FLAG_CLEAR, &tmp_status);
	if (ret) {
		dev_dbg(hdq_data->dev, "timeout waiting GO bit "
			"return to zero, %x", tmp_status);
	}

out:
	return ret;
}

/* HDQ Interrupt service routine */
static irqreturn_t hdq_isr(int irq, void *_hdq)
{
	struct hdq_data *hdq_data = _hdq;
	unsigned long irqflags;

	spin_lock_irqsave(&hdq_data->hdq_spinlock, irqflags);
	hdq_data->hdq_irqstatus = hdq_reg_in(hdq_data, OMAP_HDQ_INT_STATUS);
	spin_unlock_irqrestore(&hdq_data->hdq_spinlock, irqflags);
	dev_dbg(hdq_data->dev, "hdq_isr: %x", hdq_data->hdq_irqstatus);

	if (hdq_data->hdq_irqstatus &
		(OMAP_HDQ_INT_STATUS_TXCOMPLETE | OMAP_HDQ_INT_STATUS_RXCOMPLETE
		| OMAP_HDQ_INT_STATUS_TIMEOUT)) {
		/* wake up sleeping process */
		wake_up(&hdq_wait_queue);
	}

	return IRQ_HANDLED;
}

/* HDQ Mode: always return success */
static u8 omap_w1_reset_bus(void *_hdq)
{
	return 0;
}

/* W1 search callback function */
static void omap_w1_search_bus(void *_hdq, struct w1_master *master_dev,
	u8 search_type, w1_slave_found_callback slave_found)
{
	u64 module_id, rn_le, cs, id;

	if (w1_id)
		module_id = w1_id;
	else
		module_id = 0x1;

	rn_le = cpu_to_le64(module_id);
	/*
	 * HDQ does not fully obey the 1-wire spec, so the CRC is
	 * calculated from the module-parameter id rather than from a
	 * ROM id read off the bus.
	 */
	cs = w1_calc_crc8((u8 *)&rn_le, 7);
	/* 64-bit id: CRC8 in the top byte, module id in the low 56 bits */
	id = (cs << 56) | module_id;

	slave_found(master_dev, id);
}

static int _omap_hdq_reset(struct hdq_data *hdq_data)
{
	int ret;
	u8 tmp_status;

	hdq_reg_out(hdq_data, OMAP_HDQ_SYSCONFIG, OMAP_HDQ_SYSCONFIG_SOFTRESET);
	/*
	 * Select HDQ mode & enable clocks.
	 * It is observed that the INT flags can't be cleared via a read
	 * and that GO/INIT won't return to zero if the interrupt is
	 * disabled, so we always enable the interrupt.
	 */
	hdq_reg_out(hdq_data, OMAP_HDQ_CTRL_STATUS,
		OMAP_HDQ_CTRL_STATUS_CLOCKENABLE |
		OMAP_HDQ_CTRL_STATUS_INTERRUPTMASK);

	/* wait for the reset to complete */
	ret = hdq_wait_for_flag(hdq_data, OMAP_HDQ_SYSSTATUS,
		OMAP_HDQ_SYSSTATUS_RESETDONE, OMAP_HDQ_FLAG_SET, &tmp_status);
	if (ret)
		dev_dbg(hdq_data->dev, "timeout waiting for HDQ reset, %x",
				tmp_status);
	else {
		hdq_reg_out(hdq_data, OMAP_HDQ_CTRL_STATUS,
			OMAP_HDQ_CTRL_STATUS_CLOCKENABLE |
			OMAP_HDQ_CTRL_STATUS_INTERRUPTMASK);
		hdq_reg_out(hdq_data, OMAP_HDQ_SYSCONFIG,
			OMAP_HDQ_SYSCONFIG_AUTOIDLE);
	}

	return ret;
}

/* Issue a break pulse to the device */
static int omap_hdq_break(struct hdq_data *hdq_data)
{
	int ret = 0;
	u8 tmp_status;
	unsigned long irqflags;

	ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
	if (ret < 0) {
		dev_dbg(hdq_data->dev, "Could not acquire mutex\n");
		ret = -EINTR;
		goto rtn;
	}

	spin_lock_irqsave(&hdq_data->hdq_spinlock, irqflags);
	/* clear interrupt flags via a dummy read */
	hdq_reg_in(hdq_data, OMAP_HDQ_INT_STATUS);
	/* ISR loads it with new INT_STATUS */
	hdq_data->hdq_irqstatus = 0;
	spin_unlock_irqrestore(&hdq_data->hdq_spinlock, irqflags);

	/* set the INIT and GO bit */
	hdq_reg_merge(hdq_data, OMAP_HDQ_CTRL_STATUS,
		OMAP_HDQ_CTRL_STATUS_INITIALIZATION | OMAP_HDQ_CTRL_STATUS_GO,
		OMAP_HDQ_CTRL_STATUS_DIR | OMAP_HDQ_CTRL_STATUS_INITIALIZATION |
		OMAP_HDQ_CTRL_STATUS_GO);

	/* wait for the TIMEOUT bit */
	ret = wait_event_timeout(hdq_wait_queue,
		hdq_data->hdq_irqstatus, OMAP_HDQ_TIMEOUT);
	if (ret == 0) {
		dev_dbg(hdq_data->dev, "break wait elapsed\n");
		ret = -EINTR;
		goto out;
	}

	tmp_status = hdq_data->hdq_irqstatus;
	/* check irqstatus */
	if (!(tmp_status & OMAP_HDQ_INT_STATUS_TIMEOUT)) {
		dev_dbg(hdq_data->dev, "timeout waiting for TIMEOUT, %x",
				tmp_status);
		ret = -ETIMEDOUT;
		goto out;
	}
	/*
	 * Wait for both INIT and GO bits to return to zero.
	 * Zero wait time is expected in interrupt mode.
	 */
	ret = hdq_wait_for_flag(hdq_data, OMAP_HDQ_CTRL_STATUS,
		OMAP_HDQ_CTRL_STATUS_INITIALIZATION |
		OMAP_HDQ_CTRL_STATUS_GO, OMAP_HDQ_FLAG_CLEAR,
		&tmp_status);
	if (ret)
		dev_dbg(hdq_data->dev, "timeout waiting INIT&GO bits "
			"return to zero, %x", tmp_status);

out:
	mutex_unlock(&hdq_data->hdq_mutex);
rtn:
	return ret;
}

static int hdq_read_byte(struct hdq_data *hdq_data, u8 *val)
{
	int ret = 0;
	u8 status;
	unsigned long timeout = jiffies + OMAP_HDQ_TIMEOUT;

	ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
	if (ret < 0) {
		ret = -EINTR;
		goto rtn;
	}

	if (!hdq_data->hdq_usecount) {
		ret = -EINVAL;
		goto out;
	}

	if (!(hdq_data->hdq_irqstatus & OMAP_HDQ_INT_STATUS_RXCOMPLETE)) {
		hdq_reg_merge(hdq_data, OMAP_HDQ_CTRL_STATUS,
			OMAP_HDQ_CTRL_STATUS_DIR | OMAP_HDQ_CTRL_STATUS_GO,
			OMAP_HDQ_CTRL_STATUS_DIR | OMAP_HDQ_CTRL_STATUS_GO);
		/*
		 * The RX comes immediately after TX and triggers
		 * another interrupt before we sleep, so we have to
		 * poll for the RXCOMPLETE bit.
		 */
		while (!(hdq_data->hdq_irqstatus
			& OMAP_HDQ_INT_STATUS_RXCOMPLETE)
			&& time_before(jiffies, timeout)) {
			schedule_timeout_uninterruptible(1);
		}
		hdq_reg_merge(hdq_data, OMAP_HDQ_CTRL_STATUS, 0,
			OMAP_HDQ_CTRL_STATUS_DIR);
		status = hdq_data->hdq_irqstatus;
		/* check irqstatus */
		if (!(status & OMAP_HDQ_INT_STATUS_RXCOMPLETE)) {
			dev_dbg(hdq_data->dev, "timeout waiting for "
				"RXCOMPLETE, %x", status);
			ret = -ETIMEDOUT;
			goto out;
		}
	}
	/* the data is ready. Read it in! */
	*val = hdq_reg_in(hdq_data, OMAP_HDQ_RX_DATA);
out:
	mutex_unlock(&hdq_data->hdq_mutex);
rtn:
	/* propagate errors instead of unconditionally returning 0 */
	return ret;
}

/* Enable clocks and set the controller to HDQ mode */
static int omap_hdq_get(struct hdq_data *hdq_data)
{
	int ret = 0;

	ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
	if (ret < 0) {
		ret = -EINTR;
		goto rtn;
	}

	if (OMAP_HDQ_MAX_USER == hdq_data->hdq_usecount) {
		dev_dbg(hdq_data->dev, "attempt to exceed the max use count");
		ret = -EINVAL;
		goto out;
	} else {
		hdq_data->hdq_usecount++;
		try_module_get(THIS_MODULE);
		if (1 == hdq_data->hdq_usecount) {
			if (clk_enable(hdq_data->hdq_ick)) {
				dev_dbg(hdq_data->dev, "Can not enable ick\n");
				ret = -ENODEV;
				goto clk_err;
			}
			if (clk_enable(hdq_data->hdq_fck)) {
				dev_dbg(hdq_data->dev, "Can not enable fck\n");
				clk_disable(hdq_data->hdq_ick);
				ret = -ENODEV;
				goto clk_err;
			}

			/* make sure HDQ is out of reset */
			if (!(hdq_reg_in(hdq_data, OMAP_HDQ_SYSSTATUS) &
				OMAP_HDQ_SYSSTATUS_RESETDONE)) {
				ret = _omap_hdq_reset(hdq_data);
				if (ret)
					/* back up the count */
					hdq_data->hdq_usecount--;
			} else {
				/* select HDQ mode & enable clocks */
				hdq_reg_out(hdq_data, OMAP_HDQ_CTRL_STATUS,
					OMAP_HDQ_CTRL_STATUS_CLOCKENABLE |
					OMAP_HDQ_CTRL_STATUS_INTERRUPTMASK);
				hdq_reg_out(hdq_data, OMAP_HDQ_SYSCONFIG,
					OMAP_HDQ_SYSCONFIG_AUTOIDLE);
				hdq_reg_in(hdq_data, OMAP_HDQ_INT_STATUS);
			}
		}
	}
	/* don't drop the clock references on the success path */
	goto out;

clk_err:
	clk_put(hdq_data->hdq_ick);
	clk_put(hdq_data->hdq_fck);
out:
	mutex_unlock(&hdq_data->hdq_mutex);
rtn:
	return ret;
}

/* Disable clocks to the module */
static int omap_hdq_put(struct hdq_data *hdq_data)
{
	int ret = 0;

	ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
	if (ret < 0)
		return -EINTR;

	if (0 == hdq_data->hdq_usecount) {
		dev_dbg(hdq_data->dev, "attempt to decrement use count "
			"when it is zero");
		ret = -EINVAL;
	} else {
		hdq_data->hdq_usecount--;
		module_put(THIS_MODULE);
		if (0 == hdq_data->hdq_usecount) {
			clk_disable(hdq_data->hdq_ick);
			clk_disable(hdq_data->hdq_fck);
		}
	}
	mutex_unlock(&hdq_data->hdq_mutex);

	return ret;
}

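/*
 * omap_hdq_get() and omap_hdq_put() must stay balanced; the init_trans
 * counter updated in omap_w1_write_byte()/omap_w1_read_byte() below is
 * what pairs a get (taken on the first write of a transaction) with a
 * put (dropped once the closing write or read has completed).
 */
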
/* Read a byte of data from the device */
static u8 omap_w1_read_byte(void *_hdq)
{
	struct hdq_data *hdq_data = _hdq;
	u8 val = 0;
	int ret;

	ret = hdq_read_byte(hdq_data, &val);
	if (ret) {
		ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
		if (ret < 0) {
			dev_dbg(hdq_data->dev, "Could not acquire mutex\n");
			return -EINTR;
		}
		hdq_data->init_trans = 0;
		mutex_unlock(&hdq_data->hdq_mutex);
		omap_hdq_put(hdq_data);
		return -1;
	}

	/* Write followed by a read, release the module */
	if (hdq_data->init_trans) {
		ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
		if (ret < 0) {
			dev_dbg(hdq_data->dev, "Could not acquire mutex\n");
			return -EINTR;
		}
		hdq_data->init_trans = 0;
		mutex_unlock(&hdq_data->hdq_mutex);
		omap_hdq_put(hdq_data);
	}

	return val;
}

/* Write a byte of data to the device */
static void omap_w1_write_byte(void *_hdq, u8 byte)
{
	struct hdq_data *hdq_data = _hdq;
	int ret;
	u8 status;

	/* First write to initialize the transfer */
	if (hdq_data->init_trans == 0)
		omap_hdq_get(hdq_data);

	ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
	if (ret < 0) {
		dev_dbg(hdq_data->dev, "Could not acquire mutex\n");
		return;
	}
	hdq_data->init_trans++;
	mutex_unlock(&hdq_data->hdq_mutex);

	ret = hdq_write_byte(hdq_data, byte, &status);
	if (ret < 0) {
		dev_dbg(hdq_data->dev, "TX failure: Ctrl status %x\n", status);
		return;
	}

	/* Second write, data transferred. Release the module */
	if (hdq_data->init_trans > 1) {
		omap_hdq_put(hdq_data);
		ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
		if (ret < 0) {
			dev_dbg(hdq_data->dev, "Could not acquire mutex\n");
			return;
		}
		hdq_data->init_trans = 0;
		mutex_unlock(&hdq_data->hdq_mutex);
	}

	return;
}

static int __devinit omap_hdq_probe(struct platform_device *pdev)
{
	struct hdq_data *hdq_data;
	struct resource *res;
	int ret, irq;
	u8 rev;

	hdq_data = kmalloc(sizeof(*hdq_data), GFP_KERNEL);
	if (!hdq_data) {
		dev_dbg(&pdev->dev, "unable to allocate memory\n");
		ret = -ENOMEM;
		goto err_kmalloc;
	}

	hdq_data->dev = &pdev->dev;
	platform_set_drvdata(pdev, hdq_data);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_dbg(&pdev->dev, "unable to get resource\n");
		ret = -ENXIO;
		goto err_resource;
	}

	hdq_data->hdq_base = ioremap(res->start, SZ_4K);
	if (!hdq_data->hdq_base) {
		dev_dbg(&pdev->dev, "ioremap failed\n");
		ret = -EINVAL;
		goto err_ioremap;
	}

	/* get interface & functional clock objects */
	hdq_data->hdq_ick = clk_get(&pdev->dev, "ick");
	hdq_data->hdq_fck = clk_get(&pdev->dev, "fck");

	if (IS_ERR(hdq_data->hdq_ick) || IS_ERR(hdq_data->hdq_fck)) {
		dev_dbg(&pdev->dev, "Can't get HDQ clock objects\n");
		if (IS_ERR(hdq_data->hdq_ick)) {
			ret = PTR_ERR(hdq_data->hdq_ick);
			goto err_clk;
		}
		if (IS_ERR(hdq_data->hdq_fck)) {
			ret = PTR_ERR(hdq_data->hdq_fck);
			clk_put(hdq_data->hdq_ick);
			goto err_clk;
		}
	}

	hdq_data->hdq_usecount = 0;
	mutex_init(&hdq_data->hdq_mutex);

	if (clk_enable(hdq_data->hdq_ick)) {
		dev_dbg(&pdev->dev, "Can not enable ick\n");
		ret = -ENODEV;
		goto err_intfclk;
	}

	if (clk_enable(hdq_data->hdq_fck)) {
		dev_dbg(&pdev->dev, "Can not enable fck\n");
		ret = -ENODEV;
		goto err_fnclk;
	}

	rev = hdq_reg_in(hdq_data, OMAP_HDQ_REVISION);
	dev_info(&pdev->dev, "OMAP HDQ Hardware Rev %c.%c. Driver in %s mode\n",
		(rev >> 4) + '0', (rev & 0x0f) + '0', "Interrupt");

	spin_lock_init(&hdq_data->hdq_spinlock);

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		ret = -ENXIO;
		goto err_irq;
	}

	ret = request_irq(irq, hdq_isr, IRQF_DISABLED, "omap_hdq", hdq_data);
	if (ret < 0) {
		dev_dbg(&pdev->dev, "could not request irq\n");
		goto err_irq;
	}

	omap_hdq_break(hdq_data);

	/* don't clock the HDQ until it is needed */
	clk_disable(hdq_data->hdq_ick);
	clk_disable(hdq_data->hdq_fck);

	omap_w1_master.data = hdq_data;

	ret = w1_add_master_device(&omap_w1_master);
	if (ret) {
		dev_dbg(&pdev->dev, "Failure in registering w1 master\n");
		goto err_w1;
	}

	return 0;

err_w1:
	/* the clocks are already disabled at this point; also drop the irq */
	free_irq(irq, hdq_data);
	clk_put(hdq_data->hdq_ick);
	clk_put(hdq_data->hdq_fck);
	goto err_clk;

err_irq:
	clk_disable(hdq_data->hdq_fck);

err_fnclk:
	clk_disable(hdq_data->hdq_ick);

err_intfclk:
	clk_put(hdq_data->hdq_ick);
	clk_put(hdq_data->hdq_fck);

err_clk:
	iounmap(hdq_data->hdq_base);

err_ioremap:
err_resource:
	platform_set_drvdata(pdev, NULL);
	kfree(hdq_data);

err_kmalloc:
	return ret;
}

static int omap_hdq_remove(struct platform_device *pdev)
{
	struct hdq_data *hdq_data = platform_get_drvdata(pdev);

	mutex_lock(&hdq_data->hdq_mutex);

	if (hdq_data->hdq_usecount) {
		dev_dbg(&pdev->dev, "removed when use count is not zero\n");
		mutex_unlock(&hdq_data->hdq_mutex);
		return -EBUSY;
	}

	mutex_unlock(&hdq_data->hdq_mutex);

	/* remove module dependency */
	clk_put(hdq_data->hdq_ick);
	clk_put(hdq_data->hdq_fck);
	free_irq(INT_24XX_HDQ_IRQ, hdq_data);
	platform_set_drvdata(pdev, NULL);
	iounmap(hdq_data->hdq_base);
	kfree(hdq_data);

	return 0;
}

static int __init
omap_hdq_init(void)
{
	return platform_driver_register(&omap_hdq_driver);
}
module_init(omap_hdq_init);

static void __exit
omap_hdq_exit(void)
{
	platform_driver_unregister(&omap_hdq_driver);
}
module_exit(omap_hdq_exit);

module_param(w1_id, int, S_IRUSR);
MODULE_PARM_DESC(w1_id, "1-wire id for the slave detection");
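
/*
 * Example (hypothetical id value): override the default slave id of 0x1
 * used by omap_w1_search_bus() at module load time:
 *
 *	modprobe omap_hdq w1_id=2
 */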

MODULE_AUTHOR("Texas Instruments");
MODULE_DESCRIPTION("HDQ driver Library");
MODULE_LICENSE("GPL");