/* Extracted from kernel commit 5f97f7f9 */
/*
 * SMP- and interrupt-safe semaphores.
 *
 * Copyright (C) 2006 Atmel Corporation
 *
 * Based on include/asm-i386/semaphore.h
 * Copyright (C) 1996 Linus Torvalds
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef __ASM_AVR32_SEMAPHORE_H
#define __ASM_AVR32_SEMAPHORE_H

#include <linux/linkage.h>

#include <asm/system.h>
#include <asm/atomic.h>
#include <linux/wait.h>
#include <linux/rwsem.h>
/*
 * Counting semaphore.
 *
 * @count:    number of available "slots"; a negative value means tasks
 *            are waiting (see down()/up() below).
 * @sleepers: bookkeeping for the out-of-line slow path — presumably
 *            maintained by __down()/__down_interruptible(); confirm in
 *            the arch slow-path implementation.
 * @wait:     queue of tasks blocked in the slow path.
 */
struct semaphore {
	atomic_t count;
	int sleepers;
	wait_queue_head_t wait;
};
28 | ||
/*
 * Static initializer: count of @n, empty wait queue.  .sleepers is
 * deliberately omitted — aggregate initialization leaves it zero.
 */
#define __SEMAPHORE_INITIALIZER(name, n)				\
{									\
	.count	= ATOMIC_INIT(n),					\
	.wait	= __WAIT_QUEUE_HEAD_INITIALIZER((name).wait)		\
}
34 | ||
/* Define and statically initialize a semaphore in one statement. */
#define __DECLARE_SEMAPHORE_GENERIC(name,count) \
	struct semaphore name = __SEMAPHORE_INITIALIZER(name,count)

/* A "mutex" here is just a semaphore with an initial count of 1
 * (unlocked) or 0 (locked). */
#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1)
#define DECLARE_MUTEX_LOCKED(name) __DECLARE_SEMAPHORE_GENERIC(name,0)
40 | ||
41 | static inline void sema_init (struct semaphore *sem, int val) | |
42 | { | |
43 | atomic_set(&sem->count, val); | |
44 | sem->sleepers = 0; | |
45 | init_waitqueue_head(&sem->wait); | |
46 | } | |
47 | ||
/* Initialize @sem as an unlocked mutex (count == 1). */
static inline void init_MUTEX (struct semaphore *sem)
{
	sema_init(sem, 1);
}
52 | ||
/* Initialize @sem as an already-locked mutex (count == 0). */
static inline void init_MUTEX_LOCKED (struct semaphore *sem)
{
	sema_init(sem, 0);
}
57 | ||
/*
 * Out-of-line slow-path entry points, reached only when the inline
 * fast path below detects contention.
 */
void __down(struct semaphore * sem);
int __down_interruptible(struct semaphore * sem);
void __up(struct semaphore * sem);
61 | ||
/*
 * Acquire the semaphore, sleeping if necessary.
 *
 * Fast path: atomically decrement the count; if the result stays
 * non-negative we own the semaphore with no further work.  A negative
 * result means there was contention, so fall into the out-of-line
 * slow path __down(), which blocks until the semaphore is released.
 * (The original comment about a "__down_failed" asm handler described
 * the i386 implementation this file was based on; this port calls the
 * C slow path directly.)
 *
 * May sleep — must not be called from interrupt context, hence the
 * might_sleep() check.
 */
static inline void down(struct semaphore * sem)
{
	might_sleep();
	if (unlikely(atomic_dec_return (&sem->count) < 0))
		__down (sem);
}
73 | ||
/*
 * Interruptible attempt to acquire the semaphore: like down(), but
 * the sleep in the slow path can be interrupted by a signal.
 *
 * Returns 0 if the semaphore was acquired, -EINTR if the wait was
 * interrupted (in which case the slow path presumably undoes the
 * count decrement — confirm in the arch slow-path implementation).
 */
static inline int down_interruptible(struct semaphore * sem)
{
	int ret = 0;

	might_sleep();
	if (unlikely(atomic_dec_return (&sem->count) < 0))
		ret = __down_interruptible (sem);
	return ret;
}
87 | ||
/*
 * Non-blocking attempt to acquire the semaphore.
 *
 * atomic_dec_if_positive() only performs the decrement when the
 * result would be non-negative, so a failed attempt never pushes the
 * count negative and no wakeup bookkeeping is needed.
 *
 * Returns 0 if the semaphore was acquired, non-zero otherwise.
 */
static inline int down_trylock(struct semaphore * sem)
{
	return atomic_dec_if_positive(&sem->count) < 0;
}
96 | ||
/*
 * Release the semaphore.
 *
 * Note! This is subtle.  We call into the slow path only if the
 * count was negative (== somebody was waiting on it): a positive
 * result from the increment means no contention, so the common case
 * takes no branch for either down() or up().  A result <= 0 means at
 * least one task is blocked, so __up() is called to wake a waiter.
 */
static inline void up(struct semaphore * sem)
{
	if (unlikely(atomic_inc_return (&sem->count) <= 0))
		__up (sem);
}
108 | ||
#endif /* __ASM_AVR32_SEMAPHORE_H */