/*
 * Read-Copy Update mechanism for mutual exclusion, the Bloatwatch edition.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 * Copyright IBM Corporation, 2008
 *
 * Author: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 * Documentation/RCU
 */
#ifndef __LINUX_TINY_H
#define __LINUX_TINY_H

#include <linux/ktime.h>

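/*
 * There being but one CPU, there is no remote dyntick-idle state to
 * sample, so the snapshot below is a constant.
 */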
struct rcu_dynticks;
static inline int rcu_dynticks_snap(struct rcu_dynticks *rdtp)
{
	return 0;
}

/* Never flag non-existent other CPUs! */
static inline bool rcu_eqs_special_set(int cpu) { return false; }

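/*
 * With a single CPU and no preemption of RCU read-side critical
 * sections, any point at which the caller might block implies a full
 * grace period.  The grace-period cookie can therefore be a constant
 * zero, and cond_synchronize_rcu() and cond_synchronize_sched() need
 * only retain their might_sleep() debugging checks.
 */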
static inline unsigned long get_state_synchronize_rcu(void)
{
	return 0;
}

static inline void cond_synchronize_rcu(unsigned long oldstate)
{
	might_sleep();
}

static inline unsigned long get_state_synchronize_sched(void)
{
	return 0;
}

static inline void cond_synchronize_sched(unsigned long oldstate)
{
	might_sleep();
}
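
/*
 * A minimal usage sketch of the cookie API; the update-side code
 * around the calls is hypothetical, and on Tiny RCU the final call
 * returns immediately:
 *
 *	unsigned long cookie = get_state_synchronize_rcu();
 *
 *	... unlink an element and do other update-side work ...
 *
 *	cond_synchronize_rcu(cookie);
 */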

extern void rcu_barrier_bh(void);
extern void rcu_barrier_sched(void);

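/*
 * With only one CPU, a quiescent state on that CPU ends a grace period
 * for every flavor, so the update-side operations below all map onto
 * their RCU-sched counterparts.
 */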
static inline void synchronize_rcu_expedited(void)
{
	synchronize_sched(); /* Only one CPU, so pretty fast anyway!!! */
}

static inline void rcu_barrier(void)
{
	rcu_barrier_sched(); /* Only one CPU, so only one list of callbacks! */
}

static inline void synchronize_rcu_bh(void)
{
	synchronize_sched();
}

static inline void synchronize_rcu_bh_expedited(void)
{
	synchronize_sched();
}

static inline void synchronize_sched_expedited(void)
{
	synchronize_sched();
}

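/*
 * Tiny RCU keeps but a single list of callbacks, so kfree-flavored
 * callbacks simply take the ordinary call_rcu() path.
 */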
static inline void kfree_call_rcu(struct rcu_head *head,
				  rcu_callback_t func)
{
	call_rcu(head, func);
}

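/*
 * A context switch is a quiescent state for RCU-sched, and is also
 * reported to RCU-tasks on behalf of the current task.
 */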
#define rcu_note_context_switch(preempt) \
	do { \
		rcu_sched_qs(); \
		rcu_tasks_qs(current); \
	} while (0)

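/*
 * The tick layer asks whether RCU needs this CPU kept out of
 * dyntick-idle mode.  Tiny RCU never does, and never requests a
 * future timer event.
 */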
static inline int rcu_needs_cpu(u64 basemono, u64 *nextevt)
{
	*nextevt = KTIME_MAX;
	return 0;
}

/*
 * Take advantage of the fact that there is only one CPU, which
 * allows us to ignore virtualization-based context switches.
 */
static inline void rcu_virt_note_context_switch(int cpu) { }
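
/*
 * Tiny RCU tracks neither dyntick-idle transitions nor stall state,
 * and without preemptible RCU there is no task-exit cleanup to do, so
 * the following hooks reduce to no-ops.
 */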
static inline void rcu_cpu_stall_reset(void) { }
static inline void rcu_idle_enter(void) { }
static inline void rcu_idle_exit(void) { }
static inline void rcu_irq_enter(void) { }
static inline void rcu_irq_exit_irqson(void) { }
static inline void rcu_irq_enter_irqson(void) { }
static inline void rcu_irq_exit(void) { }
static inline void exit_rcu(void) { }
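
/*
 * rcu_scheduler_starting() records that the scheduler is up; in this
 * configuration only SRCU consumes that fact, hence the #ifdef.
 */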
#ifdef CONFIG_SRCU
void rcu_scheduler_starting(void);
#else /* #ifndef CONFIG_SRCU */
static inline void rcu_scheduler_starting(void) { }
#endif /* #else #ifndef CONFIG_SRCU */
static inline void rcu_end_inkernel_boot(void) { }
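/* Tiny RCU does no dyntick-idle tracking, so it is always watching. */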
static inline bool rcu_is_watching(void) { return true; }

/* Avoid RCU read-side critical sections leaking across rcu_all_qs(). */
static inline void rcu_all_qs(void) { barrier(); }

/*
 * RCUtree CPU-hotplug callbacks, stubbed to NULL for the cpuhp core:
 * a single-CPU system never takes its one CPU offline.
 */
#define rcutree_prepare_cpu NULL
#define rcutree_online_cpu NULL
#define rcutree_offline_cpu NULL
#define rcutree_dead_cpu NULL
#define rcutree_dying_cpu NULL
static inline void rcu_cpu_starting(unsigned int cpu) { }

#endif /* __LINUX_TINY_H */