#ifndef __RES_COUNTER_H__
#define __RES_COUNTER_H__

/*
 * Resource Counters
 * Contain common data types and routines for resource accounting
 *
 * Copyright 2007 OpenVZ SWsoft Inc
 *
 * Author: Pavel Emelianov <xemul@openvz.org>
 *
 * See Documentation/cgroups/resource_counter.txt for more
 * info about what this counter is.
 */

#include <linux/cgroup.h>

/*
 * The core object. A cgroup that wishes to account for some
 * resource may include this counter into its structures and use
 * the helpers described below.
 */

struct res_counter {
	/*
	 * the current resource consumption level
	 */
	unsigned long long usage;
	/*
	 * the maximal value of the usage from the counter creation
	 */
	unsigned long long max_usage;
	/*
	 * the limit that usage cannot exceed
	 */
	unsigned long long limit;
	/*
	 * the limit that usage can exceed (the soft limit)
	 */
	unsigned long long soft_limit;
	/*
	 * the number of unsuccessful attempts to consume the resource
	 */
	unsigned long long failcnt;
	/*
	 * the lock to protect all of the above.
	 * the routines below consider this to be IRQ-safe
	 */
	spinlock_t lock;
	/*
	 * Parent counter, used for hierarchical resource accounting
	 */
	struct res_counter *parent;
};

#define RESOURCE_MAX (unsigned long long)LLONG_MAX

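/*
 * Example (an illustrative sketch, not part of this header's API): a
 * hypothetical controller, here called struct demo_cgroup, would embed a
 * res_counter in its per-cgroup state and do all of its accounting
 * through the helpers declared below:
 *
 *	struct demo_cgroup {
 *		struct cgroup_subsys_state css;
 *		struct res_counter res;
 *	};
 */
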
/**
 * Helpers to interact with userspace:
 *
 * res_counter_read_u64() - returns the value of the specified member.
 * res_counter_read/_write - put/get the specified fields from the
 * res_counter struct to/from the user
 *
 * @counter: the counter in question
 * @member:  the field to work with (see RES_xxx below)
 * @buf:     the buffer to operate on,...
 * @nbytes:  its size...
 * @pos:     and the offset.
 */

u64 res_counter_read_u64(struct res_counter *counter, int member);

ssize_t res_counter_read(struct res_counter *counter, int member,
		const char __user *buf, size_t nbytes, loff_t *pos,
		int (*read_strategy)(unsigned long long val, char *s));

typedef int (*write_strategy_fn)(const char *buf, unsigned long long *val);

int res_counter_memparse_write_strategy(const char *buf,
					unsigned long long *res);

int res_counter_write(struct res_counter *counter, int member,
		      const char *buffer, write_strategy_fn write_strategy);

/*
 * the field descriptors. one for each member of res_counter
 */

enum {
	RES_USAGE,
	RES_MAX_USAGE,
	RES_LIMIT,
	RES_FAILCNT,
	RES_SOFT_LIMIT,
};

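/*
 * Example (an illustrative sketch, not code from this header): a
 * hypothetical controller could expose the counter through a cgroup
 * read_u64 file callback by mapping each control file to one of the
 * RES_xxx members above. The demo_cgroup type and demo_cg_from_cont()
 * helper are assumptions used only for illustration:
 *
 *	static u64 demo_read_u64(struct cgroup *cont, struct cftype *cft)
 *	{
 *		struct demo_cgroup *demo = demo_cg_from_cont(cont);
 *
 *		return res_counter_read_u64(&demo->res, cft->private);
 *	}
 *
 * where cft->private would hold RES_USAGE, RES_LIMIT, RES_FAILCNT, etc.
 */
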
/*
 * helpers for accounting
 */

void res_counter_init(struct res_counter *counter, struct res_counter *parent);

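/*
 * Example (an illustrative sketch): when a new group is created, a
 * controller would typically initialise its counter and, for hierarchical
 * accounting, chain it to the parent group's counter. The demo/parent
 * names are assumptions for illustration only:
 *
 *	res_counter_init(&demo->res, parent ? &parent->res : NULL);
 */
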
/*
 * charge - try to consume more resource.
 *
 * @counter: the counter
 * @val: the amount of the resource. each controller defines its own
 *       units, e.g. numbers, bytes, Kbytes, etc
 *
 * returns 0 on success and <0 if the counter->usage will exceed the
 * counter->limit. The _locked call expects the counter->lock to be taken.
 */

int __must_check res_counter_charge_locked(struct res_counter *counter,
		unsigned long val);
int __must_check res_counter_charge(struct res_counter *counter,
		unsigned long val, struct res_counter **limit_fail_at,
		struct res_counter **soft_limit_at);

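/*
 * Example (an illustrative sketch, with hypothetical demo_* names): a
 * controller charges the counter before actually handing out the
 * resource, and backs off when the charge is refused. limit_fail_at
 * reports which counter in the hierarchy rejected the charge:
 *
 *	struct res_counter *fail_res, *soft_res;
 *	int ret;
 *
 *	ret = res_counter_charge(&demo->res, PAGE_SIZE, &fail_res, &soft_res);
 *	if (ret)
 *		return -ENOMEM;
 *
 * Returning -ENOMEM is only one possible policy; what to do on a failed
 * charge is up to the individual controller.
 */
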
/*
 * uncharge - tell that some portion of the resource is released
 *
 * @counter: the counter
 * @val: the amount of the resource
 *
 * these calls check for usage underflow and show a warning on the console.
 * The _locked call expects the counter->lock to be taken.
 */

void res_counter_uncharge_locked(struct res_counter *counter, unsigned long val);
void res_counter_uncharge(struct res_counter *counter, unsigned long val,
				bool *was_soft_limit_excess);

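/*
 * Example (an illustrative sketch, with hypothetical demo_* names): when
 * the resource is released, the controller should uncharge exactly what
 * it charged, otherwise the underflow check above will trigger:
 *
 *	bool was_over_soft;
 *
 *	res_counter_uncharge(&demo->res, PAGE_SIZE, &was_over_soft);
 *
 * The was_soft_limit_excess flag lets the caller learn whether the
 * counter was in soft-limit excess, which a controller can use for its
 * own soft-limit bookkeeping.
 */
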
static inline bool res_counter_limit_check_locked(struct res_counter *cnt)
{
	if (cnt->usage < cnt->limit)
		return true;

	return false;
}

static inline bool res_counter_soft_limit_check_locked(struct res_counter *cnt)
{
	if (cnt->usage < cnt->soft_limit)
		return true;

	return false;
}

/**
 * Get the difference between the usage and the soft limit
 * @cnt: The counter
 *
 * Returns 0 if usage is less than or equal to the soft limit;
 * otherwise, the difference between usage and the soft limit.
 */
static inline unsigned long long
res_counter_soft_limit_excess(struct res_counter *cnt)
{
	unsigned long long excess;
	unsigned long flags;

	spin_lock_irqsave(&cnt->lock, flags);
	if (cnt->usage <= cnt->soft_limit)
		excess = 0;
	else
		excess = cnt->usage - cnt->soft_limit;
	spin_unlock_irqrestore(&cnt->lock, flags);
	return excess;
}

/*
 * Helper function to detect if the cgroup is within its limit or
 * not. It's currently called from cgroup_rss_prepare()
 */
static inline bool res_counter_check_under_limit(struct res_counter *cnt)
{
	bool ret;
	unsigned long flags;

	spin_lock_irqsave(&cnt->lock, flags);
	ret = res_counter_limit_check_locked(cnt);
	spin_unlock_irqrestore(&cnt->lock, flags);
	return ret;
}

static inline bool res_counter_check_under_soft_limit(struct res_counter *cnt)
{
	bool ret;
	unsigned long flags;

	spin_lock_irqsave(&cnt->lock, flags);
	ret = res_counter_soft_limit_check_locked(cnt);
	spin_unlock_irqrestore(&cnt->lock, flags);
	return ret;
}

static inline void res_counter_reset_max(struct res_counter *cnt)
{
	unsigned long flags;

	spin_lock_irqsave(&cnt->lock, flags);
	cnt->max_usage = cnt->usage;
	spin_unlock_irqrestore(&cnt->lock, flags);
}

static inline void res_counter_reset_failcnt(struct res_counter *cnt)
{
	unsigned long flags;

	spin_lock_irqsave(&cnt->lock, flags);
	cnt->failcnt = 0;
	spin_unlock_irqrestore(&cnt->lock, flags);
}

static inline int res_counter_set_limit(struct res_counter *cnt,
		unsigned long long limit)
{
	unsigned long flags;
	int ret = -EBUSY;

	spin_lock_irqsave(&cnt->lock, flags);
	if (cnt->usage <= limit) {
		cnt->limit = limit;
		ret = 0;
	}
	spin_unlock_irqrestore(&cnt->lock, flags);
	return ret;
}

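/*
 * Example (an illustrative sketch, with hypothetical names demo,
 * demo_reclaim_down_to and buf): a controller's limit-file write handler
 * could parse the user string with res_counter_memparse_write_strategy()
 * and then install the new limit, reclaiming and retrying while the
 * current usage is still above it and res_counter_set_limit() therefore
 * returns -EBUSY:
 *
 *	unsigned long long val;
 *	int ret;
 *
 *	ret = res_counter_memparse_write_strategy(buf, &val);
 *	if (ret)
 *		return ret;
 *
 *	ret = res_counter_set_limit(&demo->res, val);
 *	if (ret == -EBUSY) {
 *		demo_reclaim_down_to(demo, val);
 *		ret = res_counter_set_limit(&demo->res, val);
 *	}
 *	return ret;
 */
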
static inline int
res_counter_set_soft_limit(struct res_counter *cnt,
				unsigned long long soft_limit)
{
	unsigned long flags;

	spin_lock_irqsave(&cnt->lock, flags);
	cnt->soft_limit = soft_limit;
	spin_unlock_irqrestore(&cnt->lock, flags);
	return 0;
}

#endif