/*
 * kernel/sched/cpupri.c
 *
 * CPU priority management
 *
 * Copyright (C) 2007-2008 Novell
 *
 * Author: Gregory Haskins <ghaskins@novell.com>
 *
 * This code tracks the priority of each CPU so that global migration
 * decisions are easy to calculate. Each CPU can be in a state as follows:
 *
 *                 (INVALID), IDLE, NORMAL, RT1, ... RT99
 *
 * going from the lowest priority to the highest. CPUs in the INVALID state
 * are not eligible for routing. The system maintains this state with
 * a 2 dimensional bitmap (the first for priority class, the second for CPUs
 * in that class). Therefore a typical application without affinity
 * restrictions can find a suitable CPU with O(1) complexity (e.g. two bit
 * searches). For tasks with affinity restrictions, the algorithm has a
 * worst case complexity of O(min(102, nr_domcpus)), though the scenario that
 * yields the worst case search is fairly contrived.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 */
#include "sched.h"

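/*
 * For reference, a sketch of the companion definitions pulled in via
 * "sched.h" (see kernel/sched/cpupri.h for the authoritative versions;
 * reproduced here as an illustration, not as part of the original file):
 *
 *	#define CPUPRI_NR_PRIORITIES	(MAX_RT_PRIO + 2)	// 102
 *
 *	#define CPUPRI_INVALID		-1
 *	#define CPUPRI_IDLE		 0
 *	#define CPUPRI_NORMAL		 1
 *	// cpupri values 2..101 cover the RT1..RT99 range
 *
 *	struct cpupri_vec {
 *		atomic_t	count;	// nr of CPUs at this priority
 *		cpumask_var_t	mask;	// which CPUs those are
 *	};
 *
 *	struct cpupri {
 *		struct cpupri_vec pri_to_cpu[CPUPRI_NR_PRIORITIES];
 *		int		 *cpu_to_pri;
 *	};
 */
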
/* Convert between a 140-based task->prio and our 102-based cpupri */
static int convert_prio(int prio)
{
	int cpupri;

	if (prio == CPUPRI_INVALID)
		cpupri = CPUPRI_INVALID;
	else if (prio == MAX_PRIO)
		cpupri = CPUPRI_IDLE;
	else if (prio >= MAX_RT_PRIO)
		cpupri = CPUPRI_NORMAL;
	else
		cpupri = MAX_RT_PRIO - prio + 1;

	return cpupri;
}

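/*
 * Worked examples of convert_prio(), assuming the usual kernel constants
 * MAX_RT_PRIO == 100 and MAX_PRIO == 140 (an added illustration, not
 * original kernel commentary):
 *
 *	convert_prio(140)	-> CPUPRI_IDLE (0)	// idle
 *	convert_prio(120)	-> CPUPRI_NORMAL (1)	// any fair task
 *	convert_prio(99)	-> 2			// lowest RT priority
 *	convert_prio(0)		-> 101			// highest RT priority
 *
 * Note the inversion: task->prio means "lower value == more important",
 * while cpupri means "higher value == more important", which is what lets
 * cpupri_find() scan vectors upward from idx 0 for a less important CPU.
 */
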
/**
 * cpupri_find - find the best (lowest-pri) CPU in the system
 * @cp: The cpupri context
 * @p: The task
 * @lowest_mask: A mask to fill in with selected CPUs (or NULL)
 *
 * Note: This function returns the recommended CPUs as calculated during the
 * current invocation. By the time the call returns, the CPUs may have in
 * fact changed priorities any number of times. While not ideal, it is not
 * an issue of correctness since the normal rebalancer logic will correct
 * any discrepancies created by racing against the uncertainty of the current
 * priority configuration.
 *
 * Return: (int)bool - CPUs were found
 */
int cpupri_find(struct cpupri *cp, struct task_struct *p,
		struct cpumask *lowest_mask)
{
	int idx = 0;
	int task_pri = convert_prio(p->prio);

	BUG_ON(task_pri >= CPUPRI_NR_PRIORITIES);

	for (idx = 0; idx < task_pri; idx++) {
		struct cpupri_vec *vec = &cp->pri_to_cpu[idx];
		int skip = 0;

		if (!atomic_read(&(vec)->count))
			skip = 1;
		/*
		 * When looking at the vector, we need to read the counter,
		 * do a memory barrier, then read the mask.
		 *
		 * Note: This is still all racy, but we can deal with it.
		 * Ideally, we only want to look at masks that are set.
		 *
		 * If a mask is not set, then the only thing wrong is that we
		 * did a little more work than necessary.
		 *
		 * If we read a zero count but the mask is set, because of the
		 * memory barriers, that can only happen when the highest prio
		 * task for a run queue has left the run queue, in which case,
		 * it will be followed by a pull. If the task we are processing
		 * fails to find a proper place to go, that pull request will
		 * pull this task if the run queue is running at a lower
		 * priority.
		 */
		smp_rmb();

		/* Need to do the rmb for every iteration */
		if (skip)
			continue;

		if (cpumask_any_and(&p->cpus_allowed, vec->mask) >= nr_cpu_ids)
			continue;

		if (lowest_mask) {
			cpumask_and(lowest_mask, &p->cpus_allowed, vec->mask);

			/*
			 * We have to ensure that we have at least one bit
			 * still set in the array, since the map could have
			 * been concurrently emptied between the first and
			 * second reads of vec->mask. If we hit this
			 * condition, simply act as though we never hit this
			 * priority level and continue on.
			 */
			if (cpumask_any(lowest_mask) >= nr_cpu_ids)
				continue;
		}

		return 1;
	}

	return 0;
}

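/*
 * Hypothetical usage sketch (not part of this file): roughly how a
 * push-side caller might consult cpupri. In the kernel, find_lowest_rq()
 * in kernel/sched/rt.c plays this role, using a preallocated per-CPU mask
 * rather than the on-stack cpumask used here for brevity.
 */
static int example_pick_lower_cpu(struct cpupri *cp, struct task_struct *p)
{
	struct cpumask lowest_mask;

	/* No CPU is running at a lower priority class than p. */
	if (!cpupri_find(cp, p, &lowest_mask))
		return -1;

	/* Prefer the task's current CPU when it qualifies (cache warmth). */
	if (cpumask_test_cpu(task_cpu(p), &lowest_mask))
		return task_cpu(p);

	return cpumask_first(&lowest_mask);
}
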
/**
 * cpupri_set - update the CPU priority setting
 * @cp: The cpupri context
 * @cpu: The target CPU
 * @newpri: The priority (INVALID-RT99) to assign to this CPU
 *
 * Note: Assumes cpu_rq(cpu)->lock is locked
 *
 * Returns: (void)
 */
void cpupri_set(struct cpupri *cp, int cpu, int newpri)
{
	int *currpri = &cp->cpu_to_pri[cpu];
	int oldpri = *currpri;
	int do_mb = 0;

	newpri = convert_prio(newpri);

	BUG_ON(newpri >= CPUPRI_NR_PRIORITIES);

	if (newpri == oldpri)
		return;

	/*
	 * If the CPU was currently mapped to a different value, we
	 * need to map it to the new value then remove the old value.
	 * Note, we must add the new value first, otherwise we risk the
	 * CPU being missed by the priority loop in cpupri_find.
	 */
	if (likely(newpri != CPUPRI_INVALID)) {
		struct cpupri_vec *vec = &cp->pri_to_cpu[newpri];

		cpumask_set_cpu(cpu, vec->mask);
		/*
		 * When adding a new vector, we update the mask first,
		 * do a write memory barrier, and then update the count, to
		 * make sure the vector is visible when count is set.
		 */
		smp_mb__before_atomic();
		atomic_inc(&(vec)->count);
		do_mb = 1;
	}
	if (likely(oldpri != CPUPRI_INVALID)) {
		struct cpupri_vec *vec = &cp->pri_to_cpu[oldpri];

		/*
		 * Because the order of modification of the vec->count
		 * is important, we must make sure that the update
		 * of the new prio is seen before we decrement the
		 * old prio. This makes sure that the loop sees
		 * one or the other when we raise the priority of
		 * the run queue. We don't care about when we lower the
		 * priority, as that will trigger an rt pull anyway.
		 *
		 * We only need to do a memory barrier if we updated
		 * the new priority vec.
		 */
		if (do_mb)
			smp_mb__after_atomic();

		/*
		 * When removing from the vector, we decrement the counter
		 * first, do a memory barrier, and then clear the mask.
		 */
		atomic_dec(&(vec)->count);
		smp_mb__after_atomic();
		cpumask_clear_cpu(cpu, vec->mask);
	}

	*currpri = newpri;
}

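/*
 * Illustrative summary of how the barriers above pair with the smp_rmb()
 * in cpupri_find() (an added note, not original kernel commentary):
 *
 *	cpupri_set() (writer, add side)    cpupri_find() (reader)
 *	-------------------------------    ----------------------
 *	cpumask_set_cpu(cpu, vec->mask)    atomic_read(&vec->count)
 *	smp_mb__before_atomic()            smp_rmb()
 *	atomic_inc(&vec->count)            read vec->mask
 *
 * If the reader observes a non-zero count, the preceding mask update is
 * guaranteed to be visible. If it sees a non-zero count but the mask turns
 * out to be empty, it has only done a little extra work; if it sees a zero
 * count while the mask is still set, the vector is skipped and a subsequent
 * RT pull recovers the task, as the comment in cpupri_find() explains.
 */
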
/**
 * cpupri_init - initialize the cpupri structure
 * @cp: The cpupri context
 *
 * Return: -ENOMEM on memory allocation failure.
 */
int cpupri_init(struct cpupri *cp)
{
	int i;

	for (i = 0; i < CPUPRI_NR_PRIORITIES; i++) {
		struct cpupri_vec *vec = &cp->pri_to_cpu[i];

		atomic_set(&vec->count, 0);
		if (!zalloc_cpumask_var(&vec->mask, GFP_KERNEL))
			goto cleanup;
	}

	cp->cpu_to_pri = kcalloc(nr_cpu_ids, sizeof(int), GFP_KERNEL);
	if (!cp->cpu_to_pri)
		goto cleanup;

	for_each_possible_cpu(i)
		cp->cpu_to_pri[i] = CPUPRI_INVALID;

	return 0;

cleanup:
	for (i--; i >= 0; i--)
		free_cpumask_var(cp->pri_to_cpu[i].mask);
	return -ENOMEM;
}

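/*
 * Hypothetical usage sketch (not in the kernel tree): cpupri_init() and
 * cpupri_cleanup() pair like any allocator/destructor. In the kernel
 * itself, the root-domain setup code (init_rootdomain()) is the caller.
 */
static int example_cpupri_lifetime(struct cpupri *cp)
{
	if (cpupri_init(cp))
		return -ENOMEM;	/* vector masks or cpu_to_pri allocation failed */

	/* ... cpupri_set()/cpupri_find() may be used while initialized ... */

	cpupri_cleanup(cp);
	return 0;
}
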
/**
 * cpupri_cleanup - clean up the cpupri structure
 * @cp: The cpupri context
 */
void cpupri_cleanup(struct cpupri *cp)
{
	int i;

	kfree(cp->cpu_to_pri);
	for (i = 0; i < CPUPRI_NR_PRIORITIES; i++)
		free_cpumask_var(cp->pri_to_cpu[i].mask);
}