/*
 * Procedures for maintaining information about logical memory blocks.
 *
 * Peter Bergner, IBM Corp.	June 2001.
 * Copyright (C) 2001 Peter Bergner.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/lmb.h>

#define LMB_ALLOC_ANYWHERE	0

struct lmb lmb;

static int lmb_debug;

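/* Enable LMB debug output by passing "lmb=debug" on the kernel command line. */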
static int __init early_lmb(char *p)
{
	if (p && strstr(p, "debug"))
		lmb_debug = 1;
	return 0;
}
early_param("lmb", early_lmb);

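/* Print the base, end and size of every region in @region via pr_info(). */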
static void lmb_dump(struct lmb_region *region, char *name)
{
	unsigned long long base, size;
	int i;

	pr_info(" %s.cnt = 0x%lx\n", name, region->cnt);

	for (i = 0; i < region->cnt; i++) {
		base = region->region[i].base;
		size = region->region[i].size;

		pr_info(" %s[0x%x]\t0x%016llx - 0x%016llx, 0x%llx bytes\n",
			name, i, base, base + size - 1, size);
	}
}

void lmb_dump_all(void)
{
	if (!lmb_debug)
		return;

	pr_info("LMB configuration:\n");
	pr_info(" rmo_size = 0x%llx\n", (unsigned long long)lmb.rmo_size);
	pr_info(" memory.size = 0x%llx\n", (unsigned long long)lmb.memory.size);

	lmb_dump(&lmb.memory, "memory");
	lmb_dump(&lmb.reserved, "reserved");
}

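/* Return non-zero if [base1, base1+size1) and [base2, base2+size2) overlap. */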
static unsigned long lmb_addrs_overlap(u64 base1, u64 size1, u64 base2,
					u64 size2)
{
	return ((base1 < (base2 + size2)) && (base2 < (base1 + size1)));
}

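/*
 * Return 1 if region 2 immediately follows region 1, -1 if region 1
 * immediately follows region 2, and 0 if they are not adjacent.
 */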
static long lmb_addrs_adjacent(u64 base1, u64 size1, u64 base2, u64 size2)
{
	if (base2 == base1 + size1)
		return 1;
	else if (base1 == base2 + size2)
		return -1;

	return 0;
}

static long lmb_regions_adjacent(struct lmb_region *rgn,
		unsigned long r1, unsigned long r2)
{
	u64 base1 = rgn->region[r1].base;
	u64 size1 = rgn->region[r1].size;
	u64 base2 = rgn->region[r2].base;
	u64 size2 = rgn->region[r2].size;

	return lmb_addrs_adjacent(base1, size1, base2, size2);
}

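/* Remove entry @r from the region array by copying the tail down one slot. */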
static void lmb_remove_region(struct lmb_region *rgn, unsigned long r)
{
	unsigned long i;

	for (i = r; i < rgn->cnt - 1; i++) {
		rgn->region[i].base = rgn->region[i + 1].base;
		rgn->region[i].size = rgn->region[i + 1].size;
	}
	rgn->cnt--;
}

/* Assumption: base addr of region 1 < base addr of region 2 */
static void lmb_coalesce_regions(struct lmb_region *rgn,
		unsigned long r1, unsigned long r2)
{
	rgn->region[r1].size += rgn->region[r2].size;
	lmb_remove_region(rgn, r2);
}

void __init lmb_init(void)
{
	/* Create a dummy zero size LMB which will get coalesced away later.
	 * This simplifies the lmb_add() code below...
	 */
	lmb.memory.region[0].base = 0;
	lmb.memory.region[0].size = 0;
	lmb.memory.cnt = 1;

	/* Ditto. */
	lmb.reserved.region[0].base = 0;
	lmb.reserved.region[0].size = 0;
	lmb.reserved.cnt = 1;
}

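/* Recompute lmb.memory.size as the sum of all memory region sizes. */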
void __init lmb_analyze(void)
{
	int i;

	lmb.memory.size = 0;

	for (i = 0; i < lmb.memory.cnt; i++)
		lmb.memory.size += lmb.memory.region[i].size;
}

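/*
 * Add [base, base+size) to @rgn, keeping the array sorted by base and
 * coalescing with adjacent regions where possible.  Returns -1 if the
 * region table is full, otherwise zero or the number of coalesces done.
 */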
static long lmb_add_region(struct lmb_region *rgn, u64 base, u64 size)
{
	unsigned long coalesced = 0;
	long adjacent, i;

	if ((rgn->cnt == 1) && (rgn->region[0].size == 0)) {
		rgn->region[0].base = base;
		rgn->region[0].size = size;
		return 0;
	}

	/* First try and coalesce this LMB with another. */
	for (i = 0; i < rgn->cnt; i++) {
		u64 rgnbase = rgn->region[i].base;
		u64 rgnsize = rgn->region[i].size;

		if ((rgnbase == base) && (rgnsize == size))
			/* Already have this region, so we're done */
			return 0;

		adjacent = lmb_addrs_adjacent(base, size, rgnbase, rgnsize);
		if (adjacent > 0) {
			rgn->region[i].base -= size;
			rgn->region[i].size += size;
			coalesced++;
			break;
		} else if (adjacent < 0) {
			rgn->region[i].size += size;
			coalesced++;
			break;
		}
	}

	if ((i < rgn->cnt - 1) && lmb_regions_adjacent(rgn, i, i+1)) {
		lmb_coalesce_regions(rgn, i, i+1);
		coalesced++;
	}

	if (coalesced)
		return coalesced;
	if (rgn->cnt >= MAX_LMB_REGIONS)
		return -1;

	/* Couldn't coalesce the LMB, so add it to the sorted table. */
	for (i = rgn->cnt - 1; i >= 0; i--) {
		if (base < rgn->region[i].base) {
			rgn->region[i+1].base = rgn->region[i].base;
			rgn->region[i+1].size = rgn->region[i].size;
		} else {
			rgn->region[i+1].base = base;
			rgn->region[i+1].size = size;
			break;
		}
	}

	if (base < rgn->region[0].base) {
		rgn->region[0].base = base;
		rgn->region[0].size = size;
	}
	rgn->cnt++;

	return 0;
}

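/* Register [base, base+size) as available memory. */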
long lmb_add(u64 base, u64 size)
{
	struct lmb_region *_rgn = &lmb.memory;

	/* On pSeries LPAR systems, the first LMB is our RMO region. */
	if (base == 0)
		lmb.rmo_size = size;

	return lmb_add_region(_rgn, base, size);
}

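/*
 * Remove [base, base+size) from the available-memory table, splitting
 * an existing region in two if the range falls in its interior.
 */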
long lmb_remove(u64 base, u64 size)
{
	struct lmb_region *rgn = &(lmb.memory);
	u64 rgnbegin, rgnend;
	u64 end = base + size;
	int i;

	rgnbegin = rgnend = 0; /* suppress gcc warnings */

	/* Find the region where (base, size) belongs to */
	for (i = 0; i < rgn->cnt; i++) {
		rgnbegin = rgn->region[i].base;
		rgnend = rgnbegin + rgn->region[i].size;

		if ((rgnbegin <= base) && (end <= rgnend))
			break;
	}

	/* Didn't find the region */
	if (i == rgn->cnt)
		return -1;

	/* Check to see if we are removing entire region */
	if ((rgnbegin == base) && (rgnend == end)) {
		lmb_remove_region(rgn, i);
		return 0;
	}

	/* Check to see if region is matching at the front */
	if (rgnbegin == base) {
		rgn->region[i].base = end;
		rgn->region[i].size -= size;
		return 0;
	}

	/* Check to see if the region is matching at the end */
	if (rgnend == end) {
		rgn->region[i].size -= size;
		return 0;
	}

	/*
	 * We need to split the entry - adjust the current one to the
	 * beginning of the hole and add the region after hole.
	 */
	rgn->region[i].size = base - rgn->region[i].base;
	return lmb_add_region(rgn, end, rgnend - end);
}

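/* Mark [base, base+size) as reserved.  @size must be non-zero. */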
long __init lmb_reserve(u64 base, u64 size)
{
	struct lmb_region *_rgn = &lmb.reserved;

	BUG_ON(0 == size);

	return lmb_add_region(_rgn, base, size);
}

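/*
 * Return the index of the first region in @rgn that overlaps
 * [base, base+size), or -1 if there is no overlap.
 */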
long lmb_overlaps_region(struct lmb_region *rgn, u64 base, u64 size)
{
	unsigned long i;

	for (i = 0; i < rgn->cnt; i++) {
		u64 rgnbase = rgn->region[i].base;
		u64 rgnsize = rgn->region[i].size;
		if (lmb_addrs_overlap(base, size, rgnbase, rgnsize))
			break;
	}

	return (i < rgn->cnt) ? i : -1;
}

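/* Alignment helpers; @size must be a power of two. */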
static u64 lmb_align_down(u64 addr, u64 size)
{
	return addr & ~(size - 1);
}

static u64 lmb_align_up(u64 addr, u64 size)
{
	return (addr + (size - 1)) & ~(size - 1);
}

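/*
 * Try to allocate @size bytes, aligned to @align, working downward from
 * @end while staying at or above @start and skipping reserved regions.
 * Returns the base address on success, ~(u64)0 on failure.
 */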
static u64 __init lmb_alloc_nid_unreserved(u64 start, u64 end,
					   u64 size, u64 align)
{
	u64 base, res_base;
	long j;

	base = lmb_align_down((end - size), align);
	while (start <= base) {
		j = lmb_overlaps_region(&lmb.reserved, base, size);
		if (j < 0) {
			/* this area isn't reserved, take it */
			if (lmb_add_region(&lmb.reserved, base, size) < 0)
				base = ~(u64)0;
			return base;
		}
		res_base = lmb.reserved.region[j].base;
		if (res_base < size)
			break;
		base = lmb_align_down(res_base - size, align);
	}

	return ~(u64)0;
}

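/*
 * Walk one memory region, using @nid_range to split it into NUMA
 * sub-ranges, and try to allocate from the sub-ranges on node @nid.
 */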
static u64 __init lmb_alloc_nid_region(struct lmb_property *mp,
				       u64 (*nid_range)(u64, u64, int *),
				       u64 size, u64 align, int nid)
{
	u64 start, end;

	start = mp->base;
	end = start + mp->size;

	start = lmb_align_up(start, align);
	while (start < end) {
		u64 this_end;
		int this_nid;

		this_end = nid_range(start, end, &this_nid);
		if (this_nid == nid) {
			u64 ret = lmb_alloc_nid_unreserved(start, this_end,
							   size, align);
			if (ret != ~(u64)0)
				return ret;
		}
		start = this_end;
	}

	return ~(u64)0;
}

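/*
 * Allocate @size bytes, aligned to @align, preferring memory on NUMA
 * node @nid.  Falls back to an ordinary lmb_alloc() if nothing suitable
 * is found on that node.
 */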
u64 __init lmb_alloc_nid(u64 size, u64 align, int nid,
			 u64 (*nid_range)(u64 start, u64 end, int *nid))
{
	struct lmb_region *mem = &lmb.memory;
	int i;

	BUG_ON(0 == size);

	size = lmb_align_up(size, align);

	for (i = 0; i < mem->cnt; i++) {
		u64 ret = lmb_alloc_nid_region(&mem->region[i],
					       nid_range,
					       size, align, nid);
		if (ret != ~(u64)0)
			return ret;
	}

	return lmb_alloc(size, align);
}

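/*
 * lmb_alloc() allocates from anywhere; lmb_alloc_base() panics if the
 * allocation fails, while __lmb_alloc_base() returns 0 instead.
 */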
u64 __init lmb_alloc(u64 size, u64 align)
{
	return lmb_alloc_base(size, align, LMB_ALLOC_ANYWHERE);
}

u64 __init lmb_alloc_base(u64 size, u64 align, u64 max_addr)
{
	u64 alloc;

	alloc = __lmb_alloc_base(size, align, max_addr);

	if (alloc == 0)
		panic("ERROR: Failed to allocate 0x%llx bytes below 0x%llx.\n",
		      (unsigned long long) size, (unsigned long long) max_addr);

	return alloc;
}

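/*
 * Allocate @size bytes aligned to @align at the highest available
 * address below @max_addr, working down through the memory regions.
 * Returns the allocated base address, or 0 on failure.
 */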
u64 __init __lmb_alloc_base(u64 size, u64 align, u64 max_addr)
{
	long i, j;
	u64 base = 0;
	u64 res_base;

	BUG_ON(0 == size);

	size = lmb_align_up(size, align);

	/* On some platforms, make sure we allocate lowmem */
	/* Note that LMB_REAL_LIMIT may be LMB_ALLOC_ANYWHERE */
	if (max_addr == LMB_ALLOC_ANYWHERE)
		max_addr = LMB_REAL_LIMIT;

	for (i = lmb.memory.cnt - 1; i >= 0; i--) {
		u64 lmbbase = lmb.memory.region[i].base;
		u64 lmbsize = lmb.memory.region[i].size;

		if (lmbsize < size)
			continue;
		if (max_addr == LMB_ALLOC_ANYWHERE)
			base = lmb_align_down(lmbbase + lmbsize - size, align);
		else if (lmbbase < max_addr) {
			base = min(lmbbase + lmbsize, max_addr);
			base = lmb_align_down(base - size, align);
		} else
			continue;

		while (base && lmbbase <= base) {
			j = lmb_overlaps_region(&lmb.reserved, base, size);
			if (j < 0) {
				/* this area isn't reserved, take it */
				if (lmb_add_region(&lmb.reserved, base, size) < 0)
					return 0;
				return base;
			}
			res_base = lmb.reserved.region[j].base;
			if (res_base < size)
				break;
			base = lmb_align_down(res_base - size, align);
		}
	}
	return 0;
}

/* You must call lmb_analyze() before this. */
u64 __init lmb_phys_mem_size(void)
{
	return lmb.memory.size;
}

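/* Return the first address past the highest memory region. */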
u64 lmb_end_of_DRAM(void)
{
	int idx = lmb.memory.cnt - 1;

	return (lmb.memory.region[idx].base + lmb.memory.region[idx].size);
}

/* You must call lmb_analyze() after this. */
void __init lmb_enforce_memory_limit(u64 memory_limit)
{
	unsigned long i;
	u64 limit;
	struct lmb_property *p;

	if (!memory_limit)
		return;

	/* Truncate the lmb regions to satisfy the memory limit. */
	limit = memory_limit;
	for (i = 0; i < lmb.memory.cnt; i++) {
		if (limit > lmb.memory.region[i].size) {
			limit -= lmb.memory.region[i].size;
			continue;
		}

		lmb.memory.region[i].size = limit;
		lmb.memory.cnt = i + 1;
		break;
	}

	if (lmb.memory.region[0].size < lmb.rmo_size)
		lmb.rmo_size = lmb.memory.region[0].size;

	memory_limit = lmb_end_of_DRAM();

	/* And truncate any reserves above the limit also. */
	for (i = 0; i < lmb.reserved.cnt; i++) {
		p = &lmb.reserved.region[i];

		if (p->base > memory_limit)
			p->size = 0;
		else if ((p->base + p->size) > memory_limit)
			p->size = memory_limit - p->base;

		if (p->size == 0) {
			lmb_remove_region(&lmb.reserved, i);
			i--;
		}
	}
}

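/* Return 1 if @addr falls inside any reserved region, 0 otherwise. */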
int __init lmb_is_reserved(u64 addr)
{
	int i;

	for (i = 0; i < lmb.reserved.cnt; i++) {
		u64 upper = lmb.reserved.region[i].base +
			lmb.reserved.region[i].size - 1;
		if ((addr >= lmb.reserved.region[i].base) && (addr <= upper))
			return 1;
	}
	return 0;
}

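/*
 * Return a non-negative region index if [base, base+size) overlaps a
 * reserved region, -1 otherwise.
 */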
int lmb_is_region_reserved(u64 base, u64 size)
{
	return lmb_overlaps_region(&lmb.reserved, base, size);
}

/*
 * Given a <base, len>, find which memory regions belong to this range.
 * Adjust the request and return a contiguous chunk.
 */
int lmb_find(struct lmb_property *res)
{
	int i;
	u64 rstart, rend;

	rstart = res->base;
	rend = rstart + res->size - 1;

	for (i = 0; i < lmb.memory.cnt; i++) {
		u64 start = lmb.memory.region[i].base;
		u64 end = start + lmb.memory.region[i].size - 1;

		if (start > rend)
			return -1;

		if ((end >= rstart) && (start < rend)) {
			/* adjust the request */
			if (rstart < start)
				rstart = start;
			if (rend > end)
				rend = end;
			res->base = rstart;
			res->size = rend - rstart + 1;
			return 0;
		}
	}
	return -1;
}