/*
 * Procedures for maintaining information about logical memory blocks.
 *
 * Peter Bergner, IBM Corp.	June 2001.
 * Copyright (C) 2001 Peter Bergner.
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/memblock.h>

#define MEMBLOCK_ALLOC_ANYWHERE	0

struct memblock memblock;

static int memblock_debug;

/* "memblock=debug" on the kernel command line enables the dumps below. */
static int __init early_memblock(char *p)
{
	if (p && strstr(p, "debug"))
		memblock_debug = 1;
	return 0;
}
early_param("memblock", early_memblock);
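
/*
 * Illustrative usage: booting with something like
 *
 *	linux ... memblock=debug
 *
 * sets memblock_debug, so memblock_dump_all() below prints the memory
 * and reserved region tables instead of returning early.
 */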

/* Print every entry (base/size) of one region array at pr_info level. */
static void memblock_dump(struct memblock_region *region, char *name)
{
	unsigned long long base, size;
	int i;

	pr_info(" %s.cnt  = 0x%lx\n", name, region->cnt);

	for (i = 0; i < region->cnt; i++) {
		base = region->region[i].base;
		size = region->region[i].size;

		pr_info(" %s[0x%x]\t0x%016llx - 0x%016llx, 0x%llx bytes\n",
		    name, i, base, base + size - 1, size);
	}
}

void memblock_dump_all(void)
{
	if (!memblock_debug)
		return;

	pr_info("MEMBLOCK configuration:\n");
	pr_info(" rmo_size    = 0x%llx\n", (unsigned long long)memblock.rmo_size);
	pr_info(" memory.size = 0x%llx\n", (unsigned long long)memblock.memory.size);

	memblock_dump(&memblock.memory, "memory");
	memblock_dump(&memblock.reserved, "reserved");
}
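
/*
 * A minimal sketch of the resulting output on a machine with a single
 * 1GB bank and one reservation (all addresses are made-up examples):
 *
 *	MEMBLOCK configuration:
 *	 rmo_size    = 0x0
 *	 memory.size = 0x40000000
 *	 memory.cnt  = 0x1
 *	 memory[0x0]	0x0000000000000000 - 0x000000003fffffff, 0x40000000 bytes
 *	 reserved.cnt  = 0x1
 *	 reserved[0x0]	0x0000000001000000 - 0x00000000010fffff, 0x100000 bytes
 */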

/* Return non-zero if [base1, base1+size1) and [base2, base2+size2) overlap. */
static unsigned long memblock_addrs_overlap(u64 base1, u64 size1, u64 base2,
					u64 size2)
{
	return ((base1 < (base2 + size2)) && (base2 < (base1 + size1)));
}

/*
 * Return 1 if region 2 starts exactly where region 1 ends, -1 if region 1
 * starts exactly where region 2 ends, and 0 if they are not adjacent.
 */
static long memblock_addrs_adjacent(u64 base1, u64 size1, u64 base2, u64 size2)
{
	if (base2 == base1 + size1)
		return 1;
	else if (base1 == base2 + size2)
		return -1;

	return 0;
}
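
/*
 * Worked example (made-up values): for a region at 0x1000 of size 0x1000
 * and a second region at 0x2000, base2 == base1 + size1, so
 * memblock_addrs_adjacent(0x1000, 0x1000, 0x2000, 0x1000) returns 1;
 * swapping the two argument pairs returns -1, and any gap between the
 * regions yields 0.
 */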

/* Adjacency test for two entries of the same region array. */
static long memblock_regions_adjacent(struct memblock_region *rgn,
		unsigned long r1, unsigned long r2)
{
	u64 base1 = rgn->region[r1].base;
	u64 size1 = rgn->region[r1].size;
	u64 base2 = rgn->region[r2].base;
	u64 size2 = rgn->region[r2].size;

	return memblock_addrs_adjacent(base1, size1, base2, size2);
}

/* Delete entry r by sliding all later entries down one slot. */
static void memblock_remove_region(struct memblock_region *rgn, unsigned long r)
{
	unsigned long i;

	for (i = r; i < rgn->cnt - 1; i++) {
		rgn->region[i].base = rgn->region[i + 1].base;
		rgn->region[i].size = rgn->region[i + 1].size;
	}
	rgn->cnt--;
}

/* Assumption: base addr of region 1 < base addr of region 2 */
static void memblock_coalesce_regions(struct memblock_region *rgn,
		unsigned long r1, unsigned long r2)
{
	rgn->region[r1].size += rgn->region[r2].size;
	memblock_remove_region(rgn, r2);
}

void __init memblock_init(void)
{
	/* Create a dummy zero-size MEMBLOCK which will get coalesced away later.
	 * This simplifies the memblock_add() code below...
	 */
	memblock.memory.region[0].base = 0;
	memblock.memory.region[0].size = 0;
	memblock.memory.cnt = 1;

	/* Ditto. */
	memblock.reserved.region[0].base = 0;
	memblock.reserved.region[0].size = 0;
	memblock.reserved.cnt = 1;
}
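
/*
 * A minimal sketch of the intended call order during early boot, assuming
 * a platform with one 1GB bank at 0 (addresses purely illustrative):
 *
 *	memblock_init();				first, set up the dummy entries
 *	memblock_add(0, 0x40000000);			register available RAM
 *	memblock_reserve(0x01000000, 0x100000);		carve out firmware data
 *	memblock_analyze();				recompute memblock.memory.size
 */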

/* Recompute the total size of all registered memory regions. */
void __init memblock_analyze(void)
{
	int i;

	memblock.memory.size = 0;

	for (i = 0; i < memblock.memory.cnt; i++)
		memblock.memory.size += memblock.memory.region[i].size;
}

static long memblock_add_region(struct memblock_region *rgn, u64 base, u64 size)
{
	unsigned long coalesced = 0;
	long adjacent, i;

	if ((rgn->cnt == 1) && (rgn->region[0].size == 0)) {
		rgn->region[0].base = base;
		rgn->region[0].size = size;
		return 0;
	}

	/* First try and coalesce this MEMBLOCK with another. */
	for (i = 0; i < rgn->cnt; i++) {
		u64 rgnbase = rgn->region[i].base;
		u64 rgnsize = rgn->region[i].size;

		if ((rgnbase == base) && (rgnsize == size))
			/* Already have this region, so we're done */
			return 0;

		adjacent = memblock_addrs_adjacent(base, size, rgnbase, rgnsize);
		if (adjacent > 0) {
			rgn->region[i].base -= size;
			rgn->region[i].size += size;
			coalesced++;
			break;
		} else if (adjacent < 0) {
			rgn->region[i].size += size;
			coalesced++;
			break;
		}
	}

	/* The new region may now bridge two existing entries; merge them. */
	if ((i < rgn->cnt - 1) && memblock_regions_adjacent(rgn, i, i+1)) {
		memblock_coalesce_regions(rgn, i, i+1);
		coalesced++;
	}

	if (coalesced)
		return coalesced;
	if (rgn->cnt >= MAX_MEMBLOCK_REGIONS)
		return -1;

	/* Couldn't coalesce the MEMBLOCK, so add it to the sorted table. */
	for (i = rgn->cnt - 1; i >= 0; i--) {
		if (base < rgn->region[i].base) {
			rgn->region[i+1].base = rgn->region[i].base;
			rgn->region[i+1].size = rgn->region[i].size;
		} else {
			rgn->region[i+1].base = base;
			rgn->region[i+1].size = size;
			break;
		}
	}

	/* The loop above never writes slot 0, so handle a new lowest base here. */
	if (base < rgn->region[0].base) {
		rgn->region[0].base = base;
		rgn->region[0].size = size;
	}
	rgn->cnt++;

	return 0;
}
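
/*
 * Worked example (made-up addresses): with an existing entry
 * { base = 0x2000, size = 0x1000 }, memblock_add_region(rgn, 0x1000, 0x1000)
 * hits the "adjacent > 0" case and grows that entry downward to
 * { base = 0x1000, size = 0x2000 }; adding { 0x3000, 0x1000 } instead hits
 * "adjacent < 0" and grows it upward. A new region that touches neither
 * neighbour is inserted into the table in sorted order.
 */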

long memblock_add(u64 base, u64 size)
{
	struct memblock_region *_rgn = &memblock.memory;

	/* On pSeries LPAR systems, the first MEMBLOCK is our RMO region. */
	if (base == 0)
		memblock.rmo_size = size;

	return memblock_add_region(_rgn, base, size);
}

static long __memblock_remove(struct memblock_region *rgn, u64 base, u64 size)
{
	u64 rgnbegin, rgnend;
	u64 end = base + size;
	int i;

	rgnbegin = rgnend = 0; /* suppress gcc warnings */

	/* Find the region where (base, size) belongs to */
	for (i = 0; i < rgn->cnt; i++) {
		rgnbegin = rgn->region[i].base;
		rgnend = rgnbegin + rgn->region[i].size;

		if ((rgnbegin <= base) && (end <= rgnend))
			break;
	}

	/* Didn't find the region */
	if (i == rgn->cnt)
		return -1;

	/* Check to see if we are removing the entire region */
	if ((rgnbegin == base) && (rgnend == end)) {
		memblock_remove_region(rgn, i);
		return 0;
	}

	/* Check to see if the region is matching at the front */
	if (rgnbegin == base) {
		rgn->region[i].base = end;
		rgn->region[i].size -= size;
		return 0;
	}

	/* Check to see if the region is matching at the end */
	if (rgnend == end) {
		rgn->region[i].size -= size;
		return 0;
	}

	/*
	 * We need to split the entry - adjust the current one to the
	 * beginning of the hole and add the region after the hole.
	 */
	rgn->region[i].size = base - rgn->region[i].base;
	return memblock_add_region(rgn, end, rgnend - end);
}
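
/*
 * Worked example (made-up addresses): removing { 0x2000, 0x1000 } from a
 * single entry { 0x1000, 0x4000 } matches none of the exact/front/end
 * cases, so the entry is trimmed to { 0x1000, 0x1000 } and a new entry
 * { 0x3000, 0x2000 } is added after the hole.
 */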

long memblock_remove(u64 base, u64 size)
{
	return __memblock_remove(&memblock.memory, base, size);
}

long __init memblock_free(u64 base, u64 size)
{
	return __memblock_remove(&memblock.reserved, base, size);
}

long __init memblock_reserve(u64 base, u64 size)
{
	struct memblock_region *_rgn = &memblock.reserved;

	BUG_ON(0 == size);

	return memblock_add_region(_rgn, base, size);
}

/* Return the index of the first entry overlapping [base, base+size), or -1. */
long memblock_overlaps_region(struct memblock_region *rgn, u64 base, u64 size)
{
	unsigned long i;

	for (i = 0; i < rgn->cnt; i++) {
		u64 rgnbase = rgn->region[i].base;
		u64 rgnsize = rgn->region[i].size;
		if (memblock_addrs_overlap(base, size, rgnbase, rgnsize))
			break;
	}

	return (i < rgn->cnt) ? i : -1;
}

/* Both helpers assume "size" (the alignment) is a power of two. */
static u64 memblock_align_down(u64 addr, u64 size)
{
	return addr & ~(size - 1);
}

static u64 memblock_align_up(u64 addr, u64 size)
{
	return (addr + (size - 1)) & ~(size - 1);
}
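
/*
 * Worked example: with a power-of-two alignment of 0x1000,
 * memblock_align_down(0x1234, 0x1000) masks off the low bits and yields
 * 0x1000, while memblock_align_up(0x1234, 0x1000) rounds up to 0x2000.
 * A non-power-of-two "size" would give wrong results, since the mask
 * ~(size - 1) only works when size has a single bit set.
 */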

/*
 * Scan [start, end) top-down for an aligned, unreserved hole of the
 * requested size, reserving and returning it on success.
 */
static u64 __init memblock_alloc_nid_unreserved(u64 start, u64 end,
					   u64 size, u64 align)
{
	u64 base, res_base;
	long j;

	base = memblock_align_down((end - size), align);
	while (start <= base) {
		j = memblock_overlaps_region(&memblock.reserved, base, size);
		if (j < 0) {
			/* this area isn't reserved, take it */
			if (memblock_add_region(&memblock.reserved, base, size) < 0)
				base = ~(u64)0;
			return base;
		}
		res_base = memblock.reserved.region[j].base;
		if (res_base < size)
			break;
		base = memblock_align_down(res_base - size, align);
	}

	return ~(u64)0;
}

static u64 __init memblock_alloc_nid_region(struct memblock_property *mp,
				       u64 (*nid_range)(u64, u64, int *),
				       u64 size, u64 align, int nid)
{
	u64 start, end;

	start = mp->base;
	end = start + mp->size;

	start = memblock_align_up(start, align);
	while (start < end) {
		u64 this_end;
		int this_nid;

		this_end = nid_range(start, end, &this_nid);
		if (this_nid == nid) {
			u64 ret = memblock_alloc_nid_unreserved(start, this_end,
							   size, align);
			if (ret != ~(u64)0)
				return ret;
		}
		start = this_end;
	}

	return ~(u64)0;
}

u64 __init memblock_alloc_nid(u64 size, u64 align, int nid,
			 u64 (*nid_range)(u64 start, u64 end, int *nid))
{
	struct memblock_region *mem = &memblock.memory;
	int i;

	BUG_ON(0 == size);

	size = memblock_align_up(size, align);

	for (i = 0; i < mem->cnt; i++) {
		u64 ret = memblock_alloc_nid_region(&mem->region[i],
					       nid_range,
					       size, align, nid);
		if (ret != ~(u64)0)
			return ret;
	}

	/* Fall back to an allocation from any node. */
	return memblock_alloc(size, align);
}
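
/*
 * A minimal sketch (hypothetical, single-node) of the nid_range callback
 * memblock_alloc_nid() expects: it reports the node id owning "start" and
 * returns the end of that node's contiguous span, so the caller can step
 * through a memory region node by node:
 *
 *	static u64 __init flat_nid_range(u64 start, u64 end, int *nid)
 *	{
 *		*nid = 0;	(everything lives on node 0)
 *		return end;	(the whole span belongs to that node)
 *	}
 *
 *	addr = memblock_alloc_nid(0x10000, 0x1000, 0, flat_nid_range);
 */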

u64 __init memblock_alloc(u64 size, u64 align)
{
	return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ANYWHERE);
}

/* Like __memblock_alloc_base(), but panics on failure. */
u64 __init memblock_alloc_base(u64 size, u64 align, u64 max_addr)
{
	u64 alloc;

	alloc = __memblock_alloc_base(size, align, max_addr);

	if (alloc == 0)
		panic("ERROR: Failed to allocate 0x%llx bytes below 0x%llx.\n",
		      (unsigned long long) size, (unsigned long long) max_addr);

	return alloc;
}

u64 __init __memblock_alloc_base(u64 size, u64 align, u64 max_addr)
{
	long i, j;
	u64 base = 0;
	u64 res_base;

	BUG_ON(0 == size);

	size = memblock_align_up(size, align);

	/* On some platforms, make sure we allocate lowmem */
	/* Note that MEMBLOCK_REAL_LIMIT may be MEMBLOCK_ALLOC_ANYWHERE */
	if (max_addr == MEMBLOCK_ALLOC_ANYWHERE)
		max_addr = MEMBLOCK_REAL_LIMIT;

	/* Walk the memory table from the top down, preferring high addresses. */
	for (i = memblock.memory.cnt - 1; i >= 0; i--) {
		u64 memblockbase = memblock.memory.region[i].base;
		u64 memblocksize = memblock.memory.region[i].size;

		if (memblocksize < size)
			continue;
		if (max_addr == MEMBLOCK_ALLOC_ANYWHERE)
			base = memblock_align_down(memblockbase + memblocksize - size, align);
		else if (memblockbase < max_addr) {
			base = min(memblockbase + memblocksize, max_addr);
			base = memblock_align_down(base - size, align);
		} else
			continue;

		/* Slide below each reservation we collide with until we fit. */
		while (base && memblockbase <= base) {
			j = memblock_overlaps_region(&memblock.reserved, base, size);
			if (j < 0) {
				/* this area isn't reserved, take it */
				if (memblock_add_region(&memblock.reserved, base, size) < 0)
					return 0;
				return base;
			}
			res_base = memblock.reserved.region[j].base;
			if (res_base < size)
				break;
			base = memblock_align_down(res_base - size, align);
		}
	}
	return 0;
}
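
/*
 * Worked example (made-up layout): with memory { 0x0, 0x40000000 } and a
 * reservation { 0x3fff0000, 0x10000 } at the very top, a call such as
 * __memblock_alloc_base(0x1000, 0x1000, MEMBLOCK_ALLOC_ANYWHERE) first
 * tries base = 0x3ffff000, collides with the reservation, then slides
 * down to 0x3ffef000, which is free, gets reserved, and is returned.
 */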

/* You must call memblock_analyze() before this. */
u64 __init memblock_phys_mem_size(void)
{
	return memblock.memory.size;
}

u64 memblock_end_of_DRAM(void)
{
	int idx = memblock.memory.cnt - 1;

	return (memblock.memory.region[idx].base + memblock.memory.region[idx].size);
}

/* You must call memblock_analyze() after this. */
void __init memblock_enforce_memory_limit(u64 memory_limit)
{
	unsigned long i;
	u64 limit;
	struct memblock_property *p;

	if (!memory_limit)
		return;

	/* Truncate the memblock regions to satisfy the memory limit. */
	limit = memory_limit;
	for (i = 0; i < memblock.memory.cnt; i++) {
		if (limit > memblock.memory.region[i].size) {
			limit -= memblock.memory.region[i].size;
			continue;
		}

		memblock.memory.region[i].size = limit;
		memblock.memory.cnt = i + 1;
		break;
	}

	if (memblock.memory.region[0].size < memblock.rmo_size)
		memblock.rmo_size = memblock.memory.region[0].size;

	memory_limit = memblock_end_of_DRAM();

	/* And truncate any reserves above the limit also. */
	for (i = 0; i < memblock.reserved.cnt; i++) {
		p = &memblock.reserved.region[i];

		if (p->base > memory_limit)
			p->size = 0;
		else if ((p->base + p->size) > memory_limit)
			p->size = memory_limit - p->base;

		if (p->size == 0) {
			memblock_remove_region(&memblock.reserved, i);
			i--;
		}
	}
}
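
/*
 * Worked example (made-up layout): with regions { 0x0, 0x20000000 } and
 * { 0x20000000, 0x20000000 } and a limit of 0x30000000 (as a "mem=768M"
 * style restriction might request), the first region is kept whole, the
 * second is trimmed to { 0x20000000, 0x10000000 }, and any reservation
 * beyond 0x30000000 is clipped or dropped.
 */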

int __init memblock_is_reserved(u64 addr)
{
	int i;

	for (i = 0; i < memblock.reserved.cnt; i++) {
		u64 upper = memblock.reserved.region[i].base +
			memblock.reserved.region[i].size - 1;
		if ((addr >= memblock.reserved.region[i].base) && (addr <= upper))
			return 1;
	}
	return 0;
}

int memblock_is_region_reserved(u64 base, u64 size)
{
	return memblock_overlaps_region(&memblock.reserved, base, size);
}

/*
 * Given a <base, len>, find which memory regions belong to this range.
 * Adjust the request and return a contiguous chunk.
 */
int memblock_find(struct memblock_property *res)
{
	int i;
	u64 rstart, rend;

	rstart = res->base;
	rend = rstart + res->size - 1;

	for (i = 0; i < memblock.memory.cnt; i++) {
		u64 start = memblock.memory.region[i].base;
		u64 end = start + memblock.memory.region[i].size - 1;

		if (start > rend)
			return -1;

		if ((end >= rstart) && (start < rend)) {
			/* adjust the request */
			if (rstart < start)
				rstart = start;
			if (rend > end)
				rend = end;
			res->base = rstart;
			res->size = rend - rstart + 1;
			return 0;
		}
	}
	return -1;
}
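
/*
 * Worked example (made-up layout): with a single memory region
 * { 0x1000, 0x4000 } (spanning 0x1000 - 0x4fff), calling memblock_find()
 * with res = { 0x0, 0x10000 } clamps the request to the overlapping
 * chunk, rewriting res to { 0x1000, 0x4000 } and returning 0; a request
 * entirely below 0x1000 would return -1.
 */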