forked from luck/tmp_suning_uos_patched
[LMB] Fix some whitespace and other formatting issues, use pr_debug
This makes no semantic changes. It fixes the whitespace and formatting a bit, gets rid of a local DBG macro and uses the equivalent pr_debug instead, and restructures one while loop that had a function call and assignment in the condition to be a bit more readable. Some comments about functions being called with relocation disabled were also removed, as they would just be confusing to most readers now that the code is in lib/.

Signed-off-by: Paul Mackerras <paulus@samba.org>
This commit is contained in:
parent
c50f68c8ae
commit
300613e523
54
lib/lmb.c
54
lib/lmb.c
|
@@ -15,14 +15,6 @@
 #include <linux/bitops.h>
 #include <linux/lmb.h>
 
-#undef DEBUG
-
-#ifdef DEBUG
-#define DBG(fmt...) LMB_DBG(fmt)
-#else
-#define DBG(fmt...)
-#endif
-
 #define LMB_ALLOC_ANYWHERE 0
 
 struct lmb lmb;
@@ -32,30 +24,30 @@ void lmb_dump_all(void)
 #ifdef DEBUG
 	unsigned long i;
 
-	DBG("lmb_dump_all:\n");
-	DBG("    memory.cnt  = 0x%lx\n", lmb.memory.cnt);
-	DBG("    memory.size = 0x%llx\n",
+	pr_debug("lmb_dump_all:\n");
+	pr_debug("    memory.cnt  = 0x%lx\n", lmb.memory.cnt);
+	pr_debug("    memory.size = 0x%llx\n",
 	    (unsigned long long)lmb.memory.size);
 	for (i=0; i < lmb.memory.cnt ;i++) {
-		DBG("    memory.region[0x%x].base = 0x%llx\n",
+		pr_debug("    memory.region[0x%x].base = 0x%llx\n",
 		    i, (unsigned long long)lmb.memory.region[i].base);
-		DBG("		      .size = 0x%llx\n",
+		pr_debug("		      .size = 0x%llx\n",
 		    (unsigned long long)lmb.memory.region[i].size);
 	}
 
-	DBG("\n    reserved.cnt  = 0x%lx\n", lmb.reserved.cnt);
-	DBG("    reserved.size = 0x%lx\n", lmb.reserved.size);
+	pr_debug("    reserved.cnt  = 0x%lx\n", lmb.reserved.cnt);
+	pr_debug("    reserved.size = 0x%lx\n", lmb.reserved.size);
 	for (i=0; i < lmb.reserved.cnt ;i++) {
-		DBG("    reserved.region[0x%x].base = 0x%llx\n",
+		pr_debug("    reserved.region[0x%x].base = 0x%llx\n",
 		    i, (unsigned long long)lmb.reserved.region[i].base);
-		DBG("		      .size = 0x%llx\n",
+		pr_debug("		      .size = 0x%llx\n",
 		    (unsigned long long)lmb.reserved.region[i].size);
 	}
 #endif /* DEBUG */
 }
 
-static unsigned long __init lmb_addrs_overlap(u64 base1,
-		u64 size1, u64 base2, u64 size2)
+static unsigned long __init lmb_addrs_overlap(u64 base1, u64 size1,
+					u64 base2, u64 size2)
 {
 	return ((base1 < (base2 + size2)) && (base2 < (base1 + size1)));
 }
@@ -101,7 +93,6 @@ static void __init lmb_coalesce_regions(struct lmb_region *rgn,
 	lmb_remove_region(rgn, r2);
 }
 
-/* This routine called with relocation disabled. */
 void __init lmb_init(void)
 {
 	/* Create a dummy zero size LMB which will get coalesced away later.
@@ -117,7 +108,6 @@ void __init lmb_init(void)
 	lmb.reserved.cnt = 1;
 }
 
-/* This routine may be called with relocation disabled. */
 void __init lmb_analyze(void)
 {
 	int i;
@@ -128,7 +118,6 @@ void __init lmb_analyze(void)
 		lmb.memory.size += lmb.memory.region[i].size;
 }
 
-/* This routine called with relocation disabled. */
 static long __init lmb_add_region(struct lmb_region *rgn, u64 base, u64 size)
 {
 	unsigned long coalesced = 0;
@@ -155,8 +144,7 @@ static long __init lmb_add_region(struct lmb_region *rgn, u64 base, u64 size)
 			rgn->region[i].size += size;
 			coalesced++;
 			break;
-		}
-		else if ( adjacent < 0 ) {
+		} else if (adjacent < 0) {
 			rgn->region[i].size += size;
 			coalesced++;
 			break;
@@ -194,10 +182,9 @@ static long __init lmb_add_region(struct lmb_region *rgn, u64 base, u64 size)
 	return 0;
 }
 
-/* This routine may be called with relocation disabled. */
 long __init lmb_add(u64 base, u64 size)
 {
-	struct lmb_region *_rgn = &(lmb.memory);
+	struct lmb_region *_rgn = &lmb.memory;
 
 	/* On pSeries LPAR systems, the first LMB is our RMO region. */
 	if (base == 0)
@@ -209,25 +196,23 @@ long __init lmb_add(u64 base, u64 size)
 
 long __init lmb_reserve(u64 base, u64 size)
 {
-	struct lmb_region *_rgn = &(lmb.reserved);
+	struct lmb_region *_rgn = &lmb.reserved;
 
 	BUG_ON(0 == size);
 
 	return lmb_add_region(_rgn, base, size);
 }
 
-long __init lmb_overlaps_region(struct lmb_region *rgn, u64 base,
-				u64 size)
+long __init lmb_overlaps_region(struct lmb_region *rgn, u64 base, u64 size)
 {
 	unsigned long i;
 
 	for (i = 0; i < rgn->cnt; i++) {
 		u64 rgnbase = rgn->region[i].base;
 		u64 rgnsize = rgn->region[i].size;
-		if ( lmb_addrs_overlap(base,size,rgnbase,rgnsize) ) {
+		if (lmb_addrs_overlap(base, size, rgnbase, rgnsize))
 			break;
-		}
 	}
 
 	return (i < rgn->cnt) ? i : -1;
 }
@@ -349,10 +334,13 @@ u64 __init __lmb_alloc_base(u64 size, u64 align, u64 max_addr)
 		} else
 			continue;
 
-		while ((lmbbase <= base) &&
-		       ((j = lmb_overlaps_region(&lmb.reserved, base, size)) >= 0) )
+		while (lmbbase <= base) {
+			j = lmb_overlaps_region(&lmb.reserved, base, size);
+			if (j < 0)
+				break;
 			base = lmb_align_down(lmb.reserved.region[j].base - size,
 					      align);
+		}
 
 		if ((base != 0) && (lmbbase <= base))
 			break;
|
Loading…
Reference in New Issue
Block a user