memblock: replace alloc_bootmem with memblock_alloc
alloc_bootmem(size) is a shortcut for allocation of SMP_CACHE_BYTES aligned
memory. When the align parameter of memblock_alloc() is 0, the alignment is
implicitly set to SMP_CACHE_BYTES, and thus alloc_bootmem(size) and
memblock_alloc(size, 0) are equivalent.

The conversion is done using the following semantic patch:

@@
expression size;
@@
- alloc_bootmem(size)
+ memblock_alloc(size, 0)

Link: http://lkml.kernel.org/r/1536927045-23536-22-git-send-email-rppt@linux.vnet.ibm.com
Signed-off-by: Mike Rapoport <rppt@linux.vnet.ibm.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Chris Zankel <chris@zankel.net>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: Geert Uytterhoeven <geert@linux-m68k.org>
Cc: Greentime Hu <green.hu@gmail.com>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: Guan Xuetao <gxt@pku.edu.cn>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: "James E.J. Bottomley" <jejb@parisc-linux.org>
Cc: Jonas Bonn <jonas@southpole.se>
Cc: Jonathan Corbet <corbet@lwn.net>
Cc: Ley Foon Tan <lftan@altera.com>
Cc: Mark Salter <msalter@redhat.com>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Matt Turner <mattst88@gmail.com>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Michal Simek <monstr@monstr.eu>
Cc: Palmer Dabbelt <palmer@sifive.com>
Cc: Paul Burton <paul.burton@mips.com>
Cc: Richard Kuo <rkuo@codeaurora.org>
Cc: Richard Weinberger <richard@nod.at>
Cc: Rich Felker <dalias@libc.org>
Cc: Russell King <linux@armlinux.org.uk>
Cc: Serge Semin <fancer.lancer@gmail.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Tony Luck <tony.luck@intel.com>
Cc: Vineet Gupta <vgupta@synopsys.com>
Cc: Yoshinori Sato <ysato@users.sourceforge.jp>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
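For illustration, a minimal sketch of the before/after shape of a converted
boot-time call site; the struct, variable, and function names here are
hypothetical and not taken from the files touched by this patch:

/* Hypothetical example of a boot-time allocation before and after the
 * conversion described above.
 */
#include <linux/init.h>
#include <linux/memblock.h>

struct early_table {
	unsigned long nr_entries;
	unsigned long entries[64];
};

static struct early_table *tbl;

void __init early_table_init(void)
{
	/* Before: tbl = alloc_bootmem(sizeof(*tbl));
	 * After: passing 0 as the align argument makes memblock fall back
	 * to SMP_CACHE_BYTES, matching alloc_bootmem()'s alignment.
	 */
	tbl = memblock_alloc(sizeof(*tbl), 0);
}

The diff below applies exactly this transformation at each call site; where a
converted call is split across two lines, that is only re-wrapping for line
length, not a functional difference.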
parent 4fc4a09e4c
commit 2a5bda5a62
@@ -82,7 +82,7 @@ mk_resource_name(int pe, int port, char *str)
 	char *name;
 
 	sprintf(tmp, "PCI %s PE %d PORT %d", str, pe, port);
-	name = alloc_bootmem(strlen(tmp) + 1);
+	name = memblock_alloc(strlen(tmp) + 1, 0);
 	strcpy(name, tmp);
 
 	return name;
@@ -117,7 +117,7 @@ alloc_io7(unsigned int pe)
 		return NULL;
 	}
 
-	io7 = alloc_bootmem(sizeof(*io7));
+	io7 = memblock_alloc(sizeof(*io7), 0);
 	io7->pe = pe;
 	raw_spin_lock_init(&io7->irq_lock);
 
@@ -33,7 +33,7 @@ alloc_pci_controller(void)
 {
 	struct pci_controller *hose;
 
-	hose = alloc_bootmem(sizeof(*hose));
+	hose = memblock_alloc(sizeof(*hose), 0);
 
 	*hose_tail = hose;
 	hose_tail = &hose->next;
@@ -44,7 +44,7 @@ alloc_pci_controller(void)
 struct resource * __init
 alloc_resource(void)
 {
-	return alloc_bootmem(sizeof(struct resource));
+	return memblock_alloc(sizeof(struct resource), 0);
 }
 
 SYSCALL_DEFINE3(pciconfig_iobase, long, which, unsigned long, bus,
@@ -392,7 +392,7 @@ alloc_pci_controller(void)
 {
 	struct pci_controller *hose;
 
-	hose = alloc_bootmem(sizeof(*hose));
+	hose = memblock_alloc(sizeof(*hose), 0);
 
 	*hose_tail = hose;
 	hose_tail = &hose->next;
@@ -403,7 +403,7 @@ alloc_pci_controller(void)
 struct resource * __init
 alloc_resource(void)
 {
-	return alloc_bootmem(sizeof(struct resource));
+	return memblock_alloc(sizeof(struct resource), 0);
 }
 
@@ -79,7 +79,7 @@ iommu_arena_new_node(int nid, struct pci_controller *hose, dma_addr_t base,
 		printk("%s: couldn't allocate arena from node %d\n"
 		       " falling back to system-wide allocation\n",
 		       __func__, nid);
-		arena = alloc_bootmem(sizeof(*arena));
+		arena = memblock_alloc(sizeof(*arena), 0);
 	}
 
 	arena->ptes = memblock_alloc_node(sizeof(*arena), align, nid);
@@ -92,7 +92,7 @@ iommu_arena_new_node(int nid, struct pci_controller *hose, dma_addr_t base,
 
 #else /* CONFIG_DISCONTIGMEM */
 
-	arena = alloc_bootmem(sizeof(*arena));
+	arena = memblock_alloc(sizeof(*arena), 0);
 	arena->ptes = memblock_alloc_from(mem_size, align, 0);
 
 #endif /* CONFIG_DISCONTIGMEM */
@@ -361,9 +361,9 @@ static ia64_state_log_t ia64_state_log[IA64_MAX_LOG_TYPES];
 
 #define IA64_LOG_ALLOCATE(it, size) \
 	{ia64_state_log[it].isl_log[IA64_LOG_CURR_INDEX(it)] = \
-		(ia64_err_rec_t *)alloc_bootmem(size); \
+		(ia64_err_rec_t *)memblock_alloc(size, 0); \
 	ia64_state_log[it].isl_log[IA64_LOG_NEXT_INDEX(it)] = \
-		(ia64_err_rec_t *)alloc_bootmem(size);}
+		(ia64_err_rec_t *)memblock_alloc(size, 0);}
 #define IA64_LOG_LOCK_INIT(it) spin_lock_init(&ia64_state_log[it].isl_lock)
 #define IA64_LOG_LOCK(it) spin_lock_irqsave(&ia64_state_log[it].isl_lock, s)
 #define IA64_LOG_UNLOCK(it) spin_unlock_irqrestore(&ia64_state_log[it].isl_lock,s)
@@ -59,8 +59,8 @@ struct ia64_tr_entry *ia64_idtrs[NR_CPUS];
 void __init
 mmu_context_init (void)
 {
-	ia64_ctx.bitmap = alloc_bootmem((ia64_ctx.max_ctx+1)>>3);
-	ia64_ctx.flushmap = alloc_bootmem((ia64_ctx.max_ctx+1)>>3);
+	ia64_ctx.bitmap = memblock_alloc((ia64_ctx.max_ctx + 1) >> 3, 0);
+	ia64_ctx.flushmap = memblock_alloc((ia64_ctx.max_ctx + 1) >> 3, 0);
 }
 
 /*
@@ -267,7 +267,8 @@ void __init dvma_init(void)
 
 	list_add(&(hole->list), &hole_list);
 
-	iommu_use = alloc_bootmem(IOMMU_TOTAL_ENTRIES * sizeof(unsigned long));
+	iommu_use = memblock_alloc(IOMMU_TOTAL_ENTRIES * sizeof(unsigned long),
+				   0);
 
 	dvma_unmap_iommu(DVMA_START, DVMA_SIZE);
 
@@ -377,7 +377,7 @@ void * __ref zalloc_maybe_bootmem(size_t size, gfp_t mask)
 	if (mem_init_done)
 		p = kzalloc(size, mask);
 	else {
-		p = alloc_bootmem(size);
+		p = memblock_alloc(size, 0);
 		if (p)
 			memset(p, 0, size);
 	}
@@ -917,7 +917,7 @@ static void __init resource_init(void)
 		if (end >= HIGHMEM_START)
 			end = HIGHMEM_START - 1;
 
-		res = alloc_bootmem(sizeof(struct resource));
+		res = memblock_alloc(sizeof(struct resource), 0);
 
 		res->start = start;
 		res->end = end;
@@ -650,7 +650,7 @@ static int __init eth_setup(char *str)
 		return 1;
 	}
 
-	new = alloc_bootmem(sizeof(*new));
+	new = memblock_alloc(sizeof(*new), 0);
 
 	INIT_LIST_HEAD(&new->list);
 	new->index = n;
@@ -1580,7 +1580,7 @@ static int __init vector_setup(char *str)
 		       str, error);
 		return 1;
 	}
-	new = alloc_bootmem(sizeof(*new));
+	new = memblock_alloc(sizeof(*new), 0);
 	INIT_LIST_HEAD(&new->list);
 	new->unit = n;
 	new->arguments = str;
@@ -36,7 +36,7 @@ int __init read_initrd(void)
 		return 0;
 	}
 
-	area = alloc_bootmem(size);
+	area = memblock_alloc(size, 0);
 
 	if (load_initrd(initrd, area, size) == -1)
 		return 0;
@@ -933,7 +933,8 @@ static int __init acpi_parse_hpet(struct acpi_table_header *table)
 	 * the resource tree during the lateinit timeframe.
 	 */
 #define HPET_RESOURCE_NAME_SIZE 9
-	hpet_res = alloc_bootmem(sizeof(*hpet_res) + HPET_RESOURCE_NAME_SIZE);
+	hpet_res = memblock_alloc(sizeof(*hpet_res) + HPET_RESOURCE_NAME_SIZE,
+				  0);
 
 	hpet_res->name = (void *)&hpet_res[1];
 	hpet_res->flags = IORESOURCE_MEM;
@@ -2578,7 +2578,7 @@ static struct resource * __init ioapic_setup_resources(void)
 	n = IOAPIC_RESOURCE_NAME_SIZE + sizeof(struct resource);
 	n *= nr_ioapics;
 
-	mem = alloc_bootmem(n);
+	mem = memblock_alloc(n, 0);
 	res = (void *)mem;
 
 	mem += sizeof(struct resource) * nr_ioapics;
@@ -1094,7 +1094,7 @@ void __init e820__reserve_resources(void)
 	struct resource *res;
 	u64 end;
 
-	res = alloc_bootmem(sizeof(*res) * e820_table->nr_entries);
+	res = memblock_alloc(sizeof(*res) * e820_table->nr_entries, 0);
 	e820_res = res;
 
 	for (i = 0; i < e820_table->nr_entries; i++) {
@@ -141,7 +141,7 @@ void * __init prom_early_alloc(unsigned long size)
 	 * fast enough on the platforms we care about while minimizing
 	 * wasted bootmem) and hand off chunks of it to callers.
 	 */
-	res = alloc_bootmem(chunk_size);
+	res = memblock_alloc(chunk_size, 0);
 	BUG_ON(!res);
 	prom_early_allocated += chunk_size;
 	memset(res, 0, chunk_size);
@@ -646,7 +646,7 @@ static int __init iss_net_setup(char *str)
 		return 1;
 	}
 
-	new = alloc_bootmem(sizeof(*new));
+	new = memblock_alloc(sizeof(*new), 0);
 	if (new == NULL) {
 		pr_err("Alloc_bootmem failed\n");
 		return 1;
@@ -493,7 +493,7 @@ int __init smu_init (void)
 		goto fail_np;
 	}
 
-	smu = alloc_bootmem(sizeof(struct smu_device));
+	smu = memblock_alloc(sizeof(struct smu_device), 0);
 
 	spin_lock_init(&smu->lock);
 	INIT_LIST_HEAD(&smu->cmd_list);
@@ -773,8 +773,8 @@ static int __init initcall_blacklist(char *str)
 		str_entry = strsep(&str, ",");
 		if (str_entry) {
 			pr_debug("blacklisting initcall %s\n", str_entry);
-			entry = alloc_bootmem(sizeof(*entry));
-			entry->buf = alloc_bootmem(strlen(str_entry) + 1);
+			entry = memblock_alloc(sizeof(*entry), 0);
+			entry->buf = memblock_alloc(strlen(str_entry) + 1, 0);
 			strcpy(entry->buf, str_entry);
 			list_add(&entry->next, &blacklisted_initcalls);
 		}