Rename .data.cacheline_aligned to .data..cacheline_aligned.
Signed-off-by: Tim Abbott <tabbott@ksplice.com>
Cc: Sam Ravnborg <sam@ravnborg.org>
Signed-off-by: Denys Vlasenko <vda.linux@googlemail.com>
Signed-off-by: Michal Marek <mmarek@suse.cz>
parent bc75cc6b56
commit 4af57b787b
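This is part of the larger section-renaming series: kernel-specific sections move from the .data.* namespace to .data..* so their names cannot collide with the per-symbol .data.<symbol> sections gcc emits under -ffunction-sections/-fdata-sections. As a sketch of what the rename affects (hypothetical variable, not from this commit), any variable marked __cacheline_aligned is emitted into the renamed section:

/* Hypothetical kernel-style example, not part of this commit. */
#include <linux/cache.h>

/* Starts on its own cache line; after this patch it is placed in
 * .data..cacheline_aligned rather than .data.cacheline_aligned. */
static unsigned long hot_counter __cacheline_aligned;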
@@ -231,7 +231,7 @@ SECTIONS
 		PAGE_ALIGNED_DATA(PAGE_SIZE)
 	}
 
-	.data.cacheline_aligned : AT(ADDR(.data.cacheline_aligned) - LOAD_OFFSET) {
+	.data..cacheline_aligned : AT(ADDR(.data..cacheline_aligned) - LOAD_OFFSET) {
 		CACHELINE_ALIGNED_DATA(L1_CACHE_BYTES)
 	}
 
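For context, AT(ADDR(section) - LOAD_OFFSET) gives the output section a load address (LMA) that differs from its virtual address (VMA) by the arch-defined LOAD_OFFSET. A minimal sketch of the pattern this hunk edits, with illustrative addresses that are not taken from this commit (kernel .lds.S files are run through the C preprocessor, so #define works here):

/* sketch.lds.S -- illustrative only; addresses are made up. */
#define LOAD_OFFSET 0xffffffff80000000

SECTIONS
{
	. = 0xffffffff80100000;

	/* VMA is the current location counter; LMA is VMA minus
	 * LOAD_OFFSET, i.e. the address the section is loaded at. */
	.data..cacheline_aligned : AT(ADDR(.data..cacheline_aligned) - LOAD_OFFSET) {
		*(.data..cacheline_aligned)
	}
}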
@@ -34,7 +34,7 @@ EXPORT_SYMBOL(init_task);
 /*
  * per-CPU TSS segments. Threads are completely 'soft' on Linux,
  * no more per-task TSS's. The TSS size is kept cacheline-aligned
- * so they are allowed to end up in the .data.cacheline_aligned
+ * so they are allowed to end up in the .data..cacheline_aligned
  * section. Since TSS's are completely CPU-local, we want them
  * on exact cacheline boundaries, to eliminate cacheline ping-pong.
  */
@@ -189,7 +189,7 @@
 
 #define CACHELINE_ALIGNED_DATA(align)					\
 	. = ALIGN(align);						\
-	*(.data.cacheline_aligned)
+	*(.data..cacheline_aligned)
 
 #define INIT_TASK_DATA(align)						\
 	. = ALIGN(align);						\
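After this patch, an invocation such as CACHELINE_ALIGNED_DATA(L1_CACHE_BYTES) in an architecture's linker script (as in the first hunk above) expands to:

	. = ALIGN(L1_CACHE_BYTES);
	*(.data..cacheline_aligned)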
@@ -31,7 +31,7 @@
 #ifndef __cacheline_aligned
 #define __cacheline_aligned					\
   __attribute__((__aligned__(SMP_CACHE_BYTES),			\
-		 __section__(".data.cacheline_aligned")))
+		 __section__(".data..cacheline_aligned")))
 #endif /* __cacheline_aligned */
 
 #ifndef __cacheline_aligned_in_smp
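The same attribute pattern can be tried outside the kernel. A self-contained sketch (hypothetical names; SMP_CACHE_BYTES replaced by a hard-coded 64 for illustration) that places a variable in a custom .data..cacheline_aligned section:

/* cacheline.c -- build with: gcc -o cacheline cacheline.c */
#include <stdio.h>

#define MY_CACHELINE_ALIGNED \
	__attribute__((__aligned__(64), \
		       __section__(".data..cacheline_aligned")))

/* Initialized so it lands in a data section rather than .bss. */
static unsigned long hot_counter MY_CACHELINE_ALIGNED = 1;

int main(void)
{
	/* readelf -S or objdump -h on the binary lists the section. */
	printf("%p\n", (void *)&hot_counter);
	return 0;
}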