xfs: remove old vmap cache
Re-apply a commit that had been reverted due to regressions
that have since been fixed.
Original commit: d2859751cd
Author: Nick Piggin <npiggin@suse.de>
Date: Tue, 6 Jan 2009 14:40:44 +1100
XFS's vmap batching simply defers a number (up to 64) of vunmaps,
and keeps track of them in a list. To purge the batch, it just goes
through the list and calls vunmap on each one. This is pretty poor:
a global TLB flush is generally still performed on each vunmap, with
the most expensive parts of the operation being the broadcast IPIs
and locking involved in the SMP callouts, and the locking involved
in the vmap management -- none of these are avoided by just batching
up the calls. I'm actually surprised it ever made much difference.
(Now that the lazy vmap allocator is upstream, this description is
not quite right, but the vunmap batching still doesn't seem to do
much).
Rip all this logic out of XFS completely. I will improve vmap
performance and scalability directly in a subsequent patch.
Signed-off-by: Nick Piggin <npiggin@suse.de>
Reviewed-by: Christoph Hellwig <hch@infradead.org>
Signed-off-by: Lachlan McIlroy <lachlan@sgi.com>
The only change I made was to use the "new" xfs_buf_is_vmapped()
function in a place it had been open-coded in the original.
Modified-by: Alex Elder <aelder@sgi.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Alex Elder <aelder@sgi.com>
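
For illustration only, here is a minimal, self-contained userspace model of the deferred-vunmap batching that the message above describes and the diff below removes. Every name in it (deferred_unmap, purge_deferred, fake_vunmap, tlb_flushes) is invented for this sketch; the real kernel code is the free_address()/purge_addresses() pair in the first hunk, which additionally takes the as_lock spinlock that this single-threaded model leaves out. The point it makes concrete is Nick's: batching defers the vunmap calls but not their cost, because each call still pays for its own flush.

/*
 * Toy model (not kernel code): defer "unmaps" on a list, then purge the
 * list by "unmapping" each entry one at a time.  The flush counter shows
 * that 64 deferred unmaps still cost 64 flushes.
 */
#include <stdio.h>
#include <stdlib.h>

struct deferred {                       /* models a_list_t */
        void            *vm_addr;
        struct deferred *next;
};

static struct deferred  *free_head;     /* models as_free_head */
static int              deferred_len;   /* models as_list_len */
static int              tlb_flushes;    /* the cost we are counting */

/*
 * Stand-in for vunmap(): before the lazy vmap allocator, each call
 * generally triggered a global TLB flush (broadcast IPIs and all).
 */
static void fake_vunmap(void *addr)
{
        (void)addr;
        tlb_flushes++;
}

/* Models free_address(): queue the address instead of unmapping it now. */
static void deferred_unmap(void *addr)
{
        struct deferred *entry = malloc(sizeof(*entry));

        if (!entry) {
                fake_vunmap(addr);      /* no memory: fall back to eager unmap */
                return;
        }
        entry->vm_addr = addr;
        entry->next = free_head;
        free_head = entry;
        deferred_len++;
}

/*
 * Models purge_addresses(): walk the list, unmapping and freeing entries.
 * Nothing expensive is amortised -- one flush per deferred address.
 */
static void purge_deferred(void)
{
        struct deferred *entry, *old;

        entry = free_head;
        free_head = NULL;
        deferred_len = 0;

        while ((old = entry) != NULL) {
                fake_vunmap(entry->vm_addr);
                entry = entry->next;
                free(old);
        }
}

int main(void)
{
        static char dummy[64];
        int i;

        for (i = 0; i < 64; i++)        /* XFS batched up to 64 */
                deferred_unmap(&dummy[i]);
        purge_deferred();

        printf("flushes after purging 64 deferred unmaps: %d\n", tlb_flushes);
        return 0;
}

Built with any C compiler (e.g. cc -o vmap_batch vmap_batch.c), it prints 64: the batching changed when the unmaps happened, not how many flushes they needed.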
commit cd9640a70d (parent 57d54889cd)
@@ -167,75 +167,6 @@ test_page_region(
         return (mask && (page_private(page) & mask) == mask);
 }
 
-/*
- * Mapping of multi-page buffers into contiguous virtual space
- */
-
-typedef struct a_list {
-        void            *vm_addr;
-        struct a_list   *next;
-} a_list_t;
-
-static a_list_t         *as_free_head;
-static int              as_list_len;
-static DEFINE_SPINLOCK(as_lock);
-
-/*
- * Try to batch vunmaps because they are costly.
- */
-STATIC void
-free_address(
-        void            *addr)
-{
-        a_list_t        *aentry;
-
-#ifdef CONFIG_XEN
-        /*
-         * Xen needs to be able to make sure it can get an exclusive
-         * RO mapping of pages it wants to turn into a pagetable.  If
-         * a newly allocated page is also still being vmap()ed by xfs,
-         * it will cause pagetable construction to fail.  This is a
-         * quick workaround to always eagerly unmap pages so that Xen
-         * is happy.
-         */
-        vunmap(addr);
-        return;
-#endif
-
-        aentry = kmalloc(sizeof(a_list_t), GFP_NOWAIT);
-        if (likely(aentry)) {
-                spin_lock(&as_lock);
-                aentry->next = as_free_head;
-                aentry->vm_addr = addr;
-                as_free_head = aentry;
-                as_list_len++;
-                spin_unlock(&as_lock);
-        } else {
-                vunmap(addr);
-        }
-}
-
-STATIC void
-purge_addresses(void)
-{
-        a_list_t        *aentry, *old;
-
-        if (as_free_head == NULL)
-                return;
-
-        spin_lock(&as_lock);
-        aentry = as_free_head;
-        as_free_head = NULL;
-        as_list_len = 0;
-        spin_unlock(&as_lock);
-
-        while ((old = aentry) != NULL) {
-                vunmap(aentry->vm_addr);
-                aentry = aentry->next;
-                kfree(old);
-        }
-}
-
 /*
  * Internal xfs_buf_t object manipulation
  */
@@ -337,7 +268,7 @@ xfs_buf_free(
         uint            i;
 
         if (xfs_buf_is_vmapped(bp))
-                free_address(bp->b_addr - bp->b_offset);
+                vunmap(bp->b_addr - bp->b_offset);
 
         for (i = 0; i < bp->b_page_count; i++) {
                 struct page     *page = bp->b_pages[i];
@@ -457,8 +388,6 @@ _xfs_buf_map_pages(
                 bp->b_addr = page_address(bp->b_pages[0]) + bp->b_offset;
                 bp->b_flags |= XBF_MAPPED;
         } else if (flags & XBF_MAPPED) {
-                if (as_list_len > 64)
-                        purge_addresses();
                 bp->b_addr = vmap(bp->b_pages, bp->b_page_count,
                                         VM_MAP, PAGE_KERNEL);
                 if (unlikely(bp->b_addr == NULL))
@@ -1955,9 +1884,6 @@ xfsbufd(
                         xfs_buf_iostrategy(bp);
                         count++;
                 }
-
-                if (as_list_len > 0)
-                        purge_addresses();
                 if (count)
                         blk_run_address_space(target->bt_mapping);
 
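
One aside on the xfs_buf_is_vmapped() helper that the re-apply note mentions and that appears as unchanged context in the xfs_buf_free() hunk: its body is not shown in this diff. As a point of reference, here is a sketch of the test such a helper would perform in place of the old open-coded check; the exact body is an assumption, reconstructed from how the call is used here.

/*
 * Sketch only -- not taken from this diff.  A buffer is vmap()ed only when
 * it was asked to be mapped (XBF_MAPPED) and spans more than one page;
 * a single-page buffer just uses page_address() on its one page.
 */
static inline int
xfs_buf_is_vmapped(
        struct xfs_buf  *bp)
{
        return (bp->b_flags & XBF_MAPPED) && bp->b_page_count > 1;
}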