XArray updates for 5.9
- Fix the test suite after introduction of the local_lock
- Fix a bug in the IDA spotted by Coverity
- Change the API that allows the workingset code to delete a node
- Fix xas_reload() when dealing with entries that occupy multiple indices
- Add a few more tests to the test suite
- Fix an unsigned int being shifted into an unsigned long

-----BEGIN PGP SIGNATURE-----

iQEzBAABCgAdFiEEejHryeLBw/spnjHrDpNsjXcpgj4FAl+OzzAACgkQDpNsjXcp
gj5YFgf/cV99dyPaal7AfMwhVwFcuVjIRH4S/VeOHkjS2QT1lpu3ffqfKALVR8vU
3IObM3oDCmLk0mYz9O+V/udVJoBYWiduI0LZhR6+V5ZrDjbw/d4VdCbwOplpeF5x
rntyI9r8f5d4LxBJ/moLjsosc1KfCzyVnV389eZRvZ8Muxuyc73WdAwZZZfD79nY
66gScEXQokU99zqJJ1nWfh05XTcTsKF25fVBGMLZTUBAytoFyPuC/kO2z8Uq9lEi
Ug6gDClskSB7A2W5gvprMcoUAVYcHfTb0wqJD5/MhkHyoTdcWdW8Re0kssXvD86V
KwlBdYQ/JuskgY/hbynZ/FP3p8+t1Q==
=12E/
-----END PGP SIGNATURE-----

Merge tag 'xarray-5.9' of git://git.infradead.org/users/willy/xarray

Pull XArray updates from Matthew Wilcox:

 - Fix the test suite after introduction of the local_lock

 - Fix a bug in the IDA spotted by Coverity

 - Change the API that allows the workingset code to delete a node

 - Fix xas_reload() when dealing with entries that occupy multiple indices

 - Add a few more tests to the test suite

 - Fix an unsigned int being shifted into an unsigned long

* tag 'xarray-5.9' of git://git.infradead.org/users/willy/xarray:
  XArray: Fix xas_create_range for ranges above 4 billion
  radix-tree: fix the comment of radix_tree_next_slot()
  XArray: Fix xas_reload for multi-index entries
  XArray: Add private interface for workingset node deletion
  XArray: Fix xas_for_each_conflict documentation
  XArray: Test marked multiorder iterations
  XArray: Test two more things about xa_cmpxchg
  ida: Free allocated bitmap in error path
  radix tree test suite: Fix compilation
commit c4d6fe7311
include/linux/radix-tree.h

@@ -11,6 +11,7 @@
 #include <linux/bitops.h>
 #include <linux/kernel.h>
 #include <linux/list.h>
+#include <linux/percpu.h>
 #include <linux/preempt.h>
 #include <linux/rcupdate.h>
 #include <linux/spinlock.h>
@@ -376,7 +377,7 @@ radix_tree_chunk_size(struct radix_tree_iter *iter)
  * radix_tree_next_slot - find next slot in chunk
  *
  * @slot: pointer to current slot
- * @iter: pointer to interator state
+ * @iter: pointer to iterator state
  * @flags: RADIX_TREE_ITER_*, should be constant
  * Returns: pointer to next slot, or NULL if there no more left
  *
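For orientation, radix_tree_next_slot() is normally reached through the radix_tree_for_each_slot() iterator rather than called directly. A minimal sketch of that pattern follows; the tree root and the process() consumer are hypothetical:

#include <linux/radix-tree.h>
#include <linux/rcupdate.h>

static void walk_tree(struct radix_tree_root *root)
{
        struct radix_tree_iter iter;
        void **slot;

        rcu_read_lock();
        radix_tree_for_each_slot(slot, root, &iter, 0) {
                void *entry = radix_tree_deref_slot(slot);

                if (radix_tree_deref_retry(entry)) {
                        /* raced with a node being replaced: restart this chunk */
                        slot = radix_tree_iter_retry(&iter);
                        continue;
                }
                process(iter.index, entry);     /* hypothetical consumer */
        }
        rcu_read_unlock();
}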
include/linux/xarray.h

@@ -1286,6 +1286,8 @@ static inline bool xa_is_advanced(const void *entry)
  */
 typedef void (*xa_update_node_t)(struct xa_node *node);
 
+void xa_delete_node(struct xa_node *, xa_update_node_t);
+
 /*
  * The xa_state is opaque to its users. It contains various different pieces
  * of state involved in the current operation on the XArray. It should be
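For context, xa_update_node_t callbacks are invoked as nodes change so that a user such as the workingset code can keep interesting nodes on a private list. A hypothetical callback with this signature, loosely modelled on that use (the my_shadow_nodes list is made up and locking is omitted for brevity):

#include <linux/list.h>
#include <linux/xarray.h>

static LIST_HEAD(my_shadow_nodes);      /* hypothetical LRU of shadow-only nodes */

static void my_update_node(struct xa_node *node)
{
        if (node->count && node->count == node->nr_values) {
                /* node now holds only value entries: track it */
                if (list_empty(&node->private_list))
                        list_add(&node->private_list, &my_shadow_nodes);
        } else {
                /* node holds real entries (or nothing): stop tracking it */
                if (!list_empty(&node->private_list))
                        list_del_init(&node->private_list);
        }
}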
@@ -1544,10 +1546,21 @@ static inline void xas_split_alloc(struct xa_state *xas, void *entry,
 static inline void *xas_reload(struct xa_state *xas)
 {
         struct xa_node *node = xas->xa_node;
+        void *entry;
+        char offset;
 
-        if (node)
-                return xa_entry(xas->xa, node, xas->xa_offset);
-        return xa_head(xas->xa);
+        if (!node)
+                return xa_head(xas->xa);
+        if (IS_ENABLED(CONFIG_XARRAY_MULTI)) {
+                offset = (xas->xa_index >> node->shift) & XA_CHUNK_MASK;
+                entry = xa_entry(xas->xa, node, offset);
+                if (!xa_is_sibling(entry))
+                        return entry;
+                offset = xa_to_sibling(entry);
+        } else {
+                offset = xas->xa_offset;
+        }
+        return xa_entry(xas->xa, node, offset);
 }
 
 /**
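A hedged sketch of the pattern xas_reload() serves: look an entry up under RCU, take a reference, then reload to confirm the slot still holds the same object. With the hunk above this also behaves correctly when the index (0x41 here) falls inside a multi-index entry stored at 0x40; struct item, try_get_ref() and put_ref() are illustrative only:

static struct item *lookup_item(struct xarray *xa)
{
        XA_STATE(xas, xa, 0x41);        /* 0x41 lies inside an order-2 entry stored at 0x40 */
        struct item *item;

        rcu_read_lock();
        item = xas_load(&xas);
        if (item) {
                if (!try_get_ref(item)) {
                        item = NULL;
                } else if (item != xas_reload(&xas)) {
                        /* slot changed between the load and taking the reference */
                        put_ref(item);
                        item = NULL;
                }
        }
        rcu_read_unlock();
        return item;
}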
@@ -1736,13 +1749,12 @@ enum {
  * @xas: XArray operation state.
  * @entry: Entry retrieved from the array.
  *
- * The loop body will be executed for each entry in the XArray that lies
- * within the range specified by @xas. If the loop completes successfully,
- * any entries that lie in this range will be replaced by @entry. The caller
- * may break out of the loop; if they do so, the contents of the XArray will
- * be unchanged. The operation may fail due to an out of memory condition.
- * The caller may also call xa_set_err() to exit the loop while setting an
- * error to record the reason.
+ * The loop body will be executed for each entry in the XArray that
+ * lies within the range specified by @xas. If the loop terminates
+ * normally, @entry will be %NULL. The user may break out of the loop,
+ * which will leave @entry set to the conflicting entry. The caller
+ * may also call xa_set_err() to exit the loop while setting an error
+ * to record the reason.
  */
 #define xas_for_each_conflict(xas, entry) \
         while ((entry = xas_find_conflict(xas)))
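A hedged usage sketch matching the corrected description: walk the conflicting entries in the range, give up if a non-value entry is already present, and otherwise store the new entry over the range. The function name, index, order and new_entry are illustrative:

static int store_unless_conflict(struct xarray *xa, unsigned long index,
                                 unsigned int order, void *new_entry)
{
        XA_STATE_ORDER(xas, xa, index, order);
        void *entry;

        do {
                xas_lock(&xas);
                xas_for_each_conflict(&xas, entry) {
                        if (!xa_is_value(entry))
                                break;  /* leaves @entry pointing at the conflict */
                }
                if (!entry)
                        xas_store(&xas, new_entry);     /* replaces any value entries in range */
                xas_unlock(&xas);
        } while (xas_nomem(&xas, GFP_KERNEL));

        return entry ? -EEXIST : xas_error(&xas);
}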
lib/idr.c

@@ -471,6 +471,7 @@ int ida_alloc_range(struct ida *ida, unsigned int min, unsigned int max,
         goto retry;
 nospc:
         xas_unlock_irqrestore(&xas, flags);
+        kfree(alloc);
         return -ENOSPC;
 }
 EXPORT_SYMBOL(ida_alloc_range);
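The fix is internal to ida_alloc_range(): a preallocated bitmap was leaked on the -ENOSPC exit. Callers see no interface change; a minimal, hypothetical usage sketch for reference:

static DEFINE_IDA(my_ida);              /* hypothetical IDA */

static int grab_id_128(void)
{
        int id = ida_alloc_range(&my_ida, 128, 128, GFP_KERNEL);

        if (id < 0)     /* -ENOSPC if 128 is already in use, -ENOMEM on allocation failure */
                return id;
        /* ... use id ... */
        ida_free(&my_ida, id);
        return 0;
}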
lib/radix-tree.c

@@ -20,7 +20,6 @@
 #include <linux/kernel.h>
 #include <linux/kmemleak.h>
-#include <linux/percpu.h>
 #include <linux/local_lock.h>
 #include <linux/preempt.h>      /* in_interrupt() */
 #include <linux/radix-tree.h>
 #include <linux/rcupdate.h>
lib/test_xarray.c

@@ -289,6 +289,27 @@ static noinline void check_xa_mark_2(struct xarray *xa)
         xa_destroy(xa);
 }
 
+static noinline void check_xa_mark_3(struct xarray *xa)
+{
+#ifdef CONFIG_XARRAY_MULTI
+        XA_STATE(xas, xa, 0x41);
+        void *entry;
+        int count = 0;
+
+        xa_store_order(xa, 0x40, 2, xa_mk_index(0x40), GFP_KERNEL);
+        xa_set_mark(xa, 0x41, XA_MARK_0);
+
+        rcu_read_lock();
+        xas_for_each_marked(&xas, entry, ULONG_MAX, XA_MARK_0) {
+                count++;
+                XA_BUG_ON(xa, entry != xa_mk_index(0x40));
+        }
+        XA_BUG_ON(xa, count != 1);
+        rcu_read_unlock();
+        xa_destroy(xa);
+#endif
+}
+
 static noinline void check_xa_mark(struct xarray *xa)
 {
         unsigned long index;

@@ -297,6 +318,7 @@ static noinline void check_xa_mark(struct xarray *xa)
                 check_xa_mark_1(xa, index);
 
         check_xa_mark_2(xa);
+        check_xa_mark_3(xa);
 }
 
 static noinline void check_xa_shrink(struct xarray *xa)
@@ -393,6 +415,9 @@ static noinline void check_cmpxchg(struct xarray *xa)
         XA_BUG_ON(xa, xa_cmpxchg(xa, 12345678, FIVE, LOTS, GFP_KERNEL) != FIVE);
         XA_BUG_ON(xa, xa_cmpxchg(xa, 5, FIVE, NULL, GFP_KERNEL) != NULL);
+        XA_BUG_ON(xa, xa_cmpxchg(xa, 5, NULL, FIVE, GFP_KERNEL) != NULL);
+        XA_BUG_ON(xa, xa_insert(xa, 5, FIVE, GFP_KERNEL) != -EBUSY);
+        XA_BUG_ON(xa, xa_cmpxchg(xa, 5, FIVE, NULL, GFP_KERNEL) != FIVE);
         XA_BUG_ON(xa, xa_insert(xa, 5, FIVE, GFP_KERNEL) == -EBUSY);
         xa_erase_index(xa, 12345678);
         xa_erase_index(xa, 5);
         XA_BUG_ON(xa, !xa_empty(xa));
@@ -1618,14 +1643,9 @@ static noinline void shadow_remove(struct xarray *xa)
         xa_lock(xa);
         while ((node = list_first_entry_or_null(&shadow_nodes,
                                         struct xa_node, private_list))) {
-                XA_STATE(xas, node->array, 0);
                 XA_BUG_ON(xa, node->array != xa);
                 list_del_init(&node->private_list);
-                xas.xa_node = xa_parent_locked(node->array, node);
-                xas.xa_offset = node->offset;
-                xas.xa_shift = node->shift + XA_CHUNK_SHIFT;
-                xas_set_update(&xas, test_update_node);
-                xas_store(&xas, NULL);
+                xa_delete_node(node, test_update_node);
         }
         xa_unlock(xa);
 }
lib/xarray.c

@@ -706,7 +706,7 @@ void xas_create_range(struct xa_state *xas)
         unsigned char shift = xas->xa_shift;
         unsigned char sibs = xas->xa_sibs;
 
-        xas->xa_index |= ((sibs + 1) << shift) - 1;
+        xas->xa_index |= ((sibs + 1UL) << shift) - 1;
         if (xas_is_node(xas) && xas->xa_node->shift == xas->xa_shift)
                 xas->xa_offset |= sibs;
         xas->xa_shift = 0;
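Why the 1UL matters (an illustrative sketch, not kernel code): sibs is an unsigned char, so (sibs + 1) is promoted only to int and the shift is performed in 32 bits, which is undefined behaviour once shift reaches 32, exactly the case for ranges above 4 billion. Promoting to unsigned long keeps the shift at the full index width on 64-bit kernels:

unsigned char sibs = 0, shift = 36;

unsigned long bad  = ((sibs + 1)   << shift) - 1;       /* int shifted by 36: undefined behaviour */
unsigned long good = ((sibs + 1UL) << shift) - 1;       /* 0xfffffffff, the intended index mask */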
@@ -2163,6 +2163,29 @@ unsigned int xa_extract(struct xarray *xa, void **dst, unsigned long start,
 }
 EXPORT_SYMBOL(xa_extract);
 
+/**
+ * xa_delete_node() - Private interface for workingset code.
+ * @node: Node to be removed from the tree.
+ * @update: Function to call to update ancestor nodes.
+ *
+ * Context: xa_lock must be held on entry and will not be released.
+ */
+void xa_delete_node(struct xa_node *node, xa_update_node_t update)
+{
+        struct xa_state xas = {
+                .xa = node->array,
+                .xa_index = (unsigned long)node->offset <<
+                                (node->shift + XA_CHUNK_SHIFT),
+                .xa_shift = node->shift + XA_CHUNK_SHIFT,
+                .xa_offset = node->offset,
+                .xa_node = xa_parent_locked(node->array, node),
+                .xa_update = update,
+        };
+
+        xas_store(&xas, NULL);
+}
+EXPORT_SYMBOL_GPL(xa_delete_node);      /* For the benefit of the test suite */
+
 /**
  * xa_destroy() - Free all internal data structures.
  * @xa: XArray.
mm/workingset.c

@@ -519,12 +519,11 @@ static enum lru_status shadow_lru_isolate(struct list_head *item,
                                           void *arg) __must_hold(lru_lock)
 {
         struct xa_node *node = container_of(item, struct xa_node, private_list);
-        XA_STATE(xas, node->array, 0);
         struct address_space *mapping;
         int ret;
 
         /*
-         * Page cache insertions and deletions synchroneously maintain
+         * Page cache insertions and deletions synchronously maintain
          * the shadow node LRU under the i_pages lock and the
          * lru_lock. Because the page cache tree is emptied before
          * the inode can be destroyed, holding the lru_lock pins any

@@ -559,15 +558,7 @@ static enum lru_status shadow_lru_isolate(struct list_head *item,
         if (WARN_ON_ONCE(node->count != node->nr_values))
                 goto out_invalid;
         mapping->nrexceptional -= node->nr_values;
-        xas.xa_node = xa_parent_locked(&mapping->i_pages, node);
-        xas.xa_offset = node->offset;
-        xas.xa_shift = node->shift + XA_CHUNK_SHIFT;
-        xas_set_update(&xas, workingset_update_node);
-        /*
-         * We could store a shadow entry here which was the minimum of the
-         * shadow entries we were tracking ...
-         */
-        xas_store(&xas, NULL);
+        xa_delete_node(node, workingset_update_node);
         __inc_lruvec_slab_state(node, WORKINGSET_NODERECLAIM);
 
 out_invalid:
tools/testing/radix-tree/idr-test.c

@@ -523,8 +523,27 @@ static void *ida_random_fn(void *arg)
         return NULL;
 }
 
+static void *ida_leak_fn(void *arg)
+{
+        struct ida *ida = arg;
+        time_t s = time(NULL);
+        int i, ret;
+
+        rcu_register_thread();
+
+        do for (i = 0; i < 1000; i++) {
+                ret = ida_alloc_range(ida, 128, 128, GFP_KERNEL);
+                if (ret >= 0)
+                        ida_free(ida, 128);
+        } while (time(NULL) < s + 2);
+
+        rcu_unregister_thread();
+        return NULL;
+}
+
 void ida_thread_tests(void)
 {
+        DEFINE_IDA(ida);
         pthread_t threads[20];
         int i;
 

@@ -536,6 +555,16 @@ void ida_thread_tests(void)
         while (i--)
                 pthread_join(threads[i], NULL);
 
+        for (i = 0; i < ARRAY_SIZE(threads); i++)
+                if (pthread_create(&threads[i], NULL, ida_leak_fn, &ida)) {
+                        perror("creating ida thread");
+                        exit(1);
+                }
+
+        while (i--)
+                pthread_join(threads[i], NULL);
+
+        assert(ida_is_empty(&ida));
 }
 
 void ida_tests(void)
tools/testing/radix-tree/linux/kernel.h

@@ -22,4 +22,5 @@
 #define __releases(x)
 #define __must_hold(x)
 
+#define EXPORT_PER_CPU_SYMBOL_GPL(x)
 #endif /* _KERNEL_H */

tools/testing/radix-tree/linux/local_lock.h (new file)

@@ -0,0 +1,8 @@
+#ifndef _LINUX_LOCAL_LOCK
+#define _LINUX_LOCAL_LOCK
+typedef struct { } local_lock_t;
+
+static inline void local_lock(local_lock_t *lock) { }
+static inline void local_unlock(local_lock_t *lock) { }
+#define INIT_LOCAL_LOCK(x) { }
+#endif
tools/testing/radix-tree/test.h

@@ -56,8 +56,4 @@ int root_tag_get(struct radix_tree_root *root, unsigned int tag);
 unsigned long node_maxindex(struct radix_tree_node *);
 unsigned long shift_maxindex(unsigned int shift);
 int radix_tree_cpu_dead(unsigned int cpu);
-struct radix_tree_preload {
-        unsigned nr;
-        struct radix_tree_node *nodes;
-};
 extern struct radix_tree_preload radix_tree_preloads;