diff --git a/lib/maple_tree.c b/lib/maple_tree.c
index 250b4c67fac8..4976522e3e48 100644
--- a/lib/maple_tree.c
+++ b/lib/maple_tree.c
@@ -5913,7 +5913,7 @@ int mas_expected_entries(struct ma_state *mas, unsigned long nr_entries)
 	/* Internal nodes */
 	nr_nodes += DIV_ROUND_UP(nr_nodes, nonleaf_cap);
 	/* Add working room for split (2 nodes) + new parents */
-	mas_node_count(mas, nr_nodes + 3);
+	mas_node_count_gfp(mas, nr_nodes + 3, GFP_KERNEL);
 
 	/* Detect if allocations run out */
 	mas->mas_flags |= MA_STATE_PREALLOC;
diff --git a/lib/test_maple_tree.c b/lib/test_maple_tree.c
index fad668042f3e..ab9d4461abc9 100644
--- a/lib/test_maple_tree.c
+++ b/lib/test_maple_tree.c
@@ -9,6 +9,7 @@
 
 #include <linux/maple_tree.h>
 #include <linux/module.h>
+#include <linux/rwsem.h>
 
 #define MTREE_ALLOC_MAX 0x2000000000000Ul
 #ifndef CONFIG_DEBUG_MAPLE_TREE
@@ -1678,17 +1679,21 @@ static noinline void __init check_forking(struct maple_tree *mt)
 	void *val;
 	MA_STATE(mas, mt, 0, 0);
 	MA_STATE(newmas, mt, 0, 0);
+	struct rw_semaphore newmt_lock;
+
+	init_rwsem(&newmt_lock);
 
 	for (i = 0; i <= nr_entries; i++)
 		mtree_store_range(mt, i*10, i*10 + 5,
 				  xa_mk_value(i), GFP_KERNEL);
 
 	mt_set_non_kernel(99999);
-	mt_init_flags(&newmt, MT_FLAGS_ALLOC_RANGE);
+	mt_init_flags(&newmt, MT_FLAGS_ALLOC_RANGE | MT_FLAGS_LOCK_EXTERN);
+	mt_set_external_lock(&newmt, &newmt_lock);
 	newmas.tree = &newmt;
 	mas_reset(&newmas);
 	mas_reset(&mas);
-	mas_lock(&newmas);
+	down_write(&newmt_lock);
 	mas.index = 0;
 	mas.last = 0;
 	if (mas_expected_entries(&newmas, nr_entries)) {
@@ -1703,10 +1708,10 @@ static noinline void __init check_forking(struct maple_tree *mt)
 	}
 	rcu_read_unlock();
 	mas_destroy(&newmas);
-	mas_unlock(&newmas);
 	mt_validate(&newmt);
 	mt_set_non_kernel(0);
-	mtree_destroy(&newmt);
+	__mt_destroy(&newmt);
+	up_write(&newmt_lock);
 }
 
 static noinline void __init check_iteration(struct maple_tree *mt)
@@ -1818,6 +1823,10 @@ static noinline void __init bench_forking(struct maple_tree *mt)
 	void *val;
 	MA_STATE(mas, mt, 0, 0);
 	MA_STATE(newmas, mt, 0, 0);
+	struct rw_semaphore newmt_lock;
+
+	init_rwsem(&newmt_lock);
+	mt_set_external_lock(&newmt, &newmt_lock);
 
 	for (i = 0; i <= nr_entries; i++)
 		mtree_store_range(mt, i*10, i*10 + 5,
@@ -1832,7 +1841,7 @@ static noinline void __init bench_forking(struct maple_tree *mt)
 		mas.index = 0;
 		mas.last = 0;
 		rcu_read_lock();
-		mas_lock(&newmas);
+		down_write(&newmt_lock);
 		if (mas_expected_entries(&newmas, nr_entries)) {
 			printk("OOM!");
 			BUG_ON(1);
@@ -1843,11 +1852,11 @@ static noinline void __init bench_forking(struct maple_tree *mt)
 			mas_store(&newmas, val);
 		}
 		mas_destroy(&newmas);
-		mas_unlock(&newmas);
 		rcu_read_unlock();
 		mt_validate(&newmt);
 		mt_set_non_kernel(0);
-		mtree_destroy(&newmt);
+		__mt_destroy(&newmt);
+		up_write(&newmt_lock);
 	}
 }
 #endif
@@ -2453,6 +2462,10 @@ static noinline void __init check_dup_gaps(struct maple_tree *mt,
 	void *tmp;
 	MA_STATE(mas, mt, 0, 0);
 	MA_STATE(newmas, &newmt, 0, 0);
+	struct rw_semaphore newmt_lock;
+
+	init_rwsem(&newmt_lock);
+	mt_set_external_lock(&newmt, &newmt_lock);
 
 	if (!zero_start)
 		i = 1;
@@ -2462,9 +2475,9 @@ static noinline void __init check_dup_gaps(struct maple_tree *mt,
 		mtree_store_range(mt, i*10, (i+1)*10 - gap,
 				  xa_mk_value(i), GFP_KERNEL);
 
-	mt_init_flags(&newmt, MT_FLAGS_ALLOC_RANGE);
+	mt_init_flags(&newmt, MT_FLAGS_ALLOC_RANGE | MT_FLAGS_LOCK_EXTERN);
 	mt_set_non_kernel(99999);
-	mas_lock(&newmas);
+	down_write(&newmt_lock);
 	ret = mas_expected_entries(&newmas, nr_entries);
 	mt_set_non_kernel(0);
 	MT_BUG_ON(mt, ret != 0);
@@ -2477,9 +2490,9 @@ static noinline void __init check_dup_gaps(struct maple_tree *mt,
 	}
 	rcu_read_unlock();
 	mas_destroy(&newmas);
-	mas_unlock(&newmas);
 
-	mtree_destroy(&newmt);
+	__mt_destroy(&newmt);
+	up_write(&newmt_lock);
 }
 
 /* Duplicate many sizes of trees. Mainly to test expected entry values */
diff --git a/tools/include/linux/rwsem.h b/tools/include/linux/rwsem.h
new file mode 100644
index 000000000000..83971b3cbfce
--- /dev/null
+++ b/tools/include/linux/rwsem.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+#ifndef _TOOLS__RWSEM_H
+#define _TOOLS__RWSEM_H
+
+#include <pthread.h>
+
+struct rw_semaphore {
+	pthread_rwlock_t lock;
+};
+
+static inline int init_rwsem(struct rw_semaphore *sem)
+{
+	return pthread_rwlock_init(&sem->lock, NULL);
+}
+
+static inline int exit_rwsem(struct rw_semaphore *sem)
+{
+	return pthread_rwlock_destroy(&sem->lock);
+}
+
+static inline int down_read(struct rw_semaphore *sem)
+{
+	return pthread_rwlock_rdlock(&sem->lock);
+}
+
+static inline int up_read(struct rw_semaphore *sem)
+{
+	return pthread_rwlock_unlock(&sem->lock);
+}
+
+static inline int down_write(struct rw_semaphore *sem)
+{
+	return pthread_rwlock_wrlock(&sem->lock);
+}
+
+static inline int up_write(struct rw_semaphore *sem)
+{
+	return pthread_rwlock_unlock(&sem->lock);
+}
+#endif /* _TOOLS__RWSEM_H */
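
Why the tests change their locking: the lib/maple_tree.c hunk makes mas_expected_entries() preallocate through mas_node_count_gfp() with GFP_KERNEL, and GFP_KERNEL allocations are allowed to sleep. mas_lock() takes the tree's internal spinlock, which must not be held across a sleeping allocation, so the three tests instead tag the destination tree with MT_FLAGS_LOCK_EXTERN, attach a rw_semaphore via mt_set_external_lock(), and hold it for writing across both the preallocation and the store loop. Below is a minimal sketch of that pattern, condensed from check_forking() as it looks after this patch; dup_tree_locked() is a hypothetical helper name used only for illustration:

#include <linux/maple_tree.h>
#include <linux/rwsem.h>

static int dup_tree_locked(struct maple_tree *mt, unsigned long nr_entries)
{
	struct maple_tree newmt;
	struct rw_semaphore newmt_lock;
	void *val;
	MA_STATE(mas, mt, 0, 0);
	MA_STATE(newmas, &newmt, 0, 0);

	init_rwsem(&newmt_lock);
	mt_init_flags(&newmt, MT_FLAGS_ALLOC_RANGE | MT_FLAGS_LOCK_EXTERN);
	mt_set_external_lock(&newmt, &newmt_lock);

	/*
	 * Hold the external rwsem, not mas_lock(): the preallocation
	 * below now uses GFP_KERNEL and may sleep.
	 */
	down_write(&newmt_lock);
	if (mas_expected_entries(&newmas, nr_entries)) {
		up_write(&newmt_lock);
		return -ENOMEM;
	}

	rcu_read_lock();
	mas_for_each(&mas, val, ULONG_MAX) {
		newmas.index = mas.index;
		newmas.last = mas.last;
		mas_store(&newmas, val);	/* consumes preallocated nodes */
	}
	rcu_read_unlock();

	mas_destroy(&newmas);	/* release unused preallocations */
	__mt_destroy(&newmt);	/* lock already held, see note below */
	up_write(&newmt_lock);
	return 0;
}

This is also why __mt_destroy() replaces mtree_destroy() in the tests: mtree_destroy() acquires the tree lock itself, which is wrong once the caller already holds the external lock.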
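
The new tools/include/linux/rwsem.h backs the same rwsem calls with POSIX rwlocks so the test code above also builds in the userspace test harness under tools/testing/radix-tree, which compiles the kernel test sources directly. One difference worth flagging: the shim's helpers return the pthread error codes, whereas the in-kernel init_rwsem(), down_write() and friends return void; the callers above ignore the return values, so the mismatch is harmless. A minimal userspace sanity check for the shim (hypothetical, not part of the patch; build with tools/include on the include path and link with -lpthread):

#include <assert.h>
#include <linux/rwsem.h>	/* resolves to the shim above */

int main(void)
{
	struct rw_semaphore sem;

	assert(init_rwsem(&sem) == 0);

	down_write(&sem);	/* writer excludes readers and other writers */
	up_write(&sem);

	down_read(&sem);	/* readers can share the lock */
	up_read(&sem);

	assert(exit_rwsem(&sem) == 0);
	return 0;
}

pthread_rwlock_t provides the writer-exclusion and reader-sharing semantics the tests rely on; kernel-side extras such as lockdep tracking simply do not exist in the shim.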