kmod: reduce atomic operations on kmod_concurrent and simplify

When checking whether to allow a kmod thread to kick off, we increment a
counter, then read it to see if the thread should be enabled, and decrement
it again if we were over the allowed limit. Splitting the increment far
apart from the decrement means two increments can land before either read,
giving a false failure on a thread that should have been allowed:

CPU1                          CPU2
atomic_inc()
                              atomic_inc()
atomic_read()
                              atomic_read()
atomic_dec()
                              atomic_dec()

In this case the read on CPU1 sees both atomic_inc()'s and can wrongly deny
that caller a kmod thread. We could try to prevent this with a lock or by
disabling preemption, but that is overkill. We can fix it by reducing the
number of atomic operations: invert the logic of the enabler. Instead of
incrementing kmod_concurrent as we get new kmod users, define
kmod_concurrent_max as the number of currently allowed kmod users, and as
new kmod users arrive just decrement it while it is still positive. This
combines the decrement and the read into one atomic operation, so we no
longer get the same false failure:

CPU1                          CPU2
atomic_dec_if_positive()
                              atomic_dec_if_positive()
atomic_inc()
                              atomic_inc()

The number of threads is computed at init, and since the current
computation of kmod_concurrent includes the thread count, we can avoid
setting kmod_concurrent_max later in boot through an init call by simply
sticking with 50 as kmod_concurrent_max. The assumption here is that a
system with modules must have at least ~16 MiB of RAM.

Suggested-by: Petr Mladek <pmladek@suse.com>
Suggested-by: Dmitry Torokhov <dmitry.torokhov@gmail.com>
Signed-off-by: Luis R. Rodriguez <mcgrof@kernel.org>
Reviewed-by: Petr Mladek <pmladek@suse.com>
Signed-off-by: Jessica Yu <jeyu@kernel.org>
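To make the new scheme concrete, below is a minimal userspace sketch of the
inverted-counter pattern, written with C11 <stdatomic.h> rather than the
kernel's atomic_t API; dec_if_positive() and request_module_slot() are
hypothetical stand-ins for atomic_dec_if_positive() and the slot handling in
__request_module(), not the kernel code itself.

/*
 * Userspace illustration only: reserve a "kmod slot" with a single
 * atomic check-and-decrement, and give it back when done.
 */
#include <stdatomic.h>
#include <stdio.h>

#define MAX_KMOD_CONCURRENT 50

static atomic_int kmod_concurrent_max = MAX_KMOD_CONCURRENT;

/* Decrement *v only if it is positive; return the decremented value.
 * A negative return means no slot was available and nothing changed. */
static int dec_if_positive(atomic_int *v)
{
        int old = atomic_load(v);

        while (old > 0) {
                if (atomic_compare_exchange_weak(v, &old, old - 1))
                        return old - 1;
                /* CAS failed: old now holds the current value, retry */
        }
        return old - 1;
}

static int request_module_slot(void)
{
        /* Check and reserve in a single atomic operation. */
        if (dec_if_positive(&kmod_concurrent_max) < 0)
                return -1;              /* too many concurrent requests */

        /* ... run modprobe here ... */

        atomic_fetch_add(&kmod_concurrent_max, 1);      /* give the slot back */
        return 0;
}

int main(void)
{
        printf("first request: %d\n", request_module_slot());
        return 0;
}

The key property is that the availability check and the reservation happen in
one atomic step, so two racing CPUs can at worst each consume a real slot;
neither can spuriously fail because of the other's in-flight increment.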
commit 165d1cc007
parent 93437353da
@@ -45,8 +45,6 @@
 
 #include <trace/events/module.h>
 
-extern int max_threads;
-
 #define CAP_BSET        (void *)1
 #define CAP_PI          (void *)2
 
@@ -56,6 +54,20 @@ static DEFINE_SPINLOCK(umh_sysctl_lock);
 static DECLARE_RWSEM(umhelper_sem);
 
 #ifdef CONFIG_MODULES
+/*
+ * Assuming:
+ *
+ * threads = div64_u64((u64) totalram_pages * (u64) PAGE_SIZE,
+ *                     (u64) THREAD_SIZE * 8UL);
+ *
+ * If you need less than 50 threads would mean we're dealing with systems
+ * smaller than 3200 pages. This assumes you are capable of having ~13M memory,
+ * and this would only be an upper limit, after which the OOM killer
+ * would take effect. Systems like these are very unlikely if modules are
+ * enabled.
+ */
+#define MAX_KMOD_CONCURRENT 50
+static atomic_t kmod_concurrent_max = ATOMIC_INIT(MAX_KMOD_CONCURRENT);
 
 /*
        modprobe_path is set via /proc/sys.
@@ -127,10 +139,7 @@ int __request_module(bool wait, const char *fmt, ...)
 {
        va_list args;
        char module_name[MODULE_NAME_LEN];
-       unsigned int max_modprobes;
        int ret;
-       static atomic_t kmod_concurrent = ATOMIC_INIT(0);
-#define MAX_KMOD_CONCURRENT 50  /* Completely arbitrary value - KAO */
        static int kmod_loop_msg;
 
        /*
@@ -154,21 +163,7 @@
        if (ret)
                return ret;
 
-       /* If modprobe needs a service that is in a module, we get a recursive
-        * loop.  Limit the number of running kmod threads to max_threads/2 or
-        * MAX_KMOD_CONCURRENT, whichever is the smaller.  A cleaner method
-        * would be to run the parents of this process, counting how many times
-        * kmod was invoked.  That would mean accessing the internals of the
-        * process tables to get the command line, proc_pid_cmdline is static
-        * and it is not worth changing the proc code just to handle this case.
-        * KAO.
-        *
-        * "trace the ppid" is simple, but will fail if someone's
-        * parent exits.  I think this is as good as it gets. --RR
-        */
-       max_modprobes = min(max_threads/2, MAX_KMOD_CONCURRENT);
-       atomic_inc(&kmod_concurrent);
-       if (atomic_read(&kmod_concurrent) > max_modprobes) {
+       if (atomic_dec_if_positive(&kmod_concurrent_max) < 0) {
                /* We may be blaming an innocent here, but unlikely */
                if (kmod_loop_msg < 5) {
                        printk(KERN_ERR
@@ -176,7 +171,6 @@ int __request_module(bool wait, const char *fmt, ...)
                               module_name);
                        kmod_loop_msg++;
                }
-               atomic_dec(&kmod_concurrent);
                return -ENOMEM;
        }
 
@@ -184,10 +178,12 @@ int __request_module(bool wait, const char *fmt, ...)
 
        ret = call_modprobe(module_name, wait ? UMH_WAIT_PROC : UMH_WAIT_EXEC);
 
-       atomic_dec(&kmod_concurrent);
+       atomic_inc(&kmod_concurrent_max);
+
        return ret;
 }
 EXPORT_SYMBOL(__request_module);
+
 #endif /* CONFIG_MODULES */
 
 static void call_usermodehelper_freeinfo(struct subprocess_info *info)
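A quick sanity check on the ~13M figure in the comment added above (assuming
4 KiB pages): 3200 pages * 4096 bytes/page = 13,107,200 bytes, i.e. roughly
13 MB, which is why systems small enough to merit fewer than 50 kmod threads
are treated as unlikely to have modules enabled at all.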