The problem: an opendir, readdir, closedir sequence can fail to report process ids that are continually in use throughout the sequence of system calls. For this race to trigger, the process that proc_pid_readdir stops at must exit before readdir is called again. This can cause ps to fail to report processes, and it is in violation of posix guarantees and normal application expectations with respect to readdir.

Currently there is no way to work around this problem in user space, short of providing a gargantuan buffer to user space so that the directory read all happens in one system call.

This patch implements the normal directory semantics for proc, guaranteeing that a directory entry that is neither created nor destroyed while the directory is being read will be returned. Entries that are created or destroyed during the readdir may or may not be seen. Furthermore, you may seek to a directory offset you have previously seen. These are the guarantees that ext[23] provides, that posix requires, and more importantly that user space expects. Plus it is a simple semantic on top of which to implement a reliable service: it is just a matter of calling readdir a second time if you are wondering whether something new has shown up.

These better semantics are implemented by scanning through the pids in numerical order and by making the file offset a pid plus a fixed offset.

The pid scan happens on the pid bitmap, which turns out to be remarkably efficient for a brute-force algorithm. Given that a typical cache line is 64 bytes and thus covers space for 64*8 == 512 pids, there are only 64 cache lines for the entire 32K pid space. A typical system will have 100 pids or more, so this is actually fewer cache lines than we would have to touch to scan a linked list, and the worst case of having to scan the entire pid bitmap is quite reasonable. If we need something more efficient we can move to a better data structure for indexing the pids, but for now what we have should be sufficient.

In addition, this takes no additional locks and is actually less code than what we are doing now.

Another very subtle bug in this area has also been fixed. It is possible to catch a task in the middle of de_thread, where a thread is assuming the identity of its thread group leader. This patch carefully handles that case, so if we hit it we do not fail to return the pid that is undergoing the de_thread dance.

Thanks to KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com> for providing the first fix, pointing this out and working on it.

[oleg@tv-sign.ru: fix it]
Signed-off-by: Eric W. Biederman <ebiederm@xmission.com>
Acked-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Signed-off-by: Oleg Nesterov <oleg@tv-sign.ru>
Cc: Jean Delvare <jdelvare@suse.de>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
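
As a concrete illustration of the scheme just described, here is a minimal sketch of such a readdir scan. This is not the patch's actual proc code: scan_pids(), emit(), and the TGID_OFFSET value are illustrative stand-ins; only find_ge_pid() and the pid-plus-fixed-offset encoding come from the patch itself.

#define TGID_OFFSET 4	/* hypothetical: lower offsets are ".", "..", etc. */

static void scan_pids(loff_t *pos)
{
	int nr = *pos - TGID_OFFSET;	/* pid value to resume the scan from */
	struct pid *pid;

	rcu_read_lock();
	while ((pid = find_ge_pid(nr)) != NULL) {
		nr = pid->nr;			/* next pid currently in use */
		*pos = nr + TGID_OFFSET;	/* the file offset encodes the pid */
		if (emit(nr) < 0)		/* buffer full: this entry is
						 * retried on the next readdir */
			break;
		nr++;				/* otherwise continue past this pid */
	}
	rcu_read_unlock();
}

Because the offset is the pid itself, an entry that stays alive across the whole opendir/readdir sequence can never be skipped, no matter which of its neighbours exit in between.
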
#ifndef _LINUX_PID_H
#define _LINUX_PID_H

#include <linux/rcupdate.h>

enum pid_type
{
	PIDTYPE_PID,
	PIDTYPE_PGID,
	PIDTYPE_SID,
	PIDTYPE_MAX
};

/*
 * What is struct pid?
 *
 * A struct pid is the kernel's internal notion of a process identifier.
 * It refers to individual tasks, process groups, and sessions. While
 * there are processes attached to it the struct pid lives in a hash
 * table, so it and then the processes that it refers to can be found
 * quickly from the numeric pid value. The attached processes may be
 * quickly accessed by following pointers from struct pid.
 *
 * Storing pid_t values in the kernel and referring to them later has a
 * problem. The process originally with that pid may have exited and the
 * pid allocator wrapped, and another process could have come along
 * and been assigned that pid.
 *
 * Referring to user space processes by holding a reference to struct
 * task_struct has a problem. When the user space process exits
 * the now useless task_struct is still kept. A task_struct plus a
 * stack consumes around 10K of low kernel memory. More precisely
 * this is THREAD_SIZE + sizeof(struct task_struct). By comparison
 * a struct pid is about 64 bytes.
 *
 * Holding a reference to struct pid solves both of these problems.
 * It is small so holding a reference does not consume a lot of
 * resources, and since a new struct pid is allocated when the numeric
 * pid value is reused we don't mistakenly refer to new processes.
 */

struct pid
{
	atomic_t count;
	/* Try to keep pid_chain in the same cacheline as nr for find_pid */
	int nr;
	struct hlist_node pid_chain;
	/* lists of tasks that use this pid */
	struct hlist_head tasks[PIDTYPE_MAX];
	struct rcu_head rcu;
};

struct pid_link
{
	struct hlist_node node;
	struct pid *pid;
};

static inline struct pid *get_pid(struct pid *pid)
{
	if (pid)
		atomic_inc(&pid->count);
	return pid;
}

extern void FASTCALL(put_pid(struct pid *pid));
extern struct task_struct *FASTCALL(pid_task(struct pid *pid, enum pid_type));
extern struct task_struct *FASTCALL(get_pid_task(struct pid *pid,
						enum pid_type));

/*
 * attach_pid() and detach_pid() must be called with the tasklist_lock
 * write-held.
 */
extern int FASTCALL(attach_pid(struct task_struct *task,
				enum pid_type type, int nr));

extern void FASTCALL(detach_pid(struct task_struct *task, enum pid_type));
extern void FASTCALL(transfer_pid(struct task_struct *old,
				  struct task_struct *new, enum pid_type));

/*
 * look up a PID in the hash table. Must be called with the tasklist_lock
 * or rcu_read_lock() held.
 */
extern struct pid *FASTCALL(find_pid(int nr));

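/*
 * Usage sketch (an editor's illustration, not part of the original
 * header): the lockless lookup variant. The result is only valid for
 * the duration of the RCU read-side section unless a reference is
 * taken with get_pid().
 */
#if 0
	rcu_read_lock();
	pid = find_pid(nr);
	if (pid)
		pid = get_pid(pid);	/* keep it past rcu_read_unlock() */
	rcu_read_unlock();
#endif
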
/*
 * Lookup a PID in the hash table, and return with its count elevated.
 */
extern struct pid *find_get_pid(int nr);
extern struct pid *find_ge_pid(int nr);

extern struct pid *alloc_pid(void);
extern void FASTCALL(free_pid(struct pid *pid));

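/*
 * Usage sketch (an editor's illustration, not part of the original
 * header): remember a process by struct pid rather than pid_t, and
 * resolve it back to a task only when needed. If the numeric pid is
 * recycled in the meantime the lookup simply fails instead of naming
 * the wrong process.
 */
#if 0
	struct pid *watched = find_get_pid(nr);	/* reference count elevated */

	/* ... later, possibly long after the process has exited ... */
	struct task_struct *task = get_pid_task(watched, PIDTYPE_PID);
	if (task) {
		/* the original process still exists; act on it */
		put_task_struct(task);	/* get_pid_task took a task reference */
	}
	put_pid(watched);		/* drop our struct pid reference */
#endif
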
#define pid_next(task, type) \
	((task)->pids[(type)].node.next)

#define pid_next_task(task, type) \
	hlist_entry(pid_next(task, type), struct task_struct, \
			pids[(type)].node)

/* We could use hlist_for_each_entry_rcu here but it takes more arguments
 * than the do_each_task_pid/while_each_task_pid. So we roll our own
 * to preserve the existing interface.
 */
#define do_each_task_pid(who, type, task) \
	if ((task = find_task_by_pid_type(type, who))) { \
		prefetch(pid_next(task, type)); \
		do {

#define while_each_task_pid(who, type, task) \
	} while (pid_next(task, type) && ({ \
		task = pid_next_task(task, type); \
		rcu_dereference(task); \
		prefetch(pid_next(task, type)); \
		1; }) ); \
	}
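
/*
 * Usage sketch (an editor's illustration): iterate over every task in
 * process group "pgrp", the way signal delivery code walks a whole
 * process group. Callers hold rcu_read_lock() or the tasklist_lock.
 */
#if 0
	struct task_struct *p;

	do_each_task_pid(pgrp, PIDTYPE_PGID, p) {
		/* act on each task p in the process group */
	} while_each_task_pid(pgrp, PIDTYPE_PGID, p);
#endif
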
#endif /* _LINUX_PID_H */