rcu: Disable lockdep checking in RCU list-traversal primitives
The theory is that use of bare rcu_dereference() is more prone to error
than use of the RCU list-traversal primitives.  Therefore, disable
lockdep RCU read-side critical-section checking in these primitives for
the time being.  Once all of the rcu_dereference() uses have been dealt
with, it may be time to re-enable lockdep checking for the RCU
list-traversal primitives.

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: laijs@cn.fujitsu.com
Cc: dipankar@in.ibm.com
Cc: mathieu.desnoyers@polymtl.ca
Cc: josh@joshtriplett.org
Cc: dvhltc@us.ibm.com
Cc: niv@us.ibm.com
Cc: peterz@infradead.org
Cc: rostedt@goodmis.org
Cc: Valdis.Kletnieks@vt.edu
Cc: dhowells@redhat.com
LKML-Reference: <1266887105-1528-4-git-send-email-paulmck@linux.vnet.ibm.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
commit 3120438ad6
parent 0632eb3d75
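For context, the "RCU list-traversal primitives" the message refers to are the list_for_each_entry_rcu()-style macros changed below. A minimal, kernel-style reader sketch of how they are normally used under rcu_read_lock() follows; demo_node, demo_list and demo_lookup() are illustrative names, not part of this patch:

#include <linux/rculist.h>
#include <linux/rcupdate.h>

/* Hypothetical example structure; not part of this patch. */
struct demo_node {
	int key;
	struct list_head link;
};

static LIST_HEAD(demo_list);	/* updates protected by some update-side lock */

/* Reader: the traversal primitive hides the rcu_dereference() calls. */
static int demo_lookup(int key)
{
	struct demo_node *n;
	int found = 0;

	rcu_read_lock();
	list_for_each_entry_rcu(n, &demo_list, link) {
		if (n->key == key) {
			found = 1;
			break;
		}
	}
	rcu_read_unlock();
	return found;
}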
include/linux/rculist.h
@@ -208,7 +208,7 @@ static inline void list_splice_init_rcu(struct list_head *list,
  * primitives such as list_add_rcu() as long as it's guarded by rcu_read_lock().
  */
 #define list_entry_rcu(ptr, type, member) \
-	container_of(rcu_dereference(ptr), type, member)
+	container_of(rcu_dereference_raw(ptr), type, member)
 
 /**
  * list_first_entry_rcu - get the first element from a list
@@ -225,9 +225,9 @@ static inline void list_splice_init_rcu(struct list_head *list,
 	list_entry_rcu((ptr)->next, type, member)
 
 #define __list_for_each_rcu(pos, head) \
-	for (pos = rcu_dereference((head)->next); \
+	for (pos = rcu_dereference_raw((head)->next); \
 		pos != (head); \
-		pos = rcu_dereference(pos->next))
+		pos = rcu_dereference_raw(pos->next))
 
 /**
  * list_for_each_entry_rcu - iterate over rcu list of given type
@@ -257,9 +257,9 @@ static inline void list_splice_init_rcu(struct list_head *list,
  * as long as the traversal is guarded by rcu_read_lock().
  */
 #define list_for_each_continue_rcu(pos, head) \
-	for ((pos) = rcu_dereference((pos)->next); \
+	for ((pos) = rcu_dereference_raw((pos)->next); \
 		prefetch((pos)->next), (pos) != (head); \
-		(pos) = rcu_dereference((pos)->next))
+		(pos) = rcu_dereference_raw((pos)->next))
 
 /**
  * list_for_each_entry_continue_rcu - continue iteration over list of given type
@@ -418,10 +418,10 @@ static inline void hlist_add_after_rcu(struct hlist_node *prev,
  * as long as the traversal is guarded by rcu_read_lock().
  */
 #define hlist_for_each_entry_rcu(tpos, pos, head, member) \
-	for (pos = rcu_dereference((head)->first); \
+	for (pos = rcu_dereference_raw((head)->first); \
 		pos && ({ prefetch(pos->next); 1; }) && \
 		({ tpos = hlist_entry(pos, typeof(*tpos), member); 1; }); \
-		pos = rcu_dereference(pos->next))
+		pos = rcu_dereference_raw(pos->next))
 
 #endif	/* __KERNEL__ */
 #endif
include/linux/rculist_nulls.h
@@ -101,10 +101,10 @@ static inline void hlist_nulls_add_head_rcu(struct hlist_nulls_node *n,
  *
  */
 #define hlist_nulls_for_each_entry_rcu(tpos, pos, head, member) \
-	for (pos = rcu_dereference((head)->first); \
+	for (pos = rcu_dereference_raw((head)->first); \
 		(!is_a_nulls(pos)) && \
 		({ tpos = hlist_nulls_entry(pos, typeof(*tpos), member); 1; }); \
-		pos = rcu_dereference(pos->next))
+		pos = rcu_dereference_raw(pos->next))
 
 #endif
 #endif
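The practical effect of switching these macros to rcu_dereference_raw() is that they no longer trigger lockdep's RCU checking when they are legitimately used outside an RCU read-side critical section, for example on the update side while holding the lock that protects the list. A hedged sketch of that situation; demo_lock, demo_list, demo_node and demo_remove() are illustrative names, not part of this patch:

#include <linux/rculist.h>
#include <linux/spinlock.h>

struct demo_node {
	int key;
	struct list_head link;
};

static LIST_HEAD(demo_list);
static DEFINE_SPINLOCK(demo_lock);	/* serializes updates to demo_list */

/*
 * Updater: holds demo_lock rather than rcu_read_lock(), which is a valid
 * way to keep the list stable.  With rcu_dereference_raw() underneath,
 * the RCU traversal primitive stays quiet under CONFIG_PROVE_RCU here.
 */
static void demo_remove(int key)
{
	struct demo_node *n;

	spin_lock(&demo_lock);
	list_for_each_entry_rcu(n, &demo_list, link) {
		if (n->key == key) {
			list_del_rcu(&n->link);
			break;
		}
	}
	spin_unlock(&demo_lock);
	/* Waiting for a grace period before freeing 'n' is omitted here. */
}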