From: "Paul E. McKenney"

This patch uses the rcu_assign_pointer() API to eliminate a number of
explicit memory barriers from code using RCU.  This has been tested
successfully on i386 and ppc64.

Signed-off-by:
Signed-off-by: Andrew Morton
---

 25-akpm/arch/x86_64/kernel/mce.c |    3 +--
 25-akpm/include/linux/list.h     |    2 --
 25-akpm/net/core/netfilter.c     |    3 +--
 25-akpm/net/decnet/dn_route.c    |   13 +++++--------
 25-akpm/net/ipv4/devinet.c       |    3 +--
 25-akpm/net/ipv4/route.c         |    7 +++----
 25-akpm/net/sched/sch_api.c      |    3 +--
 7 files changed, 12 insertions(+), 22 deletions(-)

diff -puN arch/x86_64/kernel/mce.c~rcu-use-rcu_assign_pointer arch/x86_64/kernel/mce.c
--- 25/arch/x86_64/kernel/mce.c~rcu-use-rcu_assign_pointer	2004-10-24 03:23:46.314371208 -0700
+++ 25-akpm/arch/x86_64/kernel/mce.c	2004-10-24 03:23:46.327369232 -0700
@@ -361,8 +361,7 @@ static ssize_t mce_read(struct file *fil
 	memset(mcelog.entry, 0, next * sizeof(struct mce));
 	mcelog.next = 0;
-	smp_wmb();
-
+	synchronize_kernel();

 	/* Collect entries that were still getting written before the
 	   synchronize. */
diff -puN include/linux/list.h~rcu-use-rcu_assign_pointer include/linux/list.h
--- 25/include/linux/list.h~rcu-use-rcu_assign_pointer	2004-10-24 03:23:46.315371056 -0700
+++ 25-akpm/include/linux/list.h	2004-10-24 03:23:46.328369080 -0700
@@ -571,8 +571,6 @@ static inline void hlist_del_init(struct
 	}
 }

-#define hlist_del_rcu_init hlist_del_init
-
 static inline void hlist_add_head(struct hlist_node *n, struct hlist_head *h)
 {
 	struct hlist_node *first = h->first;
diff -puN net/core/netfilter.c~rcu-use-rcu_assign_pointer net/core/netfilter.c
--- 25/net/core/netfilter.c~rcu-use-rcu_assign_pointer	2004-10-24 03:23:46.317370752 -0700
+++ 25-akpm/net/core/netfilter.c	2004-10-24 03:23:46.329368928 -0700
@@ -751,10 +751,9 @@ int nf_log_register(int pf, nf_logfn *lo

 	/* Any setup of logging members must be done before
 	 * substituting pointer. */
-	smp_wmb();
 	spin_lock(&nf_log_lock);
 	if (!nf_logging[pf]) {
-		nf_logging[pf] = logfn;
+		rcu_assign_pointer(nf_logging[pf], logfn);
 		ret = 0;
 	}
 	spin_unlock(&nf_log_lock);
diff -puN net/decnet/dn_route.c~rcu-use-rcu_assign_pointer net/decnet/dn_route.c
--- 25/net/decnet/dn_route.c~rcu-use-rcu_assign_pointer	2004-10-24 03:23:46.319370448 -0700
+++ 25-akpm/net/decnet/dn_route.c	2004-10-24 03:23:46.331368624 -0700
@@ -287,10 +287,9 @@ static int dn_insert_route(struct dn_rou
 		if (compare_keys(&rth->fl, &rt->fl)) {
 			/* Put it first */
 			*rthp = rth->u.rt_next;
-			smp_wmb();
-			rth->u.rt_next = dn_rt_hash_table[hash].chain;
-			smp_wmb();
-			dn_rt_hash_table[hash].chain = rth;
+			rcu_assign_pointer(rth->u.rt_next,
+					   dn_rt_hash_table[hash].chain);
+			rcu_assign_pointer(dn_rt_hash_table[hash].chain, rth);

 			rth->u.dst.__use++;
 			dst_hold(&rth->u.dst);
@@ -304,10 +303,8 @@ static int dn_insert_route(struct dn_rou
 		rthp = &rth->u.rt_next;
 	}

-	smp_wmb();
-	rt->u.rt_next = dn_rt_hash_table[hash].chain;
-	smp_wmb();
-	dn_rt_hash_table[hash].chain = rt;
+	rcu_assign_pointer(rt->u.rt_next, dn_rt_hash_table[hash].chain);
+	rcu_assign_pointer(dn_rt_hash_table[hash].chain, rt);

 	dst_hold(&rt->u.dst);
 	rt->u.dst.__use++;
diff -puN net/ipv4/devinet.c~rcu-use-rcu_assign_pointer net/ipv4/devinet.c
--- 25/net/ipv4/devinet.c~rcu-use-rcu_assign_pointer	2004-10-24 03:23:46.320370296 -0700
+++ 25-akpm/net/ipv4/devinet.c	2004-10-24 03:23:46.332368472 -0700
@@ -158,8 +158,7 @@ struct in_device *inetdev_init(struct ne

 	/* Account for reference dev->ip_ptr */
 	in_dev_hold(in_dev);
-	smp_wmb();
-	dev->ip_ptr = in_dev;
+	rcu_assign_pointer(dev->ip_ptr, in_dev);

 #ifdef CONFIG_SYSCTL
 	devinet_sysctl_register(in_dev, &in_dev->cnf);
diff -puN net/ipv4/route.c~rcu-use-rcu_assign_pointer net/ipv4/route.c
--- 25/net/ipv4/route.c~rcu-use-rcu_assign_pointer	2004-10-24 03:23:46.322369992 -0700
+++ 25-akpm/net/ipv4/route.c	2004-10-24 03:23:46.334368168 -0700
@@ -801,14 +801,13 @@ restart:
 			 * must be visible to another weakly ordered CPU before
 			 * the insertion at the start of the hash chain.
 			 */
-			smp_wmb();
-			rth->u.rt_next = rt_hash_table[hash].chain;
+			rcu_assign_pointer(rth->u.rt_next,
+					   rt_hash_table[hash].chain);
 			/*
 			 * Since lookup is lockfree, the update writes
 			 * must be ordered for consistency on SMP.
 			 */
-			smp_wmb();
-			rt_hash_table[hash].chain = rth;
+			rcu_assign_pointer(rt_hash_table[hash].chain, rth);

 			rth->u.dst.__use++;
 			dst_hold(&rth->u.dst);
diff -puN net/sched/sch_api.c~rcu-use-rcu_assign_pointer net/sched/sch_api.c
--- 25/net/sched/sch_api.c~rcu-use-rcu_assign_pointer	2004-10-24 03:23:46.324369688 -0700
+++ 25-akpm/net/sched/sch_api.c	2004-10-24 03:23:46.335368016 -0700
@@ -453,10 +453,9 @@ qdisc_create(struct net_device *dev, u32

 	/* enqueue is accessed locklessly - make sure it's visible
 	 * before we set a netdevice's qdisc pointer to sch */
-	smp_wmb();
 	if (!ops->init || (err = ops->init(sch, tca[TCA_OPTIONS-1])) == 0) {
 		qdisc_lock_tree(dev);
-		list_add_tail(&sch->list, &dev->qdisc_list);
+		list_add_tail_rcu(&sch->list, &dev->qdisc_list);
 		qdisc_unlock_tree(dev);

 #ifdef CONFIG_NET_ESTIMATOR
_