From: Thomas Gleixner

To make spinlock/rwlock initialization consistent all over the kernel,
this patch converts explicit lock initializers into spin_lock_init()
and rwlock_init() calls.

Currently, spinlocks and rwlocks are initialized in two different ways:

	lock = SPIN_LOCK_UNLOCKED
	spin_lock_init(&lock)

	rwlock = RW_LOCK_UNLOCKED
	rwlock_init(&rwlock)

This patch converts all explicit lock initializations to
spin_lock_init() or rwlock_init().  (Besides consistency, this also
helps automatic lock validators and debugging code.)

The conversion was done with a script; it was verified manually, and it
was reviewed, compiled and tested as far as possible on x86, ARM and PPC.

There is no runtime overhead or actual code change resulting from this
patch, because spin_lock_init() and rwlock_init() are macros and are
thus equivalent to the explicit initialization method.

This is the second batch of the unifying patches.

Signed-off-by: Thomas Gleixner
Acked-by: Ingo Molnar
Signed-off-by: Andrew Morton
---

 25-akpm/net/atm/mpc.c                      |    4 ++--
 25-akpm/net/bridge/br_if.c                 |    4 ++--
 25-akpm/net/bridge/netfilter/ebtables.c    |    2 +-
 25-akpm/net/core/neighbour.c               |    6 +++---
 25-akpm/net/core/sock.c                    |    4 ++--
 25-akpm/net/decnet/dn_route.c              |    2 +-
 25-akpm/net/ipv4/igmp.c                    |    4 ++--
 25-akpm/net/ipv4/ip_fragment.c             |    2 +-
 25-akpm/net/ipv4/ipvs/ip_vs_conn.c         |    4 ++--
 25-akpm/net/ipv4/ipvs/ip_vs_ctl.c          |   10 +++++-----
 25-akpm/net/ipv4/ipvs/ip_vs_lblc.c         |    2 +-
 25-akpm/net/ipv4/ipvs/ip_vs_lblcr.c        |    4 ++--
 25-akpm/net/ipv4/netfilter/arp_tables.c    |    2 +-
 25-akpm/net/ipv4/netfilter/ip_tables.c     |    2 +-
 25-akpm/net/ipv4/netfilter/ipt_hashlimit.c |    2 +-
 25-akpm/net/ipv4/netfilter/ipt_recent.c    |    2 +-
 25-akpm/net/ipv4/route.c                   |    2 +-
 25-akpm/net/ipv4/tcp.c                     |    6 +++---
 25-akpm/net/ipv4/tcp_minisocks.c           |    4 ++--
 25-akpm/net/ipv6/addrconf.c                |    2 +-
 25-akpm/net/ipv6/anycast.c                 |    2 +-
 25-akpm/net/ipv6/mcast.c                   |    6 +++---
 25-akpm/net/ipv6/netfilter/ip6_tables.c    |    2 +-
 25-akpm/net/ipv6/reassembly.c              |    2 +-
 25-akpm/net/netrom/nr_route.c              |    2 +-
 25-akpm/net/sctp/associola.c               |    2 +-
 25-akpm/net/sctp/endpointola.c             |    2 +-
 25-akpm/net/sctp/protocol.c                |   10 +++++-----
 25-akpm/net/xfrm/xfrm_policy.c             |    2 +-
 25-akpm/net/xfrm/xfrm_state.c              |    2 +-
 30 files changed, 51 insertions(+), 51 deletions(-)

diff -puN net/atm/mpc.c~lock-initializer-unifying-batch-2-networking net/atm/mpc.c
--- 25/net/atm/mpc.c~lock-initializer-unifying-batch-2-networking	Thu Oct 28 15:55:37 2004
+++ 25-akpm/net/atm/mpc.c	Thu Oct 28 15:55:37 2004
@@ -267,8 +267,8 @@ static struct mpoa_client *alloc_mpc(voi
 	if (mpc == NULL)
 		return NULL;
 	memset(mpc, 0, sizeof(struct mpoa_client));
-	mpc->ingress_lock = RW_LOCK_UNLOCKED;
-	mpc->egress_lock = RW_LOCK_UNLOCKED;
+	rwlock_init(&mpc->ingress_lock);
+	rwlock_init(&mpc->egress_lock);
 	mpc->next = mpcs;
 	atm_mpoa_init_cache(mpc);
diff -puN net/bridge/br_if.c~lock-initializer-unifying-batch-2-networking net/bridge/br_if.c
--- 25/net/bridge/br_if.c~lock-initializer-unifying-batch-2-networking	Thu Oct 28 15:55:37 2004
+++ 25-akpm/net/bridge/br_if.c	Thu Oct 28 15:55:37 2004
@@ -147,9 +147,9 @@ static struct net_device *new_bridge_dev
 	br = netdev_priv(dev);
 	br->dev = dev;
-	br->lock = SPIN_LOCK_UNLOCKED;
+	spin_lock_init(&br->lock);
 	INIT_LIST_HEAD(&br->port_list);
-	br->hash_lock = SPIN_LOCK_UNLOCKED;
+	spin_lock_init(&br->hash_lock);
 	br->bridge_id.prio[0] = 0x80;
 	br->bridge_id.prio[1] = 0x00;
diff -puN net/bridge/netfilter/ebtables.c~lock-initializer-unifying-batch-2-networking net/bridge/netfilter/ebtables.c
--- 25/net/bridge/netfilter/ebtables.c~lock-initializer-unifying-batch-2-networking	Thu Oct 28 15:55:37 2004
+++ 25-akpm/net/bridge/netfilter/ebtables.c	Thu Oct 28 15:55:37 2004
@@ -1168,7 +1168,7 @@ int ebt_register_table(struct ebt_table
 	}
 	table->private = newinfo;
-	table->lock = RW_LOCK_UNLOCKED;
+	rwlock_init(&table->lock);
 	ret = down_interruptible(&ebt_mutex);
 	if (ret != 0)
 		goto free_chainstack;
diff -puN net/core/neighbour.c~lock-initializer-unifying-batch-2-networking net/core/neighbour.c
--- 25/net/core/neighbour.c~lock-initializer-unifying-batch-2-networking	Thu Oct 28 15:55:37 2004
+++ 25-akpm/net/core/neighbour.c	Thu Oct 28 15:55:37 2004
@@ -270,7 +270,7 @@ static struct neighbour *neigh_alloc(str
 	memset(n, 0, tbl->entry_size);
 	skb_queue_head_init(&n->arp_queue);
-	n->lock = RW_LOCK_UNLOCKED;
+	rwlock_init(&n->lock);
 	n->updated = n->used = now;
 	n->nud_state = NUD_NONE;
 	n->output = neigh_blackhole;
@@ -1089,7 +1089,7 @@ static void neigh_hh_init(struct neighbo
 	if (!hh && (hh = kmalloc(sizeof(*hh), GFP_ATOMIC)) != NULL) {
 		memset(hh, 0, sizeof(struct hh_cache));
-		hh->hh_lock = RW_LOCK_UNLOCKED;
+		rwlock_init(&hh->hh_lock);
 		hh->hh_type = protocol;
 		atomic_set(&hh->hh_refcnt, 0);
 		hh->hh_next = NULL;
@@ -1365,7 +1365,7 @@ void neigh_table_init(struct neigh_table
 	get_random_bytes(&tbl->hash_rnd, sizeof(tbl->hash_rnd));
-	tbl->lock = RW_LOCK_UNLOCKED;
+	rwlock_init(&tbl->lock);
 	init_timer(&tbl->gc_timer);
 	tbl->gc_timer.data = (unsigned long)tbl;
 	tbl->gc_timer.function = neigh_periodic_timer;
diff -puN net/core/sock.c~lock-initializer-unifying-batch-2-networking net/core/sock.c
--- 25/net/core/sock.c~lock-initializer-unifying-batch-2-networking	Thu Oct 28 15:55:37 2004
+++ 25-akpm/net/core/sock.c	Thu Oct 28 15:55:37 2004
@@ -1184,8 +1184,8 @@ void sock_init_data(struct socket *sock,
 	} else
 		sk->sk_sleep = NULL;
-	sk->sk_dst_lock = RW_LOCK_UNLOCKED;
-	sk->sk_callback_lock = RW_LOCK_UNLOCKED;
+	rwlock_init(&sk->sk_dst_lock);
+	rwlock_init(&sk->sk_callback_lock);
 	sk->sk_state_change = sock_def_wakeup;
 	sk->sk_data_ready = sock_def_readable;
diff -puN net/decnet/dn_route.c~lock-initializer-unifying-batch-2-networking net/decnet/dn_route.c
--- 25/net/decnet/dn_route.c~lock-initializer-unifying-batch-2-networking	Thu Oct 28 15:55:37 2004
+++ 25-akpm/net/decnet/dn_route.c	Thu Oct 28 15:55:37 2004
@@ -1822,7 +1822,7 @@ void __init dn_route_init(void)
 		dn_rt_hash_mask--;
 	for(i = 0; i <= dn_rt_hash_mask; i++) {
-		dn_rt_hash_table[i].lock = SPIN_LOCK_UNLOCKED;
+		spin_lock_init(&dn_rt_hash_table[i].lock);
 		dn_rt_hash_table[i].chain = NULL;
 	}
diff -puN net/ipv4/igmp.c~lock-initializer-unifying-batch-2-networking net/ipv4/igmp.c
--- 25/net/ipv4/igmp.c~lock-initializer-unifying-batch-2-networking	Thu Oct 28 15:55:37 2004
+++ 25-akpm/net/ipv4/igmp.c	Thu Oct 28 15:55:37 2004
@@ -1252,8 +1252,8 @@ void ip_mc_init_dev(struct in_device *in
 	in_dev->mr_qrv = IGMP_Unsolicited_Report_Count;
 #endif
-	in_dev->mc_list_lock = RW_LOCK_UNLOCKED;
-	in_dev->mc_tomb_lock = SPIN_LOCK_UNLOCKED;
+	rwlock_init(&in_dev->mc_list_lock);
+	spin_lock_init(&in_dev->mc_tomb_lock);
 }
 /* Device going up */
diff -puN net/ipv4/ip_fragment.c~lock-initializer-unifying-batch-2-networking net/ipv4/ip_fragment.c
--- 25/net/ipv4/ip_fragment.c~lock-initializer-unifying-batch-2-networking	Thu Oct 28 15:55:37 2004
+++ 25-akpm/net/ipv4/ip_fragment.c	Thu Oct 28 15:55:37 2004
@@ -373,7 +373,7 @@ static struct ipq *ip_frag_create(unsign
 	init_timer(&qp->timer);
 	qp->timer.data = (unsigned long) qp;	/* pointer to queue */
 	qp->timer.function = ip_expire;		/* expire function */
-	qp->lock = SPIN_LOCK_UNLOCKED;
+	spin_lock_init(&qp->lock);
 	atomic_set(&qp->refcnt, 1);
 	return ip_frag_intern(hash, qp);
diff -puN net/ipv4/ipvs/ip_vs_conn.c~lock-initializer-unifying-batch-2-networking net/ipv4/ipvs/ip_vs_conn.c
--- 25/net/ipv4/ipvs/ip_vs_conn.c~lock-initializer-unifying-batch-2-networking	Thu Oct 28 15:55:37 2004
+++ 25-akpm/net/ipv4/ipvs/ip_vs_conn.c	Thu Oct 28 15:55:37 2004
@@ -580,7 +580,7 @@ ip_vs_conn_new(int proto, __u32 caddr, _
 	cp->daddr = daddr;
 	cp->dport = dport;
 	cp->flags = flags;
-	cp->lock = SPIN_LOCK_UNLOCKED;
+	spin_lock_init(&cp->lock);
 	/*
 	 * Set the entry is referenced by the current thread before hashing
@@ -894,7 +894,7 @@ int ip_vs_conn_init(void)
 	}
 	for (idx = 0; idx < CT_LOCKARRAY_SIZE; idx++) {
-		__ip_vs_conntbl_lock_array[idx].l = RW_LOCK_UNLOCKED;
+		rwlock_init(&__ip_vs_conntbl_lock_array[idx].l);
 	}
 	proc_net_fops_create("ip_vs_conn", 0, &ip_vs_conn_fops);
diff -puN net/ipv4/ipvs/ip_vs_ctl.c~lock-initializer-unifying-batch-2-networking net/ipv4/ipvs/ip_vs_ctl.c
--- 25/net/ipv4/ipvs/ip_vs_ctl.c~lock-initializer-unifying-batch-2-networking	Thu Oct 28 15:55:37 2004
+++ 25-akpm/net/ipv4/ipvs/ip_vs_ctl.c	Thu Oct 28 15:55:37 2004
@@ -746,8 +746,8 @@ ip_vs_new_dest(struct ip_vs_service *svc
 	atomic_set(&dest->refcnt, 0);
 	INIT_LIST_HEAD(&dest->d_list);
-	dest->dst_lock = SPIN_LOCK_UNLOCKED;
-	dest->stats.lock = SPIN_LOCK_UNLOCKED;
+	spin_lock_init(&dest->dst_lock);
+	spin_lock_init(&dest->stats.lock);
 	__ip_vs_update_dest(svc, dest, udest);
 	ip_vs_new_estimator(&dest->stats);
@@ -1062,8 +1062,8 @@ ip_vs_add_service(struct ip_vs_service_u
 	svc->netmask = u->netmask;
 	INIT_LIST_HEAD(&svc->destinations);
-	svc->sched_lock = RW_LOCK_UNLOCKED;
-	svc->stats.lock = SPIN_LOCK_UNLOCKED;
+	rwlock_init(&svc->sched_lock);
+	spin_lock_init(&svc->stats.lock);
 	/* Bind the scheduler */
 	ret = ip_vs_bind_scheduler(svc, sched);
@@ -2357,7 +2357,7 @@ int ip_vs_control_init(void)
 	}
 	memset(&ip_vs_stats, 0, sizeof(ip_vs_stats));
-	ip_vs_stats.lock = SPIN_LOCK_UNLOCKED;
+	spin_lock_init(&ip_vs_stats.lock);
 	ip_vs_new_estimator(&ip_vs_stats);
 	/* Hook the defense timer */
diff -puN net/ipv4/ipvs/ip_vs_lblc.c~lock-initializer-unifying-batch-2-networking net/ipv4/ipvs/ip_vs_lblc.c
--- 25/net/ipv4/ipvs/ip_vs_lblc.c~lock-initializer-unifying-batch-2-networking	Thu Oct 28 15:55:37 2004
+++ 25-akpm/net/ipv4/ipvs/ip_vs_lblc.c	Thu Oct 28 15:55:37 2004
@@ -409,7 +409,7 @@ static int ip_vs_lblc_init_svc(struct ip
 	for (i=0; i<IP_VS_LBLC_TAB_SIZE; i++) {
 		INIT_LIST_HEAD(&tbl->bucket[i]);
 	}
-	tbl->lock = RW_LOCK_UNLOCKED;
+	rwlock_init(&tbl->lock);
 	tbl->max_size = IP_VS_LBLC_TAB_SIZE*16;
 	tbl->rover = 0;
 	tbl->counter = 1;
diff -puN net/ipv4/ipvs/ip_vs_lblcr.c~lock-initializer-unifying-batch-2-networking net/ipv4/ipvs/ip_vs_lblcr.c
--- 25/net/ipv4/ipvs/ip_vs_lblcr.c~lock-initializer-unifying-batch-2-networking	Thu Oct 28 15:55:37 2004
+++ 25-akpm/net/ipv4/ipvs/ip_vs_lblcr.c	Thu Oct 28 15:55:37 2004
@@ -362,7 +362,7 @@ static inline struct ip_vs_lblcr_entry *
 	/* initilize its dest set */
 	atomic_set(&(en->set.size), 0);
 	en->set.list = NULL;
-	en->set.lock = RW_LOCK_UNLOCKED;
+	rwlock_init(&en->set.lock);
 	return en;
 }
@@ -659,7 +659,7 @@ static int ip_vs_lblcr_init_svc(struct i
 	for (i=0; i<IP_VS_LBLCR_TAB_SIZE; i++) {
 		INIT_LIST_HEAD(&tbl->bucket[i]);
 	}
-	tbl->lock = RW_LOCK_UNLOCKED;
+	rwlock_init(&tbl->lock);
 	tbl->max_size = IP_VS_LBLCR_TAB_SIZE*16;
 	tbl->rover = 0;
 	tbl->counter = 1;
diff -puN net/ipv4/netfilter/arp_tables.c~lock-initializer-unifying-batch-2-networking net/ipv4/netfilter/arp_tables.c
--- 25/net/ipv4/netfilter/arp_tables.c~lock-initializer-unifying-batch-2-networking	Thu Oct 28 15:55:37 2004
+++ 25-akpm/net/ipv4/netfilter/arp_tables.c	Thu Oct 28 15:55:37 2004
@@ -1199,7 +1199,7 @@ int arpt_register_table(struct arpt_tabl
 	/* save number of initial entries */
 	table->private->initial_entries = table->private->number;
-	table->lock = RW_LOCK_UNLOCKED;
+	rwlock_init(&table->lock);
 	list_prepend(&arpt_tables, table);
 unlock:
diff -puN net/ipv4/netfilter/ip_tables.c~lock-initializer-unifying-batch-2-networking net/ipv4/netfilter/ip_tables.c
--- 25/net/ipv4/netfilter/ip_tables.c~lock-initializer-unifying-batch-2-networking	Thu Oct 28 15:55:37 2004
+++ 25-akpm/net/ipv4/netfilter/ip_tables.c	Thu Oct 28 15:55:37 2004
@@ -1418,7 +1418,7 @@ int ipt_register_table(struct ipt_table
 	/* save number of initial entries */
 	table->private->initial_entries = table->private->number;
-	table->lock = RW_LOCK_UNLOCKED;
+	rwlock_init(&table->lock);
 	list_prepend(&ipt_tables, table);
 unlock:
diff -puN net/ipv4/netfilter/ipt_hashlimit.c~lock-initializer-unifying-batch-2-networking net/ipv4/netfilter/ipt_hashlimit.c
--- 25/net/ipv4/netfilter/ipt_hashlimit.c~lock-initializer-unifying-batch-2-networking	Thu Oct 28 15:55:37 2004
+++ 25-akpm/net/ipv4/netfilter/ipt_hashlimit.c	Thu Oct 28 15:55:37 2004
@@ -214,7 +214,7 @@ static int htable_create(struct ipt_hash
 	atomic_set(&hinfo->count, 0);
 	atomic_set(&hinfo->use, 1);
 	hinfo->rnd = 0;
-	hinfo->lock = SPIN_LOCK_UNLOCKED;
+	spin_lock_init(&hinfo->lock);
 	hinfo->pde = create_proc_entry(minfo->name, 0, hashlimit_procdir);
 	if (!hinfo->pde) {
 		vfree(hinfo);
diff -puN net/ipv4/netfilter/ipt_recent.c~lock-initializer-unifying-batch-2-networking net/ipv4/netfilter/ipt_recent.c
--- 25/net/ipv4/netfilter/ipt_recent.c~lock-initializer-unifying-batch-2-networking	Thu Oct 28 15:55:37 2004
+++ 25-akpm/net/ipv4/netfilter/ipt_recent.c	Thu Oct 28 15:55:37 2004
@@ -715,7 +715,7 @@ checkentry(const char *tablename,
 	curr_table = vmalloc(sizeof(struct recent_ip_tables));
 	if(curr_table == NULL) return -ENOMEM;
-	curr_table->list_lock = SPIN_LOCK_UNLOCKED;
+	spin_lock_init(&curr_table->list_lock);
 	curr_table->next = NULL;
 	curr_table->count = 1;
 	curr_table->time_pos = 0;
diff -puN net/ipv4/route.c~lock-initializer-unifying-batch-2-networking net/ipv4/route.c
--- 25/net/ipv4/route.c~lock-initializer-unifying-batch-2-networking	Thu Oct 28 15:55:37 2004
+++ 25-akpm/net/ipv4/route.c	Thu Oct 28 15:55:37 2004
@@ -2755,7 +2755,7 @@ int __init ip_rt_init(void)
 		rt_hash_mask--;
 	for (i = 0; i <= rt_hash_mask; i++) {
-		rt_hash_table[i].lock = SPIN_LOCK_UNLOCKED;
+		spin_lock_init(&rt_hash_table[i].lock);
 		rt_hash_table[i].chain = NULL;
 	}
diff -puN net/ipv4/tcp.c~lock-initializer-unifying-batch-2-networking net/ipv4/tcp.c
--- 25/net/ipv4/tcp.c~lock-initializer-unifying-batch-2-networking	Thu Oct 28 15:55:37 2004
+++ 25-akpm/net/ipv4/tcp.c	Thu Oct 28 15:55:37 2004
@@ -467,7 +467,7 @@ int tcp_listen_start(struct sock *sk)
 	sk->sk_max_ack_backlog = 0;
 	sk->sk_ack_backlog = 0;
 	tp->accept_queue = tp->accept_queue_tail = NULL;
-	tp->syn_wait_lock = RW_LOCK_UNLOCKED;
+	rwlock_init(&tp->syn_wait_lock);
 	tcp_delack_init(tp);
 	lopt = kmalloc(sizeof(struct tcp_listen_opt), GFP_KERNEL);
@@ -2307,7 +2307,7 @@ void __init tcp_init(void)
 	if (!tcp_ehash)
 		panic("Failed to allocate TCP established hash table\n");
 	for (i = 0; i < (tcp_ehash_size << 1); i++) {
-		tcp_ehash[i].lock = RW_LOCK_UNLOCKED;
+		rwlock_init(&tcp_ehash[i].lock);
 		INIT_HLIST_HEAD(&tcp_ehash[i].chain);
 	}
@@ -2323,7 +2323,7 @@ void __init tcp_init(void)
 	if (!tcp_bhash)
 		panic("Failed to allocate TCP bind hash table\n");
 	for (i = 0; i < tcp_bhash_size; i++) {
-		tcp_bhash[i].lock = SPIN_LOCK_UNLOCKED;
+		spin_lock_init(&tcp_bhash[i].lock);
 		INIT_HLIST_HEAD(&tcp_bhash[i].chain);
 	}
diff -puN net/ipv4/tcp_minisocks.c~lock-initializer-unifying-batch-2-networking net/ipv4/tcp_minisocks.c
--- 25/net/ipv4/tcp_minisocks.c~lock-initializer-unifying-batch-2-networking	Thu Oct 28 15:55:37 2004
+++ 25-akpm/net/ipv4/tcp_minisocks.c	Thu Oct 28 15:55:37 2004
@@ -706,7 +706,7 @@ struct sock *tcp_create_openreq_child(st
 		sock_lock_init(newsk);
 		bh_lock_sock(newsk);
-		newsk->sk_dst_lock = RW_LOCK_UNLOCKED;
+		rwlock_init(&newsk->sk_dst_lock);
 		atomic_set(&newsk->sk_rmem_alloc, 0);
 		skb_queue_head_init(&newsk->sk_receive_queue);
 		atomic_set(&newsk->sk_wmem_alloc, 0);
@@ -719,7 +719,7 @@ struct sock *tcp_create_openreq_child(st
 		newsk->sk_userlocks = sk->sk_userlocks & ~SOCK_BINDPORT_LOCK;
 		newsk->sk_backlog.head = newsk->sk_backlog.tail = NULL;
 		newsk->sk_send_head = NULL;
-		newsk->sk_callback_lock = RW_LOCK_UNLOCKED;
+		rwlock_init(&newsk->sk_callback_lock);
 		skb_queue_head_init(&newsk->sk_error_queue);
 		newsk->sk_write_space = sk_stream_write_space;
diff -puN net/ipv6/addrconf.c~lock-initializer-unifying-batch-2-networking net/ipv6/addrconf.c
--- 25/net/ipv6/addrconf.c~lock-initializer-unifying-batch-2-networking	Thu Oct 28 15:55:37 2004
+++ 25-akpm/net/ipv6/addrconf.c	Thu Oct 28 15:55:37 2004
@@ -326,7 +326,7 @@ static struct inet6_dev * ipv6_add_dev(s
 	if (ndev) {
 		memset(ndev, 0, sizeof(struct inet6_dev));
-		ndev->lock = RW_LOCK_UNLOCKED;
+		rwlock_init(&ndev->lock);
 		ndev->dev = dev;
 		memcpy(&ndev->cnf, &ipv6_devconf_dflt, sizeof(ndev->cnf));
 		ndev->cnf.mtu6 = dev->mtu;
diff -puN net/ipv6/anycast.c~lock-initializer-unifying-batch-2-networking net/ipv6/anycast.c
--- 25/net/ipv6/anycast.c~lock-initializer-unifying-batch-2-networking	Thu Oct 28 15:55:37 2004
+++ 25-akpm/net/ipv6/anycast.c	Thu Oct 28 15:55:37 2004
@@ -354,7 +354,7 @@ int ipv6_dev_ac_inc(struct net_device *d
 	/* aca_tstamp should be updated upon changes */
 	aca->aca_cstamp = aca->aca_tstamp = jiffies;
 	atomic_set(&aca->aca_refcnt, 2);
-	aca->aca_lock = SPIN_LOCK_UNLOCKED;
+	spin_lock_init(&aca->aca_lock);
 	aca->aca_next = idev->ac_list;
 	idev->ac_list = aca;
diff -puN net/ipv6/mcast.c~lock-initializer-unifying-batch-2-networking net/ipv6/mcast.c
--- 25/net/ipv6/mcast.c~lock-initializer-unifying-batch-2-networking	Thu Oct 28 15:55:37 2004
+++ 25-akpm/net/ipv6/mcast.c	Thu Oct 28 15:55:37 2004
@@ -709,7 +709,7 @@ static void mld_add_delrec(struct inet6_
 		return;
 	memset(pmc, 0, sizeof(*pmc));
 	spin_lock_bh(&im->mca_lock);
-	pmc->mca_lock = SPIN_LOCK_UNLOCKED;
+	spin_lock_init(&pmc->mca_lock);
 	pmc->idev = im->idev;
 	in6_dev_hold(idev);
 	pmc->mca_addr = im->mca_addr;
@@ -849,7 +849,7 @@ int ipv6_dev_mc_inc(struct net_device *d
 	/* mca_stamp should be updated upon changes */
 	mc->mca_cstamp = mc->mca_tstamp = jiffies;
 	atomic_set(&mc->mca_refcnt, 2);
-	mc->mca_lock = SPIN_LOCK_UNLOCKED;
+	spin_lock_init(&mc->mca_lock);
 	/* initial mode is (EX, empty) */
 	mc->mca_sfmode = MCAST_EXCLUDE;
@@ -2071,7 +2071,7 @@ void ipv6_mc_init_dev(struct inet6_dev *
 	struct in6_addr maddr;
 	write_lock_bh(&idev->lock);
-	idev->mc_lock = RW_LOCK_UNLOCKED;
+	rwlock_init(&idev->mc_lock);
 	idev->mc_gq_running = 0;
 	init_timer(&idev->mc_gq_timer);
 	idev->mc_gq_timer.data = (unsigned long) idev;
diff -puN net/ipv6/netfilter/ip6_tables.c~lock-initializer-unifying-batch-2-networking net/ipv6/netfilter/ip6_tables.c
--- 25/net/ipv6/netfilter/ip6_tables.c~lock-initializer-unifying-batch-2-networking	Thu Oct 28 15:55:37 2004
+++ 25-akpm/net/ipv6/netfilter/ip6_tables.c	Thu Oct 28 15:55:37 2004
@@ -1510,7 +1510,7 @@ int ip6t_register_table(struct ip6t_tabl
 	/* save number of initial entries */
 	table->private->initial_entries = table->private->number;
-	table->lock = RW_LOCK_UNLOCKED;
+	rwlock_init(&table->lock);
 	list_prepend(&ip6t_tables, table);
 unlock:
diff -puN net/ipv6/reassembly.c~lock-initializer-unifying-batch-2-networking net/ipv6/reassembly.c
--- 25/net/ipv6/reassembly.c~lock-initializer-unifying-batch-2-networking	Thu Oct 28 15:55:37 2004
+++ 25-akpm/net/ipv6/reassembly.c	Thu Oct 28 15:55:37 2004
@@ -387,7 +387,7 @@ ip6_frag_create(unsigned int hash, u32 i
 	init_timer(&fq->timer);
 	fq->timer.function = ip6_frag_expire;
 	fq->timer.data = (long) fq;
-	fq->lock = SPIN_LOCK_UNLOCKED;
+	spin_lock_init(&fq->lock);
 	atomic_set(&fq->refcnt, 1);
 	return ip6_frag_intern(hash, fq);
diff -puN net/netrom/nr_route.c~lock-initializer-unifying-batch-2-networking net/netrom/nr_route.c
--- 25/net/netrom/nr_route.c~lock-initializer-unifying-batch-2-networking	Thu Oct 28 15:55:37 2004
+++ 25-akpm/net/netrom/nr_route.c	Thu Oct 28 15:55:37 2004
@@ -186,7 +186,7 @@ static int nr_add_node(ax25_address *nr,
 	nr_node->which = 0;
 	nr_node->count = 1;
 	atomic_set(&nr_node->refcount, 1);
-	nr_node->node_lock = SPIN_LOCK_UNLOCKED;
+	spin_lock_init(&nr_node->node_lock);
 	nr_node->routes[0].quality = quality;
 	nr_node->routes[0].obs_count = obs_count;
diff -puN net/sctp/associola.c~lock-initializer-unifying-batch-2-networking net/sctp/associola.c
--- 25/net/sctp/associola.c~lock-initializer-unifying-batch-2-networking	Thu Oct 28 15:55:37 2004
+++ 25-akpm/net/sctp/associola.c	Thu Oct 28 15:55:37 2004
@@ -125,7 +125,7 @@ struct sctp_association *sctp_associatio
 	/* Initialize the bind addr area. */
 	sctp_bind_addr_init(&asoc->base.bind_addr, ep->base.bind_addr.port);
-	asoc->base.addr_lock = RW_LOCK_UNLOCKED;
+	rwlock_init(&asoc->base.addr_lock);
 	asoc->state = SCTP_STATE_CLOSED;
diff -puN net/sctp/endpointola.c~lock-initializer-unifying-batch-2-networking net/sctp/endpointola.c
--- 25/net/sctp/endpointola.c~lock-initializer-unifying-batch-2-networking	Thu Oct 28 15:55:37 2004
+++ 25-akpm/net/sctp/endpointola.c	Thu Oct 28 15:55:37 2004
@@ -113,7 +113,7 @@ struct sctp_endpoint *sctp_endpoint_init
 	/* Initialize the bind addr area */
 	sctp_bind_addr_init(&ep->base.bind_addr, 0);
-	ep->base.addr_lock = RW_LOCK_UNLOCKED;
+	rwlock_init(&ep->base.addr_lock);
 	/* Remember who we are attached to.  */
 	ep->base.sk = sk;
diff -puN net/sctp/protocol.c~lock-initializer-unifying-batch-2-networking net/sctp/protocol.c
--- 25/net/sctp/protocol.c~lock-initializer-unifying-batch-2-networking	Thu Oct 28 15:55:37 2004
+++ 25-akpm/net/sctp/protocol.c	Thu Oct 28 15:55:37 2004
@@ -1084,7 +1084,7 @@ __init int sctp_init(void)
 		goto err_ahash_alloc;
 	}
 	for (i = 0; i < sctp_assoc_hashsize; i++) {
-		sctp_assoc_hashtable[i].lock = RW_LOCK_UNLOCKED;
+		rwlock_init(&sctp_assoc_hashtable[i].lock);
 		sctp_assoc_hashtable[i].chain = NULL;
 	}
@@ -1098,7 +1098,7 @@ __init int sctp_init(void)
 		goto err_ehash_alloc;
 	}
 	for (i = 0; i < sctp_ep_hashsize; i++) {
-		sctp_ep_hashtable[i].lock = RW_LOCK_UNLOCKED;
+		rwlock_init(&sctp_ep_hashtable[i].lock);
 		sctp_ep_hashtable[i].chain = NULL;
 	}
@@ -1117,11 +1117,11 @@ __init int sctp_init(void)
 		goto err_bhash_alloc;
 	}
 	for (i = 0; i < sctp_port_hashsize; i++) {
-		sctp_port_hashtable[i].lock = SPIN_LOCK_UNLOCKED;
+		spin_lock_init(&sctp_port_hashtable[i].lock);
 		sctp_port_hashtable[i].chain = NULL;
 	}
-	sctp_port_alloc_lock = SPIN_LOCK_UNLOCKED;
+	spin_lock_init(&sctp_port_alloc_lock);
 	sctp_port_rover = sysctl_local_port_range[0] - 1;
 	printk(KERN_INFO "SCTP: Hash tables configured "
@@ -1152,7 +1152,7 @@ __init int sctp_init(void)
 	/* Initialize the local address list. */
 	INIT_LIST_HEAD(&sctp_local_addr_list);
-	sctp_local_addr_lock = SPIN_LOCK_UNLOCKED;
+	spin_lock_init(&sctp_local_addr_lock);
 	/* Register notifier for inet address additions/deletions. */
 	register_inetaddr_notifier(&sctp_inetaddr_notifier);
diff -puN net/xfrm/xfrm_policy.c~lock-initializer-unifying-batch-2-networking net/xfrm/xfrm_policy.c
--- 25/net/xfrm/xfrm_policy.c~lock-initializer-unifying-batch-2-networking	Thu Oct 28 15:55:37 2004
+++ 25-akpm/net/xfrm/xfrm_policy.c	Thu Oct 28 15:55:37 2004
@@ -227,7 +227,7 @@ struct xfrm_policy *xfrm_policy_alloc(in
 	if (policy) {
 		memset(policy, 0, sizeof(struct xfrm_policy));
 		atomic_set(&policy->refcnt, 1);
-		policy->lock = RW_LOCK_UNLOCKED;
+		rwlock_init(&policy->lock);
 		init_timer(&policy->timer);
 		policy->timer.data = (unsigned long)policy;
 		policy->timer.function = xfrm_policy_timer;
diff -puN net/xfrm/xfrm_state.c~lock-initializer-unifying-batch-2-networking net/xfrm/xfrm_state.c
--- 25/net/xfrm/xfrm_state.c~lock-initializer-unifying-batch-2-networking	Thu Oct 28 15:55:37 2004
+++ 25-akpm/net/xfrm/xfrm_state.c	Thu Oct 28 15:55:37 2004
@@ -186,7 +186,7 @@ struct xfrm_state *xfrm_state_alloc(void
 		x->lft.soft_packet_limit = XFRM_INF;
 		x->lft.hard_byte_limit = XFRM_INF;
 		x->lft.hard_packet_limit = XFRM_INF;
-		x->lock = SPIN_LOCK_UNLOCKED;
+		spin_lock_init(&x->lock);
 	}
 	return x;
 }
_
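
As a quick illustration of the conversion pattern described in the changelog,
here is a minimal sketch on a hypothetical object; struct foo and foo_alloc()
are invented for illustration and do not appear in any file touched by this
patch:

	#include <linux/slab.h>
	#include <linux/spinlock.h>
	#include <linux/string.h>

	/* Hypothetical object with embedded locks -- illustration only. */
	struct foo {
		spinlock_t	lock;
		rwlock_t	queue_lock;
	};

	static struct foo *foo_alloc(void)
	{
		struct foo *f = kmalloc(sizeof(*f), GFP_KERNEL);

		if (f == NULL)
			return NULL;
		memset(f, 0, sizeof(*f));

		/*
		 * Old style, which this series removes:
		 *	f->lock = SPIN_LOCK_UNLOCKED;
		 *	f->queue_lock = RW_LOCK_UNLOCKED;
		 *
		 * New style: the init macros currently expand to the same
		 * stores, so there is no runtime change, but they give lock
		 * validators and debugging code a single hook point.
		 */
		spin_lock_init(&f->lock);
		rwlock_init(&f->queue_lock);

		return f;
	}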