From: "Mika Kukkonen" CHECK net/sctp/sm_statefuns.c net/sctp/sm_statefuns.c:998:13: warning: Using plain integer as NULL pointer [...] CHECK net/sctp/protocol.c net/sctp/protocol.c:104:32: warning: Using plain integer as NULL pointer net/sctp/protocol.c:104:32: warning: Using plain integer as NULL pointer [...] CHECK net/sctp/ulpevent.c net/sctp/ulpevent.c:51:43: warning: marked inline, but without a definition net/sctp/ulpevent.c:51:43: warning: marked inline, but without a definition net/sctp/ulpevent.c:51:43: warning: marked inline, but without a definition net/sctp/ulpevent.c:51:43: warning: marked inline, but without a definition net/sctp/ulpevent.c:51:43: warning: marked inline, but without a definition net/sctp/ulpevent.c:51:43: warning: marked inline, but without a definition [...] CHECK net/sctp/inqueue.c net/sctp/inqueue.c:81:16: warning: assignment expression in conditional [...] CHECK net/sctp/outqueue.c net/sctp/outqueue.c:248:18: warning: assignment expression in conditional net/sctp/outqueue.c:285:16: warning: assignment expression in conditional net/sctp/outqueue.c:295:16: warning: assignment expression in conditional net/sctp/outqueue.c:684:16: warning: assignment expression in conditional net/sctp/outqueue.c:815:17: warning: assignment expression in conditional net/sctp/outqueue.c:866:4: warning: Using plain integer as NULL pointer [...] CHECK net/sctp/ulpqueue.c net/sctp/ulpqueue.c:100:14: warning: assignment expression in conditional net/sctp/ulpqueue.c:105:14: warning: assignment expression in conditional net/sctp/ulpqueue.c:52:53: warning: marked inline, but without a definition net/sctp/ulpqueue.c:54:52: warning: marked inline, but without a definition net/sctp/ulpqueue.c:759:14: warning: assignment expression in conditional net/sctp/ulpqueue.c:785:14: warning: assignment expression in conditional [...] CHECK net/sctp/socket.c net/sctp/socket.c:1011:25: warning: Using plain integer as NULL pointer net/sctp/socket.c:98:35: warning: marked inline, but without a definition net/sctp/socket.c:89:30: warning: marked inline, but without a definition net/sctp/socket.c:90:36: warning: marked inline, but without a definition net/sctp/socket.c:98:35: warning: marked inline, but without a definition [...] CHECK net/sctp/output.c net/sctp/output.c:136:23: warning: assignment expression in conditional net/sctp/output.c:373:16: warning: assignment expression in conditional net/sctp/output.c:514:16: warning: assignment expression in conditional This is little big because I have to move functions around to fix the inline warnings (which btw are warnings in GCC-3.4 too, and probably do not get inlined in any case with earlier gcc versions). Signed-off-by: Andrew Morton --- 25-akpm/net/sctp/inqueue.c | 2 25-akpm/net/sctp/output.c | 6 - 25-akpm/net/sctp/outqueue.c | 12 +-- 25-akpm/net/sctp/protocol.c | 2 25-akpm/net/sctp/sm_statefuns.c | 2 25-akpm/net/sctp/socket.c | 123 +++++++++++++++++++--------------------- 25-akpm/net/sctp/ulpevent.c | 77 ++++++++++++------------- 25-akpm/net/sctp/ulpqueue.c | 24 +++---- 8 files changed, 122 insertions(+), 126 deletions(-) diff -puN net/sctp/inqueue.c~sparse-fix-warnings-in-net-sctp net/sctp/inqueue.c --- 25/net/sctp/inqueue.c~sparse-fix-warnings-in-net-sctp 2004-07-05 17:31:32.786989904 -0700 +++ 25-akpm/net/sctp/inqueue.c 2004-07-05 17:31:32.807986712 -0700 @@ -78,7 +78,7 @@ void sctp_inq_free(struct sctp_inq *queu struct sctp_chunk *chunk; /* Empty the queue. 
*/ - while ((chunk = (struct sctp_chunk *) skb_dequeue(&queue->in))) + while ((chunk = (struct sctp_chunk *) skb_dequeue(&queue->in)) != NULL) sctp_chunk_free(chunk); /* If there is a packet which is currently being worked on, diff -puN net/sctp/output.c~sparse-fix-warnings-in-net-sctp net/sctp/output.c --- 25/net/sctp/output.c~sparse-fix-warnings-in-net-sctp 2004-07-05 17:31:32.787989752 -0700 +++ 25-akpm/net/sctp/output.c 2004-07-05 17:31:32.821984584 -0700 @@ -133,7 +133,7 @@ void sctp_packet_free(struct sctp_packet SCTP_DEBUG_PRINTK("%s: packet:%p\n", __FUNCTION__, packet); - while ((chunk = (struct sctp_chunk *)__skb_dequeue(&packet->chunks))) + while ((chunk = (struct sctp_chunk *)__skb_dequeue(&packet->chunks)) != NULL) sctp_chunk_free(chunk); if (packet->malloced) @@ -370,7 +370,7 @@ int sctp_packet_transmit(struct sctp_pac * [This whole comment explains WORD_ROUND() below.] */ SCTP_DEBUG_PRINTK("***sctp_transmit_packet***\n"); - while ((chunk = (struct sctp_chunk *)__skb_dequeue(&packet->chunks))) { + while ((chunk = (struct sctp_chunk *)__skb_dequeue(&packet->chunks)) != NULL) { if (sctp_chunk_is_data(chunk)) { if (!chunk->has_tsn) { @@ -511,7 +511,7 @@ err: * will get resent or dropped later. */ - while ((chunk = (struct sctp_chunk *)__skb_dequeue(&packet->chunks))) { + while ((chunk = (struct sctp_chunk *)__skb_dequeue(&packet->chunks)) != NULL) { if (!sctp_chunk_is_data(chunk)) sctp_chunk_free(chunk); } diff -puN net/sctp/outqueue.c~sparse-fix-warnings-in-net-sctp net/sctp/outqueue.c --- 25/net/sctp/outqueue.c~sparse-fix-warnings-in-net-sctp 2004-07-05 17:31:32.789989448 -0700 +++ 25-akpm/net/sctp/outqueue.c 2004-07-05 17:31:32.808986560 -0700 @@ -245,7 +245,7 @@ void sctp_outq_teardown(struct sctp_outq /* Throw away unacknowledged chunks. */ list_for_each(pos, &q->asoc->peer.transport_addr_list) { transport = list_entry(pos, struct sctp_transport, transports); - while ((lchunk = sctp_list_dequeue(&transport->transmitted))) { + while ((lchunk = sctp_list_dequeue(&transport->transmitted)) != NULL) { chunk = list_entry(lchunk, struct sctp_chunk, transmitted_list); /* Mark as part of a failed message. */ @@ -282,7 +282,7 @@ void sctp_outq_teardown(struct sctp_outq } /* Throw away any leftover data chunks. */ - while ((chunk = sctp_outq_dequeue_data(q))) { + while ((chunk = sctp_outq_dequeue_data(q)) != NULL) { /* Mark as send failure. */ sctp_chunk_fail(chunk, q->error); @@ -292,7 +292,7 @@ void sctp_outq_teardown(struct sctp_outq q->error = 0; /* Throw away any leftover control chunks. */ - while ((chunk = (struct sctp_chunk *) skb_dequeue(&q->control))) + while ((chunk = (struct sctp_chunk *) skb_dequeue(&q->control)) != NULL) sctp_chunk_free(chunk); } @@ -681,7 +681,7 @@ int sctp_outq_flush(struct sctp_outq *q, */ queue = &q->control; - while ((chunk = (struct sctp_chunk *)skb_dequeue(queue))) { + while ((chunk = (struct sctp_chunk *)skb_dequeue(queue)) != NULL) { /* Pick the right transport to use. */ new_transport = chunk->transport; @@ -812,7 +812,7 @@ int sctp_outq_flush(struct sctp_outq *q, start_timer = 0; queue = &q->out; - while ((chunk = sctp_outq_dequeue_data(q))) { + while ((chunk = sctp_outq_dequeue_data(q)) != NULL) { /* RFC 2960 6.5 Every DATA chunk MUST carry a valid * stream identifier. */ @@ -866,7 +866,7 @@ int sctp_outq_flush(struct sctp_outq *q, SCTP_DEBUG_PRINTK("TX TSN 0x%x skb->head " "%p skb->users %d.\n", ntohl(chunk->subh.data_hdr->tsn), - chunk->skb ?chunk->skb->head : 0, + chunk->skb ?chunk->skb->head : NULL, chunk->skb ? 
 					atomic_read(&chunk->skb->users) : -1);

diff -puN net/sctp/protocol.c~sparse-fix-warnings-in-net-sctp net/sctp/protocol.c
--- 25/net/sctp/protocol.c~sparse-fix-warnings-in-net-sctp	2004-07-05 17:31:32.791989144 -0700
+++ 25-akpm/net/sctp/protocol.c	2004-07-05 17:31:32.805987016 -0700
@@ -101,7 +101,7 @@ __init int sctp_proc_init(void)
 {
 	if (!proc_net_sctp) {
 		struct proc_dir_entry *ent;
-		ent = proc_mkdir("net/sctp", 0);
+		ent = proc_mkdir("net/sctp", NULL);
 		if (ent) {
 			ent->owner = THIS_MODULE;
 			proc_net_sctp = ent;

diff -puN net/sctp/sm_statefuns.c~sparse-fix-warnings-in-net-sctp net/sctp/sm_statefuns.c
--- 25/net/sctp/sm_statefuns.c~sparse-fix-warnings-in-net-sctp	2004-07-05 17:31:32.793988840 -0700
+++ 25-akpm/net/sctp/sm_statefuns.c	2004-07-05 17:31:32.804987168 -0700
@@ -995,7 +995,7 @@ static int sctp_sf_check_restart_addrs(c
 	/* Search through all current addresses and make sure
 	 * we aren't adding any new ones.
 	 */
-	new_addr = 0;
+	new_addr = NULL;
 	found = 0;
 
 	list_for_each(pos, &new_asoc->peer.transport_addr_list) {

diff -puN net/sctp/socket.c~sparse-fix-warnings-in-net-sctp net/sctp/socket.c
--- 25/net/sctp/socket.c~sparse-fix-warnings-in-net-sctp	2004-07-05 17:31:32.795988536 -0700
+++ 25-akpm/net/sctp/socket.c	2004-07-05 17:31:32.820984736 -0700
@@ -86,8 +86,6 @@
 
 /* Forward declarations for internal helper functions. */
 static int sctp_writeable(struct sock *sk);
-static inline int sctp_wspace(struct sctp_association *asoc);
-static inline void sctp_set_owner_w(struct sctp_chunk *chunk);
 static void sctp_wfree(struct sk_buff *skb);
 static int sctp_wait_for_sndbuf(struct sctp_association *, long *timeo_p,
 				size_t msg_len);
@@ -95,7 +93,8 @@ static int sctp_wait_for_packet(struct s
 static int sctp_wait_for_connect(struct sctp_association *, long *timeo_p);
 static int sctp_wait_for_accept(struct sock *sk, long timeo);
 static void sctp_wait_for_close(struct sock *sk, long timeo);
-static inline int sctp_verify_addr(struct sock *, union sctp_addr *, int);
+static struct sctp_af *sctp_sockaddr_af(struct sctp_opt *opt,
+					union sctp_addr *addr, int len);
 static int sctp_bindx_add(struct sock *, struct sockaddr *, int);
 static int sctp_bindx_rem(struct sock *, struct sockaddr *, int);
 static int sctp_send_asconf_add_ip(struct sock *, struct sockaddr *, int);
@@ -111,6 +110,64 @@ static char *sctp_hmac_alg = SCTP_COOKIE
 extern kmem_cache_t *sctp_bucket_cachep;
 extern int sctp_assoc_valid(struct sock *sk, struct sctp_association *asoc);
 
+/* Get the sndbuf space available at the time on the association. */
+static inline int sctp_wspace(struct sctp_association *asoc)
+{
+	struct sock *sk = asoc->base.sk;
+	int amt = 0;
+
+	amt = sk->sk_sndbuf - asoc->sndbuf_used;
+	if (amt < 0)
+		amt = 0;
+	return amt;
+}
+
+/* Increment the used sndbuf space count of the corresponding association by
+ * the size of the outgoing data chunk.
+ * Also, set the skb destructor for sndbuf accounting later.
+ *
+ * Since it is always 1-1 between chunk and skb, and also a new skb is always
+ * allocated for chunk bundling in sctp_packet_transmit(), we can use the
+ * destructor in the data chunk skb for the purpose of the sndbuf space
+ * tracking.
+ */
+static inline void sctp_set_owner_w(struct sctp_chunk *chunk)
+{
+	struct sctp_association *asoc = chunk->asoc;
+	struct sock *sk = asoc->base.sk;
+
+	/* The sndbuf space is tracked per association. */
+	sctp_association_hold(asoc);
+
+	chunk->skb->destructor = sctp_wfree;
+	/* Save the chunk pointer in skb for sctp_wfree to use later. */
+	*((struct sctp_chunk **)(chunk->skb->cb)) = chunk;
+
+	asoc->sndbuf_used += SCTP_DATA_SNDSIZE(chunk);
+	sk->sk_wmem_queued += SCTP_DATA_SNDSIZE(chunk);
+}
+
+/* Verify that this is a valid address. */
+static inline int sctp_verify_addr(struct sock *sk, union sctp_addr *addr,
+				   int len)
+{
+	struct sctp_af *af;
+
+	/* Verify basic sockaddr. */
+	af = sctp_sockaddr_af(sctp_sk(sk), addr, len);
+	if (!af)
+		return -EINVAL;
+
+	/* Is this a valid SCTP address? */
+	if (!af->addr_valid(addr, sctp_sk(sk)))
+		return -EINVAL;
+
+	if (!sctp_sk(sk)->pf->send_verify(sctp_sk(sk), (addr)))
+		return -EINVAL;
+
+	return 0;
+}
+
 /* Look up the association by its id.  If this is not a UDP-style
  * socket, the ID field is always ignored.
  */
@@ -1008,7 +1065,7 @@ SCTP_STATIC int sctp_sendmsg(struct kioc
 	struct sctp_sndrcvinfo *sinfo;
 	struct sctp_initmsg *sinit;
 	sctp_assoc_t associd = NULL;
-	sctp_cmsgs_t cmsgs = { 0 };
+	sctp_cmsgs_t cmsgs = { NULL };
 	int err;
 	sctp_scope_t scope;
 	long timeo;
@@ -4144,64 +4201,6 @@ no_packet:
 	return NULL;
 }
 
-/* Verify that this is a valid address. */
-static inline int sctp_verify_addr(struct sock *sk, union sctp_addr *addr,
-				   int len)
-{
-	struct sctp_af *af;
-
-	/* Verify basic sockaddr. */
-	af = sctp_sockaddr_af(sctp_sk(sk), addr, len);
-	if (!af)
-		return -EINVAL;
-
-	/* Is this a valid SCTP address? */
-	if (!af->addr_valid(addr, sctp_sk(sk)))
-		return -EINVAL;
-
-	if (!sctp_sk(sk)->pf->send_verify(sctp_sk(sk), (addr)))
-		return -EINVAL;
-
-	return 0;
-}
-
-/* Get the sndbuf space available at the time on the association. */
-static inline int sctp_wspace(struct sctp_association *asoc)
-{
-	struct sock *sk = asoc->base.sk;
-	int amt = 0;
-
-	amt = sk->sk_sndbuf - asoc->sndbuf_used;
-	if (amt < 0)
-		amt = 0;
-	return amt;
-}
-
-/* Increment the used sndbuf space count of the corresponding association by
- * the size of the outgoing data chunk.
- * Also, set the skb destructor for sndbuf accounting later.
- *
- * Since it is always 1-1 between chunk and skb, and also a new skb is always
- * allocated for chunk bundling in sctp_packet_transmit(), we can use the
- * destructor in the data chunk skb for the purpose of the sndbuf space
- * tracking.
- */
-static inline void sctp_set_owner_w(struct sctp_chunk *chunk)
-{
-	struct sctp_association *asoc = chunk->asoc;
-	struct sock *sk = asoc->base.sk;
-
-	/* The sndbuf space is tracked per association. */
-	sctp_association_hold(asoc);
-
-	chunk->skb->destructor = sctp_wfree;
-	/* Save the chunk pointer in skb for sctp_wfree to use later. */
-	*((struct sctp_chunk **)(chunk->skb->cb)) = chunk;
-
-	asoc->sndbuf_used += SCTP_DATA_SNDSIZE(chunk);
-	sk->sk_wmem_queued += SCTP_DATA_SNDSIZE(chunk);
-}
-
 /* If sndbuf has changed, wake up per association sndbuf waiters. */
 static void __sctp_write_space(struct sctp_association *asoc)
 {

diff -puN net/sctp/ulpevent.c~sparse-fix-warnings-in-net-sctp net/sctp/ulpevent.c
--- 25/net/sctp/ulpevent.c~sparse-fix-warnings-in-net-sctp	2004-07-05 17:31:32.796988384 -0700
+++ 25-akpm/net/sctp/ulpevent.c	2004-07-05 17:31:32.806986864 -0700
@@ -48,13 +48,23 @@
 #include
 #include
 
-static inline void sctp_ulpevent_set_owner(struct sctp_ulpevent *event,
-					   const struct sctp_association *asoc);
-static inline void sctp_ulpevent_release_owner(struct sctp_ulpevent *event);
 static void sctp_ulpevent_receive_data(struct sctp_ulpevent *event,
 					struct sctp_association *asoc);
 static void sctp_ulpevent_release_data(struct sctp_ulpevent *event);
 
+/* Stub skb destructor. */
+static void sctp_stub_rfree(struct sk_buff *skb)
+{
+/* WARNING: This function is just a warning not to use the
+ * skb destructor. If the skb is shared, we may get the destructor
+ * callback on some processor that does not own the sock_lock. This
+ * was occuring with PACKET socket applications that were monitoring
+ * our skbs. We can't take the sock_lock, because we can't risk
+ * recursing if we do really own the sock lock. Instead, do all
+ * of our rwnd manipulation while we own the sock_lock outright.
+ */
+}
+
 /* Create a new sctp_ulpevent. */
 struct sctp_ulpevent *sctp_ulpevent_new(int size, int msg_flags, int gfp)
 {
@@ -87,6 +97,30 @@ int sctp_ulpevent_is_notification(const
 	return MSG_NOTIFICATION == (event->msg_flags & MSG_NOTIFICATION);
 }
 
+/* Hold the association in case the msg_name needs read out of
+ * the association.
+ */
+static inline void sctp_ulpevent_set_owner(struct sctp_ulpevent *event,
+					   const struct sctp_association *asoc)
+{
+	struct sk_buff *skb;
+
+	/* Cast away the const, as we are just wanting to
+	 * bump the reference count.
+	 */
+	sctp_association_hold((struct sctp_association *)asoc);
+	skb = sctp_event2skb(event);
+	skb->sk = asoc->base.sk;
+	event->asoc = (struct sctp_association *)asoc;
+	skb->destructor = sctp_stub_rfree;
+}
+
+/* A simple destructor to give up the reference to the association. */
+static inline void sctp_ulpevent_release_owner(struct sctp_ulpevent *event)
+{
+	sctp_association_put(event->asoc);
+}
+
 /* Create and initialize an SCTP_ASSOC_CHANGE event.
  *
  * 5.3.1.1 SCTP_ASSOC_CHANGE
@@ -789,43 +823,6 @@ void sctp_ulpevent_read_sndrcvinfo(const
 		   sizeof(struct sctp_sndrcvinfo), (void *)&sinfo);
 }
 
-/* Stub skb destructor. */
-static void sctp_stub_rfree(struct sk_buff *skb)
-{
-/* WARNING: This function is just a warning not to use the
- * skb destructor. If the skb is shared, we may get the destructor
- * callback on some processor that does not own the sock_lock. This
- * was occuring with PACKET socket applications that were monitoring
- * our skbs. We can't take the sock_lock, because we can't risk
- * recursing if we do really own the sock lock. Instead, do all
- * of our rwnd manipulation while we own the sock_lock outright.
- */
-}
-
-/* Hold the association in case the msg_name needs read out of
- * the association.
- */
-static inline void sctp_ulpevent_set_owner(struct sctp_ulpevent *event,
-					   const struct sctp_association *asoc)
-{
-	struct sk_buff *skb;
-
-	/* Cast away the const, as we are just wanting to
-	 * bump the reference count.
-	 */
-	sctp_association_hold((struct sctp_association *)asoc);
-	skb = sctp_event2skb(event);
-	skb->sk = asoc->base.sk;
-	event->asoc = (struct sctp_association *)asoc;
-	skb->destructor = sctp_stub_rfree;
-}
-
-/* A simple destructor to give up the reference to the association. */
-static inline void sctp_ulpevent_release_owner(struct sctp_ulpevent *event)
-{
-	sctp_association_put(event->asoc);
-}
-
 /* Do accounting for bytes received and hold a reference to the association
  * for each skb.
  */

diff -puN net/sctp/ulpqueue.c~sparse-fix-warnings-in-net-sctp net/sctp/ulpqueue.c
--- 25/net/sctp/ulpqueue.c~sparse-fix-warnings-in-net-sctp	2004-07-05 17:31:32.798988080 -0700
+++ 25-akpm/net/sctp/ulpqueue.c	2004-07-05 17:31:32.810986256 -0700
@@ -49,10 +49,10 @@
 #include
 
 /* Forward declarations for internal helpers. */
-static inline struct sctp_ulpevent * sctp_ulpq_reasm(struct sctp_ulpq *ulpq,
-						      struct sctp_ulpevent *);
-static inline struct sctp_ulpevent *sctp_ulpq_order(struct sctp_ulpq *,
-						     struct sctp_ulpevent *);
+static struct sctp_ulpevent * sctp_ulpq_reasm(struct sctp_ulpq *ulpq,
+					       struct sctp_ulpevent *);
+static struct sctp_ulpevent * sctp_ulpq_order(struct sctp_ulpq *,
+					       struct sctp_ulpevent *);
 
 /* 1st Level Abstractions */
 
@@ -97,12 +97,12 @@ void sctp_ulpq_flush(struct sctp_ulpq *u
 	struct sk_buff *skb;
 	struct sctp_ulpevent *event;
 
-	while ((skb = __skb_dequeue(&ulpq->lobby))) {
+	while ((skb = __skb_dequeue(&ulpq->lobby)) != NULL) {
 		event = sctp_skb2event(skb);
 		sctp_ulpevent_free(event);
 	}
 
-	while ((skb = __skb_dequeue(&ulpq->reasm))) {
+	while ((skb = __skb_dequeue(&ulpq->reasm)) != NULL) {
 		event = sctp_skb2event(skb);
 		sctp_ulpevent_free(event);
 	}
@@ -466,8 +466,8 @@ done:
 /* Helper function to reassemble chunks. Hold chunks on the reasm queue that
  * need reassembling.
  */
-static inline struct sctp_ulpevent *sctp_ulpq_reasm(struct sctp_ulpq *ulpq,
-						     struct sctp_ulpevent *event)
+static struct sctp_ulpevent *sctp_ulpq_reasm(struct sctp_ulpq *ulpq,
+					      struct sctp_ulpevent *event)
 {
 	struct sctp_ulpevent *retval = NULL;
 
@@ -645,8 +645,8 @@ static inline void sctp_ulpq_store_order
 }
 
-static inline struct sctp_ulpevent *sctp_ulpq_order(struct sctp_ulpq *ulpq,
-						     struct sctp_ulpevent *event)
+static struct sctp_ulpevent *sctp_ulpq_order(struct sctp_ulpq *ulpq,
+					      struct sctp_ulpevent *event)
 {
 	__u16 sid, ssn;
 	struct sctp_stream *in;
 
@@ -756,7 +756,7 @@ static __u16 sctp_ulpq_renege_order(stru
 	tsnmap = &ulpq->asoc->peer.tsn_map;
 
-	while ((skb = __skb_dequeue_tail(&ulpq->lobby))) {
+	while ((skb = __skb_dequeue_tail(&ulpq->lobby)) != NULL) {
 		freed += skb_headlen(skb);
 		event = sctp_skb2event(skb);
 		tsn = event->tsn;
@@ -782,7 +782,7 @@ static __u16 sctp_ulpq_renege_frags(stru
 	tsnmap = &ulpq->asoc->peer.tsn_map;
 
 	/* Walk backwards through the list, reneges the newest tsns. */
-	while ((skb = __skb_dequeue_tail(&ulpq->reasm))) {
+	while ((skb = __skb_dequeue_tail(&ulpq->reasm)) != NULL) {
 		freed += skb_headlen(skb);
 		event = sctp_skb2event(skb);
 		tsn = event->tsn;
_
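
For reference, here is a small standalone sketch of the three sparse warning
patterns this patch deals with, and the shape of each fix.  The types and
helpers below (struct item, dequeue(), drain(), queue_empty()) are made up
purely for illustration; they are not part of net/sctp or of this patch.

#include <stddef.h>

struct item {
	struct item *next;
};

static struct item *head;	/* toy queue used only for this example */

/* "Using plain integer as NULL pointer": initialize and compare pointers
 * with NULL rather than 0.
 */
static struct item *dequeue(void)
{
	struct item *it = NULL;		/* was: struct item *it = 0; */

	if (head != NULL) {
		it = head;
		head = it->next;
	}
	return it;
}

/* "assignment expression in conditional": keep the assignment, but make the
 * test explicit so the intent is clearly not a mistyped "==".
 */
static void drain(void)
{
	struct item *it;

	/* was: while ((it = dequeue())) */
	while ((it = dequeue()) != NULL)
		it->next = NULL;	/* consume the entry */
}

/* "marked inline, but without a definition": an inline function should be
 * defined before its first user in the translation unit, so instead of
 * forward-declaring it (as socket.c and ulpevent.c did), the whole body is
 * moved above the callers -- which is what makes this patch a little big.
 */
static inline int queue_empty(void)
{
	return head == NULL;
}

int main(void)
{
	drain();
	return queue_empty() ? 0 : 1;
}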