/*
 * linux/net/sunrpc/svcsock.c
 *
 * These are the RPC server socket internals.
 *
 * The server scheduling algorithm does not always distribute the load
 * evenly when servicing a single client. May need to modify the
 * svc_sock_enqueue procedure...
 *
 * TCP support is largely untested and may be a little slow. The problem
 * is that we currently do two separate recvfrom's, one for the 4-byte
 * record length, and the second for the actual record. This could possibly
 * be improved by always reading a minimum size of around 100 bytes and
 * tucking any superfluous bytes away in a temporary store. Still, that
 * leaves write requests out in the rain. An alternative may be to peek at
 * the first skb in the queue, and if it matches the next TCP sequence
 * number, to extract the record marker. Yuck.
 *
 * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
 */
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/fcntl.h>
#include <linux/net.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/udp.h>
#include <linux/tcp.h>
#include <linux/unistd.h>
#include <linux/slab.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/file.h>
#include <linux/freezer.h>
#include <net/sock.h>
#include <net/checksum.h>
#include <net/ip.h>
#include <net/tcp_states.h>
#include <asm/uaccess.h>
#include <asm/ioctls.h>

#include <linux/sunrpc/types.h>
#include <linux/sunrpc/xdr.h>
#include <linux/sunrpc/svcsock.h>
#include <linux/sunrpc/stats.h>
/* SMP locking strategy:
 *
 *	svc_pool->sp_lock protects most of the fields of that pool.
 *	svc_serv->sv_lock protects sv_tempsocks, sv_permsocks, sv_tmpcnt.
 *	when both need to be taken (rare), svc_serv->sv_lock is first.
 *	BKL protects svc_serv->sv_nrthread.
 *	svc_sock->sk_defer_lock protects the svc_sock->sk_deferred list
 *	svc_sock->sk_flags.SK_BUSY prevents a svc_sock being enqueued
 *	more than once.
 *
 *	Some flags can be set to certain values at any time
 *	providing that certain rules are followed:
 *
 *	SK_CONN, SK_DATA can be set or cleared at any time.
 *		after a set, svc_sock_enqueue must be called.
 *		after a clear, the socket must be read/accepted;
 *		 if this succeeds, it must be set again.
 *	SK_CLOSE can be set at any time. It is never cleared.
 *	sk_inuse contains a bias of '1' until SK_DEAD is set,
 *		 so when sk_inuse hits zero, we know the socket is dead
 *		 and no-one is using it.
 *	SK_DEAD can only be set while SK_BUSY is held, which ensures
 *		 no other thread will be using the socket or will try to
 *		 set SK_DEAD.
 */
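/*
 * A sketch of how these rules play out in the callbacks below (this is
 * the existing pattern restated for clarity, not a new mechanism):
 *
 *	set_bit(SK_DATA, &svsk->sk_flags);	(data arrived)
 *	svc_sock_enqueue(svsk);			(make a thread look at it)
 *
 * On the reading side, SK_DATA is cleared before draining the socket;
 * if the read finds, or may have left, more data, SK_DATA is set again
 * and the socket re-enqueued.
 */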
#define RPCDBG_FACILITY	RPCDBG_SVCSOCK


static struct svc_sock *svc_setup_socket(struct svc_serv *, struct socket *,
					 int *errp, int flags);
static void		svc_delete_socket(struct svc_sock *svsk);
static void		svc_udp_data_ready(struct sock *, int);
static int		svc_udp_recvfrom(struct svc_rqst *);
static int		svc_udp_sendto(struct svc_rqst *);

static struct svc_deferred_req *svc_deferred_dequeue(struct svc_sock *svsk);
static int svc_deferred_recv(struct svc_rqst *rqstp);
static struct cache_deferred_req *svc_defer(struct cache_req *req);
/* apparently the "standard" is that clients close
 * idle connections after 5 minutes, servers after
 * 6 minutes
 *   http://www.connectathon.org/talks96/nfstcp.pdf
 */
static int svc_conn_age_period = 6*60;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
static struct lock_class_key svc_key[2];
static struct lock_class_key svc_slock_key[2];

static inline void svc_reclassify_socket(struct socket *sock)
{
	struct sock *sk = sock->sk;
	BUG_ON(sk->sk_lock.owner != NULL);
	switch (sk->sk_family) {
	case AF_INET:
		sock_lock_init_class_and_name(sk, "slock-AF_INET-NFSD",
		    &svc_slock_key[0], "sk_lock-AF_INET-NFSD", &svc_key[0]);
		break;

	case AF_INET6:
		sock_lock_init_class_and_name(sk, "slock-AF_INET6-NFSD",
		    &svc_slock_key[1], "sk_lock-AF_INET6-NFSD", &svc_key[1]);
		break;

	default:
		BUG();
	}
}
#else
static inline void svc_reclassify_socket(struct socket *sock)
{
}
#endif
/*
 * Queue up an idle server thread.  Must have pool->sp_lock held.
 * Note: this is really a stack rather than a queue, so that we only
 * use as many different threads as we need, and the rest don't pollute
 * the cache.
 */
static inline void
svc_thread_enqueue(struct svc_pool *pool, struct svc_rqst *rqstp)
{
	list_add(&rqstp->rq_list, &pool->sp_threads);
}
/*
 * Dequeue an nfsd thread.  Must have pool->sp_lock held.
 */
static inline void
svc_thread_dequeue(struct svc_pool *pool, struct svc_rqst *rqstp)
{
	list_del(&rqstp->rq_list);
}
/*
 * Release an skbuff after use
 */
static inline void
svc_release_skb(struct svc_rqst *rqstp)
{
	struct sk_buff *skb = rqstp->rq_skbuff;
	struct svc_deferred_req *dr = rqstp->rq_deferred;

	if (skb) {
		rqstp->rq_skbuff = NULL;

		dprintk("svc: service %p, releasing skb %p\n", rqstp, skb);
		skb_free_datagram(rqstp->rq_sock->sk_sk, skb);
	}
	if (dr) {
		rqstp->rq_deferred = NULL;
		kfree(dr);
	}
}
/*
 * Any space to write?
 */
static inline unsigned long
svc_sock_wspace(struct svc_sock *svsk)
{
	int wspace;

	if (svsk->sk_sock->type == SOCK_STREAM)
		wspace = sk_stream_wspace(svsk->sk_sk);
	else
		wspace = sock_wspace(svsk->sk_sk);

	return wspace;
}
/*
 * Queue up a socket with data pending. If there are idle nfsd
 * processes, wake 'em up.
 *
 */
static void
svc_sock_enqueue(struct svc_sock *svsk)
{
	struct svc_serv	*serv = svsk->sk_server;
	struct svc_pool *pool;
	struct svc_rqst	*rqstp;
	int cpu;

	if (!(svsk->sk_flags &
	      ( (1<<SK_CONN)|(1<<SK_DATA)|(1<<SK_CLOSE)|(1<<SK_DEFERRED)) ))
		return;
	if (test_bit(SK_DEAD, &svsk->sk_flags))
		return;

	cpu = get_cpu();
	pool = svc_pool_for_cpu(svsk->sk_server, cpu);
	put_cpu();

	spin_lock_bh(&pool->sp_lock);

	if (!list_empty(&pool->sp_threads) &&
	    !list_empty(&pool->sp_sockets))
		printk(KERN_ERR
			"svc_sock_enqueue: threads and sockets both waiting??\n");

	if (test_bit(SK_DEAD, &svsk->sk_flags)) {
		/* Don't enqueue dead sockets */
		dprintk("svc: socket %p is dead, not enqueued\n", svsk->sk_sk);
		goto out_unlock;
	}

	/* Mark socket as busy. It will remain in this state until the
	 * server has processed all pending data and put the socket back
	 * on the idle list.  We update SK_BUSY atomically because
	 * it also guards against trying to enqueue the svc_sock twice.
	 */
	if (test_and_set_bit(SK_BUSY, &svsk->sk_flags)) {
		/* Don't enqueue socket while already enqueued */
		dprintk("svc: socket %p busy, not enqueued\n", svsk->sk_sk);
		goto out_unlock;
	}
	BUG_ON(svsk->sk_pool != NULL);
	svsk->sk_pool = pool;

	set_bit(SOCK_NOSPACE, &svsk->sk_sock->flags);
	if (((atomic_read(&svsk->sk_reserved) + serv->sv_max_mesg)*2
	     > svc_sock_wspace(svsk))
	    && !test_bit(SK_CLOSE, &svsk->sk_flags)
	    && !test_bit(SK_CONN, &svsk->sk_flags)) {
		/* Don't enqueue while not enough space for reply */
		dprintk("svc: socket %p no space, %d*2 > %ld, not enqueued\n",
			svsk->sk_sk, atomic_read(&svsk->sk_reserved)+serv->sv_max_mesg,
			svc_sock_wspace(svsk));
		svsk->sk_pool = NULL;
		clear_bit(SK_BUSY, &svsk->sk_flags);
		goto out_unlock;
	}
	clear_bit(SOCK_NOSPACE, &svsk->sk_sock->flags);

	if (!list_empty(&pool->sp_threads)) {
		rqstp = list_entry(pool->sp_threads.next,
				   struct svc_rqst,
				   rq_list);
		dprintk("svc: socket %p served by daemon %p\n",
			svsk->sk_sk, rqstp);
		svc_thread_dequeue(pool, rqstp);
		if (rqstp->rq_sock)
			printk(KERN_ERR
				"svc_sock_enqueue: server %p, rq_sock=%p!\n",
				rqstp, rqstp->rq_sock);
		rqstp->rq_sock = svsk;
		atomic_inc(&svsk->sk_inuse);
		rqstp->rq_reserved = serv->sv_max_mesg;
		atomic_add(rqstp->rq_reserved, &svsk->sk_reserved);
		BUG_ON(svsk->sk_pool != pool);
		wake_up(&rqstp->rq_wait);
	} else {
		dprintk("svc: socket %p put into queue\n", svsk->sk_sk);
		list_add_tail(&svsk->sk_ready, &pool->sp_sockets);
		BUG_ON(svsk->sk_pool != pool);
	}

out_unlock:
	spin_unlock_bh(&pool->sp_lock);
}
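/*
 * Note on the write-space test above: the socket is only enqueued when
 * the outgoing buffer can cover twice the worst case of outstanding
 * replies (sk_reserved plus one more sv_max_mesg).  The factor of two
 * presumably matches svc_sock_setbufsize() below, which sets sk_sndbuf
 * to twice the requested size.  SK_CLOSE and SK_CONN bypass the test
 * since handling them generates no reply data.
 */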
/*
 * Dequeue the first socket.  Must be called with the pool->sp_lock held.
 */
static inline struct svc_sock *
svc_sock_dequeue(struct svc_pool *pool)
{
	struct svc_sock	*svsk;

	if (list_empty(&pool->sp_sockets))
		return NULL;

	svsk = list_entry(pool->sp_sockets.next,
			  struct svc_sock, sk_ready);
	list_del_init(&svsk->sk_ready);

	dprintk("svc: socket %p dequeued, inuse=%d\n",
		svsk->sk_sk, atomic_read(&svsk->sk_inuse));

	return svsk;
}
/*
 * Having read something from a socket, check whether it
 * needs to be re-enqueued.
 * Note: SK_DATA only gets cleared when a read-attempt finds
 * no (or insufficient) data.
 */
static inline void
svc_sock_received(struct svc_sock *svsk)
{
	svsk->sk_pool = NULL;
	clear_bit(SK_BUSY, &svsk->sk_flags);
	svc_sock_enqueue(svsk);
}
/**
 * svc_reserve - change the space reserved for the reply to a request.
 * @rqstp:  The request in question
 * @space: new max space to reserve
 *
 * Each request reserves some space on the output queue of the socket
 * to make sure the reply fits.  This function reduces that reserved
 * space to be the amount of space used already, plus @space.
 *
 */
void svc_reserve(struct svc_rqst *rqstp, int space)
{
	space += rqstp->rq_res.head[0].iov_len;

	if (space < rqstp->rq_reserved) {
		struct svc_sock *svsk = rqstp->rq_sock;
		atomic_sub((rqstp->rq_reserved - space), &svsk->sk_reserved);
		rqstp->rq_reserved = space;

		svc_sock_enqueue(svsk);
	}
}
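/*
 * Usage sketch (hypothetical caller, for illustration only): a service
 * that knows early that its reply will be tiny can return most of its
 * reservation with
 *
 *	svc_reserve(rqstp, 512);
 *
 * which may let svc_sock_enqueue() pass the write-space test for other
 * requests pending on the same socket.
 */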
/*
 * Release a socket after use.
 */
static inline void
svc_sock_put(struct svc_sock *svsk)
{
	if (atomic_dec_and_test(&svsk->sk_inuse)) {
		BUG_ON(! test_bit(SK_DEAD, &svsk->sk_flags));

		dprintk("svc: releasing dead socket\n");
		if (svsk->sk_sock->file)
			sockfd_put(svsk->sk_sock);
		else
			sock_release(svsk->sk_sock);
		if (svsk->sk_info_authunix != NULL)
			svcauth_unix_info_release(svsk->sk_info_authunix);
		kfree(svsk);
	}
}
static void
svc_sock_release(struct svc_rqst *rqstp)
{
	struct svc_sock	*svsk = rqstp->rq_sock;

	svc_release_skb(rqstp);

	svc_free_res_pages(rqstp);
	rqstp->rq_res.page_len = 0;
	rqstp->rq_res.page_base = 0;

	/* Reset response buffer and release
	 * the reservation.
	 * But first, check that enough space was reserved
	 * for the reply, otherwise we have a bug!
	 */
	if ((rqstp->rq_res.len) > rqstp->rq_reserved)
		printk(KERN_ERR "RPC request reserved %d but used %d\n",
		       rqstp->rq_reserved,
		       rqstp->rq_res.len);

	rqstp->rq_res.head[0].iov_len = 0;
	svc_reserve(rqstp, 0);
	rqstp->rq_sock = NULL;

	svc_sock_put(svsk);
}
/*
 * External function to wake up a server waiting for data
 * This really only makes sense for services like lockd
 * which have exactly one thread anyway.
 */
void
svc_wake_up(struct svc_serv *serv)
{
	struct svc_rqst	*rqstp;
	unsigned int i;
	struct svc_pool *pool;

	for (i = 0; i < serv->sv_nrpools; i++) {
		pool = &serv->sv_pools[i];

		spin_lock_bh(&pool->sp_lock);
		if (!list_empty(&pool->sp_threads)) {
			rqstp = list_entry(pool->sp_threads.next,
					   struct svc_rqst,
					   rq_list);
			dprintk("svc: daemon %p woken up.\n", rqstp);
			/*
			svc_thread_dequeue(pool, rqstp);
			rqstp->rq_sock = NULL;
			 */
			wake_up(&rqstp->rq_wait);
		}
		spin_unlock_bh(&pool->sp_lock);
	}
}
/*
 * Generic sendto routine
 */
static int
svc_sendto(struct svc_rqst *rqstp, struct xdr_buf *xdr)
{
	struct svc_sock	*svsk = rqstp->rq_sock;
	struct socket	*sock = svsk->sk_sock;
	int		slen;
	char		buffer[CMSG_SPACE(sizeof(struct in_pktinfo))];
	struct cmsghdr *cmh = (struct cmsghdr *)buffer;
	struct in_pktinfo *pki = (struct in_pktinfo *)CMSG_DATA(cmh);
	int		len = 0;
	int		result;
	int		size;
	struct page	**ppage = xdr->pages;
	size_t		base = xdr->page_base;
	unsigned int	pglen = xdr->page_len;
	unsigned int	flags = MSG_MORE;

	slen = xdr->len;

	if (rqstp->rq_prot == IPPROTO_UDP) {
		/* set the source and destination */
		struct msghdr	msg;
		msg.msg_name    = &rqstp->rq_addr;
		msg.msg_namelen = sizeof(rqstp->rq_addr);
		msg.msg_iov     = NULL;
		msg.msg_iovlen  = 0;
		msg.msg_flags	= MSG_MORE;

		msg.msg_control = cmh;
		msg.msg_controllen = sizeof(buffer);
		cmh->cmsg_len = CMSG_LEN(sizeof(*pki));
		cmh->cmsg_level = SOL_IP;
		cmh->cmsg_type = IP_PKTINFO;
		pki->ipi_ifindex = 0;
		pki->ipi_spec_dst.s_addr = rqstp->rq_daddr;

		if (sock_sendmsg(sock, &msg, 0) < 0)
			goto out;
	}

	/* send head */
	if (slen == xdr->head[0].iov_len)
		flags = 0;
	len = kernel_sendpage(sock, rqstp->rq_respages[0], 0,
				  xdr->head[0].iov_len, flags);
	if (len != xdr->head[0].iov_len)
		goto out;
	slen -= xdr->head[0].iov_len;
	if (slen == 0)
		goto out;

	/* send page data */
	size = PAGE_SIZE - base < pglen ? PAGE_SIZE - base : pglen;
	while (pglen > 0) {
		if (slen == size)
			flags = 0;
		result = kernel_sendpage(sock, *ppage, base, size, flags);
		if (result > 0)
			len += result;
		if (result != size)
			goto out;
		slen -= size;
		pglen -= size;
		size = PAGE_SIZE < pglen ? PAGE_SIZE : pglen;
		base = 0;
		ppage++;
	}
	/* send tail */
	if (xdr->tail[0].iov_len) {
		result = kernel_sendpage(sock, rqstp->rq_respages[0],
					     ((unsigned long)xdr->tail[0].iov_base)
						& (PAGE_SIZE-1),
					     xdr->tail[0].iov_len, 0);

		if (result > 0)
			len += result;
	}
out:
	dprintk("svc: socket %p sendto([%p %Zu... ], %d) = %d (addr %x)\n",
			rqstp->rq_sock, xdr->head[0].iov_base, xdr->head[0].iov_len, xdr->len, len,
			rqstp->rq_addr.sin_addr.s_addr);

	return len;
}
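/*
 * The xdr_buf sent above has the usual sunrpc shape: a head kvec, an
 * array of whole pages, then a tail kvec.  Every piece except the last
 * is sent with MSG_MORE so the transport may coalesce them into as few
 * segments as possible.
 */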
/*
 * Report socket names for nfsdfs
 */
static int one_sock_name(char *buf, struct svc_sock *svsk)
{
	int len;

	switch(svsk->sk_sk->sk_family) {
	case AF_INET:
		len = sprintf(buf, "ipv4 %s %u.%u.%u.%u %d\n",
			      svsk->sk_sk->sk_protocol==IPPROTO_UDP?
			      "udp" : "tcp",
			      NIPQUAD(inet_sk(svsk->sk_sk)->rcv_saddr),
			      inet_sk(svsk->sk_sk)->num);
		break;
	default:
		len = sprintf(buf, "*unknown-%d*\n",
			       svsk->sk_sk->sk_family);
	}
	return len;
}
int
svc_sock_names(char *buf, struct svc_serv *serv, char *toclose)
{
	struct svc_sock *svsk, *closesk = NULL;
	int len = 0;

	if (!serv)
		return 0;
	spin_lock_bh(&serv->sv_lock);
	list_for_each_entry(svsk, &serv->sv_permsocks, sk_list) {
		int onelen = one_sock_name(buf+len, svsk);
		if (toclose && strcmp(toclose, buf+len) == 0)
			closesk = svsk;
		else
			len += onelen;
	}
	spin_unlock_bh(&serv->sv_lock);
	if (closesk)
		/* Should unregister with portmap, but you cannot
		 * unregister just one protocol...
		 */
		svc_close_socket(closesk);
	else if (toclose)
		return -ENOENT;
	return len;
}
EXPORT_SYMBOL(svc_sock_names);
/*
 * Check input queue length
 */
static int
svc_recv_available(struct svc_sock *svsk)
{
	struct socket	*sock = svsk->sk_sock;
	int		avail, err;

	err = kernel_sock_ioctl(sock, TIOCINQ, (unsigned long) &avail);

	return (err >= 0)? avail : err;
}
/*
 * Generic recvfrom routine.
 */
static int
svc_recvfrom(struct svc_rqst *rqstp, struct kvec *iov, int nr, int buflen)
{
	struct svc_sock *svsk = rqstp->rq_sock;
	struct msghdr	msg;
	struct socket	*sock;
	int		len;

	rqstp->rq_addrlen = sizeof(rqstp->rq_addr);
	sock = svsk->sk_sock;

	msg.msg_name    = &rqstp->rq_addr;
	msg.msg_namelen = sizeof(rqstp->rq_addr);
	msg.msg_control = NULL;
	msg.msg_controllen = 0;

	msg.msg_flags	= MSG_DONTWAIT;

	len = kernel_recvmsg(sock, &msg, iov, nr, buflen, MSG_DONTWAIT);

	/* sock_recvmsg doesn't fill in the name/namelen, so we must..
	 */
	memcpy(&rqstp->rq_addr, &svsk->sk_remote, svsk->sk_remotelen);
	rqstp->rq_addrlen = svsk->sk_remotelen;

	dprintk("svc: socket %p recvfrom(%p, %Zu) = %d\n",
		rqstp->rq_sock, iov[0].iov_base, iov[0].iov_len, len);

	return len;
}
/*
 * Set socket snd and rcv buffer lengths
 */
static inline void
svc_sock_setbufsize(struct socket *sock, unsigned int snd, unsigned int rcv)
{
#if 0
	mm_segment_t	oldfs;
	oldfs = get_fs(); set_fs(KERNEL_DS);
	sock_setsockopt(sock, SOL_SOCKET, SO_SNDBUF,
			(char*)&snd, sizeof(snd));
	sock_setsockopt(sock, SOL_SOCKET, SO_RCVBUF,
			(char*)&rcv, sizeof(rcv));
	set_fs(oldfs);
#else
	/* sock_setsockopt limits use to sysctl_?mem_max,
	 * which isn't acceptable.  Until that is made conditional
	 * on not having CAP_SYS_RESOURCE or similar, we go direct...
	 * DaveM said I could!
	 */
	lock_sock(sock->sk);
	sock->sk->sk_sndbuf = snd * 2;
	sock->sk->sk_rcvbuf = rcv * 2;
	sock->sk->sk_userlocks |= SOCK_SNDBUF_LOCK|SOCK_RCVBUF_LOCK;
	release_sock(sock->sk);
#endif
}
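/*
 * Callers size these buffers in units of sv_max_mesg: 3 * sv_max_mesg
 * at socket creation (svc_udp_init, svc_tcp_init) and, once the thread
 * count is known, (sv_nrthreads+3) * sv_max_mesg on the side that must
 * never stall (see svc_udp_recvfrom and svc_tcp_recvfrom).
 */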
/*
 * INET callback when data has been received on the socket.
 */
static void
svc_udp_data_ready(struct sock *sk, int count)
{
	struct svc_sock	*svsk = (struct svc_sock *)sk->sk_user_data;

	if (svsk) {
		dprintk("svc: socket %p(inet %p), count=%d, busy=%d\n",
			svsk, sk, count, test_bit(SK_BUSY, &svsk->sk_flags));
		set_bit(SK_DATA, &svsk->sk_flags);
		svc_sock_enqueue(svsk);
	}
	if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
		wake_up_interruptible(sk->sk_sleep);
}
/*
 * INET callback when space is newly available on the socket.
 */
static void
svc_write_space(struct sock *sk)
{
	struct svc_sock	*svsk = (struct svc_sock *)(sk->sk_user_data);

	if (svsk) {
		dprintk("svc: socket %p(inet %p), write_space busy=%d\n",
			svsk, sk, test_bit(SK_BUSY, &svsk->sk_flags));
		svc_sock_enqueue(svsk);
	}

	if (sk->sk_sleep && waitqueue_active(sk->sk_sleep)) {
		dprintk("RPC svc_write_space: someone sleeping on %p\n",
		       sk);
		wake_up_interruptible(sk->sk_sleep);
	}
}
/*
 * Receive a datagram from a UDP socket.
 */
static int
svc_udp_recvfrom(struct svc_rqst *rqstp)
{
	struct svc_sock	*svsk = rqstp->rq_sock;
	struct svc_serv	*serv = svsk->sk_server;
	struct sk_buff	*skb;
	int		err, len;

	if (test_and_clear_bit(SK_CHNGBUF, &svsk->sk_flags))
	    /* udp sockets need large rcvbuf as all pending
	     * requests are still in that buffer.  sndbuf must
	     * also be large enough that there is enough space
	     * for one reply per thread.  We count all threads
	     * rather than threads in a particular pool, which
	     * provides an upper bound on the number of threads
	     * which will access the socket.
	     */
	    svc_sock_setbufsize(svsk->sk_sock,
				(serv->sv_nrthreads+3) * serv->sv_max_mesg,
				(serv->sv_nrthreads+3) * serv->sv_max_mesg);

	if ((rqstp->rq_deferred = svc_deferred_dequeue(svsk))) {
		svc_sock_received(svsk);
		return svc_deferred_recv(rqstp);
	}

	if (test_bit(SK_CLOSE, &svsk->sk_flags)) {
		svc_delete_socket(svsk);
		return 0;
	}

	clear_bit(SK_DATA, &svsk->sk_flags);
	while ((skb = skb_recv_datagram(svsk->sk_sk, 0, 1, &err)) == NULL) {
		if (err == -EAGAIN) {
			svc_sock_received(svsk);
			return err;
		}
		/* possibly an icmp error */
		dprintk("svc: recvfrom returned error %d\n", -err);
	}
	if (skb->tstamp.off_sec == 0) {
		struct timeval tv;

		tv.tv_sec = xtime.tv_sec;
		tv.tv_usec = xtime.tv_nsec / NSEC_PER_USEC;
		skb_set_timestamp(skb, &tv);
		/* Don't enable netstamp, sunrpc doesn't
		   need that much accuracy */
	}
	skb_get_timestamp(skb, &svsk->sk_sk->sk_stamp);
	set_bit(SK_DATA, &svsk->sk_flags); /* there may be more data... */

	/*
	 * Maybe more packets - kick another thread ASAP.
	 */
	svc_sock_received(svsk);

	len = skb->len - sizeof(struct udphdr);
	rqstp->rq_arg.len = len;

	rqstp->rq_prot = IPPROTO_UDP;

	/* Get sender address */
	rqstp->rq_addr.sin_family = AF_INET;
	rqstp->rq_addr.sin_port = skb->h.uh->source;
	rqstp->rq_addr.sin_addr.s_addr = skb->nh.iph->saddr;
	rqstp->rq_daddr = skb->nh.iph->daddr;

	if (skb_is_nonlinear(skb)) {
		/* we have to copy */
		local_bh_disable();
		if (csum_partial_copy_to_xdr(&rqstp->rq_arg, skb)) {
			local_bh_enable();
			/* checksum error */
			skb_free_datagram(svsk->sk_sk, skb);
			return 0;
		}
		local_bh_enable();
		skb_free_datagram(svsk->sk_sk, skb);
	} else {
		/* we can use it in-place */
		rqstp->rq_arg.head[0].iov_base = skb->data + sizeof(struct udphdr);
		rqstp->rq_arg.head[0].iov_len = len;
		if (skb_checksum_complete(skb)) {
			skb_free_datagram(svsk->sk_sk, skb);
			return 0;
		}
		rqstp->rq_skbuff = skb;
	}

	rqstp->rq_arg.page_base = 0;
	if (len <= rqstp->rq_arg.head[0].iov_len) {
		rqstp->rq_arg.head[0].iov_len = len;
		rqstp->rq_arg.page_len = 0;
		rqstp->rq_respages = rqstp->rq_pages+1;
	} else {
		rqstp->rq_arg.page_len = len - rqstp->rq_arg.head[0].iov_len;
		rqstp->rq_respages = rqstp->rq_pages + 1 +
			(rqstp->rq_arg.page_len + PAGE_SIZE - 1)/ PAGE_SIZE;
	}

	if (serv->sv_stats)
		serv->sv_stats->netudpcnt++;

	return len;
}
static int
svc_udp_sendto(struct svc_rqst *rqstp)
{
	int		error;

	error = svc_sendto(rqstp, &rqstp->rq_res);
	if (error == -ECONNREFUSED)
		/* ICMP error on earlier request. */
		error = svc_sendto(rqstp, &rqstp->rq_res);

	return error;
}
static void
svc_udp_init(struct svc_sock *svsk)
{
	svsk->sk_sk->sk_data_ready = svc_udp_data_ready;
	svsk->sk_sk->sk_write_space = svc_write_space;
	svsk->sk_recvfrom = svc_udp_recvfrom;
	svsk->sk_sendto = svc_udp_sendto;

	/* initial setting: must have enough space to
	 * receive and respond to one request.
	 * svc_udp_recvfrom will re-adjust if necessary
	 */
	svc_sock_setbufsize(svsk->sk_sock,
			    3 * svsk->sk_server->sv_max_mesg,
			    3 * svsk->sk_server->sv_max_mesg);

	set_bit(SK_DATA, &svsk->sk_flags); /* might have come in before data_ready set up */
	set_bit(SK_CHNGBUF, &svsk->sk_flags);
}
/*
 * A data_ready event on a listening socket means there's a connection
 * pending. Do not use state_change as a substitute for it.
 */
static void
svc_tcp_listen_data_ready(struct sock *sk, int count_unused)
{
	struct svc_sock	*svsk = (struct svc_sock *)sk->sk_user_data;

	dprintk("svc: socket %p TCP (listen) state change %d\n",
		sk, sk->sk_state);

	/*
	 * This callback may be called twice when a new connection
	 * is established as a child socket inherits everything
	 * from a parent LISTEN socket.
	 * 1) data_ready method of the parent socket will be called
	 *    when one of the child sockets becomes ESTABLISHED.
	 * 2) data_ready method of the child socket may be called
	 *    when it receives data before the socket is accepted.
	 * In case of 2, we should ignore it silently.
	 */
	if (sk->sk_state == TCP_LISTEN) {
		if (svsk) {
			set_bit(SK_CONN, &svsk->sk_flags);
			svc_sock_enqueue(svsk);
		} else
			printk("svc: socket %p: no user data\n", sk);
	}

	if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
		wake_up_interruptible_all(sk->sk_sleep);
}
/*
 * A state change on a connected socket means it's dying or dead.
 */
static void
svc_tcp_state_change(struct sock *sk)
{
	struct svc_sock	*svsk = (struct svc_sock *)sk->sk_user_data;

	dprintk("svc: socket %p TCP (connected) state change %d (svsk %p)\n",
		sk, sk->sk_state, sk->sk_user_data);

	if (!svsk)
		printk("svc: socket %p: no user data\n", sk);
	else {
		set_bit(SK_CLOSE, &svsk->sk_flags);
		svc_sock_enqueue(svsk);
	}
	if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
		wake_up_interruptible_all(sk->sk_sleep);
}
static void
svc_tcp_data_ready(struct sock *sk, int count)
{
	struct svc_sock *svsk = (struct svc_sock *)sk->sk_user_data;

	dprintk("svc: socket %p TCP data ready (svsk %p)\n",
		sk, sk->sk_user_data);
	if (svsk) {
		set_bit(SK_DATA, &svsk->sk_flags);
		svc_sock_enqueue(svsk);
	}
	if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
		wake_up_interruptible(sk->sk_sleep);
}
/*
 * Accept a TCP connection
 */
static void
svc_tcp_accept(struct svc_sock *svsk)
{
	struct sockaddr_in sin;
	struct svc_serv	*serv = svsk->sk_server;
	struct socket	*sock = svsk->sk_sock;
	struct socket	*newsock;
	struct svc_sock	*newsvsk;
	int		err, slen;

	dprintk("svc: tcp_accept %p sock %p\n", svsk, sock);
	if (!sock)
		return;

	clear_bit(SK_CONN, &svsk->sk_flags);
	err = kernel_accept(sock, &newsock, O_NONBLOCK);
	if (err < 0) {
		if (err == -ENOMEM)
			printk(KERN_WARNING "%s: no more sockets!\n",
			       serv->sv_name);
		else if (err != -EAGAIN && net_ratelimit())
			printk(KERN_WARNING "%s: accept failed (err %d)!\n",
				   serv->sv_name, -err);
		return;
	}

	set_bit(SK_CONN, &svsk->sk_flags);
	svc_sock_enqueue(svsk);

	slen = sizeof(sin);
	err = kernel_getpeername(newsock, (struct sockaddr *) &sin, &slen);
	if (err < 0) {
		if (net_ratelimit())
			printk(KERN_WARNING "%s: peername failed (err %d)!\n",
				   serv->sv_name, -err);
		goto failed;		/* aborted connection or whatever */
	}

	/* Ideally, we would want to reject connections from unauthorized
	 * hosts here, but when we get encryption, the IP of the host won't
	 * tell us anything.  For now just warn about unpriv connections.
	 */
	if (ntohs(sin.sin_port) >= 1024) {
		dprintk(KERN_WARNING
			"%s: connect from unprivileged port: %u.%u.%u.%u:%d\n",
			serv->sv_name,
			NIPQUAD(sin.sin_addr.s_addr), ntohs(sin.sin_port));
	}

	dprintk("%s: connect from %u.%u.%u.%u:%04x\n", serv->sv_name,
			NIPQUAD(sin.sin_addr.s_addr), ntohs(sin.sin_port));

	/* make sure that a write doesn't block forever when
	 * low on memory
	 */
	newsock->sk->sk_sndtimeo = HZ*30;

	if (!(newsvsk = svc_setup_socket(serv, newsock, &err,
				 (SVC_SOCK_ANONYMOUS | SVC_SOCK_TEMPORARY))))
		goto failed;
	memcpy(&newsvsk->sk_remote, &sin, slen);
	newsvsk->sk_remotelen = slen;

	svc_sock_received(newsvsk);

	/* make sure that we don't have too many active connections.
	 * If we have, something must be dropped.
	 *
	 * There's no point in trying to do random drop here for
	 * DoS prevention. An NFS client does one reconnect in 15
	 * seconds. An attacker can easily beat that.
	 *
	 * The only somewhat efficient mechanism would be to drop
	 * old connections from the same IP first. But right now
	 * we don't even record the client IP in svc_sock.
	 */
	if (serv->sv_tmpcnt > (serv->sv_nrthreads+3)*20) {
		struct svc_sock *svsk = NULL;
		spin_lock_bh(&serv->sv_lock);
		if (!list_empty(&serv->sv_tempsocks)) {
			if (net_ratelimit()) {
				/* Try to help the admin */
				printk(KERN_NOTICE "%s: too many open TCP "
					"sockets, consider increasing the "
					"number of nfsd threads\n",
						   serv->sv_name);
				printk(KERN_NOTICE "%s: last TCP connect from "
					"%u.%u.%u.%u:%d\n",
					serv->sv_name,
					NIPQUAD(sin.sin_addr.s_addr),
					ntohs(sin.sin_port));
			}
			/*
			 * Always select the oldest socket. It's not fair,
			 * but so is life
			 */
			svsk = list_entry(serv->sv_tempsocks.prev,
					  struct svc_sock,
					  sk_list);
			set_bit(SK_CLOSE, &svsk->sk_flags);
			atomic_inc(&svsk->sk_inuse);
		}
		spin_unlock_bh(&serv->sv_lock);

		if (svsk) {
			svc_sock_enqueue(svsk);
			svc_sock_put(svsk);
		}
	}

	if (serv->sv_stats)
		serv->sv_stats->nettcpconn++;

	return;

failed:
	sock_release(newsock);
	return;
}
/*
 * Receive data from a TCP socket.
 */
static int
svc_tcp_recvfrom(struct svc_rqst *rqstp)
{
	struct svc_sock	*svsk = rqstp->rq_sock;
	struct svc_serv	*serv = svsk->sk_server;
	int		len;
	struct kvec *vec;
	int pnum, vlen;

	dprintk("svc: tcp_recv %p data %d conn %d close %d\n",
		svsk, test_bit(SK_DATA, &svsk->sk_flags),
		test_bit(SK_CONN, &svsk->sk_flags),
		test_bit(SK_CLOSE, &svsk->sk_flags));

	if ((rqstp->rq_deferred = svc_deferred_dequeue(svsk))) {
		svc_sock_received(svsk);
		return svc_deferred_recv(rqstp);
	}

	if (test_bit(SK_CLOSE, &svsk->sk_flags)) {
		svc_delete_socket(svsk);
		return 0;
	}

	if (svsk->sk_sk->sk_state == TCP_LISTEN) {
		svc_tcp_accept(svsk);
		svc_sock_received(svsk);
		return 0;
	}

	if (test_and_clear_bit(SK_CHNGBUF, &svsk->sk_flags))
		/* sndbuf needs to have room for one request
		 * per thread, otherwise we can stall even when the
		 * network isn't a bottleneck.
		 *
		 * We count all threads rather than threads in a
		 * particular pool, which provides an upper bound
		 * on the number of threads which will access the socket.
		 *
		 * rcvbuf just needs to be able to hold a few requests.
		 * Normally they will be removed from the queue
		 * as soon as a complete request arrives.
		 */
		svc_sock_setbufsize(svsk->sk_sock,
				    (serv->sv_nrthreads+3) * serv->sv_max_mesg,
				    3 * serv->sv_max_mesg);

	clear_bit(SK_DATA, &svsk->sk_flags);

	/* Receive data. If we haven't got the record length yet, get
	 * the next four bytes. Otherwise try to gobble up as much as
	 * possible up to the complete record length.
	 */
	if (svsk->sk_tcplen < 4) {
		unsigned long	want = 4 - svsk->sk_tcplen;
		struct kvec	iov;

		iov.iov_base = ((char *) &svsk->sk_reclen) + svsk->sk_tcplen;
		iov.iov_len  = want;
		if ((len = svc_recvfrom(rqstp, &iov, 1, want)) < 0)
			goto error;
		svsk->sk_tcplen += len;

		if (len < want) {
			dprintk("svc: short recvfrom while reading record length (%d of %lu)\n",
				len, want);
			svc_sock_received(svsk);
			return -EAGAIN; /* record header not complete */
		}
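		/* The four bytes just read are the RPC-over-TCP record
		 * marker (RFC 1831, section 10): the top bit marks the
		 * final fragment of a record and the low 31 bits give
		 * the fragment length, so e.g. 0x80000064 means "last
		 * fragment, 100 bytes follow".
		 */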
		svsk->sk_reclen = ntohl(svsk->sk_reclen);
		if (!(svsk->sk_reclen & 0x80000000)) {
			/* FIXME: technically, a record can be fragmented,
			 *  and non-terminal fragments will not have the top
			 *  bit set in the fragment length header.
			 *  But apparently no known nfs clients send fragmented
			 *  records. */
			if (net_ratelimit())
				printk(KERN_NOTICE "RPC: bad TCP reclen 0x%08lx"
				       " (non-terminal)\n",
				       (unsigned long) svsk->sk_reclen);
			goto err_delete;
		}
		svsk->sk_reclen &= 0x7fffffff;
		dprintk("svc: TCP record, %d bytes\n", svsk->sk_reclen);
		if (svsk->sk_reclen > serv->sv_max_mesg) {
			if (net_ratelimit())
				printk(KERN_NOTICE "RPC: bad TCP reclen 0x%08lx"
				       " (large)\n",
				       (unsigned long) svsk->sk_reclen);
			goto err_delete;
		}
	}

	/* Check whether enough data is available */
	len = svc_recv_available(svsk);
	if (len < 0)
		goto error;

	if (len < svsk->sk_reclen) {
		dprintk("svc: incomplete TCP record (%d of %d)\n",
			len, svsk->sk_reclen);
		svc_sock_received(svsk);
		return -EAGAIN;	/* record not complete */
	}
	len = svsk->sk_reclen;
	set_bit(SK_DATA, &svsk->sk_flags);

	vec = rqstp->rq_vec;
	vec[0] = rqstp->rq_arg.head[0];
	vlen = PAGE_SIZE;
	pnum = 1;
	while (vlen < len) {
		vec[pnum].iov_base = page_address(rqstp->rq_pages[pnum]);
		vec[pnum].iov_len = PAGE_SIZE;
		pnum++;
		vlen += PAGE_SIZE;
	}
	rqstp->rq_respages = &rqstp->rq_pages[pnum];

	/* Now receive data */
	len = svc_recvfrom(rqstp, vec, pnum, len);
	if (len < 0)
		goto error;

	dprintk("svc: TCP complete record (%d bytes)\n", len);
	rqstp->rq_arg.len = len;
	rqstp->rq_arg.page_base = 0;
	if (len <= rqstp->rq_arg.head[0].iov_len) {
		rqstp->rq_arg.head[0].iov_len = len;
		rqstp->rq_arg.page_len = 0;
	} else {
		rqstp->rq_arg.page_len = len - rqstp->rq_arg.head[0].iov_len;
	}

	rqstp->rq_skbuff      = NULL;
	rqstp->rq_prot	      = IPPROTO_TCP;

	/* Reset TCP read info */
	svsk->sk_reclen = 0;
	svsk->sk_tcplen = 0;

	svc_sock_received(svsk);
	if (serv->sv_stats)
		serv->sv_stats->nettcpcnt++;

	return len;

 err_delete:
	svc_delete_socket(svsk);
	return -EAGAIN;

 error:
	if (len == -EAGAIN) {
		dprintk("RPC: TCP recvfrom got EAGAIN\n");
		svc_sock_received(svsk);
	} else {
		printk(KERN_NOTICE "%s: recvfrom returned errno %d\n",
		       svsk->sk_server->sv_name, -len);
		goto err_delete;
	}

	return len;
}
/*
 * Send out data on TCP socket.
 */
static int
svc_tcp_sendto(struct svc_rqst *rqstp)
{
	struct xdr_buf	*xbufp = &rqstp->rq_res;
	int sent;
	__be32 reclen;

	/* Set up the first element of the reply kvec.
	 * Any other kvecs that may be in use have been taken
	 * care of by the server implementation itself.
	 */
	reclen = htonl(0x80000000|((xbufp->len ) - 4));
	memcpy(xbufp->head[0].iov_base, &reclen, 4);
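	/* Worked example: for a 132-byte response buffer (xbufp->len
	 * counts the four marker bytes themselves), the marker is
	 * 0x80000000 | 128: a single final fragment carrying a
	 * 128-byte RPC reply.
	 */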
	if (test_bit(SK_DEAD, &rqstp->rq_sock->sk_flags))
		return -ENOTCONN;

	sent = svc_sendto(rqstp, &rqstp->rq_res);
	if (sent != xbufp->len) {
		printk(KERN_NOTICE "rpc-srv/tcp: %s: %s %d when sending %d bytes - shutting down socket\n",
		       rqstp->rq_sock->sk_server->sv_name,
		       (sent<0)?"got error":"sent only",
		       sent, xbufp->len);
		set_bit(SK_CLOSE, &rqstp->rq_sock->sk_flags);
		svc_sock_enqueue(rqstp->rq_sock);
		sent = -EAGAIN;
	}
	return sent;
}
static void
svc_tcp_init(struct svc_sock *svsk)
{
	struct sock	*sk = svsk->sk_sk;
	struct tcp_sock *tp = tcp_sk(sk);

	svsk->sk_recvfrom = svc_tcp_recvfrom;
	svsk->sk_sendto = svc_tcp_sendto;

	if (sk->sk_state == TCP_LISTEN) {
		dprintk("setting up TCP socket for listening\n");
		sk->sk_data_ready = svc_tcp_listen_data_ready;
		set_bit(SK_CONN, &svsk->sk_flags);
	} else {
		dprintk("setting up TCP socket for reading\n");
		sk->sk_state_change = svc_tcp_state_change;
		sk->sk_data_ready = svc_tcp_data_ready;
		sk->sk_write_space = svc_write_space;

		svsk->sk_reclen = 0;
		svsk->sk_tcplen = 0;

		tp->nonagle = 1;	/* disable Nagle's algorithm */

		/* initial setting: must have enough space to
		 * receive and respond to one request.
		 * svc_tcp_recvfrom will re-adjust if necessary
		 */
		svc_sock_setbufsize(svsk->sk_sock,
				    3 * svsk->sk_server->sv_max_mesg,
				    3 * svsk->sk_server->sv_max_mesg);

		set_bit(SK_CHNGBUF, &svsk->sk_flags);
		set_bit(SK_DATA, &svsk->sk_flags);
		if (sk->sk_state != TCP_ESTABLISHED)
			set_bit(SK_CLOSE, &svsk->sk_flags);
	}
}
void
svc_sock_update_bufs(struct svc_serv *serv)
{
	/*
	 * The number of server threads has changed. Update
	 * rcvbuf and sndbuf accordingly on all sockets
	 */
	struct list_head *le;

	spin_lock_bh(&serv->sv_lock);
	list_for_each(le, &serv->sv_permsocks) {
		struct svc_sock *svsk =
			list_entry(le, struct svc_sock, sk_list);
		set_bit(SK_CHNGBUF, &svsk->sk_flags);
	}
	list_for_each(le, &serv->sv_tempsocks) {
		struct svc_sock *svsk =
			list_entry(le, struct svc_sock, sk_list);
		set_bit(SK_CHNGBUF, &svsk->sk_flags);
	}
	spin_unlock_bh(&serv->sv_lock);
}
/*
 * Receive the next request on any socket.  This code is carefully
 * organised not to touch any cachelines in the shared svc_serv
 * structure, only cachelines in the local svc_pool.
 */
int
svc_recv(struct svc_rqst *rqstp, long timeout)
{
	struct svc_sock		*svsk = NULL;
	struct svc_serv		*serv = rqstp->rq_server;
	struct svc_pool		*pool = rqstp->rq_pool;
	int			len, i;
	int			pages;
	struct xdr_buf		*arg;
	DECLARE_WAITQUEUE(wait, current);

	dprintk("svc: server %p waiting for data (to = %ld)\n",
		rqstp, timeout);

	if (rqstp->rq_sock)
		printk(KERN_ERR
			"svc_recv: service %p, socket not NULL!\n",
			 rqstp);
	if (waitqueue_active(&rqstp->rq_wait))
		printk(KERN_ERR
			"svc_recv: service %p, wait queue active!\n",
			 rqstp);

	/* now allocate needed pages.  If we get a failure, sleep briefly */
	pages = (serv->sv_max_mesg + PAGE_SIZE) / PAGE_SIZE;
	for (i = 0; i < pages; i++)
		while (rqstp->rq_pages[i] == NULL) {
			struct page *p = alloc_page(GFP_KERNEL);
			if (!p)
				schedule_timeout_uninterruptible(msecs_to_jiffies(500));
			rqstp->rq_pages[i] = p;
		}
	rqstp->rq_pages[i++] = NULL; /* this might be seen in nfs_read_actor */
	BUG_ON(pages >= RPCSVC_MAXPAGES);

	/* Make arg->head point to first page and arg->pages point to rest */
	arg = &rqstp->rq_arg;
	arg->head[0].iov_base = page_address(rqstp->rq_pages[0]);
	arg->head[0].iov_len = PAGE_SIZE;
	arg->pages = rqstp->rq_pages + 1;
	arg->page_base = 0;
	/* save at least one page for response */
	arg->page_len = (pages-2)*PAGE_SIZE;
	arg->len = (pages-1)*PAGE_SIZE;
	arg->tail[0].iov_len = 0;
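	/* Worked example (with hypothetical values PAGE_SIZE = 4096 and
	 * sv_max_mesg = 32768): pages = 9; page 0 backs arg->head,
	 * pages 1..8 back arg->pages, arg->page_len = 7*4096 and
	 * arg->len = 8*4096, leaving one page spare for the response.
	 */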
	try_to_freeze();
	cond_resched();
	if (signalled())
		return -EINTR;

	spin_lock_bh(&pool->sp_lock);
	if ((svsk = svc_sock_dequeue(pool)) != NULL) {
		rqstp->rq_sock = svsk;
		atomic_inc(&svsk->sk_inuse);
		rqstp->rq_reserved = serv->sv_max_mesg;
		atomic_add(rqstp->rq_reserved, &svsk->sk_reserved);
	} else {
		/* No data pending. Go to sleep */
		svc_thread_enqueue(pool, rqstp);

		/*
		 * We have to be able to interrupt this wait
		 * to bring down the daemons ...
		 */
		set_current_state(TASK_INTERRUPTIBLE);
		add_wait_queue(&rqstp->rq_wait, &wait);
		spin_unlock_bh(&pool->sp_lock);

		schedule_timeout(timeout);

		try_to_freeze();

		spin_lock_bh(&pool->sp_lock);
		remove_wait_queue(&rqstp->rq_wait, &wait);

		if (!(svsk = rqstp->rq_sock)) {
			svc_thread_dequeue(pool, rqstp);
			spin_unlock_bh(&pool->sp_lock);
			dprintk("svc: server %p, no data yet\n", rqstp);
			return signalled()? -EINTR : -EAGAIN;
		}
	}
	spin_unlock_bh(&pool->sp_lock);

	dprintk("svc: server %p, pool %u, socket %p, inuse=%d\n",
		 rqstp, pool->sp_id, svsk, atomic_read(&svsk->sk_inuse));
	len = svsk->sk_recvfrom(rqstp);
	dprintk("svc: got len=%d\n", len);

	/* No data, incomplete (TCP) read, or accept() */
	if (len == 0 || len == -EAGAIN) {
		rqstp->rq_res.len = 0;
		svc_sock_release(rqstp);
		return -EAGAIN;
	}
	svsk->sk_lastrecv = get_seconds();
	clear_bit(SK_OLD, &svsk->sk_flags);

	rqstp->rq_secure = ntohs(rqstp->rq_addr.sin_port) < 1024;
	rqstp->rq_chandle.defer = svc_defer;

	if (serv->sv_stats)
		serv->sv_stats->netcnt++;
	return len;
}
/*
 * Drop request
 */
void
svc_drop(struct svc_rqst *rqstp)
{
	dprintk("svc: socket %p dropped request\n", rqstp->rq_sock);
	svc_sock_release(rqstp);
}
/*
 * Return reply to client.
 */
int
svc_send(struct svc_rqst *rqstp)
{
	struct svc_sock	*svsk;
	int		len;
	struct xdr_buf	*xb;

	if ((svsk = rqstp->rq_sock) == NULL) {
		printk(KERN_WARNING "NULL socket pointer in %s:%d\n",
				__FILE__, __LINE__);
		return -EFAULT;
	}

	/* release the receive skb before sending the reply */
	svc_release_skb(rqstp);

	/* calculate over-all length */
	xb = & rqstp->rq_res;
	xb->len = xb->head[0].iov_len +
		xb->page_len +
		xb->tail[0].iov_len;

	/* Grab svsk->sk_mutex to serialize outgoing data. */
	mutex_lock(&svsk->sk_mutex);
	if (test_bit(SK_DEAD, &svsk->sk_flags))
		len = -ENOTCONN;
	else
		len = svsk->sk_sendto(rqstp);
	mutex_unlock(&svsk->sk_mutex);
	svc_sock_release(rqstp);

	if (len == -ECONNREFUSED || len == -ENOTCONN || len == -EAGAIN)
		return 0;
	return len;
}
/*
 * Timer function to close old temporary sockets, using
 * a mark-and-sweep algorithm.
 */
static void
svc_age_temp_sockets(unsigned long closure)
{
	struct svc_serv *serv = (struct svc_serv *)closure;
	struct svc_sock *svsk;
	struct list_head *le, *next;
	LIST_HEAD(to_be_aged);

	dprintk("svc_age_temp_sockets\n");

	if (!spin_trylock_bh(&serv->sv_lock)) {
		/* busy, try again 1 sec later */
		dprintk("svc_age_temp_sockets: busy\n");
		mod_timer(&serv->sv_temptimer, jiffies + HZ);
		return;
	}

	list_for_each_safe(le, next, &serv->sv_tempsocks) {
		svsk = list_entry(le, struct svc_sock, sk_list);

		if (!test_and_set_bit(SK_OLD, &svsk->sk_flags))
			continue;
		/* sk_inuse carries a bias of 1 while the socket is alive,
		 * so anything above 1 means a thread still holds it */
		if (atomic_read(&svsk->sk_inuse) > 1 || test_bit(SK_BUSY, &svsk->sk_flags))
			continue;
		atomic_inc(&svsk->sk_inuse);
		list_move(le, &to_be_aged);
		set_bit(SK_CLOSE, &svsk->sk_flags);
		set_bit(SK_DETACHED, &svsk->sk_flags);
	}
	spin_unlock_bh(&serv->sv_lock);

	while (!list_empty(&to_be_aged)) {
		le = to_be_aged.next;
		/* fiddling the sk_list node is safe 'cos we're SK_DETACHED */
		list_del_init(le);
		svsk = list_entry(le, struct svc_sock, sk_list);

		dprintk("queuing svsk %p for closing, %lu seconds old\n",
			svsk, get_seconds() - svsk->sk_lastrecv);

		/* a thread will dequeue and close it soon */
		svc_sock_enqueue(svsk);
		svc_sock_put(svsk);
	}

	mod_timer(&serv->sv_temptimer, jiffies + svc_conn_age_period * HZ);
}
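/*
 * The mark-and-sweep works because svc_recv() clears SK_OLD on every
 * successful receive: the first timer pass marks each temp socket
 * SK_OLD, and only a socket still marked on the next pass, that is,
 * one idle for a full svc_conn_age_period, gets queued for closing.
 */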
/*
 * Initialize socket for RPC use and create svc_sock struct
 * XXX: May want to setsockopt SO_SNDBUF and SO_RCVBUF.
 */
static struct svc_sock *svc_setup_socket(struct svc_serv *serv,
						struct socket *sock,
						int *errp, int flags)
{
	struct svc_sock	*svsk;
	struct sock	*inet;
	int		pmap_register = !(flags & SVC_SOCK_ANONYMOUS);
	int		is_temporary = flags & SVC_SOCK_TEMPORARY;

	dprintk("svc: svc_setup_socket %p\n", sock);
	if (!(svsk = kzalloc(sizeof(*svsk), GFP_KERNEL))) {
		*errp = -ENOMEM;
		return NULL;
	}

	inet = sock->sk;

	/* Register socket with portmapper */
	if (*errp >= 0 && pmap_register)
		*errp = svc_register(serv, inet->sk_protocol,
				     ntohs(inet_sk(inet)->sport));

	if (*errp < 0) {
		kfree(svsk);
		return NULL;
	}

	set_bit(SK_BUSY, &svsk->sk_flags);
	inet->sk_user_data = svsk;
	svsk->sk_sock = sock;
	svsk->sk_sk = inet;
	svsk->sk_ostate = inet->sk_state_change;
	svsk->sk_odata = inet->sk_data_ready;
	svsk->sk_owspace = inet->sk_write_space;
	svsk->sk_server = serv;
	atomic_set(&svsk->sk_inuse, 1);
	svsk->sk_lastrecv = get_seconds();
	spin_lock_init(&svsk->sk_defer_lock);
	INIT_LIST_HEAD(&svsk->sk_deferred);
	INIT_LIST_HEAD(&svsk->sk_ready);
	mutex_init(&svsk->sk_mutex);

	/* Initialize the socket */
	if (sock->type == SOCK_DGRAM)
		svc_udp_init(svsk);
	else
		svc_tcp_init(svsk);

	spin_lock_bh(&serv->sv_lock);
	if (is_temporary) {
		set_bit(SK_TEMP, &svsk->sk_flags);
		list_add(&svsk->sk_list, &serv->sv_tempsocks);
		serv->sv_tmpcnt++;
		if (serv->sv_temptimer.function == NULL) {
			/* setup timer to age temp sockets */
			setup_timer(&serv->sv_temptimer, svc_age_temp_sockets,
					(unsigned long)serv);
			mod_timer(&serv->sv_temptimer,
					jiffies + svc_conn_age_period * HZ);
		}
	} else {
		clear_bit(SK_TEMP, &svsk->sk_flags);
		list_add(&svsk->sk_list, &serv->sv_permsocks);
	}
	spin_unlock_bh(&serv->sv_lock);

	dprintk("svc: svc_setup_socket created %p (inet %p)\n",
				svsk, svsk->sk_sk);

	return svsk;
}
int svc_addsock(struct svc_serv *serv,
		int fd,
		char *name_return,
		int *proto)
{
	int err = 0;
	struct socket *so = sockfd_lookup(fd, &err);
	struct svc_sock *svsk = NULL;

	if (!so)
		return err;
	if (so->sk->sk_family != AF_INET)
		err = -EAFNOSUPPORT;
	else if (so->sk->sk_protocol != IPPROTO_TCP &&
	    so->sk->sk_protocol != IPPROTO_UDP)
		err = -EPROTONOSUPPORT;
	else if (so->state > SS_UNCONNECTED)
		err = -EISCONN;
	else {
		svsk = svc_setup_socket(serv, so, &err, SVC_SOCK_DEFAULTS);
		if (svsk)
			svc_sock_received(svsk);
	}
	if (err) {
		sockfd_put(so);
		return err;
	}
	if (proto) *proto = so->sk->sk_protocol;
	return one_sock_name(name_return, svsk);
}
EXPORT_SYMBOL_GPL(svc_addsock);
/*
 * Create socket for RPC service.
 */
static int svc_create_socket(struct svc_serv *serv, int protocol,
				struct sockaddr_in *sin, int flags)
{
	struct svc_sock	*svsk;
	struct socket	*sock;
	int		error;
	int		type;

	dprintk("svc: svc_create_socket(%s, %d, %u.%u.%u.%u:%d)\n",
				serv->sv_program->pg_name, protocol,
				NIPQUAD(sin->sin_addr.s_addr),
				ntohs(sin->sin_port));

	if (protocol != IPPROTO_UDP && protocol != IPPROTO_TCP) {
		printk(KERN_WARNING "svc: only UDP and TCP "
				"sockets supported\n");
		return -EINVAL;
	}
	type = (protocol == IPPROTO_UDP)? SOCK_DGRAM : SOCK_STREAM;

	if ((error = sock_create_kern(PF_INET, type, protocol, &sock)) < 0)
		return error;

	svc_reclassify_socket(sock);

	if (type == SOCK_STREAM)
		sock->sk->sk_reuse = 1;		/* allow address reuse */
	error = kernel_bind(sock, (struct sockaddr *) sin,
					sizeof(*sin));
	if (error < 0)
		goto bummer;

	if (protocol == IPPROTO_TCP) {
		if ((error = kernel_listen(sock, 64)) < 0)
			goto bummer;
	}

	if ((svsk = svc_setup_socket(serv, sock, &error, flags)) != NULL) {
		svc_sock_received(svsk);
		return ntohs(inet_sk(svsk->sk_sk)->sport);
	}

bummer:
	dprintk("svc: svc_create_socket error = %d\n", -error);
	sock_release(sock);
	return error;
}
/*
 * Remove a dead socket
 */
static void
svc_delete_socket(struct svc_sock *svsk)
{
	struct svc_serv	*serv;
	struct sock	*sk;

	dprintk("svc: svc_delete_socket(%p)\n", svsk);

	serv = svsk->sk_server;
	sk = svsk->sk_sk;

	sk->sk_state_change = svsk->sk_ostate;
	sk->sk_data_ready = svsk->sk_odata;
	sk->sk_write_space = svsk->sk_owspace;

	spin_lock_bh(&serv->sv_lock);

	if (!test_and_set_bit(SK_DETACHED, &svsk->sk_flags))
		list_del_init(&svsk->sk_list);
	/*
	 * We used to delete the svc_sock from whichever list
	 * its sk_ready node was on, but we don't actually
	 * need to.  This is because the only time we're called
	 * while still attached to a queue, the queue itself
	 * is about to be destroyed (in svc_destroy).
	 */
	if (!test_and_set_bit(SK_DEAD, &svsk->sk_flags)) {
		BUG_ON(atomic_read(&svsk->sk_inuse)<2);
		atomic_dec(&svsk->sk_inuse);
		if (test_bit(SK_TEMP, &svsk->sk_flags))
			serv->sv_tmpcnt--;
	}

	spin_unlock_bh(&serv->sv_lock);
}
void svc_close_socket(struct svc_sock *svsk)
{
	set_bit(SK_CLOSE, &svsk->sk_flags);
	if (test_and_set_bit(SK_BUSY, &svsk->sk_flags))
		/* someone else will have to effect the close */
		return;

	atomic_inc(&svsk->sk_inuse);
	svc_delete_socket(svsk);
	clear_bit(SK_BUSY, &svsk->sk_flags);
	svc_sock_put(svsk);
}
/**
 * svc_makesock - Make a socket for nfsd and lockd
 * @serv: RPC server structure
 * @protocol: transport protocol to use
 * @port: port to use
 * @flags: requested socket characteristics
 *
 */
int svc_makesock(struct svc_serv *serv, int protocol, unsigned short port,
			int flags)
{
	struct sockaddr_in sin = {
		.sin_family		= AF_INET,
		.sin_addr.s_addr	= INADDR_ANY,
		.sin_port		= htons(port),
	};

	dprintk("svc: creating socket proto = %d\n", protocol);
	return svc_create_socket(serv, protocol, &sin, flags);
}
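/*
 * Usage sketch (the port value here is hypothetical): a service brings
 * up a listener with a call such as
 *
 *	err = svc_makesock(serv, IPPROTO_UDP, 0, SVC_SOCK_DEFAULTS);
 *
 * where port 0 lets the stack choose; the port actually bound is
 * returned as a positive value by svc_create_socket().
 */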
/*
 * Handle defer and revisit of requests
 */

static void svc_revisit(struct cache_deferred_req *dreq, int too_many)
{
	struct svc_deferred_req *dr = container_of(dreq, struct svc_deferred_req, handle);
	struct svc_sock *svsk;

	if (too_many) {
		svc_sock_put(dr->svsk);
		kfree(dr);
		return;
	}
	dprintk("revisit queued\n");
	svsk = dr->svsk;
	dr->svsk = NULL;
	spin_lock_bh(&svsk->sk_defer_lock);
	list_add(&dr->handle.recent, &svsk->sk_deferred);
	spin_unlock_bh(&svsk->sk_defer_lock);
	set_bit(SK_DEFERRED, &svsk->sk_flags);
	svc_sock_enqueue(svsk);
	svc_sock_put(svsk);
}
static struct cache_deferred_req *
svc_defer(struct cache_req *req)
{
	struct svc_rqst *rqstp = container_of(req, struct svc_rqst, rq_chandle);
	int size = sizeof(struct svc_deferred_req) + (rqstp->rq_arg.len);
	struct svc_deferred_req *dr;

	if (rqstp->rq_arg.page_len)
		return NULL; /* if more than a page, give up FIXME */
	if (rqstp->rq_deferred) {
		dr = rqstp->rq_deferred;
		rqstp->rq_deferred = NULL;
	} else {
		int skip = rqstp->rq_arg.len - rqstp->rq_arg.head[0].iov_len;
		/* FIXME maybe discard if size too large */
		dr = kmalloc(size, GFP_KERNEL);
		if (dr == NULL)
			return NULL;

		dr->handle.owner = rqstp->rq_server;
		dr->prot = rqstp->rq_prot;
		dr->addr = rqstp->rq_addr;
		dr->daddr = rqstp->rq_daddr;
		dr->argslen = rqstp->rq_arg.len >> 2;
		memcpy(dr->args, rqstp->rq_arg.head[0].iov_base-skip, dr->argslen<<2);
	}
	atomic_inc(&rqstp->rq_sock->sk_inuse);
	dr->svsk = rqstp->rq_sock;

	dr->handle.revisit = svc_revisit;
	return &dr->handle;
}
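/*
 * Lifecycle of a deferral: svc_defer() snapshots the (at most one page)
 * request and pins the socket via sk_inuse.  When the cache later calls
 * svc_revisit(), the snapshot is queued on sk_deferred and SK_DEFERRED
 * is set, so the next svc_udp_recvfrom/svc_tcp_recvfrom on that socket
 * replays it through svc_deferred_dequeue() and svc_deferred_recv()
 * instead of reading from the wire.
 */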
/*
 * recv data from a deferred request into an active one
 */
static int svc_deferred_recv(struct svc_rqst *rqstp)
{
	struct svc_deferred_req *dr = rqstp->rq_deferred;

	rqstp->rq_arg.head[0].iov_base = dr->args;
	rqstp->rq_arg.head[0].iov_len = dr->argslen<<2;
	rqstp->rq_arg.page_len = 0;
	rqstp->rq_arg.len = dr->argslen<<2;
	rqstp->rq_prot        = dr->prot;
	rqstp->rq_addr        = dr->addr;
	rqstp->rq_daddr       = dr->daddr;
	rqstp->rq_respages    = rqstp->rq_pages;
	return dr->argslen<<2;
}
static struct svc_deferred_req *svc_deferred_dequeue(struct svc_sock *svsk)
{
	struct svc_deferred_req *dr = NULL;

	if (!test_bit(SK_DEFERRED, &svsk->sk_flags))
		return NULL;
	spin_lock_bh(&svsk->sk_defer_lock);
	clear_bit(SK_DEFERRED, &svsk->sk_flags);
	if (!list_empty(&svsk->sk_deferred)) {
		dr = list_entry(svsk->sk_deferred.next,
				struct svc_deferred_req,
				handle.recent);
		list_del_init(&dr->handle.recent);
		set_bit(SK_DEFERRED, &svsk->sk_flags);
	}
	spin_unlock_bh(&svsk->sk_defer_lock);
	return dr;
}