2 * IPVS An implementation of the IP virtual server support for the
3 * LINUX operating system. IPVS is now implemented as a module
4 * over the NetFilter framework. IPVS can be used to build a
5 * high-performance and highly available server based on a
8 * Version 1, is capable of handling both version 0 and 1 messages.
9 * Version 0 is the plain old format.
10 * Note Version 0 receivers will just drop Ver 1 messages.
11 * Version 1 is capable of handling IPv6, persistence data,
12 * time-outs, and firewall marks.
13 * In ver.1 "ip_vs_sync_conn_options" will be sent in netw. order.
14 * Ver. 0 can be turned on by sysctl -w net.ipv4.vs.sync_version=0
16 * Definitions Message: is a complete datagram
17 * Sync_conn: is a part of a Message
18 * Param Data is an option to a Sync_conn.
20 * Authors: Wensong Zhang <wensong@linuxvirtualserver.org>
22 * ip_vs_sync: sync connection info from master load balancer to backups
26 * Alexandre Cassen : Added master & backup support at a time.
27 * Alexandre Cassen : Added SyncID support for incoming sync
29 * Justin Ossevoort : Fix endian problem on sync message size.
30 * Hans Schillstrom : Added Version 1: i.e. IPv6,
31 * Persistence support, fwmark and time-out.
34 #define KMSG_COMPONENT "IPVS"
35 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
37 #include <linux/module.h>
38 #include <linux/slab.h>
39 #include <linux/inetdevice.h>
40 #include <linux/net.h>
41 #include <linux/completion.h>
42 #include <linux/delay.h>
43 #include <linux/skbuff.h>
45 #include <linux/igmp.h> /* for ip_mc_join_group */
46 #include <linux/udp.h>
47 #include <linux/err.h>
48 #include <linux/kthread.h>
49 #include <linux/wait.h>
50 #include <linux/kernel.h>
52 #include <asm/unaligned.h> /* Used for ntoh_seq and hton_seq */
57 #include <net/ip_vs.h>
59 #define IP_VS_SYNC_GROUP 0xe0000051 /* multicast addr - 224.0.0.81 */
60 #define IP_VS_SYNC_PORT 8848 /* multicast port */
62 #define SYNC_PROTO_VER 1 /* Protocol version in header */
64 static struct lock_class_key __ipvs_sync_key;
66 * IPVS sync connection entry
67 * Version 0, i.e. original version.
69 struct ip_vs_sync_conn_v0 {
72 /* Protocol, addresses and port numbers */
73 __u8 protocol; /* Which protocol (TCP/UDP) */
77 __be32 caddr; /* client address */
78 __be32 vaddr; /* virtual address */
79 __be32 daddr; /* destination address */
81 /* Flags and state transition */
82 __be16 flags; /* status flags */
83 __be16 state; /* state info */
85 /* The sequence options start here */
88 struct ip_vs_sync_conn_options {
89 struct ip_vs_seq in_seq; /* incoming seq. struct */
90 struct ip_vs_seq out_seq; /* outgoing seq. struct */
94 Sync Connection format (sync_conn)
97 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
98 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
99 | Type | Protocol | Ver. | Size |
100 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
102 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
104 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
106 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
108 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
109 | timeout (in sec.) |
110 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
112 | IP-Addresses (v4 or v6) |
114 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
116 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
117 | Param. Type | Param. Length | Param. data |
118 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
120 | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
121 | | Param Type | Param. Length |
122 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
124 | Last Param data should be padded for 32 bit alignment |
125 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
129 * Type 0, IPv4 sync connection format
131 struct ip_vs_sync_v4 {
133 __u8 protocol; /* Which protocol (TCP/UDP) */
134 __be16 ver_size; /* Version msb 4 bits */
135 /* Flags and state transition */
136 __be32 flags; /* status flags */
137 __be16 state; /* state info */
138 /* Protocol, addresses and port numbers */
142 __be32 fwmark; /* Firewall mark from skb */
143 __be32 timeout; /* cp timeout */
144 __be32 caddr; /* client address */
145 __be32 vaddr; /* virtual address */
146 __be32 daddr; /* destination address */
147 /* The sequence options start here */
148 /* PE data padded to 32bit alignment after seq. options */
151 * Type 2 messages IPv6
153 struct ip_vs_sync_v6 {
155 __u8 protocol; /* Which protocol (TCP/UDP) */
156 __be16 ver_size; /* Version msb 4 bits */
157 /* Flags and state transition */
158 __be32 flags; /* status flags */
159 __be16 state; /* state info */
160 /* Protocol, addresses and port numbers */
164 __be32 fwmark; /* Firewall mark from skb */
165 __be32 timeout; /* cp timeout */
166 struct in6_addr caddr; /* client address */
167 struct in6_addr vaddr; /* virtual address */
168 struct in6_addr daddr; /* destination address */
169 /* The sequence options start here */
170 /* PE data padded to 32bit alignment after seq. options */
173 union ip_vs_sync_conn {
174 struct ip_vs_sync_v4 v4;
175 struct ip_vs_sync_v6 v6;
178 /* Bits in Type field in above */
179 #define STYPE_INET6 0
180 #define STYPE_F_INET6 (1 << STYPE_INET6)
182 #define SVER_SHIFT 12 /* Shift to get version */
183 #define SVER_MASK 0x0fff /* Mask to strip version */
185 #define IPVS_OPT_SEQ_DATA 1
186 #define IPVS_OPT_PE_DATA 2
187 #define IPVS_OPT_PE_NAME 3
188 #define IPVS_OPT_PARAM 7
190 #define IPVS_OPT_F_SEQ_DATA (1 << (IPVS_OPT_SEQ_DATA-1))
191 #define IPVS_OPT_F_PE_DATA (1 << (IPVS_OPT_PE_DATA-1))
192 #define IPVS_OPT_F_PE_NAME (1 << (IPVS_OPT_PE_NAME-1))
193 #define IPVS_OPT_F_PARAM (1 << (IPVS_OPT_PARAM-1))
195 struct ip_vs_sync_thread_data {
201 /* Version 0 definition of packet sizes */
202 #define SIMPLE_CONN_SIZE (sizeof(struct ip_vs_sync_conn_v0))
203 #define FULL_CONN_SIZE \
204 (sizeof(struct ip_vs_sync_conn_v0) + sizeof(struct ip_vs_sync_conn_options))
208 The master multicasts messages (Datagrams) to the backup load balancers
209 in the following format.
212 Note, first byte should be Zero, so ver 0 receivers will drop the packet.
215 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
216 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
217 | 0 | SyncID | Size |
218 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
219 | Count Conns | Version | Reserved, set to Zero |
220 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
222 | IPVS Sync Connection (1) |
223 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
227 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
229 | IPVS Sync Connection (n) |
230 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
234 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
235 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
236 | Count Conns | SyncID | Size |
237 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
238 | IPVS Sync Connection (1) |
241 #define SYNC_MESG_HEADER_LEN 4
242 #define MAX_CONNS_PER_SYNCBUFF 255 /* nr_conns in ip_vs_sync_mesg is 8 bit */
244 /* Version 0 header */
245 struct ip_vs_sync_mesg_v0 {
250 /* ip_vs_sync_conn entries start here */
253 /* Version 1 header */
254 struct ip_vs_sync_mesg {
255 __u8 reserved; /* must be zero */
259 __s8 version; /* SYNC_PROTO_VER */
261 /* ip_vs_sync_conn entries start here */
264 struct ip_vs_sync_buff {
265 struct list_head list;
266 unsigned long firstuse;
268 /* pointers for the message data */
269 struct ip_vs_sync_mesg *mesg;
275 static struct sockaddr_in mcast_addr = {
276 .sin_family = AF_INET,
277 .sin_port = cpu_to_be16(IP_VS_SYNC_PORT),
278 .sin_addr.s_addr = cpu_to_be32(IP_VS_SYNC_GROUP),
282 * Copy of struct ip_vs_seq
283 * From unaligned network order to aligned host order
285 static void ntoh_seq(struct ip_vs_seq *no, struct ip_vs_seq *ho)
287 ho->init_seq = get_unaligned_be32(&no->init_seq);
288 ho->delta = get_unaligned_be32(&no->delta);
289 ho->previous_delta = get_unaligned_be32(&no->previous_delta);
293 * Copy of struct ip_vs_seq
294 * From Aligned host order to unaligned network order
296 static void hton_seq(struct ip_vs_seq *ho, struct ip_vs_seq *no)
298 put_unaligned_be32(ho->init_seq, &no->init_seq);
299 put_unaligned_be32(ho->delta, &no->delta);
300 put_unaligned_be32(ho->previous_delta, &no->previous_delta);
303 static inline struct ip_vs_sync_buff *sb_dequeue(struct netns_ipvs *ipvs)
305 struct ip_vs_sync_buff *sb;
307 spin_lock_bh(&ipvs->sync_lock);
308 if (list_empty(&ipvs->sync_queue)) {
310 __set_current_state(TASK_INTERRUPTIBLE);
312 sb = list_entry(ipvs->sync_queue.next,
313 struct ip_vs_sync_buff,
316 ipvs->sync_queue_len--;
317 if (!ipvs->sync_queue_len)
318 ipvs->sync_queue_delay = 0;
320 spin_unlock_bh(&ipvs->sync_lock);
326 * Create a new sync buffer for Version 1 proto.
328 static inline struct ip_vs_sync_buff *
329 ip_vs_sync_buff_create(struct netns_ipvs *ipvs)
331 struct ip_vs_sync_buff *sb;
333 if (!(sb=kmalloc(sizeof(struct ip_vs_sync_buff), GFP_ATOMIC)))
336 sb->mesg = kmalloc(ipvs->send_mesg_maxlen, GFP_ATOMIC);
341 sb->mesg->reserved = 0; /* old nr_conns i.e. must be zeo now */
342 sb->mesg->version = SYNC_PROTO_VER;
343 sb->mesg->syncid = ipvs->master_syncid;
344 sb->mesg->size = sizeof(struct ip_vs_sync_mesg);
345 sb->mesg->nr_conns = 0;
347 sb->head = (unsigned char *)sb->mesg + sizeof(struct ip_vs_sync_mesg);
348 sb->end = (unsigned char *)sb->mesg + ipvs->send_mesg_maxlen;
350 sb->firstuse = jiffies;
354 static inline void ip_vs_sync_buff_release(struct ip_vs_sync_buff *sb)
360 static inline void sb_queue_tail(struct netns_ipvs *ipvs)
362 struct ip_vs_sync_buff *sb = ipvs->sync_buff;
364 spin_lock(&ipvs->sync_lock);
365 if (ipvs->sync_state & IP_VS_STATE_MASTER &&
366 ipvs->sync_queue_len < sysctl_sync_qlen_max(ipvs)) {
367 if (!ipvs->sync_queue_len)
368 schedule_delayed_work(&ipvs->master_wakeup_work,
369 max(IPVS_SYNC_SEND_DELAY, 1));
370 ipvs->sync_queue_len++;
371 list_add_tail(&sb->list, &ipvs->sync_queue);
372 if ((++ipvs->sync_queue_delay) == IPVS_SYNC_WAKEUP_RATE)
373 wake_up_process(ipvs->master_thread);
375 ip_vs_sync_buff_release(sb);
376 spin_unlock(&ipvs->sync_lock);
380 * Get the current sync buffer if it has been created for more
381 * than the specified time or the specified time is zero.
383 static inline struct ip_vs_sync_buff *
384 get_curr_sync_buff(struct netns_ipvs *ipvs, unsigned long time)
386 struct ip_vs_sync_buff *sb;
388 spin_lock_bh(&ipvs->sync_buff_lock);
389 if (ipvs->sync_buff &&
390 time_after_eq(jiffies - ipvs->sync_buff->firstuse, time)) {
391 sb = ipvs->sync_buff;
392 ipvs->sync_buff = NULL;
393 __set_current_state(TASK_RUNNING);
396 spin_unlock_bh(&ipvs->sync_buff_lock);
401 * Switch mode from sending version 0 or 1
402 * - must handle sync_buf
404 void ip_vs_sync_switch_mode(struct net *net, int mode)
406 struct netns_ipvs *ipvs = net_ipvs(net);
407 struct ip_vs_sync_buff *sb;
409 spin_lock_bh(&ipvs->sync_buff_lock);
410 if (!(ipvs->sync_state & IP_VS_STATE_MASTER))
412 sb = ipvs->sync_buff;
413 if (mode == sysctl_sync_ver(ipvs) || !sb)
416 /* Buffer empty ? then let buf_create do the job */
417 if (sb->mesg->size <= sizeof(struct ip_vs_sync_mesg)) {
418 ip_vs_sync_buff_release(sb);
419 ipvs->sync_buff = NULL;
424 spin_unlock_bh(&ipvs->sync_buff_lock);
428 * Create a new sync buffer for Version 0 proto.
430 static inline struct ip_vs_sync_buff *
431 ip_vs_sync_buff_create_v0(struct netns_ipvs *ipvs)
433 struct ip_vs_sync_buff *sb;
434 struct ip_vs_sync_mesg_v0 *mesg;
436 if (!(sb=kmalloc(sizeof(struct ip_vs_sync_buff), GFP_ATOMIC)))
439 sb->mesg = kmalloc(ipvs->send_mesg_maxlen, GFP_ATOMIC);
444 mesg = (struct ip_vs_sync_mesg_v0 *)sb->mesg;
446 mesg->syncid = ipvs->master_syncid;
447 mesg->size = sizeof(struct ip_vs_sync_mesg_v0);
448 sb->head = (unsigned char *)mesg + sizeof(struct ip_vs_sync_mesg_v0);
449 sb->end = (unsigned char *)mesg + ipvs->send_mesg_maxlen;
450 sb->firstuse = jiffies;
455 * Version 0, can be switched on via sysctl.
456 * Add an ip_vs_conn information into the current sync_buff.
458 void ip_vs_sync_conn_v0(struct net *net, struct ip_vs_conn *cp)
460 struct netns_ipvs *ipvs = net_ipvs(net);
461 struct ip_vs_sync_mesg_v0 *m;
462 struct ip_vs_sync_conn_v0 *s;
465 if (unlikely(cp->af != AF_INET))
467 /* Do not sync ONE PACKET */
468 if (cp->flags & IP_VS_CONN_F_ONE_PACKET)
471 spin_lock(&ipvs->sync_buff_lock);
472 if (!ipvs->sync_buff) {
474 ip_vs_sync_buff_create_v0(ipvs);
475 if (!ipvs->sync_buff) {
476 spin_unlock(&ipvs->sync_buff_lock);
477 pr_err("ip_vs_sync_buff_create failed.\n");
482 len = (cp->flags & IP_VS_CONN_F_SEQ_MASK) ? FULL_CONN_SIZE :
484 m = (struct ip_vs_sync_mesg_v0 *)ipvs->sync_buff->mesg;
485 s = (struct ip_vs_sync_conn_v0 *)ipvs->sync_buff->head;
489 s->protocol = cp->protocol;
490 s->cport = cp->cport;
491 s->vport = cp->vport;
492 s->dport = cp->dport;
493 s->caddr = cp->caddr.ip;
494 s->vaddr = cp->vaddr.ip;
495 s->daddr = cp->daddr.ip;
496 s->flags = htons(cp->flags & ~IP_VS_CONN_F_HASHED);
497 s->state = htons(cp->state);
498 if (cp->flags & IP_VS_CONN_F_SEQ_MASK) {
499 struct ip_vs_sync_conn_options *opt =
500 (struct ip_vs_sync_conn_options *)&s[1];
501 memcpy(opt, &cp->in_seq, sizeof(*opt));
506 ipvs->sync_buff->head += len;
508 /* check if there is a space for next one */
509 if (ipvs->sync_buff->head + FULL_CONN_SIZE > ipvs->sync_buff->end) {
511 ipvs->sync_buff = NULL;
513 spin_unlock(&ipvs->sync_buff_lock);
515 /* synchronize its controller if it has */
517 ip_vs_sync_conn(net, cp->control);
521 * Add an ip_vs_conn information into the current sync_buff.
522 * Called by ip_vs_in.
523 * Sending Version 1 messages
525 void ip_vs_sync_conn(struct net *net, struct ip_vs_conn *cp)
527 struct netns_ipvs *ipvs = net_ipvs(net);
528 struct ip_vs_sync_mesg *m;
529 union ip_vs_sync_conn *s;
531 unsigned int len, pe_name_len, pad;
533 /* Handle old version of the protocol */
534 if (sysctl_sync_ver(ipvs) == 0) {
535 ip_vs_sync_conn_v0(net, cp);
538 /* Do not sync ONE PACKET */
539 if (cp->flags & IP_VS_CONN_F_ONE_PACKET)
544 if (cp->pe_data_len) {
545 if (!cp->pe_data || !cp->dest) {
546 IP_VS_ERR_RL("SYNC, connection pe_data invalid\n");
549 pe_name_len = strnlen(cp->pe->name, IP_VS_PENAME_MAXLEN);
552 spin_lock(&ipvs->sync_buff_lock);
554 #ifdef CONFIG_IP_VS_IPV6
555 if (cp->af == AF_INET6)
556 len = sizeof(struct ip_vs_sync_v6);
559 len = sizeof(struct ip_vs_sync_v4);
561 if (cp->flags & IP_VS_CONN_F_SEQ_MASK)
562 len += sizeof(struct ip_vs_sync_conn_options) + 2;
565 len += cp->pe_data_len + 2; /* + Param hdr field */
567 len += pe_name_len + 2;
569 /* check if there is a space for this one */
571 if (ipvs->sync_buff) {
572 pad = (4 - (size_t)ipvs->sync_buff->head) & 3;
573 if (ipvs->sync_buff->head + len + pad > ipvs->sync_buff->end) {
575 ipvs->sync_buff = NULL;
580 if (!ipvs->sync_buff) {
581 ipvs->sync_buff = ip_vs_sync_buff_create(ipvs);
582 if (!ipvs->sync_buff) {
583 spin_unlock(&ipvs->sync_buff_lock);
584 pr_err("ip_vs_sync_buff_create failed.\n");
589 m = ipvs->sync_buff->mesg;
590 p = ipvs->sync_buff->head;
591 ipvs->sync_buff->head += pad + len;
592 m->size += pad + len;
593 /* Add ev. padding from prev. sync_conn */
597 s = (union ip_vs_sync_conn *)p;
599 /* Set message type & copy members */
600 s->v4.type = (cp->af == AF_INET6 ? STYPE_F_INET6 : 0);
601 s->v4.ver_size = htons(len & SVER_MASK); /* Version 0 */
602 s->v4.flags = htonl(cp->flags & ~IP_VS_CONN_F_HASHED);
603 s->v4.state = htons(cp->state);
604 s->v4.protocol = cp->protocol;
605 s->v4.cport = cp->cport;
606 s->v4.vport = cp->vport;
607 s->v4.dport = cp->dport;
608 s->v4.fwmark = htonl(cp->fwmark);
609 s->v4.timeout = htonl(cp->timeout / HZ);
612 #ifdef CONFIG_IP_VS_IPV6
613 if (cp->af == AF_INET6) {
614 p += sizeof(struct ip_vs_sync_v6);
615 s->v6.caddr = cp->caddr.in6;
616 s->v6.vaddr = cp->vaddr.in6;
617 s->v6.daddr = cp->daddr.in6;
621 p += sizeof(struct ip_vs_sync_v4); /* options ptr */
622 s->v4.caddr = cp->caddr.ip;
623 s->v4.vaddr = cp->vaddr.ip;
624 s->v4.daddr = cp->daddr.ip;
626 if (cp->flags & IP_VS_CONN_F_SEQ_MASK) {
627 *(p++) = IPVS_OPT_SEQ_DATA;
628 *(p++) = sizeof(struct ip_vs_sync_conn_options);
629 hton_seq((struct ip_vs_seq *)p, &cp->in_seq);
630 p += sizeof(struct ip_vs_seq);
631 hton_seq((struct ip_vs_seq *)p, &cp->out_seq);
632 p += sizeof(struct ip_vs_seq);
635 if (cp->pe_data_len && cp->pe_data) {
636 *(p++) = IPVS_OPT_PE_DATA;
637 *(p++) = cp->pe_data_len;
638 memcpy(p, cp->pe_data, cp->pe_data_len);
639 p += cp->pe_data_len;
642 *(p++) = IPVS_OPT_PE_NAME;
643 *(p++) = pe_name_len;
644 memcpy(p, cp->pe->name, pe_name_len);
649 spin_unlock(&ipvs->sync_buff_lock);
652 /* synchronize its controller if it has */
657 * Reduce sync rate for templates
658 * i.e only increment in_pkts for Templates.
660 if (cp->flags & IP_VS_CONN_F_TEMPLATE) {
661 int pkts = atomic_add_return(1, &cp->in_pkts);
663 if (pkts % sysctl_sync_period(ipvs) != 1)
670 * fill_param used by version 1
673 ip_vs_conn_fill_param_sync(struct net *net, int af, union ip_vs_sync_conn *sc,
674 struct ip_vs_conn_param *p,
675 __u8 *pe_data, unsigned int pe_data_len,
676 __u8 *pe_name, unsigned int pe_name_len)
678 #ifdef CONFIG_IP_VS_IPV6
680 ip_vs_conn_fill_param(net, af, sc->v6.protocol,
681 (const union nf_inet_addr *)&sc->v6.caddr,
683 (const union nf_inet_addr *)&sc->v6.vaddr,
687 ip_vs_conn_fill_param(net, af, sc->v4.protocol,
688 (const union nf_inet_addr *)&sc->v4.caddr,
690 (const union nf_inet_addr *)&sc->v4.vaddr,
695 char buff[IP_VS_PENAME_MAXLEN+1];
697 memcpy(buff, pe_name, pe_name_len);
699 p->pe = __ip_vs_pe_getbyname(buff);
701 IP_VS_DBG(3, "BACKUP, no %s engine found/loaded\n",
706 IP_VS_ERR_RL("BACKUP, Invalid PE parameters\n");
710 p->pe_data = kmemdup(pe_data, pe_data_len, GFP_ATOMIC);
713 module_put(p->pe->module);
716 p->pe_data_len = pe_data_len;
722 * Connection Add / Update.
723 * Common for version 0 and 1 reception of backup sync_conns.
727 static void ip_vs_proc_conn(struct net *net, struct ip_vs_conn_param *param,
728 unsigned int flags, unsigned int state,
729 unsigned int protocol, unsigned int type,
730 const union nf_inet_addr *daddr, __be16 dport,
731 unsigned long timeout, __u32 fwmark,
732 struct ip_vs_sync_conn_options *opt)
734 struct ip_vs_dest *dest;
735 struct ip_vs_conn *cp;
736 struct netns_ipvs *ipvs = net_ipvs(net);
738 if (!(flags & IP_VS_CONN_F_TEMPLATE))
739 cp = ip_vs_conn_in_get(param);
741 cp = ip_vs_ct_in_get(param);
745 kfree(param->pe_data);
748 if ((cp->flags ^ flags) & IP_VS_CONN_F_INACTIVE &&
749 !(flags & IP_VS_CONN_F_TEMPLATE) && dest) {
750 if (flags & IP_VS_CONN_F_INACTIVE) {
751 atomic_dec(&dest->activeconns);
752 atomic_inc(&dest->inactconns);
754 atomic_inc(&dest->activeconns);
755 atomic_dec(&dest->inactconns);
758 flags &= IP_VS_CONN_F_BACKUP_UPD_MASK;
759 flags |= cp->flags & ~IP_VS_CONN_F_BACKUP_UPD_MASK;
762 dest = ip_vs_try_bind_dest(cp);
764 atomic_dec(&dest->refcnt);
768 * Find the appropriate destination for the connection.
769 * If it is not found the connection will remain unbound
772 dest = ip_vs_find_dest(net, type, daddr, dport, param->vaddr,
773 param->vport, protocol, fwmark, flags);
775 cp = ip_vs_conn_new(param, daddr, dport, flags, dest, fwmark);
777 atomic_dec(&dest->refcnt);
780 kfree(param->pe_data);
781 IP_VS_DBG(2, "BACKUP, add new conn. failed\n");
787 memcpy(&cp->in_seq, opt, sizeof(*opt));
788 atomic_set(&cp->in_pkts, sysctl_sync_threshold(ipvs));
790 cp->old_state = cp->state;
792 * For Ver 0 messages style
793 * - Not possible to recover the right timeout for templates
794 * - can not find the right fwmark
795 * virtual service. If needed, we can do it for
796 * non-fwmark persistent services.
797 * Ver 1 messages style.
801 if (timeout > MAX_SCHEDULE_TIMEOUT / HZ)
802 timeout = MAX_SCHEDULE_TIMEOUT / HZ;
803 cp->timeout = timeout*HZ;
805 struct ip_vs_proto_data *pd;
807 pd = ip_vs_proto_data_get(net, protocol);
808 if (!(flags & IP_VS_CONN_F_TEMPLATE) && pd && pd->timeout_table)
809 cp->timeout = pd->timeout_table[state];
811 cp->timeout = (3*60*HZ);
817 * Process received multicast message for Version 0
819 static void ip_vs_process_message_v0(struct net *net, const char *buffer,
822 struct ip_vs_sync_mesg_v0 *m = (struct ip_vs_sync_mesg_v0 *)buffer;
823 struct ip_vs_sync_conn_v0 *s;
824 struct ip_vs_sync_conn_options *opt;
825 struct ip_vs_protocol *pp;
826 struct ip_vs_conn_param param;
830 p = (char *)buffer + sizeof(struct ip_vs_sync_mesg_v0);
831 for (i=0; i<m->nr_conns; i++) {
832 unsigned int flags, state;
834 if (p + SIMPLE_CONN_SIZE > buffer+buflen) {
835 IP_VS_ERR_RL("BACKUP v0, bogus conn\n");
838 s = (struct ip_vs_sync_conn_v0 *) p;
839 flags = ntohs(s->flags) | IP_VS_CONN_F_SYNC;
840 flags &= ~IP_VS_CONN_F_HASHED;
841 if (flags & IP_VS_CONN_F_SEQ_MASK) {
842 opt = (struct ip_vs_sync_conn_options *)&s[1];
844 if (p > buffer+buflen) {
845 IP_VS_ERR_RL("BACKUP v0, Dropping buffer bogus conn options\n");
850 p += SIMPLE_CONN_SIZE;
853 state = ntohs(s->state);
854 if (!(flags & IP_VS_CONN_F_TEMPLATE)) {
855 pp = ip_vs_proto_get(s->protocol);
857 IP_VS_DBG(2, "BACKUP v0, Unsupported protocol %u\n",
861 if (state >= pp->num_states) {
862 IP_VS_DBG(2, "BACKUP v0, Invalid %s state %u\n",
867 /* protocol in templates is not used for state/timeout */
869 IP_VS_DBG(2, "BACKUP v0, Invalid template state %u\n",
875 ip_vs_conn_fill_param(net, AF_INET, s->protocol,
876 (const union nf_inet_addr *)&s->caddr,
878 (const union nf_inet_addr *)&s->vaddr,
881 /* Send timeout as Zero */
882 ip_vs_proc_conn(net, ¶m, flags, state, s->protocol, AF_INET,
883 (union nf_inet_addr *)&s->daddr, s->dport,
891 static inline int ip_vs_proc_seqopt(__u8 *p, unsigned int plen,
893 struct ip_vs_sync_conn_options *opt)
895 struct ip_vs_sync_conn_options *topt;
897 topt = (struct ip_vs_sync_conn_options *)p;
899 if (plen != sizeof(struct ip_vs_sync_conn_options)) {
900 IP_VS_DBG(2, "BACKUP, bogus conn options length\n");
903 if (*opt_flags & IPVS_OPT_F_SEQ_DATA) {
904 IP_VS_DBG(2, "BACKUP, conn options found twice\n");
907 ntoh_seq(&topt->in_seq, &opt->in_seq);
908 ntoh_seq(&topt->out_seq, &opt->out_seq);
909 *opt_flags |= IPVS_OPT_F_SEQ_DATA;
913 static int ip_vs_proc_str(__u8 *p, unsigned int plen, unsigned int *data_len,
914 __u8 **data, unsigned int maxlen,
915 __u32 *opt_flags, __u32 flag)
918 IP_VS_DBG(2, "BACKUP, bogus par.data len > %d\n", maxlen);
921 if (*opt_flags & flag) {
922 IP_VS_DBG(2, "BACKUP, Par.data found twice 0x%x\n", flag);
931 * Process a Version 1 sync. connection
933 static inline int ip_vs_proc_sync_conn(struct net *net, __u8 *p, __u8 *msg_end)
935 struct ip_vs_sync_conn_options opt;
936 union ip_vs_sync_conn *s;
937 struct ip_vs_protocol *pp;
938 struct ip_vs_conn_param param;
940 unsigned int af, state, pe_data_len=0, pe_name_len=0;
941 __u8 *pe_data=NULL, *pe_name=NULL;
945 s = (union ip_vs_sync_conn *) p;
947 if (s->v6.type & STYPE_F_INET6) {
948 #ifdef CONFIG_IP_VS_IPV6
950 p += sizeof(struct ip_vs_sync_v6);
952 IP_VS_DBG(3,"BACKUP, IPv6 msg received, and IPVS is not compiled for IPv6\n");
956 } else if (!s->v4.type) {
958 p += sizeof(struct ip_vs_sync_v4);
965 /* Process optional params check Type & Len. */
966 while (p < msg_end) {
975 if (!plen || ((p + plen) > msg_end))
977 /* Handle seq option p = param data */
978 switch (ptype & ~IPVS_OPT_F_PARAM) {
979 case IPVS_OPT_SEQ_DATA:
980 if (ip_vs_proc_seqopt(p, plen, &opt_flags, &opt))
984 case IPVS_OPT_PE_DATA:
985 if (ip_vs_proc_str(p, plen, &pe_data_len, &pe_data,
986 IP_VS_PEDATA_MAXLEN, &opt_flags,
991 case IPVS_OPT_PE_NAME:
992 if (ip_vs_proc_str(p, plen,&pe_name_len, &pe_name,
993 IP_VS_PENAME_MAXLEN, &opt_flags,
999 /* Param data mandatory ? */
1000 if (!(ptype & IPVS_OPT_F_PARAM)) {
1001 IP_VS_DBG(3, "BACKUP, Unknown mandatory param %d found\n",
1002 ptype & ~IPVS_OPT_F_PARAM);
1007 p += plen; /* Next option */
1010 /* Get flags and Mask off unsupported */
1011 flags = ntohl(s->v4.flags) & IP_VS_CONN_F_BACKUP_MASK;
1012 flags |= IP_VS_CONN_F_SYNC;
1013 state = ntohs(s->v4.state);
1015 if (!(flags & IP_VS_CONN_F_TEMPLATE)) {
1016 pp = ip_vs_proto_get(s->v4.protocol);
1018 IP_VS_DBG(3,"BACKUP, Unsupported protocol %u\n",
1023 if (state >= pp->num_states) {
1024 IP_VS_DBG(3, "BACKUP, Invalid %s state %u\n",
1030 /* protocol in templates is not used for state/timeout */
1032 IP_VS_DBG(3, "BACKUP, Invalid template state %u\n",
1037 if (ip_vs_conn_fill_param_sync(net, af, s, ¶m, pe_data,
1038 pe_data_len, pe_name, pe_name_len)) {
1042 /* If only IPv4, just silent skip IPv6 */
1044 ip_vs_proc_conn(net, ¶m, flags, state, s->v4.protocol, af,
1045 (union nf_inet_addr *)&s->v4.daddr, s->v4.dport,
1046 ntohl(s->v4.timeout), ntohl(s->v4.fwmark),
1047 (opt_flags & IPVS_OPT_F_SEQ_DATA ? &opt : NULL)
1049 #ifdef CONFIG_IP_VS_IPV6
1051 ip_vs_proc_conn(net, ¶m, flags, state, s->v6.protocol, af,
1052 (union nf_inet_addr *)&s->v6.daddr, s->v6.dport,
1053 ntohl(s->v6.timeout), ntohl(s->v6.fwmark),
1054 (opt_flags & IPVS_OPT_F_SEQ_DATA ? &opt : NULL)
1060 IP_VS_DBG(2, "BACKUP, Single msg dropped err:%d\n", retc);
1065 * Process received multicast message and create the corresponding
1066 * ip_vs_conn entries.
1067 * Handles Version 0 & 1
1069 static void ip_vs_process_message(struct net *net, __u8 *buffer,
1070 const size_t buflen)
1072 struct netns_ipvs *ipvs = net_ipvs(net);
1073 struct ip_vs_sync_mesg *m2 = (struct ip_vs_sync_mesg *)buffer;
1077 if (buflen < sizeof(struct ip_vs_sync_mesg_v0)) {
1078 IP_VS_DBG(2, "BACKUP, message header too short\n");
1081 /* Convert size back to host byte order */
1082 m2->size = ntohs(m2->size);
1084 if (buflen != m2->size) {
1085 IP_VS_DBG(2, "BACKUP, bogus message size\n");
1088 /* SyncID sanity check */
1089 if (ipvs->backup_syncid != 0 && m2->syncid != ipvs->backup_syncid) {
1090 IP_VS_DBG(7, "BACKUP, Ignoring syncid = %d\n", m2->syncid);
1093 /* Handle version 1 message */
1094 if ((m2->version == SYNC_PROTO_VER) && (m2->reserved == 0)
1095 && (m2->spare == 0)) {
1097 msg_end = buffer + sizeof(struct ip_vs_sync_mesg);
1098 nr_conns = m2->nr_conns;
1100 for (i=0; i<nr_conns; i++) {
1101 union ip_vs_sync_conn *s;
1106 if (p + sizeof(s->v4) > buffer+buflen) {
1107 IP_VS_ERR_RL("BACKUP, Dropping buffer, to small\n");
1110 s = (union ip_vs_sync_conn *)p;
1111 size = ntohs(s->v4.ver_size) & SVER_MASK;
1113 /* Basic sanity checks */
1114 if (msg_end > buffer+buflen) {
1115 IP_VS_ERR_RL("BACKUP, Dropping buffer, msg > buffer\n");
1118 if (ntohs(s->v4.ver_size) >> SVER_SHIFT) {
1119 IP_VS_ERR_RL("BACKUP, Dropping buffer, Unknown version %d\n",
1120 ntohs(s->v4.ver_size) >> SVER_SHIFT);
1123 /* Process a single sync_conn */
1124 retc = ip_vs_proc_sync_conn(net, p, msg_end);
1126 IP_VS_ERR_RL("BACKUP, Dropping buffer, Err: %d in decoding\n",
1130 /* Make sure we have 32 bit alignment */
1131 msg_end = p + ((size + 3) & ~3);
1134 /* Old type of message */
1135 ip_vs_process_message_v0(net, buffer, buflen);
1142 * Setup sndbuf (mode=1) or rcvbuf (mode=0)
1144 static void set_sock_size(struct sock *sk, int mode, int val)
1146 /* setsockopt(sock, SOL_SOCKET, SO_SNDBUF, &val, sizeof(val)); */
1147 /* setsockopt(sock, SOL_SOCKET, SO_RCVBUF, &val, sizeof(val)); */
1150 val = clamp_t(int, val, (SOCK_MIN_SNDBUF + 1) / 2,
1152 sk->sk_sndbuf = val * 2;
1153 sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
1155 val = clamp_t(int, val, (SOCK_MIN_RCVBUF + 1) / 2,
1157 sk->sk_rcvbuf = val * 2;
1158 sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
1164 * Setup loopback of outgoing multicasts on a sending socket
1166 static void set_mcast_loop(struct sock *sk, u_char loop)
1168 struct inet_sock *inet = inet_sk(sk);
1170 /* setsockopt(sock, SOL_IP, IP_MULTICAST_LOOP, &loop, sizeof(loop)); */
1172 inet->mc_loop = loop ? 1 : 0;
1177 * Specify TTL for outgoing multicasts on a sending socket
1179 static void set_mcast_ttl(struct sock *sk, u_char ttl)
1181 struct inet_sock *inet = inet_sk(sk);
1183 /* setsockopt(sock, SOL_IP, IP_MULTICAST_TTL, &ttl, sizeof(ttl)); */
1190 * Specify default interface for outgoing multicasts
1192 static int set_mcast_if(struct sock *sk, char *ifname)
1194 struct net_device *dev;
1195 struct inet_sock *inet = inet_sk(sk);
1196 struct net *net = sock_net(sk);
1198 dev = __dev_get_by_name(net, ifname);
1202 if (sk->sk_bound_dev_if && dev->ifindex != sk->sk_bound_dev_if)
1206 inet->mc_index = dev->ifindex;
1207 /* inet->mc_addr = 0; */
1215 * Set the maximum length of sync message according to the
1216 * specified interface's MTU.
1218 static int set_sync_mesg_maxlen(struct net *net, int sync_state)
1220 struct netns_ipvs *ipvs = net_ipvs(net);
1221 struct net_device *dev;
1224 if (sync_state == IP_VS_STATE_MASTER) {
1225 dev = __dev_get_by_name(net, ipvs->master_mcast_ifn);
1229 num = (dev->mtu - sizeof(struct iphdr) -
1230 sizeof(struct udphdr) -
1231 SYNC_MESG_HEADER_LEN - 20) / SIMPLE_CONN_SIZE;
1232 ipvs->send_mesg_maxlen = SYNC_MESG_HEADER_LEN +
1233 SIMPLE_CONN_SIZE * min(num, MAX_CONNS_PER_SYNCBUFF);
1234 IP_VS_DBG(7, "setting the maximum length of sync sending "
1235 "message %d.\n", ipvs->send_mesg_maxlen);
1236 } else if (sync_state == IP_VS_STATE_BACKUP) {
1237 dev = __dev_get_by_name(net, ipvs->backup_mcast_ifn);
1241 ipvs->recv_mesg_maxlen = dev->mtu -
1242 sizeof(struct iphdr) - sizeof(struct udphdr);
1243 IP_VS_DBG(7, "setting the maximum length of sync receiving "
1244 "message %d.\n", ipvs->recv_mesg_maxlen);
1252 * Join a multicast group.
1253 * the group is specified by a class D multicast address 224.0.0.0/8
1254 * in the in_addr structure passed in as a parameter.
1257 join_mcast_group(struct sock *sk, struct in_addr *addr, char *ifname)
1259 struct net *net = sock_net(sk);
1260 struct ip_mreqn mreq;
1261 struct net_device *dev;
1264 memset(&mreq, 0, sizeof(mreq));
1265 memcpy(&mreq.imr_multiaddr, addr, sizeof(struct in_addr));
1267 dev = __dev_get_by_name(net, ifname);
1270 if (sk->sk_bound_dev_if && dev->ifindex != sk->sk_bound_dev_if)
1273 mreq.imr_ifindex = dev->ifindex;
1276 ret = ip_mc_join_group(sk, &mreq);
1283 static int bind_mcastif_addr(struct socket *sock, char *ifname)
1285 struct net *net = sock_net(sock->sk);
1286 struct net_device *dev;
1288 struct sockaddr_in sin;
1290 dev = __dev_get_by_name(net, ifname);
1294 addr = inet_select_addr(dev, 0, RT_SCOPE_UNIVERSE);
1296 pr_err("You probably need to specify IP address on "
1297 "multicast interface.\n");
1299 IP_VS_DBG(7, "binding socket with (%s) %pI4\n",
1302 /* Now bind the socket with the address of multicast interface */
1303 sin.sin_family = AF_INET;
1304 sin.sin_addr.s_addr = addr;
1307 return sock->ops->bind(sock, (struct sockaddr*)&sin, sizeof(sin));
1311 * Set up sending multicast socket over UDP
/*
 * make_send_sock - create and configure the master's multicast send socket
 *
 * Creates a kernel UDP socket, moves it into the per-netns namespace,
 * points it at the configured master mcast interface, disables loopback,
 * limits TTL to 1 (stay on the local segment), optionally sizes the send
 * buffer, binds to the interface address and connects to the multicast
 * group address.  Returns the socket or an ERR_PTR on failure.
 * NOTE(review): intermediate error checks/goto labels are elided in this
 * excerpt; the error path releases the socket via sk_release_kernel().
 */
1313 static struct socket *make_send_sock(struct net *net)
1315 struct netns_ipvs *ipvs = net_ipvs(net);
1316 struct socket *sock;
1319 /* First create a socket move it to right name space later */
1320 result = sock_create_kern(PF_INET, SOCK_DGRAM, IPPROTO_UDP, &sock);
1322 pr_err("Error during creation of socket; terminating\n");
1323 return ERR_PTR(result);
1326 * Kernel sockets that are a part of a namespace, should not
1327 * hold a reference to a namespace in order to allow to stop it.
1328 * After sk_change_net should be released using sk_release_kernel.
1330 sk_change_net(sock->sk, net);
/* Route outgoing multicast through the configured master interface. */
1331 result = set_mcast_if(sock->sk, ipvs->master_mcast_ifn);
1333 pr_err("Error setting outbound mcast interface\n");
/* Don't loop our own sync traffic back; TTL 1 keeps it link-local. */
1337 set_mcast_loop(sock->sk, 0);
1338 set_mcast_ttl(sock->sk, 1);
/* Non-zero sysctl value overrides the default send buffer size. */
1339 result = sysctl_sync_sock_size(ipvs);
1341 set_sock_size(sock->sk, 1, result);
1343 result = bind_mcastif_addr(sock, ipvs->master_mcast_ifn);
1345 pr_err("Error binding address of the mcast interface\n");
/* Connect so plain sendmsg() targets the mcast group/port. */
1349 result = sock->ops->connect(sock, (struct sockaddr *) &mcast_addr,
1350 sizeof(struct sockaddr), 0);
1352 pr_err("Error connecting to the multicast addr\n");
/* Error path: drop the namespace-less kernel socket. */
1359 sk_release_kernel(sock->sk);
1360 return ERR_PTR(result);
1365 * Set up receiving multicast socket over UDP
/*
 * make_receive_sock - create and configure the backup's multicast
 * receive socket
 *
 * Creates a kernel UDP socket in the netns, marks it reusable, optionally
 * sizes the receive buffer, binds to the sync multicast address/port and
 * joins the multicast group on the configured backup interface.
 * Returns the socket or an ERR_PTR on failure.
 * NOTE(review): error checks between the visible calls are elided in
 * this excerpt; the failure path releases via sk_release_kernel().
 */
1367 static struct socket *make_receive_sock(struct net *net)
1369 struct netns_ipvs *ipvs = net_ipvs(net);
1370 struct socket *sock;
1373 /* First create a socket */
1374 result = sock_create_kern(PF_INET, SOCK_DGRAM, IPPROTO_UDP, &sock);
1376 pr_err("Error during creation of socket; terminating\n");
1377 return ERR_PTR(result);
1380 * Kernel sockets that are a part of a namespace, should not
1381 * hold a reference to a namespace in order to allow to stop it.
1382 * After sk_change_net should be released using sk_release_kernel.
1384 sk_change_net(sock->sk, net);
1385 /* it is equivalent to the REUSEADDR option in user-space */
1386 sock->sk->sk_reuse = SK_CAN_REUSE;
/* Non-zero sysctl value overrides the default receive buffer size. */
1387 result = sysctl_sync_sock_size(ipvs);
1389 set_sock_size(sock->sk, 0, result);
/* Bind to the sync group address/port so only sync traffic arrives. */
1391 result = sock->ops->bind(sock, (struct sockaddr *) &mcast_addr,
1392 sizeof(struct sockaddr));
1394 pr_err("Error binding to the multicast addr\n");
1398 /* join the multicast group */
1399 result = join_mcast_group(sock->sk,
1400 (struct in_addr *) &mcast_addr.sin_addr,
1401 ipvs->backup_mcast_ifn);
1403 pr_err("Error joining to the multicast group\n");
/* Error path: drop the namespace-less kernel socket. */
1410 sk_release_kernel(sock->sk);
1411 return ERR_PTR(result);
/*
 * ip_vs_send_async - non-blocking send of @length bytes from @buffer
 *
 * Sends via kernel_sendmsg() with MSG_DONTWAIT|MSG_NOSIGNAL, so the call
 * never sleeps and never raises SIGPIPE; returns the sendmsg result
 * (bytes sent or negative errno such as -EAGAIN).
 */
1416 ip_vs_send_async(struct socket *sock, const char *buffer, const size_t length)
1418 struct msghdr msg = {.msg_flags = MSG_DONTWAIT|MSG_NOSIGNAL};
/* Cast away const: iov_base is non-const but the data is only read. */
1423 iov.iov_base = (void *)buffer;
1424 iov.iov_len = length;
1426 len = kernel_sendmsg(sock, &msg, &iov, 1, (size_t)(length));
/*
 * ip_vs_send_sync_msg - send one complete sync message
 *
 * Converts the in-header size field to network byte order, then pushes
 * the whole message with ip_vs_send_async().  -EAGAIN is treated like
 * success here (caller retries when the socket is writeable again);
 * any other error is logged.
 */
1433 ip_vs_send_sync_msg(struct socket *sock, struct ip_vs_sync_mesg *msg)
1440 /* Put size in network byte order */
1441 msg->size = htons(msg->size);
/* msize presumably holds the host-order size saved before htons — confirm. */
1443 ret = ip_vs_send_async(sock, (char *)msg, msize);
1444 if (ret >= 0 || ret == -EAGAIN)
1446 pr_err("ip_vs_send_async error %d\n", ret);
/*
 * ip_vs_receive - receive one datagram into @buffer (up to @buflen bytes)
 *
 * Thin wrapper around kernel_recvmsg() with a single iovec; returns the
 * recvmsg result (bytes received or negative errno).
 */
1451 ip_vs_receive(struct socket *sock, char *buffer, const size_t buflen)
1453 struct msghdr msg = {NULL,};
1459 /* Receive a packet */
1460 iov.iov_base = buffer;
1461 iov.iov_len = (size_t)buflen;
1463 len = kernel_recvmsg(sock, &msg, &iov, 1, buflen, 0);
1472 /* Wakeup the master thread for sending */
1473 static void master_wakeup_work_handler(struct work_struct *work)
1475 struct netns_ipvs *ipvs = container_of(work, struct netns_ipvs,
1476 master_wakeup_work.work);
1478 spin_lock_bh(&ipvs->sync_lock);
1479 if (ipvs->sync_queue_len &&
1480 ipvs->sync_queue_delay < IPVS_SYNC_WAKEUP_RATE) {
1481 ipvs->sync_queue_delay = IPVS_SYNC_WAKEUP_RATE;
1482 wake_up_process(ipvs->master_thread);
1484 spin_unlock_bh(&ipvs->sync_lock);
1487 /* Get next buffer to send */
1487 /* Get next buffer to send */
/*
 * Prefer a fully-queued buffer; if the queue is empty, fall back to the
 * current (partially filled) buffer, but only if it has been pending
 * for at least 2 seconds — bounds the latency of sync updates.
 */
1488 static inline struct ip_vs_sync_buff *
1489 next_sync_buff(struct netns_ipvs *ipvs)
1491 struct ip_vs_sync_buff *sb;
1493 sb = sb_dequeue(ipvs);
1496 /* Do not delay entries in buffer for more than 2 seconds */
1497 return get_curr_sync_buff(ipvs, 2 * HZ);
/*
 * sync_thread_master - kthread main loop for the MASTER sync daemon
 *
 * Repeatedly pulls sync buffers (next_sync_buff) and transmits them on
 * the multicast socket.  On a would-block send it sleeps on the socket's
 * wait queue until the socket is writeable or the thread is asked to
 * stop.  On stop, drains and frees all queued and current buffers and
 * releases the send socket.
 * NOTE(review): the loop heads/tails and some conditionals are elided in
 * this excerpt — comments describe the visible statements only.
 */
1500 static int sync_thread_master(void *data)
1502 struct ip_vs_sync_thread_data *tinfo = data;
1503 struct netns_ipvs *ipvs = net_ipvs(tinfo->net);
1504 struct sock *sk = tinfo->sock->sk;
1505 struct ip_vs_sync_buff *sb;
1507 pr_info("sync thread started: state = MASTER, mcast_ifn = %s, "
1509 ipvs->master_mcast_ifn, ipvs->master_syncid);
1512 sb = next_sync_buff(ipvs);
1513 if (unlikely(kthread_should_stop()))
/* Nothing to send: nap for a check period before polling again. */
1516 schedule_timeout(IPVS_SYNC_CHECK_PERIOD);
/* Retry the send until it succeeds; sleep while the socket is full. */
1519 while (ip_vs_send_sync_msg(tinfo->sock, sb->mesg) < 0) {
1522 __wait_event_interruptible(*sk_sleep(sk),
1523 sock_writeable(sk) ||
1524 kthread_should_stop(),
1526 if (unlikely(kthread_should_stop()))
1529 ip_vs_sync_buff_release(sb);
1533 __set_current_state(TASK_RUNNING);
1535 ip_vs_sync_buff_release(sb);
1537 /* clean up the sync_buff queue */
1538 while ((sb = sb_dequeue(ipvs)))
1539 ip_vs_sync_buff_release(sb);
1540 __set_current_state(TASK_RUNNING);
1542 /* clean up the current sync_buff */
/* Timeout 0: take the partial buffer unconditionally for disposal. */
1543 sb = get_curr_sync_buff(ipvs, 0);
1545 ip_vs_sync_buff_release(sb);
1547 /* release the sending multicast socket */
1548 sk_release_kernel(tinfo->sock->sk);
/*
 * sync_thread_backup - kthread main loop for the BACKUP sync daemon
 *
 * Sleeps until the receive socket has queued skbs (or a stop request),
 * then drains the queue: each received datagram is handed to
 * ip_vs_process_message() to install/update connection entries.
 * On stop, releases the receive socket.
 */
1555 static int sync_thread_backup(void *data)
1557 struct ip_vs_sync_thread_data *tinfo = data;
1558 struct netns_ipvs *ipvs = net_ipvs(tinfo->net);
1561 pr_info("sync thread started: state = BACKUP, mcast_ifn = %s, "
1563 ipvs->backup_mcast_ifn, ipvs->backup_syncid);
1565 while (!kthread_should_stop()) {
1566 wait_event_interruptible(*sk_sleep(tinfo->sock->sk),
1567 !skb_queue_empty(&tinfo->sock->sk->sk_receive_queue)
1568 || kthread_should_stop());
1570 /* do we have data now? */
1571 while (!skb_queue_empty(&(tinfo->sock->sk->sk_receive_queue))) {
/* tinfo->buf was sized to recv_mesg_maxlen in start_sync_thread(). */
1572 len = ip_vs_receive(tinfo->sock, tinfo->buf,
1573 ipvs->recv_mesg_maxlen);
1575 pr_err("receiving message error\n");
1579 /* disable bottom half, because it accesses the data
1580 shared by softirq while getting/creating conns */
1582 ip_vs_process_message(tinfo->net, tinfo->buf, len);
1587 /* release the sending multicast socket */
1588 sk_release_kernel(tinfo->sock->sk);
/*
 * start_sync_thread - start the MASTER or BACKUP sync daemon for @net
 * @state:     IP_VS_STATE_MASTER or IP_VS_STATE_BACKUP
 * @mcast_ifn: interface name to send/receive multicast sync traffic on
 * @syncid:    sync instance id carried in the message headers
 *
 * Records the interface/syncid in the netns, creates the matching
 * multicast socket, allocates per-thread data (plus a receive buffer
 * for BACKUP), spawns the kthread and marks the state active, bumping
 * the module use count.  Returns 0 on success or a negative errno.
 * NOTE(review): the -EEXIST/-EINVAL returns and the error-unwind labels
 * are elided in this excerpt.
 */
1596 int start_sync_thread(struct net *net, int state, char *mcast_ifn, __u8 syncid)
1598 struct ip_vs_sync_thread_data *tinfo;
1599 struct task_struct **realtask, *task;
1600 struct socket *sock;
1601 struct netns_ipvs *ipvs = net_ipvs(net);
1602 char *name, *buf = NULL;
1603 int (*threadfn)(void *data);
1604 int result = -ENOMEM;
1606 IP_VS_DBG(7, "%s(): pid %d\n", __func__, task_pid_nr(current));
1607 IP_VS_DBG(7, "Each ip_vs_sync_conn entry needs %Zd bytes\n",
1608 sizeof(struct ip_vs_sync_conn_v0));
1611 if (state == IP_VS_STATE_MASTER) {
/* Refuse to start a second master thread for this netns. */
1612 if (ipvs->master_thread)
1615 strlcpy(ipvs->master_mcast_ifn, mcast_ifn,
1616 sizeof(ipvs->master_mcast_ifn));
1617 ipvs->master_syncid = syncid;
1618 realtask = &ipvs->master_thread;
1619 name = "ipvs_master:%d";
1620 threadfn = sync_thread_master;
/* Reset wakeup-rate accounting used by master_wakeup_work_handler(). */
1621 ipvs->sync_queue_len = 0;
1622 ipvs->sync_queue_delay = 0;
1623 INIT_DELAYED_WORK(&ipvs->master_wakeup_work,
1624 master_wakeup_work_handler);
1625 sock = make_send_sock(net);
1626 } else if (state == IP_VS_STATE_BACKUP) {
/* Refuse to start a second backup thread for this netns. */
1627 if (ipvs->backup_thread)
1630 strlcpy(ipvs->backup_mcast_ifn, mcast_ifn,
1631 sizeof(ipvs->backup_mcast_ifn));
1632 ipvs->backup_syncid = syncid;
1633 realtask = &ipvs->backup_thread;
1634 name = "ipvs_backup:%d";
1635 threadfn = sync_thread_backup;
1636 sock = make_receive_sock(net);
1642 result = PTR_ERR(sock);
1646 set_sync_mesg_maxlen(net, state);
1647 if (state == IP_VS_STATE_BACKUP) {
/* Receive buffer sized to the maximum sync message length. */
1648 buf = kmalloc(ipvs->recv_mesg_maxlen, GFP_KERNEL);
1653 tinfo = kmalloc(sizeof(*tinfo), GFP_KERNEL);
1661 task = kthread_run(threadfn, tinfo, name, ipvs->gen);
1663 result = PTR_ERR(task);
1667 /* mark as active */
1669 ipvs->sync_state |= state;
1671 /* increase the module use count */
1672 ip_vs_use_count_inc();
/* Error path: drop the kernel socket created above. */
1681 sk_release_kernel(sock->sk);
/*
 * stop_sync_thread - stop the MASTER or BACKUP sync daemon for @net
 * @state: IP_VS_STATE_MASTER or IP_VS_STATE_BACKUP
 *
 * Clears the active state bit, stops the kthread and drops the module
 * use count.  Presumably returns -ESRCH when no such thread is running
 * (callers in ip_vs_sync_net_cleanup() tolerate -ESRCH) — the elided
 * return statements would confirm.
 */
1687 int stop_sync_thread(struct net *net, int state)
1689 struct netns_ipvs *ipvs = net_ipvs(net);
1692 IP_VS_DBG(7, "%s(): pid %d\n", __func__, task_pid_nr(current));
1694 if (state == IP_VS_STATE_MASTER) {
1695 if (!ipvs->master_thread)
1698 pr_info("stopping master sync thread %d ...\n",
1699 task_pid_nr(ipvs->master_thread));
1702 * The lock synchronizes with sb_queue_tail(), so that we don't
1703 * add sync buffers to the queue, when we are already in
1704 * progress of stopping the master sync daemon.
1707 spin_lock_bh(&ipvs->sync_lock);
1708 ipvs->sync_state &= ~IP_VS_STATE_MASTER;
1709 spin_unlock_bh(&ipvs->sync_lock);
/* Flush any pending wakeup work before stopping the thread it wakes. */
1710 cancel_delayed_work_sync(&ipvs->master_wakeup_work);
1711 retc = kthread_stop(ipvs->master_thread);
1712 ipvs->master_thread = NULL;
1713 } else if (state == IP_VS_STATE_BACKUP) {
1714 if (!ipvs->backup_thread)
1717 pr_info("stopping backup sync thread %d ...\n",
1718 task_pid_nr(ipvs->backup_thread));
1720 ipvs->sync_state &= ~IP_VS_STATE_BACKUP;
1721 retc = kthread_stop(ipvs->backup_thread);
1722 ipvs->backup_thread = NULL;
1725 /* decrease the module use count */
1726 ip_vs_use_count_dec();
1732 * Initialize data struct for each netns
1732 * Initialize data struct for each netns
/*
 * Per-netns init: set up the sync mutex (with a shared lock class to
 * keep lockdep happy across namespaces), the buffer queue and its
 * locks, and the well-known sync multicast address/port (224.0.0.81:8848).
 */
1734 int __net_init ip_vs_sync_net_init(struct net *net)
1736 struct netns_ipvs *ipvs = net_ipvs(net);
1738 __mutex_init(&ipvs->sync_mutex, "ipvs->sync_mutex", &__ipvs_sync_key);
1739 INIT_LIST_HEAD(&ipvs->sync_queue);
1740 spin_lock_init(&ipvs->sync_lock);
1741 spin_lock_init(&ipvs->sync_buff_lock);
1743 ipvs->sync_mcast_addr.sin_family = AF_INET;
1744 ipvs->sync_mcast_addr.sin_port = cpu_to_be16(IP_VS_SYNC_PORT);
1745 ipvs->sync_mcast_addr.sin_addr.s_addr = cpu_to_be32(IP_VS_SYNC_GROUP);
/*
 * Per-netns teardown: stop both sync daemons under sync_mutex.
 * -ESRCH simply means the respective daemon was never started, so it
 * is not reported as an error.
 */
1749 void ip_vs_sync_net_cleanup(struct net *net)
1752 struct netns_ipvs *ipvs = net_ipvs(net);
1754 mutex_lock(&ipvs->sync_mutex);
1755 retc = stop_sync_thread(net, IP_VS_STATE_MASTER);
1756 if (retc && retc != -ESRCH)
1757 pr_err("Failed to stop Master Daemon\n");
1759 retc = stop_sync_thread(net, IP_VS_STATE_BACKUP);
1760 if (retc && retc != -ESRCH)
1761 pr_err("Failed to stop Backup Daemon\n");
1762 mutex_unlock(&ipvs->sync_mutex);