/*
 * net/tipc/link.c: TIPC link code
 *
 * Copyright (c) 1996-2007, 2012-2014, Ericsson AB
 * Copyright (c) 2004-2007, 2010-2013, Wind River Systems
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "core.h"
#include "link.h"
#include "port.h"
#include "name_distr.h"
#include "discover.h"
#include "config.h"
#include "socket.h"

#include <linux/pkt_sched.h>

/*
 * Error message prefixes
 */
static const char *link_co_err = "Link changeover error, ";
static const char *link_rst_msg = "Resetting link ";
static const char *link_unk_evt = "Unknown link event ";

/*
 * Out-of-range value for link session numbers
 */
#define INVALID_SESSION 0x10000

/*
 * Link state events:
 */
#define  STARTING_EVT    856384768	/* link processing trigger */
#define  TRAFFIC_MSG_EVT 560815u	/* rx'd ??? */
#define  TIMEOUT_EVT     560817u	/* link timer expired */

/*
 * The following two 'message types' are really just implementation
 * data conveniently stored in the message header.
 * They must not be considered part of the protocol
 */
#define OPEN_MSG   0
#define CLOSED_MSG 1

/*
 * State value stored in 'exp_msg_count'
 */
#define START_CHANGEOVER 100000u

static void link_handle_out_of_seq_msg(struct tipc_link *l_ptr,
				       struct sk_buff *buf);
static void tipc_link_proto_rcv(struct tipc_link *l_ptr, struct sk_buff *buf);
static int  tipc_link_tunnel_rcv(struct tipc_node *n_ptr,
				 struct sk_buff **buf);
static void link_set_supervision_props(struct tipc_link *l_ptr, u32 tolerance);
static void link_state_event(struct tipc_link *l_ptr, u32 event);
static void link_reset_statistics(struct tipc_link *l_ptr);
static void link_print(struct tipc_link *l_ptr, const char *str);
static int tipc_link_frag_xmit(struct tipc_link *l_ptr, struct sk_buff *buf);
static void tipc_link_sync_xmit(struct tipc_link *l);
static void tipc_link_sync_rcv(struct tipc_node *n, struct sk_buff *buf);
static int tipc_link_input(struct tipc_link *l, struct sk_buff *buf);
static int tipc_link_prepare_input(struct tipc_link *l, struct sk_buff **buf);

/*
 * Simple link routines
 */
static unsigned int align(unsigned int i)
{
	return (i + 3) & ~3u;
}

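/* Illustrative note (not part of the original file): align() rounds up to
 * the next 4-byte boundary, which the bundling code below relies on when it
 * computes padding. A few sample values, assuming the body above:
 *
 *	align(0)  == 0
 *	align(1)  == 4
 *	align(5)  == 8
 *	align(60) == 60
 *
 * so the pad inserted between two bundled messages is align(sz) - sz,
 * i.e. 0..3 bytes.
 */
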
static void link_init_max_pkt(struct tipc_link *l_ptr)
{
	struct tipc_bearer *b_ptr;
	u32 max_pkt;

	rcu_read_lock();
	b_ptr = rcu_dereference_rtnl(bearer_list[l_ptr->bearer_id]);
	if (!b_ptr) {
		rcu_read_unlock();
		return;
	}
	max_pkt = (b_ptr->mtu & ~3);
	rcu_read_unlock();

	if (max_pkt > MAX_MSG_SIZE)
		max_pkt = MAX_MSG_SIZE;

	l_ptr->max_pkt_target = max_pkt;
	if (l_ptr->max_pkt_target < MAX_PKT_DEFAULT)
		l_ptr->max_pkt = l_ptr->max_pkt_target;
	else
		l_ptr->max_pkt = MAX_PKT_DEFAULT;

	l_ptr->max_pkt_probes = 0;
}

static u32 link_next_sent(struct tipc_link *l_ptr)
{
	if (l_ptr->next_out)
		return buf_seqno(l_ptr->next_out);
	return mod(l_ptr->next_out_no);
}

static u32 link_last_sent(struct tipc_link *l_ptr)
{
	return mod(link_next_sent(l_ptr) - 1);
}

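/* Illustrative sketch (not from the original file): sequence numbers are
 * 16 bits wide and all arithmetic on them goes through mod(). Assuming
 * mod(x) is (x & 0xffff), the wrap-around works out as:
 *
 *	mod(0xffff + 1) == 0          (next seqno after 65535 is 0)
 *	mod(0 - 1)      == 0xffff     (last sent before 0 is 65535)
 *
 * which is why link_last_sent() can simply return
 * mod(link_next_sent(l_ptr) - 1) without a special case at zero.
 */
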
/*
 * Simple non-static link routines (i.e. referenced outside this file)
 */
int tipc_link_is_up(struct tipc_link *l_ptr)
{
	if (!l_ptr)
		return 0;
	return link_working_working(l_ptr) || link_working_unknown(l_ptr);
}

int tipc_link_is_active(struct tipc_link *l_ptr)
{
	return	(l_ptr->owner->active_links[0] == l_ptr) ||
		(l_ptr->owner->active_links[1] == l_ptr);
}

/**
 * link_timeout - handle expiration of link timer
 * @l_ptr: pointer to link
 */
static void link_timeout(struct tipc_link *l_ptr)
{
	tipc_node_lock(l_ptr->owner);

	/* update counters used in statistical profiling of send traffic */
	l_ptr->stats.accu_queue_sz += l_ptr->out_queue_size;
	l_ptr->stats.queue_sz_counts++;

	if (l_ptr->first_out) {
		struct tipc_msg *msg = buf_msg(l_ptr->first_out);
		u32 length = msg_size(msg);

		if ((msg_user(msg) == MSG_FRAGMENTER) &&
		    (msg_type(msg) == FIRST_FRAGMENT)) {
			length = msg_size(msg_get_wrapped(msg));
		}
		if (length) {
			l_ptr->stats.msg_lengths_total += length;
			l_ptr->stats.msg_length_counts++;
			if (length <= 64)
				l_ptr->stats.msg_length_profile[0]++;
			else if (length <= 256)
				l_ptr->stats.msg_length_profile[1]++;
			else if (length <= 1024)
				l_ptr->stats.msg_length_profile[2]++;
			else if (length <= 4096)
				l_ptr->stats.msg_length_profile[3]++;
			else if (length <= 16384)
				l_ptr->stats.msg_length_profile[4]++;
			else if (length <= 32768)
				l_ptr->stats.msg_length_profile[5]++;
			else
				l_ptr->stats.msg_length_profile[6]++;
		}
	}

	/* do all other link processing performed on a periodic basis */
	link_state_event(l_ptr, TIMEOUT_EVT);

	if (l_ptr->next_out)
		tipc_link_push_queue(l_ptr);

	tipc_node_unlock(l_ptr->owner);
}

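/* Illustrative note (not part of the original file): the profiling above
 * sorts sent-message lengths into seven buckets. Laid out as a table:
 *
 *	profile[0]:      1 --    64 bytes
 *	profile[1]:     65 --   256
 *	profile[2]:    257 --  1024
 *	profile[3]:   1025 --  4096
 *	profile[4]:   4097 -- 16384
 *	profile[5]:  16385 -- 32768
 *	profile[6]:  32769 and up
 *
 * These counters feed the message-length distribution shown in the link
 * statistics output.
 */
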
static void link_set_timer(struct tipc_link *l_ptr, u32 time)
{
	k_start_timer(&l_ptr->timer, time);
}

/**
 * tipc_link_create - create a new link
 * @n_ptr: pointer to associated node
 * @b_ptr: pointer to associated bearer
 * @media_addr: media address to use when sending messages over link
 *
 * Returns pointer to link.
 */
struct tipc_link *tipc_link_create(struct tipc_node *n_ptr,
				   struct tipc_bearer *b_ptr,
				   const struct tipc_media_addr *media_addr)
{
	struct tipc_link *l_ptr;
	struct tipc_msg *msg;
	char *if_name;
	char addr_string[16];
	u32 peer = n_ptr->addr;

	if (n_ptr->link_cnt >= 2) {
		tipc_addr_string_fill(addr_string, n_ptr->addr);
		pr_err("Attempt to establish third link to %s\n", addr_string);
		return NULL;
	}

	if (n_ptr->links[b_ptr->identity]) {
		tipc_addr_string_fill(addr_string, n_ptr->addr);
		pr_err("Attempt to establish second link on <%s> to %s\n",
		       b_ptr->name, addr_string);
		return NULL;
	}

	l_ptr = kzalloc(sizeof(*l_ptr), GFP_ATOMIC);
	if (!l_ptr) {
		pr_warn("Link creation failed, no memory\n");
		return NULL;
	}

	l_ptr->addr = peer;
	if_name = strchr(b_ptr->name, ':') + 1;
	sprintf(l_ptr->name, "%u.%u.%u:%s-%u.%u.%u:unknown",
		tipc_zone(tipc_own_addr), tipc_cluster(tipc_own_addr),
		tipc_node(tipc_own_addr),
		if_name,
		tipc_zone(peer), tipc_cluster(peer), tipc_node(peer));
	/* note: peer i/f name is updated by reset/activate message */
	memcpy(&l_ptr->media_addr, media_addr, sizeof(*media_addr));
	l_ptr->owner = n_ptr;
	l_ptr->checkpoint = 1;
	l_ptr->peer_session = INVALID_SESSION;
	l_ptr->bearer_id = b_ptr->identity;
	link_set_supervision_props(l_ptr, b_ptr->tolerance);
	l_ptr->state = RESET_UNKNOWN;

	l_ptr->pmsg = (struct tipc_msg *)&l_ptr->proto_msg;
	msg = l_ptr->pmsg;
	tipc_msg_init(msg, LINK_PROTOCOL, RESET_MSG, INT_H_SIZE, l_ptr->addr);
	msg_set_size(msg, sizeof(l_ptr->proto_msg));
	msg_set_session(msg, (tipc_random & 0xffff));
	msg_set_bearer_id(msg, b_ptr->identity);
	strcpy((char *)msg_data(msg), if_name);

	l_ptr->priority = b_ptr->priority;
	tipc_link_set_queue_limits(l_ptr, b_ptr->window);

	l_ptr->net_plane = b_ptr->net_plane;
	link_init_max_pkt(l_ptr);

	l_ptr->next_out_no = 1;
	INIT_LIST_HEAD(&l_ptr->waiting_ports);

	link_reset_statistics(l_ptr);

	tipc_node_attach_link(n_ptr, l_ptr);

	k_init_timer(&l_ptr->timer, (Handler)link_timeout,
		     (unsigned long)l_ptr);

	link_state_event(l_ptr, STARTING_EVT);

	return l_ptr;
}

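/* Illustrative example (not part of the original file): with own address
 * 1.1.1, a bearer named "eth:eth0" and peer 1.1.2, the sprintf() above
 * produces a link name of the form
 *
 *	"1.1.1:eth0-1.1.2:unknown"
 *
 * i.e. <own zone.cluster.node>:<own i/f>-<peer zone.cluster.node>:<peer i/f>.
 * The trailing "unknown" is replaced once a RESET/ACTIVATE message reveals
 * the peer's interface name (see tipc_link_proto_rcv()).
 */
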
void tipc_link_delete_list(unsigned int bearer_id, bool shutting_down)
{
	struct tipc_link *l_ptr;
	struct tipc_node *n_ptr;

	rcu_read_lock();
	list_for_each_entry_rcu(n_ptr, &tipc_node_list, list) {
		tipc_node_lock(n_ptr);
		l_ptr = n_ptr->links[bearer_id];
		if (l_ptr) {
			tipc_link_reset(l_ptr);
			if (shutting_down || !tipc_node_is_up(n_ptr)) {
				tipc_node_detach_link(l_ptr->owner, l_ptr);
				tipc_link_reset_fragments(l_ptr);
				tipc_node_unlock(n_ptr);

				/* Nobody else can access this link now: */
				del_timer_sync(&l_ptr->timer);
				kfree(l_ptr);
			} else {
				/* Detach/delete when failover is finished: */
				l_ptr->flags |= LINK_STOPPED;
				tipc_node_unlock(n_ptr);
				del_timer_sync(&l_ptr->timer);
			}
			continue;
		}
		tipc_node_unlock(n_ptr);
	}
	rcu_read_unlock();
}

/**
 * link_schedule_port - schedule port for deferred sending
 * @l_ptr: pointer to link
 * @origport: reference to sending port
 * @sz: amount of data to be sent
 *
 * Schedules port for renewed sending of messages after link congestion
 * has abated.
 */
static int link_schedule_port(struct tipc_link *l_ptr, u32 origport, u32 sz)
{
	struct tipc_port *p_ptr;
	struct tipc_sock *tsk;

	spin_lock_bh(&tipc_port_list_lock);
	p_ptr = tipc_port_lock(origport);
	if (p_ptr) {
		if (!list_empty(&p_ptr->wait_list))
			goto exit;
		tsk = tipc_port_to_sock(p_ptr);
		tsk->link_cong = 1;
		p_ptr->waiting_pkts = 1 + ((sz - 1) / l_ptr->max_pkt);
		list_add_tail(&p_ptr->wait_list, &l_ptr->waiting_ports);
		l_ptr->stats.link_congs++;
exit:
		tipc_port_unlock(p_ptr);
	}
	spin_unlock_bh(&tipc_port_list_lock);
	return -ELINKCONG;
}

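/* Illustrative note (not part of the original file): waiting_pkts above is
 * the number of link-level packets the blocked send would have needed,
 * rounded up. E.g. with max_pkt == 1500, a 4000 byte message gives
 *
 *	1 + ((4000 - 1) / 1500) == 1 + 2 == 3
 *
 * and tipc_link_wakeup_ports() later subtracts this from the free window
 * when deciding how many waiting ports it can wake.
 */
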
void tipc_link_wakeup_ports(struct tipc_link *l_ptr, int all)
{
	struct tipc_port *p_ptr;
	struct tipc_sock *tsk;
	struct tipc_port *temp_p_ptr;
	int win = l_ptr->queue_limit[0] - l_ptr->out_queue_size;

	if (all)
		win = 100000;
	if (win <= 0)
		return;
	if (!spin_trylock_bh(&tipc_port_list_lock))
		return;
	if (link_congested(l_ptr))
		goto exit;
	list_for_each_entry_safe(p_ptr, temp_p_ptr, &l_ptr->waiting_ports,
				 wait_list) {
		if (win <= 0)
			break;
		tsk = tipc_port_to_sock(p_ptr);
		list_del_init(&p_ptr->wait_list);
		spin_lock_bh(p_ptr->lock);
		tsk->link_cong = 0;
		tipc_sock_wakeup(tsk);
		win -= p_ptr->waiting_pkts;
		spin_unlock_bh(p_ptr->lock);
	}

exit:
	spin_unlock_bh(&tipc_port_list_lock);
}

/**
 * link_release_outqueue - purge link's outbound message queue
 * @l_ptr: pointer to link
 */
static void link_release_outqueue(struct tipc_link *l_ptr)
{
	kfree_skb_list(l_ptr->first_out);
	l_ptr->first_out = NULL;
	l_ptr->out_queue_size = 0;
}

/**
 * tipc_link_reset_fragments - purge link's inbound message fragments queue
 * @l_ptr: pointer to link
 */
void tipc_link_reset_fragments(struct tipc_link *l_ptr)
{
	kfree_skb(l_ptr->reasm_buf);
	l_ptr->reasm_buf = NULL;
}

/**
 * tipc_link_purge_queues - purge all pkt queues associated with link
 * @l_ptr: pointer to link
 */
void tipc_link_purge_queues(struct tipc_link *l_ptr)
{
	kfree_skb_list(l_ptr->oldest_deferred_in);
	kfree_skb_list(l_ptr->first_out);
	tipc_link_reset_fragments(l_ptr);
	kfree_skb(l_ptr->proto_msg_queue);
	l_ptr->proto_msg_queue = NULL;
}

void tipc_link_reset(struct tipc_link *l_ptr)
{
	u32 prev_state = l_ptr->state;
	u32 checkpoint = l_ptr->next_in_no;
	int was_active_link = tipc_link_is_active(l_ptr);

	msg_set_session(l_ptr->pmsg, ((msg_session(l_ptr->pmsg) + 1) & 0xffff));

	/* Link is down, accept any session */
	l_ptr->peer_session = INVALID_SESSION;

	/* Prepare for max packet size negotiation */
	link_init_max_pkt(l_ptr);

	l_ptr->state = RESET_UNKNOWN;

	if ((prev_state == RESET_UNKNOWN) || (prev_state == RESET_RESET))
		return;

	tipc_node_link_down(l_ptr->owner, l_ptr);
	tipc_bearer_remove_dest(l_ptr->bearer_id, l_ptr->addr);

	if (was_active_link && tipc_node_active_links(l_ptr->owner)) {
		l_ptr->reset_checkpoint = checkpoint;
		l_ptr->exp_msg_count = START_CHANGEOVER;
	}

	/* Clean up all queues: */
	link_release_outqueue(l_ptr);
	kfree_skb(l_ptr->proto_msg_queue);
	l_ptr->proto_msg_queue = NULL;
	kfree_skb_list(l_ptr->oldest_deferred_in);
	if (!list_empty(&l_ptr->waiting_ports))
		tipc_link_wakeup_ports(l_ptr, 1);

	l_ptr->retransm_queue_head = 0;
	l_ptr->retransm_queue_size = 0;
	l_ptr->last_out = NULL;
	l_ptr->first_out = NULL;
	l_ptr->next_out = NULL;
	l_ptr->unacked_window = 0;
	l_ptr->checkpoint = 1;
	l_ptr->next_out_no = 1;
	l_ptr->deferred_inqueue_sz = 0;
	l_ptr->oldest_deferred_in = NULL;
	l_ptr->newest_deferred_in = NULL;
	l_ptr->fsm_msg_cnt = 0;
	l_ptr->stale_count = 0;
	link_reset_statistics(l_ptr);
}

void tipc_link_reset_list(unsigned int bearer_id)
{
	struct tipc_link *l_ptr;
	struct tipc_node *n_ptr;

	rcu_read_lock();
	list_for_each_entry_rcu(n_ptr, &tipc_node_list, list) {
		tipc_node_lock(n_ptr);
		l_ptr = n_ptr->links[bearer_id];
		if (l_ptr)
			tipc_link_reset(l_ptr);
		tipc_node_unlock(n_ptr);
	}
	rcu_read_unlock();
}

static void link_activate(struct tipc_link *l_ptr)
{
	l_ptr->next_in_no = l_ptr->stats.recv_info = 1;
	tipc_node_link_up(l_ptr->owner, l_ptr);
	tipc_bearer_add_dest(l_ptr->bearer_id, l_ptr->addr);
}

/**
 * link_state_event - link finite state machine
 * @l_ptr: pointer to link
 * @event: state machine event to process
 */
static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
{
	struct tipc_link *other;
	u32 cont_intv = l_ptr->continuity_interval;

	if (l_ptr->flags & LINK_STOPPED)
		return;

	if (!(l_ptr->flags & LINK_STARTED) && (event != STARTING_EVT))
		return;	/* Not yet. */

	/* Check whether changeover is going on */
	if (l_ptr->exp_msg_count) {
		if (event == TIMEOUT_EVT)
			link_set_timer(l_ptr, cont_intv);
		return;
	}

	switch (l_ptr->state) {
	case WORKING_WORKING:
		switch (event) {
		case TRAFFIC_MSG_EVT:
		case ACTIVATE_MSG:
			break;
		case TIMEOUT_EVT:
			if (l_ptr->next_in_no != l_ptr->checkpoint) {
				l_ptr->checkpoint = l_ptr->next_in_no;
				if (tipc_bclink_acks_missing(l_ptr->owner)) {
					tipc_link_proto_xmit(l_ptr, STATE_MSG,
							     0, 0, 0, 0, 0);
					l_ptr->fsm_msg_cnt++;
				} else if (l_ptr->max_pkt < l_ptr->max_pkt_target) {
					tipc_link_proto_xmit(l_ptr, STATE_MSG,
							     1, 0, 0, 0, 0);
					l_ptr->fsm_msg_cnt++;
				}
				link_set_timer(l_ptr, cont_intv);
				break;
			}
			l_ptr->state = WORKING_UNKNOWN;
			l_ptr->fsm_msg_cnt = 0;
			tipc_link_proto_xmit(l_ptr, STATE_MSG, 1, 0, 0, 0, 0);
			l_ptr->fsm_msg_cnt++;
			link_set_timer(l_ptr, cont_intv / 4);
			break;
		case RESET_MSG:
			pr_info("%s<%s>, requested by peer\n", link_rst_msg,
				l_ptr->name);
			tipc_link_reset(l_ptr);
			l_ptr->state = RESET_RESET;
			l_ptr->fsm_msg_cnt = 0;
			tipc_link_proto_xmit(l_ptr, ACTIVATE_MSG,
					     0, 0, 0, 0, 0);
			l_ptr->fsm_msg_cnt++;
			link_set_timer(l_ptr, cont_intv);
			break;
		default:
			pr_err("%s%u in WW state\n", link_unk_evt, event);
		}
		break;
	case WORKING_UNKNOWN:
		switch (event) {
		case TRAFFIC_MSG_EVT:
		case ACTIVATE_MSG:
			l_ptr->state = WORKING_WORKING;
			l_ptr->fsm_msg_cnt = 0;
			link_set_timer(l_ptr, cont_intv);
			break;
		case RESET_MSG:
			pr_info("%s<%s>, requested by peer while probing\n",
				link_rst_msg, l_ptr->name);
			tipc_link_reset(l_ptr);
			l_ptr->state = RESET_RESET;
			l_ptr->fsm_msg_cnt = 0;
			tipc_link_proto_xmit(l_ptr, ACTIVATE_MSG,
					     0, 0, 0, 0, 0);
			l_ptr->fsm_msg_cnt++;
			link_set_timer(l_ptr, cont_intv);
			break;
		case TIMEOUT_EVT:
			if (l_ptr->next_in_no != l_ptr->checkpoint) {
				l_ptr->state = WORKING_WORKING;
				l_ptr->fsm_msg_cnt = 0;
				l_ptr->checkpoint = l_ptr->next_in_no;
				if (tipc_bclink_acks_missing(l_ptr->owner)) {
					tipc_link_proto_xmit(l_ptr, STATE_MSG,
							     0, 0, 0, 0, 0);
					l_ptr->fsm_msg_cnt++;
				}
				link_set_timer(l_ptr, cont_intv);
			} else if (l_ptr->fsm_msg_cnt < l_ptr->abort_limit) {
				tipc_link_proto_xmit(l_ptr, STATE_MSG,
						     1, 0, 0, 0, 0);
				l_ptr->fsm_msg_cnt++;
				link_set_timer(l_ptr, cont_intv / 4);
			} else {	/* Link has failed */
				pr_warn("%s<%s>, peer not responding\n",
					link_rst_msg, l_ptr->name);
				tipc_link_reset(l_ptr);
				l_ptr->state = RESET_UNKNOWN;
				l_ptr->fsm_msg_cnt = 0;
				tipc_link_proto_xmit(l_ptr, RESET_MSG,
						     0, 0, 0, 0, 0);
				l_ptr->fsm_msg_cnt++;
				link_set_timer(l_ptr, cont_intv);
			}
			break;
		default:
			pr_err("%s%u in WU state\n", link_unk_evt, event);
		}
		break;
	case RESET_UNKNOWN:
		switch (event) {
		case TRAFFIC_MSG_EVT:
			break;
		case ACTIVATE_MSG:
			other = l_ptr->owner->active_links[0];
			if (other && link_working_unknown(other))
				break;
			l_ptr->state = WORKING_WORKING;
			l_ptr->fsm_msg_cnt = 0;
			link_activate(l_ptr);
			tipc_link_proto_xmit(l_ptr, STATE_MSG, 1, 0, 0, 0, 0);
			l_ptr->fsm_msg_cnt++;
			if (l_ptr->owner->working_links == 1)
				tipc_link_sync_xmit(l_ptr);
			link_set_timer(l_ptr, cont_intv);
			break;
		case RESET_MSG:
			l_ptr->state = RESET_RESET;
			l_ptr->fsm_msg_cnt = 0;
			tipc_link_proto_xmit(l_ptr, ACTIVATE_MSG,
					     1, 0, 0, 0, 0);
			l_ptr->fsm_msg_cnt++;
			link_set_timer(l_ptr, cont_intv);
			break;
		case STARTING_EVT:
			l_ptr->flags |= LINK_STARTED;
			/* fall through */
		case TIMEOUT_EVT:
			tipc_link_proto_xmit(l_ptr, RESET_MSG, 0, 0, 0, 0, 0);
			l_ptr->fsm_msg_cnt++;
			link_set_timer(l_ptr, cont_intv);
			break;
		default:
			pr_err("%s%u in RU state\n", link_unk_evt, event);
		}
		break;
	case RESET_RESET:
		switch (event) {
		case TRAFFIC_MSG_EVT:
		case ACTIVATE_MSG:
			other = l_ptr->owner->active_links[0];
			if (other && link_working_unknown(other))
				break;
			l_ptr->state = WORKING_WORKING;
			l_ptr->fsm_msg_cnt = 0;
			link_activate(l_ptr);
			tipc_link_proto_xmit(l_ptr, STATE_MSG, 1, 0, 0, 0, 0);
			l_ptr->fsm_msg_cnt++;
			if (l_ptr->owner->working_links == 1)
				tipc_link_sync_xmit(l_ptr);
			link_set_timer(l_ptr, cont_intv);
			break;
		case RESET_MSG:
			break;
		case TIMEOUT_EVT:
			tipc_link_proto_xmit(l_ptr, ACTIVATE_MSG,
					     0, 0, 0, 0, 0);
			l_ptr->fsm_msg_cnt++;
			link_set_timer(l_ptr, cont_intv);
			break;
		default:
			pr_err("%s%u in RR state\n", link_unk_evt, event);
		}
		break;
	default:
		pr_err("Unknown link state %u/%u\n", l_ptr->state, event);
	}
}

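/* Illustrative summary (not part of the original file) of the state machine
 * above. States: WORKING_WORKING (WW), WORKING_UNKNOWN (WU),
 * RESET_UNKNOWN (RU), RESET_RESET (RR).
 *
 *	WW --TIMEOUT, nothing received--> WU  (start probing at cont_intv/4)
 *	WU --traffic/ACTIVATE-----------> WW
 *	WU --abort_limit probes failed--> RU  (reset, peer not responding)
 *	WW/WU --RESET from peer---------> RR  (reply with ACTIVATE)
 *	RU --ACTIVATE from peer---------> WW  (link comes up)
 *	RU --RESET from peer------------> RR
 *	RR --traffic/ACTIVATE-----------> WW
 *
 * TIMEOUT_EVT in the reset states just keeps re-sending RESET (RU) or
 * ACTIVATE (RR) every continuity interval.
 */
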
/*
 * link_bundle_buf(): Append contents of a buffer to
 * the tail of an existing one.
 */
static int link_bundle_buf(struct tipc_link *l_ptr, struct sk_buff *bundler,
			   struct sk_buff *buf)
{
	struct tipc_msg *bundler_msg = buf_msg(bundler);
	struct tipc_msg *msg = buf_msg(buf);
	u32 size = msg_size(msg);
	u32 bundle_size = msg_size(bundler_msg);
	u32 to_pos = align(bundle_size);
	u32 pad = to_pos - bundle_size;

	if (msg_user(bundler_msg) != MSG_BUNDLER)
		return 0;
	if (msg_type(bundler_msg) != OPEN_MSG)
		return 0;
	if (skb_tailroom(bundler) < (pad + size))
		return 0;
	if (l_ptr->max_pkt < (to_pos + size))
		return 0;

	skb_put(bundler, pad + size);
	skb_copy_to_linear_data_offset(bundler, to_pos, buf->data, size);
	msg_set_size(bundler_msg, to_pos + size);
	msg_set_msgcnt(bundler_msg, msg_msgcnt(bundler_msg) + 1);
	kfree_skb(buf);
	l_ptr->stats.sent_bundled++;
	return 1;
}

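/* Illustrative example (not part of the original file): assume max_pkt is
 * 1500 and the open bundle currently holds 110 bytes. Appending a 200 byte
 * message gives
 *
 *	to_pos = align(110) = 112, pad = 2
 *
 * so 202 bytes are reserved with skb_put(), the message is copied to offset
 * 112, and the bundle grows to 312 bytes -- well below max_pkt, so the
 * append succeeds and msgcnt is bumped by one.
 */
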
static void link_add_to_outqueue(struct tipc_link *l_ptr,
				 struct sk_buff *buf,
				 struct tipc_msg *msg)
{
	u32 ack = mod(l_ptr->next_in_no - 1);
	u32 seqno = mod(l_ptr->next_out_no++);

	msg_set_word(msg, 2, ((ack << 16) | seqno));
	msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
	buf->next = NULL;
	if (l_ptr->first_out) {
		l_ptr->last_out->next = buf;
		l_ptr->last_out = buf;
	} else
		l_ptr->first_out = l_ptr->last_out = buf;

	l_ptr->out_queue_size++;
	if (l_ptr->out_queue_size > l_ptr->stats.max_queue_sz)
		l_ptr->stats.max_queue_sz = l_ptr->out_queue_size;
}

static void link_add_chain_to_outqueue(struct tipc_link *l_ptr,
				       struct sk_buff *buf_chain,
				       u32 long_msgno)
{
	struct sk_buff *buf;
	struct tipc_msg *msg;

	if (!l_ptr->next_out)
		l_ptr->next_out = buf_chain;
	while (buf_chain) {
		buf = buf_chain;
		buf_chain = buf_chain->next;

		msg = buf_msg(buf);
		msg_set_long_msgno(msg, long_msgno);
		link_add_to_outqueue(l_ptr, buf, msg);
	}
}

/*
 * __tipc_link_xmit() is the 'full path' for messages, called from
 * inside TIPC when the 'fast path' in tipc_send_xmit
 * has failed, and from link_send()
 */
int __tipc_link_xmit(struct tipc_link *l_ptr, struct sk_buff *buf)
{
	struct tipc_msg *msg = buf_msg(buf);
	u32 size = msg_size(msg);
	u32 dsz = msg_data_sz(msg);
	u32 queue_size = l_ptr->out_queue_size;
	u32 imp = tipc_msg_tot_importance(msg);
	u32 queue_limit = l_ptr->queue_limit[imp];
	u32 max_packet = l_ptr->max_pkt;

	/* Match msg importance against queue limits: */
	if (unlikely(queue_size >= queue_limit)) {
		if (imp <= TIPC_CRITICAL_IMPORTANCE) {
			link_schedule_port(l_ptr, msg_origport(msg), size);
			kfree_skb(buf);
			return -ELINKCONG;
		}
		kfree_skb(buf);
		if (imp > CONN_MANAGER) {
			pr_warn("%s<%s>, send queue full", link_rst_msg,
				l_ptr->name);
			tipc_link_reset(l_ptr);
		}
		return dsz;
	}

	/* Fragmentation needed ? */
	if (size > max_packet)
		return tipc_link_frag_xmit(l_ptr, buf);

	/* Packet can be queued or sent. */
	if (likely(!link_congested(l_ptr))) {
		link_add_to_outqueue(l_ptr, buf, msg);

		tipc_bearer_send(l_ptr->bearer_id, buf, &l_ptr->media_addr);
		l_ptr->unacked_window = 0;
		return dsz;
	}
	/* Congestion: can message be bundled ? */
	if ((msg_user(msg) != CHANGEOVER_PROTOCOL) &&
	    (msg_user(msg) != MSG_FRAGMENTER)) {

		/* Try adding message to an existing bundle */
		if (l_ptr->next_out &&
		    link_bundle_buf(l_ptr, l_ptr->last_out, buf))
			return dsz;

		/* Try creating a new bundle */
		if (size <= max_packet * 2 / 3) {
			struct sk_buff *bundler = tipc_buf_acquire(max_packet);
			struct tipc_msg bundler_hdr;

			if (bundler) {
				tipc_msg_init(&bundler_hdr, MSG_BUNDLER, OPEN_MSG,
					      INT_H_SIZE, l_ptr->addr);
				skb_copy_to_linear_data(bundler, &bundler_hdr,
							INT_H_SIZE);
				skb_trim(bundler, INT_H_SIZE);
				link_bundle_buf(l_ptr, bundler, buf);
				buf = bundler;
				msg = buf_msg(buf);
				l_ptr->stats.sent_bundles++;
			}
		}
	}
	if (!l_ptr->next_out)
		l_ptr->next_out = buf;
	link_add_to_outqueue(l_ptr, buf, msg);
	return dsz;
}

/*
 * tipc_link_xmit(): same as __tipc_link_xmit(), but the link to use
 * has not been selected yet, and the owner node is not locked
 * Called by TIPC internal users, e.g. the name distributor
 */
int tipc_link_xmit(struct sk_buff *buf, u32 dest, u32 selector)
{
	struct tipc_link *l_ptr;
	struct tipc_node *n_ptr;
	int res = -ELINKCONG;

	n_ptr = tipc_node_find(dest);
	if (n_ptr) {
		tipc_node_lock(n_ptr);
		l_ptr = n_ptr->active_links[selector & 1];
		if (l_ptr)
			res = __tipc_link_xmit(l_ptr, buf);
		else
			kfree_skb(buf);
		tipc_node_unlock(n_ptr);
	} else {
		kfree_skb(buf);
	}
	return res;
}

/* tipc_link_cong: determine return value and how to treat the
 * sent buffer during link congestion.
 * - For plain, errorless user data messages we keep the buffer and
 *   return -ELINKCONG.
 * - For all other messages we discard the buffer and return -EHOSTUNREACH
 * - For TIPC internal messages we also reset the link
 */
static int tipc_link_cong(struct tipc_link *link, struct sk_buff *buf)
{
	struct tipc_msg *msg = buf_msg(buf);
	uint psz = msg_size(msg);
	uint imp = tipc_msg_tot_importance(msg);
	u32 oport = msg_tot_origport(msg);

	if (likely(imp <= TIPC_CRITICAL_IMPORTANCE)) {
		if (!msg_errcode(msg) && !msg_reroute_cnt(msg)) {
			link_schedule_port(link, oport, psz);
			return -ELINKCONG;
		}
	} else {
		pr_warn("%s<%s>, send queue full", link_rst_msg, link->name);
		tipc_link_reset(link);
	}
	kfree_skb_list(buf);
	return -EHOSTUNREACH;
}

/**
 * __tipc_link_xmit2(): same as tipc_link_xmit2(), but destlink is known & locked
 * @link: link to use
 * @buf: chain of buffers containing message
 * Consumes the buffer chain, except when returning -ELINKCONG
 * Returns 0 if success, otherwise errno: -ELINKCONG, -EMSGSIZE (plain socket
 * user data messages) or -EHOSTUNREACH (all other messages/senders)
 * Only the socket functions tipc_send_stream() and tipc_send_packet() need
 * to act on the return value, since they may need to do more send attempts.
 */
int __tipc_link_xmit2(struct tipc_link *link, struct sk_buff *buf)
{
	struct tipc_msg *msg = buf_msg(buf);
	uint psz = msg_size(msg);
	uint qsz = link->out_queue_size;
	uint sndlim = link->queue_limit[0];
	uint imp = tipc_msg_tot_importance(msg);
	uint mtu = link->max_pkt;
	uint ack = mod(link->next_in_no - 1);
	uint seqno = link->next_out_no;
	uint bc_last_in = link->owner->bclink.last_in;
	struct tipc_media_addr *addr = &link->media_addr;
	struct sk_buff *next = buf->next;

	/* Match queue limits against msg importance: */
	if (unlikely(qsz >= link->queue_limit[imp]))
		return tipc_link_cong(link, buf);

	/* Has valid packet limit been used ? */
	if (unlikely(psz > mtu)) {
		kfree_skb_list(buf);
		return -EMSGSIZE;
	}

	/* Prepare each packet for sending, and add to outqueue: */
	while (buf) {
		next = buf->next;
		msg = buf_msg(buf);
		msg_set_word(msg, 2, ((ack << 16) | mod(seqno)));
		msg_set_bcast_ack(msg, bc_last_in);

		if (!link->first_out) {
			link->first_out = buf;
		} else if (qsz < sndlim) {
			link->last_out->next = buf;
		} else if (tipc_msg_bundle(link->last_out, buf, mtu)) {
			link->stats.sent_bundled++;
			buf = next;
			continue;
		} else if (tipc_msg_make_bundle(&buf, mtu, link->addr)) {
			link->stats.sent_bundled++;
			link->stats.sent_bundles++;
			link->last_out->next = buf;
			if (!link->next_out)
				link->next_out = buf;
		} else {
			link->last_out->next = buf;
			if (!link->next_out)
				link->next_out = buf;
		}

		/* Send packet if possible: */
		if (likely(++qsz <= sndlim)) {
			tipc_bearer_send(link->bearer_id, buf, addr);
			link->next_out = next;
			link->unacked_window = 0;
		}
		seqno++;
		link->last_out = buf;
		buf = next;
	}
	link->next_out_no = seqno;
	link->out_queue_size = qsz;
	return 0;
}

/**
 * tipc_link_xmit2() is the general link level function for message sending
 * @buf: chain of buffers containing message
 * @dnode: address of destination node
 * @selector: a number used for deterministic link selection
 * Consumes the buffer chain, except when returning -ELINKCONG
 * Returns 0 if success, otherwise errno: -ELINKCONG, -EHOSTUNREACH, -EMSGSIZE
 */
int tipc_link_xmit2(struct sk_buff *buf, u32 dnode, u32 selector)
{
	struct tipc_link *link = NULL;
	struct tipc_node *node;
	int rc = -EHOSTUNREACH;

	node = tipc_node_find(dnode);
	if (node) {
		tipc_node_lock(node);
		link = node->active_links[selector & 1];
		if (link)
			rc = __tipc_link_xmit2(link, buf);
		tipc_node_unlock(node);
	}

	if (link)
		return rc;

	if (likely(in_own_node(dnode)))
		return tipc_sk_rcv(buf);

	kfree_skb_list(buf);
	return rc;
}

/*
 * tipc_link_sync_xmit - synchronize broadcast link endpoints.
 *
 * Give a newly added peer node the sequence number where it should
 * start receiving and acking broadcast packets.
 *
 * Called with node locked
 */
static void tipc_link_sync_xmit(struct tipc_link *l)
{
	struct sk_buff *buf;
	struct tipc_msg *msg;

	buf = tipc_buf_acquire(INT_H_SIZE);
	if (!buf)
		return;

	msg = buf_msg(buf);
	tipc_msg_init(msg, BCAST_PROTOCOL, STATE_MSG, INT_H_SIZE, l->addr);
	msg_set_last_bcast(msg, l->owner->bclink.acked);
	link_add_chain_to_outqueue(l, buf, 0);
	tipc_link_push_queue(l);
}

/*
 * tipc_link_sync_rcv - synchronize broadcast link endpoints.
 * Receive the sequence number where we should start receiving and
 * acking broadcast packets from a newly added peer node, and open
 * up for reception of such packets.
 *
 * Called with node locked
 */
static void tipc_link_sync_rcv(struct tipc_node *n, struct sk_buff *buf)
{
	struct tipc_msg *msg = buf_msg(buf);

	n->bclink.last_sent = n->bclink.last_in = msg_last_bcast(msg);
	n->bclink.recv_permitted = true;
	kfree_skb(buf);
}

/*
 * tipc_link_names_xmit - send name table entries to new neighbor
 *
 * Send routine for bulk delivery of name table messages when contact
 * with a new neighbor occurs. No link congestion checking is performed
 * because name table messages *must* be delivered. The messages must be
 * small enough not to require fragmentation.
 * Called without any locks held.
 */
void tipc_link_names_xmit(struct list_head *message_list, u32 dest)
{
	struct tipc_node *n_ptr;
	struct tipc_link *l_ptr;
	struct sk_buff *buf;
	struct sk_buff *temp_buf;

	if (list_empty(message_list))
		return;

	n_ptr = tipc_node_find(dest);
	if (n_ptr) {
		tipc_node_lock(n_ptr);
		l_ptr = n_ptr->active_links[0];
		if (l_ptr) {
			/* convert circular list to linear list */
			((struct sk_buff *)message_list->prev)->next = NULL;
			link_add_chain_to_outqueue(l_ptr,
				(struct sk_buff *)message_list->next, 0);
			tipc_link_push_queue(l_ptr);
			INIT_LIST_HEAD(message_list);
		}
		tipc_node_unlock(n_ptr);
	}

	/* discard the messages if they couldn't be sent */
	list_for_each_safe(buf, temp_buf, ((struct sk_buff *)message_list)) {
		list_del((struct list_head *)buf);
		kfree_skb(buf);
	}
}

/*
 * tipc_link_push_packet: Push one unsent packet to the media
 */
static u32 tipc_link_push_packet(struct tipc_link *l_ptr)
{
	struct sk_buff *buf = l_ptr->first_out;
	u32 r_q_size = l_ptr->retransm_queue_size;
	u32 r_q_head = l_ptr->retransm_queue_head;

	/* Step to position where retransmission failed, if any,    */
	/* consider that buffers may have been released in meantime */
	if (r_q_size && buf) {
		u32 last = lesser(mod(r_q_head + r_q_size),
				  link_last_sent(l_ptr));
		u32 first = buf_seqno(buf);

		while (buf && less(first, r_q_head)) {
			first = mod(first + 1);
			buf = buf->next;
		}
		l_ptr->retransm_queue_head = r_q_head = first;
		l_ptr->retransm_queue_size = r_q_size = mod(last - first);
	}

	/* Continue retransmission now, if there is anything: */
	if (r_q_size && buf) {
		msg_set_ack(buf_msg(buf), mod(l_ptr->next_in_no - 1));
		msg_set_bcast_ack(buf_msg(buf), l_ptr->owner->bclink.last_in);
		tipc_bearer_send(l_ptr->bearer_id, buf, &l_ptr->media_addr);
		l_ptr->retransm_queue_head = mod(++r_q_head);
		l_ptr->retransm_queue_size = --r_q_size;
		l_ptr->stats.retransmitted++;
		return 0;
	}

	/* Send deferred protocol message, if any: */
	buf = l_ptr->proto_msg_queue;
	if (buf) {
		msg_set_ack(buf_msg(buf), mod(l_ptr->next_in_no - 1));
		msg_set_bcast_ack(buf_msg(buf), l_ptr->owner->bclink.last_in);
		tipc_bearer_send(l_ptr->bearer_id, buf, &l_ptr->media_addr);
		l_ptr->unacked_window = 0;
		kfree_skb(buf);
		l_ptr->proto_msg_queue = NULL;
		return 0;
	}

	/* Send one deferred data message, if send window not full: */
	buf = l_ptr->next_out;
	if (buf) {
		struct tipc_msg *msg = buf_msg(buf);
		u32 next = msg_seqno(msg);
		u32 first = buf_seqno(l_ptr->first_out);

		if (mod(next - first) < l_ptr->queue_limit[0]) {
			msg_set_ack(msg, mod(l_ptr->next_in_no - 1));
			msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
			tipc_bearer_send(l_ptr->bearer_id, buf,
					 &l_ptr->media_addr);
			if (msg_user(msg) == MSG_BUNDLER)
				msg_set_type(msg, BUNDLE_CLOSED);
			l_ptr->next_out = buf->next;
			return 0;
		}
	}
	return 1;
}

/*
 * push_queue(): push out the unsent messages of a link where
 *               congestion has abated. Node is locked
 */
void tipc_link_push_queue(struct tipc_link *l_ptr)
{
	u32 res;

	do {
		res = tipc_link_push_packet(l_ptr);
	} while (!res);
}

void tipc_link_reset_all(struct tipc_node *node)
{
	char addr_string[16];
	u32 i;

	tipc_node_lock(node);

	pr_warn("Resetting all links to %s\n",
		tipc_addr_string_fill(addr_string, node->addr));

	for (i = 0; i < MAX_BEARERS; i++) {
		if (node->links[i]) {
			link_print(node->links[i], "Resetting link\n");
			tipc_link_reset(node->links[i]);
		}
	}

	tipc_node_unlock(node);
}

static void link_retransmit_failure(struct tipc_link *l_ptr,
				    struct sk_buff *buf)
{
	struct tipc_msg *msg = buf_msg(buf);

	pr_warn("Retransmission failure on link <%s>\n", l_ptr->name);

	if (l_ptr->addr) {
		/* Handle failure on standard link */
		link_print(l_ptr, "Resetting link\n");
		tipc_link_reset(l_ptr);
	} else {
		/* Handle failure on broadcast link */
		struct tipc_node *n_ptr;
		char addr_string[16];

		pr_info("Msg seq number: %u,  ", msg_seqno(msg));
		pr_cont("Outstanding acks: %lu\n",
			(unsigned long) TIPC_SKB_CB(buf)->handle);

		n_ptr = tipc_bclink_retransmit_to();
		tipc_node_lock(n_ptr);

		tipc_addr_string_fill(addr_string, n_ptr->addr);
		pr_info("Broadcast link info for %s\n", addr_string);
		pr_info("Reception permitted: %d,  Acked: %u\n",
			n_ptr->bclink.recv_permitted,
			n_ptr->bclink.acked);
		pr_info("Last in: %u,  Oos state: %u,  Last sent: %u\n",
			n_ptr->bclink.last_in,
			n_ptr->bclink.oos_state,
			n_ptr->bclink.last_sent);

		tipc_node_unlock(n_ptr);

		tipc_bclink_set_flags(TIPC_BCLINK_RESET);
		l_ptr->stale_count = 0;
	}
}

void tipc_link_retransmit(struct tipc_link *l_ptr, struct sk_buff *buf,
			  u32 retransmits)
{
	struct tipc_msg *msg;

	if (!buf)
		return;

	msg = buf_msg(buf);

	/* Detect repeated retransmit failures */
	if (l_ptr->last_retransmitted == msg_seqno(msg)) {
		if (++l_ptr->stale_count > 100) {
			link_retransmit_failure(l_ptr, buf);
			return;
		}
	} else {
		l_ptr->last_retransmitted = msg_seqno(msg);
		l_ptr->stale_count = 1;
	}

	while (retransmits && (buf != l_ptr->next_out) && buf) {
		msg = buf_msg(buf);
		msg_set_ack(msg, mod(l_ptr->next_in_no - 1));
		msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
		tipc_bearer_send(l_ptr->bearer_id, buf, &l_ptr->media_addr);
		buf = buf->next;
		retransmits--;
		l_ptr->stats.retransmitted++;
	}

	l_ptr->retransm_queue_head = l_ptr->retransm_queue_size = 0;
}

/**
 * link_insert_deferred_queue - insert deferred messages back into receive chain
 */
static struct sk_buff *link_insert_deferred_queue(struct tipc_link *l_ptr,
						  struct sk_buff *buf)
{
	u32 seq_no;

	if (l_ptr->oldest_deferred_in == NULL)
		return buf;

	seq_no = buf_seqno(l_ptr->oldest_deferred_in);
	if (seq_no == mod(l_ptr->next_in_no)) {
		l_ptr->newest_deferred_in->next = buf;
		buf = l_ptr->oldest_deferred_in;
		l_ptr->oldest_deferred_in = NULL;
		l_ptr->deferred_inqueue_sz = 0;
	}
	return buf;
}

/**
 * link_recv_buf_validate - validate basic format of received message
 *
 * This routine ensures a TIPC message has an acceptable header, and at least
 * as much data as the header indicates it should.  The routine also ensures
 * that the entire message header is stored in the main fragment of the message
 * buffer, to simplify future access to message header fields.
 *
 * Note: Having extra info present in the message header or data areas is OK.
 * TIPC will ignore the excess, under the assumption that it is optional info
 * introduced by a later release of the protocol.
 */
static int link_recv_buf_validate(struct sk_buff *buf)
{
	static u32 min_data_hdr_size[8] = {
		SHORT_H_SIZE, MCAST_H_SIZE, NAMED_H_SIZE, BASIC_H_SIZE,
		MAX_H_SIZE, MAX_H_SIZE, MAX_H_SIZE, MAX_H_SIZE
		};

	struct tipc_msg *msg;
	u32 tipc_hdr[2];
	u32 size;
	u32 hdr_size;
	u32 min_hdr_size;

	/* If this packet comes from the defer queue, the skb has already
	 * been validated
	 */
	if (unlikely(TIPC_SKB_CB(buf)->deferred))
		return 1;

	if (unlikely(buf->len < MIN_H_SIZE))
		return 0;

	msg = skb_header_pointer(buf, 0, sizeof(tipc_hdr), tipc_hdr);
	if (msg == NULL)
		return 0;

	if (unlikely(msg_version(msg) != TIPC_VERSION))
		return 0;

	size = msg_size(msg);
	hdr_size = msg_hdr_sz(msg);
	min_hdr_size = msg_isdata(msg) ?
		min_data_hdr_size[msg_type(msg)] : INT_H_SIZE;

	if (unlikely((hdr_size < min_hdr_size) ||
		     (size < hdr_size) ||
		     (buf->len < size) ||
		     (size - hdr_size > TIPC_MAX_USER_MSG_SIZE)))
		return 0;

	return pskb_may_pull(buf, hdr_size);
}

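/* Illustrative note (not part of the original file): for data messages the
 * min_data_hdr_size[] table above is indexed by message type, so the header
 * check amounts to:
 *
 *	type 0 (connection msg)  -> header >= SHORT_H_SIZE
 *	type 1 (multicast msg)   -> header >= MCAST_H_SIZE
 *	type 2 (named msg)       -> header >= NAMED_H_SIZE
 *	type 3 (direct msg)      -> header >= BASIC_H_SIZE
 *	types 4-7                -> header >= MAX_H_SIZE
 *
 * while all internal (non-data) messages need at least INT_H_SIZE. On top of
 * that, the stated size must fit inside the buffer and the payload may not
 * exceed TIPC_MAX_USER_MSG_SIZE.
 */
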
/**
 * tipc_rcv - process TIPC packets/messages arriving from off-node
 * @head: pointer to message buffer chain
 * @b_ptr: pointer to bearer message arrived on
 *
 * Invoked with no locks held.  Bearer pointer must point to a valid bearer
 * structure (i.e. cannot be NULL), but bearer can be inactive.
 */
void tipc_rcv(struct sk_buff *head, struct tipc_bearer *b_ptr)
{
	while (head) {
		struct tipc_node *n_ptr;
		struct tipc_link *l_ptr;
		struct sk_buff *crs;
		struct sk_buff *buf = head;
		struct tipc_msg *msg;
		u32 seq_no;
		u32 ackd;
		u32 released = 0;

		head = head->next;
		buf->next = NULL;

		/* Ensure message is well-formed */
		if (unlikely(!link_recv_buf_validate(buf)))
			goto discard;

		/* Ensure message data is a single contiguous unit */
		if (unlikely(skb_linearize(buf)))
			goto discard;

		/* Handle arrival of a non-unicast link message */
		msg = buf_msg(buf);

		if (unlikely(msg_non_seq(msg))) {
			if (msg_user(msg) ==  LINK_CONFIG)
				tipc_disc_rcv(buf, b_ptr);
			else
				tipc_bclink_rcv(buf);
			continue;
		}

		/* Discard unicast link messages destined for another node */
		if (unlikely(!msg_short(msg) &&
			     (msg_destnode(msg) != tipc_own_addr)))
			goto discard;

		/* Locate neighboring node that sent message */
		n_ptr = tipc_node_find(msg_prevnode(msg));
		if (unlikely(!n_ptr))
			goto discard;
		tipc_node_lock(n_ptr);

		/* Locate unicast link endpoint that should handle message */
		l_ptr = n_ptr->links[b_ptr->identity];
		if (unlikely(!l_ptr))
			goto unlock_discard;

		/* Verify that communication with node is currently allowed */
		if ((n_ptr->action_flags & TIPC_WAIT_PEER_LINKS_DOWN) &&
		    msg_user(msg) == LINK_PROTOCOL &&
		    (msg_type(msg) == RESET_MSG ||
		    msg_type(msg) == ACTIVATE_MSG) &&
		    !msg_redundant_link(msg))
			n_ptr->action_flags &= ~TIPC_WAIT_PEER_LINKS_DOWN;

		if (tipc_node_blocked(n_ptr))
			goto unlock_discard;

		/* Validate message sequence number info */
		seq_no = msg_seqno(msg);
		ackd = msg_ack(msg);

		/* Release acked messages */
		if (n_ptr->bclink.recv_permitted)
			tipc_bclink_acknowledge(n_ptr, msg_bcast_ack(msg));

		crs = l_ptr->first_out;
		while ((crs != l_ptr->next_out) &&
		       less_eq(buf_seqno(crs), ackd)) {
			struct sk_buff *next = crs->next;
			kfree_skb(crs);
			crs = next;
			released++;
		}
		if (released) {
			l_ptr->first_out = crs;
			l_ptr->out_queue_size -= released;
		}

		/* Try sending any messages link endpoint has pending */
		if (unlikely(l_ptr->next_out))
			tipc_link_push_queue(l_ptr);

		if (unlikely(!list_empty(&l_ptr->waiting_ports)))
			tipc_link_wakeup_ports(l_ptr, 0);

		if (unlikely(++l_ptr->unacked_window >= TIPC_MIN_LINK_WIN)) {
			l_ptr->stats.sent_acks++;
			tipc_link_proto_xmit(l_ptr, STATE_MSG, 0, 0, 0, 0, 0);
		}

		/* Process the incoming packet */
		if (unlikely(!link_working_working(l_ptr))) {
			if (msg_user(msg) == LINK_PROTOCOL) {
				tipc_link_proto_rcv(l_ptr, buf);
				head = link_insert_deferred_queue(l_ptr, head);
				tipc_node_unlock(n_ptr);
				continue;
			}

			/* Traffic message. Conditionally activate link */
			link_state_event(l_ptr, TRAFFIC_MSG_EVT);

			if (link_working_working(l_ptr)) {
				/* Re-insert buffer in front of queue */
				buf->next = head;
				head = buf;
				tipc_node_unlock(n_ptr);
				continue;
			}
			goto unlock_discard;
		}

		/* Link is now in state WORKING_WORKING */
		if (unlikely(seq_no != mod(l_ptr->next_in_no))) {
			link_handle_out_of_seq_msg(l_ptr, buf);
			head = link_insert_deferred_queue(l_ptr, head);
			tipc_node_unlock(n_ptr);
			continue;
		}
		l_ptr->next_in_no++;
		if (unlikely(l_ptr->oldest_deferred_in))
			head = link_insert_deferred_queue(l_ptr, head);

		if (tipc_link_prepare_input(l_ptr, &buf)) {
			tipc_node_unlock(n_ptr);
			continue;
		}
		tipc_node_unlock(n_ptr);

		if (tipc_link_input(l_ptr, buf) != 0)
			goto discard;
		continue;
unlock_discard:
		tipc_node_unlock(n_ptr);
discard:
		kfree_skb(buf);
	}
}

/**
 * tipc_link_prepare_input - process TIPC link messages
 *
 * returns nonzero if the message was consumed
 *
 * Node lock must be held
 */
static int tipc_link_prepare_input(struct tipc_link *l, struct sk_buff **buf)
{
	struct tipc_node *n;
	struct tipc_msg *msg;
	int res = -EINVAL;

	n = l->owner;
	msg = buf_msg(*buf);
	switch (msg_user(msg)) {
	case CHANGEOVER_PROTOCOL:
		if (tipc_link_tunnel_rcv(n, buf))
			res = 0;
		break;
	case MSG_FRAGMENTER:
		l->stats.recv_fragments++;
		if (tipc_buf_append(&l->reasm_buf, buf)) {
			l->stats.recv_fragmented++;
			res = 0;
		} else if (!l->reasm_buf) {
			tipc_link_reset(l);
		}
		break;
	case MSG_BUNDLER:
		l->stats.recv_bundles++;
		l->stats.recv_bundled += msg_msgcnt(msg);
		res = 0;
		break;
	case NAME_DISTRIBUTOR:
		n->bclink.recv_permitted = true;
		res = 0;
		break;
	case BCAST_PROTOCOL:
		tipc_link_sync_rcv(n, *buf);
		break;
	default:
		res = 0;
	}
	return res;
}

/**
 * tipc_link_input - Deliver message to higher layers
 */
static int tipc_link_input(struct tipc_link *l, struct sk_buff *buf)
{
	struct tipc_msg *msg = buf_msg(buf);
	int res = 0;

	switch (msg_user(msg)) {
	case TIPC_LOW_IMPORTANCE:
	case TIPC_MEDIUM_IMPORTANCE:
	case TIPC_HIGH_IMPORTANCE:
	case TIPC_CRITICAL_IMPORTANCE:
	case CONN_MANAGER:
		tipc_sk_rcv(buf);
		break;
	case NAME_DISTRIBUTOR:
		tipc_named_rcv(buf);
		break;
	case MSG_BUNDLER:
		tipc_link_bundle_rcv(buf);
		break;
	default:
		res = -EINVAL;
	}
	return res;
}

/**
 * tipc_link_defer_pkt - Add out-of-sequence message to deferred reception queue
 *
 * Returns increase in queue length (i.e. 0 or 1)
 */
u32 tipc_link_defer_pkt(struct sk_buff **head, struct sk_buff **tail,
			struct sk_buff *buf)
{
	struct sk_buff *queue_buf;
	struct sk_buff **prev;
	u32 seq_no = buf_seqno(buf);

	buf->next = NULL;

	/* Empty queue ? */
	if (*head == NULL) {
		*head = *tail = buf;
		return 1;
	}

	/* Last ? */
	if (less(buf_seqno(*tail), seq_no)) {
		(*tail)->next = buf;
		*tail = buf;
		return 1;
	}

	/* Locate insertion point in queue, then insert; discard if duplicate */
	prev = head;
	queue_buf = *head;
	for (;;) {
		u32 curr_seqno = buf_seqno(queue_buf);

		if (seq_no == curr_seqno) {
			kfree_skb(buf);
			return 0;
		}

		if (less(seq_no, curr_seqno))
			break;

		prev = &queue_buf->next;
		queue_buf = queue_buf->next;
	}

	buf->next = queue_buf;
	*prev = buf;
	return 1;
}

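/* Illustrative example (not part of the original file): with a deferred
 * queue currently holding seqnos {5, 6, 9}, the insertion above behaves as:
 *
 *	defer(10) -> appended at tail   -> {5, 6, 9, 10}, returns 1
 *	defer(7)  -> inserted before 9  -> {5, 6, 7, 9},  returns 1
 *	defer(6)  -> duplicate, freed   -> {5, 6, 9},     returns 0
 *
 * keeping the queue sorted so link_insert_deferred_queue() can splice it
 * straight back into the receive chain once the gap is filled.
 */
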
/*
 * link_handle_out_of_seq_msg - handle arrival of out-of-sequence packet
 */
static void link_handle_out_of_seq_msg(struct tipc_link *l_ptr,
				       struct sk_buff *buf)
{
	u32 seq_no = buf_seqno(buf);

	if (likely(msg_user(buf_msg(buf)) == LINK_PROTOCOL)) {
		tipc_link_proto_rcv(l_ptr, buf);
		return;
	}

	/* Record OOS packet arrival (force mismatch on next timeout) */
	l_ptr->checkpoint--;

	/*
	 * Discard packet if a duplicate; otherwise add it to deferred queue
	 * and notify peer of gap as per protocol specification
	 */
	if (less(seq_no, mod(l_ptr->next_in_no))) {
		l_ptr->stats.duplicates++;
		kfree_skb(buf);
		return;
	}

	if (tipc_link_defer_pkt(&l_ptr->oldest_deferred_in,
				&l_ptr->newest_deferred_in, buf)) {
		l_ptr->deferred_inqueue_sz++;
		l_ptr->stats.deferred_recv++;
		TIPC_SKB_CB(buf)->deferred = true;
		if ((l_ptr->deferred_inqueue_sz % 16) == 1)
			tipc_link_proto_xmit(l_ptr, STATE_MSG, 0, 0, 0, 0, 0);
	} else {
		l_ptr->stats.duplicates++;
	}
}

/*
 * Send protocol message to the other endpoint.
 */
void tipc_link_proto_xmit(struct tipc_link *l_ptr, u32 msg_typ, int probe_msg,
			  u32 gap, u32 tolerance, u32 priority, u32 ack_mtu)
{
	struct sk_buff *buf = NULL;
	struct tipc_msg *msg = l_ptr->pmsg;
	u32 msg_size = sizeof(l_ptr->proto_msg);
	int r_flag;

	/* Discard any previous message that was deferred due to congestion */
	if (l_ptr->proto_msg_queue) {
		kfree_skb(l_ptr->proto_msg_queue);
		l_ptr->proto_msg_queue = NULL;
	}

	/* Don't send protocol message during link changeover */
	if (l_ptr->exp_msg_count)
		return;

	/* Abort non-RESET send if communication with node is prohibited */
	if ((tipc_node_blocked(l_ptr->owner)) && (msg_typ != RESET_MSG))
		return;

	/* Create protocol message with "out-of-sequence" sequence number */
	msg_set_type(msg, msg_typ);
	msg_set_net_plane(msg, l_ptr->net_plane);
	msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
	msg_set_last_bcast(msg, tipc_bclink_get_last_sent());

	if (msg_typ == STATE_MSG) {
		u32 next_sent = mod(l_ptr->next_out_no);

		if (!tipc_link_is_up(l_ptr))
			return;
		if (l_ptr->next_out)
			next_sent = buf_seqno(l_ptr->next_out);
		msg_set_next_sent(msg, next_sent);
		if (l_ptr->oldest_deferred_in) {
			u32 rec = buf_seqno(l_ptr->oldest_deferred_in);
			gap = mod(rec - mod(l_ptr->next_in_no));
		}
		msg_set_seq_gap(msg, gap);
		if (gap)
			l_ptr->stats.sent_nacks++;
		msg_set_link_tolerance(msg, tolerance);
		msg_set_linkprio(msg, priority);
		msg_set_max_pkt(msg, ack_mtu);
		msg_set_ack(msg, mod(l_ptr->next_in_no - 1));
		msg_set_probe(msg, probe_msg != 0);
		if (probe_msg) {
			u32 mtu = l_ptr->max_pkt;

			if ((mtu < l_ptr->max_pkt_target) &&
			    link_working_working(l_ptr) &&
			    l_ptr->fsm_msg_cnt) {
				msg_size = (mtu + (l_ptr->max_pkt_target - mtu)/2 + 2) & ~3;
				if (l_ptr->max_pkt_probes == 10) {
					l_ptr->max_pkt_target = (msg_size - 4);
					l_ptr->max_pkt_probes = 0;
					msg_size = (mtu + (l_ptr->max_pkt_target - mtu)/2 + 2) & ~3;
				}
				l_ptr->max_pkt_probes++;
			}

			l_ptr->stats.sent_probes++;
		}
		l_ptr->stats.sent_states++;
	} else {		/* RESET_MSG or ACTIVATE_MSG */
		msg_set_ack(msg, mod(l_ptr->reset_checkpoint - 1));
		msg_set_seq_gap(msg, 0);
		msg_set_next_sent(msg, 1);
		msg_set_probe(msg, 0);
		msg_set_link_tolerance(msg, l_ptr->tolerance);
		msg_set_linkprio(msg, l_ptr->priority);
		msg_set_max_pkt(msg, l_ptr->max_pkt_target);
	}

	r_flag = (l_ptr->owner->working_links > tipc_link_is_up(l_ptr));
	msg_set_redundant_link(msg, r_flag);
	msg_set_linkprio(msg, l_ptr->priority);
	msg_set_size(msg, msg_size);

	msg_set_seqno(msg, mod(l_ptr->next_out_no + (0xffff/2)));

	buf = tipc_buf_acquire(msg_size);
	if (!buf)
		return;

	skb_copy_to_linear_data(buf, msg, sizeof(l_ptr->proto_msg));
	buf->priority = TC_PRIO_CONTROL;

	tipc_bearer_send(l_ptr->bearer_id, buf, &l_ptr->media_addr);
	l_ptr->unacked_window = 0;
	kfree_skb(buf);
}

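/* Illustrative example (not part of the original file) of the MTU probing
 * arithmetic above: with max_pkt == 1500 and max_pkt_target == 2000, each
 * probe is padded to
 *
 *	(1500 + (2000 - 1500)/2 + 2) & ~3 == 1752 bytes,
 *
 * i.e. halfway between the confirmed and the hoped-for packet size. If ten
 * such probes go unacknowledged, max_pkt_target is lowered to the probe
 * size minus 4 and the search continues, binary-search fashion, until a
 * STATE_MSG ack (max_pkt_ack in tipc_link_proto_rcv()) confirms a size.
 */
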
/*
 * Receive protocol message :
 * Note that network plane id propagates through the network, and may
 * change at any time. The node with lowest address rules
 */
static void tipc_link_proto_rcv(struct tipc_link *l_ptr, struct sk_buff *buf)
{
	u32 rec_gap = 0;
	u32 max_pkt_info;
	u32 max_pkt_ack;
	u32 msg_tol;
	struct tipc_msg *msg = buf_msg(buf);

	/* Discard protocol message during link changeover */
	if (l_ptr->exp_msg_count)
		goto exit;

	if (l_ptr->net_plane != msg_net_plane(msg))
		if (tipc_own_addr > msg_prevnode(msg))
			l_ptr->net_plane = msg_net_plane(msg);

	switch (msg_type(msg)) {

	case RESET_MSG:
		if (!link_working_unknown(l_ptr) &&
		    (l_ptr->peer_session != INVALID_SESSION)) {
			if (less_eq(msg_session(msg), l_ptr->peer_session))
				break; /* duplicate or old reset: ignore */
		}

		if (!msg_redundant_link(msg) && (link_working_working(l_ptr) ||
				link_working_unknown(l_ptr))) {
			/*
			 * peer has lost contact -- don't allow peer's links
			 * to reactivate before we recognize loss & clean up
			 */
			l_ptr->owner->action_flags |= TIPC_WAIT_OWN_LINKS_DOWN;
		}

		link_state_event(l_ptr, RESET_MSG);

		/* fall thru' */
	case ACTIVATE_MSG:
		/* Update link settings according to other endpoint's values */
		strcpy((strrchr(l_ptr->name, ':') + 1), (char *)msg_data(msg));

		msg_tol = msg_link_tolerance(msg);
		if (msg_tol > l_ptr->tolerance)
			link_set_supervision_props(l_ptr, msg_tol);

		if (msg_linkprio(msg) > l_ptr->priority)
			l_ptr->priority = msg_linkprio(msg);

		max_pkt_info = msg_max_pkt(msg);
		if (max_pkt_info) {
			if (max_pkt_info < l_ptr->max_pkt_target)
				l_ptr->max_pkt_target = max_pkt_info;
			if (l_ptr->max_pkt > l_ptr->max_pkt_target)
				l_ptr->max_pkt = l_ptr->max_pkt_target;
		} else {
			l_ptr->max_pkt = l_ptr->max_pkt_target;
		}

		/* Synchronize broadcast link info, if not done previously */
		if (!tipc_node_is_up(l_ptr->owner)) {
			l_ptr->owner->bclink.last_sent =
				l_ptr->owner->bclink.last_in =
				msg_last_bcast(msg);
			l_ptr->owner->bclink.oos_state = 0;
		}

		l_ptr->peer_session = msg_session(msg);
		l_ptr->peer_bearer_id = msg_bearer_id(msg);

		if (msg_type(msg) == ACTIVATE_MSG)
			link_state_event(l_ptr, ACTIVATE_MSG);
		break;
	case STATE_MSG:

		msg_tol = msg_link_tolerance(msg);
		if (msg_tol)
			link_set_supervision_props(l_ptr, msg_tol);

		if (msg_linkprio(msg) &&
		    (msg_linkprio(msg) != l_ptr->priority)) {
			pr_warn("%s<%s>, priority change %u->%u\n",
				link_rst_msg, l_ptr->name, l_ptr->priority,
				msg_linkprio(msg));
			l_ptr->priority = msg_linkprio(msg);
			tipc_link_reset(l_ptr); /* Enforce change to take effect */
			break;
		}

		/* Record reception; force mismatch at next timeout: */
		l_ptr->checkpoint--;

		link_state_event(l_ptr, TRAFFIC_MSG_EVT);
		l_ptr->stats.recv_states++;
		if (link_reset_unknown(l_ptr))
			break;

		if (less_eq(mod(l_ptr->next_in_no), msg_next_sent(msg))) {
			rec_gap = mod(msg_next_sent(msg) -
				      mod(l_ptr->next_in_no));
		}

		max_pkt_ack = msg_max_pkt(msg);
		if (max_pkt_ack > l_ptr->max_pkt) {
			l_ptr->max_pkt = max_pkt_ack;
			l_ptr->max_pkt_probes = 0;
		}

		max_pkt_ack = 0;
		if (msg_probe(msg)) {
			l_ptr->stats.recv_probes++;
			if (msg_size(msg) > sizeof(l_ptr->proto_msg))
				max_pkt_ack = msg_size(msg);
		}

		/* Protocol message before retransmits, reduce loss risk */
		if (l_ptr->owner->bclink.recv_permitted)
			tipc_bclink_update_link_state(l_ptr->owner,
						      msg_last_bcast(msg));

		if (rec_gap || (msg_probe(msg))) {
			tipc_link_proto_xmit(l_ptr, STATE_MSG, 0, rec_gap, 0,
					     0, max_pkt_ack);
		}
		if (msg_seq_gap(msg)) {
			l_ptr->stats.recv_nacks++;
			tipc_link_retransmit(l_ptr, l_ptr->first_out,
					     msg_seq_gap(msg));
		}
		break;
	}
exit:
	kfree_skb(buf);
}

/* tipc_link_tunnel_xmit(): Tunnel one packet via a link belonging to
 * a different bearer. Owner node is locked.
 */
static void tipc_link_tunnel_xmit(struct tipc_link *l_ptr,
				  struct tipc_msg *tunnel_hdr,
				  struct tipc_msg *msg,
				  u32 selector)
{
	struct tipc_link *tunnel;
	struct sk_buff *buf;
	u32 length = msg_size(msg);

	tunnel = l_ptr->owner->active_links[selector & 1];
	if (!tipc_link_is_up(tunnel)) {
		pr_warn("%stunnel link no longer available\n", link_co_err);
		return;
	}
	msg_set_size(tunnel_hdr, length + INT_H_SIZE);
	buf = tipc_buf_acquire(length + INT_H_SIZE);
	if (!buf) {
		pr_warn("%sunable to send tunnel msg\n", link_co_err);
		return;
	}
	skb_copy_to_linear_data(buf, tunnel_hdr, INT_H_SIZE);
	skb_copy_to_linear_data_offset(buf, INT_H_SIZE, msg, length);
	__tipc_link_xmit(tunnel, buf);
}

/* tipc_link_failover_send_queue(): A link has gone down, but a second
 * link is still active. We can do failover. Tunnel the failing link's
 * whole send queue via the remaining link. This way, we don't lose
 * any packets, and sequence order is preserved for subsequent traffic
 * sent over the remaining link. Owner node is locked.
 */
void tipc_link_failover_send_queue(struct tipc_link *l_ptr)
{
	u32 msgcount = l_ptr->out_queue_size;
	struct sk_buff *crs = l_ptr->first_out;
	struct tipc_link *tunnel = l_ptr->owner->active_links[0];
	struct tipc_msg tunnel_hdr;
	int split_bundles;

	if (!tunnel)
		return;

	tipc_msg_init(&tunnel_hdr, CHANGEOVER_PROTOCOL,
		      ORIGINAL_MSG, INT_H_SIZE, l_ptr->addr);
	msg_set_bearer_id(&tunnel_hdr, l_ptr->peer_bearer_id);
	msg_set_msgcnt(&tunnel_hdr, msgcount);

	if (!l_ptr->first_out) {
		struct sk_buff *buf;

		buf = tipc_buf_acquire(INT_H_SIZE);
		if (buf) {
			skb_copy_to_linear_data(buf, &tunnel_hdr, INT_H_SIZE);
			msg_set_size(&tunnel_hdr, INT_H_SIZE);
			__tipc_link_xmit(tunnel, buf);
		} else {
			pr_warn("%sunable to send changeover msg\n",
				link_co_err);
		}
		return;
	}

	split_bundles = (l_ptr->owner->active_links[0] !=
			 l_ptr->owner->active_links[1]);

	while (crs) {
		struct tipc_msg *msg = buf_msg(crs);

		if ((msg_user(msg) == MSG_BUNDLER) && split_bundles) {
			struct tipc_msg *m = msg_get_wrapped(msg);
			unchar *pos = (unchar *)m;

			msgcount = msg_msgcnt(msg);
			while (msgcount--) {
				msg_set_seqno(m, msg_seqno(msg));
				tipc_link_tunnel_xmit(l_ptr, &tunnel_hdr, m,
						      msg_link_selector(m));
				pos += align(msg_size(m));
				m = (struct tipc_msg *)pos;
			}
		} else {
			tipc_link_tunnel_xmit(l_ptr, &tunnel_hdr, msg,
					      msg_link_selector(msg));
		}
		crs = crs->next;
	}
}

/* tipc_link_dup_queue_xmit(): A second link has become active. Tunnel a
 * duplicate of the first link's send queue via the new link. This way, we
 * are guaranteed that currently queued packets from a socket are delivered
 * before future traffic from the same socket, even if this is using the
 * new link. The last arriving copy of each duplicate packet is dropped at
 * the receiving end by the regular protocol check, so packet cardinality
 * and sequence order is preserved per sender/receiver socket pair.
 * Owner node is locked.
 */
void tipc_link_dup_queue_xmit(struct tipc_link *l_ptr,
			      struct tipc_link *tunnel)
{
	struct sk_buff *iter;
	struct tipc_msg tunnel_hdr;

	tipc_msg_init(&tunnel_hdr, CHANGEOVER_PROTOCOL,
		      DUPLICATE_MSG, INT_H_SIZE, l_ptr->addr);
	msg_set_msgcnt(&tunnel_hdr, l_ptr->out_queue_size);
	msg_set_bearer_id(&tunnel_hdr, l_ptr->peer_bearer_id);
	iter = l_ptr->first_out;
	while (iter) {
		struct sk_buff *outbuf;
		struct tipc_msg *msg = buf_msg(iter);
		u32 length = msg_size(msg);

		if (msg_user(msg) == MSG_BUNDLER)
			msg_set_type(msg, CLOSED_MSG);
		msg_set_ack(msg, mod(l_ptr->next_in_no - 1));	/* Update */
		msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
		msg_set_size(&tunnel_hdr, length + INT_H_SIZE);
		outbuf = tipc_buf_acquire(length + INT_H_SIZE);
		if (outbuf == NULL) {
			pr_warn("%sunable to send duplicate msg\n",
				link_co_err);
			return;
		}
		skb_copy_to_linear_data(outbuf, &tunnel_hdr, INT_H_SIZE);
		skb_copy_to_linear_data_offset(outbuf, INT_H_SIZE, iter->data,
					       length);
		__tipc_link_xmit(tunnel, outbuf);
		if (!tipc_link_is_up(l_ptr))
			return;
		iter = iter->next;
	}
}

/**
 * buf_extract - extracts embedded TIPC message from another message
 * @skb: encapsulating message buffer
 * @from_pos: offset to extract from
 *
 * Returns a new message buffer containing an embedded message.  The
 * encapsulating message itself is left unchanged.
 */
static struct sk_buff *buf_extract(struct sk_buff *skb, u32 from_pos)
{
	struct tipc_msg *msg = (struct tipc_msg *)(skb->data + from_pos);
	u32 size = msg_size(msg);
	struct sk_buff *eb;

	eb = tipc_buf_acquire(size);
	if (eb)
		skb_copy_to_linear_data(eb, msg, size);
	return eb;
}

/* tipc_link_dup_rcv(): Receive a tunnelled DUPLICATE_MSG packet.
 * Owner node is locked.
 */
static void tipc_link_dup_rcv(struct tipc_link *l_ptr,
			      struct sk_buff *t_buf)
{
	struct sk_buff *buf;

	if (!tipc_link_is_up(l_ptr))
		return;

	buf = buf_extract(t_buf, INT_H_SIZE);
	if (buf == NULL) {
		pr_warn("%sfailed to extract inner dup pkt\n", link_co_err);
		return;
	}

	/* Add buffer to deferred queue, if applicable: */
	link_handle_out_of_seq_msg(l_ptr, buf);
}

/* tipc_link_failover_rcv(): Receive a tunnelled ORIGINAL_MSG packet
 * Owner node is locked.
 */
static struct sk_buff *tipc_link_failover_rcv(struct tipc_link *l_ptr,
					      struct sk_buff *t_buf)
{
	struct tipc_msg *t_msg = buf_msg(t_buf);
	struct sk_buff *buf = NULL;
	struct tipc_msg *msg;

	if (tipc_link_is_up(l_ptr))
		tipc_link_reset(l_ptr);

	/* First failover packet? */
	if (l_ptr->exp_msg_count == START_CHANGEOVER)
		l_ptr->exp_msg_count = msg_msgcnt(t_msg);

	/* Should there be an inner packet? */
	if (l_ptr->exp_msg_count) {
		l_ptr->exp_msg_count--;
		buf = buf_extract(t_buf, INT_H_SIZE);
		if (buf == NULL) {
			pr_warn("%sno inner failover pkt\n", link_co_err);
			goto exit;
		}
		msg = buf_msg(buf);

		if (less(msg_seqno(msg), l_ptr->reset_checkpoint)) {
			kfree_skb(buf);
			buf = NULL;
			goto exit;
		}
		if (msg_user(msg) == MSG_FRAGMENTER) {
			l_ptr->stats.recv_fragments++;
			tipc_buf_append(&l_ptr->reasm_buf, &buf);
		}
	}
exit:
	if ((l_ptr->exp_msg_count == 0) && (l_ptr->flags & LINK_STOPPED)) {
		tipc_node_detach_link(l_ptr->owner, l_ptr);
		kfree(l_ptr);
	}
	return buf;
}

/* tipc_link_tunnel_rcv(): Receive a tunnelled packet, sent
 * via other link as result of a failover (ORIGINAL_MSG) or
 * a new active link (DUPLICATE_MSG). Failover packets are
 * returned to the active link for delivery upwards.
 * Owner node is locked.
 */
static int tipc_link_tunnel_rcv(struct tipc_node *n_ptr,
				struct sk_buff **buf)
{
	struct sk_buff *t_buf = *buf;
	struct tipc_link *l_ptr;
	struct tipc_msg *t_msg = buf_msg(t_buf);
	u32 bearer_id = msg_bearer_id(t_msg);

	*buf = NULL;

	if (bearer_id >= MAX_BEARERS)
		goto exit;

	l_ptr = n_ptr->links[bearer_id];
	if (!l_ptr)
		goto exit;

	if (msg_type(t_msg) == DUPLICATE_MSG)
		tipc_link_dup_rcv(l_ptr, t_buf);
	else if (msg_type(t_msg) == ORIGINAL_MSG)
		*buf = tipc_link_failover_rcv(l_ptr, t_buf);
	else
		pr_warn("%sunknown tunnel pkt received\n", link_co_err);
exit:
	kfree_skb(t_buf);
	return *buf != NULL;
}

/*
 *  Bundler functionality:
 */
void tipc_link_bundle_rcv(struct sk_buff *buf)
{
	u32 msgcount = msg_msgcnt(buf_msg(buf));
	u32 pos = INT_H_SIZE;
	struct sk_buff *obuf;
	struct tipc_msg *omsg;

	while (msgcount--) {
		obuf = buf_extract(buf, pos);
		if (obuf == NULL) {
			pr_warn("Link unable to unbundle message(s)\n");
			break;
		}
		omsg = buf_msg(obuf);
		pos += align(msg_size(omsg));
		if (msg_isdata(omsg) || (msg_user(omsg) == CONN_MANAGER)) {
			tipc_sk_rcv(obuf);
		} else if (msg_user(omsg) == NAME_DISTRIBUTOR) {
			tipc_named_rcv(obuf);
		} else {
			pr_warn("Illegal bundled msg: %u\n", msg_user(omsg));
			kfree_skb(obuf);
		}
	}
	kfree_skb(buf);
}

/*
 *  Fragmentation/defragmentation:
 */

/*
 * tipc_link_frag_xmit: Entry for buffers needing fragmentation.
 * The buffer is complete, inclusive total message length.
 * Returns user data length.
 */
static int tipc_link_frag_xmit(struct tipc_link *l_ptr, struct sk_buff *buf)
{
	struct sk_buff *buf_chain = NULL;
	struct sk_buff *buf_chain_tail = (struct sk_buff *)&buf_chain;
	struct tipc_msg *inmsg = buf_msg(buf);
	struct tipc_msg fragm_hdr;
	u32 insize = msg_size(inmsg);
	u32 dsz = msg_data_sz(inmsg);
	unchar *crs = buf->data;
	u32 rest = insize;
	u32 pack_sz = l_ptr->max_pkt;
	u32 fragm_sz = pack_sz - INT_H_SIZE;
	u32 fragm_no = 0;
	u32 destaddr;

	if (msg_short(inmsg))
		destaddr = l_ptr->addr;
	else
		destaddr = msg_destnode(inmsg);

	/* Prepare reusable fragment header: */
	tipc_msg_init(&fragm_hdr, MSG_FRAGMENTER, FIRST_FRAGMENT,
		      INT_H_SIZE, destaddr);

	/* Chop up message: */
	while (rest > 0) {
		struct sk_buff *fragm;

		if (rest <= fragm_sz) {
			fragm_sz = rest;
			msg_set_type(&fragm_hdr, LAST_FRAGMENT);
		}
		fragm = tipc_buf_acquire(fragm_sz + INT_H_SIZE);
		if (fragm == NULL) {
			kfree_skb(buf);
			kfree_skb_list(buf_chain);
			return -ENOMEM;
		}
		msg_set_size(&fragm_hdr, fragm_sz + INT_H_SIZE);
		fragm_no++;
		msg_set_fragm_no(&fragm_hdr, fragm_no);
		skb_copy_to_linear_data(fragm, &fragm_hdr, INT_H_SIZE);
		skb_copy_to_linear_data_offset(fragm, INT_H_SIZE, crs,
					       fragm_sz);
		buf_chain_tail->next = fragm;
		buf_chain_tail = fragm;

		rest -= fragm_sz;
		crs += fragm_sz;
		msg_set_type(&fragm_hdr, FRAGMENT);
	}
	kfree_skb(buf);

	/* Append chain of fragments to send queue & send them */
	l_ptr->long_msg_seq_no++;
	link_add_chain_to_outqueue(l_ptr, buf_chain, l_ptr->long_msg_seq_no);
	l_ptr->stats.sent_fragments += fragm_no;
	l_ptr->stats.sent_fragmented++;
	tipc_link_push_queue(l_ptr);

	return dsz;
}

static void link_set_supervision_props(struct tipc_link *l_ptr, u32 tolerance)
{
	if ((tolerance < TIPC_MIN_LINK_TOL) || (tolerance > TIPC_MAX_LINK_TOL))
		return;

	l_ptr->tolerance = tolerance;
	l_ptr->continuity_interval =
		((tolerance / 4) > 500) ? 500 : tolerance / 4;
	l_ptr->abort_limit = tolerance / (l_ptr->continuity_interval / 4);
}

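/* Illustrative example (not part of the original file): with the default
 * tolerance of 1500 ms the settings above become
 *
 *	continuity_interval = min(1500 / 4, 500) = 375 ms
 *	abort_limit         = 1500 / (375 / 4)  = 16 probes
 *
 * so a silent peer is probed at 4x the continuity rate (cont_intv / 4 in
 * link_state_event()) and declared down after roughly one tolerance period.
 */
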
void tipc_link_set_queue_limits(struct tipc_link *l_ptr, u32 window)
{
	/* Data messages from this node, inclusive FIRST_FRAGM */
	l_ptr->queue_limit[TIPC_LOW_IMPORTANCE] = window;
	l_ptr->queue_limit[TIPC_MEDIUM_IMPORTANCE] = (window / 3) * 4;
	l_ptr->queue_limit[TIPC_HIGH_IMPORTANCE] = (window / 3) * 5;
	l_ptr->queue_limit[TIPC_CRITICAL_IMPORTANCE] = (window / 3) * 6;
	/* Transiting data messages, inclusive FIRST_FRAGM */
	l_ptr->queue_limit[TIPC_LOW_IMPORTANCE + 4] = 300;
	l_ptr->queue_limit[TIPC_MEDIUM_IMPORTANCE + 4] = 600;
	l_ptr->queue_limit[TIPC_HIGH_IMPORTANCE + 4] = 900;
	l_ptr->queue_limit[TIPC_CRITICAL_IMPORTANCE + 4] = 1200;
	l_ptr->queue_limit[CONN_MANAGER] = 1200;
	l_ptr->queue_limit[CHANGEOVER_PROTOCOL] = 2500;
	l_ptr->queue_limit[NAME_DISTRIBUTOR] = 3000;
	/* FRAGMENT and LAST_FRAGMENT packets */
	l_ptr->queue_limit[MSG_FRAGMENTER] = 4000;
}

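/* Worked example, for illustration only: window = 50 yields send queue
 * limits of 50, 64, 80 and 96 packets for locally originated data of
 * low, medium, high and critical importance ((50 / 3) truncates to 16),
 * while the transit, protocol and fragmenter limits stay fixed.
 */
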
/* tipc_link_find_owner - locate owner node of link by link's name
 * @link_name: pointer to link name string
 * @bearer_id: pointer to index in 'node->links' array where the link was found
 *
 * Returns pointer to node owning the link, or NULL if no matching link is found.
 */
static struct tipc_node *tipc_link_find_owner(const char *link_name,
					      unsigned int *bearer_id)
{
	struct tipc_link *l_ptr;
	struct tipc_node *n_ptr;
	struct tipc_node *found_node = NULL;
	int i;

	*bearer_id = 0;
	rcu_read_lock();
	list_for_each_entry_rcu(n_ptr, &tipc_node_list, list) {
		tipc_node_lock(n_ptr);
		for (i = 0; i < MAX_BEARERS; i++) {
			l_ptr = n_ptr->links[i];
			if (l_ptr && !strcmp(l_ptr->name, link_name)) {
				*bearer_id = i;
				found_node = n_ptr;
				break;
			}
		}
		tipc_node_unlock(n_ptr);
		if (found_node)
			break;
	}
	rcu_read_unlock();

	return found_node;
}

/*
 * link_value_is_valid -- validate proposed link tolerance/priority/window
 *
 * @cmd: value type (TIPC_CMD_SET_LINK_*)
 * @new_value: the new value
 *
 * Returns 1 if value is within range, 0 if not.
 */
static int link_value_is_valid(u16 cmd, u32 new_value)
{
	switch (cmd) {
	case TIPC_CMD_SET_LINK_TOL:
		return (new_value >= TIPC_MIN_LINK_TOL) &&
			(new_value <= TIPC_MAX_LINK_TOL);
	case TIPC_CMD_SET_LINK_PRI:
		return (new_value <= TIPC_MAX_LINK_PRI);
	case TIPC_CMD_SET_LINK_WINDOW:
		return (new_value >= TIPC_MIN_LINK_WIN) &&
			(new_value <= TIPC_MAX_LINK_WIN);
	}
	return 0;
}

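/* Examples, for illustration only: link_value_is_valid() accepts
 * TIPC_CMD_SET_LINK_PRI for any new_value up to TIPC_MAX_LINK_PRI, but
 * rejects e.g. a window of 0, which lies below TIPC_MIN_LINK_WIN. Any
 * unrecognized cmd falls through the switch and is rejected with 0.
 */
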
/*
 * link_cmd_set_value - change priority/tolerance/window for link/bearer/media
 * @name: ptr to link, bearer, or media name
 * @new_value: new value of link, bearer, or media setting
 * @cmd: which link, bearer, or media attribute to set (TIPC_CMD_SET_LINK_*)
 *
 * Caller must hold RTNL lock to ensure link/bearer/media is not deleted.
 *
 * Returns 0 if value updated and negative value on error.
 */
static int link_cmd_set_value(const char *name, u32 new_value, u16 cmd)
{
	struct tipc_node *node;
	struct tipc_link *l_ptr;
	struct tipc_bearer *b_ptr;
	struct tipc_media *m_ptr;
	unsigned int bearer_id;
	int res = 0;

	node = tipc_link_find_owner(name, &bearer_id);
	if (node) {
		tipc_node_lock(node);
		l_ptr = node->links[bearer_id];

		if (l_ptr) {
			switch (cmd) {
			case TIPC_CMD_SET_LINK_TOL:
				link_set_supervision_props(l_ptr, new_value);
				tipc_link_proto_xmit(l_ptr, STATE_MSG, 0, 0,
						     new_value, 0, 0);
				break;
			case TIPC_CMD_SET_LINK_PRI:
				l_ptr->priority = new_value;
				tipc_link_proto_xmit(l_ptr, STATE_MSG, 0, 0,
						     0, new_value, 0);
				break;
			case TIPC_CMD_SET_LINK_WINDOW:
				tipc_link_set_queue_limits(l_ptr, new_value);
				break;
			default:
				res = -EINVAL;
				break;
			}
		}
		tipc_node_unlock(node);
		return res;
	}

	b_ptr = tipc_bearer_find(name);
	if (b_ptr) {
		switch (cmd) {
		case TIPC_CMD_SET_LINK_TOL:
			b_ptr->tolerance = new_value;
			break;
		case TIPC_CMD_SET_LINK_PRI:
			b_ptr->priority = new_value;
			break;
		case TIPC_CMD_SET_LINK_WINDOW:
			b_ptr->window = new_value;
			break;
		default:
			res = -EINVAL;
			break;
		}
		return res;
	}

	m_ptr = tipc_media_find(name);
	if (!m_ptr)
		return -ENODEV;
	switch (cmd) {
	case TIPC_CMD_SET_LINK_TOL:
		m_ptr->tolerance = new_value;
		break;
	case TIPC_CMD_SET_LINK_PRI:
		m_ptr->priority = new_value;
		break;
	case TIPC_CMD_SET_LINK_WINDOW:
		m_ptr->window = new_value;
		break;
	default:
		res = -EINVAL;
		break;
	}
	return res;
}

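/* Resolution order above, summarized: 'name' is first tried as a link
 * name, then as a bearer name, and finally as a media name; the first
 * match is updated, and -ENODEV is returned only when none of the three
 * name spaces contains it.
 */
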
struct sk_buff *tipc_link_cmd_config(const void *req_tlv_area, int req_tlv_space,
				     u16 cmd)
{
	struct tipc_link_config *args;
	u32 new_value;
	int res;

	if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_LINK_CONFIG))
		return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);

	args = (struct tipc_link_config *)TLV_DATA(req_tlv_area);
	new_value = ntohl(args->value);

	if (!link_value_is_valid(cmd, new_value))
		return tipc_cfg_reply_error_string(
			"cannot change, value invalid");

	if (!strcmp(args->name, tipc_bclink_name)) {
		if ((cmd == TIPC_CMD_SET_LINK_WINDOW) &&
		    (tipc_bclink_set_queue_limits(new_value) == 0))
			return tipc_cfg_reply_none();
		return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
						   " (cannot change setting on broadcast link)");
	}

	res = link_cmd_set_value(args->name, new_value, cmd);
	if (res)
		return tipc_cfg_reply_error_string("cannot change link setting");

	return tipc_cfg_reply_none();
}

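/* Flow summary, for illustration: a TIPC_CMD_SET_LINK_* request carries
 * a struct tipc_link_config TLV whose 'value' field arrives in network
 * byte order, hence the ntohl() above. Window changes on the broadcast
 * link are diverted to tipc_bclink_set_queue_limits(); every other name
 * is resolved to a link, bearer or media by link_cmd_set_value().
 */
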
/**
 * link_reset_statistics - reset link statistics
 * @l_ptr: pointer to link
 */
static void link_reset_statistics(struct tipc_link *l_ptr)
{
	memset(&l_ptr->stats, 0, sizeof(l_ptr->stats));
	l_ptr->stats.sent_info = l_ptr->next_out_no;
	l_ptr->stats.recv_info = l_ptr->next_in_no;
}

struct sk_buff *tipc_link_cmd_reset_stats(const void *req_tlv_area, int req_tlv_space)
{
	char *link_name;
	struct tipc_link *l_ptr;
	struct tipc_node *node;
	unsigned int bearer_id;

	if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_LINK_NAME))
		return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);

	link_name = (char *)TLV_DATA(req_tlv_area);
	if (!strcmp(link_name, tipc_bclink_name)) {
		if (tipc_bclink_reset_stats())
			return tipc_cfg_reply_error_string("link not found");
		return tipc_cfg_reply_none();
	}
	node = tipc_link_find_owner(link_name, &bearer_id);
	if (!node)
		return tipc_cfg_reply_error_string("link not found");

	tipc_node_lock(node);
	l_ptr = node->links[bearer_id];
	if (!l_ptr) {
		tipc_node_unlock(node);
		return tipc_cfg_reply_error_string("link not found");
	}
	link_reset_statistics(l_ptr);
	tipc_node_unlock(node);
	return tipc_cfg_reply_none();
}

/**
 * percent - convert count to a percentage of total (rounding up or down)
 */
static u32 percent(u32 count, u32 total)
{
	return (count * 100 + (total / 2)) / total;
}

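/* Examples, for illustration only: percent(1, 3) = (100 + 1) / 3 = 33
 * and percent(2, 3) = (200 + 1) / 3 = 67, i.e. the added total / 2
 * rounds to the nearest whole percent. Callers must ensure total != 0.
 */
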
/**
 * tipc_link_stats - print link statistics
 * @name: link name
 * @buf: print buffer area
 * @buf_size: size of print buffer area
 *
 * Returns length of print buffer data string (or 0 if error)
 */
static int tipc_link_stats(const char *name, char *buf, const u32 buf_size)
{
	struct tipc_link *l;
	struct tipc_stats *s;
	struct tipc_node *node;
	char *status;
	u32 profile_total = 0;
	unsigned int bearer_id;
	int ret;

	if (!strcmp(name, tipc_bclink_name))
		return tipc_bclink_stats(buf, buf_size);

	node = tipc_link_find_owner(name, &bearer_id);
	if (!node)
		return 0;

	tipc_node_lock(node);

	l = node->links[bearer_id];
	if (!l) {
		tipc_node_unlock(node);
		return 0;
	}

	s = &l->stats;

	if (tipc_link_is_active(l))
		status = "ACTIVE";
	else if (tipc_link_is_up(l))
		status = "STANDBY";
	else
		status = "DEFUNCT";

	ret = tipc_snprintf(buf, buf_size, "Link <%s>\n"
			    "  %s  MTU:%u  Priority:%u  Tolerance:%u ms"
			    "  Window:%u packets\n",
			    l->name, status, l->max_pkt, l->priority,
			    l->tolerance, l->queue_limit[0]);

	ret += tipc_snprintf(buf + ret, buf_size - ret,
			     "  RX packets:%u fragments:%u/%u bundles:%u/%u\n",
			     l->next_in_no - s->recv_info, s->recv_fragments,
			     s->recv_fragmented, s->recv_bundles,
			     s->recv_bundled);

	ret += tipc_snprintf(buf + ret, buf_size - ret,
			     "  TX packets:%u fragments:%u/%u bundles:%u/%u\n",
			     l->next_out_no - s->sent_info, s->sent_fragments,
			     s->sent_fragmented, s->sent_bundles,
			     s->sent_bundled);

	profile_total = s->msg_length_counts;
	if (!profile_total)
		profile_total = 1;

	ret += tipc_snprintf(buf + ret, buf_size - ret,
			     "  TX profile sample:%u packets  average:%u octets\n"
			     "  0-64:%u%% -256:%u%% -1024:%u%% -4096:%u%% "
			     "-16384:%u%% -32768:%u%% -66000:%u%%\n",
			     s->msg_length_counts,
			     s->msg_lengths_total / profile_total,
			     percent(s->msg_length_profile[0], profile_total),
			     percent(s->msg_length_profile[1], profile_total),
			     percent(s->msg_length_profile[2], profile_total),
			     percent(s->msg_length_profile[3], profile_total),
			     percent(s->msg_length_profile[4], profile_total),
			     percent(s->msg_length_profile[5], profile_total),
			     percent(s->msg_length_profile[6], profile_total));

	ret += tipc_snprintf(buf + ret, buf_size - ret,
			     "  RX states:%u probes:%u naks:%u defs:%u"
			     " dups:%u\n", s->recv_states, s->recv_probes,
			     s->recv_nacks, s->deferred_recv, s->duplicates);

	ret += tipc_snprintf(buf + ret, buf_size - ret,
			     "  TX states:%u probes:%u naks:%u acks:%u"
			     " dups:%u\n", s->sent_states, s->sent_probes,
			     s->sent_nacks, s->sent_acks, s->retransmitted);

	ret += tipc_snprintf(buf + ret, buf_size - ret,
			     "  Congestion link:%u  Send queue"
			     " max:%u avg:%u\n", s->link_congs,
			     s->max_queue_sz, s->queue_sz_counts ?
			     (s->accu_queue_sz / s->queue_sz_counts) : 0);

	tipc_node_unlock(node);
	return ret;
}

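/* Sample first lines of output, for illustration only (all figures are
 * fabricated):
 *
 *	Link <1.1.1:eth0-1.1.2:eth0>
 *	  ACTIVE  MTU:1500  Priority:10  Tolerance:1500 ms  Window:50 packets
 */
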
struct sk_buff *tipc_link_cmd_show_stats(const void *req_tlv_area, int req_tlv_space)
{
	struct sk_buff *buf;
	struct tlv_desc *rep_tlv;
	int str_len;
	int pb_len;
	char *pb;

	if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_LINK_NAME))
		return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);

	buf = tipc_cfg_reply_alloc(TLV_SPACE(ULTRA_STRING_MAX_LEN));
	if (!buf)
		return NULL;

	rep_tlv = (struct tlv_desc *)buf->data;
	pb = TLV_DATA(rep_tlv);
	pb_len = ULTRA_STRING_MAX_LEN;
	str_len = tipc_link_stats((char *)TLV_DATA(req_tlv_area),
				  pb, pb_len);
	if (!str_len) {
		kfree_skb(buf);
		return tipc_cfg_reply_error_string("link not found");
	}
	str_len += 1;	/* for "\0" */
	skb_put(buf, TLV_SPACE(str_len));
	TLV_SET(rep_tlv, TIPC_TLV_ULTRA_STRING, NULL, str_len);

	return buf;
}

/**
 * tipc_link_get_max_pkt - get maximum packet size to use when sending to destination
 * @dest: network address of destination node
 * @selector: used to select from set of active links
 *
 * If no active link can be found, uses default maximum packet size.
 */
u32 tipc_link_get_max_pkt(u32 dest, u32 selector)
{
	struct tipc_node *n_ptr;
	struct tipc_link *l_ptr;
	u32 res = MAX_PKT_DEFAULT;

	if (dest == tipc_own_addr)
		return MAX_MSG_SIZE;

	n_ptr = tipc_node_find(dest);
	if (n_ptr) {
		tipc_node_lock(n_ptr);
		l_ptr = n_ptr->active_links[selector & 1];
		if (l_ptr)
			res = l_ptr->max_pkt;
		tipc_node_unlock(n_ptr);
	}
	return res;
}

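/* Usage sketch, for illustration only: a sender sizing a message for
 * destination node 'dnode', using its port reference as link selector:
 *
 *	u32 mtu = tipc_link_get_max_pkt(dnode, ref);
 *
 * Own node gets MAX_MSG_SIZE; an unknown node, or one with no active
 * link, falls back to the conservative MAX_PKT_DEFAULT.
 */
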
static void link_print(struct tipc_link *l_ptr, const char *str)
{
	struct tipc_bearer *b_ptr;

	rcu_read_lock();
	b_ptr = rcu_dereference_rtnl(bearer_list[l_ptr->bearer_id]);
	if (b_ptr)
		pr_info("%s Link %x<%s>:", str, l_ptr->addr, b_ptr->name);
	rcu_read_unlock();

	if (link_working_unknown(l_ptr))
		pr_cont(":WU\n");
	else if (link_reset_reset(l_ptr))
		pr_cont(":RR\n");
	else if (link_reset_unknown(l_ptr))
		pr_cont(":RU\n");
	else if (link_working_working(l_ptr))
		pr_cont(":WW\n");
	else
		pr_cont("\n");
}