/*
 * net/tipc/link.c: TIPC link code
 *
 * Copyright (c) 1996-2007, 2012-2014, Ericsson AB
 * Copyright (c) 2004-2007, 2010-2013, Wind River Systems
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
#include "name_distr.h"

#include <linux/pkt_sched.h>
/*
 * Error message prefixes
 */
static const char *link_co_err = "Link changeover error, ";
static const char *link_rst_msg = "Resetting link ";
static const char *link_unk_evt = "Unknown link event ";
static const struct nla_policy tipc_nl_link_policy[TIPC_NLA_LINK_MAX + 1] = {
	[TIPC_NLA_LINK_UNSPEC]		= { .type = NLA_UNSPEC },
	[TIPC_NLA_LINK_NAME] = {
		.type = NLA_STRING,
		.len = TIPC_MAX_LINK_NAME
	},
	[TIPC_NLA_LINK_MTU]		= { .type = NLA_U32 },
	[TIPC_NLA_LINK_BROADCAST]	= { .type = NLA_FLAG },
	[TIPC_NLA_LINK_UP]		= { .type = NLA_FLAG },
	[TIPC_NLA_LINK_ACTIVE]		= { .type = NLA_FLAG },
	[TIPC_NLA_LINK_PROP]		= { .type = NLA_NESTED },
	[TIPC_NLA_LINK_STATS]		= { .type = NLA_NESTED },
	[TIPC_NLA_LINK_RX]		= { .type = NLA_U32 },
	[TIPC_NLA_LINK_TX]		= { .type = NLA_U32 }
};
/* Properties valid for media, bearer and link */
static const struct nla_policy tipc_nl_prop_policy[TIPC_NLA_PROP_MAX + 1] = {
	[TIPC_NLA_PROP_UNSPEC]		= { .type = NLA_UNSPEC },
	[TIPC_NLA_PROP_PRIO]		= { .type = NLA_U32 },
	[TIPC_NLA_PROP_TOL]		= { .type = NLA_U32 },
	[TIPC_NLA_PROP_WIN]		= { .type = NLA_U32 }
};
/*
 * Out-of-range value for link session numbers
 */
#define INVALID_SESSION 0x10000
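/* Session numbers are carried in a 16-bit header field (0 - 0xffff),
 * so 0x10000 can never match a real peer session.
 */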
/*
 * Link state events:
 */
#define  STARTING_EVT    856384768	/* link processing trigger */
#define  TRAFFIC_MSG_EVT 560815u	/* traffic message received */
#define  TIMEOUT_EVT     560817u	/* link timer expired */
/*
 * The following two 'message types' are really just implementation
 * data conveniently stored in the message header.
 * They must not be considered part of the protocol.
 */
#define OPEN_MSG   0
#define CLOSED_MSG 1

/*
 * State value stored in 'exp_msg_count'
 */
#define START_CHANGEOVER 100000u
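/* A real failover packet count is bounded by the failed link's send queue
 * and stays far below 100000, so this value doubles as a marker meaning
 * "failover started, expected count not yet known".
 */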
static void link_handle_out_of_seq_msg(struct tipc_link *link,
				       struct sk_buff *skb);
static void tipc_link_proto_rcv(struct tipc_link *link,
				struct sk_buff *skb);
static int  tipc_link_tunnel_rcv(struct tipc_node *node,
				 struct sk_buff **skb);
static void link_set_supervision_props(struct tipc_link *l_ptr, u32 tol);
static void link_state_event(struct tipc_link *l_ptr, u32 event);
static void link_reset_statistics(struct tipc_link *l_ptr);
static void link_print(struct tipc_link *l_ptr, const char *str);
static void tipc_link_sync_xmit(struct tipc_link *l);
static void tipc_link_sync_rcv(struct tipc_node *n, struct sk_buff *buf);
static void tipc_link_input(struct tipc_link *l, struct sk_buff *skb);
static bool tipc_data_input(struct tipc_link *l, struct sk_buff *skb);
/*
 *  Simple link routines
 */
static unsigned int align(unsigned int i)
{
	return (i + 3) & ~3u;
}
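/* align() rounds up to the next multiple of 4, matching TIPC's 4-byte
 * message alignment, e.g. align(5) == 8 and align(8) == 8.
 */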
static void tipc_link_release(struct kref *kref)
{
	kfree(container_of(kref, struct tipc_link, ref));
}

static void tipc_link_get(struct tipc_link *l_ptr)
{
	kref_get(&l_ptr->ref);
}

static void tipc_link_put(struct tipc_link *l_ptr)
{
	kref_put(&l_ptr->ref, tipc_link_release);
}
static void link_init_max_pkt(struct tipc_link *l_ptr)
{
	struct tipc_node *node = l_ptr->owner;
	struct tipc_net *tn = net_generic(node->net, tipc_net_id);
	struct tipc_bearer *b_ptr;
	u32 max_pkt;

	rcu_read_lock();
	b_ptr = rcu_dereference_rtnl(tn->bearer_list[l_ptr->bearer_id]);
	if (!b_ptr) {
		rcu_read_unlock();
		return;
	}
	max_pkt = (b_ptr->mtu & ~3);
	rcu_read_unlock();

	if (max_pkt > MAX_MSG_SIZE)
		max_pkt = MAX_MSG_SIZE;

	l_ptr->max_pkt_target = max_pkt;
	if (l_ptr->max_pkt_target < MAX_PKT_DEFAULT)
		l_ptr->max_pkt = l_ptr->max_pkt_target;
	else
		l_ptr->max_pkt = MAX_PKT_DEFAULT;

	l_ptr->max_pkt_probes = 0;
}
/*
 *  Simple non-static link routines (i.e. referenced outside this file)
 */
int tipc_link_is_up(struct tipc_link *l_ptr)
{
	if (!l_ptr)
		return 0;
	return link_working_working(l_ptr) || link_working_unknown(l_ptr);
}
int tipc_link_is_active(struct tipc_link *l_ptr)
{
	return	(l_ptr->owner->active_links[0] == l_ptr) ||
		(l_ptr->owner->active_links[1] == l_ptr);
}
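/* A node maintains up to two concurrently active links to a peer, one per
 * network plane; senders pick one with the low bit of their link selector,
 * so traffic from a given socket stays on one link while both are up.
 */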
/**
 * link_timeout - handle expiration of link timer
 * @l_ptr: pointer to link
 */
static void link_timeout(unsigned long data)
{
	struct tipc_link *l_ptr = (struct tipc_link *)data;
	struct sk_buff *skb;

	tipc_node_lock(l_ptr->owner);

	/* update counters used in statistical profiling of send traffic */
	l_ptr->stats.accu_queue_sz += skb_queue_len(&l_ptr->outqueue);
	l_ptr->stats.queue_sz_counts++;

	skb = skb_peek(&l_ptr->outqueue);
	if (skb) {
		struct tipc_msg *msg = buf_msg(skb);
		u32 length = msg_size(msg);

		if ((msg_user(msg) == MSG_FRAGMENTER) &&
		    (msg_type(msg) == FIRST_FRAGMENT)) {
			length = msg_size(msg_get_wrapped(msg));
		}
		if (length) {
			l_ptr->stats.msg_lengths_total += length;
			l_ptr->stats.msg_length_counts++;
			if (length <= 64)
				l_ptr->stats.msg_length_profile[0]++;
			else if (length <= 256)
				l_ptr->stats.msg_length_profile[1]++;
			else if (length <= 1024)
				l_ptr->stats.msg_length_profile[2]++;
			else if (length <= 4096)
				l_ptr->stats.msg_length_profile[3]++;
			else if (length <= 16384)
				l_ptr->stats.msg_length_profile[4]++;
			else if (length <= 32768)
				l_ptr->stats.msg_length_profile[5]++;
			else
				l_ptr->stats.msg_length_profile[6]++;
		}
	}

	/* do all other link processing performed on a periodic basis */
	link_state_event(l_ptr, TIMEOUT_EVT);

	if (l_ptr->next_out)
		tipc_link_push_packets(l_ptr);

	tipc_node_unlock(l_ptr->owner);
	tipc_link_put(l_ptr);
}
static void link_set_timer(struct tipc_link *link, unsigned long time)
{
	if (!mod_timer(&link->timer, jiffies + time))
		tipc_link_get(link);
}
/**
 * tipc_link_create - create a new link
 * @n_ptr: pointer to associated node
 * @b_ptr: pointer to associated bearer
 * @media_addr: media address to use when sending messages over link
 *
 * Returns pointer to link.
 */
struct tipc_link *tipc_link_create(struct tipc_node *n_ptr,
				   struct tipc_bearer *b_ptr,
				   const struct tipc_media_addr *media_addr)
{
	struct tipc_net *tn = net_generic(n_ptr->net, tipc_net_id);
	struct tipc_link *l_ptr;
	struct tipc_msg *msg;
	char *if_name;
	char addr_string[16];
	u32 peer = n_ptr->addr;

	if (n_ptr->link_cnt >= MAX_BEARERS) {
		tipc_addr_string_fill(addr_string, n_ptr->addr);
		pr_err("Attempt to establish %uth link to %s. Max %u allowed.\n",
		       n_ptr->link_cnt, addr_string, MAX_BEARERS);
		return NULL;
	}

	if (n_ptr->links[b_ptr->identity]) {
		tipc_addr_string_fill(addr_string, n_ptr->addr);
		pr_err("Attempt to establish second link on <%s> to %s\n",
		       b_ptr->name, addr_string);
		return NULL;
	}

	l_ptr = kzalloc(sizeof(*l_ptr), GFP_ATOMIC);
	if (!l_ptr) {
		pr_warn("Link creation failed, no memory\n");
		return NULL;
	}
	kref_init(&l_ptr->ref);
	l_ptr->addr = peer;
	if_name = strchr(b_ptr->name, ':') + 1;
	sprintf(l_ptr->name, "%u.%u.%u:%s-%u.%u.%u:unknown",
		tipc_zone(tn->own_addr), tipc_cluster(tn->own_addr),
		tipc_node(tn->own_addr),
		if_name,
		tipc_zone(peer), tipc_cluster(peer), tipc_node(peer));
	/* note: peer i/f name is updated by reset/activate message */
	memcpy(&l_ptr->media_addr, media_addr, sizeof(*media_addr));
	l_ptr->owner = n_ptr;
	l_ptr->checkpoint = 1;
	l_ptr->peer_session = INVALID_SESSION;
	l_ptr->bearer_id = b_ptr->identity;
	link_set_supervision_props(l_ptr, b_ptr->tolerance);
	l_ptr->state = RESET_UNKNOWN;

	l_ptr->pmsg = (struct tipc_msg *)&l_ptr->proto_msg;
	msg = l_ptr->pmsg;
	tipc_msg_init(tn->own_addr, msg, LINK_PROTOCOL, RESET_MSG, INT_H_SIZE,
		      l_ptr->addr);
	msg_set_size(msg, sizeof(l_ptr->proto_msg));
	msg_set_session(msg, (tn->random & 0xffff));
	msg_set_bearer_id(msg, b_ptr->identity);
	strcpy((char *)msg_data(msg), if_name);

	l_ptr->priority = b_ptr->priority;
	tipc_link_set_queue_limits(l_ptr, b_ptr->window);

	l_ptr->net_plane = b_ptr->net_plane;
	link_init_max_pkt(l_ptr);

	l_ptr->next_out_no = 1;
	__skb_queue_head_init(&l_ptr->outqueue);
	__skb_queue_head_init(&l_ptr->deferred_queue);
	skb_queue_head_init(&l_ptr->wakeupq);
	skb_queue_head_init(&l_ptr->inputq);
	skb_queue_head_init(&l_ptr->namedq);
	link_reset_statistics(l_ptr);
	tipc_node_attach_link(n_ptr, l_ptr);
	setup_timer(&l_ptr->timer, link_timeout, (unsigned long)l_ptr);
	link_state_event(l_ptr, STARTING_EVT);

	return l_ptr;
}
/**
 * link_delete - Conditional deletion of link.
 *               If timer still running, real delete is done when it expires
 * @link: link to be deleted
 */
void tipc_link_delete(struct tipc_link *link)
{
	tipc_link_reset_fragments(link);
	tipc_node_detach_link(link->owner, link);
	tipc_link_put(link);
}
void tipc_link_delete_list(struct net *net, unsigned int bearer_id,
			   bool shutting_down)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_link *link;
	struct tipc_node *node;
	bool del_link;

	rcu_read_lock();
	list_for_each_entry_rcu(node, &tn->node_list, list) {
		tipc_node_lock(node);
		link = node->links[bearer_id];
		if (!link) {
			tipc_node_unlock(node);
			continue;
		}
		del_link = !tipc_link_is_up(link) && !link->exp_msg_count;
		tipc_link_reset(link);
		if (del_timer(&link->timer))
			tipc_link_put(link);
		link->flags |= LINK_STOPPED;
		/* Delete link now, or when failover is finished: */
		if (shutting_down || !tipc_node_is_up(node) || del_link)
			tipc_link_delete(link);
		tipc_node_unlock(node);
	}
	rcu_read_unlock();
}
/**
 * link_schedule_user - schedule user for wakeup after congestion
 * @link: congested link
 * @oport: sending port
 * @chain_sz: size of buffer chain whose transmission was attempted
 * @imp: importance of the message whose transmission was attempted
 * Create pseudo msg to send back to user when congestion abates
 */
static bool link_schedule_user(struct tipc_link *link, u32 oport,
			       uint chain_sz, uint imp)
{
	struct sk_buff *buf;

	buf = tipc_msg_create(SOCK_WAKEUP, 0, INT_H_SIZE, 0,
			      link_own_addr(link), link_own_addr(link),
			      oport, 0, 0);
	if (!buf)
		return false;
	TIPC_SKB_CB(buf)->chain_sz = chain_sz;
	TIPC_SKB_CB(buf)->chain_imp = imp;
	skb_queue_tail(&link->wakeupq, buf);
	link->stats.link_congs++;
	return true;
}
/**
 * link_prepare_wakeup - prepare users for wakeup after congestion
 * @link: congested link
 * Move a number of waiting users, as permitted by available space in
 * the send queue, from link wait queue to node wait queue for wakeup
 */
void link_prepare_wakeup(struct tipc_link *link)
{
	uint pend_qsz = skb_queue_len(&link->outqueue);
	struct sk_buff *skb, *tmp;

	skb_queue_walk_safe(&link->wakeupq, skb, tmp) {
		if (pend_qsz >= link->queue_limit[TIPC_SKB_CB(skb)->chain_imp])
			break;
		pend_qsz += TIPC_SKB_CB(skb)->chain_sz;
		skb_unlink(skb, &link->wakeupq);
		skb_queue_tail(&link->inputq, skb);
		link->owner->inputq = &link->inputq;
		link->owner->action_flags |= TIPC_MSG_EVT;
	}
}
/**
 * tipc_link_reset_fragments - purge link's inbound message fragments queue
 * @l_ptr: pointer to link
 */
void tipc_link_reset_fragments(struct tipc_link *l_ptr)
{
	kfree_skb(l_ptr->reasm_buf);
	l_ptr->reasm_buf = NULL;
}
/**
 * tipc_link_purge_queues - purge all pkt queues associated with link
 * @l_ptr: pointer to link
 */
void tipc_link_purge_queues(struct tipc_link *l_ptr)
{
	__skb_queue_purge(&l_ptr->deferred_queue);
	__skb_queue_purge(&l_ptr->outqueue);
	tipc_link_reset_fragments(l_ptr);
}
void tipc_link_reset(struct tipc_link *l_ptr)
{
	u32 prev_state = l_ptr->state;
	u32 checkpoint = l_ptr->next_in_no;
	int was_active_link = tipc_link_is_active(l_ptr);
	struct tipc_node *owner = l_ptr->owner;

	msg_set_session(l_ptr->pmsg, ((msg_session(l_ptr->pmsg) + 1) & 0xffff));

	/* Link is down, accept any session */
	l_ptr->peer_session = INVALID_SESSION;

	/* Prepare for max packet size negotiation */
	link_init_max_pkt(l_ptr);

	l_ptr->state = RESET_UNKNOWN;

	if ((prev_state == RESET_UNKNOWN) || (prev_state == RESET_RESET))
		return;

	tipc_node_link_down(l_ptr->owner, l_ptr);
	tipc_bearer_remove_dest(owner->net, l_ptr->bearer_id, l_ptr->addr);

	if (was_active_link && tipc_node_active_links(l_ptr->owner)) {
		l_ptr->reset_checkpoint = checkpoint;
		l_ptr->exp_msg_count = START_CHANGEOVER;
	}

	/* Clean up all queues, except inputq: */
	__skb_queue_purge(&l_ptr->outqueue);
	__skb_queue_purge(&l_ptr->deferred_queue);
	if (!owner->inputq)
		owner->inputq = &l_ptr->inputq;
	skb_queue_splice_init(&l_ptr->wakeupq, owner->inputq);
	if (!skb_queue_empty(owner->inputq))
		owner->action_flags |= TIPC_MSG_EVT;
	l_ptr->next_out = NULL;
	l_ptr->unacked_window = 0;
	l_ptr->checkpoint = 1;
	l_ptr->next_out_no = 1;
	l_ptr->fsm_msg_cnt = 0;
	l_ptr->stale_count = 0;
	link_reset_statistics(l_ptr);
}
void tipc_link_reset_list(struct net *net, unsigned int bearer_id)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_link *l_ptr;
	struct tipc_node *n_ptr;

	rcu_read_lock();
	list_for_each_entry_rcu(n_ptr, &tn->node_list, list) {
		tipc_node_lock(n_ptr);
		l_ptr = n_ptr->links[bearer_id];
		if (l_ptr)
			tipc_link_reset(l_ptr);
		tipc_node_unlock(n_ptr);
	}
	rcu_read_unlock();
}
static void link_activate(struct tipc_link *link)
{
	struct tipc_node *node = link->owner;

	link->next_in_no = 1;
	link->stats.recv_info = 1;
	tipc_node_link_up(node, link);
	tipc_bearer_add_dest(node->net, link->bearer_id, link->addr);
}
/**
 * link_state_event - link finite state machine
 * @l_ptr: pointer to link
 * @event: state machine event to process
 */
static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
{
	struct tipc_link *other;
	unsigned long cont_intv = l_ptr->cont_intv;

	if (l_ptr->flags & LINK_STOPPED)
		return;

	if (!(l_ptr->flags & LINK_STARTED) && (event != STARTING_EVT))
		return;	/* Not yet. */

	/* Check whether changeover is going on */
	if (l_ptr->exp_msg_count) {
		if (event == TIMEOUT_EVT)
			link_set_timer(l_ptr, cont_intv);
		return;
	}

	switch (l_ptr->state) {
	case WORKING_WORKING:
		switch (event) {
		case TRAFFIC_MSG_EVT:
		case ACTIVATE_MSG:
			break;
		case TIMEOUT_EVT:
			if (l_ptr->next_in_no != l_ptr->checkpoint) {
				l_ptr->checkpoint = l_ptr->next_in_no;
				if (tipc_bclink_acks_missing(l_ptr->owner)) {
					tipc_link_proto_xmit(l_ptr, STATE_MSG,
							     0, 0, 0, 0, 0);
					l_ptr->fsm_msg_cnt++;
				} else if (l_ptr->max_pkt < l_ptr->max_pkt_target) {
					tipc_link_proto_xmit(l_ptr, STATE_MSG,
							     1, 0, 0, 0, 0);
					l_ptr->fsm_msg_cnt++;
				}
				link_set_timer(l_ptr, cont_intv);
				break;
			}
			l_ptr->state = WORKING_UNKNOWN;
			l_ptr->fsm_msg_cnt = 0;
			tipc_link_proto_xmit(l_ptr, STATE_MSG, 1, 0, 0, 0, 0);
			l_ptr->fsm_msg_cnt++;
			link_set_timer(l_ptr, cont_intv / 4);
			break;
		case RESET_MSG:
			pr_debug("%s<%s>, requested by peer\n",
				 link_rst_msg, l_ptr->name);
			tipc_link_reset(l_ptr);
			l_ptr->state = RESET_RESET;
			l_ptr->fsm_msg_cnt = 0;
			tipc_link_proto_xmit(l_ptr, ACTIVATE_MSG,
					     0, 0, 0, 0, 0);
			l_ptr->fsm_msg_cnt++;
			link_set_timer(l_ptr, cont_intv);
			break;
		default:
			pr_debug("%s%u in WW state\n", link_unk_evt, event);
		}
		break;
	case WORKING_UNKNOWN:
		switch (event) {
		case TRAFFIC_MSG_EVT:
		case ACTIVATE_MSG:
			l_ptr->state = WORKING_WORKING;
			l_ptr->fsm_msg_cnt = 0;
			link_set_timer(l_ptr, cont_intv);
			break;
		case RESET_MSG:
			pr_debug("%s<%s>, requested by peer while probing\n",
				 link_rst_msg, l_ptr->name);
			tipc_link_reset(l_ptr);
			l_ptr->state = RESET_RESET;
			l_ptr->fsm_msg_cnt = 0;
			tipc_link_proto_xmit(l_ptr, ACTIVATE_MSG,
					     0, 0, 0, 0, 0);
			l_ptr->fsm_msg_cnt++;
			link_set_timer(l_ptr, cont_intv);
			break;
		case TIMEOUT_EVT:
			if (l_ptr->next_in_no != l_ptr->checkpoint) {
				l_ptr->state = WORKING_WORKING;
				l_ptr->fsm_msg_cnt = 0;
				l_ptr->checkpoint = l_ptr->next_in_no;
				if (tipc_bclink_acks_missing(l_ptr->owner)) {
					tipc_link_proto_xmit(l_ptr, STATE_MSG,
							     0, 0, 0, 0, 0);
					l_ptr->fsm_msg_cnt++;
				}
				link_set_timer(l_ptr, cont_intv);
			} else if (l_ptr->fsm_msg_cnt < l_ptr->abort_limit) {
				tipc_link_proto_xmit(l_ptr, STATE_MSG,
						     1, 0, 0, 0, 0);
				l_ptr->fsm_msg_cnt++;
				link_set_timer(l_ptr, cont_intv / 4);
			} else {	/* Link has failed */
				pr_debug("%s<%s>, peer not responding\n",
					 link_rst_msg, l_ptr->name);
				tipc_link_reset(l_ptr);
				l_ptr->state = RESET_UNKNOWN;
				l_ptr->fsm_msg_cnt = 0;
				tipc_link_proto_xmit(l_ptr, RESET_MSG,
						     0, 0, 0, 0, 0);
				l_ptr->fsm_msg_cnt++;
				link_set_timer(l_ptr, cont_intv);
			}
			break;
		default:
			pr_err("%s%u in WU state\n", link_unk_evt, event);
		}
		break;
	case RESET_UNKNOWN:
		switch (event) {
		case TRAFFIC_MSG_EVT:
			break;
		case ACTIVATE_MSG:
			other = l_ptr->owner->active_links[0];
			if (other && link_working_unknown(other))
				break;
			l_ptr->state = WORKING_WORKING;
			l_ptr->fsm_msg_cnt = 0;
			link_activate(l_ptr);
			tipc_link_proto_xmit(l_ptr, STATE_MSG, 1, 0, 0, 0, 0);
			l_ptr->fsm_msg_cnt++;
			if (l_ptr->owner->working_links == 1)
				tipc_link_sync_xmit(l_ptr);
			link_set_timer(l_ptr, cont_intv);
			break;
		case RESET_MSG:
			l_ptr->state = RESET_RESET;
			l_ptr->fsm_msg_cnt = 0;
			tipc_link_proto_xmit(l_ptr, ACTIVATE_MSG,
					     1, 0, 0, 0, 0);
			l_ptr->fsm_msg_cnt++;
			link_set_timer(l_ptr, cont_intv);
			break;
		case STARTING_EVT:
			l_ptr->flags |= LINK_STARTED;
			l_ptr->fsm_msg_cnt++;
			link_set_timer(l_ptr, cont_intv);
			break;
		case TIMEOUT_EVT:
			tipc_link_proto_xmit(l_ptr, RESET_MSG, 0, 0, 0, 0, 0);
			l_ptr->fsm_msg_cnt++;
			link_set_timer(l_ptr, cont_intv);
			break;
		default:
			pr_err("%s%u in RU state\n", link_unk_evt, event);
		}
		break;
	case RESET_RESET:
		switch (event) {
		case TRAFFIC_MSG_EVT:
		case ACTIVATE_MSG:
			other = l_ptr->owner->active_links[0];
			if (other && link_working_unknown(other))
				break;
			l_ptr->state = WORKING_WORKING;
			l_ptr->fsm_msg_cnt = 0;
			link_activate(l_ptr);
			tipc_link_proto_xmit(l_ptr, STATE_MSG, 1, 0, 0, 0, 0);
			l_ptr->fsm_msg_cnt++;
			if (l_ptr->owner->working_links == 1)
				tipc_link_sync_xmit(l_ptr);
			link_set_timer(l_ptr, cont_intv);
			break;
		case RESET_MSG:
			break;
		case TIMEOUT_EVT:
			tipc_link_proto_xmit(l_ptr, ACTIVATE_MSG,
					     0, 0, 0, 0, 0);
			l_ptr->fsm_msg_cnt++;
			link_set_timer(l_ptr, cont_intv);
			break;
		default:
			pr_err("%s%u in RR state\n", link_unk_evt, event);
		}
		break;
	default:
		pr_err("Unknown link state %u/%u\n", l_ptr->state, event);
	}
}
/* tipc_link_cong: determine return value and how to treat the
 * sent buffer during link congestion.
 * - For plain, errorless user data messages we keep the buffer and
 *   return -ELINKCONG
 * - For all other messages we discard the buffer and return -EHOSTUNREACH
 * - For TIPC internal messages we also reset the link
 */
static int tipc_link_cong(struct tipc_link *link, struct sk_buff_head *list)
{
	struct sk_buff *skb = skb_peek(list);
	struct tipc_msg *msg = buf_msg(skb);
	uint imp = tipc_msg_tot_importance(msg);
	u32 oport = msg_tot_origport(msg);

	if (unlikely(imp > TIPC_CRITICAL_IMPORTANCE)) {
		pr_warn("%s<%s>, send queue full", link_rst_msg, link->name);
		tipc_link_reset(link);
		goto drop;
	}
	if (unlikely(msg_errcode(msg)))
		goto drop;
	if (unlikely(msg_reroute_cnt(msg)))
		goto drop;
	if (TIPC_SKB_CB(skb)->wakeup_pending)
		return -ELINKCONG;
	if (link_schedule_user(link, oport, skb_queue_len(list), imp))
		return -ELINKCONG;
drop:
	__skb_queue_purge(list);
	return -EHOSTUNREACH;
}
/**
 * __tipc_link_xmit(): same as tipc_link_xmit, but destlink is known & locked
 * @link: link to use
 * @list: chain of buffers containing message
 *
 * Consumes the buffer chain, except when returning -ELINKCONG
 * Returns 0 if success, otherwise errno: -ELINKCONG, -EMSGSIZE (plain socket
 * user data messages) or -EHOSTUNREACH (all other messages/senders)
 * Only the socket functions tipc_send_stream() and tipc_send_packet() need
 * to act on the return value, since they may need to do more send attempts.
 */
int __tipc_link_xmit(struct net *net, struct tipc_link *link,
		     struct sk_buff_head *list)
{
	struct tipc_msg *msg = buf_msg(skb_peek(list));
	uint psz = msg_size(msg);
	uint sndlim = link->queue_limit[0];
	uint imp = tipc_msg_tot_importance(msg);
	uint mtu = link->max_pkt;
	uint ack = mod(link->next_in_no - 1);
	uint seqno = link->next_out_no;
	uint bc_last_in = link->owner->bclink.last_in;
	struct tipc_media_addr *addr = &link->media_addr;
	struct sk_buff_head *outqueue = &link->outqueue;
	struct sk_buff *skb, *tmp;

	/* Match queue limits against msg importance: */
	if (unlikely(skb_queue_len(outqueue) >= link->queue_limit[imp]))
		return tipc_link_cong(link, list);

	/* Has valid packet limit been used ? */
	if (unlikely(psz > mtu)) {
		__skb_queue_purge(list);
		return -EMSGSIZE;
	}

	/* Prepare each packet for sending, and add to outqueue: */
	skb_queue_walk_safe(list, skb, tmp) {
		__skb_unlink(skb, list);
		msg = buf_msg(skb);
		msg_set_word(msg, 2, ((ack << 16) | mod(seqno)));
		msg_set_bcast_ack(msg, bc_last_in);

		if (skb_queue_len(outqueue) < sndlim) {
			__skb_queue_tail(outqueue, skb);
			tipc_bearer_send(net, link->bearer_id,
					 skb, addr);
			link->next_out = NULL;
			link->unacked_window = 0;
		} else if (tipc_msg_bundle(outqueue, skb, mtu)) {
			link->stats.sent_bundled++;
			continue;
		} else if (tipc_msg_make_bundle(outqueue, skb, mtu,
						link->addr)) {
			link->stats.sent_bundled++;
			link->stats.sent_bundles++;
			if (!link->next_out)
				link->next_out = skb_peek_tail(outqueue);
		} else {
			__skb_queue_tail(outqueue, skb);
			if (!link->next_out)
				link->next_out = skb;
		}
		seqno++;
	}
	link->next_out_no = seqno;
	return 0;
}
static void skb2list(struct sk_buff *skb, struct sk_buff_head *list)
{
	skb_queue_head_init(list);
	__skb_queue_tail(list, skb);
}
static int __tipc_link_xmit_skb(struct tipc_link *link, struct sk_buff *skb)
{
	struct sk_buff_head head;

	skb2list(skb, &head);
	return __tipc_link_xmit(link->owner->net, link, &head);
}
int tipc_link_xmit_skb(struct net *net, struct sk_buff *skb, u32 dnode,
		       u32 selector)
{
	struct sk_buff_head head;

	skb2list(skb, &head);
	return tipc_link_xmit(net, &head, dnode, selector);
}
/**
 * tipc_link_xmit() is the general link level function for message sending
 * @net: the applicable net namespace
 * @list: chain of buffers containing message
 * @dnode: address of destination node
 * @selector: a number used for deterministic link selection
 * Consumes the buffer chain, except when returning -ELINKCONG
 * Returns 0 if success, otherwise errno: -ELINKCONG,-EHOSTUNREACH,-EMSGSIZE
 */
int tipc_link_xmit(struct net *net, struct sk_buff_head *list, u32 dnode,
		   u32 selector)
{
	struct tipc_link *link = NULL;
	struct tipc_node *node;
	int rc = -EHOSTUNREACH;

	node = tipc_node_find(net, dnode);
	if (node) {
		tipc_node_lock(node);
		link = node->active_links[selector & 1];
		if (link)
			rc = __tipc_link_xmit(net, link, list);
		tipc_node_unlock(node);
	}
	if (link)
		return rc;

	if (likely(in_own_node(net, dnode)))
		return tipc_sk_rcv(net, list);

	__skb_queue_purge(list);
	return rc;
}
/**
 * tipc_link_sync_xmit - synchronize broadcast link endpoints.
 *
 * Give a newly added peer node the sequence number where it should
 * start receiving and acking broadcast packets.
 *
 * Called with node locked
 */
static void tipc_link_sync_xmit(struct tipc_link *link)
{
	struct sk_buff *skb;
	struct tipc_msg *msg;

	skb = tipc_buf_acquire(INT_H_SIZE);
	if (!skb)
		return;

	msg = buf_msg(skb);
	tipc_msg_init(link_own_addr(link), msg, BCAST_PROTOCOL, STATE_MSG,
		      INT_H_SIZE, link->addr);
	msg_set_last_bcast(msg, link->owner->bclink.acked);
	__tipc_link_xmit_skb(link, skb);
}
/**
 * tipc_link_sync_rcv - synchronize broadcast link endpoints.
 * Receive the sequence number where we should start receiving and
 * acking broadcast packets from a newly added peer node, and open
 * up for reception of such packets.
 *
 * Called with node locked
 */
static void tipc_link_sync_rcv(struct tipc_node *n, struct sk_buff *buf)
{
	struct tipc_msg *msg = buf_msg(buf);

	n->bclink.last_sent = n->bclink.last_in = msg_last_bcast(msg);
	n->bclink.recv_permitted = true;
	kfree_skb(buf);
}
struct sk_buff *tipc_skb_queue_next(const struct sk_buff_head *list,
				    const struct sk_buff *skb)
{
	if (skb_queue_is_last(list, skb))
		return NULL;
	return skb->next;
}
/*
 * tipc_link_push_packets - push unsent packets to bearer
 *
 * Push out the unsent messages of a link where congestion
 * has abated. Node is locked.
 *
 * Called with node locked
 */
void tipc_link_push_packets(struct tipc_link *l_ptr)
{
	struct sk_buff_head *outqueue = &l_ptr->outqueue;
	struct sk_buff *skb = l_ptr->next_out;
	struct tipc_msg *msg;
	u32 next, first;

	skb_queue_walk_from(outqueue, skb) {
		msg = buf_msg(skb);
		next = msg_seqno(msg);
		first = buf_seqno(skb_peek(outqueue));

		if (mod(next - first) < l_ptr->queue_limit[0]) {
			msg_set_ack(msg, mod(l_ptr->next_in_no - 1));
			msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
			if (msg_user(msg) == MSG_BUNDLER)
				TIPC_SKB_CB(skb)->bundling = false;
			tipc_bearer_send(l_ptr->owner->net,
					 l_ptr->bearer_id, skb,
					 &l_ptr->media_addr);
			l_ptr->next_out = tipc_skb_queue_next(outqueue, skb);
		} else {
			break;
		}
	}
}
void tipc_link_reset_all(struct tipc_node *node)
{
	char addr_string[16];
	u32 i;

	tipc_node_lock(node);

	pr_warn("Resetting all links to %s\n",
		tipc_addr_string_fill(addr_string, node->addr));

	for (i = 0; i < MAX_BEARERS; i++) {
		if (node->links[i]) {
			link_print(node->links[i], "Resetting link\n");
			tipc_link_reset(node->links[i]);
		}
	}

	tipc_node_unlock(node);
}
static void link_retransmit_failure(struct tipc_link *l_ptr,
				    struct sk_buff *buf)
{
	struct tipc_msg *msg = buf_msg(buf);
	struct net *net = l_ptr->owner->net;

	pr_warn("Retransmission failure on link <%s>\n", l_ptr->name);

	if (l_ptr->addr) {
		/* Handle failure on standard link */
		link_print(l_ptr, "Resetting link\n");
		tipc_link_reset(l_ptr);
	} else {
		/* Handle failure on broadcast link */
		struct tipc_node *n_ptr;
		char addr_string[16];

		pr_info("Msg seq number: %u,  ", msg_seqno(msg));
		pr_cont("Outstanding acks: %lu\n",
			(unsigned long) TIPC_SKB_CB(buf)->handle);

		n_ptr = tipc_bclink_retransmit_to(net);
		tipc_node_lock(n_ptr);

		tipc_addr_string_fill(addr_string, n_ptr->addr);
		pr_info("Broadcast link info for %s\n", addr_string);
		pr_info("Reception permitted: %d,  Acked: %u\n",
			n_ptr->bclink.recv_permitted,
			n_ptr->bclink.acked);
		pr_info("Last in: %u,  Oos state: %u,  Last sent: %u\n",
			n_ptr->bclink.last_in,
			n_ptr->bclink.oos_state,
			n_ptr->bclink.last_sent);

		tipc_node_unlock(n_ptr);

		tipc_bclink_set_flags(net, TIPC_BCLINK_RESET);
		l_ptr->stale_count = 0;
	}
}
void tipc_link_retransmit(struct tipc_link *l_ptr, struct sk_buff *skb,
			  u32 retransmits)
{
	struct tipc_msg *msg;

	if (!skb)
		return;

	msg = buf_msg(skb);

	/* Detect repeated retransmit failures */
	if (l_ptr->last_retransmitted == msg_seqno(msg)) {
		if (++l_ptr->stale_count > 100) {
			link_retransmit_failure(l_ptr, skb);
			return;
		}
	} else {
		l_ptr->last_retransmitted = msg_seqno(msg);
		l_ptr->stale_count = 1;
	}

	skb_queue_walk_from(&l_ptr->outqueue, skb) {
		if (!retransmits || skb == l_ptr->next_out)
			break;
		msg = buf_msg(skb);
		msg_set_ack(msg, mod(l_ptr->next_in_no - 1));
		msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
		tipc_bearer_send(l_ptr->owner->net, l_ptr->bearer_id, skb,
				 &l_ptr->media_addr);
		retransmits--;
		l_ptr->stats.retransmitted++;
	}
}
static void link_retrieve_defq(struct tipc_link *link,
			       struct sk_buff_head *list)
{
	u32 seq_no;

	if (skb_queue_empty(&link->deferred_queue))
		return;

	seq_no = buf_seqno(skb_peek(&link->deferred_queue));
	if (seq_no == mod(link->next_in_no))
		skb_queue_splice_tail_init(&link->deferred_queue, list);
}
/**
 * tipc_rcv - process TIPC packets/messages arriving from off-node
 * @net: the applicable net namespace
 * @skb: TIPC packet
 * @b_ptr: pointer to bearer message arrived on
 *
 * Invoked with no locks held. Bearer pointer must point to a valid bearer
 * structure (i.e. cannot be NULL), but bearer can be inactive.
 */
void tipc_rcv(struct net *net, struct sk_buff *skb, struct tipc_bearer *b_ptr)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct sk_buff_head head;
	struct tipc_node *n_ptr;
	struct tipc_link *l_ptr;
	struct sk_buff *skb1, *tmp;
	struct tipc_msg *msg;
	u32 seq_no;
	u32 ackd;
	u32 released;

	skb2list(skb, &head);

	while ((skb = __skb_dequeue(&head))) {
		/* Ensure message is well-formed */
		if (unlikely(!tipc_msg_validate(skb)))
			goto discard;

		/* Ensure message data is a single contiguous unit */
		if (unlikely(skb_linearize(skb)))
			goto discard;

		/* Handle arrival of a non-unicast link message */
		msg = buf_msg(skb);
		if (unlikely(msg_non_seq(msg))) {
			if (msg_user(msg) == LINK_CONFIG)
				tipc_disc_rcv(net, skb, b_ptr);
			else
				tipc_bclink_rcv(net, skb);
			continue;
		}

		/* Discard unicast link messages destined for another node */
		if (unlikely(!msg_short(msg) &&
			     (msg_destnode(msg) != tn->own_addr)))
			goto discard;

		/* Locate neighboring node that sent message */
		n_ptr = tipc_node_find(net, msg_prevnode(msg));
		if (unlikely(!n_ptr))
			goto discard;
		tipc_node_lock(n_ptr);

		/* Locate unicast link endpoint that should handle message */
		l_ptr = n_ptr->links[b_ptr->identity];
		if (unlikely(!l_ptr))
			goto unlock;

		/* Verify that communication with node is currently allowed */
		if ((n_ptr->action_flags & TIPC_WAIT_PEER_LINKS_DOWN) &&
		    msg_user(msg) == LINK_PROTOCOL &&
		    (msg_type(msg) == RESET_MSG ||
		    msg_type(msg) == ACTIVATE_MSG) &&
		    !msg_redundant_link(msg))
			n_ptr->action_flags &= ~TIPC_WAIT_PEER_LINKS_DOWN;

		if (tipc_node_blocked(n_ptr))
			goto unlock;

		/* Validate message sequence number info */
		seq_no = msg_seqno(msg);
		ackd = msg_ack(msg);

		/* Release acked messages */
		if (n_ptr->bclink.recv_permitted)
			tipc_bclink_acknowledge(n_ptr, msg_bcast_ack(msg));

		released = 0;
		skb_queue_walk_safe(&l_ptr->outqueue, skb1, tmp) {
			if (skb1 == l_ptr->next_out ||
			    more(buf_seqno(skb1), ackd))
				break;
			__skb_unlink(skb1, &l_ptr->outqueue);
			kfree_skb(skb1);
			released = 1;
		}

		/* Try sending any messages link endpoint has pending */
		if (unlikely(l_ptr->next_out))
			tipc_link_push_packets(l_ptr);

		if (released && !skb_queue_empty(&l_ptr->wakeupq))
			link_prepare_wakeup(l_ptr);

		/* Process the incoming packet */
		if (unlikely(!link_working_working(l_ptr))) {
			if (msg_user(msg) == LINK_PROTOCOL) {
				tipc_link_proto_rcv(l_ptr, skb);
				link_retrieve_defq(l_ptr, &head);
				skb = NULL;
				goto unlock;
			}

			/* Traffic message. Conditionally activate link */
			link_state_event(l_ptr, TRAFFIC_MSG_EVT);

			if (link_working_working(l_ptr)) {
				/* Re-insert buffer in front of queue */
				__skb_queue_head(&head, skb);
				skb = NULL;
				goto unlock;
			}
			goto unlock;
		}

		/* Link is now in state WORKING_WORKING */
		if (unlikely(seq_no != mod(l_ptr->next_in_no))) {
			link_handle_out_of_seq_msg(l_ptr, skb);
			link_retrieve_defq(l_ptr, &head);
			skb = NULL;
			goto unlock;
		}
		l_ptr->next_in_no++;
		if (unlikely(!skb_queue_empty(&l_ptr->deferred_queue)))
			link_retrieve_defq(l_ptr, &head);

		if (unlikely(++l_ptr->unacked_window >= TIPC_MIN_LINK_WIN)) {
			l_ptr->stats.sent_acks++;
			tipc_link_proto_xmit(l_ptr, STATE_MSG, 0, 0, 0, 0, 0);
		}
		tipc_link_input(l_ptr, skb);
		skb = NULL;
unlock:
		tipc_node_unlock(n_ptr);
discard:
		kfree_skb(skb);
	}
}
/* tipc_data_input - deliver data and name distr msgs to upper layer
 *
 * Consumes buffer if message is of right type
 * Node lock must be held
 */
static bool tipc_data_input(struct tipc_link *link, struct sk_buff *skb)
{
	struct tipc_node *node = link->owner;
	struct tipc_msg *msg = buf_msg(skb);
	u32 dport = msg_destport(msg);

	switch (msg_user(msg)) {
	case TIPC_LOW_IMPORTANCE:
	case TIPC_MEDIUM_IMPORTANCE:
	case TIPC_HIGH_IMPORTANCE:
	case TIPC_CRITICAL_IMPORTANCE:
	case CONN_MANAGER:
		if (tipc_skb_queue_tail(&link->inputq, skb, dport)) {
			node->inputq = &link->inputq;
			node->action_flags |= TIPC_MSG_EVT;
		}
		return true;
	case NAME_DISTRIBUTOR:
		node->bclink.recv_permitted = true;
		node->namedq = &link->namedq;
		skb_queue_tail(&link->namedq, skb);
		if (skb_queue_len(&link->namedq) == 1)
			node->action_flags |= TIPC_NAMED_MSG_EVT;
		return true;
	case MSG_BUNDLER:
	case CHANGEOVER_PROTOCOL:
	case MSG_FRAGMENTER:
	case BCAST_PROTOCOL:
		return false;
	default:
		pr_warn("Dropping received illegal msg type\n");
		kfree_skb(skb);
		return false;
	}
}
/* tipc_link_input - process packet that has passed link protocol check
 *
 * Consumes buffer
 * Node lock must be held
 */
static void tipc_link_input(struct tipc_link *link, struct sk_buff *skb)
{
	struct tipc_node *node = link->owner;
	struct tipc_msg *msg = buf_msg(skb);
	struct sk_buff *iskb;
	int pos = 0;

	if (likely(tipc_data_input(link, skb)))
		return;

	switch (msg_user(msg)) {
	case CHANGEOVER_PROTOCOL:
		if (!tipc_link_tunnel_rcv(node, &skb))
			break;
		if (msg_user(buf_msg(skb)) != MSG_BUNDLER) {
			tipc_data_input(link, skb);
			break;
		}
	case MSG_BUNDLER:
		link->stats.recv_bundles++;
		link->stats.recv_bundled += msg_msgcnt(msg);

		while (tipc_msg_extract(skb, &iskb, &pos))
			tipc_data_input(link, iskb);
		break;
	case MSG_FRAGMENTER:
		link->stats.recv_fragments++;
		if (tipc_buf_append(&link->reasm_buf, &skb)) {
			link->stats.recv_fragmented++;
			tipc_data_input(link, skb);
		} else if (!link->reasm_buf) {
			tipc_link_reset(link);
		}
		break;
	case BCAST_PROTOCOL:
		tipc_link_sync_rcv(node, skb);
		break;
	default:
		break;
	}
}
/**
 * tipc_link_defer_pkt - Add out-of-sequence message to deferred reception queue
 *
 * Returns increase in queue length (i.e. 0 or 1)
 */
u32 tipc_link_defer_pkt(struct sk_buff_head *list, struct sk_buff *skb)
{
	struct sk_buff *skb1;
	u32 seq_no = buf_seqno(skb);

	/* Empty queue ? */
	if (skb_queue_empty(list)) {
		__skb_queue_tail(list, skb);
		return 1;
	}

	/* Last ? */
	if (less(buf_seqno(skb_peek_tail(list)), seq_no)) {
		__skb_queue_tail(list, skb);
		return 1;
	}

	/* Locate insertion point in queue, then insert; discard if duplicate */
	skb_queue_walk(list, skb1) {
		u32 curr_seqno = buf_seqno(skb1);

		if (seq_no == curr_seqno) {
			kfree_skb(skb);
			return 0;
		}

		if (less(seq_no, curr_seqno))
			break;
	}

	__skb_queue_before(list, skb1, skb);
	return 1;
}
/*
 * link_handle_out_of_seq_msg - handle arrival of out-of-sequence packet
 */
static void link_handle_out_of_seq_msg(struct tipc_link *l_ptr,
				       struct sk_buff *buf)
{
	u32 seq_no = buf_seqno(buf);

	if (likely(msg_user(buf_msg(buf)) == LINK_PROTOCOL)) {
		tipc_link_proto_rcv(l_ptr, buf);
		return;
	}

	/* Record OOS packet arrival (force mismatch on next timeout) */
	l_ptr->checkpoint--;

	/*
	 * Discard packet if a duplicate; otherwise add it to deferred queue
	 * and notify peer of gap as per protocol specification
	 */
	if (less(seq_no, mod(l_ptr->next_in_no))) {
		l_ptr->stats.duplicates++;
		kfree_skb(buf);
		return;
	}

	if (tipc_link_defer_pkt(&l_ptr->deferred_queue, buf)) {
		l_ptr->stats.deferred_recv++;
		if ((skb_queue_len(&l_ptr->deferred_queue) % 16) == 1)
			tipc_link_proto_xmit(l_ptr, STATE_MSG, 0, 0, 0, 0, 0);
	} else {
		l_ptr->stats.duplicates++;
	}
}
/*
 * Send protocol message to the other endpoint.
 */
void tipc_link_proto_xmit(struct tipc_link *l_ptr, u32 msg_typ, int probe_msg,
			  u32 gap, u32 tolerance, u32 priority, u32 ack_mtu)
{
	struct sk_buff *buf = NULL;
	struct tipc_msg *msg = l_ptr->pmsg;
	u32 msg_size = sizeof(l_ptr->proto_msg);
	int r_flag;

	/* Don't send protocol message during link changeover */
	if (l_ptr->exp_msg_count)
		return;

	/* Abort non-RESET send if communication with node is prohibited */
	if ((tipc_node_blocked(l_ptr->owner)) && (msg_typ != RESET_MSG))
		return;

	/* Create protocol message with "out-of-sequence" sequence number */
	msg_set_type(msg, msg_typ);
	msg_set_net_plane(msg, l_ptr->net_plane);
	msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
	msg_set_last_bcast(msg, tipc_bclink_get_last_sent(l_ptr->owner->net));

	if (msg_typ == STATE_MSG) {
		u32 next_sent = mod(l_ptr->next_out_no);

		if (!tipc_link_is_up(l_ptr))
			return;
		if (l_ptr->next_out)
			next_sent = buf_seqno(l_ptr->next_out);
		msg_set_next_sent(msg, next_sent);
		if (!skb_queue_empty(&l_ptr->deferred_queue)) {
			u32 rec = buf_seqno(skb_peek(&l_ptr->deferred_queue));
			gap = mod(rec - mod(l_ptr->next_in_no));
		}
		msg_set_seq_gap(msg, gap);
		if (gap)
			l_ptr->stats.sent_nacks++;
		msg_set_link_tolerance(msg, tolerance);
		msg_set_linkprio(msg, priority);
		msg_set_max_pkt(msg, ack_mtu);
		msg_set_ack(msg, mod(l_ptr->next_in_no - 1));
		msg_set_probe(msg, probe_msg != 0);
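		/* Each probe size lands halfway between the current max_pkt
		 * and the negotiation target, rounded up to a multiple of 4;
		 * e.g. mtu 1500 and target 2000 give a 1752 byte probe. After
		 * 10 unanswered probes the target is lowered to just below
		 * the failed probe size and the search restarts.
		 */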
		if (probe_msg) {
			u32 mtu = l_ptr->max_pkt;

			if ((mtu < l_ptr->max_pkt_target) &&
			    link_working_working(l_ptr) &&
			    l_ptr->fsm_msg_cnt) {
				msg_size = (mtu + (l_ptr->max_pkt_target - mtu)/2 + 2) & ~3;
				if (l_ptr->max_pkt_probes == 10) {
					l_ptr->max_pkt_target = (msg_size - 4);
					l_ptr->max_pkt_probes = 0;
					msg_size = (mtu + (l_ptr->max_pkt_target - mtu)/2 + 2) & ~3;
				}
				l_ptr->max_pkt_probes++;
			}

			l_ptr->stats.sent_probes++;
		}
		l_ptr->stats.sent_states++;
	} else {		/* RESET_MSG or ACTIVATE_MSG */
		msg_set_ack(msg, mod(l_ptr->reset_checkpoint - 1));
		msg_set_seq_gap(msg, 0);
		msg_set_next_sent(msg, 1);
		msg_set_probe(msg, 0);
		msg_set_link_tolerance(msg, l_ptr->tolerance);
		msg_set_linkprio(msg, l_ptr->priority);
		msg_set_max_pkt(msg, l_ptr->max_pkt_target);
	}

	r_flag = (l_ptr->owner->working_links > tipc_link_is_up(l_ptr));
	msg_set_redundant_link(msg, r_flag);
	msg_set_linkprio(msg, l_ptr->priority);
	msg_set_size(msg, msg_size);

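	/* Protocol messages are stamped with a sequence number half the
	 * 16-bit sequence space (0x7fff) ahead of the data stream, keeping
	 * them recognizably outside the window of valid data packets.
	 */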
	msg_set_seqno(msg, mod(l_ptr->next_out_no + (0xffff/2)));

	buf = tipc_buf_acquire(msg_size);
	if (!buf)
		return;

	skb_copy_to_linear_data(buf, msg, sizeof(l_ptr->proto_msg));
	buf->priority = TC_PRIO_CONTROL;

	tipc_bearer_send(l_ptr->owner->net, l_ptr->bearer_id, buf,
			 &l_ptr->media_addr);
	l_ptr->unacked_window = 0;
	kfree_skb(buf);
}
/*
 * Receive protocol message:
 * Note that network plane id propagates through the network, and may
 * change at any time. The node with the lowest address rules.
 */
static void tipc_link_proto_rcv(struct tipc_link *l_ptr,
				struct sk_buff *buf)
{
	u32 rec_gap = 0;
	u32 max_pkt_info;
	u32 max_pkt_ack;
	u32 msg_tol;
	struct tipc_msg *msg = buf_msg(buf);

	/* Discard protocol message during link changeover */
	if (l_ptr->exp_msg_count)
		goto exit;

	if (l_ptr->net_plane != msg_net_plane(msg))
		if (link_own_addr(l_ptr) > msg_prevnode(msg))
			l_ptr->net_plane = msg_net_plane(msg);

	switch (msg_type(msg)) {

	case RESET_MSG:
		if (!link_working_unknown(l_ptr) &&
		    (l_ptr->peer_session != INVALID_SESSION)) {
			if (less_eq(msg_session(msg), l_ptr->peer_session))
				break; /* duplicate or old reset: ignore */
		}

		if (!msg_redundant_link(msg) && (link_working_working(l_ptr) ||
				link_working_unknown(l_ptr))) {
			/*
			 * peer has lost contact -- don't allow peer's links
			 * to reactivate before we recognize loss & clean up
			 */
			l_ptr->owner->action_flags |= TIPC_WAIT_OWN_LINKS_DOWN;
		}

		link_state_event(l_ptr, RESET_MSG);

		/* fall thru' */
	case ACTIVATE_MSG:
		/* Update link settings according to other endpoint's values */
		strcpy((strrchr(l_ptr->name, ':') + 1), (char *)msg_data(msg));

		msg_tol = msg_link_tolerance(msg);
		if (msg_tol > l_ptr->tolerance)
			link_set_supervision_props(l_ptr, msg_tol);

		if (msg_linkprio(msg) > l_ptr->priority)
			l_ptr->priority = msg_linkprio(msg);

		max_pkt_info = msg_max_pkt(msg);
		if (max_pkt_info) {
			if (max_pkt_info < l_ptr->max_pkt_target)
				l_ptr->max_pkt_target = max_pkt_info;
			if (l_ptr->max_pkt > l_ptr->max_pkt_target)
				l_ptr->max_pkt = l_ptr->max_pkt_target;
		} else {
			l_ptr->max_pkt = l_ptr->max_pkt_target;
		}

		/* Synchronize broadcast link info, if not done previously */
		if (!tipc_node_is_up(l_ptr->owner)) {
			l_ptr->owner->bclink.last_sent =
				l_ptr->owner->bclink.last_in =
				msg_last_bcast(msg);
			l_ptr->owner->bclink.oos_state = 0;
		}

		l_ptr->peer_session = msg_session(msg);
		l_ptr->peer_bearer_id = msg_bearer_id(msg);

		if (msg_type(msg) == ACTIVATE_MSG)
			link_state_event(l_ptr, ACTIVATE_MSG);
		break;
	case STATE_MSG:

		msg_tol = msg_link_tolerance(msg);
		if (msg_tol)
			link_set_supervision_props(l_ptr, msg_tol);

		if (msg_linkprio(msg) &&
		    (msg_linkprio(msg) != l_ptr->priority)) {
			pr_debug("%s<%s>, priority change %u->%u\n",
				 link_rst_msg, l_ptr->name,
				 l_ptr->priority, msg_linkprio(msg));
			l_ptr->priority = msg_linkprio(msg);
			tipc_link_reset(l_ptr); /* Enforce change to take effect */
			break;
		}

		/* Record reception; force mismatch at next timeout: */
		l_ptr->checkpoint--;

		link_state_event(l_ptr, TRAFFIC_MSG_EVT);
		l_ptr->stats.recv_states++;
		if (link_reset_unknown(l_ptr))
			break;

		if (less_eq(mod(l_ptr->next_in_no), msg_next_sent(msg))) {
			rec_gap = mod(msg_next_sent(msg) -
				      mod(l_ptr->next_in_no));
		}

		max_pkt_ack = msg_max_pkt(msg);
		if (max_pkt_ack > l_ptr->max_pkt) {
			l_ptr->max_pkt = max_pkt_ack;
			l_ptr->max_pkt_probes = 0;
		}

		max_pkt_ack = 0;
		if (msg_probe(msg)) {
			l_ptr->stats.recv_probes++;
			if (msg_size(msg) > sizeof(l_ptr->proto_msg))
				max_pkt_ack = msg_size(msg);
		}

		/* Protocol message before retransmits, reduce loss risk */
		if (l_ptr->owner->bclink.recv_permitted)
			tipc_bclink_update_link_state(l_ptr->owner,
						      msg_last_bcast(msg));

		if (rec_gap || (msg_probe(msg))) {
			tipc_link_proto_xmit(l_ptr, STATE_MSG, 0, rec_gap, 0,
					     0, max_pkt_ack);
		}
		if (msg_seq_gap(msg)) {
			l_ptr->stats.recv_nacks++;
			tipc_link_retransmit(l_ptr, skb_peek(&l_ptr->outqueue),
					     msg_seq_gap(msg));
		}
		break;
	}
exit:
	kfree_skb(buf);
}
/* tipc_link_tunnel_xmit(): Tunnel one packet via a link belonging to
 * a different bearer. Owner node is locked.
 */
static void tipc_link_tunnel_xmit(struct tipc_link *l_ptr,
				  struct tipc_msg *tunnel_hdr,
				  struct tipc_msg *msg,
				  u32 selector)
{
	struct tipc_link *tunnel;
	struct sk_buff *skb;
	u32 length = msg_size(msg);

	tunnel = l_ptr->owner->active_links[selector & 1];
	if (!tipc_link_is_up(tunnel)) {
		pr_warn("%stunnel link no longer available\n", link_co_err);
		return;
	}
	msg_set_size(tunnel_hdr, length + INT_H_SIZE);
	skb = tipc_buf_acquire(length + INT_H_SIZE);
	if (!skb) {
		pr_warn("%sunable to send tunnel msg\n", link_co_err);
		return;
	}
	skb_copy_to_linear_data(skb, tunnel_hdr, INT_H_SIZE);
	skb_copy_to_linear_data_offset(skb, INT_H_SIZE, msg, length);
	__tipc_link_xmit_skb(tunnel, skb);
}
/* tipc_link_failover_send_queue(): A link has gone down, but a second
 * link is still active. We can do failover. Tunnel the failing link's
 * whole send queue via the remaining link. This way, we don't lose
 * any packets, and sequence order is preserved for subsequent traffic
 * sent over the remaining link. Owner node is locked.
 */
void tipc_link_failover_send_queue(struct tipc_link *l_ptr)
{
	u32 msgcount = skb_queue_len(&l_ptr->outqueue);
	struct tipc_link *tunnel = l_ptr->owner->active_links[0];
	struct tipc_msg tunnel_hdr;
	struct sk_buff *skb;
	int split_bundles;

	if (!tunnel)
		return;

	tipc_msg_init(link_own_addr(l_ptr), &tunnel_hdr, CHANGEOVER_PROTOCOL,
		      ORIGINAL_MSG, INT_H_SIZE, l_ptr->addr);
	msg_set_bearer_id(&tunnel_hdr, l_ptr->peer_bearer_id);
	msg_set_msgcnt(&tunnel_hdr, msgcount);

	if (skb_queue_empty(&l_ptr->outqueue)) {
		skb = tipc_buf_acquire(INT_H_SIZE);
		if (skb) {
			skb_copy_to_linear_data(skb, &tunnel_hdr, INT_H_SIZE);
			msg_set_size(&tunnel_hdr, INT_H_SIZE);
			__tipc_link_xmit_skb(tunnel, skb);
		} else {
			pr_warn("%sunable to send changeover msg\n",
				link_co_err);
		}
		return;
	}

	split_bundles = (l_ptr->owner->active_links[0] !=
			 l_ptr->owner->active_links[1]);

	skb_queue_walk(&l_ptr->outqueue, skb) {
		struct tipc_msg *msg = buf_msg(skb);

		if ((msg_user(msg) == MSG_BUNDLER) && split_bundles) {
			struct tipc_msg *m = msg_get_wrapped(msg);
			unchar *pos = (unchar *)m;

			msgcount = msg_msgcnt(msg);
			while (msgcount--) {
				msg_set_seqno(m, msg_seqno(msg));
				tipc_link_tunnel_xmit(l_ptr, &tunnel_hdr, m,
						      msg_link_selector(m));
				pos += align(msg_size(m));
				m = (struct tipc_msg *)pos;
			}
		} else {
			tipc_link_tunnel_xmit(l_ptr, &tunnel_hdr, msg,
					      msg_link_selector(msg));
		}
	}
}
/* tipc_link_dup_queue_xmit(): A second link has become active. Tunnel a
 * duplicate of the first link's send queue via the new link. This way, we
 * are guaranteed that currently queued packets from a socket are delivered
 * before future traffic from the same socket, even if this is using the
 * new link. The last arriving copy of each duplicate packet is dropped at
 * the receiving end by the regular protocol check, so packet cardinality
 * and sequence order is preserved per sender/receiver socket pair.
 * Owner node is locked.
 */
void tipc_link_dup_queue_xmit(struct tipc_link *l_ptr,
			      struct tipc_link *tunnel)
{
	struct sk_buff *skb;
	struct tipc_msg tunnel_hdr;

	tipc_msg_init(link_own_addr(l_ptr), &tunnel_hdr, CHANGEOVER_PROTOCOL,
		      DUPLICATE_MSG, INT_H_SIZE, l_ptr->addr);
	msg_set_msgcnt(&tunnel_hdr, skb_queue_len(&l_ptr->outqueue));
	msg_set_bearer_id(&tunnel_hdr, l_ptr->peer_bearer_id);
	skb_queue_walk(&l_ptr->outqueue, skb) {
		struct sk_buff *outskb;
		struct tipc_msg *msg = buf_msg(skb);
		u32 length = msg_size(msg);

		if (msg_user(msg) == MSG_BUNDLER)
			msg_set_type(msg, CLOSED_MSG);
		msg_set_ack(msg, mod(l_ptr->next_in_no - 1));	/* Update */
		msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
		msg_set_size(&tunnel_hdr, length + INT_H_SIZE);
		outskb = tipc_buf_acquire(length + INT_H_SIZE);
		if (outskb == NULL) {
			pr_warn("%sunable to send duplicate msg\n",
				link_co_err);
			return;
		}
		skb_copy_to_linear_data(outskb, &tunnel_hdr, INT_H_SIZE);
		skb_copy_to_linear_data_offset(outskb, INT_H_SIZE, skb->data,
					       length);
		__tipc_link_xmit_skb(tunnel, outskb);
		if (!tipc_link_is_up(l_ptr))
			return;
	}
}
/**
 * buf_extract - extracts embedded TIPC message from another message
 * @skb: encapsulating message buffer
 * @from_pos: offset to extract from
 *
 * Returns a new message buffer containing an embedded message.  The
 * encapsulating buffer is left unchanged.
 */
static struct sk_buff *buf_extract(struct sk_buff *skb, u32 from_pos)
{
	struct tipc_msg *msg = (struct tipc_msg *)(skb->data + from_pos);
	u32 size = msg_size(msg);
	struct sk_buff *eb;

	eb = tipc_buf_acquire(size);
	if (eb)
		skb_copy_to_linear_data(eb, msg, size);
	return eb;
}
/* tipc_link_dup_rcv(): Receive a tunnelled DUPLICATE_MSG packet.
 * Owner node is locked.
 */
static void tipc_link_dup_rcv(struct tipc_link *l_ptr,
			      struct sk_buff *t_buf)
{
	struct sk_buff *buf;

	if (!tipc_link_is_up(l_ptr))
		return;

	buf = buf_extract(t_buf, INT_H_SIZE);
	if (buf == NULL) {
		pr_warn("%sfailed to extract inner dup pkt\n", link_co_err);
		return;
	}

	/* Add buffer to deferred queue, if applicable: */
	link_handle_out_of_seq_msg(l_ptr, buf);
}
/*  tipc_link_failover_rcv(): Receive a tunnelled ORIGINAL_MSG packet
 *  Owner node is locked.
 */
static struct sk_buff *tipc_link_failover_rcv(struct tipc_link *l_ptr,
					      struct sk_buff *t_buf)
{
	struct tipc_msg *t_msg = buf_msg(t_buf);
	struct sk_buff *buf = NULL;
	struct tipc_msg *msg;

	if (tipc_link_is_up(l_ptr))
		tipc_link_reset(l_ptr);

	/* First failover packet? */
	if (l_ptr->exp_msg_count == START_CHANGEOVER)
		l_ptr->exp_msg_count = msg_msgcnt(t_msg);

	/* Should there be an inner packet? */
	if (l_ptr->exp_msg_count) {
		l_ptr->exp_msg_count--;
		buf = buf_extract(t_buf, INT_H_SIZE);
		if (buf == NULL) {
			pr_warn("%sno inner failover pkt\n", link_co_err);
			goto exit;
		}
		msg = buf_msg(buf);

		if (less(msg_seqno(msg), l_ptr->reset_checkpoint)) {
			kfree_skb(buf);
			buf = NULL;
			goto exit;
		}
		if (msg_user(msg) == MSG_FRAGMENTER) {
			l_ptr->stats.recv_fragments++;
			tipc_buf_append(&l_ptr->reasm_buf, &buf);
		}
	}
exit:
	if ((!l_ptr->exp_msg_count) && (l_ptr->flags & LINK_STOPPED))
		tipc_link_delete(l_ptr);
	return buf;
}
/*  tipc_link_tunnel_rcv(): Receive a tunnelled packet, sent
 *  via other link as result of a failover (ORIGINAL_MSG) or
 *  a new active link (DUPLICATE_MSG). Failover packets are
 *  returned to the active link for delivery upwards.
 *  Owner node is locked.
 */
static int tipc_link_tunnel_rcv(struct tipc_node *n_ptr,
				struct sk_buff **buf)
{
	struct sk_buff *t_buf = *buf;
	struct tipc_link *l_ptr;
	struct tipc_msg *t_msg = buf_msg(t_buf);
	u32 bearer_id = msg_bearer_id(t_msg);

	*buf = NULL;

	if (bearer_id >= MAX_BEARERS)
		goto exit;

	l_ptr = n_ptr->links[bearer_id];
	if (!l_ptr)
		goto exit;

	if (msg_type(t_msg) == DUPLICATE_MSG)
		tipc_link_dup_rcv(l_ptr, t_buf);
	else if (msg_type(t_msg) == ORIGINAL_MSG)
		*buf = tipc_link_failover_rcv(l_ptr, t_buf);
	else
		pr_warn("%sunknown tunnel pkt received\n", link_co_err);
exit:
	kfree_skb(t_buf);
	return *buf != NULL;
}
static void link_set_supervision_props(struct tipc_link *l_ptr, u32 tol)
{
	unsigned long intv = ((tol / 4) > 500) ? 500 : tol / 4;

	if ((tol < TIPC_MIN_LINK_TOL) || (tol > TIPC_MAX_LINK_TOL))
		return;

	l_ptr->tolerance = tol;
	l_ptr->cont_intv = msecs_to_jiffies(intv);
	l_ptr->abort_limit = tol / (jiffies_to_msecs(l_ptr->cont_intv) / 4);
}
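/* Example: the default tolerance of 1500 ms yields a 375 ms continuity
 * interval and an abort limit of about 16 unanswered probes before the
 * link is declared failed.
 */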
void tipc_link_set_queue_limits(struct tipc_link *l_ptr, u32 window)
{
	/* Data messages from this node, including FIRST_FRAGM */
	l_ptr->queue_limit[TIPC_LOW_IMPORTANCE] = window;
	l_ptr->queue_limit[TIPC_MEDIUM_IMPORTANCE] = (window / 3) * 4;
	l_ptr->queue_limit[TIPC_HIGH_IMPORTANCE] = (window / 3) * 5;
	l_ptr->queue_limit[TIPC_CRITICAL_IMPORTANCE] = (window / 3) * 6;
	/* Transiting data messages, including FIRST_FRAGM */
	l_ptr->queue_limit[TIPC_LOW_IMPORTANCE + 4] = 300;
	l_ptr->queue_limit[TIPC_MEDIUM_IMPORTANCE + 4] = 600;
	l_ptr->queue_limit[TIPC_HIGH_IMPORTANCE + 4] = 900;
	l_ptr->queue_limit[TIPC_CRITICAL_IMPORTANCE + 4] = 1200;
	l_ptr->queue_limit[CONN_MANAGER] = 1200;
	l_ptr->queue_limit[CHANGEOVER_PROTOCOL] = 2500;
	l_ptr->queue_limit[NAME_DISTRIBUTOR] = 3000;
	/* FRAGMENT and LAST_FRAGMENT packets */
	l_ptr->queue_limit[MSG_FRAGMENTER] = 4000;
}
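/* Example: the default window of 50 packets yields backlog limits of
 * 50/64/80/96 packets for the four locally generated importance levels,
 * while the fixed limits above apply regardless of window size.
 */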
/* tipc_link_find_owner - locate owner node of link by link's name
 * @net: the applicable net namespace
 * @link_name: pointer to link name string
 * @bearer_id: pointer to index in 'node->links' array where the link was found.
 *
 * Returns pointer to node owning the link, or NULL if no matching link is found.
 */
static struct tipc_node *tipc_link_find_owner(struct net *net,
					      const char *link_name,
					      unsigned int *bearer_id)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_link *l_ptr;
	struct tipc_node *n_ptr;
	struct tipc_node *found_node = NULL;
	int i;

	*bearer_id = 0;
	rcu_read_lock();
	list_for_each_entry_rcu(n_ptr, &tn->node_list, list) {
		tipc_node_lock(n_ptr);
		for (i = 0; i < MAX_BEARERS; i++) {
			l_ptr = n_ptr->links[i];
			if (l_ptr && !strcmp(l_ptr->name, link_name)) {
				*bearer_id = i;
				found_node = n_ptr;
				break;
			}
		}
		tipc_node_unlock(n_ptr);
		if (found_node)
			break;
	}
	rcu_read_unlock();

	return found_node;
}
/**
 * link_reset_statistics - reset link statistics
 * @l_ptr: pointer to link
 */
static void link_reset_statistics(struct tipc_link *l_ptr)
{
	memset(&l_ptr->stats, 0, sizeof(l_ptr->stats));
	l_ptr->stats.sent_info = l_ptr->next_out_no;
	l_ptr->stats.recv_info = l_ptr->next_in_no;
}
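/* Seeding sent_info/recv_info with the current sequence numbers gives
 * later readouts a baseline, so traffic counts can be reported relative
 * to the moment the statistics were last reset.
 */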
static void link_print(struct tipc_link *l_ptr, const char *str)
{
	struct tipc_net *tn = net_generic(l_ptr->owner->net, tipc_net_id);
	struct tipc_bearer *b_ptr;

	rcu_read_lock();
	b_ptr = rcu_dereference_rtnl(tn->bearer_list[l_ptr->bearer_id]);
	if (b_ptr)
		pr_info("%s Link %x<%s>:", str, l_ptr->addr, b_ptr->name);
	rcu_read_unlock();

	if (link_working_unknown(l_ptr))
		pr_cont(":WU\n");
	else if (link_reset_reset(l_ptr))
		pr_cont(":RR\n");
	else if (link_reset_unknown(l_ptr))
		pr_cont(":RU\n");
	else if (link_working_working(l_ptr))
		pr_cont(":WW\n");
	else
		pr_cont("\n");
}
/* Parse and validate nested (link) properties valid for media, bearer and link
 */
int tipc_nl_parse_link_prop(struct nlattr *prop, struct nlattr *props[])
{
	int err;

	err = nla_parse_nested(props, TIPC_NLA_PROP_MAX, prop,
			       tipc_nl_prop_policy);
	if (err)
		return err;

	if (props[TIPC_NLA_PROP_PRIO]) {
		u32 prio;

		prio = nla_get_u32(props[TIPC_NLA_PROP_PRIO]);
		if (prio > TIPC_MAX_LINK_PRI)
			return -EINVAL;
	}

	if (props[TIPC_NLA_PROP_TOL]) {
		u32 tol;

		tol = nla_get_u32(props[TIPC_NLA_PROP_TOL]);
		if ((tol < TIPC_MIN_LINK_TOL) || (tol > TIPC_MAX_LINK_TOL))
			return -EINVAL;
	}

	if (props[TIPC_NLA_PROP_WIN]) {
		u32 win;

		win = nla_get_u32(props[TIPC_NLA_PROP_WIN]);
		if ((win < TIPC_MIN_LINK_WIN) || (win > TIPC_MAX_LINK_WIN))
			return -EINVAL;
	}

	return 0;
}
int tipc_nl_link_set(struct sk_buff *skb, struct genl_info *info)
{
	int err;
	int res = 0;
	unsigned int bearer_id;
	char *name;
	struct tipc_link *link;
	struct tipc_node *node;
	struct nlattr *attrs[TIPC_NLA_LINK_MAX + 1];
	struct net *net = sock_net(skb->sk);

	if (!info->attrs[TIPC_NLA_LINK])
		return -EINVAL;

	err = nla_parse_nested(attrs, TIPC_NLA_LINK_MAX,
			       info->attrs[TIPC_NLA_LINK],
			       tipc_nl_link_policy);
	if (err)
		return err;

	if (!attrs[TIPC_NLA_LINK_NAME])
		return -EINVAL;

	name = nla_data(attrs[TIPC_NLA_LINK_NAME]);

	node = tipc_link_find_owner(net, name, &bearer_id);
	if (!node)
		return -EINVAL;

	tipc_node_lock(node);

	link = node->links[bearer_id];
	if (!link) {
		res = -EINVAL;
		goto out;
	}

	if (attrs[TIPC_NLA_LINK_PROP]) {
		struct nlattr *props[TIPC_NLA_PROP_MAX + 1];

		err = tipc_nl_parse_link_prop(attrs[TIPC_NLA_LINK_PROP],
					      props);
		if (err) {
			res = err;
			goto out;
		}

		if (props[TIPC_NLA_PROP_TOL]) {
			u32 tol;

			tol = nla_get_u32(props[TIPC_NLA_PROP_TOL]);
			link_set_supervision_props(link, tol);
			tipc_link_proto_xmit(link, STATE_MSG, 0, 0, tol, 0, 0);
		}
		if (props[TIPC_NLA_PROP_PRIO]) {
			u32 prio;

			prio = nla_get_u32(props[TIPC_NLA_PROP_PRIO]);
			link->priority = prio;
			tipc_link_proto_xmit(link, STATE_MSG, 0, 0, 0, prio, 0);
		}
		if (props[TIPC_NLA_PROP_WIN]) {
			u32 win;

			win = nla_get_u32(props[TIPC_NLA_PROP_WIN]);
			tipc_link_set_queue_limits(link, win);
		}
	}

out:
	tipc_node_unlock(node);

	return res;
}
static int __tipc_nl_add_stats(struct sk_buff *skb, struct tipc_stats *s)
{
	int i;
	struct nlattr *stats;

	struct nla_map {
		u32 key;
		u32 val;
	};

	struct nla_map map[] = {
		{TIPC_NLA_STATS_RX_INFO, s->recv_info},
		{TIPC_NLA_STATS_RX_FRAGMENTS, s->recv_fragments},
		{TIPC_NLA_STATS_RX_FRAGMENTED, s->recv_fragmented},
		{TIPC_NLA_STATS_RX_BUNDLES, s->recv_bundles},
		{TIPC_NLA_STATS_RX_BUNDLED, s->recv_bundled},
		{TIPC_NLA_STATS_TX_INFO, s->sent_info},
		{TIPC_NLA_STATS_TX_FRAGMENTS, s->sent_fragments},
		{TIPC_NLA_STATS_TX_FRAGMENTED, s->sent_fragmented},
		{TIPC_NLA_STATS_TX_BUNDLES, s->sent_bundles},
		{TIPC_NLA_STATS_TX_BUNDLED, s->sent_bundled},
		{TIPC_NLA_STATS_MSG_PROF_TOT, (s->msg_length_counts) ?
			s->msg_length_counts : 1},
		{TIPC_NLA_STATS_MSG_LEN_CNT, s->msg_length_counts},
		{TIPC_NLA_STATS_MSG_LEN_TOT, s->msg_lengths_total},
		{TIPC_NLA_STATS_MSG_LEN_P0, s->msg_length_profile[0]},
		{TIPC_NLA_STATS_MSG_LEN_P1, s->msg_length_profile[1]},
		{TIPC_NLA_STATS_MSG_LEN_P2, s->msg_length_profile[2]},
		{TIPC_NLA_STATS_MSG_LEN_P3, s->msg_length_profile[3]},
		{TIPC_NLA_STATS_MSG_LEN_P4, s->msg_length_profile[4]},
		{TIPC_NLA_STATS_MSG_LEN_P5, s->msg_length_profile[5]},
		{TIPC_NLA_STATS_MSG_LEN_P6, s->msg_length_profile[6]},
		{TIPC_NLA_STATS_RX_STATES, s->recv_states},
		{TIPC_NLA_STATS_RX_PROBES, s->recv_probes},
		{TIPC_NLA_STATS_RX_NACKS, s->recv_nacks},
		{TIPC_NLA_STATS_RX_DEFERRED, s->deferred_recv},
		{TIPC_NLA_STATS_TX_STATES, s->sent_states},
		{TIPC_NLA_STATS_TX_PROBES, s->sent_probes},
		{TIPC_NLA_STATS_TX_NACKS, s->sent_nacks},
		{TIPC_NLA_STATS_TX_ACKS, s->sent_acks},
		{TIPC_NLA_STATS_RETRANSMITTED, s->retransmitted},
		{TIPC_NLA_STATS_DUPLICATES, s->duplicates},
		{TIPC_NLA_STATS_LINK_CONGS, s->link_congs},
		{TIPC_NLA_STATS_MAX_QUEUE, s->max_queue_sz},
		{TIPC_NLA_STATS_AVG_QUEUE, s->queue_sz_counts ?
			(s->accu_queue_sz / s->queue_sz_counts) : 0}
	};

	stats = nla_nest_start(skb, TIPC_NLA_LINK_STATS);
	if (!stats)
		return -EMSGSIZE;

	for (i = 0; i < ARRAY_SIZE(map); i++)
		if (nla_put_u32(skb, map[i].key, map[i].val))
			goto msg_full;

	nla_nest_end(skb, stats);

	return 0;
msg_full:
	nla_nest_cancel(skb, stats);

	return -EMSGSIZE;
}
/* Caller should hold appropriate locks to protect the link */
static int __tipc_nl_add_link(struct net *net, struct tipc_nl_msg *msg,
			      struct tipc_link *link)
{
	int err;
	void *hdr;
	struct nlattr *attrs;
	struct nlattr *prop;
	struct tipc_net *tn = net_generic(net, tipc_net_id);

	hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
			  NLM_F_MULTI, TIPC_NL_LINK_GET);
	if (!hdr)
		return -EMSGSIZE;

	attrs = nla_nest_start(msg->skb, TIPC_NLA_LINK);
	if (!attrs)
		goto msg_full;

	if (nla_put_string(msg->skb, TIPC_NLA_LINK_NAME, link->name))
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_DEST,
			tipc_cluster_mask(tn->own_addr)))
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_MTU, link->max_pkt))
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_RX, link->next_in_no))
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_TX, link->next_out_no))
		goto attr_msg_full;

	if (tipc_link_is_up(link))
		if (nla_put_flag(msg->skb, TIPC_NLA_LINK_UP))
			goto attr_msg_full;
	if (tipc_link_is_active(link))
		if (nla_put_flag(msg->skb, TIPC_NLA_LINK_ACTIVE))
			goto attr_msg_full;

	prop = nla_nest_start(msg->skb, TIPC_NLA_LINK_PROP);
	if (!prop)
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_PRIO, link->priority))
		goto prop_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_TOL, link->tolerance))
		goto prop_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_WIN,
			link->queue_limit[TIPC_LOW_IMPORTANCE]))
		goto prop_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_PRIO, link->priority))
		goto prop_msg_full;
	nla_nest_end(msg->skb, prop);

	err = __tipc_nl_add_stats(msg->skb, &link->stats);
	if (err)
		goto attr_msg_full;

	nla_nest_end(msg->skb, attrs);
	genlmsg_end(msg->skb, hdr);

	return 0;

prop_msg_full:
	nla_nest_cancel(msg->skb, prop);
attr_msg_full:
	nla_nest_cancel(msg->skb, attrs);
msg_full:
	genlmsg_cancel(msg->skb, hdr);

	return -EMSGSIZE;
}
/* Caller should hold node lock */
static int __tipc_nl_add_node_links(struct net *net, struct tipc_nl_msg *msg,
				    struct tipc_node *node, u32 *prev_link)
{
	u32 i;
	int err;

	for (i = *prev_link; i < MAX_BEARERS; i++) {
		*prev_link = i;

		if (!node->links[i])
			continue;

		err = __tipc_nl_add_link(net, msg, node->links[i]);
		if (err)
			return err;
	}
	*prev_link = 0;

	return 0;
}
int tipc_nl_link_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_node *node;
	struct tipc_nl_msg msg;
	u32 prev_node = cb->args[0];
	u32 prev_link = cb->args[1];
	int done = cb->args[2];
	int err;

	if (done)
		return 0;

	msg.skb = skb;
	msg.portid = NETLINK_CB(cb->skb).portid;
	msg.seq = cb->nlh->nlmsg_seq;

	rcu_read_lock();

	if (prev_node) {
		node = tipc_node_find(net, prev_node);
		if (!node) {
			/* We never set seq or call nl_dump_check_consistent()
			 * this means that setting prev_seq here will cause the
			 * consistence check to fail in the netlink callback
			 * handler. Resulting in the last NLMSG_DONE message
			 * having the NLM_F_DUMP_INTR flag set.
			 */
			cb->prev_seq = 1;
			goto out;
		}

		list_for_each_entry_continue_rcu(node, &tn->node_list,
						 list) {
			tipc_node_lock(node);
			err = __tipc_nl_add_node_links(net, &msg, node,
						       &prev_link);
			tipc_node_unlock(node);
			if (err)
				goto out;

			prev_node = node->addr;
		}
	} else {
		err = tipc_nl_add_bc_link(net, &msg);
		if (err)
			goto out;

		list_for_each_entry_rcu(node, &tn->node_list, list) {
			tipc_node_lock(node);
			err = __tipc_nl_add_node_links(net, &msg, node,
						       &prev_link);
			tipc_node_unlock(node);
			if (err)
				goto out;

			prev_node = node->addr;
		}
	}
	done = 1;
out:
	rcu_read_unlock();

	cb->args[0] = prev_node;
	cb->args[1] = prev_link;
	cb->args[2] = done;

	return skb->len;
}
int tipc_nl_link_get(struct sk_buff *skb, struct genl_info *info)
{
	struct net *net = genl_info_net(info);
	struct sk_buff *ans_skb;
	struct tipc_nl_msg msg;
	struct tipc_link *link;
	struct tipc_node *node;
	char *name;
	unsigned int bearer_id;
	int err;

	if (!info->attrs[TIPC_NLA_LINK_NAME])
		return -EINVAL;

	name = nla_data(info->attrs[TIPC_NLA_LINK_NAME]);
	node = tipc_link_find_owner(net, name, &bearer_id);
	if (!node)
		return -EINVAL;

	ans_skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!ans_skb)
		return -ENOMEM;

	msg.skb = ans_skb;
	msg.portid = info->snd_portid;
	msg.seq = info->snd_seq;

	tipc_node_lock(node);
	link = node->links[bearer_id];
	if (!link) {
		err = -EINVAL;
		goto err_out;
	}

	err = __tipc_nl_add_link(net, &msg, link);
	if (err)
		goto err_out;

	tipc_node_unlock(node);

	return genlmsg_reply(ans_skb, info);

err_out:
	tipc_node_unlock(node);
	nlmsg_free(ans_skb);

	return err;
}
int tipc_nl_link_reset_stats(struct sk_buff *skb, struct genl_info *info)
{
	int err;
	char *link_name;
	unsigned int bearer_id;
	struct tipc_link *link;
	struct tipc_node *node;
	struct nlattr *attrs[TIPC_NLA_LINK_MAX + 1];
	struct net *net = sock_net(skb->sk);

	if (!info->attrs[TIPC_NLA_LINK])
		return -EINVAL;

	err = nla_parse_nested(attrs, TIPC_NLA_LINK_MAX,
			       info->attrs[TIPC_NLA_LINK],
			       tipc_nl_link_policy);
	if (err)
		return err;

	if (!attrs[TIPC_NLA_LINK_NAME])
		return -EINVAL;

	link_name = nla_data(attrs[TIPC_NLA_LINK_NAME]);

	if (strcmp(link_name, tipc_bclink_name) == 0) {
		err = tipc_bclink_reset_stats(net);
		if (err)
			return err;
		return 0;
	}

	node = tipc_link_find_owner(net, link_name, &bearer_id);
	if (!node)
		return -EINVAL;

	tipc_node_lock(node);

	link = node->links[bearer_id];
	if (!link) {
		tipc_node_unlock(node);
		return -EINVAL;
	}

	link_reset_statistics(link);

	tipc_node_unlock(node);

	return 0;
}