/*
 * net/tipc/link.c: TIPC link code
 *
 * Copyright (c) 1996-2007, 2012-2015, Ericsson AB
 * Copyright (c) 2004-2007, 2010-2013, Wind River Systems
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
#include "core.h"
#include "link.h"
#include "bcast.h"
#include "socket.h"
#include "name_distr.h"
#include "discover.h"
#include "netlink.h"

#include <linux/pkt_sched.h>
/*
 * Error message prefixes
 */
static const char *link_co_err = "Link changeover error, ";
static const char *link_rst_msg = "Resetting link ";
static const char *link_unk_evt = "Unknown link event ";
static const struct nla_policy tipc_nl_link_policy[TIPC_NLA_LINK_MAX + 1] = {
	[TIPC_NLA_LINK_UNSPEC]		= { .type = NLA_UNSPEC },
	[TIPC_NLA_LINK_NAME] = {
		.type = NLA_STRING,
		.len = TIPC_MAX_LINK_NAME
	},
	[TIPC_NLA_LINK_MTU]		= { .type = NLA_U32 },
	[TIPC_NLA_LINK_BROADCAST]	= { .type = NLA_FLAG },
	[TIPC_NLA_LINK_UP]		= { .type = NLA_FLAG },
	[TIPC_NLA_LINK_ACTIVE]		= { .type = NLA_FLAG },
	[TIPC_NLA_LINK_PROP]		= { .type = NLA_NESTED },
	[TIPC_NLA_LINK_STATS]		= { .type = NLA_NESTED },
	[TIPC_NLA_LINK_RX]		= { .type = NLA_U32 },
	[TIPC_NLA_LINK_TX]		= { .type = NLA_U32 }
};
/* Properties valid for media, bearer and link */
static const struct nla_policy tipc_nl_prop_policy[TIPC_NLA_PROP_MAX + 1] = {
	[TIPC_NLA_PROP_UNSPEC]		= { .type = NLA_UNSPEC },
	[TIPC_NLA_PROP_PRIO]		= { .type = NLA_U32 },
	[TIPC_NLA_PROP_TOL]		= { .type = NLA_U32 },
	[TIPC_NLA_PROP_WIN]		= { .type = NLA_U32 }
};
/*
 * Out-of-range value for link session numbers
 */
#define INVALID_SESSION 0x10000
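
/* A link session number is carried in a 16-bit header field (it is
 * masked with 0xffff wherever it is set in this file), so the 17-bit
 * value above can never match a real session. It marks the peer session
 * as unknown until the first RESET/ACTIVATE message arrives.
 */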
/*
 * Link state events
 */
#define  STARTING_EVT    856384768	/* link processing trigger */
#define  TRAFFIC_MSG_EVT 560815u	/* rx'd ??? */
#define  TIMEOUT_EVT     560817u	/* link timer expired */
/*
 * The following two 'message types' are really just implementation
 * data conveniently stored in the message header.
 * They must not be considered part of the protocol
 */

/*
 * State value stored in 'exp_msg_count'
 */
#define START_CHANGEOVER 100000u
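
/* While failover is in progress, exp_msg_count counts down the tunneled
 * packets still expected from the failed link. START_CHANGEOVER is simply
 * too large to be a real packet count: it flags that failover has been
 * armed but the first tunnel packet, which carries the true count, has
 * not yet been received.
 */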
static void link_handle_out_of_seq_msg(struct tipc_link *link,
				       struct sk_buff *skb);
static void tipc_link_proto_rcv(struct tipc_link *link,
				struct sk_buff *skb);
static void link_set_supervision_props(struct tipc_link *l_ptr, u32 tol);
static void link_state_event(struct tipc_link *l_ptr, u32 event);
static void link_reset_statistics(struct tipc_link *l_ptr);
static void link_print(struct tipc_link *l_ptr, const char *str);
static void tipc_link_sync_xmit(struct tipc_link *l);
static void tipc_link_sync_rcv(struct tipc_node *n, struct sk_buff *buf);
static void tipc_link_input(struct tipc_link *l, struct sk_buff *skb);
static bool tipc_data_input(struct tipc_link *l, struct sk_buff *skb);
static bool tipc_link_failover_rcv(struct tipc_node *node,
				   struct sk_buff **skb);
/*
 * Simple link routines
 */
static unsigned int align(unsigned int i)
{
	return (i + 3) & ~3u;
}

static void tipc_link_release(struct kref *kref)
{
	kfree(container_of(kref, struct tipc_link, ref));
}

static void tipc_link_get(struct tipc_link *l_ptr)
{
	kref_get(&l_ptr->ref);
}

static void tipc_link_put(struct tipc_link *l_ptr)
{
	kref_put(&l_ptr->ref, tipc_link_release);
}

static struct tipc_link *tipc_parallel_link(struct tipc_link *l)
{
	if (l->owner->active_links[0] != l)
		return l->owner->active_links[0];
	return l->owner->active_links[1];
}
static void link_init_max_pkt(struct tipc_link *l_ptr)
{
	struct tipc_node *node = l_ptr->owner;
	struct tipc_net *tn = net_generic(node->net, tipc_net_id);
	struct tipc_bearer *b_ptr;
	u32 max_pkt;

	rcu_read_lock();
	b_ptr = rcu_dereference_rtnl(tn->bearer_list[l_ptr->bearer_id]);
	if (!b_ptr) {
		rcu_read_unlock();
		return;
	}
	max_pkt = (b_ptr->mtu & ~3);
	rcu_read_unlock();

	if (max_pkt > MAX_MSG_SIZE)
		max_pkt = MAX_MSG_SIZE;

	l_ptr->max_pkt_target = max_pkt;
	if (l_ptr->max_pkt_target < MAX_PKT_DEFAULT)
		l_ptr->max_pkt = l_ptr->max_pkt_target;
	else
		l_ptr->max_pkt = MAX_PKT_DEFAULT;

	l_ptr->max_pkt_probes = 0;
}
/*
 * Simple non-static link routines (i.e. referenced outside this file)
 */
int tipc_link_is_up(struct tipc_link *l_ptr)
{
	if (!l_ptr)
		return 0;
	return link_working_working(l_ptr) || link_working_unknown(l_ptr);
}

int tipc_link_is_active(struct tipc_link *l_ptr)
{
	return	(l_ptr->owner->active_links[0] == l_ptr) ||
		(l_ptr->owner->active_links[1] == l_ptr);
}
/**
 * link_timeout - handle expiration of link timer
 * @l_ptr: pointer to link
 */
static void link_timeout(unsigned long data)
{
	struct tipc_link *l_ptr = (struct tipc_link *)data;
	struct sk_buff *skb;

	tipc_node_lock(l_ptr->owner);

	/* update counters used in statistical profiling of send traffic */
	l_ptr->stats.accu_queue_sz += skb_queue_len(&l_ptr->transmq);
	l_ptr->stats.queue_sz_counts++;

	skb = skb_peek(&l_ptr->transmq);
	if (skb) {
		struct tipc_msg *msg = buf_msg(skb);
		u32 length = msg_size(msg);

		if ((msg_user(msg) == MSG_FRAGMENTER) &&
		    (msg_type(msg) == FIRST_FRAGMENT)) {
			length = msg_size(msg_get_wrapped(msg));
		}
		if (length) {
			l_ptr->stats.msg_lengths_total += length;
			l_ptr->stats.msg_length_counts++;
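			/* Histogram buckets: <=64, <=256, <=1024, <=4096,
			 * <=16384, <=32768 and above
			 */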
			if (length <= 64)
				l_ptr->stats.msg_length_profile[0]++;
			else if (length <= 256)
				l_ptr->stats.msg_length_profile[1]++;
			else if (length <= 1024)
				l_ptr->stats.msg_length_profile[2]++;
			else if (length <= 4096)
				l_ptr->stats.msg_length_profile[3]++;
			else if (length <= 16384)
				l_ptr->stats.msg_length_profile[4]++;
			else if (length <= 32768)
				l_ptr->stats.msg_length_profile[5]++;
			else
				l_ptr->stats.msg_length_profile[6]++;
		}
	}

	/* do all other link processing performed on a periodic basis */
	link_state_event(l_ptr, TIMEOUT_EVT);

	if (skb_queue_len(&l_ptr->backlogq))
		tipc_link_push_packets(l_ptr);

	tipc_node_unlock(l_ptr->owner);
	tipc_link_put(l_ptr);
}

static void link_set_timer(struct tipc_link *link, unsigned long time)
{
	/* The running timer holds a reference on the link; take one if the
	 * timer was not already pending.
	 */
	if (!mod_timer(&link->timer, jiffies + time))
		tipc_link_get(link);
}
/**
 * tipc_link_create - create a new link
 * @n_ptr: pointer to associated node
 * @b_ptr: pointer to associated bearer
 * @media_addr: media address to use when sending messages over link
 *
 * Returns pointer to link.
 */
struct tipc_link *tipc_link_create(struct tipc_node *n_ptr,
				   struct tipc_bearer *b_ptr,
				   const struct tipc_media_addr *media_addr)
{
	struct tipc_net *tn = net_generic(n_ptr->net, tipc_net_id);
	struct tipc_link *l_ptr;
	struct tipc_msg *msg;
	char *if_name;
	char addr_string[16];
	u32 peer = n_ptr->addr;

	if (n_ptr->link_cnt >= MAX_BEARERS) {
		tipc_addr_string_fill(addr_string, n_ptr->addr);
		pr_err("Attempt to establish %uth link to %s. Max %u allowed.\n",
		       n_ptr->link_cnt, addr_string, MAX_BEARERS);
		return NULL;
	}

	if (n_ptr->links[b_ptr->identity]) {
		tipc_addr_string_fill(addr_string, n_ptr->addr);
		pr_err("Attempt to establish second link on <%s> to %s\n",
		       b_ptr->name, addr_string);
		return NULL;
	}

	l_ptr = kzalloc(sizeof(*l_ptr), GFP_ATOMIC);
	if (!l_ptr) {
		pr_warn("Link creation failed, no memory\n");
		return NULL;
	}
	kref_init(&l_ptr->ref);
	l_ptr->addr = peer;
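	/* The link name has the form
	 * <own zone.cluster.node>:<own i/f>-<peer zone.cluster.node>:<peer i/f>.
	 * The peer interface part stays "unknown" until a RESET or ACTIVATE
	 * message from the peer supplies it.
	 */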
	if_name = strchr(b_ptr->name, ':') + 1;
	sprintf(l_ptr->name, "%u.%u.%u:%s-%u.%u.%u:unknown",
		tipc_zone(tn->own_addr), tipc_cluster(tn->own_addr),
		tipc_node(tn->own_addr), if_name,
		tipc_zone(peer), tipc_cluster(peer), tipc_node(peer));
	/* note: peer i/f name is updated by reset/activate message */
	memcpy(&l_ptr->media_addr, media_addr, sizeof(*media_addr));
	l_ptr->owner = n_ptr;
	l_ptr->checkpoint = 1;
	l_ptr->peer_session = INVALID_SESSION;
	l_ptr->bearer_id = b_ptr->identity;
	link_set_supervision_props(l_ptr, b_ptr->tolerance);
	l_ptr->state = RESET_UNKNOWN;

	l_ptr->pmsg = (struct tipc_msg *)&l_ptr->proto_msg;
	msg = l_ptr->pmsg;
	tipc_msg_init(tn->own_addr, msg, LINK_PROTOCOL, RESET_MSG, INT_H_SIZE,
		      l_ptr->addr);
	msg_set_size(msg, sizeof(l_ptr->proto_msg));
	msg_set_session(msg, (tn->random & 0xffff));
	msg_set_bearer_id(msg, b_ptr->identity);
	strcpy((char *)msg_data(msg), if_name);
	l_ptr->net_plane = b_ptr->net_plane;
	link_init_max_pkt(l_ptr);
	l_ptr->priority = b_ptr->priority;
	tipc_link_set_queue_limits(l_ptr, b_ptr->window);
	l_ptr->next_out_no = 1;
	__skb_queue_head_init(&l_ptr->transmq);
	__skb_queue_head_init(&l_ptr->backlogq);
	__skb_queue_head_init(&l_ptr->deferdq);
	skb_queue_head_init(&l_ptr->wakeupq);
	skb_queue_head_init(&l_ptr->inputq);
	skb_queue_head_init(&l_ptr->namedq);
	link_reset_statistics(l_ptr);
	tipc_node_attach_link(n_ptr, l_ptr);
	setup_timer(&l_ptr->timer, link_timeout, (unsigned long)l_ptr);
	link_state_event(l_ptr, STARTING_EVT);

	return l_ptr;
}
/**
 * tipc_link_delete - Conditional deletion of link.
 *                    If timer still running, real delete is done when it expires
 * @link: link to be deleted
 */
void tipc_link_delete(struct tipc_link *link)
{
	tipc_link_reset_fragments(link);
	tipc_node_detach_link(link->owner, link);
	tipc_link_put(link);
}
void tipc_link_delete_list(struct net *net, unsigned int bearer_id,
			   bool shutting_down)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_link *link;
	struct tipc_node *node;
	bool del_link;

	rcu_read_lock();
	list_for_each_entry_rcu(node, &tn->node_list, list) {
		tipc_node_lock(node);
		link = node->links[bearer_id];
		if (!link) {
			tipc_node_unlock(node);
			continue;
		}
		del_link = !tipc_link_is_up(link) && !link->exp_msg_count;
		tipc_link_reset(link);
		if (del_timer(&link->timer))
			tipc_link_put(link);
		link->flags |= LINK_STOPPED;
		/* Delete link now, or when failover is finished: */
		if (shutting_down || !tipc_node_is_up(node) || del_link)
			tipc_link_delete(link);
		tipc_node_unlock(node);
	}
	rcu_read_unlock();
}
/**
 * link_schedule_user - schedule a message sender for wakeup after congestion
 * @link: congested link
 * @list: message that was attempted sent
 * Create pseudo msg to send back to user when congestion abates
 * Only consumes message if there is an error
 */
static int link_schedule_user(struct tipc_link *link, struct sk_buff_head *list)
{
	struct tipc_msg *msg = buf_msg(skb_peek(list));
	int imp = msg_importance(msg);
	u32 oport = msg_origport(msg);
	u32 addr = link_own_addr(link);
	struct sk_buff *skb;

	/* This really cannot happen... */
	if (unlikely(imp > TIPC_CRITICAL_IMPORTANCE)) {
		pr_warn("%s<%s>, send queue full", link_rst_msg, link->name);
		tipc_link_reset(link);
		goto err;
	}
	/* Non-blocking sender: */
	if (TIPC_SKB_CB(skb_peek(list))->wakeup_pending)
		return -ELINKCONG;

	/* Create and schedule wakeup pseudo message */
	skb = tipc_msg_create(SOCK_WAKEUP, 0, INT_H_SIZE, 0,
			      addr, addr, oport, 0, 0);
	if (!skb)
		goto err;
	TIPC_SKB_CB(skb)->chain_sz = skb_queue_len(list);
	TIPC_SKB_CB(skb)->chain_imp = imp;
	skb_queue_tail(&link->wakeupq, skb);
	link->stats.link_congs++;
	return -ELINKCONG;
err:
	__skb_queue_purge(list);
	return -ENOBUFS;
}
/**
 * link_prepare_wakeup - prepare users for wakeup after congestion
 * @link: congested link
 * Move a number of waiting users, as permitted by available space in
 * the send queue, from link wait queue to node wait queue for wakeup
 */
void link_prepare_wakeup(struct tipc_link *l)
{
	int pnd[TIPC_SYSTEM_IMPORTANCE + 1] = {0,};
	int imp, lim;
	struct sk_buff *skb, *tmp;
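
	/* Wake pending senders in arrival order, but stop as soon as waking
	 * one more sender at a given importance level would push that level's
	 * backlog past its limit plus one full send window.
	 */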
	skb_queue_walk_safe(&l->wakeupq, skb, tmp) {
		imp = TIPC_SKB_CB(skb)->chain_imp;
		lim = l->window + l->backlog[imp].limit;
		pnd[imp] += TIPC_SKB_CB(skb)->chain_sz;
		if ((pnd[imp] + l->backlog[imp].len) >= lim)
			break;
		skb_unlink(skb, &l->wakeupq);
		skb_queue_tail(&l->inputq, skb);
		l->owner->inputq = &l->inputq;
		l->owner->action_flags |= TIPC_MSG_EVT;
	}
}
/**
 * tipc_link_reset_fragments - purge link's inbound message fragments queue
 * @l_ptr: pointer to link
 */
void tipc_link_reset_fragments(struct tipc_link *l_ptr)
{
	kfree_skb(l_ptr->reasm_buf);
	l_ptr->reasm_buf = NULL;
}
static void tipc_link_purge_backlog(struct tipc_link *l)
{
	__skb_queue_purge(&l->backlogq);
	l->backlog[TIPC_LOW_IMPORTANCE].len = 0;
	l->backlog[TIPC_MEDIUM_IMPORTANCE].len = 0;
	l->backlog[TIPC_HIGH_IMPORTANCE].len = 0;
	l->backlog[TIPC_CRITICAL_IMPORTANCE].len = 0;
	l->backlog[TIPC_SYSTEM_IMPORTANCE].len = 0;
}
/**
 * tipc_link_purge_queues - purge all pkt queues associated with link
 * @l_ptr: pointer to link
 */
void tipc_link_purge_queues(struct tipc_link *l_ptr)
{
	__skb_queue_purge(&l_ptr->deferdq);
	__skb_queue_purge(&l_ptr->transmq);
	tipc_link_purge_backlog(l_ptr);
	tipc_link_reset_fragments(l_ptr);
}
void tipc_link_reset(struct tipc_link *l_ptr)
{
	u32 prev_state = l_ptr->state;
	u32 checkpoint = l_ptr->next_in_no;
	int was_active_link = tipc_link_is_active(l_ptr);
	struct tipc_node *owner = l_ptr->owner;

	msg_set_session(l_ptr->pmsg, ((msg_session(l_ptr->pmsg) + 1) & 0xffff));

	/* Link is down, accept any session */
	l_ptr->peer_session = INVALID_SESSION;

	/* Prepare for max packet size negotiation */
	link_init_max_pkt(l_ptr);

	l_ptr->state = RESET_UNKNOWN;

	if ((prev_state == RESET_UNKNOWN) || (prev_state == RESET_RESET))
		return;

	tipc_node_link_down(l_ptr->owner, l_ptr);
	tipc_bearer_remove_dest(owner->net, l_ptr->bearer_id, l_ptr->addr);
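
	/* If the peer is still reachable over a parallel link, prepare for
	 * failover: remember the next expected sequence number so that
	 * already delivered tunneled packets can be discarded, and arm
	 * tunnel packet accounting via exp_msg_count.
	 */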
	if (was_active_link && tipc_node_active_links(l_ptr->owner)) {
		l_ptr->reset_checkpoint = checkpoint;
		l_ptr->exp_msg_count = START_CHANGEOVER;
	}

	/* Clean up all queues, except inputq: */
	__skb_queue_purge(&l_ptr->transmq);
	__skb_queue_purge(&l_ptr->deferdq);
	if (!owner->inputq)
		owner->inputq = &l_ptr->inputq;
	skb_queue_splice_init(&l_ptr->wakeupq, owner->inputq);
	if (!skb_queue_empty(owner->inputq))
		owner->action_flags |= TIPC_MSG_EVT;
	tipc_link_purge_backlog(l_ptr);
	l_ptr->rcv_unacked = 0;
	l_ptr->checkpoint = 1;
	l_ptr->next_out_no = 1;
	l_ptr->fsm_msg_cnt = 0;
	l_ptr->stale_count = 0;
	link_reset_statistics(l_ptr);
}
void tipc_link_reset_list(struct net *net, unsigned int bearer_id)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_link *l_ptr;
	struct tipc_node *n_ptr;

	rcu_read_lock();
	list_for_each_entry_rcu(n_ptr, &tn->node_list, list) {
		tipc_node_lock(n_ptr);
		l_ptr = n_ptr->links[bearer_id];
		if (l_ptr)
			tipc_link_reset(l_ptr);
		tipc_node_unlock(n_ptr);
	}
	rcu_read_unlock();
}
static void link_activate(struct tipc_link *link)
{
	struct tipc_node *node = link->owner;

	link->next_in_no = 1;
	link->stats.recv_info = 1;
	tipc_node_link_up(node, link);
	tipc_bearer_add_dest(node->net, link->bearer_id, link->addr);
}
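
/*
 * The link FSM below cycles through four states: WORKING_WORKING (traffic
 * or probe replies recently seen), WORKING_UNKNOWN (probing a silent peer),
 * RESET_UNKNOWN (this endpoint reset, peer state unknown) and RESET_RESET
 * (both endpoints known to be reset). Timer events drive probing and the
 * abort limit; RESET/ACTIVATE protocol messages drive re-establishment.
 */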
/**
 * link_state_event - link finite state machine
 * @l_ptr: pointer to link
 * @event: state machine event to process
 */
static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
{
	struct tipc_link *other;
	unsigned long cont_intv = l_ptr->cont_intv;

	if (l_ptr->flags & LINK_STOPPED)
		return;

	if (!(l_ptr->flags & LINK_STARTED) && (event != STARTING_EVT))
		return;	/* Not yet. */

	/* Check whether changeover is going on */
	if (l_ptr->exp_msg_count) {
		if (event == TIMEOUT_EVT)
			link_set_timer(l_ptr, cont_intv);
		return;
	}

	switch (l_ptr->state) {
	case WORKING_WORKING:
		switch (event) {
		case TRAFFIC_MSG_EVT:
		case ACTIVATE_MSG:
			break;
		case TIMEOUT_EVT:
			if (l_ptr->next_in_no != l_ptr->checkpoint) {
				l_ptr->checkpoint = l_ptr->next_in_no;
				if (tipc_bclink_acks_missing(l_ptr->owner)) {
					tipc_link_proto_xmit(l_ptr, STATE_MSG,
							     0, 0, 0, 0, 0);
					l_ptr->fsm_msg_cnt++;
				} else if (l_ptr->max_pkt < l_ptr->max_pkt_target) {
					tipc_link_proto_xmit(l_ptr, STATE_MSG,
							     1, 0, 0, 0, 0);
					l_ptr->fsm_msg_cnt++;
				}
				link_set_timer(l_ptr, cont_intv);
				break;
			}
			l_ptr->state = WORKING_UNKNOWN;
			l_ptr->fsm_msg_cnt = 0;
			tipc_link_proto_xmit(l_ptr, STATE_MSG, 1, 0, 0, 0, 0);
			l_ptr->fsm_msg_cnt++;
			link_set_timer(l_ptr, cont_intv / 4);
			break;
		case RESET_MSG:
			pr_debug("%s<%s>, requested by peer\n",
				 link_rst_msg, l_ptr->name);
			tipc_link_reset(l_ptr);
			l_ptr->state = RESET_RESET;
			l_ptr->fsm_msg_cnt = 0;
			tipc_link_proto_xmit(l_ptr, ACTIVATE_MSG,
					     0, 0, 0, 0, 0);
			l_ptr->fsm_msg_cnt++;
			link_set_timer(l_ptr, cont_intv);
			break;
		default:
			pr_debug("%s%u in WW state\n", link_unk_evt, event);
		}
		break;
	case WORKING_UNKNOWN:
		switch (event) {
		case TRAFFIC_MSG_EVT:
		case ACTIVATE_MSG:
			l_ptr->state = WORKING_WORKING;
			l_ptr->fsm_msg_cnt = 0;
			link_set_timer(l_ptr, cont_intv);
			break;
		case RESET_MSG:
			pr_debug("%s<%s>, requested by peer while probing\n",
				 link_rst_msg, l_ptr->name);
			tipc_link_reset(l_ptr);
			l_ptr->state = RESET_RESET;
			l_ptr->fsm_msg_cnt = 0;
			tipc_link_proto_xmit(l_ptr, ACTIVATE_MSG,
					     0, 0, 0, 0, 0);
			l_ptr->fsm_msg_cnt++;
			link_set_timer(l_ptr, cont_intv);
			break;
		case TIMEOUT_EVT:
			if (l_ptr->next_in_no != l_ptr->checkpoint) {
				l_ptr->state = WORKING_WORKING;
				l_ptr->fsm_msg_cnt = 0;
				l_ptr->checkpoint = l_ptr->next_in_no;
				if (tipc_bclink_acks_missing(l_ptr->owner)) {
					tipc_link_proto_xmit(l_ptr, STATE_MSG,
							     0, 0, 0, 0, 0);
					l_ptr->fsm_msg_cnt++;
				}
				link_set_timer(l_ptr, cont_intv);
			} else if (l_ptr->fsm_msg_cnt < l_ptr->abort_limit) {
				tipc_link_proto_xmit(l_ptr, STATE_MSG,
						     1, 0, 0, 0, 0);
				l_ptr->fsm_msg_cnt++;
				link_set_timer(l_ptr, cont_intv / 4);
			} else {	/* Link has failed */
				pr_debug("%s<%s>, peer not responding\n",
					 link_rst_msg, l_ptr->name);
				tipc_link_reset(l_ptr);
				l_ptr->state = RESET_UNKNOWN;
				l_ptr->fsm_msg_cnt = 0;
				tipc_link_proto_xmit(l_ptr, RESET_MSG,
						     0, 0, 0, 0, 0);
				l_ptr->fsm_msg_cnt++;
				link_set_timer(l_ptr, cont_intv);
			}
			break;
		default:
			pr_err("%s%u in WU state\n", link_unk_evt, event);
		}
		break;
	case RESET_UNKNOWN:
		switch (event) {
		case TRAFFIC_MSG_EVT:
			break;
		case ACTIVATE_MSG:
			other = l_ptr->owner->active_links[0];
			if (other && link_working_unknown(other))
				break;
			l_ptr->state = WORKING_WORKING;
			l_ptr->fsm_msg_cnt = 0;
			link_activate(l_ptr);
			tipc_link_proto_xmit(l_ptr, STATE_MSG, 1, 0, 0, 0, 0);
			l_ptr->fsm_msg_cnt++;
			if (l_ptr->owner->working_links == 1)
				tipc_link_sync_xmit(l_ptr);
			link_set_timer(l_ptr, cont_intv);
			break;
		case RESET_MSG:
			l_ptr->state = RESET_RESET;
			l_ptr->fsm_msg_cnt = 0;
			tipc_link_proto_xmit(l_ptr, ACTIVATE_MSG,
					     1, 0, 0, 0, 0);
			l_ptr->fsm_msg_cnt++;
			link_set_timer(l_ptr, cont_intv);
			break;
		case STARTING_EVT:
			l_ptr->flags |= LINK_STARTED;
			l_ptr->fsm_msg_cnt++;
			link_set_timer(l_ptr, cont_intv);
			break;
		case TIMEOUT_EVT:
			tipc_link_proto_xmit(l_ptr, RESET_MSG, 0, 0, 0, 0, 0);
			l_ptr->fsm_msg_cnt++;
			link_set_timer(l_ptr, cont_intv);
			break;
		default:
			pr_err("%s%u in RU state\n", link_unk_evt, event);
		}
		break;
	case RESET_RESET:
		switch (event) {
		case TRAFFIC_MSG_EVT:
		case ACTIVATE_MSG:
			other = l_ptr->owner->active_links[0];
			if (other && link_working_unknown(other))
				break;
			l_ptr->state = WORKING_WORKING;
			l_ptr->fsm_msg_cnt = 0;
			link_activate(l_ptr);
			tipc_link_proto_xmit(l_ptr, STATE_MSG, 1, 0, 0, 0, 0);
			l_ptr->fsm_msg_cnt++;
			if (l_ptr->owner->working_links == 1)
				tipc_link_sync_xmit(l_ptr);
			link_set_timer(l_ptr, cont_intv);
			break;
		case RESET_MSG:
			break;
		case TIMEOUT_EVT:
			tipc_link_proto_xmit(l_ptr, ACTIVATE_MSG,
					     0, 0, 0, 0, 0);
			l_ptr->fsm_msg_cnt++;
			link_set_timer(l_ptr, cont_intv);
			break;
		default:
			pr_err("%s%u in RR state\n", link_unk_evt, event);
		}
		break;
	default:
		pr_err("Unknown link state %u/%u\n", l_ptr->state, event);
	}
}
/**
 * __tipc_link_xmit(): same as tipc_link_xmit, but destlink is known & locked
 * @link: link to use
 * @list: chain of buffers containing message
 *
 * Consumes the buffer chain, except when returning -ELINKCONG,
 * since the caller then may want to make more send attempts.
 * Returns 0 if success, or errno: -ELINKCONG, -EMSGSIZE or -ENOBUFS
 * Messages at TIPC_SYSTEM_IMPORTANCE are always accepted
 */
int __tipc_link_xmit(struct net *net, struct tipc_link *link,
		     struct sk_buff_head *list)
{
	struct tipc_msg *msg = buf_msg(skb_peek(list));
	unsigned int maxwin = link->window;
	unsigned int imp = msg_importance(msg);
	uint mtu = link->max_pkt;
	uint ack = mod(link->next_in_no - 1);
	uint seqno = link->next_out_no;
	uint bc_last_in = link->owner->bclink.last_in;
	struct tipc_media_addr *addr = &link->media_addr;
	struct sk_buff_head *transmq = &link->transmq;
	struct sk_buff_head *backlogq = &link->backlogq;
	struct sk_buff *skb, *tmp;

	/* Match backlog limit against msg importance: */
	if (unlikely(link->backlog[imp].len >= link->backlog[imp].limit))
		return link_schedule_user(link, list);

	if (unlikely(msg_size(msg) > mtu)) {
		__skb_queue_purge(list);
		return -EMSGSIZE;
	}
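
	/* Each packet below takes one of three paths: sent immediately while
	 * the transmit window has room, piggy-backed onto the bundle at the
	 * backlog tail if it fits, or appended to the backlog (wrapped in a
	 * fresh bundle header when possible) to be pushed out later.
	 */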
765 /* Prepare each packet for sending, and add to relevant queue: */
766 skb_queue_walk_safe(list, skb, tmp) {
767 __skb_unlink(skb, list);
769 msg_set_seqno(msg, seqno);
770 msg_set_ack(msg, ack);
771 msg_set_bcast_ack(msg, bc_last_in);
773 if (likely(skb_queue_len(transmq) < maxwin)) {
774 __skb_queue_tail(transmq, skb);
775 tipc_bearer_send(net, link->bearer_id, skb, addr);
776 link->rcv_unacked = 0;
780 if (tipc_msg_bundle(skb_peek_tail(backlogq), skb, mtu)) {
781 link->stats.sent_bundled++;
784 if (tipc_msg_make_bundle(&skb, mtu, link->addr)) {
785 link->stats.sent_bundled++;
786 link->stats.sent_bundles++;
787 imp = msg_importance(buf_msg(skb));
789 __skb_queue_tail(backlogq, skb);
790 link->backlog[imp].len++;
793 link->next_out_no = seqno;
static void skb2list(struct sk_buff *skb, struct sk_buff_head *list)
{
	skb_queue_head_init(list);
	__skb_queue_tail(list, skb);
}

static int __tipc_link_xmit_skb(struct tipc_link *link, struct sk_buff *skb)
{
	struct sk_buff_head head;

	skb2list(skb, &head);
	return __tipc_link_xmit(link->owner->net, link, &head);
}
/* tipc_link_xmit_skb(): send single buffer to destination
 * Buffers sent via this function are generally TIPC_SYSTEM_IMPORTANCE
 * messages, which will not be rejected
 * The only exception is datagram messages rerouted after secondary
 * lookup, which are rare and safe to dispose of anyway.
 * TODO: Return real return value, and let callers use
 * tipc_wait_for_sendpkt() where applicable
 */
int tipc_link_xmit_skb(struct net *net, struct sk_buff *skb, u32 dnode,
		       u32 selector)
{
	struct sk_buff_head head;
	int rc;

	skb2list(skb, &head);
	rc = tipc_link_xmit(net, &head, dnode, selector);
	if (rc == -ELINKCONG)
		kfree_skb(skb);
	return rc;
}
/**
 * tipc_link_xmit() is the general link level function for message sending
 * @net: the applicable net namespace
 * @list: chain of buffers containing message
 * @dnode: address of destination node
 * @selector: a number used for deterministic link selection
 * Consumes the buffer chain, except when returning -ELINKCONG
 * Returns 0 if success, otherwise errno: -ELINKCONG,-EHOSTUNREACH,-EMSGSIZE
 */
int tipc_link_xmit(struct net *net, struct sk_buff_head *list, u32 dnode,
		   u32 selector)
{
	struct tipc_link *link = NULL;
	struct tipc_node *node;
	int rc = -EHOSTUNREACH;

	node = tipc_node_find(net, dnode);
	if (node) {
		tipc_node_lock(node);
		link = node->active_links[selector & 1];
		if (link)
			rc = __tipc_link_xmit(net, link, list);
		tipc_node_unlock(node);
		tipc_node_put(node);
	}
	if (link)
		return rc;

	if (likely(in_own_node(net, dnode))) {
		tipc_sk_rcv(net, list);
		return 0;
	}

	__skb_queue_purge(list);
	return rc;
}
/*
 * tipc_link_sync_xmit - synchronize broadcast link endpoints.
 *
 * Give a newly added peer node the sequence number where it should
 * start receiving and acking broadcast packets.
 *
 * Called with node locked
 */
static void tipc_link_sync_xmit(struct tipc_link *link)
{
	struct sk_buff *skb;
	struct tipc_msg *msg;

	skb = tipc_buf_acquire(INT_H_SIZE);
	if (!skb)
		return;

	msg = buf_msg(skb);
	tipc_msg_init(link_own_addr(link), msg, BCAST_PROTOCOL, STATE_MSG,
		      INT_H_SIZE, link->addr);
	msg_set_last_bcast(msg, link->owner->bclink.acked);
	__tipc_link_xmit_skb(link, skb);
}
/*
 * tipc_link_sync_rcv - synchronize broadcast link endpoints.
 * Receive the sequence number where we should start receiving and
 * acking broadcast packets from a newly added peer node, and open
 * up for reception of such packets.
 *
 * Called with node locked
 */
static void tipc_link_sync_rcv(struct tipc_node *n, struct sk_buff *buf)
{
	struct tipc_msg *msg = buf_msg(buf);

	n->bclink.last_sent = n->bclink.last_in = msg_last_bcast(msg);
	n->bclink.recv_permitted = true;
	kfree_skb(buf);
}
/*
 * tipc_link_push_packets - push unsent packets to bearer
 *
 * Push out the unsent messages of a link where congestion
 * has abated. Called with node locked.
 */
void tipc_link_push_packets(struct tipc_link *link)
{
	struct sk_buff *skb;
	struct tipc_msg *msg;
	unsigned int ack = mod(link->next_in_no - 1);

	while (skb_queue_len(&link->transmq) < link->window) {
		skb = __skb_dequeue(&link->backlogq);
		if (!skb)
			break;
		msg = buf_msg(skb);
		link->backlog[msg_importance(msg)].len--;
		msg_set_ack(msg, ack);
		msg_set_bcast_ack(msg, link->owner->bclink.last_in);
		link->rcv_unacked = 0;
		__skb_queue_tail(&link->transmq, skb);
		tipc_bearer_send(link->owner->net, link->bearer_id,
				 skb, &link->media_addr);
	}
}
void tipc_link_reset_all(struct tipc_node *node)
{
	char addr_string[16];
	u32 i;

	tipc_node_lock(node);

	pr_warn("Resetting all links to %s\n",
		tipc_addr_string_fill(addr_string, node->addr));

	for (i = 0; i < MAX_BEARERS; i++) {
		if (node->links[i]) {
			link_print(node->links[i], "Resetting link\n");
			tipc_link_reset(node->links[i]);
		}
	}

	tipc_node_unlock(node);
}
static void link_retransmit_failure(struct tipc_link *l_ptr,
				    struct sk_buff *buf)
{
	struct tipc_msg *msg = buf_msg(buf);
	struct net *net = l_ptr->owner->net;

	pr_warn("Retransmission failure on link <%s>\n", l_ptr->name);

	if (l_ptr->addr) {
		/* Handle failure on standard link */
		link_print(l_ptr, "Resetting link\n");
		tipc_link_reset(l_ptr);
	} else {
		/* Handle failure on broadcast link */
		struct tipc_node *n_ptr;
		char addr_string[16];

		pr_info("Msg seq number: %u, ", msg_seqno(msg));
		pr_cont("Outstanding acks: %lu\n",
			(unsigned long) TIPC_SKB_CB(buf)->handle);

		n_ptr = tipc_bclink_retransmit_to(net);

		tipc_addr_string_fill(addr_string, n_ptr->addr);
		pr_info("Broadcast link info for %s\n", addr_string);
		pr_info("Reception permitted: %d, Acked: %u\n",
			n_ptr->bclink.recv_permitted,
			n_ptr->bclink.acked);
		pr_info("Last in: %u, Oos state: %u, Last sent: %u\n",
			n_ptr->bclink.last_in,
			n_ptr->bclink.oos_state,
			n_ptr->bclink.last_sent);

		n_ptr->action_flags |= TIPC_BCAST_RESET;
		l_ptr->stale_count = 0;
	}
}
void tipc_link_retransmit(struct tipc_link *l_ptr, struct sk_buff *skb,
			  u32 retransmits)
{
	struct tipc_msg *msg;

	if (!skb)
		return;

	msg = buf_msg(skb);

	/* Detect repeated retransmit failures */
	if (l_ptr->last_retransmitted == msg_seqno(msg)) {
		if (++l_ptr->stale_count > 100) {
			link_retransmit_failure(l_ptr, skb);
			return;
		}
	} else {
		l_ptr->last_retransmitted = msg_seqno(msg);
		l_ptr->stale_count = 1;
	}

	skb_queue_walk_from(&l_ptr->transmq, skb) {
		if (!retransmits)
			break;
		msg = buf_msg(skb);
		msg_set_ack(msg, mod(l_ptr->next_in_no - 1));
		msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
		tipc_bearer_send(l_ptr->owner->net, l_ptr->bearer_id, skb,
				 &l_ptr->media_addr);
		retransmits--;
		l_ptr->stats.retransmitted++;
	}
}
/* link_synch(): check if all packets arrived before the synch
 * point have been consumed
 * Returns true if the parallel links are synched, otherwise false
 */
static bool link_synch(struct tipc_link *l)
{
	unsigned int post_synch;
	struct tipc_link *pl;

	pl = tipc_parallel_link(l);
	if (pl == l)
		goto synched;

	/* Was last pre-synch packet added to input queue ? */
	if (less_eq(pl->next_in_no, l->synch_point))
		return false;

	/* Is it still in the input queue ? */
	post_synch = mod(pl->next_in_no - l->synch_point) - 1;
	if (skb_queue_len(&pl->inputq) > post_synch)
		return false;
synched:
	l->flags &= ~LINK_SYNCHING;
	return true;
}
static void link_retrieve_defq(struct tipc_link *link,
			       struct sk_buff_head *list)
{
	u32 seq_no;

	if (skb_queue_empty(&link->deferdq))
		return;

	seq_no = buf_seqno(skb_peek(&link->deferdq));
	if (seq_no == mod(link->next_in_no))
		skb_queue_splice_tail_init(&link->deferdq, list);
}
/**
 * tipc_rcv - process TIPC packets/messages arriving from off-node
 * @net: the applicable net namespace
 * @skb: TIPC packet
 * @b_ptr: pointer to bearer message arrived on
 *
 * Invoked with no locks held. Bearer pointer must point to a valid bearer
 * structure (i.e. cannot be NULL), but bearer can be inactive.
 */
void tipc_rcv(struct net *net, struct sk_buff *skb, struct tipc_bearer *b_ptr)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct sk_buff_head head;
	struct tipc_node *n_ptr;
	struct tipc_link *l_ptr;
	struct sk_buff *skb1, *tmp;
	struct tipc_msg *msg;
	u32 seq_no;
	u32 ackd;
	u32 released;

	skb2list(skb, &head);

	while ((skb = __skb_dequeue(&head))) {
		/* Ensure message is well-formed */
		if (unlikely(!tipc_msg_validate(skb)))
			goto discard;

		/* Handle arrival of a non-unicast link message */
		msg = buf_msg(skb);
		if (unlikely(msg_non_seq(msg))) {
			if (msg_user(msg) == LINK_CONFIG)
				tipc_disc_rcv(net, skb, b_ptr);
			else
				tipc_bclink_rcv(net, skb);
			continue;
		}

		/* Discard unicast link messages destined for another node */
		if (unlikely(!msg_short(msg) &&
			     (msg_destnode(msg) != tn->own_addr)))
			goto discard;

		/* Locate neighboring node that sent message */
		n_ptr = tipc_node_find(net, msg_prevnode(msg));
		if (unlikely(!n_ptr))
			goto discard;

		tipc_node_lock(n_ptr);
		/* Locate unicast link endpoint that should handle message */
		l_ptr = n_ptr->links[b_ptr->identity];
		if (unlikely(!l_ptr))
			goto unlock;

		/* Verify that communication with node is currently allowed */
		if ((n_ptr->action_flags & TIPC_WAIT_PEER_LINKS_DOWN) &&
		    msg_user(msg) == LINK_PROTOCOL &&
		    (msg_type(msg) == RESET_MSG ||
		    msg_type(msg) == ACTIVATE_MSG) &&
		    !msg_redundant_link(msg))
			n_ptr->action_flags &= ~TIPC_WAIT_PEER_LINKS_DOWN;

		if (tipc_node_blocked(n_ptr))
			goto unlock;

		/* Validate message sequence number info */
		seq_no = msg_seqno(msg);
		ackd = msg_ack(msg);

		/* Release acked messages */
		if (unlikely(n_ptr->bclink.acked != msg_bcast_ack(msg)))
			tipc_bclink_acknowledge(n_ptr, msg_bcast_ack(msg));

		released = 0;
		skb_queue_walk_safe(&l_ptr->transmq, skb1, tmp) {
			if (more(buf_seqno(skb1), ackd))
				break;
			__skb_unlink(skb1, &l_ptr->transmq);
			kfree_skb(skb1);
			released = 1;
		}

		/* Try sending any messages link endpoint has pending */
		if (unlikely(skb_queue_len(&l_ptr->backlogq)))
			tipc_link_push_packets(l_ptr);

		if (released && !skb_queue_empty(&l_ptr->wakeupq))
			link_prepare_wakeup(l_ptr);

		/* Process the incoming packet */
		if (unlikely(!link_working_working(l_ptr))) {
			if (msg_user(msg) == LINK_PROTOCOL) {
				tipc_link_proto_rcv(l_ptr, skb);
				link_retrieve_defq(l_ptr, &head);
				goto unlock;
			}

			/* Traffic message. Conditionally activate link */
			link_state_event(l_ptr, TRAFFIC_MSG_EVT);

			if (link_working_working(l_ptr)) {
				/* Re-insert buffer in front of queue */
				__skb_queue_head(&head, skb);
				goto unlock;
			}
			goto unlock;
		}

		/* Link is now in state WORKING_WORKING */
		if (unlikely(seq_no != mod(l_ptr->next_in_no))) {
			link_handle_out_of_seq_msg(l_ptr, skb);
			link_retrieve_defq(l_ptr, &head);
			goto unlock;
		}

		/* Synchronize with parallel link if applicable */
		if (unlikely((l_ptr->flags & LINK_SYNCHING) && !msg_dup(msg))) {
			link_handle_out_of_seq_msg(l_ptr, skb);
			if (link_synch(l_ptr))
				link_retrieve_defq(l_ptr, &head);
			goto unlock;
		}
		l_ptr->next_in_no++;
		if (unlikely(!skb_queue_empty(&l_ptr->deferdq)))
			link_retrieve_defq(l_ptr, &head);
		if (unlikely(++l_ptr->rcv_unacked >= TIPC_MIN_LINK_WIN)) {
			l_ptr->stats.sent_acks++;
			tipc_link_proto_xmit(l_ptr, STATE_MSG, 0, 0, 0, 0, 0);
		}
		tipc_link_input(l_ptr, skb);
		skb = NULL;
unlock:
		tipc_node_unlock(n_ptr);
		tipc_node_put(n_ptr);
discard:
		kfree_skb(skb);
	}
}
/* tipc_data_input - deliver data and name distr msgs to upper layer
 *
 * Consumes buffer if message is of right type
 * Node lock must be held
 */
static bool tipc_data_input(struct tipc_link *link, struct sk_buff *skb)
{
	struct tipc_node *node = link->owner;
	struct tipc_msg *msg = buf_msg(skb);
	u32 dport = msg_destport(msg);

	switch (msg_user(msg)) {
	case TIPC_LOW_IMPORTANCE:
	case TIPC_MEDIUM_IMPORTANCE:
	case TIPC_HIGH_IMPORTANCE:
	case TIPC_CRITICAL_IMPORTANCE:
	case CONN_MANAGER:
		if (tipc_skb_queue_tail(&link->inputq, skb, dport)) {
			node->inputq = &link->inputq;
			node->action_flags |= TIPC_MSG_EVT;
		}
		return true;
	case NAME_DISTRIBUTOR:
		node->bclink.recv_permitted = true;
		node->namedq = &link->namedq;
		skb_queue_tail(&link->namedq, skb);
		if (skb_queue_len(&link->namedq) == 1)
			node->action_flags |= TIPC_NAMED_MSG_EVT;
		return true;
	case MSG_BUNDLER:
	case CHANGEOVER_PROTOCOL:
	case MSG_FRAGMENTER:
	case BCAST_PROTOCOL:
		return false;
	default:
		pr_warn("Dropping received illegal msg type\n");
		kfree_skb(skb);
		return false;
	}
}
/* tipc_link_input - process packet that has passed link protocol check
 *
 * Consumes buffer
 * Node lock must be held
 */
static void tipc_link_input(struct tipc_link *link, struct sk_buff *skb)
{
	struct tipc_node *node = link->owner;
	struct tipc_msg *msg = buf_msg(skb);
	struct sk_buff *iskb;
	int pos = 0;

	if (likely(tipc_data_input(link, skb)))
		return;

	switch (msg_user(msg)) {
	case CHANGEOVER_PROTOCOL:
		if (msg_dup(msg)) {
			link->flags |= LINK_SYNCHING;
			link->synch_point = msg_seqno(msg_get_wrapped(msg));
			kfree_skb(skb);
			break;
		}
		if (!tipc_link_failover_rcv(node, &skb))
			break;
		if (msg_user(buf_msg(skb)) != MSG_BUNDLER) {
			tipc_data_input(link, skb);
			break;
		}
	case MSG_BUNDLER:
		link->stats.recv_bundles++;
		link->stats.recv_bundled += msg_msgcnt(msg);

		while (tipc_msg_extract(skb, &iskb, &pos))
			tipc_data_input(link, iskb);
		break;
	case MSG_FRAGMENTER:
		link->stats.recv_fragments++;
		if (tipc_buf_append(&link->reasm_buf, &skb)) {
			link->stats.recv_fragmented++;
			tipc_data_input(link, skb);
		} else if (!link->reasm_buf) {
			tipc_link_reset(link);
		}
		break;
	case BCAST_PROTOCOL:
		tipc_link_sync_rcv(node, skb);
		break;
	default:
		break;
	}
}
/**
 * tipc_link_defer_pkt - Add out-of-sequence message to deferred reception queue
 *
 * Returns increase in queue length (i.e. 0 or 1)
 */
u32 tipc_link_defer_pkt(struct sk_buff_head *list, struct sk_buff *skb)
{
	struct sk_buff *skb1;
	u32 seq_no = buf_seqno(skb);

	/* Empty queue ? */
	if (skb_queue_empty(list)) {
		__skb_queue_tail(list, skb);
		return 1;
	}

	/* Last ? */
	if (less(buf_seqno(skb_peek_tail(list)), seq_no)) {
		__skb_queue_tail(list, skb);
		return 1;
	}

	/* Locate insertion point in queue, then insert; discard if duplicate */
	skb_queue_walk(list, skb1) {
		u32 curr_seqno = buf_seqno(skb1);

		if (seq_no == curr_seqno) {
			kfree_skb(skb);
			return 0;
		}

		if (less(seq_no, curr_seqno))
			break;
	}

	__skb_queue_before(list, skb1, skb);
	return 1;
}
/*
 * link_handle_out_of_seq_msg - handle arrival of out-of-sequence packet
 */
static void link_handle_out_of_seq_msg(struct tipc_link *l_ptr,
				       struct sk_buff *buf)
{
	u32 seq_no = buf_seqno(buf);

	if (likely(msg_user(buf_msg(buf)) == LINK_PROTOCOL)) {
		tipc_link_proto_rcv(l_ptr, buf);
		return;
	}

	/* Record OOS packet arrival (force mismatch on next timeout) */
	l_ptr->checkpoint--;

	/*
	 * Discard packet if a duplicate; otherwise add it to deferred queue
	 * and notify peer of gap as per protocol specification
	 */
	if (less(seq_no, mod(l_ptr->next_in_no))) {
		l_ptr->stats.duplicates++;
		kfree_skb(buf);
		return;
	}

	if (tipc_link_defer_pkt(&l_ptr->deferdq, buf)) {
		l_ptr->stats.deferred_recv++;
		if ((skb_queue_len(&l_ptr->deferdq) % TIPC_MIN_LINK_WIN) == 1)
			tipc_link_proto_xmit(l_ptr, STATE_MSG, 0, 0, 0, 0, 0);
	} else {
		l_ptr->stats.duplicates++;
	}
}
/*
 * Send protocol message to the other endpoint.
 */
void tipc_link_proto_xmit(struct tipc_link *l_ptr, u32 msg_typ, int probe_msg,
			  u32 gap, u32 tolerance, u32 priority, u32 ack_mtu)
{
	struct sk_buff *buf = NULL;
	struct tipc_msg *msg = l_ptr->pmsg;
	u32 msg_size = sizeof(l_ptr->proto_msg);
	int r_flag;

	/* Don't send protocol message during link changeover */
	if (l_ptr->exp_msg_count)
		return;

	/* Abort non-RESET send if communication with node is prohibited */
	if ((tipc_node_blocked(l_ptr->owner)) && (msg_typ != RESET_MSG))
		return;

	/* Create protocol message with "out-of-sequence" sequence number */
	msg_set_type(msg, msg_typ);
	msg_set_net_plane(msg, l_ptr->net_plane);
	msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
	msg_set_last_bcast(msg, tipc_bclink_get_last_sent(l_ptr->owner->net));

	if (msg_typ == STATE_MSG) {
		u32 next_sent = mod(l_ptr->next_out_no);

		if (!tipc_link_is_up(l_ptr))
			return;
		if (skb_queue_len(&l_ptr->backlogq))
			next_sent = buf_seqno(skb_peek(&l_ptr->backlogq));
		msg_set_next_sent(msg, next_sent);
		if (!skb_queue_empty(&l_ptr->deferdq)) {
			u32 rec = buf_seqno(skb_peek(&l_ptr->deferdq));
			gap = mod(rec - mod(l_ptr->next_in_no));
		}
		msg_set_seq_gap(msg, gap);
		if (gap)
			l_ptr->stats.sent_nacks++;
		msg_set_link_tolerance(msg, tolerance);
		msg_set_linkprio(msg, priority);
		msg_set_max_pkt(msg, ack_mtu);
		msg_set_ack(msg, mod(l_ptr->next_in_no - 1));
		msg_set_probe(msg, probe_msg != 0);
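		/* MTU discovery: a probe is padded to a size halfway between
		 * the confirmed max_pkt and max_pkt_target; after 10
		 * unanswered probes of one size, the target is lowered to
		 * just below that size, narrowing in on the largest packet
		 * the bearer actually delivers.
		 */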
		if (probe_msg) {
			u32 mtu = l_ptr->max_pkt;

			if ((mtu < l_ptr->max_pkt_target) &&
			    link_working_working(l_ptr) &&
			    l_ptr->fsm_msg_cnt) {
				msg_size = (mtu + (l_ptr->max_pkt_target - mtu)/2 + 2) & ~3;
				if (l_ptr->max_pkt_probes == 10) {
					l_ptr->max_pkt_target = (msg_size - 4);
					l_ptr->max_pkt_probes = 0;
					msg_size = (mtu + (l_ptr->max_pkt_target - mtu)/2 + 2) & ~3;
				}
				l_ptr->max_pkt_probes++;
			}

			l_ptr->stats.sent_probes++;
		}
		l_ptr->stats.sent_states++;
	} else {		/* RESET_MSG or ACTIVATE_MSG */
		msg_set_ack(msg, mod(l_ptr->reset_checkpoint - 1));
		msg_set_seq_gap(msg, 0);
		msg_set_next_sent(msg, 1);
		msg_set_probe(msg, 0);
		msg_set_link_tolerance(msg, l_ptr->tolerance);
		msg_set_linkprio(msg, l_ptr->priority);
		msg_set_max_pkt(msg, l_ptr->max_pkt_target);
	}

	r_flag = (l_ptr->owner->working_links > tipc_link_is_up(l_ptr));
	msg_set_redundant_link(msg, r_flag);
	msg_set_linkprio(msg, l_ptr->priority);
	msg_set_size(msg, msg_size);
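
	/* The seqno lies half the 16-bit sequence space away from the data
	 * stream (next_out_no + 0x7fff), so a protocol message can never be
	 * mistaken for an in-window data packet.
	 */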
	msg_set_seqno(msg, mod(l_ptr->next_out_no + (0xffff/2)));

	buf = tipc_buf_acquire(msg_size);
	if (!buf)
		return;

	skb_copy_to_linear_data(buf, msg, sizeof(l_ptr->proto_msg));
	buf->priority = TC_PRIO_CONTROL;
	tipc_bearer_send(l_ptr->owner->net, l_ptr->bearer_id, buf,
			 &l_ptr->media_addr);
	l_ptr->rcv_unacked = 0;
	kfree_skb(buf);
}
/*
 * Receive protocol message :
 * Note that network plane id propagates through the network, and may
 * change at any time. The node with lowest address rules
 */
static void tipc_link_proto_rcv(struct tipc_link *l_ptr,
				struct sk_buff *buf)
{
	u32 rec_gap = 0;
	u32 max_pkt_info;
	u32 max_pkt_ack;
	u32 msg_tol;
	struct tipc_msg *msg = buf_msg(buf);

	/* Discard protocol message during link changeover */
	if (l_ptr->exp_msg_count)
		goto exit;

	if (l_ptr->net_plane != msg_net_plane(msg))
		if (link_own_addr(l_ptr) > msg_prevnode(msg))
			l_ptr->net_plane = msg_net_plane(msg);

	switch (msg_type(msg)) {

	case RESET_MSG:
		if (!link_working_unknown(l_ptr) &&
		    (l_ptr->peer_session != INVALID_SESSION)) {
			if (less_eq(msg_session(msg), l_ptr->peer_session))
				break; /* duplicate or old reset: ignore */
		}

		if (!msg_redundant_link(msg) && (link_working_working(l_ptr) ||
						 link_working_unknown(l_ptr))) {
			/*
			 * peer has lost contact -- don't allow peer's links
			 * to reactivate before we recognize loss & clean up
			 */
			l_ptr->owner->action_flags |= TIPC_WAIT_OWN_LINKS_DOWN;
		}

		link_state_event(l_ptr, RESET_MSG);

		/* fall thru' */
	case ACTIVATE_MSG:
		/* Update link settings according other endpoint's values */
		strcpy((strrchr(l_ptr->name, ':') + 1), (char *)msg_data(msg));

		msg_tol = msg_link_tolerance(msg);
		if (msg_tol > l_ptr->tolerance)
			link_set_supervision_props(l_ptr, msg_tol);

		if (msg_linkprio(msg) > l_ptr->priority)
			l_ptr->priority = msg_linkprio(msg);

		max_pkt_info = msg_max_pkt(msg);
		if (max_pkt_info) {
			if (max_pkt_info < l_ptr->max_pkt_target)
				l_ptr->max_pkt_target = max_pkt_info;
			if (l_ptr->max_pkt > l_ptr->max_pkt_target)
				l_ptr->max_pkt = l_ptr->max_pkt_target;
		} else {
			l_ptr->max_pkt = l_ptr->max_pkt_target;
		}

		/* Synchronize broadcast link info, if not done previously */
		if (!tipc_node_is_up(l_ptr->owner)) {
			l_ptr->owner->bclink.last_sent =
				l_ptr->owner->bclink.last_in =
				msg_last_bcast(msg);
			l_ptr->owner->bclink.oos_state = 0;
		}

		l_ptr->peer_session = msg_session(msg);
		l_ptr->peer_bearer_id = msg_bearer_id(msg);

		if (msg_type(msg) == ACTIVATE_MSG)
			link_state_event(l_ptr, ACTIVATE_MSG);
		break;
	case STATE_MSG:

		msg_tol = msg_link_tolerance(msg);
		if (msg_tol)
			link_set_supervision_props(l_ptr, msg_tol);

		if (msg_linkprio(msg) &&
		    (msg_linkprio(msg) != l_ptr->priority)) {
			pr_debug("%s<%s>, priority change %u->%u\n",
				 link_rst_msg, l_ptr->name,
				 l_ptr->priority, msg_linkprio(msg));
			l_ptr->priority = msg_linkprio(msg);
			tipc_link_reset(l_ptr); /* Enforce change to take effect */
			break;
		}

		/* Record reception; force mismatch at next timeout: */
		l_ptr->checkpoint--;

		link_state_event(l_ptr, TRAFFIC_MSG_EVT);
		l_ptr->stats.recv_states++;
		if (link_reset_unknown(l_ptr))
			break;

		if (less_eq(mod(l_ptr->next_in_no), msg_next_sent(msg))) {
			rec_gap = mod(msg_next_sent(msg) -
				      mod(l_ptr->next_in_no));
		}

		max_pkt_ack = msg_max_pkt(msg);
		if (max_pkt_ack > l_ptr->max_pkt) {
			l_ptr->max_pkt = max_pkt_ack;
			l_ptr->max_pkt_probes = 0;
		}

		max_pkt_ack = 0;
		if (msg_probe(msg)) {
			l_ptr->stats.recv_probes++;
			if (msg_size(msg) > sizeof(l_ptr->proto_msg))
				max_pkt_ack = msg_size(msg);
		}

		/* Protocol message before retransmits, reduce loss risk */
		if (l_ptr->owner->bclink.recv_permitted)
			tipc_bclink_update_link_state(l_ptr->owner,
						      msg_last_bcast(msg));

		if (rec_gap || (msg_probe(msg))) {
			tipc_link_proto_xmit(l_ptr, STATE_MSG, 0, rec_gap, 0,
					     0, max_pkt_ack);
		}
		if (msg_seq_gap(msg)) {
			l_ptr->stats.recv_nacks++;
			tipc_link_retransmit(l_ptr, skb_peek(&l_ptr->transmq),
					     msg_seq_gap(msg));
		}
		break;
	}
exit:
	kfree_skb(buf);
}
/* tipc_link_tunnel_xmit(): Tunnel one packet via a link belonging to
 * a different bearer. Owner node is locked.
 */
static void tipc_link_tunnel_xmit(struct tipc_link *l_ptr,
				  struct tipc_msg *tunnel_hdr,
				  struct tipc_msg *msg,
				  u32 selector)
{
	struct tipc_link *tunnel;
	struct sk_buff *skb;
	u32 length = msg_size(msg);

	tunnel = l_ptr->owner->active_links[selector & 1];
	if (!tipc_link_is_up(tunnel)) {
		pr_warn("%stunnel link no longer available\n", link_co_err);
		return;
	}
	msg_set_size(tunnel_hdr, length + INT_H_SIZE);
	skb = tipc_buf_acquire(length + INT_H_SIZE);
	if (!skb) {
		pr_warn("%sunable to send tunnel msg\n", link_co_err);
		return;
	}
	skb_copy_to_linear_data(skb, tunnel_hdr, INT_H_SIZE);
	skb_copy_to_linear_data_offset(skb, INT_H_SIZE, msg, length);
	__tipc_link_xmit_skb(tunnel, skb);
}
/* tipc_link_failover_send_queue(): A link has gone down, but a second
 * link is still active. We can do failover. Tunnel the failing link's
 * whole send queue via the remaining link. This way, we don't lose
 * any packets, and sequence order is preserved for subsequent traffic
 * sent over the remaining link. Owner node is locked.
 */
void tipc_link_failover_send_queue(struct tipc_link *l_ptr)
{
	int msgcount;
	struct tipc_link *tunnel = l_ptr->owner->active_links[0];
	struct tipc_msg tunnel_hdr;
	struct sk_buff *skb;
	int split_bundles;

	if (!tunnel)
		return;

	tipc_msg_init(link_own_addr(l_ptr), &tunnel_hdr, CHANGEOVER_PROTOCOL,
		      ORIGINAL_MSG, INT_H_SIZE, l_ptr->addr);
	skb_queue_splice_tail_init(&l_ptr->backlogq, &l_ptr->transmq);
	tipc_link_purge_backlog(l_ptr);
	msgcount = skb_queue_len(&l_ptr->transmq);
	msg_set_bearer_id(&tunnel_hdr, l_ptr->peer_bearer_id);
	msg_set_msgcnt(&tunnel_hdr, msgcount);

	if (skb_queue_empty(&l_ptr->transmq)) {
		skb = tipc_buf_acquire(INT_H_SIZE);
		if (skb) {
			skb_copy_to_linear_data(skb, &tunnel_hdr, INT_H_SIZE);
			msg_set_size(&tunnel_hdr, INT_H_SIZE);
			__tipc_link_xmit_skb(tunnel, skb);
		} else {
			pr_warn("%sunable to send changeover msg\n",
				link_co_err);
		}
		return;
	}

	split_bundles = (l_ptr->owner->active_links[0] !=
			 l_ptr->owner->active_links[1]);

	skb_queue_walk(&l_ptr->transmq, skb) {
		struct tipc_msg *msg = buf_msg(skb);

		if ((msg_user(msg) == MSG_BUNDLER) && split_bundles) {
			struct tipc_msg *m = msg_get_wrapped(msg);
			unchar *pos = (unchar *)m;

			msgcount = msg_msgcnt(msg);
			while (msgcount--) {
				msg_set_seqno(m, msg_seqno(msg));
				tipc_link_tunnel_xmit(l_ptr, &tunnel_hdr, m,
						      msg_link_selector(m));
				pos += align(msg_size(m));
				m = (struct tipc_msg *)pos;
			}
		} else {
			tipc_link_tunnel_xmit(l_ptr, &tunnel_hdr, msg,
					      msg_link_selector(msg));
		}
	}
}
/* tipc_link_dup_queue_xmit(): A second link has become active. Tunnel a
 * duplicate of the first link's send queue via the new link. This way, we
 * are guaranteed that currently queued packets from a socket are delivered
 * before future traffic from the same socket, even if this is using the
 * new link. The last arriving copy of each duplicate packet is dropped at
 * the receiving end by the regular protocol check, so packet cardinality
 * and sequence order is preserved per sender/receiver socket pair.
 * Owner node is locked.
 */
void tipc_link_dup_queue_xmit(struct tipc_link *link,
			      struct tipc_link *tnl)
{
	struct sk_buff *skb;
	struct tipc_msg tnl_hdr;
	struct sk_buff_head *queue = &link->transmq;
	int mcnt;

	tipc_msg_init(link_own_addr(link), &tnl_hdr, CHANGEOVER_PROTOCOL,
		      DUPLICATE_MSG, INT_H_SIZE, link->addr);
	mcnt = skb_queue_len(&link->transmq) + skb_queue_len(&link->backlogq);
	msg_set_msgcnt(&tnl_hdr, mcnt);
	msg_set_bearer_id(&tnl_hdr, link->peer_bearer_id);

tunnel_queue:
	skb_queue_walk(queue, skb) {
		struct sk_buff *outskb;
		struct tipc_msg *msg = buf_msg(skb);
		u32 len = msg_size(msg);

		msg_set_ack(msg, mod(link->next_in_no - 1));
		msg_set_bcast_ack(msg, link->owner->bclink.last_in);
		msg_set_size(&tnl_hdr, len + INT_H_SIZE);
		outskb = tipc_buf_acquire(len + INT_H_SIZE);
		if (outskb == NULL) {
			pr_warn("%sunable to send duplicate msg\n",
				link_co_err);
			return;
		}
		skb_copy_to_linear_data(outskb, &tnl_hdr, INT_H_SIZE);
		skb_copy_to_linear_data_offset(outskb, INT_H_SIZE,
					       skb->data, len);
		__tipc_link_xmit_skb(tnl, outskb);
		if (!tipc_link_is_up(link))
			return;
	}
	if (queue == &link->backlogq)
		return;
	queue = &link->backlogq;
	goto tunnel_queue;
}
/* tipc_link_failover_rcv(): Receive a tunnelled ORIGINAL_MSG packet
 * Owner node is locked.
 */
static bool tipc_link_failover_rcv(struct tipc_node *node,
				   struct sk_buff **skb)
{
	struct tipc_msg *msg = buf_msg(*skb);
	struct sk_buff *iskb = NULL;
	struct tipc_link *link = NULL;
	int bearer_id = msg_bearer_id(msg);
	int pos = 0;

	if (msg_type(msg) != ORIGINAL_MSG) {
		pr_warn("%sunknown tunnel pkt received\n", link_co_err);
		goto exit;
	}
	if (bearer_id >= MAX_BEARERS)
		goto exit;
	link = node->links[bearer_id];
	if (!link)
		goto exit;
	if (tipc_link_is_up(link))
		tipc_link_reset(link);

	/* First failover packet? */
	if (link->exp_msg_count == START_CHANGEOVER)
		link->exp_msg_count = msg_msgcnt(msg);

	/* Should we expect an inner packet? */
	if (!link->exp_msg_count)
		goto exit;

	if (!tipc_msg_extract(*skb, &iskb, &pos)) {
		pr_warn("%sno inner failover pkt\n", link_co_err);
		goto exit;
	}
	link->exp_msg_count--;
	*skb = NULL;

	/* Was packet already delivered? */
	if (less(buf_seqno(iskb), link->reset_checkpoint)) {
		kfree_skb(iskb);
		iskb = NULL;
		goto exit;
	}
	if (msg_user(buf_msg(iskb)) == MSG_FRAGMENTER) {
		link->stats.recv_fragments++;
		tipc_buf_append(&link->reasm_buf, &iskb);
	}
exit:
	if (link && (!link->exp_msg_count) && (link->flags & LINK_STOPPED))
		tipc_link_delete(link);
	kfree_skb(*skb);
	*skb = iskb;
	return *skb;
}
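
/* The continuity timer fires at one quarter of the link tolerance, capped
 * at 500 ms. abort_limit converts the tolerance back into a number of
 * consecutive timer expirations without contact from the peer before the
 * link is declared failed (16 when the interval is not capped).
 */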
static void link_set_supervision_props(struct tipc_link *l_ptr, u32 tol)
{
	unsigned long intv = ((tol / 4) > 500) ? 500 : tol / 4;

	if ((tol < TIPC_MIN_LINK_TOL) || (tol > TIPC_MAX_LINK_TOL))
		return;

	l_ptr->tolerance = tol;
	l_ptr->cont_intv = msecs_to_jiffies(intv);
	l_ptr->abort_limit = tol / (jiffies_to_msecs(l_ptr->cont_intv) / 4);
}
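
/* Backlog limits scale with the configured send window; for example, a
 * window of 50 gives limits of 25/50/75/100 for LOW through CRITICAL
 * importance. The SYSTEM limit is instead sized to accept a full name
 * table bulk distribution: TIPC_MAX_PUBLICATIONS items of ITEM_SIZE each,
 * split into max_pkt-sized packets.
 */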
void tipc_link_set_queue_limits(struct tipc_link *l, u32 win)
{
	int max_bulk = TIPC_MAX_PUBLICATIONS / (l->max_pkt / ITEM_SIZE);

	l->window = win;
	l->backlog[TIPC_LOW_IMPORTANCE].limit      = win / 2;
	l->backlog[TIPC_MEDIUM_IMPORTANCE].limit   = win;
	l->backlog[TIPC_HIGH_IMPORTANCE].limit     = win / 2 * 3;
	l->backlog[TIPC_CRITICAL_IMPORTANCE].limit = win * 2;
	l->backlog[TIPC_SYSTEM_IMPORTANCE].limit   = max_bulk;
}
/* tipc_link_find_owner - locate owner node of link by link's name
 * @net: the applicable net namespace
 * @link_name: pointer to link name string
 * @bearer_id: pointer to index in 'node->links' array where the link was found.
 *
 * Returns pointer to node owning the link, or NULL if no matching link is found.
 */
static struct tipc_node *tipc_link_find_owner(struct net *net,
					      const char *link_name,
					      unsigned int *bearer_id)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_link *l_ptr;
	struct tipc_node *n_ptr;
	struct tipc_node *found_node = NULL;
	int i;

	*bearer_id = 0;
	rcu_read_lock();
	list_for_each_entry_rcu(n_ptr, &tn->node_list, list) {
		tipc_node_lock(n_ptr);
		for (i = 0; i < MAX_BEARERS; i++) {
			l_ptr = n_ptr->links[i];
			if (l_ptr && !strcmp(l_ptr->name, link_name)) {
				*bearer_id = i;
				found_node = n_ptr;
				break;
			}
		}
		tipc_node_unlock(n_ptr);
		if (found_node)
			break;
	}
	rcu_read_unlock();

	return found_node;
}
/**
 * link_reset_statistics - reset link statistics
 * @l_ptr: pointer to link
 */
static void link_reset_statistics(struct tipc_link *l_ptr)
{
	memset(&l_ptr->stats, 0, sizeof(l_ptr->stats));
	l_ptr->stats.sent_info = l_ptr->next_out_no;
	l_ptr->stats.recv_info = l_ptr->next_in_no;
}
static void link_print(struct tipc_link *l_ptr, const char *str)
{
	struct tipc_net *tn = net_generic(l_ptr->owner->net, tipc_net_id);
	struct tipc_bearer *b_ptr;

	rcu_read_lock();
	b_ptr = rcu_dereference_rtnl(tn->bearer_list[l_ptr->bearer_id]);
	if (b_ptr)
		pr_info("%s Link %x<%s>:", str, l_ptr->addr, b_ptr->name);
	rcu_read_unlock();

	if (link_working_unknown(l_ptr))
		pr_cont(":WU\n");
	else if (link_reset_reset(l_ptr))
		pr_cont(":RR\n");
	else if (link_reset_unknown(l_ptr))
		pr_cont(":RU\n");
	else if (link_working_working(l_ptr))
		pr_cont(":WW\n");
	else
		pr_cont("\n");
}
/* Parse and validate nested (link) properties valid for media, bearer and link
 */
int tipc_nl_parse_link_prop(struct nlattr *prop, struct nlattr *props[])
{
	int err;

	err = nla_parse_nested(props, TIPC_NLA_PROP_MAX, prop,
			       tipc_nl_prop_policy);
	if (err)
		return err;

	if (props[TIPC_NLA_PROP_PRIO]) {
		u32 prio;

		prio = nla_get_u32(props[TIPC_NLA_PROP_PRIO]);
		if (prio > TIPC_MAX_LINK_PRI)
			return -EINVAL;
	}

	if (props[TIPC_NLA_PROP_TOL]) {
		u32 tol;

		tol = nla_get_u32(props[TIPC_NLA_PROP_TOL]);
		if ((tol < TIPC_MIN_LINK_TOL) || (tol > TIPC_MAX_LINK_TOL))
			return -EINVAL;
	}

	if (props[TIPC_NLA_PROP_WIN]) {
		u32 win;

		win = nla_get_u32(props[TIPC_NLA_PROP_WIN]);
		if ((win < TIPC_MIN_LINK_WIN) || (win > TIPC_MAX_LINK_WIN))
			return -EINVAL;
	}

	return 0;
}
int tipc_nl_link_set(struct sk_buff *skb, struct genl_info *info)
{
	int err;
	int res = 0;
	int bearer_id;
	char *name;
	struct tipc_link *link;
	struct tipc_node *node;
	struct nlattr *attrs[TIPC_NLA_LINK_MAX + 1];
	struct net *net = sock_net(skb->sk);

	if (!info->attrs[TIPC_NLA_LINK])
		return -EINVAL;

	err = nla_parse_nested(attrs, TIPC_NLA_LINK_MAX,
			       info->attrs[TIPC_NLA_LINK],
			       tipc_nl_link_policy);
	if (err)
		return err;

	if (!attrs[TIPC_NLA_LINK_NAME])
		return -EINVAL;

	name = nla_data(attrs[TIPC_NLA_LINK_NAME]);

	node = tipc_link_find_owner(net, name, &bearer_id);
	if (!node)
		return -EINVAL;

	tipc_node_lock(node);

	link = node->links[bearer_id];
	if (!link) {
		res = -EINVAL;
		goto out;
	}

	if (attrs[TIPC_NLA_LINK_PROP]) {
		struct nlattr *props[TIPC_NLA_PROP_MAX + 1];

		err = tipc_nl_parse_link_prop(attrs[TIPC_NLA_LINK_PROP],
					      props);
		if (err) {
			res = err;
			goto out;
		}

		if (props[TIPC_NLA_PROP_TOL]) {
			u32 tol;

			tol = nla_get_u32(props[TIPC_NLA_PROP_TOL]);
			link_set_supervision_props(link, tol);
			tipc_link_proto_xmit(link, STATE_MSG, 0, 0, tol, 0, 0);
		}
		if (props[TIPC_NLA_PROP_PRIO]) {
			u32 prio;

			prio = nla_get_u32(props[TIPC_NLA_PROP_PRIO]);
			link->priority = prio;
			tipc_link_proto_xmit(link, STATE_MSG, 0, 0, 0, prio, 0);
		}
		if (props[TIPC_NLA_PROP_WIN]) {
			u32 win;

			win = nla_get_u32(props[TIPC_NLA_PROP_WIN]);
			tipc_link_set_queue_limits(link, win);
		}
	}

out:
	tipc_node_unlock(node);

	return res;
}
static int __tipc_nl_add_stats(struct sk_buff *skb, struct tipc_stats *s)
{
	int i;
	struct nlattr *stats;

	struct nla_map {
		u32 key;
		u32 val;
	};

	struct nla_map map[] = {
		{TIPC_NLA_STATS_RX_INFO, s->recv_info},
		{TIPC_NLA_STATS_RX_FRAGMENTS, s->recv_fragments},
		{TIPC_NLA_STATS_RX_FRAGMENTED, s->recv_fragmented},
		{TIPC_NLA_STATS_RX_BUNDLES, s->recv_bundles},
		{TIPC_NLA_STATS_RX_BUNDLED, s->recv_bundled},
		{TIPC_NLA_STATS_TX_INFO, s->sent_info},
		{TIPC_NLA_STATS_TX_FRAGMENTS, s->sent_fragments},
		{TIPC_NLA_STATS_TX_FRAGMENTED, s->sent_fragmented},
		{TIPC_NLA_STATS_TX_BUNDLES, s->sent_bundles},
		{TIPC_NLA_STATS_TX_BUNDLED, s->sent_bundled},
		{TIPC_NLA_STATS_MSG_PROF_TOT, (s->msg_length_counts) ?
			s->msg_length_counts : 1},
		{TIPC_NLA_STATS_MSG_LEN_CNT, s->msg_length_counts},
		{TIPC_NLA_STATS_MSG_LEN_TOT, s->msg_lengths_total},
		{TIPC_NLA_STATS_MSG_LEN_P0, s->msg_length_profile[0]},
		{TIPC_NLA_STATS_MSG_LEN_P1, s->msg_length_profile[1]},
		{TIPC_NLA_STATS_MSG_LEN_P2, s->msg_length_profile[2]},
		{TIPC_NLA_STATS_MSG_LEN_P3, s->msg_length_profile[3]},
		{TIPC_NLA_STATS_MSG_LEN_P4, s->msg_length_profile[4]},
		{TIPC_NLA_STATS_MSG_LEN_P5, s->msg_length_profile[5]},
		{TIPC_NLA_STATS_MSG_LEN_P6, s->msg_length_profile[6]},
		{TIPC_NLA_STATS_RX_STATES, s->recv_states},
		{TIPC_NLA_STATS_RX_PROBES, s->recv_probes},
		{TIPC_NLA_STATS_RX_NACKS, s->recv_nacks},
		{TIPC_NLA_STATS_RX_DEFERRED, s->deferred_recv},
		{TIPC_NLA_STATS_TX_STATES, s->sent_states},
		{TIPC_NLA_STATS_TX_PROBES, s->sent_probes},
		{TIPC_NLA_STATS_TX_NACKS, s->sent_nacks},
		{TIPC_NLA_STATS_TX_ACKS, s->sent_acks},
		{TIPC_NLA_STATS_RETRANSMITTED, s->retransmitted},
		{TIPC_NLA_STATS_DUPLICATES, s->duplicates},
		{TIPC_NLA_STATS_LINK_CONGS, s->link_congs},
		{TIPC_NLA_STATS_MAX_QUEUE, s->max_queue_sz},
		{TIPC_NLA_STATS_AVG_QUEUE, s->queue_sz_counts ?
			(s->accu_queue_sz / s->queue_sz_counts) : 0}
	};

	stats = nla_nest_start(skb, TIPC_NLA_LINK_STATS);
	if (!stats)
		return -EMSGSIZE;

	for (i = 0; i < ARRAY_SIZE(map); i++)
		if (nla_put_u32(skb, map[i].key, map[i].val))
			goto msg_full;

	nla_nest_end(skb, stats);

	return 0;
msg_full:
	nla_nest_cancel(skb, stats);

	return -EMSGSIZE;
}
/* Caller should hold appropriate locks to protect the link */
static int __tipc_nl_add_link(struct net *net, struct tipc_nl_msg *msg,
			      struct tipc_link *link)
{
	int err;
	void *hdr;
	struct nlattr *attrs;
	struct nlattr *prop;
	struct tipc_net *tn = net_generic(net, tipc_net_id);

	hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
			  NLM_F_MULTI, TIPC_NL_LINK_GET);
	if (!hdr)
		return -EMSGSIZE;

	attrs = nla_nest_start(msg->skb, TIPC_NLA_LINK);
	if (!attrs)
		goto msg_full;

	if (nla_put_string(msg->skb, TIPC_NLA_LINK_NAME, link->name))
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_DEST,
			tipc_cluster_mask(tn->own_addr)))
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_MTU, link->max_pkt))
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_RX, link->next_in_no))
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_TX, link->next_out_no))
		goto attr_msg_full;

	if (tipc_link_is_up(link))
		if (nla_put_flag(msg->skb, TIPC_NLA_LINK_UP))
			goto attr_msg_full;
	if (tipc_link_is_active(link))
		if (nla_put_flag(msg->skb, TIPC_NLA_LINK_ACTIVE))
			goto attr_msg_full;

	prop = nla_nest_start(msg->skb, TIPC_NLA_LINK_PROP);
	if (!prop)
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_PRIO, link->priority))
		goto prop_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_TOL, link->tolerance))
		goto prop_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_WIN,
			link->window))
		goto prop_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_PRIO, link->priority))
		goto prop_msg_full;
	nla_nest_end(msg->skb, prop);

	err = __tipc_nl_add_stats(msg->skb, &link->stats);
	if (err)
		goto attr_msg_full;

	nla_nest_end(msg->skb, attrs);
	genlmsg_end(msg->skb, hdr);

	return 0;

prop_msg_full:
	nla_nest_cancel(msg->skb, prop);
attr_msg_full:
	nla_nest_cancel(msg->skb, attrs);
msg_full:
	genlmsg_cancel(msg->skb, hdr);

	return -EMSGSIZE;
}
/* Caller should hold node lock */
static int __tipc_nl_add_node_links(struct net *net, struct tipc_nl_msg *msg,
				    struct tipc_node *node, u32 *prev_link)
{
	u32 i;
	int err;

	for (i = *prev_link; i < MAX_BEARERS; i++) {
		*prev_link = i;

		if (!node->links[i])
			continue;

		err = __tipc_nl_add_link(net, msg, node->links[i]);
		if (err)
			return err;
	}
	*prev_link = 0;

	return 0;
}
int tipc_nl_link_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_node *node;
	struct tipc_nl_msg msg;
	u32 prev_node = cb->args[0];
	u32 prev_link = cb->args[1];
	int done = cb->args[2];
	int err;

	if (done)
		return 0;

	msg.skb = skb;
	msg.portid = NETLINK_CB(cb->skb).portid;
	msg.seq = cb->nlh->nlmsg_seq;

	rcu_read_lock();
	if (prev_node) {
		node = tipc_node_find(net, prev_node);
		if (!node) {
			/* We never set seq or call nl_dump_check_consistent();
			 * this means that setting prev_seq here will cause the
			 * consistency check to fail in the netlink callback
			 * handler, resulting in the last NLMSG_DONE message
			 * having the NLM_F_DUMP_INTR flag set.
			 */
			cb->prev_seq = 1;
			goto out;
		}
		tipc_node_put(node);

		list_for_each_entry_continue_rcu(node, &tn->node_list,
						 list) {
			tipc_node_lock(node);
			err = __tipc_nl_add_node_links(net, &msg, node,
						       &prev_link);
			tipc_node_unlock(node);
			tipc_node_put(node);
			if (err)
				goto out;

			prev_node = node->addr;
		}
	} else {
		err = tipc_nl_add_bc_link(net, &msg);
		if (err)
			goto out;

		list_for_each_entry_rcu(node, &tn->node_list, list) {
			tipc_node_lock(node);
			err = __tipc_nl_add_node_links(net, &msg, node,
						       &prev_link);
			tipc_node_unlock(node);
			if (err)
				goto out;

			prev_node = node->addr;
		}
	}
	done = 1;
out:
	rcu_read_unlock();

	cb->args[0] = prev_node;
	cb->args[1] = prev_link;
	cb->args[2] = done;

	return skb->len;
}
int tipc_nl_link_get(struct sk_buff *skb, struct genl_info *info)
{
	struct net *net = genl_info_net(info);
	struct sk_buff *ans_skb;
	struct tipc_nl_msg msg;
	struct tipc_link *link;
	struct tipc_node *node;
	char *name;
	int bearer_id;
	int err;

	if (!info->attrs[TIPC_NLA_LINK_NAME])
		return -EINVAL;

	name = nla_data(info->attrs[TIPC_NLA_LINK_NAME]);
	node = tipc_link_find_owner(net, name, &bearer_id);
	if (!node)
		return -EINVAL;

	ans_skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!ans_skb)
		return -ENOMEM;

	msg.skb = ans_skb;
	msg.portid = info->snd_portid;
	msg.seq = info->snd_seq;

	tipc_node_lock(node);
	link = node->links[bearer_id];
	if (!link) {
		err = -EINVAL;
		goto err_out;
	}

	err = __tipc_nl_add_link(net, &msg, link);
	if (err)
		goto err_out;

	tipc_node_unlock(node);

	return genlmsg_reply(ans_skb, info);

err_out:
	tipc_node_unlock(node);
	nlmsg_free(ans_skb);

	return err;
}
int tipc_nl_link_reset_stats(struct sk_buff *skb, struct genl_info *info)
{
	int err;
	char *link_name;
	unsigned int bearer_id;
	struct tipc_link *link;
	struct tipc_node *node;
	struct nlattr *attrs[TIPC_NLA_LINK_MAX + 1];
	struct net *net = sock_net(skb->sk);

	if (!info->attrs[TIPC_NLA_LINK])
		return -EINVAL;

	err = nla_parse_nested(attrs, TIPC_NLA_LINK_MAX,
			       info->attrs[TIPC_NLA_LINK],
			       tipc_nl_link_policy);
	if (err)
		return err;

	if (!attrs[TIPC_NLA_LINK_NAME])
		return -EINVAL;

	link_name = nla_data(attrs[TIPC_NLA_LINK_NAME]);

	if (strcmp(link_name, tipc_bclink_name) == 0) {
		err = tipc_bclink_reset_stats(net);
		if (err)
			return err;
		return 0;
	}

	node = tipc_link_find_owner(net, link_name, &bearer_id);
	if (!node)
		return -EINVAL;

	tipc_node_lock(node);

	link = node->links[bearer_id];
	if (!link) {
		tipc_node_unlock(node);
		return -EINVAL;
	}

	link_reset_statistics(link);

	tipc_node_unlock(node);

	return 0;
}