2 * drivers/net/bonding/bond_netlink.c - Netlink interface for bonding
3 * Copyright (c) 2013 Jiri Pirko <jiri@resnulli.us>
4 * Copyright (c) 2013 Scott Feldman <sfeldma@cumulusnetworks.com>
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
12 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
14 #include <linux/module.h>
15 #include <linux/errno.h>
16 #include <linux/netdevice.h>
17 #include <linux/etherdevice.h>
18 #include <linux/if_link.h>
19 #include <linux/if_ether.h>
20 #include <net/netlink.h>
21 #include <net/rtnetlink.h>
/*
 * bond_get_slave() - fill per-slave IFLA_SLAVE_* attributes into @skb
 * for an rtnetlink link dump: slave state, MII link status, link failure
 * count, permanent hardware address, queue id, and (in 802.3ad mode) the
 * active aggregator id.
 *
 * NOTE(review): this extract is missing lines -- the opening brace, the
 * error-return path after each failed nla_put (presumably -EMSGSIZE), and
 * the closing brace are not visible; only the visible statements are
 * documented below.
 */
24 int bond_get_slave(struct net_device *slave_dev, struct sk_buff *skb)
26 struct slave *slave = bond_slave_get_rtnl(slave_dev);
27 const struct aggregator *agg;
/* Each nla_put_* returns nonzero when the skb runs out of tailroom. */
29 if (nla_put_u8(skb, IFLA_SLAVE_STATE, bond_slave_state(slave)))
32 if (nla_put_u8(skb, IFLA_SLAVE_MII_STATUS, slave->link))
35 if (nla_put_u32(skb, IFLA_SLAVE_LINK_FAILURE_COUNT,
36 slave->link_failure_count))
/* Permanent hwaddr is emitted as a raw byte blob of addr_len bytes. */
39 if (nla_put(skb, IFLA_SLAVE_PERM_HWADDR,
40 slave_dev->addr_len, slave->perm_hwaddr))
43 if (nla_put_u16(skb, IFLA_SLAVE_QUEUE_ID, slave->queue_id))
/* 802.3ad-only attribute: the aggregator this port belongs to. */
46 if (slave->bond->params.mode == BOND_MODE_8023AD) {
47 agg = SLAVE_AD_INFO(slave).port.aggregator;
49 if (nla_put_u16(skb, IFLA_SLAVE_AD_AGGREGATOR_ID,
50 agg->aggregator_identifier))
/*
 * Netlink attribute policy for IFLA_BOND_* options: declares the expected
 * type of each attribute so the rtnetlink core can validate incoming
 * messages before bond_changelink() reads them.
 *
 * NOTE(review): the closing "};" of this table is not visible in this
 * extract.
 */
60 static const struct nla_policy bond_policy[IFLA_BOND_MAX + 1] = {
61 [IFLA_BOND_MODE] = { .type = NLA_U8 },
62 [IFLA_BOND_ACTIVE_SLAVE] = { .type = NLA_U32 },
63 [IFLA_BOND_MIIMON] = { .type = NLA_U32 },
64 [IFLA_BOND_UPDELAY] = { .type = NLA_U32 },
65 [IFLA_BOND_DOWNDELAY] = { .type = NLA_U32 },
66 [IFLA_BOND_USE_CARRIER] = { .type = NLA_U8 },
67 [IFLA_BOND_ARP_INTERVAL] = { .type = NLA_U32 },
/* ARP targets arrive as a nested list of be32 addresses. */
68 [IFLA_BOND_ARP_IP_TARGET] = { .type = NLA_NESTED },
69 [IFLA_BOND_ARP_VALIDATE] = { .type = NLA_U32 },
70 [IFLA_BOND_ARP_ALL_TARGETS] = { .type = NLA_U32 },
71 [IFLA_BOND_PRIMARY] = { .type = NLA_U32 },
72 [IFLA_BOND_PRIMARY_RESELECT] = { .type = NLA_U8 },
73 [IFLA_BOND_FAIL_OVER_MAC] = { .type = NLA_U8 },
74 [IFLA_BOND_XMIT_HASH_POLICY] = { .type = NLA_U8 },
75 [IFLA_BOND_RESEND_IGMP] = { .type = NLA_U32 },
76 [IFLA_BOND_NUM_PEER_NOTIF] = { .type = NLA_U8 },
77 [IFLA_BOND_ALL_SLAVES_ACTIVE] = { .type = NLA_U8 },
78 [IFLA_BOND_MIN_LINKS] = { .type = NLA_U32 },
79 [IFLA_BOND_LP_INTERVAL] = { .type = NLA_U32 },
80 [IFLA_BOND_PACKETS_PER_SLAVE] = { .type = NLA_U32 },
81 [IFLA_BOND_AD_LACP_RATE] = { .type = NLA_U8 },
82 [IFLA_BOND_AD_SELECT] = { .type = NLA_U8 },
/* AD info is output-only nested state; present here for completeness. */
83 [IFLA_BOND_AD_INFO] = { .type = NLA_NESTED },
/*
 * bond_validate() - rtnl_link_ops->validate hook: reject a requested
 * hardware address that is the wrong length or not a valid unicast
 * Ethernet address.
 *
 * NOTE(review): the error return for the wrong-length case (presumably
 * -EINVAL), the success "return 0;" and the braces are missing from this
 * extract.
 */
86 static int bond_validate(struct nlattr *tb[], struct nlattr *data[])
88 if (tb[IFLA_ADDRESS]) {
89 if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
91 if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
92 return -EADDRNOTAVAIL;
/*
 * bond_changelink() - rtnl_link_ops->changelink hook: apply each present
 * IFLA_BOND_* attribute to the bond, mostly by packing the value into a
 * bond_opt_value and dispatching through __bond_opt_set().
 *
 * NOTE(review): this extract is heavily truncated -- declarations of
 * "err"/"miimon"/"i"/etc., the "if (err) return err;" checks after each
 * setter, several closing braces, and the final return are not visible.
 * Comments below describe only the visible statements.
 */
97 static int bond_changelink(struct net_device *bond_dev,
98 struct nlattr *tb[], struct nlattr *data[])
100 struct bonding *bond = netdev_priv(bond_dev);
/* Scratch value reused for every __bond_opt_set() call below. */
101 struct bond_opt_value newval;
108 if (data[IFLA_BOND_MODE]) {
109 int mode = nla_get_u8(data[IFLA_BOND_MODE]);
111 bond_opt_initval(&newval, mode);
112 err = __bond_opt_set(bond, BOND_OPT_MODE, &newval);
/* Active slave is addressed by ifindex and resolved in the bond's netns. */
116 if (data[IFLA_BOND_ACTIVE_SLAVE]) {
117 int ifindex = nla_get_u32(data[IFLA_BOND_ACTIVE_SLAVE]);
118 struct net_device *slave_dev;
123 slave_dev = __dev_get_by_index(dev_net(bond_dev),
128 err = bond_option_active_slave_set(bond, slave_dev);
132 if (data[IFLA_BOND_MIIMON]) {
133 miimon = nla_get_u32(data[IFLA_BOND_MIIMON]);
135 bond_opt_initval(&newval, miimon);
136 err = __bond_opt_set(bond, BOND_OPT_MIIMON, &newval);
140 if (data[IFLA_BOND_UPDELAY]) {
141 int updelay = nla_get_u32(data[IFLA_BOND_UPDELAY]);
143 bond_opt_initval(&newval, updelay);
144 err = __bond_opt_set(bond, BOND_OPT_UPDELAY, &newval);
148 if (data[IFLA_BOND_DOWNDELAY]) {
149 int downdelay = nla_get_u32(data[IFLA_BOND_DOWNDELAY]);
151 bond_opt_initval(&newval, downdelay);
152 err = __bond_opt_set(bond, BOND_OPT_DOWNDELAY, &newval);
156 if (data[IFLA_BOND_USE_CARRIER]) {
157 int use_carrier = nla_get_u8(data[IFLA_BOND_USE_CARRIER]);
159 bond_opt_initval(&newval, use_carrier);
160 err = __bond_opt_set(bond, BOND_OPT_USE_CARRIER, &newval);
/* ARP and MII monitoring are mutually exclusive; reject the combination. */
164 if (data[IFLA_BOND_ARP_INTERVAL]) {
165 int arp_interval = nla_get_u32(data[IFLA_BOND_ARP_INTERVAL]);
167 if (arp_interval && miimon) {
168 pr_err("%s: ARP monitoring cannot be used with MII monitoring.\n",
173 bond_opt_initval(&newval, arp_interval);
174 err = __bond_opt_set(bond, BOND_OPT_ARP_INTERVAL, &newval);
/* Replace the whole ARP target list: clear, then re-add each nested be32. */
178 if (data[IFLA_BOND_ARP_IP_TARGET]) {
182 bond_option_arp_ip_targets_clear(bond);
183 nla_for_each_nested(attr, data[IFLA_BOND_ARP_IP_TARGET], rem) {
184 __be32 target = nla_get_be32(attr);
186 bond_opt_initval(&newval, target);
187 err = __bond_opt_set(bond, BOND_OPT_ARP_TARGETS,
/* Warn (but don't fail) if the user left ARP monitoring on with no targets. */
193 if (i == 0 && bond->params.arp_interval)
194 pr_warn("%s: removing last arp target with arp_interval on\n",
199 if (data[IFLA_BOND_ARP_VALIDATE]) {
200 int arp_validate = nla_get_u32(data[IFLA_BOND_ARP_VALIDATE]);
202 if (arp_validate && miimon) {
203 pr_err("%s: ARP validating cannot be used with MII monitoring.\n",
208 bond_opt_initval(&newval, arp_validate);
209 err = __bond_opt_set(bond, BOND_OPT_ARP_VALIDATE, &newval);
213 if (data[IFLA_BOND_ARP_ALL_TARGETS]) {
214 int arp_all_targets =
215 nla_get_u32(data[IFLA_BOND_ARP_ALL_TARGETS]);
217 bond_opt_initval(&newval, arp_all_targets);
218 err = __bond_opt_set(bond, BOND_OPT_ARP_ALL_TARGETS, &newval);
/* Primary slave arrives as an ifindex but is set by name (initstr). */
222 if (data[IFLA_BOND_PRIMARY]) {
223 int ifindex = nla_get_u32(data[IFLA_BOND_PRIMARY]);
224 struct net_device *dev;
227 dev = __dev_get_by_index(dev_net(bond_dev), ifindex);
231 bond_opt_initstr(&newval, primary);
232 err = __bond_opt_set(bond, BOND_OPT_PRIMARY, &newval);
236 if (data[IFLA_BOND_PRIMARY_RESELECT]) {
237 int primary_reselect =
238 nla_get_u8(data[IFLA_BOND_PRIMARY_RESELECT]);
240 bond_opt_initval(&newval, primary_reselect);
241 err = __bond_opt_set(bond, BOND_OPT_PRIMARY_RESELECT, &newval);
245 if (data[IFLA_BOND_FAIL_OVER_MAC]) {
247 nla_get_u8(data[IFLA_BOND_FAIL_OVER_MAC]);
249 bond_opt_initval(&newval, fail_over_mac);
250 err = __bond_opt_set(bond, BOND_OPT_FAIL_OVER_MAC, &newval);
254 if (data[IFLA_BOND_XMIT_HASH_POLICY]) {
255 int xmit_hash_policy =
256 nla_get_u8(data[IFLA_BOND_XMIT_HASH_POLICY]);
258 bond_opt_initval(&newval, xmit_hash_policy);
259 err = __bond_opt_set(bond, BOND_OPT_XMIT_HASH, &newval);
/* A few options still use direct bond_option_*_set() helpers, not
 * __bond_opt_set(); presumably not yet converted -- TODO confirm. */
263 if (data[IFLA_BOND_RESEND_IGMP]) {
265 nla_get_u32(data[IFLA_BOND_RESEND_IGMP]);
267 err = bond_option_resend_igmp_set(bond, resend_igmp);
271 if (data[IFLA_BOND_NUM_PEER_NOTIF]) {
273 nla_get_u8(data[IFLA_BOND_NUM_PEER_NOTIF]);
275 bond_opt_initval(&newval, num_peer_notif);
276 err = __bond_opt_set(bond, BOND_OPT_NUM_PEER_NOTIF, &newval);
280 if (data[IFLA_BOND_ALL_SLAVES_ACTIVE]) {
281 int all_slaves_active =
282 nla_get_u8(data[IFLA_BOND_ALL_SLAVES_ACTIVE]);
284 err = bond_option_all_slaves_active_set(bond,
289 if (data[IFLA_BOND_MIN_LINKS]) {
291 nla_get_u32(data[IFLA_BOND_MIN_LINKS]);
293 bond_opt_initval(&newval, min_links);
294 err = __bond_opt_set(bond, BOND_OPT_MINLINKS, &newval);
298 if (data[IFLA_BOND_LP_INTERVAL]) {
300 nla_get_u32(data[IFLA_BOND_LP_INTERVAL]);
302 err = bond_option_lp_interval_set(bond, lp_interval);
306 if (data[IFLA_BOND_PACKETS_PER_SLAVE]) {
307 int packets_per_slave =
308 nla_get_u32(data[IFLA_BOND_PACKETS_PER_SLAVE]);
310 bond_opt_initval(&newval, packets_per_slave);
311 err = __bond_opt_set(bond, BOND_OPT_PACKETS_PER_SLAVE, &newval);
315 if (data[IFLA_BOND_AD_LACP_RATE]) {
317 nla_get_u8(data[IFLA_BOND_AD_LACP_RATE]);
319 bond_opt_initval(&newval, lacp_rate);
320 err = __bond_opt_set(bond, BOND_OPT_LACP_RATE, &newval);
324 if (data[IFLA_BOND_AD_SELECT]) {
326 nla_get_u8(data[IFLA_BOND_AD_SELECT]);
328 bond_opt_initval(&newval, ad_select);
329 err = __bond_opt_set(bond, BOND_OPT_AD_SELECT, &newval);
/*
 * bond_newlink() - rtnl_link_ops->newlink hook: apply the supplied
 * attributes via bond_changelink(), then register the new bond device.
 *
 * NOTE(review): the braces, the declaration of "err", and the error check
 * between changelink and register_netdevice are missing from this extract.
 */
336 static int bond_newlink(struct net *src_net, struct net_device *bond_dev,
337 struct nlattr *tb[], struct nlattr *data[])
341 err = bond_changelink(bond_dev, tb, data);
345 return register_netdevice(bond_dev);
/*
 * bond_get_size() - rtnl_link_ops->get_size hook: worst-case byte count
 * of the IFLA_BOND_* payload that bond_fill_info() may emit, one
 * nla_total_size() term per attribute (ARP targets and AD info reserve
 * room for a nest header plus their maximum contents).
 *
 * NOTE(review): the final term / closing of the sum expression and the
 * function's closing brace are not visible in this extract.
 */
348 static size_t bond_get_size(const struct net_device *bond_dev)
350 return nla_total_size(sizeof(u8)) + /* IFLA_BOND_MODE */
351 nla_total_size(sizeof(u32)) + /* IFLA_BOND_ACTIVE_SLAVE */
352 nla_total_size(sizeof(u32)) + /* IFLA_BOND_MIIMON */
353 nla_total_size(sizeof(u32)) + /* IFLA_BOND_UPDELAY */
354 nla_total_size(sizeof(u32)) + /* IFLA_BOND_DOWNDELAY */
355 nla_total_size(sizeof(u8)) + /* IFLA_BOND_USE_CARRIER */
356 nla_total_size(sizeof(u32)) + /* IFLA_BOND_ARP_INTERVAL */
/* IFLA_BOND_ARP_IP_TARGET: nest header + up to BOND_MAX_ARP_TARGETS u32s */
358 nla_total_size(sizeof(struct nlattr)) +
359 nla_total_size(sizeof(u32)) * BOND_MAX_ARP_TARGETS +
360 nla_total_size(sizeof(u32)) + /* IFLA_BOND_ARP_VALIDATE */
361 nla_total_size(sizeof(u32)) + /* IFLA_BOND_ARP_ALL_TARGETS */
362 nla_total_size(sizeof(u32)) + /* IFLA_BOND_PRIMARY */
363 nla_total_size(sizeof(u8)) + /* IFLA_BOND_PRIMARY_RESELECT */
364 nla_total_size(sizeof(u8)) + /* IFLA_BOND_FAIL_OVER_MAC */
365 nla_total_size(sizeof(u8)) + /* IFLA_BOND_XMIT_HASH_POLICY */
366 nla_total_size(sizeof(u32)) + /* IFLA_BOND_RESEND_IGMP */
367 nla_total_size(sizeof(u8)) + /* IFLA_BOND_NUM_PEER_NOTIF */
368 nla_total_size(sizeof(u8)) + /* IFLA_BOND_ALL_SLAVES_ACTIVE */
369 nla_total_size(sizeof(u32)) + /* IFLA_BOND_MIN_LINKS */
370 nla_total_size(sizeof(u32)) + /* IFLA_BOND_LP_INTERVAL */
371 nla_total_size(sizeof(u32)) + /* IFLA_BOND_PACKETS_PER_SLAVE */
372 nla_total_size(sizeof(u8)) + /* IFLA_BOND_AD_LACP_RATE */
373 nla_total_size(sizeof(u8)) + /* IFLA_BOND_AD_SELECT */
374 nla_total_size(sizeof(struct nlattr)) + /* IFLA_BOND_AD_INFO */
375 nla_total_size(sizeof(u16)) + /* IFLA_BOND_AD_INFO_AGGREGATOR */
376 nla_total_size(sizeof(u16)) + /* IFLA_BOND_AD_INFO_NUM_PORTS */
377 nla_total_size(sizeof(u16)) + /* IFLA_BOND_AD_INFO_ACTOR_KEY */
378 nla_total_size(sizeof(u16)) + /* IFLA_BOND_AD_INFO_PARTNER_KEY*/
379 nla_total_size(ETH_ALEN) + /* IFLA_BOND_AD_INFO_PARTNER_MAC*/
/*
 * bond_fill_info() - rtnl_link_ops->fill_info hook: dump the bond's
 * current configuration as IFLA_BOND_* attributes into @skb, mirroring
 * the attributes accepted by bond_changelink().
 *
 * NOTE(review): this extract is truncated -- several condition lines, the
 * "nla_put_failure:" label, the success/error returns, and some closing
 * braces are missing. Comments describe only the visible statements.
 */
383 static int bond_fill_info(struct sk_buff *skb,
384 const struct net_device *bond_dev)
386 struct bonding *bond = netdev_priv(bond_dev);
387 struct net_device *slave_dev = bond_option_active_slave_get(bond);
388 struct nlattr *targets;
389 unsigned int packets_per_slave;
390 int i, targets_added;
392 if (nla_put_u8(skb, IFLA_BOND_MODE, bond->params.mode))
393 goto nla_put_failure;
/* Active slave is reported only when one exists (condition truncated). */
396 nla_put_u32(skb, IFLA_BOND_ACTIVE_SLAVE, slave_dev->ifindex))
397 goto nla_put_failure;
399 if (nla_put_u32(skb, IFLA_BOND_MIIMON, bond->params.miimon))
400 goto nla_put_failure;
/* Delays are stored in miimon ticks; convert back to milliseconds. */
402 if (nla_put_u32(skb, IFLA_BOND_UPDELAY,
403 bond->params.updelay * bond->params.miimon))
404 goto nla_put_failure;
406 if (nla_put_u32(skb, IFLA_BOND_DOWNDELAY,
407 bond->params.downdelay * bond->params.miimon))
408 goto nla_put_failure;
410 if (nla_put_u8(skb, IFLA_BOND_USE_CARRIER, bond->params.use_carrier))
411 goto nla_put_failure;
413 if (nla_put_u32(skb, IFLA_BOND_ARP_INTERVAL, bond->params.arp_interval))
414 goto nla_put_failure;
/* Emit non-zero ARP targets inside a nest; cancel the nest if none were
 * added (the targets_added bookkeeping lines are truncated here). */
416 targets = nla_nest_start(skb, IFLA_BOND_ARP_IP_TARGET);
418 goto nla_put_failure;
421 for (i = 0; i < BOND_MAX_ARP_TARGETS; i++) {
422 if (bond->params.arp_targets[i]) {
423 nla_put_be32(skb, i, bond->params.arp_targets[i]);
429 nla_nest_end(skb, targets);
431 nla_nest_cancel(skb, targets);
433 if (nla_put_u32(skb, IFLA_BOND_ARP_VALIDATE, bond->params.arp_validate))
434 goto nla_put_failure;
436 if (nla_put_u32(skb, IFLA_BOND_ARP_ALL_TARGETS,
437 bond->params.arp_all_targets))
438 goto nla_put_failure;
/* Primary slave is reported only when configured. */
440 if (bond->primary_slave &&
441 nla_put_u32(skb, IFLA_BOND_PRIMARY,
442 bond->primary_slave->dev->ifindex))
443 goto nla_put_failure;
445 if (nla_put_u8(skb, IFLA_BOND_PRIMARY_RESELECT,
446 bond->params.primary_reselect))
447 goto nla_put_failure;
449 if (nla_put_u8(skb, IFLA_BOND_FAIL_OVER_MAC,
450 bond->params.fail_over_mac))
451 goto nla_put_failure;
453 if (nla_put_u8(skb, IFLA_BOND_XMIT_HASH_POLICY,
454 bond->params.xmit_policy))
455 goto nla_put_failure;
457 if (nla_put_u32(skb, IFLA_BOND_RESEND_IGMP,
458 bond->params.resend_igmp))
459 goto nla_put_failure;
461 if (nla_put_u8(skb, IFLA_BOND_NUM_PEER_NOTIF,
462 bond->params.num_peer_notif))
463 goto nla_put_failure;
465 if (nla_put_u8(skb, IFLA_BOND_ALL_SLAVES_ACTIVE,
466 bond->params.all_slaves_active))
467 goto nla_put_failure;
469 if (nla_put_u32(skb, IFLA_BOND_MIN_LINKS,
470 bond->params.min_links))
471 goto nla_put_failure;
473 if (nla_put_u32(skb, IFLA_BOND_LP_INTERVAL,
474 bond->params.lp_interval))
475 goto nla_put_failure;
477 packets_per_slave = bond->params.packets_per_slave;
478 if (nla_put_u32(skb, IFLA_BOND_PACKETS_PER_SLAVE,
480 goto nla_put_failure;
482 if (nla_put_u8(skb, IFLA_BOND_AD_LACP_RATE,
483 bond->params.lacp_fast))
484 goto nla_put_failure;
486 if (nla_put_u8(skb, IFLA_BOND_AD_SELECT,
487 bond->params.ad_select))
488 goto nla_put_failure;
/* In 802.3ad mode, additionally nest the active aggregator's info. */
490 if (bond->params.mode == BOND_MODE_8023AD) {
493 if (!bond_3ad_get_active_agg_info(bond, &info)) {
496 nest = nla_nest_start(skb, IFLA_BOND_AD_INFO);
498 goto nla_put_failure;
500 if (nla_put_u16(skb, IFLA_BOND_AD_INFO_AGGREGATOR,
502 goto nla_put_failure;
503 if (nla_put_u16(skb, IFLA_BOND_AD_INFO_NUM_PORTS,
505 goto nla_put_failure;
506 if (nla_put_u16(skb, IFLA_BOND_AD_INFO_ACTOR_KEY,
508 goto nla_put_failure;
509 if (nla_put_u16(skb, IFLA_BOND_AD_INFO_PARTNER_KEY,
511 goto nla_put_failure;
512 if (nla_put(skb, IFLA_BOND_AD_INFO_PARTNER_MAC,
513 sizeof(info.partner_system),
514 &info.partner_system))
515 goto nla_put_failure;
517 nla_nest_end(skb, nest);
/*
 * rtnl_link_ops for "bond" devices: wires the netlink hooks defined in
 * this file into the rtnetlink core so bonds can be created, configured,
 * and dumped via "ip link".
 *
 * NOTE(review): the .kind/.setup members and the closing "};" are not
 * visible in this extract.
 */
527 struct rtnl_link_ops bond_link_ops __read_mostly = {
529 .priv_size = sizeof(struct bonding),
531 .maxtype = IFLA_BOND_MAX,
532 .policy = bond_policy,
533 .validate = bond_validate,
534 .newlink = bond_newlink,
535 .changelink = bond_changelink,
536 .get_size = bond_get_size,
537 .fill_info = bond_fill_info,
538 .get_num_tx_queues = bond_get_num_tx_queues,
539 .get_num_rx_queues = bond_get_num_tx_queues, /* Use the same number
							of rx queues as tx
							queues (intentional) */
/*
 * bond_netlink_init() - register bond_link_ops with the rtnetlink core
 * at module init. Returns 0 on success or a negative errno.
 * NOTE(review): braces are missing from this extract.
 */
543 int __init bond_netlink_init(void)
545 return rtnl_link_register(&bond_link_ops);
/*
 * bond_netlink_fini() - unregister bond_link_ops on module teardown;
 * destroys any remaining devices of this link kind.
 * NOTE(review): braces are missing from this extract.
 */
548 void bond_netlink_fini(void)
550 rtnl_link_unregister(&bond_link_ops);
553 MODULE_ALIAS_RTNL_LINK("bond");