/*
 * drivers/net/bond/bond_netlink.c - Netlink interface for bonding
 * Copyright (c) 2013 Jiri Pirko <jiri@resnulli.us>
 * Copyright (c) 2013 Scott Feldman <sfeldma@cumulusnetworks.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */
12 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
14 #include <linux/module.h>
15 #include <linux/errno.h>
16 #include <linux/netdevice.h>
17 #include <linux/etherdevice.h>
18 #include <linux/if_link.h>
19 #include <linux/if_ether.h>
20 #include <net/netlink.h>
21 #include <net/rtnetlink.h>
/*
 * bond_get_slave() - fill IFLA_SLAVE_* attributes describing one enslaved
 * device into an rtnetlink skb.
 *
 * NOTE(review): this extract has gaps (fused line numbers jump) -- the
 * function's braces, the failure paths after each nla_put_* call
 * (presumably "goto nla_put_failure" / "return -EMSGSIZE" -- confirm
 * against the full file) and the function tail are not visible here.
 */
24 int bond_get_slave(struct net_device *slave_dev, struct sk_buff *skb)
/* Valid only under RTNL, which rtnetlink handlers hold. */
26 struct slave *slave = bond_slave_get_rtnl(slave_dev);
27 const struct aggregator *agg;
/* Basic per-slave state: active/backup role and MII link status. */
29 if (nla_put_u8(skb, IFLA_SLAVE_STATE, bond_slave_state(slave)))
32 if (nla_put_u8(skb, IFLA_SLAVE_MII_STATUS, slave->link))
35 if (nla_put_u32(skb, IFLA_SLAVE_LINK_FAILURE_COUNT,
36 slave->link_failure_count))
/* Permanent HW address recorded for the slave (addr_len bytes). */
39 if (nla_put(skb, IFLA_SLAVE_PERM_HWADDR,
40 slave_dev->addr_len, slave->perm_hwaddr))
43 if (nla_put_u16(skb, IFLA_SLAVE_QUEUE_ID, slave->queue_id))
/* 802.3ad mode only: report the aggregator id this port belongs to.
 * NOTE(review): no visible NULL check on "agg" before the dereference
 * below -- verify the elided lines guard it. */
46 if (slave->bond->params.mode == BOND_MODE_8023AD) {
47 agg = SLAVE_AD_INFO(slave).port.aggregator;
49 if (nla_put_u16(skb, IFLA_SLAVE_AD_AGGREGATOR_ID,
50 agg->aggregator_identifier))
/*
 * Netlink attribute policy for IFLA_BOND_* options: declares the expected
 * type of each attribute so rtnetlink can validate messages before
 * bond_changelink() runs.  Nested attributes (ARP_IP_TARGET, AD_INFO)
 * are parsed by hand in the handlers.
 *
 * NOTE(review): the closing "};" of this table is elided in this extract.
 */
60 static const struct nla_policy bond_policy[IFLA_BOND_MAX + 1] = {
61 [IFLA_BOND_MODE] = { .type = NLA_U8 },
62 [IFLA_BOND_ACTIVE_SLAVE] = { .type = NLA_U32 },
63 [IFLA_BOND_MIIMON] = { .type = NLA_U32 },
64 [IFLA_BOND_UPDELAY] = { .type = NLA_U32 },
65 [IFLA_BOND_DOWNDELAY] = { .type = NLA_U32 },
66 [IFLA_BOND_USE_CARRIER] = { .type = NLA_U8 },
67 [IFLA_BOND_ARP_INTERVAL] = { .type = NLA_U32 },
68 [IFLA_BOND_ARP_IP_TARGET] = { .type = NLA_NESTED },
69 [IFLA_BOND_ARP_VALIDATE] = { .type = NLA_U32 },
70 [IFLA_BOND_ARP_ALL_TARGETS] = { .type = NLA_U32 },
71 [IFLA_BOND_PRIMARY] = { .type = NLA_U32 },
72 [IFLA_BOND_PRIMARY_RESELECT] = { .type = NLA_U8 },
73 [IFLA_BOND_FAIL_OVER_MAC] = { .type = NLA_U8 },
74 [IFLA_BOND_XMIT_HASH_POLICY] = { .type = NLA_U8 },
75 [IFLA_BOND_RESEND_IGMP] = { .type = NLA_U32 },
76 [IFLA_BOND_NUM_PEER_NOTIF] = { .type = NLA_U8 },
77 [IFLA_BOND_ALL_SLAVES_ACTIVE] = { .type = NLA_U8 },
78 [IFLA_BOND_MIN_LINKS] = { .type = NLA_U32 },
79 [IFLA_BOND_LP_INTERVAL] = { .type = NLA_U32 },
80 [IFLA_BOND_PACKETS_PER_SLAVE] = { .type = NLA_U32 },
81 [IFLA_BOND_AD_LACP_RATE] = { .type = NLA_U8 },
82 [IFLA_BOND_AD_SELECT] = { .type = NLA_U8 },
83 [IFLA_BOND_AD_INFO] = { .type = NLA_NESTED },
/*
 * bond_validate() - rtnl_link_ops->validate hook; sanity-check generic
 * IFLA attributes before a bond device is created or changed.  Only
 * IFLA_ADDRESS is checked: it must be an ETH_ALEN-byte, valid (unicast,
 * non-zero) Ethernet address.
 *
 * NOTE(review): the return on the bad-length path (presumably -EINVAL),
 * the final "return 0" and the closing braces are elided in this extract.
 */
86 static int bond_validate(struct nlattr *tb[], struct nlattr *data[])
88 if (tb[IFLA_ADDRESS]) {
89 if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
91 if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
92 return -EADDRNOTAVAIL;
/*
 * bond_changelink() - apply IFLA_BOND_* netlink attributes to a bond.
 * Each attribute present in data[] is translated into a bonding option
 * change, either via the generic bond_opt_initval()/__bond_opt_set()
 * machinery or a dedicated bond_option_*_set() helper.  Also called from
 * bond_newlink() so "ip link add ... type bond <opts>" works.
 *
 * NOTE(review): this extract is incomplete -- declarations of several
 * locals used below (e.g. "err", "miimon", "i", "attr", "rem",
 * "primary"), the per-option "if (err) return err;" checks, ifindex
 * lookup failure handling, and many closing braces fall on elided lines.
 * Comments describe only the code that is visible.
 */
97 static int bond_changelink(struct net_device *bond_dev,
98 struct nlattr *tb[], struct nlattr *data[])
100 struct bonding *bond = netdev_priv(bond_dev);
/* Scratch value reused for every __bond_opt_set() call below. */
101 struct bond_opt_value newval;
108 if (data[IFLA_BOND_MODE]) {
109 int mode = nla_get_u8(data[IFLA_BOND_MODE]);
111 bond_opt_initval(&newval, mode);
112 err = __bond_opt_set(bond, BOND_OPT_MODE, &newval);
/* Active slave is addressed by ifindex, resolved in the bond's netns. */
116 if (data[IFLA_BOND_ACTIVE_SLAVE]) {
117 int ifindex = nla_get_u32(data[IFLA_BOND_ACTIVE_SLAVE]);
118 struct net_device *slave_dev;
123 slave_dev = __dev_get_by_index(dev_net(bond_dev),
128 err = bond_option_active_slave_set(bond, slave_dev);
/* "miimon" is read again below to reject conflicting ARP settings. */
132 if (data[IFLA_BOND_MIIMON]) {
133 miimon = nla_get_u32(data[IFLA_BOND_MIIMON]);
135 bond_opt_initval(&newval, miimon);
136 err = __bond_opt_set(bond, BOND_OPT_MIIMON, &newval);
140 if (data[IFLA_BOND_UPDELAY]) {
141 int updelay = nla_get_u32(data[IFLA_BOND_UPDELAY]);
143 bond_opt_initval(&newval, updelay);
144 err = __bond_opt_set(bond, BOND_OPT_UPDELAY, &newval);
148 if (data[IFLA_BOND_DOWNDELAY]) {
149 int downdelay = nla_get_u32(data[IFLA_BOND_DOWNDELAY]);
151 bond_opt_initval(&newval, downdelay);
152 err = __bond_opt_set(bond, BOND_OPT_DOWNDELAY, &newval);
156 if (data[IFLA_BOND_USE_CARRIER]) {
157 int use_carrier = nla_get_u8(data[IFLA_BOND_USE_CARRIER]);
159 err = bond_option_use_carrier_set(bond, use_carrier);
/* ARP monitoring and MII monitoring are mutually exclusive. */
163 if (data[IFLA_BOND_ARP_INTERVAL]) {
164 int arp_interval = nla_get_u32(data[IFLA_BOND_ARP_INTERVAL]);
166 if (arp_interval && miimon) {
167 pr_err("%s: ARP monitoring cannot be used with MII monitoring.\n",
172 bond_opt_initval(&newval, arp_interval);
173 err = __bond_opt_set(bond, BOND_OPT_ARP_INTERVAL, &newval);
/* ARP targets are replaced wholesale: clear the old set, then add each
 * nested be32 address from the message. */
177 if (data[IFLA_BOND_ARP_IP_TARGET]) {
181 bond_option_arp_ip_targets_clear(bond);
182 nla_for_each_nested(attr, data[IFLA_BOND_ARP_IP_TARGET], rem) {
183 __be32 target = nla_get_be32(attr);
185 bond_opt_initval(&newval, target);
186 err = __bond_opt_set(bond, BOND_OPT_ARP_TARGETS,
/* Warn (don't fail) if the last target was removed while the ARP
 * monitor is still enabled. */
192 if (i == 0 && bond->params.arp_interval)
193 pr_warn("%s: removing last arp target with arp_interval on\n",
198 if (data[IFLA_BOND_ARP_VALIDATE]) {
199 int arp_validate = nla_get_u32(data[IFLA_BOND_ARP_VALIDATE]);
201 if (arp_validate && miimon) {
202 pr_err("%s: ARP validating cannot be used with MII monitoring.\n",
207 bond_opt_initval(&newval, arp_validate);
208 err = __bond_opt_set(bond, BOND_OPT_ARP_VALIDATE, &newval);
212 if (data[IFLA_BOND_ARP_ALL_TARGETS]) {
213 int arp_all_targets =
214 nla_get_u32(data[IFLA_BOND_ARP_ALL_TARGETS]);
216 bond_opt_initval(&newval, arp_all_targets);
217 err = __bond_opt_set(bond, BOND_OPT_ARP_ALL_TARGETS, &newval);
/* Primary slave arrives as an ifindex but BOND_OPT_PRIMARY takes a
 * string; "primary" is presumably dev->name, set on an elided line --
 * confirm against the full file. */
221 if (data[IFLA_BOND_PRIMARY]) {
222 int ifindex = nla_get_u32(data[IFLA_BOND_PRIMARY]);
223 struct net_device *dev;
226 dev = __dev_get_by_index(dev_net(bond_dev), ifindex);
230 bond_opt_initstr(&newval, primary);
231 err = __bond_opt_set(bond, BOND_OPT_PRIMARY, &newval);
235 if (data[IFLA_BOND_PRIMARY_RESELECT]) {
236 int primary_reselect =
237 nla_get_u8(data[IFLA_BOND_PRIMARY_RESELECT]);
239 err = bond_option_primary_reselect_set(bond, primary_reselect);
243 if (data[IFLA_BOND_FAIL_OVER_MAC]) {
245 nla_get_u8(data[IFLA_BOND_FAIL_OVER_MAC]);
247 bond_opt_initval(&newval, fail_over_mac);
248 err = __bond_opt_set(bond, BOND_OPT_FAIL_OVER_MAC, &newval);
252 if (data[IFLA_BOND_XMIT_HASH_POLICY]) {
253 int xmit_hash_policy =
254 nla_get_u8(data[IFLA_BOND_XMIT_HASH_POLICY]);
256 bond_opt_initval(&newval, xmit_hash_policy);
257 err = __bond_opt_set(bond, BOND_OPT_XMIT_HASH, &newval);
261 if (data[IFLA_BOND_RESEND_IGMP]) {
263 nla_get_u32(data[IFLA_BOND_RESEND_IGMP]);
265 err = bond_option_resend_igmp_set(bond, resend_igmp);
269 if (data[IFLA_BOND_NUM_PEER_NOTIF]) {
271 nla_get_u8(data[IFLA_BOND_NUM_PEER_NOTIF]);
273 bond_opt_initval(&newval, num_peer_notif);
274 err = __bond_opt_set(bond, BOND_OPT_NUM_PEER_NOTIF, &newval);
278 if (data[IFLA_BOND_ALL_SLAVES_ACTIVE]) {
279 int all_slaves_active =
280 nla_get_u8(data[IFLA_BOND_ALL_SLAVES_ACTIVE]);
282 err = bond_option_all_slaves_active_set(bond,
287 if (data[IFLA_BOND_MIN_LINKS]) {
289 nla_get_u32(data[IFLA_BOND_MIN_LINKS]);
291 bond_opt_initval(&newval, min_links);
292 err = __bond_opt_set(bond, BOND_OPT_MINLINKS, &newval);
296 if (data[IFLA_BOND_LP_INTERVAL]) {
298 nla_get_u32(data[IFLA_BOND_LP_INTERVAL]);
300 err = bond_option_lp_interval_set(bond, lp_interval);
304 if (data[IFLA_BOND_PACKETS_PER_SLAVE]) {
305 int packets_per_slave =
306 nla_get_u32(data[IFLA_BOND_PACKETS_PER_SLAVE]);
308 bond_opt_initval(&newval, packets_per_slave);
309 err = __bond_opt_set(bond, BOND_OPT_PACKETS_PER_SLAVE, &newval);
313 if (data[IFLA_BOND_AD_LACP_RATE]) {
315 nla_get_u8(data[IFLA_BOND_AD_LACP_RATE]);
317 bond_opt_initval(&newval, lacp_rate);
318 err = __bond_opt_set(bond, BOND_OPT_LACP_RATE, &newval);
322 if (data[IFLA_BOND_AD_SELECT]) {
324 nla_get_u8(data[IFLA_BOND_AD_SELECT]);
326 bond_opt_initval(&newval, ad_select);
327 err = __bond_opt_set(bond, BOND_OPT_AD_SELECT, &newval);
/*
 * bond_newlink() - rtnl_link_ops->newlink hook for "ip link add type bond".
 * Applies any IFLA_BOND_* options first via bond_changelink(), then
 * registers the new device.
 *
 * NOTE(review): the "err" declaration, the error check between the two
 * calls, and the braces are on lines elided from this extract.
 */
334 static int bond_newlink(struct net *src_net, struct net_device *bond_dev,
335 struct nlattr *tb[], struct nlattr *data[])
339 err = bond_changelink(bond_dev, tb, data);
343 return register_netdevice(bond_dev);
/*
 * bond_get_size() - rtnl_link_ops->get_size hook: worst-case number of
 * bytes bond_fill_info() may add to a link dump, computed as the sum of
 * nla_total_size() for every attribute it can emit.  Must be kept in
 * sync with bond_fill_info() whenever an attribute is added.
 *
 * NOTE(review): the tail of the expression (final term and closing
 * brace) is elided in this extract.
 */
346 static size_t bond_get_size(const struct net_device *bond_dev)
348 return nla_total_size(sizeof(u8)) + /* IFLA_BOND_MODE */
349 nla_total_size(sizeof(u32)) + /* IFLA_BOND_ACTIVE_SLAVE */
350 nla_total_size(sizeof(u32)) + /* IFLA_BOND_MIIMON */
351 nla_total_size(sizeof(u32)) + /* IFLA_BOND_UPDELAY */
352 nla_total_size(sizeof(u32)) + /* IFLA_BOND_DOWNDELAY */
353 nla_total_size(sizeof(u8)) + /* IFLA_BOND_USE_CARRIER */
354 nla_total_size(sizeof(u32)) + /* IFLA_BOND_ARP_INTERVAL */
/* IFLA_BOND_ARP_IP_TARGET: one nest header plus up to
 * BOND_MAX_ARP_TARGETS u32 entries. */
356 nla_total_size(sizeof(struct nlattr)) +
357 nla_total_size(sizeof(u32)) * BOND_MAX_ARP_TARGETS +
358 nla_total_size(sizeof(u32)) + /* IFLA_BOND_ARP_VALIDATE */
359 nla_total_size(sizeof(u32)) + /* IFLA_BOND_ARP_ALL_TARGETS */
360 nla_total_size(sizeof(u32)) + /* IFLA_BOND_PRIMARY */
361 nla_total_size(sizeof(u8)) + /* IFLA_BOND_PRIMARY_RESELECT */
362 nla_total_size(sizeof(u8)) + /* IFLA_BOND_FAIL_OVER_MAC */
363 nla_total_size(sizeof(u8)) + /* IFLA_BOND_XMIT_HASH_POLICY */
364 nla_total_size(sizeof(u32)) + /* IFLA_BOND_RESEND_IGMP */
365 nla_total_size(sizeof(u8)) + /* IFLA_BOND_NUM_PEER_NOTIF */
366 nla_total_size(sizeof(u8)) + /* IFLA_BOND_ALL_SLAVES_ACTIVE */
367 nla_total_size(sizeof(u32)) + /* IFLA_BOND_MIN_LINKS */
368 nla_total_size(sizeof(u32)) + /* IFLA_BOND_LP_INTERVAL */
369 nla_total_size(sizeof(u32)) + /* IFLA_BOND_PACKETS_PER_SLAVE */
370 nla_total_size(sizeof(u8)) + /* IFLA_BOND_AD_LACP_RATE */
371 nla_total_size(sizeof(u8)) + /* IFLA_BOND_AD_SELECT */
372 nla_total_size(sizeof(struct nlattr)) + /* IFLA_BOND_AD_INFO */
373 nla_total_size(sizeof(u16)) + /* IFLA_BOND_AD_INFO_AGGREGATOR */
374 nla_total_size(sizeof(u16)) + /* IFLA_BOND_AD_INFO_NUM_PORTS */
375 nla_total_size(sizeof(u16)) + /* IFLA_BOND_AD_INFO_ACTOR_KEY */
376 nla_total_size(sizeof(u16)) + /* IFLA_BOND_AD_INFO_PARTNER_KEY*/
377 nla_total_size(ETH_ALEN) + /* IFLA_BOND_AD_INFO_PARTNER_MAC*/
/*
 * bond_fill_info() - rtnl_link_ops->fill_info hook: dump the bond's
 * current configuration as IFLA_BOND_* attributes.  The emitted set must
 * stay within the budget computed by bond_get_size().
 *
 * NOTE(review): this extract is missing lines -- the opening brace, some
 * conditions (e.g. the "if (slave_dev &&" guarding ACTIVE_SLAVE, the
 * "!targets" check, the AD_INFO field arguments), the final "return 0"
 * and the "nla_put_failure:" label/return are all elided.  Comments
 * describe only the visible code.
 */
381 static int bond_fill_info(struct sk_buff *skb,
382 const struct net_device *bond_dev)
384 struct bonding *bond = netdev_priv(bond_dev);
/* Currently active slave (may be NULL; the guard is on an elided line). */
385 struct net_device *slave_dev = bond_option_active_slave_get(bond);
386 struct nlattr *targets;
387 unsigned int packets_per_slave;
388 int i, targets_added;
390 if (nla_put_u8(skb, IFLA_BOND_MODE, bond->params.mode))
391 goto nla_put_failure;
394 nla_put_u32(skb, IFLA_BOND_ACTIVE_SLAVE, slave_dev->ifindex))
395 goto nla_put_failure;
397 if (nla_put_u32(skb, IFLA_BOND_MIIMON, bond->params.miimon))
398 goto nla_put_failure;
/* updelay/downdelay are stored internally in units of miimon intervals;
 * multiply back out for reporting (presumably yielding milliseconds --
 * confirm against bonding documentation). */
400 if (nla_put_u32(skb, IFLA_BOND_UPDELAY,
401 bond->params.updelay * bond->params.miimon))
402 goto nla_put_failure;
404 if (nla_put_u32(skb, IFLA_BOND_DOWNDELAY,
405 bond->params.downdelay * bond->params.miimon))
406 goto nla_put_failure;
408 if (nla_put_u8(skb, IFLA_BOND_USE_CARRIER, bond->params.use_carrier))
409 goto nla_put_failure;
411 if (nla_put_u32(skb, IFLA_BOND_ARP_INTERVAL, bond->params.arp_interval))
412 goto nla_put_failure;
/* ARP targets go in a nested attribute, one be32 per configured slot;
 * the nest is cancelled (elided "else" path) when no target was added. */
414 targets = nla_nest_start(skb, IFLA_BOND_ARP_IP_TARGET);
416 goto nla_put_failure;
419 for (i = 0; i < BOND_MAX_ARP_TARGETS; i++) {
420 if (bond->params.arp_targets[i]) {
/* NOTE(review): nla_put_be32()'s return value is not checked on this
 * visible line -- verify the elided lines handle it. */
421 nla_put_be32(skb, i, bond->params.arp_targets[i]);
427 nla_nest_end(skb, targets);
429 nla_nest_cancel(skb, targets);
431 if (nla_put_u32(skb, IFLA_BOND_ARP_VALIDATE, bond->params.arp_validate))
432 goto nla_put_failure;
434 if (nla_put_u32(skb, IFLA_BOND_ARP_ALL_TARGETS,
435 bond->params.arp_all_targets))
436 goto nla_put_failure;
/* Primary slave is optional; only report it when one is configured. */
438 if (bond->primary_slave &&
439 nla_put_u32(skb, IFLA_BOND_PRIMARY,
440 bond->primary_slave->dev->ifindex))
441 goto nla_put_failure;
443 if (nla_put_u8(skb, IFLA_BOND_PRIMARY_RESELECT,
444 bond->params.primary_reselect))
445 goto nla_put_failure;
447 if (nla_put_u8(skb, IFLA_BOND_FAIL_OVER_MAC,
448 bond->params.fail_over_mac))
449 goto nla_put_failure;
451 if (nla_put_u8(skb, IFLA_BOND_XMIT_HASH_POLICY,
452 bond->params.xmit_policy))
453 goto nla_put_failure;
455 if (nla_put_u32(skb, IFLA_BOND_RESEND_IGMP,
456 bond->params.resend_igmp))
457 goto nla_put_failure;
459 if (nla_put_u8(skb, IFLA_BOND_NUM_PEER_NOTIF,
460 bond->params.num_peer_notif))
461 goto nla_put_failure;
463 if (nla_put_u8(skb, IFLA_BOND_ALL_SLAVES_ACTIVE,
464 bond->params.all_slaves_active))
465 goto nla_put_failure;
467 if (nla_put_u32(skb, IFLA_BOND_MIN_LINKS,
468 bond->params.min_links))
469 goto nla_put_failure;
471 if (nla_put_u32(skb, IFLA_BOND_LP_INTERVAL,
472 bond->params.lp_interval))
473 goto nla_put_failure;
/* Value put for PACKETS_PER_SLAVE is on an elided line (presumably a
 * transformed copy of packets_per_slave -- confirm in the full file). */
475 packets_per_slave = bond->params.packets_per_slave;
476 if (nla_put_u32(skb, IFLA_BOND_PACKETS_PER_SLAVE,
478 goto nla_put_failure;
480 if (nla_put_u8(skb, IFLA_BOND_AD_LACP_RATE,
481 bond->params.lacp_fast))
482 goto nla_put_failure;
484 if (nla_put_u8(skb, IFLA_BOND_AD_SELECT,
485 bond->params.ad_select))
486 goto nla_put_failure;
/* 802.3ad mode: nest the active aggregator's info, if any. The "info"
 * and "nest" locals, and the field expressions passed to the nla_put_u16
 * calls, are on elided lines. */
488 if (bond->params.mode == BOND_MODE_8023AD) {
491 if (!bond_3ad_get_active_agg_info(bond, &info)) {
494 nest = nla_nest_start(skb, IFLA_BOND_AD_INFO);
496 goto nla_put_failure;
498 if (nla_put_u16(skb, IFLA_BOND_AD_INFO_AGGREGATOR,
500 goto nla_put_failure;
501 if (nla_put_u16(skb, IFLA_BOND_AD_INFO_NUM_PORTS,
503 goto nla_put_failure;
504 if (nla_put_u16(skb, IFLA_BOND_AD_INFO_ACTOR_KEY,
506 goto nla_put_failure;
507 if (nla_put_u16(skb, IFLA_BOND_AD_INFO_PARTNER_KEY,
509 goto nla_put_failure;
510 if (nla_put(skb, IFLA_BOND_AD_INFO_PARTNER_MAC,
511 sizeof(info.partner_system),
512 &info.partner_system))
513 goto nla_put_failure;
515 nla_nest_end(skb, nest);
/*
 * rtnetlink glue for the bonding driver: wires the hooks defined above
 * into the rtnl_link framework so bonds can be created, configured and
 * dumped via "ip link".  RX queue count deliberately mirrors the TX
 * queue count (same helper is used for both).
 *
 * NOTE(review): the ".kind" initializer and the closing "};" are on
 * lines elided from this extract.
 */
525 struct rtnl_link_ops bond_link_ops __read_mostly = {
527 .priv_size = sizeof(struct bonding),
529 .maxtype = IFLA_BOND_MAX,
530 .policy = bond_policy,
531 .validate = bond_validate,
532 .newlink = bond_newlink,
533 .changelink = bond_changelink,
534 .get_size = bond_get_size,
535 .fill_info = bond_fill_info,
536 .get_num_tx_queues = bond_get_num_tx_queues,
537 .get_num_rx_queues = bond_get_num_tx_queues, /* Use the same number
/*
 * bond_netlink_init() - register the bonding rtnl_link_ops with the
 * rtnetlink core at module init.  Returns 0 or a negative errno from
 * rtnl_link_register().  (Braces elided in this extract.)
 */
541 int __init bond_netlink_init(void)
543 return rtnl_link_register(&bond_link_ops);
/*
 * bond_netlink_fini() - unregister the rtnl_link_ops on module exit,
 * undoing bond_netlink_init().  (Braces elided in this extract.)
 */
546 void bond_netlink_fini(void)
548 rtnl_link_unregister(&bond_link_ops);
/* Allow module autoload when userspace creates an rtnl link of kind "bond". */
551 MODULE_ALIAS_RTNL_LINK("bond");