/*
 * drivers/net/bond/bond_netlink.c - Netlink interface for bonding
 * Copyright (c) 2013 Jiri Pirko <jiri@resnulli.us>
 * Copyright (c) 2013 Scott Feldman <sfeldma@cumulusnetworks.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_link.h>
#include <linux/if_ether.h>
#include <net/netlink.h>
#include <net/rtnetlink.h>
#include "bonding.h"
24 int bond_get_slave(struct net_device *slave_dev, struct sk_buff *skb)
26 struct slave *slave = bond_slave_get_rtnl(slave_dev);
27 const struct aggregator *agg;
29 if (nla_put_u8(skb, IFLA_SLAVE_STATE, bond_slave_state(slave)))
32 if (nla_put_u8(skb, IFLA_SLAVE_MII_STATUS, slave->link))
35 if (nla_put_u32(skb, IFLA_SLAVE_LINK_FAILURE_COUNT,
36 slave->link_failure_count))
39 if (nla_put(skb, IFLA_SLAVE_PERM_HWADDR,
40 slave_dev->addr_len, slave->perm_hwaddr))
43 if (nla_put_u16(skb, IFLA_SLAVE_QUEUE_ID, slave->queue_id))
46 if (slave->bond->params.mode == BOND_MODE_8023AD) {
47 agg = SLAVE_AD_INFO(slave).port.aggregator;
49 if (nla_put_u16(skb, IFLA_SLAVE_AD_AGGREGATOR_ID,
50 agg->aggregator_identifier))
60 static const struct nla_policy bond_policy[IFLA_BOND_MAX + 1] = {
61 [IFLA_BOND_MODE] = { .type = NLA_U8 },
62 [IFLA_BOND_ACTIVE_SLAVE] = { .type = NLA_U32 },
63 [IFLA_BOND_MIIMON] = { .type = NLA_U32 },
64 [IFLA_BOND_UPDELAY] = { .type = NLA_U32 },
65 [IFLA_BOND_DOWNDELAY] = { .type = NLA_U32 },
66 [IFLA_BOND_USE_CARRIER] = { .type = NLA_U8 },
67 [IFLA_BOND_ARP_INTERVAL] = { .type = NLA_U32 },
68 [IFLA_BOND_ARP_IP_TARGET] = { .type = NLA_NESTED },
69 [IFLA_BOND_ARP_VALIDATE] = { .type = NLA_U32 },
70 [IFLA_BOND_ARP_ALL_TARGETS] = { .type = NLA_U32 },
71 [IFLA_BOND_PRIMARY] = { .type = NLA_U32 },
72 [IFLA_BOND_PRIMARY_RESELECT] = { .type = NLA_U8 },
73 [IFLA_BOND_FAIL_OVER_MAC] = { .type = NLA_U8 },
74 [IFLA_BOND_XMIT_HASH_POLICY] = { .type = NLA_U8 },
75 [IFLA_BOND_RESEND_IGMP] = { .type = NLA_U32 },
76 [IFLA_BOND_NUM_PEER_NOTIF] = { .type = NLA_U8 },
77 [IFLA_BOND_ALL_SLAVES_ACTIVE] = { .type = NLA_U8 },
78 [IFLA_BOND_MIN_LINKS] = { .type = NLA_U32 },
79 [IFLA_BOND_LP_INTERVAL] = { .type = NLA_U32 },
80 [IFLA_BOND_PACKETS_PER_SLAVE] = { .type = NLA_U32 },
81 [IFLA_BOND_AD_LACP_RATE] = { .type = NLA_U8 },
82 [IFLA_BOND_AD_SELECT] = { .type = NLA_U8 },
83 [IFLA_BOND_AD_INFO] = { .type = NLA_NESTED },
86 static int bond_validate(struct nlattr *tb[], struct nlattr *data[])
88 if (tb[IFLA_ADDRESS]) {
89 if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
91 if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
92 return -EADDRNOTAVAIL;
97 static int bond_changelink(struct net_device *bond_dev,
98 struct nlattr *tb[], struct nlattr *data[])
100 struct bonding *bond = netdev_priv(bond_dev);
101 struct bond_opt_value newval;
108 if (data[IFLA_BOND_MODE]) {
109 int mode = nla_get_u8(data[IFLA_BOND_MODE]);
111 bond_opt_initval(&newval, mode);
112 err = __bond_opt_set(bond, BOND_OPT_MODE, &newval);
116 if (data[IFLA_BOND_ACTIVE_SLAVE]) {
117 int ifindex = nla_get_u32(data[IFLA_BOND_ACTIVE_SLAVE]);
118 struct net_device *slave_dev;
123 slave_dev = __dev_get_by_index(dev_net(bond_dev),
128 err = bond_option_active_slave_set(bond, slave_dev);
132 if (data[IFLA_BOND_MIIMON]) {
133 miimon = nla_get_u32(data[IFLA_BOND_MIIMON]);
135 err = bond_option_miimon_set(bond, miimon);
139 if (data[IFLA_BOND_UPDELAY]) {
140 int updelay = nla_get_u32(data[IFLA_BOND_UPDELAY]);
142 err = bond_option_updelay_set(bond, updelay);
146 if (data[IFLA_BOND_DOWNDELAY]) {
147 int downdelay = nla_get_u32(data[IFLA_BOND_DOWNDELAY]);
149 err = bond_option_downdelay_set(bond, downdelay);
153 if (data[IFLA_BOND_USE_CARRIER]) {
154 int use_carrier = nla_get_u8(data[IFLA_BOND_USE_CARRIER]);
156 err = bond_option_use_carrier_set(bond, use_carrier);
160 if (data[IFLA_BOND_ARP_INTERVAL]) {
161 int arp_interval = nla_get_u32(data[IFLA_BOND_ARP_INTERVAL]);
163 if (arp_interval && miimon) {
164 pr_err("%s: ARP monitoring cannot be used with MII monitoring.\n",
169 err = bond_option_arp_interval_set(bond, arp_interval);
173 if (data[IFLA_BOND_ARP_IP_TARGET]) {
174 __be32 targets[BOND_MAX_ARP_TARGETS] = { 0, };
178 nla_for_each_nested(attr, data[IFLA_BOND_ARP_IP_TARGET], rem) {
179 __be32 target = nla_get_be32(attr);
180 targets[i++] = target;
183 err = bond_option_arp_ip_targets_set(bond, targets, i);
187 if (data[IFLA_BOND_ARP_VALIDATE]) {
188 int arp_validate = nla_get_u32(data[IFLA_BOND_ARP_VALIDATE]);
190 if (arp_validate && miimon) {
191 pr_err("%s: ARP validating cannot be used with MII monitoring.\n",
196 bond_opt_initval(&newval, arp_validate);
197 err = __bond_opt_set(bond, BOND_OPT_ARP_VALIDATE, &newval);
201 if (data[IFLA_BOND_ARP_ALL_TARGETS]) {
202 int arp_all_targets =
203 nla_get_u32(data[IFLA_BOND_ARP_ALL_TARGETS]);
205 err = bond_option_arp_all_targets_set(bond, arp_all_targets);
209 if (data[IFLA_BOND_PRIMARY]) {
210 int ifindex = nla_get_u32(data[IFLA_BOND_PRIMARY]);
211 struct net_device *dev;
214 dev = __dev_get_by_index(dev_net(bond_dev), ifindex);
218 err = bond_option_primary_set(bond, primary);
222 if (data[IFLA_BOND_PRIMARY_RESELECT]) {
223 int primary_reselect =
224 nla_get_u8(data[IFLA_BOND_PRIMARY_RESELECT]);
226 err = bond_option_primary_reselect_set(bond, primary_reselect);
230 if (data[IFLA_BOND_FAIL_OVER_MAC]) {
232 nla_get_u8(data[IFLA_BOND_FAIL_OVER_MAC]);
234 err = bond_option_fail_over_mac_set(bond, fail_over_mac);
238 if (data[IFLA_BOND_XMIT_HASH_POLICY]) {
239 int xmit_hash_policy =
240 nla_get_u8(data[IFLA_BOND_XMIT_HASH_POLICY]);
242 bond_opt_initval(&newval, xmit_hash_policy);
243 err = __bond_opt_set(bond, BOND_OPT_XMIT_HASH, &newval);
247 if (data[IFLA_BOND_RESEND_IGMP]) {
249 nla_get_u32(data[IFLA_BOND_RESEND_IGMP]);
251 err = bond_option_resend_igmp_set(bond, resend_igmp);
255 if (data[IFLA_BOND_NUM_PEER_NOTIF]) {
257 nla_get_u8(data[IFLA_BOND_NUM_PEER_NOTIF]);
259 err = bond_option_num_peer_notif_set(bond, num_peer_notif);
263 if (data[IFLA_BOND_ALL_SLAVES_ACTIVE]) {
264 int all_slaves_active =
265 nla_get_u8(data[IFLA_BOND_ALL_SLAVES_ACTIVE]);
267 err = bond_option_all_slaves_active_set(bond,
272 if (data[IFLA_BOND_MIN_LINKS]) {
274 nla_get_u32(data[IFLA_BOND_MIN_LINKS]);
276 err = bond_option_min_links_set(bond, min_links);
280 if (data[IFLA_BOND_LP_INTERVAL]) {
282 nla_get_u32(data[IFLA_BOND_LP_INTERVAL]);
284 err = bond_option_lp_interval_set(bond, lp_interval);
288 if (data[IFLA_BOND_PACKETS_PER_SLAVE]) {
289 int packets_per_slave =
290 nla_get_u32(data[IFLA_BOND_PACKETS_PER_SLAVE]);
292 bond_opt_initval(&newval, packets_per_slave);
293 err = __bond_opt_set(bond, BOND_OPT_PACKETS_PER_SLAVE, &newval);
297 if (data[IFLA_BOND_AD_LACP_RATE]) {
299 nla_get_u8(data[IFLA_BOND_AD_LACP_RATE]);
301 err = bond_option_lacp_rate_set(bond, lacp_rate);
305 if (data[IFLA_BOND_AD_SELECT]) {
307 nla_get_u8(data[IFLA_BOND_AD_SELECT]);
309 err = bond_option_ad_select_set(bond, ad_select);
/* rtnl_link_ops->newlink: create a bond device.  Options are applied via
 * bond_changelink() before the device is registered, so a bad option
 * aborts creation; returns 0 or a negative errno.
 */
static int bond_newlink(struct net *src_net, struct net_device *bond_dev,
			struct nlattr *tb[], struct nlattr *data[])
{
	int err;

	err = bond_changelink(bond_dev, tb, data);
	if (err < 0)
		return err;

	return register_netdevice(bond_dev);
}
328 static size_t bond_get_size(const struct net_device *bond_dev)
330 return nla_total_size(sizeof(u8)) + /* IFLA_BOND_MODE */
331 nla_total_size(sizeof(u32)) + /* IFLA_BOND_ACTIVE_SLAVE */
332 nla_total_size(sizeof(u32)) + /* IFLA_BOND_MIIMON */
333 nla_total_size(sizeof(u32)) + /* IFLA_BOND_UPDELAY */
334 nla_total_size(sizeof(u32)) + /* IFLA_BOND_DOWNDELAY */
335 nla_total_size(sizeof(u8)) + /* IFLA_BOND_USE_CARRIER */
336 nla_total_size(sizeof(u32)) + /* IFLA_BOND_ARP_INTERVAL */
337 /* IFLA_BOND_ARP_IP_TARGET */
338 nla_total_size(sizeof(struct nlattr)) +
339 nla_total_size(sizeof(u32)) * BOND_MAX_ARP_TARGETS +
340 nla_total_size(sizeof(u32)) + /* IFLA_BOND_ARP_VALIDATE */
341 nla_total_size(sizeof(u32)) + /* IFLA_BOND_ARP_ALL_TARGETS */
342 nla_total_size(sizeof(u32)) + /* IFLA_BOND_PRIMARY */
343 nla_total_size(sizeof(u8)) + /* IFLA_BOND_PRIMARY_RESELECT */
344 nla_total_size(sizeof(u8)) + /* IFLA_BOND_FAIL_OVER_MAC */
345 nla_total_size(sizeof(u8)) + /* IFLA_BOND_XMIT_HASH_POLICY */
346 nla_total_size(sizeof(u32)) + /* IFLA_BOND_RESEND_IGMP */
347 nla_total_size(sizeof(u8)) + /* IFLA_BOND_NUM_PEER_NOTIF */
348 nla_total_size(sizeof(u8)) + /* IFLA_BOND_ALL_SLAVES_ACTIVE */
349 nla_total_size(sizeof(u32)) + /* IFLA_BOND_MIN_LINKS */
350 nla_total_size(sizeof(u32)) + /* IFLA_BOND_LP_INTERVAL */
351 nla_total_size(sizeof(u32)) + /* IFLA_BOND_PACKETS_PER_SLAVE */
352 nla_total_size(sizeof(u8)) + /* IFLA_BOND_AD_LACP_RATE */
353 nla_total_size(sizeof(u8)) + /* IFLA_BOND_AD_SELECT */
354 nla_total_size(sizeof(struct nlattr)) + /* IFLA_BOND_AD_INFO */
355 nla_total_size(sizeof(u16)) + /* IFLA_BOND_AD_INFO_AGGREGATOR */
356 nla_total_size(sizeof(u16)) + /* IFLA_BOND_AD_INFO_NUM_PORTS */
357 nla_total_size(sizeof(u16)) + /* IFLA_BOND_AD_INFO_ACTOR_KEY */
358 nla_total_size(sizeof(u16)) + /* IFLA_BOND_AD_INFO_PARTNER_KEY*/
359 nla_total_size(ETH_ALEN) + /* IFLA_BOND_AD_INFO_PARTNER_MAC*/
363 static int bond_fill_info(struct sk_buff *skb,
364 const struct net_device *bond_dev)
366 struct bonding *bond = netdev_priv(bond_dev);
367 struct net_device *slave_dev = bond_option_active_slave_get(bond);
368 struct nlattr *targets;
369 unsigned int packets_per_slave;
370 int i, targets_added;
372 if (nla_put_u8(skb, IFLA_BOND_MODE, bond->params.mode))
373 goto nla_put_failure;
376 nla_put_u32(skb, IFLA_BOND_ACTIVE_SLAVE, slave_dev->ifindex))
377 goto nla_put_failure;
379 if (nla_put_u32(skb, IFLA_BOND_MIIMON, bond->params.miimon))
380 goto nla_put_failure;
382 if (nla_put_u32(skb, IFLA_BOND_UPDELAY,
383 bond->params.updelay * bond->params.miimon))
384 goto nla_put_failure;
386 if (nla_put_u32(skb, IFLA_BOND_DOWNDELAY,
387 bond->params.downdelay * bond->params.miimon))
388 goto nla_put_failure;
390 if (nla_put_u8(skb, IFLA_BOND_USE_CARRIER, bond->params.use_carrier))
391 goto nla_put_failure;
393 if (nla_put_u32(skb, IFLA_BOND_ARP_INTERVAL, bond->params.arp_interval))
394 goto nla_put_failure;
396 targets = nla_nest_start(skb, IFLA_BOND_ARP_IP_TARGET);
398 goto nla_put_failure;
401 for (i = 0; i < BOND_MAX_ARP_TARGETS; i++) {
402 if (bond->params.arp_targets[i]) {
403 nla_put_be32(skb, i, bond->params.arp_targets[i]);
409 nla_nest_end(skb, targets);
411 nla_nest_cancel(skb, targets);
413 if (nla_put_u32(skb, IFLA_BOND_ARP_VALIDATE, bond->params.arp_validate))
414 goto nla_put_failure;
416 if (nla_put_u32(skb, IFLA_BOND_ARP_ALL_TARGETS,
417 bond->params.arp_all_targets))
418 goto nla_put_failure;
420 if (bond->primary_slave &&
421 nla_put_u32(skb, IFLA_BOND_PRIMARY,
422 bond->primary_slave->dev->ifindex))
423 goto nla_put_failure;
425 if (nla_put_u8(skb, IFLA_BOND_PRIMARY_RESELECT,
426 bond->params.primary_reselect))
427 goto nla_put_failure;
429 if (nla_put_u8(skb, IFLA_BOND_FAIL_OVER_MAC,
430 bond->params.fail_over_mac))
431 goto nla_put_failure;
433 if (nla_put_u8(skb, IFLA_BOND_XMIT_HASH_POLICY,
434 bond->params.xmit_policy))
435 goto nla_put_failure;
437 if (nla_put_u32(skb, IFLA_BOND_RESEND_IGMP,
438 bond->params.resend_igmp))
439 goto nla_put_failure;
441 if (nla_put_u8(skb, IFLA_BOND_NUM_PEER_NOTIF,
442 bond->params.num_peer_notif))
443 goto nla_put_failure;
445 if (nla_put_u8(skb, IFLA_BOND_ALL_SLAVES_ACTIVE,
446 bond->params.all_slaves_active))
447 goto nla_put_failure;
449 if (nla_put_u32(skb, IFLA_BOND_MIN_LINKS,
450 bond->params.min_links))
451 goto nla_put_failure;
453 if (nla_put_u32(skb, IFLA_BOND_LP_INTERVAL,
454 bond->params.lp_interval))
455 goto nla_put_failure;
457 packets_per_slave = bond->params.packets_per_slave;
458 if (nla_put_u32(skb, IFLA_BOND_PACKETS_PER_SLAVE,
460 goto nla_put_failure;
462 if (nla_put_u8(skb, IFLA_BOND_AD_LACP_RATE,
463 bond->params.lacp_fast))
464 goto nla_put_failure;
466 if (nla_put_u8(skb, IFLA_BOND_AD_SELECT,
467 bond->params.ad_select))
468 goto nla_put_failure;
470 if (bond->params.mode == BOND_MODE_8023AD) {
473 if (!bond_3ad_get_active_agg_info(bond, &info)) {
476 nest = nla_nest_start(skb, IFLA_BOND_AD_INFO);
478 goto nla_put_failure;
480 if (nla_put_u16(skb, IFLA_BOND_AD_INFO_AGGREGATOR,
482 goto nla_put_failure;
483 if (nla_put_u16(skb, IFLA_BOND_AD_INFO_NUM_PORTS,
485 goto nla_put_failure;
486 if (nla_put_u16(skb, IFLA_BOND_AD_INFO_ACTOR_KEY,
488 goto nla_put_failure;
489 if (nla_put_u16(skb, IFLA_BOND_AD_INFO_PARTNER_KEY,
491 goto nla_put_failure;
492 if (nla_put(skb, IFLA_BOND_AD_INFO_PARTNER_MAC,
493 sizeof(info.partner_system),
494 &info.partner_system))
495 goto nla_put_failure;
497 nla_nest_end(skb, nest);
507 struct rtnl_link_ops bond_link_ops __read_mostly = {
509 .priv_size = sizeof(struct bonding),
511 .maxtype = IFLA_BOND_MAX,
512 .policy = bond_policy,
513 .validate = bond_validate,
514 .newlink = bond_newlink,
515 .changelink = bond_changelink,
516 .get_size = bond_get_size,
517 .fill_info = bond_fill_info,
518 .get_num_tx_queues = bond_get_num_tx_queues,
519 .get_num_rx_queues = bond_get_num_tx_queues, /* Use the same number
523 int __init bond_netlink_init(void)
525 return rtnl_link_register(&bond_link_ops);
528 void bond_netlink_fini(void)
530 rtnl_link_unregister(&bond_link_ops);
533 MODULE_ALIAS_RTNL_LINK("bond");