/*
 * net/switchdev/switchdev.c - Switch device API
 * Copyright (c) 2014 Jiri Pirko <jiri@resnulli.us>
 * Copyright (c) 2014-2015 Scott Feldman <sfeldma@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */
12 #include <linux/kernel.h>
13 #include <linux/types.h>
14 #include <linux/init.h>
15 #include <linux/mutex.h>
16 #include <linux/notifier.h>
17 #include <linux/netdevice.h>
18 #include <linux/if_bridge.h>
19 #include <net/ip_fib.h>
20 #include <net/switchdev.h>
/**
 * switchdev_port_attr_get - Get port attribute
 *
 * @dev: port device
 * @attr: attribute to get
 *
 * NOTE(review): this file is garbled by extraction — original lines
 * (braces, break/return statements) are missing between the visible
 * ones; comments below describe only what is visible.
 */
28 int switchdev_port_attr_get(struct net_device *dev, struct switchdev_attr *attr)
30 const struct switchdev_ops *ops = dev->switchdev_ops;
31 struct net_device *lower_dev;
32 struct list_head *iter;
33 struct switchdev_attr first = {
34 .id = SWITCHDEV_ATTR_UNDEFINED
36 int err = -EOPNOTSUPP;
/* Device implements the op itself: delegate and return directly. */
38 if (ops && ops->switchdev_port_attr_get)
39 return ops->switchdev_port_attr_get(dev, attr);
/* Caller asked for no recursion into lower devices. */
41 if (attr->flags & SWITCHDEV_F_NO_RECURSE)
/* Switch device port(s) may be stacked under
 * bond/team/vlan dev, so recurse down to get attr on
 * each port.  Return -ENODATA if attr values don't
 * compare across ports.
 */
50 netdev_for_each_lower_dev(dev, lower_dev, iter) {
51 err = switchdev_port_attr_get(lower_dev, attr);
/* First port seen: remember its attr, then compare all others to it. */
54 if (first.id == SWITCHDEV_ATTR_UNDEFINED)
56 else if (memcmp(&first, attr, sizeof(*attr)))
62 EXPORT_SYMBOL_GPL(switchdev_port_attr_get);
/* Set @attr on @dev via its switchdev ops, or recurse into stacked
 * lower devices when @dev has no ops (unless SWITCHDEV_F_NO_RECURSE
 * is set).  Returns -EOPNOTSUPP when nothing could handle the attr.
 */
64 static int __switchdev_port_attr_set(struct net_device *dev,
65 struct switchdev_attr *attr)
67 const struct switchdev_ops *ops = dev->switchdev_ops;
68 struct net_device *lower_dev;
69 struct list_head *iter;
70 int err = -EOPNOTSUPP;
72 if (ops && ops->switchdev_port_attr_set)
73 return ops->switchdev_port_attr_set(dev, attr);
75 if (attr->flags & SWITCHDEV_F_NO_RECURSE)
/* Switch device port(s) may be stacked under
 * bond/team/vlan dev, so recurse down to set attr on
 * each port.
 */
83 netdev_for_each_lower_dev(dev, lower_dev, iter) {
84 err = __switchdev_port_attr_set(lower_dev, attr);
/* Context for a deferred attr set: carries the target device and a
 * copy of the attr into the workqueue worker.
 */
92 struct switchdev_attr_set_work {
93 struct work_struct work;
94 struct net_device *dev;
95 struct switchdev_attr attr;
/* Worker: performs the deferred attr set from process context.
 * NOTE(review): the rtnl_lock/unlock, dev_put and kfree lines are not
 * visible in this extraction — confirm against upstream.
 */
98 static void switchdev_port_attr_set_work(struct work_struct *work)
100 struct switchdev_attr_set_work *asw =
101 container_of(work, struct switchdev_attr_set_work, work);
105 err = switchdev_port_attr_set(asw->dev, &asw->attr);
/* -EOPNOTSUPP is an expected outcome, not worth logging. */
106 if (err && err != -EOPNOTSUPP)
107 netdev_err(asw->dev, "failed (err=%d) to set attribute (id=%d)\n",
/* Schedule an attr set to run later from a worker that can take
 * rtnl_lock.  GFP_ATOMIC because this path may run in a context that
 * cannot sleep.  The worker receives a copy of @attr.
 */
115 static int switchdev_port_attr_set_defer(struct net_device *dev,
116 struct switchdev_attr *attr)
118 struct switchdev_attr_set_work *asw;
120 asw = kmalloc(sizeof(*asw), GFP_ATOMIC);
124 INIT_WORK(&asw->work, switchdev_port_attr_set_work);
128 memcpy(&asw->attr, attr, sizeof(asw->attr));
130 schedule_work(&asw->work);
/**
 * switchdev_port_attr_set - Set port attribute
 *
 * @dev: port device
 * @attr: attribute to set
 *
 * Use a 2-phase prepare-commit transaction model to ensure
 * system is not left in a partially updated state due to
 * failure from driver/device.
 */
145 int switchdev_port_attr_set(struct net_device *dev, struct switchdev_attr *attr)
149 if (!rtnl_is_locked()) {
/* Running prepare-commit transaction across stacked
 * devices requires nothing moves, so if rtnl_lock is
 * not held, schedule a worker thread to hold rtnl_lock
 * while setting attr.
 */
156 return switchdev_port_attr_set_defer(dev, attr);
/* Phase I: prepare for attr set.  Driver/device should fail
 * here if there are going to be issues in the commit phase,
 * such as lack of resources or support.  The driver/device
 * should reserve resources needed for the commit phase here,
 * but should not commit the attr.
 */
166 attr->trans = SWITCHDEV_TRANS_PREPARE;
167 err = __switchdev_port_attr_set(dev, attr);
/* Prepare phase failed: abort the transaction so any
 * resources reserved in the prepare phase are released.
 */
174 if (err != -EOPNOTSUPP) {
175 attr->trans = SWITCHDEV_TRANS_ABORT;
176 __switchdev_port_attr_set(dev, attr);
/* Phase II: commit attr set.  This cannot fail as a fault
 * of driver/device.  If it does, it's a bug in the driver/device
 * because the driver said everything was OK in phase I.
 */
187 attr->trans = SWITCHDEV_TRANS_COMMIT;
188 err = __switchdev_port_attr_set(dev, attr);
189 WARN(err, "%s: Commit of attribute (id=%d) failed.\n",
190 dev->name, attr->id);
/* Add @obj on @dev via its switchdev ops, or recurse into stacked
 * lower devices when @dev has no ops.  Returns -EOPNOTSUPP when no
 * device in the stack could handle the object.
 */
196 static int __switchdev_port_obj_add(struct net_device *dev,
197 struct switchdev_obj *obj)
199 const struct switchdev_ops *ops = dev->switchdev_ops;
200 struct net_device *lower_dev;
201 struct list_head *iter;
202 int err = -EOPNOTSUPP;
204 if (ops && ops->switchdev_port_obj_add)
205 return ops->switchdev_port_obj_add(dev, obj);
/* Switch device port(s) may be stacked under
 * bond/team/vlan dev, so recurse down to add object on
 * each port.
 */
212 netdev_for_each_lower_dev(dev, lower_dev, iter) {
213 err = __switchdev_port_obj_add(lower_dev, obj);
/**
 * switchdev_port_obj_add - Add port object
 *
 * @dev: port device
 * @obj: object to add
 *
 * Use a 2-phase prepare-commit transaction model to ensure
 * system is not left in a partially updated state due to
 * failure from driver/device.
 *
 * rtnl_lock must be held.
 */
233 int switchdev_port_obj_add(struct net_device *dev, struct switchdev_obj *obj)
/* Phase I: prepare for obj add.  Driver/device should fail
 * here if there are going to be issues in the commit phase,
 * such as lack of resources or support.  The driver/device
 * should reserve resources needed for the commit phase here,
 * but should not commit the obj.
 */
246 obj->trans = SWITCHDEV_TRANS_PREPARE;
247 err = __switchdev_port_obj_add(dev, obj);
/* Prepare phase failed: abort the transaction so any
 * resources reserved in the prepare phase are released.
 */
254 if (err != -EOPNOTSUPP) {
255 obj->trans = SWITCHDEV_TRANS_ABORT;
256 __switchdev_port_obj_add(dev, obj);
/* Phase II: commit obj add.  This cannot fail as a fault
 * of driver/device.  If it does, it's a bug in the driver/device
 * because the driver said everything was OK in phase I.
 */
267 obj->trans = SWITCHDEV_TRANS_COMMIT;
268 err = __switchdev_port_obj_add(dev, obj);
269 WARN(err, "%s: Commit of object (id=%d) failed.\n", dev->name, obj->id);
273 EXPORT_SYMBOL_GPL(switchdev_port_obj_add);
/**
 * switchdev_port_obj_del - Delete port object
 *
 * @dev: port device
 * @obj: object to delete
 *
 * Delegates to the device's switchdev ops, or recurses into stacked
 * lower devices when the device has none.
 */
281 int switchdev_port_obj_del(struct net_device *dev, struct switchdev_obj *obj)
283 const struct switchdev_ops *ops = dev->switchdev_ops;
284 struct net_device *lower_dev;
285 struct list_head *iter;
286 int err = -EOPNOTSUPP;
288 if (ops && ops->switchdev_port_obj_del)
289 return ops->switchdev_port_obj_del(dev, obj);
/* Switch device port(s) may be stacked under
 * bond/team/vlan dev, so recurse down to delete object on
 * each port.
 */
296 netdev_for_each_lower_dev(dev, lower_dev, iter) {
297 err = switchdev_port_obj_del(lower_dev, obj);
304 EXPORT_SYMBOL_GPL(switchdev_port_obj_del);
/**
 * switchdev_port_obj_dump - Dump port objects
 *
 * @dev: port device
 * @obj: object to dump
 *
 * Delegates to the device's switchdev ops, or recurses to the first
 * lower device (bottom of the stack) able to dump the objects.
 */
312 int switchdev_port_obj_dump(struct net_device *dev, struct switchdev_obj *obj)
314 const struct switchdev_ops *ops = dev->switchdev_ops;
315 struct net_device *lower_dev;
316 struct list_head *iter;
317 int err = -EOPNOTSUPP;
319 if (ops && ops->switchdev_port_obj_dump)
320 return ops->switchdev_port_obj_dump(dev, obj);
/* Switch device port(s) may be stacked under
 * bond/team/vlan dev, so recurse down to dump objects on
 * first port at bottom of stack.
 */
327 netdev_for_each_lower_dev(dev, lower_dev, iter) {
328 err = switchdev_port_obj_dump(lower_dev, obj);
334 EXPORT_SYMBOL_GPL(switchdev_port_obj_dump);
/* Notifier chain for switchdev events; switchdev_mutex serializes
 * registration, unregistration and chain calls (raw chain does no
 * locking of its own).
 */
336 static DEFINE_MUTEX(switchdev_mutex);
337 static RAW_NOTIFIER_HEAD(switchdev_notif_chain);
/**
 * register_switchdev_notifier - Register notifier
 * @nb: notifier_block
 *
 * Register switch device notifier.  This should be used by code
 * which needs to monitor events happening in particular device.
 * Return values are same as for atomic_notifier_chain_register().
 */
347 int register_switchdev_notifier(struct notifier_block *nb)
351 mutex_lock(&switchdev_mutex);
352 err = raw_notifier_chain_register(&switchdev_notif_chain, nb);
353 mutex_unlock(&switchdev_mutex);
356 EXPORT_SYMBOL_GPL(register_switchdev_notifier);
/**
 * unregister_switchdev_notifier - Unregister notifier
 * @nb: notifier_block
 *
 * Unregister switch device notifier.
 * Return values are same as for atomic_notifier_chain_unregister().
 */
365 int unregister_switchdev_notifier(struct notifier_block *nb)
369 mutex_lock(&switchdev_mutex);
370 err = raw_notifier_chain_unregister(&switchdev_notif_chain, nb);
371 mutex_unlock(&switchdev_mutex);
374 EXPORT_SYMBOL_GPL(unregister_switchdev_notifier);
/**
 * call_switchdev_notifiers - Call notifiers
 * @val: value passed unmodified to notifier function
 * @dev: port device
 * @info: notifier information data
 *
 * Call all network notifier blocks.  This should be called by driver
 * when it needs to propagate hardware event.
 * Return values are same as for atomic_notifier_call_chain().
 */
386 int call_switchdev_notifiers(unsigned long val, struct net_device *dev,
387 struct switchdev_notifier_info *info)
392 mutex_lock(&switchdev_mutex);
393 err = raw_notifier_call_chain(&switchdev_notif_chain, val, info);
394 mutex_unlock(&switchdev_mutex);
397 EXPORT_SYMBOL_GPL(call_switchdev_notifiers);
/* State carried through a VLAN dump: the switchdev obj plus the skb
 * being filled and the begin/end of the VLAN range accumulated so far.
 * NOTE(review): the skb/filter_mask/flags/begin/end members are not
 * visible in this extraction — confirm against upstream.
 */
399 struct switchdev_vlan_dump {
400 struct switchdev_obj obj;
/* Emit the accumulated VLAN range in @dump as IFLA_BRIDGE_VLAN_INFO
 * netlink attribute(s): nothing for an empty range, one attribute for
 * a single vid, or a RANGE_BEGIN/RANGE_END pair for a multi-vid range.
 */
408 static int switchdev_port_vlan_dump_put(struct net_device *dev,
409 struct switchdev_vlan_dump *dump)
411 struct bridge_vlan_info vinfo;
413 vinfo.flags = dump->flags;
/* begin == end == 0: no pending range to emit. */
415 if (dump->begin == 0 && dump->end == 0) {
/* Single-vid range: one plain attribute. */
417 } else if (dump->begin == dump->end) {
418 vinfo.vid = dump->begin;
419 if (nla_put(dump->skb, IFLA_BRIDGE_VLAN_INFO,
420 sizeof(vinfo), &vinfo))
/* Multi-vid range: emit begin marker then end marker. */
423 vinfo.vid = dump->begin;
424 vinfo.flags |= BRIDGE_VLAN_INFO_RANGE_BEGIN;
425 if (nla_put(dump->skb, IFLA_BRIDGE_VLAN_INFO,
426 sizeof(vinfo), &vinfo))
428 vinfo.vid = dump->end;
429 vinfo.flags &= ~BRIDGE_VLAN_INFO_RANGE_BEGIN;
430 vinfo.flags |= BRIDGE_VLAN_INFO_RANGE_END;
431 if (nla_put(dump->skb, IFLA_BRIDGE_VLAN_INFO,
432 sizeof(vinfo), &vinfo))
/* Per-object callback for the VLAN dump.  For RTEXT_FILTER_BRVLAN the
 * range is expanded and each vid emitted individually; for
 * RTEXT_FILTER_BRVLAN_COMPRESSED adjacent ranges with equal flags are
 * merged before emitting.
 */
439 static int switchdev_port_vlan_dump_cb(struct net_device *dev,
440 struct switchdev_obj *obj)
442 struct switchdev_vlan_dump *dump =
443 container_of(obj, struct switchdev_vlan_dump, obj);
444 struct switchdev_obj_vlan *vlan = &dump->obj.u.vlan;
/* Reject an inverted range. */
447 if (vlan->vid_begin > vlan->vid_end)
450 if (dump->filter_mask & RTEXT_FILTER_BRVLAN) {
451 dump->flags = vlan->flags;
/* Walk the range one vid at a time, emitting each as its own entry. */
452 for (dump->begin = dump->end = vlan->vid_begin;
453 dump->begin <= vlan->vid_end;
454 dump->begin++, dump->end++) {
455 err = switchdev_port_vlan_dump_put(dev, dump);
459 } else if (dump->filter_mask & RTEXT_FILTER_BRVLAN_COMPRESSED) {
/* New range lies below the accumulated one: extend downward if it is
 * directly adjacent with identical flags, otherwise flush and restart.
 */
460 if (dump->begin > vlan->vid_begin &&
461 dump->begin >= vlan->vid_end) {
462 if ((dump->begin - 1) == vlan->vid_end &&
463 dump->flags == vlan->flags) {
465 dump->begin = vlan->vid_begin;
467 err = switchdev_port_vlan_dump_put(dev, dump);
468 dump->flags = vlan->flags;
469 dump->begin = vlan->vid_begin;
470 dump->end = vlan->vid_end;
/* New range lies above the accumulated one: extend upward if adjacent
 * with identical flags, otherwise flush and restart.
 */
472 } else if (dump->end <= vlan->vid_begin &&
473 dump->end < vlan->vid_end) {
474 if ((dump->end + 1) == vlan->vid_begin &&
475 dump->flags == vlan->flags) {
477 dump->end = vlan->vid_end;
479 err = switchdev_port_vlan_dump_put(dev, dump);
480 dump->flags = vlan->flags;
481 dump->begin = vlan->vid_begin;
482 dump->end = vlan->vid_end;
/* Fill @skb with the port's VLANs via switchdev_port_obj_dump(); for
 * the compressed filter a final dump_put flushes the last pending
 * range.  -EOPNOTSUPP is mapped to 0 (no VLANs to report).
 */
492 static int switchdev_port_vlan_fill(struct sk_buff *skb, struct net_device *dev,
495 struct switchdev_vlan_dump dump = {
497 .id = SWITCHDEV_OBJ_PORT_VLAN,
498 .cb = switchdev_port_vlan_dump_cb,
501 .filter_mask = filter_mask,
505 if ((filter_mask & RTEXT_FILTER_BRVLAN) ||
506 (filter_mask & RTEXT_FILTER_BRVLAN_COMPRESSED)) {
507 err = switchdev_port_obj_dump(dev, &dump.obj);
/* Compressed mode: flush the final accumulated range. */
510 if (filter_mask & RTEXT_FILTER_BRVLAN_COMPRESSED)
512 err = switchdev_port_vlan_dump_put(dev, &dump);
516 return err == -EOPNOTSUPP ? 0 : err;
/**
 * switchdev_port_bridge_getlink - Get bridge port attributes
 *
 * @dev: port device
 *
 * Called for SELF on rtnl_bridge_getlink to get bridge port
 * attributes.
 */
527 int switchdev_port_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
528 struct net_device *dev, u32 filter_mask,
531 struct switchdev_attr attr = {
532 .id = SWITCHDEV_ATTR_PORT_BRIDGE_FLAGS,
534 u16 mode = BRIDGE_MODE_UNDEF;
535 u32 mask = BR_LEARNING | BR_LEARNING_SYNC;
538 err = switchdev_port_attr_get(dev, &attr);
/* -EOPNOTSUPP is tolerated: fall through with default flags. */
539 if (err && err != -EOPNOTSUPP)
542 return ndo_dflt_bridge_getlink(skb, pid, seq, dev, mode,
543 attr.u.brport_flags, mask, nlflags,
544 filter_mask, switchdev_port_vlan_fill);
/* Set or clear a single bridge-port flag on @dev according to the u8
 * value in @nlattr: read the current flags, flip @brport_flag, then
 * write the result back through the attr API.
 */
548 static int switchdev_port_br_setflag(struct net_device *dev,
549 struct nlattr *nlattr,
550 unsigned long brport_flag)
552 struct switchdev_attr attr = {
553 .id = SWITCHDEV_ATTR_PORT_BRIDGE_FLAGS,
555 u8 flag = nla_get_u8(nlattr);
558 err = switchdev_port_attr_get(dev, &attr);
563 attr.u.brport_flags |= brport_flag;
565 attr.u.brport_flags &= ~brport_flag;
567 return switchdev_port_attr_set(dev, &attr);
/* Netlink policy for IFLA_BRPORT_* attributes accepted by
 * switchdev bridge setlink.
 */
570 static const struct nla_policy
571 switchdev_port_bridge_policy[IFLA_BRPORT_MAX + 1] = {
572 [IFLA_BRPORT_STATE] = { .type = NLA_U8 },
573 [IFLA_BRPORT_COST] = { .type = NLA_U32 },
574 [IFLA_BRPORT_PRIORITY] = { .type = NLA_U16 },
575 [IFLA_BRPORT_MODE] = { .type = NLA_U8 },
576 [IFLA_BRPORT_GUARD] = { .type = NLA_U8 },
577 [IFLA_BRPORT_PROTECT] = { .type = NLA_U8 },
578 [IFLA_BRPORT_FAST_LEAVE] = { .type = NLA_U8 },
579 [IFLA_BRPORT_LEARNING] = { .type = NLA_U8 },
580 [IFLA_BRPORT_LEARNING_SYNC] = { .type = NLA_U8 },
581 [IFLA_BRPORT_UNICAST_FLOOD] = { .type = NLA_U8 },
/* Validate the nested IFLA_PROTINFO attributes against the policy,
 * then apply the ones switchdev supports (LEARNING, LEARNING_SYNC)
 * via switchdev_port_br_setflag().
 * NOTE(review): the flag constants passed to setflag are on lines not
 * visible in this extraction — confirm against upstream.
 */
584 static int switchdev_port_br_setlink_protinfo(struct net_device *dev,
585 struct nlattr *protinfo)
591 err = nla_validate_nested(protinfo, IFLA_BRPORT_MAX,
592 switchdev_port_bridge_policy);
596 nla_for_each_nested(attr, protinfo, rem) {
597 switch (nla_type(attr)) {
598 case IFLA_BRPORT_LEARNING:
599 err = switchdev_port_br_setflag(dev, attr,
602 case IFLA_BRPORT_LEARNING_SYNC:
603 err = switchdev_port_br_setflag(dev, attr,
/* Walk the nested IFLA_AF_SPEC attributes, building a VLAN object for
 * each IFLA_BRIDGE_VLAN_INFO entry (handling RANGE_BEGIN/RANGE_END
 * pairs) and applying it via @f — either switchdev_port_obj_add or
 * switchdev_port_obj_del.
 */
617 static int switchdev_port_br_afspec(struct net_device *dev,
618 struct nlattr *afspec,
619 int (*f)(struct net_device *dev,
620 struct switchdev_obj *obj))
623 struct bridge_vlan_info *vinfo;
624 struct switchdev_obj obj = {
625 .id = SWITCHDEV_OBJ_PORT_VLAN,
627 struct switchdev_obj_vlan *vlan = &obj.u.vlan;
631 nla_for_each_nested(attr, afspec, rem) {
/* Skip anything that is not a well-formed VLAN info attribute. */
632 if (nla_type(attr) != IFLA_BRIDGE_VLAN_INFO)
634 if (nla_len(attr) != sizeof(struct bridge_vlan_info))
636 vinfo = nla_data(attr);
637 vlan->flags = vinfo->flags;
/* RANGE_BEGIN opens a range; only its begin vid is recorded for now. */
638 if (vinfo->flags & BRIDGE_VLAN_INFO_RANGE_BEGIN) {
641 vlan->vid_begin = vinfo->vid;
/* RANGE_END closes a previously opened range and applies it. */
642 } else if (vinfo->flags & BRIDGE_VLAN_INFO_RANGE_END) {
643 if (!vlan->vid_begin)
645 vlan->vid_end = vinfo->vid;
/* Range must be ascending and non-degenerate. */
646 if (vlan->vid_end <= vlan->vid_begin)
651 memset(vlan, 0, sizeof(*vlan));
/* Standalone entry: a single-vid range, applied immediately. */
655 vlan->vid_begin = vinfo->vid;
656 vlan->vid_end = vinfo->vid;
660 memset(vlan, 0, sizeof(*vlan));
/**
 * switchdev_port_bridge_setlink - Set bridge port attributes
 *
 * @dev: port device
 * @nlh: netlink header
 * @flags: netlink flags
 *
 * Called for SELF on rtnl_bridge_setlink to set bridge port
 * attributes: IFLA_PROTINFO for port flags, IFLA_AF_SPEC for VLANs.
 */
677 int switchdev_port_bridge_setlink(struct net_device *dev,
678 struct nlmsghdr *nlh, u16 flags)
680 struct nlattr *protinfo;
681 struct nlattr *afspec;
684 protinfo = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg),
687 err = switchdev_port_br_setlink_protinfo(dev, protinfo);
692 afspec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg),
/* VLAN entries in AF_SPEC are added to the port. */
695 err = switchdev_port_br_afspec(dev, afspec,
696 switchdev_port_obj_add);
700 EXPORT_SYMBOL_GPL(switchdev_port_bridge_setlink);
/**
 * switchdev_port_bridge_dellink - Delete bridge port attributes
 *
 * @dev: port device
 * @nlh: netlink header
 * @flags: netlink flags
 *
 * Called for SELF on rtnl_bridge_dellink: VLAN entries found in
 * IFLA_AF_SPEC are deleted from the port.
 */
712 int switchdev_port_bridge_dellink(struct net_device *dev,
713 struct nlmsghdr *nlh, u16 flags)
715 struct nlattr *afspec;
717 afspec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg),
720 return switchdev_port_br_afspec(dev, afspec,
721 switchdev_port_obj_del);
725 EXPORT_SYMBOL_GPL(switchdev_port_bridge_dellink);
/**
 * switchdev_port_fdb_add - Add FDB (MAC/VLAN) entry to port
 *
 * @ndm: netlink hdr
 * @tb: netlink attributes
 * @dev: port device
 * @addr: MAC address to add
 * @vid: VLAN to add
 *
 * Add FDB entry to switch device.
 */
738 int switchdev_port_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
739 struct net_device *dev, const unsigned char *addr,
740 u16 vid, u16 nlm_flags)
742 struct switchdev_obj obj = {
743 .id = SWITCHDEV_OBJ_PORT_FDB,
750 return switchdev_port_obj_add(dev, &obj);
752 EXPORT_SYMBOL_GPL(switchdev_port_fdb_add);
/**
 * switchdev_port_fdb_del - Delete FDB (MAC/VLAN) entry from port
 *
 * @ndm: netlink hdr
 * @tb: netlink attributes
 * @dev: port device
 * @addr: MAC address to delete
 * @vid: VLAN to delete
 *
 * Delete FDB entry from switch device.
 */
765 int switchdev_port_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
766 struct net_device *dev, const unsigned char *addr,
769 struct switchdev_obj obj = {
770 .id = SWITCHDEV_OBJ_PORT_FDB,
777 return switchdev_port_obj_del(dev, &obj);
779 EXPORT_SYMBOL_GPL(switchdev_port_fdb_del);
/* State carried through an FDB dump: switchdev obj plus the netlink
 * callback driving the dump.
 * NOTE(review): the skb/idx members are not visible in this
 * extraction — confirm against upstream.
 */
781 struct switchdev_fdb_dump {
782 struct switchdev_obj obj;
784 struct netlink_callback *cb;
/* Per-entry callback for the FDB dump: encode one FDB object as an
 * RTM_NEWNEIGH message into the dump skb, skipping entries already
 * delivered in an earlier dump pass (idx < cb->args[0]).
 */
788 static int switchdev_port_fdb_dump_cb(struct net_device *dev,
789 struct switchdev_obj *obj)
791 struct switchdev_fdb_dump *dump =
792 container_of(obj, struct switchdev_fdb_dump, obj);
793 u32 portid = NETLINK_CB(dump->cb->skb).portid;
794 u32 seq = dump->cb->nlh->nlmsg_seq;
795 struct nlmsghdr *nlh;
/* Already dumped in a previous pass: skip. */
798 if (dump->idx < dump->cb->args[0])
801 nlh = nlmsg_put(dump->skb, portid, seq, RTM_NEWNEIGH,
802 sizeof(*ndm), NLM_F_MULTI);
806 ndm = nlmsg_data(nlh);
807 ndm->ndm_family = AF_BRIDGE;
810 ndm->ndm_flags = NTF_SELF;
812 ndm->ndm_ifindex = dev->ifindex;
813 ndm->ndm_state = NUD_REACHABLE;
815 if (nla_put(dump->skb, NDA_LLADDR, ETH_ALEN, obj->u.fdb.addr))
816 goto nla_put_failure;
/* vid 0 means "no VLAN": omit the NDA_VLAN attribute. */
818 if (obj->u.fdb.vid && nla_put_u16(dump->skb, NDA_VLAN, obj->u.fdb.vid))
819 goto nla_put_failure;
821 nlmsg_end(dump->skb, nlh);
/* Undo the partially-built message on attribute overflow. */
828 nlmsg_cancel(dump->skb, nlh);
/**
 * switchdev_port_fdb_dump - Dump port FDB (MAC/VLAN) entries
 *
 * @skb: netlink skb
 * @cb: netlink callback
 * @dev: port device
 * @filter_dev: filter device
 * @idx: first entry index to dump
 *
 * Dump FDB entries from switch device.
 */
843 int switchdev_port_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
844 struct net_device *dev,
845 struct net_device *filter_dev, int idx)
847 struct switchdev_fdb_dump dump = {
849 .id = SWITCHDEV_OBJ_PORT_FDB,
850 .cb = switchdev_port_fdb_dump_cb,
858 err = switchdev_port_obj_dump(dev, &dump.obj);
864 EXPORT_SYMBOL_GPL(switchdev_port_fdb_dump);
/* Recursively search down the stacked-device tree for the first
 * switch port device, identified by it providing
 * switchdev_port_attr_get.  Returns NULL when none is found
 * (NULL-return line not visible in this extraction).
 */
866 static struct net_device *switchdev_get_lowest_dev(struct net_device *dev)
868 const struct switchdev_ops *ops = dev->switchdev_ops;
869 struct net_device *lower_dev;
870 struct net_device *port_dev;
871 struct list_head *iter;
/* Recursively search down until we find a sw port dev.
 * (A sw port dev supports switchdev_port_attr_get).
 */
877 if (ops && ops->switchdev_port_attr_get)
880 netdev_for_each_lower_dev(dev, lower_dev, iter) {
881 port_dev = switchdev_get_lowest_dev(lower_dev);
/* Find the switch port device backing @fi's nexthops, verifying via
 * SWITCHDEV_ATTR_PORT_PARENT_ID that every nexthop dev belongs to the
 * same physical switch; mixed-switch routes are rejected.
 */
889 static struct net_device *switchdev_get_dev_by_nhs(struct fib_info *fi)
891 struct switchdev_attr attr = {
892 .id = SWITCHDEV_ATTR_PORT_PARENT_ID,
894 struct switchdev_attr prev_attr;
895 struct net_device *dev = NULL;
/* For this route, all nexthop devs must be on the same switch. */
900 for (nhsel = 0; nhsel < fi->fib_nhs; nhsel++) {
901 const struct fib_nh *nh = &fi->fib_nh[nhsel];
906 dev = switchdev_get_lowest_dev(nh->nh_dev);
910 if (switchdev_port_attr_get(dev, &attr))
/* Parent switch IDs must match the previous nexthop's. */
914 if (prev_attr.u.ppid.id_len != attr.u.ppid.id_len)
916 if (memcmp(prev_attr.u.ppid.id, attr.u.ppid.id,
/**
 * switchdev_fib_ipv4_add - Add/modify switch IPv4 route entry
 *
 * @dst: route's IPv4 destination address
 * @dst_len: destination address length (prefix length)
 * @fi: route FIB info structure
 * @tos: route TOS
 * @type: route type
 * @nlflags: netlink flags passed in (NLM_F_*)
 * @tb_id: route table ID
 *
 * Add/modify switch IPv4 route entry.
 */
940 int switchdev_fib_ipv4_add(u32 dst, int dst_len, struct fib_info *fi,
941 u8 tos, u8 type, u32 nlflags, u32 tb_id)
943 struct switchdev_obj fib_obj = {
944 .id = SWITCHDEV_OBJ_IPV4_FIB,
955 struct net_device *dev;
/* Don't offload route if using custom ip rules or if
 * IPv4 FIB offloading has been disabled completely.
 */
962 #ifdef CONFIG_IP_MULTIPLE_TABLES
963 if (fi->fib_net->ipv4.fib_has_custom_rules)
967 if (fi->fib_net->ipv4.fib_offload_disabled)
970 dev = switchdev_get_dev_by_nhs(fi)
974 err = switchdev_port_obj_add(dev, &fib_obj);
/* Mark the route as hardware-offloaded on success. */
976 fi->fib_flags |= RTNH_F_OFFLOAD;
978 return err == -EOPNOTSUPP ? 0 : err;
980 EXPORT_SYMBOL_GPL(switchdev_fib_ipv4_add);
/**
 * switchdev_fib_ipv4_del - Delete IPv4 route entry from switch
 *
 * @dst: route's IPv4 destination address
 * @dst_len: destination address length (prefix length)
 * @fi: route FIB info structure
 * @tos: route TOS
 * @type: route type
 * @tb_id: route table ID
 *
 * Delete IPv4 route entry from switch device.
 */
994 int switchdev_fib_ipv4_del(u32 dst, int dst_len, struct fib_info *fi,
995 u8 tos, u8 type, u32 tb_id)
997 struct switchdev_obj fib_obj = {
998 .id = SWITCHDEV_OBJ_IPV4_FIB,
1009 struct net_device *dev;
/* Route was never offloaded: nothing to remove from hardware. */
1012 if (!(fi->fib_flags & RTNH_F_OFFLOAD))
1015 dev = switchdev_get_dev_by_nhs(fi);
1019 err = switchdev_port_obj_del(dev, &fib_obj);
/* Clear the offload mark on successful hardware removal. */
1021 fi->fib_flags &= ~RTNH_F_OFFLOAD;
1023 return err == -EOPNOTSUPP ? 0 : err;
1025 EXPORT_SYMBOL_GPL(switchdev_fib_ipv4_del);
/**
 * switchdev_fib_ipv4_abort - Abort an IPv4 FIB operation
 *
 * @fi: route FIB info structure
 */
1032 void switchdev_fib_ipv4_abort(struct fib_info *fi)
/* There was a problem installing this route to the offload
 * device.  For now, until we come up with more refined
 * policy handling, abruptly end IPv4 fib offloading for
 * the entire net by flushing offload device(s) of all
 * IPv4 routes, and mark IPv4 fib offloading broken from
 * this point forward.
 */
1042 fib_flush_external(fi->fib_net);
1043 fi->fib_net->ipv4.fib_offload_disabled = true;
1045 EXPORT_SYMBOL_GPL(switchdev_fib_ipv4_abort);