batadv_inc_counter(bat_priv, BAT_CNT_MGMT_TX);
batadv_add_counter(bat_priv, BAT_CNT_MGMT_TX_BYTES,
skb->len + ETH_HLEN);
- send_skb_packet(skb, hard_iface, broadcast_addr);
+ batadv_send_skb_packet(skb, hard_iface, broadcast_addr);
}
}
forw_packet->if_incoming->net_dev->dev_addr);
/* skb is only used once and then forw_packet is freed */
- send_skb_packet(forw_packet->skb, forw_packet->if_incoming,
- broadcast_addr);
+ batadv_send_skb_packet(forw_packet->skb,
+ forw_packet->if_incoming,
+ broadcast_addr);
forw_packet->skb = NULL;
goto out;
/* start timer for this packet */
INIT_DELAYED_WORK(&forw_packet_aggr->delayed_work,
- send_outstanding_bat_ogm_packet);
+ batadv_send_outstanding_bat_ogm_packet);
queue_delayed_work(bat_event_workqueue,
&forw_packet_aggr->delayed_work,
send_time - jiffies);
hard_iface->net_dev->name);
/* begin scheduling originator messages on that interface */
- schedule_bat_ogm(hard_iface);
+ batadv_schedule_bat_ogm(hard_iface);
out:
return 0;
/* delete all references to this hard_iface */
batadv_purge_orig_ref(bat_priv);
- purge_outstanding_packets(bat_priv, hard_iface);
+ batadv_purge_outstanding_packets(bat_priv, hard_iface);
dev_put(hard_iface->soft_iface);
/* nobody uses this interface anymore */
memcpy(icmp_packet->rr,
neigh_node->if_incoming->net_dev->dev_addr, ETH_ALEN);
- send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
+ batadv_send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
goto out;
dst_unreach:
atomic_set(&bat_priv->mesh_state, MESH_DEACTIVATING);
- purge_outstanding_packets(bat_priv, NULL);
+ batadv_purge_outstanding_packets(bat_priv, NULL);
vis_quit(bat_priv);
icmp_packet->msg_type = ECHO_REPLY;
icmp_packet->header.ttl = TTL;
- send_skb_packet(skb, router->if_incoming, router->addr);
+ batadv_send_skb_packet(skb, router->if_incoming, router->addr);
ret = NET_RX_SUCCESS;
out:
icmp_packet->msg_type = TTL_EXCEEDED;
icmp_packet->header.ttl = TTL;
- send_skb_packet(skb, router->if_incoming, router->addr);
+ batadv_send_skb_packet(skb, router->if_incoming, router->addr);
ret = NET_RX_SUCCESS;
out:
icmp_packet->header.ttl--;
/* route it */
- send_skb_packet(skb, router->if_incoming, router->addr);
+ batadv_send_skb_packet(skb, router->if_incoming, router->addr);
ret = NET_RX_SUCCESS;
out:
skb->len + ETH_HLEN);
/* route it */
- send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
+ batadv_send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
ret = NET_RX_SUCCESS;
out:
goto out;
/* rebroadcast packet */
- add_bcast_packet_to_list(bat_priv, skb, 1);
+ batadv_add_bcast_packet_to_list(bat_priv, skb, 1);
/* don't hand the broadcast up if it is from an originator
* from the same backbone.
/* send out an already prepared packet to the given address via the
* specified batman interface */
-int send_skb_packet(struct sk_buff *skb, struct hard_iface *hard_iface,
- const uint8_t *dst_addr)
+int batadv_send_skb_packet(struct sk_buff *skb, struct hard_iface *hard_iface,
+ const uint8_t *dst_addr)
{
struct ethhdr *ethhdr;
return NET_XMIT_DROP;
}
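/* Editorial sketch (not part of this patch): the rename does not change the
 * calling convention described in the comment above. A caller hands the skb
 * over to batadv_send_skb_packet() and only inspects the NET_XMIT_* return
 * value afterwards (cf. forw_packet->skb = NULL in the hunk further up).
 * example_unicast_send() is a hypothetical wrapper; all other identifiers
 * appear elsewhere in this patch.
 */
static int example_unicast_send(struct sk_buff *skb,
				struct neigh_node *neigh_node)
{
	/* the skb is handed over; the caller must not touch it again,
	 * whether or not transmission succeeds
	 */
	return batadv_send_skb_packet(skb, neigh_node->if_incoming,
				      neigh_node->addr);
}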
-void schedule_bat_ogm(struct hard_iface *hard_iface)
+void batadv_schedule_bat_ogm(struct hard_iface *hard_iface)
{
struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
*
* The skb is not consumed, so the caller should make sure that the
* skb is freed. */
-int add_bcast_packet_to_list(struct bat_priv *bat_priv,
- const struct sk_buff *skb, unsigned long delay)
+int batadv_add_bcast_packet_to_list(struct bat_priv *bat_priv,
+ const struct sk_buff *skb,
+ unsigned long delay)
{
struct hard_iface *primary_if = NULL;
struct forw_packet *forw_packet;
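/* Editorial sketch (not part of this patch): per the comment above this
 * function, batadv_add_bcast_packet_to_list() stores its own copy of the
 * skb, so the caller retains ownership and frees the original once it has
 * been queued. example_queue_bcast() is a hypothetical wrapper; everything
 * else appears in this patch or is a standard kernel helper (kfree_skb()).
 */
static void example_queue_bcast(struct bat_priv *bat_priv,
				struct sk_buff *skb)
{
	/* queue the packet for (re)broadcast with a minimal delay,
	 * as the callers in this patch do
	 */
	batadv_add_bcast_packet_to_list(bat_priv, skb, 1);

	/* the queue holds a clone, so drop the caller's reference */
	kfree_skb(skb);
}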
/* send a copy of the saved skb */
skb1 = skb_clone(forw_packet->skb, GFP_ATOMIC);
if (skb1)
- send_skb_packet(skb1, hard_iface, broadcast_addr);
+ batadv_send_skb_packet(skb1, hard_iface,
+ broadcast_addr);
}
rcu_read_unlock();
atomic_inc(&bat_priv->bcast_queue_left);
}
-void send_outstanding_bat_ogm_packet(struct work_struct *work)
+void batadv_send_outstanding_bat_ogm_packet(struct work_struct *work)
{
struct delayed_work *delayed_work =
container_of(work, struct delayed_work, work);
* shutting down
*/
if (forw_packet->own)
- schedule_bat_ogm(forw_packet->if_incoming);
+ batadv_schedule_bat_ogm(forw_packet->if_incoming);
out:
/* don't count own packet */
forw_packet_free(forw_packet);
}
-void purge_outstanding_packets(struct bat_priv *bat_priv,
- const struct hard_iface *hard_iface)
+void batadv_purge_outstanding_packets(struct bat_priv *bat_priv,
+ const struct hard_iface *hard_iface)
{
struct forw_packet *forw_packet;
struct hlist_node *tmp_node, *safe_tmp_node;
#ifndef _NET_BATMAN_ADV_SEND_H_
#define _NET_BATMAN_ADV_SEND_H_
-int send_skb_packet(struct sk_buff *skb, struct hard_iface *hard_iface,
- const uint8_t *dst_addr);
-void schedule_bat_ogm(struct hard_iface *hard_iface);
-int add_bcast_packet_to_list(struct bat_priv *bat_priv,
- const struct sk_buff *skb, unsigned long delay);
-void send_outstanding_bat_ogm_packet(struct work_struct *work);
-void purge_outstanding_packets(struct bat_priv *bat_priv,
- const struct hard_iface *hard_iface);
+int batadv_send_skb_packet(struct sk_buff *skb, struct hard_iface *hard_iface,
+ const uint8_t *dst_addr);
+void batadv_schedule_bat_ogm(struct hard_iface *hard_iface);
+int batadv_add_bcast_packet_to_list(struct bat_priv *bat_priv,
+ const struct sk_buff *skb,
+ unsigned long delay);
+void batadv_send_outstanding_bat_ogm_packet(struct work_struct *work);
+void batadv_purge_outstanding_packets(struct bat_priv *bat_priv,
+ const struct hard_iface *hard_iface);
#endif /* _NET_BATMAN_ADV_SEND_H_ */
bcast_packet->seqno =
htonl(atomic_inc_return(&bat_priv->bcast_seqno));
- add_bcast_packet_to_list(bat_priv, skb, 1);
+ batadv_add_bcast_packet_to_list(bat_priv, skb, 1);
/* a copy is stored in the bcast list, therefore removing
* the original skb. */
batadv_inc_counter(bat_priv, BAT_CNT_TT_REQUEST_TX);
- send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
+ batadv_send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
ret = 0;
out:
batadv_inc_counter(bat_priv, BAT_CNT_TT_RESPONSE_TX);
- send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
+ batadv_send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
ret = true;
goto out;
batadv_inc_counter(bat_priv, BAT_CNT_TT_RESPONSE_TX);
- send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
+ batadv_send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
ret = true;
goto out;
batadv_inc_counter(bat_priv, BAT_CNT_TT_ROAM_ADV_TX);
- send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
+ batadv_send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
ret = 0;
out:
frag1->seqno = htons(seqno - 1);
frag2->seqno = htons(seqno);
- send_skb_packet(skb, hard_iface, dstaddr);
- send_skb_packet(frag_skb, hard_iface, dstaddr);
+ batadv_send_skb_packet(skb, hard_iface, dstaddr);
+ batadv_send_skb_packet(frag_skb, hard_iface, dstaddr);
ret = NET_RX_SUCCESS;
goto out;
goto out;
}
- send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
+ batadv_send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
ret = 0;
goto out;
skb = skb_clone(info->skb_packet, GFP_ATOMIC);
if (skb)
- send_skb_packet(skb, hard_iface, dstaddr);
+ batadv_send_skb_packet(skb, hard_iface,
+ dstaddr);
}
rcu_read_unlock();
skb = skb_clone(info->skb_packet, GFP_ATOMIC);
if (skb)
- send_skb_packet(skb, router->if_incoming, router->addr);
+ batadv_send_skb_packet(skb, router->if_incoming, router->addr);
out:
if (router)