7b36e8eef9
Two threads may access the same gw_node data within the same RCU grace period: the first thread calls call_rcu() to decrement the refcount and free the data while the second thread increments the refcount to reuse the data. To avoid this race condition, all refcount operations have to be atomic.

Reported-by: Sven Eckelmann <sven@narfation.org>
Signed-off-by: Marek Lindner <lindner_marek@yahoo.de>
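The fix pairs atomic_dec_and_test() on release with atomic_inc_not_zero() on lookup: inside an RCU read-side critical section a reader may only take a new reference if the count has not already dropped to zero, so an object whose free has been scheduled via call_rcu() can never be revived. A minimal sketch of the pattern, with a hypothetical struct obj standing in for gw_node:

struct obj {
	atomic_t refcount;
	struct rcu_head rcu;
};

static void obj_free_rcu(struct rcu_head *rcu)
{
	/* runs after a grace period - no RCU reader can still see obj */
	kfree(container_of(rcu, struct obj, rcu));
}

static void obj_put(struct obj *obj)
{
	/* atomic: exactly one thread sees the count hit zero and frees */
	if (atomic_dec_and_test(&obj->refcount))
		call_rcu(&obj->rcu, obj_free_rcu);
}

static struct obj *obj_get(struct obj *obj)
{
	/* atomic: fails instead of resurrecting a dying object */
	if (!atomic_inc_not_zero(&obj->refcount))
		return NULL;
	return obj;
}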
/*
 * Copyright (C) 2009-2011 B.A.T.M.A.N. contributors:
 *
 * Marek Lindner
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 *
 */

#include "main.h"
|
|
#include "gateway_client.h"
|
|
#include "gateway_common.h"
|
|
#include "hard-interface.h"
|
|
#include <linux/ip.h>
|
|
#include <linux/ipv6.h>
|
|
#include <linux/udp.h>
|
|
#include <linux/if_vlan.h>
|
|
|
|
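/* gw_node_free_rcu() runs only after an RCU grace period has elapsed, so
 * no reader that found the node under rcu_read_lock() can still hold a
 * pointer to it; gw_node_free_ref() drops a reference atomically and
 * schedules the free once the count reaches zero. */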
static void gw_node_free_rcu(struct rcu_head *rcu)
{
	struct gw_node *gw_node;

	gw_node = container_of(rcu, struct gw_node, rcu);
	kfree(gw_node);
}

static void gw_node_free_ref(struct gw_node *gw_node)
{
	if (atomic_dec_and_test(&gw_node->refcount))
		call_rcu(&gw_node->rcu, gw_node_free_rcu);
}

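/* Return the originator behind the currently selected gateway. A reference
 * is taken with atomic_inc_not_zero() so a concurrent release cannot free
 * the originator; returns NULL if no gateway is selected or the originator
 * is already being freed. The caller must drop the reference. */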
void *gw_get_selected(struct bat_priv *bat_priv)
{
	struct gw_node *curr_gateway_tmp;
	struct orig_node *orig_node = NULL;

	rcu_read_lock();
	curr_gateway_tmp = rcu_dereference(bat_priv->curr_gw);
	if (!curr_gateway_tmp)
		goto out;

	orig_node = curr_gateway_tmp->orig_node;
	if (!orig_node)
		goto out;

	if (!atomic_inc_not_zero(&orig_node->refcount))
		orig_node = NULL;

out:
	rcu_read_unlock();
	return orig_node;
}

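/* Clear the current gateway selection under gw_list_lock and drop the
 * reference that curr_gw held on the deselected node. */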
void gw_deselect(struct bat_priv *bat_priv)
{
	struct gw_node *gw_node;

	spin_lock_bh(&bat_priv->gw_list_lock);
	gw_node = rcu_dereference(bat_priv->curr_gw);
	rcu_assign_pointer(bat_priv->curr_gw, NULL);
	spin_unlock_bh(&bat_priv->gw_list_lock);

	if (gw_node)
		gw_node_free_ref(gw_node);
}

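/* Make new_gw_node the selected gateway: take a reference on it first
 * (atomic_inc_not_zero() fails if it is already being freed), swap the
 * pointer under gw_list_lock and release the previous selection. */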
static void gw_select(struct bat_priv *bat_priv, struct gw_node *new_gw_node)
{
	struct gw_node *curr_gw_node;

	if (new_gw_node && !atomic_inc_not_zero(&new_gw_node->refcount))
		new_gw_node = NULL;

	spin_lock_bh(&bat_priv->gw_list_lock);
	curr_gw_node = rcu_dereference(bat_priv->curr_gw);
	rcu_assign_pointer(bat_priv->curr_gw, new_gw_node);
	spin_unlock_bh(&bat_priv->gw_list_lock);

	if (curr_gw_node)
		gw_node_free_ref(curr_gw_node);
}

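/* Elect a gateway when running in client mode. Selection class 1 ("fast
 * connection") weighs the squared link quality against the advertised
 * downstream bandwidth; all other classes simply pick the best tq_avg. */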
void gw_election(struct bat_priv *bat_priv)
{
	struct hlist_node *node;
	struct gw_node *gw_node, *curr_gw, *curr_gw_tmp = NULL;
	uint8_t max_tq = 0;
	uint32_t max_gw_factor = 0, tmp_gw_factor = 0;
	int down, up;

	/**
	 * The batman daemon checks here if we already passed a full originator
	 * cycle in order to make sure we don't choose the first gateway we
	 * hear about. This check is based on the daemon's uptime which we
	 * don't have.
	 **/
	if (atomic_read(&bat_priv->gw_mode) != GW_MODE_CLIENT)
		return;

	rcu_read_lock();
	curr_gw = rcu_dereference(bat_priv->curr_gw);
	if (curr_gw) {
		rcu_read_unlock();
		return;
	}

	if (hlist_empty(&bat_priv->gw_list)) {
		if (curr_gw) {
			rcu_read_unlock();
			bat_dbg(DBG_BATMAN, bat_priv,
				"Removing selected gateway - "
				"no gateway in range\n");
			gw_deselect(bat_priv);
		} else
			rcu_read_unlock();

		return;
	}

	hlist_for_each_entry_rcu(gw_node, node, &bat_priv->gw_list, list) {
		if (!gw_node->orig_node->router)
			continue;

		if (gw_node->deleted)
			continue;

		switch (atomic_read(&bat_priv->gw_sel_class)) {
		case 1: /* fast connection */
			gw_bandwidth_to_kbit(gw_node->orig_node->gw_flags,
					     &down, &up);

			tmp_gw_factor = (gw_node->orig_node->router->tq_avg *
					 gw_node->orig_node->router->tq_avg *
					 down * 100 * 100) /
					 (TQ_LOCAL_WINDOW_SIZE *
					  TQ_LOCAL_WINDOW_SIZE * 64);

			if ((tmp_gw_factor > max_gw_factor) ||
			    ((tmp_gw_factor == max_gw_factor) &&
			     (gw_node->orig_node->router->tq_avg > max_tq)))
				curr_gw_tmp = gw_node;
			break;

		default: /**
			  * 2:  stable connection (use best statistic)
			  * 3:  fast-switch (use best statistic but change as
			  *     soon as a better gateway appears)
			  * XX: late-switch (use best statistic but change as
			  *     soon as a better gateway appears which has
			  *     $routing_class more tq points)
			  **/
			if (gw_node->orig_node->router->tq_avg > max_tq)
				curr_gw_tmp = gw_node;
			break;
		}

		if (gw_node->orig_node->router->tq_avg > max_tq)
			max_tq = gw_node->orig_node->router->tq_avg;

		if (tmp_gw_factor > max_gw_factor)
			max_gw_factor = tmp_gw_factor;
	}

	if (curr_gw != curr_gw_tmp) {
		if ((curr_gw) && (!curr_gw_tmp))
			bat_dbg(DBG_BATMAN, bat_priv,
				"Removing selected gateway - "
				"no gateway in range\n");
		else if ((!curr_gw) && (curr_gw_tmp))
			bat_dbg(DBG_BATMAN, bat_priv,
				"Adding route to gateway %pM "
				"(gw_flags: %i, tq: %i)\n",
				curr_gw_tmp->orig_node->orig,
				curr_gw_tmp->orig_node->gw_flags,
				curr_gw_tmp->orig_node->router->tq_avg);
		else
			bat_dbg(DBG_BATMAN, bat_priv,
				"Changing route to gateway %pM "
				"(gw_flags: %i, tq: %i)\n",
				curr_gw_tmp->orig_node->orig,
				curr_gw_tmp->orig_node->gw_flags,
				curr_gw_tmp->orig_node->router->tq_avg);

		gw_select(bat_priv, curr_gw_tmp);
	}

	rcu_read_unlock();
}

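/* Check whether orig_node would make a better gateway than the current
 * selection and, if so, deselect the current one so that the next
 * election round can pick up the better candidate. */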
void gw_check_election(struct bat_priv *bat_priv, struct orig_node *orig_node)
{
	struct gw_node *curr_gateway_tmp;
	uint8_t gw_tq_avg, orig_tq_avg;

	rcu_read_lock();
	curr_gateway_tmp = rcu_dereference(bat_priv->curr_gw);
	if (!curr_gateway_tmp)
		goto out_rcu;

	if (!curr_gateway_tmp->orig_node)
		goto deselect_rcu;

	if (!curr_gateway_tmp->orig_node->router)
		goto deselect_rcu;

	/* this node already is the gateway */
	if (curr_gateway_tmp->orig_node == orig_node)
		goto out_rcu;

	if (!orig_node->router)
		goto out_rcu;

	gw_tq_avg = curr_gateway_tmp->orig_node->router->tq_avg;
	rcu_read_unlock();

	orig_tq_avg = orig_node->router->tq_avg;

	/* the TQ value has to be better */
	if (orig_tq_avg < gw_tq_avg)
		goto out;

	/**
	 * if the routing class is greater than 3 the value tells us how much
	 * greater the TQ value of the new gateway must be
	 **/
	if ((atomic_read(&bat_priv->gw_sel_class) > 3) &&
	    (orig_tq_avg - gw_tq_avg < atomic_read(&bat_priv->gw_sel_class)))
		goto out;

	bat_dbg(DBG_BATMAN, bat_priv,
		"Restarting gateway selection: better gateway found (tq curr: "
		"%i, tq new: %i)\n",
		gw_tq_avg, orig_tq_avg);
	goto deselect;

out_rcu:
	rcu_read_unlock();
	goto out;
deselect_rcu:
	rcu_read_unlock();
deselect:
	gw_deselect(bat_priv);
out:
	return;
}

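/* Allocate a gw_node for orig_node, initialize its refcount to one (the
 * reference held by the gateway list) and publish it via RCU. */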
static void gw_node_add(struct bat_priv *bat_priv,
			struct orig_node *orig_node, uint8_t new_gwflags)
{
	struct gw_node *gw_node;
	int down, up;

	gw_node = kmalloc(sizeof(struct gw_node), GFP_ATOMIC);
	if (!gw_node)
		return;

	memset(gw_node, 0, sizeof(struct gw_node));
	INIT_HLIST_NODE(&gw_node->list);
	gw_node->orig_node = orig_node;
	atomic_set(&gw_node->refcount, 1);

	spin_lock_bh(&bat_priv->gw_list_lock);
	hlist_add_head_rcu(&gw_node->list, &bat_priv->gw_list);
	spin_unlock_bh(&bat_priv->gw_list_lock);

	gw_bandwidth_to_kbit(new_gwflags, &down, &up);
	bat_dbg(DBG_BATMAN, bat_priv,
		"Found new gateway %pM -> gw_class: %i - %i%s/%i%s\n",
		orig_node->orig, new_gwflags,
		(down > 2048 ? down / 1024 : down),
		(down > 2048 ? "MBit" : "KBit"),
		(up > 2048 ? up / 1024 : up),
		(up > 2048 ? "MBit" : "KBit"));
}

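/* Update the gateway flags of a known originator; new_gwflags of zero
 * marks the node as deleted (it is reclaimed later by gw_node_purge()).
 * Unknown originators announcing gateway capabilities are added. */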
void gw_node_update(struct bat_priv *bat_priv,
		    struct orig_node *orig_node, uint8_t new_gwflags)
{
	struct hlist_node *node;
	struct gw_node *gw_node;

	rcu_read_lock();
	hlist_for_each_entry_rcu(gw_node, node, &bat_priv->gw_list, list) {
		if (gw_node->orig_node != orig_node)
			continue;

		bat_dbg(DBG_BATMAN, bat_priv,
			"Gateway class of originator %pM changed from "
			"%i to %i\n",
			orig_node->orig, gw_node->orig_node->gw_flags,
			new_gwflags);

		gw_node->deleted = 0;

		if (new_gwflags == 0) {
			gw_node->deleted = jiffies;
			bat_dbg(DBG_BATMAN, bat_priv,
				"Gateway %pM removed from gateway list\n",
				orig_node->orig);

			if (gw_node == rcu_dereference(bat_priv->curr_gw)) {
				rcu_read_unlock();
				gw_deselect(bat_priv);
				return;
			}
		}

		rcu_read_unlock();
		return;
	}
	rcu_read_unlock();

	if (new_gwflags == 0)
		return;

	gw_node_add(bat_priv, orig_node, new_gwflags);
}

void gw_node_delete(struct bat_priv *bat_priv, struct orig_node *orig_node)
{
	gw_node_update(bat_priv, orig_node, 0);
}

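/* Release gateway nodes that have been marked as deleted for longer than
 * the purge timeout, or all nodes when the mesh is shutting down. */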
void gw_node_purge(struct bat_priv *bat_priv)
{
	struct gw_node *gw_node;
	struct hlist_node *node, *node_tmp;
	unsigned long timeout = 2 * PURGE_TIMEOUT * HZ;
	char do_deselect = 0;

	spin_lock_bh(&bat_priv->gw_list_lock);

	hlist_for_each_entry_safe(gw_node, node, node_tmp,
				  &bat_priv->gw_list, list) {
		if (((!gw_node->deleted) ||
		     (time_before(jiffies, gw_node->deleted + timeout))) &&
		    atomic_read(&bat_priv->mesh_state) == MESH_ACTIVE)
			continue;

		if (rcu_dereference(bat_priv->curr_gw) == gw_node)
			do_deselect = 1;

		hlist_del_rcu(&gw_node->list);
		gw_node_free_ref(gw_node);
	}

	spin_unlock_bh(&bat_priv->gw_list_lock);

	/* gw_deselect() acquires gw_list_lock itself, so it must not be
	 * called with the lock held - defer the call until here to avoid
	 * a self-deadlock */
	if (do_deselect)
		gw_deselect(bat_priv);
}

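/* Print a single gateway table row; "=>" marks the currently selected
 * gateway. */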
static int _write_buffer_text(struct bat_priv *bat_priv,
			      struct seq_file *seq, struct gw_node *gw_node)
{
	struct gw_node *curr_gw;
	int down, up, ret;

	gw_bandwidth_to_kbit(gw_node->orig_node->gw_flags, &down, &up);

	rcu_read_lock();
	curr_gw = rcu_dereference(bat_priv->curr_gw);

	ret = seq_printf(seq, "%s %pM (%3i) %pM [%10s]: %3i - %i%s/%i%s\n",
			 (curr_gw == gw_node ? "=>" : "  "),
			 gw_node->orig_node->orig,
			 gw_node->orig_node->router->tq_avg,
			 gw_node->orig_node->router->addr,
			 gw_node->orig_node->router->if_incoming->net_dev->name,
			 gw_node->orig_node->gw_flags,
			 (down > 2048 ? down / 1024 : down),
			 (down > 2048 ? "MBit" : "KBit"),
			 (up > 2048 ? up / 1024 : up),
			 (up > 2048 ? "MBit" : "KBit"));

	rcu_read_unlock();
	return ret;
}

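/* seq_file handler: print the list of known gateways for the soft
 * interface behind this seq_file. */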
int gw_client_seq_print_text(struct seq_file *seq, void *offset)
{
	struct net_device *net_dev = (struct net_device *)seq->private;
	struct bat_priv *bat_priv = netdev_priv(net_dev);
	struct gw_node *gw_node;
	struct hlist_node *node;
	int gw_count = 0;

	if (!bat_priv->primary_if) {
		return seq_printf(seq, "BATMAN mesh %s disabled - please "
				  "specify interfaces to enable it\n",
				  net_dev->name);
	}

	if (bat_priv->primary_if->if_status != IF_ACTIVE) {
		return seq_printf(seq, "BATMAN mesh %s disabled - "
				  "primary interface not active\n",
				  net_dev->name);
	}

	seq_printf(seq, " %-12s (%s/%i) %17s [%10s]: gw_class ... "
		   "[B.A.T.M.A.N. adv %s%s, MainIF/MAC: %s/%pM (%s)]\n",
		   "Gateway", "#", TQ_MAX_VALUE, "Nexthop",
		   "outgoingIF", SOURCE_VERSION, REVISION_VERSION_STR,
		   bat_priv->primary_if->net_dev->name,
		   bat_priv->primary_if->net_dev->dev_addr, net_dev->name);

	rcu_read_lock();
	hlist_for_each_entry_rcu(gw_node, node, &bat_priv->gw_list, list) {
		if (gw_node->deleted)
			continue;

		if (!gw_node->orig_node->router)
			continue;

		_write_buffer_text(bat_priv, seq, gw_node);
		gw_count++;
	}
	rcu_read_unlock();

	if (gw_count == 0)
		seq_printf(seq, "No gateways in range ...\n");

	return 0;
}

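/* Check whether skb is a DHCP request (UDP to port 67 for IPv4/BOOTP or
 * port 547 for DHCPv6) that the gateway feature should handle: returns 1
 * if it should be sent via the selected gateway, -1 if this node acts as
 * a gateway server itself, 0 otherwise. */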
int gw_is_target(struct bat_priv *bat_priv, struct sk_buff *skb)
{
	struct ethhdr *ethhdr;
	struct iphdr *iphdr;
	struct ipv6hdr *ipv6hdr;
	struct udphdr *udphdr;
	unsigned int header_len = 0;

	if (atomic_read(&bat_priv->gw_mode) == GW_MODE_OFF)
		return 0;

	/* check for ethernet header */
	if (!pskb_may_pull(skb, header_len + ETH_HLEN))
		return 0;
	ethhdr = (struct ethhdr *)skb->data;
	header_len += ETH_HLEN;

	/* check for initial vlan header */
	if (ntohs(ethhdr->h_proto) == ETH_P_8021Q) {
		if (!pskb_may_pull(skb, header_len + VLAN_HLEN))
			return 0;
		ethhdr = (struct ethhdr *)(skb->data + VLAN_HLEN);
		header_len += VLAN_HLEN;
	}

	/* check for ip header */
	switch (ntohs(ethhdr->h_proto)) {
	case ETH_P_IP:
		if (!pskb_may_pull(skb, header_len + sizeof(struct iphdr)))
			return 0;
		iphdr = (struct iphdr *)(skb->data + header_len);
		header_len += iphdr->ihl * 4;

		/* check for udp header */
		if (iphdr->protocol != IPPROTO_UDP)
			return 0;

		break;
	case ETH_P_IPV6:
		if (!pskb_may_pull(skb, header_len + sizeof(struct ipv6hdr)))
			return 0;
		ipv6hdr = (struct ipv6hdr *)(skb->data + header_len);
		header_len += sizeof(struct ipv6hdr);

		/* check for udp header */
		if (ipv6hdr->nexthdr != IPPROTO_UDP)
			return 0;

		break;
	default:
		return 0;
	}

	if (!pskb_may_pull(skb, header_len + sizeof(struct udphdr)))
		return 0;
	udphdr = (struct udphdr *)(skb->data + header_len);
	header_len += sizeof(struct udphdr);

	/* check for bootp port */
	if ((ntohs(ethhdr->h_proto) == ETH_P_IP) &&
	    (ntohs(udphdr->dest) != 67))
		return 0;

	if ((ntohs(ethhdr->h_proto) == ETH_P_IPV6) &&
	    (ntohs(udphdr->dest) != 547))
		return 0;

	if (atomic_read(&bat_priv->gw_mode) == GW_MODE_SERVER)
		return -1;

	rcu_read_lock();
	if (!rcu_dereference(bat_priv->curr_gw)) {
		rcu_read_unlock();
		return 0;
	}
	rcu_read_unlock();

	return 1;
}