net: openvswitch: add masks cache hit counter
Add a counter that counts the number of masks cache hits, and export it
through the megaflow netlink statistics.

Reviewed-by: Paolo Abeni <pabeni@redhat.com>
Reviewed-by: Tonghao Zhang <xiangxia.m.yue@gmail.com>
Signed-off-by: Eelco Chaudron <echaudro@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
commit 9d2f627b7e
parent d6526926de
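For orientation, the export path the commit message refers to already exists; below is a condensed sketch based on ovs_dp_cmd_fill_info() in net/openvswitch/datapath.c (not modified by this patch). Because n_cache_hit takes over an existing pad field, the whole struct still travels in the single OVS_DP_ATTR_MEGAFLOW_STATS attribute and no new netlink attribute is required:

	struct ovs_dp_megaflow_stats dp_megaflow_stats;
	struct ovs_dp_stats dp_stats;

	get_dp_stats(dp, &dp_stats, &dp_megaflow_stats);

	/* The whole struct is emitted as one 64-bit aligned attribute,
	 * so the new n_cache_hit field is picked up automatically. */
	if (nla_put_64bit(skb, OVS_DP_ATTR_MEGAFLOW_STATS,
			  sizeof(struct ovs_dp_megaflow_stats),
			  &dp_megaflow_stats, OVS_DP_ATTR_PAD))
		goto nla_put_failure;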
--- a/include/uapi/linux/openvswitch.h
+++ b/include/uapi/linux/openvswitch.h
@@ -102,8 +102,8 @@ struct ovs_dp_megaflow_stats {
 	__u64 n_mask_hit;	 /* Number of masks used for flow lookups. */
 	__u32 n_masks;		 /* Number of masks for the datapath. */
 	__u32 pad0;		 /* Pad for future expension. */
+	__u64 n_cache_hit;	 /* Number of cache matches for flow lookups. */
 	__u64 pad1;		 /* Pad for future expension. */
-	__u64 pad2;		 /* Pad for future expension. */
 };
 
 struct ovs_vport_stats {
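A hypothetical userspace consumer (not part of this patch) could derive per-packet averages from the new field. The helper below is an illustrative sketch; the n_packets argument is assumed to be n_hit + n_missed from struct ovs_dp_stats, and "mega" a copy of the OVS_DP_ATTR_MEGAFLOW_STATS attribute payload:

	#include <stdio.h>
	#include <linux/openvswitch.h>

	/* Hypothetical helper: print average masks walked per packet and
	 * the fraction of lookups resolved through the mask cache. */
	static void print_megaflow_stats(const struct ovs_dp_megaflow_stats *mega,
					 unsigned long long n_packets)
	{
		if (!n_packets)
			return;
		printf("masks/pkt: %.2f, cache hit rate: %.2f%%\n",
		       (double)mega->n_mask_hit / n_packets,
		       100.0 * (double)mega->n_cache_hit / n_packets);
	}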
--- a/net/openvswitch/datapath.c
+++ b/net/openvswitch/datapath.c
@@ -225,13 +225,14 @@ void ovs_dp_process_packet(struct sk_buff *skb, struct sw_flow_key *key)
 	struct dp_stats_percpu *stats;
 	u64 *stats_counter;
 	u32 n_mask_hit;
+	u32 n_cache_hit;
 	int error;
 
 	stats = this_cpu_ptr(dp->stats_percpu);
 
 	/* Look up flow. */
 	flow = ovs_flow_tbl_lookup_stats(&dp->table, key, skb_get_hash(skb),
-					 &n_mask_hit);
+					 &n_mask_hit, &n_cache_hit);
 	if (unlikely(!flow)) {
 		struct dp_upcall_info upcall;
 
@@ -262,6 +263,7 @@ void ovs_dp_process_packet(struct sk_buff *skb, struct sw_flow_key *key)
 	u64_stats_update_begin(&stats->syncp);
 	(*stats_counter)++;
 	stats->n_mask_hit += n_mask_hit;
+	stats->n_cache_hit += n_cache_hit;
 	u64_stats_update_end(&stats->syncp);
 }
 
@@ -699,6 +701,7 @@ static void get_dp_stats(const struct datapath *dp, struct ovs_dp_stats *stats,
 		stats->n_missed += local_stats.n_missed;
 		stats->n_lost += local_stats.n_lost;
 		mega_stats->n_mask_hit += local_stats.n_mask_hit;
+		mega_stats->n_cache_hit += local_stats.n_cache_hit;
 	}
 }
 
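For context, a simplified sketch of the per-CPU folding loop the hunk above extends (condensed from get_dp_stats(); the ovs_dp_stats fields are elided). Each CPU's counters are snapshotted under the u64_stats seqcount so 64-bit values cannot be read torn on 32-bit hosts:

	int i;

	for_each_possible_cpu(i) {
		const struct dp_stats_percpu *percpu_stats;
		struct dp_stats_percpu local_stats;
		unsigned int start;

		percpu_stats = per_cpu_ptr(dp->stats_percpu, i);

		/* Retry until the snapshot was not concurrently updated. */
		do {
			start = u64_stats_fetch_begin_irq(&percpu_stats->syncp);
			local_stats = *percpu_stats;
		} while (u64_stats_fetch_retry_irq(&percpu_stats->syncp, start));

		mega_stats->n_mask_hit += local_stats.n_mask_hit;
		mega_stats->n_cache_hit += local_stats.n_cache_hit;
	}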
--- a/net/openvswitch/datapath.h
+++ b/net/openvswitch/datapath.h
@@ -38,12 +38,15 @@
  * @n_mask_hit: Number of masks looked up for flow match.
  *   @n_mask_hit / (@n_hit + @n_missed)  will be the average masks looked
  *   up per packet.
+ * @n_cache_hit: The number of received packets that had their mask found using
+ * the mask cache.
  */
struct dp_stats_percpu {
 	u64 n_hit;
 	u64 n_missed;
 	u64 n_lost;
 	u64 n_mask_hit;
+	u64 n_cache_hit;
 	struct u64_stats_sync syncp;
 };
 
--- a/net/openvswitch/flow_table.c
+++ b/net/openvswitch/flow_table.c
@@ -667,6 +667,7 @@ static struct sw_flow *flow_lookup(struct flow_table *tbl,
 				   struct mask_array *ma,
 				   const struct sw_flow_key *key,
 				   u32 *n_mask_hit,
+				   u32 *n_cache_hit,
 				   u32 *index)
 {
 	u64 *usage_counters = this_cpu_ptr(ma->masks_usage_cntr);
@@ -682,6 +683,7 @@ static struct sw_flow *flow_lookup(struct flow_table *tbl,
 			u64_stats_update_begin(&ma->syncp);
 			usage_counters[*index]++;
 			u64_stats_update_end(&ma->syncp);
+			(*n_cache_hit)++;
 			return flow;
 		}
 	}
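For orientation, a condensed view of the fast path the new increment sits in (simplified from flow_lookup(); on failure the function falls back to scanning the whole mask array). *index is the mask index suggested by the cache, and the counter is only bumped when that suggestion actually yields the flow:

	if (likely(*index < ma->max)) {
		struct sw_flow_mask *mask = rcu_dereference_ovsl(ma->masks[*index]);

		if (mask) {
			struct sw_flow *flow;

			flow = masked_flow_lookup(ti, key, mask, n_mask_hit);
			if (flow) {
				/* The cached mask index resolved the lookup:
				 * account a per-mask usage hit and a cache hit. */
				u64_stats_update_begin(&ma->syncp);
				usage_counters[*index]++;
				u64_stats_update_end(&ma->syncp);
				(*n_cache_hit)++;
				return flow;
			}
		}
	}
	/* ...otherwise fall through to a full scan of ma->masks[]. */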
@@ -719,7 +721,8 @@ static struct sw_flow *flow_lookup(struct flow_table *tbl,
 struct sw_flow *ovs_flow_tbl_lookup_stats(struct flow_table *tbl,
 					  const struct sw_flow_key *key,
 					  u32 skb_hash,
-					  u32 *n_mask_hit)
+					  u32 *n_mask_hit,
+					  u32 *n_cache_hit)
 {
 	struct mask_array *ma = rcu_dereference(tbl->mask_array);
 	struct table_instance *ti = rcu_dereference(tbl->ti);
@@ -729,10 +732,13 @@ struct sw_flow *ovs_flow_tbl_lookup_stats(struct flow_table *tbl,
 	int seg;
 
 	*n_mask_hit = 0;
+	*n_cache_hit = 0;
 	if (unlikely(!skb_hash)) {
 		u32 mask_index = 0;
+		u32 cache = 0;
 
-		return flow_lookup(tbl, ti, ma, key, n_mask_hit, &mask_index);
+		return flow_lookup(tbl, ti, ma, key, n_mask_hit, &cache,
+				   &mask_index);
 	}
 
 	/* Pre and post recirulation flows usually have the same skb_hash
@@ -753,7 +759,7 @@ struct sw_flow *ovs_flow_tbl_lookup_stats(struct flow_table *tbl,
 		e = &entries[index];
 		if (e->skb_hash == skb_hash) {
 			flow = flow_lookup(tbl, ti, ma, key, n_mask_hit,
-					   &e->mask_index);
+					   n_cache_hit, &e->mask_index);
 			if (!flow)
 				e->skb_hash = 0;
 			return flow;
@@ -766,10 +772,12 @@ struct sw_flow *ovs_flow_tbl_lookup_stats(struct flow_table *tbl,
 	}
 
 	/* Cache miss, do full lookup. */
-	flow = flow_lookup(tbl, ti, ma, key, n_mask_hit, &ce->mask_index);
+	flow = flow_lookup(tbl, ti, ma, key, n_mask_hit, n_cache_hit,
+			   &ce->mask_index);
 	if (flow)
 		ce->skb_hash = skb_hash;
 
+	*n_cache_hit = 0;
 	return flow;
 }
 
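The *n_cache_hit = 0 added above is deliberate: on an skb_hash-cache miss, flow_lookup() may still increment the counter through the stale ce->mask_index, so the value is zeroed to keep the statistic a true cache-hit count. For context, a condensed sketch of the probe loop these hunks modify (simplified from ovs_flow_tbl_lookup_stats(); MC_HASH_* constants as in flow_table.c):

	struct mask_cache_entry *entries, *ce = NULL;
	struct sw_flow *flow;
	u32 hash = skb_hash;
	int seg;

	entries = this_cpu_ptr(tbl->mask_cache);

	/* Probe MC_HASH_SEGS slots, each addressed by MC_HASH_SHIFT bits
	 * of the skb hash. */
	for (seg = 0; seg < MC_HASH_SEGS; seg++) {
		int index = hash & (MC_HASH_ENTRIES - 1);
		struct mask_cache_entry *e = &entries[index];

		if (e->skb_hash == skb_hash) {
			/* skb_hash-cache hit; flow_lookup() bumps
			 * *n_cache_hit if the remembered mask index
			 * still matches. */
			flow = flow_lookup(tbl, ti, ma, key, n_mask_hit,
					   n_cache_hit, &e->mask_index);
			if (!flow)
				e->skb_hash = 0;
			return flow;
		}

		if (!ce || e->skb_hash < ce->skb_hash)
			ce = e;	/* A better replacement cache candidate. */

		hash >>= MC_HASH_SHIFT;
	}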
@@ -779,9 +787,10 @@ struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *tbl,
 	struct table_instance *ti = rcu_dereference_ovsl(tbl->ti);
 	struct mask_array *ma = rcu_dereference_ovsl(tbl->mask_array);
 	u32 __always_unused n_mask_hit;
+	u32 __always_unused n_cache_hit;
 	u32 index = 0;
 
-	return flow_lookup(tbl, ti, ma, key, &n_mask_hit, &index);
+	return flow_lookup(tbl, ti, ma, key, &n_mask_hit, &n_cache_hit, &index);
 }
 
 struct sw_flow *ovs_flow_tbl_lookup_exact(struct flow_table *tbl,
--- a/net/openvswitch/flow_table.h
+++ b/net/openvswitch/flow_table.h
@@ -82,7 +82,8 @@ struct sw_flow *ovs_flow_tbl_dump_next(struct table_instance *table,
 struct sw_flow *ovs_flow_tbl_lookup_stats(struct flow_table *,
 					  const struct sw_flow_key *,
 					  u32 skb_hash,
-					  u32 *n_mask_hit);
+					  u32 *n_mask_hit,
+					  u32 *n_cache_hit);
 struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *,
 				    const struct sw_flow_key *);
 struct sw_flow *ovs_flow_tbl_lookup_exact(struct flow_table *tbl,