forked from luck/tmp_suning_uos_patched
dm cache policy smq: allocate cache blocks in order
Previously, cache blocks were being allocated in reverse order. Fix this by pulling the block off the head of the free list. Shouldn't have any impact on performance or latency but it is more correct to have the cache blocks allocated/mapped in ascending order. This fix will slightly increase the chances of two adjacent oblocks being in adjacent cblocks. Signed-off-by: Joe Thornber <ejt@redhat.com> Signed-off-by: Mike Snitzer <snitzer@redhat.com>
This commit is contained in:
parent
8ee18ede74
commit
9768a10dd3
|
@@ -213,6 +213,19 @@ static void l_del(struct entry_space *es, struct ilist *l, struct entry *e)
|
|||
l->nr_elts--;
|
||||
}
|
||||
|
||||
static struct entry *l_pop_head(struct entry_space *es, struct ilist *l)
|
||||
{
|
||||
struct entry *e;
|
||||
|
||||
for (e = l_head(es, l); e; e = l_next(es, e))
|
||||
if (!e->sentinel) {
|
||||
l_del(es, l, e);
|
||||
return e;
|
||||
}
|
||||
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static struct entry *l_pop_tail(struct entry_space *es, struct ilist *l)
|
||||
{
|
||||
struct entry *e;
|
||||
|
@@ -719,7 +732,7 @@ static struct entry *alloc_entry(struct entry_alloc *ea)
|
|||
if (l_empty(&ea->free))
|
||||
return NULL;
|
||||
|
||||
-	e = l_pop_tail(ea->es, &ea->free);
|
||||
+	e = l_pop_head(ea->es, &ea->free);
|
||||
init_entry(e);
|
||||
ea->nr_allocated++;
|
||||
|
||||
|
|
Loading…
Reference in New Issue
Block a user