[GFS2] Eliminate gl_req_bh
This patch further reduces the memory needs of GFS2 by eliminating the
gl_req_bh variable from struct gfs2_glock.

Signed-off-by: Bob Peterson <rpeterso@redhat.com>
Signed-off-by: Steven Whitehouse <swhiteho@redhat.com>
parent 110acf3837
commit ab0d756681

fs/gfs2/glock.c | 107
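
In outline: gfs2_glock_xmote_th() and gfs2_glock_drop_th() stop saving xmote_bh/drop_bh in gl->gl_req_bh before calling into the lock module, gfs2_glock_cb() calls xmote_bh() directly, and xmote_bh() now routes unlock completions to drop_bh() itself when the returned state is LM_ST_UNLOCKED (drop_bh() is moved above xmote_bh() so it is in scope). The stand-alone C sketch below is not GFS2 code; lock_obj, ST_UNLOCKED, ST_MASK and lock_module_cb() are invented stand-ins. It only illustrates the pattern of replacing a stored per-object completion-callback pointer with dispatch on the returned state:

/*
 * Stand-alone illustration (not GFS2 code) of the pattern this patch
 * applies: instead of storing a completion callback pointer in each
 * lock object (as gl_req_bh did), the single completion handler
 * inspects the returned state and dispatches itself.
 */
#include <stdio.h>

#define ST_UNLOCKED 0x0 /* stand-in for LM_ST_UNLOCKED */
#define ST_MASK     0xf /* stand-in for LM_OUT_ST_MASK */

struct lock_obj {
	unsigned int state;
	/* Before the patch there would also be a stored callback here:
	 *   void (*req_bh)(struct lock_obj *lo, unsigned int ret);
	 * which every request had to set and every exit path had to clear.
	 */
};

static void drop_bh(struct lock_obj *lo, unsigned int ret)
{
	/* Completion of an unlock request. */
	lo->state = ST_UNLOCKED;
	printf("unlocked\n");
}

static void xmote_bh(struct lock_obj *lo, unsigned int ret)
{
	/* An unlock completion is recognisable from the returned state,
	 * so it can be routed to drop_bh() without a stored pointer. */
	if ((ret & ST_MASK) == ST_UNLOCKED) {
		drop_bh(lo, ret);
		return;
	}
	lo->state = ret & ST_MASK;
	printf("now holding state %u\n", lo->state);
}

/* The single lock-module callback: always calls xmote_bh(). */
static void lock_module_cb(struct lock_obj *lo, unsigned int ret)
{
	xmote_bh(lo, ret);
}

int main(void)
{
	struct lock_obj lo = { .state = ST_UNLOCKED };

	lock_module_cb(&lo, 0x3);         /* a lock grant */
	lock_module_cb(&lo, ST_UNLOCKED); /* an unlock, routed to drop_bh() */
	return 0;
}

This mirrors the diff that follows: xmote_bh() gains the LM_ST_UNLOCKED check, and every assignment, clearing and debug dump of gl_req_bh is removed, along with the struct member itself.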
fs/gfs2/glock.c
@@ -338,7 +338,6 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
 	gl->gl_ip = 0;
 	gl->gl_ops = glops;
 	gl->gl_req_gh = NULL;
-	gl->gl_req_bh = NULL;
 	gl->gl_vn = 0;
 	gl->gl_stamp = jiffies;
 	gl->gl_tchange = jiffies;
@@ -743,6 +742,50 @@ static void state_change(struct gfs2_glock *gl, unsigned int new_state)
 	gl->gl_tchange = jiffies;
 }
 
+/**
+ * drop_bh - Called after a lock module unlock completes
+ * @gl: the glock
+ * @ret: the return status
+ *
+ * Doesn't wake up the process waiting on the struct gfs2_holder (if any)
+ * Doesn't drop the reference on the glock the top half took out
+ *
+ */
+
+static void drop_bh(struct gfs2_glock *gl, unsigned int ret)
+{
+	struct gfs2_sbd *sdp = gl->gl_sbd;
+	const struct gfs2_glock_operations *glops = gl->gl_ops;
+	struct gfs2_holder *gh = gl->gl_req_gh;
+
+	gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
+	gfs2_assert_warn(sdp, list_empty(&gl->gl_holders));
+	gfs2_assert_warn(sdp, !ret);
+
+	state_change(gl, LM_ST_UNLOCKED);
+
+	if (glops->go_inval)
+		glops->go_inval(gl, DIO_METADATA);
+
+	if (gh) {
+		spin_lock(&gl->gl_spin);
+		list_del_init(&gh->gh_list);
+		gh->gh_error = 0;
+		spin_unlock(&gl->gl_spin);
+	}
+
+	spin_lock(&gl->gl_spin);
+	gfs2_demote_wake(gl);
+	gl->gl_req_gh = NULL;
+	clear_bit(GLF_LOCK, &gl->gl_flags);
+	spin_unlock(&gl->gl_spin);
+
+	gfs2_glock_put(gl);
+
+	if (gh)
+		gfs2_holder_wake(gh);
+}
+
 /**
  * xmote_bh - Called after the lock module is done acquiring a lock
  * @gl: The glock in question
@@ -758,6 +801,11 @@ static void xmote_bh(struct gfs2_glock *gl, unsigned int ret)
 	int prev_state = gl->gl_state;
 	int op_done = 1;
 
+	if ((ret & LM_OUT_ST_MASK) == LM_ST_UNLOCKED) {
+		drop_bh(gl, ret);
+		return;
+	}
+
 	gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
 	gfs2_assert_warn(sdp, list_empty(&gl->gl_holders));
 	gfs2_assert_warn(sdp, !(ret & LM_OUT_ASYNC));
@@ -783,7 +831,6 @@ static void xmote_bh(struct gfs2_glock *gl, unsigned int ret)
 		} else {
 			spin_lock(&gl->gl_spin);
 			if (gl->gl_state != gl->gl_demote_state) {
-				gl->gl_req_bh = NULL;
 				spin_unlock(&gl->gl_spin);
 				gfs2_glock_drop_th(gl);
 				gfs2_glock_put(gl);
@@ -825,7 +872,6 @@ static void xmote_bh(struct gfs2_glock *gl, unsigned int ret)
 	if (op_done) {
 		spin_lock(&gl->gl_spin);
 		gl->gl_req_gh = NULL;
-		gl->gl_req_bh = NULL;
 		clear_bit(GLF_LOCK, &gl->gl_flags);
 		spin_unlock(&gl->gl_spin);
 	}
@@ -864,7 +910,6 @@ static void gfs2_glock_xmote_th(struct gfs2_glock *gl, struct gfs2_holder *gh)
 	gfs2_assert_warn(sdp, state != gl->gl_state);
 
 	gfs2_glock_hold(gl);
-	gl->gl_req_bh = xmote_bh;
 
 	lck_ret = gfs2_lm_lock(sdp, gl->gl_lock, gl->gl_state, state, lck_flags);
 
@@ -877,51 +922,6 @@ static void gfs2_glock_xmote_th(struct gfs2_glock *gl, struct gfs2_holder *gh)
 		xmote_bh(gl, lck_ret);
 }
 
-/**
- * drop_bh - Called after a lock module unlock completes
- * @gl: the glock
- * @ret: the return status
- *
- * Doesn't wake up the process waiting on the struct gfs2_holder (if any)
- * Doesn't drop the reference on the glock the top half took out
- *
- */
-
-static void drop_bh(struct gfs2_glock *gl, unsigned int ret)
-{
-	struct gfs2_sbd *sdp = gl->gl_sbd;
-	const struct gfs2_glock_operations *glops = gl->gl_ops;
-	struct gfs2_holder *gh = gl->gl_req_gh;
-
-	gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
-	gfs2_assert_warn(sdp, list_empty(&gl->gl_holders));
-	gfs2_assert_warn(sdp, !ret);
-
-	state_change(gl, LM_ST_UNLOCKED);
-
-	if (glops->go_inval)
-		glops->go_inval(gl, DIO_METADATA);
-
-	if (gh) {
-		spin_lock(&gl->gl_spin);
-		list_del_init(&gh->gh_list);
-		gh->gh_error = 0;
-		spin_unlock(&gl->gl_spin);
-	}
-
-	spin_lock(&gl->gl_spin);
-	gfs2_demote_wake(gl);
-	gl->gl_req_gh = NULL;
-	gl->gl_req_bh = NULL;
-	clear_bit(GLF_LOCK, &gl->gl_flags);
-	spin_unlock(&gl->gl_spin);
-
-	gfs2_glock_put(gl);
-
-	if (gh)
-		gfs2_holder_wake(gh);
-}
-
 /**
  * gfs2_glock_drop_th - call into the lock module to unlock a lock
  * @gl: the glock
@@ -942,7 +942,6 @@ static void gfs2_glock_drop_th(struct gfs2_glock *gl)
 	gfs2_assert_warn(sdp, gl->gl_state != LM_ST_UNLOCKED);
 
 	gfs2_glock_hold(gl);
-	gl->gl_req_bh = drop_bh;
 
 	ret = gfs2_lm_unlock(sdp, gl->gl_lock, gl->gl_state);
 
@@ -971,8 +970,7 @@ static void do_cancels(struct gfs2_holder *gh)
 	while (gl->gl_req_gh != gh &&
 	       !test_bit(HIF_HOLDER, &gh->gh_iflags) &&
 	       !list_empty(&gh->gh_list)) {
-		if (gl->gl_req_bh && !(gl->gl_req_gh &&
-				       (gl->gl_req_gh->gh_flags & GL_NOCANCEL))) {
+		if (!(gl->gl_req_gh && (gl->gl_req_gh->gh_flags & GL_NOCANCEL))) {
 			spin_unlock(&gl->gl_spin);
 			gfs2_lm_cancel(gl->gl_sbd, gl->gl_lock);
 			msleep(100);
@@ -1042,7 +1040,6 @@ static int glock_wait_internal(struct gfs2_holder *gh)
 
 		spin_lock(&gl->gl_spin);
 		gl->gl_req_gh = NULL;
-		gl->gl_req_bh = NULL;
 		clear_bit(GLF_LOCK, &gl->gl_flags);
 		run_queue(gl);
 		spin_unlock(&gl->gl_spin);
@@ -1535,8 +1532,7 @@ void gfs2_glock_cb(void *cb_data, unsigned int type, void *data)
 		gl = gfs2_glock_find(sdp, &async->lc_name);
 		if (gfs2_assert_warn(sdp, gl))
 			return;
-		if (!gfs2_assert_warn(sdp, gl->gl_req_bh))
-			gl->gl_req_bh(gl, async->lc_ret);
+		xmote_bh(gl, async->lc_ret);
 		if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
 			gfs2_glock_put(gl);
 		up_read(&gfs2_umount_flush_sem);
@@ -1898,7 +1894,6 @@ static int dump_glock(struct glock_iter *gi, struct gfs2_glock *gl)
 		print_dbg(gi, " gl_owner = -1\n");
 	print_dbg(gi, " gl_ip = %lu\n", gl->gl_ip);
 	print_dbg(gi, " req_gh = %s\n", (gl->gl_req_gh) ? "yes" : "no");
-	print_dbg(gi, " req_bh = %s\n", (gl->gl_req_bh) ? "yes" : "no");
 	print_dbg(gi, " lvb_count = %d\n", atomic_read(&gl->gl_lvb_count));
 	print_dbg(gi, " object = %s\n", (gl->gl_object) ? "yes" : "no");
 	print_dbg(gi, " reclaim = %s\n",
fs/gfs2/incore.h
@@ -191,7 +191,6 @@ struct gfs2_glock {
 	const struct gfs2_glock_operations *gl_ops;
 
 	struct gfs2_holder *gl_req_gh;
-	gfs2_glop_bh_t gl_req_bh;
 
 	void *gl_lock;
 	char *gl_lvb;