dlm for 3.8
This set fixes some conditions in which value blocks are invalidated,
and includes two trivial cleanups.
-----BEGIN PGP SIGNATURE-----
Version: GnuPG v1.4.12 (GNU/Linux)

iQIcBAABAgAGBQJQx3nAAAoJEDgbc8f8gGmqfBgQAIw46bVtZxxK/DcyGMR5UCtV
ARLHieFbse3fJNkIXOD96G7Psk/dDgjulclewccXcdgu+VGyXQ1g1YJG9/L0Sv17
mRL+WlceXWWZ9LuUwRicBNerwd3MBLGndEV78fFweopV7FNYBF7qTWzywTLHCGmf
iB5/jdSLhPzj4ele+BA1XUHqQYOiDEmbLlw8sSNU3kxiOCO/lqWlQLd1t+YoOUI/
8YPOfmPFxu0SbBmvoTlr53w+gvDpoTV1AdVJO2Pe7yuIAAUWcMN1NHTfyL3ua3K9
LPh/eSltcKLdS7wjcNoufL5CEPsaTnmO28MZdHO+S3JG2T7glhBo6j/c3v1JO4rV
MpBFu1Blm2CeWzmC8tTzwyK/mLRrfjue/4PV11rYUcaBIl/NwaapnUXF8doS81EX
jDgTX7flZa4ykv6f/yca8aJJdIRQtpS4AsmKMigL8TN1JQd22e3LOr30JFkLy7D5
fzidJbhusbeD2kDsskXwMfyF5kUYXLdVQQqwM3BK8+YwjqyM9ReI5XHQWJrdJyzH
u3q6HjO8Wb0e3al2Ay1BhYhkARBm+1vBjxc9fdNXuXdESUvrB5GBB2xrWVgv0Uu8
Dj/ml9hiLr2PcZ0yo+kqkLOpRVrTxQ03IBAaAjsraVjX7exFlEv5gsecP/3Ps7P3
yWxfHLapr/8Vf4itF7hn
=Pb3H
-----END PGP SIGNATURE-----

Merge tag 'dlm-3.8' of git://git.kernel.org/pub/scm/linux/kernel/git/teigland/linux-dlm

Pull dlm updates from David Teigland:
 "This set fixes some conditions in which value blocks are invalidated,
  and includes two trivial cleanups."

* tag 'dlm-3.8' of git://git.kernel.org/pub/scm/linux/kernel/git/teigland/linux-dlm:
  dlm: fix lvb invalidation conditions
  fs/dlm: remove CONFIG_EXPERIMENTAL
  dlm: remove unused variable in *dlm_lowcomms_get_buffer()
commit 22a40fd9a6
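For context on what the diff below changes: a lock value block (lvb) is a small buffer attached to a DLM resource that callers can read and update along with a lock request; after recovery, a value that can no longer be trusted is reported to callers as DLM_SBF_VALNOTVALID in their lksb. The following is a minimal sketch of an in-kernel caller, assuming the dlm_lock() API from <linux/dlm.h>; the names my_lksb, my_lvb, my_name, my_ast and grab_lock_ex are illustrative and not part of this patch set.

#include <linux/dlm.h>
#include <linux/printk.h>
#include <linux/string.h>

/* Illustrative names only; lockspace setup and error handling omitted. */
static struct dlm_lksb my_lksb;
static char my_lvb[32];			/* size must match the lockspace lvblen */
static char my_name[] = "my-resource";

static void my_ast(void *astarg)
{
	/* completion AST: the request described by my_lksb has finished */
	if (my_lksb.sb_status)
		return;

	/* set during recovery when the lvb contents can no longer be trusted */
	if (my_lksb.sb_flags & DLM_SBF_VALNOTVALID)
		pr_info("lvb invalid, regenerate it under an EX/PW lock\n");
}

static int grab_lock_ex(dlm_lockspace_t *ls)
{
	my_lksb.sb_lvbptr = my_lvb;

	/* request EX with a value block; the lvb is copied back on completion */
	return dlm_lock(ls, DLM_LOCK_EX, &my_lksb, DLM_LKF_VALBLK,
			my_name, strlen(my_name), 0, my_ast, NULL, NULL);
}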
fs/dlm/Kconfig
@@ -1,6 +1,6 @@
 menuconfig DLM
 	tristate "Distributed Lock Manager (DLM)"
-	depends on EXPERIMENTAL && INET
+	depends on INET
 	depends on SYSFS && CONFIGFS_FS && (IPV6 || IPV6=n)
 	select IP_SCTP
 	help
fs/dlm/dlm_internal.h
@@ -337,6 +337,7 @@ enum rsb_flags {
 	RSB_NEW_MASTER2,
 	RSB_RECOVER_CONVERT,
 	RSB_RECOVER_GRANT,
+	RSB_RECOVER_LVB_INVAL,
 };

 static inline void rsb_set_flag(struct dlm_rsb *r, enum rsb_flags flag)
fs/dlm/lock.c
@@ -5393,6 +5393,13 @@ static void purge_dead_list(struct dlm_ls *ls, struct dlm_rsb *r,
 		if ((lkb->lkb_nodeid == nodeid_gone) ||
 		    dlm_is_removed(ls, lkb->lkb_nodeid)) {

+			/* tell recover_lvb to invalidate the lvb
+			   because a node holding EX/PW failed */
+			if ((lkb->lkb_exflags & DLM_LKF_VALBLK) &&
+			    (lkb->lkb_grmode >= DLM_LOCK_PW)) {
+				rsb_set_flag(r, RSB_RECOVER_LVB_INVAL);
+			}
+
 			del_lkb(r, lkb);

 			/* this put should free the lkb */
@@ -6025,15 +6032,18 @@ static int orphan_proc_lock(struct dlm_ls *ls, struct dlm_lkb *lkb)
 	return error;
 }

-/* The force flag allows the unlock to go ahead even if the lkb isn't granted.
-   Regardless of what rsb queue the lock is on, it's removed and freed. */
+/* The FORCEUNLOCK flag allows the unlock to go ahead even if the lkb isn't
+   granted.  Regardless of what rsb queue the lock is on, it's removed and
+   freed.  The IVVALBLK flag causes the lvb on the resource to be invalidated
+   if our lock is PW/EX (it's ignored if our granted mode is smaller.) */

 static int unlock_proc_lock(struct dlm_ls *ls, struct dlm_lkb *lkb)
 {
 	struct dlm_args args;
 	int error;

-	set_unlock_args(DLM_LKF_FORCEUNLOCK, lkb->lkb_ua, &args);
+	set_unlock_args(DLM_LKF_FORCEUNLOCK | DLM_LKF_IVVALBLK,
+			lkb->lkb_ua, &args);

 	error = unlock_lock(ls, lkb, &args);
 	if (error == -DLM_EUNLOCK)
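The hunk above routes cleanup of a dead process's locks through DLM_LKF_IVVALBLK, so that an exiting PW/EX holder also invalidates the lvb. Callers can request the same behaviour explicitly on unlock; the sketch below assumes the in-kernel dlm_unlock() API, with ls, held_lkid and lksb standing in for state obtained from an earlier dlm_lock() call.

#include <linux/dlm.h>

/*
 * Hypothetical helper: drop a PW/EX lock and mark the resource's lvb
 * invalid at the same time, so other nodes see DLM_SBF_VALNOTVALID
 * instead of stale contents.  "ls", "held_lkid" and "lksb" are assumed
 * to come from an earlier dlm_lock() call.
 */
static int drop_and_invalidate(dlm_lockspace_t *ls, u32 held_lkid,
			       struct dlm_lksb *lksb)
{
	/* IVVALBLK is ignored unless our granted mode is PW or EX */
	return dlm_unlock(ls, held_lkid, DLM_LKF_IVVALBLK, lksb, NULL);
}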
fs/dlm/lowcomms.c
@@ -1385,7 +1385,6 @@ void *dlm_lowcomms_get_buffer(int nodeid, int len, gfp_t allocation, char **ppc)
 	struct connection *con;
 	struct writequeue_entry *e;
 	int offset = 0;
-	int users = 0;

 	con = nodeid2con(nodeid, allocation);
 	if (!con)
@@ -1399,7 +1398,7 @@ void *dlm_lowcomms_get_buffer(int nodeid, int len, gfp_t allocation, char **ppc)
 	} else {
 		offset = e->end;
 		e->end += len;
-		users = e->users++;
+		e->users++;
 	}
 	spin_unlock(&con->writequeue_lock);

@@ -1414,7 +1413,7 @@ void *dlm_lowcomms_get_buffer(int nodeid, int len, gfp_t allocation, char **ppc)
 	spin_lock(&con->writequeue_lock);
 	offset = e->end;
 	e->end += len;
-	users = e->users++;
+	e->users++;
 	list_add_tail(&e->list, &con->writequeue);
 	spin_unlock(&con->writequeue_lock);
 	goto got_one;
fs/dlm/recover.c
@@ -717,8 +717,14 @@ void dlm_recovered_lock(struct dlm_rsb *r)
 * the VALNOTVALID flag if necessary, and determining the correct lvb contents
 * based on the lvb's of the locks held on the rsb.
 *
- * RSB_VALNOTVALID is set if there are only NL/CR locks on the rsb.  If it
- * was already set prior to recovery, it's not cleared, regardless of locks.
+ * RSB_VALNOTVALID is set in two cases:
+ *
+ * 1. we are master, but not new, and we purged an EX/PW lock held by a
+ * failed node (in dlm_recover_purge which set RSB_RECOVER_LVB_INVAL)
+ *
+ * 2. we are a new master, and there are only NL/CR locks left.
+ * (We could probably improve this by only invaliding in this way when
+ * the previous master left uncleanly.  VMS docs mention that.)
 *
 * The LVB contents are only considered for changing when this is a new master
 * of the rsb (NEW_MASTER2).  Then, the rsb's lvb is taken from any lkb with
@@ -734,6 +740,19 @@ static void recover_lvb(struct dlm_rsb *r)
 	int big_lock_exists = 0;
 	int lvblen = r->res_ls->ls_lvblen;

+	if (!rsb_flag(r, RSB_NEW_MASTER2) &&
+	    rsb_flag(r, RSB_RECOVER_LVB_INVAL)) {
+		/* case 1 above */
+		rsb_set_flag(r, RSB_VALNOTVALID);
+		return;
+	}
+
+	if (!rsb_flag(r, RSB_NEW_MASTER2))
+		return;
+
+	/* we are the new master, so figure out if VALNOTVALID should
+	   be set, and set the rsb lvb from the best lkb available. */
+
 	list_for_each_entry(lkb, &r->res_grantqueue, lkb_statequeue) {
 		if (!(lkb->lkb_exflags & DLM_LKF_VALBLK))
 			continue;
@@ -772,13 +791,10 @@ static void recover_lvb(struct dlm_rsb *r)
 	if (!lock_lvb_exists)
 		goto out;

+	/* lvb is invalidated if only NL/CR locks remain */
 	if (!big_lock_exists)
 		rsb_set_flag(r, RSB_VALNOTVALID);

-	/* don't mess with the lvb unless we're the new master */
-	if (!rsb_flag(r, RSB_NEW_MASTER2))
-		goto out;
-
 	if (!r->res_lvbptr) {
 		r->res_lvbptr = dlm_allocate_lvb(r->res_ls);
 		if (!r->res_lvbptr)
@@ -852,12 +868,19 @@ void dlm_recover_rsbs(struct dlm_ls *ls)
 		if (is_master(r)) {
 			if (rsb_flag(r, RSB_RECOVER_CONVERT))
 				recover_conversion(r);
+
+			/* recover lvb before granting locks so the updated
+			   lvb/VALNOTVALID is presented in the completion */
+			recover_lvb(r);
+
 			if (rsb_flag(r, RSB_NEW_MASTER2))
 				recover_grant(r);
-			recover_lvb(r);
 			count++;
+		} else {
+			rsb_clear_flag(r, RSB_VALNOTVALID);
 		}
 		rsb_clear_flag(r, RSB_RECOVER_CONVERT);
+		rsb_clear_flag(r, RSB_RECOVER_LVB_INVAL);
 		rsb_clear_flag(r, RSB_NEW_MASTER2);
 		unlock_rsb(r);
 	}
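Once VALNOTVALID is reported, it is up to the application to rebuild the value. One common pattern, sketched here as an assumption rather than anything this patch set adds, is to take (or convert to) an EX lock, regenerate the protected state, and write the new value back through the lksb when the lock is released with DLM_LKF_VALBLK; the names ls, lkid, lksb and fresh_value are illustrative.

#include <linux/dlm.h>
#include <linux/string.h>

/*
 * Hypothetical recovery step for an application that caches state in the
 * lvb: after DLM_SBF_VALNOTVALID is reported, rebuild the value while
 * holding EX and write it back when the lock is released.  The lvb on the
 * resource is taken from sb_lvbptr when a PW/EX holder unlocks or
 * down-converts with DLM_LKF_VALBLK.  "ls", "lkid", "lksb" and
 * "fresh_value" are assumed to come from the caller.
 */
static int rewrite_lvb_on_release(dlm_lockspace_t *ls, u32 lkid,
				  struct dlm_lksb *lksb,
				  const char *fresh_value, size_t len)
{
	/* sb_lvbptr must already point at the caller's lvb buffer */
	memcpy(lksb->sb_lvbptr, fresh_value, len);

	/* releasing the EX lock publishes the rebuilt value block */
	return dlm_unlock(ls, lkid, DLM_LKF_VALBLK, lksb, NULL);
}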