Merge tag 'ceph-for-5.10-rc3' of git://github.com/ceph/ceph-client

Pull ceph fix from Ilya Dryomov:
 "A fix for a potential stall on umount caused by the MDS dropping our
  REQUEST_CLOSE message. The code that handled this case was
  inadvertently disabled in 5.9; this patch removes it entirely and
  fixes the problem in a way that is consistent with ceph-fuse"

* tag 'ceph-for-5.10-rc3' of git://github.com/ceph/ceph-client:
  ceph: check session state after bumping session->s_seq
commit 659caaf65d
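For context, here is a minimal user-space sketch of the idea the patch implements, assuming only what the diff below shows: the MDS message handlers bump the session sequence, and if the session is still in the CLOSING state when that happens, the close request is retransmitted instead of letting the umount stall. The names in the sketch (fake_session, bump_sequence, resend_close) are illustrative stand-ins, not the kernel's API.

/*
 * Minimal user-space model of the retransmit-on-seq-bump idea.
 * The names below are illustrative, not the kernel's API.
 */
#include <stdio.h>

enum session_state { S_OPEN, S_CLOSING, S_CLOSED };

struct fake_session {
        enum session_state state;
        unsigned long seq;      /* last sequence number seen from the MDS */
};

/* Stand-in for request_close_session(): just report the retransmit. */
static void resend_close(struct fake_session *s)
{
        printf("resending REQUEST_CLOSE (seq %lu)\n", s->seq);
}

/*
 * Every handled MDS message bumps the sequence.  If we are still waiting
 * for the reply to our close request, the MDS may have dropped it, so
 * retransmit instead of stalling forever.
 */
static void bump_sequence(struct fake_session *s)
{
        s->seq++;
        if (s->state == S_CLOSING)
                resend_close(s);
}

int main(void)
{
        struct fake_session s = { .state = S_OPEN, .seq = 0 };

        bump_sequence(&s);      /* normal traffic: nothing to resend */

        s.state = S_CLOSING;    /* we sent REQUEST_CLOSE ...              */
        bump_sequence(&s);      /* ... but traffic keeps arriving: resend */
        return 0;
}

In the actual patch, inc_session_sequence() additionally asserts that s_mutex is held and logs an error if request_close_session() fails.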
@@ -4074,7 +4074,7 @@ void ceph_handle_caps(struct ceph_mds_session *session,
              vino.snap, inode);
 
         mutex_lock(&session->s_mutex);
-        session->s_seq++;
+        inc_session_sequence(session);
         dout(" mds%d seq %lld cap seq %u\n", session->s_mds, session->s_seq,
              (unsigned)seq);
 
@@ -4231,7 +4231,7 @@ static void handle_lease(struct ceph_mds_client *mdsc,
              dname.len, dname.name);
 
         mutex_lock(&session->s_mutex);
-        session->s_seq++;
+        inc_session_sequence(session);
 
         if (!inode) {
                 dout("handle_lease no inode %llx\n", vino.ino);
@@ -4385,28 +4385,48 @@ static void maybe_recover_session(struct ceph_mds_client *mdsc)
 
 bool check_session_state(struct ceph_mds_session *s)
 {
-        if (s->s_state == CEPH_MDS_SESSION_CLOSING) {
-                dout("resending session close request for mds%d\n",
-                     s->s_mds);
-                request_close_session(s);
-                return false;
-        }
-        if (s->s_ttl && time_after(jiffies, s->s_ttl)) {
-                if (s->s_state == CEPH_MDS_SESSION_OPEN) {
+        switch (s->s_state) {
+        case CEPH_MDS_SESSION_OPEN:
+                if (s->s_ttl && time_after(jiffies, s->s_ttl)) {
                         s->s_state = CEPH_MDS_SESSION_HUNG;
                         pr_info("mds%d hung\n", s->s_mds);
                 }
-        }
-        if (s->s_state == CEPH_MDS_SESSION_NEW ||
-            s->s_state == CEPH_MDS_SESSION_RESTARTING ||
-            s->s_state == CEPH_MDS_SESSION_CLOSED ||
-            s->s_state == CEPH_MDS_SESSION_REJECTED)
-                /* this mds is failed or recovering, just wait */
+                break;
+        case CEPH_MDS_SESSION_CLOSING:
+                /* Should never reach this when we're unmounting */
+                WARN_ON_ONCE(true);
+                fallthrough;
+        case CEPH_MDS_SESSION_NEW:
+        case CEPH_MDS_SESSION_RESTARTING:
+        case CEPH_MDS_SESSION_CLOSED:
+        case CEPH_MDS_SESSION_REJECTED:
                 return false;
+        }
 
         return true;
 }
 
+/*
+ * If the sequence is incremented while we're waiting on a REQUEST_CLOSE reply,
+ * then we need to retransmit that request.
+ */
+void inc_session_sequence(struct ceph_mds_session *s)
+{
+        lockdep_assert_held(&s->s_mutex);
+
+        s->s_seq++;
+
+        if (s->s_state == CEPH_MDS_SESSION_CLOSING) {
+                int ret;
+
+                dout("resending session close request for mds%d\n", s->s_mds);
+                ret = request_close_session(s);
+                if (ret < 0)
+                        pr_err("unable to close session to mds%d: %d\n",
+                               s->s_mds, ret);
+        }
+}
+
 /*
  * delayed work -- periodically trim expired leases, renew caps with mds
  */
@@ -480,6 +480,7 @@ struct ceph_mds_client {
 extern const char *ceph_mds_op_name(int op);
 
 extern bool check_session_state(struct ceph_mds_session *s);
+void inc_session_sequence(struct ceph_mds_session *s);
 
 extern struct ceph_mds_session *
 __ceph_lookup_mds_session(struct ceph_mds_client *, int mds);
@@ -53,7 +53,7 @@ void ceph_handle_quota(struct ceph_mds_client *mdsc,
 
         /* increment msg sequence number */
         mutex_lock(&session->s_mutex);
-        session->s_seq++;
+        inc_session_sequence(session);
         mutex_unlock(&session->s_mutex);
 
         /* lookup inode */
@@ -873,7 +873,7 @@ void ceph_handle_snap(struct ceph_mds_client *mdsc,
              ceph_snap_op_name(op), split, trace_len);
 
         mutex_lock(&session->s_mutex);
-        session->s_seq++;
+        inc_session_sequence(session);
         mutex_unlock(&session->s_mutex);
 
         down_write(&mdsc->snap_rwsem);