[ 139/171] libceph: prevent the race of incoming work during teardown

From: Greg Kroah-Hartman
Date: Thu Nov 22 2012 - 17:43:14 EST


3.4-stable review patch. If anyone has any objections, please let me know.

------------------

From: Guanjun He <gjhe@xxxxxxxx>

(cherry picked from commit a2a3258417eb6a1799cf893350771428875a8287)

Add an atomic 'stopping' flag to struct ceph_messenger. The flag is
initialized to 0 in ceph_messenger_init(), set to 1 in
ceph_destroy_client(), and tested at the top of ceph_sock_data_ready():
if the messenger is being torn down, the callback returns immediately
instead of queueing new incoming work.
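
[For context, a minimal userspace sketch of the same stop-flag idiom,
using C11 atomics in place of the kernel's atomic_t. The messenger
struct and the data_ready()/queue_con_work() names below are
illustrative stand-ins, not libceph code:]

#include <stdatomic.h>
#include <stdio.h>

struct messenger {
	atomic_int stopping;		/* analogue of ceph_messenger.stopping */
};

static void queue_con_work(struct messenger *msgr)
{
	printf("queueing connection work\n");
}

/* Analogue of ceph_sock_data_ready(): bail out once teardown begins. */
static void data_ready(struct messenger *msgr)
{
	if (atomic_load(&msgr->stopping))
		return;			/* teardown in progress, drop the event */
	queue_con_work(msgr);
}

int main(void)
{
	struct messenger msgr;

	atomic_store(&msgr.stopping, 0);	/* as in ceph_messenger_init() */
	data_ready(&msgr);			/* work is queued */

	atomic_store(&msgr.stopping, 1);	/* as in ceph_destroy_client() */
	data_ready(&msgr);			/* silently ignored */
	return 0;
}

[Note that the flag only keeps new work from being queued once teardown
has started; work already queued is drained by the normal shutdown path
(ceph_osdc_stop() is visible in the context below).]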

Signed-off-by: Guanjun He <gjhe@xxxxxxxx>
Reviewed-by: Sage Weil <sage@xxxxxxxxxxx>
Signed-off-by: Greg Kroah-Hartman <gregkh@xxxxxxxxxxxxxxxxxxx>
---
 include/linux/ceph/messenger.h |    1 +
 net/ceph/ceph_common.c         |    2 ++
 net/ceph/messenger.c           |    5 +++++
 3 files changed, 8 insertions(+)

--- a/include/linux/ceph/messenger.h
+++ b/include/linux/ceph/messenger.h
@@ -50,6 +50,7 @@ struct ceph_messenger {
 	struct ceph_entity_inst inst;    /* my name+address */
 	struct ceph_entity_addr my_enc_addr;
 
+	atomic_t stopping;
 	bool nocrc;
 
 	/*
--- a/net/ceph/ceph_common.c
+++ b/net/ceph/ceph_common.c
@@ -495,6 +495,8 @@ void ceph_destroy_client(struct ceph_cli
 {
 	dout("destroy_client %p\n", client);
 
+	atomic_set(&client->msgr.stopping, 1);
+
 	/* unmount */
 	ceph_osdc_stop(&client->osdc);
 
--- a/net/ceph/messenger.c
+++ b/net/ceph/messenger.c
@@ -254,6 +254,9 @@ static void con_sock_state_closed(struct
 static void ceph_sock_data_ready(struct sock *sk, int count_unused)
 {
 	struct ceph_connection *con = sk->sk_user_data;
+	if (atomic_read(&con->msgr->stopping)) {
+		return;
+	}
 
 	if (sk->sk_state != TCP_CLOSE_WAIT) {
 		dout("%s on %p state = %lu, queueing work\n", __func__,
@@ -2413,6 +2416,8 @@ void ceph_messenger_init(struct ceph_mes
 	encode_my_addr(msgr);
 	msgr->nocrc = nocrc;
 
+	atomic_set(&msgr->stopping, 0);
+
 	dout("%s %p\n", __func__, msgr);
 }
 EXPORT_SYMBOL(ceph_messenger_init);
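
[To make the race concrete, a hedged two-thread demo, again in
illustrative userspace C rather than libceph code: one thread plays
ceph_destroy_client() and raises the flag, the other plays the socket
callback. Whichever order the scheduler picks, a late data-ready event
can no longer queue work against a messenger that is going away.
Build with: cc -pthread demo.c]

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_int stopping;		/* stands in for msgr->stopping */

static void *teardown(void *unused)
{
	atomic_store(&stopping, 1);	/* ceph_destroy_client() analogue */
	return NULL;
}

static void *sock_data_ready(void *unused)
{
	/* The guard added by the patch: check before queueing work. */
	if (atomic_load(&stopping))
		puts("data_ready: messenger stopping, event dropped");
	else
		puts("data_ready: queueing work");
	return NULL;
}

int main(void)
{
	pthread_t t1, t2;

	atomic_store(&stopping, 0);	/* ceph_messenger_init() analogue */
	pthread_create(&t1, NULL, teardown, NULL);
	pthread_create(&t2, NULL, sock_data_ready, NULL);
	pthread_join(t1, NULL);
	pthread_join(t2, NULL);
	/* Output depends on scheduling; both orders are safe. */
	return 0;
}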

