Merge branch 'drbd-8.4_ed6' into for-3.8-drivers-drbd-8.4_ed6
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index eb0cafe..0331ad0 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -48,17 +48,25 @@
#include "drbd_vli.h"
+struct packet_info {
+ enum drbd_packet cmd;
+ unsigned int size;
+ unsigned int vnr;
+ void *data;
+};
+
enum finish_epoch {
FE_STILL_LIVE,
FE_DESTROYED,
FE_RECYCLED,
};
-static int drbd_do_handshake(struct drbd_conf *mdev);
-static int drbd_do_auth(struct drbd_conf *mdev);
+static int drbd_do_features(struct drbd_tconn *tconn);
+static int drbd_do_auth(struct drbd_tconn *tconn);
+static int drbd_disconnected(struct drbd_conf *mdev);
-static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *, struct drbd_epoch *, enum epoch_event);
-static int e_end_block(struct drbd_conf *, struct drbd_work *, int);
+static enum finish_epoch drbd_may_finish_epoch(struct drbd_tconn *, struct drbd_epoch *, enum epoch_event);
+static int e_end_block(struct drbd_work *, int);
#define GFP_TRY (__GFP_HIGHMEM | __GFP_NOWARN)
@@ -142,11 +150,12 @@
*head = chain_first;
}
-static struct page *drbd_pp_first_pages_or_try_alloc(struct drbd_conf *mdev, int number)
+static struct page *__drbd_alloc_pages(struct drbd_conf *mdev,
+ unsigned int number)
{
struct page *page = NULL;
struct page *tmp = NULL;
- int i = 0;
+ unsigned int i = 0;
/* Yes, testing drbd_pp_vacant outside the lock is racy.
* So what. It saves a spin_lock. */
@@ -175,7 +184,7 @@
return page;
/* Not enough pages immediately available this time.
- * No need to jump around here, drbd_pp_alloc will retry this
+ * No need to jump around here, drbd_alloc_pages will retry this
* function "soon". */
if (page) {
tmp = page_chain_tail(page, NULL);
@@ -187,9 +196,10 @@
return NULL;
}
-static void reclaim_net_ee(struct drbd_conf *mdev, struct list_head *to_be_freed)
+static void reclaim_finished_net_peer_reqs(struct drbd_conf *mdev,
+ struct list_head *to_be_freed)
{
- struct drbd_epoch_entry *e;
+ struct drbd_peer_request *peer_req;
struct list_head *le, *tle;
/* The EEs are always appended to the end of the list. Since
@@ -198,8 +208,8 @@
stop to examine the list... */
list_for_each_safe(le, tle, &mdev->net_ee) {
- e = list_entry(le, struct drbd_epoch_entry, w.list);
- if (drbd_ee_has_active_page(e))
+ peer_req = list_entry(le, struct drbd_peer_request, w.list);
+ if (drbd_peer_req_has_active_page(peer_req))
break;
list_move(le, to_be_freed);
}
@@ -208,18 +218,18 @@
static void drbd_kick_lo_and_reclaim_net(struct drbd_conf *mdev)
{
LIST_HEAD(reclaimed);
- struct drbd_epoch_entry *e, *t;
+ struct drbd_peer_request *peer_req, *t;
- spin_lock_irq(&mdev->req_lock);
- reclaim_net_ee(mdev, &reclaimed);
- spin_unlock_irq(&mdev->req_lock);
+ spin_lock_irq(&mdev->tconn->req_lock);
+ reclaim_finished_net_peer_reqs(mdev, &reclaimed);
+ spin_unlock_irq(&mdev->tconn->req_lock);
- list_for_each_entry_safe(e, t, &reclaimed, w.list)
- drbd_free_net_ee(mdev, e);
+ list_for_each_entry_safe(peer_req, t, &reclaimed, w.list)
+ drbd_free_net_peer_req(mdev, peer_req);
}
/**
- * drbd_pp_alloc() - Returns @number pages, retries forever (or until signalled)
+ * drbd_alloc_pages() - Returns @number pages, retries forever (or until signalled)
* @mdev: DRBD device.
* @number: number of pages requested
* @retry: whether to retry, if not enough pages are available right now
@@ -230,23 +240,31 @@
*
* Returns a page chain linked via page->private.
*/
-static struct page *drbd_pp_alloc(struct drbd_conf *mdev, unsigned number, bool retry)
+struct page *drbd_alloc_pages(struct drbd_conf *mdev, unsigned int number,
+ bool retry)
{
struct page *page = NULL;
+ struct net_conf *nc;
DEFINE_WAIT(wait);
+ int mxb;
/* Yes, we may run up to @number over max_buffers. If we
* follow it strictly, the admin will get it wrong anyways. */
- if (atomic_read(&mdev->pp_in_use) < mdev->net_conf->max_buffers)
- page = drbd_pp_first_pages_or_try_alloc(mdev, number);
+ rcu_read_lock();
+ nc = rcu_dereference(mdev->tconn->net_conf);
+ mxb = nc ? nc->max_buffers : 1000000;
+ rcu_read_unlock();
+
+ if (atomic_read(&mdev->pp_in_use) < mxb)
+ page = __drbd_alloc_pages(mdev, number);
while (page == NULL) {
prepare_to_wait(&drbd_pp_wait, &wait, TASK_INTERRUPTIBLE);
drbd_kick_lo_and_reclaim_net(mdev);
- if (atomic_read(&mdev->pp_in_use) < mdev->net_conf->max_buffers) {
- page = drbd_pp_first_pages_or_try_alloc(mdev, number);
+ if (atomic_read(&mdev->pp_in_use) < mxb) {
+ page = __drbd_alloc_pages(mdev, number);
if (page)
break;
}
@@ -255,7 +273,7 @@
break;
if (signal_pending(current)) {
- dev_warn(DEV, "drbd_pp_alloc interrupted!\n");
+ dev_warn(DEV, "drbd_alloc_pages interrupted!\n");
break;
}
@@ -268,11 +286,11 @@
return page;
}
-/* Must not be used from irq, as that may deadlock: see drbd_pp_alloc.
- * Is also used from inside an other spin_lock_irq(&mdev->req_lock);
+/* Must not be used from irq, as that may deadlock: see drbd_alloc_pages.
+ * Is also used from inside another spin_lock_irq(&mdev->tconn->req_lock);
* Either links the page chain back to the global pool,
* or returns all pages to the system. */
-static void drbd_pp_free(struct drbd_conf *mdev, struct page *page, int is_net)
+static void drbd_free_pages(struct drbd_conf *mdev, struct page *page, int is_net)
{
atomic_t *a = is_net ? &mdev->pp_in_use_by_net : &mdev->pp_in_use;
int i;
@@ -280,7 +298,7 @@
if (page == NULL)
return;
- if (drbd_pp_vacant > (DRBD_MAX_BIO_SIZE/PAGE_SIZE)*minor_count)
+ if (drbd_pp_vacant > (DRBD_MAX_BIO_SIZE/PAGE_SIZE) * minor_count)
i = page_chain_free(page);
else {
struct page *tmp;
@@ -302,127 +320,130 @@
_drbd_wait_ee_list_empty()
You must not have the req_lock:
- drbd_free_ee()
- drbd_alloc_ee()
- drbd_init_ee()
- drbd_release_ee()
+ drbd_free_peer_req()
+ drbd_alloc_peer_req()
+ drbd_free_peer_reqs()
drbd_ee_fix_bhs()
- drbd_process_done_ee()
+ drbd_finish_peer_reqs()
drbd_clear_done_ee()
drbd_wait_ee_list_empty()
*/
-struct drbd_epoch_entry *drbd_alloc_ee(struct drbd_conf *mdev,
- u64 id,
- sector_t sector,
- unsigned int data_size,
- gfp_t gfp_mask) __must_hold(local)
+struct drbd_peer_request *
+drbd_alloc_peer_req(struct drbd_conf *mdev, u64 id, sector_t sector,
+ unsigned int data_size, gfp_t gfp_mask) __must_hold(local)
{
- struct drbd_epoch_entry *e;
+ struct drbd_peer_request *peer_req;
struct page *page = NULL;
unsigned nr_pages = (data_size + PAGE_SIZE -1) >> PAGE_SHIFT;
if (drbd_insert_fault(mdev, DRBD_FAULT_AL_EE))
return NULL;
- e = mempool_alloc(drbd_ee_mempool, gfp_mask & ~__GFP_HIGHMEM);
- if (!e) {
+ peer_req = mempool_alloc(drbd_ee_mempool, gfp_mask & ~__GFP_HIGHMEM);
+ if (!peer_req) {
if (!(gfp_mask & __GFP_NOWARN))
- dev_err(DEV, "alloc_ee: Allocation of an EE failed\n");
+ dev_err(DEV, "%s: allocation failed\n", __func__);
return NULL;
}
if (data_size) {
- page = drbd_pp_alloc(mdev, nr_pages, (gfp_mask & __GFP_WAIT));
+ page = drbd_alloc_pages(mdev, nr_pages, (gfp_mask & __GFP_WAIT));
if (!page)
goto fail;
}
- INIT_HLIST_NODE(&e->collision);
- e->epoch = NULL;
- e->mdev = mdev;
- e->pages = page;
- atomic_set(&e->pending_bios, 0);
- e->size = data_size;
- e->flags = 0;
- e->sector = sector;
- e->block_id = id;
+ drbd_clear_interval(&peer_req->i);
+ peer_req->i.size = data_size;
+ peer_req->i.sector = sector;
+ peer_req->i.local = false;
+ peer_req->i.waiting = false;
- return e;
+ peer_req->epoch = NULL;
+ peer_req->w.mdev = mdev;
+ peer_req->pages = page;
+ atomic_set(&peer_req->pending_bios, 0);
+ peer_req->flags = 0;
+ /*
+ * The block_id is opaque to the receiver. It is not endianness
+ * converted, and sent back to the sender unchanged.
+ */
+ peer_req->block_id = id;
+
+ return peer_req;
fail:
- mempool_free(e, drbd_ee_mempool);
+ mempool_free(peer_req, drbd_ee_mempool);
return NULL;
}
-void drbd_free_some_ee(struct drbd_conf *mdev, struct drbd_epoch_entry *e, int is_net)
+void __drbd_free_peer_req(struct drbd_conf *mdev, struct drbd_peer_request *peer_req,
+ int is_net)
{
- if (e->flags & EE_HAS_DIGEST)
- kfree(e->digest);
- drbd_pp_free(mdev, e->pages, is_net);
- D_ASSERT(atomic_read(&e->pending_bios) == 0);
- D_ASSERT(hlist_unhashed(&e->collision));
- mempool_free(e, drbd_ee_mempool);
+ if (peer_req->flags & EE_HAS_DIGEST)
+ kfree(peer_req->digest);
+ drbd_free_pages(mdev, peer_req->pages, is_net);
+ D_ASSERT(atomic_read(&peer_req->pending_bios) == 0);
+ D_ASSERT(drbd_interval_empty(&peer_req->i));
+ mempool_free(peer_req, drbd_ee_mempool);
}
-int drbd_release_ee(struct drbd_conf *mdev, struct list_head *list)
+int drbd_free_peer_reqs(struct drbd_conf *mdev, struct list_head *list)
{
LIST_HEAD(work_list);
- struct drbd_epoch_entry *e, *t;
+ struct drbd_peer_request *peer_req, *t;
int count = 0;
int is_net = list == &mdev->net_ee;
- spin_lock_irq(&mdev->req_lock);
+ spin_lock_irq(&mdev->tconn->req_lock);
list_splice_init(list, &work_list);
- spin_unlock_irq(&mdev->req_lock);
+ spin_unlock_irq(&mdev->tconn->req_lock);
- list_for_each_entry_safe(e, t, &work_list, w.list) {
- drbd_free_some_ee(mdev, e, is_net);
+ list_for_each_entry_safe(peer_req, t, &work_list, w.list) {
+ __drbd_free_peer_req(mdev, peer_req, is_net);
count++;
}
return count;
}
-
/*
- * This function is called from _asender only_
- * but see also comments in _req_mod(,barrier_acked)
- * and receive_Barrier.
- *
- * Move entries from net_ee to done_ee, if ready.
- * Grab done_ee, call all callbacks, free the entries.
- * The callbacks typically send out ACKs.
+ * See also comments in _req_mod(,BARRIER_ACKED) and receive_Barrier.
*/
-static int drbd_process_done_ee(struct drbd_conf *mdev)
+static int drbd_finish_peer_reqs(struct drbd_conf *mdev)
{
LIST_HEAD(work_list);
LIST_HEAD(reclaimed);
- struct drbd_epoch_entry *e, *t;
- int ok = (mdev->state.conn >= C_WF_REPORT_PARAMS);
+ struct drbd_peer_request *peer_req, *t;
+ int err = 0;
- spin_lock_irq(&mdev->req_lock);
- reclaim_net_ee(mdev, &reclaimed);
+ spin_lock_irq(&mdev->tconn->req_lock);
+ reclaim_finished_net_peer_reqs(mdev, &reclaimed);
list_splice_init(&mdev->done_ee, &work_list);
- spin_unlock_irq(&mdev->req_lock);
+ spin_unlock_irq(&mdev->tconn->req_lock);
- list_for_each_entry_safe(e, t, &reclaimed, w.list)
- drbd_free_net_ee(mdev, e);
+ list_for_each_entry_safe(peer_req, t, &reclaimed, w.list)
+ drbd_free_net_peer_req(mdev, peer_req);
/* possible callbacks here:
- * e_end_block, and e_end_resync_block, e_send_discard_ack.
+ * e_end_block, and e_end_resync_block, e_send_superseded.
* all ignore the last argument.
*/
- list_for_each_entry_safe(e, t, &work_list, w.list) {
+ list_for_each_entry_safe(peer_req, t, &work_list, w.list) {
+ int err2;
+
/* list_del not necessary, next/prev members not touched */
- ok = e->w.cb(mdev, &e->w, !ok) && ok;
- drbd_free_ee(mdev, e);
+ err2 = peer_req->w.cb(&peer_req->w, !!err);
+ if (!err)
+ err = err2;
+ drbd_free_peer_req(mdev, peer_req);
}
wake_up(&mdev->ee_wait);
- return ok;
+ return err;
}
-void _drbd_wait_ee_list_empty(struct drbd_conf *mdev, struct list_head *head)
+static void _drbd_wait_ee_list_empty(struct drbd_conf *mdev,
+ struct list_head *head)
{
DEFINE_WAIT(wait);
@@ -430,55 +451,22 @@
* and calling prepare_to_wait in the fast path */
while (!list_empty(head)) {
prepare_to_wait(&mdev->ee_wait, &wait, TASK_UNINTERRUPTIBLE);
- spin_unlock_irq(&mdev->req_lock);
+ spin_unlock_irq(&mdev->tconn->req_lock);
io_schedule();
finish_wait(&mdev->ee_wait, &wait);
- spin_lock_irq(&mdev->req_lock);
+ spin_lock_irq(&mdev->tconn->req_lock);
}
}
-void drbd_wait_ee_list_empty(struct drbd_conf *mdev, struct list_head *head)
+static void drbd_wait_ee_list_empty(struct drbd_conf *mdev,
+ struct list_head *head)
{
- spin_lock_irq(&mdev->req_lock);
+ spin_lock_irq(&mdev->tconn->req_lock);
_drbd_wait_ee_list_empty(mdev, head);
- spin_unlock_irq(&mdev->req_lock);
+ spin_unlock_irq(&mdev->tconn->req_lock);
}
-/* see also kernel_accept; which is only present since 2.6.18.
- * also we want to log which part of it failed, exactly */
-static int drbd_accept(struct drbd_conf *mdev, const char **what,
- struct socket *sock, struct socket **newsock)
-{
- struct sock *sk = sock->sk;
- int err = 0;
-
- *what = "listen";
- err = sock->ops->listen(sock, 5);
- if (err < 0)
- goto out;
-
- *what = "sock_create_lite";
- err = sock_create_lite(sk->sk_family, sk->sk_type, sk->sk_protocol,
- newsock);
- if (err < 0)
- goto out;
-
- *what = "accept";
- err = sock->ops->accept(sock, *newsock, 0);
- if (err < 0) {
- sock_release(*newsock);
- *newsock = NULL;
- goto out;
- }
- (*newsock)->ops = sock->ops;
- __module_get((*newsock)->ops->owner);
-
-out:
- return err;
-}
-
-static int drbd_recv_short(struct drbd_conf *mdev, struct socket *sock,
- void *buf, size_t size, int flags)
+static int drbd_recv_short(struct socket *sock, void *buf, size_t size, int flags)
{
mm_segment_t oldfs;
struct kvec iov = {
@@ -500,48 +488,62 @@
return rv;
}
-static int drbd_recv(struct drbd_conf *mdev, void *buf, size_t size)
+static int drbd_recv(struct drbd_tconn *tconn, void *buf, size_t size)
{
- mm_segment_t oldfs;
- struct kvec iov = {
- .iov_base = buf,
- .iov_len = size,
- };
- struct msghdr msg = {
- .msg_iovlen = 1,
- .msg_iov = (struct iovec *)&iov,
- .msg_flags = MSG_WAITALL | MSG_NOSIGNAL
- };
int rv;
- oldfs = get_fs();
- set_fs(KERNEL_DS);
- rv = sock_recvmsg(mdev->data.socket, &msg, size, msg.msg_flags);
- set_fs(oldfs);
+ rv = drbd_recv_short(tconn->data.socket, buf, size, 0);
if (rv < 0) {
if (rv == -ECONNRESET)
- dev_info(DEV, "sock was reset by peer\n");
+ conn_info(tconn, "sock was reset by peer\n");
else if (rv != -ERESTARTSYS)
- dev_err(DEV, "sock_recvmsg returned %d\n", rv);
+ conn_err(tconn, "sock_recvmsg returned %d\n", rv);
} else if (rv == 0) {
- if (drbd_test_flag(mdev, DISCONNECT_SENT)) {
- long t; /* time_left */
- t = wait_event_timeout(mdev->state_wait, mdev->state.conn < C_CONNECTED,
- mdev->net_conf->ping_timeo * HZ/10);
+ if (test_bit(DISCONNECT_SENT, &tconn->flags)) {
+ long t;
+ rcu_read_lock();
+ t = rcu_dereference(tconn->net_conf)->ping_timeo * HZ/10;
+ rcu_read_unlock();
+
+ t = wait_event_timeout(tconn->ping_wait, tconn->cstate < C_WF_REPORT_PARAMS, t);
+
if (t)
goto out;
}
- dev_info(DEV, "sock was shut down by peer\n");
+ conn_info(tconn, "sock was shut down by peer\n");
}
if (rv != size)
- drbd_force_state(mdev, NS(conn, C_BROKEN_PIPE));
+ conn_request_state(tconn, NS(conn, C_BROKEN_PIPE), CS_HARD);
out:
return rv;
}
+static int drbd_recv_all(struct drbd_tconn *tconn, void *buf, size_t size)
+{
+ int err;
+
+ err = drbd_recv(tconn, buf, size);
+ if (err != size) {
+ if (err >= 0)
+ err = -EIO;
+ } else
+ err = 0;
+ return err;
+}
+
+static int drbd_recv_all_warn(struct drbd_tconn *tconn, void *buf, size_t size)
+{
+ int err;
+
+ err = drbd_recv_all(tconn, buf, size);
+ if (err && !signal_pending(current))
+ conn_warn(tconn, "short read (expected size %d)\n", (int)size);
+ return err;
+}
+
/* quoting tcp(7):
* On individual connections, the socket buffer size must be set prior to the
* listen(2) or connect(2) calls in order to have it take effect.
@@ -561,29 +563,50 @@
}
}
-static struct socket *drbd_try_connect(struct drbd_conf *mdev)
+static struct socket *drbd_try_connect(struct drbd_tconn *tconn)
{
const char *what;
struct socket *sock;
struct sockaddr_in6 src_in6;
- int err;
+ struct sockaddr_in6 peer_in6;
+ struct net_conf *nc;
+ int err, peer_addr_len, my_addr_len;
+ int sndbuf_size, rcvbuf_size, connect_int;
int disconnect_on_error = 1;
- if (!get_net_conf(mdev))
+ rcu_read_lock();
+ nc = rcu_dereference(tconn->net_conf);
+ if (!nc) {
+ rcu_read_unlock();
return NULL;
+ }
+ sndbuf_size = nc->sndbuf_size;
+ rcvbuf_size = nc->rcvbuf_size;
+ connect_int = nc->connect_int;
+ rcu_read_unlock();
+
+ my_addr_len = min_t(int, tconn->my_addr_len, sizeof(src_in6));
+ memcpy(&src_in6, &tconn->my_addr, my_addr_len);
+
+ if (((struct sockaddr *)&tconn->my_addr)->sa_family == AF_INET6)
+ src_in6.sin6_port = 0;
+ else
+ ((struct sockaddr_in *)&src_in6)->sin_port = 0; /* AF_INET & AF_SCI */
+
+ peer_addr_len = min_t(int, tconn->peer_addr_len, sizeof(src_in6));
+ memcpy(&peer_in6, &tconn->peer_addr, peer_addr_len);
what = "sock_create_kern";
- err = sock_create_kern(((struct sockaddr *)mdev->net_conf->my_addr)->sa_family,
- SOCK_STREAM, IPPROTO_TCP, &sock);
+ err = sock_create_kern(((struct sockaddr *)&src_in6)->sa_family,
+ SOCK_STREAM, IPPROTO_TCP, &sock);
if (err < 0) {
sock = NULL;
goto out;
}
sock->sk->sk_rcvtimeo =
- sock->sk->sk_sndtimeo = mdev->net_conf->try_connect_int*HZ;
- drbd_setbufsize(sock, mdev->net_conf->sndbuf_size,
- mdev->net_conf->rcvbuf_size);
+ sock->sk->sk_sndtimeo = connect_int * HZ;
+ drbd_setbufsize(sock, sndbuf_size, rcvbuf_size);
/* explicitly bind to the configured IP as source IP
* for the outgoing connections.
@@ -592,17 +615,8 @@
* Make sure to use 0 as port number, so linux selects
* a free one dynamically.
*/
- memcpy(&src_in6, mdev->net_conf->my_addr,
- min_t(int, mdev->net_conf->my_addr_len, sizeof(src_in6)));
- if (((struct sockaddr *)mdev->net_conf->my_addr)->sa_family == AF_INET6)
- src_in6.sin6_port = 0;
- else
- ((struct sockaddr_in *)&src_in6)->sin_port = 0; /* AF_INET & AF_SCI */
-
what = "bind before connect";
- err = sock->ops->bind(sock,
- (struct sockaddr *) &src_in6,
- mdev->net_conf->my_addr_len);
+ err = sock->ops->bind(sock, (struct sockaddr *) &src_in6, my_addr_len);
if (err < 0)
goto out;
@@ -610,9 +624,7 @@
* stay C_WF_CONNECTION, don't go Disconnecting! */
disconnect_on_error = 0;
what = "connect";
- err = sock->ops->connect(sock,
- (struct sockaddr *)mdev->net_conf->peer_addr,
- mdev->net_conf->peer_addr_len, 0);
+ err = sock->ops->connect(sock, (struct sockaddr *) &peer_in6, peer_addr_len, 0);
out:
if (err < 0) {
@@ -630,91 +642,174 @@
disconnect_on_error = 0;
break;
default:
- dev_err(DEV, "%s failed, err = %d\n", what, err);
+ conn_err(tconn, "%s failed, err = %d\n", what, err);
}
if (disconnect_on_error)
- drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
+ conn_request_state(tconn, NS(conn, C_DISCONNECTING), CS_HARD);
}
- put_net_conf(mdev);
+
return sock;
}
-static struct socket *drbd_wait_for_connect(struct drbd_conf *mdev)
+struct accept_wait_data {
+ struct drbd_tconn *tconn;
+ struct socket *s_listen;
+ struct completion door_bell;
+ void (*original_sk_state_change)(struct sock *sk);
+
+};
+
+static void drbd_incoming_connection(struct sock *sk)
{
- int timeo, err;
- struct socket *s_estab = NULL, *s_listen;
+ struct accept_wait_data *ad = sk->sk_user_data;
+ void (*state_change)(struct sock *sk);
+
+ state_change = ad->original_sk_state_change;
+ if (sk->sk_state == TCP_ESTABLISHED)
+ complete(&ad->door_bell);
+ state_change(sk);
+}
+
+static int prepare_listen_socket(struct drbd_tconn *tconn, struct accept_wait_data *ad)
+{
+ int err, sndbuf_size, rcvbuf_size, my_addr_len;
+ struct sockaddr_in6 my_addr;
+ struct socket *s_listen;
+ struct net_conf *nc;
const char *what;
- if (!get_net_conf(mdev))
- return NULL;
+ rcu_read_lock();
+ nc = rcu_dereference(tconn->net_conf);
+ if (!nc) {
+ rcu_read_unlock();
+ return -EIO;
+ }
+ sndbuf_size = nc->sndbuf_size;
+ rcvbuf_size = nc->rcvbuf_size;
+ rcu_read_unlock();
+
+ my_addr_len = min_t(int, tconn->my_addr_len, sizeof(struct sockaddr_in6));
+ memcpy(&my_addr, &tconn->my_addr, my_addr_len);
what = "sock_create_kern";
- err = sock_create_kern(((struct sockaddr *)mdev->net_conf->my_addr)->sa_family,
- SOCK_STREAM, IPPROTO_TCP, &s_listen);
+ err = sock_create_kern(((struct sockaddr *)&my_addr)->sa_family,
+ SOCK_STREAM, IPPROTO_TCP, &s_listen);
if (err) {
s_listen = NULL;
goto out;
}
- timeo = mdev->net_conf->try_connect_int * HZ;
- timeo += (random32() & 1) ? timeo / 7 : -timeo / 7; /* 28.5% random jitter */
-
- s_listen->sk->sk_reuse = SK_CAN_REUSE; /* SO_REUSEADDR */
- s_listen->sk->sk_rcvtimeo = timeo;
- s_listen->sk->sk_sndtimeo = timeo;
- drbd_setbufsize(s_listen, mdev->net_conf->sndbuf_size,
- mdev->net_conf->rcvbuf_size);
+ s_listen->sk->sk_reuse = SK_CAN_REUSE; /* SO_REUSEADDR */
+ drbd_setbufsize(s_listen, sndbuf_size, rcvbuf_size);
what = "bind before listen";
- err = s_listen->ops->bind(s_listen,
- (struct sockaddr *) mdev->net_conf->my_addr,
- mdev->net_conf->my_addr_len);
+ err = s_listen->ops->bind(s_listen, (struct sockaddr *)&my_addr, my_addr_len);
if (err < 0)
goto out;
- err = drbd_accept(mdev, &what, s_listen, &s_estab);
+ ad->s_listen = s_listen;
+ write_lock_bh(&s_listen->sk->sk_callback_lock);
+ ad->original_sk_state_change = s_listen->sk->sk_state_change;
+ s_listen->sk->sk_state_change = drbd_incoming_connection;
+ s_listen->sk->sk_user_data = ad;
+ write_unlock_bh(&s_listen->sk->sk_callback_lock);
+ what = "listen";
+ err = s_listen->ops->listen(s_listen, 5);
+ if (err < 0)
+ goto out;
+
+ return 0;
out:
if (s_listen)
sock_release(s_listen);
if (err < 0) {
if (err != -EAGAIN && err != -EINTR && err != -ERESTARTSYS) {
- dev_err(DEV, "%s failed, err = %d\n", what, err);
- drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
+ conn_err(tconn, "%s failed, err = %d\n", what, err);
+ conn_request_state(tconn, NS(conn, C_DISCONNECTING), CS_HARD);
}
}
- put_net_conf(mdev);
+
+ return -EIO;
+}
+
+static void unregister_state_change(struct sock *sk, struct accept_wait_data *ad)
+{
+ write_lock_bh(&sk->sk_callback_lock);
+ sk->sk_state_change = ad->original_sk_state_change;
+ sk->sk_user_data = NULL;
+ write_unlock_bh(&sk->sk_callback_lock);
+}
+
+static struct socket *drbd_wait_for_connect(struct drbd_tconn *tconn, struct accept_wait_data *ad)
+{
+ int timeo, connect_int, err = 0;
+ struct socket *s_estab = NULL;
+ struct net_conf *nc;
+
+ rcu_read_lock();
+ nc = rcu_dereference(tconn->net_conf);
+ if (!nc) {
+ rcu_read_unlock();
+ return NULL;
+ }
+ connect_int = nc->connect_int;
+ rcu_read_unlock();
+
+ timeo = connect_int * HZ;
+ timeo += (random32() & 1) ? timeo / 7 : -timeo / 7; /* 28.5% random jitter */
+
+ err = wait_for_completion_interruptible_timeout(&ad->door_bell, timeo);
+ if (err <= 0)
+ return NULL;
+
+ err = kernel_accept(ad->s_listen, &s_estab, 0);
+ if (err < 0) {
+ if (err != -EAGAIN && err != -EINTR && err != -ERESTARTSYS) {
+ conn_err(tconn, "accept failed, err = %d\n", err);
+ conn_request_state(tconn, NS(conn, C_DISCONNECTING), CS_HARD);
+ }
+ }
+
+ if (s_estab)
+ unregister_state_change(s_estab->sk, ad);
return s_estab;
}
-static int drbd_send_fp(struct drbd_conf *mdev,
- struct socket *sock, enum drbd_packets cmd)
-{
- struct p_header80 *h = &mdev->data.sbuf.header.h80;
+static int decode_header(struct drbd_tconn *, void *, struct packet_info *);
- return _drbd_send_cmd(mdev, sock, cmd, h, sizeof(*h), 0);
+static int send_first_packet(struct drbd_tconn *tconn, struct drbd_socket *sock,
+ enum drbd_packet cmd)
+{
+ if (!conn_prepare_command(tconn, sock))
+ return -EIO;
+ return conn_send_command(tconn, sock, cmd, 0, NULL, 0);
}
-static enum drbd_packets drbd_recv_fp(struct drbd_conf *mdev, struct socket *sock)
+static int receive_first_packet(struct drbd_tconn *tconn, struct socket *sock)
{
- struct p_header80 *h = &mdev->data.rbuf.header.h80;
- int rr;
+ unsigned int header_size = drbd_header_size(tconn);
+ struct packet_info pi;
+ int err;
- rr = drbd_recv_short(mdev, sock, h, sizeof(*h), 0);
-
- if (rr == sizeof(*h) && h->magic == BE_DRBD_MAGIC)
- return be16_to_cpu(h->command);
-
- return 0xffff;
+ err = drbd_recv_short(sock, tconn->data.rbuf, header_size, 0);
+ if (err != header_size) {
+ if (err >= 0)
+ err = -EIO;
+ return err;
+ }
+ err = decode_header(tconn, tconn->data.rbuf, &pi);
+ if (err)
+ return err;
+ return pi.cmd;
}
/**
* drbd_socket_okay() - Free the socket if its connection is not okay
- * @mdev: DRBD device.
* @sock: pointer to the pointer to the socket.
*/
-static int drbd_socket_okay(struct drbd_conf *mdev, struct socket **sock)
+static int drbd_socket_okay(struct socket **sock)
{
int rr;
char tb[4];
@@ -722,7 +817,7 @@
if (!*sock)
return false;
- rr = drbd_recv_short(mdev, *sock, tb, 4, MSG_DONTWAIT | MSG_PEEK);
+ rr = drbd_recv_short(*sock, tb, 4, MSG_DONTWAIT | MSG_PEEK);
if (rr > 0 || rr == -EAGAIN) {
return true;
@@ -732,6 +827,31 @@
return false;
}
}
+/* Gets called if a connection is established, or if a new minor gets created
+ in a connection */
+int drbd_connected(struct drbd_conf *mdev)
+{
+ int err;
+
+ atomic_set(&mdev->packet_seq, 0);
+ mdev->peer_seq = 0;
+
+ mdev->state_mutex = mdev->tconn->agreed_pro_version < 100 ?
+ &mdev->tconn->cstate_mutex :
+ &mdev->own_state_mutex;
+
+ err = drbd_send_sync_param(mdev);
+ if (!err)
+ err = drbd_send_sizes(mdev, 0, 0);
+ if (!err)
+ err = drbd_send_uuids(mdev);
+ if (!err)
+ err = drbd_send_current_state(mdev);
+ clear_bit(USE_DEGR_WFC_T, &mdev->flags);
+ clear_bit(RESIZE_PENDING, &mdev->flags);
+ mod_timer(&mdev->request_timer, jiffies + HZ); /* just start it here. */
+ return err;
+}
/*
* return values:
@@ -741,232 +861,305 @@
* no point in trying again, please go standalone.
* -2 We do not have a network config...
*/
-static int drbd_connect(struct drbd_conf *mdev)
+static int conn_connect(struct drbd_tconn *tconn)
{
- struct socket *s, *sock, *msock;
- int try, h, ok;
+ struct drbd_socket sock, msock;
+ struct drbd_conf *mdev;
+ struct net_conf *nc;
+ int vnr, timeout, h, ok;
+ bool discard_my_data;
enum drbd_state_rv rv;
+ struct accept_wait_data ad = {
+ .tconn = tconn,
+ .door_bell = COMPLETION_INITIALIZER_ONSTACK(ad.door_bell),
+ };
- D_ASSERT(!mdev->data.socket);
-
- drbd_clear_flag(mdev, DISCONNECT_SENT);
- if (drbd_request_state(mdev, NS(conn, C_WF_CONNECTION)) < SS_SUCCESS)
+ clear_bit(DISCONNECT_SENT, &tconn->flags);
+ if (conn_request_state(tconn, NS(conn, C_WF_CONNECTION), CS_VERBOSE) < SS_SUCCESS)
return -2;
- sock = NULL;
- msock = NULL;
+ mutex_init(&sock.mutex);
+ sock.sbuf = tconn->data.sbuf;
+ sock.rbuf = tconn->data.rbuf;
+ sock.socket = NULL;
+ mutex_init(&msock.mutex);
+ msock.sbuf = tconn->meta.sbuf;
+ msock.rbuf = tconn->meta.rbuf;
+ msock.socket = NULL;
+
+ /* Assume that the peer only understands protocol 80 until we know better. */
+ tconn->agreed_pro_version = 80;
+
+ if (prepare_listen_socket(tconn, &ad))
+ return 0;
do {
- for (try = 0;;) {
- /* 3 tries, this should take less than a second! */
- s = drbd_try_connect(mdev);
- if (s || ++try >= 3)
- break;
- /* give the other side time to call bind() & listen() */
- schedule_timeout_interruptible(HZ / 10);
- }
+ struct socket *s;
+ s = drbd_try_connect(tconn);
if (s) {
- if (!sock) {
- drbd_send_fp(mdev, s, P_HAND_SHAKE_S);
- sock = s;
- s = NULL;
- } else if (!msock) {
- drbd_clear_flag(mdev, DISCARD_CONCURRENT);
- drbd_send_fp(mdev, s, P_HAND_SHAKE_M);
- msock = s;
- s = NULL;
+ if (!sock.socket) {
+ sock.socket = s;
+ send_first_packet(tconn, &sock, P_INITIAL_DATA);
+ } else if (!msock.socket) {
+ clear_bit(RESOLVE_CONFLICTS, &tconn->flags);
+ msock.socket = s;
+ send_first_packet(tconn, &msock, P_INITIAL_META);
} else {
- dev_err(DEV, "Logic error in drbd_connect()\n");
+ conn_err(tconn, "Logic error in conn_connect()\n");
goto out_release_sockets;
}
}
- if (sock && msock) {
- schedule_timeout_interruptible(mdev->net_conf->ping_timeo*HZ/10);
- ok = drbd_socket_okay(mdev, &sock);
- ok = drbd_socket_okay(mdev, &msock) && ok;
+ if (sock.socket && msock.socket) {
+ rcu_read_lock();
+ nc = rcu_dereference(tconn->net_conf);
+ timeout = nc->ping_timeo * HZ / 10;
+ rcu_read_unlock();
+ schedule_timeout_interruptible(timeout);
+ ok = drbd_socket_okay(&sock.socket);
+ ok = drbd_socket_okay(&msock.socket) && ok;
if (ok)
break;
}
retry:
- s = drbd_wait_for_connect(mdev);
+ s = drbd_wait_for_connect(tconn, &ad);
if (s) {
- try = drbd_recv_fp(mdev, s);
- drbd_socket_okay(mdev, &sock);
- drbd_socket_okay(mdev, &msock);
- switch (try) {
- case P_HAND_SHAKE_S:
- if (sock) {
- dev_warn(DEV, "initial packet S crossed\n");
- sock_release(sock);
+ int fp = receive_first_packet(tconn, s);
+ drbd_socket_okay(&sock.socket);
+ drbd_socket_okay(&msock.socket);
+ switch (fp) {
+ case P_INITIAL_DATA:
+ if (sock.socket) {
+ conn_warn(tconn, "initial packet S crossed\n");
+ sock_release(sock.socket);
+ sock.socket = s;
+ goto randomize;
}
- sock = s;
+ sock.socket = s;
break;
- case P_HAND_SHAKE_M:
- if (msock) {
- dev_warn(DEV, "initial packet M crossed\n");
- sock_release(msock);
+ case P_INITIAL_META:
+ set_bit(RESOLVE_CONFLICTS, &tconn->flags);
+ if (msock.socket) {
+ conn_warn(tconn, "initial packet M crossed\n");
+ sock_release(msock.socket);
+ msock.socket = s;
+ goto randomize;
}
- msock = s;
- drbd_set_flag(mdev, DISCARD_CONCURRENT);
+ msock.socket = s;
break;
default:
- dev_warn(DEV, "Error receiving initial packet\n");
+ conn_warn(tconn, "Error receiving initial packet\n");
sock_release(s);
+randomize:
if (random32() & 1)
goto retry;
}
}
- if (mdev->state.conn <= C_DISCONNECTING)
+ if (tconn->cstate <= C_DISCONNECTING)
goto out_release_sockets;
if (signal_pending(current)) {
flush_signals(current);
smp_rmb();
- if (get_t_state(&mdev->receiver) == Exiting)
+ if (get_t_state(&tconn->receiver) == EXITING)
goto out_release_sockets;
}
- if (sock && msock) {
- ok = drbd_socket_okay(mdev, &sock);
- ok = drbd_socket_okay(mdev, &msock) && ok;
- if (ok)
- break;
- }
- } while (1);
+ ok = drbd_socket_okay(&sock.socket);
+ ok = drbd_socket_okay(&msock.socket) && ok;
+ } while (!ok);
- msock->sk->sk_reuse = SK_CAN_REUSE; /* SO_REUSEADDR */
- sock->sk->sk_reuse = SK_CAN_REUSE; /* SO_REUSEADDR */
+ if (ad.s_listen)
+ sock_release(ad.s_listen);
- sock->sk->sk_allocation = GFP_NOIO;
- msock->sk->sk_allocation = GFP_NOIO;
+ sock.socket->sk->sk_reuse = SK_CAN_REUSE; /* SO_REUSEADDR */
+ msock.socket->sk->sk_reuse = SK_CAN_REUSE; /* SO_REUSEADDR */
- sock->sk->sk_priority = TC_PRIO_INTERACTIVE_BULK;
- msock->sk->sk_priority = TC_PRIO_INTERACTIVE;
+ sock.socket->sk->sk_allocation = GFP_NOIO;
+ msock.socket->sk->sk_allocation = GFP_NOIO;
+
+ sock.socket->sk->sk_priority = TC_PRIO_INTERACTIVE_BULK;
+ msock.socket->sk->sk_priority = TC_PRIO_INTERACTIVE;
/* NOT YET ...
- * sock->sk->sk_sndtimeo = mdev->net_conf->timeout*HZ/10;
- * sock->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
- * first set it to the P_HAND_SHAKE timeout,
+ * sock.socket->sk->sk_sndtimeo = tconn->net_conf->timeout*HZ/10;
+ * sock.socket->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
+ * first set it to the P_CONNECTION_FEATURES timeout,
* which we set to 4x the configured ping_timeout. */
- sock->sk->sk_sndtimeo =
- sock->sk->sk_rcvtimeo = mdev->net_conf->ping_timeo*4*HZ/10;
+ rcu_read_lock();
+ nc = rcu_dereference(tconn->net_conf);
- msock->sk->sk_sndtimeo = mdev->net_conf->timeout*HZ/10;
- msock->sk->sk_rcvtimeo = mdev->net_conf->ping_int*HZ;
+ sock.socket->sk->sk_sndtimeo =
+ sock.socket->sk->sk_rcvtimeo = nc->ping_timeo*4*HZ/10;
+
+ msock.socket->sk->sk_rcvtimeo = nc->ping_int*HZ;
+ timeout = nc->timeout * HZ / 10;
+ discard_my_data = nc->discard_my_data;
+ rcu_read_unlock();
+
+ msock.socket->sk->sk_sndtimeo = timeout;
/* we don't want delays.
* we use TCP_CORK where appropriate, though */
- drbd_tcp_nodelay(sock);
- drbd_tcp_nodelay(msock);
+ drbd_tcp_nodelay(sock.socket);
+ drbd_tcp_nodelay(msock.socket);
- mdev->data.socket = sock;
- mdev->meta.socket = msock;
- mdev->last_received = jiffies;
+ tconn->data.socket = sock.socket;
+ tconn->meta.socket = msock.socket;
+ tconn->last_received = jiffies;
- D_ASSERT(mdev->asender.task == NULL);
-
- h = drbd_do_handshake(mdev);
+ h = drbd_do_features(tconn);
if (h <= 0)
return h;
- if (mdev->cram_hmac_tfm) {
+ if (tconn->cram_hmac_tfm) {
/* drbd_request_state(mdev, NS(conn, WFAuth)); */
- switch (drbd_do_auth(mdev)) {
+ switch (drbd_do_auth(tconn)) {
case -1:
- dev_err(DEV, "Authentication of peer failed\n");
+ conn_err(tconn, "Authentication of peer failed\n");
return -1;
case 0:
- dev_err(DEV, "Authentication of peer failed, trying again.\n");
+ conn_err(tconn, "Authentication of peer failed, trying again.\n");
return 0;
}
}
- sock->sk->sk_sndtimeo = mdev->net_conf->timeout*HZ/10;
- sock->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
+ tconn->data.socket->sk->sk_sndtimeo = timeout;
+ tconn->data.socket->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
- atomic_set(&mdev->packet_seq, 0);
- mdev->peer_seq = 0;
-
- if (drbd_send_protocol(mdev) == -1)
+ if (drbd_send_protocol(tconn) == -EOPNOTSUPP)
return -1;
- drbd_set_flag(mdev, STATE_SENT);
- drbd_send_sync_param(mdev, &mdev->sync_conf);
- drbd_send_sizes(mdev, 0, 0);
- drbd_send_uuids(mdev);
- drbd_send_current_state(mdev);
- drbd_clear_flag(mdev, USE_DEGR_WFC_T);
- drbd_clear_flag(mdev, RESIZE_PENDING);
- spin_lock_irq(&mdev->req_lock);
- rv = _drbd_set_state(_NS(mdev, conn, C_WF_REPORT_PARAMS), CS_VERBOSE, NULL);
- if (mdev->state.conn != C_WF_REPORT_PARAMS)
- drbd_clear_flag(mdev, STATE_SENT);
- spin_unlock_irq(&mdev->req_lock);
+ set_bit(STATE_SENT, &tconn->flags);
- if (rv < SS_SUCCESS)
+ rcu_read_lock();
+ idr_for_each_entry(&tconn->volumes, mdev, vnr) {
+ kref_get(&mdev->kref);
+ rcu_read_unlock();
+
+ if (discard_my_data)
+ set_bit(DISCARD_MY_DATA, &mdev->flags);
+ else
+ clear_bit(DISCARD_MY_DATA, &mdev->flags);
+
+ drbd_connected(mdev);
+ kref_put(&mdev->kref, &drbd_minor_destroy);
+ rcu_read_lock();
+ }
+ rcu_read_unlock();
+
+ rv = conn_request_state(tconn, NS(conn, C_WF_REPORT_PARAMS), CS_VERBOSE);
+ if (rv < SS_SUCCESS) {
+ clear_bit(STATE_SENT, &tconn->flags);
return 0;
+ }
- drbd_thread_start(&mdev->asender);
- mod_timer(&mdev->request_timer, jiffies + HZ); /* just start it here. */
+ drbd_thread_start(&tconn->asender);
- return 1;
+ mutex_lock(&tconn->conf_update);
+ /* The discard_my_data flag is a single-shot modifier to the next
+ * connection attempt, the handshake of which is now well underway.
+ * No need for rcu style copying of the whole struct
+ * just to clear a single value. */
+ tconn->net_conf->discard_my_data = 0;
+ mutex_unlock(&tconn->conf_update);
+
+ return h;
out_release_sockets:
- if (sock)
- sock_release(sock);
- if (msock)
- sock_release(msock);
+ if (ad.s_listen)
+ sock_release(ad.s_listen);
+ if (sock.socket)
+ sock_release(sock.socket);
+ if (msock.socket)
+ sock_release(msock.socket);
return -1;
}
-static int drbd_recv_header(struct drbd_conf *mdev, enum drbd_packets *cmd, unsigned int *packet_size)
+static int decode_header(struct drbd_tconn *tconn, void *header, struct packet_info *pi)
{
- union p_header *h = &mdev->data.rbuf.header;
- int r;
+ unsigned int header_size = drbd_header_size(tconn);
- r = drbd_recv(mdev, h, sizeof(*h));
- if (unlikely(r != sizeof(*h))) {
- if (!signal_pending(current))
- dev_warn(DEV, "short read expecting header on sock: r=%d\n", r);
- return false;
- }
-
- if (likely(h->h80.magic == BE_DRBD_MAGIC)) {
- *cmd = be16_to_cpu(h->h80.command);
- *packet_size = be16_to_cpu(h->h80.length);
- } else if (h->h95.magic == BE_DRBD_MAGIC_BIG) {
- *cmd = be16_to_cpu(h->h95.command);
- *packet_size = be32_to_cpu(h->h95.length);
+ if (header_size == sizeof(struct p_header100) &&
+ *(__be32 *)header == cpu_to_be32(DRBD_MAGIC_100)) {
+ struct p_header100 *h = header;
+ if (h->pad != 0) {
+ conn_err(tconn, "Header padding is not zero\n");
+ return -EINVAL;
+ }
+ pi->vnr = be16_to_cpu(h->volume);
+ pi->cmd = be16_to_cpu(h->command);
+ pi->size = be32_to_cpu(h->length);
+ } else if (header_size == sizeof(struct p_header95) &&
+ *(__be16 *)header == cpu_to_be16(DRBD_MAGIC_BIG)) {
+ struct p_header95 *h = header;
+ pi->cmd = be16_to_cpu(h->command);
+ pi->size = be32_to_cpu(h->length);
+ pi->vnr = 0;
+ } else if (header_size == sizeof(struct p_header80) &&
+ *(__be32 *)header == cpu_to_be32(DRBD_MAGIC)) {
+ struct p_header80 *h = header;
+ pi->cmd = be16_to_cpu(h->command);
+ pi->size = be16_to_cpu(h->length);
+ pi->vnr = 0;
} else {
- dev_err(DEV, "magic?? on data m: 0x%08x c: %d l: %d\n",
- be32_to_cpu(h->h80.magic),
- be16_to_cpu(h->h80.command),
- be16_to_cpu(h->h80.length));
- return false;
+ conn_err(tconn, "Wrong magic value 0x%08x in protocol version %d\n",
+ be32_to_cpu(*(__be32 *)header),
+ tconn->agreed_pro_version);
+ return -EINVAL;
}
- mdev->last_received = jiffies;
-
- return true;
+ pi->data = header + header_size;
+ return 0;
}
-static void drbd_flush(struct drbd_conf *mdev)
+static int drbd_recv_header(struct drbd_tconn *tconn, struct packet_info *pi)
+{
+ void *buffer = tconn->data.rbuf;
+ int err;
+
+ err = drbd_recv_all_warn(tconn, buffer, drbd_header_size(tconn));
+ if (err)
+ return err;
+
+ err = decode_header(tconn, buffer, pi);
+ tconn->last_received = jiffies;
+
+ return err;
+}
+
+static void drbd_flush(struct drbd_tconn *tconn)
{
int rv;
+ struct drbd_conf *mdev;
+ int vnr;
- if (mdev->write_ordering >= WO_bdev_flush && get_ldev(mdev)) {
- rv = blkdev_issue_flush(mdev->ldev->backing_bdev, GFP_NOIO,
- NULL);
- if (rv) {
- dev_info(DEV, "local disk flush failed with status %d\n", rv);
- /* would rather check on EOPNOTSUPP, but that is not reliable.
- * don't try again for ANY return value != 0
- * if (rv == -EOPNOTSUPP) */
- drbd_bump_write_ordering(mdev, WO_drain_io);
+ if (tconn->write_ordering >= WO_bdev_flush) {
+ rcu_read_lock();
+ idr_for_each_entry(&tconn->volumes, mdev, vnr) {
+ if (!get_ldev(mdev))
+ continue;
+ kref_get(&mdev->kref);
+ rcu_read_unlock();
+
+ rv = blkdev_issue_flush(mdev->ldev->backing_bdev,
+ GFP_NOIO, NULL);
+ if (rv) {
+ dev_info(DEV, "local disk flush failed with status %d\n", rv);
+ /* would rather check on EOPNOTSUPP, but that is not reliable.
+ * don't try again for ANY return value != 0
+ * if (rv == -EOPNOTSUPP) */
+ drbd_bump_write_ordering(tconn, WO_drain_io);
+ }
+ put_ldev(mdev);
+ kref_put(&mdev->kref, &drbd_minor_destroy);
+
+ rcu_read_lock();
+ if (rv)
+ break;
}
- put_ldev(mdev);
+ rcu_read_unlock();
}
}
@@ -976,7 +1169,7 @@
* @epoch: Epoch object.
* @ev: Epoch event.
*/
-static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev,
+static enum finish_epoch drbd_may_finish_epoch(struct drbd_tconn *tconn,
struct drbd_epoch *epoch,
enum epoch_event ev)
{
@@ -984,7 +1177,7 @@
struct drbd_epoch *next_epoch;
enum finish_epoch rv = FE_STILL_LIVE;
- spin_lock(&mdev->epoch_lock);
+ spin_lock(&tconn->epoch_lock);
do {
next_epoch = NULL;
@@ -1006,18 +1199,22 @@
atomic_read(&epoch->active) == 0 &&
(test_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags) || ev & EV_CLEANUP)) {
if (!(ev & EV_CLEANUP)) {
- spin_unlock(&mdev->epoch_lock);
- drbd_send_b_ack(mdev, epoch->barrier_nr, epoch_size);
- spin_lock(&mdev->epoch_lock);
+ spin_unlock(&tconn->epoch_lock);
+ drbd_send_b_ack(epoch->tconn, epoch->barrier_nr, epoch_size);
+ spin_lock(&tconn->epoch_lock);
}
+#if 0
+ /* FIXME: dec unacked on connection, once we have
+ * something to count pending connection packets in. */
if (test_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags))
- dec_unacked(mdev);
+ dec_unacked(epoch->tconn);
+#endif
- if (mdev->current_epoch != epoch) {
+ if (tconn->current_epoch != epoch) {
next_epoch = list_entry(epoch->list.next, struct drbd_epoch, list);
list_del(&epoch->list);
ev = EV_BECAME_LAST | (ev & EV_CLEANUP);
- mdev->epochs--;
+ tconn->epochs--;
kfree(epoch);
if (rv == FE_STILL_LIVE)
@@ -1028,7 +1225,6 @@
/* atomic_set(&epoch->active, 0); is already zero */
if (rv == FE_STILL_LIVE)
rv = FE_RECYCLED;
- wake_up(&mdev->ee_wait);
}
}
@@ -1038,40 +1234,52 @@
epoch = next_epoch;
} while (1);
- spin_unlock(&mdev->epoch_lock);
+ spin_unlock(&tconn->epoch_lock);
return rv;
}
/**
* drbd_bump_write_ordering() - Fall back to another write ordering method
- * @mdev: DRBD device.
+ * @tconn: DRBD connection.
* @wo: Write ordering method to try.
*/
-void drbd_bump_write_ordering(struct drbd_conf *mdev, enum write_ordering_e wo) __must_hold(local)
+void drbd_bump_write_ordering(struct drbd_tconn *tconn, enum write_ordering_e wo)
{
+ struct disk_conf *dc;
+ struct drbd_conf *mdev;
enum write_ordering_e pwo;
+ int vnr;
static char *write_ordering_str[] = {
[WO_none] = "none",
[WO_drain_io] = "drain",
[WO_bdev_flush] = "flush",
};
- pwo = mdev->write_ordering;
+ pwo = tconn->write_ordering;
wo = min(pwo, wo);
- if (wo == WO_bdev_flush && mdev->ldev->dc.no_disk_flush)
- wo = WO_drain_io;
- if (wo == WO_drain_io && mdev->ldev->dc.no_disk_drain)
- wo = WO_none;
- mdev->write_ordering = wo;
- if (pwo != mdev->write_ordering || wo == WO_bdev_flush)
- dev_info(DEV, "Method to ensure write ordering: %s\n", write_ordering_str[mdev->write_ordering]);
+ rcu_read_lock();
+ idr_for_each_entry(&tconn->volumes, mdev, vnr) {
+ if (!get_ldev_if_state(mdev, D_ATTACHING))
+ continue;
+ dc = rcu_dereference(mdev->ldev->disk_conf);
+
+ if (wo == WO_bdev_flush && !dc->disk_flushes)
+ wo = WO_drain_io;
+ if (wo == WO_drain_io && !dc->disk_drain)
+ wo = WO_none;
+ put_ldev(mdev);
+ }
+ rcu_read_unlock();
+ tconn->write_ordering = wo;
+ if (pwo != tconn->write_ordering || wo == WO_bdev_flush)
+ conn_info(tconn, "Method to ensure write ordering: %s\n", write_ordering_str[tconn->write_ordering]);
}
/**
- * drbd_submit_ee()
+ * drbd_submit_peer_request()
* @mdev: DRBD device.
- * @e: epoch entry
+ * @peer_req: peer request
* @rw: flag field, see bio->bi_rw
*
* May spread the pages to multiple bios,
@@ -1085,14 +1293,15 @@
* on certain Xen deployments.
*/
/* TODO allocate from our own bio_set. */
-int drbd_submit_ee(struct drbd_conf *mdev, struct drbd_epoch_entry *e,
- const unsigned rw, const int fault_type)
+int drbd_submit_peer_request(struct drbd_conf *mdev,
+ struct drbd_peer_request *peer_req,
+ const unsigned rw, const int fault_type)
{
struct bio *bios = NULL;
struct bio *bio;
- struct page *page = e->pages;
- sector_t sector = e->sector;
- unsigned ds = e->size;
+ struct page *page = peer_req->pages;
+ sector_t sector = peer_req->i.sector;
+ unsigned ds = peer_req->i.size;
unsigned n_bios = 0;
unsigned nr_pages = (ds + PAGE_SIZE -1) >> PAGE_SHIFT;
int err = -ENOMEM;
@@ -1111,12 +1320,12 @@
dev_err(DEV, "submit_ee: Allocation of a bio failed\n");
goto fail;
}
- /* > e->sector, unless this is the first bio */
+ /* > peer_req->i.sector, unless this is the first bio */
bio->bi_sector = sector;
bio->bi_bdev = mdev->ldev->backing_bdev;
bio->bi_rw = rw;
- bio->bi_private = e;
- bio->bi_end_io = drbd_endio_sec;
+ bio->bi_private = peer_req;
+ bio->bi_end_io = drbd_peer_request_endio;
bio->bi_next = bios;
bios = bio;
@@ -1145,7 +1354,7 @@
D_ASSERT(page == NULL);
D_ASSERT(ds == 0);
- atomic_set(&e->pending_bios, n_bios);
+ atomic_set(&peer_req->pending_bios, n_bios);
do {
bio = bios;
bios = bios->bi_next;
@@ -1164,26 +1373,57 @@
return err;
}
-static int receive_Barrier(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
+static void drbd_remove_epoch_entry_interval(struct drbd_conf *mdev,
+ struct drbd_peer_request *peer_req)
+{
+ struct drbd_interval *i = &peer_req->i;
+
+ drbd_remove_interval(&mdev->write_requests, i);
+ drbd_clear_interval(i);
+
+ /* Wake up any processes waiting for this peer request to complete. */
+ if (i->waiting)
+ wake_up(&mdev->misc_wait);
+}
+
+void conn_wait_active_ee_empty(struct drbd_tconn *tconn)
+{
+ struct drbd_conf *mdev;
+ int vnr;
+
+ rcu_read_lock();
+ idr_for_each_entry(&tconn->volumes, mdev, vnr) {
+ kref_get(&mdev->kref);
+ rcu_read_unlock();
+ drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
+ kref_put(&mdev->kref, &drbd_minor_destroy);
+ rcu_read_lock();
+ }
+ rcu_read_unlock();
+}
+
+static int receive_Barrier(struct drbd_tconn *tconn, struct packet_info *pi)
{
int rv;
- struct p_barrier *p = &mdev->data.rbuf.barrier;
+ struct p_barrier *p = pi->data;
struct drbd_epoch *epoch;
- inc_unacked(mdev);
-
- mdev->current_epoch->barrier_nr = p->barrier;
- rv = drbd_may_finish_epoch(mdev, mdev->current_epoch, EV_GOT_BARRIER_NR);
+ /* FIXME these are unacked on connection,
+ * not a specific (peer)device.
+ */
+ tconn->current_epoch->barrier_nr = p->barrier;
+ tconn->current_epoch->tconn = tconn;
+ rv = drbd_may_finish_epoch(tconn, tconn->current_epoch, EV_GOT_BARRIER_NR);
/* P_BARRIER_ACK may imply that the corresponding extent is dropped from
* the activity log, which means it would not be resynced in case the
* R_PRIMARY crashes now.
* Therefore we must send the barrier_ack after the barrier request was
* completed. */
- switch (mdev->write_ordering) {
+ switch (tconn->write_ordering) {
case WO_none:
if (rv == FE_RECYCLED)
- return true;
+ return 0;
/* receiver context, in the writeout path of the other node.
* avoid potential distributed deadlock */
@@ -1191,81 +1431,75 @@
if (epoch)
break;
else
- dev_warn(DEV, "Allocation of an epoch failed, slowing down\n");
+ conn_warn(tconn, "Allocation of an epoch failed, slowing down\n");
/* Fall through */
case WO_bdev_flush:
case WO_drain_io:
- drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
- drbd_flush(mdev);
+ conn_wait_active_ee_empty(tconn);
+ drbd_flush(tconn);
- if (atomic_read(&mdev->current_epoch->epoch_size)) {
+ if (atomic_read(&tconn->current_epoch->epoch_size)) {
epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
if (epoch)
break;
}
- epoch = mdev->current_epoch;
- wait_event(mdev->ee_wait, atomic_read(&epoch->epoch_size) == 0);
-
- D_ASSERT(atomic_read(&epoch->active) == 0);
- D_ASSERT(epoch->flags == 0);
-
- return true;
+ return 0;
default:
- dev_err(DEV, "Strangeness in mdev->write_ordering %d\n", mdev->write_ordering);
- return false;
+ conn_err(tconn, "Strangeness in tconn->write_ordering %d\n", tconn->write_ordering);
+ return -EIO;
}
epoch->flags = 0;
atomic_set(&epoch->epoch_size, 0);
atomic_set(&epoch->active, 0);
- spin_lock(&mdev->epoch_lock);
- if (atomic_read(&mdev->current_epoch->epoch_size)) {
- list_add(&epoch->list, &mdev->current_epoch->list);
- mdev->current_epoch = epoch;
- mdev->epochs++;
+ spin_lock(&tconn->epoch_lock);
+ if (atomic_read(&tconn->current_epoch->epoch_size)) {
+ list_add(&epoch->list, &tconn->current_epoch->list);
+ tconn->current_epoch = epoch;
+ tconn->epochs++;
} else {
/* The current_epoch got recycled while we allocated this one... */
kfree(epoch);
}
- spin_unlock(&mdev->epoch_lock);
+ spin_unlock(&tconn->epoch_lock);
- return true;
+ return 0;
}
/* used from receive_RSDataReply (recv_resync_read)
* and from receive_Data */
-static struct drbd_epoch_entry *
-read_in_block(struct drbd_conf *mdev, u64 id, sector_t sector, int data_size) __must_hold(local)
+static struct drbd_peer_request *
+read_in_block(struct drbd_conf *mdev, u64 id, sector_t sector,
+ int data_size) __must_hold(local)
{
const sector_t capacity = drbd_get_capacity(mdev->this_bdev);
- struct drbd_epoch_entry *e;
+ struct drbd_peer_request *peer_req;
struct page *page;
- int dgs, ds, rr;
- void *dig_in = mdev->int_dig_in;
- void *dig_vv = mdev->int_dig_vv;
+ int dgs, ds, err;
+ void *dig_in = mdev->tconn->int_dig_in;
+ void *dig_vv = mdev->tconn->int_dig_vv;
unsigned long *data;
- dgs = (mdev->agreed_pro_version >= 87 && mdev->integrity_r_tfm) ?
- crypto_hash_digestsize(mdev->integrity_r_tfm) : 0;
-
- if (dgs) {
- rr = drbd_recv(mdev, dig_in, dgs);
- if (rr != dgs) {
- if (!signal_pending(current))
- dev_warn(DEV,
- "short read receiving data digest: read %d expected %d\n",
- rr, dgs);
+ dgs = 0;
+ if (mdev->tconn->peer_integrity_tfm) {
+ dgs = crypto_hash_digestsize(mdev->tconn->peer_integrity_tfm);
+ /*
+ * FIXME: Receive the incoming digest into the receive buffer
+ * here, together with its struct p_data?
+ */
+ err = drbd_recv_all_warn(mdev->tconn, dig_in, dgs);
+ if (err)
return NULL;
- }
+ data_size -= dgs;
}
- data_size -= dgs;
-
- ERR_IF(data_size & 0x1ff) return NULL;
- ERR_IF(data_size > DRBD_MAX_BIO_SIZE) return NULL;
+ if (!expect(IS_ALIGNED(data_size, 512)))
+ return NULL;
+ if (!expect(data_size <= DRBD_MAX_BIO_SIZE))
+ return NULL;
/* even though we trust our peer,
* we sometimes have to double check. */
@@ -1280,47 +1514,42 @@
/* GFP_NOIO, because we must not cause arbitrary write-out: in a DRBD
* "criss-cross" setup, that might cause write-out on some other DRBD,
* which in turn might block on the other node at this very place. */
- e = drbd_alloc_ee(mdev, id, sector, data_size, GFP_NOIO);
- if (!e)
+ peer_req = drbd_alloc_peer_req(mdev, id, sector, data_size, GFP_NOIO);
+ if (!peer_req)
return NULL;
if (!data_size)
- return e;
+ return peer_req;
ds = data_size;
- page = e->pages;
+ page = peer_req->pages;
page_chain_for_each(page) {
unsigned len = min_t(int, ds, PAGE_SIZE);
data = kmap(page);
- rr = drbd_recv(mdev, data, len);
+ err = drbd_recv_all_warn(mdev->tconn, data, len);
if (drbd_insert_fault(mdev, DRBD_FAULT_RECEIVE)) {
dev_err(DEV, "Fault injection: Corrupting data on receive\n");
data[0] = data[0] ^ (unsigned long)-1;
}
kunmap(page);
- if (rr != len) {
- drbd_free_ee(mdev, e);
- if (!signal_pending(current))
- dev_warn(DEV, "short read receiving data: read %d expected %d\n",
- rr, len);
+ if (err) {
+ drbd_free_peer_req(mdev, peer_req);
return NULL;
}
- ds -= rr;
+ ds -= len;
}
if (dgs) {
- drbd_csum_ee(mdev, mdev->integrity_r_tfm, e, dig_vv);
+ drbd_csum_ee(mdev, mdev->tconn->peer_integrity_tfm, peer_req, dig_vv);
if (memcmp(dig_in, dig_vv, dgs)) {
dev_err(DEV, "Digest integrity check FAILED: %llus +%u\n",
(unsigned long long)sector, data_size);
- drbd_bcast_ee(mdev, "digest failed",
- dgs, dig_in, dig_vv, e);
- drbd_free_ee(mdev, e);
+ drbd_free_peer_req(mdev, peer_req);
return NULL;
}
}
mdev->recv_cnt += data_size>>9;
- return e;
+ return peer_req;
}
/* drbd_drain_block() just takes a data block
@@ -1329,30 +1558,26 @@
static int drbd_drain_block(struct drbd_conf *mdev, int data_size)
{
struct page *page;
- int rr, rv = 1;
+ int err = 0;
void *data;
if (!data_size)
- return true;
+ return 0;
- page = drbd_pp_alloc(mdev, 1, 1);
+ page = drbd_alloc_pages(mdev, 1, 1);
data = kmap(page);
while (data_size) {
- rr = drbd_recv(mdev, data, min_t(int, data_size, PAGE_SIZE));
- if (rr != min_t(int, data_size, PAGE_SIZE)) {
- rv = 0;
- if (!signal_pending(current))
- dev_warn(DEV,
- "short read receiving data: read %d expected %d\n",
- rr, min_t(int, data_size, PAGE_SIZE));
+ unsigned int len = min_t(int, data_size, PAGE_SIZE);
+
+ err = drbd_recv_all_warn(mdev->tconn, data, len);
+ if (err)
break;
- }
- data_size -= rr;
+ data_size -= len;
}
kunmap(page);
- drbd_pp_free(mdev, page, 0);
- return rv;
+ drbd_free_pages(mdev, page, 0);
+ return err;
}
static int recv_dless_read(struct drbd_conf *mdev, struct drbd_request *req,
@@ -1360,26 +1585,19 @@
{
struct bio_vec *bvec;
struct bio *bio;
- int dgs, rr, i, expect;
- void *dig_in = mdev->int_dig_in;
- void *dig_vv = mdev->int_dig_vv;
+ int dgs, err, i, expect;
+ void *dig_in = mdev->tconn->int_dig_in;
+ void *dig_vv = mdev->tconn->int_dig_vv;
- dgs = (mdev->agreed_pro_version >= 87 && mdev->integrity_r_tfm) ?
- crypto_hash_digestsize(mdev->integrity_r_tfm) : 0;
-
- if (dgs) {
- rr = drbd_recv(mdev, dig_in, dgs);
- if (rr != dgs) {
- if (!signal_pending(current))
- dev_warn(DEV,
- "short read receiving data reply digest: read %d expected %d\n",
- rr, dgs);
- return 0;
- }
+ dgs = 0;
+ if (mdev->tconn->peer_integrity_tfm) {
+ dgs = crypto_hash_digestsize(mdev->tconn->peer_integrity_tfm);
+ err = drbd_recv_all_warn(mdev->tconn, dig_in, dgs);
+ if (err)
+ return err;
+ data_size -= dgs;
}
- data_size -= dgs;
-
/* optimistically update recv_cnt. if receiving fails below,
* we disconnect anyways, and counters will be reset. */
mdev->recv_cnt += data_size>>9;
@@ -1388,63 +1606,61 @@
D_ASSERT(sector == bio->bi_sector);
bio_for_each_segment(bvec, bio, i) {
+ void *mapped = kmap(bvec->bv_page) + bvec->bv_offset;
expect = min_t(int, data_size, bvec->bv_len);
- rr = drbd_recv(mdev,
- kmap(bvec->bv_page)+bvec->bv_offset,
- expect);
+ err = drbd_recv_all_warn(mdev->tconn, mapped, expect);
kunmap(bvec->bv_page);
- if (rr != expect) {
- if (!signal_pending(current))
- dev_warn(DEV, "short read receiving data reply: "
- "read %d expected %d\n",
- rr, expect);
- return 0;
- }
- data_size -= rr;
+ if (err)
+ return err;
+ data_size -= expect;
}
if (dgs) {
- drbd_csum_bio(mdev, mdev->integrity_r_tfm, bio, dig_vv);
+ drbd_csum_bio(mdev, mdev->tconn->peer_integrity_tfm, bio, dig_vv);
if (memcmp(dig_in, dig_vv, dgs)) {
dev_err(DEV, "Digest integrity check FAILED. Broken NICs?\n");
- return 0;
+ return -EINVAL;
}
}
D_ASSERT(data_size == 0);
- return 1;
+ return 0;
}
-/* e_end_resync_block() is called via
- * drbd_process_done_ee() by asender only */
-static int e_end_resync_block(struct drbd_conf *mdev, struct drbd_work *w, int unused)
+/*
+ * e_end_resync_block() is called in asender context via
+ * drbd_finish_peer_reqs().
+ */
+static int e_end_resync_block(struct drbd_work *w, int unused)
{
- struct drbd_epoch_entry *e = (struct drbd_epoch_entry *)w;
- sector_t sector = e->sector;
- int ok;
+ struct drbd_peer_request *peer_req =
+ container_of(w, struct drbd_peer_request, w);
+ struct drbd_conf *mdev = w->mdev;
+ sector_t sector = peer_req->i.sector;
+ int err;
- D_ASSERT(hlist_unhashed(&e->collision));
+ D_ASSERT(drbd_interval_empty(&peer_req->i));
- if (likely((e->flags & EE_WAS_ERROR) == 0)) {
- drbd_set_in_sync(mdev, sector, e->size);
- ok = drbd_send_ack(mdev, P_RS_WRITE_ACK, e);
+ if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) {
+ drbd_set_in_sync(mdev, sector, peer_req->i.size);
+ err = drbd_send_ack(mdev, P_RS_WRITE_ACK, peer_req);
} else {
/* Record failure to sync */
- drbd_rs_failed_io(mdev, sector, e->size);
+ drbd_rs_failed_io(mdev, sector, peer_req->i.size);
- ok = drbd_send_ack(mdev, P_NEG_ACK, e);
+ err = drbd_send_ack(mdev, P_NEG_ACK, peer_req);
}
dec_unacked(mdev);
- return ok;
+ return err;
}
static int recv_resync_read(struct drbd_conf *mdev, sector_t sector, int data_size) __releases(local)
{
- struct drbd_epoch_entry *e;
+ struct drbd_peer_request *peer_req;
- e = read_in_block(mdev, ID_SYNCER, sector, data_size);
- if (!e)
+ peer_req = read_in_block(mdev, ID_SYNCER, sector, data_size);
+ if (!peer_req)
goto fail;
dec_rs_pending(mdev);
@@ -1453,64 +1669,88 @@
/* corresponding dec_unacked() in e_end_resync_block()
* respective _drbd_clear_done_ee */
- e->w.cb = e_end_resync_block;
+ peer_req->w.cb = e_end_resync_block;
- spin_lock_irq(&mdev->req_lock);
- list_add(&e->w.list, &mdev->sync_ee);
- spin_unlock_irq(&mdev->req_lock);
+ spin_lock_irq(&mdev->tconn->req_lock);
+ list_add(&peer_req->w.list, &mdev->sync_ee);
+ spin_unlock_irq(&mdev->tconn->req_lock);
atomic_add(data_size >> 9, &mdev->rs_sect_ev);
- if (drbd_submit_ee(mdev, e, WRITE, DRBD_FAULT_RS_WR) == 0)
- return true;
+ if (drbd_submit_peer_request(mdev, peer_req, WRITE, DRBD_FAULT_RS_WR) == 0)
+ return 0;
/* don't care for the reason here */
dev_err(DEV, "submit failed, triggering re-connect\n");
- spin_lock_irq(&mdev->req_lock);
- list_del(&e->w.list);
- spin_unlock_irq(&mdev->req_lock);
+ spin_lock_irq(&mdev->tconn->req_lock);
+ list_del(&peer_req->w.list);
+ spin_unlock_irq(&mdev->tconn->req_lock);
- drbd_free_ee(mdev, e);
+ drbd_free_peer_req(mdev, peer_req);
fail:
put_ldev(mdev);
- return false;
+ return -EIO;
}
-static int receive_DataReply(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
+static struct drbd_request *
+find_request(struct drbd_conf *mdev, struct rb_root *root, u64 id,
+ sector_t sector, bool missing_ok, const char *func)
{
struct drbd_request *req;
+
+ /* Request object according to our peer */
+ req = (struct drbd_request *)(unsigned long)id;
+ if (drbd_contains_interval(root, sector, &req->i) && req->i.local)
+ return req;
+ if (!missing_ok) {
+ dev_err(DEV, "%s: failed to find request 0x%lx, sector %llus\n", func,
+ (unsigned long)id, (unsigned long long)sector);
+ }
+ return NULL;
+}
+
+static int receive_DataReply(struct drbd_tconn *tconn, struct packet_info *pi)
+{
+ struct drbd_conf *mdev;
+ struct drbd_request *req;
sector_t sector;
- int ok;
- struct p_data *p = &mdev->data.rbuf.data;
+ int err;
+ struct p_data *p = pi->data;
+
+ mdev = vnr_to_mdev(tconn, pi->vnr);
+ if (!mdev)
+ return -EIO;
sector = be64_to_cpu(p->sector);
- spin_lock_irq(&mdev->req_lock);
- req = _ar_id_to_req(mdev, p->block_id, sector);
- spin_unlock_irq(&mdev->req_lock);
- if (unlikely(!req)) {
- dev_err(DEV, "Got a corrupt block_id/sector pair(1).\n");
- return false;
- }
+ spin_lock_irq(&mdev->tconn->req_lock);
+ req = find_request(mdev, &mdev->read_requests, p->block_id, sector, false, __func__);
+ spin_unlock_irq(&mdev->tconn->req_lock);
+ if (unlikely(!req))
+ return -EIO;
/* hlist_del(&req->collision) is done in _req_may_be_done, to avoid
* special casing it there for the various failure cases.
* still no race with drbd_fail_pending_reads */
- ok = recv_dless_read(mdev, req, sector, data_size);
-
- if (ok)
- req_mod(req, data_received);
+ err = recv_dless_read(mdev, req, sector, pi->size);
+ if (!err)
+ req_mod(req, DATA_RECEIVED);
/* else: nothing. handled from drbd_disconnect...
* I don't think we may complete this just yet
* in case we are "on-disconnect: freeze" */
- return ok;
+ return err;
}
-static int receive_RSDataReply(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
+static int receive_RSDataReply(struct drbd_tconn *tconn, struct packet_info *pi)
{
+ struct drbd_conf *mdev;
sector_t sector;
- int ok;
- struct p_data *p = &mdev->data.rbuf.data;
+ int err;
+ struct p_data *p = pi->data;
+
+ mdev = vnr_to_mdev(tconn, pi->vnr);
+ if (!mdev)
+ return -EIO;
sector = be64_to_cpu(p->sector);
D_ASSERT(p->block_id == ID_SYNCER);
@@ -1518,42 +1758,63 @@
if (get_ldev(mdev)) {
/* data is submitted to disk within recv_resync_read.
* corresponding put_ldev done below on error,
- * or in drbd_endio_write_sec. */
- ok = recv_resync_read(mdev, sector, data_size);
+ * or in drbd_peer_request_endio. */
+ err = recv_resync_read(mdev, sector, pi->size);
} else {
if (__ratelimit(&drbd_ratelimit_state))
dev_err(DEV, "Can not write resync data to local disk.\n");
- ok = drbd_drain_block(mdev, data_size);
+ err = drbd_drain_block(mdev, pi->size);
- drbd_send_ack_dp(mdev, P_NEG_ACK, p, data_size);
+ drbd_send_ack_dp(mdev, P_NEG_ACK, p, pi->size);
}
- atomic_add(data_size >> 9, &mdev->rs_sect_in);
+ atomic_add(pi->size >> 9, &mdev->rs_sect_in);
- return ok;
+ return err;
}
-/* e_end_block() is called via drbd_process_done_ee().
- * this means this function only runs in the asender thread
- */
-static int e_end_block(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
+static void restart_conflicting_writes(struct drbd_conf *mdev,
+ sector_t sector, int size)
{
- struct drbd_epoch_entry *e = (struct drbd_epoch_entry *)w;
- sector_t sector = e->sector;
- int ok = 1, pcmd;
+ struct drbd_interval *i;
+ struct drbd_request *req;
- if (mdev->net_conf->wire_protocol == DRBD_PROT_C) {
- if (likely((e->flags & EE_WAS_ERROR) == 0)) {
+ drbd_for_each_overlap(i, &mdev->write_requests, sector, size) {
+ if (!i->local)
+ continue;
+ req = container_of(i, struct drbd_request, i);
+ if (req->rq_state & RQ_LOCAL_PENDING ||
+ !(req->rq_state & RQ_POSTPONED))
+ continue;
+ /* as it is RQ_POSTPONED, this will cause it to
+ * be queued on the retry workqueue. */
+ __req_mod(req, CONFLICT_RESOLVED, NULL);
+ }
+}
+
+/*
+ * e_end_block() is called in asender context via drbd_finish_peer_reqs().
+ */
+static int e_end_block(struct drbd_work *w, int cancel)
+{
+ struct drbd_peer_request *peer_req =
+ container_of(w, struct drbd_peer_request, w);
+ struct drbd_conf *mdev = w->mdev;
+ sector_t sector = peer_req->i.sector;
+ int err = 0, pcmd;
+
+ if (peer_req->flags & EE_SEND_WRITE_ACK) {
+ if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) {
pcmd = (mdev->state.conn >= C_SYNC_SOURCE &&
mdev->state.conn <= C_PAUSED_SYNC_T &&
- e->flags & EE_MAY_SET_IN_SYNC) ?
+ peer_req->flags & EE_MAY_SET_IN_SYNC) ?
P_RS_WRITE_ACK : P_WRITE_ACK;
- ok &= drbd_send_ack(mdev, pcmd, e);
+ err = drbd_send_ack(mdev, pcmd, peer_req);
if (pcmd == P_RS_WRITE_ACK)
- drbd_set_in_sync(mdev, sector, e->size);
+ drbd_set_in_sync(mdev, sector, peer_req->i.size);
} else {
- ok = drbd_send_ack(mdev, P_NEG_ACK, e);
+ err = drbd_send_ack(mdev, P_NEG_ACK, peer_req);
/* we expect it to be marked out of sync anyway...
* maybe assert this? */
}
@@ -1561,52 +1822,115 @@
}
/* we delete from the conflict detection hash _after_ we sent out the
* P_WRITE_ACK / P_NEG_ACK, to get the sequence number right. */
- if (mdev->net_conf->two_primaries) {
- spin_lock_irq(&mdev->req_lock);
- D_ASSERT(!hlist_unhashed(&e->collision));
- hlist_del_init(&e->collision);
- spin_unlock_irq(&mdev->req_lock);
- } else {
- D_ASSERT(hlist_unhashed(&e->collision));
- }
+ if (peer_req->flags & EE_IN_INTERVAL_TREE) {
+ spin_lock_irq(&mdev->tconn->req_lock);
+ D_ASSERT(!drbd_interval_empty(&peer_req->i));
+ drbd_remove_epoch_entry_interval(mdev, peer_req);
+ if (peer_req->flags & EE_RESTART_REQUESTS)
+ restart_conflicting_writes(mdev, sector, peer_req->i.size);
+ spin_unlock_irq(&mdev->tconn->req_lock);
+ } else
+ D_ASSERT(drbd_interval_empty(&peer_req->i));
- drbd_may_finish_epoch(mdev, e->epoch, EV_PUT + (cancel ? EV_CLEANUP : 0));
+ drbd_may_finish_epoch(mdev->tconn, peer_req->epoch, EV_PUT + (cancel ? EV_CLEANUP : 0));
- return ok;
+ return err;
}
-static int e_send_discard_ack(struct drbd_conf *mdev, struct drbd_work *w, int unused)
+static int e_send_ack(struct drbd_work *w, enum drbd_packet ack)
{
- struct drbd_epoch_entry *e = (struct drbd_epoch_entry *)w;
- int ok = 1;
+ struct drbd_conf *mdev = w->mdev;
+ struct drbd_peer_request *peer_req =
+ container_of(w, struct drbd_peer_request, w);
+ int err;
- D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_C);
- ok = drbd_send_ack(mdev, P_DISCARD_ACK, e);
-
- spin_lock_irq(&mdev->req_lock);
- D_ASSERT(!hlist_unhashed(&e->collision));
- hlist_del_init(&e->collision);
- spin_unlock_irq(&mdev->req_lock);
-
+ err = drbd_send_ack(mdev, ack, peer_req);
dec_unacked(mdev);
- return ok;
+ return err;
}
-static bool overlapping_resync_write(struct drbd_conf *mdev, struct drbd_epoch_entry *data_e)
+static int e_send_superseded(struct drbd_work *w, int unused)
{
+ return e_send_ack(w, P_SUPERSEDED);
+}
- struct drbd_epoch_entry *rs_e;
+static int e_send_retry_write(struct drbd_work *w, int unused)
+{
+ struct drbd_tconn *tconn = w->mdev->tconn;
+
+ return e_send_ack(w, tconn->agreed_pro_version >= 100 ?
+ P_RETRY_WRITE : P_SUPERSEDED);
+}
+
+static bool seq_greater(u32 a, u32 b)
+{
+ /*
+ * We assume 32-bit wrap-around here.
+ * For 24-bit wrap-around, we would have to shift:
+ * a <<= 8; b <<= 8;
+ */
+ return (s32)a - (s32)b > 0;
+}
+
+static u32 seq_max(u32 a, u32 b)
+{
+ return seq_greater(a, b) ? a : b;
+}
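+/*
+ * Worked example (illustration only): with 32-bit wrap-around,
+ * seq_greater(0x00000001, 0xfffffffe) is true, because
+ * (s32)0x00000001 - (s32)0xfffffffe == 3 > 0, whereas a plain unsigned
+ * "a > b" comparison would wrongly consider the older value newer.
+ */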
+
+static bool need_peer_seq(struct drbd_conf *mdev)
+{
+ struct drbd_tconn *tconn = mdev->tconn;
+ int tp;
+
+ /*
+ * We only need to keep track of the last packet_seq number of our peer
+ * if we are in dual-primary mode and we have the resolve-conflicts flag set; see
+ * handle_write_conflicts().
+ */
+
+ rcu_read_lock();
+ tp = rcu_dereference(mdev->tconn->net_conf)->two_primaries;
+ rcu_read_unlock();
+
+ return tp && test_bit(RESOLVE_CONFLICTS, &tconn->flags);
+}
+
+static void update_peer_seq(struct drbd_conf *mdev, unsigned int peer_seq)
+{
+ unsigned int newest_peer_seq;
+
+ if (need_peer_seq(mdev)) {
+ spin_lock(&mdev->peer_seq_lock);
+ newest_peer_seq = seq_max(mdev->peer_seq, peer_seq);
+ mdev->peer_seq = newest_peer_seq;
+ spin_unlock(&mdev->peer_seq_lock);
+ /* wake up only if we actually changed mdev->peer_seq */
+ if (peer_seq == newest_peer_seq)
+ wake_up(&mdev->seq_wait);
+ }
+}
+
+static inline int overlaps(sector_t s1, int l1, sector_t s2, int l2)
+{
+ return !((s1 + (l1>>9) <= s2) || (s1 >= s2 + (l2>>9)));
+}
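+/*
+ * Example (illustration only): overlaps(0, 4096, 4, 4096) is true, since
+ * sectors [0, 8) and [4, 12) intersect; overlaps(0, 4096, 8, 4096) is
+ * false, since [0, 8) and [8, 16) merely touch. l1 and l2 are byte
+ * counts, hence the >> 9 conversion to 512-byte sectors.
+ */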
+
+/* maybe change sync_ee into interval trees as well? */
+static bool overlapping_resync_write(struct drbd_conf *mdev, struct drbd_peer_request *peer_req)
+{
+ struct drbd_peer_request *rs_req;
bool rv = 0;
- spin_lock_irq(&mdev->req_lock);
- list_for_each_entry(rs_e, &mdev->sync_ee, w.list) {
- if (overlaps(data_e->sector, data_e->size, rs_e->sector, rs_e->size)) {
+ spin_lock_irq(&mdev->tconn->req_lock);
+ list_for_each_entry(rs_req, &mdev->sync_ee, w.list) {
+ if (overlaps(peer_req->i.sector, peer_req->i.size,
+ rs_req->i.sector, rs_req->i.size)) {
rv = 1;
break;
}
}
- spin_unlock_irq(&mdev->req_lock);
+ spin_unlock_irq(&mdev->tconn->req_lock);
return rv;
}
@@ -1632,35 +1956,41 @@
*
* returns 0 if we may process the packet,
* -ERESTARTSYS if we were interrupted (by disconnect signal). */
-static int drbd_wait_peer_seq(struct drbd_conf *mdev, const u32 packet_seq)
+static int wait_for_and_update_peer_seq(struct drbd_conf *mdev, const u32 peer_seq)
{
DEFINE_WAIT(wait);
- unsigned int p_seq;
long timeout;
- int ret = 0;
+ int ret;
+
+ if (!need_peer_seq(mdev))
+ return 0;
+
spin_lock(&mdev->peer_seq_lock);
for (;;) {
- prepare_to_wait(&mdev->seq_wait, &wait, TASK_INTERRUPTIBLE);
- if (seq_le(packet_seq, mdev->peer_seq+1))
+ if (!seq_greater(peer_seq - 1, mdev->peer_seq)) {
+ mdev->peer_seq = seq_max(mdev->peer_seq, peer_seq);
+ ret = 0;
break;
+ }
if (signal_pending(current)) {
ret = -ERESTARTSYS;
break;
}
- p_seq = mdev->peer_seq;
+ prepare_to_wait(&mdev->seq_wait, &wait, TASK_INTERRUPTIBLE);
spin_unlock(&mdev->peer_seq_lock);
- timeout = schedule_timeout(30*HZ);
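+ /* ping_timeo is configured in tenths of a second, so this yields
+ * the timeout in jiffies (e.g. ping_timeo = 5 means HZ/2). */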
+ rcu_read_lock();
+ timeout = rcu_dereference(mdev->tconn->net_conf)->ping_timeo*HZ/10;
+ rcu_read_unlock();
+ timeout = schedule_timeout(timeout);
spin_lock(&mdev->peer_seq_lock);
- if (timeout == 0 && p_seq == mdev->peer_seq) {
+ if (!timeout) {
ret = -ETIMEDOUT;
- dev_err(DEV, "ASSERT FAILED waited 30 seconds for sequence update, forcing reconnect\n");
+ dev_err(DEV, "Timed out waiting for missing ack packets; disconnecting\n");
break;
}
}
- finish_wait(&mdev->seq_wait, &wait);
- if (mdev->peer_seq+1 == packet_seq)
- mdev->peer_seq++;
spin_unlock(&mdev->peer_seq_lock);
+ finish_wait(&mdev->seq_wait, &wait);
return ret;
}
@@ -1675,233 +2005,277 @@
(dpf & DP_DISCARD ? REQ_DISCARD : 0);
}
-/* mirrored write */
-static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
+static void fail_postponed_requests(struct drbd_conf *mdev, sector_t sector,
+ unsigned int size)
{
+ struct drbd_interval *i;
+
+ repeat:
+ drbd_for_each_overlap(i, &mdev->write_requests, sector, size) {
+ struct drbd_request *req;
+ struct bio_and_error m;
+
+ if (!i->local)
+ continue;
+ req = container_of(i, struct drbd_request, i);
+ if (!(req->rq_state & RQ_POSTPONED))
+ continue;
+ req->rq_state &= ~RQ_POSTPONED;
+ __req_mod(req, NEG_ACKED, &m);
+ spin_unlock_irq(&mdev->tconn->req_lock);
+ if (m.bio)
+ complete_master_bio(mdev, &m);
+ spin_lock_irq(&mdev->tconn->req_lock);
+ goto repeat;
+ }
+}
+
+static int handle_write_conflicts(struct drbd_conf *mdev,
+ struct drbd_peer_request *peer_req)
+{
+ struct drbd_tconn *tconn = mdev->tconn;
+ bool resolve_conflicts = test_bit(RESOLVE_CONFLICTS, &tconn->flags);
+ sector_t sector = peer_req->i.sector;
+ const unsigned int size = peer_req->i.size;
+ struct drbd_interval *i;
+ bool equal;
+ int err;
+
+ /*
+ * Inserting the peer request into the write_requests tree will prevent
+ * new conflicting local requests from being added.
+ */
+ drbd_insert_interval(&mdev->write_requests, &peer_req->i);
+
+ repeat:
+ drbd_for_each_overlap(i, &mdev->write_requests, sector, size) {
+ if (i == &peer_req->i)
+ continue;
+
+ if (!i->local) {
+ /*
+ * Our peer has sent a conflicting remote request; this
+ * should not happen in a two-node setup. Wait for the
+ * earlier peer request to complete.
+ */
+ err = drbd_wait_misc(mdev, i);
+ if (err)
+ goto out;
+ goto repeat;
+ }
+
+ equal = i->sector == sector && i->size == size;
+ if (resolve_conflicts) {
+ /*
+ * If the peer request is fully contained within the
+ * overlapping request, it can be considered overwritten
+ * and thus superseded; otherwise, it will be retried
+ * once all overlapping requests have completed.
+ */
+ bool superseded = i->sector <= sector && i->sector +
+ (i->size >> 9) >= sector + (size >> 9);
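+ /* Containment example (illustration only): a local write
+ * covering sectors [8, 24) fully contains a peer write over
+ * [12, 20), so the peer write is superseded; a peer write
+ * over [4, 12) only partially overlaps and is retried. */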
+
+ if (!equal)
+ dev_alert(DEV, "Concurrent writes detected: "
+ "local=%llus +%u, remote=%llus +%u, "
+ "assuming %s came first\n",
+ (unsigned long long)i->sector, i->size,
+ (unsigned long long)sector, size,
+ superseded ? "local" : "remote");
+
+ inc_unacked(mdev);
+ peer_req->w.cb = superseded ? e_send_superseded :
+ e_send_retry_write;
+ list_add_tail(&peer_req->w.list, &mdev->done_ee);
+ wake_asender(mdev->tconn);
+
+ err = -ENOENT;
+ goto out;
+ } else {
+ struct drbd_request *req =
+ container_of(i, struct drbd_request, i);
+
+ if (!equal)
+ dev_alert(DEV, "Concurrent writes detected: "
+ "local=%llus +%u, remote=%llus +%u\n",
+ (unsigned long long)i->sector, i->size,
+ (unsigned long long)sector, size);
+
+ if (req->rq_state & RQ_LOCAL_PENDING ||
+ !(req->rq_state & RQ_POSTPONED)) {
+ /*
+ * Wait for the node with the discard flag to
+ * decide if this request has been superseded
+ * or needs to be retried.
+ * Requests that have been superseded will
+ * disappear from the write_requests tree.
+ *
+ * In addition, wait for the conflicting
+ * request to finish locally before submitting
+ * the conflicting peer request.
+ */
+ err = drbd_wait_misc(mdev, &req->i);
+ if (err) {
+ _conn_request_state(mdev->tconn,
+ NS(conn, C_TIMEOUT),
+ CS_HARD);
+ fail_postponed_requests(mdev, sector, size);
+ goto out;
+ }
+ goto repeat;
+ }
+ /*
+ * Remember to restart the conflicting requests after
+ * the new peer request has completed.
+ */
+ peer_req->flags |= EE_RESTART_REQUESTS;
+ }
+ }
+ err = 0;
+
+ out:
+ if (err)
+ drbd_remove_epoch_entry_interval(mdev, peer_req);
+ return err;
+}
+
+/* mirrored write */
+static int receive_Data(struct drbd_tconn *tconn, struct packet_info *pi)
+{
+ struct drbd_conf *mdev;
sector_t sector;
- struct drbd_epoch_entry *e;
- struct p_data *p = &mdev->data.rbuf.data;
+ struct drbd_peer_request *peer_req;
+ struct p_data *p = pi->data;
+ u32 peer_seq = be32_to_cpu(p->seq_num);
int rw = WRITE;
u32 dp_flags;
+ int err, tp;
+
+ mdev = vnr_to_mdev(tconn, pi->vnr);
+ if (!mdev)
+ return -EIO;
if (!get_ldev(mdev)) {
- spin_lock(&mdev->peer_seq_lock);
- if (mdev->peer_seq+1 == be32_to_cpu(p->seq_num))
- mdev->peer_seq++;
- spin_unlock(&mdev->peer_seq_lock);
+ int err2;
- drbd_send_ack_dp(mdev, P_NEG_ACK, p, data_size);
- atomic_inc(&mdev->current_epoch->epoch_size);
- return drbd_drain_block(mdev, data_size);
+ err = wait_for_and_update_peer_seq(mdev, peer_seq);
+ drbd_send_ack_dp(mdev, P_NEG_ACK, p, pi->size);
+ atomic_inc(&tconn->current_epoch->epoch_size);
+ err2 = drbd_drain_block(mdev, pi->size);
+ if (!err)
+ err = err2;
+ return err;
}
- /* get_ldev(mdev) successful.
- * Corresponding put_ldev done either below (on various errors),
- * or in drbd_endio_write_sec, if we successfully submit the data at
- * the end of this function. */
+ /*
+ * Corresponding put_ldev done either below (on various errors), or in
+ * drbd_peer_request_endio, if we successfully submit the data at the
+ * end of this function.
+ */
sector = be64_to_cpu(p->sector);
- e = read_in_block(mdev, p->block_id, sector, data_size);
- if (!e) {
+ peer_req = read_in_block(mdev, p->block_id, sector, pi->size);
+ if (!peer_req) {
put_ldev(mdev);
- return false;
+ return -EIO;
}
- e->w.cb = e_end_block;
+ peer_req->w.cb = e_end_block;
dp_flags = be32_to_cpu(p->dp_flags);
rw |= wire_flags_to_bio(mdev, dp_flags);
- if (e->pages == NULL) {
- D_ASSERT(e->size == 0);
+ if (peer_req->pages == NULL) {
+ D_ASSERT(peer_req->i.size == 0);
D_ASSERT(dp_flags & DP_FLUSH);
}
if (dp_flags & DP_MAY_SET_IN_SYNC)
- e->flags |= EE_MAY_SET_IN_SYNC;
+ peer_req->flags |= EE_MAY_SET_IN_SYNC;
- spin_lock(&mdev->epoch_lock);
- e->epoch = mdev->current_epoch;
- atomic_inc(&e->epoch->epoch_size);
- atomic_inc(&e->epoch->active);
- spin_unlock(&mdev->epoch_lock);
+ spin_lock(&tconn->epoch_lock);
+ peer_req->epoch = tconn->current_epoch;
+ atomic_inc(&peer_req->epoch->epoch_size);
+ atomic_inc(&peer_req->epoch->active);
+ spin_unlock(&tconn->epoch_lock);
- /* I'm the receiver, I do hold a net_cnt reference. */
- if (!mdev->net_conf->two_primaries) {
- spin_lock_irq(&mdev->req_lock);
- } else {
- /* don't get the req_lock yet,
- * we may sleep in drbd_wait_peer_seq */
- const int size = e->size;
- const int discard = drbd_test_flag(mdev, DISCARD_CONCURRENT);
- DEFINE_WAIT(wait);
- struct drbd_request *i;
- struct hlist_node *n;
- struct hlist_head *slot;
- int first;
-
- D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_C);
- BUG_ON(mdev->ee_hash == NULL);
- BUG_ON(mdev->tl_hash == NULL);
-
- /* conflict detection and handling:
- * 1. wait on the sequence number,
- * in case this data packet overtook ACK packets.
- * 2. check our hash tables for conflicting requests.
- * we only need to walk the tl_hash, since an ee can not
- * have a conflict with an other ee: on the submitting
- * node, the corresponding req had already been conflicting,
- * and a conflicting req is never sent.
- *
- * Note: for two_primaries, we are protocol C,
- * so there cannot be any request that is DONE
- * but still on the transfer log.
- *
- * unconditionally add to the ee_hash.
- *
- * if no conflicting request is found:
- * submit.
- *
- * if any conflicting request is found
- * that has not yet been acked,
- * AND I have the "discard concurrent writes" flag:
- * queue (via done_ee) the P_DISCARD_ACK; OUT.
- *
- * if any conflicting request is found:
- * block the receiver, waiting on misc_wait
- * until no more conflicting requests are there,
- * or we get interrupted (disconnect).
- *
- * we do not just write after local io completion of those
- * requests, but only after req is done completely, i.e.
- * we wait for the P_DISCARD_ACK to arrive!
- *
- * then proceed normally, i.e. submit.
- */
- if (drbd_wait_peer_seq(mdev, be32_to_cpu(p->seq_num)))
+ rcu_read_lock();
+ tp = rcu_dereference(mdev->tconn->net_conf)->two_primaries;
+ rcu_read_unlock();
+ if (tp) {
+ peer_req->flags |= EE_IN_INTERVAL_TREE;
+ err = wait_for_and_update_peer_seq(mdev, peer_seq);
+ if (err)
goto out_interrupted;
-
- spin_lock_irq(&mdev->req_lock);
-
- hlist_add_head(&e->collision, ee_hash_slot(mdev, sector));
-
-#define OVERLAPS overlaps(i->sector, i->size, sector, size)
- slot = tl_hash_slot(mdev, sector);
- first = 1;
- for (;;) {
- int have_unacked = 0;
- int have_conflict = 0;
- prepare_to_wait(&mdev->misc_wait, &wait,
- TASK_INTERRUPTIBLE);
- hlist_for_each_entry(i, n, slot, collision) {
- if (OVERLAPS) {
- /* only ALERT on first iteration,
- * we may be woken up early... */
- if (first)
- dev_alert(DEV, "%s[%u] Concurrent local write detected!"
- " new: %llus +%u; pending: %llus +%u\n",
- current->comm, current->pid,
- (unsigned long long)sector, size,
- (unsigned long long)i->sector, i->size);
- if (i->rq_state & RQ_NET_PENDING)
- ++have_unacked;
- ++have_conflict;
- }
- }
-#undef OVERLAPS
- if (!have_conflict)
- break;
-
- /* Discard Ack only for the _first_ iteration */
- if (first && discard && have_unacked) {
- dev_alert(DEV, "Concurrent write! [DISCARD BY FLAG] sec=%llus\n",
- (unsigned long long)sector);
- inc_unacked(mdev);
- e->w.cb = e_send_discard_ack;
- list_add_tail(&e->w.list, &mdev->done_ee);
-
- spin_unlock_irq(&mdev->req_lock);
-
- /* we could probably send that P_DISCARD_ACK ourselves,
- * but I don't like the receiver using the msock */
-
+ spin_lock_irq(&mdev->tconn->req_lock);
+ err = handle_write_conflicts(mdev, peer_req);
+ if (err) {
+ spin_unlock_irq(&mdev->tconn->req_lock);
+ if (err == -ENOENT) {
put_ldev(mdev);
- wake_asender(mdev);
- finish_wait(&mdev->misc_wait, &wait);
- return true;
+ return 0;
}
-
- if (signal_pending(current)) {
- hlist_del_init(&e->collision);
-
- spin_unlock_irq(&mdev->req_lock);
-
- finish_wait(&mdev->misc_wait, &wait);
- goto out_interrupted;
- }
-
- spin_unlock_irq(&mdev->req_lock);
- if (first) {
- first = 0;
- dev_alert(DEV, "Concurrent write! [W AFTERWARDS] "
- "sec=%llus\n", (unsigned long long)sector);
- } else if (discard) {
- /* we had none on the first iteration.
- * there must be none now. */
- D_ASSERT(have_unacked == 0);
- }
- schedule();
- spin_lock_irq(&mdev->req_lock);
+ goto out_interrupted;
}
- finish_wait(&mdev->misc_wait, &wait);
- }
-
- list_add(&e->w.list, &mdev->active_ee);
- spin_unlock_irq(&mdev->req_lock);
+ } else
+ spin_lock_irq(&mdev->tconn->req_lock);
+ list_add(&peer_req->w.list, &mdev->active_ee);
+ spin_unlock_irq(&mdev->tconn->req_lock);
if (mdev->state.conn == C_SYNC_TARGET)
- wait_event(mdev->ee_wait, !overlapping_resync_write(mdev, e));
+ wait_event(mdev->ee_wait, !overlapping_resync_write(mdev, peer_req));
- switch (mdev->net_conf->wire_protocol) {
- case DRBD_PROT_C:
+ if (mdev->tconn->agreed_pro_version < 100) {
+ rcu_read_lock();
+ switch (rcu_dereference(mdev->tconn->net_conf)->wire_protocol) {
+ case DRBD_PROT_C:
+ dp_flags |= DP_SEND_WRITE_ACK;
+ break;
+ case DRBD_PROT_B:
+ dp_flags |= DP_SEND_RECEIVE_ACK;
+ break;
+ }
+ rcu_read_unlock();
+ }
+
+ if (dp_flags & DP_SEND_WRITE_ACK) {
+ peer_req->flags |= EE_SEND_WRITE_ACK;
inc_unacked(mdev);
/* corresponding dec_unacked() in e_end_block()
* respective _drbd_clear_done_ee */
- break;
- case DRBD_PROT_B:
+ }
+
+ if (dp_flags & DP_SEND_RECEIVE_ACK) {
/* I really don't like it that the receiver thread
* sends on the msock, but anyway */
- drbd_send_ack(mdev, P_RECV_ACK, e);
- break;
- case DRBD_PROT_A:
- /* nothing to do */
- break;
+ drbd_send_ack(mdev, P_RECV_ACK, peer_req);
}
if (mdev->state.pdsk < D_INCONSISTENT) {
/* In case we have the only disk of the cluster, */
- drbd_set_out_of_sync(mdev, e->sector, e->size);
- e->flags |= EE_CALL_AL_COMPLETE_IO;
- e->flags &= ~EE_MAY_SET_IN_SYNC;
- drbd_al_begin_io(mdev, e->sector);
+ drbd_set_out_of_sync(mdev, peer_req->i.sector, peer_req->i.size);
+ peer_req->flags |= EE_CALL_AL_COMPLETE_IO;
+ peer_req->flags &= ~EE_MAY_SET_IN_SYNC;
+ drbd_al_begin_io(mdev, &peer_req->i);
}
- if (drbd_submit_ee(mdev, e, rw, DRBD_FAULT_DT_WR) == 0)
- return true;
+ err = drbd_submit_peer_request(mdev, peer_req, rw, DRBD_FAULT_DT_WR);
+ if (!err)
+ return 0;
/* don't care for the reason here */
dev_err(DEV, "submit failed, triggering re-connect\n");
- spin_lock_irq(&mdev->req_lock);
- list_del(&e->w.list);
- hlist_del_init(&e->collision);
- spin_unlock_irq(&mdev->req_lock);
- if (e->flags & EE_CALL_AL_COMPLETE_IO)
- drbd_al_complete_io(mdev, e->sector);
+ spin_lock_irq(&mdev->tconn->req_lock);
+ list_del(&peer_req->w.list);
+ drbd_remove_epoch_entry_interval(mdev, peer_req);
+ spin_unlock_irq(&mdev->tconn->req_lock);
+ if (peer_req->flags & EE_CALL_AL_COMPLETE_IO)
+ drbd_al_complete_io(mdev, &peer_req->i);
out_interrupted:
- drbd_may_finish_epoch(mdev, e->epoch, EV_PUT + EV_CLEANUP);
+ drbd_may_finish_epoch(tconn, peer_req->epoch, EV_PUT + EV_CLEANUP);
put_ldev(mdev);
- drbd_free_ee(mdev, e);
- return false;
+ drbd_free_peer_req(mdev, peer_req);
+ return err;
}
/* We may throttle resync, if the lower device seems to be busy,
@@ -1922,9 +2296,14 @@
struct lc_element *tmp;
int curr_events;
int throttle = 0;
+ unsigned int c_min_rate;
+
+ rcu_read_lock();
+ c_min_rate = rcu_dereference(mdev->ldev->disk_conf)->c_min_rate;
+ rcu_read_unlock();
/* feature disabled? */
- if (mdev->sync_conf.c_min_rate == 0)
+ if (c_min_rate == 0)
return 0;
spin_lock_irq(&mdev->al_lock);
@@ -1964,40 +2343,46 @@
db = mdev->rs_mark_left[i] - rs_left;
dbdt = Bit2KB(db/dt);
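 /* dbdt approximates the current resync rate in KiB/s (Bit2KB()
 * converts 4 KiB bitmap bits to KiB); resync is only considered
 * for throttling once it already exceeds the configured
 * c_min_rate floor. */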
- if (dbdt > mdev->sync_conf.c_min_rate)
+ if (dbdt > c_min_rate)
throttle = 1;
}
return throttle;
}
-static int receive_DataRequest(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int digest_size)
+static int receive_DataRequest(struct drbd_tconn *tconn, struct packet_info *pi)
{
+ struct drbd_conf *mdev;
sector_t sector;
- const sector_t capacity = drbd_get_capacity(mdev->this_bdev);
- struct drbd_epoch_entry *e;
+ sector_t capacity;
+ struct drbd_peer_request *peer_req;
struct digest_info *di = NULL;
int size, verb;
unsigned int fault_type;
- struct p_block_req *p = &mdev->data.rbuf.block_req;
+ struct p_block_req *p = pi->data;
+
+ mdev = vnr_to_mdev(tconn, pi->vnr);
+ if (!mdev)
+ return -EIO;
+ capacity = drbd_get_capacity(mdev->this_bdev);
sector = be64_to_cpu(p->sector);
size = be32_to_cpu(p->blksize);
- if (size <= 0 || (size & 0x1ff) != 0 || size > DRBD_MAX_BIO_SIZE) {
+ if (size <= 0 || !IS_ALIGNED(size, 512) || size > DRBD_MAX_BIO_SIZE) {
dev_err(DEV, "%s:%d: sector: %llus, size: %u\n", __FILE__, __LINE__,
(unsigned long long)sector, size);
- return false;
+ return -EINVAL;
}
if (sector + (size>>9) > capacity) {
dev_err(DEV, "%s:%d: sector: %llus, size: %u\n", __FILE__, __LINE__,
(unsigned long long)sector, size);
- return false;
+ return -EINVAL;
}
if (!get_ldev_if_state(mdev, D_UP_TO_DATE)) {
verb = 1;
- switch (cmd) {
+ switch (pi->cmd) {
case P_DATA_REQUEST:
drbd_send_ack_rp(mdev, P_NEG_DREPLY, p);
break;
@@ -2012,35 +2397,34 @@
drbd_send_ack_ex(mdev, P_OV_RESULT, sector, size, ID_IN_SYNC);
break;
default:
- dev_err(DEV, "unexpected command (%s) in receive_DataRequest\n",
- cmdname(cmd));
+ BUG();
}
if (verb && __ratelimit(&drbd_ratelimit_state))
dev_err(DEV, "Can not satisfy peer's read request, "
"no local data.\n");
/* drain possibly payload */
- return drbd_drain_block(mdev, digest_size);
+ return drbd_drain_block(mdev, pi->size);
}
/* GFP_NOIO, because we must not cause arbitrary write-out: in a DRBD
* "criss-cross" setup, that might cause write-out on some other DRBD,
* which in turn might block on the other node at this very place. */
- e = drbd_alloc_ee(mdev, p->block_id, sector, size, GFP_NOIO);
- if (!e) {
+ peer_req = drbd_alloc_peer_req(mdev, p->block_id, sector, size, GFP_NOIO);
+ if (!peer_req) {
put_ldev(mdev);
- return false;
+ return -ENOMEM;
}
- switch (cmd) {
+ switch (pi->cmd) {
case P_DATA_REQUEST:
- e->w.cb = w_e_end_data_req;
+ peer_req->w.cb = w_e_end_data_req;
fault_type = DRBD_FAULT_DT_RD;
/* application IO, don't drbd_rs_begin_io */
goto submit;
case P_RS_DATA_REQUEST:
- e->w.cb = w_e_end_rsdata_req;
+ peer_req->w.cb = w_e_end_rsdata_req;
fault_type = DRBD_FAULT_RS_RD;
/* used in the sector offset progress display */
mdev->bm_resync_fo = BM_SECT_TO_BIT(sector);
@@ -2049,28 +2433,28 @@
case P_OV_REPLY:
case P_CSUM_RS_REQUEST:
fault_type = DRBD_FAULT_RS_RD;
- di = kmalloc(sizeof(*di) + digest_size, GFP_NOIO);
+ di = kmalloc(sizeof(*di) + pi->size, GFP_NOIO);
if (!di)
goto out_free_e;
- di->digest_size = digest_size;
+ di->digest_size = pi->size;
di->digest = (((char *)di)+sizeof(struct digest_info));
- e->digest = di;
- e->flags |= EE_HAS_DIGEST;
+ peer_req->digest = di;
+ peer_req->flags |= EE_HAS_DIGEST;
- if (drbd_recv(mdev, di->digest, digest_size) != digest_size)
+ if (drbd_recv_all(mdev->tconn, di->digest, pi->size))
goto out_free_e;
- if (cmd == P_CSUM_RS_REQUEST) {
- D_ASSERT(mdev->agreed_pro_version >= 89);
- e->w.cb = w_e_end_csum_rs_req;
+ if (pi->cmd == P_CSUM_RS_REQUEST) {
+ D_ASSERT(mdev->tconn->agreed_pro_version >= 89);
+ peer_req->w.cb = w_e_end_csum_rs_req;
/* used in the sector offset progress display */
mdev->bm_resync_fo = BM_SECT_TO_BIT(sector);
- } else if (cmd == P_OV_REPLY) {
+ } else if (pi->cmd == P_OV_REPLY) {
/* track progress, we may need to throttle */
atomic_add(size >> 9, &mdev->rs_sect_in);
- e->w.cb = w_e_end_ov_reply;
+ peer_req->w.cb = w_e_end_ov_reply;
dec_rs_pending(mdev);
/* drbd_rs_begin_io done when we sent this request,
* but accounting still needs to be done. */
@@ -2080,7 +2464,7 @@
case P_OV_REQUEST:
if (mdev->ov_start_sector == ~(sector_t)0 &&
- mdev->agreed_pro_version >= 90) {
+ mdev->tconn->agreed_pro_version >= 90) {
unsigned long now = jiffies;
int i;
mdev->ov_start_sector = sector;
@@ -2094,15 +2478,12 @@
dev_info(DEV, "Online Verify start sector: %llu\n",
(unsigned long long)sector);
}
- e->w.cb = w_e_end_ov_req;
+ peer_req->w.cb = w_e_end_ov_req;
fault_type = DRBD_FAULT_RS_RD;
break;
default:
- dev_err(DEV, "unexpected command (%s) in receive_DataRequest\n",
- cmdname(cmd));
- fault_type = DRBD_FAULT_MAX;
- goto out_free_e;
+ BUG();
}
/* Throttle, drbd_rs_begin_io and submit should become asynchronous
@@ -2137,30 +2518,31 @@
submit:
inc_unacked(mdev);
- spin_lock_irq(&mdev->req_lock);
- list_add_tail(&e->w.list, &mdev->read_ee);
- spin_unlock_irq(&mdev->req_lock);
+ spin_lock_irq(&mdev->tconn->req_lock);
+ list_add_tail(&peer_req->w.list, &mdev->read_ee);
+ spin_unlock_irq(&mdev->tconn->req_lock);
- if (drbd_submit_ee(mdev, e, READ, fault_type) == 0)
- return true;
+ if (drbd_submit_peer_request(mdev, peer_req, READ, fault_type) == 0)
+ return 0;
/* don't care for the reason here */
dev_err(DEV, "submit failed, triggering re-connect\n");
- spin_lock_irq(&mdev->req_lock);
- list_del(&e->w.list);
- spin_unlock_irq(&mdev->req_lock);
+ spin_lock_irq(&mdev->tconn->req_lock);
+ list_del(&peer_req->w.list);
+ spin_unlock_irq(&mdev->tconn->req_lock);
/* no drbd_rs_complete_io(), we are dropping the connection anyways */
out_free_e:
put_ldev(mdev);
- drbd_free_ee(mdev, e);
- return false;
+ drbd_free_peer_req(mdev, peer_req);
+ return -EIO;
}
static int drbd_asb_recover_0p(struct drbd_conf *mdev) __must_hold(local)
{
int self, peer, rv = -100;
unsigned long ch_self, ch_peer;
+ enum drbd_after_sb_p after_sb_0p;
self = mdev->ldev->md.uuid[UI_BITMAP] & 1;
peer = mdev->p_uuid[UI_BITMAP] & 1;
@@ -2168,10 +2550,14 @@
ch_peer = mdev->p_uuid[UI_SIZE];
ch_self = mdev->comm_bm_set;
- switch (mdev->net_conf->after_sb_0p) {
+ rcu_read_lock();
+ after_sb_0p = rcu_dereference(mdev->tconn->net_conf)->after_sb_0p;
+ rcu_read_unlock();
+ switch (after_sb_0p) {
case ASB_CONSENSUS:
case ASB_DISCARD_SECONDARY:
case ASB_CALL_HELPER:
+ case ASB_VIOLENTLY:
dev_err(DEV, "Configuration error.\n");
break;
case ASB_DISCONNECT:
@@ -2200,14 +2586,14 @@
"Using discard-least-changes instead\n");
case ASB_DISCARD_ZERO_CHG:
if (ch_peer == 0 && ch_self == 0) {
- rv = drbd_test_flag(mdev, DISCARD_CONCURRENT)
+ rv = test_bit(RESOLVE_CONFLICTS, &mdev->tconn->flags)
? -1 : 1;
break;
} else {
if (ch_peer == 0) { rv = 1; break; }
if (ch_self == 0) { rv = -1; break; }
}
- if (mdev->net_conf->after_sb_0p == ASB_DISCARD_ZERO_CHG)
+ if (after_sb_0p == ASB_DISCARD_ZERO_CHG)
break;
case ASB_DISCARD_LEAST_CHG:
if (ch_self < ch_peer)
@@ -2216,7 +2602,7 @@
rv = 1;
else /* ( ch_self == ch_peer ) */
/* Well, then use something else. */
- rv = drbd_test_flag(mdev, DISCARD_CONCURRENT)
+ rv = test_bit(RESOLVE_CONFLICTS, &mdev->tconn->flags)
? -1 : 1;
break;
case ASB_DISCARD_LOCAL:
@@ -2232,13 +2618,18 @@
static int drbd_asb_recover_1p(struct drbd_conf *mdev) __must_hold(local)
{
int hg, rv = -100;
+ enum drbd_after_sb_p after_sb_1p;
- switch (mdev->net_conf->after_sb_1p) {
+ rcu_read_lock();
+ after_sb_1p = rcu_dereference(mdev->tconn->net_conf)->after_sb_1p;
+ rcu_read_unlock();
+ switch (after_sb_1p) {
case ASB_DISCARD_YOUNGER_PRI:
case ASB_DISCARD_OLDER_PRI:
case ASB_DISCARD_LEAST_CHG:
case ASB_DISCARD_LOCAL:
case ASB_DISCARD_REMOTE:
+ case ASB_DISCARD_ZERO_CHG:
dev_err(DEV, "Configuration error.\n");
break;
case ASB_DISCONNECT:
@@ -2281,8 +2672,12 @@
static int drbd_asb_recover_2p(struct drbd_conf *mdev) __must_hold(local)
{
int hg, rv = -100;
+ enum drbd_after_sb_p after_sb_2p;
- switch (mdev->net_conf->after_sb_2p) {
+ rcu_read_lock();
+ after_sb_2p = rcu_dereference(mdev->tconn->net_conf)->after_sb_2p;
+ rcu_read_unlock();
+ switch (after_sb_2p) {
case ASB_DISCARD_YOUNGER_PRI:
case ASB_DISCARD_OLDER_PRI:
case ASB_DISCARD_LEAST_CHG:
@@ -2290,6 +2685,7 @@
case ASB_DISCARD_REMOTE:
case ASB_CONSENSUS:
case ASB_DISCARD_SECONDARY:
+ case ASB_DISCARD_ZERO_CHG:
dev_err(DEV, "Configuration error.\n");
break;
case ASB_VIOLENTLY:
@@ -2375,7 +2771,7 @@
if (mdev->p_uuid[UI_BITMAP] == (u64)0 && mdev->ldev->md.uuid[UI_BITMAP] != (u64)0) {
- if (mdev->agreed_pro_version < 91)
+ if (mdev->tconn->agreed_pro_version < 91)
return -1091;
if ((mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START] & ~((u64)1)) &&
@@ -2398,7 +2794,7 @@
if (mdev->ldev->md.uuid[UI_BITMAP] == (u64)0 && mdev->p_uuid[UI_BITMAP] != (u64)0) {
- if (mdev->agreed_pro_version < 91)
+ if (mdev->tconn->agreed_pro_version < 91)
return -1091;
if ((mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) == (mdev->p_uuid[UI_BITMAP] & ~((u64)1)) &&
@@ -2420,7 +2816,7 @@
}
/* Common power [off|failure] */
- rct = (drbd_test_flag(mdev, CRASHED_PRIMARY) ? 1 : 0) +
+ rct = (test_bit(CRASHED_PRIMARY, &mdev->flags) ? 1 : 0) +
(mdev->p_uuid[UI_FLAGS] & 2);
/* lowest bit is set when we were primary,
* next bit (weight 2) is set when peer was primary */
@@ -2431,7 +2827,7 @@
case 1: /* self_pri && !peer_pri */ return 1;
case 2: /* !self_pri && peer_pri */ return -1;
case 3: /* self_pri && peer_pri */
- dc = drbd_test_flag(mdev, DISCARD_CONCURRENT);
+ dc = test_bit(RESOLVE_CONFLICTS, &mdev->tconn->flags);
return dc ? -1 : 1;
}
}
@@ -2444,14 +2840,14 @@
*rule_nr = 51;
peer = mdev->p_uuid[UI_HISTORY_START] & ~((u64)1);
if (self == peer) {
- if (mdev->agreed_pro_version < 96 ?
+ if (mdev->tconn->agreed_pro_version < 96 ?
(mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) ==
(mdev->p_uuid[UI_HISTORY_START + 1] & ~((u64)1)) :
peer + UUID_NEW_BM_OFFSET == (mdev->p_uuid[UI_BITMAP] & ~((u64)1))) {
/* The last P_SYNC_UUID did not get through. Undo the last start of
resync as sync source modifications of the peer's UUIDs. */
- if (mdev->agreed_pro_version < 91)
+ if (mdev->tconn->agreed_pro_version < 91)
return -1091;
mdev->p_uuid[UI_BITMAP] = mdev->p_uuid[UI_HISTORY_START];
@@ -2481,14 +2877,14 @@
*rule_nr = 71;
self = mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1);
if (self == peer) {
- if (mdev->agreed_pro_version < 96 ?
+ if (mdev->tconn->agreed_pro_version < 96 ?
(mdev->ldev->md.uuid[UI_HISTORY_START + 1] & ~((u64)1)) ==
(mdev->p_uuid[UI_HISTORY_START] & ~((u64)1)) :
self + UUID_NEW_BM_OFFSET == (mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1))) {
/* The last P_SYNC_UUID did not get through. Undo the last start of
resync as sync source modifications of our UUIDs. */
- if (mdev->agreed_pro_version < 91)
+ if (mdev->tconn->agreed_pro_version < 91)
return -1091;
__drbd_uuid_set(mdev, UI_BITMAP, mdev->ldev->md.uuid[UI_HISTORY_START]);
@@ -2536,9 +2932,10 @@
static enum drbd_conns drbd_sync_handshake(struct drbd_conf *mdev, enum drbd_role peer_role,
enum drbd_disk_state peer_disk) __must_hold(local)
{
- int hg, rule_nr;
enum drbd_conns rv = C_MASK;
enum drbd_disk_state mydisk;
+ struct net_conf *nc;
+ int hg, rule_nr, rr_conflict, tentative;
mydisk = mdev->state.disk;
if (mydisk == D_NEGOTIATING)
@@ -2578,7 +2975,10 @@
if (abs(hg) == 100)
drbd_khelper(mdev, "initial-split-brain");
- if (hg == 100 || (hg == -100 && mdev->net_conf->always_asbp)) {
+ rcu_read_lock();
+ nc = rcu_dereference(mdev->tconn->net_conf);
+
+ if (hg == 100 || (hg == -100 && nc->always_asbp)) {
int pcount = (mdev->state.role == R_PRIMARY)
+ (peer_role == R_PRIMARY);
int forced = (hg == -100);
@@ -2607,9 +3007,9 @@
}
if (hg == -100) {
- if (mdev->net_conf->want_lose && !(mdev->p_uuid[UI_FLAGS]&1))
+ if (test_bit(DISCARD_MY_DATA, &mdev->flags) && !(mdev->p_uuid[UI_FLAGS]&1))
hg = -1;
- if (!mdev->net_conf->want_lose && (mdev->p_uuid[UI_FLAGS]&1))
+ if (!test_bit(DISCARD_MY_DATA, &mdev->flags) && (mdev->p_uuid[UI_FLAGS]&1))
hg = 1;
if (abs(hg) < 100)
@@ -2617,6 +3017,9 @@
"Sync from %s node\n",
(hg < 0) ? "peer" : "this");
}
+ rr_conflict = nc->rr_conflict;
+ tentative = nc->tentative;
+ rcu_read_unlock();
if (hg == -100) {
/* FIXME this log message is not correct if we end up here
@@ -2635,7 +3038,7 @@
if (hg < 0 && /* by intention we do not use mydisk here. */
mdev->state.role == R_PRIMARY && mdev->state.disk >= D_CONSISTENT) {
- switch (mdev->net_conf->rr_conflict) {
+ switch (rr_conflict) {
case ASB_CALL_HELPER:
drbd_khelper(mdev, "pri-lost");
/* fall through */
@@ -2648,7 +3051,7 @@
}
}
- if (mdev->net_conf->dry_run || drbd_test_flag(mdev, CONN_DRY_RUN)) {
+ if (tentative || test_bit(CONN_DRY_RUN, &mdev->tconn->flags)) {
if (hg == 0)
dev_info(DEV, "dry-run connect: No resync, would become Connected immediately.\n");
else
@@ -2680,33 +3083,29 @@
return rv;
}
-/* returns 1 if invalid */
-static int cmp_after_sb(enum drbd_after_sb_p peer, enum drbd_after_sb_p self)
+static enum drbd_after_sb_p convert_after_sb(enum drbd_after_sb_p peer)
{
/* ASB_DISCARD_REMOTE - ASB_DISCARD_LOCAL is valid */
- if ((peer == ASB_DISCARD_REMOTE && self == ASB_DISCARD_LOCAL) ||
- (self == ASB_DISCARD_REMOTE && peer == ASB_DISCARD_LOCAL))
- return 0;
+ if (peer == ASB_DISCARD_REMOTE)
+ return ASB_DISCARD_LOCAL;
/* any other things with ASB_DISCARD_REMOTE or ASB_DISCARD_LOCAL are invalid */
- if (peer == ASB_DISCARD_REMOTE || peer == ASB_DISCARD_LOCAL ||
- self == ASB_DISCARD_REMOTE || self == ASB_DISCARD_LOCAL)
- return 1;
+ if (peer == ASB_DISCARD_LOCAL)
+ return ASB_DISCARD_REMOTE;
/* everything else is valid if they are equal on both sides. */
- if (peer == self)
- return 0;
-
- /* everything es is invalid. */
- return 1;
+ return peer;
}
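/*
 * Example (illustration only): if the peer reports after-sb-0pri =
 * discard-remote, convert_after_sb() maps it to discard-local, which is
 * the value a compatible local config must contain; every other policy
 * simply has to match on both sides.
 */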
-static int receive_protocol(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
+static int receive_protocol(struct drbd_tconn *tconn, struct packet_info *pi)
{
- struct p_protocol *p = &mdev->data.rbuf.protocol;
- int p_proto, p_after_sb_0p, p_after_sb_1p, p_after_sb_2p;
- int p_want_lose, p_two_primaries, cf;
- char p_integrity_alg[SHARED_SECRET_MAX] = "";
+ struct p_protocol *p = pi->data;
+ enum drbd_after_sb_p p_after_sb_0p, p_after_sb_1p, p_after_sb_2p;
+ int p_proto, p_discard_my_data, p_two_primaries, cf;
+ struct net_conf *nc, *old_net_conf, *new_net_conf = NULL;
+ char integrity_alg[SHARED_SECRET_MAX] = "";
+ struct crypto_hash *peer_integrity_tfm = NULL;
+ void *int_dig_in = NULL, *int_dig_vv = NULL;
p_proto = be32_to_cpu(p->protocol);
p_after_sb_0p = be32_to_cpu(p->after_sb_0p);
@@ -2714,63 +3113,138 @@
p_after_sb_2p = be32_to_cpu(p->after_sb_2p);
p_two_primaries = be32_to_cpu(p->two_primaries);
cf = be32_to_cpu(p->conn_flags);
- p_want_lose = cf & CF_WANT_LOSE;
+ p_discard_my_data = cf & CF_DISCARD_MY_DATA;
- drbd_clear_flag(mdev, CONN_DRY_RUN);
+ if (tconn->agreed_pro_version >= 87) {
+ int err;
- if (cf & CF_DRY_RUN)
- drbd_set_flag(mdev, CONN_DRY_RUN);
-
- if (p_proto != mdev->net_conf->wire_protocol) {
- dev_err(DEV, "incompatible communication protocols\n");
- goto disconnect;
+ if (pi->size > sizeof(integrity_alg))
+ return -EIO;
+ err = drbd_recv_all(tconn, integrity_alg, pi->size);
+ if (err)
+ return err;
+ integrity_alg[SHARED_SECRET_MAX - 1] = 0;
}
- if (cmp_after_sb(p_after_sb_0p, mdev->net_conf->after_sb_0p)) {
- dev_err(DEV, "incompatible after-sb-0pri settings\n");
- goto disconnect;
+ if (pi->cmd != P_PROTOCOL_UPDATE) {
+ clear_bit(CONN_DRY_RUN, &tconn->flags);
+
+ if (cf & CF_DRY_RUN)
+ set_bit(CONN_DRY_RUN, &tconn->flags);
+
+ rcu_read_lock();
+ nc = rcu_dereference(tconn->net_conf);
+
+ if (p_proto != nc->wire_protocol) {
+ conn_err(tconn, "incompatible %s settings\n", "protocol");
+ goto disconnect_rcu_unlock;
+ }
+
+ if (convert_after_sb(p_after_sb_0p) != nc->after_sb_0p) {
+ conn_err(tconn, "incompatible %s settings\n", "after-sb-0pri");
+ goto disconnect_rcu_unlock;
+ }
+
+ if (convert_after_sb(p_after_sb_1p) != nc->after_sb_1p) {
+ conn_err(tconn, "incompatible %s settings\n", "after-sb-1pri");
+ goto disconnect_rcu_unlock;
+ }
+
+ if (convert_after_sb(p_after_sb_2p) != nc->after_sb_2p) {
+ conn_err(tconn, "incompatible %s settings\n", "after-sb-2pri");
+ goto disconnect_rcu_unlock;
+ }
+
+ if (p_discard_my_data && nc->discard_my_data) {
+ conn_err(tconn, "incompatible %s settings\n", "discard-my-data");
+ goto disconnect_rcu_unlock;
+ }
+
+ if (p_two_primaries != nc->two_primaries) {
+ conn_err(tconn, "incompatible %s settings\n", "allow-two-primaries");
+ goto disconnect_rcu_unlock;
+ }
+
+ if (strcmp(integrity_alg, nc->integrity_alg)) {
+ conn_err(tconn, "incompatible %s settings\n", "data-integrity-alg");
+ goto disconnect_rcu_unlock;
+ }
+
+ rcu_read_unlock();
}
- if (cmp_after_sb(p_after_sb_1p, mdev->net_conf->after_sb_1p)) {
- dev_err(DEV, "incompatible after-sb-1pri settings\n");
- goto disconnect;
- }
+ if (integrity_alg[0]) {
+ int hash_size;
- if (cmp_after_sb(p_after_sb_2p, mdev->net_conf->after_sb_2p)) {
- dev_err(DEV, "incompatible after-sb-2pri settings\n");
- goto disconnect;
- }
+ /*
+ * We can only change the peer data integrity algorithm
+ * here. Changing our own data integrity algorithm
+ * requires that we send a P_PROTOCOL_UPDATE packet at
+ * the same time; otherwise, the peer has no way to
+ * tell between which packets the algorithm should
+ * change.
+ */
- if (p_want_lose && mdev->net_conf->want_lose) {
- dev_err(DEV, "both sides have the 'want_lose' flag set\n");
- goto disconnect;
- }
-
- if (p_two_primaries != mdev->net_conf->two_primaries) {
- dev_err(DEV, "incompatible setting of the two-primaries options\n");
- goto disconnect;
- }
-
- if (mdev->agreed_pro_version >= 87) {
- unsigned char *my_alg = mdev->net_conf->integrity_alg;
-
- if (drbd_recv(mdev, p_integrity_alg, data_size) != data_size)
- return false;
-
- p_integrity_alg[SHARED_SECRET_MAX-1] = 0;
- if (strcmp(p_integrity_alg, my_alg)) {
- dev_err(DEV, "incompatible setting of the data-integrity-alg\n");
+ peer_integrity_tfm = crypto_alloc_hash(integrity_alg, 0, CRYPTO_ALG_ASYNC);
+ if (!peer_integrity_tfm) {
+ conn_err(tconn, "peer data-integrity-alg %s not supported\n",
+ integrity_alg);
goto disconnect;
}
- dev_info(DEV, "data-integrity-alg: %s\n",
- my_alg[0] ? my_alg : (unsigned char *)"<not-used>");
+
+ hash_size = crypto_hash_digestsize(peer_integrity_tfm);
+ int_dig_in = kmalloc(hash_size, GFP_KERNEL);
+ int_dig_vv = kmalloc(hash_size, GFP_KERNEL);
+ if (!(int_dig_in && int_dig_vv)) {
+ conn_err(tconn, "Allocation of buffers for data integrity checking failed\n");
+ goto disconnect;
+ }
}
- return true;
+ new_net_conf = kmalloc(sizeof(struct net_conf), GFP_KERNEL);
+ if (!new_net_conf) {
+ conn_err(tconn, "Allocation of new net_conf failed\n");
+ goto disconnect;
+ }
+ mutex_lock(&tconn->data.mutex);
+ mutex_lock(&tconn->conf_update);
+ old_net_conf = tconn->net_conf;
+ *new_net_conf = *old_net_conf;
+
+ new_net_conf->wire_protocol = p_proto;
+ new_net_conf->after_sb_0p = convert_after_sb(p_after_sb_0p);
+ new_net_conf->after_sb_1p = convert_after_sb(p_after_sb_1p);
+ new_net_conf->after_sb_2p = convert_after_sb(p_after_sb_2p);
+ new_net_conf->two_primaries = p_two_primaries;
+
+ rcu_assign_pointer(tconn->net_conf, new_net_conf);
+ mutex_unlock(&tconn->conf_update);
+ mutex_unlock(&tconn->data.mutex);
+
+ crypto_free_hash(tconn->peer_integrity_tfm);
+ kfree(tconn->int_dig_in);
+ kfree(tconn->int_dig_vv);
+ tconn->peer_integrity_tfm = peer_integrity_tfm;
+ tconn->int_dig_in = int_dig_in;
+ tconn->int_dig_vv = int_dig_vv;
+
+ if (strcmp(old_net_conf->integrity_alg, integrity_alg))
+ conn_info(tconn, "peer data-integrity-alg: %s\n",
+ integrity_alg[0] ? integrity_alg : "(none)");
+
+ synchronize_rcu();
+ kfree(old_net_conf);
+ return 0;
+
+disconnect_rcu_unlock:
+ rcu_read_unlock();
disconnect:
- drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
- return false;
+ crypto_free_hash(peer_integrity_tfm);
+ kfree(int_dig_in);
+ kfree(int_dig_vv);
+ conn_request_state(tconn, NS(conn, C_DISCONNECTING), CS_HARD);
+ return -EIO;
}
/* helper function
@@ -2792,24 +3266,64 @@
alg, name, PTR_ERR(tfm));
return tfm;
}
- if (!drbd_crypto_is_hash(crypto_hash_tfm(tfm))) {
- crypto_free_hash(tfm);
- dev_err(DEV, "\"%s\" is not a digest (%s)\n", alg, name);
- return ERR_PTR(-EINVAL);
- }
return tfm;
}
-static int receive_SyncParam(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int packet_size)
+static int ignore_remaining_packet(struct drbd_tconn *tconn, struct packet_info *pi)
{
- int ok = true;
- struct p_rs_param_95 *p = &mdev->data.rbuf.rs_param_95;
+ void *buffer = tconn->data.rbuf;
+ int size = pi->size;
+
+ while (size) {
+ int s = min_t(int, size, DRBD_SOCKET_BUFFER_SIZE);
+ s = drbd_recv(tconn, buffer, s);
+ if (s <= 0) {
+ if (s < 0)
+ return s;
+ break;
+ }
+ size -= s;
+ }
+ if (size)
+ return -EIO;
+ return 0;
+}
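+/*
+ * Usage note: this drains pi->size bytes of payload in
+ * DRBD_SOCKET_BUFFER_SIZE chunks, using tconn->data.rbuf as scratch
+ * space, so the receive stream stays in sync even when a packet is
+ * ignored; config_unknown_volume() below is one caller.
+ */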
+
+/*
+ * config_unknown_volume - device configuration command for unknown volume
+ *
+ * When a device is added to an existing connection, the node on which the
+ * device is added first will send configuration commands to its peer but the
+ * peer will not know about the device yet. It will warn and ignore these
+ * commands. Once the device is added on the second node, the second node will
+ * send the same device configuration commands, but in the other direction.
+ *
+ * (We can also end up here if drbd is misconfigured.)
+ */
+static int config_unknown_volume(struct drbd_tconn *tconn, struct packet_info *pi)
+{
+ conn_warn(tconn, "%s packet received for volume %u, which is not configured locally\n",
+ cmdname(pi->cmd), pi->vnr);
+ return ignore_remaining_packet(tconn, pi);
+}
+
+static int receive_SyncParam(struct drbd_tconn *tconn, struct packet_info *pi)
+{
+ struct drbd_conf *mdev;
+ struct p_rs_param_95 *p;
unsigned int header_size, data_size, exp_max_sz;
struct crypto_hash *verify_tfm = NULL;
struct crypto_hash *csums_tfm = NULL;
- const int apv = mdev->agreed_pro_version;
- int *rs_plan_s = NULL;
+ struct net_conf *old_net_conf, *new_net_conf = NULL;
+ struct disk_conf *old_disk_conf = NULL, *new_disk_conf = NULL;
+ const int apv = tconn->agreed_pro_version;
+ struct fifo_buffer *old_plan = NULL, *new_plan = NULL;
int fifo_size = 0;
+ int err;
+
+ mdev = vnr_to_mdev(tconn, pi->vnr);
+ if (!mdev)
+ return config_unknown_volume(tconn, pi);
exp_max_sz = apv <= 87 ? sizeof(struct p_rs_param)
: apv == 88 ? sizeof(struct p_rs_param)
@@ -2817,32 +3331,49 @@
: apv <= 94 ? sizeof(struct p_rs_param_89)
: /* apv >= 95 */ sizeof(struct p_rs_param_95);
- if (packet_size > exp_max_sz) {
+ if (pi->size > exp_max_sz) {
dev_err(DEV, "SyncParam packet too long: received %u, expected <= %u bytes\n",
- packet_size, exp_max_sz);
- return false;
+ pi->size, exp_max_sz);
+ return -EIO;
}
if (apv <= 88) {
- header_size = sizeof(struct p_rs_param) - sizeof(struct p_header80);
- data_size = packet_size - header_size;
+ header_size = sizeof(struct p_rs_param);
+ data_size = pi->size - header_size;
} else if (apv <= 94) {
- header_size = sizeof(struct p_rs_param_89) - sizeof(struct p_header80);
- data_size = packet_size - header_size;
+ header_size = sizeof(struct p_rs_param_89);
+ data_size = pi->size - header_size;
D_ASSERT(data_size == 0);
} else {
- header_size = sizeof(struct p_rs_param_95) - sizeof(struct p_header80);
- data_size = packet_size - header_size;
+ header_size = sizeof(struct p_rs_param_95);
+ data_size = pi->size - header_size;
D_ASSERT(data_size == 0);
}
/* initialize verify_alg and csums_alg */
+ p = pi->data;
memset(p->verify_alg, 0, 2 * SHARED_SECRET_MAX);
- if (drbd_recv(mdev, &p->head.payload, header_size) != header_size)
- return false;
+ err = drbd_recv_all(mdev->tconn, p, header_size);
+ if (err)
+ return err;
- mdev->sync_conf.rate = be32_to_cpu(p->rate);
+ mutex_lock(&mdev->tconn->conf_update);
+ old_net_conf = mdev->tconn->net_conf;
+ if (get_ldev(mdev)) {
+ new_disk_conf = kzalloc(sizeof(struct disk_conf), GFP_KERNEL);
+ if (!new_disk_conf) {
+ put_ldev(mdev);
+ mutex_unlock(&mdev->tconn->conf_update);
+ dev_err(DEV, "Allocation of new disk_conf failed\n");
+ return -ENOMEM;
+ }
+
+ old_disk_conf = mdev->ldev->disk_conf;
+ *new_disk_conf = *old_disk_conf;
+
+ new_disk_conf->resync_rate = be32_to_cpu(p->resync_rate);
+ }
if (apv >= 88) {
if (apv == 88) {
@@ -2850,12 +3381,13 @@
dev_err(DEV, "verify-alg of wrong size, "
"peer wants %u, accepting only up to %u byte\n",
data_size, SHARED_SECRET_MAX);
- return false;
+ err = -EIO;
+ goto reconnect;
}
- if (drbd_recv(mdev, p->verify_alg, data_size) != data_size)
- return false;
-
+ err = drbd_recv_all(mdev->tconn, p->verify_alg, data_size);
+ if (err)
+ goto reconnect;
/* we expect NUL terminated string */
/* but just in case someone tries to be evil */
D_ASSERT(p->verify_alg[data_size-1] == 0);
@@ -2870,10 +3402,10 @@
p->csums_alg[SHARED_SECRET_MAX-1] = 0;
}
- if (strcmp(mdev->sync_conf.verify_alg, p->verify_alg)) {
+ if (strcmp(old_net_conf->verify_alg, p->verify_alg)) {
if (mdev->state.conn == C_WF_REPORT_PARAMS) {
dev_err(DEV, "Different verify-alg settings. me=\"%s\" peer=\"%s\"\n",
- mdev->sync_conf.verify_alg, p->verify_alg);
+ old_net_conf->verify_alg, p->verify_alg);
goto disconnect;
}
verify_tfm = drbd_crypto_alloc_digest_safe(mdev,
@@ -2884,10 +3416,10 @@
}
}
- if (apv >= 89 && strcmp(mdev->sync_conf.csums_alg, p->csums_alg)) {
+ if (apv >= 89 && strcmp(old_net_conf->csums_alg, p->csums_alg)) {
if (mdev->state.conn == C_WF_REPORT_PARAMS) {
dev_err(DEV, "Different csums-alg settings. me=\"%s\" peer=\"%s\"\n",
- mdev->sync_conf.csums_alg, p->csums_alg);
+ old_net_conf->csums_alg, p->csums_alg);
goto disconnect;
}
csums_tfm = drbd_crypto_alloc_digest_safe(mdev,
@@ -2898,57 +3430,91 @@
}
}
- if (apv > 94) {
- mdev->sync_conf.rate = be32_to_cpu(p->rate);
- mdev->sync_conf.c_plan_ahead = be32_to_cpu(p->c_plan_ahead);
- mdev->sync_conf.c_delay_target = be32_to_cpu(p->c_delay_target);
- mdev->sync_conf.c_fill_target = be32_to_cpu(p->c_fill_target);
- mdev->sync_conf.c_max_rate = be32_to_cpu(p->c_max_rate);
+ if (apv > 94 && new_disk_conf) {
+ new_disk_conf->c_plan_ahead = be32_to_cpu(p->c_plan_ahead);
+ new_disk_conf->c_delay_target = be32_to_cpu(p->c_delay_target);
+ new_disk_conf->c_fill_target = be32_to_cpu(p->c_fill_target);
+ new_disk_conf->c_max_rate = be32_to_cpu(p->c_max_rate);
- fifo_size = (mdev->sync_conf.c_plan_ahead * 10 * SLEEP_TIME) / HZ;
- if (fifo_size != mdev->rs_plan_s.size && fifo_size > 0) {
- rs_plan_s = kzalloc(sizeof(int) * fifo_size, GFP_NOIO);
- if (!rs_plan_s) {
+ fifo_size = (new_disk_conf->c_plan_ahead * 10 * SLEEP_TIME) / HZ;
+ if (fifo_size != mdev->rs_plan_s->size) {
+ new_plan = fifo_alloc(fifo_size);
+ if (!new_plan) {
dev_err(DEV, "kmalloc of fifo_buffer failed");
+ put_ldev(mdev);
goto disconnect;
}
}
}
- spin_lock(&mdev->peer_seq_lock);
- /* lock against drbd_nl_syncer_conf() */
- if (verify_tfm) {
- strcpy(mdev->sync_conf.verify_alg, p->verify_alg);
- mdev->sync_conf.verify_alg_len = strlen(p->verify_alg) + 1;
- crypto_free_hash(mdev->verify_tfm);
- mdev->verify_tfm = verify_tfm;
- dev_info(DEV, "using verify-alg: \"%s\"\n", p->verify_alg);
+ if (verify_tfm || csums_tfm) {
+ new_net_conf = kzalloc(sizeof(struct net_conf), GFP_KERNEL);
+ if (!new_net_conf) {
+ dev_err(DEV, "Allocation of new net_conf failed\n");
+ goto disconnect;
+ }
+
+ *new_net_conf = *old_net_conf;
+
+ if (verify_tfm) {
+ strcpy(new_net_conf->verify_alg, p->verify_alg);
+ new_net_conf->verify_alg_len = strlen(p->verify_alg) + 1;
+ crypto_free_hash(mdev->tconn->verify_tfm);
+ mdev->tconn->verify_tfm = verify_tfm;
+ dev_info(DEV, "using verify-alg: \"%s\"\n", p->verify_alg);
+ }
+ if (csums_tfm) {
+ strcpy(new_net_conf->csums_alg, p->csums_alg);
+ new_net_conf->csums_alg_len = strlen(p->csums_alg) + 1;
+ crypto_free_hash(mdev->tconn->csums_tfm);
+ mdev->tconn->csums_tfm = csums_tfm;
+ dev_info(DEV, "using csums-alg: \"%s\"\n", p->csums_alg);
+ }
+ rcu_assign_pointer(tconn->net_conf, new_net_conf);
}
- if (csums_tfm) {
- strcpy(mdev->sync_conf.csums_alg, p->csums_alg);
- mdev->sync_conf.csums_alg_len = strlen(p->csums_alg) + 1;
- crypto_free_hash(mdev->csums_tfm);
- mdev->csums_tfm = csums_tfm;
- dev_info(DEV, "using csums-alg: \"%s\"\n", p->csums_alg);
- }
- if (fifo_size != mdev->rs_plan_s.size) {
- kfree(mdev->rs_plan_s.values);
- mdev->rs_plan_s.values = rs_plan_s;
- mdev->rs_plan_s.size = fifo_size;
- mdev->rs_planed = 0;
- }
- spin_unlock(&mdev->peer_seq_lock);
}
- return ok;
+ if (new_disk_conf) {
+ rcu_assign_pointer(mdev->ldev->disk_conf, new_disk_conf);
+ put_ldev(mdev);
+ }
+
+ if (new_plan) {
+ old_plan = mdev->rs_plan_s;
+ rcu_assign_pointer(mdev->rs_plan_s, new_plan);
+ }
+
+ mutex_unlock(&mdev->tconn->conf_update);
+ synchronize_rcu();
+ if (new_net_conf)
+ kfree(old_net_conf);
+ kfree(old_disk_conf);
+ kfree(old_plan);
+
+ return 0;
+
+reconnect:
+ if (new_disk_conf) {
+ put_ldev(mdev);
+ kfree(new_disk_conf);
+ }
+ mutex_unlock(&mdev->tconn->conf_update);
+ return -EIO;
+
disconnect:
+ kfree(new_plan);
+ if (new_disk_conf) {
+ put_ldev(mdev);
+ kfree(new_disk_conf);
+ }
+ mutex_unlock(&mdev->tconn->conf_update);
/* just for completeness: actually not needed,
* as this is not reached if csums_tfm was ok. */
crypto_free_hash(csums_tfm);
/* but free the verify_tfm again, if csums_tfm did not work out */
crypto_free_hash(verify_tfm);
- drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
- return false;
+ conn_request_state(mdev->tconn, NS(conn, C_DISCONNECTING), CS_HARD);
+ return -EIO;
}
/* warn if the arguments differ by more than 12.5% */
@@ -2964,59 +3530,77 @@
(unsigned long long)a, (unsigned long long)b);
}
-static int receive_sizes(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
+static int receive_sizes(struct drbd_tconn *tconn, struct packet_info *pi)
{
- struct p_sizes *p = &mdev->data.rbuf.sizes;
+ struct drbd_conf *mdev;
+ struct p_sizes *p = pi->data;
enum determine_dev_size dd = unchanged;
sector_t p_size, p_usize, my_usize;
int ldsc = 0; /* local disk size changed */
enum dds_flags ddsf;
+ mdev = vnr_to_mdev(tconn, pi->vnr);
+ if (!mdev)
+ return config_unknown_volume(tconn, pi);
+
p_size = be64_to_cpu(p->d_size);
p_usize = be64_to_cpu(p->u_size);
- if (p_size == 0 && mdev->state.disk == D_DISKLESS) {
- dev_err(DEV, "some backing storage is needed\n");
- drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
- return false;
- }
-
/* just store the peer's disk size for now.
* we still need to figure out whether we accept that. */
mdev->p_size = p_size;
if (get_ldev(mdev)) {
+ rcu_read_lock();
+ my_usize = rcu_dereference(mdev->ldev->disk_conf)->disk_size;
+ rcu_read_unlock();
+
warn_if_differ_considerably(mdev, "lower level device sizes",
p_size, drbd_get_max_capacity(mdev->ldev));
warn_if_differ_considerably(mdev, "user requested size",
- p_usize, mdev->ldev->dc.disk_size);
+ p_usize, my_usize);
/* if this is the first connect, or an otherwise expected
* param exchange, choose the minimum */
if (mdev->state.conn == C_WF_REPORT_PARAMS)
- p_usize = min_not_zero((sector_t)mdev->ldev->dc.disk_size,
- p_usize);
-
- my_usize = mdev->ldev->dc.disk_size;
-
- if (mdev->ldev->dc.disk_size != p_usize) {
- mdev->ldev->dc.disk_size = p_usize;
- dev_info(DEV, "Peer sets u_size to %lu sectors\n",
- (unsigned long)mdev->ldev->dc.disk_size);
- }
+ p_usize = min_not_zero(my_usize, p_usize);
/* Never shrink a device with usable data during connect.
But allow online shrinking if we are connected. */
- if (drbd_new_dev_size(mdev, mdev->ldev, 0) <
- drbd_get_capacity(mdev->this_bdev) &&
- mdev->state.disk >= D_OUTDATED &&
- mdev->state.conn < C_CONNECTED) {
+ if (drbd_new_dev_size(mdev, mdev->ldev, p_usize, 0) <
+ drbd_get_capacity(mdev->this_bdev) &&
+ mdev->state.disk >= D_OUTDATED &&
+ mdev->state.conn < C_CONNECTED) {
dev_err(DEV, "The peer's disk size is too small!\n");
- drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
- mdev->ldev->dc.disk_size = my_usize;
+ conn_request_state(mdev->tconn, NS(conn, C_DISCONNECTING), CS_HARD);
put_ldev(mdev);
- return false;
+ return -EIO;
}
+
+ if (my_usize != p_usize) {
+ struct disk_conf *old_disk_conf, *new_disk_conf = NULL;
+
+ new_disk_conf = kzalloc(sizeof(struct disk_conf), GFP_KERNEL);
+ if (!new_disk_conf) {
+ dev_err(DEV, "Allocation of new disk_conf failed\n");
+ put_ldev(mdev);
+ return -ENOMEM;
+ }
+
+ mutex_lock(&mdev->tconn->conf_update);
+ old_disk_conf = mdev->ldev->disk_conf;
+ *new_disk_conf = *old_disk_conf;
+ new_disk_conf->disk_size = p_usize;
+
+ rcu_assign_pointer(mdev->ldev->disk_conf, new_disk_conf);
+ mutex_unlock(&mdev->tconn->conf_update);
+ synchronize_rcu();
+ kfree(old_disk_conf);
+
+ dev_info(DEV, "Peer sets u_size to %lu sectors\n",
+ (unsigned long)my_usize);
+ }
+
put_ldev(mdev);
}
@@ -3025,7 +3609,7 @@
dd = drbd_determine_dev_size(mdev, ddsf);
put_ldev(mdev);
if (dd == dev_size_error)
- return false;
+ return -EIO;
drbd_md_sync(mdev);
} else {
/* I am diskless, need to accept the peer's size. */
@@ -3051,7 +3635,7 @@
* needs to know my new size... */
drbd_send_sizes(mdev, 0, ddsf);
}
- if (drbd_test_and_clear_flag(mdev, RESIZE_PENDING) ||
+ if (test_and_clear_bit(RESIZE_PENDING, &mdev->flags) ||
(dd == grew && mdev->state.conn == C_CONNECTED)) {
if (mdev->state.pdsk >= D_INCONSISTENT &&
mdev->state.disk >= D_INCONSISTENT) {
@@ -3060,19 +3644,24 @@
else
resync_after_online_grow(mdev);
} else
- drbd_set_flag(mdev, RESYNC_AFTER_NEG);
+ set_bit(RESYNC_AFTER_NEG, &mdev->flags);
}
}
- return true;
+ return 0;
}
-static int receive_uuids(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
+static int receive_uuids(struct drbd_tconn *tconn, struct packet_info *pi)
{
- struct p_uuids *p = &mdev->data.rbuf.uuids;
+ struct drbd_conf *mdev;
+ struct p_uuids *p = pi->data;
u64 *p_uuid;
int i, updated_uuids = 0;
+ mdev = vnr_to_mdev(tconn, pi->vnr);
+ if (!mdev)
+ return config_unknown_volume(tconn, pi);
+
p_uuid = kmalloc(sizeof(u64)*UI_EXTENDED_SIZE, GFP_NOIO);
for (i = UI_CURRENT; i < UI_EXTENDED_SIZE; i++)
@@ -3087,14 +3676,14 @@
(mdev->ed_uuid & ~((u64)1)) != (p_uuid[UI_CURRENT] & ~((u64)1))) {
dev_err(DEV, "Can only connect to data with current UUID=%016llX\n",
(unsigned long long)mdev->ed_uuid);
- drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
- return false;
+ conn_request_state(mdev->tconn, NS(conn, C_DISCONNECTING), CS_HARD);
+ return -EIO;
}
if (get_ldev(mdev)) {
int skip_initial_sync =
mdev->state.conn == C_CONNECTED &&
- mdev->agreed_pro_version >= 90 &&
+ mdev->tconn->agreed_pro_version >= 90 &&
mdev->ldev->md.uuid[UI_CURRENT] == UUID_JUST_CREATED &&
(p_uuid[UI_FLAGS] & 8);
if (skip_initial_sync) {
@@ -3121,14 +3710,15 @@
ongoing cluster wide state change is finished. That is important if
we are primary and are detaching from our disk. We need to see the
new disk state... */
- wait_event(mdev->misc_wait, !drbd_test_flag(mdev, CLUSTER_ST_CHANGE));
+ mutex_lock(mdev->state_mutex);
+ mutex_unlock(mdev->state_mutex);
if (mdev->state.conn >= C_CONNECTED && mdev->state.disk < D_INCONSISTENT)
updated_uuids |= drbd_set_ed_uuid(mdev, p_uuid[UI_CURRENT]);
if (updated_uuids)
drbd_print_uuids(mdev, "receiver updated UUIDs to");
- return true;
+ return 0;
}
/**
@@ -3140,6 +3730,7 @@
union drbd_state ms;
static enum drbd_conns c_tab[] = {
+ [C_WF_REPORT_PARAMS] = C_WF_REPORT_PARAMS,
[C_CONNECTED] = C_CONNECTED,
[C_STARTING_SYNC_S] = C_STARTING_SYNC_T,
@@ -3161,40 +3752,74 @@
return ms;
}
-static int receive_req_state(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
+static int receive_req_state(struct drbd_tconn *tconn, struct packet_info *pi)
{
- struct p_req_state *p = &mdev->data.rbuf.req_state;
+ struct drbd_conf *mdev;
+ struct p_req_state *p = pi->data;
union drbd_state mask, val;
enum drbd_state_rv rv;
+ mdev = vnr_to_mdev(tconn, pi->vnr);
+ if (!mdev)
+ return -EIO;
+
mask.i = be32_to_cpu(p->mask);
val.i = be32_to_cpu(p->val);
- if (drbd_test_flag(mdev, DISCARD_CONCURRENT) &&
- drbd_test_flag(mdev, CLUSTER_ST_CHANGE)) {
+ if (test_bit(RESOLVE_CONFLICTS, &mdev->tconn->flags) &&
+ mutex_is_locked(mdev->state_mutex)) {
drbd_send_sr_reply(mdev, SS_CONCURRENT_ST_CHG);
- return true;
+ return 0;
}
mask = convert_state(mask);
val = convert_state(val);
rv = drbd_change_state(mdev, CS_VERBOSE, mask, val);
-
drbd_send_sr_reply(mdev, rv);
+
drbd_md_sync(mdev);
- return true;
+ return 0;
}
-static int receive_state(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
+static int receive_req_conn_state(struct drbd_tconn *tconn, struct packet_info *pi)
{
- struct p_state *p = &mdev->data.rbuf.state;
+ struct p_req_state *p = pi->data;
+ union drbd_state mask, val;
+ enum drbd_state_rv rv;
+
+ mask.i = be32_to_cpu(p->mask);
+ val.i = be32_to_cpu(p->val);
+
+ if (test_bit(RESOLVE_CONFLICTS, &tconn->flags) &&
+ mutex_is_locked(&tconn->cstate_mutex)) {
+ conn_send_sr_reply(tconn, SS_CONCURRENT_ST_CHG);
+ return 0;
+ }
+
+ mask = convert_state(mask);
+ val = convert_state(val);
+
+ rv = conn_request_state(tconn, mask, val, CS_VERBOSE | CS_LOCAL_ONLY | CS_IGN_OUTD_FAIL);
+ conn_send_sr_reply(tconn, rv);
+
+ return 0;
+}
+
+static int receive_state(struct drbd_tconn *tconn, struct packet_info *pi)
+{
+ struct drbd_conf *mdev;
+ struct p_state *p = pi->data;
union drbd_state os, ns, peer_state;
enum drbd_disk_state real_peer_disk;
enum chg_state_flags cs_flags;
int rv;
+ mdev = vnr_to_mdev(tconn, pi->vnr);
+ if (!mdev)
+ return config_unknown_volume(tconn, pi);
+
peer_state.i = be32_to_cpu(p->state);
real_peer_disk = peer_state.disk;
@@ -3203,16 +3828,16 @@
dev_info(DEV, "real peer disk state = %s\n", drbd_disk_str(real_peer_disk));
}
- spin_lock_irq(&mdev->req_lock);
+ spin_lock_irq(&mdev->tconn->req_lock);
retry:
- os = ns = mdev->state;
- spin_unlock_irq(&mdev->req_lock);
+ os = ns = drbd_read_state(mdev);
+ spin_unlock_irq(&mdev->tconn->req_lock);
/* If some other part of the code (asender thread, timeout)
* already decided to close the connection again,
* we must not "re-establish" it here. */
if (os.conn <= C_TEAR_DOWN)
- return false;
+ return -ECONNRESET;
/* If this is the "end of sync" confirmation, usually the peer disk
* transitions from D_INCONSISTENT to D_UP_TO_DATE. For empty (0 bits
@@ -3240,16 +3865,16 @@
peer_state.conn == C_CONNECTED) {
if (drbd_bm_total_weight(mdev) <= mdev->rs_failed)
drbd_resync_finished(mdev);
- return true;
+ return 0;
}
}
/* explicit verify finished notification, stop sector reached. */
if (os.conn == C_VERIFY_T && os.disk == D_UP_TO_DATE &&
peer_state.conn == C_CONNECTED && real_peer_disk == D_UP_TO_DATE) {
- ov_oos_print(mdev);
+ ov_out_of_sync_print(mdev);
drbd_resync_finished(mdev);
- return true;
+ return 0;
}
/* peer says his disk is inconsistent, while we think it is uptodate,
@@ -3280,7 +3905,7 @@
os.disk == D_NEGOTIATING));
/* if we have both been inconsistent, and the peer has been
* forced to be UpToDate with --overwrite-data */
- cr |= drbd_test_flag(mdev, CONSIDER_RESYNC);
+ cr |= test_bit(CONSIDER_RESYNC, &mdev->flags);
/* if we had been plain connected, and the admin requested to
* start a sync by "invalidate" or "invalidate-remote" */
cr |= (os.conn == C_CONNECTED &&
@@ -3300,44 +3925,44 @@
peer_state.disk = D_DISKLESS;
real_peer_disk = D_DISKLESS;
} else {
- if (drbd_test_and_clear_flag(mdev, CONN_DRY_RUN))
- return false;
+ if (test_and_clear_bit(CONN_DRY_RUN, &mdev->tconn->flags))
+ return -EIO;
D_ASSERT(os.conn == C_WF_REPORT_PARAMS);
- drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
- return false;
+ conn_request_state(mdev->tconn, NS(conn, C_DISCONNECTING), CS_HARD);
+ return -EIO;
}
}
}
- spin_lock_irq(&mdev->req_lock);
- if (mdev->state.i != os.i)
+ spin_lock_irq(&mdev->tconn->req_lock);
+ if (os.i != drbd_read_state(mdev).i)
goto retry;
- drbd_clear_flag(mdev, CONSIDER_RESYNC);
+ clear_bit(CONSIDER_RESYNC, &mdev->flags);
ns.peer = peer_state.role;
ns.pdsk = real_peer_disk;
ns.peer_isp = (peer_state.aftr_isp | peer_state.user_isp);
if ((ns.conn == C_CONNECTED || ns.conn == C_WF_BITMAP_S) && ns.disk == D_NEGOTIATING)
ns.disk = mdev->new_state_tmp.disk;
cs_flags = CS_VERBOSE + (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED ? 0 : CS_HARD);
- if (ns.pdsk == D_CONSISTENT && is_susp(ns) && ns.conn == C_CONNECTED && os.conn < C_CONNECTED &&
- drbd_test_flag(mdev, NEW_CUR_UUID)) {
- /* Do not allow tl_restart(resend) for a rebooted peer. We can only allow this
+ if (ns.pdsk == D_CONSISTENT && drbd_suspended(mdev) && ns.conn == C_CONNECTED && os.conn < C_CONNECTED &&
+ test_bit(NEW_CUR_UUID, &mdev->flags)) {
+ /* Do not allow tl_restart(RESEND) for a rebooted peer. We can only allow this
for temporary network outages! */
- spin_unlock_irq(&mdev->req_lock);
+ spin_unlock_irq(&mdev->tconn->req_lock);
dev_err(DEV, "Aborting Connect, can not thaw IO with an only Consistent peer\n");
- tl_clear(mdev);
+ tl_clear(mdev->tconn);
drbd_uuid_new_current(mdev);
- drbd_clear_flag(mdev, NEW_CUR_UUID);
- drbd_force_state(mdev, NS2(conn, C_PROTOCOL_ERROR, susp, 0));
- return false;
+ clear_bit(NEW_CUR_UUID, &mdev->flags);
+ conn_request_state(mdev->tconn, NS2(conn, C_PROTOCOL_ERROR, susp, 0), CS_HARD);
+ return -EIO;
}
rv = _drbd_set_state(mdev, ns, cs_flags, NULL);
- ns = mdev->state;
- spin_unlock_irq(&mdev->req_lock);
+ ns = drbd_read_state(mdev);
+ spin_unlock_irq(&mdev->tconn->req_lock);
if (rv < SS_SUCCESS) {
- drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
- return false;
+ conn_request_state(mdev->tconn, NS(conn, C_DISCONNECTING), CS_HARD);
+ return -EIO;
}
if (os.conn > C_WF_REPORT_PARAMS) {
@@ -3351,16 +3976,21 @@
}
}
- mdev->net_conf->want_lose = 0;
+ clear_bit(DISCARD_MY_DATA, &mdev->flags);
drbd_md_sync(mdev); /* update connected indicator, la_size, ... */
- return true;
+ return 0;
}
-static int receive_sync_uuid(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
+static int receive_sync_uuid(struct drbd_tconn *tconn, struct packet_info *pi)
{
- struct p_rs_uuid *p = &mdev->data.rbuf.rs_uuid;
+ struct drbd_conf *mdev;
+ struct p_rs_uuid *p = pi->data;
+
+ mdev = vnr_to_mdev(tconn, pi->vnr);
+ if (!mdev)
+ return -EIO;
wait_event(mdev->misc_wait,
mdev->state.conn == C_WF_SYNC_UUID ||
@@ -3383,7 +4013,7 @@
} else
dev_err(DEV, "Ignoring SyncUUID packet!\n");
- return true;
+ return 0;
}
/**
@@ -3393,27 +4023,27 @@
* code upon failure.
*/
static int
-receive_bitmap_plain(struct drbd_conf *mdev, unsigned int data_size,
- unsigned long *buffer, struct bm_xfer_ctx *c)
+receive_bitmap_plain(struct drbd_conf *mdev, unsigned int size,
+ unsigned long *p, struct bm_xfer_ctx *c)
{
- unsigned num_words = min_t(size_t, BM_PACKET_WORDS, c->bm_words - c->word_offset);
- unsigned want = num_words * sizeof(long);
+ unsigned int data_size = DRBD_SOCKET_BUFFER_SIZE -
+ drbd_header_size(mdev->tconn);
+ unsigned int num_words = min_t(size_t, data_size / sizeof(*p),
+ c->bm_words - c->word_offset);
+ unsigned int want = num_words * sizeof(*p);
int err;
- if (want != data_size) {
- dev_err(DEV, "%s:want (%u) != data_size (%u)\n", __func__, want, data_size);
+ if (want != size) {
+ dev_err(DEV, "%s:want (%u) != size (%u)\n", __func__, want, size);
return -EIO;
}
if (want == 0)
return 0;
- err = drbd_recv(mdev, buffer, want);
- if (err != want) {
- if (err >= 0)
- err = -EIO;
+ err = drbd_recv_all(mdev->tconn, p, want);
+ if (err)
return err;
- }
- drbd_bm_merge_lel(mdev, c->word_offset, num_words, buffer);
+ drbd_bm_merge_lel(mdev, c->word_offset, num_words, p);
c->word_offset += num_words;
c->bit_offset = c->word_offset * BITS_PER_LONG;
@@ -3423,6 +4053,21 @@
return 1;
}
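
With this change receive_bitmap_plain() no longer relies on the fixed BM_PACKET_WORDS constant; it derives how many bitmap words fit into one packet from the socket buffer size minus the protocol-dependent header size. A tiny arithmetic sketch with made-up numbers (4 KiB buffer, 8-byte header); in the kernel the real values come from DRBD_SOCKET_BUFFER_SIZE, drbd_header_size() and the bm_xfer_ctx counters.

/* Sizing one plain bitmap packet; all numbers are illustrative. */
#include <stdio.h>

int main(void)
{
	unsigned int buffer = 4096;			/* socket buffer */
	unsigned int header = 8;			/* on-wire header */
	unsigned int data_size = buffer - header;	/* payload capacity */
	unsigned long words_left = 1000;		/* c->bm_words - c->word_offset */
	unsigned int num_words = data_size / sizeof(long);

	if (num_words > words_left)
		num_words = words_left;
	printf("this packet carries %u words, want = %zu bytes\n",
	       num_words, num_words * sizeof(long));
	return 0;
}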
+static enum drbd_bitmap_code dcbp_get_code(struct p_compressed_bm *p)
+{
+ return (enum drbd_bitmap_code)(p->encoding & 0x0f);
+}
+
+static int dcbp_get_start(struct p_compressed_bm *p)
+{
+ return (p->encoding & 0x80) != 0;
+}
+
+static int dcbp_get_pad_bits(struct p_compressed_bm *p)
+{
+ return (p->encoding >> 4) & 0x7;
+}
+
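
The three dcbp_get_*() helpers above unpack the single encoding byte of a P_COMPRESSED_BITMAP packet: the low nibble selects the bitmap code, bits 4-6 carry the number of pad bits in the last byte of the RLE stream, and bit 7 is the value of the first run. A small round-trip check of that layout; dcbp_pack() is an illustrative sender-side counterpart, only the getters mirror the kernel helpers.

/* Layout of p_compressed_bm->encoding as read by the helpers above:
 *   bits 3..0  bitmap encoding code
 *   bits 6..4  pad bits in the last byte of the RLE stream
 *   bit  7     value of the first run ("start" toggle)
 */
#include <assert.h>
#include <stdio.h>

static unsigned char dcbp_pack(int code, int pad_bits, int start)
{
	return (code & 0x0f) | ((pad_bits & 0x7) << 4) | (start ? 0x80 : 0);
}

static int get_code(unsigned char enc)     { return enc & 0x0f; }
static int get_pad_bits(unsigned char enc) { return (enc >> 4) & 0x7; }
static int get_start(unsigned char enc)    { return (enc & 0x80) != 0; }

int main(void)
{
	unsigned char enc = dcbp_pack(2, 5, 1);	/* arbitrary example values */

	assert(get_code(enc) == 2);
	assert(get_pad_bits(enc) == 5);
	assert(get_start(enc) == 1);
	printf("encoding 0x%02x -> code %d, pad %d, start %d\n",
	       enc, get_code(enc), get_pad_bits(enc), get_start(enc));
	return 0;
}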
/**
* recv_bm_rle_bits
*
@@ -3432,7 +4077,8 @@
static int
recv_bm_rle_bits(struct drbd_conf *mdev,
struct p_compressed_bm *p,
- struct bm_xfer_ctx *c)
+ struct bm_xfer_ctx *c,
+ unsigned int len)
{
struct bitstream bs;
u64 look_ahead;
@@ -3440,12 +4086,11 @@
u64 tmp;
unsigned long s = c->bit_offset;
unsigned long e;
- int len = be16_to_cpu(p->head.length) - (sizeof(*p) - sizeof(p->head));
- int toggle = DCBP_get_start(p);
+ int toggle = dcbp_get_start(p);
int have;
int bits;
- bitstream_init(&bs, p->code, len, DCBP_get_pad_bits(p));
+ bitstream_init(&bs, p->code, len, dcbp_get_pad_bits(p));
bits = bitstream_get_bits(&bs, &look_ahead, 64);
if (bits < 0)
@@ -3497,17 +4142,18 @@
static int
decode_bitmap_c(struct drbd_conf *mdev,
struct p_compressed_bm *p,
- struct bm_xfer_ctx *c)
+ struct bm_xfer_ctx *c,
+ unsigned int len)
{
- if (DCBP_get_code(p) == RLE_VLI_Bits)
- return recv_bm_rle_bits(mdev, p, c);
+ if (dcbp_get_code(p) == RLE_VLI_Bits)
+ return recv_bm_rle_bits(mdev, p, c, len - sizeof(*p));
/* other variants had been implemented for evaluation,
* but have been dropped as this one turned out to be "best"
* during all our tests. */
dev_err(DEV, "receive_bitmap_c: unknown encoding %u\n", p->encoding);
- drbd_force_state(mdev, NS(conn, C_PROTOCOL_ERROR));
+ conn_request_state(mdev->tconn, NS(conn, C_PROTOCOL_ERROR), CS_HARD);
return -EIO;
}
@@ -3515,11 +4161,13 @@
const char *direction, struct bm_xfer_ctx *c)
{
/* what would it take to transfer it "plaintext" */
- unsigned plain = sizeof(struct p_header80) *
- ((c->bm_words+BM_PACKET_WORDS-1)/BM_PACKET_WORDS+1)
- + c->bm_words * sizeof(long);
- unsigned total = c->bytes[0] + c->bytes[1];
- unsigned r;
+ unsigned int header_size = drbd_header_size(mdev->tconn);
+ unsigned int data_size = DRBD_SOCKET_BUFFER_SIZE - header_size;
+ unsigned int plain =
+ header_size * (DIV_ROUND_UP(c->bm_words, data_size) + 1) +
+ c->bm_words * sizeof(unsigned long);
+ unsigned int total = c->bytes[0] + c->bytes[1];
+ unsigned int r;
/* total can not be zero. but just in case: */
if (total == 0)
@@ -3553,67 +4201,63 @@
in order to be agnostic to the 32 vs 64 bits issue.
returns 0 on success, a negative error code otherwise. */
-static int receive_bitmap(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
+static int receive_bitmap(struct drbd_tconn *tconn, struct packet_info *pi)
{
+ struct drbd_conf *mdev;
struct bm_xfer_ctx c;
- void *buffer;
int err;
- int ok = false;
- struct p_header80 *h = &mdev->data.rbuf.header.h80;
+
+ mdev = vnr_to_mdev(tconn, pi->vnr);
+ if (!mdev)
+ return -EIO;
drbd_bm_lock(mdev, "receive bitmap", BM_LOCKED_SET_ALLOWED);
/* you are supposed to send additional out-of-sync information
* if you actually set bits during this phase */
- /* maybe we should use some per thread scratch page,
- * and allocate that during initial device creation? */
- buffer = (unsigned long *) __get_free_page(GFP_NOIO);
- if (!buffer) {
- dev_err(DEV, "failed to allocate one page buffer in %s\n", __func__);
- goto out;
- }
-
c = (struct bm_xfer_ctx) {
.bm_bits = drbd_bm_bits(mdev),
.bm_words = drbd_bm_words(mdev),
};
for(;;) {
- if (cmd == P_BITMAP) {
- err = receive_bitmap_plain(mdev, data_size, buffer, &c);
- } else if (cmd == P_COMPRESSED_BITMAP) {
+ if (pi->cmd == P_BITMAP)
+ err = receive_bitmap_plain(mdev, pi->size, pi->data, &c);
+ else if (pi->cmd == P_COMPRESSED_BITMAP) {
/* MAYBE: sanity check that we speak proto >= 90,
* and the feature is enabled! */
- struct p_compressed_bm *p;
+ struct p_compressed_bm *p = pi->data;
- if (data_size > BM_PACKET_PAYLOAD_BYTES) {
+ if (pi->size > DRBD_SOCKET_BUFFER_SIZE - drbd_header_size(tconn)) {
dev_err(DEV, "ReportCBitmap packet too large\n");
+ err = -EIO;
goto out;
}
- /* use the page buff */
- p = buffer;
- memcpy(p, h, sizeof(*h));
- if (drbd_recv(mdev, p->head.payload, data_size) != data_size)
- goto out;
- if (data_size <= (sizeof(*p) - sizeof(p->head))) {
- dev_err(DEV, "ReportCBitmap packet too small (l:%u)\n", data_size);
+ if (pi->size <= sizeof(*p)) {
+ dev_err(DEV, "ReportCBitmap packet too small (l:%u)\n", pi->size);
+ err = -EIO;
goto out;
}
- err = decode_bitmap_c(mdev, p, &c);
+ err = drbd_recv_all(mdev->tconn, p, pi->size);
+ if (err)
+ goto out;
+ err = decode_bitmap_c(mdev, p, &c, pi->size);
} else {
- dev_warn(DEV, "receive_bitmap: cmd neither ReportBitMap nor ReportCBitMap (is 0x%x)", cmd);
+ dev_warn(DEV, "receive_bitmap: cmd neither ReportBitMap nor ReportCBitMap (is 0x%x)", pi->cmd);
+ err = -EIO;
goto out;
}
- c.packets[cmd == P_BITMAP]++;
- c.bytes[cmd == P_BITMAP] += sizeof(struct p_header80) + data_size;
+ c.packets[pi->cmd == P_BITMAP]++;
+ c.bytes[pi->cmd == P_BITMAP] += drbd_header_size(tconn) + pi->size;
if (err <= 0) {
if (err < 0)
goto out;
break;
}
- if (!drbd_recv_header(mdev, &cmd, &data_size))
+ err = drbd_recv_header(mdev->tconn, pi);
+ if (err)
goto out;
}
@@ -3622,8 +4266,8 @@
if (mdev->state.conn == C_WF_BITMAP_T) {
enum drbd_state_rv rv;
- ok = !drbd_send_bitmap(mdev);
- if (!ok)
+ err = drbd_send_bitmap(mdev);
+ if (err)
goto out;
/* Omit CS_ORDERED with this state transition to avoid deadlocks. */
rv = _drbd_request_state(mdev, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE);
@@ -3634,47 +4278,40 @@
dev_info(DEV, "unexpected cstate (%s) in receive_bitmap\n",
drbd_conn_str(mdev->state.conn));
}
+ err = 0;
- ok = true;
out:
drbd_bm_unlock(mdev);
- if (ok && mdev->state.conn == C_WF_BITMAP_S)
+ if (!err && mdev->state.conn == C_WF_BITMAP_S)
drbd_start_resync(mdev, C_SYNC_SOURCE);
- free_page((unsigned long) buffer);
- return ok;
+ return err;
}
-static int receive_skip(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
+static int receive_skip(struct drbd_tconn *tconn, struct packet_info *pi)
{
- /* TODO zero copy sink :) */
- static char sink[128];
- int size, want, r;
+ conn_warn(tconn, "skipping unknown optional packet type %d, l: %d!\n",
+ pi->cmd, pi->size);
- dev_warn(DEV, "skipping unknown optional packet type %d, l: %d!\n",
- cmd, data_size);
-
- size = data_size;
- while (size > 0) {
- want = min_t(int, size, sizeof(sink));
- r = drbd_recv(mdev, sink, want);
- ERR_IF(r <= 0) break;
- size -= r;
- }
- return size == 0;
+ return ignore_remaining_packet(tconn, pi);
}
-static int receive_UnplugRemote(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
+static int receive_UnplugRemote(struct drbd_tconn *tconn, struct packet_info *pi)
{
/* Make sure we've acked all the TCP data associated
* with the data requests being unplugged */
- drbd_tcp_quickack(mdev->data.socket);
+ drbd_tcp_quickack(tconn->data.socket);
- return true;
+ return 0;
}
-static int receive_out_of_sync(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
+static int receive_out_of_sync(struct drbd_tconn *tconn, struct packet_info *pi)
{
- struct p_block_desc *p = &mdev->data.rbuf.block_desc;
+ struct drbd_conf *mdev;
+ struct p_block_desc *p = pi->data;
+
+ mdev = vnr_to_mdev(tconn, pi->vnr);
+ if (!mdev)
+ return -EIO;
switch (mdev->state.conn) {
case C_WF_SYNC_UUID:
@@ -3688,15 +4325,13 @@
drbd_set_out_of_sync(mdev, be64_to_cpu(p->sector), be32_to_cpu(p->blksize));
- return true;
+ return 0;
}
-typedef int (*drbd_cmd_handler_f)(struct drbd_conf *, enum drbd_packets cmd, unsigned int to_receive);
-
struct data_cmd {
int expect_payload;
size_t pkt_size;
- drbd_cmd_handler_f function;
+ int (*fn)(struct drbd_tconn *, struct packet_info *);
};
static struct data_cmd drbd_cmd_handler[] = {
@@ -3704,13 +4339,13 @@
[P_DATA_REPLY] = { 1, sizeof(struct p_data), receive_DataReply },
[P_RS_DATA_REPLY] = { 1, sizeof(struct p_data), receive_RSDataReply } ,
[P_BARRIER] = { 0, sizeof(struct p_barrier), receive_Barrier } ,
- [P_BITMAP] = { 1, sizeof(struct p_header80), receive_bitmap } ,
- [P_COMPRESSED_BITMAP] = { 1, sizeof(struct p_header80), receive_bitmap } ,
- [P_UNPLUG_REMOTE] = { 0, sizeof(struct p_header80), receive_UnplugRemote },
+ [P_BITMAP] = { 1, 0, receive_bitmap } ,
+ [P_COMPRESSED_BITMAP] = { 1, 0, receive_bitmap } ,
+ [P_UNPLUG_REMOTE] = { 0, 0, receive_UnplugRemote },
[P_DATA_REQUEST] = { 0, sizeof(struct p_block_req), receive_DataRequest },
[P_RS_DATA_REQUEST] = { 0, sizeof(struct p_block_req), receive_DataRequest },
- [P_SYNC_PARAM] = { 1, sizeof(struct p_header80), receive_SyncParam },
- [P_SYNC_PARAM89] = { 1, sizeof(struct p_header80), receive_SyncParam },
+ [P_SYNC_PARAM] = { 1, 0, receive_SyncParam },
+ [P_SYNC_PARAM89] = { 1, 0, receive_SyncParam },
[P_PROTOCOL] = { 1, sizeof(struct p_protocol), receive_protocol },
[P_UUIDS] = { 0, sizeof(struct p_uuids), receive_uuids },
[P_SIZES] = { 0, sizeof(struct p_sizes), receive_sizes },
@@ -3722,124 +4357,75 @@
[P_CSUM_RS_REQUEST] = { 1, sizeof(struct p_block_req), receive_DataRequest },
[P_DELAY_PROBE] = { 0, sizeof(struct p_delay_probe93), receive_skip },
[P_OUT_OF_SYNC] = { 0, sizeof(struct p_block_desc), receive_out_of_sync },
- /* anything missing from this table is in
- * the asender_tbl, see get_asender_cmd */
- [P_MAX_CMD] = { 0, 0, NULL },
+ [P_CONN_ST_CHG_REQ] = { 0, sizeof(struct p_req_state), receive_req_conn_state },
+ [P_PROTOCOL_UPDATE] = { 1, sizeof(struct p_protocol), receive_protocol },
};
-/* All handler functions that expect a sub-header get that sub-heder in
- mdev->data.rbuf.header.head.payload.
-
- Usually in mdev->data.rbuf.header.head the callback can find the usual
- p_header, but they may not rely on that. Since there is also p_header95 !
- */
-
-static void drbdd(struct drbd_conf *mdev)
+static void drbdd(struct drbd_tconn *tconn)
{
- union p_header *header = &mdev->data.rbuf.header;
- unsigned int packet_size;
- enum drbd_packets cmd;
+ struct packet_info pi;
size_t shs; /* sub header size */
- int rv;
+ int err;
- while (get_t_state(&mdev->receiver) == Running) {
- drbd_thread_current_set_cpu(mdev);
- if (!drbd_recv_header(mdev, &cmd, &packet_size))
+ while (get_t_state(&tconn->receiver) == RUNNING) {
+ struct data_cmd *cmd;
+
+ drbd_thread_current_set_cpu(&tconn->receiver);
+ if (drbd_recv_header(tconn, &pi))
goto err_out;
- if (unlikely(cmd >= P_MAX_CMD || !drbd_cmd_handler[cmd].function)) {
- dev_err(DEV, "unknown packet type %d, l: %d!\n", cmd, packet_size);
+ cmd = &drbd_cmd_handler[pi.cmd];
+ if (unlikely(pi.cmd >= ARRAY_SIZE(drbd_cmd_handler) || !cmd->fn)) {
+ conn_err(tconn, "Unexpected data packet %s (0x%04x)",
+ cmdname(pi.cmd), pi.cmd);
goto err_out;
}
- shs = drbd_cmd_handler[cmd].pkt_size - sizeof(union p_header);
- if (packet_size - shs > 0 && !drbd_cmd_handler[cmd].expect_payload) {
- dev_err(DEV, "No payload expected %s l:%d\n", cmdname(cmd), packet_size);
+ shs = cmd->pkt_size;
+ if (pi.size > shs && !cmd->expect_payload) {
+ conn_err(tconn, "No payload expected %s l:%d\n",
+ cmdname(pi.cmd), pi.size);
goto err_out;
}
if (shs) {
- rv = drbd_recv(mdev, &header->h80.payload, shs);
- if (unlikely(rv != shs)) {
- if (!signal_pending(current))
- dev_warn(DEV, "short read while reading sub header: rv=%d\n", rv);
+ err = drbd_recv_all_warn(tconn, pi.data, shs);
+ if (err)
goto err_out;
- }
+ pi.size -= shs;
}
- rv = drbd_cmd_handler[cmd].function(mdev, cmd, packet_size - shs);
-
- if (unlikely(!rv)) {
- dev_err(DEV, "error receiving %s, l: %d!\n",
- cmdname(cmd), packet_size);
+ err = cmd->fn(tconn, &pi);
+ if (err) {
+ conn_err(tconn, "error receiving %s, e: %d l: %d!\n",
+ cmdname(pi.cmd), err, pi.size);
goto err_out;
}
}
+ return;
- if (0) {
- err_out:
- drbd_force_state(mdev, NS(conn, C_PROTOCOL_ERROR));
- }
- /* If we leave here, we probably want to update at least the
- * "Connected" indicator on stable storage. Do so explicitly here. */
- drbd_md_sync(mdev);
+ err_out:
+ conn_request_state(tconn, NS(conn, C_PROTOCOL_ERROR), CS_HARD);
}
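
drbdd() now drives everything through the drbd_cmd_handler[] table: the packet's command number indexes the table, any id past ARRAY_SIZE() or without a handler is a protocol error, a fixed sub-header of pkt_size bytes is read first, and the remaining payload length is handed to ->fn(). Below is a compact, self-contained sketch of that table-driven dispatch; the command numbers, sizes and the two toy handlers are made up, not DRBD's.

/* Table-driven packet dispatch in the style of drbd_cmd_handler[]/drbdd(). */
#include <stddef.h>
#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

struct packet { unsigned int cmd, size; };

struct cmd_entry {
	int expect_payload;
	size_t pkt_size;			/* fixed sub-header size */
	int (*fn)(const struct packet *pi);
};

static int handle_ping(const struct packet *pi)  { (void)pi; puts("ping"); return 0; }
static int handle_sizes(const struct packet *pi) { printf("sizes, l:%u\n", pi->size); return 0; }

static const struct cmd_entry handlers[] = {
	[0] = { 0, 0,  handle_ping },
	[3] = { 1, 16, handle_sizes },
};

static int dispatch(struct packet *pi)
{
	const struct cmd_entry *cmd;

	if (pi->cmd >= ARRAY_SIZE(handlers) || !handlers[pi->cmd].fn)
		return -1;			/* unexpected packet */
	cmd = &handlers[pi->cmd];
	if (pi->size < cmd->pkt_size)
		return -1;			/* truncated sub-header */
	if (pi->size > cmd->pkt_size && !cmd->expect_payload)
		return -1;			/* no payload expected */
	pi->size -= cmd->pkt_size;		/* payload left after the sub-header */
	return cmd->fn(pi);
}

int main(void)
{
	struct packet ping  = { .cmd = 0, .size = 0 };
	struct packet sizes = { .cmd = 3, .size = 40 };
	struct packet bogus = { .cmd = 9, .size = 0 };

	printf("ping:  %d\n", dispatch(&ping));
	printf("sizes: %d\n", dispatch(&sizes));
	printf("bogus: %d\n", dispatch(&bogus));
	return 0;
}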
-void drbd_flush_workqueue(struct drbd_conf *mdev)
+void conn_flush_workqueue(struct drbd_tconn *tconn)
{
struct drbd_wq_barrier barr;
barr.w.cb = w_prev_work_done;
+ barr.w.tconn = tconn;
init_completion(&barr.done);
- drbd_queue_work(&mdev->data.work, &barr.w);
+ drbd_queue_work(&tconn->sender_work, &barr.w);
wait_for_completion(&barr.done);
}
-void drbd_free_tl_hash(struct drbd_conf *mdev)
+static void conn_disconnect(struct drbd_tconn *tconn)
{
- struct hlist_head *h;
+ struct drbd_conf *mdev;
+ enum drbd_conns oc;
+ int vnr;
- spin_lock_irq(&mdev->req_lock);
-
- if (!mdev->tl_hash || mdev->state.conn != C_STANDALONE) {
- spin_unlock_irq(&mdev->req_lock);
- return;
- }
- /* paranoia code */
- for (h = mdev->ee_hash; h < mdev->ee_hash + mdev->ee_hash_s; h++)
- if (h->first)
- dev_err(DEV, "ASSERT FAILED ee_hash[%u].first == %p, expected NULL\n",
- (int)(h - mdev->ee_hash), h->first);
- kfree(mdev->ee_hash);
- mdev->ee_hash = NULL;
- mdev->ee_hash_s = 0;
-
- /* We may not have had the chance to wait for all locally pending
- * application requests. The hlist_add_fake() prevents access after
- * free on master bio completion. */
- for (h = mdev->tl_hash; h < mdev->tl_hash + mdev->tl_hash_s; h++) {
- struct drbd_request *req;
- struct hlist_node *pos, *n;
- hlist_for_each_entry_safe(req, pos, n, h, collision) {
- hlist_del_init(&req->collision);
- hlist_add_fake(&req->collision);
- }
- }
-
- kfree(mdev->tl_hash);
- mdev->tl_hash = NULL;
- mdev->tl_hash_s = 0;
- spin_unlock_irq(&mdev->req_lock);
-}
-
-static void drbd_disconnect(struct drbd_conf *mdev)
-{
- enum drbd_fencing_p fp;
- union drbd_state os, ns;
- int rv = SS_UNKNOWN_ERROR;
- unsigned int i;
-
- if (mdev->state.conn == C_STANDALONE)
+ if (tconn->cstate == C_STANDALONE)
return;
/* We are about to start the cleanup after connection loss.
@@ -3847,18 +4433,54 @@
* Usually we should be in some network failure state already,
* but just in case we are not, we fix it up here.
*/
- drbd_force_state(mdev, NS(conn, C_NETWORK_FAILURE));
+ conn_request_state(tconn, NS(conn, C_NETWORK_FAILURE), CS_HARD);
/* asender does not clean up anything. it must not interfere, either */
- drbd_thread_stop(&mdev->asender);
- drbd_free_sock(mdev);
+ drbd_thread_stop(&tconn->asender);
+ drbd_free_sock(tconn);
+
+ rcu_read_lock();
+ idr_for_each_entry(&tconn->volumes, mdev, vnr) {
+ kref_get(&mdev->kref);
+ rcu_read_unlock();
+ drbd_disconnected(mdev);
+ kref_put(&mdev->kref, &drbd_minor_destroy);
+ rcu_read_lock();
+ }
+ rcu_read_unlock();
+
+ if (!list_empty(&tconn->current_epoch->list))
+ conn_err(tconn, "ASSERTION FAILED: tconn->current_epoch->list not empty\n");
+ /* ok, no more ee's on the fly, it is safe to reset the epoch_size */
+ atomic_set(&tconn->current_epoch->epoch_size, 0);
+ tconn->send.seen_any_write_yet = false;
+
+ conn_info(tconn, "Connection closed\n");
+
+ if (conn_highest_role(tconn) == R_PRIMARY && conn_highest_pdsk(tconn) >= D_UNKNOWN)
+ conn_try_outdate_peer_async(tconn);
+
+ spin_lock_irq(&tconn->req_lock);
+ oc = tconn->cstate;
+ if (oc >= C_UNCONNECTED)
+ _conn_request_state(tconn, NS(conn, C_UNCONNECTED), CS_VERBOSE);
+
+ spin_unlock_irq(&tconn->req_lock);
+
+ if (oc == C_DISCONNECTING)
+ conn_request_state(tconn, NS(conn, C_STANDALONE), CS_VERBOSE | CS_HARD);
+}
+
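
conn_disconnect() walks tconn->volumes under rcu_read_lock(), but drbd_disconnected() may sleep, so each iteration pins the device with kref_get(), drops the RCU read lock, does the blocking work, releases the reference and re-enters the read-side critical section before advancing. The sketch below reproduces that pin-unlock-work-relock shape with a plain mutex and reference counter standing in for RCU and kref; all types and names are illustrative.

/* Shape of the per-volume cleanup loop in conn_disconnect(). */
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

struct volume {
	int vnr;
	int refs;	/* stands in for mdev->kref */
};

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct volume volumes[] = { { 0, 1 }, { 1, 1 }, { 2, 1 } };

static void volume_disconnected(struct volume *v)
{
	usleep(1000);	/* may sleep: must not run under list_lock */
	printf("cleaned up volume %d\n", v->vnr);
}

static void disconnect_all(void)
{
	unsigned int i;

	pthread_mutex_lock(&list_lock);			/* rcu_read_lock() */
	for (i = 0; i < sizeof(volumes) / sizeof(volumes[0]); i++) {
		struct volume *v = &volumes[i];

		v->refs++;				/* kref_get() */
		pthread_mutex_unlock(&list_lock);	/* rcu_read_unlock() */

		volume_disconnected(v);			/* sleeping work */

		pthread_mutex_lock(&list_lock);		/* rcu_read_lock() */
		v->refs--;				/* kref_put() */
	}
	pthread_mutex_unlock(&list_lock);
}

int main(void)
{
	disconnect_all();
	return 0;
}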
+static int drbd_disconnected(struct drbd_conf *mdev)
+{
+ unsigned int i;
/* wait for current activity to cease. */
- spin_lock_irq(&mdev->req_lock);
+ spin_lock_irq(&mdev->tconn->req_lock);
_drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
_drbd_wait_ee_list_empty(mdev, &mdev->sync_ee);
_drbd_wait_ee_list_empty(mdev, &mdev->read_ee);
- spin_unlock_irq(&mdev->req_lock);
+ spin_unlock_irq(&mdev->tconn->req_lock);
/* We do not have data structures that would allow us to
* get the rs_pending_cnt down to 0 again.
@@ -3876,7 +4498,6 @@
atomic_set(&mdev->rs_pending_cnt, 0);
wake_up(&mdev->misc_wait);
- /* make sure syncer is stopped and w_resume_next_sg queued */
del_timer_sync(&mdev->resync_timer);
resync_timer_fn((unsigned long)mdev);
@@ -3885,53 +4506,28 @@
* to be "canceled" */
drbd_flush_workqueue(mdev);
- /* This also does reclaim_net_ee(). If we do this too early, we might
- * miss some resync ee and pages.*/
- drbd_process_done_ee(mdev);
+ drbd_finish_peer_reqs(mdev);
+
+ /* This second workqueue flush is necessary, since drbd_finish_peer_reqs()
+ might have queued new work. The one before drbd_finish_peer_reqs() is
+ necessary to reclaim net_ee in drbd_finish_peer_reqs(). */
+ drbd_flush_workqueue(mdev);
+
+ /* need to do it again, drbd_finish_peer_reqs() may have populated it
+ * again via drbd_try_clear_on_disk_bm(). */
+ drbd_rs_cancel_all(mdev);
kfree(mdev->p_uuid);
mdev->p_uuid = NULL;
- if (!is_susp(mdev->state))
- tl_clear(mdev);
-
- dev_info(DEV, "Connection closed\n");
+ if (!drbd_suspended(mdev))
+ tl_clear(mdev->tconn);
drbd_md_sync(mdev);
- fp = FP_DONT_CARE;
- if (get_ldev(mdev)) {
- fp = mdev->ldev->dc.fencing;
- put_ldev(mdev);
- }
-
- if (mdev->state.role == R_PRIMARY && fp >= FP_RESOURCE && mdev->state.pdsk >= D_UNKNOWN)
- drbd_try_outdate_peer_async(mdev);
-
- spin_lock_irq(&mdev->req_lock);
- os = mdev->state;
- if (os.conn >= C_UNCONNECTED) {
- /* Do not restart in case we are C_DISCONNECTING */
- ns = os;
- ns.conn = C_UNCONNECTED;
- rv = _drbd_set_state(mdev, ns, CS_VERBOSE, NULL);
- }
- spin_unlock_irq(&mdev->req_lock);
-
- if (os.conn == C_DISCONNECTING) {
- wait_event(mdev->net_cnt_wait, atomic_read(&mdev->net_cnt) == 0);
-
- crypto_free_hash(mdev->cram_hmac_tfm);
- mdev->cram_hmac_tfm = NULL;
-
- kfree(mdev->net_conf);
- mdev->net_conf = NULL;
- drbd_request_state(mdev, NS(conn, C_STANDALONE));
- }
-
/* serialize with bitmap writeout triggered by the state change,
* if any. */
- wait_event(mdev->misc_wait, !drbd_test_flag(mdev, BITMAP_IO));
+ wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags));
/* tcp_close and release of sendpage pages can be deferred. I don't
* want to use SO_LINGER, because apparently it can be deferred for
@@ -3940,7 +4536,7 @@
* Actually we don't care for exactly when the network stack does its
* put_page(), but release our reference on these pages right here.
*/
- i = drbd_release_ee(mdev, &mdev->net_ee);
+ i = drbd_free_peer_reqs(mdev, &mdev->net_ee);
if (i)
dev_info(DEV, "net_ee not empty, killed %u entries\n", i);
i = atomic_read(&mdev->pp_in_use_by_net);
@@ -3955,9 +4551,7 @@
D_ASSERT(list_empty(&mdev->sync_ee));
D_ASSERT(list_empty(&mdev->done_ee));
- /* ok, no more ee's on the fly, it is safe to reset the epoch_size */
- atomic_set(&mdev->current_epoch->epoch_size, 0);
- D_ASSERT(list_empty(&mdev->current_epoch->list));
+ return 0;
}
/*
@@ -3969,29 +4563,19 @@
*
* for now, they are expected to be zero, but ignored.
*/
-static int drbd_send_handshake(struct drbd_conf *mdev)
+static int drbd_send_features(struct drbd_tconn *tconn)
{
- /* ASSERT current == mdev->receiver ... */
- struct p_handshake *p = &mdev->data.sbuf.handshake;
- int ok;
+ struct drbd_socket *sock;
+ struct p_connection_features *p;
- if (mutex_lock_interruptible(&mdev->data.mutex)) {
- dev_err(DEV, "interrupted during initial handshake\n");
- return 0; /* interrupted. not ok. */
- }
-
- if (mdev->data.socket == NULL) {
- mutex_unlock(&mdev->data.mutex);
- return 0;
- }
-
+ sock = &tconn->data;
+ p = conn_prepare_command(tconn, sock);
+ if (!p)
+ return -EIO;
memset(p, 0, sizeof(*p));
p->protocol_min = cpu_to_be32(PRO_VERSION_MIN);
p->protocol_max = cpu_to_be32(PRO_VERSION_MAX);
- ok = _drbd_send_cmd( mdev, mdev->data.socket, P_HAND_SHAKE,
- (struct p_header80 *)p, sizeof(*p), 0 );
- mutex_unlock(&mdev->data.mutex);
- return ok;
+ return conn_send_command(tconn, sock, P_CONNECTION_FEATURES, sizeof(*p), NULL, 0);
}
/*
@@ -4001,42 +4585,38 @@
* -1 peer talks different language,
* no point in trying again, please go standalone.
*/
-static int drbd_do_handshake(struct drbd_conf *mdev)
+static int drbd_do_features(struct drbd_tconn *tconn)
{
- /* ASSERT current == mdev->receiver ... */
- struct p_handshake *p = &mdev->data.rbuf.handshake;
- const int expect = sizeof(struct p_handshake) - sizeof(struct p_header80);
- unsigned int length;
- enum drbd_packets cmd;
- int rv;
+ /* ASSERT current == tconn->receiver ... */
+ struct p_connection_features *p;
+ const int expect = sizeof(struct p_connection_features);
+ struct packet_info pi;
+ int err;
- rv = drbd_send_handshake(mdev);
- if (!rv)
+ err = drbd_send_features(tconn);
+ if (err)
return 0;
- rv = drbd_recv_header(mdev, &cmd, &length);
- if (!rv)
+ err = drbd_recv_header(tconn, &pi);
+ if (err)
return 0;
- if (cmd != P_HAND_SHAKE) {
- dev_err(DEV, "expected HandShake packet, received: %s (0x%04x)\n",
- cmdname(cmd), cmd);
+ if (pi.cmd != P_CONNECTION_FEATURES) {
+ conn_err(tconn, "expected ConnectionFeatures packet, received: %s (0x%04x)\n",
+ cmdname(pi.cmd), pi.cmd);
return -1;
}
- if (length != expect) {
- dev_err(DEV, "expected HandShake length: %u, received: %u\n",
- expect, length);
+ if (pi.size != expect) {
+ conn_err(tconn, "expected ConnectionFeatures length: %u, received: %u\n",
+ expect, pi.size);
return -1;
}
- rv = drbd_recv(mdev, &p->head.payload, expect);
-
- if (rv != expect) {
- if (!signal_pending(current))
- dev_warn(DEV, "short read receiving handshake packet: l=%u\n", rv);
+ p = pi.data;
+ err = drbd_recv_all_warn(tconn, p, expect);
+ if (err)
return 0;
- }
p->protocol_min = be32_to_cpu(p->protocol_min);
p->protocol_max = be32_to_cpu(p->protocol_max);
@@ -4047,15 +4627,15 @@
PRO_VERSION_MIN > p->protocol_max)
goto incompat;
- mdev->agreed_pro_version = min_t(int, PRO_VERSION_MAX, p->protocol_max);
+ tconn->agreed_pro_version = min_t(int, PRO_VERSION_MAX, p->protocol_max);
- dev_info(DEV, "Handshake successful: "
- "Agreed network protocol version %d\n", mdev->agreed_pro_version);
+ conn_info(tconn, "Handshake successful: "
+ "Agreed network protocol version %d\n", tconn->agreed_pro_version);
return 1;
incompat:
- dev_err(DEV, "incompatible DRBD dialects: "
+ conn_err(tconn, "incompatible DRBD dialects: "
"I support %d-%d, peer supports %d-%d\n",
PRO_VERSION_MIN, PRO_VERSION_MAX,
p->protocol_min, p->protocol_max);
@@ -4063,7 +4643,7 @@
}
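
drbd_do_features() accepts the peer when the two supported version ranges overlap and then agrees on the highest protocol both ends speak (min_t of the local maximum and the peer's maximum). A small stand-alone illustration of that range intersection; LOCAL_MIN/LOCAL_MAX and all numbers below are examples, not the actual PRO_VERSION_MIN/MAX values.

/* Version negotiation: compatible iff the ranges overlap,
 * agreed version = min(our max, peer's max). */
#include <stdio.h>

#define LOCAL_MIN 86
#define LOCAL_MAX 101

static int agree(int peer_min, int peer_max)
{
	if (LOCAL_MAX < peer_min || LOCAL_MIN > peer_max)
		return -1;			/* incompatible dialects */
	return LOCAL_MAX < peer_max ? LOCAL_MAX : peer_max;
}

int main(void)
{
	printf("%d\n", agree(86, 96));		/* overlapping: agree on 96 */
	printf("%d\n", agree(100, 110));	/* overlapping: agree on 101 */
	printf("%d\n", agree(110, 120));	/* disjoint: incompatible */
	return 0;
}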
#if !defined(CONFIG_CRYPTO_HMAC) && !defined(CONFIG_CRYPTO_HMAC_MODULE)
-static int drbd_do_auth(struct drbd_conf *mdev)
+static int drbd_do_auth(struct drbd_tconn *tconn)
{
dev_err(DEV, "This kernel was build without CONFIG_CRYPTO_HMAC.\n");
dev_err(DEV, "You need to disable 'cram-hmac-alg' in drbd.conf.\n");
@@ -4078,121 +4658,139 @@
-1 - auth failed, don't try again.
*/
-static int drbd_do_auth(struct drbd_conf *mdev)
+static int drbd_do_auth(struct drbd_tconn *tconn)
{
+ struct drbd_socket *sock;
char my_challenge[CHALLENGE_LEN]; /* 64 Bytes... */
struct scatterlist sg;
char *response = NULL;
char *right_response = NULL;
char *peers_ch = NULL;
- unsigned int key_len = strlen(mdev->net_conf->shared_secret);
+ unsigned int key_len;
+ char secret[SHARED_SECRET_MAX]; /* 64 byte */
unsigned int resp_size;
struct hash_desc desc;
- enum drbd_packets cmd;
- unsigned int length;
- int rv;
+ struct packet_info pi;
+ struct net_conf *nc;
+ int err, rv;
- desc.tfm = mdev->cram_hmac_tfm;
+ /* FIXME: Put the challenge/response into the preallocated socket buffer. */
+
+ rcu_read_lock();
+ nc = rcu_dereference(tconn->net_conf);
+ key_len = strlen(nc->shared_secret);
+ memcpy(secret, nc->shared_secret, key_len);
+ rcu_read_unlock();
+
+ desc.tfm = tconn->cram_hmac_tfm;
desc.flags = 0;
- rv = crypto_hash_setkey(mdev->cram_hmac_tfm,
- (u8 *)mdev->net_conf->shared_secret, key_len);
+ rv = crypto_hash_setkey(tconn->cram_hmac_tfm, (u8 *)secret, key_len);
if (rv) {
- dev_err(DEV, "crypto_hash_setkey() failed with %d\n", rv);
+ conn_err(tconn, "crypto_hash_setkey() failed with %d\n", rv);
rv = -1;
goto fail;
}
get_random_bytes(my_challenge, CHALLENGE_LEN);
- rv = drbd_send_cmd2(mdev, P_AUTH_CHALLENGE, my_challenge, CHALLENGE_LEN);
+ sock = &tconn->data;
+ if (!conn_prepare_command(tconn, sock)) {
+ rv = 0;
+ goto fail;
+ }
+ rv = !conn_send_command(tconn, sock, P_AUTH_CHALLENGE, 0,
+ my_challenge, CHALLENGE_LEN);
if (!rv)
goto fail;
- rv = drbd_recv_header(mdev, &cmd, &length);
- if (!rv)
- goto fail;
-
- if (cmd != P_AUTH_CHALLENGE) {
- dev_err(DEV, "expected AuthChallenge packet, received: %s (0x%04x)\n",
- cmdname(cmd), cmd);
+ err = drbd_recv_header(tconn, &pi);
+ if (err) {
rv = 0;
goto fail;
}
- if (length > CHALLENGE_LEN * 2) {
- dev_err(DEV, "expected AuthChallenge payload too big.\n");
+ if (pi.cmd != P_AUTH_CHALLENGE) {
+ conn_err(tconn, "expected AuthChallenge packet, received: %s (0x%04x)\n",
+ cmdname(pi.cmd), pi.cmd);
+ rv = 0;
+ goto fail;
+ }
+
+ if (pi.size > CHALLENGE_LEN * 2) {
+ conn_err(tconn, "expected AuthChallenge payload too big.\n");
rv = -1;
goto fail;
}
- peers_ch = kmalloc(length, GFP_NOIO);
+ peers_ch = kmalloc(pi.size, GFP_NOIO);
if (peers_ch == NULL) {
- dev_err(DEV, "kmalloc of peers_ch failed\n");
+ conn_err(tconn, "kmalloc of peers_ch failed\n");
rv = -1;
goto fail;
}
- rv = drbd_recv(mdev, peers_ch, length);
-
- if (rv != length) {
- if (!signal_pending(current))
- dev_warn(DEV, "short read AuthChallenge: l=%u\n", rv);
+ err = drbd_recv_all_warn(tconn, peers_ch, pi.size);
+ if (err) {
rv = 0;
goto fail;
}
- resp_size = crypto_hash_digestsize(mdev->cram_hmac_tfm);
+ resp_size = crypto_hash_digestsize(tconn->cram_hmac_tfm);
response = kmalloc(resp_size, GFP_NOIO);
if (response == NULL) {
- dev_err(DEV, "kmalloc of response failed\n");
+ conn_err(tconn, "kmalloc of response failed\n");
rv = -1;
goto fail;
}
sg_init_table(&sg, 1);
- sg_set_buf(&sg, peers_ch, length);
+ sg_set_buf(&sg, peers_ch, pi.size);
rv = crypto_hash_digest(&desc, &sg, sg.length, response);
if (rv) {
- dev_err(DEV, "crypto_hash_digest() failed with %d\n", rv);
+ conn_err(tconn, "crypto_hash_digest() failed with %d\n", rv);
rv = -1;
goto fail;
}
- rv = drbd_send_cmd2(mdev, P_AUTH_RESPONSE, response, resp_size);
+ if (!conn_prepare_command(tconn, sock)) {
+ rv = 0;
+ goto fail;
+ }
+ rv = !conn_send_command(tconn, sock, P_AUTH_RESPONSE, 0,
+ response, resp_size);
if (!rv)
goto fail;
- rv = drbd_recv_header(mdev, &cmd, &length);
- if (!rv)
- goto fail;
-
- if (cmd != P_AUTH_RESPONSE) {
- dev_err(DEV, "expected AuthResponse packet, received: %s (0x%04x)\n",
- cmdname(cmd), cmd);
+ err = drbd_recv_header(tconn, &pi);
+ if (err) {
rv = 0;
goto fail;
}
- if (length != resp_size) {
- dev_err(DEV, "expected AuthResponse payload of wrong size\n");
+ if (pi.cmd != P_AUTH_RESPONSE) {
+ conn_err(tconn, "expected AuthResponse packet, received: %s (0x%04x)\n",
+ cmdname(pi.cmd), pi.cmd);
rv = 0;
goto fail;
}
- rv = drbd_recv(mdev, response , resp_size);
+ if (pi.size != resp_size) {
+ conn_err(tconn, "expected AuthResponse payload of wrong size\n");
+ rv = 0;
+ goto fail;
+ }
- if (rv != resp_size) {
- if (!signal_pending(current))
- dev_warn(DEV, "short read receiving AuthResponse: l=%u\n", rv);
+ err = drbd_recv_all_warn(tconn, response, resp_size);
+ if (err) {
rv = 0;
goto fail;
}
right_response = kmalloc(resp_size, GFP_NOIO);
if (right_response == NULL) {
- dev_err(DEV, "kmalloc of right_response failed\n");
+ conn_err(tconn, "kmalloc of right_response failed\n");
rv = -1;
goto fail;
}
@@ -4201,7 +4799,7 @@
rv = crypto_hash_digest(&desc, &sg, sg.length, right_response);
if (rv) {
- dev_err(DEV, "crypto_hash_digest() failed with %d\n", rv);
+ conn_err(tconn, "crypto_hash_digest() failed with %d\n", rv);
rv = -1;
goto fail;
}
@@ -4209,8 +4807,8 @@
rv = !memcmp(response, right_response, resp_size);
if (rv)
- dev_info(DEV, "Peer authenticated using %d bytes of '%s' HMAC\n",
- resp_size, mdev->net_conf->cram_hmac_alg);
+ conn_info(tconn, "Peer authenticated using %d bytes HMAC\n",
+ resp_size);
else
rv = -1;
@@ -4225,82 +4823,106 @@
int drbdd_init(struct drbd_thread *thi)
{
- struct drbd_conf *mdev = thi->mdev;
- unsigned int minor = mdev_to_minor(mdev);
+ struct drbd_tconn *tconn = thi->tconn;
int h;
- sprintf(current->comm, "drbd%d_receiver", minor);
-
- dev_info(DEV, "receiver (re)started\n");
+ conn_info(tconn, "receiver (re)started\n");
do {
- h = drbd_connect(mdev);
+ h = conn_connect(tconn);
if (h == 0) {
- drbd_disconnect(mdev);
+ conn_disconnect(tconn);
schedule_timeout_interruptible(HZ);
}
if (h == -1) {
- dev_warn(DEV, "Discarding network configuration.\n");
- drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
+ conn_warn(tconn, "Discarding network configuration.\n");
+ conn_request_state(tconn, NS(conn, C_DISCONNECTING), CS_HARD);
}
} while (h == 0);
- if (h > 0) {
- if (get_net_conf(mdev)) {
- drbdd(mdev);
- put_net_conf(mdev);
- }
- }
+ if (h > 0)
+ drbdd(tconn);
- drbd_disconnect(mdev);
+ conn_disconnect(tconn);
- dev_info(DEV, "receiver terminated\n");
+ conn_info(tconn, "receiver terminated\n");
return 0;
}
/* ********* acknowledge sender ******** */
-static int got_RqSReply(struct drbd_conf *mdev, struct p_header80 *h)
+static int got_conn_RqSReply(struct drbd_tconn *tconn, struct packet_info *pi)
{
- struct p_req_state_reply *p = (struct p_req_state_reply *)h;
-
+ struct p_req_state_reply *p = pi->data;
int retcode = be32_to_cpu(p->retcode);
if (retcode >= SS_SUCCESS) {
- drbd_set_flag(mdev, CL_ST_CHG_SUCCESS);
+ set_bit(CONN_WD_ST_CHG_OKAY, &tconn->flags);
} else {
- drbd_set_flag(mdev, CL_ST_CHG_FAIL);
+ set_bit(CONN_WD_ST_CHG_FAIL, &tconn->flags);
+ conn_err(tconn, "Requested state change failed by peer: %s (%d)\n",
+ drbd_set_st_err_str(retcode), retcode);
+ }
+ wake_up(&tconn->ping_wait);
+
+ return 0;
+}
+
+static int got_RqSReply(struct drbd_tconn *tconn, struct packet_info *pi)
+{
+ struct drbd_conf *mdev;
+ struct p_req_state_reply *p = pi->data;
+ int retcode = be32_to_cpu(p->retcode);
+
+ mdev = vnr_to_mdev(tconn, pi->vnr);
+ if (!mdev)
+ return -EIO;
+
+ if (test_bit(CONN_WD_ST_CHG_REQ, &tconn->flags)) {
+ D_ASSERT(tconn->agreed_pro_version < 100);
+ return got_conn_RqSReply(tconn, pi);
+ }
+
+ if (retcode >= SS_SUCCESS) {
+ set_bit(CL_ST_CHG_SUCCESS, &mdev->flags);
+ } else {
+ set_bit(CL_ST_CHG_FAIL, &mdev->flags);
dev_err(DEV, "Requested state change failed by peer: %s (%d)\n",
- drbd_set_st_err_str(retcode), retcode);
+ drbd_set_st_err_str(retcode), retcode);
}
wake_up(&mdev->state_wait);
- return true;
+ return 0;
}
-static int got_Ping(struct drbd_conf *mdev, struct p_header80 *h)
+static int got_Ping(struct drbd_tconn *tconn, struct packet_info *pi)
{
- return drbd_send_ping_ack(mdev);
+ return drbd_send_ping_ack(tconn);
}
-static int got_PingAck(struct drbd_conf *mdev, struct p_header80 *h)
+static int got_PingAck(struct drbd_tconn *tconn, struct packet_info *pi)
{
/* restore idle timeout */
- mdev->meta.socket->sk->sk_rcvtimeo = mdev->net_conf->ping_int*HZ;
- if (!drbd_test_and_set_flag(mdev, GOT_PING_ACK))
- wake_up(&mdev->misc_wait);
+ tconn->meta.socket->sk->sk_rcvtimeo = tconn->net_conf->ping_int*HZ;
+ if (!test_and_set_bit(GOT_PING_ACK, &tconn->flags))
+ wake_up(&tconn->ping_wait);
- return true;
+ return 0;
}
-static int got_IsInSync(struct drbd_conf *mdev, struct p_header80 *h)
+static int got_IsInSync(struct drbd_tconn *tconn, struct packet_info *pi)
{
- struct p_block_ack *p = (struct p_block_ack *)h;
+ struct drbd_conf *mdev;
+ struct p_block_ack *p = pi->data;
sector_t sector = be64_to_cpu(p->sector);
int blksize = be32_to_cpu(p->blksize);
- D_ASSERT(mdev->agreed_pro_version >= 89);
+ mdev = vnr_to_mdev(tconn, pi->vnr);
+ if (!mdev)
+ return -EIO;
+
+ D_ASSERT(mdev->tconn->agreed_pro_version >= 89);
update_peer_seq(mdev, be32_to_cpu(p->seq_num));
@@ -4314,162 +4936,139 @@
dec_rs_pending(mdev);
atomic_add(blksize >> 9, &mdev->rs_sect_in);
- return true;
+ return 0;
}
-/* when we receive the ACK for a write request,
- * verify that we actually know about it */
-static struct drbd_request *_ack_id_to_req(struct drbd_conf *mdev,
- u64 id, sector_t sector)
-{
- struct hlist_head *slot = tl_hash_slot(mdev, sector);
- struct hlist_node *n;
- struct drbd_request *req;
-
- hlist_for_each_entry(req, n, slot, collision) {
- if ((unsigned long)req == (unsigned long)id) {
- if (req->sector != sector) {
- dev_err(DEV, "_ack_id_to_req: found req %p but it has "
- "wrong sector (%llus versus %llus)\n", req,
- (unsigned long long)req->sector,
- (unsigned long long)sector);
- break;
- }
- return req;
- }
- }
- return NULL;
-}
-
-typedef struct drbd_request *(req_validator_fn)
- (struct drbd_conf *mdev, u64 id, sector_t sector);
-
-static int validate_req_change_req_state(struct drbd_conf *mdev,
- u64 id, sector_t sector, req_validator_fn validator,
- const char *func, enum drbd_req_event what)
+static int
+validate_req_change_req_state(struct drbd_conf *mdev, u64 id, sector_t sector,
+ struct rb_root *root, const char *func,
+ enum drbd_req_event what, bool missing_ok)
{
struct drbd_request *req;
struct bio_and_error m;
- spin_lock_irq(&mdev->req_lock);
- req = validator(mdev, id, sector);
+ spin_lock_irq(&mdev->tconn->req_lock);
+ req = find_request(mdev, root, id, sector, missing_ok, func);
if (unlikely(!req)) {
- spin_unlock_irq(&mdev->req_lock);
-
- dev_err(DEV, "%s: failed to find req %p, sector %llus\n", func,
- (void *)(unsigned long)id, (unsigned long long)sector);
- return false;
+ spin_unlock_irq(&mdev->tconn->req_lock);
+ return -EIO;
}
__req_mod(req, what, &m);
- spin_unlock_irq(&mdev->req_lock);
+ spin_unlock_irq(&mdev->tconn->req_lock);
if (m.bio)
complete_master_bio(mdev, &m);
- return true;
+ return 0;
}
-static int got_BlockAck(struct drbd_conf *mdev, struct p_header80 *h)
+static int got_BlockAck(struct drbd_tconn *tconn, struct packet_info *pi)
{
- struct p_block_ack *p = (struct p_block_ack *)h;
+ struct drbd_conf *mdev;
+ struct p_block_ack *p = pi->data;
sector_t sector = be64_to_cpu(p->sector);
int blksize = be32_to_cpu(p->blksize);
enum drbd_req_event what;
+ mdev = vnr_to_mdev(tconn, pi->vnr);
+ if (!mdev)
+ return -EIO;
+
update_peer_seq(mdev, be32_to_cpu(p->seq_num));
- if (is_syncer_block_id(p->block_id)) {
+ if (p->block_id == ID_SYNCER) {
drbd_set_in_sync(mdev, sector, blksize);
dec_rs_pending(mdev);
- return true;
+ return 0;
}
- switch (be16_to_cpu(h->command)) {
+ switch (pi->cmd) {
case P_RS_WRITE_ACK:
- D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_C);
- what = write_acked_by_peer_and_sis;
+ what = WRITE_ACKED_BY_PEER_AND_SIS;
break;
case P_WRITE_ACK:
- D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_C);
- what = write_acked_by_peer;
+ what = WRITE_ACKED_BY_PEER;
break;
case P_RECV_ACK:
- D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_B);
- what = recv_acked_by_peer;
+ what = RECV_ACKED_BY_PEER;
break;
- case P_DISCARD_ACK:
- D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_C);
- what = conflict_discarded_by_peer;
+ case P_SUPERSEDED:
+ what = CONFLICT_RESOLVED;
+ break;
+ case P_RETRY_WRITE:
+ what = POSTPONE_WRITE;
break;
default:
- D_ASSERT(0);
- return false;
+ BUG();
}
return validate_req_change_req_state(mdev, p->block_id, sector,
- _ack_id_to_req, __func__ , what);
+ &mdev->write_requests, __func__,
+ what, false);
}
-static int got_NegAck(struct drbd_conf *mdev, struct p_header80 *h)
+static int got_NegAck(struct drbd_tconn *tconn, struct packet_info *pi)
{
- struct p_block_ack *p = (struct p_block_ack *)h;
+ struct drbd_conf *mdev;
+ struct p_block_ack *p = pi->data;
sector_t sector = be64_to_cpu(p->sector);
int size = be32_to_cpu(p->blksize);
- struct drbd_request *req;
- struct bio_and_error m;
+ int err;
+
+ mdev = vnr_to_mdev(tconn, pi->vnr);
+ if (!mdev)
+ return -EIO;
update_peer_seq(mdev, be32_to_cpu(p->seq_num));
- if (is_syncer_block_id(p->block_id)) {
+ if (p->block_id == ID_SYNCER) {
dec_rs_pending(mdev);
drbd_rs_failed_io(mdev, sector, size);
- return true;
+ return 0;
}
- spin_lock_irq(&mdev->req_lock);
- req = _ack_id_to_req(mdev, p->block_id, sector);
- if (!req) {
- spin_unlock_irq(&mdev->req_lock);
- if (mdev->net_conf->wire_protocol == DRBD_PROT_A ||
- mdev->net_conf->wire_protocol == DRBD_PROT_B) {
- /* Protocol A has no P_WRITE_ACKs, but has P_NEG_ACKs.
- The master bio might already be completed, therefore the
- request is no longer in the collision hash.
- => Do not try to validate block_id as request. */
- /* In Protocol B we might already have got a P_RECV_ACK
- but then get a P_NEG_ACK after wards. */
- drbd_set_out_of_sync(mdev, sector, size);
- return true;
- } else {
- dev_err(DEV, "%s: failed to find req %p, sector %llus\n", __func__,
- (void *)(unsigned long)p->block_id, (unsigned long long)sector);
- return false;
- }
+ err = validate_req_change_req_state(mdev, p->block_id, sector,
+ &mdev->write_requests, __func__,
+ NEG_ACKED, true);
+ if (err) {
+ /* Protocol A has no P_WRITE_ACKs, but has P_NEG_ACKs.
+ The master bio might already be completed, therefore the
+ request is no longer in the collision hash. */
+ /* In Protocol B we might already have got a P_RECV_ACK
+ but then get a P_NEG_ACK afterwards. */
+ drbd_set_out_of_sync(mdev, sector, size);
}
- __req_mod(req, neg_acked, &m);
- spin_unlock_irq(&mdev->req_lock);
-
- if (m.bio)
- complete_master_bio(mdev, &m);
- return true;
+ return 0;
}
-static int got_NegDReply(struct drbd_conf *mdev, struct p_header80 *h)
+static int got_NegDReply(struct drbd_tconn *tconn, struct packet_info *pi)
{
- struct p_block_ack *p = (struct p_block_ack *)h;
+ struct drbd_conf *mdev;
+ struct p_block_ack *p = pi->data;
sector_t sector = be64_to_cpu(p->sector);
+ mdev = vnr_to_mdev(tconn, pi->vnr);
+ if (!mdev)
+ return -EIO;
+
update_peer_seq(mdev, be32_to_cpu(p->seq_num));
- dev_err(DEV, "Got NegDReply; Sector %llus, len %u; Fail original request.\n",
+
+ dev_err(DEV, "Got NegDReply; Sector %llus, len %u.\n",
(unsigned long long)sector, be32_to_cpu(p->blksize));
return validate_req_change_req_state(mdev, p->block_id, sector,
- _ar_id_to_req, __func__ , neg_acked);
+ &mdev->read_requests, __func__,
+ NEG_ACKED, false);
}
-static int got_NegRSDReply(struct drbd_conf *mdev, struct p_header80 *h)
+static int got_NegRSDReply(struct drbd_tconn *tconn, struct packet_info *pi)
{
+ struct drbd_conf *mdev;
sector_t sector;
int size;
- struct p_block_ack *p = (struct p_block_ack *)h;
+ struct p_block_ack *p = pi->data;
+
+ mdev = vnr_to_mdev(tconn, pi->vnr);
+ if (!mdev)
+ return -EIO;
sector = be64_to_cpu(p->sector);
size = be32_to_cpu(p->blksize);
@@ -4480,57 +5079,66 @@
if (get_ldev_if_state(mdev, D_FAILED)) {
drbd_rs_complete_io(mdev, sector);
- switch (be16_to_cpu(h->command)) {
+ switch (pi->cmd) {
case P_NEG_RS_DREPLY:
drbd_rs_failed_io(mdev, sector, size);
case P_RS_CANCEL:
break;
default:
- D_ASSERT(0);
- put_ldev(mdev);
- return false;
+ BUG();
}
put_ldev(mdev);
}
- return true;
+ return 0;
}
-static int got_BarrierAck(struct drbd_conf *mdev, struct p_header80 *h)
+static int got_BarrierAck(struct drbd_tconn *tconn, struct packet_info *pi)
{
- struct p_barrier_ack *p = (struct p_barrier_ack *)h;
+ struct p_barrier_ack *p = pi->data;
+ struct drbd_conf *mdev;
+ int vnr;
- tl_release(mdev, p->barrier, be32_to_cpu(p->set_size));
+ tl_release(tconn, p->barrier, be32_to_cpu(p->set_size));
- if (mdev->state.conn == C_AHEAD &&
- atomic_read(&mdev->ap_in_flight) == 0 &&
- !drbd_test_and_set_flag(mdev, AHEAD_TO_SYNC_SOURCE)) {
- mdev->start_resync_timer.expires = jiffies + HZ;
- add_timer(&mdev->start_resync_timer);
+ rcu_read_lock();
+ idr_for_each_entry(&tconn->volumes, mdev, vnr) {
+ if (mdev->state.conn == C_AHEAD &&
+ atomic_read(&mdev->ap_in_flight) == 0 &&
+ !test_and_set_bit(AHEAD_TO_SYNC_SOURCE, &mdev->flags)) {
+ mdev->start_resync_timer.expires = jiffies + HZ;
+ add_timer(&mdev->start_resync_timer);
+ }
}
+ rcu_read_unlock();
- return true;
+ return 0;
}
-static int got_OVResult(struct drbd_conf *mdev, struct p_header80 *h)
+static int got_OVResult(struct drbd_tconn *tconn, struct packet_info *pi)
{
- struct p_block_ack *p = (struct p_block_ack *)h;
+ struct drbd_conf *mdev;
+ struct p_block_ack *p = pi->data;
struct drbd_work *w;
sector_t sector;
int size;
+ mdev = vnr_to_mdev(tconn, pi->vnr);
+ if (!mdev)
+ return -EIO;
+
sector = be64_to_cpu(p->sector);
size = be32_to_cpu(p->blksize);
update_peer_seq(mdev, be32_to_cpu(p->seq_num));
if (be64_to_cpu(p->block_id) == ID_OUT_OF_SYNC)
- drbd_ov_oos_found(mdev, sector, size);
+ drbd_ov_out_of_sync_found(mdev, sector, size);
else
- ov_oos_print(mdev);
+ ov_out_of_sync_print(mdev);
if (!get_ldev(mdev))
- return true;
+ return 0;
drbd_rs_complete_io(mdev, sector);
dec_rs_pending(mdev);
@@ -4545,114 +5153,137 @@
w = kmalloc(sizeof(*w), GFP_NOIO);
if (w) {
w->cb = w_ov_finished;
- drbd_queue_work_front(&mdev->data.work, w);
+ w->mdev = mdev;
+ drbd_queue_work(&mdev->tconn->sender_work, w);
} else {
dev_err(DEV, "kmalloc(w) failed.");
- ov_oos_print(mdev);
+ ov_out_of_sync_print(mdev);
drbd_resync_finished(mdev);
}
}
put_ldev(mdev);
- return true;
+ return 0;
}
-static int got_skip(struct drbd_conf *mdev, struct p_header80 *h)
+static int got_skip(struct drbd_tconn *tconn, struct packet_info *pi)
{
- return true;
+ return 0;
+}
+
+static int tconn_finish_peer_reqs(struct drbd_tconn *tconn)
+{
+ struct drbd_conf *mdev;
+ int vnr, not_empty = 0;
+
+ do {
+ clear_bit(SIGNAL_ASENDER, &tconn->flags);
+ flush_signals(current);
+
+ rcu_read_lock();
+ idr_for_each_entry(&tconn->volumes, mdev, vnr) {
+ kref_get(&mdev->kref);
+ rcu_read_unlock();
+ if (drbd_finish_peer_reqs(mdev)) {
+ kref_put(&mdev->kref, &drbd_minor_destroy);
+ return 1;
+ }
+ kref_put(&mdev->kref, &drbd_minor_destroy);
+ rcu_read_lock();
+ }
+ set_bit(SIGNAL_ASENDER, &tconn->flags);
+
+ spin_lock_irq(&tconn->req_lock);
+ idr_for_each_entry(&tconn->volumes, mdev, vnr) {
+ not_empty = !list_empty(&mdev->done_ee);
+ if (not_empty)
+ break;
+ }
+ spin_unlock_irq(&tconn->req_lock);
+ rcu_read_unlock();
+ } while (not_empty);
+
+ return 0;
}
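
tconn_finish_peer_reqs() avoids losing completions with a clear-drain-rearm-recheck loop: SIGNAL_ASENDER is cleared and signals are flushed before draining done_ee, the flag is set again, and done_ee is re-checked under the lock; anything queued in the window is caught either by that re-check or by the pending signal in the outer asender loop. A reduced single-threaded sketch of the same loop follows; the atomic counter and flag are illustrative stand-ins for done_ee and SIGNAL_ASENDER.

/* The "clear flag, drain, re-arm, re-check" loop, reduced to atomics. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int pending;	/* entries on done_ee */
static atomic_bool notify;	/* SIGNAL_ASENDER */

/* a producer would do: atomic_fetch_add(&pending, 1); atomic_store(&notify, true); */

static void drain(void)
{
	int n = atomic_exchange(&pending, 0);

	if (n)
		printf("processed %d completions\n", n);
}

static void finish_peer_reqs(void)
{
	bool not_empty;

	do {
		atomic_store(&notify, false);	/* clear_bit(SIGNAL_ASENDER) */
		drain();			/* drbd_finish_peer_reqs() */
		atomic_store(&notify, true);	/* re-arm before re-checking */
		/* anything queued after drain() but before the flag was
		 * re-armed is caught here and drained on the next pass */
		not_empty = atomic_load(&pending) != 0;
	} while (not_empty);
}

int main(void)
{
	atomic_store(&pending, 3);
	atomic_store(&notify, true);
	finish_peer_reqs();
	return 0;
}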
struct asender_cmd {
size_t pkt_size;
- int (*process)(struct drbd_conf *mdev, struct p_header80 *h);
+ int (*fn)(struct drbd_tconn *tconn, struct packet_info *);
};
-static struct asender_cmd *get_asender_cmd(int cmd)
-{
- static struct asender_cmd asender_tbl[] = {
- /* anything missing from this table is in
- * the drbd_cmd_handler (drbd_default_handler) table,
- * see the beginning of drbdd() */
- [P_PING] = { sizeof(struct p_header80), got_Ping },
- [P_PING_ACK] = { sizeof(struct p_header80), got_PingAck },
+static struct asender_cmd asender_tbl[] = {
+ [P_PING] = { 0, got_Ping },
+ [P_PING_ACK] = { 0, got_PingAck },
[P_RECV_ACK] = { sizeof(struct p_block_ack), got_BlockAck },
[P_WRITE_ACK] = { sizeof(struct p_block_ack), got_BlockAck },
[P_RS_WRITE_ACK] = { sizeof(struct p_block_ack), got_BlockAck },
- [P_DISCARD_ACK] = { sizeof(struct p_block_ack), got_BlockAck },
+ [P_SUPERSEDED] = { sizeof(struct p_block_ack), got_BlockAck },
[P_NEG_ACK] = { sizeof(struct p_block_ack), got_NegAck },
[P_NEG_DREPLY] = { sizeof(struct p_block_ack), got_NegDReply },
- [P_NEG_RS_DREPLY] = { sizeof(struct p_block_ack), got_NegRSDReply},
+ [P_NEG_RS_DREPLY] = { sizeof(struct p_block_ack), got_NegRSDReply },
[P_OV_RESULT] = { sizeof(struct p_block_ack), got_OVResult },
[P_BARRIER_ACK] = { sizeof(struct p_barrier_ack), got_BarrierAck },
[P_STATE_CHG_REPLY] = { sizeof(struct p_req_state_reply), got_RqSReply },
[P_RS_IS_IN_SYNC] = { sizeof(struct p_block_ack), got_IsInSync },
[P_DELAY_PROBE] = { sizeof(struct p_delay_probe93), got_skip },
- [P_RS_CANCEL] = { sizeof(struct p_block_ack), got_NegRSDReply},
- [P_MAX_CMD] = { 0, NULL },
- };
- if (cmd > P_MAX_CMD || asender_tbl[cmd].process == NULL)
- return NULL;
- return &asender_tbl[cmd];
-}
+ [P_RS_CANCEL] = { sizeof(struct p_block_ack), got_NegRSDReply },
+ [P_CONN_ST_CHG_REPLY]={ sizeof(struct p_req_state_reply), got_conn_RqSReply },
+ [P_RETRY_WRITE] = { sizeof(struct p_block_ack), got_BlockAck },
+};
int drbd_asender(struct drbd_thread *thi)
{
- struct drbd_conf *mdev = thi->mdev;
- struct p_header80 *h = &mdev->meta.rbuf.header.h80;
+ struct drbd_tconn *tconn = thi->tconn;
struct asender_cmd *cmd = NULL;
-
- int rv, len;
- void *buf = h;
+ struct packet_info pi;
+ int rv;
+ void *buf = tconn->meta.rbuf;
int received = 0;
- int expect = sizeof(struct p_header80);
- int empty;
- int ping_timeout_active = 0;
-
- sprintf(current->comm, "drbd%d_asender", mdev_to_minor(mdev));
+ unsigned int header_size = drbd_header_size(tconn);
+ int expect = header_size;
+ bool ping_timeout_active = false;
+ struct net_conf *nc;
+ int ping_timeo, tcp_cork, ping_int;
current->policy = SCHED_RR; /* Make this a realtime task! */
current->rt_priority = 2; /* more important than all other tasks */
- while (get_t_state(thi) == Running) {
- drbd_thread_current_set_cpu(mdev);
- if (drbd_test_and_clear_flag(mdev, SEND_PING)) {
- ERR_IF(!drbd_send_ping(mdev)) goto reconnect;
- mdev->meta.socket->sk->sk_rcvtimeo =
- mdev->net_conf->ping_timeo*HZ/10;
- ping_timeout_active = 1;
+ while (get_t_state(thi) == RUNNING) {
+ drbd_thread_current_set_cpu(thi);
+
+ rcu_read_lock();
+ nc = rcu_dereference(tconn->net_conf);
+ ping_timeo = nc->ping_timeo;
+ tcp_cork = nc->tcp_cork;
+ ping_int = nc->ping_int;
+ rcu_read_unlock();
+
+ if (test_and_clear_bit(SEND_PING, &tconn->flags)) {
+ if (drbd_send_ping(tconn)) {
+ conn_err(tconn, "drbd_send_ping has failed\n");
+ goto reconnect;
+ }
+ tconn->meta.socket->sk->sk_rcvtimeo = ping_timeo * HZ / 10;
+ ping_timeout_active = true;
}
- /* conditionally cork;
- * it may hurt latency if we cork without much to send */
- if (!mdev->net_conf->no_cork &&
- 3 < atomic_read(&mdev->unacked_cnt))
- drbd_tcp_cork(mdev->meta.socket);
- while (1) {
- drbd_clear_flag(mdev, SIGNAL_ASENDER);
- flush_signals(current);
- if (!drbd_process_done_ee(mdev))
- goto reconnect;
- /* to avoid race with newly queued ACKs */
- drbd_set_flag(mdev, SIGNAL_ASENDER);
- spin_lock_irq(&mdev->req_lock);
- empty = list_empty(&mdev->done_ee);
- spin_unlock_irq(&mdev->req_lock);
- /* new ack may have been queued right here,
- * but then there is also a signal pending,
- * and we start over... */
- if (empty)
- break;
+ /* TODO: conditionally cork; it may hurt latency if we cork without
+ much to send */
+ if (tcp_cork)
+ drbd_tcp_cork(tconn->meta.socket);
+ if (tconn_finish_peer_reqs(tconn)) {
+ conn_err(tconn, "tconn_finish_peer_reqs() failed\n");
+ goto reconnect;
}
/* but unconditionally uncork unless disabled */
- if (!mdev->net_conf->no_cork)
- drbd_tcp_uncork(mdev->meta.socket);
+ if (tcp_cork)
+ drbd_tcp_uncork(tconn->meta.socket);
/* short circuit, recv_msg would return EINTR anyways. */
if (signal_pending(current))
continue;
- rv = drbd_recv_short(mdev, mdev->meta.socket,
- buf, expect-received, 0);
- drbd_clear_flag(mdev, SIGNAL_ASENDER);
+ rv = drbd_recv_short(tconn->meta.socket, buf, expect-received, 0);
+ clear_bit(SIGNAL_ASENDER, &tconn->flags);
flush_signals(current);
@@ -4670,87 +5301,91 @@
received += rv;
buf += rv;
} else if (rv == 0) {
- if (drbd_test_flag(mdev, DISCONNECT_SENT)) {
- long t; /* time_left */
- t = wait_event_timeout(mdev->state_wait, mdev->state.conn < C_CONNECTED,
- mdev->net_conf->ping_timeo * HZ/10);
+ if (test_bit(DISCONNECT_SENT, &tconn->flags)) {
+ long t;
+ rcu_read_lock();
+ t = rcu_dereference(tconn->net_conf)->ping_timeo * HZ/10;
+ rcu_read_unlock();
+
+ t = wait_event_timeout(tconn->ping_wait,
+ tconn->cstate < C_WF_REPORT_PARAMS,
+ t);
if (t)
break;
}
- dev_err(DEV, "meta connection shut down by peer.\n");
+ conn_err(tconn, "meta connection shut down by peer.\n");
goto reconnect;
} else if (rv == -EAGAIN) {
/* If the data socket received something meanwhile,
* that is good enough: peer is still alive. */
- if (time_after(mdev->last_received,
- jiffies - mdev->meta.socket->sk->sk_rcvtimeo))
+ if (time_after(tconn->last_received,
+ jiffies - tconn->meta.socket->sk->sk_rcvtimeo))
continue;
if (ping_timeout_active) {
- dev_err(DEV, "PingAck did not arrive in time.\n");
+ conn_err(tconn, "PingAck did not arrive in time.\n");
goto reconnect;
}
- drbd_set_flag(mdev, SEND_PING);
+ set_bit(SEND_PING, &tconn->flags);
continue;
} else if (rv == -EINTR) {
continue;
} else {
- dev_err(DEV, "sock_recvmsg returned %d\n", rv);
+ conn_err(tconn, "sock_recvmsg returned %d\n", rv);
goto reconnect;
}
if (received == expect && cmd == NULL) {
- if (unlikely(h->magic != BE_DRBD_MAGIC)) {
- dev_err(DEV, "magic?? on meta m: 0x%08x c: %d l: %d\n",
- be32_to_cpu(h->magic),
- be16_to_cpu(h->command),
- be16_to_cpu(h->length));
+ if (decode_header(tconn, tconn->meta.rbuf, &pi))
goto reconnect;
- }
- cmd = get_asender_cmd(be16_to_cpu(h->command));
- len = be16_to_cpu(h->length);
- if (unlikely(cmd == NULL)) {
- dev_err(DEV, "unknown command?? on meta m: 0x%08x c: %d l: %d\n",
- be32_to_cpu(h->magic),
- be16_to_cpu(h->command),
- be16_to_cpu(h->length));
+ cmd = &asender_tbl[pi.cmd];
+ if (pi.cmd >= ARRAY_SIZE(asender_tbl) || !cmd->fn) {
+ conn_err(tconn, "Unexpected meta packet %s (0x%04x)\n",
+ cmdname(pi.cmd), pi.cmd);
goto disconnect;
}
- expect = cmd->pkt_size;
- ERR_IF(len != expect-sizeof(struct p_header80))
+ expect = header_size + cmd->pkt_size;
+ if (pi.size != expect - header_size) {
+ conn_err(tconn, "Wrong packet size on meta (c: %d, l: %d)\n",
+ pi.cmd, pi.size);
goto reconnect;
+ }
}
if (received == expect) {
- mdev->last_received = jiffies;
- D_ASSERT(cmd != NULL);
- if (!cmd->process(mdev, h))
+ bool err;
+
+ err = cmd->fn(tconn, &pi);
+ if (err) {
+ conn_err(tconn, "%pf failed\n", cmd->fn);
goto reconnect;
+ }
- /* the idle_timeout (ping-int)
- * has been restored in got_PingAck() */
- if (cmd == get_asender_cmd(P_PING_ACK))
- ping_timeout_active = 0;
+ tconn->last_received = jiffies;
- buf = h;
+ if (cmd == &asender_tbl[P_PING_ACK]) {
+ /* restore idle timeout */
+ tconn->meta.socket->sk->sk_rcvtimeo = ping_int * HZ;
+ ping_timeout_active = false;
+ }
+
+ buf = tconn->meta.rbuf;
received = 0;
- expect = sizeof(struct p_header80);
+ expect = header_size;
cmd = NULL;
}
}
if (0) {
reconnect:
- drbd_force_state(mdev, NS(conn, C_NETWORK_FAILURE));
- drbd_md_sync(mdev);
+ conn_request_state(tconn, NS(conn, C_NETWORK_FAILURE), CS_HARD);
+ conn_md_sync(tconn);
}
if (0) {
disconnect:
- drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
- drbd_md_sync(mdev);
+ conn_request_state(tconn, NS(conn, C_DISCONNECTING), CS_HARD);
}
- drbd_clear_flag(mdev, SIGNAL_ASENDER);
+ clear_bit(SIGNAL_ASENDER, &tconn->flags);
- D_ASSERT(mdev->state.conn < C_CONNECTED);
- dev_info(DEV, "asender terminated\n");
+ conn_info(tconn, "asender terminated\n");
return 0;
}
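
In the -EAGAIN branch of the asender loop above, the thread decides between three outcomes: keep waiting if the data socket has seen traffic within the receive timeout, reconnect if a PingAck is already overdue, or request a ping otherwise. The same decision as a small pure function; times are plain numbers rather than jiffies and all names are illustrative.

/* Keepalive decision from the asender receive loop. */
#include <stdbool.h>
#include <stdio.h>

enum action { KEEP_WAITING, SEND_PING, RECONNECT };

static enum action on_recv_timeout(long now, long last_received,
				   long rcvtimeo, bool ping_timeout_active)
{
	/* data socket saw traffic recently enough: peer is still alive */
	if (now - last_received < rcvtimeo)
		return KEEP_WAITING;
	/* we were already waiting for a PingAck: give up and reconnect */
	if (ping_timeout_active)
		return RECONNECT;
	/* otherwise ask the peer to prove it is alive */
	return SEND_PING;
}

int main(void)
{
	printf("%d %d %d\n",
	       on_recv_timeout(100, 95, 10, false),	/* KEEP_WAITING */
	       on_recv_timeout(100, 50, 10, false),	/* SEND_PING */
	       on_recv_timeout(100, 50, 10, true));	/* RECONNECT */
	return 0;
}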