forked from luck/tmp_suning_uos_patched
drbd: fix max_bio_size to be unsigned
We capped our max_bio_size respectively max_hw_sectors with min_t(int, lower level limit, our limit); unfortunately, some drivers, e.g. the kvm virtio block driver, initialize their limits to "-1U", and that is of course a smaller "int" value than our limit. Impact: we started to request 16 MB resync requests, which led to a protocol error and a reconnect loop. Fix all relevant constants and parameters to be unsigned int. Signed-off-by: Philipp Reisner <philipp.reisner@linbit.com> Signed-off-by: Lars Ellenberg <lars.ellenberg@linbit.com>
This commit is contained in:
parent
7ee1fb93f3
commit
db141b2f42
@ -1136,8 +1136,8 @@ struct drbd_conf {
|
||||
int rs_in_flight; /* resync sectors in flight (to proxy, in proxy and from proxy) */
|
||||
int rs_planed; /* resync sectors already planned */
|
||||
atomic_t ap_in_flight; /* App sectors in flight (waiting for ack) */
|
||||
int peer_max_bio_size;
|
||||
int local_max_bio_size;
|
||||
unsigned int peer_max_bio_size;
|
||||
unsigned int local_max_bio_size;
|
||||
};
|
||||
|
||||
static inline struct drbd_conf *minor_to_mdev(unsigned int minor)
|
||||
@ -1441,9 +1441,9 @@ struct bm_extent {
|
||||
* hash table. */
|
||||
#define HT_SHIFT 8
|
||||
#define DRBD_MAX_BIO_SIZE (1U<<(9+HT_SHIFT))
|
||||
#define DRBD_MAX_BIO_SIZE_SAFE (1 << 12) /* Works always = 4k */
|
||||
#define DRBD_MAX_BIO_SIZE_SAFE (1U << 12) /* Works always = 4k */
|
||||
|
||||
#define DRBD_MAX_SIZE_H80_PACKET (1 << 15) /* The old header only allows packets up to 32Kib data */
|
||||
#define DRBD_MAX_SIZE_H80_PACKET (1U << 15) /* The old header only allows packets up to 32Kib data */
|
||||
|
||||
/* Number of elements in the app_reads_hash */
|
||||
#define APP_R_HSIZE 15
|
||||
|
@ -2209,7 +2209,8 @@ int drbd_send_sizes(struct drbd_conf *mdev, int trigger_reply, enum dds_flags fl
|
||||
{
|
||||
struct p_sizes p;
|
||||
sector_t d_size, u_size;
|
||||
int q_order_type, max_bio_size;
|
||||
int q_order_type;
|
||||
unsigned int max_bio_size;
|
||||
int ok;
|
||||
|
||||
if (get_ldev_if_state(mdev, D_NEGOTIATING)) {
|
||||
@ -2218,7 +2219,7 @@ int drbd_send_sizes(struct drbd_conf *mdev, int trigger_reply, enum dds_flags fl
|
||||
u_size = mdev->ldev->dc.disk_size;
|
||||
q_order_type = drbd_queue_order_type(mdev);
|
||||
max_bio_size = queue_max_hw_sectors(mdev->ldev->backing_bdev->bd_disk->queue) << 9;
|
||||
max_bio_size = min_t(int, max_bio_size, DRBD_MAX_BIO_SIZE);
|
||||
max_bio_size = min(max_bio_size, DRBD_MAX_BIO_SIZE);
|
||||
put_ldev(mdev);
|
||||
} else {
|
||||
d_size = 0;
|
||||
@ -2229,7 +2230,7 @@ int drbd_send_sizes(struct drbd_conf *mdev, int trigger_reply, enum dds_flags fl
|
||||
|
||||
/* Never allow old drbd (up to 8.3.7) to see more than 32KiB */
|
||||
if (mdev->agreed_pro_version <= 94)
|
||||
max_bio_size = min_t(int, max_bio_size, DRBD_MAX_SIZE_H80_PACKET);
|
||||
max_bio_size = min(max_bio_size, DRBD_MAX_SIZE_H80_PACKET);
|
||||
|
||||
p.d_size = cpu_to_be64(d_size);
|
||||
p.u_size = cpu_to_be64(u_size);
|
||||
@ -3981,9 +3982,9 @@ int drbd_md_read(struct drbd_conf *mdev, struct drbd_backing_dev *bdev)
|
||||
|
||||
spin_lock_irq(&mdev->req_lock);
|
||||
if (mdev->state.conn < C_CONNECTED) {
|
||||
int peer;
|
||||
unsigned int peer;
|
||||
peer = be32_to_cpu(buffer->la_peer_max_bio_size);
|
||||
peer = max_t(int, peer, DRBD_MAX_BIO_SIZE_SAFE);
|
||||
peer = max(peer, DRBD_MAX_BIO_SIZE_SAFE);
|
||||
mdev->peer_max_bio_size = peer;
|
||||
}
|
||||
spin_unlock_irq(&mdev->req_lock);
|
||||
|
@ -801,8 +801,8 @@ static int drbd_check_al_size(struct drbd_conf *mdev)
|
||||
static void drbd_setup_queue_param(struct drbd_conf *mdev, unsigned int max_bio_size)
|
||||
{
|
||||
struct request_queue * const q = mdev->rq_queue;
|
||||
int max_hw_sectors = max_bio_size >> 9;
|
||||
int max_segments = 0;
|
||||
unsigned int max_hw_sectors = max_bio_size >> 9;
|
||||
unsigned int max_segments = 0;
|
||||
|
||||
if (get_ldev_if_state(mdev, D_ATTACHING)) {
|
||||
struct request_queue * const b = mdev->ldev->backing_bdev->bd_disk->queue;
|
||||
@ -835,7 +835,7 @@ static void drbd_setup_queue_param(struct drbd_conf *mdev, unsigned int max_bio_
|
||||
|
||||
void drbd_reconsider_max_bio_size(struct drbd_conf *mdev)
|
||||
{
|
||||
int now, new, local, peer;
|
||||
unsigned int now, new, local, peer;
|
||||
|
||||
now = queue_max_hw_sectors(mdev->rq_queue) << 9;
|
||||
local = mdev->local_max_bio_size; /* Eventually last known value, from volatile memory */
|
||||
@ -846,13 +846,14 @@ void drbd_reconsider_max_bio_size(struct drbd_conf *mdev)
|
||||
mdev->local_max_bio_size = local;
|
||||
put_ldev(mdev);
|
||||
}
|
||||
local = min(local, DRBD_MAX_BIO_SIZE);
|
||||
|
||||
/* We may ignore peer limits if the peer is modern enough.
|
||||
Because new from 8.3.8 onwards the peer can use multiple
|
||||
BIOs for a single peer_request */
|
||||
if (mdev->state.conn >= C_CONNECTED) {
|
||||
if (mdev->agreed_pro_version < 94) {
|
||||
peer = min_t(int, mdev->peer_max_bio_size, DRBD_MAX_SIZE_H80_PACKET);
|
||||
peer = min(mdev->peer_max_bio_size, DRBD_MAX_SIZE_H80_PACKET);
|
||||
/* Correct old drbd (up to 8.3.7) if it believes it can do more than 32KiB */
|
||||
} else if (mdev->agreed_pro_version == 94)
|
||||
peer = DRBD_MAX_SIZE_H80_PACKET;
|
||||
@ -860,10 +861,10 @@ void drbd_reconsider_max_bio_size(struct drbd_conf *mdev)
|
||||
peer = DRBD_MAX_BIO_SIZE;
|
||||
}
|
||||
|
||||
new = min_t(int, local, peer);
|
||||
new = min(local, peer);
|
||||
|
||||
if (mdev->state.role == R_PRIMARY && new < now)
|
||||
dev_err(DEV, "ASSERT FAILED new < now; (%d < %d)\n", new, now);
|
||||
dev_err(DEV, "ASSERT FAILED new < now; (%u < %u)\n", new, now);
|
||||
|
||||
if (new != now)
|
||||
dev_info(DEV, "max BIO size = %u\n", new);
|
||||
|
Loading…
Reference in New Issue
Block a user