drbd: Remove the unused hash tables
Signed-off-by: Philipp Reisner <philipp.reisner@linbit.com>
Signed-off-by: Lars Ellenberg <lars.ellenberg@linbit.com>
Andreas Gruenbacher authored and Philipp Reisner committed Aug 29, 2011
1 parent 8b94625 commit bb3bfe9
Showing 7 changed files with 20 additions and 177 deletions.
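
For illustration only (not part of this commit): the removed tables (mdev->tl_hash, mdev->ee_hash, mdev->app_reads_hash, together with the hlist_node collision members chained into them) existed to answer "which pending requests touch this sector range?". That job now falls to the interval trees already present in struct drbd_conf (read_requests, write_requests, epoch_entries), which each request joins through its embedded struct drbd_interval. Below is a stand-alone C sketch of the before/after per-request layout; the node types and field layout are simplified stand-ins, not drbd's actual definitions:

#include <stdio.h>

struct hlist_node { struct hlist_node *next, **pprev; };
struct rb_node { struct rb_node *parent, *left, *right; };

/* Before: a request carried an hlist_node and was chained into a bucket
 * array (tl_hash / ee_hash / app_reads_hash) selected by hashing its sector. */
struct request_before {
	struct hlist_node collision;
	unsigned long long sector;
	unsigned int size;
};

/* After: a request embeds an interval node and is inserted into one of the
 * per-device interval trees (read_requests, write_requests, epoch_entries),
 * keyed by its sector range. */
struct interval_sketch {
	struct rb_node rb;
	unsigned long long sector;	/* start sector */
	unsigned int size;		/* length of the range */
};

struct request_after {
	struct interval_sketch i;
	unsigned int epoch;
};

int main(void)
{
	/* Print the sizes of the two sketch structs, just to have output. */
	printf("sketch: before=%zu bytes, after=%zu bytes\n",
	       sizeof(struct request_before), sizeof(struct request_after));
	return 0;
}
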
drivers/block/drbd/drbd_int.h: 13 changes (0 additions, 13 deletions)
@@ -694,7 +694,6 @@ struct drbd_request {
* see drbd_endio_pri(). */
struct bio *private_bio;

struct hlist_node collision;
struct drbd_interval i;
unsigned int epoch; /* barrier_nr */

@@ -759,7 +758,6 @@ struct digest_info {

struct drbd_epoch_entry {
struct drbd_work w;
struct hlist_node collision;
struct drbd_epoch *epoch; /* for writes */
struct drbd_conf *mdev;
struct page *pages;
@@ -1015,8 +1013,6 @@ struct drbd_conf {
struct drbd_tl_epoch *newest_tle;
struct drbd_tl_epoch *oldest_tle;
struct list_head out_of_sequence_requests;
struct hlist_head *tl_hash;
unsigned int tl_hash_s;

/* Interval tree of pending local requests */
struct rb_root read_requests;
@@ -1077,8 +1073,6 @@ struct drbd_conf {
struct list_head done_ee; /* send ack */
struct list_head read_ee; /* IO in progress (any read) */
struct list_head net_ee; /* zero-copy network send in progress */
struct hlist_head *ee_hash; /* is proteced by req_lock! */
unsigned int ee_hash_s;

/* Interval tree of pending remote write requests (struct drbd_epoch_entry) */
struct rb_root epoch_entries;
@@ -1087,7 +1081,6 @@ struct drbd_conf {
struct drbd_epoch_entry *last_write_w_barrier;

int next_barrier_nr;
struct hlist_head *app_reads_hash; /* is proteced by req_lock */
struct list_head resync_reads;
atomic_t pp_in_use; /* allocated from page pool */
atomic_t pp_in_use_by_net; /* sendpage()d, still referenced by tcp */
@@ -1428,18 +1421,12 @@ struct bm_extent {
#endif
#endif

/* Sector shift value for the "hash" functions of tl_hash and ee_hash tables.
* With a value of 8 all IO in one 128K block make it to the same slot of the
* hash table. */
#define HT_SHIFT 8
#define DRBD_MAX_BIO_SIZE (1U<<(9+HT_SHIFT))
#define DRBD_MAX_BIO_SIZE_SAFE (1 << 12) /* Works always = 4k */

#define DRBD_MAX_SIZE_H80_PACKET (1 << 15) /* The old header only allows packets up to 32Kib data */

/* Number of elements in the app_reads_hash */
#define APP_R_HSIZE 15

extern int drbd_bm_init(struct drbd_conf *mdev);
extern int drbd_bm_resize(struct drbd_conf *mdev, sector_t sectors, int set_new_bits);
extern void drbd_bm_cleanup(struct drbd_conf *mdev);
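
For illustration only (not part of this commit), the arithmetic behind the defines above, spelled out: sectors are 512 bytes, so DRBD_MAX_BIO_SIZE = 1U << (9 + HT_SHIFT) = 1 << 17 = 131072 bytes (128 KiB), and shifting a sector number right by HT_SHIFT maps every request inside one 128 KiB region to the same bucket. A stand-alone sketch of that bucket computation; the helper name and the bucket count are assumptions for illustration, not drbd's exact code:

#include <stdio.h>

#define HT_SHIFT 8
#define DRBD_MAX_BIO_SIZE (1U << (9 + HT_SHIFT))	/* 512-byte sectors -> 128 KiB */

/* Illustrative stand-in for the removed tl_hash/ee_hash slot lookup. */
static unsigned int hash_slot(unsigned long long sector, unsigned int nr_slots)
{
	return (unsigned int)(sector >> HT_SHIFT) % nr_slots;
}

int main(void)
{
	printf("DRBD_MAX_BIO_SIZE = %u bytes\n", DRBD_MAX_BIO_SIZE);	/* 131072 */
	/* Sectors 0..255 cover the first 128 KiB and share one slot;
	 * sector 256 starts the next 128 KiB region and the next slot. */
	printf("slot(0)   = %u\n", hash_slot(0, 256));
	printf("slot(255) = %u\n", hash_slot(255, 256));
	printf("slot(256) = %u\n", hash_slot(256, 256));
	return 0;
}
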
drivers/block/drbd/drbd_main.c: 57 changes (0 additions, 57 deletions)
@@ -209,9 +209,6 @@ static int tl_init(struct drbd_conf *mdev)
mdev->newest_tle = b;
INIT_LIST_HEAD(&mdev->out_of_sequence_requests);

mdev->tl_hash = NULL;
mdev->tl_hash_s = 0;

return 1;
}

@@ -223,39 +220,6 @@ static void tl_cleanup(struct drbd_conf *mdev)
mdev->oldest_tle = NULL;
kfree(mdev->unused_spare_tle);
mdev->unused_spare_tle = NULL;
kfree(mdev->tl_hash);
mdev->tl_hash = NULL;
mdev->tl_hash_s = 0;
}

static void drbd_free_tl_hash(struct drbd_conf *mdev)
{
struct hlist_head *h;

spin_lock_irq(&mdev->req_lock);

if (!mdev->tl_hash || mdev->state.conn != C_STANDALONE) {
spin_unlock_irq(&mdev->req_lock);
return;
}
/* paranoia code */
for (h = mdev->ee_hash; h < mdev->ee_hash + mdev->ee_hash_s; h++)
if (h->first)
dev_err(DEV, "ASSERT FAILED ee_hash[%u].first == %p, expected NULL\n",
(int)(h - mdev->ee_hash), h->first);
kfree(mdev->ee_hash);
mdev->ee_hash = NULL;
mdev->ee_hash_s = 0;

/* paranoia code */
for (h = mdev->tl_hash; h < mdev->tl_hash + mdev->tl_hash_s; h++)
if (h->first)
dev_err(DEV, "ASSERT FAILED tl_hash[%u] == %p, expected NULL\n",
(int)(h - mdev->tl_hash), h->first);
kfree(mdev->tl_hash);
mdev->tl_hash = NULL;
mdev->tl_hash_s = 0;
spin_unlock_irq(&mdev->req_lock);
}

/**
@@ -475,8 +439,6 @@ void tl_clear(struct drbd_conf *mdev)
/* ensure bit indicating barrier is required is clear */
clear_bit(CREATE_BARRIER, &mdev->flags);

memset(mdev->app_reads_hash, 0, APP_R_HSIZE*sizeof(void *));

spin_unlock_irq(&mdev->req_lock);
}

@@ -1633,10 +1595,6 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
put_ldev(mdev);
}

/* free tl_hash if we Got thawed and are C_STANDALONE */
if (ns.conn == C_STANDALONE && !is_susp(ns) && mdev->tl_hash)
drbd_free_tl_hash(mdev);

/* Upon network connection, we need to start the receiver */
if (os.conn == C_STANDALONE && ns.conn == C_UNCONNECTED)
drbd_thread_start(&mdev->receiver);
@@ -3317,13 +3275,6 @@ static void drbd_delete_device(unsigned int minor)

drbd_release_ee_lists(mdev);

/* should be freed on disconnect? */
kfree(mdev->ee_hash);
/*
mdev->ee_hash_s = 0;
mdev->ee_hash = NULL;
*/

lc_destroy(mdev->act_log);
lc_destroy(mdev->resync);

@@ -3477,10 +3428,6 @@ struct drbd_conf *drbd_new_device(unsigned int minor)
mdev->write_requests = RB_ROOT;
mdev->epoch_entries = RB_ROOT;

mdev->app_reads_hash = kzalloc(APP_R_HSIZE*sizeof(void *), GFP_KERNEL);
if (!mdev->app_reads_hash)
goto out_no_app_reads;

mdev->current_epoch = kzalloc(sizeof(struct drbd_epoch), GFP_KERNEL);
if (!mdev->current_epoch)
goto out_no_epoch;
@@ -3493,8 +3440,6 @@ struct drbd_conf *drbd_new_device(unsigned int minor)
/* out_whatever_else:
kfree(mdev->current_epoch); */
out_no_epoch:
kfree(mdev->app_reads_hash);
out_no_app_reads:
tl_cleanup(mdev);
out_no_tl:
drbd_bm_cleanup(mdev);
@@ -3516,15 +3461,13 @@ struct drbd_conf *drbd_new_device(unsigned int minor)
void drbd_free_mdev(struct drbd_conf *mdev)
{
kfree(mdev->current_epoch);
kfree(mdev->app_reads_hash);
tl_cleanup(mdev);
if (mdev->bitmap) /* should no longer be there. */
drbd_bm_cleanup(mdev);
__free_page(mdev->md_io_page);
put_disk(mdev->vdisk);
blk_cleanup_queue(mdev->rq_queue);
free_cpumask_var(mdev->cpu_mask);
drbd_free_tl_hash(mdev);
kfree(mdev);
}

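
For illustration only (not part of this commit): the removed drbd_free_tl_hash() walked every bucket and warned about anything still chained in before freeing the arrays; with the tables gone, the remaining sanity checks are the D_ASSERT(drbd_interval_empty(...)) calls in the receiver. A stand-alone sketch of that check-before-free pattern (types and names are illustrative, not drbd's):

#include <stdio.h>
#include <stdlib.h>

struct bucket { void *first; };	/* stand-in for struct hlist_head */

/* Warn about any non-empty bucket, then free the array, mirroring the
 * "paranoia code" in the removed drbd_free_tl_hash(). */
static void check_and_free(struct bucket *tbl, unsigned int n, const char *name)
{
	for (unsigned int i = 0; i < n; i++)
		if (tbl[i].first)
			fprintf(stderr, "ASSERT FAILED %s[%u].first == %p, expected NULL\n",
				name, i, tbl[i].first);
	free(tbl);
}

int main(void)
{
	struct bucket *tl_hash = calloc(8, sizeof(*tl_hash));
	if (!tl_hash)
		return 1;
	check_and_free(tl_hash, 8, "tl_hash");	/* silent: every bucket is empty */
	return 0;
}
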
drivers/block/drbd/drbd_nl.c: 36 changes (1 addition, 35 deletions)
@@ -1353,14 +1353,12 @@ static int drbd_nl_detach(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
static int drbd_nl_net_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
struct drbd_nl_cfg_reply *reply)
{
int i, ns;
int i;
enum drbd_ret_code retcode;
struct net_conf *new_conf = NULL;
struct crypto_hash *tfm = NULL;
struct crypto_hash *integrity_w_tfm = NULL;
struct crypto_hash *integrity_r_tfm = NULL;
struct hlist_head *new_tl_hash = NULL;
struct hlist_head *new_ee_hash = NULL;
struct drbd_conf *odev;
char hmac_name[CRYPTO_MAX_ALG_NAME];
void *int_dig_out = NULL;
@@ -1494,24 +1492,6 @@ static int drbd_nl_net_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
}
}

ns = new_conf->max_epoch_size/8;
if (mdev->tl_hash_s != ns) {
new_tl_hash = kzalloc(ns*sizeof(void *), GFP_KERNEL);
if (!new_tl_hash) {
retcode = ERR_NOMEM;
goto fail;
}
}

ns = new_conf->max_buffers/8;
if (new_conf->two_primaries && (mdev->ee_hash_s != ns)) {
new_ee_hash = kzalloc(ns*sizeof(void *), GFP_KERNEL);
if (!new_ee_hash) {
retcode = ERR_NOMEM;
goto fail;
}
}

((char *)new_conf->shared_secret)[SHARED_SECRET_MAX-1] = 0;

if (integrity_w_tfm) {
@@ -1552,18 +1532,6 @@ static int drbd_nl_net_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
mdev->send_cnt = 0;
mdev->recv_cnt = 0;

if (new_tl_hash) {
kfree(mdev->tl_hash);
mdev->tl_hash_s = mdev->net_conf->max_epoch_size/8;
mdev->tl_hash = new_tl_hash;
}

if (new_ee_hash) {
kfree(mdev->ee_hash);
mdev->ee_hash_s = mdev->net_conf->max_buffers/8;
mdev->ee_hash = new_ee_hash;
}

crypto_free_hash(mdev->cram_hmac_tfm);
mdev->cram_hmac_tfm = tfm;

@@ -1594,8 +1562,6 @@ static int drbd_nl_net_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
crypto_free_hash(tfm);
crypto_free_hash(integrity_w_tfm);
crypto_free_hash(integrity_r_tfm);
kfree(new_tl_hash);
kfree(new_ee_hash);
kfree(new_conf);

reply->ret_code = retcode;
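
For illustration only (not part of this commit): the dropped drbd_nl_net_conf() logic sized the tables at one bucket per eight requests (max_epoch_size/8 for tl_hash, max_buffers/8 for ee_hash, so a value of 2048 meant 256 buckets), allocated any replacement array up front so that an allocation failure (ERR_NOMEM) left the old tables untouched, and swapped the new array in, freeing the old one, only after the new configuration was accepted. A stand-alone sketch of that allocate-then-swap pattern (simplified types, not drbd's code):

#include <stdlib.h>

struct table {
	void **slots;
	unsigned int nr;
};

/* Allocate the replacement first; on failure the old table stays usable.
 * Only once the caller is past every other failure point is the swap done
 * (in drbd this happened under req_lock) and the old array freed. */
static int table_resize(struct table *t, unsigned int new_nr)
{
	void **new_slots;

	if (t->nr == new_nr)
		return 0;			/* nothing to do */

	new_slots = calloc(new_nr, sizeof(*new_slots));
	if (!new_slots)
		return -1;			/* ERR_NOMEM equivalent */

	free(t->slots);
	t->slots = new_slots;
	t->nr = new_nr;
	return 0;
}

int main(void)
{
	struct table tl = { 0 };
	int err = table_resize(&tl, 2048 / 8);	/* e.g. max_epoch_size 2048 -> 256 buckets */

	free(tl.slots);
	return err ? 1 : 0;
}
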
drivers/block/drbd/drbd_receiver.c: 27 changes (6 additions, 21 deletions)
@@ -333,7 +333,6 @@ struct drbd_epoch_entry *drbd_alloc_ee(struct drbd_conf *mdev,
if (!page)
goto fail;

INIT_HLIST_NODE(&e->collision);
drbd_clear_interval(&e->i);
e->epoch = NULL;
e->mdev = mdev;
@@ -361,7 +360,6 @@ void drbd_free_some_ee(struct drbd_conf *mdev, struct drbd_epoch_entry *e, int i
kfree(e->digest);
drbd_pp_free(mdev, e->pages, is_net);
D_ASSERT(atomic_read(&e->pending_bios) == 0);
D_ASSERT(hlist_unhashed(&e->collision));
D_ASSERT(drbd_interval_empty(&e->i));
mempool_free(e, drbd_ee_mempool);
}
@@ -1419,7 +1417,6 @@ static int e_end_resync_block(struct drbd_conf *mdev, struct drbd_work *w, int u
sector_t sector = e->i.sector;
int ok;

D_ASSERT(hlist_unhashed(&e->collision));
D_ASSERT(drbd_interval_empty(&e->i));

if (likely((e->flags & EE_WAS_ERROR) == 0)) {
@@ -1575,16 +1572,12 @@ static int e_end_block(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
* P_WRITE_ACK / P_NEG_ACK, to get the sequence number right. */
if (mdev->net_conf->two_primaries) {
spin_lock_irq(&mdev->req_lock);
D_ASSERT(!hlist_unhashed(&e->collision));
hlist_del_init(&e->collision);
D_ASSERT(!drbd_interval_empty(&e->i));
drbd_remove_interval(&mdev->epoch_entries, &e->i);
drbd_clear_interval(&e->i);
spin_unlock_irq(&mdev->req_lock);
} else {
D_ASSERT(hlist_unhashed(&e->collision));
} else
D_ASSERT(drbd_interval_empty(&e->i));
}

drbd_may_finish_epoch(mdev, e->epoch, EV_PUT + (cancel ? EV_CLEANUP : 0));

@@ -1600,8 +1593,6 @@ static int e_send_discard_ack(struct drbd_conf *mdev, struct drbd_work *w, int u
ok = drbd_send_ack(mdev, P_DISCARD_ACK, e);

spin_lock_irq(&mdev->req_lock);
D_ASSERT(!hlist_unhashed(&e->collision));
hlist_del_init(&e->collision);
D_ASSERT(!drbd_interval_empty(&e->i));
drbd_remove_interval(&mdev->epoch_entries, &e->i);
drbd_clear_interval(&e->i);
@@ -1734,23 +1725,20 @@ static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
int first;

D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_C);
BUG_ON(mdev->ee_hash == NULL);
BUG_ON(mdev->tl_hash == NULL);

/* conflict detection and handling:
* 1. wait on the sequence number,
* in case this data packet overtook ACK packets.
* 2. check our hash tables for conflicting requests.
* we only need to walk the tl_hash, since an ee can not
* have a conflict with an other ee: on the submitting
* node, the corresponding req had already been conflicting,
* and a conflicting req is never sent.
* 2. check our interval trees for conflicting requests:
* we only need to check the write_requests tree; the
* epoch_entries tree cannot contain any overlaps because
* they were already eliminated on the submitting node.
*
* Note: for two_primaries, we are protocol C,
* so there cannot be any request that is DONE
* but still on the transfer log.
*
* unconditionally add to the ee_hash.
* unconditionally add to the epoch_entries tree.
*
* if no conflicting request is found:
* submit.
@@ -1776,7 +1764,6 @@ static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned

spin_lock_irq(&mdev->req_lock);

hlist_add_head(&e->collision, ee_hash_slot(mdev, sector));
drbd_insert_interval(&mdev->epoch_entries, &e->i);

first = 1;
@@ -1827,7 +1814,6 @@ static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
}

if (signal_pending(current)) {
hlist_del_init(&e->collision);
drbd_remove_interval(&mdev->epoch_entries, &e->i);
drbd_clear_interval(&e->i);

@@ -1887,7 +1873,6 @@ static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
dev_err(DEV, "submit failed, triggering re-connect\n");
spin_lock_irq(&mdev->req_lock);
list_del(&e->w.list);
hlist_del_init(&e->collision);
drbd_remove_interval(&mdev->epoch_entries, &e->i);
drbd_clear_interval(&e->i);
spin_unlock_irq(&mdev->req_lock);
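
For illustration only (not part of this commit): the rewritten receive_Data() comment above spells out the new conflict check: the incoming peer write is added to the epoch_entries tree unconditionally, and only the write_requests tree needs to be searched for overlapping local application writes. A stand-alone sketch of the overlap test such a lookup performs; the linear scan stands in for the tree walk, and the names are illustrative, not drbd's API:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct sector_range {
	unsigned long long sector;	/* start sector */
	unsigned int size;		/* length in sectors */
};

/* Two half-open sector ranges conflict iff they overlap. */
static bool ranges_overlap(const struct sector_range *a, const struct sector_range *b)
{
	return a->sector < b->sector + b->size &&
	       b->sector < a->sector + a->size;
}

/* Stand-in for the write_requests interval tree: pending local writes. */
static const struct sector_range pending_local_writes[] = {
	{ 1024, 8 },
	{ 4096, 16 },
};

static const struct sector_range *find_conflict(const struct sector_range *peer)
{
	for (size_t i = 0; i < sizeof(pending_local_writes) / sizeof(pending_local_writes[0]); i++)
		if (ranges_overlap(&pending_local_writes[i], peer))
			return &pending_local_writes[i];
	return NULL;
}

int main(void)
{
	struct sector_range peer = { 1028, 8 };	/* overlaps the write at sector 1024 */
	const struct sector_range *c = find_conflict(&peer);

	if (c)	/* drbd would now resolve the conflict instead of submitting */
		printf("conflict with local write at sector %llu\n", c->sector);
	else
		printf("no conflict, submit\n");
	return 0;
}
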
