dm vdo: remove remaining ring references
Lists are the new rings, so update all remaining references to rings to
talk about lists.

Signed-off-by: Sweet Tea Dorminy <sweettea-kernel@dorminy.me>
Signed-off-by: Matthew Sakai <msakai@redhat.com>
Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
Sweet Tea Dorminy authored and Mikulas Patocka committed Feb 24, 2025
1 parent 51ba14f commit ff3f711
Showing 7 changed files with 22 additions and 22 deletions.
2 changes: 1 addition & 1 deletion drivers/md/dm-vdo/block-map.c
@@ -451,7 +451,7 @@ static struct page_info * __must_check find_page(struct vdo_page_cache *cache,
* select_lru_page() - Determine which page is least recently used.
*
* Picks the least recently used from among the non-busy entries at the front of each of the lru
- * ring. Since whenever we mark a page busy we also put it to the end of the ring it is unlikely
+ * list. Since whenever we mark a page busy we also put it to the end of the list it is unlikely
* that the entries at the front are busy unless the queue is very short, but not impossible.
*
* Return: A pointer to the info structure for a relevant page, or NULL if no such page can be
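As an aside on the comment above: the LRU discipline it describes is the usual kernel-list pattern of moving touched entries to the tail so that stale ones drift to the front. A minimal sketch, assuming <linux/list.h> and hypothetical field names (lru_entry, busy) rather than the actual vdo page-cache layout:

#include <linux/list.h>
#include <linux/types.h>

struct lru_page_sketch {
	struct list_head lru_entry;  /* link in the cache's LRU list */
	bool busy;                   /* set while the page is in use */
};

/* Marking a page busy (or touching it) sends it to the tail of the list. */
static void touch_page(struct list_head *lru_list, struct lru_page_sketch *page)
{
	page->busy = true;
	list_move_tail(&page->lru_entry, lru_list);
}

/* The least recently used, non-busy page is therefore found near the front. */
static struct lru_page_sketch *select_lru_page_sketch(struct list_head *lru_list)
{
	struct lru_page_sketch *page;

	list_for_each_entry(page, lru_list, lru_entry) {
		if (!page->busy)
			return page;
	}

	return NULL;
}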
20 changes: 10 additions & 10 deletions drivers/md/dm-vdo/dedupe.c
@@ -226,7 +226,7 @@ struct hash_lock {
* A list containing the data VIOs sharing this lock, all having the same record name and
* data block contents, linked by their hash_lock_node fields.
*/
- struct list_head duplicate_ring;
+ struct list_head duplicate_vios;

/* The number of data_vios sharing this lock instance */
data_vio_count_t reference_count;
@@ -343,7 +343,7 @@ static void return_hash_lock_to_pool(struct hash_zone *zone, struct hash_lock *l
{
memset(lock, 0, sizeof(*lock));
INIT_LIST_HEAD(&lock->pool_node);
- INIT_LIST_HEAD(&lock->duplicate_ring);
+ INIT_LIST_HEAD(&lock->duplicate_vios);
vdo_waitq_init(&lock->waiters);
list_add_tail(&lock->pool_node, &zone->lock_pool);
}
@@ -441,7 +441,7 @@ static void set_hash_lock(struct data_vio *data_vio, struct hash_lock *new_lock)
VDO_ASSERT_LOG_ONLY(data_vio->hash_zone != NULL,
"must have a hash zone when holding a hash lock");
VDO_ASSERT_LOG_ONLY(!list_empty(&data_vio->hash_lock_entry),
"must be on a hash lock ring when holding a hash lock");
"must be on a hash lock list when holding a hash lock");
VDO_ASSERT_LOG_ONLY(old_lock->reference_count > 0,
"hash lock reference must be counted");

@@ -464,10 +464,10 @@ static void set_hash_lock(struct data_vio *data_vio, struct hash_lock *new_lock)

if (new_lock != NULL) {
/*
- * Keep all data_vios sharing the lock on a ring since they can complete in any
+ * Keep all data_vios sharing the lock on a list since they can complete in any
* order and we'll always need a pointer to one to compare data.
*/
- list_move_tail(&data_vio->hash_lock_entry, &new_lock->duplicate_ring);
+ list_move_tail(&data_vio->hash_lock_entry, &new_lock->duplicate_vios);
new_lock->reference_count += 1;
if (new_lock->max_references < new_lock->reference_count)
new_lock->max_references = new_lock->reference_count;
@@ -1789,10 +1789,10 @@ static bool is_hash_collision(struct hash_lock *lock, struct data_vio *candidate
struct hash_zone *zone;
bool collides;

- if (list_empty(&lock->duplicate_ring))
+ if (list_empty(&lock->duplicate_vios))
return false;

- lock_holder = list_first_entry(&lock->duplicate_ring, struct data_vio,
+ lock_holder = list_first_entry(&lock->duplicate_vios, struct data_vio,
hash_lock_entry);
zone = candidate->hash_zone;
collides = !blocks_equal(lock_holder->vio.data, candidate->vio.data);
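The renamed duplicate_vios list is the load-bearing structure in both hunks above: every data_vio sharing a lock sits on it, so any member (conveniently, the first) can supply the data to compare against a newcomer. A rough sketch of that pattern with simplified stand-in types, not the real vdo definitions:

#include <linux/list.h>
#include <linux/string.h>
#include <linux/types.h>

struct vio_sketch {
	struct list_head hash_lock_entry;  /* link in the owning lock's list */
	const void *data;                  /* the block contents this vio carries */
};

struct hash_lock_sketch {
	struct list_head duplicate_vios;   /* all vios sharing this lock */
	unsigned int reference_count;
};

/* Sharers can complete in any order, so they all stay on one list. */
static void add_sharer(struct hash_lock_sketch *lock, struct vio_sketch *vio)
{
	list_move_tail(&vio->hash_lock_entry, &lock->duplicate_vios);
	lock->reference_count++;
}

/* Any remaining member can be used to detect a hash collision. */
static bool candidate_collides(struct hash_lock_sketch *lock,
			       const struct vio_sketch *candidate, size_t block_size)
{
	struct vio_sketch *holder;

	if (list_empty(&lock->duplicate_vios))
		return false;

	holder = list_first_entry(&lock->duplicate_vios, struct vio_sketch,
				  hash_lock_entry);
	return memcmp(holder->data, candidate->data, block_size) != 0;
}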
@@ -1815,7 +1815,7 @@ static inline int assert_hash_lock_preconditions(const struct data_vio *data_vio
return result;

result = VDO_ASSERT(list_empty(&data_vio->hash_lock_entry),
"must not already be a member of a hash lock ring");
"must not already be a member of a hash lock list");
if (result != VDO_SUCCESS)
return result;

@@ -1942,8 +1942,8 @@ void vdo_release_hash_lock(struct data_vio *data_vio)
"returned hash lock must not be in use with state %s",
get_hash_lock_state_name(lock->state));
VDO_ASSERT_LOG_ONLY(list_empty(&lock->pool_node),
"hash lock returned to zone must not be in a pool ring");
VDO_ASSERT_LOG_ONLY(list_empty(&lock->duplicate_ring),
"hash lock returned to zone must not be in a pool list");
VDO_ASSERT_LOG_ONLY(list_empty(&lock->duplicate_vios),
"hash lock returned to zone must not reference DataVIOs");

return_hash_lock_to_pool(zone, lock);
2 changes: 1 addition & 1 deletion drivers/md/dm-vdo/packer.h
@@ -46,7 +46,7 @@ struct compressed_block {

/*
* Each packer_bin holds an incomplete batch of data_vios that only partially fill a compressed
- * block. The bins are kept in a ring sorted by the amount of unused space so the first bin with
+ * block. The bins are kept in a list sorted by the amount of unused space so the first bin with
* enough space to hold a newly-compressed data_vio can easily be found. When the bin fills up or
* is flushed, the first uncanceled data_vio in the bin is selected to be the agent for that bin.
* Upon entering the packer, each data_vio already has its compressed data in the first slot of the
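The packer comment above leans on a sorted list to make "first bin with enough space" a single forward walk. A sketch of that lookup, under the assumption that bins expose a free-space count and a list link (illustrative names, not the actual packer_bin fields):

#include <linux/list.h>
#include <linux/types.h>

struct bin_sketch {
	struct list_head list_node;  /* entry in the packer's sorted bin list */
	size_t free_space;           /* unused bytes left in this bin's block */
};

/* If the list is sorted by ascending free space, the first fit found here is
 * also the tightest fit available. */
static struct bin_sketch *select_bin(struct list_head *bins, size_t size)
{
	struct bin_sketch *bin;

	list_for_each_entry(bin, bins, list_node) {
		if (bin->free_space >= size)
			return bin;
	}

	return NULL;  /* no partially-filled bin can take this data_vio */
}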
2 changes: 1 addition & 1 deletion drivers/md/dm-vdo/priority-table.c
@@ -199,7 +199,7 @@ void vdo_priority_table_remove(struct priority_table *table, struct list_head *e

/*
* Remove the entry from the bucket list, remembering a pointer to another entry in the
- * ring.
+ * list.
*/
next_entry = entry->next;
list_del_init(entry);
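The "remembering a pointer to another entry" step above matters because list_del_init() re-points the removed node at itself, destroying any view of the rest of the bucket. A small sketch of the rationale, not the actual vdo_priority_table_remove logic:

#include <linux/list.h>
#include <linux/types.h>

/* Returns true if removing @entry left its list empty. The neighbor must be
 * saved first: after list_del_init(), @entry only points at itself. */
static bool remove_and_check_empty(struct list_head *entry)
{
	struct list_head *next_entry = entry->next;

	list_del_init(entry);

	/* If @entry was the only element, its old neighbor was the list head,
	 * and that head is now an empty list. */
	return list_empty(next_entry);
}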
6 changes: 3 additions & 3 deletions drivers/md/dm-vdo/recovery-journal.h
@@ -43,9 +43,9 @@
* has a vio which is used to commit that block to disk. The vio's data is the on-disk
* representation of the journal block. In addition each in-memory block has a buffer which is used
* to accumulate entries while a partial commit of the block is in progress. In-memory blocks are
- * kept on two rings. Free blocks live on the 'free_tail_blocks' ring. When a block becomes active
- * (see below) it is moved to the 'active_tail_blocks' ring. When a block is fully committed, it is
- * moved back to the 'free_tail_blocks' ring.
+ * kept on two lists. Free blocks live on the 'free_tail_blocks' list. When a block becomes active
+ * (see below) it is moved to the 'active_tail_blocks' list. When a block is fully committed, it is
+ * moved back to the 'free_tail_blocks' list.
*
* When entries are added to the journal, they are added to the active in-memory block, as
* indicated by the 'active_block' field. If the caller wishes to wait for the entry to be
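The two-list scheme described above is plain list_move_tail() traffic between "free" and "active". A minimal sketch with illustrative names, not the actual recovery-journal structures:

#include <linux/list.h>

struct journal_block_sketch {
	struct list_head list_node;
};

struct journal_sketch {
	struct list_head free_tail_blocks;    /* committed or never-used blocks */
	struct list_head active_tail_blocks;  /* blocks accumulating entries */
};

/* A block becomes active: move it from the free list to the active list. */
static void activate_block(struct journal_sketch *journal,
			   struct journal_block_sketch *block)
{
	list_move_tail(&block->list_node, &journal->active_tail_blocks);
}

/* A block is fully committed: it goes back on the free list for reuse. */
static void retire_block(struct journal_sketch *journal,
			 struct journal_block_sketch *block)
{
	list_move_tail(&block->list_node, &journal->free_tail_blocks);
}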
10 changes: 5 additions & 5 deletions drivers/md/dm-vdo/slab-depot.c
@@ -139,7 +139,7 @@ static bool is_slab_journal_blank(const struct vdo_slab *slab)
}

/**
- * mark_slab_journal_dirty() - Put a slab journal on the dirty ring of its allocator in the correct
+ * mark_slab_journal_dirty() - Put a slab journal on the dirty list of its allocator in the correct
* order.
* @journal: The journal to be marked dirty.
* @lock: The recovery journal lock held by the slab journal.
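"In the correct order" above implies a sorted insertion into the allocator's dirty list. One common way to do that with the kernel list API, sketched with assumed field names (recovery_lock, dirty_entry) rather than the exact vdo ones:

#include <linux/list.h>
#include <linux/types.h>

struct slab_journal_sketch {
	struct list_head dirty_entry;  /* link in the allocator's dirty list */
	u64 recovery_lock;             /* the recovery-journal lock this journal holds */
};

/* Walk backward until an entry with an older or equal lock is found, then
 * insert just after it, keeping the list ordered by recovery_lock. */
static void mark_dirty_sketch(struct list_head *dirty_list,
			      struct slab_journal_sketch *journal, u64 lock)
{
	struct slab_journal_sketch *other;

	journal->recovery_lock = lock;
	list_for_each_entry_reverse(other, dirty_list, dirty_entry) {
		if (other->recovery_lock <= journal->recovery_lock)
			break;
	}

	/* If no such entry exists, &other->dirty_entry is the list head itself
	 * and the journal lands at the front, which is still in order. */
	list_move_tail(&journal->dirty_entry, other->dirty_entry.next);
}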
@@ -821,7 +821,7 @@ static void commit_tail(struct slab_journal *journal)

/*
* Since we are about to commit the tail block, this journal no longer needs to be on the
- * ring of journals which the recovery journal might ask to commit.
+ * list of journals which the recovery journal might ask to commit.
*/
mark_slab_journal_clean(journal);

@@ -1371,7 +1371,7 @@ static unsigned int calculate_slab_priority(struct vdo_slab *slab)
static void prioritize_slab(struct vdo_slab *slab)
{
VDO_ASSERT_LOG_ONLY(list_empty(&slab->allocq_entry),
"a slab must not already be on a ring when prioritizing");
"a slab must not already be on a list when prioritizing");
slab->priority = calculate_slab_priority(slab);
vdo_priority_table_enqueue(slab->allocator->prioritized_slabs,
slab->priority, &slab->allocq_entry);
@@ -2562,7 +2562,7 @@ static void queue_slab(struct vdo_slab *slab)
int result;

VDO_ASSERT_LOG_ONLY(list_empty(&slab->allocq_entry),
"a requeued slab must not already be on a ring");
"a requeued slab must not already be on a list");

if (vdo_is_read_only(allocator->depot->vdo))
return;
@@ -3297,7 +3297,7 @@ int vdo_release_block_reference(struct block_allocator *allocator,
* This is a min_heap callback function orders slab_status structures using the 'is_clean' field as
* the primary key and the 'emptiness' field as the secondary key.
*
- * Slabs need to be pushed onto the rings in the same order they are to be popped off. Popping
+ * Slabs need to be pushed onto the lists in the same order they are to be popped off. Popping
* should always get the most empty first, so pushing should be from most empty to least empty.
* Thus, the ordering is reversed from the usual sense since min_heap returns smaller elements
* before larger ones.
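The reversed ordering the comment above describes boils down to a "less than" callback in which cleaner and emptier slabs compare as smaller, so the min_heap pops them first. A sketch of that comparison, with an assumed struct layout modeled on the comment rather than the real slab_status:

#include <linux/types.h>

struct slab_status_sketch {
	bool is_clean;
	u8 emptiness;
};

/* Inverted sense: "smaller" means cleaner, then emptier, so a min_heap
 * returns the most desirable slabs first. */
static bool slab_status_less_than(const void *item1, const void *item2)
{
	const struct slab_status_sketch *a = item1;
	const struct slab_status_sketch *b = item2;

	if (a->is_clean != b->is_clean)
		return a->is_clean;          /* clean sorts before dirty */

	return a->emptiness > b->emptiness;  /* emptier sorts before fuller */
}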
2 changes: 1 addition & 1 deletion drivers/md/dm-vdo/wait-queue.c
@@ -34,7 +34,7 @@ void vdo_waitq_enqueue_waiter(struct vdo_wait_queue *waitq, struct vdo_waiter *w
waitq->last_waiter->next_waiter = waiter;
}

- /* In both cases, the waiter we added to the ring becomes the last waiter. */
+ /* In both cases, the waiter we added to the list becomes the last waiter. */
waitq->last_waiter = waiter;
waitq->length += 1;
}
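For context around the hunk above: a vdo_wait_queue is a circular, singly linked list in which last_waiter->next_waiter is the first waiter. The sketch below fills in the empty-queue branch as an assumption for completeness; it is not a quote of the vdo source, and the type names are stand-ins.

#include <linux/stddef.h>
#include <linux/types.h>

struct waiter_sketch {
	struct waiter_sketch *next_waiter;
};

struct waitq_sketch {
	struct waiter_sketch *last_waiter;  /* its next_waiter is the head */
	size_t length;
};

static void enqueue_waiter_sketch(struct waitq_sketch *waitq,
				  struct waiter_sketch *waiter)
{
	if (waitq->last_waiter == NULL) {
		/* Empty queue: the waiter links to itself to start the circle. */
		waiter->next_waiter = waiter;
	} else {
		/* Splice in behind the last waiter, keeping the circle closed. */
		waiter->next_waiter = waitq->last_waiter->next_waiter;
		waitq->last_waiter->next_waiter = waiter;
	}

	/* Either way, the waiter just added becomes the last waiter. */
	waitq->last_waiter = waiter;
	waitq->length += 1;
}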
