[PATCH 4.14 225/228] dm cache: pass cache structure to mode functions

From: Greg Kroah-Hartman
Date: Thu Aug 20 2020 - 06:17:56 EST


From: Mike Snitzer <snitzer@xxxxxxxxxx>

commit 8e3c3827776fc93728c0c8d7c7b731226dc6ee23 upstream.

No functional change; this is just a bit cleaner than passing the
cache_features structure.

Signed-off-by: Mike Snitzer <snitzer@xxxxxxxxxx>
Signed-off-by: Greg Kroah-Hartman <gregkh@xxxxxxxxxxxxxxxxxxx>

---
drivers/md/dm-cache-target.c | 32 ++++++++++++++++----------------
1 file changed, 16 insertions(+), 16 deletions(-)
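For readers following along outside the kernel tree, here is a minimal,
self-contained sketch of the pattern this patch applies: the mode helpers now
take the enclosing struct cache rather than its embedded cache_features
member, so every call site reads as writethrough_mode(cache) instead of
writethrough_mode(&cache->features). The enum and struct definitions below
are simplified stand-ins for the real ones in drivers/md/dm-cache-target.c,
just enough to make the sketch compile on its own.

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-ins for the kernel's definitions. */
enum cache_io_mode {
	CM_IO_WRITEBACK,
	CM_IO_WRITETHROUGH,
	CM_IO_PASSTHROUGH,
};

struct cache_features {
	enum cache_io_mode io_mode;
};

struct cache {
	struct cache_features features;
};

/* After the patch: the helper dereferences the member itself. */
static bool writethrough_mode(struct cache *cache)
{
	return cache->features.io_mode == CM_IO_WRITETHROUGH;
}

int main(void)
{
	struct cache cache = { .features = { .io_mode = CM_IO_WRITETHROUGH } };

	/* Call sites no longer need to reach into cache->features. */
	printf("writethrough: %s\n", writethrough_mode(&cache) ? "yes" : "no");
	return 0;
}

Keeping the member dereference inside the helper means that if io_mode ever
moves, only the three mode helpers change, not every caller.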

--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -515,19 +515,19 @@ struct dm_cache_migration {

/*----------------------------------------------------------------*/

-static bool writethrough_mode(struct cache_features *f)
+static bool writethrough_mode(struct cache *cache)
{
- return f->io_mode == CM_IO_WRITETHROUGH;
+ return cache->features.io_mode == CM_IO_WRITETHROUGH;
}

-static bool writeback_mode(struct cache_features *f)
+static bool writeback_mode(struct cache *cache)
{
- return f->io_mode == CM_IO_WRITEBACK;
+ return cache->features.io_mode == CM_IO_WRITEBACK;
}

-static inline bool passthrough_mode(struct cache_features *f)
+static inline bool passthrough_mode(struct cache *cache)
{
- return unlikely(f->io_mode == CM_IO_PASSTHROUGH);
+ return unlikely(cache->features.io_mode == CM_IO_PASSTHROUGH);
}

/*----------------------------------------------------------------*/
@@ -544,7 +544,7 @@ static void wake_deferred_writethrough_w

static void wake_migration_worker(struct cache *cache)
{
- if (passthrough_mode(&cache->features))
+ if (passthrough_mode(cache))
return;

queue_work(cache->wq, &cache->migration_worker);
@@ -626,7 +626,7 @@ static unsigned lock_level(struct bio *b

static size_t get_per_bio_data_size(struct cache *cache)
{
- return writethrough_mode(&cache->features) ? PB_DATA_SIZE_WT : PB_DATA_SIZE_WB;
+ return writethrough_mode(cache) ? PB_DATA_SIZE_WT : PB_DATA_SIZE_WB;
}

static struct per_bio_data *get_per_bio_data(struct bio *bio, size_t data_size)
@@ -1209,7 +1209,7 @@ static bool bio_writes_complete_block(st

static bool optimisable_bio(struct cache *cache, struct bio *bio, dm_oblock_t block)
{
- return writeback_mode(&cache->features) &&
+ return writeback_mode(cache) &&
(is_discarded_oblock(cache, block) || bio_writes_complete_block(cache, bio));
}

@@ -1862,7 +1862,7 @@ static int map_bio(struct cache *cache,
* Passthrough always maps to the origin, invalidating any
* cache blocks that are written to.
*/
- if (passthrough_mode(&cache->features)) {
+ if (passthrough_mode(cache)) {
if (bio_data_dir(bio) == WRITE) {
bio_drop_shared_lock(cache, bio);
atomic_inc(&cache->stats.demotion);
@@ -1871,7 +1871,7 @@ static int map_bio(struct cache *cache,
remap_to_origin_clear_discard(cache, bio, block);

} else {
- if (bio_data_dir(bio) == WRITE && writethrough_mode(&cache->features) &&
+ if (bio_data_dir(bio) == WRITE && writethrough_mode(cache) &&
!is_dirty(cache, cblock)) {
remap_to_origin_then_cache(cache, bio, block, cblock);
accounted_begin(cache, bio);
@@ -2649,7 +2649,7 @@ static int cache_create(struct cache_arg
goto bad;
}

- if (passthrough_mode(&cache->features)) {
+ if (passthrough_mode(cache)) {
bool all_clean;

r = dm_cache_metadata_all_clean(cache->cmd, &all_clean);
@@ -3279,13 +3279,13 @@ static void cache_status(struct dm_targe
else
DMEMIT("1 ");

- if (writethrough_mode(&cache->features))
+ if (writethrough_mode(cache))
DMEMIT("writethrough ");

- else if (passthrough_mode(&cache->features))
+ else if (passthrough_mode(cache))
DMEMIT("passthrough ");

- else if (writeback_mode(&cache->features))
+ else if (writeback_mode(cache))
DMEMIT("writeback ");

else {
@@ -3451,7 +3451,7 @@ static int process_invalidate_cblocks_me
unsigned i;
struct cblock_range range;

- if (!passthrough_mode(&cache->features)) {
+ if (!passthrough_mode(cache)) {
DMERR("%s: cache has to be in passthrough mode for invalidation",
cache_device_name(cache));
return -EPERM;