drivers/md/bcache/writeback.h · mariux64/linux @ 0b312f8 · 111 lines (84 loc) · 2.51 KB
#ifndef _BCACHE_WRITEBACK_H
#define _BCACHE_WRITEBACK_H

/*
 * Cache-space usage thresholds (%): non-sync bios go through writeback only
 * below CUTOFF_WRITEBACK; REQ_SYNC bios are allowed up to CUTOFF_WRITEBACK_SYNC.
 */
#define CUTOFF_WRITEBACK	40
#define CUTOFF_WRITEBACK_SYNC	70

/* Sum of dirty sectors across all stripes of a bcache device. */
static inline uint64_t bcache_dev_sectors_dirty(struct bcache_device *d)
{
	uint64_t i, ret = 0;

	for (i = 0; i < d->nr_stripes; i++)
		ret += atomic_read(d->stripe_sectors_dirty + i);

	return ret;
}

/* Total dirty sectors of all flash-only devices in the cache set. */
static inline uint64_t bcache_flash_devs_sectors_dirty(struct cache_set *c)
{
	uint64_t i, ret = 0;

	mutex_lock(&bch_register_lock);

	for (i = 0; i < c->nr_uuids; i++) {
		struct bcache_device *d = c->devices[i];

		if (!d || !UUID_FLASH_ONLY(&c->uuids[i]))
			continue;
		ret += bcache_dev_sectors_dirty(d);
	}

	mutex_unlock(&bch_register_lock);

	return ret;
}

static inline unsigned offset_to_stripe(struct bcache_device *d,
					uint64_t offset)
{
	do_div(offset, d->stripe_size);
	return offset;
}

/* True if any stripe touched by [offset, offset + nr_sectors) has dirty data. */
static inline bool bcache_dev_stripe_dirty(struct cached_dev *dc,
					   uint64_t offset,
					   unsigned nr_sectors)
{
	unsigned stripe = offset_to_stripe(&dc->disk, offset);

	while (1) {
		if (atomic_read(dc->disk.stripe_sectors_dirty + stripe))
			return true;

		if (nr_sectors <= dc->disk.stripe_size)
			return false;

		nr_sectors -= dc->disk.stripe_size;
		stripe++;
	}
}

/* Decide whether this bio should be cached via writeback rather than bypassed. */
static inline bool should_writeback(struct cached_dev *dc, struct bio *bio,
				    unsigned cache_mode, bool would_skip)
{
	unsigned in_use = dc->disk.c->gc_stats.in_use;

	if (cache_mode != CACHE_MODE_WRITEBACK ||
	    test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) ||
	    in_use > CUTOFF_WRITEBACK_SYNC)
		return false;

	if (dc->partial_stripes_expensive &&
	    bcache_dev_stripe_dirty(dc, bio->bi_iter.bi_sector,
				    bio_sectors(bio)))
		return true;

	if (would_skip)
		return false;

	return bio->bi_rw & REQ_SYNC ||
		in_use <= CUTOFF_WRITEBACK;
}

static inline void bch_writeback_queue(struct cached_dev *dc)
{
	if (!IS_ERR_OR_NULL(dc->writeback_thread))
		wake_up_process(dc->writeback_thread);
}

/* Mark the cached device dirty (updating the superblock state if needed) and wake writeback. */
static inline void bch_writeback_add(struct cached_dev *dc)
{
	if (!atomic_read(&dc->has_dirty) &&
	    !atomic_xchg(&dc->has_dirty, 1)) {
		atomic_inc(&dc->count);

		if (BDEV_STATE(&dc->sb) != BDEV_STATE_DIRTY) {
			SET_BDEV_STATE(&dc->sb, BDEV_STATE_DIRTY);
			/* XXX: should do this synchronously */
			bch_write_bdev_super(dc, NULL);
		}

		bch_writeback_queue(dc);
	}
}

void bcache_dev_sectors_dirty_add(struct cache_set *, unsigned, uint64_t, int);

void bch_sectors_dirty_init(struct bcache_device *);
void bch_cached_dev_writeback_init(struct cached_dev *);
int bch_cached_dev_writeback_start(struct cached_dev *);

#endif